diff --git a/grsecurity/Kconfig b/grsecurity/Kconfig new file mode 100644 index 0000000000..f1c1ee2847 --- /dev/null +++ b/grsecurity/Kconfig @@ -0,0 +1,1206 @@ +# +# grsecurity configuration +# +menu "Memory Protections" +depends on GRKERNSEC + +config GRKERNSEC_KMEM + bool "Deny reading/writing to /dev/kmem, /dev/mem, and /dev/port" + default y if GRKERNSEC_CONFIG_AUTO + select STRICT_DEVMEM if (X86 || ARM || TILE || S390) + help + If you say Y here, /dev/kmem and /dev/mem won't be allowed to + be written to or read from to modify or leak the contents of the running + kernel. /dev/port will also not be allowed to be opened, writing to + /dev/cpu/*/msr will be prevented, and support for kexec will be removed. + If you have module support disabled, enabling this will close up several + ways that are currently used to insert malicious code into the running + kernel. + + Even with this feature enabled, we still highly recommend that + you use the RBAC system, as it is still possible for an attacker to + modify the running kernel through other more obscure methods. + + Enabling this feature will prevent the "cpupower" and "powertop" tools + from working and excludes debugfs from being compiled into the kernel. + + It is highly recommended that you say Y here if you meet all the + conditions above. + +config GRKERNSEC_VM86 + bool "Restrict VM86 mode" + default y if (GRKERNSEC_CONFIG_AUTO && GRKERNSEC_CONFIG_SERVER) + depends on X86_32 + + help + If you say Y here, only processes with CAP_SYS_RAWIO will be able to + make use of a special execution mode on 32bit x86 processors called + Virtual 8086 (VM86) mode. XFree86 may need vm86 mode for certain + video cards and will still work with this option enabled. The purpose + of the option is to prevent exploitation of emulation errors in + virtualization of vm86 mode like the one discovered in VMWare in 2009. + Nearly all users should be able to enable this option.
+ +config GRKERNSEC_IO + bool "Disable privileged I/O" + default y if (GRKERNSEC_CONFIG_AUTO && GRKERNSEC_CONFIG_SERVER) + depends on X86 + select RTC_CLASS + select RTC_INTF_DEV + select RTC_DRV_CMOS + + help + If you say Y here, all ioperm and iopl calls will return an error. + Ioperm and iopl can be used to modify the running kernel. + Unfortunately, some programs need this access to operate properly, + the most notable of which are XFree86 and hwclock. hwclock can be + remedied by having RTC support in the kernel, so real-time + clock support is enabled if this option is enabled, to ensure + that hwclock operates correctly. If hwclock still does not work, + either update udev or symlink /dev/rtc to /dev/rtc0. + + If you're using XFree86 or a version of Xorg from 2012 or earlier, + you may not be able to boot into a graphical environment with this + option enabled. In this case, you should use the RBAC system instead. + +config GRKERNSEC_BPF_HARDEN + bool "Harden BPF interpreter" + default y if GRKERNSEC_CONFIG_AUTO + help + Unlike previous versions of grsecurity that hardened both the BPF + interpreted code against corruption at rest as well as the JIT code + against JIT-spray attacks and attacker-controlled immediate values + for ROP, this feature will enforce disabling of the new eBPF JIT engine + and will ensure the interpreted code is read-only at rest. This feature + may be removed at a later time when eBPF stabilizes to entirely revert + back to the more secure pre-3.16 BPF interpreter/JIT. + + If you're using KERNEXEC, it's recommended that you enable this option + to supplement the hardening of the kernel. + +config GRKERNSEC_PERF_HARDEN + bool "Disable unprivileged PERF_EVENTS usage by default" + default y if GRKERNSEC_CONFIG_AUTO + depends on PERF_EVENTS + help + If you say Y here, the range of acceptable values for the + /proc/sys/kernel/perf_event_paranoid sysctl will be expanded to allow and + default to a new value: 3. 
When the sysctl is set to this value, no + unprivileged use of the PERF_EVENTS syscall interface will be permitted. + + Though PERF_EVENTS can be used legitimately for performance monitoring + and low-level application profiling, it is forced on regardless of + configuration, has been at fault for several vulnerabilities, and + creates new opportunities for side channels and other information leaks. + + This feature puts PERF_EVENTS into a secure default state and permits + the administrator to change out of it temporarily if unprivileged + application profiling is needed. + +config GRKERNSEC_RAND_THREADSTACK + bool "Insert random gaps between thread stacks" + default y if GRKERNSEC_CONFIG_AUTO + depends on PAX_RANDMMAP && !PPC + help + If you say Y here, a random-sized gap will be enforced between allocated + thread stacks. Glibc's NPTL and other threading libraries that + pass MAP_STACK to the kernel for thread stack allocation are supported. + The implementation currently provides 8 bits of entropy for the gap. + + Many distributions do not compile threaded remote services with the + -fstack-check argument to GCC, causing the variable-sized stack-based + allocator, alloca(), to not probe the stack on allocation. This + permits an unbounded alloca() to skip over any guard page and potentially + modify another thread's stack reliably. An enforced random gap + reduces the reliability of such an attack and increases the chance + that such a read/write to another thread's stack instead lands in + an unmapped area, causing a crash and triggering grsecurity's + anti-bruteforcing logic. 
+ +config GRKERNSEC_PROC_MEMMAP + bool "Harden ASLR against information leaks and entropy reduction" + default y if (GRKERNSEC_CONFIG_AUTO || PAX_NOEXEC || PAX_ASLR) + depends on PAX_NOEXEC || PAX_ASLR + help + If you say Y here, the /proc/<pid>/maps and /proc/<pid>/stat files will + give no information about the addresses of its mappings if + PaX features that rely on random addresses are enabled on the task. + In addition to sanitizing this information and disabling other + dangerous sources of information, this option causes reads of sensitive + /proc/<pid> entries where the file descriptor was opened in a different + task than the one performing the read. Such attempts are logged. + This option also limits argv/env strings for suid/sgid binaries + to 512KB to prevent a complete exhaustion of the stack entropy provided + by ASLR. Finally, it places an 8MB stack resource limit on suid/sgid + binaries to prevent alternative mmap layouts from being abused. + + If you use PaX it is essential that you say Y here as it closes up + several holes that make full ASLR useless locally. + + +config GRKERNSEC_KSTACKOVERFLOW + bool "Prevent kernel stack overflows" + default y if GRKERNSEC_CONFIG_AUTO + depends on X86_64 + help + If you say Y here, the kernel's process stacks will be allocated + with vmalloc instead of the kernel's default allocator. This + introduces guard pages that in combination with the alloca checking + of the STACKLEAK feature and removal of thread_info from the kernel + stack prevents all forms of kernel process stack overflow abuse. + Note that this is different from kernel stack buffer overflows. + +config GRKERNSEC_BRUTE + bool "Deter exploit bruteforcing" + default y if GRKERNSEC_CONFIG_AUTO + help + If you say Y here, attempts to bruteforce exploits against forking + daemons such as apache or sshd, as well as against suid/sgid binaries + will be deterred.
When a child of a forking daemon is killed by PaX + or crashes due to an illegal instruction or other suspicious signal, + the parent process will be delayed 30 seconds upon every subsequent + fork until the administrator is able to assess the situation and + restart the daemon. + In the suid/sgid case, the attempt is logged, the user has all their + existing instances of the suid/sgid binary terminated and will + be unable to execute any suid/sgid binaries for 15 minutes. + + It is recommended that you also enable signal logging in the auditing + section so that logs are generated when a process triggers a suspicious + signal. + If the sysctl option is enabled, a sysctl option with name + "deter_bruteforce" is created. + +config GRKERNSEC_MODHARDEN + bool "Harden module auto-loading" + default y if GRKERNSEC_CONFIG_AUTO + depends on MODULES + help + If you say Y here, module auto-loading in response to use of some + feature implemented by an unloaded module will be restricted to + root users. Enabling this option helps defend against attacks + by unprivileged users who abuse the auto-loading behavior to + cause a vulnerable module to load that is then exploited. + + If this option prevents a legitimate use of auto-loading for a + non-root user, the administrator can execute modprobe manually + with the exact name of the module mentioned in the alert log. + Alternatively, the administrator can add the module to the list + of modules loaded at boot by modifying init scripts. + + Modification of init scripts will most likely be needed on + Ubuntu servers with encrypted home directory support enabled, + as the first non-root user logging in will cause the ecb(aes), + ecb(aes)-all, cbc(aes), and cbc(aes)-all modules to be loaded. 
+ +config GRKERNSEC_HIDESYM + bool "Hide kernel symbols" + default y if GRKERNSEC_CONFIG_AUTO + select PAX_USERCOPY + help + If you say Y here, getting information on loaded modules, and + displaying all kernel symbols through a syscall will be restricted + to users with CAP_SYS_MODULE. For software compatibility reasons, + /proc/kallsyms will be restricted to the root user. The RBAC + system can hide that entry even from root. + + This option also prevents leaking of kernel addresses through + several /proc entries. + + Note that this option is only effective provided the following + conditions are met: + 1) The kernel using grsecurity is not precompiled by some distribution + 2) You have also enabled GRKERNSEC_DMESG + 3) You are using the RBAC system and hiding other files such as your + kernel image and System.map. Alternatively, enabling this option + causes the permissions on /boot, /lib/modules, and the kernel + source directory to change at compile time to prevent + reading by non-root users. + If the above conditions are met, this option will aid in providing a + useful protection against local kernel exploitation of overflows + and arbitrary read/write vulnerabilities. + + It is highly recommended that you enable GRKERNSEC_PERF_HARDEN + in addition to this feature. + +config GRKERNSEC_RANDSTRUCT + bool "Randomize layout of sensitive kernel structures" + default y if GRKERNSEC_CONFIG_AUTO + depends on GCC_PLUGINS + select GRKERNSEC_HIDESYM + select MODVERSIONS if MODULES + help + If you say Y here, the layouts of a number of sensitive kernel + structures (task, fs, cred, etc) and all structures composed entirely + of function pointers (aka "ops" structs) will be randomized at compile-time. + This can introduce the requirement of an additional infoleak + vulnerability for exploits targeting these structure types. 
+ + Enabling this feature will introduce some performance impact, slightly + increase memory usage, and prevent the use of forensic tools like + Volatility against the system (unless the kernel source tree isn't + cleaned after kernel installation). + + The seed used for compilation is located at tools/gcc/randomize_layout_seed.h. + It remains after a make clean to allow for external modules to be compiled + with the existing seed and will be removed by a make mrproper or + make distclean. + + Note that the implementation requires gcc 4.6.4 or newer. You may need + to install the supporting headers explicitly in addition to the normal + gcc package. + +config GRKERNSEC_RANDSTRUCT_PERFORMANCE + bool "Use cacheline-aware structure randomization" + depends on GRKERNSEC_RANDSTRUCT + default y if GRKERNSEC_CONFIG_PRIORITY_PERF + help + If you say Y here, the RANDSTRUCT randomization will make a best effort + at restricting randomization to cacheline-sized groups of elements. It + will further not randomize bitfields in structures. This reduces the + performance hit of RANDSTRUCT at the cost of weakened randomization. + +config GRKERNSEC_KERN_LOCKOUT + bool "Active kernel exploit response" + default y if GRKERNSEC_CONFIG_AUTO + depends on X86 || ARM || PPC || SPARC + help + If you say Y here, when a PaX alert is triggered due to suspicious + activity in the kernel (from KERNEXEC/UDEREF/USERCOPY) + or an OOPS occurs due to bad memory accesses, instead of just + terminating the offending process (and potentially allowing + a subsequent exploit from the same user), we will take one of two + actions: + If the user was root, we will panic the system + If the user was non-root, we will log the attempt, terminate + all processes owned by the user, then prevent them from creating + any new processes until the system is restarted + This deters repeated kernel exploitation/bruteforcing attempts + and is useful for later forensics.
+ +config GRKERNSEC_OLD_ARM_USERLAND + bool "Old ARM userland compatibility" + depends on ARM && (CPU_V6 || CPU_V6K || CPU_V7) + help + If you say Y here, stubs of executable code to perform such operations + as "compare-exchange" will be placed at fixed locations in the ARM vector + table. This is unfortunately needed for old ARM userland meant to run + across a wide range of processors. Without this option enabled, + the get_tls and data memory barrier stubs will be emulated by the kernel, + which is enough for Linaro userlands or other userlands designed for v6 + and newer ARM CPUs. It's recommended that you try without this option enabled + first, and only enable it if your userland does not boot (it will likely fail + at init time). + +endmenu +menu "Role Based Access Control Options" +depends on GRKERNSEC + +config GRKERNSEC_RBAC_DEBUG + bool + +config GRKERNSEC_NO_RBAC + bool "Disable RBAC system" + help + If you say Y here, the /dev/grsec device will be removed from the kernel, + preventing the RBAC system from being enabled. You should only say Y + here if you have no intention of using the RBAC system, so as to prevent + an attacker with root access from misusing the RBAC system to hide files + and processes when loadable module support and /dev/[k]mem have been + locked down. + +config GRKERNSEC_ACL_HIDEKERN + bool "Hide kernel processes" + help + If you say Y here, all kernel threads will be hidden to all + processes but those whose subject has the "view hidden processes" + flag. + +config GRKERNSEC_ACL_MAXTRIES + int "Maximum tries before password lockout" + default 3 + help + This option enforces the maximum number of times a user can attempt + to authorize themselves with the grsecurity RBAC system before being + denied the ability to attempt authorization again for a specified time. + The lower the number, the harder it will be to brute-force a password. 
+ +config GRKERNSEC_ACL_TIMEOUT + int "Time to wait after max password tries, in seconds" + default 30 + help + This option specifies the time the user must wait after attempting to + authorize to the RBAC system with the maximum number of invalid + passwords. The higher the number, the harder it will be to brute-force + a password. + +endmenu +menu "Filesystem Protections" +depends on GRKERNSEC + +config GRKERNSEC_PROC + bool "Proc restrictions" + default y if GRKERNSEC_CONFIG_AUTO + help + If you say Y here, the permissions of the /proc filesystem + will be altered to enhance system security and privacy. You MUST + choose either a user only restriction or a user and group restriction. + Depending upon the option you choose, you can either restrict users to + see only the processes they themselves run, or choose a group that can + view all processes and files normally restricted to root if you choose + the "restrict to user only" option. NOTE: If you're running identd or + ntpd as a non-root user, you will have to run it as the group you + specify here. + +config GRKERNSEC_PROC_USER + bool "Restrict /proc to user only" + depends on GRKERNSEC_PROC + help + If you say Y here, non-root users will only be able to view their own + processes, and restricts them from viewing network-related information, + and viewing kernel symbol and module information. + +config GRKERNSEC_PROC_USERGROUP + bool "Allow special group" + default y if GRKERNSEC_CONFIG_AUTO + depends on GRKERNSEC_PROC && !GRKERNSEC_PROC_USER + help + If you say Y here, you will be able to select a group that will be + able to view all processes and network-related information. If you've + enabled GRKERNSEC_HIDESYM, kernel and symbol information may still + remain hidden. This option is useful if you want to run identd as + a non-root user. The group you select may also be chosen at boot time + via "grsec_proc_gid=" on the kernel commandline. 
+ +config GRKERNSEC_PROC_GID + int "GID for special group" + depends on GRKERNSEC_PROC_USERGROUP + default 1001 + +config GRKERNSEC_PROC_ADD + bool "Additional restrictions" + default y if GRKERNSEC_CONFIG_AUTO + depends on GRKERNSEC_PROC_USER || GRKERNSEC_PROC_USERGROUP + help + If you say Y here, additional restrictions will be placed on + /proc that keep normal users from viewing device information and + slabinfo information that could be useful for exploits. + +config GRKERNSEC_LINK + bool "Linking restrictions" + default y if GRKERNSEC_CONFIG_AUTO + help + If you say Y here, /tmp race exploits will be prevented, since users + will no longer be able to follow symlinks owned by other users in + world-writable +t directories (e.g. /tmp), unless the owner of the + symlink is the owner of the directory. users will also not be + able to hardlink to files they do not own. If the sysctl option is + enabled, a sysctl option with name "linking_restrictions" is created. + +config GRKERNSEC_SYMLINKOWN + bool "Kernel-enforced SymlinksIfOwnerMatch" + default y if GRKERNSEC_CONFIG_AUTO && GRKERNSEC_CONFIG_SERVER + help + Apache's SymlinksIfOwnerMatch option has an inherent race condition + that prevents it from being used as a security feature. As Apache + verifies the symlink by performing a stat() against the target of + the symlink before it is followed, an attacker can setup a symlink + to point to a same-owned file, then replace the symlink with one + that targets another user's file just after Apache "validates" the + symlink -- a classic TOCTOU race. If you say Y here, a complete, + race-free replacement for Apache's "SymlinksIfOwnerMatch" option + will be in place for the group you specify. If the sysctl option + is enabled, a sysctl option with name "enforce_symlinksifowner" is + created. 
+ +config GRKERNSEC_SYMLINKOWN_GID + int "GID for users with kernel-enforced SymlinksIfOwnerMatch" + depends on GRKERNSEC_SYMLINKOWN + default 1006 + help + Setting this GID determines what group kernel-enforced + SymlinksIfOwnerMatch will be enabled for. If the sysctl option + is enabled, a sysctl option with name "symlinkown_gid" is created. + +config GRKERNSEC_FIFO + bool "FIFO restrictions" + default y if GRKERNSEC_CONFIG_AUTO + help + If you say Y here, users will not be able to write to FIFOs they don't + own in world-writable +t directories (e.g. /tmp), unless the owner of + the FIFO is the same owner of the directory it's held in. If the sysctl + option is enabled, a sysctl option with name "fifo_restrictions" is + created. + +config GRKERNSEC_SYSFS_RESTRICT + bool "Sysfs/debugfs restriction" + default y if (GRKERNSEC_CONFIG_AUTO && GRKERNSEC_CONFIG_SERVER) + depends on SYSFS + help + If you say Y here, sysfs (the pseudo-filesystem mounted at /sys) and + any filesystem normally mounted under it (e.g. debugfs) will be + mostly accessible only by root. These filesystems generally provide access + to hardware and debug information that isn't appropriate for unprivileged + users of the system. Sysfs and debugfs have also become a large source + of new vulnerabilities, ranging from infoleaks to local compromise. + There has been very little oversight with an eye toward security involved + in adding new exporters of information to these filesystems, so their + use is discouraged. + For reasons of compatibility, a few directories have been whitelisted + for access by non-root users: + /sys/fs/selinux + /sys/fs/fuse + /sys/devices/system/cpu + +config GRKERNSEC_ROFS + bool "Runtime read-only mount protection" + depends on SYSCTL + help + If you say Y here, a sysctl option with name "romount_protect" will + be created. 
By setting this option to 1 at runtime, filesystems + will be protected in the following ways: + * No new writable mounts will be allowed + * Existing read-only mounts won't be able to be remounted read/write + * Write operations will be denied on all block devices + This option acts independently of grsec_lock: once it is set to 1, + it cannot be turned off. Therefore, please be mindful of the resulting + behavior if this option is enabled in an init script on a read-only + filesystem. + Also be aware that as with other root-focused features, GRKERNSEC_KMEM + and GRKERNSEC_IO should be enabled and module loading disabled via + config or at runtime. + This feature is mainly intended for secure embedded systems. + + +config GRKERNSEC_DEVICE_SIDECHANNEL + bool "Eliminate stat/notify-based device sidechannels" + default y if GRKERNSEC_CONFIG_AUTO + help + If you say Y here, timing analyses on block or character + devices like /dev/ptmx using stat or inotify/dnotify/fanotify + will be thwarted for unprivileged users. If a process without + CAP_MKNOD stats such a device, the last access and last modify times + will match the device's create time. No access or modify events + will be triggered through inotify/dnotify/fanotify for such devices. + This feature will prevent attacks that may at a minimum + allow an attacker to determine the administrator's password length. + +config GRKERNSEC_CHROOT + bool "Chroot jail restrictions" + default y if GRKERNSEC_CONFIG_AUTO + help + If you say Y here, you will be able to choose several options that will + make breaking out of a chrooted jail much more difficult. If you + encounter no software incompatibilities with the following options, it + is recommended that you enable each one. + + Note that the chroot restrictions are not intended to apply to "chroots" + to directories that are simple bind mounts of the global root filesystem. 
+ + For several other reasons, a user shouldn't expect any significant + security by performing such a chroot. + +config GRKERNSEC_CHROOT_MOUNT + bool "Deny mounts" + default y if GRKERNSEC_CONFIG_AUTO + depends on GRKERNSEC_CHROOT + help + If you say Y here, processes inside a chroot will not be able to + mount or remount filesystems. If the sysctl option is enabled, a + sysctl option with name "chroot_deny_mount" is created. + +config GRKERNSEC_CHROOT_DOUBLE + bool "Deny double-chroots" + default y if GRKERNSEC_CONFIG_AUTO + depends on GRKERNSEC_CHROOT + help + If you say Y here, processes inside a chroot will not be able to chroot + again outside the chroot. This is a widely used method of breaking + out of a chroot jail and should not be allowed. If the sysctl + option is enabled, a sysctl option with name + "chroot_deny_chroot" is created. + +config GRKERNSEC_CHROOT_PIVOT + bool "Deny pivot_root in chroot" + default y if GRKERNSEC_CONFIG_AUTO + depends on GRKERNSEC_CHROOT + help + If you say Y here, processes inside a chroot will not be able to use + a function called pivot_root() that was introduced in Linux 2.3.41. It + works similar to chroot in that it changes the root filesystem. This + function could be misused in a chrooted process to attempt to break out + of the chroot, and therefore should not be allowed. If the sysctl + option is enabled, a sysctl option with name "chroot_deny_pivot" is + created. + +config GRKERNSEC_CHROOT_CHDIR + bool "Enforce chdir(\"/\") on all chroots" + default y if GRKERNSEC_CONFIG_AUTO + depends on GRKERNSEC_CHROOT + help + If you say Y here, the current working directory of all newly-chrooted + applications will be set to the root directory of the chroot. + The man page on chroot(2) states: + Note that this call does not change the current working + directory, so that `.' can be outside the tree rooted at + `/'. In particular, the super-user can escape from a + `chroot jail' by doing `mkdir foo; chroot foo; cd ..'.
+ + It is recommended that you say Y here, since it's not known to break + any software. If the sysctl option is enabled, a sysctl option with + name "chroot_enforce_chdir" is created. + +config GRKERNSEC_CHROOT_CHMOD + bool "Deny (f)chmod +s" + default y if GRKERNSEC_CONFIG_AUTO + depends on GRKERNSEC_CHROOT + help + If you say Y here, processes inside a chroot will not be able to chmod + or fchmod files to make them have suid or sgid bits. This protects + against another published method of breaking a chroot. If the sysctl + option is enabled, a sysctl option with name "chroot_deny_chmod" is + created. + +config GRKERNSEC_CHROOT_FCHDIR + bool "Deny fchdir and fhandle out of chroot" + default y if GRKERNSEC_CONFIG_AUTO + depends on GRKERNSEC_CHROOT + help + If you say Y here, a well-known method of breaking chroots by fchdir'ing + to a file descriptor of the chrooting process that points to a directory + outside the filesystem will be stopped. This option also prevents use of + the recently-created syscall for opening files by a guessable "file handle" + inside a chroot, as well as accessing relative paths outside of a + directory passed in via file descriptor with openat and similar syscalls. + If the sysctl option is enabled, a sysctl option with name "chroot_deny_fchdir" + is created. + +config GRKERNSEC_CHROOT_MKNOD + bool "Deny mknod" + default y if GRKERNSEC_CONFIG_AUTO + depends on GRKERNSEC_CHROOT + help + If you say Y here, processes inside a chroot will not be allowed to + mknod. The problem with using mknod inside a chroot is that it + would allow an attacker to create a device entry that is the same + as one on the physical root of your system, which could range from + anything from the console device to a device for your harddrive (which + they could then use to wipe the drive or steal data). It is recommended + that you say Y here, unless you run into software incompatibilities. 
+ If the sysctl option is enabled, a sysctl option with name + "chroot_deny_mknod" is created. + +config GRKERNSEC_CHROOT_SHMAT + bool "Deny shmat() out of chroot" + default y if GRKERNSEC_CONFIG_AUTO + depends on GRKERNSEC_CHROOT + help + If you say Y here, processes inside a chroot will not be able to attach + to shared memory segments that were created outside of the chroot jail. + It is recommended that you say Y here. If the sysctl option is enabled, + a sysctl option with name "chroot_deny_shmat" is created. + +config GRKERNSEC_CHROOT_UNIX + bool "Deny access to abstract AF_UNIX sockets out of chroot" + default y if GRKERNSEC_CONFIG_AUTO + depends on GRKERNSEC_CHROOT + help + If you say Y here, processes inside a chroot will not be able to + connect to abstract (meaning not belonging to a filesystem) Unix + domain sockets that were bound outside of a chroot. It is recommended + that you say Y here. If the sysctl option is enabled, a sysctl option + with name "chroot_deny_unix" is created. + +config GRKERNSEC_CHROOT_FINDTASK + bool "Protect outside processes" + default y if GRKERNSEC_CONFIG_AUTO + depends on GRKERNSEC_CHROOT + help + If you say Y here, processes inside a chroot will not be able to + kill, send signals with fcntl, ptrace, capget, getpgid, setpgid, + getsid, or view any process outside of the chroot. If the sysctl + option is enabled, a sysctl option with name "chroot_findtask" is + created. + +config GRKERNSEC_CHROOT_NICE + bool "Restrict priority changes" + default y if GRKERNSEC_CONFIG_AUTO + depends on GRKERNSEC_CHROOT + help + If you say Y here, processes inside a chroot will not be able to raise + the priority of processes in the chroot, or alter the priority of + processes outside the chroot. This provides more security than simply + removing CAP_SYS_NICE from the process' capability set. If the + sysctl option is enabled, a sysctl option with name "chroot_restrict_nice" + is created. 
+ +config GRKERNSEC_CHROOT_SYSCTL + bool "Deny sysctl writes" + default y if GRKERNSEC_CONFIG_AUTO + depends on GRKERNSEC_CHROOT + help + If you say Y here, an attacker in a chroot will not be able to + write to sysctl entries, either by sysctl(2) or through a /proc + interface. It is strongly recommended that you say Y here. If the + sysctl option is enabled, a sysctl option with name + "chroot_deny_sysctl" is created. + +config GRKERNSEC_CHROOT_RENAME + bool "Deny bad renames" + default y if GRKERNSEC_CONFIG_AUTO + depends on GRKERNSEC_CHROOT + help + If you say Y here, an attacker in a chroot will not be able to + abuse the ability to create double chroots to break out of the + chroot by exploiting a race condition between a rename of a directory + within a chroot against an open of a symlink with relative path + components. This feature will likewise prevent an accomplice outside + a chroot from enabling a user inside the chroot to break out and make + use of their credentials on the global filesystem. Enabling this + feature is essential to prevent root users from breaking out of a + chroot. If the sysctl option is enabled, a sysctl option with name + "chroot_deny_bad_rename" is created. + +config GRKERNSEC_CHROOT_CAPS + bool "Capability restrictions" + default y if GRKERNSEC_CONFIG_AUTO + depends on GRKERNSEC_CHROOT + help + If you say Y here, the capabilities on all processes within a + chroot jail will be lowered to stop module insertion, raw i/o, + system and net admin tasks, rebooting the system, modifying immutable + files, modifying IPC owned by another, and changing the system time. + This is left an option because it can break some apps. Disable this + if your chrooted apps are having problems performing those kinds of + tasks. If the sysctl option is enabled, a sysctl option with + name "chroot_caps" is created. 
+ +config GRKERNSEC_CHROOT_INITRD + bool "Exempt initrd tasks from restrictions" + default y if GRKERNSEC_CONFIG_AUTO + depends on GRKERNSEC_CHROOT && BLK_DEV_INITRD + help + If you say Y here, tasks started prior to init will be exempted from + grsecurity's chroot restrictions. This option is mainly meant to + resolve Plymouth's performing privileged operations unnecessarily + in a chroot. + +endmenu +menu "Kernel Auditing" +depends on GRKERNSEC + +config GRKERNSEC_AUDIT_GROUP + bool "Single group for auditing" + help + If you say Y here, the exec and chdir logging features will only operate + on a group you specify. This option is recommended if you only want to + watch certain users instead of having a large amount of logs from the + entire system. If the sysctl option is enabled, a sysctl option with + name "audit_group" is created. + +config GRKERNSEC_AUDIT_GID + int "GID for auditing" + depends on GRKERNSEC_AUDIT_GROUP + default 1007 + +config GRKERNSEC_EXECLOG + bool "Exec logging" + help + If you say Y here, all execve() calls will be logged (since the + other exec*() calls are frontends to execve(), all execution + will be logged). Useful for shell-servers that like to keep track + of their users. If the sysctl option is enabled, a sysctl option with + name "exec_logging" is created. + WARNING: This option when enabled will produce a LOT of logs, especially + on an active system. + +config GRKERNSEC_RESLOG + bool "Resource logging" + default y if GRKERNSEC_CONFIG_AUTO + help + If you say Y here, all attempts to overstep resource limits will + be logged with the resource name, the requested size, and the current + limit. It is highly recommended that you say Y here. If the sysctl + option is enabled, a sysctl option with name "resource_logging" is + created. If the RBAC system is enabled, the sysctl value is ignored. 
+ +config GRKERNSEC_CHROOT_EXECLOG + bool "Log execs within chroot" + help + If you say Y here, all executions inside a chroot jail will be logged + to syslog. This can cause a large amount of logs if certain + applications (eg. djb's daemontools) are installed on the system, and + is therefore left as an option. If the sysctl option is enabled, a + sysctl option with name "chroot_execlog" is created. + +config GRKERNSEC_AUDIT_PTRACE + bool "Ptrace logging" + help + If you say Y here, all attempts to attach to a process via ptrace + will be logged. If the sysctl option is enabled, a sysctl option + with name "audit_ptrace" is created. + +config GRKERNSEC_AUDIT_CHDIR + bool "Chdir logging" + help + If you say Y here, all chdir() calls will be logged. If the sysctl + option is enabled, a sysctl option with name "audit_chdir" is created. + +config GRKERNSEC_AUDIT_MOUNT + bool "(Un)Mount logging" + help + If you say Y here, all mounts and unmounts will be logged. If the + sysctl option is enabled, a sysctl option with name "audit_mount" is + created. + +config GRKERNSEC_SIGNAL + bool "Signal logging" + default y if GRKERNSEC_CONFIG_AUTO + help + If you say Y here, certain important signals will be logged, such as + SIGSEGV, which will as a result inform you of when an error in a program + occurred, which in some cases could mean a possible exploit attempt. + If the sysctl option is enabled, a sysctl option with name + "signal_logging" is created. + +config GRKERNSEC_FORKFAIL + bool "Fork failure logging" + help + If you say Y here, all failed fork() attempts will be logged. + This could suggest a fork bomb, or someone attempting to overstep + their process limit. If the sysctl option is enabled, a sysctl option + with name "forkfail_logging" is created. + +config GRKERNSEC_TIME + bool "Time change logging" + default y if GRKERNSEC_CONFIG_AUTO + help + If you say Y here, any changes of the system clock will be logged.
+ If the sysctl option is enabled, a sysctl option with name + "timechange_logging" is created. + +config GRKERNSEC_PROC_IPADDR + bool "/proc/<pid>/ipaddr support" + default y if GRKERNSEC_CONFIG_AUTO + help + If you say Y here, a new entry will be added to each /proc/<pid> + directory that contains the IP address of the person using the task. + The IP is carried across local TCP and AF_UNIX stream sockets. + This information can be useful for IDS/IPSes to perform remote response + to a local attack. The entry is readable by only the owner of the + process (and root if he has CAP_DAC_OVERRIDE, which can be removed via + the RBAC system), and thus does not create privacy concerns. + +config GRKERNSEC_RWXMAP_LOG + bool 'Denied RWX mmap/mprotect logging' + default y if GRKERNSEC_CONFIG_AUTO + depends on PAX_MPROTECT && !PAX_EMUPLT && !PAX_EMUSIGRT + help + If you say Y here, calls to mmap() and mprotect() with explicit + usage of PROT_WRITE and PROT_EXEC together will be logged when + denied by the PAX_MPROTECT feature. This feature will also + log other problematic scenarios that can occur when PAX_MPROTECT + is enabled on a binary, like textrels and PT_GNU_STACK. If the + sysctl option is enabled, a sysctl option with name "rwxmap_logging" + is created. + +endmenu + +menu "Executable Protections" +depends on GRKERNSEC + +config GRKERNSEC_DMESG + bool "Dmesg(8) restriction" + default y if GRKERNSEC_CONFIG_AUTO + help + If you say Y here, non-root users will not be able to use dmesg(8) + to view the contents of the kernel's circular log buffer. + The kernel's log buffer often contains kernel addresses and other + identifying information useful to an attacker in fingerprinting a + system for a targeted exploit. + If the sysctl option is enabled, a sysctl option with name "dmesg" is + created. 
+ +config GRKERNSEC_HARDEN_PTRACE + bool "Deter ptrace-based process snooping" + default y if GRKERNSEC_CONFIG_AUTO + help + If you say Y here, TTY sniffers and other malicious monitoring + programs implemented through ptrace will be defeated. If you + have been using the RBAC system, this option has already been + enabled for several years for all users, with the ability to make + fine-grained exceptions. + + This option only affects the ability of non-root users to ptrace + processes that are not a descendent of the ptracing process. + This means that strace ./binary and gdb ./binary will still work, + but attaching to arbitrary processes will not. If the sysctl + option is enabled, a sysctl option with name "harden_ptrace" is + created. + +config GRKERNSEC_PTRACE_READEXEC + bool "Require read access to ptrace sensitive binaries" + default y if GRKERNSEC_CONFIG_AUTO + help + If you say Y here, unprivileged users will not be able to ptrace unreadable + binaries. This option is useful in environments that + remove the read bits (e.g. file mode 4711) from suid binaries to + prevent infoleaking of their contents. This option adds + consistency to the use of that file mode, as the binary could normally + be read out when run without privileges while ptracing. + + If the sysctl option is enabled, a sysctl option with name "ptrace_readexec" + is created. + +config GRKERNSEC_SETXID + bool "Enforce consistent multithreaded privileges" + default y if GRKERNSEC_CONFIG_AUTO + depends on (X86 || SPARC64 || PPC || ARM || MIPS) + help + If you say Y here, a change from a root uid to a non-root uid + in a multithreaded application will cause the resulting uids, + gids, supplementary groups, and capabilities in that thread + to be propagated to the other threads of the process. In most + cases this is unnecessary, as glibc will emulate this behavior + on behalf of the application. 
Other libcs do not act in the + same way, allowing the other threads of the process to continue + running with root privileges. If the sysctl option is enabled, + a sysctl option with name "consistent_setxid" is created. + +config GRKERNSEC_HARDEN_IPC + bool "Disallow access to overly-permissive IPC objects" + default y if GRKERNSEC_CONFIG_AUTO + depends on SYSVIPC + help + If you say Y here, access to overly-permissive IPC objects (shared + memory, message queues, and semaphores) will be denied for processes + given the following criteria beyond normal permission checks: + 1) If the IPC object is world-accessible and the euid doesn't match + that of the creator or current uid for the IPC object + 2) If the IPC object is group-accessible and the egid doesn't + match that of the creator or current gid for the IPC object + It's a common error to grant too much permission to these objects, + with impact ranging from denial of service and information leaking to + privilege escalation. This feature was developed in response to + research by Tim Brown: + http://labs.portcullis.co.uk/whitepapers/memory-squatting-attacks-on-system-v-shared-memory/ + who found hundreds of such insecure usages. Processes with + CAP_IPC_OWNER are still permitted to access these IPC objects. + If the sysctl option is enabled, a sysctl option with name + "harden_ipc" is created. + +config GRKERNSEC_HARDEN_TTY + bool "Disallow unprivileged use of command injection" + default y if GRKERNSEC_CONFIG_AUTO + help + If you say Y here, the ability to use the TIOCSTI ioctl for + terminal command injection will be denied for unprivileged users. + There are very few legitimate uses for this functionality and it + has made vulnerabilities in several 'su'-like programs possible in + the past. Even without these vulnerabilities, it provides an + attacker with an easy mechanism to move laterally among other + processes within the same user's compromised session. 
+ By default, Linux allows unprivileged use of command injection as + long as the injection is being performed into the same tty session. + This feature makes that case the same as attempting to inject into + another session, making any TIOCSTI use require CAP_SYS_ADMIN. + If the sysctl option is enabled, a sysctl option with name + "harden_tty" is created. + +config GRKERNSEC_TPE + bool "Trusted Path Execution (TPE)" + default y if GRKERNSEC_CONFIG_AUTO && GRKERNSEC_CONFIG_SERVER + help + If you say Y here, you will be able to choose a gid to add to the + supplementary groups of users you want to mark as "untrusted." + These users will not be able to execute any files that are not in + root-owned directories writable only by root. If the sysctl option + is enabled, a sysctl option with name "tpe" is created. + +config GRKERNSEC_TPE_ALL + bool "Partially restrict all non-root users" + depends on GRKERNSEC_TPE + help + If you say Y here, all non-root users will be covered under + a weaker TPE restriction. This is separate from, and in addition to, + the main TPE options that you have selected elsewhere. Thus, if a + "trusted" GID is chosen, this restriction applies to even that GID. + Under this restriction, all non-root users will only be allowed to + execute files in directories they own that are not group or + world-writable, or in directories owned by root and writable only by + root. If the sysctl option is enabled, a sysctl option with name + "tpe_restrict_all" is created. + +config GRKERNSEC_TPE_INVERT + bool "Invert GID option" + depends on GRKERNSEC_TPE + help + If you say Y here, the group you specify in the TPE configuration will + decide what group TPE restrictions will be *disabled* for. This + option is useful if you want TPE restrictions to be applied to most + users on the system. If the sysctl option is enabled, a sysctl option + with name "tpe_invert" is created. 
Unlike other sysctl options, this + entry will default to on for backward-compatibility. + +config GRKERNSEC_TPE_GID + int + default GRKERNSEC_TPE_UNTRUSTED_GID if (GRKERNSEC_TPE && !GRKERNSEC_TPE_INVERT) + default GRKERNSEC_TPE_TRUSTED_GID if (GRKERNSEC_TPE && GRKERNSEC_TPE_INVERT) + +config GRKERNSEC_TPE_UNTRUSTED_GID + int "GID for TPE-untrusted users" + depends on GRKERNSEC_TPE && !GRKERNSEC_TPE_INVERT + default 1005 + help + Setting this GID determines what group TPE restrictions will be + *enabled* for. If the sysctl option is enabled, a sysctl option + with name "tpe_gid" is created. + +config GRKERNSEC_TPE_TRUSTED_GID + int "GID for TPE-trusted users" + depends on GRKERNSEC_TPE && GRKERNSEC_TPE_INVERT + default 1005 + help + Setting this GID determines what group TPE restrictions will be + *disabled* for. If the sysctl option is enabled, a sysctl option + with name "tpe_gid" is created. + +endmenu +menu "Network Protections" +depends on GRKERNSEC + +config GRKERNSEC_BLACKHOLE + bool "TCP/UDP blackhole and LAST_ACK DoS prevention" + default y if GRKERNSEC_CONFIG_AUTO + depends on NET + help + If you say Y here, neither TCP resets nor ICMP + destination-unreachable packets will be sent in response to packets + sent to ports for which no associated listening process exists. + It will also prevent the sending of ICMP protocol unreachable packets + in response to packets with unknown protocols. + This feature supports both IPV4 and IPV6 and exempts the + loopback interface from blackholing. Enabling this feature + makes a host more resilient to DoS attacks and reduces network + visibility against scanners. + + The blackhole feature as-implemented is equivalent to the FreeBSD + blackhole feature, as it prevents RST responses to all packets, not + just SYNs. 
Under most application behavior this causes no + problems, but applications (like haproxy) may not close certain + connections in a way that cleanly terminates them on the remote + end, leaving the remote host in LAST_ACK state. Because of this + side-effect and to prevent intentional LAST_ACK DoSes, this + feature also adds automatic mitigation against such attacks. + The mitigation drastically reduces the amount of time a socket + can spend in LAST_ACK state. If you're using haproxy and not + all servers it connects to have this option enabled, consider + disabling this feature on the haproxy host. + + If the sysctl option is enabled, two sysctl options with names + "ip_blackhole" and "lastack_retries" will be created. + While "ip_blackhole" takes the standard zero/non-zero on/off + toggle, "lastack_retries" uses the same kinds of values as + "tcp_retries1" and "tcp_retries2". The default value of 4 + prevents a socket from lasting more than 45 seconds in LAST_ACK + state. + +config GRKERNSEC_NO_SIMULT_CONNECT + bool "Disable TCP Simultaneous Connect" + default y if GRKERNSEC_CONFIG_AUTO + depends on NET + help + If you say Y here, a feature by Willy Tarreau will be enabled that + removes a weakness in Linux's strict implementation of TCP that + allows two clients to connect to each other without either entering + a listening state. The weakness allows an attacker to easily prevent + a client from connecting to a known server provided the source port + for the connection is guessed correctly. + + As the weakness could be used to prevent an antivirus or IPS from + fetching updates, or prevent an SSL gateway from fetching a CRL, + it should be eliminated by enabling this option. Though Linux is + one of few operating systems supporting simultaneous connect, it + has no legitimate use in practice and is rarely supported by firewalls. 
+ +config GRKERNSEC_SOCKET + bool "Socket restrictions" + depends on NET + help + If you say Y here, you will be able to choose from several options. + If you assign a GID on your system and add it to the supplementary + groups of users you want to restrict socket access to, this patch + will perform up to three things, based on the option(s) you choose. + +config GRKERNSEC_SOCKET_ALL + bool "Deny any sockets to group" + depends on GRKERNSEC_SOCKET + help + If you say Y here, you will be able to choose a GID of whose users will + be unable to connect to other hosts from your machine or run server + applications from your machine. If the sysctl option is enabled, a + sysctl option with name "socket_all" is created. + +config GRKERNSEC_SOCKET_ALL_GID + int "GID to deny all sockets for" + depends on GRKERNSEC_SOCKET_ALL + default 1004 + help + Here you can choose the GID to disable socket access for. Remember to + add the users you want socket access disabled for to the GID + specified here. If the sysctl option is enabled, a sysctl option + with name "socket_all_gid" is created. + +config GRKERNSEC_SOCKET_CLIENT + bool "Deny client sockets to group" + depends on GRKERNSEC_SOCKET + help + If you say Y here, you will be able to choose a GID of whose users will + be unable to connect to other hosts from your machine, but will be + able to run servers. If this option is enabled, all users in the group + you specify will have to use passive mode when initiating ftp transfers + from the shell on your machine. If the sysctl option is enabled, a + sysctl option with name "socket_client" is created. + +config GRKERNSEC_SOCKET_CLIENT_GID + int "GID to deny client sockets for" + depends on GRKERNSEC_SOCKET_CLIENT + default 1003 + help + Here you can choose the GID to disable client socket access for. + Remember to add the users you want client socket access disabled for to + the GID specified here. 
If the sysctl option is enabled, a sysctl + option with name "socket_client_gid" is created. + +config GRKERNSEC_SOCKET_SERVER + bool "Deny server sockets to group" + depends on GRKERNSEC_SOCKET + help + If you say Y here, you will be able to choose a GID of whose users will + be unable to run server applications from your machine. If the sysctl + option is enabled, a sysctl option with name "socket_server" is created. + +config GRKERNSEC_SOCKET_SERVER_GID + int "GID to deny server sockets for" + depends on GRKERNSEC_SOCKET_SERVER + default 1002 + help + Here you can choose the GID to disable server socket access for. + Remember to add the users you want server socket access disabled for to + the GID specified here. If the sysctl option is enabled, a sysctl + option with name "socket_server_gid" is created. + +endmenu + +menu "Physical Protections" +depends on GRKERNSEC + +config GRKERNSEC_DENYUSB + bool "Deny new USB connections after toggle" + default y if GRKERNSEC_CONFIG_AUTO + depends on SYSCTL && USB_SUPPORT + help + If you say Y here, a new sysctl option with name "deny_new_usb" + will be created. Setting its value to 1 will prevent any new + USB devices from being recognized by the OS. Any attempted USB + device insertion will be logged. This option is intended to be + used against custom USB devices designed to exploit vulnerabilities + in various USB device drivers. + + For greatest effectiveness, this sysctl should be set after any + relevant init scripts. This option is safe to enable in distros + as each user can choose whether or not to toggle the sysctl. + +config GRKERNSEC_DENYUSB_FORCE + bool "Reject all USB devices not connected at boot" + select USB + depends on GRKERNSEC_DENYUSB + help + If you say Y here, a variant of GRKERNSEC_DENYUSB will be enabled + that doesn't involve a sysctl entry. This option should only be + enabled if you're sure you want to deny all new USB connections + at runtime and don't want to modify init scripts. 
This should not + be enabled by distros. It forces the core USB code to be built + into the kernel image so that all devices connected at boot time + can be recognized and new USB device connections can be prevented + prior to init running. + +endmenu + +menu "Sysctl Support" +depends on GRKERNSEC && SYSCTL + +config GRKERNSEC_SYSCTL + bool "Sysctl support" + default y if GRKERNSEC_CONFIG_AUTO + help + If you say Y here, you will be able to change the options that + grsecurity runs with at bootup, without having to recompile your + kernel. You can echo values to files in /proc/sys/kernel/grsecurity + to enable (1) or disable (0) various features. All the sysctl entries + are mutable until the "grsec_lock" entry is set to a non-zero value. + All features enabled in the kernel configuration are disabled at boot + if you do not say Y to the "Turn on features by default" option. + All options should be set at startup, and the grsec_lock entry should + be set to a non-zero value after all the options are set. + *THIS IS EXTREMELY IMPORTANT* + +config GRKERNSEC_SYSCTL_DISTRO + bool "Extra sysctl support for distro makers (READ HELP)" + depends on GRKERNSEC_SYSCTL && GRKERNSEC_IO + help + If you say Y here, additional sysctl options will be created + for features that affect processes running as root. Therefore, + it is critical when using this option that the grsec_lock entry be + enabled after boot. Only distros with prebuilt kernel packages + with this option enabled that can ensure grsec_lock is enabled + after boot should use this option. 
+ *Failure to set grsec_lock after boot makes all grsec features + this option covers useless* + + Currently this option creates the following sysctl entries: + "Disable Privileged I/O": "disable_priv_io" + +config GRKERNSEC_SYSCTL_ON + bool "Turn on features by default" + default y if GRKERNSEC_CONFIG_AUTO + depends on GRKERNSEC_SYSCTL + help + If you say Y here, instead of having all features enabled in the + kernel configuration disabled at boot time, the features will be + enabled at boot time. It is recommended you say Y here unless + there is some reason you would want all sysctl-tunable features to + be disabled by default. As mentioned elsewhere, it is important + to enable the grsec_lock entry once you have finished modifying + the sysctl entries. + +endmenu +menu "Logging Options" +depends on GRKERNSEC + +config GRKERNSEC_FLOODTIME + int "Seconds in between log messages (minimum)" + default 10 + help + This option allows you to enforce the number of seconds between + grsecurity log messages. The default should be suitable for most + people, however, if you choose to change it, choose a value small enough + to allow informative logs to be produced, but large enough to + prevent flooding. + + Setting both this value and GRKERNSEC_FLOODBURST to 0 will disable + any rate limiting on grsecurity log messages. + +config GRKERNSEC_FLOODBURST + int "Number of messages in a burst (maximum)" + default 6 + help + This option allows you to choose the maximum number of messages allowed + within the flood time interval you chose in a separate option. The + default should be suitable for most people, however if you find that + many of your logs are being interpreted as flooding, you may want to + raise this value. + + Setting both this value and GRKERNSEC_FLOODTIME to 0 will disable + any rate limiting on grsecurity log messages. 
+ +endmenu diff --git a/grsecurity/Makefile b/grsecurity/Makefile new file mode 100644 index 0000000000..e136e5fd7b --- /dev/null +++ b/grsecurity/Makefile @@ -0,0 +1,54 @@ +# grsecurity - access control and security hardening for Linux +# All code in this directory and various hooks located throughout the Linux kernel are +# Copyright (C) 2001-2014 Bradley Spengler, Open Source Security, Inc. +# http://www.grsecurity.net spender@grsecurity.net +# +# This program is free software; you can redistribute it and/or +# modify it under the terms of the GNU General Public License version 2 +# as published by the Free Software Foundation. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program; if not, write to the Free Software +# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. 
+ +KBUILD_CFLAGS += -Werror + +obj-y = grsec_chdir.o grsec_chroot.o grsec_exec.o grsec_fifo.o grsec_fork.o \ + grsec_mount.o grsec_sig.o grsec_sysctl.o \ + grsec_time.o grsec_tpe.o grsec_link.o grsec_pax.o grsec_ptrace.o \ + grsec_usb.o grsec_ipc.o grsec_proc.o grsec_tty.o + +obj-$(CONFIG_GRKERNSEC) += grsec_init.o grsum.o gracl.o gracl_segv.o \ + gracl_cap.o gracl_alloc.o gracl_shm.o grsec_mem.o gracl_fs.o \ + gracl_learn.o grsec_log.o gracl_policy.o +ifdef CONFIG_COMPAT +obj-$(CONFIG_GRKERNSEC) += gracl_compat.o +endif + +obj-$(CONFIG_GRKERNSEC_RESLOG) += gracl_res.o + +ifdef CONFIG_NET +obj-y += grsec_sock.o +obj-$(CONFIG_GRKERNSEC) += gracl_ip.o +endif + +ifndef CONFIG_GRKERNSEC +obj-y += grsec_disabled.o +endif + +ifdef CONFIG_GRKERNSEC_HIDESYM +extra-y := grsec_hidesym.o +$(obj)/grsec_hidesym.o: + @-chmod -f 500 /boot + @-chmod -f 500 /lib/modules + @-chmod -f 500 /lib64/modules + @-chmod -f 500 /lib32/modules + @-chmod -f 700 . + @-chmod -f 700 $(objtree) + @echo ' grsec: protected kernel image paths' +endif diff --git a/grsecurity/gracl.c b/grsecurity/gracl.c new file mode 100644 index 0000000000..fa03095d6b --- /dev/null +++ b/grsecurity/gracl.c @@ -0,0 +1,2773 @@ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#if defined(CONFIG_BTRFS_FS) || defined(CONFIG_BTRFS_FS_MODULE) +#include +#include +#include "../fs/btrfs/async-thread.h" +#include "../fs/btrfs/ctree.h" +#include "../fs/btrfs/btrfs_inode.h" +#endif +#include "../fs/mount.h" + +#include +#include +#include + +#define FOR_EACH_ROLE_START(role) \ + role = running_polstate.role_list; \ + while (role) { + +#define FOR_EACH_ROLE_END(role) \ + role = role->prev; \ + } + +extern struct path gr_real_root; + +static struct gr_policy_state running_polstate; +struct gr_policy_state 
*polstate = &running_polstate; +extern struct gr_alloc_state *current_alloc_state; + +extern char *gr_shared_page[4]; +DEFINE_RWLOCK(gr_inode_lock); + +static unsigned int gr_status __read_only = GR_STATUS_INIT; + +#ifdef CONFIG_NET +extern struct vfsmount *sock_mnt; +#endif + +extern struct vfsmount *pipe_mnt; +extern struct vfsmount *shm_mnt; + +#ifdef CONFIG_HUGETLBFS +extern struct vfsmount *hugetlbfs_vfsmount[HUGE_MAX_HSTATE]; +#endif + +extern u16 acl_sp_role_value; +extern struct acl_object_label *fakefs_obj_rw; +extern struct acl_object_label *fakefs_obj_rwx; + +int gr_acl_is_enabled(void) +{ + return (gr_status & GR_READY); +} + +void gr_enable_rbac_system(void) +{ + pax_open_kernel(); + gr_status |= GR_READY; + pax_close_kernel(); +} + +int gr_rbac_disable(void *unused) +{ + pax_open_kernel(); + gr_status &= ~GR_READY; + pax_close_kernel(); + + return 0; +} + +static inline dev_t __get_dev(const struct dentry *dentry) +{ + struct dentry *ldentry = d_backing_dentry((struct dentry *)dentry); + +#if defined(CONFIG_BTRFS_FS) || defined(CONFIG_BTRFS_FS_MODULE) + if (ldentry->d_sb->s_magic == BTRFS_SUPER_MAGIC) + return BTRFS_I(d_inode(ldentry))->root->anon_dev; + else +#endif + return d_inode(ldentry)->i_sb->s_dev; +} + +static inline u64 __get_ino(const struct dentry *dentry) +{ + struct dentry *ldentry = d_backing_dentry((struct dentry *)dentry); + +#if defined(CONFIG_BTRFS_FS) || defined(CONFIG_BTRFS_FS_MODULE) + if (ldentry->d_sb->s_magic == BTRFS_SUPER_MAGIC) + return btrfs_ino(d_inode(dentry)); + else +#endif + return d_inode(ldentry)->i_ino; +} + +dev_t gr_get_dev_from_dentry(struct dentry *dentry) +{ + return __get_dev(dentry); +} + +u64 gr_get_ino_from_dentry(struct dentry *dentry) +{ + return __get_ino(dentry); +} + +static char gr_task_roletype_to_char(struct task_struct *task) +{ + switch (task->role->roletype & + (GR_ROLE_DEFAULT | GR_ROLE_USER | GR_ROLE_GROUP | + GR_ROLE_SPECIAL)) { + case GR_ROLE_DEFAULT: + return 'D'; + case GR_ROLE_USER: + 
return 'U'; + case GR_ROLE_GROUP: + return 'G'; + case GR_ROLE_SPECIAL: + return 'S'; + } + + return 'X'; +} + +char gr_roletype_to_char(void) +{ + return gr_task_roletype_to_char(current); +} + +int +gr_acl_tpe_check(void) +{ + if (unlikely(!(gr_status & GR_READY))) + return 0; + if (current->role->roletype & GR_ROLE_TPE) + return 1; + else + return 0; +} + +int +gr_handle_rawio(const struct inode *inode) +{ +#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS + if (inode && (S_ISBLK(inode->i_mode) || (S_ISCHR(inode->i_mode) && imajor(inode) == RAW_MAJOR)) && + grsec_enable_chroot_caps && proc_is_chrooted(current) && + !capable(CAP_SYS_RAWIO)) + return 1; +#endif + return 0; +} + +int +gr_streq(const char *a, const char *b, const unsigned int lena, const unsigned int lenb) +{ + if (likely(lena != lenb)) + return 0; + + return !memcmp(a, b, lena); +} + +static int prepend(char **buffer, int *buflen, const char *str, int namelen) +{ + *buflen -= namelen; + if (*buflen < 0) + return -ENAMETOOLONG; + *buffer -= namelen; + memcpy(*buffer, str, namelen); + return 0; +} + +static int prepend_name(char **buffer, int *buflen, struct qstr *name) +{ + return prepend(buffer, buflen, (const char *)name->name, name->len); +} + +static int prepend_path(const struct path *path, struct path *root, + char **buffer, int *buflen) +{ + struct dentry *dentry = path->dentry; + struct vfsmount *vfsmnt = path->mnt; + struct mount *mnt = real_mount(vfsmnt); + bool slash = false; + int error = 0; + + while (dentry != root->dentry || vfsmnt != root->mnt) { + struct dentry * parent; + + if (dentry == vfsmnt->mnt_root || IS_ROOT(dentry)) { + /* Global root? 
*/ + if (!mnt_has_parent(mnt)) { + goto out; + } + dentry = mnt->mnt_mountpoint; + mnt = mnt->mnt_parent; + vfsmnt = &mnt->mnt; + continue; + } + parent = dentry->d_parent; + prefetch(parent); + spin_lock(&dentry->d_lock); + error = prepend_name(buffer, buflen, &dentry->d_name); + spin_unlock(&dentry->d_lock); + if (!error) + error = prepend(buffer, buflen, "/", 1); + if (error) + break; + + slash = true; + dentry = parent; + } + +out: + if (!error && !slash) + error = prepend(buffer, buflen, "/", 1); + + return error; +} + +/* this must be called with mount_lock and rename_lock held */ + +static char *__our_d_path(const struct path *path, struct path *root, + char *buf, int buflen) +{ + char *res = buf + buflen; + int error; + + prepend(&res, &buflen, "\0", 1); + error = prepend_path(path, root, &res, &buflen); + if (error) + return ERR_PTR(error); + + return res; +} + +static char * +gen_full_path(struct path *path, struct path *root, char *buf, int buflen) +{ + char *retval; + + retval = __our_d_path(path, root, buf, buflen); + if (unlikely(IS_ERR(retval))) + retval = strcpy(buf, ""); + else if (unlikely(retval[1] == '/' && retval[2] == '\0')) + retval[1] = '\0'; + + return retval; +} + +static char * +__d_real_path(const struct dentry *dentry, const struct vfsmount *vfsmnt, + char *buf, int buflen) +{ + struct path path; + char *res; + + path.dentry = (struct dentry *)dentry; + path.mnt = (struct vfsmount *)vfsmnt; + + /* we can use gr_real_root.dentry, gr_real_root.mnt, because this is only called + by the RBAC system */ + res = gen_full_path(&path, &gr_real_root, buf, buflen); + + return res; +} + +static char * +d_real_path(const struct dentry *dentry, const struct vfsmount *vfsmnt, + char *buf, int buflen) +{ + char *res; + struct path path; + struct path root; + struct task_struct *reaper = init_pid_ns.child_reaper; + + path.dentry = (struct dentry *)dentry; + path.mnt = (struct vfsmount *)vfsmnt; + + /* we can't use gr_real_root.dentry, gr_real_root.mnt, 
because they belong only to the RBAC system */ + get_fs_root(reaper->fs, &root); + + read_seqlock_excl(&mount_lock); + write_seqlock(&rename_lock); + res = gen_full_path(&path, &root, buf, buflen); + write_sequnlock(&rename_lock); + read_sequnlock_excl(&mount_lock); + + path_put(&root); + return res; +} + +char * +gr_to_filename_rbac(const struct dentry *dentry, const struct vfsmount *mnt) +{ + char *ret; + read_seqlock_excl(&mount_lock); + write_seqlock(&rename_lock); + ret = __d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0],smp_processor_id()), + PAGE_SIZE); + write_sequnlock(&rename_lock); + read_sequnlock_excl(&mount_lock); + return ret; +} + +static char * +gr_to_proc_filename_rbac(const struct dentry *dentry, const struct vfsmount *mnt) +{ + char *ret; + char *buf; + int buflen; + + read_seqlock_excl(&mount_lock); + write_seqlock(&rename_lock); + buf = per_cpu_ptr(gr_shared_page[0], smp_processor_id()); + ret = __d_real_path(dentry, mnt, buf, PAGE_SIZE - 6); + buflen = (int)(ret - buf); + if (buflen >= 5) + prepend(&ret, &buflen, "/proc", 5); + else + ret = strcpy(buf, ""); + write_sequnlock(&rename_lock); + read_sequnlock_excl(&mount_lock); + return ret; +} + +char * +gr_to_filename_nolock(const struct dentry *dentry, const struct vfsmount *mnt) +{ + return __d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0],smp_processor_id()), + PAGE_SIZE); +} + +char * +gr_to_filename(const struct dentry *dentry, const struct vfsmount *mnt) +{ + return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0], smp_processor_id()), + PAGE_SIZE); +} + +char * +gr_to_filename1(const struct dentry *dentry, const struct vfsmount *mnt) +{ + return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[1], smp_processor_id()), + PAGE_SIZE); +} + +char * +gr_to_filename2(const struct dentry *dentry, const struct vfsmount *mnt) +{ + return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[2], smp_processor_id()), + PAGE_SIZE); +} + +char * +gr_to_filename3(const struct 
dentry *dentry, const struct vfsmount *mnt) +{ + return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[3], smp_processor_id()), + PAGE_SIZE); +} + +__u32 +to_gr_audit(const __u32 reqmode) +{ + /* masks off auditable permission flags, then shifts them to create + auditing flags, and adds the special case of append auditing if + we're requesting write */ + return (((reqmode & ~GR_AUDITS) << 10) | ((reqmode & GR_WRITE) ? GR_AUDIT_APPEND : 0)); +} + +struct acl_role_label * +__lookup_acl_role_label(const struct gr_policy_state *state, const struct task_struct *task, const uid_t uid, + const gid_t gid) +{ + unsigned int index = gr_rhash(uid, GR_ROLE_USER, state->acl_role_set.r_size); + struct acl_role_label *match; + struct role_allowed_ip *ipp; + unsigned int x; + u32 curr_ip = task->signal->saved_ip; + + match = state->acl_role_set.r_hash[index]; + + while (match) { + if ((match->roletype & (GR_ROLE_DOMAIN | GR_ROLE_USER)) == (GR_ROLE_DOMAIN | GR_ROLE_USER)) { + for (x = 0; x < match->domain_child_num; x++) { + if (match->domain_children[x] == uid) + goto found; + } + } else if (match->uidgid == uid && match->roletype & GR_ROLE_USER) + break; + match = match->next; + } +found: + if (match == NULL) { + try_group: + index = gr_rhash(gid, GR_ROLE_GROUP, state->acl_role_set.r_size); + match = state->acl_role_set.r_hash[index]; + + while (match) { + if ((match->roletype & (GR_ROLE_DOMAIN | GR_ROLE_GROUP)) == (GR_ROLE_DOMAIN | GR_ROLE_GROUP)) { + for (x = 0; x < match->domain_child_num; x++) { + if (match->domain_children[x] == gid) + goto found2; + } + } else if (match->uidgid == gid && match->roletype & GR_ROLE_GROUP) + break; + match = match->next; + } +found2: + if (match == NULL) + match = state->default_role; + if (match->allowed_ips == NULL) + return match; + else { + for (ipp = match->allowed_ips; ipp; ipp = ipp->next) { + if (likely + ((ntohl(curr_ip) & ipp->netmask) == + (ntohl(ipp->addr) & ipp->netmask))) + return match; + } + match = state->default_role; + 
} + } else if (match->allowed_ips == NULL) { + return match; + } else { + for (ipp = match->allowed_ips; ipp; ipp = ipp->next) { + if (likely + ((ntohl(curr_ip) & ipp->netmask) == + (ntohl(ipp->addr) & ipp->netmask))) + return match; + } + goto try_group; + } + + return match; +} + +static struct acl_role_label * +lookup_acl_role_label(const struct task_struct *task, const uid_t uid, + const gid_t gid) +{ + return __lookup_acl_role_label(&running_polstate, task, uid, gid); +} + +struct acl_subject_label * +lookup_acl_subj_label(const u64 ino, const dev_t dev, + const struct acl_role_label *role) +{ + unsigned int index = gr_fhash(ino, dev, role->subj_hash_size); + struct acl_subject_label *match; + + match = role->subj_hash[index]; + + while (match && (match->inode != ino || match->device != dev || + (match->mode & GR_DELETED))) { + match = match->next; + } + + if (match && !(match->mode & GR_DELETED)) + return match; + else + return NULL; +} + +struct acl_subject_label * +lookup_acl_subj_label_deleted(const u64 ino, const dev_t dev, + const struct acl_role_label *role) +{ + unsigned int index = gr_fhash(ino, dev, role->subj_hash_size); + struct acl_subject_label *match; + + match = role->subj_hash[index]; + + while (match && (match->inode != ino || match->device != dev || + !(match->mode & GR_DELETED))) { + match = match->next; + } + + if (match && (match->mode & GR_DELETED)) + return match; + else + return NULL; +} + +static struct acl_object_label * +lookup_acl_obj_label(const u64 ino, const dev_t dev, + const struct acl_subject_label *subj) +{ + unsigned int index = gr_fhash(ino, dev, subj->obj_hash_size); + struct acl_object_label *match; + + match = subj->obj_hash[index]; + + while (match && (match->inode != ino || match->device != dev || + (match->mode & GR_DELETED))) { + match = match->next; + } + + if (match && !(match->mode & GR_DELETED)) + return match; + else + return NULL; +} + +static struct acl_object_label * +lookup_acl_obj_label_create(const u64 
ino, const dev_t dev, + const struct acl_subject_label *subj) +{ + unsigned int index = gr_fhash(ino, dev, subj->obj_hash_size); + struct acl_object_label *match; + + match = subj->obj_hash[index]; + + while (match && (match->inode != ino || match->device != dev || + !(match->mode & GR_DELETED))) { + match = match->next; + } + + if (match && (match->mode & GR_DELETED)) + return match; + + match = subj->obj_hash[index]; + + while (match && (match->inode != ino || match->device != dev || + (match->mode & GR_DELETED))) { + match = match->next; + } + + if (match && !(match->mode & GR_DELETED)) + return match; + else + return NULL; +} + +struct name_entry * +__lookup_name_entry(const struct gr_policy_state *state, const char *name) +{ + unsigned int len = strlen(name); + unsigned int key = full_name_hash(NULL, (const unsigned char *)name, len); + unsigned int index = key % state->name_set.n_size; + struct name_entry *match; + + match = state->name_set.n_hash[index]; + + while (match && (match->key != key || !gr_streq(match->name, name, match->len, len))) + match = match->next; + + return match; +} + +static struct name_entry * +lookup_name_entry(const char *name) +{ + return __lookup_name_entry(&running_polstate, name); +} + +static struct name_entry * +lookup_name_entry_create(const char *name) +{ + unsigned int len = strlen(name); + unsigned int key = full_name_hash(NULL, (const unsigned char *)name, len); + unsigned int index = key % running_polstate.name_set.n_size; + struct name_entry *match; + + match = running_polstate.name_set.n_hash[index]; + + while (match && (match->key != key || !gr_streq(match->name, name, match->len, len) || + !match->deleted)) + match = match->next; + + if (match && match->deleted) + return match; + + match = running_polstate.name_set.n_hash[index]; + + while (match && (match->key != key || !gr_streq(match->name, name, match->len, len) || + match->deleted)) + match = match->next; + + if (match && !match->deleted) + return match; + else + 
return NULL; +} + +static struct inodev_entry * +lookup_inodev_entry(const u64 ino, const dev_t dev) +{ + unsigned int index = gr_fhash(ino, dev, running_polstate.inodev_set.i_size); + struct inodev_entry *match; + + match = running_polstate.inodev_set.i_hash[index]; + + while (match && (match->nentry->inode != ino || match->nentry->device != dev)) + match = match->next; + + return match; +} + +void +__insert_inodev_entry(const struct gr_policy_state *state, struct inodev_entry *entry) +{ + unsigned int index = gr_fhash(entry->nentry->inode, entry->nentry->device, + state->inodev_set.i_size); + struct inodev_entry **curr; + + entry->prev = NULL; + + curr = &state->inodev_set.i_hash[index]; + if (*curr != NULL) + (*curr)->prev = entry; + + entry->next = *curr; + *curr = entry; + + return; +} + +static void +insert_inodev_entry(struct inodev_entry *entry) +{ + __insert_inodev_entry(&running_polstate, entry); +} + +void +insert_acl_obj_label(struct acl_object_label *obj, + struct acl_subject_label *subj) +{ + unsigned int index = + gr_fhash(obj->inode, obj->device, subj->obj_hash_size); + struct acl_object_label **curr; + + obj->prev = NULL; + + curr = &subj->obj_hash[index]; + if (*curr != NULL) + (*curr)->prev = obj; + + obj->next = *curr; + *curr = obj; + + return; +} + +void +insert_acl_subj_label(struct acl_subject_label *obj, + struct acl_role_label *role) +{ + unsigned int index = gr_fhash(obj->inode, obj->device, role->subj_hash_size); + struct acl_subject_label **curr; + + obj->prev = NULL; + + curr = &role->subj_hash[index]; + if (*curr != NULL) + (*curr)->prev = obj; + + obj->next = *curr; + *curr = obj; + + return; +} + +/* derived from glibc fnmatch() 0: match, 1: no match*/ + +static int +glob_match(const char *p, const char *n) +{ + char c; + + while ((c = *p++) != '\0') { + switch (c) { + case '?': + if (*n == '\0') + return 1; + else if (*n == '/') + return 1; + break; + case '\\': + if (*n != c) + return 1; + break; + case '*': + for (c = *p++; c == 
'?' || c == '*'; c = *p++) { + if (*n == '/') + return 1; + else if (c == '?') { + if (*n == '\0') + return 1; + else + ++n; + } + } + if (c == '\0') { + return 0; + } else { + const char *endp; + + if ((endp = strchr(n, '/')) == NULL) + endp = n + strlen(n); + + if (c == '[') { + for (--p; n < endp; ++n) + if (!glob_match(p, n)) + return 0; + } else if (c == '/') { + while (*n != '\0' && *n != '/') + ++n; + if (*n == '/' && !glob_match(p, n + 1)) + return 0; + } else { + for (--p; n < endp; ++n) + if (*n == c && !glob_match(p, n)) + return 0; + } + + return 1; + } + case '[': + { + int not; + char cold; + + if (*n == '\0' || *n == '/') + return 1; + + not = (*p == '!' || *p == '^'); + if (not) + ++p; + + c = *p++; + for (;;) { + unsigned char fn = (unsigned char)*n; + + if (c == '\0') + return 1; + else { + if (c == fn) + goto matched; + cold = c; + c = *p++; + + if (c == '-' && *p != ']') { + unsigned char cend = *p++; + + if (cend == '\0') + return 1; + + if (cold <= fn && fn <= cend) + goto matched; + + c = *p++; + } + } + + if (c == ']') + break; + } + if (!not) + return 1; + break; + matched: + while (c != ']') { + if (c == '\0') + return 1; + + c = *p++; + } + if (not) + return 1; + } + break; + default: + if (c != *n) + return 1; + } + + ++n; + } + + if (*n == '\0') + return 0; + + if (*n == '/') + return 0; + + return 1; +} + +static struct acl_object_label * +chk_glob_label(struct acl_object_label *globbed, + const struct dentry *dentry, const struct vfsmount *mnt, char **path) +{ + struct acl_object_label *tmp; + + if (*path == NULL) + *path = gr_to_filename_nolock(dentry, mnt); + + tmp = globbed; + + while (tmp) { + if (!glob_match(tmp->filename, *path)) + return tmp; + tmp = tmp->next; + } + + return NULL; +} + +static struct acl_object_label * +__full_lookup(const struct dentry *orig_dentry, const struct vfsmount *orig_mnt, + const u64 curr_ino, const dev_t curr_dev, + const struct acl_subject_label *subj, char **path, const int checkglob) +{ + struct 
acl_subject_label *tmpsubj; + struct acl_object_label *retval; + struct acl_object_label *retval2; + + tmpsubj = (struct acl_subject_label *) subj; + read_lock(&gr_inode_lock); + do { + retval = lookup_acl_obj_label(curr_ino, curr_dev, tmpsubj); + if (retval) { + if (checkglob && retval->globbed) { + retval2 = chk_glob_label(retval->globbed, orig_dentry, orig_mnt, path); + if (retval2) + retval = retval2; + } + break; + } + } while ((tmpsubj = tmpsubj->parent_subject)); + read_unlock(&gr_inode_lock); + + return retval; +} + +static struct acl_object_label * +full_lookup(const struct dentry *orig_dentry, const struct vfsmount *orig_mnt, + struct dentry *curr_dentry, + const struct acl_subject_label *subj, char **path, const int checkglob) +{ + int newglob = checkglob; + u64 inode; + dev_t device; + + /* if we aren't checking a subdirectory of the original path yet, don't do glob checking + as we don't want a / * rule to match instead of the / object + don't do this for create lookups that call this function though, since they're looking up + on the parent and thus need globbing checks on all paths + */ + if (orig_dentry == curr_dentry && newglob != GR_CREATE_GLOB) + newglob = GR_NO_GLOB; + + spin_lock(&curr_dentry->d_lock); + inode = __get_ino(curr_dentry); + device = __get_dev(curr_dentry); + spin_unlock(&curr_dentry->d_lock); + + return __full_lookup(orig_dentry, orig_mnt, inode, device, subj, path, newglob); +} + +#ifdef CONFIG_HUGETLBFS +static inline bool +is_hugetlbfs_mnt(const struct vfsmount *mnt) +{ + int i; + for (i = 0; i < HUGE_MAX_HSTATE; i++) { + if (unlikely(hugetlbfs_vfsmount[i] == mnt)) + return true; + } + + return false; +} +#endif + +static struct acl_object_label * +__chk_obj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt, + const struct acl_subject_label *subj, char *path, const int checkglob) +{ + struct dentry *dentry = (struct dentry *) l_dentry; + struct vfsmount *mnt = (struct vfsmount *) l_mnt; + struct inode * inode = 
d_backing_inode(dentry); + struct mount *real_mnt = real_mount(mnt); + struct acl_object_label *retval; + struct dentry *parent; + + read_seqlock_excl(&mount_lock); + write_seqlock(&rename_lock); + + if (unlikely((mnt == shm_mnt && inode->i_nlink == 0) || mnt == pipe_mnt || +#ifdef CONFIG_NET + mnt == sock_mnt || +#endif +#ifdef CONFIG_HUGETLBFS + (is_hugetlbfs_mnt(mnt) && inode->i_nlink == 0) || +#endif + /* ignore Eric Biederman */ + IS_PRIVATE(inode))) { + retval = (subj->mode & GR_SHMEXEC) ? fakefs_obj_rwx : fakefs_obj_rw; + goto out; + } + + for (;;) { + if (dentry == gr_real_root.dentry && mnt == gr_real_root.mnt) + break; + + if (dentry == mnt->mnt_root || IS_ROOT(dentry)) { + if (!mnt_has_parent(real_mnt)) + break; + + retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob); + if (retval != NULL) + goto out; + + dentry = real_mnt->mnt_mountpoint; + real_mnt = real_mnt->mnt_parent; + mnt = &real_mnt->mnt; + continue; + } + + parent = dentry->d_parent; + retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob); + if (retval != NULL) + goto out; + + dentry = parent; + } + + retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob); + + /* gr_real_root is pinned so we don't have to hold a reference */ + if (retval == NULL) + retval = full_lookup(l_dentry, l_mnt, gr_real_root.dentry, subj, &path, checkglob); +out: + write_sequnlock(&rename_lock); + read_sequnlock_excl(&mount_lock); + + BUG_ON(retval == NULL); + + return retval; +} + +static struct acl_object_label * +chk_obj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt, + const struct acl_subject_label *subj) +{ + char *path = NULL; + return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_REG_GLOB); +} + +static struct acl_object_label * +chk_obj_label_noglob(const struct dentry *l_dentry, const struct vfsmount *l_mnt, + const struct acl_subject_label *subj) +{ + char *path = NULL; + return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_NO_GLOB); +} + 
+static struct acl_object_label * +chk_obj_create_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt, + const struct acl_subject_label *subj, char *path) +{ + return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_CREATE_GLOB); +} + +struct acl_subject_label * +chk_subj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt, + const struct acl_role_label *role) +{ + struct dentry *dentry = (struct dentry *) l_dentry; + struct vfsmount *mnt = (struct vfsmount *) l_mnt; + struct mount *real_mnt = real_mount(mnt); + struct acl_subject_label *retval; + struct dentry *parent; + + read_seqlock_excl(&mount_lock); + write_seqlock(&rename_lock); + + for (;;) { + if (dentry == gr_real_root.dentry && mnt == gr_real_root.mnt) + break; + if (dentry == mnt->mnt_root || IS_ROOT(dentry)) { + if (!mnt_has_parent(real_mnt)) + break; + + spin_lock(&dentry->d_lock); + read_lock(&gr_inode_lock); + retval = + lookup_acl_subj_label(__get_ino(dentry), + __get_dev(dentry), role); + read_unlock(&gr_inode_lock); + spin_unlock(&dentry->d_lock); + if (retval != NULL) + goto out; + + dentry = real_mnt->mnt_mountpoint; + real_mnt = real_mnt->mnt_parent; + mnt = &real_mnt->mnt; + continue; + } + + spin_lock(&dentry->d_lock); + read_lock(&gr_inode_lock); + retval = lookup_acl_subj_label(__get_ino(dentry), + __get_dev(dentry), role); + read_unlock(&gr_inode_lock); + parent = dentry->d_parent; + spin_unlock(&dentry->d_lock); + + if (retval != NULL) + goto out; + + dentry = parent; + } + + spin_lock(&dentry->d_lock); + read_lock(&gr_inode_lock); + retval = lookup_acl_subj_label(__get_ino(dentry), + __get_dev(dentry), role); + read_unlock(&gr_inode_lock); + spin_unlock(&dentry->d_lock); + + if (unlikely(retval == NULL)) { + /* gr_real_root is pinned, we don't need to hold a reference */ + read_lock(&gr_inode_lock); + retval = lookup_acl_subj_label(__get_ino(gr_real_root.dentry), + __get_dev(gr_real_root.dentry), role); + read_unlock(&gr_inode_lock); + } +out: + 
write_sequnlock(&rename_lock); + read_sequnlock_excl(&mount_lock); + + BUG_ON(retval == NULL); + + return retval; +} + +void +assign_special_role(const char *rolename) +{ + struct acl_object_label *obj; + struct acl_role_label *r; + struct acl_role_label *assigned = NULL; + struct task_struct *tsk; + struct file *filp; + + FOR_EACH_ROLE_START(r) + if (!strcmp(rolename, r->rolename) && + (r->roletype & GR_ROLE_SPECIAL)) { + assigned = r; + break; + } + FOR_EACH_ROLE_END(r) + + if (!assigned) + return; + + read_lock(&tasklist_lock); + read_lock(&grsec_exec_file_lock); + + tsk = current->real_parent; + if (tsk == NULL) + goto out_unlock; + + filp = tsk->exec_file; + if (filp == NULL) + goto out_unlock; + + tsk->is_writable = 0; + tsk->inherited = 0; + + tsk->acl_sp_role = 1; + tsk->acl_role_id = ++acl_sp_role_value; + tsk->role = assigned; + tsk->acl = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt, tsk->role); + + /* ignore additional mmap checks for processes that are writable + by the default ACL */ + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, running_polstate.default_role->root_label); + if (unlikely(obj->mode & GR_WRITE)) + tsk->is_writable = 1; + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, tsk->role->root_label); + if (unlikely(obj->mode & GR_WRITE)) + tsk->is_writable = 1; + +#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG + printk(KERN_ALERT "Assigning special role:%s subject:%s to process (%s:%d)\n", tsk->role->rolename, + tsk->acl->filename, tsk->comm, task_pid_nr(tsk)); +#endif + +out_unlock: + read_unlock(&grsec_exec_file_lock); + read_unlock(&tasklist_lock); + return; +} + + +static void +gr_log_learn(const struct dentry *dentry, const struct vfsmount *mnt, const __u32 mode) +{ + struct task_struct *task = current; + const struct cred *cred = current_cred(); + + security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename, task->role->roletype, + GR_GLOBAL_UID(cred->uid), GR_GLOBAL_GID(cred->gid), task->exec_file ? 
gr_to_filename1(task->exec_file->f_path.dentry, + task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename, + 1UL, 1UL, gr_to_filename(dentry, mnt), (unsigned long) mode, &task->signal->saved_ip); + + return; +} + +static void +gr_log_learn_uid_change(const kuid_t real, const kuid_t effective, const kuid_t fs) +{ + struct task_struct *task = current; + const struct cred *cred = current_cred(); + + security_learn(GR_ID_LEARN_MSG, task->role->rolename, task->role->roletype, + GR_GLOBAL_UID(cred->uid), GR_GLOBAL_GID(cred->gid), task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry, + task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename, + 'u', GR_GLOBAL_UID(real), GR_GLOBAL_UID(effective), GR_GLOBAL_UID(fs), &task->signal->saved_ip); + + return; +} + +static void +gr_log_learn_gid_change(const kgid_t real, const kgid_t effective, const kgid_t fs) +{ + struct task_struct *task = current; + const struct cred *cred = current_cred(); + + security_learn(GR_ID_LEARN_MSG, task->role->rolename, task->role->roletype, + GR_GLOBAL_UID(cred->uid), GR_GLOBAL_GID(cred->gid), task->exec_file ? 
gr_to_filename1(task->exec_file->f_path.dentry, + task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename, + 'g', GR_GLOBAL_GID(real), GR_GLOBAL_GID(effective), GR_GLOBAL_GID(fs), &task->signal->saved_ip); + + return; +} + +static void +gr_set_proc_res(struct task_struct *task) +{ + struct acl_subject_label *proc; + unsigned short i; + + proc = task->acl; + + if (proc->mode & (GR_LEARN | GR_INHERITLEARN)) + return; + + for (i = 0; i < RLIM_NLIMITS; i++) { + unsigned long rlim_cur, rlim_max; + + if (!(proc->resmask & (1U << i))) + continue; + + rlim_cur = proc->res[i].rlim_cur; + rlim_max = proc->res[i].rlim_max; + + if (i == RLIMIT_NOFILE) { + unsigned long saved_sysctl_nr_open = sysctl_nr_open; + if (rlim_cur > saved_sysctl_nr_open) + rlim_cur = saved_sysctl_nr_open; + if (rlim_max > saved_sysctl_nr_open) + rlim_max = saved_sysctl_nr_open; + } + + task->signal->rlim[i].rlim_cur = rlim_cur; + task->signal->rlim[i].rlim_max = rlim_max; + + if (i == RLIMIT_CPU) + update_rlimit_cpu(task, rlim_cur); + } + + return; +} + +/* both of the below must be called with + rcu_read_lock(); + read_lock(&tasklist_lock); + read_lock(&grsec_exec_file_lock); + except in the case of gr_set_role_label() (for __gr_get_subject_for_task) +*/ + +struct acl_subject_label *__gr_get_subject_for_task(const struct gr_policy_state *state, struct task_struct *task, const char *filename, int fallback) +{ + char *tmpname; + struct acl_subject_label *tmpsubj; + struct file *filp; + struct name_entry *nmatch; + + filp = task->exec_file; + if (filp == NULL) + return NULL; + + /* the following is to apply the correct subject + on binaries running when the RBAC system + is enabled, when the binaries have been + replaced or deleted since their execution + ----- + when the RBAC system starts, the inode/dev + from exec_file will be one the RBAC system + is unaware of. It only knows the inode/dev + of the present file on disk, or the absence + of it. 
+ */ + + if (filename) + nmatch = __lookup_name_entry(state, filename); + else { + preempt_disable(); + tmpname = gr_to_filename_rbac(filp->f_path.dentry, filp->f_path.mnt); + + nmatch = __lookup_name_entry(state, tmpname); + preempt_enable(); + } + tmpsubj = NULL; + if (nmatch) { + if (nmatch->deleted) + tmpsubj = lookup_acl_subj_label_deleted(nmatch->inode, nmatch->device, task->role); + else + tmpsubj = lookup_acl_subj_label(nmatch->inode, nmatch->device, task->role); + } + /* this also works for the reload case -- if we don't match a potentially inherited subject + then we fall back to a normal lookup based on the binary's ino/dev + */ + if (tmpsubj == NULL && fallback) + tmpsubj = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt, task->role); + + return tmpsubj; +} + +static struct acl_subject_label *gr_get_subject_for_task(struct task_struct *task, const char *filename, int fallback) +{ + return __gr_get_subject_for_task(&running_polstate, task, filename, fallback); +} + +void __gr_apply_subject_to_task(const struct gr_policy_state *state, struct task_struct *task, struct acl_subject_label *subj) +{ + struct acl_object_label *obj; + struct file *filp; + + filp = task->exec_file; + + task->acl = subj; + task->is_writable = 0; + /* ignore additional mmap checks for processes that are writable + by the default ACL */ + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, state->default_role->root_label); + if (unlikely(obj->mode & GR_WRITE)) + task->is_writable = 1; + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, task->role->root_label); + if (unlikely(obj->mode & GR_WRITE)) + task->is_writable = 1; + + gr_set_proc_res(task); + +#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG + printk(KERN_ALERT "gr_set_acls for (%s:%d): role:%s, subject:%s\n", task->comm, task_pid_nr(task), task->role->rolename, task->acl->filename); +#endif +} + +static void gr_apply_subject_to_task(struct task_struct *task, struct acl_subject_label *subj) +{ + 
__gr_apply_subject_to_task(&running_polstate, task, subj); +} + +__u32 +gr_search_file(const struct dentry * dentry, const __u32 mode, + const struct vfsmount * mnt) +{ + __u32 retval = mode; + struct acl_subject_label *curracl; + struct acl_object_label *currobj; + + if (unlikely(!(gr_status & GR_READY))) + return (mode & ~GR_AUDITS); + + curracl = current->acl; + + currobj = chk_obj_label(dentry, mnt, curracl); + retval = currobj->mode & mode; + + /* if we're opening a specified transfer file for writing + (e.g. /dev/initctl), then transfer our role to init + */ + if (unlikely(currobj->mode & GR_INIT_TRANSFER && retval & GR_WRITE && + current->role->roletype & GR_ROLE_PERSIST)) { + struct task_struct *task = init_pid_ns.child_reaper; + + if (task->role != current->role) { + struct acl_subject_label *subj; + + task->acl_sp_role = 0; + task->acl_role_id = current->acl_role_id; + task->role = current->role; + rcu_read_lock(); + read_lock(&grsec_exec_file_lock); + subj = gr_get_subject_for_task(task, NULL, 1); + gr_apply_subject_to_task(task, subj); + read_unlock(&grsec_exec_file_lock); + rcu_read_unlock(); + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_INIT_TRANSFER_MSG); + } + } + + if (unlikely + ((curracl->mode & (GR_LEARN | GR_INHERITLEARN)) && !(mode & GR_NOPTRACE) + && (retval != (mode & ~(GR_AUDITS | GR_SUPPRESS))))) { + __u32 new_mode = mode; + + new_mode &= ~(GR_AUDITS | GR_SUPPRESS); + + retval = new_mode; + + if (new_mode & GR_EXEC && curracl->mode & GR_INHERITLEARN) + new_mode |= GR_INHERIT; + + if (!(mode & GR_NOLEARN)) + gr_log_learn(dentry, mnt, new_mode); + } + + return retval; +} + +struct acl_object_label *gr_get_create_object(const struct dentry *new_dentry, + const struct dentry *parent, + const struct vfsmount *mnt) +{ + struct name_entry *match; + struct acl_object_label *matchpo; + struct acl_subject_label *curracl; + char *path; + + if (unlikely(!(gr_status & GR_READY))) + return NULL; + + preempt_disable(); + path = gr_to_filename_rbac(new_dentry, 
mnt); + match = lookup_name_entry_create(path); + + curracl = current->acl; + + if (match) { + read_lock(&gr_inode_lock); + matchpo = lookup_acl_obj_label_create(match->inode, match->device, curracl); + read_unlock(&gr_inode_lock); + + if (matchpo) { + preempt_enable(); + return matchpo; + } + } + + // lookup parent + + matchpo = chk_obj_create_label(parent, mnt, curracl, path); + + preempt_enable(); + return matchpo; +} + +__u32 +gr_check_create(const struct dentry * new_dentry, const struct dentry * parent, + const struct vfsmount * mnt, const __u32 mode) +{ + struct acl_object_label *matchpo; + __u32 retval; + + if (unlikely(!(gr_status & GR_READY))) + return (mode & ~GR_AUDITS); + + matchpo = gr_get_create_object(new_dentry, parent, mnt); + + retval = matchpo->mode & mode; + + if ((retval != (mode & ~(GR_AUDITS | GR_SUPPRESS))) + && (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))) { + __u32 new_mode = mode; + + new_mode &= ~(GR_AUDITS | GR_SUPPRESS); + + gr_log_learn(new_dentry, mnt, new_mode); + return new_mode; + } + + return retval; +} + +__u32 +gr_check_link(const struct dentry * new_dentry, + const struct dentry * parent_dentry, + const struct vfsmount * parent_mnt, + const struct dentry * old_dentry, const struct vfsmount * old_mnt) +{ + struct acl_object_label *obj; + __u32 oldmode, newmode; + __u32 needmode; + __u32 checkmodes = GR_FIND | GR_APPEND | GR_WRITE | GR_EXEC | GR_SETID | GR_READ | + GR_DELETE | GR_INHERIT; + + if (unlikely(!(gr_status & GR_READY))) + return (GR_CREATE | GR_LINK); + + obj = chk_obj_label(old_dentry, old_mnt, current->acl); + oldmode = obj->mode; + + obj = gr_get_create_object(new_dentry, parent_dentry, parent_mnt); + newmode = obj->mode; + + needmode = newmode & checkmodes; + + // old name for hardlink must have at least the permissions of the new name + if ((oldmode & needmode) != needmode) + goto bad; + + // if old name had restrictions/auditing, make sure the new name does as well + needmode = oldmode & (GR_NOPTRACE | 
GR_PTRACERD | GR_INHERIT | GR_AUDITS); + + // don't allow hardlinking of suid/sgid/fcapped files without permission + if (is_privileged_binary(old_dentry)) + needmode |= GR_SETID; + + if ((newmode & needmode) != needmode) + goto bad; + + // enforce minimum permissions + if ((newmode & (GR_CREATE | GR_LINK)) == (GR_CREATE | GR_LINK)) + return newmode; +bad: + needmode = oldmode; + if (is_privileged_binary(old_dentry)) + needmode |= GR_SETID; + + if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN)) { + gr_log_learn(old_dentry, old_mnt, needmode | GR_CREATE | GR_LINK); + return (GR_CREATE | GR_LINK); + } else if (newmode & GR_SUPPRESS) + return GR_SUPPRESS; + else + return 0; +} + +int +gr_check_hidden_task(const struct task_struct *task) +{ + if (unlikely(!(gr_status & GR_READY))) + return 0; + + if (!(task->acl->mode & GR_PROCFIND) && !(current->acl->mode & GR_VIEW)) + return 1; + + return 0; +} + +int +gr_check_protected_task(const struct task_struct *task) +{ + if (unlikely(!(gr_status & GR_READY) || !task)) + return 0; + + if ((task->acl->mode & GR_PROTECTED) && !(current->acl->mode & GR_KILL) && + task->acl != current->acl) + return 1; + + return 0; +} + +int +gr_check_protected_task_fowner(struct pid *pid, enum pid_type type) +{ + struct task_struct *p; + int ret = 0; + + if (unlikely(!(gr_status & GR_READY) || !pid)) + return ret; + + read_lock(&tasklist_lock); + do_each_pid_task(pid, type, p) { + if ((p->acl->mode & GR_PROTECTED) && !(current->acl->mode & GR_KILL) && + p->acl != current->acl) { + ret = 1; + goto out; + } + } while_each_pid_task(pid, type, p); +out: + read_unlock(&tasklist_lock); + + return ret; +} + +void +gr_copy_label(struct task_struct *tsk) +{ + struct task_struct *p = current; + + tsk->inherited = p->inherited; + tsk->acl_sp_role = 0; + tsk->acl_role_id = p->acl_role_id; + tsk->acl = p->acl; + tsk->role = p->role; + tsk->signal->used_accept = 0; + tsk->signal->curr_ip = p->signal->curr_ip; + tsk->signal->saved_ip = p->signal->saved_ip; 
+ if (p->exec_file) + get_file(p->exec_file); + tsk->exec_file = p->exec_file; + tsk->is_writable = p->is_writable; + if (unlikely(p->signal->used_accept)) { + p->signal->curr_ip = 0; + p->signal->saved_ip = 0; + } + + return; +} + +extern int gr_process_kernel_setuid_ban(struct user_struct *user); + +int +gr_check_user_change(kuid_t real, kuid_t effective, kuid_t fs) +{ + unsigned int i; + __u16 num; + uid_t *uidlist; + uid_t curuid; + int realok = 0; + int effectiveok = 0; + int fsok = 0; + uid_t globalreal, globaleffective, globalfs; + +#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) + struct user_struct *user; + + if (!uid_valid(real)) + goto skipit; + + /* find user based on global namespace */ + + globalreal = GR_GLOBAL_UID(real); + + user = find_user(make_kuid(&init_user_ns, globalreal)); + if (user == NULL) + goto skipit; + + if (gr_process_kernel_setuid_ban(user)) { + /* for find_user */ + free_uid(user); + return 1; + } + + /* for find_user */ + free_uid(user); + +skipit: +#endif + + if (unlikely(!(gr_status & GR_READY))) + return 0; + + if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN)) + gr_log_learn_uid_change(real, effective, fs); + + num = current->acl->user_trans_num; + uidlist = current->acl->user_transitions; + + if (uidlist == NULL) + return 0; + + if (!uid_valid(real)) { + realok = 1; + globalreal = (uid_t)-1; + } else { + globalreal = GR_GLOBAL_UID(real); + } + if (!uid_valid(effective)) { + effectiveok = 1; + globaleffective = (uid_t)-1; + } else { + globaleffective = GR_GLOBAL_UID(effective); + } + if (!uid_valid(fs)) { + fsok = 1; + globalfs = (uid_t)-1; + } else { + globalfs = GR_GLOBAL_UID(fs); + } + + if (current->acl->user_trans_type & GR_ID_ALLOW) { + for (i = 0; i < num; i++) { + curuid = uidlist[i]; + if (globalreal == curuid) + realok = 1; + if (globaleffective == curuid) + effectiveok = 1; + if (globalfs == curuid) + fsok = 1; + } + } else if (current->acl->user_trans_type & GR_ID_DENY) { + for (i = 0; i < num; i++) { + curuid = 
uidlist[i]; + if (globalreal == curuid) + break; + if (globaleffective == curuid) + break; + if (globalfs == curuid) + break; + } + /* not in deny list */ + if (i == num) { + realok = 1; + effectiveok = 1; + fsok = 1; + } + } + + if (realok && effectiveok && fsok) + return 0; + else { + gr_log_int(GR_DONT_AUDIT, GR_USRCHANGE_ACL_MSG, realok ? (effectiveok ? (fsok ? 0 : globalfs) : globaleffective) : globalreal); + return 1; + } +} + +int +gr_check_group_change(kgid_t real, kgid_t effective, kgid_t fs) +{ + unsigned int i; + __u16 num; + gid_t *gidlist; + gid_t curgid; + int realok = 0; + int effectiveok = 0; + int fsok = 0; + gid_t globalreal, globaleffective, globalfs; + + if (unlikely(!(gr_status & GR_READY))) + return 0; + + if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN)) + gr_log_learn_gid_change(real, effective, fs); + + num = current->acl->group_trans_num; + gidlist = current->acl->group_transitions; + + if (gidlist == NULL) + return 0; + + if (!gid_valid(real)) { + realok = 1; + globalreal = (gid_t)-1; + } else { + globalreal = GR_GLOBAL_GID(real); + } + if (!gid_valid(effective)) { + effectiveok = 1; + globaleffective = (gid_t)-1; + } else { + globaleffective = GR_GLOBAL_GID(effective); + } + if (!gid_valid(fs)) { + fsok = 1; + globalfs = (gid_t)-1; + } else { + globalfs = GR_GLOBAL_GID(fs); + } + + if (current->acl->group_trans_type & GR_ID_ALLOW) { + for (i = 0; i < num; i++) { + curgid = gidlist[i]; + if (globalreal == curgid) + realok = 1; + if (globaleffective == curgid) + effectiveok = 1; + if (globalfs == curgid) + fsok = 1; + } + } else if (current->acl->group_trans_type & GR_ID_DENY) { + for (i = 0; i < num; i++) { + curgid = gidlist[i]; + if (globalreal == curgid) + break; + if (globaleffective == curgid) + break; + if (globalfs == curgid) + break; + } + /* not in deny list */ + if (i == num) { + realok = 1; + effectiveok = 1; + fsok = 1; + } + } + + if (realok && effectiveok && fsok) + return 0; + else { + gr_log_int(GR_DONT_AUDIT, 
GR_GRPCHANGE_ACL_MSG, realok ? (effectiveok ? (fsok ? 0 : globalfs) : globaleffective) : globalreal); + return 1; + } +} + +extern int gr_acl_is_capable(const int cap); + +void +gr_set_role_label(struct task_struct *task, const kuid_t kuid, const kgid_t kgid) +{ + struct acl_role_label *role = task->role; + struct acl_role_label *origrole = role; + struct acl_subject_label *subj = NULL; + struct acl_object_label *obj; + struct file *filp; + uid_t uid; + gid_t gid; + + if (unlikely(!(gr_status & GR_READY))) + return; + + uid = GR_GLOBAL_UID(kuid); + gid = GR_GLOBAL_GID(kgid); + + filp = task->exec_file; + + /* kernel process, we'll give them the kernel role */ + if (unlikely(!filp)) { + task->role = running_polstate.kernel_role; + task->acl = running_polstate.kernel_role->root_label; + return; + } else if (!task->role || !(task->role->roletype & GR_ROLE_SPECIAL)) { + /* save the current ip at time of role lookup so that the proper + IP will be learned for role_allowed_ip */ + task->signal->saved_ip = task->signal->curr_ip; + role = lookup_acl_role_label(task, uid, gid); + } + + /* don't change the role if we're not a privileged process */ + if (role && task->role != role && + (((role->roletype & GR_ROLE_USER) && !gr_acl_is_capable(CAP_SETUID)) || + ((role->roletype & GR_ROLE_GROUP) && !gr_acl_is_capable(CAP_SETGID)))) + return; + + task->role = role; + + if (task->inherited) { + /* if we reached our subject through inheritance, then first see + if there's a subject of the same name in the new role that has + an object that would result in the same inherited subject + */ + subj = gr_get_subject_for_task(task, task->acl->filename, 0); + if (subj) { + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, subj); + if (!(obj->mode & GR_INHERIT)) + subj = NULL; + } + + } + if (subj == NULL) { + /* otherwise: + perform subject lookup in possibly new role + we can use this result below in the case where role == task->role + */ + subj = 
chk_subj_label(filp->f_path.dentry, filp->f_path.mnt, role); + } + + /* if we changed uid/gid, but result in the same role + and are using inheritance, don't lose the inherited subject + if current subject is other than what normal lookup + would result in, we arrived via inheritance, don't + lose subject + */ + if (role != origrole || (!(task->acl->mode & GR_INHERITLEARN) && + (subj == task->acl))) + task->acl = subj; + + /* leave task->inherited unaffected */ + + task->is_writable = 0; + + /* ignore additional mmap checks for processes that are writable + by the default ACL */ + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, running_polstate.default_role->root_label); + if (unlikely(obj->mode & GR_WRITE)) + task->is_writable = 1; + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, task->role->root_label); + if (unlikely(obj->mode & GR_WRITE)) + task->is_writable = 1; + +#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG + printk(KERN_ALERT "Set role label for (%s:%d): role:%s, subject:%s\n", task->comm, task_pid_nr(task), task->role->rolename, task->acl->filename); +#endif + + gr_set_proc_res(task); + + return; +} + +int +gr_set_proc_label(const struct dentry *dentry, const struct vfsmount *mnt, + const int unsafe_flags) +{ + struct task_struct *task = current; + struct acl_subject_label *newacl; + struct acl_object_label *obj; + __u32 retmode; + + if (unlikely(!(gr_status & GR_READY))) + return 0; + + newacl = chk_subj_label(dentry, mnt, task->role); + + /* special handling for if we did an strace -f -p from an admin role, where pid then + did an exec + */ + rcu_read_lock(); + read_lock(&tasklist_lock); + if (task->ptrace && task->parent && ((task->parent->role->roletype & GR_ROLE_GOD) || + (task->parent->acl->mode & GR_POVERRIDE))) { + read_unlock(&tasklist_lock); + rcu_read_unlock(); + goto skip_check; + } + read_unlock(&tasklist_lock); + rcu_read_unlock(); + + if (unsafe_flags && !(task->acl->mode & GR_POVERRIDE) && (task->acl != newacl) && + 
!(task->role->roletype & GR_ROLE_GOD) && + !gr_search_file(dentry, GR_PTRACERD, mnt) && + !(task->acl->mode & (GR_LEARN | GR_INHERITLEARN))) { + if (unsafe_flags & LSM_UNSAFE_SHARE) + gr_log_fs_generic(GR_DONT_AUDIT, GR_UNSAFESHARE_EXEC_ACL_MSG, dentry, mnt); + else if (unsafe_flags & (LSM_UNSAFE_PTRACE_CAP | LSM_UNSAFE_PTRACE)) + gr_log_fs_generic(GR_DONT_AUDIT, GR_PTRACE_EXEC_ACL_MSG, dentry, mnt); + else + gr_log_fs_generic(GR_DONT_AUDIT, GR_NNP_EXEC_ACL_MSG, dentry, mnt); + return -EACCES; + } + +skip_check: + + obj = chk_obj_label(dentry, mnt, task->acl); + retmode = obj->mode & (GR_INHERIT | GR_AUDIT_INHERIT); + + if (!(task->acl->mode & GR_INHERITLEARN) && + ((newacl->mode & GR_LEARN) || !(retmode & GR_INHERIT))) { + if (obj->nested) + task->acl = obj->nested; + else + task->acl = newacl; + task->inherited = 0; + } else { + task->inherited = 1; + if (retmode & GR_INHERIT && retmode & GR_AUDIT_INHERIT) + gr_log_str_fs(GR_DO_AUDIT, GR_INHERIT_ACL_MSG, task->acl->filename, dentry, mnt); + } + + task->is_writable = 0; + + /* ignore additional mmap checks for processes that are writable + by the default ACL */ + obj = chk_obj_label(dentry, mnt, running_polstate.default_role->root_label); + if (unlikely(obj->mode & GR_WRITE)) + task->is_writable = 1; + obj = chk_obj_label(dentry, mnt, task->role->root_label); + if (unlikely(obj->mode & GR_WRITE)) + task->is_writable = 1; + + gr_set_proc_res(task); + +#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG + printk(KERN_ALERT "Set subject label for (%s:%d): role:%s, subject:%s\n", task->comm, task_pid_nr(task), task->role->rolename, task->acl->filename); +#endif + return 0; +} + +/* always called with valid inodev ptr */ +static void +do_handle_delete(struct inodev_entry *inodev, const u64 ino, const dev_t dev) +{ + struct acl_object_label *matchpo; + struct acl_subject_label *matchps; + struct acl_subject_label *subj; + struct acl_role_label *role; + unsigned int x; + + FOR_EACH_ROLE_START(role) + FOR_EACH_SUBJECT_START(role, subj, x) 
+ if ((matchpo = lookup_acl_obj_label(ino, dev, subj)) != NULL) + matchpo->mode |= GR_DELETED; + FOR_EACH_SUBJECT_END(subj,x) + FOR_EACH_NESTED_SUBJECT_START(role, subj) + /* nested subjects aren't in the role's subj_hash table */ + if ((matchpo = lookup_acl_obj_label(ino, dev, subj)) != NULL) + matchpo->mode |= GR_DELETED; + FOR_EACH_NESTED_SUBJECT_END(subj) + if ((matchps = lookup_acl_subj_label(ino, dev, role)) != NULL) + matchps->mode |= GR_DELETED; + FOR_EACH_ROLE_END(role) + + inodev->nentry->deleted = 1; + + return; +} + +void +gr_handle_delete(const u64 ino, const dev_t dev) +{ + struct inodev_entry *inodev; + + if (unlikely(!(gr_status & GR_READY))) + return; + + write_lock(&gr_inode_lock); + inodev = lookup_inodev_entry(ino, dev); + if (inodev != NULL) + do_handle_delete(inodev, ino, dev); + write_unlock(&gr_inode_lock); + + return; +} + +static void +update_acl_obj_label(const u64 oldinode, const dev_t olddevice, + const u64 newinode, const dev_t newdevice, + struct acl_subject_label *subj) +{ + unsigned int index = gr_fhash(oldinode, olddevice, subj->obj_hash_size); + struct acl_object_label *match; + + match = subj->obj_hash[index]; + + while (match && (match->inode != oldinode || + match->device != olddevice || + !(match->mode & GR_DELETED))) + match = match->next; + + if (match && (match->inode == oldinode) + && (match->device == olddevice) + && (match->mode & GR_DELETED)) { + if (match->prev == NULL) { + subj->obj_hash[index] = match->next; + if (match->next != NULL) + match->next->prev = NULL; + } else { + match->prev->next = match->next; + if (match->next != NULL) + match->next->prev = match->prev; + } + match->prev = NULL; + match->next = NULL; + match->inode = newinode; + match->device = newdevice; + match->mode &= ~GR_DELETED; + + insert_acl_obj_label(match, subj); + } + + return; +} + +static void +update_acl_subj_label(const u64 oldinode, const dev_t olddevice, + const u64 newinode, const dev_t newdevice, + struct acl_role_label *role) +{ + 
unsigned int index = gr_fhash(oldinode, olddevice, role->subj_hash_size); + struct acl_subject_label *match; + + match = role->subj_hash[index]; + + while (match && (match->inode != oldinode || + match->device != olddevice || + !(match->mode & GR_DELETED))) + match = match->next; + + if (match && (match->inode == oldinode) + && (match->device == olddevice) + && (match->mode & GR_DELETED)) { + if (match->prev == NULL) { + role->subj_hash[index] = match->next; + if (match->next != NULL) + match->next->prev = NULL; + } else { + match->prev->next = match->next; + if (match->next != NULL) + match->next->prev = match->prev; + } + match->prev = NULL; + match->next = NULL; + match->inode = newinode; + match->device = newdevice; + match->mode &= ~GR_DELETED; + + insert_acl_subj_label(match, role); + } + + return; +} + +static void +update_inodev_entry(const u64 oldinode, const dev_t olddevice, + const u64 newinode, const dev_t newdevice) +{ + unsigned int index = gr_fhash(oldinode, olddevice, running_polstate.inodev_set.i_size); + struct inodev_entry *match; + + match = running_polstate.inodev_set.i_hash[index]; + + while (match && (match->nentry->inode != oldinode || + match->nentry->device != olddevice || !match->nentry->deleted)) + match = match->next; + + if (match && (match->nentry->inode == oldinode) + && (match->nentry->device == olddevice) && + match->nentry->deleted) { + if (match->prev == NULL) { + running_polstate.inodev_set.i_hash[index] = match->next; + if (match->next != NULL) + match->next->prev = NULL; + } else { + match->prev->next = match->next; + if (match->next != NULL) + match->next->prev = match->prev; + } + match->prev = NULL; + match->next = NULL; + match->nentry->inode = newinode; + match->nentry->device = newdevice; + match->nentry->deleted = 0; + + insert_inodev_entry(match); + } + + return; +} + +static void +__do_handle_create(const struct name_entry *matchn, u64 ino, dev_t dev) +{ + struct acl_subject_label *subj; + struct acl_role_label *role; 
+ unsigned int x; + + FOR_EACH_ROLE_START(role) + update_acl_subj_label(matchn->inode, matchn->device, ino, dev, role); + + FOR_EACH_NESTED_SUBJECT_START(role, subj) + if ((subj->inode == ino) && (subj->device == dev)) { + subj->inode = ino; + subj->device = dev; + } + /* nested subjects aren't in the role's subj_hash table */ + update_acl_obj_label(matchn->inode, matchn->device, + ino, dev, subj); + FOR_EACH_NESTED_SUBJECT_END(subj) + FOR_EACH_SUBJECT_START(role, subj, x) + update_acl_obj_label(matchn->inode, matchn->device, + ino, dev, subj); + FOR_EACH_SUBJECT_END(subj,x) + FOR_EACH_ROLE_END(role) + + update_inodev_entry(matchn->inode, matchn->device, ino, dev); + + return; +} + +static void +do_handle_create(const struct name_entry *matchn, const struct dentry *dentry, + const struct vfsmount *mnt) +{ + u64 ino = __get_ino(dentry); + dev_t dev = __get_dev(dentry); + + __do_handle_create(matchn, ino, dev); + + return; +} + +void +gr_handle_create(const struct dentry *dentry, const struct vfsmount *mnt) +{ + struct name_entry *matchn; + + if (unlikely(!(gr_status & GR_READY))) + return; + + preempt_disable(); + matchn = lookup_name_entry(gr_to_filename_rbac(dentry, mnt)); + + if (unlikely((unsigned long)matchn)) { + write_lock(&gr_inode_lock); + do_handle_create(matchn, dentry, mnt); + write_unlock(&gr_inode_lock); + } + preempt_enable(); + + return; +} + +void +gr_handle_proc_create(const struct dentry *dentry, const struct inode *inode) +{ + struct name_entry *matchn; + + if (unlikely(!(gr_status & GR_READY))) + return; + + preempt_disable(); + matchn = lookup_name_entry(gr_to_proc_filename_rbac(dentry, init_pid_ns.proc_mnt)); + + if (unlikely((unsigned long)matchn)) { + write_lock(&gr_inode_lock); + __do_handle_create(matchn, inode->i_ino, inode->i_sb->s_dev); + write_unlock(&gr_inode_lock); + } + preempt_enable(); + + return; +} + +void +gr_handle_rename(struct inode *old_dir, struct inode *new_dir, + struct dentry *old_dentry, + struct dentry *new_dentry, + 
struct vfsmount *mnt, const __u8 replace, unsigned int flags) +{ + struct name_entry *matchn; + struct name_entry *matchn2 = NULL; + struct inodev_entry *inodev; + struct inode *inode = d_backing_inode(new_dentry); + struct inode *old_inode = d_backing_inode(old_dentry); + u64 old_ino = __get_ino(old_dentry); + dev_t old_dev = __get_dev(old_dentry); + unsigned int exchange = flags & RENAME_EXCHANGE; + + /* vfs_rename swaps the name and parent link for old_dentry and + new_dentry + at this point, old_dentry has the new name, parent link, and inode + for the renamed file + if a file is being replaced by a rename, new_dentry has the inode + and name for the replaced file + */ + + if (unlikely(!(gr_status & GR_READY))) + return; + + preempt_disable(); + matchn = lookup_name_entry(gr_to_filename_rbac(old_dentry, mnt)); + + /* exchange cases: + a filename exists for the source, but not dest + do a recreate on source + a filename exists for the dest, but not source + do a recreate on dest + a filename exists for both source and dest + delete source and dest, then create source and dest + a filename exists for neither source nor dest + no updates needed + + the name entry lookups get us the old inode/dev associated with + each name, so do the deletes first (if possible) so that when + we do the create, we pick up on the right entries + */ + + if (exchange) + matchn2 = lookup_name_entry(gr_to_filename_rbac(new_dentry, mnt)); + + /* we wouldn't have to check d_inode if it weren't for + NFS silly-renaming + */ + + write_lock(&gr_inode_lock); + if (unlikely((replace || exchange) && inode)) { + u64 new_ino = __get_ino(new_dentry); + dev_t new_dev = __get_dev(new_dentry); + + inodev = lookup_inodev_entry(new_ino, new_dev); + if (inodev != NULL && ((inode->i_nlink <= 1) || d_is_dir(new_dentry))) + do_handle_delete(inodev, new_ino, new_dev); + } + + inodev = lookup_inodev_entry(old_ino, old_dev); + if (inodev != NULL && ((old_inode->i_nlink <= 1) || d_is_dir(old_dentry))) + 
do_handle_delete(inodev, old_ino, old_dev); + + if (unlikely(matchn != NULL)) + do_handle_create(matchn, old_dentry, mnt); + + if (unlikely(matchn2 != NULL)) + do_handle_create(matchn2, new_dentry, mnt); + + write_unlock(&gr_inode_lock); + preempt_enable(); + + return; +} + +#if defined(CONFIG_GRKERNSEC_RESLOG) || !defined(CONFIG_GRKERNSEC_NO_RBAC) +static const unsigned long res_learn_bumps[GR_NLIMITS] = { + [RLIMIT_CPU] = GR_RLIM_CPU_BUMP, + [RLIMIT_FSIZE] = GR_RLIM_FSIZE_BUMP, + [RLIMIT_DATA] = GR_RLIM_DATA_BUMP, + [RLIMIT_STACK] = GR_RLIM_STACK_BUMP, + [RLIMIT_CORE] = GR_RLIM_CORE_BUMP, + [RLIMIT_RSS] = GR_RLIM_RSS_BUMP, + [RLIMIT_NPROC] = GR_RLIM_NPROC_BUMP, + [RLIMIT_NOFILE] = GR_RLIM_NOFILE_BUMP, + [RLIMIT_MEMLOCK] = GR_RLIM_MEMLOCK_BUMP, + [RLIMIT_AS] = GR_RLIM_AS_BUMP, + [RLIMIT_LOCKS] = GR_RLIM_LOCKS_BUMP, + [RLIMIT_SIGPENDING] = GR_RLIM_SIGPENDING_BUMP, + [RLIMIT_MSGQUEUE] = GR_RLIM_MSGQUEUE_BUMP, + [RLIMIT_NICE] = GR_RLIM_NICE_BUMP, + [RLIMIT_RTPRIO] = GR_RLIM_RTPRIO_BUMP, + [RLIMIT_RTTIME] = GR_RLIM_RTTIME_BUMP +}; + +void +gr_learn_resource(const struct task_struct *task, + const int res, const unsigned long wanted, const int gt) +{ + struct acl_subject_label *acl; + const struct cred *cred; + + if (unlikely((gr_status & GR_READY) && + task->acl && (task->acl->mode & (GR_LEARN | GR_INHERITLEARN)))) + goto skip_reslog; + + gr_log_resource(task, res, wanted, gt); +skip_reslog: + + if (unlikely(!(gr_status & GR_READY) || !wanted || res >= GR_NLIMITS)) + return; + + acl = task->acl; + + if (likely(!acl || !(acl->mode & (GR_LEARN | GR_INHERITLEARN)) || + !(acl->resmask & (1U << (unsigned short) res)))) + return; + + if (wanted >= acl->res[res].rlim_cur) { + unsigned long res_add; + + res_add = wanted + res_learn_bumps[res]; + + acl->res[res].rlim_cur = res_add; + + if (wanted > acl->res[res].rlim_max) + acl->res[res].rlim_max = res_add; + + /* only log the subject filename, since resource logging is supported for + single-subject learning only */ + 
rcu_read_lock(); + cred = __task_cred(task); + security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename, + task->role->roletype, GR_GLOBAL_UID(cred->uid), GR_GLOBAL_GID(cred->gid), acl->filename, + acl->filename, acl->res[res].rlim_cur, acl->res[res].rlim_max, + "", (unsigned long) res, &task->signal->saved_ip); + rcu_read_unlock(); + } + + return; +} +EXPORT_SYMBOL_GPL(gr_learn_resource); +#endif + +#if defined(CONFIG_PAX_HAVE_ACL_FLAGS) && (defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)) +void +pax_set_initial_flags(struct linux_binprm *bprm) +{ + struct task_struct *task = current; + struct acl_subject_label *proc; + unsigned long flags; + + if (unlikely(!(gr_status & GR_READY))) + return; + + flags = pax_get_flags(task); + + proc = task->acl; + + if (proc->pax_flags & GR_PAX_DISABLE_PAGEEXEC) + flags &= ~MF_PAX_PAGEEXEC; + if (proc->pax_flags & GR_PAX_DISABLE_SEGMEXEC) + flags &= ~MF_PAX_SEGMEXEC; + if (proc->pax_flags & GR_PAX_DISABLE_RANDMMAP) + flags &= ~MF_PAX_RANDMMAP; + if (proc->pax_flags & GR_PAX_DISABLE_EMUTRAMP) + flags &= ~MF_PAX_EMUTRAMP; + if (proc->pax_flags & GR_PAX_DISABLE_MPROTECT) + flags &= ~MF_PAX_MPROTECT; + + if (proc->pax_flags & GR_PAX_ENABLE_PAGEEXEC) + flags |= MF_PAX_PAGEEXEC; + if (proc->pax_flags & GR_PAX_ENABLE_SEGMEXEC) + flags |= MF_PAX_SEGMEXEC; + if (proc->pax_flags & GR_PAX_ENABLE_RANDMMAP) + flags |= MF_PAX_RANDMMAP; + if (proc->pax_flags & GR_PAX_ENABLE_EMUTRAMP) + flags |= MF_PAX_EMUTRAMP; + if (proc->pax_flags & GR_PAX_ENABLE_MPROTECT) + flags |= MF_PAX_MPROTECT; + + pax_set_flags(task, flags); + + return; +} +#endif + +int +gr_handle_proc_ptrace(struct task_struct *task) +{ + struct file *filp; + struct task_struct *tmp = task; + struct task_struct *curtemp = current; + __u32 retmode; + +#ifndef CONFIG_GRKERNSEC_HARDEN_PTRACE + if (unlikely(!(gr_status & GR_READY))) + return 0; +#endif + + read_lock(&tasklist_lock); + read_lock(&grsec_exec_file_lock); + filp = task->exec_file; + + while (task_pid_nr(tmp) > 0) { + 
if (tmp == curtemp) + break; + tmp = tmp->real_parent; + } + + if (!filp || (task_pid_nr(tmp) == 0 && ((grsec_enable_harden_ptrace && gr_is_global_nonroot(current_uid()) && !(gr_status & GR_READY)) || + ((gr_status & GR_READY) && !(current->acl->mode & GR_RELAXPTRACE))))) { + read_unlock(&grsec_exec_file_lock); + read_unlock(&tasklist_lock); + return 1; + } + +#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE + if (!(gr_status & GR_READY)) { + read_unlock(&grsec_exec_file_lock); + read_unlock(&tasklist_lock); + return 0; + } +#endif + + retmode = gr_search_file(filp->f_path.dentry, GR_NOPTRACE, filp->f_path.mnt); + read_unlock(&grsec_exec_file_lock); + read_unlock(&tasklist_lock); + + if (retmode & GR_NOPTRACE) + return 1; + + if (!(current->acl->mode & GR_POVERRIDE) && !(current->role->roletype & GR_ROLE_GOD) + && (current->acl != task->acl || (current->acl != current->role->root_label + && task_pid_nr(current) != task_pid_nr(task)))) + return 1; + + return 0; +} + +void task_grsec_rbac(struct seq_file *m, struct task_struct *p) +{ + if (unlikely(!(gr_status & GR_READY))) + return; + + if (!(current->role->roletype & GR_ROLE_GOD)) + return; + + seq_printf(m, "RBAC:\t%.64s:%c:%.950s\n", + p->role->rolename, gr_task_roletype_to_char(p), + p->acl->filename); +} + +int +gr_handle_ptrace(struct task_struct *task, const long request) +{ + struct task_struct *tmp = task; + struct task_struct *curtemp = current; + __u32 retmode; + +#ifndef CONFIG_GRKERNSEC_HARDEN_PTRACE + if (unlikely(!(gr_status & GR_READY))) + return 0; +#endif + if (request == PTRACE_ATTACH || request == PTRACE_SEIZE) { + read_lock(&tasklist_lock); + while (task_pid_nr(tmp) > 0) { + if (tmp == curtemp) + break; + tmp = tmp->real_parent; + } + + if (task_pid_nr(tmp) == 0 && ((grsec_enable_harden_ptrace && gr_is_global_nonroot(current_uid()) && !(gr_status & GR_READY)) || + ((gr_status & GR_READY) && !(current->acl->mode & GR_RELAXPTRACE)))) { + read_unlock(&tasklist_lock); + gr_log_ptrace(GR_DONT_AUDIT, 
GR_PTRACE_ACL_MSG, task); + return 1; + } + read_unlock(&tasklist_lock); + } + +#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE + if (!(gr_status & GR_READY)) + return 0; +#endif + + read_lock(&grsec_exec_file_lock); + if (unlikely(!task->exec_file)) { + read_unlock(&grsec_exec_file_lock); + return 0; + } + + retmode = gr_search_file(task->exec_file->f_path.dentry, GR_PTRACERD | GR_NOPTRACE, task->exec_file->f_path.mnt); + read_unlock(&grsec_exec_file_lock); + + if (retmode & GR_NOPTRACE) { + gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task); + return 1; + } + + if (retmode & GR_PTRACERD) { + switch (request) { + case PTRACE_SEIZE: + case PTRACE_POKETEXT: + case PTRACE_POKEDATA: + case PTRACE_POKEUSR: +#if !defined(CONFIG_PPC32) && !defined(CONFIG_PPC64) && !defined(CONFIG_PARISC) && !defined(CONFIG_ALPHA) && !defined(CONFIG_IA64) && !defined(CONFIG_ARM64) + case PTRACE_SETREGS: + case PTRACE_SETFPREGS: +#endif +#ifdef CONFIG_COMPAT +#ifdef CONFIG_ARM64 + case COMPAT_PTRACE_SETREGS: + case COMPAT_PTRACE_SETVFPREGS: +#ifdef CONFIG_HAVE_HW_BREAKPOINT + case COMPAT_PTRACE_SETHBPREGS: +#endif +#endif +#endif +#ifdef CONFIG_X86 + case PTRACE_SETFPXREGS: +#endif +#ifdef CONFIG_ALTIVEC + case PTRACE_SETVRREGS: +#endif +#ifdef CONFIG_ARM + case PTRACE_SET_SYSCALL: + case PTRACE_SETVFPREGS: +#ifdef CONFIG_HAVE_HW_BREAKPOINT + case PTRACE_SETHBPREGS: +#endif +#endif + return 1; + default: + return 0; + } + } else if (!(current->acl->mode & GR_POVERRIDE) && + !(current->role->roletype & GR_ROLE_GOD) && + (current->acl != task->acl)) { + gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task); + return 1; + } + + return 0; +} + +static int is_writable_mmap(const struct file *filp) +{ + struct task_struct *task = current; + struct acl_object_label *obj, *obj2; + struct dentry *dentry = filp->f_path.dentry; + struct vfsmount *mnt = filp->f_path.mnt; + struct inode *inode = d_backing_inode(dentry); + + if (gr_status & GR_READY && !(task->acl->mode & GR_OVERRIDE) && + !task->is_writable 
&& d_is_reg(dentry) && (mnt != shm_mnt || (inode->i_nlink > 0))) { + obj = chk_obj_label(dentry, mnt, running_polstate.default_role->root_label); + obj2 = chk_obj_label(dentry, mnt, task->role->root_label); + if (unlikely((obj->mode & GR_WRITE) || (obj2->mode & GR_WRITE))) { + gr_log_fs_generic(GR_DONT_AUDIT, GR_WRITLIB_ACL_MSG, dentry, mnt); + return 1; + } + } + return 0; +} + +int +gr_acl_handle_mmap(const struct file *file, const unsigned long prot) +{ + __u32 mode; + + if (unlikely(!file || !(prot & PROT_EXEC))) + return 1; + + if (is_writable_mmap(file)) + return 0; + + mode = + gr_search_file(file->f_path.dentry, + GR_EXEC | GR_AUDIT_EXEC | GR_SUPPRESS, + file->f_path.mnt); + + if (!gr_tpe_allow(file)) + return 0; + + if (unlikely(!(mode & GR_EXEC) && !(mode & GR_SUPPRESS))) { + gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_MMAP_ACL_MSG, file->f_path.dentry, file->f_path.mnt); + return 0; + } else if (unlikely(!(mode & GR_EXEC))) { + return 0; + } else if (unlikely(mode & GR_EXEC && mode & GR_AUDIT_EXEC)) { + gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_MMAP_ACL_MSG, file->f_path.dentry, file->f_path.mnt); + return 1; + } + + return 1; +} + +int +gr_acl_handle_mprotect(const struct file *file, const unsigned long prot) +{ + __u32 mode; + + if (unlikely(!file || !(prot & PROT_EXEC))) + return 1; + + if (is_writable_mmap(file)) + return 0; + + mode = + gr_search_file(file->f_path.dentry, + GR_EXEC | GR_AUDIT_EXEC | GR_SUPPRESS, + file->f_path.mnt); + + if (!gr_tpe_allow(file)) + return 0; + + if (unlikely(!(mode & GR_EXEC) && !(mode & GR_SUPPRESS))) { + gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_MPROTECT_ACL_MSG, file->f_path.dentry, file->f_path.mnt); + return 0; + } else if (unlikely(!(mode & GR_EXEC))) { + return 0; + } else if (unlikely(mode & GR_EXEC && mode & GR_AUDIT_EXEC)) { + gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_MPROTECT_ACL_MSG, file->f_path.dentry, file->f_path.mnt); + return 1; + } + + return 1; +} + +void +gr_acl_handle_psacct(struct task_struct *task, const 
long code) +{ + unsigned long runtime, cputime; + cputime_t utime, stime; + unsigned int wday, cday; + __u8 whr, chr; + __u8 wmin, cmin; + __u8 wsec, csec; + struct timespec curtime, starttime; + + if (unlikely(!(gr_status & GR_READY) || !task->acl || + !(task->acl->mode & GR_PROCACCT))) + return; + + curtime = ns_to_timespec(ktime_get_ns()); + starttime = ns_to_timespec(task->start_time); + runtime = curtime.tv_sec - starttime.tv_sec; + wday = runtime / (60 * 60 * 24); + runtime -= wday * (60 * 60 * 24); + whr = runtime / (60 * 60); + runtime -= whr * (60 * 60); + wmin = runtime / 60; + runtime -= wmin * 60; + wsec = runtime; + + task_cputime(task, &utime, &stime); + cputime = cputime_to_secs(utime + stime); + cday = cputime / (60 * 60 * 24); + cputime -= cday * (60 * 60 * 24); + chr = cputime / (60 * 60); + cputime -= chr * (60 * 60); + cmin = cputime / 60; + cputime -= cmin * 60; + csec = cputime; + + gr_log_procacct(GR_DO_AUDIT, GR_ACL_PROCACCT_MSG, task, wday, whr, wmin, wsec, cday, chr, cmin, csec, code); + + return; +} + +#ifdef CONFIG_TASKSTATS +int gr_is_taskstats_denied(int pid) +{ + struct task_struct *task; +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP) + const struct cred *cred; +#endif + int ret = 0; + + /* restrict taskstats viewing to un-chrooted root users + who have the 'view' subject flag if the RBAC system is enabled + */ + + rcu_read_lock(); + read_lock(&tasklist_lock); + task = find_task_by_vpid(pid); + if (task) { +#ifdef CONFIG_GRKERNSEC_CHROOT + if (proc_is_chrooted(task)) + ret = -EACCES; +#endif +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP) + cred = __task_cred(task); +#ifdef CONFIG_GRKERNSEC_PROC_USER + if (gr_is_global_nonroot(cred->uid)) + ret = -EACCES; +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP) + if (gr_is_global_nonroot(cred->uid) && !groups_search(cred->group_info, grsec_proc_gid)) + ret = -EACCES; +#endif +#endif + if (gr_status & GR_READY) { + if 
(!(task->acl->mode & GR_VIEW)) + ret = -EACCES; + } + } else + ret = -ENOENT; + + read_unlock(&tasklist_lock); + rcu_read_unlock(); + + return ret; +} +#endif + +/* AUXV entries are filled via a descendant of search_binary_handler + after we've already applied the subject for the target +*/ +int gr_acl_enable_at_secure(void) +{ + if (unlikely(!(gr_status & GR_READY))) + return 0; + + if (current->acl->mode & GR_ATSECURE) + return 1; + + return 0; +} + +int gr_acl_handle_filldir(const struct file *file, const char *name, const unsigned int namelen, const u64 ino) +{ + struct task_struct *task = current; + struct dentry *dentry = file->f_path.dentry; + struct vfsmount *mnt = file->f_path.mnt; + struct acl_object_label *obj, *tmp; + struct acl_subject_label *subj; + unsigned int bufsize; + int is_not_root; + char *path; + dev_t dev = __get_dev(dentry); + + if (unlikely(!(gr_status & GR_READY))) + return 1; + + if (task->acl->mode & (GR_LEARN | GR_INHERITLEARN)) + return 1; + + /* ignore Eric Biederman */ + if (IS_PRIVATE(d_backing_inode(dentry))) + return 1; + + subj = task->acl; + read_lock(&gr_inode_lock); + do { + obj = lookup_acl_obj_label(ino, dev, subj); + if (obj != NULL) { + read_unlock(&gr_inode_lock); + return (obj->mode & GR_FIND) ? 1 : 0; + } + } while ((subj = subj->parent_subject)); + read_unlock(&gr_inode_lock); + + /* this is purely an optimization since we're looking for an object + for the directory we're doing a readdir on + if it's possible for any globbed object to match the entry we're + filling into the directory, then the object we find here will be + an anchor point with attached globbed objects + */ + obj = chk_obj_label_noglob(dentry, mnt, task->acl); + if (obj->globbed == NULL) + return (obj->mode & GR_FIND) ? 1 : 0; + + is_not_root = ((obj->filename[0] == '/') && + (obj->filename[1] == '\0')) ? 
0 : 1; + bufsize = PAGE_SIZE - namelen - is_not_root; + + /* check bufsize > PAGE_SIZE || bufsize == 0 */ + if (unlikely((bufsize - 1) > (PAGE_SIZE - 1))) + return 1; + + preempt_disable(); + path = d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0], smp_processor_id()), + bufsize); + + bufsize = strlen(path); + + /* if base is "/", don't append an additional slash */ + if (is_not_root) + *(path + bufsize) = '/'; + memcpy(path + bufsize + is_not_root, name, namelen); + *(path + bufsize + namelen + is_not_root) = '\0'; + + tmp = obj->globbed; + while (tmp) { + if (!glob_match(tmp->filename, path)) { + preempt_enable(); + return (tmp->mode & GR_FIND) ? 1 : 0; + } + tmp = tmp->next; + } + preempt_enable(); + return (obj->mode & GR_FIND) ? 1 : 0; +} + +void gr_put_exec_file(struct task_struct *task) +{ + struct file *filp; + + write_lock(&grsec_exec_file_lock); + filp = task->exec_file; + task->exec_file = NULL; + write_unlock(&grsec_exec_file_lock); + + if (filp) + fput(filp); + + return; +} + + +#ifdef CONFIG_NETFILTER_XT_MATCH_GRADM_MODULE +EXPORT_SYMBOL_GPL(gr_acl_is_enabled); +#endif +#ifdef CONFIG_SECURITY +EXPORT_SYMBOL_GPL(gr_check_user_change); +EXPORT_SYMBOL_GPL(gr_check_group_change); +#endif + diff --git a/grsecurity/gracl_alloc.c b/grsecurity/gracl_alloc.c new file mode 100644 index 0000000000..9adc75c948 --- /dev/null +++ b/grsecurity/gracl_alloc.c @@ -0,0 +1,105 @@ +#include +#include +#include +#include +#include +#include + +static struct gr_alloc_state __current_alloc_state = { 1, 1, NULL }; +struct gr_alloc_state *current_alloc_state = &__current_alloc_state; + +static int +alloc_pop(void) +{ + if (current_alloc_state->alloc_stack_next == 1) + return 0; + + kfree(current_alloc_state->alloc_stack[current_alloc_state->alloc_stack_next - 2]); + + current_alloc_state->alloc_stack_next--; + + return 1; +} + +static int +alloc_push(void *buf) +{ + if (current_alloc_state->alloc_stack_next >= current_alloc_state->alloc_stack_size) + return 1; + + 
current_alloc_state->alloc_stack[current_alloc_state->alloc_stack_next - 1] = buf; + + current_alloc_state->alloc_stack_next++; + + return 0; +} + +void * +acl_alloc(unsigned long len) +{ + void *ret = NULL; + + if (!len || len > PAGE_SIZE) + goto out; + + ret = kmalloc(len, GFP_KERNEL); + + if (ret) { + if (alloc_push(ret)) { + kfree(ret); + ret = NULL; + } + } + +out: + return ret; +} + +void * +acl_alloc_num(unsigned long num, unsigned long len) +{ + if (!len || (num > (PAGE_SIZE / len))) + return NULL; + + return acl_alloc(num * len); +} + +void +acl_free_all(void) +{ + if (!current_alloc_state->alloc_stack) + return; + + while (alloc_pop()) ; + + if (current_alloc_state->alloc_stack) { + if ((current_alloc_state->alloc_stack_size * sizeof (void *)) <= PAGE_SIZE) + kfree(current_alloc_state->alloc_stack); + else + vfree(current_alloc_state->alloc_stack); + } + + current_alloc_state->alloc_stack = NULL; + current_alloc_state->alloc_stack_size = 1; + current_alloc_state->alloc_stack_next = 1; + + return; +} + +int +acl_alloc_stack_init(unsigned long size) +{ + if ((size * sizeof (void *)) <= PAGE_SIZE) + current_alloc_state->alloc_stack = + (void **) kmalloc(size * sizeof (void *), GFP_KERNEL); + else + current_alloc_state->alloc_stack = (void **) vmalloc(size * sizeof (void *)); + + current_alloc_state->alloc_stack_size = size; + current_alloc_state->alloc_stack_next = 1; + + if (!current_alloc_state->alloc_stack) + return 0; + else + return 1; +} diff --git a/grsecurity/gracl_cap.c b/grsecurity/gracl_cap.c new file mode 100644 index 0000000000..8747091f68 --- /dev/null +++ b/grsecurity/gracl_cap.c @@ -0,0 +1,96 @@ +#include +#include +#include +#include +#include +#include + +extern const char *captab_log[]; +extern int captab_log_entries; + +int gr_learn_cap(const struct task_struct *task, const struct cred *cred, const int cap, bool log) +{ + struct acl_subject_label *curracl; + + if (!gr_acl_is_enabled()) + return 1; + + curracl = task->acl; + + if 
(curracl->mode & (GR_LEARN | GR_INHERITLEARN)) { + if (log) + security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename, + task->role->roletype, GR_GLOBAL_UID(cred->uid), + GR_GLOBAL_GID(cred->gid), task->exec_file ? + gr_to_filename(task->exec_file->f_path.dentry, + task->exec_file->f_path.mnt) : curracl->filename, + curracl->filename, 0UL, + 0UL, "", (unsigned long) cap, &task->signal->saved_ip); + return 1; + } + + return 0; +} + +int gr_task_acl_is_capable(const struct task_struct *task, const struct cred *cred, const int cap, bool log) +{ + struct acl_subject_label *curracl; + kernel_cap_t cap_drop = __cap_empty_set, cap_mask = __cap_empty_set; + kernel_cap_t cap_audit = __cap_empty_set; + + if (!gr_acl_is_enabled()) + return 1; + + curracl = task->acl; + + cap_drop = curracl->cap_lower; + cap_mask = curracl->cap_mask; + cap_audit = curracl->cap_invert_audit; + + while ((curracl = curracl->parent_subject)) { + /* if the cap isn't specified in the current computed mask but is specified in the + current level subject, and is lowered in the current level subject, then add + it to the set of dropped capabilities + otherwise, add the current level subject's mask to the current computed mask + */ + if (!cap_raised(cap_mask, cap) && cap_raised(curracl->cap_mask, cap)) { + cap_raise(cap_mask, cap); + if (cap_raised(curracl->cap_lower, cap)) + cap_raise(cap_drop, cap); + if (cap_raised(curracl->cap_invert_audit, cap)) + cap_raise(cap_audit, cap); + } + } + + if (!cap_raised(cap_drop, cap)) { + if (log && cap_raised(cap_audit, cap)) + gr_log_cap(GR_DO_AUDIT, GR_CAP_ACL_MSG2, task, captab_log[cap]); + return 1; + } + + /* only learn the capability use if the process has the capability in the + general case, the two uses in sys.c of gr_learn_cap are an exception + to this rule to ensure any role transition involves what the full-learned + policy believes in a privileged process + */ + if (cap_raised(cred->cap_effective, cap) && gr_learn_cap(task, cred, cap, log)) + return 
1; + + if (log && (cap >= 0) && (cap < captab_log_entries) && cap_raised(cred->cap_effective, cap) && !cap_raised(cap_audit, cap)) + gr_log_cap(GR_DONT_AUDIT, GR_CAP_ACL_MSG, task, captab_log[cap]); + + return 0; +} + +int +gr_acl_is_capable(const int cap) +{ + return gr_task_acl_is_capable(current, current_cred(), cap, true); +} + +int +gr_acl_is_capable_nolog(const int cap) +{ + return gr_task_acl_is_capable(current, current_cred(), cap, false); +} + diff --git a/grsecurity/gracl_compat.c b/grsecurity/gracl_compat.c new file mode 100644 index 0000000000..a43dd06a27 --- /dev/null +++ b/grsecurity/gracl_compat.c @@ -0,0 +1,269 @@ +#include +#include +#include +#include + +#include + +int copy_gr_arg_wrapper_compat(const char *buf, struct gr_arg_wrapper *uwrap) +{ + struct gr_arg_wrapper_compat uwrapcompat; + + if (copy_from_user(&uwrapcompat, buf, sizeof(uwrapcompat))) + return -EFAULT; + + if ((uwrapcompat.version != GRSECURITY_VERSION) || + (uwrapcompat.size != sizeof(struct gr_arg_compat))) + return -EINVAL; + + uwrap->arg = compat_ptr(uwrapcompat.arg); + uwrap->version = uwrapcompat.version; + uwrap->size = sizeof(struct gr_arg); + + return 0; +} + +int copy_gr_arg_compat(const struct gr_arg __user *buf, struct gr_arg *arg) +{ + struct gr_arg_compat argcompat; + + if (copy_from_user(&argcompat, buf, sizeof(argcompat))) + return -EFAULT; + + arg->role_db.r_table = compat_ptr(argcompat.role_db.r_table); + arg->role_db.num_pointers = argcompat.role_db.num_pointers; + arg->role_db.num_roles = argcompat.role_db.num_roles; + arg->role_db.num_domain_children = argcompat.role_db.num_domain_children; + arg->role_db.num_subjects = argcompat.role_db.num_subjects; + arg->role_db.num_objects = argcompat.role_db.num_objects; + + memcpy(&arg->pw, &argcompat.pw, sizeof(arg->pw)); + memcpy(&arg->salt, &argcompat.salt, sizeof(arg->salt)); + memcpy(&arg->sum, &argcompat.sum, sizeof(arg->sum)); + memcpy(&arg->sp_role, &argcompat.sp_role, sizeof(arg->sp_role)); + arg->sprole_pws = 
compat_ptr(argcompat.sprole_pws); + arg->segv_device = argcompat.segv_device; + arg->segv_inode = argcompat.segv_inode; + arg->segv_uid = argcompat.segv_uid; + arg->num_sprole_pws = argcompat.num_sprole_pws; + arg->mode = argcompat.mode; + + return 0; +} + +int copy_acl_object_label_compat(struct acl_object_label *obj, const struct acl_object_label *userp) +{ + struct acl_object_label_compat objcompat; + + if (copy_from_user(&objcompat, userp, sizeof(objcompat))) + return -EFAULT; + + obj->filename = compat_ptr(objcompat.filename); + obj->inode = objcompat.inode; + obj->device = objcompat.device; + obj->mode = objcompat.mode; + + obj->nested = compat_ptr(objcompat.nested); + obj->globbed = compat_ptr(objcompat.globbed); + + obj->prev = compat_ptr(objcompat.prev); + obj->next = compat_ptr(objcompat.next); + + return 0; +} + +int copy_acl_subject_label_compat(struct acl_subject_label *subj, const struct acl_subject_label *userp) +{ + unsigned int i; + struct acl_subject_label_compat subjcompat; + + if (copy_from_user(&subjcompat, userp, sizeof(subjcompat))) + return -EFAULT; + + subj->filename = compat_ptr(subjcompat.filename); + subj->inode = subjcompat.inode; + subj->device = subjcompat.device; + subj->mode = subjcompat.mode; + subj->cap_mask = subjcompat.cap_mask; + subj->cap_lower = subjcompat.cap_lower; + subj->cap_invert_audit = subjcompat.cap_invert_audit; + + for (i = 0; i < GR_NLIMITS; i++) { + if (subjcompat.res[i].rlim_cur == COMPAT_RLIM_INFINITY) + subj->res[i].rlim_cur = RLIM_INFINITY; + else + subj->res[i].rlim_cur = subjcompat.res[i].rlim_cur; + if (subjcompat.res[i].rlim_max == COMPAT_RLIM_INFINITY) + subj->res[i].rlim_max = RLIM_INFINITY; + else + subj->res[i].rlim_max = subjcompat.res[i].rlim_max; + } + subj->resmask = subjcompat.resmask; + + subj->user_trans_type = subjcompat.user_trans_type; + subj->group_trans_type = subjcompat.group_trans_type; + subj->user_transitions = compat_ptr(subjcompat.user_transitions); + subj->group_transitions = 
compat_ptr(subjcompat.group_transitions); + subj->user_trans_num = subjcompat.user_trans_num; + subj->group_trans_num = subjcompat.group_trans_num; + + memcpy(&subj->sock_families, &subjcompat.sock_families, sizeof(subj->sock_families)); + memcpy(&subj->ip_proto, &subjcompat.ip_proto, sizeof(subj->ip_proto)); + subj->ip_type = subjcompat.ip_type; + subj->ips = compat_ptr(subjcompat.ips); + subj->ip_num = subjcompat.ip_num; + subj->inaddr_any_override = subjcompat.inaddr_any_override; + + subj->crashes = subjcompat.crashes; + subj->expires = subjcompat.expires; + + subj->parent_subject = compat_ptr(subjcompat.parent_subject); + subj->hash = compat_ptr(subjcompat.hash); + subj->prev = compat_ptr(subjcompat.prev); + subj->next = compat_ptr(subjcompat.next); + + subj->obj_hash = compat_ptr(subjcompat.obj_hash); + subj->obj_hash_size = subjcompat.obj_hash_size; + subj->pax_flags = subjcompat.pax_flags; + + return 0; +} + +int copy_acl_role_label_compat(struct acl_role_label *role, const struct acl_role_label *userp) +{ + struct acl_role_label_compat rolecompat; + + if (copy_from_user(&rolecompat, userp, sizeof(rolecompat))) + return -EFAULT; + + role->rolename = compat_ptr(rolecompat.rolename); + role->uidgid = rolecompat.uidgid; + role->roletype = rolecompat.roletype; + + role->auth_attempts = rolecompat.auth_attempts; + role->expires = rolecompat.expires; + + role->root_label = compat_ptr(rolecompat.root_label); + role->hash = compat_ptr(rolecompat.hash); + + role->prev = compat_ptr(rolecompat.prev); + role->next = compat_ptr(rolecompat.next); + + role->transitions = compat_ptr(rolecompat.transitions); + role->allowed_ips = compat_ptr(rolecompat.allowed_ips); + role->domain_children = compat_ptr(rolecompat.domain_children); + role->domain_child_num = rolecompat.domain_child_num; + + role->umask = rolecompat.umask; + + role->subj_hash = compat_ptr(rolecompat.subj_hash); + role->subj_hash_size = rolecompat.subj_hash_size; + + return 0; +} + +int 
copy_role_allowed_ip_compat(struct role_allowed_ip *roleip, const struct role_allowed_ip *userp) +{ + struct role_allowed_ip_compat roleip_compat; + + if (copy_from_user(&roleip_compat, userp, sizeof(roleip_compat))) + return -EFAULT; + + roleip->addr = roleip_compat.addr; + roleip->netmask = roleip_compat.netmask; + + roleip->prev = compat_ptr(roleip_compat.prev); + roleip->next = compat_ptr(roleip_compat.next); + + return 0; +} + +int copy_role_transition_compat(struct role_transition *trans, const struct role_transition *userp) +{ + struct role_transition_compat trans_compat; + + if (copy_from_user(&trans_compat, userp, sizeof(trans_compat))) + return -EFAULT; + + trans->rolename = compat_ptr(trans_compat.rolename); + + trans->prev = compat_ptr(trans_compat.prev); + trans->next = compat_ptr(trans_compat.next); + + return 0; + +} + +int copy_gr_hash_struct_compat(struct gr_hash_struct *hash, const struct gr_hash_struct *userp) +{ + struct gr_hash_struct_compat hash_compat; + + if (copy_from_user(&hash_compat, userp, sizeof(hash_compat))) + return -EFAULT; + + hash->table = compat_ptr(hash_compat.table); + hash->nametable = compat_ptr(hash_compat.nametable); + hash->first = compat_ptr(hash_compat.first); + + hash->table_size = hash_compat.table_size; + hash->used_size = hash_compat.used_size; + + hash->type = hash_compat.type; + + return 0; +} + +int copy_pointer_from_array_compat(void *ptr, unsigned long idx, const void *userp) +{ + compat_uptr_t ptrcompat; + + if (copy_from_user(&ptrcompat, userp + (idx * sizeof(ptrcompat)), sizeof(ptrcompat))) + return -EFAULT; + + *(void **)ptr = compat_ptr(ptrcompat); + + return 0; +} + +int copy_acl_ip_label_compat(struct acl_ip_label *ip, const struct acl_ip_label *userp) +{ + struct acl_ip_label_compat ip_compat; + + if (copy_from_user(&ip_compat, userp, sizeof(ip_compat))) + return -EFAULT; + + ip->iface = compat_ptr(ip_compat.iface); + ip->addr = ip_compat.addr; + ip->netmask = ip_compat.netmask; + ip->low = 
ip_compat.low; + ip->high = ip_compat.high; + ip->mode = ip_compat.mode; + ip->type = ip_compat.type; + + memcpy(&ip->proto, &ip_compat.proto, sizeof(ip->proto)); + + ip->prev = compat_ptr(ip_compat.prev); + ip->next = compat_ptr(ip_compat.next); + + return 0; +} + +int copy_sprole_pw_compat(struct sprole_pw *pw, unsigned long idx, const struct sprole_pw *userp) +{ + struct sprole_pw_compat pw_compat; + + if (copy_from_user(&pw_compat, (const void *)userp + (sizeof(pw_compat) * idx), sizeof(pw_compat))) + return -EFAULT; + + pw->rolename = compat_ptr(pw_compat.rolename); + memcpy(&pw->salt, pw_compat.salt, sizeof(pw->salt)); + memcpy(&pw->sum, pw_compat.sum, sizeof(pw->sum)); + + return 0; +} + +size_t get_gr_arg_wrapper_size_compat(void) +{ + return sizeof(struct gr_arg_wrapper_compat); +} + diff --git a/grsecurity/gracl_fs.c b/grsecurity/gracl_fs.c new file mode 100644 index 0000000000..fce7f71fb0 --- /dev/null +++ b/grsecurity/gracl_fs.c @@ -0,0 +1,448 @@ +#include +#include +#include +#include +#include +#include +#include +#include +#include + +umode_t +gr_acl_umask(void) +{ + if (unlikely(!gr_acl_is_enabled())) + return 0; + + return current->role->umask; +} + +__u32 +gr_acl_handle_hidden_file(const struct dentry * dentry, + const struct vfsmount * mnt) +{ + __u32 mode; + + if (unlikely(d_is_negative(dentry))) + return GR_FIND; + + mode = + gr_search_file(dentry, GR_FIND | GR_AUDIT_FIND | GR_SUPPRESS, mnt); + + if (unlikely(mode & GR_FIND && mode & GR_AUDIT_FIND)) { + gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_HIDDEN_ACL_MSG, dentry, mnt); + return mode; + } else if (unlikely(!(mode & GR_FIND) && !(mode & GR_SUPPRESS))) { + gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_HIDDEN_ACL_MSG, dentry, mnt); + return 0; + } else if (unlikely(!(mode & GR_FIND))) + return 0; + + return GR_FIND; +} + +__u32 +gr_acl_handle_open(const struct dentry * dentry, const struct vfsmount * mnt, + int acc_mode) +{ + __u32 reqmode = GR_FIND; + __u32 mode; + + if 
(unlikely(d_is_negative(dentry))) + return reqmode; + + if (acc_mode & MAY_APPEND) + reqmode |= GR_APPEND; + else if (acc_mode & MAY_WRITE) + reqmode |= GR_WRITE; + if ((acc_mode & MAY_READ) && !d_is_dir(dentry)) + reqmode |= GR_READ; + + mode = + gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS, + mnt); + + if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) { + gr_log_fs_rbac_mode2(GR_DO_AUDIT, GR_OPEN_ACL_MSG, dentry, mnt, + reqmode & GR_READ ? " reading" : "", + reqmode & GR_WRITE ? " writing" : reqmode & + GR_APPEND ? " appending" : ""); + return reqmode; + } else + if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS))) + { + gr_log_fs_rbac_mode2(GR_DONT_AUDIT, GR_OPEN_ACL_MSG, dentry, mnt, + reqmode & GR_READ ? " reading" : "", + reqmode & GR_WRITE ? " writing" : reqmode & + GR_APPEND ? " appending" : ""); + return 0; + } else if (unlikely((mode & reqmode) != reqmode)) + return 0; + + return reqmode; +} + +__u32 +gr_acl_handle_creat(const struct dentry * dentry, + const struct dentry * p_dentry, + const struct vfsmount * p_mnt, int open_flags, int acc_mode, + const int imode) +{ + __u32 reqmode = GR_WRITE | GR_CREATE; + __u32 mode; + + if (acc_mode & MAY_APPEND) + reqmode |= GR_APPEND; + // if a directory was required or the directory already exists, then + // don't count this open as a read + if ((acc_mode & MAY_READ) && + !((open_flags & O_DIRECTORY) || d_is_dir(dentry))) + reqmode |= GR_READ; + if ((open_flags & O_CREAT) && + ((imode & S_ISUID) || ((imode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP)))) + reqmode |= GR_SETID; + + mode = + gr_check_create(dentry, p_dentry, p_mnt, + reqmode | to_gr_audit(reqmode) | GR_SUPPRESS); + + if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) { + gr_log_fs_rbac_mode2(GR_DO_AUDIT, GR_CREATE_ACL_MSG, dentry, p_mnt, + reqmode & GR_READ ? " reading" : "", + reqmode & GR_WRITE ? " writing" : reqmode & + GR_APPEND ? 
" appending" : ""); + return reqmode; + } else + if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS))) + { + gr_log_fs_rbac_mode2(GR_DONT_AUDIT, GR_CREATE_ACL_MSG, dentry, p_mnt, + reqmode & GR_READ ? " reading" : "", + reqmode & GR_WRITE ? " writing" : reqmode & + GR_APPEND ? " appending" : ""); + return 0; + } else if (unlikely((mode & reqmode) != reqmode)) + return 0; + + return reqmode; +} + +__u32 +gr_acl_handle_access(const struct dentry * dentry, const struct vfsmount * mnt, + const int fmode) +{ + __u32 mode, reqmode = GR_FIND; + + if ((fmode & S_IXOTH) && !d_is_dir(dentry)) + reqmode |= GR_EXEC; + if (fmode & S_IWOTH) + reqmode |= GR_WRITE; + if (fmode & S_IROTH) + reqmode |= GR_READ; + + mode = + gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS, + mnt); + + if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) { + gr_log_fs_rbac_mode3(GR_DO_AUDIT, GR_ACCESS_ACL_MSG, dentry, mnt, + reqmode & GR_READ ? " reading" : "", + reqmode & GR_WRITE ? " writing" : "", + reqmode & GR_EXEC ? " executing" : ""); + return reqmode; + } else + if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS))) + { + gr_log_fs_rbac_mode3(GR_DONT_AUDIT, GR_ACCESS_ACL_MSG, dentry, mnt, + reqmode & GR_READ ? " reading" : "", + reqmode & GR_WRITE ? " writing" : "", + reqmode & GR_EXEC ? 
" executing" : ""); + return 0; + } else if (unlikely((mode & reqmode) != reqmode)) + return 0; + + return reqmode; +} + +static __u32 generic_fs_handler(const struct dentry *dentry, const struct vfsmount *mnt, __u32 reqmode, const char *fmt) +{ + __u32 mode; + + mode = gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS, mnt); + + if (unlikely(((mode & (reqmode)) == (reqmode)) && mode & GR_AUDITS)) { + gr_log_fs_rbac_generic(GR_DO_AUDIT, fmt, dentry, mnt); + return mode; + } else if (unlikely((mode & (reqmode)) != (reqmode) && !(mode & GR_SUPPRESS))) { + gr_log_fs_rbac_generic(GR_DONT_AUDIT, fmt, dentry, mnt); + return 0; + } else if (unlikely((mode & (reqmode)) != (reqmode))) + return 0; + + return (reqmode); +} + +__u32 +gr_acl_handle_rmdir(const struct dentry * dentry, const struct vfsmount * mnt) +{ + return generic_fs_handler(dentry, mnt, GR_WRITE | GR_DELETE , GR_RMDIR_ACL_MSG); +} + +__u32 +gr_acl_handle_unlink(const struct dentry *dentry, const struct vfsmount *mnt) +{ + return generic_fs_handler(dentry, mnt, GR_WRITE | GR_DELETE , GR_UNLINK_ACL_MSG); +} + +__u32 +gr_acl_handle_truncate(const struct dentry *dentry, const struct vfsmount *mnt) +{ + return generic_fs_handler(dentry, mnt, GR_WRITE, GR_TRUNCATE_ACL_MSG); +} + +__u32 +gr_acl_handle_utime(const struct dentry *dentry, const struct vfsmount *mnt) +{ + return generic_fs_handler(dentry, mnt, GR_WRITE, GR_ATIME_ACL_MSG); +} + +__u32 +gr_acl_handle_chmod(const struct dentry *dentry, const struct vfsmount *mnt, + umode_t *modeptr) +{ + umode_t mode; + struct inode *inode = d_backing_inode(dentry); + + *modeptr &= ~gr_acl_umask(); + mode = *modeptr; + + if (unlikely(inode && S_ISSOCK(inode->i_mode))) + return 1; + + if (unlikely(!d_is_dir(dentry) && + ((mode & S_ISUID) || ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP))))) { + return generic_fs_handler(dentry, mnt, GR_WRITE | GR_SETID, + GR_CHMOD_ACL_MSG); + } else { + return generic_fs_handler(dentry, mnt, GR_WRITE, 
GR_CHMOD_ACL_MSG); + } +} + +__u32 +gr_acl_handle_chown(const struct dentry *dentry, const struct vfsmount *mnt) +{ + return generic_fs_handler(dentry, mnt, GR_WRITE, GR_CHOWN_ACL_MSG); +} + +__u32 +gr_acl_handle_setxattr(const struct dentry *dentry, const struct vfsmount *mnt) +{ + return generic_fs_handler(dentry, mnt, GR_WRITE, GR_SETXATTR_ACL_MSG); +} + +__u32 +gr_acl_handle_removexattr(const struct dentry *dentry, const struct vfsmount *mnt) +{ + return generic_fs_handler(dentry, mnt, GR_WRITE, GR_REMOVEXATTR_ACL_MSG); +} + +__u32 +gr_acl_handle_execve(const struct dentry *dentry, const struct vfsmount *mnt) +{ + return generic_fs_handler(dentry, mnt, GR_EXEC, GR_EXEC_ACL_MSG); +} + +__u32 +gr_acl_handle_unix(const struct dentry *dentry, const struct vfsmount *mnt) +{ + return generic_fs_handler(dentry, mnt, GR_READ | GR_WRITE, + GR_UNIXCONNECT_ACL_MSG); +} + +/* hardlinks require at minimum create and link permission, + any additional privilege required is based on the + privilege of the file being linked to +*/ +__u32 +gr_acl_handle_link(const struct dentry * new_dentry, + const struct dentry * parent_dentry, + const struct vfsmount * parent_mnt, + const struct dentry * old_dentry, + const struct vfsmount * old_mnt, const struct filename *to) +{ + __u32 mode; + __u32 needmode = GR_CREATE | GR_LINK; + __u32 needaudit = GR_AUDIT_CREATE | GR_AUDIT_LINK; + + mode = + gr_check_link(new_dentry, parent_dentry, parent_mnt, old_dentry, + old_mnt); + + if (unlikely(((mode & needmode) == needmode) && (mode & needaudit))) { + gr_log_fs_rbac_str(GR_DO_AUDIT, GR_LINK_ACL_MSG, old_dentry, old_mnt, to->name); + return mode; + } else if (unlikely(((mode & needmode) != needmode) && !(mode & GR_SUPPRESS))) { + gr_log_fs_rbac_str(GR_DONT_AUDIT, GR_LINK_ACL_MSG, old_dentry, old_mnt, to->name); + return 0; + } else if (unlikely((mode & needmode) != needmode)) + return 0; + + return 1; +} + +__u32 +gr_acl_handle_symlink(const struct dentry * new_dentry, + const struct dentry * 
parent_dentry, + const struct vfsmount * parent_mnt, const struct filename *from) +{ + __u32 needmode = GR_WRITE | GR_CREATE; + __u32 mode; + + mode = + gr_check_create(new_dentry, parent_dentry, parent_mnt, + GR_CREATE | GR_AUDIT_CREATE | + GR_WRITE | GR_AUDIT_WRITE | GR_SUPPRESS); + + if (unlikely(mode & GR_WRITE && mode & GR_AUDITS)) { + gr_log_fs_str_rbac(GR_DO_AUDIT, GR_SYMLINK_ACL_MSG, from->name, new_dentry, parent_mnt); + return mode; + } else if (unlikely(((mode & needmode) != needmode) && !(mode & GR_SUPPRESS))) { + gr_log_fs_str_rbac(GR_DONT_AUDIT, GR_SYMLINK_ACL_MSG, from->name, new_dentry, parent_mnt); + return 0; + } else if (unlikely((mode & needmode) != needmode)) + return 0; + + return (GR_WRITE | GR_CREATE); +} + +static __u32 generic_fs_create_handler(const struct dentry *new_dentry, const struct dentry *parent_dentry, const struct vfsmount *parent_mnt, __u32 reqmode, const char *fmt) +{ + __u32 mode; + + mode = gr_check_create(new_dentry, parent_dentry, parent_mnt, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS); + + if (unlikely(((mode & (reqmode)) == (reqmode)) && mode & GR_AUDITS)) { + gr_log_fs_rbac_generic(GR_DO_AUDIT, fmt, new_dentry, parent_mnt); + return mode; + } else if (unlikely((mode & (reqmode)) != (reqmode) && !(mode & GR_SUPPRESS))) { + gr_log_fs_rbac_generic(GR_DONT_AUDIT, fmt, new_dentry, parent_mnt); + return 0; + } else if (unlikely((mode & (reqmode)) != (reqmode))) + return 0; + + return (reqmode); +} + +__u32 +gr_acl_handle_mknod(const struct dentry * new_dentry, + const struct dentry * parent_dentry, + const struct vfsmount * parent_mnt, + const int mode) +{ + __u32 reqmode = GR_WRITE | GR_CREATE; + if (unlikely((mode & S_ISUID) || ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP)))) + reqmode |= GR_SETID; + + return generic_fs_create_handler(new_dentry, parent_dentry, parent_mnt, + reqmode, GR_MKNOD_ACL_MSG); +} + +__u32 +gr_acl_handle_mkdir(const struct dentry *new_dentry, + const struct dentry *parent_dentry, + const 
struct vfsmount *parent_mnt) +{ + return generic_fs_create_handler(new_dentry, parent_dentry, parent_mnt, + GR_WRITE | GR_CREATE, GR_MKDIR_ACL_MSG); +} + +#define RENAME_CHECK_SUCCESS(old, new) \ + (((old & (GR_WRITE | GR_READ)) == (GR_WRITE | GR_READ)) && \ + ((new & (GR_WRITE | GR_READ)) == (GR_WRITE | GR_READ))) + +int +gr_acl_handle_rename(struct dentry *new_dentry, + struct dentry *parent_dentry, + const struct vfsmount *parent_mnt, + struct dentry *old_dentry, + struct inode *old_parent_inode, + struct vfsmount *old_mnt, const struct filename *newname, unsigned int flags) +{ + __u32 comp1, comp2; + int error = 0; + + if (unlikely(!gr_acl_is_enabled())) + return 0; + + if (flags & RENAME_EXCHANGE) { + comp1 = gr_search_file(new_dentry, GR_READ | GR_WRITE | + GR_AUDIT_READ | GR_AUDIT_WRITE | + GR_SUPPRESS, parent_mnt); + comp2 = + gr_search_file(old_dentry, + GR_READ | GR_WRITE | GR_AUDIT_READ | + GR_AUDIT_WRITE | GR_SUPPRESS, old_mnt); + } else if (d_is_negative(new_dentry)) { + comp1 = gr_check_create(new_dentry, parent_dentry, parent_mnt, + GR_READ | GR_WRITE | GR_CREATE | GR_AUDIT_READ | + GR_AUDIT_WRITE | GR_AUDIT_CREATE | GR_SUPPRESS); + comp2 = gr_search_file(old_dentry, GR_READ | GR_WRITE | + GR_DELETE | GR_AUDIT_DELETE | + GR_AUDIT_READ | GR_AUDIT_WRITE | + GR_SUPPRESS, old_mnt); + } else { + comp1 = gr_search_file(new_dentry, GR_READ | GR_WRITE | + GR_CREATE | GR_DELETE | + GR_AUDIT_CREATE | GR_AUDIT_DELETE | + GR_AUDIT_READ | GR_AUDIT_WRITE | + GR_SUPPRESS, parent_mnt); + comp2 = + gr_search_file(old_dentry, + GR_READ | GR_WRITE | GR_AUDIT_READ | + GR_DELETE | GR_AUDIT_DELETE | + GR_AUDIT_WRITE | GR_SUPPRESS, old_mnt); + } + + if (RENAME_CHECK_SUCCESS(comp1, comp2) && + ((comp1 & GR_AUDITS) || (comp2 & GR_AUDITS))) + gr_log_fs_rbac_str(GR_DO_AUDIT, GR_RENAME_ACL_MSG, old_dentry, old_mnt, newname->name); + else if (!RENAME_CHECK_SUCCESS(comp1, comp2) && !(comp1 & GR_SUPPRESS) + && !(comp2 & GR_SUPPRESS)) { + gr_log_fs_rbac_str(GR_DONT_AUDIT, 
GR_RENAME_ACL_MSG, old_dentry, old_mnt, newname->name); + error = -EACCES; + } else if (unlikely(!RENAME_CHECK_SUCCESS(comp1, comp2))) + error = -EACCES; + + return error; +} + +void +gr_acl_handle_exit(void) +{ + u16 id; + char *rolename; + + if (unlikely(current->acl_sp_role && gr_acl_is_enabled() && + !(current->role->roletype & GR_ROLE_PERSIST))) { + id = current->acl_role_id; + rolename = current->role->rolename; + gr_set_acls(1); + gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_SPROLEL_ACL_MSG, rolename, id); + } + + gr_put_exec_file(current); + return; +} + +int +gr_acl_handle_procpidmem(const struct task_struct *task) +{ + if (unlikely(!gr_acl_is_enabled())) + return 0; + + if (task != current && (task->acl->mode & GR_PROTPROCFD) && + !(current->acl->mode & GR_POVERRIDE) && + !(current->role->roletype & GR_ROLE_GOD)) + return -EACCES; + + return 0; +} diff --git a/grsecurity/gracl_ip.c b/grsecurity/gracl_ip.c new file mode 100644 index 0000000000..d877c3879f --- /dev/null +++ b/grsecurity/gracl_ip.c @@ -0,0 +1,387 @@ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define GR_BIND 0x01 +#define GR_CONNECT 0x02 +#define GR_INVERT 0x04 +#define GR_BINDOVERRIDE 0x08 +#define GR_CONNECTOVERRIDE 0x10 +#define GR_SOCK_FAMILY 0x20 + +static const char * gr_protocols[IPPROTO_MAX] = { + "ip", "icmp", "igmp", "ggp", "ipencap", "st", "tcp", "cbt", + "egp", "igp", "bbn-rcc", "nvp", "pup", "argus", "emcon", "xnet", + "chaos", "udp", "mux", "dcn", "hmp", "prm", "xns-idp", "trunk-1", + "trunk-2", "leaf-1", "leaf-2", "rdp", "irtp", "iso-tp4", "netblt", "mfe-nsp", + "merit-inp", "sep", "3pc", "idpr", "xtp", "ddp", "idpr-cmtp", "tp++", + "il", "ipv6", "sdrp", "ipv6-route", "ipv6-frag", "idrp", "rsvp", "gre", + "mhrp", "bna", "ipv6-crypt", "ipv6-auth", "i-nlsp", "swipe", "narp", "mobile", + "tlsp", "skip", "ipv6-icmp", "ipv6-nonxt", "ipv6-opts", 
"unknown:61", "cftp", "unknown:63", + "sat-expak", "kryptolan", "rvd", "ippc", "unknown:68", "sat-mon", "visa", "ipcv", + "cpnx", "cphb", "wsn", "pvp", "br-sat-mon", "sun-nd", "wb-mon", "wb-expak", + "iso-ip", "vmtp", "secure-vmtp", "vines", "ttp", "nfsnet-igp", "dgp", "tcf", + "eigrp", "ospf", "sprite-rpc", "larp", "mtp", "ax.25", "ipip", "micp", + "scc-sp", "etherip", "encap", "unknown:99", "gmtp", "ifmp", "pnni", "pim", + "aris", "scps", "qnx", "a/n", "ipcomp", "snp", "compaq-peer", "ipx-in-ip", + "vrrp", "pgm", "unknown:114", "l2tp", "ddx", "iatp", "stp", "srp", + "uti", "smp", "sm", "ptp", "isis", "fire", "crtp", "crdup", + "sscopmce", "iplt", "sps", "pipe", "sctp", "fc", "unkown:134", "unknown:135", + "unknown:136", "unknown:137", "unknown:138", "unknown:139", "unknown:140", "unknown:141", "unknown:142", "unknown:143", + "unknown:144", "unknown:145", "unknown:146", "unknown:147", "unknown:148", "unknown:149", "unknown:150", "unknown:151", + "unknown:152", "unknown:153", "unknown:154", "unknown:155", "unknown:156", "unknown:157", "unknown:158", "unknown:159", + "unknown:160", "unknown:161", "unknown:162", "unknown:163", "unknown:164", "unknown:165", "unknown:166", "unknown:167", + "unknown:168", "unknown:169", "unknown:170", "unknown:171", "unknown:172", "unknown:173", "unknown:174", "unknown:175", + "unknown:176", "unknown:177", "unknown:178", "unknown:179", "unknown:180", "unknown:181", "unknown:182", "unknown:183", + "unknown:184", "unknown:185", "unknown:186", "unknown:187", "unknown:188", "unknown:189", "unknown:190", "unknown:191", + "unknown:192", "unknown:193", "unknown:194", "unknown:195", "unknown:196", "unknown:197", "unknown:198", "unknown:199", + "unknown:200", "unknown:201", "unknown:202", "unknown:203", "unknown:204", "unknown:205", "unknown:206", "unknown:207", + "unknown:208", "unknown:209", "unknown:210", "unknown:211", "unknown:212", "unknown:213", "unknown:214", "unknown:215", + "unknown:216", "unknown:217", "unknown:218", "unknown:219", 
"unknown:220", "unknown:221", "unknown:222", "unknown:223", + "unknown:224", "unknown:225", "unknown:226", "unknown:227", "unknown:228", "unknown:229", "unknown:230", "unknown:231", + "unknown:232", "unknown:233", "unknown:234", "unknown:235", "unknown:236", "unknown:237", "unknown:238", "unknown:239", + "unknown:240", "unknown:241", "unknown:242", "unknown:243", "unknown:244", "unknown:245", "unknown:246", "unknown:247", + "unknown:248", "unknown:249", "unknown:250", "unknown:251", "unknown:252", "unknown:253", "unknown:254", "unknown:255", + }; + +static const char * gr_socktypes[SOCK_MAX] = { + "unknown:0", "stream", "dgram", "raw", "rdm", "seqpacket", "unknown:6", + "unknown:7", "unknown:8", "unknown:9", "packet" + }; + +static const char * gr_sockfamilies[AF_MAX] = { + "unspec", "unix", "inet", "ax25", "ipx", "appletalk", "netrom", "bridge", "atmpvc", "x25", + "inet6", "rose", "decnet", "netbeui", "security", "key", "netlink", "packet", "ash", + "econet", "atmsvc", "rds", "sna", "irda", "ppox", "wanpipe", "llc", "ib", "mpls", "can", + "tipc", "bluetooth", "iucv", "rxrpc", "isdn", "phonet", "ieee802154", "ciaf", "alg", + "nfc", "vsock", "kcm", "qipcrtr" + }; + +const char * +gr_proto_to_name(unsigned char proto) +{ + return gr_protocols[proto]; +} + +const char * +gr_socktype_to_name(unsigned char type) +{ + return gr_socktypes[type]; +} + +const char * +gr_sockfamily_to_name(unsigned char family) +{ + return gr_sockfamilies[family]; +} + +extern const struct net_proto_family __rcu *net_families[NPROTO] __read_mostly; + +int +gr_search_socket(const int domain, const int type, const int protocol) +{ + struct acl_subject_label *curr; + const struct cred *cred = current_cred(); + + if (unlikely(!gr_acl_is_enabled())) + goto exit; + + if ((domain < 0) || (type < 0) || (protocol < 0) || + (domain >= AF_MAX) || (type >= SOCK_MAX) || (protocol >= IPPROTO_MAX)) + goto exit; // let the kernel handle it + + curr = current->acl; + + if (curr->sock_families[domain / 32] & 
(1U << (domain % 32))) { + /* the family is allowed, if this is PF_INET allow it only if + the extra sock type/protocol checks pass */ + if (domain == PF_INET) + goto inet_check; + goto exit; + } else { + if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) { + __u32 fakeip = 0; + security_learn(GR_IP_LEARN_MSG, current->role->rolename, + current->role->roletype, GR_GLOBAL_UID(cred->uid), + GR_GLOBAL_GID(cred->gid), current->exec_file ? + gr_to_filename(current->exec_file->f_path.dentry, + current->exec_file->f_path.mnt) : + curr->filename, curr->filename, + &fakeip, domain, 0, 0, GR_SOCK_FAMILY, + ¤t->signal->saved_ip); + goto exit; + } + goto exit_fail; + } + +inet_check: + /* the rest of this checking is for IPv4 only */ + if (!curr->ips) + goto exit; + + if ((curr->ip_type & (1U << type)) && + (curr->ip_proto[protocol / 32] & (1U << (protocol % 32)))) + goto exit; + + if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) { + /* we don't place acls on raw sockets , and sometimes + dgram/ip sockets are opened for ioctl and not + bind/connect, so we'll fake a bind learn log */ + if (type == SOCK_RAW || type == SOCK_PACKET) { + __u32 fakeip = 0; + security_learn(GR_IP_LEARN_MSG, current->role->rolename, + current->role->roletype, GR_GLOBAL_UID(cred->uid), + GR_GLOBAL_GID(cred->gid), current->exec_file ? + gr_to_filename(current->exec_file->f_path.dentry, + current->exec_file->f_path.mnt) : + curr->filename, curr->filename, + &fakeip, 0, type, + protocol, GR_CONNECT, ¤t->signal->saved_ip); + } else if ((type == SOCK_DGRAM) && (protocol == IPPROTO_IP)) { + __u32 fakeip = 0; + security_learn(GR_IP_LEARN_MSG, current->role->rolename, + current->role->roletype, GR_GLOBAL_UID(cred->uid), + GR_GLOBAL_GID(cred->gid), current->exec_file ? 
+ gr_to_filename(current->exec_file->f_path.dentry, + current->exec_file->f_path.mnt) : + curr->filename, curr->filename, + &fakeip, 0, type, + protocol, GR_BIND, ¤t->signal->saved_ip); + } + /* we'll log when they use connect or bind */ + goto exit; + } + +exit_fail: + if (domain == PF_INET) + gr_log_str3(GR_DONT_AUDIT, GR_SOCK_MSG, gr_sockfamily_to_name(domain), + gr_socktype_to_name(type), gr_proto_to_name(protocol)); + else if (rcu_access_pointer(net_families[domain]) != NULL) + gr_log_str2_int(GR_DONT_AUDIT, GR_SOCK_NOINET_MSG, gr_sockfamily_to_name(domain), + gr_socktype_to_name(type), protocol); + + return 0; +exit: + return 1; +} + +int check_ip_policy(struct acl_ip_label *ip, __u32 ip_addr, __u16 ip_port, __u8 protocol, const int mode, const int type, __u32 our_addr, __u32 our_netmask) +{ + if ((ip->mode & mode) && + (ip_port >= ip->low) && + (ip_port <= ip->high) && + ((ntohl(ip_addr) & our_netmask) == + (ntohl(our_addr) & our_netmask)) + && (ip->proto[protocol / 32] & (1U << (protocol % 32))) + && (ip->type & (1U << type))) { + if (ip->mode & GR_INVERT) + return 2; // specifically denied + else + return 1; // allowed + } + + return 0; // not specifically allowed, may continue parsing +} + +static int +gr_search_connectbind(const int full_mode, struct sock *sk, + struct sockaddr_in *addr, const int type) +{ + char iface[IFNAMSIZ] = {0}; + struct acl_subject_label *curr; + struct acl_ip_label *ip; + struct inet_sock *isk; + struct net_device *dev; + struct in_device *idev; + unsigned long i; + int ret; + int mode = full_mode & (GR_BIND | GR_CONNECT); + __u32 ip_addr = 0; + __u32 our_addr; + __u32 our_netmask; + char *p; + __u16 ip_port = 0; + const struct cred *cred = current_cred(); + + if (unlikely(!gr_acl_is_enabled() || sk->sk_family != PF_INET)) + return 0; + + curr = current->acl; + isk = inet_sk(sk); + + /* INADDR_ANY overriding for binds, inaddr_any_override is already in network order */ + if ((full_mode & GR_BINDOVERRIDE) && addr->sin_addr.s_addr 
== htonl(INADDR_ANY) && curr->inaddr_any_override != 0) + addr->sin_addr.s_addr = curr->inaddr_any_override; + if ((full_mode & GR_CONNECT) && isk->inet_saddr == htonl(INADDR_ANY) && curr->inaddr_any_override != 0) { + struct sockaddr_in saddr; + int err; + + saddr.sin_family = AF_INET; + saddr.sin_addr.s_addr = curr->inaddr_any_override; + saddr.sin_port = isk->inet_sport; + + err = security_socket_bind(sk->sk_socket, (struct sockaddr *)&saddr, sizeof(struct sockaddr_in)); + if (err) + return err; + + err = sk->sk_socket->ops->bind(sk->sk_socket, (struct sockaddr *)&saddr, sizeof(struct sockaddr_in)); + if (err) + return err; + } + + if (!curr->ips) + return 0; + + ip_addr = addr->sin_addr.s_addr; + ip_port = ntohs(addr->sin_port); + + if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) { + security_learn(GR_IP_LEARN_MSG, current->role->rolename, + current->role->roletype, GR_GLOBAL_UID(cred->uid), + GR_GLOBAL_GID(cred->gid), current->exec_file ? + gr_to_filename(current->exec_file->f_path.dentry, + current->exec_file->f_path.mnt) : + curr->filename, curr->filename, + &ip_addr, ip_port, type, + sk->sk_protocol, mode, ¤t->signal->saved_ip); + return 0; + } + + for (i = 0; i < curr->ip_num; i++) { + ip = *(curr->ips + i); + if (ip->iface != NULL) { + strncpy(iface, ip->iface, IFNAMSIZ - 1); + p = strchr(iface, ':'); + if (p != NULL) + *p = '\0'; + dev = dev_get_by_name(sock_net(sk), iface); + if (dev == NULL) + continue; + idev = in_dev_get(dev); + if (idev == NULL) { + dev_put(dev); + continue; + } + rcu_read_lock(); + for_ifa(idev) { + if (!strcmp(ip->iface, ifa->ifa_label)) { + our_addr = ifa->ifa_address; + our_netmask = 0xffffffff; + ret = check_ip_policy(ip, ip_addr, ip_port, sk->sk_protocol, mode, type, our_addr, our_netmask); + if (ret == 1) { + rcu_read_unlock(); + in_dev_put(idev); + dev_put(dev); + return 0; + } else if (ret == 2) { + rcu_read_unlock(); + in_dev_put(idev); + dev_put(dev); + goto denied; + } + } + } endfor_ifa(idev); + rcu_read_unlock(); + 
in_dev_put(idev); + dev_put(dev); + } else { + our_addr = ip->addr; + our_netmask = ip->netmask; + ret = check_ip_policy(ip, ip_addr, ip_port, sk->sk_protocol, mode, type, our_addr, our_netmask); + if (ret == 1) + return 0; + else if (ret == 2) + goto denied; + } + } + +denied: + if (mode == GR_BIND) + gr_log_int5_str2(GR_DONT_AUDIT, GR_BIND_ACL_MSG, &ip_addr, ip_port, gr_socktype_to_name(type), gr_proto_to_name(sk->sk_protocol)); + else if (mode == GR_CONNECT) + gr_log_int5_str2(GR_DONT_AUDIT, GR_CONNECT_ACL_MSG, &ip_addr, ip_port, gr_socktype_to_name(type), gr_proto_to_name(sk->sk_protocol)); + + return -EACCES; +} + +int +gr_search_connect(struct socket *sock, struct sockaddr_in *addr) +{ + /* always allow disconnection of dgram sockets with connect */ + if (addr->sin_family == AF_UNSPEC) + return 0; + return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sock->sk, addr, sock->type); +} + +int +gr_search_bind(struct socket *sock, struct sockaddr_in *addr) +{ + return gr_search_connectbind(GR_BIND | GR_BINDOVERRIDE, sock->sk, addr, sock->type); +} + +int gr_search_listen(struct socket *sock) +{ + struct sock *sk = sock->sk; + struct sockaddr_in addr; + + addr.sin_addr.s_addr = inet_sk(sk)->inet_saddr; + addr.sin_port = inet_sk(sk)->inet_sport; + + return gr_search_connectbind(GR_BIND | GR_CONNECTOVERRIDE, sock->sk, &addr, sock->type); +} + +int gr_search_accept(struct socket *sock) +{ + struct sock *sk = sock->sk; + struct sockaddr_in addr; + + addr.sin_addr.s_addr = inet_sk(sk)->inet_saddr; + addr.sin_port = inet_sk(sk)->inet_sport; + + return gr_search_connectbind(GR_BIND | GR_CONNECTOVERRIDE, sock->sk, &addr, sock->type); +} + +int +gr_search_udp_sendmsg(struct sock *sk, struct sockaddr_in *addr) +{ + if (addr) + return gr_search_connectbind(GR_CONNECT, sk, addr, SOCK_DGRAM); + else { + struct sockaddr_in sin; + const struct inet_sock *inet = inet_sk(sk); + + sin.sin_addr.s_addr = inet->inet_daddr; + sin.sin_port = inet->inet_dport; + + return 
gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sk, &sin, SOCK_DGRAM); + } +} + +int +gr_search_udp_recvmsg(struct sock *sk, const struct sk_buff *skb) +{ + struct sockaddr_in sin; + + if (unlikely(skb->len < sizeof (struct udphdr))) + return 0; // skip this packet + + sin.sin_addr.s_addr = ip_hdr(skb)->saddr; + sin.sin_port = udp_hdr(skb)->source; + + return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sk, &sin, SOCK_DGRAM); +} diff --git a/grsecurity/gracl_learn.c b/grsecurity/gracl_learn.c new file mode 100644 index 0000000000..c5abda5c83 --- /dev/null +++ b/grsecurity/gracl_learn.c @@ -0,0 +1,209 @@ +#include +#include +#include +#include +#include +#include +#include +#include +#include + +extern ssize_t write_grsec_handler(struct file * file, const char __user * buf, + size_t count, loff_t *ppos); +extern int gr_acl_is_enabled(void); + +static DECLARE_WAIT_QUEUE_HEAD(learn_wait); +static int gr_learn_attached; + +/* use a 512k buffer */ +#define LEARN_BUFFER_SIZE (512 * 1024) + +static DEFINE_SPINLOCK(gr_learn_lock); +static DEFINE_MUTEX(gr_learn_user_mutex); + +/* we need to maintain two buffers, so that the kernel context of grlearn + uses a semaphore around the userspace copying, and the other kernel contexts + use a spinlock when copying into the buffer, since they cannot sleep +*/ +static char *learn_buffer; +static char *learn_buffer_user; +static int learn_buffer_len; +static int learn_buffer_user_len; + +static ssize_t +read_learn(struct file *file, char __user * buf, size_t count, loff_t * ppos) +{ + DECLARE_WAITQUEUE(wait, current); + ssize_t retval = 0; + + add_wait_queue(&learn_wait, &wait); + do { + mutex_lock(&gr_learn_user_mutex); + set_current_state(TASK_INTERRUPTIBLE); + spin_lock(&gr_learn_lock); + if (learn_buffer_len) { + set_current_state(TASK_RUNNING); + break; + } + spin_unlock(&gr_learn_lock); + mutex_unlock(&gr_learn_user_mutex); + if (file->f_flags & O_NONBLOCK) { + retval = -EAGAIN; + goto out; + } + if 
/* read_learn continues: on data the loop breaks holding BOTH gr_learn_user_mutex and
 * gr_learn_lock; the kernel buffer is staged into learn_buffer_user under the spinlock,
 * then copy_to_user() runs with only the (sleepable) mutex held. Lock order is always
 * mutex -> spinlock. poll_learn reads learn_buffer_len without the spinlock — a racy
 * but presumably acceptable snapshot; TODO confirm intentional. gr_add_learn_entry
 * appends a NUL-terminated record and keeps a 16K slack gap so it never has to
 * pre-measure the formatted string; it also checks gr_learn_attached unlocked —
 * NOTE(review): verify that torn reads there are benign. open/close (below) gate a
 * single reader via gr_learn_attached and vmalloc/vfree the two 512K buffers. */
(signal_pending(current)) { + retval = -ERESTARTSYS; + goto out; + } + + schedule(); + } while (1); + + memcpy(learn_buffer_user, learn_buffer, learn_buffer_len); + learn_buffer_user_len = learn_buffer_len; + retval = learn_buffer_len; + learn_buffer_len = 0; + + spin_unlock(&gr_learn_lock); + + if (copy_to_user(buf, learn_buffer_user, learn_buffer_user_len)) + retval = -EFAULT; + + mutex_unlock(&gr_learn_user_mutex); +out: + set_current_state(TASK_RUNNING); + remove_wait_queue(&learn_wait, &wait); + return retval; +} + +static unsigned int +poll_learn(struct file * file, poll_table * wait) +{ + poll_wait(file, &learn_wait, wait); + + if (learn_buffer_len) + return (POLLIN | POLLRDNORM); + + return 0; +} + +void +gr_clear_learn_entries(void) +{ + char *tmp; + + mutex_lock(&gr_learn_user_mutex); + spin_lock(&gr_learn_lock); + tmp = learn_buffer; + learn_buffer = NULL; + spin_unlock(&gr_learn_lock); + if (tmp) + vfree(tmp); + if (learn_buffer_user != NULL) { + vfree(learn_buffer_user); + learn_buffer_user = NULL; + } + learn_buffer_len = 0; + mutex_unlock(&gr_learn_user_mutex); + + return; +} + +void +gr_add_learn_entry(const char *fmt, ...) 
+{ + va_list args; + unsigned int len; + + if (!gr_learn_attached) + return; + + spin_lock(&gr_learn_lock); + + /* leave a gap at the end so we know when it's "full" but don't have to + compute the exact length of the string we're trying to append + */ + if (learn_buffer_len > LEARN_BUFFER_SIZE - 16384) { + spin_unlock(&gr_learn_lock); + wake_up_interruptible(&learn_wait); + return; + } + if (learn_buffer == NULL) { + spin_unlock(&gr_learn_lock); + return; + } + + va_start(args, fmt); + len = vsnprintf(learn_buffer + learn_buffer_len, LEARN_BUFFER_SIZE - learn_buffer_len, fmt, args); + va_end(args); + + learn_buffer_len += len + 1; + + spin_unlock(&gr_learn_lock); + wake_up_interruptible(&learn_wait); + + return; +} + +static int +open_learn(struct inode *inode, struct file *file) +{ + if (file->f_mode & FMODE_READ && gr_learn_attached) + return -EBUSY; + if (file->f_mode & FMODE_READ) { + int retval = 0; + mutex_lock(&gr_learn_user_mutex); + if (learn_buffer == NULL) + learn_buffer = vmalloc(LEARN_BUFFER_SIZE); + if (learn_buffer_user == NULL) + learn_buffer_user = vmalloc(LEARN_BUFFER_SIZE); + if (learn_buffer == NULL) { + retval = -ENOMEM; + goto out_error; + } + if (learn_buffer_user == NULL) { + retval = -ENOMEM; + goto out_error; + } + learn_buffer_len = 0; + learn_buffer_user_len = 0; + gr_learn_attached = 1; +out_error: + mutex_unlock(&gr_learn_user_mutex); + return retval; + } + return 0; +} + +static int +close_learn(struct inode *inode, struct file *file) +{ + if (file->f_mode & FMODE_READ) { + char *tmp = NULL; + mutex_lock(&gr_learn_user_mutex); + spin_lock(&gr_learn_lock); + tmp = learn_buffer; + learn_buffer = NULL; + spin_unlock(&gr_learn_lock); + if (tmp) + vfree(tmp); + if (learn_buffer_user != NULL) { + vfree(learn_buffer_user); + learn_buffer_user = NULL; + } + learn_buffer_len = 0; + learn_buffer_user_len = 0; + gr_learn_attached = 0; + mutex_unlock(&gr_learn_user_mutex); + } + + return 0; +} + +const struct file_operations grsec_fops = { + 
/* grsec_fops completes here; after it, the collapsed diff starts a second new file,
 * grsecurity/gracl_policy.c: RBAC policy load/reload — includes, the role-list
 * iteration macros (role_list is walked via ->prev), and extern declarations for
 * helpers defined elsewhere in the patch. Note the collapsed '#include' lines lost
 * their header names in this paste — the original patch names them. */
.read = read_learn, + .write = write_grsec_handler, + .open = open_learn, + .release = close_learn, + .poll = poll_learn, +}; diff --git a/grsecurity/gracl_policy.c b/grsecurity/gracl_policy.c new file mode 100644 index 0000000000..7c42102d65 --- /dev/null +++ b/grsecurity/gracl_policy.c @@ -0,0 +1,1782 @@ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "../fs/mount.h" + +#include +#include +#include + +extern struct gr_policy_state *polstate; + +#define FOR_EACH_ROLE_START(role) \ + role = polstate->role_list; \ + while (role) { + +#define FOR_EACH_ROLE_END(role) \ + role = role->prev; \ + } + +struct path gr_real_root; + +extern struct gr_alloc_state *current_alloc_state; + +u16 acl_sp_role_value; + +static DEFINE_MUTEX(gr_dev_mutex); + +extern int chkpw(struct gr_arg *entry, unsigned char *salt, unsigned char *sum); +extern void gr_clear_learn_entries(void); + +struct gr_arg *gr_usermode __read_only; +unsigned char *gr_system_salt __read_only; +unsigned char *gr_system_sum __read_only; + +static unsigned int gr_auth_attempts = 0; +static unsigned long gr_auth_expires = 0UL; + +struct acl_object_label *fakefs_obj_rw; +struct acl_object_label *fakefs_obj_rwx; + +extern int gr_init_uidset(void); +extern void gr_free_uidset(void); +extern int gr_find_and_remove_uid(uid_t uid); + +extern struct acl_subject_label *__gr_get_subject_for_task(const struct gr_policy_state *state, struct task_struct *task, const char *filename, int fallback); +extern void __gr_apply_subject_to_task(const struct gr_policy_state *state, struct task_struct *task, struct acl_subject_label *subj); +extern int gr_streq(const char *a, const char *b, const unsigned int lena, const unsigned int lenb); +extern void __insert_inodev_entry(const struct gr_policy_state *state, struct 
/* Remaining extern declarations, then the *_normal userspace-copy helpers: thin
 * copy_from_user() wrappers, one per policy structure, each returning 0 or -EFAULT.
 * copy_gr_arg_wrapper_normal additionally validates the userspace ABI (version and
 * sizeof(struct gr_arg)) and returns -EINVAL on mismatch. copy_sprole_pw_normal and
 * copy_pointer_from_array_normal index into a userspace array by idx. */
inodev_entry *entry); +extern struct acl_role_label *__lookup_acl_role_label(const struct gr_policy_state *state, const struct task_struct *task, const uid_t uid, const gid_t gid); +extern void insert_acl_obj_label(struct acl_object_label *obj, struct acl_subject_label *subj); +extern void insert_acl_subj_label(struct acl_subject_label *obj, struct acl_role_label *role); +extern struct name_entry * __lookup_name_entry(const struct gr_policy_state *state, const char *name); +extern char *gr_to_filename_rbac(const struct dentry *dentry, const struct vfsmount *mnt); +extern struct acl_subject_label *lookup_acl_subj_label(const u64 ino, const dev_t dev, const struct acl_role_label *role); +extern struct acl_subject_label *lookup_acl_subj_label_deleted(const u64 ino, const dev_t dev, const struct acl_role_label *role); +extern void assign_special_role(const char *rolename); +extern struct acl_subject_label *chk_subj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt, const struct acl_role_label *role); +extern int gr_rbac_disable(void *unused); +extern void gr_enable_rbac_system(void); + +static int copy_acl_object_label_normal(struct acl_object_label *obj, const struct acl_object_label *userp) +{ + if (copy_from_user(obj, userp, sizeof(struct acl_object_label))) + return -EFAULT; + + return 0; +} + +static int copy_acl_ip_label_normal(struct acl_ip_label *ip, const struct acl_ip_label *userp) +{ + if (copy_from_user(ip, userp, sizeof(struct acl_ip_label))) + return -EFAULT; + + return 0; +} + +static int copy_acl_subject_label_normal(struct acl_subject_label *subj, const struct acl_subject_label *userp) +{ + if (copy_from_user(subj, userp, sizeof(struct acl_subject_label))) + return -EFAULT; + + return 0; +} + +static int copy_acl_role_label_normal(struct acl_role_label *role, const struct acl_role_label *userp) +{ + if (copy_from_user(role, userp, sizeof(struct acl_role_label))) + return -EFAULT; + + return 0; +} + +static int 
copy_role_allowed_ip_normal(struct role_allowed_ip *roleip, const struct role_allowed_ip *userp) +{ + if (copy_from_user(roleip, userp, sizeof(struct role_allowed_ip))) + return -EFAULT; + + return 0; +} + +static int copy_sprole_pw_normal(struct sprole_pw *pw, unsigned long idx, const struct sprole_pw *userp) +{ + if (copy_from_user(pw, userp + idx, sizeof(struct sprole_pw))) + return -EFAULT; + + return 0; +} + +static int copy_gr_hash_struct_normal(struct gr_hash_struct *hash, const struct gr_hash_struct *userp) +{ + if (copy_from_user(hash, userp, sizeof(struct gr_hash_struct))) + return -EFAULT; + + return 0; +} + +static int copy_role_transition_normal(struct role_transition *trans, const struct role_transition *userp) +{ + if (copy_from_user(trans, userp, sizeof(struct role_transition))) + return -EFAULT; + + return 0; +} + +int copy_pointer_from_array_normal(void *ptr, unsigned long idx, const void *userp) +{ + if (copy_from_user(ptr, userp + (idx * sizeof(void *)), sizeof(void *))) + return -EFAULT; + + return 0; +} + +static int copy_gr_arg_wrapper_normal(const char __user *buf, struct gr_arg_wrapper *uwrap) +{ + if (copy_from_user(uwrap, buf, sizeof (struct gr_arg_wrapper))) + return -EFAULT; + + if ((uwrap->version != GRSECURITY_VERSION) || + (uwrap->size != sizeof(struct gr_arg))) + return -EINVAL; + + return 0; +} + +static int copy_gr_arg_normal(const struct gr_arg __user *buf, struct gr_arg *arg) +{ + if (copy_from_user(arg, buf, sizeof (struct gr_arg))) + return -EFAULT; + + return 0; +} + +static size_t get_gr_arg_wrapper_size_normal(void) +{ + return sizeof(struct gr_arg_wrapper); +} + +#ifdef CONFIG_COMPAT +extern int copy_gr_arg_wrapper_compat(const char *buf, struct gr_arg_wrapper *uwrap); +extern int copy_gr_arg_compat(const struct gr_arg __user *buf, struct gr_arg *arg); +extern int copy_acl_object_label_compat(struct acl_object_label *obj, const struct acl_object_label *userp); +extern int copy_acl_subject_label_compat(struct 
/* CONFIG_COMPAT: the copy_* names become __read_only function pointers selected at
 * policy-load time between the _normal and _compat (32-bit userland) implementations;
 * without COMPAT they are #define'd straight to the _normal versions (next region). */
acl_subject_label *subj, const struct acl_subject_label *userp); +extern int copy_acl_role_label_compat(struct acl_role_label *role, const struct acl_role_label *userp); +extern int copy_role_allowed_ip_compat(struct role_allowed_ip *roleip, const struct role_allowed_ip *userp); +extern int copy_role_transition_compat(struct role_transition *trans, const struct role_transition *userp); +extern int copy_gr_hash_struct_compat(struct gr_hash_struct *hash, const struct gr_hash_struct *userp); +extern int copy_pointer_from_array_compat(void *ptr, unsigned long idx, const void *userp); +extern int copy_acl_ip_label_compat(struct acl_ip_label *ip, const struct acl_ip_label *userp); +extern int copy_sprole_pw_compat(struct sprole_pw *pw, unsigned long idx, const struct sprole_pw *userp); +extern size_t get_gr_arg_wrapper_size_compat(void); + +int (* copy_gr_arg_wrapper)(const char *buf, struct gr_arg_wrapper *uwrap) __read_only; +int (* copy_gr_arg)(const struct gr_arg *buf, struct gr_arg *arg) __read_only; +int (* copy_acl_object_label)(struct acl_object_label *obj, const struct acl_object_label *userp) __read_only; +int (* copy_acl_subject_label)(struct acl_subject_label *subj, const struct acl_subject_label *userp) __read_only; +int (* copy_acl_role_label)(struct acl_role_label *role, const struct acl_role_label *userp) __read_only; +int (* copy_acl_ip_label)(struct acl_ip_label *ip, const struct acl_ip_label *userp) __read_only; +int (* copy_pointer_from_array)(void *ptr, unsigned long idx, const void *userp) __read_only; +int (* copy_sprole_pw)(struct sprole_pw *pw, unsigned long idx, const struct sprole_pw *userp) __read_only; +int (* copy_gr_hash_struct)(struct gr_hash_struct *hash, const struct gr_hash_struct *userp) __read_only; +int (* copy_role_transition)(struct role_transition *trans, const struct role_transition *userp) __read_only; +int (* copy_role_allowed_ip)(struct role_allowed_ip *roleip, const struct role_allowed_ip *userp) __read_only; +size_t (* 
/* Non-COMPAT fallback #defines, then the hash-table plumbing:
 * - lookup_subject_map / insert_subj_map_entry: map userspace subject pointers to
 *   their kernel copies (prepended, doubly-linked chains).
 * - __insert_acl_role_label: inserts a role into the uid/gid-keyed role hash; roles
 *   chain via ->next and a role may appear in several slots (domains), so the code
 *   carefully merges/reuses existing chains rather than duplicating entries.
 * - insert_acl_role_label: prepends to polstate->role_list (walked via ->prev by
 *   FOR_EACH_ROLE_*) and hashes once per domain child, or once by uidgid. */
get_gr_arg_wrapper_size)(void) __read_only; + +#else +#define copy_gr_arg_wrapper copy_gr_arg_wrapper_normal +#define copy_gr_arg copy_gr_arg_normal +#define copy_gr_hash_struct copy_gr_hash_struct_normal +#define copy_acl_object_label copy_acl_object_label_normal +#define copy_acl_subject_label copy_acl_subject_label_normal +#define copy_acl_role_label copy_acl_role_label_normal +#define copy_acl_ip_label copy_acl_ip_label_normal +#define copy_pointer_from_array copy_pointer_from_array_normal +#define copy_sprole_pw copy_sprole_pw_normal +#define copy_role_transition copy_role_transition_normal +#define copy_role_allowed_ip copy_role_allowed_ip_normal +#define get_gr_arg_wrapper_size get_gr_arg_wrapper_size_normal +#endif + +static struct acl_subject_label * +lookup_subject_map(const struct acl_subject_label *userp) +{ + unsigned int index = gr_shash(userp, polstate->subj_map_set.s_size); + struct subject_map *match; + + match = polstate->subj_map_set.s_hash[index]; + + while (match && match->user != userp) + match = match->next; + + if (match != NULL) + return match->kernel; + else + return NULL; +} + +static void +insert_subj_map_entry(struct subject_map *subjmap) +{ + unsigned int index = gr_shash(subjmap->user, polstate->subj_map_set.s_size); + struct subject_map **curr; + + subjmap->prev = NULL; + + curr = &polstate->subj_map_set.s_hash[index]; + if (*curr != NULL) + (*curr)->prev = subjmap; + + subjmap->next = *curr; + *curr = subjmap; + + return; +} + +static void +__insert_acl_role_label(struct acl_role_label *role, uid_t uidgid) +{ + unsigned int index = + gr_rhash(uidgid, role->roletype & (GR_ROLE_USER | GR_ROLE_GROUP), polstate->acl_role_set.r_size); + struct acl_role_label **curr; + struct acl_role_label *tmp, *tmp2; + + curr = &polstate->acl_role_set.r_hash[index]; + + /* simple case, slot is empty, just set it to our role */ + if (*curr == NULL) { + *curr = role; + } else { + /* example: + 1 -> 2 -> 3 (adding 2 -> 3 to here) + 2 -> 3 + */ + /* first 
check to see if we can already be reached via this slot */ + tmp = *curr; + while (tmp && tmp != role) + tmp = tmp->next; + if (tmp == role) { + /* we don't need to add ourselves to this slot's chain */ + return; + } + /* we need to add ourselves to this chain, two cases */ + if (role->next == NULL) { + /* simple case, append the current chain to our role */ + role->next = *curr; + *curr = role; + } else { + /* 1 -> 2 -> 3 -> 4 + 2 -> 3 -> 4 + 3 -> 4 (adding 1 -> 2 -> 3 -> 4 to here) + */ + /* trickier case: walk our role's chain until we find + the role for the start of the current slot's chain */ + tmp = role; + tmp2 = *curr; + while (tmp->next && tmp->next != tmp2) + tmp = tmp->next; + if (tmp->next == tmp2) { + /* from example above, we found 3, so just + replace this slot's chain with ours */ + *curr = role; + } else { + /* we didn't find a subset of our role's chain + in the current slot's chain, so append their + chain to ours, and set us as the first role in + the slot's chain + + we could fold this case with the case above, + but making it explicit for clarity + */ + tmp->next = tmp2; + *curr = role; + } + } + } + + return; +} + +static void +insert_acl_role_label(struct acl_role_label *role) +{ + int i; + + if (polstate->role_list == NULL) { + polstate->role_list = role; + role->prev = NULL; + } else { + role->prev = polstate->role_list; + polstate->role_list = role; + } + + /* used for hash chains */ + role->next = NULL; + + if (role->roletype & GR_ROLE_DOMAIN) { + for (i = 0; i < role->domain_child_num; i++) + __insert_acl_role_label(role, role->domain_children[i]); + } else + __insert_acl_role_label(role, role->uidgid); +} + +static int +insert_name_entry(char *name, const u64 inode, const dev_t device, __u8 deleted) +{ + struct name_entry **curr, *nentry; + struct inodev_entry *ientry; + unsigned int len = strlen(name); + unsigned int key = full_name_hash(NULL, (const unsigned char *)name, len); + unsigned int index = key % polstate->name_set.n_size; 
/* insert_name_entry continues: dedupes by (hash key, exact name), returns 1 on
 * success-or-duplicate and 0 only on allocation failure; allocations come from the
 * acl_alloc() arena (freed en masse by acl_free_all(), so no per-path frees here —
 * presumably; confirm arena semantics in gracl_alloc). create_table() picks the first
 * prime > *len (kmalloc if it fits a page, else vmalloc) and writes the chosen size
 * back through len; init_variables() sizes all four policy hash tables from the
 * userland-supplied counts, returning 1 (not -errno) on any failure. */
+ + curr = &polstate->name_set.n_hash[index]; + + while (*curr && ((*curr)->key != key || !gr_streq((*curr)->name, name, (*curr)->len, len))) + curr = &((*curr)->next); + + if (*curr != NULL) + return 1; + + nentry = acl_alloc(sizeof (struct name_entry)); + if (nentry == NULL) + return 0; + ientry = acl_alloc(sizeof (struct inodev_entry)); + if (ientry == NULL) + return 0; + ientry->nentry = nentry; + + nentry->key = key; + nentry->name = name; + nentry->inode = inode; + nentry->device = device; + nentry->len = len; + nentry->deleted = deleted; + + nentry->prev = NULL; + curr = &polstate->name_set.n_hash[index]; + if (*curr != NULL) + (*curr)->prev = nentry; + nentry->next = *curr; + *curr = nentry; + + /* insert us into the table searchable by inode/dev */ + __insert_inodev_entry(polstate, ientry); + + return 1; +} + +/* allocating chained hash tables, so optimal size is where lambda ~ 1 */ + +static void * +create_table(__u32 * len, int elementsize) +{ + unsigned int table_sizes[] = { + 7, 13, 31, 61, 127, 251, 509, 1021, 2039, 4093, 8191, 16381, + 32749, 65521, 131071, 262139, 524287, 1048573, 2097143, + 4194301, 8388593, 16777213, 33554393, 67108859 + }; + void *newtable = NULL; + unsigned int pwr = 0; + + while ((pwr < ((sizeof (table_sizes) / sizeof (table_sizes[0])) - 1)) && + table_sizes[pwr] <= *len) + pwr++; + + if (table_sizes[pwr] <= *len || (table_sizes[pwr] > ULONG_MAX / elementsize)) + return newtable; + + if ((table_sizes[pwr] * elementsize) <= PAGE_SIZE) + newtable = + kmalloc(table_sizes[pwr] * elementsize, GFP_KERNEL); + else + newtable = vmalloc(table_sizes[pwr] * elementsize); + + *len = table_sizes[pwr]; + + return newtable; +} + +static int +init_variables(const struct gr_arg *arg, bool reload) +{ + struct task_struct *reaper = init_pid_ns.child_reaper; + unsigned int stacksize; + + polstate->subj_map_set.s_size = arg->role_db.num_subjects; + polstate->acl_role_set.r_size = arg->role_db.num_roles + arg->role_db.num_domain_children; + 
/* init_variables body: on first load (!reload) it also pins the real root path and
 * allocates the fake rw/rwx object labels used for pseudo filesystems; all four hash
 * tables are create_table()'d and zeroed. Error paths return 1 and rely on the caller
 * to unwind via free_variables(). */
polstate->name_set.n_size = arg->role_db.num_objects; + polstate->inodev_set.i_size = arg->role_db.num_objects; + + if (!polstate->subj_map_set.s_size || !polstate->acl_role_set.r_size || + !polstate->name_set.n_size || !polstate->inodev_set.i_size) + return 1; + + if (!reload) { + if (!gr_init_uidset()) + return 1; + } + + /* set up the stack that holds allocation info */ + + stacksize = arg->role_db.num_pointers + 5; + + if (!acl_alloc_stack_init(stacksize)) + return 1; + + if (!reload) { + /* grab reference for the real root dentry and vfsmount */ + get_fs_root(reaper->fs, &gr_real_root); + +#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG + printk(KERN_ALERT "Obtained real root device=%d, inode=%lu\n", gr_get_dev_from_dentry(gr_real_root.dentry), gr_get_ino_from_dentry(gr_real_root.dentry)); +#endif + + fakefs_obj_rw = kzalloc(sizeof(struct acl_object_label), GFP_KERNEL); + if (fakefs_obj_rw == NULL) + return 1; + fakefs_obj_rw->mode = GR_FIND | GR_READ | GR_WRITE; + + fakefs_obj_rwx = kzalloc(sizeof(struct acl_object_label), GFP_KERNEL); + if (fakefs_obj_rwx == NULL) + return 1; + fakefs_obj_rwx->mode = GR_FIND | GR_READ | GR_WRITE | GR_EXEC; + } + + polstate->subj_map_set.s_hash = + (struct subject_map **) create_table(&polstate->subj_map_set.s_size, sizeof(void *)); + polstate->acl_role_set.r_hash = + (struct acl_role_label **) create_table(&polstate->acl_role_set.r_size, sizeof(void *)); + polstate->name_set.n_hash = (struct name_entry **) create_table(&polstate->name_set.n_size, sizeof(void *)); + polstate->inodev_set.i_hash = + (struct inodev_entry **) create_table(&polstate->inodev_set.i_size, sizeof(void *)); + + if (!polstate->subj_map_set.s_hash || !polstate->acl_role_set.r_hash || + !polstate->name_set.n_hash || !polstate->inodev_set.i_hash) + return 1; + + memset(polstate->subj_map_set.s_hash, 0, + sizeof(struct subject_map *) * polstate->subj_map_set.s_size); + memset(polstate->acl_role_set.r_hash, 0, + sizeof (struct acl_role_label *) * 
/* free_init_variables: drops only the user->kernel subject-pointer map (not needed
 * after load). free_variables: full policy teardown — on a real disable (!reload) it
 * detaches every task's role/acl under tasklist_lock, frees the fake objects and the
 * root path; in all cases it frees every role's/subject's object hash (kfree vs vfree
 * chosen by the same <= PAGE_SIZE rule create_table used), the arena (acl_free_all),
 * and the four top-level tables, then nulls the polstate heads. */
polstate->acl_role_set.r_size); + memset(polstate->name_set.n_hash, 0, + sizeof (struct name_entry *) * polstate->name_set.n_size); + memset(polstate->inodev_set.i_hash, 0, + sizeof (struct inodev_entry *) * polstate->inodev_set.i_size); + + return 0; +} + +/* free information not needed after startup + currently contains user->kernel pointer mappings for subjects +*/ + +static void +free_init_variables(void) +{ + __u32 i; + + if (polstate->subj_map_set.s_hash) { + for (i = 0; i < polstate->subj_map_set.s_size; i++) { + if (polstate->subj_map_set.s_hash[i]) { + kfree(polstate->subj_map_set.s_hash[i]); + polstate->subj_map_set.s_hash[i] = NULL; + } + } + + if ((polstate->subj_map_set.s_size * sizeof (struct subject_map *)) <= + PAGE_SIZE) + kfree(polstate->subj_map_set.s_hash); + else + vfree(polstate->subj_map_set.s_hash); + } + + return; +} + +static void +free_variables(bool reload) +{ + struct acl_subject_label *s; + struct acl_role_label *r; + struct task_struct *task, *task2; + unsigned int x; + + if (!reload) { + gr_clear_learn_entries(); + + read_lock(&tasklist_lock); + do_each_thread(task2, task) { + task->acl_sp_role = 0; + task->acl_role_id = 0; + task->inherited = 0; + task->acl = NULL; + task->role = NULL; + } while_each_thread(task2, task); + read_unlock(&tasklist_lock); + + kfree(fakefs_obj_rw); + fakefs_obj_rw = NULL; + kfree(fakefs_obj_rwx); + fakefs_obj_rwx = NULL; + + /* release the reference to the real root dentry and vfsmount */ + path_put(&gr_real_root); + memset(&gr_real_root, 0, sizeof(gr_real_root)); + } + + /* free all object hash tables */ + + FOR_EACH_ROLE_START(r) + if (r->subj_hash == NULL) + goto next_role; + FOR_EACH_SUBJECT_START(r, s, x) + if (s->obj_hash == NULL) + break; + if ((s->obj_hash_size * sizeof (struct acl_object_label *)) <= PAGE_SIZE) + kfree(s->obj_hash); + else + vfree(s->obj_hash); + FOR_EACH_SUBJECT_END(s, x) + FOR_EACH_NESTED_SUBJECT_START(r, s) + if (s->obj_hash == NULL) + break; + if ((s->obj_hash_size * sizeof 
/* free_variables tail, then: forward decl of do_copy_user_subj (mutually recursive
 * with copy_user_objs via nested subjects); alloc_and_copy_string() pulls a bounded,
 * NUL-terminated string from userspace into the arena (strnlen_user length includes
 * the NUL, hence tmp[len-1] = '\0') and repoints *name at the kernel copy. */
(struct acl_object_label *)) <= PAGE_SIZE) + kfree(s->obj_hash); + else + vfree(s->obj_hash); + FOR_EACH_NESTED_SUBJECT_END(s) + if ((r->subj_hash_size * sizeof (struct acl_subject_label *)) <= PAGE_SIZE) + kfree(r->subj_hash); + else + vfree(r->subj_hash); + r->subj_hash = NULL; +next_role: + FOR_EACH_ROLE_END(r) + + acl_free_all(); + + if (polstate->acl_role_set.r_hash) { + if ((polstate->acl_role_set.r_size * sizeof (struct acl_role_label *)) <= + PAGE_SIZE) + kfree(polstate->acl_role_set.r_hash); + else + vfree(polstate->acl_role_set.r_hash); + } + if (polstate->name_set.n_hash) { + if ((polstate->name_set.n_size * sizeof (struct name_entry *)) <= + PAGE_SIZE) + kfree(polstate->name_set.n_hash); + else + vfree(polstate->name_set.n_hash); + } + + if (polstate->inodev_set.i_hash) { + if ((polstate->inodev_set.i_size * sizeof (struct inodev_entry *)) <= + PAGE_SIZE) + kfree(polstate->inodev_set.i_hash); + else + vfree(polstate->inodev_set.i_hash); + } + + if (!reload) + gr_free_uidset(); + + memset(&polstate->name_set, 0, sizeof (struct name_db)); + memset(&polstate->inodev_set, 0, sizeof (struct inodev_db)); + memset(&polstate->acl_role_set, 0, sizeof (struct acl_role_db)); + memset(&polstate->subj_map_set, 0, sizeof (struct acl_subj_map_db)); + + polstate->default_role = NULL; + polstate->kernel_role = NULL; + polstate->role_list = NULL; + + return; +} + +static struct acl_subject_label * +do_copy_user_subj(struct acl_subject_label *userp, struct acl_role_label *role, int *already_copied); + +static int alloc_and_copy_string(char **name, unsigned int maxlen) +{ + unsigned int len = strnlen_user(*name, maxlen); + char *tmp; + + if (!len || len >= maxlen) + return -EINVAL; + + if ((tmp = (char *) acl_alloc(len)) == NULL) + return -ENOMEM; + + if (copy_from_user(tmp, *name, len)) + return -EFAULT; + + tmp[len-1] = '\0'; + *name = tmp; + + return 0; +} + +static int +copy_user_glob(struct acl_object_label *obj) +{ + struct acl_object_label *g_tmp, **guser; + int 
/* copy_user_glob: walks obj->globbed (a userspace-pointer chain), replacing each link
 * in place with its kernel copy. copy_user_objs: copies a subject's object list
 * (chained via ->prev in userspace), inserting each into the subject's object hash and
 * the global name table, and recursing into ->nested subjects (deduped via the
 * already_copied flag so the role's nested list gets no duplicates).
 * NOTE(review): count_user_subjs never increments 'num' and so always returns 0
 * (contrast count_user_objs, which does num++); r_tmp->subj_hash_size therefore starts
 * at 0 and create_table() picks its minimum prime. Looks deliberate upstream
 * (the loop only validates readability of the userspace chain) — confirm before
 * "fixing". copy_user_allowedips / copy_user_transitions rebuild userspace ->prev
 * chains as kernel doubly-linked lists headed at the role. */
error; + + if (obj->globbed == NULL) + return 0; + + guser = &obj->globbed; + while (*guser) { + g_tmp = (struct acl_object_label *) + acl_alloc(sizeof (struct acl_object_label)); + if (g_tmp == NULL) + return -ENOMEM; + + if (copy_acl_object_label(g_tmp, *guser)) + return -EFAULT; + + error = alloc_and_copy_string(&g_tmp->filename, PATH_MAX); + if (error) + return error; + + *guser = g_tmp; + guser = &(g_tmp->next); + } + + return 0; +} + +static int +copy_user_objs(struct acl_object_label *userp, struct acl_subject_label *subj, + struct acl_role_label *role) +{ + struct acl_object_label *o_tmp; + int ret; + + while (userp) { + if ((o_tmp = (struct acl_object_label *) + acl_alloc(sizeof (struct acl_object_label))) == NULL) + return -ENOMEM; + + if (copy_acl_object_label(o_tmp, userp)) + return -EFAULT; + + userp = o_tmp->prev; + + ret = alloc_and_copy_string(&o_tmp->filename, PATH_MAX); + if (ret) + return ret; + + insert_acl_obj_label(o_tmp, subj); + if (!insert_name_entry(o_tmp->filename, o_tmp->inode, + o_tmp->device, (o_tmp->mode & GR_DELETED) ? 
1 : 0)) + return -ENOMEM; + + ret = copy_user_glob(o_tmp); + if (ret) + return ret; + + if (o_tmp->nested) { + int already_copied; + + o_tmp->nested = do_copy_user_subj(o_tmp->nested, role, &already_copied); + if (IS_ERR(o_tmp->nested)) + return PTR_ERR(o_tmp->nested); + + /* insert into nested subject list if we haven't copied this one yet + to prevent duplicate entries */ + if (!already_copied) { + o_tmp->nested->next = role->hash->first; + role->hash->first = o_tmp->nested; + } + } + } + + return 0; +} + +static __u32 +count_user_subjs(struct acl_subject_label *userp) +{ + struct acl_subject_label s_tmp; + __u32 num = 0; + + while (userp) { + if (copy_acl_subject_label(&s_tmp, userp)) + break; + + userp = s_tmp.prev; + } + + return num; +} + +static int +copy_user_allowedips(struct acl_role_label *rolep) +{ + struct role_allowed_ip *ruserip, *rtmp = NULL, *rlast; + + ruserip = rolep->allowed_ips; + + while (ruserip) { + rlast = rtmp; + + if ((rtmp = (struct role_allowed_ip *) + acl_alloc(sizeof (struct role_allowed_ip))) == NULL) + return -ENOMEM; + + if (copy_role_allowed_ip(rtmp, ruserip)) + return -EFAULT; + + ruserip = rtmp->prev; + + if (!rlast) { + rtmp->prev = NULL; + rolep->allowed_ips = rtmp; + } else { + rlast->next = rtmp; + rtmp->prev = rlast; + } + + if (!ruserip) + rtmp->next = NULL; + } + + return 0; +} + +static int +copy_user_transitions(struct acl_role_label *rolep) +{ + struct role_transition *rusertp, *rtmp = NULL, *rlast; + int error; + + rusertp = rolep->transitions; + + while (rusertp) { + rlast = rtmp; + + if ((rtmp = (struct role_transition *) + acl_alloc(sizeof (struct role_transition))) == NULL) + return -ENOMEM; + + if (copy_role_transition(rtmp, rusertp)) + return -EFAULT; + + rusertp = rtmp->prev; + + error = alloc_and_copy_string(&rtmp->rolename, GR_SPROLE_LEN); + if (error) + return error; + + if (!rlast) { + rtmp->prev = NULL; + rolep->transitions = rtmp; + } else { + rlast->next = rtmp; + rtmp->prev = rlast; + } + + if 
/* count_user_objs (below) counts a userspace object chain for hash sizing.
 * do_copy_user_subj: the heart of subject import — memoized via the subject map so a
 * userspace subject is copied exactly once (already_copied reports cache hits); copies
 * filename, user/group transition tables, builds the object hash, recurses to the
 * parent subject, then imports per-subject IP ACLs. Error paths return ERR_PTR() and
 * leave arena allocations for acl_free_all(); the kmalloc'd subjmap is owned by the
 * subject map table (freed in free_init_variables). */
(!rusertp) + rtmp->next = NULL; + } + + return 0; +} + +static __u32 count_user_objs(const struct acl_object_label __user *userp) +{ + struct acl_object_label o_tmp; + __u32 num = 0; + + while (userp) { + if (copy_acl_object_label(&o_tmp, userp)) + break; + + userp = o_tmp.prev; + num++; + } + + return num; +} + +static struct acl_subject_label * +do_copy_user_subj(struct acl_subject_label *userp, struct acl_role_label *role, int *already_copied) +{ + struct acl_subject_label *s_tmp = NULL, *s_tmp2; + __u32 num_objs; + struct acl_ip_label **i_tmp, *i_utmp2; + struct gr_hash_struct ghash; + struct subject_map *subjmap; + unsigned int i_num; + int err; + + if (already_copied != NULL) + *already_copied = 0; + + s_tmp = lookup_subject_map(userp); + + /* we've already copied this subject into the kernel, just return + the reference to it, and don't copy it over again + */ + if (s_tmp) { + if (already_copied != NULL) + *already_copied = 1; + return(s_tmp); + } + + if ((s_tmp = (struct acl_subject_label *) + acl_alloc(sizeof (struct acl_subject_label))) == NULL) + return ERR_PTR(-ENOMEM); + + subjmap = (struct subject_map *)kmalloc(sizeof (struct subject_map), GFP_KERNEL); + if (subjmap == NULL) + return ERR_PTR(-ENOMEM); + + subjmap->user = userp; + subjmap->kernel = s_tmp; + insert_subj_map_entry(subjmap); + + if (copy_acl_subject_label(s_tmp, userp)) + return ERR_PTR(-EFAULT); + + err = alloc_and_copy_string(&s_tmp->filename, PATH_MAX); + if (err) + return ERR_PTR(err); + + if (!strcmp(s_tmp->filename, "/")) + role->root_label = s_tmp; + + if (copy_gr_hash_struct(&ghash, s_tmp->hash)) + return ERR_PTR(-EFAULT); + + /* copy user and group transition tables */ + + if (s_tmp->user_trans_num) { + uid_t *uidlist; + + uidlist = (uid_t *)acl_alloc_num(s_tmp->user_trans_num, sizeof(uid_t)); + if (uidlist == NULL) + return ERR_PTR(-ENOMEM); + if (copy_from_user(uidlist, s_tmp->user_transitions, s_tmp->user_trans_num * sizeof(uid_t))) + return ERR_PTR(-EFAULT); + + 
s_tmp->user_transitions = uidlist; + } + + if (s_tmp->group_trans_num) { + gid_t *gidlist; + + gidlist = (gid_t *)acl_alloc_num(s_tmp->group_trans_num, sizeof(gid_t)); + if (gidlist == NULL) + return ERR_PTR(-ENOMEM); + if (copy_from_user(gidlist, s_tmp->group_transitions, s_tmp->group_trans_num * sizeof(gid_t))) + return ERR_PTR(-EFAULT); + + s_tmp->group_transitions = gidlist; + } + + /* set up object hash table */ + num_objs = count_user_objs(ghash.first); + + s_tmp->obj_hash_size = num_objs; + s_tmp->obj_hash = + (struct acl_object_label **) + create_table(&(s_tmp->obj_hash_size), sizeof(void *)); + + if (!s_tmp->obj_hash) + return ERR_PTR(-ENOMEM); + + memset(s_tmp->obj_hash, 0, + s_tmp->obj_hash_size * + sizeof (struct acl_object_label *)); + + /* add in objects */ + err = copy_user_objs(ghash.first, s_tmp, role); + + if (err) + return ERR_PTR(err); + + /* set pointer for parent subject */ + if (s_tmp->parent_subject) { + s_tmp2 = do_copy_user_subj(s_tmp->parent_subject, role, NULL); + + if (IS_ERR(s_tmp2)) + return s_tmp2; + + s_tmp->parent_subject = s_tmp2; + } + + /* add in ip acls */ + + if (!s_tmp->ip_num) { + s_tmp->ips = NULL; + goto insert; + } + + i_tmp = + (struct acl_ip_label **) acl_alloc_num(s_tmp->ip_num, + sizeof (struct acl_ip_label *)); + + if (!i_tmp) + return ERR_PTR(-ENOMEM); + + for (i_num = 0; i_num < s_tmp->ip_num; i_num++) { + *(i_tmp + i_num) = + (struct acl_ip_label *) + acl_alloc(sizeof (struct acl_ip_label)); + if (!*(i_tmp + i_num)) + return ERR_PTR(-ENOMEM); + + if (copy_pointer_from_array(&i_utmp2, i_num, s_tmp->ips)) + return ERR_PTR(-EFAULT); + + if (copy_acl_ip_label(*(i_tmp + i_num), i_utmp2)) + return ERR_PTR(-EFAULT); + + if ((*(i_tmp + i_num))->iface == NULL) + continue; + + err = alloc_and_copy_string(&(*(i_tmp + i_num))->iface, IFNAMSIZ); + if (err) + return ERR_PTR(err); + } + + s_tmp->ips = i_tmp; + +insert: + if (!insert_name_entry(s_tmp->filename, s_tmp->inode, + s_tmp->device, (s_tmp->mode & GR_DELETED) ? 
			       1 : 0))
		return ERR_PTR(-ENOMEM);

	return s_tmp;
}

/*
 * copy_user_subjs - copy a userspace-supplied linked list of subject labels
 * into kernel policy state for @role.
 *
 * Walks the user list via the snapshot copied into s_pre (the user copy of
 * each node supplies the ->prev link used for iteration), deep-copies each
 * subject with do_copy_user_subj(), and hashes it into the role.
 *
 * Returns 0 on success or a negative errno.  As elsewhere in this loader,
 * partially-copied state is not unwound here; the caller tears down the
 * whole policy allocation state on failure.
 */
static int
copy_user_subjs(struct acl_subject_label *userp, struct acl_role_label *role)
{
	struct acl_subject_label s_pre;
	struct acl_subject_label * ret;
	int err;

	while (userp) {
		/* snapshot the user node so we can follow ->prev after the
		   deep copy replaces the pointers in the kernel copy */
		if (copy_acl_subject_label(&s_pre, userp))
			return -EFAULT;

		ret = do_copy_user_subj(userp, role, NULL);

		/* NOTE(review): PTR_ERR() is taken before the IS_ERR() check;
		   harmless (the value is only used when IS_ERR() is true) but
		   unconventional ordering */
		err = PTR_ERR(ret);
		if (IS_ERR(ret))
			return err;

		insert_acl_subj_label(ret, role);

		userp = s_pre.prev;
	}

	return 0;
}

/*
 * copy_user_acl - top-level copy of a complete RBAC policy from userspace.
 *
 * Copies the special-role password table, then every role together with its
 * subject hash table, allowed IPs, domain members, and UID/GID transitions.
 * All memory comes from the acl_alloc*() policy allocator, so error paths
 * simply return: the caller (gracl_init/gracl_reload) frees the entire
 * allocation state on failure.
 *
 * Returns 0 on success, -EINVAL if the mandatory default/kernel roles are
 * missing, -ENOMEM/-EFAULT on allocation or copy failure.
 */
static int
copy_user_acl(struct gr_arg *arg)
{
	struct acl_role_label *r_tmp = NULL, **r_utmp, *r_utmp2;
	struct acl_subject_label *subj_list;
	struct sprole_pw *sptmp;
	struct gr_hash_struct *ghash;
	uid_t *domainlist;
	unsigned int r_num;
	int err = 0;
	__u16 i;
	__u32 num_subjs;

	/* we need a default and kernel role */
	if (arg->role_db.num_roles < 2)
		return -EINVAL;

	/* copy special role authentication info from userspace */

	polstate->num_sprole_pws = arg->num_sprole_pws;
	polstate->acl_special_roles = (struct sprole_pw **) acl_alloc_num(polstate->num_sprole_pws, sizeof(struct sprole_pw *));

	/* a NULL table is only an error if special roles were requested */
	if (!polstate->acl_special_roles && polstate->num_sprole_pws)
		return -ENOMEM;

	for (i = 0; i < polstate->num_sprole_pws; i++) {
		sptmp = (struct sprole_pw *) acl_alloc(sizeof(struct sprole_pw));
		if (!sptmp)
			return -ENOMEM;
		if (copy_sprole_pw(sptmp, i, arg->sprole_pws))
			return -EFAULT;

		err = alloc_and_copy_string((char **)&sptmp->rolename, GR_SPROLE_LEN);
		if (err)
			return err;

#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
		printk(KERN_ALERT "Copying special role %s\n", sptmp->rolename);
#endif

		polstate->acl_special_roles[i] = sptmp;
	}

	r_utmp = (struct acl_role_label **) arg->role_db.r_table;

	/* copy every role and everything hanging off of it */
	for (r_num = 0; r_num < arg->role_db.num_roles; r_num++) {
		r_tmp = acl_alloc(sizeof (struct acl_role_label));

		if (!r_tmp)
			return -ENOMEM;

		if (copy_pointer_from_array(&r_utmp2, r_num, r_utmp))
			return -EFAULT;

		if (copy_acl_role_label(r_tmp, r_utmp2))
			return -EFAULT;

		err = alloc_and_copy_string(&r_tmp->rolename, GR_SPROLE_LEN);
		if (err)
			return err;

		/* remember the two mandatory roles; validated after the loop */
		if (!strcmp(r_tmp->rolename, "default")
		    && (r_tmp->roletype & GR_ROLE_DEFAULT)) {
			polstate->default_role = r_tmp;
		} else if (!strcmp(r_tmp->rolename, ":::kernel:::")) {
			polstate->kernel_role = r_tmp;
		}

		if ((ghash = (struct gr_hash_struct *) acl_alloc(sizeof(struct gr_hash_struct))) == NULL)
			return -ENOMEM;

		if (copy_gr_hash_struct(ghash, r_tmp->hash))
			return -EFAULT;

		/* ->hash now points at the kernel copy, not userspace */
		r_tmp->hash = ghash;

		num_subjs = count_user_subjs(r_tmp->hash->first);

		r_tmp->subj_hash_size = num_subjs;
		r_tmp->subj_hash =
		    (struct acl_subject_label **)
		    create_table(&(r_tmp->subj_hash_size), sizeof(void *));

		if (!r_tmp->subj_hash)
			return -ENOMEM;

		err = copy_user_allowedips(r_tmp);
		if (err)
			return err;

		/* copy domain info */
		if (r_tmp->domain_children != NULL) {
			domainlist = acl_alloc_num(r_tmp->domain_child_num, sizeof(uid_t));
			if (domainlist == NULL)
				return -ENOMEM;

			if (copy_from_user(domainlist, r_tmp->domain_children, r_tmp->domain_child_num * sizeof(uid_t)))
				return -EFAULT;

			r_tmp->domain_children = domainlist;
		}

		err = copy_user_transitions(r_tmp);
		if (err)
			return err;

		memset(r_tmp->subj_hash, 0,
		       r_tmp->subj_hash_size *
		       sizeof (struct acl_subject_label *));

		/* acquire the list of subjects, then NULL out
		   the list prior to parsing the subjects for this role,
		   as during this parsing the list is replaced with a list
		   of *nested* subjects for the role
		*/
		subj_list = r_tmp->hash->first;

		/* set nested subject list to null */
		r_tmp->hash->first = NULL;

		err = copy_user_subjs(subj_list, r_tmp);

		if (err)
			return err;

		insert_acl_role_label(r_tmp);
	}

	/* both mandatory roles must have been present in the policy */
	if (polstate->default_role == NULL || polstate->kernel_role == NULL)
		return -EINVAL;

	return err;
}

static int gracl_reload_apply_policies(void *reload)
{
	struct
gr_reload_state *reload_state = (struct gr_reload_state *)reload; + struct task_struct *task, *task2; + struct acl_role_label *role, *rtmp; + struct acl_subject_label *subj; + const struct cred *cred; + int role_applied; + int ret = 0; + + memcpy(&reload_state->oldpolicy, reload_state->oldpolicy_ptr, sizeof(struct gr_policy_state)); + memcpy(&reload_state->oldalloc, reload_state->oldalloc_ptr, sizeof(struct gr_alloc_state)); + + /* first make sure we'll be able to apply the new policy cleanly */ + do_each_thread(task2, task) { + if (task->exec_file == NULL) + continue; + role_applied = 0; + if (!reload_state->oldmode && task->role->roletype & GR_ROLE_SPECIAL) { + /* preserve special roles */ + FOR_EACH_ROLE_START(role) + if ((role->roletype & GR_ROLE_SPECIAL) && !strcmp(task->role->rolename, role->rolename)) { + rtmp = task->role; + task->role = role; + role_applied = 1; + break; + } + FOR_EACH_ROLE_END(role) + } + if (!role_applied) { + cred = __task_cred(task); + rtmp = task->role; + task->role = __lookup_acl_role_label(polstate, task, GR_GLOBAL_UID(cred->uid), GR_GLOBAL_GID(cred->gid)); + } + /* this handles non-nested inherited subjects, nested subjects will still + be dropped currently */ + subj = __gr_get_subject_for_task(polstate, task, task->acl->filename, 1); + task->tmpacl = __gr_get_subject_for_task(polstate, task, NULL, 1); + /* change the role back so that we've made no modifications to the policy */ + task->role = rtmp; + + if (subj == NULL || task->tmpacl == NULL) { + ret = -EINVAL; + goto out; + } + } while_each_thread(task2, task); + + /* now actually apply the policy */ + + do_each_thread(task2, task) { + if (task->exec_file) { + role_applied = 0; + if (!reload_state->oldmode && task->role->roletype & GR_ROLE_SPECIAL) { + /* preserve special roles */ + FOR_EACH_ROLE_START(role) + if ((role->roletype & GR_ROLE_SPECIAL) && !strcmp(task->role->rolename, role->rolename)) { + task->role = role; + role_applied = 1; + break; + } + FOR_EACH_ROLE_END(role) 
+ } + if (!role_applied) { + cred = __task_cred(task); + task->role = __lookup_acl_role_label(polstate, task, GR_GLOBAL_UID(cred->uid), GR_GLOBAL_GID(cred->gid)); + } + /* this handles non-nested inherited subjects, nested subjects will still + be dropped currently */ + if (!reload_state->oldmode && task->inherited) + subj = __gr_get_subject_for_task(polstate, task, task->acl->filename, 1); + else { + /* looked up and tagged to the task previously */ + subj = task->tmpacl; + } + /* subj will be non-null */ + __gr_apply_subject_to_task(polstate, task, subj); + if (reload_state->oldmode) { + task->acl_role_id = 0; + task->acl_sp_role = 0; + task->inherited = 0; + } + } else { + // it's a kernel process + task->role = polstate->kernel_role; + task->acl = polstate->kernel_role->root_label; +#ifdef CONFIG_GRKERNSEC_ACL_HIDEKERN + task->acl->mode &= ~GR_PROCFIND; +#endif + } + } while_each_thread(task2, task); + + memcpy(reload_state->oldpolicy_ptr, &reload_state->newpolicy, sizeof(struct gr_policy_state)); + memcpy(reload_state->oldalloc_ptr, &reload_state->newalloc, sizeof(struct gr_alloc_state)); + +out: + + return ret; +} + +static int gracl_reload(struct gr_arg *args, unsigned char oldmode) +{ + struct gr_reload_state new_reload_state = { }; + int err; + + new_reload_state.oldpolicy_ptr = polstate; + new_reload_state.oldalloc_ptr = current_alloc_state; + new_reload_state.oldmode = oldmode; + + current_alloc_state = &new_reload_state.newalloc; + polstate = &new_reload_state.newpolicy; + + /* everything relevant is now saved off, copy in the new policy */ + if (init_variables(args, true)) { + gr_log_str(GR_DONT_AUDIT_GOOD, GR_INITF_ACL_MSG, GR_VERSION); + err = -ENOMEM; + goto error; + } + + err = copy_user_acl(args); + free_init_variables(); + if (err) + goto error; + /* the new policy is copied in, with the old policy available via saved_state + first go through applying roles, making sure to preserve special roles + then apply new subjects, making sure to preserve 
inherited and nested subjects, + though currently only inherited subjects will be preserved + */ + err = stop_machine(gracl_reload_apply_policies, &new_reload_state, NULL); + if (err) + goto error; + + /* we've now applied the new policy, so restore the old policy state to free it */ + polstate = &new_reload_state.oldpolicy; + current_alloc_state = &new_reload_state.oldalloc; + free_variables(true); + + /* oldpolicy/oldalloc_ptr point to the new policy/alloc states as they were copied + to running_polstate/current_alloc_state inside stop_machine + */ + err = 0; + goto out; +error: + /* on error of loading the new policy, we'll just keep the previous + policy set around + */ + free_variables(true); + + /* doesn't affect runtime, but maintains consistent state */ +out: + polstate = new_reload_state.oldpolicy_ptr; + current_alloc_state = new_reload_state.oldalloc_ptr; + + return err; +} + +static int +gracl_init(struct gr_arg *args) +{ + int error = 0; + + memcpy(gr_system_salt, args->salt, GR_SALT_LEN); + memcpy(gr_system_sum, args->sum, GR_SHA_LEN); + + if (init_variables(args, false)) { + gr_log_str(GR_DONT_AUDIT_GOOD, GR_INITF_ACL_MSG, GR_VERSION); + error = -ENOMEM; + goto out; + } + + error = copy_user_acl(args); + free_init_variables(); + if (error) + goto out; + + error = gr_set_acls(0); + if (error) + goto out; + + gr_enable_rbac_system(); + + return 0; + +out: + free_variables(false); + return error; +} + +static int +lookup_special_role_auth(__u16 mode, const char *rolename, unsigned char **salt, + unsigned char **sum) +{ + struct acl_role_label *r; + struct role_allowed_ip *ipp; + struct role_transition *trans; + unsigned int i; + int found = 0; + u32 curr_ip = current->signal->curr_ip; + + current->signal->saved_ip = curr_ip; + + /* check transition table */ + + for (trans = current->role->transitions; trans; trans = trans->next) { + if (!strcmp(rolename, trans->rolename)) { + found = 1; + break; + } + } + + if (!found) + return 0; + + /* handle special 
roles that do not require authentication + and check ip */ + + FOR_EACH_ROLE_START(r) + if (!strcmp(rolename, r->rolename) && + (r->roletype & GR_ROLE_SPECIAL)) { + found = 0; + if (r->allowed_ips != NULL) { + for (ipp = r->allowed_ips; ipp; ipp = ipp->next) { + if ((ntohl(curr_ip) & ipp->netmask) == + (ntohl(ipp->addr) & ipp->netmask)) + found = 1; + } + } else + found = 2; + if (!found) + return 0; + + if (((mode == GR_SPROLE) && (r->roletype & GR_ROLE_NOPW)) || + ((mode == GR_SPROLEPAM) && (r->roletype & GR_ROLE_PAM))) { + *salt = NULL; + *sum = NULL; + return 1; + } + } + FOR_EACH_ROLE_END(r) + + for (i = 0; i < polstate->num_sprole_pws; i++) { + if (!strcmp(rolename, (const char *)polstate->acl_special_roles[i]->rolename)) { + *salt = polstate->acl_special_roles[i]->salt; + *sum = polstate->acl_special_roles[i]->sum; + return 1; + } + } + + return 0; +} + +int gr_check_secure_terminal(struct task_struct *task) +{ + struct task_struct *p, *p2, *p3; + struct files_struct *files; + struct fdtable *fdt; + struct file *our_file = NULL, *file; + struct inode *our_inode = NULL; + int i; + + if (task->signal->tty == NULL) + return 1; + + files = get_files_struct(task); + if (files != NULL) { + rcu_read_lock(); + fdt = files_fdtable(files); + for (i=0; i < fdt->max_fds; i++) { + file = fcheck_files(files, i); + if (file && (our_file == NULL) && (file->private_data == task->signal->tty)) { + get_file(file); + our_file = file; + } + } + rcu_read_unlock(); + put_files_struct(files); + } + + if (our_file == NULL) + return 1; + + our_inode = d_backing_inode(our_file->f_path.dentry); + + read_lock(&tasklist_lock); + do_each_thread(p2, p) { + files = get_files_struct(p); + if (files == NULL || + (p->signal && p->signal->tty == task->signal->tty)) { + if (files != NULL) + put_files_struct(files); + continue; + } + rcu_read_lock(); + fdt = files_fdtable(files); + for (i=0; i < fdt->max_fds; i++) { + struct inode *inode = NULL; + file = fcheck_files(files, i); + if (file) + 
inode = d_backing_inode(file->f_path.dentry); + if (inode && S_ISCHR(inode->i_mode) && inode->i_rdev == our_inode->i_rdev) { + p3 = task; + while (task_pid_nr(p3) > 0) { + if (p3 == p) + break; + p3 = p3->real_parent; + } + if (p3 == p) + break; + gr_log_ttysniff(GR_DONT_AUDIT_GOOD, GR_TTYSNIFF_ACL_MSG, p); + gr_handle_alertkill(p); + rcu_read_unlock(); + put_files_struct(files); + read_unlock(&tasklist_lock); + fput(our_file); + return 0; + } + } + rcu_read_unlock(); + put_files_struct(files); + } while_each_thread(p2, p); + read_unlock(&tasklist_lock); + + fput(our_file); + return 1; +} + +ssize_t +write_grsec_handler(struct file *file, const char __user * buf, size_t count, loff_t *ppos) +{ + struct gr_arg_wrapper uwrap; + unsigned char *sprole_salt = NULL; + unsigned char *sprole_sum = NULL; + int error = 0; + int error2 = 0; + size_t req_count = 0; + unsigned char oldmode = 0; + + mutex_lock(&gr_dev_mutex); + + if (gr_acl_is_enabled() && !(current->acl->mode & GR_KERNELAUTH)) { + error = -EPERM; + goto out; + } + +#ifdef CONFIG_COMPAT + pax_open_kernel(); + if (in_compat_syscall()) { + copy_gr_arg_wrapper = ©_gr_arg_wrapper_compat; + copy_gr_arg = ©_gr_arg_compat; + copy_acl_object_label = ©_acl_object_label_compat; + copy_acl_subject_label = ©_acl_subject_label_compat; + copy_acl_role_label = ©_acl_role_label_compat; + copy_acl_ip_label = ©_acl_ip_label_compat; + copy_role_allowed_ip = ©_role_allowed_ip_compat; + copy_role_transition = ©_role_transition_compat; + copy_sprole_pw = ©_sprole_pw_compat; + copy_gr_hash_struct = ©_gr_hash_struct_compat; + copy_pointer_from_array = ©_pointer_from_array_compat; + get_gr_arg_wrapper_size = &get_gr_arg_wrapper_size_compat; + } else { + copy_gr_arg_wrapper = ©_gr_arg_wrapper_normal; + copy_gr_arg = ©_gr_arg_normal; + copy_acl_object_label = ©_acl_object_label_normal; + copy_acl_subject_label = ©_acl_subject_label_normal; + copy_acl_role_label = ©_acl_role_label_normal; + copy_acl_ip_label = ©_acl_ip_label_normal; + 
copy_role_allowed_ip = ©_role_allowed_ip_normal; + copy_role_transition = ©_role_transition_normal; + copy_sprole_pw = ©_sprole_pw_normal; + copy_gr_hash_struct = ©_gr_hash_struct_normal; + copy_pointer_from_array = ©_pointer_from_array_normal; + get_gr_arg_wrapper_size = &get_gr_arg_wrapper_size_normal; + } + pax_close_kernel(); +#endif + + req_count = get_gr_arg_wrapper_size(); + + if (count != req_count) { + gr_log_int_int(GR_DONT_AUDIT_GOOD, GR_DEV_ACL_MSG, (int)count, (int)req_count); + error = -EINVAL; + goto out; + } + + + if (gr_auth_expires && time_after_eq(get_seconds(), gr_auth_expires)) { + gr_auth_expires = 0; + gr_auth_attempts = 0; + } + + error = copy_gr_arg_wrapper(buf, &uwrap); + if (error) + goto out; + + error = copy_gr_arg(uwrap.arg, gr_usermode); + if (error) + goto out; + + if (gr_usermode->mode != GR_SPROLE && gr_usermode->mode != GR_SPROLEPAM && + gr_auth_attempts >= CONFIG_GRKERNSEC_ACL_MAXTRIES && + time_after(gr_auth_expires, get_seconds())) { + error = -EBUSY; + goto out; + } + + /* if non-root trying to do anything other than use a special role, + do not attempt authentication, do not count towards authentication + locking + */ + + if (gr_usermode->mode != GR_SPROLE && gr_usermode->mode != GR_STATUS && + gr_usermode->mode != GR_UNSPROLE && gr_usermode->mode != GR_SPROLEPAM && + gr_is_global_nonroot(current_uid())) { + error = -EPERM; + goto out; + } + + /* ensure pw and special role name are null terminated */ + + gr_usermode->pw[GR_PW_LEN - 1] = '\0'; + gr_usermode->sp_role[GR_SPROLE_LEN - 1] = '\0'; + + /* Okay. + * We have our enough of the argument structure..(we have yet + * to copy_from_user the tables themselves) . Copy the tables + * only if we need them, i.e. for loading operations. 
*/ + + switch (gr_usermode->mode) { + case GR_STATUS: + if (gr_acl_is_enabled()) { + error = 1; + if (!gr_check_secure_terminal(current)) + error = 3; + } else + error = 2; + goto out; + case GR_SHUTDOWN: + if (gr_acl_is_enabled() && !(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) { + stop_machine(gr_rbac_disable, NULL, NULL); + free_variables(false); + memset(gr_usermode, 0, sizeof(struct gr_arg)); + memset(gr_system_salt, 0, GR_SALT_LEN); + memset(gr_system_sum, 0, GR_SHA_LEN); + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SHUTS_ACL_MSG); + } else if (gr_acl_is_enabled()) { + gr_log_noargs(GR_DONT_AUDIT, GR_SHUTF_ACL_MSG); + error = -EPERM; + } else { + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SHUTI_ACL_MSG); + error = -EAGAIN; + } + break; + case GR_ENABLE: + if (!gr_acl_is_enabled() && !(error2 = gracl_init(gr_usermode))) + gr_log_str(GR_DONT_AUDIT_GOOD, GR_ENABLE_ACL_MSG, GR_VERSION); + else { + if (gr_acl_is_enabled()) + error = -EAGAIN; + else + error = error2; + gr_log_str(GR_DONT_AUDIT, GR_ENABLEF_ACL_MSG, GR_VERSION); + } + break; + case GR_OLDRELOAD: + oldmode = 1; + case GR_RELOAD: + if (!gr_acl_is_enabled()) { + gr_log_str(GR_DONT_AUDIT_GOOD, GR_RELOADI_ACL_MSG, GR_VERSION); + error = -EAGAIN; + } else if (!(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) { + error2 = gracl_reload(gr_usermode, oldmode); + if (!error2) + gr_log_str(GR_DONT_AUDIT_GOOD, GR_RELOAD_ACL_MSG, GR_VERSION); + else { + gr_log_str(GR_DONT_AUDIT, GR_RELOADF_ACL_MSG, GR_VERSION); + error = error2; + } + } else { + gr_log_str(GR_DONT_AUDIT, GR_RELOADF_ACL_MSG, GR_VERSION); + error = -EPERM; + } + break; + case GR_SEGVMOD: + if (unlikely(!gr_acl_is_enabled())) { + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SEGVMODI_ACL_MSG); + error = -EAGAIN; + break; + } + + if (!(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) { + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SEGVMODS_ACL_MSG); + if (gr_usermode->segv_device && gr_usermode->segv_inode) { + struct acl_subject_label *segvacl; + segvacl = + 
lookup_acl_subj_label(gr_usermode->segv_inode, + gr_usermode->segv_device, + current->role); + if (segvacl) { + segvacl->crashes = 0; + segvacl->expires = 0; + } + } else + gr_find_and_remove_uid(gr_usermode->segv_uid); + } else { + gr_log_noargs(GR_DONT_AUDIT, GR_SEGVMODF_ACL_MSG); + error = -EPERM; + } + break; + case GR_SPROLE: + case GR_SPROLEPAM: + if (unlikely(!gr_acl_is_enabled())) { + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SPROLEI_ACL_MSG); + error = -EAGAIN; + break; + } + + if (current->role->expires && time_after_eq(get_seconds(), current->role->expires)) { + current->role->expires = 0; + current->role->auth_attempts = 0; + } + + if (current->role->auth_attempts >= CONFIG_GRKERNSEC_ACL_MAXTRIES && + time_after(current->role->expires, get_seconds())) { + error = -EBUSY; + goto out; + } + + if (lookup_special_role_auth + (gr_usermode->mode, (const char *)gr_usermode->sp_role, &sprole_salt, &sprole_sum) + && ((!sprole_salt && !sprole_sum) + || !(chkpw(gr_usermode, sprole_salt, sprole_sum)))) { + char *p = ""; + assign_special_role((const char *)gr_usermode->sp_role); + read_lock(&tasklist_lock); + if (current->real_parent) + p = current->real_parent->role->rolename; + read_unlock(&tasklist_lock); + gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_SPROLES_ACL_MSG, + p, acl_sp_role_value); + } else { + gr_log_str(GR_DONT_AUDIT, GR_SPROLEF_ACL_MSG, gr_usermode->sp_role); + error = -EPERM; + if(!(current->role->auth_attempts++)) + current->role->expires = get_seconds() + CONFIG_GRKERNSEC_ACL_TIMEOUT; + + goto out; + } + break; + case GR_UNSPROLE: + if (unlikely(!gr_acl_is_enabled())) { + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_UNSPROLEI_ACL_MSG); + error = -EAGAIN; + break; + } + + if (current->role->roletype & GR_ROLE_SPECIAL) { + char *p = ""; + int i = 0; + + read_lock(&tasklist_lock); + if (current->real_parent) { + p = current->real_parent->role->rolename; + i = current->real_parent->acl_role_id; + } + read_unlock(&tasklist_lock); + + gr_log_str_int(GR_DONT_AUDIT_GOOD, 
GR_UNSPROLES_ACL_MSG, p, i); + gr_set_acls(1); + } else { + error = -EPERM; + goto out; + } + break; + default: + gr_log_int(GR_DONT_AUDIT, GR_INVMODE_ACL_MSG, gr_usermode->mode); + error = -EINVAL; + break; + } + + if (error != -EPERM) + goto out; + + if(!(gr_auth_attempts++)) + gr_auth_expires = get_seconds() + CONFIG_GRKERNSEC_ACL_TIMEOUT; + + out: + mutex_unlock(&gr_dev_mutex); + + if (!error) + error = req_count; + + return error; +} + +int +gr_set_acls(const int type) +{ + struct task_struct *task, *task2; + struct acl_role_label *role = current->role; + struct acl_subject_label *subj; + __u16 acl_role_id = current->acl_role_id; + const struct cred *cred; + int ret; + + rcu_read_lock(); + read_lock(&tasklist_lock); + read_lock(&grsec_exec_file_lock); + do_each_thread(task2, task) { + /* check to see if we're called from the exit handler, + if so, only replace ACLs that have inherited the admin + ACL */ + + if (type && (task->role != role || + task->acl_role_id != acl_role_id)) + continue; + + task->acl_role_id = 0; + task->acl_sp_role = 0; + task->inherited = 0; + + if (task->exec_file) { + cred = __task_cred(task); + task->role = __lookup_acl_role_label(polstate, task, GR_GLOBAL_UID(cred->uid), GR_GLOBAL_GID(cred->gid)); + subj = __gr_get_subject_for_task(polstate, task, NULL, 1); + if (subj == NULL) { + ret = -EINVAL; + read_unlock(&grsec_exec_file_lock); + read_unlock(&tasklist_lock); + rcu_read_unlock(); + gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_DEFACL_MSG, task->comm, task_pid_nr(task)); + return ret; + } + __gr_apply_subject_to_task(polstate, task, subj); + } else { + // it's a kernel process + task->role = polstate->kernel_role; + task->acl = polstate->kernel_role->root_label; +#ifdef CONFIG_GRKERNSEC_ACL_HIDEKERN + task->acl->mode &= ~GR_PROCFIND; +#endif + } + } while_each_thread(task2, task); + read_unlock(&grsec_exec_file_lock); + read_unlock(&tasklist_lock); + rcu_read_unlock(); + + return 0; +} diff --git a/grsecurity/gracl_res.c 
b/grsecurity/gracl_res.c new file mode 100644 index 0000000000..dfba8fd4ba --- /dev/null +++ b/grsecurity/gracl_res.c @@ -0,0 +1,74 @@ +#include +#include +#include +#include + +static const char *restab_log[] = { + [RLIMIT_CPU] = "RLIMIT_CPU", + [RLIMIT_FSIZE] = "RLIMIT_FSIZE", + [RLIMIT_DATA] = "RLIMIT_DATA", + [RLIMIT_STACK] = "RLIMIT_STACK", + [RLIMIT_CORE] = "RLIMIT_CORE", + [RLIMIT_RSS] = "RLIMIT_RSS", + [RLIMIT_NPROC] = "RLIMIT_NPROC", + [RLIMIT_NOFILE] = "RLIMIT_NOFILE", + [RLIMIT_MEMLOCK] = "RLIMIT_MEMLOCK", + [RLIMIT_AS] = "RLIMIT_AS", + [RLIMIT_LOCKS] = "RLIMIT_LOCKS", + [RLIMIT_SIGPENDING] = "RLIMIT_SIGPENDING", + [RLIMIT_MSGQUEUE] = "RLIMIT_MSGQUEUE", + [RLIMIT_NICE] = "RLIMIT_NICE", + [RLIMIT_RTPRIO] = "RLIMIT_RTPRIO", + [RLIMIT_RTTIME] = "RLIMIT_RTTIME", + [GR_CRASH_RES] = "RLIMIT_CRASH" +}; + +void +gr_log_resource(const struct task_struct *task, + const int res, const unsigned long wanted, const int gt) +{ + const struct cred *cred; + unsigned long rlim; + + if (!gr_acl_is_enabled() && !grsec_resource_logging) + return; + + // not yet supported resource + if (unlikely(!restab_log[res])) + return; + + /* + * not really security relevant, too much userland code shared + * from pulseaudio that blindly attempts to violate limits in a loop, + * resulting in log spam + */ + if (res == RLIMIT_NICE) + return; + + if (res == RLIMIT_CPU || res == RLIMIT_RTTIME) + rlim = task_rlimit_max(task, res); + else + rlim = task_rlimit(task, res); + + if (likely((rlim == RLIM_INFINITY) || (gt && wanted <= rlim) || (!gt && wanted < rlim))) + return; + + rcu_read_lock(); + cred = __task_cred(task); + + if (res == RLIMIT_NPROC && + (cap_raised(cred->cap_effective, CAP_SYS_ADMIN) || + cap_raised(cred->cap_effective, CAP_SYS_RESOURCE))) + goto out_rcu_unlock; + else if (res == RLIMIT_MEMLOCK && + cap_raised(cred->cap_effective, CAP_IPC_LOCK)) + goto out_rcu_unlock; + rcu_read_unlock(); + + gr_log_res_ulong2_str(GR_DONT_AUDIT, GR_RESOURCE_MSG, task, wanted, restab_log[res], 
rlim); + + return; +out_rcu_unlock: + rcu_read_unlock(); + return; +} diff --git a/grsecurity/gracl_segv.c b/grsecurity/gracl_segv.c new file mode 100644 index 0000000000..02c5a2bb9c --- /dev/null +++ b/grsecurity/gracl_segv.c @@ -0,0 +1,306 @@ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#if defined(CONFIG_BTRFS_FS) || defined(CONFIG_BTRFS_FS_MODULE) +#include +#include +#include "../fs/btrfs/async-thread.h" +#include "../fs/btrfs/ctree.h" +#include "../fs/btrfs/btrfs_inode.h" +#endif + +static struct crash_uid *uid_set; +static unsigned short uid_used; +static DEFINE_SPINLOCK(gr_uid_lock); +extern rwlock_t gr_inode_lock; +extern struct acl_subject_label * + lookup_acl_subj_label(const u64 inode, const dev_t dev, + const struct acl_role_label *role); + +int +gr_init_uidset(void) +{ + uid_set = + kmalloc(GR_UIDTABLE_MAX * sizeof (struct crash_uid), GFP_KERNEL); + uid_used = 0; + + return uid_set ? 
1 : 0;
}

/*
 * gr_free_uidset - tear down the crash-UID table.
 *
 * The pointer is detached and the counters cleared under gr_uid_lock so
 * concurrent lookups see a consistent (empty) table; the actual kfree()
 * happens after the lock is dropped.
 */
void
gr_free_uidset(void)
{
	if (uid_set) {
		struct crash_uid *tmpset;
		spin_lock(&gr_uid_lock);
		tmpset = uid_set;
		uid_set = NULL;
		uid_used = 0;
		spin_unlock(&gr_uid_lock);
		/* NOTE(review): tmpset is always non-NULL here (checked above
		   before the lock); the inner if is redundant but harmless */
		if (tmpset)
			kfree(tmpset);
	}

	return;
}

/*
 * gr_find_uid - binary-search the sorted uid_set table for @uid.
 *
 * Returns the index of the entry, or -1 if not present.  Callers are
 * expected to hold gr_uid_lock (all callers in this file do) — TODO
 * confirm no lockless caller exists elsewhere.
 */
int
gr_find_uid(const uid_t uid)
{
	struct crash_uid *tmp = uid_set;
	uid_t buid;
	int low = 0, high = uid_used - 1, mid;

	while (high >= low) {
		mid = (low + high) >> 1;
		buid = tmp[mid].uid;
		if (buid == uid)
			return mid;
		if (buid > uid)
			high = mid - 1;
		if (buid < uid)
			low = mid + 1;
	}

	return -1;
}

/*
 * gr_insertsort - restore sorted order of uid_set after an append.
 *
 * Classic insertion sort; the table is kept sorted by uid so that
 * gr_find_uid() can binary-search it.  Called with gr_uid_lock held.
 */
static void
gr_insertsort(void)
{
	unsigned short i, j;
	struct crash_uid index;

	for (i = 1; i < uid_used; i++) {
		index = uid_set[i];
		j = i;
		while ((j > 0) && uid_set[j - 1].uid > index.uid) {
			uid_set[j] = uid_set[j - 1];
			j--;
		}
		uid_set[j] = index;
	}

	return;
}

/*
 * gr_insert_uid - record (or refresh) a crash-banned UID with its expiry.
 *
 * If the uid is already present only its expiry is updated; otherwise it is
 * appended and the table re-sorted.  Silently drops the insert when the
 * table is full (GR_UIDTABLE_MAX).  Caller must hold gr_uid_lock.
 */
static void
gr_insert_uid(const kuid_t kuid, const unsigned long expires)
{
	int loc;
	uid_t uid = GR_GLOBAL_UID(kuid);

	if (uid_used == GR_UIDTABLE_MAX)
		return;

	loc = gr_find_uid(uid);

	if (loc >= 0) {
		uid_set[loc].expires = expires;
		return;
	}

	uid_set[uid_used].uid = uid;
	uid_set[uid_used].expires = expires;
	uid_used++;

	gr_insertsort();

	return;
}

/*
 * gr_remove_uid - delete the entry at index @loc by shifting the tail down.
 * Preserves sorted order.  Caller must hold gr_uid_lock and pass a valid
 * index (< uid_used).
 */
void
gr_remove_uid(const unsigned short loc)
{
	unsigned short i;

	for (i = loc + 1; i < uid_used; i++)
		uid_set[i - 1] = uid_set[i];

	uid_used--;

	return;
}

/*
 * gr_find_and_remove_uid - locked lookup-and-delete of @uid.
 * Returns 1 if the uid was found (and removed), 0 otherwise.
 */
int gr_find_and_remove_uid(uid_t uid)
{
	int loc;

	spin_lock(&gr_uid_lock);
	loc = gr_find_uid(uid);
	if (loc >= 0)
		gr_remove_uid(loc);
	spin_unlock(&gr_uid_lock);

	return loc >= 0 ?
1 : 0; +} + +int +gr_check_crash_uid(const kuid_t kuid) +{ + int loc; + int ret = 0; + uid_t uid; + + if (unlikely(!gr_acl_is_enabled())) + return 0; + + uid = GR_GLOBAL_UID(kuid); + + spin_lock(&gr_uid_lock); + loc = gr_find_uid(uid); + + if (loc < 0) + goto out_unlock; + + if (time_before_eq(uid_set[loc].expires, get_seconds())) + gr_remove_uid(loc); + else + ret = 1; + +out_unlock: + spin_unlock(&gr_uid_lock); + return ret; +} + +extern int gr_fake_force_sig(int sig, struct task_struct *t); + +void +gr_handle_crash(struct task_struct *task, const int sig) +{ + struct acl_subject_label *curr; + struct task_struct *tsk, *tsk2; + const struct cred *cred; + const struct cred *cred2; + + if (sig != SIGSEGV && sig != SIGKILL && sig != SIGBUS && sig != SIGILL) + return; + + if (unlikely(!gr_acl_is_enabled())) + return; + + curr = task->acl; + + if (!(curr->resmask & (1U << GR_CRASH_RES))) + return; + + if (time_before_eq(curr->expires, get_seconds())) { + curr->expires = 0; + curr->crashes = 0; + } + + curr->crashes++; + + if (!curr->expires) + curr->expires = get_seconds() + curr->res[GR_CRASH_RES].rlim_max; + + if ((curr->crashes >= curr->res[GR_CRASH_RES].rlim_cur) && + time_after(curr->expires, get_seconds())) { + int is_priv = is_privileged_binary(task->mm->exe_file->f_path.dentry); + + rcu_read_lock(); + cred = __task_cred(task); + if (gr_is_global_nonroot(cred->uid) && is_priv) { + gr_log_crash1(GR_DONT_AUDIT, GR_SEGVSTART_ACL_MSG, task, curr->res[GR_CRASH_RES].rlim_max); + spin_lock(&gr_uid_lock); + gr_insert_uid(cred->uid, curr->expires); + spin_unlock(&gr_uid_lock); + curr->expires = 0; + curr->crashes = 0; + read_lock(&tasklist_lock); + do_each_thread(tsk2, tsk) { + cred2 = __task_cred(tsk); + if (tsk != task && uid_eq(cred2->uid, cred->uid)) + gr_fake_force_sig(SIGKILL, tsk); + } while_each_thread(tsk2, tsk); + read_unlock(&tasklist_lock); + } else { + gr_log_crash2(GR_DONT_AUDIT, GR_SEGVNOSUID_ACL_MSG, task, curr->res[GR_CRASH_RES].rlim_max); + 
read_lock(&tasklist_lock); + read_lock(&grsec_exec_file_lock); + do_each_thread(tsk2, tsk) { + if (likely(tsk != task)) { + // if this thread has the same subject as the one that triggered + // RES_CRASH and it's the same binary, kill it + if (tsk->acl == task->acl && gr_is_same_file(tsk->exec_file, task->exec_file)) + gr_fake_force_sig(SIGKILL, tsk); + } + } while_each_thread(tsk2, tsk); + read_unlock(&grsec_exec_file_lock); + read_unlock(&tasklist_lock); + } + rcu_read_unlock(); + } + + return; +} + +int +gr_check_crash_exec(const struct file *filp) +{ + struct acl_subject_label *curr; + struct dentry *dentry; + + if (unlikely(!gr_acl_is_enabled())) + return 0; + + read_lock(&gr_inode_lock); + dentry = filp->f_path.dentry; + curr = lookup_acl_subj_label(gr_get_ino_from_dentry(dentry), gr_get_dev_from_dentry(dentry), + current->role); + read_unlock(&gr_inode_lock); + + if (!curr || !(curr->resmask & (1U << GR_CRASH_RES)) || + (!curr->crashes && !curr->expires)) + return 0; + + if ((curr->crashes >= curr->res[GR_CRASH_RES].rlim_cur) && + time_after(curr->expires, get_seconds())) + return 1; + else if (time_before_eq(curr->expires, get_seconds())) { + curr->crashes = 0; + curr->expires = 0; + } + + return 0; +} + +void +gr_handle_alertkill(struct task_struct *task) +{ + struct acl_subject_label *curracl; + __u32 curr_ip; + struct task_struct *p, *p2; + + if (unlikely(!gr_acl_is_enabled())) + return; + + curracl = task->acl; + curr_ip = task->signal->curr_ip; + + if ((curracl->mode & GR_KILLIPPROC) && curr_ip) { + read_lock(&tasklist_lock); + do_each_thread(p2, p) { + if (p->signal->curr_ip == curr_ip) + gr_fake_force_sig(SIGKILL, p); + } while_each_thread(p2, p); + read_unlock(&tasklist_lock); + } else if (curracl->mode & GR_KILLPROC) + gr_fake_force_sig(SIGKILL, task); + + return; +} diff --git a/grsecurity/gracl_shm.c b/grsecurity/gracl_shm.c new file mode 100644 index 0000000000..6b0c9cc644 --- /dev/null +++ b/grsecurity/gracl_shm.c @@ -0,0 +1,40 @@ +#include 
+#include +#include +#include +#include +#include +#include +#include + +int +gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid, + const u64 shm_createtime, const kuid_t cuid, const int shmid) +{ + struct task_struct *task; + + if (!gr_acl_is_enabled()) + return 1; + + rcu_read_lock(); + read_lock(&tasklist_lock); + + task = find_task_by_vpid(shm_cprid); + + if (unlikely(!task)) + task = find_task_by_vpid(shm_lapid); + + if (unlikely(task && (time_before_eq64(task->start_time, shm_createtime) || + (task_pid_nr(task) == shm_lapid)) && + (task->acl->mode & GR_PROTSHM) && + (task->acl != current->acl))) { + read_unlock(&tasklist_lock); + rcu_read_unlock(); + gr_log_int3(GR_DONT_AUDIT, GR_SHMAT_ACL_MSG, GR_GLOBAL_UID(cuid), shm_cprid, shmid); + return 0; + } + read_unlock(&tasklist_lock); + rcu_read_unlock(); + + return 1; +} diff --git a/grsecurity/grsec_chdir.c b/grsecurity/grsec_chdir.c new file mode 100644 index 0000000000..bc0be01b02 --- /dev/null +++ b/grsecurity/grsec_chdir.c @@ -0,0 +1,19 @@ +#include +#include +#include +#include +#include +#include + +void +gr_log_chdir(const struct dentry *dentry, const struct vfsmount *mnt) +{ +#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR + if ((grsec_enable_chdir && grsec_enable_group && + in_group_p(grsec_audit_gid)) || (grsec_enable_chdir && + !grsec_enable_group)) { + gr_log_fs_generic(GR_DO_AUDIT, GR_CHDIR_AUDIT_MSG, dentry, mnt); + } +#endif + return; +} diff --git a/grsecurity/grsec_chroot.c b/grsecurity/grsec_chroot.c new file mode 100644 index 0000000000..1964ab1c88 --- /dev/null +++ b/grsecurity/grsec_chroot.c @@ -0,0 +1,506 @@ +#include +#include +#include +#include +#include +#include +#include +#include +#include "../fs/mount.h" +#include +#include + +#ifdef CONFIG_GRKERNSEC_CHROOT_INITRD +int gr_init_ran; +#endif + +void gr_inc_chroot_refcnts(struct dentry *dentry, struct vfsmount *mnt) +{ +#ifdef CONFIG_GRKERNSEC_CHROOT_RENAME + struct dentry *tmpd = dentry; + + read_seqlock_excl(&mount_lock); + 
	write_seqlock(&rename_lock);

	/* bump the chroot refcount on every dentry from the chroot target up
	   to (and including) the mount root, so renames out of a live chroot
	   can be detected later */
	while (tmpd != mnt->mnt_root) {
		atomic_inc(&tmpd->chroot_refcnt);
		tmpd = tmpd->d_parent;
	}
	atomic_inc(&tmpd->chroot_refcnt);

	write_sequnlock(&rename_lock);
	read_sequnlock_excl(&mount_lock);
#endif
}

/*
 * gr_dec_chroot_refcnts - mirror of gr_inc_chroot_refcnts: drop the
 * chroot_refcnt on each dentry from @dentry up to the mount root.
 * The d_parent walk is stabilized by holding rename_lock (and mount_lock
 * to pin the mount root).
 */
void gr_dec_chroot_refcnts(struct dentry *dentry, struct vfsmount *mnt)
{
#ifdef CONFIG_GRKERNSEC_CHROOT_RENAME
	struct dentry *tmpd = dentry;

	read_seqlock_excl(&mount_lock);
	write_seqlock(&rename_lock);

	while (tmpd != mnt->mnt_root) {
		atomic_dec(&tmpd->chroot_refcnt);
		tmpd = tmpd->d_parent;
	}
	atomic_dec(&tmpd->chroot_refcnt);

	write_sequnlock(&rename_lock);
	read_sequnlock_excl(&mount_lock);
#endif
}

#ifdef CONFIG_GRKERNSEC_CHROOT_RENAME
/*
 * get_closest_chroot - walk @dentry's ancestry under rename_lock and return
 * the nearest ancestor that is a live chroot root (chroot_refcnt != 0), or
 * NULL if none is found before the filesystem root.
 */
static struct dentry *get_closest_chroot(struct dentry *dentry)
{
	write_seqlock(&rename_lock);
	do {
		if (atomic_read(&dentry->chroot_refcnt)) {
			write_sequnlock(&rename_lock);
			return dentry;
		}
		dentry = dentry->d_parent;
	} while (!IS_ROOT(dentry));
	write_sequnlock(&rename_lock);
	return NULL;
}
#endif

/*
 * gr_bad_chroot_rename - deny renames that would move a path out of a live
 * chroot, which could otherwise be used by a chrooted process to escape.
 *
 * Returns 1 (and logs) if the rename must be rejected, 0 if allowed.
 * Global root outside any chroot is always allowed.
 */
int gr_bad_chroot_rename(struct dentry *olddentry, struct vfsmount *oldmnt,
			 struct dentry *newdentry, struct vfsmount *newmnt)
{
#ifdef CONFIG_GRKERNSEC_CHROOT_RENAME
	struct dentry *chroot;

	if (unlikely(!grsec_enable_chroot_rename))
		return 0;

	if (likely(!proc_is_chrooted(current) && gr_is_global_root(current_uid())))
		return 0;

	chroot = get_closest_chroot(olddentry);

	if (chroot == NULL)
		return 0;

	/* destination still inside the same chroot: harmless */
	if (is_subdir(newdentry, chroot))
		return 0;

	gr_log_fs_generic(GR_DONT_AUDIT, GR_CHROOT_RENAME_MSG, olddentry, oldmnt);

	return 1;
#else
	return 0;
#endif
}

/*
 * gr_set_chroot_entries - mark @task as chrooted (or not) based on @path,
 * its new root.  A task is considered chrooted when its root differs from
 * both init's root and the mount-namespace root; with CHROOT_INITRD the
 * marking is additionally suppressed until init has run, so the initrd
 * pivot is not misclassified as a chroot.
 */
void gr_set_chroot_entries(struct task_struct *task, const struct path *path)
{
#ifdef CONFIG_GRKERNSEC
	if (task_pid_nr(task) > 1 && path->dentry != init_task.fs->root.dentry &&
	    path->dentry != task->nsproxy->mnt_ns->root->mnt.mnt_root
#ifdef CONFIG_GRKERNSEC_CHROOT_INITRD
	    && gr_init_ran
#endif
	    )
		task->gr_is_chrooted = 1;
	else {
#ifdef CONFIG_GRKERNSEC_CHROOT_INITRD
		/* first time init (pid 1) sets a root: record that the real
		   system has started */
		if (task_pid_nr(task) == 1 && !gr_init_ran)
			gr_init_ran = 1;
#endif
		task->gr_is_chrooted = 0;
	}

	task->gr_chroot_dentry = path->dentry;
#endif
	return;
}

/* gr_clear_chroot_entries - reset @task's chroot tracking state. */
void gr_clear_chroot_entries(struct task_struct *task)
{
#ifdef CONFIG_GRKERNSEC
	task->gr_is_chrooted = 0;
	task->gr_chroot_dentry = NULL;
#endif
	return;
}

/*
 * gr_handle_chroot_unix - restrict abstract AF_UNIX connects from a chroot.
 *
 * Denies (returns 0 and logs) when the chrooted current task tries to reach
 * the peer @pid that does not share its root; returns 1 to allow.
 */
int
gr_handle_chroot_unix(const pid_t pid)
{
#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
	struct task_struct *p;

	if (unlikely(!grsec_enable_chroot_unix))
		return 1;

	if (likely(!proc_is_chrooted(current)))
		return 1;

	rcu_read_lock();
	read_lock(&tasklist_lock);
	p = find_task_by_vpid_unrestricted(pid);
	if (unlikely(p && !have_same_root(current, p))) {
		read_unlock(&tasklist_lock);
		rcu_read_unlock();
		gr_log_noargs(GR_DONT_AUDIT, GR_UNIX_CHROOT_MSG);
		return 0;
	}
	read_unlock(&tasklist_lock);
	rcu_read_unlock();
#endif
	return 1;
}

/*
 * gr_handle_chroot_nice - forbid priority raising from inside a chroot.
 * Returns -EPERM (and logs) when denied, 0 when allowed.
 */
int
gr_handle_chroot_nice(void)
{
#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
	if (grsec_enable_chroot_nice && proc_is_chrooted(current)) {
		gr_log_noargs(GR_DONT_AUDIT, GR_NICE_CHROOT_MSG);
		return -EPERM;
	}
#endif
	return 0;
}

/*
 * gr_handle_chroot_setpriority - forbid a chrooted task from raising the
 * priority of @p (niceval below p's current nice means an increase).
 * Returns -EACCES (and logs) when denied, 0 when allowed.
 */
int
gr_handle_chroot_setpriority(struct task_struct *p, const int niceval)
{
#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
	if (grsec_enable_chroot_nice && (niceval < task_nice(p))
	    && proc_is_chrooted(current)) {
		gr_log_str_int(GR_DONT_AUDIT, GR_PRIORITY_CHROOT_MSG, p->comm, task_pid_nr(p));
		return -EACCES;
	}
#endif
	return 0;
}

/*
 * gr_handle_chroot_fowner - F_SETOWN restriction: returns 1 if any task in
 * @pid/@type lives outside the chrooted caller's root (i.e. the fowner
 * change must be denied), 0 otherwise.
 */
int
gr_handle_chroot_fowner(struct pid *pid, enum pid_type type)
{
#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
	struct task_struct *p;
	int ret = 0;
	if (!grsec_enable_chroot_findtask || !proc_is_chrooted(current) || !pid)
		return ret;

	read_lock(&tasklist_lock);
	do_each_pid_task(pid, type, p) {
		if (!have_same_root(current, p)) {
			ret = 1;
			goto out;
		}
	} while_each_pid_task(pid, type, p);
out:
	read_unlock(&tasklist_lock);
	return ret;
#endif
	/* reached only when CHROOT_FINDTASK is compiled out */
	return 0;
}
+ +int +gr_pid_is_chrooted(struct task_struct *p) +{ +#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK + if (!grsec_enable_chroot_findtask || !proc_is_chrooted(current) || p == NULL) + return 0; + + if ((p->exit_state & (EXIT_ZOMBIE | EXIT_DEAD)) || + !have_same_root(current, p)) { + return 1; + } +#endif + return 0; +} + +EXPORT_SYMBOL_GPL(gr_pid_is_chrooted); + +#if defined(CONFIG_GRKERNSEC_CHROOT_DOUBLE) || defined(CONFIG_GRKERNSEC_CHROOT_FCHDIR) +int gr_is_outside_chroot(const struct dentry *u_dentry, const struct vfsmount *u_mnt) +{ + struct path path, currentroot; + int ret = 0; + + path.dentry = (struct dentry *)u_dentry; + path.mnt = (struct vfsmount *)u_mnt; + get_fs_root(current->fs, &currentroot); + if (path_is_under(&path, &currentroot)) + ret = 1; + path_put(&currentroot); + + return ret; +} +#endif + +int +gr_chroot_fchdir(struct dentry *u_dentry, struct vfsmount *u_mnt) +{ +#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR + if (!grsec_enable_chroot_fchdir) + return 1; + + if (!proc_is_chrooted(current)) + return 1; + else if (!gr_is_outside_chroot(u_dentry, u_mnt)) { + gr_log_fs_generic(GR_DONT_AUDIT, GR_CHROOT_FCHDIR_MSG, u_dentry, u_mnt); + return 0; + } +#endif + return 1; +} + +int +gr_chroot_pathat(int dfd, struct dentry *u_dentry, struct vfsmount *u_mnt, unsigned flags) +{ +#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR + struct fd f; + struct path fd_path; + struct path file_path; + + if (!grsec_enable_chroot_fchdir) + return 0; + + if (!proc_is_chrooted(current) || dfd == -1 || dfd == AT_FDCWD) + return 0; + + if (flags & LOOKUP_RCU) + return -ECHILD; + + f = fdget_raw(dfd); + if (!f.file) + return 0; + + fd_path = f.file->f_path; + path_get(&fd_path); + fdput(f); + + file_path.dentry = u_dentry; + file_path.mnt = u_mnt; + + if (!gr_is_outside_chroot(u_dentry, u_mnt) && !path_is_under(&file_path, &fd_path)) { + path_put(&fd_path); + gr_log_fs_generic(GR_DONT_AUDIT, GR_CHROOT_PATHAT_MSG, u_dentry, u_mnt); + return -ENOENT; + } + path_put(&fd_path); +#endif + return 0; +} + +int
+gr_chroot_fhandle(void) +{ +#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR + if (!grsec_enable_chroot_fchdir) + return 1; + + if (!proc_is_chrooted(current)) + return 1; + else { + gr_log_noargs(GR_DONT_AUDIT, GR_CHROOT_FHANDLE_MSG); + return 0; + } +#endif + return 1; +} + +int +gr_chroot_shmat(const pid_t shm_cprid, const pid_t shm_lapid, + const u64 shm_createtime) +{ +#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT + struct task_struct *p; + + if (unlikely(!grsec_enable_chroot_shmat)) + return 1; + + if (likely(!proc_is_chrooted(current))) + return 1; + + rcu_read_lock(); + read_lock(&tasklist_lock); + + if ((p = find_task_by_vpid_unrestricted(shm_cprid))) { + if (time_before_eq64(p->start_time, shm_createtime)) { + if (have_same_root(current, p)) { + goto allow; + } else { + read_unlock(&tasklist_lock); + rcu_read_unlock(); + gr_log_noargs(GR_DONT_AUDIT, GR_SHMAT_CHROOT_MSG); + return 0; + } + } + /* creator exited, pid reuse, fall through to next check */ + } + if ((p = find_task_by_vpid_unrestricted(shm_lapid))) { + if (unlikely(!have_same_root(current, p))) { + read_unlock(&tasklist_lock); + rcu_read_unlock(); + gr_log_noargs(GR_DONT_AUDIT, GR_SHMAT_CHROOT_MSG); + return 0; + } + } + +allow: + read_unlock(&tasklist_lock); + rcu_read_unlock(); +#endif + return 1; +} + +void +gr_log_chroot_exec(const struct dentry *dentry, const struct vfsmount *mnt) +{ +#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG + if (grsec_enable_chroot_execlog && proc_is_chrooted(current)) + gr_log_fs_generic(GR_DO_AUDIT, GR_EXEC_CHROOT_MSG, dentry, mnt); +#endif + return; +} + +int +gr_handle_chroot_mknod(const struct dentry *dentry, + const struct vfsmount *mnt, const int mode) +{ +#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD + if (grsec_enable_chroot_mknod && !S_ISFIFO(mode) && !S_ISREG(mode) && + proc_is_chrooted(current)) { + gr_log_fs_generic(GR_DONT_AUDIT, GR_MKNOD_CHROOT_MSG, dentry, mnt); + return -EPERM; + } +#endif + return 0; +} + +int +gr_handle_chroot_mount(const struct dentry *dentry, + const struct 
vfsmount *mnt, const char *dev_name) +{ +#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT + if (grsec_enable_chroot_mount && proc_is_chrooted(current)) { + gr_log_str_fs(GR_DONT_AUDIT, GR_MOUNT_CHROOT_MSG, dev_name ? dev_name : "none", dentry, mnt); + return -EPERM; + } +#endif + return 0; +} + +int +gr_handle_chroot_pivot(void) +{ +#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT + if (grsec_enable_chroot_pivot && proc_is_chrooted(current)) { + gr_log_noargs(GR_DONT_AUDIT, GR_PIVOT_CHROOT_MSG); + return -EPERM; + } +#endif + return 0; +} + +int +gr_handle_chroot_chroot(const struct dentry *dentry, const struct vfsmount *mnt) +{ +#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE + if (grsec_enable_chroot_double && proc_is_chrooted(current) && + !gr_is_outside_chroot(dentry, mnt)) { + gr_log_fs_generic(GR_DONT_AUDIT, GR_CHROOT_CHROOT_MSG, dentry, mnt); + return -EPERM; + } +#endif + return 0; +} + +extern const char *captab_log[]; +extern int captab_log_entries; + +int +gr_task_chroot_is_capable(const struct task_struct *task, const struct cred *cred, const int cap) +{ +#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS + if (grsec_enable_chroot_caps && proc_is_chrooted(task)) { + kernel_cap_t chroot_caps = GR_CHROOT_CAPS; + if (cap_raised(chroot_caps, cap)) { + if (cap_raised(cred->cap_effective, cap) && cap < captab_log_entries) { + gr_log_cap(GR_DONT_AUDIT, GR_CAP_CHROOT_MSG, task, captab_log[cap]); + } + return 0; + } + } +#endif + return 1; +} + +int +gr_chroot_is_capable(const int cap) +{ +#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS + return gr_task_chroot_is_capable(current, current_cred(), cap); +#endif + return 1; +} + +int +gr_task_chroot_is_capable_nolog(const struct task_struct *task, const int cap) +{ +#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS + if (grsec_enable_chroot_caps && proc_is_chrooted(task)) { + kernel_cap_t chroot_caps = GR_CHROOT_CAPS; + if (cap_raised(chroot_caps, cap)) { + return 0; + } + } +#endif + return 1; +} + +int +gr_chroot_is_capable_nolog(const int cap) +{ +#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS + 
return gr_task_chroot_is_capable_nolog(current, cap); +#endif + return 1; +} + +int +gr_handle_chroot_sysctl(const int op) +{ +#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL + if (grsec_enable_chroot_sysctl && (op & MAY_WRITE) && + proc_is_chrooted(current)) + return -EACCES; +#endif + return 0; +} + +void +gr_handle_chroot_chdir(const struct path *path) +{ +#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR + if (grsec_enable_chroot_chdir) + set_fs_pwd(current->fs, path); +#endif + return; +} + +int +gr_handle_chroot_chmod(const struct dentry *dentry, + const struct vfsmount *mnt, const int mode) +{ +#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD + /* allow chmod +s on directories, but not files */ + if (grsec_enable_chroot_chmod && !d_is_dir(dentry) && + ((mode & S_ISUID) || ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP))) && + proc_is_chrooted(current)) { + gr_log_fs_generic(GR_DONT_AUDIT, GR_CHMOD_CHROOT_MSG, dentry, mnt); + return -EPERM; + } +#endif + return 0; +} diff --git a/grsecurity/grsec_disabled.c b/grsecurity/grsec_disabled.c new file mode 100644 index 0000000000..ba8d9971ed --- /dev/null +++ b/grsecurity/grsec_disabled.c @@ -0,0 +1,445 @@ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#ifdef CONFIG_PAX_HAVE_ACL_FLAGS +void +pax_set_initial_flags(struct linux_binprm *bprm) +{ + return; +} +#endif + +#ifdef CONFIG_SYSCTL +__u32 +gr_handle_sysctl(const struct ctl_table * table, const int op) +{ + return 0; +} +#endif + +#ifdef CONFIG_TASKSTATS +int gr_is_taskstats_denied(int pid) +{ + return 0; +} +#endif + +int +gr_acl_is_enabled(void) +{ + return 0; +} + +int +gr_learn_cap(const struct task_struct *task, const struct cred *cred, const int cap, bool log) +{ + return 0; +} + +void +gr_handle_proc_create(const struct dentry *dentry, const struct inode *inode) +{ + return; +} + +int +gr_handle_rawio(const struct inode *inode) +{ + return 0; +} + +void +gr_acl_handle_psacct(struct task_struct *task, const long code) 
+{ + return; +} + +int +gr_handle_ptrace(struct task_struct *task, const long request) +{ + return 0; +} + +int +gr_handle_proc_ptrace(struct task_struct *task) +{ + return 0; +} + +int +gr_set_acls(const int type) +{ + return 0; +} + +int +gr_check_hidden_task(const struct task_struct *tsk) +{ + return 0; +} + +int +gr_check_protected_task(const struct task_struct *task) +{ + return 0; +} + +int +gr_check_protected_task_fowner(struct pid *pid, enum pid_type type) +{ + return 0; +} + +void +gr_copy_label(struct task_struct *tsk) +{ + return; +} + +void +gr_set_pax_flags(struct task_struct *task) +{ + return; +} + +int +gr_set_proc_label(const struct dentry *dentry, const struct vfsmount *mnt, + const int unsafe_share) +{ + return 0; +} + +void +gr_handle_delete(const u64 ino, const dev_t dev) +{ + return; +} + +void +gr_handle_create(const struct dentry *dentry, const struct vfsmount *mnt) +{ + return; +} + +void +gr_handle_crash(struct task_struct *task, const int sig) +{ + return; +} + +int +gr_check_crash_exec(const struct file *filp) +{ + return 0; +} + +int +gr_check_crash_uid(const kuid_t uid) +{ + return 0; +} + +void +gr_handle_rename(struct inode *old_dir, struct inode *new_dir, + struct dentry *old_dentry, + struct dentry *new_dentry, + struct vfsmount *mnt, const __u8 replace, unsigned int flags) +{ + return; +} + +int +gr_search_socket(const int family, const int type, const int protocol) +{ + return 1; +} + +int +gr_search_connectbind(const int mode, const struct socket *sock, + const struct sockaddr_in *addr) +{ + return 0; +} + +void +gr_handle_alertkill(struct task_struct *task) +{ + return; +} + +__u32 +gr_acl_handle_execve(const struct dentry * dentry, const struct vfsmount * mnt) +{ + return 1; +} + +__u32 +gr_acl_handle_hidden_file(const struct dentry * dentry, + const struct vfsmount * mnt) +{ + return 1; +} + +__u32 +gr_acl_handle_open(const struct dentry * dentry, const struct vfsmount * mnt, + int acc_mode) +{ + return 1; +} + +__u32 
+gr_acl_handle_rmdir(const struct dentry * dentry, const struct vfsmount * mnt) +{ + return 1; +} + +__u32 +gr_acl_handle_unlink(const struct dentry * dentry, const struct vfsmount * mnt) +{ + return 1; +} + +int +gr_acl_handle_mmap(const struct file *file, const unsigned long prot, + unsigned int *vm_flags) +{ + return 1; +} + +__u32 +gr_acl_handle_truncate(const struct dentry * dentry, + const struct vfsmount * mnt) +{ + return 1; +} + +__u32 +gr_acl_handle_utime(const struct dentry * dentry, const struct vfsmount * mnt) +{ + return 1; +} + +__u32 +gr_acl_handle_access(const struct dentry * dentry, + const struct vfsmount * mnt, const int fmode) +{ + return 1; +} + +__u32 +gr_acl_handle_chmod(const struct dentry * dentry, const struct vfsmount * mnt, + umode_t *mode) +{ + return 1; +} + +__u32 +gr_acl_handle_chown(const struct dentry * dentry, const struct vfsmount * mnt) +{ + return 1; +} + +__u32 +gr_acl_handle_setxattr(const struct dentry * dentry, const struct vfsmount * mnt) +{ + return 1; +} + +__u32 +gr_acl_handle_removexattr(const struct dentry * dentry, const struct vfsmount * mnt) +{ + return 1; +} + +void +grsecurity_init(void) +{ + return; +} + +umode_t gr_acl_umask(void) +{ + return 0; +} + +__u32 +gr_acl_handle_mknod(const struct dentry * new_dentry, + const struct dentry * parent_dentry, + const struct vfsmount * parent_mnt, + const int mode) +{ + return 1; +} + +__u32 +gr_acl_handle_mkdir(const struct dentry * new_dentry, + const struct dentry * parent_dentry, + const struct vfsmount * parent_mnt) +{ + return 1; +} + +__u32 +gr_acl_handle_symlink(const struct dentry * new_dentry, + const struct dentry * parent_dentry, + const struct vfsmount * parent_mnt, const struct filename *from) +{ + return 1; +} + +__u32 +gr_acl_handle_link(const struct dentry * new_dentry, + const struct dentry * parent_dentry, + const struct vfsmount * parent_mnt, + const struct dentry * old_dentry, + const struct vfsmount * old_mnt, const struct filename *to) +{ + return 
1; +} + +int +gr_acl_handle_rename(const struct dentry *new_dentry, + const struct dentry *parent_dentry, + const struct vfsmount *parent_mnt, + const struct dentry *old_dentry, + const struct inode *old_parent_inode, + const struct vfsmount *old_mnt, const struct filename *newname, + unsigned int flags) +{ + return 0; +} + +int +gr_acl_handle_filldir(const struct file *file, const char *name, + const int namelen, const u64 ino) +{ + return 1; +} + +int +gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid, + const u64 shm_createtime, const kuid_t cuid, const int shmid) +{ + return 1; +} + +int +gr_search_bind(const struct socket *sock, const struct sockaddr_in *addr) +{ + return 0; +} + +int +gr_search_accept(const struct socket *sock) +{ + return 0; +} + +int +gr_search_listen(const struct socket *sock) +{ + return 0; +} + +int +gr_search_connect(const struct socket *sock, const struct sockaddr_in *addr) +{ + return 0; +} + +__u32 +gr_acl_handle_unix(const struct dentry * dentry, const struct vfsmount * mnt) +{ + return 1; +} + +__u32 +gr_acl_handle_creat(const struct dentry * dentry, + const struct dentry * p_dentry, + const struct vfsmount * p_mnt, int open_flags, int acc_mode, + const int imode) +{ + return 1; +} + +void +gr_acl_handle_exit(void) +{ + return; +} + +int +gr_acl_handle_mprotect(const struct file *file, const unsigned long prot) +{ + return 1; +} + +void +gr_set_role_label(const kuid_t uid, const kgid_t gid) +{ + return; +} + +int +gr_acl_handle_procpidmem(const struct task_struct *task) +{ + return 0; +} + +int +gr_search_udp_recvmsg(struct sock *sk, const struct sk_buff *skb) +{ + return 0; +} + +int +gr_search_udp_sendmsg(struct sock *sk, struct sockaddr_in *addr) +{ + return 0; +} + +int +gr_check_user_change(kuid_t real, kuid_t effective, kuid_t fs) +{ + return 0; +} + +int +gr_check_group_change(kgid_t real, kgid_t effective, kgid_t fs) +{ + return 0; +} + +int gr_acl_enable_at_secure(void) +{ + return 0; +} + +dev_t 
gr_get_dev_from_dentry(struct dentry *dentry) +{ + return d_backing_inode(dentry)->i_sb->s_dev; +} + +u64 gr_get_ino_from_dentry(struct dentry *dentry) +{ + return d_backing_inode(dentry)->i_ino; +} + +void gr_put_exec_file(struct task_struct *task) +{ + return; +} + +#ifdef CONFIG_SECURITY +EXPORT_SYMBOL_GPL(gr_check_user_change); +EXPORT_SYMBOL_GPL(gr_check_group_change); +#endif diff --git a/grsecurity/grsec_exec.c b/grsecurity/grsec_exec.c new file mode 100644 index 0000000000..808006ec24 --- /dev/null +++ b/grsecurity/grsec_exec.c @@ -0,0 +1,188 @@ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#ifdef CONFIG_GRKERNSEC_EXECLOG +static char gr_exec_arg_buf[132]; +static DEFINE_MUTEX(gr_exec_arg_mutex); +#endif + +struct user_arg_ptr { +#ifdef CONFIG_COMPAT + bool is_compat; +#endif + union { + const char __user *const __user *native; +#ifdef CONFIG_COMPAT + const compat_uptr_t __user *compat; +#endif + } ptr; +}; + +extern const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr); + +void +gr_handle_exec_args(struct linux_binprm *bprm, struct user_arg_ptr argv) +{ +#ifdef CONFIG_GRKERNSEC_EXECLOG + char *grarg = gr_exec_arg_buf; + unsigned int i, x, execlen = 0; + char c; + + if (!((grsec_enable_execlog && grsec_enable_group && + in_group_p(grsec_audit_gid)) + || (grsec_enable_execlog && !grsec_enable_group))) + return; + + mutex_lock(&gr_exec_arg_mutex); + memset(grarg, 0, sizeof(gr_exec_arg_buf)); + + for (i = 0; i < bprm->argc && execlen < 128; i++) { + const char __user *p; + unsigned int len; + + p = get_user_arg_ptr(argv, i); + if (IS_ERR(p)) + goto log; + + len = strnlen_user(p, 128 - execlen); + if (len > 128 - execlen) + len = 128 - execlen; + else if (len > 0) + len--; + if (copy_from_user(grarg + execlen, p, len)) + goto log; + + /* rewrite unprintable characters */ + for (x = 0; x < len; x++) { + c = *(grarg + execlen + x); + if (c < 32 || c > 126) + 
*(grarg + execlen + x) = ' '; + } + + execlen += len; + *(grarg + execlen) = ' '; + *(grarg + execlen + 1) = '\0'; + execlen++; + } + + log: + gr_log_fs_str(GR_DO_AUDIT, GR_EXEC_AUDIT_MSG, bprm->file->f_path.dentry, + bprm->file->f_path.mnt, grarg); + mutex_unlock(&gr_exec_arg_mutex); +#endif + return; +} + +#ifdef CONFIG_GRKERNSEC +extern int gr_acl_is_capable(const int cap); +extern int gr_acl_is_capable_nolog(const int cap); +extern int gr_task_acl_is_capable(const struct task_struct *task, const struct cred *cred, const int cap, bool log); +extern int gr_chroot_is_capable(const int cap); +extern int gr_chroot_is_capable_nolog(const int cap); +extern int gr_task_chroot_is_capable(const struct task_struct *task, const struct cred *cred, const int cap); +extern int gr_task_chroot_is_capable_nolog(const struct task_struct *task, const int cap); +#endif + +const char *captab_log[] = { + "CAP_CHOWN", + "CAP_DAC_OVERRIDE", + "CAP_DAC_READ_SEARCH", + "CAP_FOWNER", + "CAP_FSETID", + "CAP_KILL", + "CAP_SETGID", + "CAP_SETUID", + "CAP_SETPCAP", + "CAP_LINUX_IMMUTABLE", + "CAP_NET_BIND_SERVICE", + "CAP_NET_BROADCAST", + "CAP_NET_ADMIN", + "CAP_NET_RAW", + "CAP_IPC_LOCK", + "CAP_IPC_OWNER", + "CAP_SYS_MODULE", + "CAP_SYS_RAWIO", + "CAP_SYS_CHROOT", + "CAP_SYS_PTRACE", + "CAP_SYS_PACCT", + "CAP_SYS_ADMIN", + "CAP_SYS_BOOT", + "CAP_SYS_NICE", + "CAP_SYS_RESOURCE", + "CAP_SYS_TIME", + "CAP_SYS_TTY_CONFIG", + "CAP_MKNOD", + "CAP_LEASE", + "CAP_AUDIT_WRITE", + "CAP_AUDIT_CONTROL", + "CAP_SETFCAP", + "CAP_MAC_OVERRIDE", + "CAP_MAC_ADMIN", + "CAP_SYSLOG", + "CAP_WAKE_ALARM", + "CAP_BLOCK_SUSPEND", + "CAP_AUDIT_READ" +}; + +int captab_log_entries = sizeof(captab_log)/sizeof(captab_log[0]); + +int gr_is_capable(const int cap) +{ +#ifdef CONFIG_GRKERNSEC + if (gr_acl_is_capable(cap) && gr_chroot_is_capable(cap)) + return 1; + return 0; +#else + return 1; +#endif +} + +int gr_task_is_capable(const struct task_struct *task, const struct cred *cred, const int cap) +{ +#ifdef 
CONFIG_GRKERNSEC + if (gr_task_acl_is_capable(task, cred, cap, true) && gr_task_chroot_is_capable(task, cred, cap)) + return 1; + return 0; +#else + return 1; +#endif +} + +int gr_is_capable_nolog(const int cap) +{ +#ifdef CONFIG_GRKERNSEC + if (gr_acl_is_capable_nolog(cap) && gr_chroot_is_capable_nolog(cap)) + return 1; + return 0; +#else + return 1; +#endif +} + +int gr_task_is_capable_nolog(const struct task_struct *task, const struct cred *cred, const int cap) +{ +#ifdef CONFIG_GRKERNSEC + if (gr_task_acl_is_capable(task, cred, cap, false) && gr_task_chroot_is_capable_nolog(task, cap)) + return 1; + return 0; +#else + return 1; +#endif +} + +EXPORT_SYMBOL_GPL(gr_is_capable); +EXPORT_SYMBOL_GPL(gr_is_capable_nolog); +EXPORT_SYMBOL_GPL(gr_task_is_capable); +EXPORT_SYMBOL_GPL(gr_task_is_capable_nolog); diff --git a/grsecurity/grsec_fifo.c b/grsecurity/grsec_fifo.c new file mode 100644 index 0000000000..cdec49b4f5 --- /dev/null +++ b/grsecurity/grsec_fifo.c @@ -0,0 +1,26 @@ +#include +#include +#include +#include +#include + +int +gr_handle_fifo(const struct dentry *dentry, const struct vfsmount *mnt, + const struct dentry *dir, const int flag, const int acc_mode) +{ +#ifdef CONFIG_GRKERNSEC_FIFO + const struct cred *cred = current_cred(); + struct inode *inode = d_backing_inode(dentry); + struct inode *dir_inode = d_backing_inode(dir); + + if (grsec_enable_fifo && S_ISFIFO(inode->i_mode) && + !(flag & O_EXCL) && (dir_inode->i_mode & S_ISVTX) && + !uid_eq(inode->i_uid, dir_inode->i_uid) && + !uid_eq(cred->fsuid, inode->i_uid)) { + if (!inode_permission(inode, acc_mode)) + gr_log_fs_int2(GR_DONT_AUDIT, GR_FIFO_MSG, dentry, mnt, GR_GLOBAL_UID(inode->i_uid), GR_GLOBAL_GID(inode->i_gid)); + return -EACCES; + } +#endif + return 0; +} diff --git a/grsecurity/grsec_fork.c b/grsecurity/grsec_fork.c new file mode 100644 index 0000000000..8ca18bfafe --- /dev/null +++ b/grsecurity/grsec_fork.c @@ -0,0 +1,23 @@ +#include +#include +#include +#include +#include + +void 
+gr_log_forkfail(const int retval) +{ +#ifdef CONFIG_GRKERNSEC_FORKFAIL + if (grsec_enable_forkfail && (retval == -EAGAIN || retval == -ENOMEM)) { + switch (retval) { + case -EAGAIN: + gr_log_str(GR_DONT_AUDIT, GR_FAILFORK_MSG, "EAGAIN"); + break; + case -ENOMEM: + gr_log_str(GR_DONT_AUDIT, GR_FAILFORK_MSG, "ENOMEM"); + break; + } + } +#endif + return; +} diff --git a/grsecurity/grsec_init.c b/grsecurity/grsec_init.c new file mode 100644 index 0000000000..68222080fd --- /dev/null +++ b/grsecurity/grsec_init.c @@ -0,0 +1,294 @@ +#include +#include +#include +#include +#include +#include +#include +#include + +int grsec_enable_ptrace_readexec __read_only; +int grsec_enable_setxid __read_only; +int grsec_enable_symlinkown __read_only; +kgid_t grsec_symlinkown_gid __read_only; +int grsec_enable_brute __read_only; +int grsec_enable_link __read_only; +int grsec_enable_dmesg __read_only; +int grsec_enable_harden_ptrace __read_only; +int grsec_enable_harden_ipc __read_only; +int grsec_enable_fifo __read_only; +int grsec_enable_execlog __read_only; +int grsec_enable_signal __read_only; +int grsec_enable_forkfail __read_only; +int grsec_enable_audit_ptrace __read_only; +int grsec_enable_time __read_only; +int grsec_enable_group __read_only; +kgid_t grsec_audit_gid __read_only; +int grsec_enable_chdir __read_only; +int grsec_enable_mount __read_only; +int grsec_enable_rofs __read_only; +int grsec_deny_new_usb __read_only; +int grsec_enable_chroot_findtask __read_only; +int grsec_enable_chroot_mount __read_only; +int grsec_enable_chroot_shmat __read_only; +int grsec_enable_chroot_fchdir __read_only; +int grsec_enable_chroot_double __read_only; +int grsec_enable_chroot_pivot __read_only; +int grsec_enable_chroot_chdir __read_only; +int grsec_enable_chroot_chmod __read_only; +int grsec_enable_chroot_mknod __read_only; +int grsec_enable_chroot_nice __read_only; +int grsec_enable_chroot_execlog __read_only; +int grsec_enable_chroot_caps __read_only; +int grsec_enable_chroot_rename 
__read_only; +int grsec_enable_chroot_sysctl __read_only; +int grsec_enable_chroot_unix __read_only; +int grsec_enable_tpe __read_only; +kgid_t grsec_tpe_gid __read_only; +int grsec_enable_blackhole __read_only; +#ifdef CONFIG_IPV6_MODULE +EXPORT_SYMBOL_GPL(grsec_enable_blackhole); +#endif +int grsec_lastack_retries __read_only; +int grsec_enable_tpe_all __read_only; +int grsec_enable_tpe_invert __read_only; +int grsec_enable_socket_all __read_only; +kgid_t grsec_socket_all_gid __read_only; +int grsec_enable_socket_client __read_only; +kgid_t grsec_socket_client_gid __read_only; +int grsec_enable_socket_server __read_only; +kgid_t grsec_socket_server_gid __read_only; +int grsec_resource_logging __read_only; +int grsec_disable_privio __read_only; +int grsec_enable_log_rwxmaps __read_only; +int grsec_enable_harden_tty __read_only; +int grsec_lock __read_only; + +DEFINE_SPINLOCK(grsec_alert_lock); +unsigned long grsec_alert_wtime = 0; +unsigned long grsec_alert_fyet = 0; + +DEFINE_SPINLOCK(grsec_audit_lock); + +DEFINE_RWLOCK(grsec_exec_file_lock); + +char *gr_shared_page[4]; + +char *gr_alert_log_fmt; +char *gr_audit_log_fmt; +char *gr_alert_log_buf; +char *gr_audit_log_buf; + +extern struct gr_arg *gr_usermode; +extern unsigned char *gr_system_salt; +extern unsigned char *gr_system_sum; + +void __init +grsecurity_init(void) +{ + int j; + /* create the per-cpu shared pages */ + +#ifdef CONFIG_X86 + memset((char *)(0x41a + PAGE_OFFSET), 0, 36); +#endif + + for (j = 0; j < 4; j++) { + gr_shared_page[j] = (char *)__alloc_percpu(PAGE_SIZE, __alignof__(unsigned long long)); + if (gr_shared_page[j] == NULL) { + panic("Unable to allocate grsecurity shared page"); + return; + } + } + + /* allocate log buffers */ + gr_alert_log_fmt = kmalloc(512, GFP_KERNEL); + if (!gr_alert_log_fmt) { + panic("Unable to allocate grsecurity alert log format buffer"); + return; + } + gr_audit_log_fmt = kmalloc(512, GFP_KERNEL); + if (!gr_audit_log_fmt) { + panic("Unable to allocate grsecurity 
audit log format buffer"); + return; + } + gr_alert_log_buf = (char *) get_zeroed_page(GFP_KERNEL); + if (!gr_alert_log_buf) { + panic("Unable to allocate grsecurity alert log buffer"); + return; + } + gr_audit_log_buf = (char *) get_zeroed_page(GFP_KERNEL); + if (!gr_audit_log_buf) { + panic("Unable to allocate grsecurity audit log buffer"); + return; + } + + /* allocate memory for authentication structure */ + gr_usermode = kmalloc(sizeof(struct gr_arg), GFP_KERNEL); + gr_system_salt = kmalloc(GR_SALT_LEN, GFP_KERNEL); + gr_system_sum = kmalloc(GR_SHA_LEN, GFP_KERNEL); + + if (!gr_usermode || !gr_system_salt || !gr_system_sum) { + panic("Unable to allocate grsecurity authentication structure"); + return; + } + +#ifdef CONFIG_GRKERNSEC_IO +#if !defined(CONFIG_GRKERNSEC_SYSCTL_DISTRO) + grsec_disable_privio = 1; +#elif defined(CONFIG_GRKERNSEC_SYSCTL_ON) + grsec_disable_privio = 1; +#else + grsec_disable_privio = 0; +#endif +#endif + +#ifdef CONFIG_GRKERNSEC_TPE_INVERT + /* for backward compatibility, tpe_invert always defaults to on if + enabled in the kernel + */ + grsec_enable_tpe_invert = 1; +#endif + +#if !defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_SYSCTL_ON) +#ifndef CONFIG_GRKERNSEC_SYSCTL + grsec_lock = 1; +#endif + +#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG + grsec_enable_log_rwxmaps = 1; +#endif +#ifdef CONFIG_GRKERNSEC_AUDIT_GROUP + grsec_enable_group = 1; + grsec_audit_gid = KGIDT_INIT(CONFIG_GRKERNSEC_AUDIT_GID); +#endif +#ifdef CONFIG_GRKERNSEC_PTRACE_READEXEC + grsec_enable_ptrace_readexec = 1; +#endif +#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR + grsec_enable_chdir = 1; +#endif +#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE + grsec_enable_harden_ptrace = 1; +#endif +#ifdef CONFIG_GRKERNSEC_HARDEN_IPC + grsec_enable_harden_ipc = 1; +#endif +#ifdef CONFIG_GRKERNSEC_HARDEN_TTY + grsec_enable_harden_tty = 1; +#endif +#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT + grsec_enable_mount = 1; +#endif +#ifdef CONFIG_GRKERNSEC_LINK + grsec_enable_link = 1; +#endif +#ifdef 
CONFIG_GRKERNSEC_BRUTE + grsec_enable_brute = 1; +#endif +#ifdef CONFIG_GRKERNSEC_DMESG + grsec_enable_dmesg = 1; +#endif +#ifdef CONFIG_GRKERNSEC_BLACKHOLE + grsec_enable_blackhole = 1; + grsec_lastack_retries = 4; +#endif +#ifdef CONFIG_GRKERNSEC_FIFO + grsec_enable_fifo = 1; +#endif +#ifdef CONFIG_GRKERNSEC_EXECLOG + grsec_enable_execlog = 1; +#endif +#ifdef CONFIG_GRKERNSEC_SETXID + grsec_enable_setxid = 1; +#endif +#ifdef CONFIG_GRKERNSEC_SIGNAL + grsec_enable_signal = 1; +#endif +#ifdef CONFIG_GRKERNSEC_FORKFAIL + grsec_enable_forkfail = 1; +#endif +#ifdef CONFIG_GRKERNSEC_TIME + grsec_enable_time = 1; +#endif +#ifdef CONFIG_GRKERNSEC_RESLOG + grsec_resource_logging = 1; +#endif +#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK + grsec_enable_chroot_findtask = 1; +#endif +#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX + grsec_enable_chroot_unix = 1; +#endif +#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT + grsec_enable_chroot_mount = 1; +#endif +#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR + grsec_enable_chroot_fchdir = 1; +#endif +#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT + grsec_enable_chroot_shmat = 1; +#endif +#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE + grsec_enable_audit_ptrace = 1; +#endif +#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE + grsec_enable_chroot_double = 1; +#endif +#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT + grsec_enable_chroot_pivot = 1; +#endif +#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR + grsec_enable_chroot_chdir = 1; +#endif +#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD + grsec_enable_chroot_chmod = 1; +#endif +#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD + grsec_enable_chroot_mknod = 1; +#endif +#ifdef CONFIG_GRKERNSEC_CHROOT_NICE + grsec_enable_chroot_nice = 1; +#endif +#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG + grsec_enable_chroot_execlog = 1; +#endif +#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS + grsec_enable_chroot_caps = 1; +#endif +#ifdef CONFIG_GRKERNSEC_CHROOT_RENAME + grsec_enable_chroot_rename = 1; +#endif +#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL + grsec_enable_chroot_sysctl = 1; +#endif +#ifdef 
CONFIG_GRKERNSEC_SYMLINKOWN + grsec_enable_symlinkown = 1; + grsec_symlinkown_gid = KGIDT_INIT(CONFIG_GRKERNSEC_SYMLINKOWN_GID); +#endif +#ifdef CONFIG_GRKERNSEC_TPE + grsec_enable_tpe = 1; + grsec_tpe_gid = KGIDT_INIT(CONFIG_GRKERNSEC_TPE_GID); +#ifdef CONFIG_GRKERNSEC_TPE_ALL + grsec_enable_tpe_all = 1; +#endif +#endif +#ifdef CONFIG_GRKERNSEC_SOCKET_ALL + grsec_enable_socket_all = 1; + grsec_socket_all_gid = KGIDT_INIT(CONFIG_GRKERNSEC_SOCKET_ALL_GID); +#endif +#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT + grsec_enable_socket_client = 1; + grsec_socket_client_gid = KGIDT_INIT(CONFIG_GRKERNSEC_SOCKET_CLIENT_GID); +#endif +#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER + grsec_enable_socket_server = 1; + grsec_socket_server_gid = KGIDT_INIT(CONFIG_GRKERNSEC_SOCKET_SERVER_GID); +#endif +#endif +#ifdef CONFIG_GRKERNSEC_DENYUSB_FORCE + grsec_deny_new_usb = 1; +#endif + + return; +} diff --git a/grsecurity/grsec_ipc.c b/grsecurity/grsec_ipc.c new file mode 100644 index 0000000000..6a8ed69052 --- /dev/null +++ b/grsecurity/grsec_ipc.c @@ -0,0 +1,48 @@ +#include +#include +#include +#include +#include +#include +#include +#include + +int +gr_ipc_permitted(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp, int requested_mode, int granted_mode) +{ +#ifdef CONFIG_GRKERNSEC_HARDEN_IPC + int write; + int orig_granted_mode; + kuid_t euid; + kgid_t egid; + + if (!grsec_enable_harden_ipc) + return 1; + + euid = current_euid(); + egid = current_egid(); + + write = requested_mode & 00002; + orig_granted_mode = ipcp->mode; + + if (uid_eq(euid, ipcp->cuid) || uid_eq(euid, ipcp->uid)) + orig_granted_mode >>= 6; + else { + /* if likely wrong permissions, lock to user */ + if (orig_granted_mode & 0007) + orig_granted_mode = 0; + /* otherwise do a egid-only check */ + else if (gid_eq(egid, ipcp->cgid) || gid_eq(egid, ipcp->gid)) + orig_granted_mode >>= 3; + /* otherwise, no access */ + else + orig_granted_mode = 0; + } + if (!(requested_mode & ~granted_mode & 0007) && (requested_mode & 
~orig_granted_mode & 0007) && + !ns_capable_noaudit(ns->user_ns, CAP_IPC_OWNER)) { + gr_log_str_int(GR_DONT_AUDIT, GR_IPC_DENIED_MSG, write ? "write" : "read", GR_GLOBAL_UID(ipcp->cuid)); + return 0; + } +#endif + return 1; +} diff --git a/grsecurity/grsec_link.c b/grsecurity/grsec_link.c new file mode 100644 index 0000000000..84c44a03ca --- /dev/null +++ b/grsecurity/grsec_link.c @@ -0,0 +1,65 @@ +#include +#include +#include +#include +#include + +int gr_get_symlinkown_enabled(void) +{ +#ifdef CONFIG_GRKERNSEC_SYMLINKOWN + if (grsec_enable_symlinkown && in_group_p(grsec_symlinkown_gid)) + return 1; +#endif + return 0; +} + +int gr_handle_symlink_owner(const struct path *link, const struct inode *target) +{ +#ifdef CONFIG_GRKERNSEC_SYMLINKOWN + const struct inode *link_inode = d_backing_inode(link->dentry); + + if (target && !uid_eq(link_inode->i_uid, target->i_uid)) { + gr_log_fs_int2(GR_DONT_AUDIT, GR_SYMLINKOWNER_MSG, link->dentry, link->mnt, GR_GLOBAL_UID(link_inode->i_uid), GR_GLOBAL_UID(target->i_uid)); + return 1; + } +#endif + return 0; +} + +int +gr_handle_follow_link(const struct dentry *dentry, const struct vfsmount *mnt) +{ +#ifdef CONFIG_GRKERNSEC_LINK + struct inode *inode = d_backing_inode(dentry); + struct inode *parent = d_backing_inode(dentry->d_parent); + const struct cred *cred = current_cred(); + + if (grsec_enable_link && d_is_symlink(dentry) && + (parent->i_mode & S_ISVTX) && !uid_eq(parent->i_uid, inode->i_uid) && + (parent->i_mode & S_IWOTH) && !uid_eq(cred->fsuid, inode->i_uid)) { + gr_log_fs_int2(GR_DONT_AUDIT, GR_SYMLINK_MSG, dentry, mnt, GR_GLOBAL_UID(inode->i_uid), GR_GLOBAL_GID(inode->i_gid)); + return -EACCES; + } +#endif + return 0; +} + +int +gr_handle_hardlink(const struct dentry *dentry, + const struct vfsmount *mnt, + const struct filename *to) +{ +#ifdef CONFIG_GRKERNSEC_LINK + struct inode *inode = d_backing_inode(dentry); + const struct cred *cred = current_cred(); + + if (grsec_enable_link && !uid_eq(cred->fsuid, 
inode->i_uid) && + (!d_is_reg(dentry) || is_privileged_binary(dentry) || + (inode_permission(inode, MAY_READ | MAY_WRITE))) && + !capable(CAP_FOWNER) && gr_is_global_nonroot(cred->uid)) { + gr_log_fs_int2_str(GR_DONT_AUDIT, GR_HARDLINK_MSG, dentry, mnt, GR_GLOBAL_UID(inode->i_uid), GR_GLOBAL_GID(inode->i_gid), to->name); + return -EPERM; + } +#endif + return 0; +} diff --git a/grsecurity/grsec_log.c b/grsecurity/grsec_log.c new file mode 100644 index 0000000000..a24b3385dc --- /dev/null +++ b/grsecurity/grsec_log.c @@ -0,0 +1,340 @@ +#include +#include +#include +#include +#include +#include +#include + +#ifdef CONFIG_TREE_PREEMPT_RCU +#define DISABLE_PREEMPT() preempt_disable() +#define ENABLE_PREEMPT() preempt_enable() +#else +#define DISABLE_PREEMPT() +#define ENABLE_PREEMPT() +#endif + +#define BEGIN_LOCKS(x) \ + DISABLE_PREEMPT(); \ + rcu_read_lock(); \ + read_lock(&tasklist_lock); \ + read_lock(&grsec_exec_file_lock); \ + if (x != GR_DO_AUDIT) \ + spin_lock(&grsec_alert_lock); \ + else \ + spin_lock(&grsec_audit_lock) + +#define END_LOCKS(x) \ + if (x != GR_DO_AUDIT) \ + spin_unlock(&grsec_alert_lock); \ + else \ + spin_unlock(&grsec_audit_lock); \ + read_unlock(&grsec_exec_file_lock); \ + read_unlock(&tasklist_lock); \ + rcu_read_unlock(); \ + ENABLE_PREEMPT(); \ + if (x == GR_DONT_AUDIT) \ + gr_handle_alertkill(current) + +enum { + FLOODING, + NO_FLOODING +}; + +extern char *gr_alert_log_fmt; +extern char *gr_audit_log_fmt; +extern char *gr_alert_log_buf; +extern char *gr_audit_log_buf; + +static int gr_log_start(int audit) +{ + char *loglevel = (audit == GR_DO_AUDIT) ? KERN_INFO : KERN_ALERT; + char *fmt = (audit == GR_DO_AUDIT) ? gr_audit_log_fmt : gr_alert_log_fmt; + char *buf = (audit == GR_DO_AUDIT) ? 
gr_audit_log_buf : gr_alert_log_buf; +#if (CONFIG_GRKERNSEC_FLOODTIME > 0 && CONFIG_GRKERNSEC_FLOODBURST > 0) + unsigned long curr_secs = get_seconds(); + + if (audit == GR_DO_AUDIT) + goto set_fmt; + + if (!grsec_alert_wtime || time_after(curr_secs, grsec_alert_wtime + CONFIG_GRKERNSEC_FLOODTIME)) { + grsec_alert_wtime = curr_secs; + grsec_alert_fyet = 0; + } else if (time_before_eq(curr_secs, grsec_alert_wtime + CONFIG_GRKERNSEC_FLOODTIME) + && (grsec_alert_fyet < CONFIG_GRKERNSEC_FLOODBURST)) { + grsec_alert_fyet++; + } else if (grsec_alert_fyet == CONFIG_GRKERNSEC_FLOODBURST) { + grsec_alert_wtime = curr_secs; + grsec_alert_fyet++; + printk(KERN_ALERT "grsec: more alerts, logging disabled for %d seconds\n", CONFIG_GRKERNSEC_FLOODTIME); + return FLOODING; + } + else return FLOODING; + +set_fmt: +#endif + memset(buf, 0, PAGE_SIZE); + if (current->signal->curr_ip && gr_acl_is_enabled()) { + sprintf(fmt, "%s%s", loglevel, "grsec: From %pI4: (%.64s:%c:%.950s) "); + snprintf(buf, PAGE_SIZE - 1, fmt, &current->signal->curr_ip, current->role->rolename, gr_roletype_to_char(), current->acl->filename); + } else if (current->signal->curr_ip) { + sprintf(fmt, "%s%s", loglevel, "grsec: From %pI4: "); + snprintf(buf, PAGE_SIZE - 1, fmt, &current->signal->curr_ip); + } else if (gr_acl_is_enabled()) { + sprintf(fmt, "%s%s", loglevel, "grsec: (%.64s:%c:%.950s) "); + snprintf(buf, PAGE_SIZE - 1, fmt, current->role->rolename, gr_roletype_to_char(), current->acl->filename); + } else { + sprintf(fmt, "%s%s", loglevel, "grsec: "); + strcpy(buf, fmt); + } + + return NO_FLOODING; +} + +static void gr_log_middle(int audit, const char *msg, va_list ap) + __attribute__ ((format (printf, 2, 0))); + +static void gr_log_middle(int audit, const char *msg, va_list ap) +{ + char *buf = (audit == GR_DO_AUDIT) ? 
gr_audit_log_buf : gr_alert_log_buf; + unsigned int len = strlen(buf); + + vsnprintf(buf + len, PAGE_SIZE - len - 1, msg, ap); + + return; +} + +static void gr_log_middle_varargs(int audit, const char *msg, ...) + __attribute__ ((format (printf, 2, 3))); + +static void gr_log_middle_varargs(int audit, const char *msg, ...) +{ + char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf; + unsigned int len = strlen(buf); + va_list ap; + + va_start(ap, msg); + vsnprintf(buf + len, PAGE_SIZE - len - 1, msg, ap); + va_end(ap); + + return; +} + +static void gr_log_end(int audit, int append_default) +{ + char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf; + if (append_default) { + struct task_struct *task = current; + struct task_struct *parent = task->real_parent; + const struct cred *cred = __task_cred(task); + const struct cred *pcred = __task_cred(parent); + unsigned int len = strlen(buf); + + snprintf(buf + len, PAGE_SIZE - len - 1, DEFAULTSECMSG, gr_task_fullpath(task), task->comm, task_pid_nr(task), GR_GLOBAL_UID(cred->uid), GR_GLOBAL_UID(cred->euid), GR_GLOBAL_GID(cred->gid), GR_GLOBAL_GID(cred->egid), gr_parent_task_fullpath(task), parent->comm, task_pid_nr(task->real_parent), GR_GLOBAL_UID(pcred->uid), GR_GLOBAL_UID(pcred->euid), GR_GLOBAL_GID(pcred->gid), GR_GLOBAL_GID(pcred->egid)); + } + + printk("%s\n", buf); + + return; +} + +void gr_log_varargs(int audit, const char *msg, int argtypes, ...) +{ + int logtype; + char *result = (audit == GR_DO_AUDIT) ? 
"successful" : "denied"; + char *str1 = NULL, *str2 = NULL, *str3 = NULL; + void *voidptr = NULL; + int num1 = 0, num2 = 0; + unsigned long ulong1 = 0, ulong2 = 0; + struct dentry *dentry = NULL; + struct vfsmount *mnt = NULL; + struct file *file = NULL; + struct task_struct *task = NULL; + struct vm_area_struct *vma = NULL; + const struct cred *cred, *pcred; + va_list ap; + + BEGIN_LOCKS(audit); + logtype = gr_log_start(audit); + if (logtype == FLOODING) { + END_LOCKS(audit); + return; + } + va_start(ap, argtypes); + switch (argtypes) { + case GR_TTYSNIFF: + task = va_arg(ap, struct task_struct *); + gr_log_middle_varargs(audit, msg, &task->signal->curr_ip, gr_task_fullpath0(task), task->comm, task_pid_nr(task), gr_parent_task_fullpath0(task), task->real_parent->comm, task_pid_nr(task->real_parent)); + break; + case GR_SYSCTL_HIDDEN: + str1 = va_arg(ap, char *); + gr_log_middle_varargs(audit, msg, result, str1); + break; + case GR_RBAC: + dentry = va_arg(ap, struct dentry *); + mnt = va_arg(ap, struct vfsmount *); + gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt)); + break; + case GR_RBAC_STR: + dentry = va_arg(ap, struct dentry *); + mnt = va_arg(ap, struct vfsmount *); + str1 = va_arg(ap, char *); + gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1); + break; + case GR_STR_RBAC: + str1 = va_arg(ap, char *); + dentry = va_arg(ap, struct dentry *); + mnt = va_arg(ap, struct vfsmount *); + gr_log_middle_varargs(audit, msg, result, str1, gr_to_filename(dentry, mnt)); + break; + case GR_RBAC_MODE2: + dentry = va_arg(ap, struct dentry *); + mnt = va_arg(ap, struct vfsmount *); + str1 = va_arg(ap, char *); + str2 = va_arg(ap, char *); + gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1, str2); + break; + case GR_RBAC_MODE3: + dentry = va_arg(ap, struct dentry *); + mnt = va_arg(ap, struct vfsmount *); + str1 = va_arg(ap, char *); + str2 = va_arg(ap, char *); + str3 = va_arg(ap, char *); + 
gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1, str2, str3); + break; + case GR_FILENAME: + dentry = va_arg(ap, struct dentry *); + mnt = va_arg(ap, struct vfsmount *); + gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt)); + break; + case GR_STR_FILENAME: + str1 = va_arg(ap, char *); + dentry = va_arg(ap, struct dentry *); + mnt = va_arg(ap, struct vfsmount *); + gr_log_middle_varargs(audit, msg, str1, gr_to_filename(dentry, mnt)); + break; + case GR_FILENAME_STR: + dentry = va_arg(ap, struct dentry *); + mnt = va_arg(ap, struct vfsmount *); + str1 = va_arg(ap, char *); + gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), str1); + break; + case GR_FILENAME_TWO_INT: + dentry = va_arg(ap, struct dentry *); + mnt = va_arg(ap, struct vfsmount *); + num1 = va_arg(ap, int); + num2 = va_arg(ap, int); + gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), num1, num2); + break; + case GR_FILENAME_TWO_INT_STR: + dentry = va_arg(ap, struct dentry *); + mnt = va_arg(ap, struct vfsmount *); + num1 = va_arg(ap, int); + num2 = va_arg(ap, int); + str1 = va_arg(ap, char *); + gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), num1, num2, str1); + break; + case GR_TEXTREL: + str1 = va_arg(ap, char *); + file = va_arg(ap, struct file *); + ulong1 = va_arg(ap, unsigned long); + ulong2 = va_arg(ap, unsigned long); + gr_log_middle_varargs(audit, msg, str1, file ? gr_to_filename(file->f_path.dentry, file->f_path.mnt) : "<anonymous mapping>", ulong1, ulong2); + break; + case GR_PTRACE: + task = va_arg(ap, struct task_struct *); + gr_log_middle_varargs(audit, msg, task->exec_file ? 
gr_to_filename(task->exec_file->f_path.dentry, task->exec_file->f_path.mnt) : "(none)", task->comm, task_pid_nr(task)); + break; + case GR_RESOURCE: + task = va_arg(ap, struct task_struct *); + cred = __task_cred(task); + pcred = __task_cred(task->real_parent); + ulong1 = va_arg(ap, unsigned long); + str1 = va_arg(ap, char *); + ulong2 = va_arg(ap, unsigned long); + gr_log_middle_varargs(audit, msg, ulong1, str1, ulong2, gr_task_fullpath(task), task->comm, task_pid_nr(task), GR_GLOBAL_UID(cred->uid), GR_GLOBAL_UID(cred->euid), GR_GLOBAL_GID(cred->gid), GR_GLOBAL_GID(cred->egid), gr_parent_task_fullpath(task), task->real_parent->comm, task_pid_nr(task->real_parent), GR_GLOBAL_UID(pcred->uid), GR_GLOBAL_UID(pcred->euid), GR_GLOBAL_GID(pcred->gid), GR_GLOBAL_GID(pcred->egid)); + break; + case GR_CAP: + task = va_arg(ap, struct task_struct *); + cred = __task_cred(task); + pcred = __task_cred(task->real_parent); + str1 = va_arg(ap, char *); + gr_log_middle_varargs(audit, msg, str1, gr_task_fullpath(task), task->comm, task_pid_nr(task), GR_GLOBAL_UID(cred->uid), GR_GLOBAL_UID(cred->euid), GR_GLOBAL_GID(cred->gid), GR_GLOBAL_GID(cred->egid), gr_parent_task_fullpath(task), task->real_parent->comm, task_pid_nr(task->real_parent), GR_GLOBAL_UID(pcred->uid), GR_GLOBAL_UID(pcred->euid), GR_GLOBAL_GID(pcred->gid), GR_GLOBAL_GID(pcred->egid)); + break; + case GR_SIG: + str1 = va_arg(ap, char *); + voidptr = va_arg(ap, void *); + gr_log_middle_varargs(audit, msg, str1, voidptr); + break; + case GR_SIG2: + task = va_arg(ap, struct task_struct *); + cred = __task_cred(task); + pcred = __task_cred(task->real_parent); + num1 = va_arg(ap, int); + gr_log_middle_varargs(audit, msg, num1, gr_task_fullpath0(task), task->comm, task_pid_nr(task), GR_GLOBAL_UID(cred->uid), GR_GLOBAL_UID(cred->euid), GR_GLOBAL_GID(cred->gid), GR_GLOBAL_GID(cred->egid), gr_parent_task_fullpath0(task), task->real_parent->comm, task_pid_nr(task->real_parent), GR_GLOBAL_UID(pcred->uid), 
GR_GLOBAL_UID(pcred->euid), GR_GLOBAL_GID(pcred->gid), GR_GLOBAL_GID(pcred->egid)); + break; + case GR_CRASH1: + task = va_arg(ap, struct task_struct *); + cred = __task_cred(task); + pcred = __task_cred(task->real_parent); + ulong1 = va_arg(ap, unsigned long); + gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task_pid_nr(task), GR_GLOBAL_UID(cred->uid), GR_GLOBAL_UID(cred->euid), GR_GLOBAL_GID(cred->gid), GR_GLOBAL_GID(cred->egid), gr_parent_task_fullpath(task), task->real_parent->comm, task_pid_nr(task->real_parent), GR_GLOBAL_UID(pcred->uid), GR_GLOBAL_UID(pcred->euid), GR_GLOBAL_GID(pcred->gid), GR_GLOBAL_GID(pcred->egid), GR_GLOBAL_UID(cred->uid), ulong1); + break; + case GR_CRASH2: + task = va_arg(ap, struct task_struct *); + cred = __task_cred(task); + pcred = __task_cred(task->real_parent); + ulong1 = va_arg(ap, unsigned long); + gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task_pid_nr(task), GR_GLOBAL_UID(cred->uid), GR_GLOBAL_UID(cred->euid), GR_GLOBAL_GID(cred->gid), GR_GLOBAL_GID(cred->egid), gr_parent_task_fullpath(task), task->real_parent->comm, task_pid_nr(task->real_parent), GR_GLOBAL_UID(pcred->uid), GR_GLOBAL_UID(pcred->euid), GR_GLOBAL_GID(pcred->gid), GR_GLOBAL_GID(pcred->egid), ulong1); + break; + case GR_RWXMAP: + file = va_arg(ap, struct file *); + gr_log_middle_varargs(audit, msg, file ? 
gr_to_filename(file->f_path.dentry, file->f_path.mnt) : "<anonymous mapping>"); + break; + case GR_RWXMAPVMA: + vma = va_arg(ap, struct vm_area_struct *); + if (vma->vm_file) + str1 = gr_to_filename(vma->vm_file->f_path.dentry, vma->vm_file->f_path.mnt); + else if (vma->vm_flags & (VM_GROWSDOWN | VM_GROWSUP)) + str1 = "<stack>"; + else if (vma->vm_start <= current->mm->brk && + vma->vm_end >= current->mm->start_brk) + str1 = "<heap>"; + else + str1 = "<anonymous mapping>"; + gr_log_middle_varargs(audit, msg, str1); + break; + case GR_PSACCT: + { + unsigned int wday, cday; + __u8 whr, chr; + __u8 wmin, cmin; + __u8 wsec, csec; + + task = va_arg(ap, struct task_struct *); + wday = va_arg(ap, unsigned int); + cday = va_arg(ap, unsigned int); + whr = va_arg(ap, int); + chr = va_arg(ap, int); + wmin = va_arg(ap, int); + cmin = va_arg(ap, int); + wsec = va_arg(ap, int); + csec = va_arg(ap, int); + ulong1 = va_arg(ap, unsigned long); + cred = __task_cred(task); + pcred = __task_cred(task->real_parent); + + gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task_pid_nr(task), &task->signal->curr_ip, tty_name(task->signal->tty), GR_GLOBAL_UID(cred->uid), GR_GLOBAL_UID(cred->euid), GR_GLOBAL_GID(cred->gid), GR_GLOBAL_GID(cred->egid), wday, whr, wmin, wsec, cday, chr, cmin, csec, (task->flags & PF_SIGNALED) ? 
"killed by signal" : "exited", ulong1, gr_parent_task_fullpath(task), task->real_parent->comm, task_pid_nr(task->real_parent), &task->real_parent->signal->curr_ip, tty_name(task->real_parent->signal->tty), GR_GLOBAL_UID(pcred->uid), GR_GLOBAL_UID(pcred->euid), GR_GLOBAL_GID(pcred->gid), GR_GLOBAL_GID(pcred->egid)); + } + break; + default: + gr_log_middle(audit, msg, ap); + } + va_end(ap); + // these don't need DEFAULTSECARGS printed on the end + if (argtypes == GR_CRASH1 || argtypes == GR_CRASH2) + gr_log_end(audit, 0); + else + gr_log_end(audit, 1); + END_LOCKS(audit); +} diff --git a/grsecurity/grsec_mem.c b/grsecurity/grsec_mem.c new file mode 100644 index 0000000000..0e39d8c703 --- /dev/null +++ b/grsecurity/grsec_mem.c @@ -0,0 +1,48 @@ +#include +#include +#include +#include +#include +#include + +void gr_handle_msr_write(void) +{ + gr_log_noargs(GR_DONT_AUDIT, GR_MSRWRITE_MSG); + return; +} +EXPORT_SYMBOL_GPL(gr_handle_msr_write); + +void +gr_handle_ioperm(void) +{ + gr_log_noargs(GR_DONT_AUDIT, GR_IOPERM_MSG); + return; +} + +void +gr_handle_iopl(void) +{ + gr_log_noargs(GR_DONT_AUDIT, GR_IOPL_MSG); + return; +} + +void +gr_handle_mem_readwrite(u64 from, u64 to) +{ + gr_log_two_u64(GR_DONT_AUDIT, GR_MEM_READWRITE_MSG, from, to); + return; +} + +void +gr_handle_vm86(void) +{ + gr_log_noargs(GR_DONT_AUDIT, GR_VM86_MSG); + return; +} + +void +gr_log_badprocpid(const char *entry) +{ + gr_log_str(GR_DONT_AUDIT, GR_BADPROCPID_MSG, entry); + return; +} diff --git a/grsecurity/grsec_mount.c b/grsecurity/grsec_mount.c new file mode 100644 index 0000000000..fe02bf495b --- /dev/null +++ b/grsecurity/grsec_mount.c @@ -0,0 +1,65 @@ +#include +#include +#include +#include +#include +#include + +void +gr_log_remount(const char *devname, const int retval) +{ +#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT + if (grsec_enable_mount && (retval >= 0)) + gr_log_str(GR_DO_AUDIT, GR_REMOUNT_AUDIT_MSG, devname ? 
devname : "none"); +#endif + return; +} + +void +gr_log_unmount(const char *devname, const int retval) +{ +#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT + if (grsec_enable_mount && (retval >= 0)) + gr_log_str(GR_DO_AUDIT, GR_UNMOUNT_AUDIT_MSG, devname ? devname : "none"); +#endif + return; +} + +void +gr_log_mount(const char *from, struct path *to, const int retval) +{ +#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT + if (grsec_enable_mount && (retval >= 0)) + gr_log_str_fs(GR_DO_AUDIT, GR_MOUNT_AUDIT_MSG, from ? from : "none", to->dentry, to->mnt); +#endif + return; +} + +int +gr_handle_rofs_mount(struct dentry *dentry, struct vfsmount *mnt, int mnt_flags) +{ +#ifdef CONFIG_GRKERNSEC_ROFS + if (grsec_enable_rofs && !(mnt_flags & MNT_READONLY)) { + gr_log_fs_generic(GR_DO_AUDIT, GR_ROFS_MOUNT_MSG, dentry, mnt); + return -EPERM; + } else + return 0; +#endif + return 0; +} + +int +gr_handle_rofs_blockwrite(struct dentry *dentry, struct vfsmount *mnt, int acc_mode) +{ +#ifdef CONFIG_GRKERNSEC_ROFS + struct inode *inode = d_backing_inode(dentry); + + if (grsec_enable_rofs && (acc_mode & MAY_WRITE) && + inode && (S_ISBLK(inode->i_mode) || (S_ISCHR(inode->i_mode) && imajor(inode) == RAW_MAJOR))) { + gr_log_fs_generic(GR_DO_AUDIT, GR_ROFS_BLOCKWRITE_MSG, dentry, mnt); + return -EPERM; + } else + return 0; +#endif + return 0; +} diff --git a/grsecurity/grsec_pax.c b/grsecurity/grsec_pax.c new file mode 100644 index 0000000000..2ad7b964a2 --- /dev/null +++ b/grsecurity/grsec_pax.c @@ -0,0 +1,47 @@ +#include +#include +#include +#include +#include +#include + +void +gr_log_textrel(struct vm_area_struct * vma, bool is_textrel_rw) +{ +#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG + if (grsec_enable_log_rwxmaps) + gr_log_textrel_ulong_ulong(GR_DONT_AUDIT, GR_TEXTREL_AUDIT_MSG, + is_textrel_rw ? 
"executable to writable" : "writable to executable", + vma->vm_file, vma->vm_start, vma->vm_pgoff); +#endif + return; +} + +void gr_log_ptgnustack(struct file *file) +{ +#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG + if (grsec_enable_log_rwxmaps) + gr_log_rwxmap(GR_DONT_AUDIT, GR_PTGNUSTACK_MSG, file); +#endif + return; +} + +void +gr_log_rwxmmap(struct file *file) +{ +#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG + if (grsec_enable_log_rwxmaps) + gr_log_rwxmap(GR_DONT_AUDIT, GR_RWXMMAP_MSG, file); +#endif + return; +} + +void +gr_log_rwxmprotect(struct vm_area_struct *vma) +{ +#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG + if (grsec_enable_log_rwxmaps) + gr_log_rwxmap_vma(GR_DONT_AUDIT, GR_RWXMPROTECT_MSG, vma); +#endif + return; +} diff --git a/grsecurity/grsec_proc.c b/grsecurity/grsec_proc.c new file mode 100644 index 0000000000..2005a3a322 --- /dev/null +++ b/grsecurity/grsec_proc.c @@ -0,0 +1,20 @@ +#include +#include +#include +#include + +int gr_proc_is_restricted(void) +{ +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP) + const struct cred *cred = current_cred(); +#endif + +#ifdef CONFIG_GRKERNSEC_PROC_USER + if (!uid_eq(cred->fsuid, GLOBAL_ROOT_UID)) + return -EACCES; +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP) + if (!uid_eq(cred->fsuid, GLOBAL_ROOT_UID) && !in_group_p(grsec_proc_gid)) + return -EACCES; +#endif + return 0; +} diff --git a/grsecurity/grsec_ptrace.c b/grsecurity/grsec_ptrace.c new file mode 100644 index 0000000000..304c5180b9 --- /dev/null +++ b/grsecurity/grsec_ptrace.c @@ -0,0 +1,30 @@ +#include +#include +#include +#include + +void +gr_audit_ptrace(struct task_struct *task) +{ +#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE + if (grsec_enable_audit_ptrace) + gr_log_ptrace(GR_DO_AUDIT, GR_PTRACE_AUDIT_MSG, task); +#endif + return; +} + +int +gr_ptrace_readexec(struct file *file, int unsafe_flags) +{ +#ifdef CONFIG_GRKERNSEC_PTRACE_READEXEC + const struct dentry *dentry = file->f_path.dentry; + const struct vfsmount *mnt = file->f_path.mnt; 
+ + if (grsec_enable_ptrace_readexec && (unsafe_flags & LSM_UNSAFE_PTRACE) && + (inode_permission(d_backing_inode(dentry), MAY_READ) || !gr_acl_handle_open(dentry, mnt, MAY_READ))) { + gr_log_fs_generic(GR_DONT_AUDIT, GR_PTRACE_READEXEC_MSG, dentry, mnt); + return -EACCES; + } +#endif + return 0; +} diff --git a/grsecurity/grsec_sig.c b/grsecurity/grsec_sig.c new file mode 100644 index 0000000000..f072c9d429 --- /dev/null +++ b/grsecurity/grsec_sig.c @@ -0,0 +1,248 @@ +#include +#include +#include +#include +#include +#include +#include +#include + +char *signames[] = { + [SIGSEGV] = "Segmentation fault", + [SIGILL] = "Illegal instruction", + [SIGABRT] = "Abort", + [SIGBUS] = "Invalid alignment/Bus error" +}; + +void +gr_log_signal(const int sig, const void *addr, const struct task_struct *t) +{ +#ifdef CONFIG_GRKERNSEC_SIGNAL + if (grsec_enable_signal && ((sig == SIGSEGV) || (sig == SIGILL) || + (sig == SIGABRT) || (sig == SIGBUS))) { + if (task_pid_nr(t) == task_pid_nr(current)) { + gr_log_sig_addr(GR_DONT_AUDIT_GOOD, GR_UNISIGLOG_MSG, signames[sig], addr); + } else { + gr_log_sig_task(GR_DONT_AUDIT_GOOD, GR_DUALSIGLOG_MSG, t, sig); + } + } +#endif + return; +} + +int +gr_handle_signal(const struct task_struct *p, const int sig) +{ +#ifdef CONFIG_GRKERNSEC + /* ignore the 0 signal for protected task checks */ + if (task_pid_nr(current) > 1 && sig && gr_check_protected_task(p)) { + gr_log_sig_task(GR_DONT_AUDIT, GR_SIG_ACL_MSG, p, sig); + return -EPERM; + } else if (gr_pid_is_chrooted((struct task_struct *)p)) { + return -EPERM; + } +#endif + return 0; +} + +#ifdef CONFIG_GRKERNSEC +extern int specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t); + +int gr_fake_force_sig(int sig, struct task_struct *t) +{ + unsigned long int flags; + int ret, blocked, ignored; + struct k_sigaction *action; + + spin_lock_irqsave(&t->sighand->siglock, flags); + action = &t->sighand->action[sig-1]; + ignored = action->sa.sa_handler == SIG_IGN; + blocked = 
sigismember(&t->blocked, sig); + if (blocked || ignored) { + action->sa.sa_handler = SIG_DFL; + if (blocked) { + sigdelset(&t->blocked, sig); + recalc_sigpending_and_wake(t); + } + } + if (action->sa.sa_handler == SIG_DFL) + t->signal->flags &= ~SIGNAL_UNKILLABLE; + ret = specific_send_sig_info(sig, SEND_SIG_PRIV, t); + + spin_unlock_irqrestore(&t->sighand->siglock, flags); + + return ret; +} +#endif + +#define GR_USER_BAN_TIME (15 * 60) +#define GR_DAEMON_BRUTE_TIME (30 * 60) + +void gr_handle_brute_attach(int dumpable) +{ +#ifdef CONFIG_GRKERNSEC_BRUTE + struct task_struct *p = current; + kuid_t uid = GLOBAL_ROOT_UID; + int is_priv = 0; + int daemon = 0; + + if (!grsec_enable_brute) + return; + + if (is_privileged_binary(p->mm->exe_file->f_path.dentry)) + is_priv = 1; + + rcu_read_lock(); + read_lock(&tasklist_lock); + read_lock(&grsec_exec_file_lock); + if (!is_priv && p->real_parent && gr_is_same_file(p->real_parent->exec_file, p->exec_file)) { + p->real_parent->brute_expires = get_seconds() + GR_DAEMON_BRUTE_TIME; + p->real_parent->brute = 1; + daemon = 1; + } else { + const struct cred *cred = __task_cred(p), *cred2; + struct task_struct *tsk, *tsk2; + + if (dumpable != SUID_DUMP_USER && gr_is_global_nonroot(cred->uid)) { + struct user_struct *user; + + uid = cred->uid; + + /* this is put upon execution past expiration */ + user = find_user(uid); + if (user == NULL) + goto unlock; + user->sugid_banned = 1; + user->sugid_ban_expires = get_seconds() + GR_USER_BAN_TIME; + if (user->sugid_ban_expires == ~0UL) + user->sugid_ban_expires--; + + /* only kill other threads of the same binary, from the same user */ + do_each_thread(tsk2, tsk) { + cred2 = __task_cred(tsk); + if (tsk != p && uid_eq(cred2->uid, uid) && gr_is_same_file(tsk->exec_file, p->exec_file)) + gr_fake_force_sig(SIGKILL, tsk); + } while_each_thread(tsk2, tsk); + } + } +unlock: + read_unlock(&grsec_exec_file_lock); + read_unlock(&tasklist_lock); + rcu_read_unlock(); + + if (gr_is_global_nonroot(uid)) 
+ gr_log_fs_int2(GR_DONT_AUDIT, GR_BRUTE_SUID_MSG, p->exec_file->f_path.dentry, p->exec_file->f_path.mnt, GR_GLOBAL_UID(uid), GR_USER_BAN_TIME / 60); + else if (daemon) + gr_log_noargs(GR_DONT_AUDIT, GR_BRUTE_DAEMON_MSG); + +#endif + return; +} + +void gr_handle_brute_check(void) +{ +#ifdef CONFIG_GRKERNSEC_BRUTE + struct task_struct *p = current; + + if (unlikely(p->brute)) { + if (!grsec_enable_brute) + p->brute = 0; + else if (time_before(get_seconds(), p->brute_expires)) + msleep(30 * 1000); + } +#endif + return; +} + +void gr_handle_kernel_exploit(void) +{ +#ifdef CONFIG_GRKERNSEC_KERN_LOCKOUT + static unsigned int num_banned_users __read_only; + const struct cred *cred; + struct task_struct *tsk, *tsk2; + struct user_struct *user; + kuid_t uid; + + if (in_irq() || in_serving_softirq() || in_nmi()) + panic("grsec: halting the system due to suspicious kernel crash caused in interrupt context"); + + uid = current_uid(); + + if (gr_is_global_root(uid)) + panic("grsec: halting the system due to suspicious kernel crash caused by root"); + else { + pax_open_kernel(); + num_banned_users++; + pax_close_kernel(); + if (num_banned_users > 8) + panic("grsec: halting the system due to suspicious kernel crash caused by a large number of different users"); + + /* kill all the processes of this user, hold a reference + to their creds struct, and prevent them from creating + another process until system reset + */ + printk(KERN_ALERT "grsec: banning user with uid %u until system restart for suspicious kernel crash\n", + GR_GLOBAL_UID(uid)); + /* we intentionally leak this ref */ + user = get_uid(current->cred->user); + if (user) + user->kernel_banned = 1; + + /* kill all processes of this user */ + read_lock(&tasklist_lock); + do_each_thread(tsk2, tsk) { + cred = __task_cred(tsk); + if (uid_eq(cred->uid, uid)) + gr_fake_force_sig(SIGKILL, tsk); + } while_each_thread(tsk2, tsk); + read_unlock(&tasklist_lock); + } +#endif +} + +#ifdef CONFIG_GRKERNSEC_BRUTE +static bool 
sugid_ban_expired(struct user_struct *user) +{ + if (user->sugid_ban_expires != ~0UL && time_after_eq(get_seconds(), user->sugid_ban_expires)) { + user->sugid_banned = 0; + user->sugid_ban_expires = 0; + free_uid(user); + return true; + } + + return false; +} +#endif + +int gr_process_kernel_exec_ban(void) +{ +#ifdef CONFIG_GRKERNSEC_KERN_LOCKOUT + if (unlikely(current->cred->user->kernel_banned)) + return -EPERM; +#endif + return 0; +} + +int gr_process_kernel_setuid_ban(struct user_struct *user) +{ +#ifdef CONFIG_GRKERNSEC_KERN_LOCKOUT + if (unlikely(user->kernel_banned)) + gr_fake_force_sig(SIGKILL, current); +#endif + return 0; +} + +int gr_process_sugid_exec_ban(const struct linux_binprm *bprm) +{ +#ifdef CONFIG_GRKERNSEC_BRUTE + struct user_struct *user = current->cred->user; + if (unlikely(user->sugid_banned)) { + if (sugid_ban_expired(user)) + return 0; + /* disallow execution of suid/sgid binaries only */ + else if (is_privileged_binary(bprm->file->f_path.dentry)) + return -EPERM; + } +#endif + return 0; +} diff --git a/grsecurity/grsec_sock.c b/grsecurity/grsec_sock.c new file mode 100644 index 0000000000..3cdd9461fc --- /dev/null +++ b/grsecurity/grsec_sock.c @@ -0,0 +1,244 @@ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +extern int gr_search_udp_recvmsg(struct sock *sk, const struct sk_buff *skb); +extern int gr_search_udp_sendmsg(struct sock *sk, struct sockaddr_in *addr); + +EXPORT_SYMBOL_GPL(gr_search_udp_recvmsg); +EXPORT_SYMBOL_GPL(gr_search_udp_sendmsg); + +#ifdef CONFIG_UNIX_MODULE +EXPORT_SYMBOL_GPL(gr_acl_handle_unix); +EXPORT_SYMBOL_GPL(gr_acl_handle_mknod); +EXPORT_SYMBOL_GPL(gr_handle_chroot_unix); +EXPORT_SYMBOL_GPL(gr_handle_create); +#endif + +#ifdef CONFIG_GRKERNSEC +#define gr_conn_table_size 32749 +struct conn_table_entry { + struct conn_table_entry *next; + struct signal_struct *sig; +}; + +struct conn_table_entry *gr_conn_table[gr_conn_table_size]; 
+DEFINE_SPINLOCK(gr_conn_table_lock); + +extern const char * gr_socktype_to_name(unsigned char type); +extern const char * gr_proto_to_name(unsigned char proto); +extern const char * gr_sockfamily_to_name(unsigned char family); + +static int +conn_hash(__u32 saddr, __u32 daddr, __u16 sport, __u16 dport, unsigned int size) +{ + return ((daddr + saddr + (sport << 8) + (dport << 16)) % size); +} + +static int +conn_match(const struct signal_struct *sig, __u32 saddr, __u32 daddr, + __u16 sport, __u16 dport) +{ + if (unlikely(sig->gr_saddr == saddr && sig->gr_daddr == daddr && + sig->gr_sport == sport && sig->gr_dport == dport)) + return 1; + else + return 0; +} + +static void gr_add_to_task_ip_table_nolock(struct signal_struct *sig, struct conn_table_entry *newent) +{ + struct conn_table_entry **match; + unsigned int index; + + index = conn_hash(sig->gr_saddr, sig->gr_daddr, + sig->gr_sport, sig->gr_dport, + gr_conn_table_size); + + newent->sig = sig; + + match = &gr_conn_table[index]; + newent->next = *match; + *match = newent; + + return; +} + +static void gr_del_task_from_ip_table_nolock(struct signal_struct *sig) +{ + struct conn_table_entry *match, *last = NULL; + unsigned int index; + + index = conn_hash(sig->gr_saddr, sig->gr_daddr, + sig->gr_sport, sig->gr_dport, + gr_conn_table_size); + + match = gr_conn_table[index]; + while (match && !conn_match(match->sig, + sig->gr_saddr, sig->gr_daddr, sig->gr_sport, + sig->gr_dport)) { + last = match; + match = match->next; + } + + if (match) { + if (last) + last->next = match->next; + else + gr_conn_table[index] = NULL; + kfree(match); + } + + return; +} + +static struct signal_struct * gr_lookup_task_ip_table(__u32 saddr, __u32 daddr, + __u16 sport, __u16 dport) +{ + struct conn_table_entry *match; + unsigned int index; + + index = conn_hash(saddr, daddr, sport, dport, gr_conn_table_size); + + match = gr_conn_table[index]; + while (match && !conn_match(match->sig, saddr, daddr, sport, dport)) + match = match->next; + + 
if (match) + return match->sig; + else + return NULL; +} + +#endif + +void gr_update_task_in_ip_table(const struct inet_sock *inet) +{ +#ifdef CONFIG_GRKERNSEC + struct signal_struct *sig = current->signal; + struct conn_table_entry *newent; + + newent = kmalloc(sizeof(struct conn_table_entry), GFP_ATOMIC); + if (newent == NULL) + return; + /* no bh lock needed since we are called with bh disabled */ + spin_lock(&gr_conn_table_lock); + gr_del_task_from_ip_table_nolock(sig); + sig->gr_saddr = inet->inet_rcv_saddr; + sig->gr_daddr = inet->inet_daddr; + sig->gr_sport = inet->inet_sport; + sig->gr_dport = inet->inet_dport; + gr_add_to_task_ip_table_nolock(sig, newent); + spin_unlock(&gr_conn_table_lock); +#endif + return; +} + +void gr_del_task_from_ip_table(struct task_struct *task) +{ +#ifdef CONFIG_GRKERNSEC + spin_lock_bh(&gr_conn_table_lock); + gr_del_task_from_ip_table_nolock(task->signal); + spin_unlock_bh(&gr_conn_table_lock); +#endif + return; +} + +void +gr_attach_curr_ip(const struct sock *sk) +{ +#ifdef CONFIG_GRKERNSEC + struct signal_struct *p, *set; + const struct inet_sock *inet = inet_sk(sk); + + if (unlikely(sk->sk_protocol != IPPROTO_TCP)) + return; + + set = current->signal; + + spin_lock_bh(&gr_conn_table_lock); + p = gr_lookup_task_ip_table(inet->inet_daddr, inet->inet_rcv_saddr, + inet->inet_dport, inet->inet_sport); + if (unlikely(p != NULL)) { + set->curr_ip = p->curr_ip; + set->used_accept = 1; + gr_del_task_from_ip_table_nolock(p); + spin_unlock_bh(&gr_conn_table_lock); + return; + } + spin_unlock_bh(&gr_conn_table_lock); + + set->curr_ip = inet->inet_daddr; + set->used_accept = 1; +#endif + return; +} + +int +gr_handle_sock_all(const int family, const int type, const int protocol) +{ +#ifdef CONFIG_GRKERNSEC_SOCKET_ALL + if (grsec_enable_socket_all && in_group_p(grsec_socket_all_gid) && + (family != AF_UNIX)) { + if (family == AF_INET) + gr_log_str3(GR_DONT_AUDIT, GR_SOCK_MSG, gr_sockfamily_to_name(family), gr_socktype_to_name(type), 
gr_proto_to_name(protocol)); + else + gr_log_str2_int(GR_DONT_AUDIT, GR_SOCK_NOINET_MSG, gr_sockfamily_to_name(family), gr_socktype_to_name(type), protocol); + return -EACCES; + } +#endif + return 0; +} + +int +gr_handle_sock_server(const struct sockaddr *sck) +{ +#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER + if (grsec_enable_socket_server && + in_group_p(grsec_socket_server_gid) && + sck && (sck->sa_family != AF_UNIX) && + (sck->sa_family != AF_LOCAL)) { + gr_log_noargs(GR_DONT_AUDIT, GR_BIND_MSG); + return -EACCES; + } +#endif + return 0; +} + +int +gr_handle_sock_server_other(const struct sock *sck) +{ +#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER + if (grsec_enable_socket_server && + in_group_p(grsec_socket_server_gid) && + sck && (sck->sk_family != AF_UNIX) && + (sck->sk_family != AF_LOCAL)) { + gr_log_noargs(GR_DONT_AUDIT, GR_BIND_MSG); + return -EACCES; + } +#endif + return 0; +} + +int +gr_handle_sock_client(const struct sockaddr *sck) +{ +#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT + if (grsec_enable_socket_client && in_group_p(grsec_socket_client_gid) && + sck && (sck->sa_family != AF_UNIX) && + (sck->sa_family != AF_LOCAL)) { + gr_log_noargs(GR_DONT_AUDIT, GR_CONNECT_MSG); + return -EACCES; + } +#endif + return 0; +} diff --git a/grsecurity/grsec_sysctl.c b/grsecurity/grsec_sysctl.c new file mode 100644 index 0000000000..4f673f8c21 --- /dev/null +++ b/grsecurity/grsec_sysctl.c @@ -0,0 +1,497 @@ +#include +#include +#include +#include +#include + +int +gr_handle_sysctl_mod(const char *dirname, const char *name, const int op) +{ +#ifdef CONFIG_GRKERNSEC_SYSCTL + if (dirname == NULL || name == NULL) + return 0; + if (!strcmp(dirname, "grsecurity") && grsec_lock && (op & MAY_WRITE)) { + gr_log_str(GR_DONT_AUDIT, GR_SYSCTL_MSG, name); + return -EACCES; + } +#endif + return 0; +} + +#if defined(CONFIG_GRKERNSEC_ROFS) || defined(CONFIG_GRKERNSEC_DENYUSB) +static int __maybe_unused __read_only one = 1; +#endif + +#if defined(CONFIG_GRKERNSEC_SYSCTL) || 
defined(CONFIG_GRKERNSEC_ROFS) || \ + defined(CONFIG_GRKERNSEC_DENYUSB) +struct ctl_table grsecurity_table[] = { +#ifdef CONFIG_GRKERNSEC_SYSCTL +#ifdef CONFIG_GRKERNSEC_SYSCTL_DISTRO +#ifdef CONFIG_GRKERNSEC_IO + { + .procname = "disable_priv_io", + .data = &grsec_disable_privio, + .maxlen = sizeof(int), + .mode = 0600, + .proc_handler = &proc_dointvec_secure, + }, +#endif +#endif +#ifdef CONFIG_GRKERNSEC_LINK + { + .procname = "linking_restrictions", + .data = &grsec_enable_link, + .maxlen = sizeof(int), + .mode = 0600, + .proc_handler = &proc_dointvec_secure, + }, +#endif +#ifdef CONFIG_GRKERNSEC_SYMLINKOWN + { + .procname = "enforce_symlinksifowner", + .data = &grsec_enable_symlinkown, + .maxlen = sizeof(int), + .mode = 0600, + .proc_handler = &proc_dointvec_secure, + }, + { + .procname = "symlinkown_gid", + .data = &grsec_symlinkown_gid, + .maxlen = sizeof(int), + .mode = 0600, + .proc_handler = &proc_dointvec_secure, + }, +#endif +#ifdef CONFIG_GRKERNSEC_BRUTE + { + .procname = "deter_bruteforce", + .data = &grsec_enable_brute, + .maxlen = sizeof(int), + .mode = 0600, + .proc_handler = &proc_dointvec_secure, + }, +#endif +#ifdef CONFIG_GRKERNSEC_FIFO + { + .procname = "fifo_restrictions", + .data = &grsec_enable_fifo, + .maxlen = sizeof(int), + .mode = 0600, + .proc_handler = &proc_dointvec_secure, + }, +#endif +#ifdef CONFIG_GRKERNSEC_PTRACE_READEXEC + { + .procname = "ptrace_readexec", + .data = &grsec_enable_ptrace_readexec, + .maxlen = sizeof(int), + .mode = 0600, + .proc_handler = &proc_dointvec_secure, + }, +#endif +#ifdef CONFIG_GRKERNSEC_SETXID + { + .procname = "consistent_setxid", + .data = &grsec_enable_setxid, + .maxlen = sizeof(int), + .mode = 0600, + .proc_handler = &proc_dointvec_secure, + }, +#endif +#ifdef CONFIG_GRKERNSEC_BLACKHOLE + { + .procname = "ip_blackhole", + .data = &grsec_enable_blackhole, + .maxlen = sizeof(int), + .mode = 0600, + .proc_handler = &proc_dointvec_secure, + }, + { + .procname = "lastack_retries", + .data = 
&grsec_lastack_retries, + .maxlen = sizeof(int), + .mode = 0600, + .proc_handler = &proc_dointvec_secure, + }, +#endif +#ifdef CONFIG_GRKERNSEC_EXECLOG + { + .procname = "exec_logging", + .data = &grsec_enable_execlog, + .maxlen = sizeof(int), + .mode = 0600, + .proc_handler = &proc_dointvec_secure, + }, +#endif +#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG + { + .procname = "rwxmap_logging", + .data = &grsec_enable_log_rwxmaps, + .maxlen = sizeof(int), + .mode = 0600, + .proc_handler = &proc_dointvec_secure, + }, +#endif +#ifdef CONFIG_GRKERNSEC_SIGNAL + { + .procname = "signal_logging", + .data = &grsec_enable_signal, + .maxlen = sizeof(int), + .mode = 0600, + .proc_handler = &proc_dointvec_secure, + }, +#endif +#ifdef CONFIG_GRKERNSEC_FORKFAIL + { + .procname = "forkfail_logging", + .data = &grsec_enable_forkfail, + .maxlen = sizeof(int), + .mode = 0600, + .proc_handler = &proc_dointvec_secure, + }, +#endif +#ifdef CONFIG_GRKERNSEC_TIME + { + .procname = "timechange_logging", + .data = &grsec_enable_time, + .maxlen = sizeof(int), + .mode = 0600, + .proc_handler = &proc_dointvec_secure, + }, +#endif +#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT + { + .procname = "chroot_deny_shmat", + .data = &grsec_enable_chroot_shmat, + .maxlen = sizeof(int), + .mode = 0600, + .proc_handler = &proc_dointvec_secure, + }, +#endif +#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX + { + .procname = "chroot_deny_unix", + .data = &grsec_enable_chroot_unix, + .maxlen = sizeof(int), + .mode = 0600, + .proc_handler = &proc_dointvec_secure, + }, +#endif +#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT + { + .procname = "chroot_deny_mount", + .data = &grsec_enable_chroot_mount, + .maxlen = sizeof(int), + .mode = 0600, + .proc_handler = &proc_dointvec_secure, + }, +#endif +#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR + { + .procname = "chroot_deny_fchdir", + .data = &grsec_enable_chroot_fchdir, + .maxlen = sizeof(int), + .mode = 0600, + .proc_handler = &proc_dointvec_secure, + }, +#endif +#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE + { + 
.procname = "chroot_deny_chroot", + .data = &grsec_enable_chroot_double, + .maxlen = sizeof(int), + .mode = 0600, + .proc_handler = &proc_dointvec_secure, + }, +#endif +#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT + { + .procname = "chroot_deny_pivot", + .data = &grsec_enable_chroot_pivot, + .maxlen = sizeof(int), + .mode = 0600, + .proc_handler = &proc_dointvec_secure, + }, +#endif +#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR + { + .procname = "chroot_enforce_chdir", + .data = &grsec_enable_chroot_chdir, + .maxlen = sizeof(int), + .mode = 0600, + .proc_handler = &proc_dointvec_secure, + }, +#endif +#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD + { + .procname = "chroot_deny_chmod", + .data = &grsec_enable_chroot_chmod, + .maxlen = sizeof(int), + .mode = 0600, + .proc_handler = &proc_dointvec_secure, + }, +#endif +#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD + { + .procname = "chroot_deny_mknod", + .data = &grsec_enable_chroot_mknod, + .maxlen = sizeof(int), + .mode = 0600, + .proc_handler = &proc_dointvec_secure, + }, +#endif +#ifdef CONFIG_GRKERNSEC_CHROOT_NICE + { + .procname = "chroot_restrict_nice", + .data = &grsec_enable_chroot_nice, + .maxlen = sizeof(int), + .mode = 0600, + .proc_handler = &proc_dointvec_secure, + }, +#endif +#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG + { + .procname = "chroot_execlog", + .data = &grsec_enable_chroot_execlog, + .maxlen = sizeof(int), + .mode = 0600, + .proc_handler = &proc_dointvec_secure, + }, +#endif +#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS + { + .procname = "chroot_caps", + .data = &grsec_enable_chroot_caps, + .maxlen = sizeof(int), + .mode = 0600, + .proc_handler = &proc_dointvec_secure, + }, +#endif +#ifdef CONFIG_GRKERNSEC_CHROOT_RENAME + { + .procname = "chroot_deny_bad_rename", + .data = &grsec_enable_chroot_rename, + .maxlen = sizeof(int), + .mode = 0600, + .proc_handler = &proc_dointvec_secure, + }, +#endif +#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL + { + .procname = "chroot_deny_sysctl", + .data = &grsec_enable_chroot_sysctl, + .maxlen = sizeof(int), + 
.mode = 0600, + .proc_handler = &proc_dointvec_secure, + }, +#endif +#ifdef CONFIG_GRKERNSEC_TPE + { + .procname = "tpe", + .data = &grsec_enable_tpe, + .maxlen = sizeof(int), + .mode = 0600, + .proc_handler = &proc_dointvec_secure, + }, + { + .procname = "tpe_gid", + .data = &grsec_tpe_gid, + .maxlen = sizeof(int), + .mode = 0600, + .proc_handler = &proc_dointvec_secure, + }, +#endif +#ifdef CONFIG_GRKERNSEC_TPE_INVERT + { + .procname = "tpe_invert", + .data = &grsec_enable_tpe_invert, + .maxlen = sizeof(int), + .mode = 0600, + .proc_handler = &proc_dointvec_secure, + }, +#endif +#ifdef CONFIG_GRKERNSEC_TPE_ALL + { + .procname = "tpe_restrict_all", + .data = &grsec_enable_tpe_all, + .maxlen = sizeof(int), + .mode = 0600, + .proc_handler = &proc_dointvec_secure, + }, +#endif +#ifdef CONFIG_GRKERNSEC_SOCKET_ALL + { + .procname = "socket_all", + .data = &grsec_enable_socket_all, + .maxlen = sizeof(int), + .mode = 0600, + .proc_handler = &proc_dointvec_secure, + }, + { + .procname = "socket_all_gid", + .data = &grsec_socket_all_gid, + .maxlen = sizeof(int), + .mode = 0600, + .proc_handler = &proc_dointvec_secure, + }, +#endif +#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT + { + .procname = "socket_client", + .data = &grsec_enable_socket_client, + .maxlen = sizeof(int), + .mode = 0600, + .proc_handler = &proc_dointvec_secure, + }, + { + .procname = "socket_client_gid", + .data = &grsec_socket_client_gid, + .maxlen = sizeof(int), + .mode = 0600, + .proc_handler = &proc_dointvec_secure, + }, +#endif +#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER + { + .procname = "socket_server", + .data = &grsec_enable_socket_server, + .maxlen = sizeof(int), + .mode = 0600, + .proc_handler = &proc_dointvec_secure, + }, + { + .procname = "socket_server_gid", + .data = &grsec_socket_server_gid, + .maxlen = sizeof(int), + .mode = 0600, + .proc_handler = &proc_dointvec_secure, + }, +#endif +#ifdef CONFIG_GRKERNSEC_AUDIT_GROUP + { + .procname = "audit_group", + .data = &grsec_enable_group, + .maxlen = 
sizeof(int), + .mode = 0600, + .proc_handler = &proc_dointvec_secure, + }, + { + .procname = "audit_gid", + .data = &grsec_audit_gid, + .maxlen = sizeof(int), + .mode = 0600, + .proc_handler = &proc_dointvec_secure, + }, +#endif +#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR + { + .procname = "audit_chdir", + .data = &grsec_enable_chdir, + .maxlen = sizeof(int), + .mode = 0600, + .proc_handler = &proc_dointvec_secure, + }, +#endif +#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT + { + .procname = "audit_mount", + .data = &grsec_enable_mount, + .maxlen = sizeof(int), + .mode = 0600, + .proc_handler = &proc_dointvec_secure, + }, +#endif +#ifdef CONFIG_GRKERNSEC_DMESG + { + .procname = "dmesg", + .data = &grsec_enable_dmesg, + .maxlen = sizeof(int), + .mode = 0600, + .proc_handler = &proc_dointvec_secure, + }, +#endif +#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK + { + .procname = "chroot_findtask", + .data = &grsec_enable_chroot_findtask, + .maxlen = sizeof(int), + .mode = 0600, + .proc_handler = &proc_dointvec_secure, + }, +#endif +#ifdef CONFIG_GRKERNSEC_RESLOG + { + .procname = "resource_logging", + .data = &grsec_resource_logging, + .maxlen = sizeof(int), + .mode = 0600, + .proc_handler = &proc_dointvec_secure, + }, +#endif +#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE + { + .procname = "audit_ptrace", + .data = &grsec_enable_audit_ptrace, + .maxlen = sizeof(int), + .mode = 0600, + .proc_handler = &proc_dointvec_secure, + }, +#endif +#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE + { + .procname = "harden_ptrace", + .data = &grsec_enable_harden_ptrace, + .maxlen = sizeof(int), + .mode = 0600, + .proc_handler = &proc_dointvec_secure, + }, +#endif +#ifdef CONFIG_GRKERNSEC_HARDEN_IPC + { + .procname = "harden_ipc", + .data = &grsec_enable_harden_ipc, + .maxlen = sizeof(int), + .mode = 0600, + .proc_handler = &proc_dointvec_secure, + }, +#endif +#ifdef CONFIG_GRKERNSEC_HARDEN_TTY + { + .procname = "harden_tty", + .data = &grsec_enable_harden_tty, + .maxlen = sizeof(int), + .mode = 0600, + .proc_handler = 
&proc_dointvec_secure, + }, +#endif + { + .procname = "grsec_lock", + .data = &grsec_lock, + .maxlen = sizeof(int), + .mode = 0600, + .proc_handler = &proc_dointvec_secure, + }, +#endif +#ifdef CONFIG_GRKERNSEC_ROFS + { + .procname = "romount_protect", + .data = &grsec_enable_rofs, + .maxlen = sizeof(int), + .mode = 0600, + .proc_handler = &proc_dointvec_minmax_secure, + .extra1 = &one, + .extra2 = &one, + }, +#endif +#if defined(CONFIG_GRKERNSEC_DENYUSB) && !defined(CONFIG_GRKERNSEC_DENYUSB_FORCE) + { + .procname = "deny_new_usb", + .data = &grsec_deny_new_usb, + .maxlen = sizeof(int), + .mode = 0600, + .proc_handler = &proc_dointvec_secure, + }, +#endif + { } +}; +#endif diff --git a/grsecurity/grsec_time.c b/grsecurity/grsec_time.c new file mode 100644 index 0000000000..61b514e0d4 --- /dev/null +++ b/grsecurity/grsec_time.c @@ -0,0 +1,16 @@ +#include +#include +#include +#include + +void +gr_log_timechange(void) +{ +#ifdef CONFIG_GRKERNSEC_TIME + if (grsec_enable_time) + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_TIME_MSG); +#endif + return; +} + +EXPORT_SYMBOL_GPL(gr_log_timechange); diff --git a/grsecurity/grsec_tpe.c b/grsecurity/grsec_tpe.c new file mode 100644 index 0000000000..cbd277622b --- /dev/null +++ b/grsecurity/grsec_tpe.c @@ -0,0 +1,78 @@ +#include +#include +#include +#include +#include + +extern int gr_acl_tpe_check(void); + +int +gr_tpe_allow(const struct file *file) +{ +#ifdef CONFIG_GRKERNSEC + struct inode *inode = d_backing_inode(file->f_path.dentry->d_parent); + struct inode *file_inode = d_backing_inode(file->f_path.dentry); + const struct cred *cred = current_cred(); + char *msg = NULL; + char *msg2 = NULL; + + // never restrict root + if (gr_is_global_root(cred->uid)) + return 1; + + if (grsec_enable_tpe) { +#ifdef CONFIG_GRKERNSEC_TPE_INVERT + if (grsec_enable_tpe_invert && !in_group_p(grsec_tpe_gid)) + msg = "not being in trusted group"; + else if (!grsec_enable_tpe_invert && in_group_p(grsec_tpe_gid)) + msg = "being in untrusted group"; 
+#else + if (in_group_p(grsec_tpe_gid)) + msg = "being in untrusted group"; +#endif + } + if (!msg && gr_acl_tpe_check()) + msg = "being in untrusted role"; + + // not in any affected group/role + if (!msg) + goto next_check; + + if (gr_is_global_nonroot(inode->i_uid)) + msg2 = "file in non-root-owned directory"; + else if (inode->i_mode & S_IWOTH) + msg2 = "file in world-writable directory"; + else if ((inode->i_mode & S_IWGRP) && gr_is_global_nonroot_gid(inode->i_gid)) + msg2 = "file in group-writable directory"; + else if (file_inode->i_mode & S_IWOTH) + msg2 = "file is world-writable"; + + if (msg && msg2) { + char fullmsg[70] = {0}; + snprintf(fullmsg, sizeof(fullmsg)-1, "%s and %s", msg, msg2); + gr_log_str_fs(GR_DONT_AUDIT, GR_EXEC_TPE_MSG, fullmsg, file->f_path.dentry, file->f_path.mnt); + return 0; + } + msg = NULL; +next_check: +#ifdef CONFIG_GRKERNSEC_TPE_ALL + if (!grsec_enable_tpe || !grsec_enable_tpe_all) + return 1; + + if (gr_is_global_nonroot(inode->i_uid) && !uid_eq(inode->i_uid, cred->uid)) + msg = "directory not owned by user"; + else if (inode->i_mode & S_IWOTH) + msg = "file in world-writable directory"; + else if ((inode->i_mode & S_IWGRP) && gr_is_global_nonroot_gid(inode->i_gid)) + msg = "file in group-writable directory"; + else if (file_inode->i_mode & S_IWOTH) + msg = "file is world-writable"; + + if (msg) { + gr_log_str_fs(GR_DONT_AUDIT, GR_EXEC_TPE_MSG, msg, file->f_path.dentry, file->f_path.mnt); + return 0; + } +#endif +#endif + return 1; +} diff --git a/grsecurity/grsec_tty.c b/grsecurity/grsec_tty.c new file mode 100644 index 0000000000..ad8b9c5423 --- /dev/null +++ b/grsecurity/grsec_tty.c @@ -0,0 +1,18 @@ +#include +#include +#include +#include +#include +#include + +int gr_handle_tiocsti(struct tty_struct *tty) +{ +#ifdef CONFIG_GRKERNSEC_HARDEN_TTY + if (grsec_enable_harden_tty && (current->signal->tty == tty) && + !capable(CAP_SYS_ADMIN)) { + gr_log_noargs(GR_DONT_AUDIT, GR_TIOCSTI_MSG); + return 1; + } +#endif + return 0; +} 
diff --git a/grsecurity/grsec_usb.c b/grsecurity/grsec_usb.c new file mode 100644 index 0000000000..ae02d8e052 --- /dev/null +++ b/grsecurity/grsec_usb.c @@ -0,0 +1,15 @@ +#include +#include +#include + +int gr_handle_new_usb(void) +{ +#ifdef CONFIG_GRKERNSEC_DENYUSB + if (grsec_deny_new_usb) { + printk(KERN_ALERT "grsec: denied insert of new USB device\n"); + return 1; + } +#endif + return 0; +} +EXPORT_SYMBOL_GPL(gr_handle_new_usb); diff --git a/grsecurity/grsum.c b/grsecurity/grsum.c new file mode 100644 index 0000000000..1af1e63a35 --- /dev/null +++ b/grsecurity/grsum.c @@ -0,0 +1,56 @@ +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#if !defined(CONFIG_CRYPTO) || defined(CONFIG_CRYPTO_MODULE) || !defined(CONFIG_CRYPTO_SHA256) || defined(CONFIG_CRYPTO_SHA256_MODULE) +#error "crypto and sha256 must be built into the kernel" +#endif + +int +chkpw(struct gr_arg *entry, unsigned char *salt, unsigned char *sum) +{ + struct crypto_ahash *tfm; + struct ahash_request *req; + struct scatterlist sg[2]; + unsigned char temp_sum[GR_SHA_LEN]; + unsigned long *tmpsumptr = (unsigned long *)temp_sum; + unsigned long *sumptr = (unsigned long *)sum; + int retval = 1; + + tfm = crypto_alloc_ahash("sha256", 0, CRYPTO_ALG_ASYNC); + if (IS_ERR(tfm)) + goto out_wipe; + + sg_init_table(sg, 2); + sg_set_buf(&sg[0], salt, GR_SALT_LEN); + sg_set_buf(&sg[1], entry->pw, strlen((const char *)entry->pw)); + + req = ahash_request_alloc(tfm, GFP_KERNEL); + if (!req) { + crypto_free_ahash(tfm); + goto out_wipe; + } + + ahash_request_set_callback(req, 0, NULL, NULL); + ahash_request_set_crypt(req, sg, temp_sum, GR_SALT_LEN + strlen((const char *)entry->pw)); + + if (crypto_ahash_digest(req)) + goto out_free; + + if (!crypto_memneq(sumptr, tmpsumptr, GR_SHA_LEN)) + retval = 0; + +out_free: + ahash_request_free(req); + crypto_free_ahash(tfm); +out_wipe: + memset(entry->pw, 0, GR_PW_LEN); + + return retval; +} diff --git a/include/Kbuild b/include/Kbuild 
new file mode 100644 index 0000000000..bab1145bc7 --- /dev/null +++ b/include/Kbuild @@ -0,0 +1,2 @@ +# Top-level Makefile calls into asm-$(ARCH) +# List only non-arch directories below diff --git a/include/acpi/acbuffer.h b/include/acpi/acbuffer.h index 3e8d969b22..cd20d5586f 100644 --- a/include/acpi/acbuffer.h +++ b/include/acpi/acbuffer.h @@ -1,12 +1,46 @@ -/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 */ /****************************************************************************** * * Name: acbuffer.h - Support for buffers returned by ACPI predefined names * - * Copyright (C) 2000 - 2021, Intel Corp. - * *****************************************************************************/ +/* + * Copyright (C) 2000 - 2016, Intel Corp. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions, and the following disclaimer, + * without modification. + * 2. Redistributions in binary form must reproduce at minimum a disclaimer + * substantially similar to the "NO WARRANTY" disclaimer below + * ("Disclaimer") and any redistribution must be conditioned upon + * including a substantially similar Disclaimer requirement for further + * binary redistribution. + * 3. Neither the names of the above-listed copyright holders nor the names + * of any contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * Alternatively, this software may be distributed under the terms of the + * GNU General Public License ("GPL") version 2 as published by the Free + * Software Foundation. 
+ * + * NO WARRANTY + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, + * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGES. + */ + #ifndef __ACBUFFER_H__ #define __ACBUFFER_H__ @@ -207,14 +241,4 @@ struct acpi_pld_info { #define ACPI_PLD_GET_HORIZ_OFFSET(dword) ACPI_GET_BITS (dword, 16, ACPI_16BIT_MASK) #define ACPI_PLD_SET_HORIZ_OFFSET(dword,value) ACPI_SET_BITS (dword, 16, ACPI_16BIT_MASK, value) /* Offset 128+16=144, Len 16 */ -/* Panel position defined in _PLD section of ACPI Specification 6.3 */ - -#define ACPI_PLD_PANEL_TOP 0 -#define ACPI_PLD_PANEL_BOTTOM 1 -#define ACPI_PLD_PANEL_LEFT 2 -#define ACPI_PLD_PANEL_RIGHT 3 -#define ACPI_PLD_PANEL_FRONT 4 -#define ACPI_PLD_PANEL_BACK 5 -#define ACPI_PLD_PANEL_UNKNOWN 6 - #endif /* ACBUFFER_H */ diff --git a/include/acpi/acconfig.h b/include/acpi/acconfig.h index 0362cbb723..12c2882bf6 100644 --- a/include/acpi/acconfig.h +++ b/include/acpi/acconfig.h @@ -1,12 +1,46 @@ -/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 */ /****************************************************************************** * * Name: acconfig.h - Global configuration constants * - * Copyright (C) 2000 - 2021, Intel Corp. - * *****************************************************************************/ +/* + * Copyright (C) 2000 - 2016, Intel Corp. + * All rights reserved. 
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions, and the following disclaimer, + * without modification. + * 2. Redistributions in binary form must reproduce at minimum a disclaimer + * substantially similar to the "NO WARRANTY" disclaimer below + * ("Disclaimer") and any redistribution must be conditioned upon + * including a substantially similar Disclaimer requirement for further + * binary redistribution. + * 3. Neither the names of the above-listed copyright holders nor the names + * of any contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * Alternatively, this software may be distributed under the terms of the + * GNU General Public License ("GPL") version 2 as published by the Free + * Software Foundation. + * + * NO WARRANTY + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, + * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGES. 
+ */ + +#ifndef _ACCONFIG_H +#define _ACCONFIG_H @@ -44,7 +78,6 @@ #define ACPI_MAX_EXTPARSE_CACHE_DEPTH 96 /* Parse tree objects */ #define ACPI_MAX_OBJECT_CACHE_DEPTH 96 /* Interpreter operand objects */ #define ACPI_MAX_NAMESPACE_CACHE_DEPTH 96 /* Namespace objects */ -#define ACPI_MAX_COMMENT_CACHE_DEPTH 96 /* Comments for the -ca option */ /* * Should the subsystem abort the loading of an ACPI table if the @@ -89,15 +122,15 @@ /* Maximum object reference count (detects object deletion issues) */ -#define ACPI_MAX_REFERENCE_COUNT 0x4000 +#define ACPI_MAX_REFERENCE_COUNT 0x1000 /* Default page size for use in mapping memory for operation regions */ #define ACPI_DEFAULT_PAGE_SIZE 4096 /* Must be power of 2 */ -/* owner_id tracking. 128 entries allows for 4095 owner_ids */ +/* owner_id tracking. 8 entries allows for 255 owner_ids */ -#define ACPI_NUM_OWNERID_MASKS 128 +#define ACPI_NUM_OWNERID_MASKS 8 /* Size of the root table array is increased by this increment */ @@ -111,9 +144,9 @@ #define ACPI_ADDRESS_RANGE_MAX 2 -/* Maximum time (default 30s) of While() loops before abort */ +/* Maximum number of While() loops before abort */ -#define ACPI_MAX_LOOP_TIMEOUT 30 +#define ACPI_MAX_LOOP_COUNT 0xFFFF /****************************************************************************** * @@ -121,7 +154,7 @@ * *****************************************************************************/ -/* Method info (in WALK_STATE), containing local variables and arguments */ +/* Method info (in WALK_STATE), containing local variables and arguments */ #define ACPI_METHOD_NUM_LOCALS 8 #define ACPI_METHOD_MAX_LOCAL 7 @@ -141,7 +174,7 @@ /* * Maximal number of elements the Result Stack can contain, - * it may be an arbitrary value not exceeding the types of + * it may be an arbitrary value not exceeding the types of * result_size and result_count (now u8). 
*/ #define ACPI_RESULTS_OBJ_NUM_MAX 255 @@ -173,22 +206,11 @@ #define ACPI_RSDP_CHECKSUM_LENGTH 20 #define ACPI_RSDP_XCHECKSUM_LENGTH 36 -/* - * SMBus, GSBus and IPMI buffer sizes. All have a 2-byte header, - * containing both Status and Length. - */ -#define ACPI_SERIAL_HEADER_SIZE 2 /* Common for below. Status and Length fields */ +/* SMBus, GSBus and IPMI bidirectional buffer size */ -#define ACPI_SMBUS_DATA_SIZE 32 -#define ACPI_SMBUS_BUFFER_SIZE ACPI_SERIAL_HEADER_SIZE + ACPI_SMBUS_DATA_SIZE - -#define ACPI_IPMI_DATA_SIZE 64 -#define ACPI_IPMI_BUFFER_SIZE ACPI_SERIAL_HEADER_SIZE + ACPI_IPMI_DATA_SIZE - -#define ACPI_MAX_GSBUS_DATA_SIZE 255 -#define ACPI_MAX_GSBUS_BUFFER_SIZE ACPI_SERIAL_HEADER_SIZE + ACPI_MAX_GSBUS_DATA_SIZE - -#define ACPI_PRM_INPUT_BUFFER_SIZE 26 +#define ACPI_SMBUS_BUFFER_SIZE 34 +#define ACPI_GSBUS_BUFFER_SIZE 34 +#define ACPI_IPMI_BUFFER_SIZE 66 /* _sx_d and _sx_w control methods */ diff --git a/include/acpi/acexcep.h b/include/acpi/acexcep.h index ea3b1c41bc..2c396344a7 100644 --- a/include/acpi/acexcep.h +++ b/include/acpi/acexcep.h @@ -1,12 +1,46 @@ -/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 */ /****************************************************************************** * * Name: acexcep.h - Exception codes returned by the ACPI subsystem * - * Copyright (C) 2000 - 2021, Intel Corp. - * *****************************************************************************/ +/* + * Copyright (C) 2000 - 2016, Intel Corp. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions, and the following disclaimer, + * without modification. + * 2. 
Redistributions in binary form must reproduce at minimum a disclaimer + * substantially similar to the "NO WARRANTY" disclaimer below + * ("Disclaimer") and any redistribution must be conditioned upon + * including a substantially similar Disclaimer requirement for further + * binary redistribution. + * 3. Neither the names of the above-listed copyright holders nor the names + * of any contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * Alternatively, this software may be distributed under the terms of the + * GNU General Public License ("GPL") version 2 as published by the Free + * Software Foundation. + * + * NO WARRANTY + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, + * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGES. 
+ */ + #ifndef __ACEXCEP_H__ #define __ACEXCEP_H__ @@ -40,12 +74,12 @@ struct acpi_exception_info { char *name; -#if defined (ACPI_HELP_APP) || defined (ACPI_ASL_COMPILER) +#ifdef ACPI_HELP_APP char *description; #endif }; -#if defined (ACPI_HELP_APP) || defined (ACPI_ASL_COMPILER) +#ifdef ACPI_HELP_APP #define EXCEP_TXT(name,description) {name, description} #else #define EXCEP_TXT(name,description) {name} @@ -57,14 +91,9 @@ struct acpi_exception_info { #define ACPI_SUCCESS(a) (!(a)) #define ACPI_FAILURE(a) (a) +#define ACPI_SKIP(a) (a == AE_CTRL_SKIP) #define AE_OK (acpi_status) 0x0000 -#define ACPI_ENV_EXCEPTION(status) (((status) & AE_CODE_MASK) == AE_CODE_ENVIRONMENTAL) -#define ACPI_AML_EXCEPTION(status) (((status) & AE_CODE_MASK) == AE_CODE_AML) -#define ACPI_PROG_EXCEPTION(status) (((status) & AE_CODE_MASK) == AE_CODE_PROGRAMMER) -#define ACPI_TABLE_EXCEPTION(status) (((status) & AE_CODE_MASK) == AE_CODE_ACPI_TABLES) -#define ACPI_CNTL_EXCEPTION(status) (((status) & AE_CODE_MASK) == AE_CODE_CONTROL) - /* * Environmental exceptions */ @@ -98,13 +127,8 @@ struct acpi_exception_info { #define AE_NOT_CONFIGURED EXCEP_ENV (0x001C) #define AE_ACCESS EXCEP_ENV (0x001D) #define AE_IO_ERROR EXCEP_ENV (0x001E) -#define AE_NUMERIC_OVERFLOW EXCEP_ENV (0x001F) -#define AE_HEX_OVERFLOW EXCEP_ENV (0x0020) -#define AE_DECIMAL_OVERFLOW EXCEP_ENV (0x0021) -#define AE_OCTAL_OVERFLOW EXCEP_ENV (0x0022) -#define AE_END_OF_TABLE EXCEP_ENV (0x0023) -#define AE_CODE_ENV_MAX 0x0023 +#define AE_CODE_ENV_MAX 0x001E /* * Programmer exceptions @@ -168,13 +192,11 @@ struct acpi_exception_info { #define AE_AML_CIRCULAR_REFERENCE EXCEP_AML (0x001E) #define AE_AML_BAD_RESOURCE_LENGTH EXCEP_AML (0x001F) #define AE_AML_ILLEGAL_ADDRESS EXCEP_AML (0x0020) -#define AE_AML_LOOP_TIMEOUT EXCEP_AML (0x0021) +#define AE_AML_INFINITE_LOOP EXCEP_AML (0x0021) #define AE_AML_UNINITIALIZED_NODE EXCEP_AML (0x0022) #define AE_AML_TARGET_TYPE EXCEP_AML (0x0023) -#define AE_AML_PROTOCOL EXCEP_AML (0x0024) 
-#define AE_AML_BUFFER_LENGTH EXCEP_AML (0x0025) -#define AE_CODE_AML_MAX 0x0025 +#define AE_CODE_AML_MAX 0x0023 /* * Internal exceptions used for control @@ -189,10 +211,11 @@ struct acpi_exception_info { #define AE_CTRL_TRANSFER EXCEP_CTL (0x0008) #define AE_CTRL_BREAK EXCEP_CTL (0x0009) #define AE_CTRL_CONTINUE EXCEP_CTL (0x000A) -#define AE_CTRL_PARSE_CONTINUE EXCEP_CTL (0x000B) -#define AE_CTRL_PARSE_PENDING EXCEP_CTL (0x000C) +#define AE_CTRL_SKIP EXCEP_CTL (0x000B) +#define AE_CTRL_PARSE_CONTINUE EXCEP_CTL (0x000C) +#define AE_CTRL_PARSE_PENDING EXCEP_CTL (0x000D) -#define AE_CODE_CTRL_MAX 0x000C +#define AE_CODE_CTRL_MAX 0x000D /* Exception strings for acpi_format_exception */ @@ -242,16 +265,7 @@ static const struct acpi_exception_info acpi_gbl_exception_names_env[] = { EXCEP_TXT("AE_NOT_CONFIGURED", "The interface is not part of the current subsystem configuration"), EXCEP_TXT("AE_ACCESS", "Permission denied for the requested operation"), - EXCEP_TXT("AE_IO_ERROR", "An I/O error occurred"), - EXCEP_TXT("AE_NUMERIC_OVERFLOW", - "Overflow during string-to-integer conversion"), - EXCEP_TXT("AE_HEX_OVERFLOW", - "Overflow during ASCII hex-to-binary conversion"), - EXCEP_TXT("AE_DECIMAL_OVERFLOW", - "Overflow during ASCII decimal-to-binary conversion"), - EXCEP_TXT("AE_OCTAL_OVERFLOW", - "Overflow during ASCII octal-to-binary conversion"), - EXCEP_TXT("AE_END_OF_TABLE", "Reached the end of table") + EXCEP_TXT("AE_IO_ERROR", "An I/O error occurred") }; static const struct acpi_exception_info acpi_gbl_exception_names_pgm[] = { @@ -311,8 +325,7 @@ static const struct acpi_exception_info acpi_gbl_exception_names_aml[] = { "An ACPI name contains invalid character(s)"), EXCEP_TXT("AE_AML_NAME_NOT_FOUND", "Could not resolve a named reference"), - EXCEP_TXT("AE_AML_INTERNAL", - "An internal error within the interpreter"), + EXCEP_TXT("AE_AML_INTERNAL", "An internal error within the interprete"), EXCEP_TXT("AE_AML_INVALID_SPACE_ID", "An Operation Region SpaceID is 
invalid"), EXCEP_TXT("AE_AML_STRING_LIMIT", @@ -345,15 +358,12 @@ static const struct acpi_exception_info acpi_gbl_exception_names_aml[] = { "The length of a Resource Descriptor in the AML is incorrect"), EXCEP_TXT("AE_AML_ILLEGAL_ADDRESS", "A memory, I/O, or PCI configuration address is invalid"), - EXCEP_TXT("AE_AML_LOOP_TIMEOUT", - "An AML While loop exceeded the maximum execution time"), + EXCEP_TXT("AE_AML_INFINITE_LOOP", + "An apparent infinite AML While loop, method was aborted"), EXCEP_TXT("AE_AML_UNINITIALIZED_NODE", "A namespace node is uninitialized or unresolved"), EXCEP_TXT("AE_AML_TARGET_TYPE", - "A target operand of an incorrect type was encountered"), - EXCEP_TXT("AE_AML_PROTOCOL", "Violation of a fixed ACPI protocol"), - EXCEP_TXT("AE_AML_BUFFER_LENGTH", - "The length of the buffer is invalid/incorrect") + "A target operand of an incorrect type was encountered") }; static const struct acpi_exception_info acpi_gbl_exception_names_ctrl[] = { @@ -368,6 +378,7 @@ static const struct acpi_exception_info acpi_gbl_exception_names_ctrl[] = { EXCEP_TXT("AE_CTRL_TRANSFER", "Transfer control to called method"), EXCEP_TXT("AE_CTRL_BREAK", "A Break has been executed"), EXCEP_TXT("AE_CTRL_CONTINUE", "A Continue has been executed"), + EXCEP_TXT("AE_CTRL_SKIP", "Not currently used"), EXCEP_TXT("AE_CTRL_PARSE_CONTINUE", "Used to skip over bad opcodes"), EXCEP_TXT("AE_CTRL_PARSE_PENDING", "Used to implement AML While loops") }; diff --git a/include/acpi/acnames.h b/include/acpi/acnames.h index 30869ab77f..be779db708 100644 --- a/include/acpi/acnames.h +++ b/include/acpi/acnames.h @@ -1,12 +1,46 @@ -/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 */ /****************************************************************************** * * Name: acnames.h - Global names and strings * - * Copyright (C) 2000 - 2021, Intel Corp. - * *****************************************************************************/ +/* + * Copyright (C) 2000 - 2016, Intel Corp. 
+ * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions, and the following disclaimer, + * without modification. + * 2. Redistributions in binary form must reproduce at minimum a disclaimer + * substantially similar to the "NO WARRANTY" disclaimer below + * ("Disclaimer") and any redistribution must be conditioned upon + * including a substantially similar Disclaimer requirement for further + * binary redistribution. + * 3. Neither the names of the above-listed copyright holders nor the names + * of any contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * Alternatively, this software may be distributed under the terms of the + * GNU General Public License ("GPL") version 2 as published by the Free + * Software Foundation. + * + * NO WARRANTY + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, + * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGES. 
+ */ + #ifndef __ACNAMES_H__ #define __ACNAMES_H__ @@ -20,8 +54,6 @@ #define METHOD_NAME__CLS "_CLS" #define METHOD_NAME__CRS "_CRS" #define METHOD_NAME__DDN "_DDN" -#define METHOD_NAME__DIS "_DIS" -#define METHOD_NAME__DMA "_DMA" #define METHOD_NAME__HID "_HID" #define METHOD_NAME__INI "_INI" #define METHOD_NAME__PLD "_PLD" @@ -50,14 +82,11 @@ /* Definitions of the predefined namespace names */ #define ACPI_UNKNOWN_NAME (u32) 0x3F3F3F3F /* Unknown name is "????" */ +#define ACPI_ROOT_NAME (u32) 0x5F5F5F5C /* Root name is "\___" */ + #define ACPI_PREFIX_MIXED (u32) 0x69706341 /* "Acpi" */ #define ACPI_PREFIX_LOWER (u32) 0x69706361 /* "acpi" */ -/* Root name stuff */ - -#define ACPI_ROOT_NAME (u32) 0x5F5F5F5C /* Root name is "\___" */ -#define ACPI_ROOT_PATHNAME "\\___" -#define ACPI_NAMESPACE_ROOT "Namespace Root" #define ACPI_NS_ROOT_PATH "\\" #endif /* __ACNAMES_H__ */ diff --git a/include/acpi/acoutput.h b/include/acpi/acoutput.h index c5d900c0ec..48eb4dd99b 100644 --- a/include/acpi/acoutput.h +++ b/include/acpi/acoutput.h @@ -1,12 +1,46 @@ -/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 */ /****************************************************************************** * * Name: acoutput.h -- debug output * - * Copyright (C) 2000 - 2020, Intel Corp. - * *****************************************************************************/ +/* + * Copyright (C) 2000 - 2016, Intel Corp. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions, and the following disclaimer, + * without modification. + * 2. 
Redistributions in binary form must reproduce at minimum a disclaimer + * substantially similar to the "NO WARRANTY" disclaimer below + * ("Disclaimer") and any redistribution must be conditioned upon + * including a substantially similar Disclaimer requirement for further + * binary redistribution. + * 3. Neither the names of the above-listed copyright holders nor the names + * of any contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * Alternatively, this software may be distributed under the terms of the + * GNU General Public License ("GPL") version 2 as published by the Free + * Software Foundation. + * + * NO WARRANTY + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, + * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGES. 
+ */ + #ifndef __ACOUTPUT_H__ #define __ACOUTPUT_H__ @@ -73,16 +107,14 @@ #define ACPI_LV_RESOURCES 0x00010000 #define ACPI_LV_USER_REQUESTS 0x00020000 #define ACPI_LV_PACKAGE 0x00040000 -#define ACPI_LV_EVALUATION 0x00080000 -#define ACPI_LV_VERBOSITY1 0x000FFF40 | ACPI_LV_ALL_EXCEPTIONS +#define ACPI_LV_VERBOSITY1 0x0007FF40 | ACPI_LV_ALL_EXCEPTIONS /* Trace verbosity level 2 [Function tracing and memory allocation] */ #define ACPI_LV_ALLOCATIONS 0x00100000 #define ACPI_LV_FUNCTIONS 0x00200000 #define ACPI_LV_OPTIMIZATIONS 0x00400000 -#define ACPI_LV_PARSE_TREES 0x00800000 -#define ACPI_LV_VERBOSITY2 0x00F00000 | ACPI_LV_VERBOSITY1 +#define ACPI_LV_VERBOSITY2 0x00700000 | ACPI_LV_VERBOSITY1 #define ACPI_LV_ALL ACPI_LV_VERBOSITY2 /* Trace verbosity level 3 [Threading, I/O, and Interrupts] */ @@ -133,7 +165,6 @@ #define ACPI_DB_TABLES ACPI_DEBUG_LEVEL (ACPI_LV_TABLES) #define ACPI_DB_FUNCTIONS ACPI_DEBUG_LEVEL (ACPI_LV_FUNCTIONS) #define ACPI_DB_OPTIMIZATIONS ACPI_DEBUG_LEVEL (ACPI_LV_OPTIMIZATIONS) -#define ACPI_DB_PARSE_TREES ACPI_DEBUG_LEVEL (ACPI_LV_PARSE_TREES) #define ACPI_DB_VALUES ACPI_DEBUG_LEVEL (ACPI_LV_VALUES) #define ACPI_DB_OBJECTS ACPI_DEBUG_LEVEL (ACPI_LV_OBJECTS) #define ACPI_DB_ALLOCATIONS ACPI_DEBUG_LEVEL (ACPI_LV_ALLOCATIONS) @@ -142,7 +173,6 @@ #define ACPI_DB_INTERRUPTS ACPI_DEBUG_LEVEL (ACPI_LV_INTERRUPTS) #define ACPI_DB_USER_REQUESTS ACPI_DEBUG_LEVEL (ACPI_LV_USER_REQUESTS) #define ACPI_DB_PACKAGE ACPI_DEBUG_LEVEL (ACPI_LV_PACKAGE) -#define ACPI_DB_EVALUATION ACPI_DEBUG_LEVEL (ACPI_LV_EVALUATION) #define ACPI_DB_MUTEX ACPI_DEBUG_LEVEL (ACPI_LV_MUTEX) #define ACPI_DB_EVENTS ACPI_DEBUG_LEVEL (ACPI_LV_EVENTS) @@ -150,10 +180,7 @@ /* Defaults for debug_level, debug and normal */ -#ifndef ACPI_DEBUG_DEFAULT -#define ACPI_DEBUG_DEFAULT (ACPI_LV_INIT | ACPI_LV_DEBUG_OBJECT | ACPI_LV_EVALUATION | ACPI_LV_REPAIR) -#endif - +#define ACPI_DEBUG_DEFAULT (ACPI_LV_INFO | ACPI_LV_REPAIR) #define ACPI_NORMAL_DEFAULT (ACPI_LV_INIT | ACPI_LV_DEBUG_OBJECT | 
ACPI_LV_REPAIR) #define ACPI_DEBUG_ALL (ACPI_LV_AML_DISASSEMBLE | ACPI_LV_ALL_EXCEPTIONS | ACPI_LV_ALL) @@ -204,7 +231,6 @@ #define ACPI_EXCEPTION(plist) acpi_exception plist #define ACPI_ERROR(plist) acpi_error plist #define ACPI_BIOS_WARNING(plist) acpi_bios_warning plist -#define ACPI_BIOS_EXCEPTION(plist) acpi_bios_exception plist #define ACPI_BIOS_ERROR(plist) acpi_bios_error plist #define ACPI_DEBUG_OBJECT(obj,l,i) acpi_ex_do_debug_object(obj,l,i) @@ -217,7 +243,6 @@ #define ACPI_EXCEPTION(plist) #define ACPI_ERROR(plist) #define ACPI_BIOS_WARNING(plist) -#define ACPI_BIOS_EXCEPTION(plist) #define ACPI_BIOS_ERROR(plist) #define ACPI_DEBUG_OBJECT(obj,l,i) diff --git a/include/acpi/acpi.h b/include/acpi/acpi.h index 6f6282a862..82803ae971 100644 --- a/include/acpi/acpi.h +++ b/include/acpi/acpi.h @@ -1,12 +1,46 @@ -/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 */ /****************************************************************************** * * Name: acpi.h - Master public include file used to interface to ACPICA * - * Copyright (C) 2000 - 2021, Intel Corp. - * *****************************************************************************/ +/* + * Copyright (C) 2000 - 2016, Intel Corp. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions, and the following disclaimer, + * without modification. + * 2. Redistributions in binary form must reproduce at minimum a disclaimer + * substantially similar to the "NO WARRANTY" disclaimer below + * ("Disclaimer") and any redistribution must be conditioned upon + * including a substantially similar Disclaimer requirement for further + * binary redistribution. + * 3. 
Neither the names of the above-listed copyright holders nor the names + * of any contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * Alternatively, this software may be distributed under the terms of the + * GNU General Public License ("GPL") version 2 as published by the Free + * Software Foundation. + * + * NO WARRANTY + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, + * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGES. 
+ */ + #ifndef __ACPI_H__ #define __ACPI_H__ @@ -24,10 +58,10 @@ #include /* ACPICA data types and structures */ #include /* ACPICA exceptions */ #include /* ACPI table definitions */ -#include /* Resource Descriptor structs */ -#include /* Extra environment-specific items */ #include /* Error output and Debug macros */ +#include /* Resource Descriptor structs */ #include /* OSL interfaces (ACPICA-to-OS) */ #include /* ACPI core subsystem external interfaces */ +#include /* Extra environment-specific items */ #endif /* __ACPI_H__ */ diff --git a/include/acpi/acpi_bus.h b/include/acpi/acpi_bus.h index 13d9337179..c1a524de67 100644 --- a/include/acpi/acpi_bus.h +++ b/include/acpi/acpi_bus.h @@ -1,9 +1,22 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * acpi_bus.h - ACPI Bus Driver ($Revision: 22 $) * * Copyright (C) 2001, 2002 Andy Grover * Copyright (C) 2001, 2002 Paul Diefenbaugh + * + * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or (at + * your option) any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. 
+ * + * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ */ #ifndef __ACPI_BUS_H__ @@ -44,23 +57,21 @@ acpi_status acpi_execute_simple_method(acpi_handle handle, char *method, u64 arg); acpi_status acpi_evaluate_ej0(acpi_handle handle); acpi_status acpi_evaluate_lck(acpi_handle handle, int lock); -acpi_status acpi_evaluate_reg(acpi_handle handle, u8 space_id, u32 function); bool acpi_ata_match(acpi_handle handle); bool acpi_bay_match(acpi_handle handle); bool acpi_dock_match(acpi_handle handle); -bool acpi_check_dsm(acpi_handle handle, const guid_t *guid, u64 rev, u64 funcs); -union acpi_object *acpi_evaluate_dsm(acpi_handle handle, const guid_t *guid, +bool acpi_check_dsm(acpi_handle handle, const u8 *uuid, u64 rev, u64 funcs); +union acpi_object *acpi_evaluate_dsm(acpi_handle handle, const u8 *uuid, u64 rev, u64 func, union acpi_object *argv4); static inline union acpi_object * -acpi_evaluate_dsm_typed(acpi_handle handle, const guid_t *guid, u64 rev, - u64 func, union acpi_object *argv4, - acpi_object_type type) +acpi_evaluate_dsm_typed(acpi_handle handle, const u8 *uuid, u64 rev, u64 func, + union acpi_object *argv4, acpi_object_type type) { union acpi_object *obj; - obj = acpi_evaluate_dsm(handle, guid, rev, func, argv4); + obj = acpi_evaluate_dsm(handle, uuid, rev, func, argv4); if (obj && obj->type != type) { ACPI_FREE(obj); obj = NULL; @@ -77,12 +88,10 @@ acpi_evaluate_dsm_typed(acpi_handle handle, const guid_t *guid, u64 rev, } bool acpi_dev_found(const char *hid); -bool acpi_dev_present(const char *hid, const char *uid, s64 hrv); -bool acpi_reduced_hardware(void); #ifdef CONFIG_ACPI -struct proc_dir_entry; +#include #define ACPI_BUS_FILE_ROOT "acpi" extern struct proc_dir_entry *acpi_root_dir; @@ -94,7 +103,6 @@ enum acpi_bus_device_type { ACPI_BUS_TYPE_THERMAL, ACPI_BUS_TYPE_POWER_BUTTON, ACPI_BUS_TYPE_SLEEP_BUTTON, - ACPI_BUS_TYPE_ECDT_EC, ACPI_BUS_DEVICE_TYPE_COUNT }; @@ -201,8 +209,7 @@ struct acpi_device_flags { u32 
of_compatible_ok:1; u32 coherent_dma:1; u32 cca_seen:1; - u32 enumeration_by_parent:1; - u32 reserved:19; + u32 reserved:20; }; /* File System */ @@ -216,7 +223,7 @@ struct acpi_device_dir { /* Plug and Play */ typedef char acpi_bus_id[8]; -typedef u64 acpi_bus_address; +typedef unsigned long acpi_bus_address; typedef char acpi_device_name[40]; typedef char acpi_device_class[20]; @@ -234,7 +241,6 @@ struct acpi_pnp_type { struct acpi_device_pnp { acpi_bus_id bus_id; /* Object name */ - int instance_no; /* Instance number of this object */ struct acpi_pnp_type type; /* ID type */ acpi_bus_address bus_address; /* _ADR */ char *unique_id; /* _UID */ @@ -280,12 +286,6 @@ struct acpi_device_power { struct acpi_device_power_state states[ACPI_D_STATE_COUNT]; /* Power states (D0-D3Cold) */ }; -struct acpi_dep_data { - struct list_head node; - acpi_handle supplier; - acpi_handle consumer; -}; - /* Performance Management */ struct acpi_device_perf_flags { @@ -312,11 +312,13 @@ struct acpi_device_perf { /* Wakeup Management */ struct acpi_device_wakeup_flags { u8 valid:1; /* Can successfully enable wakeup? 
*/ + u8 run_wake:1; /* Run-Wake GPE devices */ u8 notifier_present:1; /* Wake-up notify handler has been installed */ + u8 enabled:1; /* Enabled for wakeup */ }; struct acpi_device_wakeup_context { - void (*func)(struct acpi_device_wakeup_context *context); + struct work_struct work; struct device *dev; }; @@ -329,7 +331,6 @@ struct acpi_device_wakeup { struct acpi_device_wakeup_context context; struct wakeup_source *ws; int prepare_count; - int enable_count; }; struct acpi_device_physical_node { @@ -339,16 +340,10 @@ struct acpi_device_physical_node { bool put_online:1; }; -struct acpi_device_properties { - const guid_t *guid; - const union acpi_object *properties; - struct list_head list; -}; - /* ACPI Device Specific Data (_DSD) */ struct acpi_device_data { const union acpi_object *pointer; - struct list_head properties; + const union acpi_object *properties; const union acpi_object *of_compatible; struct list_head subnodes; }; @@ -391,52 +386,41 @@ struct acpi_data_node { const char *name; acpi_handle handle; struct fwnode_handle fwnode; - struct fwnode_handle *parent; struct acpi_device_data data; struct list_head sibling; struct kobject kobj; struct completion kobj_done; }; -extern const struct fwnode_operations acpi_device_fwnode_ops; -extern const struct fwnode_operations acpi_data_fwnode_ops; -extern const struct fwnode_operations acpi_static_fwnode_ops; - -bool is_acpi_device_node(const struct fwnode_handle *fwnode); -bool is_acpi_data_node(const struct fwnode_handle *fwnode); - -static inline bool is_acpi_node(const struct fwnode_handle *fwnode) +static inline bool is_acpi_node(struct fwnode_handle *fwnode) { - return (is_acpi_device_node(fwnode) || is_acpi_data_node(fwnode)); + return !IS_ERR_OR_NULL(fwnode) && (fwnode->type == FWNODE_ACPI + || fwnode->type == FWNODE_ACPI_DATA); } -#define to_acpi_device_node(__fwnode) \ - ({ \ - typeof(__fwnode) __to_acpi_device_node_fwnode = __fwnode; \ - \ - is_acpi_device_node(__to_acpi_device_node_fwnode) ? 
\ - container_of(__to_acpi_device_node_fwnode, \ - struct acpi_device, fwnode) : \ - NULL; \ - }) - -#define to_acpi_data_node(__fwnode) \ - ({ \ - typeof(__fwnode) __to_acpi_data_node_fwnode = __fwnode; \ - \ - is_acpi_data_node(__to_acpi_data_node_fwnode) ? \ - container_of(__to_acpi_data_node_fwnode, \ - struct acpi_data_node, fwnode) : \ - NULL; \ - }) - -static inline bool is_acpi_static_node(const struct fwnode_handle *fwnode) +static inline bool is_acpi_device_node(struct fwnode_handle *fwnode) { - return !IS_ERR_OR_NULL(fwnode) && - fwnode->ops == &acpi_static_fwnode_ops; + return !IS_ERR_OR_NULL(fwnode) && fwnode->type == FWNODE_ACPI; } -static inline bool acpi_data_node_match(const struct fwnode_handle *fwnode, +static inline struct acpi_device *to_acpi_device_node(struct fwnode_handle *fwnode) +{ + return is_acpi_device_node(fwnode) ? + container_of(fwnode, struct acpi_device, fwnode) : NULL; +} + +static inline bool is_acpi_data_node(struct fwnode_handle *fwnode) +{ + return fwnode && fwnode->type == FWNODE_ACPI_DATA; +} + +static inline struct acpi_data_node *to_acpi_data_node(struct fwnode_handle *fwnode) +{ + return is_acpi_data_node(fwnode) ? + container_of(fwnode, struct acpi_data_node, fwnode) : NULL; +} + +static inline bool acpi_data_node_match(struct fwnode_handle *fwnode, const char *name) { return is_acpi_data_node(fwnode) ? 
@@ -504,22 +488,21 @@ extern int unregister_acpi_notifier(struct notifier_block *); */ int acpi_bus_get_device(acpi_handle handle, struct acpi_device **device); +struct acpi_device *acpi_bus_get_acpi_device(acpi_handle handle); +void acpi_bus_put_acpi_device(struct acpi_device *adev); acpi_status acpi_bus_get_status_handle(acpi_handle handle, unsigned long long *sta); int acpi_bus_get_status(struct acpi_device *device); int acpi_bus_set_power(acpi_handle handle, int state); const char *acpi_power_state_string(int state); +int acpi_device_get_power(struct acpi_device *device, int *state); int acpi_device_set_power(struct acpi_device *device, int state); int acpi_bus_init_power(struct acpi_device *device); int acpi_device_fix_up_power(struct acpi_device *device); int acpi_bus_update_power(acpi_handle handle, int *state_p); int acpi_device_update_power(struct acpi_device *device, int *state_p); bool acpi_bus_power_manageable(acpi_handle handle); -int acpi_device_power_add_dependent(struct acpi_device *adev, - struct device *dev); -void acpi_device_power_remove_dependent(struct acpi_device *adev, - struct device *dev); #ifdef CONFIG_PM bool acpi_bus_can_wakeup(acpi_handle handle); @@ -539,8 +522,6 @@ void acpi_bus_trim(struct acpi_device *start); acpi_status acpi_bus_get_ejd(acpi_handle handle, acpi_handle * ejd); int acpi_match_device_ids(struct acpi_device *device, const struct acpi_device_id *ids); -void acpi_set_modalias(struct acpi_device *adev, const char *default_id, - char *modalias, size_t len); int acpi_create_dir(struct acpi_device *); void acpi_remove_dir(struct acpi_device *); @@ -590,20 +571,9 @@ struct acpi_pci_root { /* helper */ -bool acpi_dma_supported(const struct acpi_device *adev); +bool acpi_dma_supported(struct acpi_device *adev); enum dev_dma_attr acpi_get_dma_attr(struct acpi_device *adev); -int acpi_iommu_fwspec_init(struct device *dev, u32 id, - struct fwnode_handle *fwnode, - const struct iommu_ops *ops); -int acpi_dma_get_range(struct 
device *dev, u64 *dma_addr, u64 *offset, - u64 *size); -int acpi_dma_configure_id(struct device *dev, enum dev_dma_attr attr, - const u32 *input_id); -static inline int acpi_dma_configure(struct device *dev, - enum dev_dma_attr attr) -{ - return acpi_dma_configure_id(dev, attr, NULL); -} + struct acpi_device *acpi_find_child_device(struct acpi_device *parent, u64 address, bool check_children); int acpi_is_root_bridge(acpi_handle); @@ -612,30 +582,16 @@ struct acpi_pci_root *acpi_pci_find_root(acpi_handle handle); int acpi_enable_wakeup_device_power(struct acpi_device *dev, int state); int acpi_disable_wakeup_device_power(struct acpi_device *dev); -#ifdef CONFIG_X86 -bool acpi_device_always_present(struct acpi_device *adev); -#else -static inline bool acpi_device_always_present(struct acpi_device *adev) -{ - return false; -} -#endif - #ifdef CONFIG_PM -void acpi_pm_wakeup_event(struct device *dev); acpi_status acpi_add_pm_notifier(struct acpi_device *adev, struct device *dev, - void (*func)(struct acpi_device_wakeup_context *context)); + void (*work_func)(struct work_struct *work)); acpi_status acpi_remove_pm_notifier(struct acpi_device *adev); -bool acpi_pm_device_can_wakeup(struct device *dev); int acpi_pm_device_sleep_state(struct device *, int *, int); -int acpi_pm_set_device_wakeup(struct device *dev, bool enable); +int acpi_pm_device_run_wake(struct device *, bool); #else -static inline void acpi_pm_wakeup_event(struct device *dev) -{ -} static inline acpi_status acpi_add_pm_notifier(struct acpi_device *adev, struct device *dev, - void (*func)(struct acpi_device_wakeup_context *context)) + void (*work_func)(struct work_struct *work)) { return AE_SUPPORT; } @@ -643,10 +599,6 @@ static inline acpi_status acpi_remove_pm_notifier(struct acpi_device *adev) { return AE_SUPPORT; } -static inline bool acpi_pm_device_can_wakeup(struct device *dev) -{ - return false; -} static inline int acpi_pm_device_sleep_state(struct device *d, int *p, int m) { if (p) @@ -655,16 
+607,19 @@ static inline int acpi_pm_device_sleep_state(struct device *d, int *p, int m) return (m >= ACPI_STATE_D0 && m <= ACPI_STATE_D3_COLD) ? m : ACPI_STATE_D0; } -static inline int acpi_pm_set_device_wakeup(struct device *dev, bool enable) +static inline int acpi_pm_device_run_wake(struct device *dev, bool enable) { return -ENODEV; } #endif -#ifdef CONFIG_ACPI_SYSTEM_POWER_STATES_SUPPORT -bool acpi_sleep_state_supported(u8 sleep_state); +#ifdef CONFIG_PM_SLEEP +int acpi_pm_device_sleep_wake(struct device *, bool); #else -static inline bool acpi_sleep_state_supported(u8 sleep_state) { return false; } +static inline int acpi_pm_device_sleep_wake(struct device *dev, bool enable) +{ + return -ENODEV; +} #endif #ifdef CONFIG_ACPI_SLEEP @@ -690,46 +645,6 @@ static inline bool acpi_device_can_poweroff(struct acpi_device *adev) adev->power.states[ACPI_STATE_D3_HOT].flags.explicit_set); } -bool acpi_dev_hid_uid_match(struct acpi_device *adev, const char *hid2, const char *uid2); - -void acpi_dev_clear_dependencies(struct acpi_device *supplier); -struct acpi_device *acpi_dev_get_first_consumer_dev(struct acpi_device *supplier); -struct acpi_device * -acpi_dev_get_next_match_dev(struct acpi_device *adev, const char *hid, const char *uid, s64 hrv); -struct acpi_device * -acpi_dev_get_first_match_dev(const char *hid, const char *uid, s64 hrv); - -/** - * for_each_acpi_dev_match - iterate over ACPI devices that matching the criteria - * @adev: pointer to the matching ACPI device, NULL at the end of the loop - * @hid: Hardware ID of the device. - * @uid: Unique ID of the device, pass NULL to not check _UID - * @hrv: Hardware Revision of the device, pass -1 to not check _HRV - * - * The caller is responsible for invoking acpi_dev_put() on the returned device. 
- */ -#define for_each_acpi_dev_match(adev, hid, uid, hrv) \ - for (adev = acpi_dev_get_first_match_dev(hid, uid, hrv); \ - adev; \ - adev = acpi_dev_get_next_match_dev(adev, hid, uid, hrv)) - -static inline struct acpi_device *acpi_dev_get(struct acpi_device *adev) -{ - return adev ? to_acpi_device(get_device(&adev->dev)) : NULL; -} - -static inline void acpi_dev_put(struct acpi_device *adev) -{ - if (adev) - put_device(&adev->dev); -} - -struct acpi_device *acpi_bus_get_acpi_device(acpi_handle handle); - -static inline void acpi_bus_put_acpi_device(struct acpi_device *adev) -{ - acpi_dev_put(adev); -} #else /* CONFIG_ACPI */ static inline int register_acpi_bus_type(void *bus) { return 0; } diff --git a/include/acpi/acpi_drivers.h b/include/acpi/acpi_drivers.h index 8372b0e7fd..29c691265b 100644 --- a/include/acpi/acpi_drivers.h +++ b/include/acpi/acpi_drivers.h @@ -1,9 +1,22 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * acpi_drivers.h ($Revision: 31 $) * * Copyright (C) 2001, 2002 Andy Grover * Copyright (C) 2001, 2002 Paul Diefenbaugh + * + * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or (at + * your option) any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ */ #ifndef __ACPI_DRIVERS_H__ @@ -11,6 +24,25 @@ #define ACPI_MAX_STRING 80 +/* + * Please update drivers/acpi/debug.c and Documentation/acpi/debug.txt + * if you add to this list. 
+ */ +#define ACPI_BUS_COMPONENT 0x00010000 +#define ACPI_AC_COMPONENT 0x00020000 +#define ACPI_BATTERY_COMPONENT 0x00040000 +#define ACPI_BUTTON_COMPONENT 0x00080000 +#define ACPI_SBS_COMPONENT 0x00100000 +#define ACPI_FAN_COMPONENT 0x00200000 +#define ACPI_PCI_COMPONENT 0x00400000 +#define ACPI_POWER_COMPONENT 0x00800000 +#define ACPI_CONTAINER_COMPONENT 0x01000000 +#define ACPI_SYSTEM_COMPONENT 0x02000000 +#define ACPI_THERMAL_COMPONENT 0x04000000 +#define ACPI_MEMORY_DEVICE_COMPONENT 0x08000000 +#define ACPI_VIDEO_COMPONENT 0x10000000 +#define ACPI_PROCESSOR_COMPONENT 0x20000000 + /* * _HID definitions * HIDs must conform to ACPI spec(6.1.4) @@ -26,7 +58,6 @@ #define ACPI_VIDEO_HID "LNXVIDEO" #define ACPI_BAY_HID "LNXIOBAY" #define ACPI_DOCK_HID "LNXDOCK" -#define ACPI_ECDT_HID "LNXEC" /* Quirk for broken IBM BIOSes */ #define ACPI_SMBUS_IBM_HID "SMBUSIBM" @@ -45,25 +76,18 @@ -------------------------------------------------------------------------- */ -/* ACPI PCI Interrupt Link */ +/* ACPI PCI Interrupt Link (pci_link.c) */ int acpi_irq_penalty_init(void); int acpi_pci_link_allocate_irq(acpi_handle handle, int index, int *triggering, int *polarity, char **name); int acpi_pci_link_free_irq(acpi_handle handle); -/* ACPI PCI Device Binding */ +/* ACPI PCI Device Binding (pci_bind.c) */ struct pci_bus; -#ifdef CONFIG_PCI struct pci_dev *acpi_get_pci_dev(acpi_handle); -#else -static inline struct pci_dev *acpi_get_pci_dev(acpi_handle handle) -{ - return NULL; -} -#endif /* Arch-defined function to add a bus to the system */ @@ -75,6 +99,14 @@ void pci_acpi_crs_quirks(void); static inline void pci_acpi_crs_quirks(void) { } #endif +/* -------------------------------------------------------------------------- + Processor + -------------------------------------------------------------------------- */ + +#define ACPI_PROCESSOR_LIMIT_NONE 0x00 +#define ACPI_PROCESSOR_LIMIT_INCREMENT 0x01 +#define ACPI_PROCESSOR_LIMIT_DECREMENT 0x02 + 
/*-------------------------------------------------------------------------- Dock Station -------------------------------------------------------------------------- */ diff --git a/include/acpi/acpi_io.h b/include/acpi/acpi_io.h index 027faa8883..d7d0f495a3 100644 --- a/include/acpi/acpi_io.h +++ b/include/acpi/acpi_io.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _ACPI_IO_H_ #define _ACPI_IO_H_ @@ -14,14 +13,12 @@ static inline void __iomem *acpi_os_ioremap(acpi_physical_address phys, } #endif -extern bool acpi_permanent_mmap; - -void __iomem __ref -*acpi_os_map_iomem(acpi_physical_address phys, acpi_size size); +void __iomem *__ref +acpi_os_map_iomem(acpi_physical_address phys, acpi_size size); void __ref acpi_os_unmap_iomem(void __iomem *virt, acpi_size size); void __iomem *acpi_os_get_iomem(acpi_physical_address phys, unsigned int size); -void __iomem *acpi_os_map_generic_address(struct acpi_generic_address *addr); +int acpi_os_map_generic_address(struct acpi_generic_address *addr); void acpi_os_unmap_generic_address(struct acpi_generic_address *addr); #endif diff --git a/include/acpi/acpi_lpat.h b/include/acpi/acpi_lpat.h index 72d6264ef2..da37e12d23 100644 --- a/include/acpi/acpi_lpat.h +++ b/include/acpi/acpi_lpat.h @@ -1,8 +1,16 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * acpi_lpat.h - LPAT table processing functions * * Copyright (C) 2015 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License version + * 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
*/ #ifndef ACPI_LPAT_H diff --git a/include/acpi/acpi_numa.h b/include/acpi/acpi_numa.h index 68e4d80c1b..d4b72944cc 100644 --- a/include/acpi/acpi_numa.h +++ b/include/acpi/acpi_numa.h @@ -1,10 +1,8 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef __ACPI_NUMA_H #define __ACPI_NUMA_H #ifdef CONFIG_ACPI_NUMA #include -#include /* Proximity bitmap length */ #if MAX_NUMNODES > 256 @@ -17,30 +15,10 @@ extern int pxm_to_node(int); extern int node_to_pxm(int); extern int acpi_map_pxm_to_node(int); extern unsigned char acpi_srat_revision; -extern void disable_srat(void); +extern int acpi_numa __initdata; extern void bad_srat(void); extern int srat_disabled(void); -#else /* CONFIG_ACPI_NUMA */ -static inline void disable_srat(void) -{ -} -static inline int pxm_to_node(int pxm) -{ - return 0; -} -static inline int node_to_pxm(int node) -{ - return 0; -} #endif /* CONFIG_ACPI_NUMA */ - -#ifdef CONFIG_ACPI_HMAT -extern void disable_hmat(void); -#else /* CONFIG_ACPI_HMAT */ -static inline void disable_hmat(void) -{ -} -#endif /* CONFIG_ACPI_HMAT */ -#endif /* __ACPI_NUMA_H */ +#endif /* __ACP_NUMA_H */ diff --git a/include/acpi/acpiosxf.h b/include/acpi/acpiosxf.h index 690c369b71..48b21490bb 100644 --- a/include/acpi/acpiosxf.h +++ b/include/acpi/acpiosxf.h @@ -1,14 +1,48 @@ -/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 */ /****************************************************************************** * * Name: acpiosxf.h - All interfaces to the OS Services Layer (OSL). These * interfaces must be implemented by OSL to interface the * ACPI components to the host operating system. * - * Copyright (C) 2000 - 2021, Intel Corp. - * *****************************************************************************/ +/* + * Copyright (C) 2000 - 2016, Intel Corp. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. 
Redistributions of source code must retain the above copyright + * notice, this list of conditions, and the following disclaimer, + * without modification. + * 2. Redistributions in binary form must reproduce at minimum a disclaimer + * substantially similar to the "NO WARRANTY" disclaimer below + * ("Disclaimer") and any redistribution must be conditioned upon + * including a substantially similar Disclaimer requirement for further + * binary redistribution. + * 3. Neither the names of the above-listed copyright holders nor the names + * of any contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * Alternatively, this software may be distributed under the terms of the + * GNU General Public License ("GPL") version 2 as published by the Free + * Software Foundation. + * + * NO WARRANTY + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, + * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGES. + */ + #ifndef __ACPIOSXF_H__ #define __ACPIOSXF_H__ @@ -97,27 +131,6 @@ acpi_cpu_flags acpi_os_acquire_lock(acpi_spinlock handle); void acpi_os_release_lock(acpi_spinlock handle, acpi_cpu_flags flags); #endif -/* - * RAW spinlock primitives. 
If the OS does not provide them, fallback to - * spinlock primitives - */ -#ifndef ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_create_raw_lock -# define acpi_os_create_raw_lock(out_handle) acpi_os_create_lock(out_handle) -#endif - -#ifndef ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_delete_raw_lock -# define acpi_os_delete_raw_lock(handle) acpi_os_delete_lock(handle) -#endif - -#ifndef ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_acquire_raw_lock -# define acpi_os_acquire_raw_lock(handle) acpi_os_acquire_lock(handle) -#endif - -#ifndef ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_release_raw_lock -# define acpi_os_release_raw_lock(handle, flags) \ - acpi_os_release_lock(handle, flags) -#endif - /* * Semaphore primitives */ @@ -274,8 +287,6 @@ acpi_status acpi_os_write_port(acpi_io_address address, u32 value, u32 width); /* * Platform and hardware-independent physical memory interfaces */ -int acpi_os_read_iomem(void __iomem *virt_addr, u64 *value, u32 width); - #ifndef ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_read_memory acpi_status acpi_os_read_memory(acpi_physical_address address, u64 *value, u32 width); @@ -322,20 +333,16 @@ u64 acpi_os_get_timer(void); acpi_status acpi_os_signal(u32 function, void *info); #endif -#ifndef ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_enter_sleep -acpi_status acpi_os_enter_sleep(u8 sleep_state, u32 rega_value, u32 regb_value); -#endif - /* * Debug print routines */ #ifndef ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_printf -ACPI_PRINTF_LIKE(1) +__printf(1, 2) void ACPI_INTERNAL_VAR_XFACE acpi_os_printf(const char *format, ...); #endif #ifndef ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_vprintf -void acpi_os_vprintf(const char *format, va_list args); +__printf(1, 0) void acpi_os_vprintf(const char *format, va_list args); #endif #ifndef ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_redirect_output @@ -349,12 +356,12 @@ void acpi_os_redirect_output(void *destination); acpi_status acpi_os_get_line(char *buffer, u32 buffer_length, u32 *bytes_read); #endif -#ifndef 
ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_initialize_debugger -acpi_status acpi_os_initialize_debugger(void); +#ifndef ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_initialize_command_signals +acpi_status acpi_os_initialize_command_signals(void); #endif -#ifndef ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_terminate_debugger -void acpi_os_terminate_debugger(void); +#ifndef ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_terminate_command_signals +void acpi_os_terminate_command_signals(void); #endif #ifndef ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_wait_command_ready diff --git a/include/acpi/acpixf.h b/include/acpi/acpixf.h index 370293ee83..951c1f4102 100644 --- a/include/acpi/acpixf.h +++ b/include/acpi/acpixf.h @@ -1,18 +1,52 @@ -/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 */ /****************************************************************************** * * Name: acpixf.h - External interfaces to the ACPI subsystem * - * Copyright (C) 2000 - 2021, Intel Corp. - * *****************************************************************************/ +/* + * Copyright (C) 2000 - 2016, Intel Corp. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions, and the following disclaimer, + * without modification. + * 2. Redistributions in binary form must reproduce at minimum a disclaimer + * substantially similar to the "NO WARRANTY" disclaimer below + * ("Disclaimer") and any redistribution must be conditioned upon + * including a substantially similar Disclaimer requirement for further + * binary redistribution. + * 3. Neither the names of the above-listed copyright holders nor the names + * of any contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. 
+ * + * Alternatively, this software may be distributed under the terms of the + * GNU General Public License ("GPL") version 2 as published by the Free + * Software Foundation. + * + * NO WARRANTY + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, + * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGES. + */ + #ifndef __ACXFACE_H__ #define __ACXFACE_H__ /* Current ACPICA subsystem version in YYYYMMDD format */ -#define ACPI_CA_VERSION 0x20210105 +#define ACPI_CA_VERSION 0x20160831 #include #include @@ -126,14 +160,13 @@ ACPI_INIT_GLOBAL(u8, acpi_gbl_create_osi_method, TRUE); ACPI_INIT_GLOBAL(u8, acpi_gbl_use_default_register_widths, TRUE); /* - * Whether or not to validate (map) an entire table to verify - * checksum/duplication in early stage before install. Set this to TRUE to - * allow early table validation before install it to the table manager. - * Note that enabling this option causes errors to happen in some OSPMs - * during early initialization stages. Default behavior is to allow such - * validation. + * Whether or not to verify the table checksum before installation. Set + * this to TRUE to verify the table checksum before install it to the table + * manager. Note that enabling this option causes errors to happen in some + * OSPMs during early initialization stages. 
Default behavior is to do such + * verification. */ -ACPI_INIT_GLOBAL(u8, acpi_gbl_enable_table_validation, TRUE); +ACPI_INIT_GLOBAL(u8, acpi_gbl_verify_table_checksum, TRUE); /* * Optionally enable output from the AML Debug Object. @@ -156,6 +189,18 @@ ACPI_INIT_GLOBAL(u8, acpi_gbl_copy_dsdt_locally, FALSE); */ ACPI_INIT_GLOBAL(u8, acpi_gbl_do_not_use_xsdt, FALSE); +/* + * Optionally support group module level code. + */ +ACPI_INIT_GLOBAL(u8, acpi_gbl_group_module_level_code, TRUE); + +/* + * Optionally support module level code by parsing the entire table as + * a term_list. Default is FALSE, do not execute entire table until some + * lock order issues are fixed. + */ +ACPI_INIT_GLOBAL(u8, acpi_gbl_parse_table_as_term_list, FALSE); + /* * Optionally use 32-bit FADT addresses if and when there is a conflict * (address mismatch) between the 32-bit and 64-bit versions of the @@ -213,23 +258,6 @@ ACPI_INIT_GLOBAL(u8, acpi_gbl_osi_data, 0); */ ACPI_INIT_GLOBAL(u8, acpi_gbl_reduced_hardware, FALSE); -/* - * Maximum timeout for While() loop iterations before forced method abort. - * This mechanism is intended to prevent infinite loops during interpreter - * execution within a host kernel. - */ -ACPI_INIT_GLOBAL(u32, acpi_gbl_max_loop_iterations, ACPI_MAX_LOOP_TIMEOUT); - -/* - * Optionally ignore AE_NOT_FOUND errors from named reference package elements - * during DSDT/SSDT table loading. This reduces error "noise" in platforms - * whose firmware is carrying around a bunch of unused package objects that - * refer to non-existent named objects. However, If the AML actually tries to - * use such a package, the unresolved element(s) will be replaced with NULL - * elements. - */ -ACPI_INIT_GLOBAL(u8, acpi_gbl_ignore_package_resolution_errors, FALSE); - /* * This mechanism is used to trace a specified AML method. The method is * traced each time it is executed. 
@@ -297,9 +325,6 @@ ACPI_GLOBAL(u8, acpi_gbl_system_awake_and_running); #define ACPI_HW_DEPENDENT_RETURN_OK(prototype) \ ACPI_EXTERNAL_RETURN_OK(prototype) -#define ACPI_HW_DEPENDENT_RETURN_UINT32(prototype) \ - ACPI_EXTERNAL_RETURN_UINT32(prototype) - #define ACPI_HW_DEPENDENT_RETURN_VOID(prototype) \ ACPI_EXTERNAL_RETURN_VOID(prototype) @@ -310,9 +335,6 @@ ACPI_GLOBAL(u8, acpi_gbl_system_awake_and_running); #define ACPI_HW_DEPENDENT_RETURN_OK(prototype) \ static ACPI_INLINE prototype {return(AE_OK);} -#define ACPI_HW_DEPENDENT_RETURN_UINT32(prototype) \ - static ACPI_INLINE prototype {return(0);} - #define ACPI_HW_DEPENDENT_RETURN_VOID(prototype) \ static ACPI_INLINE prototype {return;} @@ -458,11 +480,7 @@ ACPI_EXTERNAL_RETURN_STATUS(acpi_status ACPI_INIT_FUNCTION u8 physical)) ACPI_EXTERNAL_RETURN_STATUS(acpi_status - acpi_load_table(struct acpi_table_header *table, - u32 *table_idx)) - -ACPI_EXTERNAL_RETURN_STATUS(acpi_status - acpi_unload_table(u32 table_index)) + acpi_load_table(struct acpi_table_header *table)) ACPI_EXTERNAL_RETURN_STATUS(acpi_status acpi_unload_parent_table(acpi_handle object)) @@ -488,12 +506,10 @@ ACPI_EXTERNAL_RETURN_STATUS(acpi_status acpi_get_table(acpi_string signature, u32 instance, struct acpi_table_header **out_table)) -ACPI_EXTERNAL_RETURN_VOID(void acpi_put_table(struct acpi_table_header *table)) - ACPI_EXTERNAL_RETURN_STATUS(acpi_status - acpi_get_table_by_index(u32 table_index, - struct acpi_table_header - **out_table)) + acpi_get_table_by_index(u32 table_index, + struct acpi_table_header + **out_table)) ACPI_EXTERNAL_RETURN_STATUS(acpi_status acpi_install_table_handler(acpi_table_handler handler, void *context)) @@ -748,12 +764,9 @@ ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status u32 gpe_number, acpi_event_status *event_status)) -ACPI_HW_DEPENDENT_RETURN_UINT32(u32 acpi_dispatch_gpe(acpi_handle gpe_device, u32 gpe_number)) ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status acpi_disable_all_gpes(void)) 
ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status acpi_enable_all_runtime_gpes(void)) ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status acpi_enable_all_wakeup_gpes(void)) -ACPI_HW_DEPENDENT_RETURN_UINT32(u32 acpi_any_gpe_status_set(u32 gpe_skip_number)) -ACPI_HW_DEPENDENT_RETURN_UINT32(u32 acpi_any_fixed_event_status_set(void)) ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status acpi_get_gpe_device(u32 gpe_index, @@ -907,12 +920,6 @@ ACPI_MSG_DEPENDENT_RETURN_VOID(ACPI_PRINTF_LIKE(3) acpi_bios_error(const char *module_name, u32 line_number, const char *format, ...)) -ACPI_MSG_DEPENDENT_RETURN_VOID(ACPI_PRINTF_LIKE(4) - void ACPI_INTERNAL_VAR_XFACE - acpi_bios_exception(const char *module_name, - u32 line_number, - acpi_status status, - const char *format, ...)) ACPI_MSG_DEPENDENT_RETURN_VOID(ACPI_PRINTF_LIKE(3) void ACPI_INTERNAL_VAR_XFACE acpi_bios_warning(const char *module_name, @@ -922,7 +929,7 @@ ACPI_MSG_DEPENDENT_RETURN_VOID(ACPI_PRINTF_LIKE(3) /* * Debug output */ -ACPI_DBG_DEPENDENT_RETURN_VOID(ACPI_PRINTF_LIKE(6) +ACPI_DBG_DEPENDENT_RETURN_VOID(ACPI_PRINTF_LIKE(6) __nocapture(3) void ACPI_INTERNAL_VAR_XFACE acpi_debug_print(u32 requested_debug_level, u32 line_number, @@ -951,6 +958,15 @@ void acpi_terminate_debugger(void); /* * Divergences */ +ACPI_GLOBAL(u8, acpi_gbl_permanent_mmap); + +ACPI_EXTERNAL_RETURN_STATUS(acpi_status + acpi_get_table_with_size(acpi_string signature, + u32 instance, + struct acpi_table_header + **out_table, + acpi_size *tbl_size)) + ACPI_EXTERNAL_RETURN_STATUS(acpi_status acpi_get_data_full(acpi_handle object, acpi_object_handler handler, diff --git a/include/acpi/acrestyp.h b/include/acpi/acrestyp.h index 8e2319bbd0..16c189283e 100644 --- a/include/acpi/acrestyp.h +++ b/include/acpi/acrestyp.h @@ -1,12 +1,46 @@ -/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 */ /****************************************************************************** * * Name: acrestyp.h - Defines, types, and structures for resource descriptors * - * Copyright (C) 2000 - 
2021, Intel Corp. - * *****************************************************************************/ +/* + * Copyright (C) 2000 - 2016, Intel Corp. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions, and the following disclaimer, + * without modification. + * 2. Redistributions in binary form must reproduce at minimum a disclaimer + * substantially similar to the "NO WARRANTY" disclaimer below + * ("Disclaimer") and any redistribution must be conditioned upon + * including a substantially similar Disclaimer requirement for further + * binary redistribution. + * 3. Neither the names of the above-listed copyright holders nor the names + * of any contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * Alternatively, this software may be distributed under the terms of the + * GNU General Public License ("GPL") version 2 as published by the Free + * Software Foundation. + * + * NO WARRANTY + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, + * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGES. 
+ */ + #ifndef __ACRESTYP_H__ #define __ACRESTYP_H__ @@ -139,7 +173,7 @@ struct acpi_resource_irq { u8 descriptor_length; u8 triggering; u8 polarity; - u8 shareable; + u8 sharable; u8 wake_capable; u8 interrupt_count; u8 interrupts[1]; @@ -255,11 +289,6 @@ union acpi_resource_attribute { u8 type_specific; }; -struct acpi_resource_label { - u16 string_length; - char *string_ptr; -}; - struct acpi_resource_source { u8 index; u16 string_length; @@ -328,7 +357,7 @@ struct acpi_resource_extended_irq { u8 producer_consumer; u8 triggering; u8 polarity; - u8 shareable; + u8 sharable; u8 wake_capable; u8 interrupt_count; struct acpi_resource_source resource_source; @@ -348,7 +377,7 @@ struct acpi_resource_gpio { u8 connection_type; u8 producer_consumer; /* For values, see Producer/Consumer above */ u8 pin_config; - u8 shareable; /* For values, see Interrupt Attributes above */ + u8 sharable; /* For values, see Interrupt Attributes above */ u8 wake_capable; /* For values, see Interrupt Attributes above */ u8 io_restriction; u8 triggering; /* For values, see Interrupt Attributes above */ @@ -381,7 +410,7 @@ struct acpi_resource_gpio { #define ACPI_IO_RESTRICT_OUTPUT 2 #define ACPI_IO_RESTRICT_NONE_PRESERVE 3 -/* Common structure for I2C, SPI, UART, CSI2 serial descriptors */ +/* Common structure for I2C, SPI, and UART serial descriptors */ #define ACPI_RESOURCE_SERIAL_COMMON \ u8 revision_id; \ @@ -403,7 +432,6 @@ ACPI_RESOURCE_SERIAL_COMMON}; #define ACPI_RESOURCE_SERIAL_TYPE_I2C 1 #define ACPI_RESOURCE_SERIAL_TYPE_SPI 2 #define ACPI_RESOURCE_SERIAL_TYPE_UART 3 -#define ACPI_RESOURCE_SERIAL_TYPE_CSI2 4 /* Values for slave_mode field above */ @@ -506,86 +534,6 @@ struct acpi_resource_uart_serialbus { #define ACPI_UART_CLEAR_TO_SEND (1<<6) #define ACPI_UART_REQUEST_TO_SEND (1<<7) -struct acpi_resource_csi2_serialbus { - ACPI_RESOURCE_SERIAL_COMMON u8 local_port_instance; - u8 phy_type; -}; - -struct acpi_resource_pin_function { - u8 revision_id; - u8 pin_config; - u8 
shareable; /* For values, see Interrupt Attributes above */ - u16 function_number; - u16 pin_table_length; - u16 vendor_length; - struct acpi_resource_source resource_source; - u16 *pin_table; - u8 *vendor_data; -}; - -struct acpi_resource_pin_config { - u8 revision_id; - u8 producer_consumer; /* For values, see Producer/Consumer above */ - u8 shareable; /* For values, see Interrupt Attributes above */ - u8 pin_config_type; - u32 pin_config_value; - u16 pin_table_length; - u16 vendor_length; - struct acpi_resource_source resource_source; - u16 *pin_table; - u8 *vendor_data; -}; - -/* Values for pin_config_type field above */ - -#define ACPI_PIN_CONFIG_DEFAULT 0 -#define ACPI_PIN_CONFIG_BIAS_PULL_UP 1 -#define ACPI_PIN_CONFIG_BIAS_PULL_DOWN 2 -#define ACPI_PIN_CONFIG_BIAS_DEFAULT 3 -#define ACPI_PIN_CONFIG_BIAS_DISABLE 4 -#define ACPI_PIN_CONFIG_BIAS_HIGH_IMPEDANCE 5 -#define ACPI_PIN_CONFIG_BIAS_BUS_HOLD 6 -#define ACPI_PIN_CONFIG_DRIVE_OPEN_DRAIN 7 -#define ACPI_PIN_CONFIG_DRIVE_OPEN_SOURCE 8 -#define ACPI_PIN_CONFIG_DRIVE_PUSH_PULL 9 -#define ACPI_PIN_CONFIG_DRIVE_STRENGTH 10 -#define ACPI_PIN_CONFIG_SLEW_RATE 11 -#define ACPI_PIN_CONFIG_INPUT_DEBOUNCE 12 -#define ACPI_PIN_CONFIG_INPUT_SCHMITT_TRIGGER 13 - -struct acpi_resource_pin_group { - u8 revision_id; - u8 producer_consumer; /* For values, see Producer/Consumer above */ - u16 pin_table_length; - u16 vendor_length; - u16 *pin_table; - struct acpi_resource_label resource_label; - u8 *vendor_data; -}; - -struct acpi_resource_pin_group_function { - u8 revision_id; - u8 producer_consumer; /* For values, see Producer/Consumer above */ - u8 shareable; /* For values, see Interrupt Attributes above */ - u16 function_number; - u16 vendor_length; - struct acpi_resource_source resource_source; - struct acpi_resource_label resource_source_label; - u8 *vendor_data; -}; - -struct acpi_resource_pin_group_config { - u8 revision_id; - u8 producer_consumer; /* For values, see Producer/Consumer above */ - u8 shareable; /* For 
values, see Interrupt Attributes above */ - u8 pin_config_type; /* For values, see pin_config_type above */ - u32 pin_config_value; - u16 vendor_length; - struct acpi_resource_source resource_source; - struct acpi_resource_label resource_source_label; - u8 *vendor_data; -}; - /* ACPI_RESOURCE_TYPEs */ #define ACPI_RESOURCE_TYPE_IRQ 0 @@ -608,12 +556,7 @@ struct acpi_resource_pin_group_config { #define ACPI_RESOURCE_TYPE_GPIO 17 /* ACPI 5.0 */ #define ACPI_RESOURCE_TYPE_FIXED_DMA 18 /* ACPI 5.0 */ #define ACPI_RESOURCE_TYPE_SERIAL_BUS 19 /* ACPI 5.0 */ -#define ACPI_RESOURCE_TYPE_PIN_FUNCTION 20 /* ACPI 6.2 */ -#define ACPI_RESOURCE_TYPE_PIN_CONFIG 21 /* ACPI 6.2 */ -#define ACPI_RESOURCE_TYPE_PIN_GROUP 22 /* ACPI 6.2 */ -#define ACPI_RESOURCE_TYPE_PIN_GROUP_FUNCTION 23 /* ACPI 6.2 */ -#define ACPI_RESOURCE_TYPE_PIN_GROUP_CONFIG 24 /* ACPI 6.2 */ -#define ACPI_RESOURCE_TYPE_MAX 24 +#define ACPI_RESOURCE_TYPE_MAX 19 /* Master union for resource descriptors */ @@ -640,13 +583,7 @@ union acpi_resource_data { struct acpi_resource_i2c_serialbus i2c_serial_bus; struct acpi_resource_spi_serialbus spi_serial_bus; struct acpi_resource_uart_serialbus uart_serial_bus; - struct acpi_resource_csi2_serialbus csi2_serial_bus; struct acpi_resource_common_serialbus common_serial_bus; - struct acpi_resource_pin_function pin_function; - struct acpi_resource_pin_config pin_config; - struct acpi_resource_pin_group pin_group; - struct acpi_resource_pin_group_function pin_group_function; - struct acpi_resource_pin_group_config pin_group_config; /* Common fields */ diff --git a/include/acpi/actbl.h b/include/acpi/actbl.h index f9cda909f9..c19700e2a2 100644 --- a/include/acpi/actbl.h +++ b/include/acpi/actbl.h @@ -1,12 +1,46 @@ -/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 */ /****************************************************************************** * * Name: actbl.h - Basic ACPI Table Definitions * - * Copyright (C) 2000 - 2021, Intel Corp. 
- * *****************************************************************************/ +/* + * Copyright (C) 2000 - 2016, Intel Corp. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions, and the following disclaimer, + * without modification. + * 2. Redistributions in binary form must reproduce at minimum a disclaimer + * substantially similar to the "NO WARRANTY" disclaimer below + * ("Disclaimer") and any redistribution must be conditioned upon + * including a substantially similar Disclaimer requirement for further + * binary redistribution. + * 3. Neither the names of the above-listed copyright holders nor the names + * of any contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * Alternatively, this software may be distributed under the terms of the + * GNU General Public License ("GPL") version 2 as published by the Free + * Software Foundation. + * + * NO WARRANTY + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, + * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGES. 
+ */ + #ifndef __ACTBL_H__ #define __ACTBL_H__ @@ -38,7 +72,6 @@ #define ACPI_SIG_XSDT "XSDT" /* Extended System Description Table */ #define ACPI_SIG_SSDT "SSDT" /* Secondary System Description Table */ #define ACPI_RSDP_NAME "RSDP" /* Short name for RSDP, not signature */ -#define ACPI_OEM_NAME "OEM" /* Short name for OEM, not signature */ /* * All tables and structures must be byte-packed to match the ACPI @@ -66,14 +99,14 @@ ******************************************************************************/ struct acpi_table_header { - char signature[ACPI_NAMESEG_SIZE]; /* ASCII table signature */ + char signature[ACPI_NAME_SIZE]; /* ASCII table signature */ u32 length; /* Length of table in bytes, including this header */ u8 revision; /* ACPI Specification minor version number */ u8 checksum; /* To make sum of entire table == 0 */ char oem_id[ACPI_OEM_ID_SIZE]; /* ASCII OEM identification */ char oem_table_id[ACPI_OEM_TABLE_ID_SIZE]; /* ASCII OEM table identification */ u32 oem_revision; /* OEM revision number */ - char asl_compiler_id[ACPI_NAMESEG_SIZE]; /* ASCII ASL compiler vendor ID */ + char asl_compiler_id[ACPI_NAME_SIZE]; /* ASCII ASL compiler vendor ID */ u32 asl_compiler_revision; /* ASL compiler version */ }; @@ -338,30 +371,14 @@ struct acpi_table_desc { union acpi_name_union signature; acpi_owner_id owner_id; u8 flags; - u16 validation_count; }; -/* - * Maximum value of the validation_count field in struct acpi_table_desc. - * When reached, validation_count cannot be changed any more and the table will - * be permanently regarded as validated. - * - * This is to prevent situations in which unbalanced table get/put operations - * may cause premature table unmapping in the OS to happen. - * - * The maximum validation count can be defined to any value, but should be - * greater than the maximum number of OS early stage mapping slots to avoid - * leaking early stage table mappings to the late stage. 
- */ -#define ACPI_MAX_TABLE_VALIDATIONS ACPI_UINT16_MAX - /* Masks for Flags field above */ #define ACPI_TABLE_ORIGIN_EXTERNAL_VIRTUAL (0) /* Virtual address, external maintained */ #define ACPI_TABLE_ORIGIN_INTERNAL_PHYSICAL (1) /* Physical address, internally mapped */ #define ACPI_TABLE_ORIGIN_INTERNAL_VIRTUAL (2) /* Virtual address, internallly allocated */ #define ACPI_TABLE_ORIGIN_MASK (3) -#define ACPI_TABLE_IS_VERIFIED (4) #define ACPI_TABLE_IS_LOADED (8) /* diff --git a/include/acpi/actbl1.h b/include/acpi/actbl1.h index 159070edd0..796d6baae3 100644 --- a/include/acpi/actbl1.h +++ b/include/acpi/actbl1.h @@ -1,22 +1,58 @@ -/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 */ /****************************************************************************** * * Name: actbl1.h - Additional ACPI table definitions * - * Copyright (C) 2000 - 2021, Intel Corp. - * *****************************************************************************/ +/* + * Copyright (C) 2000 - 2016, Intel Corp. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions, and the following disclaimer, + * without modification. + * 2. Redistributions in binary form must reproduce at minimum a disclaimer + * substantially similar to the "NO WARRANTY" disclaimer below + * ("Disclaimer") and any redistribution must be conditioned upon + * including a substantially similar Disclaimer requirement for further + * binary redistribution. + * 3. Neither the names of the above-listed copyright holders nor the names + * of any contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. 
+ * + * Alternatively, this software may be distributed under the terms of the + * GNU General Public License ("GPL") version 2 as published by the Free + * Software Foundation. + * + * NO WARRANTY + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, + * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGES. + */ + #ifndef __ACTBL1_H__ #define __ACTBL1_H__ /******************************************************************************* * - * Additional ACPI Tables + * Additional ACPI Tables (1) * * These tables are not consumed directly by the ACPICA subsystem, but are * included here to support device drivers and the AML disassembler. * + * The tables in this file are fully defined within the ACPI specification. + * ******************************************************************************/ /* @@ -24,44 +60,18 @@ * file. Useful because they make it more difficult to inadvertently type in * the wrong signature. */ -#define ACPI_SIG_AEST "AEST" /* Arm Error Source Table */ -#define ACPI_SIG_ASF "ASF!" 
/* Alert Standard Format table */ #define ACPI_SIG_BERT "BERT" /* Boot Error Record Table */ -#define ACPI_SIG_BGRT "BGRT" /* Boot Graphics Resource Table */ -#define ACPI_SIG_BOOT "BOOT" /* Simple Boot Flag Table */ -#define ACPI_SIG_CEDT "CEDT" /* CXL Early Discovery Table */ #define ACPI_SIG_CPEP "CPEP" /* Corrected Platform Error Polling table */ -#define ACPI_SIG_CSRT "CSRT" /* Core System Resource Table */ -#define ACPI_SIG_DBG2 "DBG2" /* Debug Port table type 2 */ -#define ACPI_SIG_DBGP "DBGP" /* Debug Port table */ -#define ACPI_SIG_DMAR "DMAR" /* DMA Remapping table */ -#define ACPI_SIG_DRTM "DRTM" /* Dynamic Root of Trust for Measurement table */ #define ACPI_SIG_ECDT "ECDT" /* Embedded Controller Boot Resources Table */ #define ACPI_SIG_EINJ "EINJ" /* Error Injection table */ #define ACPI_SIG_ERST "ERST" /* Error Record Serialization Table */ -#define ACPI_SIG_FPDT "FPDT" /* Firmware Performance Data Table */ -#define ACPI_SIG_GTDT "GTDT" /* Generic Timer Description Table */ #define ACPI_SIG_HEST "HEST" /* Hardware Error Source Table */ -#define ACPI_SIG_HMAT "HMAT" /* Heterogeneous Memory Attributes Table */ -#define ACPI_SIG_HPET "HPET" /* High Precision Event Timer table */ -#define ACPI_SIG_IBFT "IBFT" /* iSCSI Boot Firmware Table */ - -#define ACPI_SIG_S3PT "S3PT" /* S3 Performance (sub)Table */ -#define ACPI_SIG_PCCS "PCC" /* PCC Shared Memory Region */ - -/* Reserved table signatures */ - -#define ACPI_SIG_MATR "MATR" /* Memory Address Translation Table */ -#define ACPI_SIG_MSDM "MSDM" /* Microsoft Data Management Table */ - -/* - * These tables have been seen in the field, but no definition has been found - */ -#ifdef ACPI_UNDEFINED_TABLES -#define ACPI_SIG_ATKG "ATKG" -#define ACPI_SIG_GSCI "GSCI" /* GMCH SCI table */ -#define ACPI_SIG_IEIT "IEIT" -#endif +#define ACPI_SIG_MADT "APIC" /* Multiple APIC Description Table */ +#define ACPI_SIG_MSCT "MSCT" /* Maximum System Characteristics Table */ +#define ACPI_SIG_SBST "SBST" /* Smart Battery 
Specification Table */ +#define ACPI_SIG_SLIT "SLIT" /* System Locality Distance Information Table */ +#define ACPI_SIG_SRAT "SRAT" /* System Resource Affinity Table */ +#define ACPI_SIG_NFIT "NFIT" /* NVDIMM Firmware Interface Table */ /* * All tables must be byte-packed to match the ACPI specification, since @@ -106,120 +116,6 @@ struct acpi_whea_header { u64 mask; /* Bitmask required for this register instruction */ }; -/******************************************************************************* - * - * ASF - Alert Standard Format table (Signature "ASF!") - * Revision 0x10 - * - * Conforms to the Alert Standard Format Specification V2.0, 23 April 2003 - * - ******************************************************************************/ - -struct acpi_table_asf { - struct acpi_table_header header; /* Common ACPI table header */ -}; - -/* ASF subtable header */ - -struct acpi_asf_header { - u8 type; - u8 reserved; - u16 length; -}; - -/* Values for Type field above */ - -enum acpi_asf_type { - ACPI_ASF_TYPE_INFO = 0, - ACPI_ASF_TYPE_ALERT = 1, - ACPI_ASF_TYPE_CONTROL = 2, - ACPI_ASF_TYPE_BOOT = 3, - ACPI_ASF_TYPE_ADDRESS = 4, - ACPI_ASF_TYPE_RESERVED = 5 -}; - -/* - * ASF subtables - */ - -/* 0: ASF Information */ - -struct acpi_asf_info { - struct acpi_asf_header header; - u8 min_reset_value; - u8 min_poll_interval; - u16 system_id; - u32 mfg_id; - u8 flags; - u8 reserved2[3]; -}; - -/* Masks for Flags field above */ - -#define ACPI_ASF_SMBUS_PROTOCOLS (1) - -/* 1: ASF Alerts */ - -struct acpi_asf_alert { - struct acpi_asf_header header; - u8 assert_mask; - u8 deassert_mask; - u8 alerts; - u8 data_length; -}; - -struct acpi_asf_alert_data { - u8 address; - u8 command; - u8 mask; - u8 value; - u8 sensor_type; - u8 type; - u8 offset; - u8 source_type; - u8 severity; - u8 sensor_number; - u8 entity; - u8 instance; -}; - -/* 2: ASF Remote Control */ - -struct acpi_asf_remote { - struct acpi_asf_header header; - u8 controls; - u8 data_length; - u16 reserved2; -}; 
- -struct acpi_asf_control_data { - u8 function; - u8 address; - u8 command; - u8 value; -}; - -/* 3: ASF RMCP Boot Options */ - -struct acpi_asf_rmcp { - struct acpi_asf_header header; - u8 capabilities[7]; - u8 completion_code; - u32 enterprise_id; - u8 command; - u16 parameter; - u16 boot_options; - u16 oem_parameters; -}; - -/* 4: ASF Address */ - -struct acpi_asf_address { - struct acpi_asf_header header; - u8 eprom_address; - u8 devices; -}; - /******************************************************************************* * * BERT - Boot Error Record Table (ACPI 4.0) @@ -266,125 +162,6 @@ enum acpi_bert_error_severity { * uses the struct acpi_hest_generic_data defined under the HEST table below */ -/******************************************************************************* - * - * BGRT - Boot Graphics Resource Table (ACPI 5.0) - * Version 1 - * - ******************************************************************************/ - -struct acpi_table_bgrt { - struct acpi_table_header header; /* Common ACPI table header */ - u16 version; - u8 status; - u8 image_type; - u64 image_address; - u32 image_offset_x; - u32 image_offset_y; -}; - -/* Flags for Status field above */ - -#define ACPI_BGRT_DISPLAYED (1) -#define ACPI_BGRT_ORIENTATION_OFFSET (3 << 1) - -/******************************************************************************* - * - * BOOT - Simple Boot Flag Table - * Version 1 - * - * Conforms to the "Simple Boot Flag Specification", Version 2.1 - * - ******************************************************************************/ - -struct acpi_table_boot { - struct acpi_table_header header; /* Common ACPI table header */ - u8 cmos_index; /* Index in CMOS RAM for the boot register */ - u8 reserved[3]; -}; - -/******************************************************************************* - * - * CEDT - CXL Early Discovery Table - * Version 1 - * - * Conforms to the "CXL Early Discovery Table" (CXL 2.0) - * - 
******************************************************************************/ - -struct acpi_table_cedt { - struct acpi_table_header header; /* Common ACPI table header */ -}; - -/* CEDT subtable header (Performance Record Structure) */ - -struct acpi_cedt_header { - u8 type; - u8 reserved; - u16 length; -}; - -/* Values for Type field above */ - -enum acpi_cedt_type { - ACPI_CEDT_TYPE_CHBS = 0, - ACPI_CEDT_TYPE_CFMWS = 1, - ACPI_CEDT_TYPE_RESERVED = 2, -}; - -/* Values for version field above */ - -#define ACPI_CEDT_CHBS_VERSION_CXL11 (0) -#define ACPI_CEDT_CHBS_VERSION_CXL20 (1) - -/* Values for length field above */ - -#define ACPI_CEDT_CHBS_LENGTH_CXL11 (0x2000) -#define ACPI_CEDT_CHBS_LENGTH_CXL20 (0x10000) - -/* - * CEDT subtables - */ - -/* 0: CXL Host Bridge Structure */ - -struct acpi_cedt_chbs { - struct acpi_cedt_header header; - u32 uid; - u32 cxl_version; - u32 reserved; - u64 base; - u64 length; -}; - -/* 1: CXL Fixed Memory Window Structure */ - -struct acpi_cedt_cfmws { - struct acpi_cedt_header header; - u32 reserved1; - u64 base_hpa; - u64 window_size; - u8 interleave_ways; - u8 interleave_arithmetic; - u16 reserved2; - u32 granularity; - u16 restrictions; - u16 qtg_id; - u32 interleave_targets[]; -}; - -/* Values for Interleave Arithmetic field above */ - -#define ACPI_CEDT_CFMWS_ARITHMETIC_MODULO (0) - -/* Values for Restrictions field above */ - -#define ACPI_CEDT_CFMWS_RESTRICT_TYPE2 (1) -#define ACPI_CEDT_CFMWS_RESTRICT_TYPE3 (1<<1) -#define ACPI_CEDT_CFMWS_RESTRICT_VOLATILE (1<<2) -#define ACPI_CEDT_CFMWS_RESTRICT_PMEM (1<<3) -#define ACPI_CEDT_CFMWS_RESTRICT_FIXED (1<<4) - /******************************************************************************* * * CPEP - Corrected Platform Error Polling table (ACPI 4.0) @@ -406,370 +183,6 @@ struct acpi_cpep_polling { u32 interval; /* Polling interval (msec) */ }; -/******************************************************************************* - * - * CSRT - Core System Resource Table - * 
Version 0 - * - * Conforms to the "Core System Resource Table (CSRT)", November 14, 2011 - * - ******************************************************************************/ - -struct acpi_table_csrt { - struct acpi_table_header header; /* Common ACPI table header */ -}; - -/* Resource Group subtable */ - -struct acpi_csrt_group { - u32 length; - u32 vendor_id; - u32 subvendor_id; - u16 device_id; - u16 subdevice_id; - u16 revision; - u16 reserved; - u32 shared_info_length; - - /* Shared data immediately follows (Length = shared_info_length) */ -}; - -/* Shared Info subtable */ - -struct acpi_csrt_shared_info { - u16 major_version; - u16 minor_version; - u32 mmio_base_low; - u32 mmio_base_high; - u32 gsi_interrupt; - u8 interrupt_polarity; - u8 interrupt_mode; - u8 num_channels; - u8 dma_address_width; - u16 base_request_line; - u16 num_handshake_signals; - u32 max_block_size; - - /* Resource descriptors immediately follow (Length = Group length - shared_info_length) */ -}; - -/* Resource Descriptor subtable */ - -struct acpi_csrt_descriptor { - u32 length; - u16 type; - u16 subtype; - u32 uid; - - /* Resource-specific information immediately follows */ -}; - -/* Resource Types */ - -#define ACPI_CSRT_TYPE_INTERRUPT 0x0001 -#define ACPI_CSRT_TYPE_TIMER 0x0002 -#define ACPI_CSRT_TYPE_DMA 0x0003 - -/* Resource Subtypes */ - -#define ACPI_CSRT_XRUPT_LINE 0x0000 -#define ACPI_CSRT_XRUPT_CONTROLLER 0x0001 -#define ACPI_CSRT_TIMER 0x0000 -#define ACPI_CSRT_DMA_CHANNEL 0x0000 -#define ACPI_CSRT_DMA_CONTROLLER 0x0001 - -/******************************************************************************* - * - * DBG2 - Debug Port Table 2 - * Version 0 (Both main table and subtables) - * - * Conforms to "Microsoft Debug Port Table 2 (DBG2)", September 21, 2020 - * - ******************************************************************************/ - -struct acpi_table_dbg2 { - struct acpi_table_header header; /* Common ACPI table header */ - u32 info_offset; - u32 info_count; -}; 
- -struct acpi_dbg2_header { - u32 info_offset; - u32 info_count; -}; - -/* Debug Device Information Subtable */ - -struct acpi_dbg2_device { - u8 revision; - u16 length; - u8 register_count; /* Number of base_address registers */ - u16 namepath_length; - u16 namepath_offset; - u16 oem_data_length; - u16 oem_data_offset; - u16 port_type; - u16 port_subtype; - u16 reserved; - u16 base_address_offset; - u16 address_size_offset; - /* - * Data that follows: - * base_address (required) - Each in 12-byte Generic Address Structure format. - * address_size (required) - Array of u32 sizes corresponding to each base_address register. - * Namepath (required) - Null terminated string. Single dot if not supported. - * oem_data (optional) - Length is oem_data_length. - */ -}; - -/* Types for port_type field above */ - -#define ACPI_DBG2_SERIAL_PORT 0x8000 -#define ACPI_DBG2_1394_PORT 0x8001 -#define ACPI_DBG2_USB_PORT 0x8002 -#define ACPI_DBG2_NET_PORT 0x8003 - -/* Subtypes for port_subtype field above */ - -#define ACPI_DBG2_16550_COMPATIBLE 0x0000 -#define ACPI_DBG2_16550_SUBSET 0x0001 -#define ACPI_DBG2_MAX311XE_SPI 0x0002 -#define ACPI_DBG2_ARM_PL011 0x0003 -#define ACPI_DBG2_MSM8X60 0x0004 -#define ACPI_DBG2_16550_NVIDIA 0x0005 -#define ACPI_DBG2_TI_OMAP 0x0006 -#define ACPI_DBG2_APM88XXXX 0x0008 -#define ACPI_DBG2_MSM8974 0x0009 -#define ACPI_DBG2_SAM5250 0x000A -#define ACPI_DBG2_INTEL_USIF 0x000B -#define ACPI_DBG2_IMX6 0x000C -#define ACPI_DBG2_ARM_SBSA_32BIT 0x000D -#define ACPI_DBG2_ARM_SBSA_GENERIC 0x000E -#define ACPI_DBG2_ARM_DCC 0x000F -#define ACPI_DBG2_BCM2835 0x0010 -#define ACPI_DBG2_SDM845_1_8432MHZ 0x0011 -#define ACPI_DBG2_16550_WITH_GAS 0x0012 -#define ACPI_DBG2_SDM845_7_372MHZ 0x0013 -#define ACPI_DBG2_INTEL_LPSS 0x0014 - -#define ACPI_DBG2_1394_STANDARD 0x0000 - -#define ACPI_DBG2_USB_XHCI 0x0000 -#define ACPI_DBG2_USB_EHCI 0x0001 - -/******************************************************************************* - * - * DBGP - Debug Port table - * 
Version 1 - * - * Conforms to the "Debug Port Specification", Version 1.00, 2/9/2000 - * - ******************************************************************************/ - -struct acpi_table_dbgp { - struct acpi_table_header header; /* Common ACPI table header */ - u8 type; /* 0=full 16550, 1=subset of 16550 */ - u8 reserved[3]; - struct acpi_generic_address debug_port; -}; - -/******************************************************************************* - * - * DMAR - DMA Remapping table - * Version 1 - * - * Conforms to "Intel Virtualization Technology for Directed I/O", - * Version 2.3, October 2014 - * - ******************************************************************************/ - -struct acpi_table_dmar { - struct acpi_table_header header; /* Common ACPI table header */ - u8 width; /* Host Address Width */ - u8 flags; - u8 reserved[10]; -}; - -/* Masks for Flags field above */ - -#define ACPI_DMAR_INTR_REMAP (1) -#define ACPI_DMAR_X2APIC_OPT_OUT (1<<1) -#define ACPI_DMAR_X2APIC_MODE (1<<2) - -/* DMAR subtable header */ - -struct acpi_dmar_header { - u16 type; - u16 length; -}; - -/* Values for subtable type in struct acpi_dmar_header */ - -enum acpi_dmar_type { - ACPI_DMAR_TYPE_HARDWARE_UNIT = 0, - ACPI_DMAR_TYPE_RESERVED_MEMORY = 1, - ACPI_DMAR_TYPE_ROOT_ATS = 2, - ACPI_DMAR_TYPE_HARDWARE_AFFINITY = 3, - ACPI_DMAR_TYPE_NAMESPACE = 4, - ACPI_DMAR_TYPE_SATC = 5, - ACPI_DMAR_TYPE_RESERVED = 6 /* 6 and greater are reserved */ -}; - -/* DMAR Device Scope structure */ - -struct acpi_dmar_device_scope { - u8 entry_type; - u8 length; - u16 reserved; - u8 enumeration_id; - u8 bus; -}; - -/* Values for entry_type in struct acpi_dmar_device_scope - device types */ - -enum acpi_dmar_scope_type { - ACPI_DMAR_SCOPE_TYPE_NOT_USED = 0, - ACPI_DMAR_SCOPE_TYPE_ENDPOINT = 1, - ACPI_DMAR_SCOPE_TYPE_BRIDGE = 2, - ACPI_DMAR_SCOPE_TYPE_IOAPIC = 3, - ACPI_DMAR_SCOPE_TYPE_HPET = 4, - ACPI_DMAR_SCOPE_TYPE_NAMESPACE = 5, - ACPI_DMAR_SCOPE_TYPE_RESERVED = 6 /* 6 and greater are 
reserved */ -}; - -struct acpi_dmar_pci_path { - u8 device; - u8 function; -}; - -/* - * DMAR Subtables, correspond to Type in struct acpi_dmar_header - */ - -/* 0: Hardware Unit Definition */ - -struct acpi_dmar_hardware_unit { - struct acpi_dmar_header header; - u8 flags; - u8 reserved; - u16 segment; - u64 address; /* Register Base Address */ -}; - -/* Masks for Flags field above */ - -#define ACPI_DMAR_INCLUDE_ALL (1) - -/* 1: Reserved Memory Definition */ - -struct acpi_dmar_reserved_memory { - struct acpi_dmar_header header; - u16 reserved; - u16 segment; - u64 base_address; /* 4K aligned base address */ - u64 end_address; /* 4K aligned limit address */ -}; - -/* Masks for Flags field above */ - -#define ACPI_DMAR_ALLOW_ALL (1) - -/* 2: Root Port ATS Capability Reporting Structure */ - -struct acpi_dmar_atsr { - struct acpi_dmar_header header; - u8 flags; - u8 reserved; - u16 segment; -}; - -/* Masks for Flags field above */ - -#define ACPI_DMAR_ALL_PORTS (1) - -/* 3: Remapping Hardware Static Affinity Structure */ - -struct acpi_dmar_rhsa { - struct acpi_dmar_header header; - u32 reserved; - u64 base_address; - u32 proximity_domain; -}; - -/* 4: ACPI Namespace Device Declaration Structure */ - -struct acpi_dmar_andd { - struct acpi_dmar_header header; - u8 reserved[3]; - u8 device_number; - char device_name[1]; -}; - -/* 5: SOC Integrated Address Translation Cache Reporting Structure */ - -struct acpi_dmar_satc { - struct acpi_dmar_header header; - u8 flags; - u8 reserved; - u16 segment; -}; -/******************************************************************************* - * - * DRTM - Dynamic Root of Trust for Measurement table - * Conforms to "TCG D-RTM Architecture" June 17 2013, Version 1.0.0 - * Table version 1 - * - ******************************************************************************/ - -struct acpi_table_drtm { - struct acpi_table_header header; /* Common ACPI table header */ - u64 entry_base_address; - u64 entry_length; - u32 
entry_address32; - u64 entry_address64; - u64 exit_address; - u64 log_area_address; - u32 log_area_length; - u64 arch_dependent_address; - u32 flags; -}; - -/* Flag Definitions for above */ - -#define ACPI_DRTM_ACCESS_ALLOWED (1) -#define ACPI_DRTM_ENABLE_GAP_CODE (1<<1) -#define ACPI_DRTM_INCOMPLETE_MEASUREMENTS (1<<2) -#define ACPI_DRTM_AUTHORITY_ORDER (1<<3) - -/* 1) Validated Tables List (64-bit addresses) */ - -struct acpi_drtm_vtable_list { - u32 validated_table_count; - u64 validated_tables[1]; -}; - -/* 2) Resources List (of Resource Descriptors) */ - -/* Resource Descriptor */ - -struct acpi_drtm_resource { - u8 size[7]; - u8 type; - u64 address; -}; - -struct acpi_drtm_resource_list { - u32 resource_count; - struct acpi_drtm_resource resources[1]; -}; - -/* 3) Platform-specific Identifiers List */ - -struct acpi_drtm_dps_id { - u32 dps_id_length; - u8 dps_id[16]; -}; - /******************************************************************************* * * ECDT - Embedded Controller Boot Resources Table @@ -968,7 +381,7 @@ enum acpi_erst_instructions { /* Command status return values */ enum acpi_erst_command_status { - ACPI_ERST_SUCCESS = 0, + ACPI_ERST_SUCCESS = 0, ACPI_ERST_NO_SPACE = 1, ACPI_ERST_NOT_AVAILABLE = 2, ACPI_ERST_FAILURE = 3, @@ -984,202 +397,6 @@ struct acpi_erst_info { u8 data[48]; }; -/******************************************************************************* - * - * FPDT - Firmware Performance Data Table (ACPI 5.0) - * Version 1 - * - ******************************************************************************/ - -struct acpi_table_fpdt { - struct acpi_table_header header; /* Common ACPI table header */ -}; - -/* FPDT subtable header (Performance Record Structure) */ - -struct acpi_fpdt_header { - u16 type; - u8 length; - u8 revision; -}; - -/* Values for Type field above */ - -enum acpi_fpdt_type { - ACPI_FPDT_TYPE_BOOT = 0, - ACPI_FPDT_TYPE_S3PERF = 1 -}; - -/* - * FPDT subtables - */ - -/* 0: Firmware Basic Boot Performance Record 
*/ - -struct acpi_fpdt_boot_pointer { - struct acpi_fpdt_header header; - u8 reserved[4]; - u64 address; -}; - -/* 1: S3 Performance Table Pointer Record */ - -struct acpi_fpdt_s3pt_pointer { - struct acpi_fpdt_header header; - u8 reserved[4]; - u64 address; -}; - -/* - * S3PT - S3 Performance Table. This table is pointed to by the - * S3 Pointer Record above. - */ -struct acpi_table_s3pt { - u8 signature[4]; /* "S3PT" */ - u32 length; -}; - -/* - * S3PT Subtables (Not part of the actual FPDT) - */ - -/* Values for Type field in S3PT header */ - -enum acpi_s3pt_type { - ACPI_S3PT_TYPE_RESUME = 0, - ACPI_S3PT_TYPE_SUSPEND = 1, - ACPI_FPDT_BOOT_PERFORMANCE = 2 -}; - -struct acpi_s3pt_resume { - struct acpi_fpdt_header header; - u32 resume_count; - u64 full_resume; - u64 average_resume; -}; - -struct acpi_s3pt_suspend { - struct acpi_fpdt_header header; - u64 suspend_start; - u64 suspend_end; -}; - -/* - * FPDT Boot Performance Record (Not part of the actual FPDT) - */ -struct acpi_fpdt_boot { - struct acpi_fpdt_header header; - u8 reserved[4]; - u64 reset_end; - u64 load_start; - u64 startup_start; - u64 exit_services_entry; - u64 exit_services_exit; -}; - -/******************************************************************************* - * - * GTDT - Generic Timer Description Table (ACPI 5.1) - * Version 2 - * - ******************************************************************************/ - -struct acpi_table_gtdt { - struct acpi_table_header header; /* Common ACPI table header */ - u64 counter_block_addresss; - u32 reserved; - u32 secure_el1_interrupt; - u32 secure_el1_flags; - u32 non_secure_el1_interrupt; - u32 non_secure_el1_flags; - u32 virtual_timer_interrupt; - u32 virtual_timer_flags; - u32 non_secure_el2_interrupt; - u32 non_secure_el2_flags; - u64 counter_read_block_address; - u32 platform_timer_count; - u32 platform_timer_offset; -}; - -/* Flag Definitions: Timer Block Physical Timers and Virtual timers */ - -#define ACPI_GTDT_INTERRUPT_MODE (1) 
-#define ACPI_GTDT_INTERRUPT_POLARITY (1<<1) -#define ACPI_GTDT_ALWAYS_ON (1<<2) - -struct acpi_gtdt_el2 { - u32 virtual_el2_timer_gsiv; - u32 virtual_el2_timer_flags; -}; - -/* Common GTDT subtable header */ - -struct acpi_gtdt_header { - u8 type; - u16 length; -}; - -/* Values for GTDT subtable type above */ - -enum acpi_gtdt_type { - ACPI_GTDT_TYPE_TIMER_BLOCK = 0, - ACPI_GTDT_TYPE_WATCHDOG = 1, - ACPI_GTDT_TYPE_RESERVED = 2 /* 2 and greater are reserved */ -}; - -/* GTDT Subtables, correspond to Type in struct acpi_gtdt_header */ - -/* 0: Generic Timer Block */ - -struct acpi_gtdt_timer_block { - struct acpi_gtdt_header header; - u8 reserved; - u64 block_address; - u32 timer_count; - u32 timer_offset; -}; - -/* Timer Sub-Structure, one per timer */ - -struct acpi_gtdt_timer_entry { - u8 frame_number; - u8 reserved[3]; - u64 base_address; - u64 el0_base_address; - u32 timer_interrupt; - u32 timer_flags; - u32 virtual_timer_interrupt; - u32 virtual_timer_flags; - u32 common_flags; -}; - -/* Flag Definitions: timer_flags and virtual_timer_flags above */ - -#define ACPI_GTDT_GT_IRQ_MODE (1) -#define ACPI_GTDT_GT_IRQ_POLARITY (1<<1) - -/* Flag Definitions: common_flags above */ - -#define ACPI_GTDT_GT_IS_SECURE_TIMER (1) -#define ACPI_GTDT_GT_ALWAYS_ON (1<<1) - -/* 1: SBSA Generic Watchdog Structure */ - -struct acpi_gtdt_watchdog { - struct acpi_gtdt_header header; - u8 reserved; - u64 refresh_frame_address; - u64 control_frame_address; - u32 timer_interrupt; - u32 timer_flags; -}; - -/* Flag Definitions: timer_flags above */ - -#define ACPI_GTDT_WATCHDOG_IRQ_MODE (1) -#define ACPI_GTDT_WATCHDOG_IRQ_POLARITY (1<<1) -#define ACPI_GTDT_WATCHDOG_SECURE (1<<2) - /******************************************************************************* * * HEST - Hardware Error Source Table (ACPI 4.0) @@ -1213,8 +430,7 @@ enum acpi_hest_types { ACPI_HEST_TYPE_AER_BRIDGE = 8, ACPI_HEST_TYPE_GENERIC_ERROR = 9, ACPI_HEST_TYPE_GENERIC_ERROR_V2 = 10, - 
ACPI_HEST_TYPE_IA32_DEFERRED_CHECK = 11, - ACPI_HEST_TYPE_RESERVED = 12 /* 12 and greater are reserved */ + ACPI_HEST_TYPE_RESERVED = 11 /* 11 and greater are reserved */ }; /* @@ -1260,7 +476,6 @@ struct acpi_hest_aer_common { #define ACPI_HEST_FIRMWARE_FIRST (1) #define ACPI_HEST_GLOBAL (1<<1) -#define ACPI_HEST_GHES_ASSIST (1<<2) /* * Macros to access the bus/segment numbers in Bus field above: @@ -1298,8 +513,7 @@ enum acpi_hest_notify_types { ACPI_HEST_NOTIFY_SEA = 8, /* ACPI 6.1 */ ACPI_HEST_NOTIFY_SEI = 9, /* ACPI 6.1 */ ACPI_HEST_NOTIFY_GSIV = 10, /* ACPI 6.1 */ - ACPI_HEST_NOTIFY_SOFTWARE_DELEGATED = 11, /* ACPI 6.2 */ - ACPI_HEST_NOTIFY_RESERVED = 12 /* 12 and greater are reserved */ + ACPI_HEST_NOTIFY_RESERVED = 11 /* 11 and greater are reserved */ }; /* Values for config_write_enable bitfield above */ @@ -1320,7 +534,7 @@ enum acpi_hest_notify_types { struct acpi_hest_ia_machine_check { struct acpi_hest_header header; u16 reserved1; - u8 flags; /* See flags ACPI_HEST_GLOBAL, etc. above */ + u8 flags; u8 enabled; u32 records_to_preallocate; u32 max_sections_per_record; @@ -1335,7 +549,7 @@ struct acpi_hest_ia_machine_check { struct acpi_hest_ia_corrected { struct acpi_hest_header header; u16 reserved1; - u8 flags; /* See flags ACPI_HEST_GLOBAL, etc. above */ + u8 flags; u8 enabled; u32 records_to_preallocate; u32 max_sections_per_record; @@ -1472,268 +686,584 @@ struct acpi_hest_generic_data_v300 { #define ACPI_HEST_GEN_VALID_FRU_STRING (1<<1) #define ACPI_HEST_GEN_VALID_TIMESTAMP (1<<2) -/* 11: IA32 Deferred Machine Check Exception (ACPI 6.2) */ - -struct acpi_hest_ia_deferred_check { - struct acpi_hest_header header; - u16 reserved1; - u8 flags; /* See flags ACPI_HEST_GLOBAL, etc. 
above */ - u8 enabled; - u32 records_to_preallocate; - u32 max_sections_per_record; - struct acpi_hest_notify notify; - u8 num_hardware_banks; - u8 reserved2[3]; -}; - /******************************************************************************* * - * HMAT - Heterogeneous Memory Attributes Table (ACPI 6.2) - * Version 1 + * MADT - Multiple APIC Description Table + * Version 3 * ******************************************************************************/ -struct acpi_table_hmat { +struct acpi_table_madt { struct acpi_table_header header; /* Common ACPI table header */ - u32 reserved; + u32 address; /* Physical address of local APIC */ + u32 flags; }; -/* Values for HMAT structure types */ +/* Masks for Flags field above */ -enum acpi_hmat_type { - ACPI_HMAT_TYPE_PROXIMITY = 0, /* Memory proximity domain attributes */ - ACPI_HMAT_TYPE_LOCALITY = 1, /* System locality latency and bandwidth information */ - ACPI_HMAT_TYPE_CACHE = 2, /* Memory side cache information */ - ACPI_HMAT_TYPE_RESERVED = 3 /* 3 and greater are reserved */ -}; +#define ACPI_MADT_PCAT_COMPAT (1) /* 00: System also has dual 8259s */ -struct acpi_hmat_structure { - u16 type; - u16 reserved; - u32 length; +/* Values for PCATCompat flag */ + +#define ACPI_MADT_DUAL_PIC 0 +#define ACPI_MADT_MULTIPLE_APIC 1 + +/* Values for MADT subtable type in struct acpi_subtable_header */ + +enum acpi_madt_type { + ACPI_MADT_TYPE_LOCAL_APIC = 0, + ACPI_MADT_TYPE_IO_APIC = 1, + ACPI_MADT_TYPE_INTERRUPT_OVERRIDE = 2, + ACPI_MADT_TYPE_NMI_SOURCE = 3, + ACPI_MADT_TYPE_LOCAL_APIC_NMI = 4, + ACPI_MADT_TYPE_LOCAL_APIC_OVERRIDE = 5, + ACPI_MADT_TYPE_IO_SAPIC = 6, + ACPI_MADT_TYPE_LOCAL_SAPIC = 7, + ACPI_MADT_TYPE_INTERRUPT_SOURCE = 8, + ACPI_MADT_TYPE_LOCAL_X2APIC = 9, + ACPI_MADT_TYPE_LOCAL_X2APIC_NMI = 10, + ACPI_MADT_TYPE_GENERIC_INTERRUPT = 11, + ACPI_MADT_TYPE_GENERIC_DISTRIBUTOR = 12, + ACPI_MADT_TYPE_GENERIC_MSI_FRAME = 13, + ACPI_MADT_TYPE_GENERIC_REDISTRIBUTOR = 14, + ACPI_MADT_TYPE_GENERIC_TRANSLATOR = 15, 
+ ACPI_MADT_TYPE_RESERVED = 16 /* 16 and greater are reserved */ }; /* - * HMAT Structures, correspond to Type in struct acpi_hmat_structure + * MADT Subtables, correspond to Type in struct acpi_subtable_header */ -/* 0: Memory proximity domain attributes */ +/* 0: Processor Local APIC */ -struct acpi_hmat_proximity_domain { - struct acpi_hmat_structure header; - u16 flags; - u16 reserved1; - u32 processor_PD; /* Processor proximity domain */ - u32 memory_PD; /* Memory proximity domain */ - u32 reserved2; - u64 reserved3; - u64 reserved4; +struct acpi_madt_local_apic { + struct acpi_subtable_header header; + u8 processor_id; /* ACPI processor id */ + u8 id; /* Processor's local APIC id */ + u32 lapic_flags; +}; + +/* 1: IO APIC */ + +struct acpi_madt_io_apic { + struct acpi_subtable_header header; + u8 id; /* I/O APIC ID */ + u8 reserved; /* reserved - must be zero */ + u32 address; /* APIC physical address */ + u32 global_irq_base; /* Global system interrupt where INTI lines start */ +}; + +/* 2: Interrupt Override */ + +struct acpi_madt_interrupt_override { + struct acpi_subtable_header header; + u8 bus; /* 0 - ISA */ + u8 source_irq; /* Interrupt source (IRQ) */ + u32 global_irq; /* Global system interrupt */ + u16 inti_flags; +}; + +/* 3: NMI Source */ + +struct acpi_madt_nmi_source { + struct acpi_subtable_header header; + u16 inti_flags; + u32 global_irq; /* Global system interrupt */ +}; + +/* 4: Local APIC NMI */ + +struct acpi_madt_local_apic_nmi { + struct acpi_subtable_header header; + u8 processor_id; /* ACPI processor id */ + u16 inti_flags; + u8 lint; /* LINTn to which NMI is connected */ +}; + +/* 5: Address Override */ + +struct acpi_madt_local_apic_override { + struct acpi_subtable_header header; + u16 reserved; /* Reserved, must be zero */ + u64 address; /* APIC physical address */ +}; + +/* 6: I/O Sapic */ + +struct acpi_madt_io_sapic { + struct acpi_subtable_header header; + u8 id; /* I/O SAPIC ID */ + u8 reserved; /* Reserved, must be zero */ + 
u32 global_irq_base; /* Global interrupt for SAPIC start */ + u64 address; /* SAPIC physical address */ +}; + +/* 7: Local Sapic */ + +struct acpi_madt_local_sapic { + struct acpi_subtable_header header; + u8 processor_id; /* ACPI processor id */ + u8 id; /* SAPIC ID */ + u8 eid; /* SAPIC EID */ + u8 reserved[3]; /* Reserved, must be zero */ + u32 lapic_flags; + u32 uid; /* Numeric UID - ACPI 3.0 */ + char uid_string[1]; /* String UID - ACPI 3.0 */ +}; + +/* 8: Platform Interrupt Source */ + +struct acpi_madt_interrupt_source { + struct acpi_subtable_header header; + u16 inti_flags; + u8 type; /* 1=PMI, 2=INIT, 3=corrected */ + u8 id; /* Processor ID */ + u8 eid; /* Processor EID */ + u8 io_sapic_vector; /* Vector value for PMI interrupts */ + u32 global_irq; /* Global system interrupt */ + u32 flags; /* Interrupt Source Flags */ }; /* Masks for Flags field above */ -#define ACPI_HMAT_PROCESSOR_PD_VALID (1) /* 1: processor_PD field is valid */ -#define ACPI_HMAT_MEMORY_PD_VALID (1<<1) /* 1: memory_PD field is valid */ -#define ACPI_HMAT_RESERVATION_HINT (1<<2) /* 1: Reservation hint */ +#define ACPI_MADT_CPEI_OVERRIDE (1) -/* 1: System locality latency and bandwidth information */ +/* 9: Processor Local X2APIC (ACPI 4.0) */ -struct acpi_hmat_locality { - struct acpi_hmat_structure header; - u8 flags; - u8 data_type; - u8 min_transfer_size; - u8 reserved1; - u32 number_of_initiator_Pds; - u32 number_of_target_Pds; - u32 reserved2; - u64 entry_base_unit; +struct acpi_madt_local_x2apic { + struct acpi_subtable_header header; + u16 reserved; /* reserved - must be zero */ + u32 local_apic_id; /* Processor x2APIC ID */ + u32 lapic_flags; + u32 uid; /* ACPI processor UID */ +}; + +/* 10: Local X2APIC NMI (ACPI 4.0) */ + +struct acpi_madt_local_x2apic_nmi { + struct acpi_subtable_header header; + u16 inti_flags; + u32 uid; /* ACPI processor UID */ + u8 lint; /* LINTn to which NMI is connected */ + u8 reserved[3]; /* reserved - must be zero */ +}; + +/* 11: Generic 
Interrupt (ACPI 5.0 + ACPI 6.0 changes) */ + +struct acpi_madt_generic_interrupt { + struct acpi_subtable_header header; + u16 reserved; /* reserved - must be zero */ + u32 cpu_interface_number; + u32 uid; + u32 flags; + u32 parking_version; + u32 performance_interrupt; + u64 parked_address; + u64 base_address; + u64 gicv_base_address; + u64 gich_base_address; + u32 vgic_interrupt; + u64 gicr_base_address; + u64 arm_mpidr; + u8 efficiency_class; + u8 reserved2[3]; }; /* Masks for Flags field above */ -#define ACPI_HMAT_MEMORY_HIERARCHY (0x0F) /* Bits 0-3 */ +/* ACPI_MADT_ENABLED (1) Processor is usable if set */ +#define ACPI_MADT_PERFORMANCE_IRQ_MODE (1<<1) /* 01: Performance Interrupt Mode */ +#define ACPI_MADT_VGIC_IRQ_MODE (1<<2) /* 02: VGIC Maintenance Interrupt mode */ -/* Values for Memory Hierarchy flags */ +/* 12: Generic Distributor (ACPI 5.0 + ACPI 6.0 changes) */ -#define ACPI_HMAT_MEMORY 0 -#define ACPI_HMAT_LAST_LEVEL_CACHE 1 -#define ACPI_HMAT_1ST_LEVEL_CACHE 2 -#define ACPI_HMAT_2ND_LEVEL_CACHE 3 -#define ACPI_HMAT_3RD_LEVEL_CACHE 4 -#define ACPI_HMAT_MINIMUM_XFER_SIZE 0x10 /* Bit 4: ACPI 6.4 */ -#define ACPI_HMAT_NON_SEQUENTIAL_XFERS 0x20 /* Bit 5: ACPI 6.4 */ - - -/* Values for data_type field above */ - -#define ACPI_HMAT_ACCESS_LATENCY 0 -#define ACPI_HMAT_READ_LATENCY 1 -#define ACPI_HMAT_WRITE_LATENCY 2 -#define ACPI_HMAT_ACCESS_BANDWIDTH 3 -#define ACPI_HMAT_READ_BANDWIDTH 4 -#define ACPI_HMAT_WRITE_BANDWIDTH 5 - -/* 2: Memory side cache information */ - -struct acpi_hmat_cache { - struct acpi_hmat_structure header; - u32 memory_PD; - u32 reserved1; - u64 cache_size; - u32 cache_attributes; - u16 reserved2; - u16 number_of_SMBIOShandles; -}; - -/* Masks for cache_attributes field above */ - -#define ACPI_HMAT_TOTAL_CACHE_LEVEL (0x0000000F) -#define ACPI_HMAT_CACHE_LEVEL (0x000000F0) -#define ACPI_HMAT_CACHE_ASSOCIATIVITY (0x00000F00) -#define ACPI_HMAT_WRITE_POLICY (0x0000F000) -#define ACPI_HMAT_CACHE_LINE_SIZE (0xFFFF0000) - -/* Values for 
cache associativity flag */ - -#define ACPI_HMAT_CA_NONE (0) -#define ACPI_HMAT_CA_DIRECT_MAPPED (1) -#define ACPI_HMAT_CA_COMPLEX_CACHE_INDEXING (2) - -/* Values for write policy flag */ - -#define ACPI_HMAT_CP_NONE (0) -#define ACPI_HMAT_CP_WB (1) -#define ACPI_HMAT_CP_WT (2) - -/******************************************************************************* - * - * HPET - High Precision Event Timer table - * Version 1 - * - * Conforms to "IA-PC HPET (High Precision Event Timers) Specification", - * Version 1.0a, October 2004 - * - ******************************************************************************/ - -struct acpi_table_hpet { - struct acpi_table_header header; /* Common ACPI table header */ - u32 id; /* Hardware ID of event timer block */ - struct acpi_generic_address address; /* Address of event timer block */ - u8 sequence; /* HPET sequence number */ - u16 minimum_tick; /* Main counter min tick, periodic mode */ - u8 flags; -}; - -/* Masks for Flags field above */ - -#define ACPI_HPET_PAGE_PROTECT_MASK (3) - -/* Values for Page Protect flags */ - -enum acpi_hpet_page_protect { - ACPI_HPET_NO_PAGE_PROTECT = 0, - ACPI_HPET_PAGE_PROTECT4 = 1, - ACPI_HPET_PAGE_PROTECT64 = 2 -}; - -/******************************************************************************* - * - * IBFT - Boot Firmware Table - * Version 1 - * - * Conforms to "iSCSI Boot Firmware Table (iBFT) as Defined in ACPI 3.0b - * Specification", Version 1.01, March 1, 2007 - * - * Note: It appears that this table is not intended to appear in the RSDT/XSDT. - * Therefore, it is not currently supported by the disassembler. 
- * - ******************************************************************************/ - -struct acpi_table_ibft { - struct acpi_table_header header; /* Common ACPI table header */ - u8 reserved[12]; -}; - -/* IBFT common subtable header */ - -struct acpi_ibft_header { - u8 type; +struct acpi_madt_generic_distributor { + struct acpi_subtable_header header; + u16 reserved; /* reserved - must be zero */ + u32 gic_id; + u64 base_address; + u32 global_irq_base; u8 version; + u8 reserved2[3]; /* reserved - must be zero */ +}; + +/* Values for Version field above */ + +enum acpi_madt_gic_version { + ACPI_MADT_GIC_VERSION_NONE = 0, + ACPI_MADT_GIC_VERSION_V1 = 1, + ACPI_MADT_GIC_VERSION_V2 = 2, + ACPI_MADT_GIC_VERSION_V3 = 3, + ACPI_MADT_GIC_VERSION_V4 = 4, + ACPI_MADT_GIC_VERSION_RESERVED = 5 /* 5 and greater are reserved */ +}; + +/* 13: Generic MSI Frame (ACPI 5.1) */ + +struct acpi_madt_generic_msi_frame { + struct acpi_subtable_header header; + u16 reserved; /* reserved - must be zero */ + u32 msi_frame_id; + u64 base_address; + u32 flags; + u16 spi_count; + u16 spi_base; +}; + +/* Masks for Flags field above */ + +#define ACPI_MADT_OVERRIDE_SPI_VALUES (1) + +/* 14: Generic Redistributor (ACPI 5.1) */ + +struct acpi_madt_generic_redistributor { + struct acpi_subtable_header header; + u16 reserved; /* reserved - must be zero */ + u64 base_address; + u32 length; +}; + +/* 15: Generic Translator (ACPI 6.0) */ + +struct acpi_madt_generic_translator { + struct acpi_subtable_header header; + u16 reserved; /* reserved - must be zero */ + u32 translation_id; + u64 base_address; + u32 reserved2; +}; + +/* + * Common flags fields for MADT subtables + */ + +/* MADT Local APIC flags */ + +#define ACPI_MADT_ENABLED (1) /* 00: Processor is usable if set */ + +/* MADT MPS INTI flags (inti_flags) */ + +#define ACPI_MADT_POLARITY_MASK (3) /* 00-01: Polarity of APIC I/O input signals */ +#define ACPI_MADT_TRIGGER_MASK (3<<2) /* 02-03: Trigger mode of APIC input signals */ + +/* Values 
for MPS INTI flags */ + +#define ACPI_MADT_POLARITY_CONFORMS 0 +#define ACPI_MADT_POLARITY_ACTIVE_HIGH 1 +#define ACPI_MADT_POLARITY_RESERVED 2 +#define ACPI_MADT_POLARITY_ACTIVE_LOW 3 + +#define ACPI_MADT_TRIGGER_CONFORMS (0) +#define ACPI_MADT_TRIGGER_EDGE (1<<2) +#define ACPI_MADT_TRIGGER_RESERVED (2<<2) +#define ACPI_MADT_TRIGGER_LEVEL (3<<2) + +/******************************************************************************* + * + * MSCT - Maximum System Characteristics Table (ACPI 4.0) + * Version 1 + * + ******************************************************************************/ + +struct acpi_table_msct { + struct acpi_table_header header; /* Common ACPI table header */ + u32 proximity_offset; /* Location of proximity info struct(s) */ + u32 max_proximity_domains; /* Max number of proximity domains */ + u32 max_clock_domains; /* Max number of clock domains */ + u64 max_address; /* Max physical address in system */ +}; + +/* subtable - Maximum Proximity Domain Information. Version 1 */ + +struct acpi_msct_proximity { + u8 revision; + u8 length; + u32 range_start; /* Start of domain range */ + u32 range_end; /* End of domain range */ + u32 processor_capacity; + u64 memory_capacity; /* In bytes */ +}; + +/******************************************************************************* + * + * NFIT - NVDIMM Interface Table (ACPI 6.0+) + * Version 1 + * + ******************************************************************************/ + +struct acpi_table_nfit { + struct acpi_table_header header; /* Common ACPI table header */ + u32 reserved; /* Reserved, must be zero */ +}; + +/* Subtable header for NFIT */ + +struct acpi_nfit_header { + u16 type; u16 length; - u8 index; - u8 flags; }; -/* Values for Type field above */ +/* Values for subtable type in struct acpi_nfit_header */ -enum acpi_ibft_type { - ACPI_IBFT_TYPE_NOT_USED = 0, - ACPI_IBFT_TYPE_CONTROL = 1, - ACPI_IBFT_TYPE_INITIATOR = 2, - ACPI_IBFT_TYPE_NIC = 3, - ACPI_IBFT_TYPE_TARGET = 4, - 
ACPI_IBFT_TYPE_EXTENSIONS = 5, - ACPI_IBFT_TYPE_RESERVED = 6 /* 6 and greater are reserved */ +enum acpi_nfit_type { + ACPI_NFIT_TYPE_SYSTEM_ADDRESS = 0, + ACPI_NFIT_TYPE_MEMORY_MAP = 1, + ACPI_NFIT_TYPE_INTERLEAVE = 2, + ACPI_NFIT_TYPE_SMBIOS = 3, + ACPI_NFIT_TYPE_CONTROL_REGION = 4, + ACPI_NFIT_TYPE_DATA_REGION = 5, + ACPI_NFIT_TYPE_FLUSH_ADDRESS = 6, + ACPI_NFIT_TYPE_RESERVED = 7 /* 7 and greater are reserved */ }; -/* IBFT subtables */ +/* + * NFIT Subtables + */ -struct acpi_ibft_control { - struct acpi_ibft_header header; - u16 extensions; - u16 initiator_offset; - u16 nic0_offset; - u16 target0_offset; - u16 nic1_offset; - u16 target1_offset; +/* 0: System Physical Address Range Structure */ + +struct acpi_nfit_system_address { + struct acpi_nfit_header header; + u16 range_index; + u16 flags; + u32 reserved; /* Reseved, must be zero */ + u32 proximity_domain; + u8 range_guid[16]; + u64 address; + u64 length; + u64 memory_mapping; }; -struct acpi_ibft_initiator { - struct acpi_ibft_header header; - u8 sns_server[16]; - u8 slp_server[16]; - u8 primary_server[16]; - u8 secondary_server[16]; - u16 name_length; - u16 name_offset; +/* Flags */ + +#define ACPI_NFIT_ADD_ONLINE_ONLY (1) /* 00: Add/Online Operation Only */ +#define ACPI_NFIT_PROXIMITY_VALID (1<<1) /* 01: Proximity Domain Valid */ + +/* Range Type GUIDs appear in the include/acuuid.h file */ + +/* 1: Memory Device to System Address Range Map Structure */ + +struct acpi_nfit_memory_map { + struct acpi_nfit_header header; + u32 device_handle; + u16 physical_id; + u16 region_id; + u16 range_index; + u16 region_index; + u64 region_size; + u64 region_offset; + u64 address; + u16 interleave_index; + u16 interleave_ways; + u16 flags; + u16 reserved; /* Reserved, must be zero */ }; -struct acpi_ibft_nic { - struct acpi_ibft_header header; - u8 ip_address[16]; - u8 subnet_mask_prefix; - u8 origin; - u8 gateway[16]; - u8 primary_dns[16]; - u8 secondary_dns[16]; - u8 dhcp[16]; - u16 vlan; - u8 mac_address[6]; - 
u16 pci_address; - u16 name_length; - u16 name_offset; +/* Flags */ + +#define ACPI_NFIT_MEM_SAVE_FAILED (1) /* 00: Last SAVE to Memory Device failed */ +#define ACPI_NFIT_MEM_RESTORE_FAILED (1<<1) /* 01: Last RESTORE from Memory Device failed */ +#define ACPI_NFIT_MEM_FLUSH_FAILED (1<<2) /* 02: Platform flush failed */ +#define ACPI_NFIT_MEM_NOT_ARMED (1<<3) /* 03: Memory Device is not armed */ +#define ACPI_NFIT_MEM_HEALTH_OBSERVED (1<<4) /* 04: Memory Device observed SMART/health events */ +#define ACPI_NFIT_MEM_HEALTH_ENABLED (1<<5) /* 05: SMART/health events enabled */ +#define ACPI_NFIT_MEM_MAP_FAILED (1<<6) /* 06: Mapping to SPA failed */ + +/* 2: Interleave Structure */ + +struct acpi_nfit_interleave { + struct acpi_nfit_header header; + u16 interleave_index; + u16 reserved; /* Reserved, must be zero */ + u32 line_count; + u32 line_size; + u32 line_offset[1]; /* Variable length */ }; -struct acpi_ibft_target { - struct acpi_ibft_header header; - u8 target_ip_address[16]; - u16 target_ip_socket; - u8 target_boot_lun[8]; - u8 chap_type; - u8 nic_association; - u16 target_name_length; - u16 target_name_offset; - u16 chap_name_length; - u16 chap_name_offset; - u16 chap_secret_length; - u16 chap_secret_offset; - u16 reverse_chap_name_length; - u16 reverse_chap_name_offset; - u16 reverse_chap_secret_length; - u16 reverse_chap_secret_offset; +/* 3: SMBIOS Management Information Structure */ + +struct acpi_nfit_smbios { + struct acpi_nfit_header header; + u32 reserved; /* Reserved, must be zero */ + u8 data[1]; /* Variable length */ }; +/* 4: NVDIMM Control Region Structure */ + +struct acpi_nfit_control_region { + struct acpi_nfit_header header; + u16 region_index; + u16 vendor_id; + u16 device_id; + u16 revision_id; + u16 subsystem_vendor_id; + u16 subsystem_device_id; + u16 subsystem_revision_id; + u8 valid_fields; + u8 manufacturing_location; + u16 manufacturing_date; + u8 reserved[2]; /* Reserved, must be zero */ + u32 serial_number; + u16 code; + u16 windows; 
+ u64 window_size; + u64 command_offset; + u64 command_size; + u64 status_offset; + u64 status_size; + u16 flags; + u8 reserved1[6]; /* Reserved, must be zero */ +}; + +/* Flags */ + +#define ACPI_NFIT_CONTROL_BUFFERED (1) /* Block Data Windows implementation is buffered */ + +/* valid_fields bits */ + +#define ACPI_NFIT_CONTROL_MFG_INFO_VALID (1) /* Manufacturing fields are valid */ + +/* 5: NVDIMM Block Data Window Region Structure */ + +struct acpi_nfit_data_region { + struct acpi_nfit_header header; + u16 region_index; + u16 windows; + u64 offset; + u64 size; + u64 capacity; + u64 start_address; +}; + +/* 6: Flush Hint Address Structure */ + +struct acpi_nfit_flush_address { + struct acpi_nfit_header header; + u32 device_handle; + u16 hint_count; + u8 reserved[6]; /* Reserved, must be zero */ + u64 hint_address[1]; /* Variable length */ +}; + +/******************************************************************************* + * + * SBST - Smart Battery Specification Table + * Version 1 + * + ******************************************************************************/ + +struct acpi_table_sbst { + struct acpi_table_header header; /* Common ACPI table header */ + u32 warning_level; + u32 low_level; + u32 critical_level; +}; + +/******************************************************************************* + * + * SLIT - System Locality Distance Information Table + * Version 1 + * + ******************************************************************************/ + +struct acpi_table_slit { + struct acpi_table_header header; /* Common ACPI table header */ + u64 locality_count; + u8 entry[1]; /* Real size = localities^2 */ +}; + +/******************************************************************************* + * + * SRAT - System Resource Affinity Table + * Version 3 + * + ******************************************************************************/ + +struct acpi_table_srat { + struct acpi_table_header header; /* Common ACPI table header */ + u32 
table_revision; /* Must be value '1' */ + u64 reserved; /* Reserved, must be zero */ +}; + +/* Values for subtable type in struct acpi_subtable_header */ + +enum acpi_srat_type { + ACPI_SRAT_TYPE_CPU_AFFINITY = 0, + ACPI_SRAT_TYPE_MEMORY_AFFINITY = 1, + ACPI_SRAT_TYPE_X2APIC_CPU_AFFINITY = 2, + ACPI_SRAT_TYPE_GICC_AFFINITY = 3, + ACPI_SRAT_TYPE_RESERVED = 4 /* 4 and greater are reserved */ +}; + +/* + * SRAT Subtables, correspond to Type in struct acpi_subtable_header + */ + +/* 0: Processor Local APIC/SAPIC Affinity */ + +struct acpi_srat_cpu_affinity { + struct acpi_subtable_header header; + u8 proximity_domain_lo; + u8 apic_id; + u32 flags; + u8 local_sapic_eid; + u8 proximity_domain_hi[3]; + u32 clock_domain; +}; + +/* Flags */ + +#define ACPI_SRAT_CPU_USE_AFFINITY (1) /* 00: Use affinity structure */ + +/* 1: Memory Affinity */ + +struct acpi_srat_mem_affinity { + struct acpi_subtable_header header; + u32 proximity_domain; + u16 reserved; /* Reserved, must be zero */ + u64 base_address; + u64 length; + u32 reserved1; + u32 flags; + u64 reserved2; /* Reserved, must be zero */ +}; + +/* Flags */ + +#define ACPI_SRAT_MEM_ENABLED (1) /* 00: Use affinity structure */ +#define ACPI_SRAT_MEM_HOT_PLUGGABLE (1<<1) /* 01: Memory region is hot pluggable */ +#define ACPI_SRAT_MEM_NON_VOLATILE (1<<2) /* 02: Memory region is non-volatile */ + +/* 2: Processor Local X2_APIC Affinity (ACPI 4.0) */ + +struct acpi_srat_x2apic_cpu_affinity { + struct acpi_subtable_header header; + u16 reserved; /* Reserved, must be zero */ + u32 proximity_domain; + u32 apic_id; + u32 flags; + u32 clock_domain; + u32 reserved2; +}; + +/* Flags for struct acpi_srat_cpu_affinity and struct acpi_srat_x2apic_cpu_affinity */ + +#define ACPI_SRAT_CPU_ENABLED (1) /* 00: Use affinity structure */ + +/* 3: GICC Affinity (ACPI 5.1) */ + +struct acpi_srat_gicc_affinity { + struct acpi_subtable_header header; + u32 proximity_domain; + u32 acpi_processor_uid; + u32 flags; + u32 clock_domain; +}; + +/* Flags 
for struct acpi_srat_gicc_affinity */ + +#define ACPI_SRAT_GICC_ENABLED (1) /* 00: Use affinity structure */ + /* Reset to default packing */ #pragma pack() diff --git a/include/acpi/actbl2.h b/include/acpi/actbl2.h index a47b32a5cb..c93dbadfc7 100644 --- a/include/acpi/actbl2.h +++ b/include/acpi/actbl2.h @@ -1,12 +1,46 @@ -/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 */ /****************************************************************************** * * Name: actbl2.h - ACPI Table Definitions (tables not in ACPI spec) * - * Copyright (C) 2000 - 2021, Intel Corp. - * *****************************************************************************/ +/* + * Copyright (C) 2000 - 2016, Intel Corp. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions, and the following disclaimer, + * without modification. + * 2. Redistributions in binary form must reproduce at minimum a disclaimer + * substantially similar to the "NO WARRANTY" disclaimer below + * ("Disclaimer") and any redistribution must be conditioned upon + * including a substantially similar Disclaimer requirement for further + * binary redistribution. + * 3. Neither the names of the above-listed copyright holders nor the names + * of any contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * Alternatively, this software may be distributed under the terms of the + * GNU General Public License ("GPL") version 2 as published by the Free + * Software Foundation. 
+ * + * NO WARRANTY + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, + * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGES. + */ + #ifndef __ACTBL2_H__ #define __ACTBL2_H__ @@ -17,6 +51,9 @@ * These tables are not consumed directly by the ACPICA subsystem, but are * included here to support device drivers and the AML disassembler. * + * Generally, the tables in this file are defined by third-party specifications, + * and are not defined directly by the ACPI specification itself. + * ******************************************************************************/ /* @@ -24,30 +61,41 @@ * file. Useful because they make it more difficult to inadvertently type in * the wrong signature. */ -#define ACPI_SIG_BDAT "BDAT" /* BIOS Data ACPI Table */ +#define ACPI_SIG_ASF "ASF!" 
/* Alert Standard Format table */ +#define ACPI_SIG_BOOT "BOOT" /* Simple Boot Flag Table */ +#define ACPI_SIG_CSRT "CSRT" /* Core System Resource Table */ +#define ACPI_SIG_DBG2 "DBG2" /* Debug Port table type 2 */ +#define ACPI_SIG_DBGP "DBGP" /* Debug Port table */ +#define ACPI_SIG_DMAR "DMAR" /* DMA Remapping table */ +#define ACPI_SIG_HPET "HPET" /* High Precision Event Timer table */ +#define ACPI_SIG_IBFT "IBFT" /* iSCSI Boot Firmware Table */ #define ACPI_SIG_IORT "IORT" /* IO Remapping Table */ #define ACPI_SIG_IVRS "IVRS" /* I/O Virtualization Reporting Structure */ #define ACPI_SIG_LPIT "LPIT" /* Low Power Idle Table */ -#define ACPI_SIG_MADT "APIC" /* Multiple APIC Description Table */ #define ACPI_SIG_MCFG "MCFG" /* PCI Memory Mapped Configuration table */ #define ACPI_SIG_MCHI "MCHI" /* Management Controller Host Interface table */ -#define ACPI_SIG_MPST "MPST" /* Memory Power State Table */ -#define ACPI_SIG_MSCT "MSCT" /* Maximum System Characteristics Table */ #define ACPI_SIG_MSDM "MSDM" /* Microsoft Data Management Table */ -#define ACPI_SIG_NFIT "NFIT" /* NVDIMM Firmware Interface Table */ -#define ACPI_SIG_PCCT "PCCT" /* Platform Communications Channel Table */ -#define ACPI_SIG_PDTT "PDTT" /* Platform Debug Trigger Table */ -#define ACPI_SIG_PHAT "PHAT" /* Platform Health Assessment Table */ -#define ACPI_SIG_PMTT "PMTT" /* Platform Memory Topology Table */ -#define ACPI_SIG_PPTT "PPTT" /* Processor Properties Topology Table */ -#define ACPI_SIG_PRMT "PRMT" /* Platform Runtime Mechanism Table */ -#define ACPI_SIG_RASF "RASF" /* RAS Feature table */ -#define ACPI_SIG_RGRT "RGRT" /* Regulatory Graphics Resource Table */ -#define ACPI_SIG_SBST "SBST" /* Smart Battery Specification Table */ -#define ACPI_SIG_SDEI "SDEI" /* Software Delegated Exception Interface Table */ -#define ACPI_SIG_SDEV "SDEV" /* Secure Devices table */ -#define ACPI_SIG_NHLT "NHLT" /* Non-HDAudio Link Table */ -#define ACPI_SIG_SVKL "SVKL" /* Storage Volume Key Location 
Table */ +#define ACPI_SIG_MTMR "MTMR" /* MID Timer table */ +#define ACPI_SIG_SLIC "SLIC" /* Software Licensing Description Table */ +#define ACPI_SIG_SPCR "SPCR" /* Serial Port Console Redirection table */ +#define ACPI_SIG_SPMI "SPMI" /* Server Platform Management Interface table */ +#define ACPI_SIG_TCPA "TCPA" /* Trusted Computing Platform Alliance table */ +#define ACPI_SIG_TPM2 "TPM2" /* Trusted Platform Module 2.0 H/W interface table */ +#define ACPI_SIG_UEFI "UEFI" /* Uefi Boot Optimization Table */ +#define ACPI_SIG_VRTC "VRTC" /* Virtual Real Time Clock Table */ +#define ACPI_SIG_WAET "WAET" /* Windows ACPI Emulated devices Table */ +#define ACPI_SIG_WDAT "WDAT" /* Watchdog Action Table */ +#define ACPI_SIG_WDDT "WDDT" /* Watchdog Timer Description Table */ +#define ACPI_SIG_WDRT "WDRT" /* Watchdog Resource Table */ + +#ifdef ACPI_UNDEFINED_TABLES +/* + * These tables have been seen in the field, but no definition has been found + */ +#define ACPI_SIG_ATKG "ATKG" +#define ACPI_SIG_GSCI "GSCI" /* GMCH SCI table */ +#define ACPI_SIG_IEIT "IEIT" +#endif /* * All tables must be byte-packed to match the ACPI specification, since @@ -69,186 +117,544 @@ /******************************************************************************* * - * AEST - Arm Error Source Table + * ASF - Alert Standard Format table (Signature "ASF!") + * Revision 0x10 * - * Conforms to: ACPI for the Armv8 RAS Extensions 1.1 Platform Design Document - * September 2020. 
+ * Conforms to the Alert Standard Format Specification V2.0, 23 April 2003 * ******************************************************************************/ -struct acpi_table_aest { - struct acpi_table_header header; - void *node_array[]; +struct acpi_table_asf { + struct acpi_table_header header; /* Common ACPI table header */ }; -/* Common Subtable header - one per Node Structure (Subtable) */ +/* ASF subtable header */ -struct acpi_aest_hdr { +struct acpi_asf_header { u8 type; - u16 length; u8 reserved; - u32 node_specific_offset; - u32 node_interface_offset; - u32 node_interrupt_offset; - u32 node_interrupt_count; - u64 timestamp_rate; - u64 reserved1; - u64 error_injection_rate; + u16 length; }; -/* Values for Type above */ +/* Values for Type field above */ -#define ACPI_AEST_PROCESSOR_ERROR_NODE 0 -#define ACPI_AEST_MEMORY_ERROR_NODE 1 -#define ACPI_AEST_SMMU_ERROR_NODE 2 -#define ACPI_AEST_VENDOR_ERROR_NODE 3 -#define ACPI_AEST_GIC_ERROR_NODE 4 -#define ACPI_AEST_NODE_TYPE_RESERVED 5 /* 5 and above are reserved */ +enum acpi_asf_type { + ACPI_ASF_TYPE_INFO = 0, + ACPI_ASF_TYPE_ALERT = 1, + ACPI_ASF_TYPE_CONTROL = 2, + ACPI_ASF_TYPE_BOOT = 3, + ACPI_ASF_TYPE_ADDRESS = 4, + ACPI_ASF_TYPE_RESERVED = 5 +}; /* - * AEST subtables (Error nodes) + * ASF subtables */ -/* 0: Processor Error */ +/* 0: ASF Information */ -typedef struct acpi_aest_processor { - u32 processor_id; - u8 resource_type; - u8 reserved; +struct acpi_asf_info { + struct acpi_asf_header header; + u8 min_reset_value; + u8 min_poll_interval; + u16 system_id; + u32 mfg_id; u8 flags; - u8 revision; - u64 processor_affinity; + u8 reserved2[3]; +}; -} acpi_aest_processor; +/* Masks for Flags field above */ -/* Values for resource_type above, related structs below */ +#define ACPI_ASF_SMBUS_PROTOCOLS (1) -#define ACPI_AEST_CACHE_RESOURCE 0 -#define ACPI_AEST_TLB_RESOURCE 1 -#define ACPI_AEST_GENERIC_RESOURCE 2 -#define ACPI_AEST_RESOURCE_RESERVED 3 /* 3 and above are reserved */ +/* 1: ASF Alerts */ 
-/* 0R: Processor Cache Resource Substructure */ +struct acpi_asf_alert { + struct acpi_asf_header header; + u8 assert_mask; + u8 deassert_mask; + u8 alerts; + u8 data_length; +}; -typedef struct acpi_aest_processor_cache { - u32 cache_reference; - u32 reserved; - -} acpi_aest_processor_cache; - -/* Values for cache_type above */ - -#define ACPI_AEST_CACHE_DATA 0 -#define ACPI_AEST_CACHE_INSTRUCTION 1 -#define ACPI_AEST_CACHE_UNIFIED 2 -#define ACPI_AEST_CACHE_RESERVED 3 /* 3 and above are reserved */ - -/* 1R: Processor TLB Resource Substructure */ - -typedef struct acpi_aest_processor_tlb { - u32 tlb_level; - u32 reserved; - -} acpi_aest_processor_tlb; - -/* 2R: Processor Generic Resource Substructure */ - -typedef struct acpi_aest_processor_generic { - u8 *resource; - -} acpi_aest_processor_generic; - -/* 1: Memory Error */ - -typedef struct acpi_aest_memory { - u32 srat_proximity_domain; - -} acpi_aest_memory; - -/* 2: Smmu Error */ - -typedef struct acpi_aest_smmu { - u32 iort_node_reference; - u32 subcomponent_reference; - -} acpi_aest_smmu; - -/* 3: Vendor Defined */ - -typedef struct acpi_aest_vendor { - u32 acpi_hid; - u32 acpi_uid; - u8 vendor_specific_data[16]; - -} acpi_aest_vendor; - -/* 4: Gic Error */ - -typedef struct acpi_aest_gic { - u32 interface_type; - u32 instance_id; - -} acpi_aest_gic; - -/* Values for interface_type above */ - -#define ACPI_AEST_GIC_CPU 0 -#define ACPI_AEST_GIC_DISTRIBUTOR 1 -#define ACPI_AEST_GIC_REDISTRIBUTOR 2 -#define ACPI_AEST_GIC_ITS 3 -#define ACPI_AEST_GIC_RESERVED 4 /* 4 and above are reserved */ - -/* Node Interface Structure */ - -typedef struct acpi_aest_node_interface { +struct acpi_asf_alert_data { + u8 address; + u8 command; + u8 mask; + u8 value; + u8 sensor_type; u8 type; - u8 reserved[3]; - u32 flags; - u64 address; - u32 error_record_index; - u32 error_record_count; - u64 error_record_implemented; - u64 error_status_reporting; - u64 addressing_mode; + u8 offset; + u8 source_type; + u8 severity; + u8 
sensor_number; + u8 entity; + u8 instance; +}; -} acpi_aest_node_interface; +/* 2: ASF Remote Control */ -/* Values for Type field above */ +struct acpi_asf_remote { + struct acpi_asf_header header; + u8 controls; + u8 data_length; + u16 reserved2; +}; -#define ACPI_AEST_NODE_SYSTEM_REGISTER 0 -#define ACPI_AEST_NODE_MEMORY_MAPPED 1 -#define ACPI_AEST_XFACE_RESERVED 2 /* 2 and above are reserved */ +struct acpi_asf_control_data { + u8 function; + u8 address; + u8 command; + u8 value; +}; -/* Node Interrupt Structure */ +/* 3: ASF RMCP Boot Options */ -typedef struct acpi_aest_node_interrupt { - u8 type; - u8 reserved[2]; - u8 flags; - u32 gsiv; - u8 iort_id; - u8 reserved1[3]; +struct acpi_asf_rmcp { + struct acpi_asf_header header; + u8 capabilities[7]; + u8 completion_code; + u32 enterprise_id; + u8 command; + u16 parameter; + u16 boot_options; + u16 oem_parameters; +}; -} acpi_aest_node_interrupt; +/* 4: ASF Address */ -/* Values for Type field above */ - -#define ACPI_AEST_NODE_FAULT_HANDLING 0 -#define ACPI_AEST_NODE_ERROR_RECOVERY 1 -#define ACPI_AEST_XRUPT_RESERVED 2 /* 2 and above are reserved */ +struct acpi_asf_address { + struct acpi_asf_header header; + u8 eprom_address; + u8 devices; +}; /******************************************************************************* * - * BDAT - BIOS Data ACPI Table + * BOOT - Simple Boot Flag Table + * Version 1 * - * Conforms to "BIOS Data ACPI Table", Interface Specification v4.0 Draft 5 - * Nov 2020 + * Conforms to the "Simple Boot Flag Specification", Version 2.1 * ******************************************************************************/ -struct acpi_table_bdat { - struct acpi_table_header header; - struct acpi_generic_address gas; +struct acpi_table_boot { + struct acpi_table_header header; /* Common ACPI table header */ + u8 cmos_index; /* Index in CMOS RAM for the boot register */ + u8 reserved[3]; +}; + +/******************************************************************************* + * + * CSRT - Core 
System Resource Table + * Version 0 + * + * Conforms to the "Core System Resource Table (CSRT)", November 14, 2011 + * + ******************************************************************************/ + +struct acpi_table_csrt { + struct acpi_table_header header; /* Common ACPI table header */ +}; + +/* Resource Group subtable */ + +struct acpi_csrt_group { + u32 length; + u32 vendor_id; + u32 subvendor_id; + u16 device_id; + u16 subdevice_id; + u16 revision; + u16 reserved; + u32 shared_info_length; + + /* Shared data immediately follows (Length = shared_info_length) */ +}; + +/* Shared Info subtable */ + +struct acpi_csrt_shared_info { + u16 major_version; + u16 minor_version; + u32 mmio_base_low; + u32 mmio_base_high; + u32 gsi_interrupt; + u8 interrupt_polarity; + u8 interrupt_mode; + u8 num_channels; + u8 dma_address_width; + u16 base_request_line; + u16 num_handshake_signals; + u32 max_block_size; + + /* Resource descriptors immediately follow (Length = Group length - shared_info_length) */ +}; + +/* Resource Descriptor subtable */ + +struct acpi_csrt_descriptor { + u32 length; + u16 type; + u16 subtype; + u32 uid; + + /* Resource-specific information immediately follows */ +}; + +/* Resource Types */ + +#define ACPI_CSRT_TYPE_INTERRUPT 0x0001 +#define ACPI_CSRT_TYPE_TIMER 0x0002 +#define ACPI_CSRT_TYPE_DMA 0x0003 + +/* Resource Subtypes */ + +#define ACPI_CSRT_XRUPT_LINE 0x0000 +#define ACPI_CSRT_XRUPT_CONTROLLER 0x0001 +#define ACPI_CSRT_TIMER 0x0000 +#define ACPI_CSRT_DMA_CHANNEL 0x0000 +#define ACPI_CSRT_DMA_CONTROLLER 0x0001 + +/******************************************************************************* + * + * DBG2 - Debug Port Table 2 + * Version 0 (Both main table and subtables) + * + * Conforms to "Microsoft Debug Port Table 2 (DBG2)", December 10, 2015 + * + ******************************************************************************/ + +struct acpi_table_dbg2 { + struct acpi_table_header header; /* Common ACPI table header */ + u32 
info_offset; + u32 info_count; +}; + +struct acpi_dbg2_header { + u32 info_offset; + u32 info_count; +}; + +/* Debug Device Information Subtable */ + +struct acpi_dbg2_device { + u8 revision; + u16 length; + u8 register_count; /* Number of base_address registers */ + u16 namepath_length; + u16 namepath_offset; + u16 oem_data_length; + u16 oem_data_offset; + u16 port_type; + u16 port_subtype; + u16 reserved; + u16 base_address_offset; + u16 address_size_offset; + /* + * Data that follows: + * base_address (required) - Each in 12-byte Generic Address Structure format. + * address_size (required) - Array of u32 sizes corresponding to each base_address register. + * Namepath (required) - Null terminated string. Single dot if not supported. + * oem_data (optional) - Length is oem_data_length. + */ +}; + +/* Types for port_type field above */ + +#define ACPI_DBG2_SERIAL_PORT 0x8000 +#define ACPI_DBG2_1394_PORT 0x8001 +#define ACPI_DBG2_USB_PORT 0x8002 +#define ACPI_DBG2_NET_PORT 0x8003 + +/* Subtypes for port_subtype field above */ + +#define ACPI_DBG2_16550_COMPATIBLE 0x0000 +#define ACPI_DBG2_16550_SUBSET 0x0001 +#define ACPI_DBG2_ARM_PL011 0x0003 +#define ACPI_DBG2_ARM_SBSA_32BIT 0x000D +#define ACPI_DBG2_ARM_SBSA_GENERIC 0x000E +#define ACPI_DBG2_ARM_DCC 0x000F +#define ACPI_DBG2_BCM2835 0x0010 + +#define ACPI_DBG2_1394_STANDARD 0x0000 + +#define ACPI_DBG2_USB_XHCI 0x0000 +#define ACPI_DBG2_USB_EHCI 0x0001 + +/******************************************************************************* + * + * DBGP - Debug Port table + * Version 1 + * + * Conforms to the "Debug Port Specification", Version 1.00, 2/9/2000 + * + ******************************************************************************/ + +struct acpi_table_dbgp { + struct acpi_table_header header; /* Common ACPI table header */ + u8 type; /* 0=full 16550, 1=subset of 16550 */ + u8 reserved[3]; + struct acpi_generic_address debug_port; +}; + 
+/******************************************************************************* + * + * DMAR - DMA Remapping table + * Version 1 + * + * Conforms to "Intel Virtualization Technology for Directed I/O", + * Version 2.3, October 2014 + * + ******************************************************************************/ + +struct acpi_table_dmar { + struct acpi_table_header header; /* Common ACPI table header */ + u8 width; /* Host Address Width */ + u8 flags; + u8 reserved[10]; +}; + +/* Masks for Flags field above */ + +#define ACPI_DMAR_INTR_REMAP (1) +#define ACPI_DMAR_X2APIC_OPT_OUT (1<<1) +#define ACPI_DMAR_X2APIC_MODE (1<<2) + +/* DMAR subtable header */ + +struct acpi_dmar_header { + u16 type; + u16 length; +}; + +/* Values for subtable type in struct acpi_dmar_header */ + +enum acpi_dmar_type { + ACPI_DMAR_TYPE_HARDWARE_UNIT = 0, + ACPI_DMAR_TYPE_RESERVED_MEMORY = 1, + ACPI_DMAR_TYPE_ROOT_ATS = 2, + ACPI_DMAR_TYPE_HARDWARE_AFFINITY = 3, + ACPI_DMAR_TYPE_NAMESPACE = 4, + ACPI_DMAR_TYPE_RESERVED = 5 /* 5 and greater are reserved */ +}; + +/* DMAR Device Scope structure */ + +struct acpi_dmar_device_scope { + u8 entry_type; + u8 length; + u16 reserved; + u8 enumeration_id; + u8 bus; +}; + +/* Values for entry_type in struct acpi_dmar_device_scope - device types */ + +enum acpi_dmar_scope_type { + ACPI_DMAR_SCOPE_TYPE_NOT_USED = 0, + ACPI_DMAR_SCOPE_TYPE_ENDPOINT = 1, + ACPI_DMAR_SCOPE_TYPE_BRIDGE = 2, + ACPI_DMAR_SCOPE_TYPE_IOAPIC = 3, + ACPI_DMAR_SCOPE_TYPE_HPET = 4, + ACPI_DMAR_SCOPE_TYPE_NAMESPACE = 5, + ACPI_DMAR_SCOPE_TYPE_RESERVED = 6 /* 6 and greater are reserved */ +}; + +struct acpi_dmar_pci_path { + u8 device; + u8 function; +}; + +/* + * DMAR Subtables, correspond to Type in struct acpi_dmar_header + */ + +/* 0: Hardware Unit Definition */ + +struct acpi_dmar_hardware_unit { + struct acpi_dmar_header header; + u8 flags; + u8 reserved; + u16 segment; + u64 address; /* Register Base Address */ +}; + +/* Masks for Flags field above */ + +#define 
ACPI_DMAR_INCLUDE_ALL (1) + +/* 1: Reserved Memory Defininition */ + +struct acpi_dmar_reserved_memory { + struct acpi_dmar_header header; + u16 reserved; + u16 segment; + u64 base_address; /* 4K aligned base address */ + u64 end_address; /* 4K aligned limit address */ +}; + +/* Masks for Flags field above */ + +#define ACPI_DMAR_ALLOW_ALL (1) + +/* 2: Root Port ATS Capability Reporting Structure */ + +struct acpi_dmar_atsr { + struct acpi_dmar_header header; + u8 flags; + u8 reserved; + u16 segment; +}; + +/* Masks for Flags field above */ + +#define ACPI_DMAR_ALL_PORTS (1) + +/* 3: Remapping Hardware Static Affinity Structure */ + +struct acpi_dmar_rhsa { + struct acpi_dmar_header header; + u32 reserved; + u64 base_address; + u32 proximity_domain; +}; + +/* 4: ACPI Namespace Device Declaration Structure */ + +struct acpi_dmar_andd { + struct acpi_dmar_header header; + u8 reserved[3]; + u8 device_number; + char device_name[1]; +}; + +/******************************************************************************* + * + * HPET - High Precision Event Timer table + * Version 1 + * + * Conforms to "IA-PC HPET (High Precision Event Timers) Specification", + * Version 1.0a, October 2004 + * + ******************************************************************************/ + +struct acpi_table_hpet { + struct acpi_table_header header; /* Common ACPI table header */ + u32 id; /* Hardware ID of event timer block */ + struct acpi_generic_address address; /* Address of event timer block */ + u8 sequence; /* HPET sequence number */ + u16 minimum_tick; /* Main counter min tick, periodic mode */ + u8 flags; +}; + +/* Masks for Flags field above */ + +#define ACPI_HPET_PAGE_PROTECT_MASK (3) + +/* Values for Page Protect flags */ + +enum acpi_hpet_page_protect { + ACPI_HPET_NO_PAGE_PROTECT = 0, + ACPI_HPET_PAGE_PROTECT4 = 1, + ACPI_HPET_PAGE_PROTECT64 = 2 +}; + +/******************************************************************************* + * + * IBFT - Boot Firmware Table + * 
Version 1 + * + * Conforms to "iSCSI Boot Firmware Table (iBFT) as Defined in ACPI 3.0b + * Specification", Version 1.01, March 1, 2007 + * + * Note: It appears that this table is not intended to appear in the RSDT/XSDT. + * Therefore, it is not currently supported by the disassembler. + * + ******************************************************************************/ + +struct acpi_table_ibft { + struct acpi_table_header header; /* Common ACPI table header */ + u8 reserved[12]; +}; + +/* IBFT common subtable header */ + +struct acpi_ibft_header { + u8 type; + u8 version; + u16 length; + u8 index; + u8 flags; +}; + +/* Values for Type field above */ + +enum acpi_ibft_type { + ACPI_IBFT_TYPE_NOT_USED = 0, + ACPI_IBFT_TYPE_CONTROL = 1, + ACPI_IBFT_TYPE_INITIATOR = 2, + ACPI_IBFT_TYPE_NIC = 3, + ACPI_IBFT_TYPE_TARGET = 4, + ACPI_IBFT_TYPE_EXTENSIONS = 5, + ACPI_IBFT_TYPE_RESERVED = 6 /* 6 and greater are reserved */ +}; + +/* IBFT subtables */ + +struct acpi_ibft_control { + struct acpi_ibft_header header; + u16 extensions; + u16 initiator_offset; + u16 nic0_offset; + u16 target0_offset; + u16 nic1_offset; + u16 target1_offset; +}; + +struct acpi_ibft_initiator { + struct acpi_ibft_header header; + u8 sns_server[16]; + u8 slp_server[16]; + u8 primary_server[16]; + u8 secondary_server[16]; + u16 name_length; + u16 name_offset; +}; + +struct acpi_ibft_nic { + struct acpi_ibft_header header; + u8 ip_address[16]; + u8 subnet_mask_prefix; + u8 origin; + u8 gateway[16]; + u8 primary_dns[16]; + u8 secondary_dns[16]; + u8 dhcp[16]; + u16 vlan; + u8 mac_address[6]; + u16 pci_address; + u16 name_length; + u16 name_offset; +}; + +struct acpi_ibft_target { + struct acpi_ibft_header header; + u8 target_ip_address[16]; + u16 target_ip_socket; + u8 target_boot_lun[8]; + u8 chap_type; + u8 nic_association; + u16 target_name_length; + u16 target_name_offset; + u16 chap_name_length; + u16 chap_name_offset; + u16 chap_secret_length; + u16 chap_secret_offset; + u16 
reverse_chap_name_length; + u16 reverse_chap_name_offset; + u16 reverse_chap_secret_length; + u16 reverse_chap_secret_offset; }; /******************************************************************************* @@ -256,7 +662,7 @@ struct acpi_table_bdat { * IORT - IO Remapping Table * * Conforms to "IO Remapping Table System Software on ARM Platforms", - * Document number: ARM DEN 0049E.b, Feb 2021 + * Document number: ARM DEN 0049B, October 2015 * ******************************************************************************/ @@ -274,7 +680,7 @@ struct acpi_iort_node { u8 type; u16 length; u8 revision; - u32 identifier; + u32 reserved; u32 mapping_count; u32 mapping_offset; char node_data[1]; @@ -287,9 +693,7 @@ enum acpi_iort_node_type { ACPI_IORT_NODE_NAMED_COMPONENT = 0x01, ACPI_IORT_NODE_PCI_ROOT_COMPLEX = 0x02, ACPI_IORT_NODE_SMMU = 0x03, - ACPI_IORT_NODE_SMMU_V3 = 0x04, - ACPI_IORT_NODE_PMCG = 0x05, - ACPI_IORT_NODE_RMR = 0x06, + ACPI_IORT_NODE_SMMU_V3 = 0x04 }; struct acpi_iort_id_mapping { @@ -333,7 +737,7 @@ struct acpi_iort_memory_access { */ struct acpi_iort_its_group { u32 its_count; - u32 identifiers[1]; /* GIC ITS identifier array */ + u32 identifiers[1]; /* GIC ITS identifier arrary */ }; struct acpi_iort_named_component { @@ -343,24 +747,16 @@ struct acpi_iort_named_component { char device_name[1]; /* Path of namespace object */ }; -/* Masks for Flags field above */ - -#define ACPI_IORT_NC_STALL_SUPPORTED (1) -#define ACPI_IORT_NC_PASID_BITS (31<<1) - struct acpi_iort_root_complex { u64 memory_properties; /* Memory access properties */ u32 ats_attribute; u32 pci_segment_number; - u8 memory_address_limit; /* Memory address size limit */ - u8 reserved[3]; /* Reserved, must be zero */ }; -/* Masks for ats_attribute field above */ +/* Values for ats_attribute field above */ -#define ACPI_IORT_ATS_SUPPORTED (1) /* The root complex ATS support */ -#define ACPI_IORT_PRI_SUPPORTED (1<<1) /* The root complex PRI support */ -#define 
ACPI_IORT_PASID_FWD_SUPPORTED (1<<2) /* The root complex PASID forward support */ +#define ACPI_IORT_ATS_SUPPORTED 0x00000001 /* The root complex supports ATS */ +#define ACPI_IORT_ATS_UNSUPPORTED 0x00000000 /* The root complex doesn't support ATS */ struct acpi_iort_smmu { u64 base_address; /* SMMU base address */ @@ -381,67 +777,28 @@ struct acpi_iort_smmu { #define ACPI_IORT_SMMU_V2 0x00000001 /* Generic SMMUv2 */ #define ACPI_IORT_SMMU_CORELINK_MMU400 0x00000002 /* ARM Corelink MMU-400 */ #define ACPI_IORT_SMMU_CORELINK_MMU500 0x00000003 /* ARM Corelink MMU-500 */ -#define ACPI_IORT_SMMU_CORELINK_MMU401 0x00000004 /* ARM Corelink MMU-401 */ -#define ACPI_IORT_SMMU_CAVIUM_THUNDERX 0x00000005 /* Cavium thunder_x SMMUv2 */ /* Masks for Flags field above */ #define ACPI_IORT_SMMU_DVM_SUPPORTED (1) #define ACPI_IORT_SMMU_COHERENT_WALK (1<<1) -/* Global interrupt format */ - -struct acpi_iort_smmu_gsi { - u32 nsg_irpt; - u32 nsg_irpt_flags; - u32 nsg_cfg_irpt; - u32 nsg_cfg_irpt_flags; -}; - struct acpi_iort_smmu_v3 { u64 base_address; /* SMMUv3 base address */ u32 flags; u32 reserved; u64 vatos_address; - u32 model; + u32 model; /* O: generic SMMUv3 */ u32 event_gsiv; u32 pri_gsiv; u32 gerr_gsiv; u32 sync_gsiv; - u32 pxm; - u32 id_mapping_index; }; -/* Values for Model field above */ - -#define ACPI_IORT_SMMU_V3_GENERIC 0x00000000 /* Generic SMMUv3 */ -#define ACPI_IORT_SMMU_V3_HISILICON_HI161X 0x00000001 /* hi_silicon Hi161x SMMUv3 */ -#define ACPI_IORT_SMMU_V3_CAVIUM_CN99XX 0x00000002 /* Cavium CN99xx SMMUv3 */ - /* Masks for Flags field above */ #define ACPI_IORT_SMMU_V3_COHACC_OVERRIDE (1) -#define ACPI_IORT_SMMU_V3_HTTU_OVERRIDE (3<<1) -#define ACPI_IORT_SMMU_V3_PXM_VALID (1<<3) - -struct acpi_iort_pmcg { - u64 page0_base_address; - u32 overflow_gsiv; - u32 node_reference; - u64 page1_base_address; -}; - -struct acpi_iort_rmr { - u32 flags; - u32 rmr_count; - u32 rmr_offset; -}; - -struct acpi_iort_rmr_desc { - u64 base_address; - u64 length; - u32 reserved; 
-}; +#define ACPI_IORT_SMMU_V3_HTTU_OVERRIDE (1<<1) /******************************************************************************* * @@ -477,9 +834,7 @@ struct acpi_ivrs_header { /* Values for subtable Type above */ enum acpi_ivrs_type { - ACPI_IVRS_TYPE_HARDWARE1 = 0x10, - ACPI_IVRS_TYPE_HARDWARE2 = 0x11, - ACPI_IVRS_TYPE_HARDWARE3 = 0x40, + ACPI_IVRS_TYPE_HARDWARE = 0x10, ACPI_IVRS_TYPE_MEMORY1 = 0x20, ACPI_IVRS_TYPE_MEMORY2 = 0x21, ACPI_IVRS_TYPE_MEMORY3 = 0x22 @@ -506,26 +861,13 @@ enum acpi_ivrs_type { /* 0x10: I/O Virtualization Hardware Definition Block (IVHD) */ -struct acpi_ivrs_hardware_10 { +struct acpi_ivrs_hardware { struct acpi_ivrs_header header; u16 capability_offset; /* Offset for IOMMU control fields */ u64 base_address; /* IOMMU control registers */ u16 pci_segment_group; u16 info; /* MSI number and unit ID */ - u32 feature_reporting; -}; - -/* 0x11: I/O Virtualization Hardware Definition Block (IVHD) */ - -struct acpi_ivrs_hardware_11 { - struct acpi_ivrs_header header; - u16 capability_offset; /* Offset for IOMMU control fields */ - u64 base_address; /* IOMMU control registers */ - u16 pci_segment_group; - u16 info; /* MSI number and unit ID */ - u32 attributes; - u64 efr_register_image; - u64 reserved; + u32 reserved; }; /* Masks for Info field above */ @@ -568,11 +910,7 @@ enum acpi_ivrs_device_entry_type { ACPI_IVRS_TYPE_ALIAS_START = 67, /* Uses struct acpi_ivrs_device8a */ ACPI_IVRS_TYPE_EXT_SELECT = 70, /* Uses struct acpi_ivrs_device8b */ ACPI_IVRS_TYPE_EXT_START = 71, /* Uses struct acpi_ivrs_device8b */ - ACPI_IVRS_TYPE_SPECIAL = 72, /* Uses struct acpi_ivrs_device8c */ - - /* Variable-length device entries */ - - ACPI_IVRS_TYPE_HID = 240 /* Uses ACPI_IVRS_DEVICE_HID */ + ACPI_IVRS_TYPE_SPECIAL = 72 /* Uses struct acpi_ivrs_device8c */ }; /* Values for Data field above */ @@ -624,22 +962,6 @@ struct acpi_ivrs_device8c { #define ACPI_IVHD_IOAPIC 1 #define ACPI_IVHD_HPET 2 -/* Type 240: variable-length device entry */ - -struct 
acpi_ivrs_device_hid { - struct acpi_ivrs_de_header header; - u64 acpi_hid; - u64 acpi_cid; - u8 uid_type; - u8 uid_length; -}; - -/* Values for uid_type above */ - -#define ACPI_IVRS_UID_NOT_PRESENT 0 -#define ACPI_IVRS_UID_IS_INTEGER 1 -#define ACPI_IVRS_UID_IS_STRING 2 - /* 0x20, 0x21, 0x22: I/O Virtualization Memory Definition Block (IVMD) */ struct acpi_ivrs_memory { @@ -699,303 +1021,6 @@ struct acpi_lpit_native { u64 counter_frequency; }; -/******************************************************************************* - * - * MADT - Multiple APIC Description Table - * Version 3 - * - ******************************************************************************/ - -struct acpi_table_madt { - struct acpi_table_header header; /* Common ACPI table header */ - u32 address; /* Physical address of local APIC */ - u32 flags; -}; - -/* Masks for Flags field above */ - -#define ACPI_MADT_PCAT_COMPAT (1) /* 00: System also has dual 8259s */ - -/* Values for PCATCompat flag */ - -#define ACPI_MADT_DUAL_PIC 1 -#define ACPI_MADT_MULTIPLE_APIC 0 - -/* Values for MADT subtable type in struct acpi_subtable_header */ - -enum acpi_madt_type { - ACPI_MADT_TYPE_LOCAL_APIC = 0, - ACPI_MADT_TYPE_IO_APIC = 1, - ACPI_MADT_TYPE_INTERRUPT_OVERRIDE = 2, - ACPI_MADT_TYPE_NMI_SOURCE = 3, - ACPI_MADT_TYPE_LOCAL_APIC_NMI = 4, - ACPI_MADT_TYPE_LOCAL_APIC_OVERRIDE = 5, - ACPI_MADT_TYPE_IO_SAPIC = 6, - ACPI_MADT_TYPE_LOCAL_SAPIC = 7, - ACPI_MADT_TYPE_INTERRUPT_SOURCE = 8, - ACPI_MADT_TYPE_LOCAL_X2APIC = 9, - ACPI_MADT_TYPE_LOCAL_X2APIC_NMI = 10, - ACPI_MADT_TYPE_GENERIC_INTERRUPT = 11, - ACPI_MADT_TYPE_GENERIC_DISTRIBUTOR = 12, - ACPI_MADT_TYPE_GENERIC_MSI_FRAME = 13, - ACPI_MADT_TYPE_GENERIC_REDISTRIBUTOR = 14, - ACPI_MADT_TYPE_GENERIC_TRANSLATOR = 15, - ACPI_MADT_TYPE_MULTIPROC_WAKEUP = 16, - ACPI_MADT_TYPE_RESERVED = 17 /* 17 and greater are reserved */ -}; - -/* - * MADT Subtables, correspond to Type in struct acpi_subtable_header - */ - -/* 0: Processor Local APIC */ - -struct 
acpi_madt_local_apic { - struct acpi_subtable_header header; - u8 processor_id; /* ACPI processor id */ - u8 id; /* Processor's local APIC id */ - u32 lapic_flags; -}; - -/* 1: IO APIC */ - -struct acpi_madt_io_apic { - struct acpi_subtable_header header; - u8 id; /* I/O APIC ID */ - u8 reserved; /* reserved - must be zero */ - u32 address; /* APIC physical address */ - u32 global_irq_base; /* Global system interrupt where INTI lines start */ -}; - -/* 2: Interrupt Override */ - -struct acpi_madt_interrupt_override { - struct acpi_subtable_header header; - u8 bus; /* 0 - ISA */ - u8 source_irq; /* Interrupt source (IRQ) */ - u32 global_irq; /* Global system interrupt */ - u16 inti_flags; -}; - -/* 3: NMI Source */ - -struct acpi_madt_nmi_source { - struct acpi_subtable_header header; - u16 inti_flags; - u32 global_irq; /* Global system interrupt */ -}; - -/* 4: Local APIC NMI */ - -struct acpi_madt_local_apic_nmi { - struct acpi_subtable_header header; - u8 processor_id; /* ACPI processor id */ - u16 inti_flags; - u8 lint; /* LINTn to which NMI is connected */ -}; - -/* 5: Address Override */ - -struct acpi_madt_local_apic_override { - struct acpi_subtable_header header; - u16 reserved; /* Reserved, must be zero */ - u64 address; /* APIC physical address */ -}; - -/* 6: I/O Sapic */ - -struct acpi_madt_io_sapic { - struct acpi_subtable_header header; - u8 id; /* I/O SAPIC ID */ - u8 reserved; /* Reserved, must be zero */ - u32 global_irq_base; /* Global interrupt for SAPIC start */ - u64 address; /* SAPIC physical address */ -}; - -/* 7: Local Sapic */ - -struct acpi_madt_local_sapic { - struct acpi_subtable_header header; - u8 processor_id; /* ACPI processor id */ - u8 id; /* SAPIC ID */ - u8 eid; /* SAPIC EID */ - u8 reserved[3]; /* Reserved, must be zero */ - u32 lapic_flags; - u32 uid; /* Numeric UID - ACPI 3.0 */ - char uid_string[1]; /* String UID - ACPI 3.0 */ -}; - -/* 8: Platform Interrupt Source */ - -struct acpi_madt_interrupt_source { - struct 
acpi_subtable_header header; - u16 inti_flags; - u8 type; /* 1=PMI, 2=INIT, 3=corrected */ - u8 id; /* Processor ID */ - u8 eid; /* Processor EID */ - u8 io_sapic_vector; /* Vector value for PMI interrupts */ - u32 global_irq; /* Global system interrupt */ - u32 flags; /* Interrupt Source Flags */ -}; - -/* Masks for Flags field above */ - -#define ACPI_MADT_CPEI_OVERRIDE (1) - -/* 9: Processor Local X2APIC (ACPI 4.0) */ - -struct acpi_madt_local_x2apic { - struct acpi_subtable_header header; - u16 reserved; /* reserved - must be zero */ - u32 local_apic_id; /* Processor x2APIC ID */ - u32 lapic_flags; - u32 uid; /* ACPI processor UID */ -}; - -/* 10: Local X2APIC NMI (ACPI 4.0) */ - -struct acpi_madt_local_x2apic_nmi { - struct acpi_subtable_header header; - u16 inti_flags; - u32 uid; /* ACPI processor UID */ - u8 lint; /* LINTn to which NMI is connected */ - u8 reserved[3]; /* reserved - must be zero */ -}; - -/* 11: Generic interrupt - GICC (ACPI 5.0 + ACPI 6.0 + ACPI 6.3 changes) */ - -struct acpi_madt_generic_interrupt { - struct acpi_subtable_header header; - u16 reserved; /* reserved - must be zero */ - u32 cpu_interface_number; - u32 uid; - u32 flags; - u32 parking_version; - u32 performance_interrupt; - u64 parked_address; - u64 base_address; - u64 gicv_base_address; - u64 gich_base_address; - u32 vgic_interrupt; - u64 gicr_base_address; - u64 arm_mpidr; - u8 efficiency_class; - u8 reserved2[1]; - u16 spe_interrupt; /* ACPI 6.3 */ -}; - -/* Masks for Flags field above */ - -/* ACPI_MADT_ENABLED (1) Processor is usable if set */ -#define ACPI_MADT_PERFORMANCE_IRQ_MODE (1<<1) /* 01: Performance Interrupt Mode */ -#define ACPI_MADT_VGIC_IRQ_MODE (1<<2) /* 02: VGIC Maintenance Interrupt mode */ - -/* 12: Generic Distributor (ACPI 5.0 + ACPI 6.0 changes) */ - -struct acpi_madt_generic_distributor { - struct acpi_subtable_header header; - u16 reserved; /* reserved - must be zero */ - u32 gic_id; - u64 base_address; - u32 global_irq_base; - u8 version; - u8 
reserved2[3]; /* reserved - must be zero */ -}; - -/* Values for Version field above */ - -enum acpi_madt_gic_version { - ACPI_MADT_GIC_VERSION_NONE = 0, - ACPI_MADT_GIC_VERSION_V1 = 1, - ACPI_MADT_GIC_VERSION_V2 = 2, - ACPI_MADT_GIC_VERSION_V3 = 3, - ACPI_MADT_GIC_VERSION_V4 = 4, - ACPI_MADT_GIC_VERSION_RESERVED = 5 /* 5 and greater are reserved */ -}; - -/* 13: Generic MSI Frame (ACPI 5.1) */ - -struct acpi_madt_generic_msi_frame { - struct acpi_subtable_header header; - u16 reserved; /* reserved - must be zero */ - u32 msi_frame_id; - u64 base_address; - u32 flags; - u16 spi_count; - u16 spi_base; -}; - -/* Masks for Flags field above */ - -#define ACPI_MADT_OVERRIDE_SPI_VALUES (1) - -/* 14: Generic Redistributor (ACPI 5.1) */ - -struct acpi_madt_generic_redistributor { - struct acpi_subtable_header header; - u16 reserved; /* reserved - must be zero */ - u64 base_address; - u32 length; -}; - -/* 15: Generic Translator (ACPI 6.0) */ - -struct acpi_madt_generic_translator { - struct acpi_subtable_header header; - u16 reserved; /* reserved - must be zero */ - u32 translation_id; - u64 base_address; - u32 reserved2; -}; - -/* 16: Multiprocessor wakeup (ACPI 6.4) */ - -struct acpi_madt_multiproc_wakeup { - struct acpi_subtable_header header; - u16 mailbox_version; - u32 reserved; /* reserved - must be zero */ - u64 base_address; -}; - -#define ACPI_MULTIPROC_WAKEUP_MB_OS_SIZE 2032 -#define ACPI_MULTIPROC_WAKEUP_MB_FIRMWARE_SIZE 2048 - -struct acpi_madt_multiproc_wakeup_mailbox { - u16 command; - u16 reserved; /* reserved - must be zero */ - u32 apic_id; - u64 wakeup_vector; - u8 reserved_os[ACPI_MULTIPROC_WAKEUP_MB_OS_SIZE]; /* reserved for OS use */ - u8 reserved_firmware[ACPI_MULTIPROC_WAKEUP_MB_FIRMWARE_SIZE]; /* reserved for firmware use */ -}; - -#define ACPI_MP_WAKE_COMMAND_WAKEUP 1 - -/* - * Common flags fields for MADT subtables - */ - -/* MADT Local APIC flags */ - -#define ACPI_MADT_ENABLED (1) /* 00: Processor is usable if set */ - -/* MADT MPS INTI flags 
(inti_flags) */ - -#define ACPI_MADT_POLARITY_MASK (3) /* 00-01: Polarity of APIC I/O input signals */ -#define ACPI_MADT_TRIGGER_MASK (3<<2) /* 02-03: Trigger mode of APIC input signals */ - -/* Values for MPS INTI flags */ - -#define ACPI_MADT_POLARITY_CONFORMS 0 -#define ACPI_MADT_POLARITY_ACTIVE_HIGH 1 -#define ACPI_MADT_POLARITY_RESERVED 2 -#define ACPI_MADT_POLARITY_ACTIVE_LOW 3 - -#define ACPI_MADT_TRIGGER_CONFORMS (0) -#define ACPI_MADT_TRIGGER_EDGE (1<<2) -#define ACPI_MADT_TRIGGER_RESERVED (2<<2) -#define ACPI_MADT_TRIGGER_LEVEL (3<<2) - /******************************************************************************* * * MCFG - PCI Memory Mapped Configuration table and subtable @@ -1046,127 +1071,6 @@ struct acpi_table_mchi { u8 pci_function; }; -/******************************************************************************* - * - * MPST - Memory Power State Table (ACPI 5.0) - * Version 1 - * - ******************************************************************************/ - -#define ACPI_MPST_CHANNEL_INFO \ - u8 channel_id; \ - u8 reserved1[3]; \ - u16 power_node_count; \ - u16 reserved2; - -/* Main table */ - -struct acpi_table_mpst { - struct acpi_table_header header; /* Common ACPI table header */ - ACPI_MPST_CHANNEL_INFO /* Platform Communication Channel */ -}; - -/* Memory Platform Communication Channel Info */ - -struct acpi_mpst_channel { - ACPI_MPST_CHANNEL_INFO /* Platform Communication Channel */ -}; - -/* Memory Power Node Structure */ - -struct acpi_mpst_power_node { - u8 flags; - u8 reserved1; - u16 node_id; - u32 length; - u64 range_address; - u64 range_length; - u32 num_power_states; - u32 num_physical_components; -}; - -/* Values for Flags field above */ - -#define ACPI_MPST_ENABLED 1 -#define ACPI_MPST_POWER_MANAGED 2 -#define ACPI_MPST_HOT_PLUG_CAPABLE 4 - -/* Memory Power State Structure (follows POWER_NODE above) */ - -struct acpi_mpst_power_state { - u8 power_state; - u8 info_index; -}; - -/* Physical Component ID Structure (follows 
POWER_STATE above) */ - -struct acpi_mpst_component { - u16 component_id; -}; - -/* Memory Power State Characteristics Structure (follows all POWER_NODEs) */ - -struct acpi_mpst_data_hdr { - u16 characteristics_count; - u16 reserved; -}; - -struct acpi_mpst_power_data { - u8 structure_id; - u8 flags; - u16 reserved1; - u32 average_power; - u32 power_saving; - u64 exit_latency; - u64 reserved2; -}; - -/* Values for Flags field above */ - -#define ACPI_MPST_PRESERVE 1 -#define ACPI_MPST_AUTOENTRY 2 -#define ACPI_MPST_AUTOEXIT 4 - -/* Shared Memory Region (not part of an ACPI table) */ - -struct acpi_mpst_shared { - u32 signature; - u16 pcc_command; - u16 pcc_status; - u32 command_register; - u32 status_register; - u32 power_state_id; - u32 power_node_id; - u64 energy_consumed; - u64 average_power; -}; - -/******************************************************************************* - * - * MSCT - Maximum System Characteristics Table (ACPI 4.0) - * Version 1 - * - ******************************************************************************/ - -struct acpi_table_msct { - struct acpi_table_header header; /* Common ACPI table header */ - u32 proximity_offset; /* Location of proximity info struct(s) */ - u32 max_proximity_domains; /* Max number of proximity domains */ - u32 max_clock_domains; /* Max number of clock domains */ - u64 max_address; /* Max physical address in system */ -}; - -/* subtable - Maximum Proximity Domain Information. 
Version 1 */ - -struct acpi_msct_proximity { - u8 revision; - u8 length; - u32 range_start; /* Start of domain range */ - u32 range_end; /* End of domain range */ - u32 processor_capacity; - u64 memory_capacity; /* In bytes */ -}; - /******************************************************************************* * * MSDM - Microsoft Data Management table @@ -1184,1026 +1088,403 @@ struct acpi_table_msdm { /******************************************************************************* * - * NFIT - NVDIMM Interface Table (ACPI 6.0+) + * MTMR - MID Timer Table * Version 1 * + * Conforms to "Simple Firmware Interface Specification", + * Draft 0.8.2, Oct 19, 2010 + * NOTE: The ACPI MTMR is equivalent to the SFI MTMR table. + * ******************************************************************************/ -struct acpi_table_nfit { +struct acpi_table_mtmr { struct acpi_table_header header; /* Common ACPI table header */ - u32 reserved; /* Reserved, must be zero */ }; -/* Subtable header for NFIT */ +/* MTMR entry */ -struct acpi_nfit_header { - u16 type; - u16 length; +struct acpi_mtmr_entry { + struct acpi_generic_address physical_address; + u32 frequency; + u32 irq; }; -/* Values for subtable type in struct acpi_nfit_header */ +/******************************************************************************* + * + * SLIC - Software Licensing Description Table + * + * Conforms to "Microsoft Software Licensing Tables (SLIC and MSDM)", + * November 29, 2011. 
Copyright 2011 Microsoft + * + ******************************************************************************/ -enum acpi_nfit_type { - ACPI_NFIT_TYPE_SYSTEM_ADDRESS = 0, - ACPI_NFIT_TYPE_MEMORY_MAP = 1, - ACPI_NFIT_TYPE_INTERLEAVE = 2, - ACPI_NFIT_TYPE_SMBIOS = 3, - ACPI_NFIT_TYPE_CONTROL_REGION = 4, - ACPI_NFIT_TYPE_DATA_REGION = 5, - ACPI_NFIT_TYPE_FLUSH_ADDRESS = 6, - ACPI_NFIT_TYPE_CAPABILITIES = 7, - ACPI_NFIT_TYPE_RESERVED = 8 /* 8 and greater are reserved */ +/* Basic SLIC table is only the common ACPI header */ + +struct acpi_table_slic { + struct acpi_table_header header; /* Common ACPI table header */ }; -/* - * NFIT Subtables - */ +/******************************************************************************* + * + * SPCR - Serial Port Console Redirection table + * Version 2 + * + * Conforms to "Serial Port Console Redirection Table", + * Version 1.03, August 10, 2015 + * + ******************************************************************************/ -/* 0: System Physical Address Range Structure */ - -struct acpi_nfit_system_address { - struct acpi_nfit_header header; - u16 range_index; - u16 flags; - u32 reserved; /* Reserved, must be zero */ - u32 proximity_domain; - u8 range_guid[16]; - u64 address; - u64 length; - u64 memory_mapping; - u64 location_cookie; /* ACPI 6.4 */ -}; - -/* Flags */ - -#define ACPI_NFIT_ADD_ONLINE_ONLY (1) /* 00: Add/Online Operation Only */ -#define ACPI_NFIT_PROXIMITY_VALID (1<<1) /* 01: Proximity Domain Valid */ -#define ACPI_NFIT_LOCATION_COOKIE_VALID (1<<2) /* 02: SPA location cookie valid (ACPI 6.4) */ - -/* Range Type GUIDs appear in the include/acuuid.h file */ - -/* 1: Memory Device to System Address Range Map Structure */ - -struct acpi_nfit_memory_map { - struct acpi_nfit_header header; - u32 device_handle; - u16 physical_id; - u16 region_id; - u16 range_index; - u16 region_index; - u64 region_size; - u64 region_offset; - u64 address; - u16 interleave_index; - u16 interleave_ways; - u16 flags; - u16 reserved; 
/* Reserved, must be zero */ -}; - -/* Flags */ - -#define ACPI_NFIT_MEM_SAVE_FAILED (1) /* 00: Last SAVE to Memory Device failed */ -#define ACPI_NFIT_MEM_RESTORE_FAILED (1<<1) /* 01: Last RESTORE from Memory Device failed */ -#define ACPI_NFIT_MEM_FLUSH_FAILED (1<<2) /* 02: Platform flush failed */ -#define ACPI_NFIT_MEM_NOT_ARMED (1<<3) /* 03: Memory Device is not armed */ -#define ACPI_NFIT_MEM_HEALTH_OBSERVED (1<<4) /* 04: Memory Device observed SMART/health events */ -#define ACPI_NFIT_MEM_HEALTH_ENABLED (1<<5) /* 05: SMART/health events enabled */ -#define ACPI_NFIT_MEM_MAP_FAILED (1<<6) /* 06: Mapping to SPA failed */ - -/* 2: Interleave Structure */ - -struct acpi_nfit_interleave { - struct acpi_nfit_header header; - u16 interleave_index; - u16 reserved; /* Reserved, must be zero */ - u32 line_count; - u32 line_size; - u32 line_offset[1]; /* Variable length */ -}; - -/* 3: SMBIOS Management Information Structure */ - -struct acpi_nfit_smbios { - struct acpi_nfit_header header; - u32 reserved; /* Reserved, must be zero */ - u8 data[1]; /* Variable length */ -}; - -/* 4: NVDIMM Control Region Structure */ - -struct acpi_nfit_control_region { - struct acpi_nfit_header header; - u16 region_index; - u16 vendor_id; - u16 device_id; - u16 revision_id; - u16 subsystem_vendor_id; - u16 subsystem_device_id; - u16 subsystem_revision_id; - u8 valid_fields; - u8 manufacturing_location; - u16 manufacturing_date; - u8 reserved[2]; /* Reserved, must be zero */ - u32 serial_number; - u16 code; - u16 windows; - u64 window_size; - u64 command_offset; - u64 command_size; - u64 status_offset; - u64 status_size; - u16 flags; - u8 reserved1[6]; /* Reserved, must be zero */ -}; - -/* Flags */ - -#define ACPI_NFIT_CONTROL_BUFFERED (1) /* Block Data Windows implementation is buffered */ - -/* valid_fields bits */ - -#define ACPI_NFIT_CONTROL_MFG_INFO_VALID (1) /* Manufacturing fields are valid */ - -/* 5: NVDIMM Block Data Window Region Structure */ - -struct acpi_nfit_data_region 
{ - struct acpi_nfit_header header; - u16 region_index; - u16 windows; - u64 offset; - u64 size; - u64 capacity; - u64 start_address; -}; - -/* 6: Flush Hint Address Structure */ - -struct acpi_nfit_flush_address { - struct acpi_nfit_header header; - u32 device_handle; - u16 hint_count; - u8 reserved[6]; /* Reserved, must be zero */ - u64 hint_address[1]; /* Variable length */ -}; - -/* 7: Platform Capabilities Structure */ - -struct acpi_nfit_capabilities { - struct acpi_nfit_header header; - u8 highest_capability; - u8 reserved[3]; /* Reserved, must be zero */ - u32 capabilities; +struct acpi_table_spcr { + struct acpi_table_header header; /* Common ACPI table header */ + u8 interface_type; /* 0=full 16550, 1=subset of 16550 */ + u8 reserved[3]; + struct acpi_generic_address serial_port; + u8 interrupt_type; + u8 pc_interrupt; + u32 interrupt; + u8 baud_rate; + u8 parity; + u8 stop_bits; + u8 flow_control; + u8 terminal_type; + u8 reserved1; + u16 pci_device_id; + u16 pci_vendor_id; + u8 pci_bus; + u8 pci_device; + u8 pci_function; + u32 pci_flags; + u8 pci_segment; u32 reserved2; }; -/* Capabilities Flags */ +/* Masks for pci_flags field above */ -#define ACPI_NFIT_CAPABILITY_CACHE_FLUSH (1) /* 00: Cache Flush to NVDIMM capable */ -#define ACPI_NFIT_CAPABILITY_MEM_FLUSH (1<<1) /* 01: Memory Flush to NVDIMM capable */ -#define ACPI_NFIT_CAPABILITY_MEM_MIRRORING (1<<2) /* 02: Memory Mirroring capable */ +#define ACPI_SPCR_DO_NOT_DISABLE (1) -/* - * NFIT/DVDIMM device handle support - used as the _ADR for each NVDIMM - */ -struct nfit_device_handle { - u32 handle; -}; - -/* Device handle construction and extraction macros */ - -#define ACPI_NFIT_DIMM_NUMBER_MASK 0x0000000F -#define ACPI_NFIT_CHANNEL_NUMBER_MASK 0x000000F0 -#define ACPI_NFIT_MEMORY_ID_MASK 0x00000F00 -#define ACPI_NFIT_SOCKET_ID_MASK 0x0000F000 -#define ACPI_NFIT_NODE_ID_MASK 0x0FFF0000 - -#define ACPI_NFIT_DIMM_NUMBER_OFFSET 0 -#define ACPI_NFIT_CHANNEL_NUMBER_OFFSET 4 -#define 
ACPI_NFIT_MEMORY_ID_OFFSET 8 -#define ACPI_NFIT_SOCKET_ID_OFFSET 12 -#define ACPI_NFIT_NODE_ID_OFFSET 16 - -/* Macro to construct a NFIT/NVDIMM device handle */ - -#define ACPI_NFIT_BUILD_DEVICE_HANDLE(dimm, channel, memory, socket, node) \ - ((dimm) | \ - ((channel) << ACPI_NFIT_CHANNEL_NUMBER_OFFSET) | \ - ((memory) << ACPI_NFIT_MEMORY_ID_OFFSET) | \ - ((socket) << ACPI_NFIT_SOCKET_ID_OFFSET) | \ - ((node) << ACPI_NFIT_NODE_ID_OFFSET)) - -/* Macros to extract individual fields from a NFIT/NVDIMM device handle */ - -#define ACPI_NFIT_GET_DIMM_NUMBER(handle) \ - ((handle) & ACPI_NFIT_DIMM_NUMBER_MASK) - -#define ACPI_NFIT_GET_CHANNEL_NUMBER(handle) \ - (((handle) & ACPI_NFIT_CHANNEL_NUMBER_MASK) >> ACPI_NFIT_CHANNEL_NUMBER_OFFSET) - -#define ACPI_NFIT_GET_MEMORY_ID(handle) \ - (((handle) & ACPI_NFIT_MEMORY_ID_MASK) >> ACPI_NFIT_MEMORY_ID_OFFSET) - -#define ACPI_NFIT_GET_SOCKET_ID(handle) \ - (((handle) & ACPI_NFIT_SOCKET_ID_MASK) >> ACPI_NFIT_SOCKET_ID_OFFSET) - -#define ACPI_NFIT_GET_NODE_ID(handle) \ - (((handle) & ACPI_NFIT_NODE_ID_MASK) >> ACPI_NFIT_NODE_ID_OFFSET) +/* Values for Interface Type: See the definition of the DBG2 table */ /******************************************************************************* * - * PCCT - Platform Communications Channel Table (ACPI 5.0) - * Version 2 (ACPI 6.2) + * SPMI - Server Platform Management Interface table + * Version 5 + * + * Conforms to "Intelligent Platform Management Interface Specification + * Second Generation v2.0", Document Revision 1.0, February 12, 2004 with + * June 12, 2009 markup. 
* ******************************************************************************/ -struct acpi_table_pcct { +struct acpi_table_spmi { struct acpi_table_header header; /* Common ACPI table header */ - u32 flags; - u64 reserved; -}; - -/* Values for Flags field above */ - -#define ACPI_PCCT_DOORBELL 1 - -/* Values for subtable type in struct acpi_subtable_header */ - -enum acpi_pcct_type { - ACPI_PCCT_TYPE_GENERIC_SUBSPACE = 0, - ACPI_PCCT_TYPE_HW_REDUCED_SUBSPACE = 1, - ACPI_PCCT_TYPE_HW_REDUCED_SUBSPACE_TYPE2 = 2, /* ACPI 6.1 */ - ACPI_PCCT_TYPE_EXT_PCC_MASTER_SUBSPACE = 3, /* ACPI 6.2 */ - ACPI_PCCT_TYPE_EXT_PCC_SLAVE_SUBSPACE = 4, /* ACPI 6.2 */ - ACPI_PCCT_TYPE_HW_REG_COMM_SUBSPACE = 5, /* ACPI 6.4 */ - ACPI_PCCT_TYPE_RESERVED = 6 /* 6 and greater are reserved */ -}; - -/* - * PCCT Subtables, correspond to Type in struct acpi_subtable_header - */ - -/* 0: Generic Communications Subspace */ - -struct acpi_pcct_subspace { - struct acpi_subtable_header header; - u8 reserved[6]; - u64 base_address; - u64 length; - struct acpi_generic_address doorbell_register; - u64 preserve_mask; - u64 write_mask; - u32 latency; - u32 max_access_rate; - u16 min_turnaround_time; -}; - -/* 1: HW-reduced Communications Subspace (ACPI 5.1) */ - -struct acpi_pcct_hw_reduced { - struct acpi_subtable_header header; - u32 platform_interrupt; - u8 flags; - u8 reserved; - u64 base_address; - u64 length; - struct acpi_generic_address doorbell_register; - u64 preserve_mask; - u64 write_mask; - u32 latency; - u32 max_access_rate; - u16 min_turnaround_time; -}; - -/* 2: HW-reduced Communications Subspace Type 2 (ACPI 6.1) */ - -struct acpi_pcct_hw_reduced_type2 { - struct acpi_subtable_header header; - u32 platform_interrupt; - u8 flags; - u8 reserved; - u64 base_address; - u64 length; - struct acpi_generic_address doorbell_register; - u64 preserve_mask; - u64 write_mask; - u32 latency; - u32 max_access_rate; - u16 min_turnaround_time; - struct acpi_generic_address platform_ack_register; - u64 
ack_preserve_mask; - u64 ack_write_mask; -}; - -/* 3: Extended PCC Master Subspace Type 3 (ACPI 6.2) */ - -struct acpi_pcct_ext_pcc_master { - struct acpi_subtable_header header; - u32 platform_interrupt; - u8 flags; + u8 interface_type; + u8 reserved; /* Must be 1 */ + u16 spec_revision; /* Version of IPMI */ + u8 interrupt_type; + u8 gpe_number; /* GPE assigned */ u8 reserved1; - u64 base_address; - u32 length; - struct acpi_generic_address doorbell_register; - u64 preserve_mask; - u64 write_mask; - u32 latency; - u32 max_access_rate; - u32 min_turnaround_time; - struct acpi_generic_address platform_ack_register; - u64 ack_preserve_mask; - u64 ack_set_mask; - u64 reserved2; - struct acpi_generic_address cmd_complete_register; - u64 cmd_complete_mask; - struct acpi_generic_address cmd_update_register; - u64 cmd_update_preserve_mask; - u64 cmd_update_set_mask; - struct acpi_generic_address error_status_register; - u64 error_status_mask; + u8 pci_device_flag; + u32 interrupt; + struct acpi_generic_address ipmi_register; + u8 pci_segment; + u8 pci_bus; + u8 pci_device; + u8 pci_function; + u8 reserved2; }; -/* 4: Extended PCC Slave Subspace Type 4 (ACPI 6.2) */ +/* Values for interface_type above */ -struct acpi_pcct_ext_pcc_slave { - struct acpi_subtable_header header; - u32 platform_interrupt; - u8 flags; - u8 reserved1; - u64 base_address; - u32 length; - struct acpi_generic_address doorbell_register; - u64 preserve_mask; - u64 write_mask; - u32 latency; - u32 max_access_rate; - u32 min_turnaround_time; - struct acpi_generic_address platform_ack_register; - u64 ack_preserve_mask; - u64 ack_set_mask; - u64 reserved2; - struct acpi_generic_address cmd_complete_register; - u64 cmd_complete_mask; - struct acpi_generic_address cmd_update_register; - u64 cmd_update_preserve_mask; - u64 cmd_update_set_mask; - struct acpi_generic_address error_status_register; - u64 error_status_mask; -}; - -/* 5: HW Registers based Communications Subspace */ - -struct acpi_pcct_hw_reg { 
- struct acpi_subtable_header header; - u16 version; - u64 base_address; - u64 length; - struct acpi_generic_address doorbell_register; - u64 doorbell_preserve; - u64 doorbell_write; - struct acpi_generic_address cmd_complete_register; - u64 cmd_complete_mask; - struct acpi_generic_address error_status_register; - u64 error_status_mask; - u32 nominal_latency; - u32 min_turnaround_time; -}; - -/* Values for doorbell flags above */ - -#define ACPI_PCCT_INTERRUPT_POLARITY (1) -#define ACPI_PCCT_INTERRUPT_MODE (1<<1) - -/* - * PCC memory structures (not part of the ACPI table) - */ - -/* Shared Memory Region */ - -struct acpi_pcct_shared_memory { - u32 signature; - u16 command; - u16 status; -}; - -/* Extended PCC Subspace Shared Memory Region (ACPI 6.2) */ - -struct acpi_pcct_ext_pcc_shared_memory { - u32 signature; - u32 flags; - u32 length; - u32 command; +enum acpi_spmi_interface_types { + ACPI_SPMI_NOT_USED = 0, + ACPI_SPMI_KEYBOARD = 1, + ACPI_SPMI_SMI = 2, + ACPI_SPMI_BLOCK_TRANSFER = 3, + ACPI_SPMI_SMBUS = 4, + ACPI_SPMI_RESERVED = 5 /* 5 and above are reserved */ }; /******************************************************************************* * - * PDTT - Platform Debug Trigger Table (ACPI 6.2) - * Version 0 + * TCPA - Trusted Computing Platform Alliance table + * Version 2 + * + * Conforms to "TCG ACPI Specification, Family 1.2 and 2.0", + * December 19, 2014 + * + * NOTE: There are two versions of the table with the same signature -- + * the client version and the server version. The common platform_class + * field is used to differentiate the two types of tables. * ******************************************************************************/ -struct acpi_table_pdtt { +struct acpi_table_tcpa_hdr { struct acpi_table_header header; /* Common ACPI table header */ - u8 trigger_count; - u8 reserved[3]; - u32 array_offset; + u16 platform_class; }; /* - * PDTT Communication Channel Identifier Structure. 
- * The number of these structures is defined by trigger_count above, - * starting at array_offset. + * Values for platform_class above. + * This is how the client and server subtables are differentiated */ -struct acpi_pdtt_channel { - u8 subchannel_id; - u8 flags; +#define ACPI_TCPA_CLIENT_TABLE 0 +#define ACPI_TCPA_SERVER_TABLE 1 + +struct acpi_table_tcpa_client { + u32 minimum_log_length; /* Minimum length for the event log area */ + u64 log_address; /* Address of the event log area */ }; -/* Flags for above */ - -#define ACPI_PDTT_RUNTIME_TRIGGER (1) -#define ACPI_PDTT_WAIT_COMPLETION (1<<1) -#define ACPI_PDTT_TRIGGER_ORDER (1<<2) - -/******************************************************************************* - * - * PHAT - Platform Health Assessment Table (ACPI 6.4) - * Version 1 - * - ******************************************************************************/ - -struct acpi_table_phat { - struct acpi_table_header header; /* Common ACPI table header */ -}; - -/* Common header for PHAT subtables that follow main table */ - -struct acpi_phat_header { - u16 type; - u16 length; - u8 revision; -}; - -/* Values for Type field above */ - -#define ACPI_PHAT_TYPE_FW_VERSION_DATA 0 -#define ACPI_PHAT_TYPE_FW_HEALTH_DATA 1 -#define ACPI_PHAT_TYPE_RESERVED 2 /* 0x02-0xFFFF are reserved */ - -/* - * PHAT subtables, correspond to Type in struct acpi_phat_header - */ - -/* 0: Firmware Version Data Record */ - -struct acpi_phat_version_data { - struct acpi_phat_header header; - u8 reserved[3]; - u32 element_count; -}; - -struct acpi_phat_version_element { - u8 guid[16]; - u64 version_value; - u32 producer_id; -}; - -/* 1: Firmware Health Data Record */ - -struct acpi_phat_health_data { - struct acpi_phat_header header; - u8 reserved[2]; - u8 health; - u8 device_guid[16]; - u32 device_specific_offset; /* Zero if no Device-specific data */ -}; - -/* Values for Health field above */ - -#define ACPI_PHAT_ERRORS_FOUND 0 -#define ACPI_PHAT_NO_ERRORS 1 -#define 
ACPI_PHAT_UNKNOWN_ERRORS 2 -#define ACPI_PHAT_ADVISORY 3 - -/******************************************************************************* - * - * PMTT - Platform Memory Topology Table (ACPI 5.0) - * Version 1 - * - ******************************************************************************/ - -struct acpi_table_pmtt { - struct acpi_table_header header; /* Common ACPI table header */ - u32 memory_device_count; - /* - * Immediately followed by: - * MEMORY_DEVICE memory_device_struct[memory_device_count]; - */ -}; - -/* Common header for PMTT subtables that follow main table */ - -struct acpi_pmtt_header { - u8 type; - u8 reserved1; - u16 length; - u16 flags; - u16 reserved2; - u32 memory_device_count; /* Zero means no memory device structs follow */ - /* - * Immediately followed by: - * u8 type_specific_data[] - * MEMORY_DEVICE memory_device_struct[memory_device_count]; - */ -}; - -/* Values for Type field above */ - -#define ACPI_PMTT_TYPE_SOCKET 0 -#define ACPI_PMTT_TYPE_CONTROLLER 1 -#define ACPI_PMTT_TYPE_DIMM 2 -#define ACPI_PMTT_TYPE_RESERVED 3 /* 0x03-0xFE are reserved */ -#define ACPI_PMTT_TYPE_VENDOR 0xFF - -/* Values for Flags field above */ - -#define ACPI_PMTT_TOP_LEVEL 0x0001 -#define ACPI_PMTT_PHYSICAL 0x0002 -#define ACPI_PMTT_MEMORY_TYPE 0x000C - -/* - * PMTT subtables, correspond to Type in struct acpi_pmtt_header - */ - -/* 0: Socket Structure */ - -struct acpi_pmtt_socket { - struct acpi_pmtt_header header; - u16 socket_id; +struct acpi_table_tcpa_server { u16 reserved; -}; - /* - * Immediately followed by: - * MEMORY_DEVICE memory_device_struct[memory_device_count]; - */ - -/* 1: Memory Controller subtable */ - -struct acpi_pmtt_controller { - struct acpi_pmtt_header header; - u16 controller_id; - u16 reserved; -}; - /* - * Immediately followed by: - * MEMORY_DEVICE memory_device_struct[memory_device_count]; - */ - -/* 2: Physical Component Identifier (DIMM) */ - -struct acpi_pmtt_physical_component { - struct acpi_pmtt_header header; - u32 
bios_handle; -}; - -/* 0xFF: Vendor Specific Data */ - -struct acpi_pmtt_vendor_specific { - struct acpi_pmtt_header header; - u8 type_uuid[16]; - u8 specific[]; - /* - * Immediately followed by: - * u8 vendor_specific_data[]; - * MEMORY_DEVICE memory_device_struct[memory_device_count]; - */ -}; - -/******************************************************************************* - * - * PPTT - Processor Properties Topology Table (ACPI 6.2) - * Version 1 - * - ******************************************************************************/ - -struct acpi_table_pptt { - struct acpi_table_header header; /* Common ACPI table header */ -}; - -/* Values for Type field above */ - -enum acpi_pptt_type { - ACPI_PPTT_TYPE_PROCESSOR = 0, - ACPI_PPTT_TYPE_CACHE = 1, - ACPI_PPTT_TYPE_ID = 2, - ACPI_PPTT_TYPE_RESERVED = 3 -}; - -/* 0: Processor Hierarchy Node Structure */ - -struct acpi_pptt_processor { - struct acpi_subtable_header header; - u16 reserved; - u32 flags; - u32 parent; - u32 acpi_processor_id; - u32 number_of_priv_resources; -}; - -/* Flags */ - -#define ACPI_PPTT_PHYSICAL_PACKAGE (1) -#define ACPI_PPTT_ACPI_PROCESSOR_ID_VALID (1<<1) -#define ACPI_PPTT_ACPI_PROCESSOR_IS_THREAD (1<<2) /* ACPI 6.3 */ -#define ACPI_PPTT_ACPI_LEAF_NODE (1<<3) /* ACPI 6.3 */ -#define ACPI_PPTT_ACPI_IDENTICAL (1<<4) /* ACPI 6.3 */ - -/* 1: Cache Type Structure */ - -struct acpi_pptt_cache { - struct acpi_subtable_header header; - u16 reserved; - u32 flags; - u32 next_level_of_cache; - u32 size; - u32 number_of_sets; - u8 associativity; - u8 attributes; - u16 line_size; -}; - -/* 1: Cache Type Structure for PPTT version 3 */ - -struct acpi_pptt_cache_v1 { - u32 cache_id; -}; - -/* Flags */ - -#define ACPI_PPTT_SIZE_PROPERTY_VALID (1) /* Physical property valid */ -#define ACPI_PPTT_NUMBER_OF_SETS_VALID (1<<1) /* Number of sets valid */ -#define ACPI_PPTT_ASSOCIATIVITY_VALID (1<<2) /* Associativity valid */ -#define ACPI_PPTT_ALLOCATION_TYPE_VALID (1<<3) /* Allocation type valid */ -#define 
ACPI_PPTT_CACHE_TYPE_VALID (1<<4) /* Cache type valid */ -#define ACPI_PPTT_WRITE_POLICY_VALID (1<<5) /* Write policy valid */ -#define ACPI_PPTT_LINE_SIZE_VALID (1<<6) /* Line size valid */ -#define ACPI_PPTT_CACHE_ID_VALID (1<<7) /* Cache ID valid */ - -/* Masks for Attributes */ - -#define ACPI_PPTT_MASK_ALLOCATION_TYPE (0x03) /* Allocation type */ -#define ACPI_PPTT_MASK_CACHE_TYPE (0x0C) /* Cache type */ -#define ACPI_PPTT_MASK_WRITE_POLICY (0x10) /* Write policy */ - -/* Attributes describing cache */ -#define ACPI_PPTT_CACHE_READ_ALLOCATE (0x0) /* Cache line is allocated on read */ -#define ACPI_PPTT_CACHE_WRITE_ALLOCATE (0x01) /* Cache line is allocated on write */ -#define ACPI_PPTT_CACHE_RW_ALLOCATE (0x02) /* Cache line is allocated on read and write */ -#define ACPI_PPTT_CACHE_RW_ALLOCATE_ALT (0x03) /* Alternate representation of above */ - -#define ACPI_PPTT_CACHE_TYPE_DATA (0x0) /* Data cache */ -#define ACPI_PPTT_CACHE_TYPE_INSTR (1<<2) /* Instruction cache */ -#define ACPI_PPTT_CACHE_TYPE_UNIFIED (2<<2) /* Unified I & D cache */ -#define ACPI_PPTT_CACHE_TYPE_UNIFIED_ALT (3<<2) /* Alternate representation of above */ - -#define ACPI_PPTT_CACHE_POLICY_WB (0x0) /* Cache is write back */ -#define ACPI_PPTT_CACHE_POLICY_WT (1<<4) /* Cache is write through */ - -/* 2: ID Structure */ - -struct acpi_pptt_id { - struct acpi_subtable_header header; - u16 reserved; - u32 vendor_id; - u64 level1_id; - u64 level2_id; - u16 major_rev; - u16 minor_rev; - u16 spin_rev; -}; - -/******************************************************************************* - * - * PRMT - Platform Runtime Mechanism Table - * Version 1 - * - ******************************************************************************/ - -struct acpi_table_prmt { - struct acpi_table_header header; /* Common ACPI table header */ -}; - -struct acpi_table_prmt_header { - u8 platform_guid[16]; - u32 module_info_offset; - u32 module_info_count; -}; - -struct acpi_prmt_module_header { - u16 revision; - u16 
length; -}; - -struct acpi_prmt_module_info { - u16 revision; - u16 length; - u8 module_guid[16]; - u16 major_rev; - u16 minor_rev; - u16 handler_info_count; - u32 handler_info_offset; - u64 mmio_list_pointer; -}; - -struct acpi_prmt_handler_info { - u16 revision; - u16 length; - u8 handler_guid[16]; - u64 handler_address; - u64 static_data_buffer_address; - u64 acpi_param_buffer_address; -}; - -/******************************************************************************* - * - * RASF - RAS Feature Table (ACPI 5.0) - * Version 1 - * - ******************************************************************************/ - -struct acpi_table_rasf { - struct acpi_table_header header; /* Common ACPI table header */ - u8 channel_id[12]; -}; - -/* RASF Platform Communication Channel Shared Memory Region */ - -struct acpi_rasf_shared_memory { - u32 signature; - u16 command; - u16 status; - u16 version; - u8 capabilities[16]; - u8 set_capabilities[16]; - u16 num_parameter_blocks; - u32 set_capabilities_status; -}; - -/* RASF Parameter Block Structure Header */ - -struct acpi_rasf_parameter_block { - u16 type; - u16 version; - u16 length; -}; - -/* RASF Parameter Block Structure for PATROL_SCRUB */ - -struct acpi_rasf_patrol_scrub_parameter { - struct acpi_rasf_parameter_block header; - u16 patrol_scrub_command; - u64 requested_address_range[2]; - u64 actual_address_range[2]; - u16 flags; - u8 requested_speed; -}; - -/* Masks for Flags and Speed fields above */ - -#define ACPI_RASF_SCRUBBER_RUNNING 1 -#define ACPI_RASF_SPEED (7<<1) -#define ACPI_RASF_SPEED_SLOW (0<<1) -#define ACPI_RASF_SPEED_MEDIUM (4<<1) -#define ACPI_RASF_SPEED_FAST (7<<1) - -/* Channel Commands */ - -enum acpi_rasf_commands { - ACPI_RASF_EXECUTE_RASF_COMMAND = 1 -}; - -/* Platform RAS Capabilities */ - -enum acpi_rasf_capabiliities { - ACPI_HW_PATROL_SCRUB_SUPPORTED = 0, - ACPI_SW_PATROL_SCRUB_EXPOSED = 1 -}; - -/* Patrol Scrub Commands */ - -enum acpi_rasf_patrol_scrub_commands { - 
ACPI_RASF_GET_PATROL_PARAMETERS = 1, - ACPI_RASF_START_PATROL_SCRUBBER = 2, - ACPI_RASF_STOP_PATROL_SCRUBBER = 3 -}; - -/* Channel Command flags */ - -#define ACPI_RASF_GENERATE_SCI (1<<15) - -/* Status values */ - -enum acpi_rasf_status { - ACPI_RASF_SUCCESS = 0, - ACPI_RASF_NOT_VALID = 1, - ACPI_RASF_NOT_SUPPORTED = 2, - ACPI_RASF_BUSY = 3, - ACPI_RASF_FAILED = 4, - ACPI_RASF_ABORTED = 5, - ACPI_RASF_INVALID_DATA = 6 -}; - -/* Status flags */ - -#define ACPI_RASF_COMMAND_COMPLETE (1) -#define ACPI_RASF_SCI_DOORBELL (1<<1) -#define ACPI_RASF_ERROR (1<<2) -#define ACPI_RASF_STATUS (0x1F<<3) - -/******************************************************************************* - * - * RGRT - Regulatory Graphics Resource Table - * Version 1 - * - * Conforms to "ACPI RGRT" available at: - * https://microsoft.github.io/mu/dyn/mu_plus/ms_core_pkg/acpi_RGRT/feature_acpi_rgrt/ - * - ******************************************************************************/ - -struct acpi_table_rgrt { - struct acpi_table_header header; /* Common ACPI table header */ - u16 version; - u8 image_type; - u8 reserved; - u8 image[0]; -}; - -/* image_type values */ - -enum acpi_rgrt_image_type { - ACPI_RGRT_TYPE_RESERVED0 = 0, - ACPI_RGRT_IMAGE_TYPE_PNG = 1, - ACPI_RGRT_TYPE_RESERVED = 2 /* 2 and greater are reserved */ -}; - -/******************************************************************************* - * - * SBST - Smart Battery Specification Table - * Version 1 - * - ******************************************************************************/ - -struct acpi_table_sbst { - struct acpi_table_header header; /* Common ACPI table header */ - u32 warning_level; - u32 low_level; - u32 critical_level; -}; - -/******************************************************************************* - * - * SDEI - Software Delegated Exception Interface Descriptor Table - * - * Conforms to "Software Delegated Exception Interface (SDEI)" ARM DEN0054A, - * May 8th, 2017. Copyright 2017 ARM Ltd. 
- * - ******************************************************************************/ - -struct acpi_table_sdei { - struct acpi_table_header header; /* Common ACPI table header */ -}; - -/******************************************************************************* - * - * SDEV - Secure Devices Table (ACPI 6.2) - * Version 1 - * - ******************************************************************************/ - -struct acpi_table_sdev { - struct acpi_table_header header; /* Common ACPI table header */ -}; - -struct acpi_sdev_header { - u8 type; - u8 flags; - u16 length; -}; - -/* Values for subtable type above */ - -enum acpi_sdev_type { - ACPI_SDEV_TYPE_NAMESPACE_DEVICE = 0, - ACPI_SDEV_TYPE_PCIE_ENDPOINT_DEVICE = 1, - ACPI_SDEV_TYPE_RESERVED = 2 /* 2 and greater are reserved */ -}; - -/* Values for flags above */ - -#define ACPI_SDEV_HANDOFF_TO_UNSECURE_OS (1) -#define ACPI_SDEV_SECURE_COMPONENTS_PRESENT (1<<1) - -/* - * SDEV subtables - */ - -/* 0: Namespace Device Based Secure Device Structure */ - -struct acpi_sdev_namespace { - struct acpi_sdev_header header; - u16 device_id_offset; - u16 device_id_length; - u16 vendor_data_offset; - u16 vendor_data_length; -}; - -struct acpi_sdev_secure_component { - u16 secure_component_offset; - u16 secure_component_length; -}; - -/* - * SDEV sub-subtables ("Components") for above - */ -struct acpi_sdev_component { - struct acpi_sdev_header header; -}; - -/* Values for sub-subtable type above */ - -enum acpi_sac_type { - ACPI_SDEV_TYPE_ID_COMPONENT = 0, - ACPI_SDEV_TYPE_MEM_COMPONENT = 1 -}; - -struct acpi_sdev_id_component { - struct acpi_sdev_header header; - u16 hardware_id_offset; - u16 hardware_id_length; - u16 subsystem_id_offset; - u16 subsystem_id_length; - u16 hardware_revision; - u8 hardware_rev_present; - u8 class_code_present; - u8 pci_base_class; - u8 pci_sub_class; - u8 pci_programming_xface; -}; - -struct acpi_sdev_mem_component { - struct acpi_sdev_header header; - u32 reserved; - u64 memory_base_address; 
- u64 memory_length; -}; - -/* 1: PCIe Endpoint Device Based Device Structure */ - -struct acpi_sdev_pcie { - struct acpi_sdev_header header; - u16 segment; - u16 start_bus; - u16 path_offset; - u16 path_length; - u16 vendor_data_offset; - u16 vendor_data_length; -}; - -/* 1a: PCIe Endpoint path entry */ - -struct acpi_sdev_pcie_path { + u64 minimum_log_length; /* Minimum length for the event log area */ + u64 log_address; /* Address of the event log area */ + u16 spec_revision; + u8 device_flags; + u8 interrupt_flags; + u8 gpe_number; + u8 reserved2[3]; + u32 global_interrupt; + struct acpi_generic_address address; + u32 reserved3; + struct acpi_generic_address config_address; + u8 group; + u8 bus; /* PCI Bus/Segment/Function numbers */ u8 device; u8 function; }; +/* Values for device_flags above */ + +#define ACPI_TCPA_PCI_DEVICE (1) +#define ACPI_TCPA_BUS_PNP (1<<1) +#define ACPI_TCPA_ADDRESS_VALID (1<<2) + +/* Values for interrupt_flags above */ + +#define ACPI_TCPA_INTERRUPT_MODE (1) +#define ACPI_TCPA_INTERRUPT_POLARITY (1<<1) +#define ACPI_TCPA_SCI_VIA_GPE (1<<2) +#define ACPI_TCPA_GLOBAL_INTERRUPT (1<<3) + /******************************************************************************* * - * SVKL - Storage Volume Key Location Table (ACPI 6.4) - * From: "Guest-Host-Communication Interface (GHCI) for Intel - * Trust Domain Extensions (Intel TDX)". 
- * Version 1 + * TPM2 - Trusted Platform Module (TPM) 2.0 Hardware Interface Table + * Version 4 + * + * Conforms to "TCG ACPI Specification, Family 1.2 and 2.0", + * December 19, 2014 * ******************************************************************************/ -struct acpi_table_svkl { +struct acpi_table_tpm2 { struct acpi_table_header header; /* Common ACPI table header */ - u32 count; + u16 platform_class; + u16 reserved; + u64 control_address; + u32 start_method; + + /* Platform-specific data follows */ }; -struct acpi_svkl_key { - u16 type; - u16 format; - u32 size; - u64 address; +/* Values for start_method above */ + +#define ACPI_TPM2_NOT_ALLOWED 0 +#define ACPI_TPM2_START_METHOD 2 +#define ACPI_TPM2_MEMORY_MAPPED 6 +#define ACPI_TPM2_COMMAND_BUFFER 7 +#define ACPI_TPM2_COMMAND_BUFFER_WITH_START_METHOD 8 + +/******************************************************************************* + * + * UEFI - UEFI Boot optimization Table + * Version 1 + * + * Conforms to "Unified Extensible Firmware Interface Specification", + * Version 2.3, May 8, 2009 + * + ******************************************************************************/ + +struct acpi_table_uefi { + struct acpi_table_header header; /* Common ACPI table header */ + u8 identifier[16]; /* UUID identifier */ + u16 data_offset; /* Offset of remaining data in table */ }; -enum acpi_svkl_type { - ACPI_SVKL_TYPE_MAIN_STORAGE = 0, - ACPI_SVKL_TYPE_RESERVED = 1 /* 1 and greater are reserved */ +/******************************************************************************* + * + * VRTC - Virtual Real Time Clock Table + * Version 1 + * + * Conforms to "Simple Firmware Interface Specification", + * Draft 0.8.2, Oct 19, 2010 + * NOTE: The ACPI VRTC is equivalent to The SFI MRTC table. 
+ * + ******************************************************************************/ + +struct acpi_table_vrtc { + struct acpi_table_header header; /* Common ACPI table header */ }; -enum acpi_svkl_format { - ACPI_SVKL_FORMAT_RAW_BINARY = 0, - ACPI_SVKL_FORMAT_RESERVED = 1 /* 1 and greater are reserved */ +/* VRTC entry */ + +struct acpi_vrtc_entry { + struct acpi_generic_address physical_address; + u32 irq; +}; + +/******************************************************************************* + * + * WAET - Windows ACPI Emulated devices Table + * Version 1 + * + * Conforms to "Windows ACPI Emulated Devices Table", version 1.0, April 6, 2009 + * + ******************************************************************************/ + +struct acpi_table_waet { + struct acpi_table_header header; /* Common ACPI table header */ + u32 flags; +}; + +/* Masks for Flags field above */ + +#define ACPI_WAET_RTC_NO_ACK (1) /* RTC requires no int acknowledge */ +#define ACPI_WAET_TIMER_ONE_READ (1<<1) /* PM timer requires only one read */ + +/******************************************************************************* + * + * WDAT - Watchdog Action Table + * Version 1 + * + * Conforms to "Hardware Watchdog Timers Design Specification", + * Copyright 2006 Microsoft Corporation. 
+ * + ******************************************************************************/ + +struct acpi_table_wdat { + struct acpi_table_header header; /* Common ACPI table header */ + u32 header_length; /* Watchdog Header Length */ + u16 pci_segment; /* PCI Segment number */ + u8 pci_bus; /* PCI Bus number */ + u8 pci_device; /* PCI Device number */ + u8 pci_function; /* PCI Function number */ + u8 reserved[3]; + u32 timer_period; /* Period of one timer count (msec) */ + u32 max_count; /* Maximum counter value supported */ + u32 min_count; /* Minimum counter value */ + u8 flags; + u8 reserved2[3]; + u32 entries; /* Number of watchdog entries that follow */ +}; + +/* Masks for Flags field above */ + +#define ACPI_WDAT_ENABLED (1) +#define ACPI_WDAT_STOPPED 0x80 + +/* WDAT Instruction Entries (actions) */ + +struct acpi_wdat_entry { + u8 action; + u8 instruction; + u16 reserved; + struct acpi_generic_address register_region; + u32 value; /* Value used with Read/Write register */ + u32 mask; /* Bitmask required for this register instruction */ +}; + +/* Values for Action field above */ + +enum acpi_wdat_actions { + ACPI_WDAT_RESET = 1, + ACPI_WDAT_GET_CURRENT_COUNTDOWN = 4, + ACPI_WDAT_GET_COUNTDOWN = 5, + ACPI_WDAT_SET_COUNTDOWN = 6, + ACPI_WDAT_GET_RUNNING_STATE = 8, + ACPI_WDAT_SET_RUNNING_STATE = 9, + ACPI_WDAT_GET_STOPPED_STATE = 10, + ACPI_WDAT_SET_STOPPED_STATE = 11, + ACPI_WDAT_GET_REBOOT = 16, + ACPI_WDAT_SET_REBOOT = 17, + ACPI_WDAT_GET_SHUTDOWN = 18, + ACPI_WDAT_SET_SHUTDOWN = 19, + ACPI_WDAT_GET_STATUS = 32, + ACPI_WDAT_SET_STATUS = 33, + ACPI_WDAT_ACTION_RESERVED = 34 /* 34 and greater are reserved */ +}; + +/* Values for Instruction field above */ + +enum acpi_wdat_instructions { + ACPI_WDAT_READ_VALUE = 0, + ACPI_WDAT_READ_COUNTDOWN = 1, + ACPI_WDAT_WRITE_VALUE = 2, + ACPI_WDAT_WRITE_COUNTDOWN = 3, + ACPI_WDAT_INSTRUCTION_RESERVED = 4, /* 4 and greater are reserved */ + ACPI_WDAT_PRESERVE_REGISTER = 0x80 /* Except for this value */ +}; + 
+/******************************************************************************* + * + * WDDT - Watchdog Descriptor Table + * Version 1 + * + * Conforms to "Using the Intel ICH Family Watchdog Timer (WDT)", + * Version 001, September 2002 + * + ******************************************************************************/ + +struct acpi_table_wddt { + struct acpi_table_header header; /* Common ACPI table header */ + u16 spec_version; + u16 table_version; + u16 pci_vendor_id; + struct acpi_generic_address address; + u16 max_count; /* Maximum counter value supported */ + u16 min_count; /* Minimum counter value supported */ + u16 period; + u16 status; + u16 capability; +}; + +/* Flags for Status field above */ + +#define ACPI_WDDT_AVAILABLE (1) +#define ACPI_WDDT_ACTIVE (1<<1) +#define ACPI_WDDT_TCO_OS_OWNED (1<<2) +#define ACPI_WDDT_USER_RESET (1<<11) +#define ACPI_WDDT_WDT_RESET (1<<12) +#define ACPI_WDDT_POWER_FAIL (1<<13) +#define ACPI_WDDT_UNKNOWN_RESET (1<<14) + +/* Flags for Capability field above */ + +#define ACPI_WDDT_AUTO_RESET (1) +#define ACPI_WDDT_ALERT_SUPPORT (1<<1) + +/******************************************************************************* + * + * WDRT - Watchdog Resource Table + * Version 1 + * + * Conforms to "Watchdog Timer Hardware Requirements for Windows Server 2003", + * Version 1.01, August 28, 2006 + * + ******************************************************************************/ + +struct acpi_table_wdrt { + struct acpi_table_header header; /* Common ACPI table header */ + struct acpi_generic_address control_register; + struct acpi_generic_address count_register; + u16 pci_device_id; + u16 pci_vendor_id; + u8 pci_bus; /* PCI Bus number */ + u8 pci_device; /* PCI Device number */ + u8 pci_function; /* PCI Function number */ + u8 pci_segment; /* PCI Segment number */ + u16 max_count; /* Maximum counter value supported */ + u8 units; }; /* Reset to default packing */ diff --git a/include/acpi/actbl3.h b/include/acpi/actbl3.h index 
9125e2f163..ebc1f4f9fe 100644 --- a/include/acpi/actbl3.h +++ b/include/acpi/actbl3.h @@ -1,22 +1,59 @@ -/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 */ /****************************************************************************** * * Name: actbl3.h - ACPI Table Definitions * - * Copyright (C) 2000 - 2021, Intel Corp. - * *****************************************************************************/ +/* + * Copyright (C) 2000 - 2016, Intel Corp. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions, and the following disclaimer, + * without modification. + * 2. Redistributions in binary form must reproduce at minimum a disclaimer + * substantially similar to the "NO WARRANTY" disclaimer below + * ("Disclaimer") and any redistribution must be conditioned upon + * including a substantially similar Disclaimer requirement for further + * binary redistribution. + * 3. Neither the names of the above-listed copyright holders nor the names + * of any contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * Alternatively, this software may be distributed under the terms of the + * GNU General Public License ("GPL") version 2 as published by the Free + * Software Foundation. + * + * NO WARRANTY + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT + * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, + * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGES. + */ + #ifndef __ACTBL3_H__ #define __ACTBL3_H__ /******************************************************************************* * - * Additional ACPI Tables + * Additional ACPI Tables (3) * * These tables are not consumed directly by the ACPICA subsystem, but are * included here to support device drivers and the AML disassembler. * + * In general, the tables in this file are fully defined within the ACPI + * specification. + * ******************************************************************************/ /* @@ -24,24 +61,25 @@ * file. Useful because they make it more difficult to inadvertently type in * the wrong signature. 
*/ -#define ACPI_SIG_SLIC "SLIC" /* Software Licensing Description Table */ -#define ACPI_SIG_SLIT "SLIT" /* System Locality Distance Information Table */ -#define ACPI_SIG_SPCR "SPCR" /* Serial Port Console Redirection table */ -#define ACPI_SIG_SPMI "SPMI" /* Server Platform Management Interface table */ -#define ACPI_SIG_SRAT "SRAT" /* System Resource Affinity Table */ +#define ACPI_SIG_BGRT "BGRT" /* Boot Graphics Resource Table */ +#define ACPI_SIG_DRTM "DRTM" /* Dynamic Root of Trust for Measurement table */ +#define ACPI_SIG_FPDT "FPDT" /* Firmware Performance Data Table */ +#define ACPI_SIG_GTDT "GTDT" /* Generic Timer Description Table */ +#define ACPI_SIG_MPST "MPST" /* Memory Power State Table */ +#define ACPI_SIG_PCCT "PCCT" /* Platform Communications Channel Table */ +#define ACPI_SIG_PMTT "PMTT" /* Platform Memory Topology Table */ +#define ACPI_SIG_RASF "RASF" /* RAS Feature table */ #define ACPI_SIG_STAO "STAO" /* Status Override table */ -#define ACPI_SIG_TCPA "TCPA" /* Trusted Computing Platform Alliance table */ -#define ACPI_SIG_TPM2 "TPM2" /* Trusted Platform Module 2.0 H/W interface table */ -#define ACPI_SIG_UEFI "UEFI" /* Uefi Boot Optimization Table */ -#define ACPI_SIG_VIOT "VIOT" /* Virtual I/O Translation Table */ -#define ACPI_SIG_WAET "WAET" /* Windows ACPI Emulated devices Table */ -#define ACPI_SIG_WDAT "WDAT" /* Watchdog Action Table */ -#define ACPI_SIG_WDDT "WDDT" /* Watchdog Timer Description Table */ -#define ACPI_SIG_WDRT "WDRT" /* Watchdog Resource Table */ #define ACPI_SIG_WPBT "WPBT" /* Windows Platform Binary Table */ -#define ACPI_SIG_WSMT "WSMT" /* Windows SMM Security Mitigations Table */ #define ACPI_SIG_XENV "XENV" /* Xen Environment table */ -#define ACPI_SIG_XXXX "XXXX" /* Intermediate AML header for ASL/ASL+ converter */ + +#define ACPI_SIG_S3PT "S3PT" /* S3 Performance (sub)Table */ +#define ACPI_SIG_PCCS "PCC" /* PCC Shared Memory Region */ + +/* Reserved table signatures */ + +#define ACPI_SIG_MATR "MATR" /* 
Memory Address Translation Table */ +#define ACPI_SIG_MSDM "MSDM" /* Microsoft Data Management Table */ /* * All tables must be byte-packed to match the ACPI specification, since @@ -63,231 +101,635 @@ /******************************************************************************* * - * SLIC - Software Licensing Description Table - * - * Conforms to "Microsoft Software Licensing Tables (SLIC and MSDM)", - * November 29, 2011. Copyright 2011 Microsoft - * - ******************************************************************************/ - -/* Basic SLIC table is only the common ACPI header */ - -struct acpi_table_slic { - struct acpi_table_header header; /* Common ACPI table header */ -}; - -/******************************************************************************* - * - * SLIT - System Locality Distance Information Table + * BGRT - Boot Graphics Resource Table (ACPI 5.0) * Version 1 * ******************************************************************************/ -struct acpi_table_slit { +struct acpi_table_bgrt { struct acpi_table_header header; /* Common ACPI table header */ - u64 locality_count; - u8 entry[1]; /* Real size = localities^2 */ + u16 version; + u8 status; + u8 image_type; + u64 image_address; + u32 image_offset_x; + u32 image_offset_y; }; /******************************************************************************* * - * SPCR - Serial Port Console Redirection table - * Version 2 - * - * Conforms to "Serial Port Console Redirection Table", - * Version 1.03, August 10, 2015 + * DRTM - Dynamic Root of Trust for Measurement table + * Conforms to "TCG D-RTM Architecture" June 17 2013, Version 1.0.0 + * Table version 1 * ******************************************************************************/ -struct acpi_table_spcr { +struct acpi_table_drtm { struct acpi_table_header header; /* Common ACPI table header */ - u8 interface_type; /* 0=full 16550, 1=subset of 16550 */ - u8 reserved[3]; - struct acpi_generic_address serial_port; - u8 
interrupt_type; - u8 pc_interrupt; - u32 interrupt; - u8 baud_rate; - u8 parity; - u8 stop_bits; - u8 flow_control; - u8 terminal_type; - u8 reserved1; - u16 pci_device_id; - u16 pci_vendor_id; - u8 pci_bus; - u8 pci_device; - u8 pci_function; - u32 pci_flags; - u8 pci_segment; - u32 reserved2; + u64 entry_base_address; + u64 entry_length; + u32 entry_address32; + u64 entry_address64; + u64 exit_address; + u64 log_area_address; + u32 log_area_length; + u64 arch_dependent_address; + u32 flags; }; -/* Masks for pci_flags field above */ +/* Flag Definitions for above */ -#define ACPI_SPCR_DO_NOT_DISABLE (1) +#define ACPI_DRTM_ACCESS_ALLOWED (1) +#define ACPI_DRTM_ENABLE_GAP_CODE (1<<1) +#define ACPI_DRTM_INCOMPLETE_MEASUREMENTS (1<<2) +#define ACPI_DRTM_AUTHORITY_ORDER (1<<3) -/* Values for Interface Type: See the definition of the DBG2 table */ +/* 1) Validated Tables List (64-bit addresses) */ -/******************************************************************************* - * - * SPMI - Server Platform Management Interface table - * Version 5 - * - * Conforms to "Intelligent Platform Management Interface Specification - * Second Generation v2.0", Document Revision 1.0, February 12, 2004 with - * June 12, 2009 markup. 
- * - ******************************************************************************/ - -struct acpi_table_spmi { - struct acpi_table_header header; /* Common ACPI table header */ - u8 interface_type; - u8 reserved; /* Must be 1 */ - u16 spec_revision; /* Version of IPMI */ - u8 interrupt_type; - u8 gpe_number; /* GPE assigned */ - u8 reserved1; - u8 pci_device_flag; - u32 interrupt; - struct acpi_generic_address ipmi_register; - u8 pci_segment; - u8 pci_bus; - u8 pci_device; - u8 pci_function; - u8 reserved2; +struct acpi_drtm_vtable_list { + u32 validated_table_count; + u64 validated_tables[1]; }; -/* Values for interface_type above */ +/* 2) Resources List (of Resource Descriptors) */ -enum acpi_spmi_interface_types { - ACPI_SPMI_NOT_USED = 0, - ACPI_SPMI_KEYBOARD = 1, - ACPI_SPMI_SMI = 2, - ACPI_SPMI_BLOCK_TRANSFER = 3, - ACPI_SPMI_SMBUS = 4, - ACPI_SPMI_RESERVED = 5 /* 5 and above are reserved */ +/* Resource Descriptor */ + +struct acpi_drtm_resource { + u8 size[7]; + u8 type; + u64 address; +}; + +struct acpi_drtm_resource_list { + u32 resource_count; + struct acpi_drtm_resource resources[1]; +}; + +/* 3) Platform-specific Identifiers List */ + +struct acpi_drtm_dps_id { + u32 dps_id_length; + u8 dps_id[16]; }; /******************************************************************************* * - * SRAT - System Resource Affinity Table - * Version 3 + * FPDT - Firmware Performance Data Table (ACPI 5.0) + * Version 1 * ******************************************************************************/ -struct acpi_table_srat { +struct acpi_table_fpdt { struct acpi_table_header header; /* Common ACPI table header */ - u32 table_revision; /* Must be value '1' */ - u64 reserved; /* Reserved, must be zero */ }; -/* Values for subtable type in struct acpi_subtable_header */ +/* FPDT subtable header (Performance Record Structure) */ -enum acpi_srat_type { - ACPI_SRAT_TYPE_CPU_AFFINITY = 0, - ACPI_SRAT_TYPE_MEMORY_AFFINITY = 1, - ACPI_SRAT_TYPE_X2APIC_CPU_AFFINITY = 2, - 
ACPI_SRAT_TYPE_GICC_AFFINITY = 3, - ACPI_SRAT_TYPE_GIC_ITS_AFFINITY = 4, /* ACPI 6.2 */ - ACPI_SRAT_TYPE_GENERIC_AFFINITY = 5, /* ACPI 6.3 */ - ACPI_SRAT_TYPE_RESERVED = 6 /* 5 and greater are reserved */ +struct acpi_fpdt_header { + u16 type; + u8 length; + u8 revision; +}; + +/* Values for Type field above */ + +enum acpi_fpdt_type { + ACPI_FPDT_TYPE_BOOT = 0, + ACPI_FPDT_TYPE_S3PERF = 1 }; /* - * SRAT Subtables, correspond to Type in struct acpi_subtable_header + * FPDT subtables */ -/* 0: Processor Local APIC/SAPIC Affinity */ +/* 0: Firmware Basic Boot Performance Record */ -struct acpi_srat_cpu_affinity { - struct acpi_subtable_header header; - u8 proximity_domain_lo; - u8 apic_id; - u32 flags; - u8 local_sapic_eid; - u8 proximity_domain_hi[3]; - u32 clock_domain; +struct acpi_fpdt_boot_pointer { + struct acpi_fpdt_header header; + u8 reserved[4]; + u64 address; }; -/* Flags */ +/* 1: S3 Performance Table Pointer Record */ -#define ACPI_SRAT_CPU_USE_AFFINITY (1) /* 00: Use affinity structure */ +struct acpi_fpdt_s3pt_pointer { + struct acpi_fpdt_header header; + u8 reserved[4]; + u64 address; +}; -/* 1: Memory Affinity */ +/* + * S3PT - S3 Performance Table. This table is pointed to by the + * S3 Pointer Record above. 
+ */ +struct acpi_table_s3pt { + u8 signature[4]; /* "S3PT" */ + u32 length; +}; -struct acpi_srat_mem_affinity { +/* + * S3PT Subtables (Not part of the actual FPDT) + */ + +/* Values for Type field in S3PT header */ + +enum acpi_s3pt_type { + ACPI_S3PT_TYPE_RESUME = 0, + ACPI_S3PT_TYPE_SUSPEND = 1, + ACPI_FPDT_BOOT_PERFORMANCE = 2 +}; + +struct acpi_s3pt_resume { + struct acpi_fpdt_header header; + u32 resume_count; + u64 full_resume; + u64 average_resume; +}; + +struct acpi_s3pt_suspend { + struct acpi_fpdt_header header; + u64 suspend_start; + u64 suspend_end; +}; + +/* + * FPDT Boot Performance Record (Not part of the actual FPDT) + */ +struct acpi_fpdt_boot { + struct acpi_fpdt_header header; + u8 reserved[4]; + u64 reset_end; + u64 load_start; + u64 startup_start; + u64 exit_services_entry; + u64 exit_services_exit; +}; + +/******************************************************************************* + * + * GTDT - Generic Timer Description Table (ACPI 5.1) + * Version 2 + * + ******************************************************************************/ + +struct acpi_table_gtdt { + struct acpi_table_header header; /* Common ACPI table header */ + u64 counter_block_addresss; + u32 reserved; + u32 secure_el1_interrupt; + u32 secure_el1_flags; + u32 non_secure_el1_interrupt; + u32 non_secure_el1_flags; + u32 virtual_timer_interrupt; + u32 virtual_timer_flags; + u32 non_secure_el2_interrupt; + u32 non_secure_el2_flags; + u64 counter_read_block_address; + u32 platform_timer_count; + u32 platform_timer_offset; +}; + +/* Flag Definitions: Timer Block Physical Timers and Virtual timers */ + +#define ACPI_GTDT_INTERRUPT_MODE (1) +#define ACPI_GTDT_INTERRUPT_POLARITY (1<<1) +#define ACPI_GTDT_ALWAYS_ON (1<<2) + +/* Common GTDT subtable header */ + +struct acpi_gtdt_header { + u8 type; + u16 length; +}; + +/* Values for GTDT subtable type above */ + +enum acpi_gtdt_type { + ACPI_GTDT_TYPE_TIMER_BLOCK = 0, + ACPI_GTDT_TYPE_WATCHDOG = 1, + ACPI_GTDT_TYPE_RESERVED = 
2 /* 2 and greater are reserved */ +}; + +/* GTDT Subtables, correspond to Type in struct acpi_gtdt_header */ + +/* 0: Generic Timer Block */ + +struct acpi_gtdt_timer_block { + struct acpi_gtdt_header header; + u8 reserved; + u64 block_address; + u32 timer_count; + u32 timer_offset; +}; + +/* Timer Sub-Structure, one per timer */ + +struct acpi_gtdt_timer_entry { + u8 frame_number; + u8 reserved[3]; + u64 base_address; + u64 el0_base_address; + u32 timer_interrupt; + u32 timer_flags; + u32 virtual_timer_interrupt; + u32 virtual_timer_flags; + u32 common_flags; +}; + +/* Flag Definitions: timer_flags and virtual_timer_flags above */ + +#define ACPI_GTDT_GT_IRQ_MODE (1) +#define ACPI_GTDT_GT_IRQ_POLARITY (1<<1) + +/* Flag Definitions: common_flags above */ + +#define ACPI_GTDT_GT_IS_SECURE_TIMER (1) +#define ACPI_GTDT_GT_ALWAYS_ON (1<<1) + +/* 1: SBSA Generic Watchdog Structure */ + +struct acpi_gtdt_watchdog { + struct acpi_gtdt_header header; + u8 reserved; + u64 refresh_frame_address; + u64 control_frame_address; + u32 timer_interrupt; + u32 timer_flags; +}; + +/* Flag Definitions: timer_flags above */ + +#define ACPI_GTDT_WATCHDOG_IRQ_MODE (1) +#define ACPI_GTDT_WATCHDOG_IRQ_POLARITY (1<<1) +#define ACPI_GTDT_WATCHDOG_SECURE (1<<2) + +/******************************************************************************* + * + * MPST - Memory Power State Table (ACPI 5.0) + * Version 1 + * + ******************************************************************************/ + +#define ACPI_MPST_CHANNEL_INFO \ + u8 channel_id; \ + u8 reserved1[3]; \ + u16 power_node_count; \ + u16 reserved2; + +/* Main table */ + +struct acpi_table_mpst { + struct acpi_table_header header; /* Common ACPI table header */ + ACPI_MPST_CHANNEL_INFO /* Platform Communication Channel */ +}; + +/* Memory Platform Communication Channel Info */ + +struct acpi_mpst_channel { + ACPI_MPST_CHANNEL_INFO /* Platform Communication Channel */ +}; + +/* Memory Power Node Structure */ + +struct 
acpi_mpst_power_node { + u8 flags; + u8 reserved1; + u16 node_id; + u32 length; + u64 range_address; + u64 range_length; + u32 num_power_states; + u32 num_physical_components; +}; + +/* Values for Flags field above */ + +#define ACPI_MPST_ENABLED 1 +#define ACPI_MPST_POWER_MANAGED 2 +#define ACPI_MPST_HOT_PLUG_CAPABLE 4 + +/* Memory Power State Structure (follows POWER_NODE above) */ + +struct acpi_mpst_power_state { + u8 power_state; + u8 info_index; +}; + +/* Physical Component ID Structure (follows POWER_STATE above) */ + +struct acpi_mpst_component { + u16 component_id; +}; + +/* Memory Power State Characteristics Structure (follows all POWER_NODEs) */ + +struct acpi_mpst_data_hdr { + u16 characteristics_count; + u16 reserved; +}; + +struct acpi_mpst_power_data { + u8 structure_id; + u8 flags; + u16 reserved1; + u32 average_power; + u32 power_saving; + u64 exit_latency; + u64 reserved2; +}; + +/* Values for Flags field above */ + +#define ACPI_MPST_PRESERVE 1 +#define ACPI_MPST_AUTOENTRY 2 +#define ACPI_MPST_AUTOEXIT 4 + +/* Shared Memory Region (not part of an ACPI table) */ + +struct acpi_mpst_shared { + u32 signature; + u16 pcc_command; + u16 pcc_status; + u32 command_register; + u32 status_register; + u32 power_state_id; + u32 power_node_id; + u64 energy_consumed; + u64 average_power; +}; + +/******************************************************************************* + * + * PCCT - Platform Communications Channel Table (ACPI 5.0) + * Version 1 + * + ******************************************************************************/ + +struct acpi_table_pcct { + struct acpi_table_header header; /* Common ACPI table header */ + u32 flags; + u64 reserved; +}; + +/* Values for Flags field above */ + +#define ACPI_PCCT_DOORBELL 1 + +/* Values for subtable type in struct acpi_subtable_header */ + +enum acpi_pcct_type { + ACPI_PCCT_TYPE_GENERIC_SUBSPACE = 0, + ACPI_PCCT_TYPE_HW_REDUCED_SUBSPACE = 1, + ACPI_PCCT_TYPE_HW_REDUCED_SUBSPACE_TYPE2 = 2, /* ACPI 6.1 */ + 
ACPI_PCCT_TYPE_RESERVED = 3 /* 3 and greater are reserved */ +}; + +/* + * PCCT Subtables, correspond to Type in struct acpi_subtable_header + */ + +/* 0: Generic Communications Subspace */ + +struct acpi_pcct_subspace { struct acpi_subtable_header header; - u32 proximity_domain; - u16 reserved; /* Reserved, must be zero */ + u8 reserved[6]; u64 base_address; u64 length; - u32 reserved1; - u32 flags; - u64 reserved2; /* Reserved, must be zero */ + struct acpi_generic_address doorbell_register; + u64 preserve_mask; + u64 write_mask; + u32 latency; + u32 max_access_rate; + u16 min_turnaround_time; }; -/* Flags */ +/* 1: HW-reduced Communications Subspace (ACPI 5.1) */ -#define ACPI_SRAT_MEM_ENABLED (1) /* 00: Use affinity structure */ -#define ACPI_SRAT_MEM_HOT_PLUGGABLE (1<<1) /* 01: Memory region is hot pluggable */ -#define ACPI_SRAT_MEM_NON_VOLATILE (1<<2) /* 02: Memory region is non-volatile */ - -/* 2: Processor Local X2_APIC Affinity (ACPI 4.0) */ - -struct acpi_srat_x2apic_cpu_affinity { - struct acpi_subtable_header header; - u16 reserved; /* Reserved, must be zero */ - u32 proximity_domain; - u32 apic_id; - u32 flags; - u32 clock_domain; - u32 reserved2; -}; - -/* Flags for struct acpi_srat_cpu_affinity and struct acpi_srat_x2apic_cpu_affinity */ - -#define ACPI_SRAT_CPU_ENABLED (1) /* 00: Use affinity structure */ - -/* 3: GICC Affinity (ACPI 5.1) */ - -struct acpi_srat_gicc_affinity { - struct acpi_subtable_header header; - u32 proximity_domain; - u32 acpi_processor_uid; - u32 flags; - u32 clock_domain; -}; - -/* Flags for struct acpi_srat_gicc_affinity */ - -#define ACPI_SRAT_GICC_ENABLED (1) /* 00: Use affinity structure */ - -/* 4: GCC ITS Affinity (ACPI 6.2) */ - -struct acpi_srat_gic_its_affinity { - struct acpi_subtable_header header; - u32 proximity_domain; - u16 reserved; - u32 its_id; -}; - -/* 5: Generic Initiator Affinity Structure (ACPI 6.3) */ - -struct acpi_srat_generic_affinity { +struct acpi_pcct_hw_reduced { struct acpi_subtable_header 
header; + u32 doorbell_interrupt; + u8 flags; u8 reserved; - u8 device_handle_type; - u32 proximity_domain; - u8 device_handle[16]; - u32 flags; - u32 reserved1; + u64 base_address; + u64 length; + struct acpi_generic_address doorbell_register; + u64 preserve_mask; + u64 write_mask; + u32 latency; + u32 max_access_rate; + u16 min_turnaround_time; }; -/* Flags for struct acpi_srat_generic_affinity */ +/* 2: HW-reduced Communications Subspace Type 2 (ACPI 6.1) */ -#define ACPI_SRAT_GENERIC_AFFINITY_ENABLED (1) /* 00: Use affinity structure */ -#define ACPI_SRAT_ARCHITECTURAL_TRANSACTIONS (1<<1) /* ACPI 6.4 */ +struct acpi_pcct_hw_reduced_type2 { + struct acpi_subtable_header header; + u32 doorbell_interrupt; + u8 flags; + u8 reserved; + u64 base_address; + u64 length; + struct acpi_generic_address doorbell_register; + u64 preserve_mask; + u64 write_mask; + u32 latency; + u32 max_access_rate; + u16 min_turnaround_time; + struct acpi_generic_address doorbell_ack_register; + u64 ack_preserve_mask; + u64 ack_write_mask; +}; + +/* Values for doorbell flags above */ + +#define ACPI_PCCT_INTERRUPT_POLARITY (1) +#define ACPI_PCCT_INTERRUPT_MODE (1<<1) + +/* + * PCC memory structures (not part of the ACPI table) + */ + +/* Shared Memory Region */ + +struct acpi_pcct_shared_memory { + u32 signature; + u16 command; + u16 status; +}; + +/******************************************************************************* + * + * PMTT - Platform Memory Topology Table (ACPI 5.0) + * Version 1 + * + ******************************************************************************/ + +struct acpi_table_pmtt { + struct acpi_table_header header; /* Common ACPI table header */ + u32 reserved; +}; + +/* Common header for PMTT subtables that follow main table */ + +struct acpi_pmtt_header { + u8 type; + u8 reserved1; + u16 length; + u16 flags; + u16 reserved2; +}; + +/* Values for Type field above */ + +#define ACPI_PMTT_TYPE_SOCKET 0 +#define ACPI_PMTT_TYPE_CONTROLLER 1 +#define 
ACPI_PMTT_TYPE_DIMM 2 +#define ACPI_PMTT_TYPE_RESERVED 3 /* 0x03-0xFF are reserved */ + +/* Values for Flags field above */ + +#define ACPI_PMTT_TOP_LEVEL 0x0001 +#define ACPI_PMTT_PHYSICAL 0x0002 +#define ACPI_PMTT_MEMORY_TYPE 0x000C + +/* + * PMTT subtables, correspond to Type in struct acpi_pmtt_header + */ + +/* 0: Socket Structure */ + +struct acpi_pmtt_socket { + struct acpi_pmtt_header header; + u16 socket_id; + u16 reserved; +}; + +/* 1: Memory Controller subtable */ + +struct acpi_pmtt_controller { + struct acpi_pmtt_header header; + u32 read_latency; + u32 write_latency; + u32 read_bandwidth; + u32 write_bandwidth; + u16 access_width; + u16 alignment; + u16 reserved; + u16 domain_count; +}; + +/* 1a: Proximity Domain substructure */ + +struct acpi_pmtt_domain { + u32 proximity_domain; +}; + +/* 2: Physical Component Identifier (DIMM) */ + +struct acpi_pmtt_physical_component { + struct acpi_pmtt_header header; + u16 component_id; + u16 reserved; + u32 memory_size; + u32 bios_handle; +}; + +/******************************************************************************* + * + * RASF - RAS Feature Table (ACPI 5.0) + * Version 1 + * + ******************************************************************************/ + +struct acpi_table_rasf { + struct acpi_table_header header; /* Common ACPI table header */ + u8 channel_id[12]; +}; + +/* RASF Platform Communication Channel Shared Memory Region */ + +struct acpi_rasf_shared_memory { + u32 signature; + u16 command; + u16 status; + u16 version; + u8 capabilities[16]; + u8 set_capabilities[16]; + u16 num_parameter_blocks; + u32 set_capabilities_status; +}; + +/* RASF Parameter Block Structure Header */ + +struct acpi_rasf_parameter_block { + u16 type; + u16 version; + u16 length; +}; + +/* RASF Parameter Block Structure for PATROL_SCRUB */ + +struct acpi_rasf_patrol_scrub_parameter { + struct acpi_rasf_parameter_block header; + u16 patrol_scrub_command; + u64 requested_address_range[2]; + u64 
actual_address_range[2]; + u16 flags; + u8 requested_speed; +}; + +/* Masks for Flags and Speed fields above */ + +#define ACPI_RASF_SCRUBBER_RUNNING 1 +#define ACPI_RASF_SPEED (7<<1) +#define ACPI_RASF_SPEED_SLOW (0<<1) +#define ACPI_RASF_SPEED_MEDIUM (4<<1) +#define ACPI_RASF_SPEED_FAST (7<<1) + +/* Channel Commands */ + +enum acpi_rasf_commands { + ACPI_RASF_EXECUTE_RASF_COMMAND = 1 +}; + +/* Platform RAS Capabilities */ + +enum acpi_rasf_capabiliities { + ACPI_HW_PATROL_SCRUB_SUPPORTED = 0, + ACPI_SW_PATROL_SCRUB_EXPOSED = 1 +}; + +/* Patrol Scrub Commands */ + +enum acpi_rasf_patrol_scrub_commands { + ACPI_RASF_GET_PATROL_PARAMETERS = 1, + ACPI_RASF_START_PATROL_SCRUBBER = 2, + ACPI_RASF_STOP_PATROL_SCRUBBER = 3 +}; + +/* Channel Command flags */ + +#define ACPI_RASF_GENERATE_SCI (1<<15) + +/* Status values */ + +enum acpi_rasf_status { + ACPI_RASF_SUCCESS = 0, + ACPI_RASF_NOT_VALID = 1, + ACPI_RASF_NOT_SUPPORTED = 2, + ACPI_RASF_BUSY = 3, + ACPI_RASF_FAILED = 4, + ACPI_RASF_ABORTED = 5, + ACPI_RASF_INVALID_DATA = 6 +}; + +/* Status flags */ + +#define ACPI_RASF_COMMAND_COMPLETE (1) +#define ACPI_RASF_SCI_DOORBELL (1<<1) +#define ACPI_RASF_ERROR (1<<2) +#define ACPI_RASF_STATUS (0x1F<<3) /******************************************************************************* * @@ -304,407 +746,6 @@ struct acpi_table_stao { u8 ignore_uart; }; -/******************************************************************************* - * - * TCPA - Trusted Computing Platform Alliance table - * Version 2 - * - * TCG Hardware Interface Table for TPM 1.2 Clients and Servers - * - * Conforms to "TCG ACPI Specification, Family 1.2 and 2.0", - * Version 1.2, Revision 8 - * February 27, 2017 - * - * NOTE: There are two versions of the table with the same signature -- - * the client version and the server version. The common platform_class - * field is used to differentiate the two types of tables. 
- * - ******************************************************************************/ - -struct acpi_table_tcpa_hdr { - struct acpi_table_header header; /* Common ACPI table header */ - u16 platform_class; -}; - -/* - * Values for platform_class above. - * This is how the client and server subtables are differentiated - */ -#define ACPI_TCPA_CLIENT_TABLE 0 -#define ACPI_TCPA_SERVER_TABLE 1 - -struct acpi_table_tcpa_client { - u32 minimum_log_length; /* Minimum length for the event log area */ - u64 log_address; /* Address of the event log area */ -}; - -struct acpi_table_tcpa_server { - u16 reserved; - u64 minimum_log_length; /* Minimum length for the event log area */ - u64 log_address; /* Address of the event log area */ - u16 spec_revision; - u8 device_flags; - u8 interrupt_flags; - u8 gpe_number; - u8 reserved2[3]; - u32 global_interrupt; - struct acpi_generic_address address; - u32 reserved3; - struct acpi_generic_address config_address; - u8 group; - u8 bus; /* PCI Bus/Segment/Function numbers */ - u8 device; - u8 function; -}; - -/* Values for device_flags above */ - -#define ACPI_TCPA_PCI_DEVICE (1) -#define ACPI_TCPA_BUS_PNP (1<<1) -#define ACPI_TCPA_ADDRESS_VALID (1<<2) - -/* Values for interrupt_flags above */ - -#define ACPI_TCPA_INTERRUPT_MODE (1) -#define ACPI_TCPA_INTERRUPT_POLARITY (1<<1) -#define ACPI_TCPA_SCI_VIA_GPE (1<<2) -#define ACPI_TCPA_GLOBAL_INTERRUPT (1<<3) - -/******************************************************************************* - * - * TPM2 - Trusted Platform Module (TPM) 2.0 Hardware Interface Table - * Version 4 - * - * TCG Hardware Interface Table for TPM 2.0 Clients and Servers - * - * Conforms to "TCG ACPI Specification, Family 1.2 and 2.0", - * Version 1.2, Revision 8 - * February 27, 2017 - * - ******************************************************************************/ - -/* Revision 3 */ - -struct acpi_table_tpm23 { - struct acpi_table_header header; /* Common ACPI table header */ - u32 reserved; - u64 
control_address; - u32 start_method; -}; - -/* Value for start_method above */ - -#define ACPI_TPM23_ACPI_START_METHOD 2 - -/* - * Optional trailer for revision 3. If start method is 2, there is a 4 byte - * reserved area of all zeros. - */ -struct acpi_tmp23_trailer { - u32 reserved; -}; - -/* Revision 4 */ - -struct acpi_table_tpm2 { - struct acpi_table_header header; /* Common ACPI table header */ - u16 platform_class; - u16 reserved; - u64 control_address; - u32 start_method; - - /* Platform-specific data follows */ -}; - -/* Optional trailer for revision 4 holding platform-specific data */ -struct acpi_tpm2_phy { - u8 start_method_specific[12]; - u32 log_area_minimum_length; - u64 log_area_start_address; -}; - -/* Values for start_method above */ - -#define ACPI_TPM2_NOT_ALLOWED 0 -#define ACPI_TPM2_RESERVED1 1 -#define ACPI_TPM2_START_METHOD 2 -#define ACPI_TPM2_RESERVED3 3 -#define ACPI_TPM2_RESERVED4 4 -#define ACPI_TPM2_RESERVED5 5 -#define ACPI_TPM2_MEMORY_MAPPED 6 -#define ACPI_TPM2_COMMAND_BUFFER 7 -#define ACPI_TPM2_COMMAND_BUFFER_WITH_START_METHOD 8 -#define ACPI_TPM2_RESERVED9 9 -#define ACPI_TPM2_RESERVED10 10 -#define ACPI_TPM2_COMMAND_BUFFER_WITH_ARM_SMC 11 /* V1.2 Rev 8 */ -#define ACPI_TPM2_RESERVED 12 - -/* Optional trailer appears after any start_method subtables */ - -struct acpi_tpm2_trailer { - u8 method_parameters[12]; - u32 minimum_log_length; /* Minimum length for the event log area */ - u64 log_address; /* Address of the event log area */ -}; - -/* - * Subtables (start_method-specific) - */ - -/* 11: Start Method for ARM SMC (V1.2 Rev 8) */ - -struct acpi_tpm2_arm_smc { - u32 global_interrupt; - u8 interrupt_flags; - u8 operation_flags; - u16 reserved; - u32 function_id; -}; - -/* Values for interrupt_flags above */ - -#define ACPI_TPM2_INTERRUPT_SUPPORT (1) - -/* Values for operation_flags above */ - -#define ACPI_TPM2_IDLE_SUPPORT (1) - -/******************************************************************************* - * - * UEFI - 
UEFI Boot optimization Table - * Version 1 - * - * Conforms to "Unified Extensible Firmware Interface Specification", - * Version 2.3, May 8, 2009 - * - ******************************************************************************/ - -struct acpi_table_uefi { - struct acpi_table_header header; /* Common ACPI table header */ - u8 identifier[16]; /* UUID identifier */ - u16 data_offset; /* Offset of remaining data in table */ -}; - -/******************************************************************************* - * - * VIOT - Virtual I/O Translation Table - * Version 1 - * - ******************************************************************************/ - -struct acpi_table_viot { - struct acpi_table_header header; /* Common ACPI table header */ - u16 node_count; - u16 node_offset; - u8 reserved[8]; -}; - -/* VIOT subtable header */ - -struct acpi_viot_header { - u8 type; - u8 reserved; - u16 length; -}; - -/* Values for Type field above */ - -enum acpi_viot_node_type { - ACPI_VIOT_NODE_PCI_RANGE = 0x01, - ACPI_VIOT_NODE_MMIO = 0x02, - ACPI_VIOT_NODE_VIRTIO_IOMMU_PCI = 0x03, - ACPI_VIOT_NODE_VIRTIO_IOMMU_MMIO = 0x04, - ACPI_VIOT_RESERVED = 0x05 -}; - -/* VIOT subtables */ - -struct acpi_viot_pci_range { - struct acpi_viot_header header; - u32 endpoint_start; - u16 segment_start; - u16 segment_end; - u16 bdf_start; - u16 bdf_end; - u16 output_node; - u8 reserved[6]; -}; - -struct acpi_viot_mmio { - struct acpi_viot_header header; - u32 endpoint; - u64 base_address; - u16 output_node; - u8 reserved[6]; -}; - -struct acpi_viot_virtio_iommu_pci { - struct acpi_viot_header header; - u16 segment; - u16 bdf; - u8 reserved[8]; -}; - -struct acpi_viot_virtio_iommu_mmio { - struct acpi_viot_header header; - u8 reserved[4]; - u64 base_address; -}; - -/******************************************************************************* - * - * WAET - Windows ACPI Emulated devices Table - * Version 1 - * - * Conforms to "Windows ACPI Emulated Devices Table", version 1.0, April 6, 
2009 - * - ******************************************************************************/ - -struct acpi_table_waet { - struct acpi_table_header header; /* Common ACPI table header */ - u32 flags; -}; - -/* Masks for Flags field above */ - -#define ACPI_WAET_RTC_NO_ACK (1) /* RTC requires no int acknowledge */ -#define ACPI_WAET_TIMER_ONE_READ (1<<1) /* PM timer requires only one read */ - -/******************************************************************************* - * - * WDAT - Watchdog Action Table - * Version 1 - * - * Conforms to "Hardware Watchdog Timers Design Specification", - * Copyright 2006 Microsoft Corporation. - * - ******************************************************************************/ - -struct acpi_table_wdat { - struct acpi_table_header header; /* Common ACPI table header */ - u32 header_length; /* Watchdog Header Length */ - u16 pci_segment; /* PCI Segment number */ - u8 pci_bus; /* PCI Bus number */ - u8 pci_device; /* PCI Device number */ - u8 pci_function; /* PCI Function number */ - u8 reserved[3]; - u32 timer_period; /* Period of one timer count (msec) */ - u32 max_count; /* Maximum counter value supported */ - u32 min_count; /* Minimum counter value */ - u8 flags; - u8 reserved2[3]; - u32 entries; /* Number of watchdog entries that follow */ -}; - -/* Masks for Flags field above */ - -#define ACPI_WDAT_ENABLED (1) -#define ACPI_WDAT_STOPPED 0x80 - -/* WDAT Instruction Entries (actions) */ - -struct acpi_wdat_entry { - u8 action; - u8 instruction; - u16 reserved; - struct acpi_generic_address register_region; - u32 value; /* Value used with Read/Write register */ - u32 mask; /* Bitmask required for this register instruction */ -}; - -/* Values for Action field above */ - -enum acpi_wdat_actions { - ACPI_WDAT_RESET = 1, - ACPI_WDAT_GET_CURRENT_COUNTDOWN = 4, - ACPI_WDAT_GET_COUNTDOWN = 5, - ACPI_WDAT_SET_COUNTDOWN = 6, - ACPI_WDAT_GET_RUNNING_STATE = 8, - ACPI_WDAT_SET_RUNNING_STATE = 9, - ACPI_WDAT_GET_STOPPED_STATE = 10, - 
ACPI_WDAT_SET_STOPPED_STATE = 11, - ACPI_WDAT_GET_REBOOT = 16, - ACPI_WDAT_SET_REBOOT = 17, - ACPI_WDAT_GET_SHUTDOWN = 18, - ACPI_WDAT_SET_SHUTDOWN = 19, - ACPI_WDAT_GET_STATUS = 32, - ACPI_WDAT_SET_STATUS = 33, - ACPI_WDAT_ACTION_RESERVED = 34 /* 34 and greater are reserved */ -}; - -/* Values for Instruction field above */ - -enum acpi_wdat_instructions { - ACPI_WDAT_READ_VALUE = 0, - ACPI_WDAT_READ_COUNTDOWN = 1, - ACPI_WDAT_WRITE_VALUE = 2, - ACPI_WDAT_WRITE_COUNTDOWN = 3, - ACPI_WDAT_INSTRUCTION_RESERVED = 4, /* 4 and greater are reserved */ - ACPI_WDAT_PRESERVE_REGISTER = 0x80 /* Except for this value */ -}; - -/******************************************************************************* - * - * WDDT - Watchdog Descriptor Table - * Version 1 - * - * Conforms to "Using the Intel ICH Family Watchdog Timer (WDT)", - * Version 001, September 2002 - * - ******************************************************************************/ - -struct acpi_table_wddt { - struct acpi_table_header header; /* Common ACPI table header */ - u16 spec_version; - u16 table_version; - u16 pci_vendor_id; - struct acpi_generic_address address; - u16 max_count; /* Maximum counter value supported */ - u16 min_count; /* Minimum counter value supported */ - u16 period; - u16 status; - u16 capability; -}; - -/* Flags for Status field above */ - -#define ACPI_WDDT_AVAILABLE (1) -#define ACPI_WDDT_ACTIVE (1<<1) -#define ACPI_WDDT_TCO_OS_OWNED (1<<2) -#define ACPI_WDDT_USER_RESET (1<<11) -#define ACPI_WDDT_WDT_RESET (1<<12) -#define ACPI_WDDT_POWER_FAIL (1<<13) -#define ACPI_WDDT_UNKNOWN_RESET (1<<14) - -/* Flags for Capability field above */ - -#define ACPI_WDDT_AUTO_RESET (1) -#define ACPI_WDDT_ALERT_SUPPORT (1<<1) - -/******************************************************************************* - * - * WDRT - Watchdog Resource Table - * Version 1 - * - * Conforms to "Watchdog Timer Hardware Requirements for Windows Server 2003", - * Version 1.01, August 28, 2006 - * - 
******************************************************************************/ - -struct acpi_table_wdrt { - struct acpi_table_header header; /* Common ACPI table header */ - struct acpi_generic_address control_register; - struct acpi_generic_address count_register; - u16 pci_device_id; - u16 pci_vendor_id; - u8 pci_bus; /* PCI Bus number */ - u8 pci_device; /* PCI Device number */ - u8 pci_function; /* PCI Function number */ - u8 pci_segment; /* PCI Segment number */ - u16 max_count; /* Maximum counter value supported */ - u8 units; -}; - /******************************************************************************* * * WPBT - Windows Platform Environment Table (ACPI 6.0) @@ -723,31 +764,6 @@ struct acpi_table_wpbt { u16 arguments_length; }; -struct acpi_wpbt_unicode { - u16 *unicode_string; -}; - -/******************************************************************************* - * - * WSMT - Windows SMM Security Mitigations Table - * Version 1 - * - * Conforms to "Windows SMM Security Mitigations Table", - * Version 1.0, April 18, 2016 - * - ******************************************************************************/ - -struct acpi_table_wsmt { - struct acpi_table_header header; /* Common ACPI table header */ - u32 protection_flags; -}; - -/* Flags for protection_flags field above */ - -#define ACPI_WSMT_FIXED_COMM_BUFFERS (1) -#define ACPI_WSMT_COMM_BUFFER_NESTED_PTR_PROTECTION (2) -#define ACPI_WSMT_SYSTEM_RESOURCE_PROTECTION (4) - /******************************************************************************* * * XENV - Xen Environment Table (ACPI 6.0) diff --git a/include/acpi/actypes.h b/include/acpi/actypes.h index 92c71dfce0..1d798abae7 100644 --- a/include/acpi/actypes.h +++ b/include/acpi/actypes.h @@ -1,21 +1,55 @@ -/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 */ /****************************************************************************** * * Name: actypes.h - Common data types for the entire ACPI subsystem * - * Copyright (C) 2000 - 
2021, Intel Corp. - * *****************************************************************************/ +/* + * Copyright (C) 2000 - 2016, Intel Corp. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions, and the following disclaimer, + * without modification. + * 2. Redistributions in binary form must reproduce at minimum a disclaimer + * substantially similar to the "NO WARRANTY" disclaimer below + * ("Disclaimer") and any redistribution must be conditioned upon + * including a substantially similar Disclaimer requirement for further + * binary redistribution. + * 3. Neither the names of the above-listed copyright holders nor the names + * of any contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * Alternatively, this software may be distributed under the terms of the + * GNU General Public License ("GPL") version 2 as published by the Free + * Software Foundation. + * + * NO WARRANTY + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, + * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGES. 
+ */ + #ifndef __ACTYPES_H__ #define __ACTYPES_H__ /* acpisrc:struct_defs -- for acpisrc conversion */ /* - * ACPI_MACHINE_WIDTH must be specified in an OS- or compiler-dependent - * header and must be either 32 or 64. 16-bit ACPICA is no longer - * supported, as of 12/2006. + * ACPI_MACHINE_WIDTH must be specified in an OS- or compiler-dependent header + * and must be either 32 or 64. 16-bit ACPICA is no longer supported, as of + * 12/2006. */ #ifndef ACPI_MACHINE_WIDTH #error ACPI_MACHINE_WIDTH not defined @@ -53,9 +87,9 @@ * s64 64-bit (8 byte) signed value * * COMPILER_DEPENDENT_UINT64/s64 - These types are defined in the - * compiler-dependent header(s) and were introduced because there is no - * common 64-bit integer type across the various compilation models, as - * shown in the table below. + * compiler-dependent header(s) and were introduced because there is no common + * 64-bit integer type across the various compilation models, as shown in + * the table below. * * Datatype LP64 ILP64 LLP64 ILP32 LP32 16bit * char 8 8 8 8 8 8 @@ -72,10 +106,10 @@ * 2) These types represent the native word size of the target mode of the * processor, and may be 16-bit, 32-bit, or 64-bit as required. They are * usually used for memory allocation, efficient loop counters, and array - * indexes. The types are similar to the size_t type in the C library and - * are required because there is no C type that consistently represents the - * native data width. acpi_size is needed because there is no guarantee - * that a kernel-level C library is present. + * indexes. The types are similar to the size_t type in the C library and are + * required because there is no C type that consistently represents the native + * data width. acpi_size is needed because there is no guarantee that a + * kernel-level C library is present. 
* * acpi_size 16/32/64-bit unsigned value * acpi_native_int 16/32/64-bit signed value @@ -132,14 +166,12 @@ typedef u64 acpi_physical_address; #define ACPI_SIZE_MAX ACPI_UINT64_MAX #define ACPI_USE_NATIVE_DIVIDE /* Has native 64-bit integer support */ -#define ACPI_USE_NATIVE_MATH64 /* Has native 64-bit integer support */ /* * In the case of the Itanium Processor Family (IPF), the hardware does not - * support misaligned memory transfers. Set the MISALIGNMENT_NOT_SUPPORTED - * flag to indicate that special precautions must be taken to avoid alignment - * faults. (IA64 or ia64 is currently used by existing compilers to indicate - * IPF.) + * support misaligned memory transfers. Set the MISALIGNMENT_NOT_SUPPORTED flag + * to indicate that special precautions must be taken to avoid alignment faults. + * (IA64 or ia64 is currently used by existing compilers to indicate IPF.) * * Note: EM64T and other X86-64 processors support misaligned transfers, * so there is no need to define this flag. @@ -245,10 +277,6 @@ typedef u64 acpi_physical_address; #define acpi_spinlock void * #endif -#ifndef acpi_raw_spinlock -#define acpi_raw_spinlock acpi_spinlock -#endif - #ifndef acpi_semaphore #define acpi_semaphore void * #endif @@ -281,8 +309,8 @@ typedef u64 acpi_physical_address; #endif /* - * Some compilers complain about unused variables. Sometimes we don't want - * to use all the variables (for example, _acpi_module_name). This allows us + * Some compilers complain about unused variables. Sometimes we don't want to + * use all the variables (for example, _acpi_module_name). This allows us * to tell the compiler in a per-variable manner that a variable * is unused */ @@ -291,9 +319,8 @@ typedef u64 acpi_physical_address; #endif /* - * All ACPICA external functions that are available to the rest of the - * kernel are tagged with these macros which can be defined as appropriate - * for the host. 
+ * All ACPICA external functions that are available to the rest of the kernel + * are tagged with thes macros which can be defined as appropriate for the host. * * Notes: * ACPI_EXPORT_SYMBOL_INIT is used for initialization and termination @@ -356,8 +383,7 @@ typedef u64 acpi_physical_address; /****************************************************************************** * - * ACPI Specification constants (Do not change unless the specification - * changes) + * ACPI Specification constants (Do not change unless the specification changes) * *****************************************************************************/ @@ -375,7 +401,7 @@ typedef u64 acpi_physical_address; /* Names within the namespace are 4 bytes long */ -#define ACPI_NAMESEG_SIZE 4 /* Fixed by ACPI spec */ +#define ACPI_NAME_SIZE 4 #define ACPI_PATH_SEGMENT_LENGTH 5 /* 4 chars for name + 1 char for separator */ #define ACPI_PATH_SEPARATOR '.' @@ -438,12 +464,10 @@ typedef void *acpi_handle; /* Actually a ptr to a NS Node */ #define ACPI_NSEC_PER_MSEC 1000000L #define ACPI_NSEC_PER_SEC 1000000000L -#define ACPI_TIME_AFTER(a, b) ((s64)((b) - (a)) < 0) - /* Owner IDs are used to track namespace nodes for selective deletion */ -typedef u16 acpi_owner_id; -#define ACPI_OWNER_ID_MAX 0xFFF /* 4095 possible owner IDs */ +typedef u8 acpi_owner_id; +#define ACPI_OWNER_ID_MAX 0xFF #define ACPI_INTEGER_BIT_SIZE 64 #define ACPI_MAX_DECIMAL_DIGITS 20 /* 2^64 = 18,446,744,073,709,551,616 */ @@ -455,15 +479,15 @@ typedef u16 acpi_owner_id; /* * Constants with special meanings */ -#define ACPI_ROOT_OBJECT ((acpi_handle) ACPI_TO_POINTER (ACPI_MAX_PTR)) +#define ACPI_ROOT_OBJECT ACPI_ADD_PTR (acpi_handle, NULL, ACPI_MAX_PTR) #define ACPI_WAIT_FOREVER 0xFFFF /* u16, as per ACPI spec */ #define ACPI_DO_NOT_WAIT 0 /* - * Obsolete: Acpi integer width. In ACPI version 1 (1996), integers are - * 32 bits. In ACPI version 2 (2000) and later, integers are max 64 bits. 
- * Note that this pertains to the ACPI integer type only, not to other - * integers used in the implementation of the ACPICA subsystem. + * Obsolete: Acpi integer width. In ACPI version 1 (1996), integers are 32 bits. + * In ACPI version 2 (2000) and later, integers are 64 bits. Note that this + * pertains to the ACPI integer type only, not to other integers used in the + * implementation of the ACPICA subsystem. * * 01/2010: This type is obsolete and has been removed from the entire ACPICA * code base. It remains here for compatibility with device drivers that use @@ -502,24 +526,24 @@ typedef u64 acpi_integer; #define ACPI_CAST_INDIRECT_PTR(t, p) ((t **) (acpi_uintptr_t) (p)) #define ACPI_ADD_PTR(t, a, b) ACPI_CAST_PTR (t, (ACPI_CAST_PTR (u8, (a)) + (acpi_size)(b))) #define ACPI_SUB_PTR(t, a, b) ACPI_CAST_PTR (t, (ACPI_CAST_PTR (u8, (a)) - (acpi_size)(b))) -#define ACPI_PTR_DIFF(a, b) ((acpi_size) (ACPI_CAST_PTR (u8, (a)) - ACPI_CAST_PTR (u8, (b)))) +#define ACPI_PTR_DIFF(a, b) (acpi_size) (ACPI_CAST_PTR (u8, (a)) - ACPI_CAST_PTR (u8, (b))) /* Pointer/Integer type conversions */ -#define ACPI_TO_POINTER(i) ACPI_CAST_PTR (void, (acpi_size) (i)) -#define ACPI_TO_INTEGER(p) ACPI_PTR_DIFF (p, (void *) 0) -#define ACPI_OFFSET(d, f) ACPI_PTR_DIFF (&(((d *) 0)->f), (void *) 0) +#define ACPI_TO_POINTER(i) ACPI_ADD_PTR (void, (void *) NULL,(acpi_size) i) +#define ACPI_TO_INTEGER(p) ACPI_PTR_DIFF (p, (void *) NULL) +#define ACPI_OFFSET(d, f) ACPI_PTR_DIFF (&(((d *) 0)->f), (void *) NULL) #define ACPI_PHYSADDR_TO_PTR(i) ACPI_TO_POINTER(i) #define ACPI_PTR_TO_PHYSADDR(i) ACPI_TO_INTEGER(i) /* Optimizations for 4-character (32-bit) acpi_name manipulation */ #ifndef ACPI_MISALIGNMENT_NOT_SUPPORTED -#define ACPI_COMPARE_NAMESEG(a,b) (*ACPI_CAST_PTR (u32, (a)) == *ACPI_CAST_PTR (u32, (b))) -#define ACPI_COPY_NAMESEG(dest,src) (*ACPI_CAST_PTR (u32, (dest)) = *ACPI_CAST_PTR (u32, (src))) +#define ACPI_COMPARE_NAME(a,b) (*ACPI_CAST_PTR (u32, (a)) == *ACPI_CAST_PTR (u32, (b))) 
+#define ACPI_MOVE_NAME(dest,src) (*ACPI_CAST_PTR (u32, (dest)) = *ACPI_CAST_PTR (u32, (src))) #else -#define ACPI_COMPARE_NAMESEG(a,b) (!strncmp (ACPI_CAST_PTR (char, (a)), ACPI_CAST_PTR (char, (b)), ACPI_NAMESEG_SIZE)) -#define ACPI_COPY_NAMESEG(dest,src) (strncpy (ACPI_CAST_PTR (char, (dest)), ACPI_CAST_PTR (char, (src)), ACPI_NAMESEG_SIZE)) +#define ACPI_COMPARE_NAME(a,b) (!strncmp (ACPI_CAST_PTR (char, (a)), ACPI_CAST_PTR (char, (b)), ACPI_NAME_SIZE)) +#define ACPI_MOVE_NAME(dest,src) (strncpy (ACPI_CAST_PTR (char, (dest)), ACPI_CAST_PTR (char, (src)), ACPI_NAME_SIZE)) #endif /* Support for the special RSDP signature (8 characters) */ @@ -527,18 +551,6 @@ typedef u64 acpi_integer; #define ACPI_VALIDATE_RSDP_SIG(a) (!strncmp (ACPI_CAST_PTR (char, (a)), ACPI_SIG_RSDP, 8)) #define ACPI_MAKE_RSDP_SIG(dest) (memcpy (ACPI_CAST_PTR (char, (dest)), ACPI_SIG_RSDP, 8)) -/* Support for OEMx signature (x can be any character) */ -#define ACPI_IS_OEM_SIG(a) (!strncmp (ACPI_CAST_PTR (char, (a)), ACPI_OEM_NAME, 3) &&\ - strnlen (a, ACPI_NAMESEG_SIZE) == ACPI_NAMESEG_SIZE) - -/* - * Algorithm to obtain access bit or byte width. - * Can be used with access_width of struct acpi_generic_address and access_size of - * struct acpi_resource_generic_register. 
- */ -#define ACPI_ACCESS_BIT_WIDTH(size) (1 << ((size) + 2)) -#define ACPI_ACCESS_BYTE_WIDTH(size) (1 << ((size) - 1)) - /******************************************************************************* * * Miscellaneous constants @@ -546,17 +558,17 @@ typedef u64 acpi_integer; ******************************************************************************/ /* - * Initialization sequence options + * Initialization sequence */ -#define ACPI_FULL_INITIALIZATION 0x0000 -#define ACPI_NO_FACS_INIT 0x0001 -#define ACPI_NO_ACPI_ENABLE 0x0002 -#define ACPI_NO_HARDWARE_INIT 0x0004 -#define ACPI_NO_EVENT_INIT 0x0008 -#define ACPI_NO_HANDLER_INIT 0x0010 -#define ACPI_NO_OBJECT_INIT 0x0020 -#define ACPI_NO_DEVICE_INIT 0x0040 -#define ACPI_NO_ADDRESS_SPACE_INIT 0x0080 +#define ACPI_FULL_INITIALIZATION 0x00 +#define ACPI_NO_ADDRESS_SPACE_INIT 0x01 +#define ACPI_NO_HARDWARE_INIT 0x02 +#define ACPI_NO_EVENT_INIT 0x04 +#define ACPI_NO_HANDLER_INIT 0x08 +#define ACPI_NO_ACPI_ENABLE 0x10 +#define ACPI_NO_DEVICE_INIT 0x20 +#define ACPI_NO_OBJECT_INIT 0x40 +#define ACPI_NO_FACS_INIT 0x80 /* * Initialization state @@ -617,10 +629,8 @@ typedef u64 acpi_integer; #define ACPI_NOTIFY_LOCALITY_UPDATE (u8) 0x0B #define ACPI_NOTIFY_SHUTDOWN_REQUEST (u8) 0x0C #define ACPI_NOTIFY_AFFINITY_UPDATE (u8) 0x0D -#define ACPI_NOTIFY_MEMORY_UPDATE (u8) 0x0E -#define ACPI_NOTIFY_DISCONNECT_RECOVER (u8) 0x0F -#define ACPI_GENERIC_NOTIFY_MAX 0x0F +#define ACPI_GENERIC_NOTIFY_MAX 0x0D #define ACPI_SPECIFIC_NOTIFY_MAX 0x84 /* @@ -657,11 +667,10 @@ typedef u32 acpi_object_type; /* * These are object types that do not map directly to the ACPI - * object_type() operator. They are used for various internal purposes - * only. If new predefined ACPI_TYPEs are added (via the ACPI - * specification), these internal types must move upwards. (There - * is code that depends on these values being contiguous with the - * external types above.) + * object_type() operator. They are used for various internal purposes only. 
+ * If new predefined ACPI_TYPEs are added (via the ACPI specification), these + * internal types must move upwards. (There is code that depends on these + * values being contiguous with the external types above.) */ #define ACPI_TYPE_LOCAL_REGION_FIELD 0x11 #define ACPI_TYPE_LOCAL_BANK_FIELD 0x12 @@ -761,7 +770,7 @@ typedef u32 acpi_event_status; * | | | | +-- Type of dispatch:to method, handler, notify, or none * | | | +----- Interrupt type: edge or level triggered * | | +------- Is a Wake GPE - * | +--------- Has been enabled automatically at init time + * | +--------- Is GPE masked by the software GPE masking mechanism * +------------ */ #define ACPI_GPE_DISPATCH_NONE (u8) 0x00 @@ -777,8 +786,6 @@ typedef u32 acpi_event_status; #define ACPI_GPE_XRUPT_TYPE_MASK (u8) 0x08 #define ACPI_GPE_CAN_WAKE (u8) 0x10 -#define ACPI_GPE_AUTO_ENABLED (u8) 0x20 -#define ACPI_GPE_INITIALIZED (u8) 0x40 /* * Flags for GPE and Lock interfaces @@ -815,16 +822,15 @@ typedef u8 acpi_adr_space_type; #define ACPI_ADR_SPACE_GPIO (acpi_adr_space_type) 8 #define ACPI_ADR_SPACE_GSBUS (acpi_adr_space_type) 9 #define ACPI_ADR_SPACE_PLATFORM_COMM (acpi_adr_space_type) 10 -#define ACPI_ADR_SPACE_PLATFORM_RT (acpi_adr_space_type) 11 -#define ACPI_NUM_PREDEFINED_REGIONS 12 +#define ACPI_NUM_PREDEFINED_REGIONS 11 /* * Special Address Spaces * * Note: A Data Table region is a special type of operation region * that has its own AML opcode. However, internally, the AML - * interpreter simply creates an operation region with an address + * interpreter simply creates an operation region with an address + * space type of ACPI_ADR_SPACE_DATA_TABLE.
*/ #define ACPI_ADR_SPACE_DATA_TABLE (acpi_adr_space_type) 0x7E /* Internal to ACPICA only */ @@ -888,13 +894,22 @@ typedef u8 acpi_adr_space_type; #define ACPI_ENABLE_EVENT 1 #define ACPI_DISABLE_EVENT 0 +/* Sleep function dispatch */ + +typedef acpi_status (*acpi_sleep_function) (u8 sleep_state); + +struct acpi_sleep_functions { + acpi_sleep_function legacy_function; + acpi_sleep_function extended_function; +}; + /* * External ACPI object definition */ /* - * Note: Type == ACPI_TYPE_ANY (0) is used to indicate a NULL package - * element or an unresolved named reference. + * Note: Type == ACPI_TYPE_ANY (0) is used to indicate a NULL package element + * or an unresolved named reference. */ union acpi_object { acpi_object_type type; /* See definition of acpi_ns_type for values */ @@ -1146,12 +1161,12 @@ struct acpi_pnp_device_id { struct acpi_pnp_device_id_list { u32 count; /* Number of IDs in Ids array */ u32 list_size; /* Size of list, including ID strings */ - struct acpi_pnp_device_id ids[]; /* ID array */ + struct acpi_pnp_device_id ids[1]; /* ID array */ }; /* * Structure returned from acpi_get_object_info. - * Optimized for both 32-bit and 64-bit builds. 
+ * Optimized for both 32- and 64-bit builds */ struct acpi_device_info { u32 info_size; /* Size of info, including ID strings */ @@ -1162,6 +1177,7 @@ struct acpi_device_info { u8 flags; /* Miscellaneous info */ u8 highest_dstates[4]; /* _sx_d values: 0xFF indicates not valid */ u8 lowest_dstates[5]; /* _sx_w values: 0xFF indicates not valid */ + u32 current_status; /* _STA value */ u64 address; /* _ADR value */ struct acpi_pnp_device_id hardware_id; /* _HID value */ struct acpi_pnp_device_id unique_id; /* _UID value */ @@ -1175,6 +1191,7 @@ struct acpi_device_info { /* Flags for Valid field above (acpi_get_object_info) */ +#define ACPI_VALID_STA 0x0001 #define ACPI_VALID_ADR 0x0002 #define ACPI_VALID_HID 0x0004 #define ACPI_VALID_UID 0x0008 @@ -1201,18 +1218,12 @@ struct acpi_pci_id { u16 function; }; -struct acpi_mem_mapping { - acpi_physical_address physical_address; - u8 *logical_address; - acpi_size length; - struct acpi_mem_mapping *next_mm; -}; - struct acpi_mem_space_context { u32 length; acpi_physical_address address; - struct acpi_mem_mapping *cur_mm; - struct acpi_mem_mapping *first_mm; + acpi_physical_address mapped_physical_address; + u8 *mapped_logical_address; + acpi_size mapped_length; }; /* @@ -1273,23 +1284,10 @@ typedef enum { #define ACPI_OSI_WIN_VISTA_SP2 0x0A #define ACPI_OSI_WIN_7 0x0B #define ACPI_OSI_WIN_8 0x0C -#define ACPI_OSI_WIN_8_1 0x0D -#define ACPI_OSI_WIN_10 0x0E -#define ACPI_OSI_WIN_10_RS1 0x0F -#define ACPI_OSI_WIN_10_RS2 0x10 -#define ACPI_OSI_WIN_10_RS3 0x11 -#define ACPI_OSI_WIN_10_RS4 0x12 -#define ACPI_OSI_WIN_10_RS5 0x13 -#define ACPI_OSI_WIN_10_19H1 0x14 +#define ACPI_OSI_WIN_10 0x0D /* Definitions of getopt */ #define ACPI_OPT_END -1 -/* Definitions for explicit fallthrough */ - -#ifndef ACPI_FALLTHROUGH -#define ACPI_FALLTHROUGH do {} while(0) -#endif - #endif /* __ACTYPES_H__ */ diff --git a/include/acpi/acuuid.h b/include/acpi/acuuid.h index bc24388ce9..0f269e088f 100644 --- a/include/acpi/acuuid.h +++ 
b/include/acpi/acuuid.h @@ -1,12 +1,46 @@ -/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 */ /****************************************************************************** * * Name: acuuid.h - ACPI-related UUID/GUID definitions * - * Copyright (C) 2000 - 2021, Intel Corp. - * *****************************************************************************/ +/* + * Copyright (C) 2000 - 2016, Intel Corp. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions, and the following disclaimer, + * without modification. + * 2. Redistributions in binary form must reproduce at minimum a disclaimer + * substantially similar to the "NO WARRANTY" disclaimer below + * ("Disclaimer") and any redistribution must be conditioned upon + * including a substantially similar Disclaimer requirement for further + * binary redistribution. + * 3. Neither the names of the above-listed copyright holders nor the names + * of any contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * Alternatively, this software may be distributed under the terms of the + * GNU General Public License ("GPL") version 2 as published by the Free + * Software Foundation. + * + * NO WARRANTY + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT + * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, + * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGES. + */ + #ifndef __ACUUID_H__ #define __ACUUID_H__ @@ -27,10 +61,6 @@ #define UUID_PCI_HOST_BRIDGE "33db4d5b-1ff7-401c-9657-7441c03dd766" #define UUID_I2C_DEVICE "3cdff6f7-4267-4555-ad05-b30a3d8938de" #define UUID_POWER_BUTTON "dfbcf3c5-e7a5-44e6-9c1f-29c76f6e059c" -#define UUID_MEMORY_DEVICE "03b19910-f473-11dd-87af-0800200c9a66" -#define UUID_GENERIC_BUTTONS_DEVICE "fa6bd625-9ce8-470d-a2c7-b3ca36c4282e" -#define UUID_NVDIMM_ROOT_DEVICE "2f10e7a4-9e91-11e4-89d3-123b93f75cba" -#define UUID_CONTROL_METHOD_BATTERY "f18fc78b-0f15-4978-b793-53f833a1d35b" /* Interfaces */ @@ -39,7 +69,6 @@ /* NVDIMM - NFIT table */ -#define UUID_NFIT_DIMM "4309ac30-0d11-11e4-9191-0800200c9a66" #define UUID_VOLATILE_MEMORY "7305944f-fdda-44e3-b16c-3f22d252e5d0" #define UUID_PERSISTENT_MEMORY "66f0d379-b4f3-4074-ac43-0d3318b78cdb" #define UUID_CONTROL_REGION "92f701f6-13b4-405d-910b-299367e8234c" @@ -48,15 +77,6 @@ #define UUID_VOLATILE_VIRTUAL_CD "3d5abd30-4175-87ce-6d64-d2ade523c4bb" #define UUID_PERSISTENT_VIRTUAL_DISK "5cea02c9-4d07-69d3-269f-4496fbe096f9" #define UUID_PERSISTENT_VIRTUAL_CD "08018188-42cd-bb48-100f-5387d53ded3d" -#define UUID_NFIT_DIMM_N_MSFT "1ee68b36-d4bd-4a1a-9a16-4f8e53d46e05" -#define UUID_NFIT_DIMM_N_HPE1 "9002c334-acf3-4c0e-9642-a235f0d53bc6" -#define UUID_NFIT_DIMM_N_HPE2 "5008664b-b758-41a0-a03c-27c2f2d04f7e" -#define UUID_NFIT_DIMM_N_HYPERV "5746c5f2-a9a2-4264-ad0e-e4ddc9e09e80" - -/* Processor Properties (ACPI 6.2) */ - -#define UUID_CACHE_PROPERTIES 
"6DC63E77-257E-4E78-A973-A21F2796898D" -#define UUID_PHYSICAL_PROPERTY "DDE4D59A-AA42-4349-B407-EA40F57D9FB7" /* Miscellaneous */ @@ -65,9 +85,5 @@ #define UUID_BATTERY_THERMAL_LIMIT "4c2067e3-887d-475c-9720-4af1d3ed602e" #define UUID_THERMAL_EXTENSIONS "14d399cd-7a27-4b18-8fb4-7cb7b9f4e500" #define UUID_DEVICE_PROPERTIES "daffd814-6eba-4d8c-8a91-bc9bbf4aa301" -#define UUID_DEVICE_GRAPHS "ab02a46b-74c7-45a2-bd68-f7d344ef2153" -#define UUID_HIERARCHICAL_DATA_EXTENSION "dbb8e3e6-5886-4ba6-8795-1319f52a966b" -#define UUID_CORESIGHT_GRAPH "3ecbc8b6-1d0e-4fb3-8107-e627f805c6cd" -#define UUID_USB4_CAPABILITIES "23a0d13a-26ab-486c-9c5f-0ffa525a575a" -#endif /* __ACUUID_H__ */ +#endif /* __ACUUID_H__ */ diff --git a/include/acpi/apei.h b/include/acpi/apei.h index 680f80960c..76284bb560 100644 --- a/include/acpi/apei.h +++ b/include/acpi/apei.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ /* * apei.h - ACPI Platform Error Interface */ @@ -17,13 +16,7 @@ #ifdef __KERNEL__ -enum hest_status { - HEST_ENABLED, - HEST_DISABLED, - HEST_NOT_FOUND, -}; - -extern int hest_disable; +extern bool hest_disable; extern int erst_disable; #ifdef CONFIG_ACPI_APEI_GHES extern bool ghes_disable; @@ -51,6 +44,7 @@ int erst_clear(u64 record_id); int arch_apei_enable_cmcff(struct acpi_hest_header *hest_hdr, void *data); void arch_apei_report_mem_error(int sev, struct cper_sec_mem_err *mem_err); +void arch_apei_flush_tlb_one(unsigned long addr); #endif #endif diff --git a/include/acpi/button.h b/include/acpi/button.h index af2fce5d2e..1cad8b2d46 100644 --- a/include/acpi/button.h +++ b/include/acpi/button.h @@ -1,14 +1,21 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef ACPI_BUTTON_H #define ACPI_BUTTON_H -#define ACPI_BUTTON_HID_POWER "PNP0C0C" -#define ACPI_BUTTON_HID_LID "PNP0C0D" -#define ACPI_BUTTON_HID_SLEEP "PNP0C0E" +#include #if IS_ENABLED(CONFIG_ACPI_BUTTON) +extern int acpi_lid_notifier_register(struct notifier_block *nb); +extern int acpi_lid_notifier_unregister(struct
notifier_block *nb); extern int acpi_lid_open(void); #else +static inline int acpi_lid_notifier_register(struct notifier_block *nb) +{ + return 0; +} +static inline int acpi_lid_notifier_unregister(struct notifier_block *nb) +{ + return 0; +} static inline int acpi_lid_open(void) { return 1; diff --git a/include/acpi/cppc_acpi.h b/include/acpi/cppc_acpi.h index bc159a9b4a..427a7c3e6c 100644 --- a/include/acpi/cppc_acpi.h +++ b/include/acpi/cppc_acpi.h @@ -1,32 +1,33 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * CPPC (Collaborative Processor Performance Control) methods used * by CPUfreq drivers. * * (C) Copyright 2014, 2015 Linaro Ltd. * Author: Ashwin Chaugule + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; version 2 + * of the License. */ #ifndef _CPPC_ACPI_H #define _CPPC_ACPI_H #include -#include #include #include #include -/* Support CPPCv2 and CPPCv3 */ -#define CPPC_V2_REV 2 -#define CPPC_V3_REV 3 -#define CPPC_V2_NUM_ENT 21 -#define CPPC_V3_NUM_ENT 23 +/* Only support CPPCv2 for now. */ +#define CPPC_NUM_ENT 21 +#define CPPC_REV 2 #define PCC_CMD_COMPLETE_MASK (1 << 0) #define PCC_ERROR_MASK (1 << 2) -#define MAX_CPC_REG_ENT 21 +#define MAX_CPC_REG_ENT 19 /* CPPC specific PCC commands. */ #define CMD_READ 0 @@ -40,7 +41,7 @@ struct cpc_reg { u8 bit_width; u8 bit_offset; u8 access_width; - u64 address; + u64 __iomem address; } __packed; /* @@ -90,8 +91,6 @@ enum cppc_regs { AUTO_ACT_WINDOW, ENERGY_PERF, REFERENCE_PERF, - LOWEST_FREQ, - NOMINAL_FREQ, }; /* @@ -101,13 +100,9 @@ enum cppc_regs { * today. 
*/ struct cppc_perf_caps { - u32 guaranteed_perf; u32 highest_perf; u32 nominal_perf; u32 lowest_perf; - u32 lowest_nonlinear_perf; - u32 lowest_freq; - u32 nominal_freq; }; struct cppc_perf_ctrls { @@ -120,72 +115,24 @@ struct cppc_perf_fb_ctrs { u64 reference; u64 delivered; u64 reference_perf; - u64 wraparound_time; + u64 ctr_wrap_time; }; /* Per CPU container for runtime CPPC management. */ struct cppc_cpudata { - struct list_head node; + int cpu; struct cppc_perf_caps perf_caps; struct cppc_perf_ctrls perf_ctrls; struct cppc_perf_fb_ctrs perf_fb_ctrs; + struct cpufreq_policy *cur_policy; unsigned int shared_type; cpumask_var_t shared_cpu_map; }; -#ifdef CONFIG_ACPI_CPPC_LIB -extern int cppc_get_desired_perf(int cpunum, u64 *desired_perf); -extern int cppc_get_nominal_perf(int cpunum, u64 *nominal_perf); extern int cppc_get_perf_ctrs(int cpu, struct cppc_perf_fb_ctrs *perf_fb_ctrs); extern int cppc_set_perf(int cpu, struct cppc_perf_ctrls *perf_ctrls); extern int cppc_get_perf_caps(int cpu, struct cppc_perf_caps *caps); -extern bool acpi_cpc_valid(void); -extern int acpi_get_psd_map(unsigned int cpu, struct cppc_cpudata *cpu_data); +extern int acpi_get_psd_map(struct cppc_cpudata **); extern unsigned int cppc_get_transition_latency(int cpu); -extern bool cpc_ffh_supported(void); -extern int cpc_read_ffh(int cpunum, struct cpc_reg *reg, u64 *val); -extern int cpc_write_ffh(int cpunum, struct cpc_reg *reg, u64 val); -#else /* !CONFIG_ACPI_CPPC_LIB */ -static inline int cppc_get_desired_perf(int cpunum, u64 *desired_perf) -{ - return -ENOTSUPP; -} -static inline int cppc_get_nominal_perf(int cpunum, u64 *nominal_perf) -{ - return -ENOTSUPP; -} -static inline int cppc_get_perf_ctrs(int cpu, struct cppc_perf_fb_ctrs *perf_fb_ctrs) -{ - return -ENOTSUPP; -} -static inline int cppc_set_perf(int cpu, struct cppc_perf_ctrls *perf_ctrls) -{ - return -ENOTSUPP; -} -static inline int cppc_get_perf_caps(int cpu, struct cppc_perf_caps *caps) -{ - return -ENOTSUPP; -} -static 
inline bool acpi_cpc_valid(void) -{ - return false; -} -static inline unsigned int cppc_get_transition_latency(int cpu) -{ - return CPUFREQ_ETERNAL; -} -static inline bool cpc_ffh_supported(void) -{ - return false; -} -static inline int cpc_read_ffh(int cpunum, struct cpc_reg *reg, u64 *val) -{ - return -ENOTSUPP; -} -static inline int cpc_write_ffh(int cpunum, struct cpc_reg *reg, u64 val) -{ - return -ENOTSUPP; -} -#endif /* !CONFIG_ACPI_CPPC_LIB */ #endif /* _CPPC_ACPI_H*/ diff --git a/include/acpi/ghes.h b/include/acpi/ghes.h index 34fb3431a8..f32baee507 100644 --- a/include/acpi/ghes.h +++ b/include/acpi/ghes.h @@ -1,7 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -#ifndef GHES_H -#define GHES_H - #include #include @@ -13,14 +9,13 @@ * estatus: memory buffer for error status block, allocated during * HEST parsing. */ +#define GHES_TO_CLEAR 0x0001 #define GHES_EXITING 0x0002 struct ghes { - union { - struct acpi_hest_generic *generic; - struct acpi_hest_generic_v2 *generic_v2; - }; + struct acpi_hest_generic *generic; struct acpi_hest_generic_status *estatus; + u64 buffer_paddr; unsigned long flags; union { struct list_head list; @@ -33,14 +28,11 @@ struct ghes_estatus_node { struct llist_node llnode; struct acpi_hest_generic *generic; struct ghes *ghes; - - int task_work_cpu; - struct callback_head task_work; }; struct ghes_estatus_cache { u32 estatus_len; - atomic_t count; + atomic_unchecked_t count; struct acpi_hest_generic *generic; unsigned long long time_in; struct rcu_head rcu; @@ -53,96 +45,28 @@ enum { GHES_SEV_PANIC = 0x3, }; -#ifdef CONFIG_ACPI_APEI_GHES -/** - * ghes_register_vendor_record_notifier - register a notifier for vendor - * records that the kernel would otherwise ignore. - * @nb: pointer to the notifier_block structure of the event handler. 
- * - * return 0 : SUCCESS, non-zero : FAIL - */ -int ghes_register_vendor_record_notifier(struct notifier_block *nb); - -/** - * ghes_unregister_vendor_record_notifier - unregister the previously - * registered vendor record notifier. - * @nb: pointer to the notifier_block structure of the vendor record handler. - */ -void ghes_unregister_vendor_record_notifier(struct notifier_block *nb); -#endif - -int ghes_estatus_pool_init(int num_ghes); - /* From drivers/edac/ghes_edac.c */ #ifdef CONFIG_EDAC_GHES -void ghes_edac_report_mem_error(int sev, struct cper_sec_mem_err *mem_err); +void ghes_edac_report_mem_error(struct ghes *ghes, int sev, + struct cper_sec_mem_err *mem_err); int ghes_edac_register(struct ghes *ghes, struct device *dev); void ghes_edac_unregister(struct ghes *ghes); #else -static inline void ghes_edac_report_mem_error(int sev, +static inline void ghes_edac_report_mem_error(struct ghes *ghes, int sev, struct cper_sec_mem_err *mem_err) { } static inline int ghes_edac_register(struct ghes *ghes, struct device *dev) { - return -ENODEV; + return 0; } static inline void ghes_edac_unregister(struct ghes *ghes) { } #endif - -static inline int acpi_hest_get_version(struct acpi_hest_generic_data *gdata) -{ - return gdata->revision >> 8; -} - -static inline void *acpi_hest_get_payload(struct acpi_hest_generic_data *gdata) -{ - if (acpi_hest_get_version(gdata) >= 3) - return (void *)(((struct acpi_hest_generic_data_v300 *)(gdata)) + 1); - - return gdata + 1; -} - -static inline int acpi_hest_get_error_length(struct acpi_hest_generic_data *gdata) -{ - return ((struct acpi_hest_generic_data *)(gdata))->error_data_length; -} - -static inline int acpi_hest_get_size(struct acpi_hest_generic_data *gdata) -{ - if (acpi_hest_get_version(gdata) >= 3) - return sizeof(struct acpi_hest_generic_data_v300); - - return sizeof(struct acpi_hest_generic_data); -} - -static inline int acpi_hest_get_record_size(struct acpi_hest_generic_data *gdata) -{ - return 
(acpi_hest_get_size(gdata) + acpi_hest_get_error_length(gdata)); -} - -static inline void *acpi_hest_get_next(struct acpi_hest_generic_data *gdata) -{ - return (void *)(gdata) + acpi_hest_get_record_size(gdata); -} - -#define apei_estatus_for_each_section(estatus, section) \ - for (section = (struct acpi_hest_generic_data *)(estatus + 1); \ - (void *)section - (void *)(estatus + 1) < estatus->data_length; \ - section = acpi_hest_get_next(section)) - -#ifdef CONFIG_ACPI_APEI_SEA -int ghes_notify_sea(void); -#else -static inline int ghes_notify_sea(void) { return -ENOENT; } -#endif - -#endif /* GHES_H */ diff --git a/include/acpi/hed.h b/include/acpi/hed.h index ebef902afd..46e1249b70 100644 --- a/include/acpi/hed.h +++ b/include/acpi/hed.h @@ -1,9 +1,10 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * hed.h - ACPI Hardware Error Device * * Copyright (C) 2009, Intel Corp. * Author: Huang Ying + * + * This file is released under the GPLv2. */ #ifndef ACPI_HED_H diff --git a/include/acpi/pcc.h b/include/acpi/pcc.h index 4dec4ed138..8caa79c617 100644 --- a/include/acpi/pcc.h +++ b/include/acpi/pcc.h @@ -1,6 +1,10 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * PCC (Platform Communications Channel) methods + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; version 2 + * of the License. 
*/ #ifndef _PCC_H @@ -9,7 +13,6 @@ #include #include -#define MAX_PCC_SUBSPACES 256 #ifdef CONFIG_PCC extern struct mbox_chan *pcc_mbox_request_channel(struct mbox_client *cl, int subspace_id); diff --git a/include/acpi/pdc_intel.h b/include/acpi/pdc_intel.h index 967c552d1c..552637b0d0 100644 --- a/include/acpi/pdc_intel.h +++ b/include/acpi/pdc_intel.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ /* _PDC bit definition for Intel processors */ diff --git a/include/acpi/platform/acenv.h b/include/acpi/platform/acenv.h index e8958e0d16..fca15390a4 100644 --- a/include/acpi/platform/acenv.h +++ b/include/acpi/platform/acenv.h @@ -1,12 +1,46 @@ -/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 */ /****************************************************************************** * * Name: acenv.h - Host and compiler configuration * - * Copyright (C) 2000 - 2021, Intel Corp. - * *****************************************************************************/ +/* + * Copyright (C) 2000 - 2016, Intel Corp. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions, and the following disclaimer, + * without modification. + * 2. Redistributions in binary form must reproduce at minimum a disclaimer + * substantially similar to the "NO WARRANTY" disclaimer below + * ("Disclaimer") and any redistribution must be conditioned upon + * including a substantially similar Disclaimer requirement for further + * binary redistribution. + * 3. Neither the names of the above-listed copyright holders nor the names + * of any contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. 
+ * + * Alternatively, this software may be distributed under the terms of the + * GNU General Public License ("GPL") version 2 as published by the Free + * Software Foundation. + * + * NO WARRANTY + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, + * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGES. + */ + #ifndef __ACENV_H__ #define __ACENV_H__ @@ -41,8 +75,7 @@ (defined ACPI_NAMES_APP) || \ (defined ACPI_SRC_APP) || \ (defined ACPI_XTRACT_APP) || \ - (defined ACPI_EXAMPLE_APP) || \ - (defined ACPI_EFI_HELLO) + (defined ACPI_EXAMPLE_APP) #define ACPI_APPLICATION #define ACPI_SINGLE_THREADED #define USE_NATIVE_ALLOCATE_ZEROED @@ -128,17 +161,6 @@ #endif -/* - * acpisrc CR\LF support - * Unix file line endings do not include the carriage return. - * If the acpisrc utility is being built using a microsoft compiler, it means - * that it will be running on a windows machine which means that the output is - * expected to have CR/LF newlines. If the acpisrc utility is built with - * anything else, it will likely run on a system with LF newlines. This flag - * tells the acpisrc utility that newlines will be in the LF format. - */ -#define ACPI_SRC_OS_LF_ONLY 0 - /*! 
[Begin] no source code translation */ /****************************************************************************** @@ -265,11 +287,6 @@ #define ACPI_INLINE #endif -/* Use ordered initialization if compiler doesn't support designated. */ -#ifndef ACPI_STRUCT_INIT -#define ACPI_STRUCT_INIT(field, value) value -#endif - /* * Configurable calling conventions: * @@ -340,7 +357,7 @@ #include #include #include -#if defined (ACPI_APPLICATION) || defined(ACPI_LIBRARY) +#ifdef ACPI_APPLICATION #include #include #include diff --git a/include/acpi/platform/acenvex.h b/include/acpi/platform/acenvex.h index 277fe2fa4d..b3171b9d69 100644 --- a/include/acpi/platform/acenvex.h +++ b/include/acpi/platform/acenvex.h @@ -1,12 +1,46 @@ -/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 */ /****************************************************************************** * * Name: acenvex.h - Extra host and compiler configuration * - * Copyright (C) 2000 - 2021, Intel Corp. - * *****************************************************************************/ +/* + * Copyright (C) 2000 - 2016, Intel Corp. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions, and the following disclaimer, + * without modification. + * 2. Redistributions in binary form must reproduce at minimum a disclaimer + * substantially similar to the "NO WARRANTY" disclaimer below + * ("Disclaimer") and any redistribution must be conditioned upon + * including a substantially similar Disclaimer requirement for further + * binary redistribution. + * 3. Neither the names of the above-listed copyright holders nor the names + * of any contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. 
+ * + * Alternatively, this software may be distributed under the terms of the + * GNU General Public License ("GPL") version 2 as published by the Free + * Software Foundation. + * + * NO WARRANTY + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, + * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGES. + */ + #ifndef __ACENVEX_H__ #define __ACENVEX_H__ diff --git a/include/acpi/platform/acgcc.h b/include/acpi/platform/acgcc.h index 20ecb004f5..8f66aaabad 100644 --- a/include/acpi/platform/acgcc.h +++ b/include/acpi/platform/acgcc.h @@ -1,12 +1,46 @@ -/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 */ /****************************************************************************** * * Name: acgcc.h - GCC specific defines, etc. * - * Copyright (C) 2000 - 2021, Intel Corp. - * *****************************************************************************/ +/* + * Copyright (C) 2000 - 2016, Intel Corp. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions, and the following disclaimer, + * without modification. + * 2. 
Redistributions in binary form must reproduce at minimum a disclaimer + * substantially similar to the "NO WARRANTY" disclaimer below + * ("Disclaimer") and any redistribution must be conditioned upon + * including a substantially similar Disclaimer requirement for further + * binary redistribution. + * 3. Neither the names of the above-listed copyright holders nor the names + * of any contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * Alternatively, this software may be distributed under the terms of the + * GNU General Public License ("GPL") version 2 as published by the Free + * Software Foundation. + * + * NO WARRANTY + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, + * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGES. + */ + #ifndef __ACGCC_H__ #define __ACGCC_H__ @@ -14,22 +48,7 @@ * Use compiler specific is a good practice for even when * -nostdinc is specified (i.e., ACPI_USE_STANDARD_HEADERS undefined. 
*/ -#ifndef va_arg -#ifdef ACPI_USE_BUILTIN_STDARG -typedef __builtin_va_list va_list; -#define va_start(v, l) __builtin_va_start(v, l) -#define va_end(v) __builtin_va_end(v) -#define va_arg(v, l) __builtin_va_arg(v, l) -#define va_copy(d, s) __builtin_va_copy(d, s) -#else -#ifdef __KERNEL__ -#include -#else -/* Used to build acpi tools */ #include -#endif /* __KERNEL__ */ -#endif /* ACPI_USE_BUILTIN_STDARG */ -#endif /* ! va_arg */ #define ACPI_INLINE __inline__ @@ -55,23 +74,4 @@ typedef __builtin_va_list va_list; #define COMPILER_VA_MACRO 1 -/* GCC supports native multiply/shift on 32-bit platforms */ - -#define ACPI_USE_NATIVE_MATH64 - -/* GCC did not support __has_attribute until 5.1. */ - -#ifndef __has_attribute -#define __has_attribute(x) 0 -#endif - -/* - * Explicitly mark intentional explicit fallthrough to silence - * -Wimplicit-fallthrough in GCC 7.1+. - */ - -#if __has_attribute(__fallthrough__) -#define ACPI_FALLTHROUGH __attribute__((__fallthrough__)) -#endif - #endif /* __ACGCC_H__ */ diff --git a/include/acpi/platform/acgccex.h b/include/acpi/platform/acgccex.h index 738d52865e..46ead2caad 100644 --- a/include/acpi/platform/acgccex.h +++ b/include/acpi/platform/acgccex.h @@ -1,12 +1,46 @@ -/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 */ /****************************************************************************** * * Name: acgccex.h - Extra GCC specific defines, etc. * - * Copyright (C) 2000 - 2021, Intel Corp. - * *****************************************************************************/ +/* + * Copyright (C) 2000 - 2016, Intel Corp. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions, and the following disclaimer, + * without modification. + * 2. 
Redistributions in binary form must reproduce at minimum a disclaimer + * substantially similar to the "NO WARRANTY" disclaimer below + * ("Disclaimer") and any redistribution must be conditioned upon + * including a substantially similar Disclaimer requirement for further + * binary redistribution. + * 3. Neither the names of the above-listed copyright holders nor the names + * of any contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * Alternatively, this software may be distributed under the terms of the + * GNU General Public License ("GPL") version 2 as published by the Free + * Software Foundation. + * + * NO WARRANTY + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, + * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGES. + */ + #ifndef __ACGCCEX_H__ #define __ACGCCEX_H__ diff --git a/include/acpi/platform/acintel.h b/include/acpi/platform/acintel.h index 550fe9a8cd..17bd3b7b4e 100644 --- a/include/acpi/platform/acintel.h +++ b/include/acpi/platform/acintel.h @@ -1,12 +1,46 @@ -/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 */ /****************************************************************************** * * Name: acintel.h - VC specific defines, etc. * - * Copyright (C) 2000 - 2021, Intel Corp. 
- * *****************************************************************************/ +/* + * Copyright (C) 2000 - 2017, Intel Corp. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions, and the following disclaimer, + * without modification. + * 2. Redistributions in binary form must reproduce at minimum a disclaimer + * substantially similar to the "NO WARRANTY" disclaimer below + * ("Disclaimer") and any redistribution must be conditioned upon + * including a substantially similar Disclaimer requirement for further + * binary redistribution. + * 3. Neither the names of the above-listed copyright holders nor the names + * of any contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * Alternatively, this software may be distributed under the terms of the + * GNU General Public License ("GPL") version 2 as published by the Free + * Software Foundation. + * + * NO WARRANTY + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, + * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGES. 
+ */ + #ifndef __ACINTEL_H__ #define __ACINTEL_H__ @@ -14,9 +48,7 @@ * Use compiler specific is a good practice for even when * -nostdinc is specified (i.e., ACPI_USE_STANDARD_HEADERS undefined. */ -#ifndef va_arg #include -#endif /* Configuration specific to Intel 64-bit C compiler */ diff --git a/include/acpi/platform/aclinux.h b/include/acpi/platform/aclinux.h index b3ffb9bbf6..e861a24f06 100644 --- a/include/acpi/platform/aclinux.h +++ b/include/acpi/platform/aclinux.h @@ -1,12 +1,46 @@ -/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 */ /****************************************************************************** * * Name: aclinux.h - OS specific defines, etc. for Linux * - * Copyright (C) 2000 - 2021, Intel Corp. - * *****************************************************************************/ +/* + * Copyright (C) 2000 - 2016, Intel Corp. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions, and the following disclaimer, + * without modification. + * 2. Redistributions in binary form must reproduce at minimum a disclaimer + * substantially similar to the "NO WARRANTY" disclaimer below + * ("Disclaimer") and any redistribution must be conditioned upon + * including a substantially similar Disclaimer requirement for further + * binary redistribution. + * 3. Neither the names of the above-listed copyright holders nor the names + * of any contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * Alternatively, this software may be distributed under the terms of the + * GNU General Public License ("GPL") version 2 as published by the Free + * Software Foundation. 
+ * + * NO WARRANTY + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, + * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGES. + */ + #ifndef __ACLINUX_H__ #define __ACLINUX_H__ @@ -24,19 +58,13 @@ #define ACPI_USE_SYSTEM_CLIBRARY #define ACPI_USE_DO_WHILE_0 -#define ACPI_IGNORE_PACKAGE_RESOLUTION_ERRORS #ifdef __KERNEL__ #define ACPI_USE_SYSTEM_INTTYPES -#define ACPI_USE_GPE_POLLING /* Kernel specific ACPICA configuration */ -#ifdef CONFIG_PCI -#define ACPI_PCI_CONFIGURED -#endif - #ifdef CONFIG_ACPI_REDUCED_HARDWARE_ONLY #define ACPI_REDUCED_HARDWARE 1 #endif @@ -66,11 +94,6 @@ #define ACPI_INIT_FUNCTION __init -/* Use a specific bugging default separate from ACPICA */ - -#undef ACPI_DEBUG_DEFAULT -#define ACPI_DEBUG_DEFAULT (ACPI_LV_INFO | ACPI_LV_REPAIR) - #ifndef CONFIG_ACPI /* External globals for __KERNEL__, stubs is needed */ @@ -105,23 +128,17 @@ /* Host-dependent types and defines for in-kernel ACPICA */ #define ACPI_MACHINE_WIDTH BITS_PER_LONG -#define ACPI_USE_NATIVE_MATH64 #define ACPI_EXPORT_SYMBOL(symbol) EXPORT_SYMBOL(symbol); #define strtoul simple_strtoul #define acpi_cache_t struct kmem_cache #define acpi_spinlock spinlock_t * -#define acpi_raw_spinlock raw_spinlock_t * #define acpi_cpu_flags unsigned long /* Use native linux version of acpi_os_allocate_zeroed */ #define USE_NATIVE_ALLOCATE_ZEROED 
-/* Use logical addresses for accessing GPE registers in system memory */ - -#define ACPI_GPE_USE_LOGICAL_ADDRESSES - /* * Overrides for in-kernel ACPICA */ @@ -133,18 +150,14 @@ #define ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_acquire_object #define ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_get_thread_id #define ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_create_lock -#define ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_create_raw_lock -#define ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_delete_raw_lock -#define ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_acquire_raw_lock -#define ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_release_raw_lock /* * OSL interfaces used by debugger/disassembler */ #define ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_readable #define ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_writable -#define ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_initialize_debugger -#define ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_terminate_debugger +#define ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_initialize_command_signals +#define ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_terminate_command_signals /* * OSL interfaces used by utilities @@ -165,11 +178,6 @@ #define ACPI_MSG_BIOS_ERROR KERN_ERR "ACPI BIOS Error (bug): " #define ACPI_MSG_BIOS_WARNING KERN_WARNING "ACPI BIOS Warning (bug): " -/* - * Linux wants to use designated initializers for function pointer structs. 
- */ -#define ACPI_STRUCT_INIT(field, value) .field = value - #else /* !__KERNEL__ */ #define ACPI_USE_STANDARD_HEADERS @@ -192,10 +200,8 @@ #define ACPI_FLUSH_CPU_CACHE() #define ACPI_CAST_PTHREAD_T(pthread) ((acpi_thread_id) (pthread)) -#if defined(__ia64__) || (defined(__x86_64__) && !defined(__ILP32__)) ||\ - defined(__aarch64__) || defined(__PPC64__) ||\ - defined(__s390x__) ||\ - (defined(__riscv) && (defined(__LP64__) || defined(_LP64))) +#if defined(__ia64__) || defined(__x86_64__) ||\ + defined(__aarch64__) || defined(__PPC64__) #define ACPI_MACHINE_WIDTH 64 #define COMPILER_DEPENDENT_INT64 long #define COMPILER_DEPENDENT_UINT64 unsigned long @@ -204,7 +210,6 @@ #define COMPILER_DEPENDENT_INT64 long long #define COMPILER_DEPENDENT_UINT64 unsigned long long #define ACPI_USE_NATIVE_DIVIDE -#define ACPI_USE_NATIVE_MATH64 #endif #ifndef __cdecl diff --git a/include/acpi/platform/aclinuxex.h b/include/acpi/platform/aclinuxex.h index 5f642b07ad..a5509d8723 100644 --- a/include/acpi/platform/aclinuxex.h +++ b/include/acpi/platform/aclinuxex.h @@ -1,12 +1,46 @@ -/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 */ /****************************************************************************** * * Name: aclinuxex.h - Extra OS specific defines, etc. for Linux * - * Copyright (C) 2000 - 2021, Intel Corp. - * *****************************************************************************/ +/* + * Copyright (C) 2000 - 2016, Intel Corp. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions, and the following disclaimer, + * without modification. + * 2. 
Redistributions in binary form must reproduce at minimum a disclaimer + * substantially similar to the "NO WARRANTY" disclaimer below + * ("Disclaimer") and any redistribution must be conditioned upon + * including a substantially similar Disclaimer requirement for further + * binary redistribution. + * 3. Neither the names of the above-listed copyright holders nor the names + * of any contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * Alternatively, this software may be distributed under the terms of the + * GNU General Public License ("GPL") version 2 as published by the Free + * Software Foundation. + * + * NO WARRANTY + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, + * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGES. + */ + #ifndef __ACLINUXEX_H__ #define __ACLINUXEX_H__ @@ -90,47 +124,17 @@ static inline acpi_thread_id acpi_os_get_thread_id(void) lock ? AE_OK : AE_NO_MEMORY; \ }) - -#define acpi_os_create_raw_lock(__handle) \ - ({ \ - raw_spinlock_t *lock = ACPI_ALLOCATE(sizeof(*lock)); \ - if (lock) { \ - *(__handle) = lock; \ - raw_spin_lock_init(*(__handle)); \ - } \ - lock ? 
AE_OK : AE_NO_MEMORY; \ - }) - -static inline acpi_cpu_flags acpi_os_acquire_raw_lock(acpi_raw_spinlock lockp) -{ - acpi_cpu_flags flags; - - raw_spin_lock_irqsave(lockp, flags); - return flags; -} - -static inline void acpi_os_release_raw_lock(acpi_raw_spinlock lockp, - acpi_cpu_flags flags) -{ - raw_spin_unlock_irqrestore(lockp, flags); -} - -static inline void acpi_os_delete_raw_lock(acpi_raw_spinlock handle) -{ - ACPI_FREE(handle); -} - static inline u8 acpi_os_readable(void *pointer, acpi_size length) { return TRUE; } -static inline acpi_status acpi_os_initialize_debugger(void) +static inline acpi_status acpi_os_initialize_command_signals(void) { return AE_OK; } -static inline void acpi_os_terminate_debugger(void) +static inline void acpi_os_terminate_command_signals(void) { return; } @@ -138,6 +142,7 @@ static inline void acpi_os_terminate_debugger(void) /* * OSL interfaces added by Linux */ +void early_acpi_os_unmap_memory(void __iomem * virt, acpi_size size); #endif /* __KERNEL__ */ diff --git a/include/acpi/processor.h b/include/acpi/processor.h index 683e124ad5..f3db11c246 100644 --- a/include/acpi/processor.h +++ b/include/acpi/processor.h @@ -1,11 +1,8 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef __ACPI_PROCESSOR_H #define __ACPI_PROCESSOR_H #include #include -#include -#include #include #include @@ -232,8 +229,6 @@ struct acpi_processor { struct acpi_processor_limit limit; struct thermal_cooling_device *cdev; struct device *dev; /* Processor device. 
*/ - struct freq_qos_request perflib_req; - struct freq_qos_request thermal_req; }; struct acpi_processor_errata { @@ -254,12 +249,9 @@ extern int acpi_processor_register_performance(struct acpi_processor_performance *performance, unsigned int cpu); extern void acpi_processor_unregister_performance(unsigned int cpu); -int acpi_processor_pstate_control(void); /* note: this locks both the calling module and the processor module if a _PPC object exists, rmmod is disallowed then */ int acpi_processor_notify_smm(struct module *calling_module); -int acpi_processor_get_psd(acpi_handle handle, - struct acpi_psd_package *pdomain); /* parsing the _P* objects. */ extern int acpi_processor_get_performance_info(struct acpi_processor *pr); @@ -297,37 +289,23 @@ static inline void acpi_processor_ffh_cstate_enter(struct acpi_processor_cx } #endif -static inline int call_on_cpu(int cpu, long (*fn)(void *), void *arg, - bool direct) -{ - if (direct || (is_percpu_thread() && cpu == smp_processor_id())) - return fn(arg); - return work_on_cpu(cpu, fn, arg); -} - /* in processor_perflib.c */ #ifdef CONFIG_CPU_FREQ -extern bool acpi_processor_cpufreq_init; -void acpi_processor_ignore_ppc_init(void); -void acpi_processor_ppc_init(struct cpufreq_policy *policy); -void acpi_processor_ppc_exit(struct cpufreq_policy *policy); -void acpi_processor_ppc_has_changed(struct acpi_processor *pr, int event_flag); +void acpi_processor_ppc_init(void); +void acpi_processor_ppc_exit(void); +int acpi_processor_ppc_has_changed(struct acpi_processor *pr, int event_flag); extern int acpi_processor_get_bios_limit(int cpu, unsigned int *limit); #else -static inline void acpi_processor_ignore_ppc_init(void) +static inline void acpi_processor_ppc_init(void) { return; } -static inline void acpi_processor_ppc_init(struct cpufreq_policy *policy) +static inline void acpi_processor_ppc_exit(void) { return; } -static inline void acpi_processor_ppc_exit(struct cpufreq_policy *policy) -{ - return; -} -static inline void 
acpi_processor_ppc_has_changed(struct acpi_processor *pr, +static inline int acpi_processor_ppc_has_changed(struct acpi_processor *pr, int event_flag) { static unsigned int printout = 1; @@ -338,6 +316,7 @@ static inline void acpi_processor_ppc_has_changed(struct acpi_processor *pr, "Consider compiling CPUfreq support into your kernel.\n"); printout = 0; } + return 0; } static inline int acpi_processor_get_bios_limit(int cpu, unsigned int *limit) { @@ -439,14 +418,14 @@ static inline int acpi_processor_hotplug(struct acpi_processor *pr) int acpi_processor_get_limit_info(struct acpi_processor *pr); extern const struct thermal_cooling_device_ops processor_cooling_ops; #if defined(CONFIG_ACPI_CPU_FREQ_PSS) & defined(CONFIG_CPU_FREQ) -void acpi_thermal_cpufreq_init(struct cpufreq_policy *policy); -void acpi_thermal_cpufreq_exit(struct cpufreq_policy *policy); +void acpi_thermal_cpufreq_init(void); +void acpi_thermal_cpufreq_exit(void); #else -static inline void acpi_thermal_cpufreq_init(struct cpufreq_policy *policy) +static inline void acpi_thermal_cpufreq_init(void) { return; } -static inline void acpi_thermal_cpufreq_exit(struct cpufreq_policy *policy) +static inline void acpi_thermal_cpufreq_exit(void) { return; } diff --git a/include/acpi/reboot.h b/include/acpi/reboot.h index 14122fc55b..0419184ce8 100644 --- a/include/acpi/reboot.h +++ b/include/acpi/reboot.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef __ACPI_REBOOT_H #define __ACPI_REBOOT_H diff --git a/include/acpi/video.h b/include/acpi/video.h index db8548ff03..4536bd345a 100644 --- a/include/acpi/video.h +++ b/include/acpi/video.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef __ACPI_VIDEO_H #define __ACPI_VIDEO_H @@ -31,17 +30,6 @@ struct acpi_device; #define ACPI_VIDEO_DISPLAY_LEGACY_PANEL 0x0110 #define ACPI_VIDEO_DISPLAY_LEGACY_TV 0x0200 -#define ACPI_VIDEO_NOTIFY_SWITCH 0x80 -#define ACPI_VIDEO_NOTIFY_PROBE 0x81 -#define ACPI_VIDEO_NOTIFY_CYCLE 0x82 -#define 
ACPI_VIDEO_NOTIFY_NEXT_OUTPUT 0x83 -#define ACPI_VIDEO_NOTIFY_PREV_OUTPUT 0x84 -#define ACPI_VIDEO_NOTIFY_CYCLE_BRIGHTNESS 0x85 -#define ACPI_VIDEO_NOTIFY_INC_BRIGHTNESS 0x86 -#define ACPI_VIDEO_NOTIFY_DEC_BRIGHTNESS 0x87 -#define ACPI_VIDEO_NOTIFY_ZERO_BRIGHTNESS 0x88 -#define ACPI_VIDEO_NOTIFY_DISPLAY_OFF 0x89 - enum acpi_backlight_type { acpi_backlight_undef = -1, acpi_backlight_none = 0, diff --git a/include/asm-generic/4level-fixup.h b/include/asm-generic/4level-fixup.h new file mode 100644 index 0000000000..9ae82fe8e0 --- /dev/null +++ b/include/asm-generic/4level-fixup.h @@ -0,0 +1,40 @@ +#ifndef _4LEVEL_FIXUP_H +#define _4LEVEL_FIXUP_H + +#define __ARCH_HAS_4LEVEL_HACK +#define __PAGETABLE_PUD_FOLDED + +#define PUD_SHIFT PGDIR_SHIFT +#define PUD_SIZE PGDIR_SIZE +#define PUD_MASK PGDIR_MASK +#define PTRS_PER_PUD 1 + +#define pud_t pgd_t + +#define pmd_alloc(mm, pud, address) \ + ((unlikely(pgd_none(*(pud))) && __pmd_alloc(mm, pud, address))? \ + NULL: pmd_offset(pud, address)) +#define pmd_alloc_kernel(mm, pud, address) pmd_alloc((mm), (pud), (address)) + +#define pud_alloc(mm, pgd, address) (pgd) +#define pud_alloc_kernel(mm, pgd, address) pud_alloc((mm), (pgd), (address)) +#define pud_offset(pgd, start) (pgd) +#define pud_none(pud) 0 +#define pud_bad(pud) 0 +#define pud_present(pud) 1 +#define pud_ERROR(pud) do { } while (0) +#define pud_clear(pud) pgd_clear(pud) +#define pud_val(pud) pgd_val(pud) +#define pud_populate(mm, pud, pmd) pgd_populate(mm, pud, pmd) +#define pud_page(pud) pgd_page(pud) +#define pud_page_vaddr(pud) pgd_page_vaddr(pud) + +#undef pud_free_tlb +#define pud_free_tlb(tlb, x, addr) do { } while (0) +#define pud_free(mm, x) do { } while (0) +#define __pud_free_tlb(tlb, x, addr) do { } while (0) + +#undef pud_addr_end +#define pud_addr_end(addr, end) (end) + +#endif diff --git a/include/asm-generic/Kbuild.asm b/include/asm-generic/Kbuild.asm new file mode 100644 index 0000000000..d2ee86b4c0 --- /dev/null +++ 
b/include/asm-generic/Kbuild.asm @@ -0,0 +1 @@ +include include/uapi/asm-generic/Kbuild.asm diff --git a/include/asm-generic/asm-prototypes.h b/include/asm-generic/asm-prototypes.h index 2fa2bc2083..939869c772 100644 --- a/include/asm-generic/asm-prototypes.h +++ b/include/asm-generic/asm-prototypes.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #include #undef __memset extern void *__memset(void *, int, __kernel_size_t); diff --git a/include/asm-generic/atomic-long.h b/include/asm-generic/atomic-long.h index 073cf40f43..714fd14670 100644 --- a/include/asm-generic/atomic-long.h +++ b/include/asm-generic/atomic-long.h @@ -1,1014 +1,346 @@ -// SPDX-License-Identifier: GPL-2.0 - -// Generated by scripts/atomic/gen-atomic-long.sh -// DO NOT MODIFY THIS FILE DIRECTLY - #ifndef _ASM_GENERIC_ATOMIC_LONG_H #define _ASM_GENERIC_ATOMIC_LONG_H +/* + * Copyright (C) 2005 Silicon Graphics, Inc. + * Christoph Lameter + * + * Allows to provide arch independent atomic definitions without the need to + * edit all arch specific atomic.h files. + */ -#include #include -#ifdef CONFIG_64BIT +/* + * Support for atomic_long_t + * + * Casts for parameters are avoided for existing atomic functions in order to + * avoid issues with cast-as-lval under gcc 4.x and other limitations that the + * macros of a platform may have. 
+ */ + +#if BITS_PER_LONG == 64 + typedef atomic64_t atomic_long_t; -#define ATOMIC_LONG_INIT(i) ATOMIC64_INIT(i) -#define atomic_long_cond_read_acquire atomic64_cond_read_acquire -#define atomic_long_cond_read_relaxed atomic64_cond_read_relaxed + +#ifdef CONFIG_PAX_REFCOUNT +typedef atomic64_unchecked_t atomic_long_unchecked_t; #else -typedef atomic_t atomic_long_t; -#define ATOMIC_LONG_INIT(i) ATOMIC_INIT(i) -#define atomic_long_cond_read_acquire atomic_cond_read_acquire -#define atomic_long_cond_read_relaxed atomic_cond_read_relaxed +typedef atomic64_t atomic_long_unchecked_t; #endif -#ifdef CONFIG_64BIT +#define ATOMIC_LONG_INIT(i) ATOMIC64_INIT(i) +#define ATOMIC_LONG_PFX(x) atomic64 ## x + +#else + +typedef atomic_t atomic_long_t; -static __always_inline long -atomic_long_read(const atomic_long_t *v) -{ - return atomic64_read(v); -} - -static __always_inline long -atomic_long_read_acquire(const atomic_long_t *v) -{ - return atomic64_read_acquire(v); -} - -static __always_inline void -atomic_long_set(atomic_long_t *v, long i) -{ - atomic64_set(v, i); -} - -static __always_inline void -atomic_long_set_release(atomic_long_t *v, long i) -{ - atomic64_set_release(v, i); -} - -static __always_inline void -atomic_long_add(long i, atomic_long_t *v) -{ - atomic64_add(i, v); -} - -static __always_inline long -atomic_long_add_return(long i, atomic_long_t *v) -{ - return atomic64_add_return(i, v); -} - -static __always_inline long -atomic_long_add_return_acquire(long i, atomic_long_t *v) -{ - return atomic64_add_return_acquire(i, v); -} - -static __always_inline long -atomic_long_add_return_release(long i, atomic_long_t *v) -{ - return atomic64_add_return_release(i, v); -} - -static __always_inline long -atomic_long_add_return_relaxed(long i, atomic_long_t *v) -{ - return atomic64_add_return_relaxed(i, v); -} - -static __always_inline long -atomic_long_fetch_add(long i, atomic_long_t *v) -{ - return atomic64_fetch_add(i, v); -} - -static __always_inline long 
-atomic_long_fetch_add_acquire(long i, atomic_long_t *v) -{ - return atomic64_fetch_add_acquire(i, v); -} - -static __always_inline long -atomic_long_fetch_add_release(long i, atomic_long_t *v) -{ - return atomic64_fetch_add_release(i, v); -} - -static __always_inline long -atomic_long_fetch_add_relaxed(long i, atomic_long_t *v) -{ - return atomic64_fetch_add_relaxed(i, v); -} - -static __always_inline void -atomic_long_sub(long i, atomic_long_t *v) -{ - atomic64_sub(i, v); -} - -static __always_inline long -atomic_long_sub_return(long i, atomic_long_t *v) -{ - return atomic64_sub_return(i, v); -} - -static __always_inline long -atomic_long_sub_return_acquire(long i, atomic_long_t *v) -{ - return atomic64_sub_return_acquire(i, v); -} - -static __always_inline long -atomic_long_sub_return_release(long i, atomic_long_t *v) -{ - return atomic64_sub_return_release(i, v); -} - -static __always_inline long -atomic_long_sub_return_relaxed(long i, atomic_long_t *v) -{ - return atomic64_sub_return_relaxed(i, v); -} - -static __always_inline long -atomic_long_fetch_sub(long i, atomic_long_t *v) -{ - return atomic64_fetch_sub(i, v); -} - -static __always_inline long -atomic_long_fetch_sub_acquire(long i, atomic_long_t *v) -{ - return atomic64_fetch_sub_acquire(i, v); -} - -static __always_inline long -atomic_long_fetch_sub_release(long i, atomic_long_t *v) -{ - return atomic64_fetch_sub_release(i, v); -} - -static __always_inline long -atomic_long_fetch_sub_relaxed(long i, atomic_long_t *v) -{ - return atomic64_fetch_sub_relaxed(i, v); -} - -static __always_inline void -atomic_long_inc(atomic_long_t *v) -{ - atomic64_inc(v); -} - -static __always_inline long -atomic_long_inc_return(atomic_long_t *v) -{ - return atomic64_inc_return(v); -} - -static __always_inline long -atomic_long_inc_return_acquire(atomic_long_t *v) -{ - return atomic64_inc_return_acquire(v); -} - -static __always_inline long -atomic_long_inc_return_release(atomic_long_t *v) -{ - return 
atomic64_inc_return_release(v); -} - -static __always_inline long -atomic_long_inc_return_relaxed(atomic_long_t *v) -{ - return atomic64_inc_return_relaxed(v); -} - -static __always_inline long -atomic_long_fetch_inc(atomic_long_t *v) -{ - return atomic64_fetch_inc(v); -} - -static __always_inline long -atomic_long_fetch_inc_acquire(atomic_long_t *v) -{ - return atomic64_fetch_inc_acquire(v); -} - -static __always_inline long -atomic_long_fetch_inc_release(atomic_long_t *v) -{ - return atomic64_fetch_inc_release(v); -} - -static __always_inline long -atomic_long_fetch_inc_relaxed(atomic_long_t *v) -{ - return atomic64_fetch_inc_relaxed(v); -} - -static __always_inline void -atomic_long_dec(atomic_long_t *v) -{ - atomic64_dec(v); -} - -static __always_inline long -atomic_long_dec_return(atomic_long_t *v) -{ - return atomic64_dec_return(v); -} - -static __always_inline long -atomic_long_dec_return_acquire(atomic_long_t *v) -{ - return atomic64_dec_return_acquire(v); -} - -static __always_inline long -atomic_long_dec_return_release(atomic_long_t *v) -{ - return atomic64_dec_return_release(v); -} - -static __always_inline long -atomic_long_dec_return_relaxed(atomic_long_t *v) -{ - return atomic64_dec_return_relaxed(v); -} - -static __always_inline long -atomic_long_fetch_dec(atomic_long_t *v) -{ - return atomic64_fetch_dec(v); -} - -static __always_inline long -atomic_long_fetch_dec_acquire(atomic_long_t *v) -{ - return atomic64_fetch_dec_acquire(v); -} - -static __always_inline long -atomic_long_fetch_dec_release(atomic_long_t *v) -{ - return atomic64_fetch_dec_release(v); -} - -static __always_inline long -atomic_long_fetch_dec_relaxed(atomic_long_t *v) -{ - return atomic64_fetch_dec_relaxed(v); -} - -static __always_inline void -atomic_long_and(long i, atomic_long_t *v) -{ - atomic64_and(i, v); -} - -static __always_inline long -atomic_long_fetch_and(long i, atomic_long_t *v) -{ - return atomic64_fetch_and(i, v); -} - -static __always_inline long 
-atomic_long_fetch_and_acquire(long i, atomic_long_t *v) -{ - return atomic64_fetch_and_acquire(i, v); -} - -static __always_inline long -atomic_long_fetch_and_release(long i, atomic_long_t *v) -{ - return atomic64_fetch_and_release(i, v); -} - -static __always_inline long -atomic_long_fetch_and_relaxed(long i, atomic_long_t *v) -{ - return atomic64_fetch_and_relaxed(i, v); -} - -static __always_inline void -atomic_long_andnot(long i, atomic_long_t *v) -{ - atomic64_andnot(i, v); -} - -static __always_inline long -atomic_long_fetch_andnot(long i, atomic_long_t *v) -{ - return atomic64_fetch_andnot(i, v); -} - -static __always_inline long -atomic_long_fetch_andnot_acquire(long i, atomic_long_t *v) -{ - return atomic64_fetch_andnot_acquire(i, v); -} - -static __always_inline long -atomic_long_fetch_andnot_release(long i, atomic_long_t *v) -{ - return atomic64_fetch_andnot_release(i, v); -} - -static __always_inline long -atomic_long_fetch_andnot_relaxed(long i, atomic_long_t *v) -{ - return atomic64_fetch_andnot_relaxed(i, v); -} - -static __always_inline void -atomic_long_or(long i, atomic_long_t *v) -{ - atomic64_or(i, v); -} - -static __always_inline long -atomic_long_fetch_or(long i, atomic_long_t *v) -{ - return atomic64_fetch_or(i, v); -} - -static __always_inline long -atomic_long_fetch_or_acquire(long i, atomic_long_t *v) -{ - return atomic64_fetch_or_acquire(i, v); -} - -static __always_inline long -atomic_long_fetch_or_release(long i, atomic_long_t *v) -{ - return atomic64_fetch_or_release(i, v); -} - -static __always_inline long -atomic_long_fetch_or_relaxed(long i, atomic_long_t *v) -{ - return atomic64_fetch_or_relaxed(i, v); -} - -static __always_inline void -atomic_long_xor(long i, atomic_long_t *v) -{ - atomic64_xor(i, v); -} - -static __always_inline long -atomic_long_fetch_xor(long i, atomic_long_t *v) -{ - return atomic64_fetch_xor(i, v); -} - -static __always_inline long -atomic_long_fetch_xor_acquire(long i, atomic_long_t *v) -{ - return 
atomic64_fetch_xor_acquire(i, v); -} - -static __always_inline long -atomic_long_fetch_xor_release(long i, atomic_long_t *v) -{ - return atomic64_fetch_xor_release(i, v); -} - -static __always_inline long -atomic_long_fetch_xor_relaxed(long i, atomic_long_t *v) -{ - return atomic64_fetch_xor_relaxed(i, v); -} - -static __always_inline long -atomic_long_xchg(atomic_long_t *v, long i) -{ - return atomic64_xchg(v, i); -} - -static __always_inline long -atomic_long_xchg_acquire(atomic_long_t *v, long i) -{ - return atomic64_xchg_acquire(v, i); -} - -static __always_inline long -atomic_long_xchg_release(atomic_long_t *v, long i) -{ - return atomic64_xchg_release(v, i); -} - -static __always_inline long -atomic_long_xchg_relaxed(atomic_long_t *v, long i) -{ - return atomic64_xchg_relaxed(v, i); -} - -static __always_inline long -atomic_long_cmpxchg(atomic_long_t *v, long old, long new) -{ - return atomic64_cmpxchg(v, old, new); -} - -static __always_inline long -atomic_long_cmpxchg_acquire(atomic_long_t *v, long old, long new) -{ - return atomic64_cmpxchg_acquire(v, old, new); -} - -static __always_inline long -atomic_long_cmpxchg_release(atomic_long_t *v, long old, long new) -{ - return atomic64_cmpxchg_release(v, old, new); -} - -static __always_inline long -atomic_long_cmpxchg_relaxed(atomic_long_t *v, long old, long new) -{ - return atomic64_cmpxchg_relaxed(v, old, new); -} - -static __always_inline bool -atomic_long_try_cmpxchg(atomic_long_t *v, long *old, long new) -{ - return atomic64_try_cmpxchg(v, (s64 *)old, new); -} - -static __always_inline bool -atomic_long_try_cmpxchg_acquire(atomic_long_t *v, long *old, long new) -{ - return atomic64_try_cmpxchg_acquire(v, (s64 *)old, new); -} - -static __always_inline bool -atomic_long_try_cmpxchg_release(atomic_long_t *v, long *old, long new) -{ - return atomic64_try_cmpxchg_release(v, (s64 *)old, new); -} - -static __always_inline bool -atomic_long_try_cmpxchg_relaxed(atomic_long_t *v, long *old, long new) -{ - return 
atomic64_try_cmpxchg_relaxed(v, (s64 *)old, new); -} - -static __always_inline bool -atomic_long_sub_and_test(long i, atomic_long_t *v) -{ - return atomic64_sub_and_test(i, v); -} - -static __always_inline bool -atomic_long_dec_and_test(atomic_long_t *v) -{ - return atomic64_dec_and_test(v); -} - -static __always_inline bool -atomic_long_inc_and_test(atomic_long_t *v) -{ - return atomic64_inc_and_test(v); -} - -static __always_inline bool -atomic_long_add_negative(long i, atomic_long_t *v) -{ - return atomic64_add_negative(i, v); -} - -static __always_inline long -atomic_long_fetch_add_unless(atomic_long_t *v, long a, long u) -{ - return atomic64_fetch_add_unless(v, a, u); -} - -static __always_inline bool -atomic_long_add_unless(atomic_long_t *v, long a, long u) -{ - return atomic64_add_unless(v, a, u); -} - -static __always_inline bool -atomic_long_inc_not_zero(atomic_long_t *v) -{ - return atomic64_inc_not_zero(v); -} - -static __always_inline bool -atomic_long_inc_unless_negative(atomic_long_t *v) -{ - return atomic64_inc_unless_negative(v); -} - -static __always_inline bool -atomic_long_dec_unless_positive(atomic_long_t *v) -{ - return atomic64_dec_unless_positive(v); -} - -static __always_inline long -atomic_long_dec_if_positive(atomic_long_t *v) -{ - return atomic64_dec_if_positive(v); -} - -#else /* CONFIG_64BIT */ - -static __always_inline long -atomic_long_read(const atomic_long_t *v) -{ - return atomic_read(v); -} - -static __always_inline long -atomic_long_read_acquire(const atomic_long_t *v) -{ - return atomic_read_acquire(v); -} - -static __always_inline void -atomic_long_set(atomic_long_t *v, long i) -{ - atomic_set(v, i); -} - -static __always_inline void -atomic_long_set_release(atomic_long_t *v, long i) -{ - atomic_set_release(v, i); -} - -static __always_inline void -atomic_long_add(long i, atomic_long_t *v) -{ - atomic_add(i, v); -} - -static __always_inline long -atomic_long_add_return(long i, atomic_long_t *v) -{ - return atomic_add_return(i, 
v); -} - -static __always_inline long -atomic_long_add_return_acquire(long i, atomic_long_t *v) -{ - return atomic_add_return_acquire(i, v); -} - -static __always_inline long -atomic_long_add_return_release(long i, atomic_long_t *v) -{ - return atomic_add_return_release(i, v); -} - -static __always_inline long -atomic_long_add_return_relaxed(long i, atomic_long_t *v) -{ - return atomic_add_return_relaxed(i, v); -} - -static __always_inline long -atomic_long_fetch_add(long i, atomic_long_t *v) -{ - return atomic_fetch_add(i, v); -} - -static __always_inline long -atomic_long_fetch_add_acquire(long i, atomic_long_t *v) -{ - return atomic_fetch_add_acquire(i, v); -} - -static __always_inline long -atomic_long_fetch_add_release(long i, atomic_long_t *v) -{ - return atomic_fetch_add_release(i, v); -} - -static __always_inline long -atomic_long_fetch_add_relaxed(long i, atomic_long_t *v) -{ - return atomic_fetch_add_relaxed(i, v); -} - -static __always_inline void -atomic_long_sub(long i, atomic_long_t *v) -{ - atomic_sub(i, v); -} - -static __always_inline long -atomic_long_sub_return(long i, atomic_long_t *v) -{ - return atomic_sub_return(i, v); -} - -static __always_inline long -atomic_long_sub_return_acquire(long i, atomic_long_t *v) -{ - return atomic_sub_return_acquire(i, v); -} - -static __always_inline long -atomic_long_sub_return_release(long i, atomic_long_t *v) -{ - return atomic_sub_return_release(i, v); -} - -static __always_inline long -atomic_long_sub_return_relaxed(long i, atomic_long_t *v) -{ - return atomic_sub_return_relaxed(i, v); -} - -static __always_inline long -atomic_long_fetch_sub(long i, atomic_long_t *v) -{ - return atomic_fetch_sub(i, v); -} - -static __always_inline long -atomic_long_fetch_sub_acquire(long i, atomic_long_t *v) -{ - return atomic_fetch_sub_acquire(i, v); -} - -static __always_inline long -atomic_long_fetch_sub_release(long i, atomic_long_t *v) -{ - return atomic_fetch_sub_release(i, v); -} - -static __always_inline long 
-atomic_long_fetch_sub_relaxed(long i, atomic_long_t *v) -{ - return atomic_fetch_sub_relaxed(i, v); -} - -static __always_inline void -atomic_long_inc(atomic_long_t *v) -{ - atomic_inc(v); -} - -static __always_inline long -atomic_long_inc_return(atomic_long_t *v) -{ - return atomic_inc_return(v); -} - -static __always_inline long -atomic_long_inc_return_acquire(atomic_long_t *v) -{ - return atomic_inc_return_acquire(v); -} - -static __always_inline long -atomic_long_inc_return_release(atomic_long_t *v) -{ - return atomic_inc_return_release(v); -} - -static __always_inline long -atomic_long_inc_return_relaxed(atomic_long_t *v) -{ - return atomic_inc_return_relaxed(v); -} - -static __always_inline long -atomic_long_fetch_inc(atomic_long_t *v) -{ - return atomic_fetch_inc(v); -} - -static __always_inline long -atomic_long_fetch_inc_acquire(atomic_long_t *v) -{ - return atomic_fetch_inc_acquire(v); -} - -static __always_inline long -atomic_long_fetch_inc_release(atomic_long_t *v) -{ - return atomic_fetch_inc_release(v); -} - -static __always_inline long -atomic_long_fetch_inc_relaxed(atomic_long_t *v) -{ - return atomic_fetch_inc_relaxed(v); -} - -static __always_inline void -atomic_long_dec(atomic_long_t *v) -{ - atomic_dec(v); -} +#ifdef CONFIG_PAX_REFCOUNT +typedef atomic_unchecked_t atomic_long_unchecked_t; +#else +typedef atomic_t atomic_long_unchecked_t; +#endif -static __always_inline long -atomic_long_dec_return(atomic_long_t *v) -{ - return atomic_dec_return(v); -} - -static __always_inline long -atomic_long_dec_return_acquire(atomic_long_t *v) -{ - return atomic_dec_return_acquire(v); -} - -static __always_inline long -atomic_long_dec_return_release(atomic_long_t *v) -{ - return atomic_dec_return_release(v); -} - -static __always_inline long -atomic_long_dec_return_relaxed(atomic_long_t *v) -{ - return atomic_dec_return_relaxed(v); -} - -static __always_inline long -atomic_long_fetch_dec(atomic_long_t *v) -{ - return atomic_fetch_dec(v); -} - -static 
__always_inline long -atomic_long_fetch_dec_acquire(atomic_long_t *v) -{ - return atomic_fetch_dec_acquire(v); -} - -static __always_inline long -atomic_long_fetch_dec_release(atomic_long_t *v) -{ - return atomic_fetch_dec_release(v); -} - -static __always_inline long -atomic_long_fetch_dec_relaxed(atomic_long_t *v) -{ - return atomic_fetch_dec_relaxed(v); -} - -static __always_inline void -atomic_long_and(long i, atomic_long_t *v) -{ - atomic_and(i, v); -} - -static __always_inline long -atomic_long_fetch_and(long i, atomic_long_t *v) -{ - return atomic_fetch_and(i, v); -} - -static __always_inline long -atomic_long_fetch_and_acquire(long i, atomic_long_t *v) -{ - return atomic_fetch_and_acquire(i, v); -} - -static __always_inline long -atomic_long_fetch_and_release(long i, atomic_long_t *v) -{ - return atomic_fetch_and_release(i, v); -} - -static __always_inline long -atomic_long_fetch_and_relaxed(long i, atomic_long_t *v) -{ - return atomic_fetch_and_relaxed(i, v); -} - -static __always_inline void -atomic_long_andnot(long i, atomic_long_t *v) -{ - atomic_andnot(i, v); -} - -static __always_inline long -atomic_long_fetch_andnot(long i, atomic_long_t *v) -{ - return atomic_fetch_andnot(i, v); -} - -static __always_inline long -atomic_long_fetch_andnot_acquire(long i, atomic_long_t *v) -{ - return atomic_fetch_andnot_acquire(i, v); -} - -static __always_inline long -atomic_long_fetch_andnot_release(long i, atomic_long_t *v) -{ - return atomic_fetch_andnot_release(i, v); -} - -static __always_inline long -atomic_long_fetch_andnot_relaxed(long i, atomic_long_t *v) -{ - return atomic_fetch_andnot_relaxed(i, v); -} - -static __always_inline void -atomic_long_or(long i, atomic_long_t *v) -{ - atomic_or(i, v); -} - -static __always_inline long -atomic_long_fetch_or(long i, atomic_long_t *v) -{ - return atomic_fetch_or(i, v); -} +#define ATOMIC_LONG_INIT(i) ATOMIC_INIT(i) +#define ATOMIC_LONG_PFX(x) atomic ## x -static __always_inline long 
-atomic_long_fetch_or_acquire(long i, atomic_long_t *v) -{ - return atomic_fetch_or_acquire(i, v); -} +#endif -static __always_inline long -atomic_long_fetch_or_release(long i, atomic_long_t *v) -{ - return atomic_fetch_or_release(i, v); +#define ATOMIC_LONG_READ_OP(mo, suffix) \ +static inline long atomic_long_read##mo##suffix(const atomic_long##suffix##_t *l)\ +{ \ + ATOMIC_LONG_PFX(suffix##_t) *v = (ATOMIC_LONG_PFX(suffix##_t) *)l;\ + \ + return (long)ATOMIC_LONG_PFX(_read##mo##suffix)(v); \ } +ATOMIC_LONG_READ_OP(,) +ATOMIC_LONG_READ_OP(,_unchecked) +ATOMIC_LONG_READ_OP(_acquire,) -static __always_inline long -atomic_long_fetch_or_relaxed(long i, atomic_long_t *v) -{ - return atomic_fetch_or_relaxed(i, v); -} +#undef ATOMIC_LONG_READ_OP -static __always_inline void -atomic_long_xor(long i, atomic_long_t *v) -{ - atomic_xor(i, v); +#define ATOMIC_LONG_SET_OP(mo, suffix) \ +static inline void atomic_long_set##mo##suffix(atomic_long##suffix##_t *l, long i)\ +{ \ + ATOMIC_LONG_PFX(suffix##_t) *v = (ATOMIC_LONG_PFX(suffix##_t) *)l;\ + \ + ATOMIC_LONG_PFX(_set##mo##suffix)(v, i); \ +} +ATOMIC_LONG_SET_OP(,) +ATOMIC_LONG_SET_OP(,_unchecked) +ATOMIC_LONG_SET_OP(_release,) + +#undef ATOMIC_LONG_SET_OP + +#define ATOMIC_LONG_ADD_SUB_OP(op, mo, suffix) \ +static inline long \ +atomic_long_##op##_return##mo##suffix(long i, atomic_long##suffix##_t *l)\ +{ \ + ATOMIC_LONG_PFX(suffix##_t) *v = (ATOMIC_LONG_PFX(suffix##_t) *)l;\ + \ + return (long)ATOMIC_LONG_PFX(_##op##_return##mo##suffix)(i, v); \ +} +ATOMIC_LONG_ADD_SUB_OP(add,,) +ATOMIC_LONG_ADD_SUB_OP(add,,_unchecked) +ATOMIC_LONG_ADD_SUB_OP(add, _relaxed,) +ATOMIC_LONG_ADD_SUB_OP(add, _acquire,) +ATOMIC_LONG_ADD_SUB_OP(add, _release,) +ATOMIC_LONG_ADD_SUB_OP(sub,,) +//ATOMIC_LONG_ADD_SUB_OP(sub,,_unchecked) +ATOMIC_LONG_ADD_SUB_OP(sub, _relaxed,) +ATOMIC_LONG_ADD_SUB_OP(sub, _acquire,) +ATOMIC_LONG_ADD_SUB_OP(sub, _release,) + +#undef ATOMIC_LONG_ADD_SUB_OP + +#define atomic_long_cmpxchg_relaxed(l, old, new) \ + 
(ATOMIC_LONG_PFX(_cmpxchg_relaxed)((ATOMIC_LONG_PFX(_t) *)(l), \ + (old), (new))) +#define atomic_long_cmpxchg_acquire(l, old, new) \ + (ATOMIC_LONG_PFX(_cmpxchg_acquire)((ATOMIC_LONG_PFX(_t) *)(l), \ + (old), (new))) +#define atomic_long_cmpxchg_release(l, old, new) \ + (ATOMIC_LONG_PFX(_cmpxchg_release)((ATOMIC_LONG_PFX(_t) *)(l), \ + (old), (new))) +#define atomic_long_cmpxchg(l, old, new) \ + (ATOMIC_LONG_PFX(_cmpxchg)((ATOMIC_LONG_PFX(_t) *)(l), (old), (new))) + +#define atomic_long_xchg_relaxed(v, new) \ + (ATOMIC_LONG_PFX(_xchg_relaxed)((ATOMIC_LONG_PFX(_t) *)(v), (new))) +#define atomic_long_xchg_acquire(v, new) \ + (ATOMIC_LONG_PFX(_xchg_acquire)((ATOMIC_LONG_PFX(_t) *)(v), (new))) +#define atomic_long_xchg_release(v, new) \ + (ATOMIC_LONG_PFX(_xchg_release)((ATOMIC_LONG_PFX(_t) *)(v), (new))) +#define atomic_long_xchg(v, new) \ + (ATOMIC_LONG_PFX(_xchg)((ATOMIC_LONG_PFX(_t) *)(v), (new))) + +#ifdef CONFIG_PAX_REFCOUNT +#define atomic_long_xchg_unchecked(v, new) \ + (ATOMIC_LONG_PFX(_xchg_unchecked)((ATOMIC_LONG_PFX(_unchecked_t) *)(v), (new))) +#endif + +static __always_inline void atomic_long_inc(atomic_long_t *l) +{ + ATOMIC_LONG_PFX(_t) *v = (ATOMIC_LONG_PFX(_t) *)l; + + ATOMIC_LONG_PFX(_inc)(v); +} + +#ifdef CONFIG_PAX_REFCOUNT +static __always_inline void atomic_long_inc_unchecked(atomic_long_unchecked_t *l) +{ + ATOMIC_LONG_PFX(_unchecked_t) *v = (ATOMIC_LONG_PFX(_unchecked_t) *)l; + + ATOMIC_LONG_PFX(_inc_unchecked)(v); +} +#endif + +static __always_inline void atomic_long_dec(atomic_long_t *l) +{ + ATOMIC_LONG_PFX(_t) *v = (ATOMIC_LONG_PFX(_t) *)l; + + ATOMIC_LONG_PFX(_dec)(v); } -static __always_inline long -atomic_long_fetch_xor(long i, atomic_long_t *v) -{ - return atomic_fetch_xor(i, v); +#define ATOMIC_LONG_FETCH_OP(op, mo) \ +static inline long \ +atomic_long_fetch_##op##mo(long i, atomic_long_t *l) \ +{ \ + ATOMIC_LONG_PFX(_t) *v = (ATOMIC_LONG_PFX(_t) *)l; \ + \ + return (long)ATOMIC_LONG_PFX(_fetch_##op##mo)(i, v); \ } -static 
__always_inline long -atomic_long_fetch_xor_acquire(long i, atomic_long_t *v) -{ - return atomic_fetch_xor_acquire(i, v); -} +ATOMIC_LONG_FETCH_OP(add, ) +ATOMIC_LONG_FETCH_OP(add, _relaxed) +ATOMIC_LONG_FETCH_OP(add, _acquire) +ATOMIC_LONG_FETCH_OP(add, _release) +ATOMIC_LONG_FETCH_OP(sub, ) +ATOMIC_LONG_FETCH_OP(sub, _relaxed) +ATOMIC_LONG_FETCH_OP(sub, _acquire) +ATOMIC_LONG_FETCH_OP(sub, _release) +ATOMIC_LONG_FETCH_OP(and, ) +ATOMIC_LONG_FETCH_OP(and, _relaxed) +ATOMIC_LONG_FETCH_OP(and, _acquire) +ATOMIC_LONG_FETCH_OP(and, _release) +ATOMIC_LONG_FETCH_OP(andnot, ) +ATOMIC_LONG_FETCH_OP(andnot, _relaxed) +ATOMIC_LONG_FETCH_OP(andnot, _acquire) +ATOMIC_LONG_FETCH_OP(andnot, _release) +ATOMIC_LONG_FETCH_OP(or, ) +ATOMIC_LONG_FETCH_OP(or, _relaxed) +ATOMIC_LONG_FETCH_OP(or, _acquire) +ATOMIC_LONG_FETCH_OP(or, _release) +ATOMIC_LONG_FETCH_OP(xor, ) +ATOMIC_LONG_FETCH_OP(xor, _relaxed) +ATOMIC_LONG_FETCH_OP(xor, _acquire) +ATOMIC_LONG_FETCH_OP(xor, _release) -static __always_inline long -atomic_long_fetch_xor_release(long i, atomic_long_t *v) -{ - return atomic_fetch_xor_release(i, v); -} +#undef ATOMIC_LONG_FETCH_OP -static __always_inline long -atomic_long_fetch_xor_relaxed(long i, atomic_long_t *v) -{ - return atomic_fetch_xor_relaxed(i, v); +#define ATOMIC_LONG_FETCH_INC_DEC_OP(op, mo) \ +static inline long \ +atomic_long_fetch_##op##mo(atomic_long_t *l) \ +{ \ + ATOMIC_LONG_PFX(_t) *v = (ATOMIC_LONG_PFX(_t) *)l; \ + \ + return (long)ATOMIC_LONG_PFX(_fetch_##op##mo)(v); \ } -static __always_inline long -atomic_long_xchg(atomic_long_t *v, long i) -{ - return atomic_xchg(v, i); -} +ATOMIC_LONG_FETCH_INC_DEC_OP(inc,) +ATOMIC_LONG_FETCH_INC_DEC_OP(inc, _relaxed) +ATOMIC_LONG_FETCH_INC_DEC_OP(inc, _acquire) +ATOMIC_LONG_FETCH_INC_DEC_OP(inc, _release) +ATOMIC_LONG_FETCH_INC_DEC_OP(dec,) +ATOMIC_LONG_FETCH_INC_DEC_OP(dec, _relaxed) +ATOMIC_LONG_FETCH_INC_DEC_OP(dec, _acquire) +ATOMIC_LONG_FETCH_INC_DEC_OP(dec, _release) -static __always_inline long 
-atomic_long_xchg_acquire(atomic_long_t *v, long i) -{ - return atomic_xchg_acquire(v, i); -} +#undef ATOMIC_LONG_FETCH_INC_DEC_OP -static __always_inline long -atomic_long_xchg_release(atomic_long_t *v, long i) +#ifdef CONFIG_PAX_REFCOUNT +static __always_inline void atomic_long_dec_unchecked(atomic_long_unchecked_t *l) { - return atomic_xchg_release(v, i); -} + ATOMIC_LONG_PFX(_unchecked_t) *v = (ATOMIC_LONG_PFX(_unchecked_t) *)l; -static __always_inline long -atomic_long_xchg_relaxed(atomic_long_t *v, long i) -{ - return atomic_xchg_relaxed(v, i); + ATOMIC_LONG_PFX(_dec_unchecked)(v); } +#endif -static __always_inline long -atomic_long_cmpxchg(atomic_long_t *v, long old, long new) -{ - return atomic_cmpxchg(v, old, new); +#define ATOMIC_LONG_OP(op, suffix) \ +static __always_inline void \ +atomic_long_##op##suffix(long i, atomic_long##suffix##_t *l) \ +{ \ + ATOMIC_LONG_PFX(suffix##_t) *v = (ATOMIC_LONG_PFX(suffix##_t) *)l;\ + \ + ATOMIC_LONG_PFX(_##op##suffix)(i, v); \ } -static __always_inline long -atomic_long_cmpxchg_acquire(atomic_long_t *v, long old, long new) -{ - return atomic_cmpxchg_acquire(v, old, new); -} +ATOMIC_LONG_OP(add,) +ATOMIC_LONG_OP(add,_unchecked) +ATOMIC_LONG_OP(sub,) +ATOMIC_LONG_OP(sub,_unchecked) +ATOMIC_LONG_OP(and,) +ATOMIC_LONG_OP(andnot,) +ATOMIC_LONG_OP(or,) +ATOMIC_LONG_OP(xor,) -static __always_inline long -atomic_long_cmpxchg_release(atomic_long_t *v, long old, long new) -{ - return atomic_cmpxchg_release(v, old, new); -} +#undef ATOMIC_LONG_OP -static __always_inline long -atomic_long_cmpxchg_relaxed(atomic_long_t *v, long old, long new) +static inline int atomic_long_sub_and_test(long i, atomic_long_t *l) { - return atomic_cmpxchg_relaxed(v, old, new); -} + ATOMIC_LONG_PFX(_t) *v = (ATOMIC_LONG_PFX(_t) *)l; -static __always_inline bool -atomic_long_try_cmpxchg(atomic_long_t *v, long *old, long new) -{ - return atomic_try_cmpxchg(v, (int *)old, new); + return ATOMIC_LONG_PFX(_sub_and_test)(i, v); } -static __always_inline bool 
-atomic_long_try_cmpxchg_acquire(atomic_long_t *v, long *old, long new) -{ - return atomic_try_cmpxchg_acquire(v, (int *)old, new); -} +static inline int atomic_long_dec_and_test(atomic_long_t *l) +{ + ATOMIC_LONG_PFX(_t) *v = (ATOMIC_LONG_PFX(_t) *)l; + + return ATOMIC_LONG_PFX(_dec_and_test)(v); +} + +static inline int atomic_long_inc_and_test(atomic_long_t *l) +{ + ATOMIC_LONG_PFX(_t) *v = (ATOMIC_LONG_PFX(_t) *)l; + + return ATOMIC_LONG_PFX(_inc_and_test)(v); +} + +static inline int atomic_long_add_negative(long i, atomic_long_t *l) +{ + ATOMIC_LONG_PFX(_t) *v = (ATOMIC_LONG_PFX(_t) *)l; + + return ATOMIC_LONG_PFX(_add_negative)(i, v); +} + +#define ATOMIC_LONG_INC_DEC_OP(op, mo, suffix) \ +static inline long \ +atomic_long_##op##_return##mo##suffix(atomic_long##suffix##_t *l) \ +{ \ + ATOMIC_LONG_PFX(suffix##_t) *v = (ATOMIC_LONG_PFX(suffix##_t) *)l;\ + \ + return (long)ATOMIC_LONG_PFX(_##op##_return##mo##suffix)(v); \ +} +ATOMIC_LONG_INC_DEC_OP(inc,,) +ATOMIC_LONG_INC_DEC_OP(inc,,_unchecked) +ATOMIC_LONG_INC_DEC_OP(inc, _relaxed,) +ATOMIC_LONG_INC_DEC_OP(inc, _acquire,) +ATOMIC_LONG_INC_DEC_OP(inc, _release,) +ATOMIC_LONG_INC_DEC_OP(dec,,) +ATOMIC_LONG_INC_DEC_OP(dec, _relaxed,) +ATOMIC_LONG_INC_DEC_OP(dec, _acquire,) +ATOMIC_LONG_INC_DEC_OP(dec, _release,) + +#undef ATOMIC_LONG_INC_DEC_OP -static __always_inline bool -atomic_long_try_cmpxchg_release(atomic_long_t *v, long *old, long new) +static inline long atomic_long_add_unless(atomic_long_t *l, long a, long u) { - return atomic_try_cmpxchg_release(v, (int *)old, new); -} - -static __always_inline bool -atomic_long_try_cmpxchg_relaxed(atomic_long_t *v, long *old, long new) -{ - return atomic_try_cmpxchg_relaxed(v, (int *)old, new); -} - -static __always_inline bool -atomic_long_sub_and_test(long i, atomic_long_t *v) -{ - return atomic_sub_and_test(i, v); -} - -static __always_inline bool -atomic_long_dec_and_test(atomic_long_t *v) -{ - return atomic_dec_and_test(v); -} - -static __always_inline bool 
-atomic_long_inc_and_test(atomic_long_t *v) -{ - return atomic_inc_and_test(v); -} - -static __always_inline bool -atomic_long_add_negative(long i, atomic_long_t *v) -{ - return atomic_add_negative(i, v); -} - -static __always_inline long -atomic_long_fetch_add_unless(atomic_long_t *v, long a, long u) -{ - return atomic_fetch_add_unless(v, a, u); -} - -static __always_inline bool -atomic_long_add_unless(atomic_long_t *v, long a, long u) -{ - return atomic_add_unless(v, a, u); -} + ATOMIC_LONG_PFX(_t) *v = (ATOMIC_LONG_PFX(_t) *)l; -static __always_inline bool -atomic_long_inc_not_zero(atomic_long_t *v) -{ - return atomic_inc_not_zero(v); + return (long)ATOMIC_LONG_PFX(_add_unless)(v, a, u); } -static __always_inline bool -atomic_long_inc_unless_negative(atomic_long_t *v) -{ - return atomic_inc_unless_negative(v); -} +#define atomic_long_inc_not_zero(l) \ + ATOMIC_LONG_PFX(_inc_not_zero)((ATOMIC_LONG_PFX(_t) *)(l)) -static __always_inline bool -atomic_long_dec_unless_positive(atomic_long_t *v) +#ifdef CONFIG_PAX_REFCOUNT +static inline void pax_refcount_needs_these_functions(void) { - return atomic_dec_unless_positive(v); -} + atomic_read_unchecked((atomic_unchecked_t *)NULL); + atomic_set_unchecked((atomic_unchecked_t *)NULL, 0); + atomic_add_unchecked(0, (atomic_unchecked_t *)NULL); + atomic_sub_unchecked(0, (atomic_unchecked_t *)NULL); + atomic_inc_unchecked((atomic_unchecked_t *)NULL); + (void)atomic_inc_and_test_unchecked((atomic_unchecked_t *)NULL); + atomic_inc_return_unchecked((atomic_unchecked_t *)NULL); + atomic_add_return_unchecked(0, (atomic_unchecked_t *)NULL); + atomic_dec_unchecked((atomic_unchecked_t *)NULL); + atomic_cmpxchg_unchecked((atomic_unchecked_t *)NULL, 0, 0); + (void)atomic_xchg_unchecked((atomic_unchecked_t *)NULL, 0); -static __always_inline long -atomic_long_dec_if_positive(atomic_long_t *v) -{ - return atomic_dec_if_positive(v); -} + atomic_long_read_unchecked((atomic_long_unchecked_t *)NULL); + 
atomic_long_set_unchecked((atomic_long_unchecked_t *)NULL, 0); + atomic_long_add_unchecked(0, (atomic_long_unchecked_t *)NULL); + atomic_long_sub_unchecked(0, (atomic_long_unchecked_t *)NULL); + atomic_long_inc_unchecked((atomic_long_unchecked_t *)NULL); + atomic_long_add_return_unchecked(0, (atomic_long_unchecked_t *)NULL); + atomic_long_inc_return_unchecked((atomic_long_unchecked_t *)NULL); + atomic_long_dec_unchecked((atomic_long_unchecked_t *)NULL); +} +#else +#define atomic_read_unchecked(v) atomic_read(v) +#define atomic_set_unchecked(v, i) atomic_set((v), (i)) +#define atomic_add_unchecked(i, v) atomic_add((i), (v)) +#define atomic_sub_unchecked(i, v) atomic_sub((i), (v)) +#define atomic_inc_unchecked(v) atomic_inc(v) +#ifndef atomic_inc_and_test_unchecked +#define atomic_inc_and_test_unchecked(v) atomic_inc_and_test(v) +#endif +#ifndef atomic_inc_return_unchecked +#define atomic_inc_return_unchecked(v) atomic_inc_return(v) +#endif +#ifndef atomic_add_return_unchecked +#define atomic_add_return_unchecked(i, v) atomic_add_return((i), (v)) +#endif +#define atomic_dec_unchecked(v) atomic_dec(v) +#define atomic_cmpxchg_unchecked(v, o, n) atomic_cmpxchg((v), (o), (n)) +#ifndef atomic_xchg_unchecked +#define atomic_xchg_unchecked(v, i) atomic_xchg((v), (i)) +#endif -#endif /* CONFIG_64BIT */ -#endif /* _ASM_GENERIC_ATOMIC_LONG_H */ -// a624200981f552b2c6be4f32fe44da8289f30d87 +#define atomic_long_read_unchecked(v) atomic_long_read(v) +#define atomic_long_set_unchecked(v, i) atomic_long_set((v), (i)) +#define atomic_long_add_unchecked(i, v) atomic_long_add((i), (v)) +#define atomic_long_sub_unchecked(i, v) atomic_long_sub((i), (v)) +#define atomic_long_inc_unchecked(v) atomic_long_inc(v) +#define atomic_long_add_return_unchecked(i, v) atomic_long_add_return((i), (v)) +#define atomic_long_inc_return_unchecked(v) atomic_long_inc_return(v) +#define atomic_long_dec_unchecked(v) atomic_long_dec(v) +#ifndef atomic_long_xchg_unchecked +#define 
atomic_long_xchg_unchecked(v, i) atomic_long_xchg((v), (i)) +#endif +#endif + +#endif /* _ASM_GENERIC_ATOMIC_LONG_H */ diff --git a/include/asm-generic/atomic.h b/include/asm-generic/atomic.h index 04b8be9f1a..9ed8b98718 100644 --- a/include/asm-generic/atomic.h +++ b/include/asm-generic/atomic.h @@ -1,10 +1,16 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ /* - * Generic C implementation of atomic counter operations. Do not include in - * machine independent code. + * Generic C implementation of atomic counter operations. Usable on + * UP systems only. Do not include in machine independent code. + * + * Originally implemented for MN10300. * * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved. * Written by David Howells (dhowells@redhat.com) + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public Licence + * as published by the Free Software Foundation; either version + * 2 of the Licence, or (at your option) any later version. */ #ifndef __ASM_GENERIC_ATOMIC_H #define __ASM_GENERIC_ATOMIC_H @@ -12,39 +18,56 @@ #include #include +/* + * atomic_$op() - $op integer to atomic variable + * @i: integer value to $op + * @v: pointer to the atomic variable + * + * Atomically $ops @i to @v. Does not strictly guarantee a memory-barrier, use + * smp_mb__{before,after}_atomic(). + */ + +/* + * atomic_$op_return() - $op interer to atomic variable and returns the result + * @i: integer value to $op + * @v: pointer to the atomic variable + * + * Atomically $ops @i to @v. Does imply a full memory barrier. 
+ */ + #ifdef CONFIG_SMP /* we can build all atomic primitives from cmpxchg */ #define ATOMIC_OP(op, c_op) \ -static inline void generic_atomic_##op(int i, atomic_t *v) \ +static inline void atomic_##op(int i, atomic_t *v) \ { \ int c, old; \ \ c = v->counter; \ - while ((old = arch_cmpxchg(&v->counter, c, c c_op i)) != c) \ + while ((old = cmpxchg(&v->counter, c, c c_op i)) != c) \ c = old; \ } #define ATOMIC_OP_RETURN(op, c_op) \ -static inline int generic_atomic_##op##_return(int i, atomic_t *v) \ +static inline int atomic_##op##_return(int i, atomic_t *v) \ { \ int c, old; \ \ c = v->counter; \ - while ((old = arch_cmpxchg(&v->counter, c, c c_op i)) != c) \ + while ((old = cmpxchg(&v->counter, c, c c_op i)) != c) \ c = old; \ \ return c c_op i; \ } #define ATOMIC_FETCH_OP(op, c_op) \ -static inline int generic_atomic_fetch_##op(int i, atomic_t *v) \ +static inline int atomic_fetch_##op(int i, atomic_t *v) \ { \ int c, old; \ \ c = v->counter; \ - while ((old = arch_cmpxchg(&v->counter, c, c c_op i)) != c) \ + while ((old = cmpxchg(&v->counter, c, c c_op i)) != c) \ c = old; \ \ return c; \ @@ -55,7 +78,7 @@ static inline int generic_atomic_fetch_##op(int i, atomic_t *v) \ #include #define ATOMIC_OP(op, c_op) \ -static inline void generic_atomic_##op(int i, atomic_t *v) \ +static inline void atomic_##op(int i, atomic_t *v) \ { \ unsigned long flags; \ \ @@ -65,7 +88,7 @@ static inline void generic_atomic_##op(int i, atomic_t *v) \ } #define ATOMIC_OP_RETURN(op, c_op) \ -static inline int generic_atomic_##op##_return(int i, atomic_t *v) \ +static inline int atomic_##op##_return(int i, atomic_t *v) \ { \ unsigned long flags; \ int ret; \ @@ -78,7 +101,7 @@ static inline int generic_atomic_##op##_return(int i, atomic_t *v) \ } #define ATOMIC_FETCH_OP(op, c_op) \ -static inline int generic_atomic_fetch_##op(int i, atomic_t *v) \ +static inline int atomic_fetch_##op(int i, atomic_t *v) \ { \ unsigned long flags; \ int ret; \ @@ -93,44 +116,120 @@ static inline int 
generic_atomic_fetch_##op(int i, atomic_t *v) \ #endif /* CONFIG_SMP */ +#ifndef atomic_add_return ATOMIC_OP_RETURN(add, +) +#endif + +#ifndef atomic_sub_return ATOMIC_OP_RETURN(sub, -) +#endif +#ifndef atomic_fetch_add ATOMIC_FETCH_OP(add, +) -ATOMIC_FETCH_OP(sub, -) -ATOMIC_FETCH_OP(and, &) -ATOMIC_FETCH_OP(or, |) -ATOMIC_FETCH_OP(xor, ^) +#endif -ATOMIC_OP(add, +) -ATOMIC_OP(sub, -) +#ifndef atomic_fetch_sub +ATOMIC_FETCH_OP(sub, -) +#endif + +#ifndef atomic_fetch_and +ATOMIC_FETCH_OP(and, &) +#endif + +#ifndef atomic_fetch_or +ATOMIC_FETCH_OP(or, |) +#endif + +#ifndef atomic_fetch_xor +ATOMIC_FETCH_OP(xor, ^) +#endif + +#ifndef atomic_and ATOMIC_OP(and, &) +#endif + +#ifndef atomic_or ATOMIC_OP(or, |) +#endif + +#ifndef atomic_xor ATOMIC_OP(xor, ^) +#endif #undef ATOMIC_FETCH_OP #undef ATOMIC_OP_RETURN #undef ATOMIC_OP -#define arch_atomic_add_return generic_atomic_add_return -#define arch_atomic_sub_return generic_atomic_sub_return +/* + * Atomic operations that C can't guarantee us. Useful for + * resource counting etc.. + */ -#define arch_atomic_fetch_add generic_atomic_fetch_add -#define arch_atomic_fetch_sub generic_atomic_fetch_sub -#define arch_atomic_fetch_and generic_atomic_fetch_and -#define arch_atomic_fetch_or generic_atomic_fetch_or -#define arch_atomic_fetch_xor generic_atomic_fetch_xor +#define ATOMIC_INIT(i) { (i) } -#define arch_atomic_add generic_atomic_add -#define arch_atomic_sub generic_atomic_sub -#define arch_atomic_and generic_atomic_and -#define arch_atomic_or generic_atomic_or -#define arch_atomic_xor generic_atomic_xor +/** + * atomic_read - read atomic variable + * @v: pointer of type atomic_t + * + * Atomically reads the value of @v. 
+ */ +#ifndef atomic_read +#define atomic_read(v) READ_ONCE((v)->counter) +#endif -#define arch_atomic_read(v) READ_ONCE((v)->counter) -#define arch_atomic_set(v, i) WRITE_ONCE(((v)->counter), (i)) +/** + * atomic_set - set atomic variable + * @v: pointer of type atomic_t + * @i: required value + * + * Atomically sets the value of @v to @i. + */ +#define atomic_set(v, i) WRITE_ONCE(((v)->counter), (i)) -#define arch_atomic_xchg(ptr, v) (arch_xchg(&(ptr)->counter, (v))) -#define arch_atomic_cmpxchg(v, old, new) (arch_cmpxchg(&((v)->counter), (old), (new))) +#include + +static inline int atomic_add_negative(int i, atomic_t *v) +{ + return atomic_add_return(i, v) < 0; +} + +static inline void atomic_add(int i, atomic_t *v) +{ + atomic_add_return(i, v); +} + +static inline void atomic_sub(int i, atomic_t *v) +{ + atomic_sub_return(i, v); +} + +static inline void atomic_inc(atomic_t *v) +{ + atomic_add_return(1, v); +} + +static inline void atomic_dec(atomic_t *v) +{ + atomic_sub_return(1, v); +} + +#define atomic_dec_return(v) atomic_sub_return(1, (v)) +#define atomic_inc_return(v) atomic_add_return(1, (v)) + +#define atomic_sub_and_test(i, v) (atomic_sub_return((i), (v)) == 0) +#define atomic_dec_and_test(v) (atomic_dec_return(v) == 0) +#define atomic_inc_and_test(v) (atomic_inc_return(v) == 0) + +#define atomic_xchg(ptr, v) (xchg(&(ptr)->counter, (v))) +#define atomic_cmpxchg(v, old, new) (cmpxchg(&((v)->counter), (old), (new))) + +static inline int __atomic_add_unless(atomic_t *v, int a, int u) +{ + int c, old; + c = atomic_read(v); + while (c != u && (old = atomic_cmpxchg(v, c, c + a)) != c) + c = old; + return c; +} #endif /* __ASM_GENERIC_ATOMIC_H */ diff --git a/include/asm-generic/atomic64.h b/include/asm-generic/atomic64.h index 100d24b02e..cadcc641eb 100644 --- a/include/asm-generic/atomic64.h +++ b/include/asm-generic/atomic64.h @@ -1,31 +1,36 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * Generic implementation of 64-bit atomics using spinlocks, * 
useful on processors that don't have 64-bit atomic instructions. * * Copyright © 2009 Paul Mackerras, IBM Corp. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. */ #ifndef _ASM_GENERIC_ATOMIC64_H #define _ASM_GENERIC_ATOMIC64_H -#include typedef struct { - s64 counter; + long long counter; } atomic64_t; +typedef atomic64_t atomic64_unchecked_t; + #define ATOMIC64_INIT(i) { (i) } -extern s64 generic_atomic64_read(const atomic64_t *v); -extern void generic_atomic64_set(atomic64_t *v, s64 i); +extern long long atomic64_read(const atomic64_t *v); +extern void atomic64_set(atomic64_t *v, long long i); #define ATOMIC64_OP(op) \ -extern void generic_atomic64_##op(s64 a, atomic64_t *v); +extern void atomic64_##op(long long a, atomic64_t *v); #define ATOMIC64_OP_RETURN(op) \ -extern s64 generic_atomic64_##op##_return(s64 a, atomic64_t *v); +extern long long atomic64_##op##_return(long long a, atomic64_t *v); #define ATOMIC64_FETCH_OP(op) \ -extern s64 generic_atomic64_fetch_##op(s64 a, atomic64_t *v); +extern long long atomic64_fetch_##op(long long a, atomic64_t *v); #define ATOMIC64_OPS(op) ATOMIC64_OP(op) ATOMIC64_OP_RETURN(op) ATOMIC64_FETCH_OP(op) @@ -44,32 +49,30 @@ ATOMIC64_OPS(xor) #undef ATOMIC64_OP_RETURN #undef ATOMIC64_OP -extern s64 generic_atomic64_dec_if_positive(atomic64_t *v); -extern s64 generic_atomic64_cmpxchg(atomic64_t *v, s64 o, s64 n); -extern s64 generic_atomic64_xchg(atomic64_t *v, s64 new); -extern s64 generic_atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u); +extern long long atomic64_dec_if_positive(atomic64_t *v); +extern long long atomic64_cmpxchg(atomic64_t *v, long long o, long long n); +extern long long atomic64_xchg(atomic64_t *v, long long new); +extern int atomic64_add_unless(atomic64_t *v, long long a, long long u); -#define 
arch_atomic64_read generic_atomic64_read -#define arch_atomic64_set generic_atomic64_set -#define arch_atomic64_set_release generic_atomic64_set +#define atomic64_add_negative(a, v) (atomic64_add_return((a), (v)) < 0) +#define atomic64_inc(v) atomic64_add(1LL, (v)) +#define atomic64_inc_return(v) atomic64_add_return(1LL, (v)) +#define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0) +#define atomic64_sub_and_test(a, v) (atomic64_sub_return((a), (v)) == 0) +#define atomic64_dec(v) atomic64_sub(1LL, (v)) +#define atomic64_dec_return(v) atomic64_sub_return(1LL, (v)) +#define atomic64_dec_and_test(v) (atomic64_dec_return((v)) == 0) +#define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1LL, 0LL) -#define arch_atomic64_add generic_atomic64_add -#define arch_atomic64_add_return generic_atomic64_add_return -#define arch_atomic64_fetch_add generic_atomic64_fetch_add -#define arch_atomic64_sub generic_atomic64_sub -#define arch_atomic64_sub_return generic_atomic64_sub_return -#define arch_atomic64_fetch_sub generic_atomic64_fetch_sub - -#define arch_atomic64_and generic_atomic64_and -#define arch_atomic64_fetch_and generic_atomic64_fetch_and -#define arch_atomic64_or generic_atomic64_or -#define arch_atomic64_fetch_or generic_atomic64_fetch_or -#define arch_atomic64_xor generic_atomic64_xor -#define arch_atomic64_fetch_xor generic_atomic64_fetch_xor - -#define arch_atomic64_dec_if_positive generic_atomic64_dec_if_positive -#define arch_atomic64_cmpxchg generic_atomic64_cmpxchg -#define arch_atomic64_xchg generic_atomic64_xchg -#define arch_atomic64_fetch_add_unless generic_atomic64_fetch_add_unless +#define atomic64_read_unchecked(v) atomic64_read(v) +#define atomic64_set_unchecked(v, i) atomic64_set((v), (i)) +#define atomic64_add_unchecked(a, v) atomic64_add((a), (v)) +#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v)) +#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v)) +#define atomic64_inc_unchecked(v) atomic64_inc(v) +#define 
atomic64_inc_return_unchecked(v) atomic64_inc_return(v) +#define atomic64_dec_unchecked(v) atomic64_dec(v) +#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n)) +#define atomic64_xchg_unchecked(v, n) atomic64_xchg((v), (n)) #endif /* _ASM_GENERIC_ATOMIC64_H */ diff --git a/include/asm-generic/audit_change_attr.h b/include/asm-generic/audit_change_attr.h index 331670807c..a186553733 100644 --- a/include/asm-generic/audit_change_attr.h +++ b/include/asm-generic/audit_change_attr.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifdef __NR_chmod __NR_chmod, #endif diff --git a/include/asm-generic/audit_dir_write.h b/include/asm-generic/audit_dir_write.h index dd5a9dd7a1..7b61db4fe7 100644 --- a/include/asm-generic/audit_dir_write.h +++ b/include/asm-generic/audit_dir_write.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifdef __NR_rename __NR_rename, #endif @@ -27,12 +26,7 @@ __NR_mknod, __NR_mkdirat, __NR_mknodat, __NR_unlinkat, -#ifdef __NR_renameat __NR_renameat, -#endif __NR_linkat, __NR_symlinkat, #endif -#ifdef __NR_renameat2 -__NR_renameat2, -#endif diff --git a/include/asm-generic/audit_read.h b/include/asm-generic/audit_read.h index 7bb7b5a83a..3b249cb857 100644 --- a/include/asm-generic/audit_read.h +++ b/include/asm-generic/audit_read.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifdef __NR_readlink __NR_readlink, #endif diff --git a/include/asm-generic/audit_write.h b/include/asm-generic/audit_write.h index f9f1d0ae11..274575d712 100644 --- a/include/asm-generic/audit_write.h +++ b/include/asm-generic/audit_write.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #include __NR_acct, #ifdef __NR_swapon @@ -20,6 +19,3 @@ __NR_ftruncate64, #ifdef __NR_bind __NR_bind, /* bind can affect fs object only in one way... 
*/ #endif -#ifdef __NR_fallocate -__NR_fallocate, -#endif diff --git a/include/asm-generic/barrier.h b/include/asm-generic/barrier.h index 640f09479b..fe297b599b 100644 --- a/include/asm-generic/barrier.h +++ b/include/asm-generic/barrier.h @@ -1,12 +1,16 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ /* - * Generic barrier definitions. + * Generic barrier definitions, originally based on MN10300 definitions. * * It should be possible to use these on really simple architectures, * but it serves more as a starting point for new ports. * * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved. * Written by David Howells (dhowells@redhat.com) + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public Licence + * as published by the Free Software Foundation; either version + * 2 of the Licence, or (at your option) any later version. */ #ifndef __ASM_GENERIC_BARRIER_H #define __ASM_GENERIC_BARRIER_H @@ -14,7 +18,6 @@ #ifndef __ASSEMBLY__ #include -#include #ifndef nop #define nop() asm volatile ("nop") @@ -47,6 +50,10 @@ #define dma_wmb() wmb() #endif +#ifndef read_barrier_depends +#define read_barrier_depends() do { } while (0) +#endif + #ifndef __smp_mb #define __smp_mb() mb() #endif @@ -59,6 +66,10 @@ #define __smp_wmb() wmb() #endif +#ifndef __smp_read_barrier_depends +#define __smp_read_barrier_depends() read_barrier_depends() +#endif + #ifdef CONFIG_SMP #ifndef smp_mb @@ -73,6 +84,10 @@ #define smp_wmb() __smp_wmb() #endif +#ifndef smp_read_barrier_depends +#define smp_read_barrier_depends() __smp_read_barrier_depends() +#endif + #else /* !CONFIG_SMP */ #ifndef smp_mb @@ -87,6 +102,10 @@ #define smp_wmb() barrier() #endif +#ifndef smp_read_barrier_depends +#define smp_read_barrier_depends() do { } while (0) +#endif + #endif /* CONFIG_SMP */ #ifndef __smp_store_mb @@ -113,10 +132,10 @@ do { \ #ifndef __smp_load_acquire #define __smp_load_acquire(p) \ ({ \ - __unqual_scalar_typeof(*p) ___p1 = 
READ_ONCE(*p); \ + typeof(*p) ___p1 = READ_ONCE(*p); \ compiletime_assert_atomic_type(*p); \ __smp_mb(); \ - (typeof(*p))___p1; \ + ___p1; \ }) #endif @@ -168,10 +187,10 @@ do { \ #ifndef smp_load_acquire #define smp_load_acquire(p) \ ({ \ - __unqual_scalar_typeof(*p) ___p1 = READ_ONCE(*p); \ + typeof(*p) ___p1 = READ_ONCE(*p); \ compiletime_assert_atomic_type(*p); \ barrier(); \ - (typeof(*p))___p1; \ + ___p1; \ }) #endif @@ -181,6 +200,7 @@ do { \ #define virt_mb() __smp_mb() #define virt_rmb() __smp_rmb() #define virt_wmb() __smp_wmb() +#define virt_read_barrier_depends() __smp_read_barrier_depends() #define virt_store_mb(var, value) __smp_store_mb(var, value) #define virt_mb__before_atomic() __smp_mb__before_atomic() #define virt_mb__after_atomic() __smp_mb__after_atomic() @@ -200,30 +220,6 @@ do { \ #define smp_acquire__after_ctrl_dep() smp_rmb() #endif -/** - * smp_cond_load_relaxed() - (Spin) wait for cond with no ordering guarantees - * @ptr: pointer to the variable to wait on - * @cond: boolean expression to wait for - * - * Equivalent to using READ_ONCE() on the condition variable. - * - * Due to C lacking lambda expressions we load the value of *ptr into a - * pre-named variable @VAL to be used in @cond. - */ -#ifndef smp_cond_load_relaxed -#define smp_cond_load_relaxed(ptr, cond_expr) ({ \ - typeof(ptr) __PTR = (ptr); \ - __unqual_scalar_typeof(*ptr) VAL; \ - for (;;) { \ - VAL = READ_ONCE(*__PTR); \ - if (cond_expr) \ - break; \ - cpu_relax(); \ - } \ - (typeof(*ptr))VAL; \ -}) -#endif - /** * smp_cond_load_acquire() - (Spin) wait for cond with ACQUIRE ordering * @ptr: pointer to the variable to wait on @@ -231,25 +227,24 @@ do { \ * * Equivalent to using smp_load_acquire() on the condition variable but employs * the control dependency of the wait to reduce the barrier on many platforms. + * + * Due to C lacking lambda expressions we load the value of *ptr into a + * pre-named variable @VAL to be used in @cond. 
*/ #ifndef smp_cond_load_acquire #define smp_cond_load_acquire(ptr, cond_expr) ({ \ - __unqual_scalar_typeof(*ptr) _val; \ - _val = smp_cond_load_relaxed(ptr, cond_expr); \ + typeof(ptr) __PTR = (ptr); \ + typeof(*ptr) VAL; \ + for (;;) { \ + VAL = READ_ONCE(*__PTR); \ + if (cond_expr) \ + break; \ + cpu_relax(); \ + } \ smp_acquire__after_ctrl_dep(); \ - (typeof(*ptr))_val; \ + VAL; \ }) #endif -/* - * pmem_wmb() ensures that all stores for which the modification - * are written to persistent storage by preceding instructions have - * updated persistent storage before any data access or data transfer - * caused by subsequent instructions is initiated. - */ -#ifndef pmem_wmb -#define pmem_wmb() wmb() -#endif - #endif /* !__ASSEMBLY__ */ #endif /* __ASM_GENERIC_BARRIER_H */ diff --git a/include/asm-generic/bitops.h b/include/asm-generic/bitops.h index df9b5bc3d2..dcdcacf2fd 100644 --- a/include/asm-generic/bitops.h +++ b/include/asm-generic/bitops.h @@ -1,12 +1,10 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef __ASM_GENERIC_BITOPS_H #define __ASM_GENERIC_BITOPS_H /* * For the benefit of those who are trying to port Linux to another - * architecture, here are some C-language equivalents. They should - * generate reasonable code, so take a look at what your compiler spits - * out before rolling your own buggy implementation in assembly language. + * architecture, here are some C-language equivalents. You should + * recode these in the native assembly language, if at all possible. 
* * C language equivalents written by Theodore Ts'o, 9/26/92 */ diff --git a/include/asm-generic/bitops/__ffs.h b/include/asm-generic/bitops/__ffs.h index 39e56e1c72..937d7c4355 100644 --- a/include/asm-generic/bitops/__ffs.h +++ b/include/asm-generic/bitops/__ffs.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _ASM_GENERIC_BITOPS___FFS_H_ #define _ASM_GENERIC_BITOPS___FFS_H_ diff --git a/include/asm-generic/bitops/__fls.h b/include/asm-generic/bitops/__fls.h index 03f721a8a2..0fe12f27b0 100644 --- a/include/asm-generic/bitops/__fls.h +++ b/include/asm-generic/bitops/__fls.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _ASM_GENERIC_BITOPS___FLS_H_ #define _ASM_GENERIC_BITOPS___FLS_H_ @@ -10,7 +9,7 @@ * * Undefined if no set bit exists, so code should check against 0 first. */ -static __always_inline unsigned long __fls(unsigned long word) +static __always_inline unsigned long __intentional_overflow(-1) __fls(unsigned long word) { int num = BITS_PER_LONG - 1; diff --git a/include/asm-generic/bitops/arch_hweight.h b/include/asm-generic/bitops/arch_hweight.h index c2705e1d22..6a211f4066 100644 --- a/include/asm-generic/bitops/arch_hweight.h +++ b/include/asm-generic/bitops/arch_hweight.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _ASM_GENERIC_BITOPS_ARCH_HWEIGHT_H_ #define _ASM_GENERIC_BITOPS_ARCH_HWEIGHT_H_ diff --git a/include/asm-generic/bitops/atomic.h b/include/asm-generic/bitops/atomic.h index 3096f086b5..49673510b4 100644 --- a/include/asm-generic/bitops/atomic.h +++ b/include/asm-generic/bitops/atomic.h @@ -1,76 +1,189 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _ASM_GENERIC_BITOPS_ATOMIC_H_ #define _ASM_GENERIC_BITOPS_ATOMIC_H_ -#include -#include -#include +#include +#include + +#ifdef CONFIG_SMP +#include +#include /* we use L1_CACHE_BYTES */ + +/* Use an array of spinlocks for our atomic_ts. + * Hash function to index into a different SPINLOCK. 
+ * Since "a" is usually an address, use one spinlock per cacheline. + */ +# define ATOMIC_HASH_SIZE 4 +# define ATOMIC_HASH(a) (&(__atomic_hash[ (((unsigned long) a)/L1_CACHE_BYTES) & (ATOMIC_HASH_SIZE-1) ])) + +extern arch_spinlock_t __atomic_hash[ATOMIC_HASH_SIZE] __lock_aligned; + +/* Can't use raw_spin_lock_irq because of #include problems, so + * this is the substitute */ +#define _atomic_spin_lock_irqsave(l,f) do { \ + arch_spinlock_t *s = ATOMIC_HASH(l); \ + local_irq_save(f); \ + arch_spin_lock(s); \ +} while(0) + +#define _atomic_spin_unlock_irqrestore(l,f) do { \ + arch_spinlock_t *s = ATOMIC_HASH(l); \ + arch_spin_unlock(s); \ + local_irq_restore(f); \ +} while(0) + + +#else +# define _atomic_spin_lock_irqsave(l,f) do { local_irq_save(f); } while (0) +# define _atomic_spin_unlock_irqrestore(l,f) do { local_irq_restore(f); } while (0) +#endif /* - * Implementation of atomic bitops using atomic-fetch ops. - * See Documentation/atomic_bitops.txt for details. + * NMI events can occur at any time, including when interrupts have been + * disabled by *_irqsave(). So you can get NMI events occurring while a + * *_bit function is holding a spin lock. If the NMI handler also wants + * to do bit manipulation (and they do) then you can get a deadlock + * between the original caller of *_bit() and the NMI handler. + * + * by Keith Owens */ -static __always_inline void -arch_set_bit(unsigned int nr, volatile unsigned long *p) +/** + * set_bit - Atomically set a bit in memory + * @nr: the bit to set + * @addr: the address to start counting from + * + * This function is atomic and may not be reordered. See __set_bit() + * if you do not require the atomic guarantees. + * + * Note: there are no guarantees that this function will not be reordered + * on non x86 architectures, so if you are writing portable code, + * make sure not to rely on its reordering guarantees. 
+ * + * Note that @nr may be almost arbitrarily large; this function is not + * restricted to acting on a single-word quantity. + */ +static inline void set_bit(int nr, volatile unsigned long *addr) { - p += BIT_WORD(nr); - arch_atomic_long_or(BIT_MASK(nr), (atomic_long_t *)p); -} - -static __always_inline void -arch_clear_bit(unsigned int nr, volatile unsigned long *p) -{ - p += BIT_WORD(nr); - arch_atomic_long_andnot(BIT_MASK(nr), (atomic_long_t *)p); -} - -static __always_inline void -arch_change_bit(unsigned int nr, volatile unsigned long *p) -{ - p += BIT_WORD(nr); - arch_atomic_long_xor(BIT_MASK(nr), (atomic_long_t *)p); -} - -static __always_inline int -arch_test_and_set_bit(unsigned int nr, volatile unsigned long *p) -{ - long old; unsigned long mask = BIT_MASK(nr); + unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr); + unsigned long flags; - p += BIT_WORD(nr); - if (READ_ONCE(*p) & mask) - return 1; - - old = arch_atomic_long_fetch_or(mask, (atomic_long_t *)p); - return !!(old & mask); + _atomic_spin_lock_irqsave(p, flags); + *p |= mask; + _atomic_spin_unlock_irqrestore(p, flags); } -static __always_inline int -arch_test_and_clear_bit(unsigned int nr, volatile unsigned long *p) +/** + * clear_bit - Clears a bit in memory + * @nr: Bit to clear + * @addr: Address to start counting from + * + * clear_bit() is atomic and may not be reordered. However, it does + * not contain a memory barrier, so if it is used for locking purposes, + * you should call smp_mb__before_atomic() and/or smp_mb__after_atomic() + * in order to ensure changes are visible on other processors. 
+ */ +static inline void clear_bit(int nr, volatile unsigned long *addr) { - long old; unsigned long mask = BIT_MASK(nr); + unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr); + unsigned long flags; - p += BIT_WORD(nr); - if (!(READ_ONCE(*p) & mask)) - return 0; - - old = arch_atomic_long_fetch_andnot(mask, (atomic_long_t *)p); - return !!(old & mask); + _atomic_spin_lock_irqsave(p, flags); + *p &= ~mask; + _atomic_spin_unlock_irqrestore(p, flags); } -static __always_inline int -arch_test_and_change_bit(unsigned int nr, volatile unsigned long *p) +/** + * change_bit - Toggle a bit in memory + * @nr: Bit to change + * @addr: Address to start counting from + * + * change_bit() is atomic and may not be reordered. It may be + * reordered on other architectures than x86. + * Note that @nr may be almost arbitrarily large; this function is not + * restricted to acting on a single-word quantity. + */ +static inline void change_bit(int nr, volatile unsigned long *addr) { - long old; unsigned long mask = BIT_MASK(nr); + unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr); + unsigned long flags; - p += BIT_WORD(nr); - old = arch_atomic_long_fetch_xor(mask, (atomic_long_t *)p); - return !!(old & mask); + _atomic_spin_lock_irqsave(p, flags); + *p ^= mask; + _atomic_spin_unlock_irqrestore(p, flags); } -#include +/** + * test_and_set_bit - Set a bit and return its old value + * @nr: Bit to set + * @addr: Address to count from + * + * This operation is atomic and cannot be reordered. + * It may be reordered on other architectures than x86. + * It also implies a memory barrier. 
+ */ +static inline int test_and_set_bit(int nr, volatile unsigned long *addr) +{ + unsigned long mask = BIT_MASK(nr); + unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr); + unsigned long old; + unsigned long flags; + + _atomic_spin_lock_irqsave(p, flags); + old = *p; + *p = old | mask; + _atomic_spin_unlock_irqrestore(p, flags); + + return (old & mask) != 0; +} + +/** + * test_and_clear_bit - Clear a bit and return its old value + * @nr: Bit to clear + * @addr: Address to count from + * + * This operation is atomic and cannot be reordered. + * It can be reorderdered on other architectures other than x86. + * It also implies a memory barrier. + */ +static inline int test_and_clear_bit(int nr, volatile unsigned long *addr) +{ + unsigned long mask = BIT_MASK(nr); + unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr); + unsigned long old; + unsigned long flags; + + _atomic_spin_lock_irqsave(p, flags); + old = *p; + *p = old & ~mask; + _atomic_spin_unlock_irqrestore(p, flags); + + return (old & mask) != 0; +} + +/** + * test_and_change_bit - Change a bit and return its old value + * @nr: Bit to change + * @addr: Address to count from + * + * This operation is atomic and cannot be reordered. + * It also implies a memory barrier. 
+ */ +static inline int test_and_change_bit(int nr, volatile unsigned long *addr) +{ + unsigned long mask = BIT_MASK(nr); + unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr); + unsigned long old; + unsigned long flags; + + _atomic_spin_lock_irqsave(p, flags); + old = *p; + *p = old ^ mask; + _atomic_spin_unlock_irqrestore(p, flags); + + return (old & mask) != 0; +} #endif /* _ASM_GENERIC_BITOPS_ATOMIC_H */ diff --git a/include/asm-generic/bitops/builtin-__ffs.h b/include/asm-generic/bitops/builtin-__ffs.h index 87024da44d..90041e3a41 100644 --- a/include/asm-generic/bitops/builtin-__ffs.h +++ b/include/asm-generic/bitops/builtin-__ffs.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _ASM_GENERIC_BITOPS_BUILTIN___FFS_H_ #define _ASM_GENERIC_BITOPS_BUILTIN___FFS_H_ diff --git a/include/asm-generic/bitops/builtin-__fls.h b/include/asm-generic/bitops/builtin-__fls.h index 43a5aa9afb..0248f38663 100644 --- a/include/asm-generic/bitops/builtin-__fls.h +++ b/include/asm-generic/bitops/builtin-__fls.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _ASM_GENERIC_BITOPS_BUILTIN___FLS_H_ #define _ASM_GENERIC_BITOPS_BUILTIN___FLS_H_ diff --git a/include/asm-generic/bitops/builtin-ffs.h b/include/asm-generic/bitops/builtin-ffs.h index 7b12932904..064825829e 100644 --- a/include/asm-generic/bitops/builtin-ffs.h +++ b/include/asm-generic/bitops/builtin-ffs.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _ASM_GENERIC_BITOPS_BUILTIN_FFS_H_ #define _ASM_GENERIC_BITOPS_BUILTIN_FFS_H_ @@ -8,8 +7,11 @@ * * This is defined the same way as * the libc and compiler builtin ffs routines, therefore - * differs in spirit from ffz (man ffs). + * differs in spirit from the above ffz (man ffs). 
*/ -#define ffs(x) __builtin_ffs(x) +static __always_inline int ffs(int x) +{ + return __builtin_ffs(x); +} #endif diff --git a/include/asm-generic/bitops/builtin-fls.h b/include/asm-generic/bitops/builtin-fls.h index c8455cc288..eda652d0ac 100644 --- a/include/asm-generic/bitops/builtin-fls.h +++ b/include/asm-generic/bitops/builtin-fls.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _ASM_GENERIC_BITOPS_BUILTIN_FLS_H_ #define _ASM_GENERIC_BITOPS_BUILTIN_FLS_H_ @@ -9,7 +8,7 @@ * This is defined the same way as ffs. * Note fls(0) = 0, fls(1) = 1, fls(0x80000000) = 32. */ -static __always_inline int fls(unsigned int x) +static __always_inline int fls(int x) { return x ? sizeof(x) * 8 - __builtin_clz(x) : 0; } diff --git a/include/asm-generic/bitops/const_hweight.h b/include/asm-generic/bitops/const_hweight.h index 149faeeeea..0a7e066234 100644 --- a/include/asm-generic/bitops/const_hweight.h +++ b/include/asm-generic/bitops/const_hweight.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _ASM_GENERIC_BITOPS_CONST_HWEIGHT_H_ #define _ASM_GENERIC_BITOPS_CONST_HWEIGHT_H_ diff --git a/include/asm-generic/bitops/ext2-atomic-setbit.h b/include/asm-generic/bitops/ext2-atomic-setbit.h index b041cbf0d8..5a0997857b 100644 --- a/include/asm-generic/bitops/ext2-atomic-setbit.h +++ b/include/asm-generic/bitops/ext2-atomic-setbit.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _ASM_GENERIC_BITOPS_EXT2_ATOMIC_SETBIT_H_ #define _ASM_GENERIC_BITOPS_EXT2_ATOMIC_SETBIT_H_ diff --git a/include/asm-generic/bitops/ext2-atomic.h b/include/asm-generic/bitops/ext2-atomic.h index 0cfc3180b0..87f0f109d7 100644 --- a/include/asm-generic/bitops/ext2-atomic.h +++ b/include/asm-generic/bitops/ext2-atomic.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _ASM_GENERIC_BITOPS_EXT2_ATOMIC_H_ #define _ASM_GENERIC_BITOPS_EXT2_ATOMIC_H_ diff --git a/include/asm-generic/bitops/ffs.h b/include/asm-generic/bitops/ffs.h index 
323fd5d6ae..fbbb43af7d 100644 --- a/include/asm-generic/bitops/ffs.h +++ b/include/asm-generic/bitops/ffs.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _ASM_GENERIC_BITOPS_FFS_H_ #define _ASM_GENERIC_BITOPS_FFS_H_ @@ -8,7 +7,7 @@ * * This is defined the same way as * the libc and compiler builtin ffs routines, therefore - * differs in spirit from ffz (man ffs). + * differs in spirit from the above ffz (man ffs). */ static inline int ffs(int x) { diff --git a/include/asm-generic/bitops/ffz.h b/include/asm-generic/bitops/ffz.h index 0d010085fd..6744bd4cdf 100644 --- a/include/asm-generic/bitops/ffz.h +++ b/include/asm-generic/bitops/ffz.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _ASM_GENERIC_BITOPS_FFZ_H_ #define _ASM_GENERIC_BITOPS_FFZ_H_ diff --git a/include/asm-generic/bitops/find.h b/include/asm-generic/bitops/find.h index 0d132ee2a2..998d4d544f 100644 --- a/include/asm-generic/bitops/find.h +++ b/include/asm-generic/bitops/find.h @@ -1,14 +1,6 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _ASM_GENERIC_BITOPS_FIND_H_ #define _ASM_GENERIC_BITOPS_FIND_H_ -extern unsigned long _find_next_bit(const unsigned long *addr1, - const unsigned long *addr2, unsigned long nbits, - unsigned long start, unsigned long invert, unsigned long le); -extern unsigned long _find_first_bit(const unsigned long *addr, unsigned long size); -extern unsigned long _find_first_zero_bit(const unsigned long *addr, unsigned long size); -extern unsigned long _find_last_bit(const unsigned long *addr, unsigned long size); - #ifndef find_next_bit /** * find_next_bit - find the next set bit in a memory region @@ -19,52 +11,8 @@ extern unsigned long _find_last_bit(const unsigned long *addr, unsigned long siz * Returns the bit number for the next set bit * If no bits are set, returns @size. 
*/ -static inline -unsigned long find_next_bit(const unsigned long *addr, unsigned long size, - unsigned long offset) -{ - if (small_const_nbits(size)) { - unsigned long val; - - if (unlikely(offset >= size)) - return size; - - val = *addr & GENMASK(size - 1, offset); - return val ? __ffs(val) : size; - } - - return _find_next_bit(addr, NULL, size, offset, 0UL, 0); -} -#endif - -#ifndef find_next_and_bit -/** - * find_next_and_bit - find the next set bit in both memory regions - * @addr1: The first address to base the search on - * @addr2: The second address to base the search on - * @offset: The bitnumber to start searching at - * @size: The bitmap size in bits - * - * Returns the bit number for the next set bit - * If no bits are set, returns @size. - */ -static inline -unsigned long find_next_and_bit(const unsigned long *addr1, - const unsigned long *addr2, unsigned long size, - unsigned long offset) -{ - if (small_const_nbits(size)) { - unsigned long val; - - if (unlikely(offset >= size)) - return size; - - val = *addr1 & *addr2 & GENMASK(size - 1, offset); - return val ? __ffs(val) : size; - } - - return _find_next_bit(addr1, addr2, size, offset, 0UL, 0); -} +extern unsigned long find_next_bit(const unsigned long *addr, unsigned long + size, unsigned long offset); #endif #ifndef find_next_zero_bit @@ -77,22 +25,8 @@ unsigned long find_next_and_bit(const unsigned long *addr1, * Returns the bit number of the next zero bit * If no bits are zero, returns @size. */ -static inline -unsigned long find_next_zero_bit(const unsigned long *addr, unsigned long size, - unsigned long offset) -{ - if (small_const_nbits(size)) { - unsigned long val; - - if (unlikely(offset >= size)) - return size; - - val = *addr | ~GENMASK(size - 1, offset); - return val == ~0UL ? 
size : ffz(val); - } - - return _find_next_bit(addr, NULL, size, offset, ~0UL, 0); -} +extern unsigned long find_next_zero_bit(const unsigned long *addr, unsigned + long size, unsigned long offset); #endif #ifdef CONFIG_GENERIC_FIND_FIRST_BIT @@ -105,17 +39,8 @@ unsigned long find_next_zero_bit(const unsigned long *addr, unsigned long size, * Returns the bit number of the first set bit. * If no bits are set, returns @size. */ -static inline -unsigned long find_first_bit(const unsigned long *addr, unsigned long size) -{ - if (small_const_nbits(size)) { - unsigned long val = *addr & GENMASK(size - 1, 0); - - return val ? __ffs(val) : size; - } - - return _find_first_bit(addr, size); -} +extern unsigned long find_first_bit(const unsigned long *addr, + unsigned long size); /** * find_first_zero_bit - find the first cleared bit in a memory region @@ -125,64 +50,13 @@ unsigned long find_first_bit(const unsigned long *addr, unsigned long size) * Returns the bit number of the first cleared bit. * If no bits are zero, returns @size. */ -static inline -unsigned long find_first_zero_bit(const unsigned long *addr, unsigned long size) -{ - if (small_const_nbits(size)) { - unsigned long val = *addr | ~GENMASK(size - 1, 0); - - return val == ~0UL ? size : ffz(val); - } - - return _find_first_zero_bit(addr, size); -} +extern unsigned long find_first_zero_bit(const unsigned long *addr, + unsigned long size); #else /* CONFIG_GENERIC_FIND_FIRST_BIT */ -#ifndef find_first_bit #define find_first_bit(addr, size) find_next_bit((addr), (size), 0) -#endif -#ifndef find_first_zero_bit #define find_first_zero_bit(addr, size) find_next_zero_bit((addr), (size), 0) -#endif #endif /* CONFIG_GENERIC_FIND_FIRST_BIT */ -#ifndef find_last_bit -/** - * find_last_bit - find the last set bit in a memory region - * @addr: The address to start the search at - * @size: The number of bits to search - * - * Returns the bit number of the last set bit, or size. 
- */ -static inline -unsigned long find_last_bit(const unsigned long *addr, unsigned long size) -{ - if (small_const_nbits(size)) { - unsigned long val = *addr & GENMASK(size - 1, 0); - - return val ? __fls(val) : size; - } - - return _find_last_bit(addr, size); -} -#endif - -/** - * find_next_clump8 - find next 8-bit clump with set bits in a memory region - * @clump: location to store copy of found clump - * @addr: address to base the search on - * @size: bitmap size in number of bits - * @offset: bit offset at which to start searching - * - * Returns the bit offset for the next set clump; the found clump value is - * copied to the location pointed by @clump. If no bits are set, returns @size. - */ -extern unsigned long find_next_clump8(unsigned long *clump, - const unsigned long *addr, - unsigned long size, unsigned long offset); - -#define find_first_clump8(clump, bits, size) \ - find_next_clump8((clump), (bits), (size), 0) - #endif /*_ASM_GENERIC_BITOPS_FIND_H_ */ diff --git a/include/asm-generic/bitops/fls.h b/include/asm-generic/bitops/fls.h index b168bb10e1..dad6c713ec 100644 --- a/include/asm-generic/bitops/fls.h +++ b/include/asm-generic/bitops/fls.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _ASM_GENERIC_BITOPS_FLS_H_ #define _ASM_GENERIC_BITOPS_FLS_H_ @@ -10,7 +9,7 @@ * Note fls(0) = 0, fls(1) = 1, fls(0x80000000) = 32. */ -static __always_inline int fls(unsigned int x) +static __always_inline int __intentional_overflow(-1) fls(int x) { int r = 32; diff --git a/include/asm-generic/bitops/fls64.h b/include/asm-generic/bitops/fls64.h index 866f2b2304..3d40e14aad 100644 --- a/include/asm-generic/bitops/fls64.h +++ b/include/asm-generic/bitops/fls64.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _ASM_GENERIC_BITOPS_FLS64_H_ #define _ASM_GENERIC_BITOPS_FLS64_H_ @@ -16,7 +15,7 @@ * at position 64. 
*/ #if BITS_PER_LONG == 32 -static __always_inline int fls64(__u64 x) +static __always_inline int __intentional_overflow(-1) fls64(__u64 x) { __u32 h = x >> 32; if (h) @@ -24,7 +23,7 @@ static __always_inline int fls64(__u64 x) return fls(x); } #elif BITS_PER_LONG == 64 -static __always_inline int fls64(__u64 x) +static __always_inline int __intentional_overflow(-1) fls64(__u64 x) { if (x == 0) return 0; diff --git a/include/asm-generic/bitops/hweight.h b/include/asm-generic/bitops/hweight.h index 6bf1bba835..a94d6519c7 100644 --- a/include/asm-generic/bitops/hweight.h +++ b/include/asm-generic/bitops/hweight.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _ASM_GENERIC_BITOPS_HWEIGHT_H_ #define _ASM_GENERIC_BITOPS_HWEIGHT_H_ diff --git a/include/asm-generic/bitops/le.h b/include/asm-generic/bitops/le.h index 5a28629cbf..61731543c0 100644 --- a/include/asm-generic/bitops/le.h +++ b/include/asm-generic/bitops/le.h @@ -1,11 +1,8 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _ASM_GENERIC_BITOPS_LE_H_ #define _ASM_GENERIC_BITOPS_LE_H_ -#include #include #include -#include #if defined(__LITTLE_ENDIAN) @@ -34,41 +31,13 @@ static inline unsigned long find_first_zero_bit_le(const void *addr, #define BITOP_LE_SWIZZLE ((BITS_PER_LONG-1) & ~0x7) #ifndef find_next_zero_bit_le -static inline -unsigned long find_next_zero_bit_le(const void *addr, unsigned - long size, unsigned long offset) -{ - if (small_const_nbits(size)) { - unsigned long val = *(const unsigned long *)addr; - - if (unlikely(offset >= size)) - return size; - - val = swab(val) | ~GENMASK(size - 1, offset); - return val == ~0UL ? 
size : ffz(val); - } - - return _find_next_bit(addr, NULL, size, offset, ~0UL, 1); -} +extern unsigned long find_next_zero_bit_le(const void *addr, + unsigned long size, unsigned long offset); #endif #ifndef find_next_bit_le -static inline -unsigned long find_next_bit_le(const void *addr, unsigned - long size, unsigned long offset) -{ - if (small_const_nbits(size)) { - unsigned long val = *(const unsigned long *)addr; - - if (unlikely(offset >= size)) - return size; - - val = swab(val) & GENMASK(size - 1, offset); - return val ? __ffs(val) : size; - } - - return _find_next_bit(addr, NULL, size, offset, 0UL, 1); -} +extern unsigned long find_next_bit_le(const void *addr, + unsigned long size, unsigned long offset); #endif #ifndef find_first_zero_bit_le diff --git a/include/asm-generic/bitops/lock.h b/include/asm-generic/bitops/lock.h index 630f2f6b95..8ef0ccbf81 100644 --- a/include/asm-generic/bitops/lock.h +++ b/include/asm-generic/bitops/lock.h @@ -1,51 +1,31 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _ASM_GENERIC_BITOPS_LOCK_H_ #define _ASM_GENERIC_BITOPS_LOCK_H_ -#include -#include -#include - /** - * arch_test_and_set_bit_lock - Set a bit and return its old value, for lock + * test_and_set_bit_lock - Set a bit and return its old value, for lock * @nr: Bit to set * @addr: Address to count from * - * This operation is atomic and provides acquire barrier semantics if - * the returned value is 0. + * This operation is atomic and provides acquire barrier semantics. * It can be used to implement bit locks. 
*/ -static __always_inline int -arch_test_and_set_bit_lock(unsigned int nr, volatile unsigned long *p) -{ - long old; - unsigned long mask = BIT_MASK(nr); - - p += BIT_WORD(nr); - if (READ_ONCE(*p) & mask) - return 1; - - old = arch_atomic_long_fetch_or_acquire(mask, (atomic_long_t *)p); - return !!(old & mask); -} - +#define test_and_set_bit_lock(nr, addr) test_and_set_bit(nr, addr) /** - * arch_clear_bit_unlock - Clear a bit in memory, for unlock + * clear_bit_unlock - Clear a bit in memory, for unlock * @nr: the bit to set * @addr: the address to start counting from * * This operation is atomic and provides release barrier semantics. */ -static __always_inline void -arch_clear_bit_unlock(unsigned int nr, volatile unsigned long *p) -{ - p += BIT_WORD(nr); - arch_atomic_long_fetch_andnot_release(BIT_MASK(nr), (atomic_long_t *)p); -} +#define clear_bit_unlock(nr, addr) \ +do { \ + smp_mb__before_atomic(); \ + clear_bit(nr, addr); \ +} while (0) /** - * arch___clear_bit_unlock - Clear a bit in memory, for unlock + * __clear_bit_unlock - Clear a bit in memory, for unlock * @nr: the bit to set * @addr: the address to start counting from * @@ -55,40 +35,11 @@ arch_clear_bit_unlock(unsigned int nr, volatile unsigned long *p) * * See for example x86's implementation. */ -static inline void -arch___clear_bit_unlock(unsigned int nr, volatile unsigned long *p) -{ - unsigned long old; - - p += BIT_WORD(nr); - old = READ_ONCE(*p); - old &= ~BIT_MASK(nr); - arch_atomic_long_set_release((atomic_long_t *)p, old); -} - -/** - * arch_clear_bit_unlock_is_negative_byte - Clear a bit in memory and test if bottom - * byte is negative, for unlock. 
- * @nr: the bit to clear - * @addr: the address to start counting from - * - * This is a bit of a one-trick-pony for the filemap code, which clears - * PG_locked and tests PG_waiters, - */ -#ifndef arch_clear_bit_unlock_is_negative_byte -static inline bool arch_clear_bit_unlock_is_negative_byte(unsigned int nr, - volatile unsigned long *p) -{ - long old; - unsigned long mask = BIT_MASK(nr); - - p += BIT_WORD(nr); - old = arch_atomic_long_fetch_andnot_release(mask, (atomic_long_t *)p); - return !!(old & BIT(7)); -} -#define arch_clear_bit_unlock_is_negative_byte arch_clear_bit_unlock_is_negative_byte -#endif - -#include +#define __clear_bit_unlock(nr, addr) \ +do { \ + smp_mb__before_atomic(); \ + clear_bit(nr, addr); \ +} while (0) #endif /* _ASM_GENERIC_BITOPS_LOCK_H_ */ + diff --git a/include/asm-generic/bitops/non-atomic.h b/include/asm-generic/bitops/non-atomic.h index 078cc68be2..697cc2b7e0 100644 --- a/include/asm-generic/bitops/non-atomic.h +++ b/include/asm-generic/bitops/non-atomic.h @@ -1,11 +1,10 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _ASM_GENERIC_BITOPS_NON_ATOMIC_H_ #define _ASM_GENERIC_BITOPS_NON_ATOMIC_H_ #include /** - * arch___set_bit - Set a bit in memory + * __set_bit - Set a bit in memory * @nr: the bit to set * @addr: the address to start counting from * @@ -13,28 +12,24 @@ * If it's called on the same region of memory simultaneously, the effect * may be that only one operation succeeds. 
*/ -static __always_inline void -arch___set_bit(unsigned int nr, volatile unsigned long *addr) +static inline void __set_bit(int nr, volatile unsigned long *addr) { unsigned long mask = BIT_MASK(nr); unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr); *p |= mask; } -#define __set_bit arch___set_bit -static __always_inline void -arch___clear_bit(unsigned int nr, volatile unsigned long *addr) +static inline void __clear_bit(int nr, volatile unsigned long *addr) { unsigned long mask = BIT_MASK(nr); unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr); *p &= ~mask; } -#define __clear_bit arch___clear_bit /** - * arch___change_bit - Toggle a bit in memory + * __change_bit - Toggle a bit in memory * @nr: the bit to change * @addr: the address to start counting from * @@ -42,18 +37,16 @@ arch___clear_bit(unsigned int nr, volatile unsigned long *addr) * If it's called on the same region of memory simultaneously, the effect * may be that only one operation succeeds. */ -static __always_inline -void arch___change_bit(unsigned int nr, volatile unsigned long *addr) +static inline void __change_bit(int nr, volatile unsigned long *addr) { unsigned long mask = BIT_MASK(nr); unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr); *p ^= mask; } -#define __change_bit arch___change_bit /** - * arch___test_and_set_bit - Set a bit and return its old value + * __test_and_set_bit - Set a bit and return its old value * @nr: Bit to set * @addr: Address to count from * @@ -61,8 +54,7 @@ void arch___change_bit(unsigned int nr, volatile unsigned long *addr) * If two examples of this operation race, one can appear to succeed * but actually fail. You must protect multiple accesses with a lock. 
*/ -static __always_inline int -arch___test_and_set_bit(unsigned int nr, volatile unsigned long *addr) +static inline int __test_and_set_bit(int nr, volatile unsigned long *addr) { unsigned long mask = BIT_MASK(nr); unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr); @@ -71,10 +63,9 @@ arch___test_and_set_bit(unsigned int nr, volatile unsigned long *addr) *p = old | mask; return (old & mask) != 0; } -#define __test_and_set_bit arch___test_and_set_bit /** - * arch___test_and_clear_bit - Clear a bit and return its old value + * __test_and_clear_bit - Clear a bit and return its old value * @nr: Bit to clear * @addr: Address to count from * @@ -82,8 +73,7 @@ arch___test_and_set_bit(unsigned int nr, volatile unsigned long *addr) * If two examples of this operation race, one can appear to succeed * but actually fail. You must protect multiple accesses with a lock. */ -static __always_inline int -arch___test_and_clear_bit(unsigned int nr, volatile unsigned long *addr) +static inline int __test_and_clear_bit(int nr, volatile unsigned long *addr) { unsigned long mask = BIT_MASK(nr); unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr); @@ -92,11 +82,10 @@ arch___test_and_clear_bit(unsigned int nr, volatile unsigned long *addr) *p = old & ~mask; return (old & mask) != 0; } -#define __test_and_clear_bit arch___test_and_clear_bit /* WARNING: non atomic and it can be reordered! 
*/ -static __always_inline int -arch___test_and_change_bit(unsigned int nr, volatile unsigned long *addr) +static inline int __test_and_change_bit(int nr, + volatile unsigned long *addr) { unsigned long mask = BIT_MASK(nr); unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr); @@ -105,18 +94,15 @@ arch___test_and_change_bit(unsigned int nr, volatile unsigned long *addr) *p = old ^ mask; return (old & mask) != 0; } -#define __test_and_change_bit arch___test_and_change_bit /** - * arch_test_bit - Determine whether a bit is set + * test_bit - Determine whether a bit is set * @nr: bit number to test * @addr: Address to start counting from */ -static __always_inline int -arch_test_bit(unsigned int nr, const volatile unsigned long *addr) +static inline int test_bit(int nr, const volatile unsigned long *addr) { return 1UL & (addr[BIT_WORD(nr)] >> (nr & (BITS_PER_LONG-1))); } -#define test_bit arch_test_bit #endif /* _ASM_GENERIC_BITOPS_NON_ATOMIC_H_ */ diff --git a/include/asm-generic/bitops/sched.h b/include/asm-generic/bitops/sched.h index 86470cfcef..604fab7031 100644 --- a/include/asm-generic/bitops/sched.h +++ b/include/asm-generic/bitops/sched.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _ASM_GENERIC_BITOPS_SCHED_H_ #define _ASM_GENERIC_BITOPS_SCHED_H_ diff --git a/include/asm-generic/bitsperlong.h b/include/asm-generic/bitsperlong.h index 1023e2a4bd..d1d70aa190 100644 --- a/include/asm-generic/bitsperlong.h +++ b/include/asm-generic/bitsperlong.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef __ASM_GENERIC_BITS_PER_LONG #define __ASM_GENERIC_BITS_PER_LONG @@ -23,16 +22,4 @@ #define BITS_PER_LONG_LONG 64 #endif -/* - * small_const_nbits(n) is true precisely when it is known at compile-time - * that BITMAP_SIZE(n) is 1, i.e. 1 <= n <= BITS_PER_LONG. This allows - * various bit/bitmap APIs to provide a fast inline implementation. 
Bitmaps - * of size 0 are very rare, and a compile-time-known-size 0 is most likely - * a sign of error. They will be handled correctly by the bit/bitmap APIs, - * but using the out-of-line functions, so that the inline implementations - * can unconditionally dereference the pointer(s). - */ -#define small_const_nbits(nbits) \ - (__builtin_constant_p(nbits) && (nbits) <= BITS_PER_LONG && (nbits) > 0) - #endif /* __ASM_GENERIC_BITS_PER_LONG */ diff --git a/include/asm-generic/bug.h b/include/asm-generic/bug.h index edb0e2a602..ca27641274 100644 --- a/include/asm-generic/bug.h +++ b/include/asm-generic/bug.h @@ -1,25 +1,16 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _ASM_GENERIC_BUG_H #define _ASM_GENERIC_BUG_H #include -#include -#include - -#define CUT_HERE "------------[ cut here ]------------\n" #ifdef CONFIG_GENERIC_BUG #define BUGFLAG_WARNING (1 << 0) -#define BUGFLAG_ONCE (1 << 1) -#define BUGFLAG_DONE (1 << 2) -#define BUGFLAG_NO_CUT_HERE (1 << 3) /* CUT_HERE already sent */ -#define BUGFLAG_TAINT(taint) ((taint) << 8) +#define BUGFLAG_TAINT(taint) (BUGFLAG_WARNING | ((taint) << 8)) #define BUG_GET_TAINT(bug) ((bug)->flags >> 8) #endif #ifndef __ASSEMBLY__ -#include -#include +#include #ifdef CONFIG_BUG @@ -56,7 +47,6 @@ struct bug_entry { #ifndef HAVE_ARCH_BUG #define BUG() do { \ printk("BUG: failure at %s:%d/%s()!\n", __FILE__, __LINE__, __func__); \ - barrier_before_unreachable(); \ panic("BUG!"); \ } while (0) #endif @@ -67,52 +57,34 @@ struct bug_entry { /* * WARN(), WARN_ON(), WARN_ON_ONCE, and so on can be used to report - * significant kernel issues that need prompt attention if they should ever - * appear at runtime. - * - * Do not use these macros when checking for invalid external inputs - * (e.g. invalid system call arguments, or invalid data coming from - * network/devices), and on transient conditions like ENOMEM or EAGAIN. - * These macros should be used for recoverable kernel issues only. 
- * For invalid external inputs, transient conditions, etc use - * pr_err[_once/_ratelimited]() followed by dump_stack(), if necessary. - * Do not include "BUG"/"WARNING" in format strings manually to make these - * conditions distinguishable from kernel issues. - * - * Use the versions with printk format strings to provide better diagnostics. + * significant issues that need prompt attention if they should ever + * appear at runtime. Use the versions with printk format strings + * to provide better diagnostics. */ -#ifndef __WARN_FLAGS -extern __printf(4, 5) -void warn_slowpath_fmt(const char *file, const int line, unsigned taint, +#ifndef __WARN_TAINT +extern __printf(3, 4) __nocapture(1) +void warn_slowpath_fmt(const char *file, const int line, const char *fmt, ...); -#define __WARN() __WARN_printf(TAINT_WARN, NULL) -#define __WARN_printf(taint, arg...) do { \ - instrumentation_begin(); \ - warn_slowpath_fmt(__FILE__, __LINE__, taint, arg); \ - instrumentation_end(); \ - } while (0) +extern __printf(4, 5) __nocapture(1) +void warn_slowpath_fmt_taint(const char *file, const int line, unsigned taint, + const char *fmt, ...); +extern __nocapture(1) void warn_slowpath_null(const char *file, const int line); +#define WANT_WARN_ON_SLOWPATH +#define __WARN() warn_slowpath_null(__FILE__, __LINE__) +#define __WARN_printf(arg...) warn_slowpath_fmt(__FILE__, __LINE__, arg) +#define __WARN_printf_taint(taint, arg...) \ + warn_slowpath_fmt_taint(__FILE__, __LINE__, taint, arg) #else -extern __printf(1, 2) void __warn_printk(const char *fmt, ...); -#define __WARN() __WARN_FLAGS(BUGFLAG_TAINT(TAINT_WARN)) -#define __WARN_printf(taint, arg...) 
do { \ - instrumentation_begin(); \ - __warn_printk(arg); \ - __WARN_FLAGS(BUGFLAG_NO_CUT_HERE | BUGFLAG_TAINT(taint));\ - instrumentation_end(); \ - } while (0) -#define WARN_ON_ONCE(condition) ({ \ - int __ret_warn_on = !!(condition); \ - if (unlikely(__ret_warn_on)) \ - __WARN_FLAGS(BUGFLAG_ONCE | \ - BUGFLAG_TAINT(TAINT_WARN)); \ - unlikely(__ret_warn_on); \ -}) +#define __WARN() __WARN_TAINT(TAINT_WARN) +#define __WARN_printf(arg...) do { printk(arg); __WARN(); } while (0) +#define __WARN_printf_taint(taint, arg...) \ + do { printk(arg); __WARN_TAINT(taint); } while (0) #endif /* used internally by panic.c */ struct warn_args; -struct pt_regs; +__nocapture(1, 0) void __warn(const char *file, int line, void *caller, unsigned taint, struct pt_regs *regs, struct warn_args *args); @@ -126,10 +98,10 @@ void __warn(const char *file, int line, void *caller, unsigned taint, #endif #ifndef WARN -#define WARN(condition, format...) ({ \ +#define WARN(condition, format...) ({ \ int __ret_warn_on = !!(condition); \ if (unlikely(__ret_warn_on)) \ - __WARN_printf(TAINT_WARN, format); \ + __WARN_printf(format); \ unlikely(__ret_warn_on); \ }) #endif @@ -137,20 +109,42 @@ void __warn(const char *file, int line, void *caller, unsigned taint, #define WARN_TAINT(condition, taint, format...) ({ \ int __ret_warn_on = !!(condition); \ if (unlikely(__ret_warn_on)) \ - __WARN_printf(taint, format); \ + __WARN_printf_taint(taint, format); \ unlikely(__ret_warn_on); \ }) -#ifndef WARN_ON_ONCE -#define WARN_ON_ONCE(condition) \ - DO_ONCE_LITE_IF(condition, WARN_ON, 1) -#endif +#define WARN_ON_ONCE(condition) ({ \ + static bool __section(.data.unlikely) __warned; \ + int __ret_warn_once = !!(condition); \ + \ + if (unlikely(__ret_warn_once && !__warned)) { \ + __warned = true; \ + WARN_ON(1); \ + } \ + unlikely(__ret_warn_once); \ +}) -#define WARN_ONCE(condition, format...) \ - DO_ONCE_LITE_IF(condition, WARN, 1, format) +#define WARN_ONCE(condition, format...) 
({ \ + static bool __section(.data.unlikely) __warned; \ + int __ret_warn_once = !!(condition); \ + \ + if (unlikely(__ret_warn_once && !__warned)) { \ + __warned = true; \ + WARN(1, format); \ + } \ + unlikely(__ret_warn_once); \ +}) -#define WARN_TAINT_ONCE(condition, taint, format...) \ - DO_ONCE_LITE_IF(condition, WARN_TAINT, 1, taint, format) +#define WARN_TAINT_ONCE(condition, taint, format...) ({ \ + static bool __section(.data.unlikely) __warned; \ + int __ret_warn_once = !!(condition); \ + \ + if (unlikely(__ret_warn_once && !__warned)) { \ + __warned = true; \ + WARN_TAINT(1, taint, format); \ + } \ + unlikely(__ret_warn_once); \ +}) #else /* !CONFIG_BUG */ #ifndef HAVE_ARCH_BUG @@ -186,6 +180,9 @@ void __warn(const char *file, int line, void *caller, unsigned taint, /* * WARN_ON_SMP() is for cases that the warning is either * meaningless for !SMP or may even cause failures. + * This is usually used for cases that we have + * WARN_ON(!spin_is_locked(&lock)) checks, as spin_is_locked() + * returns 0 for uniprocessor settings. * It can also be used with values that are only defined * on SMP: * @@ -219,22 +216,6 @@ void __warn(const char *file, int line, void *caller, unsigned taint, # define WARN_ON_SMP(x) ({0;}) #endif -/* - * WARN_ON_FUNCTION_MISMATCH() warns if a value doesn't match a - * function address, and can be useful for catching issues with - * callback functions, for example. - * - * With CONFIG_CFI_CLANG, the warning is disabled because the - * compiler replaces function addresses taken in C code with - * local jump table addresses, which breaks cross-module function - * address equality. 
- */ -#if defined(CONFIG_CFI_CLANG) && defined(CONFIG_MODULES) -# define WARN_ON_FUNCTION_MISMATCH(x, fn) ({ 0; }) -#else -# define WARN_ON_FUNCTION_MISMATCH(x, fn) WARN_ON_ONCE((x) != (fn)) -#endif - #endif /* __ASSEMBLY__ */ #endif diff --git a/include/asm-generic/bugs.h b/include/asm-generic/bugs.h index 69021830f0..6c4f62ea71 100644 --- a/include/asm-generic/bugs.h +++ b/include/asm-generic/bugs.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef __ASM_GENERIC_BUGS_H #define __ASM_GENERIC_BUGS_H /* diff --git a/include/asm-generic/cache.h b/include/asm-generic/cache.h index 60386e1642..e04c5c9490 100644 --- a/include/asm-generic/cache.h +++ b/include/asm-generic/cache.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef __ASM_GENERIC_CACHE_H #define __ASM_GENERIC_CACHE_H /* @@ -7,7 +6,7 @@ * cache lines need to provide their own cache.h. */ -#define L1_CACHE_SHIFT 5 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT) +#define L1_CACHE_SHIFT 5UL +#define L1_CACHE_BYTES (1UL << L1_CACHE_SHIFT) #endif /* __ASM_GENERIC_CACHE_H */ diff --git a/include/asm-generic/cacheflush.h b/include/asm-generic/cacheflush.h index 4a674db4e1..87bc536ccd 100644 --- a/include/asm-generic/cacheflush.h +++ b/include/asm-generic/cacheflush.h @@ -1,118 +1,34 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -#ifndef _ASM_GENERIC_CACHEFLUSH_H -#define _ASM_GENERIC_CACHEFLUSH_H +#ifndef __ASM_CACHEFLUSH_H +#define __ASM_CACHEFLUSH_H -struct mm_struct; -struct vm_area_struct; -struct page; -struct address_space; +/* Keep includes the same across arches. 
*/ +#include /* * The cache doesn't need to be flushed when TLB entries change when * the cache is mapped to physical memory, not virtual memory */ -#ifndef flush_cache_all -static inline void flush_cache_all(void) -{ -} -#endif - -#ifndef flush_cache_mm -static inline void flush_cache_mm(struct mm_struct *mm) -{ -} -#endif - -#ifndef flush_cache_dup_mm -static inline void flush_cache_dup_mm(struct mm_struct *mm) -{ -} -#endif - -#ifndef flush_cache_range -static inline void flush_cache_range(struct vm_area_struct *vma, - unsigned long start, - unsigned long end) -{ -} -#endif - -#ifndef flush_cache_page -static inline void flush_cache_page(struct vm_area_struct *vma, - unsigned long vmaddr, - unsigned long pfn) -{ -} -#endif - -#ifndef ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE -static inline void flush_dcache_page(struct page *page) -{ -} +#define flush_cache_all() do { } while (0) +#define flush_cache_mm(mm) do { } while (0) +#define flush_cache_dup_mm(mm) do { } while (0) +#define flush_cache_range(vma, start, end) do { } while (0) +#define flush_cache_page(vma, vmaddr, pfn) do { } while (0) #define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 0 -#endif +#define flush_dcache_page(page) do { } while (0) +#define flush_dcache_mmap_lock(mapping) do { } while (0) +#define flush_dcache_mmap_unlock(mapping) do { } while (0) +#define flush_icache_range(start, end) do { } while (0) +#define flush_icache_page(vma,pg) do { } while (0) +#define flush_icache_user_range(vma,pg,adr,len) do { } while (0) +#define flush_cache_vmap(start, end) do { } while (0) +#define flush_cache_vunmap(start, end) do { } while (0) - -#ifndef flush_dcache_mmap_lock -static inline void flush_dcache_mmap_lock(struct address_space *mapping) -{ -} -#endif - -#ifndef flush_dcache_mmap_unlock -static inline void flush_dcache_mmap_unlock(struct address_space *mapping) -{ -} -#endif - -#ifndef flush_icache_range -static inline void flush_icache_range(unsigned long start, unsigned long end) -{ -} -#endif - -#ifndef 
flush_icache_user_range -#define flush_icache_user_range flush_icache_range -#endif - -#ifndef flush_icache_page -static inline void flush_icache_page(struct vm_area_struct *vma, - struct page *page) -{ -} -#endif - -#ifndef flush_icache_user_page -static inline void flush_icache_user_page(struct vm_area_struct *vma, - struct page *page, - unsigned long addr, int len) -{ -} -#endif - -#ifndef flush_cache_vmap -static inline void flush_cache_vmap(unsigned long start, unsigned long end) -{ -} -#endif - -#ifndef flush_cache_vunmap -static inline void flush_cache_vunmap(unsigned long start, unsigned long end) -{ -} -#endif - -#ifndef copy_to_user_page -#define copy_to_user_page(vma, page, vaddr, dst, src, len) \ +#define copy_to_user_page(vma, page, vaddr, dst, src, len) \ do { \ memcpy(dst, src, len); \ - flush_icache_user_page(vma, page, vaddr, len); \ + flush_icache_user_range(vma, page, vaddr, len); \ } while (0) -#endif - -#ifndef copy_from_user_page #define copy_from_user_page(vma, page, vaddr, dst, src, len) \ memcpy(dst, src, len) -#endif -#endif /* _ASM_GENERIC_CACHEFLUSH_H */ +#endif /* __ASM_CACHEFLUSH_H */ diff --git a/include/asm-generic/checksum.h b/include/asm-generic/checksum.h index 43e18db89c..3150cbd8eb 100644 --- a/include/asm-generic/checksum.h +++ b/include/asm-generic/checksum.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef __ASM_GENERIC_CHECKSUM_H #define __ASM_GENERIC_CHECKSUM_H @@ -16,6 +15,29 @@ */ extern __wsum csum_partial(const void *buff, int len, __wsum sum); +/* + * the same as csum_partial, but copies from src while it + * checksums + * + * here even more important to align src and dst on a 32-bit (or even + * better 64-bit) boundary + */ +extern __wsum csum_partial_copy(const void *src, void *dst, int len, __wsum sum); + +/* + * the same as csum_partial_copy, but copies from user space. 
+ * + * here even more important to align src and dst on a 32-bit (or even + * better 64-bit) boundary + */ +extern __wsum csum_partial_copy_from_user(const void __user *src, void *dst, + int len, __wsum sum, int *csum_err); + +#ifndef csum_partial_copy_nocheck +#define csum_partial_copy_nocheck(src, dst, len, sum) \ + csum_partial_copy((src), (dst), (len), (sum)) +#endif + #ifndef ip_fast_csum /* * This is a version of ip_compute_csum() optimized for IP headers, diff --git a/include/asm-generic/clkdev.h b/include/asm-generic/clkdev.h new file mode 100644 index 0000000000..4ff334749e --- /dev/null +++ b/include/asm-generic/clkdev.h @@ -0,0 +1,30 @@ +/* + * include/asm-generic/clkdev.h + * + * Based on the ARM clkdev.h: + * Copyright (C) 2008 Russell King. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * Helper for the clk API to assist looking up a struct clk. 
+ */ +#ifndef __ASM_CLKDEV_H +#define __ASM_CLKDEV_H + +#include + +#ifndef CONFIG_COMMON_CLK +struct clk; + +static inline int __clk_get(struct clk *clk) { return 1; } +static inline void __clk_put(struct clk *clk) { } +#endif + +static inline struct clk_lookup_alloc *__clkdev_alloc(size_t size) +{ + return kzalloc(size, GFP_KERNEL); +} + +#endif diff --git a/include/asm-generic/cmpxchg-local.h b/include/asm-generic/cmpxchg-local.h index f17f14f84d..70bef78912 100644 --- a/include/asm-generic/cmpxchg-local.h +++ b/include/asm-generic/cmpxchg-local.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef __ASM_GENERIC_CMPXCHG_LOCAL_H #define __ASM_GENERIC_CMPXCHG_LOCAL_H diff --git a/include/asm-generic/cmpxchg.h b/include/asm-generic/cmpxchg.h index dca4419922..e5f9080e8e 100644 --- a/include/asm-generic/cmpxchg.h +++ b/include/asm-generic/cmpxchg.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ /* * Generic UP xchg and cmpxchg using interrupt disablement. Does not * support SMP. @@ -14,14 +13,16 @@ #include #include +#ifndef xchg + /* * This function doesn't exist, so you'll get a linker error if * something tries to do an invalidly-sized xchg(). 
*/ -extern void __generic_xchg_called_with_bad_pointer(void); +extern void __xchg_called_with_bad_pointer(void); static inline -unsigned long __generic_xchg(unsigned long x, volatile void *ptr, int size) +unsigned long __xchg(unsigned long x, volatile void *ptr, int size) { unsigned long ret, flags; @@ -73,43 +74,35 @@ unsigned long __generic_xchg(unsigned long x, volatile void *ptr, int size) #endif /* CONFIG_64BIT */ default: - __generic_xchg_called_with_bad_pointer(); + __xchg_called_with_bad_pointer(); return x; } } -#define generic_xchg(ptr, x) ({ \ - ((__typeof__(*(ptr))) \ - __generic_xchg((unsigned long)(x), (ptr), sizeof(*(ptr)))); \ +#define xchg(ptr, x) ({ \ + ((__typeof__(*(ptr))) \ + __xchg((unsigned long)(x), (ptr), sizeof(*(ptr)))); \ }) +#endif /* xchg */ + /* * Atomic compare and exchange. */ #include -#define generic_cmpxchg_local(ptr, o, n) ({ \ - ((__typeof__(*(ptr)))__generic_cmpxchg_local((ptr), (unsigned long)(o), \ - (unsigned long)(n), sizeof(*(ptr)))); \ +#ifndef cmpxchg_local +#define cmpxchg_local(ptr, o, n) ({ \ + ((__typeof__(*(ptr)))__cmpxchg_local_generic((ptr), (unsigned long)(o),\ + (unsigned long)(n), sizeof(*(ptr)))); \ }) - -#define generic_cmpxchg64_local(ptr, o, n) \ - __generic_cmpxchg64_local((ptr), (o), (n)) - - -#ifndef arch_xchg -#define arch_xchg generic_xchg #endif -#ifndef arch_cmpxchg_local -#define arch_cmpxchg_local generic_cmpxchg_local +#ifndef cmpxchg64_local +#define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n)) #endif -#ifndef arch_cmpxchg64_local -#define arch_cmpxchg64_local generic_cmpxchg64_local -#endif - -#define arch_cmpxchg arch_cmpxchg_local -#define arch_cmpxchg64 arch_cmpxchg64_local +#define cmpxchg(ptr, o, n) cmpxchg_local((ptr), (o), (n)) +#define cmpxchg64(ptr, o, n) cmpxchg64_local((ptr), (o), (n)) #endif /* __ASM_GENERIC_CMPXCHG_H */ diff --git a/include/asm-generic/cputime.h b/include/asm-generic/cputime.h new file mode 100644 index 0000000000..51969436b8 --- /dev/null 
+++ b/include/asm-generic/cputime.h @@ -0,0 +1,15 @@ +#ifndef _ASM_GENERIC_CPUTIME_H +#define _ASM_GENERIC_CPUTIME_H + +#include +#include + +#ifndef CONFIG_VIRT_CPU_ACCOUNTING +# include +#endif + +#ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN +# include +#endif + +#endif diff --git a/include/asm-generic/cputime_jiffies.h b/include/asm-generic/cputime_jiffies.h new file mode 100644 index 0000000000..fe386fc6e8 --- /dev/null +++ b/include/asm-generic/cputime_jiffies.h @@ -0,0 +1,76 @@ +#ifndef _ASM_GENERIC_CPUTIME_JIFFIES_H +#define _ASM_GENERIC_CPUTIME_JIFFIES_H + +typedef unsigned long __nocast cputime_t; + +#define cmpxchg_cputime(ptr, old, new) cmpxchg(ptr, old, new) + +#define cputime_one_jiffy jiffies_to_cputime(1) +#define cputime_to_jiffies(__ct) (__force unsigned long)(__ct) +#define cputime_to_scaled(__ct) (__ct) +#define jiffies_to_cputime(__hz) (__force cputime_t)(__hz) + +typedef u64 __nocast cputime64_t; + +#define cputime64_to_jiffies64(__ct) (__force u64)(__ct) +#define jiffies64_to_cputime64(__jif) (__force cputime64_t)(__jif) + + +/* + * Convert nanoseconds <-> cputime + */ +#define cputime_to_nsecs(__ct) \ + jiffies_to_nsecs(cputime_to_jiffies(__ct)) +#define nsecs_to_cputime64(__nsec) \ + jiffies64_to_cputime64(nsecs_to_jiffies64(__nsec)) +#define nsecs_to_cputime(__nsec) \ + jiffies_to_cputime(nsecs_to_jiffies(__nsec)) + + +/* + * Convert cputime to microseconds and back. + */ +#define cputime_to_usecs(__ct) \ + jiffies_to_usecs(cputime_to_jiffies(__ct)) +#define usecs_to_cputime(__usec) \ + jiffies_to_cputime(usecs_to_jiffies(__usec)) +#define usecs_to_cputime64(__usec) \ + jiffies64_to_cputime64(nsecs_to_jiffies64((__usec) * 1000)) + +/* + * Convert cputime to seconds and back. + */ +#define cputime_to_secs(jif) (cputime_to_jiffies(jif) / HZ) +#define secs_to_cputime(sec) jiffies_to_cputime((sec) * HZ) + +/* + * Convert cputime to timespec and back. 
+ */ +#define timespec_to_cputime(__val) \ + jiffies_to_cputime(timespec_to_jiffies(__val)) +#define cputime_to_timespec(__ct,__val) \ + jiffies_to_timespec(cputime_to_jiffies(__ct),__val) + +/* + * Convert cputime to timeval and back. + */ +#define timeval_to_cputime(__val) \ + jiffies_to_cputime(timeval_to_jiffies(__val)) +#define cputime_to_timeval(__ct,__val) \ + jiffies_to_timeval(cputime_to_jiffies(__ct),__val) + +/* + * Convert cputime to clock and back. + */ +#define cputime_to_clock_t(__ct) \ + jiffies_to_clock_t(cputime_to_jiffies(__ct)) +#define clock_t_to_cputime(__x) \ + jiffies_to_cputime(clock_t_to_jiffies(__x)) + +/* + * Convert cputime64 to clock. + */ +#define cputime64_to_clock_t(__ct) \ + jiffies_64_to_clock_t(cputime64_to_jiffies64(__ct)) + +#endif diff --git a/include/asm-generic/cputime_nsecs.h b/include/asm-generic/cputime_nsecs.h new file mode 100644 index 0000000000..a84e28e0c6 --- /dev/null +++ b/include/asm-generic/cputime_nsecs.h @@ -0,0 +1,122 @@ +/* + * Definitions for measuring cputime in nsecs resolution. + * + * Based on + * + * Copyright (C) 2007 FUJITSU LIMITED + * Copyright (C) 2007 Hidetoshi Seto + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. 
+ * + */ + +#ifndef _ASM_GENERIC_CPUTIME_NSECS_H +#define _ASM_GENERIC_CPUTIME_NSECS_H + +#include + +typedef u64 __nocast cputime_t; +typedef u64 __nocast cputime64_t; + +#define cmpxchg_cputime(ptr, old, new) cmpxchg64(ptr, old, new) + +#define cputime_one_jiffy jiffies_to_cputime(1) + +#define cputime_div(__ct, divisor) div_u64((__force u64)__ct, divisor) +#define cputime_div_rem(__ct, divisor, remainder) \ + div_u64_rem((__force u64)__ct, divisor, remainder); + +/* + * Convert cputime <-> jiffies (HZ) + */ +#define cputime_to_jiffies(__ct) \ + cputime_div(__ct, NSEC_PER_SEC / HZ) +#define cputime_to_scaled(__ct) (__ct) +#define jiffies_to_cputime(__jif) \ + (__force cputime_t)((__jif) * (NSEC_PER_SEC / HZ)) +#define cputime64_to_jiffies64(__ct) \ + cputime_div(__ct, NSEC_PER_SEC / HZ) +#define jiffies64_to_cputime64(__jif) \ + (__force cputime64_t)((__jif) * (NSEC_PER_SEC / HZ)) + + +/* + * Convert cputime <-> nanoseconds + */ +#define cputime_to_nsecs(__ct) \ + (__force u64)(__ct) +#define nsecs_to_cputime(__nsecs) \ + (__force cputime_t)(__nsecs) +#define nsecs_to_cputime64(__nsecs) \ + (__force cputime64_t)(__nsecs) + + +/* + * Convert cputime <-> microseconds + */ +#define cputime_to_usecs(__ct) \ + cputime_div(__ct, NSEC_PER_USEC) +#define usecs_to_cputime(__usecs) \ + (__force cputime_t)((__usecs) * NSEC_PER_USEC) +#define usecs_to_cputime64(__usecs) \ + (__force cputime64_t)((__usecs) * NSEC_PER_USEC) + +/* + * Convert cputime <-> seconds + */ +#define cputime_to_secs(__ct) \ + cputime_div(__ct, NSEC_PER_SEC) +#define secs_to_cputime(__secs) \ + (__force cputime_t)((__secs) * NSEC_PER_SEC) + +/* + * Convert cputime <-> timespec (nsec) + */ +static inline cputime_t timespec_to_cputime(const struct timespec *val) +{ + u64 ret = (u64)val->tv_sec * NSEC_PER_SEC + val->tv_nsec; + return (__force cputime_t) ret; +} +static inline void cputime_to_timespec(const cputime_t ct, struct timespec *val) +{ + u32 rem; + + val->tv_sec = cputime_div_rem(ct, NSEC_PER_SEC, 
&rem); + val->tv_nsec = rem; +} + +/* + * Convert cputime <-> timeval (msec) + */ +static inline cputime_t timeval_to_cputime(const struct timeval *val) +{ + u64 ret = (u64)val->tv_sec * NSEC_PER_SEC + + val->tv_usec * NSEC_PER_USEC; + return (__force cputime_t) ret; +} +static inline void cputime_to_timeval(const cputime_t ct, struct timeval *val) +{ + u32 rem; + + val->tv_sec = cputime_div_rem(ct, NSEC_PER_SEC, &rem); + val->tv_usec = rem / NSEC_PER_USEC; +} + +/* + * Convert cputime <-> clock (USER_HZ) + */ +#define cputime_to_clock_t(__ct) \ + cputime_div(__ct, (NSEC_PER_SEC / USER_HZ)) +#define clock_t_to_cputime(__x) \ + (__force cputime_t)((__x) * (NSEC_PER_SEC / USER_HZ)) + +/* + * Convert cputime64 to clock. + */ +#define cputime64_to_clock_t(__ct) \ + cputime_to_clock_t((__force cputime_t)__ct) + +#endif diff --git a/include/asm-generic/current.h b/include/asm-generic/current.h index 3a2e224b9f..5e86f6ae7c 100644 --- a/include/asm-generic/current.h +++ b/include/asm-generic/current.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef __ASM_GENERIC_CURRENT_H #define __ASM_GENERIC_CURRENT_H diff --git a/include/asm-generic/delay.h b/include/asm-generic/delay.h index e448ac6143..0f79054ce7 100644 --- a/include/asm-generic/delay.h +++ b/include/asm-generic/delay.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef __ASM_GENERIC_DELAY_H #define __ASM_GENERIC_DELAY_H diff --git a/include/asm-generic/device.h b/include/asm-generic/device.h index 974517cdf7..d7c76bba64 100644 --- a/include/asm-generic/device.h +++ b/include/asm-generic/device.h @@ -1,6 +1,7 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * Arch specific extensions to struct device + * + * This file is released under the GPLv2 */ #ifndef _ASM_GENERIC_DEVICE_H #define _ASM_GENERIC_DEVICE_H diff --git a/include/asm-generic/div64.h b/include/asm-generic/div64.h index 13f5aa68a4..163f77999e 100644 --- a/include/asm-generic/div64.h +++ b/include/asm-generic/div64.h @@ -1,4 
+1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _ASM_GENERIC_DIV64_H #define _ASM_GENERIC_DIV64_H /* @@ -8,14 +7,12 @@ * Optimization for constant divisors on 32-bit machines: * Copyright (C) 2006-2015 Nicolas Pitre * - * The semantics of do_div() is, in C++ notation, observing that the name - * is a function-like macro and the n parameter has the semantics of a C++ - * reference: + * The semantics of do_div() are: * - * uint32_t do_div(uint64_t &n, uint32_t base) + * uint32_t do_div(uint64_t *n, uint32_t base) * { - * uint32_t remainder = n % base; - * n = n / base; + * uint32_t remainder = *n % base; + * *n = *n / base; * return remainder; * } * @@ -28,20 +25,6 @@ #if BITS_PER_LONG == 64 -/** - * do_div - returns 2 values: calculate remainder and update new dividend - * @n: uint64_t dividend (will be updated) - * @base: uint32_t divisor - * - * Summary: - * ``uint32_t remainder = n % base;`` - * ``n = n / base;`` - * - * Return: (uint32_t)remainder - * - * NOTE: macro parameter @n is evaluated multiple times, - * beware of side effects! - */ # define do_div(n,base) ({ \ uint32_t __base = (base); \ uint32_t __rem; \ @@ -57,11 +40,17 @@ /* * If the divisor happens to be constant, we determine the appropriate * inverse at compile time to turn the division into a few inline - * multiplications which ought to be much faster. + * multiplications which ought to be much faster. And yet only if compiling + * with a sufficiently recent gcc version to perform proper 64-bit constant + * propagation. * * (It is unfortunate that gcc doesn't perform all this internally.) 
*/ +#ifndef __div64_const32_is_OK +#define __div64_const32_is_OK (__GNUC__ >= 4) +#endif + #define __div64_const32(n, ___b) \ ({ \ /* \ @@ -174,8 +163,7 @@ static inline uint64_t __arch_xprod_64(const uint64_t m, uint64_t n, bool bias) uint32_t m_hi = m >> 32; uint32_t n_lo = n; uint32_t n_hi = n >> 32; - uint64_t res; - uint32_t res_lo, res_hi, tmp; + uint64_t res, tmp; if (!bias) { res = ((uint64_t)m_lo * n_lo) >> 32; @@ -184,9 +172,8 @@ static inline uint64_t __arch_xprod_64(const uint64_t m, uint64_t n, bool bias) res = (m + (uint64_t)m_lo * n_lo) >> 32; } else { res = m + (uint64_t)m_lo * n_lo; - res_lo = res >> 32; - res_hi = (res_lo < m_hi); - res = res_lo | ((uint64_t)res_hi << 32); + tmp = (res < m) ? (1ULL << 32) : 0; + res = (res >> 32) + tmp; } if (!(m & ((1ULL << 63) | (1ULL << 31)))) { @@ -195,12 +182,10 @@ static inline uint64_t __arch_xprod_64(const uint64_t m, uint64_t n, bool bias) res += (uint64_t)m_hi * n_lo; res >>= 32; } else { - res += (uint64_t)m_lo * n_hi; - tmp = res >> 32; + tmp = res += (uint64_t)m_lo * n_hi; res += (uint64_t)m_hi * n_lo; - res_lo = res >> 32; - res_hi = (res_lo < tmp); - res = res_lo | ((uint64_t)res_hi << 32); + tmp = (res < tmp) ? 
(1ULL << 32) : 0; + res = (res >> 32) + tmp; } res += (uint64_t)m_hi * n_hi; @@ -224,7 +209,8 @@ extern uint32_t __div64_32(uint64_t *dividend, uint32_t divisor); is_power_of_2(__base)) { \ __rem = (n) & (__base - 1); \ (n) >>= ilog2(__base); \ - } else if (__builtin_constant_p(__base) && \ + } else if (__div64_const32_is_OK && \ + __builtin_constant_p(__base) && \ __base != 0) { \ uint32_t __res_lo, __n_lo = (n); \ (n) = __div64_const32(n, __base); \ @@ -234,9 +220,8 @@ extern uint32_t __div64_32(uint64_t *dividend, uint32_t divisor); } else if (likely(((n) >> 32) == 0)) { \ __rem = (uint32_t)(n) % __base; \ (n) = (uint32_t)(n) / __base; \ - } else { \ + } else \ __rem = __div64_32(&(n), __base); \ - } \ __rem; \ }) diff --git a/include/asm-generic/dma-contiguous.h b/include/asm-generic/dma-contiguous.h new file mode 100644 index 0000000000..292c571750 --- /dev/null +++ b/include/asm-generic/dma-contiguous.h @@ -0,0 +1,9 @@ +#ifndef _ASM_GENERIC_DMA_CONTIGUOUS_H +#define _ASM_GENERIC_DMA_CONTIGUOUS_H + +#include + +static inline void +dma_contiguous_early_fixup(phys_addr_t base, unsigned long size) { } + +#endif diff --git a/include/asm-generic/dma.h b/include/asm-generic/dma.h index 43d0c8af80..9dfc3a7f36 100644 --- a/include/asm-generic/dma.h +++ b/include/asm-generic/dma.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef __ASM_GENERIC_DMA_H #define __ASM_GENERIC_DMA_H /* diff --git a/include/asm-generic/early_ioremap.h b/include/asm-generic/early_ioremap.h index 9d0479f50f..734ad4db38 100644 --- a/include/asm-generic/early_ioremap.h +++ b/include/asm-generic/early_ioremap.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _ASM_EARLY_IOREMAP_H_ #define _ASM_EARLY_IOREMAP_H_ @@ -14,11 +13,15 @@ extern void *early_memremap(resource_size_t phys_addr, unsigned long size); extern void *early_memremap_ro(resource_size_t phys_addr, unsigned long size); -extern void *early_memremap_prot(resource_size_t phys_addr, - unsigned long size, 
unsigned long prot_val); extern void early_iounmap(void __iomem *addr, unsigned long size); extern void early_memunmap(void *addr, unsigned long size); +/* + * Weak function called by early_ioremap_reset(). It does nothing, but + * architectures may provide their own version to do any needed cleanups. + */ +extern void early_ioremap_shutdown(void); + #if defined(CONFIG_GENERIC_EARLY_IOREMAP) && defined(CONFIG_MMU) /* Arch-specific initialization */ extern void early_ioremap_init(void); diff --git a/include/asm-generic/emergency-restart.h b/include/asm-generic/emergency-restart.h index 445de38b79..b74a761cae 100644 --- a/include/asm-generic/emergency-restart.h +++ b/include/asm-generic/emergency-restart.h @@ -1,8 +1,7 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _ASM_GENERIC_EMERGENCY_RESTART_H #define _ASM_GENERIC_EMERGENCY_RESTART_H -static inline void machine_emergency_restart(void) +static inline __noreturn void machine_emergency_restart(void) { machine_restart(NULL); } diff --git a/include/asm-generic/exec.h b/include/asm-generic/exec.h index f66dc71fac..567766b007 100644 --- a/include/asm-generic/exec.h +++ b/include/asm-generic/exec.h @@ -1,11 +1,15 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ -/* Generic process execution definitions. +/* Generic process execution definitions, based on MN10300 definitions. * * It should be possible to use these on really simple architectures, * but it serves more as a starting point for new ports. * * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved. * Written by David Howells (dhowells@redhat.com) + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public Licence + * as published by the Free Software Foundation; either version + * 2 of the Licence, or (at your option) any later version. 
*/ #ifndef __ASM_GENERIC_EXEC_H #define __ASM_GENERIC_EXEC_H diff --git a/include/asm-generic/export.h b/include/asm-generic/export.h index 07a36a874d..63554e9f6e 100644 --- a/include/asm-generic/export.h +++ b/include/asm-generic/export.h @@ -1,75 +1,75 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ #ifndef __ASM_GENERIC_EXPORT_H #define __ASM_GENERIC_EXPORT_H #ifndef KSYM_FUNC #define KSYM_FUNC(x) x #endif -#ifdef CONFIG_HAVE_ARCH_PREL32_RELOCATIONS -#define KSYM_ALIGN 4 -#elif defined(CONFIG_64BIT) +#ifdef CONFIG_64BIT +#define __put .quad +#ifndef KSYM_ALIGN #define KSYM_ALIGN 8 +#endif +#ifndef KCRC_ALIGN +#define KCRC_ALIGN 8 +#endif #else +#define __put .long +#ifndef KSYM_ALIGN #define KSYM_ALIGN 4 #endif #ifndef KCRC_ALIGN #define KCRC_ALIGN 4 #endif - -.macro __put, val, name -#ifdef CONFIG_HAVE_ARCH_PREL32_RELOCATIONS - .long \val - ., \name - ., 0 -#elif defined(CONFIG_64BIT) - .quad \val, \name, 0 -#else - .long \val, \name, 0 #endif -.endm + +#ifdef CONFIG_HAVE_UNDERSCORE_SYMBOL_PREFIX +#define KSYM(name) _##name +#else +#define KSYM(name) name +#endif /* - * note on .section use: we specify progbits since usage of the "M" (SHF_MERGE) - * section flag requires it. Use '%progbits' instead of '@progbits' since the - * former apparently works on all arches according to the binutils source. + * note on .section use: @progbits vs %progbits nastiness doesn't matter, + * since we immediately emit into those sections anyway. 
*/ - .macro ___EXPORT_SYMBOL name,val,sec -#if defined(CONFIG_MODULES) && !defined(__DISABLE_EXPORTS) +#ifdef CONFIG_MODULES + .globl KSYM(__ksymtab_\name) .section ___ksymtab\sec+\name,"a" .balign KSYM_ALIGN -__ksymtab_\name: - __put \val, __kstrtab_\name +KSYM(__ksymtab_\name): + __put \val, KSYM(__kstrtab_\name) .previous - .section __ksymtab_strings,"aMS",%progbits,1 -__kstrtab_\name: + .section __ksymtab_strings,"a" +KSYM(__kstrtab_\name): +#ifdef CONFIG_HAVE_UNDERSCORE_SYMBOL_PREFIX + .asciz "_\name" +#else .asciz "\name" +#endif .previous #ifdef CONFIG_MODVERSIONS .section ___kcrctab\sec+\name,"a" .balign KCRC_ALIGN -#if defined(CONFIG_MODULE_REL_CRCS) - .long __crc_\name - . -#else - .long __crc_\name -#endif - .weak __crc_\name +KSYM(__kcrctab_\name): + __put KSYM(__crc_\name) + .weak KSYM(__crc_\name) .previous #endif #endif .endm +#undef __put -#if defined(CONFIG_TRIM_UNUSED_KSYMS) +#if defined(__KSYM_DEPS__) + +#define __EXPORT_SYMBOL(sym, val, sec) === __KSYM_##sym === + +#elif defined(CONFIG_TRIM_UNUSED_KSYMS) #include #include -.macro __ksym_marker sym - .section ".discard.ksym","a" -__ksym_marker_\sym: - .previous -.endm - #define __EXPORT_SYMBOL(sym, val, sec) \ - __ksym_marker sym; \ __cond_export_sym(sym, val, sec, __is_defined(__KSYM_##sym)) #define __cond_export_sym(sym, val, sec, conf) \ ___cond_export_sym(sym, val, sec, conf) @@ -83,12 +83,12 @@ __ksym_marker_\sym: #endif #define EXPORT_SYMBOL(name) \ - __EXPORT_SYMBOL(name, KSYM_FUNC(name),) + __EXPORT_SYMBOL(name, KSYM_FUNC(KSYM(name)),) #define EXPORT_SYMBOL_GPL(name) \ - __EXPORT_SYMBOL(name, KSYM_FUNC(name), _gpl) + __EXPORT_SYMBOL(name, KSYM_FUNC(KSYM(name)), _gpl) #define EXPORT_DATA_SYMBOL(name) \ - __EXPORT_SYMBOL(name, name,) + __EXPORT_SYMBOL(name, KSYM(name),) #define EXPORT_DATA_SYMBOL_GPL(name) \ - __EXPORT_SYMBOL(name, name,_gpl) + __EXPORT_SYMBOL(name, KSYM(name),_gpl) #endif diff --git a/include/asm-generic/fb.h b/include/asm-generic/fb.h index f9f18101ed..fe8ca7fcea 100644 
--- a/include/asm-generic/fb.h +++ b/include/asm-generic/fb.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef __ASM_GENERIC_FB_H_ #define __ASM_GENERIC_FB_H_ #include diff --git a/include/asm-generic/fixmap.h b/include/asm-generic/fixmap.h index 8cc7b09c1b..827e4d3bbc 100644 --- a/include/asm-generic/fixmap.h +++ b/include/asm-generic/fixmap.h @@ -16,7 +16,6 @@ #define __ASM_GENERIC_FIXMAP_H #include -#include #define __fix_to_virt(x) (FIXADDR_TOP - ((x) << PAGE_SHIFT)) #define __virt_to_fix(x) ((FIXADDR_TOP - ((x)&PAGE_MASK)) >> PAGE_SHIFT) diff --git a/include/asm-generic/ftrace.h b/include/asm-generic/ftrace.h index 3a23028d69..51abba9ea7 100644 --- a/include/asm-generic/ftrace.h +++ b/include/asm-generic/ftrace.h @@ -1,6 +1,9 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * linux/include/asm-generic/ftrace.h + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. */ #ifndef __ASM_GENERIC_FTRACE_H__ #define __ASM_GENERIC_FTRACE_H__ diff --git a/include/asm-generic/futex.h b/include/asm-generic/futex.h index f4c3470480..bf2d34c9d8 100644 --- a/include/asm-generic/futex.h +++ b/include/asm-generic/futex.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _ASM_GENERIC_FUTEX_H #define _ASM_GENERIC_FUTEX_H @@ -14,7 +13,7 @@ */ /** - * arch_futex_atomic_op_inuser() - Atomic arithmetic operation with constant + * futex_atomic_op_inuser() - Atomic arithmetic operation with constant * argument and comparison of the previous * futex value with another constant. 
* @@ -23,17 +22,23 @@ * * Return: * 0 - On success - * -EFAULT - User access resulted in a page fault - * -EAGAIN - Atomic operation was unable to complete due to contention - * -ENOSYS - Operation not supported + * <0 - On error */ static inline int -arch_futex_atomic_op_inuser(int op, u32 oparg, int *oval, u32 __user *uaddr) +futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr) { + int op = (encoded_op >> 28) & 7; + int cmp = (encoded_op >> 24) & 15; + int oparg = (encoded_op << 8) >> 20; + int cmparg = (encoded_op << 20) >> 20; int oldval, ret; u32 tmp; + if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28)) + oparg = 1 << oparg; + preempt_disable(); + pagefault_disable(); ret = -EFAULT; if (unlikely(get_user(oldval, uaddr) != 0)) @@ -66,11 +71,20 @@ arch_futex_atomic_op_inuser(int op, u32 oparg, int *oval, u32 __user *uaddr) ret = -EFAULT; out_pagefault_enable: + pagefault_enable(); preempt_enable(); - if (ret == 0) - *oval = oldval; - + if (ret == 0) { + switch (cmp) { + case FUTEX_OP_CMP_EQ: ret = (oldval == cmparg); break; + case FUTEX_OP_CMP_NE: ret = (oldval != cmparg); break; + case FUTEX_OP_CMP_LT: ret = (oldval < cmparg); break; + case FUTEX_OP_CMP_GE: ret = (oldval >= cmparg); break; + case FUTEX_OP_CMP_LE: ret = (oldval <= cmparg); break; + case FUTEX_OP_CMP_GT: ret = (oldval > cmparg); break; + default: ret = -ENOSYS; + } + } return ret; } @@ -85,9 +99,7 @@ arch_futex_atomic_op_inuser(int op, u32 oparg, int *oval, u32 __user *uaddr) * * Return: * 0 - On success - * -EFAULT - User access resulted in a page fault - * -EAGAIN - Atomic operation was unable to complete due to contention - * -ENOSYS - Function not implemented (only if !HAVE_FUTEX_CMPXCHG) + * <0 - On error */ static inline int futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr, @@ -114,9 +126,45 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr, #else static inline int -arch_futex_atomic_op_inuser(int op, u32 oparg, int *oval, u32 __user *uaddr) +futex_atomic_op_inuser 
(int encoded_op, u32 __user *uaddr) { - return -ENOSYS; + int op = (encoded_op >> 28) & 7; + int cmp = (encoded_op >> 24) & 15; + int oparg = (encoded_op << 8) >> 20; + int cmparg = (encoded_op << 20) >> 20; + int oldval = 0, ret; + if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28)) + oparg = 1 << oparg; + + if (! access_ok (VERIFY_WRITE, uaddr, sizeof(u32))) + return -EFAULT; + + pagefault_disable(); + + switch (op) { + case FUTEX_OP_SET: + case FUTEX_OP_ADD: + case FUTEX_OP_OR: + case FUTEX_OP_ANDN: + case FUTEX_OP_XOR: + default: + ret = -ENOSYS; + } + + pagefault_enable(); + + if (!ret) { + switch (cmp) { + case FUTEX_OP_CMP_EQ: ret = (oldval == cmparg); break; + case FUTEX_OP_CMP_NE: ret = (oldval != cmparg); break; + case FUTEX_OP_CMP_LT: ret = (oldval < cmparg); break; + case FUTEX_OP_CMP_GE: ret = (oldval >= cmparg); break; + case FUTEX_OP_CMP_LE: ret = (oldval <= cmparg); break; + case FUTEX_OP_CMP_GT: ret = (oldval > cmparg); break; + default: ret = -ENOSYS; + } + } + return ret; } static inline int diff --git a/include/asm-generic/getorder.h b/include/asm-generic/getorder.h index f2979e3a96..65e4468ac5 100644 --- a/include/asm-generic/getorder.h +++ b/include/asm-generic/getorder.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef __ASM_GENERIC_GETORDER_H #define __ASM_GENERIC_GETORDER_H @@ -7,6 +6,24 @@ #include #include +/* + * Runtime evaluation of get_order() + */ +static inline __attribute_const__ +int __get_order(unsigned long size) +{ + int order; + + size--; + size >>= PAGE_SHIFT; +#if BITS_PER_LONG == 32 + order = fls(size); +#else + order = fls64(size); +#endif + return order; +} + /** * get_order - Determine the allocation order of a memory size * @size: The size for which to get the order @@ -25,27 +42,19 @@ * to hold an object of the specified size. * * The result is undefined if the size is 0. + * + * This function may be used to initialise variables with compile time + * evaluations of constants. 
*/ -static __always_inline __attribute_const__ int get_order(unsigned long size) -{ - if (__builtin_constant_p(size)) { - if (!size) - return BITS_PER_LONG - PAGE_SHIFT; - - if (size < (1UL << PAGE_SHIFT)) - return 0; - - return ilog2((size) - 1) - PAGE_SHIFT + 1; - } - - size--; - size >>= PAGE_SHIFT; -#if BITS_PER_LONG == 32 - return fls(size); -#else - return fls64(size); -#endif -} +#define get_order(n) \ +( \ + __builtin_constant_p(n) ? ( \ + ((n) == 0UL) ? BITS_PER_LONG - PAGE_SHIFT : \ + (((n) < (1UL << PAGE_SHIFT)) ? 0 : \ + ilog2((n) - 1) - PAGE_SHIFT + 1) \ + ) : \ + __get_order(n) \ +) #endif /* __ASSEMBLY__ */ diff --git a/include/asm-generic/gpio.h b/include/asm-generic/gpio.h index aea9aee1f3..8ca627dcea 100644 --- a/include/asm-generic/gpio.h +++ b/include/asm-generic/gpio.h @@ -1,9 +1,10 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _ASM_GENERIC_GPIO_H #define _ASM_GENERIC_GPIO_H +#include #include #include +#include #ifdef CONFIG_GPIOLIB @@ -138,8 +139,6 @@ static inline void gpio_unexport(unsigned gpio) #else /* !CONFIG_GPIOLIB */ -#include - static inline bool gpio_is_valid(int number) { /* only non-negative numbers are valid */ diff --git a/include/asm-generic/hardirq.h b/include/asm-generic/hardirq.h index 7317e8258b..04d0a977cd 100644 --- a/include/asm-generic/hardirq.h +++ b/include/asm-generic/hardirq.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef __ASM_GENERIC_HARDIRQ_H #define __ASM_GENERIC_HARDIRQ_H @@ -7,13 +6,9 @@ typedef struct { unsigned int __softirq_pending; -#ifdef ARCH_WANTS_NMI_IRQSTAT - unsigned int __nmi_count; -#endif } ____cacheline_aligned irq_cpustat_t; -DECLARE_PER_CPU_ALIGNED(irq_cpustat_t, irq_stat); - +#include /* Standard mappings for irq_cpustat_t above */ #include #ifndef ack_bad_irq diff --git a/include/asm-generic/hugetlb.h b/include/asm-generic/hugetlb.h index 8e1e6244a8..99b490b4d0 100644 --- a/include/asm-generic/hugetlb.h +++ b/include/asm-generic/hugetlb.h @@ -1,4 +1,3 @@ -/* 
SPDX-License-Identifier: GPL-2.0 */ #ifndef _ASM_GENERIC_HUGETLB_H #define _ASM_GENERIC_HUGETLB_H @@ -32,105 +31,10 @@ static inline pte_t huge_pte_modify(pte_t pte, pgprot_t newprot) return pte_modify(pte, newprot); } -#ifndef __HAVE_ARCH_HUGE_PTE_CLEAR static inline void huge_pte_clear(struct mm_struct *mm, unsigned long addr, - pte_t *ptep, unsigned long sz) + pte_t *ptep) { pte_clear(mm, addr, ptep); } -#endif - -#ifndef __HAVE_ARCH_HUGETLB_FREE_PGD_RANGE -static inline void hugetlb_free_pgd_range(struct mmu_gather *tlb, - unsigned long addr, unsigned long end, - unsigned long floor, unsigned long ceiling) -{ - free_pgd_range(tlb, addr, end, floor, ceiling); -} -#endif - -#ifndef __HAVE_ARCH_HUGE_SET_HUGE_PTE_AT -static inline void set_huge_pte_at(struct mm_struct *mm, unsigned long addr, - pte_t *ptep, pte_t pte) -{ - set_pte_at(mm, addr, ptep, pte); -} -#endif - -#ifndef __HAVE_ARCH_HUGE_PTEP_GET_AND_CLEAR -static inline pte_t huge_ptep_get_and_clear(struct mm_struct *mm, - unsigned long addr, pte_t *ptep) -{ - return ptep_get_and_clear(mm, addr, ptep); -} -#endif - -#ifndef __HAVE_ARCH_HUGE_PTEP_CLEAR_FLUSH -static inline void huge_ptep_clear_flush(struct vm_area_struct *vma, - unsigned long addr, pte_t *ptep) -{ - ptep_clear_flush(vma, addr, ptep); -} -#endif - -#ifndef __HAVE_ARCH_HUGE_PTE_NONE -static inline int huge_pte_none(pte_t pte) -{ - return pte_none(pte); -} -#endif - -#ifndef __HAVE_ARCH_HUGE_PTE_WRPROTECT -static inline pte_t huge_pte_wrprotect(pte_t pte) -{ - return pte_wrprotect(pte); -} -#endif - -#ifndef __HAVE_ARCH_PREPARE_HUGEPAGE_RANGE -static inline int prepare_hugepage_range(struct file *file, - unsigned long addr, unsigned long len) -{ - struct hstate *h = hstate_file(file); - - if (len & ~huge_page_mask(h)) - return -EINVAL; - if (addr & ~huge_page_mask(h)) - return -EINVAL; - - return 0; -} -#endif - -#ifndef __HAVE_ARCH_HUGE_PTEP_SET_WRPROTECT -static inline void huge_ptep_set_wrprotect(struct mm_struct *mm, - unsigned long addr, 
pte_t *ptep) -{ - ptep_set_wrprotect(mm, addr, ptep); -} -#endif - -#ifndef __HAVE_ARCH_HUGE_PTEP_SET_ACCESS_FLAGS -static inline int huge_ptep_set_access_flags(struct vm_area_struct *vma, - unsigned long addr, pte_t *ptep, - pte_t pte, int dirty) -{ - return ptep_set_access_flags(vma, addr, ptep, pte, dirty); -} -#endif - -#ifndef __HAVE_ARCH_HUGE_PTEP_GET -static inline pte_t huge_ptep_get(pte_t *ptep) -{ - return ptep_get(ptep); -} -#endif - -#ifndef __HAVE_ARCH_GIGANTIC_PAGE_RUNTIME_SUPPORTED -static inline bool gigantic_page_runtime_supported(void) -{ - return IS_ENABLED(CONFIG_ARCH_HAS_GIGANTIC_PAGE); -} -#endif /* __HAVE_ARCH_GIGANTIC_PAGE_RUNTIME_SUPPORTED */ #endif /* _ASM_GENERIC_HUGETLB_H */ diff --git a/include/asm-generic/ide_iops.h b/include/asm-generic/ide_iops.h index 81dfa3ee5e..1b91d06819 100644 --- a/include/asm-generic/ide_iops.h +++ b/include/asm-generic/ide_iops.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ /* Generic I/O and MEMIO string operations. */ #define __ide_insw insw diff --git a/include/asm-generic/int-ll64.h b/include/asm-generic/int-ll64.h index a248545f1e..4cd84855cb 100644 --- a/include/asm-generic/int-ll64.h +++ b/include/asm-generic/int-ll64.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ /* * asm-generic/int-ll64.h * @@ -13,14 +12,17 @@ #ifndef __ASSEMBLY__ -typedef __s8 s8; -typedef __u8 u8; -typedef __s16 s16; -typedef __u16 u16; -typedef __s32 s32; -typedef __u32 u32; -typedef __s64 s64; -typedef __u64 u64; +typedef signed char s8; +typedef unsigned char u8; + +typedef signed short s16; +typedef unsigned short u16; + +typedef signed int s32; +typedef unsigned int u32; + +typedef signed long long s64; +typedef unsigned long long u64; #define S8_C(x) x #define U8_C(x) x ## U diff --git a/include/asm-generic/io.h b/include/asm-generic/io.h index 7ce93aaf69..7ef015eb34 100644 --- a/include/asm-generic/io.h +++ b/include/asm-generic/io.h @@ -1,8 +1,12 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ -/* 
Generic I/O port emulation. +/* Generic I/O port emulation, based on MN10300 code * * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved. * Written by David Howells (dhowells@redhat.com) + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public Licence + * as published by the Free Software Foundation; either version + * 2 of the Licence, or (at your option) any later version. */ #ifndef __ASM_GENERIC_IO_H #define __ASM_GENERIC_IO_H @@ -15,53 +19,12 @@ #include #endif -#include #include -#ifndef __io_br -#define __io_br() barrier() +#ifndef mmiowb +#define mmiowb() do {} while (0) #endif -/* prevent prefetching of coherent DMA data ahead of a dma-complete */ -#ifndef __io_ar -#ifdef rmb -#define __io_ar(v) rmb() -#else -#define __io_ar(v) barrier() -#endif -#endif - -/* flush writes to coherent DMA data before possibly triggering a DMA read */ -#ifndef __io_bw -#ifdef wmb -#define __io_bw() wmb() -#else -#define __io_bw() barrier() -#endif -#endif - -/* serialize device access against a spin_unlock, usually handled there. */ -#ifndef __io_aw -#define __io_aw() mmiowb_set_pending() -#endif - -#ifndef __io_pbw -#define __io_pbw() __io_bw() -#endif - -#ifndef __io_paw -#define __io_paw() __io_aw() -#endif - -#ifndef __io_pbr -#define __io_pbr() __io_br() -#endif - -#ifndef __io_par -#define __io_par(v) __io_ar(v) -#endif - - /* * __raw_{read,write}{b,w,l,q}() access memory in native endianness. 
* @@ -147,12 +110,7 @@ static inline void __raw_writeq(u64 value, volatile void __iomem *addr) #define readb readb static inline u8 readb(const volatile void __iomem *addr) { - u8 val; - - __io_br(); - val = __raw_readb(addr); - __io_ar(val); - return val; + return __raw_readb(addr); } #endif @@ -160,12 +118,7 @@ static inline u8 readb(const volatile void __iomem *addr) #define readw readw static inline u16 readw(const volatile void __iomem *addr) { - u16 val; - - __io_br(); - val = __le16_to_cpu((__le16 __force)__raw_readw(addr)); - __io_ar(val); - return val; + return __le16_to_cpu(__raw_readw(addr)); } #endif @@ -173,12 +126,7 @@ static inline u16 readw(const volatile void __iomem *addr) #define readl readl static inline u32 readl(const volatile void __iomem *addr) { - u32 val; - - __io_br(); - val = __le32_to_cpu((__le32 __force)__raw_readl(addr)); - __io_ar(val); - return val; + return __le32_to_cpu(__raw_readl(addr)); } #endif @@ -187,12 +135,7 @@ static inline u32 readl(const volatile void __iomem *addr) #define readq readq static inline u64 readq(const volatile void __iomem *addr) { - u64 val; - - __io_br(); - val = __le64_to_cpu(__raw_readq(addr)); - __io_ar(val); - return val; + return __le64_to_cpu(__raw_readq(addr)); } #endif #endif /* CONFIG_64BIT */ @@ -201,9 +144,7 @@ static inline u64 readq(const volatile void __iomem *addr) #define writeb writeb static inline void writeb(u8 value, volatile void __iomem *addr) { - __io_bw(); __raw_writeb(value, addr); - __io_aw(); } #endif @@ -211,9 +152,7 @@ static inline void writeb(u8 value, volatile void __iomem *addr) #define writew writew static inline void writew(u16 value, volatile void __iomem *addr) { - __io_bw(); - __raw_writew((u16 __force)cpu_to_le16(value), addr); - __io_aw(); + __raw_writew(cpu_to_le16(value), addr); } #endif @@ -221,9 +160,7 @@ static inline void writew(u16 value, volatile void __iomem *addr) #define writel writel static inline void writel(u32 value, volatile void __iomem *addr) { - 
__io_bw(); - __raw_writel((u32 __force)__cpu_to_le32(value), addr); - __io_aw(); + __raw_writel(__cpu_to_le32(value), addr); } #endif @@ -232,9 +169,7 @@ static inline void writel(u32 value, volatile void __iomem *addr) #define writeq writeq static inline void writeq(u64 value, volatile void __iomem *addr) { - __io_bw(); __raw_writeq(__cpu_to_le64(value), addr); - __io_aw(); } #endif #endif /* CONFIG_64BIT */ @@ -245,67 +180,35 @@ static inline void writeq(u64 value, volatile void __iomem *addr) * accesses. */ #ifndef readb_relaxed -#define readb_relaxed readb_relaxed -static inline u8 readb_relaxed(const volatile void __iomem *addr) -{ - return __raw_readb(addr); -} +#define readb_relaxed readb #endif #ifndef readw_relaxed -#define readw_relaxed readw_relaxed -static inline u16 readw_relaxed(const volatile void __iomem *addr) -{ - return __le16_to_cpu(__raw_readw(addr)); -} +#define readw_relaxed readw #endif #ifndef readl_relaxed -#define readl_relaxed readl_relaxed -static inline u32 readl_relaxed(const volatile void __iomem *addr) -{ - return __le32_to_cpu(__raw_readl(addr)); -} +#define readl_relaxed readl #endif #if defined(readq) && !defined(readq_relaxed) -#define readq_relaxed readq_relaxed -static inline u64 readq_relaxed(const volatile void __iomem *addr) -{ - return __le64_to_cpu(__raw_readq(addr)); -} +#define readq_relaxed readq #endif #ifndef writeb_relaxed -#define writeb_relaxed writeb_relaxed -static inline void writeb_relaxed(u8 value, volatile void __iomem *addr) -{ - __raw_writeb(value, addr); -} +#define writeb_relaxed writeb #endif #ifndef writew_relaxed -#define writew_relaxed writew_relaxed -static inline void writew_relaxed(u16 value, volatile void __iomem *addr) -{ - __raw_writew(cpu_to_le16(value), addr); -} +#define writew_relaxed writew #endif #ifndef writel_relaxed -#define writel_relaxed writel_relaxed -static inline void writel_relaxed(u32 value, volatile void __iomem *addr) -{ - __raw_writel(__cpu_to_le32(value), addr); -} +#define 
writel_relaxed writel #endif #if defined(writeq) && !defined(writeq_relaxed) -#define writeq_relaxed writeq_relaxed -static inline void writeq_relaxed(u64 value, volatile void __iomem *addr) -{ - __raw_writeq(__cpu_to_le64(value), addr); -} +#define writeq_relaxed writeq #endif /* @@ -454,99 +357,52 @@ static inline void writesq(volatile void __iomem *addr, const void *buffer, * take effect. */ -#if !defined(inb) && !defined(_inb) -#define _inb _inb -static inline u8 _inb(unsigned long addr) -{ - u8 val; - - __io_pbr(); - val = __raw_readb(PCI_IOBASE + addr); - __io_par(val); - return val; -} -#endif - -#if !defined(inw) && !defined(_inw) -#define _inw _inw -static inline u16 _inw(unsigned long addr) -{ - u16 val; - - __io_pbr(); - val = __le16_to_cpu((__le16 __force)__raw_readw(PCI_IOBASE + addr)); - __io_par(val); - return val; -} -#endif - -#if !defined(inl) && !defined(_inl) -#define _inl _inl -static inline u32 _inl(unsigned long addr) -{ - u32 val; - - __io_pbr(); - val = __le32_to_cpu((__le32 __force)__raw_readl(PCI_IOBASE + addr)); - __io_par(val); - return val; -} -#endif - -#if !defined(outb) && !defined(_outb) -#define _outb _outb -static inline void _outb(u8 value, unsigned long addr) -{ - __io_pbw(); - __raw_writeb(value, PCI_IOBASE + addr); - __io_paw(); -} -#endif - -#if !defined(outw) && !defined(_outw) -#define _outw _outw -static inline void _outw(u16 value, unsigned long addr) -{ - __io_pbw(); - __raw_writew((u16 __force)cpu_to_le16(value), PCI_IOBASE + addr); - __io_paw(); -} -#endif - -#if !defined(outl) && !defined(_outl) -#define _outl _outl -static inline void _outl(u32 value, unsigned long addr) -{ - __io_pbw(); - __raw_writel((u32 __force)cpu_to_le32(value), PCI_IOBASE + addr); - __io_paw(); -} -#endif - -#include - #ifndef inb -#define inb _inb +#define inb inb +static inline u8 inb(unsigned long addr) +{ + return readb(PCI_IOBASE + addr); +} #endif #ifndef inw -#define inw _inw +#define inw inw +static inline u16 inw(unsigned long addr) 
+{ + return readw(PCI_IOBASE + addr); +} #endif #ifndef inl -#define inl _inl +#define inl inl +static inline u32 inl(unsigned long addr) +{ + return readl(PCI_IOBASE + addr); +} #endif #ifndef outb -#define outb _outb +#define outb outb +static inline void outb(u8 value, unsigned long addr) +{ + writeb(value, PCI_IOBASE + addr); +} #endif #ifndef outw -#define outw _outw +#define outw outw +static inline void outw(u16 value, unsigned long addr) +{ + writew(value, PCI_IOBASE + addr); +} #endif #ifndef outl -#define outl _outl +#define outl outl +static inline void outl(u32 value, unsigned long addr) +{ + writel(value, PCI_IOBASE + addr); +} #endif #ifndef inb_p @@ -911,6 +767,18 @@ static inline void iowrite64_rep(volatile void __iomem *addr, #include #define __io_virt(x) ((void __force *)(x)) +#ifndef CONFIG_GENERIC_IOMAP +struct pci_dev; +extern void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long max); + +#ifndef pci_iounmap +#define pci_iounmap pci_iounmap +static inline void pci_iounmap(struct pci_dev *dev, void __iomem *p) +{ +} +#endif +#endif /* CONFIG_GENERIC_IOMAP */ + /* * Change virtual addresses to physical addresses and vv. * These are pretty trivial @@ -934,61 +802,22 @@ static inline void *phys_to_virt(unsigned long address) /** * DOC: ioremap() and ioremap_*() variants * - * Architectures with an MMU are expected to provide ioremap() and iounmap() - * themselves or rely on GENERIC_IOREMAP. For NOMMU architectures we provide - * a default nop-op implementation that expect that the physical address used - * for MMIO are already marked as uncached, and can be used as kernel virtual - * addresses. + * If you have an IOMMU your architecture is expected to have both ioremap() + * and iounmap() implemented otherwise the asm-generic helpers will provide a + * direct mapping. * - * ioremap_wc() and ioremap_wt() can provide more relaxed caching attributes - * for specific drivers if the architecture choses to implement them. 
If they - * are not implemented we fall back to plain ioremap. Conversely, ioremap_np() - * can provide stricter non-posted write semantics if the architecture - * implements them. + * There are ioremap_*() call variants, if you have no IOMMU we naturally will + * default to direct mapping for all of them, you can override these defaults. + * If you have an IOMMU you are highly encouraged to provide your own + * ioremap variant implementation as there currently is no safe architecture + * agnostic default. To avoid possible improper behaviour default asm-generic + * ioremap_*() variants all return NULL when an IOMMU is available. If you've + * defined your own ioremap_*() variant you must then declare your own + * ioremap_*() variant as defined to itself to avoid the default NULL return. */ -#ifndef CONFIG_MMU -#ifndef ioremap -#define ioremap ioremap -static inline void __iomem *ioremap(phys_addr_t offset, size_t size) -{ - return (void __iomem *)(unsigned long)offset; -} -#endif -#ifndef iounmap -#define iounmap iounmap -static inline void iounmap(volatile void __iomem *addr) -{ -} -#endif -#elif defined(CONFIG_GENERIC_IOREMAP) -#include +#ifdef CONFIG_MMU -void __iomem *ioremap_prot(phys_addr_t addr, size_t size, unsigned long prot); -void iounmap(volatile void __iomem *addr); - -static inline void __iomem *ioremap(phys_addr_t addr, size_t size) -{ - /* _PAGE_IOREMAP needs to be supplied by the architecture */ - return ioremap_prot(addr, size, _PAGE_IOREMAP); -} -#endif /* !CONFIG_MMU || CONFIG_GENERIC_IOREMAP */ - -#ifndef ioremap_wc -#define ioremap_wc ioremap -#endif - -#ifndef ioremap_wt -#define ioremap_wt ioremap -#endif - -/* - * ioremap_uc is special in that we do require an explicit architecture - * implementation. In general you do not want to use this function in a - * driver and use plain ioremap, which is uncached by default. Similarly - * architectures should not implement it unless they have a very good - * reason. 
- */ #ifndef ioremap_uc #define ioremap_uc ioremap_uc static inline void __iomem *ioremap_uc(phys_addr_t offset, size_t size) @@ -997,33 +826,81 @@ static inline void __iomem *ioremap_uc(phys_addr_t offset, size_t size) } #endif +#else /* !CONFIG_MMU */ + /* - * ioremap_np needs an explicit architecture implementation, as it - * requests stronger semantics than regular ioremap(). Portable drivers - * should instead use one of the higher-level abstractions, like - * devm_ioremap_resource(), to choose the correct variant for any given - * device and bus. Portable drivers with a good reason to want non-posted - * write semantics should always provide an ioremap() fallback in case - * ioremap_np() is not available. + * Change "struct page" to physical address. + * + * This implementation is for the no-MMU case only... if you have an MMU + * you'll need to provide your own definitions. */ -#ifndef ioremap_np -#define ioremap_np ioremap_np -static inline void __iomem *ioremap_np(phys_addr_t offset, size_t size) + +#ifndef ioremap +#define ioremap ioremap +static inline void __iomem *ioremap(phys_addr_t offset, size_t size) { - return NULL; + return (void __iomem *)(unsigned long)offset; } #endif +#ifndef __ioremap +#define __ioremap __ioremap +static inline void __iomem *__ioremap(phys_addr_t offset, size_t size, + unsigned long flags) +{ + return ioremap(offset, size); +} +#endif + +#ifndef ioremap_nocache +#define ioremap_nocache ioremap_nocache +static inline void __iomem *ioremap_nocache(phys_addr_t offset, size_t size) +{ + return ioremap(offset, size); +} +#endif + +#ifndef ioremap_uc +#define ioremap_uc ioremap_uc +static inline void __iomem *ioremap_uc(phys_addr_t offset, size_t size) +{ + return ioremap_nocache(offset, size); +} +#endif + +#ifndef ioremap_wc +#define ioremap_wc ioremap_wc +static inline void __iomem *ioremap_wc(phys_addr_t offset, size_t size) +{ + return ioremap_nocache(offset, size); +} +#endif + +#ifndef ioremap_wt +#define ioremap_wt 
ioremap_wt +static inline void __iomem *ioremap_wt(phys_addr_t offset, size_t size) +{ + return ioremap_nocache(offset, size); +} +#endif + +#ifndef iounmap +#define iounmap iounmap + +static inline void iounmap(void __iomem *addr) +{ +} +#endif +#endif /* CONFIG_MMU */ + #ifdef CONFIG_HAS_IOPORT_MAP #ifndef CONFIG_GENERIC_IOMAP #ifndef ioport_map #define ioport_map ioport_map static inline void __iomem *ioport_map(unsigned long port, unsigned int nr) { - port &= IO_SPACE_LIMIT; - return (port > MMIO_UPPER_LIMIT) ? NULL : PCI_IOBASE + port; + return PCI_IOBASE + (port & IO_SPACE_LIMIT); } -#define ARCH_HAS_GENERIC_IOPORT_MAP #endif #ifndef ioport_unmap @@ -1038,10 +915,12 @@ extern void ioport_unmap(void __iomem *p); #endif /* CONFIG_GENERIC_IOMAP */ #endif /* CONFIG_HAS_IOPORT_MAP */ -#ifndef CONFIG_GENERIC_IOMAP -#ifndef pci_iounmap -#define ARCH_WANTS_GENERIC_PCI_IOUNMAP -#endif +#ifndef xlate_dev_kmem_ptr +#define xlate_dev_kmem_ptr xlate_dev_kmem_ptr +static inline void *xlate_dev_kmem_ptr(void *addr) +{ + return addr; +} #endif #ifndef xlate_dev_mem_ptr @@ -1075,14 +954,6 @@ static inline void *bus_to_virt(unsigned long address) #ifndef memset_io #define memset_io memset_io -/** - * memset_io Set a range of I/O memory to a constant value - * @addr: The beginning of the I/O-memory range to set - * @val: The value to set the memory to - * @count: The number of bytes to set - * - * Set a range of I/O memory to a given value. - */ static inline void memset_io(volatile void __iomem *addr, int value, size_t size) { @@ -1092,14 +963,6 @@ static inline void memset_io(volatile void __iomem *addr, int value, #ifndef memcpy_fromio #define memcpy_fromio memcpy_fromio -/** - * memcpy_fromio Copy a block of data from I/O memory - * @dst: The (RAM) destination for the copy - * @src: The (I/O memory) source for the data - * @count: The number of bytes to copy - * - * Copy a block of data from I/O memory. 
- */ static inline void memcpy_fromio(void *buffer, const volatile void __iomem *addr, size_t size) @@ -1110,14 +973,6 @@ static inline void memcpy_fromio(void *buffer, #ifndef memcpy_toio #define memcpy_toio memcpy_toio -/** - * memcpy_toio Copy a block of data into I/O memory - * @dst: The (I/O memory) destination for the copy - * @src: The (RAM) source for the data - * @count: The number of bytes to copy - * - * Copy a block of data to I/O memory. - */ static inline void memcpy_toio(volatile void __iomem *addr, const void *buffer, size_t size) { @@ -1125,10 +980,6 @@ static inline void memcpy_toio(volatile void __iomem *addr, const void *buffer, } #endif -#ifndef CONFIG_GENERIC_DEVMEM_IS_ALLOWED -extern int devmem_is_allowed(unsigned long pfn); -#endif - #endif /* __KERNEL__ */ #endif /* __ASM_GENERIC_IO_H */ diff --git a/include/asm-generic/ioctl.h b/include/asm-generic/ioctl.h index 9fda9ed000..297fb0d7cd 100644 --- a/include/asm-generic/ioctl.h +++ b/include/asm-generic/ioctl.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _ASM_GENERIC_IOCTL_H #define _ASM_GENERIC_IOCTL_H diff --git a/include/asm-generic/iomap.h b/include/asm-generic/iomap.h index 08237ae8b8..650fede33c 100644 --- a/include/asm-generic/iomap.h +++ b/include/asm-generic/iomap.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef __GENERIC_IO_H #define __GENERIC_IO_H @@ -26,25 +25,14 @@ * in the low address range. Architectures for which this is not * true can't use this generic implementation. 
*/ -extern unsigned int ioread8(const void __iomem *); -extern unsigned int ioread16(const void __iomem *); -extern unsigned int ioread16be(const void __iomem *); -extern unsigned int ioread32(const void __iomem *); -extern unsigned int ioread32be(const void __iomem *); +extern unsigned int ioread8(void __iomem *); +extern unsigned int ioread16(void __iomem *); +extern unsigned int ioread16be(void __iomem *); +extern unsigned int ioread32(void __iomem *); +extern unsigned int ioread32be(void __iomem *); #ifdef CONFIG_64BIT -extern u64 ioread64(const void __iomem *); -extern u64 ioread64be(const void __iomem *); -#endif - -#ifdef readq -#define ioread64_lo_hi ioread64_lo_hi -#define ioread64_hi_lo ioread64_hi_lo -#define ioread64be_lo_hi ioread64be_lo_hi -#define ioread64be_hi_lo ioread64be_hi_lo -extern u64 ioread64_lo_hi(const void __iomem *addr); -extern u64 ioread64_hi_lo(const void __iomem *addr); -extern u64 ioread64be_lo_hi(const void __iomem *addr); -extern u64 ioread64be_hi_lo(const void __iomem *addr); +extern u64 ioread64(void __iomem *); +extern u64 ioread64be(void __iomem *); #endif extern void iowrite8(u8, void __iomem *); @@ -57,17 +45,6 @@ extern void iowrite64(u64, void __iomem *); extern void iowrite64be(u64, void __iomem *); #endif -#ifdef writeq -#define iowrite64_lo_hi iowrite64_lo_hi -#define iowrite64_hi_lo iowrite64_hi_lo -#define iowrite64be_lo_hi iowrite64be_lo_hi -#define iowrite64be_hi_lo iowrite64be_hi_lo -extern void iowrite64_lo_hi(u64 val, void __iomem *addr); -extern void iowrite64_hi_lo(u64 val, void __iomem *addr); -extern void iowrite64be_lo_hi(u64 val, void __iomem *addr); -extern void iowrite64be_hi_lo(u64 val, void __iomem *addr); -#endif - /* * "string" versions of the above. Note that they * use native byte ordering for the accesses (on @@ -79,9 +56,9 @@ extern void iowrite64be_hi_lo(u64 val, void __iomem *addr); * memory across multiple ports, use "memcpy_toio()" * and friends. 
*/ -extern void ioread8_rep(const void __iomem *port, void *buf, unsigned long count); -extern void ioread16_rep(const void __iomem *port, void *buf, unsigned long count); -extern void ioread32_rep(const void __iomem *port, void *buf, unsigned long count); +extern void ioread8_rep(void __iomem *port, void *buf, unsigned long count); +extern void ioread16_rep(void __iomem *port, void *buf, unsigned long count); +extern void ioread32_rep(void __iomem *port, void *buf, unsigned long count); extern void iowrite8_rep(void __iomem *port, const void *buf, unsigned long count); extern void iowrite16_rep(void __iomem *port, const void *buf, unsigned long count); @@ -94,20 +71,21 @@ extern void ioport_unmap(void __iomem *); #endif #ifndef ARCH_HAS_IOREMAP_WC -#define ioremap_wc ioremap +#define ioremap_wc ioremap_nocache #endif #ifndef ARCH_HAS_IOREMAP_WT -#define ioremap_wt ioremap +#define ioremap_wt ioremap_nocache #endif -#ifndef ARCH_HAS_IOREMAP_NP -/* See the comment in asm-generic/io.h about ioremap_np(). 
*/ -#define ioremap_np ioremap_np -static inline void __iomem *ioremap_np(phys_addr_t offset, size_t size) -{ - return NULL; -} +#ifdef CONFIG_PCI +/* Destroy a virtual mapping cookie for a PCI BAR (memory or IO) */ +struct pci_dev; +extern void pci_iounmap(struct pci_dev *dev, void __iomem *); +#elif defined(CONFIG_GENERIC_IOMAP) +struct pci_dev; +static inline void pci_iounmap(struct pci_dev *dev, void __iomem *addr) +{ } #endif #include diff --git a/include/asm-generic/irq.h b/include/asm-generic/irq.h index da21de991e..b90ec0bc48 100644 --- a/include/asm-generic/irq.h +++ b/include/asm-generic/irq.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef __ASM_GENERIC_IRQ_H #define __ASM_GENERIC_IRQ_H diff --git a/include/asm-generic/irq_regs.h b/include/asm-generic/irq_regs.h index 2e7c6e89d4..6bf9355fa7 100644 --- a/include/asm-generic/irq_regs.h +++ b/include/asm-generic/irq_regs.h @@ -1,8 +1,12 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ /* Fallback per-CPU frame pointer holder * * Copyright (C) 2006 Red Hat, Inc. All Rights Reserved. * Written by David Howells (dhowells@redhat.com) + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. 
*/ #ifndef _ASM_GENERIC_IRQ_REGS_H diff --git a/include/asm-generic/irq_work.h b/include/asm-generic/irq_work.h index d5dce06f74..a44f452c65 100644 --- a/include/asm-generic/irq_work.h +++ b/include/asm-generic/irq_work.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef __ASM_IRQ_WORK_H #define __ASM_IRQ_WORK_H diff --git a/include/asm-generic/irqflags.h b/include/asm-generic/irqflags.h index 19ccbf483a..1f40d0024c 100644 --- a/include/asm-generic/irqflags.h +++ b/include/asm-generic/irqflags.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef __ASM_GENERIC_IRQFLAGS_H #define __ASM_GENERIC_IRQFLAGS_H diff --git a/include/asm-generic/kdebug.h b/include/asm-generic/kdebug.h index 2b10b31b02..d1814497bc 100644 --- a/include/asm-generic/kdebug.h +++ b/include/asm-generic/kdebug.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _ASM_GENERIC_KDEBUG_H #define _ASM_GENERIC_KDEBUG_H diff --git a/include/asm-generic/kmap_types.h b/include/asm-generic/kmap_types.h index 9f95b7b63d..00ce236266 100644 --- a/include/asm-generic/kmap_types.h +++ b/include/asm-generic/kmap_types.h @@ -1,11 +1,10 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _ASM_GENERIC_KMAP_TYPES_H #define _ASM_GENERIC_KMAP_TYPES_H #ifdef __WITH_KM_FENCE -# define KM_TYPE_NR 41 +# define KM_TYPE_NR 42 #else -# define KM_TYPE_NR 20 +# define KM_TYPE_NR 21 #endif #endif diff --git a/include/asm-generic/kvm_para.h b/include/asm-generic/kvm_para.h index 728e5c5706..fa25becbdc 100644 --- a/include/asm-generic/kvm_para.h +++ b/include/asm-generic/kvm_para.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _ASM_GENERIC_KVM_PARA_H #define _ASM_GENERIC_KVM_PARA_H @@ -19,11 +18,6 @@ static inline unsigned int kvm_arch_para_features(void) return 0; } -static inline unsigned int kvm_arch_para_hints(void) -{ - return 0; -} - static inline bool kvm_para_available(void) { return false; diff --git a/include/asm-generic/local.h b/include/asm-generic/local.h index 
fca7f1d848..62b0b8f1e7 100644 --- a/include/asm-generic/local.h +++ b/include/asm-generic/local.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _ASM_GENERIC_LOCAL_H #define _ASM_GENERIC_LOCAL_H @@ -24,24 +23,37 @@ typedef struct atomic_long_t a; } local_t; +typedef struct { + atomic_long_unchecked_t a; +} local_unchecked_t; + #define LOCAL_INIT(i) { ATOMIC_LONG_INIT(i) } #define local_read(l) atomic_long_read(&(l)->a) +#define local_read_unchecked(l) atomic_long_read_unchecked(&(l)->a) #define local_set(l,i) atomic_long_set((&(l)->a),(i)) +#define local_set_unchecked(l,i) atomic_long_set_unchecked((&(l)->a),(i)) #define local_inc(l) atomic_long_inc(&(l)->a) +#define local_inc_unchecked(l) atomic_long_inc_unchecked(&(l)->a) #define local_dec(l) atomic_long_dec(&(l)->a) +#define local_dec_unchecked(l) atomic_long_dec_unchecked(&(l)->a) #define local_add(i,l) atomic_long_add((i),(&(l)->a)) +#define local_add_unchecked(i,l) atomic_long_add_unchecked((i),(&(l)->a)) #define local_sub(i,l) atomic_long_sub((i),(&(l)->a)) +#define local_sub_unchecked(i,l) atomic_long_sub_unchecked((i),(&(l)->a)) #define local_sub_and_test(i, l) atomic_long_sub_and_test((i), (&(l)->a)) #define local_dec_and_test(l) atomic_long_dec_and_test(&(l)->a) #define local_inc_and_test(l) atomic_long_inc_and_test(&(l)->a) #define local_add_negative(i, l) atomic_long_add_negative((i), (&(l)->a)) #define local_add_return(i, l) atomic_long_add_return((i), (&(l)->a)) +#define local_add_return_unchecked(i, l) atomic_long_add_return_unchecked((i), (&(l)->a)) #define local_sub_return(i, l) atomic_long_sub_return((i), (&(l)->a)) #define local_inc_return(l) atomic_long_inc_return(&(l)->a) +#define local_dec_return(l) atomic_long_dec_return(&(l)->a) #define local_cmpxchg(l, o, n) atomic_long_cmpxchg((&(l)->a), (o), (n)) +#define local_cmpxchg_unchecked(l, o, n) atomic_long_cmpxchg((&(l)->a), (o), (n)) #define local_xchg(l, n) atomic_long_xchg((&(l)->a), (n)) #define local_add_unless(l, _a, u) 
atomic_long_add_unless((&(l)->a), (_a), (u)) #define local_inc_not_zero(l) atomic_long_inc_not_zero(&(l)->a) diff --git a/include/asm-generic/local64.h b/include/asm-generic/local64.h index 765be0b7d8..5980002b8b 100644 --- a/include/asm-generic/local64.h +++ b/include/asm-generic/local64.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _ASM_GENERIC_LOCAL64_H #define _ASM_GENERIC_LOCAL64_H diff --git a/include/asm-generic/memory_model.h b/include/asm-generic/memory_model.h index a2c8ed6023..5148150cc8 100644 --- a/include/asm-generic/memory_model.h +++ b/include/asm-generic/memory_model.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef __ASM_MEMORY_MODEL_H #define __ASM_MEMORY_MODEL_H @@ -6,18 +5,47 @@ #ifndef __ASSEMBLY__ -/* - * supports 3 memory models. - */ #if defined(CONFIG_FLATMEM) #ifndef ARCH_PFN_OFFSET #define ARCH_PFN_OFFSET (0UL) #endif +#elif defined(CONFIG_DISCONTIGMEM) + +#ifndef arch_pfn_to_nid +#define arch_pfn_to_nid(pfn) pfn_to_nid(pfn) +#endif + +#ifndef arch_local_page_offset +#define arch_local_page_offset(pfn, nid) \ + ((pfn) - NODE_DATA(nid)->node_start_pfn) +#endif + +#endif /* CONFIG_DISCONTIGMEM */ + +/* + * supports 3 memory models. 
+ */ +#if defined(CONFIG_FLATMEM) + #define __pfn_to_page(pfn) (mem_map + ((pfn) - ARCH_PFN_OFFSET)) #define __page_to_pfn(page) ((unsigned long)((page) - mem_map) + \ ARCH_PFN_OFFSET) +#elif defined(CONFIG_DISCONTIGMEM) + +#define __pfn_to_page(pfn) \ +({ unsigned long __pfn = (pfn); \ + unsigned long __nid = arch_pfn_to_nid(__pfn); \ + NODE_DATA(__nid)->node_mem_map + arch_local_page_offset(__pfn, __nid);\ +}) + +#define __page_to_pfn(pg) \ +({ const struct page *__pg = (pg); \ + struct pglist_data *__pgdat = NODE_DATA(page_to_nid(__pg)); \ + (unsigned long)(__pg - __pgdat->node_mem_map) + \ + __pgdat->node_start_pfn; \ +}) #elif defined(CONFIG_SPARSEMEM_VMEMMAP) @@ -41,7 +69,7 @@ struct mem_section *__sec = __pfn_to_section(__pfn); \ __section_mem_map_addr(__sec) + __pfn; \ }) -#endif /* CONFIG_FLATMEM/SPARSEMEM */ +#endif /* CONFIG_FLATMEM/DISCONTIGMEM/SPARSEMEM */ /* * Convert a physical address to a Page Frame Number and back diff --git a/include/asm-generic/mm_hooks.h b/include/asm-generic/mm_hooks.h index 4dbb177d11..cc5d9a1405 100644 --- a/include/asm-generic/mm_hooks.h +++ b/include/asm-generic/mm_hooks.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ /* * Define generic no-op hooks for arch_dup_mmap, arch_exit_mmap * and arch_unmap to be included in asm-FOO/mmu_context.h for any @@ -7,10 +6,9 @@ #ifndef _ASM_GENERIC_MM_HOOKS_H #define _ASM_GENERIC_MM_HOOKS_H -static inline int arch_dup_mmap(struct mm_struct *oldmm, - struct mm_struct *mm) +static inline void arch_dup_mmap(struct mm_struct *oldmm, + struct mm_struct *mm) { - return 0; } static inline void arch_exit_mmap(struct mm_struct *mm) @@ -18,14 +16,26 @@ static inline void arch_exit_mmap(struct mm_struct *mm) } static inline void arch_unmap(struct mm_struct *mm, + struct vm_area_struct *vma, unsigned long start, unsigned long end) { } +static inline void arch_bprm_mm_init(struct mm_struct *mm, + struct vm_area_struct *vma) +{ +} + static inline bool arch_vma_access_permitted(struct 
vm_area_struct *vma, bool write, bool execute, bool foreign) { /* by default, allow everything */ return true; } + +static inline bool arch_pte_access_permitted(pte_t pte, bool write) +{ + /* by default, allow everything */ + return true; +} #endif /* _ASM_GENERIC_MM_HOOKS_H */ diff --git a/include/asm-generic/mmu.h b/include/asm-generic/mmu.h index 0618380375..0ed3f1cfb8 100644 --- a/include/asm-generic/mmu.h +++ b/include/asm-generic/mmu.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef __ASM_GENERIC_MMU_H #define __ASM_GENERIC_MMU_H diff --git a/include/asm-generic/mmu_context.h b/include/asm-generic/mmu_context.h index 91727065ba..a7eec910ba 100644 --- a/include/asm-generic/mmu_context.h +++ b/include/asm-generic/mmu_context.h @@ -1,76 +1,45 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef __ASM_GENERIC_MMU_CONTEXT_H #define __ASM_GENERIC_MMU_CONTEXT_H /* - * Generic hooks to implement no-op functionality. + * Generic hooks for NOMMU architectures, which do not need to do + * anything special here. */ +#include + struct task_struct; struct mm_struct; -/* - * enter_lazy_tlb - Called when "tsk" is about to enter lazy TLB mode. - * - * @mm: the currently active mm context which is becoming lazy - * @tsk: task which is entering lazy tlb - * - * tsk->mm will be NULL - */ -#ifndef enter_lazy_tlb static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk) { } -#endif -/** - * init_new_context - Initialize context of a new mm_struct. 
- * @tsk: task struct for the mm - * @mm: the new mm struct - * @return: 0 on success, -errno on failure - */ -#ifndef init_new_context static inline int init_new_context(struct task_struct *tsk, struct mm_struct *mm) { return 0; } -#endif -/** - * destroy_context - Undo init_new_context when the mm is going away - * @mm: old mm struct - */ -#ifndef destroy_context static inline void destroy_context(struct mm_struct *mm) { } -#endif -/** - * activate_mm - called after exec switches the current task to a new mm, to switch to it - * @prev_mm: previous mm of this task - * @next_mm: new mm - */ -#ifndef activate_mm -static inline void activate_mm(struct mm_struct *prev_mm, - struct mm_struct *next_mm) -{ - switch_mm(prev_mm, next_mm, current); -} -#endif - -/** - * dectivate_mm - called when an mm is released after exit or exec switches away from it - * @tsk: the task - * @mm: the old mm - */ -#ifndef deactivate_mm -static inline void deactivate_mm(struct task_struct *tsk, +static inline void deactivate_mm(struct task_struct *task, struct mm_struct *mm) { } -#endif + +static inline void switch_mm(struct mm_struct *prev, + struct mm_struct *next, + struct task_struct *tsk) +{ +} + +static inline void activate_mm(struct mm_struct *prev_mm, + struct mm_struct *next_mm) +{ +} #endif /* __ASM_GENERIC_MMU_CONTEXT_H */ diff --git a/include/asm-generic/module.h b/include/asm-generic/module.h index 98e1541b72..14dc41d185 100644 --- a/include/asm-generic/module.h +++ b/include/asm-generic/module.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef __ASM_GENERIC_MODULE_H #define __ASM_GENERIC_MODULE_H diff --git a/include/asm-generic/msi.h b/include/asm-generic/msi.h index bf910d47e9..61c58d8878 100644 --- a/include/asm-generic/msi.h +++ b/include/asm-generic/msi.h @@ -1,11 +1,8 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef __ASM_GENERIC_MSI_H #define __ASM_GENERIC_MSI_H #include -#ifdef CONFIG_GENERIC_MSI_IRQ_DOMAIN - #ifndef NUM_MSI_ALLOC_SCRATCHPAD_REGS # 
define NUM_MSI_ALLOC_SCRATCHPAD_REGS 2 #endif @@ -24,18 +21,12 @@ struct msi_desc; typedef struct msi_alloc_info { struct msi_desc *desc; irq_hw_number_t hwirq; - unsigned long flags; union { unsigned long ul; void *ptr; } scratchpad[NUM_MSI_ALLOC_SCRATCHPAD_REGS]; } msi_alloc_info_t; -/* Device generating MSIs is proxying for another device */ -#define MSI_ALLOC_FLAGS_PROXY_DEVICE (1UL << 0) - #define GENERIC_MSI_DOMAIN_OPS 1 -#endif /* CONFIG_GENERIC_MSI_IRQ_DOMAIN */ - #endif diff --git a/include/asm-generic/mutex-dec.h b/include/asm-generic/mutex-dec.h new file mode 100644 index 0000000000..c54829d3de --- /dev/null +++ b/include/asm-generic/mutex-dec.h @@ -0,0 +1,88 @@ +/* + * include/asm-generic/mutex-dec.h + * + * Generic implementation of the mutex fastpath, based on atomic + * decrement/increment. + */ +#ifndef _ASM_GENERIC_MUTEX_DEC_H +#define _ASM_GENERIC_MUTEX_DEC_H + +/** + * __mutex_fastpath_lock - try to take the lock by moving the count + * from 1 to a 0 value + * @count: pointer of type atomic_t + * @fail_fn: function to call if the original value was not 1 + * + * Change the count from 1 to a value lower than 1, and call if + * it wasn't 1 originally. This function MUST leave the value lower than + * 1 even when the "1" assertion wasn't true. + */ +static inline void +__mutex_fastpath_lock(atomic_t *count, void (*fail_fn)(atomic_t *)) +{ + if (unlikely(atomic_dec_return_acquire(count) < 0)) + fail_fn(count); +} + +/** + * __mutex_fastpath_lock_retval - try to take the lock by moving the count + * from 1 to a 0 value + * @count: pointer of type atomic_t + * + * Change the count from 1 to a value lower than 1. This function returns 0 + * if the fastpath succeeds, or -1 otherwise. 
+ */ +static inline int +__mutex_fastpath_lock_retval(atomic_t *count) +{ + if (unlikely(atomic_dec_return_acquire(count) < 0)) + return -1; + return 0; +} + +/** + * __mutex_fastpath_unlock - try to promote the count from 0 to 1 + * @count: pointer of type atomic_t + * @fail_fn: function to call if the original value was not 0 + * + * Try to promote the count from 0 to 1. If it wasn't 0, call . + * In the failure case, this function is allowed to either set the value to + * 1, or to set it to a value lower than 1. + * + * If the implementation sets it to a value of lower than 1, then the + * __mutex_slowpath_needs_to_unlock() macro needs to return 1, it needs + * to return 0 otherwise. + */ +static inline void +__mutex_fastpath_unlock(atomic_t *count, void (*fail_fn)(atomic_t *)) +{ + if (unlikely(atomic_inc_return_release(count) <= 0)) + fail_fn(count); +} + +#define __mutex_slowpath_needs_to_unlock() 1 + +/** + * __mutex_fastpath_trylock - try to acquire the mutex, without waiting + * + * @count: pointer of type atomic_t + * @fail_fn: fallback function + * + * Change the count from 1 to a value lower than 1, and return 0 (failure) + * if it wasn't 1 originally, or return 1 (success) otherwise. This function + * MUST leave the value lower than 1 even when the "1" assertion wasn't true. + * Additionally, if the value was < 0 originally, this function must not leave + * it to 0 on failure. + * + * If the architecture has no effective trylock variant, it should call the + * spinlock-based trylock variant unconditionally. 
+ */ +static inline int +__mutex_fastpath_trylock(atomic_t *count, int (*fail_fn)(atomic_t *)) +{ + if (likely(atomic_read(count) == 1 && atomic_cmpxchg_acquire(count, 1, 0) == 1)) + return 1; + return 0; +} + +#endif diff --git a/include/asm-generic/mutex-null.h b/include/asm-generic/mutex-null.h new file mode 100644 index 0000000000..61069ed334 --- /dev/null +++ b/include/asm-generic/mutex-null.h @@ -0,0 +1,19 @@ +/* + * include/asm-generic/mutex-null.h + * + * Generic implementation of the mutex fastpath, based on NOP :-) + * + * This is used by the mutex-debugging infrastructure, but it can also + * be used by architectures that (for whatever reason) want to use the + * spinlock based slowpath. + */ +#ifndef _ASM_GENERIC_MUTEX_NULL_H +#define _ASM_GENERIC_MUTEX_NULL_H + +#define __mutex_fastpath_lock(count, fail_fn) fail_fn(count) +#define __mutex_fastpath_lock_retval(count) (-1) +#define __mutex_fastpath_unlock(count, fail_fn) fail_fn(count) +#define __mutex_fastpath_trylock(count, fail_fn) fail_fn(count) +#define __mutex_slowpath_needs_to_unlock() 1 + +#endif diff --git a/include/asm-generic/mutex-xchg.h b/include/asm-generic/mutex-xchg.h new file mode 100644 index 0000000000..3269ec4e19 --- /dev/null +++ b/include/asm-generic/mutex-xchg.h @@ -0,0 +1,120 @@ +/* + * include/asm-generic/mutex-xchg.h + * + * Generic implementation of the mutex fastpath, based on xchg(). + * + * NOTE: An xchg based implementation might be less optimal than an atomic + * decrement/increment based implementation. If your architecture + * has a reasonable atomic dec/inc then you should probably use + * asm-generic/mutex-dec.h instead, or you could open-code an + * optimized version in asm/mutex.h. 
+ */ +#ifndef _ASM_GENERIC_MUTEX_XCHG_H +#define _ASM_GENERIC_MUTEX_XCHG_H + +/** + * __mutex_fastpath_lock - try to take the lock by moving the count + * from 1 to a 0 value + * @count: pointer of type atomic_t + * @fail_fn: function to call if the original value was not 1 + * + * Change the count from 1 to a value lower than 1, and call if it + * wasn't 1 originally. This function MUST leave the value lower than 1 + * even when the "1" assertion wasn't true. + */ +static inline void +__mutex_fastpath_lock(atomic_t *count, void (*fail_fn)(atomic_t *)) +{ + if (unlikely(atomic_xchg(count, 0) != 1)) + /* + * We failed to acquire the lock, so mark it contended + * to ensure that any waiting tasks are woken up by the + * unlock slow path. + */ + if (likely(atomic_xchg_acquire(count, -1) != 1)) + fail_fn(count); +} + +/** + * __mutex_fastpath_lock_retval - try to take the lock by moving the count + * from 1 to a 0 value + * @count: pointer of type atomic_t + * + * Change the count from 1 to a value lower than 1. This function returns 0 + * if the fastpath succeeds, or -1 otherwise. + */ +static inline int +__mutex_fastpath_lock_retval(atomic_t *count) +{ + if (unlikely(atomic_xchg_acquire(count, 0) != 1)) + if (likely(atomic_xchg(count, -1) != 1)) + return -1; + return 0; +} + +/** + * __mutex_fastpath_unlock - try to promote the mutex from 0 to 1 + * @count: pointer of type atomic_t + * @fail_fn: function to call if the original value was not 0 + * + * try to promote the mutex from 0 to 1. if it wasn't 0, call + * In the failure case, this function is allowed to either set the value to + * 1, or to set it to a value lower than one. + * If the implementation sets it to a value of lower than one, the + * __mutex_slowpath_needs_to_unlock() macro needs to return 1, it needs + * to return 0 otherwise. 
+ */ +static inline void +__mutex_fastpath_unlock(atomic_t *count, void (*fail_fn)(atomic_t *)) +{ + if (unlikely(atomic_xchg_release(count, 1) != 0)) + fail_fn(count); +} + +#define __mutex_slowpath_needs_to_unlock() 0 + +/** + * __mutex_fastpath_trylock - try to acquire the mutex, without waiting + * + * @count: pointer of type atomic_t + * @fail_fn: spinlock based trylock implementation + * + * Change the count from 1 to a value lower than 1, and return 0 (failure) + * if it wasn't 1 originally, or return 1 (success) otherwise. This function + * MUST leave the value lower than 1 even when the "1" assertion wasn't true. + * Additionally, if the value was < 0 originally, this function must not leave + * it to 0 on failure. + * + * If the architecture has no effective trylock variant, it should call the + * spinlock-based trylock variant unconditionally. + */ +static inline int +__mutex_fastpath_trylock(atomic_t *count, int (*fail_fn)(atomic_t *)) +{ + int prev; + + if (atomic_read(count) != 1) + return 0; + + prev = atomic_xchg_acquire(count, 0); + if (unlikely(prev < 0)) { + /* + * The lock was marked contended so we must restore that + * state. If while doing so we get back a prev value of 1 + * then we just own it. + * + * [ In the rare case of the mutex going to 1, to 0, to -1 + * and then back to 0 in this few-instructions window, + * this has the potential to trigger the slowpath for the + * owner's unlock path needlessly, but that's not a problem + * in practice. ] + */ + prev = atomic_xchg_acquire(count, prev); + if (prev < 0) + prev = 0; + } + + return prev; +} + +#endif diff --git a/include/asm-generic/mutex.h b/include/asm-generic/mutex.h new file mode 100644 index 0000000000..fe91ab5027 --- /dev/null +++ b/include/asm-generic/mutex.h @@ -0,0 +1,9 @@ +#ifndef __ASM_GENERIC_MUTEX_H +#define __ASM_GENERIC_MUTEX_H +/* + * Pull in the generic implementation for the mutex fastpath, + * which is a reasonable default on many architectures. 
+ */ + +#include +#endif /* __ASM_GENERIC_MUTEX_H */ diff --git a/include/asm-generic/page.h b/include/asm-generic/page.h index 6fc4756181..67cfb7dbc2 100644 --- a/include/asm-generic/page.h +++ b/include/asm-generic/page.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef __ASM_GENERIC_PAGE_H #define __ASM_GENERIC_PAGE_H /* @@ -7,7 +6,7 @@ */ #ifdef CONFIG_MMU -#error need to provide a real asm/page.h +#error need to prove a real asm/page.h #endif @@ -63,7 +62,11 @@ extern unsigned long memory_end; #endif /* !__ASSEMBLY__ */ +#ifdef CONFIG_KERNEL_RAM_BASE_ADDRESS +#define PAGE_OFFSET (CONFIG_KERNEL_RAM_BASE_ADDRESS) +#else #define PAGE_OFFSET (0) +#endif #ifndef ARCH_PFN_OFFSET #define ARCH_PFN_OFFSET (PAGE_OFFSET >> PAGE_SHIFT) diff --git a/include/asm-generic/param.h b/include/asm-generic/param.h index 8d3009dd28..04e715bccc 100644 --- a/include/asm-generic/param.h +++ b/include/asm-generic/param.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef __ASM_GENERIC_PARAM_H #define __ASM_GENERIC_PARAM_H diff --git a/include/asm-generic/parport.h b/include/asm-generic/parport.h index 483991d619..2c9f9d4336 100644 --- a/include/asm-generic/parport.h +++ b/include/asm-generic/parport.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef __ASM_GENERIC_PARPORT_H #define __ASM_GENERIC_PARPORT_H diff --git a/include/asm-generic/pci.h b/include/asm-generic/pci.h index 6bb3cd3d69..f24bc519bf 100644 --- a/include/asm-generic/pci.h +++ b/include/asm-generic/pci.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ /* * linux/include/asm-generic/pci.h * @@ -14,4 +13,12 @@ static inline int pci_get_legacy_ide_irq(struct pci_dev *dev, int channel) } #endif /* HAVE_ARCH_PCI_GET_LEGACY_IDE_IRQ */ +/* + * By default, assume that no iommu is in use and that the PCI + * space is mapped to address physical 0. 
+ */ +#ifndef PCI_DMA_BUS_IS_PHYS +#define PCI_DMA_BUS_IS_PHYS (1) +#endif + #endif /* _ASM_GENERIC_PCI_H */ diff --git a/include/asm-generic/pci_iomap.h b/include/asm-generic/pci_iomap.h index 5a2f9bf533..b1e17fcee2 100644 --- a/include/asm-generic/pci_iomap.h +++ b/include/asm-generic/pci_iomap.h @@ -1,8 +1,12 @@ -/* SPDX-License-Identifier: GPL-2.0+ */ -/* Generic I/O port emulation. +/* Generic I/O port emulation, based on MN10300 code * * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved. * Written by David Howells (dhowells@redhat.com) + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public Licence + * as published by the Free Software Foundation; either version + * 2 of the Licence, or (at your option) any later version. */ #ifndef __ASM_GENERIC_PCI_IOMAP_H #define __ASM_GENERIC_PCI_IOMAP_H @@ -18,7 +22,6 @@ extern void __iomem *pci_iomap_range(struct pci_dev *dev, int bar, extern void __iomem *pci_iomap_wc_range(struct pci_dev *dev, int bar, unsigned long offset, unsigned long maxlen); -extern void pci_iounmap(struct pci_dev *dev, void __iomem *); /* Create a virtual mapping cookie for a port on a given PCI device. 
* Do not call this directly, it exists to make it easier for architectures * to override */ @@ -51,8 +54,6 @@ static inline void __iomem *pci_iomap_wc_range(struct pci_dev *dev, int bar, { return NULL; } -static inline void pci_iounmap(struct pci_dev *dev, void __iomem *addr) -{ } #endif -#endif /* __ASM_GENERIC_PCI_IOMAP_H */ +#endif /* __ASM_GENERIC_IO_H */ diff --git a/include/asm-generic/percpu.h b/include/asm-generic/percpu.h index 6432a7fade..0504ef8f3a 100644 --- a/include/asm-generic/percpu.h +++ b/include/asm-generic/percpu.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _ASM_GENERIC_PERCPU_H_ #define _ASM_GENERIC_PERCPU_H_ @@ -62,6 +61,10 @@ extern void setup_per_cpu_areas(void); #define PER_CPU_ATTRIBUTES #endif +#ifndef PER_CPU_DEF_ATTRIBUTES +#define PER_CPU_DEF_ATTRIBUTES +#endif + #define raw_cpu_generic_read(pcp) \ ({ \ *raw_cpu_ptr(&(pcp)); \ @@ -74,7 +77,7 @@ do { \ #define raw_cpu_generic_add_return(pcp, val) \ ({ \ - typeof(pcp) *__p = raw_cpu_ptr(&(pcp)); \ + typeof(&(pcp)) __p = raw_cpu_ptr(&(pcp)); \ \ *__p += val; \ *__p; \ @@ -82,7 +85,7 @@ do { \ #define raw_cpu_generic_xchg(pcp, nval) \ ({ \ - typeof(pcp) *__p = raw_cpu_ptr(&(pcp)); \ + typeof(&(pcp)) __p = raw_cpu_ptr(&(pcp)); \ typeof(pcp) __ret; \ __ret = *__p; \ *__p = nval; \ @@ -91,7 +94,7 @@ do { \ #define raw_cpu_generic_cmpxchg(pcp, oval, nval) \ ({ \ - typeof(pcp) *__p = raw_cpu_ptr(&(pcp)); \ + typeof(&(pcp)) __p = raw_cpu_ptr(&(pcp)); \ typeof(pcp) __ret; \ __ret = *__p; \ if (__ret == (oval)) \ @@ -101,8 +104,8 @@ do { \ #define raw_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2) \ ({ \ - typeof(pcp1) *__p1 = raw_cpu_ptr(&(pcp1)); \ - typeof(pcp2) *__p2 = raw_cpu_ptr(&(pcp2)); \ + typeof(&(pcp1)) __p1 = raw_cpu_ptr(&(pcp1)); \ + typeof(&(pcp2)) __p2 = raw_cpu_ptr(&(pcp2)); \ int __ret = 0; \ if (*__p1 == (oval1) && *__p2 == (oval2)) { \ *__p1 = nval1; \ @@ -112,32 +115,12 @@ do { \ (__ret); \ }) -#define 
__this_cpu_generic_read_nopreempt(pcp) \ -({ \ - typeof(pcp) ___ret; \ - preempt_disable_notrace(); \ - ___ret = READ_ONCE(*raw_cpu_ptr(&(pcp))); \ - preempt_enable_notrace(); \ - ___ret; \ -}) - -#define __this_cpu_generic_read_noirq(pcp) \ -({ \ - typeof(pcp) ___ret; \ - unsigned long ___flags; \ - raw_local_irq_save(___flags); \ - ___ret = raw_cpu_generic_read(pcp); \ - raw_local_irq_restore(___flags); \ - ___ret; \ -}) - #define this_cpu_generic_read(pcp) \ ({ \ typeof(pcp) __ret; \ - if (__native_word(pcp)) \ - __ret = __this_cpu_generic_read_nopreempt(pcp); \ - else \ - __ret = __this_cpu_generic_read_noirq(pcp); \ + preempt_disable_notrace(); \ + __ret = raw_cpu_generic_read(pcp); \ + preempt_enable_notrace(); \ __ret; \ }) diff --git a/include/asm-generic/pgalloc.h b/include/asm-generic/pgalloc.h index 02932efad3..9e429d08b1 100644 --- a/include/asm-generic/pgalloc.h +++ b/include/asm-generic/pgalloc.h @@ -1,187 +1,12 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef __ASM_GENERIC_PGALLOC_H #define __ASM_GENERIC_PGALLOC_H - -#ifdef CONFIG_MMU - -#define GFP_PGTABLE_KERNEL (GFP_KERNEL | __GFP_ZERO) -#define GFP_PGTABLE_USER (GFP_PGTABLE_KERNEL | __GFP_ACCOUNT) - -/** - * __pte_alloc_one_kernel - allocate a page for PTE-level kernel page table - * @mm: the mm_struct of the current context - * - * This function is intended for architectures that need - * anything beyond simple page allocation. 
- * - * Return: pointer to the allocated memory or %NULL on error - */ -static inline pte_t *__pte_alloc_one_kernel(struct mm_struct *mm) -{ - return (pte_t *)__get_free_page(GFP_PGTABLE_KERNEL); -} - -#ifndef __HAVE_ARCH_PTE_ALLOC_ONE_KERNEL -/** - * pte_alloc_one_kernel - allocate a page for PTE-level kernel page table - * @mm: the mm_struct of the current context - * - * Return: pointer to the allocated memory or %NULL on error - */ -static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm) -{ - return __pte_alloc_one_kernel(mm); -} -#endif - -/** - * pte_free_kernel - free PTE-level kernel page table page - * @mm: the mm_struct of the current context - * @pte: pointer to the memory containing the page table - */ -static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte) -{ - free_page((unsigned long)pte); -} - -/** - * __pte_alloc_one - allocate a page for PTE-level user page table - * @mm: the mm_struct of the current context - * @gfp: GFP flags to use for the allocation - * - * Allocates a page and runs the pgtable_pte_page_ctor(). - * - * This function is intended for architectures that need - * anything beyond simple page allocation or must have custom GFP flags. - * - * Return: `struct page` initialized as page table or %NULL on error - */ -static inline pgtable_t __pte_alloc_one(struct mm_struct *mm, gfp_t gfp) -{ - struct page *pte; - - pte = alloc_page(gfp); - if (!pte) - return NULL; - if (!pgtable_pte_page_ctor(pte)) { - __free_page(pte); - return NULL; - } - - return pte; -} - -#ifndef __HAVE_ARCH_PTE_ALLOC_ONE -/** - * pte_alloc_one - allocate a page for PTE-level user page table - * @mm: the mm_struct of the current context - * - * Allocates a page and runs the pgtable_pte_page_ctor(). 
- * - * Return: `struct page` initialized as page table or %NULL on error - */ -static inline pgtable_t pte_alloc_one(struct mm_struct *mm) -{ - return __pte_alloc_one(mm, GFP_PGTABLE_USER); -} -#endif - /* - * Should really implement gc for free page table pages. This could be - * done with a reference count in struct page. + * an empty file is enough for a nommu architecture */ - -/** - * pte_free - free PTE-level user page table page - * @mm: the mm_struct of the current context - * @pte_page: the `struct page` representing the page table - */ -static inline void pte_free(struct mm_struct *mm, struct page *pte_page) -{ - pgtable_pte_page_dtor(pte_page); - __free_page(pte_page); -} - - -#if CONFIG_PGTABLE_LEVELS > 2 - -#ifndef __HAVE_ARCH_PMD_ALLOC_ONE -/** - * pmd_alloc_one - allocate a page for PMD-level page table - * @mm: the mm_struct of the current context - * - * Allocates a page and runs the pgtable_pmd_page_ctor(). - * Allocations use %GFP_PGTABLE_USER in user context and - * %GFP_PGTABLE_KERNEL in kernel context. 
- * - * Return: pointer to the allocated memory or %NULL on error - */ -static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr) -{ - struct page *page; - gfp_t gfp = GFP_PGTABLE_USER; - - if (mm == &init_mm) - gfp = GFP_PGTABLE_KERNEL; - page = alloc_pages(gfp, 0); - if (!page) - return NULL; - if (!pgtable_pmd_page_ctor(page)) { - __free_pages(page, 0); - return NULL; - } - return (pmd_t *)page_address(page); -} +#ifdef CONFIG_MMU +#error need to implement an architecture specific asm/pgalloc.h #endif -#ifndef __HAVE_ARCH_PMD_FREE -static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd) -{ - BUG_ON((unsigned long)pmd & (PAGE_SIZE-1)); - pgtable_pmd_page_dtor(virt_to_page(pmd)); - free_page((unsigned long)pmd); -} -#endif - -#endif /* CONFIG_PGTABLE_LEVELS > 2 */ - -#if CONFIG_PGTABLE_LEVELS > 3 - -#ifndef __HAVE_ARCH_PUD_ALLOC_ONE -/** - * pud_alloc_one - allocate a page for PUD-level page table - * @mm: the mm_struct of the current context - * - * Allocates a page using %GFP_PGTABLE_USER for user context and - * %GFP_PGTABLE_KERNEL for kernel context. 
- * - * Return: pointer to the allocated memory or %NULL on error - */ -static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr) -{ - gfp_t gfp = GFP_PGTABLE_USER; - - if (mm == &init_mm) - gfp = GFP_PGTABLE_KERNEL; - return (pud_t *)get_zeroed_page(gfp); -} -#endif - -static inline void pud_free(struct mm_struct *mm, pud_t *pud) -{ - BUG_ON((unsigned long)pud & (PAGE_SIZE-1)); - free_page((unsigned long)pud); -} - -#endif /* CONFIG_PGTABLE_LEVELS > 3 */ - -#ifndef __HAVE_ARCH_PGD_FREE -static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd) -{ - free_page((unsigned long)pgd); -} -#endif - -#endif /* CONFIG_MMU */ +#define check_pgt_cache() do { } while (0) #endif /* __ASM_GENERIC_PGALLOC_H */ diff --git a/include/asm-generic/pgtable-nopmd.h b/include/asm-generic/pgtable-nopmd.h index 10789cf51d..8458d850f7 100644 --- a/include/asm-generic/pgtable-nopmd.h +++ b/include/asm-generic/pgtable-nopmd.h @@ -1,14 +1,18 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _PGTABLE_NOPMD_H #define _PGTABLE_NOPMD_H -#ifndef __ASSEMBLY__ - #include -struct mm_struct; +#define __PAGETABLE_PMD_FOLDED -#define __PAGETABLE_PMD_FOLDED 1 +#define PMD_SHIFT PUD_SHIFT +#define PTRS_PER_PMD 1 +#define PMD_SIZE (_AC(1,UL) << PMD_SHIFT) +#define PMD_MASK (~(PMD_SIZE-1)) + +#ifndef __ASSEMBLY__ + +struct mm_struct; /* * Having the pmd type consist of a pud gets the size right, and allows @@ -17,11 +21,6 @@ struct mm_struct; */ typedef struct { pud_t pud; } pmd_t; -#define PMD_SHIFT PUD_SHIFT -#define PTRS_PER_PMD 1 -#define PMD_SIZE (1UL << PMD_SHIFT) -#define PMD_MASK (~(PMD_SIZE-1)) - /* * The "pud_xxx()" functions here are trivial for a folded two-level * setup: the pmd is never bad, and a pmd always exists (as it's folded @@ -34,6 +33,7 @@ static inline void pud_clear(pud_t *pud) { } #define pmd_ERROR(pmd) (pud_ERROR((pmd).pud)) #define pud_populate(mm, pmd, pte) do { } while (0) +#define pud_populate_kernel(mm, pmd, pte) do { } while (0) /* * (pmds are folded 
into puds so this doesn't get actually called, @@ -45,13 +45,12 @@ static inline pmd_t * pmd_offset(pud_t * pud, unsigned long address) { return (pmd_t *)pud; } -#define pmd_offset pmd_offset #define pmd_val(x) (pud_val((x).pud)) #define __pmd(x) ((pmd_t) { __pud(x) } ) #define pud_page(pud) (pmd_page((pmd_t){ pud })) -#define pud_pgtable(pud) ((pmd_t *)(pmd_page_vaddr((pmd_t){ pud }))) +#define pud_page_vaddr(pud) (pmd_page_vaddr((pmd_t){ pud })) /* * allocating and freeing a pmd is trivial: the 1-entry pmd is @@ -61,7 +60,7 @@ static inline pmd_t * pmd_offset(pud_t * pud, unsigned long address) static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd) { } -#define pmd_free_tlb(tlb, x, a) do { } while (0) +#define __pmd_free_tlb(tlb, x, a) do { } while (0) #undef pmd_addr_end #define pmd_addr_end(addr, end) (end) diff --git a/include/asm-generic/pgtable-nopud.h b/include/asm-generic/pgtable-nopud.h index eb70c6d7ce..0ec4804f40 100644 --- a/include/asm-generic/pgtable-nopud.h +++ b/include/asm-generic/pgtable-nopud.h @@ -1,63 +1,59 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _PGTABLE_NOPUD_H #define _PGTABLE_NOPUD_H -#ifndef __ASSEMBLY__ +#define __PAGETABLE_PUD_FOLDED -#include - -#define __PAGETABLE_PUD_FOLDED 1 - -/* - * Having the pud type consist of a p4d gets the size right, and allows - * us to conceptually access the p4d entry that this pud is folded into - * without casting. 
- */ -typedef struct { p4d_t p4d; } pud_t; - -#define PUD_SHIFT P4D_SHIFT +#define PUD_SHIFT PGDIR_SHIFT #define PTRS_PER_PUD 1 -#define PUD_SIZE (1UL << PUD_SHIFT) +#define PUD_SIZE (_AC(1,UL) << PUD_SHIFT) #define PUD_MASK (~(PUD_SIZE-1)) -/* - * The "p4d_xxx()" functions here are trivial for a folded two-level - * setup: the pud is never bad, and a pud always exists (as it's folded - * into the p4d entry) - */ -static inline int p4d_none(p4d_t p4d) { return 0; } -static inline int p4d_bad(p4d_t p4d) { return 0; } -static inline int p4d_present(p4d_t p4d) { return 1; } -static inline void p4d_clear(p4d_t *p4d) { } -#define pud_ERROR(pud) (p4d_ERROR((pud).p4d)) +#ifndef __ASSEMBLY__ -#define p4d_populate(mm, p4d, pud) do { } while (0) -#define p4d_populate_safe(mm, p4d, pud) do { } while (0) /* - * (puds are folded into p4ds so this doesn't get actually called, + * Having the pud type consist of a pgd gets the size right, and allows + * us to conceptually access the pgd entry that this pud is folded into + * without casting. + */ +typedef struct { pgd_t pgd; } pud_t; + +/* + * The "pgd_xxx()" functions here are trivial for a folded two-level + * setup: the pud is never bad, and a pud always exists (as it's folded + * into the pgd entry) + */ +static inline int pgd_none(pgd_t pgd) { return 0; } +static inline int pgd_bad(pgd_t pgd) { return 0; } +static inline int pgd_present(pgd_t pgd) { return 1; } +static inline void pgd_clear(pgd_t *pgd) { } +#define pud_ERROR(pud) (pgd_ERROR((pud).pgd)) + +#define pgd_populate(mm, pgd, pud) do { } while (0) +#define pgd_populate_kernel(mm, pgd, pud) do { } while (0) +/* + * (puds are folded into pgds so this doesn't get actually called, * but the define is needed for a generic inline function.) 
*/ -#define set_p4d(p4dptr, p4dval) set_pud((pud_t *)(p4dptr), (pud_t) { p4dval }) +#define set_pgd(pgdptr, pgdval) set_pud((pud_t *)(pgdptr), (pud_t) { pgdval }) -static inline pud_t *pud_offset(p4d_t *p4d, unsigned long address) +static inline pud_t * pud_offset(pgd_t * pgd, unsigned long address) { - return (pud_t *)p4d; + return (pud_t *)pgd; } -#define pud_offset pud_offset -#define pud_val(x) (p4d_val((x).p4d)) -#define __pud(x) ((pud_t) { __p4d(x) }) +#define pud_val(x) (pgd_val((x).pgd)) +#define __pud(x) ((pud_t) { __pgd(x) } ) -#define p4d_page(p4d) (pud_page((pud_t){ p4d })) -#define p4d_pgtable(p4d) ((pud_t *)(pud_pgtable((pud_t){ p4d }))) +#define pgd_page(pgd) (pud_page((pud_t){ pgd })) +#define pgd_page_vaddr(pgd) (pud_page_vaddr((pud_t){ pgd })) /* * allocating and freeing a pud is trivial: the 1-entry pud is - * inside the p4d, so has no extra memory associated with it. + * inside the pgd, so has no extra memory associated with it. */ #define pud_alloc_one(mm, address) NULL #define pud_free(mm, x) do { } while (0) -#define pud_free_tlb(tlb, x, a) do { } while (0) +#define __pud_free_tlb(tlb, x, a) do { } while (0) #undef pud_addr_end #define pud_addr_end(addr, end) (end) diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h new file mode 100644 index 0000000000..c1283ca151 --- /dev/null +++ b/include/asm-generic/pgtable.h @@ -0,0 +1,836 @@ +#ifndef _ASM_GENERIC_PGTABLE_H +#define _ASM_GENERIC_PGTABLE_H + +#include + +#ifndef __ASSEMBLY__ +#ifdef CONFIG_MMU + +#include +#include +#include + +#if 4 - defined(__PAGETABLE_PUD_FOLDED) - defined(__PAGETABLE_PMD_FOLDED) != \ + CONFIG_PGTABLE_LEVELS +#error CONFIG_PGTABLE_LEVELS is not consistent with __PAGETABLE_{PUD,PMD}_FOLDED +#endif + +/* + * On almost all architectures and configurations, 0 can be used as the + * upper ceiling to free_pgtables(): on many architectures it has the same + * effect as using TASK_SIZE. 
However, there is one configuration which + * must impose a more careful limit, to avoid freeing kernel pgtables. + */ +#ifndef USER_PGTABLES_CEILING +#define USER_PGTABLES_CEILING 0UL +#endif + +#ifndef __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS +extern int ptep_set_access_flags(struct vm_area_struct *vma, + unsigned long address, pte_t *ptep, + pte_t entry, int dirty); +#endif + +#ifndef __HAVE_ARCH_PMDP_SET_ACCESS_FLAGS +#ifdef CONFIG_TRANSPARENT_HUGEPAGE +extern int pmdp_set_access_flags(struct vm_area_struct *vma, + unsigned long address, pmd_t *pmdp, + pmd_t entry, int dirty); +#else +static inline int pmdp_set_access_flags(struct vm_area_struct *vma, + unsigned long address, pmd_t *pmdp, + pmd_t entry, int dirty) +{ + BUILD_BUG(); + return 0; +} +#endif /* CONFIG_TRANSPARENT_HUGEPAGE */ +#endif + +#ifndef __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG +static inline int ptep_test_and_clear_young(struct vm_area_struct *vma, + unsigned long address, + pte_t *ptep) +{ + pte_t pte = *ptep; + int r = 1; + if (!pte_young(pte)) + r = 0; + else + set_pte_at(vma->vm_mm, address, ptep, pte_mkold(pte)); + return r; +} +#endif + +#ifndef __HAVE_ARCH_PMDP_TEST_AND_CLEAR_YOUNG +#ifdef CONFIG_TRANSPARENT_HUGEPAGE +static inline int pmdp_test_and_clear_young(struct vm_area_struct *vma, + unsigned long address, + pmd_t *pmdp) +{ + pmd_t pmd = *pmdp; + int r = 1; + if (!pmd_young(pmd)) + r = 0; + else + set_pmd_at(vma->vm_mm, address, pmdp, pmd_mkold(pmd)); + return r; +} +#else +static inline int pmdp_test_and_clear_young(struct vm_area_struct *vma, + unsigned long address, + pmd_t *pmdp) +{ + BUILD_BUG(); + return 0; +} +#endif /* CONFIG_TRANSPARENT_HUGEPAGE */ +#endif + +#ifndef __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH +int ptep_clear_flush_young(struct vm_area_struct *vma, + unsigned long address, pte_t *ptep); +#endif + +#ifndef __HAVE_ARCH_PMDP_CLEAR_YOUNG_FLUSH +#ifdef CONFIG_TRANSPARENT_HUGEPAGE +extern int pmdp_clear_flush_young(struct vm_area_struct *vma, + unsigned long address, pmd_t 
*pmdp); +#else +/* + * Despite relevant to THP only, this API is called from generic rmap code + * under PageTransHuge(), hence needs a dummy implementation for !THP + */ +static inline int pmdp_clear_flush_young(struct vm_area_struct *vma, + unsigned long address, pmd_t *pmdp) +{ + BUILD_BUG(); + return 0; +} +#endif /* CONFIG_TRANSPARENT_HUGEPAGE */ +#endif + +#ifndef __HAVE_ARCH_PTEP_GET_AND_CLEAR +static inline pte_t ptep_get_and_clear(struct mm_struct *mm, + unsigned long address, + pte_t *ptep) +{ + pte_t pte = *ptep; + pte_clear(mm, address, ptep); + return pte; +} +#endif + +#ifndef __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR +#ifdef CONFIG_TRANSPARENT_HUGEPAGE +static inline pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm, + unsigned long address, + pmd_t *pmdp) +{ + pmd_t pmd = *pmdp; + pmd_clear(pmdp); + return pmd; +} +#endif /* CONFIG_TRANSPARENT_HUGEPAGE */ +#endif + +#ifndef __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR_FULL +#ifdef CONFIG_TRANSPARENT_HUGEPAGE +static inline pmd_t pmdp_huge_get_and_clear_full(struct mm_struct *mm, + unsigned long address, pmd_t *pmdp, + int full) +{ + return pmdp_huge_get_and_clear(mm, address, pmdp); +} +#endif /* CONFIG_TRANSPARENT_HUGEPAGE */ +#endif + +#ifndef __HAVE_ARCH_PTEP_GET_AND_CLEAR_FULL +static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm, + unsigned long address, pte_t *ptep, + int full) +{ + pte_t pte; + pte = ptep_get_and_clear(mm, address, ptep); + return pte; +} +#endif + +/* + * Some architectures may be able to avoid expensive synchronization + * primitives when modifications are made to PTE's which are already + * not present, or in the process of an address space destruction. 
+ */ +#ifndef __HAVE_ARCH_PTE_CLEAR_NOT_PRESENT_FULL +static inline void pte_clear_not_present_full(struct mm_struct *mm, + unsigned long address, + pte_t *ptep, + int full) +{ + pte_clear(mm, address, ptep); +} +#endif + +#ifndef __HAVE_ARCH_PTEP_CLEAR_FLUSH +extern pte_t ptep_clear_flush(struct vm_area_struct *vma, + unsigned long address, + pte_t *ptep); +#endif + +#ifndef __HAVE_ARCH_PMDP_HUGE_CLEAR_FLUSH +extern pmd_t pmdp_huge_clear_flush(struct vm_area_struct *vma, + unsigned long address, + pmd_t *pmdp); +#endif + +#ifndef __HAVE_ARCH_PTEP_SET_WRPROTECT +struct mm_struct; +static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long address, pte_t *ptep) +{ + pte_t old_pte = *ptep; + set_pte_at(mm, address, ptep, pte_wrprotect(old_pte)); +} +#endif + +#ifndef __HAVE_ARCH_PMDP_SET_WRPROTECT +#ifdef CONFIG_TRANSPARENT_HUGEPAGE +static inline void pmdp_set_wrprotect(struct mm_struct *mm, + unsigned long address, pmd_t *pmdp) +{ + pmd_t old_pmd = *pmdp; + set_pmd_at(mm, address, pmdp, pmd_wrprotect(old_pmd)); +} +#else +static inline void pmdp_set_wrprotect(struct mm_struct *mm, + unsigned long address, pmd_t *pmdp) +{ + BUILD_BUG(); +} +#endif /* CONFIG_TRANSPARENT_HUGEPAGE */ +#endif + +#ifndef pmdp_collapse_flush +#ifdef CONFIG_TRANSPARENT_HUGEPAGE +extern pmd_t pmdp_collapse_flush(struct vm_area_struct *vma, + unsigned long address, pmd_t *pmdp); +#else +static inline pmd_t pmdp_collapse_flush(struct vm_area_struct *vma, + unsigned long address, + pmd_t *pmdp) +{ + BUILD_BUG(); + return *pmdp; +} +#define pmdp_collapse_flush pmdp_collapse_flush +#endif /* CONFIG_TRANSPARENT_HUGEPAGE */ +#endif + +#ifndef __HAVE_ARCH_PGTABLE_DEPOSIT +extern void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp, + pgtable_t pgtable); +#endif + +#ifndef __HAVE_ARCH_PGTABLE_WITHDRAW +extern pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp); +#endif + +#ifndef __HAVE_ARCH_PMDP_INVALIDATE +extern void pmdp_invalidate(struct 
vm_area_struct *vma, unsigned long address, + pmd_t *pmdp); +#endif + +#ifndef __HAVE_ARCH_PMDP_HUGE_SPLIT_PREPARE +static inline void pmdp_huge_split_prepare(struct vm_area_struct *vma, + unsigned long address, pmd_t *pmdp) +{ + +} +#endif + +#ifndef __HAVE_ARCH_PTE_SAME +static inline int pte_same(pte_t pte_a, pte_t pte_b) +{ + return pte_val(pte_a) == pte_val(pte_b); +} +#endif + +#ifndef __HAVE_ARCH_PTE_UNUSED +/* + * Some architectures provide facilities to virtualization guests + * so that they can flag allocated pages as unused. This allows the + * host to transparently reclaim unused pages. This function returns + * whether the pte's page is unused. + */ +static inline int pte_unused(pte_t pte) +{ + return 0; +} +#endif + +#ifndef __HAVE_ARCH_PMD_SAME +#ifdef CONFIG_TRANSPARENT_HUGEPAGE +static inline int pmd_same(pmd_t pmd_a, pmd_t pmd_b) +{ + return pmd_val(pmd_a) == pmd_val(pmd_b); +} +#else /* CONFIG_TRANSPARENT_HUGEPAGE */ +static inline int pmd_same(pmd_t pmd_a, pmd_t pmd_b) +{ + BUILD_BUG(); + return 0; +} +#endif /* CONFIG_TRANSPARENT_HUGEPAGE */ +#endif + +#ifndef __HAVE_ARCH_PGD_OFFSET_GATE +#define pgd_offset_gate(mm, addr) pgd_offset(mm, addr) +#endif + +#ifndef __HAVE_ARCH_MOVE_PTE +#define move_pte(pte, prot, old_addr, new_addr) (pte) +#endif + +#ifndef pte_accessible +# define pte_accessible(mm, pte) ((void)(pte), 1) +#endif + +#ifndef flush_tlb_fix_spurious_fault +#define flush_tlb_fix_spurious_fault(vma, address) flush_tlb_page(vma, address) +#endif + +#ifndef pgprot_noncached +#define pgprot_noncached(prot) (prot) +#endif + +#ifndef pgprot_writecombine +#define pgprot_writecombine pgprot_noncached +#endif + +#ifndef pgprot_writethrough +#define pgprot_writethrough pgprot_noncached +#endif + +#ifndef pgprot_device +#define pgprot_device pgprot_noncached +#endif + +#ifndef pgprot_modify +#define pgprot_modify pgprot_modify +static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot) +{ + if (pgprot_val(oldprot) == 
pgprot_val(pgprot_noncached(oldprot))) + newprot = pgprot_noncached(newprot); + if (pgprot_val(oldprot) == pgprot_val(pgprot_writecombine(oldprot))) + newprot = pgprot_writecombine(newprot); + if (pgprot_val(oldprot) == pgprot_val(pgprot_device(oldprot))) + newprot = pgprot_device(newprot); + return newprot; +} +#endif + +/* + * When walking page tables, get the address of the next boundary, + * or the end address of the range if that comes earlier. Although no + * vma end wraps to 0, rounded up __boundary may wrap to 0 throughout. + */ + +#define pgd_addr_end(addr, end) \ +({ unsigned long __boundary = ((addr) + PGDIR_SIZE) & PGDIR_MASK; \ + (__boundary - 1 < (end) - 1)? __boundary: (end); \ +}) + +#ifndef pud_addr_end +#define pud_addr_end(addr, end) \ +({ unsigned long __boundary = ((addr) + PUD_SIZE) & PUD_MASK; \ + (__boundary - 1 < (end) - 1)? __boundary: (end); \ +}) +#endif + +#ifndef pmd_addr_end +#define pmd_addr_end(addr, end) \ +({ unsigned long __boundary = ((addr) + PMD_SIZE) & PMD_MASK; \ + (__boundary - 1 < (end) - 1)? __boundary: (end); \ +}) +#endif + +/* + * When walking page tables, we usually want to skip any p?d_none entries; + * and any p?d_bad entries - reporting the error before resetting to none. + * Do the tests inline, but report and clear the bad entry in mm/memory.c. 
+ */ +void pgd_clear_bad(pgd_t *); +void pud_clear_bad(pud_t *); +void pmd_clear_bad(pmd_t *); + +static inline int pgd_none_or_clear_bad(pgd_t *pgd) +{ + if (pgd_none(*pgd)) + return 1; + if (unlikely(pgd_bad(*pgd))) { + pgd_clear_bad(pgd); + return 1; + } + return 0; +} + +static inline int pud_none_or_clear_bad(pud_t *pud) +{ + if (pud_none(*pud)) + return 1; + if (unlikely(pud_bad(*pud))) { + pud_clear_bad(pud); + return 1; + } + return 0; +} + +static inline int pmd_none_or_clear_bad(pmd_t *pmd) +{ + if (pmd_none(*pmd)) + return 1; + if (unlikely(pmd_bad(*pmd))) { + pmd_clear_bad(pmd); + return 1; + } + return 0; +} + +static inline pte_t __ptep_modify_prot_start(struct mm_struct *mm, + unsigned long addr, + pte_t *ptep) +{ + /* + * Get the current pte state, but zero it out to make it + * non-present, preventing the hardware from asynchronously + * updating it. + */ + return ptep_get_and_clear(mm, addr, ptep); +} + +static inline void __ptep_modify_prot_commit(struct mm_struct *mm, + unsigned long addr, + pte_t *ptep, pte_t pte) +{ + /* + * The pte is non-present, so there's no hardware state to + * preserve. + */ + set_pte_at(mm, addr, ptep, pte); +} + +#ifndef __HAVE_ARCH_PTEP_MODIFY_PROT_TRANSACTION +/* + * Start a pte protection read-modify-write transaction, which + * protects against asynchronous hardware modifications to the pte. + * The intention is not to prevent the hardware from making pte + * updates, but to prevent any updates it may make from being lost. + * + * This does not protect against other software modifications of the + * pte; the appropriate pte lock must be held over the transation. + * + * Note that this interface is intended to be batchable, meaning that + * ptep_modify_prot_commit may not actually update the pte, but merely + * queue the update to be done at some later time. The update must be + * actually committed before the pte lock is released, however. 
+ */ +static inline pte_t ptep_modify_prot_start(struct mm_struct *mm, + unsigned long addr, + pte_t *ptep) +{ + return __ptep_modify_prot_start(mm, addr, ptep); +} + +/* + * Commit an update to a pte, leaving any hardware-controlled bits in + * the PTE unmodified. + */ +static inline void ptep_modify_prot_commit(struct mm_struct *mm, + unsigned long addr, + pte_t *ptep, pte_t pte) +{ + __ptep_modify_prot_commit(mm, addr, ptep, pte); +} +#endif /* __HAVE_ARCH_PTEP_MODIFY_PROT_TRANSACTION */ +#endif /* CONFIG_MMU */ + +/* + * A facility to provide lazy MMU batching. This allows PTE updates and + * page invalidations to be delayed until a call to leave lazy MMU mode + * is issued. Some architectures may benefit from doing this, and it is + * beneficial for both shadow and direct mode hypervisors, which may batch + * the PTE updates which happen during this window. Note that using this + * interface requires that read hazards be removed from the code. A read + * hazard could result in the direct mode hypervisor case, since the actual + * write to the page tables may not yet have taken place, so reads though + * a raw PTE pointer after it has been modified are not guaranteed to be + * up to date. This mode can only be entered and left under the protection of + * the page table locks for all page tables which may be modified. In the UP + * case, this is required so that preemption is disabled, and in the SMP case, + * it must synchronize the delayed page table writes properly on other CPUs. + */ +#ifndef __HAVE_ARCH_ENTER_LAZY_MMU_MODE +#define arch_enter_lazy_mmu_mode() do {} while (0) +#define arch_leave_lazy_mmu_mode() do {} while (0) +#define arch_flush_lazy_mmu_mode() do {} while (0) +#endif + +/* + * A facility to provide batching of the reload of page tables and + * other process state with the actual context switch code for + * paravirtualized guests. 
By convention, only one of the batched + * update (lazy) modes (CPU, MMU) should be active at any given time, + * entry should never be nested, and entry and exits should always be + * paired. This is for sanity of maintaining and reasoning about the + * kernel code. In this case, the exit (end of the context switch) is + * in architecture-specific code, and so doesn't need a generic + * definition. + */ +#ifndef __HAVE_ARCH_START_CONTEXT_SWITCH +#define arch_start_context_switch(prev) do {} while (0) +#endif + +#ifndef CONFIG_HAVE_ARCH_SOFT_DIRTY +static inline int pte_soft_dirty(pte_t pte) +{ + return 0; +} + +static inline int pmd_soft_dirty(pmd_t pmd) +{ + return 0; +} + +static inline pte_t pte_mksoft_dirty(pte_t pte) +{ + return pte; +} + +static inline pmd_t pmd_mksoft_dirty(pmd_t pmd) +{ + return pmd; +} + +static inline pte_t pte_clear_soft_dirty(pte_t pte) +{ + return pte; +} + +static inline pmd_t pmd_clear_soft_dirty(pmd_t pmd) +{ + return pmd; +} + +static inline pte_t pte_swp_mksoft_dirty(pte_t pte) +{ + return pte; +} + +static inline int pte_swp_soft_dirty(pte_t pte) +{ + return 0; +} + +static inline pte_t pte_swp_clear_soft_dirty(pte_t pte) +{ + return pte; +} +#endif + +#ifndef __HAVE_PFNMAP_TRACKING +/* + * Interfaces that can be used by architecture code to keep track of + * memory type of pfn mappings specified by the remap_pfn_range, + * vm_insert_pfn. + */ + +/* + * track_pfn_remap is called when a _new_ pfn mapping is being established + * by remap_pfn_range() for physical range indicated by pfn and size. + */ +static inline int track_pfn_remap(struct vm_area_struct *vma, pgprot_t *prot, + unsigned long pfn, unsigned long addr, + unsigned long size) +{ + return 0; +} + +/* + * track_pfn_insert is called when a _new_ single pfn is established + * by vm_insert_pfn(). 
+ */ +static inline int track_pfn_insert(struct vm_area_struct *vma, pgprot_t *prot, + pfn_t pfn) +{ + return 0; +} + +/* + * track_pfn_copy is called when vma that is covering the pfnmap gets + * copied through copy_page_range(). + */ +static inline int track_pfn_copy(struct vm_area_struct *vma) +{ + return 0; +} + +/* + * untrack_pfn is called while unmapping a pfnmap for a region. + * untrack can be called for a specific region indicated by pfn and size or + * can be for the entire vma (in which case pfn, size are zero). + */ +static inline void untrack_pfn(struct vm_area_struct *vma, + unsigned long pfn, unsigned long size) +{ +} + +/* + * untrack_pfn_moved is called while mremapping a pfnmap for a new region. + */ +static inline void untrack_pfn_moved(struct vm_area_struct *vma) +{ +} +#else +extern int track_pfn_remap(struct vm_area_struct *vma, pgprot_t *prot, + unsigned long pfn, unsigned long addr, + unsigned long size); +extern int track_pfn_insert(struct vm_area_struct *vma, pgprot_t *prot, + pfn_t pfn); +extern int track_pfn_copy(struct vm_area_struct *vma); +extern void untrack_pfn(struct vm_area_struct *vma, unsigned long pfn, + unsigned long size); +extern void untrack_pfn_moved(struct vm_area_struct *vma); +#endif + +#ifdef __HAVE_COLOR_ZERO_PAGE +static inline int is_zero_pfn(unsigned long pfn) +{ + extern unsigned long zero_pfn; + unsigned long offset_from_zero_pfn = pfn - zero_pfn; + return offset_from_zero_pfn <= (zero_page_mask >> PAGE_SHIFT); +} + +#define my_zero_pfn(addr) page_to_pfn(ZERO_PAGE(addr)) + +#else +static inline int is_zero_pfn(unsigned long pfn) +{ + extern unsigned long zero_pfn; + return pfn == zero_pfn; +} + +static inline unsigned long my_zero_pfn(unsigned long addr) +{ + extern unsigned long zero_pfn; + return zero_pfn; +} +#endif + +#ifdef CONFIG_MMU + +#ifndef CONFIG_TRANSPARENT_HUGEPAGE +static inline int pmd_trans_huge(pmd_t pmd) +{ + return 0; +} +#ifndef __HAVE_ARCH_PMD_WRITE +static inline int pmd_write(pmd_t pmd) +{ 
+ BUG(); + return 0; +} +#endif /* __HAVE_ARCH_PMD_WRITE */ +#endif /* CONFIG_TRANSPARENT_HUGEPAGE */ + +#ifndef pmd_read_atomic +static inline pmd_t pmd_read_atomic(pmd_t *pmdp) +{ + /* + * Depend on compiler for an atomic pmd read. NOTE: this is + * only going to work, if the pmdval_t isn't larger than + * an unsigned long. + */ + return *pmdp; +} +#endif + +#ifndef pmd_move_must_withdraw +static inline int pmd_move_must_withdraw(spinlock_t *new_pmd_ptl, + spinlock_t *old_pmd_ptl) +{ + /* + * With split pmd lock we also need to move preallocated + * PTE page table if new_pmd is on different PMD page table. + */ + return new_pmd_ptl != old_pmd_ptl; +} +#endif + +/* + * This function is meant to be used by sites walking pagetables with + * the mmap_sem hold in read mode to protect against MADV_DONTNEED and + * transhuge page faults. MADV_DONTNEED can convert a transhuge pmd + * into a null pmd and the transhuge page fault can convert a null pmd + * into an hugepmd or into a regular pmd (if the hugepage allocation + * fails). While holding the mmap_sem in read mode the pmd becomes + * stable and stops changing under us only if it's not null and not a + * transhuge pmd. When those races occurs and this function makes a + * difference vs the standard pmd_none_or_clear_bad, the result is + * undefined so behaving like if the pmd was none is safe (because it + * can return none anyway). The compiler level barrier() is critically + * important to compute the two checks atomically on the same pmdval. + * + * For 32bit kernels with a 64bit large pmd_t this automatically takes + * care of reading the pmd atomically to avoid SMP race conditions + * against pmd_populate() when the mmap_sem is hold for reading by the + * caller (a special atomic read not done by "gcc" as in the generic + * version above, is also needed when THP is disabled because the page + * fault can populate the pmd from under us). 
+ */ +static inline int pmd_none_or_trans_huge_or_clear_bad(pmd_t *pmd) +{ + pmd_t pmdval = pmd_read_atomic(pmd); + /* + * The barrier will stabilize the pmdval in a register or on + * the stack so that it will stop changing under the code. + * + * When CONFIG_TRANSPARENT_HUGEPAGE=y on x86 32bit PAE, + * pmd_read_atomic is allowed to return a not atomic pmdval + * (for example pointing to an hugepage that has never been + * mapped in the pmd). The below checks will only care about + * the low part of the pmd with 32bit PAE x86 anyway, with the + * exception of pmd_none(). So the important thing is that if + * the low part of the pmd is found null, the high part will + * be also null or the pmd_none() check below would be + * confused. + */ +#ifdef CONFIG_TRANSPARENT_HUGEPAGE + barrier(); +#endif + if (pmd_none(pmdval) || pmd_trans_huge(pmdval)) + return 1; + if (unlikely(pmd_bad(pmdval))) { + pmd_clear_bad(pmd); + return 1; + } + return 0; +} + +/* + * This is a noop if Transparent Hugepage Support is not built into + * the kernel. Otherwise it is equivalent to + * pmd_none_or_trans_huge_or_clear_bad(), and shall only be called in + * places that already verified the pmd is not none and they want to + * walk ptes while holding the mmap sem in read mode (write mode don't + * need this). If THP is not enabled, the pmd can't go away under the + * code even if MADV_DONTNEED runs, but if THP is enabled we need to + * run a pmd_trans_unstable before walking the ptes after + * split_huge_page_pmd returns (because it may have run when the pmd + * become null, but then a page fault can map in a THP and not a + * regular page). 
+ */ +static inline int pmd_trans_unstable(pmd_t *pmd) +{ +#ifdef CONFIG_TRANSPARENT_HUGEPAGE + return pmd_none_or_trans_huge_or_clear_bad(pmd); +#else + return 0; +#endif +} + +#ifndef CONFIG_NUMA_BALANCING +/* + * Technically a PTE can be PROTNONE even when not doing NUMA balancing but + * the only case the kernel cares is for NUMA balancing and is only ever set + * when the VMA is accessible. For PROT_NONE VMAs, the PTEs are not marked + * _PAGE_PROTNONE so by by default, implement the helper as "always no". It + * is the responsibility of the caller to distinguish between PROT_NONE + * protections and NUMA hinting fault protections. + */ +static inline int pte_protnone(pte_t pte) +{ + return 0; +} + +static inline int pmd_protnone(pmd_t pmd) +{ + return 0; +} +#endif /* CONFIG_NUMA_BALANCING */ + +#ifndef __HAVE_ARCH_PAX_OPEN_KERNEL +#ifdef CONFIG_PAX_KERNEXEC +#error KERNEXEC requires pax_open_kernel +#else +static inline unsigned long pax_open_kernel(void) { return 0; } +#endif +#endif + +#ifndef __HAVE_ARCH_PAX_CLOSE_KERNEL +#ifdef CONFIG_PAX_KERNEXEC +#error KERNEXEC requires pax_close_kernel +#else +static inline unsigned long pax_close_kernel(void) { return 0; } +#endif +#endif + +#endif /* CONFIG_MMU */ + +#ifdef CONFIG_HAVE_ARCH_HUGE_VMAP +int pud_set_huge(pud_t *pud, phys_addr_t addr, pgprot_t prot); +int pmd_set_huge(pmd_t *pmd, phys_addr_t addr, pgprot_t prot); +int pud_clear_huge(pud_t *pud); +int pmd_clear_huge(pmd_t *pmd); +#else /* !CONFIG_HAVE_ARCH_HUGE_VMAP */ +static inline int pud_set_huge(pud_t *pud, phys_addr_t addr, pgprot_t prot) +{ + return 0; +} +static inline int pmd_set_huge(pmd_t *pmd, phys_addr_t addr, pgprot_t prot) +{ + return 0; +} +static inline int pud_clear_huge(pud_t *pud) +{ + return 0; +} +static inline int pmd_clear_huge(pmd_t *pmd) +{ + return 0; +} +#endif /* CONFIG_HAVE_ARCH_HUGE_VMAP */ + +#ifndef __HAVE_ARCH_FLUSH_PMD_TLB_RANGE +#ifdef CONFIG_TRANSPARENT_HUGEPAGE +/* + * ARCHes with special requirements for evicting 
THP backing TLB entries can + * implement this. Otherwise also, it can help optimize normal TLB flush in + * THP regime. stock flush_tlb_range() typically has optimization to nuke the + * entire TLB TLB if flush span is greater than a threshold, which will + * likely be true for a single huge page. Thus a single thp flush will + * invalidate the entire TLB which is not desitable. + * e.g. see arch/arc: flush_pmd_tlb_range + */ +#define flush_pmd_tlb_range(vma, addr, end) flush_tlb_range(vma, addr, end) +#else +#define flush_pmd_tlb_range(vma, addr, end) BUILD_BUG() +#endif +#endif + +struct file; +int phys_mem_access_prot_allowed(struct file *file, unsigned long pfn, + unsigned long size, pgprot_t *vma_prot); +#endif /* !__ASSEMBLY__ */ + +#ifndef io_remap_pfn_range +#define io_remap_pfn_range remap_pfn_range +#endif + +#ifndef has_transparent_hugepage +#ifdef CONFIG_TRANSPARENT_HUGEPAGE +#define has_transparent_hugepage() 1 +#else +#define has_transparent_hugepage() 0 +#endif +#endif + +#endif /* _ASM_GENERIC_PGTABLE_H */ diff --git a/include/asm-generic/preempt.h b/include/asm-generic/preempt.h index b4d43a4af5..c1cde35775 100644 --- a/include/asm-generic/preempt.h +++ b/include/asm-generic/preempt.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef __ASM_PREEMPT_H #define __ASM_PREEMPT_H @@ -29,7 +28,7 @@ static __always_inline void preempt_count_set(int pc) } while (0) #define init_idle_preempt_count(p, cpu) do { \ - task_thread_info(p)->preempt_count = PREEMPT_DISABLED; \ + task_thread_info(p)->preempt_count = PREEMPT_ENABLED; \ } while (0) static __always_inline void set_preempt_need_resched(void) @@ -78,11 +77,11 @@ static __always_inline bool should_resched(int preempt_offset) tif_need_resched()); } -#ifdef CONFIG_PREEMPTION +#ifdef CONFIG_PREEMPT extern asmlinkage void preempt_schedule(void); #define __preempt_schedule() preempt_schedule() extern asmlinkage void preempt_schedule_notrace(void); #define __preempt_schedule_notrace() 
preempt_schedule_notrace() -#endif /* CONFIG_PREEMPTION */ +#endif /* CONFIG_PREEMPT */ #endif /* __ASM_PREEMPT_H */ diff --git a/include/asm-generic/ptrace.h b/include/asm-generic/ptrace.h new file mode 100644 index 0000000000..82e674f6b3 --- /dev/null +++ b/include/asm-generic/ptrace.h @@ -0,0 +1,74 @@ +/* + * Common low level (register) ptrace helpers + * + * Copyright 2004-2011 Analog Devices Inc. + * + * Licensed under the GPL-2 or later. + */ + +#ifndef __ASM_GENERIC_PTRACE_H__ +#define __ASM_GENERIC_PTRACE_H__ + +#ifndef __ASSEMBLY__ + +/* Helpers for working with the instruction pointer */ +#ifndef GET_IP +#define GET_IP(regs) ((regs)->pc) +#endif +#ifndef SET_IP +#define SET_IP(regs, val) (GET_IP(regs) = (val)) +#endif + +static inline unsigned long instruction_pointer(struct pt_regs *regs) +{ + return GET_IP(regs); +} +static inline void instruction_pointer_set(struct pt_regs *regs, + unsigned long val) +{ + SET_IP(regs, val); +} + +#ifndef profile_pc +#define profile_pc(regs) instruction_pointer(regs) +#endif + +/* Helpers for working with the user stack pointer */ +#ifndef GET_USP +#define GET_USP(regs) ((regs)->usp) +#endif +#ifndef SET_USP +#define SET_USP(regs, val) (GET_USP(regs) = (val)) +#endif + +static inline unsigned long user_stack_pointer(struct pt_regs *regs) +{ + return GET_USP(regs); +} +static inline void user_stack_pointer_set(struct pt_regs *regs, + unsigned long val) +{ + SET_USP(regs, val); +} + +/* Helpers for working with the frame pointer */ +#ifndef GET_FP +#define GET_FP(regs) ((regs)->fp) +#endif +#ifndef SET_FP +#define SET_FP(regs, val) (GET_FP(regs) = (val)) +#endif + +static inline unsigned long frame_pointer(struct pt_regs *regs) +{ + return GET_FP(regs); +} +static inline void frame_pointer_set(struct pt_regs *regs, + unsigned long val) +{ + SET_FP(regs, val); +} + +#endif /* __ASSEMBLY__ */ + +#endif diff --git a/include/asm-generic/qrwlock.h b/include/asm-generic/qrwlock.h index 7ae0ece07b..7d026bf277 100644 --- 
a/include/asm-generic/qrwlock.h +++ b/include/asm-generic/qrwlock.h @@ -1,7 +1,16 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * Queue read/write lock * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * * (C) Copyright 2013-2014 Hewlett-Packard Development Company, L.P. * * Authors: Waiman Long @@ -15,23 +24,52 @@ #include -/* Must be included from asm/spinlock.h after defining arch_spin_is_locked. */ - /* * Writer states & reader shift and bias. + * + * | +0 | +1 | +2 | +3 | + * ----+----+----+----+----+ + * LE | 78 | 56 | 34 | 12 | 0x12345678 + * ----+----+----+----+----+ + * | wr | rd | + * +----+----+----+----+ + * + * ----+----+----+----+----+ + * BE | 12 | 34 | 56 | 78 | 0x12345678 + * ----+----+----+----+----+ + * | rd | wr | + * +----+----+----+----+ */ -#define _QW_WAITING 0x100 /* A writer is waiting */ -#define _QW_LOCKED 0x0ff /* A writer holds the lock */ -#define _QW_WMASK 0x1ff /* Writer mask */ -#define _QR_SHIFT 9 /* Reader count shift */ +#define _QW_WAITING 1 /* A writer is waiting */ +#define _QW_LOCKED 0xff /* A writer holds the lock */ +#define _QW_WMASK 0xff /* Writer mask */ +#define _QR_SHIFT 8 /* Reader count shift */ #define _QR_BIAS (1U << _QR_SHIFT) /* * External function declarations */ -extern void queued_read_lock_slowpath(struct qrwlock *lock); +extern void queued_read_lock_slowpath(struct qrwlock *lock, u32 cnts); extern void queued_write_lock_slowpath(struct qrwlock *lock); +/** + * queued_read_can_lock- would read_trylock() succeed? 
+ * @lock: Pointer to queue rwlock structure + */ +static inline int queued_read_can_lock(struct qrwlock *lock) +{ + return !(atomic_read(&lock->cnts) & _QW_WMASK); +} + +/** + * queued_write_can_lock- would write_trylock() succeed? + * @lock: Pointer to queue rwlock structure + */ +static inline int queued_write_can_lock(struct qrwlock *lock) +{ + return !atomic_read(&lock->cnts); +} + /** * queued_read_trylock - try to acquire read lock of a queue rwlock * @lock : Pointer to queue rwlock structure @@ -39,7 +77,7 @@ extern void queued_write_lock_slowpath(struct qrwlock *lock); */ static inline int queued_read_trylock(struct qrwlock *lock) { - int cnts; + u32 cnts; cnts = atomic_read(&lock->cnts); if (likely(!(cnts & _QW_WMASK))) { @@ -58,14 +96,14 @@ static inline int queued_read_trylock(struct qrwlock *lock) */ static inline int queued_write_trylock(struct qrwlock *lock) { - int cnts; + u32 cnts; cnts = atomic_read(&lock->cnts); if (unlikely(cnts)) return 0; - return likely(atomic_try_cmpxchg_acquire(&lock->cnts, &cnts, - _QW_LOCKED)); + return likely(atomic_cmpxchg_acquire(&lock->cnts, + cnts, cnts | _QW_LOCKED) == cnts); } /** * queued_read_lock - acquire read lock of a queue rwlock @@ -73,14 +111,14 @@ static inline int queued_write_trylock(struct qrwlock *lock) */ static inline void queued_read_lock(struct qrwlock *lock) { - int cnts; + u32 cnts; cnts = atomic_add_return_acquire(_QR_BIAS, &lock->cnts); if (likely(!(cnts & _QW_WMASK))) return; /* The slowpath will decrement the reader count, if necessary. */ - queued_read_lock_slowpath(lock); + queued_read_lock_slowpath(lock, cnts); } /** @@ -89,9 +127,8 @@ static inline void queued_read_lock(struct qrwlock *lock) */ static inline void queued_write_lock(struct qrwlock *lock) { - int cnts = 0; /* Optimize for the unfair lock case where the fair flag is 0. 
*/ - if (likely(atomic_try_cmpxchg_acquire(&lock->cnts, &cnts, _QW_LOCKED))) + if (atomic_cmpxchg_acquire(&lock->cnts, 0, _QW_LOCKED) == 0) return; queued_write_lock_slowpath(lock); @@ -109,35 +146,36 @@ static inline void queued_read_unlock(struct qrwlock *lock) (void)atomic_sub_return_release(_QR_BIAS, &lock->cnts); } +/** + * __qrwlock_write_byte - retrieve the write byte address of a queue rwlock + * @lock : Pointer to queue rwlock structure + * Return: the write byte address of a queue rwlock + */ +static inline u8 *__qrwlock_write_byte(struct qrwlock *lock) +{ + return (u8 *)lock + 3 * IS_BUILTIN(CONFIG_CPU_BIG_ENDIAN); +} + /** * queued_write_unlock - release write lock of a queue rwlock * @lock : Pointer to queue rwlock structure */ static inline void queued_write_unlock(struct qrwlock *lock) { - smp_store_release(&lock->wlocked, 0); -} - -/** - * queued_rwlock_is_contended - check if the lock is contended - * @lock : Pointer to queue rwlock structure - * Return: 1 if lock contended, 0 otherwise - */ -static inline int queued_rwlock_is_contended(struct qrwlock *lock) -{ - return arch_spin_is_locked(&lock->wait_lock); + smp_store_release(__qrwlock_write_byte(lock), 0); } /* * Remapping rwlock architecture specific functions to the corresponding * queue rwlock functions. 
*/ -#define arch_read_lock(l) queued_read_lock(l) -#define arch_write_lock(l) queued_write_lock(l) -#define arch_read_trylock(l) queued_read_trylock(l) -#define arch_write_trylock(l) queued_write_trylock(l) -#define arch_read_unlock(l) queued_read_unlock(l) -#define arch_write_unlock(l) queued_write_unlock(l) -#define arch_rwlock_is_contended(l) queued_rwlock_is_contended(l) +#define arch_read_can_lock(l) queued_read_can_lock(l) +#define arch_write_can_lock(l) queued_write_can_lock(l) +#define arch_read_lock(l) queued_read_lock(l) +#define arch_write_lock(l) queued_write_lock(l) +#define arch_read_trylock(l) queued_read_trylock(l) +#define arch_write_trylock(l) queued_write_trylock(l) +#define arch_read_unlock(l) queued_read_unlock(l) +#define arch_write_unlock(l) queued_write_unlock(l) #endif /* __ASM_GENERIC_QRWLOCK_H */ diff --git a/include/asm-generic/qrwlock_types.h b/include/asm-generic/qrwlock_types.h index c36f1d5a25..0abc6b6062 100644 --- a/include/asm-generic/qrwlock_types.h +++ b/include/asm-generic/qrwlock_types.h @@ -1,9 +1,7 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef __ASM_GENERIC_QRWLOCK_TYPES_H #define __ASM_GENERIC_QRWLOCK_TYPES_H #include -#include #include /* @@ -11,23 +9,12 @@ */ typedef struct qrwlock { - union { - atomic_t cnts; - struct { -#ifdef __LITTLE_ENDIAN - u8 wlocked; /* Locked for write? */ - u8 __lstate[3]; -#else - u8 __lstate[3]; - u8 wlocked; /* Locked for write? 
*/ -#endif - }; - }; + atomic_t cnts; arch_spinlock_t wait_lock; } arch_rwlock_t; #define __ARCH_RW_LOCK_UNLOCKED { \ - { .cnts = ATOMIC_INIT(0), }, \ + .cnts = ATOMIC_INIT(0), \ .wait_lock = __ARCH_SPIN_LOCK_UNLOCKED, \ } diff --git a/include/asm-generic/qspinlock.h b/include/asm-generic/qspinlock.h index d74b138255..9f0681bf1e 100644 --- a/include/asm-generic/qspinlock.h +++ b/include/asm-generic/qspinlock.h @@ -1,7 +1,16 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * Queued spinlock * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * * (C) Copyright 2013-2015 Hewlett-Packard Development Company, L.P. * (C) Copyright 2015 Hewlett-Packard Enterprise Development LP * @@ -11,17 +20,29 @@ #define __ASM_GENERIC_QSPINLOCK_H #include -#include -#ifndef queued_spin_is_locked +/** + * queued_spin_unlock_wait - wait until the _current_ lock holder releases the lock + * @lock : Pointer to queued spinlock structure + * + * There is a very slight possibility of live-lock if the lockers keep coming + * and the waiter is just unfortunate enough to not see any unlock state. + */ +#ifndef queued_spin_unlock_wait +extern void queued_spin_unlock_wait(struct qspinlock *lock); +#endif + /** * queued_spin_is_locked - is the spinlock locked? * @lock: Pointer to queued spinlock structure * Return: 1 if it is locked, 0 otherwise */ +#ifndef queued_spin_is_locked static __always_inline int queued_spin_is_locked(struct qspinlock *lock) { /* + * See queued_spin_unlock_wait(). 
+ * * Any !0 state indicates it is locked, even if _Q_LOCKED_VAL * isn't immediately observable. */ @@ -60,31 +81,27 @@ static __always_inline int queued_spin_is_contended(struct qspinlock *lock) */ static __always_inline int queued_spin_trylock(struct qspinlock *lock) { - int val = atomic_read(&lock->val); - - if (unlikely(val)) - return 0; - - return likely(atomic_try_cmpxchg_acquire(&lock->val, &val, _Q_LOCKED_VAL)); + if (!atomic_read(&lock->val) && + (atomic_cmpxchg_acquire(&lock->val, 0, _Q_LOCKED_VAL) == 0)) + return 1; + return 0; } extern void queued_spin_lock_slowpath(struct qspinlock *lock, u32 val); -#ifndef queued_spin_lock /** * queued_spin_lock - acquire a queued spinlock * @lock: Pointer to queued spinlock structure */ static __always_inline void queued_spin_lock(struct qspinlock *lock) { - int val = 0; + u32 val; - if (likely(atomic_try_cmpxchg_acquire(&lock->val, &val, _Q_LOCKED_VAL))) + val = atomic_cmpxchg_acquire(&lock->val, 0, _Q_LOCKED_VAL); + if (likely(val == 0)) return; - queued_spin_lock_slowpath(lock, val); } -#endif #ifndef queued_spin_unlock /** @@ -96,7 +113,7 @@ static __always_inline void queued_spin_unlock(struct qspinlock *lock) /* * unlock() needs release semantics: */ - smp_store_release(&lock->locked, 0); + (void)atomic_sub_return_release(_Q_LOCKED_VAL, &lock->val); } #endif @@ -117,5 +134,7 @@ static __always_inline bool virt_spin_lock(struct qspinlock *lock) #define arch_spin_lock(l) queued_spin_lock(l) #define arch_spin_trylock(l) queued_spin_trylock(l) #define arch_spin_unlock(l) queued_spin_unlock(l) +#define arch_spin_lock_flags(l, f) queued_spin_lock(l) +#define arch_spin_unlock_wait(l) queued_spin_unlock_wait(l) #endif /* __ASM_GENERIC_QSPINLOCK_H */ diff --git a/include/asm-generic/qspinlock_types.h b/include/asm-generic/qspinlock_types.h index 2fd1fb89ec..034acd0c49 100644 --- a/include/asm-generic/qspinlock_types.h +++ b/include/asm-generic/qspinlock_types.h @@ -1,7 +1,16 @@ -/* SPDX-License-Identifier: 
GPL-2.0-or-later */ /* * Queued spinlock * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * * (C) Copyright 2013-2015 Hewlett-Packard Development Company, L.P. * * Authors: Waiman Long @@ -9,44 +18,24 @@ #ifndef __ASM_GENERIC_QSPINLOCK_TYPES_H #define __ASM_GENERIC_QSPINLOCK_TYPES_H +/* + * Including atomic.h with PARAVIRT on will cause compilation errors because + * of recursive header file incluson via paravirt_types.h. So don't include + * it if PARAVIRT is on. + */ +#ifndef CONFIG_PARAVIRT #include +#include +#endif typedef struct qspinlock { - union { - atomic_t val; - - /* - * By using the whole 2nd least significant byte for the - * pending bit, we can allow better optimization of the lock - * acquisition for the pending bit holder. 
- */ -#ifdef __LITTLE_ENDIAN - struct { - u8 locked; - u8 pending; - }; - struct { - u16 locked_pending; - u16 tail; - }; -#else - struct { - u16 tail; - u16 locked_pending; - }; - struct { - u8 reserved[2]; - u8 pending; - u8 locked; - }; -#endif - }; + atomic_t val; } arch_spinlock_t; /* * Initializier */ -#define __ARCH_SPIN_LOCK_UNLOCKED { { .val = ATOMIC_INIT(0) } } +#define __ARCH_SPIN_LOCK_UNLOCKED { ATOMIC_INIT(0) } /* * Bitfields in the atomic value: diff --git a/include/asm-generic/resource.h b/include/asm-generic/resource.h index 8874f681b0..5e752b9590 100644 --- a/include/asm-generic/resource.h +++ b/include/asm-generic/resource.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _ASM_GENERIC_RESOURCE_H #define _ASM_GENERIC_RESOURCE_H diff --git a/include/asm-generic/rwsem.h b/include/asm-generic/rwsem.h new file mode 100644 index 0000000000..5be122e3d3 --- /dev/null +++ b/include/asm-generic/rwsem.h @@ -0,0 +1,130 @@ +#ifndef _ASM_GENERIC_RWSEM_H +#define _ASM_GENERIC_RWSEM_H + +#ifndef _LINUX_RWSEM_H +#error "Please don't include directly, use instead." +#endif + +#ifdef __KERNEL__ + +/* + * R/W semaphores originally for PPC using the stuff in lib/rwsem.c. + * Adapted largely from include/asm-i386/rwsem.h + * by Paul Mackerras . 
+ */ + +/* + * the semaphore definition + */ +#ifdef CONFIG_64BIT +# define RWSEM_ACTIVE_MASK 0xffffffffL +#else +# define RWSEM_ACTIVE_MASK 0x0000ffffL +#endif + +#define RWSEM_UNLOCKED_VALUE 0x00000000L +#define RWSEM_ACTIVE_BIAS 0x00000001L +#define RWSEM_WAITING_BIAS (-RWSEM_ACTIVE_MASK-1) +#define RWSEM_ACTIVE_READ_BIAS RWSEM_ACTIVE_BIAS +#define RWSEM_ACTIVE_WRITE_BIAS (RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS) + +/* + * lock for reading + */ +static inline void __down_read(struct rw_semaphore *sem) +{ + if (unlikely(atomic_long_inc_return_acquire((atomic_long_t *)&sem->count) <= 0)) + rwsem_down_read_failed(sem); +} + +static inline int __down_read_trylock(struct rw_semaphore *sem) +{ + long tmp; + + while ((tmp = atomic_long_read(&sem->count)) >= 0) { + if (tmp == atomic_long_cmpxchg_acquire(&sem->count, tmp, + tmp + RWSEM_ACTIVE_READ_BIAS)) { + return 1; + } + } + return 0; +} + +/* + * lock for writing + */ +static inline void __down_write(struct rw_semaphore *sem) +{ + long tmp; + + tmp = atomic_long_add_return_acquire(RWSEM_ACTIVE_WRITE_BIAS, + (atomic_long_t *)&sem->count); + if (unlikely(tmp != RWSEM_ACTIVE_WRITE_BIAS)) + rwsem_down_write_failed(sem); +} + +static inline int __down_write_killable(struct rw_semaphore *sem) +{ + long tmp; + + tmp = atomic_long_add_return_acquire(RWSEM_ACTIVE_WRITE_BIAS, + (atomic_long_t *)&sem->count); + if (unlikely(tmp != RWSEM_ACTIVE_WRITE_BIAS)) + if (IS_ERR(rwsem_down_write_failed_killable(sem))) + return -EINTR; + return 0; +} + +static inline int __down_write_trylock(struct rw_semaphore *sem) +{ + long tmp; + + tmp = atomic_long_cmpxchg_acquire(&sem->count, RWSEM_UNLOCKED_VALUE, + RWSEM_ACTIVE_WRITE_BIAS); + return tmp == RWSEM_UNLOCKED_VALUE; +} + +/* + * unlock after reading + */ +static inline void __up_read(struct rw_semaphore *sem) +{ + long tmp; + + tmp = atomic_long_dec_return_release((atomic_long_t *)&sem->count); + if (unlikely(tmp < -1 && (tmp & RWSEM_ACTIVE_MASK) == 0)) + rwsem_wake(sem); +} + +/* + * 
unlock after writing + */ +static inline void __up_write(struct rw_semaphore *sem) +{ + if (unlikely(atomic_long_sub_return_release(RWSEM_ACTIVE_WRITE_BIAS, + (atomic_long_t *)&sem->count) < 0)) + rwsem_wake(sem); +} + +/* + * downgrade write lock to read lock + */ +static inline void __downgrade_write(struct rw_semaphore *sem) +{ + long tmp; + + /* + * When downgrading from exclusive to shared ownership, + * anything inside the write-locked region cannot leak + * into the read side. In contrast, anything in the + * read-locked region is ok to be re-ordered into the + * write side. As such, rely on RELEASE semantics. + */ + tmp = atomic_long_add_return_release(-RWSEM_WAITING_BIAS, + (atomic_long_t *)&sem->count); + if (tmp < 0) + rwsem_downgrade_wake(sem); +} + +#endif /* __KERNEL__ */ +#endif /* _ASM_GENERIC_RWSEM_H */ diff --git a/include/asm-generic/seccomp.h b/include/asm-generic/seccomp.h index 6b6f42bc58..e74072d23e 100644 --- a/include/asm-generic/seccomp.h +++ b/include/asm-generic/seccomp.h @@ -1,9 +1,12 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * include/asm-generic/seccomp.h * * Copyright (C) 2014 Linaro Limited * Author: AKASHI Takahiro + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
*/ #ifndef _ASM_GENERIC_SECCOMP_H #define _ASM_GENERIC_SECCOMP_H @@ -33,7 +36,7 @@ static inline const int *get_compat_mode1_syscalls(void) static const int mode1_syscalls_32[] = { __NR_seccomp_read_32, __NR_seccomp_write_32, __NR_seccomp_exit_32, __NR_seccomp_sigreturn_32, - -1, /* negative terminated */ + 0, /* null terminated */ }; return mode1_syscalls_32; } diff --git a/include/asm-generic/sections.h b/include/asm-generic/sections.h index d16302d3eb..74d19f39e1 100644 --- a/include/asm-generic/sections.h +++ b/include/asm-generic/sections.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _ASM_GENERIC_SECTIONS_H_ #define _ASM_GENERIC_SECTIONS_H_ @@ -15,8 +14,8 @@ * [_sdata, _edata]: contains .data.* sections, may also contain .rodata.* * and/or .init.* sections. * [__start_rodata, __end_rodata]: contains .rodata.* sections - * [__start_ro_after_init, __end_ro_after_init]: - * contains .data..ro_after_init section + * [__start_data_ro_after_init, __end_data_ro_after_init]: + * contains data.ro_after_init section * [__init_begin, __init_end]: contains .init.* sections, but .init.text.* * may be out of this range on some architectures. 
* [_sinittext, _einittext]: contains .init.text.* sections @@ -28,40 +27,29 @@ * __kprobes_text_start, __kprobes_text_end * __entry_text_start, __entry_text_end * __ctors_start, __ctors_end - * __irqentry_text_start, __irqentry_text_end - * __softirqentry_text_start, __softirqentry_text_end - * __start_opd, __end_opd */ extern char _text[], _stext[], _etext[]; extern char _data[], _sdata[], _edata[]; extern char __bss_start[], __bss_stop[]; extern char __init_begin[], __init_end[]; extern char _sinittext[], _einittext[]; -extern char __start_ro_after_init[], __end_ro_after_init[]; +extern char _sinitdata[], _einitdata[]; +extern char __start_data_ro_after_init[], __end_data_ro_after_init[]; extern char _end[]; extern char __per_cpu_load[], __per_cpu_start[], __per_cpu_end[]; extern char __kprobes_text_start[], __kprobes_text_end[]; extern char __entry_text_start[], __entry_text_end[]; extern char __start_rodata[], __end_rodata[]; -extern char __irqentry_text_start[], __irqentry_text_end[]; -extern char __softirqentry_text_start[], __softirqentry_text_end[]; -extern char __start_once[], __end_once[]; /* Start and end of .ctors section - used for constructor calls. */ extern char __ctors_start[], __ctors_end[]; -/* Start and end of .opd section - used for function descriptors. */ -extern char __start_opd[], __end_opd[]; - -/* Start and end of instrumentation protected text section */ -extern char __noinstr_text_start[], __noinstr_text_end[]; - extern __visible const void __nosave_begin, __nosave_end; -/* Function descriptor handling (if any). Override in asm/sections.h */ +/* function descriptor handling (if any). Override + * in asm/sections.h */ #ifndef dereference_function_descriptor -#define dereference_function_descriptor(p) ((void *)(p)) -#define dereference_kernel_function_descriptor(p) ((void *)(p)) +#define dereference_function_descriptor(p) (p) #endif /* random extra sections (if any). 
Override @@ -80,20 +68,6 @@ static inline int arch_is_kernel_data(unsigned long addr) } #endif -/* - * Check if an address is part of freed initmem. This is needed on architectures - * with virt == phys kernel mapping, for code that wants to check if an address - * is part of a static object within [_stext, _end]. After initmem is freed, - * memory can be allocated from it, and such allocations would then have - * addresses within the range [_stext, _end]. - */ -#ifndef arch_is_kernel_initmem_freed -static inline int arch_is_kernel_initmem_freed(unsigned long addr) -{ - return 0; -} -#endif - /** * memory_contains - checks if an object is contained within a memory region * @begin: virtual address of the beginning of the memory region @@ -158,18 +132,4 @@ static inline bool init_section_intersects(void *virt, size_t size) return memory_intersects(__init_begin, __init_end, virt, size); } -/** - * is_kernel_rodata - checks if the pointer address is located in the - * .rodata section - * - * @addr: address to check - * - * Returns: true if the address is located in .rodata, false otherwise. - */ -static inline bool is_kernel_rodata(unsigned long addr) -{ - return addr >= (unsigned long)__start_rodata && - addr < (unsigned long)__end_rodata; -} - #endif /* _ASM_GENERIC_SECTIONS_H_ */ diff --git a/include/asm-generic/segment.h b/include/asm-generic/segment.h new file mode 100644 index 0000000000..5580eace62 --- /dev/null +++ b/include/asm-generic/segment.h @@ -0,0 +1,9 @@ +#ifndef __ASM_GENERIC_SEGMENT_H +#define __ASM_GENERIC_SEGMENT_H +/* + * Only here because we have some old header files that expect it... + * + * New architectures probably don't want to have their own version. 
+ */ + +#endif /* __ASM_GENERIC_SEGMENT_H */ diff --git a/include/asm-generic/serial.h b/include/asm-generic/serial.h index ca9f7b6be3..5e291090fe 100644 --- a/include/asm-generic/serial.h +++ b/include/asm-generic/serial.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef __ASM_GENERIC_SERIAL_H #define __ASM_GENERIC_SERIAL_H diff --git a/include/asm-generic/siginfo.h b/include/asm-generic/siginfo.h new file mode 100644 index 0000000000..a2508a8f9a --- /dev/null +++ b/include/asm-generic/siginfo.h @@ -0,0 +1,22 @@ +#ifndef _ASM_GENERIC_SIGINFO_H +#define _ASM_GENERIC_SIGINFO_H + +#include + +#define __SI_MASK 0xffff0000u +#define __SI_KILL (0 << 16) +#define __SI_TIMER (1 << 16) +#define __SI_POLL (2 << 16) +#define __SI_FAULT (3 << 16) +#define __SI_CHLD (4 << 16) +#define __SI_RT (5 << 16) +#define __SI_MESGQ (6 << 16) +#define __SI_SYS (7 << 16) +#define __SI_CODE(T,N) ((T) | ((N) & 0xffff)) + +struct siginfo; +void do_schedule_next_timer(struct siginfo *info); + +extern int copy_siginfo_to_user(struct siginfo __user *to, const struct siginfo *from); + +#endif diff --git a/include/asm-generic/signal.h b/include/asm-generic/signal.h index c53984fa97..d840c90a15 100644 --- a/include/asm-generic/signal.h +++ b/include/asm-generic/signal.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef __ASM_GENERIC_SIGNAL_H #define __ASM_GENERIC_SIGNAL_H diff --git a/include/asm-generic/simd.h b/include/asm-generic/simd.h index d0343d58a7..f57eb7b5c2 100644 --- a/include/asm-generic/simd.h +++ b/include/asm-generic/simd.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #include diff --git a/include/asm-generic/sizes.h b/include/asm-generic/sizes.h new file mode 100644 index 0000000000..1dcfad9629 --- /dev/null +++ b/include/asm-generic/sizes.h @@ -0,0 +1,2 @@ +/* This is a placeholder, to be removed over time */ +#include diff --git a/include/asm-generic/spinlock.h b/include/asm-generic/spinlock.h index adaf6acab1..1547a03ac5 100644 --- 
a/include/asm-generic/spinlock.h +++ b/include/asm-generic/spinlock.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef __ASM_GENERIC_SPINLOCK_H #define __ASM_GENERIC_SPINLOCK_H /* diff --git a/include/asm-generic/statfs.h b/include/asm-generic/statfs.h index f88dcd8ed9..4b934e9ec9 100644 --- a/include/asm-generic/statfs.h +++ b/include/asm-generic/statfs.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _GENERIC_STATFS_H #define _GENERIC_STATFS_H diff --git a/include/asm-generic/switch_to.h b/include/asm-generic/switch_to.h index 5897d100a6..052c4ac04f 100644 --- a/include/asm-generic/switch_to.h +++ b/include/asm-generic/switch_to.h @@ -1,11 +1,15 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ -/* Generic task switch macro wrapper. +/* Generic task switch macro wrapper, based on MN10300 definitions. * * It should be possible to use these on really simple architectures, * but it serves more as a starting point for new ports. * * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved. * Written by David Howells (dhowells@redhat.com) + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public Licence + * as published by the Free Software Foundation; either version + * 2 of the Licence, or (at your option) any later version. */ #ifndef __ASM_GENERIC_SWITCH_TO_H #define __ASM_GENERIC_SWITCH_TO_H diff --git a/include/asm-generic/syscall.h b/include/asm-generic/syscall.h index 524218ae38..0c938a4354 100644 --- a/include/asm-generic/syscall.h +++ b/include/asm-generic/syscall.h @@ -1,9 +1,12 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * Access to user system call parameters and results * * Copyright (C) 2008-2009 Red Hat, Inc. All rights reserved. * + * This copyrighted material is made available to anyone wishing to use, + * modify, copy, or redistribute it subject to the terms and conditions + * of the GNU General Public License v.2. 
+ * * This file is a stub providing documentation for what functions * asm-ARCH/syscall.h files need to define. Most arch definitions * will be simple inlines. @@ -43,9 +46,9 @@ int syscall_get_nr(struct task_struct *task, struct pt_regs *regs); * @regs: task_pt_regs() of @task * * It's only valid to call this when @task is stopped for system - * call exit tracing (due to %SYSCALL_WORK_SYSCALL_TRACE or - * %SYSCALL_WORK_SYSCALL_AUDIT), after tracehook_report_syscall_entry() - * returned nonzero to prevent the system call from taking place. + * call exit tracing (due to TIF_SYSCALL_TRACE or TIF_SYSCALL_AUDIT), + * after tracehook_report_syscall_entry() returned nonzero to prevent + * the system call from taking place. * * This rolls back the register state in @regs so it's as if the * system call instruction was a no-op. The registers containing @@ -63,8 +66,7 @@ void syscall_rollback(struct task_struct *task, struct pt_regs *regs); * Returns 0 if the system call succeeded, or -ERRORCODE if it failed. * * It's only valid to call this when @task is stopped for tracing on exit - * from a system call, due to %SYSCALL_WORK_SYSCALL_TRACE or - * %SYSCALL_WORK_SYSCALL_AUDIT. + * from a system call, due to %TIF_SYSCALL_TRACE or %TIF_SYSCALL_AUDIT. */ long syscall_get_error(struct task_struct *task, struct pt_regs *regs); @@ -77,8 +79,7 @@ long syscall_get_error(struct task_struct *task, struct pt_regs *regs); * This value is meaningless if syscall_get_error() returned nonzero. * * It's only valid to call this when @task is stopped for tracing on exit - * from a system call, due to %SYSCALL_WORK_SYSCALL_TRACE or - * %SYSCALL_WORK_SYSCALL_AUDIT. + * from a system call, due to %TIF_SYSCALL_TRACE or %TIF_SYSCALL_AUDIT. */ long syscall_get_return_value(struct task_struct *task, struct pt_regs *regs); @@ -95,8 +96,7 @@ long syscall_get_return_value(struct task_struct *task, struct pt_regs *regs); * code; the user sees a failed system call with this errno code. 
* * It's only valid to call this when @task is stopped for tracing on exit - * from a system call, due to %SYSCALL_WORK_SYSCALL_TRACE or - * %SYSCALL_WORK_SYSCALL_AUDIT. + * from a system call, due to %TIF_SYSCALL_TRACE or %TIF_SYSCALL_AUDIT. */ void syscall_set_return_value(struct task_struct *task, struct pt_regs *regs, int error, long val); @@ -105,46 +105,53 @@ void syscall_set_return_value(struct task_struct *task, struct pt_regs *regs, * syscall_get_arguments - extract system call parameter values * @task: task of interest, must be blocked * @regs: task_pt_regs() of @task + * @i: argument index [0,5] + * @n: number of arguments; n+i must be [1,6]. * @args: array filled with argument values * - * Fetches 6 arguments to the system call. First argument is stored in -* @args[0], and so on. + * Fetches @n arguments to the system call starting with the @i'th argument + * (from 0 through 5). Argument @i is stored in @args[0], and so on. + * An arch inline version is probably optimal when @i and @n are constants. * * It's only valid to call this when @task is stopped for tracing on - * entry to a system call, due to %SYSCALL_WORK_SYSCALL_TRACE or - * %SYSCALL_WORK_SYSCALL_AUDIT. + * entry to a system call, due to %TIF_SYSCALL_TRACE or %TIF_SYSCALL_AUDIT. + * It's invalid to call this with @i + @n > 6; we only support system calls + * taking up to 6 arguments. */ void syscall_get_arguments(struct task_struct *task, struct pt_regs *regs, - unsigned long *args); + unsigned int i, unsigned int n, unsigned long *args); /** * syscall_set_arguments - change system call parameter value * @task: task of interest, must be in system call entry tracing * @regs: task_pt_regs() of @task + * @i: argument index [0,5] + * @n: number of arguments; n+i must be [1,6]. * @args: array of argument values to store * - * Changes 6 arguments to the system call. - * The first argument gets value @args[0], and so on. + * Changes @n arguments to the system call starting with the @i'th argument. 
+ * Argument @i gets value @args[0], and so on. + * An arch inline version is probably optimal when @i and @n are constants. * * It's only valid to call this when @task is stopped for tracing on - * entry to a system call, due to %SYSCALL_WORK_SYSCALL_TRACE or - * %SYSCALL_WORK_SYSCALL_AUDIT. + * entry to a system call, due to %TIF_SYSCALL_TRACE or %TIF_SYSCALL_AUDIT. + * It's invalid to call this with @i + @n > 6; we only support system calls + * taking up to 6 arguments. */ void syscall_set_arguments(struct task_struct *task, struct pt_regs *regs, + unsigned int i, unsigned int n, const unsigned long *args); /** * syscall_get_arch - return the AUDIT_ARCH for the current system call - * @task: task of interest, must be blocked * * Returns the AUDIT_ARCH_* based on the system call convention in use. * - * It's only valid to call this when @task is stopped on entry to a system - * call, due to %SYSCALL_WORK_SYSCALL_TRACE, %SYSCALL_WORK_SYSCALL_AUDIT, or - * %SYSCALL_WORK_SECCOMP. + * It's only valid to call this when current is stopped on entry to a system + * call, due to %TIF_SYSCALL_TRACE, %TIF_SYSCALL_AUDIT, or %TIF_SECCOMP. * * Architectures which permit CONFIG_HAVE_ARCH_SECCOMP_FILTER must * provide an implementation of this. 
*/ -int syscall_get_arch(struct task_struct *task); +int syscall_get_arch(void); #endif /* _ASM_SYSCALL_H */ diff --git a/include/asm-generic/syscalls.h b/include/asm-generic/syscalls.h index 933ca6581a..1f74be5113 100644 --- a/include/asm-generic/syscalls.h +++ b/include/asm-generic/syscalls.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef __ASM_GENERIC_SYSCALLS_H #define __ASM_GENERIC_SYSCALLS_H diff --git a/include/asm-generic/termios-base.h b/include/asm-generic/termios-base.h index 59c5a3bd4a..0a769feb22 100644 --- a/include/asm-generic/termios-base.h +++ b/include/asm-generic/termios-base.h @@ -1,11 +1,10 @@ -/* SPDX-License-Identifier: GPL-2.0 */ /* termios.h: generic termios/termio user copying/translation */ #ifndef _ASM_GENERIC_TERMIOS_BASE_H #define _ASM_GENERIC_TERMIOS_BASE_H -#include +#include #ifndef __ARCH_TERMIO_GETPUT diff --git a/include/asm-generic/termios.h b/include/asm-generic/termios.h index b1398d0d4a..4fa6fe0fc2 100644 --- a/include/asm-generic/termios.h +++ b/include/asm-generic/termios.h @@ -1,9 +1,8 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _ASM_GENERIC_TERMIOS_H #define _ASM_GENERIC_TERMIOS_H -#include +#include #include /* intr=^C quit=^\ erase=del kill=^U diff --git a/include/asm-generic/timex.h b/include/asm-generic/timex.h index 50ba9b5ce9..b2243cb8d6 100644 --- a/include/asm-generic/timex.h +++ b/include/asm-generic/timex.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef __ASM_GENERIC_TIMEX_H #define __ASM_GENERIC_TIMEX_H diff --git a/include/asm-generic/tlb.h b/include/asm-generic/tlb.h index 2c68a545ff..c6d6671876 100644 --- a/include/asm-generic/tlb.h +++ b/include/asm-generic/tlb.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ /* include/asm-generic/tlb.h * * Generic TLB shootdown code @@ -7,178 +6,50 @@ * Based on code from mm/memory.c Copyright Linus Torvalds and others. 
* * Copyright 2011 Red Hat, Inc., Peter Zijlstra + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. */ #ifndef _ASM_GENERIC__TLB_H #define _ASM_GENERIC__TLB_H -#include #include -#include +#include #include -#include +#ifdef CONFIG_HAVE_RCU_TABLE_FREE /* - * Blindly accessing user memory from NMI context can be dangerous - * if we're in the middle of switching the current user task or switching - * the loaded mm. + * Semi RCU freeing of the page directories. + * + * This is needed by some architectures to implement software pagetable walkers. + * + * gup_fast() and other software pagetable walkers do a lockless page-table + * walk and therefore needs some synchronization with the freeing of the page + * directories. The chosen means to accomplish that is by disabling IRQs over + * the walk. + * + * Architectures that use IPIs to flush TLBs will then automagically DTRT, + * since we unlink the page, flush TLBs, free the page. Since the disabling of + * IRQs delays the completion of the TLB flush we can never observe an already + * freed page. + * + * Architectures that do not have this (PPC) need to delay the freeing by some + * other means, this is that means. + * + * What we do is batch the freed directory pages (tables) and RCU free them. + * We use the sched RCU variant, as that guarantees that IRQ/preempt disabling + * holds off grace periods. + * + * However, in order to batch these pages we need to allocate storage, this + * allocation is deep inside the MM code and can thus easily fail on memory + * pressure. To guarantee progress we fall back to single table freeing, see + * the implementation of tlb_remove_table_one(). 
+ * */ -#ifndef nmi_uaccess_okay -# define nmi_uaccess_okay() true -#endif - -#ifdef CONFIG_MMU - -/* - * Generic MMU-gather implementation. - * - * The mmu_gather data structure is used by the mm code to implement the - * correct and efficient ordering of freeing pages and TLB invalidations. - * - * This correct ordering is: - * - * 1) unhook page - * 2) TLB invalidate page - * 3) free page - * - * That is, we must never free a page before we have ensured there are no live - * translations left to it. Otherwise it might be possible to observe (or - * worse, change) the page content after it has been reused. - * - * The mmu_gather API consists of: - * - * - tlb_gather_mmu() / tlb_gather_mmu_fullmm() / tlb_finish_mmu() - * - * start and finish a mmu_gather - * - * Finish in particular will issue a (final) TLB invalidate and free - * all (remaining) queued pages. - * - * - tlb_start_vma() / tlb_end_vma(); marks the start / end of a VMA - * - * Defaults to flushing at tlb_end_vma() to reset the range; helps when - * there's large holes between the VMAs. - * - * - tlb_remove_table() - * - * tlb_remove_table() is the basic primitive to free page-table directories - * (__p*_free_tlb()). In it's most primitive form it is an alias for - * tlb_remove_page() below, for when page directories are pages and have no - * additional constraints. - * - * See also MMU_GATHER_TABLE_FREE and MMU_GATHER_RCU_TABLE_FREE. - * - * - tlb_remove_page() / __tlb_remove_page() - * - tlb_remove_page_size() / __tlb_remove_page_size() - * - * __tlb_remove_page_size() is the basic primitive that queues a page for - * freeing. __tlb_remove_page() assumes PAGE_SIZE. Both will return a - * boolean indicating if the queue is (now) full and a call to - * tlb_flush_mmu() is required. - * - * tlb_remove_page() and tlb_remove_page_size() imply the call to - * tlb_flush_mmu() when required and has no return value. 
- * - * - tlb_change_page_size() - * - * call before __tlb_remove_page*() to set the current page-size; implies a - * possible tlb_flush_mmu() call. - * - * - tlb_flush_mmu() / tlb_flush_mmu_tlbonly() - * - * tlb_flush_mmu_tlbonly() - does the TLB invalidate (and resets - * related state, like the range) - * - * tlb_flush_mmu() - in addition to the above TLB invalidate, also frees - * whatever pages are still batched. - * - * - mmu_gather::fullmm - * - * A flag set by tlb_gather_mmu_fullmm() to indicate we're going to free - * the entire mm; this allows a number of optimizations. - * - * - We can ignore tlb_{start,end}_vma(); because we don't - * care about ranges. Everything will be shot down. - * - * - (RISC) architectures that use ASIDs can cycle to a new ASID - * and delay the invalidation until ASID space runs out. - * - * - mmu_gather::need_flush_all - * - * A flag that can be set by the arch code if it wants to force - * flush the entire TLB irrespective of the range. For instance - * x86-PAE needs this when changing top-level entries. - * - * And allows the architecture to provide and implement tlb_flush(): - * - * tlb_flush() may, in addition to the above mentioned mmu_gather fields, make - * use of: - * - * - mmu_gather::start / mmu_gather::end - * - * which provides the range that needs to be flushed to cover the pages to - * be freed. - * - * - mmu_gather::freed_tables - * - * set when we freed page table pages - * - * - tlb_get_unmap_shift() / tlb_get_unmap_size() - * - * returns the smallest TLB entry size unmapped in this range. - * - * If an architecture does not provide tlb_flush() a default implementation - * based on flush_tlb_range() will be used, unless MMU_GATHER_NO_RANGE is - * specified, in which case we'll default to flush_tlb_mm(). 
- * - * Additionally there are a few opt-in features: - * - * MMU_GATHER_PAGE_SIZE - * - * This ensures we call tlb_flush() every time tlb_change_page_size() actually - * changes the size and provides mmu_gather::page_size to tlb_flush(). - * - * This might be useful if your architecture has size specific TLB - * invalidation instructions. - * - * MMU_GATHER_TABLE_FREE - * - * This provides tlb_remove_table(), to be used instead of tlb_remove_page() - * for page directores (__p*_free_tlb()). - * - * Useful if your architecture has non-page page directories. - * - * When used, an architecture is expected to provide __tlb_remove_table() - * which does the actual freeing of these pages. - * - * MMU_GATHER_RCU_TABLE_FREE - * - * Like MMU_GATHER_TABLE_FREE, and adds semi-RCU semantics to the free (see - * comment below). - * - * Useful if your architecture doesn't use IPIs for remote TLB invalidates - * and therefore doesn't naturally serialize with software page-table walkers. - * - * MMU_GATHER_NO_RANGE - * - * Use this if your architecture lacks an efficient flush_tlb_range(). - * - * MMU_GATHER_NO_GATHER - * - * If the option is set the mmu_gather will not track individual pages for - * delayed page free anymore. A platform that enables the option needs to - * provide its own implementation of the __tlb_remove_page_size() function to - * free pages. - * - * This is useful if your architecture already flushes TLB entries in the - * various ptep_get_and_clear() functions. 
- */ - -#ifdef CONFIG_MMU_GATHER_TABLE_FREE - struct mmu_table_batch { -#ifdef CONFIG_MMU_GATHER_RCU_TABLE_FREE struct rcu_head rcu; -#endif unsigned int nr; void *tables[0]; }; @@ -186,37 +57,11 @@ struct mmu_table_batch { #define MAX_TABLE_BATCH \ ((PAGE_SIZE - sizeof(struct mmu_table_batch)) / sizeof(void *)) +extern void tlb_table_flush(struct mmu_gather *tlb); extern void tlb_remove_table(struct mmu_gather *tlb, void *table); -#else /* !CONFIG_MMU_GATHER_HAVE_TABLE_FREE */ - -/* - * Without MMU_GATHER_TABLE_FREE the architecture is assumed to have page based - * page directories and we can use the normal page batching to free them. - */ -#define tlb_remove_table(tlb, page) tlb_remove_page((tlb), (page)) - -#endif /* CONFIG_MMU_GATHER_TABLE_FREE */ - -#ifdef CONFIG_MMU_GATHER_RCU_TABLE_FREE -/* - * This allows an architecture that does not use the linux page-tables for - * hardware to skip the TLBI when freeing page tables. - */ -#ifndef tlb_needs_table_invalidate -#define tlb_needs_table_invalidate() (true) #endif -#else - -#ifdef tlb_needs_table_invalidate -#error tlb_needs_table_invalidate() requires MMU_GATHER_RCU_TABLE_FREE -#endif - -#endif /* CONFIG_MMU_GATHER_RCU_TABLE_FREE */ - - -#ifndef CONFIG_MMU_GATHER_NO_GATHER /* * If we can't allocate a page to make a big batch of page pointers * to work on, then just handle a few from the on-stack structure. @@ -241,75 +86,55 @@ struct mmu_gather_batch { */ #define MAX_GATHER_BATCH_COUNT (10000UL/MAX_GATHER_BATCH) -extern bool __tlb_remove_page_size(struct mmu_gather *tlb, struct page *page, - int page_size); -#endif - -/* - * struct mmu_gather is an opaque type used by the mm code for passing around +/* struct mmu_gather is an opaque type used by the mm code for passing around * any data needed by arch specific code for tlb_remove_page. 
*/ struct mmu_gather { struct mm_struct *mm; - -#ifdef CONFIG_MMU_GATHER_TABLE_FREE +#ifdef CONFIG_HAVE_RCU_TABLE_FREE struct mmu_table_batch *batch; #endif - unsigned long start; unsigned long end; - /* - * we are in the middle of an operation to clear - * a full mm and can make some optimizations - */ - unsigned int fullmm : 1; + /* we are in the middle of an operation to clear + * a full mm and can make some optimizations */ + unsigned int fullmm : 1, + /* we have performed an operation which + * requires a complete flush of the tlb */ + need_flush_all : 1; - /* - * we have performed an operation which - * requires a complete flush of the tlb - */ - unsigned int need_flush_all : 1; - - /* - * we have removed page directories - */ - unsigned int freed_tables : 1; - - /* - * at which levels have we cleared entries? - */ - unsigned int cleared_ptes : 1; - unsigned int cleared_pmds : 1; - unsigned int cleared_puds : 1; - unsigned int cleared_p4ds : 1; - - /* - * tracks VM_EXEC | VM_HUGETLB in tlb_start_vma - */ - unsigned int vma_exec : 1; - unsigned int vma_huge : 1; - - unsigned int batch_count; - -#ifndef CONFIG_MMU_GATHER_NO_GATHER struct mmu_gather_batch *active; struct mmu_gather_batch local; struct page *__pages[MMU_GATHER_BUNDLE]; - -#ifdef CONFIG_MMU_GATHER_PAGE_SIZE - unsigned int page_size; -#endif -#endif + unsigned int batch_count; + /* + * __tlb_adjust_range will track the new addr here, + * that that we can adjust the range after the flush + */ + unsigned long addr; + int page_size; }; +#define HAVE_GENERIC_MMU_GATHER + +void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long start, unsigned long end); void tlb_flush_mmu(struct mmu_gather *tlb); +void tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start, + unsigned long end); +extern bool __tlb_remove_page_size(struct mmu_gather *tlb, struct page *page, + int page_size); static inline void __tlb_adjust_range(struct mmu_gather *tlb, - unsigned long address, - unsigned int 
range_size) + unsigned long address) { tlb->start = min(tlb->start, address); - tlb->end = max(tlb->end, address + range_size); + tlb->end = max(tlb->end, address + PAGE_SIZE); + /* + * Track the last address with which we adjusted the range. This + * will be used later to adjust again after a mmu_flush due to + * failed __tlb_remove_page + */ + tlb->addr = address; } static inline void __tlb_reset_range(struct mmu_gather *tlb) @@ -320,122 +145,20 @@ static inline void __tlb_reset_range(struct mmu_gather *tlb) tlb->start = TASK_SIZE; tlb->end = 0; } - tlb->freed_tables = 0; - tlb->cleared_ptes = 0; - tlb->cleared_pmds = 0; - tlb->cleared_puds = 0; - tlb->cleared_p4ds = 0; - /* - * Do not reset mmu_gather::vma_* fields here, we do not - * call into tlb_start_vma() again to set them if there is an - * intermediate flush. - */ -} - -#ifdef CONFIG_MMU_GATHER_NO_RANGE - -#if defined(tlb_flush) || defined(tlb_start_vma) || defined(tlb_end_vma) -#error MMU_GATHER_NO_RANGE relies on default tlb_flush(), tlb_start_vma() and tlb_end_vma() -#endif - -/* - * When an architecture does not have efficient means of range flushing TLBs - * there is no point in doing intermediate flushes on tlb_end_vma() to keep the - * range small. We equally don't have to worry about page granularity or other - * things. - * - * All we need to do is issue a full flush for any !0 range. 
- */ -static inline void tlb_flush(struct mmu_gather *tlb) -{ - if (tlb->end) - flush_tlb_mm(tlb->mm); -} - -static inline void -tlb_update_vma_flags(struct mmu_gather *tlb, struct vm_area_struct *vma) { } - -#define tlb_end_vma tlb_end_vma -static inline void tlb_end_vma(struct mmu_gather *tlb, struct vm_area_struct *vma) { } - -#else /* CONFIG_MMU_GATHER_NO_RANGE */ - -#ifndef tlb_flush - -#if defined(tlb_start_vma) || defined(tlb_end_vma) -#error Default tlb_flush() relies on default tlb_start_vma() and tlb_end_vma() -#endif - -/* - * When an architecture does not provide its own tlb_flush() implementation - * but does have a reasonably efficient flush_vma_range() implementation - * use that. - */ -static inline void tlb_flush(struct mmu_gather *tlb) -{ - if (tlb->fullmm || tlb->need_flush_all) { - flush_tlb_mm(tlb->mm); - } else if (tlb->end) { - struct vm_area_struct vma = { - .vm_mm = tlb->mm, - .vm_flags = (tlb->vma_exec ? VM_EXEC : 0) | - (tlb->vma_huge ? VM_HUGETLB : 0), - }; - - flush_tlb_range(&vma, tlb->start, tlb->end); - } -} - -static inline void -tlb_update_vma_flags(struct mmu_gather *tlb, struct vm_area_struct *vma) -{ - /* - * flush_tlb_range() implementations that look at VM_HUGETLB (tile, - * mips-4k) flush only large pages. - * - * flush_tlb_range() implementations that flush I-TLB also flush D-TLB - * (tile, xtensa, arm), so it's ok to just add VM_EXEC to an existing - * range. - * - * We rely on tlb_end_vma() to issue a flush, such that when we reset - * these values the batch is empty. - */ - tlb->vma_huge = is_vm_hugetlb_page(vma); - tlb->vma_exec = !!(vma->vm_flags & VM_EXEC); -} - -#else - -static inline void -tlb_update_vma_flags(struct mmu_gather *tlb, struct vm_area_struct *vma) { } - -#endif - -#endif /* CONFIG_MMU_GATHER_NO_RANGE */ - -static inline void tlb_flush_mmu_tlbonly(struct mmu_gather *tlb) -{ - /* - * Anything calling __tlb_adjust_range() also sets at least one of - * these bits. 
- */ - if (!(tlb->freed_tables || tlb->cleared_ptes || tlb->cleared_pmds || - tlb->cleared_puds || tlb->cleared_p4ds)) - return; - - tlb_flush(tlb); - mmu_notifier_invalidate_range(tlb->mm, tlb->start, tlb->end); - __tlb_reset_range(tlb); } static inline void tlb_remove_page_size(struct mmu_gather *tlb, struct page *page, int page_size) { - if (__tlb_remove_page_size(tlb, page, page_size)) + if (__tlb_remove_page_size(tlb, page, page_size)) { tlb_flush_mmu(tlb); + tlb->page_size = page_size; + __tlb_adjust_range(tlb, tlb->addr); + __tlb_remove_page_size(tlb, page, page_size); + } } -static inline bool __tlb_remove_page(struct mmu_gather *tlb, struct page *page) +static bool __tlb_remove_page(struct mmu_gather *tlb, struct page *page) { return __tlb_remove_page_size(tlb, page, PAGE_SIZE); } @@ -449,36 +172,13 @@ static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page) return tlb_remove_page_size(tlb, page, PAGE_SIZE); } -static inline void tlb_change_page_size(struct mmu_gather *tlb, - unsigned int page_size) +static inline bool __tlb_remove_pte_page(struct mmu_gather *tlb, struct page *page) { -#ifdef CONFIG_MMU_GATHER_PAGE_SIZE - if (tlb->page_size && tlb->page_size != page_size) { - if (!tlb->fullmm && !tlb->need_flush_all) - tlb_flush_mmu(tlb); - } - - tlb->page_size = page_size; -#endif -} - -static inline unsigned long tlb_get_unmap_shift(struct mmu_gather *tlb) -{ - if (tlb->cleared_ptes) - return PAGE_SHIFT; - if (tlb->cleared_pmds) - return PMD_SHIFT; - if (tlb->cleared_puds) - return PUD_SHIFT; - if (tlb->cleared_p4ds) - return P4D_SHIFT; - - return PAGE_SHIFT; -} - -static inline unsigned long tlb_get_unmap_size(struct mmu_gather *tlb) -{ - return 1UL << tlb_get_unmap_shift(tlb); + /* active->nr should be zero when we call this */ + VM_BUG_ON_PAGE(tlb->active->nr, page); + tlb->page_size = PAGE_SIZE; + __tlb_adjust_range(tlb, tlb->addr); + return __tlb_remove_page(tlb, page); } /* @@ -487,64 +187,21 @@ static inline unsigned long 
tlb_get_unmap_size(struct mmu_gather *tlb) * the vmas are adjusted to only cover the region to be torn down. */ #ifndef tlb_start_vma -static inline void tlb_start_vma(struct mmu_gather *tlb, struct vm_area_struct *vma) -{ - if (tlb->fullmm) - return; - - tlb_update_vma_flags(tlb, vma); - flush_cache_range(vma, vma->vm_start, vma->vm_end); -} +#define tlb_start_vma(tlb, vma) do { } while (0) #endif +#define __tlb_end_vma(tlb, vma) \ + do { \ + if (!tlb->fullmm && tlb->end) { \ + tlb_flush(tlb); \ + __tlb_reset_range(tlb); \ + } \ + } while (0) + #ifndef tlb_end_vma -static inline void tlb_end_vma(struct mmu_gather *tlb, struct vm_area_struct *vma) -{ - if (tlb->fullmm) - return; - - /* - * Do a TLB flush and reset the range at VMA boundaries; this avoids - * the ranges growing with the unused space between consecutive VMAs, - * but also the mmu_gather::vma_* flags from tlb_start_vma() rely on - * this. - */ - tlb_flush_mmu_tlbonly(tlb); -} +#define tlb_end_vma __tlb_end_vma #endif -/* - * tlb_flush_{pte|pmd|pud|p4d}_range() adjust the tlb->start and tlb->end, - * and set corresponding cleared_*. 
- */ -static inline void tlb_flush_pte_range(struct mmu_gather *tlb, - unsigned long address, unsigned long size) -{ - __tlb_adjust_range(tlb, address, size); - tlb->cleared_ptes = 1; -} - -static inline void tlb_flush_pmd_range(struct mmu_gather *tlb, - unsigned long address, unsigned long size) -{ - __tlb_adjust_range(tlb, address, size); - tlb->cleared_pmds = 1; -} - -static inline void tlb_flush_pud_range(struct mmu_gather *tlb, - unsigned long address, unsigned long size) -{ - __tlb_adjust_range(tlb, address, size); - tlb->cleared_puds = 1; -} - -static inline void tlb_flush_p4d_range(struct mmu_gather *tlb, - unsigned long address, unsigned long size) -{ - __tlb_adjust_range(tlb, address, size); - tlb->cleared_p4ds = 1; -} - #ifndef __tlb_remove_tlb_entry #define __tlb_remove_tlb_entry(tlb, ptep, address) do { } while (0) #endif @@ -558,17 +215,7 @@ static inline void tlb_flush_p4d_range(struct mmu_gather *tlb, */ #define tlb_remove_tlb_entry(tlb, ptep, address) \ do { \ - tlb_flush_pte_range(tlb, address, PAGE_SIZE); \ - __tlb_remove_tlb_entry(tlb, ptep, address); \ - } while (0) - -#define tlb_remove_huge_tlb_entry(h, tlb, ptep, address) \ - do { \ - unsigned long _sz = huge_page_size(h); \ - if (_sz == PMD_SIZE) \ - tlb_flush_pmd_range(tlb, address, _sz); \ - else if (_sz == PUD_SIZE) \ - tlb_flush_pud_range(tlb, address, _sz); \ + __tlb_adjust_range(tlb, address); \ __tlb_remove_tlb_entry(tlb, ptep, address); \ } while (0) @@ -580,80 +227,32 @@ static inline void tlb_flush_p4d_range(struct mmu_gather *tlb, #define __tlb_remove_pmd_tlb_entry(tlb, pmdp, address) do {} while (0) #endif -#define tlb_remove_pmd_tlb_entry(tlb, pmdp, address) \ - do { \ - tlb_flush_pmd_range(tlb, address, HPAGE_PMD_SIZE); \ - __tlb_remove_pmd_tlb_entry(tlb, pmdp, address); \ +#define tlb_remove_pmd_tlb_entry(tlb, pmdp, address) \ + do { \ + __tlb_adjust_range(tlb, address); \ + __tlb_remove_pmd_tlb_entry(tlb, pmdp, address); \ } while (0) -/** - * tlb_remove_pud_tlb_entry - 
remember a pud mapping for later tlb - * invalidation. This is a nop so far, because only x86 needs it. - */ -#ifndef __tlb_remove_pud_tlb_entry -#define __tlb_remove_pud_tlb_entry(tlb, pudp, address) do {} while (0) -#endif - -#define tlb_remove_pud_tlb_entry(tlb, pudp, address) \ - do { \ - tlb_flush_pud_range(tlb, address, HPAGE_PUD_SIZE); \ - __tlb_remove_pud_tlb_entry(tlb, pudp, address); \ - } while (0) - -/* - * For things like page tables caches (ie caching addresses "inside" the - * page tables, like x86 does), for legacy reasons, flushing an - * individual page had better flush the page table caches behind it. This - * is definitely how x86 works, for example. And if you have an - * architected non-legacy page table cache (which I'm not aware of - * anybody actually doing), you're going to have some architecturally - * explicit flushing for that, likely *separate* from a regular TLB entry - * flush, and thus you'd need more than just some range expansion.. - * - * So if we ever find an architecture - * that would want something that odd, I think it is up to that - * architecture to do its own odd thing, not cause pain for others - * http://lkml.kernel.org/r/CA+55aFzBggoXtNXQeng5d_mRoDnaMBE5Y+URs+PHR67nUpMtaw@mail.gmail.com - * - * For now w.r.t page table cache, mark the range_size as PAGE_SIZE - */ - -#ifndef pte_free_tlb #define pte_free_tlb(tlb, ptep, address) \ do { \ - tlb_flush_pmd_range(tlb, address, PAGE_SIZE); \ - tlb->freed_tables = 1; \ + __tlb_adjust_range(tlb, address); \ __pte_free_tlb(tlb, ptep, address); \ } while (0) -#endif -#ifndef pmd_free_tlb -#define pmd_free_tlb(tlb, pmdp, address) \ - do { \ - tlb_flush_pud_range(tlb, address, PAGE_SIZE); \ - tlb->freed_tables = 1; \ - __pmd_free_tlb(tlb, pmdp, address); \ - } while (0) -#endif - -#ifndef pud_free_tlb +#ifndef __ARCH_HAS_4LEVEL_HACK #define pud_free_tlb(tlb, pudp, address) \ do { \ - tlb_flush_p4d_range(tlb, address, PAGE_SIZE); \ - tlb->freed_tables = 1; \ + 
__tlb_adjust_range(tlb, address); \ __pud_free_tlb(tlb, pudp, address); \ } while (0) #endif -#ifndef p4d_free_tlb -#define p4d_free_tlb(tlb, pudp, address) \ +#define pmd_free_tlb(tlb, pmdp, address) \ do { \ - __tlb_adjust_range(tlb, address, PAGE_SIZE); \ - tlb->freed_tables = 1; \ - __p4d_free_tlb(tlb, pudp, address); \ + __tlb_adjust_range(tlb, address); \ + __pmd_free_tlb(tlb, pmdp, address); \ } while (0) -#endif -#endif /* CONFIG_MMU */ +#define tlb_migrate_finish(mm) do {} while (0) #endif /* _ASM_GENERIC__TLB_H */ diff --git a/include/asm-generic/tlbflush.h b/include/asm-generic/tlbflush.h index dc2669289f..d6d0a88430 100644 --- a/include/asm-generic/tlbflush.h +++ b/include/asm-generic/tlbflush.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef __ASM_GENERIC_TLBFLUSH_H #define __ASM_GENERIC_TLBFLUSH_H /* diff --git a/include/asm-generic/topology.h b/include/asm-generic/topology.h index 4dbe715be6..fc824e2828 100644 --- a/include/asm-generic/topology.h +++ b/include/asm-generic/topology.h @@ -44,12 +44,11 @@ #define cpu_to_mem(cpu) ((void)(cpu),0) #endif +#ifndef parent_node +#define parent_node(node) ((void)(node),0) +#endif #ifndef cpumask_of_node - #ifdef CONFIG_NUMA - #define cpumask_of_node(node) ((node) == 0 ? 
cpu_online_mask : cpu_none_mask) - #else - #define cpumask_of_node(node) ((void)(node), cpu_online_mask) - #endif +#define cpumask_of_node(node) ((void)node, cpu_online_mask) #endif #ifndef pcibus_to_node #define pcibus_to_node(bus) ((void)(bus), -1) diff --git a/include/asm-generic/trace_clock.h b/include/asm-generic/trace_clock.h index cbbca29593..6726f1bafb 100644 --- a/include/asm-generic/trace_clock.h +++ b/include/asm-generic/trace_clock.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _ASM_GENERIC_TRACE_CLOCK_H #define _ASM_GENERIC_TRACE_CLOCK_H /* diff --git a/include/asm-generic/uaccess-unaligned.h b/include/asm-generic/uaccess-unaligned.h new file mode 100644 index 0000000000..67deb898f0 --- /dev/null +++ b/include/asm-generic/uaccess-unaligned.h @@ -0,0 +1,26 @@ +#ifndef __ASM_GENERIC_UACCESS_UNALIGNED_H +#define __ASM_GENERIC_UACCESS_UNALIGNED_H + +/* + * This macro should be used instead of __get_user() when accessing + * values at locations that are not known to be aligned. + */ +#define __get_user_unaligned(x, ptr) \ +({ \ + __typeof__ (*(ptr)) __x; \ + __copy_from_user(&__x, (ptr), sizeof(*(ptr))) ? -EFAULT : 0; \ + (x) = __x; \ +}) + + +/* + * This macro should be used instead of __put_user() when accessing + * values at locations that are not known to be aligned. + */ +#define __put_user_unaligned(x, ptr) \ +({ \ + __typeof__ (*(ptr)) __x = (x); \ + __copy_to_user((ptr), &__x, sizeof(*(ptr))) ? -EFAULT : 0; \ +}) + +#endif /* __ASM_GENERIC_UACCESS_UNALIGNED_H */ diff --git a/include/asm-generic/uaccess.h b/include/asm-generic/uaccess.h index 10ffa8b5c1..804a417e32 100644 --- a/include/asm-generic/uaccess.h +++ b/include/asm-generic/uaccess.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef __ASM_GENERIC_UACCESS_H #define __ASM_GENERIC_UACCESS_H @@ -7,96 +6,11 @@ * on any machine that has kernel and user data in the same * address space, e.g. all NOMMU machines. 
*/ +#include #include -#ifdef CONFIG_UACCESS_MEMCPY -#include +#include -static __always_inline int -__get_user_fn(size_t size, const void __user *from, void *to) -{ - BUILD_BUG_ON(!__builtin_constant_p(size)); - - switch (size) { - case 1: - *(u8 *)to = *((u8 __force *)from); - return 0; - case 2: - *(u16 *)to = get_unaligned((u16 __force *)from); - return 0; - case 4: - *(u32 *)to = get_unaligned((u32 __force *)from); - return 0; - case 8: - *(u64 *)to = get_unaligned((u64 __force *)from); - return 0; - default: - BUILD_BUG(); - return 0; - } - -} -#define __get_user_fn(sz, u, k) __get_user_fn(sz, u, k) - -static __always_inline int -__put_user_fn(size_t size, void __user *to, void *from) -{ - BUILD_BUG_ON(!__builtin_constant_p(size)); - - switch (size) { - case 1: - *(u8 __force *)to = *(u8 *)from; - return 0; - case 2: - put_unaligned(*(u16 *)from, (u16 __force *)to); - return 0; - case 4: - put_unaligned(*(u32 *)from, (u32 __force *)to); - return 0; - case 8: - put_unaligned(*(u64 *)from, (u64 __force *)to); - return 0; - default: - BUILD_BUG(); - return 0; - } -} -#define __put_user_fn(sz, u, k) __put_user_fn(sz, u, k) - -#define __get_kernel_nofault(dst, src, type, err_label) \ -do { \ - *((type *)dst) = get_unaligned((type *)(src)); \ - if (0) /* make sure the label looks used to the compiler */ \ - goto err_label; \ -} while (0) - -#define __put_kernel_nofault(dst, src, type, err_label) \ -do { \ - put_unaligned(*((type *)src), (type *)(dst)); \ - if (0) /* make sure the label looks used to the compiler */ \ - goto err_label; \ -} while (0) - -#define HAVE_GET_KERNEL_NOFAULT 1 - -static inline __must_check unsigned long -raw_copy_from_user(void *to, const void __user * from, unsigned long n) -{ - memcpy(to, (const void __force *)from, n); - return 0; -} - -static inline __must_check unsigned long -raw_copy_to_user(void __user *to, const void *from, unsigned long n) -{ - memcpy((void __force *)to, from, n); - return 0; -} -#define INLINE_COPY_FROM_USER 
-#define INLINE_COPY_TO_USER -#endif /* CONFIG_UACCESS_MEMCPY */ - -#ifdef CONFIG_SET_FS #define MAKE_MM_SEG(s) ((mm_segment_t) { (s) }) #ifndef KERNEL_DS @@ -108,6 +22,7 @@ raw_copy_to_user(void __user *to, const void *from, unsigned long n) #endif #ifndef get_fs +#define get_ds() (KERNEL_DS) #define get_fs() (current_thread_info()->addr_limit) static inline void set_fs(mm_segment_t fs) @@ -116,17 +31,14 @@ static inline void set_fs(mm_segment_t fs) } #endif -#ifndef uaccess_kernel -#define uaccess_kernel() (get_fs().seg == KERNEL_DS.seg) +#ifndef segment_eq +#define segment_eq(a, b) ((a).seg == (b).seg) #endif -#ifndef user_addr_max -#define user_addr_max() (uaccess_kernel() ? ~0UL : TASK_SIZE) -#endif +#define VERIFY_READ 0 +#define VERIFY_WRITE 1 -#endif /* CONFIG_SET_FS */ - -#define access_ok(addr, size) __access_ok((unsigned long)(addr),(size)) +#define access_ok(type, addr, size) __access_ok((unsigned long)(addr),(size)) /* * The architecture should really override this if possible, at least @@ -139,6 +51,87 @@ static inline int __access_ok(unsigned long addr, unsigned long size) } #endif +/* + * The exception table consists of pairs of addresses: the first is the + * address of an instruction that is allowed to fault, and the second is + * the address at which the program should continue. No registers are + * modified, so it is entirely up to the continuation code to figure out + * what to do. + * + * All the routines below use bits of fixup code that are out of line + * with the main instruction path. This means when everything is well, + * we don't even have to jump over them. Further, they do not intrude + * on our cache or tlb entries. 
+ */ + +struct exception_table_entry +{ + unsigned long insn, fixup; +}; + +/* + * architectures with an MMU should override these two + */ +#ifndef __copy_from_user +static inline __must_check long __copy_from_user(void *to, + const void __user * from, unsigned long n) +{ + if (__builtin_constant_p(n)) { + switch(n) { + case 1: + *(u8 *)to = *(u8 __force *)from; + return 0; + case 2: + *(u16 *)to = *(u16 __force *)from; + return 0; + case 4: + *(u32 *)to = *(u32 __force *)from; + return 0; +#ifdef CONFIG_64BIT + case 8: + *(u64 *)to = *(u64 __force *)from; + return 0; +#endif + default: + break; + } + } + + memcpy(to, (const void __force *)from, n); + return 0; +} +#endif + +#ifndef __copy_to_user +static inline __must_check long __copy_to_user(void __user *to, + const void *from, unsigned long n) +{ + if (__builtin_constant_p(n)) { + switch(n) { + case 1: + *(u8 __force *)to = *(u8 *)from; + return 0; + case 2: + *(u16 __force *)to = *(u16 *)from; + return 0; + case 4: + *(u32 __force *)to = *(u32 *)from; + return 0; +#ifdef CONFIG_64BIT + case 8: + *(u64 __force *)to = *(u64 *)from; + return 0; +#endif + default: + break; + } + } + + memcpy((void __force *)to, from, n); + return 0; +} +#endif + /* * These are the main single-value transfer routines. They automatically * use the right size if we just have the right pointer type. @@ -167,10 +160,10 @@ static inline int __access_ok(unsigned long addr, unsigned long size) #define put_user(x, ptr) \ ({ \ - void __user *__p = (ptr); \ + void *__p = (ptr); \ might_fault(); \ - access_ok(__p, sizeof(*ptr)) ? \ - __put_user((x), ((__typeof__(*(ptr)) __user *)__p)) : \ + access_ok(VERIFY_WRITE, __p, sizeof(*ptr)) ? \ + __put_user((x), ((__typeof__(*(ptr)) *)__p)) : \ -EFAULT; \ }) @@ -178,7 +171,8 @@ static inline int __access_ok(unsigned long addr, unsigned long size) static inline int __put_user_fn(size_t size, void __user *ptr, void *x) { - return unlikely(raw_copy_to_user(ptr, x, size)) ? 
-EFAULT : 0; + size = __copy_to_user(ptr, x, size); + return size ? -EFAULT : size; } #define __put_user_fn(sz, u, k) __put_user_fn(sz, u, k) @@ -193,28 +187,28 @@ extern int __put_user_bad(void) __attribute__((noreturn)); __chk_user_ptr(ptr); \ switch (sizeof(*(ptr))) { \ case 1: { \ - unsigned char __x = 0; \ + unsigned char __x; \ __gu_err = __get_user_fn(sizeof (*(ptr)), \ ptr, &__x); \ (x) = *(__force __typeof__(*(ptr)) *) &__x; \ break; \ }; \ case 2: { \ - unsigned short __x = 0; \ + unsigned short __x; \ __gu_err = __get_user_fn(sizeof (*(ptr)), \ ptr, &__x); \ (x) = *(__force __typeof__(*(ptr)) *) &__x; \ break; \ }; \ case 4: { \ - unsigned int __x = 0; \ + unsigned int __x; \ __gu_err = __get_user_fn(sizeof (*(ptr)), \ ptr, &__x); \ (x) = *(__force __typeof__(*(ptr)) *) &__x; \ break; \ }; \ case 8: { \ - unsigned long long __x = 0; \ + unsigned long long __x; \ __gu_err = __get_user_fn(sizeof (*(ptr)), \ ptr, &__x); \ (x) = *(__force __typeof__(*(ptr)) *) &__x; \ @@ -229,17 +223,22 @@ extern int __put_user_bad(void) __attribute__((noreturn)); #define get_user(x, ptr) \ ({ \ - const void __user *__p = (ptr); \ + const void *__p = (ptr); \ might_fault(); \ - access_ok(__p, sizeof(*ptr)) ? \ - __get_user((x), (__typeof__(*(ptr)) __user *)__p) :\ + access_ok(VERIFY_READ, __p, sizeof(*ptr)) ? \ + __get_user((x), (__typeof__(*(ptr)) *)__p) : \ ((x) = (__typeof__(*(ptr)))0,-EFAULT); \ }) #ifndef __get_user_fn static inline int __get_user_fn(size_t size, const void __user *ptr, void *x) { - return unlikely(raw_copy_from_user(x, ptr, size)) ? 
-EFAULT : 0; + size_t n = __copy_from_user(x, ptr, size); + if (unlikely(n)) { + memset(x + (size - n), 0, n); + return -EFAULT; + } + return 0; } #define __get_user_fn(sz, u, k) __get_user_fn(sz, u, k) @@ -248,6 +247,85 @@ static inline int __get_user_fn(size_t size, const void __user *ptr, void *x) extern int __get_user_bad(void) __attribute__((noreturn)); +#ifndef __copy_from_user_inatomic +#define __copy_from_user_inatomic __copy_from_user +#endif + +#ifndef __copy_to_user_inatomic +#define __copy_to_user_inatomic __copy_to_user +#endif + +static inline long copy_from_user(void *to, + const void __user * from, unsigned long n) +{ + unsigned long res = n; + might_fault(); + if (likely(access_ok(VERIFY_READ, from, n))) + res = __copy_from_user(to, from, n); + if (unlikely(res)) + memset(to + (n - res), 0, res); + return res; +} + +static inline long copy_to_user(void __user *to, + const void *from, unsigned long n) +{ + might_fault(); + if (access_ok(VERIFY_WRITE, to, n)) + return __copy_to_user(to, from, n); + else + return n; +} + +/* + * Copy a null terminated string from userspace. + */ +#ifndef __strncpy_from_user +static inline long +__strncpy_from_user(char *dst, const char __user *src, long count) +{ + char *tmp; + strncpy(dst, (const char __force *)src, count); + for (tmp = dst; *tmp && count > 0; tmp++, count--) + ; + return (tmp - dst); +} +#endif + +static inline long +strncpy_from_user(char *dst, const char __user *src, long count) +{ + if (!access_ok(VERIFY_READ, src, 1)) + return -EFAULT; + return __strncpy_from_user(dst, src, count); +} + +/* + * Return the size of a string (including the ending 0) + * + * Return 0 on exception, a value greater than N if too long + */ +#ifndef __strnlen_user +#define __strnlen_user(s, n) (strnlen((s), (n)) + 1) +#endif + +/* + * Unlike strnlen, strnlen_user includes the nul terminator in + * its returned count. Callers should check for a returned value + * greater than N as an indication the string is too long. 
+ */ +static inline long strnlen_user(const char __user *src, long n) +{ + if (!access_ok(VERIFY_READ, src, 1)) + return 0; + return __strnlen_user(src, n); +} + +static inline long strlen_user(const char __user *src) +{ + return strnlen_user(src, 32767); +} + /* * Zero Userspace */ @@ -264,16 +342,26 @@ static inline __must_check unsigned long clear_user(void __user *to, unsigned long n) { might_fault(); - if (!access_ok(to, n)) + if (!access_ok(VERIFY_WRITE, to, n)) return n; return __clear_user(to, n); } -#include +#ifndef __HAVE_ARCH_PAX_OPEN_USERLAND +#ifdef CONFIG_PAX_MEMORY_UDEREF +#error UDEREF requires pax_open_userland +#else +static inline unsigned long pax_open_userland(void) { return 0; } +#endif +#endif -__must_check long strncpy_from_user(char *dst, const char __user *src, - long count); -__must_check long strnlen_user(const char __user *src, long n); +#ifndef __HAVE_ARCH_PAX_CLOSE_USERLAND +#ifdef CONFIG_PAX_MEMORY_UDEREF +#error UDEREF requires pax_close_userland +#else +static inline unsigned long pax_close_userland(void) { return 0; } +#endif +#endif #endif /* __ASM_GENERIC_UACCESS_H */ diff --git a/include/asm-generic/unaligned.h b/include/asm-generic/unaligned.h index 1c4242416c..1ac097279d 100644 --- a/include/asm-generic/unaligned.h +++ b/include/asm-generic/unaligned.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef __ASM_GENERIC_UNALIGNED_H #define __ASM_GENERIC_UNALIGNED_H @@ -6,124 +5,31 @@ * This is the most generic implementation of unaligned accesses * and should work almost anywhere. */ -#include #include -#define __get_unaligned_t(type, ptr) ({ \ - const struct { type x; } __packed *__pptr = (typeof(__pptr))(ptr); \ - __pptr->x; \ -}) +/* Set by the arch if it can handle unaligned accesses in hardware. 
*/ +#ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS +# include +#endif -#define __put_unaligned_t(type, val, ptr) do { \ - struct { type x; } __packed *__pptr = (typeof(__pptr))(ptr); \ - __pptr->x = (val); \ -} while (0) - -#define get_unaligned(ptr) __get_unaligned_t(typeof(*(ptr)), (ptr)) -#define put_unaligned(val, ptr) __put_unaligned_t(typeof(*(ptr)), (val), (ptr)) - -static inline u16 get_unaligned_le16(const void *p) -{ - return le16_to_cpu(__get_unaligned_t(__le16, p)); -} - -static inline u32 get_unaligned_le32(const void *p) -{ - return le32_to_cpu(__get_unaligned_t(__le32, p)); -} - -static inline u64 get_unaligned_le64(const void *p) -{ - return le64_to_cpu(__get_unaligned_t(__le64, p)); -} - -static inline void put_unaligned_le16(u16 val, void *p) -{ - __put_unaligned_t(__le16, cpu_to_le16(val), p); -} - -static inline void put_unaligned_le32(u32 val, void *p) -{ - __put_unaligned_t(__le32, cpu_to_le32(val), p); -} - -static inline void put_unaligned_le64(u64 val, void *p) -{ - __put_unaligned_t(__le64, cpu_to_le64(val), p); -} - -static inline u16 get_unaligned_be16(const void *p) -{ - return be16_to_cpu(__get_unaligned_t(__be16, p)); -} - -static inline u32 get_unaligned_be32(const void *p) -{ - return be32_to_cpu(__get_unaligned_t(__be32, p)); -} - -static inline u64 get_unaligned_be64(const void *p) -{ - return be64_to_cpu(__get_unaligned_t(__be64, p)); -} - -static inline void put_unaligned_be16(u16 val, void *p) -{ - __put_unaligned_t(__be16, cpu_to_be16(val), p); -} - -static inline void put_unaligned_be32(u32 val, void *p) -{ - __put_unaligned_t(__be32, cpu_to_be32(val), p); -} - -static inline void put_unaligned_be64(u64 val, void *p) -{ - __put_unaligned_t(__be64, cpu_to_be64(val), p); -} - -static inline u32 __get_unaligned_be24(const u8 *p) -{ - return p[0] << 16 | p[1] << 8 | p[2]; -} - -static inline u32 get_unaligned_be24(const void *p) -{ - return __get_unaligned_be24(p); -} - -static inline u32 __get_unaligned_le24(const u8 *p) -{ - 
return p[0] | p[1] << 8 | p[2] << 16; -} - -static inline u32 get_unaligned_le24(const void *p) -{ - return __get_unaligned_le24(p); -} - -static inline void __put_unaligned_be24(const u32 val, u8 *p) -{ - *p++ = val >> 16; - *p++ = val >> 8; - *p++ = val; -} - -static inline void put_unaligned_be24(const u32 val, void *p) -{ - __put_unaligned_be24(val, p); -} - -static inline void __put_unaligned_le24(const u32 val, u8 *p) -{ - *p++ = val; - *p++ = val >> 8; - *p++ = val >> 16; -} - -static inline void put_unaligned_le24(const u32 val, void *p) -{ - __put_unaligned_le24(val, p); -} +#if defined(__LITTLE_ENDIAN) +# ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS +# include +# include +# endif +# include +# define get_unaligned __get_unaligned_le +# define put_unaligned __put_unaligned_le +#elif defined(__BIG_ENDIAN) +# ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS +# include +# include +# endif +# include +# define get_unaligned __get_unaligned_be +# define put_unaligned __put_unaligned_be +#else +# error need to define endianess +#endif #endif /* __ASM_GENERIC_UNALIGNED_H */ diff --git a/include/asm-generic/unistd.h b/include/asm-generic/unistd.h new file mode 100644 index 0000000000..cccc86ecfe --- /dev/null +++ b/include/asm-generic/unistd.h @@ -0,0 +1,12 @@ +#include +#include + +/* + * These are required system calls, we should + * invert the logic eventually and let them + * be selected by default. 
+ */ +#if __BITS_PER_LONG == 32 +#define __ARCH_WANT_STAT64 +#define __ARCH_WANT_SYS_LLSEEK +#endif diff --git a/include/asm-generic/vga.h b/include/asm-generic/vga.h index adf91a783b..36c8ff5201 100644 --- a/include/asm-generic/vga.h +++ b/include/asm-generic/vga.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ /* * Access to VGA videoram * diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h index f2984af2b8..2f85b265f3 100644 --- a/include/asm-generic/vmlinux.lds.h +++ b/include/asm-generic/vmlinux.lds.h @@ -23,18 +23,18 @@ * _etext = .; * * _sdata = .; - * RO_DATA(PAGE_SIZE) - * RW_DATA(...) + * RO_DATA_SECTION(PAGE_SIZE) + * RW_DATA_SECTION(...) * _edata = .; * * EXCEPTION_TABLE(...) + * NOTES * * BSS_SECTION(0, 0, 0) * _end = .; * * STABS_DEBUG * DWARF_DEBUG - * ELF_DETAILS * * DISCARDS // must be the last * } @@ -54,86 +54,18 @@ #define LOAD_OFFSET 0 #endif -/* - * Only some architectures want to have the .notes segment visible in - * a separate PT_NOTE ELF Program Header. When this happens, it needs - * to be visible in both the kernel text's PT_LOAD and the PT_NOTE - * Program Headers. In this case, though, the PT_LOAD needs to be made - * the default again so that all the following sections don't also end - * up in the PT_NOTE Program Header. - */ -#ifdef EMITS_PT_NOTE -#define NOTES_HEADERS :text :note -#define NOTES_HEADERS_RESTORE __restore_ph : { *(.__restore_ph) } :text -#else -#define NOTES_HEADERS -#define NOTES_HEADERS_RESTORE -#endif - -/* - * Some architectures have non-executable read-only exception tables. - * They can be added to the RO_DATA segment by specifying their desired - * alignment. - */ -#ifdef RO_EXCEPTION_TABLE_ALIGN -#define RO_EXCEPTION_TABLE EXCEPTION_TABLE(RO_EXCEPTION_TABLE_ALIGN) -#else -#define RO_EXCEPTION_TABLE -#endif +#include /* Align . to a 8 byte boundary equals to maximum function alignment. */ #define ALIGN_FUNCTION() . 
= ALIGN(8) /* - * LD_DEAD_CODE_DATA_ELIMINATION option enables -fdata-sections, which - * generates .data.identifier sections, which need to be pulled in with - * .data. We don't want to pull in .data..other sections, which Linux - * has defined. Same for text and bss. - * - * With LTO_CLANG, the linker also splits sections by default, so we need - * these macros to combine the sections during the final link. - * - * RODATA_MAIN is not used because existing code already defines .rodata.x - * sections to be brought in with rodata. - */ -#if defined(CONFIG_LD_DEAD_CODE_DATA_ELIMINATION) || defined(CONFIG_LTO_CLANG) -#define TEXT_MAIN .text .text.[0-9a-zA-Z_]* -#define DATA_MAIN .data .data.[0-9a-zA-Z_]* .data..L* .data..compoundliteral* .data.$__unnamed_* .data.$L* -#define SDATA_MAIN .sdata .sdata.[0-9a-zA-Z_]* -#define RODATA_MAIN .rodata .rodata.[0-9a-zA-Z_]* .rodata..L* -#define BSS_MAIN .bss .bss.[0-9a-zA-Z_]* .bss..compoundliteral* -#define SBSS_MAIN .sbss .sbss.[0-9a-zA-Z_]* -#else -#define TEXT_MAIN .text -#define DATA_MAIN .data -#define SDATA_MAIN .sdata -#define RODATA_MAIN .rodata -#define BSS_MAIN .bss -#define SBSS_MAIN .sbss -#endif - -/* - * GCC 4.5 and later have a 32 bytes section alignment for structures. - * Except GCC 4.9, that feels the need to align on 64 bytes. + * Align to a 32 byte boundary equal to the + * alignment gcc 4.5 uses for a struct */ #define STRUCT_ALIGNMENT 32 #define STRUCT_ALIGN() . = ALIGN(STRUCT_ALIGNMENT) -/* - * The order of the sched class addresses are important, as they are - * used to determine the order of the priority of each sched class in - * relation to each other. 
- */ -#define SCHED_DATA \ - STRUCT_ALIGN(); \ - __begin_sched_classes = .; \ - *(__idle_sched_class) \ - *(__fair_sched_class) \ - *(__rt_sched_class) \ - *(__dl_sched_class) \ - *(__stop_sched_class) \ - __end_sched_classes = .; - /* The actual configuration determine if the init/exit sections * are handled as text/data or they can be discarded (which * often happens at runtime) @@ -155,83 +87,58 @@ #endif #ifdef CONFIG_FTRACE_MCOUNT_RECORD -/* - * The ftrace call sites are logged to a section whose name depends on the - * compiler option used. A given kernel image will only use one, AKA - * FTRACE_CALLSITE_SECTION. We capture all of them here to avoid header - * dependencies for FTRACE_CALLSITE_SECTION's definition. - * - * Need to also make ftrace_stub_graph point to ftrace_stub - * so that the same stub location may have different protocols - * and not mess up with C verifiers. - */ #define MCOUNT_REC() . = ALIGN(8); \ - __start_mcount_loc = .; \ - KEEP(*(__mcount_loc)) \ - KEEP(*(__patchable_function_entries)) \ - __stop_mcount_loc = .; \ - ftrace_stub_graph = ftrace_stub; + VMLINUX_SYMBOL(__start_mcount_loc) = .; \ + *(__mcount_loc) \ + VMLINUX_SYMBOL(__stop_mcount_loc) = .; #else -# ifdef CONFIG_FUNCTION_TRACER -# define MCOUNT_REC() ftrace_stub_graph = ftrace_stub; -# else -# define MCOUNT_REC() -# endif +#define MCOUNT_REC() #endif #ifdef CONFIG_TRACE_BRANCH_PROFILING -#define LIKELY_PROFILE() __start_annotated_branch_profile = .; \ - KEEP(*(_ftrace_annotated_branch)) \ - __stop_annotated_branch_profile = .; +#define LIKELY_PROFILE() VMLINUX_SYMBOL(__start_annotated_branch_profile) = .; \ + *(_ftrace_annotated_branch) \ + VMLINUX_SYMBOL(__stop_annotated_branch_profile) = .; #else #define LIKELY_PROFILE() #endif #ifdef CONFIG_PROFILE_ALL_BRANCHES -#define BRANCH_PROFILE() __start_branch_profile = .; \ - KEEP(*(_ftrace_branch)) \ - __stop_branch_profile = .; +#define BRANCH_PROFILE() VMLINUX_SYMBOL(__start_branch_profile) = .; \ + *(_ftrace_branch) \ + 
VMLINUX_SYMBOL(__stop_branch_profile) = .; #else #define BRANCH_PROFILE() #endif #ifdef CONFIG_KPROBES #define KPROBE_BLACKLIST() . = ALIGN(8); \ - __start_kprobe_blacklist = .; \ - KEEP(*(_kprobe_blacklist)) \ - __stop_kprobe_blacklist = .; + VMLINUX_SYMBOL(__start_kprobe_blacklist) = .; \ + *(_kprobe_blacklist) \ + VMLINUX_SYMBOL(__stop_kprobe_blacklist) = .; #else #define KPROBE_BLACKLIST() #endif -#ifdef CONFIG_FUNCTION_ERROR_INJECTION -#define ERROR_INJECT_WHITELIST() STRUCT_ALIGN(); \ - __start_error_injection_whitelist = .; \ - KEEP(*(_error_injection_whitelist)) \ - __stop_error_injection_whitelist = .; -#else -#define ERROR_INJECT_WHITELIST() -#endif - #ifdef CONFIG_EVENT_TRACING #define FTRACE_EVENTS() . = ALIGN(8); \ - __start_ftrace_events = .; \ - KEEP(*(_ftrace_events)) \ - __stop_ftrace_events = .; \ - __start_ftrace_eval_maps = .; \ - KEEP(*(_ftrace_eval_map)) \ - __stop_ftrace_eval_maps = .; + VMLINUX_SYMBOL(__start_ftrace_events) = .; \ + *(_ftrace_events) \ + VMLINUX_SYMBOL(__stop_ftrace_events) = .; \ + VMLINUX_SYMBOL(__start_ftrace_enum_maps) = .; \ + *(_ftrace_enum_map) \ + VMLINUX_SYMBOL(__stop_ftrace_enum_maps) = .; #else #define FTRACE_EVENTS() #endif #ifdef CONFIG_TRACING -#define TRACE_PRINTKS() __start___trace_bprintk_fmt = .; \ - KEEP(*(__trace_printk_fmt)) /* Trace_printk fmt' pointer */ \ - __stop___trace_bprintk_fmt = .; -#define TRACEPOINT_STR() __start___tracepoint_str = .; \ - KEEP(*(__tracepoint_str)) /* Trace_printk fmt' pointer */ \ - __stop___tracepoint_str = .; +#define TRACE_PRINTKS() VMLINUX_SYMBOL(__start___trace_bprintk_fmt) = .; \ + *(__trace_printk_fmt) /* Trace_printk fmt' pointer */ \ + VMLINUX_SYMBOL(__stop___trace_bprintk_fmt) = .; +#define TRACEPOINT_STR() VMLINUX_SYMBOL(__start___tracepoint_str) = .; \ + *(__tracepoint_str) /* Trace_printk fmt' pointer */ \ + VMLINUX_SYMBOL(__stop___tracepoint_str) = .; #else #define TRACE_PRINTKS() #define TRACEPOINT_STR() @@ -239,58 +146,36 @@ #ifdef CONFIG_FTRACE_SYSCALLS 
#define TRACE_SYSCALLS() . = ALIGN(8); \ - __start_syscalls_metadata = .; \ - KEEP(*(__syscalls_metadata)) \ - __stop_syscalls_metadata = .; + VMLINUX_SYMBOL(__start_syscalls_metadata) = .; \ + *(__syscalls_metadata) \ + VMLINUX_SYMBOL(__stop_syscalls_metadata) = .; #else #define TRACE_SYSCALLS() #endif -#ifdef CONFIG_BPF_EVENTS -#define BPF_RAW_TP() STRUCT_ALIGN(); \ - __start__bpf_raw_tp = .; \ - KEEP(*(__bpf_raw_tp_map)) \ - __stop__bpf_raw_tp = .; -#else -#define BPF_RAW_TP() -#endif - #ifdef CONFIG_SERIAL_EARLYCON -#define EARLYCON_TABLE() . = ALIGN(8); \ - __earlycon_table = .; \ - KEEP(*(__earlycon_table)) \ - __earlycon_table_end = .; +#define EARLYCON_TABLE() STRUCT_ALIGN(); \ + VMLINUX_SYMBOL(__earlycon_table) = .; \ + *(__earlycon_table) \ + VMLINUX_SYMBOL(__earlycon_table_end) = .; #else #define EARLYCON_TABLE() #endif -#ifdef CONFIG_SECURITY -#define LSM_TABLE() . = ALIGN(8); \ - __start_lsm_info = .; \ - KEEP(*(.lsm_info.init)) \ - __end_lsm_info = .; -#define EARLY_LSM_TABLE() . = ALIGN(8); \ - __start_early_lsm_info = .; \ - KEEP(*(.early_lsm_info.init)) \ - __end_early_lsm_info = .; -#else -#define LSM_TABLE() -#define EARLY_LSM_TABLE() -#endif - #define ___OF_TABLE(cfg, name) _OF_TABLE_##cfg(name) #define __OF_TABLE(cfg, name) ___OF_TABLE(cfg, name) #define OF_TABLE(cfg, name) __OF_TABLE(IS_ENABLED(cfg), name) #define _OF_TABLE_0(name) #define _OF_TABLE_1(name) \ . 
= ALIGN(8); \ - __##name##_of_table = .; \ - KEEP(*(__##name##_of_table)) \ - KEEP(*(__##name##_of_table_end)) + VMLINUX_SYMBOL(__##name##_of_table) = .; \ + *(__##name##_of_table) \ + *(__##name##_of_table_end) -#define TIMER_OF_TABLES() OF_TABLE(CONFIG_TIMER_OF, timer) +#define CLKSRC_OF_TABLES() OF_TABLE(CONFIG_CLKSRC_OF, clksrc) #define IRQCHIP_OF_MATCH_TABLE() OF_TABLE(CONFIG_IRQCHIP, irqchip) #define CLK_OF_TABLES() OF_TABLE(CONFIG_COMMON_CLK, clk) +#define IOMMU_OF_TABLES() OF_TABLE(CONFIG_OF_IOMMU, iommu) #define RESERVEDMEM_OF_TABLES() OF_TABLE(CONFIG_OF_RESERVED_MEM, reservedmem) #define CPU_METHOD_OF_TABLES() OF_TABLE(CONFIG_SMP, cpu_method) #define CPUIDLE_METHOD_OF_TABLES() OF_TABLE(CONFIG_CPU_IDLE, cpuidle_method) @@ -298,64 +183,46 @@ #ifdef CONFIG_ACPI #define ACPI_PROBE_TABLE(name) \ . = ALIGN(8); \ - __##name##_acpi_probe_table = .; \ - KEEP(*(__##name##_acpi_probe_table)) \ - __##name##_acpi_probe_table_end = .; + VMLINUX_SYMBOL(__##name##_acpi_probe_table) = .; \ + *(__##name##_acpi_probe_table) \ + VMLINUX_SYMBOL(__##name##_acpi_probe_table_end) = .; #else #define ACPI_PROBE_TABLE(name) #endif -#ifdef CONFIG_THERMAL -#define THERMAL_TABLE(name) \ - . = ALIGN(8); \ - __##name##_thermal_table = .; \ - KEEP(*(__##name##_thermal_table)) \ - __##name##_thermal_table_end = .; -#else -#define THERMAL_TABLE(name) -#endif - -#ifdef CONFIG_DTPM -#define DTPM_TABLE() \ - . = ALIGN(8); \ - __dtpm_table = .; \ - KEEP(*(__dtpm_table)) \ - __dtpm_table_end = .; -#else -#define DTPM_TABLE() -#endif - #define KERNEL_DTB() \ STRUCT_ALIGN(); \ - __dtb_start = .; \ - KEEP(*(.dtb.init.rodata)) \ - __dtb_end = .; + VMLINUX_SYMBOL(__dtb_start) = .; \ + *(.dtb.init.rodata) \ + VMLINUX_SYMBOL(__dtb_end) = .; /* * .data section + * LD_DEAD_CODE_DATA_ELIMINATION option enables -fdata-sections generates + * .data.identifier which needs to be pulled in with .data, but don't want to + * pull in .data..stuff which has its own requirements. Same for bss. 
*/ #define DATA_DATA \ - *(.xiptext) \ - *(DATA_MAIN) \ + *(.data .data.[0-9a-zA-Z_]*) \ *(.ref.data) \ *(.data..shared_aligned) /* percpu related */ \ - MEM_KEEP(init.data*) \ - MEM_KEEP(exit.data*) \ + MEM_KEEP(init.data) \ + MEM_KEEP(exit.data) \ *(.data.unlikely) \ - __start_once = .; \ - *(.data.once) \ - __end_once = .; \ STRUCT_ALIGN(); \ *(__tracepoints) \ /* implement dynamic printk debug */ \ + . = ALIGN(8); \ + VMLINUX_SYMBOL(__start___jump_table) = .; \ + *(__jump_table) \ + VMLINUX_SYMBOL(__stop___jump_table) = .; \ . = ALIGN(8); \ - __start___dyndbg = .; \ - KEEP(*(__dyndbg)) \ - __stop___dyndbg = .; \ + VMLINUX_SYMBOL(__start___verbose) = .; \ + *(__verbose) \ + VMLINUX_SYMBOL(__stop___verbose) = .; \ LIKELY_PROFILE() \ BRANCH_PROFILE() \ TRACE_PRINTKS() \ - BPF_RAW_TP() \ TRACEPOINT_STR() /* @@ -363,15 +230,14 @@ */ #define NOSAVE_DATA \ . = ALIGN(PAGE_SIZE); \ - __nosave_begin = .; \ + VMLINUX_SYMBOL(__nosave_begin) = .; \ *(.data..nosave) \ . = ALIGN(PAGE_SIZE); \ - __nosave_end = .; + VMLINUX_SYMBOL(__nosave_end) = .; #define PAGE_ALIGNED_DATA(page_align) \ . = ALIGN(page_align); \ - *(.data..page_aligned) \ - . = ALIGN(page_align); + *(.data..page_aligned) #define READ_MOSTLY_DATA(align) \ . = ALIGN(align); \ @@ -384,28 +250,9 @@ #define INIT_TASK_DATA(align) \ . = ALIGN(align); \ - __start_init_task = .; \ - init_thread_union = .; \ - init_stack = .; \ - KEEP(*(.data..init_task)) \ - KEEP(*(.data..init_thread_info)) \ - . = __start_init_task + THREAD_SIZE; \ - __end_init_task = .; - -#define JUMP_TABLE_DATA \ - . = ALIGN(8); \ - __start___jump_table = .; \ - KEEP(*(__jump_table)) \ - __stop___jump_table = .; - -#define STATIC_CALL_DATA \ - . 
= ALIGN(8); \ - __start_static_call_sites = .; \ - KEEP(*(.static_call_sites)) \ - __stop_static_call_sites = .; \ - __start_static_call_tramp_key = .; \ - KEEP(*(.static_call_tramp_key)) \ - __stop_static_call_tramp_key = .; + VMLINUX_SYMBOL(__start_init_task) = .; \ + *(.data..init_task) \ + VMLINUX_SYMBOL(__end_init_task) = .; /* * Allow architectures to handle ro_after_init data on their @@ -413,28 +260,26 @@ */ #ifndef RO_AFTER_INIT_DATA #define RO_AFTER_INIT_DATA \ - . = ALIGN(8); \ - __start_ro_after_init = .; \ + __start_data_ro_after_init = .; \ *(.data..ro_after_init) \ - JUMP_TABLE_DATA \ - STATIC_CALL_DATA \ - __end_ro_after_init = .; + __end_data_ro_after_init = .; #endif /* * Read only Data */ -#define RO_DATA(align) \ +#define RO_DATA_SECTION(align) \ . = ALIGN((align)); \ .rodata : AT(ADDR(.rodata) - LOAD_OFFSET) { \ - __start_rodata = .; \ + VMLINUX_SYMBOL(__start_rodata) = .; \ *(.rodata) *(.rodata.*) \ - SCHED_DATA \ RO_AFTER_INIT_DATA /* Read only after init */ \ + *(.data..read_only) \ + *(__vermagic) /* Kernel version magic */ \ . 
= ALIGN(8); \ - __start___tracepoints_ptrs = .; \ - KEEP(*(__tracepoints_ptrs)) /* Tracepoints: pointer array */ \ - __stop___tracepoints_ptrs = .; \ + VMLINUX_SYMBOL(__start___tracepoints_ptrs) = .; \ + *(__tracepoints_ptrs) /* Tracepoints: pointer array */\ + VMLINUX_SYMBOL(__stop___tracepoints_ptrs) = .; \ *(__tracepoints_strings)/* Tracepoints: strings */ \ } \ \ @@ -442,76 +287,118 @@ *(.rodata1) \ } \ \ + BUG_TABLE \ + \ /* PCI quirks */ \ .pci_fixup : AT(ADDR(.pci_fixup) - LOAD_OFFSET) { \ - __start_pci_fixups_early = .; \ - KEEP(*(.pci_fixup_early)) \ - __end_pci_fixups_early = .; \ - __start_pci_fixups_header = .; \ - KEEP(*(.pci_fixup_header)) \ - __end_pci_fixups_header = .; \ - __start_pci_fixups_final = .; \ - KEEP(*(.pci_fixup_final)) \ - __end_pci_fixups_final = .; \ - __start_pci_fixups_enable = .; \ - KEEP(*(.pci_fixup_enable)) \ - __end_pci_fixups_enable = .; \ - __start_pci_fixups_resume = .; \ - KEEP(*(.pci_fixup_resume)) \ - __end_pci_fixups_resume = .; \ - __start_pci_fixups_resume_early = .; \ - KEEP(*(.pci_fixup_resume_early)) \ - __end_pci_fixups_resume_early = .; \ - __start_pci_fixups_suspend = .; \ - KEEP(*(.pci_fixup_suspend)) \ - __end_pci_fixups_suspend = .; \ - __start_pci_fixups_suspend_late = .; \ - KEEP(*(.pci_fixup_suspend_late)) \ - __end_pci_fixups_suspend_late = .; \ + VMLINUX_SYMBOL(__start_pci_fixups_early) = .; \ + *(.pci_fixup_early) \ + VMLINUX_SYMBOL(__end_pci_fixups_early) = .; \ + VMLINUX_SYMBOL(__start_pci_fixups_header) = .; \ + *(.pci_fixup_header) \ + VMLINUX_SYMBOL(__end_pci_fixups_header) = .; \ + VMLINUX_SYMBOL(__start_pci_fixups_final) = .; \ + *(.pci_fixup_final) \ + VMLINUX_SYMBOL(__end_pci_fixups_final) = .; \ + VMLINUX_SYMBOL(__start_pci_fixups_enable) = .; \ + *(.pci_fixup_enable) \ + VMLINUX_SYMBOL(__end_pci_fixups_enable) = .; \ + VMLINUX_SYMBOL(__start_pci_fixups_resume) = .; \ + *(.pci_fixup_resume) \ + VMLINUX_SYMBOL(__end_pci_fixups_resume) = .; \ + VMLINUX_SYMBOL(__start_pci_fixups_resume_early) = 
.; \ + *(.pci_fixup_resume_early) \ + VMLINUX_SYMBOL(__end_pci_fixups_resume_early) = .; \ + VMLINUX_SYMBOL(__start_pci_fixups_suspend) = .; \ + *(.pci_fixup_suspend) \ + VMLINUX_SYMBOL(__end_pci_fixups_suspend) = .; \ + VMLINUX_SYMBOL(__start_pci_fixups_suspend_late) = .; \ + *(.pci_fixup_suspend_late) \ + VMLINUX_SYMBOL(__end_pci_fixups_suspend_late) = .; \ } \ \ /* Built-in firmware blobs */ \ - .builtin_fw : AT(ADDR(.builtin_fw) - LOAD_OFFSET) ALIGN(8) { \ - __start_builtin_fw = .; \ - KEEP(*(.builtin_fw)) \ - __end_builtin_fw = .; \ + .builtin_fw : AT(ADDR(.builtin_fw) - LOAD_OFFSET) { \ + VMLINUX_SYMBOL(__start_builtin_fw) = .; \ + *(.builtin_fw) \ + VMLINUX_SYMBOL(__end_builtin_fw) = .; \ } \ \ TRACEDATA \ \ - PRINTK_INDEX \ - \ /* Kernel symbol table: Normal symbols */ \ __ksymtab : AT(ADDR(__ksymtab) - LOAD_OFFSET) { \ - __start___ksymtab = .; \ + VMLINUX_SYMBOL(__start___ksymtab) = .; \ KEEP(*(SORT(___ksymtab+*))) \ - __stop___ksymtab = .; \ + VMLINUX_SYMBOL(__stop___ksymtab) = .; \ } \ \ /* Kernel symbol table: GPL-only symbols */ \ __ksymtab_gpl : AT(ADDR(__ksymtab_gpl) - LOAD_OFFSET) { \ - __start___ksymtab_gpl = .; \ + VMLINUX_SYMBOL(__start___ksymtab_gpl) = .; \ KEEP(*(SORT(___ksymtab_gpl+*))) \ - __stop___ksymtab_gpl = .; \ + VMLINUX_SYMBOL(__stop___ksymtab_gpl) = .; \ + } \ + \ + /* Kernel symbol table: Normal unused symbols */ \ + __ksymtab_unused : AT(ADDR(__ksymtab_unused) - LOAD_OFFSET) { \ + VMLINUX_SYMBOL(__start___ksymtab_unused) = .; \ + KEEP(*(SORT(___ksymtab_unused+*))) \ + VMLINUX_SYMBOL(__stop___ksymtab_unused) = .; \ + } \ + \ + /* Kernel symbol table: GPL-only unused symbols */ \ + __ksymtab_unused_gpl : AT(ADDR(__ksymtab_unused_gpl) - LOAD_OFFSET) { \ + VMLINUX_SYMBOL(__start___ksymtab_unused_gpl) = .; \ + KEEP(*(SORT(___ksymtab_unused_gpl+*))) \ + VMLINUX_SYMBOL(__stop___ksymtab_unused_gpl) = .; \ + } \ + \ + /* Kernel symbol table: GPL-future-only symbols */ \ + __ksymtab_gpl_future : AT(ADDR(__ksymtab_gpl_future) - LOAD_OFFSET) { 
\ + VMLINUX_SYMBOL(__start___ksymtab_gpl_future) = .; \ + KEEP(*(SORT(___ksymtab_gpl_future+*))) \ + VMLINUX_SYMBOL(__stop___ksymtab_gpl_future) = .; \ } \ \ /* Kernel symbol table: Normal symbols */ \ __kcrctab : AT(ADDR(__kcrctab) - LOAD_OFFSET) { \ - __start___kcrctab = .; \ + VMLINUX_SYMBOL(__start___kcrctab) = .; \ KEEP(*(SORT(___kcrctab+*))) \ - __stop___kcrctab = .; \ + VMLINUX_SYMBOL(__stop___kcrctab) = .; \ } \ \ /* Kernel symbol table: GPL-only symbols */ \ __kcrctab_gpl : AT(ADDR(__kcrctab_gpl) - LOAD_OFFSET) { \ - __start___kcrctab_gpl = .; \ + VMLINUX_SYMBOL(__start___kcrctab_gpl) = .; \ KEEP(*(SORT(___kcrctab_gpl+*))) \ - __stop___kcrctab_gpl = .; \ + VMLINUX_SYMBOL(__stop___kcrctab_gpl) = .; \ + } \ + \ + /* Kernel symbol table: Normal unused symbols */ \ + __kcrctab_unused : AT(ADDR(__kcrctab_unused) - LOAD_OFFSET) { \ + VMLINUX_SYMBOL(__start___kcrctab_unused) = .; \ + KEEP(*(SORT(___kcrctab_unused+*))) \ + VMLINUX_SYMBOL(__stop___kcrctab_unused) = .; \ + } \ + \ + /* Kernel symbol table: GPL-only unused symbols */ \ + __kcrctab_unused_gpl : AT(ADDR(__kcrctab_unused_gpl) - LOAD_OFFSET) { \ + VMLINUX_SYMBOL(__start___kcrctab_unused_gpl) = .; \ + KEEP(*(SORT(___kcrctab_unused_gpl+*))) \ + VMLINUX_SYMBOL(__stop___kcrctab_unused_gpl) = .; \ + } \ + \ + /* Kernel symbol table: GPL-future-only symbols */ \ + __kcrctab_gpl_future : AT(ADDR(__kcrctab_gpl_future) - LOAD_OFFSET) { \ + VMLINUX_SYMBOL(__start___kcrctab_gpl_future) = .; \ + KEEP(*(SORT(___kcrctab_gpl_future+*))) \ + VMLINUX_SYMBOL(__stop___kcrctab_gpl_future) = .; \ } \ \ /* Kernel symbol table: strings */ \ __ksymtab_strings : AT(ADDR(__ksymtab_strings) - LOAD_OFFSET) { \ - *(__ksymtab_strings) \ + KEEP(*(__ksymtab_strings)) \ } \ \ /* __*init sections */ \ @@ -523,127 +410,115 @@ \ /* Built-in module parameters. 
*/ \ __param : AT(ADDR(__param) - LOAD_OFFSET) { \ - __start___param = .; \ - KEEP(*(__param)) \ - __stop___param = .; \ + VMLINUX_SYMBOL(__start___param) = .; \ + *(__param) \ + VMLINUX_SYMBOL(__stop___param) = .; \ } \ \ /* Built-in module versions. */ \ __modver : AT(ADDR(__modver) - LOAD_OFFSET) { \ - __start___modver = .; \ - KEEP(*(__modver)) \ - __stop___modver = .; \ + VMLINUX_SYMBOL(__start___modver) = .; \ + *(__modver) \ + VMLINUX_SYMBOL(__stop___modver) = .; \ + . = ALIGN((align)); \ + VMLINUX_SYMBOL(__end_rodata) = .; \ } \ - \ - RO_EXCEPTION_TABLE \ - NOTES \ - BTF \ - \ - . = ALIGN((align)); \ - __end_rodata = .; + . = ALIGN((align)); +/* RODATA & RO_DATA provided for backward compatibility. + * All archs are supposed to use RO_DATA() */ +#define RODATA RO_DATA_SECTION(4096) +#define RO_DATA(align) RO_DATA_SECTION(align) -/* - * .text..L.cfi.jumptable.* contain Control-Flow Integrity (CFI) - * jump table entries. - */ -#ifdef CONFIG_CFI_CLANG -#define TEXT_CFI_JT \ - . = ALIGN(PMD_SIZE); \ - __cfi_jt_start = .; \ - *(.text..L.cfi.jumptable .text..L.cfi.jumptable.*) \ - . = ALIGN(PMD_SIZE); \ - __cfi_jt_end = .; -#else -#define TEXT_CFI_JT -#endif +#define SECURITY_INIT \ + .security_initcall.init : AT(ADDR(.security_initcall.init) - LOAD_OFFSET) { \ + VMLINUX_SYMBOL(__security_initcall_start) = .; \ + KEEP(*(.security_initcall.init)) \ + VMLINUX_SYMBOL(__security_initcall_end) = .; \ + } -/* - * Non-instrumentable text section - */ -#define NOINSTR_TEXT \ - ALIGN_FUNCTION(); \ - __noinstr_text_start = .; \ - *(.noinstr.text) \ - __noinstr_text_end = .; - -/* - * .text section. Map to function alignment to avoid address changes +/* .text section. Map to function alignment to avoid address changes * during second ld run in second ld pass when generating System.map - * - * TEXT_MAIN here will match .text.fixup and .text.unlikely if dead - * code elimination is enabled, so these sections should be converted - * to use ".." first. 
- */ + * LD_DEAD_CODE_DATA_ELIMINATION option enables -ffunction-sections generates + * .text.identifier which needs to be pulled in with .text , but some + * architectures define .text.foo which is not intended to be pulled in here. + * Those enabling LD_DEAD_CODE_DATA_ELIMINATION must ensure they don't have + * conflicting section names, and must pull in .text.[0-9a-zA-Z_]* */ #define TEXT_TEXT \ ALIGN_FUNCTION(); \ - *(.text.hot .text.hot.*) \ - *(TEXT_MAIN .text.fixup) \ - *(.text.unlikely .text.unlikely.*) \ - *(.text.unknown .text.unknown.*) \ - NOINSTR_TEXT \ - *(.text..refcount) \ + *(.text.hot .text .text.fixup .text.unlikely) \ *(.ref.text) \ - *(.text.asan.* .text.tsan.*) \ - TEXT_CFI_JT \ - MEM_KEEP(init.text*) \ - MEM_KEEP(exit.text*) \ + REFCOUNT_TEXT \ + MEM_KEEP(init.text) \ + MEM_KEEP(exit.text) \ +#define __REFCOUNT_TEXT(section) \ + VMLINUX_SYMBOL(__##section##_start) = .; \ + *(.text.##section) \ + VMLINUX_SYMBOL(__##section##_end) = .; + +#define REFCOUNT_TEXT \ + __REFCOUNT_TEXT(refcount_overflow) \ + __REFCOUNT_TEXT(refcount64_overflow) \ + __REFCOUNT_TEXT(refcount_underflow) \ + __REFCOUNT_TEXT(refcount64_underflow) \ /* sched.text is aling to function alignment to secure we have same * address even at second ld pass when generating System.map */ #define SCHED_TEXT \ ALIGN_FUNCTION(); \ - __sched_text_start = .; \ + VMLINUX_SYMBOL(__sched_text_start) = .; \ *(.sched.text) \ - __sched_text_end = .; + VMLINUX_SYMBOL(__sched_text_end) = .; /* spinlock.text is aling to function alignment to secure we have same * address even at second ld pass when generating System.map */ #define LOCK_TEXT \ ALIGN_FUNCTION(); \ - __lock_text_start = .; \ + VMLINUX_SYMBOL(__lock_text_start) = .; \ *(.spinlock.text) \ - __lock_text_end = .; + VMLINUX_SYMBOL(__lock_text_end) = .; #define CPUIDLE_TEXT \ ALIGN_FUNCTION(); \ - __cpuidle_text_start = .; \ + VMLINUX_SYMBOL(__cpuidle_text_start) = .; \ *(.cpuidle.text) \ - __cpuidle_text_end = .; + 
VMLINUX_SYMBOL(__cpuidle_text_end) = .; #define KPROBES_TEXT \ ALIGN_FUNCTION(); \ - __kprobes_text_start = .; \ + VMLINUX_SYMBOL(__kprobes_text_start) = .; \ *(.kprobes.text) \ - __kprobes_text_end = .; + VMLINUX_SYMBOL(__kprobes_text_end) = .; #define ENTRY_TEXT \ ALIGN_FUNCTION(); \ - __entry_text_start = .; \ + VMLINUX_SYMBOL(__entry_text_start) = .; \ *(.entry.text) \ - __entry_text_end = .; + VMLINUX_SYMBOL(__entry_text_end) = .; +#if defined(CONFIG_FUNCTION_GRAPH_TRACER) || defined(CONFIG_KASAN) #define IRQENTRY_TEXT \ ALIGN_FUNCTION(); \ - __irqentry_text_start = .; \ + VMLINUX_SYMBOL(__irqentry_text_start) = .; \ *(.irqentry.text) \ - __irqentry_text_end = .; + VMLINUX_SYMBOL(__irqentry_text_end) = .; +#else +#define IRQENTRY_TEXT +#endif +#if defined(CONFIG_FUNCTION_GRAPH_TRACER) || defined(CONFIG_KASAN) #define SOFTIRQENTRY_TEXT \ ALIGN_FUNCTION(); \ - __softirqentry_text_start = .; \ + VMLINUX_SYMBOL(__softirqentry_text_start) = .; \ *(.softirqentry.text) \ - __softirqentry_text_end = .; - -#define STATIC_CALL_TEXT \ - ALIGN_FUNCTION(); \ - __static_call_text_start = .; \ - *(.static_call.text) \ - __static_call_text_end = .; + VMLINUX_SYMBOL(__softirqentry_text_end) = .; +#else +#define SOFTIRQENTRY_TEXT +#endif /* Section used for early init (in .S files) */ -#define HEAD_TEXT KEEP(*(.head.text)) +#define HEAD_TEXT *(.head.text) #define HEAD_TEXT_SECTION \ .head.text : AT(ADDR(.head.text) - LOAD_OFFSET) { \ @@ -656,29 +531,11 @@ #define EXCEPTION_TABLE(align) \ . = ALIGN(align); \ __ex_table : AT(ADDR(__ex_table) - LOAD_OFFSET) { \ - __start___ex_table = .; \ - KEEP(*(__ex_table)) \ - __stop___ex_table = .; \ + VMLINUX_SYMBOL(__start___ex_table) = .; \ + *(__ex_table) \ + VMLINUX_SYMBOL(__stop___ex_table) = .; \ } -/* - * .BTF - */ -#ifdef CONFIG_DEBUG_INFO_BTF -#define BTF \ - .BTF : AT(ADDR(.BTF) - LOAD_OFFSET) { \ - __start_BTF = .; \ - KEEP(*(.BTF)) \ - __stop_BTF = .; \ - } \ - . 
= ALIGN(4); \ - .BTF_ids : AT(ADDR(.BTF_ids) - LOAD_OFFSET) { \ - *(.BTF_ids) \ - } -#else -#define BTF -#endif - /* * Init task */ @@ -690,12 +547,11 @@ #ifdef CONFIG_CONSTRUCTORS #define KERNEL_CTORS() . = ALIGN(8); \ - __ctors_start = .; \ - KEEP(*(SORT(.ctors.*))) \ - KEEP(*(.ctors)) \ - KEEP(*(SORT(.init_array.*))) \ - KEEP(*(.init_array)) \ - __ctors_end = .; + VMLINUX_SYMBOL(__ctors_start) = .; \ + *(.ctors) \ + *(SORT(.init_array.*)) \ + *(.init_array) \ + VMLINUX_SYMBOL(__ctors_end) = .; #else #define KERNEL_CTORS() #endif @@ -703,43 +559,43 @@ /* init and exit section handling */ #define INIT_DATA \ KEEP(*(SORT(___kentry+*))) \ - *(.init.data init.data.*) \ - MEM_DISCARD(init.data*) \ + *(.init.data) \ + MEM_DISCARD(init.data) \ KERNEL_CTORS() \ MCOUNT_REC() \ - *(.init.rodata .init.rodata.*) \ + *(.init.rodata.str) \ + *(.init.rodata) \ + *(.init.rodata.*) \ FTRACE_EVENTS() \ TRACE_SYSCALLS() \ KPROBE_BLACKLIST() \ - ERROR_INJECT_WHITELIST() \ MEM_DISCARD(init.rodata) \ CLK_OF_TABLES() \ RESERVEDMEM_OF_TABLES() \ - TIMER_OF_TABLES() \ + CLKSRC_OF_TABLES() \ + IOMMU_OF_TABLES() \ CPU_METHOD_OF_TABLES() \ CPUIDLE_METHOD_OF_TABLES() \ KERNEL_DTB() \ IRQCHIP_OF_MATCH_TABLE() \ ACPI_PROBE_TABLE(irqchip) \ - ACPI_PROBE_TABLE(timer) \ - THERMAL_TABLE(governor) \ - DTPM_TABLE() \ - EARLYCON_TABLE() \ - LSM_TABLE() \ - EARLY_LSM_TABLE() \ - KUNIT_TABLE() + ACPI_PROBE_TABLE(clksrc) \ + EARLYCON_TABLE() #define INIT_TEXT \ - *(.init.text .init.text.*) \ + *(.init.text) \ *(.text.startup) \ - MEM_DISCARD(init.text*) + MEM_DISCARD(init.text) #define EXIT_DATA \ - *(.exit.data .exit.data.*) \ - *(.fini_array .fini_array.*) \ - *(.dtors .dtors.*) \ - MEM_DISCARD(exit.data*) \ - MEM_DISCARD(exit.rodata*) + *(.exit.data) \ + *(.exit.rodata) \ + *(.exit.rodata.*) \ + *(.fini_array) \ + *(.dtors) \ + MEM_DISCARD(exit.data) \ + *(.exit.rodata.str) \ + MEM_DISCARD(exit.rodata) #define EXIT_TEXT \ *(.exit.text) \ @@ -756,8 +612,7 @@ #define SBSS(sbss_align) \ . 
= ALIGN(sbss_align); \ .sbss : AT(ADDR(.sbss) - LOAD_OFFSET) { \ - *(.dynsbss) \ - *(SBSS_MAIN) \ + *(.sbss) \ *(.scommon) \ } @@ -773,11 +628,9 @@ . = ALIGN(bss_align); \ .bss : AT(ADDR(.bss) - LOAD_OFFSET) { \ BSS_FIRST_SECTIONS \ - . = ALIGN(PAGE_SIZE); \ *(.bss..page_aligned) \ - . = ALIGN(PAGE_SIZE); \ *(.dynbss) \ - *(BSS_MAIN) \ + *(.bss .bss.[0-9a-zA-Z_]*) \ *(COMMON) \ } @@ -805,125 +658,66 @@ .debug_str 0 : { *(.debug_str) } \ .debug_loc 0 : { *(.debug_loc) } \ .debug_macinfo 0 : { *(.debug_macinfo) } \ - .debug_pubtypes 0 : { *(.debug_pubtypes) } \ - /* DWARF 3 */ \ - .debug_ranges 0 : { *(.debug_ranges) } \ /* SGI/MIPS DWARF 2 extensions */ \ .debug_weaknames 0 : { *(.debug_weaknames) } \ .debug_funcnames 0 : { *(.debug_funcnames) } \ .debug_typenames 0 : { *(.debug_typenames) } \ .debug_varnames 0 : { *(.debug_varnames) } \ - /* GNU DWARF 2 extensions */ \ - .debug_gnu_pubnames 0 : { *(.debug_gnu_pubnames) } \ - .debug_gnu_pubtypes 0 : { *(.debug_gnu_pubtypes) } \ - /* DWARF 4 */ \ - .debug_types 0 : { *(.debug_types) } \ - /* DWARF 5 */ \ - .debug_addr 0 : { *(.debug_addr) } \ - .debug_line_str 0 : { *(.debug_line_str) } \ - .debug_loclists 0 : { *(.debug_loclists) } \ - .debug_macro 0 : { *(.debug_macro) } \ - .debug_names 0 : { *(.debug_names) } \ - .debug_rnglists 0 : { *(.debug_rnglists) } \ - .debug_str_offsets 0 : { *(.debug_str_offsets) } -/* Stabs debugging sections. */ + /* Stabs debugging sections. */ #define STABS_DEBUG \ .stab 0 : { *(.stab) } \ .stabstr 0 : { *(.stabstr) } \ .stab.excl 0 : { *(.stab.excl) } \ .stab.exclstr 0 : { *(.stab.exclstr) } \ .stab.index 0 : { *(.stab.index) } \ - .stab.indexstr 0 : { *(.stab.indexstr) } - -/* Required sections not related to debugging. 
*/ -#define ELF_DETAILS \ - .comment 0 : { *(.comment) } \ - .symtab 0 : { *(.symtab) } \ - .strtab 0 : { *(.strtab) } \ - .shstrtab 0 : { *(.shstrtab) } + .stab.indexstr 0 : { *(.stab.indexstr) } \ + .comment 0 : { *(.comment) } #ifdef CONFIG_GENERIC_BUG #define BUG_TABLE \ . = ALIGN(8); \ __bug_table : AT(ADDR(__bug_table) - LOAD_OFFSET) { \ - __start___bug_table = .; \ - KEEP(*(__bug_table)) \ - __stop___bug_table = .; \ + VMLINUX_SYMBOL(__start___bug_table) = .; \ + *(__bug_table) \ + VMLINUX_SYMBOL(__stop___bug_table) = .; \ } #else #define BUG_TABLE #endif -#ifdef CONFIG_UNWINDER_ORC -#define ORC_UNWIND_TABLE \ - . = ALIGN(4); \ - .orc_unwind_ip : AT(ADDR(.orc_unwind_ip) - LOAD_OFFSET) { \ - __start_orc_unwind_ip = .; \ - KEEP(*(.orc_unwind_ip)) \ - __stop_orc_unwind_ip = .; \ - } \ - . = ALIGN(2); \ - .orc_unwind : AT(ADDR(.orc_unwind) - LOAD_OFFSET) { \ - __start_orc_unwind = .; \ - KEEP(*(.orc_unwind)) \ - __stop_orc_unwind = .; \ - } \ - . = ALIGN(4); \ - .orc_lookup : AT(ADDR(.orc_lookup) - LOAD_OFFSET) { \ - orc_lookup = .; \ - . += (((SIZEOF(.text) + LOOKUP_BLOCK_SIZE - 1) / \ - LOOKUP_BLOCK_SIZE) + 1) * 4; \ - orc_lookup_end = .; \ - } -#else -#define ORC_UNWIND_TABLE -#endif - #ifdef CONFIG_PM_TRACE #define TRACEDATA \ . 
= ALIGN(4); \ .tracedata : AT(ADDR(.tracedata) - LOAD_OFFSET) { \ - __tracedata_start = .; \ - KEEP(*(.tracedata)) \ - __tracedata_end = .; \ + VMLINUX_SYMBOL(__tracedata_start) = .; \ + *(.tracedata) \ + VMLINUX_SYMBOL(__tracedata_end) = .; \ } #else #define TRACEDATA #endif -#ifdef CONFIG_PRINTK_INDEX -#define PRINTK_INDEX \ - .printk_index : AT(ADDR(.printk_index) - LOAD_OFFSET) { \ - __start_printk_index = .; \ - *(.printk_index) \ - __stop_printk_index = .; \ - } -#else -#define PRINTK_INDEX -#endif - #define NOTES \ .notes : AT(ADDR(.notes) - LOAD_OFFSET) { \ - __start_notes = .; \ - KEEP(*(.note.*)) \ - __stop_notes = .; \ - } NOTES_HEADERS \ - NOTES_HEADERS_RESTORE + VMLINUX_SYMBOL(__start_notes) = .; \ + *(.note.*) \ + VMLINUX_SYMBOL(__stop_notes) = .; \ + } #define INIT_SETUP(initsetup_align) \ . = ALIGN(initsetup_align); \ - __setup_start = .; \ - KEEP(*(.init.setup)) \ - __setup_end = .; + VMLINUX_SYMBOL(__setup_start) = .; \ + *(.init.setup) \ + VMLINUX_SYMBOL(__setup_end) = .; #define INIT_CALLS_LEVEL(level) \ - __initcall##level##_start = .; \ + VMLINUX_SYMBOL(__initcall##level##_start) = .; \ KEEP(*(.initcall##level##.init)) \ KEEP(*(.initcall##level##s.init)) \ #define INIT_CALLS \ - __initcall_start = .; \ + VMLINUX_SYMBOL(__initcall_start) = .; \ KEEP(*(.initcallearly.init)) \ INIT_CALLS_LEVEL(0) \ INIT_CALLS_LEVEL(1) \ @@ -934,24 +728,22 @@ INIT_CALLS_LEVEL(rootfs) \ INIT_CALLS_LEVEL(6) \ INIT_CALLS_LEVEL(7) \ - __initcall_end = .; + VMLINUX_SYMBOL(__initcall_end) = .; #define CON_INITCALL \ - __con_initcall_start = .; \ + VMLINUX_SYMBOL(__con_initcall_start) = .; \ KEEP(*(.con_initcall.init)) \ - __con_initcall_end = .; + VMLINUX_SYMBOL(__con_initcall_end) = .; -/* Alignment must be consistent with (kunit_suite *) in include/kunit/test.h */ -#define KUNIT_TABLE() \ - . 
= ALIGN(8); \ - __kunit_suites_start = .; \ - KEEP(*(.kunit_test_suites)) \ - __kunit_suites_end = .; +#define SECURITY_INITCALL \ + VMLINUX_SYMBOL(__security_initcall_start) = .; \ + KEEP(*(.security_initcall.init)) \ + VMLINUX_SYMBOL(__security_initcall_end) = .; #ifdef CONFIG_BLK_DEV_INITRD #define INIT_RAM_FS \ . = ALIGN(4); \ - __initramfs_start = .; \ + VMLINUX_SYMBOL(__initramfs_start) = .; \ KEEP(*(.init.ramfs)) \ . = ALIGN(8); \ KEEP(*(.init.ramfs.info)) @@ -959,25 +751,6 @@ #define INIT_RAM_FS #endif -/* - * Memory encryption operates on a page basis. Since we need to clear - * the memory encryption mask for this section, it needs to be aligned - * on a page boundary and be a page-size multiple in length. - * - * Note: We use a separate section so that only this section gets - * decrypted to avoid exposing more than we wish. - */ -#ifdef CONFIG_AMD_MEM_ENCRYPT -#define PERCPU_DECRYPTED_SECTION \ - . = ALIGN(PAGE_SIZE); \ - *(.data..decrypted) \ - *(.data..percpu..decrypted) \ - . = ALIGN(PAGE_SIZE); -#else -#define PERCPU_DECRYPTED_SECTION -#endif - - /* * Default discarded sections. * @@ -987,48 +760,13 @@ * section definitions so that such archs put those in earlier section * definitions. */ -#ifdef RUNTIME_DISCARD_EXIT -#define EXIT_DISCARDS -#else -#define EXIT_DISCARDS \ - EXIT_TEXT \ - EXIT_DATA -#endif - -/* - * Clang's -fprofile-arcs, -fsanitize=kernel-address, and - * -fsanitize=thread produce unwanted sections (.eh_frame - * and .init_array.*), but CONFIG_CONSTRUCTORS wants to - * keep any .init_array.* sections. 
- * https://bugs.llvm.org/show_bug.cgi?id=46478 - */ -#if defined(CONFIG_GCOV_KERNEL) || defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KCSAN) || \ - defined(CONFIG_CFI_CLANG) -# ifdef CONFIG_CONSTRUCTORS -# define SANITIZER_DISCARDS \ - *(.eh_frame) -# else -# define SANITIZER_DISCARDS \ - *(.init_array) *(.init_array.*) \ - *(.eh_frame) -# endif -#else -# define SANITIZER_DISCARDS -#endif - -#define COMMON_DISCARDS \ - SANITIZER_DISCARDS \ - *(.discard) \ - *(.discard.*) \ - *(.modinfo) \ - /* ld.bfd warns about .gnu.version* even when not emitted */ \ - *(.gnu.version*) \ - #define DISCARDS \ /DISCARD/ : { \ - EXIT_DISCARDS \ + EXIT_TEXT \ + EXIT_DATA \ EXIT_CALL \ - COMMON_DISCARDS \ + *(.discard) \ + *(.discard.*) \ } /** @@ -1042,17 +780,18 @@ * sharing between subsections for different purposes. */ #define PERCPU_INPUT(cacheline) \ - __per_cpu_start = .; \ + VMLINUX_SYMBOL(__per_cpu_start) = .; \ *(.data..percpu..first) \ . = ALIGN(PAGE_SIZE); \ *(.data..percpu..page_aligned) \ - . = ALIGN(cacheline); \ + . = ALIGN(PAGE_SIZE); \ + *(.data..percpu..read_only) \ + . = ALIGN(PAGE_SIZE); \ *(.data..percpu..read_mostly) \ . = ALIGN(cacheline); \ *(.data..percpu) \ *(.data..percpu..shared_aligned) \ - PERCPU_DECRYPTED_SECTION \ - __per_cpu_end = .; + VMLINUX_SYMBOL(__per_cpu_end) = .; /** * PERCPU_VADDR - define output section for percpu area @@ -1074,16 +813,18 @@ * section in the linker script will go there too. @phdr should have * a leading colon. * - * Note that this macros defines __per_cpu_load as an absolute symbol. + * Note that this macros defines per_cpu_load as an absolute symbol. * If there is no need to put the percpu section at a predetermined * address, use PERCPU_SECTION. */ #define PERCPU_VADDR(cacheline, vaddr, phdr) \ - __per_cpu_load = .; \ - .data..percpu vaddr : AT(__per_cpu_load - LOAD_OFFSET) { \ + per_cpu_load = .; \ + .data..percpu vaddr : AT(VMLINUX_SYMBOL(per_cpu_load) \ + - LOAD_OFFSET) { \ + VMLINUX_SYMBOL(__per_cpu_load) = . 
+ per_cpu_load; \ PERCPU_INPUT(cacheline) \ } phdr \ - . = __per_cpu_load + SIZEOF(.data..percpu); + . = VMLINUX_SYMBOL(per_cpu_load) + SIZEOF(.data..percpu); /** * PERCPU_SECTION - define output section for percpu area, simple version @@ -1100,7 +841,7 @@ #define PERCPU_SECTION(cacheline) \ . = ALIGN(PAGE_SIZE); \ .data..percpu : AT(ADDR(.data..percpu) - LOAD_OFFSET) { \ - __per_cpu_load = .; \ + VMLINUX_SYMBOL(__per_cpu_load) = .; \ PERCPU_INPUT(cacheline) \ } @@ -1123,7 +864,7 @@ * matches the requirement of PAGE_ALIGNED_DATA. * * use 0 as page_align if page_aligned data is not used */ -#define RW_DATA(cacheline, pagealigned, inittask) \ +#define RW_DATA_SECTION(cacheline, pagealigned, inittask) \ . = ALIGN(PAGE_SIZE); \ .data : AT(ADDR(.data) - LOAD_OFFSET) { \ INIT_TASK_DATA(inittask) \ @@ -1133,30 +874,32 @@ READ_MOSTLY_DATA(cacheline) \ DATA_DATA \ CONSTRUCTORS \ - } \ - BUG_TABLE \ + } #define INIT_TEXT_SECTION(inittext_align) \ . = ALIGN(inittext_align); \ .init.text : AT(ADDR(.init.text) - LOAD_OFFSET) { \ - _sinittext = .; \ + VMLINUX_SYMBOL(_sinittext) = .; \ INIT_TEXT \ - _einittext = .; \ + VMLINUX_SYMBOL(_einittext) = .; \ } #define INIT_DATA_SECTION(initsetup_align) \ .init.data : AT(ADDR(.init.data) - LOAD_OFFSET) { \ + VMLINUX_SYMBOL(_sinitdata) = .; \ INIT_DATA \ INIT_SETUP(initsetup_align) \ INIT_CALLS \ CON_INITCALL \ + SECURITY_INITCALL \ INIT_RAM_FS \ + VMLINUX_SYMBOL(_einitdata) = .; \ } #define BSS_SECTION(sbss_align, bss_align, stop_align) \ . = ALIGN(sbss_align); \ - __bss_start = .; \ + VMLINUX_SYMBOL(__bss_start) = .; \ SBSS(sbss_align) \ BSS(bss_align) \ . 
= ALIGN(stop_align); \ - __bss_stop = .; + VMLINUX_SYMBOL(__bss_stop) = .; diff --git a/include/asm-generic/word-at-a-time.h b/include/asm-generic/word-at-a-time.h index 20c93f08c9..011dde083f 100644 --- a/include/asm-generic/word-at-a-time.h +++ b/include/asm-generic/word-at-a-time.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _ASM_WORD_AT_A_TIME_H #define _ASM_WORD_AT_A_TIME_H diff --git a/include/asm-generic/xor.h b/include/asm-generic/xor.h index b62a2a56a4..b4d843225a 100644 --- a/include/asm-generic/xor.h +++ b/include/asm-generic/xor.h @@ -1,8 +1,16 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * include/asm-generic/xor.h * * Generic optimized RAID-5 checksumming functions. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2, or (at your option) + * any later version. + * + * You should have received a copy of the GNU General Public License + * (for example /usr/src/linux/COPYING); if not, write to the Free + * Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ #include diff --git a/include/clocksource/arm_arch_timer.h b/include/clocksource/arm_arch_timer.h index 73c7139c86..caedb74c92 100644 --- a/include/clocksource/arm_arch_timer.h +++ b/include/clocksource/arm_arch_timer.h @@ -1,17 +1,24 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright (C) 2012 ARM Ltd. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . */ #ifndef __CLKSOURCE_ARM_ARCH_TIMER_H #define __CLKSOURCE_ARM_ARCH_TIMER_H -#include #include #include -#define ARCH_TIMER_TYPE_CP15 BIT(0) -#define ARCH_TIMER_TYPE_MEM BIT(1) - #define ARCH_TIMER_CTRL_ENABLE (1 << 0) #define ARCH_TIMER_CTRL_IT_MASK (1 << 1) #define ARCH_TIMER_CTRL_IT_STAT (1 << 2) @@ -27,28 +34,11 @@ enum arch_timer_reg { ARCH_TIMER_REG_TVAL, }; -enum arch_timer_ppi_nr { - ARCH_TIMER_PHYS_SECURE_PPI, - ARCH_TIMER_PHYS_NONSECURE_PPI, - ARCH_TIMER_VIRT_PPI, - ARCH_TIMER_HYP_PPI, - ARCH_TIMER_HYP_VIRT_PPI, - ARCH_TIMER_MAX_TIMER_PPI -}; - -enum arch_timer_spi_nr { - ARCH_TIMER_PHYS_SPI, - ARCH_TIMER_VIRT_SPI, - ARCH_TIMER_MAX_TIMER_SPI -}; - #define ARCH_TIMER_PHYS_ACCESS 0 #define ARCH_TIMER_VIRT_ACCESS 1 #define ARCH_TIMER_MEM_PHYS_ACCESS 2 #define ARCH_TIMER_MEM_VIRT_ACCESS 3 -#define ARCH_TIMER_MEM_MAX_FRAMES 8 - #define ARCH_TIMER_USR_PCT_ACCESS_EN (1 << 0) /* physical counter */ #define ARCH_TIMER_USR_VCT_ACCESS_EN (1 << 1) /* virtual counter */ #define ARCH_TIMER_VIRT_EVT_EN (1 << 2) @@ -57,28 +47,11 @@ enum arch_timer_spi_nr { #define ARCH_TIMER_USR_VT_ACCESS_EN (1 << 8) /* virtual timer registers */ #define ARCH_TIMER_USR_PT_ACCESS_EN (1 << 9) /* physical timer registers */ -#define ARCH_TIMER_EVT_STREAM_PERIOD_US 100 -#define ARCH_TIMER_EVT_STREAM_FREQ \ - (USEC_PER_SEC / ARCH_TIMER_EVT_STREAM_PERIOD_US) +#define ARCH_TIMER_EVT_STREAM_FREQ 10000 /* 100us */ struct arch_timer_kvm_info { struct timecounter timecounter; int virtual_irq; - int physical_irq; -}; - -struct arch_timer_mem_frame { - bool valid; - phys_addr_t cntbase; - size_t size; - int phys_irq; - int virt_irq; -}; - -struct arch_timer_mem { - phys_addr_t cntctlbase; - size_t size; - struct arch_timer_mem_frame frame[ARCH_TIMER_MEM_MAX_FRAMES]; }; #ifdef CONFIG_ARM_ARCH_TIMER @@ -86,7 +59,6 @@ struct arch_timer_mem { extern u32 arch_timer_get_rate(void); extern 
u64 (*arch_timer_read_counter)(void); extern struct arch_timer_kvm_info *arch_timer_get_kvm_info(void); -extern bool arch_timer_evtstrm_available(void); #else @@ -100,11 +72,6 @@ static inline u64 arch_timer_read_counter(void) return 0; } -static inline bool arch_timer_evtstrm_available(void) -{ - return false; -} - #endif #endif diff --git a/include/clocksource/metag_generic.h b/include/clocksource/metag_generic.h new file mode 100644 index 0000000000..ac17e7d06c --- /dev/null +++ b/include/clocksource/metag_generic.h @@ -0,0 +1,21 @@ +/* + * Copyright (C) 2013 Imaginaton Technologies Ltd. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ +#ifndef __CLKSOURCE_METAG_GENERIC_H +#define __CLKSOURCE_METAG_GENERIC_H + +extern int metag_generic_timer_init(void); + +#endif /* __CLKSOURCE_METAG_GENERIC_H */ diff --git a/include/clocksource/pxa.h b/include/clocksource/pxa.h index 0cfe7b9fdf..1efbe5a669 100644 --- a/include/clocksource/pxa.h +++ b/include/clocksource/pxa.h @@ -1,13 +1,18 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * PXA clocksource, clockevents, and OST interrupt handlers. * * Copyright (C) 2014 Robert Jarzmik + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; version 2 of the License. 
+ * */ #ifndef _CLOCKSOURCE_PXA_H #define _CLOCKSOURCE_PXA_H -extern void pxa_timer_nodt_init(int irq, void __iomem *base); +extern void pxa_timer_nodt_init(int irq, void __iomem *base, + unsigned long clock_tick_rate); #endif diff --git a/include/clocksource/samsung_pwm.h b/include/clocksource/samsung_pwm.h index 9b435caa95..0c7d48b8b3 100644 --- a/include/clocksource/samsung_pwm.h +++ b/include/clocksource/samsung_pwm.h @@ -1,6 +1,17 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright (C) 2013 Samsung Electronics Co., Ltd. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . 
*/ #ifndef __CLOCKSOURCE_SAMSUNG_PWM_H #define __CLOCKSOURCE_SAMSUNG_PWM_H @@ -27,7 +38,6 @@ struct samsung_pwm_variant { }; void samsung_pwm_clocksource_init(void __iomem *base, - unsigned int *irqs, - const struct samsung_pwm_variant *variant); + unsigned int *irqs, struct samsung_pwm_variant *variant); #endif /* __CLOCKSOURCE_SAMSUNG_PWM_H */ diff --git a/include/clocksource/timer-sp804.h b/include/clocksource/timer-sp804.h new file mode 100644 index 0000000000..7654d71243 --- /dev/null +++ b/include/clocksource/timer-sp804.h @@ -0,0 +1,28 @@ +#ifndef __CLKSOURCE_TIMER_SP804_H +#define __CLKSOURCE_TIMER_SP804_H + +struct clk; + +int __sp804_clocksource_and_sched_clock_init(void __iomem *, + const char *, struct clk *, int); +int __sp804_clockevents_init(void __iomem *, unsigned int, + struct clk *, const char *); +void sp804_timer_disable(void __iomem *); + +static inline void sp804_clocksource_init(void __iomem *base, const char *name) +{ + __sp804_clocksource_and_sched_clock_init(base, name, NULL, 0); +} + +static inline void sp804_clocksource_and_sched_clock_init(void __iomem *base, + const char *name) +{ + __sp804_clocksource_and_sched_clock_init(base, name, NULL, 1); +} + +static inline void sp804_clockevents_init(void __iomem *base, unsigned int irq, const char *name) +{ + __sp804_clockevents_init(base, irq, NULL, name); + +} +#endif diff --git a/include/crypto/ablk_helper.h b/include/crypto/ablk_helper.h new file mode 100644 index 0000000000..4f93df50c2 --- /dev/null +++ b/include/crypto/ablk_helper.h @@ -0,0 +1,31 @@ +/* + * Shared async block cipher helpers + */ + +#ifndef _CRYPTO_ABLK_HELPER_H +#define _CRYPTO_ABLK_HELPER_H + +#include +#include +#include + +struct async_helper_ctx { + struct cryptd_ablkcipher *cryptd_tfm; +}; + +extern int ablk_set_key(struct crypto_ablkcipher *tfm, const u8 *key, + unsigned int key_len); + +extern int __ablk_encrypt(struct ablkcipher_request *req); + +extern int ablk_encrypt(struct ablkcipher_request *req); + +extern 
int ablk_decrypt(struct ablkcipher_request *req); + +extern void ablk_exit(struct crypto_tfm *tfm); + +extern int ablk_init_common(struct crypto_tfm *tfm, const char *drv_name); + +extern int ablk_init(struct crypto_tfm *tfm); + +#endif /* _CRYPTO_ABLK_HELPER_H */ diff --git a/include/crypto/aead.h b/include/crypto/aead.h index 5af914c1ab..12f84327ca 100644 --- a/include/crypto/aead.h +++ b/include/crypto/aead.h @@ -1,8 +1,13 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * AEAD: Authenticated Encryption with Associated Data * * Copyright (c) 2007-2015 Herbert Xu + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 2 of the License, or (at your option) + * any later version. + * */ #ifndef _CRYPTO_AEAD_H @@ -43,33 +48,27 @@ * * Memory Structure: * - * The source scatterlist must contain the concatenation of - * associated data || plaintext or ciphertext. + * To support the needs of the most prominent user of AEAD ciphers, namely + * IPSEC, the AEAD ciphers have a special memory layout the caller must adhere + * to. * - * The destination scatterlist has the same layout, except that the plaintext - * (resp. ciphertext) will grow (resp. shrink) by the authentication tag size - * during encryption (resp. decryption). + * The scatter list pointing to the input data must contain: * - * In-place encryption/decryption is enabled by using the same scatterlist - * pointer for both the source and destination. + * * for RFC4106 ciphers, the concatenation of + * associated authentication data || IV || plaintext or ciphertext. Note, the + * same IV (buffer) is also set with the aead_request_set_crypt call. Note, + * the API call of aead_request_set_ad must provide the length of the AAD and + * the IV. The API call of aead_request_set_crypt only points to the size of + * the input plaintext or ciphertext. 
* - * Even in the out-of-place case, space must be reserved in the destination for - * the associated data, even though it won't be written to. This makes the - * in-place and out-of-place cases more consistent. It is permissible for the - * "destination" associated data to alias the "source" associated data. + * * for "normal" AEAD ciphers, the concatenation of + * associated authentication data || plaintext or ciphertext. * - * As with the other scatterlist crypto APIs, zero-length scatterlist elements - * are not allowed in the used part of the scatterlist. Thus, if there is no - * associated data, the first element must point to the plaintext/ciphertext. - * - * To meet the needs of IPsec, a special quirk applies to rfc4106, rfc4309, - * rfc4543, and rfc7539esp ciphers. For these ciphers, the final 'ivsize' bytes - * of the associated data buffer must contain a second copy of the IV. This is - * in addition to the copy passed to aead_request_set_crypt(). These two IV - * copies must not differ; different implementations of the same algorithm may - * behave differently in that case. Note that the algorithm might not actually - * treat the IV as associated data; nevertheless the length passed to - * aead_request_set_ad() must include it. + * It is important to note that if multiple scatter gather list entries form + * the input data mentioned above, the first entry must not point to a NULL + * buffer. If there is any potential where the AAD buffer can be NULL, the + * calling code must contain a precaution to ensure that this does not result + * in the first scatter gather list entry pointing to a NULL buffer. */ struct crypto_aead; @@ -116,6 +115,7 @@ struct aead_request { * @setkey: see struct skcipher_alg * @encrypt: see struct skcipher_alg * @decrypt: see struct skcipher_alg + * @geniv: see struct skcipher_alg * @ivsize: see struct skcipher_alg * @chunksize: see struct skcipher_alg * @init: Initialize the cryptographic transformation object. 
This function @@ -142,6 +142,8 @@ struct aead_alg { int (*init)(struct crypto_aead *tfm); void (*exit)(struct crypto_aead *tfm); + const char *geniv; + unsigned int ivsize; unsigned int maxauthsize; unsigned int chunksize; @@ -185,19 +187,12 @@ static inline struct crypto_tfm *crypto_aead_tfm(struct crypto_aead *tfm) /** * crypto_free_aead() - zeroize and free aead handle * @tfm: cipher handle to be freed - * - * If @tfm is a NULL or error pointer, this function does nothing. */ static inline void crypto_free_aead(struct crypto_aead *tfm) { crypto_destroy_tfm(tfm, crypto_aead_tfm(tfm)); } -static inline const char *crypto_aead_driver_name(struct crypto_aead *tfm) -{ - return crypto_tfm_alg_driver_name(crypto_aead_tfm(tfm)); -} - static inline struct aead_alg *crypto_aead_alg(struct crypto_aead *tfm) { return container_of(crypto_aead_tfm(tfm)->__crt_alg, @@ -240,16 +235,6 @@ static inline unsigned int crypto_aead_authsize(struct crypto_aead *tfm) return tfm->authsize; } -static inline unsigned int crypto_aead_alg_maxauthsize(struct aead_alg *alg) -{ - return alg->maxauthsize; -} - -static inline unsigned int crypto_aead_maxauthsize(struct crypto_aead *aead) -{ - return crypto_aead_alg_maxauthsize(crypto_aead_alg(aead)); -} - /** * crypto_aead_blocksize() - obtain block size of cipher * @tfm: cipher handle @@ -340,11 +325,14 @@ static inline struct crypto_aead *crypto_aead_reqtfm(struct aead_request *req) * * Return: 0 if the cipher operation was successful; < 0 if an error occurred */ -int crypto_aead_encrypt(struct aead_request *req); +static inline int crypto_aead_encrypt(struct aead_request *req) +{ + return crypto_aead_alg(crypto_aead_reqtfm(req))->encrypt(req); +} /** * crypto_aead_decrypt() - decrypt ciphertext - * @req: reference to the aead_request handle that holds all information + * @req: reference to the ablkcipher_request handle that holds all information * needed to perform the cipher operation * * Decrypt ciphertext data using the aead_request handle. 
That data structure @@ -364,7 +352,15 @@ int crypto_aead_encrypt(struct aead_request *req); * integrity of the ciphertext or the associated data was violated); * < 0 if an error occurred. */ -int crypto_aead_decrypt(struct aead_request *req); +static inline int crypto_aead_decrypt(struct aead_request *req) +{ + struct crypto_aead *aead = crypto_aead_reqtfm(req); + + if (req->cryptlen < crypto_aead_authsize(aead)) + return -EINVAL; + + return crypto_aead_alg(aead)->decrypt(req); +} /** * DOC: Asynchronous AEAD Request Handle @@ -432,7 +428,7 @@ static inline struct aead_request *aead_request_alloc(struct crypto_aead *tfm, */ static inline void aead_request_free(struct aead_request *req) { - kfree_sensitive(req); + kzfree(req); } /** @@ -456,7 +452,7 @@ static inline void aead_request_free(struct aead_request *req) * completes * * The callback function is registered with the aead_request handle and - * must comply with the following template:: + * must comply with the following template * * void callback_function(struct crypto_async_request *req, int error) */ @@ -487,18 +483,30 @@ static inline void aead_request_set_callback(struct aead_request *req, * destination is the ciphertext. For a decryption operation, the use is * reversed - the source is the ciphertext and the destination is the plaintext. * - * The memory structure for cipher operation has the following structure: + * For both src/dst the layout is associated data, plain/cipher text, + * authentication tag. * - * - AEAD encryption input: assoc data || plaintext - * - AEAD encryption output: assoc data || ciphertext || auth tag - * - AEAD decryption input: assoc data || ciphertext || auth tag - * - AEAD decryption output: assoc data || plaintext + * The content of the AD in the destination buffer after processing + * will either be untouched, or it will contain a copy of the AD + * from the source buffer. 
In order to ensure that it always has + * a copy of the AD, the user must copy the AD over either before + * or after processing. Of course this is not relevant if the user + * is doing in-place processing where src == dst. * - * Albeit the kernel requires the presence of the AAD buffer, however, - * the kernel does not fill the AAD buffer in the output case. If the - * caller wants to have that data buffer filled, the caller must either - * use an in-place cipher operation (i.e. same memory location for - * input/output memory location). + * IMPORTANT NOTE AEAD requires an authentication tag (MAC). For decryption, + * the caller must concatenate the ciphertext followed by the + * authentication tag and provide the entire data stream to the + * decryption operation (i.e. the data length used for the + * initialization of the scatterlist and the data length for the + * decryption operation is identical). For encryption, however, + * the authentication tag is created while encrypting the data. + * The destination buffer must hold sufficient space for the + * ciphertext and the authentication tag while the encryption + * invocation must only point to the plaintext data size. The + * following code snippet illustrates the memory usage + * buffer = kmalloc(ptbuflen + (enc ? authsize : 0)); + * sg_init_one(&sg, buffer, ptbuflen + (enc ? 
authsize : 0)); + * aead_request_set_crypt(req, &sg, &sg, ptbuflen, iv); */ static inline void aead_request_set_crypt(struct aead_request *req, struct scatterlist *src, diff --git a/include/crypto/aes.h b/include/crypto/aes.h index 2090729701..7524ba3b6f 100644 --- a/include/crypto/aes.h +++ b/include/crypto/aes.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ /* * Common values for AES algorithms */ @@ -28,63 +27,13 @@ struct crypto_aes_ctx { u32 key_length; }; -extern const u32 crypto_ft_tab[4][256] ____cacheline_aligned; -extern const u32 crypto_it_tab[4][256] ____cacheline_aligned; - -/* - * validate key length for AES algorithms - */ -static inline int aes_check_keylen(unsigned int keylen) -{ - switch (keylen) { - case AES_KEYSIZE_128: - case AES_KEYSIZE_192: - case AES_KEYSIZE_256: - break; - default: - return -EINVAL; - } - - return 0; -} +extern const u32 crypto_ft_tab[4][256]; +extern const u32 crypto_fl_tab[4][256]; +extern const u32 crypto_it_tab[4][256]; +extern const u32 crypto_il_tab[4][256]; int crypto_aes_set_key(struct crypto_tfm *tfm, const u8 *in_key, unsigned int key_len); - -/** - * aes_expandkey - Expands the AES key as described in FIPS-197 - * @ctx: The location where the computed key will be stored. - * @in_key: The supplied key. - * @key_len: The length of the supplied key. - * - * Returns 0 on success. The function fails only if an invalid key size (or - * pointer) is supplied. - * The expanded key size is 240 bytes (max of 14 rounds with a unique 16 bytes - * key schedule plus a 16 bytes key which is used before the first round). - * The decryption key is prepared for the "Equivalent Inverse Cipher" as - * described in FIPS-197. The first slot (16 bytes) of each key (enc or dec) is - * for the initial combination, the second slot for the first round and so on. 
- */ -int aes_expandkey(struct crypto_aes_ctx *ctx, const u8 *in_key, - unsigned int key_len); - -/** - * aes_encrypt - Encrypt a single AES block - * @ctx: Context struct containing the key schedule - * @out: Buffer to store the ciphertext - * @in: Buffer containing the plaintext - */ -void aes_encrypt(const struct crypto_aes_ctx *ctx, u8 *out, const u8 *in); - -/** - * aes_decrypt - Decrypt a single AES block - * @ctx: Context struct containing the key schedule - * @out: Buffer to store the plaintext - * @in: Buffer containing the ciphertext - */ -void aes_decrypt(const struct crypto_aes_ctx *ctx, u8 *out, const u8 *in); - -extern const u8 crypto_aes_sbox[]; -extern const u8 crypto_aes_inv_sbox[]; - +int crypto_aes_expand_key(struct crypto_aes_ctx *ctx, const u8 *in_key, + unsigned int key_len); #endif diff --git a/include/crypto/akcipher.h b/include/crypto/akcipher.h index 5764b46bd1..c37cc59e9b 100644 --- a/include/crypto/akcipher.h +++ b/include/crypto/akcipher.h @@ -1,9 +1,14 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * Public Key Encryption * * Copyright (c) 2015, Intel Corporation * Authors: Tadeusz Struk + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 2 of the License, or (at your option) + * any later version. + * */ #ifndef _CRYPTO_AKCIPHER_H #define _CRYPTO_AKCIPHER_H @@ -14,20 +19,14 @@ * * @base: Common attributes for async crypto requests * @src: Source data - * For verify op this is signature + digest, in that case - * total size of @src is @src_len + @dst_len. - * @dst: Destination data (Should be NULL for verify op) + * @dst: Destination data * @src_len: Size of the input buffer - * For verify op it's size of signature part of @src, this part - * is supposed to be operated by cipher. - * @dst_len: Size of @dst buffer (for all ops except verify). 
- * It needs to be at least as big as the expected result - * depending on the operation. + * @dst_len: Size of the output buffer. It needs to be at least + * as big as the expected result depending on the operation * After operation it will be updated with the actual size of the * result. * In case of error where the dst sgl size was insufficient, * it will be updated to the size required for the operation. - * For verify op this is size of digest part in @src. * @__ctx: Start of private context data */ struct akcipher_request { @@ -56,9 +55,10 @@ struct crypto_akcipher { * algorithm. In case of error, where the dst_len was insufficient, * the req->dst_len will be updated to the size required for the * operation - * @verify: Function performs a complete verify operation as defined by - * public key algorithm, returning verification status. Requires - * digest value as input parameter. + * @verify: Function performs a sign operation as defined by public key + * algorithm. In case of error, where the dst_len was insufficient, + * the req->dst_len will be updated to the size required for the + * operation * @encrypt: Function performs an encrypt operation as defined by public key * algorithm. In case of error, where the dst_len was insufficient, * the req->dst_len will be updated to the size required for the @@ -69,10 +69,10 @@ struct crypto_akcipher { * operation * @set_pub_key: Function invokes the algorithm specific set public key * function, which knows how to decode and interpret - * the BER encoded public key and parameters + * the BER encoded public key * @set_priv_key: Function invokes the algorithm specific set private key * function, which knows how to decode and interpret - * the BER encoded private key and parameters + * the BER encoded private key * @max_size: Function returns dest buffer size required for a given key. * @init: Initialize the cryptographic transformation object. 
* This function is used to initialize the cryptographic @@ -98,7 +98,7 @@ struct akcipher_alg { unsigned int keylen); int (*set_priv_key)(struct crypto_akcipher *tfm, const void *key, unsigned int keylen); - unsigned int (*max_size)(struct crypto_akcipher *tfm); + int (*max_size)(struct crypto_akcipher *tfm); int (*init)(struct crypto_akcipher *tfm); void (*exit)(struct crypto_akcipher *tfm); @@ -174,8 +174,6 @@ static inline struct crypto_akcipher *crypto_akcipher_reqtfm( * crypto_free_akcipher() - free AKCIPHER tfm handle * * @tfm: AKCIPHER tfm handle allocated with crypto_alloc_akcipher() - * - * If @tfm is a NULL or error pointer, this function does nothing. */ static inline void crypto_free_akcipher(struct crypto_akcipher *tfm) { @@ -209,7 +207,7 @@ static inline struct akcipher_request *akcipher_request_alloc( */ static inline void akcipher_request_free(struct akcipher_request *req) { - kfree_sensitive(req); + kzfree(req); } /** @@ -240,10 +238,9 @@ static inline void akcipher_request_set_callback(struct akcipher_request *req, * * @req: public key request * @src: ptr to input scatter list - * @dst: ptr to output scatter list or NULL for verify op + * @dst: ptr to output scatter list * @src_len: size of the src input scatter list to be processed - * @dst_len: size of the dst output scatter list or size of signature - * portion in @src for verify op + * @dst_len: size of the dst output scatter list */ static inline void akcipher_request_set_crypt(struct akcipher_request *req, struct scatterlist *src, @@ -260,14 +257,13 @@ static inline void akcipher_request_set_crypt(struct akcipher_request *req, /** * crypto_akcipher_maxsize() - Get len for output buffer * - * Function returns the dest buffer size required for a given key. - * Function assumes that the key is already set in the transformation. If this - * function is called without a setkey or with a failed setkey, you will end up - * in a NULL dereference. 
+ * Function returns the dest buffer size required for a given key * * @tfm: AKCIPHER tfm handle allocated with crypto_alloc_akcipher() + * + * Return: minimum len for output buffer or error code in key hasn't been set */ -static inline unsigned int crypto_akcipher_maxsize(struct crypto_akcipher *tfm) +static inline int crypto_akcipher_maxsize(struct crypto_akcipher *tfm) { struct akcipher_alg *alg = crypto_akcipher_alg(tfm); @@ -288,14 +284,8 @@ static inline int crypto_akcipher_encrypt(struct akcipher_request *req) { struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req); struct akcipher_alg *alg = crypto_akcipher_alg(tfm); - struct crypto_alg *calg = tfm->base.__crt_alg; - unsigned int src_len = req->src_len; - int ret; - crypto_stats_get(calg); - ret = alg->encrypt(req); - crypto_stats_akcipher_encrypt(src_len, ret, calg); - return ret; + return alg->encrypt(req); } /** @@ -312,14 +302,8 @@ static inline int crypto_akcipher_decrypt(struct akcipher_request *req) { struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req); struct akcipher_alg *alg = crypto_akcipher_alg(tfm); - struct crypto_alg *calg = tfm->base.__crt_alg; - unsigned int src_len = req->src_len; - int ret; - crypto_stats_get(calg); - ret = alg->decrypt(req); - crypto_stats_akcipher_decrypt(src_len, ret, calg); - return ret; + return alg->decrypt(req); } /** @@ -336,52 +320,37 @@ static inline int crypto_akcipher_sign(struct akcipher_request *req) { struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req); struct akcipher_alg *alg = crypto_akcipher_alg(tfm); - struct crypto_alg *calg = tfm->base.__crt_alg; - int ret; - crypto_stats_get(calg); - ret = alg->sign(req); - crypto_stats_akcipher_sign(ret, calg); - return ret; + return alg->sign(req); } /** - * crypto_akcipher_verify() - Invoke public key signature verification + * crypto_akcipher_verify() - Invoke public key verify operation * - * Function invokes the specific public key signature verification operation - * for a given public key 
algorithm. + * Function invokes the specific public key verify operation for a given + * public key algorithm * * @req: asymmetric key request * - * Note: req->dst should be NULL, req->src should point to SG of size - * (req->src_size + req->dst_size), containing signature (of req->src_size - * length) with appended digest (of req->dst_size length). - * - * Return: zero on verification success; error code in case of error. + * Return: zero on success; error code in case of error */ static inline int crypto_akcipher_verify(struct akcipher_request *req) { struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req); struct akcipher_alg *alg = crypto_akcipher_alg(tfm); - struct crypto_alg *calg = tfm->base.__crt_alg; - int ret; - crypto_stats_get(calg); - ret = alg->verify(req); - crypto_stats_akcipher_verify(ret, calg); - return ret; + return alg->verify(req); } /** * crypto_akcipher_set_pub_key() - Invoke set public key operation * * Function invokes the algorithm specific set key function, which knows - * how to decode and interpret the encoded key and parameters + * how to decode and interpret the encoded key * * @tfm: tfm handle - * @key: BER encoded public key, algo OID, paramlen, BER encoded - * parameters - * @keylen: length of the key (not including other data) + * @key: BER encoded public key + * @keylen: length of the key * * Return: zero on success; error code in case of error */ @@ -398,12 +367,11 @@ static inline int crypto_akcipher_set_pub_key(struct crypto_akcipher *tfm, * crypto_akcipher_set_priv_key() - Invoke set private key operation * * Function invokes the algorithm specific set key function, which knows - * how to decode and interpret the encoded key and parameters + * how to decode and interpret the encoded key * * @tfm: tfm handle - * @key: BER encoded private key, algo OID, paramlen, BER encoded - * parameters - * @keylen: length of the key (not including other data) + * @key: BER encoded private key + * @keylen: length of the key * * Return: 
zero on success; error code in case of error */ diff --git a/include/crypto/algapi.h b/include/crypto/algapi.h index 5f6841c73e..5fa5211b01 100644 --- a/include/crypto/algapi.h +++ b/include/crypto/algapi.h @@ -1,8 +1,13 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * Cryptographic API for algorithms (i.e., low-level API). * * Copyright (c) 2006 Herbert Xu + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 2 of the License, or (at your option) + * any later version. + * */ #ifndef _CRYPTO_ALGAPI_H #define _CRYPTO_ALGAPI_H @@ -10,23 +15,13 @@ #include #include #include - -/* - * Maximum values for blocksize and alignmask, used to allocate - * static buffers that are big enough for any combination of - * algs and architectures. Ciphers have a lower maximum size. - */ -#define MAX_ALGAPI_BLOCKSIZE 160 -#define MAX_ALGAPI_ALIGNMASK 63 -#define MAX_CIPHER_BLOCKSIZE 16 -#define MAX_CIPHER_ALIGNMASK 15 +#include struct crypto_aead; struct crypto_instance; struct module; struct rtattr; struct seq_file; -struct sk_buff; struct crypto_type { unsigned int (*ctxsize)(struct crypto_alg *alg, u32 type, u32 mask); @@ -35,25 +30,20 @@ struct crypto_type { int (*init_tfm)(struct crypto_tfm *tfm); void (*show)(struct seq_file *m, struct crypto_alg *alg); int (*report)(struct sk_buff *skb, struct crypto_alg *alg); + struct crypto_alg *(*lookup)(const char *name, u32 type, u32 mask); void (*free)(struct crypto_instance *inst); unsigned int type; unsigned int maskclear; unsigned int maskset; unsigned int tfmsize; -}; +} __do_const; struct crypto_instance { struct crypto_alg alg; struct crypto_template *tmpl; - - union { - /* Node in list of instances after registration. */ - struct hlist_node list; - /* List of attached spawns before registration. 
*/ - struct crypto_spawn *spawns; - }; + struct hlist_node list; void *__ctx[] CRYPTO_MINALIGN_ATTR; }; @@ -63,6 +53,8 @@ struct crypto_template { struct hlist_head instances; struct module *module; + struct crypto_instance *(*alloc)(struct rtattr **tb); + void (*free)(struct crypto_instance *inst); int (*create)(struct crypto_template *tmpl, struct rtattr **tb); char name[CRYPTO_MAX_ALG_NAME]; @@ -71,16 +63,9 @@ struct crypto_template { struct crypto_spawn { struct list_head list; struct crypto_alg *alg; - union { - /* Back pointer to instance after registration.*/ - struct crypto_instance *inst; - /* Spawn list pointer prior to registration. */ - struct crypto_spawn *next; - }; + struct crypto_instance *inst; const struct crypto_type *frontend; u32 mask; - bool dead; - bool registered; }; struct crypto_queue { @@ -96,89 +81,139 @@ struct scatter_walk { unsigned int offset; }; -struct crypto_attr_alg { - char name[CRYPTO_MAX_ALG_NAME]; +struct blkcipher_walk { + union { + struct { + struct page *page; + unsigned long offset; + } phys; + + struct { + u8 *page; + u8 *addr; + } virt; + } src, dst; + + struct scatter_walk in; + unsigned int nbytes; + + struct scatter_walk out; + unsigned int total; + + void *page; + u8 *buffer; + u8 *iv; + unsigned int ivsize; + + int flags; + unsigned int walk_blocksize; + unsigned int cipher_blocksize; + unsigned int alignmask; }; -struct crypto_attr_type { - u32 type; - u32 mask; +struct ablkcipher_walk { + struct { + struct page *page; + unsigned int offset; + } src, dst; + + struct scatter_walk in; + unsigned int nbytes; + struct scatter_walk out; + unsigned int total; + struct list_head buffers; + u8 *iv_buffer; + u8 *iv; + int flags; + unsigned int blocksize; }; +extern const struct crypto_type crypto_ablkcipher_type; +extern const struct crypto_type crypto_blkcipher_type; + void crypto_mod_put(struct crypto_alg *alg); int crypto_register_template(struct crypto_template *tmpl); -int crypto_register_templates(struct 
crypto_template *tmpls, int count); void crypto_unregister_template(struct crypto_template *tmpl); -void crypto_unregister_templates(struct crypto_template *tmpls, int count); struct crypto_template *crypto_lookup_template(const char *name); int crypto_register_instance(struct crypto_template *tmpl, struct crypto_instance *inst); -void crypto_unregister_instance(struct crypto_instance *inst); +int crypto_unregister_instance(struct crypto_instance *inst); + +int crypto_init_spawn(struct crypto_spawn *spawn, struct crypto_alg *alg, + struct crypto_instance *inst, u32 mask); +int crypto_init_spawn2(struct crypto_spawn *spawn, struct crypto_alg *alg, + struct crypto_instance *inst, + const struct crypto_type *frontend); +int crypto_grab_spawn(struct crypto_spawn *spawn, const char *name, + u32 type, u32 mask); -int crypto_grab_spawn(struct crypto_spawn *spawn, struct crypto_instance *inst, - const char *name, u32 type, u32 mask); void crypto_drop_spawn(struct crypto_spawn *spawn); struct crypto_tfm *crypto_spawn_tfm(struct crypto_spawn *spawn, u32 type, u32 mask); void *crypto_spawn_tfm2(struct crypto_spawn *spawn); +static inline void crypto_set_spawn(struct crypto_spawn *spawn, + struct crypto_instance *inst) +{ + spawn->inst = inst; +} + struct crypto_attr_type *crypto_get_attr_type(struct rtattr **tb); -int crypto_check_attr_type(struct rtattr **tb, u32 type, u32 *mask_ret); +int crypto_check_attr_type(struct rtattr **tb, u32 type); const char *crypto_attr_alg_name(struct rtattr *rta); +struct crypto_alg *crypto_attr_alg2(struct rtattr *rta, + const struct crypto_type *frontend, + u32 type, u32 mask); + +static inline struct crypto_alg *crypto_attr_alg(struct rtattr *rta, + u32 type, u32 mask) +{ + return crypto_attr_alg2(rta, NULL, type, mask); +} + +int crypto_attr_u32(struct rtattr *rta, u32 *num); int crypto_inst_setname(struct crypto_instance *inst, const char *name, struct crypto_alg *alg); +void *crypto_alloc_instance2(const char *name, struct crypto_alg 
*alg, + unsigned int head); +struct crypto_instance *crypto_alloc_instance(const char *name, + struct crypto_alg *alg); void crypto_init_queue(struct crypto_queue *queue, unsigned int max_qlen); int crypto_enqueue_request(struct crypto_queue *queue, struct crypto_async_request *request); -void crypto_enqueue_request_head(struct crypto_queue *queue, - struct crypto_async_request *request); struct crypto_async_request *crypto_dequeue_request(struct crypto_queue *queue); +int crypto_tfm_in_queue(struct crypto_queue *queue, struct crypto_tfm *tfm); static inline unsigned int crypto_queue_len(struct crypto_queue *queue) { return queue->qlen; } +/* These functions require the input/output to be aligned as u32. */ void crypto_inc(u8 *a, unsigned int size); -void __crypto_xor(u8 *dst, const u8 *src1, const u8 *src2, unsigned int size); +void crypto_xor(u8 *dst, const u8 *src, unsigned int size); -static inline void crypto_xor(u8 *dst, const u8 *src, unsigned int size) -{ - if (IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) && - __builtin_constant_p(size) && - (size % sizeof(unsigned long)) == 0) { - unsigned long *d = (unsigned long *)dst; - unsigned long *s = (unsigned long *)src; +int blkcipher_walk_done(struct blkcipher_desc *desc, + struct blkcipher_walk *walk, int err); +int blkcipher_walk_virt(struct blkcipher_desc *desc, + struct blkcipher_walk *walk); +int blkcipher_walk_phys(struct blkcipher_desc *desc, + struct blkcipher_walk *walk); +int blkcipher_walk_virt_block(struct blkcipher_desc *desc, + struct blkcipher_walk *walk, + unsigned int blocksize); +int blkcipher_aead_walk_virt_block(struct blkcipher_desc *desc, + struct blkcipher_walk *walk, + struct crypto_aead *tfm, + unsigned int blocksize); - while (size > 0) { - *d++ ^= *s++; - size -= sizeof(unsigned long); - } - } else { - __crypto_xor(dst, dst, src, size); - } -} - -static inline void crypto_xor_cpy(u8 *dst, const u8 *src1, const u8 *src2, - unsigned int size) -{ - if 
(IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) && - __builtin_constant_p(size) && - (size % sizeof(unsigned long)) == 0) { - unsigned long *d = (unsigned long *)dst; - unsigned long *s1 = (unsigned long *)src1; - unsigned long *s2 = (unsigned long *)src2; - - while (size > 0) { - *d++ = *s1++ ^ *s2++; - size -= sizeof(unsigned long); - } - } else { - __crypto_xor(dst, src1, src2, size); - } -} +int ablkcipher_walk_done(struct ablkcipher_request *req, + struct ablkcipher_walk *walk, int err); +int ablkcipher_walk_phys(struct ablkcipher_request *req, + struct ablkcipher_walk *walk); +void __ablkcipher_walk_complete(struct ablkcipher_walk *walk); static inline void *crypto_tfm_ctx_aligned(struct crypto_tfm *tfm) { @@ -197,6 +232,82 @@ static inline void *crypto_instance_ctx(struct crypto_instance *inst) return inst->__ctx; } +static inline struct ablkcipher_alg *crypto_ablkcipher_alg( + struct crypto_ablkcipher *tfm) +{ + return &crypto_ablkcipher_tfm(tfm)->__crt_alg->cra_ablkcipher; +} + +static inline void *crypto_ablkcipher_ctx(struct crypto_ablkcipher *tfm) +{ + return crypto_tfm_ctx(&tfm->base); +} + +static inline void *crypto_ablkcipher_ctx_aligned(struct crypto_ablkcipher *tfm) +{ + return crypto_tfm_ctx_aligned(&tfm->base); +} + +static inline struct crypto_blkcipher *crypto_spawn_blkcipher( + struct crypto_spawn *spawn) +{ + u32 type = CRYPTO_ALG_TYPE_BLKCIPHER; + u32 mask = CRYPTO_ALG_TYPE_MASK; + + return __crypto_blkcipher_cast(crypto_spawn_tfm(spawn, type, mask)); +} + +static inline void *crypto_blkcipher_ctx(struct crypto_blkcipher *tfm) +{ + return crypto_tfm_ctx(&tfm->base); +} + +static inline void *crypto_blkcipher_ctx_aligned(struct crypto_blkcipher *tfm) +{ + return crypto_tfm_ctx_aligned(&tfm->base); +} + +static inline struct crypto_cipher *crypto_spawn_cipher( + struct crypto_spawn *spawn) +{ + u32 type = CRYPTO_ALG_TYPE_CIPHER; + u32 mask = CRYPTO_ALG_TYPE_MASK; + + return __crypto_cipher_cast(crypto_spawn_tfm(spawn, type, mask)); +} + 
+static inline struct cipher_alg *crypto_cipher_alg(struct crypto_cipher *tfm) +{ + return &crypto_cipher_tfm(tfm)->__crt_alg->cra_cipher; +} + +static inline void blkcipher_walk_init(struct blkcipher_walk *walk, + struct scatterlist *dst, + struct scatterlist *src, + unsigned int nbytes) +{ + walk->in.sg = src; + walk->out.sg = dst; + walk->total = nbytes; +} + +static inline void ablkcipher_walk_init(struct ablkcipher_walk *walk, + struct scatterlist *dst, + struct scatterlist *src, + unsigned int nbytes) +{ + walk->in.sg = src; + walk->out.sg = dst; + walk->total = nbytes; + INIT_LIST_HEAD(&walk->buffers); +} + +static inline void ablkcipher_walk_complete(struct ablkcipher_walk *walk) +{ + if (unlikely(!list_empty(&walk->buffers))) + __ablkcipher_walk_complete(walk); +} + static inline struct crypto_async_request *crypto_get_backlog( struct crypto_queue *queue) { @@ -204,29 +315,42 @@ static inline struct crypto_async_request *crypto_get_backlog( container_of(queue->backlog, struct crypto_async_request, list); } -static inline u32 crypto_requires_off(struct crypto_attr_type *algt, u32 off) +static inline int ablkcipher_enqueue_request(struct crypto_queue *queue, + struct ablkcipher_request *request) { - return (algt->type ^ off) & algt->mask & off; + return crypto_enqueue_request(queue, &request->base); +} + +static inline struct ablkcipher_request *ablkcipher_dequeue_request( + struct crypto_queue *queue) +{ + return ablkcipher_request_cast(crypto_dequeue_request(queue)); +} + +static inline void *ablkcipher_request_ctx(struct ablkcipher_request *req) +{ + return req->__ctx; +} + +static inline int ablkcipher_tfm_in_queue(struct crypto_queue *queue, + struct crypto_ablkcipher *tfm) +{ + return crypto_tfm_in_queue(queue, crypto_ablkcipher_tfm(tfm)); +} + +static inline struct crypto_alg *crypto_get_attr_alg(struct rtattr **tb, + u32 type, u32 mask) +{ + return crypto_attr_alg(tb[1], type, mask); } /* - * When an algorithm uses another algorithm (e.g., if it's an 
instance of a - * template), these are the flags that should always be set on the "outer" - * algorithm if any "inner" algorithm has them set. + * Returns CRYPTO_ALG_ASYNC if type/mask requires the use of sync algorithms. + * Otherwise returns zero. */ -#define CRYPTO_ALG_INHERITED_FLAGS \ - (CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK | \ - CRYPTO_ALG_ALLOCATES_MEMORY) - -/* - * Given the type and mask that specify the flags restrictions on a template - * instance being created, return the mask that should be passed to - * crypto_grab_*() (along with type=0) to honor any request the user made to - * have any of the CRYPTO_ALG_INHERITED_FLAGS clear. - */ -static inline u32 crypto_algt_inherited_mask(struct crypto_attr_type *algt) +static inline int crypto_requires_sync(u32 type, u32 mask) { - return crypto_requires_off(algt, CRYPTO_ALG_INHERITED_FLAGS); + return (type ^ CRYPTO_ALG_ASYNC) & mask & CRYPTO_ALG_ASYNC; } noinline unsigned long __crypto_memneq(const void *a, const void *b, size_t size); @@ -246,14 +370,12 @@ static inline int crypto_memneq(const void *a, const void *b, size_t size) return __crypto_memneq(a, b, size) != 0UL ? 1 : 0; } -int crypto_register_notifier(struct notifier_block *nb); -int crypto_unregister_notifier(struct notifier_block *nb); - -/* Crypto notification events. 
*/ -enum { - CRYPTO_MSG_ALG_REQUEST, - CRYPTO_MSG_ALG_REGISTER, - CRYPTO_MSG_ALG_LOADED, -}; +static inline void crypto_yield(u32 flags) +{ +#if !defined(CONFIG_PREEMPT) || defined(CONFIG_PREEMPT_VOLUNTARY) + if (flags & CRYPTO_TFM_REQ_MAY_SLEEP) + cond_resched(); +#endif +} #endif /* _CRYPTO_ALGAPI_H */ diff --git a/include/crypto/authenc.h b/include/crypto/authenc.h index 5f92a98608..6775059539 100644 --- a/include/crypto/authenc.h +++ b/include/crypto/authenc.h @@ -1,8 +1,13 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * Authenc: Simple AEAD wrapper for IPsec * * Copyright (c) 2007 Herbert Xu + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 2 of the License, or (at your option) + * any later version. + * */ #ifndef _CRYPTO_AUTHENC_H #define _CRYPTO_AUTHENC_H diff --git a/include/crypto/blowfish.h b/include/crypto/blowfish.h index 9b384670b3..1450d4a279 100644 --- a/include/crypto/blowfish.h +++ b/include/crypto/blowfish.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ /* * Common values for blowfish algorithms */ diff --git a/include/crypto/cast5.h b/include/crypto/cast5.h index 3d4ed4ea9c..14fbf39d63 100644 --- a/include/crypto/cast5.h +++ b/include/crypto/cast5.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _CRYPTO_CAST5_H #define _CRYPTO_CAST5_H diff --git a/include/crypto/cast6.h b/include/crypto/cast6.h index 38f490cd50..1a592df2f7 100644 --- a/include/crypto/cast6.h +++ b/include/crypto/cast6.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _CRYPTO_CAST6_H #define _CRYPTO_CAST6_H @@ -15,10 +14,11 @@ struct cast6_ctx { u8 Kr[12][4]; }; -int __cast6_setkey(struct cast6_ctx *ctx, const u8 *key, unsigned int keylen); +int __cast6_setkey(struct cast6_ctx *ctx, const u8 *key, + unsigned int keylen, u32 *flags); int cast6_setkey(struct crypto_tfm *tfm, const u8 
*key, unsigned int keylen); -void __cast6_encrypt(const void *ctx, u8 *dst, const u8 *src); -void __cast6_decrypt(const void *ctx, u8 *dst, const u8 *src); +void __cast6_encrypt(void *ctx, u8 *dst, const u8 *src); +void __cast6_decrypt(void *ctx, u8 *dst, const u8 *src); #endif diff --git a/include/crypto/cast_common.h b/include/crypto/cast_common.h index b900902441..b7df35cd9f 100644 --- a/include/crypto/cast_common.h +++ b/include/crypto/cast_common.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _CRYPTO_CAST_COMMON_H #define _CRYPTO_CAST_COMMON_H diff --git a/include/crypto/chacha20.h b/include/crypto/chacha20.h new file mode 100644 index 0000000000..20d20f681a --- /dev/null +++ b/include/crypto/chacha20.h @@ -0,0 +1,26 @@ +/* + * Common values for the ChaCha20 algorithm + */ + +#ifndef _CRYPTO_CHACHA20_H +#define _CRYPTO_CHACHA20_H + +#include +#include + +#define CHACHA20_IV_SIZE 16 +#define CHACHA20_KEY_SIZE 32 +#define CHACHA20_BLOCK_SIZE 64 + +struct chacha20_ctx { + u32 key[8]; +}; + +void chacha20_block(u32 *state, void *stream); +void crypto_chacha20_init(u32 *state, struct chacha20_ctx *ctx, u8 *iv); +int crypto_chacha20_setkey(struct crypto_tfm *tfm, const u8 *key, + unsigned int keysize); +int crypto_chacha20_crypt(struct blkcipher_desc *desc, struct scatterlist *dst, + struct scatterlist *src, unsigned int nbytes); + +#endif diff --git a/include/crypto/cryptd.h b/include/crypto/cryptd.h index 23169f4d87..bc792d5a9e 100644 --- a/include/crypto/cryptd.h +++ b/include/crypto/cryptd.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ /* * Software async crypto daemon * @@ -13,22 +12,27 @@ #ifndef _CRYPTO_CRYPT_H #define _CRYPTO_CRYPT_H +#include #include #include #include -#include -struct cryptd_skcipher { - struct crypto_skcipher base; +struct cryptd_ablkcipher { + struct crypto_ablkcipher base; }; +static inline struct cryptd_ablkcipher *__cryptd_ablkcipher_cast( + struct crypto_ablkcipher *tfm) +{ + return (struct 
cryptd_ablkcipher *)tfm; +} + /* alg_name should be algorithm to be cryptd-ed */ -struct cryptd_skcipher *cryptd_alloc_skcipher(const char *alg_name, - u32 type, u32 mask); -struct crypto_skcipher *cryptd_skcipher_child(struct cryptd_skcipher *tfm); -/* Must be called without moving CPUs. */ -bool cryptd_skcipher_queued(struct cryptd_skcipher *tfm); -void cryptd_free_skcipher(struct cryptd_skcipher *tfm); +struct cryptd_ablkcipher *cryptd_alloc_ablkcipher(const char *alg_name, + u32 type, u32 mask); +struct crypto_blkcipher *cryptd_ablkcipher_child(struct cryptd_ablkcipher *tfm); +bool cryptd_ablkcipher_queued(struct cryptd_ablkcipher *tfm); +void cryptd_free_ablkcipher(struct cryptd_ablkcipher *tfm); struct cryptd_ahash { struct crypto_ahash base; diff --git a/include/crypto/crypto_wq.h b/include/crypto/crypto_wq.h new file mode 100644 index 0000000000..a7d252daf9 --- /dev/null +++ b/include/crypto/crypto_wq.h @@ -0,0 +1,7 @@ +#ifndef CRYPTO_WQ_H +#define CRYPTO_WQ_H + +#include + +extern struct workqueue_struct *kcrypto_wq; +#endif diff --git a/include/crypto/ctr.h b/include/crypto/ctr.h index a1c66d1001..4180fc080e 100644 --- a/include/crypto/ctr.h +++ b/include/crypto/ctr.h @@ -1,65 +1,20 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * CTR: Counter mode * * Copyright (c) 2007 Herbert Xu + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 2 of the License, or (at your option) + * any later version. 
+ * */ #ifndef _CRYPTO_CTR_H #define _CRYPTO_CTR_H -#include -#include -#include -#include - #define CTR_RFC3686_NONCE_SIZE 4 #define CTR_RFC3686_IV_SIZE 8 #define CTR_RFC3686_BLOCK_SIZE 16 -static inline int crypto_ctr_encrypt_walk(struct skcipher_request *req, - void (*fn)(struct crypto_skcipher *, - const u8 *, u8 *)) -{ - struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); - int blocksize = crypto_skcipher_chunksize(tfm); - u8 buf[MAX_CIPHER_BLOCKSIZE]; - struct skcipher_walk walk; - int err; - - /* avoid integer division due to variable blocksize parameter */ - if (WARN_ON_ONCE(!is_power_of_2(blocksize))) - return -EINVAL; - - err = skcipher_walk_virt(&walk, req, false); - - while (walk.nbytes > 0) { - u8 *dst = walk.dst.virt.addr; - u8 *src = walk.src.virt.addr; - int nbytes = walk.nbytes; - int tail = 0; - - if (nbytes < walk.total) { - tail = walk.nbytes & (blocksize - 1); - nbytes -= tail; - } - - do { - int bsize = min(nbytes, blocksize); - - fn(tfm, walk.iv, buf); - - crypto_xor_cpy(dst, src, buf, bsize); - crypto_inc(walk.iv, blocksize); - - dst += bsize; - src += bsize; - nbytes -= bsize; - } while (nbytes > 0); - - err = skcipher_walk_done(&walk, tail); - } - return err; -} - #endif /* _CRYPTO_CTR_H */ diff --git a/include/crypto/des.h b/include/crypto/des.h index 7812b4331a..fc6274c6bb 100644 --- a/include/crypto/des.h +++ b/include/crypto/des.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ /* * DES & Triple DES EDE Cipher Algorithms. 
*/ @@ -6,8 +5,6 @@ #ifndef __CRYPTO_DES_H #define __CRYPTO_DES_H -#include - #define DES_KEY_SIZE 8 #define DES_EXPKEY_WORDS 32 #define DES_BLOCK_SIZE 8 @@ -16,42 +13,10 @@ #define DES3_EDE_EXPKEY_WORDS (3 * DES_EXPKEY_WORDS) #define DES3_EDE_BLOCK_SIZE DES_BLOCK_SIZE -struct des_ctx { - u32 expkey[DES_EXPKEY_WORDS]; -}; -struct des3_ede_ctx { - u32 expkey[DES3_EDE_EXPKEY_WORDS]; -}; +extern unsigned long des_ekey(u32 *pe, const u8 *k); -void des_encrypt(const struct des_ctx *ctx, u8 *dst, const u8 *src); -void des_decrypt(const struct des_ctx *ctx, u8 *dst, const u8 *src); - -void des3_ede_encrypt(const struct des3_ede_ctx *dctx, u8 *dst, const u8 *src); -void des3_ede_decrypt(const struct des3_ede_ctx *dctx, u8 *dst, const u8 *src); - -/** - * des_expand_key - Expand a DES input key into a key schedule - * @ctx: the key schedule - * @key: buffer containing the input key - * @len: size of the buffer contents - * - * Returns 0 on success, -EINVAL if the input key is rejected and -ENOKEY if - * the key is accepted but has been found to be weak. - */ -int des_expand_key(struct des_ctx *ctx, const u8 *key, unsigned int keylen); - -/** - * des3_ede_expand_key - Expand a triple DES input key into a key schedule - * @ctx: the key schedule - * @key: buffer containing the input key - * @len: size of the buffer contents - * - * Returns 0 on success, -EINVAL if the input key is rejected and -ENOKEY if - * the key is accepted but has been found to be weak. Note that weak keys will - * be rejected (and -EINVAL will be returned) when running in FIPS mode. 
- */ -int des3_ede_expand_key(struct des3_ede_ctx *ctx, const u8 *key, - unsigned int keylen); +extern int __des3_ede_setkey(u32 *expkey, u32 *flags, const u8 *key, + unsigned int keylen); #endif /* __CRYPTO_DES_H */ diff --git a/include/crypto/dh.h b/include/crypto/dh.h index d71e9858ab..5102a8f282 100644 --- a/include/crypto/dh.h +++ b/include/crypto/dh.h @@ -1,86 +1,29 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * Diffie-Hellman secret to be used with kpp API along with helper functions * * Copyright (c) 2016, Intel Corporation * Authors: Salvatore Benedetto + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 2 of the License, or (at your option) + * any later version. + * */ #ifndef _CRYPTO_DH_ #define _CRYPTO_DH_ -/** - * DOC: DH Helper Functions - * - * To use DH with the KPP cipher API, the following data structure and - * functions should be used. - * - * To use DH with KPP, the following functions should be used to operate on - * a DH private key. The packet private key that can be set with - * the KPP API function call of crypto_kpp_set_secret. - */ - -/** - * struct dh - define a DH private key - * - * @key: Private DH key - * @p: Diffie-Hellman parameter P - * @q: Diffie-Hellman parameter Q - * @g: Diffie-Hellman generator G - * @key_size: Size of the private DH key - * @p_size: Size of DH parameter P - * @q_size: Size of DH parameter Q - * @g_size: Size of DH generator G - */ struct dh { void *key; void *p; - void *q; void *g; unsigned int key_size; unsigned int p_size; - unsigned int q_size; unsigned int g_size; }; -/** - * crypto_dh_key_len() - Obtain the size of the private DH key - * @params: private DH key - * - * This function returns the packet DH key size. A caller can use that - * with the provided DH private key reference to obtain the required - * memory size to hold a packet key. 
- * - * Return: size of the key in bytes - */ -unsigned int crypto_dh_key_len(const struct dh *params); - -/** - * crypto_dh_encode_key() - encode the private key - * @buf: Buffer allocated by the caller to hold the packet DH - * private key. The buffer should be at least crypto_dh_key_len - * bytes in size. - * @len: Length of the packet private key buffer - * @params: Buffer with the caller-specified private key - * - * The DH implementations operate on a packet representation of the private - * key. - * - * Return: -EINVAL if buffer has insufficient size, 0 on success - */ +int crypto_dh_key_len(const struct dh *params); int crypto_dh_encode_key(char *buf, unsigned int len, const struct dh *params); - -/** - * crypto_dh_decode_key() - decode a private key - * @buf: Buffer holding a packet key that should be decoded - * @len: Length of the packet private key buffer - * @params: Buffer allocated by the caller that is filled with the - * unpacked DH private key. - * - * The unpacking obtains the private key by pointing @p to the correct location - * in @buf. Thus, both pointers refer to the same memory. 
- * - * Return: -EINVAL if buffer has insufficient size, 0 on success - */ int crypto_dh_decode_key(const char *buf, unsigned int len, struct dh *params); #endif diff --git a/include/crypto/drbg.h b/include/crypto/drbg.h index c416512693..22f884c973 100644 --- a/include/crypto/drbg.h +++ b/include/crypto/drbg.h @@ -122,15 +122,15 @@ struct drbg_state { struct crypto_skcipher *ctr_handle; /* CTR mode cipher handle */ struct skcipher_request *ctr_req; /* CTR mode request handle */ + __u8 *ctr_null_value_buf; /* CTR mode unaligned buffer */ + __u8 *ctr_null_value; /* CTR mode aligned zero buf */ __u8 *outscratchpadbuf; /* CTR mode output scratchpad */ __u8 *outscratchpad; /* CTR mode aligned outbuf */ - struct crypto_wait ctr_wait; /* CTR mode async wait obj */ - struct scatterlist sg_in, sg_out; /* CTR mode SGLs */ + struct completion ctr_completion; /* CTR mode async handler */ + int ctr_async_err; /* CTR mode async error */ bool seeded; /* DRBG fully seeded? */ bool pr; /* Prediction resistance enabled? */ - bool fips_primed; /* Continuous test primed? 
*/ - unsigned char *prev; /* FIPS 140-2 continuous test value */ struct work_struct seed_work; /* asynchronous seeding support */ struct crypto_rng *jent; const struct drbg_state_ops *d_ops; @@ -184,7 +184,11 @@ static inline size_t drbg_max_addtl(struct drbg_state *drbg) static inline size_t drbg_max_requests(struct drbg_state *drbg) { /* SP800-90A requires 2**48 maximum requests before reseeding */ - return (1<<20); +#if (__BITS_PER_LONG == 32) + return SIZE_MAX; +#else + return (1UL<<48); +#endif } /* diff --git a/include/crypto/ecdh.h b/include/crypto/ecdh.h index a9f98078d2..84bad548d1 100644 --- a/include/crypto/ecdh.h +++ b/include/crypto/ecdh.h @@ -1,82 +1,30 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * ECDH params to be used with kpp API * * Copyright (c) 2016, Intel Corporation * Authors: Salvatore Benedetto + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 2 of the License, or (at your option) + * any later version. + * */ #ifndef _CRYPTO_ECDH_ #define _CRYPTO_ECDH_ -/** - * DOC: ECDH Helper Functions - * - * To use ECDH with the KPP cipher API, the following data structure and - * functions should be used. - * - * The ECC curves known to the ECDH implementation are specified in this - * header file. - * - * To use ECDH with KPP, the following functions should be used to operate on - * an ECDH private key. The packet private key that can be set with - * the KPP API function call of crypto_kpp_set_secret. 
- */ - /* Curves IDs */ #define ECC_CURVE_NIST_P192 0x0001 #define ECC_CURVE_NIST_P256 0x0002 -#define ECC_CURVE_NIST_P384 0x0003 -/** - * struct ecdh - define an ECDH private key - * - * @key: Private ECDH key - * @key_size: Size of the private ECDH key - */ struct ecdh { + unsigned short curve_id; char *key; unsigned short key_size; }; -/** - * crypto_ecdh_key_len() - Obtain the size of the private ECDH key - * @params: private ECDH key - * - * This function returns the packet ECDH key size. A caller can use that - * with the provided ECDH private key reference to obtain the required - * memory size to hold a packet key. - * - * Return: size of the key in bytes - */ -unsigned int crypto_ecdh_key_len(const struct ecdh *params); - -/** - * crypto_ecdh_encode_key() - encode the private key - * @buf: Buffer allocated by the caller to hold the packet ECDH - * private key. The buffer should be at least crypto_ecdh_key_len - * bytes in size. - * @len: Length of the packet private key buffer - * @p: Buffer with the caller-specified private key - * - * The ECDH implementations operate on a packet representation of the private - * key. - * - * Return: -EINVAL if buffer has insufficient size, 0 on success - */ +int crypto_ecdh_key_len(const struct ecdh *params); int crypto_ecdh_encode_key(char *buf, unsigned int len, const struct ecdh *p); - -/** - * crypto_ecdh_decode_key() - decode a private key - * @buf: Buffer holding a packet key that should be decoded - * @len: Length of the packet private key buffer - * @p: Buffer allocated by the caller that is filled with the - * unpacked ECDH private key. - * - * The unpacking obtains the private key by pointing @p to the correct location - * in @buf. Thus, both pointers refer to the same memory. 
- * - * Return: -EINVAL if buffer has insufficient size, 0 on success - */ int crypto_ecdh_decode_key(const char *buf, unsigned int len, struct ecdh *p); #endif diff --git a/include/crypto/engine.h b/include/crypto/engine.h index 26cac19b0f..04eb5c77ad 100644 --- a/include/crypto/engine.h +++ b/include/crypto/engine.h @@ -1,8 +1,13 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * Crypto engine API * * Copyright (c) 2016 Baolin Wang + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 2 of the License, or (at your option) + * any later version. + * */ #ifndef _CRYPTO_ENGINE_H #define _CRYPTO_ENGINE_H @@ -12,10 +17,7 @@ #include #include #include -#include -#include #include -#include #define ENGINE_NAME_LEN 30 /* @@ -24,11 +26,9 @@ * @idling: the engine is entering idle state * @busy: request pump is busy * @running: the engine is on working - * @retry_support: indication that the hardware allows re-execution - * of a failed backlog request - * crypto-engine, in head position to keep order + * @cur_req_prepared: current request is prepared * @list: link with the global crypto engine list - * @queue_lock: spinlock to synchronise access to request queue + * @queue_lock: spinlock to syncronise access to request queue * @queue: the crypto queue of the engine * @rt: whether this queue is set to run as a realtime task * @prepare_crypt_hardware: a request will soon arrive from the queue @@ -37,9 +37,14 @@ * @unprepare_crypt_hardware: there are currently no more requests on the * queue so the subsystem notifies the driver that it may relax the * hardware by issuing this call - * @do_batch_requests: execute a batch of requests. Depends on multiple - * requests support. 
- * @kworker: kthread worker struct for request pump + * @prepare_cipher_request: do some prepare if need before handle the current request + * @unprepare_cipher_request: undo any work done by prepare_cipher_request() + * @cipher_one_request: do encryption for current request + * @prepare_hash_request: do some prepare if need before handle the current request + * @unprepare_hash_request: undo any work done by prepare_hash_request() + * @hash_one_request: do hash for current request + * @kworker: thread struct for request pump + * @kworker_task: pointer to task for request pump kworker thread * @pump_requests: work struct for scheduling work to the request pump * @priv_data: the engine private data * @cur_req: the current request which is on processing @@ -49,70 +54,54 @@ struct crypto_engine { bool idling; bool busy; bool running; - - bool retry_support; + bool cur_req_prepared; struct list_head list; spinlock_t queue_lock; struct crypto_queue queue; - struct device *dev; bool rt; int (*prepare_crypt_hardware)(struct crypto_engine *engine); int (*unprepare_crypt_hardware)(struct crypto_engine *engine); - int (*do_batch_requests)(struct crypto_engine *engine); + int (*prepare_cipher_request)(struct crypto_engine *engine, + struct ablkcipher_request *req); + int (*unprepare_cipher_request)(struct crypto_engine *engine, + struct ablkcipher_request *req); + int (*prepare_hash_request)(struct crypto_engine *engine, + struct ahash_request *req); + int (*unprepare_hash_request)(struct crypto_engine *engine, + struct ahash_request *req); + int (*cipher_one_request)(struct crypto_engine *engine, + struct ablkcipher_request *req); + int (*hash_one_request)(struct crypto_engine *engine, + struct ahash_request *req); - struct kthread_worker *kworker; + struct kthread_worker kworker; + struct task_struct *kworker_task; struct kthread_work pump_requests; void *priv_data; struct crypto_async_request *cur_req; }; -/* - * struct crypto_engine_op - crypto hardware engine operations 
- * @prepare__request: do some prepare if need before handle the current request - * @unprepare_request: undo any work done by prepare_request() - * @do_one_request: do encryption for current request - */ -struct crypto_engine_op { - int (*prepare_request)(struct crypto_engine *engine, - void *areq); - int (*unprepare_request)(struct crypto_engine *engine, - void *areq); - int (*do_one_request)(struct crypto_engine *engine, - void *areq); -}; - -struct crypto_engine_ctx { - struct crypto_engine_op op; -}; - -int crypto_transfer_aead_request_to_engine(struct crypto_engine *engine, - struct aead_request *req); -int crypto_transfer_akcipher_request_to_engine(struct crypto_engine *engine, - struct akcipher_request *req); +int crypto_transfer_cipher_request(struct crypto_engine *engine, + struct ablkcipher_request *req, + bool need_pump); +int crypto_transfer_cipher_request_to_engine(struct crypto_engine *engine, + struct ablkcipher_request *req); +int crypto_transfer_hash_request(struct crypto_engine *engine, + struct ahash_request *req, bool need_pump); int crypto_transfer_hash_request_to_engine(struct crypto_engine *engine, - struct ahash_request *req); -int crypto_transfer_skcipher_request_to_engine(struct crypto_engine *engine, - struct skcipher_request *req); -void crypto_finalize_aead_request(struct crypto_engine *engine, - struct aead_request *req, int err); -void crypto_finalize_akcipher_request(struct crypto_engine *engine, - struct akcipher_request *req, int err); + struct ahash_request *req); +void crypto_finalize_cipher_request(struct crypto_engine *engine, + struct ablkcipher_request *req, int err); void crypto_finalize_hash_request(struct crypto_engine *engine, struct ahash_request *req, int err); -void crypto_finalize_skcipher_request(struct crypto_engine *engine, - struct skcipher_request *req, int err); int crypto_engine_start(struct crypto_engine *engine); int crypto_engine_stop(struct crypto_engine *engine); struct crypto_engine 
*crypto_engine_alloc_init(struct device *dev, bool rt); -struct crypto_engine *crypto_engine_alloc_init_and_set(struct device *dev, - bool retry_support, - int (*cbk_do_batch)(struct crypto_engine *engine), - bool rt, int qlen); int crypto_engine_exit(struct crypto_engine *engine); #endif /* _CRYPTO_ENGINE_H */ diff --git a/include/crypto/gf128mul.h b/include/crypto/gf128mul.h index 81330c6446..da2530e34b 100644 --- a/include/crypto/gf128mul.h +++ b/include/crypto/gf128mul.h @@ -43,13 +43,12 @@ --------------------------------------------------------------------------- Issue Date: 31/01/2006 - An implementation of field multiplication in Galois Field GF(2^128) + An implementation of field multiplication in Galois Field GF(128) */ #ifndef _CRYPTO_GF128MUL_H #define _CRYPTO_GF128MUL_H -#include #include #include @@ -66,7 +65,7 @@ * are left and the lsb's are right. char b[16] is an array and b[0] is * the first octet. * - * 10000000 00000000 00000000 00000000 .... 00000000 00000000 00000000 + * 80000000 00000000 00000000 00000000 .... 00000000 00000000 00000000 * b[0] b[1] b[2] b[3] b[13] b[14] b[15] * * Every bit is a coefficient of some power of X. We can store the bits @@ -86,17 +85,15 @@ * Both of the above formats are easy to implement on big-endian * machines. * - * XTS and EME (the latter of which is patent encumbered) use the ble - * format (bits are stored in big endian order and the bytes in little - * endian). The above buffer represents X^7 in this case and the - * primitive polynomial is b[0] = 0x87. + * EME (which is patent encumbered) uses the ble format (bits are stored + * in big endian order and the bytes in little endian). The above buffer + * represents X^7 in this case and the primitive polynomial is b[0] = 0x87. * * The common machine word-size is smaller than 128 bits, so to make * an efficient implementation we must split into machine word sizes. - * This implementation uses 64-bit words for the moment. Machine - * endianness comes into play. 
The lle format in relation to machine - * endianness is discussed below by the original author of gf128mul Dr - * Brian Gladman. + * This file uses one 32bit for the moment. Machine endianness comes into + * play. The lle format in relation to machine endianness is discussed + * below by the original author of gf128mul Dr Brian Gladman. * * Let's look at the bbe and ble format on a little endian machine. * @@ -130,10 +127,10 @@ * machines this will automatically aligned to wordsize and on a 64-bit * machine also. */ -/* Multiply a GF(2^128) field element by x. Field elements are - held in arrays of bytes in which field bits 8n..8n + 7 are held in - byte[n], with lower indexed bits placed in the more numerically - significant bit positions within bytes. +/* Multiply a GF128 field element by x. Field elements are held in arrays + of bytes in which field bits 8n..8n + 7 are held in byte[n], with lower + indexed bits placed in the more numerically significant bit positions + within bytes. On little endian machines the bit indexes translate into the bit positions within four 32-bit words in the following way @@ -164,58 +161,8 @@ void gf128mul_lle(be128 *a, const be128 *b); void gf128mul_bbe(be128 *a, const be128 *b); -/* - * The following functions multiply a field element by x in - * the polynomial field representation. They use 64-bit word operations - * to gain speed but compensate for machine endianness and hence work - * correctly on both styles of machine. - * - * They are defined here for performance. - */ - -static inline u64 gf128mul_mask_from_bit(u64 x, int which) -{ - /* a constant-time version of 'x & ((u64)1 << which) ? 
(u64)-1 : 0' */ - return ((s64)(x << (63 - which)) >> 63); -} - -static inline void gf128mul_x_lle(be128 *r, const be128 *x) -{ - u64 a = be64_to_cpu(x->a); - u64 b = be64_to_cpu(x->b); - - /* equivalent to gf128mul_table_le[(b << 7) & 0xff] << 48 - * (see crypto/gf128mul.c): */ - u64 _tt = gf128mul_mask_from_bit(b, 0) & ((u64)0xe1 << 56); - - r->b = cpu_to_be64((b >> 1) | (a << 63)); - r->a = cpu_to_be64((a >> 1) ^ _tt); -} - -static inline void gf128mul_x_bbe(be128 *r, const be128 *x) -{ - u64 a = be64_to_cpu(x->a); - u64 b = be64_to_cpu(x->b); - - /* equivalent to gf128mul_table_be[a >> 63] (see crypto/gf128mul.c): */ - u64 _tt = gf128mul_mask_from_bit(a, 63) & 0x87; - - r->a = cpu_to_be64((a << 1) | (b >> 63)); - r->b = cpu_to_be64((b << 1) ^ _tt); -} - -/* needed by XTS */ -static inline void gf128mul_x_ble(le128 *r, const le128 *x) -{ - u64 a = le64_to_cpu(x->a); - u64 b = le64_to_cpu(x->b); - - /* equivalent to gf128mul_table_be[b >> 63] (see crypto/gf128mul.c): */ - u64 _tt = gf128mul_mask_from_bit(a, 63) & 0x87; - - r->a = cpu_to_le64((a << 1) | (b >> 63)); - r->b = cpu_to_le64((b << 1) ^ _tt); -} +/* multiply by x in ble format, needed by XTS */ +void gf128mul_x_ble(be128 *a, const be128 *b); /* 4k table optimization */ @@ -225,28 +172,29 @@ struct gf128mul_4k { struct gf128mul_4k *gf128mul_init_4k_lle(const be128 *g); struct gf128mul_4k *gf128mul_init_4k_bbe(const be128 *g); -void gf128mul_4k_lle(be128 *a, const struct gf128mul_4k *t); -void gf128mul_4k_bbe(be128 *a, const struct gf128mul_4k *t); -void gf128mul_x8_ble(le128 *r, const le128 *x); +void gf128mul_4k_lle(be128 *a, struct gf128mul_4k *t); +void gf128mul_4k_bbe(be128 *a, struct gf128mul_4k *t); + static inline void gf128mul_free_4k(struct gf128mul_4k *t) { - kfree_sensitive(t); + kfree(t); } -/* 64k table optimization, implemented for bbe */ +/* 64k table optimization, implemented for lle and bbe */ struct gf128mul_64k { struct gf128mul_4k *t[16]; }; -/* First initialize with the constant 
factor with which you - * want to multiply and then call gf128mul_64k_bbe with the other - * factor in the first argument, and the table in the second. - * Afterwards, the result is stored in *a. - */ +/* first initialize with the constant factor with which you + * want to multiply and then call gf128_64k_lle with the other + * factor in the first argument, the table in the second and a + * scratch register in the third. Afterwards *a = *r. */ +struct gf128mul_64k *gf128mul_init_64k_lle(const be128 *g); struct gf128mul_64k *gf128mul_init_64k_bbe(const be128 *g); void gf128mul_free_64k(struct gf128mul_64k *t); -void gf128mul_64k_bbe(be128 *a, const struct gf128mul_64k *t); +void gf128mul_64k_lle(be128 *a, struct gf128mul_64k *t); +void gf128mul_64k_bbe(be128 *a, struct gf128mul_64k *t); #endif /* _CRYPTO_GF128MUL_H */ diff --git a/include/crypto/ghash.h b/include/crypto/ghash.h index f832c9f2ac..2a61c9bbab 100644 --- a/include/crypto/ghash.h +++ b/include/crypto/ghash.h @@ -1,6 +1,5 @@ -/* SPDX-License-Identifier: GPL-2.0 */ /* - * Common values for the GHASH hash function + * Common values for GHASH algorithms */ #ifndef __CRYPTO_GHASH_H__ diff --git a/include/crypto/hash.h b/include/crypto/hash.h index f140e46439..26605888a1 100644 --- a/include/crypto/hash.h +++ b/include/crypto/hash.h @@ -1,8 +1,13 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * Hash: Hash algorithms under the crypto API * * Copyright (c) 2008 Herbert Xu + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 2 of the License, or (at your option) + * any later version. 
+ * */ #ifndef _CRYPTO_HASH_H @@ -59,14 +64,18 @@ struct ahash_request { void *__ctx[] CRYPTO_MINALIGN_ATTR; }; +#define AHASH_REQUEST_ON_STACK(name, ahash) \ + char __##name##_desc[sizeof(struct ahash_request) + \ + crypto_ahash_reqsize(ahash)] CRYPTO_MINALIGN_ATTR; \ + struct ahash_request *name = (void *)__##name##_desc + /** * struct ahash_alg - asynchronous message digest definition - * @init: **[mandatory]** Initialize the transformation context. Intended only to initialize the + * @init: Initialize the transformation context. Intended only to initialize the * state of the HASH transformation at the beginning. This shall fill in * the internal structures used during the entire duration of the whole - * transformation. No data processing happens at this point. Driver code - * implementation must not use req->result. - * @update: **[mandatory]** Push a chunk of data into the driver for transformation. This + * transformation. No data processing happens at this point. + * @update: Push a chunk of data into the driver for transformation. This * function actually pushes blocks of data from upper layers into the * driver, which then passes those to the hardware as seen fit. This * function must not finalize the HASH transformation by calculating the @@ -74,14 +83,12 @@ struct ahash_request { * transformation. This function shall not modify the transformation * context, as this function may be called in parallel with the same * transformation object. Data processing can happen synchronously - * [SHASH] or asynchronously [AHASH] at this point. Driver must not use - * req->result. - * @final: **[mandatory]** Retrieve result from the driver. This function finalizes the + * [SHASH] or asynchronously [AHASH] at this point. + * @final: Retrieve result from the driver. This function finalizes the * transformation and retrieves the resulting hash from the driver and * pushes it back to upper layers. 
No data processing happens at this - * point unless hardware requires it to finish the transformation - * (then the data buffered by the device driver is processed). - * @finup: **[optional]** Combination of @update and @final. This function is effectively a + * point. + * @finup: Combination of @update and @final. This function is effectively a * combination of @update and @final calls issued in sequence. As some * hardware cannot do @update and @final separately, this callback was * added to allow such hardware to be used at least by IPsec. Data @@ -112,23 +119,11 @@ struct ahash_request { * you want to save partial result of the transformation after * processing certain amount of data and reload this partial result * multiple times later on for multiple re-use. No data processing - * happens at this point. Driver must not use req->result. + * happens at this point. * @import: Import partial state of the transformation. This function loads the * entire state of the ongoing transformation from a provided block of * data so the transformation can continue from this point onward. No - * data processing happens at this point. Driver must not use - * req->result. - * @init_tfm: Initialize the cryptographic transformation object. - * This function is called only once at the instantiation - * time, right after the transformation context was - * allocated. In case the cryptographic hardware has - * some special requirements which need to be handled - * by software, this function shall check for the precise - * requirement of the transformation and put any software - * fallbacks in place. - * @exit_tfm: Deinitialize the cryptographic transformation object. - * This is a counterpart to @init_tfm, used to remove - * various changes set in @init_tfm. + * data processing happens at this point. 
* @halg: see struct hash_alg_common */ struct ahash_alg { @@ -141,30 +136,20 @@ struct ahash_alg { int (*import)(struct ahash_request *req, const void *in); int (*setkey)(struct crypto_ahash *tfm, const u8 *key, unsigned int keylen); - int (*init_tfm)(struct crypto_ahash *tfm); - void (*exit_tfm)(struct crypto_ahash *tfm); struct hash_alg_common halg; }; struct shash_desc { struct crypto_shash *tfm; - void *__ctx[] __aligned(ARCH_SLAB_MINALIGN); + u32 flags; + + void *__ctx[] CRYPTO_MINALIGN_ATTR; }; -#define HASH_MAX_DIGESTSIZE 64 - -/* - * Worst case is hmac(sha3-224-generic). Its context is a nested 'shash_desc' - * containing a 'struct sha3_state'. - */ -#define HASH_MAX_DESCSIZE (sizeof(struct shash_desc) + 360) - -#define HASH_MAX_STATESIZE 512 - -#define SHASH_DESC_ON_STACK(shash, ctx) \ - char __##shash##_desc[sizeof(struct shash_desc) + HASH_MAX_DESCSIZE] \ - __aligned(__alignof__(struct shash_desc)); \ +#define SHASH_DESC_ON_STACK(shash, ctx) \ + char __##shash##_desc[sizeof(struct shash_desc) + \ + crypto_shash_descsize(ctx)] CRYPTO_MINALIGN_ATTR; \ struct shash_desc *shash = (struct shash_desc *)__##shash##_desc /** @@ -177,17 +162,6 @@ struct shash_desc { * @export: see struct ahash_alg * @import: see struct ahash_alg * @setkey: see struct ahash_alg - * @init_tfm: Initialize the cryptographic transformation object. - * This function is called only once at the instantiation - * time, right after the transformation context was - * allocated. In case the cryptographic hardware has - * some special requirements which need to be handled - * by software, this function shall check for the precise - * requirement of the transformation and put any software - * fallbacks in place. - * @exit_tfm: Deinitialize the cryptographic transformation object. - * This is a counterpart to @init_tfm, used to remove - * various changes set in @init_tfm. 
* @digestsize: see struct ahash_alg * @statesize: see struct ahash_alg * @descsize: Size of the operational state for the message digest. This state @@ -208,8 +182,6 @@ struct shash_alg { int (*import)(struct shash_desc *desc, const void *in); int (*setkey)(struct crypto_shash *tfm, const u8 *key, unsigned int keylen); - int (*init_tfm)(struct crypto_shash *tfm); - void (*exit_tfm)(struct crypto_shash *tfm); unsigned int descsize; @@ -233,6 +205,7 @@ struct crypto_ahash { unsigned int keylen); unsigned int reqsize; + bool has_setkey; struct crypto_tfm base; }; @@ -248,7 +221,7 @@ struct crypto_shash { * CRYPTO_ALG_TYPE_AHASH (listed as type "ahash" in /proc/crypto) * * The asynchronous cipher operation discussion provided for the - * CRYPTO_ALG_TYPE_SKCIPHER API applies here as well. + * CRYPTO_ALG_TYPE_ABLKCIPHER API applies here as well. */ static inline struct crypto_ahash *__crypto_ahash_cast(struct crypto_tfm *tfm) @@ -281,8 +254,6 @@ static inline struct crypto_tfm *crypto_ahash_tfm(struct crypto_ahash *tfm) /** * crypto_free_ahash() - zeroize and free the ahash handle * @tfm: cipher handle to be freed - * - * If @tfm is a NULL or error pointer, this function does nothing. */ static inline void crypto_free_ahash(struct crypto_ahash *tfm) { @@ -358,16 +329,6 @@ static inline unsigned int crypto_ahash_digestsize(struct crypto_ahash *tfm) return crypto_hash_alg_common(tfm)->digestsize; } -/** - * crypto_ahash_statesize() - obtain size of the ahash state - * @tfm: cipher handle - * - * Return the size of the ahash state. With the crypto_ahash_export() - * function, the caller can export the state into a buffer whose size is - * defined with this function. 
- * - * Return: size of the ahash state - */ static inline unsigned int crypto_ahash_statesize(struct crypto_ahash *tfm) { return crypto_hash_alg_common(tfm)->statesize; @@ -408,7 +369,11 @@ static inline struct crypto_ahash *crypto_ahash_reqtfm( * crypto_ahash_reqsize() - obtain size of the request data structure * @tfm: cipher handle * - * Return: size of the request data + * Return the size of the ahash state size. With the crypto_ahash_export + * function, the caller can export the state into a buffer whose size is + * defined with this function. + * + * Return: size of the ahash state */ static inline unsigned int crypto_ahash_reqsize(struct crypto_ahash *tfm) { @@ -434,16 +399,22 @@ static inline void *ahash_request_ctx(struct ahash_request *req) int crypto_ahash_setkey(struct crypto_ahash *tfm, const u8 *key, unsigned int keylen); +static inline bool crypto_ahash_has_setkey(struct crypto_ahash *tfm) +{ + return tfm->has_setkey; +} + /** * crypto_ahash_finup() - update and finalize message digest * @req: reference to the ahash_request handle that holds all information * needed to perform the cipher operation * * This function is a "short-hand" for the function calls of - * crypto_ahash_update and crypto_ahash_final. The parameters have the same + * crypto_ahash_update and crypto_shash_final. The parameters have the same * meaning as discussed for those separate functions. * - * Return: see crypto_ahash_final() + * Return: 0 if the message digest creation was successful; < 0 if an error + * occurred */ int crypto_ahash_finup(struct ahash_request *req); @@ -456,11 +427,8 @@ int crypto_ahash_finup(struct ahash_request *req); * based on all data added to the cipher handle. The message digest is placed * into the output buffer registered with the ahash_request handle. 
* - * Return: - * 0 if the message digest was successfully calculated; - * -EINPROGRESS if data is fed into hardware (DMA) or queued for later; - * -EBUSY if queue is full and request should be resubmitted later; - * other < 0 if an error occurred + * Return: 0 if the message digest creation was successful; < 0 if an error + * occurred */ int crypto_ahash_final(struct ahash_request *req); @@ -473,7 +441,8 @@ int crypto_ahash_final(struct ahash_request *req); * crypto_ahash_update and crypto_ahash_final. The parameters have the same * meaning as discussed for those separate three functions. * - * Return: see crypto_ahash_final() + * Return: 0 if the message digest creation was successful; < 0 if an error + * occurred */ int crypto_ahash_digest(struct ahash_request *req); @@ -484,7 +453,7 @@ int crypto_ahash_digest(struct ahash_request *req); * * This function exports the hash state of the ahash_request handle into the * caller-allocated output buffer out which must have sufficient size (e.g. by - * calling crypto_ahash_statesize()). + * calling crypto_ahash_reqsize). * * Return: 0 if the export was successful; < 0 if an error occurred */ @@ -506,12 +475,7 @@ static inline int crypto_ahash_export(struct ahash_request *req, void *out) */ static inline int crypto_ahash_import(struct ahash_request *req, const void *in) { - struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); - - if (crypto_ahash_get_flags(tfm) & CRYPTO_TFM_NEED_KEY) - return -ENOKEY; - - return tfm->import(req, in); + return crypto_ahash_reqtfm(req)->import(req, in); } /** @@ -523,16 +487,12 @@ static inline int crypto_ahash_import(struct ahash_request *req, const void *in) * handle. Any potentially existing state created by previous operations is * discarded. 
* - * Return: see crypto_ahash_final() + * Return: 0 if the message digest initialization was successful; < 0 if an + * error occurred */ static inline int crypto_ahash_init(struct ahash_request *req) { - struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); - - if (crypto_ahash_get_flags(tfm) & CRYPTO_TFM_NEED_KEY) - return -ENOKEY; - - return tfm->init(req); + return crypto_ahash_reqtfm(req)->init(req); } /** @@ -544,19 +504,12 @@ static inline int crypto_ahash_init(struct ahash_request *req) * is pointed to by the scatter/gather list registered in the &ahash_request * handle * - * Return: see crypto_ahash_final() + * Return: 0 if the message digest update was successful; < 0 if an error + * occurred */ static inline int crypto_ahash_update(struct ahash_request *req) { - struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); - struct crypto_alg *alg = tfm->base.__crt_alg; - unsigned int nbytes = req->nbytes; - int ret; - - crypto_stats_get(alg); - ret = crypto_ahash_reqtfm(req)->update(req); - crypto_stats_ahash_update(nbytes, ret, alg); - return ret; + return crypto_ahash_reqtfm(req)->update(req); } /** @@ -616,7 +569,7 @@ static inline struct ahash_request *ahash_request_alloc( */ static inline void ahash_request_free(struct ahash_request *req) { - kfree_sensitive(req); + kzfree(req); } static inline void ahash_request_zero(struct ahash_request *req) @@ -652,7 +605,7 @@ static inline struct ahash_request *ahash_request_cast( * the cipher operation completes. * * The callback function is registered with the &ahash_request handle and - * must comply with the following template:: + * must comply with the following template * * void callback_function(struct crypto_async_request *req, int error) */ @@ -697,7 +650,7 @@ static inline void ahash_request_set_crypt(struct ahash_request *req, * The message digest API is able to maintain state information for the * caller. 
* - * The synchronous message digest API can store user-related context in its + * The synchronous message digest API can store user-related context in in its * shash_desc request data structure. */ @@ -726,8 +679,6 @@ static inline struct crypto_tfm *crypto_shash_tfm(struct crypto_shash *tfm) /** * crypto_free_shash() - zeroize and free the message digest handle * @tfm: cipher handle to be freed - * - * If @tfm is a NULL or error pointer, this function does nothing. */ static inline void crypto_free_shash(struct crypto_shash *tfm) { @@ -843,7 +794,6 @@ static inline void *shash_desc_ctx(struct shash_desc *desc) * cipher handle must point to a keyed message digest cipher in order for this * function to succeed. * - * Context: Any context. * Return: 0 if the setting of the key was successful; < 0 if an error occurred */ int crypto_shash_setkey(struct crypto_shash *tfm, const u8 *key, @@ -860,32 +810,12 @@ int crypto_shash_setkey(struct crypto_shash *tfm, const u8 *key, * crypto_shash_update and crypto_shash_final. The parameters have the same * meaning as discussed for those separate three functions. * - * Context: Any context. * Return: 0 if the message digest creation was successful; < 0 if an error * occurred */ int crypto_shash_digest(struct shash_desc *desc, const u8 *data, unsigned int len, u8 *out); -/** - * crypto_shash_tfm_digest() - calculate message digest for buffer - * @tfm: hash transformation object - * @data: see crypto_shash_update() - * @len: see crypto_shash_update() - * @out: see crypto_shash_final() - * - * This is a simplified version of crypto_shash_digest() for users who don't - * want to allocate their own hash descriptor (shash_desc). Instead, - * crypto_shash_tfm_digest() takes a hash transformation object (crypto_shash) - * directly, and it allocates a hash descriptor on the stack internally. - * Note that this stack allocation may be fairly large. - * - * Context: Any context. - * Return: 0 on success; < 0 if an error occurred. 
- */ -int crypto_shash_tfm_digest(struct crypto_shash *tfm, const u8 *data, - unsigned int len, u8 *out); - /** * crypto_shash_export() - extract operational state for message digest * @desc: reference to the operational state handle whose state is exported @@ -895,7 +825,6 @@ int crypto_shash_tfm_digest(struct crypto_shash *tfm, const u8 *data, * caller-allocated output buffer out which must have sufficient size (e.g. by * calling crypto_shash_descsize). * - * Context: Any context. * Return: 0 if the export creation was successful; < 0 if an error occurred */ static inline int crypto_shash_export(struct shash_desc *desc, void *out) @@ -912,17 +841,11 @@ static inline int crypto_shash_export(struct shash_desc *desc, void *out) * the input buffer. That buffer should have been generated with the * crypto_ahash_export function. * - * Context: Any context. * Return: 0 if the import was successful; < 0 if an error occurred */ static inline int crypto_shash_import(struct shash_desc *desc, const void *in) { - struct crypto_shash *tfm = desc->tfm; - - if (crypto_shash_get_flags(tfm) & CRYPTO_TFM_NEED_KEY) - return -ENOKEY; - - return crypto_shash_alg(tfm)->import(desc, in); + return crypto_shash_alg(desc->tfm)->import(desc, in); } /** @@ -933,18 +856,12 @@ static inline int crypto_shash_import(struct shash_desc *desc, const void *in) * operational state handle. Any potentially existing state created by * previous operations is discarded. * - * Context: Any context. 
* Return: 0 if the message digest initialization was successful; < 0 if an * error occurred */ static inline int crypto_shash_init(struct shash_desc *desc) { - struct crypto_shash *tfm = desc->tfm; - - if (crypto_shash_get_flags(tfm) & CRYPTO_TFM_NEED_KEY) - return -ENOKEY; - - return crypto_shash_alg(tfm)->init(desc); + return crypto_shash_alg(desc->tfm)->init(desc); } /** @@ -955,7 +872,6 @@ static inline int crypto_shash_init(struct shash_desc *desc) * * Updates the message digest state of the operational state handle. * - * Context: Any context. * Return: 0 if the message digest update was successful; < 0 if an error * occurred */ @@ -972,7 +888,6 @@ int crypto_shash_update(struct shash_desc *desc, const u8 *data, * into the output buffer. The caller must ensure that the output buffer is * large enough by using crypto_shash_digestsize. * - * Context: Any context. * Return: 0 if the message digest creation was successful; < 0 if an error * occurred */ @@ -989,7 +904,6 @@ int crypto_shash_final(struct shash_desc *desc, u8 *out); * crypto_shash_update and crypto_shash_final. The parameters have the same * meaning as discussed for those separate functions. * - * Context: Any context. * Return: 0 if the message digest creation was successful; < 0 if an error * occurred */ diff --git a/include/crypto/hash_info.h b/include/crypto/hash_info.h index dd4f067850..56f217d41f 100644 --- a/include/crypto/hash_info.h +++ b/include/crypto/hash_info.h @@ -1,17 +1,20 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * Hash Info: Hash algorithms information * * Copyright (c) 2013 Dmitry Kasatkin + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 2 of the License, or (at your option) + * any later version. 
+ * */ #ifndef _CRYPTO_HASH_INFO_H #define _CRYPTO_HASH_INFO_H -#include -#include +#include #include -#include #include diff --git a/include/crypto/if_alg.h b/include/crypto/if_alg.h index a5db86670b..a2bfd7843f 100644 --- a/include/crypto/if_alg.h +++ b/include/crypto/if_alg.h @@ -1,8 +1,13 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * if_alg: User-space algorithm interface * * Copyright (c) 2010 Herbert Xu + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 2 of the License, or (at your option) + * any later version. + * */ #ifndef _CRYPTO_IF_ALG_H @@ -13,12 +18,8 @@ #include #include #include -#include #include -#include -#include - #define ALG_MAX_PAGES 16 struct crypto_async_request; @@ -29,13 +30,18 @@ struct alg_sock { struct sock *parent; - atomic_t refcnt; - atomic_t nokey_refcnt; + unsigned int refcnt; + unsigned int nokey_refcnt; const struct af_alg_type *type; void *private; }; +struct af_alg_completion { + struct completion completion; + int err; +}; + struct af_alg_control { struct af_alg_iv *iv; int op; @@ -46,7 +52,6 @@ struct af_alg_type { void *(*bind)(const char *name, u32 type, u32 mask); void (*release)(void *private); int (*setkey)(void *private, const u8 *key, unsigned int keylen); - int (*setentropy)(void *private, sockptr_t entropy, unsigned int len); int (*accept)(void *private, struct sock *sk); int (*accept_nokey)(void *private, struct sock *sk); int (*setauthsize)(void *private, unsigned int authsize); @@ -63,185 +68,30 @@ struct af_alg_sgl { unsigned int npages; }; -/* TX SGL entry */ -struct af_alg_tsgl { - struct list_head list; - unsigned int cur; /* Last processed SG entry */ - struct scatterlist sg[]; /* Array of SGs forming the SGL */ -}; - -#define MAX_SGL_ENTS ((4096 - sizeof(struct af_alg_tsgl)) / \ - sizeof(struct scatterlist) - 1) - -/* RX SGL entry */ -struct 
af_alg_rsgl { - struct af_alg_sgl sgl; - struct list_head list; - size_t sg_num_bytes; /* Bytes of data in that SGL */ -}; - -/** - * struct af_alg_async_req - definition of crypto request - * @iocb: IOCB for AIO operations - * @sk: Socket the request is associated with - * @first_rsgl: First RX SG - * @last_rsgl: Pointer to last RX SG - * @rsgl_list: Track RX SGs - * @tsgl: Private, per request TX SGL of buffers to process - * @tsgl_entries: Number of entries in priv. TX SGL - * @outlen: Number of output bytes generated by crypto op - * @areqlen: Length of this data structure - * @cra_u: Cipher request - */ -struct af_alg_async_req { - struct kiocb *iocb; - struct sock *sk; - - struct af_alg_rsgl first_rsgl; - struct af_alg_rsgl *last_rsgl; - struct list_head rsgl_list; - - struct scatterlist *tsgl; - unsigned int tsgl_entries; - - unsigned int outlen; - unsigned int areqlen; - - union { - struct aead_request aead_req; - struct skcipher_request skcipher_req; - } cra_u; - - /* req ctx trails this struct */ -}; - -/** - * struct af_alg_ctx - definition of the crypto context - * - * The crypto context tracks the input data during the lifetime of an AF_ALG - * socket. - * - * @tsgl_list: Link to TX SGL - * @iv: IV for cipher operation - * @aead_assoclen: Length of AAD for AEAD cipher operations - * @completion: Work queue for synchronous operation - * @used: TX bytes sent to kernel. This variable is used to - * ensure that user space cannot cause the kernel - * to allocate too much memory in sendmsg operation. - * @rcvused: Total RX bytes to be filled by kernel. This variable - * is used to ensure user space cannot cause the kernel - * to allocate too much memory in a recvmsg operation. - * @more: More data to be expected from user space? - * @merge: Shall new data from user space be merged into existing - * SG? - * @enc: Cryptographic operation to be performed when - * recvmsg is invoked. - * @init: True if metadata has been sent. 
- * @len: Length of memory allocated for this data structure. - */ -struct af_alg_ctx { - struct list_head tsgl_list; - - void *iv; - size_t aead_assoclen; - - struct crypto_wait wait; - - size_t used; - atomic_t rcvused; - - bool more; - bool merge; - bool enc; - bool init; - - unsigned int len; -}; - int af_alg_register_type(const struct af_alg_type *type); int af_alg_unregister_type(const struct af_alg_type *type); int af_alg_release(struct socket *sock); void af_alg_release_parent(struct sock *sk); -int af_alg_accept(struct sock *sk, struct socket *newsock, bool kern); +int af_alg_accept(struct sock *sk, struct socket *newsock); int af_alg_make_sg(struct af_alg_sgl *sgl, struct iov_iter *iter, int len); void af_alg_free_sg(struct af_alg_sgl *sgl); +void af_alg_link_sg(struct af_alg_sgl *sgl_prev, struct af_alg_sgl *sgl_new); + +int af_alg_cmsg_send(struct msghdr *msg, struct af_alg_control *con); + +int af_alg_wait_for_completion(int err, struct af_alg_completion *completion); +void af_alg_complete(struct crypto_async_request *req, int err); static inline struct alg_sock *alg_sk(struct sock *sk) { return (struct alg_sock *)sk; } -/** - * Size of available buffer for sending data from user space to kernel. - * - * @sk socket of connection to user space - * @return number of bytes still available - */ -static inline int af_alg_sndbuf(struct sock *sk) +static inline void af_alg_init_completion(struct af_alg_completion *completion) { - struct alg_sock *ask = alg_sk(sk); - struct af_alg_ctx *ctx = ask->private; - - return max_t(int, max_t(int, sk->sk_sndbuf & PAGE_MASK, PAGE_SIZE) - - ctx->used, 0); + init_completion(&completion->completion); } -/** - * Can the send buffer still be written to? 
- * - * @sk socket of connection to user space - * @return true => writable, false => not writable - */ -static inline bool af_alg_writable(struct sock *sk) -{ - return PAGE_SIZE <= af_alg_sndbuf(sk); -} - -/** - * Size of available buffer used by kernel for the RX user space operation. - * - * @sk socket of connection to user space - * @return number of bytes still available - */ -static inline int af_alg_rcvbuf(struct sock *sk) -{ - struct alg_sock *ask = alg_sk(sk); - struct af_alg_ctx *ctx = ask->private; - - return max_t(int, max_t(int, sk->sk_rcvbuf & PAGE_MASK, PAGE_SIZE) - - atomic_read(&ctx->rcvused), 0); -} - -/** - * Can the RX buffer still be written to? - * - * @sk socket of connection to user space - * @return true => writable, false => not writable - */ -static inline bool af_alg_readable(struct sock *sk) -{ - return PAGE_SIZE <= af_alg_rcvbuf(sk); -} - -unsigned int af_alg_count_tsgl(struct sock *sk, size_t bytes, size_t offset); -void af_alg_pull_tsgl(struct sock *sk, size_t used, struct scatterlist *dst, - size_t dst_offset); -void af_alg_wmem_wakeup(struct sock *sk); -int af_alg_wait_for_data(struct sock *sk, unsigned flags, unsigned min); -int af_alg_sendmsg(struct socket *sock, struct msghdr *msg, size_t size, - unsigned int ivsize); -ssize_t af_alg_sendpage(struct socket *sock, struct page *page, - int offset, size_t size, int flags); -void af_alg_free_resources(struct af_alg_async_req *areq); -void af_alg_async_cb(struct crypto_async_request *_req, int err); -__poll_t af_alg_poll(struct file *file, struct socket *sock, - poll_table *wait); -struct af_alg_async_req *af_alg_alloc_areq(struct sock *sk, - unsigned int areqlen); -int af_alg_get_rsgl(struct sock *sk, struct msghdr *msg, int flags, - struct af_alg_async_req *areq, size_t maxsize, - size_t *outlen); - #endif /* _CRYPTO_IF_ALG_H */ diff --git a/include/crypto/internal/aead.h b/include/crypto/internal/aead.h index 27b7b0224e..6ad8e31d38 100644 --- a/include/crypto/internal/aead.h +++ 
b/include/crypto/internal/aead.h @@ -1,8 +1,13 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * AEAD: Authenticated Encryption with Associated Data * * Copyright (c) 2007-2015 Herbert Xu + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 2 of the License, or (at your option) + * any later version. + * */ #ifndef _CRYPTO_INTERNAL_AEAD_H @@ -81,9 +86,14 @@ static inline struct aead_request *aead_request_cast( return container_of(req, struct aead_request, base); } -int crypto_grab_aead(struct crypto_aead_spawn *spawn, - struct crypto_instance *inst, - const char *name, u32 type, u32 mask); +static inline void crypto_set_aead_spawn( + struct crypto_aead_spawn *spawn, struct crypto_instance *inst) +{ + crypto_set_spawn(&spawn->base, inst); +} + +int crypto_grab_aead(struct crypto_aead_spawn *spawn, const char *name, + u32 type, u32 mask); static inline void crypto_drop_aead(struct crypto_aead_spawn *spawn) { @@ -108,6 +118,16 @@ static inline void crypto_aead_set_reqsize(struct crypto_aead *aead, aead->reqsize = reqsize; } +static inline unsigned int crypto_aead_alg_maxauthsize(struct aead_alg *alg) +{ + return alg->maxauthsize; +} + +static inline unsigned int crypto_aead_maxauthsize(struct crypto_aead *aead) +{ + return crypto_aead_alg_maxauthsize(crypto_aead_alg(aead)); +} + static inline void aead_init_queue(struct aead_queue *queue, unsigned int max_qlen) { diff --git a/include/crypto/internal/akcipher.h b/include/crypto/internal/akcipher.h index 8d3220c9ab..479a0078f0 100644 --- a/include/crypto/internal/akcipher.h +++ b/include/crypto/internal/akcipher.h @@ -1,9 +1,14 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * Public Key Encryption * * Copyright (c) 2015, Intel Corporation * Authors: Tadeusz Struk + * + * This program is free software; you can redistribute it and/or modify it + * under the terms 
of the GNU General Public License as published by the Free + * Software Foundation; either version 2 of the License, or (at your option) + * any later version. + * */ #ifndef _CRYPTO_AKCIPHER_INT_H #define _CRYPTO_AKCIPHER_INT_H @@ -33,12 +38,6 @@ static inline void *akcipher_request_ctx(struct akcipher_request *req) return req->__ctx; } -static inline void akcipher_set_reqsize(struct crypto_akcipher *akcipher, - unsigned int reqsize) -{ - crypto_akcipher_alg(akcipher)->reqsize = reqsize; -} - static inline void *akcipher_tfm_ctx(struct crypto_akcipher *tfm) { return tfm->base.__crt_ctx; @@ -78,9 +77,15 @@ static inline void *akcipher_instance_ctx(struct akcipher_instance *inst) return crypto_instance_ctx(akcipher_crypto_instance(inst)); } -int crypto_grab_akcipher(struct crypto_akcipher_spawn *spawn, - struct crypto_instance *inst, - const char *name, u32 type, u32 mask); +static inline void crypto_set_akcipher_spawn( + struct crypto_akcipher_spawn *spawn, + struct crypto_instance *inst) +{ + crypto_set_spawn(&spawn->base, inst); +} + +int crypto_grab_akcipher(struct crypto_akcipher_spawn *spawn, const char *name, + u32 type, u32 mask); static inline struct crypto_akcipher *crypto_spawn_akcipher( struct crypto_akcipher_spawn *spawn) diff --git a/include/crypto/internal/geniv.h b/include/crypto/internal/geniv.h index 7fd7126f59..2bcfb931bc 100644 --- a/include/crypto/internal/geniv.h +++ b/include/crypto/internal/geniv.h @@ -1,8 +1,13 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * geniv: IV generation * * Copyright (c) 2015 Herbert Xu + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 2 of the License, or (at your option) + * any later version. 
+ * */ #ifndef _CRYPTO_INTERNAL_GENIV_H @@ -15,12 +20,13 @@ struct aead_geniv_ctx { spinlock_t lock; struct crypto_aead *child; - struct crypto_sync_skcipher *sknull; + struct crypto_skcipher *sknull; u8 salt[] __attribute__ ((aligned(__alignof__(u32)))); }; struct aead_instance *aead_geniv_alloc(struct crypto_template *tmpl, - struct rtattr **tb); + struct rtattr **tb, u32 type, u32 mask); +void aead_geniv_free(struct aead_instance *inst); int aead_init_geniv(struct crypto_aead *tfm); void aead_exit_geniv(struct crypto_aead *tfm); diff --git a/include/crypto/internal/hash.h b/include/crypto/internal/hash.h index 25806141db..f6d9af3efa 100644 --- a/include/crypto/internal/hash.h +++ b/include/crypto/internal/hash.h @@ -1,8 +1,13 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * Hash algorithms. * * Copyright (c) 2008 Herbert Xu + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 2 of the License, or (at your option) + * any later version. 
+ * */ #ifndef _CRYPTO_INTERNAL_HASH_H @@ -30,25 +35,11 @@ struct crypto_hash_walk { }; struct ahash_instance { - void (*free)(struct ahash_instance *inst); - union { - struct { - char head[offsetof(struct ahash_alg, halg.base)]; - struct crypto_instance base; - } s; - struct ahash_alg alg; - }; + struct ahash_alg alg; }; struct shash_instance { - void (*free)(struct shash_instance *inst); - union { - struct { - char head[offsetof(struct shash_alg, base)]; - struct crypto_instance base; - } s; - struct shash_alg alg; - }; + struct shash_alg alg; }; struct crypto_ahash_spawn { @@ -59,74 +50,75 @@ struct crypto_shash_spawn { struct crypto_spawn base; }; +extern const struct crypto_type crypto_ahash_type; + int crypto_hash_walk_done(struct crypto_hash_walk *walk, int err); int crypto_hash_walk_first(struct ahash_request *req, struct crypto_hash_walk *walk); +int crypto_ahash_walk_first(struct ahash_request *req, + struct crypto_hash_walk *walk); + +static inline int crypto_ahash_walk_done(struct crypto_hash_walk *walk, + int err) +{ + return crypto_hash_walk_done(walk, err); +} static inline int crypto_hash_walk_last(struct crypto_hash_walk *walk) { return !(walk->entrylen | walk->total); } -int crypto_register_ahash(struct ahash_alg *alg); -void crypto_unregister_ahash(struct ahash_alg *alg); -int crypto_register_ahashes(struct ahash_alg *algs, int count); -void crypto_unregister_ahashes(struct ahash_alg *algs, int count); -int ahash_register_instance(struct crypto_template *tmpl, - struct ahash_instance *inst); - -bool crypto_shash_alg_has_setkey(struct shash_alg *alg); - -static inline bool crypto_shash_alg_needs_key(struct shash_alg *alg) +static inline int crypto_ahash_walk_last(struct crypto_hash_walk *walk) { - return crypto_shash_alg_has_setkey(alg) && - !(alg->base.cra_flags & CRYPTO_ALG_OPTIONAL_KEY); + return crypto_hash_walk_last(walk); } -bool crypto_hash_alg_has_setkey(struct hash_alg_common *halg); +int crypto_register_ahash(struct ahash_alg *alg); +int 
crypto_unregister_ahash(struct ahash_alg *alg); +int ahash_register_instance(struct crypto_template *tmpl, + struct ahash_instance *inst); +void ahash_free_instance(struct crypto_instance *inst); -int crypto_grab_ahash(struct crypto_ahash_spawn *spawn, - struct crypto_instance *inst, - const char *name, u32 type, u32 mask); +int crypto_init_ahash_spawn(struct crypto_ahash_spawn *spawn, + struct hash_alg_common *alg, + struct crypto_instance *inst); static inline void crypto_drop_ahash(struct crypto_ahash_spawn *spawn) { crypto_drop_spawn(&spawn->base); } -static inline struct hash_alg_common *crypto_spawn_ahash_alg( - struct crypto_ahash_spawn *spawn) -{ - return __crypto_hash_alg_common(spawn->base.alg); -} +struct hash_alg_common *ahash_attr_alg(struct rtattr *rta, u32 type, u32 mask); int crypto_register_shash(struct shash_alg *alg); -void crypto_unregister_shash(struct shash_alg *alg); +int crypto_unregister_shash(struct shash_alg *alg); int crypto_register_shashes(struct shash_alg *algs, int count); -void crypto_unregister_shashes(struct shash_alg *algs, int count); +int crypto_unregister_shashes(struct shash_alg *algs, int count); int shash_register_instance(struct crypto_template *tmpl, struct shash_instance *inst); -void shash_free_singlespawn_instance(struct shash_instance *inst); +void shash_free_instance(struct crypto_instance *inst); -int crypto_grab_shash(struct crypto_shash_spawn *spawn, - struct crypto_instance *inst, - const char *name, u32 type, u32 mask); +int crypto_init_shash_spawn(struct crypto_shash_spawn *spawn, + struct shash_alg *alg, + struct crypto_instance *inst); static inline void crypto_drop_shash(struct crypto_shash_spawn *spawn) { crypto_drop_spawn(&spawn->base); } -static inline struct shash_alg *crypto_spawn_shash_alg( - struct crypto_shash_spawn *spawn) -{ - return __crypto_shash_alg(spawn->base.alg); -} +struct shash_alg *shash_attr_alg(struct rtattr *rta, u32 type, u32 mask); int shash_ahash_update(struct ahash_request *req, 
struct shash_desc *desc); int shash_ahash_finup(struct ahash_request *req, struct shash_desc *desc); int shash_ahash_digest(struct ahash_request *req, struct shash_desc *desc); +int ahash_mcryptd_update(struct ahash_request *desc); +int ahash_mcryptd_final(struct ahash_request *desc); +int ahash_mcryptd_finup(struct ahash_request *desc); +int ahash_mcryptd_digest(struct ahash_request *desc); + int crypto_init_shash_ops_async(struct crypto_tfm *tfm); static inline void *crypto_ahash_ctx(struct crypto_ahash *tfm) @@ -149,19 +141,13 @@ static inline void crypto_ahash_set_reqsize(struct crypto_ahash *tfm, static inline struct crypto_instance *ahash_crypto_instance( struct ahash_instance *inst) { - return &inst->s.base; + return container_of(&inst->alg.halg.base, struct crypto_instance, alg); } static inline struct ahash_instance *ahash_instance( struct crypto_instance *inst) { - return container_of(inst, struct ahash_instance, s.base); -} - -static inline struct ahash_instance *ahash_alg_instance( - struct crypto_ahash *ahash) -{ - return ahash_instance(crypto_tfm_alg_instance(&ahash->base)); + return container_of(&inst->alg, struct ahash_instance, alg.halg.base); } static inline void *ahash_instance_ctx(struct ahash_instance *inst) @@ -169,6 +155,17 @@ static inline void *ahash_instance_ctx(struct ahash_instance *inst) return crypto_instance_ctx(ahash_crypto_instance(inst)); } +static inline unsigned int ahash_instance_headroom(void) +{ + return sizeof(struct ahash_alg) - sizeof(struct crypto_alg); +} + +static inline struct ahash_instance *ahash_alloc_instance( + const char *name, struct crypto_alg *alg) +{ + return crypto_alloc_instance2(name, alg, ahash_instance_headroom()); +} + static inline void ahash_request_complete(struct ahash_request *req, int err) { req->base.complete(&req->base, err); @@ -197,6 +194,12 @@ static inline struct ahash_request *ahash_dequeue_request( return ahash_request_cast(crypto_dequeue_request(queue)); } +static inline int 
ahash_tfm_in_queue(struct crypto_queue *queue, + struct crypto_ahash *tfm) +{ + return crypto_tfm_in_queue(queue, crypto_ahash_tfm(tfm)); +} + static inline void *crypto_shash_ctx(struct crypto_shash *tfm) { return crypto_tfm_ctx(&tfm->base); @@ -205,19 +208,14 @@ static inline void *crypto_shash_ctx(struct crypto_shash *tfm) static inline struct crypto_instance *shash_crypto_instance( struct shash_instance *inst) { - return &inst->s.base; + return container_of(&inst->alg.base, struct crypto_instance, alg); } static inline struct shash_instance *shash_instance( struct crypto_instance *inst) { - return container_of(inst, struct shash_instance, s.base); -} - -static inline struct shash_instance *shash_alg_instance( - struct crypto_shash *shash) -{ - return shash_instance(crypto_tfm_alg_instance(&shash->base)); + return container_of(__crypto_shash_alg(&inst->alg), + struct shash_instance, alg); } static inline void *shash_instance_ctx(struct shash_instance *inst) @@ -225,6 +223,13 @@ static inline void *shash_instance_ctx(struct shash_instance *inst) return crypto_instance_ctx(shash_crypto_instance(inst)); } +static inline struct shash_instance *shash_alloc_instance( + const char *name, struct crypto_alg *alg) +{ + return crypto_alloc_instance2(name, alg, + sizeof(struct shash_alg) - sizeof(*alg)); +} + static inline struct crypto_shash *crypto_spawn_shash( struct crypto_shash_spawn *spawn) { diff --git a/include/crypto/internal/kpp.h b/include/crypto/internal/kpp.h index 659b642efa..ad3acf3649 100644 --- a/include/crypto/internal/kpp.h +++ b/include/crypto/internal/kpp.h @@ -1,9 +1,14 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * Key-agreement Protocol Primitives (KPP) * * Copyright (c) 2016, Intel Corporation * Authors: Salvatore Benedetto + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 2 of the License, or 
(at your option) + * any later version. + * */ #ifndef _CRYPTO_KPP_INT_H #define _CRYPTO_KPP_INT_H diff --git a/include/crypto/internal/rng.h b/include/crypto/internal/rng.h index e0711b6a59..a52ef3483d 100644 --- a/include/crypto/internal/rng.h +++ b/include/crypto/internal/rng.h @@ -1,9 +1,14 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * RNG: Random Number Generator algorithms under the crypto API * * Copyright (c) 2008 Neil Horman * Copyright (c) 2015 Herbert Xu + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 2 of the License, or (at your option) + * any later version. + * */ #ifndef _CRYPTO_INTERNAL_RNG_H diff --git a/include/crypto/internal/rsa.h b/include/crypto/internal/rsa.h index e870133f4b..9e8f1590de 100644 --- a/include/crypto/internal/rsa.h +++ b/include/crypto/internal/rsa.h @@ -1,9 +1,14 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * RSA internal helpers * * Copyright (c) 2015, Intel Corporation * Authors: Tadeusz Struk + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 2 of the License, or (at your option) + * any later version. + * */ #ifndef _RSA_HELPER_ #define _RSA_HELPER_ diff --git a/include/crypto/internal/skcipher.h b/include/crypto/internal/skcipher.h index a2339f80a6..a21a95e1a3 100644 --- a/include/crypto/internal/skcipher.h +++ b/include/crypto/internal/skcipher.h @@ -1,20 +1,22 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * Symmetric key ciphers. 
* * Copyright (c) 2007 Herbert Xu + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 2 of the License, or (at your option) + * any later version. + * */ #ifndef _CRYPTO_INTERNAL_SKCIPHER_H #define _CRYPTO_INTERNAL_SKCIPHER_H #include -#include #include -#include #include -struct aead_request; struct rtattr; struct skcipher_instance { @@ -32,39 +34,7 @@ struct crypto_skcipher_spawn { struct crypto_spawn base; }; -struct skcipher_walk { - union { - struct { - struct page *page; - unsigned long offset; - } phys; - - struct { - u8 *page; - void *addr; - } virt; - } src, dst; - - struct scatter_walk in; - unsigned int nbytes; - - struct scatter_walk out; - unsigned int total; - - struct list_head buffers; - - u8 *page; - u8 *buffer; - u8 *oiv; - void *iv; - - unsigned int ivsize; - - int flags; - unsigned int blocksize; - unsigned int stride; - unsigned int alignmask; -}; +extern const struct crypto_type crypto_givcipher_type; static inline struct crypto_instance *skcipher_crypto_instance( struct skcipher_instance *inst) @@ -89,9 +59,22 @@ static inline void skcipher_request_complete(struct skcipher_request *req, int e req->base.complete(&req->base, err); } -int crypto_grab_skcipher(struct crypto_skcipher_spawn *spawn, - struct crypto_instance *inst, - const char *name, u32 type, u32 mask); +static inline void crypto_set_skcipher_spawn( + struct crypto_skcipher_spawn *spawn, struct crypto_instance *inst) +{ + crypto_set_spawn(&spawn->base, inst); +} + +int crypto_grab_skcipher(struct crypto_skcipher_spawn *spawn, const char *name, + u32 type, u32 mask); + +static inline int crypto_grab_skcipher2(struct crypto_skcipher_spawn *spawn, + const char *name, u32 type, u32 mask) +{ + return crypto_grab_skcipher(spawn, name, type, mask); +} + +struct crypto_alg *crypto_lookup_skcipher(const char *name, u32 type, u32 mask); static 
inline void crypto_drop_skcipher(struct crypto_skcipher_spawn *spawn) { @@ -116,6 +99,12 @@ static inline struct crypto_skcipher *crypto_spawn_skcipher( return crypto_spawn_tfm2(&spawn->base); } +static inline struct crypto_skcipher *crypto_spawn_skcipher2( + struct crypto_skcipher_spawn *spawn) +{ + return crypto_spawn_skcipher(spawn); +} + static inline void crypto_skcipher_set_reqsize( struct crypto_skcipher *skcipher, unsigned int reqsize) { @@ -129,21 +118,15 @@ void crypto_unregister_skciphers(struct skcipher_alg *algs, int count); int skcipher_register_instance(struct crypto_template *tmpl, struct skcipher_instance *inst); -int skcipher_walk_done(struct skcipher_walk *walk, int err); -int skcipher_walk_virt(struct skcipher_walk *walk, - struct skcipher_request *req, - bool atomic); -int skcipher_walk_async(struct skcipher_walk *walk, - struct skcipher_request *req); -int skcipher_walk_aead_encrypt(struct skcipher_walk *walk, - struct aead_request *req, bool atomic); -int skcipher_walk_aead_decrypt(struct skcipher_walk *walk, - struct aead_request *req, bool atomic); -void skcipher_walk_complete(struct skcipher_walk *walk, int err); - -static inline void skcipher_walk_abort(struct skcipher_walk *walk) +static inline void ablkcipher_request_complete(struct ablkcipher_request *req, + int err) { - skcipher_walk_done(walk, -ECANCELED); + req->base.complete(&req->base, err); +} + +static inline u32 ablkcipher_request_flags(struct ablkcipher_request *req) +{ + return req->base.flags; } static inline void *crypto_skcipher_ctx(struct crypto_skcipher *tfm) @@ -164,60 +147,28 @@ static inline u32 skcipher_request_flags(struct skcipher_request *req) static inline unsigned int crypto_skcipher_alg_min_keysize( struct skcipher_alg *alg) { + if ((alg->base.cra_flags & CRYPTO_ALG_TYPE_MASK) == + CRYPTO_ALG_TYPE_BLKCIPHER) + return alg->base.cra_blkcipher.min_keysize; + + if (alg->base.cra_ablkcipher.encrypt) + return alg->base.cra_ablkcipher.min_keysize; + return 
alg->min_keysize; } static inline unsigned int crypto_skcipher_alg_max_keysize( struct skcipher_alg *alg) { + if ((alg->base.cra_flags & CRYPTO_ALG_TYPE_MASK) == + CRYPTO_ALG_TYPE_BLKCIPHER) + return alg->base.cra_blkcipher.max_keysize; + + if (alg->base.cra_ablkcipher.encrypt) + return alg->base.cra_ablkcipher.max_keysize; + return alg->max_keysize; } -static inline unsigned int crypto_skcipher_alg_walksize( - struct skcipher_alg *alg) -{ - return alg->walksize; -} - -/** - * crypto_skcipher_walksize() - obtain walk size - * @tfm: cipher handle - * - * In some cases, algorithms can only perform optimally when operating on - * multiple blocks in parallel. This is reflected by the walksize, which - * must be a multiple of the chunksize (or equal if the concern does not - * apply) - * - * Return: walk size in bytes - */ -static inline unsigned int crypto_skcipher_walksize( - struct crypto_skcipher *tfm) -{ - return crypto_skcipher_alg_walksize(crypto_skcipher_alg(tfm)); -} - -/* Helpers for simple block cipher modes of operation */ -struct skcipher_ctx_simple { - struct crypto_cipher *cipher; /* underlying block cipher */ -}; -static inline struct crypto_cipher * -skcipher_cipher_simple(struct crypto_skcipher *tfm) -{ - struct skcipher_ctx_simple *ctx = crypto_skcipher_ctx(tfm); - - return ctx->cipher; -} - -struct skcipher_instance *skcipher_alloc_instance_simple( - struct crypto_template *tmpl, struct rtattr **tb); - -static inline struct crypto_alg *skcipher_ialg_simple( - struct skcipher_instance *inst) -{ - struct crypto_cipher_spawn *spawn = skcipher_instance_ctx(inst); - - return crypto_spawn_cipher_alg(spawn); -} - #endif /* _CRYPTO_INTERNAL_SKCIPHER_H */ diff --git a/include/crypto/kpp.h b/include/crypto/kpp.h index cccceadc16..30791f75c1 100644 --- a/include/crypto/kpp.h +++ b/include/crypto/kpp.h @@ -1,9 +1,14 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * Key-agreement Protocol Primitives (KPP) * * Copyright (c) 2016, Intel Corporation * 
Authors: Salvatore Benedetto + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 2 of the License, or (at your option) + * any later version. + * */ #ifndef _CRYPTO_KPP_ @@ -48,7 +53,7 @@ struct crypto_kpp { * * @set_secret: Function invokes the protocol specific function to * store the secret private key along with parameters. - * The implementation knows how to decode the buffer + * The implementation knows how to decode thie buffer * @generate_public_key: Function generate the public key to be sent to the * counterpart. In case of error, where output is not big * enough req->dst_len will be updated to the size @@ -66,15 +71,15 @@ struct crypto_kpp { * * @reqsize: Request context size required by algorithm * implementation - * @base: Common crypto API algorithm data structure + * @base Common crypto API algorithm data structure */ struct kpp_alg { - int (*set_secret)(struct crypto_kpp *tfm, const void *buffer, + int (*set_secret)(struct crypto_kpp *tfm, void *buffer, unsigned int len); int (*generate_public_key)(struct kpp_request *req); int (*compute_shared_secret)(struct kpp_request *req); - unsigned int (*max_size)(struct crypto_kpp *tfm); + int (*max_size)(struct crypto_kpp *tfm); int (*init)(struct crypto_kpp *tfm); void (*exit)(struct crypto_kpp *tfm); @@ -84,7 +89,7 @@ struct kpp_alg { }; /** - * DOC: Generic Key-agreement Protocol Primitives API + * DOC: Generic Key-agreement Protocol Primitevs API * * The KPP API is used with the algorithm type * CRYPTO_ALG_TYPE_KPP (listed as type "kpp" in /proc/crypto) @@ -97,7 +102,7 @@ struct kpp_alg { * @mask: specifies the mask for the algorithm * * Allocate a handle for kpp algorithm. 
The returned struct crypto_kpp - * is required for any following API invocation + * is requeried for any following API invocation * * Return: allocated handle in case of success; IS_ERR() is true in case of * an error, PTR_ERR() returns the error code. @@ -140,22 +145,10 @@ static inline struct crypto_kpp *crypto_kpp_reqtfm(struct kpp_request *req) return __crypto_kpp_tfm(req->base.tfm); } -static inline u32 crypto_kpp_get_flags(struct crypto_kpp *tfm) -{ - return crypto_tfm_get_flags(crypto_kpp_tfm(tfm)); -} - -static inline void crypto_kpp_set_flags(struct crypto_kpp *tfm, u32 flags) -{ - crypto_tfm_set_flags(crypto_kpp_tfm(tfm), flags); -} - /** * crypto_free_kpp() - free KPP tfm handle * * @tfm: KPP tfm handle allocated with crypto_alloc_kpp() - * - * If @tfm is a NULL or error pointer, this function does nothing. */ static inline void crypto_free_kpp(struct crypto_kpp *tfm) { @@ -189,7 +182,7 @@ static inline struct kpp_request *kpp_request_alloc(struct crypto_kpp *tfm, */ static inline void kpp_request_free(struct kpp_request *req) { - kfree_sensitive(req); + kzfree(req); } /** @@ -271,36 +264,22 @@ struct kpp_secret { * Function invokes the specific kpp operation for a given alg. * * @tfm: tfm handle - * @buffer: Buffer holding the packet representation of the private - * key. The structure of the packet key depends on the particular - * KPP implementation. Packing and unpacking helpers are provided - * for ECDH and DH (see the respective header files for those - * implementations). - * @len: Length of the packet private key buffer. 
* * Return: zero on success; error code in case of error */ -static inline int crypto_kpp_set_secret(struct crypto_kpp *tfm, - const void *buffer, unsigned int len) +static inline int crypto_kpp_set_secret(struct crypto_kpp *tfm, void *buffer, + unsigned int len) { struct kpp_alg *alg = crypto_kpp_alg(tfm); - struct crypto_alg *calg = tfm->base.__crt_alg; - int ret; - crypto_stats_get(calg); - ret = alg->set_secret(tfm, buffer, len); - crypto_stats_kpp_set_secret(calg, ret); - return ret; + return alg->set_secret(tfm, buffer, len); } /** * crypto_kpp_generate_public_key() - Invoke kpp operation * * Function invokes the specific kpp operation for generating the public part - * for a given kpp algorithm. - * - * To generate a private key, the caller should use a random number generator. - * The output of the requested length serves as the private key. + * for a given kpp algorithm * * @req: kpp key request * @@ -310,13 +289,8 @@ static inline int crypto_kpp_generate_public_key(struct kpp_request *req) { struct crypto_kpp *tfm = crypto_kpp_reqtfm(req); struct kpp_alg *alg = crypto_kpp_alg(tfm); - struct crypto_alg *calg = tfm->base.__crt_alg; - int ret; - crypto_stats_get(calg); - ret = alg->generate_public_key(req); - crypto_stats_kpp_generate_public_key(calg, ret); - return ret; + return alg->generate_public_key(req); } /** @@ -333,26 +307,20 @@ static inline int crypto_kpp_compute_shared_secret(struct kpp_request *req) { struct crypto_kpp *tfm = crypto_kpp_reqtfm(req); struct kpp_alg *alg = crypto_kpp_alg(tfm); - struct crypto_alg *calg = tfm->base.__crt_alg; - int ret; - crypto_stats_get(calg); - ret = alg->compute_shared_secret(req); - crypto_stats_kpp_compute_shared_secret(calg, ret); - return ret; + return alg->compute_shared_secret(req); } /** * crypto_kpp_maxsize() - Get len for output buffer * - * Function returns the output buffer size required for a given key. - * Function assumes that the key is already set in the transformation. 
If this - * function is called without a setkey or with a failed setkey, you will end up - * in a NULL dereference. + * Function returns the output buffer size required * * @tfm: KPP tfm handle allocated with crypto_alloc_kpp() + * + * Return: minimum len for output buffer or error code if key hasn't been set */ -static inline unsigned int crypto_kpp_maxsize(struct crypto_kpp *tfm) +static inline int crypto_kpp_maxsize(struct crypto_kpp *tfm) { struct kpp_alg *alg = crypto_kpp_alg(tfm); diff --git a/include/crypto/lrw.h b/include/crypto/lrw.h new file mode 100644 index 0000000000..25a2c87163 --- /dev/null +++ b/include/crypto/lrw.h @@ -0,0 +1,43 @@ +#ifndef _CRYPTO_LRW_H +#define _CRYPTO_LRW_H + +#include + +struct scatterlist; +struct gf128mul_64k; +struct blkcipher_desc; + +#define LRW_BLOCK_SIZE 16 + +struct lrw_table_ctx { + /* optimizes multiplying a random (non incrementing, as at the + * start of a new sector) value with key2, we could also have + * used 4k optimization tables or no optimization at all. 
In the + * latter case we would have to store key2 here */ + struct gf128mul_64k *table; + /* stores: + * key2*{ 0,0,...0,0,0,0,1 }, key2*{ 0,0,...0,0,0,1,1 }, + * key2*{ 0,0,...0,0,1,1,1 }, key2*{ 0,0,...0,1,1,1,1 } + * key2*{ 0,0,...1,1,1,1,1 }, etc + * needed for optimized multiplication of incrementing values + * with key2 */ + be128 mulinc[128]; +}; + +int lrw_init_table(struct lrw_table_ctx *ctx, const u8 *tweak); +void lrw_free_table(struct lrw_table_ctx *ctx); + +struct lrw_crypt_req { + be128 *tbuf; + unsigned int tbuflen; + + struct lrw_table_ctx *table_ctx; + void *crypt_ctx; + void (*crypt_fn)(void *ctx, u8 *blks, unsigned int nbytes); +}; + +int lrw_crypt(struct blkcipher_desc *desc, struct scatterlist *dst, + struct scatterlist *src, unsigned int nbytes, + struct lrw_crypt_req *req); + +#endif /* _CRYPTO_LRW_H */ diff --git a/include/crypto/mcryptd.h b/include/crypto/mcryptd.h new file mode 100644 index 0000000000..4a53c0d38c --- /dev/null +++ b/include/crypto/mcryptd.h @@ -0,0 +1,112 @@ +/* + * Software async multibuffer crypto daemon headers + * + * Author: + * Tim Chen + * + * Copyright (c) 2014, Intel Corporation. 
+ */ + +#ifndef _CRYPTO_MCRYPT_H +#define _CRYPTO_MCRYPT_H + +#include +#include +#include + +struct mcryptd_ahash { + struct crypto_ahash base; +}; + +static inline struct mcryptd_ahash *__mcryptd_ahash_cast( + struct crypto_ahash *tfm) +{ + return (struct mcryptd_ahash *)tfm; +} + +struct mcryptd_cpu_queue { + struct crypto_queue queue; + struct work_struct work; +}; + +struct mcryptd_queue { + struct mcryptd_cpu_queue __percpu *cpu_queue; +}; + +struct mcryptd_instance_ctx { + struct crypto_spawn spawn; + struct mcryptd_queue *queue; +}; + +struct mcryptd_hash_ctx { + struct crypto_ahash *child; + struct mcryptd_alg_state *alg_state; +}; + +struct mcryptd_tag { + /* seq number of request */ + unsigned seq_num; + /* arrival time of request */ + unsigned long arrival; + unsigned long expire; + int cpu; +}; + +struct mcryptd_hash_request_ctx { + struct list_head waiter; + crypto_completion_t complete; + struct mcryptd_tag tag; + struct crypto_hash_walk walk; + u8 *out; + int flag; + struct ahash_request areq; +}; + +struct mcryptd_ahash *mcryptd_alloc_ahash(const char *alg_name, + u32 type, u32 mask); +struct crypto_ahash *mcryptd_ahash_child(struct mcryptd_ahash *tfm); +struct ahash_request *mcryptd_ahash_desc(struct ahash_request *req); +void mcryptd_free_ahash(struct mcryptd_ahash *tfm); +void mcryptd_flusher(struct work_struct *work); + +enum mcryptd_req_type { + MCRYPTD_NONE, + MCRYPTD_UPDATE, + MCRYPTD_FINUP, + MCRYPTD_DIGEST, + MCRYPTD_FINAL +}; + +struct mcryptd_alg_cstate { + unsigned long next_flush; + unsigned next_seq_num; + bool flusher_engaged; + struct delayed_work flush; + int cpu; + struct mcryptd_alg_state *alg_state; + void *mgr; + spinlock_t work_lock; + struct list_head work_list; + struct list_head flush_list; +}; + +struct mcryptd_alg_state { + struct mcryptd_alg_cstate __percpu *alg_cstate; + unsigned long (*flusher)(struct mcryptd_alg_cstate *cstate); +}; + +/* return delay in jiffies from current time */ +static inline unsigned long 
get_delay(unsigned long t) +{ + long delay; + + delay = (long) t - (long) jiffies; + if (delay <= 0) + return 0; + else + return (unsigned long) delay; +} + +void mcryptd_arm_flusher(struct mcryptd_alg_cstate *cstate, unsigned long delay); + +#endif diff --git a/include/crypto/md5.h b/include/crypto/md5.h index cf9e9dec3d..327deac963 100644 --- a/include/crypto/md5.h +++ b/include/crypto/md5.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _CRYPTO_MD5_H #define _CRYPTO_MD5_H diff --git a/include/crypto/null.h b/include/crypto/null.h index 0ef577cc00..3f0c59fb0a 100644 --- a/include/crypto/null.h +++ b/include/crypto/null.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ /* Values for NULL algorithms */ #ifndef _CRYPTO_NULL_H @@ -9,7 +8,17 @@ #define NULL_DIGEST_SIZE 0 #define NULL_IV_SIZE 0 -struct crypto_sync_skcipher *crypto_get_default_null_skcipher(void); +struct crypto_skcipher *crypto_get_default_null_skcipher(void); void crypto_put_default_null_skcipher(void); +static inline struct crypto_skcipher *crypto_get_default_null_skcipher2(void) +{ + return crypto_get_default_null_skcipher(); +} + +static inline void crypto_put_default_null_skcipher2(void) +{ + crypto_put_default_null_skcipher(); +} + #endif diff --git a/include/crypto/padlock.h b/include/crypto/padlock.h index 6de70e88f1..d2cfa2ef49 100644 --- a/include/crypto/padlock.h +++ b/include/crypto/padlock.h @@ -1,8 +1,13 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * Driver for VIA PadLock * * Copyright (c) 2004 Michal Ludvig + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 2 of the License, or (at your option) + * any later version. 
+ * */ #ifndef _CRYPTO_PADLOCK_H diff --git a/include/crypto/pcrypt.h b/include/crypto/pcrypt.h index b9bc343619..d7d8bd8c6e 100644 --- a/include/crypto/pcrypt.h +++ b/include/crypto/pcrypt.h @@ -1,9 +1,21 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * pcrypt - Parallel crypto engine. * * Copyright (C) 2009 secunet Security Networks AG * Copyright (C) 2009 Steffen Klassert + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. */ #ifndef _CRYPTO_PCRYPT_H diff --git a/include/crypto/pkcs7.h b/include/crypto/pkcs7.h index 38ec7f5f90..583f199400 100644 --- a/include/crypto/pkcs7.h +++ b/include/crypto/pkcs7.h @@ -1,15 +1,18 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ /* PKCS#7 crypto data parser * * Copyright (C) 2012 Red Hat, Inc. All Rights Reserved. * Written by David Howells (dhowells@redhat.com) + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public Licence + * as published by the Free Software Foundation; either version + * 2 of the Licence, or (at your option) any later version. 
*/ #ifndef _CRYPTO_PKCS7_H #define _CRYPTO_PKCS7_H #include -#include #include struct key; @@ -41,7 +44,4 @@ extern int pkcs7_verify(struct pkcs7_message *pkcs7, extern int pkcs7_supply_detached_data(struct pkcs7_message *pkcs7, const void *data, size_t datalen); -extern int pkcs7_get_digest(struct pkcs7_message *pkcs7, const u8 **buf, - u32 *len, enum hash_algo *hash_algo); - #endif /* _CRYPTO_PKCS7_H */ diff --git a/include/crypto/poly1305.h b/include/crypto/poly1305.h index 090692ec3b..894df59b74 100644 --- a/include/crypto/poly1305.h +++ b/include/crypto/poly1305.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ /* * Common values for the Poly1305 algorithm */ @@ -13,87 +12,30 @@ #define POLY1305_KEY_SIZE 32 #define POLY1305_DIGEST_SIZE 16 -/* The poly1305_key and poly1305_state types are mostly opaque and - * implementation-defined. Limbs might be in base 2^64 or base 2^26, or - * different yet. The union type provided keeps these 64-bit aligned for the - * case in which this is implemented using 64x64 multiplies. 
- */ - -struct poly1305_key { - union { - u32 r[5]; - u64 r64[3]; - }; -}; - -struct poly1305_core_key { - struct poly1305_key key; - struct poly1305_key precomputed_s; -}; - -struct poly1305_state { - union { - u32 h[5]; - u64 h64[3]; - }; -}; - struct poly1305_desc_ctx { + /* key */ + u32 r[5]; + /* finalize key */ + u32 s[4]; + /* accumulator */ + u32 h[5]; /* partial buffer */ u8 buf[POLY1305_BLOCK_SIZE]; /* bytes used in partial buffer */ unsigned int buflen; - /* how many keys have been set in r[] */ - unsigned short rset; - /* whether s[] has been set */ + /* r key has been set */ + bool rset; + /* s key has been set */ bool sset; - /* finalize key */ - u32 s[4]; - /* accumulator */ - struct poly1305_state h; - /* key */ - union { - struct poly1305_key opaque_r[CONFIG_CRYPTO_LIB_POLY1305_RSIZE]; - struct poly1305_core_key core_r; - }; }; -void poly1305_init_arch(struct poly1305_desc_ctx *desc, - const u8 key[POLY1305_KEY_SIZE]); -void poly1305_init_generic(struct poly1305_desc_ctx *desc, - const u8 key[POLY1305_KEY_SIZE]); - -static inline void poly1305_init(struct poly1305_desc_ctx *desc, const u8 *key) -{ - if (IS_ENABLED(CONFIG_CRYPTO_ARCH_HAVE_LIB_POLY1305)) - poly1305_init_arch(desc, key); - else - poly1305_init_generic(desc, key); -} - -void poly1305_update_arch(struct poly1305_desc_ctx *desc, const u8 *src, - unsigned int nbytes); -void poly1305_update_generic(struct poly1305_desc_ctx *desc, const u8 *src, - unsigned int nbytes); - -static inline void poly1305_update(struct poly1305_desc_ctx *desc, - const u8 *src, unsigned int nbytes) -{ - if (IS_ENABLED(CONFIG_CRYPTO_ARCH_HAVE_LIB_POLY1305)) - poly1305_update_arch(desc, src, nbytes); - else - poly1305_update_generic(desc, src, nbytes); -} - -void poly1305_final_arch(struct poly1305_desc_ctx *desc, u8 *digest); -void poly1305_final_generic(struct poly1305_desc_ctx *desc, u8 *digest); - -static inline void poly1305_final(struct poly1305_desc_ctx *desc, u8 *digest) -{ - if 
(IS_ENABLED(CONFIG_CRYPTO_ARCH_HAVE_LIB_POLY1305)) - poly1305_final_arch(desc, digest); - else - poly1305_final_generic(desc, digest); -} +int crypto_poly1305_init(struct shash_desc *desc); +int crypto_poly1305_setkey(struct crypto_shash *tfm, + const u8 *key, unsigned int keylen); +unsigned int crypto_poly1305_setdesckey(struct poly1305_desc_ctx *dctx, + const u8 *src, unsigned int srclen); +int crypto_poly1305_update(struct shash_desc *desc, + const u8 *src, unsigned int srclen); +int crypto_poly1305_final(struct shash_desc *desc, u8 *dst); #endif diff --git a/include/crypto/public_key.h b/include/crypto/public_key.h index f603325c0c..882ca0e1e7 100644 --- a/include/crypto/public_key.h +++ b/include/crypto/public_key.h @@ -1,18 +1,19 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ /* Asymmetric public-key algorithm definitions * - * See Documentation/crypto/asymmetric-keys.rst + * See Documentation/crypto/asymmetric-keys.txt * * Copyright (C) 2012 Red Hat, Inc. All Rights Reserved. * Written by David Howells (dhowells@redhat.com) + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public Licence + * as published by the Free Software Foundation; either version + * 2 of the Licence, or (at your option) any later version. */ #ifndef _LINUX_PUBLIC_KEY_H #define _LINUX_PUBLIC_KEY_H -#include -#include - /* * Cryptographic data for the public-key subtype of the asymmetric key type. 
* @@ -22,10 +23,6 @@ struct public_key { void *key; u32 keylen; - enum OID algo; - void *params; - u32 paramlen; - bool key_is_private; const char *id_type; const char *pkey_algo; }; @@ -38,14 +35,11 @@ extern void public_key_free(struct public_key *key); struct public_key_signature { struct asymmetric_key_id *auth_ids[2]; u8 *s; /* Signature */ - u8 *digest; u32 s_size; /* Number of bytes in signature */ - u32 digest_size; /* Number of bytes in digest */ + u8 *digest; + u8 digest_size; /* Number of bytes in digest */ const char *pkey_algo; const char *hash_algo; - const char *encoding; - const void *data; - unsigned int data_size; }; extern void public_key_signature_free(struct public_key_signature *sig); @@ -56,29 +50,12 @@ struct key; struct key_type; union key_payload; -extern int restrict_link_by_signature(struct key *dest_keyring, +extern int restrict_link_by_signature(struct key *trust_keyring, const struct key_type *type, - const union key_payload *payload, - struct key *trust_keyring); + const union key_payload *payload); -extern int restrict_link_by_key_or_keyring(struct key *dest_keyring, - const struct key_type *type, - const union key_payload *payload, - struct key *trusted); - -extern int restrict_link_by_key_or_keyring_chain(struct key *trust_keyring, - const struct key_type *type, - const union key_payload *payload, - struct key *trusted); - -extern int query_asymmetric_key(const struct kernel_pkey_params *, - struct kernel_pkey_query *); - -extern int encrypt_blob(struct kernel_pkey_params *, const void *, void *); -extern int decrypt_blob(struct kernel_pkey_params *, const void *, void *); -extern int create_signature(struct kernel_pkey_params *, const void *, void *); -extern int verify_signature(const struct key *, - const struct public_key_signature *); +extern int verify_signature(const struct key *key, + const struct public_key_signature *sig); int public_key_verify_signature(const struct public_key *pkey, const struct public_key_signature 
*sig); diff --git a/include/crypto/rng.h b/include/crypto/rng.h index 17bb3673d3..b95ede354a 100644 --- a/include/crypto/rng.h +++ b/include/crypto/rng.h @@ -1,9 +1,14 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * RNG: Random Number Generator algorithms under the crypto API * * Copyright (c) 2008 Neil Horman * Copyright (c) 2015 Herbert Xu + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 2 of the License, or (at your option) + * any later version. + * */ #ifndef _CRYPTO_RNG_H @@ -111,8 +116,6 @@ static inline struct rng_alg *crypto_rng_alg(struct crypto_rng *tfm) /** * crypto_free_rng() - zeroize and free RNG handle * @tfm: cipher handle to be freed - * - * If @tfm is a NULL or error pointer, this function does nothing. */ static inline void crypto_free_rng(struct crypto_rng *tfm) { @@ -137,13 +140,7 @@ static inline int crypto_rng_generate(struct crypto_rng *tfm, const u8 *src, unsigned int slen, u8 *dst, unsigned int dlen) { - struct crypto_alg *alg = tfm->base.__crt_alg; - int ret; - - crypto_stats_get(alg); - ret = crypto_rng_alg(tfm)->generate(tfm, src, slen, dst, dlen); - crypto_stats_rng_generate(alg, dlen, ret); - return ret; + return crypto_rng_alg(tfm)->generate(tfm, src, slen, dst, dlen); } /** diff --git a/include/crypto/scatterwalk.h b/include/crypto/scatterwalk.h index 7af08174a7..880e6be9e9 100644 --- a/include/crypto/scatterwalk.h +++ b/include/crypto/scatterwalk.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * Cryptographic scatter and gather helpers. * @@ -6,6 +5,12 @@ * Copyright (c) 2002 Adam J. 
Richter * Copyright (c) 2004 Jean-Luc Cooke * Copyright (c) 2007 Herbert Xu + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 2 of the License, or (at your option) + * any later version. + * */ #ifndef _CRYPTO_SCATTERWALK_H @@ -17,14 +22,27 @@ #include static inline void scatterwalk_crypto_chain(struct scatterlist *head, - struct scatterlist *sg, int num) + struct scatterlist *sg, + int chain, int num) { + if (chain) { + head->length += sg->length; + sg = sg_next(sg); + } + if (sg) sg_chain(head, num, sg); else sg_mark_end(head); } +static inline unsigned long scatterwalk_samebuf(struct scatter_walk *walk_in, + struct scatter_walk *walk_out) +{ + return !(((sg_page(walk_in->sg) - sg_page(walk_out->sg)) << PAGE_SHIFT) + + (int)(walk_in->offset - walk_out->offset)); +} + static inline unsigned int scatterwalk_pagelen(struct scatter_walk *walk) { unsigned int len = walk->sg->offset + walk->sg->length - walk->offset; @@ -81,7 +99,12 @@ static inline void scatterwalk_pagedone(struct scatter_walk *walk, int out, struct page *page; page = sg_page(walk->sg) + ((walk->offset - 1) >> PAGE_SHIFT); - flush_dcache_page(page); + /* Test ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE first as + * PageSlab cannot be optimised away per se due to + * use of volatile pointer. 
+ */ + if (ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE && !PageSlab(page)) + flush_dcache_page(page); } if (more && walk->offset >= walk->sg->offset + walk->sg->length) diff --git a/include/crypto/serpent.h b/include/crypto/serpent.h index 75c7eaa208..1a1f67fccf 100644 --- a/include/crypto/serpent.h +++ b/include/crypto/serpent.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ /* * Common values for serpent algorithms */ @@ -22,7 +21,7 @@ int __serpent_setkey(struct serpent_ctx *ctx, const u8 *key, unsigned int keylen); int serpent_setkey(struct crypto_tfm *tfm, const u8 *key, unsigned int keylen); -void __serpent_encrypt(const void *ctx, u8 *dst, const u8 *src); -void __serpent_decrypt(const void *ctx, u8 *dst, const u8 *src); +void __serpent_encrypt(void *ctx, u8 *dst, const u8 *src); +void __serpent_decrypt(void *ctx, u8 *dst, const u8 *src); #endif diff --git a/include/crypto/sha.h b/include/crypto/sha.h index 4ff3da8166..c94d3eb1ce 100644 --- a/include/crypto/sha.h +++ b/include/crypto/sha.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ /* * Common values for SHA algorithms */ @@ -71,10 +70,6 @@ extern const u8 sha224_zero_message_hash[SHA224_DIGEST_SIZE]; extern const u8 sha256_zero_message_hash[SHA256_DIGEST_SIZE]; -extern const u8 sha384_zero_message_hash[SHA384_DIGEST_SIZE]; - -extern const u8 sha512_zero_message_hash[SHA512_DIGEST_SIZE]; - struct sha1_state { u32 state[SHA1_DIGEST_SIZE / 4]; u64 count; @@ -112,56 +107,4 @@ extern int crypto_sha512_update(struct shash_desc *desc, const u8 *data, extern int crypto_sha512_finup(struct shash_desc *desc, const u8 *data, unsigned int len, u8 *hash); - -/* - * An implementation of SHA-1's compression function. Don't use in new code! - * You shouldn't be using SHA-1, and even if you *have* to use SHA-1, this isn't - * the correct way to hash something with SHA-1 (use crypto_shash instead). 
- */ -#define SHA1_DIGEST_WORDS (SHA1_DIGEST_SIZE / 4) -#define SHA1_WORKSPACE_WORDS 16 -void sha1_init(__u32 *buf); -void sha1_transform(__u32 *digest, const char *data, __u32 *W); - -/* - * Stand-alone implementation of the SHA256 algorithm. It is designed to - * have as little dependencies as possible so it can be used in the - * kexec_file purgatory. In other cases you should generally use the - * hash APIs from include/crypto/hash.h. Especially when hashing large - * amounts of data as those APIs may be hw-accelerated. - * - * For details see lib/crypto/sha256.c - */ - -static inline void sha256_init(struct sha256_state *sctx) -{ - sctx->state[0] = SHA256_H0; - sctx->state[1] = SHA256_H1; - sctx->state[2] = SHA256_H2; - sctx->state[3] = SHA256_H3; - sctx->state[4] = SHA256_H4; - sctx->state[5] = SHA256_H5; - sctx->state[6] = SHA256_H6; - sctx->state[7] = SHA256_H7; - sctx->count = 0; -} -void sha256_update(struct sha256_state *sctx, const u8 *data, unsigned int len); -void sha256_final(struct sha256_state *sctx, u8 *out); -void sha256(const u8 *data, unsigned int len, u8 *out); - -static inline void sha224_init(struct sha256_state *sctx) -{ - sctx->state[0] = SHA224_H0; - sctx->state[1] = SHA224_H1; - sctx->state[2] = SHA224_H2; - sctx->state[3] = SHA224_H3; - sctx->state[4] = SHA224_H4; - sctx->state[5] = SHA224_H5; - sctx->state[6] = SHA224_H6; - sctx->state[7] = SHA224_H7; - sctx->count = 0; -} -void sha224_update(struct sha256_state *sctx, const u8 *data, unsigned int len); -void sha224_final(struct sha256_state *sctx, u8 *out); - #endif diff --git a/include/crypto/sha1_base.h b/include/crypto/sha1_base.h index 2e0e7c3827..d0df431f9a 100644 --- a/include/crypto/sha1_base.h +++ b/include/crypto/sha1_base.h @@ -1,18 +1,17 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * sha1_base.h - core logic for SHA-1 implementations * * Copyright (C) 2015 Linaro Ltd + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of 
the GNU General Public License version 2 as + * published by the Free Software Foundation. */ -#ifndef _CRYPTO_SHA1_BASE_H -#define _CRYPTO_SHA1_BASE_H - #include -#include +#include #include #include -#include #include @@ -102,8 +101,6 @@ static inline int sha1_base_finish(struct shash_desc *desc, u8 *out) for (i = 0; i < SHA1_DIGEST_SIZE / sizeof(__be32); i++) put_unaligned_be32(sctx->state[i], digest++); - memzero_explicit(sctx, sizeof(*sctx)); + *sctx = (struct sha1_state){}; return 0; } - -#endif /* _CRYPTO_SHA1_BASE_H */ diff --git a/include/crypto/sha256_base.h b/include/crypto/sha256_base.h index 76173c6130..d1f2195bb7 100644 --- a/include/crypto/sha256_base.h +++ b/include/crypto/sha256_base.h @@ -1,18 +1,17 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * sha256_base.h - core logic for SHA-256 implementations * * Copyright (C) 2015 Linaro Ltd + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
*/ -#ifndef _CRYPTO_SHA256_BASE_H -#define _CRYPTO_SHA256_BASE_H - #include -#include +#include #include #include -#include #include @@ -23,7 +22,16 @@ static inline int sha224_base_init(struct shash_desc *desc) { struct sha256_state *sctx = shash_desc_ctx(desc); - sha224_init(sctx); + sctx->state[0] = SHA224_H0; + sctx->state[1] = SHA224_H1; + sctx->state[2] = SHA224_H2; + sctx->state[3] = SHA224_H3; + sctx->state[4] = SHA224_H4; + sctx->state[5] = SHA224_H5; + sctx->state[6] = SHA224_H6; + sctx->state[7] = SHA224_H7; + sctx->count = 0; + return 0; } @@ -31,7 +39,16 @@ static inline int sha256_base_init(struct shash_desc *desc) { struct sha256_state *sctx = shash_desc_ctx(desc); - sha256_init(sctx); + sctx->state[0] = SHA256_H0; + sctx->state[1] = SHA256_H1; + sctx->state[2] = SHA256_H2; + sctx->state[3] = SHA256_H3; + sctx->state[4] = SHA256_H4; + sctx->state[5] = SHA256_H5; + sctx->state[6] = SHA256_H6; + sctx->state[7] = SHA256_H7; + sctx->count = 0; + return 0; } @@ -106,8 +123,6 @@ static inline int sha256_base_finish(struct shash_desc *desc, u8 *out) for (i = 0; digest_size > 0; i++, digest_size -= sizeof(__be32)) put_unaligned_be32(sctx->state[i], digest++); - memzero_explicit(sctx, sizeof(*sctx)); + *sctx = (struct sha256_state){}; return 0; } - -#endif /* _CRYPTO_SHA256_BASE_H */ diff --git a/include/crypto/sha3.h b/include/crypto/sha3.h index 080f60c2e6..f4c9f68f5f 100644 --- a/include/crypto/sha3.h +++ b/include/crypto/sha3.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ /* * Common values for SHA-3 algorithms */ @@ -19,6 +18,7 @@ struct sha3_state { u64 st[25]; + unsigned int md_len; unsigned int rsiz; unsigned int rsizw; @@ -26,9 +26,4 @@ struct sha3_state { u8 buf[SHA3_224_BLOCK_SIZE]; }; -int crypto_sha3_init(struct shash_desc *desc); -int crypto_sha3_update(struct shash_desc *desc, const u8 *data, - unsigned int len); -int crypto_sha3_final(struct shash_desc *desc, u8 *out); - #endif diff --git a/include/crypto/sha512_base.h 
b/include/crypto/sha512_base.h index b370b3340b..6c5341e005 100644 --- a/include/crypto/sha512_base.h +++ b/include/crypto/sha512_base.h @@ -1,18 +1,17 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * sha512_base.h - core logic for SHA-512 implementations * * Copyright (C) 2015 Linaro Ltd + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. */ -#ifndef _CRYPTO_SHA512_BASE_H -#define _CRYPTO_SHA512_BASE_H - #include -#include +#include #include #include -#include #include @@ -127,8 +126,6 @@ static inline int sha512_base_finish(struct shash_desc *desc, u8 *out) for (i = 0; digest_size > 0; i++, digest_size -= sizeof(__be64)) put_unaligned_be64(sctx->state[i], digest++); - memzero_explicit(sctx, sizeof(*sctx)); + *sctx = (struct sha512_state){}; return 0; } - -#endif /* _CRYPTO_SHA512_BASE_H */ diff --git a/include/crypto/skcipher.h b/include/crypto/skcipher.h index ef0fc9ed43..cc4d98a789 100644 --- a/include/crypto/skcipher.h +++ b/include/crypto/skcipher.h @@ -1,8 +1,13 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * Symmetric key ciphers. * * Copyright (c) 2007-2015 Herbert Xu + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 2 of the License, or (at your option) + * any later version. 
+ * */ #ifndef _CRYPTO_SKCIPHER_H @@ -18,7 +23,7 @@ * @iv: Initialisation Vector * @src: Source SG list * @dst: Destination SG list - * @base: Underlying async request + * @base: Underlying async request request * @__ctx: Start of private context data */ struct skcipher_request { @@ -34,14 +39,30 @@ struct skcipher_request { void *__ctx[] CRYPTO_MINALIGN_ATTR; }; -struct crypto_skcipher { - unsigned int reqsize; +/** + * struct skcipher_givcrypt_request - Crypto request with IV generation + * @seq: Sequence number for IV generation + * @giv: Space for generated IV + * @creq: The crypto request itself + */ +struct skcipher_givcrypt_request { + u64 seq; + u8 *giv; - struct crypto_tfm base; + struct ablkcipher_request creq; }; -struct crypto_sync_skcipher { - struct crypto_skcipher base; +struct crypto_skcipher { + int (*setkey)(struct crypto_skcipher *tfm, const u8 *key, + unsigned int keylen); + int (*encrypt)(struct skcipher_request *req); + int (*decrypt)(struct skcipher_request *req); + + unsigned int ivsize; + unsigned int reqsize; + unsigned int keysize; + + struct crypto_tfm base; }; /** @@ -94,9 +115,6 @@ struct crypto_sync_skcipher { * IV of exactly that size to perform the encrypt or decrypt operation. * @chunksize: Equal to the block size except for stream ciphers such as * CTR where it is set to the underlying block size. - * @walksize: Equal to the chunk size except in cases where the algorithm is - * considerably more efficient if it can operate on multiple chunks - * in parallel. Should be a multiple of chunksize. * @base: Definition of a generic crypto algorithm. * * All fields except @ivsize are mandatory and must be filled. 
@@ -113,22 +131,13 @@ struct skcipher_alg { unsigned int max_keysize; unsigned int ivsize; unsigned int chunksize; - unsigned int walksize; struct crypto_alg base; }; -#define MAX_SYNC_SKCIPHER_REQSIZE 384 -/* - * This performs a type-check against the "tfm" argument to make sure - * all users have the correct skcipher tfm for doing on-stack requests. - */ -#define SYNC_SKCIPHER_REQUEST_ON_STACK(name, tfm) \ +#define SKCIPHER_REQUEST_ON_STACK(name, tfm) \ char __##name##_desc[sizeof(struct skcipher_request) + \ - MAX_SYNC_SKCIPHER_REQSIZE + \ - (!(sizeof((struct crypto_sync_skcipher *)1 == \ - (typeof(tfm))1))) \ - ] CRYPTO_MINALIGN_ATTR; \ + crypto_skcipher_reqsize(tfm)] CRYPTO_MINALIGN_ATTR; \ struct skcipher_request *name = (void *)__##name##_desc /** @@ -184,9 +193,6 @@ static inline struct crypto_skcipher *__crypto_skcipher_cast( struct crypto_skcipher *crypto_alloc_skcipher(const char *alg_name, u32 type, u32 mask); -struct crypto_sync_skcipher *crypto_alloc_sync_skcipher(const char *alg_name, - u32 type, u32 mask); - static inline struct crypto_tfm *crypto_skcipher_tfm( struct crypto_skcipher *tfm) { @@ -196,21 +202,31 @@ static inline struct crypto_tfm *crypto_skcipher_tfm( /** * crypto_free_skcipher() - zeroize and free cipher handle * @tfm: cipher handle to be freed - * - * If @tfm is a NULL or error pointer, this function does nothing. */ static inline void crypto_free_skcipher(struct crypto_skcipher *tfm) { crypto_destroy_tfm(tfm, crypto_skcipher_tfm(tfm)); } -static inline void crypto_free_sync_skcipher(struct crypto_sync_skcipher *tfm) +/** + * crypto_has_skcipher() - Search for the availability of an skcipher. 
+ * @alg_name: is the cra_name / name or cra_driver_name / driver name of the + * skcipher + * @type: specifies the type of the cipher + * @mask: specifies the mask for the cipher + * + * Return: true when the skcipher is known to the kernel crypto API; false + * otherwise + */ +static inline int crypto_has_skcipher(const char *alg_name, u32 type, + u32 mask) { - crypto_free_skcipher(&tfm->base); + return crypto_has_alg(alg_name, crypto_skcipher_type(type), + crypto_skcipher_mask(mask)); } /** - * crypto_has_skcipher() - Search for the availability of an skcipher. + * crypto_has_skcipher2() - Search for the availability of an skcipher. * @alg_name: is the cra_name / name or cra_driver_name / driver name of the * skcipher * @type: specifies the type of the skcipher @@ -219,7 +235,7 @@ static inline void crypto_free_sync_skcipher(struct crypto_sync_skcipher *tfm) * Return: true when the skcipher is known to the kernel crypto API; false * otherwise */ -int crypto_has_skcipher(const char *alg_name, u32 type, u32 mask); +int crypto_has_skcipher2(const char *alg_name, u32 type, u32 mask); static inline const char *crypto_skcipher_driver_name( struct crypto_skcipher *tfm) @@ -236,6 +252,13 @@ static inline struct skcipher_alg *crypto_skcipher_alg( static inline unsigned int crypto_skcipher_alg_ivsize(struct skcipher_alg *alg) { + if ((alg->base.cra_flags & CRYPTO_ALG_TYPE_MASK) == + CRYPTO_ALG_TYPE_BLKCIPHER) + return alg->base.cra_blkcipher.ivsize; + + if (alg->base.cra_ablkcipher.encrypt) + return alg->base.cra_ablkcipher.ivsize; + return alg->ivsize; } @@ -250,34 +273,19 @@ static inline unsigned int crypto_skcipher_alg_ivsize(struct skcipher_alg *alg) */ static inline unsigned int crypto_skcipher_ivsize(struct crypto_skcipher *tfm) { - return crypto_skcipher_alg(tfm)->ivsize; -} - -static inline unsigned int crypto_sync_skcipher_ivsize( - struct crypto_sync_skcipher *tfm) -{ - return crypto_skcipher_ivsize(&tfm->base); -} - -/** - * crypto_skcipher_blocksize() - 
obtain block size of cipher - * @tfm: cipher handle - * - * The block size for the skcipher referenced with the cipher handle is - * returned. The caller may use that information to allocate appropriate - * memory for the data returned by the encryption or decryption operation - * - * Return: block size of cipher - */ -static inline unsigned int crypto_skcipher_blocksize( - struct crypto_skcipher *tfm) -{ - return crypto_tfm_alg_blocksize(crypto_skcipher_tfm(tfm)); + return tfm->ivsize; } static inline unsigned int crypto_skcipher_alg_chunksize( struct skcipher_alg *alg) { + if ((alg->base.cra_flags & CRYPTO_ALG_TYPE_MASK) == + CRYPTO_ALG_TYPE_BLKCIPHER) + return alg->base.cra_blocksize; + + if (alg->base.cra_ablkcipher.encrypt) + return alg->base.cra_blocksize; + return alg->chunksize; } @@ -298,10 +306,20 @@ static inline unsigned int crypto_skcipher_chunksize( return crypto_skcipher_alg_chunksize(crypto_skcipher_alg(tfm)); } -static inline unsigned int crypto_sync_skcipher_blocksize( - struct crypto_sync_skcipher *tfm) +/** + * crypto_skcipher_blocksize() - obtain block size of cipher + * @tfm: cipher handle + * + * The block size for the skcipher referenced with the cipher handle is + * returned. 
The caller may use that information to allocate appropriate + * memory for the data returned by the encryption or decryption operation + * + * Return: block size of cipher + */ +static inline unsigned int crypto_skcipher_blocksize( + struct crypto_skcipher *tfm) { - return crypto_skcipher_blocksize(&tfm->base); + return crypto_tfm_alg_blocksize(crypto_skcipher_tfm(tfm)); } static inline unsigned int crypto_skcipher_alignmask( @@ -327,24 +345,6 @@ static inline void crypto_skcipher_clear_flags(struct crypto_skcipher *tfm, crypto_tfm_clear_flags(crypto_skcipher_tfm(tfm), flags); } -static inline u32 crypto_sync_skcipher_get_flags( - struct crypto_sync_skcipher *tfm) -{ - return crypto_skcipher_get_flags(&tfm->base); -} - -static inline void crypto_sync_skcipher_set_flags( - struct crypto_sync_skcipher *tfm, u32 flags) -{ - crypto_skcipher_set_flags(&tfm->base, flags); -} - -static inline void crypto_sync_skcipher_clear_flags( - struct crypto_sync_skcipher *tfm, u32 flags) -{ - crypto_skcipher_clear_flags(&tfm->base, flags); -} - /** * crypto_skcipher_setkey() - set key for cipher * @tfm: cipher handle @@ -361,25 +361,21 @@ static inline void crypto_sync_skcipher_clear_flags( * * Return: 0 if the setting of the key was successful; < 0 if an error occurred */ -int crypto_skcipher_setkey(struct crypto_skcipher *tfm, - const u8 *key, unsigned int keylen); - -static inline int crypto_sync_skcipher_setkey(struct crypto_sync_skcipher *tfm, +static inline int crypto_skcipher_setkey(struct crypto_skcipher *tfm, const u8 *key, unsigned int keylen) { - return crypto_skcipher_setkey(&tfm->base, key, keylen); + return tfm->setkey(tfm, key, keylen); } -static inline unsigned int crypto_skcipher_min_keysize( - struct crypto_skcipher *tfm) +static inline bool crypto_skcipher_has_setkey(struct crypto_skcipher *tfm) { - return crypto_skcipher_alg(tfm)->min_keysize; + return tfm->keysize; } -static inline unsigned int crypto_skcipher_max_keysize( +static inline unsigned int 
crypto_skcipher_default_keysize( struct crypto_skcipher *tfm) { - return crypto_skcipher_alg(tfm)->max_keysize; + return tfm->keysize; } /** @@ -397,14 +393,6 @@ static inline struct crypto_skcipher *crypto_skcipher_reqtfm( return __crypto_skcipher_cast(req->base.tfm); } -static inline struct crypto_sync_skcipher *crypto_sync_skcipher_reqtfm( - struct skcipher_request *req) -{ - struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); - - return container_of(tfm, struct crypto_sync_skcipher, base); -} - /** * crypto_skcipher_encrypt() - encrypt plaintext * @req: reference to the skcipher_request handle that holds all information @@ -416,7 +404,12 @@ static inline struct crypto_sync_skcipher *crypto_sync_skcipher_reqtfm( * * Return: 0 if the cipher operation was successful; < 0 if an error occurred */ -int crypto_skcipher_encrypt(struct skcipher_request *req); +static inline int crypto_skcipher_encrypt(struct skcipher_request *req) +{ + struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); + + return tfm->encrypt(req); +} /** * crypto_skcipher_decrypt() - decrypt ciphertext @@ -429,7 +422,12 @@ int crypto_skcipher_encrypt(struct skcipher_request *req); * * Return: 0 if the cipher operation was successful; < 0 if an error occurred */ -int crypto_skcipher_decrypt(struct skcipher_request *req); +static inline int crypto_skcipher_decrypt(struct skcipher_request *req) +{ + struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); + + return tfm->decrypt(req); +} /** * DOC: Symmetric Key Cipher Request Handle @@ -467,12 +465,6 @@ static inline void skcipher_request_set_tfm(struct skcipher_request *req, req->base.tfm = crypto_skcipher_tfm(tfm); } -static inline void skcipher_request_set_sync_tfm(struct skcipher_request *req, - struct crypto_sync_skcipher *tfm) -{ - skcipher_request_set_tfm(req, &tfm->base); -} - static inline struct skcipher_request *skcipher_request_cast( struct crypto_async_request *req) { @@ -510,7 +502,7 @@ static inline struct skcipher_request 
*skcipher_request_alloc( */ static inline void skcipher_request_free(struct skcipher_request *req) { - kfree_sensitive(req); + kzfree(req); } static inline void skcipher_request_zero(struct skcipher_request *req) @@ -524,7 +516,7 @@ static inline void skcipher_request_zero(struct skcipher_request *req) * skcipher_request_set_callback() - set asynchronous callback function * @req: request handle * @flags: specify zero or an ORing of the flags - * CRYPTO_TFM_REQ_MAY_BACKLOG the request queue may back log and + * CRYPTO_TFM_REQ_MAY_BACKLOG the request queue may back log and * increase the wait queue beyond the initial maximum size; * CRYPTO_TFM_REQ_MAY_SLEEP the request processing may sleep * @compl: callback function pointer to be registered with the request handle @@ -541,7 +533,7 @@ static inline void skcipher_request_zero(struct skcipher_request *req) * cipher operation completes. * * The callback function is registered with the skcipher_request handle and - * must comply with the following template:: + * must comply with the following template * * void callback_function(struct crypto_async_request *req, int error) */ diff --git a/include/crypto/twofish.h b/include/crypto/twofish.h index f6b307a585..095c901a8a 100644 --- a/include/crypto/twofish.h +++ b/include/crypto/twofish.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _CRYPTO_TWOFISH_H #define _CRYPTO_TWOFISH_H @@ -19,7 +18,7 @@ struct twofish_ctx { }; int __twofish_setkey(struct twofish_ctx *ctx, const u8 *key, - unsigned int key_len); + unsigned int key_len, u32 *flags); int twofish_setkey(struct crypto_tfm *tfm, const u8 *key, unsigned int key_len); #endif diff --git a/include/crypto/vmac.h b/include/crypto/vmac.h new file mode 100644 index 0000000000..6b700c7b2f --- /dev/null +++ b/include/crypto/vmac.h @@ -0,0 +1,63 @@ +/* + * Modified to interface to the Linux kernel + * Copyright (c) 2009, Intel Corporation. 
+ * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., 59 Temple + * Place - Suite 330, Boston, MA 02111-1307 USA. + */ + +#ifndef __CRYPTO_VMAC_H +#define __CRYPTO_VMAC_H + +/* -------------------------------------------------------------------------- + * VMAC and VHASH Implementation by Ted Krovetz (tdk@acm.org) and Wei Dai. + * This implementation is herby placed in the public domain. + * The authors offers no warranty. Use at your own risk. + * Please send bug reports to the authors. + * Last modified: 17 APR 08, 1700 PDT + * ----------------------------------------------------------------------- */ + +/* + * User definable settings. + */ +#define VMAC_TAG_LEN 64 +#define VMAC_KEY_SIZE 128/* Must be 128, 192 or 256 */ +#define VMAC_KEY_LEN (VMAC_KEY_SIZE/8) +#define VMAC_NHBYTES 128/* Must 2^i for any 3 < i < 13 Standard = 128*/ + +/* + * This implementation uses u32 and u64 as names for unsigned 32- + * and 64-bit integer types. These are defined in C99 stdint.h. The + * following may need adaptation if you are not running a C99 or + * Microsoft C environment. 
+ */ +struct vmac_ctx { + u64 nhkey[(VMAC_NHBYTES/8)+2*(VMAC_TAG_LEN/64-1)]; + u64 polykey[2*VMAC_TAG_LEN/64]; + u64 l3key[2*VMAC_TAG_LEN/64]; + u64 polytmp[2*VMAC_TAG_LEN/64]; + u64 cached_nonce[2]; + u64 cached_aes[2]; + int first_block_processed; +}; + +typedef u64 vmac_t; + +struct vmac_ctx_t { + struct crypto_cipher *child; + struct vmac_ctx __vmac_ctx; + u8 partial[VMAC_NHBYTES]; /* partial block */ + int partial_size; /* size of the partial block */ +}; + +#endif /* __CRYPTO_VMAC_H */ diff --git a/include/crypto/xts.h b/include/crypto/xts.h index 0f8dba69fe..1f5b11fe8a 100644 --- a/include/crypto/xts.h +++ b/include/crypto/xts.h @@ -1,45 +1,52 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _CRYPTO_XTS_H #define _CRYPTO_XTS_H #include -#include +#include +#include #include +struct scatterlist; +struct blkcipher_desc; + #define XTS_BLOCK_SIZE 16 +struct xts_crypt_req { + be128 *tbuf; + unsigned int tbuflen; + + void *tweak_ctx; + void (*tweak_fn)(void *ctx, u8* dst, const u8* src); + void *crypt_ctx; + void (*crypt_fn)(void *ctx, u8 *blks, unsigned int nbytes); +}; + +#define XTS_TWEAK_CAST(x) (x) + +int xts_crypt(struct blkcipher_desc *desc, struct scatterlist *dst, + struct scatterlist *src, unsigned int nbytes, + struct xts_crypt_req *req); + static inline int xts_check_key(struct crypto_tfm *tfm, const u8 *key, unsigned int keylen) { + u32 *flags = &tfm->crt_flags; + /* * key consists of keys of equal size concatenated, therefore * the length must be even. */ - if (keylen % 2) + if (keylen % 2) { + *flags |= CRYPTO_TFM_RES_BAD_KEY_LEN; return -EINVAL; + } /* ensure that the AES and tweak key are not identical */ - if (fips_enabled && !crypto_memneq(key, key + (keylen / 2), keylen / 2)) - return -EINVAL; - - return 0; -} - -static inline int xts_verify_key(struct crypto_skcipher *tfm, - const u8 *key, unsigned int keylen) -{ - /* - * key consists of keys of equal size concatenated, therefore - * the length must be even. 
- */ - if (keylen % 2) - return -EINVAL; - - /* ensure that the AES and tweak key are not identical */ - if ((fips_enabled || (crypto_skcipher_get_flags(tfm) & - CRYPTO_TFM_REQ_FORBID_WEAK_KEYS)) && - !crypto_memneq(key, key + (keylen / 2), keylen / 2)) + if (fips_enabled && + !crypto_memneq(key, key + (keylen / 2), keylen / 2)) { + *flags |= CRYPTO_TFM_RES_WEAK_KEY; return -EINVAL; + } return 0; } diff --git a/include/drm/ati_pcigart.h b/include/drm/ati_pcigart.h new file mode 100644 index 0000000000..5765648b5e --- /dev/null +++ b/include/drm/ati_pcigart.h @@ -0,0 +1,30 @@ +#ifndef DRM_ATI_PCIGART_H +#define DRM_ATI_PCIGART_H + +#include + +/* location of GART table */ +#define DRM_ATI_GART_MAIN 1 +#define DRM_ATI_GART_FB 2 + +#define DRM_ATI_GART_PCI 1 +#define DRM_ATI_GART_PCIE 2 +#define DRM_ATI_GART_IGP 3 + +struct drm_ati_pcigart_info { + int gart_table_location; + int gart_reg_if; + void *addr; + dma_addr_t bus_addr; + dma_addr_t table_mask; + struct drm_dma_handle *table_handle; + struct drm_local_map mapping; + int table_size; +}; + +extern int drm_ati_pcigart_init(struct drm_device *dev, + struct drm_ati_pcigart_info * gart_info); +extern int drm_ati_pcigart_cleanup(struct drm_device *dev, + struct drm_ati_pcigart_info * gart_info); + +#endif diff --git a/include/drm/bridge/analogix_dp.h b/include/drm/bridge/analogix_dp.h index b0dcc07334..f6f0c06220 100644 --- a/include/drm/bridge/analogix_dp.h +++ b/include/drm/bridge/analogix_dp.h @@ -1,16 +1,18 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * Analogix DP (Display Port) Core interface driver. * * Copyright (C) 2015 Rockchip Electronics Co., Ltd. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. 
*/ #ifndef _ANALOGIX_DP_H_ #define _ANALOGIX_DP_H_ #include -struct analogix_dp_device; - enum analogix_dp_devtype { EXYNOS_DP, RK3288_DP, @@ -27,10 +29,8 @@ struct analogix_dp_plat_data { struct drm_panel *panel; struct drm_encoder *encoder; struct drm_connector *connector; - bool skip_connector; - int (*power_on_start)(struct analogix_dp_plat_data *); - int (*power_on_end)(struct analogix_dp_plat_data *); + int (*power_on)(struct analogix_dp_plat_data *); int (*power_off)(struct analogix_dp_plat_data *); int (*attach)(struct analogix_dp_plat_data *, struct drm_bridge *, struct drm_connector *); @@ -38,16 +38,15 @@ struct analogix_dp_plat_data { struct drm_connector *); }; -int analogix_dp_resume(struct analogix_dp_device *dp); -int analogix_dp_suspend(struct analogix_dp_device *dp); +int analogix_dp_psr_supported(struct device *dev); +int analogix_dp_enable_psr(struct device *dev); +int analogix_dp_disable_psr(struct device *dev); -struct analogix_dp_device * -analogix_dp_probe(struct device *dev, struct analogix_dp_plat_data *plat_data); -int analogix_dp_bind(struct analogix_dp_device *dp, struct drm_device *drm_dev); -void analogix_dp_unbind(struct analogix_dp_device *dp); -void analogix_dp_remove(struct analogix_dp_device *dp); +int analogix_dp_resume(struct device *dev); +int analogix_dp_suspend(struct device *dev); -int analogix_dp_start_crc(struct drm_connector *connector); -int analogix_dp_stop_crc(struct drm_connector *connector); +int analogix_dp_bind(struct device *dev, struct drm_device *drm_dev, + struct analogix_dp_plat_data *plat_data); +void analogix_dp_unbind(struct device *dev, struct device *master, void *data); #endif /* _ANALOGIX_DP_H_ */ diff --git a/include/drm/bridge/dw_hdmi.h b/include/drm/bridge/dw_hdmi.h index 2a1f85f9a8..bae79f3c4d 100644 --- a/include/drm/bridge/dw_hdmi.h +++ b/include/drm/bridge/dw_hdmi.h @@ -1,79 +1,18 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * Copyright (C) 2011 Freescale Semiconductor, Inc. 
+ * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. */ #ifndef __DW_HDMI__ #define __DW_HDMI__ -#include +#include -struct drm_display_info; -struct drm_display_mode; -struct drm_encoder; struct dw_hdmi; -struct platform_device; - -/** - * DOC: Supported input formats and encodings - * - * Depending on the Hardware configuration of the Controller IP, it supports - * a subset of the following input formats and encodings on its internal - * 48bit bus. - * - * +----------------------+----------------------------------+------------------------------+ - * | Format Name | Format Code | Encodings | - * +----------------------+----------------------------------+------------------------------+ - * | RGB 4:4:4 8bit | ``MEDIA_BUS_FMT_RGB888_1X24`` | ``V4L2_YCBCR_ENC_DEFAULT`` | - * +----------------------+----------------------------------+------------------------------+ - * | RGB 4:4:4 10bits | ``MEDIA_BUS_FMT_RGB101010_1X30`` | ``V4L2_YCBCR_ENC_DEFAULT`` | - * +----------------------+----------------------------------+------------------------------+ - * | RGB 4:4:4 12bits | ``MEDIA_BUS_FMT_RGB121212_1X36`` | ``V4L2_YCBCR_ENC_DEFAULT`` | - * +----------------------+----------------------------------+------------------------------+ - * | RGB 4:4:4 16bits | ``MEDIA_BUS_FMT_RGB161616_1X48`` | ``V4L2_YCBCR_ENC_DEFAULT`` | - * +----------------------+----------------------------------+------------------------------+ - * | YCbCr 4:4:4 8bit | ``MEDIA_BUS_FMT_YUV8_1X24`` | ``V4L2_YCBCR_ENC_601`` | - * | | | or ``V4L2_YCBCR_ENC_709`` | - * | | | or ``V4L2_YCBCR_ENC_XV601`` | - * | | | or ``V4L2_YCBCR_ENC_XV709`` | - * +----------------------+----------------------------------+------------------------------+ - * | YCbCr 4:4:4 10bits | ``MEDIA_BUS_FMT_YUV10_1X30`` | 
``V4L2_YCBCR_ENC_601`` | - * | | | or ``V4L2_YCBCR_ENC_709`` | - * | | | or ``V4L2_YCBCR_ENC_XV601`` | - * | | | or ``V4L2_YCBCR_ENC_XV709`` | - * +----------------------+----------------------------------+------------------------------+ - * | YCbCr 4:4:4 12bits | ``MEDIA_BUS_FMT_YUV12_1X36`` | ``V4L2_YCBCR_ENC_601`` | - * | | | or ``V4L2_YCBCR_ENC_709`` | - * | | | or ``V4L2_YCBCR_ENC_XV601`` | - * | | | or ``V4L2_YCBCR_ENC_XV709`` | - * +----------------------+----------------------------------+------------------------------+ - * | YCbCr 4:4:4 16bits | ``MEDIA_BUS_FMT_YUV16_1X48`` | ``V4L2_YCBCR_ENC_601`` | - * | | | or ``V4L2_YCBCR_ENC_709`` | - * | | | or ``V4L2_YCBCR_ENC_XV601`` | - * | | | or ``V4L2_YCBCR_ENC_XV709`` | - * +----------------------+----------------------------------+------------------------------+ - * | YCbCr 4:2:2 8bit | ``MEDIA_BUS_FMT_UYVY8_1X16`` | ``V4L2_YCBCR_ENC_601`` | - * | | | or ``V4L2_YCBCR_ENC_709`` | - * +----------------------+----------------------------------+------------------------------+ - * | YCbCr 4:2:2 10bits | ``MEDIA_BUS_FMT_UYVY10_1X20`` | ``V4L2_YCBCR_ENC_601`` | - * | | | or ``V4L2_YCBCR_ENC_709`` | - * +----------------------+----------------------------------+------------------------------+ - * | YCbCr 4:2:2 12bits | ``MEDIA_BUS_FMT_UYVY12_1X24`` | ``V4L2_YCBCR_ENC_601`` | - * | | | or ``V4L2_YCBCR_ENC_709`` | - * +----------------------+----------------------------------+------------------------------+ - * | YCbCr 4:2:0 8bit | ``MEDIA_BUS_FMT_UYYVYY8_0_5X24`` | ``V4L2_YCBCR_ENC_601`` | - * | | | or ``V4L2_YCBCR_ENC_709`` | - * +----------------------+----------------------------------+------------------------------+ - * | YCbCr 4:2:0 10bits | ``MEDIA_BUS_FMT_UYYVYY10_0_5X30``| ``V4L2_YCBCR_ENC_601`` | - * | | | or ``V4L2_YCBCR_ENC_709`` | - * +----------------------+----------------------------------+------------------------------+ - * | YCbCr 4:2:0 12bits | ``MEDIA_BUS_FMT_UYYVYY12_0_5X36``| 
``V4L2_YCBCR_ENC_601`` | - * | | | or ``V4L2_YCBCR_ENC_709`` | - * +----------------------+----------------------------------+------------------------------+ - * | YCbCr 4:2:0 16bits | ``MEDIA_BUS_FMT_UYYVYY16_0_5X48``| ``V4L2_YCBCR_ENC_601`` | - * | | | or ``V4L2_YCBCR_ENC_709`` | - * +----------------------+----------------------------------+------------------------------+ - */ enum { DW_HDMI_RES_8, @@ -82,14 +21,10 @@ enum { DW_HDMI_RES_MAX, }; -enum dw_hdmi_phy_type { - DW_HDMI_PHY_DWC_HDMI_TX_PHY = 0x00, - DW_HDMI_PHY_DWC_MHL_PHY_HEAC = 0xb2, - DW_HDMI_PHY_DWC_MHL_PHY = 0xc2, - DW_HDMI_PHY_DWC_HDMI_3D_TX_PHY_HEAC = 0xe2, - DW_HDMI_PHY_DWC_HDMI_3D_TX_PHY = 0xf2, - DW_HDMI_PHY_DWC_HDMI20_TX_PHY = 0xf3, - DW_HDMI_PHY_VENDOR_PHY = 0xfe, +enum dw_hdmi_devtype { + IMX6Q_HDMI, + IMX6DL_HDMI, + RK3288_HDMI, }; struct dw_hdmi_mpll_config { @@ -112,89 +47,23 @@ struct dw_hdmi_phy_config { u16 vlev_ctr; /* voltage level control */ }; -struct dw_hdmi_phy_ops { - int (*init)(struct dw_hdmi *hdmi, void *data, - const struct drm_display_info *display, - const struct drm_display_mode *mode); - void (*disable)(struct dw_hdmi *hdmi, void *data); - enum drm_connector_status (*read_hpd)(struct dw_hdmi *hdmi, void *data); - void (*update_hpd)(struct dw_hdmi *hdmi, void *data, - bool force, bool disabled, bool rxsense); - void (*setup_hpd)(struct dw_hdmi *hdmi, void *data); -}; - struct dw_hdmi_plat_data { - struct regmap *regm; - - unsigned int output_port; - - unsigned long input_bus_encoding; - bool use_drm_infoframe; - bool ycbcr_420_allowed; - - /* - * Private data passed to all the .mode_valid() and .configure_phy() - * callback functions. - */ - void *priv_data; - - /* Platform-specific mode validation (optional). 
*/ - enum drm_mode_status (*mode_valid)(struct dw_hdmi *hdmi, void *data, - const struct drm_display_info *info, - const struct drm_display_mode *mode); - - /* Vendor PHY support */ - const struct dw_hdmi_phy_ops *phy_ops; - const char *phy_name; - void *phy_data; - unsigned int phy_force_vendor; - - /* Synopsys PHY support */ + enum dw_hdmi_devtype dev_type; const struct dw_hdmi_mpll_config *mpll_cfg; const struct dw_hdmi_curr_ctrl *cur_ctr; const struct dw_hdmi_phy_config *phy_config; - int (*configure_phy)(struct dw_hdmi *hdmi, void *data, - unsigned long mpixelclock); - - unsigned int disable_cec : 1; + enum drm_mode_status (*mode_valid)(struct drm_connector *connector, + struct drm_display_mode *mode); }; -struct dw_hdmi *dw_hdmi_probe(struct platform_device *pdev, - const struct dw_hdmi_plat_data *plat_data); -void dw_hdmi_remove(struct dw_hdmi *hdmi); -void dw_hdmi_unbind(struct dw_hdmi *hdmi); -struct dw_hdmi *dw_hdmi_bind(struct platform_device *pdev, - struct drm_encoder *encoder, - const struct dw_hdmi_plat_data *plat_data); +void dw_hdmi_unbind(struct device *dev, struct device *master, void *data); +int dw_hdmi_bind(struct device *dev, struct device *master, + void *data, struct drm_encoder *encoder, + struct resource *iores, int irq, + const struct dw_hdmi_plat_data *plat_data); -void dw_hdmi_resume(struct dw_hdmi *hdmi); - -void dw_hdmi_setup_rx_sense(struct dw_hdmi *hdmi, bool hpd, bool rx_sense); - -int dw_hdmi_set_plugged_cb(struct dw_hdmi *hdmi, hdmi_codec_plugged_cb fn, - struct device *codec_dev); void dw_hdmi_set_sample_rate(struct dw_hdmi *hdmi, unsigned int rate); -void dw_hdmi_set_channel_count(struct dw_hdmi *hdmi, unsigned int cnt); -void dw_hdmi_set_channel_status(struct dw_hdmi *hdmi, u8 *channel_status); -void dw_hdmi_set_channel_allocation(struct dw_hdmi *hdmi, unsigned int ca); void dw_hdmi_audio_enable(struct dw_hdmi *hdmi); void dw_hdmi_audio_disable(struct dw_hdmi *hdmi); -void dw_hdmi_set_high_tmds_clock_ratio(struct dw_hdmi 
*hdmi, - const struct drm_display_info *display); - -/* PHY configuration */ -void dw_hdmi_phy_i2c_set_addr(struct dw_hdmi *hdmi, u8 address); -void dw_hdmi_phy_i2c_write(struct dw_hdmi *hdmi, unsigned short data, - unsigned char addr); - -void dw_hdmi_phy_gen2_pddq(struct dw_hdmi *hdmi, u8 enable); -void dw_hdmi_phy_gen2_txpwron(struct dw_hdmi *hdmi, u8 enable); -void dw_hdmi_phy_reset(struct dw_hdmi *hdmi); - -enum drm_connector_status dw_hdmi_phy_read_hpd(struct dw_hdmi *hdmi, - void *data); -void dw_hdmi_phy_update_hpd(struct dw_hdmi *hdmi, void *data, - bool force, bool disabled, bool rxsense); -void dw_hdmi_phy_setup_hpd(struct dw_hdmi *hdmi, void *data); #endif /* __IMX_HDMI_H__ */ diff --git a/include/drm/drmP.h b/include/drm/drmP.h new file mode 100644 index 0000000000..872cabef4a --- /dev/null +++ b/include/drm/drmP.h @@ -0,0 +1,1123 @@ +/* + * Internal Header for the Direct Rendering Manager + * + * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas. + * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California. + * Copyright (c) 2009-2010, Code Aurora Forum. + * All rights reserved. + * + * Author: Rickard E. (Rik) Faith + * Author: Gareth Hughes + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + */ + +#ifndef _DRM_P_H_ +#define _DRM_P_H_ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +struct module; + +struct drm_file; +struct drm_device; +struct drm_agp_head; +struct drm_local_map; +struct drm_device_dma; +struct drm_dma_handle; +struct drm_gem_object; +struct drm_master; +struct drm_vblank_crtc; + +struct device_node; +struct videomode; +struct reservation_object; +struct dma_buf_attachment; + +/* + * The following categories are defined: + * + * CORE: Used in the generic drm code: drm_ioctl.c, drm_mm.c, drm_memory.c, ... + * This is the category used by the DRM_DEBUG() macro. + * + * DRIVER: Used in the vendor specific part of the driver: i915, radeon, ... + * This is the category used by the DRM_DEBUG_DRIVER() macro. + * + * KMS: used in the modesetting code. + * This is the category used by the DRM_DEBUG_KMS() macro. + * + * PRIME: used in the prime code. + * This is the category used by the DRM_DEBUG_PRIME() macro. + * + * ATOMIC: used in the atomic code. + * This is the category used by the DRM_DEBUG_ATOMIC() macro. 
+ * + * VBL: used for verbose debug message in the vblank code + * This is the category used by the DRM_DEBUG_VBL() macro. + * + * Enabling verbose debug messages is done through the drm.debug parameter, + * each category being enabled by a bit. + * + * drm.debug=0x1 will enable CORE messages + * drm.debug=0x2 will enable DRIVER messages + * drm.debug=0x3 will enable CORE and DRIVER messages + * ... + * drm.debug=0x3f will enable all messages + * + * An interesting feature is that it's possible to enable verbose logging at + * run-time by echoing the debug value in its sysfs node: + * # echo 0xf > /sys/module/drm/parameters/debug + */ +#define DRM_UT_NONE 0x00 +#define DRM_UT_CORE 0x01 +#define DRM_UT_DRIVER 0x02 +#define DRM_UT_KMS 0x04 +#define DRM_UT_PRIME 0x08 +#define DRM_UT_ATOMIC 0x10 +#define DRM_UT_VBL 0x20 + +extern __printf(6, 7) __nocapture(4, 5) +void drm_dev_printk(const struct device *dev, const char *level, + unsigned int category, const char *function_name, + const char *prefix, const char *format, ...); + +extern __printf(3, 4) +void drm_printk(const char *level, unsigned int category, + const char *format, ...); + +/***********************************************************************/ +/** \name DRM template customization defaults */ +/*@{*/ + +/* driver capabilities and requirements mask */ +#define DRIVER_USE_AGP 0x1 +#define DRIVER_LEGACY 0x2 +#define DRIVER_PCI_DMA 0x8 +#define DRIVER_SG 0x10 +#define DRIVER_HAVE_DMA 0x20 +#define DRIVER_HAVE_IRQ 0x40 +#define DRIVER_IRQ_SHARED 0x80 +#define DRIVER_GEM 0x1000 +#define DRIVER_MODESET 0x2000 +#define DRIVER_PRIME 0x4000 +#define DRIVER_RENDER 0x8000 +#define DRIVER_ATOMIC 0x10000 +#define DRIVER_KMS_LEGACY_CONTEXT 0x20000 + +/***********************************************************************/ +/** \name Macros to make printk easier */ +/*@{*/ + +#define _DRM_PRINTK(once, level, fmt, ...) 
\ + do { \ + printk##once(KERN_##level "[" DRM_NAME "] " fmt, \ + ##__VA_ARGS__); \ + } while (0) + +#define DRM_INFO(fmt, ...) \ + _DRM_PRINTK(, INFO, fmt, ##__VA_ARGS__) +#define DRM_NOTE(fmt, ...) \ + _DRM_PRINTK(, NOTICE, fmt, ##__VA_ARGS__) +#define DRM_WARN(fmt, ...) \ + _DRM_PRINTK(, WARNING, fmt, ##__VA_ARGS__) + +#define DRM_INFO_ONCE(fmt, ...) \ + _DRM_PRINTK(_once, INFO, fmt, ##__VA_ARGS__) +#define DRM_NOTE_ONCE(fmt, ...) \ + _DRM_PRINTK(_once, NOTICE, fmt, ##__VA_ARGS__) +#define DRM_WARN_ONCE(fmt, ...) \ + _DRM_PRINTK(_once, WARNING, fmt, ##__VA_ARGS__) + +/** + * Error output. + * + * \param fmt printf() like format string. + * \param arg arguments + */ +#define DRM_DEV_ERROR(dev, fmt, ...) \ + drm_dev_printk(dev, KERN_ERR, DRM_UT_NONE, __func__, " *ERROR*",\ + fmt, ##__VA_ARGS__) +#define DRM_ERROR(fmt, ...) \ + drm_printk(KERN_ERR, DRM_UT_NONE, fmt, ##__VA_ARGS__) + +/** + * Rate limited error output. Like DRM_ERROR() but won't flood the log. + * + * \param fmt printf() like format string. + * \param arg arguments + */ +#define DRM_DEV_ERROR_RATELIMITED(dev, fmt, ...) \ +({ \ + static DEFINE_RATELIMIT_STATE(_rs, \ + DEFAULT_RATELIMIT_INTERVAL, \ + DEFAULT_RATELIMIT_BURST); \ + \ + if (__ratelimit(&_rs)) \ + DRM_DEV_ERROR(dev, fmt, ##__VA_ARGS__); \ +}) +#define DRM_ERROR_RATELIMITED(fmt, ...) \ + DRM_DEV_ERROR_RATELIMITED(NULL, fmt, ##__VA_ARGS__) + +#define DRM_DEV_INFO(dev, fmt, ...) \ + drm_dev_printk(dev, KERN_INFO, DRM_UT_NONE, __func__, "", fmt, \ + ##__VA_ARGS__) + +#define DRM_DEV_INFO_ONCE(dev, fmt, ...) \ +({ \ + static bool __print_once __read_mostly; \ + if (!__print_once) { \ + __print_once = true; \ + DRM_DEV_INFO(dev, fmt, ##__VA_ARGS__); \ + } \ +}) + +/** + * Debug output. + * + * \param fmt printf() like format string. + * \param arg arguments + */ +#define DRM_DEV_DEBUG(dev, fmt, args...) \ + drm_dev_printk(dev, KERN_DEBUG, DRM_UT_CORE, __func__, "", fmt, \ + ##args) +#define DRM_DEBUG(fmt, ...) 
\
+	drm_printk(KERN_DEBUG, DRM_UT_CORE, fmt, ##__VA_ARGS__)
+
+#define DRM_DEV_DEBUG_DRIVER(dev, fmt, args...)			\
+	drm_dev_printk(dev, KERN_DEBUG, DRM_UT_DRIVER, __func__, "",	\
+		       fmt, ##args)
+#define DRM_DEBUG_DRIVER(fmt, ...)					\
+	drm_printk(KERN_DEBUG, DRM_UT_DRIVER, fmt, ##__VA_ARGS__)
+
+#define DRM_DEV_DEBUG_KMS(dev, fmt, args...)				\
+	drm_dev_printk(dev, KERN_DEBUG, DRM_UT_KMS, __func__, "", fmt,	\
+		       ##args)
+#define DRM_DEBUG_KMS(fmt, ...)					\
+	drm_printk(KERN_DEBUG, DRM_UT_KMS, fmt, ##__VA_ARGS__)
+
+#define DRM_DEV_DEBUG_PRIME(dev, fmt, args...)				\
+	drm_dev_printk(dev, KERN_DEBUG, DRM_UT_PRIME, __func__, "",	\
+		       fmt, ##args)
+#define DRM_DEBUG_PRIME(fmt, ...)					\
+	drm_printk(KERN_DEBUG, DRM_UT_PRIME, fmt, ##__VA_ARGS__)
+
+#define DRM_DEV_DEBUG_ATOMIC(dev, fmt, args...)			\
+	drm_dev_printk(dev, KERN_DEBUG, DRM_UT_ATOMIC, __func__, "",	\
+		       fmt, ##args)
+#define DRM_DEBUG_ATOMIC(fmt, ...)					\
+	drm_printk(KERN_DEBUG, DRM_UT_ATOMIC, fmt, ##__VA_ARGS__)
+
+#define DRM_DEV_DEBUG_VBL(dev, fmt, args...)				\
+	drm_dev_printk(dev, KERN_DEBUG, DRM_UT_VBL, __func__, "", fmt,	\
+		       ##args)
+#define DRM_DEBUG_VBL(fmt, ...)					\
+	drm_printk(KERN_DEBUG, DRM_UT_VBL, fmt, ##__VA_ARGS__)
+
+#define _DRM_DEV_DEFINE_DEBUG_RATELIMITED(dev, level, fmt, args...)	\
+({									\
+	static DEFINE_RATELIMIT_STATE(_rs,				\
+				      DEFAULT_RATELIMIT_INTERVAL,	\
+				      DEFAULT_RATELIMIT_BURST);		\
+	if (__ratelimit(&_rs))						\
+		drm_dev_printk(dev, KERN_DEBUG, DRM_UT_ ## level,	\
+			       __func__, "", fmt, ##args);		\
+})
+
+/**
+ * Rate limited debug output. Like DRM_DEBUG() but won't flood the log.
+ *
+ * \param fmt printf() like format string.
+ * \param arg arguments
+ */
+#define DRM_DEV_DEBUG_RATELIMITED(dev, fmt, args...)			\
+	_DRM_DEV_DEFINE_DEBUG_RATELIMITED(dev, CORE, fmt, ##args)
+#define DRM_DEBUG_RATELIMITED(fmt, args...)				\
+	DRM_DEV_DEBUG_RATELIMITED(NULL, fmt, ##args)
+#define DRM_DEV_DEBUG_DRIVER_RATELIMITED(dev, fmt, args...) 
\ + _DRM_DEV_DEFINE_DEBUG_RATELIMITED(dev, DRIVER, fmt, ##args) +#define DRM_DEBUG_DRIVER_RATELIMITED(fmt, args...) \ + DRM_DEV_DEBUG_DRIVER_RATELIMITED(NULL, fmt, ##args) +#define DRM_DEV_DEBUG_KMS_RATELIMITED(dev, fmt, args...) \ + _DRM_DEV_DEFINE_DEBUG_RATELIMITED(dev, KMS, fmt, ##args) +#define DRM_DEBUG_KMS_RATELIMITED(fmt, args...) \ + DRM_DEV_DEBUG_KMS_RATELIMITED(NULL, fmt, ##args) +#define DRM_DEV_DEBUG_PRIME_RATELIMITED(dev, fmt, args...) \ + _DRM_DEV_DEFINE_DEBUG_RATELIMITED(dev, PRIME, fmt, ##args) +#define DRM_DEBUG_PRIME_RATELIMITED(fmt, args...) \ + DRM_DEV_DEBUG_PRIME_RATELIMITED(NULL, fmt, ##args) + +/*@}*/ + +/***********************************************************************/ +/** \name Internal types and structures */ +/*@{*/ + +#define DRM_IF_VERSION(maj, min) (maj << 16 | min) + +/** + * Ioctl function type. + * + * \param inode device inode. + * \param file_priv DRM file private pointer. + * \param cmd command. + * \param arg argument. + */ +typedef int (* const drm_ioctl_t)(struct drm_device *dev, void *data, + struct drm_file *file_priv); +typedef int (* drm_ioctl_no_const_t)(struct drm_device *dev, void *data, + struct drm_file *file_priv); + +typedef int (* const drm_ioctl_compat_t)(struct file *filp, unsigned int cmd, + unsigned long arg); + +#define DRM_IOCTL_NR(n) _IOC_NR(n) +#define DRM_MAJOR 226 + +#define DRM_AUTH 0x1 +#define DRM_MASTER 0x2 +#define DRM_ROOT_ONLY 0x4 +#define DRM_CONTROL_ALLOW 0x8 +#define DRM_UNLOCKED 0x10 +#define DRM_RENDER_ALLOW 0x20 + +struct drm_ioctl_desc { + unsigned int cmd; + int flags; + drm_ioctl_t func; + const char *name; +} __do_const; + +/** + * Creates a driver or general drm_ioctl_desc array entry for the given + * ioctl, for use by drm_ioctl(). 
+ */ + +#define DRM_IOCTL_DEF_DRV(ioctl, _func, _flags) \ + [DRM_IOCTL_NR(DRM_IOCTL_##ioctl) - DRM_COMMAND_BASE] = { \ + .cmd = DRM_IOCTL_##ioctl, \ + .func = _func, \ + .flags = _flags, \ + .name = #ioctl \ + } + +/* Event queued up for userspace to read */ +struct drm_pending_event { + struct completion *completion; + void (*completion_release)(struct completion *completion); + struct drm_event *event; + struct fence *fence; + struct list_head link; + struct list_head pending_link; + struct drm_file *file_priv; + pid_t pid; /* pid of requester, no guarantee it's valid by the time + we deliver the event, for tracing only */ +}; + +struct drm_prime_file_private { + struct mutex lock; + struct rb_root dmabufs; + struct rb_root handles; +}; + +/** File private data */ +struct drm_file { + unsigned authenticated :1; + /* true when the client has asked us to expose stereo 3D mode flags */ + unsigned stereo_allowed :1; + /* + * true if client understands CRTC primary planes and cursor planes + * in the plane list + */ + unsigned universal_planes:1; + /* true if client understands atomic properties */ + unsigned atomic:1; + /* + * This client is the creator of @master. + * Protected by struct drm_device::master_mutex. + */ + unsigned is_master:1; + + struct pid *pid; + drm_magic_t magic; + struct list_head lhead; + struct drm_minor *minor; + unsigned long lock_count; + + /** Mapping of mm object handles to object pointers. */ + struct idr object_idr; + /** Lock for synchronization of access to object_idr. */ + spinlock_t table_lock; + + struct file *filp; + void *driver_priv; + + struct drm_master *master; /* master this node is currently associated with + N.B. not always dev->master */ + /** + * fbs - List of framebuffers associated with this file. + * + * Protected by fbs_lock. Note that the fbs list holds a reference on + * the fb object to prevent it from untimely disappearing. 
+ */ + struct list_head fbs; + struct mutex fbs_lock; + + /** User-created blob properties; this retains a reference on the + * property. */ + struct list_head blobs; + + wait_queue_head_t event_wait; + struct list_head pending_event_list; + struct list_head event_list; + int event_space; + + struct mutex event_read_lock; + + struct drm_prime_file_private prime; +}; + +/** + * Lock data. + */ +struct drm_lock_data { + struct drm_hw_lock *hw_lock; /**< Hardware lock */ + /** Private of lock holder's file (NULL=kernel) */ + struct drm_file *file_priv; + wait_queue_head_t lock_queue; /**< Queue of blocked processes */ + unsigned long lock_time; /**< Time of last lock in jiffies */ + spinlock_t spinlock; + uint32_t kernel_waiters; + uint32_t user_waiters; + int idle_has_lock; +}; + +/* Flags and return codes for get_vblank_timestamp() driver function. */ +#define DRM_CALLED_FROM_VBLIRQ 1 +#define DRM_VBLANKTIME_SCANOUTPOS_METHOD (1 << 0) +#define DRM_VBLANKTIME_IN_VBLANK (1 << 1) + +/* get_scanout_position() return flags */ +#define DRM_SCANOUTPOS_VALID (1 << 0) +#define DRM_SCANOUTPOS_IN_VBLANK (1 << 1) +#define DRM_SCANOUTPOS_ACCURATE (1 << 2) + +/** + * DRM driver structure. This structure represent the common code for + * a family of cards. 
There will one drm_device for each card present + * in this family + */ +struct drm_driver { + int (*load) (struct drm_device *, unsigned long flags); + int (*firstopen) (struct drm_device *); + int (*open) (struct drm_device *, struct drm_file *); + void (*preclose) (struct drm_device *, struct drm_file *file_priv); + void (*postclose) (struct drm_device *, struct drm_file *); + void (*lastclose) (struct drm_device *); + int (*unload) (struct drm_device *); + int (*dma_ioctl) (struct drm_device *dev, void *data, struct drm_file *file_priv); + int (*dma_quiescent) (struct drm_device *); + int (*context_dtor) (struct drm_device *dev, int context); + int (*set_busid)(struct drm_device *dev, struct drm_master *master); + + /** + * get_vblank_counter - get raw hardware vblank counter + * @dev: DRM device + * @pipe: counter to fetch + * + * Driver callback for fetching a raw hardware vblank counter for @crtc. + * If a device doesn't have a hardware counter, the driver can simply + * use drm_vblank_no_hw_counter() function. The DRM core will account for + * missed vblank events while interrupts where disabled based on system + * timestamps. + * + * Wraparound handling and loss of events due to modesetting is dealt + * with in the DRM core code. + * + * RETURNS + * Raw vblank counter value. + */ + u32 (*get_vblank_counter) (struct drm_device *dev, unsigned int pipe); + + /** + * enable_vblank - enable vblank interrupt events + * @dev: DRM device + * @pipe: which irq to enable + * + * Enable vblank interrupts for @crtc. If the device doesn't have + * a hardware vblank counter, the driver should use the + * drm_vblank_no_hw_counter() function that keeps a virtual counter. + * + * RETURNS + * Zero on success, appropriate errno if the given @crtc's vblank + * interrupt cannot be enabled. 
+ */ + int (*enable_vblank) (struct drm_device *dev, unsigned int pipe); + + /** + * disable_vblank - disable vblank interrupt events + * @dev: DRM device + * @pipe: which irq to enable + * + * Disable vblank interrupts for @crtc. If the device doesn't have + * a hardware vblank counter, the driver should use the + * drm_vblank_no_hw_counter() function that keeps a virtual counter. + */ + void (*disable_vblank) (struct drm_device *dev, unsigned int pipe); + + /** + * Called by \c drm_device_is_agp. Typically used to determine if a + * card is really attached to AGP or not. + * + * \param dev DRM device handle + * + * \returns + * One of three values is returned depending on whether or not the + * card is absolutely \b not AGP (return of 0), absolutely \b is AGP + * (return of 1), or may or may not be AGP (return of 2). + */ + int (*device_is_agp) (struct drm_device *dev); + + /** + * Called by vblank timestamping code. + * + * Return the current display scanout position from a crtc, and an + * optional accurate ktime_get timestamp of when position was measured. + * + * \param dev DRM device. + * \param pipe Id of the crtc to query. + * \param flags Flags from the caller (DRM_CALLED_FROM_VBLIRQ or 0). + * \param *vpos Target location for current vertical scanout position. + * \param *hpos Target location for current horizontal scanout position. + * \param *stime Target location for timestamp taken immediately before + * scanout position query. Can be NULL to skip timestamp. + * \param *etime Target location for timestamp taken immediately after + * scanout position query. Can be NULL to skip timestamp. + * \param mode Current display timings. + * + * Returns vpos as a positive number while in active scanout area. + * Returns vpos as a negative number inside vblank, counting the number + * of scanlines to go until end of vblank, e.g., -1 means "one scanline + * until start of active scanout / end of vblank." 
+ * + * \return Flags, or'ed together as follows: + * + * DRM_SCANOUTPOS_VALID = Query successful. + * DRM_SCANOUTPOS_INVBL = Inside vblank. + * DRM_SCANOUTPOS_ACCURATE = Returned position is accurate. A lack of + * this flag means that returned position may be offset by a constant + * but unknown small number of scanlines wrt. real scanout position. + * + */ + int (*get_scanout_position) (struct drm_device *dev, unsigned int pipe, + unsigned int flags, int *vpos, int *hpos, + ktime_t *stime, ktime_t *etime, + const struct drm_display_mode *mode); + + /** + * Called by \c drm_get_last_vbltimestamp. Should return a precise + * timestamp when the most recent VBLANK interval ended or will end. + * + * Specifically, the timestamp in @vblank_time should correspond as + * closely as possible to the time when the first video scanline of + * the video frame after the end of VBLANK will start scanning out, + * the time immediately after end of the VBLANK interval. If the + * @crtc is currently inside VBLANK, this will be a time in the future. + * If the @crtc is currently scanning out a frame, this will be the + * past start time of the current scanout. This is meant to adhere + * to the OpenML OML_sync_control extension specification. + * + * \param dev dev DRM device handle. + * \param pipe crtc for which timestamp should be returned. + * \param *max_error Maximum allowable timestamp error in nanoseconds. + * Implementation should strive to provide timestamp + * with an error of at most *max_error nanoseconds. + * Returns true upper bound on error for timestamp. + * \param *vblank_time Target location for returned vblank timestamp. + * \param flags 0 = Defaults, no special treatment needed. + * \param DRM_CALLED_FROM_VBLIRQ = Function is called from vblank + * irq handler. Some drivers need to apply some workarounds + * for gpu-specific vblank irq quirks if flag is set. 
+ * + * \returns + * Zero if timestamping isn't supported in current display mode or a + * negative number on failure. A positive status code on success, + * which describes how the vblank_time timestamp was computed. + */ + int (*get_vblank_timestamp) (struct drm_device *dev, unsigned int pipe, + int *max_error, + struct timeval *vblank_time, + unsigned flags); + + /* these have to be filled in */ + + irqreturn_t(*irq_handler) (int irq, void *arg); + void (*irq_preinstall) (struct drm_device *dev); + int (*irq_postinstall) (struct drm_device *dev); + void (*irq_uninstall) (struct drm_device *dev); + + /* Master routines */ + int (*master_create)(struct drm_device *dev, struct drm_master *master); + void (*master_destroy)(struct drm_device *dev, struct drm_master *master); + /** + * master_set is called whenever the minor master is set. + * master_drop is called whenever the minor master is dropped. + */ + + int (*master_set)(struct drm_device *dev, struct drm_file *file_priv, + bool from_open); + void (*master_drop)(struct drm_device *dev, struct drm_file *file_priv); + + int (*debugfs_init)(struct drm_minor *minor); + void (*debugfs_cleanup)(struct drm_minor *minor); + + /** + * @gem_free_object: deconstructor for drm_gem_objects + * + * This is deprecated and should not be used by new drivers. Use + * @gem_free_object_unlocked instead. + */ + void (*gem_free_object) (struct drm_gem_object *obj); + + /** + * @gem_free_object_unlocked: deconstructor for drm_gem_objects + * + * This is for drivers which are not encumbered with dev->struct_mutex + * legacy locking schemes. Use this hook instead of @gem_free_object. + */ + void (*gem_free_object_unlocked) (struct drm_gem_object *obj); + + int (*gem_open_object) (struct drm_gem_object *, struct drm_file *); + void (*gem_close_object) (struct drm_gem_object *, struct drm_file *); + + /** + * Hook for allocating the GEM object struct, for use by core + * helpers. 
+ */ + struct drm_gem_object *(*gem_create_object)(struct drm_device *dev, + size_t size); + + /* prime: */ + /* export handle -> fd (see drm_gem_prime_handle_to_fd() helper) */ + int (*prime_handle_to_fd)(struct drm_device *dev, struct drm_file *file_priv, + uint32_t handle, uint32_t flags, int *prime_fd); + /* import fd -> handle (see drm_gem_prime_fd_to_handle() helper) */ + int (*prime_fd_to_handle)(struct drm_device *dev, struct drm_file *file_priv, + int prime_fd, uint32_t *handle); + /* export GEM -> dmabuf */ + struct dma_buf * (*gem_prime_export)(struct drm_device *dev, + struct drm_gem_object *obj, int flags); + /* import dmabuf -> GEM */ + struct drm_gem_object * (*gem_prime_import)(struct drm_device *dev, + struct dma_buf *dma_buf); + /* low-level interface used by drm_gem_prime_{import,export} */ + int (*gem_prime_pin)(struct drm_gem_object *obj); + void (*gem_prime_unpin)(struct drm_gem_object *obj); + struct reservation_object * (*gem_prime_res_obj)( + struct drm_gem_object *obj); + struct sg_table *(*gem_prime_get_sg_table)(struct drm_gem_object *obj); + struct drm_gem_object *(*gem_prime_import_sg_table)( + struct drm_device *dev, + struct dma_buf_attachment *attach, + struct sg_table *sgt); + void *(*gem_prime_vmap)(struct drm_gem_object *obj); + void (*gem_prime_vunmap)(struct drm_gem_object *obj, void *vaddr); + int (*gem_prime_mmap)(struct drm_gem_object *obj, + struct vm_area_struct *vma); + + /* vga arb irq handler */ + void (*vgaarb_irq)(struct drm_device *dev, bool state); + + /* dumb alloc support */ + int (*dumb_create)(struct drm_file *file_priv, + struct drm_device *dev, + struct drm_mode_create_dumb *args); + int (*dumb_map_offset)(struct drm_file *file_priv, + struct drm_device *dev, uint32_t handle, + uint64_t *offset); + int (*dumb_destroy)(struct drm_file *file_priv, + struct drm_device *dev, + uint32_t handle); + + /* Driver private ops for this object */ + const struct vm_operations_struct *gem_vm_ops; + + int major; + int minor; 
+ int patchlevel; + char *name; + char *desc; + char *date; + + u32 driver_features; + int dev_priv_size; + const struct drm_ioctl_desc *ioctls; + int num_ioctls; + const struct file_operations *fops; + + /* List of devices hanging off this driver with stealth attach. */ + struct list_head legacy_dev_list; +} __do_const; +typedef struct drm_driver __no_const drm_driver_no_const; + +enum drm_minor_type { + DRM_MINOR_PRIMARY, + DRM_MINOR_CONTROL, + DRM_MINOR_RENDER, + DRM_MINOR_CNT, +}; + +/** + * Info file list entry. This structure represents a debugfs or proc file to + * be created by the drm core + */ +struct drm_info_list { + const char *name; /** file name */ + int (*show)(struct seq_file*, void*); /** show callback */ + u32 driver_features; /**< Required driver features for this entry */ + void *data; +} __do_const; +typedef struct drm_info_list __no_const drm_info_list_no_const; + +/** + * debugfs node structure. This structure represents a debugfs file. + */ +struct drm_info_node { + struct list_head list; + struct drm_minor *minor; + const struct drm_info_list *info_ent; + struct dentry *dent; +}; + +/** + * DRM minor structure. This structure represents a drm minor number. + */ +struct drm_minor { + int index; /**< Minor device number */ + int type; /**< Control or render */ + struct device *kdev; /**< Linux device */ + struct drm_device *dev; + + struct dentry *debugfs_root; + + struct list_head debugfs_list; + struct mutex debugfs_lock; /* Protects debugfs_list. */ +}; + +/** + * DRM device structure. This structure represent a complete card that + * may contain multiple heads. 
+ */ +struct drm_device { + struct list_head legacy_dev_list;/**< list of devices per driver for stealth attach cleanup */ + int if_version; /**< Highest interface version set */ + + /** \name Lifetime Management */ + /*@{ */ + struct kref ref; /**< Object ref-count */ + struct device *dev; /**< Device structure of bus-device */ + struct drm_driver *driver; /**< DRM driver managing the device */ + void *dev_private; /**< DRM driver private data */ + struct drm_minor *control; /**< Control node */ + struct drm_minor *primary; /**< Primary node */ + struct drm_minor *render; /**< Render node */ + + /* currently active master for this device. Protected by master_mutex */ + struct drm_master *master; + + atomic_t unplugged; /**< Flag whether dev is dead */ + struct inode *anon_inode; /**< inode for private address-space */ + char *unique; /**< unique name of the device */ + /*@} */ + + /** \name Locks */ + /*@{ */ + struct mutex struct_mutex; /**< For others */ + struct mutex master_mutex; /**< For drm_minor::master and drm_file::is_master */ + /*@} */ + + /** \name Usage Counters */ + /*@{ */ + local_t open_count; /**< Outstanding files open, protected by drm_global_mutex. */ + spinlock_t buf_lock; /**< For drm_device::buf_use and a few other things. 
*/ + int buf_use; /**< Buffers in use -- cannot alloc */ + atomic_t buf_alloc; /**< Buffer allocation in progress */ + /*@} */ + + struct mutex filelist_mutex; + struct list_head filelist; + + /** \name Memory management */ + /*@{ */ + struct list_head maplist; /**< Linked list of regions */ + struct drm_open_hash map_hash; /**< User token hash table for maps */ + + /** \name Context handle management */ + /*@{ */ + struct list_head ctxlist; /**< Linked list of context handles */ + struct mutex ctxlist_mutex; /**< For ctxlist */ + + struct idr ctx_idr; + + struct list_head vmalist; /**< List of vmas (for debugging) */ + + /*@} */ + + /** \name DMA support */ + /*@{ */ + struct drm_device_dma *dma; /**< Optional pointer for DMA support */ + /*@} */ + + /** \name Context support */ + /*@{ */ + + __volatile__ long context_flag; /**< Context swapping flag */ + int last_context; /**< Last current context */ + /*@} */ + + /** \name VBLANK IRQ support */ + /*@{ */ + bool irq_enabled; + int irq; + + /* + * If true, vblank interrupt will be disabled immediately when the + * refcount drops to zero, as opposed to via the vblank disable + * timer. + * This can be set to true it the hardware has a working vblank + * counter and the driver uses drm_vblank_on() and drm_vblank_off() + * appropriately. 
+ */ + bool vblank_disable_immediate; + + /* array of size num_crtcs */ + struct drm_vblank_crtc *vblank; + + spinlock_t vblank_time_lock; /**< Protects vblank count and time updates during vblank enable/disable */ + spinlock_t vbl_lock; + + u32 max_vblank_count; /**< size of vblank counter register */ + + /** + * List of events + */ + struct list_head vblank_event_list; + spinlock_t event_lock; + + /*@} */ + + struct drm_agp_head *agp; /**< AGP data */ + + struct pci_dev *pdev; /**< PCI device structure */ +#ifdef __alpha__ + struct pci_controller *hose; +#endif + + struct platform_device *platformdev; /**< Platform device struture */ + struct virtio_device *virtdev; + + struct drm_sg_mem *sg; /**< Scatter gather memory */ + unsigned int num_crtcs; /**< Number of CRTCs on this device */ + + struct { + int context; + struct drm_hw_lock *lock; + } sigdata; + + struct drm_local_map *agp_buffer_map; + unsigned int agp_buffer_token; + + struct drm_mode_config mode_config; /**< Current mode config */ + + /** \name GEM information */ + /*@{ */ + struct mutex object_name_lock; + struct idr object_name_idr; + struct drm_vma_offset_manager *vma_offset_manager; + /*@} */ + int switch_power_state; +}; + +#include + +#define DRM_SWITCH_POWER_ON 0 +#define DRM_SWITCH_POWER_OFF 1 +#define DRM_SWITCH_POWER_CHANGING 2 +#define DRM_SWITCH_POWER_DYNAMIC_OFF 3 + +static __inline__ int drm_core_check_feature(struct drm_device *dev, + int feature) +{ + return ((dev->driver->driver_features & feature) ? 
1 : 0); +} + +static inline void drm_device_set_unplugged(struct drm_device *dev) +{ + smp_wmb(); + atomic_set(&dev->unplugged, 1); +} + +static inline int drm_device_is_unplugged(struct drm_device *dev) +{ + int ret = atomic_read(&dev->unplugged); + smp_rmb(); + return ret; +} + +static inline bool drm_is_render_client(const struct drm_file *file_priv) +{ + return file_priv->minor->type == DRM_MINOR_RENDER; +} + +static inline bool drm_is_control_client(const struct drm_file *file_priv) +{ + return file_priv->minor->type == DRM_MINOR_CONTROL; +} + +static inline bool drm_is_primary_client(const struct drm_file *file_priv) +{ + return file_priv->minor->type == DRM_MINOR_PRIMARY; +} + +/******************************************************************/ +/** \name Internal function definitions */ +/*@{*/ + + /* Driver support (drm_drv.h) */ +extern int drm_ioctl_permit(u32 flags, struct drm_file *file_priv); +extern long drm_ioctl(struct file *filp, + unsigned int cmd, unsigned long arg); +extern long drm_compat_ioctl(struct file *filp, + unsigned int cmd, unsigned long arg); +extern bool drm_ioctl_flags(unsigned int nr, unsigned int *flags); + +/* File Operations (drm_fops.c) */ +int drm_open(struct inode *inode, struct file *filp); +ssize_t drm_read(struct file *filp, char __user *buffer, + size_t count, loff_t *offset); +int drm_release(struct inode *inode, struct file *filp); +unsigned int drm_poll(struct file *filp, struct poll_table_struct *wait); +int drm_event_reserve_init_locked(struct drm_device *dev, + struct drm_file *file_priv, + struct drm_pending_event *p, + struct drm_event *e); +int drm_event_reserve_init(struct drm_device *dev, + struct drm_file *file_priv, + struct drm_pending_event *p, + struct drm_event *e); +void drm_event_cancel_free(struct drm_device *dev, + struct drm_pending_event *p); +void drm_send_event_locked(struct drm_device *dev, struct drm_pending_event *e); +void drm_send_event(struct drm_device *dev, struct drm_pending_event *e); 
+ +/* Misc. IOCTL support (drm_ioctl.c) */ +int drm_noop(struct drm_device *dev, void *data, + struct drm_file *file_priv); +int drm_invalid_op(struct drm_device *dev, void *data, + struct drm_file *file_priv); + +/* Cache management (drm_cache.c) */ +void drm_clflush_pages(struct page *pages[], unsigned long num_pages); +void drm_clflush_sg(struct sg_table *st); +void drm_clflush_virt_range(void *addr, unsigned long length); + +/* + * These are exported to drivers so that they can implement fencing using + * DMA quiscent + idle. DMA quiescent usually requires the hardware lock. + */ + +/* Modesetting support */ +extern void drm_vblank_pre_modeset(struct drm_device *dev, unsigned int pipe); +extern void drm_vblank_post_modeset(struct drm_device *dev, unsigned int pipe); + +/* drm_drv.c */ +void drm_put_dev(struct drm_device *dev); +void drm_unplug_dev(struct drm_device *dev); +extern unsigned int drm_debug; + + /* Debugfs support */ +#if defined(CONFIG_DEBUG_FS) +extern int drm_debugfs_create_files(const struct drm_info_list *files, + int count, struct dentry *root, + struct drm_minor *minor); +extern int drm_debugfs_remove_files(const struct drm_info_list *files, + int count, struct drm_minor *minor); +#else +static inline int drm_debugfs_create_files(const struct drm_info_list *files, + int count, struct dentry *root, + struct drm_minor *minor) +{ + return 0; +} + +static inline int drm_debugfs_remove_files(const struct drm_info_list *files, + int count, struct drm_minor *minor) +{ + return 0; +} +#endif + +struct dma_buf_export_info; + +extern struct dma_buf *drm_gem_prime_export(struct drm_device *dev, + struct drm_gem_object *obj, + int flags); +extern int drm_gem_prime_handle_to_fd(struct drm_device *dev, + struct drm_file *file_priv, uint32_t handle, uint32_t flags, + int *prime_fd); +extern struct drm_gem_object *drm_gem_prime_import(struct drm_device *dev, + struct dma_buf *dma_buf); +extern int drm_gem_prime_fd_to_handle(struct drm_device *dev, + struct 
drm_file *file_priv, int prime_fd, uint32_t *handle); +struct dma_buf *drm_gem_dmabuf_export(struct drm_device *dev, + struct dma_buf_export_info *exp_info); +extern void drm_gem_dmabuf_release(struct dma_buf *dma_buf); + +extern int drm_prime_sg_to_page_addr_arrays(struct sg_table *sgt, struct page **pages, + dma_addr_t *addrs, int max_pages); +extern struct sg_table *drm_prime_pages_to_sg(struct page **pages, unsigned int nr_pages); +extern void drm_prime_gem_destroy(struct drm_gem_object *obj, struct sg_table *sg); + + +extern struct drm_dma_handle *drm_pci_alloc(struct drm_device *dev, size_t size, + size_t align); +extern void drm_pci_free(struct drm_device *dev, struct drm_dma_handle * dmah); + + /* sysfs support (drm_sysfs.c) */ +extern void drm_sysfs_hotplug_event(struct drm_device *dev); + + +struct drm_device *drm_dev_alloc(struct drm_driver *driver, + struct device *parent); +int drm_dev_init(struct drm_device *dev, + struct drm_driver *driver, + struct device *parent); +void drm_dev_ref(struct drm_device *dev); +void drm_dev_unref(struct drm_device *dev); +int drm_dev_register(struct drm_device *dev, unsigned long flags); +void drm_dev_unregister(struct drm_device *dev); + +struct drm_minor *drm_minor_acquire(unsigned int minor_id); +void drm_minor_release(struct drm_minor *minor); + +/*@}*/ + +/* PCI section */ +static __inline__ int drm_pci_device_is_agp(struct drm_device *dev) +{ + if (dev->driver->device_is_agp != NULL) { + int err = (*dev->driver->device_is_agp) (dev); + + if (err != 2) { + return err; + } + } + + return pci_find_capability(dev->pdev, PCI_CAP_ID_AGP); +} +void drm_pci_agp_destroy(struct drm_device *dev); + +extern int drm_pci_init(struct drm_driver *driver, struct pci_driver *pdriver); +extern void drm_pci_exit(struct drm_driver *driver, struct pci_driver *pdriver); +#ifdef CONFIG_PCI +extern int drm_get_pci_dev(struct pci_dev *pdev, + const struct pci_device_id *ent, + struct drm_driver *driver); +extern int 
drm_pci_set_busid(struct drm_device *dev, struct drm_master *master); +#else +static inline int drm_get_pci_dev(struct pci_dev *pdev, + const struct pci_device_id *ent, + struct drm_driver *driver) +{ + return -ENOSYS; +} + +static inline int drm_pci_set_busid(struct drm_device *dev, + struct drm_master *master) +{ + return -ENOSYS; +} +#endif + +#define DRM_PCIE_SPEED_25 1 +#define DRM_PCIE_SPEED_50 2 +#define DRM_PCIE_SPEED_80 4 + +extern int drm_pcie_get_speed_cap_mask(struct drm_device *dev, u32 *speed_mask); +extern int drm_pcie_get_max_link_width(struct drm_device *dev, u32 *mlw); + +/* platform section */ +extern int drm_platform_init(struct drm_driver *driver, struct platform_device *platform_device); + +/* returns true if currently okay to sleep */ +static __inline__ bool drm_can_sleep(void) +{ + if (in_atomic() || in_dbg_master() || irqs_disabled()) + return false; + return true; +} + +/* helper for handling conditionals in various for_each macros */ +#define for_each_if(condition) if (!(condition)) {} else + +#endif diff --git a/include/drm/drm_agpsupport.h b/include/drm/drm_agpsupport.h index 664e120b93..b2d912670a 100644 --- a/include/drm/drm_agpsupport.h +++ b/include/drm/drm_agpsupport.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _DRM_AGPSUPPORT_H_ #define _DRM_AGPSUPPORT_H_ @@ -31,6 +30,11 @@ struct drm_agp_head { void drm_free_agp(struct agp_memory * handle, int pages); int drm_bind_agp(struct agp_memory * handle, unsigned int start); int drm_unbind_agp(struct agp_memory * handle); +struct agp_memory *drm_agp_bind_pages(struct drm_device *dev, + struct page **pages, + unsigned long num_pages, + uint32_t gtt_offset, + uint32_t type); struct drm_agp_head *drm_agp_init(struct drm_device *dev); void drm_legacy_agp_clear(struct drm_device *dev); @@ -75,6 +79,15 @@ static inline int drm_unbind_agp(struct agp_memory * handle) return -ENODEV; } +static inline struct agp_memory *drm_agp_bind_pages(struct drm_device *dev, + struct page 
**pages, + unsigned long num_pages, + uint32_t gtt_offset, + uint32_t type) +{ + return NULL; +} + static inline struct drm_agp_head *drm_agp_init(struct drm_device *dev) { return NULL; diff --git a/include/drm/drm_atomic.h b/include/drm/drm_atomic.h index 1701c2128a..9701f2dfb7 100644 --- a/include/drm/drm_atomic.h +++ b/include/drm/drm_atomic.h @@ -29,13 +29,12 @@ #define DRM_ATOMIC_H_ #include -#include /** * struct drm_crtc_commit - track modeset commits on a CRTC * * This structure is used to track pending modeset changes and atomic commit on - * a per-CRTC basis. Since updating the list should never block, this structure + * a per-CRTC basis. Since updating the list should never block this structure * is reference counted to allow waiters to safely wait on an event to complete, * without holding any locks. * @@ -60,14 +59,12 @@ * wait for flip_done <---- * clean up atomic state * - * The important bit to know is that &cleanup_done is the terminal event, but the - * ordering between &flip_done and &hw_done is entirely up to the specific driver + * The important bit to know is that cleanup_done is the terminal event, but the + * ordering between flip_done and hw_done is entirely up to the specific driver * and modeset state change. * * For an implementation of how to use this look at * drm_atomic_helper_setup_commit() from the atomic helper library. - * - * See also drm_crtc_commit_wait(). */ struct drm_crtc_commit { /** @@ -94,9 +91,6 @@ struct drm_crtc_commit { * commit is sent to userspace, or when an out-fence is singalled. Note * that for most hardware, in most cases this happens after @hw_done is * signalled. - * - * Completion of this stage is signalled implicitly by calling - * drm_crtc_send_vblank_event() on &drm_crtc_state.event. */ struct completion flip_done; @@ -105,16 +99,13 @@ struct drm_crtc_commit { * * Will be signalled when all hw register changes for this commit have * been written out. 
Especially when disabling a pipe this can be much - * later than @flip_done, since that can signal already when the + * later than than @flip_done, since that can signal already when the * screen goes black, whereas to fully shut down a pipe more register * I/O is required. * * Note that this does not need to include separately reference-counted * resources like backing storage buffer pinning, or runtime pm * management. - * - * Drivers should call drm_atomic_helper_commit_hw_done() to signal - * completion of this stage. */ struct completion hw_done; @@ -126,17 +117,13 @@ struct drm_crtc_commit { * a vblank wait completed it might be a bit later. This completion is * useful to throttle updates and avoid hardware updates getting ahead * of the buffer cleanup too much. - * - * Drivers should call drm_atomic_helper_commit_cleanup_done() to signal - * completion of this stage. */ struct completion cleanup_done; /** * @commit_entry: * - * Entry on the per-CRTC &drm_crtc.commit_list. Protected by - * $drm_crtc.commit_lock. + * Entry on the per-CRTC commit_list. Protected by crtc->commit_lock. */ struct list_head commit_entry; @@ -146,260 +133,48 @@ struct drm_crtc_commit { * &drm_pending_vblank_event pointer to clean up private events. */ struct drm_pending_vblank_event *event; - - /** - * @abort_completion: - * - * A flag that's set after drm_atomic_helper_setup_commit() takes a - * second reference for the completion of $drm_crtc_state.event. It's - * used by the free code to remove the second reference if commit fails. 
- */ - bool abort_completion; }; struct __drm_planes_state { struct drm_plane *ptr; - struct drm_plane_state *state, *old_state, *new_state; + struct drm_plane_state *state; }; struct __drm_crtcs_state { struct drm_crtc *ptr; - struct drm_crtc_state *state, *old_state, *new_state; - - /** - * @commit: - * - * A reference to the CRTC commit object that is kept for use by - * drm_atomic_helper_wait_for_flip_done() after - * drm_atomic_helper_commit_hw_done() is called. This ensures that a - * concurrent commit won't free a commit object that is still in use. - */ + struct drm_crtc_state *state; struct drm_crtc_commit *commit; - - s32 __user *out_fence_ptr; - u64 last_vblank_count; }; struct __drm_connnectors_state { struct drm_connector *ptr; - struct drm_connector_state *state, *old_state, *new_state; - /** - * @out_fence_ptr: - * - * User-provided pointer which the kernel uses to return a sync_file - * file descriptor. Used by writeback connectors to signal completion of - * the writeback. - */ - s32 __user *out_fence_ptr; -}; - -struct drm_private_obj; -struct drm_private_state; - -/** - * struct drm_private_state_funcs - atomic state functions for private objects - * - * These hooks are used by atomic helpers to create, swap and destroy states of - * private objects. The structure itself is used as a vtable to identify the - * associated private object type. Each private object type that needs to be - * added to the atomic states is expected to have an implementation of these - * hooks and pass a pointer to its drm_private_state_funcs struct to - * drm_atomic_get_private_obj_state(). - */ -struct drm_private_state_funcs { - /** - * @atomic_duplicate_state: - * - * Duplicate the current state of the private object and return it. It - * is an error to call this before obj->state has been initialized. - * - * RETURNS: - * - * Duplicated atomic state or NULL when obj->state is not - * initialized or allocation failed. 
- */ - struct drm_private_state *(*atomic_duplicate_state)(struct drm_private_obj *obj); - - /** - * @atomic_destroy_state: - * - * Frees the private object state created with @atomic_duplicate_state. - */ - void (*atomic_destroy_state)(struct drm_private_obj *obj, - struct drm_private_state *state); -}; - -/** - * struct drm_private_obj - base struct for driver private atomic object - * - * A driver private object is initialized by calling - * drm_atomic_private_obj_init() and cleaned up by calling - * drm_atomic_private_obj_fini(). - * - * Currently only tracks the state update functions and the opaque driver - * private state itself, but in the future might also track which - * &drm_modeset_lock is required to duplicate and update this object's state. - * - * All private objects must be initialized before the DRM device they are - * attached to is registered to the DRM subsystem (call to drm_dev_register()) - * and should stay around until this DRM device is unregistered (call to - * drm_dev_unregister()). In other words, private objects lifetime is tied - * to the DRM device lifetime. This implies that: - * - * 1/ all calls to drm_atomic_private_obj_init() must be done before calling - * drm_dev_register() - * 2/ all calls to drm_atomic_private_obj_fini() must be done after calling - * drm_dev_unregister() - * - * If that private object is used to store a state shared by multiple - * CRTCs, proper care must be taken to ensure that non-blocking commits are - * properly ordered to avoid a use-after-free issue. - * - * Indeed, assuming a sequence of two non-blocking &drm_atomic_commit on two - * different &drm_crtc using different &drm_plane and &drm_connector, so with no - * resources shared, there's no guarantee on which commit is going to happen - * first. However, the second &drm_atomic_commit will consider the first - * &drm_private_obj its old state, and will be in charge of freeing it whenever - * the second &drm_atomic_commit is done. 
- * - * If the first &drm_atomic_commit happens after it, it will consider its - * &drm_private_obj the new state and will be likely to access it, resulting in - * an access to a freed memory region. Drivers should store (and get a reference - * to) the &drm_crtc_commit structure in our private state in - * &drm_mode_config_helper_funcs.atomic_commit_setup, and then wait for that - * commit to complete as the first step of - * &drm_mode_config_helper_funcs.atomic_commit_tail, similar to - * drm_atomic_helper_wait_for_dependencies(). - */ -struct drm_private_obj { - /** - * @head: List entry used to attach a private object to a &drm_device - * (queued to &drm_mode_config.privobj_list). - */ - struct list_head head; - - /** - * @lock: Modeset lock to protect the state object. - */ - struct drm_modeset_lock lock; - - /** - * @state: Current atomic state for this driver private object. - */ - struct drm_private_state *state; - - /** - * @funcs: - * - * Functions to manipulate the state of this driver private object, see - * &drm_private_state_funcs. - */ - const struct drm_private_state_funcs *funcs; -}; - -/** - * drm_for_each_privobj() - private object iterator - * - * @privobj: pointer to the current private object. Updated after each - * iteration - * @dev: the DRM device we want get private objects from - * - * Allows one to iterate over all private objects attached to @dev - */ -#define drm_for_each_privobj(privobj, dev) \ - list_for_each_entry(privobj, &(dev)->mode_config.privobj_list, head) - -/** - * struct drm_private_state - base struct for driver private object state - * @state: backpointer to global drm_atomic_state - * - * Currently only contains a backpointer to the overall atomic update, but in - * the future also might hold synchronization information similar to e.g. - * &drm_crtc.commit. 
- */ -struct drm_private_state { - struct drm_atomic_state *state; -}; - -struct __drm_private_objs_state { - struct drm_private_obj *ptr; - struct drm_private_state *state, *old_state, *new_state; + struct drm_connector_state *state; }; /** * struct drm_atomic_state - the global state object for atomic updates - * @ref: count of all references to this state (will not be freed until zero) * @dev: parent DRM device - * @async_update: hint for asynchronous plane update + * @allow_modeset: allow full modeset + * @legacy_cursor_update: hint to enforce legacy cursor IOCTL semantics + * @legacy_set_config: Disable conflicting encoders instead of failing with -EINVAL. * @planes: pointer to array of structures with per-plane data * @crtcs: pointer to array of CRTC pointers * @num_connector: size of the @connectors and @connector_states arrays * @connectors: pointer to array of structures with per-connector data - * @num_private_objs: size of the @private_objs array - * @private_objs: pointer to array of private object pointers * @acquire_ctx: acquire context for this atomic modeset state update - * - * States are added to an atomic update by calling drm_atomic_get_crtc_state(), - * drm_atomic_get_plane_state(), drm_atomic_get_connector_state(), or for - * private state structures, drm_atomic_get_private_obj_state(). */ struct drm_atomic_state { - struct kref ref; - struct drm_device *dev; - - /** - * @allow_modeset: - * - * Allow full modeset. This is used by the ATOMIC IOCTL handler to - * implement the DRM_MODE_ATOMIC_ALLOW_MODESET flag. Drivers should - * never consult this flag, instead looking at the output of - * drm_atomic_crtc_needs_modeset(). - */ bool allow_modeset : 1; - /** - * @legacy_cursor_update: - * - * Hint to enforce legacy cursor IOCTL semantics. - * - * WARNING: This is thoroughly broken and pretty much impossible to - * implement correctly. 
Drivers must ignore this and should instead - * implement &drm_plane_helper_funcs.atomic_async_check and - * &drm_plane_helper_funcs.atomic_async_commit hooks. New users of this - * flag are not allowed. - */ bool legacy_cursor_update : 1; - bool async_update : 1; - /** - * @duplicated: - * - * Indicates whether or not this atomic state was duplicated using - * drm_atomic_helper_duplicate_state(). Drivers and atomic helpers - * should use this to fixup normal inconsistencies in duplicated - * states. - */ - bool duplicated : 1; + bool legacy_set_config : 1; struct __drm_planes_state *planes; struct __drm_crtcs_state *crtcs; int num_connector; struct __drm_connnectors_state *connectors; - int num_private_objs; - struct __drm_private_objs_state *private_objs; struct drm_modeset_acquire_ctx *acquire_ctx; - /** - * @fake_commit: - * - * Used for signaling unbound planes/connectors. - * When a connector or plane is not bound to any CRTC, it's still important - * to preserve linearity to prevent the atomic states from being freed to early. - * - * This commit (if set) is not bound to any CRTC, but will be completed when - * drm_atomic_helper_commit_hw_done() is called. - */ - struct drm_crtc_commit *fake_commit; - /** * @commit_work: * @@ -409,67 +184,16 @@ struct drm_atomic_state { struct work_struct commit_work; }; -void __drm_crtc_commit_free(struct kref *kref); - -/** - * drm_crtc_commit_get - acquire a reference to the CRTC commit - * @commit: CRTC commit - * - * Increases the reference of @commit. - * - * Returns: - * The pointer to @commit, with reference increased. 
- */ -static inline struct drm_crtc_commit *drm_crtc_commit_get(struct drm_crtc_commit *commit) +void drm_crtc_commit_put(struct drm_crtc_commit *commit); +static inline void drm_crtc_commit_get(struct drm_crtc_commit *commit) { kref_get(&commit->ref); - return commit; } -/** - * drm_crtc_commit_put - release a reference to the CRTC commmit - * @commit: CRTC commit - * - * This releases a reference to @commit which is freed after removing the - * final reference. No locking required and callable from any context. - */ -static inline void drm_crtc_commit_put(struct drm_crtc_commit *commit) -{ - kref_put(&commit->ref, __drm_crtc_commit_free); -} - -int drm_crtc_commit_wait(struct drm_crtc_commit *commit); - struct drm_atomic_state * __must_check drm_atomic_state_alloc(struct drm_device *dev); void drm_atomic_state_clear(struct drm_atomic_state *state); - -/** - * drm_atomic_state_get - acquire a reference to the atomic state - * @state: The atomic state - * - * Returns a new reference to the @state - */ -static inline struct drm_atomic_state * -drm_atomic_state_get(struct drm_atomic_state *state) -{ - kref_get(&state->ref); - return state; -} - -void __drm_atomic_state_free(struct kref *ref); - -/** - * drm_atomic_state_put - release a reference to the atomic state - * @state: The atomic state - * - * This releases a reference to @state which is freed after removing the - * final reference. No locking required and callable from any context. 
- */ -static inline void drm_atomic_state_put(struct drm_atomic_state *state) -{ - kref_put(&state->ref, __drm_atomic_state_free); -} +void drm_atomic_state_free(struct drm_atomic_state *state); int __must_check drm_atomic_state_init(struct drm_device *dev, struct drm_atomic_state *state); @@ -479,46 +203,29 @@ void drm_atomic_state_default_release(struct drm_atomic_state *state); struct drm_crtc_state * __must_check drm_atomic_get_crtc_state(struct drm_atomic_state *state, struct drm_crtc *crtc); +int drm_atomic_crtc_set_property(struct drm_crtc *crtc, + struct drm_crtc_state *state, struct drm_property *property, + uint64_t val); struct drm_plane_state * __must_check drm_atomic_get_plane_state(struct drm_atomic_state *state, struct drm_plane *plane); +int drm_atomic_plane_set_property(struct drm_plane *plane, + struct drm_plane_state *state, struct drm_property *property, + uint64_t val); struct drm_connector_state * __must_check drm_atomic_get_connector_state(struct drm_atomic_state *state, struct drm_connector *connector); - -void drm_atomic_private_obj_init(struct drm_device *dev, - struct drm_private_obj *obj, - struct drm_private_state *state, - const struct drm_private_state_funcs *funcs); -void drm_atomic_private_obj_fini(struct drm_private_obj *obj); - -struct drm_private_state * __must_check -drm_atomic_get_private_obj_state(struct drm_atomic_state *state, - struct drm_private_obj *obj); -struct drm_private_state * -drm_atomic_get_old_private_obj_state(struct drm_atomic_state *state, - struct drm_private_obj *obj); -struct drm_private_state * -drm_atomic_get_new_private_obj_state(struct drm_atomic_state *state, - struct drm_private_obj *obj); - -struct drm_connector * -drm_atomic_get_old_connector_for_encoder(struct drm_atomic_state *state, - struct drm_encoder *encoder); -struct drm_connector * -drm_atomic_get_new_connector_for_encoder(struct drm_atomic_state *state, - struct drm_encoder *encoder); +int drm_atomic_connector_set_property(struct 
drm_connector *connector, + struct drm_connector_state *state, struct drm_property *property, + uint64_t val); /** - * drm_atomic_get_existing_crtc_state - get CRTC state, if it exists + * drm_atomic_get_existing_crtc_state - get crtc state, if it exists * @state: global atomic state object - * @crtc: CRTC to grab + * @crtc: crtc to grab * - * This function returns the CRTC state for the given CRTC, or NULL - * if the CRTC is not part of the global atomic state. - * - * This function is deprecated, @drm_atomic_get_old_crtc_state or - * @drm_atomic_get_new_crtc_state should be used instead. + * This function returns the crtc state for the given crtc, or NULL + * if the crtc is not part of the global atomic state. */ static inline struct drm_crtc_state * drm_atomic_get_existing_crtc_state(struct drm_atomic_state *state, @@ -527,35 +234,6 @@ drm_atomic_get_existing_crtc_state(struct drm_atomic_state *state, return state->crtcs[drm_crtc_index(crtc)].state; } -/** - * drm_atomic_get_old_crtc_state - get old CRTC state, if it exists - * @state: global atomic state object - * @crtc: CRTC to grab - * - * This function returns the old CRTC state for the given CRTC, or - * NULL if the CRTC is not part of the global atomic state. - */ -static inline struct drm_crtc_state * -drm_atomic_get_old_crtc_state(struct drm_atomic_state *state, - struct drm_crtc *crtc) -{ - return state->crtcs[drm_crtc_index(crtc)].old_state; -} -/** - * drm_atomic_get_new_crtc_state - get new CRTC state, if it exists - * @state: global atomic state object - * @crtc: CRTC to grab - * - * This function returns the new CRTC state for the given CRTC, or - * NULL if the CRTC is not part of the global atomic state. 
- */ -static inline struct drm_crtc_state * -drm_atomic_get_new_crtc_state(struct drm_atomic_state *state, - struct drm_crtc *crtc) -{ - return state->crtcs[drm_crtc_index(crtc)].new_state; -} - /** * drm_atomic_get_existing_plane_state - get plane state, if it exists * @state: global atomic state object @@ -563,9 +241,6 @@ drm_atomic_get_new_crtc_state(struct drm_atomic_state *state, * * This function returns the plane state for the given plane, or NULL * if the plane is not part of the global atomic state. - * - * This function is deprecated, @drm_atomic_get_old_plane_state or - * @drm_atomic_get_new_plane_state should be used instead. */ static inline struct drm_plane_state * drm_atomic_get_existing_plane_state(struct drm_atomic_state *state, @@ -574,36 +249,6 @@ drm_atomic_get_existing_plane_state(struct drm_atomic_state *state, return state->planes[drm_plane_index(plane)].state; } -/** - * drm_atomic_get_old_plane_state - get plane state, if it exists - * @state: global atomic state object - * @plane: plane to grab - * - * This function returns the old plane state for the given plane, or - * NULL if the plane is not part of the global atomic state. - */ -static inline struct drm_plane_state * -drm_atomic_get_old_plane_state(struct drm_atomic_state *state, - struct drm_plane *plane) -{ - return state->planes[drm_plane_index(plane)].old_state; -} - -/** - * drm_atomic_get_new_plane_state - get plane state, if it exists - * @state: global atomic state object - * @plane: plane to grab - * - * This function returns the new plane state for the given plane, or - * NULL if the plane is not part of the global atomic state. 
- */ -static inline struct drm_plane_state * -drm_atomic_get_new_plane_state(struct drm_atomic_state *state, - struct drm_plane *plane) -{ - return state->planes[drm_plane_index(plane)].new_state; -} - /** * drm_atomic_get_existing_connector_state - get connector state, if it exists * @state: global atomic state object @@ -611,9 +256,6 @@ drm_atomic_get_new_plane_state(struct drm_atomic_state *state, * * This function returns the connector state for the given connector, * or NULL if the connector is not part of the global atomic state. - * - * This function is deprecated, @drm_atomic_get_old_connector_state or - * @drm_atomic_get_new_connector_state should be used instead. */ static inline struct drm_connector_state * drm_atomic_get_existing_connector_state(struct drm_atomic_state *state, @@ -627,46 +269,6 @@ drm_atomic_get_existing_connector_state(struct drm_atomic_state *state, return state->connectors[index].state; } -/** - * drm_atomic_get_old_connector_state - get connector state, if it exists - * @state: global atomic state object - * @connector: connector to grab - * - * This function returns the old connector state for the given connector, - * or NULL if the connector is not part of the global atomic state. - */ -static inline struct drm_connector_state * -drm_atomic_get_old_connector_state(struct drm_atomic_state *state, - struct drm_connector *connector) -{ - int index = drm_connector_index(connector); - - if (index >= state->num_connector) - return NULL; - - return state->connectors[index].old_state; -} - -/** - * drm_atomic_get_new_connector_state - get connector state, if it exists - * @state: global atomic state object - * @connector: connector to grab - * - * This function returns the new connector state for the given connector, - * or NULL if the connector is not part of the global atomic state. 
- */ -static inline struct drm_connector_state * -drm_atomic_get_new_connector_state(struct drm_atomic_state *state, - struct drm_connector *connector) -{ - int index = drm_connector_index(connector); - - if (index >= state->num_connector) - return NULL; - - return state->connectors[index].new_state; -} - /** * __drm_atomic_get_current_plane_state - get current plane state * @state: global atomic state object @@ -704,8 +306,19 @@ __drm_atomic_get_current_plane_state(struct drm_atomic_state *state, } int __must_check -drm_atomic_add_encoder_bridges(struct drm_atomic_state *state, - struct drm_encoder *encoder); +drm_atomic_set_mode_for_crtc(struct drm_crtc_state *state, + struct drm_display_mode *mode); +int __must_check +drm_atomic_set_mode_prop_for_crtc(struct drm_crtc_state *state, + struct drm_property_blob *blob); +int __must_check +drm_atomic_set_crtc_for_plane(struct drm_plane_state *plane_state, + struct drm_crtc *crtc); +void drm_atomic_set_fb_for_plane(struct drm_plane_state *plane_state, + struct drm_framebuffer *fb); +int __must_check +drm_atomic_set_crtc_for_connector(struct drm_connector_state *conn_state, + struct drm_crtc *crtc); int __must_check drm_atomic_add_affected_connectors(struct drm_atomic_state *state, struct drm_crtc *crtc); @@ -713,411 +326,54 @@ int __must_check drm_atomic_add_affected_planes(struct drm_atomic_state *state, struct drm_crtc *crtc); +void drm_atomic_legacy_backoff(struct drm_atomic_state *state); + +void +drm_atomic_clean_old_fb(struct drm_device *dev, unsigned plane_mask, int ret); + int __must_check drm_atomic_check_only(struct drm_atomic_state *state); int __must_check drm_atomic_commit(struct drm_atomic_state *state); int __must_check drm_atomic_nonblocking_commit(struct drm_atomic_state *state); -void drm_state_dump(struct drm_device *dev, struct drm_printer *p); - -/** - * for_each_oldnew_connector_in_state - iterate over all connectors in an atomic update - * @__state: &struct drm_atomic_state pointer - * 
@connector: &struct drm_connector iteration cursor - * @old_connector_state: &struct drm_connector_state iteration cursor for the - * old state - * @new_connector_state: &struct drm_connector_state iteration cursor for the - * new state - * @__i: int iteration cursor, for macro-internal use - * - * This iterates over all connectors in an atomic update, tracking both old and - * new state. This is useful in places where the state delta needs to be - * considered, for example in atomic check functions. - */ -#define for_each_oldnew_connector_in_state(__state, connector, old_connector_state, new_connector_state, __i) \ - for ((__i) = 0; \ - (__i) < (__state)->num_connector; \ - (__i)++) \ - for_each_if ((__state)->connectors[__i].ptr && \ - ((connector) = (__state)->connectors[__i].ptr, \ - (void)(connector) /* Only to avoid unused-but-set-variable warning */, \ - (old_connector_state) = (__state)->connectors[__i].old_state, \ - (new_connector_state) = (__state)->connectors[__i].new_state, 1)) - -/** - * for_each_old_connector_in_state - iterate over all connectors in an atomic update - * @__state: &struct drm_atomic_state pointer - * @connector: &struct drm_connector iteration cursor - * @old_connector_state: &struct drm_connector_state iteration cursor for the - * old state - * @__i: int iteration cursor, for macro-internal use - * - * This iterates over all connectors in an atomic update, tracking only the old - * state. This is useful in disable functions, where we need the old state the - * hardware is still in. 
- */ -#define for_each_old_connector_in_state(__state, connector, old_connector_state, __i) \ - for ((__i) = 0; \ - (__i) < (__state)->num_connector; \ - (__i)++) \ - for_each_if ((__state)->connectors[__i].ptr && \ - ((connector) = (__state)->connectors[__i].ptr, \ - (void)(connector) /* Only to avoid unused-but-set-variable warning */, \ - (old_connector_state) = (__state)->connectors[__i].old_state, 1)) - -/** - * for_each_new_connector_in_state - iterate over all connectors in an atomic update - * @__state: &struct drm_atomic_state pointer - * @connector: &struct drm_connector iteration cursor - * @new_connector_state: &struct drm_connector_state iteration cursor for the - * new state - * @__i: int iteration cursor, for macro-internal use - * - * This iterates over all connectors in an atomic update, tracking only the new - * state. This is useful in enable functions, where we need the new state the - * hardware should be in when the atomic commit operation has completed. - */ -#define for_each_new_connector_in_state(__state, connector, new_connector_state, __i) \ - for ((__i) = 0; \ - (__i) < (__state)->num_connector; \ - (__i)++) \ - for_each_if ((__state)->connectors[__i].ptr && \ - ((connector) = (__state)->connectors[__i].ptr, \ - (void)(connector) /* Only to avoid unused-but-set-variable warning */, \ - (new_connector_state) = (__state)->connectors[__i].new_state, \ - (void)(new_connector_state) /* Only to avoid unused-but-set-variable warning */, 1)) - -/** - * for_each_oldnew_crtc_in_state - iterate over all CRTCs in an atomic update - * @__state: &struct drm_atomic_state pointer - * @crtc: &struct drm_crtc iteration cursor - * @old_crtc_state: &struct drm_crtc_state iteration cursor for the old state - * @new_crtc_state: &struct drm_crtc_state iteration cursor for the new state - * @__i: int iteration cursor, for macro-internal use - * - * This iterates over all CRTCs in an atomic update, tracking both old and - * new state. 
This is useful in places where the state delta needs to be - * considered, for example in atomic check functions. - */ -#define for_each_oldnew_crtc_in_state(__state, crtc, old_crtc_state, new_crtc_state, __i) \ +#define for_each_connector_in_state(__state, connector, connector_state, __i) \ for ((__i) = 0; \ - (__i) < (__state)->dev->mode_config.num_crtc; \ + (__i) < (__state)->num_connector && \ + ((connector) = (__state)->connectors[__i].ptr, \ + (connector_state) = (__state)->connectors[__i].state, 1); \ (__i)++) \ - for_each_if ((__state)->crtcs[__i].ptr && \ - ((crtc) = (__state)->crtcs[__i].ptr, \ - (void)(crtc) /* Only to avoid unused-but-set-variable warning */, \ - (old_crtc_state) = (__state)->crtcs[__i].old_state, \ - (void)(old_crtc_state) /* Only to avoid unused-but-set-variable warning */, \ - (new_crtc_state) = (__state)->crtcs[__i].new_state, \ - (void)(new_crtc_state) /* Only to avoid unused-but-set-variable warning */, 1)) + for_each_if (connector) -/** - * for_each_old_crtc_in_state - iterate over all CRTCs in an atomic update - * @__state: &struct drm_atomic_state pointer - * @crtc: &struct drm_crtc iteration cursor - * @old_crtc_state: &struct drm_crtc_state iteration cursor for the old state - * @__i: int iteration cursor, for macro-internal use - * - * This iterates over all CRTCs in an atomic update, tracking only the old - * state. This is useful in disable functions, where we need the old state the - * hardware is still in. 
- */ -#define for_each_old_crtc_in_state(__state, crtc, old_crtc_state, __i) \ +#define for_each_crtc_in_state(__state, crtc, crtc_state, __i) \ + for ((__i) = 0; \ + (__i) < (__state)->dev->mode_config.num_crtc && \ + ((crtc) = (__state)->crtcs[__i].ptr, \ + (crtc_state) = (__state)->crtcs[__i].state, 1); \ + (__i)++) \ + for_each_if (crtc_state) + +#define for_each_plane_in_state(__state, plane, plane_state, __i) \ for ((__i) = 0; \ - (__i) < (__state)->dev->mode_config.num_crtc; \ + (__i) < (__state)->dev->mode_config.num_total_plane && \ + ((plane) = (__state)->planes[__i].ptr, \ + (plane_state) = (__state)->planes[__i].state, 1); \ (__i)++) \ - for_each_if ((__state)->crtcs[__i].ptr && \ - ((crtc) = (__state)->crtcs[__i].ptr, \ - (void)(crtc) /* Only to avoid unused-but-set-variable warning */, \ - (old_crtc_state) = (__state)->crtcs[__i].old_state, 1)) - -/** - * for_each_new_crtc_in_state - iterate over all CRTCs in an atomic update - * @__state: &struct drm_atomic_state pointer - * @crtc: &struct drm_crtc iteration cursor - * @new_crtc_state: &struct drm_crtc_state iteration cursor for the new state - * @__i: int iteration cursor, for macro-internal use - * - * This iterates over all CRTCs in an atomic update, tracking only the new - * state. This is useful in enable functions, where we need the new state the - * hardware should be in when the atomic commit operation has completed. 
- */ -#define for_each_new_crtc_in_state(__state, crtc, new_crtc_state, __i) \ - for ((__i) = 0; \ - (__i) < (__state)->dev->mode_config.num_crtc; \ - (__i)++) \ - for_each_if ((__state)->crtcs[__i].ptr && \ - ((crtc) = (__state)->crtcs[__i].ptr, \ - (void)(crtc) /* Only to avoid unused-but-set-variable warning */, \ - (new_crtc_state) = (__state)->crtcs[__i].new_state, \ - (void)(new_crtc_state) /* Only to avoid unused-but-set-variable warning */, 1)) - -/** - * for_each_oldnew_plane_in_state - iterate over all planes in an atomic update - * @__state: &struct drm_atomic_state pointer - * @plane: &struct drm_plane iteration cursor - * @old_plane_state: &struct drm_plane_state iteration cursor for the old state - * @new_plane_state: &struct drm_plane_state iteration cursor for the new state - * @__i: int iteration cursor, for macro-internal use - * - * This iterates over all planes in an atomic update, tracking both old and - * new state. This is useful in places where the state delta needs to be - * considered, for example in atomic check functions. 
- */ -#define for_each_oldnew_plane_in_state(__state, plane, old_plane_state, new_plane_state, __i) \ - for ((__i) = 0; \ - (__i) < (__state)->dev->mode_config.num_total_plane; \ - (__i)++) \ - for_each_if ((__state)->planes[__i].ptr && \ - ((plane) = (__state)->planes[__i].ptr, \ - (void)(plane) /* Only to avoid unused-but-set-variable warning */, \ - (old_plane_state) = (__state)->planes[__i].old_state,\ - (new_plane_state) = (__state)->planes[__i].new_state, 1)) - -/** - * for_each_oldnew_plane_in_state_reverse - iterate over all planes in an atomic - * update in reverse order - * @__state: &struct drm_atomic_state pointer - * @plane: &struct drm_plane iteration cursor - * @old_plane_state: &struct drm_plane_state iteration cursor for the old state - * @new_plane_state: &struct drm_plane_state iteration cursor for the new state - * @__i: int iteration cursor, for macro-internal use - * - * This iterates over all planes in an atomic update in reverse order, - * tracking both old and new state. This is useful in places where the - * state delta needs to be considered, for example in atomic check functions. 
- */ -#define for_each_oldnew_plane_in_state_reverse(__state, plane, old_plane_state, new_plane_state, __i) \ - for ((__i) = ((__state)->dev->mode_config.num_total_plane - 1); \ - (__i) >= 0; \ - (__i)--) \ - for_each_if ((__state)->planes[__i].ptr && \ - ((plane) = (__state)->planes[__i].ptr, \ - (old_plane_state) = (__state)->planes[__i].old_state,\ - (new_plane_state) = (__state)->planes[__i].new_state, 1)) - -/** - * for_each_new_plane_in_state_reverse - other than only tracking new state, - * it's the same as for_each_oldnew_plane_in_state_reverse - * @__state: &struct drm_atomic_state pointer - * @plane: &struct drm_plane iteration cursor - * @new_plane_state: &struct drm_plane_state iteration cursor for the new state - * @__i: int iteration cursor, for macro-internal use - */ -#define for_each_new_plane_in_state_reverse(__state, plane, new_plane_state, __i) \ - for ((__i) = ((__state)->dev->mode_config.num_total_plane - 1); \ - (__i) >= 0; \ - (__i)--) \ - for_each_if ((__state)->planes[__i].ptr && \ - ((plane) = (__state)->planes[__i].ptr, \ - (new_plane_state) = (__state)->planes[__i].new_state, 1)) - -/** - * for_each_old_plane_in_state - iterate over all planes in an atomic update - * @__state: &struct drm_atomic_state pointer - * @plane: &struct drm_plane iteration cursor - * @old_plane_state: &struct drm_plane_state iteration cursor for the old state - * @__i: int iteration cursor, for macro-internal use - * - * This iterates over all planes in an atomic update, tracking only the old - * state. This is useful in disable functions, where we need the old state the - * hardware is still in. 
- */ -#define for_each_old_plane_in_state(__state, plane, old_plane_state, __i) \ - for ((__i) = 0; \ - (__i) < (__state)->dev->mode_config.num_total_plane; \ - (__i)++) \ - for_each_if ((__state)->planes[__i].ptr && \ - ((plane) = (__state)->planes[__i].ptr, \ - (old_plane_state) = (__state)->planes[__i].old_state, 1)) -/** - * for_each_new_plane_in_state - iterate over all planes in an atomic update - * @__state: &struct drm_atomic_state pointer - * @plane: &struct drm_plane iteration cursor - * @new_plane_state: &struct drm_plane_state iteration cursor for the new state - * @__i: int iteration cursor, for macro-internal use - * - * This iterates over all planes in an atomic update, tracking only the new - * state. This is useful in enable functions, where we need the new state the - * hardware should be in when the atomic commit operation has completed. - */ -#define for_each_new_plane_in_state(__state, plane, new_plane_state, __i) \ - for ((__i) = 0; \ - (__i) < (__state)->dev->mode_config.num_total_plane; \ - (__i)++) \ - for_each_if ((__state)->planes[__i].ptr && \ - ((plane) = (__state)->planes[__i].ptr, \ - (void)(plane) /* Only to avoid unused-but-set-variable warning */, \ - (new_plane_state) = (__state)->planes[__i].new_state, \ - (void)(new_plane_state) /* Only to avoid unused-but-set-variable warning */, 1)) - -/** - * for_each_oldnew_private_obj_in_state - iterate over all private objects in an atomic update - * @__state: &struct drm_atomic_state pointer - * @obj: &struct drm_private_obj iteration cursor - * @old_obj_state: &struct drm_private_state iteration cursor for the old state - * @new_obj_state: &struct drm_private_state iteration cursor for the new state - * @__i: int iteration cursor, for macro-internal use - * - * This iterates over all private objects in an atomic update, tracking both - * old and new state. This is useful in places where the state delta needs - * to be considered, for example in atomic check functions. 
- */ -#define for_each_oldnew_private_obj_in_state(__state, obj, old_obj_state, new_obj_state, __i) \ - for ((__i) = 0; \ - (__i) < (__state)->num_private_objs && \ - ((obj) = (__state)->private_objs[__i].ptr, \ - (old_obj_state) = (__state)->private_objs[__i].old_state, \ - (new_obj_state) = (__state)->private_objs[__i].new_state, 1); \ - (__i)++) - -/** - * for_each_old_private_obj_in_state - iterate over all private objects in an atomic update - * @__state: &struct drm_atomic_state pointer - * @obj: &struct drm_private_obj iteration cursor - * @old_obj_state: &struct drm_private_state iteration cursor for the old state - * @__i: int iteration cursor, for macro-internal use - * - * This iterates over all private objects in an atomic update, tracking only - * the old state. This is useful in disable functions, where we need the old - * state the hardware is still in. - */ -#define for_each_old_private_obj_in_state(__state, obj, old_obj_state, __i) \ - for ((__i) = 0; \ - (__i) < (__state)->num_private_objs && \ - ((obj) = (__state)->private_objs[__i].ptr, \ - (old_obj_state) = (__state)->private_objs[__i].old_state, 1); \ - (__i)++) - -/** - * for_each_new_private_obj_in_state - iterate over all private objects in an atomic update - * @__state: &struct drm_atomic_state pointer - * @obj: &struct drm_private_obj iteration cursor - * @new_obj_state: &struct drm_private_state iteration cursor for the new state - * @__i: int iteration cursor, for macro-internal use - * - * This iterates over all private objects in an atomic update, tracking only - * the new state. This is useful in enable functions, where we need the new state the - * hardware should be in when the atomic commit operation has completed. 
- */ -#define for_each_new_private_obj_in_state(__state, obj, new_obj_state, __i) \ - for ((__i) = 0; \ - (__i) < (__state)->num_private_objs && \ - ((obj) = (__state)->private_objs[__i].ptr, \ - (new_obj_state) = (__state)->private_objs[__i].new_state, 1); \ - (__i)++) + for_each_if (plane_state) /** * drm_atomic_crtc_needs_modeset - compute combined modeset need * @state: &drm_crtc_state for the CRTC * - * To give drivers flexibility &struct drm_crtc_state has 3 booleans to track + * To give drivers flexibility struct &drm_crtc_state has 3 booleans to track * whether the state CRTC changed enough to need a full modeset cycle: - * mode_changed, active_changed and connectors_changed. This helper simply + * connectors_changed, mode_changed and active_changed. This helper simply * combines these three to compute the overall need for a modeset for @state. - * - * The atomic helper code sets these booleans, but drivers can and should - * change them appropriately to accurately represent whether a modeset is - * really needed. In general, drivers should avoid full modesets whenever - * possible. - * - * For example if the CRTC mode has changed, and the hardware is able to enact - * the requested mode change without going through a full modeset, the driver - * should clear mode_changed in its &drm_mode_config_funcs.atomic_check - * implementation. */ static inline bool -drm_atomic_crtc_needs_modeset(const struct drm_crtc_state *state) +drm_atomic_crtc_needs_modeset(struct drm_crtc_state *state) { return state->mode_changed || state->active_changed || state->connectors_changed; } -/** - * drm_atomic_crtc_effectively_active - compute whether CRTC is actually active - * @state: &drm_crtc_state for the CRTC - * - * When in self refresh mode, the crtc_state->active value will be false, since - * the CRTC is off. However in some cases we're interested in whether the CRTC - * is active, or effectively active (ie: it's connected to an active display). 
- * In these cases, use this function instead of just checking active. - */ -static inline bool -drm_atomic_crtc_effectively_active(const struct drm_crtc_state *state) -{ - return state->active || state->self_refresh_active; -} - -/** - * struct drm_bus_cfg - bus configuration - * - * This structure stores the configuration of a physical bus between two - * components in an output pipeline, usually between two bridges, an encoder - * and a bridge, or a bridge and a connector. - * - * The bus configuration is stored in &drm_bridge_state separately for the - * input and output buses, as seen from the point of view of each bridge. The - * bus configuration of a bridge output is usually identical to the - * configuration of the next bridge's input, but may differ if the signals are - * modified between the two bridges, for instance by an inverter on the board. - * The input and output configurations of a bridge may differ if the bridge - * modifies the signals internally, for instance by performing format - * conversion, or modifying signals polarities. - */ -struct drm_bus_cfg { - /** - * @format: format used on this bus (one of the MEDIA_BUS_FMT_* format) - * - * This field should not be directly modified by drivers - * (drm_atomic_bridge_chain_select_bus_fmts() takes care of the bus - * format negotiation). 
- */ - u32 format; - - /** - * @flags: DRM_BUS_* flags used on this bus - */ - u32 flags; -}; - -/** - * struct drm_bridge_state - Atomic bridge state object - */ -struct drm_bridge_state { - /** - * @base: inherit from &drm_private_state - */ - struct drm_private_state base; - - /** - * @bridge: the bridge this state refers to - */ - struct drm_bridge *bridge; - - /** - * @input_bus_cfg: input bus configuration - */ - struct drm_bus_cfg input_bus_cfg; - - /** - * @output_bus_cfg: input bus configuration - */ - struct drm_bus_cfg output_bus_cfg; -}; - -static inline struct drm_bridge_state * -drm_priv_to_bridge_state(struct drm_private_state *priv) -{ - return container_of(priv, struct drm_bridge_state, base); -} - -struct drm_bridge_state * -drm_atomic_get_bridge_state(struct drm_atomic_state *state, - struct drm_bridge *bridge); -struct drm_bridge_state * -drm_atomic_get_old_bridge_state(struct drm_atomic_state *state, - struct drm_bridge *bridge); -struct drm_bridge_state * -drm_atomic_get_new_bridge_state(struct drm_atomic_state *state, - struct drm_bridge *bridge); #endif /* DRM_ATOMIC_H_ */ diff --git a/include/drm/drm_atomic_helper.h b/include/drm/drm_atomic_helper.h index 4045e2507e..7ff92b09fd 100644 --- a/include/drm/drm_atomic_helper.h +++ b/include/drm/drm_atomic_helper.h @@ -31,52 +31,34 @@ #include #include #include -#include -#include struct drm_atomic_state; -struct drm_private_obj; -struct drm_private_state; int drm_atomic_helper_check_modeset(struct drm_device *dev, struct drm_atomic_state *state); -int drm_atomic_helper_check_plane_state(struct drm_plane_state *plane_state, - const struct drm_crtc_state *crtc_state, - int min_scale, - int max_scale, - bool can_position, - bool can_update_disabled); int drm_atomic_helper_check_planes(struct drm_device *dev, struct drm_atomic_state *state); int drm_atomic_helper_check(struct drm_device *dev, struct drm_atomic_state *state); void drm_atomic_helper_commit_tail(struct drm_atomic_state *state); -void 
drm_atomic_helper_commit_tail_rpm(struct drm_atomic_state *state); int drm_atomic_helper_commit(struct drm_device *dev, struct drm_atomic_state *state, bool nonblock); -int drm_atomic_helper_async_check(struct drm_device *dev, - struct drm_atomic_state *state); -void drm_atomic_helper_async_commit(struct drm_device *dev, - struct drm_atomic_state *state); int drm_atomic_helper_wait_for_fences(struct drm_device *dev, struct drm_atomic_state *state, bool pre_swap); +bool drm_atomic_helper_framebuffer_changed(struct drm_device *dev, + struct drm_atomic_state *old_state, + struct drm_crtc *crtc); void drm_atomic_helper_wait_for_vblanks(struct drm_device *dev, struct drm_atomic_state *old_state); -void drm_atomic_helper_wait_for_flip_done(struct drm_device *dev, - struct drm_atomic_state *old_state); - void drm_atomic_helper_update_legacy_modeset_state(struct drm_device *dev, struct drm_atomic_state *old_state); -void -drm_atomic_helper_calc_timestamping_constants(struct drm_atomic_state *state); - void drm_atomic_helper_commit_modeset_disables(struct drm_device *dev, struct drm_atomic_state *state); void drm_atomic_helper_commit_modeset_enables(struct drm_device *dev, @@ -98,14 +80,13 @@ void drm_atomic_helper_disable_planes_on_crtc(struct drm_crtc_state *old_crtc_state, bool atomic); -int __must_check drm_atomic_helper_swap_state(struct drm_atomic_state *state, - bool stall); +void drm_atomic_helper_swap_state(struct drm_atomic_state *state, + bool stall); /* nonblocking commit helpers */ int drm_atomic_helper_setup_commit(struct drm_atomic_state *state, bool nonblock); void drm_atomic_helper_wait_for_dependencies(struct drm_atomic_state *state); -void drm_atomic_helper_fake_vblank(struct drm_atomic_state *state); void drm_atomic_helper_commit_hw_done(struct drm_atomic_state *state); void drm_atomic_helper_commit_cleanup_done(struct drm_atomic_state *state); @@ -116,79 +97,114 @@ int drm_atomic_helper_update_plane(struct drm_plane *plane, int crtc_x, int crtc_y, 
unsigned int crtc_w, unsigned int crtc_h, uint32_t src_x, uint32_t src_y, - uint32_t src_w, uint32_t src_h, - struct drm_modeset_acquire_ctx *ctx); -int drm_atomic_helper_disable_plane(struct drm_plane *plane, - struct drm_modeset_acquire_ctx *ctx); -int drm_atomic_helper_set_config(struct drm_mode_set *set, - struct drm_modeset_acquire_ctx *ctx); + uint32_t src_w, uint32_t src_h); +int drm_atomic_helper_disable_plane(struct drm_plane *plane); +int __drm_atomic_helper_disable_plane(struct drm_plane *plane, + struct drm_plane_state *plane_state); +int drm_atomic_helper_set_config(struct drm_mode_set *set); +int __drm_atomic_helper_set_config(struct drm_mode_set *set, + struct drm_atomic_state *state); int drm_atomic_helper_disable_all(struct drm_device *dev, struct drm_modeset_acquire_ctx *ctx); -void drm_atomic_helper_shutdown(struct drm_device *dev); -struct drm_atomic_state * -drm_atomic_helper_duplicate_state(struct drm_device *dev, - struct drm_modeset_acquire_ctx *ctx); struct drm_atomic_state *drm_atomic_helper_suspend(struct drm_device *dev); -int drm_atomic_helper_commit_duplicated_state(struct drm_atomic_state *state, - struct drm_modeset_acquire_ctx *ctx); int drm_atomic_helper_resume(struct drm_device *dev, struct drm_atomic_state *state); +int drm_atomic_helper_crtc_set_property(struct drm_crtc *crtc, + struct drm_property *property, + uint64_t val); +int drm_atomic_helper_plane_set_property(struct drm_plane *plane, + struct drm_property *property, + uint64_t val); +int drm_atomic_helper_connector_set_property(struct drm_connector *connector, + struct drm_property *property, + uint64_t val); int drm_atomic_helper_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb, struct drm_pending_vblank_event *event, - uint32_t flags, - struct drm_modeset_acquire_ctx *ctx); -int drm_atomic_helper_page_flip_target( - struct drm_crtc *crtc, - struct drm_framebuffer *fb, - struct drm_pending_vblank_event *event, - uint32_t flags, - uint32_t target, - struct 
drm_modeset_acquire_ctx *ctx); + uint32_t flags); +int drm_atomic_helper_connector_dpms(struct drm_connector *connector, + int mode); +struct drm_encoder * +drm_atomic_helper_best_encoder(struct drm_connector *connector); + +/* default implementations for state handling */ +void drm_atomic_helper_crtc_reset(struct drm_crtc *crtc); +void __drm_atomic_helper_crtc_duplicate_state(struct drm_crtc *crtc, + struct drm_crtc_state *state); +struct drm_crtc_state * +drm_atomic_helper_crtc_duplicate_state(struct drm_crtc *crtc); +void __drm_atomic_helper_crtc_destroy_state(struct drm_crtc_state *state); +void drm_atomic_helper_crtc_destroy_state(struct drm_crtc *crtc, + struct drm_crtc_state *state); + +void drm_atomic_helper_plane_reset(struct drm_plane *plane); +void __drm_atomic_helper_plane_duplicate_state(struct drm_plane *plane, + struct drm_plane_state *state); +struct drm_plane_state * +drm_atomic_helper_plane_duplicate_state(struct drm_plane *plane); +void __drm_atomic_helper_plane_destroy_state(struct drm_plane_state *state); +void drm_atomic_helper_plane_destroy_state(struct drm_plane *plane, + struct drm_plane_state *state); + +void __drm_atomic_helper_connector_reset(struct drm_connector *connector, + struct drm_connector_state *conn_state); +void drm_atomic_helper_connector_reset(struct drm_connector *connector); +void +__drm_atomic_helper_connector_duplicate_state(struct drm_connector *connector, + struct drm_connector_state *state); +struct drm_connector_state * +drm_atomic_helper_connector_duplicate_state(struct drm_connector *connector); +struct drm_atomic_state * +drm_atomic_helper_duplicate_state(struct drm_device *dev, + struct drm_modeset_acquire_ctx *ctx); +void +__drm_atomic_helper_connector_destroy_state(struct drm_connector_state *state); +void drm_atomic_helper_connector_destroy_state(struct drm_connector *connector, + struct drm_connector_state *state); +int drm_atomic_helper_legacy_gamma_set(struct drm_crtc *crtc, + u16 *red, u16 *green, u16 
*blue, + uint32_t size); /** * drm_atomic_crtc_for_each_plane - iterate over planes currently attached to CRTC * @plane: the loop cursor - * @crtc: the CRTC whose planes are iterated + * @crtc: the crtc whose planes are iterated * * This iterates over the current state, useful (for example) when applying * atomic state after it has been checked and swapped. To iterate over the - * planes which *will* be attached (more useful in code called from - * &drm_mode_config_funcs.atomic_check) see + * planes which *will* be attached (for ->atomic_check()) see * drm_atomic_crtc_state_for_each_plane(). */ #define drm_atomic_crtc_for_each_plane(plane, crtc) \ drm_for_each_plane_mask(plane, (crtc)->dev, (crtc)->state->plane_mask) /** - * drm_atomic_crtc_state_for_each_plane - iterate over attached planes in new state + * drm_crtc_atomic_state_for_each_plane - iterate over attached planes in new state * @plane: the loop cursor - * @crtc_state: the incoming CRTC state + * @crtc_state: the incoming crtc-state * * Similar to drm_crtc_for_each_plane(), but iterates the planes that will be - * attached if the specified state is applied. Useful during for example - * in code called from &drm_mode_config_funcs.atomic_check operations, to - * validate the incoming state. + * attached if the specified state is applied. Useful during (for example) + * ->atomic_check() operations, to validate the incoming state. 
*/ #define drm_atomic_crtc_state_for_each_plane(plane, crtc_state) \ drm_for_each_plane_mask(plane, (crtc_state)->state->dev, (crtc_state)->plane_mask) /** - * drm_atomic_crtc_state_for_each_plane_state - iterate over attached planes in new state + * drm_crtc_atomic_state_for_each_plane_state - iterate over attached planes in new state * @plane: the loop cursor * @plane_state: loop cursor for the plane's state, must be const - * @crtc_state: the incoming CRTC state + * @crtc_state: the incoming crtc-state * * Similar to drm_crtc_for_each_plane(), but iterates the planes that will be - * attached if the specified state is applied. Useful during for example - * in code called from &drm_mode_config_funcs.atomic_check operations, to - * validate the incoming state. + * attached if the specified state is applied. Useful during (for example) + * ->atomic_check() operations, to validate the incoming state. * * Compared to just drm_atomic_crtc_state_for_each_plane() this also fills in a * const plane_state. This is useful when a driver just wants to peek at other - * active planes on this CRTC, but does not need to change it. + * active planes on this crtc, but does not need to change it. */ #define drm_atomic_crtc_state_for_each_plane_state(plane, plane_state, crtc_state) \ drm_for_each_plane_mask(plane, (crtc_state)->state->dev, (crtc_state)->plane_mask) \ @@ -196,10 +212,10 @@ int drm_atomic_helper_page_flip_target( __drm_atomic_get_current_plane_state((crtc_state)->state, \ plane))) -/** +/* * drm_atomic_plane_disabling - check whether a plane is being disabled - * @old_plane_state: old atomic plane state - * @new_plane_state: new atomic plane state + * @plane: plane object + * @old_state: previous atomic state * * Checks the atomic state of a plane to determine whether it's being disabled * or not. 
This also WARNs if it detects an invalid state (both CRTC and FB @@ -209,26 +225,28 @@ int drm_atomic_helper_page_flip_target( * True if the plane is being disabled, false otherwise. */ static inline bool -drm_atomic_plane_disabling(struct drm_plane_state *old_plane_state, - struct drm_plane_state *new_plane_state) +drm_atomic_plane_disabling(struct drm_plane *plane, + struct drm_plane_state *old_state) { /* * When disabling a plane, CRTC and FB should always be NULL together. * Anything else should be considered a bug in the atomic core, so we * gently warn about it. */ - WARN_ON((new_plane_state->crtc == NULL && new_plane_state->fb != NULL) || - (new_plane_state->crtc != NULL && new_plane_state->fb == NULL)); + WARN_ON((plane->state->crtc == NULL && plane->state->fb != NULL) || + (plane->state->crtc != NULL && plane->state->fb == NULL)); - return old_plane_state->crtc && !new_plane_state->crtc; + /* + * When using the transitional helpers, old_state may be NULL. If so, + * we know nothing about the current state and have to assume that it + * might be enabled. + * + * When using the atomic helpers, old_state won't be NULL. Therefore + * this check assumes that either the driver will have reconstructed + * the correct state in ->reset() or that the driver will have taken + * appropriate measures to disable all planes. 
+ */ + return (!old_state || old_state->crtc) && !plane->state->crtc; } -u32 * -drm_atomic_helper_bridge_propagate_bus_fmt(struct drm_bridge *bridge, - struct drm_bridge_state *bridge_state, - struct drm_crtc_state *crtc_state, - struct drm_connector_state *conn_state, - u32 output_fmt, - unsigned int *num_input_fmts); - #endif /* DRM_ATOMIC_HELPER_H_ */ diff --git a/include/drm/drm_auth.h b/include/drm/drm_auth.h index ba248ca886..610223b048 100644 --- a/include/drm/drm_auth.h +++ b/include/drm/drm_auth.h @@ -1,6 +1,3 @@ -#ifndef _DRM_AUTH_H_ -#define _DRM_AUTH_H_ - /* * Internal Header for the Direct Rendering Manager * @@ -28,35 +25,18 @@ * OTHER DEALINGS IN THE SOFTWARE. */ -#include -#include -#include - -struct drm_file; -struct drm_hw_lock; - -/* - * Legacy DRI1 locking data structure. Only here instead of in drm_legacy.h for - * include ordering reasons. - * - * DO NOT USE. - */ -struct drm_lock_data { - struct drm_hw_lock *hw_lock; - struct drm_file *file_priv; - wait_queue_head_t lock_queue; - unsigned long lock_time; - spinlock_t spinlock; - uint32_t kernel_waiters; - uint32_t user_waiters; - int idle_has_lock; -}; +#ifndef _DRM_AUTH_H_ +#define _DRM_AUTH_H_ /** * struct drm_master - drm master structure * * @refcount: Refcount for this master object. * @dev: Link back to the DRM device + * @unique: Unique identifier: e.g. busid. Protected by drm_global_mutex. + * @unique_len: Length of unique field. Protected by drm_global_mutex. + * @magic_map: Map of used authentication tokens. Protected by struct_mutex. + * @lock: DRI lock information. * @driver_priv: Pointer to driver-private information. * * Note that master structures are only relevant for the legacy/primary device @@ -65,97 +45,15 @@ struct drm_lock_data { struct drm_master { struct kref refcount; struct drm_device *dev; - /** - * @unique: Unique identifier: e.g. busid. Protected by - * &drm_device.master_mutex. - */ char *unique; - /** - * @unique_len: Length of unique field. 
Protected by - * &drm_device.master_mutex. - */ int unique_len; - /** - * @magic_map: Map of used authentication tokens. Protected by - * &drm_device.master_mutex. - */ struct idr magic_map; - void *driver_priv; - - /** - * @lessor: - * - * Lease grantor, only set if this &struct drm_master represents a - * lessee holding a lease of objects from @lessor. Full owners of the - * device have this set to NULL. - * - * The lessor does not change once it's set in drm_lease_create(), and - * each lessee holds a reference to its lessor that it releases upon - * being destroyed in drm_lease_destroy(). - * - * See also the :ref:`section on display resource leasing - * `. - */ - struct drm_master *lessor; - - /** - * @lessee_id: - * - * ID for lessees. Owners (i.e. @lessor is NULL) always have ID 0. - * Protected by &drm_device.mode_config's &drm_mode_config.idr_mutex. - */ - int lessee_id; - - /** - * @lessee_list: - * - * List entry of lessees of @lessor, where they are linked to @lessees. - * Not used for owners. Protected by &drm_device.mode_config's - * &drm_mode_config.idr_mutex. - */ - struct list_head lessee_list; - - /** - * @lessees: - * - * List of drm_masters leasing from this one. Protected by - * &drm_device.mode_config's &drm_mode_config.idr_mutex. - * - * This list is empty if no leases have been granted, or if all lessees - * have been destroyed. Since lessors are referenced by all their - * lessees, this master cannot be destroyed unless the list is empty. - */ - struct list_head lessees; - - /** - * @leases: - * - * Objects leased to this drm_master. Protected by - * &drm_device.mode_config's &drm_mode_config.idr_mutex. - * - * Objects are leased all together in drm_lease_create(), and are - * removed all together when the lease is revoked. - */ - struct idr leases; - - /** - * @lessee_idr: - * - * All lessees under this owner (only used where @lessor is NULL). - * Protected by &drm_device.mode_config's &drm_mode_config.idr_mutex. 
- */ - struct idr lessee_idr; - /* private: */ -#if IS_ENABLED(CONFIG_DRM_LEGACY) struct drm_lock_data lock; -#endif + void *driver_priv; }; struct drm_master *drm_master_get(struct drm_master *master); -struct drm_master *drm_file_get_master(struct drm_file *file_priv); void drm_master_put(struct drm_master **master); bool drm_is_current_master(struct drm_file *fpriv); -struct drm_master *drm_master_create(struct drm_device *dev); - #endif diff --git a/include/drm/drm_blend.h b/include/drm/drm_blend.h index 88bdfec3bd..36baa175de 100644 --- a/include/drm/drm_blend.h +++ b/include/drm/drm_blend.h @@ -25,27 +25,30 @@ #include #include -#include - -#define DRM_MODE_BLEND_PREMULTI 0 -#define DRM_MODE_BLEND_COVERAGE 1 -#define DRM_MODE_BLEND_PIXEL_NONE 2 struct drm_device; struct drm_atomic_state; -struct drm_plane; -static inline bool drm_rotation_90_or_270(unsigned int rotation) -{ - return rotation & (DRM_MODE_ROTATE_90 | DRM_MODE_ROTATE_270); -} +/* + * Rotation property bits. DRM_ROTATE_ rotates the image by the + * specified amount in degrees in counter clockwise direction. DRM_REFLECT_X and + * DRM_REFLECT_Y reflects the image along the specified axis prior to rotation + * + * WARNING: These defines are UABI since they're exposed in the rotation + * property. 
+ */ +#define DRM_ROTATE_0 BIT(0) +#define DRM_ROTATE_90 BIT(1) +#define DRM_ROTATE_180 BIT(2) +#define DRM_ROTATE_270 BIT(3) +#define DRM_ROTATE_MASK (DRM_ROTATE_0 | DRM_ROTATE_90 | \ + DRM_ROTATE_180 | DRM_ROTATE_270) +#define DRM_REFLECT_X BIT(4) +#define DRM_REFLECT_Y BIT(5) +#define DRM_REFLECT_MASK (DRM_REFLECT_X | DRM_REFLECT_Y) -#define DRM_BLEND_ALPHA_OPAQUE 0xffff - -int drm_plane_create_alpha_property(struct drm_plane *plane); -int drm_plane_create_rotation_property(struct drm_plane *plane, - unsigned int rotation, - unsigned int supported_rotations); +struct drm_property *drm_mode_create_rotation_property(struct drm_device *dev, + unsigned int supported_rotations); unsigned int drm_rotation_simplify(unsigned int rotation, unsigned int supported_rotations); @@ -56,6 +59,4 @@ int drm_plane_create_zpos_immutable_property(struct drm_plane *plane, unsigned int zpos); int drm_atomic_normalize_zpos(struct drm_device *dev, struct drm_atomic_state *state); -int drm_plane_create_blend_mode_property(struct drm_plane *plane, - unsigned int supported_modes); #endif diff --git a/include/drm/drm_bridge.h b/include/drm/drm_bridge.h index 46bdfa48c4..530a1d6e8c 100644 --- a/include/drm/drm_bridge.h +++ b/include/drm/drm_bridge.h @@ -23,33 +23,12 @@ #ifndef __DRM_BRIDGE_H__ #define __DRM_BRIDGE_H__ -#include #include -#include - -#include -#include +#include #include #include struct drm_bridge; -struct drm_bridge_timings; -struct drm_connector; -struct drm_display_info; -struct drm_panel; -struct edid; -struct i2c_adapter; - -/** - * enum drm_bridge_attach_flags - Flags for &drm_bridge_funcs.attach - */ -enum drm_bridge_attach_flags { - /** - * @DRM_BRIDGE_ATTACH_NO_CONNECTOR: When this flag is set the bridge - * shall not create a drm_connector. 
- */ - DRM_BRIDGE_ATTACH_NO_CONNECTOR = BIT(0), -}; /** * struct drm_bridge_funcs - drm_bridge control functions @@ -59,17 +38,15 @@ struct drm_bridge_funcs { * @attach: * * This callback is invoked whenever our bridge is being attached to a - * &drm_encoder. The flags argument tunes the behaviour of the attach - * operation (see DRM_BRIDGE_ATTACH_*). + * &drm_encoder. * - * The @attach callback is optional. + * The attach callback is optional. * * RETURNS: * * Zero on success, error code on failure. */ - int (*attach)(struct drm_bridge *bridge, - enum drm_bridge_attach_flags flags); + int (*attach)(struct drm_bridge *bridge); /** * @detach: @@ -77,62 +54,25 @@ struct drm_bridge_funcs { * This callback is invoked whenever our bridge is being detached from a * &drm_encoder. * - * The @detach callback is optional. + * The detach callback is optional. */ void (*detach)(struct drm_bridge *bridge); - /** - * @mode_valid: - * - * This callback is used to check if a specific mode is valid in this - * bridge. This should be implemented if the bridge has some sort of - * restriction in the modes it can display. For example, a given bridge - * may be responsible to set a clock value. If the clock can not - * produce all the values for the available modes then this callback - * can be used to restrict the number of modes to only the ones that - * can be displayed. - * - * This hook is used by the probe helpers to filter the mode list in - * drm_helper_probe_single_connector_modes(), and it is used by the - * atomic helpers to validate modes supplied by userspace in - * drm_atomic_helper_check_modeset(). - * - * The @mode_valid callback is optional. - * - * NOTE: - * - * Since this function is both called from the check phase of an atomic - * commit, and the mode validation in the probe paths it is not allowed - * to look at anything else but the passed-in mode, and validate it - * against configuration-invariant hardward constraints. 
Any further - * limits which depend upon the configuration can only be checked in - * @mode_fixup. - * - * RETURNS: - * - * drm_mode_status Enum - */ - enum drm_mode_status (*mode_valid)(struct drm_bridge *bridge, - const struct drm_display_info *info, - const struct drm_display_mode *mode); - /** * @mode_fixup: * - * This callback is used to validate and adjust a mode. The parameter + * This callback is used to validate and adjust a mode. The parameter * mode is the display mode that should be fed to the next element in * the display chain, either the final &drm_connector or the next * &drm_bridge. The parameter adjusted_mode is the input mode the bridge * requires. It can be modified by this callback and does not need to - * match mode. See also &drm_crtc_state.adjusted_mode for more details. + * match mode. * * This is the only hook that allows a bridge to reject a modeset. If * this function passes all other callbacks must succeed for this * configuration. * - * The mode_fixup callback is optional. &drm_bridge_funcs.mode_fixup() - * is not called when &drm_bridge_funcs.atomic_check() is implemented, - * so only one of them should be provided. + * The mode_fixup callback is optional. * * NOTE: * @@ -142,12 +82,6 @@ struct drm_bridge_funcs { * NOT touch any persistent state (hardware or software) or data * structures except the passed in @state parameter. * - * Also beware that userspace can request its own custom modes, neither - * core nor helpers filter modes to the list of probe modes reported by - * the GETCONNECTOR IOCTL and stored in &drm_connector.modes. To ensure - * that modes are filtered consistently put any bridge constraints and - * limits checks into @mode_valid. - * - * RETURNS: - * - * True if an acceptable configuration is possible, false if the modeset @@ -162,44 +96,32 @@ struct drm_bridge_funcs { * This callback should disable the bridge. It is called right before * the preceding element in the display pipe is disabled. 
If the * preceding element is a bridge this means it's called before that - * bridge's @disable vfunc. If the preceding element is a &drm_encoder - * it's called right before the &drm_encoder_helper_funcs.disable, - * &drm_encoder_helper_funcs.prepare or &drm_encoder_helper_funcs.dpms - * hook. + * bridge's ->disable() function. If the preceding element is a + * &drm_encoder it's called right before the encoder's ->disable(), + * ->prepare() or ->dpms() hook from struct &drm_encoder_helper_funcs. * * The bridge can assume that the display pipe (i.e. clocks and timing * signals) feeding it is still running when this callback is called. * - * The @disable callback is optional. - * - * NOTE: - * - * This is deprecated, do not use! - * New drivers shall use &drm_bridge_funcs.atomic_disable. + * The disable callback is optional. */ void (*disable)(struct drm_bridge *bridge); /** * @post_disable: * - * This callback should disable the bridge. It is called right after the - * preceding element in the display pipe is disabled. If the preceding - * element is a bridge this means it's called after that bridge's - * @post_disable function. If the preceding element is a &drm_encoder - * it's called right after the encoder's - * &drm_encoder_helper_funcs.disable, &drm_encoder_helper_funcs.prepare - * or &drm_encoder_helper_funcs.dpms hook. + * This callback should disable the bridge. It is called right after + * the preceding element in the display pipe is disabled. If the + * preceding element is a bridge this means it's called after that + * bridge's ->post_disable() function. If the preceding element is a + * &drm_encoder it's called right after the encoder's ->disable(), + * ->prepare() or ->dpms() hook from struct &drm_encoder_helper_funcs. * * The bridge must assume that the display pipe (i.e. clocks and timing * singals) feeding it is no longer running when this callback is * called. * - * The @post_disable callback is optional. 
- * - * NOTE: - * - * This is deprecated, do not use! - * New drivers shall use &drm_bridge_funcs.atomic_post_disable. + * The post_disable callback is optional. */ void (*post_disable)(struct drm_bridge *bridge); @@ -207,53 +129,29 @@ struct drm_bridge_funcs { * @mode_set: * * This callback should set the given mode on the bridge. It is called - * after the @mode_set callback for the preceding element in the display - * pipeline has been called already. If the bridge is the first element - * then this would be &drm_encoder_helper_funcs.mode_set. The display - * pipe (i.e. clocks and timing signals) is off when this function is - * called. - * - * The adjusted_mode parameter is the mode output by the CRTC for the - * first bridge in the chain. It can be different from the mode - * parameter that contains the desired mode for the connector at the end - * of the bridges chain, for instance when the first bridge in the chain - * performs scaling. The adjusted mode is mostly useful for the first - * bridge in the chain and is likely irrelevant for the other bridges. - * - * For atomic drivers the adjusted_mode is the mode stored in - * &drm_crtc_state.adjusted_mode. - * - * NOTE: - * - * This is deprecated, do not use! - * New drivers shall set their mode in the - * &drm_bridge_funcs.atomic_enable operation. + * after the ->mode_set() callback for the preceding element in the + * display pipeline has been called already. The display pipe (i.e. + * clocks and timing signals) is off when this function is called. */ void (*mode_set)(struct drm_bridge *bridge, - const struct drm_display_mode *mode, - const struct drm_display_mode *adjusted_mode); + struct drm_display_mode *mode, + struct drm_display_mode *adjusted_mode); /** * @pre_enable: * * This callback should enable the bridge. It is called right before * the preceding element in the display pipe is enabled. If the * preceding element is a bridge this means it's called before that - * bridge's @pre_enable function. 
If the preceding element is a - * &drm_encoder it's called right before the encoder's - * &drm_encoder_helper_funcs.enable, &drm_encoder_helper_funcs.commit or - * &drm_encoder_helper_funcs.dpms hook. + * bridge's ->pre_enable() function. If the preceding element is a + * &drm_encoder it's called right before the encoder's ->enable(), + * ->commit() or ->dpms() hook from struct &drm_encoder_helper_funcs. * * The display pipe (i.e. clocks and timing signals) feeding this bridge * will not yet be running when this callback is called. The bridge must * not enable the display link feeding the next bridge in the chain (if * there is one) when this callback is called. * - * The @pre_enable callback is optional. - * - * NOTE: - * - * This is deprecated, do not use! - * New drivers shall use &drm_bridge_funcs.atomic_pre_enable. + * The pre_enable callback is optional. */ void (*pre_enable)(struct drm_bridge *bridge); @@ -263,655 +161,58 @@ struct drm_bridge_funcs { * This callback should enable the bridge. It is called right after * the preceding element in the display pipe is enabled. If the * preceding element is a bridge this means it's called after that - * bridge's @enable function. If the preceding element is a - * &drm_encoder it's called right after the encoder's - * &drm_encoder_helper_funcs.enable, &drm_encoder_helper_funcs.commit or - * &drm_encoder_helper_funcs.dpms hook. + * bridge's ->enable() function. If the preceding element is a + * &drm_encoder it's called right after the encoder's ->enable(), + * ->commit() or ->dpms() hook from struct &drm_encoder_helper_funcs. * * The bridge can assume that the display pipe (i.e. clocks and timing * signals) feeding it is running when this callback is called. This * callback must enable the display link feeding the next bridge in the * chain if there is one. * - * The @enable callback is optional. - * - * NOTE: - * - * This is deprecated, do not use! - * New drivers shall use &drm_bridge_funcs.atomic_enable. 
+ * The enable callback is optional. */ void (*enable)(struct drm_bridge *bridge); - - /** - * @atomic_pre_enable: - * - * This callback should enable the bridge. It is called right before - * the preceding element in the display pipe is enabled. If the - * preceding element is a bridge this means it's called before that - * bridge's @atomic_pre_enable or @pre_enable function. If the preceding - * element is a &drm_encoder it's called right before the encoder's - * &drm_encoder_helper_funcs.atomic_enable hook. - * - * The display pipe (i.e. clocks and timing signals) feeding this bridge - * will not yet be running when this callback is called. The bridge must - * not enable the display link feeding the next bridge in the chain (if - * there is one) when this callback is called. - * - * Note that this function will only be invoked in the context of an - * atomic commit. It will not be invoked from - * &drm_bridge_chain_pre_enable. It would be prudent to also provide an - * implementation of @pre_enable if you are expecting driver calls into - * &drm_bridge_chain_pre_enable. - * - * The @atomic_pre_enable callback is optional. - */ - void (*atomic_pre_enable)(struct drm_bridge *bridge, - struct drm_bridge_state *old_bridge_state); - - /** - * @atomic_enable: - * - * This callback should enable the bridge. It is called right after - * the preceding element in the display pipe is enabled. If the - * preceding element is a bridge this means it's called after that - * bridge's @atomic_enable or @enable function. If the preceding element - * is a &drm_encoder it's called right after the encoder's - * &drm_encoder_helper_funcs.atomic_enable hook. - * - * The bridge can assume that the display pipe (i.e. clocks and timing - * signals) feeding it is running when this callback is called. This - * callback must enable the display link feeding the next bridge in the - * chain if there is one. 
- * - * Note that this function will only be invoked in the context of an - * atomic commit. It will not be invoked from &drm_bridge_chain_enable. - * It would be prudent to also provide an implementation of @enable if - * you are expecting driver calls into &drm_bridge_chain_enable. - * - * The @atomic_enable callback is optional. - */ - void (*atomic_enable)(struct drm_bridge *bridge, - struct drm_bridge_state *old_bridge_state); - /** - * @atomic_disable: - * - * This callback should disable the bridge. It is called right before - * the preceding element in the display pipe is disabled. If the - * preceding element is a bridge this means it's called before that - * bridge's @atomic_disable or @disable vfunc. If the preceding element - * is a &drm_encoder it's called right before the - * &drm_encoder_helper_funcs.atomic_disable hook. - * - * The bridge can assume that the display pipe (i.e. clocks and timing - * signals) feeding it is still running when this callback is called. - * - * Note that this function will only be invoked in the context of an - * atomic commit. It will not be invoked from - * &drm_bridge_chain_disable. It would be prudent to also provide an - * implementation of @disable if you are expecting driver calls into - * &drm_bridge_chain_disable. - * - * The @atomic_disable callback is optional. - */ - void (*atomic_disable)(struct drm_bridge *bridge, - struct drm_bridge_state *old_bridge_state); - - /** - * @atomic_post_disable: - * - * This callback should disable the bridge. It is called right after the - * preceding element in the display pipe is disabled. If the preceding - * element is a bridge this means it's called after that bridge's - * @atomic_post_disable or @post_disable function. If the preceding - * element is a &drm_encoder it's called right after the encoder's - * &drm_encoder_helper_funcs.atomic_disable hook. - * - * The bridge must assume that the display pipe (i.e. 
clocks and timing - * signals) feeding it is no longer running when this callback is - * called. - * - * Note that this function will only be invoked in the context of an - * atomic commit. It will not be invoked from - * &drm_bridge_chain_post_disable. - * It would be prudent to also provide an implementation of - * @post_disable if you are expecting driver calls into - * &drm_bridge_chain_post_disable. - * - * The @atomic_post_disable callback is optional. - */ - void (*atomic_post_disable)(struct drm_bridge *bridge, - struct drm_bridge_state *old_bridge_state); - - /** - * @atomic_duplicate_state: - * - * Duplicate the current bridge state object (which is guaranteed to be - * non-NULL). - * - * The atomic_duplicate_state hook is mandatory if the bridge - * implements any of the atomic hooks, and should be left unassigned - * otherwise. For bridges that don't subclass &drm_bridge_state, the - * drm_atomic_helper_bridge_duplicate_state() helper function shall be - * used to implement this hook. - * - * RETURNS: - * A valid drm_bridge_state object or NULL if the allocation fails. - */ - struct drm_bridge_state *(*atomic_duplicate_state)(struct drm_bridge *bridge); - - /** - * @atomic_destroy_state: - * - * Destroy a bridge state object previously allocated by - * &drm_bridge_funcs.atomic_duplicate_state(). - * - * The atomic_destroy_state hook is mandatory if the bridge implements - * any of the atomic hooks, and should be left unassigned otherwise. - * For bridges that don't subclass &drm_bridge_state, the - * drm_atomic_helper_bridge_destroy_state() helper function shall be - * used to implement this hook. - */ - void (*atomic_destroy_state)(struct drm_bridge *bridge, - struct drm_bridge_state *state); - - /** - * @atomic_get_output_bus_fmts: - * - * Return the supported bus formats on the output end of a bridge. - * The returned array must be allocated with kmalloc() and will be - * freed by the caller. If the allocation fails, NULL should be - * returned. 
num_output_fmts must be set to the returned array size. - * Formats listed in the returned array should be listed in decreasing - * preference order (the core will try all formats until it finds one - * that works). - * - * This method is only called on the last element of the bridge chain - * as part of the bus format negotiation process that happens in - * &drm_atomic_bridge_chain_select_bus_fmts(). - * This method is optional. When not implemented, the core will - * fall back to &drm_connector.display_info.bus_formats[0] if - * &drm_connector.display_info.num_bus_formats > 0, - * or to MEDIA_BUS_FMT_FIXED otherwise. - */ - u32 *(*atomic_get_output_bus_fmts)(struct drm_bridge *bridge, - struct drm_bridge_state *bridge_state, - struct drm_crtc_state *crtc_state, - struct drm_connector_state *conn_state, - unsigned int *num_output_fmts); - - /** - * @atomic_get_input_bus_fmts: - * - * Return the supported bus formats on the input end of a bridge for - * a specific output bus format. - * - * The returned array must be allocated with kmalloc() and will be - * freed by the caller. If the allocation fails, NULL should be - * returned. num_output_fmts must be set to the returned array size. - * Formats listed in the returned array should be listed in decreasing - * preference order (the core will try all formats until it finds one - * that works). When the format is not supported NULL should be - * returned and num_output_fmts should be set to 0. - * - * This method is called on all elements of the bridge chain as part of - * the bus format negotiation process that happens in - * drm_atomic_bridge_chain_select_bus_fmts(). - * This method is optional. When not implemented, the core will bypass - * bus format negotiation on this element of the bridge without - * failing, and the previous element in the chain will be passed - * MEDIA_BUS_FMT_FIXED as its output bus format. 
- * - * Bridge drivers that need to support being linked to bridges that are - * not supporting bus format negotiation should handle the - * output_fmt == MEDIA_BUS_FMT_FIXED case appropriately, by selecting a - * sensible default value or extracting this information from somewhere - * else (FW property, &drm_display_mode, &drm_display_info, ...) - * - * Note: Even if input format selection on the first bridge has no - * impact on the negotiation process (bus format negotiation stops once - * we reach the first element of the chain), drivers are expected to - * return accurate input formats as the input format may be used to - * configure the CRTC output appropriately. - */ - u32 *(*atomic_get_input_bus_fmts)(struct drm_bridge *bridge, - struct drm_bridge_state *bridge_state, - struct drm_crtc_state *crtc_state, - struct drm_connector_state *conn_state, - u32 output_fmt, - unsigned int *num_input_fmts); - - /** - * @atomic_check: - * - * This method is responsible for checking bridge state correctness. - * It can also check the state of the surrounding components in chain - * to make sure the whole pipeline can work properly. - * - * &drm_bridge_funcs.atomic_check() hooks are called in reverse - * order (from the last to the first bridge). - * - * This method is optional. &drm_bridge_funcs.mode_fixup() is not - * called when &drm_bridge_funcs.atomic_check() is implemented, so only - * one of them should be provided. - * - * If drivers need to tweak &drm_bridge_state.input_bus_cfg.flags or - * &drm_bridge_state.output_bus_cfg.flags it should happen in - * this function. By default the &drm_bridge_state.output_bus_cfg.flags - * field is set to the next bridge - * &drm_bridge_state.input_bus_cfg.flags value or - * &drm_connector.display_info.bus_flags if the bridge is the last - * element in the chain. - * - * RETURNS: - * zero if the check passed, a negative error code otherwise. 
- */ - int (*atomic_check)(struct drm_bridge *bridge, - struct drm_bridge_state *bridge_state, - struct drm_crtc_state *crtc_state, - struct drm_connector_state *conn_state); - - /** - * @atomic_reset: - * - * Reset the bridge to a predefined state (or retrieve its current - * state) and return a &drm_bridge_state object matching this state. - * This function is called at attach time. - * - * The atomic_reset hook is mandatory if the bridge implements any of - * the atomic hooks, and should be left unassigned otherwise. For - * bridges that don't subclass &drm_bridge_state, the - * drm_atomic_helper_bridge_reset() helper function shall be used to - * implement this hook. - * - * Note that the atomic_reset() semantics is not exactly matching the - * reset() semantics found on other components (connector, plane, ...). - * - * 1. The reset operation happens when the bridge is attached, not when - * drm_mode_config_reset() is called - * 2. It's meant to be used exclusively on bridges that have been - * converted to the ATOMIC API - * - * RETURNS: - * A valid drm_bridge_state object in case of success, an ERR_PTR() - * giving the reason of the failure otherwise. - */ - struct drm_bridge_state *(*atomic_reset)(struct drm_bridge *bridge); - - /** - * @detect: - * - * Check if anything is attached to the bridge output. - * - * This callback is optional, if not implemented the bridge will be - * considered as always having a component attached to its output. - * Bridges that implement this callback shall set the - * DRM_BRIDGE_OP_DETECT flag in their &drm_bridge->ops. - * - * RETURNS: - * - * drm_connector_status indicating the bridge output status. - */ - enum drm_connector_status (*detect)(struct drm_bridge *bridge); - - /** - * @get_modes: - * - * Fill all modes currently valid for the sink into the &drm_connector - * with drm_mode_probed_add(). - * - * The @get_modes callback is mostly intended to support non-probeable - * displays such as many fixed panels. 
Bridges that support reading - * EDID shall leave @get_modes unimplemented and implement the - * &drm_bridge_funcs->get_edid callback instead. - * - * This callback is optional. Bridges that implement it shall set the - * DRM_BRIDGE_OP_MODES flag in their &drm_bridge->ops. - * - * The connector parameter shall be used for the sole purpose of - * filling modes, and shall not be stored internally by bridge drivers - * for future usage. - * - * RETURNS: - * - * The number of modes added by calling drm_mode_probed_add(). - */ - int (*get_modes)(struct drm_bridge *bridge, - struct drm_connector *connector); - - /** - * @get_edid: - * - * Read and parse the EDID data of the connected display. - * - * The @get_edid callback is the preferred way of reporting mode - * information for a display connected to the bridge output. Bridges - * that support reading EDID shall implement this callback and leave - * the @get_modes callback unimplemented. - * - * The caller of this operation shall first verify the output - * connection status and refrain from reading EDID from a disconnected - * output. - * - * This callback is optional. Bridges that implement it shall set the - * DRM_BRIDGE_OP_EDID flag in their &drm_bridge->ops. - * - * The connector parameter shall be used for the sole purpose of EDID - * retrieval and parsing, and shall not be stored internally by bridge - * drivers for future usage. - * - * RETURNS: - * - * An edid structure newly allocated with kmalloc() (or similar) on - * success, or NULL otherwise. The caller is responsible for freeing - * the returned edid structure with kfree(). - */ - struct edid *(*get_edid)(struct drm_bridge *bridge, - struct drm_connector *connector); - - /** - * @hpd_notify: - * - * Notify the bridge of hot plug detection. - * - * This callback is optional, it may be implemented by bridges that - * need to be notified of display connection or disconnection for - * internal reasons. 
One use case is to reset the internal state of CEC - * controllers for HDMI bridges. - */ - void (*hpd_notify)(struct drm_bridge *bridge, - enum drm_connector_status status); - - /** - * @hpd_enable: - * - * Enable hot plug detection. From now on the bridge shall call - * drm_bridge_hpd_notify() each time a change is detected in the output - * connection status, until hot plug detection gets disabled with - * @hpd_disable. - * - * This callback is optional and shall only be implemented by bridges - * that support hot-plug notification without polling. Bridges that - * implement it shall also implement the @hpd_disable callback and set - * the DRM_BRIDGE_OP_HPD flag in their &drm_bridge->ops. - */ - void (*hpd_enable)(struct drm_bridge *bridge); - - /** - * @hpd_disable: - * - * Disable hot plug detection. Once this function returns the bridge - * shall not call drm_bridge_hpd_notify() when a change in the output - * connection status occurs. - * - * This callback is optional and shall only be implemented by bridges - * that support hot-plug notification without polling. Bridges that - * implement it shall also implement the @hpd_enable callback and set - * the DRM_BRIDGE_OP_HPD flag in their &drm_bridge->ops. - */ - void (*hpd_disable)(struct drm_bridge *bridge); -}; - -/** - * struct drm_bridge_timings - timing information for the bridge - */ -struct drm_bridge_timings { - /** - * @input_bus_flags: - * - * Tells what additional settings for the pixel data on the bus - * this bridge requires (like pixel signal polarity). See also - * &drm_display_info->bus_flags. - */ - u32 input_bus_flags; - /** - * @setup_time_ps: - * - * Defines the time in picoseconds the input data lines must be - * stable before the clock edge. - */ - u32 setup_time_ps; - /** - * @hold_time_ps: - * - * Defines the time in picoseconds taken for the bridge to sample the - * input signal after the clock edge. 
- */ - u32 hold_time_ps; - /** - * @dual_link: - * - * True if the bus operates in dual-link mode. The exact meaning is - * dependent on the bus type. For LVDS buses, this indicates that even- - * and odd-numbered pixels are received on separate links. - */ - bool dual_link; -}; - -/** - * enum drm_bridge_ops - Bitmask of operations supported by the bridge - */ -enum drm_bridge_ops { - /** - * @DRM_BRIDGE_OP_DETECT: The bridge can detect displays connected to - * its output. Bridges that set this flag shall implement the - * &drm_bridge_funcs->detect callback. - */ - DRM_BRIDGE_OP_DETECT = BIT(0), - /** - * @DRM_BRIDGE_OP_EDID: The bridge can retrieve the EDID of the display - * connected to its output. Bridges that set this flag shall implement - * the &drm_bridge_funcs->get_edid callback. - */ - DRM_BRIDGE_OP_EDID = BIT(1), - /** - * @DRM_BRIDGE_OP_HPD: The bridge can detect hot-plug and hot-unplug - * without requiring polling. Bridges that set this flag shall - * implement the &drm_bridge_funcs->hpd_enable and - * &drm_bridge_funcs->hpd_disable callbacks if they support enabling - * and disabling hot-plug detection dynamically. - */ - DRM_BRIDGE_OP_HPD = BIT(2), - /** - * @DRM_BRIDGE_OP_MODES: The bridge can retrieve the modes supported - * by the display at its output. This does not include reading EDID - * which is separately covered by @DRM_BRIDGE_OP_EDID. Bridges that set - * this flag shall implement the &drm_bridge_funcs->get_modes callback. 
- */ - DRM_BRIDGE_OP_MODES = BIT(3), }; /** * struct drm_bridge - central DRM bridge control structure + * @dev: DRM device this bridge belongs to + * @encoder: encoder to which this bridge is connected + * @next: the next bridge in the encoder chain + * @of_node: device node pointer to the bridge + * @list: to keep track of all added bridges + * @funcs: control functions + * @driver_private: pointer to the bridge driver's internal context */ struct drm_bridge { - /** @base: inherit from &drm_private_object */ - struct drm_private_obj base; - /** @dev: DRM device this bridge belongs to */ struct drm_device *dev; - /** @encoder: encoder to which this bridge is connected */ struct drm_encoder *encoder; - /** @chain_node: used to form a bridge chain */ - struct list_head chain_node; + struct drm_bridge *next; #ifdef CONFIG_OF - /** @of_node: device node pointer to the bridge */ struct device_node *of_node; #endif - /** @list: to keep track of all added bridges */ struct list_head list; - /** - * @timings: - * - * the timing specification for the bridge, if any (may be NULL) - */ - const struct drm_bridge_timings *timings; - /** @funcs: control functions */ + const struct drm_bridge_funcs *funcs; - /** @driver_private: pointer to the bridge driver's internal context */ void *driver_private; - /** @ops: bitmask of operations supported by the bridge */ - enum drm_bridge_ops ops; - /** - * @type: Type of the connection at the bridge output - * (DRM_MODE_CONNECTOR_*). For bridges at the end of this chain this - * identifies the type of connected display. - */ - int type; - /** - * @interlace_allowed: Indicate that the bridge can handle interlaced - * modes. - */ - bool interlace_allowed; - /** - * @ddc: Associated I2C adapter for DDC access, if any. - */ - struct i2c_adapter *ddc; - /** private: */ - /** - * @hpd_mutex: Protects the @hpd_cb and @hpd_data fields. 
- */ - struct mutex hpd_mutex; - /** - * @hpd_cb: Hot plug detection callback, registered with - * drm_bridge_hpd_enable(). - */ - void (*hpd_cb)(void *data, enum drm_connector_status status); - /** - * @hpd_data: Private data passed to the Hot plug detection callback - * @hpd_cb. - */ - void *hpd_data; }; -static inline struct drm_bridge * -drm_priv_to_bridge(struct drm_private_obj *priv) -{ - return container_of(priv, struct drm_bridge, base); -} - -void drm_bridge_add(struct drm_bridge *bridge); +int drm_bridge_add(struct drm_bridge *bridge); void drm_bridge_remove(struct drm_bridge *bridge); struct drm_bridge *of_drm_find_bridge(struct device_node *np); -int drm_bridge_attach(struct drm_encoder *encoder, struct drm_bridge *bridge, - struct drm_bridge *previous, - enum drm_bridge_attach_flags flags); +int drm_bridge_attach(struct drm_device *dev, struct drm_bridge *bridge); +void drm_bridge_detach(struct drm_bridge *bridge); -/** - * drm_bridge_get_next_bridge() - Get the next bridge in the chain - * @bridge: bridge object - * - * RETURNS: - * the next bridge in the chain after @bridge, or NULL if @bridge is the last. - */ -static inline struct drm_bridge * -drm_bridge_get_next_bridge(struct drm_bridge *bridge) -{ - if (list_is_last(&bridge->chain_node, &bridge->encoder->bridge_chain)) - return NULL; - - return list_next_entry(bridge, chain_node); -} - -/** - * drm_bridge_get_prev_bridge() - Get the previous bridge in the chain - * @bridge: bridge object - * - * RETURNS: - * the previous bridge in the chain, or NULL if @bridge is the first. 
- */ -static inline struct drm_bridge * -drm_bridge_get_prev_bridge(struct drm_bridge *bridge) -{ - if (list_is_first(&bridge->chain_node, &bridge->encoder->bridge_chain)) - return NULL; - - return list_prev_entry(bridge, chain_node); -} - -/** - * drm_bridge_chain_get_first_bridge() - Get the first bridge in the chain - * @encoder: encoder object - * - * RETURNS: - * the first bridge in the chain, or NULL if @encoder has no bridge attached - * to it. - */ -static inline struct drm_bridge * -drm_bridge_chain_get_first_bridge(struct drm_encoder *encoder) -{ - return list_first_entry_or_null(&encoder->bridge_chain, - struct drm_bridge, chain_node); -} - -/** - * drm_for_each_bridge_in_chain() - Iterate over all bridges present in a chain - * @encoder: the encoder to iterate bridges on - * @bridge: a bridge pointer updated to point to the current bridge at each - * iteration - * - * Iterate over all bridges present in the bridge chain attached to @encoder. - */ -#define drm_for_each_bridge_in_chain(encoder, bridge) \ - list_for_each_entry(bridge, &(encoder)->bridge_chain, chain_node) - -bool drm_bridge_chain_mode_fixup(struct drm_bridge *bridge, - const struct drm_display_mode *mode, - struct drm_display_mode *adjusted_mode); -enum drm_mode_status -drm_bridge_chain_mode_valid(struct drm_bridge *bridge, - const struct drm_display_info *info, - const struct drm_display_mode *mode); -void drm_bridge_chain_disable(struct drm_bridge *bridge); -void drm_bridge_chain_post_disable(struct drm_bridge *bridge); -void drm_bridge_chain_mode_set(struct drm_bridge *bridge, - const struct drm_display_mode *mode, - const struct drm_display_mode *adjusted_mode); -void drm_bridge_chain_pre_enable(struct drm_bridge *bridge); -void drm_bridge_chain_enable(struct drm_bridge *bridge); - -int drm_atomic_bridge_chain_check(struct drm_bridge *bridge, - struct drm_crtc_state *crtc_state, - struct drm_connector_state *conn_state); -void drm_atomic_bridge_chain_disable(struct drm_bridge *bridge, 
- struct drm_atomic_state *state); -void drm_atomic_bridge_chain_post_disable(struct drm_bridge *bridge, - struct drm_atomic_state *state); -void drm_atomic_bridge_chain_pre_enable(struct drm_bridge *bridge, - struct drm_atomic_state *state); -void drm_atomic_bridge_chain_enable(struct drm_bridge *bridge, - struct drm_atomic_state *state); - -u32 * -drm_atomic_helper_bridge_propagate_bus_fmt(struct drm_bridge *bridge, - struct drm_bridge_state *bridge_state, - struct drm_crtc_state *crtc_state, - struct drm_connector_state *conn_state, - u32 output_fmt, - unsigned int *num_input_fmts); - -enum drm_connector_status drm_bridge_detect(struct drm_bridge *bridge); -int drm_bridge_get_modes(struct drm_bridge *bridge, - struct drm_connector *connector); -struct edid *drm_bridge_get_edid(struct drm_bridge *bridge, - struct drm_connector *connector); -void drm_bridge_hpd_enable(struct drm_bridge *bridge, - void (*cb)(void *data, - enum drm_connector_status status), - void *data); -void drm_bridge_hpd_disable(struct drm_bridge *bridge); -void drm_bridge_hpd_notify(struct drm_bridge *bridge, - enum drm_connector_status status); - -#ifdef CONFIG_DRM_PANEL_BRIDGE -struct drm_bridge *drm_panel_bridge_add(struct drm_panel *panel); -struct drm_bridge *drm_panel_bridge_add_typed(struct drm_panel *panel, - u32 connector_type); -void drm_panel_bridge_remove(struct drm_bridge *bridge); -struct drm_bridge *devm_drm_panel_bridge_add(struct device *dev, - struct drm_panel *panel); -struct drm_bridge *devm_drm_panel_bridge_add_typed(struct device *dev, - struct drm_panel *panel, - u32 connector_type); -struct drm_connector *drm_panel_bridge_connector(struct drm_bridge *bridge); -#endif +bool drm_bridge_mode_fixup(struct drm_bridge *bridge, + const struct drm_display_mode *mode, + struct drm_display_mode *adjusted_mode); +void drm_bridge_disable(struct drm_bridge *bridge); +void drm_bridge_post_disable(struct drm_bridge *bridge); +void drm_bridge_mode_set(struct drm_bridge *bridge, + 
struct drm_display_mode *mode, + struct drm_display_mode *adjusted_mode); +void drm_bridge_pre_enable(struct drm_bridge *bridge); +void drm_bridge_enable(struct drm_bridge *bridge); #endif diff --git a/include/drm/drm_cache.h b/include/drm/drm_cache.h index cc9de1632d..cebecff536 100644 --- a/include/drm/drm_cache.h +++ b/include/drm/drm_cache.h @@ -33,48 +33,17 @@ #ifndef _DRM_CACHE_H_ #define _DRM_CACHE_H_ -#include - -struct dma_buf_map; - void drm_clflush_pages(struct page *pages[], unsigned long num_pages); -void drm_clflush_sg(struct sg_table *st); -void drm_clflush_virt_range(void *addr, unsigned long length); -bool drm_need_swiotlb(int dma_bits); - static inline bool drm_arch_can_wc_memory(void) { #if defined(CONFIG_PPC) && !defined(CONFIG_NOT_COHERENT_CACHE) return false; -#elif defined(CONFIG_MIPS) && defined(CONFIG_CPU_LOONGSON64) - return false; -#elif defined(CONFIG_ARM) || defined(CONFIG_ARM64) - /* - * The DRM driver stack is designed to work with cache coherent devices - * only, but permits an optimization to be enabled in some cases, where - * for some buffers, both the CPU and the GPU use uncached mappings, - * removing the need for DMA snooping and allocation in the CPU caches. - * - * The use of uncached GPU mappings relies on the correct implementation - * of the PCIe NoSnoop TLP attribute by the platform, otherwise the GPU - * will use cached mappings nonetheless. On x86 platforms, this does not - * seem to matter, as uncached CPU mappings will snoop the caches in any - * case. However, on ARM and arm64, enabling this optimization on a - * platform where NoSnoop is ignored results in loss of coherency, which - * breaks correct operation of the device. Since we have no way of - * detecting whether NoSnoop works or not, just disable this - * optimization entirely for ARM and arm64. 
- */ +#elif defined(CONFIG_MIPS) && defined(CONFIG_CPU_LOONGSON3) return false; #else return true; #endif } -void drm_memcpy_init_early(void); - -void drm_memcpy_from_wc(struct dma_buf_map *dst, - const struct dma_buf_map *src, - unsigned long len); #endif diff --git a/include/drm/drm_color_mgmt.h b/include/drm/drm_color_mgmt.h index 81c298488b..c767238ac9 100644 --- a/include/drm/drm_color_mgmt.h +++ b/include/drm/drm_color_mgmt.h @@ -24,35 +24,6 @@ #define __DRM_COLOR_MGMT_H__ #include -#include - -struct drm_crtc; -struct drm_plane; - -/** - * drm_color_lut_extract - clamp and round LUT entries - * @user_input: input value - * @bit_precision: number of bits the hw LUT supports - * - * Extract a degamma/gamma LUT value provided by user (in the form of - * &drm_color_lut entries) and round it to the precision supported by the - * hardware. - */ -static inline u32 drm_color_lut_extract(u32 user_input, int bit_precision) -{ - u32 val = user_input; - u32 max = 0xffff >> (16 - bit_precision); - - /* Round only if we're not using full precision. */ - if (bit_precision < 16) { - val += 1UL << (16 - bit_precision - 1); - val >>= 16 - bit_precision; - } - - return clamp_val(val, 0, max); -} - -u64 drm_color_ctm_s31_32_to_qm_n(u64 user_input, u32 m, u32 n); void drm_crtc_enable_color_mgmt(struct drm_crtc *crtc, uint degamma_lut_size, @@ -63,61 +34,28 @@ int drm_mode_crtc_set_gamma_size(struct drm_crtc *crtc, int gamma_size); /** - * drm_color_lut_size - calculate the number of entries in the LUT - * @blob: blob containing the LUT + * drm_color_lut_extract - clamp&round LUT entries + * @user_input: input value + * @bit_precision: number of bits the hw LUT supports * - * Returns: - * The number of entries in the color LUT stored in @blob. + * Extract a degamma/gamma LUT value provided by user (in the form of + * &drm_color_lut entries) and round it to the precision supported by the + * hardware. 
*/ -static inline int drm_color_lut_size(const struct drm_property_blob *blob) +static inline uint32_t drm_color_lut_extract(uint32_t user_input, + uint32_t bit_precision) { - return blob->length / sizeof(struct drm_color_lut); + uint32_t val = user_input; + uint32_t max = 0xffff >> (16 - bit_precision); + + /* Round only if we're not using full precision. */ + if (bit_precision < 16) { + val += 1UL << (16 - bit_precision - 1); + val >>= 16 - bit_precision; + } + + return clamp_val(val, 0, max); } -enum drm_color_encoding { - DRM_COLOR_YCBCR_BT601, - DRM_COLOR_YCBCR_BT709, - DRM_COLOR_YCBCR_BT2020, - DRM_COLOR_ENCODING_MAX, -}; -enum drm_color_range { - DRM_COLOR_YCBCR_LIMITED_RANGE, - DRM_COLOR_YCBCR_FULL_RANGE, - DRM_COLOR_RANGE_MAX, -}; - -int drm_plane_create_color_properties(struct drm_plane *plane, - u32 supported_encodings, - u32 supported_ranges, - enum drm_color_encoding default_encoding, - enum drm_color_range default_range); - -/** - * enum drm_color_lut_tests - hw-specific LUT tests to perform - * - * The drm_color_lut_check() function takes a bitmask of the values here to - * determine which tests to apply to a userspace-provided LUT. - */ -enum drm_color_lut_tests { - /** - * @DRM_COLOR_LUT_EQUAL_CHANNELS: - * - * Checks whether the entries of a LUT all have equal values for the - * red, green, and blue channels. Intended for hardware that only - * accepts a single value per LUT entry and assumes that value applies - * to all three color components. - */ - DRM_COLOR_LUT_EQUAL_CHANNELS = BIT(0), - - /** - * @DRM_COLOR_LUT_NON_DECREASING: - * - * Checks whether the entries of a LUT are always flat or increasing - * (never decreasing). 
- */ - DRM_COLOR_LUT_NON_DECREASING = BIT(1), -}; - -int drm_color_lut_check(const struct drm_property_blob *lut, u32 tests); #endif diff --git a/include/drm/drm_connector.h b/include/drm/drm_connector.h index 1647960c9e..ac9d7d8e0e 100644 --- a/include/drm/drm_connector.h +++ b/include/drm/drm_connector.h @@ -24,24 +24,20 @@ #define __DRM_CONNECTOR_H__ #include -#include #include -#include #include -#include #include +struct drm_device; + struct drm_connector_helper_funcs; -struct drm_modeset_acquire_ctx; struct drm_device; struct drm_crtc; struct drm_encoder; struct drm_property; struct drm_property_blob; -struct drm_printer; struct edid; -struct i2c_adapter; enum drm_connector_force { DRM_FORCE_UNSPECIFIED, @@ -83,53 +79,6 @@ enum drm_connector_status { connector_status_unknown = 3, }; -/** - * enum drm_connector_registration_state - userspace registration status for - * a &drm_connector - * - * This enum is used to track the status of initializing a connector and - * registering it with userspace, so that DRM can prevent bogus modesets on - * connectors that no longer exist. - */ -enum drm_connector_registration_state { - /** - * @DRM_CONNECTOR_INITIALIZING: The connector has just been created, - * but has yet to be exposed to userspace. There should be no - * additional restrictions to how the state of this connector may be - * modified. - */ - DRM_CONNECTOR_INITIALIZING = 0, - - /** - * @DRM_CONNECTOR_REGISTERED: The connector has been fully initialized - * and registered with sysfs, as such it has been exposed to - * userspace. There should be no additional restrictions to how the - * state of this connector may be modified. - */ - DRM_CONNECTOR_REGISTERED = 1, - - /** - * @DRM_CONNECTOR_UNREGISTERED: The connector has either been exposed - * to userspace and has since been unregistered and removed from - * userspace, or the connector was unregistered before it had a chance - * to be exposed to userspace (e.g. 
still in the - * @DRM_CONNECTOR_INITIALIZING state). When a connector is - * unregistered, there are additional restrictions to how its state - * may be modified: - * - * - An unregistered connector may only have its DPMS changed from - * On->Off. Once DPMS is changed to Off, it may not be switched back - * to On. - * - Modesets are not allowed on unregistered connectors, unless they - * would result in disabling its assigned CRTCs. This means - * disabling a CRTC on an unregistered connector is OK, but enabling - * one is not. - * - Removing a CRTC from an unregistered connector is OK, but new - * CRTCs may never be assigned to an unregistered connector. - */ - DRM_CONNECTOR_UNREGISTERED = 2, -}; - enum subpixel_order { SubPixelUnknown = 0, SubPixelHorizontalRGB, @@ -137,330 +86,6 @@ enum subpixel_order { SubPixelVerticalRGB, SubPixelVerticalBGR, SubPixelNone, - -}; - -/** - * struct drm_scrambling: sink's scrambling support. - */ -struct drm_scrambling { - /** - * @supported: scrambling supported for rates > 340 Mhz. - */ - bool supported; - /** - * @low_rates: scrambling supported for rates <= 340 Mhz. - */ - bool low_rates; -}; - -/* - * struct drm_scdc - Information about scdc capabilities of a HDMI 2.0 sink - * - * Provides SCDC register support and capabilities related information on a - * HDMI 2.0 sink. In case of a HDMI 1.4 sink, all parameter must be 0. - */ -struct drm_scdc { - /** - * @supported: status control & data channel present. - */ - bool supported; - /** - * @read_request: sink is capable of generating scdc read request. - */ - bool read_request; - /** - * @scrambling: sink's scrambling capabilities - */ - struct drm_scrambling scrambling; -}; - -/** - * struct drm_hdmi_dsc_cap - DSC capabilities of HDMI sink - * - * Describes the DSC support provided by HDMI 2.1 sink. - * The information is fetched fom additional HFVSDB blocks defined - * for HDMI 2.1. 
- */ -struct drm_hdmi_dsc_cap { - /** @v_1p2: flag for dsc1.2 version support by sink */ - bool v_1p2; - - /** @native_420: Does sink support DSC with 4:2:0 compression */ - bool native_420; - - /** - * @all_bpp: Does sink support all bpp with 4:4:4: or 4:2:2 - * compressed formats - */ - bool all_bpp; - - /** - * @bpc_supported: compressed bpc supported by sink : 10, 12 or 16 bpc - */ - u8 bpc_supported; - - /** @max_slices: maximum number of Horizontal slices supported by */ - u8 max_slices; - - /** @clk_per_slice : max pixel clock in MHz supported per slice */ - int clk_per_slice; - - /** @max_lanes : dsc max lanes supported for Fixed rate Link training */ - u8 max_lanes; - - /** @max_frl_rate_per_lane : maximum frl rate with DSC per lane */ - u8 max_frl_rate_per_lane; - - /** @total_chunk_kbytes: max size of chunks in KBs supported per line*/ - u8 total_chunk_kbytes; -}; - -/** - * struct drm_hdmi_info - runtime information about the connected HDMI sink - * - * Describes if a given display supports advanced HDMI 2.0 features. - * This information is available in CEA-861-F extension blocks (like HF-VSDB). - */ -struct drm_hdmi_info { - /** @scdc: sink's scdc support and capabilities */ - struct drm_scdc scdc; - - /** - * @y420_vdb_modes: bitmap of modes which can support ycbcr420 - * output only (not normal RGB/YCBCR444/422 outputs). The max VIC - * defined by the CEA-861-G spec is 219, so the size is 256 bits to map - * up to 256 VICs. - */ - unsigned long y420_vdb_modes[BITS_TO_LONGS(256)]; - - /** - * @y420_cmdb_modes: bitmap of modes which can support ycbcr420 - * output also, along with normal HDMI outputs. The max VIC defined by - * the CEA-861-G spec is 219, so the size is 256 bits to map up to 256 - * VICs. 
- */ - unsigned long y420_cmdb_modes[BITS_TO_LONGS(256)]; - - /** @y420_cmdb_map: bitmap of SVD index, to extraxt vcb modes */ - u64 y420_cmdb_map; - - /** @y420_dc_modes: bitmap of deep color support index */ - u8 y420_dc_modes; - - /** @max_frl_rate_per_lane: support fixed rate link */ - u8 max_frl_rate_per_lane; - - /** @max_lanes: supported by sink */ - u8 max_lanes; - - /** @dsc_cap: DSC capabilities of the sink */ - struct drm_hdmi_dsc_cap dsc_cap; -}; - -/** - * enum drm_link_status - connector's link_status property value - * - * This enum is used as the connector's link status property value. - * It is set to the values defined in uapi. - * - * @DRM_LINK_STATUS_GOOD: DP Link is Good as a result of successful - * link training - * @DRM_LINK_STATUS_BAD: DP Link is BAD as a result of link training - * failure - */ -enum drm_link_status { - DRM_LINK_STATUS_GOOD = DRM_MODE_LINK_STATUS_GOOD, - DRM_LINK_STATUS_BAD = DRM_MODE_LINK_STATUS_BAD, -}; - -/** - * enum drm_panel_orientation - panel_orientation info for &drm_display_info - * - * This enum is used to track the (LCD) panel orientation. There are no - * separate #defines for the uapi! - * - * @DRM_MODE_PANEL_ORIENTATION_UNKNOWN: The drm driver has not provided any - * panel orientation information (normal - * for non panels) in this case the "panel - * orientation" connector prop will not be - * attached. - * @DRM_MODE_PANEL_ORIENTATION_NORMAL: The top side of the panel matches the - * top side of the device's casing. - * @DRM_MODE_PANEL_ORIENTATION_BOTTOM_UP: The top side of the panel matches the - * bottom side of the device's casing, iow - * the panel is mounted upside-down. - * @DRM_MODE_PANEL_ORIENTATION_LEFT_UP: The left side of the panel matches the - * top side of the device's casing. - * @DRM_MODE_PANEL_ORIENTATION_RIGHT_UP: The right side of the panel matches the - * top side of the device's casing. 
- */ -enum drm_panel_orientation { - DRM_MODE_PANEL_ORIENTATION_UNKNOWN = -1, - DRM_MODE_PANEL_ORIENTATION_NORMAL = 0, - DRM_MODE_PANEL_ORIENTATION_BOTTOM_UP, - DRM_MODE_PANEL_ORIENTATION_LEFT_UP, - DRM_MODE_PANEL_ORIENTATION_RIGHT_UP, -}; - -/** - * struct drm_monitor_range_info - Panel's Monitor range in EDID for - * &drm_display_info - * - * This struct is used to store a frequency range supported by panel - * as parsed from EDID's detailed monitor range descriptor block. - * - * @min_vfreq: This is the min supported refresh rate in Hz from - * EDID's detailed monitor range. - * @max_vfreq: This is the max supported refresh rate in Hz from - * EDID's detailed monitor range - */ -struct drm_monitor_range_info { - u8 min_vfreq; - u8 max_vfreq; -}; - -/* - * This is a consolidated colorimetry list supported by HDMI and - * DP protocol standard. The respective connectors will register - * a property with the subset of this list (supported by that - * respective protocol). Userspace will set the colorspace through - * a colorspace property which will be created and exposed to - * userspace. 
- */ - -/* For Default case, driver will set the colorspace */ -#define DRM_MODE_COLORIMETRY_DEFAULT 0 -/* CEA 861 Normal Colorimetry options */ -#define DRM_MODE_COLORIMETRY_NO_DATA 0 -#define DRM_MODE_COLORIMETRY_SMPTE_170M_YCC 1 -#define DRM_MODE_COLORIMETRY_BT709_YCC 2 -/* CEA 861 Extended Colorimetry Options */ -#define DRM_MODE_COLORIMETRY_XVYCC_601 3 -#define DRM_MODE_COLORIMETRY_XVYCC_709 4 -#define DRM_MODE_COLORIMETRY_SYCC_601 5 -#define DRM_MODE_COLORIMETRY_OPYCC_601 6 -#define DRM_MODE_COLORIMETRY_OPRGB 7 -#define DRM_MODE_COLORIMETRY_BT2020_CYCC 8 -#define DRM_MODE_COLORIMETRY_BT2020_RGB 9 -#define DRM_MODE_COLORIMETRY_BT2020_YCC 10 -/* Additional Colorimetry extension added as part of CTA 861.G */ -#define DRM_MODE_COLORIMETRY_DCI_P3_RGB_D65 11 -#define DRM_MODE_COLORIMETRY_DCI_P3_RGB_THEATER 12 -/* Additional Colorimetry Options added for DP 1.4a VSC Colorimetry Format */ -#define DRM_MODE_COLORIMETRY_RGB_WIDE_FIXED 13 -#define DRM_MODE_COLORIMETRY_RGB_WIDE_FLOAT 14 -#define DRM_MODE_COLORIMETRY_BT601_YCC 15 - -/** - * enum drm_bus_flags - bus_flags info for &drm_display_info - * - * This enum defines signal polarities and clock edge information for signals on - * a bus as bitmask flags. - * - * The clock edge information is conveyed by two sets of symbols, - * DRM_BUS_FLAGS_*_DRIVE_\* and DRM_BUS_FLAGS_*_SAMPLE_\*. When this enum is - * used to describe a bus from the point of view of the transmitter, the - * \*_DRIVE_\* flags should be used. When used from the point of view of the - * receiver, the \*_SAMPLE_\* flags should be used. The \*_DRIVE_\* and - * \*_SAMPLE_\* flags alias each other, with the \*_SAMPLE_POSEDGE and - * \*_SAMPLE_NEGEDGE flags being equal to \*_DRIVE_NEGEDGE and \*_DRIVE_POSEDGE - * respectively. This simplifies code as signals are usually sampled on the - * opposite edge of the driving edge. 
Transmitters and receivers may however - * need to take other signal timings into account to convert between driving - * and sample edges. - */ -enum drm_bus_flags { - /** - * @DRM_BUS_FLAG_DE_LOW: - * - * The Data Enable signal is active low - */ - DRM_BUS_FLAG_DE_LOW = BIT(0), - - /** - * @DRM_BUS_FLAG_DE_HIGH: - * - * The Data Enable signal is active high - */ - DRM_BUS_FLAG_DE_HIGH = BIT(1), - - /** - * @DRM_BUS_FLAG_PIXDATA_DRIVE_POSEDGE: - * - * Data is driven on the rising edge of the pixel clock - */ - DRM_BUS_FLAG_PIXDATA_DRIVE_POSEDGE = BIT(2), - - /** - * @DRM_BUS_FLAG_PIXDATA_DRIVE_NEGEDGE: - * - * Data is driven on the falling edge of the pixel clock - */ - DRM_BUS_FLAG_PIXDATA_DRIVE_NEGEDGE = BIT(3), - - /** - * @DRM_BUS_FLAG_PIXDATA_SAMPLE_POSEDGE: - * - * Data is sampled on the rising edge of the pixel clock - */ - DRM_BUS_FLAG_PIXDATA_SAMPLE_POSEDGE = DRM_BUS_FLAG_PIXDATA_DRIVE_NEGEDGE, - - /** - * @DRM_BUS_FLAG_PIXDATA_SAMPLE_NEGEDGE: - * - * Data is sampled on the falling edge of the pixel clock - */ - DRM_BUS_FLAG_PIXDATA_SAMPLE_NEGEDGE = DRM_BUS_FLAG_PIXDATA_DRIVE_POSEDGE, - - /** - * @DRM_BUS_FLAG_DATA_MSB_TO_LSB: - * - * Data is transmitted MSB to LSB on the bus - */ - DRM_BUS_FLAG_DATA_MSB_TO_LSB = BIT(4), - - /** - * @DRM_BUS_FLAG_DATA_LSB_TO_MSB: - * - * Data is transmitted LSB to MSB on the bus - */ - DRM_BUS_FLAG_DATA_LSB_TO_MSB = BIT(5), - - /** - * @DRM_BUS_FLAG_SYNC_DRIVE_POSEDGE: - * - * Sync signals are driven on the rising edge of the pixel clock - */ - DRM_BUS_FLAG_SYNC_DRIVE_POSEDGE = BIT(6), - - /** - * @DRM_BUS_FLAG_SYNC_DRIVE_NEGEDGE: - * - * Sync signals are driven on the falling edge of the pixel clock - */ - DRM_BUS_FLAG_SYNC_DRIVE_NEGEDGE = BIT(7), - - /** - * @DRM_BUS_FLAG_SYNC_SAMPLE_POSEDGE: - * - * Sync signals are sampled on the rising edge of the pixel clock - */ - DRM_BUS_FLAG_SYNC_SAMPLE_POSEDGE = DRM_BUS_FLAG_SYNC_DRIVE_NEGEDGE, - - /** - * @DRM_BUS_FLAG_SYNC_SAMPLE_NEGEDGE: - * - * Sync signals are sampled on the 
falling edge of the pixel clock - */ - DRM_BUS_FLAG_SYNC_SAMPLE_NEGEDGE = DRM_BUS_FLAG_SYNC_DRIVE_POSEDGE, - - /** - * @DRM_BUS_FLAG_SHARP_SIGNALS: - * - * Set if the Sharp-specific signals (SPL, CLS, PS, REV) must be used - */ - DRM_BUS_FLAG_SHARP_SIGNALS = BIT(8), }; /** @@ -468,23 +93,33 @@ enum drm_bus_flags { * * Describes a given display (e.g. CRT or flat panel) and its limitations. For * fixed display sinks like built-in panels there's not much difference between - * this and &struct drm_connector. But for sinks with a real cable this + * this and struct &drm_connector. But for sinks with a real cable this * structure is meant to describe all the things at the other end of the cable. * * For sinks which provide an EDID this can be filled out by calling * drm_add_edid_modes(). */ struct drm_display_info { + /** + * @name: Name of the display. + */ + char name[DRM_DISPLAY_INFO_LEN]; + /** * @width_mm: Physical width in mm. */ - unsigned int width_mm; - + unsigned int width_mm; /** * @height_mm: Physical height in mm. */ unsigned int height_mm; + /** + * @pixel_clock: Maximum pixel clock supported by the sink, in units of + * 100Hz. This mismatches the clok in &drm_display_mode (which is in + * kHZ), because that's what the EDID uses as base unit. + */ + unsigned int pixel_clock; /** * @bpc: Maximum bits per color channel. Used by HDMI and DP outputs. */ @@ -498,16 +133,6 @@ struct drm_display_info { #define DRM_COLOR_FORMAT_RGB444 (1<<0) #define DRM_COLOR_FORMAT_YCRCB444 (1<<1) #define DRM_COLOR_FORMAT_YCRCB422 (1<<2) -#define DRM_COLOR_FORMAT_YCRCB420 (1<<3) - - /** - * @panel_orientation: Read only connector property for built-in panels, - * indicating the orientation of the panel vs the device's casing. - * drm_connector_init() sets this to DRM_MODE_PANEL_ORIENTATION_UNKNOWN. - * When not UNKNOWN this gets used by the drm_fb_helpers to rotate the - * fb to compensate and gets exported as prop to userspace. 
- */ - int panel_orientation; /** * @color_formats: HDMI Color formats, selects between RGB and YCrCb @@ -528,10 +153,16 @@ struct drm_display_info { */ unsigned int num_bus_formats; +#define DRM_BUS_FLAG_DE_LOW (1<<0) +#define DRM_BUS_FLAG_DE_HIGH (1<<1) +/* drive data on pos. edge */ +#define DRM_BUS_FLAG_PIXDATA_POSEDGE (1<<2) +/* drive data on neg. edge */ +#define DRM_BUS_FLAG_PIXDATA_NEGEDGE (1<<3) + /** * @bus_flags: Additional information (like pixel signal polarity) for - * the pixel data on the bus, using &enum drm_bus_flags values - * DRM_BUS_FLAGS\_. + * the pixel data on the bus, using DRM_BUS_FLAGS\_ defines. */ u32 bus_flags; @@ -546,25 +177,6 @@ struct drm_display_info { */ bool dvi_dual; - /** - * @is_hdmi: True if the sink is an HDMI device. - * - * This field shall be used instead of calling - * drm_detect_hdmi_monitor() when possible. - */ - bool is_hdmi; - - /** - * @has_hdmi_infoframe: Does the sink support the HDMI infoframe? - */ - bool has_hdmi_infoframe; - - /** - * @rgb_quant_range_selectable: Does the sink support selecting - * the RGB quantization range? - */ - bool rgb_quant_range_selectable; - /** * @edid_hdmi_dc_modes: Mask of supported hdmi deep color modes. Even * more stuff redundant with @bus_formats. @@ -575,84 +187,19 @@ struct drm_display_info { * @cea_rev: CEA revision of the HDMI sink. */ u8 cea_rev; - - /** - * @hdmi: advance features of a HDMI sink. - */ - struct drm_hdmi_info hdmi; - - /** - * @non_desktop: Non desktop display (HMD). - */ - bool non_desktop; - - /** - * @monitor_range: Frequency range supported by monitor range descriptor - */ - struct drm_monitor_range_info monitor_range; }; int drm_display_info_set_bus_formats(struct drm_display_info *info, const u32 *formats, unsigned int num_formats); -/** - * struct drm_connector_tv_margins - TV connector related margins - * - * Describes the margins in pixels to put around the image on TV - * connectors to deal with overscan. 
- */ -struct drm_connector_tv_margins { - /** - * @bottom: Bottom margin in pixels. - */ - unsigned int bottom; - - /** - * @left: Left margin in pixels. - */ - unsigned int left; - - /** - * @right: Right margin in pixels. - */ - unsigned int right; - - /** - * @top: Top margin in pixels. - */ - unsigned int top; -}; - -/** - * struct drm_tv_connector_state - TV connector related states - * @subconnector: selected subconnector - * @margins: TV margins - * @mode: TV mode - * @brightness: brightness in percent - * @contrast: contrast in percent - * @flicker_reduction: flicker reduction in percent - * @overscan: overscan in percent - * @saturation: saturation in percent - * @hue: hue in percent - */ -struct drm_tv_connector_state { - enum drm_mode_subconnector subconnector; - struct drm_connector_tv_margins margins; - unsigned int mode; - unsigned int brightness; - unsigned int contrast; - unsigned int flicker_reduction; - unsigned int overscan; - unsigned int saturation; - unsigned int hue; -}; - /** * struct drm_connector_state - mutable connector state + * @connector: backpointer to the connector + * @best_encoder: can be used by helpers and drivers to select the encoder + * @state: backpointer to global drm_atomic_state */ struct drm_connector_state { - /** @connector: backpointer to the connector */ struct drm_connector *connector; /** @@ -663,129 +210,9 @@ struct drm_connector_state { */ struct drm_crtc *crtc; - /** - * @best_encoder: - * - * Used by the atomic helpers to select the encoder, through the - * &drm_connector_helper_funcs.atomic_best_encoder or - * &drm_connector_helper_funcs.best_encoder callbacks. - * - * This is also used in the atomic helpers to map encoders to their - * current and previous connectors, see - * drm_atomic_get_old_connector_for_encoder() and - * drm_atomic_get_new_connector_for_encoder(). 
- * - * NOTE: Atomic drivers must fill this out (either themselves or through - * helpers), for otherwise the GETCONNECTOR and GETENCODER IOCTLs will - * not return correct data to userspace. - */ struct drm_encoder *best_encoder; - /** - * @link_status: Connector link_status to keep track of whether link is - * GOOD or BAD to notify userspace if retraining is necessary. - */ - enum drm_link_status link_status; - - /** @state: backpointer to global drm_atomic_state */ struct drm_atomic_state *state; - - /** - * @commit: Tracks the pending commit to prevent use-after-free conditions. - * - * Is only set when @crtc is NULL. - */ - struct drm_crtc_commit *commit; - - /** @tv: TV connector state */ - struct drm_tv_connector_state tv; - - /** - * @self_refresh_aware: - * - * This tracks whether a connector is aware of the self refresh state. - * It should be set to true for those connector implementations which - * understand the self refresh state. This is needed since the crtc - * registers the self refresh helpers and it doesn't know if the - * connectors downstream have implemented self refresh entry/exit. - * - * Drivers should set this to true in atomic_check if they know how to - * handle self_refresh requests. - */ - bool self_refresh_aware; - - /** - * @picture_aspect_ratio: Connector property to control the - * HDMI infoframe aspect ratio setting. - * - * The %DRM_MODE_PICTURE_ASPECT_\* values much match the - * values for &enum hdmi_picture_aspect - */ - enum hdmi_picture_aspect picture_aspect_ratio; - - /** - * @content_type: Connector property to control the - * HDMI infoframe content type setting. - * The %DRM_MODE_CONTENT_TYPE_\* values much - * match the values. - */ - unsigned int content_type; - - /** - * @hdcp_content_type: Connector property to pass the type of - * protected content. This is most commonly used for HDCP. 
- */ - unsigned int hdcp_content_type; - - /** - * @scaling_mode: Connector property to control the - * upscaling, mostly used for built-in panels. - */ - unsigned int scaling_mode; - - /** - * @content_protection: Connector property to request content - * protection. This is most commonly used for HDCP. - */ - unsigned int content_protection; - - /** - * @colorspace: State variable for Connector property to request - * colorspace change on Sink. This is most commonly used to switch - * to wider color gamuts like BT2020. - */ - u32 colorspace; - - /** - * @writeback_job: Writeback job for writeback connectors - * - * Holds the framebuffer and out-fence for a writeback connector. As - * the writeback completion may be asynchronous to the normal commit - * cycle, the writeback job lifetime is managed separately from the - * normal atomic state by this object. - * - * See also: drm_writeback_queue_job() and - * drm_writeback_signal_completion() - */ - struct drm_writeback_job *writeback_job; - - /** - * @max_requested_bpc: Connector property to limit the maximum bit - * depth of the pixels. - */ - u8 max_requested_bpc; - - /** - * @max_bpc: Connector max_bpc based on the requested max_bpc property - * and the connector bpc limitations obtained from edid. - */ - u8 max_bpc; - - /** - * @hdr_output_metadata: - * DRM blob property for HDR output metadata - */ - struct drm_property_blob *hdr_output_metadata; }; /** @@ -805,8 +232,8 @@ struct drm_connector_funcs { * implement the 4 level DPMS support on the connector any more, but * instead only have an on/off "ACTIVE" property on the CRTC object. * - * This hook is not used by atomic drivers, remapping of the legacy DPMS - * property is entirely handled in the DRM core. + * Drivers implementing atomic modeset should use + * drm_atomic_helper_connector_dpms() to implement this hook. * * RETURNS: * @@ -834,25 +261,12 @@ struct drm_connector_funcs { * connector due to a user request. 
force can be used by the driver to * avoid expensive, destructive operations during automated probing. * - * This callback is optional, if not implemented the connector will be - * considered as always being attached. - * * FIXME: * * Note that this hook is only called by the probe helper. It's not in * the helper library vtable purely for historical reasons. The only DRM * core entry point to probe connector state is @fill_modes. * - * Note that the helper library will already hold - * &drm_mode_config.connection_mutex. Drivers which need to grab additional - * locks to avoid races with concurrent modeset changes need to use - * &drm_connector_helper_funcs.detect_ctx instead. - * - * Also note that this callback can be called no matter the - * state the connector is in. Drivers that need the underlying - * device to be powered to perform the detection will first need - * to make sure it's been properly enabled. - * * RETURNS: * * drm_connector_status indicating the connector's status. @@ -881,23 +295,24 @@ struct drm_connector_funcs { * * Entry point for output detection and basic mode validation. The * driver should reprobe the output if needed (e.g. when hotplug - * handling is unreliable), add all detected modes to &drm_connector.modes + * handling is unreliable), add all detected modes to connector->modes * and filter out any the device can't support in any configuration. It * also needs to filter out any modes wider or higher than the * parameters max_width and max_height indicate. * * The drivers must also prune any modes no longer valid from - * &drm_connector.modes. Furthermore it must update - * &drm_connector.status and &drm_connector.edid. If no EDID has been - * received for this output connector->edid must be NULL. + * connector->modes. Furthermore it must update connector->status and + * connector->edid. If no EDID has been received for this output + * connector->edid must be NULL. 
* * Drivers using the probe helpers should use - * drm_helper_probe_single_connector_modes() to implement this + * drm_helper_probe_single_connector_modes() or + * drm_helper_probe_single_connector_modes_nomerge() to implement this * function. * * RETURNS: * - * The number of modes detected and filled into &drm_connector.modes. + * The number of modes detected and filled into connector->modes. */ int (*fill_modes)(struct drm_connector *connector, uint32_t max_width, uint32_t max_height); @@ -907,9 +322,11 @@ struct drm_connector_funcs { * This is the legacy entry point to update a property attached to the * connector. * + * Drivers implementing atomic modeset should use + * drm_atomic_helper_connector_set_property() to implement this hook. + * * This callback is optional if the driver does not support any legacy - * driver-private properties. For atomic drivers it is not used because - * property handling is done entirely in the DRM core. + * driver-private properties. * * RETURNS: * @@ -928,8 +345,6 @@ struct drm_connector_funcs { * core drm connector interfaces. Everything added from this callback * should be unregistered in the early_unregister callback. * - * This is called while holding &drm_connector.mutex. - * * Returns: * * 0 on success, or a negative error code on failure. @@ -944,8 +359,6 @@ struct drm_connector_funcs { * late_register(). It is called from drm_connector_unregister(), * early in the driver unload sequence to disable userspace access * before data structures are torndown. - * - * This is called while holding &drm_connector.mutex. */ void (*early_unregister)(struct drm_connector *connector); @@ -965,19 +378,17 @@ struct drm_connector_funcs { * Duplicate the current atomic state for this connector and return it. * The core and helpers guarantee that any atomic state duplicated with * this hook and still owned by the caller (i.e. 
not transferred to the - * driver by calling &drm_mode_config_funcs.atomic_commit) will be - * cleaned up by calling the @atomic_destroy_state hook in this - * structure. + * driver by calling ->atomic_commit() from struct + * &drm_mode_config_funcs) will be cleaned up by calling the + * @atomic_destroy_state hook in this structure. * - * This callback is mandatory for atomic drivers. - * - * Atomic drivers which don't subclass &struct drm_connector_state should use + * Atomic drivers which don't subclass struct &drm_connector_state should use * drm_atomic_helper_connector_duplicate_state(). Drivers that subclass the * state structure to extend it with driver-private state should use * __drm_atomic_helper_connector_duplicate_state() to make sure shared state is * duplicated in a consistent fashion across drivers. * - * It is an error to call this hook before &drm_connector.state has been + * It is an error to call this hook before connector->state has been * initialized correctly. * * NOTE: @@ -997,8 +408,6 @@ struct drm_connector_funcs { * * Destroy a state duplicated with @atomic_duplicate_state and release * or unreference all resources it references - * - * This callback is mandatory for atomic drivers. */ void (*atomic_destroy_state)(struct drm_connector *connector, struct drm_connector_state *state); @@ -1072,149 +481,68 @@ struct drm_connector_funcs { const struct drm_connector_state *state, struct drm_property *property, uint64_t *val); - - /** - * @atomic_print_state: - * - * If driver subclasses &struct drm_connector_state, it should implement - * this optional hook for printing additional driver specific state. - * - * Do not call this directly, use drm_atomic_connector_print_state() - * instead. 
- */ - void (*atomic_print_state)(struct drm_printer *p, - const struct drm_connector_state *state); }; -/** - * struct drm_cmdline_mode - DRM Mode passed through the kernel command-line - * - * Each connector can have an initial mode with additional options - * passed through the kernel command line. This structure allows to - * express those parameters and will be filled by the command-line - * parser. - */ +/* mode specified on the command line */ struct drm_cmdline_mode { - /** - * @name: - * - * Name of the mode. - */ - char name[DRM_DISPLAY_MODE_LEN]; - - /** - * @specified: - * - * Has a mode been read from the command-line? - */ bool specified; - - /** - * @refresh_specified: - * - * Did the mode have a preferred refresh rate? - */ bool refresh_specified; - - /** - * @bpp_specified: - * - * Did the mode have a preferred BPP? - */ bool bpp_specified; - - /** - * @xres: - * - * Active resolution on the X axis, in pixels. - */ - int xres; - - /** - * @yres: - * - * Active resolution on the Y axis, in pixels. - */ - int yres; - - /** - * @bpp: - * - * Bits per pixels for the mode. - */ + int xres, yres; int bpp; - - /** - * @refresh: - * - * Refresh rate, in Hertz. - */ int refresh; - - /** - * @rb: - * - * Do we need to use reduced blanking? - */ bool rb; - - /** - * @interlace: - * - * The mode is interlaced. - */ bool interlace; - - /** - * @cvt: - * - * The timings will be calculated using the VESA Coordinated - * Video Timings instead of looking up the mode from a table. - */ bool cvt; - - /** - * @margins: - * - * Add margins to the mode calculation (1.8% of xres rounded - * down to 8 pixels and 1.8% of yres). - */ bool margins; - - /** - * @force: - * - * Ignore the hotplug state of the connector, and force its - * state to one of the DRM_FORCE_* values. - */ enum drm_connector_force force; - - /** - * @rotation_reflection: - * - * Initial rotation and reflection of the mode setup from the - * command line. 
See DRM_MODE_ROTATE_* and - * DRM_MODE_REFLECT_*. The only rotations supported are - * DRM_MODE_ROTATE_0 and DRM_MODE_ROTATE_180. - */ - unsigned int rotation_reflection; - - /** - * @panel_orientation: - * - * drm-connector "panel orientation" property override value, - * DRM_MODE_PANEL_ORIENTATION_UNKNOWN if not set. - */ - enum drm_panel_orientation panel_orientation; - - /** - * @tv_margins: TV margins to apply to the mode. - */ - struct drm_connector_tv_margins tv_margins; }; /** * struct drm_connector - central DRM connector control structure + * @dev: parent DRM device + * @kdev: kernel device for sysfs attributes + * @attr: sysfs attributes + * @head: list management + * @base: base KMS object + * @name: human readable name, can be overwritten by the driver + * @connector_type: one of the DRM_MODE_CONNECTOR_ types from drm_mode.h + * @connector_type_id: index into connector type enum + * @interlace_allowed: can this connector handle interlaced modes? + * @doublescan_allowed: can this connector handle doublescan? + * @stereo_allowed: can this connector handle stereo modes? + * @registered: is this connector exposed (registered) with userspace? + * @modes: modes available on this connector (from fill_modes() + user) + * @status: one of the drm_connector_status enums (connected, not, or unknown) + * @probed_modes: list of modes derived directly from the display + * @funcs: connector control functions + * @edid_blob_ptr: DRM property containing EDID if present + * @properties: property tracking for this connector + * @dpms: current dpms state + * @helper_private: mid-layer private data + * @cmdline_mode: mode line parsed from the kernel cmdline for this connector + * @force: a DRM_FORCE_ state for forced mode sets + * @override_edid: has the EDID been overwritten through debugfs for testing? 
+ * @encoder_ids: valid encoders for this connector + * @encoder: encoder driving this connector, if any + * @eld: EDID-like data, if present + * @latency_present: AV delay info from ELD, if found + * @video_latency: video latency info from ELD, if found + * @audio_latency: audio latency info from ELD, if found + * @null_edid_counter: track sinks that give us all zeros for the EDID + * @bad_edid_counter: track sinks that give us an EDID with invalid checksum + * @edid_corrupt: indicates whether the last read EDID was corrupt + * @debugfs_entry: debugfs directory for this connector + * @state: current atomic state for this connector + * @has_tile: is this connector connected to a tiled monitor + * @tile_group: tile group for the connected monitor + * @tile_is_single_monitor: whether the tile is one monitor housing + * @num_h_tile: number of horizontal tiles in the tile group + * @num_v_tile: number of vertical tiles in the tile group + * @tile_h_loc: horizontal location of this tile + * @tile_v_loc: vertical location of this tile + * @tile_h_size: horizontal size of this tile. + * @tile_v_size: vertical size of this tile. * * Each connector may be connected to one or more CRTCs, or may be clonable by * another connector if they can share a CRTC. Each connector also has a specific @@ -1222,36 +550,15 @@ struct drm_cmdline_mode { * span multiple monitors). */ struct drm_connector { - /** @dev: parent DRM device */ struct drm_device *dev; - /** @kdev: kernel device for sysfs attributes */ struct device *kdev; - /** @attr: sysfs attributes */ struct device_attribute *attr; - - /** - * @head: - * - * List of all connectors on a @dev, linked from - * &drm_mode_config.connector_list. Protected by - * &drm_mode_config.connector_list_lock, but please only use - * &drm_connector_list_iter to walk this list. 
- */ struct list_head head; - /** @base: base KMS object */ struct drm_mode_object base; - /** @name: human readable name, can be overwritten by the driver */ char *name; - /** - * @mutex: Lock for general connector state, but currently only protects - * @registered. Most of the connector state is still protected by - * &drm_mode_config.mutex. - */ - struct mutex mutex; - /** * @index: Compacted connector index, which matches the position inside * the mode_config.list for drivers not supporting hot-add/removing. Can @@ -1260,133 +567,58 @@ struct drm_connector { */ unsigned index; - /** - * @connector_type: - * one of the DRM_MODE_CONNECTOR_ types from drm_mode.h - */ int connector_type; - /** @connector_type_id: index into connector type enum */ int connector_type_id; - /** - * @interlace_allowed: - * Can this connector handle interlaced modes? Only used by - * drm_helper_probe_single_connector_modes() for mode filtering. - */ bool interlace_allowed; - /** - * @doublescan_allowed: - * Can this connector handle doublescan? Only used by - * drm_helper_probe_single_connector_modes() for mode filtering. - */ bool doublescan_allowed; - /** - * @stereo_allowed: - * Can this connector handle stereo modes? Only used by - * drm_helper_probe_single_connector_modes() for mode filtering. - */ bool stereo_allowed; + bool registered; + struct list_head modes; /* list of modes on this connector */ - /** - * @ycbcr_420_allowed : This bool indicates if this connector is - * capable of handling YCBCR 420 output. While parsing the EDID - * blocks it's very helpful to know if the source is capable of - * handling YCBCR 420 outputs. - */ - bool ycbcr_420_allowed; - - /** - * @registration_state: Is this connector initializing, exposed - * (registered) with userspace, or unregistered? - * - * Protected by @mutex. - */ - enum drm_connector_registration_state registration_state; - - /** - * @modes: - * Modes available on this connector (from fill_modes() + user). 
- * Protected by &drm_mode_config.mutex. - */ - struct list_head modes; - - /** - * @status: - * One of the drm_connector_status enums (connected, not, or unknown). - * Protected by &drm_mode_config.mutex. - */ enum drm_connector_status status; - /** - * @probed_modes: - * These are modes added by probing with DDC or the BIOS, before - * filtering is applied. Used by the probe helpers. Protected by - * &drm_mode_config.mutex. - */ + /* these are modes added by probing with DDC or the BIOS */ struct list_head probed_modes; /** * @display_info: Display information is filled from EDID information * when a display is detected. For non hot-pluggable displays such as * flat panels in embedded systems, the driver should initialize the - * &drm_display_info.width_mm and &drm_display_info.height_mm fields - * with the physical size of the display. - * - * Protected by &drm_mode_config.mutex. + * display_info.width_mm and display_info.height_mm fields with the + * physical size of the display. */ struct drm_display_info display_info; - - /** @funcs: connector control functions */ const struct drm_connector_funcs *funcs; - /** - * @edid_blob_ptr: DRM property containing EDID if present. Protected by - * &drm_mode_config.mutex. This should be updated only by calling - * drm_connector_update_edid_property(). - */ struct drm_property_blob *edid_blob_ptr; - - /** @properties: property tracking for this connector */ struct drm_object_properties properties; - /** - * @scaling_mode_property: Optional atomic property to control the - * upscaling. See drm_connector_attach_content_protection_property(). - */ - struct drm_property *scaling_mode_property; - - /** - * @vrr_capable_property: Optional property to help userspace - * query hardware support for variable refresh rate on a connector. - * connector. Drivers can add the property to a connector by - * calling drm_connector_attach_vrr_capable_property(). 
- * - * This should be updated only by calling - * drm_connector_set_vrr_capable_property(). - */ - struct drm_property *vrr_capable_property; - - /** - * @colorspace_property: Connector property to set the suitable - * colorspace supported by the sink. - */ - struct drm_property *colorspace_property; - /** * @path_blob_ptr: * - * DRM blob property data for the DP MST path property. This should only - * be updated by calling drm_connector_set_path_property(). + * DRM blob property data for the DP MST path property. */ struct drm_property_blob *path_blob_ptr; /** - * @max_bpc_property: Default connector property for the max bpc to be - * driven out of the connector. + * @tile_blob_ptr: + * + * DRM blob property data for the tile property (used mostly by DP MST). + * This is meant for screens which are driven through separate display + * pipelines represented by &drm_crtc, which might not be running with + * genlocked clocks. For tiled panels which are genlocked, like + * dual-link LVDS or dual-link DSI, the driver should try to not expose + * the tiling and virtualize both &drm_crtc and &drm_plane if needed. */ - struct drm_property *max_bpc_property; + struct drm_property_blob *tile_blob_ptr; +/* should we poll this connector for connects and disconnects */ +/* hot plug detectable */ #define DRM_CONNECTOR_POLL_HPD (1 << 0) +/* poll for connections */ #define DRM_CONNECTOR_POLL_CONNECT (1 << 1) +/* can cleanly poll for disconnections without flickering the screen */ +/* DACs should rarely do this without a lot of testing */ #define DRM_CONNECTOR_POLL_DISCONNECT (1 << 2) /** @@ -1403,163 +635,53 @@ struct drm_connector { * Periodically poll the connector for connection. * * DRM_CONNECTOR_POLL_DISCONNECT - * Periodically poll the connector for disconnection, without - * causing flickering even when the connector is in use. DACs should - * rarely do this without a lot of testing. + * Periodically poll the connector for disconnection. 
* * Set to 0 for connectors that don't support connection status * discovery. */ uint8_t polled; - /** - * @dpms: Current dpms state. For legacy drivers the - * &drm_connector_funcs.dpms callback must update this. For atomic - * drivers, this is handled by the core atomic code, and drivers must - * only take &drm_crtc_state.active into account. - */ + /* requested DPMS state */ int dpms; - /** @helper_private: mid-layer private data */ const struct drm_connector_helper_funcs *helper_private; - /** @cmdline_mode: mode line parsed from the kernel cmdline for this connector */ + /* forced on connector */ struct drm_cmdline_mode cmdline_mode; - /** @force: a DRM_FORCE_ state for forced mode sets */ enum drm_connector_force force; - /** @override_edid: has the EDID been overwritten through debugfs for testing? */ bool override_edid; - /** @epoch_counter: used to detect any other changes in connector, besides status */ - u64 epoch_counter; - /** - * @possible_encoders: Bit mask of encoders that can drive this - * connector, drm_encoder_index() determines the index into the bitfield - * and the bits are set with drm_connector_attach_encoder(). - */ - u32 possible_encoders; - - /** - * @encoder: Currently bound encoder driving this connector, if any. - * Only really meaningful for non-atomic drivers. Atomic drivers should - * instead look at &drm_connector_state.best_encoder, and in case they - * need the CRTC driving this output, &drm_connector_state.crtc. - */ - struct drm_encoder *encoder; +#define DRM_CONNECTOR_MAX_ENCODER 3 + uint32_t encoder_ids[DRM_CONNECTOR_MAX_ENCODER]; + struct drm_encoder *encoder; /* currently active encoder */ #define MAX_ELD_BYTES 128 - /** @eld: EDID-like data, if present */ + /* EDID bits */ uint8_t eld[MAX_ELD_BYTES]; - /** @latency_present: AV delay info from ELD, if found */ bool latency_present[2]; - /** - * @video_latency: Video latency info from ELD, if found. 
- * [0]: progressive, [1]: interlaced - */ - int video_latency[2]; - /** - * @audio_latency: audio latency info from ELD, if found - * [0]: progressive, [1]: interlaced - */ + int video_latency[2]; /* [0]: progressive, [1]: interlaced */ int audio_latency[2]; - - /** - * @ddc: associated ddc adapter. - * A connector usually has its associated ddc adapter. If a driver uses - * this field, then an appropriate symbolic link is created in connector - * sysfs directory to make it easy for the user to tell which i2c - * adapter is for a particular display. - * - * The field should be set by calling drm_connector_init_with_ddc(). - */ - struct i2c_adapter *ddc; - - /** - * @null_edid_counter: track sinks that give us all zeros for the EDID. - * Needed to workaround some HW bugs where we get all 0s - */ - int null_edid_counter; - - /** @bad_edid_counter: track sinks that give us an EDID with invalid checksum */ + int null_edid_counter; /* needed to workaround some HW bugs where we get all 0s */ unsigned bad_edid_counter; - /** - * @edid_corrupt: Indicates whether the last read EDID was corrupt. Used - * in Displayport compliance testing - Displayport Link CTS Core 1.2 - * rev1.1 4.2.2.6 + /* Flag for raw EDID header corruption - used in Displayport + * compliance testing - * Displayport Link CTS Core 1.2 rev1.1 4.2.2.6 */ bool edid_corrupt; - /** - * @real_edid_checksum: real edid checksum for corrupted edid block. - * Required in Displayport 1.4 compliance testing - * rev1.1 4.2.2.6 - */ - u8 real_edid_checksum; - /** @debugfs_entry: debugfs directory for this connector */ struct dentry *debugfs_entry; - /** - * @state: - * - * Current atomic state for this connector. - * - * This is protected by &drm_mode_config.connection_mutex. Note that - * nonblocking atomic commits access the current connector state without - * taking locks. 
Either by going through the &struct drm_atomic_state - * pointers, see for_each_oldnew_connector_in_state(), - * for_each_old_connector_in_state() and - * for_each_new_connector_in_state(). Or through careful ordering of - * atomic commit operations as implemented in the atomic helpers, see - * &struct drm_crtc_commit. - */ struct drm_connector_state *state; - /* DisplayID bits. FIXME: Extract into a substruct? */ - - /** - * @tile_blob_ptr: - * - * DRM blob property data for the tile property (used mostly by DP MST). - * This is meant for screens which are driven through separate display - * pipelines represented by &drm_crtc, which might not be running with - * genlocked clocks. For tiled panels which are genlocked, like - * dual-link LVDS or dual-link DSI, the driver should try to not expose - * the tiling and virtualize both &drm_crtc and &drm_plane if needed. - * - * This should only be updated by calling - * drm_connector_set_tile_property(). - */ - struct drm_property_blob *tile_blob_ptr; - - /** @has_tile: is this connector connected to a tiled monitor */ + /* DisplayID bits */ bool has_tile; - /** @tile_group: tile group for the connected monitor */ struct drm_tile_group *tile_group; - /** @tile_is_single_monitor: whether the tile is one monitor housing */ bool tile_is_single_monitor; - /** @num_h_tile: number of horizontal tiles in the tile group */ - /** @num_v_tile: number of vertical tiles in the tile group */ uint8_t num_h_tile, num_v_tile; - /** @tile_h_loc: horizontal location of this tile */ - /** @tile_v_loc: vertical location of this tile */ uint8_t tile_h_loc, tile_v_loc; - /** @tile_h_size: horizontal size of this tile. */ - /** @tile_v_size: vertical size of this tile. */ uint16_t tile_h_size, tile_v_size; - - /** - * @free_node: - * - * List used only by &drm_connector_list_iter to be able to clean up a - * connector from any context, in conjunction with - * &drm_mode_config.connector_free_work. 
- */ - struct llist_node free_node; - - /** @hdr_sink_metadata: HDR Metadata Information read from sink */ - struct hdr_sink_metadata hdr_sink_metadata; }; #define obj_to_connector(x) container_of(x, struct drm_connector, base) @@ -1568,89 +690,55 @@ int drm_connector_init(struct drm_device *dev, struct drm_connector *connector, const struct drm_connector_funcs *funcs, int connector_type); -int drm_connector_init_with_ddc(struct drm_device *dev, - struct drm_connector *connector, - const struct drm_connector_funcs *funcs, - int connector_type, - struct i2c_adapter *ddc); -void drm_connector_attach_edid_property(struct drm_connector *connector); int drm_connector_register(struct drm_connector *connector); void drm_connector_unregister(struct drm_connector *connector); -int drm_connector_attach_encoder(struct drm_connector *connector, +int drm_mode_connector_attach_encoder(struct drm_connector *connector, struct drm_encoder *encoder); void drm_connector_cleanup(struct drm_connector *connector); - -static inline unsigned int drm_connector_index(const struct drm_connector *connector) +static inline unsigned drm_connector_index(struct drm_connector *connector) { return connector->index; } -static inline u32 drm_connector_mask(const struct drm_connector *connector) -{ - return 1 << connector->index; -} - /** * drm_connector_lookup - lookup connector object * @dev: DRM device - * @file_priv: drm file to check for lease against. * @id: connector object id * * This function looks up the connector object specified by id * add takes a reference to it. */ static inline struct drm_connector *drm_connector_lookup(struct drm_device *dev, - struct drm_file *file_priv, uint32_t id) { struct drm_mode_object *mo; - mo = drm_mode_object_find(dev, file_priv, id, DRM_MODE_OBJECT_CONNECTOR); + mo = drm_mode_object_find(dev, id, DRM_MODE_OBJECT_CONNECTOR); return mo ? 
obj_to_connector(mo) : NULL; } /** - * drm_connector_get - acquire a connector reference - * @connector: DRM connector + * drm_connector_reference - incr the connector refcnt + * @connector: connector * * This function increments the connector's refcount. */ -static inline void drm_connector_get(struct drm_connector *connector) +static inline void drm_connector_reference(struct drm_connector *connector) { - drm_mode_object_get(&connector->base); + drm_mode_object_reference(&connector->base); } /** - * drm_connector_put - release a connector reference - * @connector: DRM connector + * drm_connector_unreference - unref a connector + * @connector: connector to unref * - * This function decrements the connector's reference count and frees the - * object if the reference count drops to zero. + * This function decrements the connector's refcount and frees it if it drops to zero. */ -static inline void drm_connector_put(struct drm_connector *connector) +static inline void drm_connector_unreference(struct drm_connector *connector) { - drm_mode_object_put(&connector->base); + drm_mode_object_unreference(&connector->base); } -/** - * drm_connector_is_unregistered - has the connector been unregistered from - * userspace? - * @connector: DRM connector - * - * Checks whether or not @connector has been unregistered from userspace. - * - * Returns: - * True if the connector was unregistered, false if the connector is - * registered or has not yet been registered with userspace. 
- */ -static inline bool -drm_connector_is_unregistered(struct drm_connector *connector) -{ - return READ_ONCE(connector->registration_state) == - DRM_CONNECTOR_UNREGISTERED; -} - -const char *drm_get_connector_type_name(unsigned int connector_type); const char *drm_get_connector_status_name(enum drm_connector_status status); const char *drm_get_subpixel_order_name(enum subpixel_order order); const char *drm_get_dpms_name(int val); @@ -1658,128 +746,33 @@ const char *drm_get_dvi_i_subconnector_name(int val); const char *drm_get_dvi_i_select_name(int val); const char *drm_get_tv_subconnector_name(int val); const char *drm_get_tv_select_name(int val); -const char *drm_get_dp_subconnector_name(int val); -const char *drm_get_content_protection_name(int val); -const char *drm_get_hdcp_content_type_name(int val); int drm_mode_create_dvi_i_properties(struct drm_device *dev); -void drm_connector_attach_dp_subconnector_property(struct drm_connector *connector); - -int drm_mode_create_tv_margin_properties(struct drm_device *dev); int drm_mode_create_tv_properties(struct drm_device *dev, unsigned int num_modes, const char * const modes[]); -void drm_connector_attach_tv_margin_properties(struct drm_connector *conn); int drm_mode_create_scaling_mode_property(struct drm_device *dev); -int drm_connector_attach_content_type_property(struct drm_connector *dev); -int drm_connector_attach_scaling_mode_property(struct drm_connector *connector, - u32 scaling_mode_mask); -int drm_connector_attach_vrr_capable_property( - struct drm_connector *connector); -int drm_connector_attach_colorspace_property(struct drm_connector *connector); -int drm_connector_attach_hdr_output_metadata_property(struct drm_connector *connector); -bool drm_connector_atomic_hdr_metadata_equal(struct drm_connector_state *old_state, - struct drm_connector_state *new_state); int drm_mode_create_aspect_ratio_property(struct drm_device *dev); -int drm_mode_create_hdmi_colorspace_property(struct drm_connector 
*connector); -int drm_mode_create_dp_colorspace_property(struct drm_connector *connector); -int drm_mode_create_content_type_property(struct drm_device *dev); -void drm_hdmi_avi_infoframe_content_type(struct hdmi_avi_infoframe *frame, - const struct drm_connector_state *conn_state); - int drm_mode_create_suggested_offset_properties(struct drm_device *dev); -int drm_connector_set_path_property(struct drm_connector *connector, - const char *path); -int drm_connector_set_tile_property(struct drm_connector *connector); -int drm_connector_update_edid_property(struct drm_connector *connector, - const struct edid *edid); -void drm_connector_set_link_status_property(struct drm_connector *connector, - uint64_t link_status); -void drm_connector_set_vrr_capable_property( - struct drm_connector *connector, bool capable); -int drm_connector_set_panel_orientation( - struct drm_connector *connector, - enum drm_panel_orientation panel_orientation); -int drm_connector_set_panel_orientation_with_quirk( - struct drm_connector *connector, - enum drm_panel_orientation panel_orientation, - int width, int height); -int drm_connector_attach_max_bpc_property(struct drm_connector *connector, - int min, int max); +int drm_mode_connector_set_path_property(struct drm_connector *connector, + const char *path); +int drm_mode_connector_set_tile_property(struct drm_connector *connector); +int drm_mode_connector_update_edid_property(struct drm_connector *connector, + const struct edid *edid); /** - * struct drm_tile_group - Tile group metadata - * @refcount: reference count - * @dev: DRM device - * @id: tile group id exposed to userspace - * @group_data: Sink-private data identifying this group + * drm_for_each_connector - iterate over all connectors + * @connector: the loop cursor + * @dev: the DRM device * - * @group_data corresponds to displayid vend/prod/serial for external screens - * with an EDID. + * Iterate over all connectors of @dev. 
*/ -struct drm_tile_group { - struct kref refcount; - struct drm_device *dev; - int id; - u8 group_data[8]; -}; - -struct drm_tile_group *drm_mode_create_tile_group(struct drm_device *dev, - const char topology[8]); -struct drm_tile_group *drm_mode_get_tile_group(struct drm_device *dev, - const char topology[8]); -void drm_mode_put_tile_group(struct drm_device *dev, - struct drm_tile_group *tg); - -/** - * struct drm_connector_list_iter - connector_list iterator - * - * This iterator tracks state needed to be able to walk the connector_list - * within struct drm_mode_config. Only use together with - * drm_connector_list_iter_begin(), drm_connector_list_iter_end() and - * drm_connector_list_iter_next() respectively the convenience macro - * drm_for_each_connector_iter(). - * - * Note that the return value of drm_connector_list_iter_next() is only valid - * up to the next drm_connector_list_iter_next() or - * drm_connector_list_iter_end() call. If you want to use the connector later, - * then you need to grab your own reference first using drm_connector_get(). - */ -struct drm_connector_list_iter { -/* private: */ - struct drm_device *dev; - struct drm_connector *conn; -}; - -void drm_connector_list_iter_begin(struct drm_device *dev, - struct drm_connector_list_iter *iter); -struct drm_connector * -drm_connector_list_iter_next(struct drm_connector_list_iter *iter); -void drm_connector_list_iter_end(struct drm_connector_list_iter *iter); - -bool drm_connector_has_possible_encoder(struct drm_connector *connector, - struct drm_encoder *encoder); - -/** - * drm_for_each_connector_iter - connector_list iterator macro - * @connector: &struct drm_connector pointer used as cursor - * @iter: &struct drm_connector_list_iter - * - * Note that @connector is only valid within the list body, if you want to use - * @connector after calling drm_connector_list_iter_end() then you need to grab - * your own reference first using drm_connector_get(). 
- */ -#define drm_for_each_connector_iter(connector, iter) \ - while ((connector = drm_connector_list_iter_next(iter))) - -/** - * drm_connector_for_each_possible_encoder - iterate connector's possible encoders - * @connector: &struct drm_connector pointer - * @encoder: &struct drm_encoder pointer used as cursor - */ -#define drm_connector_for_each_possible_encoder(connector, encoder) \ - drm_for_each_encoder_mask(encoder, (connector)->dev, \ - (connector)->possible_encoders) +#define drm_for_each_connector(connector, dev) \ + for (assert_drm_connector_list_read_locked(&(dev)->mode_config), \ + connector = list_first_entry(&(dev)->mode_config.connector_list, \ + struct drm_connector, head); \ + &connector->head != (&(dev)->mode_config.connector_list); \ + connector = list_next_entry(connector, head)) #endif diff --git a/include/drm/drm_crtc.h b/include/drm/drm_crtc.h index 13eeba2a75..0aa2925265 100644 --- a/include/drm/drm_crtc.h +++ b/include/drm/drm_crtc.h @@ -28,6 +28,7 @@ #include #include #include +#include #include #include #include @@ -39,23 +40,20 @@ #include #include #include -#include +#include #include +#include #include #include #include #include -#include -#include struct drm_device; struct drm_mode_set; struct drm_file; struct drm_clip_rect; -struct drm_printer; -struct drm_self_refresh_data; struct device_node; -struct dma_fence; +struct fence; struct edid; static inline int64_t U642I64(uint64_t val) @@ -67,270 +65,98 @@ static inline uint64_t I642U64(int64_t val) return (uint64_t)*((uint64_t *)&val); } +/* data corresponds to displayid vend/prod/serial */ +struct drm_tile_group { + struct kref refcount; + struct drm_device *dev; + int id; + u8 group_data[8]; +}; + struct drm_crtc; +struct drm_encoder; struct drm_pending_vblank_event; struct drm_plane; struct drm_bridge; struct drm_atomic_state; struct drm_crtc_helper_funcs; +struct drm_encoder_helper_funcs; struct drm_plane_helper_funcs; /** * struct drm_crtc_state - mutable CRTC state + * @crtc: 
backpointer to the CRTC + * @enable: whether the CRTC should be enabled, gates all other state + * @active: whether the CRTC is actively displaying (used for DPMS) + * @planes_changed: planes on this crtc are updated + * @mode_changed: crtc_state->mode or crtc_state->enable has been changed + * @active_changed: crtc_state->active has been toggled. + * @connectors_changed: connectors to this crtc have been updated + * @zpos_changed: zpos values of planes on this crtc have been updated + * @color_mgmt_changed: color management properties have changed (degamma or + * gamma LUT or CSC matrix) + * @plane_mask: bitmask of (1 << drm_plane_index(plane)) of attached planes + * @connector_mask: bitmask of (1 << drm_connector_index(connector)) of attached connectors + * @encoder_mask: bitmask of (1 << drm_encoder_index(encoder)) of attached encoders + * @last_vblank_count: for helpers and drivers to capture the vblank of the + * update to ensure framebuffer cleanup isn't done too early + * @adjusted_mode: for use by helpers and drivers to compute adjusted mode timings + * @mode: current mode timings + * @mode_blob: &drm_property_blob for @mode + * @degamma_lut: Lookup table for converting framebuffer pixel data + * before apply the conversion matrix + * @ctm: Transformation matrix + * @gamma_lut: Lookup table for converting pixel data after the + * conversion matrix + * @state: backpointer to global drm_atomic_state * - * Note that the distinction between @enable and @active is rather subtle: + * Note that the distinction between @enable and @active is rather subtile: * Flipping @active while @enable is set without changing anything else may - * never return in a failure from the &drm_mode_config_funcs.atomic_check - * callback. Userspace assumes that a DPMS On will always succeed. In other - * words: @enable controls resource assignment, @active controls the actual - * hardware state. 
- * - * The three booleans active_changed, connectors_changed and mode_changed are - * intended to indicate whether a full modeset is needed, rather than strictly - * describing what has changed in a commit. See also: - * drm_atomic_crtc_needs_modeset() - * - * WARNING: Transitional helpers (like drm_helper_crtc_mode_set() or - * drm_helper_crtc_mode_set_base()) do not maintain many of the derived control - * state like @plane_mask so drivers not converted over to atomic helpers should - * not rely on these being accurate! + * never return in a failure from the ->atomic_check callback. Userspace assumes + * that a DPMS On will always succeed. In other words: @enable controls resource + * assignment, @active controls the actual hardware state. */ struct drm_crtc_state { - /** @crtc: backpointer to the CRTC */ struct drm_crtc *crtc; - /** - * @enable: Whether the CRTC should be enabled, gates all other state. - * This controls reservations of shared resources. Actual hardware state - * is controlled by @active. - */ bool enable; - - /** - * @active: Whether the CRTC is actively displaying (used for DPMS). - * Implies that @enable is set. The driver must not release any shared - * resources if @active is set to false but @enable still true, because - * userspace expects that a DPMS ON always succeeds. - * - * Hence drivers must not consult @active in their various - * &drm_mode_config_funcs.atomic_check callback to reject an atomic - * commit. They can consult it to aid in the computation of derived - * hardware state, since even in the DPMS OFF state the display hardware - * should be as much powered down as when the CRTC is completely - * disabled through setting @enable to false. - */ bool active; - /** - * @planes_changed: Planes on this crtc are updated. Used by the atomic - * helpers and drivers to steer the atomic commit control flow. 
- */ + /* computed state bits used by helpers and drivers */ bool planes_changed : 1; - - /** - * @mode_changed: @mode or @enable has been changed. Used by the atomic - * helpers and drivers to steer the atomic commit control flow. See also - * drm_atomic_crtc_needs_modeset(). - * - * Drivers are supposed to set this for any CRTC state changes that - * require a full modeset. They can also reset it to false if e.g. a - * @mode change can be done without a full modeset by only changing - * scaler settings. - */ bool mode_changed : 1; - - /** - * @active_changed: @active has been toggled. Used by the atomic - * helpers and drivers to steer the atomic commit control flow. See also - * drm_atomic_crtc_needs_modeset(). - */ bool active_changed : 1; - - /** - * @connectors_changed: Connectors to this crtc have been updated, - * either in their state or routing. Used by the atomic - * helpers and drivers to steer the atomic commit control flow. See also - * drm_atomic_crtc_needs_modeset(). - * - * Drivers are supposed to set this as-needed from their own atomic - * check code, e.g. from &drm_encoder_helper_funcs.atomic_check - */ bool connectors_changed : 1; - /** - * @zpos_changed: zpos values of planes on this crtc have been updated. - * Used by the atomic helpers and drivers to steer the atomic commit - * control flow. - */ bool zpos_changed : 1; - /** - * @color_mgmt_changed: Color management properties have changed - * (@gamma_lut, @degamma_lut or @ctm). Used by the atomic helpers and - * drivers to steer the atomic commit control flow. - */ bool color_mgmt_changed : 1; - /** - * @no_vblank: - * - * Reflects the ability of a CRTC to send VBLANK events. This state - * usually depends on the pipeline configuration. If set to true, DRM - * atomic helpers will send out a fake VBLANK event during display - * updates after all hardware changes have been committed. This is - * implemented in drm_atomic_helper_fake_vblank(). 
- * - * One usage is for drivers and/or hardware without support for VBLANK - * interrupts. Such drivers typically do not initialize vblanking - * (i.e., call drm_vblank_init() with the number of CRTCs). For CRTCs - * without initialized vblanking, this field is set to true in - * drm_atomic_helper_check_modeset(), and a fake VBLANK event will be - * send out on each update of the display pipeline by - * drm_atomic_helper_fake_vblank(). - * - * Another usage is CRTCs feeding a writeback connector operating in - * oneshot mode. In this case the fake VBLANK event is only generated - * when a job is queued to the writeback connector, and we want the - * core to fake VBLANK events when this part of the pipeline hasn't - * changed but others had or when the CRTC and connectors are being - * disabled. - * - * __drm_atomic_helper_crtc_duplicate_state() will not reset the value - * from the current state, the CRTC driver is then responsible for - * updating this field when needed. - * - * Note that the combination of &drm_crtc_state.event == NULL and - * &drm_crtc_state.no_blank == true is valid and usually used when the - * writeback connector attached to the CRTC has a new job queued. In - * this case the driver will send the VBLANK event on its own when the - * writeback job is complete. - */ - bool no_vblank : 1; - - /** - * @plane_mask: Bitmask of drm_plane_mask(plane) of planes attached to - * this CRTC. + /* attached planes bitmask: + * WARNING: transitional helpers do not maintain plane_mask so + * drivers not converted over to atomic helpers should not rely + * on plane_mask being accurate! */ u32 plane_mask; - /** - * @connector_mask: Bitmask of drm_connector_mask(connector) of - * connectors attached to this CRTC. - */ u32 connector_mask; - - /** - * @encoder_mask: Bitmask of drm_encoder_mask(encoder) of encoders - * attached to this CRTC. 
- */ u32 encoder_mask; - /** - * @adjusted_mode: - * - * Internal display timings which can be used by the driver to handle - * differences between the mode requested by userspace in @mode and what - * is actually programmed into the hardware. - * - * For drivers using &drm_bridge, this stores hardware display timings - * used between the CRTC and the first bridge. For other drivers, the - * meaning of the adjusted_mode field is purely driver implementation - * defined information, and will usually be used to store the hardware - * display timings used between the CRTC and encoder blocks. - */ + /* last_vblank_count: for vblank waits before cleanup */ + u32 last_vblank_count; + + /* adjusted_mode: for use by helpers and drivers */ struct drm_display_mode adjusted_mode; - /** - * @mode: - * - * Display timings requested by userspace. The driver should try to - * match the refresh rate as close as possible (but note that it's - * undefined what exactly is close enough, e.g. some of the HDMI modes - * only differ in less than 1% of the refresh rate). The active width - * and height as observed by userspace for positioning planes must match - * exactly. - * - * For external connectors where the sink isn't fixed (like with a - * built-in panel), this mode here should match the physical mode on the - * wire to the last details (i.e. including sync polarities and - * everything). - */ struct drm_display_mode mode; - /** - * @mode_blob: &drm_property_blob for @mode, for exposing the mode to - * atomic userspace. - */ + /* blob property to expose current mode to atomic userspace */ struct drm_property_blob *mode_blob; - /** - * @degamma_lut: - * - * Lookup table for converting framebuffer pixel data before apply the - * color conversion matrix @ctm. See drm_crtc_enable_color_mgmt(). The - * blob (if not NULL) is an array of &struct drm_color_lut. 
- */ + /* blob property to expose color management to userspace */ struct drm_property_blob *degamma_lut; - - /** - * @ctm: - * - * Color transformation matrix. See drm_crtc_enable_color_mgmt(). The - * blob (if not NULL) is a &struct drm_color_ctm. - */ struct drm_property_blob *ctm; - - /** - * @gamma_lut: - * - * Lookup table for converting pixel data after the color conversion - * matrix @ctm. See drm_crtc_enable_color_mgmt(). The blob (if not - * NULL) is an array of &struct drm_color_lut. - */ struct drm_property_blob *gamma_lut; - /** - * @target_vblank: - * - * Target vertical blank period when a page flip - * should take effect. - */ - u32 target_vblank; - - /** - * @async_flip: - * - * This is set when DRM_MODE_PAGE_FLIP_ASYNC is set in the legacy - * PAGE_FLIP IOCTL. It's not wired up for the atomic IOCTL itself yet. - */ - bool async_flip; - - /** - * @vrr_enabled: - * - * Indicates if variable refresh rate should be enabled for the CRTC. - * Support for the requested vrr state will depend on driver and - * hardware capabiltiy - lacking support is not treated as failure. - */ - bool vrr_enabled; - - /** - * @self_refresh_active: - * - * Used by the self refresh helpers to denote when a self refresh - * transition is occurring. This will be set on enable/disable callbacks - * when self refresh is being enabled or disabled. In some cases, it may - * not be desirable to fully shut off the crtc during self refresh. - * CRTC's can inspect this flag and determine the best course of action. - */ - bool self_refresh_active; - - /** - * @scaling_filter: - * - * Scaling filter to be applied - */ - enum drm_scaling_filter scaling_filter; - /** * @event: * @@ -342,9 +168,7 @@ struct drm_crtc_state { * atomic commit. In that case the event can be send out any time * after the hardware has stopped scanning out the current * framebuffers. It should contain the timestamp and counter for the - * last vblank before the display pipeline was shut off. 
The simplest - * way to achieve that is calling drm_crtc_send_vblank_event() - * somewhen after drm_crtc_vblank_off() has been called. + * last vblank before the display pipeline was shut off. * * - For a CRTC which is enabled at the end of the commit (even when it * undergoes an full modeset) the vblank timestamp and counter must @@ -355,14 +179,7 @@ struct drm_crtc_state { * - Events for disabled CRTCs are not allowed, and drivers can ignore * that case. * - * For very simple hardware without VBLANK interrupt, enabling - * &struct drm_crtc_state.no_vblank makes DRM's atomic commit helpers - * send a fake VBLANK event at the end of the display update after all - * hardware changes have been applied. See - * drm_atomic_helper_fake_vblank(). - * - * For more complex hardware this - * can be handled by the drm_crtc_send_vblank_event() function, + * This can be handled by the drm_crtc_send_vblank_event() function, * which the driver should call on the provided event upon completion of * the atomic commit. Note that if the driver supports vblank signalling * and timestamping the vblank counters and timestamps must agree with @@ -379,25 +196,9 @@ struct drm_crtc_state { * drm_crtc_arm_vblank_event(). See the documentation of that function * for a detailed discussion of the constraints it needs to be used * safely. - * - * If the device can't notify of flip completion in a race-free way - * at all, then the event should be armed just after the page flip is - * committed. In the worst case the driver will send the event to - * userspace one frame too late. This doesn't allow for a real atomic - * update, but it should avoid tearing. */ struct drm_pending_vblank_event *event; - /** - * @commit: - * - * This tracks how the commit for this update proceeds through the - * various phases. This is never cleared, except when we destroy the - * state, so that subsequent commits can synchronize with previous ones. 
- */ - struct drm_crtc_commit *commit; - - /** @state: backpointer to global drm_atomic_state */ struct drm_atomic_state *state; }; @@ -498,20 +299,21 @@ struct drm_crtc_funcs { * * This callback is optional. * - * Atomic drivers who want to support gamma tables should implement the - * atomic color management support, enabled by calling - * drm_crtc_enable_color_mgmt(), which then supports the legacy gamma - * interface through the drm_atomic_helper_legacy_gamma_set() - * compatibility implementation. + * NOTE: + * + * Drivers that support gamma tables and also fbdev emulation through + * the provided helper library need to take care to fill out the gamma + * hooks for both. Currently there's a bit an unfortunate duplication + * going on, which should eventually be unified to just one set of + * hooks. */ int (*gamma_set)(struct drm_crtc *crtc, u16 *r, u16 *g, u16 *b, - uint32_t size, - struct drm_modeset_acquire_ctx *ctx); + uint32_t size); /** * @destroy: * - * Clean up CRTC resources. This is only called at driver unload time + * Clean up plane resources. This is only called at driver unload time * through drm_mode_config_cleanup() since a CRTC cannot be hotplugged * in DRM. */ @@ -522,7 +324,7 @@ struct drm_crtc_funcs { * * This is the main legacy entry point to change the modeset state on a * CRTC. All the details of the desired configuration are passed in a - * &struct drm_mode_set - see there for details. + * struct &drm_mode_set - see there for details. * * Drivers implementing atomic modeset should use * drm_atomic_helper_set_config() to implement this hook. @@ -531,8 +333,7 @@ struct drm_crtc_funcs { * * 0 on success or a negative error code on failure. */ - int (*set_config)(struct drm_mode_set *set, - struct drm_modeset_acquire_ctx *ctx); + int (*set_config)(struct drm_mode_set *set); /** * @page_flip: @@ -545,8 +346,8 @@ struct drm_crtc_funcs { * through the DRM_MODE_PAGE_FLIP_ASYNC flag). 
When an application * requests a page flip the DRM core verifies that the new frame buffer * is large enough to be scanned out by the CRTC in the currently - * configured mode and then calls this hook with a pointer to the new - * frame buffer. + * configured mode and then calls the CRTC ->page_flip() operation with a + * pointer to the new frame buffer. * * The driver must wait for any pending rendering to the new framebuffer * to complete before executing the flip. It should also wait for any @@ -554,7 +355,7 @@ struct drm_crtc_funcs { * shared dma-buf. * * An application can request to be notified when the page flip has - * completed. The drm core will supply a &struct drm_event in the event + * completed. The drm core will supply a struct &drm_event in the event * parameter in this case. This can be handled by the * drm_crtc_send_vblank_event() function, which the driver should call on * the provided event upon completion of the flip. Note that if @@ -581,7 +382,7 @@ struct drm_crtc_funcs { * RETURNS: * * 0 on success or a negative error code on failure. Note that if a - * page flip operation is already pending the callback should return + * ->page_flip() operation is already pending the callback should return * -EBUSY. Pageflips on a disabled CRTC (either by setting a NULL mode * or just runtime disabled through DPMS respectively the new atomic * "ACTIVE" state) should result in an -EINVAL error code. 
Note that @@ -590,8 +391,7 @@ struct drm_crtc_funcs { int (*page_flip)(struct drm_crtc *crtc, struct drm_framebuffer *fb, struct drm_pending_vblank_event *event, - uint32_t flags, - struct drm_modeset_acquire_ctx *ctx); + uint32_t flags); /** * @page_flip_target: @@ -609,8 +409,7 @@ struct drm_crtc_funcs { int (*page_flip_target)(struct drm_crtc *crtc, struct drm_framebuffer *fb, struct drm_pending_vblank_event *event, - uint32_t flags, uint32_t target, - struct drm_modeset_acquire_ctx *ctx); + uint32_t flags, uint32_t target); /** * @set_property: @@ -618,9 +417,11 @@ struct drm_crtc_funcs { * This is the legacy entry point to update a property attached to the * CRTC. * + * Drivers implementing atomic modeset should use + * drm_atomic_helper_crtc_set_property() to implement this hook. + * * This callback is optional if the driver does not support any legacy - * driver-private properties. For atomic drivers it is not used because - * property handling is done entirely in the DRM core. + * driver-private properties. * * RETURNS: * @@ -633,21 +434,19 @@ struct drm_crtc_funcs { * @atomic_duplicate_state: * * Duplicate the current atomic state for this CRTC and return it. - * The core and helpers guarantee that any atomic state duplicated with + * The core and helpers gurantee that any atomic state duplicated with * this hook and still owned by the caller (i.e. not transferred to the - * driver by calling &drm_mode_config_funcs.atomic_commit) will be - * cleaned up by calling the @atomic_destroy_state hook in this - * structure. + * driver by calling ->atomic_commit() from struct + * &drm_mode_config_funcs) will be cleaned up by calling the + * @atomic_destroy_state hook in this structure. * - * This callback is mandatory for atomic drivers. - * - * Atomic drivers which don't subclass &struct drm_crtc_state should use + * Atomic drivers which don't subclass struct &drm_crtc should use * drm_atomic_helper_crtc_duplicate_state(). 
Drivers that subclass the * state structure to extend it with driver-private state should use * __drm_atomic_helper_crtc_duplicate_state() to make sure shared state is * duplicated in a consistent fashion across drivers. * - * It is an error to call this hook before &drm_crtc.state has been + * It is an error to call this hook before crtc->state has been * initialized correctly. * * NOTE: @@ -667,8 +466,6 @@ struct drm_crtc_funcs { * * Destroy a state duplicated with @atomic_duplicate_state and release * or unreference all resources it references - * - * This callback is mandatory for atomic drivers. */ void (*atomic_destroy_state)(struct drm_crtc *crtc, struct drm_crtc_state *state); @@ -762,235 +559,60 @@ struct drm_crtc_funcs { * * This optional hook should be used to unregister the additional * userspace interfaces attached to the crtc from - * @late_register. It is called from drm_dev_unregister(), + * late_unregister(). It is called from drm_dev_unregister(), * early in the driver unload sequence to disable userspace access * before data structures are torndown. */ void (*early_unregister)(struct drm_crtc *crtc); - - /** - * @set_crc_source: - * - * Changes the source of CRC checksums of frames at the request of - * userspace, typically for testing purposes. The sources available are - * specific of each driver and a %NULL value indicates that CRC - * generation is to be switched off. - * - * When CRC generation is enabled, the driver should call - * drm_crtc_add_crc_entry() at each frame, providing any information - * that characterizes the frame contents in the crcN arguments, as - * provided from the configured source. Drivers must accept an "auto" - * source name that will select a default source for this CRTC. - * - * This may trigger an atomic modeset commit if necessary, to enable CRC - * generation. - * - * Note that "auto" can depend upon the current modeset configuration, - * e.g. it could pick an encoder or output specific CRC sampling point. 
- * - * This callback is optional if the driver does not support any CRC - * generation functionality. - * - * RETURNS: - * - * 0 on success or a negative error code on failure. - */ - int (*set_crc_source)(struct drm_crtc *crtc, const char *source); - - /** - * @verify_crc_source: - * - * verifies the source of CRC checksums of frames before setting the - * source for CRC and during crc open. Source parameter can be NULL - * while disabling crc source. - * - * This callback is optional if the driver does not support any CRC - * generation functionality. - * - * RETURNS: - * - * 0 on success or a negative error code on failure. - */ - int (*verify_crc_source)(struct drm_crtc *crtc, const char *source, - size_t *values_cnt); - /** - * @get_crc_sources: - * - * Driver callback for getting a list of all the available sources for - * CRC generation. This callback depends upon verify_crc_source, So - * verify_crc_source callback should be implemented before implementing - * this. Driver can pass full list of available crc sources, this - * callback does the verification on each crc-source before passing it - * to userspace. - * - * This callback is optional if the driver does not support exporting of - * possible CRC sources list. - * - * RETURNS: - * - * a constant character pointer to the list of all the available CRC - * sources. On failure driver should return NULL. count should be - * updated with number of sources in list. if zero we don't process any - * source from the list. - */ - const char *const *(*get_crc_sources)(struct drm_crtc *crtc, - size_t *count); - - /** - * @atomic_print_state: - * - * If driver subclasses &struct drm_crtc_state, it should implement - * this optional hook for printing additional driver specific state. - * - * Do not call this directly, use drm_atomic_crtc_print_state() - * instead. 
- */ - void (*atomic_print_state)(struct drm_printer *p, - const struct drm_crtc_state *state); - - /** - * @get_vblank_counter: - * - * Driver callback for fetching a raw hardware vblank counter for the - * CRTC. It's meant to be used by new drivers as the replacement of - * &drm_driver.get_vblank_counter hook. - * - * This callback is optional. If a device doesn't have a hardware - * counter, the driver can simply leave the hook as NULL. The DRM core - * will account for missed vblank events while interrupts where disabled - * based on system timestamps. - * - * Wraparound handling and loss of events due to modesetting is dealt - * with in the DRM core code, as long as drivers call - * drm_crtc_vblank_off() and drm_crtc_vblank_on() when disabling or - * enabling a CRTC. - * - * See also &drm_device.vblank_disable_immediate and - * &drm_device.max_vblank_count. - * - * Returns: - * - * Raw vblank counter value. - */ - u32 (*get_vblank_counter)(struct drm_crtc *crtc); - - /** - * @enable_vblank: - * - * Enable vblank interrupts for the CRTC. It's meant to be used by - * new drivers as the replacement of &drm_driver.enable_vblank hook. - * - * Returns: - * - * Zero on success, appropriate errno if the vblank interrupt cannot - * be enabled. - */ - int (*enable_vblank)(struct drm_crtc *crtc); - - /** - * @disable_vblank: - * - * Disable vblank interrupts for the CRTC. It's meant to be used by - * new drivers as the replacement of &drm_driver.disable_vblank hook. - */ - void (*disable_vblank)(struct drm_crtc *crtc); - - /** - * @get_vblank_timestamp: - * - * Called by drm_get_last_vbltimestamp(). Should return a precise - * timestamp when the most recent vblank interval ended or will end. - * - * Specifically, the timestamp in @vblank_time should correspond as - * closely as possible to the time when the first video scanline of - * the video frame after the end of vblank will start scanning out, - * the time immediately after end of the vblank interval. 
If the - * @crtc is currently inside vblank, this will be a time in the future. - * If the @crtc is currently scanning out a frame, this will be the - * past start time of the current scanout. This is meant to adhere - * to the OpenML OML_sync_control extension specification. - * - * Parameters: - * - * crtc: - * CRTC for which timestamp should be returned. - * max_error: - * Maximum allowable timestamp error in nanoseconds. - * Implementation should strive to provide timestamp - * with an error of at most max_error nanoseconds. - * Returns true upper bound on error for timestamp. - * vblank_time: - * Target location for returned vblank timestamp. - * in_vblank_irq: - * True when called from drm_crtc_handle_vblank(). Some drivers - * need to apply some workarounds for gpu-specific vblank irq quirks - * if flag is set. - * - * Returns: - * - * True on success, false on failure, which means the core should - * fallback to a simple timestamp taken in drm_crtc_handle_vblank(). - */ - bool (*get_vblank_timestamp)(struct drm_crtc *crtc, - int *max_error, - ktime_t *vblank_time, - bool in_vblank_irq); }; /** * struct drm_crtc - central CRTC control structure + * @dev: parent DRM device + * @port: OF node used by drm_of_find_possible_crtcs() + * @head: list management + * @name: human readable name, can be overwritten by the driver + * @mutex: per-CRTC locking + * @base: base KMS object for ID tracking etc. + * @primary: primary plane for this CRTC + * @cursor: cursor plane for this CRTC + * @cursor_x: current x position of the cursor, used for universal cursor planes + * @cursor_y: current y position of the cursor, used for universal cursor planes + * @enabled: is this CRTC enabled? 
+ * @mode: current mode timings + * @hwmode: mode timings as programmed to hw regs + * @x: x position on screen + * @y: y position on screen + * @funcs: CRTC control functions + * @gamma_size: size of gamma ramp + * @gamma_store: gamma ramp values + * @helper_private: mid-layer private data + * @properties: property tracking for this CRTC * * Each CRTC may have one or more connectors associated with it. This structure * allows the CRTC to be controlled. */ struct drm_crtc { - /** @dev: parent DRM device */ struct drm_device *dev; - /** @port: OF node used by drm_of_find_possible_crtcs(). */ struct device_node *port; - /** - * @head: - * - * List of all CRTCs on @dev, linked from &drm_mode_config.crtc_list. - * Invariant over the lifetime of @dev and therefore does not need - * locking. - */ struct list_head head; - /** @name: human readable name, can be overwritten by the driver */ char *name; /** * @mutex: * - * This provides a read lock for the overall CRTC state (mode, dpms + * This provides a read lock for the overall crtc state (mode, dpms * state, ...) and a write lock for everything which can be update - * without a full modeset (fb, cursor data, CRTC properties ...). A full - * modeset also need to grab &drm_mode_config.connection_mutex. - * - * For atomic drivers specifically this protects @state. + * without a full modeset (fb, cursor data, crtc properties ...). Full + * modeset also need to grab dev->mode_config.connection_mutex. */ struct drm_modeset_lock mutex; - /** @base: base KMS object for ID tracking etc. */ struct drm_mode_object base; - /** - * @primary: - * Primary plane for this CRTC. Note that this is only - * relevant for legacy IOCTL, it specifies the plane implicitly used by - * the SETCRTC and PAGE_FLIP IOCTLs. It does not have any significance - * beyond that. - */ + /* primary and cursor planes for CRTC */ struct drm_plane *primary; - - /** - * @cursor: - * Cursor plane for this CRTC. 
Note that this is only relevant for - * legacy IOCTL, it specifies the plane implicitly used by the SETCURSOR - * and SETCURSOR2 IOCTLs. It does not have any significance - * beyond that. - */ struct drm_plane *cursor; /** @@ -999,115 +621,36 @@ struct drm_crtc { */ unsigned index; - /** - * @cursor_x: Current x position of the cursor, used for universal - * cursor planes because the SETCURSOR IOCTL only can update the - * framebuffer without supplying the coordinates. Drivers should not use - * this directly, atomic drivers should look at &drm_plane_state.crtc_x - * of the cursor plane instead. - */ + /* position of cursor plane on crtc */ int cursor_x; - /** - * @cursor_y: Current y position of the cursor, used for universal - * cursor planes because the SETCURSOR IOCTL only can update the - * framebuffer without supplying the coordinates. Drivers should not use - * this directly, atomic drivers should look at &drm_plane_state.crtc_y - * of the cursor plane instead. - */ int cursor_y; - /** - * @enabled: - * - * Is this CRTC enabled? Should only be used by legacy drivers, atomic - * drivers should instead consult &drm_crtc_state.enable and - * &drm_crtc_state.active. Atomic drivers can update this by calling - * drm_atomic_helper_update_legacy_modeset_state(). - */ bool enabled; - /** - * @mode: - * - * Current mode timings. Should only be used by legacy drivers, atomic - * drivers should instead consult &drm_crtc_state.mode. Atomic drivers - * can update this by calling - * drm_atomic_helper_update_legacy_modeset_state(). - */ + /* Requested mode from modesetting. */ struct drm_display_mode mode; - /** - * @hwmode: - * - * Programmed mode in hw, after adjustments for encoders, crtc, panel - * scaling etc. Should only be used by legacy drivers, for high - * precision vblank timestamps in - * drm_crtc_vblank_helper_get_vblank_timestamp(). - * - * Note that atomic drivers should not use this, but instead use - * &drm_crtc_state.adjusted_mode. 
And for high-precision timestamps - * drm_crtc_vblank_helper_get_vblank_timestamp() used - * &drm_vblank_crtc.hwmode, - * which is filled out by calling drm_calc_timestamping_constants(). + /* Programmed mode in hw, after adjustments for encoders, + * crtc, panel scaling etc. Needed for timestamping etc. */ struct drm_display_mode hwmode; - /** - * @x: - * x position on screen. Should only be used by legacy drivers, atomic - * drivers should look at &drm_plane_state.crtc_x of the primary plane - * instead. Updated by calling - * drm_atomic_helper_update_legacy_modeset_state(). - */ - int x; - /** - * @y: - * y position on screen. Should only be used by legacy drivers, atomic - * drivers should look at &drm_plane_state.crtc_y of the primary plane - * instead. Updated by calling - * drm_atomic_helper_update_legacy_modeset_state(). - */ - int y; - - /** @funcs: CRTC control functions */ + int x, y; const struct drm_crtc_funcs *funcs; - /** - * @gamma_size: Size of legacy gamma ramp reported to userspace. Set up - * by calling drm_mode_crtc_set_gamma_size(). - */ + /* Legacy FB CRTC gamma size for reporting to userspace */ uint32_t gamma_size; - - /** - * @gamma_store: Gamma ramp values used by the legacy SETGAMMA and - * GETGAMMA IOCTls. Set up by calling drm_mode_crtc_set_gamma_size(). - */ uint16_t *gamma_store; - /** @helper_private: mid-layer private data */ + /* if you are using the helper */ const struct drm_crtc_helper_funcs *helper_private; - /** @properties: property tracking for this CRTC */ struct drm_object_properties properties; - /** - * @scaling_filter_property: property to apply a particular filter while - * scaling. - */ - struct drm_property *scaling_filter_property; - /** * @state: * * Current atomic state for this CRTC. - * - * This is protected by @mutex. Note that nonblocking atomic commits - * access the current CRTC state without taking locks. 
Either by going - * through the &struct drm_atomic_state pointers, see - * for_each_oldnew_crtc_in_state(), for_each_old_crtc_in_state() and - * for_each_new_crtc_in_state(). Or through careful ordering of atomic - * commit operations as implemented in the atomic helpers, see - * &struct drm_crtc_commit. */ struct drm_crtc_state *state; @@ -1115,16 +658,10 @@ struct drm_crtc { * @commit_list: * * List of &drm_crtc_commit structures tracking pending commits. - * Protected by @commit_lock. This list holds its own full reference, - * as does the ongoing commit. - * - * "Note that the commit for a state change is also tracked in - * &drm_crtc_state.commit. For accessing the immediately preceding - * commit in an atomic update it is recommended to just use that - * pointer in the old CRTC state, since accessing that doesn't need - * any locking or list-walking. @commit_list should only be used to - * stall for framebuffer cleanup that's signalled through - * &drm_crtc_commit.cleanup_done." + * Protected by @commit_lock. This list doesn't hold its own full + * reference, but burrows it from the ongoing commit. Commit entries + * must be removed from this list once the commit is fully completed, + * but before it's correspoding &drm_atomic_state gets destroyed. */ struct list_head commit_list; @@ -1135,56 +672,14 @@ struct drm_crtc { */ spinlock_t commit_lock; -#ifdef CONFIG_DEBUG_FS /** - * @debugfs_entry: + * @acquire_ctx: * - * Debugfs directory for this CRTC. + * Per-CRTC implicit acquire context used by atomic drivers for legacy + * IOCTLs, so that atomic drivers can get at the locking acquire + * context. */ - struct dentry *debugfs_entry; -#endif - - /** - * @crc: - * - * Configuration settings of CRC capture. - */ - struct drm_crtc_crc crc; - - /** - * @fence_context: - * - * timeline context used for fence operations. - */ - unsigned int fence_context; - - /** - * @fence_lock: - * - * spinlock to protect the fences in the fence_context. 
- */ - spinlock_t fence_lock; - /** - * @fence_seqno: - * - * Seqno variable used as monotonic counter for the fences - * created on the CRTC's timeline. - */ - unsigned long fence_seqno; - - /** - * @timeline_name: - * - * The name of the CRTC's fence timeline. - */ - char timeline_name[32]; - - /** - * @self_refresh_data: Holds the state for the self refresh helpers - * - * Initialized via drm_self_refresh_helper_init(). - */ - struct drm_self_refresh_data *self_refresh_data; + struct drm_modeset_acquire_ctx *acquire_ctx; }; /** @@ -1197,8 +692,10 @@ struct drm_crtc { * @connectors: array of connectors to drive with this CRTC if possible * @num_connectors: size of @connectors array * - * This represents a modeset configuration for the legacy SETCRTC ioctl and is - * also used internally. Atomic drivers instead use &drm_atomic_state. + * Represents a single crtc the connectors that it drives with what mode + * and from which framebuffer it scans out from. + * + * This is used to set modes. */ struct drm_mode_set { struct drm_framebuffer *fb; @@ -1212,49 +709,631 @@ struct drm_mode_set { size_t num_connectors; }; +/** + * struct drm_mode_config_funcs - basic driver provided mode setting functions + * + * Some global (i.e. not per-CRTC, connector, etc) mode setting functions that + * involve drivers. + */ +struct drm_mode_config_funcs { + /** + * @fb_create: + * + * Create a new framebuffer object. The core does basic checks on the + * requested metadata, but most of that is left to the driver. See + * struct &drm_mode_fb_cmd2 for details. + * + * If the parameters are deemed valid and the backing storage objects in + * the underlying memory manager all exist, then the driver allocates + * a new &drm_framebuffer structure, subclassed to contain + * driver-specific information (like the internal native buffer object + * references). It also needs to fill out all relevant metadata, which + * should be done by calling drm_helper_mode_fill_fb_struct(). 
+ * + * The initialization is finalized by calling drm_framebuffer_init(), + * which registers the framebuffer and makes it accessible to other + * threads. + * + * RETURNS: + * + * A new framebuffer with an initial reference count of 1 or a negative + * error code encoded with ERR_PTR(). + */ + struct drm_framebuffer *(*fb_create)(struct drm_device *dev, + struct drm_file *file_priv, + const struct drm_mode_fb_cmd2 *mode_cmd); + + /** + * @output_poll_changed: + * + * Callback used by helpers to inform the driver of output configuration + * changes. + * + * Drivers implementing fbdev emulation with the helpers can call + * drm_fb_helper_hotplug_changed from this hook to inform the fbdev + * helper of output changes. + * + * FIXME: + * + * Except that there's no vtable for device-level helper callbacks + * there's no reason this is a core function. + */ + void (*output_poll_changed)(struct drm_device *dev); + + /** + * @atomic_check: + * + * This is the only hook to validate an atomic modeset update. This + * function must reject any modeset and state changes which the hardware + * or driver doesn't support. This includes but is of course not limited + * to: + * + * - Checking that the modes, framebuffers, scaling and placement + * requirements and so on are within the limits of the hardware. + * + * - Checking that any hidden shared resources are not oversubscribed. + * This can be shared PLLs, shared lanes, overall memory bandwidth, + * display fifo space (where shared between planes or maybe even + * CRTCs). + * + * - Checking that virtualized resources exported to userspace are not + * oversubscribed. For various reasons it can make sense to expose + * more planes, crtcs or encoders than which are physically there. 
One + * example is dual-pipe operations (which generally should be hidden + * from userspace if when lockstepped in hardware, exposed otherwise), + * where a plane might need 1 hardware plane (if it's just on one + * pipe), 2 hardware planes (when it spans both pipes) or maybe even + * shared a hardware plane with a 2nd plane (if there's a compatible + * plane requested on the area handled by the other pipe). + * + * - Check that any transitional state is possible and that if + * requested, the update can indeed be done in the vblank period + * without temporarily disabling some functions. + * + * - Check any other constraints the driver or hardware might have. + * + * - This callback also needs to correctly fill out the &drm_crtc_state + * in this update to make sure that drm_atomic_crtc_needs_modeset() + * reflects the nature of the possible update and returns true if and + * only if the update cannot be applied without tearing within one + * vblank on that CRTC. The core uses that information to reject + * updates which require a full modeset (i.e. blanking the screen, or + * at least pausing updates for a substantial amount of time) if + * userspace has disallowed that in its request. + * + * - The driver also does not need to repeat basic input validation + * like done for the corresponding legacy entry points. The core does + * that before calling this hook. + * + * See the documentation of @atomic_commit for an exhaustive list of + * error conditions which don't have to be checked at the + * ->atomic_check() stage? + * + * See the documentation for struct &drm_atomic_state for how exactly + * an atomic modeset update is described. + * + * Drivers using the atomic helpers can implement this hook using + * drm_atomic_helper_check(), or one of the exported sub-functions of + * it. + * + * RETURNS: + * + * 0 on success or one of the below negative error codes: + * + * - -EINVAL, if any of the above constraints are violated. 
+ * + * - -EDEADLK, when returned from an attempt to acquire an additional + * &drm_modeset_lock through drm_modeset_lock(). + * + * - -ENOMEM, if allocating additional state sub-structures failed due + * to lack of memory. + * + * - -EINTR, -EAGAIN or -ERESTARTSYS, if the IOCTL should be restarted. + * This can either be due to a pending signal, or because the driver + * needs to completely bail out to recover from an exceptional + * situation like a GPU hang. From a userspace point all errors are + * treated equally. + */ + int (*atomic_check)(struct drm_device *dev, + struct drm_atomic_state *state); + + /** + * @atomic_commit: + * + * This is the only hook to commit an atomic modeset update. The core + * guarantees that @atomic_check has been called successfully before + * calling this function, and that nothing has been changed in the + * interim. + * + * See the documentation for struct &drm_atomic_state for how exactly + * an atomic modeset update is described. + * + * Drivers using the atomic helpers can implement this hook using + * drm_atomic_helper_commit(), or one of the exported sub-functions of + * it. + * + * Nonblocking commits (as indicated with the nonblock parameter) must + * do any preparatory work which might result in an unsuccessful commit + * in the context of this callback. The only exceptions are hardware + * errors resulting in -EIO. But even in that case the driver must + * ensure that the display pipe is at least running, to avoid + * compositors crashing when pageflips don't work. Anything else, + * specifically committing the update to the hardware, should be done + * without blocking the caller. For updates which do not require a + * modeset this must be guaranteed. + * + * The driver must wait for any pending rendering to the new + * framebuffers to complete before executing the flip. It should also + * wait for any pending rendering from other drivers if the underlying + * buffer is a shared dma-buf. 
Nonblocking commits must not wait for + * rendering in the context of this callback. + * + * An application can request to be notified when the atomic commit has + * completed. These events are per-CRTC and can be distinguished by the + * CRTC index supplied in &drm_event to userspace. + * + * The drm core will supply a struct &drm_event in the event + * member of each CRTC's &drm_crtc_state structure. See the + * documentation for &drm_crtc_state for more details about the precise + * semantics of this event. + * + * NOTE: + * + * Drivers are not allowed to shut down any display pipe successfully + * enabled through an atomic commit on their own. Doing so can result in + * compositors crashing if a page flip is suddenly rejected because the + * pipe is off. + * + * RETURNS: + * + * 0 on success or one of the below negative error codes: + * + * - -EBUSY, if a nonblocking updated is requested and there is + * an earlier updated pending. Drivers are allowed to support a queue + * of outstanding updates, but currently no driver supports that. + * Note that drivers must wait for preceding updates to complete if a + * synchronous update is requested, they are not allowed to fail the + * commit in that case. + * + * - -ENOMEM, if the driver failed to allocate memory. Specifically + * this can happen when trying to pin framebuffers, which must only + * be done when committing the state. + * + * - -ENOSPC, as a refinement of the more generic -ENOMEM to indicate + * that the driver has run out of vram, iommu space or similar GPU + * address space needed for framebuffer. + * + * - -EIO, if the hardware completely died. + * + * - -EINTR, -EAGAIN or -ERESTARTSYS, if the IOCTL should be restarted. + * This can either be due to a pending signal, or because the driver + * needs to completely bail out to recover from an exceptional + * situation like a GPU hang. From a userspace point of view all errors are + * treated equally. + * + * This list is exhaustive. 
Specifically this hook is not allowed to + * return -EINVAL (any invalid requests should be caught in + * @atomic_check) or -EDEADLK (this function must not acquire + * additional modeset locks). + */ + int (*atomic_commit)(struct drm_device *dev, + struct drm_atomic_state *state, + bool nonblock); + + /** + * @atomic_state_alloc: + * + * This optional hook can be used by drivers that want to subclass struct + * &drm_atomic_state to be able to track their own driver-private global + * state easily. If this hook is implemented, drivers must also + * implement @atomic_state_clear and @atomic_state_free. + * + * RETURNS: + * + * A new &drm_atomic_state on success or NULL on failure. + */ + struct drm_atomic_state *(*atomic_state_alloc)(struct drm_device *dev); + + /** + * @atomic_state_clear: + * + * This hook must clear any driver private state duplicated into the + * passed-in &drm_atomic_state. This hook is called when the caller + * encountered a &drm_modeset_lock deadlock and needs to drop all + * already acquired locks as part of the deadlock avoidance dance + * implemented in drm_modeset_lock_backoff(). + * + * Any duplicated state must be invalidated since a concurrent atomic + * update might change it, and the drm atomic interfaces always apply + * updates as relative changes to the current state. + * + * Drivers that implement this must call drm_atomic_state_default_clear() + * to clear common state. + */ + void (*atomic_state_clear)(struct drm_atomic_state *state); + + /** + * @atomic_state_free: + * + * This hook needs driver private resources and the &drm_atomic_state + * itself. Note that the core first calls drm_atomic_state_clear() to + * avoid code duplicate between the clear and free hooks. + * + * Drivers that implement this must call drm_atomic_state_default_free() + * to release common resources. 
+ */
+ void (*atomic_state_free)(struct drm_atomic_state *state);
+};
+
+/**
+ * struct drm_mode_config - Mode configuration control structure
+ * @mutex: mutex protecting KMS related lists and structures
+ * @connection_mutex: ww mutex protecting connector state and routing
+ * @acquire_ctx: global implicit acquire context used by atomic drivers for
+ * legacy IOCTLs
+ * @fb_lock: mutex to protect fb state and lists
+ * @num_fb: number of fbs available
+ * @fb_list: list of framebuffers available
+ * @num_encoder: number of encoders on this device
+ * @encoder_list: list of encoder objects
+ * @num_overlay_plane: number of overlay planes on this device
+ * @num_total_plane: number of universal (i.e. with primary/cursor) planes on this device
+ * @plane_list: list of plane objects
+ * @num_crtc: number of CRTCs on this device
+ * @crtc_list: list of CRTC objects
+ * @property_list: list of property objects
+ * @min_width: minimum pixel width on this device
+ * @min_height: minimum pixel height on this device
+ * @max_width: maximum pixel width on this device
+ * @max_height: maximum pixel height on this device
+ * @funcs: core driver provided mode setting functions
+ * @fb_base: base address of the framebuffer
+ * @poll_enabled: track polling support for this device
+ * @poll_running: track polling status for this device
+ * @delayed_event: track delayed poll uevent deliver for this device
+ * @output_poll_work: delayed work for polling in process context
+ * @property_blob_list: list of all the blob property objects
+ * @blob_lock: mutex for blob property allocation and management
+ * @*_property: core property tracking
+ * @preferred_depth: preferred RGB pixel depth, used by fb helpers
+ * @prefer_shadow: hint to userspace to prefer shadow-fb rendering
+ * @cursor_width: hint to userspace for max cursor width
+ * @cursor_height: hint to userspace for max cursor height
+ * @helper_private: mid-layer private data
+ *
+ * Core mode resource tracking structure. 
All CRTC, encoders, and connectors + * enumerated by the driver are added here, as are global properties. Some + * global restrictions are also here, e.g. dimension restrictions. + */ +struct drm_mode_config { + struct mutex mutex; /* protects configuration (mode lists etc.) */ + struct drm_modeset_lock connection_mutex; /* protects connector->encoder and encoder->crtc links */ + struct drm_modeset_acquire_ctx *acquire_ctx; /* for legacy _lock_all() / _unlock_all() */ + + /** + * @idr_mutex: + * + * Mutex for KMS ID allocation and management. Protects both @crtc_idr + * and @tile_idr. + */ + struct mutex idr_mutex; + + /** + * @crtc_idr: + * + * Main KMS ID tracking object. Use this idr for all IDs, fb, crtc, + * connector, modes - just makes life easier to have only one. + */ + struct idr crtc_idr; + + /** + * @tile_idr: + * + * Use this idr for allocating new IDs for tiled sinks like use in some + * high-res DP MST screens. + */ + struct idr tile_idr; + + struct mutex fb_lock; /* proctects global and per-file fb lists */ + int num_fb; + struct list_head fb_list; + + /** + * @num_connector: Number of connectors on this device. + */ + int num_connector; + /** + * @connector_ida: ID allocator for connector indices. + */ + struct ida connector_ida; + /** + * @connector_list: List of connector objects. + */ + struct list_head connector_list; + int num_encoder; + struct list_head encoder_list; + + /* + * Track # of overlay planes separately from # of total planes. By + * default we only advertise overlay planes to userspace; if userspace + * sets the "universal plane" capability bit, we'll go ahead and + * expose all planes. 
+ */ + int num_overlay_plane; + int num_total_plane; + struct list_head plane_list; + + int num_crtc; + struct list_head crtc_list; + + struct list_head property_list; + + int min_width, min_height; + int max_width, max_height; + const struct drm_mode_config_funcs *funcs; + resource_size_t fb_base; + + /* output poll support */ + bool poll_enabled; + bool poll_running; + bool delayed_event; + struct delayed_work output_poll_work; + + struct mutex blob_lock; + + /* pointers to standard properties */ + struct list_head property_blob_list; + /** + * @edid_property: Default connector property to hold the EDID of the + * currently connected sink, if any. + */ + struct drm_property *edid_property; + /** + * @dpms_property: Default connector property to control the + * connector's DPMS state. + */ + struct drm_property *dpms_property; + /** + * @path_property: Default connector property to hold the DP MST path + * for the port. + */ + struct drm_property *path_property; + /** + * @tile_property: Default connector property to store the tile + * position of a tiled screen, for sinks which need to be driven with + * multiple CRTCs. + */ + struct drm_property *tile_property; + /** + * @plane_type_property: Default plane property to differentiate + * CURSOR, PRIMARY and OVERLAY legacy uses of planes. + */ + struct drm_property *plane_type_property; + /** + * @rotation_property: Optional property for planes or CRTCs to specifiy + * rotation. + */ + struct drm_property *rotation_property; + /** + * @prop_src_x: Default atomic plane property for the plane source + * position in the connected &drm_framebuffer. + */ + struct drm_property *prop_src_x; + /** + * @prop_src_y: Default atomic plane property for the plane source + * position in the connected &drm_framebuffer. + */ + struct drm_property *prop_src_y; + /** + * @prop_src_w: Default atomic plane property for the plane source + * position in the connected &drm_framebuffer. 
+ */
+ struct drm_property *prop_src_w;
+ /**
+ * @prop_src_h: Default atomic plane property for the plane source
+ * position in the connected &drm_framebuffer.
+ */
+ struct drm_property *prop_src_h;
+ /**
+ * @prop_crtc_x: Default atomic plane property for the plane destination
+ * position in the &drm_crtc it is being shown on.
+ */
+ struct drm_property *prop_crtc_x;
+ /**
+ * @prop_crtc_y: Default atomic plane property for the plane destination
+ * position in the &drm_crtc it is being shown on.
+ */
+ struct drm_property *prop_crtc_y;
+ /**
+ * @prop_crtc_w: Default atomic plane property for the plane destination
+ * position in the &drm_crtc it is being shown on.
+ */
+ struct drm_property *prop_crtc_w;
+ /**
+ * @prop_crtc_h: Default atomic plane property for the plane destination
+ * position in the &drm_crtc it is being shown on.
+ */
+ struct drm_property *prop_crtc_h;
+ /**
+ * @prop_fb_id: Default atomic plane property to specify the
+ * &drm_framebuffer.
+ */
+ struct drm_property *prop_fb_id;
+ /**
+ * @prop_crtc_id: Default atomic plane property to specify the
+ * &drm_crtc.
+ */
+ struct drm_property *prop_crtc_id;
+ /**
+ * @prop_active: Default atomic CRTC property to control the active
+ * state, which is the simplified implementation for DPMS in atomic
+ * drivers.
+ */
+ struct drm_property *prop_active;
+ /**
+ * @prop_mode_id: Default atomic CRTC property to set the mode for a
+ * CRTC. A 0 mode implies that the CRTC is entirely disabled - all
+ * connectors must be off and active must be set to disabled, too.
+ */
+ struct drm_property *prop_mode_id;
+
+ /**
+ * @dvi_i_subconnector_property: Optional DVI-I property to
+ * differentiate between analog or digital mode.
+ */
+ struct drm_property *dvi_i_subconnector_property;
+ /**
+ * @dvi_i_select_subconnector_property: Optional DVI-I property to
+ * select between analog or digital mode. 
+ */
+ struct drm_property *dvi_i_select_subconnector_property;
+
+ /**
+ * @tv_subconnector_property: Optional TV property to differentiate
+ * between different TV connector types.
+ */
+ struct drm_property *tv_subconnector_property;
+ /**
+ * @tv_select_subconnector_property: Optional TV property to select
+ * between different TV connector types.
+ */
+ struct drm_property *tv_select_subconnector_property;
+ /**
+ * @tv_mode_property: Optional TV property to select
+ * the output TV mode.
+ */
+ struct drm_property *tv_mode_property;
+ /**
+ * @tv_left_margin_property: Optional TV property to set the left
+ * margin.
+ */
+ struct drm_property *tv_left_margin_property;
+ /**
+ * @tv_right_margin_property: Optional TV property to set the right
+ * margin.
+ */
+ struct drm_property *tv_right_margin_property;
+ /**
+ * @tv_top_margin_property: Optional TV property to set the top
+ * margin.
+ */
+ struct drm_property *tv_top_margin_property;
+ /**
+ * @tv_bottom_margin_property: Optional TV property to set the bottom
+ * margin.
+ */
+ struct drm_property *tv_bottom_margin_property;
+ /**
+ * @tv_brightness_property: Optional TV property to set the
+ * brightness.
+ */
+ struct drm_property *tv_brightness_property;
+ /**
+ * @tv_contrast_property: Optional TV property to set the
+ * contrast.
+ */
+ struct drm_property *tv_contrast_property;
+ /**
+ * @tv_flicker_reduction_property: Optional TV property to control the
+ * flicker reduction mode.
+ */
+ struct drm_property *tv_flicker_reduction_property;
+ /**
+ * @tv_overscan_property: Optional TV property to control the overscan
+ * setting.
+ */
+ struct drm_property *tv_overscan_property;
+ /**
+ * @tv_saturation_property: Optional TV property to set the
+ * saturation.
+ */
+ struct drm_property *tv_saturation_property;
+ /**
+ * @tv_hue_property: Optional TV property to set the hue. 
+ */ + struct drm_property *tv_hue_property; + + /** + * @scaling_mode_property: Optional connector property to control the + * upscaling, mostly used for built-in panels. + */ + struct drm_property *scaling_mode_property; + /** + * @aspect_ratio_property: Optional connector property to control the + * HDMI infoframe aspect ratio setting. + */ + struct drm_property *aspect_ratio_property; + /** + * @degamma_lut_property: Optional CRTC property to set the LUT used to + * convert the framebuffer's colors to linear gamma. + */ + struct drm_property *degamma_lut_property; + /** + * @degamma_lut_size_property: Optional CRTC property for the size of + * the degamma LUT as supported by the driver (read-only). + */ + struct drm_property *degamma_lut_size_property; + /** + * @ctm_property: Optional CRTC property to set the + * matrix used to convert colors after the lookup in the + * degamma LUT. + */ + struct drm_property *ctm_property; + /** + * @gamma_lut_property: Optional CRTC property to set the LUT used to + * convert the colors, after the CTM matrix, to the gamma space of the + * connected screen. + */ + struct drm_property *gamma_lut_property; + /** + * @gamma_lut_size_property: Optional CRTC property for the size of the + * gamma LUT as supported by the driver (read-only). + */ + struct drm_property *gamma_lut_size_property; + + /** + * @suggested_x_property: Optional connector property with a hint for + * the position of the output on the host's screen. + */ + struct drm_property *suggested_x_property; + /** + * @suggested_y_property: Optional connector property with a hint for + * the position of the output on the host's screen. + */ + struct drm_property *suggested_y_property; + + /* dumb ioctl parameters */ + uint32_t preferred_depth, prefer_shadow; + + /** + * @async_page_flip: Does this device support async flips on the primary + * plane? 
+ */ + bool async_page_flip; + + /** + * @allow_fb_modifiers: + * + * Whether the driver supports fb modifiers in the ADDFB2.1 ioctl call. + */ + bool allow_fb_modifiers; + + /* cursor size */ + uint32_t cursor_width, cursor_height; + + struct drm_mode_config_helper_funcs *helper_private; +}; + #define obj_to_crtc(x) container_of(x, struct drm_crtc, base) -__printf(6, 7) +extern __printf(6, 7) int drm_crtc_init_with_planes(struct drm_device *dev, struct drm_crtc *crtc, struct drm_plane *primary, struct drm_plane *cursor, const struct drm_crtc_funcs *funcs, const char *name, ...); -void drm_crtc_cleanup(struct drm_crtc *crtc); - -__printf(7, 8) -void *__drmm_crtc_alloc_with_planes(struct drm_device *dev, - size_t size, size_t offset, - struct drm_plane *primary, - struct drm_plane *cursor, - const struct drm_crtc_funcs *funcs, - const char *name, ...); - -/** - * drmm_crtc_alloc_with_planes - Allocate and initialize a new CRTC object with - * specified primary and cursor planes. - * @dev: DRM device - * @type: the type of the struct which contains struct &drm_crtc - * @member: the name of the &drm_crtc within @type. - * @primary: Primary plane for CRTC - * @cursor: Cursor plane for CRTC - * @funcs: callbacks for the new CRTC - * @name: printf style format string for the CRTC name, or NULL for default name - * - * Allocates and initializes a new crtc object. Cleanup is automatically - * handled through registering drmm_crtc_cleanup() with drmm_add_action(). - * - * The @drm_crtc_funcs.destroy hook must be NULL. - * - * Returns: - * Pointer to new crtc, or ERR_PTR on failure. - */ -#define drmm_crtc_alloc_with_planes(dev, type, member, primary, cursor, funcs, name, ...) 
\ - ((type *)__drmm_crtc_alloc_with_planes(dev, sizeof(type), \ - offsetof(type, member), \ - primary, cursor, funcs, \ - name, ##__VA_ARGS__)) +extern void drm_crtc_cleanup(struct drm_crtc *crtc); /** * drm_crtc_index - find the index of a registered CRTC @@ -1272,57 +1351,56 @@ static inline unsigned int drm_crtc_index(const struct drm_crtc *crtc) * drm_crtc_mask - find the mask of a registered CRTC * @crtc: CRTC to find mask for * - * Given a registered CRTC, return the mask bit of that CRTC for the - * &drm_encoder.possible_crtcs and &drm_plane.possible_crtcs fields. + * Given a registered CRTC, return the mask bit of that CRTC for an + * encoder's possible_crtcs field. */ -static inline uint32_t drm_crtc_mask(const struct drm_crtc *crtc) +static inline uint32_t drm_crtc_mask(struct drm_crtc *crtc) { return 1 << drm_crtc_index(crtc); } -int drm_mode_set_config_internal(struct drm_mode_set *set); -struct drm_crtc *drm_crtc_from_index(struct drm_device *dev, int idx); +extern void drm_crtc_get_hv_timing(const struct drm_display_mode *mode, + int *hdisplay, int *vdisplay); +extern int drm_crtc_force_disable(struct drm_crtc *crtc); +extern int drm_crtc_force_disable_all(struct drm_device *dev); -/** - * drm_crtc_find - look up a CRTC object from its ID - * @dev: DRM device - * @file_priv: drm file to check for lease against. - * @id: &drm_mode_object ID - * - * This can be used to look up a CRTC from its userspace ID. Only used by - * drivers for legacy IOCTLs and interface, nowadays extensions to the KMS - * userspace interface should be done using &drm_property. 
- */ +extern void drm_mode_config_init(struct drm_device *dev); +extern void drm_mode_config_reset(struct drm_device *dev); +extern void drm_mode_config_cleanup(struct drm_device *dev); + +extern int drm_mode_set_config_internal(struct drm_mode_set *set); + +extern struct drm_tile_group *drm_mode_create_tile_group(struct drm_device *dev, + char topology[8]); +extern struct drm_tile_group *drm_mode_get_tile_group(struct drm_device *dev, + char topology[8]); +extern void drm_mode_put_tile_group(struct drm_device *dev, + struct drm_tile_group *tg); + +/* Helpers */ static inline struct drm_crtc *drm_crtc_find(struct drm_device *dev, - struct drm_file *file_priv, - uint32_t id) + uint32_t id) { struct drm_mode_object *mo; - mo = drm_mode_object_find(dev, file_priv, id, DRM_MODE_OBJECT_CRTC); + mo = drm_mode_object_find(dev, id, DRM_MODE_OBJECT_CRTC); return mo ? obj_to_crtc(mo) : NULL; } -/** - * drm_for_each_crtc - iterate over all CRTCs - * @crtc: a &struct drm_crtc as the loop cursor - * @dev: the &struct drm_device - * - * Iterate over all CRTCs of @dev. - */ #define drm_for_each_crtc(crtc, dev) \ list_for_each_entry(crtc, &(dev)->mode_config.crtc_list, head) -/** - * drm_for_each_crtc_reverse - iterate over all CRTCs in reverse order - * @crtc: a &struct drm_crtc as the loop cursor - * @dev: the &struct drm_device - * - * Iterate over all CRTCs of @dev. - */ -#define drm_for_each_crtc_reverse(crtc, dev) \ - list_for_each_entry_reverse(crtc, &(dev)->mode_config.crtc_list, head) - -int drm_crtc_create_scaling_filter_property(struct drm_crtc *crtc, - unsigned int supported_filters); +static inline void +assert_drm_connector_list_read_locked(struct drm_mode_config *mode_config) +{ + /* + * The connector hotadd/remove code currently grabs both locks when + * updating lists. Hence readers need only hold either of them to be + * safe and the check amounts to + * + * WARN_ON(not_holding(A) && not_holding(B)). 
+ */ + WARN_ON(!mutex_is_locked(&mode_config->mutex) && + !drm_modeset_is_locked(&mode_config->connection_mutex)); +} #endif /* __DRM_CRTC_H__ */ diff --git a/include/drm/drm_crtc_helper.h b/include/drm/drm_crtc_helper.h index a6d520d5b6..982c299e43 100644 --- a/include/drm/drm_crtc_helper.h +++ b/include/drm/drm_crtc_helper.h @@ -43,19 +43,36 @@ #include #include -void drm_helper_disable_unused_functions(struct drm_device *dev); -int drm_crtc_helper_set_config(struct drm_mode_set *set, - struct drm_modeset_acquire_ctx *ctx); -bool drm_crtc_helper_set_mode(struct drm_crtc *crtc, - struct drm_display_mode *mode, - int x, int y, - struct drm_framebuffer *old_fb); -bool drm_helper_crtc_in_use(struct drm_crtc *crtc); -bool drm_helper_encoder_in_use(struct drm_encoder *encoder); +extern void drm_helper_disable_unused_functions(struct drm_device *dev); +extern int drm_crtc_helper_set_config(struct drm_mode_set *set); +extern bool drm_crtc_helper_set_mode(struct drm_crtc *crtc, + struct drm_display_mode *mode, + int x, int y, + struct drm_framebuffer *old_fb); +extern bool drm_helper_crtc_in_use(struct drm_crtc *crtc); +extern bool drm_helper_encoder_in_use(struct drm_encoder *encoder); -int drm_helper_connector_dpms(struct drm_connector *connector, int mode); +extern int drm_helper_connector_dpms(struct drm_connector *connector, int mode); -void drm_helper_resume_force_mode(struct drm_device *dev); -int drm_helper_force_disable_all(struct drm_device *dev); +extern void drm_helper_resume_force_mode(struct drm_device *dev); + +int drm_helper_crtc_mode_set(struct drm_crtc *crtc, struct drm_display_mode *mode, + struct drm_display_mode *adjusted_mode, int x, int y, + struct drm_framebuffer *old_fb); +int drm_helper_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y, + struct drm_framebuffer *old_fb); + +/* drm_probe_helper.c */ +extern int drm_helper_probe_single_connector_modes(struct drm_connector + *connector, uint32_t maxX, + uint32_t maxY); +extern void 
drm_kms_helper_poll_init(struct drm_device *dev); +extern void drm_kms_helper_poll_fini(struct drm_device *dev); +extern bool drm_helper_hpd_irq_event(struct drm_device *dev); +extern void drm_kms_helper_hotplug_event(struct drm_device *dev); + +extern void drm_kms_helper_poll_disable(struct drm_device *dev); +extern void drm_kms_helper_poll_enable(struct drm_device *dev); +extern void drm_kms_helper_poll_enable_locked(struct drm_device *dev); #endif diff --git a/include/drm/drm_displayid.h b/include/drm/drm_displayid.h index ec64d141f5..c0d4df6a60 100644 --- a/include/drm/drm_displayid.h +++ b/include/drm/drm_displayid.h @@ -22,10 +22,6 @@ #ifndef DRM_DISPLAYID_H #define DRM_DISPLAYID_H -#include - -struct edid; - #define DATA_BLOCK_PRODUCT_ID 0x00 #define DATA_BLOCK_DISPLAY_PARAMETERS 0x01 #define DATA_BLOCK_COLOR_CHARACTERISTICS 0x02 @@ -44,7 +40,6 @@ struct edid; #define DATA_BLOCK_DISPLAY_INTERFACE 0x0f #define DATA_BLOCK_STEREO_DISPLAY_INTERFACE 0x10 #define DATA_BLOCK_TILED_DISPLAY 0x12 -#define DATA_BLOCK_CTA 0x81 #define DATA_BLOCK_VENDOR_SPECIFIC 0x7f @@ -56,7 +51,7 @@ struct edid; #define PRODUCT_TYPE_REPEATER 5 #define PRODUCT_TYPE_DIRECT_DRIVE 6 -struct displayid_header { +struct displayid_hdr { u8 rev; u8 bytes; u8 prod_id; @@ -93,25 +88,6 @@ struct displayid_detailed_timings_1 { struct displayid_detailed_timing_block { struct displayid_block base; - struct displayid_detailed_timings_1 timings[]; + struct displayid_detailed_timings_1 timings[0]; }; - -/* DisplayID iteration */ -struct displayid_iter { - const struct edid *edid; - - const u8 *section; - int length; - int idx; - int ext_index; -}; - -void displayid_iter_edid_begin(const struct edid *edid, - struct displayid_iter *iter); -const struct displayid_block * -__displayid_iter_next(struct displayid_iter *iter); -#define displayid_iter_for_each(__block, __iter) \ - while (((__block) = __displayid_iter_next(__iter))) -void displayid_iter_end(struct displayid_iter *iter); - #endif diff --git 
a/include/drm/drm_dp_dual_mode_helper.h b/include/drm/drm_dp_dual_mode_helper.h index 7ee4822650..e8a9dfd0e0 100644 --- a/include/drm/drm_dp_dual_mode_helper.h +++ b/include/drm/drm_dp_dual_mode_helper.h @@ -40,8 +40,6 @@ #define DP_DUAL_MODE_REV_TYPE2 0x00 #define DP_DUAL_MODE_TYPE_MASK 0xf0 #define DP_DUAL_MODE_TYPE_TYPE2 0xa0 -/* This field is marked reserved in dual mode spec, used in LSPCON */ -#define DP_DUAL_MODE_TYPE_HAS_DPCD 0x08 #define DP_DUAL_MODE_IEEE_OUI 0x11 /* 11-13*/ #define DP_DUAL_IEEE_OUI_LEN 3 #define DP_DUAL_DEVICE_ID 0x14 /* 14-19 */ @@ -57,12 +55,6 @@ #define DP_DUAL_MODE_CEC_ENABLE 0x01 #define DP_DUAL_MODE_I2C_SPEED_CTRL 0x22 -/* LSPCON specific registers, defined by MCA */ -#define DP_DUAL_MODE_LSPCON_MODE_CHANGE 0x40 -#define DP_DUAL_MODE_LSPCON_CURRENT_MODE 0x41 -#define DP_DUAL_MODE_LSPCON_MODE_PCON 0x1 - -struct drm_device; struct i2c_adapter; ssize_t drm_dp_dual_mode_read(struct i2c_adapter *adapter, @@ -70,20 +62,6 @@ ssize_t drm_dp_dual_mode_read(struct i2c_adapter *adapter, ssize_t drm_dp_dual_mode_write(struct i2c_adapter *adapter, u8 offset, const void *buffer, size_t size); -/** - * enum drm_lspcon_mode - * @DRM_LSPCON_MODE_INVALID: No LSPCON. - * @DRM_LSPCON_MODE_LS: Level shifter mode of LSPCON - * which drives DP++ to HDMI 1.4 conversion. - * @DRM_LSPCON_MODE_PCON: Protocol converter mode of LSPCON - * which drives DP++ to HDMI 2.0 active conversion. 
- */ -enum drm_lspcon_mode { - DRM_LSPCON_MODE_INVALID, - DRM_LSPCON_MODE_LS, - DRM_LSPCON_MODE_PCON, -}; - /** * enum drm_dp_dual_mode_type - Type of the DP dual mode adaptor * @DRM_DP_DUAL_MODE_NONE: No DP dual mode adaptor @@ -92,7 +70,6 @@ enum drm_lspcon_mode { * @DRM_DP_DUAL_MODE_TYPE1_HDMI: Type 1 HDMI adaptor * @DRM_DP_DUAL_MODE_TYPE2_DVI: Type 2 DVI adaptor * @DRM_DP_DUAL_MODE_TYPE2_HDMI: Type 2 HDMI adaptor - * @DRM_DP_DUAL_MODE_LSPCON: Level shifter / protocol converter */ enum drm_dp_dual_mode_type { DRM_DP_DUAL_MODE_NONE, @@ -101,21 +78,15 @@ enum drm_dp_dual_mode_type { DRM_DP_DUAL_MODE_TYPE1_HDMI, DRM_DP_DUAL_MODE_TYPE2_DVI, DRM_DP_DUAL_MODE_TYPE2_HDMI, - DRM_DP_DUAL_MODE_LSPCON, }; -enum drm_dp_dual_mode_type -drm_dp_dual_mode_detect(const struct drm_device *dev, struct i2c_adapter *adapter); -int drm_dp_dual_mode_max_tmds_clock(const struct drm_device *dev, enum drm_dp_dual_mode_type type, +enum drm_dp_dual_mode_type drm_dp_dual_mode_detect(struct i2c_adapter *adapter); +int drm_dp_dual_mode_max_tmds_clock(enum drm_dp_dual_mode_type type, struct i2c_adapter *adapter); -int drm_dp_dual_mode_get_tmds_output(const struct drm_device *dev, enum drm_dp_dual_mode_type type, +int drm_dp_dual_mode_get_tmds_output(enum drm_dp_dual_mode_type type, struct i2c_adapter *adapter, bool *enabled); -int drm_dp_dual_mode_set_tmds_output(const struct drm_device *dev, enum drm_dp_dual_mode_type type, +int drm_dp_dual_mode_set_tmds_output(enum drm_dp_dual_mode_type type, struct i2c_adapter *adapter, bool enable); const char *drm_dp_get_dual_mode_type_name(enum drm_dp_dual_mode_type type); -int drm_lspcon_get_mode(const struct drm_device *dev, struct i2c_adapter *adapter, - enum drm_lspcon_mode *current_mode); -int drm_lspcon_set_mode(const struct drm_device *dev, struct i2c_adapter *adapter, - enum drm_lspcon_mode reqd_mode); #endif diff --git a/include/drm/drm_dp_helper.h b/include/drm/drm_dp_helper.h index 1d5b3dbb6e..2a79882cb6 100644 --- 
a/include/drm/drm_dp_helper.h +++ b/include/drm/drm_dp_helper.h @@ -23,14 +23,9 @@ #ifndef _DRM_DP_HELPER_H_ #define _DRM_DP_HELPER_H_ -#include -#include #include -#include - -struct drm_device; -struct drm_dp_aux; -struct drm_panel; +#include +#include /* * Unless otherwise noted, all values are from the DP 1.1a spec. Note that @@ -47,48 +42,6 @@ struct drm_panel; * 1.2 formally includes both eDP and DPI definitions. */ -/* MSA (Main Stream Attribute) MISC bits (as MISC1<<8|MISC0) */ -#define DP_MSA_MISC_SYNC_CLOCK (1 << 0) -#define DP_MSA_MISC_INTERLACE_VTOTAL_EVEN (1 << 8) -#define DP_MSA_MISC_STEREO_NO_3D (0 << 9) -#define DP_MSA_MISC_STEREO_PROG_RIGHT_EYE (1 << 9) -#define DP_MSA_MISC_STEREO_PROG_LEFT_EYE (3 << 9) -/* bits per component for non-RAW */ -#define DP_MSA_MISC_6_BPC (0 << 5) -#define DP_MSA_MISC_8_BPC (1 << 5) -#define DP_MSA_MISC_10_BPC (2 << 5) -#define DP_MSA_MISC_12_BPC (3 << 5) -#define DP_MSA_MISC_16_BPC (4 << 5) -/* bits per component for RAW */ -#define DP_MSA_MISC_RAW_6_BPC (1 << 5) -#define DP_MSA_MISC_RAW_7_BPC (2 << 5) -#define DP_MSA_MISC_RAW_8_BPC (3 << 5) -#define DP_MSA_MISC_RAW_10_BPC (4 << 5) -#define DP_MSA_MISC_RAW_12_BPC (5 << 5) -#define DP_MSA_MISC_RAW_14_BPC (6 << 5) -#define DP_MSA_MISC_RAW_16_BPC (7 << 5) -/* pixel encoding/colorimetry format */ -#define _DP_MSA_MISC_COLOR(misc1_7, misc0_21, misc0_3, misc0_4) \ - ((misc1_7) << 15 | (misc0_4) << 4 | (misc0_3) << 3 | ((misc0_21) << 1)) -#define DP_MSA_MISC_COLOR_RGB _DP_MSA_MISC_COLOR(0, 0, 0, 0) -#define DP_MSA_MISC_COLOR_CEA_RGB _DP_MSA_MISC_COLOR(0, 0, 1, 0) -#define DP_MSA_MISC_COLOR_RGB_WIDE_FIXED _DP_MSA_MISC_COLOR(0, 3, 0, 0) -#define DP_MSA_MISC_COLOR_RGB_WIDE_FLOAT _DP_MSA_MISC_COLOR(0, 3, 0, 1) -#define DP_MSA_MISC_COLOR_Y_ONLY _DP_MSA_MISC_COLOR(1, 0, 0, 0) -#define DP_MSA_MISC_COLOR_RAW _DP_MSA_MISC_COLOR(1, 1, 0, 0) -#define DP_MSA_MISC_COLOR_YCBCR_422_BT601 _DP_MSA_MISC_COLOR(0, 1, 1, 0) -#define DP_MSA_MISC_COLOR_YCBCR_422_BT709 _DP_MSA_MISC_COLOR(0, 1, 1, 1) 
-#define DP_MSA_MISC_COLOR_YCBCR_444_BT601 _DP_MSA_MISC_COLOR(0, 2, 1, 0) -#define DP_MSA_MISC_COLOR_YCBCR_444_BT709 _DP_MSA_MISC_COLOR(0, 2, 1, 1) -#define DP_MSA_MISC_COLOR_XVYCC_422_BT601 _DP_MSA_MISC_COLOR(0, 1, 0, 0) -#define DP_MSA_MISC_COLOR_XVYCC_422_BT709 _DP_MSA_MISC_COLOR(0, 1, 0, 1) -#define DP_MSA_MISC_COLOR_XVYCC_444_BT601 _DP_MSA_MISC_COLOR(0, 2, 0, 0) -#define DP_MSA_MISC_COLOR_XVYCC_444_BT709 _DP_MSA_MISC_COLOR(0, 2, 0, 1) -#define DP_MSA_MISC_COLOR_OPRGB _DP_MSA_MISC_COLOR(0, 0, 1, 1) -#define DP_MSA_MISC_COLOR_DCI_P3 _DP_MSA_MISC_COLOR(0, 3, 1, 0) -#define DP_MSA_MISC_COLOR_COLOR_PROFILE _DP_MSA_MISC_COLOR(0, 3, 1, 1) -#define DP_MSA_MISC_COLOR_VSC_SDP (1 << 14) - #define DP_AUX_MAX_PAYLOAD_BYTES 16 #define DP_AUX_I2C_WRITE 0x0 @@ -108,15 +61,9 @@ struct drm_panel; #define DP_AUX_I2C_REPLY_DEFER (0x2 << 2) #define DP_AUX_I2C_REPLY_MASK (0x3 << 2) -/* DPCD Field Address Mapping */ - -/* Receiver Capability */ +/* AUX CH addresses */ +/* DPCD */ #define DP_DPCD_REV 0x000 -# define DP_DPCD_REV_10 0x10 -# define DP_DPCD_REV_11 0x11 -# define DP_DPCD_REV_12 0x12 -# define DP_DPCD_REV_13 0x13 -# define DP_DPCD_REV_14 0x14 #define DP_MAX_LINK_RATE 0x001 @@ -127,9 +74,7 @@ struct drm_panel; #define DP_MAX_DOWNSPREAD 0x003 # define DP_MAX_DOWNSPREAD_0_5 (1 << 0) -# define DP_STREAM_REGENERATION_STATUS_CAP (1 << 1) /* 2.0 */ # define DP_NO_AUX_HANDSHAKE_LINK_TRAINING (1 << 6) -# define DP_TPS4_SUPPORTED (1 << 7) #define DP_NORP 0x004 @@ -144,8 +89,6 @@ struct drm_panel; # define DP_DETAILED_CAP_INFO_AVAILABLE (1 << 4) /* DPI */ #define DP_MAIN_LINK_CHANNEL_CODING 0x006 -# define DP_CAP_ANSI_8B10B (1 << 0) -# define DP_CAP_ANSI_128B132B (1 << 1) /* 2.0 */ #define DP_DOWN_STREAM_PORT_COUNT 0x007 # define DP_PORT_COUNT_MASK 0x0f @@ -174,9 +117,7 @@ struct drm_panel; # define DP_FRAMING_CHANGE_CAP (1 << 1) # define DP_DPCD_DISPLAY_CONTROL_CAPABLE (1 << 3) /* edp v1.2 or higher */ -#define DP_TRAINING_AUX_RD_INTERVAL 0x00e /* XXX 1.2? 
*/ -# define DP_TRAINING_AUX_RD_MASK 0x7F /* DP 1.3 */ -# define DP_EXTENDED_RECEIVER_CAP_FIELD_PRESENT (1 << 7) /* DP 1.3 */ +#define DP_TRAINING_AUX_RD_INTERVAL 0x00e /* XXX 1.2? */ #define DP_ADAPTER_CAP 0x00f /* 1.2 */ # define DP_FORCE_LOAD_SENSE_CAP (1 << 0) @@ -189,14 +130,8 @@ struct drm_panel; #define DP_FAUX_CAP 0x020 /* 1.2 */ # define DP_FAUX_CAP_1 (1 << 0) -#define DP_SINK_VIDEO_FALLBACK_FORMATS 0x020 /* 2.0 */ -# define DP_FALLBACK_1024x768_60HZ_24BPP (1 << 0) -# define DP_FALLBACK_1280x720_60HZ_24BPP (1 << 1) -# define DP_FALLBACK_1920x1080_60HZ_24BPP (1 << 2) - #define DP_MSTM_CAP 0x021 /* 1.2 */ # define DP_MST_CAP (1 << 0) -# define DP_SINGLE_STREAM_SIDEBAND_MSG (1 << 1) /* 2.0 */ #define DP_NUMBER_OF_AUDIO_ENDPOINTS 0x022 /* 1.2 */ @@ -244,123 +179,9 @@ struct drm_panel; #define DP_GUID 0x030 /* 1.2 */ -#define DP_DSC_SUPPORT 0x060 /* DP 1.4 */ -# define DP_DSC_DECOMPRESSION_IS_SUPPORTED (1 << 0) - -#define DP_DSC_REV 0x061 -# define DP_DSC_MAJOR_MASK (0xf << 0) -# define DP_DSC_MINOR_MASK (0xf << 4) -# define DP_DSC_MAJOR_SHIFT 0 -# define DP_DSC_MINOR_SHIFT 4 - -#define DP_DSC_RC_BUF_BLK_SIZE 0x062 -# define DP_DSC_RC_BUF_BLK_SIZE_1 0x0 -# define DP_DSC_RC_BUF_BLK_SIZE_4 0x1 -# define DP_DSC_RC_BUF_BLK_SIZE_16 0x2 -# define DP_DSC_RC_BUF_BLK_SIZE_64 0x3 - -#define DP_DSC_RC_BUF_SIZE 0x063 - -#define DP_DSC_SLICE_CAP_1 0x064 -# define DP_DSC_1_PER_DP_DSC_SINK (1 << 0) -# define DP_DSC_2_PER_DP_DSC_SINK (1 << 1) -# define DP_DSC_4_PER_DP_DSC_SINK (1 << 3) -# define DP_DSC_6_PER_DP_DSC_SINK (1 << 4) -# define DP_DSC_8_PER_DP_DSC_SINK (1 << 5) -# define DP_DSC_10_PER_DP_DSC_SINK (1 << 6) -# define DP_DSC_12_PER_DP_DSC_SINK (1 << 7) - -#define DP_DSC_LINE_BUF_BIT_DEPTH 0x065 -# define DP_DSC_LINE_BUF_BIT_DEPTH_MASK (0xf << 0) -# define DP_DSC_LINE_BUF_BIT_DEPTH_9 0x0 -# define DP_DSC_LINE_BUF_BIT_DEPTH_10 0x1 -# define DP_DSC_LINE_BUF_BIT_DEPTH_11 0x2 -# define DP_DSC_LINE_BUF_BIT_DEPTH_12 0x3 -# define DP_DSC_LINE_BUF_BIT_DEPTH_13 0x4 -# define 
DP_DSC_LINE_BUF_BIT_DEPTH_14 0x5 -# define DP_DSC_LINE_BUF_BIT_DEPTH_15 0x6 -# define DP_DSC_LINE_BUF_BIT_DEPTH_16 0x7 -# define DP_DSC_LINE_BUF_BIT_DEPTH_8 0x8 - -#define DP_DSC_BLK_PREDICTION_SUPPORT 0x066 -# define DP_DSC_BLK_PREDICTION_IS_SUPPORTED (1 << 0) - -#define DP_DSC_MAX_BITS_PER_PIXEL_LOW 0x067 /* eDP 1.4 */ - -#define DP_DSC_MAX_BITS_PER_PIXEL_HI 0x068 /* eDP 1.4 */ -# define DP_DSC_MAX_BITS_PER_PIXEL_HI_MASK (0x3 << 0) -# define DP_DSC_MAX_BITS_PER_PIXEL_HI_SHIFT 8 - -#define DP_DSC_DEC_COLOR_FORMAT_CAP 0x069 -# define DP_DSC_RGB (1 << 0) -# define DP_DSC_YCbCr444 (1 << 1) -# define DP_DSC_YCbCr422_Simple (1 << 2) -# define DP_DSC_YCbCr422_Native (1 << 3) -# define DP_DSC_YCbCr420_Native (1 << 4) - -#define DP_DSC_DEC_COLOR_DEPTH_CAP 0x06A -# define DP_DSC_8_BPC (1 << 1) -# define DP_DSC_10_BPC (1 << 2) -# define DP_DSC_12_BPC (1 << 3) - -#define DP_DSC_PEAK_THROUGHPUT 0x06B -# define DP_DSC_THROUGHPUT_MODE_0_MASK (0xf << 0) -# define DP_DSC_THROUGHPUT_MODE_0_SHIFT 0 -# define DP_DSC_THROUGHPUT_MODE_0_UNSUPPORTED 0 -# define DP_DSC_THROUGHPUT_MODE_0_340 (1 << 0) -# define DP_DSC_THROUGHPUT_MODE_0_400 (2 << 0) -# define DP_DSC_THROUGHPUT_MODE_0_450 (3 << 0) -# define DP_DSC_THROUGHPUT_MODE_0_500 (4 << 0) -# define DP_DSC_THROUGHPUT_MODE_0_550 (5 << 0) -# define DP_DSC_THROUGHPUT_MODE_0_600 (6 << 0) -# define DP_DSC_THROUGHPUT_MODE_0_650 (7 << 0) -# define DP_DSC_THROUGHPUT_MODE_0_700 (8 << 0) -# define DP_DSC_THROUGHPUT_MODE_0_750 (9 << 0) -# define DP_DSC_THROUGHPUT_MODE_0_800 (10 << 0) -# define DP_DSC_THROUGHPUT_MODE_0_850 (11 << 0) -# define DP_DSC_THROUGHPUT_MODE_0_900 (12 << 0) -# define DP_DSC_THROUGHPUT_MODE_0_950 (13 << 0) -# define DP_DSC_THROUGHPUT_MODE_0_1000 (14 << 0) -# define DP_DSC_THROUGHPUT_MODE_0_170 (15 << 0) /* 1.4a */ -# define DP_DSC_THROUGHPUT_MODE_1_MASK (0xf << 4) -# define DP_DSC_THROUGHPUT_MODE_1_SHIFT 4 -# define DP_DSC_THROUGHPUT_MODE_1_UNSUPPORTED 0 -# define DP_DSC_THROUGHPUT_MODE_1_340 (1 << 4) -# define 
DP_DSC_THROUGHPUT_MODE_1_400 (2 << 4) -# define DP_DSC_THROUGHPUT_MODE_1_450 (3 << 4) -# define DP_DSC_THROUGHPUT_MODE_1_500 (4 << 4) -# define DP_DSC_THROUGHPUT_MODE_1_550 (5 << 4) -# define DP_DSC_THROUGHPUT_MODE_1_600 (6 << 4) -# define DP_DSC_THROUGHPUT_MODE_1_650 (7 << 4) -# define DP_DSC_THROUGHPUT_MODE_1_700 (8 << 4) -# define DP_DSC_THROUGHPUT_MODE_1_750 (9 << 4) -# define DP_DSC_THROUGHPUT_MODE_1_800 (10 << 4) -# define DP_DSC_THROUGHPUT_MODE_1_850 (11 << 4) -# define DP_DSC_THROUGHPUT_MODE_1_900 (12 << 4) -# define DP_DSC_THROUGHPUT_MODE_1_950 (13 << 4) -# define DP_DSC_THROUGHPUT_MODE_1_1000 (14 << 4) -# define DP_DSC_THROUGHPUT_MODE_1_170 (15 << 4) - -#define DP_DSC_MAX_SLICE_WIDTH 0x06C -#define DP_DSC_MIN_SLICE_WIDTH_VALUE 2560 -#define DP_DSC_SLICE_WIDTH_MULTIPLIER 320 - -#define DP_DSC_SLICE_CAP_2 0x06D -# define DP_DSC_16_PER_DP_DSC_SINK (1 << 0) -# define DP_DSC_20_PER_DP_DSC_SINK (1 << 1) -# define DP_DSC_24_PER_DP_DSC_SINK (1 << 2) - -#define DP_DSC_BITS_PER_PIXEL_INC 0x06F -# define DP_DSC_BITS_PER_PIXEL_1_16 0x0 -# define DP_DSC_BITS_PER_PIXEL_1_8 0x1 -# define DP_DSC_BITS_PER_PIXEL_1_4 0x2 -# define DP_DSC_BITS_PER_PIXEL_1_2 0x3 -# define DP_DSC_BITS_PER_PIXEL_1 0x4 - #define DP_PSR_SUPPORT 0x070 /* XXX 1.2? */ # define DP_PSR_IS_SUPPORTED 1 # define DP_PSR2_IS_SUPPORTED 2 /* eDP 1.4 */ -# define DP_PSR2_WITH_Y_COORD_IS_SUPPORTED 3 /* eDP 1.4a */ #define DP_PSR_CAPS 0x071 /* XXX 1.2? 
*/ # define DP_PSR_NO_TRAIN_ON_EXIT 1 @@ -373,11 +194,6 @@ struct drm_panel; # define DP_PSR_SETUP_TIME_0 (6 << 1) # define DP_PSR_SETUP_TIME_MASK (7 << 1) # define DP_PSR_SETUP_TIME_SHIFT 1 -# define DP_PSR2_SU_Y_COORDINATE_REQUIRED (1 << 4) /* eDP 1.4a */ -# define DP_PSR2_SU_GRANULARITY_REQUIRED (1 << 5) /* eDP 1.4b */ - -#define DP_PSR2_SU_X_GRANULARITY 0x072 /* eDP 1.4b */ -#define DP_PSR2_SU_Y_GRANULARITY 0x074 /* eDP 1.4b */ /* * 0x80-0x8f describe downstream port capabilities, but there are two layouts @@ -398,155 +214,20 @@ struct drm_panel; # define DP_DS_PORT_TYPE_DP_DUALMODE 5 # define DP_DS_PORT_TYPE_WIRELESS 6 # define DP_DS_PORT_HPD (1 << 3) -# define DP_DS_NON_EDID_MASK (0xf << 4) -# define DP_DS_NON_EDID_720x480i_60 (1 << 4) -# define DP_DS_NON_EDID_720x480i_50 (2 << 4) -# define DP_DS_NON_EDID_1920x1080i_60 (3 << 4) -# define DP_DS_NON_EDID_1920x1080i_50 (4 << 4) -# define DP_DS_NON_EDID_1280x720_60 (5 << 4) -# define DP_DS_NON_EDID_1280x720_50 (7 << 4) /* offset 1 for VGA is maximum megapixels per second / 8 */ -/* offset 1 for DVI/HDMI is maximum TMDS clock in Mbps / 2.5 */ -/* offset 2 for VGA/DVI/HDMI */ +/* offset 2 */ # define DP_DS_MAX_BPC_MASK (3 << 0) # define DP_DS_8BPC 0 # define DP_DS_10BPC 1 # define DP_DS_12BPC 2 # define DP_DS_16BPC 3 -/* HDMI2.1 PCON FRL CONFIGURATION */ -# define DP_PCON_MAX_FRL_BW (7 << 2) -# define DP_PCON_MAX_0GBPS (0 << 2) -# define DP_PCON_MAX_9GBPS (1 << 2) -# define DP_PCON_MAX_18GBPS (2 << 2) -# define DP_PCON_MAX_24GBPS (3 << 2) -# define DP_PCON_MAX_32GBPS (4 << 2) -# define DP_PCON_MAX_40GBPS (5 << 2) -# define DP_PCON_MAX_48GBPS (6 << 2) -# define DP_PCON_SOURCE_CTL_MODE (1 << 5) -/* offset 3 for DVI */ -# define DP_DS_DVI_DUAL_LINK (1 << 1) -# define DP_DS_DVI_HIGH_COLOR_DEPTH (1 << 2) -/* offset 3 for HDMI */ -# define DP_DS_HDMI_FRAME_SEQ_TO_FRAME_PACK (1 << 0) -# define DP_DS_HDMI_YCBCR422_PASS_THROUGH (1 << 1) -# define DP_DS_HDMI_YCBCR420_PASS_THROUGH (1 << 2) -# define 
DP_DS_HDMI_YCBCR444_TO_422_CONV (1 << 3) -# define DP_DS_HDMI_YCBCR444_TO_420_CONV (1 << 4) - -/* - * VESA DP-to-HDMI PCON Specification adds caps for colorspace - * conversion in DFP cap DPCD 83h. Sec6.1 Table-3. - * Based on the available support the source can enable - * color conversion by writing into PROTOCOL_COVERTER_CONTROL_2 - * DPCD 3052h. - */ -# define DP_DS_HDMI_BT601_RGB_YCBCR_CONV (1 << 5) -# define DP_DS_HDMI_BT709_RGB_YCBCR_CONV (1 << 6) -# define DP_DS_HDMI_BT2020_RGB_YCBCR_CONV (1 << 7) - -#define DP_MAX_DOWNSTREAM_PORTS 0x10 - -/* DP Forward error Correction Registers */ -#define DP_FEC_CAPABILITY 0x090 /* 1.4 */ -# define DP_FEC_CAPABLE (1 << 0) -# define DP_FEC_UNCORR_BLK_ERROR_COUNT_CAP (1 << 1) -# define DP_FEC_CORR_BLK_ERROR_COUNT_CAP (1 << 2) -# define DP_FEC_BIT_ERROR_COUNT_CAP (1 << 3) - -/* DP-HDMI2.1 PCON DSC ENCODER SUPPORT */ -#define DP_PCON_DSC_ENCODER_CAP_SIZE 0xC /* 0x9E - 0x92 */ -#define DP_PCON_DSC_ENCODER 0x092 -# define DP_PCON_DSC_ENCODER_SUPPORTED (1 << 0) -# define DP_PCON_DSC_PPS_ENC_OVERRIDE (1 << 1) - -/* DP-HDMI2.1 PCON DSC Version */ -#define DP_PCON_DSC_VERSION 0x093 -# define DP_PCON_DSC_MAJOR_MASK (0xF << 0) -# define DP_PCON_DSC_MINOR_MASK (0xF << 4) -# define DP_PCON_DSC_MAJOR_SHIFT 0 -# define DP_PCON_DSC_MINOR_SHIFT 4 - -/* DP-HDMI2.1 PCON DSC RC Buffer block size */ -#define DP_PCON_DSC_RC_BUF_BLK_INFO 0x094 -# define DP_PCON_DSC_RC_BUF_BLK_SIZE (0x3 << 0) -# define DP_PCON_DSC_RC_BUF_BLK_1KB 0 -# define DP_PCON_DSC_RC_BUF_BLK_4KB 1 -# define DP_PCON_DSC_RC_BUF_BLK_16KB 2 -# define DP_PCON_DSC_RC_BUF_BLK_64KB 3 - -/* DP-HDMI2.1 PCON DSC RC Buffer size */ -#define DP_PCON_DSC_RC_BUF_SIZE 0x095 - -/* DP-HDMI2.1 PCON DSC Slice capabilities-1 */ -#define DP_PCON_DSC_SLICE_CAP_1 0x096 -# define DP_PCON_DSC_1_PER_DSC_ENC (0x1 << 0) -# define DP_PCON_DSC_2_PER_DSC_ENC (0x1 << 1) -# define DP_PCON_DSC_4_PER_DSC_ENC (0x1 << 3) -# define DP_PCON_DSC_6_PER_DSC_ENC (0x1 << 4) -# define DP_PCON_DSC_8_PER_DSC_ENC (0x1 << 
5) -# define DP_PCON_DSC_10_PER_DSC_ENC (0x1 << 6) -# define DP_PCON_DSC_12_PER_DSC_ENC (0x1 << 7) - -#define DP_PCON_DSC_BUF_BIT_DEPTH 0x097 -# define DP_PCON_DSC_BIT_DEPTH_MASK (0xF << 0) -# define DP_PCON_DSC_DEPTH_9_BITS 0 -# define DP_PCON_DSC_DEPTH_10_BITS 1 -# define DP_PCON_DSC_DEPTH_11_BITS 2 -# define DP_PCON_DSC_DEPTH_12_BITS 3 -# define DP_PCON_DSC_DEPTH_13_BITS 4 -# define DP_PCON_DSC_DEPTH_14_BITS 5 -# define DP_PCON_DSC_DEPTH_15_BITS 6 -# define DP_PCON_DSC_DEPTH_16_BITS 7 -# define DP_PCON_DSC_DEPTH_8_BITS 8 - -#define DP_PCON_DSC_BLOCK_PREDICTION 0x098 -# define DP_PCON_DSC_BLOCK_PRED_SUPPORT (0x1 << 0) - -#define DP_PCON_DSC_ENC_COLOR_FMT_CAP 0x099 -# define DP_PCON_DSC_ENC_RGB (0x1 << 0) -# define DP_PCON_DSC_ENC_YUV444 (0x1 << 1) -# define DP_PCON_DSC_ENC_YUV422_S (0x1 << 2) -# define DP_PCON_DSC_ENC_YUV422_N (0x1 << 3) -# define DP_PCON_DSC_ENC_YUV420_N (0x1 << 4) - -#define DP_PCON_DSC_ENC_COLOR_DEPTH_CAP 0x09A -# define DP_PCON_DSC_ENC_8BPC (0x1 << 1) -# define DP_PCON_DSC_ENC_10BPC (0x1 << 2) -# define DP_PCON_DSC_ENC_12BPC (0x1 << 3) - -#define DP_PCON_DSC_MAX_SLICE_WIDTH 0x09B - -/* DP-HDMI2.1 PCON DSC Slice capabilities-2 */ -#define DP_PCON_DSC_SLICE_CAP_2 0x09C -# define DP_PCON_DSC_16_PER_DSC_ENC (0x1 << 0) -# define DP_PCON_DSC_20_PER_DSC_ENC (0x1 << 1) -# define DP_PCON_DSC_24_PER_DSC_ENC (0x1 << 2) - -/* DP-HDMI2.1 PCON HDMI TX Encoder Bits/pixel increment */ -#define DP_PCON_DSC_BPP_INCR 0x09E -# define DP_PCON_DSC_BPP_INCR_MASK (0x7 << 0) -# define DP_PCON_DSC_ONE_16TH_BPP 0 -# define DP_PCON_DSC_ONE_8TH_BPP 1 -# define DP_PCON_DSC_ONE_4TH_BPP 2 -# define DP_PCON_DSC_ONE_HALF_BPP 3 -# define DP_PCON_DSC_ONE_BPP 4 - -/* DP Extended DSC Capabilities */ -#define DP_DSC_BRANCH_OVERALL_THROUGHPUT_0 0x0a0 /* DP 1.4a SCR */ -#define DP_DSC_BRANCH_OVERALL_THROUGHPUT_1 0x0a1 -#define DP_DSC_BRANCH_MAX_LINE_WIDTH 0x0a2 - -/* Link Configuration */ +/* link configuration */ #define DP_LINK_BW_SET 0x100 # define DP_LINK_RATE_TABLE 0x00 /* eDP 
1.4 */ # define DP_LINK_BW_1_62 0x06 # define DP_LINK_BW_2_7 0x0a # define DP_LINK_BW_5_4 0x14 /* 1.2 */ -# define DP_LINK_BW_8_1 0x1e /* 1.4 */ -# define DP_LINK_BW_10 0x01 /* 2.0 128b/132b Link Layer */ -# define DP_LINK_BW_13_5 0x04 /* 2.0 128b/132b Link Layer */ -# define DP_LINK_BW_20 0x02 /* 2.0 128b/132b Link Layer */ #define DP_LANE_COUNT_SET 0x101 # define DP_LANE_COUNT_MASK 0x0f @@ -557,9 +238,7 @@ struct drm_panel; # define DP_TRAINING_PATTERN_1 1 # define DP_TRAINING_PATTERN_2 2 # define DP_TRAINING_PATTERN_3 3 /* 1.2 */ -# define DP_TRAINING_PATTERN_4 7 /* 1.4 */ # define DP_TRAINING_PATTERN_MASK 0x3 -# define DP_TRAINING_PATTERN_MASK_1_4 0xf /* DPCD 1.1 only. For DPCD >= 1.2 see per-lane DP_LINK_QUAL_LANEn_SET */ # define DP_LINK_QUAL_PATTERN_11_DISABLE (0 << 2) @@ -598,15 +277,12 @@ struct drm_panel; # define DP_TRAIN_PRE_EMPHASIS_SHIFT 3 # define DP_TRAIN_MAX_PRE_EMPHASIS_REACHED (1 << 5) -# define DP_TX_FFE_PRESET_VALUE_MASK (0xf << 0) /* 2.0 128b/132b Link Layer */ - #define DP_DOWNSPREAD_CTRL 0x107 # define DP_SPREAD_AMP_0_5 (1 << 4) # define DP_MSA_TIMING_PAR_IGNORE_EN (1 << 7) /* eDP */ #define DP_MAIN_LINK_CHANNEL_CODING_SET 0x108 # define DP_SET_ANSI_8B10B (1 << 0) -# define DP_SET_ANSI_128B132B (1 << 1) #define DP_I2C_SPEED_CONTROL_STATUS 0x109 /* DPI */ /* bitmask as for DP_I2C_SPEED_CAP */ @@ -625,19 +301,8 @@ struct drm_panel; # define DP_LINK_QUAL_PATTERN_ERROR_RATE 2 # define DP_LINK_QUAL_PATTERN_PRBS7 3 # define DP_LINK_QUAL_PATTERN_80BIT_CUSTOM 4 -# define DP_LINK_QUAL_PATTERN_CP2520_PAT_1 5 -# define DP_LINK_QUAL_PATTERN_CP2520_PAT_2 6 -# define DP_LINK_QUAL_PATTERN_CP2520_PAT_3 7 -/* DP 2.0 UHBR10, UHBR13.5, UHBR20 */ -# define DP_LINK_QUAL_PATTERN_128B132B_TPS1 0x08 -# define DP_LINK_QUAL_PATTERN_128B132B_TPS2 0x10 -# define DP_LINK_QUAL_PATTERN_PRSBS9 0x18 -# define DP_LINK_QUAL_PATTERN_PRSBS11 0x20 -# define DP_LINK_QUAL_PATTERN_PRSBS15 0x28 -# define DP_LINK_QUAL_PATTERN_PRSBS23 0x30 -# define DP_LINK_QUAL_PATTERN_PRSBS31 0x38 
-# define DP_LINK_QUAL_PATTERN_CUSTOM 0x40 -# define DP_LINK_QUAL_PATTERN_SQUARE 0x48 +# define DP_LINK_QUAL_PATTERN_HBR2_EYE 5 +# define DP_LINK_QUAL_PATTERN_MASK 7 #define DP_TRAINING_LANE0_1_SET2 0x10f #define DP_TRAINING_LANE2_3_SET2 0x110 @@ -670,33 +335,16 @@ struct drm_panel; #define DP_UPSTREAM_DEVICE_DP_PWR_NEED 0x118 /* 1.2 */ # define DP_PWR_NOT_NEEDED (1 << 0) -#define DP_FEC_CONFIGURATION 0x120 /* 1.4 */ -# define DP_FEC_READY (1 << 0) -# define DP_FEC_ERR_COUNT_SEL_MASK (7 << 1) -# define DP_FEC_ERR_COUNT_DIS (0 << 1) -# define DP_FEC_UNCORR_BLK_ERROR_COUNT (1 << 1) -# define DP_FEC_CORR_BLK_ERROR_COUNT (2 << 1) -# define DP_FEC_BIT_ERROR_COUNT (3 << 1) -# define DP_FEC_LANE_SELECT_MASK (3 << 4) -# define DP_FEC_LANE_0_SELECT (0 << 4) -# define DP_FEC_LANE_1_SELECT (1 << 4) -# define DP_FEC_LANE_2_SELECT (2 << 4) -# define DP_FEC_LANE_3_SELECT (3 << 4) - #define DP_AUX_FRAME_SYNC_VALUE 0x15c /* eDP 1.4 */ # define DP_AUX_FRAME_SYNC_VALID (1 << 0) -#define DP_DSC_ENABLE 0x160 /* DP 1.4 */ -# define DP_DECOMPRESSION_EN (1 << 0) - -#define DP_PSR_EN_CFG 0x170 /* XXX 1.2? */ -# define DP_PSR_ENABLE BIT(0) -# define DP_PSR_MAIN_LINK_ACTIVE BIT(1) -# define DP_PSR_CRC_VERIFICATION BIT(2) -# define DP_PSR_FRAME_CAPTURE BIT(3) -# define DP_PSR_SU_REGION_SCANLINE_CAPTURE BIT(4) /* eDP 1.4a */ -# define DP_PSR_IRQ_HPD_WITH_CRC_ERRORS BIT(5) /* eDP 1.4a */ -# define DP_PSR_ENABLE_PSR2 BIT(6) /* eDP 1.4a */ +#define DP_PSR_EN_CFG 0x170 /* XXX 1.2? 
*/ +# define DP_PSR_ENABLE (1 << 0) +# define DP_PSR_MAIN_LINK_ACTIVE (1 << 1) +# define DP_PSR_CRC_VERIFICATION (1 << 2) +# define DP_PSR_FRAME_CAPTURE (1 << 3) +# define DP_PSR_SELECTIVE_UPDATE (1 << 4) +# define DP_PSR_IRQ_HPD_WITH_CRC_ERRORS (1 << 5) #define DP_ADAPTER_CTRL 0x1a0 # define DP_ADAPTER_CTRL_FORCE_LOAD_SENSE (1 << 0) @@ -708,7 +356,6 @@ struct drm_panel; #define DP_PAYLOAD_ALLOCATE_START_TIME_SLOT 0x1c1 #define DP_PAYLOAD_ALLOCATE_TIME_SLOT_COUNT 0x1c2 -/* Link/Sink Device Status */ #define DP_SINK_COUNT 0x200 /* prior to 1.2 bit 7 was reserved mbz */ # define DP_GET_SINK_COUNT(x) ((((x) & 0x80) >> 1) | ((x) & 0x3f)) @@ -740,9 +387,9 @@ struct drm_panel; #define DP_LINK_STATUS_UPDATED (1 << 7) #define DP_SINK_STATUS 0x205 -# define DP_RECEIVE_PORT_0_STATUS (1 << 0) -# define DP_RECEIVE_PORT_1_STATUS (1 << 1) -# define DP_STREAM_REGENERATION_STATUS (1 << 2) /* 2.0 */ + +#define DP_RECEIVE_PORT_0_STATUS (1 << 0) +#define DP_RECEIVE_PORT_1_STATUS (1 << 1) #define DP_ADJUST_REQUEST_LANE0_1 0x206 #define DP_ADJUST_REQUEST_LANE2_3 0x207 @@ -755,30 +402,12 @@ struct drm_panel; # define DP_ADJUST_PRE_EMPHASIS_LANE1_MASK 0xc0 # define DP_ADJUST_PRE_EMPHASIS_LANE1_SHIFT 6 -/* DP 2.0 128b/132b Link Layer */ -# define DP_ADJUST_TX_FFE_PRESET_LANE0_MASK (0xf << 0) -# define DP_ADJUST_TX_FFE_PRESET_LANE0_SHIFT 0 -# define DP_ADJUST_TX_FFE_PRESET_LANE1_MASK (0xf << 4) -# define DP_ADJUST_TX_FFE_PRESET_LANE1_SHIFT 4 - -#define DP_ADJUST_REQUEST_POST_CURSOR2 0x20c -# define DP_ADJUST_POST_CURSOR2_LANE0_MASK 0x03 -# define DP_ADJUST_POST_CURSOR2_LANE0_SHIFT 0 -# define DP_ADJUST_POST_CURSOR2_LANE1_MASK 0x0c -# define DP_ADJUST_POST_CURSOR2_LANE1_SHIFT 2 -# define DP_ADJUST_POST_CURSOR2_LANE2_MASK 0x30 -# define DP_ADJUST_POST_CURSOR2_LANE2_SHIFT 4 -# define DP_ADJUST_POST_CURSOR2_LANE3_MASK 0xc0 -# define DP_ADJUST_POST_CURSOR2_LANE3_SHIFT 6 - #define DP_TEST_REQUEST 0x218 # define DP_TEST_LINK_TRAINING (1 << 0) # define DP_TEST_LINK_VIDEO_PATTERN (1 << 1) # define 
DP_TEST_LINK_EDID_READ (1 << 2) # define DP_TEST_LINK_PHY_TEST_PATTERN (1 << 3) /* DPCD >= 1.1 */ # define DP_TEST_LINK_FAUX_PATTERN (1 << 4) /* DPCD >= 1.2 */ -# define DP_TEST_LINK_AUDIO_PATTERN (1 << 5) /* DPCD >= 1.2 */ -# define DP_TEST_LINK_AUDIO_DISABLED_VIDEO (1 << 6) /* DPCD >= 1.2 */ #define DP_TEST_LINK_RATE 0x219 # define DP_LINK_RATE_162 (0x6) @@ -787,66 +416,6 @@ struct drm_panel; #define DP_TEST_LANE_COUNT 0x220 #define DP_TEST_PATTERN 0x221 -# define DP_NO_TEST_PATTERN 0x0 -# define DP_COLOR_RAMP 0x1 -# define DP_BLACK_AND_WHITE_VERTICAL_LINES 0x2 -# define DP_COLOR_SQUARE 0x3 - -#define DP_TEST_H_TOTAL_HI 0x222 -#define DP_TEST_H_TOTAL_LO 0x223 - -#define DP_TEST_V_TOTAL_HI 0x224 -#define DP_TEST_V_TOTAL_LO 0x225 - -#define DP_TEST_H_START_HI 0x226 -#define DP_TEST_H_START_LO 0x227 - -#define DP_TEST_V_START_HI 0x228 -#define DP_TEST_V_START_LO 0x229 - -#define DP_TEST_HSYNC_HI 0x22A -# define DP_TEST_HSYNC_POLARITY (1 << 7) -# define DP_TEST_HSYNC_WIDTH_HI_MASK (127 << 0) -#define DP_TEST_HSYNC_WIDTH_LO 0x22B - -#define DP_TEST_VSYNC_HI 0x22C -# define DP_TEST_VSYNC_POLARITY (1 << 7) -# define DP_TEST_VSYNC_WIDTH_HI_MASK (127 << 0) -#define DP_TEST_VSYNC_WIDTH_LO 0x22D - -#define DP_TEST_H_WIDTH_HI 0x22E -#define DP_TEST_H_WIDTH_LO 0x22F - -#define DP_TEST_V_HEIGHT_HI 0x230 -#define DP_TEST_V_HEIGHT_LO 0x231 - -#define DP_TEST_MISC0 0x232 -# define DP_TEST_SYNC_CLOCK (1 << 0) -# define DP_TEST_COLOR_FORMAT_MASK (3 << 1) -# define DP_TEST_COLOR_FORMAT_SHIFT 1 -# define DP_COLOR_FORMAT_RGB (0 << 1) -# define DP_COLOR_FORMAT_YCbCr422 (1 << 1) -# define DP_COLOR_FORMAT_YCbCr444 (2 << 1) -# define DP_TEST_DYNAMIC_RANGE_VESA (0 << 3) -# define DP_TEST_DYNAMIC_RANGE_CEA (1 << 3) -# define DP_TEST_YCBCR_COEFFICIENTS (1 << 4) -# define DP_YCBCR_COEFFICIENTS_ITU601 (0 << 4) -# define DP_YCBCR_COEFFICIENTS_ITU709 (1 << 4) -# define DP_TEST_BIT_DEPTH_MASK (7 << 5) -# define DP_TEST_BIT_DEPTH_SHIFT 5 -# define DP_TEST_BIT_DEPTH_6 (0 << 5) -# define 
DP_TEST_BIT_DEPTH_8 (1 << 5) -# define DP_TEST_BIT_DEPTH_10 (2 << 5) -# define DP_TEST_BIT_DEPTH_12 (3 << 5) -# define DP_TEST_BIT_DEPTH_16 (4 << 5) - -#define DP_TEST_MISC1 0x233 -# define DP_TEST_REFRESH_DENOMINATOR (1 << 0) -# define DP_TEST_INTERLACED (1 << 1) - -#define DP_TEST_REFRESH_RATE_NUMERATOR 0x234 - -#define DP_TEST_MISC0 0x232 #define DP_TEST_CRC_R_CR 0x240 #define DP_TEST_CRC_G_Y 0x242 @@ -856,27 +425,6 @@ struct drm_panel; # define DP_TEST_CRC_SUPPORTED (1 << 5) # define DP_TEST_COUNT_MASK 0xf -#define DP_PHY_TEST_PATTERN 0x248 -# define DP_PHY_TEST_PATTERN_SEL_MASK 0x7 -# define DP_PHY_TEST_PATTERN_NONE 0x0 -# define DP_PHY_TEST_PATTERN_D10_2 0x1 -# define DP_PHY_TEST_PATTERN_ERROR_COUNT 0x2 -# define DP_PHY_TEST_PATTERN_PRBS7 0x3 -# define DP_PHY_TEST_PATTERN_80BIT_CUSTOM 0x4 -# define DP_PHY_TEST_PATTERN_CP2520 0x5 - -#define DP_TEST_HBR2_SCRAMBLER_RESET 0x24A -#define DP_TEST_80BIT_CUSTOM_PATTERN_7_0 0x250 -#define DP_TEST_80BIT_CUSTOM_PATTERN_15_8 0x251 -#define DP_TEST_80BIT_CUSTOM_PATTERN_23_16 0x252 -#define DP_TEST_80BIT_CUSTOM_PATTERN_31_24 0x253 -#define DP_TEST_80BIT_CUSTOM_PATTERN_39_32 0x254 -#define DP_TEST_80BIT_CUSTOM_PATTERN_47_40 0x255 -#define DP_TEST_80BIT_CUSTOM_PATTERN_55_48 0x256 -#define DP_TEST_80BIT_CUSTOM_PATTERN_63_56 0x257 -#define DP_TEST_80BIT_CUSTOM_PATTERN_71_64 0x258 -#define DP_TEST_80BIT_CUSTOM_PATTERN_79_72 0x259 - #define DP_TEST_RESPONSE 0x260 # define DP_TEST_ACK (1 << 0) # define DP_TEST_NAK (1 << 1) @@ -886,26 +434,6 @@ struct drm_panel; #define DP_TEST_SINK 0x270 # define DP_TEST_SINK_START (1 << 0) -#define DP_TEST_AUDIO_MODE 0x271 -#define DP_TEST_AUDIO_PATTERN_TYPE 0x272 -#define DP_TEST_AUDIO_PERIOD_CH1 0x273 -#define DP_TEST_AUDIO_PERIOD_CH2 0x274 -#define DP_TEST_AUDIO_PERIOD_CH3 0x275 -#define DP_TEST_AUDIO_PERIOD_CH4 0x276 -#define DP_TEST_AUDIO_PERIOD_CH5 0x277 -#define DP_TEST_AUDIO_PERIOD_CH6 0x278 -#define DP_TEST_AUDIO_PERIOD_CH7 0x279 -#define DP_TEST_AUDIO_PERIOD_CH8 0x27A - -#define 
DP_FEC_STATUS 0x280 /* 1.4 */ -# define DP_FEC_DECODE_EN_DETECTED (1 << 0) -# define DP_FEC_DECODE_DIS_DETECTED (1 << 1) - -#define DP_FEC_ERROR_COUNT_LSB 0x0281 /* 1.4 */ - -#define DP_FEC_ERROR_COUNT_MSB 0x0282 /* 1.4 */ -# define DP_FEC_ERROR_COUNT_MASK 0x7F -# define DP_FEC_ERR_COUNT_VALID (1 << 7) #define DP_PAYLOAD_TABLE_UPDATE_STATUS 0x2c0 /* 1.2 MST */ # define DP_PAYLOAD_TABLE_UPDATED (1 << 0) @@ -914,34 +442,23 @@ struct drm_panel; #define DP_VC_PAYLOAD_ID_SLOT_1 0x2c1 /* 1.2 MST */ /* up to ID_SLOT_63 at 0x2ff */ -/* Source Device-specific */ #define DP_SOURCE_OUI 0x300 - -/* Sink Device-specific */ #define DP_SINK_OUI 0x400 - -/* Branch Device-specific */ #define DP_BRANCH_OUI 0x500 #define DP_BRANCH_ID 0x503 -#define DP_BRANCH_REVISION_START 0x509 #define DP_BRANCH_HW_REV 0x509 #define DP_BRANCH_SW_REV 0x50A -/* Link/Sink Device Power Control */ #define DP_SET_POWER 0x600 # define DP_SET_POWER_D0 0x1 # define DP_SET_POWER_D3 0x2 # define DP_SET_POWER_MASK 0x3 -# define DP_SET_POWER_D3_AUX_ON 0x5 -/* eDP-specific */ #define DP_EDP_DPCD_REV 0x700 /* eDP 1.2 */ # define DP_EDP_11 0x00 # define DP_EDP_12 0x01 # define DP_EDP_13 0x02 # define DP_EDP_14 0x03 -# define DP_EDP_14a 0x04 /* eDP 1.4a */ -# define DP_EDP_14b 0x05 /* eDP 1.4b */ #define DP_EDP_GENERAL_CAP_1 0x701 # define DP_EDP_TCON_BACKLIGHT_ADJUSTMENT_CAP (1 << 0) @@ -997,12 +514,10 @@ struct drm_panel; #define DP_EDP_PWMGEN_BIT_COUNT 0x724 #define DP_EDP_PWMGEN_BIT_COUNT_CAP_MIN 0x725 #define DP_EDP_PWMGEN_BIT_COUNT_CAP_MAX 0x726 -# define DP_EDP_PWMGEN_BIT_COUNT_MASK (0x1f << 0) #define DP_EDP_BACKLIGHT_CONTROL_STATUS 0x727 #define DP_EDP_BACKLIGHT_FREQ_SET 0x728 -# define DP_EDP_BACKLIGHT_FREQ_BASE_KHZ 27000 #define DP_EDP_BACKLIGHT_FREQ_CAP_MIN_MSB 0x72a #define DP_EDP_BACKLIGHT_FREQ_CAP_MIN_MID 0x72b @@ -1018,18 +533,11 @@ struct drm_panel; #define DP_EDP_REGIONAL_BACKLIGHT_BASE 0x740 /* eDP 1.4 */ #define DP_EDP_REGIONAL_BACKLIGHT_0 0x741 /* eDP 1.4 */ -#define DP_EDP_MSO_LINK_CAPABILITIES 
0x7a4 /* eDP 1.4 */ -# define DP_EDP_MSO_NUMBER_OF_LINKS_MASK (7 << 0) -# define DP_EDP_MSO_NUMBER_OF_LINKS_SHIFT 0 -# define DP_EDP_MSO_INDEPENDENT_LINK_BIT (1 << 3) - -/* Sideband MSG Buffers */ #define DP_SIDEBAND_MSG_DOWN_REQ_BASE 0x1000 /* 1.2 MST */ #define DP_SIDEBAND_MSG_UP_REP_BASE 0x1200 /* 1.2 MST */ #define DP_SIDEBAND_MSG_DOWN_REP_BASE 0x1400 /* 1.2 MST */ #define DP_SIDEBAND_MSG_UP_REQ_BASE 0x1600 /* 1.2 MST */ -/* DPRX Event Status Indicator */ #define DP_SINK_COUNT_ESI 0x2002 /* 1.2 */ /* 0-5 sink count */ # define DP_SINK_COUNT_CP_READY (1 << 6) @@ -1037,16 +545,8 @@ struct drm_panel; #define DP_DEVICE_SERVICE_IRQ_VECTOR_ESI0 0x2003 /* 1.2 */ #define DP_DEVICE_SERVICE_IRQ_VECTOR_ESI1 0x2004 /* 1.2 */ -# define DP_RX_GTC_MSTR_REQ_STATUS_CHANGE (1 << 0) -# define DP_LOCK_ACQUISITION_REQUEST (1 << 1) -# define DP_CEC_IRQ (1 << 2) #define DP_LINK_SERVICE_IRQ_VECTOR_ESI0 0x2005 /* 1.2 */ -# define RX_CAP_CHANGED (1 << 0) -# define LINK_STATUS_CHANGED (1 << 1) -# define STREAM_STATUS_CHANGED (1 << 2) -# define HDMI_LINK_STATUS_CHANGED (1 << 3) -# define CONNECTED_OFF_ENTRY_REQUESTED (1 << 4) #define DP_PSR_ERROR_STATUS 0x2006 /* XXX 1.2? 
*/ # define DP_PSR_LINK_CRC_ERROR (1 << 0) @@ -1065,367 +565,9 @@ struct drm_panel; # define DP_PSR_SINK_INTERNAL_ERROR 7 # define DP_PSR_SINK_STATE_MASK 0x07 -#define DP_SYNCHRONIZATION_LATENCY_IN_SINK 0x2009 /* edp 1.4 */ -# define DP_MAX_RESYNC_FRAME_COUNT_MASK (0xf << 0) -# define DP_MAX_RESYNC_FRAME_COUNT_SHIFT 0 -# define DP_LAST_ACTUAL_SYNCHRONIZATION_LATENCY_MASK (0xf << 4) -# define DP_LAST_ACTUAL_SYNCHRONIZATION_LATENCY_SHIFT 4 - -#define DP_LAST_RECEIVED_PSR_SDP 0x200a /* eDP 1.2 */ -# define DP_PSR_STATE_BIT (1 << 0) /* eDP 1.2 */ -# define DP_UPDATE_RFB_BIT (1 << 1) /* eDP 1.2 */ -# define DP_CRC_VALID_BIT (1 << 2) /* eDP 1.2 */ -# define DP_SU_VALID (1 << 3) /* eDP 1.4 */ -# define DP_FIRST_SCAN_LINE_SU_REGION (1 << 4) /* eDP 1.4 */ -# define DP_LAST_SCAN_LINE_SU_REGION (1 << 5) /* eDP 1.4 */ -# define DP_Y_COORDINATE_VALID (1 << 6) /* eDP 1.4a */ - #define DP_RECEIVER_ALPM_STATUS 0x200b /* eDP 1.4 */ # define DP_ALPM_LOCK_TIMEOUT_ERROR (1 << 0) -#define DP_LANE0_1_STATUS_ESI 0x200c /* status same as 0x202 */ -#define DP_LANE2_3_STATUS_ESI 0x200d /* status same as 0x203 */ -#define DP_LANE_ALIGN_STATUS_UPDATED_ESI 0x200e /* status same as 0x204 */ -#define DP_SINK_STATUS_ESI 0x200f /* status same as 0x205 */ - -/* Extended Receiver Capability: See DP_DPCD_REV for definitions */ -#define DP_DP13_DPCD_REV 0x2200 - -#define DP_DPRX_FEATURE_ENUMERATION_LIST 0x2210 /* DP 1.3 */ -# define DP_GTC_CAP (1 << 0) /* DP 1.3 */ -# define DP_SST_SPLIT_SDP_CAP (1 << 1) /* DP 1.4 */ -# define DP_AV_SYNC_CAP (1 << 2) /* DP 1.3 */ -# define DP_VSC_SDP_EXT_FOR_COLORIMETRY_SUPPORTED (1 << 3) /* DP 1.3 */ -# define DP_VSC_EXT_VESA_SDP_SUPPORTED (1 << 4) /* DP 1.4 */ -# define DP_VSC_EXT_VESA_SDP_CHAINING_SUPPORTED (1 << 5) /* DP 1.4 */ -# define DP_VSC_EXT_CEA_SDP_SUPPORTED (1 << 6) /* DP 1.4 */ -# define DP_VSC_EXT_CEA_SDP_CHAINING_SUPPORTED (1 << 7) /* DP 1.4 */ - -#define DP_128B132B_SUPPORTED_LINK_RATES 0x2215 /* 2.0 */ -# define DP_UHBR10 (1 << 0) -# define DP_UHBR20 
(1 << 1) -# define DP_UHBR13_5 (1 << 2) - -#define DP_128B132B_TRAINING_AUX_RD_INTERVAL 0x2216 /* 2.0 */ -# define DP_128B132B_TRAINING_AUX_RD_INTERVAL_MASK 0x7f - -/* Protocol Converter Extension */ -/* HDMI CEC tunneling over AUX DP 1.3 section 5.3.3.3.1 DPCD 1.4+ */ -#define DP_CEC_TUNNELING_CAPABILITY 0x3000 -# define DP_CEC_TUNNELING_CAPABLE (1 << 0) -# define DP_CEC_SNOOPING_CAPABLE (1 << 1) -# define DP_CEC_MULTIPLE_LA_CAPABLE (1 << 2) - -#define DP_CEC_TUNNELING_CONTROL 0x3001 -# define DP_CEC_TUNNELING_ENABLE (1 << 0) -# define DP_CEC_SNOOPING_ENABLE (1 << 1) - -#define DP_CEC_RX_MESSAGE_INFO 0x3002 -# define DP_CEC_RX_MESSAGE_LEN_MASK (0xf << 0) -# define DP_CEC_RX_MESSAGE_LEN_SHIFT 0 -# define DP_CEC_RX_MESSAGE_HPD_STATE (1 << 4) -# define DP_CEC_RX_MESSAGE_HPD_LOST (1 << 5) -# define DP_CEC_RX_MESSAGE_ACKED (1 << 6) -# define DP_CEC_RX_MESSAGE_ENDED (1 << 7) - -#define DP_CEC_TX_MESSAGE_INFO 0x3003 -# define DP_CEC_TX_MESSAGE_LEN_MASK (0xf << 0) -# define DP_CEC_TX_MESSAGE_LEN_SHIFT 0 -# define DP_CEC_TX_RETRY_COUNT_MASK (0x7 << 4) -# define DP_CEC_TX_RETRY_COUNT_SHIFT 4 -# define DP_CEC_TX_MESSAGE_SEND (1 << 7) - -#define DP_CEC_TUNNELING_IRQ_FLAGS 0x3004 -# define DP_CEC_RX_MESSAGE_INFO_VALID (1 << 0) -# define DP_CEC_RX_MESSAGE_OVERFLOW (1 << 1) -# define DP_CEC_TX_MESSAGE_SENT (1 << 4) -# define DP_CEC_TX_LINE_ERROR (1 << 5) -# define DP_CEC_TX_ADDRESS_NACK_ERROR (1 << 6) -# define DP_CEC_TX_DATA_NACK_ERROR (1 << 7) - -#define DP_CEC_LOGICAL_ADDRESS_MASK 0x300E /* 0x300F word */ -# define DP_CEC_LOGICAL_ADDRESS_0 (1 << 0) -# define DP_CEC_LOGICAL_ADDRESS_1 (1 << 1) -# define DP_CEC_LOGICAL_ADDRESS_2 (1 << 2) -# define DP_CEC_LOGICAL_ADDRESS_3 (1 << 3) -# define DP_CEC_LOGICAL_ADDRESS_4 (1 << 4) -# define DP_CEC_LOGICAL_ADDRESS_5 (1 << 5) -# define DP_CEC_LOGICAL_ADDRESS_6 (1 << 6) -# define DP_CEC_LOGICAL_ADDRESS_7 (1 << 7) -#define DP_CEC_LOGICAL_ADDRESS_MASK_2 0x300F /* 0x300E word */ -# define DP_CEC_LOGICAL_ADDRESS_8 (1 << 0) -# define 
DP_CEC_LOGICAL_ADDRESS_9 (1 << 1) -# define DP_CEC_LOGICAL_ADDRESS_10 (1 << 2) -# define DP_CEC_LOGICAL_ADDRESS_11 (1 << 3) -# define DP_CEC_LOGICAL_ADDRESS_12 (1 << 4) -# define DP_CEC_LOGICAL_ADDRESS_13 (1 << 5) -# define DP_CEC_LOGICAL_ADDRESS_14 (1 << 6) -# define DP_CEC_LOGICAL_ADDRESS_15 (1 << 7) - -#define DP_CEC_RX_MESSAGE_BUFFER 0x3010 -#define DP_CEC_TX_MESSAGE_BUFFER 0x3020 -#define DP_CEC_MESSAGE_BUFFER_LENGTH 0x10 - -/* PCON CONFIGURE-1 FRL FOR HDMI SINK */ -#define DP_PCON_HDMI_LINK_CONFIG_1 0x305A -# define DP_PCON_ENABLE_MAX_FRL_BW (7 << 0) -# define DP_PCON_ENABLE_MAX_BW_0GBPS 0 -# define DP_PCON_ENABLE_MAX_BW_9GBPS 1 -# define DP_PCON_ENABLE_MAX_BW_18GBPS 2 -# define DP_PCON_ENABLE_MAX_BW_24GBPS 3 -# define DP_PCON_ENABLE_MAX_BW_32GBPS 4 -# define DP_PCON_ENABLE_MAX_BW_40GBPS 5 -# define DP_PCON_ENABLE_MAX_BW_48GBPS 6 -# define DP_PCON_ENABLE_SOURCE_CTL_MODE (1 << 3) -# define DP_PCON_ENABLE_CONCURRENT_LINK (1 << 4) -# define DP_PCON_ENABLE_SEQUENTIAL_LINK (0 << 4) -# define DP_PCON_ENABLE_LINK_FRL_MODE (1 << 5) -# define DP_PCON_ENABLE_HPD_READY (1 << 6) -# define DP_PCON_ENABLE_HDMI_LINK (1 << 7) - -/* PCON CONFIGURE-2 FRL FOR HDMI SINK */ -#define DP_PCON_HDMI_LINK_CONFIG_2 0x305B -# define DP_PCON_MAX_LINK_BW_MASK (0x3F << 0) -# define DP_PCON_FRL_BW_MASK_9GBPS (1 << 0) -# define DP_PCON_FRL_BW_MASK_18GBPS (1 << 1) -# define DP_PCON_FRL_BW_MASK_24GBPS (1 << 2) -# define DP_PCON_FRL_BW_MASK_32GBPS (1 << 3) -# define DP_PCON_FRL_BW_MASK_40GBPS (1 << 4) -# define DP_PCON_FRL_BW_MASK_48GBPS (1 << 5) -# define DP_PCON_FRL_LINK_TRAIN_EXTENDED (1 << 6) -# define DP_PCON_FRL_LINK_TRAIN_NORMAL (0 << 6) - -/* PCON HDMI LINK STATUS */ -#define DP_PCON_HDMI_TX_LINK_STATUS 0x303B -# define DP_PCON_HDMI_TX_LINK_ACTIVE (1 << 0) -# define DP_PCON_FRL_READY (1 << 1) - -/* PCON HDMI POST FRL STATUS */ -#define DP_PCON_HDMI_POST_FRL_STATUS 0x3036 -# define DP_PCON_HDMI_LINK_MODE (1 << 0) -# define DP_PCON_HDMI_MODE_TMDS 0 -# define DP_PCON_HDMI_MODE_FRL 1 -# 
define DP_PCON_HDMI_FRL_TRAINED_BW (0x3F << 1) -# define DP_PCON_FRL_TRAINED_BW_9GBPS (1 << 1) -# define DP_PCON_FRL_TRAINED_BW_18GBPS (1 << 2) -# define DP_PCON_FRL_TRAINED_BW_24GBPS (1 << 3) -# define DP_PCON_FRL_TRAINED_BW_32GBPS (1 << 4) -# define DP_PCON_FRL_TRAINED_BW_40GBPS (1 << 5) -# define DP_PCON_FRL_TRAINED_BW_48GBPS (1 << 6) - -#define DP_PROTOCOL_CONVERTER_CONTROL_0 0x3050 /* DP 1.3 */ -# define DP_HDMI_DVI_OUTPUT_CONFIG (1 << 0) /* DP 1.3 */ -#define DP_PROTOCOL_CONVERTER_CONTROL_1 0x3051 /* DP 1.3 */ -# define DP_CONVERSION_TO_YCBCR420_ENABLE (1 << 0) /* DP 1.3 */ -# define DP_HDMI_EDID_PROCESSING_DISABLE (1 << 1) /* DP 1.4 */ -# define DP_HDMI_AUTONOMOUS_SCRAMBLING_DISABLE (1 << 2) /* DP 1.4 */ -# define DP_HDMI_FORCE_SCRAMBLING (1 << 3) /* DP 1.4 */ -#define DP_PROTOCOL_CONVERTER_CONTROL_2 0x3052 /* DP 1.3 */ -# define DP_CONVERSION_TO_YCBCR422_ENABLE (1 << 0) /* DP 1.3 */ -# define DP_PCON_ENABLE_DSC_ENCODER (1 << 1) -# define DP_PCON_ENCODER_PPS_OVERRIDE_MASK (0x3 << 2) -# define DP_PCON_ENC_PPS_OVERRIDE_DISABLED 0 -# define DP_PCON_ENC_PPS_OVERRIDE_EN_PARAMS 1 -# define DP_PCON_ENC_PPS_OVERRIDE_EN_BUFFER 2 -# define DP_CONVERSION_RGB_YCBCR_MASK (7 << 4) -# define DP_CONVERSION_BT601_RGB_YCBCR_ENABLE (1 << 4) -# define DP_CONVERSION_BT709_RGB_YCBCR_ENABLE (1 << 5) -# define DP_CONVERSION_BT2020_RGB_YCBCR_ENABLE (1 << 6) - -/* PCON Downstream HDMI ERROR Status per Lane */ -#define DP_PCON_HDMI_ERROR_STATUS_LN0 0x3037 -#define DP_PCON_HDMI_ERROR_STATUS_LN1 0x3038 -#define DP_PCON_HDMI_ERROR_STATUS_LN2 0x3039 -#define DP_PCON_HDMI_ERROR_STATUS_LN3 0x303A -# define DP_PCON_HDMI_ERROR_COUNT_MASK (0x7 << 0) -# define DP_PCON_HDMI_ERROR_COUNT_THREE_PLUS (1 << 0) -# define DP_PCON_HDMI_ERROR_COUNT_TEN_PLUS (1 << 1) -# define DP_PCON_HDMI_ERROR_COUNT_HUNDRED_PLUS (1 << 2) - -/* PCON HDMI CONFIG PPS Override Buffer - * Valid Offsets to be added to Base : 0-127 - */ -#define DP_PCON_HDMI_PPS_OVERRIDE_BASE 0x3100 - -/* PCON HDMI CONFIG PPS Override 
Parameter: Slice height - * Offset-0 8LSBs of the Slice height. - * Offset-1 8MSBs of the Slice height. - */ -#define DP_PCON_HDMI_PPS_OVRD_SLICE_HEIGHT 0x3180 - -/* PCON HDMI CONFIG PPS Override Parameter: Slice width - * Offset-0 8LSBs of the Slice width. - * Offset-1 8MSBs of the Slice width. - */ -#define DP_PCON_HDMI_PPS_OVRD_SLICE_WIDTH 0x3182 - -/* PCON HDMI CONFIG PPS Override Parameter: bits_per_pixel - * Offset-0 8LSBs of the bits_per_pixel. - * Offset-1 2MSBs of the bits_per_pixel. - */ -#define DP_PCON_HDMI_PPS_OVRD_BPP 0x3184 - -/* HDCP 1.3 and HDCP 2.2 */ -#define DP_AUX_HDCP_BKSV 0x68000 -#define DP_AUX_HDCP_RI_PRIME 0x68005 -#define DP_AUX_HDCP_AKSV 0x68007 -#define DP_AUX_HDCP_AN 0x6800C -#define DP_AUX_HDCP_V_PRIME(h) (0x68014 + h * 4) -#define DP_AUX_HDCP_BCAPS 0x68028 -# define DP_BCAPS_REPEATER_PRESENT BIT(1) -# define DP_BCAPS_HDCP_CAPABLE BIT(0) -#define DP_AUX_HDCP_BSTATUS 0x68029 -# define DP_BSTATUS_REAUTH_REQ BIT(3) -# define DP_BSTATUS_LINK_FAILURE BIT(2) -# define DP_BSTATUS_R0_PRIME_READY BIT(1) -# define DP_BSTATUS_READY BIT(0) -#define DP_AUX_HDCP_BINFO 0x6802A -#define DP_AUX_HDCP_KSV_FIFO 0x6802C -#define DP_AUX_HDCP_AINFO 0x6803B - -/* DP HDCP2.2 parameter offsets in DPCD address space */ -#define DP_HDCP_2_2_REG_RTX_OFFSET 0x69000 -#define DP_HDCP_2_2_REG_TXCAPS_OFFSET 0x69008 -#define DP_HDCP_2_2_REG_CERT_RX_OFFSET 0x6900B -#define DP_HDCP_2_2_REG_RRX_OFFSET 0x69215 -#define DP_HDCP_2_2_REG_RX_CAPS_OFFSET 0x6921D -#define DP_HDCP_2_2_REG_EKPUB_KM_OFFSET 0x69220 -#define DP_HDCP_2_2_REG_EKH_KM_WR_OFFSET 0x692A0 -#define DP_HDCP_2_2_REG_M_OFFSET 0x692B0 -#define DP_HDCP_2_2_REG_HPRIME_OFFSET 0x692C0 -#define DP_HDCP_2_2_REG_EKH_KM_RD_OFFSET 0x692E0 -#define DP_HDCP_2_2_REG_RN_OFFSET 0x692F0 -#define DP_HDCP_2_2_REG_LPRIME_OFFSET 0x692F8 -#define DP_HDCP_2_2_REG_EDKEY_KS_OFFSET 0x69318 -#define DP_HDCP_2_2_REG_RIV_OFFSET 0x69328 -#define DP_HDCP_2_2_REG_RXINFO_OFFSET 0x69330 -#define DP_HDCP_2_2_REG_SEQ_NUM_V_OFFSET 0x69332 
-#define DP_HDCP_2_2_REG_VPRIME_OFFSET 0x69335 -#define DP_HDCP_2_2_REG_RECV_ID_LIST_OFFSET 0x69345 -#define DP_HDCP_2_2_REG_V_OFFSET 0x693E0 -#define DP_HDCP_2_2_REG_SEQ_NUM_M_OFFSET 0x693F0 -#define DP_HDCP_2_2_REG_K_OFFSET 0x693F3 -#define DP_HDCP_2_2_REG_STREAM_ID_TYPE_OFFSET 0x693F5 -#define DP_HDCP_2_2_REG_MPRIME_OFFSET 0x69473 -#define DP_HDCP_2_2_REG_RXSTATUS_OFFSET 0x69493 -#define DP_HDCP_2_2_REG_STREAM_TYPE_OFFSET 0x69494 -#define DP_HDCP_2_2_REG_DBG_OFFSET 0x69518 - -/* LTTPR: Link Training (LT)-tunable PHY Repeaters */ -#define DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV 0xf0000 /* 1.3 */ -#define DP_MAX_LINK_RATE_PHY_REPEATER 0xf0001 /* 1.4a */ -#define DP_PHY_REPEATER_CNT 0xf0002 /* 1.3 */ -#define DP_PHY_REPEATER_MODE 0xf0003 /* 1.3 */ -#define DP_MAX_LANE_COUNT_PHY_REPEATER 0xf0004 /* 1.4a */ -#define DP_Repeater_FEC_CAPABILITY 0xf0004 /* 1.4 */ -#define DP_PHY_REPEATER_EXTENDED_WAIT_TIMEOUT 0xf0005 /* 1.4a */ - -enum drm_dp_phy { - DP_PHY_DPRX, - - DP_PHY_LTTPR1, - DP_PHY_LTTPR2, - DP_PHY_LTTPR3, - DP_PHY_LTTPR4, - DP_PHY_LTTPR5, - DP_PHY_LTTPR6, - DP_PHY_LTTPR7, - DP_PHY_LTTPR8, - - DP_MAX_LTTPR_COUNT = DP_PHY_LTTPR8, -}; - -#define DP_PHY_LTTPR(i) (DP_PHY_LTTPR1 + (i)) - -#define __DP_LTTPR1_BASE 0xf0010 /* 1.3 */ -#define __DP_LTTPR2_BASE 0xf0060 /* 1.3 */ -#define DP_LTTPR_BASE(dp_phy) \ - (__DP_LTTPR1_BASE + (__DP_LTTPR2_BASE - __DP_LTTPR1_BASE) * \ - ((dp_phy) - DP_PHY_LTTPR1)) - -#define DP_LTTPR_REG(dp_phy, lttpr1_reg) \ - (DP_LTTPR_BASE(dp_phy) - DP_LTTPR_BASE(DP_PHY_LTTPR1) + (lttpr1_reg)) - -#define DP_TRAINING_PATTERN_SET_PHY_REPEATER1 0xf0010 /* 1.3 */ -#define DP_TRAINING_PATTERN_SET_PHY_REPEATER(dp_phy) \ - DP_LTTPR_REG(dp_phy, DP_TRAINING_PATTERN_SET_PHY_REPEATER1) - -#define DP_TRAINING_LANE0_SET_PHY_REPEATER1 0xf0011 /* 1.3 */ -#define DP_TRAINING_LANE0_SET_PHY_REPEATER(dp_phy) \ - DP_LTTPR_REG(dp_phy, DP_TRAINING_LANE0_SET_PHY_REPEATER1) - -#define DP_TRAINING_LANE1_SET_PHY_REPEATER1 0xf0012 /* 1.3 */ -#define 
DP_TRAINING_LANE2_SET_PHY_REPEATER1 0xf0013 /* 1.3 */ -#define DP_TRAINING_LANE3_SET_PHY_REPEATER1 0xf0014 /* 1.3 */ -#define DP_TRAINING_AUX_RD_INTERVAL_PHY_REPEATER1 0xf0020 /* 1.4a */ -#define DP_TRAINING_AUX_RD_INTERVAL_PHY_REPEATER(dp_phy) \ - DP_LTTPR_REG(dp_phy, DP_TRAINING_AUX_RD_INTERVAL_PHY_REPEATER1) - -#define DP_TRANSMITTER_CAPABILITY_PHY_REPEATER1 0xf0021 /* 1.4a */ -# define DP_VOLTAGE_SWING_LEVEL_3_SUPPORTED BIT(0) -# define DP_PRE_EMPHASIS_LEVEL_3_SUPPORTED BIT(1) - -#define DP_LANE0_1_STATUS_PHY_REPEATER1 0xf0030 /* 1.3 */ -#define DP_LANE0_1_STATUS_PHY_REPEATER(dp_phy) \ - DP_LTTPR_REG(dp_phy, DP_LANE0_1_STATUS_PHY_REPEATER1) - -#define DP_LANE2_3_STATUS_PHY_REPEATER1 0xf0031 /* 1.3 */ - -#define DP_LANE_ALIGN_STATUS_UPDATED_PHY_REPEATER1 0xf0032 /* 1.3 */ -#define DP_ADJUST_REQUEST_LANE0_1_PHY_REPEATER1 0xf0033 /* 1.3 */ -#define DP_ADJUST_REQUEST_LANE2_3_PHY_REPEATER1 0xf0034 /* 1.3 */ -#define DP_SYMBOL_ERROR_COUNT_LANE0_PHY_REPEATER1 0xf0035 /* 1.3 */ -#define DP_SYMBOL_ERROR_COUNT_LANE1_PHY_REPEATER1 0xf0037 /* 1.3 */ -#define DP_SYMBOL_ERROR_COUNT_LANE2_PHY_REPEATER1 0xf0039 /* 1.3 */ -#define DP_SYMBOL_ERROR_COUNT_LANE3_PHY_REPEATER1 0xf003b /* 1.3 */ - -#define __DP_FEC1_BASE 0xf0290 /* 1.4 */ -#define __DP_FEC2_BASE 0xf0298 /* 1.4 */ -#define DP_FEC_BASE(dp_phy) \ - (__DP_FEC1_BASE + ((__DP_FEC2_BASE - __DP_FEC1_BASE) * \ - ((dp_phy) - DP_PHY_LTTPR1))) - -#define DP_FEC_REG(dp_phy, fec1_reg) \ - (DP_FEC_BASE(dp_phy) - DP_FEC_BASE(DP_PHY_LTTPR1) + fec1_reg) - -#define DP_FEC_STATUS_PHY_REPEATER1 0xf0290 /* 1.4 */ -#define DP_FEC_STATUS_PHY_REPEATER(dp_phy) \ - DP_FEC_REG(dp_phy, DP_FEC_STATUS_PHY_REPEATER1) - -#define DP_FEC_ERROR_COUNT_PHY_REPEATER1 0xf0291 /* 1.4 */ -#define DP_FEC_CAPABILITY_PHY_REPEATER1 0xf0294 /* 1.4a */ - -#define DP_LTTPR_MAX_ADD 0xf02ff /* 1.4 */ - -#define DP_DPCD_MAX_ADD 0xfffff /* 1.4 */ - -/* Repeater modes */ -#define DP_PHY_REPEATER_MODE_TRANSPARENT 0x55 /* 1.3 */ -#define 
DP_PHY_REPEATER_MODE_NON_TRANSPARENT 0xaa /* 1.3 */ - -/* DP HDCP message start offsets in DPCD address space */ -#define DP_HDCP_2_2_AKE_INIT_OFFSET DP_HDCP_2_2_REG_RTX_OFFSET -#define DP_HDCP_2_2_AKE_SEND_CERT_OFFSET DP_HDCP_2_2_REG_CERT_RX_OFFSET -#define DP_HDCP_2_2_AKE_NO_STORED_KM_OFFSET DP_HDCP_2_2_REG_EKPUB_KM_OFFSET -#define DP_HDCP_2_2_AKE_STORED_KM_OFFSET DP_HDCP_2_2_REG_EKH_KM_WR_OFFSET -#define DP_HDCP_2_2_AKE_SEND_HPRIME_OFFSET DP_HDCP_2_2_REG_HPRIME_OFFSET -#define DP_HDCP_2_2_AKE_SEND_PAIRING_INFO_OFFSET \ - DP_HDCP_2_2_REG_EKH_KM_RD_OFFSET -#define DP_HDCP_2_2_LC_INIT_OFFSET DP_HDCP_2_2_REG_RN_OFFSET -#define DP_HDCP_2_2_LC_SEND_LPRIME_OFFSET DP_HDCP_2_2_REG_LPRIME_OFFSET -#define DP_HDCP_2_2_SKE_SEND_EKS_OFFSET DP_HDCP_2_2_REG_EDKEY_KS_OFFSET -#define DP_HDCP_2_2_REP_SEND_RECVID_LIST_OFFSET DP_HDCP_2_2_REG_RXINFO_OFFSET -#define DP_HDCP_2_2_REP_SEND_ACK_OFFSET DP_HDCP_2_2_REG_V_OFFSET -#define DP_HDCP_2_2_REP_STREAM_MANAGE_OFFSET DP_HDCP_2_2_REG_SEQ_NUM_M_OFFSET -#define DP_HDCP_2_2_REP_STREAM_READY_OFFSET DP_HDCP_2_2_REG_MPRIME_OFFSET - -#define HDCP_2_2_DP_RXSTATUS_LEN 1 -#define HDCP_2_2_DP_RXSTATUS_READY(x) ((x) & BIT(0)) -#define HDCP_2_2_DP_RXSTATUS_H_PRIME(x) ((x) & BIT(1)) -#define HDCP_2_2_DP_RXSTATUS_PAIRING(x) ((x) & BIT(2)) -#define HDCP_2_2_DP_RXSTATUS_REAUTH_REQ(x) ((x) & BIT(3)) -#define HDCP_2_2_DP_RXSTATUS_LINK_FAILED(x) ((x) & BIT(4)) - /* DP 1.2 Sideband message defines */ /* peer device type - DP 1.2a Table 2-92 */ #define DP_PEER_DEVICE_NONE 0x0 @@ -1435,7 +577,6 @@ enum drm_dp_phy { #define DP_PEER_DEVICE_DP_LEGACY_CONV 0x4 /* DP 1.2 MST sideband request names DP 1.2a Table 2-80 */ -#define DP_GET_MSG_TRANSACTION_VERSION 0x00 /* DP 1.3 */ #define DP_LINK_ADDRESS 0x01 #define DP_CONNECTION_STATUS_NOTIFY 0x02 #define DP_ENUM_PATH_RESOURCES 0x10 @@ -1451,13 +592,6 @@ enum drm_dp_phy { #define DP_POWER_DOWN_PHY 0x25 #define DP_SINK_EVENT_NOTIFY 0x30 #define DP_QUERY_STREAM_ENC_STATUS 0x38 -#define 
DP_QUERY_STREAM_ENC_STATUS_STATE_NO_EXIST 0 -#define DP_QUERY_STREAM_ENC_STATUS_STATE_INACTIVE 1 -#define DP_QUERY_STREAM_ENC_STATUS_STATE_ACTIVE 2 - -/* DP 1.2 MST sideband reply types */ -#define DP_SIDEBAND_REPLY_ACK 0x00 -#define DP_SIDEBAND_REPLY_NAK 0x01 /* DP 1.2 MST sideband nak reasons - table 2.84 */ #define DP_NAK_WRITE_FAILURE 0x01 @@ -1480,7 +614,6 @@ enum drm_dp_phy { #define DP_MST_PHYSICAL_PORT_0 0 #define DP_MST_LOGICAL_PORT_0 8 -#define DP_LINK_CONSTANT_N_VALUE 0x8000 #define DP_LINK_STATUS_SIZE 6 bool drm_dp_channel_eq_ok(const u8 link_status[DP_LINK_STATUS_SIZE], int lane_count); @@ -1490,221 +623,45 @@ u8 drm_dp_get_adjust_request_voltage(const u8 link_status[DP_LINK_STATUS_SIZE], int lane); u8 drm_dp_get_adjust_request_pre_emphasis(const u8 link_status[DP_LINK_STATUS_SIZE], int lane); -u8 drm_dp_get_adjust_request_post_cursor(const u8 link_status[DP_LINK_STATUS_SIZE], - unsigned int lane); #define DP_BRANCH_OUI_HEADER_SIZE 0xc #define DP_RECEIVER_CAP_SIZE 0xf -#define DP_DSC_RECEIVER_CAP_SIZE 0xf #define EDP_PSR_RECEIVER_CAP_SIZE 2 #define EDP_DISPLAY_CTL_CAP_SIZE 3 -#define DP_LTTPR_COMMON_CAP_SIZE 8 -#define DP_LTTPR_PHY_CAP_SIZE 3 -void drm_dp_link_train_clock_recovery_delay(const struct drm_dp_aux *aux, - const u8 dpcd[DP_RECEIVER_CAP_SIZE]); -void drm_dp_lttpr_link_train_clock_recovery_delay(void); -void drm_dp_link_train_channel_eq_delay(const struct drm_dp_aux *aux, - const u8 dpcd[DP_RECEIVER_CAP_SIZE]); -void drm_dp_lttpr_link_train_channel_eq_delay(const struct drm_dp_aux *aux, - const u8 caps[DP_LTTPR_PHY_CAP_SIZE]); +void drm_dp_link_train_clock_recovery_delay(const u8 dpcd[DP_RECEIVER_CAP_SIZE]); +void drm_dp_link_train_channel_eq_delay(const u8 dpcd[DP_RECEIVER_CAP_SIZE]); u8 drm_dp_link_rate_to_bw_code(int link_rate); int drm_dp_bw_code_to_link_rate(u8 link_bw); -#define DP_SDP_AUDIO_TIMESTAMP 0x01 -#define DP_SDP_AUDIO_STREAM 0x02 -#define DP_SDP_EXTENSION 0x04 /* DP 1.1 */ -#define DP_SDP_AUDIO_COPYMANAGEMENT 0x05 /* DP 1.2 */ 
-#define DP_SDP_ISRC 0x06 /* DP 1.2 */ -#define DP_SDP_VSC 0x07 /* DP 1.2 */ -#define DP_SDP_CAMERA_GENERIC(i) (0x08 + (i)) /* 0-7, DP 1.3 */ -#define DP_SDP_PPS 0x10 /* DP 1.4 */ -#define DP_SDP_VSC_EXT_VESA 0x20 /* DP 1.4 */ -#define DP_SDP_VSC_EXT_CEA 0x21 /* DP 1.4 */ -/* 0x80+ CEA-861 infoframe types */ - -/** - * struct dp_sdp_header - DP secondary data packet header - * @HB0: Secondary Data Packet ID - * @HB1: Secondary Data Packet Type - * @HB2: Secondary Data Packet Specific header, Byte 0 - * @HB3: Secondary Data packet Specific header, Byte 1 - */ -struct dp_sdp_header { - u8 HB0; - u8 HB1; - u8 HB2; - u8 HB3; +struct edp_sdp_header { + u8 HB0; /* Secondary Data Packet ID */ + u8 HB1; /* Secondary Data Packet Type */ + u8 HB2; /* 7:5 reserved, 4:0 revision number */ + u8 HB3; /* 7:5 reserved, 4:0 number of valid data bytes */ } __packed; #define EDP_SDP_HEADER_REVISION_MASK 0x1F #define EDP_SDP_HEADER_VALID_PAYLOAD_BYTES 0x1F -#define DP_SDP_PPS_HEADER_PAYLOAD_BYTES_MINUS_1 0x7F -/** - * struct dp_sdp - DP secondary data packet - * @sdp_header: DP secondary data packet header - * @db: DP secondaray data packet data blocks - * VSC SDP Payload for PSR - * db[0]: Stereo Interface - * db[1]: 0 - PSR State; 1 - Update RFB; 2 - CRC Valid - * db[2]: CRC value bits 7:0 of the R or Cr component - * db[3]: CRC value bits 15:8 of the R or Cr component - * db[4]: CRC value bits 7:0 of the G or Y component - * db[5]: CRC value bits 15:8 of the G or Y component - * db[6]: CRC value bits 7:0 of the B or Cb component - * db[7]: CRC value bits 15:8 of the B or Cb component - * db[8] - db[31]: Reserved - * VSC SDP Payload for Pixel Encoding/Colorimetry Format - * db[0] - db[15]: Reserved - * db[16]: Pixel Encoding and Colorimetry Formats - * db[17]: Dynamic Range and Component Bit Depth - * db[18]: Content Type - * db[19] - db[31]: Reserved - */ -struct dp_sdp { - struct dp_sdp_header sdp_header; - u8 db[32]; +struct edp_vsc_psr { + struct edp_sdp_header sdp_header; + u8 
DB0; /* Stereo Interface */ + u8 DB1; /* 0 - PSR State; 1 - Update RFB; 2 - CRC Valid */ + u8 DB2; /* CRC value bits 7:0 of the R or Cr component */ + u8 DB3; /* CRC value bits 15:8 of the R or Cr component */ + u8 DB4; /* CRC value bits 7:0 of the G or Y component */ + u8 DB5; /* CRC value bits 15:8 of the G or Y component */ + u8 DB6; /* CRC value bits 7:0 of the B or Cb component */ + u8 DB7; /* CRC value bits 15:8 of the B or Cb component */ + u8 DB8_31[24]; /* Reserved */ } __packed; #define EDP_VSC_PSR_STATE_ACTIVE (1<<0) #define EDP_VSC_PSR_UPDATE_RFB (1<<1) #define EDP_VSC_PSR_CRC_VALUES_VALID (1<<2) -/** - * enum dp_pixelformat - drm DP Pixel encoding formats - * - * This enum is used to indicate DP VSC SDP Pixel encoding formats. - * It is based on DP 1.4 spec [Table 2-117: VSC SDP Payload for DB16 through - * DB18] - * - * @DP_PIXELFORMAT_RGB: RGB pixel encoding format - * @DP_PIXELFORMAT_YUV444: YCbCr 4:4:4 pixel encoding format - * @DP_PIXELFORMAT_YUV422: YCbCr 4:2:2 pixel encoding format - * @DP_PIXELFORMAT_YUV420: YCbCr 4:2:0 pixel encoding format - * @DP_PIXELFORMAT_Y_ONLY: Y Only pixel encoding format - * @DP_PIXELFORMAT_RAW: RAW pixel encoding format - * @DP_PIXELFORMAT_RESERVED: Reserved pixel encoding format - */ -enum dp_pixelformat { - DP_PIXELFORMAT_RGB = 0, - DP_PIXELFORMAT_YUV444 = 0x1, - DP_PIXELFORMAT_YUV422 = 0x2, - DP_PIXELFORMAT_YUV420 = 0x3, - DP_PIXELFORMAT_Y_ONLY = 0x4, - DP_PIXELFORMAT_RAW = 0x5, - DP_PIXELFORMAT_RESERVED = 0x6, -}; - -/** - * enum dp_colorimetry - drm DP Colorimetry formats - * - * This enum is used to indicate DP VSC SDP Colorimetry formats. - * It is based on DP 1.4 spec [Table 2-117: VSC SDP Payload for DB16 through - * DB18] and a name of enum member follows DRM_MODE_COLORIMETRY definition. 
- * - * @DP_COLORIMETRY_DEFAULT: sRGB (IEC 61966-2-1) or - * ITU-R BT.601 colorimetry format - * @DP_COLORIMETRY_RGB_WIDE_FIXED: RGB wide gamut fixed point colorimetry format - * @DP_COLORIMETRY_BT709_YCC: ITU-R BT.709 colorimetry format - * @DP_COLORIMETRY_RGB_WIDE_FLOAT: RGB wide gamut floating point - * (scRGB (IEC 61966-2-2)) colorimetry format - * @DP_COLORIMETRY_XVYCC_601: xvYCC601 colorimetry format - * @DP_COLORIMETRY_OPRGB: OpRGB colorimetry format - * @DP_COLORIMETRY_XVYCC_709: xvYCC709 colorimetry format - * @DP_COLORIMETRY_DCI_P3_RGB: DCI-P3 (SMPTE RP 431-2) colorimetry format - * @DP_COLORIMETRY_SYCC_601: sYCC601 colorimetry format - * @DP_COLORIMETRY_RGB_CUSTOM: RGB Custom Color Profile colorimetry format - * @DP_COLORIMETRY_OPYCC_601: opYCC601 colorimetry format - * @DP_COLORIMETRY_BT2020_RGB: ITU-R BT.2020 R' G' B' colorimetry format - * @DP_COLORIMETRY_BT2020_CYCC: ITU-R BT.2020 Y'c C'bc C'rc colorimetry format - * @DP_COLORIMETRY_BT2020_YCC: ITU-R BT.2020 Y' C'b C'r colorimetry format - */ -enum dp_colorimetry { - DP_COLORIMETRY_DEFAULT = 0, - DP_COLORIMETRY_RGB_WIDE_FIXED = 0x1, - DP_COLORIMETRY_BT709_YCC = 0x1, - DP_COLORIMETRY_RGB_WIDE_FLOAT = 0x2, - DP_COLORIMETRY_XVYCC_601 = 0x2, - DP_COLORIMETRY_OPRGB = 0x3, - DP_COLORIMETRY_XVYCC_709 = 0x3, - DP_COLORIMETRY_DCI_P3_RGB = 0x4, - DP_COLORIMETRY_SYCC_601 = 0x4, - DP_COLORIMETRY_RGB_CUSTOM = 0x5, - DP_COLORIMETRY_OPYCC_601 = 0x5, - DP_COLORIMETRY_BT2020_RGB = 0x6, - DP_COLORIMETRY_BT2020_CYCC = 0x6, - DP_COLORIMETRY_BT2020_YCC = 0x7, -}; - -/** - * enum dp_dynamic_range - drm DP Dynamic Range - * - * This enum is used to indicate DP VSC SDP Dynamic Range. 
- * It is based on DP 1.4 spec [Table 2-117: VSC SDP Payload for DB16 through - * DB18] - * - * @DP_DYNAMIC_RANGE_VESA: VESA range - * @DP_DYNAMIC_RANGE_CTA: CTA range - */ -enum dp_dynamic_range { - DP_DYNAMIC_RANGE_VESA = 0, - DP_DYNAMIC_RANGE_CTA = 1, -}; - -/** - * enum dp_content_type - drm DP Content Type - * - * This enum is used to indicate DP VSC SDP Content Types. - * It is based on DP 1.4 spec [Table 2-117: VSC SDP Payload for DB16 through - * DB18] - * CTA-861-G defines content types and expected processing by a sink device - * - * @DP_CONTENT_TYPE_NOT_DEFINED: Not defined type - * @DP_CONTENT_TYPE_GRAPHICS: Graphics type - * @DP_CONTENT_TYPE_PHOTO: Photo type - * @DP_CONTENT_TYPE_VIDEO: Video type - * @DP_CONTENT_TYPE_GAME: Game type - */ -enum dp_content_type { - DP_CONTENT_TYPE_NOT_DEFINED = 0x00, - DP_CONTENT_TYPE_GRAPHICS = 0x01, - DP_CONTENT_TYPE_PHOTO = 0x02, - DP_CONTENT_TYPE_VIDEO = 0x03, - DP_CONTENT_TYPE_GAME = 0x04, -}; - -/** - * struct drm_dp_vsc_sdp - drm DP VSC SDP - * - * This structure represents a DP VSC SDP of drm - * It is based on DP 1.4 spec [Table 2-116: VSC SDP Header Bytes] and - * [Table 2-117: VSC SDP Payload for DB16 through DB18] - * - * @sdp_type: secondary-data packet type - * @revision: revision number - * @length: number of valid data bytes - * @pixelformat: pixel encoding format - * @colorimetry: colorimetry format - * @bpc: bit per color - * @dynamic_range: dynamic range information - * @content_type: CTA-861-G defines content types and expected processing by a sink device - */ -struct drm_dp_vsc_sdp { - unsigned char sdp_type; - unsigned char revision; - unsigned char length; - enum dp_pixelformat pixelformat; - enum dp_colorimetry colorimetry; - int bpc; - enum dp_dynamic_range dynamic_range; - enum dp_content_type content_type; -}; - -void drm_dp_vsc_sdp_log(const char *level, struct device *dev, - const struct drm_dp_vsc_sdp *vsc); - int drm_dp_psr_setup_time(const u8 psr_cap[EDP_PSR_RECEIVER_CAP_SIZE]); static 
inline int @@ -1726,13 +683,6 @@ drm_dp_enhanced_frame_cap(const u8 dpcd[DP_RECEIVER_CAP_SIZE]) (dpcd[DP_MAX_LANE_COUNT] & DP_ENHANCED_FRAME_CAP); } -static inline bool -drm_dp_fast_training_cap(const u8 dpcd[DP_RECEIVER_CAP_SIZE]) -{ - return dpcd[DP_DPCD_REV] >= 0x11 && - (dpcd[DP_MAX_DOWNSPREAD] & DP_NO_AUX_HANDSHAKE_LINK_TRAINING); -} - static inline bool drm_dp_tps3_supported(const u8 dpcd[DP_RECEIVER_CAP_SIZE]) { @@ -1740,103 +690,6 @@ drm_dp_tps3_supported(const u8 dpcd[DP_RECEIVER_CAP_SIZE]) dpcd[DP_MAX_LANE_COUNT] & DP_TPS3_SUPPORTED; } -static inline bool -drm_dp_tps4_supported(const u8 dpcd[DP_RECEIVER_CAP_SIZE]) -{ - return dpcd[DP_DPCD_REV] >= 0x14 && - dpcd[DP_MAX_DOWNSPREAD] & DP_TPS4_SUPPORTED; -} - -static inline u8 -drm_dp_training_pattern_mask(const u8 dpcd[DP_RECEIVER_CAP_SIZE]) -{ - return (dpcd[DP_DPCD_REV] >= 0x14) ? DP_TRAINING_PATTERN_MASK_1_4 : - DP_TRAINING_PATTERN_MASK; -} - -static inline bool -drm_dp_is_branch(const u8 dpcd[DP_RECEIVER_CAP_SIZE]) -{ - return dpcd[DP_DOWNSTREAMPORT_PRESENT] & DP_DWN_STRM_PORT_PRESENT; -} - -/* DP/eDP DSC support */ -u8 drm_dp_dsc_sink_max_slice_count(const u8 dsc_dpcd[DP_DSC_RECEIVER_CAP_SIZE], - bool is_edp); -u8 drm_dp_dsc_sink_line_buf_depth(const u8 dsc_dpcd[DP_DSC_RECEIVER_CAP_SIZE]); -int drm_dp_dsc_sink_supported_input_bpcs(const u8 dsc_dpc[DP_DSC_RECEIVER_CAP_SIZE], - u8 dsc_bpc[3]); - -static inline bool -drm_dp_sink_supports_dsc(const u8 dsc_dpcd[DP_DSC_RECEIVER_CAP_SIZE]) -{ - return dsc_dpcd[DP_DSC_SUPPORT - DP_DSC_SUPPORT] & - DP_DSC_DECOMPRESSION_IS_SUPPORTED; -} - -static inline u16 -drm_edp_dsc_sink_output_bpp(const u8 dsc_dpcd[DP_DSC_RECEIVER_CAP_SIZE]) -{ - return dsc_dpcd[DP_DSC_MAX_BITS_PER_PIXEL_LOW - DP_DSC_SUPPORT] | - (dsc_dpcd[DP_DSC_MAX_BITS_PER_PIXEL_HI - DP_DSC_SUPPORT] & - DP_DSC_MAX_BITS_PER_PIXEL_HI_MASK << - DP_DSC_MAX_BITS_PER_PIXEL_HI_SHIFT); -} - -static inline u32 -drm_dp_dsc_sink_max_slice_width(const u8 dsc_dpcd[DP_DSC_RECEIVER_CAP_SIZE]) -{ - /* Max Slicewidth = 
Number of Pixels * 320 */ - return dsc_dpcd[DP_DSC_MAX_SLICE_WIDTH - DP_DSC_SUPPORT] * - DP_DSC_SLICE_WIDTH_MULTIPLIER; -} - -/* Forward Error Correction Support on DP 1.4 */ -static inline bool -drm_dp_sink_supports_fec(const u8 fec_capable) -{ - return fec_capable & DP_FEC_CAPABLE; -} - -static inline bool -drm_dp_channel_coding_supported(const u8 dpcd[DP_RECEIVER_CAP_SIZE]) -{ - return dpcd[DP_MAIN_LINK_CHANNEL_CODING] & DP_CAP_ANSI_8B10B; -} - -static inline bool -drm_dp_alternate_scrambler_reset_cap(const u8 dpcd[DP_RECEIVER_CAP_SIZE]) -{ - return dpcd[DP_EDP_CONFIGURATION_CAP] & - DP_ALTERNATE_SCRAMBLER_RESET_CAP; -} - -/* Ignore MSA timing for Adaptive Sync support on DP 1.4 */ -static inline bool -drm_dp_sink_can_do_video_without_timing_msa(const u8 dpcd[DP_RECEIVER_CAP_SIZE]) -{ - return dpcd[DP_DOWN_STREAM_PORT_COUNT] & - DP_MSA_TIMING_PAR_IGNORED; -} - -/** - * drm_edp_backlight_supported() - Check an eDP DPCD for VESA backlight support - * @edp_dpcd: The DPCD to check - * - * Note that currently this function will return %false for panels which support various DPCD - * backlight features but which require the brightness be set through PWM, and don't support setting - * the brightness level via the DPCD. This is a TODO. - * - * Returns: %True if @edp_dpcd indicates that VESA backlight controls are supported, %false - * otherwise - */ -static inline bool -drm_edp_backlight_supported(const u8 edp_dpcd[EDP_DISPLAY_CTL_CAP_SIZE]) -{ - return (edp_dpcd[1] & DP_EDP_TCON_BACKLIGHT_ADJUSTMENT_CAP) && - (edp_dpcd[2] & DP_EDP_BACKLIGHT_BRIGHTNESS_AUX_SET_CAP); -} - /* * DisplayPort AUX channel */ @@ -1857,126 +710,50 @@ struct drm_dp_aux_msg { size_t size; }; -struct cec_adapter; -struct edid; -struct drm_connector; - -/** - * struct drm_dp_aux_cec - DisplayPort CEC-Tunneling-over-AUX - * @lock: mutex protecting this struct - * @adap: the CEC adapter for CEC-Tunneling-over-AUX support. 
- * @connector: the connector this CEC adapter is associated with - * @unregister_work: unregister the CEC adapter - */ -struct drm_dp_aux_cec { - struct mutex lock; - struct cec_adapter *adap; - struct drm_connector *connector; - struct delayed_work unregister_work; -}; - /** * struct drm_dp_aux - DisplayPort AUX channel + * @name: user-visible name of this AUX channel and the I2C-over-AUX adapter + * @ddc: I2C adapter that can be used for I2C-over-AUX communication + * @dev: pointer to struct device that is the parent for this AUX channel + * @hw_mutex: internal mutex used for locking transfers + * @transfer: transfers a message representing a single AUX transaction + * + * The .dev field should be set to a pointer to the device that implements + * the AUX channel. + * + * The .name field may be used to specify the name of the I2C adapter. If set to + * NULL, dev_name() of .dev will be used. + * + * Drivers provide a hardware-specific implementation of how transactions + * are executed via the .transfer() function. A pointer to a drm_dp_aux_msg + * structure describing the transaction is passed into this function. Upon + * success, the implementation should return the number of payload bytes + * that were transferred, or a negative error-code on failure. Helpers + * propagate errors from the .transfer() function, with the exception of + * the -EBUSY error, which causes a transaction to be retried. On a short, + * helpers will return -EPROTO to make it simpler to check for failure. * * An AUX channel can also be used to transport I2C messages to a sink. A - * typical application of that is to access an EDID that's present in the sink - * device. The @transfer() function can also be used to execute such - * transactions. The drm_dp_aux_register() function registers an I2C adapter - * that can be passed to drm_probe_ddc(). Upon removal, drivers should call - * drm_dp_aux_unregister() to remove the I2C adapter. 
The I2C adapter uses long - * transfers by default; if a partial response is received, the adapter will - * drop down to the size given by the partial response for this transaction - * only. + * typical application of that is to access an EDID that's present in the + * sink device. The .transfer() function can also be used to execute such + * transactions. The drm_dp_aux_register() function registers an I2C + * adapter that can be passed to drm_probe_ddc(). Upon removal, drivers + * should call drm_dp_aux_unregister() to remove the I2C adapter. + * The I2C adapter uses long transfers by default; if a partial response is + * received, the adapter will drop down to the size given by the partial + * response for this transaction only. + * + * Note that the aux helper code assumes that the .transfer() function + * only modifies the reply field of the drm_dp_aux_msg structure. The + * retry logic and i2c helpers assume this is the case. */ struct drm_dp_aux { - /** - * @name: user-visible name of this AUX channel and the - * I2C-over-AUX adapter. - * - * It's also used to specify the name of the I2C adapter. If set - * to %NULL, dev_name() of @dev will be used. - */ const char *name; - - /** - * @ddc: I2C adapter that can be used for I2C-over-AUX - * communication - */ struct i2c_adapter ddc; - - /** - * @dev: pointer to struct device that is the parent for this - * AUX channel. - */ struct device *dev; - - /** - * @drm_dev: pointer to the &drm_device that owns this AUX channel. - * Beware, this may be %NULL before drm_dp_aux_register() has been - * called. - * - * It should be set to the &drm_device that will be using this AUX - * channel as early as possible. For many graphics drivers this should - * happen before drm_dp_aux_init(), however it's perfectly fine to set - * this field later so long as it's assigned before calling - * drm_dp_aux_register(). 
- */ - struct drm_device *drm_dev; - - /** - * @crtc: backpointer to the crtc that is currently using this - * AUX channel - */ - struct drm_crtc *crtc; - - /** - * @hw_mutex: internal mutex used for locking transfers. - * - * Note that if the underlying hardware is shared among multiple - * channels, the driver needs to do additional locking to - * prevent concurrent access. - */ struct mutex hw_mutex; - - /** - * @crc_work: worker that captures CRCs for each frame - */ - struct work_struct crc_work; - - /** - * @crc_count: counter of captured frame CRCs - */ - u8 crc_count; - - /** - * @transfer: transfers a message representing a single AUX - * transaction. - * - * This is a hardware-specific implementation of how - * transactions are executed that the drivers must provide. - * - * A pointer to a &drm_dp_aux_msg structure describing the - * transaction is passed into this function. Upon success, the - * implementation should return the number of payload bytes that - * were transferred, or a negative error-code on failure. - * - * Helpers will propagate these errors, with the exception of - * the %-EBUSY error, which causes a transaction to be retried. - * On a short, helpers will return %-EPROTO to make it simpler - * to check for failure. - * - * The @transfer() function must only modify the reply field of - * the &drm_dp_aux_msg structure. The retry logic and i2c - * helpers assume this is the case. - * - * Also note that this callback can be called no matter the - * state @dev is in. Drivers that need that device to be powered - * to perform this operation will first need to make sure it's - * been properly enabled. - */ ssize_t (*transfer)(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg); - /** * @i2c_nack_count: Counts I2C NACKs, used for DP validation. */ @@ -1985,14 +762,6 @@ struct drm_dp_aux { * @i2c_defer_count: Counts I2C DEFERs, used for DP validation. 
*/ unsigned i2c_defer_count; - /** - * @cec: struct containing fields used for CEC-Tunneling-over-AUX. - */ - struct drm_dp_aux_cec cec; - /** - * @is_remote: Is this AUX CH actually using sideband messaging. - */ - bool is_remote; }; ssize_t drm_dp_dpcd_read(struct drm_dp_aux *aux, unsigned int offset, @@ -2030,288 +799,35 @@ static inline ssize_t drm_dp_dpcd_writeb(struct drm_dp_aux *aux, return drm_dp_dpcd_write(aux, offset, &value, 1); } -int drm_dp_read_dpcd_caps(struct drm_dp_aux *aux, - u8 dpcd[DP_RECEIVER_CAP_SIZE]); - int drm_dp_dpcd_read_link_status(struct drm_dp_aux *aux, u8 status[DP_LINK_STATUS_SIZE]); -int drm_dp_dpcd_read_phy_link_status(struct drm_dp_aux *aux, - enum drm_dp_phy dp_phy, - u8 link_status[DP_LINK_STATUS_SIZE]); +/* + * DisplayPort link + */ +#define DP_LINK_CAP_ENHANCED_FRAMING (1 << 0) -bool drm_dp_send_real_edid_checksum(struct drm_dp_aux *aux, - u8 real_edid_checksum); +struct drm_dp_link { + unsigned char revision; + unsigned int rate; + unsigned int num_lanes; + unsigned long capabilities; +}; -int drm_dp_read_downstream_info(struct drm_dp_aux *aux, - const u8 dpcd[DP_RECEIVER_CAP_SIZE], - u8 downstream_ports[DP_MAX_DOWNSTREAM_PORTS]); -bool drm_dp_downstream_is_type(const u8 dpcd[DP_RECEIVER_CAP_SIZE], - const u8 port_cap[4], u8 type); -bool drm_dp_downstream_is_tmds(const u8 dpcd[DP_RECEIVER_CAP_SIZE], - const u8 port_cap[4], - const struct edid *edid); -int drm_dp_downstream_max_dotclock(const u8 dpcd[DP_RECEIVER_CAP_SIZE], - const u8 port_cap[4]); -int drm_dp_downstream_max_tmds_clock(const u8 dpcd[DP_RECEIVER_CAP_SIZE], - const u8 port_cap[4], - const struct edid *edid); -int drm_dp_downstream_min_tmds_clock(const u8 dpcd[DP_RECEIVER_CAP_SIZE], - const u8 port_cap[4], - const struct edid *edid); +int drm_dp_link_probe(struct drm_dp_aux *aux, struct drm_dp_link *link); +int drm_dp_link_power_up(struct drm_dp_aux *aux, struct drm_dp_link *link); +int drm_dp_link_power_down(struct drm_dp_aux *aux, struct drm_dp_link *link); +int 
drm_dp_link_configure(struct drm_dp_aux *aux, struct drm_dp_link *link); +int drm_dp_downstream_max_clock(const u8 dpcd[DP_RECEIVER_CAP_SIZE], + const u8 port_cap[4]); int drm_dp_downstream_max_bpc(const u8 dpcd[DP_RECEIVER_CAP_SIZE], - const u8 port_cap[4], - const struct edid *edid); -bool drm_dp_downstream_420_passthrough(const u8 dpcd[DP_RECEIVER_CAP_SIZE], - const u8 port_cap[4]); -bool drm_dp_downstream_444_to_420_conversion(const u8 dpcd[DP_RECEIVER_CAP_SIZE], - const u8 port_cap[4]); -struct drm_display_mode *drm_dp_downstream_mode(struct drm_device *dev, - const u8 dpcd[DP_RECEIVER_CAP_SIZE], - const u8 port_cap[4]); + const u8 port_cap[4]); int drm_dp_downstream_id(struct drm_dp_aux *aux, char id[6]); -void drm_dp_downstream_debug(struct seq_file *m, - const u8 dpcd[DP_RECEIVER_CAP_SIZE], - const u8 port_cap[4], - const struct edid *edid, - struct drm_dp_aux *aux); -enum drm_mode_subconnector -drm_dp_subconnector_type(const u8 dpcd[DP_RECEIVER_CAP_SIZE], - const u8 port_cap[4]); -void drm_dp_set_subconnector_property(struct drm_connector *connector, - enum drm_connector_status status, - const u8 *dpcd, - const u8 port_cap[4]); +void drm_dp_downstream_debug(struct seq_file *m, const u8 dpcd[DP_RECEIVER_CAP_SIZE], + const u8 port_cap[4], struct drm_dp_aux *aux); -struct drm_dp_desc; -bool drm_dp_read_sink_count_cap(struct drm_connector *connector, - const u8 dpcd[DP_RECEIVER_CAP_SIZE], - const struct drm_dp_desc *desc); -int drm_dp_read_sink_count(struct drm_dp_aux *aux); - -int drm_dp_read_lttpr_common_caps(struct drm_dp_aux *aux, - u8 caps[DP_LTTPR_COMMON_CAP_SIZE]); -int drm_dp_read_lttpr_phy_caps(struct drm_dp_aux *aux, - enum drm_dp_phy dp_phy, - u8 caps[DP_LTTPR_PHY_CAP_SIZE]); -int drm_dp_lttpr_count(const u8 cap[DP_LTTPR_COMMON_CAP_SIZE]); -int drm_dp_lttpr_max_link_rate(const u8 caps[DP_LTTPR_COMMON_CAP_SIZE]); -int drm_dp_lttpr_max_lane_count(const u8 caps[DP_LTTPR_COMMON_CAP_SIZE]); -bool drm_dp_lttpr_voltage_swing_level_3_supported(const u8 
caps[DP_LTTPR_PHY_CAP_SIZE]); -bool drm_dp_lttpr_pre_emphasis_level_3_supported(const u8 caps[DP_LTTPR_PHY_CAP_SIZE]); - -void drm_dp_remote_aux_init(struct drm_dp_aux *aux); void drm_dp_aux_init(struct drm_dp_aux *aux); int drm_dp_aux_register(struct drm_dp_aux *aux); void drm_dp_aux_unregister(struct drm_dp_aux *aux); -int drm_dp_start_crc(struct drm_dp_aux *aux, struct drm_crtc *crtc); -int drm_dp_stop_crc(struct drm_dp_aux *aux); - -struct drm_dp_dpcd_ident { - u8 oui[3]; - u8 device_id[6]; - u8 hw_rev; - u8 sw_major_rev; - u8 sw_minor_rev; -} __packed; - -/** - * struct drm_dp_desc - DP branch/sink device descriptor - * @ident: DP device identification from DPCD 0x400 (sink) or 0x500 (branch). - * @quirks: Quirks; use drm_dp_has_quirk() to query for the quirks. - */ -struct drm_dp_desc { - struct drm_dp_dpcd_ident ident; - u32 quirks; -}; - -int drm_dp_read_desc(struct drm_dp_aux *aux, struct drm_dp_desc *desc, - bool is_branch); - -/** - * enum drm_dp_quirk - Display Port sink/branch device specific quirks - * - * Display Port sink and branch devices in the wild have a variety of bugs, try - * to collect them here. The quirks are shared, but it's up to the drivers to - * implement workarounds for them. - */ -enum drm_dp_quirk { - /** - * @DP_DPCD_QUIRK_CONSTANT_N: - * - * The device requires main link attributes Mvid and Nvid to be limited - * to 16 bits. So will give a constant value (0x8000) for compatability. - */ - DP_DPCD_QUIRK_CONSTANT_N, - /** - * @DP_DPCD_QUIRK_NO_PSR: - * - * The device does not support PSR even if reports that it supports or - * driver still need to implement proper handling for such device. - */ - DP_DPCD_QUIRK_NO_PSR, - /** - * @DP_DPCD_QUIRK_NO_SINK_COUNT: - * - * The device does not set SINK_COUNT to a non-zero value. - * The driver should ignore SINK_COUNT during detection. Note that - * drm_dp_read_sink_count_cap() automatically checks for this quirk. 
- */ - DP_DPCD_QUIRK_NO_SINK_COUNT, - /** - * @DP_DPCD_QUIRK_DSC_WITHOUT_VIRTUAL_DPCD: - * - * The device supports MST DSC despite not supporting Virtual DPCD. - * The DSC caps can be read from the physical aux instead. - */ - DP_DPCD_QUIRK_DSC_WITHOUT_VIRTUAL_DPCD, - /** - * @DP_DPCD_QUIRK_CAN_DO_MAX_LINK_RATE_3_24_GBPS: - * - * The device supports a link rate of 3.24 Gbps (multiplier 0xc) despite - * the DP_MAX_LINK_RATE register reporting a lower max multiplier. - */ - DP_DPCD_QUIRK_CAN_DO_MAX_LINK_RATE_3_24_GBPS, -}; - -/** - * drm_dp_has_quirk() - does the DP device have a specific quirk - * @desc: Device descriptor filled by drm_dp_read_desc() - * @quirk: Quirk to query for - * - * Return true if DP device identified by @desc has @quirk. - */ -static inline bool -drm_dp_has_quirk(const struct drm_dp_desc *desc, enum drm_dp_quirk quirk) -{ - return desc->quirks & BIT(quirk); -} - -/** - * struct drm_edp_backlight_info - Probed eDP backlight info struct - * @pwmgen_bit_count: The pwmgen bit count - * @pwm_freq_pre_divider: The PWM frequency pre-divider value being used for this backlight, if any - * @max: The maximum backlight level that may be set - * @lsb_reg_used: Do we also write values to the DP_EDP_BACKLIGHT_BRIGHTNESS_LSB register? - * @aux_enable: Does the panel support the AUX enable cap? - * - * This structure contains various data about an eDP backlight, which can be populated by using - * drm_edp_backlight_init(). 
- */ -struct drm_edp_backlight_info { - u8 pwmgen_bit_count; - u8 pwm_freq_pre_divider; - u16 max; - - bool lsb_reg_used : 1; - bool aux_enable : 1; -}; - -int -drm_edp_backlight_init(struct drm_dp_aux *aux, struct drm_edp_backlight_info *bl, - u16 driver_pwm_freq_hz, const u8 edp_dpcd[EDP_DISPLAY_CTL_CAP_SIZE], - u16 *current_level, u8 *current_mode); -int drm_edp_backlight_set_level(struct drm_dp_aux *aux, const struct drm_edp_backlight_info *bl, - u16 level); -int drm_edp_backlight_enable(struct drm_dp_aux *aux, const struct drm_edp_backlight_info *bl, - u16 level); -int drm_edp_backlight_disable(struct drm_dp_aux *aux, const struct drm_edp_backlight_info *bl); - -#if IS_ENABLED(CONFIG_DRM_KMS_HELPER) && (IS_BUILTIN(CONFIG_BACKLIGHT_CLASS_DEVICE) || \ - (IS_MODULE(CONFIG_DRM_KMS_HELPER) && IS_MODULE(CONFIG_BACKLIGHT_CLASS_DEVICE))) - -int drm_panel_dp_aux_backlight(struct drm_panel *panel, struct drm_dp_aux *aux); - -#else - -static inline int drm_panel_dp_aux_backlight(struct drm_panel *panel, - struct drm_dp_aux *aux) -{ - return 0; -} - -#endif - -#ifdef CONFIG_DRM_DP_CEC -void drm_dp_cec_irq(struct drm_dp_aux *aux); -void drm_dp_cec_register_connector(struct drm_dp_aux *aux, - struct drm_connector *connector); -void drm_dp_cec_unregister_connector(struct drm_dp_aux *aux); -void drm_dp_cec_set_edid(struct drm_dp_aux *aux, const struct edid *edid); -void drm_dp_cec_unset_edid(struct drm_dp_aux *aux); -#else -static inline void drm_dp_cec_irq(struct drm_dp_aux *aux) -{ -} - -static inline void -drm_dp_cec_register_connector(struct drm_dp_aux *aux, - struct drm_connector *connector) -{ -} - -static inline void drm_dp_cec_unregister_connector(struct drm_dp_aux *aux) -{ -} - -static inline void drm_dp_cec_set_edid(struct drm_dp_aux *aux, - const struct edid *edid) -{ -} - -static inline void drm_dp_cec_unset_edid(struct drm_dp_aux *aux) -{ -} - -#endif - -/** - * struct drm_dp_phy_test_params - DP Phy Compliance parameters - * @link_rate: Requested Link rate from 
DPCD 0x219 - * @num_lanes: Number of lanes requested by sing through DPCD 0x220 - * @phy_pattern: DP Phy test pattern from DPCD 0x248 - * @hbr2_reset: DP HBR2_COMPLIANCE_SCRAMBLER_RESET from DCPD 0x24A and 0x24B - * @custom80: DP Test_80BIT_CUSTOM_PATTERN from DPCDs 0x250 through 0x259 - * @enhanced_frame_cap: flag for enhanced frame capability. - */ -struct drm_dp_phy_test_params { - int link_rate; - u8 num_lanes; - u8 phy_pattern; - u8 hbr2_reset[2]; - u8 custom80[10]; - bool enhanced_frame_cap; -}; - -int drm_dp_get_phy_test_pattern(struct drm_dp_aux *aux, - struct drm_dp_phy_test_params *data); -int drm_dp_set_phy_test_pattern(struct drm_dp_aux *aux, - struct drm_dp_phy_test_params *data, u8 dp_rev); -int drm_dp_get_pcon_max_frl_bw(const u8 dpcd[DP_RECEIVER_CAP_SIZE], - const u8 port_cap[4]); -int drm_dp_pcon_frl_prepare(struct drm_dp_aux *aux, bool enable_frl_ready_hpd); -bool drm_dp_pcon_is_frl_ready(struct drm_dp_aux *aux); -int drm_dp_pcon_frl_configure_1(struct drm_dp_aux *aux, int max_frl_gbps, - u8 frl_mode); -int drm_dp_pcon_frl_configure_2(struct drm_dp_aux *aux, int max_frl_mask, - u8 frl_type); -int drm_dp_pcon_reset_frl_config(struct drm_dp_aux *aux); -int drm_dp_pcon_frl_enable(struct drm_dp_aux *aux); - -bool drm_dp_pcon_hdmi_link_active(struct drm_dp_aux *aux); -int drm_dp_pcon_hdmi_link_mode(struct drm_dp_aux *aux, u8 *frl_trained_mask); -void drm_dp_pcon_hdmi_frl_link_error_count(struct drm_dp_aux *aux, - struct drm_connector *connector); -bool drm_dp_pcon_enc_is_dsc_1_2(const u8 pcon_dsc_dpcd[DP_PCON_DSC_ENCODER_CAP_SIZE]); -int drm_dp_pcon_dsc_max_slices(const u8 pcon_dsc_dpcd[DP_PCON_DSC_ENCODER_CAP_SIZE]); -int drm_dp_pcon_dsc_max_slice_width(const u8 pcon_dsc_dpcd[DP_PCON_DSC_ENCODER_CAP_SIZE]); -int drm_dp_pcon_dsc_bpp_incr(const u8 pcon_dsc_dpcd[DP_PCON_DSC_ENCODER_CAP_SIZE]); -int drm_dp_pcon_pps_default(struct drm_dp_aux *aux); -int drm_dp_pcon_pps_override_buf(struct drm_dp_aux *aux, u8 pps_buf[128]); -int 
drm_dp_pcon_pps_override_param(struct drm_dp_aux *aux, u8 pps_param[6]); -bool drm_dp_downstream_rgb_to_ycbcr_conversion(const u8 dpcd[DP_RECEIVER_CAP_SIZE], - const u8 port_cap[4], u8 color_spc); -int drm_dp_pcon_convert_rgb_to_ycbcr(struct drm_dp_aux *aux, u8 color_spc); - #endif /* _DRM_DP_HELPER_H_ */ diff --git a/include/drm/drm_dp_mst_helper.h b/include/drm/drm_dp_mst_helper.h index ddb9231d03..0032076705 100644 --- a/include/drm/drm_dp_mst_helper.h +++ b/include/drm/drm_dp_mst_helper.h @@ -24,27 +24,6 @@ #include #include -#include - -#if IS_ENABLED(CONFIG_DRM_DEBUG_DP_MST_TOPOLOGY_REFS) -#include -#include - -enum drm_dp_mst_topology_ref_type { - DRM_DP_MST_TOPOLOGY_REF_GET, - DRM_DP_MST_TOPOLOGY_REF_PUT, -}; - -struct drm_dp_mst_topology_ref_history { - struct drm_dp_mst_topology_ref_entry { - enum drm_dp_mst_topology_ref_type type; - int count; - ktime_t ts_nsec; - depot_stack_handle_t backtrace; - } *entries; - int len; -}; -#endif /* IS_ENABLED(CONFIG_DRM_DEBUG_DP_MST_TOPOLOGY_REFS) */ struct drm_dp_mst_branch; @@ -64,58 +43,30 @@ struct drm_dp_vcpi { /** * struct drm_dp_mst_port - MST port + * @kref: reference count for this port. * @port_num: port number - * @input: if this port is an input port. Protected by - * &drm_dp_mst_topology_mgr.base.lock. - * @mcs: message capability status - DP 1.2 spec. Protected by - * &drm_dp_mst_topology_mgr.base.lock. - * @ddps: DisplayPort Device Plug Status - DP 1.2. Protected by - * &drm_dp_mst_topology_mgr.base.lock. - * @pdt: Peer Device Type. Protected by - * &drm_dp_mst_topology_mgr.base.lock. - * @ldps: Legacy Device Plug Status. Protected by - * &drm_dp_mst_topology_mgr.base.lock. - * @dpcd_rev: DPCD revision of device on this port. Protected by - * &drm_dp_mst_topology_mgr.base.lock. - * @num_sdp_streams: Number of simultaneous streams. Protected by - * &drm_dp_mst_topology_mgr.base.lock. - * @num_sdp_stream_sinks: Number of stream sinks. Protected by - * &drm_dp_mst_topology_mgr.base.lock. 
- * @full_pbn: Max possible bandwidth for this port. Protected by - * &drm_dp_mst_topology_mgr.base.lock. + * @input: if this port is an input port. + * @mcs: message capability status - DP 1.2 spec. + * @ddps: DisplayPort Device Plug Status - DP 1.2 + * @pdt: Peer Device Type + * @ldps: Legacy Device Plug Status + * @dpcd_rev: DPCD revision of device on this port + * @num_sdp_streams: Number of simultaneous streams + * @num_sdp_stream_sinks: Number of stream sinks + * @available_pbn: Available bandwidth for this port. * @next: link to next port on this branch device - * @aux: i2c aux transport to talk to device connected to this port, protected - * by &drm_dp_mst_topology_mgr.base.lock. + * @mstb: branch device attach below this port + * @aux: i2c aux transport to talk to device connected to this port. * @parent: branch device parent of this port * @vcpi: Virtual Channel Payload info for this port. - * @connector: DRM connector this port is connected to. Protected by - * &drm_dp_mst_topology_mgr.base.lock. + * @connector: DRM connector this port is connected to. * @mgr: topology manager this port lives under. * * This structure represents an MST port endpoint on a device somewhere * in the MST topology. */ struct drm_dp_mst_port { - /** - * @topology_kref: refcount for this port's lifetime in the topology, - * only the DP MST helpers should need to touch this - */ - struct kref topology_kref; - - /** - * @malloc_kref: refcount for the memory allocation containing this - * structure. See drm_dp_mst_get_port_malloc() and - * drm_dp_mst_put_port_malloc(). - */ - struct kref malloc_kref; - -#if IS_ENABLED(CONFIG_DRM_DEBUG_DP_MST_TOPOLOGY_REFS) - /** - * @topology_ref_history: A history of each topology - * reference/dereference. See CONFIG_DRM_DEBUG_DP_MST_TOPOLOGY_REFS. 
- */ - struct drm_dp_mst_topology_ref_history topology_ref_history; -#endif + struct kref kref; u8 port_num; bool input; @@ -126,19 +77,9 @@ struct drm_dp_mst_port { u8 dpcd_rev; u8 num_sdp_streams; u8 num_sdp_stream_sinks; - uint16_t full_pbn; + uint16_t available_pbn; struct list_head next; - /** - * @mstb: the branch device connected to this port, if there is one. - * This should be considered protected for reading by - * &drm_dp_mst_topology_mgr.lock. There are two exceptions to this: - * &drm_dp_mst_topology_mgr.up_req_work and - * &drm_dp_mst_topology_mgr.work, which do not grab - * &drm_dp_mst_topology_mgr.lock during reads but are the only - * updaters of this list and are protected from writing concurrently - * by &drm_dp_mst_topology_mgr.probe_lock. - */ - struct drm_dp_mst_branch *mstb; + struct drm_dp_mst_branch *mstb; /* pointer to an mstb if this port has one */ struct drm_dp_aux aux; /* i2c bus for this port? */ struct drm_dp_mst_branch *parent; @@ -156,14 +97,51 @@ struct drm_dp_mst_port { * audio-capable. */ bool has_audio; - - /** - * @fec_capable: bool indicating if FEC can be supported up to that - * point in the MST topology. - */ - bool fec_capable; }; +/** + * struct drm_dp_mst_branch - MST branch device. + * @kref: reference count for this port. + * @rad: Relative Address to talk to this branch device. + * @lct: Link count total to talk to this branch device. + * @num_ports: number of ports on the branch. + * @msg_slots: one bit per transmitted msg slot. + * @ports: linked list of ports on this branch. + * @port_parent: pointer to the port parent, NULL if toplevel. + * @mgr: topology manager for this branch device. + * @tx_slots: transmission slots for this device. + * @last_seqno: last sequence number used to talk to this. + * @link_address_sent: if a link address message has been sent to this device yet. + * @guid: guid for DP 1.2 branch device. port under this branch can be + * identified by port #. 
+ * + * This structure represents an MST branch device, there is one + * primary branch device at the root, along with any other branches connected + * to downstream port of parent branches. + */ +struct drm_dp_mst_branch { + struct kref kref; + u8 rad[8]; + u8 lct; + int num_ports; + + int msg_slots; + struct list_head ports; + + /* list of tx ops queue for this port */ + struct drm_dp_mst_port *port_parent; + struct drm_dp_mst_topology_mgr *mgr; + + /* slots are protected by mstb->mgr->qlock */ + struct drm_dp_sideband_msg_tx *tx_slots[2]; + int last_seqno; + bool link_address_sent; + + /* global unique identifier to identify branch devices */ + u8 guid[16]; +}; + + /* sideband msg header - not bit struct */ struct drm_dp_sideband_msg_hdr { u8 lct; @@ -177,87 +155,6 @@ struct drm_dp_sideband_msg_hdr { bool seqno; }; -struct drm_dp_sideband_msg_rx { - u8 chunk[48]; - u8 msg[256]; - u8 curchunk_len; - u8 curchunk_idx; /* chunk we are parsing now */ - u8 curchunk_hdrlen; - u8 curlen; /* total length of the msg */ - bool have_somt; - bool have_eomt; - struct drm_dp_sideband_msg_hdr initial_hdr; -}; - -/** - * struct drm_dp_mst_branch - MST branch device. - * @rad: Relative Address to talk to this branch device. - * @lct: Link count total to talk to this branch device. - * @num_ports: number of ports on the branch. - * @port_parent: pointer to the port parent, NULL if toplevel. - * @mgr: topology manager for this branch device. - * @link_address_sent: if a link address message has been sent to this device yet. - * @guid: guid for DP 1.2 branch device. port under this branch can be - * identified by port #. - * - * This structure represents an MST branch device, there is one - * primary branch device at the root, along with any other branches connected - * to downstream port of parent branches. 
- */ -struct drm_dp_mst_branch { - /** - * @topology_kref: refcount for this branch device's lifetime in the - * topology, only the DP MST helpers should need to touch this - */ - struct kref topology_kref; - - /** - * @malloc_kref: refcount for the memory allocation containing this - * structure. See drm_dp_mst_get_mstb_malloc() and - * drm_dp_mst_put_mstb_malloc(). - */ - struct kref malloc_kref; - -#if IS_ENABLED(CONFIG_DRM_DEBUG_DP_MST_TOPOLOGY_REFS) - /** - * @topology_ref_history: A history of each topology - * reference/dereference. See CONFIG_DRM_DEBUG_DP_MST_TOPOLOGY_REFS. - */ - struct drm_dp_mst_topology_ref_history topology_ref_history; -#endif - - /** - * @destroy_next: linked-list entry used by - * drm_dp_delayed_destroy_work() - */ - struct list_head destroy_next; - - u8 rad[8]; - u8 lct; - int num_ports; - - /** - * @ports: the list of ports on this branch device. This should be - * considered protected for reading by &drm_dp_mst_topology_mgr.lock. - * There are two exceptions to this: - * &drm_dp_mst_topology_mgr.up_req_work and - * &drm_dp_mst_topology_mgr.work, which do not grab - * &drm_dp_mst_topology_mgr.lock during reads but are the only - * updaters of this list and are protected from updating the list - * concurrently by @drm_dp_mst_topology_mgr.probe_lock - */ - struct list_head ports; - - struct drm_dp_mst_port *port_parent; - struct drm_dp_mst_topology_mgr *mgr; - - bool link_address_sent; - - /* global unique identifier to identify branch devices */ - u8 guid[16]; -}; - - struct drm_dp_nak_reply { u8 guid[16]; u8 reason; @@ -313,33 +210,17 @@ struct drm_dp_remote_i2c_write_ack_reply { u8 port_number; }; -struct drm_dp_query_stream_enc_status_ack_reply { - /* Bit[23:16]- Stream Id */ - u8 stream_id; - /* Bit[15]- Signed */ - bool reply_signed; - - /* Bit[10:8]- Stream Output Sink Type */ - bool unauthorizable_device_present; - bool legacy_device_present; - bool query_capable_device_present; - - /* Bit[12:11]- Stream Output CP Type */ - 
bool hdcp_1x_device_present; - bool hdcp_2x_device_present; - - /* Bit[4]- Stream Authentication */ - bool auth_completed; - - /* Bit[3]- Stream Encryption */ - bool encryption_enabled; - - /* Bit[2]- Stream Repeater Function Present */ - bool repeater_present; - - /* Bit[1:0]- Stream State */ - u8 state; +struct drm_dp_sideband_msg_rx { + u8 chunk[48]; + u8 msg[256]; + u8 curchunk_len; + u8 curchunk_idx; /* chunk we are parsing now */ + u8 curchunk_hdrlen; + u8 curlen; /* total length of the msg */ + bool have_somt; + bool have_eomt; + struct drm_dp_sideband_msg_hdr initial_hdr; }; #define DRM_DP_MAX_SDP_STREAMS 16 @@ -384,7 +265,7 @@ struct drm_dp_remote_dpcd_write { struct drm_dp_remote_i2c_read { u8 num_transactions; u8 port_number; - struct drm_dp_remote_i2c_read_tx { + struct { u8 i2c_dev_id; u8 num_bytes; u8 *bytes; @@ -402,15 +283,6 @@ struct drm_dp_remote_i2c_write { u8 *bytes; }; -struct drm_dp_query_stream_enc_status { - u8 stream_id; - u8 client_id[7]; /* 56-bit nonce */ - u8 stream_event; - bool valid_stream_event; - u8 stream_behavior; - u8 valid_stream_behavior; -}; - /* this covers ENUM_RESOURCES, POWER_DOWN_PHY, POWER_UP_PHY */ struct drm_dp_port_number_req { u8 port_number; @@ -418,7 +290,6 @@ struct drm_dp_port_number_req { struct drm_dp_enum_path_resources_ack_reply { u8 port_number; - bool fec_capable; u16 full_payload_bw_number; u16 avail_payload_bw_number; }; @@ -441,7 +312,7 @@ struct drm_dp_resource_status_notify { struct drm_dp_query_payload_ack_reply { u8 port_number; - u16 allocated_pbn; + u8 allocated_pbn; }; struct drm_dp_sideband_msg_req_body { @@ -459,8 +330,6 @@ struct drm_dp_sideband_msg_req_body { struct drm_dp_remote_i2c_read i2c_read; struct drm_dp_remote_i2c_write i2c_write; - - struct drm_dp_query_stream_enc_status enc_status; } u; }; @@ -483,8 +352,6 @@ struct drm_dp_sideband_msg_reply_body { struct drm_dp_remote_i2c_read_ack_reply remote_i2c_read_ack; struct drm_dp_remote_i2c_read_nak_reply remote_i2c_read_nack; struct 
drm_dp_remote_i2c_write_ack_reply remote_i2c_write_ack; - - struct drm_dp_query_stream_enc_status_ack_reply enc_status; } u; }; @@ -516,15 +383,11 @@ struct drm_dp_mst_topology_mgr; struct drm_dp_mst_topology_cbs { /* create a connector for a port */ struct drm_connector *(*add_connector)(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port, const char *path); - /* - * Checks for any pending MST interrupts, passing them to MST core for - * processing, the same way an HPD IRQ pulse handler would do this. - * If provided MST core calls this callback from a poll-waiting loop - * when waiting for MST down message replies. The driver is expected - * to guard against a race between this callback and the driver's HPD - * IRQ pulse handler. - */ - void (*poll_hpd_irq)(struct drm_dp_mst_topology_mgr *mgr); + void (*register_connector)(struct drm_connector *connector); + void (*destroy_connector)(struct drm_dp_mst_topology_mgr *mgr, + struct drm_connector *connector); + void (*hotplug)(struct drm_dp_mst_topology_mgr *mgr); + }; #define DP_MAX_PAYLOAD (sizeof(unsigned long) * 8) @@ -540,24 +403,6 @@ struct drm_dp_payload { int vcpi; }; -#define to_dp_mst_topology_state(x) container_of(x, struct drm_dp_mst_topology_state, base) - -struct drm_dp_vcpi_allocation { - struct drm_dp_mst_port *port; - int vcpi; - int pbn; - bool dsc_enabled; - struct list_head next; -}; - -struct drm_dp_mst_topology_state { - struct drm_private_state base; - struct list_head vcpis; - struct drm_dp_mst_topology_mgr *mgr; -}; - -#define to_dp_mst_topology_mgr(x) container_of(x, struct drm_dp_mst_topology_mgr, base) - /** * struct drm_dp_mst_topology_mgr - DisplayPort MST manager * @@ -566,15 +411,10 @@ struct drm_dp_mst_topology_state { * on the GPU. */ struct drm_dp_mst_topology_mgr { - /** - * @base: Base private object for atomic - */ - struct drm_private_obj base; - /** * @dev: device pointer for adding i2c devices etc. 
*/ - struct drm_device *dev; + struct device *dev; /** * @cbs: callbacks for connector addition and destruction. */ @@ -593,14 +433,6 @@ struct drm_dp_mst_topology_mgr { * @max_payloads: maximum number of payloads the GPU can generate. */ int max_payloads; - /** - * @max_lane_count: maximum number of lanes the GPU can drive. - */ - int max_lane_count; - /** - * @max_link_rate: maximum link rate per lane GPU can output, in kHz. - */ - int max_link_rate; /** * @conn_base_id: DRM connector ID this mgr is connected to. Only used * to build the MST connector path value. @@ -608,41 +440,28 @@ struct drm_dp_mst_topology_mgr { int conn_base_id; /** - * @up_req_recv: Message receiver state for up requests. + * @down_rep_recv: Message receiver state for down replies. This and + * @up_req_recv are only ever access from the work item, which is + * serialised. + */ + struct drm_dp_sideband_msg_rx down_rep_recv; + /** + * @up_req_recv: Message receiver state for up requests. This and + * @down_rep_recv are only ever access from the work item, which is + * serialised. */ struct drm_dp_sideband_msg_rx up_req_recv; /** - * @down_rep_recv: Message receiver state for replies to down - * requests. - */ - struct drm_dp_sideband_msg_rx down_rep_recv; - - /** - * @lock: protects @mst_state, @mst_primary, @dpcd, and - * @payload_id_table_cleared. + * @lock: protects mst state, primary, dpcd. */ struct mutex lock; - /** - * @probe_lock: Prevents @work and @up_req_work, the only writers of - * &drm_dp_mst_port.mstb and &drm_dp_mst_branch.ports, from racing - * while they update the topology. - */ - struct mutex probe_lock; - /** * @mst_state: If this manager is enabled for an MST capable port. False * if no MST sink/branch devices is connected. */ - bool mst_state : 1; - - /** - * @payload_id_table_cleared: Whether or not we've cleared the payload - * ID table for @mst_primary. Protected by @lock. 
- */ - bool payload_id_table_cleared : 1; - + bool mst_state; /** * @mst_primary: Pointer to the primary/first branch device. */ @@ -660,19 +479,26 @@ struct drm_dp_mst_topology_mgr { * @pbn_div: PBN to slots divisor. */ int pbn_div; - /** - * @funcs: Atomic helper callbacks + * @total_slots: Total slots that can be allocated. */ - const struct drm_private_state_funcs *funcs; + int total_slots; + /** + * @avail_slots: Still available slots that can be allocated. + */ + int avail_slots; + /** + * @total_pbn: Total PBN count. + */ + int total_pbn; /** - * @qlock: protects @tx_msg_downq and &drm_dp_sideband_msg_tx.state + * @qlock: protects @tx_msg_downq, the tx_slots in struct + * &drm_dp_mst_branch and txmsg->state once they are queued */ struct mutex qlock; - /** - * @tx_msg_downq: List of pending down requests + * @tx_msg_downq: List of pending down replies. */ struct list_head tx_msg_downq; @@ -682,13 +508,12 @@ struct drm_dp_mst_topology_mgr { struct mutex payload_lock; /** * @proposed_vcpis: Array of pointers for the new VCPI allocation. The - * VCPI structure itself is &drm_dp_mst_port.vcpi, and the size of - * this array is determined by @max_payloads. + * VCPI structure itself is embedded into the corresponding + * &drm_dp_mst_port structure. */ struct drm_dp_vcpi **proposed_vcpis; /** - * @payloads: Array of payloads. The size of this array is determined - * by @max_payloads. + * @payloads: Array of payloads. */ struct drm_dp_payload *payloads; /** @@ -717,89 +542,42 @@ struct drm_dp_mst_topology_mgr { struct work_struct tx_work; /** - * @destroy_port_list: List of to be destroyed connectors. + * @destroy_connector_list: List of to be destroyed connectors. */ - struct list_head destroy_port_list; + struct list_head destroy_connector_list; /** - * @destroy_branch_device_list: List of to be destroyed branch - * devices. + * @destroy_connector_lock: Protects @connector_list. 
*/ - struct list_head destroy_branch_device_list; + struct mutex destroy_connector_lock; /** - * @delayed_destroy_lock: Protects @destroy_port_list and - * @destroy_branch_device_list. + * @destroy_connector_work: Work item to destroy connectors. Needed to + * avoid locking inversion. */ - struct mutex delayed_destroy_lock; - - /** - * @delayed_destroy_wq: Workqueue used for delayed_destroy_work items. - * A dedicated WQ makes it possible to drain any requeued work items - * on it. - */ - struct workqueue_struct *delayed_destroy_wq; - - /** - * @delayed_destroy_work: Work item to destroy MST port and branch - * devices, needed to avoid locking inversion. - */ - struct work_struct delayed_destroy_work; - - /** - * @up_req_list: List of pending up requests from the topology that - * need to be processed, in chronological order. - */ - struct list_head up_req_list; - /** - * @up_req_lock: Protects @up_req_list - */ - struct mutex up_req_lock; - /** - * @up_req_work: Work item to process up requests received from the - * topology. Needed to avoid blocking hotplug handling and sideband - * transmissions. - */ - struct work_struct up_req_work; - -#if IS_ENABLED(CONFIG_DRM_DEBUG_DP_MST_TOPOLOGY_REFS) - /** - * @topology_ref_history_lock: protects - * &drm_dp_mst_port.topology_ref_history and - * &drm_dp_mst_branch.topology_ref_history. 
- */ - struct mutex topology_ref_history_lock; -#endif + struct work_struct destroy_connector_work; }; -int drm_dp_mst_topology_mgr_init(struct drm_dp_mst_topology_mgr *mgr, - struct drm_device *dev, struct drm_dp_aux *aux, - int max_dpcd_transaction_bytes, - int max_payloads, - int max_lane_count, int max_link_rate, - int conn_base_id); +int drm_dp_mst_topology_mgr_init(struct drm_dp_mst_topology_mgr *mgr, struct device *dev, struct drm_dp_aux *aux, int max_dpcd_transaction_bytes, int max_payloads, int conn_base_id); void drm_dp_mst_topology_mgr_destroy(struct drm_dp_mst_topology_mgr *mgr); -bool drm_dp_read_mst_cap(struct drm_dp_aux *aux, const u8 dpcd[DP_RECEIVER_CAP_SIZE]); + int drm_dp_mst_topology_mgr_set_mst(struct drm_dp_mst_topology_mgr *mgr, bool mst_state); + int drm_dp_mst_hpd_irq(struct drm_dp_mst_topology_mgr *mgr, u8 *esi, bool *handled); -int -drm_dp_mst_detect_port(struct drm_connector *connector, - struct drm_modeset_acquire_ctx *ctx, - struct drm_dp_mst_topology_mgr *mgr, - struct drm_dp_mst_port *port); +enum drm_connector_status drm_dp_mst_detect_port(struct drm_connector *connector, struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port); +bool drm_dp_mst_port_has_audio(struct drm_dp_mst_topology_mgr *mgr, + struct drm_dp_mst_port *port); struct edid *drm_dp_mst_get_edid(struct drm_connector *connector, struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port); -int drm_dp_get_vc_payload_bw(const struct drm_dp_mst_topology_mgr *mgr, - int link_rate, int link_lane_count); -int drm_dp_calc_pbn_mode(int clock, int bpp, bool dsc); +int drm_dp_calc_pbn_mode(int clock, int bpp); -bool drm_dp_mst_allocate_vcpi(struct drm_dp_mst_topology_mgr *mgr, - struct drm_dp_mst_port *port, int pbn, int slots); + +bool drm_dp_mst_allocate_vcpi(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port, int pbn, int *slots); int drm_dp_mst_get_vcpi_slots(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port); @@ -826,144 
+604,5 @@ void drm_dp_mst_dump_topology(struct seq_file *m, struct drm_dp_mst_topology_mgr *mgr); void drm_dp_mst_topology_mgr_suspend(struct drm_dp_mst_topology_mgr *mgr); -int __must_check -drm_dp_mst_topology_mgr_resume(struct drm_dp_mst_topology_mgr *mgr, - bool sync); - -ssize_t drm_dp_mst_dpcd_read(struct drm_dp_aux *aux, - unsigned int offset, void *buffer, size_t size); -ssize_t drm_dp_mst_dpcd_write(struct drm_dp_aux *aux, - unsigned int offset, void *buffer, size_t size); - -int drm_dp_mst_connector_late_register(struct drm_connector *connector, - struct drm_dp_mst_port *port); -void drm_dp_mst_connector_early_unregister(struct drm_connector *connector, - struct drm_dp_mst_port *port); - -struct drm_dp_mst_topology_state *drm_atomic_get_mst_topology_state(struct drm_atomic_state *state, - struct drm_dp_mst_topology_mgr *mgr); -int __must_check -drm_dp_atomic_find_vcpi_slots(struct drm_atomic_state *state, - struct drm_dp_mst_topology_mgr *mgr, - struct drm_dp_mst_port *port, int pbn, - int pbn_div); -int drm_dp_mst_atomic_enable_dsc(struct drm_atomic_state *state, - struct drm_dp_mst_port *port, - int pbn, int pbn_div, - bool enable); -int __must_check -drm_dp_mst_add_affected_dsc_crtcs(struct drm_atomic_state *state, - struct drm_dp_mst_topology_mgr *mgr); -int __must_check -drm_dp_atomic_release_vcpi_slots(struct drm_atomic_state *state, - struct drm_dp_mst_topology_mgr *mgr, - struct drm_dp_mst_port *port); -int drm_dp_send_power_updown_phy(struct drm_dp_mst_topology_mgr *mgr, - struct drm_dp_mst_port *port, bool power_up); -int drm_dp_send_query_stream_enc_status(struct drm_dp_mst_topology_mgr *mgr, - struct drm_dp_mst_port *port, - struct drm_dp_query_stream_enc_status_ack_reply *status); -int __must_check drm_dp_mst_atomic_check(struct drm_atomic_state *state); - -void drm_dp_mst_get_port_malloc(struct drm_dp_mst_port *port); -void drm_dp_mst_put_port_malloc(struct drm_dp_mst_port *port); - -struct drm_dp_aux *drm_dp_mst_dsc_aux_for_port(struct 
drm_dp_mst_port *port); - -extern const struct drm_private_state_funcs drm_dp_mst_topology_state_funcs; - -/** - * __drm_dp_mst_state_iter_get - private atomic state iterator function for - * macro-internal use - * @state: &struct drm_atomic_state pointer - * @mgr: pointer to the &struct drm_dp_mst_topology_mgr iteration cursor - * @old_state: optional pointer to the old &struct drm_dp_mst_topology_state - * iteration cursor - * @new_state: optional pointer to the new &struct drm_dp_mst_topology_state - * iteration cursor - * @i: int iteration cursor, for macro-internal use - * - * Used by for_each_oldnew_mst_mgr_in_state(), - * for_each_old_mst_mgr_in_state(), and for_each_new_mst_mgr_in_state(). Don't - * call this directly. - * - * Returns: - * True if the current &struct drm_private_obj is a &struct - * drm_dp_mst_topology_mgr, false otherwise. - */ -static inline bool -__drm_dp_mst_state_iter_get(struct drm_atomic_state *state, - struct drm_dp_mst_topology_mgr **mgr, - struct drm_dp_mst_topology_state **old_state, - struct drm_dp_mst_topology_state **new_state, - int i) -{ - struct __drm_private_objs_state *objs_state = &state->private_objs[i]; - - if (objs_state->ptr->funcs != &drm_dp_mst_topology_state_funcs) - return false; - - *mgr = to_dp_mst_topology_mgr(objs_state->ptr); - if (old_state) - *old_state = to_dp_mst_topology_state(objs_state->old_state); - if (new_state) - *new_state = to_dp_mst_topology_state(objs_state->new_state); - - return true; -} - -/** - * for_each_oldnew_mst_mgr_in_state - iterate over all DP MST topology - * managers in an atomic update - * @__state: &struct drm_atomic_state pointer - * @mgr: &struct drm_dp_mst_topology_mgr iteration cursor - * @old_state: &struct drm_dp_mst_topology_state iteration cursor for the old - * state - * @new_state: &struct drm_dp_mst_topology_state iteration cursor for the new - * state - * @__i: int iteration cursor, for macro-internal use - * - * This iterates over all DRM DP MST topology managers in 
an atomic update, - * tracking both old and new state. This is useful in places where the state - * delta needs to be considered, for example in atomic check functions. - */ -#define for_each_oldnew_mst_mgr_in_state(__state, mgr, old_state, new_state, __i) \ - for ((__i) = 0; (__i) < (__state)->num_private_objs; (__i)++) \ - for_each_if(__drm_dp_mst_state_iter_get((__state), &(mgr), &(old_state), &(new_state), (__i))) - -/** - * for_each_old_mst_mgr_in_state - iterate over all DP MST topology managers - * in an atomic update - * @__state: &struct drm_atomic_state pointer - * @mgr: &struct drm_dp_mst_topology_mgr iteration cursor - * @old_state: &struct drm_dp_mst_topology_state iteration cursor for the old - * state - * @__i: int iteration cursor, for macro-internal use - * - * This iterates over all DRM DP MST topology managers in an atomic update, - * tracking only the old state. This is useful in disable functions, where we - * need the old state the hardware is still in. - */ -#define for_each_old_mst_mgr_in_state(__state, mgr, old_state, __i) \ - for ((__i) = 0; (__i) < (__state)->num_private_objs; (__i)++) \ - for_each_if(__drm_dp_mst_state_iter_get((__state), &(mgr), &(old_state), NULL, (__i))) - -/** - * for_each_new_mst_mgr_in_state - iterate over all DP MST topology managers - * in an atomic update - * @__state: &struct drm_atomic_state pointer - * @mgr: &struct drm_dp_mst_topology_mgr iteration cursor - * @new_state: &struct drm_dp_mst_topology_state iteration cursor for the new - * state - * @__i: int iteration cursor, for macro-internal use - * - * This iterates over all DRM DP MST topology managers in an atomic update, - * tracking only the new state. This is useful in enable functions, where we - * need the new state the hardware should be in when the atomic commit - * operation has completed. 
- */ -#define for_each_new_mst_mgr_in_state(__state, mgr, new_state, __i) \ - for ((__i) = 0; (__i) < (__state)->num_private_objs; (__i)++) \ - for_each_if(__drm_dp_mst_state_iter_get((__state), &(mgr), NULL, &(new_state), (__i))) - +int drm_dp_mst_topology_mgr_resume(struct drm_dp_mst_topology_mgr *mgr); #endif diff --git a/include/drm/drm_edid.h b/include/drm/drm_edid.h index deccfd39e6..c3a7d440bc 100644 --- a/include/drm/drm_edid.h +++ b/include/drm/drm_edid.h @@ -24,8 +24,6 @@ #define __DRM_EDID_H__ #include -#include -#include struct drm_device; struct i2c_adapter; @@ -91,11 +89,6 @@ struct detailed_data_string { u8 str[13]; } __attribute__((packed)); -#define DRM_EDID_DEFAULT_GTF_SUPPORT_FLAG 0x00 -#define DRM_EDID_RANGE_LIMITS_ONLY_FLAG 0x01 -#define DRM_EDID_SECONDARY_GTF_SUPPORT_FLAG 0x02 -#define DRM_EDID_CVT_SUPPORT_FLAG 0x04 - struct detailed_data_monitor_range { u8 min_vfreq; u8 max_vfreq; @@ -182,23 +175,21 @@ struct detailed_timing { #define DRM_EDID_INPUT_BLANK_TO_BLACK (1 << 4) #define DRM_EDID_INPUT_VIDEO_LEVEL (3 << 5) #define DRM_EDID_INPUT_DIGITAL (1 << 7) -#define DRM_EDID_DIGITAL_DEPTH_MASK (7 << 4) /* 1.4 */ -#define DRM_EDID_DIGITAL_DEPTH_UNDEF (0 << 4) /* 1.4 */ -#define DRM_EDID_DIGITAL_DEPTH_6 (1 << 4) /* 1.4 */ -#define DRM_EDID_DIGITAL_DEPTH_8 (2 << 4) /* 1.4 */ -#define DRM_EDID_DIGITAL_DEPTH_10 (3 << 4) /* 1.4 */ -#define DRM_EDID_DIGITAL_DEPTH_12 (4 << 4) /* 1.4 */ -#define DRM_EDID_DIGITAL_DEPTH_14 (5 << 4) /* 1.4 */ -#define DRM_EDID_DIGITAL_DEPTH_16 (6 << 4) /* 1.4 */ -#define DRM_EDID_DIGITAL_DEPTH_RSVD (7 << 4) /* 1.4 */ -#define DRM_EDID_DIGITAL_TYPE_MASK (7 << 0) /* 1.4 */ -#define DRM_EDID_DIGITAL_TYPE_UNDEF (0 << 0) /* 1.4 */ -#define DRM_EDID_DIGITAL_TYPE_DVI (1 << 0) /* 1.4 */ -#define DRM_EDID_DIGITAL_TYPE_HDMI_A (2 << 0) /* 1.4 */ -#define DRM_EDID_DIGITAL_TYPE_HDMI_B (3 << 0) /* 1.4 */ -#define DRM_EDID_DIGITAL_TYPE_MDDI (4 << 0) /* 1.4 */ -#define DRM_EDID_DIGITAL_TYPE_DP (5 << 0) /* 1.4 */ -#define 
DRM_EDID_DIGITAL_DFP_1_X (1 << 0) /* 1.3 */ +#define DRM_EDID_DIGITAL_DEPTH_MASK (7 << 4) +#define DRM_EDID_DIGITAL_DEPTH_UNDEF (0 << 4) +#define DRM_EDID_DIGITAL_DEPTH_6 (1 << 4) +#define DRM_EDID_DIGITAL_DEPTH_8 (2 << 4) +#define DRM_EDID_DIGITAL_DEPTH_10 (3 << 4) +#define DRM_EDID_DIGITAL_DEPTH_12 (4 << 4) +#define DRM_EDID_DIGITAL_DEPTH_14 (5 << 4) +#define DRM_EDID_DIGITAL_DEPTH_16 (6 << 4) +#define DRM_EDID_DIGITAL_DEPTH_RSVD (7 << 4) +#define DRM_EDID_DIGITAL_TYPE_UNDEF (0) +#define DRM_EDID_DIGITAL_TYPE_DVI (1) +#define DRM_EDID_DIGITAL_TYPE_HDMI_A (2) +#define DRM_EDID_DIGITAL_TYPE_HDMI_B (3) +#define DRM_EDID_DIGITAL_TYPE_MDDI (4) +#define DRM_EDID_DIGITAL_TYPE_DP (5) #define DRM_EDID_FEATURE_DEFAULT_GTF (1 << 0) #define DRM_EDID_FEATURE_PREFERRED_TIMING (1 << 1) @@ -221,44 +212,6 @@ struct detailed_timing { #define DRM_EDID_HDMI_DC_30 (1 << 4) #define DRM_EDID_HDMI_DC_Y444 (1 << 3) -/* YCBCR 420 deep color modes */ -#define DRM_EDID_YCBCR420_DC_48 (1 << 2) -#define DRM_EDID_YCBCR420_DC_36 (1 << 1) -#define DRM_EDID_YCBCR420_DC_30 (1 << 0) -#define DRM_EDID_YCBCR420_DC_MASK (DRM_EDID_YCBCR420_DC_48 | \ - DRM_EDID_YCBCR420_DC_36 | \ - DRM_EDID_YCBCR420_DC_30) - -/* HDMI 2.1 additional fields */ -#define DRM_EDID_MAX_FRL_RATE_MASK 0xf0 -#define DRM_EDID_FAPA_START_LOCATION (1 << 0) -#define DRM_EDID_ALLM (1 << 1) -#define DRM_EDID_FVA (1 << 2) - -/* Deep Color specific */ -#define DRM_EDID_DC_30BIT_420 (1 << 0) -#define DRM_EDID_DC_36BIT_420 (1 << 1) -#define DRM_EDID_DC_48BIT_420 (1 << 2) - -/* VRR specific */ -#define DRM_EDID_CNMVRR (1 << 3) -#define DRM_EDID_CINEMA_VRR (1 << 4) -#define DRM_EDID_MDELTA (1 << 5) -#define DRM_EDID_VRR_MAX_UPPER_MASK 0xc0 -#define DRM_EDID_VRR_MAX_LOWER_MASK 0xff -#define DRM_EDID_VRR_MIN_MASK 0x3f - -/* DSC specific */ -#define DRM_EDID_DSC_10BPC (1 << 0) -#define DRM_EDID_DSC_12BPC (1 << 1) -#define DRM_EDID_DSC_16BPC (1 << 2) -#define DRM_EDID_DSC_ALL_BPP (1 << 3) -#define DRM_EDID_DSC_NATIVE_420 (1 << 6) -#define 
DRM_EDID_DSC_1P2 (1 << 7) -#define DRM_EDID_DSC_MAX_FRL_RATE_MASK 0xf0 -#define DRM_EDID_DSC_MAX_SLICES 0xf -#define DRM_EDID_DSC_TOTAL_CHUNK_KBYTES 0x3f - /* ELD Header Block */ #define DRM_ELD_HEADER_BLOCK_SIZE 4 @@ -295,7 +248,6 @@ struct detailed_timing { # define DRM_ELD_AUD_SYNCH_DELAY_MAX 0xfa /* 500 ms */ #define DRM_ELD_SPEAKER 7 -# define DRM_ELD_SPEAKER_MASK 0x7f # define DRM_ELD_SPEAKER_RLRC (1 << 6) # define DRM_ELD_SPEAKER_FLRC (1 << 5) # define DRM_ELD_SPEAKER_RC (1 << 4) @@ -336,7 +288,7 @@ struct edid { u8 features; /* Color characteristics */ u8 red_green_lo; - u8 blue_white_lo; + u8 black_white_lo; u8 red_x; u8 red_y; u8 green_x; @@ -369,55 +321,33 @@ struct cea_sad { struct drm_encoder; struct drm_connector; -struct drm_connector_state; struct drm_display_mode; +struct hdmi_avi_infoframe; +struct hdmi_vendor_infoframe; +void drm_edid_to_eld(struct drm_connector *connector, struct edid *edid); int drm_edid_to_sad(struct edid *edid, struct cea_sad **sads); int drm_edid_to_speaker_allocation(struct edid *edid, u8 **sadb); int drm_av_sync_delay(struct drm_connector *connector, const struct drm_display_mode *mode); +struct drm_connector *drm_select_eld(struct drm_encoder *encoder); #ifdef CONFIG_DRM_LOAD_EDID_FIRMWARE -struct edid *drm_load_edid_firmware(struct drm_connector *connector); -int __drm_set_edid_firmware_path(const char *path); -int __drm_get_edid_firmware_path(char *buf, size_t bufsize); +int drm_load_edid_firmware(struct drm_connector *connector); #else -static inline struct edid * -drm_load_edid_firmware(struct drm_connector *connector) +static inline int drm_load_edid_firmware(struct drm_connector *connector) { - return ERR_PTR(-ENOENT); + return 0; } #endif -bool drm_edid_are_equal(const struct edid *edid1, const struct edid *edid2); - int drm_hdmi_avi_infoframe_from_display_mode(struct hdmi_avi_infoframe *frame, - const struct drm_connector *connector, const struct drm_display_mode *mode); int 
drm_hdmi_vendor_infoframe_from_display_mode(struct hdmi_vendor_infoframe *frame, - const struct drm_connector *connector, const struct drm_display_mode *mode); -void -drm_hdmi_avi_infoframe_colorspace(struct hdmi_avi_infoframe *frame, - const struct drm_connector_state *conn_state); - -void -drm_hdmi_avi_infoframe_bars(struct hdmi_avi_infoframe *frame, - const struct drm_connector_state *conn_state); - -void -drm_hdmi_avi_infoframe_quant_range(struct hdmi_avi_infoframe *frame, - const struct drm_connector *connector, - const struct drm_display_mode *mode, - enum hdmi_quantization_range rgb_quant_range); - -int -drm_hdmi_infoframe_set_hdr_metadata(struct hdmi_drm_infoframe *frame, - const struct drm_connector_state *conn_state); - /** * drm_eld_mnl - Get ELD monitor name length in bytes. * @eld: pointer to an eld memory structure with mnl set @@ -484,18 +414,6 @@ static inline int drm_eld_size(const uint8_t *eld) return DRM_ELD_HEADER_BLOCK_SIZE + eld[DRM_ELD_BASELINE_ELD_LEN] * 4; } -/** - * drm_eld_get_spk_alloc - Get speaker allocation - * @eld: pointer to an ELD memory structure - * - * The returned value is the speakers mask. User has to use %DRM_ELD_SPEAKER - * field definitions to identify speakers. 
- */ -static inline u8 drm_eld_get_spk_alloc(const uint8_t *eld) -{ - return eld[DRM_ELD_SPEAKER] & DRM_ELD_SPEAKER_MASK; -} - /** * drm_eld_get_conn_type - Get device type hdmi/dp connected * @eld: pointer to an ELD memory structure @@ -519,13 +437,12 @@ struct edid *drm_get_edid_switcheroo(struct drm_connector *connector, struct i2c_adapter *adapter); struct edid *drm_edid_duplicate(const struct edid *edid); int drm_add_edid_modes(struct drm_connector *connector, struct edid *edid); -int drm_add_override_edid_modes(struct drm_connector *connector); u8 drm_match_cea_mode(const struct drm_display_mode *to_match); +enum hdmi_picture_aspect drm_get_cea_aspect_ratio(const u8 video_code); bool drm_detect_hdmi_monitor(struct edid *edid); bool drm_detect_monitor_audio(struct edid *edid); -enum hdmi_quantization_range -drm_default_rgb_quant_range(const struct drm_display_mode *mode); +bool drm_rgb_quant_range_selectable(struct edid *edid); int drm_add_modes_noedid(struct drm_connector *connector, int hdisplay, int vdisplay); void drm_set_preferred_mode(struct drm_connector *connector, @@ -540,11 +457,5 @@ void drm_edid_get_monitor_name(struct edid *edid, char *name, struct drm_display_mode *drm_mode_find_dmt(struct drm_device *dev, int hsize, int vsize, int fresh, bool rb); -struct drm_display_mode * -drm_display_mode_from_cea_vic(struct drm_device *dev, - u8 video_code); -const u8 *drm_find_edid_extension(const struct edid *edid, - int ext_id, int *ext_index); - #endif /* __DRM_EDID_H__ */ diff --git a/include/drm/drm_encoder.h b/include/drm/drm_encoder.h index 6e91a0280f..387e33a4d6 100644 --- a/include/drm/drm_encoder.h +++ b/include/drm/drm_encoder.h @@ -25,12 +25,7 @@ #include #include -#include -#include #include -#include - -struct drm_encoder; /** * struct drm_encoder_funcs - encoder controls @@ -76,7 +71,7 @@ struct drm_encoder_funcs { * * This optional hook should be used to unregister the additional * userspace interfaces attached to the encoder from - * 
@late_register. It is called from drm_dev_unregister(), + * late_unregister(). It is called from drm_dev_unregister(), * early in the driver unload sequence to disable userspace access * before data structures are torndown. */ @@ -89,7 +84,9 @@ struct drm_encoder_funcs { * @head: list management * @base: base KMS object * @name: human readable name, can be overwritten by the driver - * @funcs: control functions, can be NULL for simple managed encoders + * @crtc: currently bound CRTC + * @bridge: bridge associated to the encoder + * @funcs: control functions * @helper_private: mid-layer private data * * CRTCs drive pixels to encoders, which convert them into signals @@ -139,9 +136,9 @@ struct drm_encoder { * @possible_crtcs: Bitmask of potential CRTC bindings, using * drm_crtc_index() as the index into the bitfield. The driver must set * the bits for all &drm_crtc objects this encoder can be connected to - * before calling drm_dev_register(). + * before calling drm_encoder_init(). * - * You will get a WARN if you get this wrong in the driver. + * In reality almost every driver gets this wrong. * * Note that since CRTC objects can't be hotplugged the assigned indices * are stable and hence known before registering all objects. @@ -153,35 +150,20 @@ struct drm_encoder { * using drm_encoder_index() as the index into the bitfield. The driver * must set the bits for all &drm_encoder objects which can clone a * &drm_crtc together with this encoder before calling - * drm_dev_register(). Drivers should set the bit representing the + * drm_encoder_init(). Drivers should set the bit representing the * encoder itself, too. Cloning bits should be set such that when two * encoders can be used in a cloned configuration, they both should have * each another bits set. * - * As an exception to the above rule if the driver doesn't implement - * any cloning it can leave @possible_clones set to 0. The core will - * automagically fix this up by setting the bit for the encoder itself. 
- * - * You will get a WARN if you get this wrong in the driver. + * In reality almost every driver gets this wrong. * * Note that since encoder objects can't be hotplugged the assigned indices * are stable and hence known before registering all objects. */ uint32_t possible_clones; - /** - * @crtc: Currently bound CRTC, only really meaningful for non-atomic - * drivers. Atomic drivers should instead check - * &drm_connector_state.crtc. - */ struct drm_crtc *crtc; - - /** - * @bridge_chain: Bridges attached to this encoder. Drivers shall not - * access this field directly. - */ - struct list_head bridge_chain; - + struct drm_bridge *bridge; const struct drm_encoder_funcs *funcs; const struct drm_encoder_helper_funcs *helper_private; }; @@ -194,54 +176,6 @@ int drm_encoder_init(struct drm_device *dev, const struct drm_encoder_funcs *funcs, int encoder_type, const char *name, ...); -__printf(6, 7) -void *__drmm_encoder_alloc(struct drm_device *dev, - size_t size, size_t offset, - const struct drm_encoder_funcs *funcs, - int encoder_type, - const char *name, ...); - -/** - * drmm_encoder_alloc - Allocate and initialize an encoder - * @dev: drm device - * @type: the type of the struct which contains struct &drm_encoder - * @member: the name of the &drm_encoder within @type - * @funcs: callbacks for this encoder (optional) - * @encoder_type: user visible type of the encoder - * @name: printf style format string for the encoder name, or NULL for default name - * - * Allocates and initializes an encoder. Encoder should be subclassed as part of - * driver encoder objects. Cleanup is automatically handled through registering - * drm_encoder_cleanup() with drmm_add_action(). - * - * The @drm_encoder_funcs.destroy hook must be NULL. - * - * Returns: - * Pointer to new encoder, or ERR_PTR on failure. - */ -#define drmm_encoder_alloc(dev, type, member, funcs, encoder_type, name, ...) 
\ - ((type *)__drmm_encoder_alloc(dev, sizeof(type), \ - offsetof(type, member), funcs, \ - encoder_type, name, ##__VA_ARGS__)) - -/** - * drmm_plain_encoder_alloc - Allocate and initialize an encoder - * @dev: drm device - * @funcs: callbacks for this encoder (optional) - * @encoder_type: user visible type of the encoder - * @name: printf style format string for the encoder name, or NULL for default name - * - * This is a simplified version of drmm_encoder_alloc(), which only allocates - * and returns a struct drm_encoder instance, with no subclassing. - * - * Returns: - * Pointer to the new drm_encoder struct, or ERR_PTR on failure. - */ -#define drmm_plain_encoder_alloc(dev, funcs, encoder_type, name, ...) \ - ((struct drm_encoder *) \ - __drmm_encoder_alloc(dev, sizeof(struct drm_encoder), \ - 0, funcs, encoder_type, name, ##__VA_ARGS__)) - /** * drm_encoder_index - find the index of a registered encoder * @encoder: encoder to find index for @@ -249,22 +183,13 @@ void *__drmm_encoder_alloc(struct drm_device *dev, * Given a registered encoder, return the index of that encoder within a DRM * device's list of encoders. */ -static inline unsigned int drm_encoder_index(const struct drm_encoder *encoder) +static inline unsigned int drm_encoder_index(struct drm_encoder *encoder) { return encoder->index; } -/** - * drm_encoder_mask - find the mask of a registered encoder - * @encoder: encoder to find mask for - * - * Given a registered encoder, return the mask bit of that encoder for an - * encoder's possible_clones field. - */ -static inline u32 drm_encoder_mask(const struct drm_encoder *encoder) -{ - return 1 << drm_encoder_index(encoder); -} +/* FIXME: We have an include file mess still, drm_crtc.h needs untangling. */ +static inline uint32_t drm_crtc_mask(struct drm_crtc *crtc); /** * drm_encoder_crtc_ok - can a given crtc drive a given encoder? 
@@ -282,19 +207,17 @@ static inline bool drm_encoder_crtc_ok(struct drm_encoder *encoder, /** * drm_encoder_find - find a &drm_encoder * @dev: DRM device - * @file_priv: drm file to check for lease against. * @id: encoder id * * Returns the encoder with @id, NULL if it doesn't exist. Simple wrapper around * drm_mode_object_find(). */ static inline struct drm_encoder *drm_encoder_find(struct drm_device *dev, - struct drm_file *file_priv, uint32_t id) { struct drm_mode_object *mo; - mo = drm_mode_object_find(dev, file_priv, id, DRM_MODE_OBJECT_ENCODER); + mo = drm_mode_object_find(dev, id, DRM_MODE_OBJECT_ENCODER); return mo ? obj_to_encoder(mo) : NULL; } @@ -311,7 +234,7 @@ void drm_encoder_cleanup(struct drm_encoder *encoder); */ #define drm_for_each_encoder_mask(encoder, dev, encoder_mask) \ list_for_each_entry((encoder), &(dev)->mode_config.encoder_list, head) \ - for_each_if ((encoder_mask) & drm_encoder_mask(encoder)) + for_each_if ((encoder_mask) & (1 << drm_encoder_index(encoder))) /** * drm_for_each_encoder - iterate over all encoders diff --git a/include/drm/drm_encoder_slave.h b/include/drm/drm_encoder_slave.h index a09864f6d6..82cdf61139 100644 --- a/include/drm/drm_encoder_slave.h +++ b/include/drm/drm_encoder_slave.h @@ -27,8 +27,8 @@ #ifndef __DRM_ENCODER_SLAVE_H__ #define __DRM_ENCODER_SLAVE_H__ +#include #include -#include /** * struct drm_encoder_slave_funcs - Entry points exposed by a slave encoder driver diff --git a/include/drm/drm_fb_cma_helper.h b/include/drm/drm_fb_cma_helper.h index 6447e34528..f313211f8e 100644 --- a/include/drm/drm_fb_cma_helper.h +++ b/include/drm/drm_fb_cma_helper.h @@ -1,23 +1,51 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef __DRM_FB_CMA_HELPER_H__ #define __DRM_FB_CMA_HELPER_H__ -#include +struct drm_fbdev_cma; +struct drm_gem_cma_object; -struct drm_device; +struct drm_fb_helper_surface_size; +struct drm_framebuffer_funcs; +struct drm_fb_helper_funcs; struct drm_framebuffer; -struct drm_plane_state; +struct 
drm_fb_helper; +struct drm_device; +struct drm_file; +struct drm_mode_fb_cmd2; + +struct drm_fbdev_cma *drm_fbdev_cma_init_with_funcs(struct drm_device *dev, + unsigned int preferred_bpp, unsigned int num_crtc, + unsigned int max_conn_count, const struct drm_fb_helper_funcs *funcs); +struct drm_fbdev_cma *drm_fbdev_cma_init(struct drm_device *dev, + unsigned int preferred_bpp, unsigned int num_crtc, + unsigned int max_conn_count); +void drm_fbdev_cma_fini(struct drm_fbdev_cma *fbdev_cma); + +void drm_fbdev_cma_restore_mode(struct drm_fbdev_cma *fbdev_cma); +void drm_fbdev_cma_hotplug_event(struct drm_fbdev_cma *fbdev_cma); +void drm_fbdev_cma_set_suspend(struct drm_fbdev_cma *fbdev_cma, int state); +int drm_fbdev_cma_create_with_funcs(struct drm_fb_helper *helper, + struct drm_fb_helper_surface_size *sizes, + const struct drm_framebuffer_funcs *funcs); + +void drm_fb_cma_destroy(struct drm_framebuffer *fb); +int drm_fb_cma_create_handle(struct drm_framebuffer *fb, + struct drm_file *file_priv, unsigned int *handle); + +struct drm_framebuffer *drm_fb_cma_create_with_funcs(struct drm_device *dev, + struct drm_file *file_priv, const struct drm_mode_fb_cmd2 *mode_cmd, + const struct drm_framebuffer_funcs *funcs); +struct drm_framebuffer *drm_fb_cma_create(struct drm_device *dev, + struct drm_file *file_priv, const struct drm_mode_fb_cmd2 *mode_cmd); struct drm_gem_cma_object *drm_fb_cma_get_gem_obj(struct drm_framebuffer *fb, unsigned int plane); -dma_addr_t drm_fb_cma_get_gem_addr(struct drm_framebuffer *fb, - struct drm_plane_state *state, - unsigned int plane); +#ifdef CONFIG_DEBUG_FS +struct seq_file; -void drm_fb_cma_sync_non_coherent(struct drm_device *drm, - struct drm_plane_state *old_state, - struct drm_plane_state *state); +int drm_fb_cma_debugfs_show(struct seq_file *m, void *arg); +#endif #endif diff --git a/include/drm/drm_fb_helper.h b/include/drm/drm_fb_helper.h index 3af4624368..ed8edfef75 100644 --- a/include/drm/drm_fb_helper.h +++ 
b/include/drm/drm_fb_helper.h @@ -32,9 +32,7 @@ struct drm_fb_helper; -#include #include -#include #include enum mode_set_atomic { @@ -42,6 +40,16 @@ enum mode_set_atomic { ENTER_ATOMIC_MODE_SET, }; +struct drm_fb_offset { + int x, y; +}; + +struct drm_fb_helper_crtc { + struct drm_mode_set mode_set; + struct drm_display_mode *desired_mode; + int x, y; +}; + /** * struct drm_fb_helper_surface_size - describes fbdev size and scanout surface size * @fb_width: fbdev width @@ -56,8 +64,10 @@ enum mode_set_atomic { * according to the largest width/height (so it is large enough for all CRTCs * to scanout). But the fbdev width/height is sized to the minimum width/ * height of all the displays. This ensures that fbcon fits on the smallest - * of the attached displays. fb_width/fb_height is used by - * drm_fb_helper_fill_info() to fill out the &fb_info.var structure. + * of the attached displays. + * + * So what is passed to drm_fb_helper_fill_var() should be fb_width/fb_height, + * rather than the surface size. */ struct drm_fb_helper_surface_size { u32 fb_width; @@ -74,6 +84,38 @@ struct drm_fb_helper_surface_size { * Driver callbacks used by the fbdev emulation helper library. */ struct drm_fb_helper_funcs { + /** + * @gamma_set: + * + * Set the given gamma LUT register on the given CRTC. + * + * This callback is optional. + * + * FIXME: + * + * This callback is functionally redundant with the core gamma table + * support and simply exists because the fbdev hasn't yet been + * refactored to use the core gamma table interfaces. + */ + void (*gamma_set)(struct drm_crtc *crtc, u16 red, u16 green, + u16 blue, int regno); + /** + * @gamma_get: + * + * Read the given gamma LUT register on the given CRTC, used to save the + * current LUT when force-restoring the fbdev for e.g. kdbg. + * + * This callback is optional. 
+ * + * FIXME: + * + * This callback is functionally redundant with the core gamma table + * support and simply exists because the fbdev hasn't yet been + * refactored to use the core gamma table interfaces. + */ + void (*gamma_get)(struct drm_crtc *crtc, u16 *red, u16 *green, + u16 *blue, int regno); + /** * @fb_probe: * @@ -90,63 +132,74 @@ struct drm_fb_helper_funcs { */ int (*fb_probe)(struct drm_fb_helper *helper, struct drm_fb_helper_surface_size *sizes); + + /** + * @initial_config: + * + * Driver callback to setup an initial fbdev display configuration. + * Drivers can use this callback to tell the fbdev emulation what the + * preferred initial configuration is. This is useful to implement + * smooth booting where the fbdev (and subsequently all userspace) never + * changes the mode, but always inherits the existing configuration. + * + * This callback is optional. + * + * RETURNS: + * + * The driver should return true if a suitable initial configuration has + * been filled out and false when the fbdev helper should fall back to + * the default probing logic. 
+ */ + bool (*initial_config)(struct drm_fb_helper *fb_helper, + struct drm_fb_helper_crtc **crtcs, + struct drm_display_mode **modes, + struct drm_fb_offset *offsets, + bool *enabled, int width, int height); +}; + +struct drm_fb_helper_connector { + struct drm_connector *connector; }; /** * struct drm_fb_helper - main structure to emulate fbdev on top of KMS * @fb: Scanout framebuffer object * @dev: DRM device + * @crtc_count: number of possible CRTCs + * @crtc_info: per-CRTC helper state (mode, x/y offset, etc) + * @connector_count: number of connected connectors + * @connector_info_alloc_count: size of connector_info + * @connector_info: array of per-connector information * @funcs: driver callbacks for fb helper * @fbdev: emulated fbdev device info struct * @pseudo_palette: fake palette of 16 colors - * @damage_clip: clip rectangle used with deferred_io to accumulate damage to - * the screen buffer - * @damage_lock: spinlock protecting @damage_clip - * @damage_work: worker used to flush the framebuffer + * @dirty_clip: clip rectangle used with deferred_io to accumulate damage to + * the screen buffer + * @dirty_lock: spinlock protecting @dirty_clip + * @dirty_work: worker used to flush the framebuffer * @resume_work: worker used during resume if the console lock is already taken * * This is the main structure used by the fbdev helpers. Drivers supporting * fbdev emulation should embedded this into their overall driver structure. - * Drivers must also fill out a &struct drm_fb_helper_funcs with a few + * Drivers must also fill out a struct &drm_fb_helper_funcs with a few * operations. */ struct drm_fb_helper { - /** - * @client: - * - * DRM client used by the generic fbdev emulation. - */ - struct drm_client_dev client; - - /** - * @buffer: - * - * Framebuffer used by the generic fbdev emulation. 
- */ - struct drm_client_buffer *buffer; - struct drm_framebuffer *fb; struct drm_device *dev; + int crtc_count; + struct drm_fb_helper_crtc *crtc_info; + int connector_count; + int connector_info_alloc_count; + struct drm_fb_helper_connector **connector_info; const struct drm_fb_helper_funcs *funcs; struct fb_info *fbdev; u32 pseudo_palette[17]; - struct drm_clip_rect damage_clip; - spinlock_t damage_lock; - struct work_struct damage_work; + struct drm_clip_rect dirty_clip; + spinlock_t dirty_lock; + struct work_struct dirty_work; struct work_struct resume_work; - /** - * @lock: - * - * Top-level FBDEV helper lock. This protects all internal data - * structures and lists, such as @connector_info and @crtc_info. - * - * FIXME: fbdev emulation locking is a mess and long term we want to - * protect all helper internal state with this lock as well as reduce - * core KMS locking as much as possible. - */ - struct mutex lock; - /** * @kernel_fb_list: * @@ -162,37 +215,8 @@ struct drm_fb_helper { * needs to be reprobe when fbdev is in control again. */ bool delayed_hotplug; - - /** - * @deferred_setup: - * - * If no outputs are connected (disconnected or unknown) the FB helper - * code will defer setup until at least one of the outputs shows up. - * This field keeps track of the status so that setup can be retried - * at every hotplug event until it succeeds eventually. - * - * Protected by @lock. - */ - bool deferred_setup; - - /** - * @preferred_bpp: - * - * Temporary storage for the driver's preferred BPP setting passed to - * FB helper initialization. This needs to be tracked so that deferred - * FB helper setup can pass this on. 
- * - * See also: @deferred_setup - */ - int preferred_bpp; }; -static inline struct drm_fb_helper * -drm_fb_helper_from_client(struct drm_client_dev *client) -{ - return container_of(client, struct drm_fb_helper, client); -} - /** * define DRM_FB_HELPER_DEFAULT_OPS - helper define for drm drivers * @@ -204,15 +228,14 @@ drm_fb_helper_from_client(struct drm_client_dev *client) .fb_set_par = drm_fb_helper_set_par, \ .fb_setcmap = drm_fb_helper_setcmap, \ .fb_blank = drm_fb_helper_blank, \ - .fb_pan_display = drm_fb_helper_pan_display, \ - .fb_debug_enter = drm_fb_helper_debug_enter, \ - .fb_debug_leave = drm_fb_helper_debug_leave, \ - .fb_ioctl = drm_fb_helper_ioctl + .fb_pan_display = drm_fb_helper_pan_display #ifdef CONFIG_DRM_FBDEV_EMULATION void drm_fb_helper_prepare(struct drm_device *dev, struct drm_fb_helper *helper, const struct drm_fb_helper_funcs *funcs); -int drm_fb_helper_init(struct drm_device *dev, struct drm_fb_helper *helper); +int drm_fb_helper_init(struct drm_device *dev, + struct drm_fb_helper *helper, int crtc_count, + int max_conn); void drm_fb_helper_fini(struct drm_fb_helper *helper); int drm_fb_helper_blank(int blank, struct fb_info *info); int drm_fb_helper_pan_display(struct fb_var_screeninfo *var, @@ -225,9 +248,13 @@ int drm_fb_helper_restore_fbdev_mode_unlocked(struct drm_fb_helper *fb_helper); struct fb_info *drm_fb_helper_alloc_fbi(struct drm_fb_helper *fb_helper); void drm_fb_helper_unregister_fbi(struct drm_fb_helper *fb_helper); -void drm_fb_helper_fill_info(struct fb_info *info, - struct drm_fb_helper *fb_helper, - struct drm_fb_helper_surface_size *sizes); +void drm_fb_helper_release_fbi(struct drm_fb_helper *fb_helper); +void drm_fb_helper_fill_var(struct fb_info *info, struct drm_fb_helper *fb_helper, + uint32_t fb_width, uint32_t fb_height); +void drm_fb_helper_fill_fix(struct fb_info *info, uint32_t pitch, + uint32_t depth); + +void drm_fb_helper_unlink_fbi(struct drm_fb_helper *fb_helper); void 
drm_fb_helper_deferred_io(struct fb_info *info, struct list_head *pagelist); @@ -257,19 +284,21 @@ void drm_fb_helper_set_suspend_unlocked(struct drm_fb_helper *fb_helper, int drm_fb_helper_setcmap(struct fb_cmap *cmap, struct fb_info *info); -int drm_fb_helper_ioctl(struct fb_info *info, unsigned int cmd, - unsigned long arg); - int drm_fb_helper_hotplug_event(struct drm_fb_helper *fb_helper); int drm_fb_helper_initial_config(struct drm_fb_helper *fb_helper, int bpp_sel); +int drm_fb_helper_single_add_all_connectors(struct drm_fb_helper *fb_helper); int drm_fb_helper_debug_enter(struct fb_info *info); int drm_fb_helper_debug_leave(struct fb_info *info); +struct drm_display_mode * +drm_has_preferred_mode(struct drm_fb_helper_connector *fb_connector, + int width, int height); +struct drm_display_mode * +drm_pick_cmdline_mode(struct drm_fb_helper_connector *fb_helper_conn, + int width, int height); -void drm_fb_helper_lastclose(struct drm_device *dev); -void drm_fb_helper_output_poll_changed(struct drm_device *dev); - -void drm_fbdev_generic_setup(struct drm_device *dev, - unsigned int preferred_bpp); +int drm_fb_helper_add_one_connector(struct drm_fb_helper *fb_helper, struct drm_connector *connector); +int drm_fb_helper_remove_one_connector(struct drm_fb_helper *fb_helper, + struct drm_connector *connector); #else static inline void drm_fb_helper_prepare(struct drm_device *dev, struct drm_fb_helper *helper, @@ -278,19 +307,14 @@ static inline void drm_fb_helper_prepare(struct drm_device *dev, } static inline int drm_fb_helper_init(struct drm_device *dev, - struct drm_fb_helper *helper) + struct drm_fb_helper *helper, int crtc_count, + int max_conn) { - /* So drivers can use it to free the struct */ - helper->dev = dev; - dev->fb_helper = helper; - return 0; } static inline void drm_fb_helper_fini(struct drm_fb_helper *helper) { - if (helper && helper->dev) - helper->dev->fb_helper = NULL; } static inline int drm_fb_helper_blank(int blank, struct fb_info *info) @@ 
-330,11 +354,18 @@ drm_fb_helper_alloc_fbi(struct drm_fb_helper *fb_helper) static inline void drm_fb_helper_unregister_fbi(struct drm_fb_helper *fb_helper) { } +static inline void drm_fb_helper_release_fbi(struct drm_fb_helper *fb_helper) +{ +} -static inline void -drm_fb_helper_fill_info(struct fb_info *info, - struct drm_fb_helper *fb_helper, - struct drm_fb_helper_surface_size *sizes) +static inline void drm_fb_helper_fill_var(struct fb_info *info, + struct drm_fb_helper *fb_helper, + uint32_t fb_width, uint32_t fb_height) +{ +} + +static inline void drm_fb_helper_fill_fix(struct fb_info *info, uint32_t pitch, + uint32_t depth) { } @@ -344,10 +375,8 @@ static inline int drm_fb_helper_setcmap(struct fb_cmap *cmap, return 0; } -static inline int drm_fb_helper_ioctl(struct fb_info *info, unsigned int cmd, - unsigned long arg) +static inline void drm_fb_helper_unlink_fbi(struct drm_fb_helper *fb_helper) { - return 0; } static inline void drm_fb_helper_deferred_io(struct fb_info *info, @@ -355,11 +384,6 @@ static inline void drm_fb_helper_deferred_io(struct fb_info *info, { } -static inline int drm_fb_helper_defio_init(struct drm_fb_helper *fb_helper) -{ - return -ENODEV; -} - static inline ssize_t drm_fb_helper_sys_read(struct fb_info *info, char __user *buf, size_t count, loff_t *ppos) @@ -425,6 +449,12 @@ static inline int drm_fb_helper_initial_config(struct drm_fb_helper *fb_helper, return 0; } +static inline int +drm_fb_helper_single_add_all_connectors(struct drm_fb_helper *fb_helper) +{ + return 0; +} + static inline int drm_fb_helper_debug_enter(struct fb_info *info) { return 0; @@ -435,19 +465,45 @@ static inline int drm_fb_helper_debug_leave(struct fb_info *info) return 0; } -static inline void drm_fb_helper_lastclose(struct drm_device *dev) +static inline struct drm_display_mode * +drm_has_preferred_mode(struct drm_fb_helper_connector *fb_connector, + int width, int height) { + return NULL; } -static inline void drm_fb_helper_output_poll_changed(struct 
drm_device *dev) +static inline struct drm_display_mode * +drm_pick_cmdline_mode(struct drm_fb_helper_connector *fb_helper_conn, + int width, int height) { + return NULL; } -static inline void -drm_fbdev_generic_setup(struct drm_device *dev, unsigned int preferred_bpp) +static inline int +drm_fb_helper_add_one_connector(struct drm_fb_helper *fb_helper, + struct drm_connector *connector) { + return 0; +} + +static inline int +drm_fb_helper_remove_one_connector(struct drm_fb_helper *fb_helper, + struct drm_connector *connector) +{ + return 0; } #endif +static inline int +drm_fb_helper_remove_conflicting_framebuffers(struct apertures_struct *a, + const char *name, bool primary) +{ +#if IS_REACHABLE(CONFIG_FB) + return remove_conflicting_framebuffers(a, name, primary); +#else + return 0; +#endif +} + #endif diff --git a/include/drm/drm_flip_work.h b/include/drm/drm_flip_work.h index 21c3d512d2..d387cf06ae 100644 --- a/include/drm/drm_flip_work.h +++ b/include/drm/drm_flip_work.h @@ -54,7 +54,7 @@ typedef void (*drm_flip_func_t)(struct drm_flip_work *work, void *val); /** * struct drm_flip_task - flip work task * @node: list entry element - * @data: data to pass to &drm_flip_work.func + * @data: data to pass to work->func */ struct drm_flip_task { struct list_head node; diff --git a/include/drm/drm_fourcc.h b/include/drm/drm_fourcc.h index 22aa64d07c..30c30fa87e 100644 --- a/include/drm/drm_fourcc.h +++ b/include/drm/drm_fourcc.h @@ -25,295 +25,14 @@ #include #include -/** - * DRM_FORMAT_MAX_PLANES - maximum number of planes a DRM format can have - */ -#define DRM_FORMAT_MAX_PLANES 4u - -/* - * DRM formats are little endian. Define host endian variants for the - * most common formats here, to reduce the #ifdefs needed in drivers. - * - * Note that the DRM_FORMAT_BIG_ENDIAN flag should only be used in - * case the format can't be specified otherwise, so we don't end up - * with two values describing the same format. 
- */ -#ifdef __BIG_ENDIAN -# define DRM_FORMAT_HOST_XRGB1555 (DRM_FORMAT_XRGB1555 | \ - DRM_FORMAT_BIG_ENDIAN) -# define DRM_FORMAT_HOST_RGB565 (DRM_FORMAT_RGB565 | \ - DRM_FORMAT_BIG_ENDIAN) -# define DRM_FORMAT_HOST_XRGB8888 DRM_FORMAT_BGRX8888 -# define DRM_FORMAT_HOST_ARGB8888 DRM_FORMAT_BGRA8888 -#else -# define DRM_FORMAT_HOST_XRGB1555 DRM_FORMAT_XRGB1555 -# define DRM_FORMAT_HOST_RGB565 DRM_FORMAT_RGB565 -# define DRM_FORMAT_HOST_XRGB8888 DRM_FORMAT_XRGB8888 -# define DRM_FORMAT_HOST_ARGB8888 DRM_FORMAT_ARGB8888 -#endif - -struct drm_device; -struct drm_mode_fb_cmd2; - -/** - * struct drm_format_info - information about a DRM format - */ -struct drm_format_info { - /** @format: 4CC format identifier (DRM_FORMAT_*) */ - u32 format; - - /** - * @depth: - * - * Color depth (number of bits per pixel excluding padding bits), - * valid for a subset of RGB formats only. This is a legacy field, do - * not use in new code and set to 0 for new formats. - */ - u8 depth; - - /** @num_planes: Number of color planes (1 to 3) */ - u8 num_planes; - - union { - /** - * @cpp: - * - * Number of bytes per pixel (per plane), this is aliased with - * @char_per_block. It is deprecated in favour of using the - * triplet @char_per_block, @block_w, @block_h for better - * describing the pixel format. - */ - u8 cpp[DRM_FORMAT_MAX_PLANES]; - - /** - * @char_per_block: - * - * Number of bytes per block (per plane), where blocks are - * defined as a rectangle of pixels which are stored next to - * each other in a byte aligned memory region. Together with - * @block_w and @block_h this is used to properly describe tiles - * in tiled formats or to describe groups of pixels in packed - * formats for which the memory needed for a single pixel is not - * byte aligned. - * - * @cpp has been kept for historical reasons because there are - * a lot of places in drivers where it's used. 
In drm core for - * generic code paths the preferred way is to use - * @char_per_block, drm_format_info_block_width() and - * drm_format_info_block_height() which allows handling both - * block and non-block formats in the same way. - * - * For formats that are intended to be used only with non-linear - * modifiers both @cpp and @char_per_block must be 0 in the - * generic format table. Drivers could supply accurate - * information from their drm_mode_config.get_format_info hook - * if they want the core to be validating the pitch. - */ - u8 char_per_block[DRM_FORMAT_MAX_PLANES]; - }; - - /** - * @block_w: - * - * Block width in pixels, this is intended to be accessed through - * drm_format_info_block_width() - */ - u8 block_w[DRM_FORMAT_MAX_PLANES]; - - /** - * @block_h: - * - * Block height in pixels, this is intended to be accessed through - * drm_format_info_block_height() - */ - u8 block_h[DRM_FORMAT_MAX_PLANES]; - - /** @hsub: Horizontal chroma subsampling factor */ - u8 hsub; - /** @vsub: Vertical chroma subsampling factor */ - u8 vsub; - - /** @has_alpha: Does the format embeds an alpha component? */ - bool has_alpha; - - /** @is_yuv: Is it a YUV format? */ - bool is_yuv; -}; - -/** - * drm_format_info_is_yuv_packed - check that the format info matches a YUV - * format with data laid in a single plane - * @info: format info - * - * Returns: - * A boolean indicating whether the format info matches a packed YUV format. - */ -static inline bool -drm_format_info_is_yuv_packed(const struct drm_format_info *info) -{ - return info->is_yuv && info->num_planes == 1; -} - -/** - * drm_format_info_is_yuv_semiplanar - check that the format info matches a YUV - * format with data laid in two planes (luminance and chrominance) - * @info: format info - * - * Returns: - * A boolean indicating whether the format info matches a semiplanar YUV format. 
- */ -static inline bool -drm_format_info_is_yuv_semiplanar(const struct drm_format_info *info) -{ - return info->is_yuv && info->num_planes == 2; -} - -/** - * drm_format_info_is_yuv_planar - check that the format info matches a YUV - * format with data laid in three planes (one for each YUV component) - * @info: format info - * - * Returns: - * A boolean indicating whether the format info matches a planar YUV format. - */ -static inline bool -drm_format_info_is_yuv_planar(const struct drm_format_info *info) -{ - return info->is_yuv && info->num_planes == 3; -} - -/** - * drm_format_info_is_yuv_sampling_410 - check that the format info matches a - * YUV format with 4:1:0 sub-sampling - * @info: format info - * - * Returns: - * A boolean indicating whether the format info matches a YUV format with 4:1:0 - * sub-sampling. - */ -static inline bool -drm_format_info_is_yuv_sampling_410(const struct drm_format_info *info) -{ - return info->is_yuv && info->hsub == 4 && info->vsub == 4; -} - -/** - * drm_format_info_is_yuv_sampling_411 - check that the format info matches a - * YUV format with 4:1:1 sub-sampling - * @info: format info - * - * Returns: - * A boolean indicating whether the format info matches a YUV format with 4:1:1 - * sub-sampling. - */ -static inline bool -drm_format_info_is_yuv_sampling_411(const struct drm_format_info *info) -{ - return info->is_yuv && info->hsub == 4 && info->vsub == 1; -} - -/** - * drm_format_info_is_yuv_sampling_420 - check that the format info matches a - * YUV format with 4:2:0 sub-sampling - * @info: format info - * - * Returns: - * A boolean indicating whether the format info matches a YUV format with 4:2:0 - * sub-sampling. 
- */ -static inline bool -drm_format_info_is_yuv_sampling_420(const struct drm_format_info *info) -{ - return info->is_yuv && info->hsub == 2 && info->vsub == 2; -} - -/** - * drm_format_info_is_yuv_sampling_422 - check that the format info matches a - * YUV format with 4:2:2 sub-sampling - * @info: format info - * - * Returns: - * A boolean indicating whether the format info matches a YUV format with 4:2:2 - * sub-sampling. - */ -static inline bool -drm_format_info_is_yuv_sampling_422(const struct drm_format_info *info) -{ - return info->is_yuv && info->hsub == 2 && info->vsub == 1; -} - -/** - * drm_format_info_is_yuv_sampling_444 - check that the format info matches a - * YUV format with 4:4:4 sub-sampling - * @info: format info - * - * Returns: - * A boolean indicating whether the format info matches a YUV format with 4:4:4 - * sub-sampling. - */ -static inline bool -drm_format_info_is_yuv_sampling_444(const struct drm_format_info *info) -{ - return info->is_yuv && info->hsub == 1 && info->vsub == 1; -} - -/** - * drm_format_info_plane_width - width of the plane given the first plane - * @info: pixel format info - * @width: width of the first plane - * @plane: plane index - * - * Returns: - * The width of @plane, given that the width of the first plane is @width. - */ -static inline -int drm_format_info_plane_width(const struct drm_format_info *info, int width, - int plane) -{ - if (!info || plane >= info->num_planes) - return 0; - - if (plane == 0) - return width; - - return width / info->hsub; -} - -/** - * drm_format_info_plane_height - height of the plane given the first plane - * @info: pixel format info - * @height: height of the first plane - * @plane: plane index - * - * Returns: - * The height of @plane, given that the height of the first plane is @height. 
- */ -static inline -int drm_format_info_plane_height(const struct drm_format_info *info, int height, - int plane) -{ - if (!info || plane >= info->num_planes) - return 0; - - if (plane == 0) - return height; - - return height / info->vsub; -} - -const struct drm_format_info *__drm_format_info(u32 format); -const struct drm_format_info *drm_format_info(u32 format); -const struct drm_format_info * -drm_get_format_info(struct drm_device *dev, - const struct drm_mode_fb_cmd2 *mode_cmd); uint32_t drm_mode_legacy_fb_format(uint32_t bpp, uint32_t depth); -uint32_t drm_driver_legacy_fb_format(struct drm_device *dev, - uint32_t bpp, uint32_t depth); -unsigned int drm_format_info_block_width(const struct drm_format_info *info, - int plane); -unsigned int drm_format_info_block_height(const struct drm_format_info *info, - int plane); -uint64_t drm_format_info_min_pitch(const struct drm_format_info *info, - int plane, unsigned int buffer_width); +void drm_fb_get_bpp_depth(uint32_t format, unsigned int *depth, int *bpp); +int drm_format_num_planes(uint32_t format); +int drm_format_plane_cpp(uint32_t format, int plane); +int drm_format_horz_chroma_subsampling(uint32_t format); +int drm_format_vert_chroma_subsampling(uint32_t format); +int drm_format_plane_width(int width, uint32_t format, int plane); +int drm_format_plane_height(int height, uint32_t format, int plane); +char *drm_get_format_name(uint32_t format) __malloc; #endif /* __DRM_FOURCC_H__ */ diff --git a/include/drm/drm_framebuffer.h b/include/drm/drm_framebuffer.h index f67c5b7bcb..f5ae1f436a 100644 --- a/include/drm/drm_framebuffer.h +++ b/include/drm/drm_framebuffer.h @@ -23,18 +23,13 @@ #ifndef __DRM_FRAMEBUFFER_H__ #define __DRM_FRAMEBUFFER_H__ -#include #include -#include - -#include +#include #include -struct drm_clip_rect; -struct drm_device; -struct drm_file; struct drm_framebuffer; -struct drm_gem_object; +struct drm_file; +struct drm_device; /** * struct drm_framebuffer_funcs - framebuffer hooks @@ -45,8 
+40,8 @@ struct drm_framebuffer_funcs { * * Clean up framebuffer resources, specifically also unreference the * backing storage. The core guarantees to call this function for every - * framebuffer successfully created by calling - * &drm_mode_config_funcs.fb_create. Drivers must also call + * framebuffer successfully created by ->fb_create() in + * &drm_mode_config_funcs. Drivers must also call * drm_framebuffer_cleanup() to release DRM core resources for this * framebuffer. */ @@ -56,7 +51,7 @@ struct drm_framebuffer_funcs { * @create_handle: * * Create a buffer handle in the driver-specific buffer manager (either - * GEM or TTM) valid for the passed-in &struct drm_file. This is used by + * GEM or TTM) valid for the passed-in struct &drm_file. This is used by * the core to implement the GETFB IOCTL, which returns (for * sufficiently priviledged user) also a native buffer handle. This can * be used for seamless transitions between modesetting clients by @@ -87,9 +82,6 @@ struct drm_framebuffer_funcs { * for more information as all the semantics and arguments have a one to * one mapping on this function. * - * Atomic drivers should use drm_atomic_helper_dirtyfb() to implement - * this hook. - * * RETURNS: * * 0 on success or a negative error code on failure. @@ -109,8 +101,8 @@ struct drm_framebuffer_funcs { * cleanup (like releasing the reference(s) on the backing GEM bo(s)) * should be deferred. In cases like this, the driver would like to * hold a ref to the fb even though it has already been removed from - * userspace perspective. See drm_framebuffer_get() and - * drm_framebuffer_put(). + * userspace perspective. See drm_framebuffer_reference() and + * drm_framebuffer_unreference(). * * The refcount is stored inside the mode object @base. */ @@ -120,8 +112,8 @@ struct drm_framebuffer { */ struct drm_device *dev; /** - * @head: Place on the &drm_mode_config.fb_list, access protected by - * &drm_mode_config.fb_lock. 
+ * @head: Place on the dev->mode_config.fb_list, access protected by + * dev->mode_config.fb_lock. */ struct list_head head; @@ -129,16 +121,6 @@ struct drm_framebuffer { * @base: base modeset object structure, contains the reference count. */ struct drm_mode_object base; - - /** - * @comm: Name of the process allocating the fb, used for fb dumping. - */ - char comm[TASK_COMM_LEN]; - - /** - * @format: framebuffer format information - */ - const struct drm_format_info *format; /** * @funcs: framebuffer vfunc table */ @@ -147,7 +129,7 @@ struct drm_framebuffer { * @pitches: Line stride per buffer. For userspace created object this * is copied from drm_mode_fb_cmd2. */ - unsigned int pitches[DRM_FORMAT_MAX_PLANES]; + unsigned int pitches[4]; /** * @offsets: Offset from buffer start to the actual pixel data in bytes, * per buffer. For userspace created object this is copied from @@ -163,16 +145,16 @@ struct drm_framebuffer { * * This should not be used to specifiy x/y pixel offsets into the buffer * data (even for linear buffers). Specifying an x/y pixel offset is - * instead done through the source rectangle in &struct drm_plane_state. + * instead done through the source rectangle in struct &drm_plane_state. */ - unsigned int offsets[DRM_FORMAT_MAX_PLANES]; + unsigned int offsets[4]; /** - * @modifier: Data layout modifier. This is used to describe + * @modifier: Data layout modifier, per buffer. This is used to describe * tiling, or also special layouts (like compression) of auxiliary * buffers. For userspace created object this is copied from * drm_mode_fb_cmd2. */ - uint64_t modifier; + uint64_t modifier[4]; /** * @width: Logical width of the visible area of the framebuffer, in * pixels. @@ -183,11 +165,28 @@ struct drm_framebuffer { * pixels. */ unsigned int height; + /** + * @depth: Depth in bits per pixel for RGB formats. 0 for everything + * else. 
Legacy information derived from @pixel_format, it's suggested to use + * the DRM FOURCC codes and helper functions directly instead. + */ + unsigned int depth; + /** + * @bits_per_pixel: Storage used bits per pixel for RGB formats. 0 for + * everything else. Legacy information derived from @pixel_format, it's + * suggested to use the DRM FOURCC codes and helper functions directly + * instead. + */ + int bits_per_pixel; /** * @flags: Framebuffer flags like DRM_MODE_FB_INTERLACED or * DRM_MODE_FB_MODIFIERS. */ int flags; + /** + * @pixel_format: DRM FOURCC code describing the pixel format. + */ + uint32_t pixel_format; /* fourcc format */ /** * @hot_x: X coordinate of the cursor hotspot. Used by the legacy cursor * IOCTL when the driver supports cursor through a DRM_PLANE_TYPE_CURSOR @@ -201,16 +200,10 @@ struct drm_framebuffer { */ int hot_y; /** - * @filp_head: Placed on &drm_file.fbs, protected by &drm_file.fbs_lock. + * @filp_head: Placed on struct &drm_file fbs list_head, protected by + * fbs_lock in the same structure. */ struct list_head filp_head; - /** - * @obj: GEM objects backing the framebuffer, one per plane (optional). - * - * This is used by the GEM framebuffer helpers, see e.g. - * drm_gem_fb_create(). 
- */ - struct drm_gem_object *obj[DRM_FORMAT_MAX_PLANES]; }; #define obj_to_fb(x) container_of(x, struct drm_framebuffer, base) @@ -219,33 +212,31 @@ int drm_framebuffer_init(struct drm_device *dev, struct drm_framebuffer *fb, const struct drm_framebuffer_funcs *funcs); struct drm_framebuffer *drm_framebuffer_lookup(struct drm_device *dev, - struct drm_file *file_priv, uint32_t id); void drm_framebuffer_remove(struct drm_framebuffer *fb); void drm_framebuffer_cleanup(struct drm_framebuffer *fb); void drm_framebuffer_unregister_private(struct drm_framebuffer *fb); /** - * drm_framebuffer_get - acquire a framebuffer reference - * @fb: DRM framebuffer + * drm_framebuffer_reference - incr the fb refcnt + * @fb: framebuffer * - * This function increments the framebuffer's reference count. + * This functions increments the fb's refcount. */ -static inline void drm_framebuffer_get(struct drm_framebuffer *fb) +static inline void drm_framebuffer_reference(struct drm_framebuffer *fb) { - drm_mode_object_get(&fb->base); + drm_mode_object_reference(&fb->base); } /** - * drm_framebuffer_put - release a framebuffer reference - * @fb: DRM framebuffer + * drm_framebuffer_unreference - unref a framebuffer + * @fb: framebuffer to unref * - * This function decrements the framebuffer's reference count and frees the - * framebuffer if the reference count drops to zero. + * This functions decrements the fb's refcount and frees it if it drops to zero. */ -static inline void drm_framebuffer_put(struct drm_framebuffer *fb) +static inline void drm_framebuffer_unreference(struct drm_framebuffer *fb) { - drm_mode_object_put(&fb->base); + drm_mode_object_unreference(&fb->base); } /** @@ -254,36 +245,18 @@ static inline void drm_framebuffer_put(struct drm_framebuffer *fb) * * This functions returns the framebuffer's reference count. 
*/ -static inline uint32_t drm_framebuffer_read_refcount(const struct drm_framebuffer *fb) +static inline uint32_t drm_framebuffer_read_refcount(struct drm_framebuffer *fb) { - return kref_read(&fb->base.refcount); + return atomic_read(&fb->base.refcount.refcount); } /** - * drm_framebuffer_assign - store a reference to the fb - * @p: location to store framebuffer - * @fb: new framebuffer (maybe NULL) - * - * This functions sets the location to store a reference to the framebuffer, - * unreferencing the framebuffer that was previously stored in that location. - */ -static inline void drm_framebuffer_assign(struct drm_framebuffer **p, - struct drm_framebuffer *fb) -{ - if (fb) - drm_framebuffer_get(fb); - if (*p) - drm_framebuffer_put(*p); - *p = fb; -} - -/* * drm_for_each_fb - iterate over all framebuffers * @fb: the loop cursor * @dev: the DRM device * - * Iterate over all framebuffers of @dev. User must hold - * &drm_mode_config.fb_lock. + * Iterate over all framebuffers of @dev. User must hold the fb_lock from + * &drm_mode_config. */ #define drm_for_each_fb(fb, dev) \ for (WARN_ON(!mutex_is_locked(&(dev)->mode_config.fb_lock)), \ @@ -291,48 +264,4 @@ static inline void drm_framebuffer_assign(struct drm_framebuffer **p, struct drm_framebuffer, head); \ &fb->head != (&(dev)->mode_config.fb_list); \ fb = list_next_entry(fb, head)) - -int drm_framebuffer_plane_width(int width, - const struct drm_framebuffer *fb, int plane); -int drm_framebuffer_plane_height(int height, - const struct drm_framebuffer *fb, int plane); - -/** - * struct drm_afbc_framebuffer - a special afbc frame buffer object - * - * A derived class of struct drm_framebuffer, dedicated for afbc use cases. - */ -struct drm_afbc_framebuffer { - /** - * @base: base framebuffer structure. 
- */ - struct drm_framebuffer base; - /** - * @block_width: width of a single afbc block - */ - u32 block_width; - /** - * @block_height: height of a single afbc block - */ - u32 block_height; - /** - * @aligned_width: aligned frame buffer width - */ - u32 aligned_width; - /** - * @aligned_height: aligned frame buffer height - */ - u32 aligned_height; - /** - * @offset: offset of the first afbc header - */ - u32 offset; - /** - * @afbc_size: minimum size of afbc buffer - */ - u32 afbc_size; -}; - -#define fb_to_afbc_fb(x) container_of(x, struct drm_afbc_framebuffer, base) - #endif diff --git a/include/drm/drm_gem.h b/include/drm/drm_gem.h index 35e7f44c2a..9f63736e61 100644 --- a/include/drm/drm_gem.h +++ b/include/drm/drm_gem.h @@ -34,146 +34,6 @@ * OTHER DEALINGS IN THE SOFTWARE. */ -#include -#include - -#include - -struct dma_buf_map; -struct drm_gem_object; - -/** - * struct drm_gem_object_funcs - GEM object functions - */ -struct drm_gem_object_funcs { - /** - * @free: - * - * Deconstructor for drm_gem_objects. - * - * This callback is mandatory. - */ - void (*free)(struct drm_gem_object *obj); - - /** - * @open: - * - * Called upon GEM handle creation. - * - * This callback is optional. - */ - int (*open)(struct drm_gem_object *obj, struct drm_file *file); - - /** - * @close: - * - * Called upon GEM handle release. - * - * This callback is optional. - */ - void (*close)(struct drm_gem_object *obj, struct drm_file *file); - - /** - * @print_info: - * - * If driver subclasses struct &drm_gem_object, it can implement this - * optional hook for printing additional driver specific info. - * - * drm_printf_indent() should be used in the callback passing it the - * indent argument. - * - * This callback is called from drm_gem_print_info(). - * - * This callback is optional. - */ - void (*print_info)(struct drm_printer *p, unsigned int indent, - const struct drm_gem_object *obj); - - /** - * @export: - * - * Export backing buffer as a &dma_buf. 
- * If this is not set drm_gem_prime_export() is used. - * - * This callback is optional. - */ - struct dma_buf *(*export)(struct drm_gem_object *obj, int flags); - - /** - * @pin: - * - * Pin backing buffer in memory. Used by the drm_gem_map_attach() helper. - * - * This callback is optional. - */ - int (*pin)(struct drm_gem_object *obj); - - /** - * @unpin: - * - * Unpin backing buffer. Used by the drm_gem_map_detach() helper. - * - * This callback is optional. - */ - void (*unpin)(struct drm_gem_object *obj); - - /** - * @get_sg_table: - * - * Returns a Scatter-Gather table representation of the buffer. - * Used when exporting a buffer by the drm_gem_map_dma_buf() helper. - * Releasing is done by calling dma_unmap_sg_attrs() and sg_free_table() - * in drm_gem_unmap_buf(), therefore these helpers and this callback - * here cannot be used for sg tables pointing at driver private memory - * ranges. - * - * See also drm_prime_pages_to_sg(). - */ - struct sg_table *(*get_sg_table)(struct drm_gem_object *obj); - - /** - * @vmap: - * - * Returns a virtual address for the buffer. Used by the - * drm_gem_dmabuf_vmap() helper. - * - * This callback is optional. - */ - int (*vmap)(struct drm_gem_object *obj, struct dma_buf_map *map); - - /** - * @vunmap: - * - * Releases the address previously returned by @vmap. Used by the - * drm_gem_dmabuf_vunmap() helper. - * - * This callback is optional. - */ - void (*vunmap)(struct drm_gem_object *obj, struct dma_buf_map *map); - - /** - * @mmap: - * - * Handle mmap() of the gem object, setup vma accordingly. - * - * This callback is optional. - * - * The callback is used by both drm_gem_mmap_obj() and - * drm_gem_prime_mmap(). When @mmap is present @vm_ops is not - * used, the @mmap callback must set vma->vm_ops instead. - */ - int (*mmap)(struct drm_gem_object *obj, struct vm_area_struct *vma); - - /** - * @vm_ops: - * - * Virtual memory operations used with mmap. - * - * This is optional but necessary for mmap support. 
- */ - const struct vm_operations_struct *vm_ops; -}; - /** * struct drm_gem_object - GEM buffer object * @@ -188,9 +48,9 @@ struct drm_gem_object { * * Reference count of this object * - * Please use drm_gem_object_get() to acquire and drm_gem_object_put_locked() - * or drm_gem_object_put() to release a reference to a GEM - * buffer object. + * Please use drm_gem_object_reference() to acquire and + * drm_gem_object_unreference() or drm_gem_object_unreference_unlocked() + * to release a reference to a GEM buffer object. */ struct kref refcount; @@ -203,7 +63,7 @@ struct drm_gem_object { * drops to 0 any global names (e.g. the id in the flink namespace) will * be cleared. * - * Protected by &drm_device.object_name_lock. + * Protected by dev->object_name_lock. */ unsigned handle_count; @@ -246,11 +106,41 @@ struct drm_gem_object { * @name: * * Global name for this object, starts at 1. 0 means unnamed. - * Access is covered by &drm_device.object_name_lock. This is used by - * the GEM_FLINK and GEM_OPEN ioctls. + * Access is covered by dev->object_name_lock. This is used by the GEM_FLINK + * and GEM_OPEN ioctls. */ int name; + /** + * @read_domains: + * + * Read memory domains. These monitor which caches contain read/write data + * related to the object. When transitioning from one set of domains + * to another, the driver is called to ensure that caches are suitably + * flushed and invalidated. + */ + uint32_t read_domains; + + /** + * @write_domain: Corresponding unique write memory domain. + */ + uint32_t write_domain; + + /** + * @pending_read_domains: + * + * While validating an exec operation, the + * new read/write domain values are computed here. + * They will be transferred to the above values + * at the point that any cache flushing occurs + */ + uint32_t pending_read_domains; + + /** + * @pending_write_domain: Write domain similar to @pending_read_domains. 
+ */ + uint32_t pending_write_domain; + /** * @dma_buf: * @@ -260,7 +150,7 @@ struct drm_gem_object { * through importing or exporting). We break the resulting reference * loop when the last gem handle for this object is released. * - * Protected by &drm_device.object_name_lock. + * Protected by obj->object_name_lock. */ struct dma_buf *dma_buf; @@ -273,9 +163,8 @@ struct drm_gem_object { * attachment point for the device. This is invariant over the lifetime * of a gem object. * - * The &drm_gem_object_funcs.free callback is responsible for - * cleaning up the dma_buf attachment and references acquired at import - * time. + * The driver's ->gem_free_object callback is responsible for cleaning + * up the dma_buf attachment and references acquired at import time. * * Note that the drm gem/prime core does not depend upon drivers setting * this field any more. So for drivers where this doesn't make sense @@ -283,63 +172,8 @@ struct drm_gem_object { * simply leave it as NULL. */ struct dma_buf_attachment *import_attach; - - /** - * @resv: - * - * Pointer to reservation object associated with the this GEM object. - * - * Normally (@resv == &@_resv) except for imported GEM objects. - */ - struct dma_resv *resv; - - /** - * @_resv: - * - * A reservation object for this GEM object. - * - * This is unused for imported GEM objects. - */ - struct dma_resv _resv; - - /** - * @funcs: - * - * Optional GEM object functions. If this is set, it will be used instead of the - * corresponding &drm_driver GEM callbacks. - * - * New drivers should use this. - * - */ - const struct drm_gem_object_funcs *funcs; }; -/** - * DEFINE_DRM_GEM_FOPS() - macro to generate file operations for GEM drivers - * @name: name for the generated structure - * - * This macro autogenerates a suitable &struct file_operations for GEM based - * drivers, which can be assigned to &drm_driver.fops. 
Note that this structure - * cannot be shared between drivers, because it contains a reference to the - * current module using THIS_MODULE. - * - * Note that the declaration is already marked as static - if you need a - * non-static version of this you're probably doing it wrong and will break the - * THIS_MODULE reference by accident. - */ -#define DEFINE_DRM_GEM_FOPS(name) \ - static const struct file_operations name = {\ - .owner = THIS_MODULE,\ - .open = drm_open,\ - .release = drm_release,\ - .unlocked_ioctl = drm_ioctl,\ - .compat_ioctl = drm_compat_ioctl,\ - .poll = drm_poll,\ - .read = drm_read,\ - .llseek = noop_llseek,\ - .mmap = drm_gem_mmap,\ - } - void drm_gem_object_release(struct drm_gem_object *obj); void drm_gem_object_free(struct kref *kref); int drm_gem_object_init(struct drm_device *dev, @@ -353,36 +187,42 @@ int drm_gem_mmap_obj(struct drm_gem_object *obj, unsigned long obj_size, int drm_gem_mmap(struct file *filp, struct vm_area_struct *vma); /** - * drm_gem_object_get - acquire a GEM buffer object reference + * drm_gem_object_reference - acquire a GEM BO reference * @obj: GEM buffer object * - * This function acquires an additional reference to @obj. It is illegal to - * call this without already holding a reference. No locks required. + * This acquires additional reference to @obj. It is illegal to call this + * without already holding a reference. No locks required. */ -static inline void drm_gem_object_get(struct drm_gem_object *obj) +static inline void +drm_gem_object_reference(struct drm_gem_object *obj) { kref_get(&obj->refcount); } -__attribute__((nonnull)) +/** + * __drm_gem_object_unreference - raw function to release a GEM BO reference + * @obj: GEM buffer object + * + * This function is meant to be used by drivers which are not encumbered with + * dev->struct_mutex legacy locking and which are using the + * gem_free_object_unlocked callback. 
It avoids all the locking checks and + * locking overhead of drm_gem_object_unreference() and + * drm_gem_object_unreference_unlocked(). + * + * Drivers should never call this directly in their code. Instead they should + * wrap it up into a ``driver_gem_object_unreference(struct driver_gem_object + * *obj)`` wrapper function, and use that. Shared code should never call this, to + * avoid breaking drivers by accident which still depend upon dev->struct_mutex + * locking. + */ static inline void -__drm_gem_object_put(struct drm_gem_object *obj) +__drm_gem_object_unreference(struct drm_gem_object *obj) { kref_put(&obj->refcount, drm_gem_object_free); } -/** - * drm_gem_object_put - drop a GEM buffer object reference - * @obj: GEM buffer object - * - * This releases a reference to @obj. - */ -static inline void -drm_gem_object_put(struct drm_gem_object *obj) -{ - if (obj) - __drm_gem_object_put(obj); -} +void drm_gem_object_unreference_unlocked(struct drm_gem_object *obj); +void drm_gem_object_unreference(struct drm_gem_object *obj); int drm_gem_handle_create(struct drm_file *file_priv, struct drm_gem_object *obj, @@ -398,21 +238,9 @@ struct page **drm_gem_get_pages(struct drm_gem_object *obj); void drm_gem_put_pages(struct drm_gem_object *obj, struct page **pages, bool dirty, bool accessed); -int drm_gem_objects_lookup(struct drm_file *filp, void __user *bo_handles, - int count, struct drm_gem_object ***objs_out); struct drm_gem_object *drm_gem_object_lookup(struct drm_file *filp, u32 handle); -long drm_gem_dma_resv_wait(struct drm_file *filep, u32 handle, - bool wait_all, unsigned long timeout); -int drm_gem_lock_reservations(struct drm_gem_object **objs, int count, - struct ww_acquire_ctx *acquire_ctx); -void drm_gem_unlock_reservations(struct drm_gem_object **objs, int count, - struct ww_acquire_ctx *acquire_ctx); -int drm_gem_fence_array_add(struct xarray *fence_array, - struct dma_fence *fence); -int drm_gem_fence_array_add_implicit(struct xarray *fence_array, - 
struct drm_gem_object *obj, - bool write); -int drm_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev, - u32 handle, u64 *offset); +int drm_gem_dumb_destroy(struct drm_file *file, + struct drm_device *dev, + uint32_t handle); #endif /* __DRM_GEM_H__ */ diff --git a/include/drm/drm_gem_cma_helper.h b/include/drm/drm_gem_cma_helper.h index cd13508acb..acd6af8a8e 100644 --- a/include/drm/drm_gem_cma_helper.h +++ b/include/drm/drm_gem_cma_helper.h @@ -1,22 +1,15 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef __DRM_GEM_CMA_HELPER_H__ #define __DRM_GEM_CMA_HELPER_H__ -#include -#include +#include #include -struct drm_mode_create_dumb; - /** * struct drm_gem_cma_object - GEM object backed by CMA memory allocations * @base: base GEM object * @paddr: physical address of the backing memory - * @sgt: scatter/gather table for imported PRIME buffers. The table can have - * more than one entry but they are guaranteed to have contiguous - * DMA addresses. + * @sgt: scatter/gather table for imported PRIME buffers * @vaddr: kernel virtual address of the backing memory - * @map_noncoherent: if true, the GEM object is backed by non-coherent memory */ struct drm_gem_cma_object { struct drm_gem_object base; @@ -25,46 +18,13 @@ struct drm_gem_cma_object { /* For objects with DMA memory allocated by GEM CMA */ void *vaddr; - - bool map_noncoherent; }; -#define to_drm_gem_cma_obj(gem_obj) \ - container_of(gem_obj, struct drm_gem_cma_object, base) - -#ifndef CONFIG_MMU -#define DRM_GEM_CMA_UNMAPPED_AREA_FOPS \ - .get_unmapped_area = drm_gem_cma_get_unmapped_area, -#else -#define DRM_GEM_CMA_UNMAPPED_AREA_FOPS -#endif - -/** - * DEFINE_DRM_GEM_CMA_FOPS() - macro to generate file operations for CMA drivers - * @name: name for the generated structure - * - * This macro autogenerates a suitable &struct file_operations for CMA based - * drivers, which can be assigned to &drm_driver.fops. 
Note that this structure - * cannot be shared between drivers, because it contains a reference to the - * current module using THIS_MODULE. - * - * Note that the declaration is already marked as static - if you need a - * non-static version of this you're probably doing it wrong and will break the - * THIS_MODULE reference by accident. - */ -#define DEFINE_DRM_GEM_CMA_FOPS(name) \ - static const struct file_operations name = {\ - .owner = THIS_MODULE,\ - .open = drm_open,\ - .release = drm_release,\ - .unlocked_ioctl = drm_ioctl,\ - .compat_ioctl = drm_compat_ioctl,\ - .poll = drm_poll,\ - .read = drm_read,\ - .llseek = noop_llseek,\ - .mmap = drm_gem_mmap,\ - DRM_GEM_CMA_UNMAPPED_AREA_FOPS \ - } +static inline struct drm_gem_cma_object * +to_drm_gem_cma_obj(struct drm_gem_object *gem_obj) +{ + return container_of(gem_obj, struct drm_gem_cma_object, base); +} /* free GEM object */ void drm_gem_cma_free_object(struct drm_gem_object *gem_obj); @@ -79,110 +39,32 @@ int drm_gem_cma_dumb_create(struct drm_file *file_priv, struct drm_device *drm, struct drm_mode_create_dumb *args); +/* map memory region for DRM framebuffer to user space */ +int drm_gem_cma_dumb_map_offset(struct drm_file *file_priv, + struct drm_device *drm, u32 handle, + u64 *offset); + +/* set vm_flags and we can change the VM attribute to other one at here */ +int drm_gem_cma_mmap(struct file *filp, struct vm_area_struct *vma); + /* allocate physical memory */ struct drm_gem_cma_object *drm_gem_cma_create(struct drm_device *drm, size_t size); extern const struct vm_operations_struct drm_gem_cma_vm_ops; -#ifndef CONFIG_MMU -unsigned long drm_gem_cma_get_unmapped_area(struct file *filp, - unsigned long addr, - unsigned long len, - unsigned long pgoff, - unsigned long flags); +#ifdef CONFIG_DEBUG_FS +void drm_gem_cma_describe(struct drm_gem_cma_object *obj, struct seq_file *m); #endif -void drm_gem_cma_print_info(struct drm_printer *p, unsigned int indent, - const struct drm_gem_object *obj); - -struct 
sg_table *drm_gem_cma_get_sg_table(struct drm_gem_object *obj); +struct sg_table *drm_gem_cma_prime_get_sg_table(struct drm_gem_object *obj); struct drm_gem_object * drm_gem_cma_prime_import_sg_table(struct drm_device *dev, struct dma_buf_attachment *attach, struct sg_table *sgt); -int drm_gem_cma_vmap(struct drm_gem_object *obj, struct dma_buf_map *map); -int drm_gem_cma_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma); - -/** - * DRM_GEM_CMA_DRIVER_OPS_WITH_DUMB_CREATE - CMA GEM driver operations - * @dumb_create_func: callback function for .dumb_create - * - * This macro provides a shortcut for setting the default GEM operations in the - * &drm_driver structure. - * - * This macro is a variant of DRM_GEM_CMA_DRIVER_OPS for drivers that - * override the default implementation of &struct rm_driver.dumb_create. Use - * DRM_GEM_CMA_DRIVER_OPS if possible. Drivers that require a virtual address - * on imported buffers should use - * DRM_GEM_CMA_DRIVER_OPS_VMAP_WITH_DUMB_CREATE() instead. - */ -#define DRM_GEM_CMA_DRIVER_OPS_WITH_DUMB_CREATE(dumb_create_func) \ - .dumb_create = (dumb_create_func), \ - .prime_handle_to_fd = drm_gem_prime_handle_to_fd, \ - .prime_fd_to_handle = drm_gem_prime_fd_to_handle, \ - .gem_prime_import_sg_table = drm_gem_cma_prime_import_sg_table, \ - .gem_prime_mmap = drm_gem_prime_mmap - -/** - * DRM_GEM_CMA_DRIVER_OPS - CMA GEM driver operations - * - * This macro provides a shortcut for setting the default GEM operations in the - * &drm_driver structure. - * - * Drivers that come with their own implementation of - * &struct drm_driver.dumb_create should use - * DRM_GEM_CMA_DRIVER_OPS_WITH_DUMB_CREATE() instead. Use - * DRM_GEM_CMA_DRIVER_OPS if possible. Drivers that require a virtual address - * on imported buffers should use DRM_GEM_CMA_DRIVER_OPS_VMAP instead. 
- */ -#define DRM_GEM_CMA_DRIVER_OPS \ - DRM_GEM_CMA_DRIVER_OPS_WITH_DUMB_CREATE(drm_gem_cma_dumb_create) - -/** - * DRM_GEM_CMA_DRIVER_OPS_VMAP_WITH_DUMB_CREATE - CMA GEM driver operations - * ensuring a virtual address - * on the buffer - * @dumb_create_func: callback function for .dumb_create - * - * This macro provides a shortcut for setting the default GEM operations in the - * &drm_driver structure for drivers that need the virtual address also on - * imported buffers. - * - * This macro is a variant of DRM_GEM_CMA_DRIVER_OPS_VMAP for drivers that - * override the default implementation of &struct drm_driver.dumb_create. Use - * DRM_GEM_CMA_DRIVER_OPS_VMAP if possible. Drivers that do not require a - * virtual address on imported buffers should use - * DRM_GEM_CMA_DRIVER_OPS_WITH_DUMB_CREATE() instead. - */ -#define DRM_GEM_CMA_DRIVER_OPS_VMAP_WITH_DUMB_CREATE(dumb_create_func) \ - .dumb_create = dumb_create_func, \ - .prime_handle_to_fd = drm_gem_prime_handle_to_fd, \ - .prime_fd_to_handle = drm_gem_prime_fd_to_handle, \ - .gem_prime_import_sg_table = drm_gem_cma_prime_import_sg_table_vmap, \ - .gem_prime_mmap = drm_gem_prime_mmap - -/** - * DRM_GEM_CMA_DRIVER_OPS_VMAP - CMA GEM driver operations ensuring a virtual - * address on the buffer - * - * This macro provides a shortcut for setting the default GEM operations in the - * &drm_driver structure for drivers that need the virtual address also on - * imported buffers. - * - * Drivers that come with their own implementation of - * &struct drm_driver.dumb_create should use - * DRM_GEM_CMA_DRIVER_OPS_VMAP_WITH_DUMB_CREATE() instead. Use - * DRM_GEM_CMA_DRIVER_OPS_VMAP if possible. Drivers that do not require a - * virtual address on imported buffers should use DRM_GEM_CMA_DRIVER_OPS - * instead. 
- */ -#define DRM_GEM_CMA_DRIVER_OPS_VMAP \ - DRM_GEM_CMA_DRIVER_OPS_VMAP_WITH_DUMB_CREATE(drm_gem_cma_dumb_create) - -struct drm_gem_object * -drm_gem_cma_prime_import_sg_table_vmap(struct drm_device *drm, - struct dma_buf_attachment *attach, - struct sg_table *sgt); +int drm_gem_cma_prime_mmap(struct drm_gem_object *obj, + struct vm_area_struct *vma); +void *drm_gem_cma_prime_vmap(struct drm_gem_object *obj); +void drm_gem_cma_prime_vunmap(struct drm_gem_object *obj, void *vaddr); #endif /* __DRM_GEM_CMA_HELPER_H__ */ diff --git a/include/drm/drm_global.h b/include/drm/drm_global.h new file mode 100644 index 0000000000..a06805eaf6 --- /dev/null +++ b/include/drm/drm_global.h @@ -0,0 +1,53 @@ +/************************************************************************** + * + * Copyright 2008-2009 VMware, Inc., Palo Alto, CA., USA + * All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sub license, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial portions + * of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. 
IN NO EVENT SHALL + * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, + * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR + * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE + * USE OR OTHER DEALINGS IN THE SOFTWARE. + * + **************************************************************************/ +/* + * Authors: Thomas Hellstrom + */ + +#ifndef _DRM_GLOBAL_H_ +#define _DRM_GLOBAL_H_ +enum drm_global_types { + DRM_GLOBAL_TTM_MEM = 0, + DRM_GLOBAL_TTM_BO, + DRM_GLOBAL_TTM_OBJECT, + DRM_GLOBAL_NUM +}; + +struct drm_global_reference { + enum drm_global_types global_type; + size_t size; + void *object; + int (*init) (struct drm_global_reference *); + void (*release) (struct drm_global_reference *); +}; + +extern void drm_global_init(void); +extern void drm_global_release(void); +extern int drm_global_item_ref(struct drm_global_reference *ref); +extern void drm_global_item_unref(struct drm_global_reference *ref); + +#endif diff --git a/include/drm/drm_hashtab.h b/include/drm/drm_hashtab.h index bb95ff011b..fce2ef3fdf 100644 --- a/include/drm/drm_hashtab.h +++ b/include/drm/drm_hashtab.h @@ -49,17 +49,17 @@ struct drm_open_hash { u8 order; }; -int drm_ht_create(struct drm_open_hash *ht, unsigned int order); -int drm_ht_insert_item(struct drm_open_hash *ht, struct drm_hash_item *item); -int drm_ht_just_insert_please(struct drm_open_hash *ht, struct drm_hash_item *item, - unsigned long seed, int bits, int shift, - unsigned long add); -int drm_ht_find_item(struct drm_open_hash *ht, unsigned long key, struct drm_hash_item **item); +extern int drm_ht_create(struct drm_open_hash *ht, unsigned int order); +extern int drm_ht_insert_item(struct drm_open_hash *ht, struct drm_hash_item *item); +extern int drm_ht_just_insert_please(struct drm_open_hash *ht, struct drm_hash_item *item, + unsigned long seed, int bits, int shift, + unsigned long add); +extern int drm_ht_find_item(struct drm_open_hash *ht, 
unsigned long key, struct drm_hash_item **item); -void drm_ht_verbose_list(struct drm_open_hash *ht, unsigned long key); -int drm_ht_remove_key(struct drm_open_hash *ht, unsigned long key); -int drm_ht_remove_item(struct drm_open_hash *ht, struct drm_hash_item *item); -void drm_ht_remove(struct drm_open_hash *ht); +extern void drm_ht_verbose_list(struct drm_open_hash *ht, unsigned long key); +extern int drm_ht_remove_key(struct drm_open_hash *ht, unsigned long key); +extern int drm_ht_remove_item(struct drm_open_hash *ht, struct drm_hash_item *item); +extern void drm_ht_remove(struct drm_open_hash *ht); /* * RCU-safe interface diff --git a/include/drm/drm_irq.h b/include/drm/drm_irq.h index d77f6e65b1..2401b14d30 100644 --- a/include/drm/drm_irq.h +++ b/include/drm/drm_irq.h @@ -24,9 +24,160 @@ #ifndef _DRM_IRQ_H_ #define _DRM_IRQ_H_ -struct drm_device; +#include -int drm_irq_install(struct drm_device *dev, int irq); -int drm_irq_uninstall(struct drm_device *dev); +/** + * struct drm_pending_vblank_event - pending vblank event tracking + */ +struct drm_pending_vblank_event { + /** + * @base: Base structure for tracking pending DRM events. + */ + struct drm_pending_event base; + /** + * @pipe: drm_crtc_index() of the &drm_crtc this event is for. + */ + unsigned int pipe; + /** + * @event: Actual event which will be sent to userspace. + */ + struct drm_event_vblank event; +}; + +/** + * struct drm_vblank_crtc - vblank tracking for a CRTC + * + * This structure tracks the vblank state for one CRTC. + * + * Note that for historical reasons - the vblank handling code is still shared + * with legacy/non-kms drivers - this is a free-standing structure not directly + * connected to struct &drm_crtc. But all public interface functions are taking + * a struct &drm_crtc to hide this implementation detail. + */ +struct drm_vblank_crtc { + /** + * @dev: Pointer to the &drm_device. + */ + struct drm_device *dev; + /** + * @queue: Wait queue for vblank waiters. 
+ */ + wait_queue_head_t queue; /**< VBLANK wait queue */ + /** + * @disable_timer: Disable timer for the delayed vblank disabling + * hysteresis logic. Vblank disabling is controlled through the + * drm_vblank_offdelay module option and the setting of the + * max_vblank_count value in the &drm_device structure. + */ + struct timer_list disable_timer; + + /** + * @seqlock: Protect vblank count and time. + */ + seqlock_t seqlock; /* protects vblank count and time */ + + /** + * @count: Current software vblank counter. + */ + u32 count; + /** + * @time: Vblank timestamp corresponding to @count. + */ + struct timeval time; + + /** + * @refcount: Number of users/waiters of the vblank interrupt. Only when + * this refcount reaches 0 can the hardware interrupt be disabled using + * @disable_timer. + */ + atomic_t refcount; /* number of users of vblank interruptsper crtc */ + /** + * @last: Protected by dev->vbl_lock, used for wraparound handling. + */ + u32 last; + /** + * @inmodeset: Tracks whether the vblank is disabled due to a modeset. + * For legacy driver bit 2 additionally tracks whether an additional + * temporary vblank reference has been acquired to paper over the + * hardware counter resetting/jumping. KMS drivers should instead just + * call drm_crtc_vblank_off() and drm_crtc_vblank_on(), which explicitly + * save and restore the vblank count. + */ + unsigned int inmodeset; /* Display driver is setting mode */ + /** + * @pipe: drm_crtc_index() of the &drm_crtc corresponding to this + * structure. + */ + unsigned int pipe; + /** + * @framedur_ns: Frame/Field duration in ns, used by + * drm_calc_vbltimestamp_from_scanoutpos() and computed by + * drm_calc_timestamping_constants(). + */ + int framedur_ns; + /** + * @linedur_ns: Line duration in ns, used by + * drm_calc_vbltimestamp_from_scanoutpos() and computed by + * drm_calc_timestamping_constants(). 
+ */ + int linedur_ns; + /** + * @enabled: Tracks the enabling state of the corresponding &drm_crtc to + * avoid double-disabling and hence corrupting saved state. Needed by + * drivers not using atomic KMS, since those might go through their CRTC + * disabling functions multiple times. + */ + bool enabled; +}; + +extern int drm_irq_install(struct drm_device *dev, int irq); +extern int drm_irq_uninstall(struct drm_device *dev); + +extern int drm_vblank_init(struct drm_device *dev, unsigned int num_crtcs); +extern int drm_wait_vblank(struct drm_device *dev, void *data, + struct drm_file *filp); +extern u32 drm_vblank_count(struct drm_device *dev, unsigned int pipe); +extern u32 drm_crtc_vblank_count(struct drm_crtc *crtc); +extern u32 drm_crtc_vblank_count_and_time(struct drm_crtc *crtc, + struct timeval *vblanktime); +extern void drm_crtc_send_vblank_event(struct drm_crtc *crtc, + struct drm_pending_vblank_event *e); +extern void drm_crtc_arm_vblank_event(struct drm_crtc *crtc, + struct drm_pending_vblank_event *e); +extern bool drm_handle_vblank(struct drm_device *dev, unsigned int pipe); +extern bool drm_crtc_handle_vblank(struct drm_crtc *crtc); +extern int drm_crtc_vblank_get(struct drm_crtc *crtc); +extern void drm_crtc_vblank_put(struct drm_crtc *crtc); +extern void drm_wait_one_vblank(struct drm_device *dev, unsigned int pipe); +extern void drm_crtc_wait_one_vblank(struct drm_crtc *crtc); +extern void drm_vblank_off(struct drm_device *dev, unsigned int pipe); +extern void drm_vblank_on(struct drm_device *dev, unsigned int pipe); +extern void drm_crtc_vblank_off(struct drm_crtc *crtc); +extern void drm_crtc_vblank_reset(struct drm_crtc *crtc); +extern void drm_crtc_vblank_on(struct drm_crtc *crtc); +extern void drm_vblank_cleanup(struct drm_device *dev); +extern u32 drm_accurate_vblank_count(struct drm_crtc *crtc); +extern u32 drm_vblank_no_hw_counter(struct drm_device *dev, unsigned int pipe); + +extern int drm_calc_vbltimestamp_from_scanoutpos(struct 
drm_device *dev, + unsigned int pipe, int *max_error, + struct timeval *vblank_time, + unsigned flags, + const struct drm_display_mode *mode); +extern void drm_calc_timestamping_constants(struct drm_crtc *crtc, + const struct drm_display_mode *mode); + +/** + * drm_crtc_vblank_waitqueue - get vblank waitqueue for the CRTC + * @crtc: which CRTC's vblank waitqueue to retrieve + * + * This function returns a pointer to the vblank waitqueue for the CRTC. + * Drivers can use this to implement vblank waits using wait_event() and related + * functions. + */ +static inline wait_queue_head_t *drm_crtc_vblank_waitqueue(struct drm_crtc *crtc) +{ + return &crtc->dev->vblank[drm_crtc_index(crtc)].queue; +} #endif diff --git a/include/drm/drm_legacy.h b/include/drm/drm_legacy.h index 58dc8d8cc9..cf0e7d89bc 100644 --- a/include/drm/drm_legacy.h +++ b/include/drm/drm_legacy.h @@ -1,5 +1,8 @@ #ifndef __DRM_DRM_LEGACY_H__ #define __DRM_DRM_LEGACY_H__ + +#include + /* * Legacy driver interfaces for the Direct Rendering Manager * @@ -33,16 +36,6 @@ * OTHER DEALINGS IN THE SOFTWARE. 
*/ -#include - -#include -#include -#include - -struct drm_device; -struct drm_driver; -struct file; -struct pci_driver; /* * Legacy Support for palateontologic DRM drivers @@ -138,7 +131,7 @@ struct drm_sg_mem { * Kernel side of a mapping */ struct drm_local_map { - dma_addr_t offset; /**< Requested physical address (0 for SAREA)*/ + resource_size_t offset; /**< Requested physical address (0 for SAREA)*/ unsigned long size; /**< Requested physical size (bytes) */ enum drm_map_type type; /**< Type of memory to map */ enum drm_map_flags flags; /**< Flags */ @@ -163,9 +156,10 @@ struct drm_map_list { int drm_legacy_addmap(struct drm_device *d, resource_size_t offset, unsigned int size, enum drm_map_type type, enum drm_map_flags flags, struct drm_local_map **map_p); -struct drm_local_map *drm_legacy_findmap(struct drm_device *dev, unsigned int token); void drm_legacy_rmmap(struct drm_device *d, struct drm_local_map *map); int drm_legacy_rmmap_locked(struct drm_device *d, struct drm_local_map *map); +void drm_legacy_master_rmmaps(struct drm_device *dev, + struct drm_master *master); struct drm_local_map *drm_legacy_getsarea(struct drm_device *dev); int drm_legacy_mmap(struct file *filp, struct vm_area_struct *vma); @@ -192,127 +186,22 @@ do { \ void drm_legacy_idlelock_take(struct drm_lock_data *lock); void drm_legacy_idlelock_release(struct drm_lock_data *lock); -/* drm_irq.c */ -int drm_legacy_irq_uninstall(struct drm_device *dev); - -/* drm_pci.c */ - -#ifdef CONFIG_PCI - -int drm_legacy_pci_init(const struct drm_driver *driver, - struct pci_driver *pdriver); -void drm_legacy_pci_exit(const struct drm_driver *driver, - struct pci_driver *pdriver); - -#else - -static inline struct drm_dma_handle *drm_pci_alloc(struct drm_device *dev, - size_t size, size_t align) -{ - return NULL; -} - -static inline void drm_pci_free(struct drm_device *dev, - struct drm_dma_handle *dmah) -{ -} - -static inline int drm_legacy_pci_init(const struct drm_driver *driver, - struct 
pci_driver *pdriver) -{ - return -EINVAL; -} - -static inline void drm_legacy_pci_exit(const struct drm_driver *driver, - struct pci_driver *pdriver) -{ -} - -#endif - -/* - * AGP Support - */ - -struct drm_agp_head { - struct agp_kern_info agp_info; - struct list_head memory; - unsigned long mode; - struct agp_bridge_data *bridge; - int enabled; - int acquired; - unsigned long base; - int agp_mtrr; - int cant_use_aperture; - unsigned long page_mask; -}; - -#if IS_ENABLED(CONFIG_DRM_LEGACY) && IS_ENABLED(CONFIG_AGP) -struct drm_agp_head *drm_legacy_agp_init(struct drm_device *dev); -int drm_legacy_agp_acquire(struct drm_device *dev); -int drm_legacy_agp_release(struct drm_device *dev); -int drm_legacy_agp_enable(struct drm_device *dev, struct drm_agp_mode mode); -int drm_legacy_agp_info(struct drm_device *dev, struct drm_agp_info *info); -int drm_legacy_agp_alloc(struct drm_device *dev, struct drm_agp_buffer *request); -int drm_legacy_agp_free(struct drm_device *dev, struct drm_agp_buffer *request); -int drm_legacy_agp_unbind(struct drm_device *dev, struct drm_agp_binding *request); -int drm_legacy_agp_bind(struct drm_device *dev, struct drm_agp_binding *request); -#else -static inline struct drm_agp_head *drm_legacy_agp_init(struct drm_device *dev) -{ - return NULL; -} - -static inline int drm_legacy_agp_acquire(struct drm_device *dev) -{ - return -ENODEV; -} - -static inline int drm_legacy_agp_release(struct drm_device *dev) -{ - return -ENODEV; -} - -static inline int drm_legacy_agp_enable(struct drm_device *dev, - struct drm_agp_mode mode) -{ - return -ENODEV; -} - -static inline int drm_legacy_agp_info(struct drm_device *dev, - struct drm_agp_info *info) -{ - return -ENODEV; -} - -static inline int drm_legacy_agp_alloc(struct drm_device *dev, - struct drm_agp_buffer *request) -{ - return -ENODEV; -} - -static inline int drm_legacy_agp_free(struct drm_device *dev, - struct drm_agp_buffer *request) -{ - return -ENODEV; -} - -static inline int 
drm_legacy_agp_unbind(struct drm_device *dev, - struct drm_agp_binding *request) -{ - return -ENODEV; -} - -static inline int drm_legacy_agp_bind(struct drm_device *dev, - struct drm_agp_binding *request) -{ - return -ENODEV; -} -#endif +/* drm_pci.c dma alloc wrappers */ +void __drm_legacy_pci_free(struct drm_device *dev, drm_dma_handle_t * dmah); /* drm_memory.c */ void drm_legacy_ioremap(struct drm_local_map *map, struct drm_device *dev); void drm_legacy_ioremap_wc(struct drm_local_map *map, struct drm_device *dev); void drm_legacy_ioremapfree(struct drm_local_map *map, struct drm_device *dev); +static __inline__ struct drm_local_map *drm_legacy_findmap(struct drm_device *dev, + unsigned int token) +{ + struct drm_map_list *_entry; + list_for_each_entry(_entry, &dev->maplist, head) + if (_entry->user_token == token) + return _entry->map; + return NULL; +} + #endif /* __DRM_DRM_LEGACY_H__ */ diff --git a/include/drm/drm_mem_util.h b/include/drm/drm_mem_util.h new file mode 100644 index 0000000000..70d4e221a3 --- /dev/null +++ b/include/drm/drm_mem_util.h @@ -0,0 +1,81 @@ +/* + * Copyright © 2008 Intel Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. + * + * Authors: + * Jesse Barnes + * + */ +#ifndef _DRM_MEM_UTIL_H_ +#define _DRM_MEM_UTIL_H_ + +#include + +static __inline__ void *drm_calloc_large(size_t nmemb, size_t size) +{ + if (size != 0 && nmemb > SIZE_MAX / size) + return NULL; + + if (size * nmemb <= PAGE_SIZE) + return kcalloc(nmemb, size, GFP_KERNEL); + + return __vmalloc(size * nmemb, + GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, PAGE_KERNEL); +} + +/* Modeled after cairo's malloc_ab, it's like calloc but without the zeroing. */ +static __inline__ void *drm_malloc_ab(size_t nmemb, size_t size) +{ + if (size != 0 && nmemb > SIZE_MAX / size) + return NULL; + + if (size * nmemb <= PAGE_SIZE) + return kmalloc(nmemb * size, GFP_KERNEL); + + return __vmalloc(size * nmemb, + GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL); +} + +static __inline__ void *drm_malloc_gfp(size_t nmemb, size_t size, gfp_t gfp) +{ + if (size != 0 && nmemb > SIZE_MAX / size) + return NULL; + + if (size * nmemb <= PAGE_SIZE) + return kmalloc(nmemb * size, gfp); + + if (gfp & __GFP_RECLAIMABLE) { + void *ptr = kmalloc(nmemb * size, + gfp | __GFP_NOWARN | __GFP_NORETRY); + if (ptr) + return ptr; + } + + return __vmalloc(size * nmemb, + gfp | __GFP_HIGHMEM, PAGE_KERNEL); +} + +static __inline void drm_free_large(void *ptr) +{ + kvfree(ptr); +} + +#endif diff --git a/include/drm/drm_mipi_dsi.h b/include/drm/drm_mipi_dsi.h index af7ba8071e..4fef19064b 100644 --- a/include/drm/drm_mipi_dsi.h +++ b/include/drm/drm_mipi_dsi.h @@ -1,9 +1,12 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * MIPI DSI Bus * * Copyright (C) 2012-2013, Samsung Electronics, Co., Ltd. 
* Andrzej Hajda + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. */ #ifndef __DRM_MIPI_DSI_H__ @@ -13,7 +16,6 @@ struct mipi_dsi_host; struct mipi_dsi_device; -struct drm_dsc_picture_parameter_set; /* request ACK from peripheral */ #define MIPI_DSI_MSG_REQ_ACK BIT(0) @@ -80,11 +82,6 @@ int mipi_dsi_create_packet(struct mipi_dsi_packet *packet, * Note that typically DSI packet transmission is atomic, so the .transfer() * function will seldomly return anything other than the number of bytes * contained in the transmit buffer on success. - * - * Also note that those callbacks can be called no matter the state the - * host is in. Drivers that need the underlying device to be powered to - * perform these operations will first need to make sure it's been - * properly enabled. */ struct mipi_dsi_host_ops { int (*attach)(struct mipi_dsi_host *host, @@ -124,15 +121,15 @@ struct mipi_dsi_host *of_find_mipi_dsi_host_by_node(struct device_node *node); /* enable hsync-end packets in vsync-pulse and v-porch area */ #define MIPI_DSI_MODE_VIDEO_HSE BIT(4) /* disable hfront-porch area */ -#define MIPI_DSI_MODE_VIDEO_NO_HFP BIT(5) +#define MIPI_DSI_MODE_VIDEO_HFP BIT(5) /* disable hback-porch area */ -#define MIPI_DSI_MODE_VIDEO_NO_HBP BIT(6) +#define MIPI_DSI_MODE_VIDEO_HBP BIT(6) /* disable hsync-active area */ -#define MIPI_DSI_MODE_VIDEO_NO_HSA BIT(7) +#define MIPI_DSI_MODE_VIDEO_HSA BIT(7) /* flush display FIFO on vsync pulse */ #define MIPI_DSI_MODE_VSYNC_FLUSH BIT(8) /* disable EoT packets in HS mode */ -#define MIPI_DSI_MODE_NO_EOT_PACKET BIT(9) +#define MIPI_DSI_MODE_EOT_PACKET BIT(9) /* device supports non-continuous clock behavior (DSI spec 5.6.1) */ #define MIPI_DSI_CLOCK_NON_CONTINUOUS BIT(10) /* transmit data in low power */ @@ -171,12 +168,6 @@ struct mipi_dsi_device_info { * @format: pixel format for video mode * @lanes: 
number of active data lanes * @mode_flags: DSI operation mode related flags - * @hs_rate: maximum lane frequency for high speed mode in hertz, this should - * be set to the real limits of the hardware, zero is only accepted for - * legacy drivers - * @lp_rate: maximum lane frequency for low power mode in hertz, this should - * be set to the real limits of the hardware, zero is only accepted for - * legacy drivers */ struct mipi_dsi_device { struct mipi_dsi_host *host; @@ -187,8 +178,6 @@ struct mipi_dsi_device { unsigned int lanes; enum mipi_dsi_pixel_format format; unsigned long mode_flags; - unsigned long hs_rate; - unsigned long lp_rate; }; #define MIPI_DSI_MODULE_PREFIX "mipi-dsi:" @@ -234,9 +223,6 @@ int mipi_dsi_shutdown_peripheral(struct mipi_dsi_device *dsi); int mipi_dsi_turn_on_peripheral(struct mipi_dsi_device *dsi); int mipi_dsi_set_maximum_return_packet_size(struct mipi_dsi_device *dsi, u16 value); -ssize_t mipi_dsi_compression_mode(struct mipi_dsi_device *dsi, bool enable); -ssize_t mipi_dsi_picture_parameter_set(struct mipi_dsi_device *dsi, - const struct drm_dsc_picture_parameter_set *pps); ssize_t mipi_dsi_generic_write(struct mipi_dsi_device *dsi, const void *payload, size_t size); diff --git a/include/drm/drm_mm.h b/include/drm/drm_mm.h index 9b4292f229..0934d317fc 100644 --- a/include/drm/drm_mm.h +++ b/include/drm/drm_mm.h @@ -1,7 +1,6 @@ /************************************************************************** * * Copyright 2006-2008 Tungsten Graphics, Inc., Cedar Park, TX. USA. - * Copyright 2016 Intel Corporation * All Rights Reserved. 
* * Permission is hereby granted, free of charge, to any person obtaining a @@ -40,260 +39,98 @@ #include #include #include -#include #include #include -#ifdef CONFIG_DRM_DEBUG_MM -#include -#endif -#include - -#ifdef CONFIG_DRM_DEBUG_MM -#define DRM_MM_BUG_ON(expr) BUG_ON(expr) -#else -#define DRM_MM_BUG_ON(expr) BUILD_BUG_ON_INVALID(expr) +#ifdef CONFIG_DEBUG_FS +#include #endif -/** - * enum drm_mm_insert_mode - control search and allocation behaviour - * - * The &struct drm_mm range manager supports finding a suitable modes using - * a number of search trees. These trees are oranised by size, by address and - * in most recent eviction order. This allows the user to find either the - * smallest hole to reuse, the lowest or highest address to reuse, or simply - * reuse the most recent eviction that fits. When allocating the &drm_mm_node - * from within the hole, the &drm_mm_insert_mode also dictate whether to - * allocate the lowest matching address or the highest. - */ -enum drm_mm_insert_mode { - /** - * @DRM_MM_INSERT_BEST: - * - * Search for the smallest hole (within the search range) that fits - * the desired node. - * - * Allocates the node from the bottom of the found hole. - */ - DRM_MM_INSERT_BEST = 0, - - /** - * @DRM_MM_INSERT_LOW: - * - * Search for the lowest hole (address closest to 0, within the search - * range) that fits the desired node. - * - * Allocates the node from the bottom of the found hole. - */ - DRM_MM_INSERT_LOW, - - /** - * @DRM_MM_INSERT_HIGH: - * - * Search for the highest hole (address closest to U64_MAX, within the - * search range) that fits the desired node. - * - * Allocates the node from the *top* of the found hole. The specified - * alignment for the node is applied to the base of the node - * (&drm_mm_node.start). - */ - DRM_MM_INSERT_HIGH, - - /** - * @DRM_MM_INSERT_EVICT: - * - * Search for the most recently evicted hole (within the search range) - * that fits the desired node. 
This is appropriate for use immediately - * after performing an eviction scan (see drm_mm_scan_init()) and - * removing the selected nodes to form a hole. - * - * Allocates the node from the bottom of the found hole. - */ - DRM_MM_INSERT_EVICT, - - /** - * @DRM_MM_INSERT_ONCE: - * - * Only check the first hole for suitablity and report -ENOSPC - * immediately otherwise, rather than check every hole until a - * suitable one is found. Can only be used in conjunction with another - * search method such as DRM_MM_INSERT_HIGH or DRM_MM_INSERT_LOW. - */ - DRM_MM_INSERT_ONCE = BIT(31), - - /** - * @DRM_MM_INSERT_HIGHEST: - * - * Only check the highest hole (the hole with the largest address) and - * insert the node at the top of the hole or report -ENOSPC if - * unsuitable. - * - * Does not search all holes. - */ - DRM_MM_INSERT_HIGHEST = DRM_MM_INSERT_HIGH | DRM_MM_INSERT_ONCE, - - /** - * @DRM_MM_INSERT_LOWEST: - * - * Only check the lowest hole (the hole with the smallest address) and - * insert the node at the bottom of the hole or report -ENOSPC if - * unsuitable. - * - * Does not search all holes. - */ - DRM_MM_INSERT_LOWEST = DRM_MM_INSERT_LOW | DRM_MM_INSERT_ONCE, +enum drm_mm_search_flags { + DRM_MM_SEARCH_DEFAULT = 0, + DRM_MM_SEARCH_BEST = 1 << 0, + DRM_MM_SEARCH_BELOW = 1 << 1, }; -/** - * struct drm_mm_node - allocated block in the DRM allocator - * - * This represents an allocated block in a &drm_mm allocator. Except for - * pre-reserved nodes inserted using drm_mm_reserve_node() the structure is - * entirely opaque and should only be accessed through the provided funcions. - * Since allocation of these nodes is entirely handled by the driver they can be - * embedded. 
- */ +enum drm_mm_allocator_flags { + DRM_MM_CREATE_DEFAULT = 0, + DRM_MM_CREATE_TOP = 1 << 0, +}; + +#define DRM_MM_BOTTOMUP DRM_MM_SEARCH_DEFAULT, DRM_MM_CREATE_DEFAULT +#define DRM_MM_TOPDOWN DRM_MM_SEARCH_BELOW, DRM_MM_CREATE_TOP + struct drm_mm_node { - /** @color: Opaque driver-private tag. */ - unsigned long color; - /** @start: Start address of the allocated block. */ - u64 start; - /** @size: Size of the allocated block. */ - u64 size; - /* private: */ - struct drm_mm *mm; struct list_head node_list; struct list_head hole_stack; struct rb_node rb; - struct rb_node rb_hole_size; - struct rb_node rb_hole_addr; + unsigned hole_follows : 1; + unsigned scanned_block : 1; + unsigned scanned_prev_free : 1; + unsigned scanned_next_free : 1; + unsigned scanned_preceeds_hole : 1; + unsigned allocated : 1; + unsigned long color; + u64 start; + u64 size; u64 __subtree_last; - u64 hole_size; - u64 subtree_max_hole; - unsigned long flags; -#define DRM_MM_NODE_ALLOCATED_BIT 0 -#define DRM_MM_NODE_SCANNED_BIT 1 -#ifdef CONFIG_DRM_DEBUG_MM - depot_stack_handle_t stack; -#endif + struct drm_mm *mm; }; -/** - * struct drm_mm - DRM allocator - * - * DRM range allocator with a few special functions and features geared towards - * managing GPU memory. Except for the @color_adjust callback the structure is - * entirely opaque and should only be accessed through the provided functions - * and macros. This structure can be embedded into larger driver structures. - */ struct drm_mm { - /** - * @color_adjust: - * - * Optional driver callback to further apply restrictions on a hole. The - * node argument points at the node containing the hole from which the - * block would be allocated (see drm_mm_hole_follows() and friends). The - * other arguments are the size of the block to be allocated. The driver - * can adjust the start and end as needed to e.g. insert guard pages. 
- */ - void (*color_adjust)(const struct drm_mm_node *node, - unsigned long color, - u64 *start, u64 *end); - - /* private: */ /* List of all memory nodes that immediately precede a free hole. */ struct list_head hole_stack; /* head_node.node_list is the list of all memory nodes, ordered * according to the (increasing) start address of the memory node. */ struct drm_mm_node head_node; /* Keep an interval_tree for fast lookup of drm_mm_nodes by address. */ - struct rb_root_cached interval_tree; - struct rb_root_cached holes_size; - struct rb_root holes_addr; + struct rb_root interval_tree; - unsigned long scan_active; -}; + unsigned int scan_check_range : 1; + unsigned scan_alignment; + unsigned long scan_color; + u64 scan_size; + u64 scan_hit_start; + u64 scan_hit_end; + unsigned scanned_blocks; + u64 scan_start; + u64 scan_end; + struct drm_mm_node *prev_scanned_node; -/** - * struct drm_mm_scan - DRM allocator eviction roaster data - * - * This structure tracks data needed for the eviction roaster set up using - * drm_mm_scan_init(), and used with drm_mm_scan_add_block() and - * drm_mm_scan_remove_block(). The structure is entirely opaque and should only - * be accessed through the provided functions and macros. It is meant to be - * allocated temporarily by the driver on the stack. - */ -struct drm_mm_scan { - /* private: */ - struct drm_mm *mm; - - u64 size; - u64 alignment; - u64 remainder_mask; - - u64 range_start; - u64 range_end; - - u64 hit_start; - u64 hit_end; - - unsigned long color; - enum drm_mm_insert_mode mode; + void (*color_adjust)(struct drm_mm_node *node, unsigned long color, + u64 *start, u64 *end); }; /** * drm_mm_node_allocated - checks whether a node is allocated * @node: drm_mm_node to check * - * Drivers are required to clear a node prior to using it with the - * drm_mm range manager. 
- * - * Drivers should use this helper for proper encapsulation of drm_mm + * Drivers should use this helpers for proper encapusulation of drm_mm * internals. * * Returns: * True if the @node is allocated. */ -static inline bool drm_mm_node_allocated(const struct drm_mm_node *node) +static inline bool drm_mm_node_allocated(struct drm_mm_node *node) { - return test_bit(DRM_MM_NODE_ALLOCATED_BIT, &node->flags); + return node->allocated; } /** * drm_mm_initialized - checks whether an allocator is initialized * @mm: drm_mm to check * - * Drivers should clear the struct drm_mm prior to initialisation if they - * want to use this function. - * - * Drivers should use this helper for proper encapsulation of drm_mm + * Drivers should use this helpers for proper encapusulation of drm_mm * internals. * * Returns: * True if the @mm is initialized. */ -static inline bool drm_mm_initialized(const struct drm_mm *mm) +static inline bool drm_mm_initialized(struct drm_mm *mm) { - return READ_ONCE(mm->hole_stack.next); + return mm->hole_stack.next; } -/** - * drm_mm_hole_follows - checks whether a hole follows this node - * @node: drm_mm_node to check - * - * Holes are embedded into the drm_mm using the tail of a drm_mm_node. - * If you wish to know whether a hole follows this particular node, - * query this function. See also drm_mm_hole_node_start() and - * drm_mm_hole_node_end(). - * - * Returns: - * True if a hole follows the @node. 
- */ -static inline bool drm_mm_hole_follows(const struct drm_mm_node *node) -{ - return node->hole_size; -} - -static inline u64 __drm_mm_hole_node_start(const struct drm_mm_node *hole_node) +static inline u64 __drm_mm_hole_node_start(struct drm_mm_node *hole_node) { return hole_node->start + hole_node->size; } @@ -302,20 +139,20 @@ static inline u64 __drm_mm_hole_node_start(const struct drm_mm_node *hole_node) * drm_mm_hole_node_start - computes the start of the hole following @node * @hole_node: drm_mm_node which implicitly tracks the following hole * - * This is useful for driver-specific debug dumpers. Otherwise drivers should - * not inspect holes themselves. Drivers must check first whether a hole indeed - * follows by looking at drm_mm_hole_follows() + * This is useful for driver-sepific debug dumpers. Otherwise drivers should not + * inspect holes themselves. Drivers must check first whether a hole indeed + * follows by looking at node->hole_follows. * * Returns: * Start of the subsequent hole. */ -static inline u64 drm_mm_hole_node_start(const struct drm_mm_node *hole_node) +static inline u64 drm_mm_hole_node_start(struct drm_mm_node *hole_node) { - DRM_MM_BUG_ON(!drm_mm_hole_follows(hole_node)); + BUG_ON(!hole_node->hole_follows); return __drm_mm_hole_node_start(hole_node); } -static inline u64 __drm_mm_hole_node_end(const struct drm_mm_node *hole_node) +static inline u64 __drm_mm_hole_node_end(struct drm_mm_node *hole_node) { return list_next_entry(hole_node, node_list)->start; } @@ -324,126 +161,79 @@ static inline u64 __drm_mm_hole_node_end(const struct drm_mm_node *hole_node) * drm_mm_hole_node_end - computes the end of the hole following @node * @hole_node: drm_mm_node which implicitly tracks the following hole * - * This is useful for driver-specific debug dumpers. Otherwise drivers should - * not inspect holes themselves. Drivers must check first whether a hole indeed - * follows by looking at drm_mm_hole_follows(). 
+ * This is useful for driver-sepific debug dumpers. Otherwise drivers should not + * inspect holes themselves. Drivers must check first whether a hole indeed + * follows by looking at node->hole_follows. * * Returns: * End of the subsequent hole. */ -static inline u64 drm_mm_hole_node_end(const struct drm_mm_node *hole_node) +static inline u64 drm_mm_hole_node_end(struct drm_mm_node *hole_node) { return __drm_mm_hole_node_end(hole_node); } -/** - * drm_mm_nodes - list of nodes under the drm_mm range manager - * @mm: the struct drm_mm range manager - * - * As the drm_mm range manager hides its node_list deep with its - * structure, extracting it looks painful and repetitive. This is - * not expected to be used outside of the drm_mm_for_each_node() - * macros and similar internal functions. - * - * Returns: - * The node list, may be empty. - */ -#define drm_mm_nodes(mm) (&(mm)->head_node.node_list) - /** * drm_mm_for_each_node - iterator to walk over all allocated nodes - * @entry: &struct drm_mm_node to assign to in each iteration step - * @mm: &drm_mm allocator to walk + * @entry: drm_mm_node structure to assign to in each iteration step + * @mm: drm_mm allocator to walk * * This iterator walks over all nodes in the range allocator. It is implemented - * with list_for_each(), so not save against removal of elements. + * with list_for_each, so not save against removal of elements. */ -#define drm_mm_for_each_node(entry, mm) \ - list_for_each_entry(entry, drm_mm_nodes(mm), node_list) +#define drm_mm_for_each_node(entry, mm) list_for_each_entry(entry, \ + &(mm)->head_node.node_list, \ + node_list) -/** - * drm_mm_for_each_node_safe - iterator to walk over all allocated nodes - * @entry: &struct drm_mm_node to assign to in each iteration step - * @next: &struct drm_mm_node to store the next step - * @mm: &drm_mm allocator to walk - * - * This iterator walks over all nodes in the range allocator. 
It is implemented - * with list_for_each_safe(), so save against removal of elements. - */ -#define drm_mm_for_each_node_safe(entry, next, mm) \ - list_for_each_entry_safe(entry, next, drm_mm_nodes(mm), node_list) +#define __drm_mm_for_each_hole(entry, mm, hole_start, hole_end, backwards) \ + for (entry = list_entry((backwards) ? (mm)->hole_stack.prev : (mm)->hole_stack.next, struct drm_mm_node, hole_stack); \ + &entry->hole_stack != &(mm)->hole_stack ? \ + hole_start = drm_mm_hole_node_start(entry), \ + hole_end = drm_mm_hole_node_end(entry), \ + 1 : 0; \ + entry = list_entry((backwards) ? entry->hole_stack.prev : entry->hole_stack.next, struct drm_mm_node, hole_stack)) /** * drm_mm_for_each_hole - iterator to walk over all holes - * @pos: &drm_mm_node used internally to track progress - * @mm: &drm_mm allocator to walk + * @entry: drm_mm_node used internally to track progress + * @mm: drm_mm allocator to walk * @hole_start: ulong variable to assign the hole start to on each iteration * @hole_end: ulong variable to assign the hole end to on each iteration * * This iterator walks over all holes in the range allocator. It is implemented - * with list_for_each(), so not save against removal of elements. @entry is used + * with list_for_each, so not save against removal of elements. @entry is used * internally and will not reflect a real drm_mm_node for the very first hole. * Hence users of this iterator may not access it. * * Implementation Note: * We need to inline list_for_each_entry in order to be able to set hole_start * and hole_end on each iteration while keeping the macro sane. + * + * The __drm_mm_for_each_hole version is similar, but with added support for + * going backwards. */ -#define drm_mm_for_each_hole(pos, mm, hole_start, hole_end) \ - for (pos = list_first_entry(&(mm)->hole_stack, \ - typeof(*pos), hole_stack); \ - &pos->hole_stack != &(mm)->hole_stack ? 
\ - hole_start = drm_mm_hole_node_start(pos), \ - hole_end = hole_start + pos->hole_size, \ - 1 : 0; \ - pos = list_next_entry(pos, hole_stack)) +#define drm_mm_for_each_hole(entry, mm, hole_start, hole_end) \ + __drm_mm_for_each_hole(entry, mm, hole_start, hole_end, 0) /* * Basic range manager support (drm_mm.c) */ int drm_mm_reserve_node(struct drm_mm *mm, struct drm_mm_node *node); -int drm_mm_insert_node_in_range(struct drm_mm *mm, - struct drm_mm_node *node, - u64 size, - u64 alignment, - unsigned long color, - u64 start, - u64 end, - enum drm_mm_insert_mode mode); - -/** - * drm_mm_insert_node_generic - search for space and insert @node - * @mm: drm_mm to allocate from - * @node: preallocate node to insert - * @size: size of the allocation - * @alignment: alignment of the allocation - * @color: opaque tag value to use for this node - * @mode: fine-tune the allocation search and placement - * - * This is a simplified version of drm_mm_insert_node_in_range() with no - * range restrictions applied. - * - * The preallocated node must be cleared to 0. - * - * Returns: - * 0 on success, -ENOSPC if there's no suitable hole. - */ -static inline int -drm_mm_insert_node_generic(struct drm_mm *mm, struct drm_mm_node *node, - u64 size, u64 alignment, - unsigned long color, - enum drm_mm_insert_mode mode) -{ - return drm_mm_insert_node_in_range(mm, node, - size, alignment, color, - 0, U64_MAX, mode); -} +int drm_mm_insert_node_generic(struct drm_mm *mm, + struct drm_mm_node *node, + u64 size, + unsigned alignment, + unsigned long color, + enum drm_mm_search_flags sflags, + enum drm_mm_allocator_flags aflags); /** * drm_mm_insert_node - search for space and insert @node * @mm: drm_mm to allocate from * @node: preallocate node to insert * @size: size of the allocation + * @alignment: alignment of the allocation + * @flags: flags to fine-tune the allocation * * This is a simplified version of drm_mm_insert_node_generic() with @color set * to 0. 
@@ -455,97 +245,84 @@ drm_mm_insert_node_generic(struct drm_mm *mm, struct drm_mm_node *node, */ static inline int drm_mm_insert_node(struct drm_mm *mm, struct drm_mm_node *node, - u64 size) + u64 size, + unsigned alignment, + enum drm_mm_search_flags flags) { - return drm_mm_insert_node_generic(mm, node, size, 0, 0, 0); + return drm_mm_insert_node_generic(mm, node, size, alignment, 0, flags, + DRM_MM_CREATE_DEFAULT); +} + +int drm_mm_insert_node_in_range_generic(struct drm_mm *mm, + struct drm_mm_node *node, + u64 size, + unsigned alignment, + unsigned long color, + u64 start, + u64 end, + enum drm_mm_search_flags sflags, + enum drm_mm_allocator_flags aflags); +/** + * drm_mm_insert_node_in_range - ranged search for space and insert @node + * @mm: drm_mm to allocate from + * @node: preallocate node to insert + * @size: size of the allocation + * @alignment: alignment of the allocation + * @start: start of the allowed range for this node + * @end: end of the allowed range for this node + * @flags: flags to fine-tune the allocation + * + * This is a simplified version of drm_mm_insert_node_in_range_generic() with + * @color set to 0. + * + * The preallocated node must be cleared to 0. + * + * Returns: + * 0 on success, -ENOSPC if there's no suitable hole. 
+ */ +static inline int drm_mm_insert_node_in_range(struct drm_mm *mm, + struct drm_mm_node *node, + u64 size, + unsigned alignment, + u64 start, + u64 end, + enum drm_mm_search_flags flags) +{ + return drm_mm_insert_node_in_range_generic(mm, node, size, alignment, + 0, start, end, flags, + DRM_MM_CREATE_DEFAULT); } void drm_mm_remove_node(struct drm_mm_node *node); void drm_mm_replace_node(struct drm_mm_node *old, struct drm_mm_node *new); -void drm_mm_init(struct drm_mm *mm, u64 start, u64 size); +void drm_mm_init(struct drm_mm *mm, + u64 start, + u64 size) __intentional_overflow(3); void drm_mm_takedown(struct drm_mm *mm); - -/** - * drm_mm_clean - checks whether an allocator is clean - * @mm: drm_mm allocator to check - * - * Returns: - * True if the allocator is completely free, false if there's still a node - * allocated in it. - */ -static inline bool drm_mm_clean(const struct drm_mm *mm) -{ - return list_empty(drm_mm_nodes(mm)); -} +bool drm_mm_clean(struct drm_mm *mm); struct drm_mm_node * -__drm_mm_interval_first(const struct drm_mm *mm, u64 start, u64 last); +drm_mm_interval_first(struct drm_mm *mm, u64 start, u64 last); -/** - * drm_mm_for_each_node_in_range - iterator to walk over a range of - * allocated nodes - * @node__: drm_mm_node structure to assign to in each iteration step - * @mm__: drm_mm allocator to walk - * @start__: starting offset, the first node will overlap this - * @end__: ending offset, the last node will start before this (but may overlap) - * - * This iterator walks over all nodes in the range allocator that lie - * between @start and @end. It is implemented similarly to list_for_each(), - * but using the internal interval tree to accelerate the search for the - * starting node, and so not safe against removal of elements. It assumes - * that @end is within (or is the upper limit of) the drm_mm allocator. 
- * If [@start, @end] are beyond the range of the drm_mm, the iterator may walk - * over the special _unallocated_ &drm_mm.head_node, and may even continue - * indefinitely. - */ -#define drm_mm_for_each_node_in_range(node__, mm__, start__, end__) \ - for (node__ = __drm_mm_interval_first((mm__), (start__), (end__)-1); \ - node__->start < (end__); \ - node__ = list_next_entry(node__, node_list)) +struct drm_mm_node * +drm_mm_interval_next(struct drm_mm_node *node, u64 start, u64 last); -void drm_mm_scan_init_with_range(struct drm_mm_scan *scan, - struct drm_mm *mm, - u64 size, u64 alignment, unsigned long color, - u64 start, u64 end, - enum drm_mm_insert_mode mode); +void drm_mm_init_scan(struct drm_mm *mm, + u64 size, + unsigned alignment, + unsigned long color); +void drm_mm_init_scan_with_range(struct drm_mm *mm, + u64 size, + unsigned alignment, + unsigned long color, + u64 start, + u64 end); +bool drm_mm_scan_add_block(struct drm_mm_node *node); +bool drm_mm_scan_remove_block(struct drm_mm_node *node); -/** - * drm_mm_scan_init - initialize lru scanning - * @scan: scan state - * @mm: drm_mm to scan - * @size: size of the allocation - * @alignment: alignment of the allocation - * @color: opaque tag value to use for the allocation - * @mode: fine-tune the allocation search and placement - * - * This is a simplified version of drm_mm_scan_init_with_range() with no range - * restrictions applied. - * - * This simply sets up the scanning routines with the parameters for the desired - * hole. - * - * Warning: - * As long as the scan list is non-empty, no other operations than - * adding/removing nodes to/from the scan list are allowed. 
- */ -static inline void drm_mm_scan_init(struct drm_mm_scan *scan, - struct drm_mm *mm, - u64 size, - u64 alignment, - unsigned long color, - enum drm_mm_insert_mode mode) -{ - drm_mm_scan_init_with_range(scan, mm, - size, alignment, color, - 0, U64_MAX, mode); -} - -bool drm_mm_scan_add_block(struct drm_mm_scan *scan, - struct drm_mm_node *node); -bool drm_mm_scan_remove_block(struct drm_mm_scan *scan, - struct drm_mm_node *node); -struct drm_mm_node *drm_mm_scan_color_evict(struct drm_mm_scan *scan); - -void drm_mm_print(const struct drm_mm *mm, struct drm_printer *p); +void drm_mm_debug_table(struct drm_mm *mm, const char *prefix); +#ifdef CONFIG_DEBUG_FS +int drm_mm_dump_table(struct seq_file *m, struct drm_mm *mm); +#endif #endif diff --git a/include/drm/drm_mode_object.h b/include/drm/drm_mode_object.h index c34a3e8030..43460b21d1 100644 --- a/include/drm/drm_mode_object.h +++ b/include/drm/drm_mode_object.h @@ -24,11 +24,9 @@ #define __DRM_MODESET_H__ #include -#include struct drm_object_properties; struct drm_property; struct drm_device; -struct drm_file; /** * struct drm_mode_object - base structure for modeset objects @@ -47,10 +45,10 @@ struct drm_file; * drm_object_attach_property() before the object is visible to userspace. * * - For objects with dynamic lifetimes (as indicated by a non-NULL @free_cb) it - * provides reference counting through drm_mode_object_get() and - * drm_mode_object_put(). This is used by &drm_framebuffer, &drm_connector - * and &drm_property_blob. These objects provide specialized reference - * counting wrappers. + * provides reference counting through drm_mode_object_reference() and + * drm_mode_object_unreference(). This is used by &drm_framebuffer, + * &drm_connector and &drm_property_blob. These objects provide specialized + * reference counting wrappers. 
*/ struct drm_mode_object { uint32_t id; @@ -88,15 +86,10 @@ struct drm_object_properties { * * Note that atomic drivers do not store mutable properties in this * array, but only the decoded values in the corresponding state - * structure. The decoding is done using the &drm_crtc.atomic_get_property and - * &drm_crtc.atomic_set_property hooks for &struct drm_crtc. For - * &struct drm_plane the hooks are &drm_plane_funcs.atomic_get_property and - * &drm_plane_funcs.atomic_set_property. And for &struct drm_connector - * the hooks are &drm_connector_funcs.atomic_get_property and - * &drm_connector_funcs.atomic_set_property . - * - * Hence atomic drivers should not use drm_object_property_set_value() - * and drm_object_property_get_value() on mutable objects, i.e. those + * structure. The decoding is done using the ->atomic_get_property and + * ->atomic_set_property hooks of the corresponding object. Hence atomic + * drivers should not use drm_object_property_set_value() and + * drm_object_property_get_value() on mutable objects, i.e. those * without the DRM_MODE_PROP_IMMUTABLE flag set. 
*/ uint64_t values[DRM_OBJECT_MAX_PROPERTY]; @@ -115,10 +108,9 @@ struct drm_object_properties { } struct drm_mode_object *drm_mode_object_find(struct drm_device *dev, - struct drm_file *file_priv, uint32_t id, uint32_t type); -void drm_mode_object_get(struct drm_mode_object *obj); -void drm_mode_object_put(struct drm_mode_object *obj); +void drm_mode_object_reference(struct drm_mode_object *obj); +void drm_mode_object_unreference(struct drm_mode_object *obj); int drm_object_property_set_value(struct drm_mode_object *obj, struct drm_property *property, @@ -130,6 +122,4 @@ int drm_object_property_get_value(struct drm_mode_object *obj, void drm_object_attach_property(struct drm_mode_object *obj, struct drm_property *property, uint64_t init_val); - -bool drm_mode_object_lease_required(uint32_t type); #endif diff --git a/include/drm/drm_modes.h b/include/drm/drm_modes.h index 29ba4adf0c..9934d91619 100644 --- a/include/drm/drm_modes.h +++ b/include/drm/drm_modes.h @@ -48,7 +48,7 @@ struct videomode; * @MODE_HSYNC: hsync out of range * @MODE_VSYNC: vsync out of range * @MODE_H_ILLEGAL: mode has illegal horizontal timings - * @MODE_V_ILLEGAL: mode has illegal vertical timings + * @MODE_V_ILLEGAL: mode has illegal horizontal timings * @MODE_BAD_WIDTH: requires an unsupported linepitch * @MODE_NOMODE: no mode with a matching name * @MODE_NO_INTERLACE: interlaced mode not supported @@ -80,7 +80,6 @@ struct videomode; * @MODE_ONE_SIZE: only one resolution is supported * @MODE_NO_REDUCED: monitor doesn't accept reduced blanking * @MODE_NO_STEREO: stereo modes not supported - * @MODE_NO_420: ycbcr 420 modes not supported * @MODE_STALE: mode has become stale * @MODE_BAD: unspecified reason * @MODE_ERROR: error condition @@ -125,35 +124,21 @@ enum drm_mode_status { MODE_ONE_SIZE, MODE_NO_REDUCED, MODE_NO_STEREO, - MODE_NO_420, MODE_STALE = -3, MODE_BAD = -2, MODE_ERROR = -1 }; +#define DRM_MODE_TYPE_CLOCK_CRTC_C (DRM_MODE_TYPE_CLOCK_C | \ + DRM_MODE_TYPE_CRTC_C) + #define 
DRM_MODE(nm, t, c, hd, hss, hse, ht, hsk, vd, vss, vse, vt, vs, f) \ .name = nm, .status = 0, .type = (t), .clock = (c), \ .hdisplay = (hd), .hsync_start = (hss), .hsync_end = (hse), \ .htotal = (ht), .hskew = (hsk), .vdisplay = (vd), \ .vsync_start = (vss), .vsync_end = (vse), .vtotal = (vt), \ - .vscan = (vs), .flags = (f) - -/** - * DRM_SIMPLE_MODE - Simple display mode - * @hd: Horizontal resolution, width - * @vd: Vertical resolution, height - * @hd_mm: Display width in millimeters - * @vd_mm: Display height in millimeters - * - * This macro initializes a &drm_display_mode that only contains info about - * resolution and physical size. - */ -#define DRM_SIMPLE_MODE(hd, vd, hd_mm, vd_mm) \ - .type = DRM_MODE_TYPE_DRIVER, .clock = 1 /* pass validation */, \ - .hdisplay = (hd), .hsync_start = (hd), .hsync_end = (hd), \ - .htotal = (hd), .vdisplay = (vd), .vsync_start = (vd), \ - .vsync_end = (vd), .vtotal = (vd), .width_mm = (hd_mm), \ - .height_mm = (vd_mm) + .vscan = (vs), .flags = (f), \ + .base.type = DRM_MODE_OBJECT_MODE #define CRTC_INTERLACE_HALVE_V (1 << 0) /* halve V values for interlacing */ #define CRTC_STEREO_DOUBLE (1 << 1) /* adjust timings for stereo modes */ @@ -163,12 +148,6 @@ enum drm_mode_status { #define DRM_MODE_FLAG_3D_MAX DRM_MODE_FLAG_3D_SIDE_BY_SIDE_HALF -#define DRM_MODE_MATCH_TIMINGS (1 << 0) -#define DRM_MODE_MATCH_CLOCK (1 << 1) -#define DRM_MODE_MATCH_FLAGS (1 << 2) -#define DRM_MODE_MATCH_3D_FLAGS (1 << 3) -#define DRM_MODE_MATCH_ASPECT_RATIO (1 << 4) - /** * struct drm_display_mode - DRM kernel-internal display mode structure * @hdisplay: horizontal display size @@ -195,9 +174,6 @@ enum drm_mode_status { * @crtc_vsync_end: hardware mode vertical sync end * @crtc_vtotal: hardware mode vertical total size * - * This is the kernel API display mode information structure. For the - * user-space version see struct drm_mode_modeinfo. - * * The horizontal and vertical timings are defined per the following diagram. 
* * :: @@ -221,148 +197,8 @@ enum drm_mode_status { * there's the hardware timings, which are corrected for interlacing, * double-clocking and similar things. They are provided as a convenience, and * can be appropriately computed using drm_mode_set_crtcinfo(). - * - * For printing you can use %DRM_MODE_FMT and DRM_MODE_ARG(). */ struct drm_display_mode { - /** - * @clock: - * - * Pixel clock in kHz. - */ - int clock; /* in kHz */ - u16 hdisplay; - u16 hsync_start; - u16 hsync_end; - u16 htotal; - u16 hskew; - u16 vdisplay; - u16 vsync_start; - u16 vsync_end; - u16 vtotal; - u16 vscan; - /** - * @flags: - * - * Sync and timing flags: - * - * - DRM_MODE_FLAG_PHSYNC: horizontal sync is active high. - * - DRM_MODE_FLAG_NHSYNC: horizontal sync is active low. - * - DRM_MODE_FLAG_PVSYNC: vertical sync is active high. - * - DRM_MODE_FLAG_NVSYNC: vertical sync is active low. - * - DRM_MODE_FLAG_INTERLACE: mode is interlaced. - * - DRM_MODE_FLAG_DBLSCAN: mode uses doublescan. - * - DRM_MODE_FLAG_CSYNC: mode uses composite sync. - * - DRM_MODE_FLAG_PCSYNC: composite sync is active high. - * - DRM_MODE_FLAG_NCSYNC: composite sync is active low. - * - DRM_MODE_FLAG_HSKEW: hskew provided (not used?). - * - DRM_MODE_FLAG_BCAST: - * - DRM_MODE_FLAG_PIXMUX: - * - DRM_MODE_FLAG_DBLCLK: double-clocked mode. - * - DRM_MODE_FLAG_CLKDIV2: half-clocked mode. - * - * Additionally there's flags to specify how 3D modes are packed: - * - * - DRM_MODE_FLAG_3D_NONE: normal, non-3D mode. - * - DRM_MODE_FLAG_3D_FRAME_PACKING: 2 full frames for left and right. - * - DRM_MODE_FLAG_3D_FIELD_ALTERNATIVE: interleaved like fields. - * - DRM_MODE_FLAG_3D_LINE_ALTERNATIVE: interleaved lines. - * - DRM_MODE_FLAG_3D_SIDE_BY_SIDE_FULL: side-by-side full frames. - * - DRM_MODE_FLAG_3D_L_DEPTH: ? - * - DRM_MODE_FLAG_3D_L_DEPTH_GFX_GFX_DEPTH: ? - * - DRM_MODE_FLAG_3D_TOP_AND_BOTTOM: frame split into top and bottom - * parts. - * - DRM_MODE_FLAG_3D_SIDE_BY_SIDE_HALF: frame split into left and - * right parts. 
- */ - u32 flags; - - /** - * @crtc_clock: - * - * Actual pixel or dot clock in the hardware. This differs from the - * logical @clock when e.g. using interlacing, double-clocking, stereo - * modes or other fancy stuff that changes the timings and signals - * actually sent over the wire. - * - * This is again in kHz. - * - * Note that with digital outputs like HDMI or DP there's usually a - * massive confusion between the dot clock and the signal clock at the - * bit encoding level. Especially when a 8b/10b encoding is used and the - * difference is exactly a factor of 10. - */ - int crtc_clock; - u16 crtc_hdisplay; - u16 crtc_hblank_start; - u16 crtc_hblank_end; - u16 crtc_hsync_start; - u16 crtc_hsync_end; - u16 crtc_htotal; - u16 crtc_hskew; - u16 crtc_vdisplay; - u16 crtc_vblank_start; - u16 crtc_vblank_end; - u16 crtc_vsync_start; - u16 crtc_vsync_end; - u16 crtc_vtotal; - - /** - * @width_mm: - * - * Addressable size of the output in mm, projectors should set this to - * 0. - */ - u16 width_mm; - - /** - * @height_mm: - * - * Addressable size of the output in mm, projectors should set this to - * 0. - */ - u16 height_mm; - - /** - * @type: - * - * A bitmask of flags, mostly about the source of a mode. Possible flags - * are: - * - * - DRM_MODE_TYPE_PREFERRED: Preferred mode, usually the native - * resolution of an LCD panel. There should only be one preferred - * mode per connector at any given time. - * - DRM_MODE_TYPE_DRIVER: Mode created by the driver, which is all of - * them really. Drivers must set this bit for all modes they create - * and expose to userspace. - * - DRM_MODE_TYPE_USERDEF: Mode defined or selected via the kernel - * command line. - * - * Plus a big list of flags which shouldn't be used at all, but are - * still around since these flags are also used in the userspace ABI. - * We no longer accept modes with these types though: - * - * - DRM_MODE_TYPE_BUILTIN: Meant for hard-coded modes, unused. - * Use DRM_MODE_TYPE_DRIVER instead. 
- * - DRM_MODE_TYPE_DEFAULT: Again a leftover, use - * DRM_MODE_TYPE_PREFERRED instead. - * - DRM_MODE_TYPE_CLOCK_C and DRM_MODE_TYPE_CRTC_C: Define leftovers - * which are stuck around for hysterical raisins only. No one has an - * idea what they were meant for. Don't use. - */ - u8 type; - - /** - * @expose_to_userspace: - * - * Indicates whether the mode is to be exposed to the userspace. - * This is to maintain a set of exposed modes while preparing - * user-mode's list in drm_mode_getconnector ioctl. The purpose of - * this only lies in the ioctl function, and is not to be used - * outside the function. - */ - bool expose_to_userspace; - /** * @head: * @@ -370,6 +206,20 @@ struct drm_display_mode { */ struct list_head head; + /** + * @base: + * + * A display mode is a normal modeset object, possibly including public + * userspace id. + * + * FIXME: + * + * This can probably be removed since the entire concept of userspace + * managing modes explicitly has never landed in upstream kernel mode + * setting support. + */ + struct drm_mode_object base; + /** * @name: * @@ -385,30 +235,178 @@ struct drm_display_mode { */ enum drm_mode_status status; + /** + * @type: + * + * A bitmask of flags, mostly about the source of a mode. Possible flags + * are: + * + * - DRM_MODE_TYPE_BUILTIN: Meant for hard-coded modes, effectively + * unused. + * - DRM_MODE_TYPE_PREFERRED: Preferred mode, usually the native + * resolution of an LCD panel. There should only be one preferred + * mode per connector at any given time. + * - DRM_MODE_TYPE_DRIVER: Mode created by the driver, which is all of + * them really. Drivers must set this bit for all modes they create + * and expose to userspace. + * + * Plus a big list of flags which shouldn't be used at all, but are + * still around since these flags are also used in the userspace ABI: + * + * - DRM_MODE_TYPE_DEFAULT: Again a leftover, use + * DRM_MODE_TYPE_PREFERRED instead. 
+ * - DRM_MODE_TYPE_CLOCK_C and DRM_MODE_TYPE_CRTC_C: Define leftovers + * which are stuck around for hysterical raisins only. No one has an + * idea what they were meant for. Don't use. + * - DRM_MODE_TYPE_USERDEF: Mode defined by userspace, again a vestige + * from older kms designs where userspace had to first add a custom + * mode to the kernel's mode list before it could use it. Don't use. + */ + unsigned int type; + + /** + * @clock: + * + * Pixel clock in kHz. + */ + int clock; /* in kHz */ + int hdisplay; + int hsync_start; + int hsync_end; + int htotal; + int hskew; + int vdisplay; + int vsync_start; + int vsync_end; + int vtotal; + int vscan; + /** + * @flags: + * + * Sync and timing flags: + * + * - DRM_MODE_FLAG_PHSYNC: horizontal sync is active high. + * - DRM_MODE_FLAG_NHSYNC: horizontal sync is active low. + * - DRM_MODE_FLAG_PVSYNC: vertical sync is active high. + * - DRM_MODE_FLAG_NVSYNC: vertical sync is active low. + * - DRM_MODE_FLAG_INTERLACE: mode is interlaced. + * - DRM_MODE_FLAG_DBLSCAN: mode uses doublescan. + * - DRM_MODE_FLAG_CSYNC: mode uses composite sync. + * - DRM_MODE_FLAG_PCSYNC: composite sync is active high. + * - DRM_MODE_FLAG_NCSYNC: composite sync is active low. + * - DRM_MODE_FLAG_HSKEW: hskew provided (not used?). + * - DRM_MODE_FLAG_BCAST: not used? + * - DRM_MODE_FLAG_PIXMUX: not used? + * - DRM_MODE_FLAG_DBLCLK: double-clocked mode. + * - DRM_MODE_FLAG_CLKDIV2: half-clocked mode. + * + * Additionally there's flags to specify how 3D modes are packed: + * + * - DRM_MODE_FLAG_3D_NONE: normal, non-3D mode. + * - DRM_MODE_FLAG_3D_FRAME_PACKING: 2 full frames for left and right. + * - DRM_MODE_FLAG_3D_FIELD_ALTERNATIVE: interleaved like fields. + * - DRM_MODE_FLAG_3D_LINE_ALTERNATIVE: interleaved lines. + * - DRM_MODE_FLAG_3D_SIDE_BY_SIDE_FULL: side-by-side full frames. + * - DRM_MODE_FLAG_3D_L_DEPTH: ? + * - DRM_MODE_FLAG_3D_L_DEPTH_GFX_GFX_DEPTH: ? 
+ * - DRM_MODE_FLAG_3D_TOP_AND_BOTTOM: frame split into top and bottom + * parts. + * - DRM_MODE_FLAG_3D_SIDE_BY_SIDE_HALF: frame split into left and + * right parts. + */ + unsigned int flags; + + /** + * @width_mm: + * + * Addressable size of the output in mm, projectors should set this to + * 0. + */ + int width_mm; + + /** + * @height_mm: + * + * Addressable size of the output in mm, projectors should set this to + * 0. + */ + int height_mm; + + /** + * @crtc_clock: + * + * Actual pixel or dot clock in the hardware. This differs from the + * logical @clock when e.g. using interlacing, double-clocking, stereo + * modes or other fancy stuff that changes the timings and signals + * actually sent over the wire. + * + * This is again in kHz. + * + * Note that with digital outputs like HDMI or DP there's usually a + * massive confusion between the dot clock and the signal clock at the + * bit encoding level. Especially when a 8b/10b encoding is used and the + * difference is exactly a factor of 10. + */ + int crtc_clock; + int crtc_hdisplay; + int crtc_hblank_start; + int crtc_hblank_end; + int crtc_hsync_start; + int crtc_hsync_end; + int crtc_htotal; + int crtc_hskew; + int crtc_vdisplay; + int crtc_vblank_start; + int crtc_vblank_end; + int crtc_vsync_start; + int crtc_vsync_end; + int crtc_vtotal; + + /** + * @private: + * + * Pointer for driver private data. This can only be used for mode + * objects passed to drivers in modeset operations. It shouldn't be used + * by atomic drivers since they can store any additional data by + * subclassing state structures. + */ + int *private; + + /** + * @private_flags: + * + * Similar to @private, but just an integer. + */ + int private_flags; + + /** + * @vrefresh: + * + * Vertical refresh rate, for debug output in human readable form. Not + * used in a functional way. + * + * This value is in Hz. + */ + int vrefresh; + + /** + * @hsync: + * + * Horizontal refresh rate, for debug output in human readable form. 
Not + * used in a functional way. + * + * This value is in kHz. + */ + int hsync; + /** * @picture_aspect_ratio: * * Field for setting the HDMI picture aspect ratio of a mode. */ enum hdmi_picture_aspect picture_aspect_ratio; - }; -/** - * DRM_MODE_FMT - printf string for &struct drm_display_mode - */ -#define DRM_MODE_FMT "\"%s\": %d %d %d %d %d %d %d %d %d %d 0x%x 0x%x" - -/** - * DRM_MODE_ARG - printf arguments for &struct drm_display_mode - * @m: display mode - */ -#define DRM_MODE_ARG(m) \ - (m)->name, drm_mode_vrefresh(m), (m)->clock, \ - (m)->hdisplay, (m)->hsync_start, (m)->hsync_end, (m)->htotal, \ - (m)->vdisplay, (m)->vsync_start, (m)->vsync_end, (m)->vtotal, \ - (m)->type, (m)->flags - #define obj_to_mode(x) container_of(x, struct drm_display_mode, base) /** @@ -431,17 +429,10 @@ struct drm_display_mode *drm_mode_create(struct drm_device *dev); void drm_mode_destroy(struct drm_device *dev, struct drm_display_mode *mode); void drm_mode_convert_to_umode(struct drm_mode_modeinfo *out, const struct drm_display_mode *in); -int drm_mode_convert_umode(struct drm_device *dev, - struct drm_display_mode *out, +int drm_mode_convert_umode(struct drm_display_mode *out, const struct drm_mode_modeinfo *in); void drm_mode_probed_add(struct drm_connector *connector, struct drm_display_mode *mode); void drm_mode_debug_printmodeline(const struct drm_display_mode *mode); -bool drm_mode_is_420_only(const struct drm_display_info *display, - const struct drm_display_mode *mode); -bool drm_mode_is_420_also(const struct drm_display_info *display, - const struct drm_display_mode *mode); -bool drm_mode_is_420(const struct drm_display_info *display, - const struct drm_display_mode *mode); struct drm_display_mode *drm_cvt_mode(struct drm_device *dev, int hdisplay, int vdisplay, int vrefresh, @@ -461,24 +452,13 @@ void drm_display_mode_from_videomode(const struct videomode *vm, void drm_display_mode_to_videomode(const struct drm_display_mode *dmode, struct videomode *vm); void 
drm_bus_flags_from_videomode(const struct videomode *vm, u32 *bus_flags); - -#if defined(CONFIG_OF) int of_get_drm_display_mode(struct device_node *np, struct drm_display_mode *dmode, u32 *bus_flags, int index); -#else -static inline int of_get_drm_display_mode(struct device_node *np, - struct drm_display_mode *dmode, - u32 *bus_flags, int index) -{ - return -EINVAL; -} -#endif void drm_mode_set_name(struct drm_display_mode *mode); +int drm_mode_hsync(const struct drm_display_mode *mode); int drm_mode_vrefresh(const struct drm_display_mode *mode); -void drm_mode_get_hv_timing(const struct drm_display_mode *mode, - int *hdisplay, int *vdisplay); void drm_mode_set_crtcinfo(struct drm_display_mode *p, int adjust_flags); @@ -486,9 +466,6 @@ void drm_mode_copy(struct drm_display_mode *dst, const struct drm_display_mode *src); struct drm_display_mode *drm_mode_duplicate(struct drm_device *dev, const struct drm_display_mode *mode); -bool drm_mode_match(const struct drm_display_mode *mode1, - const struct drm_display_mode *mode2, - unsigned int match_flags); bool drm_mode_equal(const struct drm_display_mode *mode1, const struct drm_display_mode *mode2); bool drm_mode_equal_no_clocks(const struct drm_display_mode *mode1, @@ -497,22 +474,18 @@ bool drm_mode_equal_no_clocks_no_stereo(const struct drm_display_mode *mode1, const struct drm_display_mode *mode2); /* for use by the crtc helper probe functions */ -enum drm_mode_status drm_mode_validate_driver(struct drm_device *dev, - const struct drm_display_mode *mode); +enum drm_mode_status drm_mode_validate_basic(const struct drm_display_mode *mode); enum drm_mode_status drm_mode_validate_size(const struct drm_display_mode *mode, int maxX, int maxY); -enum drm_mode_status -drm_mode_validate_ycbcr420(const struct drm_display_mode *mode, - struct drm_connector *connector); void drm_mode_prune_invalid(struct drm_device *dev, struct list_head *mode_list, bool verbose); void drm_mode_sort(struct list_head *mode_list); -void 
drm_connector_list_update(struct drm_connector *connector); +void drm_mode_connector_list_update(struct drm_connector *connector); /* parsing cmdline modes */ bool drm_mode_parse_command_line_for_connector(const char *mode_option, - const struct drm_connector *connector, + struct drm_connector *connector, struct drm_cmdline_mode *mode); struct drm_display_mode * drm_mode_create_from_cmdline_mode(struct drm_device *dev, diff --git a/include/drm/drm_modeset_helper.h b/include/drm/drm_modeset_helper.h index 995fd981ca..b8051d5abe 100644 --- a/include/drm/drm_modeset_helper.h +++ b/include/drm/drm_modeset_helper.h @@ -23,22 +23,14 @@ #ifndef __DRM_KMS_HELPER_H__ #define __DRM_KMS_HELPER_H__ -struct drm_crtc; -struct drm_crtc_funcs; -struct drm_device; -struct drm_framebuffer; -struct drm_mode_fb_cmd2; +#include void drm_helper_move_panel_connectors_to_head(struct drm_device *); -void drm_helper_mode_fill_fb_struct(struct drm_device *dev, - struct drm_framebuffer *fb, +void drm_helper_mode_fill_fb_struct(struct drm_framebuffer *fb, const struct drm_mode_fb_cmd2 *mode_cmd); int drm_crtc_init(struct drm_device *dev, struct drm_crtc *crtc, const struct drm_crtc_funcs *funcs); -int drm_mode_config_helper_suspend(struct drm_device *dev); -int drm_mode_config_helper_resume(struct drm_device *dev); - #endif diff --git a/include/drm/drm_modeset_helper_vtables.h b/include/drm/drm_modeset_helper_vtables.h index fdfa9f37ce..f2a0d40165 100644 --- a/include/drm/drm_modeset_helper_vtables.h +++ b/include/drm/drm_modeset_helper_vtables.h @@ -30,7 +30,6 @@ #define __DRM_MODESET_HELPER_VTABLES_H__ #include -#include /** * DOC: overview @@ -49,8 +48,6 @@ */ enum mode_set_atomic; -struct drm_writeback_connector; -struct drm_writeback_job; /** * struct drm_crtc_helper_funcs - helper operations for CRTCs @@ -73,7 +70,7 @@ struct drm_crtc_helper_funcs { * This callback is used by the legacy CRTC helpers. 
Atomic helpers * also support using this hook for enabling and disabling a CRTC to * facilitate transitions to atomic, but it is deprecated. Instead - * @atomic_enable and @atomic_disable should be used. + * @enable and @disable should be used. */ void (*dpms)(struct drm_crtc *crtc, int mode); @@ -87,8 +84,8 @@ struct drm_crtc_helper_funcs { * * This callback is used by the legacy CRTC helpers. Atomic helpers * also support using this hook for disabling a CRTC to facilitate - * transitions to atomic, but it is deprecated. Instead @atomic_disable - * should be used. + * transitions to atomic, but it is deprecated. Instead @disable should + * be used. */ void (*prepare)(struct drm_crtc *crtc); @@ -102,55 +99,20 @@ struct drm_crtc_helper_funcs { * * This callback is used by the legacy CRTC helpers. Atomic helpers * also support using this hook for enabling a CRTC to facilitate - * transitions to atomic, but it is deprecated. Instead @atomic_enable - * should be used. + * transitions to atomic, but it is deprecated. Instead @enable should + * be used. */ void (*commit)(struct drm_crtc *crtc); - /** - * @mode_valid: - * - * This callback is used to check if a specific mode is valid in this - * crtc. This should be implemented if the crtc has some sort of - * restriction in the modes it can display. For example, a given crtc - * may be responsible to set a clock value. If the clock can not - * produce all the values for the available modes then this callback - * can be used to restrict the number of modes to only the ones that - * can be displayed. - * - * This hook is used by the probe helpers to filter the mode list in - * drm_helper_probe_single_connector_modes(), and it is used by the - * atomic helpers to validate modes supplied by userspace in - * drm_atomic_helper_check_modeset(). - * - * This function is optional. 
- * - * NOTE: - * - * Since this function is both called from the check phase of an atomic - * commit, and the mode validation in the probe paths it is not allowed - * to look at anything else but the passed-in mode, and validate it - * against configuration-invariant hardward constraints. Any further - * limits which depend upon the configuration can only be checked in - * @mode_fixup or @atomic_check. - * - * RETURNS: - * - * drm_mode_status Enum - */ - enum drm_mode_status (*mode_valid)(struct drm_crtc *crtc, - const struct drm_display_mode *mode); - /** * @mode_fixup: * * This callback is used to validate a mode. The parameter mode is the * display mode that userspace requested, adjusted_mode is the mode the * encoders need to be fed with. Note that this is the inverse semantics - * of the meaning for the &drm_encoder and &drm_bridge_funcs.mode_fixup - * vfunc. If the CRTC cannot support the requested conversion from mode - * to adjusted_mode it should reject the modeset. See also - * &drm_crtc_state.adjusted_mode for more details. + * of the meaning for the &drm_encoder and &drm_bridge + * ->mode_fixup() functions. If the CRTC cannot support the requested + * conversion from mode to adjusted_mode it should reject the modeset. * * This function is used by both legacy CRTC helpers and atomic helpers. * With atomic helpers it is optional. @@ -167,17 +129,21 @@ struct drm_crtc_helper_funcs { * allowed. * * Atomic drivers which need to inspect and adjust more state should - * instead use the @atomic_check callback, but note that they're not - * perfectly equivalent: @mode_valid is called from - * drm_atomic_helper_check_modeset(), but @atomic_check is called from - * drm_atomic_helper_check_planes(), because originally it was meant for - * plane update checks only. + * instead use the @atomic_check callback. 
* - * Also beware that userspace can request its own custom modes, neither - * core nor helpers filter modes to the list of probe modes reported by - * the GETCONNECTOR IOCTL and stored in &drm_connector.modes. To ensure - * that modes are filtered consistently put any CRTC constraints and - * limits checks into @mode_valid. + * Also beware that neither core nor helpers filter modes before + * passing them to the driver: While the list of modes that is + * advertised to userspace is filtered using the connector's + * ->mode_valid() callback, neither the core nor the helpers do any + * filtering on modes passed in from userspace when setting a mode. It + * is therefore possible for userspace to pass in a mode that was + * previously filtered out using ->mode_valid() or add a custom mode + * that wasn't probed from EDID or similar to begin with. Even though + * this is an advanced feature and rarely used nowadays, some users rely + * on being able to specify modes manually so drivers must be prepared + * to deal with it. Specifically this means that all drivers need not + * only validate modes in ->mode_valid() but also in ->mode_fixup() to + * make sure invalid modes passed in from userspace are rejected. * * RETURNS: * @@ -224,7 +190,7 @@ struct drm_crtc_helper_funcs { * pipeline is suspended using either DPMS or the new "ACTIVE" property. * Which means register values set in this callback might get reset when * the CRTC is suspended, but not restored. Such drivers should instead - * move all their CRTC setup into the @atomic_enable callback. + * move all their CRTC setup into the @enable callback. * * This callback is optional. */ @@ -238,7 +204,7 @@ struct drm_crtc_helper_funcs { * optimized fast-path instead of a full mode set operation with all the * resulting flickering. If it is not present * drm_crtc_helper_set_config() will fall back to a full modeset, using - * the @mode_set callback. Since it can't update other planes it's + * the ->mode_set() callback. 
Since it can't update other planes it's * incompatible with atomic modeset support. * * This callback is only used by the CRTC helpers and deprecated. @@ -268,22 +234,38 @@ struct drm_crtc_helper_funcs { struct drm_framebuffer *fb, int x, int y, enum mode_set_atomic); + /** + * @load_lut: + * + * Load a LUT prepared with the @gamma_set functions from + * &drm_fb_helper_funcs. + * + * This callback is optional and is only used by the fbdev emulation + * helpers. + * + * FIXME: + * + * This callback is functionally redundant with the core gamma table + * support and simply exists because the fbdev hasn't yet been + * refactored to use the core gamma table interfaces. + */ + void (*load_lut)(struct drm_crtc *crtc); + /** * @disable: * * This callback should be used to disable the CRTC. With the atomic * drivers it is called after all encoders connected to this CRTC have - * been shut off already using their own - * &drm_encoder_helper_funcs.disable hook. If that sequence is too - * simple drivers can just add their own hooks and call it from this - * CRTC callback here by looping over all encoders connected to it using - * for_each_encoder_on_crtc(). + * been shut off already using their own ->disable hook. If that + * sequence is too simple drivers can just add their own hooks and call + * it from this CRTC callback here by looping over all encoders + * connected to it using for_each_encoder_on_crtc(). * * This hook is used both by legacy CRTC helpers and atomic helpers. * Atomic drivers don't need to implement it if there's no need to * disable anything at the CRTC level. To ensure that runtime PM * handling (using either DPMS or the new "ACTIVE" property) works - * @disable must be the inverse of @atomic_enable for atomic drivers. + * @disable must be the inverse of @enable for atomic drivers. * Atomic drivers should consider to use @atomic_disable instead of * this one. 
* @@ -301,6 +283,24 @@ struct drm_crtc_helper_funcs { */ void (*disable)(struct drm_crtc *crtc); + /** + * @enable: + * + * This callback should be used to enable the CRTC. With the atomic + * drivers it is called before all encoders connected to this CRTC are + * enabled through the encoder's own ->enable hook. If that sequence is + * too simple drivers can just add their own hooks and call it from this + * CRTC callback here by looping over all encoders connected to it using + * for_each_encoder_on_crtc(). + * + * This hook is used only by atomic helpers, for symmetry with @disable. + * Atomic drivers don't need to implement it if there's no need to + * enable anything at the CRTC level. To ensure that runtime PM handling + * (using either DPMS or the new "ACTIVE" property) works + * @enable must be the inverse of @disable for atomic drivers. + */ + void (*enable)(struct drm_crtc *crtc); + /** * @atomic_check: * @@ -315,16 +315,16 @@ struct drm_crtc_helper_funcs { * beforehand. This is calling order used by the default helper * implementation in drm_atomic_helper_check(). * - * When using drm_atomic_helper_check_planes() this hook is called - * after the &drm_plane_helper_funcs.atomic_check hook for planes, which - * allows drivers to assign shared resources requested by planes in this - * callback here. For more complicated dependencies the driver can call - * the provided check helpers multiple times until the computed state - * has a final configuration and everything has been checked. + * When using drm_atomic_helper_check_planes() CRTCs' ->atomic_check() + * hooks are called after the ones for planes, which allows drivers to + * assign shared resources requested by planes in the CRTC callback + * here. For more complicated dependencies the driver can call the provided + * check helpers multiple times until the computed state has a final + * configuration and everything has been checked. 
* * This function is also allowed to inspect any other object's state and * can add more state objects to the atomic commit if needed. Care must - * be taken though to ensure that state check and compute functions for + * be taken though to ensure that state check&compute functions for * these added states are all called, and derived state in other objects * all updated. Again the recommendation is to just call check helpers * until a maximal configuration is reached. @@ -336,13 +336,8 @@ struct drm_crtc_helper_funcs { * * This function is called in the check phase of an atomic update. The * driver is not allowed to change anything outside of the free-standing - * state object passed-in. - * - * Also beware that userspace can request its own custom modes, neither - * core nor helpers filter modes to the list of probe modes reported by - * the GETCONNECTOR IOCTL and stored in &drm_connector.modes. To ensure - * that modes are filtered consistently put any CRTC constraints and - * limits checks into @mode_valid. + * state objects passed-in or assembled in the overall &drm_atomic_state + * update tracking structure. * * RETURNS: * @@ -352,7 +347,7 @@ struct drm_crtc_helper_funcs { * deadlock. */ int (*atomic_check)(struct drm_crtc *crtc, - struct drm_atomic_state *state); + struct drm_crtc_state *state); /** * @atomic_begin: @@ -366,14 +361,14 @@ struct drm_crtc_helper_funcs { * * Note that the power state of the display pipe when this function is * called depends upon the exact helpers and calling sequence the driver - * has picked. See drm_atomic_helper_commit_planes() for a discussion of - * the tradeoffs and variants of plane commit helpers. + * has picked. See drm_atomic_commit_planes() for a discussion of the + * tradeoffs and variants of plane commit helpers. * * This callback is used by the atomic modeset helpers and by the * transitional plane helpers, but it is optional. 
*/ void (*atomic_begin)(struct drm_crtc *crtc, - struct drm_atomic_state *state); + struct drm_crtc_state *old_crtc_state); /** * @atomic_flush: * @@ -390,103 +385,36 @@ struct drm_crtc_helper_funcs { * * Note that the power state of the display pipe when this function is * called depends upon the exact helpers and calling sequence the driver - * has picked. See drm_atomic_helper_commit_planes() for a discussion of - * the tradeoffs and variants of plane commit helpers. + * has picked. See drm_atomic_commit_planes() for a discussion of the + * tradeoffs and variants of plane commit helpers. * * This callback is used by the atomic modeset helpers and by the * transitional plane helpers, but it is optional. */ void (*atomic_flush)(struct drm_crtc *crtc, - struct drm_atomic_state *state); - - /** - * @atomic_enable: - * - * This callback should be used to enable the CRTC. With the atomic - * drivers it is called before all encoders connected to this CRTC are - * enabled through the encoder's own &drm_encoder_helper_funcs.enable - * hook. If that sequence is too simple drivers can just add their own - * hooks and call it from this CRTC callback here by looping over all - * encoders connected to it using for_each_encoder_on_crtc(). - * - * This hook is used only by atomic helpers, for symmetry with - * @atomic_disable. Atomic drivers don't need to implement it if there's - * no need to enable anything at the CRTC level. To ensure that runtime - * PM handling (using either DPMS or the new "ACTIVE" property) works - * @atomic_enable must be the inverse of @atomic_disable for atomic - * drivers. - * - * This function is optional. - */ - void (*atomic_enable)(struct drm_crtc *crtc, - struct drm_atomic_state *state); + struct drm_crtc_state *old_crtc_state); /** * @atomic_disable: * * This callback should be used to disable the CRTC. 
With the atomic * drivers it is called after all encoders connected to this CRTC have - * been shut off already using their own - * &drm_encoder_helper_funcs.disable hook. If that sequence is too - * simple drivers can just add their own hooks and call it from this - * CRTC callback here by looping over all encoders connected to it using - * for_each_encoder_on_crtc(). + * been shut off already using their own ->disable hook. If that + * sequence is too simple drivers can just add their own hooks and call + * it from this CRTC callback here by looping over all encoders + * connected to it using for_each_encoder_on_crtc(). * * This hook is used only by atomic helpers. Atomic drivers don't * need to implement it if there's no need to disable anything at the * CRTC level. * - * This function is optional. + * Comparing to @disable, this one provides the additional input + * parameter @old_crtc_state which could be used to access the old + * state. Atomic drivers should consider to use this one instead + * of @disable. */ void (*atomic_disable)(struct drm_crtc *crtc, - struct drm_atomic_state *state); - - /** - * @get_scanout_position: - * - * Called by vblank timestamping code. - * - * Returns the current display scanout position from a CRTC and an - * optional accurate ktime_get() timestamp of when the position was - * measured. Note that this is a helper callback which is only used - * if a driver uses drm_crtc_vblank_helper_get_vblank_timestamp() - * for the @drm_crtc_funcs.get_vblank_timestamp callback. - * - * Parameters: - * - * crtc: - * The CRTC. - * in_vblank_irq: - * True when called from drm_crtc_handle_vblank(). Some drivers - * need to apply some workarounds for gpu-specific vblank irq - * quirks if the flag is set. - * vpos: - * Target location for current vertical scanout position. - * hpos: - * Target location for current horizontal scanout position. - * stime: - * Target location for timestamp taken immediately before - * scanout position query. 
Can be NULL to skip timestamp. - * etime: - * Target location for timestamp taken immediately after - * scanout position query. Can be NULL to skip timestamp. - * mode: - * Current display timings. - * - * Returns vpos as a positive number while in active scanout area. - * Returns vpos as a negative number inside vblank, counting the number - * of scanlines to go until end of vblank, e.g., -1 means "one scanline - * until start of active scanout / end of vblank." - * - * Returns: - * - * True on success, false if a reliable scanout position counter could - * not be read out. - */ - bool (*get_scanout_position)(struct drm_crtc *crtc, - bool in_vblank_irq, int *vpos, int *hpos, - ktime_t *stime, ktime_t *etime, - const struct drm_display_mode *mode); + struct drm_crtc_state *old_crtc_state); }; /** @@ -525,40 +453,6 @@ struct drm_encoder_helper_funcs { */ void (*dpms)(struct drm_encoder *encoder, int mode); - /** - * @mode_valid: - * - * This callback is used to check if a specific mode is valid in this - * encoder. This should be implemented if the encoder has some sort - * of restriction in the modes it can display. For example, a given - * encoder may be responsible to set a clock value. If the clock can - * not produce all the values for the available modes then this callback - * can be used to restrict the number of modes to only the ones that - * can be displayed. - * - * This hook is used by the probe helpers to filter the mode list in - * drm_helper_probe_single_connector_modes(), and it is used by the - * atomic helpers to validate modes supplied by userspace in - * drm_atomic_helper_check_modeset(). - * - * This function is optional. - * - * NOTE: - * - * Since this function is both called from the check phase of an atomic - * commit, and the mode validation in the probe paths it is not allowed - * to look at anything else but the passed-in mode, and validate it - * against configuration-invariant hardward constraints. 
Any further - * limits which depend upon the configuration can only be checked in - * @mode_fixup or @atomic_check. - * - * RETURNS: - * - * drm_mode_status Enum - */ - enum drm_mode_status (*mode_valid)(struct drm_encoder *crtc, - const struct drm_display_mode *mode); - /** * @mode_fixup: * @@ -566,8 +460,7 @@ struct drm_encoder_helper_funcs { * mode is the display mode that should be fed to the next element in * the display chain, either the final &drm_connector or a &drm_bridge. * The parameter adjusted_mode is the input mode the encoder requires. It - * can be modified by this callback and does not need to match mode. See - * also &drm_crtc_state.adjusted_mode for more details. + * can be modified by this callback and does not need to match mode. * * This function is used by both legacy CRTC helpers and atomic helpers. * This hook is optional. @@ -584,15 +477,21 @@ struct drm_encoder_helper_funcs { * allowed. * * Atomic drivers which need to inspect and adjust more state should - * instead use the @atomic_check callback. If @atomic_check is used, - * this hook isn't called since @atomic_check allows a strict superset - * of the functionality of @mode_fixup. + * instead use the @atomic_check callback. * - * Also beware that userspace can request its own custom modes, neither - * core nor helpers filter modes to the list of probe modes reported by - * the GETCONNECTOR IOCTL and stored in &drm_connector.modes. To ensure - * that modes are filtered consistently put any encoder constraints and - * limits checks into @mode_valid. + * Also beware that neither core nor helpers filter modes before + * passing them to the driver: While the list of modes that is + * advertised to userspace is filtered using the connector's + * ->mode_valid() callback, neither the core nor the helpers do any + * filtering on modes passed in from userspace when setting a mode. 
It + * is therefore possible for userspace to pass in a mode that was + * previously filtered out using ->mode_valid() or add a custom mode + * that wasn't probed from EDID or similar to begin with. Even though + * this is an advanced feature and rarely used nowadays, some users rely + * on being able to specify modes manually so drivers must be prepared + * to deal with it. Specifically this means that all drivers need not + * only validate modes in ->mode_valid() but also in ->mode_fixup() to + * make sure invalid modes passed in from userspace are rejected. * * RETURNS: * @@ -644,7 +543,7 @@ struct drm_encoder_helper_funcs { * use this hook, because the helper library calls it only once and not * every time the display pipeline is suspend using either DPMS or the * new "ACTIVE" property. Such drivers should instead move all their - * encoder setup into the @enable callback. + * encoder setup into the ->enable() callback. * * This callback is used both by the legacy CRTC helpers and the atomic * modeset helpers. It is optional in the atomic helpers. @@ -670,7 +569,7 @@ struct drm_encoder_helper_funcs { * use this hook, because the helper library calls it only once and not * every time the display pipeline is suspended using either DPMS or the * new "ACTIVE" property. Such drivers should instead move all their - * encoder setup into the @enable callback. + * encoder setup into the ->enable() callback. * * This callback is used by the atomic modeset helpers in place of the * @mode_set callback, if set by the driver. It is optional and should @@ -682,6 +581,22 @@ struct drm_encoder_helper_funcs { struct drm_crtc_state *crtc_state, struct drm_connector_state *conn_state); + /** + * @get_crtc: + * + * This callback is used by the legacy CRTC helpers to work around + * deficiencies in its own book-keeping. + * + * Do not use, use atomic helpers instead, which get the book keeping + * right. 
+ * + * FIXME: + * + * Currently only nouveau is using this, and as soon as nouveau is + * atomic we can ditch this hook. + */ + struct drm_crtc *(*get_crtc)(struct drm_encoder *encoder); + /** * @detect: * @@ -700,61 +615,15 @@ struct drm_encoder_helper_funcs { enum drm_connector_status (*detect)(struct drm_encoder *encoder, struct drm_connector *connector); - /** - * @atomic_disable: - * - * This callback should be used to disable the encoder. With the atomic - * drivers it is called before this encoder's CRTC has been shut off - * using their own &drm_crtc_helper_funcs.atomic_disable hook. If that - * sequence is too simple drivers can just add their own driver private - * encoder hooks and call them from CRTC's callback by looping over all - * encoders connected to it using for_each_encoder_on_crtc(). - * - * This callback is a variant of @disable that provides the atomic state - * to the driver. If @atomic_disable is implemented, @disable is not - * called by the helpers. - * - * This hook is only used by atomic helpers. Atomic drivers don't need - * to implement it if there's no need to disable anything at the encoder - * level. To ensure that runtime PM handling (using either DPMS or the - * new "ACTIVE" property) works @atomic_disable must be the inverse of - * @atomic_enable. - */ - void (*atomic_disable)(struct drm_encoder *encoder, - struct drm_atomic_state *state); - - /** - * @atomic_enable: - * - * This callback should be used to enable the encoder. It is called - * after this encoder's CRTC has been enabled using their own - * &drm_crtc_helper_funcs.atomic_enable hook. If that sequence is - * too simple drivers can just add their own driver private encoder - * hooks and call them from CRTC's callback by looping over all encoders - * connected to it using for_each_encoder_on_crtc(). - * - * This callback is a variant of @enable that provides the atomic state - * to the driver. If @atomic_enable is implemented, @enable is not - * called by the helpers. 
- * - * This hook is only used by atomic helpers, it is the opposite of - * @atomic_disable. Atomic drivers don't need to implement it if there's - * no need to enable anything at the encoder level. To ensure that - * runtime PM handling works @atomic_enable must be the inverse of - * @atomic_disable. - */ - void (*atomic_enable)(struct drm_encoder *encoder, - struct drm_atomic_state *state); - /** * @disable: * * This callback should be used to disable the encoder. With the atomic * drivers it is called before this encoder's CRTC has been shut off - * using their own &drm_crtc_helper_funcs.disable hook. If that - * sequence is too simple drivers can just add their own driver private - * encoder hooks and call them from CRTC's callback by looping over all - * encoders connected to it using for_each_encoder_on_crtc(). + * using the CRTC's own ->disable hook. If that sequence is too simple + * drivers can just add their own driver private encoder hooks and call + * them from CRTC's callback by looping over all encoders connected to + * it using for_each_encoder_on_crtc(). * * This hook is used both by legacy CRTC helpers and atomic helpers. * Atomic drivers don't need to implement it if there's no need to @@ -762,9 +631,6 @@ struct drm_encoder_helper_funcs { * handling (using either DPMS or the new "ACTIVE" property) works * @disable must be the inverse of @enable for atomic drivers. * - * For atomic drivers also consider @atomic_disable and save yourself - * from having to read the NOTE below! - * * NOTE: * * With legacy CRTC helpers there's a big semantic difference between @@ -784,16 +650,16 @@ struct drm_encoder_helper_funcs { * * This callback should be used to enable the encoder. With the atomic * drivers it is called after this encoder's CRTC has been enabled using - * their own &drm_crtc_helper_funcs.enable hook. 
If that sequence is - * too simple drivers can just add their own driver private encoder - * hooks and call them from CRTC's callback by looping over all encoders - * connected to it using for_each_encoder_on_crtc(). + * the CRTC's own ->enable hook. If that sequence is too simple drivers + * can just add their own driver private encoder hooks and call them + * from CRTC's callback by looping over all encoders connected to it + * using for_each_encoder_on_crtc(). * - * This hook is only used by atomic helpers, it is the opposite of - * @disable. Atomic drivers don't need to implement it if there's no - * need to enable anything at the encoder level. To ensure that - * runtime PM handling (using either DPMS or the new "ACTIVE" property) - * works @enable must be the inverse of @disable for atomic drivers. + * This hook is used only by atomic helpers, for symmetry with @disable. + * Atomic drivers don't need to implement it if there's no need to + * enable anything at the encoder level. To ensure that runtime PM handling + * (using either DPMS or the new "ACTIVE" property) works + * @enable must be the inverse of @disable for atomic drivers. */ void (*enable)(struct drm_encoder *encoder); @@ -806,11 +672,6 @@ struct drm_encoder_helper_funcs { * update the CRTC to match what the encoder needs for the requested * connector. * - * Since this provides a strict superset of the functionality of - * @mode_fixup (the requested and adjusted modes are both available - * through the passed in &struct drm_crtc_state) @mode_fixup is not - * called when @atomic_check is implemented. - * * This function is used by the atomic helpers, but it is optional. * * NOTE: @@ -820,12 +681,6 @@ struct drm_encoder_helper_funcs { * state objects passed-in or assembled in the overall &drm_atomic_state * update tracking structure. 
* - * Also beware that userspace can request its own custom modes, neither - * core nor helpers filter modes to the list of probe modes reported by - * the GETCONNECTOR IOCTL and stored in &drm_connector.modes. To ensure - * that modes are filtered consistently put any encoder constraints and - * limits checks into @mode_valid. - * * RETURNS: * * 0 on success, -EINVAL if the state or the transition can't be @@ -836,7 +691,7 @@ struct drm_encoder_helper_funcs { int (*atomic_check)(struct drm_encoder *encoder, struct drm_crtc_state *crtc_state, struct drm_connector_state *conn_state); -}; +} __no_const; /** * drm_encoder_helper_add - sets the helper vtable for an encoder @@ -860,74 +715,38 @@ struct drm_connector_helper_funcs { * @get_modes: * * This function should fill in all modes currently valid for the sink - * into the &drm_connector.probed_modes list. It should also update the - * EDID property by calling drm_connector_update_edid_property(). + * into the connector->probed_modes list. It should also update the + * EDID property by calling drm_mode_connector_update_edid_property(). * * The usual way to implement this is to cache the EDID retrieved in the * probe callback somewhere in the driver-private connector structure. * In this function drivers then parse the modes in the EDID and add - * them by calling drm_add_edid_modes(). But connectors that drive a + * them by calling drm_add_edid_modes(). But connectors that driver a * fixed panel can also manually add specific modes using * drm_mode_probed_add(). Drivers which manually add modes should also - * make sure that the &drm_connector.display_info, - * &drm_connector.width_mm and &drm_connector.height_mm fields are - * filled in. - * - * Note that the caller function will automatically add standard VESA - * DMT modes up to 1024x768 if the .get_modes() helper operation returns - * no mode and if the connector status is connector_status_connected or - * connector_status_unknown. 
There is no need to call - * drm_add_modes_noedid() manually in that case. + * make sure that the @display_info, @width_mm and @height_mm fields of the + * struct &drm_connector are filled in. * * Virtual drivers that just want some standard VESA mode with a given * resolution can call drm_add_modes_noedid(), and mark the preferred * one using drm_set_preferred_mode(). * - * This function is only called after the @detect hook has indicated + * Finally drivers that support audio probably want to update the ELD + * data, too, using drm_edid_to_eld(). + * + * This function is only called after the ->detect() hook has indicated * that a sink is connected and when the EDID isn't overridden through * sysfs or the kernel commandline. * * This callback is used by the probe helpers in e.g. * drm_helper_probe_single_connector_modes(). * - * To avoid races with concurrent connector state updates, the helper - * libraries always call this with the &drm_mode_config.connection_mutex - * held. Because of this it's safe to inspect &drm_connector->state. - * * RETURNS: * * The number of modes added by calling drm_mode_probed_add(). */ int (*get_modes)(struct drm_connector *connector); - /** - * @detect_ctx: - * - * Check to see if anything is attached to the connector. The parameter - * force is set to false whilst polling, true when checking the - * connector due to a user request. force can be used by the driver to - * avoid expensive, destructive operations during automated probing. - * - * This callback is optional, if not implemented the connector will be - * considered as always being attached. - * - * This is the atomic version of &drm_connector_funcs.detect. - * - * To avoid races against concurrent connector state updates, the - * helper libraries always call this with ctx set to a valid context, - * and &drm_mode_config.connection_mutex will always be locked with - * the ctx parameter set to this ctx. This allows taking additional - * locks as required. 
- * - * RETURNS: - * - * &drm_connector_status indicating the connector's status, - * or the error code returned by drm_modeset_lock(), -EDEADLK. - */ - int (*detect_ctx)(struct drm_connector *connector, - struct drm_modeset_acquire_ctx *ctx, - bool force); - /** * @mode_valid: * @@ -938,74 +757,21 @@ struct drm_connector_helper_funcs { * (which is usually derived from the EDID data block from the sink). * See e.g. drm_helper_probe_single_connector_modes(). * - * This function is optional. - * * NOTE: * * This only filters the mode list supplied to userspace in the - * GETCONNECTOR IOCTL. Compared to &drm_encoder_helper_funcs.mode_valid, - * &drm_crtc_helper_funcs.mode_valid and &drm_bridge_funcs.mode_valid, - * which are also called by the atomic helpers from - * drm_atomic_helper_check_modeset(). This allows userspace to force and - * ignore sink constraint (like the pixel clock limits in the screen's - * EDID), which is useful for e.g. testing, or working around a broken - * EDID. Any source hardware constraint (which always need to be - * enforced) therefore should be checked in one of the above callbacks, - * and not this one here. + * GETCONNECOTR IOCTL. Userspace is free to create modes of its own and + * ask the kernel to use them. It this case the atomic helpers or legacy + * CRTC helpers will not call this function. Drivers therefore must + * still fully validate any mode passed in in a modeset request. * - * To avoid races with concurrent connector state updates, the helper - * libraries always call this with the &drm_mode_config.connection_mutex - * held. Because of this it's safe to inspect &drm_connector->state. - * * RETURNS: * - * Either &drm_mode_status.MODE_OK or one of the failure reasons in &enum - * drm_mode_status. + * Either MODE_OK or one of the failure reasons in enum + * &drm_mode_status. 
*/ enum drm_mode_status (*mode_valid)(struct drm_connector *connector, struct drm_display_mode *mode); - - /** - * @mode_valid_ctx: - * - * Callback to validate a mode for a connector, irrespective of the - * specific display configuration. - * - * This callback is used by the probe helpers to filter the mode list - * (which is usually derived from the EDID data block from the sink). - * See e.g. drm_helper_probe_single_connector_modes(). - * - * This function is optional, and is the atomic version of - * &drm_connector_helper_funcs.mode_valid. - * - * To allow for accessing the atomic state of modesetting objects, the - * helper libraries always call this with ctx set to a valid context, - * and &drm_mode_config.connection_mutex will always be locked with - * the ctx parameter set to @ctx. This allows for taking additional - * locks as required. - * - * Even though additional locks may be acquired, this callback is - * still expected not to take any constraints into account which would - * be influenced by the currently set display state - such constraints - * should be handled in the driver's atomic check. For example, if a - * connector shares display bandwidth with other connectors then it - * would be ok to validate the minimum bandwidth requirement of a mode - * against the maximum possible bandwidth of the connector. But it - * wouldn't be ok to take the current bandwidth usage of other - * connectors into account, as this would change depending on the - * display state. - * - * Returns: - * 0 if &drm_connector_helper_funcs.mode_valid_ctx succeeded and wrote - * the &enum drm_mode_status value to @status, or a negative error - * code otherwise. - * - */ - int (*mode_valid_ctx)(struct drm_connector *connector, - struct drm_display_mode *mode, - struct drm_modeset_acquire_ctx *ctx, - enum drm_mode_status *status); - /** * @best_encoder: * @@ -1024,8 +790,9 @@ struct drm_connector_helper_funcs { * @atomic_best_encoder. 
* * You can leave this function to NULL if the connector is only - * attached to a single encoder. In this case, the core will call - * drm_connector_get_single_encoder() for you. + * attached to a single encoder and you are using the atomic helpers. + * In this case, the core will call drm_atomic_helper_best_encoder() + * for you. * * RETURNS: * @@ -1045,40 +812,7 @@ struct drm_connector_helper_funcs { * * This function is used by drm_atomic_helper_check_modeset(). * If it is not implemented, the core will fallback to @best_encoder - * (or drm_connector_get_single_encoder() if @best_encoder is NULL). - * - * NOTE: - * - * This function is called in the check phase of an atomic update. The - * driver is not allowed to change anything outside of the - * &drm_atomic_state update tracking structure passed in. - * - * RETURNS: - * - * Encoder that should be used for the given connector and connector - * state, or NULL if no suitable encoder exists. Note that the helpers - * will ensure that encoders aren't used twice, drivers should not check - * for this. - */ - struct drm_encoder *(*atomic_best_encoder)(struct drm_connector *connector, - struct drm_atomic_state *state); - - /** - * @atomic_check: - * - * This hook is used to validate connector state. This function is - * called from &drm_atomic_helper_check_modeset, and is called when - * a connector property is set, or a modeset on the crtc is forced. - * - * Because &drm_atomic_helper_check_modeset may be called multiple times, - * this function should handle being called multiple times as well. - * - * This function is also allowed to inspect any other object's state and - * can add more state objects to the atomic commit if needed. Care must - * be taken though to ensure that state check and compute functions for - * these added states are all called, and derived state in other objects - * all updated. Again the recommendation is to just call check helpers - * until a maximal configuration is reached. 
+ * (or drm_atomic_helper_best_encoder() if @best_encoder is NULL). * * NOTE: * @@ -1089,61 +823,15 @@ struct drm_connector_helper_funcs { * * RETURNS: * - * 0 on success, -EINVAL if the state or the transition can't be - * supported, -ENOMEM on memory allocation failure and -EDEADLK if an - * attempt to obtain another state object ran into a &drm_modeset_lock - * deadlock. + * Encoder that should be used for the given connector and connector + * state, or NULL if no suitable encoder exists. Note that the helpers + * will ensure that encoders aren't used twice, drivers should not check + * for this. */ - int (*atomic_check)(struct drm_connector *connector, - struct drm_atomic_state *state); - - /** - * @atomic_commit: - * - * This hook is to be used by drivers implementing writeback connectors - * that need a point when to commit the writeback job to the hardware. - * The writeback_job to commit is available in the new connector state, - * in &drm_connector_state.writeback_job. - * - * This hook is optional. - * - * This callback is used by the atomic modeset helpers. - */ - void (*atomic_commit)(struct drm_connector *connector, - struct drm_atomic_state *state); - - /** - * @prepare_writeback_job: - * - * As writeback jobs contain a framebuffer, drivers may need to - * prepare and clean them up the same way they can prepare and - * clean up framebuffers for planes. This optional connector operation - * is used to support the preparation of writeback jobs. The job - * prepare operation is called from drm_atomic_helper_prepare_planes() - * for struct &drm_writeback_connector connectors only. - * - * This operation is optional. - * - * This callback is used by the atomic modeset helpers. - */ - int (*prepare_writeback_job)(struct drm_writeback_connector *connector, - struct drm_writeback_job *job); - /** - * @cleanup_writeback_job: - * - * This optional connector operation is used to support the - * cleanup of writeback jobs. 
The job cleanup operation is called - * from the existing drm_writeback_cleanup_job() function, invoked - * both when destroying the job as part of an aborted commit, or when - * the job completes. - * - * This operation is optional. - * - * This callback is used by the atomic modeset helpers. - */ - void (*cleanup_writeback_job)(struct drm_writeback_connector *connector, - struct drm_writeback_job *job); + struct drm_encoder *(*atomic_best_encoder)(struct drm_connector *connector, + struct drm_connector_state *connector_state); }; +typedef struct drm_connector_helper_funcs __no_const drm_connector_helper_funcs_no_const; /** * drm_connector_helper_add - sets the helper vtable for a connector @@ -1167,23 +855,17 @@ struct drm_plane_helper_funcs { * @prepare_fb: * * This hook is to prepare a framebuffer for scanout by e.g. pinning - * its backing storage or relocating it into a contiguous block of + * it's backing storage or relocating it into a contiguous block of * VRAM. Other possible preparatory work includes flushing caches. * * This function must not block for outstanding rendering, since it is * called in the context of the atomic IOCTL even for async commits to * be able to return any errors to userspace. Instead the recommended - * way is to fill out the &drm_plane_state.fence of the passed-in + * way is to fill out the fence member of the passed-in * &drm_plane_state. If the driver doesn't support native fences then * equivalent functionality should be implemented through private * members in the plane structure. * - * For GEM drivers who neither have a @prepare_fb nor @cleanup_fb hook - * set drm_gem_plane_helper_prepare_fb() is called automatically to - * implement this. Other drivers which need additional plane processing - * can call drm_gem_plane_helper_prepare_fb() from their @prepare_fb - * hook. - * * The helpers will call @cleanup_fb with matching arguments for every * successful call to this hook. 
* @@ -1193,7 +875,7 @@ struct drm_plane_helper_funcs { * RETURNS: * * 0 on success or one of the following negative error codes allowed by - * the &drm_mode_config_funcs.atomic_commit vfunc. When using helpers + * the atomic_commit hook in &drm_mode_config_funcs. When using helpers * this callback is the only one which can fail an atomic commit, * everything else must complete successfully. */ @@ -1216,7 +898,7 @@ struct drm_plane_helper_funcs { * * Drivers should check plane specific constraints in this hook. * - * When using drm_atomic_helper_check_planes() plane's @atomic_check + * When using drm_atomic_helper_check_planes() plane's ->atomic_check() * hooks are called before the ones for CRTCs, which allows drivers to * request shared resources that the CRTC controls here. For more * complicated dependencies the driver can call the provided check helpers @@ -1225,7 +907,7 @@ struct drm_plane_helper_funcs { * * This function is also allowed to inspect any other object's state and * can add more state objects to the atomic commit if needed. Care must - * be taken though to ensure that state check and compute functions for + * be taken though to ensure that state check&compute functions for * these added states are all called, and derived state in other objects * all updated. Again the recommendation is to just call check helpers * until a maximal configuration is reached. @@ -1236,8 +918,9 @@ struct drm_plane_helper_funcs { * NOTE: * * This function is called in the check phase of an atomic update. The - * driver is not allowed to change anything outside of the - * &drm_atomic_state update tracking structure. + * driver is not allowed to change anything outside of the free-standing + * state objects passed-in or assembled in the overall &drm_atomic_state + * update tracking structure. * * RETURNS: * @@ -1247,105 +930,48 @@ struct drm_plane_helper_funcs { * deadlock. 
*/ int (*atomic_check)(struct drm_plane *plane, - struct drm_atomic_state *state); + struct drm_plane_state *state); /** * @atomic_update: * * Drivers should use this function to update the plane state. This - * hook is called in-between the &drm_crtc_helper_funcs.atomic_begin and - * drm_crtc_helper_funcs.atomic_flush callbacks. + * hook is called in-between the ->atomic_begin() and + * ->atomic_flush() of &drm_crtc_helper_funcs. * * Note that the power state of the display pipe when this function is * called depends upon the exact helpers and calling sequence the driver - * has picked. See drm_atomic_helper_commit_planes() for a discussion of - * the tradeoffs and variants of plane commit helpers. + * has picked. See drm_atomic_commit_planes() for a discussion of the + * tradeoffs and variants of plane commit helpers. * * This callback is used by the atomic modeset helpers and by the * transitional plane helpers, but it is optional. */ void (*atomic_update)(struct drm_plane *plane, - struct drm_atomic_state *state); + struct drm_plane_state *old_state); /** * @atomic_disable: * * Drivers should use this function to unconditionally disable a plane. - * This hook is called in-between the - * &drm_crtc_helper_funcs.atomic_begin and - * drm_crtc_helper_funcs.atomic_flush callbacks. It is an alternative to + * This hook is called in-between the ->atomic_begin() and + * ->atomic_flush() of &drm_crtc_helper_funcs. It is an alternative to * @atomic_update, which will be called for disabling planes, too, if * the @atomic_disable hook isn't implemented. * * This hook is also useful to disable planes in preparation of a modeset, * by calling drm_atomic_helper_disable_planes_on_crtc() from the - * &drm_crtc_helper_funcs.disable hook. + * ->disable() hook in &drm_crtc_helper_funcs. * * Note that the power state of the display pipe when this function is * called depends upon the exact helpers and calling sequence the driver - * has picked. 
See drm_atomic_helper_commit_planes() for a discussion of - * the tradeoffs and variants of plane commit helpers. + * has picked. See drm_atomic_commit_planes() for a discussion of the + * tradeoffs and variants of plane commit helpers. * * This callback is used by the atomic modeset helpers and by the * transitional plane helpers, but it is optional. */ void (*atomic_disable)(struct drm_plane *plane, - struct drm_atomic_state *state); - - /** - * @atomic_async_check: - * - * Drivers should set this function pointer to check if the plane's - * atomic state can be updated in a async fashion. Here async means - * "not vblank synchronized". - * - * This hook is called by drm_atomic_async_check() to establish if a - * given update can be committed asynchronously, that is, if it can - * jump ahead of the state currently queued for update. - * - * RETURNS: - * - * Return 0 on success and any error returned indicates that the update - * can not be applied in asynchronous manner. - */ - int (*atomic_async_check)(struct drm_plane *plane, - struct drm_atomic_state *state); - - /** - * @atomic_async_update: - * - * Drivers should set this function pointer to perform asynchronous - * updates of planes, that is, jump ahead of the currently queued - * state and update the plane. Here async means "not vblank - * synchronized". - * - * This hook is called by drm_atomic_helper_async_commit(). - * - * An async update will happen on legacy cursor updates. An async - * update won't happen if there is an outstanding commit modifying - * the same plane. - * - * When doing async_update drivers shouldn't replace the - * &drm_plane_state but update the current one with the new plane - * configurations in the new plane_state. - * - * Drivers should also swap the framebuffers between current plane - * state (&drm_plane.state) and new_state. - * This is required since cleanup for async commits is performed on - * the new state, rather than old state like for traditional commits. 
- * Since we want to give up the reference on the current (old) fb - * instead of our brand new one, swap them in the driver during the - * async commit. - * - * FIXME: - * - It only works for single plane updates - * - Async Pageflips are not supported yet - * - Some hw might still scan out the old buffer until the next - * vblank, however we let go of the fb references as soon as - * we run this hook. For now drivers must implement their own workers - * for deferring if needed, until a common solution is created. - */ - void (*atomic_async_update)(struct drm_plane *plane, - struct drm_atomic_state *state); + struct drm_plane_state *old_state); }; /** @@ -1374,20 +1000,15 @@ struct drm_mode_config_helper_funcs { * to implement blocking and nonblocking commits easily. It is not used * by the atomic helpers * - * This function is called when the new atomic state has already been - * swapped into the various state pointers. The passed in state - * therefore contains copies of the old/previous state. This hook should - * commit the new state into hardware. Note that the helpers have - * already waited for preceeding atomic commits and fences, but drivers - * can add more waiting calls at the start of their implementation, e.g. - * to wait for driver-internal request for implicit syncing, before - * starting to commit the update to the hardware. + * This hook should first commit the given atomic state to the hardware. + * But drivers can add more waiting calls at the start of their + * implementation, e.g. to wait for driver-internal request for implicit + * syncing, before starting to commit the update to the hardware. * * After the atomic update is committed to the hardware this hook needs * to call drm_atomic_helper_commit_hw_done(). 
Then wait for the upate * to be executed by the hardware, for example using - * drm_atomic_helper_wait_for_vblanks() or - * drm_atomic_helper_wait_for_flip_done(), and then clean up the old + * drm_atomic_helper_wait_for_vblanks(), and then clean up the old * framebuffers using drm_atomic_helper_cleanup_planes(). * * When disabling a CRTC this hook _must_ stall for the commit to @@ -1401,27 +1022,6 @@ struct drm_mode_config_helper_funcs { * drm_atomic_helper_commit_tail(). */ void (*atomic_commit_tail)(struct drm_atomic_state *state); - - /** - * @atomic_commit_setup: - * - * This hook is used by the default atomic_commit() hook implemented in - * drm_atomic_helper_commit() together with the nonblocking helpers (see - * drm_atomic_helper_setup_commit()) to extend the DRM commit setup. It - * is not used by the atomic helpers. - * - * This function is called at the end of - * drm_atomic_helper_setup_commit(), so once the commit has been - * properly setup across the generic DRM object states. It allows - * drivers to do some additional commit tracking that isn't related to a - * CRTC, plane or connector, tracked in a &drm_private_obj structure. - * - * Note that the documentation of &drm_private_obj has more details on - * how one should implement this. - * - * This hook is optional. - */ - int (*atomic_commit_setup)(struct drm_atomic_state *state); }; #endif diff --git a/include/drm/drm_modeset_lock.h b/include/drm/drm_modeset_lock.h index aafd07388e..c5576fbcb9 100644 --- a/include/drm/drm_modeset_lock.h +++ b/include/drm/drm_modeset_lock.h @@ -34,7 +34,6 @@ struct drm_modeset_lock; * @contended: used internally for -EDEADLK handling * @locked: list of held locks * @trylock_only: trylock mode used in atomic contexts/panic notifiers - * @interruptible: whether interruptible locking should be used. * * Each thread competing for a set of locks must use one acquire * ctx. 
And if any lock fxn returns -EDEADLK, it must backoff and @@ -60,15 +59,12 @@ struct drm_modeset_acquire_ctx { * Trylock mode, use only for panic handlers! */ bool trylock_only; - - /* Perform interruptible waits on this context. */ - bool interruptible; }; /** * struct drm_modeset_lock - used for locking modeset resources. * @mutex: resource locking - * @head: used to hold its place on &drm_atomi_state.locked list when + * @head: used to hold it's place on state->locked list when * part of an atomic update * * Used for locking CRTCs and other modeset resources. @@ -86,15 +82,24 @@ struct drm_modeset_lock { struct list_head head; }; -#define DRM_MODESET_ACQUIRE_INTERRUPTIBLE BIT(0) +extern struct ww_class crtc_ww_class; void drm_modeset_acquire_init(struct drm_modeset_acquire_ctx *ctx, uint32_t flags); void drm_modeset_acquire_fini(struct drm_modeset_acquire_ctx *ctx); void drm_modeset_drop_locks(struct drm_modeset_acquire_ctx *ctx); -int drm_modeset_backoff(struct drm_modeset_acquire_ctx *ctx); +void drm_modeset_backoff(struct drm_modeset_acquire_ctx *ctx); +int drm_modeset_backoff_interruptible(struct drm_modeset_acquire_ctx *ctx); -void drm_modeset_lock_init(struct drm_modeset_lock *lock); +/** + * drm_modeset_lock_init - initialize lock + * @lock: lock to init + */ +static inline void drm_modeset_lock_init(struct drm_modeset_lock *lock) +{ + ww_mutex_init(&lock->mutex, &crtc_ww_class); + INIT_LIST_HEAD(&lock->head); +} /** * drm_modeset_lock_fini - cleanup lock @@ -114,18 +119,10 @@ static inline bool drm_modeset_is_locked(struct drm_modeset_lock *lock) return ww_mutex_is_locked(&lock->mutex); } -/** - * drm_modeset_lock_assert_held - equivalent to lockdep_assert_held() - * @lock: lock to check - */ -static inline void drm_modeset_lock_assert_held(struct drm_modeset_lock *lock) -{ - lockdep_assert_held(&lock->mutex.base); -} - int drm_modeset_lock(struct drm_modeset_lock *lock, struct drm_modeset_acquire_ctx *ctx); -int __must_check 
drm_modeset_lock_single_interruptible(struct drm_modeset_lock *lock); +int drm_modeset_lock_interruptible(struct drm_modeset_lock *lock, + struct drm_modeset_acquire_ctx *ctx); void drm_modeset_unlock(struct drm_modeset_lock *lock); struct drm_device; @@ -134,73 +131,14 @@ struct drm_plane; void drm_modeset_lock_all(struct drm_device *dev); void drm_modeset_unlock_all(struct drm_device *dev); +void drm_modeset_lock_crtc(struct drm_crtc *crtc, + struct drm_plane *plane); +void drm_modeset_unlock_crtc(struct drm_crtc *crtc); void drm_warn_on_modeset_not_all_locked(struct drm_device *dev); +struct drm_modeset_acquire_ctx * +drm_modeset_legacy_acquire_ctx(struct drm_crtc *crtc); int drm_modeset_lock_all_ctx(struct drm_device *dev, struct drm_modeset_acquire_ctx *ctx); -/** - * DRM_MODESET_LOCK_ALL_BEGIN - Helper to acquire modeset locks - * @dev: drm device - * @ctx: local modeset acquire context, will be dereferenced - * @flags: DRM_MODESET_ACQUIRE_* flags to pass to drm_modeset_acquire_init() - * @ret: local ret/err/etc variable to track error status - * - * Use these macros to simplify grabbing all modeset locks using a local - * context. This has the advantage of reducing boilerplate, but also properly - * checking return values where appropriate. - * - * Any code run between BEGIN and END will be holding the modeset locks. - * - * This must be paired with DRM_MODESET_LOCK_ALL_END(). We will jump back and - * forth between the labels on deadlock and error conditions. - * - * Drivers can acquire additional modeset locks. If any lock acquisition - * fails, the control flow needs to jump to DRM_MODESET_LOCK_ALL_END() with - * the @ret parameter containing the return value of drm_modeset_lock(). 
- * - * Returns: - * The only possible value of ret immediately after DRM_MODESET_LOCK_ALL_BEGIN() - * is 0, so no error checking is necessary - */ -#define DRM_MODESET_LOCK_ALL_BEGIN(dev, ctx, flags, ret) \ - if (!drm_drv_uses_atomic_modeset(dev)) \ - mutex_lock(&dev->mode_config.mutex); \ - drm_modeset_acquire_init(&ctx, flags); \ -modeset_lock_retry: \ - ret = drm_modeset_lock_all_ctx(dev, &ctx); \ - if (ret) \ - goto modeset_lock_fail; - -/** - * DRM_MODESET_LOCK_ALL_END - Helper to release and cleanup modeset locks - * @dev: drm device - * @ctx: local modeset acquire context, will be dereferenced - * @ret: local ret/err/etc variable to track error status - * - * The other side of DRM_MODESET_LOCK_ALL_BEGIN(). It will bounce back to BEGIN - * if ret is -EDEADLK. - * - * It's important that you use the same ret variable for begin and end so - * deadlock conditions are properly handled. - * - * Returns: - * ret will be untouched unless it is -EDEADLK on entry. That means that if you - * successfully acquire the locks, ret will be whatever your code sets it to. If - * there is a deadlock or other failure with acquire or backoff, ret will be set - * to that failure. In both of these cases the code between BEGIN/END will not - * be run, so the failure will reflect the inability to grab the locks. 
- */ -#define DRM_MODESET_LOCK_ALL_END(dev, ctx, ret) \ -modeset_lock_fail: \ - if (ret == -EDEADLK) { \ - ret = drm_modeset_backoff(&ctx); \ - if (!ret) \ - goto modeset_lock_retry; \ - } \ - drm_modeset_drop_locks(&ctx); \ - drm_modeset_acquire_fini(&ctx); \ - if (!drm_drv_uses_atomic_modeset(dev)) \ - mutex_unlock(&dev->mode_config.mutex); - #endif /* DRM_MODESET_LOCK_H_ */ diff --git a/include/drm/drm_of.h b/include/drm/drm_of.h index b9b093add9..3fd87b386e 100644 --- a/include/drm/drm_of.h +++ b/include/drm/drm_of.h @@ -1,75 +1,30 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef __DRM_OF_H__ #define __DRM_OF_H__ #include -#if IS_ENABLED(CONFIG_OF) && IS_ENABLED(CONFIG_DRM_PANEL_BRIDGE) -#include -#endif struct component_master_ops; -struct component_match; struct device; struct drm_device; struct drm_encoder; -struct drm_panel; -struct drm_bridge; struct device_node; -/** - * enum drm_lvds_dual_link_pixels - Pixel order of an LVDS dual-link connection - * @DRM_LVDS_DUAL_LINK_EVEN_ODD_PIXELS: Even pixels are expected to be generated - * from the first port, odd pixels from the second port - * @DRM_LVDS_DUAL_LINK_ODD_EVEN_PIXELS: Odd pixels are expected to be generated - * from the first port, even pixels from the second port - */ -enum drm_lvds_dual_link_pixels { - DRM_LVDS_DUAL_LINK_EVEN_ODD_PIXELS = 0, - DRM_LVDS_DUAL_LINK_ODD_EVEN_PIXELS = 1, -}; - #ifdef CONFIG_OF -uint32_t drm_of_crtc_port_mask(struct drm_device *dev, - struct device_node *port); -uint32_t drm_of_find_possible_crtcs(struct drm_device *dev, - struct device_node *port); -void drm_of_component_match_add(struct device *master, - struct component_match **matchptr, - int (*compare)(struct device *, void *), - struct device_node *node); -int drm_of_component_probe(struct device *dev, - int (*compare_of)(struct device *, void *), - const struct component_master_ops *m_ops); -int drm_of_encoder_active_endpoint(struct device_node *node, - struct drm_encoder *encoder, - struct of_endpoint 
*endpoint); -int drm_of_find_panel_or_bridge(const struct device_node *np, - int port, int endpoint, - struct drm_panel **panel, - struct drm_bridge **bridge); -int drm_of_lvds_get_dual_link_pixel_order(const struct device_node *port1, - const struct device_node *port2); +extern uint32_t drm_of_find_possible_crtcs(struct drm_device *dev, + struct device_node *port); +extern int drm_of_component_probe(struct device *dev, + int (*compare_of)(struct device *, void *), + const struct component_master_ops *m_ops); +extern int drm_of_encoder_active_endpoint(struct device_node *node, + struct drm_encoder *encoder, + struct of_endpoint *endpoint); #else -static inline uint32_t drm_of_crtc_port_mask(struct drm_device *dev, - struct device_node *port) -{ - return 0; -} - static inline uint32_t drm_of_find_possible_crtcs(struct drm_device *dev, struct device_node *port) { return 0; } -static inline void -drm_of_component_match_add(struct device *master, - struct component_match **matchptr, - int (*compare)(struct device *, void *), - struct device_node *node) -{ -} - static inline int drm_of_component_probe(struct device *dev, int (*compare_of)(struct device *, void *), @@ -84,50 +39,8 @@ static inline int drm_of_encoder_active_endpoint(struct device_node *node, { return -EINVAL; } -static inline int drm_of_find_panel_or_bridge(const struct device_node *np, - int port, int endpoint, - struct drm_panel **panel, - struct drm_bridge **bridge) -{ - return -EINVAL; -} - -static inline int -drm_of_lvds_get_dual_link_pixel_order(const struct device_node *port1, - const struct device_node *port2) -{ - return -EINVAL; -} #endif -/* - * drm_of_panel_bridge_remove - remove panel bridge - * @np: device tree node containing panel bridge output ports - * - * Remove the panel bridge of a given DT node's port and endpoint number - * - * Returns zero if successful, or one of the standard error codes if it fails. 
- */ -static inline int drm_of_panel_bridge_remove(const struct device_node *np, - int port, int endpoint) -{ -#if IS_ENABLED(CONFIG_OF) && IS_ENABLED(CONFIG_DRM_PANEL_BRIDGE) - struct drm_bridge *bridge; - struct device_node *remote; - - remote = of_graph_get_remote_node(np, port, endpoint); - if (!remote) - return -ENODEV; - - bridge = of_drm_find_bridge(remote); - drm_panel_bridge_remove(bridge); - - return 0; -#else - return -EINVAL; -#endif -} - static inline int drm_of_encoder_active_endpoint_id(struct device_node *node, struct drm_encoder *encoder) { diff --git a/include/drm/drm_os_linux.h b/include/drm/drm_os_linux.h new file mode 100644 index 0000000000..86ab99bc0a --- /dev/null +++ b/include/drm/drm_os_linux.h @@ -0,0 +1,65 @@ +/** + * \file drm_os_linux.h + * OS abstraction macros. + */ + +#include /* For task queue support */ +#include + +#ifndef readq +static inline u64 readq(void __iomem *reg) +{ + return ((u64) readl(reg)) | (((u64) readl(reg + 4UL)) << 32); +} + +static inline void writeq(u64 val, void __iomem *reg) +{ + writel(val & 0xffffffff, reg); + writel(val >> 32, reg + 0x4UL); +} +#endif + +/** Current process ID */ +#define DRM_CURRENTPID task_pid_nr(current) +#define DRM_UDELAY(d) udelay(d) +/** Read a byte from a MMIO region */ +#define DRM_READ8(map, offset) readb(((void __iomem *)(map)->handle) + (offset)) +/** Read a word from a MMIO region */ +#define DRM_READ16(map, offset) readw(((void __iomem *)(map)->handle) + (offset)) +/** Read a dword from a MMIO region */ +#define DRM_READ32(map, offset) readl(((void __iomem *)(map)->handle) + (offset)) +/** Write a byte into a MMIO region */ +#define DRM_WRITE8(map, offset, val) writeb(val, ((void __iomem *)(map)->handle) + (offset)) +/** Write a word into a MMIO region */ +#define DRM_WRITE16(map, offset, val) writew(val, ((void __iomem *)(map)->handle) + (offset)) +/** Write a dword into a MMIO region */ +#define DRM_WRITE32(map, offset, val) writel(val, ((void __iomem *)(map)->handle) + 
(offset)) + +/** Read a qword from a MMIO region - be careful using these unless you really understand them */ +#define DRM_READ64(map, offset) readq(((void __iomem *)(map)->handle) + (offset)) +/** Write a qword into a MMIO region */ +#define DRM_WRITE64(map, offset, val) writeq(val, ((void __iomem *)(map)->handle) + (offset)) + +#define DRM_WAIT_ON( ret, queue, timeout, condition ) \ +do { \ + DECLARE_WAITQUEUE(entry, current); \ + unsigned long end = jiffies + (timeout); \ + add_wait_queue(&(queue), &entry); \ + \ + for (;;) { \ + __set_current_state(TASK_INTERRUPTIBLE); \ + if (condition) \ + break; \ + if (time_after_eq(jiffies, end)) { \ + ret = -EBUSY; \ + break; \ + } \ + schedule_timeout((HZ/100 > 1) ? HZ/100 : 1); \ + if (signal_pending(current)) { \ + ret = -EINTR; \ + break; \ + } \ + } \ + __set_current_state(TASK_RUNNING); \ + remove_wait_queue(&(queue), &entry); \ +} while (0) diff --git a/include/drm/drm_panel.h b/include/drm/drm_panel.h index 4602f833eb..220d1e2b3d 100644 --- a/include/drm/drm_panel.h +++ b/include/drm/drm_panel.h @@ -24,21 +24,23 @@ #ifndef __DRM_PANEL_H__ #define __DRM_PANEL_H__ -#include -#include #include -struct backlight_device; -struct device_node; struct drm_connector; struct drm_device; struct drm_panel; struct display_timing; -enum drm_panel_orientation; - /** * struct drm_panel_funcs - perform operations on a given panel + * @disable: disable panel (turn off back light, etc.) + * @unprepare: turn off panel + * @prepare: turn on panel and perform set up + * @enable: enable panel (turn on back light, etc.) + * @get_modes: add modes to the connector that the panel is attached to and + * return the number of modes added + * @get_timings: copy display timings into the provided array and return + * the number of display timings available * * The .prepare() function is typically called before the display controller * starts to transmit video data. 
Panel drivers can use this to turn the panel @@ -62,156 +64,140 @@ enum drm_panel_orientation; * * To save power when no video data is transmitted, a driver can power down * the panel. This is the job of the .unprepare() function. - * - * Backlight can be handled automatically if configured using - * drm_panel_of_backlight() or drm_panel_dp_aux_backlight(). Then the driver - * does not need to implement the functionality to enable/disable backlight. */ struct drm_panel_funcs { - /** - * @prepare: - * - * Turn on panel and perform set up. - * - * This function is optional. - */ - int (*prepare)(struct drm_panel *panel); - - /** - * @enable: - * - * Enable panel (turn on back light, etc.). - * - * This function is optional. - */ - int (*enable)(struct drm_panel *panel); - - /** - * @disable: - * - * Disable panel (turn off back light, etc.). - * - * This function is optional. - */ int (*disable)(struct drm_panel *panel); - - /** - * @unprepare: - * - * Turn off panel. - * - * This function is optional. - */ int (*unprepare)(struct drm_panel *panel); - - /** - * @get_modes: - * - * Add modes to the connector that the panel is attached to - * and returns the number of modes added. - * - * This function is mandatory. - */ - int (*get_modes)(struct drm_panel *panel, - struct drm_connector *connector); - - /** - * @get_timings: - * - * Copy display timings into the provided array and return - * the number of display timings available. - * - * This function is optional. 
- */ + int (*prepare)(struct drm_panel *panel); + int (*enable)(struct drm_panel *panel); + int (*get_modes)(struct drm_panel *panel); int (*get_timings)(struct drm_panel *panel, unsigned int num_timings, struct display_timing *timings); }; /** * struct drm_panel - DRM panel object + * @drm: DRM device owning the panel + * @connector: DRM connector that the panel is attached to + * @dev: parent device of the panel + * @funcs: operations that can be performed on the panel + * @list: panel entry in registry */ struct drm_panel { - /** - * @dev: - * - * Parent device of the panel. - */ + struct drm_device *drm; + struct drm_connector *connector; struct device *dev; - /** - * @backlight: - * - * Backlight device, used to turn on backlight after the call - * to enable(), and to turn off backlight before the call to - * disable(). - * backlight is set by drm_panel_of_backlight() or - * drm_panel_dp_aux_backlight() and drivers shall not assign it. - */ - struct backlight_device *backlight; - - /** - * @funcs: - * - * Operations that can be performed on the panel. - */ const struct drm_panel_funcs *funcs; - /** - * @connector_type: - * - * Type of the panel as a DRM_MODE_CONNECTOR_* value. This is used to - * initialise the drm_connector corresponding to the panel with the - * correct connector type. - */ - int connector_type; - - /** - * @list: - * - * Panel entry in registry. - */ struct list_head list; }; -void drm_panel_init(struct drm_panel *panel, struct device *dev, - const struct drm_panel_funcs *funcs, - int connector_type); +/** + * drm_disable_unprepare - power off a panel + * @panel: DRM panel + * + * Calling this function will completely power off a panel (assert the panel's + * reset, turn off power supplies, ...). After this function has completed, it + * is usually no longer possible to communicate with the panel until another + * call to drm_panel_prepare(). + * + * Return: 0 on success or a negative error code on failure. 
+ */ +static inline int drm_panel_unprepare(struct drm_panel *panel) +{ + if (panel && panel->funcs && panel->funcs->unprepare) + return panel->funcs->unprepare(panel); -void drm_panel_add(struct drm_panel *panel); + return panel ? -ENOSYS : -EINVAL; +} + +/** + * drm_panel_disable - disable a panel + * @panel: DRM panel + * + * This will typically turn off the panel's backlight or disable the display + * drivers. For smart panels it should still be possible to communicate with + * the integrated circuitry via any command bus after this call. + * + * Return: 0 on success or a negative error code on failure. + */ +static inline int drm_panel_disable(struct drm_panel *panel) +{ + if (panel && panel->funcs && panel->funcs->disable) + return panel->funcs->disable(panel); + + return panel ? -ENOSYS : -EINVAL; +} + +/** + * drm_panel_prepare - power on a panel + * @panel: DRM panel + * + * Calling this function will enable power and deassert any reset signals to + * the panel. After this has completed it is possible to communicate with any + * integrated circuitry via a command bus. + * + * Return: 0 on success or a negative error code on failure. + */ +static inline int drm_panel_prepare(struct drm_panel *panel) +{ + if (panel && panel->funcs && panel->funcs->prepare) + return panel->funcs->prepare(panel); + + return panel ? -ENOSYS : -EINVAL; +} + +/** + * drm_panel_enable - enable a panel + * @panel: DRM panel + * + * Calling this function will cause the panel display drivers to be turned on + * and the backlight to be enabled. Content will be visible on screen after + * this call completes. + * + * Return: 0 on success or a negative error code on failure. + */ +static inline int drm_panel_enable(struct drm_panel *panel) +{ + if (panel && panel->funcs && panel->funcs->enable) + return panel->funcs->enable(panel); + + return panel ? 
-ENOSYS : -EINVAL; +} + +/** + * drm_panel_get_modes - probe the available display modes of a panel + * @panel: DRM panel + * + * The modes probed from the panel are automatically added to the connector + * that the panel is attached to. + * + * Return: The number of modes available from the panel on success or a + * negative error code on failure. + */ +static inline int drm_panel_get_modes(struct drm_panel *panel) +{ + if (panel && panel->funcs && panel->funcs->get_modes) + return panel->funcs->get_modes(panel); + + return panel ? -ENOSYS : -EINVAL; +} + +void drm_panel_init(struct drm_panel *panel); + +int drm_panel_add(struct drm_panel *panel); void drm_panel_remove(struct drm_panel *panel); -int drm_panel_prepare(struct drm_panel *panel); -int drm_panel_unprepare(struct drm_panel *panel); +int drm_panel_attach(struct drm_panel *panel, struct drm_connector *connector); +int drm_panel_detach(struct drm_panel *panel); -int drm_panel_enable(struct drm_panel *panel); -int drm_panel_disable(struct drm_panel *panel); - -int drm_panel_get_modes(struct drm_panel *panel, struct drm_connector *connector); - -#if defined(CONFIG_OF) && defined(CONFIG_DRM_PANEL) -struct drm_panel *of_drm_find_panel(const struct device_node *np); -int of_drm_get_panel_orientation(const struct device_node *np, - enum drm_panel_orientation *orientation); +#ifdef CONFIG_OF +struct drm_panel *of_drm_find_panel(struct device_node *np); #else -static inline struct drm_panel *of_drm_find_panel(const struct device_node *np) +static inline struct drm_panel *of_drm_find_panel(struct device_node *np) { - return ERR_PTR(-ENODEV); -} - -static inline int of_drm_get_panel_orientation(const struct device_node *np, - enum drm_panel_orientation *orientation) -{ - return -ENODEV; -} -#endif - -#if IS_ENABLED(CONFIG_DRM_PANEL) && (IS_BUILTIN(CONFIG_BACKLIGHT_CLASS_DEVICE) || \ - (IS_MODULE(CONFIG_DRM) && IS_MODULE(CONFIG_BACKLIGHT_CLASS_DEVICE))) -int drm_panel_of_backlight(struct drm_panel *panel); -#else 
-static inline int drm_panel_of_backlight(struct drm_panel *panel) -{ - return 0; + return NULL; } #endif diff --git a/include/drm/drm_pciids.h b/include/drm/drm_pciids.h index b7e899ce44..8bc073d297 100644 --- a/include/drm/drm_pciids.h +++ b/include/drm/drm_pciids.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: MIT */ #define radeon_PCI_IDS \ {0x1002, 0x1304, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ {0x1002, 0x1305, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ diff --git a/include/drm/drm_plane.h b/include/drm/drm_plane.h index fed97e3562..8b4dc62470 100644 --- a/include/drm/drm_plane.h +++ b/include/drm/drm_plane.h @@ -26,244 +26,74 @@ #include #include #include -#include -#include -#include -#include struct drm_crtc; -struct drm_printer; -struct drm_modeset_acquire_ctx; - -enum drm_scaling_filter { - DRM_SCALING_FILTER_DEFAULT, - DRM_SCALING_FILTER_NEAREST_NEIGHBOR, -}; /** * struct drm_plane_state - mutable plane state - * - * Please not that the destination coordinates @crtc_x, @crtc_y, @crtc_h and - * @crtc_w and the source coordinates @src_x, @src_y, @src_h and @src_w are the - * raw coordinates provided by userspace. Drivers should use - * drm_atomic_helper_check_plane_state() and only use the derived rectangles in - * @src and @dst to program the hardware. 
+ * @plane: backpointer to the plane + * @crtc: currently bound CRTC, NULL if disabled + * @fb: currently bound framebuffer + * @fence: optional fence to wait for before scanning out @fb + * @crtc_x: left position of visible portion of plane on crtc + * @crtc_y: upper position of visible portion of plane on crtc + * @crtc_w: width of visible portion of plane on crtc + * @crtc_h: height of visible portion of plane on crtc + * @src_x: left position of visible portion of plane within + * plane (in 16.16) + * @src_y: upper position of visible portion of plane within + * plane (in 16.16) + * @src_w: width of visible portion of plane (in 16.16) + * @src_h: height of visible portion of plane (in 16.16) + * @rotation: rotation of the plane + * @zpos: priority of the given plane on crtc (optional) + * Note that multiple active planes on the same crtc can have an identical + * zpos value. The rule to solving the conflict is to compare the plane + * object IDs; the plane with a higher ID must be stacked on top of a + * plane with a lower ID. + * @normalized_zpos: normalized value of zpos: unique, range from 0 to N-1 + * where N is the number of active planes for given crtc. Note that + * the driver must call drm_atomic_normalize_zpos() to update this before + * it can be trusted. + * @src: clipped source coordinates of the plane (in 16.16) + * @dst: clipped destination coordinates of the plane + * @visible: visibility of the plane + * @state: backpointer to global drm_atomic_state */ struct drm_plane_state { - /** @plane: backpointer to the plane */ struct drm_plane *plane; - /** - * @crtc: - * - * Currently bound CRTC, NULL if disabled. 
Do not this write directly, - * use drm_atomic_set_crtc_for_plane() - */ - struct drm_crtc *crtc; + struct drm_crtc *crtc; /* do not write directly, use drm_atomic_set_crtc_for_plane() */ + struct drm_framebuffer *fb; /* do not write directly, use drm_atomic_set_fb_for_plane() */ + struct fence *fence; - /** - * @fb: - * - * Currently bound framebuffer. Do not write this directly, use - * drm_atomic_set_fb_for_plane() - */ - struct drm_framebuffer *fb; - - /** - * @fence: - * - * Optional fence to wait for before scanning out @fb. The core atomic - * code will set this when userspace is using explicit fencing. Do not - * write this field directly for a driver's implicit fence, use - * drm_atomic_set_fence_for_plane() to ensure that an explicit fence is - * preserved. - * - * Drivers should store any implicit fence in this from their - * &drm_plane_helper_funcs.prepare_fb callback. See drm_gem_plane_helper_prepare_fb() - * and drm_gem_simple_display_pipe_prepare_fb() for suitable helpers. - */ - struct dma_fence *fence; - - /** - * @crtc_x: - * - * Left position of visible portion of plane on crtc, signed dest - * location allows it to be partially off screen. - */ - - int32_t crtc_x; - /** - * @crtc_y: - * - * Upper position of visible portion of plane on crtc, signed dest - * location allows it to be partially off screen. - */ - int32_t crtc_y; - - /** @crtc_w: width of visible portion of plane on crtc */ - /** @crtc_h: height of visible portion of plane on crtc */ + /* Signed dest location allows it to be partially off screen */ + int32_t crtc_x, crtc_y; uint32_t crtc_w, crtc_h; - /** - * @src_x: left position of visible portion of plane within plane (in - * 16.16 fixed point). - */ - uint32_t src_x; - /** - * @src_y: upper position of visible portion of plane within plane (in - * 16.16 fixed point). 
- */ - uint32_t src_y; - /** @src_w: width of visible portion of plane (in 16.16) */ - /** @src_h: height of visible portion of plane (in 16.16) */ + /* Source values are 16.16 fixed point */ + uint32_t src_x, src_y; uint32_t src_h, src_w; - /** - * @alpha: - * Opacity of the plane with 0 as completely transparent and 0xffff as - * completely opaque. See drm_plane_create_alpha_property() for more - * details. - */ - u16 alpha; - - /** - * @pixel_blend_mode: - * The alpha blending equation selection, describing how the pixels from - * the current plane are composited with the background. Value can be - * one of DRM_MODE_BLEND_* - */ - uint16_t pixel_blend_mode; - - /** - * @rotation: - * Rotation of the plane. See drm_plane_create_rotation_property() for - * more details. - */ + /* Plane rotation */ unsigned int rotation; - /** - * @zpos: - * Priority of the given plane on crtc (optional). - * - * User-space may set mutable zpos properties so that multiple active - * planes on the same CRTC have identical zpos values. This is a - * user-space bug, but drivers can solve the conflict by comparing the - * plane object IDs; the plane with a higher ID is stacked on top of a - * plane with a lower ID. - * - * See drm_plane_create_zpos_property() and - * drm_plane_create_zpos_immutable_property() for more details. - */ + /* Plane zpos */ unsigned int zpos; - - /** - * @normalized_zpos: - * Normalized value of zpos: unique, range from 0 to N-1 where N is the - * number of active planes for given crtc. Note that the driver must set - * &drm_mode_config.normalize_zpos or call drm_atomic_normalize_zpos() to - * update this before it can be trusted. 
- */ unsigned int normalized_zpos; - /** - * @color_encoding: - * - * Color encoding for non RGB formats - */ - enum drm_color_encoding color_encoding; - - /** - * @color_range: - * - * Color range for non RGB formats - */ - enum drm_color_range color_range; - - /** - * @fb_damage_clips: - * - * Blob representing damage (area in plane framebuffer that changed - * since last plane update) as an array of &drm_mode_rect in framebuffer - * coodinates of the attached framebuffer. Note that unlike plane src, - * damage clips are not in 16.16 fixed point. - * - * See drm_plane_get_damage_clips() and - * drm_plane_get_damage_clips_count() for accessing these. - */ - struct drm_property_blob *fb_damage_clips; - - /** - * @src: - * - * source coordinates of the plane (in 16.16). - * - * When using drm_atomic_helper_check_plane_state(), - * the coordinates are clipped, but the driver may choose - * to use unclipped coordinates instead when the hardware - * performs the clipping automatically. - */ - /** - * @dst: - * - * clipped destination coordinates of the plane. - * - * When using drm_atomic_helper_check_plane_state(), - * the coordinates are clipped, but the driver may choose - * to use unclipped coordinates instead when the hardware - * performs the clipping automatically. - */ + /* Clipped coordinates */ struct drm_rect src, dst; - /** - * @visible: - * - * Visibility of the plane. This can be false even if fb!=NULL and - * crtc!=NULL, due to clipping. + /* + * Is the plane actually visible? Can be false even + * if fb!=NULL and crtc!=NULL, due to clipping. */ bool visible; - /** - * @scaling_filter: - * - * Scaling filter to be applied - */ - enum drm_scaling_filter scaling_filter; - - /** - * @commit: Tracks the pending commit to prevent use-after-free conditions, - * and for async plane updates. - * - * May be NULL. 
- */ - struct drm_crtc_commit *commit; - - /** @state: backpointer to global drm_atomic_state */ struct drm_atomic_state *state; }; -static inline struct drm_rect -drm_plane_state_src(const struct drm_plane_state *state) -{ - struct drm_rect src = { - .x1 = state->src_x, - .y1 = state->src_y, - .x2 = state->src_x + state->src_w, - .y2 = state->src_y + state->src_h, - }; - return src; -} - -static inline struct drm_rect -drm_plane_state_dest(const struct drm_plane_state *state) -{ - struct drm_rect dest = { - .x1 = state->crtc_x, - .y1 = state->crtc_y, - .x2 = state->crtc_x + state->crtc_w, - .y2 = state->crtc_y + state->crtc_h, - }; - return dest; -} /** * struct drm_plane_funcs - driver plane control functions @@ -300,8 +130,7 @@ struct drm_plane_funcs { int crtc_x, int crtc_y, unsigned int crtc_w, unsigned int crtc_h, uint32_t src_x, uint32_t src_y, - uint32_t src_w, uint32_t src_h, - struct drm_modeset_acquire_ctx *ctx); + uint32_t src_w, uint32_t src_h); /** * @disable_plane: @@ -318,8 +147,7 @@ struct drm_plane_funcs { * * 0 on success or a negative error code on failure. */ - int (*disable_plane)(struct drm_plane *plane, - struct drm_modeset_acquire_ctx *ctx); + int (*disable_plane)(struct drm_plane *plane); /** * @destroy: @@ -348,9 +176,11 @@ struct drm_plane_funcs { * This is the legacy entry point to update a property attached to the * plane. * + * Drivers implementing atomic modeset should use + * drm_atomic_helper_plane_set_property() to implement this hook. + * * This callback is optional if the driver does not support any legacy - * driver-private properties. For atomic drivers it is not used because - * property handling is done entirely in the DRM core. + * driver-private properties. * * RETURNS: * @@ -363,21 +193,19 @@ struct drm_plane_funcs { * @atomic_duplicate_state: * * Duplicate the current atomic state for this plane and return it. 
- * The core and helpers guarantee that any atomic state duplicated with + * The core and helpers gurantee that any atomic state duplicated with * this hook and still owned by the caller (i.e. not transferred to the - * driver by calling &drm_mode_config_funcs.atomic_commit) will be - * cleaned up by calling the @atomic_destroy_state hook in this - * structure. + * driver by calling ->atomic_commit() from struct + * &drm_mode_config_funcs) will be cleaned up by calling the + * @atomic_destroy_state hook in this structure. * - * This callback is mandatory for atomic drivers. - * - * Atomic drivers which don't subclass &struct drm_plane_state should use + * Atomic drivers which don't subclass struct &drm_plane_state should use * drm_atomic_helper_plane_duplicate_state(). Drivers that subclass the * state structure to extend it with driver-private state should use * __drm_atomic_helper_plane_duplicate_state() to make sure shared state is * duplicated in a consistent fashion across drivers. * - * It is an error to call this hook before &drm_plane.state has been + * It is an error to call this hook before plane->state has been * initialized correctly. * * NOTE: @@ -397,8 +225,6 @@ struct drm_plane_funcs { * * Destroy a state duplicated with @atomic_duplicate_state and release * or unreference all resources it references - * - * This callback is mandatory for atomic drivers. */ void (*atomic_destroy_state)(struct drm_plane *plane, struct drm_plane_state *state); @@ -492,42 +318,11 @@ struct drm_plane_funcs { * * This optional hook should be used to unregister the additional * userspace interfaces attached to the plane from - * @late_register. It is called from drm_dev_unregister(), + * late_unregister(). It is called from drm_dev_unregister(), * early in the driver unload sequence to disable userspace access * before data structures are torndown. 
*/ void (*early_unregister)(struct drm_plane *plane); - - /** - * @atomic_print_state: - * - * If driver subclasses &struct drm_plane_state, it should implement - * this optional hook for printing additional driver specific state. - * - * Do not call this directly, use drm_atomic_plane_print_state() - * instead. - */ - void (*atomic_print_state)(struct drm_printer *p, - const struct drm_plane_state *state); - - /** - * @format_mod_supported: - * - * This optional hook is used for the DRM to determine if the given - * format/modifier combination is valid for the plane. This allows the - * DRM to generate the correct format bitmask (which formats apply to - * which modifier), and to valdiate modifiers at atomic_check time. - * - * If not present, then any modifier in the plane's modifier - * list is allowed with any of the plane's formats. - * - * Returns: - * - * True if the given modifier is valid for that format on the plane. - * False otherwise. - */ - bool (*format_mod_supported)(struct drm_plane *plane, uint32_t format, - uint64_t modifier); }; /** @@ -541,14 +336,10 @@ struct drm_plane_funcs { * * For compatibility with legacy userspace, only overlay planes are made * available to userspace by default. Userspace clients may set the - * &DRM_CLIENT_CAP_UNIVERSAL_PLANES client capability bit to indicate that they + * DRM_CLIENT_CAP_UNIVERSAL_PLANES client capability bit to indicate that they * wish to receive a universal plane list containing all plane types. See also * drm_for_each_legacy_plane(). * - * In addition to setting each plane's type, drivers need to setup the - * &drm_crtc.primary and optionally &drm_crtc.cursor pointers for legacy - * IOCTLs. See drm_crtc_init_with_planes(). - * * WARNING: The values of this enum is UABI since they're exposed in the "type" * property. 
*/ @@ -564,20 +355,19 @@ enum drm_plane_type { /** * @DRM_PLANE_TYPE_PRIMARY: * - * A primary plane attached to a CRTC is the most likely to be able to - * light up the CRTC when no scaling/cropping is used and the plane - * covers the whole CRTC. + * Primary planes represent a "main" plane for a CRTC. Primary planes + * are the planes operated upon by CRTC modesetting and flipping + * operations described in the page_flip and set_config hooks in struct + * &drm_crtc_funcs. */ DRM_PLANE_TYPE_PRIMARY, /** * @DRM_PLANE_TYPE_CURSOR: * - * A cursor plane attached to a CRTC is more likely to be able to be - * enabled when no scaling/cropping is used and the framebuffer has the - * size indicated by &drm_mode_config.cursor_width and - * &drm_mode_config.cursor_height. Additionally, if the driver doesn't - * support modifiers, the framebuffer should have a linear layout. + * Cursor planes represent a "cursor" plane for a CRTC. Cursor planes + * are the planes operated upon by the DRM_IOCTL_MODE_CURSOR and + * DRM_IOCTL_MODE_CURSOR2 IOCTLs. */ DRM_PLANE_TYPE_CURSOR, }; @@ -585,96 +375,56 @@ enum drm_plane_type { /** * struct drm_plane - central DRM plane control structure - * - * Planes represent the scanout hardware of a display block. They receive their - * input data from a &drm_framebuffer and feed it to a &drm_crtc. Planes control - * the color conversion, see `Plane Composition Properties`_ for more details, - * and are also involved in the color conversion of input pixels, see `Color - * Management Properties`_ for details on that. 
+ * @dev: DRM device this plane belongs to + * @head: for list management + * @name: human readable name, can be overwritten by the driver + * @base: base mode object + * @possible_crtcs: pipes this plane can be bound to + * @format_types: array of formats supported by this plane + * @format_count: number of formats supported + * @format_default: driver hasn't supplied supported formats for the plane + * @crtc: currently bound CRTC + * @fb: currently bound fb + * @old_fb: Temporary tracking of the old fb while a modeset is ongoing. Used by + * drm_mode_set_config_internal() to implement correct refcounting. + * @funcs: helper functions + * @properties: property tracking for this plane + * @type: type of plane (overlay, primary, cursor) + * @state: current atomic state for this plane + * @zpos_property: zpos property for this plane + * @helper_private: mid-layer private data */ struct drm_plane { - /** @dev: DRM device this plane belongs to */ struct drm_device *dev; - - /** - * @head: - * - * List of all planes on @dev, linked from &drm_mode_config.plane_list. - * Invariant over the lifetime of @dev and therefore does not need - * locking. - */ struct list_head head; - /** @name: human readable name, can be overwritten by the driver */ char *name; /** * @mutex: * - * Protects modeset plane state, together with the &drm_crtc.mutex of - * CRTC this plane is linked to (when active, getting activated or - * getting disabled). - * - * For atomic drivers specifically this protects @state. + * Protects modeset plane state, together with the mutex of &drm_crtc + * this plane is linked to (when active, getting actived or getting + * disabled). 
*/ struct drm_modeset_lock mutex; - /** @base: base mode object */ struct drm_mode_object base; - /** - * @possible_crtcs: pipes this plane can be bound to constructed from - * drm_crtc_mask() - */ uint32_t possible_crtcs; - /** @format_types: array of formats supported by this plane */ uint32_t *format_types; - /** @format_count: Size of the array pointed at by @format_types. */ unsigned int format_count; - /** - * @format_default: driver hasn't supplied supported formats for the - * plane. Used by the drm_plane_init compatibility wrapper only. - */ bool format_default; - /** @modifiers: array of modifiers supported by this plane */ - uint64_t *modifiers; - /** @modifier_count: Size of the array pointed at by @modifier_count. */ - unsigned int modifier_count; - - /** - * @crtc: - * - * Currently bound CRTC, only meaningful for non-atomic drivers. For - * atomic drivers this is forced to be NULL, atomic drivers should - * instead check &drm_plane_state.crtc. - */ struct drm_crtc *crtc; - - /** - * @fb: - * - * Currently bound framebuffer, only meaningful for non-atomic drivers. - * For atomic drivers this is forced to be NULL, atomic drivers should - * instead check &drm_plane_state.fb. - */ struct drm_framebuffer *fb; - /** - * @old_fb: - * - * Temporary tracking of the old fb while a modeset is ongoing. Only - * used by non-atomic drivers, forced to be NULL for atomic drivers. - */ struct drm_framebuffer *old_fb; - /** @funcs: plane control functions */ const struct drm_plane_funcs *funcs; - /** @properties: property tracking for this plane */ struct drm_object_properties properties; - /** @type: Type of plane, see &enum drm_plane_type for details. */ enum drm_plane_type type; /** @@ -683,136 +433,31 @@ struct drm_plane { */ unsigned index; - /** @helper_private: mid-layer private data */ const struct drm_plane_helper_funcs *helper_private; - /** - * @state: - * - * Current atomic state for this plane. - * - * This is protected by @mutex. 
Note that nonblocking atomic commits - * access the current plane state without taking locks. Either by going - * through the &struct drm_atomic_state pointers, see - * for_each_oldnew_plane_in_state(), for_each_old_plane_in_state() and - * for_each_new_plane_in_state(). Or through careful ordering of atomic - * commit operations as implemented in the atomic helpers, see - * &struct drm_crtc_commit. - */ struct drm_plane_state *state; - /** - * @alpha_property: - * Optional alpha property for this plane. See - * drm_plane_create_alpha_property(). - */ - struct drm_property *alpha_property; - /** - * @zpos_property: - * Optional zpos property for this plane. See - * drm_plane_create_zpos_property(). - */ struct drm_property *zpos_property; - /** - * @rotation_property: - * Optional rotation property for this plane. See - * drm_plane_create_rotation_property(). - */ - struct drm_property *rotation_property; - /** - * @blend_mode_property: - * Optional "pixel blend mode" enum property for this plane. - * Blend mode property represents the alpha blending equation selection, - * describing how the pixels from the current plane are composited with - * the background. - */ - struct drm_property *blend_mode_property; - - /** - * @color_encoding_property: - * - * Optional "COLOR_ENCODING" enum property for specifying - * color encoding for non RGB formats. - * See drm_plane_create_color_properties(). - */ - struct drm_property *color_encoding_property; - /** - * @color_range_property: - * - * Optional "COLOR_RANGE" enum property for specifying - * color range for non RGB formats. - * See drm_plane_create_color_properties(). - */ - struct drm_property *color_range_property; - - /** - * @scaling_filter_property: property to apply a particular filter while - * scaling. 
- */ - struct drm_property *scaling_filter_property; }; #define obj_to_plane(x) container_of(x, struct drm_plane, base) -__printf(9, 10) +extern __printf(8, 9) int drm_universal_plane_init(struct drm_device *dev, struct drm_plane *plane, - uint32_t possible_crtcs, + unsigned long possible_crtcs, const struct drm_plane_funcs *funcs, const uint32_t *formats, unsigned int format_count, - const uint64_t *format_modifiers, enum drm_plane_type type, const char *name, ...); -int drm_plane_init(struct drm_device *dev, - struct drm_plane *plane, - uint32_t possible_crtcs, - const struct drm_plane_funcs *funcs, - const uint32_t *formats, unsigned int format_count, - bool is_primary); -void drm_plane_cleanup(struct drm_plane *plane); - -__printf(10, 11) -void *__drmm_universal_plane_alloc(struct drm_device *dev, - size_t size, size_t offset, - uint32_t possible_crtcs, - const struct drm_plane_funcs *funcs, - const uint32_t *formats, - unsigned int format_count, - const uint64_t *format_modifiers, - enum drm_plane_type plane_type, - const char *name, ...); - -/** - * drmm_universal_plane_alloc - Allocate and initialize an universal plane object - * @dev: DRM device - * @type: the type of the struct which contains struct &drm_plane - * @member: the name of the &drm_plane within @type - * @possible_crtcs: bitmask of possible CRTCs - * @funcs: callbacks for the new plane - * @formats: array of supported formats (DRM_FORMAT\_\*) - * @format_count: number of elements in @formats - * @format_modifiers: array of struct drm_format modifiers terminated by - * DRM_FORMAT_MOD_INVALID - * @plane_type: type of plane (overlay, primary, cursor) - * @name: printf style format string for the plane name, or NULL for default name - * - * Allocates and initializes a plane object of type @type. Cleanup is - * automatically handled through registering drm_plane_cleanup() with - * drmm_add_action(). - * - * The @drm_plane_funcs.destroy hook must be NULL. 
- * - * Returns: - * Pointer to new plane, or ERR_PTR on failure. - */ -#define drmm_universal_plane_alloc(dev, type, member, possible_crtcs, funcs, formats, \ - format_count, format_modifiers, plane_type, name, ...) \ - ((type *)__drmm_universal_plane_alloc(dev, sizeof(type), \ - offsetof(type, member), \ - possible_crtcs, funcs, formats, \ - format_count, format_modifiers, \ - plane_type, name, ##__VA_ARGS__)) +extern int drm_plane_init(struct drm_device *dev, + struct drm_plane *plane, + unsigned long possible_crtcs, + const struct drm_plane_funcs *funcs, + const uint32_t *formats, unsigned int format_count, + bool is_primary); +extern void drm_plane_cleanup(struct drm_plane *plane); /** * drm_plane_index - find the index of a registered plane @@ -821,22 +466,12 @@ void *__drmm_universal_plane_alloc(struct drm_device *dev, * Given a registered plane, return the index of that plane within a DRM * device's list of planes. */ -static inline unsigned int drm_plane_index(const struct drm_plane *plane) +static inline unsigned int drm_plane_index(struct drm_plane *plane) { return plane->index; } - -/** - * drm_plane_mask - find the mask of a registered plane - * @plane: plane to find mask for - */ -static inline u32 drm_plane_mask(const struct drm_plane *plane) -{ - return 1 << drm_plane_index(plane); -} - -struct drm_plane * drm_plane_from_index(struct drm_device *dev, int idx); -void drm_plane_force_disable(struct drm_plane *plane); +extern struct drm_plane * drm_plane_from_index(struct drm_device *dev, int idx); +extern void drm_plane_force_disable(struct drm_plane *plane); int drm_mode_plane_set_obj_prop(struct drm_plane *plane, struct drm_property *property, @@ -845,18 +480,16 @@ int drm_mode_plane_set_obj_prop(struct drm_plane *plane, /** * drm_plane_find - find a &drm_plane * @dev: DRM device - * @file_priv: drm file to check for lease against. * @id: plane id * * Returns the plane with @id, NULL if it doesn't exist. 
Simple wrapper around * drm_mode_object_find(). */ static inline struct drm_plane *drm_plane_find(struct drm_device *dev, - struct drm_file *file_priv, uint32_t id) { struct drm_mode_object *mo; - mo = drm_mode_object_find(dev, file_priv, id, DRM_MODE_OBJECT_PLANE); + mo = drm_mode_object_find(dev, id, DRM_MODE_OBJECT_PLANE); return mo ? obj_to_plane(mo) : NULL; } @@ -870,7 +503,7 @@ static inline struct drm_plane *drm_plane_find(struct drm_device *dev, */ #define drm_for_each_plane_mask(plane, dev, plane_mask) \ list_for_each_entry((plane), &(dev)->mode_config.plane_list, head) \ - for_each_if ((plane_mask) & drm_plane_mask(plane)) + for_each_if ((plane_mask) & (1 << drm_plane_index(plane))) /** * drm_for_each_legacy_plane - iterate over all planes for legacy userspace @@ -879,7 +512,7 @@ static inline struct drm_plane *drm_plane_find(struct drm_device *dev, * * Iterate over all legacy planes of @dev, excluding primary and cursor planes. * This is useful for implementing userspace apis when userspace is not - * universal plane aware. See also &enum drm_plane_type. + * universal plane aware. See also enum &drm_plane_type. 
*/ #define drm_for_each_legacy_plane(plane, dev) \ list_for_each_entry(plane, &(dev)->mode_config.plane_list, head) \ @@ -895,16 +528,5 @@ static inline struct drm_plane *drm_plane_find(struct drm_device *dev, #define drm_for_each_plane(plane, dev) \ list_for_each_entry(plane, &(dev)->mode_config.plane_list, head) -bool drm_any_plane_has_format(struct drm_device *dev, - u32 format, u64 modifier); - -void drm_plane_enable_fb_damage_clips(struct drm_plane *plane); -unsigned int -drm_plane_get_damage_clips_count(const struct drm_plane_state *state); -struct drm_mode_rect * -drm_plane_get_damage_clips(const struct drm_plane_state *state); - -int drm_plane_create_scaling_filter_property(struct drm_plane *plane, - unsigned int supported_filters); #endif diff --git a/include/drm/drm_plane_helper.h b/include/drm/drm_plane_helper.h index 331ebd60b3..c18959685c 100644 --- a/include/drm/drm_plane_helper.h +++ b/include/drm/drm_plane_helper.h @@ -38,7 +38,44 @@ */ #define DRM_PLANE_HELPER_NO_SCALING (1<<16) +int drm_plane_helper_check_state(struct drm_plane_state *state, + const struct drm_rect *clip, + int min_scale, int max_scale, + bool can_position, + bool can_update_disabled); +int drm_plane_helper_check_update(struct drm_plane *plane, + struct drm_crtc *crtc, + struct drm_framebuffer *fb, + struct drm_rect *src, + struct drm_rect *dest, + const struct drm_rect *clip, + unsigned int rotation, + int min_scale, + int max_scale, + bool can_position, + bool can_update_disabled, + bool *visible); +int drm_primary_helper_update(struct drm_plane *plane, + struct drm_crtc *crtc, + struct drm_framebuffer *fb, + int crtc_x, int crtc_y, + unsigned int crtc_w, unsigned int crtc_h, + uint32_t src_x, uint32_t src_y, + uint32_t src_w, uint32_t src_h); +int drm_primary_helper_disable(struct drm_plane *plane); void drm_primary_helper_destroy(struct drm_plane *plane); extern const struct drm_plane_funcs drm_primary_helper_funcs; +int drm_plane_helper_update(struct drm_plane *plane, struct 
drm_crtc *crtc, + struct drm_framebuffer *fb, + int crtc_x, int crtc_y, + unsigned int crtc_w, unsigned int crtc_h, + uint32_t src_x, uint32_t src_y, + uint32_t src_w, uint32_t src_h); +int drm_plane_helper_disable(struct drm_plane *plane); + +/* For use by drm_crtc_helper.c */ +int drm_plane_helper_commit(struct drm_plane *plane, + struct drm_plane_state *plane_state, + struct drm_framebuffer *old_fb); #endif diff --git a/include/drm/drm_property.h b/include/drm/drm_property.h index 65bc9710a4..43c4b6a204 100644 --- a/include/drm/drm_property.h +++ b/include/drm/drm_property.h @@ -27,25 +27,16 @@ #include #include -#include - /** * struct drm_property_enum - symbolic values for enumerations - * @head: list of enum values, linked to &drm_property.enum_list + * @value: numeric property value for this enum entry + * @head: list of enum values, linked to enum_list in &drm_property * @name: symbolic name for the enum * * For enumeration and bitmask properties this structure stores the symbolic * decoding for each value. This is used for example for the rotation property. */ struct drm_property_enum { - /** - * @value: numeric property value for this enum entry - * - * If the property has the type &DRM_MODE_PROP_BITMASK, @value stores a - * bitshift, not a bitmask. In other words, the enum entry is enabled - * if the bit number @value is set in the property's value. This enum - * entry has the bitmask ``1 << value``. - */ uint64_t value; struct list_head head; char name[DRM_PROP_NAME_LEN]; @@ -121,7 +112,7 @@ struct drm_property { * by the property. Bitmask properties are created using * drm_property_create_bitmask(). * - * DRM_MODE_PROP_OBJECT + * DRM_MODE_PROB_OBJECT * Object properties are used to link modeset objects. This is used * extensively in the atomic support to create the display pipeline, * by linking &drm_framebuffer to &drm_plane, &drm_plane to @@ -156,12 +147,11 @@ struct drm_property { * properties are not exposed to legacy userspace. 
* * DRM_MODE_PROP_IMMUTABLE - * Set for properties whose values cannot be changed by + * Set for properties where userspace cannot be changed by * userspace. The kernel is allowed to update the value of these * properties. This is generally used to expose probe state to - * userspace, e.g. the EDID, or the connector path property on DP - * MST sinks. Kernel can update the value of an immutable property - * by calling drm_object_property_set_value(). + * usersapce, e.g. the EDID, or the connector path property on DP + * MST sinks. */ uint32_t flags; @@ -201,17 +191,18 @@ struct drm_property { * struct drm_property_blob - Blob data for &drm_property * @base: base KMS object * @dev: DRM device - * @head_global: entry on the global blob list in - * &drm_mode_config.property_blob_list. - * @head_file: entry on the per-file blob list in &drm_file.blobs list. + * @head_global: entry on the global blob list in &drm_mode_config + * property_blob_list. + * @head_file: entry on the per-file blob list in &drm_file blobs list. * @length: size of the blob in bytes, invariant over the lifetime of the object * @data: actual data, embedded at the end of this structure * * Blobs are used to store bigger values than what fits directly into the 64 * bits available for a &drm_property. * - * Blobs are reference counted using drm_property_blob_get() and - * drm_property_blob_put(). They are created using drm_property_create_blob(). + * Blobs are reference counted using drm_property_reference_blob() and + * drm_property_unreference_blob(). They are created using + * drm_property_create_blob(). 
*/ struct drm_property_blob { struct drm_mode_object base; @@ -219,12 +210,12 @@ struct drm_property_blob { struct list_head head_global; struct list_head head_file; size_t length; - void *data; + unsigned char data[]; }; struct drm_prop_enum_list { int type; - const char *name; + char *name; }; #define obj_to_property(x) container_of(x, struct drm_property, base) @@ -247,30 +238,28 @@ static inline bool drm_property_type_is(struct drm_property *property, return property->flags & type; } -struct drm_property *drm_property_create(struct drm_device *dev, - u32 flags, const char *name, - int num_values); -struct drm_property *drm_property_create_enum(struct drm_device *dev, - u32 flags, const char *name, +struct drm_property *drm_property_create(struct drm_device *dev, int flags, + const char *name, int num_values); +struct drm_property *drm_property_create_enum(struct drm_device *dev, int flags, + const char *name, const struct drm_prop_enum_list *props, int num_values); struct drm_property *drm_property_create_bitmask(struct drm_device *dev, - u32 flags, const char *name, + int flags, const char *name, const struct drm_prop_enum_list *props, int num_props, uint64_t supported_bits); -struct drm_property *drm_property_create_range(struct drm_device *dev, - u32 flags, const char *name, +struct drm_property *drm_property_create_range(struct drm_device *dev, int flags, + const char *name, uint64_t min, uint64_t max); struct drm_property *drm_property_create_signed_range(struct drm_device *dev, - u32 flags, const char *name, + int flags, const char *name, int64_t min, int64_t max); struct drm_property *drm_property_create_object(struct drm_device *dev, - u32 flags, const char *name, - uint32_t type); -struct drm_property *drm_property_create_bool(struct drm_device *dev, - u32 flags, const char *name); -int drm_property_add_enum(struct drm_property *property, + int flags, const char *name, uint32_t type); +struct drm_property *drm_property_create_bool(struct drm_device 
*dev, int flags, + const char *name); +int drm_property_add_enum(struct drm_property *property, int index, uint64_t value, const char *name); void drm_property_destroy(struct drm_device *dev, struct drm_property *property); @@ -285,25 +274,21 @@ int drm_property_replace_global_blob(struct drm_device *dev, const void *data, struct drm_mode_object *obj_holds_id, struct drm_property *prop_holds_id); -bool drm_property_replace_blob(struct drm_property_blob **blob, - struct drm_property_blob *new_blob); -struct drm_property_blob *drm_property_blob_get(struct drm_property_blob *blob); -void drm_property_blob_put(struct drm_property_blob *blob); +struct drm_property_blob *drm_property_reference_blob(struct drm_property_blob *blob); +void drm_property_unreference_blob(struct drm_property_blob *blob); /** - * drm_property_find - find property object + * drm_connector_find - find property object * @dev: DRM device - * @file_priv: drm file to check for lease against. * @id: property object id * * This function looks up the property object specified by id and returns it. */ static inline struct drm_property *drm_property_find(struct drm_device *dev, - struct drm_file *file_priv, uint32_t id) { struct drm_mode_object *mo; - mo = drm_mode_object_find(dev, file_priv, id, DRM_MODE_OBJECT_PROPERTY); + mo = drm_mode_object_find(dev, id, DRM_MODE_OBJECT_PROPERTY); return mo ? obj_to_property(mo) : NULL; } diff --git a/include/drm/drm_rect.h b/include/drm/drm_rect.h index 6f6e19bd4d..83bb156d43 100644 --- a/include/drm/drm_rect.h +++ b/include/drm/drm_rect.h @@ -24,8 +24,6 @@ #ifndef DRM_RECT_H #define DRM_RECT_H -#include - /** * DOC: rect utils * @@ -39,58 +37,11 @@ * @x2: horizontal ending coordinate (exclusive) * @y1: vertical starting coordinate (inclusive) * @y2: vertical ending coordinate (exclusive) - * - * Note that this must match the layout of struct drm_mode_rect or the damage - * helpers like drm_atomic_helper_damage_iter_init() break. 
*/ struct drm_rect { int x1, y1, x2, y2; }; -/** - * DRM_RECT_FMT - printf string for &struct drm_rect - */ -#define DRM_RECT_FMT "%dx%d%+d%+d" -/** - * DRM_RECT_ARG - printf arguments for &struct drm_rect - * @r: rectangle struct - */ -#define DRM_RECT_ARG(r) drm_rect_width(r), drm_rect_height(r), (r)->x1, (r)->y1 - -/** - * DRM_RECT_FP_FMT - printf string for &struct drm_rect in 16.16 fixed point - */ -#define DRM_RECT_FP_FMT "%d.%06ux%d.%06u%+d.%06u%+d.%06u" -/** - * DRM_RECT_FP_ARG - printf arguments for &struct drm_rect in 16.16 fixed point - * @r: rectangle struct - * - * This is useful for e.g. printing plane source rectangles, which are in 16.16 - * fixed point. - */ -#define DRM_RECT_FP_ARG(r) \ - drm_rect_width(r) >> 16, ((drm_rect_width(r) & 0xffff) * 15625) >> 10, \ - drm_rect_height(r) >> 16, ((drm_rect_height(r) & 0xffff) * 15625) >> 10, \ - (r)->x1 >> 16, (((r)->x1 & 0xffff) * 15625) >> 10, \ - (r)->y1 >> 16, (((r)->y1 & 0xffff) * 15625) >> 10 - -/** - * drm_rect_init - initialize the rectangle from x/y/w/h - * @r: rectangle - * @x: x coordinate - * @y: y coordinate - * @width: width - * @height: height - */ -static inline void drm_rect_init(struct drm_rect *r, int x, int y, - int width, int height) -{ - r->x1 = x; - r->y1 = y; - r->x2 = x + width; - r->y2 = y + height; -} - /** * drm_rect_adjust_size - adjust the size of the rectangle * @r: rectangle to be adjusted @@ -128,20 +79,6 @@ static inline void drm_rect_translate(struct drm_rect *r, int dx, int dy) r->y2 += dy; } -/** - * drm_rect_translate_to - translate the rectangle to an absolute position - * @r: rectangle to be tranlated - * @x: horizontal position - * @y: vertical position - * - * Move rectangle @r to @x in the horizontal direction, - * and to @y in the vertical direction. 
- */ -static inline void drm_rect_translate_to(struct drm_rect *r, int x, int y) -{ - drm_rect_translate(r, x - r->x1, y - r->y1); -} - /** * drm_rect_downscale - downscale a rectangle * @r: rectangle to be downscaled @@ -183,7 +120,7 @@ static inline int drm_rect_height(const struct drm_rect *r) } /** - * drm_rect_visible - determine if the rectangle is visible + * drm_rect_visible - determine if the the rectangle is visible * @r: rectangle whose visibility is returned * * RETURNS: @@ -209,28 +146,22 @@ static inline bool drm_rect_equals(const struct drm_rect *r1, r1->y1 == r2->y1 && r1->y2 == r2->y2; } -/** - * drm_rect_fp_to_int - Convert a rect in 16.16 fixed point form to int form. - * @dst: rect to be stored the converted value - * @src: rect in 16.16 fixed point form - */ -static inline void drm_rect_fp_to_int(struct drm_rect *dst, - const struct drm_rect *src) -{ - drm_rect_init(dst, src->x1 >> 16, src->y1 >> 16, - drm_rect_width(src) >> 16, - drm_rect_height(src) >> 16); -} - bool drm_rect_intersect(struct drm_rect *r, const struct drm_rect *clip); bool drm_rect_clip_scaled(struct drm_rect *src, struct drm_rect *dst, - const struct drm_rect *clip); + const struct drm_rect *clip, + int hscale, int vscale); int drm_rect_calc_hscale(const struct drm_rect *src, const struct drm_rect *dst, int min_hscale, int max_hscale); int drm_rect_calc_vscale(const struct drm_rect *src, const struct drm_rect *dst, int min_vscale, int max_vscale); +int drm_rect_calc_hscale_relaxed(struct drm_rect *src, + struct drm_rect *dst, + int min_hscale, int max_hscale); +int drm_rect_calc_vscale_relaxed(struct drm_rect *src, + struct drm_rect *dst, + int min_vscale, int max_vscale); void drm_rect_debug_print(const char *prefix, const struct drm_rect *r, bool fixed_point); void drm_rect_rotate(struct drm_rect *r, diff --git a/include/drm/drm_simple_kms_helper.h b/include/drm/drm_simple_kms_helper.h index 0b3647e614..01a8436ccb 100644 --- a/include/drm/drm_simple_kms_helper.h +++ 
b/include/drm/drm_simple_kms_helper.h @@ -1,15 +1,15 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * Copyright (C) 2016 Noralf Trønnes + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. */ #ifndef __LINUX_DRM_SIMPLE_KMS_HELPER_H #define __LINUX_DRM_SIMPLE_KMS_HELPER_H -#include -#include -#include - struct drm_simple_display_pipe; /** @@ -17,41 +17,6 @@ struct drm_simple_display_pipe; * display pipeline */ struct drm_simple_display_pipe_funcs { - /** - * @mode_valid: - * - * This callback is used to check if a specific mode is valid in the - * crtc used in this simple display pipe. This should be implemented - * if the display pipe has some sort of restriction in the modes - * it can display. For example, a given display pipe may be responsible - * to set a clock value. If the clock can not produce all the values - * for the available modes then this callback can be used to restrict - * the number of modes to only the ones that can be displayed. Another - * reason can be bandwidth mitigation: the memory port on the display - * controller can have bandwidth limitations not allowing pixel data - * to be fetched at any rate. - * - * This hook is used by the probe helpers to filter the mode list in - * drm_helper_probe_single_connector_modes(), and it is used by the - * atomic helpers to validate modes supplied by userspace in - * drm_atomic_helper_check_modeset(). - * - * This function is optional. - * - * NOTE: - * - * Since this function is both called from the check phase of an atomic - * commit, and the mode validation in the probe paths it is not allowed - * to look at anything else but the passed-in mode, and validate it - * against configuration-invariant hardware constraints. 
- * - * RETURNS: - * - * drm_mode_status Enum - */ - enum drm_mode_status (*mode_valid)(struct drm_simple_display_pipe *pipe, - const struct drm_display_mode *mode); - /** * @enable: * @@ -60,8 +25,7 @@ struct drm_simple_display_pipe_funcs { * This hook is optional. */ void (*enable)(struct drm_simple_display_pipe *pipe, - struct drm_crtc_state *crtc_state, - struct drm_plane_state *plane_state); + struct drm_crtc_state *crtc_state); /** * @disable: * @@ -100,27 +64,18 @@ struct drm_simple_display_pipe_funcs { * This is the function drivers should submit the * &drm_pending_vblank_event from. Using either * drm_crtc_arm_vblank_event(), when the driver supports vblank - * interrupt handling, or drm_crtc_send_vblank_event() for more - * complex case. In case the hardware lacks vblank support entirely, - * drivers can set &struct drm_crtc_state.no_vblank in - * &struct drm_simple_display_pipe_funcs.check and let DRM's - * atomic helper fake a vblank event. + * interrupt handling, or drm_crtc_send_vblank_event() directly in case + * the hardware lacks vblank support entirely. */ void (*update)(struct drm_simple_display_pipe *pipe, - struct drm_plane_state *old_plane_state); + struct drm_plane_state *plane_state); /** * @prepare_fb: * - * Optional, called by &drm_plane_helper_funcs.prepare_fb. Please read - * the documentation for the &drm_plane_helper_funcs.prepare_fb hook for - * more details. - * - * For GEM drivers who neither have a @prepare_fb nor @cleanup_fb hook - * set drm_gem_simple_display_pipe_prepare_fb() is called automatically - * to implement this. Other drivers which need additional plane - * processing can call drm_gem_simple_display_pipe_prepare_fb() from - * their @prepare_fb hook. + * Optional, called by struct &drm_plane_helper_funcs ->prepare_fb . + * Please read the documentation for the ->prepare_fb hook in + * struct &drm_plane_helper_funcs for more details. 
*/ int (*prepare_fb)(struct drm_simple_display_pipe *pipe, struct drm_plane_state *plane_state); @@ -128,84 +83,12 @@ struct drm_simple_display_pipe_funcs { /** * @cleanup_fb: * - * Optional, called by &drm_plane_helper_funcs.cleanup_fb. Please read - * the documentation for the &drm_plane_helper_funcs.cleanup_fb hook for - * more details. + * Optional, called by struct &drm_plane_helper_funcs ->cleanup_fb . + * Please read the documentation for the ->cleanup_fb hook in + * struct &drm_plane_helper_funcs for more details. */ void (*cleanup_fb)(struct drm_simple_display_pipe *pipe, struct drm_plane_state *plane_state); - - /** - * @enable_vblank: - * - * Optional, called by &drm_crtc_funcs.enable_vblank. Please read - * the documentation for the &drm_crtc_funcs.enable_vblank hook for - * more details. - */ - int (*enable_vblank)(struct drm_simple_display_pipe *pipe); - - /** - * @disable_vblank: - * - * Optional, called by &drm_crtc_funcs.disable_vblank. Please read - * the documentation for the &drm_crtc_funcs.disable_vblank hook for - * more details. - */ - void (*disable_vblank)(struct drm_simple_display_pipe *pipe); - - /** - * @reset_crtc: - * - * Optional, called by &drm_crtc_funcs.reset. Please read the - * documentation for the &drm_crtc_funcs.reset hook for more details. - */ - void (*reset_crtc)(struct drm_simple_display_pipe *pipe); - - /** - * @duplicate_crtc_state: - * - * Optional, called by &drm_crtc_funcs.atomic_duplicate_state. Please - * read the documentation for the &drm_crtc_funcs.atomic_duplicate_state - * hook for more details. - */ - struct drm_crtc_state * (*duplicate_crtc_state)(struct drm_simple_display_pipe *pipe); - - /** - * @destroy_crtc_state: - * - * Optional, called by &drm_crtc_funcs.atomic_destroy_state. Please - * read the documentation for the &drm_crtc_funcs.atomic_destroy_state - * hook for more details. 
- */ - void (*destroy_crtc_state)(struct drm_simple_display_pipe *pipe, - struct drm_crtc_state *crtc_state); - - /** - * @reset_plane: - * - * Optional, called by &drm_plane_funcs.reset. Please read the - * documentation for the &drm_plane_funcs.reset hook for more details. - */ - void (*reset_plane)(struct drm_simple_display_pipe *pipe); - - /** - * @duplicate_plane_state: - * - * Optional, called by &drm_plane_funcs.atomic_duplicate_state. Please - * read the documentation for the &drm_plane_funcs.atomic_duplicate_state - * hook for more details. - */ - struct drm_plane_state * (*duplicate_plane_state)(struct drm_simple_display_pipe *pipe); - - /** - * @destroy_plane_state: - * - * Optional, called by &drm_plane_funcs.atomic_destroy_state. Please - * read the documentation for the &drm_plane_funcs.atomic_destroy_state - * hook for more details. - */ - void (*destroy_plane_state)(struct drm_simple_display_pipe *pipe, - struct drm_plane_state *plane_state); }; /** @@ -231,39 +114,12 @@ struct drm_simple_display_pipe { int drm_simple_display_pipe_attach_bridge(struct drm_simple_display_pipe *pipe, struct drm_bridge *bridge); +void drm_simple_display_pipe_detach_bridge(struct drm_simple_display_pipe *pipe); + int drm_simple_display_pipe_init(struct drm_device *dev, struct drm_simple_display_pipe *pipe, const struct drm_simple_display_pipe_funcs *funcs, const uint32_t *formats, unsigned int format_count, - const uint64_t *format_modifiers, struct drm_connector *connector); -int drm_simple_encoder_init(struct drm_device *dev, - struct drm_encoder *encoder, - int encoder_type); - -void *__drmm_simple_encoder_alloc(struct drm_device *dev, size_t size, - size_t offset, int encoder_type); - -/** - * drmm_simple_encoder_alloc - Allocate and initialize an encoder with basic - * functionality. - * @dev: drm device - * @type: the type of the struct which contains struct &drm_encoder - * @member: the name of the &drm_encoder within @type. 
- * @encoder_type: user visible type of the encoder - * - * Allocates and initializes an encoder that has no further functionality. - * Settings for possible CRTC and clones are left to their initial values. - * Cleanup is automatically handled through registering drm_encoder_cleanup() - * with drmm_add_action(). - * - * Returns: - * Pointer to new encoder, or ERR_PTR on failure. - */ -#define drmm_simple_encoder_alloc(dev, type, member, encoder_type) \ - ((type *)__drmm_simple_encoder_alloc(dev, sizeof(type), \ - offsetof(type, member), \ - encoder_type)) - #endif /* __LINUX_DRM_SIMPLE_KMS_HELPER_H */ diff --git a/include/drm/drm_sysfs.h b/include/drm/drm_sysfs.h index d454ef617b..1d8e033fde 100644 --- a/include/drm/drm_sysfs.h +++ b/include/drm/drm_sysfs.h @@ -1,16 +1,12 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _DRM_SYSFS_H_ #define _DRM_SYSFS_H_ -struct drm_device; -struct device; -struct drm_connector; -struct drm_property; +/** + * This minimalistic include file is intended for users (read TTM) that + * don't want to include the full drmP.h file. + */ -int drm_class_device_register(struct device *dev); -void drm_class_device_unregister(struct device *dev); +extern int drm_class_device_register(struct device *dev); +extern void drm_class_device_unregister(struct device *dev); -void drm_sysfs_hotplug_event(struct drm_device *dev); -void drm_sysfs_connector_status_event(struct drm_connector *connector, - struct drm_property *property); #endif diff --git a/include/drm/drm_vma_manager.h b/include/drm/drm_vma_manager.h index 4f8c35206f..9c03895dc4 100644 --- a/include/drm/drm_vma_manager.h +++ b/include/drm/drm_vma_manager.h @@ -25,22 +25,11 @@ #include #include +#include #include #include #include -/* We make up offsets for buffer objects so we can recognize them at - * mmap time. 
pgoff in mmap is an unsigned long, so we need to make sure - * that the faked up offset will fit - */ -#if BITS_PER_LONG == 64 -#define DRM_FILE_PAGE_OFFSET_START ((0xFFFFFFFFUL >> PAGE_SHIFT) + 1) -#define DRM_FILE_PAGE_OFFSET_SIZE ((0xFFFFFFFFUL >> PAGE_SHIFT) * 256) -#else -#define DRM_FILE_PAGE_OFFSET_START ((0xFFFFFFFUL >> PAGE_SHIFT) + 1) -#define DRM_FILE_PAGE_OFFSET_SIZE ((0xFFFFFFFUL >> PAGE_SHIFT) * 16) -#endif - struct drm_file; struct drm_vma_offset_file { @@ -53,7 +42,6 @@ struct drm_vma_offset_node { rwlock_t vm_lock; struct drm_mm_node vm_node; struct rb_root vm_files; - void *driver_private; }; struct drm_vma_offset_manager { @@ -165,7 +153,7 @@ static inline void drm_vma_node_reset(struct drm_vma_offset_node *node) * Start address of @node for page-based addressing. 0 if the node does not * have an offset allocated. */ -static inline unsigned long drm_vma_node_start(const struct drm_vma_offset_node *node) +static inline unsigned long drm_vma_node_start(struct drm_vma_offset_node *node) { return node->vm_node.start; } diff --git a/include/drm/gma_drm.h b/include/drm/gma_drm.h index 228f43e8df..87ac5e6ca5 100644 --- a/include/drm/gma_drm.h +++ b/include/drm/gma_drm.h @@ -1,10 +1,22 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /************************************************************************** * Copyright (c) 2007-2011, Intel Corporation. * All Rights Reserved. * Copyright (c) 2008, Tungsten Graphics Inc. Cedar Park, TX., USA. * All Rights Reserved. * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+ * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + * **************************************************************************/ #ifndef _GMA_DRM_H_ diff --git a/include/drm/i2c/tda998x.h b/include/drm/i2c/tda998x.h index 3cb25ccbe5..a25483090c 100644 --- a/include/drm/i2c/tda998x.h +++ b/include/drm/i2c/tda998x.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef __DRM_I2C_TDA998X_H__ #define __DRM_I2C_TDA998X_H__ diff --git a/include/drm/i915_component.h b/include/drm/i915_component.h index 55c3b12358..b46fa0ef30 100644 --- a/include/drm/i915_component.h +++ b/include/drm/i915_component.h @@ -24,31 +24,102 @@ #ifndef _I915_COMPONENT_H_ #define _I915_COMPONENT_H_ -#include "drm_audio_component.h" - -enum i915_component_type { - I915_COMPONENT_AUDIO = 1, - I915_COMPONENT_HDCP, -}; - /* MAX_PORT is the number of port * It must be sync with I915_MAX_PORTS defined i915_drv.h + * 5 should be enough as only HSW, BDW, SKL need such fix. */ -#define MAX_PORTS 9 +#define MAX_PORTS 5 + +/** + * struct i915_audio_component_ops - Ops implemented by i915 driver, called by hda driver + */ +struct i915_audio_component_ops { + /** + * @owner: i915 module + */ + struct module *owner; + /** + * @get_power: get the POWER_DOMAIN_AUDIO power well + * + * Request the power well to be turned on. + */ + void (*get_power)(struct device *); + /** + * @put_power: put the POWER_DOMAIN_AUDIO power well + * + * Allow the power well to be turned off. 
+ */ + void (*put_power)(struct device *); + /** + * @codec_wake_override: Enable/disable codec wake signal + */ + void (*codec_wake_override)(struct device *, bool enable); + /** + * @get_cdclk_freq: Get the Core Display Clock in kHz + */ + int (*get_cdclk_freq)(struct device *); + /** + * @sync_audio_rate: set n/cts based on the sample rate + * + * Called from audio driver. After audio driver sets the + * sample rate, it will call this function to set n/cts + */ + int (*sync_audio_rate)(struct device *, int port, int rate); + /** + * @get_eld: fill the audio state and ELD bytes for the given port + * + * Called from audio driver to get the HDMI/DP audio state of the given + * digital port, and also fetch ELD bytes to the given pointer. + * + * It returns the byte size of the original ELD (not the actually + * copied size), zero for an invalid ELD, or a negative error code. + * + * Note that the returned size may be over @max_bytes. Then it + * implies that only a part of ELD has been copied to the buffer. + */ + int (*get_eld)(struct device *, int port, bool *enabled, + unsigned char *buf, int max_bytes); +}; + +/** + * struct i915_audio_component_audio_ops - Ops implemented by hda driver, called by i915 driver + */ +struct i915_audio_component_audio_ops { + /** + * @audio_ptr: Pointer to be used in call to pin_eld_notify + */ + void *audio_ptr; + /** + * @pin_eld_notify: Notify the HDA driver that pin sense and/or ELD information has changed + * + * Called when the i915 driver has set up audio pipeline or has just + * begun to tear it down. This allows the HDA driver to update its + * status accordingly (even when the HDA controller is in power save + * mode). 
+ */ + void (*pin_eld_notify)(void *audio_ptr, int port); +}; /** * struct i915_audio_component - Used for direct communication between i915 and hda drivers */ struct i915_audio_component { /** - * @base: the drm_audio_component base class + * @dev: i915 device, used as parameter for ops */ - struct drm_audio_component base; - + struct device *dev; /** * @aud_sample_rate: the array of audio sample rate per port */ int aud_sample_rate[MAX_PORTS]; + /** + * @ops: Ops implemented by i915 driver, called by hda driver + */ + const struct i915_audio_component_ops *ops; + /** + * @audio_ops: Ops implemented by hda driver, called by i915 driver + */ + const struct i915_audio_component_audio_ops *audio_ops; }; #endif /* _I915_COMPONENT_H_ */ diff --git a/include/drm/i915_drm.h b/include/drm/i915_drm.h index 6722005884..4e1b274e11 100644 --- a/include/drm/i915_drm.h +++ b/include/drm/i915_drm.h @@ -30,14 +30,11 @@ #include /* For use by IPS driver */ -unsigned long i915_read_mch_val(void); -bool i915_gpu_raise(void); -bool i915_gpu_lower(void); -bool i915_gpu_busy(void); -bool i915_gpu_turbo_disable(void); - -/* Exported from arch/x86/kernel/early-quirks.c */ -extern struct resource intel_graphics_stolen_res; +extern unsigned long i915_read_mch_val(void); +extern bool i915_gpu_raise(void); +extern bool i915_gpu_lower(void); +extern bool i915_gpu_busy(void); +extern bool i915_gpu_turbo_disable(void); /* * The Bridge device's PCI config space has information about the @@ -95,9 +92,7 @@ extern struct resource intel_graphics_stolen_res; #define I845_TSEG_SIZE_512K (2 << 1) #define I845_TSEG_SIZE_1M (3 << 1) -#define INTEL_BSM 0x5c -#define INTEL_GEN11_BSM_DW0 0xc0 -#define INTEL_GEN11_BSM_DW1 0xc4 +#define INTEL_BSM 0x5c #define INTEL_BSM_MASK (-(1u << 20)) #endif /* _I915_DRM_H_ */ diff --git a/include/drm/i915_pciids.h b/include/drm/i915_pciids.h index eee18fa53b..2f88e2b8b4 100644 --- a/include/drm/i915_pciids.h +++ b/include/drm/i915_pciids.h @@ -37,7 +37,7 @@ */ #define 
INTEL_VGA_DEVICE(id, info) { \ 0x8086, id, \ - ~0, ~0, \ + PCI_ANY_ID, PCI_ANY_ID, \ 0x030000, 0xff0000, \ (unsigned long) info } @@ -47,14 +47,6 @@ 0x030000, 0xff0000, \ (unsigned long) info } -#define INTEL_I810_IDS(info) \ - INTEL_VGA_DEVICE(0x7121, info), /* I810 */ \ - INTEL_VGA_DEVICE(0x7123, info), /* I810_DC100 */ \ - INTEL_VGA_DEVICE(0x7125, info) /* I810_E */ - -#define INTEL_I815_IDS(info) \ - INTEL_VGA_DEVICE(0x1132, info) /* I815*/ - #define INTEL_I830_IDS(info) \ INTEL_VGA_DEVICE(0x3577, info) @@ -108,10 +100,8 @@ INTEL_VGA_DEVICE(0x2e42, info), /* B43_G */ \ INTEL_VGA_DEVICE(0x2e92, info) /* B43_G.1 */ -#define INTEL_PINEVIEW_G_IDS(info) \ - INTEL_VGA_DEVICE(0xa001, info) - -#define INTEL_PINEVIEW_M_IDS(info) \ +#define INTEL_PINEVIEW_IDS(info) \ + INTEL_VGA_DEVICE(0xa001, info), \ INTEL_VGA_DEVICE(0xa011, info) #define INTEL_IRONLAKE_D_IDS(info) \ @@ -120,205 +110,132 @@ #define INTEL_IRONLAKE_M_IDS(info) \ INTEL_VGA_DEVICE(0x0046, info) -#define INTEL_SNB_D_GT1_IDS(info) \ +#define INTEL_SNB_D_IDS(info) \ INTEL_VGA_DEVICE(0x0102, info), \ + INTEL_VGA_DEVICE(0x0112, info), \ + INTEL_VGA_DEVICE(0x0122, info), \ INTEL_VGA_DEVICE(0x010A, info) -#define INTEL_SNB_D_GT2_IDS(info) \ - INTEL_VGA_DEVICE(0x0112, info), \ - INTEL_VGA_DEVICE(0x0122, info) - -#define INTEL_SNB_D_IDS(info) \ - INTEL_SNB_D_GT1_IDS(info), \ - INTEL_SNB_D_GT2_IDS(info) - -#define INTEL_SNB_M_GT1_IDS(info) \ - INTEL_VGA_DEVICE(0x0106, info) - -#define INTEL_SNB_M_GT2_IDS(info) \ +#define INTEL_SNB_M_IDS(info) \ + INTEL_VGA_DEVICE(0x0106, info), \ INTEL_VGA_DEVICE(0x0116, info), \ INTEL_VGA_DEVICE(0x0126, info) -#define INTEL_SNB_M_IDS(info) \ - INTEL_SNB_M_GT1_IDS(info), \ - INTEL_SNB_M_GT2_IDS(info) - -#define INTEL_IVB_M_GT1_IDS(info) \ - INTEL_VGA_DEVICE(0x0156, info) /* GT1 mobile */ - -#define INTEL_IVB_M_GT2_IDS(info) \ - INTEL_VGA_DEVICE(0x0166, info) /* GT2 mobile */ - #define INTEL_IVB_M_IDS(info) \ - INTEL_IVB_M_GT1_IDS(info), \ - INTEL_IVB_M_GT2_IDS(info) - -#define 
INTEL_IVB_D_GT1_IDS(info) \ - INTEL_VGA_DEVICE(0x0152, info), /* GT1 desktop */ \ - INTEL_VGA_DEVICE(0x015a, info) /* GT1 server */ - -#define INTEL_IVB_D_GT2_IDS(info) \ - INTEL_VGA_DEVICE(0x0162, info), /* GT2 desktop */ \ - INTEL_VGA_DEVICE(0x016a, info) /* GT2 server */ + INTEL_VGA_DEVICE(0x0156, info), /* GT1 mobile */ \ + INTEL_VGA_DEVICE(0x0166, info) /* GT2 mobile */ #define INTEL_IVB_D_IDS(info) \ - INTEL_IVB_D_GT1_IDS(info), \ - INTEL_IVB_D_GT2_IDS(info) + INTEL_VGA_DEVICE(0x0152, info), /* GT1 desktop */ \ + INTEL_VGA_DEVICE(0x0162, info), /* GT2 desktop */ \ + INTEL_VGA_DEVICE(0x015a, info), /* GT1 server */ \ + INTEL_VGA_DEVICE(0x016a, info) /* GT2 server */ #define INTEL_IVB_Q_IDS(info) \ INTEL_QUANTA_VGA_DEVICE(info) /* Quanta transcode */ -#define INTEL_HSW_ULT_GT1_IDS(info) \ - INTEL_VGA_DEVICE(0x0A02, info), /* ULT GT1 desktop */ \ - INTEL_VGA_DEVICE(0x0A06, info), /* ULT GT1 mobile */ \ - INTEL_VGA_DEVICE(0x0A0A, info), /* ULT GT1 server */ \ - INTEL_VGA_DEVICE(0x0A0B, info) /* ULT GT1 reserved */ - -#define INTEL_HSW_ULX_GT1_IDS(info) \ - INTEL_VGA_DEVICE(0x0A0E, info) /* ULX GT1 mobile */ - -#define INTEL_HSW_GT1_IDS(info) \ - INTEL_HSW_ULT_GT1_IDS(info), \ - INTEL_HSW_ULX_GT1_IDS(info), \ - INTEL_VGA_DEVICE(0x0402, info), /* GT1 desktop */ \ - INTEL_VGA_DEVICE(0x0406, info), /* GT1 mobile */ \ - INTEL_VGA_DEVICE(0x040A, info), /* GT1 server */ \ - INTEL_VGA_DEVICE(0x040B, info), /* GT1 reserved */ \ - INTEL_VGA_DEVICE(0x040E, info), /* GT1 reserved */ \ - INTEL_VGA_DEVICE(0x0C02, info), /* SDV GT1 desktop */ \ - INTEL_VGA_DEVICE(0x0C06, info), /* SDV GT1 mobile */ \ - INTEL_VGA_DEVICE(0x0C0A, info), /* SDV GT1 server */ \ - INTEL_VGA_DEVICE(0x0C0B, info), /* SDV GT1 reserved */ \ - INTEL_VGA_DEVICE(0x0C0E, info), /* SDV GT1 reserved */ \ - INTEL_VGA_DEVICE(0x0D02, info), /* CRW GT1 desktop */ \ - INTEL_VGA_DEVICE(0x0D06, info), /* CRW GT1 mobile */ \ - INTEL_VGA_DEVICE(0x0D0A, info), /* CRW GT1 server */ \ - INTEL_VGA_DEVICE(0x0D0B, info), /* 
CRW GT1 reserved */ \ - INTEL_VGA_DEVICE(0x0D0E, info) /* CRW GT1 reserved */ - -#define INTEL_HSW_ULT_GT2_IDS(info) \ - INTEL_VGA_DEVICE(0x0A12, info), /* ULT GT2 desktop */ \ - INTEL_VGA_DEVICE(0x0A16, info), /* ULT GT2 mobile */ \ - INTEL_VGA_DEVICE(0x0A1A, info), /* ULT GT2 server */ \ - INTEL_VGA_DEVICE(0x0A1B, info) /* ULT GT2 reserved */ \ - -#define INTEL_HSW_ULX_GT2_IDS(info) \ - INTEL_VGA_DEVICE(0x0A1E, info) /* ULX GT2 mobile */ \ - -#define INTEL_HSW_GT2_IDS(info) \ - INTEL_HSW_ULT_GT2_IDS(info), \ - INTEL_HSW_ULX_GT2_IDS(info), \ - INTEL_VGA_DEVICE(0x0412, info), /* GT2 desktop */ \ - INTEL_VGA_DEVICE(0x0416, info), /* GT2 mobile */ \ - INTEL_VGA_DEVICE(0x041A, info), /* GT2 server */ \ - INTEL_VGA_DEVICE(0x041B, info), /* GT2 reserved */ \ - INTEL_VGA_DEVICE(0x041E, info), /* GT2 reserved */ \ - INTEL_VGA_DEVICE(0x0C12, info), /* SDV GT2 desktop */ \ - INTEL_VGA_DEVICE(0x0C16, info), /* SDV GT2 mobile */ \ - INTEL_VGA_DEVICE(0x0C1A, info), /* SDV GT2 server */ \ - INTEL_VGA_DEVICE(0x0C1B, info), /* SDV GT2 reserved */ \ - INTEL_VGA_DEVICE(0x0C1E, info), /* SDV GT2 reserved */ \ - INTEL_VGA_DEVICE(0x0D12, info), /* CRW GT2 desktop */ \ - INTEL_VGA_DEVICE(0x0D16, info), /* CRW GT2 mobile */ \ - INTEL_VGA_DEVICE(0x0D1A, info), /* CRW GT2 server */ \ - INTEL_VGA_DEVICE(0x0D1B, info), /* CRW GT2 reserved */ \ - INTEL_VGA_DEVICE(0x0D1E, info) /* CRW GT2 reserved */ - -#define INTEL_HSW_ULT_GT3_IDS(info) \ - INTEL_VGA_DEVICE(0x0A22, info), /* ULT GT3 desktop */ \ - INTEL_VGA_DEVICE(0x0A26, info), /* ULT GT3 mobile */ \ - INTEL_VGA_DEVICE(0x0A2A, info), /* ULT GT3 server */ \ - INTEL_VGA_DEVICE(0x0A2B, info), /* ULT GT3 reserved */ \ - INTEL_VGA_DEVICE(0x0A2E, info) /* ULT GT3 reserved */ - -#define INTEL_HSW_GT3_IDS(info) \ - INTEL_HSW_ULT_GT3_IDS(info), \ - INTEL_VGA_DEVICE(0x0422, info), /* GT3 desktop */ \ - INTEL_VGA_DEVICE(0x0426, info), /* GT3 mobile */ \ - INTEL_VGA_DEVICE(0x042A, info), /* GT3 server */ \ - INTEL_VGA_DEVICE(0x042B, info), /* GT3 
reserved */ \ - INTEL_VGA_DEVICE(0x042E, info), /* GT3 reserved */ \ - INTEL_VGA_DEVICE(0x0C22, info), /* SDV GT3 desktop */ \ - INTEL_VGA_DEVICE(0x0C26, info), /* SDV GT3 mobile */ \ - INTEL_VGA_DEVICE(0x0C2A, info), /* SDV GT3 server */ \ - INTEL_VGA_DEVICE(0x0C2B, info), /* SDV GT3 reserved */ \ - INTEL_VGA_DEVICE(0x0C2E, info), /* SDV GT3 reserved */ \ - INTEL_VGA_DEVICE(0x0D22, info), /* CRW GT3 desktop */ \ - INTEL_VGA_DEVICE(0x0D26, info), /* CRW GT3 mobile */ \ - INTEL_VGA_DEVICE(0x0D2A, info), /* CRW GT3 server */ \ - INTEL_VGA_DEVICE(0x0D2B, info), /* CRW GT3 reserved */ \ - INTEL_VGA_DEVICE(0x0D2E, info) /* CRW GT3 reserved */ - #define INTEL_HSW_IDS(info) \ - INTEL_HSW_GT1_IDS(info), \ - INTEL_HSW_GT2_IDS(info), \ - INTEL_HSW_GT3_IDS(info) + INTEL_VGA_DEVICE(0x0402, info), /* GT1 desktop */ \ + INTEL_VGA_DEVICE(0x0412, info), /* GT2 desktop */ \ + INTEL_VGA_DEVICE(0x0422, info), /* GT3 desktop */ \ + INTEL_VGA_DEVICE(0x040a, info), /* GT1 server */ \ + INTEL_VGA_DEVICE(0x041a, info), /* GT2 server */ \ + INTEL_VGA_DEVICE(0x042a, info), /* GT3 server */ \ + INTEL_VGA_DEVICE(0x040B, info), /* GT1 reserved */ \ + INTEL_VGA_DEVICE(0x041B, info), /* GT2 reserved */ \ + INTEL_VGA_DEVICE(0x042B, info), /* GT3 reserved */ \ + INTEL_VGA_DEVICE(0x040E, info), /* GT1 reserved */ \ + INTEL_VGA_DEVICE(0x041E, info), /* GT2 reserved */ \ + INTEL_VGA_DEVICE(0x042E, info), /* GT3 reserved */ \ + INTEL_VGA_DEVICE(0x0C02, info), /* SDV GT1 desktop */ \ + INTEL_VGA_DEVICE(0x0C12, info), /* SDV GT2 desktop */ \ + INTEL_VGA_DEVICE(0x0C22, info), /* SDV GT3 desktop */ \ + INTEL_VGA_DEVICE(0x0C0A, info), /* SDV GT1 server */ \ + INTEL_VGA_DEVICE(0x0C1A, info), /* SDV GT2 server */ \ + INTEL_VGA_DEVICE(0x0C2A, info), /* SDV GT3 server */ \ + INTEL_VGA_DEVICE(0x0C0B, info), /* SDV GT1 reserved */ \ + INTEL_VGA_DEVICE(0x0C1B, info), /* SDV GT2 reserved */ \ + INTEL_VGA_DEVICE(0x0C2B, info), /* SDV GT3 reserved */ \ + INTEL_VGA_DEVICE(0x0C0E, info), /* SDV GT1 reserved */ \ + 
INTEL_VGA_DEVICE(0x0C1E, info), /* SDV GT2 reserved */ \ + INTEL_VGA_DEVICE(0x0C2E, info), /* SDV GT3 reserved */ \ + INTEL_VGA_DEVICE(0x0A02, info), /* ULT GT1 desktop */ \ + INTEL_VGA_DEVICE(0x0A12, info), /* ULT GT2 desktop */ \ + INTEL_VGA_DEVICE(0x0A22, info), /* ULT GT3 desktop */ \ + INTEL_VGA_DEVICE(0x0A0A, info), /* ULT GT1 server */ \ + INTEL_VGA_DEVICE(0x0A1A, info), /* ULT GT2 server */ \ + INTEL_VGA_DEVICE(0x0A2A, info), /* ULT GT3 server */ \ + INTEL_VGA_DEVICE(0x0A0B, info), /* ULT GT1 reserved */ \ + INTEL_VGA_DEVICE(0x0A1B, info), /* ULT GT2 reserved */ \ + INTEL_VGA_DEVICE(0x0A2B, info), /* ULT GT3 reserved */ \ + INTEL_VGA_DEVICE(0x0D02, info), /* CRW GT1 desktop */ \ + INTEL_VGA_DEVICE(0x0D12, info), /* CRW GT2 desktop */ \ + INTEL_VGA_DEVICE(0x0D22, info), /* CRW GT3 desktop */ \ + INTEL_VGA_DEVICE(0x0D0A, info), /* CRW GT1 server */ \ + INTEL_VGA_DEVICE(0x0D1A, info), /* CRW GT2 server */ \ + INTEL_VGA_DEVICE(0x0D2A, info), /* CRW GT3 server */ \ + INTEL_VGA_DEVICE(0x0D0B, info), /* CRW GT1 reserved */ \ + INTEL_VGA_DEVICE(0x0D1B, info), /* CRW GT2 reserved */ \ + INTEL_VGA_DEVICE(0x0D2B, info), /* CRW GT3 reserved */ \ + INTEL_VGA_DEVICE(0x0D0E, info), /* CRW GT1 reserved */ \ + INTEL_VGA_DEVICE(0x0D1E, info), /* CRW GT2 reserved */ \ + INTEL_VGA_DEVICE(0x0D2E, info), /* CRW GT3 reserved */ \ + INTEL_VGA_DEVICE(0x0406, info), /* GT1 mobile */ \ + INTEL_VGA_DEVICE(0x0416, info), /* GT2 mobile */ \ + INTEL_VGA_DEVICE(0x0426, info), /* GT2 mobile */ \ + INTEL_VGA_DEVICE(0x0C06, info), /* SDV GT1 mobile */ \ + INTEL_VGA_DEVICE(0x0C16, info), /* SDV GT2 mobile */ \ + INTEL_VGA_DEVICE(0x0C26, info), /* SDV GT3 mobile */ \ + INTEL_VGA_DEVICE(0x0A06, info), /* ULT GT1 mobile */ \ + INTEL_VGA_DEVICE(0x0A16, info), /* ULT GT2 mobile */ \ + INTEL_VGA_DEVICE(0x0A26, info), /* ULT GT3 mobile */ \ + INTEL_VGA_DEVICE(0x0A0E, info), /* ULX GT1 mobile */ \ + INTEL_VGA_DEVICE(0x0A1E, info), /* ULX GT2 mobile */ \ + INTEL_VGA_DEVICE(0x0A2E, info), /* ULT GT3 
reserved */ \ + INTEL_VGA_DEVICE(0x0D06, info), /* CRW GT1 mobile */ \ + INTEL_VGA_DEVICE(0x0D16, info), /* CRW GT2 mobile */ \ + INTEL_VGA_DEVICE(0x0D26, info) /* CRW GT3 mobile */ #define INTEL_VLV_IDS(info) \ INTEL_VGA_DEVICE(0x0f30, info), \ INTEL_VGA_DEVICE(0x0f31, info), \ INTEL_VGA_DEVICE(0x0f32, info), \ - INTEL_VGA_DEVICE(0x0f33, info) + INTEL_VGA_DEVICE(0x0f33, info), \ + INTEL_VGA_DEVICE(0x0157, info), \ + INTEL_VGA_DEVICE(0x0155, info) -#define INTEL_BDW_ULT_GT1_IDS(info) \ - INTEL_VGA_DEVICE(0x1606, info), /* GT1 ULT */ \ - INTEL_VGA_DEVICE(0x160B, info) /* GT1 Iris */ - -#define INTEL_BDW_ULX_GT1_IDS(info) \ - INTEL_VGA_DEVICE(0x160E, info) /* GT1 ULX */ - -#define INTEL_BDW_GT1_IDS(info) \ - INTEL_BDW_ULT_GT1_IDS(info), \ - INTEL_BDW_ULX_GT1_IDS(info), \ +#define INTEL_BDW_GT12_IDS(info) \ INTEL_VGA_DEVICE(0x1602, info), /* GT1 ULT */ \ - INTEL_VGA_DEVICE(0x160A, info), /* GT1 Server */ \ - INTEL_VGA_DEVICE(0x160D, info) /* GT1 Workstation */ - -#define INTEL_BDW_ULT_GT2_IDS(info) \ + INTEL_VGA_DEVICE(0x1606, info), /* GT1 ULT */ \ + INTEL_VGA_DEVICE(0x160B, info), /* GT1 Iris */ \ + INTEL_VGA_DEVICE(0x160E, info), /* GT1 ULX */ \ + INTEL_VGA_DEVICE(0x1612, info), /* GT2 Halo */ \ INTEL_VGA_DEVICE(0x1616, info), /* GT2 ULT */ \ - INTEL_VGA_DEVICE(0x161B, info) /* GT2 ULT */ - -#define INTEL_BDW_ULX_GT2_IDS(info) \ - INTEL_VGA_DEVICE(0x161E, info) /* GT2 ULX */ - -#define INTEL_BDW_GT2_IDS(info) \ - INTEL_BDW_ULT_GT2_IDS(info), \ - INTEL_BDW_ULX_GT2_IDS(info), \ - INTEL_VGA_DEVICE(0x1612, info), /* GT2 Halo */ \ + INTEL_VGA_DEVICE(0x161B, info), /* GT2 ULT */ \ + INTEL_VGA_DEVICE(0x161E, info), /* GT2 ULX */ \ + INTEL_VGA_DEVICE(0x160A, info), /* GT1 Server */ \ + INTEL_VGA_DEVICE(0x160D, info), /* GT1 Workstation */ \ INTEL_VGA_DEVICE(0x161A, info), /* GT2 Server */ \ INTEL_VGA_DEVICE(0x161D, info) /* GT2 Workstation */ -#define INTEL_BDW_ULT_GT3_IDS(info) \ - INTEL_VGA_DEVICE(0x1626, info), /* ULT */ \ - INTEL_VGA_DEVICE(0x162B, info) /* Iris */ \ - 
-#define INTEL_BDW_ULX_GT3_IDS(info) \ - INTEL_VGA_DEVICE(0x162E, info) /* ULX */ - #define INTEL_BDW_GT3_IDS(info) \ - INTEL_BDW_ULT_GT3_IDS(info), \ - INTEL_BDW_ULX_GT3_IDS(info), \ INTEL_VGA_DEVICE(0x1622, info), /* ULT */ \ + INTEL_VGA_DEVICE(0x1626, info), /* ULT */ \ + INTEL_VGA_DEVICE(0x162B, info), /* Iris */ \ + INTEL_VGA_DEVICE(0x162E, info), /* ULX */\ INTEL_VGA_DEVICE(0x162A, info), /* Server */ \ INTEL_VGA_DEVICE(0x162D, info) /* Workstation */ -#define INTEL_BDW_ULT_RSVD_IDS(info) \ - INTEL_VGA_DEVICE(0x1636, info), /* ULT */ \ - INTEL_VGA_DEVICE(0x163B, info) /* Iris */ - -#define INTEL_BDW_ULX_RSVD_IDS(info) \ - INTEL_VGA_DEVICE(0x163E, info) /* ULX */ - #define INTEL_BDW_RSVD_IDS(info) \ - INTEL_BDW_ULT_RSVD_IDS(info), \ - INTEL_BDW_ULX_RSVD_IDS(info), \ INTEL_VGA_DEVICE(0x1632, info), /* ULT */ \ + INTEL_VGA_DEVICE(0x1636, info), /* ULT */ \ + INTEL_VGA_DEVICE(0x163B, info), /* Iris */ \ + INTEL_VGA_DEVICE(0x163E, info), /* ULX */ \ INTEL_VGA_DEVICE(0x163A, info), /* Server */ \ INTEL_VGA_DEVICE(0x163D, info) /* Workstation */ #define INTEL_BDW_IDS(info) \ - INTEL_BDW_GT1_IDS(info), \ - INTEL_BDW_GT2_IDS(info), \ + INTEL_BDW_GT12_IDS(info), \ INTEL_BDW_GT3_IDS(info), \ INTEL_BDW_RSVD_IDS(info) @@ -328,53 +245,34 @@ INTEL_VGA_DEVICE(0x22b2, info), \ INTEL_VGA_DEVICE(0x22b3, info) -#define INTEL_SKL_ULT_GT1_IDS(info) \ - INTEL_VGA_DEVICE(0x1906, info), /* ULT GT1 */ \ - INTEL_VGA_DEVICE(0x1913, info) /* ULT GT1.5 */ - -#define INTEL_SKL_ULX_GT1_IDS(info) \ - INTEL_VGA_DEVICE(0x190E, info), /* ULX GT1 */ \ - INTEL_VGA_DEVICE(0x1915, info) /* ULX GT1.5 */ - #define INTEL_SKL_GT1_IDS(info) \ - INTEL_SKL_ULT_GT1_IDS(info), \ - INTEL_SKL_ULX_GT1_IDS(info), \ + INTEL_VGA_DEVICE(0x1906, info), /* ULT GT1 */ \ + INTEL_VGA_DEVICE(0x190E, info), /* ULX GT1 */ \ INTEL_VGA_DEVICE(0x1902, info), /* DT GT1 */ \ - INTEL_VGA_DEVICE(0x190A, info), /* SRV GT1 */ \ INTEL_VGA_DEVICE(0x190B, info), /* Halo GT1 */ \ - INTEL_VGA_DEVICE(0x1917, info) /* DT GT1.5 */ - 
-#define INTEL_SKL_ULT_GT2_IDS(info) \ - INTEL_VGA_DEVICE(0x1916, info), /* ULT GT2 */ \ - INTEL_VGA_DEVICE(0x1921, info) /* ULT GT2F */ - -#define INTEL_SKL_ULX_GT2_IDS(info) \ - INTEL_VGA_DEVICE(0x191E, info) /* ULX GT2 */ + INTEL_VGA_DEVICE(0x190A, info) /* SRV GT1 */ #define INTEL_SKL_GT2_IDS(info) \ - INTEL_SKL_ULT_GT2_IDS(info), \ - INTEL_SKL_ULX_GT2_IDS(info), \ + INTEL_VGA_DEVICE(0x1916, info), /* ULT GT2 */ \ + INTEL_VGA_DEVICE(0x1921, info), /* ULT GT2F */ \ + INTEL_VGA_DEVICE(0x191E, info), /* ULX GT2 */ \ INTEL_VGA_DEVICE(0x1912, info), /* DT GT2 */ \ - INTEL_VGA_DEVICE(0x191A, info), /* SRV GT2 */ \ INTEL_VGA_DEVICE(0x191B, info), /* Halo GT2 */ \ + INTEL_VGA_DEVICE(0x191A, info), /* SRV GT2 */ \ INTEL_VGA_DEVICE(0x191D, info) /* WKS GT2 */ -#define INTEL_SKL_ULT_GT3_IDS(info) \ - INTEL_VGA_DEVICE(0x1923, info), /* ULT GT3 */ \ - INTEL_VGA_DEVICE(0x1926, info), /* ULT GT3e */ \ - INTEL_VGA_DEVICE(0x1927, info) /* ULT GT3e */ - #define INTEL_SKL_GT3_IDS(info) \ - INTEL_SKL_ULT_GT3_IDS(info), \ - INTEL_VGA_DEVICE(0x192A, info), /* SRV GT3 */ \ - INTEL_VGA_DEVICE(0x192B, info), /* Halo GT3e */ \ - INTEL_VGA_DEVICE(0x192D, info) /* SRV GT3e */ + INTEL_VGA_DEVICE(0x1923, info), /* ULT GT3 */ \ + INTEL_VGA_DEVICE(0x1926, info), /* ULT GT3 */ \ + INTEL_VGA_DEVICE(0x1927, info), /* ULT GT3 */ \ + INTEL_VGA_DEVICE(0x192B, info), /* Halo GT3 */ \ + INTEL_VGA_DEVICE(0x192A, info) /* SRV GT3 */ #define INTEL_SKL_GT4_IDS(info) \ INTEL_VGA_DEVICE(0x1932, info), /* DT GT4 */ \ - INTEL_VGA_DEVICE(0x193A, info), /* SRV GT4e */ \ - INTEL_VGA_DEVICE(0x193B, info), /* Halo GT4e */ \ - INTEL_VGA_DEVICE(0x193D, info) /* WKS GT4e */ + INTEL_VGA_DEVICE(0x193B, info), /* Halo GT4 */ \ + INTEL_VGA_DEVICE(0x193D, info), /* WKS GT4 */ \ + INTEL_VGA_DEVICE(0x193A, info) /* SRV GT4 */ #define INTEL_SKL_IDS(info) \ INTEL_SKL_GT1_IDS(info), \ @@ -389,283 +287,38 @@ INTEL_VGA_DEVICE(0x5A84, info), /* APL HD Graphics 505 */ \ INTEL_VGA_DEVICE(0x5A85, info) /* APL HD Graphics 500 */ 
-#define INTEL_GLK_IDS(info) \ - INTEL_VGA_DEVICE(0x3184, info), \ - INTEL_VGA_DEVICE(0x3185, info) - -#define INTEL_KBL_ULT_GT1_IDS(info) \ - INTEL_VGA_DEVICE(0x5906, info), /* ULT GT1 */ \ - INTEL_VGA_DEVICE(0x5913, info) /* ULT GT1.5 */ - -#define INTEL_KBL_ULX_GT1_IDS(info) \ - INTEL_VGA_DEVICE(0x590E, info), /* ULX GT1 */ \ - INTEL_VGA_DEVICE(0x5915, info) /* ULX GT1.5 */ - #define INTEL_KBL_GT1_IDS(info) \ - INTEL_KBL_ULT_GT1_IDS(info), \ - INTEL_KBL_ULX_GT1_IDS(info), \ + INTEL_VGA_DEVICE(0x5913, info), /* ULT GT1.5 */ \ + INTEL_VGA_DEVICE(0x5915, info), /* ULX GT1.5 */ \ + INTEL_VGA_DEVICE(0x5917, info), /* DT GT1.5 */ \ + INTEL_VGA_DEVICE(0x5906, info), /* ULT GT1 */ \ + INTEL_VGA_DEVICE(0x590E, info), /* ULX GT1 */ \ INTEL_VGA_DEVICE(0x5902, info), /* DT GT1 */ \ INTEL_VGA_DEVICE(0x5908, info), /* Halo GT1 */ \ - INTEL_VGA_DEVICE(0x590A, info), /* SRV GT1 */ \ - INTEL_VGA_DEVICE(0x590B, info) /* Halo GT1 */ - -#define INTEL_KBL_ULT_GT2_IDS(info) \ - INTEL_VGA_DEVICE(0x5916, info), /* ULT GT2 */ \ - INTEL_VGA_DEVICE(0x5921, info) /* ULT GT2F */ - -#define INTEL_KBL_ULX_GT2_IDS(info) \ - INTEL_VGA_DEVICE(0x591E, info) /* ULX GT2 */ + INTEL_VGA_DEVICE(0x590B, info), /* Halo GT1 */ \ + INTEL_VGA_DEVICE(0x590A, info) /* SRV GT1 */ #define INTEL_KBL_GT2_IDS(info) \ - INTEL_KBL_ULT_GT2_IDS(info), \ - INTEL_KBL_ULX_GT2_IDS(info), \ + INTEL_VGA_DEVICE(0x5916, info), /* ULT GT2 */ \ + INTEL_VGA_DEVICE(0x5921, info), /* ULT GT2F */ \ + INTEL_VGA_DEVICE(0x591E, info), /* ULX GT2 */ \ INTEL_VGA_DEVICE(0x5912, info), /* DT GT2 */ \ - INTEL_VGA_DEVICE(0x5917, info), /* Mobile GT2 */ \ - INTEL_VGA_DEVICE(0x591A, info), /* SRV GT2 */ \ INTEL_VGA_DEVICE(0x591B, info), /* Halo GT2 */ \ + INTEL_VGA_DEVICE(0x591A, info), /* SRV GT2 */ \ INTEL_VGA_DEVICE(0x591D, info) /* WKS GT2 */ -#define INTEL_KBL_ULT_GT3_IDS(info) \ - INTEL_VGA_DEVICE(0x5926, info) /* ULT GT3 */ - #define INTEL_KBL_GT3_IDS(info) \ - INTEL_KBL_ULT_GT3_IDS(info), \ INTEL_VGA_DEVICE(0x5923, info), /* ULT GT3 
*/ \ + INTEL_VGA_DEVICE(0x5926, info), /* ULT GT3 */ \ INTEL_VGA_DEVICE(0x5927, info) /* ULT GT3 */ #define INTEL_KBL_GT4_IDS(info) \ INTEL_VGA_DEVICE(0x593B, info) /* Halo GT4 */ -/* AML/KBL Y GT2 */ -#define INTEL_AML_KBL_GT2_IDS(info) \ - INTEL_VGA_DEVICE(0x591C, info), /* ULX GT2 */ \ - INTEL_VGA_DEVICE(0x87C0, info) /* ULX GT2 */ - -/* AML/CFL Y GT2 */ -#define INTEL_AML_CFL_GT2_IDS(info) \ - INTEL_VGA_DEVICE(0x87CA, info) - -/* CML GT1 */ -#define INTEL_CML_GT1_IDS(info) \ - INTEL_VGA_DEVICE(0x9BA2, info), \ - INTEL_VGA_DEVICE(0x9BA4, info), \ - INTEL_VGA_DEVICE(0x9BA5, info), \ - INTEL_VGA_DEVICE(0x9BA8, info) - -#define INTEL_CML_U_GT1_IDS(info) \ - INTEL_VGA_DEVICE(0x9B21, info), \ - INTEL_VGA_DEVICE(0x9BAA, info), \ - INTEL_VGA_DEVICE(0x9BAC, info) - -/* CML GT2 */ -#define INTEL_CML_GT2_IDS(info) \ - INTEL_VGA_DEVICE(0x9BC2, info), \ - INTEL_VGA_DEVICE(0x9BC4, info), \ - INTEL_VGA_DEVICE(0x9BC5, info), \ - INTEL_VGA_DEVICE(0x9BC6, info), \ - INTEL_VGA_DEVICE(0x9BC8, info), \ - INTEL_VGA_DEVICE(0x9BE6, info), \ - INTEL_VGA_DEVICE(0x9BF6, info) - -#define INTEL_CML_U_GT2_IDS(info) \ - INTEL_VGA_DEVICE(0x9B41, info), \ - INTEL_VGA_DEVICE(0x9BCA, info), \ - INTEL_VGA_DEVICE(0x9BCC, info) - #define INTEL_KBL_IDS(info) \ INTEL_KBL_GT1_IDS(info), \ INTEL_KBL_GT2_IDS(info), \ INTEL_KBL_GT3_IDS(info), \ - INTEL_KBL_GT4_IDS(info), \ - INTEL_AML_KBL_GT2_IDS(info) - -/* CFL S */ -#define INTEL_CFL_S_GT1_IDS(info) \ - INTEL_VGA_DEVICE(0x3E90, info), /* SRV GT1 */ \ - INTEL_VGA_DEVICE(0x3E93, info), /* SRV GT1 */ \ - INTEL_VGA_DEVICE(0x3E99, info) /* SRV GT1 */ - -#define INTEL_CFL_S_GT2_IDS(info) \ - INTEL_VGA_DEVICE(0x3E91, info), /* SRV GT2 */ \ - INTEL_VGA_DEVICE(0x3E92, info), /* SRV GT2 */ \ - INTEL_VGA_DEVICE(0x3E96, info), /* SRV GT2 */ \ - INTEL_VGA_DEVICE(0x3E98, info), /* SRV GT2 */ \ - INTEL_VGA_DEVICE(0x3E9A, info) /* SRV GT2 */ - -/* CFL H */ -#define INTEL_CFL_H_GT1_IDS(info) \ - INTEL_VGA_DEVICE(0x3E9C, info) - -#define INTEL_CFL_H_GT2_IDS(info) \ - 
INTEL_VGA_DEVICE(0x3E94, info), /* Halo GT2 */ \ - INTEL_VGA_DEVICE(0x3E9B, info) /* Halo GT2 */ - -/* CFL U GT2 */ -#define INTEL_CFL_U_GT2_IDS(info) \ - INTEL_VGA_DEVICE(0x3EA9, info) - -/* CFL U GT3 */ -#define INTEL_CFL_U_GT3_IDS(info) \ - INTEL_VGA_DEVICE(0x3EA5, info), /* ULT GT3 */ \ - INTEL_VGA_DEVICE(0x3EA6, info), /* ULT GT3 */ \ - INTEL_VGA_DEVICE(0x3EA7, info), /* ULT GT3 */ \ - INTEL_VGA_DEVICE(0x3EA8, info) /* ULT GT3 */ - -/* WHL/CFL U GT1 */ -#define INTEL_WHL_U_GT1_IDS(info) \ - INTEL_VGA_DEVICE(0x3EA1, info), \ - INTEL_VGA_DEVICE(0x3EA4, info) - -/* WHL/CFL U GT2 */ -#define INTEL_WHL_U_GT2_IDS(info) \ - INTEL_VGA_DEVICE(0x3EA0, info), \ - INTEL_VGA_DEVICE(0x3EA3, info) - -/* WHL/CFL U GT3 */ -#define INTEL_WHL_U_GT3_IDS(info) \ - INTEL_VGA_DEVICE(0x3EA2, info) - -#define INTEL_CFL_IDS(info) \ - INTEL_CFL_S_GT1_IDS(info), \ - INTEL_CFL_S_GT2_IDS(info), \ - INTEL_CFL_H_GT1_IDS(info), \ - INTEL_CFL_H_GT2_IDS(info), \ - INTEL_CFL_U_GT2_IDS(info), \ - INTEL_CFL_U_GT3_IDS(info), \ - INTEL_WHL_U_GT1_IDS(info), \ - INTEL_WHL_U_GT2_IDS(info), \ - INTEL_WHL_U_GT3_IDS(info), \ - INTEL_AML_CFL_GT2_IDS(info), \ - INTEL_CML_GT1_IDS(info), \ - INTEL_CML_GT2_IDS(info), \ - INTEL_CML_U_GT1_IDS(info), \ - INTEL_CML_U_GT2_IDS(info) - -/* CNL */ -#define INTEL_CNL_PORT_F_IDS(info) \ - INTEL_VGA_DEVICE(0x5A44, info), \ - INTEL_VGA_DEVICE(0x5A4C, info), \ - INTEL_VGA_DEVICE(0x5A54, info), \ - INTEL_VGA_DEVICE(0x5A5C, info) - -#define INTEL_CNL_IDS(info) \ - INTEL_CNL_PORT_F_IDS(info), \ - INTEL_VGA_DEVICE(0x5A40, info), \ - INTEL_VGA_DEVICE(0x5A41, info), \ - INTEL_VGA_DEVICE(0x5A42, info), \ - INTEL_VGA_DEVICE(0x5A49, info), \ - INTEL_VGA_DEVICE(0x5A4A, info), \ - INTEL_VGA_DEVICE(0x5A50, info), \ - INTEL_VGA_DEVICE(0x5A51, info), \ - INTEL_VGA_DEVICE(0x5A52, info), \ - INTEL_VGA_DEVICE(0x5A59, info), \ - INTEL_VGA_DEVICE(0x5A5A, info) - -/* ICL */ -#define INTEL_ICL_PORT_F_IDS(info) \ - INTEL_VGA_DEVICE(0x8A50, info), \ - INTEL_VGA_DEVICE(0x8A52, info), \ - 
INTEL_VGA_DEVICE(0x8A53, info), \ - INTEL_VGA_DEVICE(0x8A54, info), \ - INTEL_VGA_DEVICE(0x8A56, info), \ - INTEL_VGA_DEVICE(0x8A57, info), \ - INTEL_VGA_DEVICE(0x8A58, info), \ - INTEL_VGA_DEVICE(0x8A59, info), \ - INTEL_VGA_DEVICE(0x8A5A, info), \ - INTEL_VGA_DEVICE(0x8A5B, info), \ - INTEL_VGA_DEVICE(0x8A5C, info), \ - INTEL_VGA_DEVICE(0x8A70, info), \ - INTEL_VGA_DEVICE(0x8A71, info) - -#define INTEL_ICL_11_IDS(info) \ - INTEL_ICL_PORT_F_IDS(info), \ - INTEL_VGA_DEVICE(0x8A51, info), \ - INTEL_VGA_DEVICE(0x8A5D, info) - -/* EHL */ -#define INTEL_EHL_IDS(info) \ - INTEL_VGA_DEVICE(0x4541, info), \ - INTEL_VGA_DEVICE(0x4551, info), \ - INTEL_VGA_DEVICE(0x4555, info), \ - INTEL_VGA_DEVICE(0x4557, info), \ - INTEL_VGA_DEVICE(0x4571, info) - -/* JSL */ -#define INTEL_JSL_IDS(info) \ - INTEL_VGA_DEVICE(0x4E51, info), \ - INTEL_VGA_DEVICE(0x4E55, info), \ - INTEL_VGA_DEVICE(0x4E57, info), \ - INTEL_VGA_DEVICE(0x4E61, info), \ - INTEL_VGA_DEVICE(0x4E71, info) - -/* TGL */ -#define INTEL_TGL_12_GT1_IDS(info) \ - INTEL_VGA_DEVICE(0x9A60, info), \ - INTEL_VGA_DEVICE(0x9A68, info), \ - INTEL_VGA_DEVICE(0x9A70, info) - -#define INTEL_TGL_12_GT2_IDS(info) \ - INTEL_VGA_DEVICE(0x9A40, info), \ - INTEL_VGA_DEVICE(0x9A49, info), \ - INTEL_VGA_DEVICE(0x9A59, info), \ - INTEL_VGA_DEVICE(0x9A78, info), \ - INTEL_VGA_DEVICE(0x9AC0, info), \ - INTEL_VGA_DEVICE(0x9AC9, info), \ - INTEL_VGA_DEVICE(0x9AD9, info), \ - INTEL_VGA_DEVICE(0x9AF8, info) - -#define INTEL_TGL_12_IDS(info) \ - INTEL_TGL_12_GT1_IDS(info), \ - INTEL_TGL_12_GT2_IDS(info) - -/* RKL */ -#define INTEL_RKL_IDS(info) \ - INTEL_VGA_DEVICE(0x4C80, info), \ - INTEL_VGA_DEVICE(0x4C8A, info), \ - INTEL_VGA_DEVICE(0x4C8B, info), \ - INTEL_VGA_DEVICE(0x4C8C, info), \ - INTEL_VGA_DEVICE(0x4C90, info), \ - INTEL_VGA_DEVICE(0x4C9A, info) - -/* DG1 */ -#define INTEL_DG1_IDS(info) \ - INTEL_VGA_DEVICE(0x4905, info), \ - INTEL_VGA_DEVICE(0x4906, info), \ - INTEL_VGA_DEVICE(0x4907, info), \ - INTEL_VGA_DEVICE(0x4908, info) - -/* 
ADL-S */ -#define INTEL_ADLS_IDS(info) \ - INTEL_VGA_DEVICE(0x4680, info), \ - INTEL_VGA_DEVICE(0x4681, info), \ - INTEL_VGA_DEVICE(0x4682, info), \ - INTEL_VGA_DEVICE(0x4683, info), \ - INTEL_VGA_DEVICE(0x4688, info), \ - INTEL_VGA_DEVICE(0x4689, info), \ - INTEL_VGA_DEVICE(0x4690, info), \ - INTEL_VGA_DEVICE(0x4691, info), \ - INTEL_VGA_DEVICE(0x4692, info), \ - INTEL_VGA_DEVICE(0x4693, info) - -/* ADL-P */ -#define INTEL_ADLP_IDS(info) \ - INTEL_VGA_DEVICE(0x46A0, info), \ - INTEL_VGA_DEVICE(0x46A1, info), \ - INTEL_VGA_DEVICE(0x46A2, info), \ - INTEL_VGA_DEVICE(0x46A3, info), \ - INTEL_VGA_DEVICE(0x46A6, info), \ - INTEL_VGA_DEVICE(0x46A8, info), \ - INTEL_VGA_DEVICE(0x46AA, info), \ - INTEL_VGA_DEVICE(0x462A, info), \ - INTEL_VGA_DEVICE(0x4626, info), \ - INTEL_VGA_DEVICE(0x4628, info), \ - INTEL_VGA_DEVICE(0x46B0, info), \ - INTEL_VGA_DEVICE(0x46B1, info), \ - INTEL_VGA_DEVICE(0x46B2, info), \ - INTEL_VGA_DEVICE(0x46B3, info), \ - INTEL_VGA_DEVICE(0x46C0, info), \ - INTEL_VGA_DEVICE(0x46C1, info), \ - INTEL_VGA_DEVICE(0x46C2, info), \ - INTEL_VGA_DEVICE(0x46C3, info) + INTEL_KBL_GT4_IDS(info) #endif /* _I915_PCIIDS_H */ diff --git a/include/drm/intel-gtt.h b/include/drm/intel-gtt.h index abfefaaf89..e47b019f82 100644 --- a/include/drm/intel-gtt.h +++ b/include/drm/intel-gtt.h @@ -1,16 +1,10 @@ -/* SPDX-License-Identifier: GPL-2.0 */ /* Common header for intel-gtt.ko and i915.ko */ #ifndef _DRM_INTEL_GTT_H #define _DRM_INTEL_GTT_H -#include -#include -#include - -void intel_gtt_get(u64 *gtt_total, - phys_addr_t *mappable_base, - resource_size_t *mappable_end); +void intel_gtt_get(u64 *gtt_total, u64 *stolen_size, + u64 *mappable_base, u64 *mappable_end); int intel_gmch_probe(struct pci_dev *bridge_pdev, struct pci_dev *gpu_pdev, struct agp_bridge_data *bridge); @@ -34,4 +28,8 @@ void intel_gtt_clear_range(unsigned int first_entry, unsigned int num_entries); /* flag for GFDT type */ #define AGP_USER_CACHED_MEMORY_GFDT (1 << 3) +#ifdef CONFIG_INTEL_IOMMU +extern 
int intel_iommu_gfx_mapped; +#endif + #endif diff --git a/include/drm/ttm/ttm_bo_api.h b/include/drm/ttm/ttm_bo_api.h index f681bbdbc6..9eb940d675 100644 --- a/include/drm/ttm/ttm_bo_api.h +++ b/include/drm/ttm/ttm_bo_api.h @@ -31,7 +31,6 @@ #ifndef _TTM_BO_API_H_ #define _TTM_BO_API_H_ -#include #include #include #include @@ -40,23 +39,62 @@ #include #include #include -#include +#include -#include "ttm_resource.h" - -struct ttm_global; - -struct ttm_device; - -struct dma_buf_map; +struct ttm_bo_device; struct drm_mm_node; struct ttm_placement; -struct ttm_place; +/** + * struct ttm_bus_placement + * + * @addr: mapped virtual address + * @base: bus base address + * @is_iomem: is this io memory ? + * @size: size in byte + * @offset: offset from the base address + * @io_reserved_vm: The VM system has a refcount in @io_reserved_count + * @io_reserved_count: Refcounting the numbers of callers to ttm_mem_io_reserve + * + * Structure indicating the bus placement of an object. + */ +struct ttm_bus_placement { + void *addr; + phys_addr_t base; + unsigned long size; + unsigned long offset; + bool is_iomem; + bool io_reserved_vm; + uint64_t io_reserved_count; +}; -struct ttm_lru_bulk_move; + +/** + * struct ttm_mem_reg + * + * @mm_node: Memory manager node. + * @size: Requested size of memory region. + * @num_pages: Actual size of memory region in pages. + * @page_alignment: Page alignment. + * @placement: Placement flags. + * @bus: Placement on io bus accessible to the CPU + * + * Structure indicating the placement and space resources used by a + * buffer object. + */ + +struct ttm_mem_reg { + void *mm_node; + unsigned long start; + unsigned long size; + unsigned long num_pages; + uint32_t page_alignment; + uint32_t mem_type; + uint32_t placement; + struct ttm_bus_placement bus; +}; /** * enum ttm_bo_type @@ -83,25 +121,34 @@ struct ttm_tt; /** * struct ttm_buffer_object * - * @base: drm_gem_object superclass data. * @bdev: Pointer to the buffer object device structure. 
* @type: The bo type. - * @page_alignment: Page alignment. * @destroy: Destruction function. If NULL, kfree is used. * @num_pages: Actual number of pages. + * @acc_size: Accounted size for this object. * @kref: Reference count of this buffer object. When this refcount reaches - * zero, the object is destroyed or put on the delayed delete list. + * zero, the object is put on the delayed delete list. + * @list_kref: List reference count of this buffer object. This member is + * used to avoid destruction while the buffer object is still on a list. + * Lru lists may keep one refcount, the delayed delete list, and kref != 0 + * keeps one refcount. When this refcount reaches zero, + * the object is destroyed. * @mem: structure describing current placement. + * @persistent_swap_storage: Usually the swap storage is deleted for buffers + * pinned in physical memory. If this behaviour is not desired, this member + * holds a pointer to a persistent shmem object. * @ttm: TTM structure holding system pages. * @evicted: Whether the object was evicted without user-space knowing. - * @deleted: True if the object is only a zombie and already deleted. + * @cpu_writes: For synchronization. Number of cpu writers. * @lru: List head for the lru list. * @ddestroy: List head for the delayed destroy list. * @swap: List head for swap LRU list. * @moving: Fence set when BO is moving + * @vma_node: Address space manager node. * @offset: The current GPU offset, which can have different meanings * depending on the memory type. For SYSTEM type memory, it should be 0. * @cur_placement: Hint of current placement. + * @wu_mutex: Wait unreserved mutex. * * Base class for TTM buffer object, that deals with data placement and CPU * mappings. GPU mappings are really up to the driver, but for simpler GPUs @@ -116,29 +163,38 @@ struct ttm_tt; */ struct ttm_buffer_object { - struct drm_gem_object base; - /** * Members constant at init. 
*/ - struct ttm_device *bdev; + struct ttm_bo_global *glob; + struct ttm_bo_device *bdev; enum ttm_bo_type type; - uint32_t page_alignment; void (*destroy) (struct ttm_buffer_object *); + unsigned long num_pages; + size_t acc_size; /** * Members not needing protection. */ + struct kref kref; + struct kref list_kref; /** * Members protected by the bo::resv::reserved lock. */ - struct ttm_resource *resource; + struct ttm_mem_reg mem; + struct file *persistent_swap_storage; struct ttm_tt *ttm; - bool deleted; + bool evicted; + + /** + * Members protected by the bo::reserved lock only when written to. + */ + + atomic_t cpu_writers; /** * Members protected by the bdev::lru_lock. @@ -146,14 +202,16 @@ struct ttm_buffer_object { struct list_head lru; struct list_head ddestroy; + struct list_head swap; + struct list_head io_reserve_lru; /** * Members protected by a bo reservation. */ - struct dma_fence *moving; - unsigned priority; - unsigned pin_count; + struct fence *moving; + + struct drm_vma_offset_node vma_node; /** * Special members that are protected by the reserve lock @@ -161,7 +219,14 @@ struct ttm_buffer_object { * either of these locks held. */ + uint64_t offset; /* GPU address space is independent of CPU word size */ + uint32_t cur_placement; + struct sg_table *sg; + + struct reservation_object *resv; + struct reservation_object ttm_resv; + struct mutex wu_mutex; }; /** @@ -191,55 +256,17 @@ struct ttm_bo_kmap_obj { }; /** - * struct ttm_operation_ctx - * - * @interruptible: Sleep interruptible if sleeping. - * @no_wait_gpu: Return immediately if the GPU is busy. - * @gfp_retry_mayfail: Set the __GFP_RETRY_MAYFAIL when allocation pages. - * @allow_res_evict: Allow eviction of reserved BOs. Can be used when multiple - * BOs share the same reservation object. - * @force_alloc: Don't check the memory account during suspend or CPU page - * faults. Should only be used by TTM internally. - * @resv: Reservation object to allow reserved evictions with. 
- * - * Context for TTM operations like changing buffer placement or general memory - * allocation. - */ -struct ttm_operation_ctx { - bool interruptible; - bool no_wait_gpu; - bool gfp_retry_mayfail; - bool allow_res_evict; - bool force_alloc; - struct dma_resv *resv; - uint64_t bytes_moved; -}; - -/** - * ttm_bo_get - reference a struct ttm_buffer_object + * ttm_bo_reference - reference a struct ttm_buffer_object * * @bo: The buffer object. + * + * Returns a refcounted pointer to a buffer object. */ -static inline void ttm_bo_get(struct ttm_buffer_object *bo) + +static inline struct ttm_buffer_object * +ttm_bo_reference(struct ttm_buffer_object *bo) { kref_get(&bo->kref); -} - -/** - * ttm_bo_get_unless_zero - reference a struct ttm_buffer_object unless - * its refcount has already reached zero. - * @bo: The buffer object. - * - * Used to reference a TTM buffer object in lookups where the object is removed - * from the lookup structure during the destructor and for RCU lookups. - * - * Returns: @bo if the referencing was successful, NULL otherwise. - */ -static inline __must_check struct ttm_buffer_object * -ttm_bo_get_unless_zero(struct ttm_buffer_object *bo) -{ - if (!kref_get_unless_zero(&bo->kref)) - return NULL; return bo; } @@ -257,31 +284,29 @@ ttm_bo_get_unless_zero(struct ttm_buffer_object *bo) * Returns -EBUSY if no_wait is true and the buffer is busy. * Returns -ERESTARTSYS if interrupted by a signal. */ -int ttm_bo_wait(struct ttm_buffer_object *bo, bool interruptible, bool no_wait); - -static inline int ttm_bo_wait_ctx(struct ttm_buffer_object *bo, struct ttm_operation_ctx *ctx) -{ - return ttm_bo_wait(bo, ctx->interruptible, ctx->no_wait_gpu); -} +extern int ttm_bo_wait(struct ttm_buffer_object *bo, + bool interruptible, bool no_wait); /** * ttm_bo_mem_compat - Check if proposed placement is compatible with a bo * * @placement: Return immediately if buffer is busy. 
- * @mem: The struct ttm_resource indicating the region where the bo resides + * @mem: The struct ttm_mem_reg indicating the region where the bo resides * @new_flags: Describes compatible placement found * * Returns true if the placement is compatible */ -bool ttm_bo_mem_compat(struct ttm_placement *placement, struct ttm_resource *mem, - uint32_t *new_flags); +extern bool ttm_bo_mem_compat(struct ttm_placement *placement, + struct ttm_mem_reg *mem, + uint32_t *new_flags); /** * ttm_bo_validate * * @bo: The buffer object. * @placement: Proposed placement for the buffer object. - * @ctx: validation parameters. + * @interruptible: Sleep interruptible if sleeping. + * @no_wait_gpu: Return immediately if the GPU is busy. * * Changes placement and caching policy of the buffer object * according proposed placement. @@ -291,43 +316,67 @@ bool ttm_bo_mem_compat(struct ttm_placement *placement, struct ttm_resource *mem * -EBUSY if no_wait is true and buffer busy. * -ERESTARTSYS if interrupted by a signal. */ -int ttm_bo_validate(struct ttm_buffer_object *bo, - struct ttm_placement *placement, - struct ttm_operation_ctx *ctx); +extern int ttm_bo_validate(struct ttm_buffer_object *bo, + struct ttm_placement *placement, + bool interruptible, + bool no_wait_gpu); /** - * ttm_bo_put + * ttm_bo_unref * * @bo: The buffer object. * - * Unreference a buffer object. + * Unreference and clear a pointer to a buffer object. */ -void ttm_bo_put(struct ttm_buffer_object *bo); +extern void ttm_bo_unref(struct ttm_buffer_object **bo); + + +/** + * ttm_bo_list_ref_sub + * + * @bo: The buffer object. + * @count: The number of references with which to decrease @bo::list_kref; + * @never_free: The refcount should not reach zero with this operation. + * + * Release @count lru list references to this buffer object. + */ +extern void ttm_bo_list_ref_sub(struct ttm_buffer_object *bo, int count, + bool never_free); + +/** + * ttm_bo_add_to_lru + * + * @bo: The buffer object. 
+ * + * Add this bo to the relevant mem type lru and, if it's backed by + * system pages (ttms) to the swap list. + * This function must be called with struct ttm_bo_global::lru_lock held, and + * is typically called immediately prior to unreserving a bo. + */ +extern void ttm_bo_add_to_lru(struct ttm_buffer_object *bo); + +/** + * ttm_bo_del_from_lru + * + * @bo: The buffer object. + * + * Remove this bo from all lru lists used to lookup and reserve an object. + * This function must be called with struct ttm_bo_global::lru_lock held, + * and is usually called just immediately after the bo has been reserved to + * avoid recursive reservation from lru lists. + */ +extern int ttm_bo_del_from_lru(struct ttm_buffer_object *bo); /** * ttm_bo_move_to_lru_tail * * @bo: The buffer object. - * @mem: Resource object. - * @bulk: optional bulk move structure to remember BO positions * * Move this BO to the tail of all lru lists used to lookup and reserve an - * object. This function must be called with struct ttm_global::lru_lock + * object. This function must be called with struct ttm_bo_global::lru_lock * held, and is used to make a BO less likely to be considered for eviction. */ -void ttm_bo_move_to_lru_tail(struct ttm_buffer_object *bo, - struct ttm_resource *mem, - struct ttm_lru_bulk_move *bulk); - -/** - * ttm_bo_bulk_move_lru_tail - * - * @bulk: bulk move structure - * - * Bulk move BOs to the LRU tail, only valid to use when driver makes sure that - * BO order never changes. Should be called with ttm_global::lru_lock held. 
- */ -void ttm_bo_bulk_move_lru_tail(struct ttm_lru_bulk_move *bulk); +extern void ttm_bo_move_to_lru_tail(struct ttm_buffer_object *bo); /** * ttm_bo_lock_delayed_workqueue @@ -336,73 +385,62 @@ void ttm_bo_bulk_move_lru_tail(struct ttm_lru_bulk_move *bulk); * Returns * True if the workqueue was queued at the time */ -int ttm_bo_lock_delayed_workqueue(struct ttm_device *bdev); +extern int ttm_bo_lock_delayed_workqueue(struct ttm_bo_device *bdev); /** * ttm_bo_unlock_delayed_workqueue * * Allows the delayed workqueue to run. */ -void ttm_bo_unlock_delayed_workqueue(struct ttm_device *bdev, int resched); +extern void ttm_bo_unlock_delayed_workqueue(struct ttm_bo_device *bdev, + int resched); /** - * ttm_bo_eviction_valuable + * ttm_bo_synccpu_write_grab * - * @bo: The buffer object to evict - * @place: the placement we need to make room for + * @bo: The buffer object: + * @no_wait: Return immediately if buffer is busy. * - * Check if it is valuable to evict the BO to make room for the given placement. - */ -bool ttm_bo_eviction_valuable(struct ttm_buffer_object *bo, - const struct ttm_place *place); - -/** - * ttm_bo_init_reserved - * - * @bdev: Pointer to a ttm_device struct. - * @bo: Pointer to a ttm_buffer_object to be initialized. - * @size: Requested size of buffer object. - * @type: Requested type of buffer object. - * @flags: Initial placement flags. - * @page_alignment: Data alignment in pages. - * @ctx: TTM operation context for memory allocation. - * @resv: Pointer to a dma_resv, or NULL to let ttm allocate one. - * @destroy: Destroy function. Use NULL for kfree(). - * - * This function initializes a pre-allocated struct ttm_buffer_object. - * As this object may be part of a larger structure, this function, - * together with the @destroy function, - * enables driver-specific objects derived from a ttm_buffer_object. - * - * On successful return, the caller owns an object kref to @bo. 
The kref and - * list_kref are usually set to 1, but note that in some situations, other - * tasks may already be holding references to @bo as well. - * Furthermore, if resv == NULL, the buffer's reservation lock will be held, - * and it is the caller's responsibility to call ttm_bo_unreserve. - * - * If a failure occurs, the function will call the @destroy function, or - * kfree() if @destroy is NULL. Thus, after a failure, dereferencing @bo is - * illegal and will likely cause memory corruption. + * Synchronizes a buffer object for CPU RW access. This means + * command submission that affects the buffer will return -EBUSY + * until ttm_bo_synccpu_write_release is called. * * Returns - * -ENOMEM: Out of memory. - * -EINVAL: Invalid placement flags. - * -ERESTARTSYS: Interrupted by signal while sleeping waiting for resources. + * -EBUSY if the buffer is busy and no_wait is true. + * -ERESTARTSYS if interrupted by a signal. */ +extern int +ttm_bo_synccpu_write_grab(struct ttm_buffer_object *bo, bool no_wait); -int ttm_bo_init_reserved(struct ttm_device *bdev, - struct ttm_buffer_object *bo, - size_t size, enum ttm_bo_type type, - struct ttm_placement *placement, - uint32_t page_alignment, - struct ttm_operation_ctx *ctx, - struct sg_table *sg, struct dma_resv *resv, - void (*destroy) (struct ttm_buffer_object *)); +/** + * ttm_bo_synccpu_write_release: + * + * @bo : The buffer object. + * + * Releases a synccpu lock. + */ +extern void ttm_bo_synccpu_write_release(struct ttm_buffer_object *bo); + +/** + * ttm_bo_acc_size + * + * @bdev: Pointer to a ttm_bo_device struct. + * @bo_size: size of the buffer object in byte. 
+ * @struct_size: size of the structure holding buffer object datas + * + * Returns size to account for a buffer object + */ +size_t ttm_bo_acc_size(struct ttm_bo_device *bdev, + unsigned long bo_size, + unsigned struct_size); +size_t ttm_bo_dma_acc_size(struct ttm_bo_device *bdev, + unsigned long bo_size, + unsigned struct_size); /** * ttm_bo_init * - * @bdev: Pointer to a ttm_device struct. + * @bdev: Pointer to a ttm_bo_device struct. * @bo: Pointer to a ttm_buffer_object to be initialized. * @size: Requested size of buffer object. * @type: Requested type of buffer object. @@ -410,22 +448,20 @@ int ttm_bo_init_reserved(struct ttm_device *bdev, * @page_alignment: Data alignment in pages. * @interruptible: If needing to sleep to wait for GPU resources, * sleep interruptible. + * @persistent_swap_storage: Usually the swap storage is deleted for buffers * pinned in physical memory. If this behaviour is not desired, this member * holds a pointer to a persistent shmem object. Typically, this would * point to the shmem object backing a GEM object if TTM is used to back a * GEM user interface. - * @resv: Pointer to a dma_resv, or NULL to let ttm allocate one. + * @acc_size: Accounted size for this object. + * @resv: Pointer to a reservation_object, or NULL to let ttm allocate one. * @destroy: Destroy function. Use NULL for kfree(). * * This function initializes a pre-allocated struct ttm_buffer_object. * As this object may be part of a larger structure, this function, * together with the @destroy function, * enables driver-specific objects derived from a ttm_buffer_object. - * - * On successful return, the caller owns an object kref to @bo. The kref and - * list_kref are usually set to 1, but note that in some situations, other - * tasks may already be holding references to @bo as well. - * + * On successful return, the object kref and list_kref are set to 1. * If a failure occurs, the function will call the @destroy function, or * kfree() if @destroy is NULL. 
Thus, after a failure, dereferencing @bo is * illegal and will likely cause memory corruption. @@ -435,12 +471,121 @@ int ttm_bo_init_reserved(struct ttm_device *bdev, * -EINVAL: Invalid placement flags. * -ERESTARTSYS: Interrupted by signal while sleeping waiting for resources. */ -int ttm_bo_init(struct ttm_device *bdev, struct ttm_buffer_object *bo, - size_t size, enum ttm_bo_type type, - struct ttm_placement *placement, - uint32_t page_alignment, bool interrubtible, - struct sg_table *sg, struct dma_resv *resv, - void (*destroy) (struct ttm_buffer_object *)); + +extern int ttm_bo_init(struct ttm_bo_device *bdev, + struct ttm_buffer_object *bo, + unsigned long size, + enum ttm_bo_type type, + struct ttm_placement *placement, + uint32_t page_alignment, + bool interrubtible, + struct file *persistent_swap_storage, + size_t acc_size, + struct sg_table *sg, + struct reservation_object *resv, + void (*destroy) (struct ttm_buffer_object *)); + +/** + * ttm_bo_create + * + * @bdev: Pointer to a ttm_bo_device struct. + * @size: Requested size of buffer object. + * @type: Requested type of buffer object. + * @placement: Initial placement. + * @page_alignment: Data alignment in pages. + * @interruptible: If needing to sleep while waiting for GPU resources, + * sleep interruptible. + * @persistent_swap_storage: Usually the swap storage is deleted for buffers + * pinned in physical memory. If this behaviour is not desired, this member + * holds a pointer to a persistent shmem object. Typically, this would + * point to the shmem object backing a GEM object if TTM is used to back a + * GEM user interface. + * @p_bo: On successful completion *p_bo points to the created object. + * + * This function allocates a ttm_buffer_object, and then calls ttm_bo_init + * on that object. The destroy function is set to kfree(). + * Returns + * -ENOMEM: Out of memory. + * -EINVAL: Invalid placement flags. + * -ERESTARTSYS: Interrupted by signal while waiting for resources. 
+ */ + +extern int ttm_bo_create(struct ttm_bo_device *bdev, + unsigned long size, + enum ttm_bo_type type, + struct ttm_placement *placement, + uint32_t page_alignment, + bool interruptible, + struct file *persistent_swap_storage, + struct ttm_buffer_object **p_bo); + +/** + * ttm_bo_init_mm + * + * @bdev: Pointer to a ttm_bo_device struct. + * @mem_type: The memory type. + * @p_size: size managed area in pages. + * + * Initialize a manager for a given memory type. + * Note: if part of driver firstopen, it must be protected from a + * potentially racing lastclose. + * Returns: + * -EINVAL: invalid size or memory type. + * -ENOMEM: Not enough memory. + * May also return driver-specified errors. + */ + +extern int ttm_bo_init_mm(struct ttm_bo_device *bdev, unsigned type, + unsigned long p_size); +/** + * ttm_bo_clean_mm + * + * @bdev: Pointer to a ttm_bo_device struct. + * @mem_type: The memory type. + * + * Take down a manager for a given memory type after first walking + * the LRU list to evict any buffers left alive. + * + * Normally, this function is part of lastclose() or unload(), and at that + * point there shouldn't be any buffers left created by user-space, since + * there should've been removed by the file descriptor release() method. + * However, before this function is run, make sure to signal all sync objects, + * and verify that the delayed delete queue is empty. The driver must also + * make sure that there are no NO_EVICT buffers present in this memory type + * when the call is made. + * + * If this function is part of a VT switch, the caller must make sure that + * there are no appications currently validating buffers before this + * function is called. The caller can do that by first taking the + * struct ttm_bo_device::ttm_lock in write mode. + * + * Returns: + * -EINVAL: invalid or uninitialized memory type. + * -EBUSY: There are still buffers left in this memory type. 
+ */ + +extern int ttm_bo_clean_mm(struct ttm_bo_device *bdev, unsigned mem_type); + +/** + * ttm_bo_evict_mm + * + * @bdev: Pointer to a ttm_bo_device struct. + * @mem_type: The memory type. + * + * Evicts all buffers on the lru list of the memory type. + * This is normally part of a VT switch or an + * out-of-memory-space-due-to-fragmentation handler. + * The caller must make sure that there are no other processes + * currently validating buffers, and can do that by taking the + * struct ttm_bo_device::ttm_lock in write mode. + * + * Returns: + * -EINVAL: Invalid or uninitialized memory type. + * -ERESTARTSYS: The call was interrupted by a signal while waiting to + * evict a buffer. + */ + +extern int ttm_bo_evict_mm(struct ttm_bo_device *bdev, unsigned mem_type); /** * ttm_kmap_obj_virtual @@ -453,6 +598,7 @@ int ttm_bo_init(struct ttm_device *bdev, struct ttm_buffer_object *bo, * If *is_iomem is 1 on return, the virtual address points to an io memory area, * that should strictly be accessed by the iowriteXX() and similar functions. */ + static inline void *ttm_kmap_obj_virtual(struct ttm_bo_kmap_obj *map, bool *is_iomem) { @@ -476,8 +622,9 @@ static inline void *ttm_kmap_obj_virtual(struct ttm_bo_kmap_obj *map, * -ENOMEM: Out of memory. * -EINVAL: Invalid range. */ -int ttm_bo_kmap(struct ttm_buffer_object *bo, unsigned long start_page, - unsigned long num_pages, struct ttm_bo_kmap_obj *map); + +extern int ttm_bo_kmap(struct ttm_buffer_object *bo, unsigned long start_page, + unsigned long num_pages, struct ttm_bo_kmap_obj *map); /** * ttm_bo_kunmap @@ -486,48 +633,41 @@ int ttm_bo_kmap(struct ttm_buffer_object *bo, unsigned long start_page, * * Unmaps a kernel map set up by ttm_bo_kmap. */ -void ttm_bo_kunmap(struct ttm_bo_kmap_obj *map); + +extern void ttm_bo_kunmap(struct ttm_bo_kmap_obj *map); /** - * ttm_bo_vmap - * - * @bo: The buffer object. - * @map: pointer to a struct dma_buf_map representing the map. 
- * - * Sets up a kernel virtual mapping, using ioremap or vmap to the - * data in the buffer object. The parameter @map returns the virtual - * address as struct dma_buf_map. Unmap the buffer with ttm_bo_vunmap(). - * - * Returns - * -ENOMEM: Out of memory. - * -EINVAL: Invalid range. - */ -int ttm_bo_vmap(struct ttm_buffer_object *bo, struct dma_buf_map *map); - -/** - * ttm_bo_vunmap - * - * @bo: The buffer object. - * @map: Object describing the map to unmap. - * - * Unmaps a kernel map set up by ttm_bo_vmap(). - */ -void ttm_bo_vunmap(struct ttm_buffer_object *bo, struct dma_buf_map *map); - -/** - * ttm_bo_mmap_obj - mmap memory backed by a ttm buffer object. + * ttm_fbdev_mmap - mmap fbdev memory backed by a ttm buffer object. * * @vma: vma as input from the fbdev mmap method. - * @bo: The bo backing the address space. + * @bo: The bo backing the address space. The address space will + * have the same size as the bo, and start at offset 0. * - * Maps a buffer object. + * This function is intended to be called by the fbdev mmap method + * if the fbdev address space is to be backed by a bo. */ -int ttm_bo_mmap_obj(struct vm_area_struct *vma, struct ttm_buffer_object *bo); + +extern int ttm_fbdev_mmap(struct vm_area_struct *vma, + struct ttm_buffer_object *bo); + +/** + * ttm_bo_mmap - mmap out of the ttm device address space. + * + * @filp: filp as input from the mmap method. + * @vma: vma as input from the mmap method. + * @bdev: Pointer to the ttm_bo_device with the address space manager. + * + * This function is intended to be called by the device mmap method. + * if the device address space is to be backed by the bo manager. + */ + +extern int ttm_bo_mmap(struct file *filp, struct vm_area_struct *vma, + struct ttm_bo_device *bdev); /** * ttm_bo_io * - * @bdev: Pointer to the struct ttm_device. + * @bdev: Pointer to the struct ttm_bo_device. * @filp: Pointer to the struct file attempting to read / write. 
* @wbuf: User-space pointer to address of buffer to write. NULL on read. * @rbuf: User-space pointer to address of buffer to read into. @@ -544,69 +684,11 @@ int ttm_bo_mmap_obj(struct vm_area_struct *vma, struct ttm_buffer_object *bo); * the function may return -ERESTARTSYS if * interrupted by a signal. */ -ssize_t ttm_bo_io(struct ttm_device *bdev, struct file *filp, - const char __user *wbuf, char __user *rbuf, - size_t count, loff_t *f_pos, bool write); -int ttm_bo_swapout(struct ttm_buffer_object *bo, struct ttm_operation_ctx *ctx, - gfp_t gfp_flags); - -/** - * ttm_bo_pin - Pin the buffer object. - * @bo: The buffer object to pin - * - * Make sure the buffer is not evicted any more during memory pressure. - */ -static inline void ttm_bo_pin(struct ttm_buffer_object *bo) -{ - dma_resv_assert_held(bo->base.resv); - WARN_ON_ONCE(!kref_read(&bo->kref)); - ++bo->pin_count; -} - -/** - * ttm_bo_unpin - Unpin the buffer object. - * @bo: The buffer object to unpin - * - * Allows the buffer object to be evicted again during memory pressure. 
- */ -static inline void ttm_bo_unpin(struct ttm_buffer_object *bo) -{ - dma_resv_assert_held(bo->base.resv); - WARN_ON_ONCE(!kref_read(&bo->kref)); - if (bo->pin_count) - --bo->pin_count; - else - WARN_ON_ONCE(true); -} - -int ttm_mem_evict_first(struct ttm_device *bdev, - struct ttm_resource_manager *man, - const struct ttm_place *place, - struct ttm_operation_ctx *ctx, - struct ww_acquire_ctx *ticket); - -/* Default number of pre-faulted pages in the TTM fault handler */ -#define TTM_BO_VM_NUM_PREFAULT 16 - -vm_fault_t ttm_bo_vm_reserve(struct ttm_buffer_object *bo, - struct vm_fault *vmf); - -vm_fault_t ttm_bo_vm_fault_reserved(struct vm_fault *vmf, - pgprot_t prot, - pgoff_t num_prefault, - pgoff_t fault_page_size); - -vm_fault_t ttm_bo_vm_fault(struct vm_fault *vmf); - -void ttm_bo_vm_open(struct vm_area_struct *vma); - -void ttm_bo_vm_close(struct vm_area_struct *vma); - -int ttm_bo_vm_access(struct vm_area_struct *vma, unsigned long addr, - void *buf, int len, int write); -bool ttm_bo_delayed_delete(struct ttm_device *bdev, bool remove_all); - -vm_fault_t ttm_bo_vm_dummy_page(struct vm_fault *vmf, pgprot_t prot); +extern ssize_t ttm_bo_io(struct ttm_bo_device *bdev, struct file *filp, + const char __user *wbuf, char __user *rbuf, + size_t count, loff_t *f_pos, bool write); +extern void ttm_bo_swapout_all(struct ttm_bo_device *bdev); +extern int ttm_bo_wait_unreserved(struct ttm_buffer_object *bo); #endif diff --git a/include/drm/ttm/ttm_bo_driver.h b/include/drm/ttm/ttm_bo_driver.h index 68d6069572..4f0a921859 100644 --- a/include/drm/ttm/ttm_bo_driver.h +++ b/include/drm/ttm/ttm_bo_driver.h @@ -30,59 +30,668 @@ #ifndef _TTM_BO_DRIVER_H_ #define _TTM_BO_DRIVER_H_ +#include +#include +#include +#include #include +#include #include #include #include #include -#include +#include -#include +struct ttm_backend_func { + /** + * struct ttm_backend_func member bind + * + * @ttm: Pointer to a struct ttm_tt. 
+ * @bo_mem: Pointer to a struct ttm_mem_reg describing the + * memory type and location for binding. + * + * Bind the backend pages into the aperture in the location + * indicated by @bo_mem. This function should be able to handle + * differences between aperture and system page sizes. + */ + int (*bind) (struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem); -#include "ttm_bo_api.h" -#include "ttm_kmap_iter.h" -#include "ttm_placement.h" -#include "ttm_tt.h" -#include "ttm_pool.h" + /** + * struct ttm_backend_func member unbind + * + * @ttm: Pointer to a struct ttm_tt. + * + * Unbind previously bound backend pages. This function should be + * able to handle differences between aperture and system page sizes. + */ + int (*unbind) (struct ttm_tt *ttm); -/** - * struct ttm_lru_bulk_move_pos - * - * @first: first BO in the bulk move range - * @last: last BO in the bulk move range - * - * Positions for a lru bulk move. - */ -struct ttm_lru_bulk_move_pos { - struct ttm_buffer_object *first; - struct ttm_buffer_object *last; + /** + * struct ttm_backend_func member destroy + * + * @ttm: Pointer to a struct ttm_tt. + * + * Destroy the backend. This will be call back from ttm_tt_destroy so + * don't call ttm_tt_destroy from the callback or infinite loop. + */ + void (*destroy) (struct ttm_tt *ttm); +}; + +#define TTM_PAGE_FLAG_WRITE (1 << 3) +#define TTM_PAGE_FLAG_SWAPPED (1 << 4) +#define TTM_PAGE_FLAG_PERSISTENT_SWAP (1 << 5) +#define TTM_PAGE_FLAG_ZERO_ALLOC (1 << 6) +#define TTM_PAGE_FLAG_DMA32 (1 << 7) +#define TTM_PAGE_FLAG_SG (1 << 8) + +enum ttm_caching_state { + tt_uncached, + tt_wc, + tt_cached }; /** - * struct ttm_lru_bulk_move + * struct ttm_tt * - * @tt: first/last lru entry for BOs in the TT domain - * @vram: first/last lru entry for BOs in the VRAM domain - * @swap: first/last lru entry for BOs on the swap list + * @bdev: Pointer to a struct ttm_bo_device. + * @func: Pointer to a struct ttm_backend_func that describes + * the backend methods. 
+ * @dummy_read_page: Page to map where the ttm_tt page array contains a NULL + * pointer. + * @pages: Array of pages backing the data. + * @num_pages: Number of pages in the page array. + * @bdev: Pointer to the current struct ttm_bo_device. + * @be: Pointer to the ttm backend. + * @swap_storage: Pointer to shmem struct file for swap storage. + * @caching_state: The current caching state of the pages. + * @state: The current binding state of the pages. * - * Helper structure for bulk moves on the LRU list. + * This is a structure holding the pages, caching- and aperture binding + * status for a buffer object that isn't backed by fixed (VRAM / AGP) + * memory. */ -struct ttm_lru_bulk_move { - struct ttm_lru_bulk_move_pos tt[TTM_MAX_BO_PRIORITY]; - struct ttm_lru_bulk_move_pos vram[TTM_MAX_BO_PRIORITY]; + +struct ttm_tt { + struct ttm_bo_device *bdev; + struct ttm_backend_func *func; + struct page *dummy_read_page; + struct page **pages; + uint32_t page_flags; + unsigned long num_pages; + struct sg_table *sg; /* for SG objects via dma-buf */ + struct ttm_bo_global *glob; + struct file *swap_storage; + enum ttm_caching_state caching_state; + enum { + tt_bound, + tt_unbound, + tt_unpopulated, + } state; }; +/** + * struct ttm_dma_tt + * + * @ttm: Base ttm_tt struct. + * @dma_address: The DMA (bus) addresses of the pages + * @pages_list: used by some page allocation backend + * + * This is a structure holding the pages, caching- and aperture binding + * status for a buffer object that isn't backed by fixed (VRAM / AGP) + * memory. 
+ */ +struct ttm_dma_tt { + struct ttm_tt ttm; + dma_addr_t *dma_address; + struct list_head pages_list; +}; + +#define TTM_MEMTYPE_FLAG_FIXED (1 << 0) /* Fixed (on-card) PCI memory */ +#define TTM_MEMTYPE_FLAG_MAPPABLE (1 << 1) /* Memory mappable */ +#define TTM_MEMTYPE_FLAG_CMA (1 << 3) /* Can't map aperture */ + +struct ttm_mem_type_manager; + +struct ttm_mem_type_manager_func { + /** + * struct ttm_mem_type_manager member init + * + * @man: Pointer to a memory type manager. + * @p_size: Implementation dependent, but typically the size of the + * range to be managed in pages. + * + * Called to initialize a private range manager. The function is + * expected to initialize the man::priv member. + * Returns 0 on success, negative error code on failure. + */ + int (*init)(struct ttm_mem_type_manager *man, unsigned long p_size); + + /** + * struct ttm_mem_type_manager member takedown + * + * @man: Pointer to a memory type manager. + * + * Called to undo the setup done in init. All allocated resources + * should be freed. + */ + int (*takedown)(struct ttm_mem_type_manager *man); + + /** + * struct ttm_mem_type_manager member get_node + * + * @man: Pointer to a memory type manager. + * @bo: Pointer to the buffer object we're allocating space for. + * @placement: Placement details. + * @flags: Additional placement flags. + * @mem: Pointer to a struct ttm_mem_reg to be filled in. + * + * This function should allocate space in the memory type managed + * by @man. Placement details if + * applicable are given by @placement. If successful, + * @mem::mm_node should be set to a non-null value, and + * @mem::start should be set to a value identifying the beginning + * of the range allocated, and the function should return zero. + * If the memory region accommodate the buffer object, @mem::mm_node + * should be set to NULL, and the function should return 0. 
+ * If a system error occurred, preventing the request to be fulfilled, + * the function should return a negative error code. + * + * Note that @mem::mm_node will only be dereferenced by + * struct ttm_mem_type_manager functions and optionally by the driver, + * which has knowledge of the underlying type. + * + * This function may not be called from within atomic context, so + * an implementation can and must use either a mutex or a spinlock to + * protect any data structures managing the space. + */ + int (*get_node)(struct ttm_mem_type_manager *man, + struct ttm_buffer_object *bo, + const struct ttm_place *place, + struct ttm_mem_reg *mem); + + /** + * struct ttm_mem_type_manager member put_node + * + * @man: Pointer to a memory type manager. + * @mem: Pointer to a struct ttm_mem_reg to be filled in. + * + * This function frees memory type resources previously allocated + * and that are identified by @mem::mm_node and @mem::start. May not + * be called from within atomic context. + */ + void (*put_node)(struct ttm_mem_type_manager *man, + struct ttm_mem_reg *mem); + + /** + * struct ttm_mem_type_manager member debug + * + * @man: Pointer to a memory type manager. + * @prefix: Prefix to be used in printout to identify the caller. + * + * This function is called to print out the state of the memory + * type manager to aid debugging of out-of-memory conditions. + * It may not be called from within atomic context. + */ + void (*debug)(struct ttm_mem_type_manager *man, const char *prefix); +}; + +/** + * struct ttm_mem_type_manager + * + * @has_type: The memory type has been initialized. + * @use_type: The memory type is enabled. + * @flags: TTM_MEMTYPE_XX flags identifying the traits of the memory + * managed by this memory type. + * @gpu_offset: If used, the GPU offset of the first managed page of + * fixed memory or the first managed location in an aperture. + * @size: Size of the managed region. 
+ * @available_caching: A mask of available caching types, TTM_PL_FLAG_XX, + * as defined in ttm_placement_common.h + * @default_caching: The default caching policy used for a buffer object + * placed in this memory type if the user doesn't provide one. + * @func: structure pointer implementing the range manager. See above + * @priv: Driver private closure for @func. + * @io_reserve_mutex: Mutex optionally protecting shared io_reserve structures + * @use_io_reserve_lru: Use an lru list to try to unreserve io_mem_regions + * reserved by the TTM vm system. + * @io_reserve_lru: Optional lru list for unreserving io mem regions. + * @io_reserve_fastpath: Only use bdev::driver::io_mem_reserve to obtain + * @move_lock: lock for move fence + * static information. bdev::driver::io_mem_free is never used. + * @lru: The lru list for this memory type. + * @move: The fence of the last pipelined move operation. + * + * This structure is used to identify and manage memory types for a device. + * It's set up by the ttm_bo_driver::init_mem_type method. + */ + + + +struct ttm_mem_type_manager { + struct ttm_bo_device *bdev; + + /* + * No protection. Constant from start. + */ + + bool has_type; + bool use_type; + uint32_t flags; + uint64_t gpu_offset; /* GPU address space is independent of CPU word size */ + uint64_t size; + uint32_t available_caching; + uint32_t default_caching; + const struct ttm_mem_type_manager_func *func; + void *priv; + struct mutex io_reserve_mutex; + bool use_io_reserve_lru; + bool io_reserve_fastpath; + spinlock_t move_lock; + + /* + * Protected by @io_reserve_mutex: + */ + + struct list_head io_reserve_lru; + + /* + * Protected by the global->lru_lock. + */ + + struct list_head lru; + + /* + * Protected by @move_lock. + */ + struct fence *move; +}; + +/** + * struct ttm_bo_driver + * + * @create_ttm_backend_entry: Callback to create a struct ttm_backend. + * @invalidate_caches: Callback to invalidate read caches when a buffer object + * has been evicted. 
+ * @init_mem_type: Callback to initialize a struct ttm_mem_type_manager + * structure. + * @evict_flags: Callback to obtain placement flags when a buffer is evicted. + * @move: Callback for a driver to hook in accelerated functions to + * move a buffer. + * If set to NULL, a potentially slow memcpy() move is used. + */ + +struct ttm_bo_driver { + /** + * ttm_tt_create + * + * @bdev: pointer to a struct ttm_bo_device: + * @size: Size of the data needed backing. + * @page_flags: Page flags as identified by TTM_PAGE_FLAG_XX flags. + * @dummy_read_page: See struct ttm_bo_device. + * + * Create a struct ttm_tt to back data with system memory pages. + * No pages are actually allocated. + * Returns: + * NULL: Out of memory. + */ + struct ttm_tt *(*ttm_tt_create)(struct ttm_bo_device *bdev, + unsigned long size, + uint32_t page_flags, + struct page *dummy_read_page); + + /** + * ttm_tt_populate + * + * @ttm: The struct ttm_tt to contain the backing pages. + * + * Allocate all backing pages + * Returns: + * -ENOMEM: Out of memory. + */ + int (*ttm_tt_populate)(struct ttm_tt *ttm); + + /** + * ttm_tt_unpopulate + * + * @ttm: The struct ttm_tt to contain the backing pages. + * + * Free all backing page + */ + void (*ttm_tt_unpopulate)(struct ttm_tt *ttm); + + /** + * struct ttm_bo_driver member invalidate_caches + * + * @bdev: the buffer object device. + * @flags: new placement of the rebound buffer object. + * + * A previosly evicted buffer has been rebound in a + * potentially new location. Tell the driver that it might + * consider invalidating read (texture) caches on the next command + * submission as a consequence. + */ + + int (*invalidate_caches) (struct ttm_bo_device *bdev, uint32_t flags); + int (*init_mem_type) (struct ttm_bo_device *bdev, uint32_t type, + struct ttm_mem_type_manager *man); + /** + * struct ttm_bo_driver member evict_flags: + * + * @bo: the buffer object to be evicted + * + * Return the bo flags for a buffer which is not mapped to the hardware. 
+ * These will be placed in proposed_flags so that when the move is + * finished, they'll end up in bo->mem.flags + */ + + void(*evict_flags) (struct ttm_buffer_object *bo, + struct ttm_placement *placement); + /** + * struct ttm_bo_driver member move: + * + * @bo: the buffer to move + * @evict: whether this motion is evicting the buffer from + * the graphics address space + * @interruptible: Use interruptible sleeps if possible when sleeping. + * @no_wait: whether this should give up and return -EBUSY + * if this move would require sleeping + * @new_mem: the new memory region receiving the buffer + * + * Move a buffer between two memory regions. + */ + int (*move) (struct ttm_buffer_object *bo, + bool evict, bool interruptible, + bool no_wait_gpu, + struct ttm_mem_reg *new_mem); + + /** + * struct ttm_bo_driver_member verify_access + * + * @bo: Pointer to a buffer object. + * @filp: Pointer to a struct file trying to access the object. + * + * Called from the map / write / read methods to verify that the + * caller is permitted to access the buffer object. + * This member may be set to NULL, which will refuse this kind of + * access for all buffer objects. + * This function should return 0 if access is granted, -EPERM otherwise. + */ + int (*verify_access) (struct ttm_buffer_object *bo, + struct file *filp); + + /* hook to notify driver about a driver move so it + * can do tiling things */ + void (*move_notify)(struct ttm_buffer_object *bo, + struct ttm_mem_reg *new_mem); + /* notify the driver we are taking a fault on this BO + * and have reserved it */ + int (*fault_reserve_notify)(struct ttm_buffer_object *bo); + + /** + * notify the driver that we're about to swap out this bo + */ + void (*swap_notify) (struct ttm_buffer_object *bo); + + /** + * Driver callback on when mapping io memory (for bo_move_memcpy + * for instance). TTM will take care to call io_mem_free whenever + * the mapping is not use anymore. io_mem_reserve & io_mem_free + * are balanced. 
+ */ + int (*io_mem_reserve)(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem); + void (*io_mem_free)(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem); + + /** + * Optional driver callback for when BO is removed from the LRU. + * Called with LRU lock held immediately before the removal. + */ + void (*lru_removal)(struct ttm_buffer_object *bo); + + /** + * Return the list_head after which a BO should be inserted in the LRU. + */ + struct list_head *(*lru_tail)(struct ttm_buffer_object *bo); + struct list_head *(*swap_lru_tail)(struct ttm_buffer_object *bo); +}; + +/** + * struct ttm_bo_global_ref - Argument to initialize a struct ttm_bo_global. + */ + +struct ttm_bo_global_ref { + struct drm_global_reference ref; + struct ttm_mem_global *mem_glob; +}; + +/** + * struct ttm_bo_global - Buffer object driver global data. + * + * @mem_glob: Pointer to a struct ttm_mem_global object for accounting. + * @dummy_read_page: Pointer to a dummy page used for mapping requests + * of unpopulated pages. + * @shrink: A shrink callback object used for buffer object swap. + * @device_list_mutex: Mutex protecting the device list. + * This mutex is held while traversing the device list for pm options. + * @lru_lock: Spinlock protecting the bo subsystem lru lists. + * @device_list: List of buffer object devices. + * @swap_lru: Lru list of buffer objects used for swapping. + */ + +struct ttm_bo_global { + + /** + * Constant after init. + */ + + struct kobject kobj; + struct ttm_mem_global *mem_glob; + struct page *dummy_read_page; + struct ttm_mem_shrink shrink; + struct mutex device_list_mutex; + spinlock_t lru_lock; + + /** + * Protected by device_list_mutex. + */ + struct list_head device_list; + + /** + * Protected by the lru_lock. + */ + struct list_head swap_lru; + + /** + * Internal protection. + */ + atomic_t bo_count; +}; + + +#define TTM_NUM_MEM_TYPES 8 + +/** + * struct ttm_bo_device - Buffer object driver device-specific data. 
+ * + * @driver: Pointer to a struct ttm_bo_driver struct setup by the driver. + * @man: An array of mem_type_managers. + * @vma_manager: Address space manager + * lru_lock: Spinlock that protects the buffer+device lru lists and + * ddestroy lists. + * @dev_mapping: A pointer to the struct address_space representing the + * device address space. + * @wq: Work queue structure for the delayed delete workqueue. + * + */ + +struct ttm_bo_device { + + /* + * Constant after bo device init / atomic. + */ + struct list_head device_list; + struct ttm_bo_global *glob; + struct ttm_bo_driver *driver; + struct ttm_mem_type_manager man[TTM_NUM_MEM_TYPES]; + + /* + * Protected by internal locks. + */ + struct drm_vma_offset_manager vma_manager; + + /* + * Protected by the global:lru lock. + */ + struct list_head ddestroy; + + /* + * Protected by load / firstopen / lastclose /unload sync. + */ + + struct address_space *dev_mapping; + + /* + * Internal protection. + */ + + struct delayed_work wq; + + bool need_dma32; +}; + +/** + * ttm_flag_masked + * + * @old: Pointer to the result and original value. + * @new: New value of bits. + * @mask: Mask of bits to change. + * + * Convenience function to change a number of bits identified by a mask. + */ + +static inline uint32_t +ttm_flag_masked(uint32_t *old, uint32_t new, uint32_t mask) +{ + *old ^= (*old ^ new) & mask; + return *old; +} + +/** + * ttm_tt_init + * + * @ttm: The struct ttm_tt. + * @bdev: pointer to a struct ttm_bo_device: + * @size: Size of the data needed backing. + * @page_flags: Page flags as identified by TTM_PAGE_FLAG_XX flags. + * @dummy_read_page: See struct ttm_bo_device. + * + * Create a struct ttm_tt to back data with system memory pages. + * No pages are actually allocated. + * Returns: + * NULL: Out of memory. 
+ */ +extern int ttm_tt_init(struct ttm_tt *ttm, struct ttm_bo_device *bdev, + unsigned long size, uint32_t page_flags, + struct page *dummy_read_page); +extern int ttm_dma_tt_init(struct ttm_dma_tt *ttm_dma, struct ttm_bo_device *bdev, + unsigned long size, uint32_t page_flags, + struct page *dummy_read_page); + +/** + * ttm_tt_fini + * + * @ttm: the ttm_tt structure. + * + * Free memory of ttm_tt structure + */ +extern void ttm_tt_fini(struct ttm_tt *ttm); +extern void ttm_dma_tt_fini(struct ttm_dma_tt *ttm_dma); + +/** + * ttm_ttm_bind: + * + * @ttm: The struct ttm_tt containing backing pages. + * @bo_mem: The struct ttm_mem_reg identifying the binding location. + * + * Bind the pages of @ttm to an aperture location identified by @bo_mem + */ +extern int ttm_tt_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem); + +/** + * ttm_ttm_destroy: + * + * @ttm: The struct ttm_tt. + * + * Unbind, unpopulate and destroy common struct ttm_tt. + */ +extern void ttm_tt_destroy(struct ttm_tt *ttm); + +/** + * ttm_ttm_unbind: + * + * @ttm: The struct ttm_tt. + * + * Unbind a struct ttm_tt. + */ +extern void ttm_tt_unbind(struct ttm_tt *ttm); + +/** + * ttm_tt_swapin: + * + * @ttm: The struct ttm_tt. + * + * Swap in a previously swap out ttm_tt. + */ +extern int ttm_tt_swapin(struct ttm_tt *ttm); + +/** + * ttm_tt_set_placement_caching: + * + * @ttm A struct ttm_tt the backing pages of which will change caching policy. + * @placement: Flag indicating the desired caching policy. + * + * This function will change caching policy of any default kernel mappings of + * the pages backing @ttm. If changing from cached to uncached or + * write-combined, + * all CPU caches will first be flushed to make sure the data of the pages + * hit RAM. This function may be very costly as it involves global TLB + * and cache flushes and potential page splitting / combining. 
+ */ +extern int ttm_tt_set_placement_caching(struct ttm_tt *ttm, uint32_t placement); +extern int ttm_tt_swapout(struct ttm_tt *ttm, + struct file *persistent_swap_storage); + +/** + * ttm_tt_unpopulate - free pages from a ttm + * + * @ttm: Pointer to the ttm_tt structure + * + * Calls the driver method to free all pages from a ttm + */ +extern void ttm_tt_unpopulate(struct ttm_tt *ttm); + /* * ttm_bo.c */ +/** + * ttm_mem_reg_is_pci + * + * @bdev: Pointer to a struct ttm_bo_device. + * @mem: A valid struct ttm_mem_reg. + * + * Returns true if the memory described by @mem is PCI memory, + * false otherwise. + */ +extern bool ttm_mem_reg_is_pci(struct ttm_bo_device *bdev, + struct ttm_mem_reg *mem); + /** * ttm_bo_mem_space * * @bo: Pointer to a struct ttm_buffer_object. the data of which * we want to allocate space for. * @proposed_placement: Proposed new placement for the buffer object. - * @mem: A struct ttm_resource. + * @mem: A struct ttm_mem_reg. * @interruptible: Sleep interruptible when sliping. * @no_wait_gpu: Return immediately if the GPU is busy. * @@ -95,17 +704,115 @@ struct ttm_lru_bulk_move { * fragmentation or concurrent allocators. * -ERESTARTSYS: An interruptible sleep was interrupted by a signal. 
*/ -int ttm_bo_mem_space(struct ttm_buffer_object *bo, - struct ttm_placement *placement, - struct ttm_resource **mem, - struct ttm_operation_ctx *ctx); +extern int ttm_bo_mem_space(struct ttm_buffer_object *bo, + struct ttm_placement *placement, + struct ttm_mem_reg *mem, + bool interruptible, + bool no_wait_gpu); + +extern void ttm_bo_mem_put(struct ttm_buffer_object *bo, + struct ttm_mem_reg *mem); +extern void ttm_bo_mem_put_locked(struct ttm_buffer_object *bo, + struct ttm_mem_reg *mem); + +extern void ttm_bo_global_release(struct drm_global_reference *ref); +extern int ttm_bo_global_init(struct drm_global_reference *ref); + +extern int ttm_bo_device_release(struct ttm_bo_device *bdev); + +/** + * ttm_bo_device_init + * + * @bdev: A pointer to a struct ttm_bo_device to initialize. + * @glob: A pointer to an initialized struct ttm_bo_global. + * @driver: A pointer to a struct ttm_bo_driver set up by the caller. + * @mapping: The address space to use for this bo. + * @file_page_offset: Offset into the device address space that is available + * for buffer data. This ensures compatibility with other users of the + * address space. + * + * Initializes a struct ttm_bo_device: + * Returns: + * !0: Failure. + */ +extern int ttm_bo_device_init(struct ttm_bo_device *bdev, + struct ttm_bo_global *glob, + struct ttm_bo_driver *driver, + struct address_space *mapping, + uint64_t file_page_offset, bool need_dma32); /** * ttm_bo_unmap_virtual * * @bo: tear down the virtual mappings for this BO */ -void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo); +extern void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo); + +/** + * ttm_bo_unmap_virtual + * + * @bo: tear down the virtual mappings for this BO + * + * The caller must take ttm_mem_io_lock before calling this function. 
+ */ +extern void ttm_bo_unmap_virtual_locked(struct ttm_buffer_object *bo); + +extern int ttm_mem_io_reserve_vm(struct ttm_buffer_object *bo); +extern void ttm_mem_io_free_vm(struct ttm_buffer_object *bo); +extern int ttm_mem_io_lock(struct ttm_mem_type_manager *man, + bool interruptible); +extern void ttm_mem_io_unlock(struct ttm_mem_type_manager *man); + +extern void ttm_bo_del_sub_from_lru(struct ttm_buffer_object *bo); +extern void ttm_bo_add_to_lru(struct ttm_buffer_object *bo); + +struct list_head *ttm_bo_default_lru_tail(struct ttm_buffer_object *bo); +struct list_head *ttm_bo_default_swap_lru_tail(struct ttm_buffer_object *bo); + +/** + * __ttm_bo_reserve: + * + * @bo: A pointer to a struct ttm_buffer_object. + * @interruptible: Sleep interruptible if waiting. + * @no_wait: Don't sleep while trying to reserve, rather return -EBUSY. + * @ticket: ticket used to acquire the ww_mutex. + * + * Will not remove reserved buffers from the lru lists. + * Otherwise identical to ttm_bo_reserve. + * + * Returns: + * -EDEADLK: The reservation may cause a deadlock. + * Release all buffer reservations, wait for @bo to become unreserved and + * try again. (only if use_sequence == 1). + * -ERESTARTSYS: A wait for the buffer to become unreserved was interrupted by + * a signal. Release all buffer reservations and return to user-space. + * -EBUSY: The function needed to sleep, but @no_wait was true + * -EALREADY: Bo already reserved using @ticket. This error code will only + * be returned if @use_ticket is set to true. + */ +static inline int __ttm_bo_reserve(struct ttm_buffer_object *bo, + bool interruptible, bool no_wait, + struct ww_acquire_ctx *ticket) +{ + int ret = 0; + + if (no_wait) { + bool success; + if (WARN_ON(ticket)) + return -EBUSY; + + success = ww_mutex_trylock(&bo->resv->lock); + return success ? 
0 : -EBUSY; + } + + if (interruptible) + ret = ww_mutex_lock_interruptible(&bo->resv->lock, ticket); + else + ret = ww_mutex_lock(&bo->resv->lock, ticket); + if (ret == -EINTR) + return -ERESTARTSYS; + return ret; +} /** * ttm_bo_reserve: @@ -116,13 +823,35 @@ void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo); * @ticket: ticket used to acquire the ww_mutex. * * Locks a buffer object for validation. (Or prevents other processes from - * locking it for validation), while taking a number of measures to prevent - * deadlocks. + * locking it for validation) and removes it from lru lists, while taking + * a number of measures to prevent deadlocks. + * + * Deadlocks may occur when two processes try to reserve multiple buffers in + * different order, either by will or as a result of a buffer being evicted + * to make room for a buffer already reserved. (Buffers are reserved before + * they are evicted). The following algorithm prevents such deadlocks from + * occurring: + * Processes attempting to reserve multiple buffers other than for eviction, + * (typically execbuf), should first obtain a unique 32-bit + * validation sequence number, + * and call this function with @use_ticket == 1 and @ticket->stamp == the unique + * sequence number. If upon call of this function, the buffer object is already + * reserved, the validation sequence is checked against the validation + * sequence of the process currently reserving the buffer, + * and if the current validation sequence is greater than that of the process + * holding the reservation, the function returns -EDEADLK. Otherwise it sleeps + * waiting for the buffer to become unreserved, after which it retries + * reserving. + * The caller should, when receiving an -EDEADLK error + * release all its buffer reservations, wait for @bo to become unreserved, and + * then rerun the validation with the same validation sequence. 
This procedure + * will always guarantee that the process with the lowest validation sequence + * will eventually succeed, preventing both deadlocks and starvation. * * Returns: * -EDEADLK: The reservation may cause a deadlock. * Release all buffer reservations, wait for @bo to become unreserved and - * try again. + * try again. (only if use_sequence == 1). * -ERESTARTSYS: A wait for the buffer to become unreserved was interrupted by * a signal. Release all buffer reservations and return to user-space. * -EBUSY: The function needed to sleep, but @no_wait was true @@ -133,23 +862,14 @@ static inline int ttm_bo_reserve(struct ttm_buffer_object *bo, bool interruptible, bool no_wait, struct ww_acquire_ctx *ticket) { - int ret = 0; + int ret; - if (no_wait) { - bool success; - if (WARN_ON(ticket)) - return -EBUSY; + WARN_ON(!atomic_read(&bo->kref.refcount)); - success = dma_resv_trylock(bo->base.resv); - return success ? 0 : -EBUSY; - } + ret = __ttm_bo_reserve(bo, interruptible, no_wait, ticket); + if (likely(ret == 0)) + ttm_bo_del_sub_from_lru(bo); - if (interruptible) - ret = dma_resv_lock_interruptible(bo->base.resv, ticket); - else - ret = dma_resv_lock(bo->base.resv, ticket); - if (ret == -EINTR) - return -ERESTARTSYS; return ret; } @@ -167,44 +887,34 @@ static inline int ttm_bo_reserve_slowpath(struct ttm_buffer_object *bo, bool interruptible, struct ww_acquire_ctx *ticket) { - if (interruptible) { - int ret = dma_resv_lock_slow_interruptible(bo->base.resv, - ticket); - if (ret == -EINTR) - ret = -ERESTARTSYS; - return ret; - } - dma_resv_lock_slow(bo->base.resv, ticket); - return 0; -} + int ret = 0; -static inline void -ttm_bo_move_to_lru_tail_unlocked(struct ttm_buffer_object *bo) -{ - spin_lock(&bo->bdev->lru_lock); - ttm_bo_move_to_lru_tail(bo, bo->resource, NULL); - spin_unlock(&bo->bdev->lru_lock); -} + WARN_ON(!atomic_read(&bo->kref.refcount)); -static inline void ttm_bo_assign_mem(struct ttm_buffer_object *bo, - struct ttm_resource *new_mem) -{ - 
WARN_ON(bo->resource); - bo->resource = new_mem; + if (interruptible) + ret = ww_mutex_lock_slow_interruptible(&bo->resv->lock, + ticket); + else + ww_mutex_lock_slow(&bo->resv->lock, ticket); + + if (likely(ret == 0)) + ttm_bo_del_sub_from_lru(bo); + else if (ret == -EINTR) + ret = -ERESTARTSYS; + + return ret; } /** - * ttm_bo_move_null = assign memory for a buffer object. - * @bo: The bo to assign the memory to - * @new_mem: The memory to be assigned. + * __ttm_bo_unreserve + * @bo: A pointer to a struct ttm_buffer_object. * - * Assign the memory from new_mem to the memory of the buffer object bo. + * Unreserve a previous reservation of @bo where the buffer object is + * already on lru lists. */ -static inline void ttm_bo_move_null(struct ttm_buffer_object *bo, - struct ttm_resource *new_mem) +static inline void __ttm_bo_unreserve(struct ttm_buffer_object *bo) { - ttm_resource_free(bo, &bo->resource); - ttm_bo_assign_mem(bo, new_mem); + ww_mutex_unlock(&bo->resv->lock); } /** @@ -216,17 +926,56 @@ static inline void ttm_bo_move_null(struct ttm_buffer_object *bo, */ static inline void ttm_bo_unreserve(struct ttm_buffer_object *bo) { - ttm_bo_move_to_lru_tail_unlocked(bo); - dma_resv_unlock(bo->base.resv); + if (!(bo->mem.placement & TTM_PL_FLAG_NO_EVICT)) { + spin_lock(&bo->glob->lru_lock); + ttm_bo_add_to_lru(bo); + spin_unlock(&bo->glob->lru_lock); + } + __ttm_bo_unreserve(bo); +} + +/** + * ttm_bo_unreserve_ticket + * @bo: A pointer to a struct ttm_buffer_object. + * @ticket: ww_acquire_ctx used for reserving + * + * Unreserve a previous reservation of @bo made with @ticket. 
+ */ +static inline void ttm_bo_unreserve_ticket(struct ttm_buffer_object *bo, + struct ww_acquire_ctx *t) +{ + ttm_bo_unreserve(bo); } /* * ttm_bo_util.c */ -int ttm_mem_io_reserve(struct ttm_device *bdev, - struct ttm_resource *mem); -void ttm_mem_io_free(struct ttm_device *bdev, - struct ttm_resource *mem); + +int ttm_mem_io_reserve(struct ttm_bo_device *bdev, + struct ttm_mem_reg *mem); +void ttm_mem_io_free(struct ttm_bo_device *bdev, + struct ttm_mem_reg *mem); +/** + * ttm_bo_move_ttm + * + * @bo: A pointer to a struct ttm_buffer_object. + * @interruptible: Sleep interruptible if waiting. + * @no_wait_gpu: Return immediately if the GPU is busy. + * @new_mem: struct ttm_mem_reg indicating where to move. + * + * Optimized move function for a buffer object with both old and + * new placement backed by a TTM. The function will, if successful, + * free any old aperture space, and set (@new_mem)->mm_node to NULL, + * and update the (@bo)->mem placement flags. If unsuccessful, the old + * data remains untouched, and it's up to the caller to free the + * memory space indicated by @new_mem. + * Returns: + * !0: Failure. + */ + +extern int ttm_bo_move_ttm(struct ttm_buffer_object *bo, + bool interruptible, bool no_wait_gpu, + struct ttm_mem_reg *new_mem); /** * ttm_bo_move_memcpy @@ -234,7 +983,7 @@ void ttm_mem_io_free(struct ttm_device *bdev, * @bo: A pointer to a struct ttm_buffer_object. * @interruptible: Sleep interruptible if waiting. * @no_wait_gpu: Return immediately if the GPU is busy. - * @new_mem: struct ttm_resource indicating where to move. + * @new_mem: struct ttm_mem_reg indicating where to move. * * Fallback move function for a mappable buffer object in mappable memory. * The function will, if successful, @@ -246,9 +995,18 @@ void ttm_mem_io_free(struct ttm_device *bdev, * !0: Failure. 
*/ -int ttm_bo_move_memcpy(struct ttm_buffer_object *bo, - struct ttm_operation_ctx *ctx, - struct ttm_resource *new_mem); +extern int ttm_bo_move_memcpy(struct ttm_buffer_object *bo, + bool interruptible, bool no_wait_gpu, + struct ttm_mem_reg *new_mem); + +/** + * ttm_bo_free_old_node + * + * @bo: A pointer to a struct ttm_buffer_object. + * + * Utility function to free an old placement after a successful move. + */ +extern void ttm_bo_free_old_node(struct ttm_buffer_object *bo); /** * ttm_bo_move_accel_cleanup. @@ -256,8 +1014,7 @@ int ttm_bo_move_memcpy(struct ttm_buffer_object *bo, * @bo: A pointer to a struct ttm_buffer_object. * @fence: A fence object that signals when moving is complete. * @evict: This is an evict move. Don't return until the buffer is idle. - * @pipeline: evictions are to be pipelined. - * @new_mem: struct ttm_resource indicating where to move. + * @new_mem: struct ttm_mem_reg indicating where to move. * * Accelerated move function to be called when an accelerated move * has been scheduled. The function will create a new temporary buffer object @@ -266,70 +1023,62 @@ int ttm_bo_move_memcpy(struct ttm_buffer_object *bo, * destroyed when the move is complete. This will help pipeline * buffer moves. */ -int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo, - struct dma_fence *fence, bool evict, - bool pipeline, - struct ttm_resource *new_mem); + +extern int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo, + struct fence *fence, bool evict, + struct ttm_mem_reg *new_mem); /** - * ttm_bo_move_accel_cleanup. + * ttm_bo_pipeline_move. * * @bo: A pointer to a struct ttm_buffer_object. - * @new_mem: struct ttm_resource indicating where to move. + * @fence: A fence object that signals when moving is complete. + * @evict: This is an evict move. Don't return until the buffer is idle. + * @new_mem: struct ttm_mem_reg indicating where to move. 
* - * Special case of ttm_bo_move_accel_cleanup where the bo is guaranteed - * by the caller to be idle. Typically used after memcpy buffer moves. + * Function for pipelining accelerated moves. Either free the memory + * immediately or hang it on a temporary buffer object. */ -static inline void ttm_bo_move_sync_cleanup(struct ttm_buffer_object *bo, - struct ttm_resource *new_mem) -{ - int ret = ttm_bo_move_accel_cleanup(bo, NULL, true, false, new_mem); - - WARN_ON(ret); -} - -/** - * ttm_bo_pipeline_gutting. - * - * @bo: A pointer to a struct ttm_buffer_object. - * - * Pipelined gutting a BO of its backing store. - */ -int ttm_bo_pipeline_gutting(struct ttm_buffer_object *bo); +int ttm_bo_pipeline_move(struct ttm_buffer_object *bo, + struct fence *fence, bool evict, + struct ttm_mem_reg *new_mem); /** * ttm_io_prot * - * bo: ttm buffer object - * res: ttm resource object + * @c_state: Caching state. * @tmp: Page protection flag for a normal, cached mapping. * * Utility function that returns the pgprot_t that should be used for * setting up a PTE with the caching model indicated by @c_state. */ -pgprot_t ttm_io_prot(struct ttm_buffer_object *bo, struct ttm_resource *res, - pgprot_t tmp); +extern pgprot_t ttm_io_prot(uint32_t caching_flags, pgprot_t tmp); + +extern const struct ttm_mem_type_manager_func ttm_bo_manager_func; + +#if IS_ENABLED(CONFIG_AGP) +#include /** - * ttm_bo_tt_bind + * ttm_agp_tt_create * - * Bind the object tt to a memory resource. + * @bdev: Pointer to a struct ttm_bo_device. + * @bridge: The agp bridge this device is sitting on. + * @size: Size of the data needed backing. + * @page_flags: Page flags as identified by TTM_PAGE_FLAG_XX flags. + * @dummy_read_page: See struct ttm_bo_device. + * + * + * Create a TTM backend that uses the indicated AGP bridge as an aperture + * for TT memory. This function uses the linux agpgart interface to + * bind and unbind memory backing a ttm_tt. 
*/ -int ttm_bo_tt_bind(struct ttm_buffer_object *bo, struct ttm_resource *mem); - -/** - * ttm_bo_tt_destroy. - */ -void ttm_bo_tt_destroy(struct ttm_buffer_object *bo); - -void ttm_move_memcpy(struct ttm_buffer_object *bo, - u32 num_pages, - struct ttm_kmap_iter *dst_iter, - struct ttm_kmap_iter *src_iter); - -struct ttm_kmap_iter * -ttm_kmap_iter_iomap_init(struct ttm_kmap_iter_iomap *iter_io, - struct io_mapping *iomap, - struct sg_table *st, - resource_size_t start); +extern struct ttm_tt *ttm_agp_tt_create(struct ttm_bo_device *bdev, + struct agp_bridge_data *bridge, + unsigned long size, uint32_t page_flags, + struct page *dummy_read_page); +int ttm_agp_tt_populate(struct ttm_tt *ttm); +void ttm_agp_tt_unpopulate(struct ttm_tt *ttm); +#endif + #endif diff --git a/include/drm/ttm/ttm_execbuf_util.h b/include/drm/ttm/ttm_execbuf_util.h index a99d7fdf29..b620c317c7 100644 --- a/include/drm/ttm/ttm_execbuf_util.h +++ b/include/drm/ttm/ttm_execbuf_util.h @@ -31,22 +31,21 @@ #ifndef _TTM_EXECBUF_UTIL_H_ #define _TTM_EXECBUF_UTIL_H_ +#include #include -#include "ttm_bo_api.h" - /** * struct ttm_validate_buffer * * @head: list head for thread-private list. * @bo: refcounted buffer object pointer. - * @num_shared: How many shared fences we want to add. + * @shared: should the fence be added shared? */ struct ttm_validate_buffer { struct list_head head; struct ttm_buffer_object *bo; - unsigned int num_shared; + bool shared; }; /** @@ -58,8 +57,9 @@ struct ttm_validate_buffer { * Undoes all buffer validation reservations for bos pointed to by * the list entries. */ -void ttm_eu_backoff_reservation(struct ww_acquire_ctx *ticket, - struct list_head *list); + +extern void ttm_eu_backoff_reservation(struct ww_acquire_ctx *ticket, + struct list_head *list); /** * function ttm_eu_reserve_buffers @@ -69,7 +69,6 @@ void ttm_eu_backoff_reservation(struct ww_acquire_ctx *ticket, * @list: thread private list of ttm_validate_buffer structs. 
* @intr: should the wait be interruptible * @dups: [out] optional list of duplicates. - * @del_lru: true if BOs should be removed from the LRU. * * Tries to reserve bos pointed to by the list entries for validation. * If the function returns 0, all buffers are marked as "unfenced", @@ -95,9 +94,10 @@ void ttm_eu_backoff_reservation(struct ww_acquire_ctx *ticket, * ttm_eu_fence_buffer_objects() when command submission is complete or * has failed. */ -int ttm_eu_reserve_buffers(struct ww_acquire_ctx *ticket, - struct list_head *list, bool intr, - struct list_head *dups); + +extern int ttm_eu_reserve_buffers(struct ww_acquire_ctx *ticket, + struct list_head *list, bool intr, + struct list_head *dups); /** * function ttm_eu_fence_buffer_objects. @@ -111,8 +111,9 @@ int ttm_eu_reserve_buffers(struct ww_acquire_ctx *ticket, * It also unreserves all buffers, putting them on lru lists. * */ -void ttm_eu_fence_buffer_objects(struct ww_acquire_ctx *ticket, - struct list_head *list, - struct dma_fence *fence); + +extern void ttm_eu_fence_buffer_objects(struct ww_acquire_ctx *ticket, + struct list_head *list, + struct fence *fence); #endif diff --git a/include/drm/ttm/ttm_lock.h b/include/drm/ttm/ttm_lock.h new file mode 100644 index 0000000000..2902beb5f6 --- /dev/null +++ b/include/drm/ttm/ttm_lock.h @@ -0,0 +1,247 @@ +/************************************************************************** + * + * Copyright (c) 2007-2009 VMware, Inc., Palo Alto, CA., USA + * All Rights Reserved. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sub license, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial portions + * of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, + * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR + * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE + * USE OR OTHER DEALINGS IN THE SOFTWARE. + * + **************************************************************************/ +/* + * Authors: Thomas Hellstrom + */ + +/** @file ttm_lock.h + * This file implements a simple replacement for the buffer manager use + * of the DRM heavyweight hardware lock. + * The lock is a read-write lock. Taking it in read mode and write mode + * is relatively fast, and intended for in-kernel use only. + * + * The vt mode is used only when there is a need to block all + * user-space processes from validating buffers. + * It's allowed to leave kernel space with the vt lock held. + * If a user-space process dies while having the vt-lock, + * it will be released during the file descriptor release. The vt lock + * excludes write lock and read lock. 
+ * + * The suspend mode is used to lock out all TTM users when preparing for + * and executing suspend operations. + * + */ + +#ifndef _TTM_LOCK_H_ +#define _TTM_LOCK_H_ + +#include +#include +#include + +/** + * struct ttm_lock + * + * @base: ttm base object used solely to release the lock if the client + * holding the lock dies. + * @queue: Queue for processes waiting for lock change-of-status. + * @lock: Spinlock protecting some lock members. + * @rw: Read-write lock counter. Protected by @lock. + * @flags: Lock state. Protected by @lock. + * @kill_takers: Boolean whether to kill takers of the lock. + * @signal: Signal to send when kill_takers is true. + */ + +struct ttm_lock { + struct ttm_base_object base; + wait_queue_head_t queue; + spinlock_t lock; + int32_t rw; + uint32_t flags; + bool kill_takers; + int signal; + struct ttm_object_file *vt_holder; +}; + + +/** + * ttm_lock_init + * + * @lock: Pointer to a struct ttm_lock + * Initializes the lock. + */ +extern void ttm_lock_init(struct ttm_lock *lock); + +/** + * ttm_read_unlock + * + * @lock: Pointer to a struct ttm_lock + * + * Releases a read lock. + */ +extern void ttm_read_unlock(struct ttm_lock *lock); + +/** + * ttm_read_lock + * + * @lock: Pointer to a struct ttm_lock + * @interruptible: Interruptible sleeping while waiting for a lock. + * + * Takes the lock in read mode. + * Returns: + * -ERESTARTSYS If interrupted by a signal and interruptible is true. + */ +extern int ttm_read_lock(struct ttm_lock *lock, bool interruptible); + +/** + * ttm_read_trylock + * + * @lock: Pointer to a struct ttm_lock + * @interruptible: Interruptible sleeping while waiting for a lock. + * + * Tries to take the lock in read mode. If the lock is already held + * in write mode, the function will return -EBUSY. If the lock is held + * in vt or suspend mode, the function will sleep until these modes + * are unlocked. + * + * Returns: + * -EBUSY The lock was already held in write mode. 
+ * -ERESTARTSYS If interrupted by a signal and interruptible is true. + */ +extern int ttm_read_trylock(struct ttm_lock *lock, bool interruptible); + +/** + * ttm_write_unlock + * + * @lock: Pointer to a struct ttm_lock + * + * Releases a write lock. + */ +extern void ttm_write_unlock(struct ttm_lock *lock); + +/** + * ttm_write_lock + * + * @lock: Pointer to a struct ttm_lock + * @interruptible: Interruptible sleeping while waiting for a lock. + * + * Takes the lock in write mode. + * Returns: + * -ERESTARTSYS If interrupted by a signal and interruptible is true. + */ +extern int ttm_write_lock(struct ttm_lock *lock, bool interruptible); + +/** + * ttm_lock_downgrade + * + * @lock: Pointer to a struct ttm_lock + * + * Downgrades a write lock to a read lock. + */ +extern void ttm_lock_downgrade(struct ttm_lock *lock); + +/** + * ttm_suspend_lock + * + * @lock: Pointer to a struct ttm_lock + * + * Takes the lock in suspend mode. Excludes read and write mode. + */ +extern void ttm_suspend_lock(struct ttm_lock *lock); + +/** + * ttm_suspend_unlock + * + * @lock: Pointer to a struct ttm_lock + * + * Releases a suspend lock + */ +extern void ttm_suspend_unlock(struct ttm_lock *lock); + +/** + * ttm_vt_lock + * + * @lock: Pointer to a struct ttm_lock + * @interruptible: Interruptible sleeping while waiting for a lock. + * @tfile: Pointer to a struct ttm_object_file to register the lock with. + * + * Takes the lock in vt mode. + * Returns: + * -ERESTARTSYS If interrupted by a signal and interruptible is true. + * -ENOMEM: Out of memory when locking. + */ +extern int ttm_vt_lock(struct ttm_lock *lock, bool interruptible, + struct ttm_object_file *tfile); + +/** + * ttm_vt_unlock + * + * @lock: Pointer to a struct ttm_lock + * + * Releases a vt lock. + * Returns: + * -EINVAL If the lock was not held. + */ +extern int ttm_vt_unlock(struct ttm_lock *lock); + +/** + * ttm_write_unlock + * + * @lock: Pointer to a struct ttm_lock + * + * Releases a write lock. 
+ */ +extern void ttm_write_unlock(struct ttm_lock *lock); + +/** + * ttm_write_lock + * + * @lock: Pointer to a struct ttm_lock + * @interruptible: Interruptible sleeping while waiting for a lock. + * + * Takes the lock in write mode. + * Returns: + * -ERESTARTSYS If interrupted by a signal and interruptible is true. + */ +extern int ttm_write_lock(struct ttm_lock *lock, bool interruptible); + +/** + * ttm_lock_set_kill + * + * @lock: Pointer to a struct ttm_lock + * @val: Boolean whether to kill processes taking the lock. + * @signal: Signal to send to the process taking the lock. + * + * The kill-when-taking-lock functionality is used to kill processes that keep + * on using the TTM functionality when its resources has been taken down, for + * example when the X server exits. A typical sequence would look like this: + * - X server takes lock in write mode. + * - ttm_lock_set_kill() is called with @val set to true. + * - As part of X server exit, TTM resources are taken down. + * - X server releases the lock on file release. + * - Another dri client wants to render, takes the lock and is killed. + * + */ +static inline void ttm_lock_set_kill(struct ttm_lock *lock, bool val, + int signal) +{ + lock->kill_takers = val; + if (val) + lock->signal = signal; +} + +#endif diff --git a/include/drm/ttm/ttm_memory.h b/include/drm/ttm/ttm_memory.h index c1f167881e..ca178e7802 100644 --- a/include/drm/ttm/ttm_memory.h +++ b/include/drm/ttm/ttm_memory.h @@ -35,7 +35,20 @@ #include #include #include -#include "ttm_bo_api.h" + +/** + * struct ttm_mem_shrink - callback to shrink TTM memory usage. + * + * @do_shrink: The callback function. + * + * Arguments to the do_shrink functions are intended to be passed using + * inheritance. That is, the argument class derives from struct ttm_mem_shrink, + * and can be accessed using container_of(). 
+ */ + +struct ttm_mem_shrink { + int (*do_shrink) (struct ttm_mem_shrink *); +} __no_const; /** * struct ttm_mem_global - Global memory accounting structure. @@ -49,8 +62,6 @@ * @work: The workqueue callback for the shrink queue. * @lock: Lock to protect the @shrink - and the memory accounting members, * that is, essentially the whole structure with some exceptions. - * @lower_mem_limit: include lower limit of swap space and lower limit of - * system memory. * @zones: Array of pointers to accounting zones. * @num_zones: Number of populated entries in the @zones array. * @zone_kernel: Pointer to the kernel zone. @@ -63,12 +74,12 @@ #define TTM_MEM_MAX_ZONES 2 struct ttm_mem_zone; -extern struct ttm_mem_global { +struct ttm_mem_global { struct kobject kobj; + struct ttm_mem_shrink *shrink; struct workqueue_struct *swap_queue; struct work_struct work; spinlock_t lock; - uint64_t lower_mem_limit; struct ttm_mem_zone *zones[TTM_MEM_MAX_ZONES]; unsigned int num_zones; struct ttm_mem_zone *zone_kernel; @@ -77,19 +88,72 @@ extern struct ttm_mem_global { #else struct ttm_mem_zone *zone_dma32; #endif -} ttm_mem_glob; +}; -int ttm_mem_global_init(struct ttm_mem_global *glob); -void ttm_mem_global_release(struct ttm_mem_global *glob); -int ttm_mem_global_alloc(struct ttm_mem_global *glob, uint64_t memory, - struct ttm_operation_ctx *ctx); -void ttm_mem_global_free(struct ttm_mem_global *glob, uint64_t amount); -int ttm_mem_global_alloc_page(struct ttm_mem_global *glob, - struct page *page, uint64_t size, - struct ttm_operation_ctx *ctx); -void ttm_mem_global_free_page(struct ttm_mem_global *glob, - struct page *page, uint64_t size); -size_t ttm_round_pot(size_t size); -bool ttm_check_under_lowerlimit(struct ttm_mem_global *glob, uint64_t num_pages, - struct ttm_operation_ctx *ctx); +/** + * ttm_mem_init_shrink - initialize a struct ttm_mem_shrink object + * + * @shrink: The object to initialize. + * @func: The callback function. 
+ */ + +static inline void ttm_mem_init_shrink(struct ttm_mem_shrink *shrink, + int (*func) (struct ttm_mem_shrink *)) +{ + shrink->do_shrink = func; +} + +/** + * ttm_mem_register_shrink - register a struct ttm_mem_shrink object. + * + * @glob: The struct ttm_mem_global object to register with. + * @shrink: An initialized struct ttm_mem_shrink object to register. + * + * Returns: + * -EBUSY: There's already a callback registered. (May change). + */ + +static inline int ttm_mem_register_shrink(struct ttm_mem_global *glob, + struct ttm_mem_shrink *shrink) +{ + spin_lock(&glob->lock); + if (glob->shrink != NULL) { + spin_unlock(&glob->lock); + return -EBUSY; + } + glob->shrink = shrink; + spin_unlock(&glob->lock); + return 0; +} + +/** + * ttm_mem_unregister_shrink - unregister a struct ttm_mem_shrink object. + * + * @glob: The struct ttm_mem_global object to unregister from. + * @shrink: A previously registert struct ttm_mem_shrink object. + * + */ + +static inline void ttm_mem_unregister_shrink(struct ttm_mem_global *glob, + struct ttm_mem_shrink *shrink) +{ + spin_lock(&glob->lock); + BUG_ON(glob->shrink != shrink); + glob->shrink = NULL; + spin_unlock(&glob->lock); +} + +extern int ttm_mem_global_init(struct ttm_mem_global *glob); +extern void ttm_mem_global_release(struct ttm_mem_global *glob); +extern int ttm_mem_global_alloc(struct ttm_mem_global *glob, uint64_t memory, + bool no_wait, bool interruptible); +extern void ttm_mem_global_free(struct ttm_mem_global *glob, + uint64_t amount); +extern int ttm_mem_global_alloc_page(struct ttm_mem_global *glob, + struct page *page, + bool no_wait, bool interruptible); +extern void ttm_mem_global_free_page(struct ttm_mem_global *glob, + struct page *page); +extern size_t ttm_round_pot(size_t size); +extern uint64_t ttm_get_kernel_zone_memory_size(struct ttm_mem_global *glob); #endif diff --git a/include/drm/ttm/ttm_object.h b/include/drm/ttm/ttm_object.h new file mode 100644 index 0000000000..1487011fe0 --- /dev/null 
+++ b/include/drm/ttm/ttm_object.h @@ -0,0 +1,353 @@ +/************************************************************************** + * + * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA + * All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sub license, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial portions + * of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, + * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR + * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE + * USE OR OTHER DEALINGS IN THE SOFTWARE. + * + **************************************************************************/ +/* + * Authors: Thomas Hellstrom + */ +/** @file ttm_object.h + * + * Base- and reference object implementation for the various + * ttm objects. Implements reference counting, minimal security checks + * and release on file close. + */ + +#ifndef _TTM_OBJECT_H_ +#define _TTM_OBJECT_H_ + +#include +#include +#include +#include +#include +#include + +/** + * enum ttm_ref_type + * + * Describes what type of reference a ref object holds. + * + * TTM_REF_USAGE is a simple refcount on a base object. 
+ * + * TTM_REF_SYNCCPU_READ is a SYNCCPU_READ reference on a + * buffer object. + * + * TTM_REF_SYNCCPU_WRITE is a SYNCCPU_WRITE reference on a + * buffer object. + * + */ + +enum ttm_ref_type { + TTM_REF_USAGE, + TTM_REF_SYNCCPU_READ, + TTM_REF_SYNCCPU_WRITE, + TTM_REF_NUM +}; + +/** + * enum ttm_object_type + * + * One entry per ttm object type. + * Device-specific types should use the + * ttm_driver_typex types. + */ + +enum ttm_object_type { + ttm_fence_type, + ttm_buffer_type, + ttm_lock_type, + ttm_prime_type, + ttm_driver_type0 = 256, + ttm_driver_type1, + ttm_driver_type2, + ttm_driver_type3, + ttm_driver_type4, + ttm_driver_type5 +}; + +struct ttm_object_file; +struct ttm_object_device; + +/** + * struct ttm_base_object + * + * @hash: hash entry for the per-device object hash. + * @type: derived type this object is base class for. + * @shareable: Other ttm_object_files can access this object. + * + * @tfile: Pointer to ttm_object_file of the creator. + * NULL if the object was not created by a user request. + * (kernel object). + * + * @refcount: Number of references to this object, not + * including the hash entry. A reference to a base object can + * only be held by a ref object. + * + * @refcount_release: A function to be called when there are + * no more references to this object. This function should + * destroy the object (or make sure destruction eventually happens), + * and when it is called, the object has + * already been taken out of the per-device hash. The parameter + * "base" should be set to NULL by the function. + * + * @ref_obj_release: A function to be called when a reference object + * with another ttm_ref_type than TTM_REF_USAGE is deleted. + * This function may, for example, release a lock held by a user-space + * process. + * + * This struct is intended to be used as a base struct for objects that + * are visible to user-space. 
It provides a global name, race-safe + * access and refcounting, minimal access contol and hooks for unref actions. + */ + +struct ttm_base_object { + struct rcu_head rhead; + struct drm_hash_item hash; + enum ttm_object_type object_type; + bool shareable; + struct ttm_object_file *tfile; + struct kref refcount; + void (*refcount_release) (struct ttm_base_object **base); + void (*ref_obj_release) (struct ttm_base_object *base, + enum ttm_ref_type ref_type); +}; + + +/** + * struct ttm_prime_object - Modified base object that is prime-aware + * + * @base: struct ttm_base_object that we derive from + * @mutex: Mutex protecting the @dma_buf member. + * @size: Size of the dma_buf associated with this object + * @real_type: Type of the underlying object. Needed since we're setting + * the value of @base::object_type to ttm_prime_type + * @dma_buf: Non ref-coutned pointer to a struct dma_buf created from this + * object. + * @refcount_release: The underlying object's release method. Needed since + * we set @base::refcount_release to our own release method. + */ + +struct ttm_prime_object { + struct ttm_base_object base; + struct mutex mutex; + size_t size; + enum ttm_object_type real_type; + struct dma_buf *dma_buf; + void (*refcount_release) (struct ttm_base_object **); +}; + +/** + * ttm_base_object_init + * + * @tfile: Pointer to a struct ttm_object_file. + * @base: The struct ttm_base_object to initialize. + * @shareable: This object is shareable with other applcations. + * (different @tfile pointers.) + * @type: The object type. + * @refcount_release: See the struct ttm_base_object description. + * @ref_obj_release: See the struct ttm_base_object description. + * + * Initializes a struct ttm_base_object. 
+ */ + +extern int ttm_base_object_init(struct ttm_object_file *tfile, + struct ttm_base_object *base, + bool shareable, + enum ttm_object_type type, + void (*refcount_release) (struct ttm_base_object + **), + void (*ref_obj_release) (struct ttm_base_object + *, + enum ttm_ref_type + ref_type)); + +/** + * ttm_base_object_lookup + * + * @tfile: Pointer to a struct ttm_object_file. + * @key: Hash key + * + * Looks up a struct ttm_base_object with the key @key. + */ + +extern struct ttm_base_object *ttm_base_object_lookup(struct ttm_object_file + *tfile, uint32_t key); + +/** + * ttm_base_object_lookup_for_ref + * + * @tdev: Pointer to a struct ttm_object_device. + * @key: Hash key + * + * Looks up a struct ttm_base_object with the key @key. + * This function should only be used when the struct tfile associated with the + * caller doesn't yet have a reference to the base object. + */ + +extern struct ttm_base_object * +ttm_base_object_lookup_for_ref(struct ttm_object_device *tdev, uint32_t key); + +/** + * ttm_base_object_unref + * + * @p_base: Pointer to a pointer referencing a struct ttm_base_object. + * + * Decrements the base object refcount and clears the pointer pointed to by + * p_base. + */ + +extern void ttm_base_object_unref(struct ttm_base_object **p_base); + +/** + * ttm_ref_object_add. + * + * @tfile: A struct ttm_object_file representing the application owning the + * ref_object. + * @base: The base object to reference. + * @ref_type: The type of reference. + * @existed: Upon completion, indicates that an identical reference object + * already existed, and the refcount was upped on that object instead. + * @require_existed: Fail with -EPERM if an identical ref object didn't + * already exist. + * + * Checks that the base object is shareable and adds a ref object to it. + * + * Adding a ref object to a base object is basically like referencing the + * base object, but a user-space application holds the reference. 
When the + * file corresponding to @tfile is closed, all its reference objects are + * deleted. A reference object can have different types depending on what + * it's intended for. It can be refcounting to prevent object destruction, + * When user-space takes a lock, it can add a ref object to that lock to + * make sure the lock is released if the application dies. A ref object + * will hold a single reference on a base object. + */ +extern int ttm_ref_object_add(struct ttm_object_file *tfile, + struct ttm_base_object *base, + enum ttm_ref_type ref_type, bool *existed, + bool require_existed); + +extern bool ttm_ref_object_exists(struct ttm_object_file *tfile, + struct ttm_base_object *base); + +/** + * ttm_ref_object_base_unref + * + * @key: Key representing the base object. + * @ref_type: Ref type of the ref object to be dereferenced. + * + * Unreference a ref object with type @ref_type + * on the base object identified by @key. If there are no duplicate + * references, the ref object will be destroyed and the base object + * will be unreferenced. + */ +extern int ttm_ref_object_base_unref(struct ttm_object_file *tfile, + unsigned long key, + enum ttm_ref_type ref_type); + +/** + * ttm_object_file_init - initialize a struct ttm_object file + * + * @tdev: A struct ttm_object device this file is initialized on. + * @hash_order: Order of the hash table used to hold the reference objects. + * + * This is typically called by the file_ops::open function. + */ + +extern struct ttm_object_file *ttm_object_file_init(struct ttm_object_device + *tdev, + unsigned int hash_order); + +/** + * ttm_object_file_release - release data held by a ttm_object_file + * + * @p_tfile: Pointer to pointer to the ttm_object_file object to release. + * *p_tfile will be set to NULL by this function. + * + * Releases all data associated by a ttm_object_file. + * Typically called from file_ops::release. The caller must + * ensure that there are no concurrent users of tfile. 
+ */ + +extern void ttm_object_file_release(struct ttm_object_file **p_tfile); + +/** + * ttm_object device init - initialize a struct ttm_object_device + * + * @mem_glob: struct ttm_mem_global for memory accounting. + * @hash_order: Order of hash table used to hash the base objects. + * @ops: DMA buf ops for prime objects of this device. + * + * This function is typically called on device initialization to prepare + * data structures needed for ttm base and ref objects. + */ + +extern struct ttm_object_device * +ttm_object_device_init(struct ttm_mem_global *mem_glob, + unsigned int hash_order, + const struct dma_buf_ops *ops); + +/** + * ttm_object_device_release - release data held by a ttm_object_device + * + * @p_tdev: Pointer to pointer to the ttm_object_device object to release. + * *p_tdev will be set to NULL by this function. + * + * Releases all data associated by a ttm_object_device. + * Typically called from driver::unload before the destruction of the + * device private data structure. + */ + +extern void ttm_object_device_release(struct ttm_object_device **p_tdev); + +#define ttm_base_object_kfree(__object, __base)\ + kfree_rcu(__object, __base.rhead) + +extern int ttm_prime_object_init(struct ttm_object_file *tfile, + size_t size, + struct ttm_prime_object *prime, + bool shareable, + enum ttm_object_type type, + void (*refcount_release) + (struct ttm_base_object **), + void (*ref_obj_release) + (struct ttm_base_object *, + enum ttm_ref_type ref_type)); + +static inline enum ttm_object_type +ttm_base_object_type(struct ttm_base_object *base) +{ + return (base->object_type == ttm_prime_type) ? 
+ container_of(base, struct ttm_prime_object, base)->real_type : + base->object_type; +} +extern int ttm_prime_fd_to_handle(struct ttm_object_file *tfile, + int fd, u32 *handle); +extern int ttm_prime_handle_to_fd(struct ttm_object_file *tfile, + uint32_t handle, uint32_t flags, + int *prime_fd); + +#define ttm_prime_object_kfree(__obj, __prime) \ + kfree_rcu(__obj, __prime.base.rhead) +#endif diff --git a/include/drm/ttm/ttm_page_alloc.h b/include/drm/ttm/ttm_page_alloc.h index a6b6ef5f9b..9643967eb5 100644 --- a/include/drm/ttm/ttm_page_alloc.h +++ b/include/drm/ttm/ttm_page_alloc.h @@ -47,7 +47,7 @@ void ttm_page_alloc_fini(void); * * Add backing pages to all of @ttm */ -int ttm_pool_populate(struct ttm_tt *ttm, struct ttm_operation_ctx *ctx); +extern int ttm_pool_populate(struct ttm_tt *ttm); /** * ttm_pool_unpopulate: @@ -56,25 +56,15 @@ int ttm_pool_populate(struct ttm_tt *ttm, struct ttm_operation_ctx *ctx); * * Free all pages of @ttm */ -void ttm_pool_unpopulate(struct ttm_tt *ttm); - -/** - * Populates and DMA maps pages to fullfil a ttm_dma_populate() request - */ -int ttm_populate_and_map_pages(struct device *dev, struct ttm_dma_tt *tt, - struct ttm_operation_ctx *ctx); - -/** - * Unpopulates and DMA unmaps pages as part of a - * ttm_dma_unpopulate() request */ -void ttm_unmap_and_unpopulate_pages(struct device *dev, struct ttm_dma_tt *tt); +extern void ttm_pool_unpopulate(struct ttm_tt *ttm); /** * Output the state of pools to debugfs file */ -int ttm_page_alloc_debugfs(struct seq_file *m, void *data); +extern int ttm_page_alloc_debugfs(struct seq_file *m, void *data); -#if defined(CONFIG_DRM_TTM_DMA_PAGE_POOL) + +#if defined(CONFIG_SWIOTLB) || defined(CONFIG_INTEL_IOMMU) /** * Initialize pool allocator. 
*/ @@ -88,11 +78,11 @@ void ttm_dma_page_alloc_fini(void); /** * Output the state of pools to debugfs file */ -int ttm_dma_page_alloc_debugfs(struct seq_file *m, void *data); +extern int ttm_dma_page_alloc_debugfs(struct seq_file *m, void *data); -int ttm_dma_populate(struct ttm_dma_tt *ttm_dma, struct device *dev, - struct ttm_operation_ctx *ctx); -void ttm_dma_unpopulate(struct ttm_dma_tt *ttm_dma, struct device *dev); +struct device; +extern int ttm_dma_populate(struct ttm_dma_tt *ttm_dma, struct device *dev); +extern void ttm_dma_unpopulate(struct ttm_dma_tt *ttm_dma, struct device *dev); #else static inline int ttm_dma_page_alloc_init(struct ttm_mem_global *glob, @@ -108,8 +98,7 @@ static inline int ttm_dma_page_alloc_debugfs(struct seq_file *m, void *data) return 0; } static inline int ttm_dma_populate(struct ttm_dma_tt *ttm_dma, - struct device *dev, - struct ttm_operation_ctx *ctx) + struct device *dev) { return -ENOMEM; } diff --git a/include/drm/ttm/ttm_placement.h b/include/drm/ttm/ttm_placement.h index 8995c9e4ec..932be0c808 100644 --- a/include/drm/ttm/ttm_placement.h +++ b/include/drm/ttm/ttm_placement.h @@ -42,16 +42,35 @@ #define TTM_PL_VRAM 2 #define TTM_PL_PRIV 3 +#define TTM_PL_FLAG_SYSTEM (1 << TTM_PL_SYSTEM) +#define TTM_PL_FLAG_TT (1 << TTM_PL_TT) +#define TTM_PL_FLAG_VRAM (1 << TTM_PL_VRAM) +#define TTM_PL_FLAG_PRIV (1 << TTM_PL_PRIV) +#define TTM_PL_MASK_MEM 0x0000FFFF + /* + * Other flags that affects data placement. + * TTM_PL_FLAG_CACHED indicates cache-coherent mappings + * if available. + * TTM_PL_FLAG_SHARED means that another application may + * reference the buffer. + * TTM_PL_FLAG_NO_EVICT means that the buffer may never + * be evicted to make room for other buffers. * TTM_PL_FLAG_TOPDOWN requests to be placed from the * top of the memory area, instead of the bottom. 
*/ -#define TTM_PL_FLAG_CONTIGUOUS (1 << 0) -#define TTM_PL_FLAG_TOPDOWN (1 << 1) +#define TTM_PL_FLAG_CACHED (1 << 16) +#define TTM_PL_FLAG_UNCACHED (1 << 17) +#define TTM_PL_FLAG_WC (1 << 18) +#define TTM_PL_FLAG_NO_EVICT (1 << 21) +#define TTM_PL_FLAG_TOPDOWN (1 << 22) -/* For multihop handling */ -#define TTM_PL_FLAG_TEMPORARY (1 << 2) +#define TTM_PL_MASK_CACHING (TTM_PL_FLAG_CACHED | \ + TTM_PL_FLAG_UNCACHED | \ + TTM_PL_FLAG_WC) + +#define TTM_PL_MASK_MEMTYPE (TTM_PL_MASK_MEM | TTM_PL_MASK_CACHING) /** * struct ttm_place @@ -65,7 +84,6 @@ struct ttm_place { unsigned fpfn; unsigned lpfn; - uint32_t mem_type; uint32_t flags; }; diff --git a/include/dt-bindings/arm/ux500_pm_domains.h b/include/dt-bindings/arm/ux500_pm_domains.h index 9bd764f0c9..398a6c0288 100644 --- a/include/dt-bindings/arm/ux500_pm_domains.h +++ b/include/dt-bindings/arm/ux500_pm_domains.h @@ -1,8 +1,8 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright (C) 2014 Linaro Ltd. * * Author: Ulf Hansson + * License terms: GNU General Public License (GPL) version 2 */ #ifndef _DT_BINDINGS_ARM_UX500_PM_DOMAINS_H #define _DT_BINDINGS_ARM_UX500_PM_DOMAINS_H diff --git a/include/dt-bindings/clk/ti-dra7-atl.h b/include/dt-bindings/clk/ti-dra7-atl.h new file mode 100644 index 0000000000..42dd4164f6 --- /dev/null +++ b/include/dt-bindings/clk/ti-dra7-atl.h @@ -0,0 +1,40 @@ +/* + * This header provides constants for DRA7 ATL (Audio Tracking Logic) + * + * The constants defined in this header are used in dts files + * + * Copyright (C) 2013 Texas Instruments, Inc. + * + * Peter Ujfalusi + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed "as is" WITHOUT ANY WARRANTY of any + * kind, whether express or implied; without even the implied warranty + * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the + * GNU General Public License for more details. + */ + +#ifndef _DT_BINDINGS_CLK_DRA7_ATL_H +#define _DT_BINDINGS_CLK_DRA7_ATL_H + +#define DRA7_ATL_WS_MCASP1_FSR 0 +#define DRA7_ATL_WS_MCASP1_FSX 1 +#define DRA7_ATL_WS_MCASP2_FSR 2 +#define DRA7_ATL_WS_MCASP2_FSX 3 +#define DRA7_ATL_WS_MCASP3_FSX 4 +#define DRA7_ATL_WS_MCASP4_FSX 5 +#define DRA7_ATL_WS_MCASP5_FSX 6 +#define DRA7_ATL_WS_MCASP6_FSX 7 +#define DRA7_ATL_WS_MCASP7_FSX 8 +#define DRA7_ATL_WS_MCASP8_FSX 9 +#define DRA7_ATL_WS_MCASP8_AHCLKX 10 +#define DRA7_ATL_WS_XREF_CLK3 11 +#define DRA7_ATL_WS_XREF_CLK0 12 +#define DRA7_ATL_WS_XREF_CLK1 13 +#define DRA7_ATL_WS_XREF_CLK2 14 +#define DRA7_ATL_WS_OSC1_X1 15 + +#endif diff --git a/include/dt-bindings/clock/alphascale,asm9260.h b/include/dt-bindings/clock/alphascale,asm9260.h index d3871c6330..04e8db27da 100644 --- a/include/dt-bindings/clock/alphascale,asm9260.h +++ b/include/dt-bindings/clock/alphascale,asm9260.h @@ -1,6 +1,14 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright 2014 Oleksij Rempel + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. */ #ifndef _DT_BINDINGS_CLK_ASM9260_H diff --git a/include/dt-bindings/clock/at91.h b/include/dt-bindings/clock/at91.h index 98e1b2ab64..ab3ee241d1 100644 --- a/include/dt-bindings/clock/at91.h +++ b/include/dt-bindings/clock/at91.h @@ -1,42 +1,14 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * This header provides constants for AT91 pmc status. * * The constants defined in this header are being used in dts. + * + * Licensed under GPLv2 or later. 
*/ #ifndef _DT_BINDINGS_CLK_AT91_H #define _DT_BINDINGS_CLK_AT91_H -#define PMC_TYPE_CORE 0 -#define PMC_TYPE_SYSTEM 1 -#define PMC_TYPE_PERIPHERAL 2 -#define PMC_TYPE_GCK 3 -#define PMC_TYPE_PROGRAMMABLE 4 - -#define PMC_SLOW 0 -#define PMC_MCK 1 -#define PMC_UTMI 2 -#define PMC_MAIN 3 -#define PMC_MCK2 4 -#define PMC_I2S0_MUX 5 -#define PMC_I2S1_MUX 6 -#define PMC_PLLACK 7 -#define PMC_PLLBCK 8 -#define PMC_AUDIOPLLCK 9 - -/* SAMA7G5 */ -#define PMC_CPUPLL (PMC_MAIN + 1) -#define PMC_SYSPLL (PMC_MAIN + 2) -#define PMC_DDRPLL (PMC_MAIN + 3) -#define PMC_IMGPLL (PMC_MAIN + 4) -#define PMC_BAUDPLL (PMC_MAIN + 5) -#define PMC_AUDIOPMCPLL (PMC_MAIN + 6) -#define PMC_AUDIOIOPLL (PMC_MAIN + 7) -#define PMC_ETHPLL (PMC_MAIN + 8) -#define PMC_CPU (PMC_MAIN + 9) - -#ifndef AT91_PMC_MOSCS #define AT91_PMC_MOSCS 0 /* MOSCS Flag */ #define AT91_PMC_LOCKA 1 /* PLLA Lock */ #define AT91_PMC_LOCKB 2 /* PLLB Lock */ @@ -47,6 +19,5 @@ #define AT91_PMC_MOSCRCS 17 /* Main On-Chip RC */ #define AT91_PMC_CFDEV 18 /* Clock Failure Detector Event */ #define AT91_PMC_GCKRDY 24 /* Generated Clocks */ -#endif #endif diff --git a/include/dt-bindings/clock/ath79-clk.h b/include/dt-bindings/clock/ath79-clk.h index eec8f399b9..27359ad839 100644 --- a/include/dt-bindings/clock/ath79-clk.h +++ b/include/dt-bindings/clock/ath79-clk.h @@ -1,6 +1,10 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright (C) 2014, 2016 Antony Pavlov + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ * */ #ifndef __DT_BINDINGS_ATH79_CLK_H @@ -9,9 +13,7 @@ #define ATH79_CLK_CPU 0 #define ATH79_CLK_DDR 1 #define ATH79_CLK_AHB 2 -#define ATH79_CLK_REF 3 -#define ATH79_CLK_MDIO 4 -#define ATH79_CLK_END 5 +#define ATH79_CLK_END 3 #endif /* __DT_BINDINGS_ATH79_CLK_H */ diff --git a/include/dt-bindings/clock/axis,artpec6-clkctrl.h b/include/dt-bindings/clock/axis,artpec6-clkctrl.h index b1f4971642..f9f04dccc9 100644 --- a/include/dt-bindings/clock/axis,artpec6-clkctrl.h +++ b/include/dt-bindings/clock/axis,artpec6-clkctrl.h @@ -1,8 +1,11 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * ARTPEC-6 clock controller indexes * * Copyright 2016 Axis Comunications AB. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. */ #ifndef DT_BINDINGS_CLK_ARTPEC6_CLKCTRL_H diff --git a/include/dt-bindings/clock/bcm2835-aux.h b/include/dt-bindings/clock/bcm2835-aux.h index bb79de383a..d91156e265 100644 --- a/include/dt-bindings/clock/bcm2835-aux.h +++ b/include/dt-bindings/clock/bcm2835-aux.h @@ -1,6 +1,14 @@ -/* SPDX-License-Identifier: GPL-2.0 */ /* * Copyright (C) 2015 Broadcom Corporation + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation version 2. + * + * This program is distributed "as is" WITHOUT ANY WARRANTY of any + * kind, whether express or implied; without even the implied warranty + * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
*/ #define BCM2835_AUX_CLOCK_UART 0 diff --git a/include/dt-bindings/clock/bcm2835.h b/include/dt-bindings/clock/bcm2835.h index b60c03430c..360e00cefd 100644 --- a/include/dt-bindings/clock/bcm2835.h +++ b/include/dt-bindings/clock/bcm2835.h @@ -1,6 +1,14 @@ -/* SPDX-License-Identifier: GPL-2.0 */ /* * Copyright (C) 2015 Broadcom Corporation + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation version 2. + * + * This program is distributed "as is" WITHOUT ANY WARRANTY of any + * kind, whether express or implied; without even the implied warranty + * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. */ #define BCM2835_PLLA 0 @@ -56,7 +64,3 @@ #define BCM2835_CLOCK_CAM1 46 #define BCM2835_CLOCK_DSI0E 47 #define BCM2835_CLOCK_DSI1E 48 -#define BCM2835_CLOCK_DSI0P 49 -#define BCM2835_CLOCK_DSI1P 50 - -#define BCM2711_CLOCK_EMMC2 51 diff --git a/include/dt-bindings/clock/berlin2.h b/include/dt-bindings/clock/berlin2.h index b07b8efab0..0c30800175 100644 --- a/include/dt-bindings/clock/berlin2.h +++ b/include/dt-bindings/clock/berlin2.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ /* * Berlin2 BG2/BG2CD clock tree IDs */ diff --git a/include/dt-bindings/clock/berlin2q.h b/include/dt-bindings/clock/berlin2q.h index 44b4ac3828..72eaf91c9c 100644 --- a/include/dt-bindings/clock/berlin2q.h +++ b/include/dt-bindings/clock/berlin2q.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ /* * Berlin2 BG2Q clock tree IDs */ diff --git a/include/dt-bindings/clock/clps711x-clock.h b/include/dt-bindings/clock/clps711x-clock.h index 55b403d8b4..0c4c80b632 100644 --- a/include/dt-bindings/clock/clps711x-clock.h +++ b/include/dt-bindings/clock/clps711x-clock.h @@ -1,6 +1,10 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright (C) 2014 Alexander Shiyan + * + * This program 
is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * */ #ifndef __DT_BINDINGS_CLOCK_CLPS711X_H diff --git a/include/dt-bindings/clock/efm32-cmu.h b/include/dt-bindings/clock/efm32-cmu.h index 4b48d15fe1..b21b91e736 100644 --- a/include/dt-bindings/clock/efm32-cmu.h +++ b/include/dt-bindings/clock/efm32-cmu.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef __DT_BINDINGS_CLOCK_EFM32_CMU_H #define __DT_BINDINGS_CLOCK_EFM32_CMU_H diff --git a/include/dt-bindings/clock/exynos-audss-clk.h b/include/dt-bindings/clock/exynos-audss-clk.h index eee9fcc6e6..0ae6f5a75d 100644 --- a/include/dt-bindings/clock/exynos-audss-clk.h +++ b/include/dt-bindings/clock/exynos-audss-clk.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ /* * This header provides constants for Samsung audio subsystem * clock controller. diff --git a/include/dt-bindings/clock/exynos3250.h b/include/dt-bindings/clock/exynos3250.h index fe8214017b..c796ff02ce 100644 --- a/include/dt-bindings/clock/exynos3250.h +++ b/include/dt-bindings/clock/exynos3250.h @@ -1,8 +1,11 @@ -/* SPDX-License-Identifier: GPL-2.0 */ /* * Copyright (c) 2014 Samsung Electronics Co., Ltd. * Author: Tomasz Figa * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * * Device Tree binding constants for Samsung Exynos3250 clock controllers. */ diff --git a/include/dt-bindings/clock/exynos4.h b/include/dt-bindings/clock/exynos4.h index 88ec3968b9..c40111f36d 100644 --- a/include/dt-bindings/clock/exynos4.h +++ b/include/dt-bindings/clock/exynos4.h @@ -1,10 +1,13 @@ -/* SPDX-License-Identifier: GPL-2.0 */ /* * Copyright (c) 2013 Samsung Electronics Co., Ltd. 
* Author: Andrzej Hajda * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * * Device Tree binding constants for Exynos4 clock controller. - */ +*/ #ifndef _DT_BINDINGS_CLOCK_EXYNOS_4_H #define _DT_BINDINGS_CLOCK_EXYNOS_4_H @@ -187,7 +190,32 @@ #define CLK_MIPI_HSI 349 /* Exynos4210 only */ #define CLK_PIXELASYNCM0 351 #define CLK_PIXELASYNCM1 352 -#define CLK_ASYNC_G3D 353 /* Exynos4x12 only */ +#define CLK_FIMC_LITE0 353 /* Exynos4x12 only */ +#define CLK_FIMC_LITE1 354 /* Exynos4x12 only */ +#define CLK_PPMUISPX 355 /* Exynos4x12 only */ +#define CLK_PPMUISPMX 356 /* Exynos4x12 only */ +#define CLK_FIMC_ISP 357 /* Exynos4x12 only */ +#define CLK_FIMC_DRC 358 /* Exynos4x12 only */ +#define CLK_FIMC_FD 359 /* Exynos4x12 only */ +#define CLK_MCUISP 360 /* Exynos4x12 only */ +#define CLK_GICISP 361 /* Exynos4x12 only */ +#define CLK_SMMU_ISP 362 /* Exynos4x12 only */ +#define CLK_SMMU_DRC 363 /* Exynos4x12 only */ +#define CLK_SMMU_FD 364 /* Exynos4x12 only */ +#define CLK_SMMU_LITE0 365 /* Exynos4x12 only */ +#define CLK_SMMU_LITE1 366 /* Exynos4x12 only */ +#define CLK_MCUCTL_ISP 367 /* Exynos4x12 only */ +#define CLK_MPWM_ISP 368 /* Exynos4x12 only */ +#define CLK_I2C0_ISP 369 /* Exynos4x12 only */ +#define CLK_I2C1_ISP 370 /* Exynos4x12 only */ +#define CLK_MTCADC_ISP 371 /* Exynos4x12 only */ +#define CLK_PWM_ISP 372 /* Exynos4x12 only */ +#define CLK_WDT_ISP 373 /* Exynos4x12 only */ +#define CLK_UART_ISP 374 /* Exynos4x12 only */ +#define CLK_ASYNCAXIM 375 /* Exynos4x12 only */ +#define CLK_SMMU_ISPCX 376 /* Exynos4x12 only */ +#define CLK_SPI0_ISP 377 /* Exynos4x12 only */ +#define CLK_SPI1_ISP 378 /* Exynos4x12 only */ #define CLK_PWM_ISP_SCLK 379 /* Exynos4x12 only */ #define CLK_SPI0_ISP_SCLK 380 /* Exynos4x12 only */ #define CLK_SPI1_ISP_SCLK 381 /* Exynos4x12 only */ @@ -229,6 +257,10 @@ #define CLK_PPMUACP 415 
/* div clocks */ +#define CLK_DIV_ISP0 450 /* Exynos4x12 only */ +#define CLK_DIV_ISP1 451 /* Exynos4x12 only */ +#define CLK_DIV_MCUISP0 452 /* Exynos4x12 only */ +#define CLK_DIV_MCUISP1 453 /* Exynos4x12 only */ #define CLK_DIV_ACLK200 454 /* Exynos4x12 only */ #define CLK_DIV_ACLK400_MCUISP 455 /* Exynos4x12 only */ #define CLK_DIV_ACP 456 @@ -240,39 +272,4 @@ /* must be greater than maximal clock id */ #define CLK_NR_CLKS 461 -/* Exynos4x12 ISP clocks */ -#define CLK_ISP_FIMC_ISP 1 -#define CLK_ISP_FIMC_DRC 2 -#define CLK_ISP_FIMC_FD 3 -#define CLK_ISP_FIMC_LITE0 4 -#define CLK_ISP_FIMC_LITE1 5 -#define CLK_ISP_MCUISP 6 -#define CLK_ISP_GICISP 7 -#define CLK_ISP_SMMU_ISP 8 -#define CLK_ISP_SMMU_DRC 9 -#define CLK_ISP_SMMU_FD 10 -#define CLK_ISP_SMMU_LITE0 11 -#define CLK_ISP_SMMU_LITE1 12 -#define CLK_ISP_PPMUISPMX 13 -#define CLK_ISP_PPMUISPX 14 -#define CLK_ISP_MCUCTL_ISP 15 -#define CLK_ISP_MPWM_ISP 16 -#define CLK_ISP_I2C0_ISP 17 -#define CLK_ISP_I2C1_ISP 18 -#define CLK_ISP_MTCADC_ISP 19 -#define CLK_ISP_PWM_ISP 20 -#define CLK_ISP_WDT_ISP 21 -#define CLK_ISP_UART_ISP 22 -#define CLK_ISP_ASYNCAXIM 23 -#define CLK_ISP_SMMU_ISPCX 24 -#define CLK_ISP_SPI0_ISP 25 -#define CLK_ISP_SPI1_ISP 26 - -#define CLK_ISP_DIV_ISP0 27 -#define CLK_ISP_DIV_ISP1 28 -#define CLK_ISP_DIV_MCUISP0 29 -#define CLK_ISP_DIV_MCUISP1 30 - -#define CLK_NR_ISP_CLKS 31 - #endif /* _DT_BINDINGS_CLOCK_EXYNOS_4_H */ diff --git a/include/dt-bindings/clock/exynos4415.h b/include/dt-bindings/clock/exynos4415.h new file mode 100644 index 0000000000..7eed551007 --- /dev/null +++ b/include/dt-bindings/clock/exynos4415.h @@ -0,0 +1,360 @@ +/* + * Copyright (c) 2014 Samsung Electronics Co., Ltd. + * Author: Chanwoo Choi + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * Device Tree binding constants for Samsung Exynos4415 clock controllers. 
+ */ + +#ifndef _DT_BINDINGS_CLOCK_SAMSUNG_EXYNOS4415_CLOCK_H +#define _DT_BINDINGS_CLOCK_SAMSUNG_EXYNOS4415_CLOCK_H + +/* + * Let each exported clock get a unique index, which is used on DT-enabled + * platforms to lookup the clock from a clock specifier. These indices are + * therefore considered an ABI and so must not be changed. This implies + * that new clocks should be added either in free spaces between clock groups + * or at the end. + */ + +/* + * Main CMU + */ + +#define CLK_OSCSEL 1 +#define CLK_FIN_PLL 2 +#define CLK_FOUT_APLL 3 +#define CLK_FOUT_MPLL 4 +#define CLK_FOUT_EPLL 5 +#define CLK_FOUT_G3D_PLL 6 +#define CLK_FOUT_ISP_PLL 7 +#define CLK_FOUT_DISP_PLL 8 + +/* Muxes */ +#define CLK_MOUT_MPLL_USER_L 16 +#define CLK_MOUT_GDL 17 +#define CLK_MOUT_MPLL_USER_R 18 +#define CLK_MOUT_GDR 19 +#define CLK_MOUT_EBI 20 +#define CLK_MOUT_ACLK_200 21 +#define CLK_MOUT_ACLK_160 22 +#define CLK_MOUT_ACLK_100 23 +#define CLK_MOUT_ACLK_266 24 +#define CLK_MOUT_G3D_PLL 25 +#define CLK_MOUT_EPLL 26 +#define CLK_MOUT_EBI_1 27 +#define CLK_MOUT_ISP_PLL 28 +#define CLK_MOUT_DISP_PLL 29 +#define CLK_MOUT_MPLL_USER_T 30 +#define CLK_MOUT_ACLK_400_MCUISP 31 +#define CLK_MOUT_G3D_PLLSRC 32 +#define CLK_MOUT_CSIS1 33 +#define CLK_MOUT_CSIS0 34 +#define CLK_MOUT_CAM1 35 +#define CLK_MOUT_FIMC3_LCLK 36 +#define CLK_MOUT_FIMC2_LCLK 37 +#define CLK_MOUT_FIMC1_LCLK 38 +#define CLK_MOUT_FIMC0_LCLK 39 +#define CLK_MOUT_MFC 40 +#define CLK_MOUT_MFC_1 41 +#define CLK_MOUT_MFC_0 42 +#define CLK_MOUT_G3D 43 +#define CLK_MOUT_G3D_1 44 +#define CLK_MOUT_G3D_0 45 +#define CLK_MOUT_MIPI0 46 +#define CLK_MOUT_FIMD0 47 +#define CLK_MOUT_TSADC_ISP 48 +#define CLK_MOUT_UART_ISP 49 +#define CLK_MOUT_SPI1_ISP 50 +#define CLK_MOUT_SPI0_ISP 51 +#define CLK_MOUT_PWM_ISP 52 +#define CLK_MOUT_AUDIO0 53 +#define CLK_MOUT_TSADC 54 +#define CLK_MOUT_MMC2 55 +#define CLK_MOUT_MMC1 56 +#define CLK_MOUT_MMC0 57 +#define CLK_MOUT_UART3 58 +#define CLK_MOUT_UART2 59 +#define CLK_MOUT_UART1 60 +#define 
CLK_MOUT_UART0 61 +#define CLK_MOUT_SPI2 62 +#define CLK_MOUT_SPI1 63 +#define CLK_MOUT_SPI0 64 +#define CLK_MOUT_SPDIF 65 +#define CLK_MOUT_AUDIO2 66 +#define CLK_MOUT_AUDIO1 67 +#define CLK_MOUT_MPLL_USER_C 68 +#define CLK_MOUT_HPM 69 +#define CLK_MOUT_CORE 70 +#define CLK_MOUT_APLL 71 +#define CLK_MOUT_PXLASYNC_CSIS1_FIMC 72 +#define CLK_MOUT_PXLASYNC_CSIS0_FIMC 73 +#define CLK_MOUT_JPEG 74 +#define CLK_MOUT_JPEG1 75 +#define CLK_MOUT_JPEG0 76 +#define CLK_MOUT_ACLK_ISP0_300 77 +#define CLK_MOUT_ACLK_ISP0_400 78 +#define CLK_MOUT_ACLK_ISP0_300_USER 79 +#define CLK_MOUT_ACLK_ISP1_300 80 +#define CLK_MOUT_ACLK_ISP1_300_USER 81 +#define CLK_MOUT_HDMI 82 + +/* Dividers */ +#define CLK_DIV_GPL 90 +#define CLK_DIV_GDL 91 +#define CLK_DIV_GPR 92 +#define CLK_DIV_GDR 93 +#define CLK_DIV_ACLK_400_MCUISP 94 +#define CLK_DIV_EBI 95 +#define CLK_DIV_ACLK_200 96 +#define CLK_DIV_ACLK_160 97 +#define CLK_DIV_ACLK_100 98 +#define CLK_DIV_ACLK_266 99 +#define CLK_DIV_CSIS1 100 +#define CLK_DIV_CSIS0 101 +#define CLK_DIV_CAM1 102 +#define CLK_DIV_FIMC3_LCLK 103 +#define CLK_DIV_FIMC2_LCLK 104 +#define CLK_DIV_FIMC1_LCLK 105 +#define CLK_DIV_FIMC0_LCLK 106 +#define CLK_DIV_TV_BLK 107 +#define CLK_DIV_MFC 108 +#define CLK_DIV_G3D 109 +#define CLK_DIV_MIPI0_PRE 110 +#define CLK_DIV_MIPI0 111 +#define CLK_DIV_FIMD0 112 +#define CLK_DIV_UART_ISP 113 +#define CLK_DIV_SPI1_ISP_PRE 114 +#define CLK_DIV_SPI1_ISP 115 +#define CLK_DIV_SPI0_ISP_PRE 116 +#define CLK_DIV_SPI0_ISP 117 +#define CLK_DIV_PWM_ISP 118 +#define CLK_DIV_PCM0 119 +#define CLK_DIV_AUDIO0 120 +#define CLK_DIV_TSADC_PRE 121 +#define CLK_DIV_TSADC 122 +#define CLK_DIV_MMC1_PRE 123 +#define CLK_DIV_MMC1 124 +#define CLK_DIV_MMC0_PRE 125 +#define CLK_DIV_MMC0 126 +#define CLK_DIV_MMC2_PRE 127 +#define CLK_DIV_MMC2 128 +#define CLK_DIV_UART3 129 +#define CLK_DIV_UART2 130 +#define CLK_DIV_UART1 131 +#define CLK_DIV_UART0 132 +#define CLK_DIV_SPI1_PRE 133 +#define CLK_DIV_SPI1 134 +#define CLK_DIV_SPI0_PRE 135 +#define 
CLK_DIV_SPI0 136 +#define CLK_DIV_SPI2_PRE 137 +#define CLK_DIV_SPI2 138 +#define CLK_DIV_PCM2 139 +#define CLK_DIV_AUDIO2 140 +#define CLK_DIV_PCM1 141 +#define CLK_DIV_AUDIO1 142 +#define CLK_DIV_I2S1 143 +#define CLK_DIV_PXLASYNC_CSIS1_FIMC 144 +#define CLK_DIV_PXLASYNC_CSIS0_FIMC 145 +#define CLK_DIV_JPEG 146 +#define CLK_DIV_CORE2 147 +#define CLK_DIV_APLL 148 +#define CLK_DIV_PCLK_DBG 149 +#define CLK_DIV_ATB 150 +#define CLK_DIV_PERIPH 151 +#define CLK_DIV_COREM1 152 +#define CLK_DIV_COREM0 153 +#define CLK_DIV_CORE 154 +#define CLK_DIV_HPM 155 +#define CLK_DIV_COPY 156 + +/* Gates */ +#define CLK_ASYNC_G3D 180 +#define CLK_ASYNC_MFCL 181 +#define CLK_ASYNC_TVX 182 +#define CLK_PPMULEFT 183 +#define CLK_GPIO_LEFT 184 +#define CLK_PPMUIMAGE 185 +#define CLK_QEMDMA2 186 +#define CLK_QEROTATOR 187 +#define CLK_SMMUMDMA2 188 +#define CLK_SMMUROTATOR 189 +#define CLK_MDMA2 190 +#define CLK_ROTATOR 191 +#define CLK_ASYNC_ISPMX 192 +#define CLK_ASYNC_MAUDIOX 193 +#define CLK_ASYNC_MFCR 194 +#define CLK_ASYNC_FSYSD 195 +#define CLK_ASYNC_LCD0X 196 +#define CLK_ASYNC_CAMX 197 +#define CLK_PPMURIGHT 198 +#define CLK_GPIO_RIGHT 199 +#define CLK_ANTIRBK_APBIF 200 +#define CLK_EFUSE_WRITER_APBIF 201 +#define CLK_MONOCNT 202 +#define CLK_TZPC6 203 +#define CLK_PROVISIONKEY1 204 +#define CLK_PROVISIONKEY0 205 +#define CLK_CMU_ISPPART 206 +#define CLK_TMU_APBIF 207 +#define CLK_KEYIF 208 +#define CLK_RTC 209 +#define CLK_WDT 210 +#define CLK_MCT 211 +#define CLK_SECKEY 212 +#define CLK_HDMI_CEC 213 +#define CLK_TZPC5 214 +#define CLK_TZPC4 215 +#define CLK_TZPC3 216 +#define CLK_TZPC2 217 +#define CLK_TZPC1 218 +#define CLK_TZPC0 219 +#define CLK_CMU_COREPART 220 +#define CLK_CMU_TOPPART 221 +#define CLK_PMU_APBIF 222 +#define CLK_SYSREG 223 +#define CLK_CHIP_ID 224 +#define CLK_SMMUFIMC_LITE2 225 +#define CLK_FIMC_LITE2 226 +#define CLK_PIXELASYNCM1 227 +#define CLK_PIXELASYNCM0 228 +#define CLK_PPMUCAMIF 229 +#define CLK_SMMUJPEG 230 +#define CLK_SMMUFIMC3 231 +#define 
CLK_SMMUFIMC2 232 +#define CLK_SMMUFIMC1 233 +#define CLK_SMMUFIMC0 234 +#define CLK_JPEG 235 +#define CLK_CSIS1 236 +#define CLK_CSIS0 237 +#define CLK_FIMC3 238 +#define CLK_FIMC2 239 +#define CLK_FIMC1 240 +#define CLK_FIMC0 241 +#define CLK_PPMUTV 242 +#define CLK_SMMUTV 243 +#define CLK_HDMI 244 +#define CLK_MIXER 245 +#define CLK_VP 246 +#define CLK_PPMUMFC_R 247 +#define CLK_PPMUMFC_L 248 +#define CLK_SMMUMFC_R 249 +#define CLK_SMMUMFC_L 250 +#define CLK_MFC 251 +#define CLK_PPMUG3D 252 +#define CLK_G3D 253 +#define CLK_PPMULCD0 254 +#define CLK_SMMUFIMD0 255 +#define CLK_DSIM0 256 +#define CLK_SMIES 257 +#define CLK_MIE0 258 +#define CLK_FIMD0 259 +#define CLK_TSADC 260 +#define CLK_PPMUFILE 261 +#define CLK_NFCON 262 +#define CLK_USBDEVICE 263 +#define CLK_USBHOST 264 +#define CLK_SROMC 265 +#define CLK_SDMMC2 266 +#define CLK_SDMMC1 267 +#define CLK_SDMMC0 268 +#define CLK_PDMA1 269 +#define CLK_PDMA0 270 +#define CLK_SPDIF 271 +#define CLK_PWM 272 +#define CLK_PCM2 273 +#define CLK_PCM1 274 +#define CLK_I2S1 275 +#define CLK_SPI2 276 +#define CLK_SPI1 277 +#define CLK_SPI0 278 +#define CLK_I2CHDMI 279 +#define CLK_I2C7 280 +#define CLK_I2C6 281 +#define CLK_I2C5 282 +#define CLK_I2C4 283 +#define CLK_I2C3 284 +#define CLK_I2C2 285 +#define CLK_I2C1 286 +#define CLK_I2C0 287 +#define CLK_UART3 288 +#define CLK_UART2 289 +#define CLK_UART1 290 +#define CLK_UART0 291 + +/* Special clocks */ +#define CLK_SCLK_PXLAYSNC_CSIS1_FIMC 330 +#define CLK_SCLK_PXLAYSNC_CSIS0_FIMC 331 +#define CLK_SCLK_JPEG 332 +#define CLK_SCLK_CSIS1 333 +#define CLK_SCLK_CSIS0 334 +#define CLK_SCLK_CAM1 335 +#define CLK_SCLK_FIMC3_LCLK 336 +#define CLK_SCLK_FIMC2_LCLK 337 +#define CLK_SCLK_FIMC1_LCLK 338 +#define CLK_SCLK_FIMC0_LCLK 339 +#define CLK_SCLK_PIXEL 340 +#define CLK_SCLK_HDMI 341 +#define CLK_SCLK_MIXER 342 +#define CLK_SCLK_MFC 343 +#define CLK_SCLK_G3D 344 +#define CLK_SCLK_MIPIDPHY4L 345 +#define CLK_SCLK_MIPI0 346 +#define CLK_SCLK_MDNIE0 347 +#define CLK_SCLK_FIMD0 
348 +#define CLK_SCLK_PCM0 349 +#define CLK_SCLK_AUDIO0 350 +#define CLK_SCLK_TSADC 351 +#define CLK_SCLK_EBI 352 +#define CLK_SCLK_MMC2 353 +#define CLK_SCLK_MMC1 354 +#define CLK_SCLK_MMC0 355 +#define CLK_SCLK_I2S 356 +#define CLK_SCLK_PCM2 357 +#define CLK_SCLK_PCM1 358 +#define CLK_SCLK_AUDIO2 359 +#define CLK_SCLK_AUDIO1 360 +#define CLK_SCLK_SPDIF 361 +#define CLK_SCLK_SPI2 362 +#define CLK_SCLK_SPI1 363 +#define CLK_SCLK_SPI0 364 +#define CLK_SCLK_UART3 365 +#define CLK_SCLK_UART2 366 +#define CLK_SCLK_UART1 367 +#define CLK_SCLK_UART0 368 +#define CLK_SCLK_HDMIPHY 369 + +/* + * Total number of clocks of main CMU. + * NOTE: Must be equal to last clock ID increased by one. + */ +#define CLK_NR_CLKS 370 + +/* + * CMU DMC + */ +#define CLK_DMC_FOUT_MPLL 1 +#define CLK_DMC_FOUT_BPLL 2 + +#define CLK_DMC_MOUT_MPLL 3 +#define CLK_DMC_MOUT_BPLL 4 +#define CLK_DMC_MOUT_DPHY 5 +#define CLK_DMC_MOUT_DMC_BUS 6 + +#define CLK_DMC_DIV_DMC 7 +#define CLK_DMC_DIV_DPHY 8 +#define CLK_DMC_DIV_DMC_PRE 9 +#define CLK_DMC_DIV_DMCP 10 +#define CLK_DMC_DIV_DMCD 11 +#define CLK_DMC_DIV_MPLL_PRE 12 + +/* + * Total number of clocks of CMU_DMC. + * NOTE: Must be equal to highest clock ID increased by one. + */ +#define NR_CLKS_DMC 13 + +#endif /* _DT_BINDINGS_CLOCK_SAMSUNG_EXYNOS4415_CLOCK_H */ diff --git a/include/dt-bindings/clock/exynos5250.h b/include/dt-bindings/clock/exynos5250.h index e259cc01f2..15508adcdf 100644 --- a/include/dt-bindings/clock/exynos5250.h +++ b/include/dt-bindings/clock/exynos5250.h @@ -1,10 +1,13 @@ -/* SPDX-License-Identifier: GPL-2.0 */ /* * Copyright (c) 2013 Samsung Electronics Co., Ltd. * Author: Andrzej Hajda * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * * Device Tree binding constants for Exynos5250 clock controller. 
- */ +*/ #ifndef _DT_BINDINGS_CLOCK_EXYNOS_5250_H #define _DT_BINDINGS_CLOCK_EXYNOS_5250_H @@ -172,10 +175,8 @@ #define CLK_MOUT_GPLL 1025 #define CLK_MOUT_ACLK200_DISP1_SUB 1026 #define CLK_MOUT_ACLK300_DISP1_SUB 1027 -#define CLK_MOUT_APLL 1028 -#define CLK_MOUT_MPLL 1029 /* must be greater than maximal clock id */ -#define CLK_NR_CLKS 1030 +#define CLK_NR_CLKS 1028 #endif /* _DT_BINDINGS_CLOCK_EXYNOS_5250_H */ diff --git a/include/dt-bindings/clock/exynos5260-clk.h b/include/dt-bindings/clock/exynos5260-clk.h index 98a58cbd81..a4bac9a176 100644 --- a/include/dt-bindings/clock/exynos5260-clk.h +++ b/include/dt-bindings/clock/exynos5260-clk.h @@ -1,10 +1,13 @@ -/* SPDX-License-Identifier: GPL-2.0 */ /* * Copyright (c) 2014 Samsung Electronics Co., Ltd. * Author: Rahul Sharma * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * * Provides Constants for Exynos5260 clocks. - */ +*/ #ifndef _DT_BINDINGS_CLK_EXYNOS5260_H #define _DT_BINDINGS_CLK_EXYNOS5260_H diff --git a/include/dt-bindings/clock/exynos5410.h b/include/dt-bindings/clock/exynos5410.h index 86c2ad56c5..6cb4e90f81 100644 --- a/include/dt-bindings/clock/exynos5410.h +++ b/include/dt-bindings/clock/exynos5410.h @@ -1,10 +1,13 @@ -/* SPDX-License-Identifier: GPL-2.0 */ /* * Copyright (c) 2014 Samsung Electronics Co., Ltd. * Copyright (c) 2016 Krzysztof Kozlowski * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * * Device Tree binding constants for Exynos5421 clock controller. 
- */ +*/ #ifndef _DT_BINDINGS_CLOCK_EXYNOS_5410_H #define _DT_BINDINGS_CLOCK_EXYNOS_5410_H @@ -36,7 +39,6 @@ #define CLK_UART0 257 #define CLK_UART1 258 #define CLK_UART2 259 -#define CLK_UART3 260 #define CLK_I2C0 261 #define CLK_I2C1 262 #define CLK_I2C2 263 @@ -45,7 +47,7 @@ #define CLK_USI1 266 #define CLK_USI2 267 #define CLK_USI3 268 -#define CLK_TSADC 270 +#define CLK_UART3 260 #define CLK_PWM 279 #define CLK_MCT 315 #define CLK_WDT 316 diff --git a/include/dt-bindings/clock/exynos5420.h b/include/dt-bindings/clock/exynos5420.h index 9fffc6ceaa..6fd21c2914 100644 --- a/include/dt-bindings/clock/exynos5420.h +++ b/include/dt-bindings/clock/exynos5420.h @@ -1,10 +1,13 @@ -/* SPDX-License-Identifier: GPL-2.0 */ /* * Copyright (c) 2013 Samsung Electronics Co., Ltd. * Author: Andrzej Hajda * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * * Device Tree binding constants for Exynos5420 clock controller. 
- */ +*/ #ifndef _DT_BINDINGS_CLOCK_EXYNOS_5420_H #define _DT_BINDINGS_CLOCK_EXYNOS_5420_H @@ -60,7 +63,6 @@ #define CLK_MAU_EPLL 159 #define CLK_SCLK_HSIC_12M 160 #define CLK_SCLK_MPHY_IXTAL24 161 -#define CLK_SCLK_BPLL 162 /* gate clocks */ #define CLK_UART0 257 @@ -196,16 +198,6 @@ #define CLK_ACLK432_CAM 518 #define CLK_ACLK_FL1550_CAM 519 #define CLK_ACLK550_CAM 520 -#define CLK_CLKM_PHY0 521 -#define CLK_CLKM_PHY1 522 -#define CLK_ACLK_PPMU_DREX0_0 523 -#define CLK_ACLK_PPMU_DREX0_1 524 -#define CLK_ACLK_PPMU_DREX1_0 525 -#define CLK_ACLK_PPMU_DREX1_1 526 -#define CLK_PCLK_PPMU_DREX0_0 527 -#define CLK_PCLK_PPMU_DREX0_1 528 -#define CLK_PCLK_PPMU_DREX1_0 529 -#define CLK_PCLK_PPMU_DREX1_1 530 /* mux clocks */ #define CLK_MOUT_HDMI 640 @@ -225,17 +217,6 @@ #define CLK_MOUT_MCLK_CDREX 654 #define CLK_MOUT_BPLL 655 #define CLK_MOUT_MX_MSPLL_CCORE 656 -#define CLK_MOUT_EPLL 657 -#define CLK_MOUT_MAU_EPLL 658 -#define CLK_MOUT_USER_MAU_EPLL 659 -#define CLK_MOUT_SCLK_SPLL 660 -#define CLK_MOUT_MX_MSPLL_CCORE_PHY 661 -#define CLK_MOUT_SW_ACLK_G3D 662 -#define CLK_MOUT_APLL 663 -#define CLK_MOUT_MSPLL_CPU 664 -#define CLK_MOUT_KPLL 665 -#define CLK_MOUT_MSPLL_KFC 666 - /* divider clocks */ #define CLK_DOUT_PIXEL 768 @@ -267,11 +248,8 @@ #define CLK_DOUT_CCLK_DREX0 794 #define CLK_DOUT_CLK2X_PHY0 795 #define CLK_DOUT_PCLK_CORE_MEM 796 -#define CLK_FF_DOUT_SPLL2 797 -#define CLK_DOUT_PCLK_DREX0 798 -#define CLK_DOUT_PCLK_DREX1 799 /* must be greater than maximal clock id */ -#define CLK_NR_CLKS 800 +#define CLK_NR_CLKS 797 #endif /* _DT_BINDINGS_CLOCK_EXYNOS_5420_H */ diff --git a/include/dt-bindings/clock/exynos5433.h b/include/dt-bindings/clock/exynos5433.h index 25ffa53573..4fa6bb2136 100644 --- a/include/dt-bindings/clock/exynos5433.h +++ b/include/dt-bindings/clock/exynos5433.h @@ -1,7 +1,10 @@ -/* SPDX-License-Identifier: GPL-2.0 */ /* * Copyright (c) 2014 Samsung Electronics Co., Ltd. 
* Author: Chanwoo Choi + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. */ #ifndef _DT_BINDINGS_CLOCK_EXYNOS5433_H @@ -156,7 +159,7 @@ #define CLK_ACLK_G2D_266 220 #define CLK_ACLK_G2D_400 221 #define CLK_ACLK_G3D_400 222 -#define CLK_ACLK_IMEM_SSSX_266 223 +#define CLK_ACLK_IMEM_SSX_266 223 #define CLK_ACLK_BUS0_400 224 #define CLK_ACLK_BUS1_400 225 #define CLK_ACLK_IMEM_200 226 @@ -768,10 +771,7 @@ #define CLK_PCLK_DECON 113 -#define CLK_PHYCLK_MIPIDPHY0_BITCLKDIV8_PHY 114 -#define CLK_PHYCLK_MIPIDPHY0_RXCLKESC0_PHY 115 - -#define DISP_NR_CLK 116 +#define DISP_NR_CLK 114 /* CMU_AUD */ #define CLK_MOUT_AUD_PLL_USER 1 @@ -1406,10 +1406,4 @@ #define CAM1_NR_CLK 113 -/* CMU_IMEM */ -#define CLK_ACLK_SLIMSSS 2 -#define CLK_PCLK_SLIMSSS 35 - -#define IMEM_NR_CLK 36 - #endif /* _DT_BINDINGS_CLOCK_EXYNOS5433_H */ diff --git a/include/dt-bindings/clock/exynos5440.h b/include/dt-bindings/clock/exynos5440.h new file mode 100644 index 0000000000..842cdc0adf --- /dev/null +++ b/include/dt-bindings/clock/exynos5440.h @@ -0,0 +1,44 @@ +/* + * Copyright (c) 2013 Samsung Electronics Co., Ltd. + * Author: Andrzej Hajda + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * Device Tree binding constants for Exynos5440 clock controller. 
+*/ + +#ifndef _DT_BINDINGS_CLOCK_EXYNOS_5440_H +#define _DT_BINDINGS_CLOCK_EXYNOS_5440_H + +#define CLK_XTAL 1 +#define CLK_ARM_CLK 2 +#define CLK_CPLLA 3 +#define CLK_CPLLB 4 +#define CLK_SPI_BAUD 16 +#define CLK_PB0_250 17 +#define CLK_PR0_250 18 +#define CLK_PR1_250 19 +#define CLK_B_250 20 +#define CLK_B_125 21 +#define CLK_B_200 22 +#define CLK_SATA 23 +#define CLK_USB 24 +#define CLK_GMAC0 25 +#define CLK_CS250 26 +#define CLK_PB0_250_O 27 +#define CLK_PR0_250_O 28 +#define CLK_PR1_250_O 29 +#define CLK_B_250_O 30 +#define CLK_B_125_O 31 +#define CLK_B_200_O 32 +#define CLK_SATA_O 33 +#define CLK_USB_O 34 +#define CLK_GMAC0_O 35 +#define CLK_CS250_O 36 + +/* must be greater than maximal clock id */ +#define CLK_NR_CLKS 37 + +#endif /* _DT_BINDINGS_CLOCK_EXYNOS_5440_H */ diff --git a/include/dt-bindings/clock/exynos7-clk.h b/include/dt-bindings/clock/exynos7-clk.h index fce33c7050..10c5586110 100644 --- a/include/dt-bindings/clock/exynos7-clk.h +++ b/include/dt-bindings/clock/exynos7-clk.h @@ -1,8 +1,11 @@ -/* SPDX-License-Identifier: GPL-2.0 */ /* * Copyright (c) 2014 Samsung Electronics Co., Ltd. * Author: Naveen Krishna Ch - */ + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+*/ #ifndef _DT_BINDINGS_CLOCK_EXYNOS7_H #define _DT_BINDINGS_CLOCK_EXYNOS7_H diff --git a/include/dt-bindings/clock/gxbb-aoclkc.h b/include/dt-bindings/clock/gxbb-aoclkc.h index ec3b26319f..31751482d1 100644 --- a/include/dt-bindings/clock/gxbb-aoclkc.h +++ b/include/dt-bindings/clock/gxbb-aoclkc.h @@ -62,13 +62,5 @@ #define CLKID_AO_UART1 3 #define CLKID_AO_UART2 4 #define CLKID_AO_IR_BLASTER 5 -#define CLKID_AO_CEC_32K 6 -#define CLKID_AO_CTS_OSCIN 7 -#define CLKID_AO_32K_PRE 8 -#define CLKID_AO_32K_DIV 9 -#define CLKID_AO_32K_SEL 10 -#define CLKID_AO_32K 11 -#define CLKID_AO_CTS_RTC_OSCIN 12 -#define CLKID_AO_CLK81 13 #endif diff --git a/include/dt-bindings/clock/gxbb-clkc.h b/include/dt-bindings/clock/gxbb-clkc.h index 4073eb7a9d..baade6f429 100644 --- a/include/dt-bindings/clock/gxbb-clkc.h +++ b/include/dt-bindings/clock/gxbb-clkc.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ /* * GXBB clock tree IDs */ @@ -6,146 +5,24 @@ #ifndef __GXBB_CLKC_H #define __GXBB_CLKC_H -#define CLKID_SYS_PLL 0 +#define CLKID_CPUCLK 1 #define CLKID_HDMI_PLL 2 -#define CLKID_FIXED_PLL 3 #define CLKID_FCLK_DIV2 4 #define CLKID_FCLK_DIV3 5 #define CLKID_FCLK_DIV4 6 -#define CLKID_FCLK_DIV5 7 -#define CLKID_FCLK_DIV7 8 -#define CLKID_GP0_PLL 9 #define CLKID_CLK81 12 -#define CLKID_MPLL0 13 -#define CLKID_MPLL1 14 #define CLKID_MPLL2 15 -#define CLKID_DDR 16 -#define CLKID_DOS 17 -#define CLKID_ISA 18 -#define CLKID_PL301 19 -#define CLKID_PERIPHS 20 -#define CLKID_SPICC 21 -#define CLKID_I2C 22 -#define CLKID_SAR_ADC 23 -#define CLKID_SMART_CARD 24 -#define CLKID_RNG0 25 -#define CLKID_UART0 26 -#define CLKID_SDHC 27 -#define CLKID_STREAM 28 -#define CLKID_ASYNC_FIFO 29 -#define CLKID_SDIO 30 -#define CLKID_ABUF 31 -#define CLKID_HIU_IFACE 32 -#define CLKID_ASSIST_MISC 33 #define CLKID_SPI 34 +#define CLKID_I2C 22 #define CLKID_ETH 36 -#define CLKID_I2S_SPDIF 35 -#define CLKID_DEMUX 37 -#define CLKID_AIU_GLUE 38 -#define CLKID_IEC958 39 -#define CLKID_I2S_OUT 40 -#define 
CLKID_AMCLK 41 -#define CLKID_AIFIFO2 42 -#define CLKID_MIXER 43 -#define CLKID_MIXER_IFACE 44 -#define CLKID_ADC 45 -#define CLKID_BLKMV 46 -#define CLKID_AIU 47 -#define CLKID_UART1 48 -#define CLKID_G2D 49 #define CLKID_USB0 50 #define CLKID_USB1 51 -#define CLKID_RESET 52 -#define CLKID_NAND 53 -#define CLKID_DOS_PARSER 54 #define CLKID_USB 55 -#define CLKID_VDIN1 56 -#define CLKID_AHB_ARB0 57 -#define CLKID_EFUSE 58 -#define CLKID_BOOT_ROM 59 -#define CLKID_AHB_DATA_BUS 60 -#define CLKID_AHB_CTRL_BUS 61 -#define CLKID_HDMI_INTR_SYNC 62 -#define CLKID_HDMI_PCLK 63 #define CLKID_USB1_DDR_BRIDGE 64 #define CLKID_USB0_DDR_BRIDGE 65 -#define CLKID_MMC_PCLK 66 -#define CLKID_DVIN 67 -#define CLKID_UART2 68 -#define CLKID_SANA 69 -#define CLKID_VPU_INTR 70 -#define CLKID_SEC_AHB_AHB3_BRIDGE 71 -#define CLKID_CLK81_A53 72 -#define CLKID_VCLK2_VENCI0 73 -#define CLKID_VCLK2_VENCI1 74 -#define CLKID_VCLK2_VENCP0 75 -#define CLKID_VCLK2_VENCP1 76 -#define CLKID_GCLK_VENCI_INT0 77 -#define CLKID_GCLK_VENCI_INT 78 -#define CLKID_DAC_CLK 79 -#define CLKID_AOCLK_GATE 80 -#define CLKID_IEC958_GATE 81 -#define CLKID_ENC480P 82 -#define CLKID_RNG1 83 -#define CLKID_GCLK_VENCI_INT1 84 -#define CLKID_VCLK2_VENCLMCC 85 -#define CLKID_VCLK2_VENCL 86 -#define CLKID_VCLK_OTHER 87 -#define CLKID_EDP 88 -#define CLKID_AO_MEDIA_CPU 89 -#define CLKID_AO_AHB_SRAM 90 -#define CLKID_AO_AHB_BUS 91 -#define CLKID_AO_IFACE 92 #define CLKID_AO_I2C 93 #define CLKID_SD_EMMC_A 94 #define CLKID_SD_EMMC_B 95 #define CLKID_SD_EMMC_C 96 -#define CLKID_SAR_ADC_CLK 97 -#define CLKID_SAR_ADC_SEL 98 -#define CLKID_MALI_0_SEL 100 -#define CLKID_MALI_0 102 -#define CLKID_MALI_1_SEL 103 -#define CLKID_MALI_1 105 -#define CLKID_MALI 106 -#define CLKID_CTS_AMCLK 107 -#define CLKID_CTS_MCLK_I958 110 -#define CLKID_CTS_I958 113 -#define CLKID_32K_CLK 114 -#define CLKID_SD_EMMC_A_CLK0 119 -#define CLKID_SD_EMMC_B_CLK0 122 -#define CLKID_SD_EMMC_C_CLK0 125 -#define CLKID_VPU_0_SEL 126 -#define CLKID_VPU_0 128 
-#define CLKID_VPU_1_SEL 129 -#define CLKID_VPU_1 131 -#define CLKID_VPU 132 -#define CLKID_VAPB_0_SEL 133 -#define CLKID_VAPB_0 135 -#define CLKID_VAPB_1_SEL 136 -#define CLKID_VAPB_1 138 -#define CLKID_VAPB_SEL 139 -#define CLKID_VAPB 140 -#define CLKID_VDEC_1 153 -#define CLKID_VDEC_HEVC 156 -#define CLKID_GEN_CLK 159 -#define CLKID_VID_PLL 166 -#define CLKID_VCLK 175 -#define CLKID_VCLK2 176 -#define CLKID_VCLK_DIV1 185 -#define CLKID_VCLK_DIV2 186 -#define CLKID_VCLK_DIV4 187 -#define CLKID_VCLK_DIV6 188 -#define CLKID_VCLK_DIV12 189 -#define CLKID_VCLK2_DIV1 190 -#define CLKID_VCLK2_DIV2 191 -#define CLKID_VCLK2_DIV4 192 -#define CLKID_VCLK2_DIV6 193 -#define CLKID_VCLK2_DIV12 194 -#define CLKID_CTS_ENCI 199 -#define CLKID_CTS_ENCP 200 -#define CLKID_CTS_VDAC 201 -#define CLKID_HDMI_TX 202 -#define CLKID_HDMI 205 -#define CLKID_ACODEC 206 #endif /* __GXBB_CLKC_H */ diff --git a/include/dt-bindings/clock/hi3519-clock.h b/include/dt-bindings/clock/hi3519-clock.h index 43354105f6..14f4d2184e 100644 --- a/include/dt-bindings/clock/hi3519-clock.h +++ b/include/dt-bindings/clock/hi3519-clock.h @@ -1,6 +1,18 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * Copyright (c) 2015 HiSilicon Technologies Co., Ltd. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . 
*/ #ifndef __DTS_HI3519_CLOCK_H diff --git a/include/dt-bindings/clock/hi3620-clock.h b/include/dt-bindings/clock/hi3620-clock.h index f9dc6f6d30..21b9d0e2eb 100644 --- a/include/dt-bindings/clock/hi3620-clock.h +++ b/include/dt-bindings/clock/hi3620-clock.h @@ -1,10 +1,24 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * Copyright (c) 2012-2013 Hisilicon Limited. * Copyright (c) 2012-2013 Linaro Limited. * * Author: Haojian Zhuang * Xin Li + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. + * */ #ifndef __DTS_HI3620_CLOCK_H diff --git a/include/dt-bindings/clock/hi6220-clock.h b/include/dt-bindings/clock/hi6220-clock.h index 9e40605e61..6b03c84f42 100644 --- a/include/dt-bindings/clock/hi6220-clock.h +++ b/include/dt-bindings/clock/hi6220-clock.h @@ -1,8 +1,11 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright (c) 2015 Hisilicon Limited. * * Author: Bintian Wang + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
*/ #ifndef __DT_BINDINGS_CLOCK_HI6220_H @@ -121,10 +124,7 @@ #define HI6220_CS_DAPB 57 #define HI6220_CS_ATB_DIV 58 -/* gate clock */ -#define HI6220_DAPB_CLK 59 - -#define HI6220_SYS_NR_CLKS 60 +#define HI6220_SYS_NR_CLKS 59 /* clk in Hi6220 media controller */ /* gate clocks */ @@ -171,8 +171,4 @@ #define HI6220_DDRC_AXI1 7 #define HI6220_POWER_NR_CLKS 8 - -/* clk in Hi6220 acpu sctrl */ -#define HI6220_ACPU_SFT_AT_S 0 - #endif diff --git a/include/dt-bindings/clock/hip04-clock.h b/include/dt-bindings/clock/hip04-clock.h index 088d70cd79..695e61cd15 100644 --- a/include/dt-bindings/clock/hip04-clock.h +++ b/include/dt-bindings/clock/hip04-clock.h @@ -1,9 +1,23 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * Copyright (c) 2013-2014 Hisilicon Limited. * Copyright (c) 2013-2014 Linaro Limited. * * Author: Haojian Zhuang + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. + * */ #ifndef __DTS_HIP04_CLOCK_H diff --git a/include/dt-bindings/clock/hix5hd2-clock.h b/include/dt-bindings/clock/hix5hd2-clock.h index 2b8779f1ac..fd29c174ba 100644 --- a/include/dt-bindings/clock/hix5hd2-clock.h +++ b/include/dt-bindings/clock/hix5hd2-clock.h @@ -1,7 +1,10 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright (c) 2014 Linaro Ltd. * Copyright (c) 2014 Hisilicon Limited. 
+ * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. */ #ifndef __DTS_HIX5HD2_CLOCK_H diff --git a/include/dt-bindings/clock/imx1-clock.h b/include/dt-bindings/clock/imx1-clock.h index 3730a46e7c..607bf01a31 100644 --- a/include/dt-bindings/clock/imx1-clock.h +++ b/include/dt-bindings/clock/imx1-clock.h @@ -1,6 +1,10 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright (C) 2014 Alexander Shiyan + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * */ #ifndef __DT_BINDINGS_CLOCK_IMX1_H diff --git a/include/dt-bindings/clock/imx21-clock.h b/include/dt-bindings/clock/imx21-clock.h index 66d0ec5e4c..b13596cf51 100644 --- a/include/dt-bindings/clock/imx21-clock.h +++ b/include/dt-bindings/clock/imx21-clock.h @@ -1,6 +1,10 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright (C) 2014 Alexander Shiyan + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * */ #ifndef __DT_BINDINGS_CLOCK_IMX21_H diff --git a/include/dt-bindings/clock/imx27-clock.h b/include/dt-bindings/clock/imx27-clock.h index 1ff448b803..148b053e54 100644 --- a/include/dt-bindings/clock/imx27-clock.h +++ b/include/dt-bindings/clock/imx27-clock.h @@ -1,6 +1,10 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright (C) 2014 Alexander Shiyan + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ * */ #ifndef __DT_BINDINGS_CLOCK_IMX27_H diff --git a/include/dt-bindings/clock/imx5-clock.h b/include/dt-bindings/clock/imx5-clock.h index bc65e30695..d382fc71aa 100644 --- a/include/dt-bindings/clock/imx5-clock.h +++ b/include/dt-bindings/clock/imx5-clock.h @@ -1,6 +1,10 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright 2013 Lucas Stach, Pengutronix + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * */ #ifndef __DT_BINDINGS_CLOCK_IMX5_H @@ -210,7 +214,6 @@ #define IMX5_CLK_IEEE1588_SEL 202 #define IMX5_CLK_IEEE1588_PODF 203 #define IMX5_CLK_IEEE1588_GATE 204 -#define IMX5_CLK_SCC2_IPG_GATE 205 -#define IMX5_CLK_END 206 +#define IMX5_CLK_END 205 #endif /* __DT_BINDINGS_CLOCK_IMX5_H */ diff --git a/include/dt-bindings/clock/imx6qdl-clock.h b/include/dt-bindings/clock/imx6qdl-clock.h index e20c43cc36..da59fd9cdb 100644 --- a/include/dt-bindings/clock/imx6qdl-clock.h +++ b/include/dt-bindings/clock/imx6qdl-clock.h @@ -1,6 +1,9 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright 2014 Freescale Semiconductor, Inc. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
*/ #ifndef __DT_BINDINGS_CLOCK_IMX6QDL_H @@ -268,11 +271,6 @@ #define IMX6QDL_CLK_PRE_AXI 258 #define IMX6QDL_CLK_MLB_SEL 259 #define IMX6QDL_CLK_MLB_PODF 260 -#define IMX6QDL_CLK_EPIT1 261 -#define IMX6QDL_CLK_EPIT2 262 -#define IMX6QDL_CLK_MMDC_P0_IPG 263 -#define IMX6QDL_CLK_DCIC1 264 -#define IMX6QDL_CLK_DCIC2 265 -#define IMX6QDL_CLK_END 266 +#define IMX6QDL_CLK_END 261 #endif /* __DT_BINDINGS_CLOCK_IMX6QDL_H */ diff --git a/include/dt-bindings/clock/imx6sl-clock.h b/include/dt-bindings/clock/imx6sl-clock.h index 31364d2caa..e14573e293 100644 --- a/include/dt-bindings/clock/imx6sl-clock.h +++ b/include/dt-bindings/clock/imx6sl-clock.h @@ -1,6 +1,10 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright 2013 Freescale Semiconductor, Inc. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * */ #ifndef __DT_BINDINGS_CLOCK_IMX6SL_H @@ -171,8 +175,6 @@ #define IMX6SL_CLK_SSI2_IPG 162 #define IMX6SL_CLK_SSI3_IPG 163 #define IMX6SL_CLK_SPDIF_GCLK 164 -#define IMX6SL_CLK_MMDC_P0_IPG 165 -#define IMX6SL_CLK_MMDC_P1_IPG 166 -#define IMX6SL_CLK_END 167 +#define IMX6SL_CLK_END 165 #endif /* __DT_BINDINGS_CLOCK_IMX6SL_H */ diff --git a/include/dt-bindings/clock/imx6sx-clock.h b/include/dt-bindings/clock/imx6sx-clock.h index 1c64997d61..36f0324902 100644 --- a/include/dt-bindings/clock/imx6sx-clock.h +++ b/include/dt-bindings/clock/imx6sx-clock.h @@ -1,6 +1,10 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright (C) 2014 Freescale Semiconductor, Inc. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ * */ #ifndef __DT_BINDINGS_CLOCK_IMX6SX_H @@ -271,11 +275,6 @@ #define IMX6SX_PLL6_BYPASS 262 #define IMX6SX_PLL7_BYPASS 263 #define IMX6SX_CLK_SPDIF_GCLK 264 -#define IMX6SX_CLK_LVDS2_SEL 265 -#define IMX6SX_CLK_LVDS2_OUT 266 -#define IMX6SX_CLK_LVDS2_IN 267 -#define IMX6SX_CLK_ANACLK2 268 -#define IMX6SX_CLK_MMDC_P1_IPG 269 -#define IMX6SX_CLK_CLK_END 270 +#define IMX6SX_CLK_CLK_END 265 #endif /* __DT_BINDINGS_CLOCK_IMX6SX_H */ diff --git a/include/dt-bindings/clock/imx6ul-clock.h b/include/dt-bindings/clock/imx6ul-clock.h index 79094338e6..fd8aee8f64 100644 --- a/include/dt-bindings/clock/imx6ul-clock.h +++ b/include/dt-bindings/clock/imx6ul-clock.h @@ -1,6 +1,10 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright (C) 2015 Freescale Semiconductor, Inc. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * */ #ifndef __DT_BINDINGS_CLOCK_IMX6UL_H @@ -231,32 +235,7 @@ #define IMX6UL_CLK_CSI_PODF 222 #define IMX6UL_CLK_PLL3_120M 223 #define IMX6UL_CLK_KPP 224 -#define IMX6ULL_CLK_ESAI_PRED 225 -#define IMX6ULL_CLK_ESAI_PODF 226 -#define IMX6ULL_CLK_ESAI_EXTAL 227 -#define IMX6ULL_CLK_ESAI_MEM 228 -#define IMX6ULL_CLK_ESAI_IPG 229 -#define IMX6ULL_CLK_DCP_CLK 230 -#define IMX6ULL_CLK_EPDC_PRE_SEL 231 -#define IMX6ULL_CLK_EPDC_SEL 232 -#define IMX6ULL_CLK_EPDC_PODF 233 -#define IMX6ULL_CLK_EPDC_ACLK 234 -#define IMX6ULL_CLK_EPDC_PIX 235 -#define IMX6ULL_CLK_ESAI_SEL 236 -#define IMX6UL_CLK_CKO1_SEL 237 -#define IMX6UL_CLK_CKO1_PODF 238 -#define IMX6UL_CLK_CKO1 239 -#define IMX6UL_CLK_CKO2_SEL 240 -#define IMX6UL_CLK_CKO2_PODF 241 -#define IMX6UL_CLK_CKO2 242 -#define IMX6UL_CLK_CKO 243 -#define IMX6UL_CLK_GPIO1 244 -#define IMX6UL_CLK_GPIO2 245 -#define IMX6UL_CLK_GPIO3 246 -#define IMX6UL_CLK_GPIO4 247 -#define IMX6UL_CLK_GPIO5 248 -#define IMX6UL_CLK_MMDC_P1_IPG 249 -#define IMX6UL_CLK_END 250 +#define 
IMX6UL_CLK_END 225 #endif /* __DT_BINDINGS_CLOCK_IMX6UL_H */ diff --git a/include/dt-bindings/clock/imx7d-clock.h b/include/dt-bindings/clock/imx7d-clock.h index 1d4c0dfe02..1183347c38 100644 --- a/include/dt-bindings/clock/imx7d-clock.h +++ b/include/dt-bindings/clock/imx7d-clock.h @@ -1,6 +1,10 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright (C) 2014-2015 Freescale Semiconductor, Inc. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * */ #ifndef __DT_BINDINGS_CLOCK_IMX7D_H @@ -76,10 +80,10 @@ #define IMX7D_ARM_M4_ROOT_SRC 67 #define IMX7D_ARM_M4_ROOT_CG 68 #define IMX7D_ARM_M4_ROOT_DIV 69 -#define IMX7D_ARM_M0_ROOT_CLK 70 /* unused */ -#define IMX7D_ARM_M0_ROOT_SRC 71 /* unused */ -#define IMX7D_ARM_M0_ROOT_CG 72 /* unused */ -#define IMX7D_ARM_M0_ROOT_DIV 73 /* unused */ +#define IMX7D_ARM_M0_ROOT_CLK 70 +#define IMX7D_ARM_M0_ROOT_SRC 71 +#define IMX7D_ARM_M0_ROOT_CG 72 +#define IMX7D_ARM_M0_ROOT_DIV 73 #define IMX7D_MAIN_AXI_ROOT_CLK 74 #define IMX7D_MAIN_AXI_ROOT_SRC 75 #define IMX7D_MAIN_AXI_ROOT_CG 76 @@ -164,7 +168,7 @@ #define IMX7D_SPDIF_ROOT_SRC 155 #define IMX7D_SPDIF_ROOT_CG 156 #define IMX7D_SPDIF_ROOT_DIV 157 -#define IMX7D_ENET1_IPG_ROOT_CLK 158 +#define IMX7D_ENET1_REF_ROOT_CLK 158 #define IMX7D_ENET1_REF_ROOT_SRC 159 #define IMX7D_ENET1_REF_ROOT_CG 160 #define IMX7D_ENET1_REF_ROOT_DIV 161 @@ -172,7 +176,7 @@ #define IMX7D_ENET1_TIME_ROOT_SRC 163 #define IMX7D_ENET1_TIME_ROOT_CG 164 #define IMX7D_ENET1_TIME_ROOT_DIV 165 -#define IMX7D_ENET2_IPG_ROOT_CLK 166 +#define IMX7D_ENET2_REF_ROOT_CLK 166 #define IMX7D_ENET2_REF_ROOT_SRC 167 #define IMX7D_ENET2_REF_ROOT_CG 168 #define IMX7D_ENET2_REF_ROOT_DIV 169 @@ -445,12 +449,5 @@ #define IMX7D_ADC_ROOT_CLK 436 #define IMX7D_CLK_ARM 437 #define IMX7D_CKIL 438 -#define IMX7D_OCOTP_CLK 439 -#define IMX7D_NAND_RAWNAND_CLK 440 -#define 
IMX7D_NAND_USDHC_BUS_RAWNAND_CLK 441 -#define IMX7D_SNVS_CLK 442 -#define IMX7D_CAAM_CLK 443 -#define IMX7D_KPP_ROOT_CLK 444 -#define IMX7D_PXP_CLK 445 -#define IMX7D_CLK_END 446 +#define IMX7D_CLK_END 439 #endif /* __DT_BINDINGS_CLOCK_IMX7D_H */ diff --git a/include/dt-bindings/clock/jz4740-cgu.h b/include/dt-bindings/clock/jz4740-cgu.h index e82d770285..43153d3e9b 100644 --- a/include/dt-bindings/clock/jz4740-cgu.h +++ b/include/dt-bindings/clock/jz4740-cgu.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ /* * This header provides clock numbers for the ingenic,jz4740-cgu DT binding. * @@ -34,6 +33,5 @@ #define JZ4740_CLK_ADC 19 #define JZ4740_CLK_I2C 20 #define JZ4740_CLK_AIC 21 -#define JZ4740_CLK_TCU 22 #endif /* __DT_BINDINGS_CLOCK_JZ4740_CGU_H__ */ diff --git a/include/dt-bindings/clock/jz4780-cgu.h b/include/dt-bindings/clock/jz4780-cgu.h index 85cf8eb508..467165e3cf 100644 --- a/include/dt-bindings/clock/jz4780-cgu.h +++ b/include/dt-bindings/clock/jz4780-cgu.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ /* * This header provides clock numbers for the ingenic,jz4780-cgu DT binding. 
* @@ -12,80 +11,78 @@ #ifndef __DT_BINDINGS_CLOCK_JZ4780_CGU_H__ #define __DT_BINDINGS_CLOCK_JZ4780_CGU_H__ -#define JZ4780_CLK_EXCLK 0 -#define JZ4780_CLK_RTCLK 1 -#define JZ4780_CLK_APLL 2 -#define JZ4780_CLK_MPLL 3 -#define JZ4780_CLK_EPLL 4 -#define JZ4780_CLK_VPLL 5 -#define JZ4780_CLK_OTGPHY 6 -#define JZ4780_CLK_SCLKA 7 -#define JZ4780_CLK_CPUMUX 8 -#define JZ4780_CLK_CPU 9 -#define JZ4780_CLK_L2CACHE 10 -#define JZ4780_CLK_AHB0 11 -#define JZ4780_CLK_AHB2PMUX 12 -#define JZ4780_CLK_AHB2 13 -#define JZ4780_CLK_PCLK 14 -#define JZ4780_CLK_DDR 15 -#define JZ4780_CLK_VPU 16 -#define JZ4780_CLK_I2SPLL 17 -#define JZ4780_CLK_I2S 18 +#define JZ4780_CLK_EXCLK 0 +#define JZ4780_CLK_RTCLK 1 +#define JZ4780_CLK_APLL 2 +#define JZ4780_CLK_MPLL 3 +#define JZ4780_CLK_EPLL 4 +#define JZ4780_CLK_VPLL 5 +#define JZ4780_CLK_OTGPHY 6 +#define JZ4780_CLK_SCLKA 7 +#define JZ4780_CLK_CPUMUX 8 +#define JZ4780_CLK_CPU 9 +#define JZ4780_CLK_L2CACHE 10 +#define JZ4780_CLK_AHB0 11 +#define JZ4780_CLK_AHB2PMUX 12 +#define JZ4780_CLK_AHB2 13 +#define JZ4780_CLK_PCLK 14 +#define JZ4780_CLK_DDR 15 +#define JZ4780_CLK_VPU 16 +#define JZ4780_CLK_I2SPLL 17 +#define JZ4780_CLK_I2S 18 #define JZ4780_CLK_LCD0PIXCLK 19 #define JZ4780_CLK_LCD1PIXCLK 20 -#define JZ4780_CLK_MSCMUX 21 -#define JZ4780_CLK_MSC0 22 -#define JZ4780_CLK_MSC1 23 -#define JZ4780_CLK_MSC2 24 -#define JZ4780_CLK_UHC 25 -#define JZ4780_CLK_SSIPLL 26 -#define JZ4780_CLK_SSI 27 -#define JZ4780_CLK_CIMMCLK 28 -#define JZ4780_CLK_PCMPLL 29 -#define JZ4780_CLK_PCM 30 -#define JZ4780_CLK_GPU 31 -#define JZ4780_CLK_HDMI 32 -#define JZ4780_CLK_BCH 33 -#define JZ4780_CLK_NEMC 34 -#define JZ4780_CLK_OTG0 35 -#define JZ4780_CLK_SSI0 36 -#define JZ4780_CLK_SMB0 37 -#define JZ4780_CLK_SMB1 38 -#define JZ4780_CLK_SCC 39 -#define JZ4780_CLK_AIC 40 -#define JZ4780_CLK_TSSI0 41 -#define JZ4780_CLK_OWI 42 -#define JZ4780_CLK_KBC 43 -#define JZ4780_CLK_SADC 44 -#define JZ4780_CLK_UART0 45 -#define JZ4780_CLK_UART1 46 -#define JZ4780_CLK_UART2 
47 -#define JZ4780_CLK_UART3 48 -#define JZ4780_CLK_SSI1 49 -#define JZ4780_CLK_SSI2 50 -#define JZ4780_CLK_PDMA 51 -#define JZ4780_CLK_GPS 52 -#define JZ4780_CLK_MAC 53 -#define JZ4780_CLK_SMB2 54 -#define JZ4780_CLK_CIM 55 -#define JZ4780_CLK_LCD 56 -#define JZ4780_CLK_TVE 57 -#define JZ4780_CLK_IPU 58 -#define JZ4780_CLK_DDR0 59 -#define JZ4780_CLK_DDR1 60 -#define JZ4780_CLK_SMB3 61 -#define JZ4780_CLK_TSSI1 62 -#define JZ4780_CLK_COMPRESS 63 -#define JZ4780_CLK_AIC1 64 -#define JZ4780_CLK_GPVLC 65 -#define JZ4780_CLK_OTG1 66 -#define JZ4780_CLK_UART4 67 -#define JZ4780_CLK_AHBMON 68 -#define JZ4780_CLK_SMB4 69 -#define JZ4780_CLK_DES 70 -#define JZ4780_CLK_X2D 71 -#define JZ4780_CLK_CORE1 72 -#define JZ4780_CLK_EXCLK_DIV512 73 -#define JZ4780_CLK_RTC 74 +#define JZ4780_CLK_MSCMUX 21 +#define JZ4780_CLK_MSC0 22 +#define JZ4780_CLK_MSC1 23 +#define JZ4780_CLK_MSC2 24 +#define JZ4780_CLK_UHC 25 +#define JZ4780_CLK_SSIPLL 26 +#define JZ4780_CLK_SSI 27 +#define JZ4780_CLK_CIMMCLK 28 +#define JZ4780_CLK_PCMPLL 29 +#define JZ4780_CLK_PCM 30 +#define JZ4780_CLK_GPU 31 +#define JZ4780_CLK_HDMI 32 +#define JZ4780_CLK_BCH 33 +#define JZ4780_CLK_NEMC 34 +#define JZ4780_CLK_OTG0 35 +#define JZ4780_CLK_SSI0 36 +#define JZ4780_CLK_SMB0 37 +#define JZ4780_CLK_SMB1 38 +#define JZ4780_CLK_SCC 39 +#define JZ4780_CLK_AIC 40 +#define JZ4780_CLK_TSSI0 41 +#define JZ4780_CLK_OWI 42 +#define JZ4780_CLK_KBC 43 +#define JZ4780_CLK_SADC 44 +#define JZ4780_CLK_UART0 45 +#define JZ4780_CLK_UART1 46 +#define JZ4780_CLK_UART2 47 +#define JZ4780_CLK_UART3 48 +#define JZ4780_CLK_SSI1 49 +#define JZ4780_CLK_SSI2 50 +#define JZ4780_CLK_PDMA 51 +#define JZ4780_CLK_GPS 52 +#define JZ4780_CLK_MAC 53 +#define JZ4780_CLK_SMB2 54 +#define JZ4780_CLK_CIM 55 +#define JZ4780_CLK_LCD 56 +#define JZ4780_CLK_TVE 57 +#define JZ4780_CLK_IPU 58 +#define JZ4780_CLK_DDR0 59 +#define JZ4780_CLK_DDR1 60 +#define JZ4780_CLK_SMB3 61 +#define JZ4780_CLK_TSSI1 62 +#define JZ4780_CLK_COMPRESS 63 +#define 
JZ4780_CLK_AIC1 64 +#define JZ4780_CLK_GPVLC 65 +#define JZ4780_CLK_OTG1 66 +#define JZ4780_CLK_UART4 67 +#define JZ4780_CLK_AHBMON 68 +#define JZ4780_CLK_SMB4 69 +#define JZ4780_CLK_DES 70 +#define JZ4780_CLK_X2D 71 +#define JZ4780_CLK_CORE1 72 #endif /* __DT_BINDINGS_CLOCK_JZ4780_CGU_H__ */ diff --git a/include/dt-bindings/clock/lsi,axm5516-clks.h b/include/dt-bindings/clock/lsi,axm5516-clks.h index 050bbdab4f..beb41ace5d 100644 --- a/include/dt-bindings/clock/lsi,axm5516-clks.h +++ b/include/dt-bindings/clock/lsi,axm5516-clks.h @@ -1,6 +1,9 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright (c) 2014 LSI Corporation + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. */ #ifndef _DT_BINDINGS_CLK_AXM5516_H diff --git a/include/dt-bindings/clock/marvell,mmp2.h b/include/dt-bindings/clock/marvell,mmp2.h index 87f5ad5df7..7a510384a8 100644 --- a/include/dt-bindings/clock/marvell,mmp2.h +++ b/include/dt-bindings/clock/marvell,mmp2.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef __DTS_MARVELL_MMP2_CLOCK_H #define __DTS_MARVELL_MMP2_CLOCK_H @@ -26,11 +25,6 @@ #define MMP2_CLK_VCTCXO_4 25 #define MMP2_CLK_UART_PLL 26 #define MMP2_CLK_USB_PLL 27 -#define MMP3_CLK_PLL1_P 28 -#define MMP3_CLK_PLL2_P 29 -#define MMP3_CLK_PLL3 30 -#define MMP2_CLK_I2S0 31 -#define MMP2_CLK_I2S1 32 /* apb periphrals */ #define MMP2_CLK_TWSI0 60 @@ -55,10 +49,6 @@ #define MMP2_CLK_SSP2 79 #define MMP2_CLK_SSP3 80 #define MMP2_CLK_TIMER 81 -#define MMP2_CLK_THERMAL0 82 -#define MMP3_CLK_THERMAL1 83 -#define MMP3_CLK_THERMAL2 84 -#define MMP3_CLK_THERMAL3 85 /* axi periphrals */ #define MMP2_CLK_SDH0 101 @@ -80,16 +70,6 @@ #define MMP2_CLK_CCIC1_MIX 117 #define MMP2_CLK_CCIC1_PHY 118 #define MMP2_CLK_CCIC1_SPHY 119 -#define MMP2_CLK_DISP0_LCDC 120 -#define MMP2_CLK_USBHSIC0 121 -#define MMP2_CLK_USBHSIC1 122 
-#define MMP2_CLK_GPU_BUS 123 -#define MMP3_CLK_GPU_BUS MMP2_CLK_GPU_BUS -#define MMP2_CLK_GPU_3D 124 -#define MMP3_CLK_GPU_3D MMP2_CLK_GPU_3D -#define MMP3_CLK_GPU_2D 125 -#define MMP3_CLK_SDH4 126 -#define MMP2_CLK_AUDIO 127 #define MMP2_NR_CLKS 200 #endif diff --git a/include/dt-bindings/clock/marvell,pxa168.h b/include/dt-bindings/clock/marvell,pxa168.h index caf90436b8..3e45bdfe1a 100644 --- a/include/dt-bindings/clock/marvell,pxa168.h +++ b/include/dt-bindings/clock/marvell,pxa168.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef __DTS_MARVELL_PXA168_CLOCK_H #define __DTS_MARVELL_PXA168_CLOCK_H diff --git a/include/dt-bindings/clock/marvell,pxa1928.h b/include/dt-bindings/clock/marvell,pxa1928.h index 5dca482029..d4f2e18919 100644 --- a/include/dt-bindings/clock/marvell,pxa1928.h +++ b/include/dt-bindings/clock/marvell,pxa1928.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef __DTS_MARVELL_PXA1928_CLOCK_H #define __DTS_MARVELL_PXA1928_CLOCK_H diff --git a/include/dt-bindings/clock/marvell,pxa910.h b/include/dt-bindings/clock/marvell,pxa910.h index 7bf4623894..135082a0b6 100644 --- a/include/dt-bindings/clock/marvell,pxa910.h +++ b/include/dt-bindings/clock/marvell,pxa910.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef __DTS_MARVELL_PXA910_CLOCK_H #define __DTS_MARVELL_PXA910_CLOCK_H diff --git a/include/dt-bindings/clock/maxim,max77620.h b/include/dt-bindings/clock/maxim,max77620.h index 9d6609aaa1..82aba28496 100644 --- a/include/dt-bindings/clock/maxim,max77620.h +++ b/include/dt-bindings/clock/maxim,max77620.h @@ -1,7 +1,10 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright (C) 2016 NVIDIA CORPORATION. All rights reserved. * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * * Device Tree binding constants clocks for the Maxim 77620 PMIC. 
*/ diff --git a/include/dt-bindings/clock/maxim,max77686.h b/include/dt-bindings/clock/maxim,max77686.h index af8261dcac..7b28b09058 100644 --- a/include/dt-bindings/clock/maxim,max77686.h +++ b/include/dt-bindings/clock/maxim,max77686.h @@ -1,7 +1,10 @@ -/* SPDX-License-Identifier: GPL-2.0 */ /* * Copyright (C) 2014 Google, Inc * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * * Device Tree binding constants clocks for the Maxim 77686 PMIC. */ diff --git a/include/dt-bindings/clock/maxim,max77802.h b/include/dt-bindings/clock/maxim,max77802.h index 51adcbaed6..997312edcb 100644 --- a/include/dt-bindings/clock/maxim,max77802.h +++ b/include/dt-bindings/clock/maxim,max77802.h @@ -1,7 +1,10 @@ -/* SPDX-License-Identifier: GPL-2.0 */ /* * Copyright (C) 2014 Google, Inc * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * * Device Tree binding constants clocks for the Maxim 77802 PMIC. 
*/ diff --git a/include/dt-bindings/clock/meson8b-clkc.h b/include/dt-bindings/clock/meson8b-clkc.h index f33781338e..a55ff8c9b3 100644 --- a/include/dt-bindings/clock/meson8b-clkc.h +++ b/include/dt-bindings/clock/meson8b-clkc.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ /* * Meson8b clock tree IDs */ @@ -6,6 +5,8 @@ #ifndef __MESON8B_CLKC_H #define __MESON8B_CLKC_H +#define CLKID_UNUSED 0 +#define CLKID_XTAL 1 #define CLKID_PLL_FIXED 2 #define CLKID_PLL_VID 3 #define CLKID_PLL_SYS 4 @@ -20,99 +21,5 @@ #define CLKID_ZERO 13 #define CLKID_MPEG_SEL 14 #define CLKID_MPEG_DIV 15 -#define CLKID_DDR 16 -#define CLKID_DOS 17 -#define CLKID_ISA 18 -#define CLKID_PL301 19 -#define CLKID_PERIPHS 20 -#define CLKID_SPICC 21 -#define CLKID_I2C 22 -#define CLKID_SAR_ADC 23 -#define CLKID_SMART_CARD 24 -#define CLKID_RNG0 25 -#define CLKID_UART0 26 -#define CLKID_SDHC 27 -#define CLKID_STREAM 28 -#define CLKID_ASYNC_FIFO 29 -#define CLKID_SDIO 30 -#define CLKID_ABUF 31 -#define CLKID_HIU_IFACE 32 -#define CLKID_ASSIST_MISC 33 -#define CLKID_SPI 34 -#define CLKID_I2S_SPDIF 35 -#define CLKID_ETH 36 -#define CLKID_DEMUX 37 -#define CLKID_AIU_GLUE 38 -#define CLKID_IEC958 39 -#define CLKID_I2S_OUT 40 -#define CLKID_AMCLK 41 -#define CLKID_AIFIFO2 42 -#define CLKID_MIXER 43 -#define CLKID_MIXER_IFACE 44 -#define CLKID_ADC 45 -#define CLKID_BLKMV 46 -#define CLKID_AIU 47 -#define CLKID_UART1 48 -#define CLKID_G2D 49 -#define CLKID_USB0 50 -#define CLKID_USB1 51 -#define CLKID_RESET 52 -#define CLKID_NAND 53 -#define CLKID_DOS_PARSER 54 -#define CLKID_USB 55 -#define CLKID_VDIN1 56 -#define CLKID_AHB_ARB0 57 -#define CLKID_EFUSE 58 -#define CLKID_BOOT_ROM 59 -#define CLKID_AHB_DATA_BUS 60 -#define CLKID_AHB_CTRL_BUS 61 -#define CLKID_HDMI_INTR_SYNC 62 -#define CLKID_HDMI_PCLK 63 -#define CLKID_USB1_DDR_BRIDGE 64 -#define CLKID_USB0_DDR_BRIDGE 65 -#define CLKID_MMC_PCLK 66 -#define CLKID_DVIN 67 -#define CLKID_UART2 68 -#define CLKID_SANA 69 -#define CLKID_VPU_INTR 70 
-#define CLKID_SEC_AHB_AHB3_BRIDGE 71 -#define CLKID_CLK81_A9 72 -#define CLKID_VCLK2_VENCI0 73 -#define CLKID_VCLK2_VENCI1 74 -#define CLKID_VCLK2_VENCP0 75 -#define CLKID_VCLK2_VENCP1 76 -#define CLKID_GCLK_VENCI_INT 77 -#define CLKID_GCLK_VENCP_INT 78 -#define CLKID_DAC_CLK 79 -#define CLKID_AOCLK_GATE 80 -#define CLKID_IEC958_GATE 81 -#define CLKID_ENC480P 82 -#define CLKID_RNG1 83 -#define CLKID_GCLK_VENCL_INT 84 -#define CLKID_VCLK2_VENCLMCC 85 -#define CLKID_VCLK2_VENCL 86 -#define CLKID_VCLK2_OTHER 87 -#define CLKID_EDP 88 -#define CLKID_AO_MEDIA_CPU 89 -#define CLKID_AO_AHB_SRAM 90 -#define CLKID_AO_AHB_BUS 91 -#define CLKID_AO_IFACE 92 -#define CLKID_MPLL0 93 -#define CLKID_MPLL1 94 -#define CLKID_MPLL2 95 -#define CLKID_NAND_CLK 112 -#define CLKID_APB 124 -#define CLKID_PERIPH 126 -#define CLKID_AXI 128 -#define CLKID_L2_DRAM 130 -#define CLKID_HDMI_SYS 174 -#define CLKID_VPU 190 -#define CLKID_VDEC_1 196 -#define CLKID_VDEC_HCODEC 199 -#define CLKID_VDEC_2 202 -#define CLKID_VDEC_HEVC 206 -#define CLKID_CTS_AMCLK 209 -#define CLKID_CTS_MCLK_I958 212 -#define CLKID_CTS_I958 213 #endif /* __MESON8B_CLKC_H */ diff --git a/include/dt-bindings/clock/microchip,pic32-clock.h b/include/dt-bindings/clock/microchip,pic32-clock.h index 371668d989..184647a6a8 100644 --- a/include/dt-bindings/clock/microchip,pic32-clock.h +++ b/include/dt-bindings/clock/microchip,pic32-clock.h @@ -1,7 +1,15 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * Purna Chandra Mandal, * Copyright (C) 2015 Microchip Technology Inc. All rights reserved. + * + * This program is free software; you can distribute it and/or modify it + * under the terms of the GNU General Public License (Version 2) as + * published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. 
*/ #ifndef _DT_BINDINGS_CLK_MICROCHIP_PIC32_H_ diff --git a/include/dt-bindings/clock/mpc512x-clock.h b/include/dt-bindings/clock/mpc512x-clock.h index 13c316bf27..4f94919327 100644 --- a/include/dt-bindings/clock/mpc512x-clock.h +++ b/include/dt-bindings/clock/mpc512x-clock.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ /* * This header provides constants for MPC512x clock specs in DT bindings. */ diff --git a/include/dt-bindings/clock/mt2701-clk.h b/include/dt-bindings/clock/mt2701-clk.h index 6d531d5ae0..2062c67e2e 100644 --- a/include/dt-bindings/clock/mt2701-clk.h +++ b/include/dt-bindings/clock/mt2701-clk.h @@ -1,7 +1,15 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright (c) 2014 MediaTek Inc. * Author: Shunli Wang + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
*/ #ifndef _DT_BINDINGS_CLK_MT2701_H @@ -163,11 +171,11 @@ #define CLK_TOP_8BDAC 151 #define CLK_TOP_WBG_DIG_416M 152 #define CLK_TOP_DPI 153 -#define CLK_TOP_DSI0_LNTC_DSI 154 -#define CLK_TOP_AUD_EXT1 155 -#define CLK_TOP_AUD_EXT2 156 -#define CLK_TOP_NFI1X_PAD 157 -#define CLK_TOP_AXISEL_D4 158 +#define CLK_TOP_HDMITX_CLKDIG_CTS 154 +#define CLK_TOP_DSI0_LNTC_DSI 155 +#define CLK_TOP_AUD_EXT1 156 +#define CLK_TOP_AUD_EXT2 157 +#define CLK_TOP_NFI1X_PAD 158 #define CLK_TOP_NR 159 /* APMIXEDSYS */ @@ -185,8 +193,7 @@ #define CLK_APMIXED_HADDS2PLL 11 #define CLK_APMIXED_AUD2PLL 12 #define CLK_APMIXED_TVD2PLL 13 -#define CLK_APMIXED_HDMI_REF 14 -#define CLK_APMIXED_NR 15 +#define CLK_APMIXED_NR 14 /* DDRPHY */ @@ -214,8 +221,7 @@ #define CLK_INFRA_PMICWRAP 17 #define CLK_INFRA_DDCCI 18 #define CLK_INFRA_CLK_13M 19 -#define CLK_INFRA_CPUSEL 20 -#define CLK_INFRA_NR 21 +#define CLK_INFRA_NR 20 /* PERICFG */ @@ -423,10 +429,6 @@ #define CLK_ETHSYS_CRYPTO 8 #define CLK_ETHSYS_NR 9 -/* G3DSYS */ -#define CLK_G3DSYS_CORE 1 -#define CLK_G3DSYS_NR 2 - /* BDP */ #define CLK_BDP_BRG_BA 1 diff --git a/include/dt-bindings/clock/mt8135-clk.h b/include/dt-bindings/clock/mt8135-clk.h index dad8365a4d..6dac6c091d 100644 --- a/include/dt-bindings/clock/mt8135-clk.h +++ b/include/dt-bindings/clock/mt8135-clk.h @@ -1,7 +1,15 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright (c) 2014 MediaTek Inc. * Author: James Liao + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
*/ #ifndef _DT_BINDINGS_CLK_MT8135_H diff --git a/include/dt-bindings/clock/mt8173-clk.h b/include/dt-bindings/clock/mt8173-clk.h index 3d00c98b96..6094bf7e50 100644 --- a/include/dt-bindings/clock/mt8173-clk.h +++ b/include/dt-bindings/clock/mt8173-clk.h @@ -1,7 +1,15 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright (c) 2014 MediaTek Inc. * Author: James Liao + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. */ #ifndef _DT_BINDINGS_CLK_MT8173_H @@ -185,9 +193,7 @@ #define CLK_INFRA_PMICSPI 10 #define CLK_INFRA_PMICWRAP 11 #define CLK_INFRA_CLK_13M 12 -#define CLK_INFRA_CA53SEL 13 -#define CLK_INFRA_CA72SEL 14 -#define CLK_INFRA_NR_CLK 15 +#define CLK_INFRA_NR_CLK 13 /* PERI_SYS */ diff --git a/include/dt-bindings/clock/pistachio-clk.h b/include/dt-bindings/clock/pistachio-clk.h index ec7a8683f3..039f83facb 100644 --- a/include/dt-bindings/clock/pistachio-clk.h +++ b/include/dt-bindings/clock/pistachio-clk.h @@ -1,6 +1,9 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright (C) 2014 Google, Inc. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. 
*/ #ifndef _DT_BINDINGS_CLOCK_PISTACHIO_H diff --git a/include/dt-bindings/clock/pxa-clock.h b/include/dt-bindings/clock/pxa-clock.h index ce3d6b6a2e..e65803b1dc 100644 --- a/include/dt-bindings/clock/pxa-clock.h +++ b/include/dt-bindings/clock/pxa-clock.h @@ -1,7 +1,11 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * Inspired by original work from pxa2xx-regs.h by Nicolas Pitre * Copyright (C) 2014 Robert Jarzmik + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. */ #ifndef __DT_BINDINGS_CLOCK_PXA2XX_H__ @@ -68,7 +72,6 @@ #define CLK_USIM 58 #define CLK_USIM1 59 #define CLK_USMI0 60 -#define CLK_OSC32k768 61 -#define CLK_MAX 62 +#define CLK_MAX 61 #endif diff --git a/include/dt-bindings/clock/qcom,gcc-apq8084.h b/include/dt-bindings/clock/qcom,gcc-apq8084.h index 7f657cf8cc..5aa7ebeae4 100644 --- a/include/dt-bindings/clock/qcom,gcc-apq8084.h +++ b/include/dt-bindings/clock/qcom,gcc-apq8084.h @@ -1,6 +1,14 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright (c) 2014, The Linux Foundation. All rights reserved. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
*/ #ifndef _DT_BINDINGS_CLK_APQ_GCC_8084_H diff --git a/include/dt-bindings/clock/qcom,gcc-ipq4019.h b/include/dt-bindings/clock/qcom,gcc-ipq4019.h index 7e8a7be6dc..6240e5b0e9 100644 --- a/include/dt-bindings/clock/qcom,gcc-ipq4019.h +++ b/include/dt-bindings/clock/qcom,gcc-ipq4019.h @@ -81,17 +81,6 @@ #define GCC_WCSS5G_CLK 62 #define GCC_WCSS5G_REF_CLK 63 #define GCC_WCSS5G_RTC_CLK 64 -#define GCC_APSS_DDRPLL_VCO 65 -#define GCC_SDCC_PLLDIV_CLK 66 -#define GCC_FEPLL_VCO 67 -#define GCC_FEPLL125_CLK 68 -#define GCC_FEPLL125DLY_CLK 69 -#define GCC_FEPLL200_CLK 70 -#define GCC_FEPLL500_CLK 71 -#define GCC_FEPLL_WCSS2G_CLK 72 -#define GCC_FEPLL_WCSS5G_CLK 73 -#define GCC_APSS_CPU_PLLDIV_CLK 74 -#define GCC_PCNOC_AHB_CLK_SRC 75 #define WIFI0_CPU_INIT_RESET 0 #define WIFI0_RADIO_SRIF_RESET 1 diff --git a/include/dt-bindings/clock/qcom,gcc-ipq806x.h b/include/dt-bindings/clock/qcom,gcc-ipq806x.h index 7deec14a6d..dc4254b8cb 100644 --- a/include/dt-bindings/clock/qcom,gcc-ipq806x.h +++ b/include/dt-bindings/clock/qcom,gcc-ipq806x.h @@ -1,6 +1,14 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright (c) 2014, The Linux Foundation. All rights reserved. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
*/ #ifndef _DT_BINDINGS_CLK_GCC_IPQ806X_H diff --git a/include/dt-bindings/clock/qcom,gcc-mdm9615.h b/include/dt-bindings/clock/qcom,gcc-mdm9615.h index 9e4c34823d..9ab2c40871 100644 --- a/include/dt-bindings/clock/qcom,gcc-mdm9615.h +++ b/include/dt-bindings/clock/qcom,gcc-mdm9615.h @@ -1,8 +1,16 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright (c) 2013, The Linux Foundation. All rights reserved. * Copyright (c) BayLibre, SAS. * Author : Neil Armstrong + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. */ #ifndef _DT_BINDINGS_CLK_MDM_GCC_9615_H @@ -315,7 +323,5 @@ #define CE3_H_CLK 305 #define USB_HS1_SYSTEM_CLK_SRC 306 #define USB_HS1_SYSTEM_CLK 307 -#define EBI2_CLK 308 -#define EBI2_AON_CLK 309 #endif diff --git a/include/dt-bindings/clock/qcom,gcc-msm8660.h b/include/dt-bindings/clock/qcom,gcc-msm8660.h index 4777c00271..67665f6813 100644 --- a/include/dt-bindings/clock/qcom,gcc-msm8660.h +++ b/include/dt-bindings/clock/qcom,gcc-msm8660.h @@ -1,6 +1,14 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright (c) 2013, The Linux Foundation. All rights reserved. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
*/ #ifndef _DT_BINDINGS_CLK_MSM_GCC_8660_H diff --git a/include/dt-bindings/clock/qcom,gcc-msm8916.h b/include/dt-bindings/clock/qcom,gcc-msm8916.h index 5630344061..28a27a4ed3 100644 --- a/include/dt-bindings/clock/qcom,gcc-msm8916.h +++ b/include/dt-bindings/clock/qcom,gcc-msm8916.h @@ -1,6 +1,14 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright 2015 Linaro Limited + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. */ #ifndef _DT_BINDINGS_CLK_MSM_GCC_8916_H diff --git a/include/dt-bindings/clock/qcom,gcc-msm8960.h b/include/dt-bindings/clock/qcom,gcc-msm8960.h index 950b828626..7d20eedfee 100644 --- a/include/dt-bindings/clock/qcom,gcc-msm8960.h +++ b/include/dt-bindings/clock/qcom,gcc-msm8960.h @@ -1,6 +1,14 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright (c) 2013, The Linux Foundation. All rights reserved. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
*/ #ifndef _DT_BINDINGS_CLK_MSM_GCC_8960_H @@ -311,7 +319,5 @@ #define CE3_SRC 303 #define CE3_CORE_CLK 304 #define CE3_H_CLK 305 -#define PLL16 306 -#define PLL17 307 #endif diff --git a/include/dt-bindings/clock/qcom,gcc-msm8974.h b/include/dt-bindings/clock/qcom,gcc-msm8974.h index 5c10570988..81d32f6391 100644 --- a/include/dt-bindings/clock/qcom,gcc-msm8974.h +++ b/include/dt-bindings/clock/qcom,gcc-msm8974.h @@ -1,6 +1,14 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright (c) 2013, The Linux Foundation. All rights reserved. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. */ #ifndef _DT_BINDINGS_CLK_MSM_GCC_8974_H diff --git a/include/dt-bindings/clock/qcom,gcc-msm8996.h b/include/dt-bindings/clock/qcom,gcc-msm8996.h index 03bf49d43d..1828723eb6 100644 --- a/include/dt-bindings/clock/qcom,gcc-msm8996.h +++ b/include/dt-bindings/clock/qcom,gcc-msm8996.h @@ -1,6 +1,14 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright (c) 2015, The Linux Foundation. All rights reserved. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
*/ #ifndef _DT_BINDINGS_CLK_MSM_GCC_8996_H @@ -225,17 +233,6 @@ #define GCC_PCIE_CLKREF_CLK 216 #define GCC_RX2_USB2_CLKREF_CLK 217 #define GCC_RX1_USB2_CLKREF_CLK 218 -#define GCC_HLOS1_VOTE_LPASS_CORE_SMMU_CLK 219 -#define GCC_HLOS1_VOTE_LPASS_ADSP_SMMU_CLK 220 -#define GCC_EDP_CLKREF_CLK 221 -#define GCC_MSS_CFG_AHB_CLK 222 -#define GCC_MSS_Q6_BIMC_AXI_CLK 223 -#define GCC_MSS_SNOC_AXI_CLK 224 -#define GCC_MSS_MNOC_BIMC_AXI_CLK 225 -#define GCC_DCC_AHB_CLK 226 -#define GCC_AGGRE0_NOC_MPU_CFG_AHB_CLK 227 -#define GCC_MMSS_GPLL0_DIV_CLK 228 -#define GCC_MSS_GPLL0_DIV_CLK 229 #define GCC_SYSTEM_NOC_BCR 0 #define GCC_CONFIG_NOC_BCR 1 @@ -342,7 +339,6 @@ #define GCC_PCIE_PHY_COM_NOCSR_BCR 102 #define GCC_USB3_PHY_BCR 103 #define GCC_USB3PHY_PHY_BCR 104 -#define GCC_MSS_RESTART 105 /* Indexes for GDSCs */ diff --git a/include/dt-bindings/clock/qcom,lcc-ipq806x.h b/include/dt-bindings/clock/qcom,lcc-ipq806x.h index 25b92bbf0a..4e944b85c5 100644 --- a/include/dt-bindings/clock/qcom,lcc-ipq806x.h +++ b/include/dt-bindings/clock/qcom,lcc-ipq806x.h @@ -1,6 +1,14 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright (c) 2014, The Linux Foundation. All rights reserved. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
*/ #ifndef _DT_BINDINGS_CLK_LCC_IPQ806X_H diff --git a/include/dt-bindings/clock/qcom,lcc-mdm9615.h b/include/dt-bindings/clock/qcom,lcc-mdm9615.h index 299338ee1d..cac963a2fd 100644 --- a/include/dt-bindings/clock/qcom,lcc-mdm9615.h +++ b/include/dt-bindings/clock/qcom,lcc-mdm9615.h @@ -1,8 +1,16 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright (c) 2014, The Linux Foundation. All rights reserved. * Copyright (c) BayLibre, SAS. * Author : Neil Armstrong + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. */ #ifndef _DT_BINDINGS_CLK_LCC_MDM9615_H diff --git a/include/dt-bindings/clock/qcom,lcc-msm8960.h b/include/dt-bindings/clock/qcom,lcc-msm8960.h index d115a49f4c..4fb2aa64d9 100644 --- a/include/dt-bindings/clock/qcom,lcc-msm8960.h +++ b/include/dt-bindings/clock/qcom,lcc-msm8960.h @@ -1,6 +1,14 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright (c) 2014, The Linux Foundation. All rights reserved. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
*/ #ifndef _DT_BINDINGS_CLK_LCC_MSM8960_H diff --git a/include/dt-bindings/clock/qcom,mmcc-apq8084.h b/include/dt-bindings/clock/qcom,mmcc-apq8084.h index 9d42b1b25a..03861e3f49 100644 --- a/include/dt-bindings/clock/qcom,mmcc-apq8084.h +++ b/include/dt-bindings/clock/qcom,mmcc-apq8084.h @@ -1,6 +1,14 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright (c) 2014, The Linux Foundation. All rights reserved. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. */ #ifndef _DT_BINDINGS_CLK_APQ_MMCC_8084_H diff --git a/include/dt-bindings/clock/qcom,mmcc-msm8960.h b/include/dt-bindings/clock/qcom,mmcc-msm8960.h index 81714fc859..85041b28f9 100644 --- a/include/dt-bindings/clock/qcom,mmcc-msm8960.h +++ b/include/dt-bindings/clock/qcom,mmcc-msm8960.h @@ -1,6 +1,14 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright (c) 2013, The Linux Foundation. All rights reserved. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
*/ #ifndef _DT_BINDINGS_CLK_MSM_MMCC_8960_H diff --git a/include/dt-bindings/clock/qcom,mmcc-msm8974.h b/include/dt-bindings/clock/qcom,mmcc-msm8974.h index a62cb0629a..28651e54c9 100644 --- a/include/dt-bindings/clock/qcom,mmcc-msm8974.h +++ b/include/dt-bindings/clock/qcom,mmcc-msm8974.h @@ -1,6 +1,14 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright (c) 2013, The Linux Foundation. All rights reserved. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. */ #ifndef _DT_BINDINGS_CLK_MSM_MMCC_8974_H diff --git a/include/dt-bindings/clock/qcom,mmcc-msm8996.h b/include/dt-bindings/clock/qcom,mmcc-msm8996.h index d51f9ac705..5abc445ad8 100644 --- a/include/dt-bindings/clock/qcom,mmcc-msm8996.h +++ b/include/dt-bindings/clock/qcom,mmcc-msm8996.h @@ -1,6 +1,14 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright (c) 2015, The Linux Foundation. All rights reserved. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
*/ #ifndef _DT_BINDINGS_CLK_MSM_MMCC_8996_H diff --git a/include/dt-bindings/clock/r7s72100-clock.h b/include/dt-bindings/clock/r7s72100-clock.h index a267ac2501..3cd813896d 100644 --- a/include/dt-bindings/clock/r7s72100-clock.h +++ b/include/dt-bindings/clock/r7s72100-clock.h @@ -1,28 +1,19 @@ -/* SPDX-License-Identifier: GPL-2.0 - * +/* * Copyright (C) 2014 Renesas Solutions Corp. * Copyright (C) 2014 Wolfram Sang, Sang Engineering + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; version 2 of the License. */ #ifndef __DT_BINDINGS_CLOCK_R7S72100_H__ #define __DT_BINDINGS_CLOCK_R7S72100_H__ #define R7S72100_CLK_PLL 0 -#define R7S72100_CLK_I 1 -#define R7S72100_CLK_G 2 - -/* MSTP2 */ -#define R7S72100_CLK_CORESIGHT 0 /* MSTP3 */ -#define R7S72100_CLK_IEBUS 7 -#define R7S72100_CLK_IRDA 6 -#define R7S72100_CLK_LIN0 5 -#define R7S72100_CLK_LIN1 4 #define R7S72100_CLK_MTU2 3 -#define R7S72100_CLK_CAN 2 -#define R7S72100_CLK_ADCPWR 1 -#define R7S72100_CLK_PWM 0 /* MSTP4 */ #define R7S72100_CLK_SCIF0 7 @@ -34,52 +25,14 @@ #define R7S72100_CLK_SCIF6 1 #define R7S72100_CLK_SCIF7 0 -/* MSTP5 */ -#define R7S72100_CLK_SCI0 7 -#define R7S72100_CLK_SCI1 6 -#define R7S72100_CLK_SG0 5 -#define R7S72100_CLK_SG1 4 -#define R7S72100_CLK_SG2 3 -#define R7S72100_CLK_SG3 2 -#define R7S72100_CLK_OSTM0 1 -#define R7S72100_CLK_OSTM1 0 - -/* MSTP6 */ -#define R7S72100_CLK_ADC 7 -#define R7S72100_CLK_CEU 6 -#define R7S72100_CLK_DOC0 5 -#define R7S72100_CLK_DOC1 4 -#define R7S72100_CLK_DRC0 3 -#define R7S72100_CLK_DRC1 2 -#define R7S72100_CLK_JCU 1 -#define R7S72100_CLK_RTC 0 - /* MSTP7 */ -#define R7S72100_CLK_VDEC0 7 -#define R7S72100_CLK_VDEC1 6 #define R7S72100_CLK_ETHER 4 -#define R7S72100_CLK_NAND 3 -#define R7S72100_CLK_USB0 1 -#define R7S72100_CLK_USB1 0 - -/* MSTP8 */ -#define R7S72100_CLK_IMR0 7 -#define R7S72100_CLK_IMR1 6 -#define 
R7S72100_CLK_IMRDISP 5 -#define R7S72100_CLK_MMCIF 4 -#define R7S72100_CLK_MLB 3 -#define R7S72100_CLK_ETHAVB 2 -#define R7S72100_CLK_SCUX 1 /* MSTP9 */ #define R7S72100_CLK_I2C0 7 #define R7S72100_CLK_I2C1 6 #define R7S72100_CLK_I2C2 5 #define R7S72100_CLK_I2C3 4 -#define R7S72100_CLK_SPIBSC0 3 -#define R7S72100_CLK_SPIBSC1 2 -#define R7S72100_CLK_VDC50 1 /* and LVDS */ -#define R7S72100_CLK_VDC51 0 /* MSTP10 */ #define R7S72100_CLK_SPI0 7 @@ -87,26 +40,5 @@ #define R7S72100_CLK_SPI2 5 #define R7S72100_CLK_SPI3 4 #define R7S72100_CLK_SPI4 3 -#define R7S72100_CLK_CDROM 2 -#define R7S72100_CLK_SPDIF 1 -#define R7S72100_CLK_RGPVG2 0 - -/* MSTP11 */ -#define R7S72100_CLK_SSI0 5 -#define R7S72100_CLK_SSI1 4 -#define R7S72100_CLK_SSI2 3 -#define R7S72100_CLK_SSI3 2 -#define R7S72100_CLK_SSI4 1 -#define R7S72100_CLK_SSI5 0 - -/* MSTP12 */ -#define R7S72100_CLK_SDHI00 3 -#define R7S72100_CLK_SDHI01 2 -#define R7S72100_CLK_SDHI10 1 -#define R7S72100_CLK_SDHI11 0 - -/* MSTP13 */ -#define R7S72100_CLK_PIX1 2 -#define R7S72100_CLK_PIX0 1 #endif /* __DT_BINDINGS_CLOCK_R7S72100_H__ */ diff --git a/include/dt-bindings/clock/r8a73a4-clock.h b/include/dt-bindings/clock/r8a73a4-clock.h index 1ec4827b80..dd11ecdf83 100644 --- a/include/dt-bindings/clock/r8a73a4-clock.h +++ b/include/dt-bindings/clock/r8a73a4-clock.h @@ -1,6 +1,10 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * Copyright 2014 Ulrich Hecht + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. 
*/ #ifndef __DT_BINDINGS_CLOCK_R8A73A4_H__ @@ -50,7 +54,6 @@ #define R8A73A4_CLK_IIC3 11 #define R8A73A4_CLK_IIC4 10 #define R8A73A4_CLK_IIC5 9 -#define R8A73A4_CLK_INTC_SYS 8 #define R8A73A4_CLK_IRQC 7 /* MSTP5 */ diff --git a/include/dt-bindings/clock/r8a7740-clock.h b/include/dt-bindings/clock/r8a7740-clock.h index 1b3fdb39cc..476135da0f 100644 --- a/include/dt-bindings/clock/r8a7740-clock.h +++ b/include/dt-bindings/clock/r8a7740-clock.h @@ -1,6 +1,10 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * Copyright 2014 Ulrich Hecht + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. */ #ifndef __DT_BINDINGS_CLOCK_R8A7740_H__ diff --git a/include/dt-bindings/clock/r8a7778-clock.h b/include/dt-bindings/clock/r8a7778-clock.h index 4a32b364fd..f6b07c5399 100644 --- a/include/dt-bindings/clock/r8a7778-clock.h +++ b/include/dt-bindings/clock/r8a7778-clock.h @@ -1,6 +1,10 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * Copyright (C) 2014 Ulrich Hecht + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. 
*/ #ifndef __DT_BINDINGS_CLOCK_R8A7778_H__ @@ -26,8 +30,6 @@ #define R8A7778_CLK_SCIF3 23 #define R8A7778_CLK_SCIF4 22 #define R8A7778_CLK_SCIF5 21 -#define R8A7778_CLK_HSCIF0 19 -#define R8A7778_CLK_HSCIF1 18 #define R8A7778_CLK_TMU0 16 #define R8A7778_CLK_TMU1 15 #define R8A7778_CLK_TMU2 14 diff --git a/include/dt-bindings/clock/r8a7779-clock.h b/include/dt-bindings/clock/r8a7779-clock.h index f0549234b7..381a611423 100644 --- a/include/dt-bindings/clock/r8a7779-clock.h +++ b/include/dt-bindings/clock/r8a7779-clock.h @@ -1,8 +1,12 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * Copyright (C) 2013 Horms Solutions Ltd. * * Contact: Simon Horman + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. */ #ifndef __DT_BINDINGS_CLOCK_R8A7779_H__ diff --git a/include/dt-bindings/clock/r8a7790-clock.h b/include/dt-bindings/clock/r8a7790-clock.h index c92ff1e602..fa5e8da809 100644 --- a/include/dt-bindings/clock/r8a7790-clock.h +++ b/include/dt-bindings/clock/r8a7790-clock.h @@ -1,6 +1,10 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * Copyright 2013 Ideas On Board SPRL + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. 
*/ #ifndef __DT_BINDINGS_CLOCK_R8A7790_H__ @@ -78,7 +82,6 @@ /* MSTP4 */ #define R8A7790_CLK_IRQC 7 -#define R8A7790_CLK_INTC_SYS 8 /* MSTP5 */ #define R8A7790_CLK_AUDIO_DMAC1 1 diff --git a/include/dt-bindings/clock/r8a7791-clock.h b/include/dt-bindings/clock/r8a7791-clock.h index bb4f18b1b3..ffa11379b3 100644 --- a/include/dt-bindings/clock/r8a7791-clock.h +++ b/include/dt-bindings/clock/r8a7791-clock.h @@ -1,6 +1,10 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * Copyright 2013 Ideas On Board SPRL + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. */ #ifndef __DT_BINDINGS_CLOCK_R8A7791_H__ @@ -68,7 +72,6 @@ /* MSTP4 */ #define R8A7791_CLK_IRQC 7 -#define R8A7791_CLK_INTC_SYS 8 /* MSTP5 */ #define R8A7791_CLK_AUDIO_DMAC1 1 @@ -105,7 +108,6 @@ #define R8A7791_CLK_SATA0 15 /* MSTP9 */ -#define R8A7791_CLK_GYROADC 1 #define R8A7791_CLK_GPIO7 4 #define R8A7791_CLK_GPIO6 5 #define R8A7791_CLK_GPIO5 7 diff --git a/include/dt-bindings/clock/r8a7792-clock.h b/include/dt-bindings/clock/r8a7792-clock.h index 2948d9ce3a..9a8b392ceb 100644 --- a/include/dt-bindings/clock/r8a7792-clock.h +++ b/include/dt-bindings/clock/r8a7792-clock.h @@ -1,6 +1,10 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * Copyright (C) 2016 Cogent Embedded, Inc. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. 
*/ #ifndef __DT_BINDINGS_CLOCK_R8A7792_H__ @@ -13,6 +17,7 @@ #define R8A7792_CLK_PLL3 3 #define R8A7792_CLK_LB 4 #define R8A7792_CLK_QSPI 5 +#define R8A7792_CLK_Z 6 /* MSTP0 */ #define R8A7792_CLK_MSIOF0 0 @@ -40,7 +45,6 @@ /* MSTP4 */ #define R8A7792_CLK_IRQC 7 -#define R8A7792_CLK_INTC_SYS 8 /* MSTP5 */ #define R8A7792_CLK_AUDIO_DMAC0 2 diff --git a/include/dt-bindings/clock/r8a7793-clock.h b/include/dt-bindings/clock/r8a7793-clock.h index 49c66d8ed1..efcbc594fe 100644 --- a/include/dt-bindings/clock/r8a7793-clock.h +++ b/include/dt-bindings/clock/r8a7793-clock.h @@ -1,8 +1,16 @@ -/* SPDX-License-Identifier: GPL-2.0 - * +/* * r8a7793 clock definition * * Copyright (C) 2014 Renesas Electronics Corporation + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; version 2 of the License. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
*/ #ifndef __DT_BINDINGS_CLOCK_R8A7793_H__ @@ -69,11 +77,10 @@ /* MSTP4 */ #define R8A7793_CLK_IRQC 7 -#define R8A7793_CLK_INTC_SYS 8 /* MSTP5 */ -#define R8A7793_CLK_AUDIO_DMAC1 1 -#define R8A7793_CLK_AUDIO_DMAC0 2 +#define R8A7793_CLK_AUDIO_DMAC1 1 +#define R8A7793_CLK_AUDIO_DMAC0 2 #define R8A7793_CLK_ADSP_MOD 6 #define R8A7793_CLK_THERMAL 22 #define R8A7793_CLK_PWM 23 diff --git a/include/dt-bindings/clock/r8a7794-clock.h b/include/dt-bindings/clock/r8a7794-clock.h index 649f005782..88e64846cf 100644 --- a/include/dt-bindings/clock/r8a7794-clock.h +++ b/include/dt-bindings/clock/r8a7794-clock.h @@ -1,7 +1,11 @@ -/* SPDX-License-Identifier: GPL-2.0+ - * +/* * Copyright (C) 2014 Renesas Electronics Corporation * Copyright 2013 Ideas On Board SPRL + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. */ #ifndef __DT_BINDINGS_CLOCK_R8A7794_H__ @@ -60,7 +64,6 @@ /* MSTP4 */ #define R8A7794_CLK_IRQC 7 -#define R8A7794_CLK_INTC_SYS 8 /* MSTP5 */ #define R8A7794_CLK_AUDIO_DMAC0 2 @@ -78,7 +81,6 @@ #define R8A7794_CLK_SCIF2 19 #define R8A7794_CLK_SCIF1 20 #define R8A7794_CLK_SCIF0 21 -#define R8A7794_CLK_DU1 23 #define R8A7794_CLK_DU0 24 /* MSTP8 */ diff --git a/include/dt-bindings/clock/r8a7795-cpg-mssr.h b/include/dt-bindings/clock/r8a7795-cpg-mssr.h index 92b3e2a951..e864aae0a2 100644 --- a/include/dt-bindings/clock/r8a7795-cpg-mssr.h +++ b/include/dt-bindings/clock/r8a7795-cpg-mssr.h @@ -1,6 +1,10 @@ -/* SPDX-License-Identifier: GPL-2.0+ - * +/* * Copyright (C) 2015 Renesas Electronics Corp. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. 
*/ #ifndef __DT_BINDINGS_CLOCK_R8A7795_CPG_MSSR_H__ #define __DT_BINDINGS_CLOCK_R8A7795_CPG_MSSR_H__ @@ -50,17 +54,10 @@ #define R8A7795_CLK_CANFD 39 #define R8A7795_CLK_HDMI 40 #define R8A7795_CLK_CSI0 41 -/* CLK_CSIREF was removed */ +#define R8A7795_CLK_CSIREF 42 #define R8A7795_CLK_CP 43 #define R8A7795_CLK_CPEX 44 #define R8A7795_CLK_R 45 #define R8A7795_CLK_OSC 46 -/* r8a7795 ES2.0 CPG Core Clocks */ -#define R8A7795_CLK_S0D2 47 -#define R8A7795_CLK_S0D3 48 -#define R8A7795_CLK_S0D6 49 -#define R8A7795_CLK_S0D8 50 -#define R8A7795_CLK_S0D12 51 - #endif /* __DT_BINDINGS_CLOCK_R8A7795_CPG_MSSR_H__ */ diff --git a/include/dt-bindings/clock/r8a7796-cpg-mssr.h b/include/dt-bindings/clock/r8a7796-cpg-mssr.h index c0957cf458..1e5942695f 100644 --- a/include/dt-bindings/clock/r8a7796-cpg-mssr.h +++ b/include/dt-bindings/clock/r8a7796-cpg-mssr.h @@ -1,6 +1,10 @@ -/* SPDX-License-Identifier: GPL-2.0+ - * +/* * Copyright (C) 2016 Renesas Electronics Corp. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. */ #ifndef __DT_BINDINGS_CLOCK_R8A7796_CPG_MSSR_H__ #define __DT_BINDINGS_CLOCK_R8A7796_CPG_MSSR_H__ @@ -56,7 +60,7 @@ #define R8A7796_CLK_CANFD 45 #define R8A7796_CLK_HDMI 46 #define R8A7796_CLK_CSI0 47 -/* CLK_CSIREF was removed */ +#define R8A7796_CLK_CSIREF 48 #define R8A7796_CLK_CP 49 #define R8A7796_CLK_CPEX 50 #define R8A7796_CLK_R 51 diff --git a/include/dt-bindings/clock/renesas-cpg-mssr.h b/include/dt-bindings/clock/renesas-cpg-mssr.h index 8169ad063f..569a3cc33f 100644 --- a/include/dt-bindings/clock/renesas-cpg-mssr.h +++ b/include/dt-bindings/clock/renesas-cpg-mssr.h @@ -1,6 +1,10 @@ -/* SPDX-License-Identifier: GPL-2.0+ - * +/* * Copyright (C) 2015 Renesas Electronics Corp. 
+ * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. */ #ifndef __DT_BINDINGS_CLOCK_RENESAS_CPG_MSSR_H__ #define __DT_BINDINGS_CLOCK_RENESAS_CPG_MSSR_H__ diff --git a/include/dt-bindings/clock/rk3036-cru.h b/include/dt-bindings/clock/rk3036-cru.h index a96a9870ad..de44109a3a 100644 --- a/include/dt-bindings/clock/rk3036-cru.h +++ b/include/dt-bindings/clock/rk3036-cru.h @@ -1,7 +1,16 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * Copyright (c) 2015 Rockchip Electronics Co. Ltd. * Author: Xing Zheng + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. */ #ifndef _DT_BINDINGS_CLK_ROCKCHIP_RK3036_H @@ -81,7 +90,6 @@ #define HCLK_OTG0 449 #define HCLK_OTG1 450 #define HCLK_NANDC 453 -#define HCLK_SFC 454 #define HCLK_SDMMC 456 #define HCLK_SDIO 457 #define HCLK_EMMC 459 diff --git a/include/dt-bindings/clock/rk3066a-cru.h b/include/dt-bindings/clock/rk3066a-cru.h index 553f972835..d3a9824ef6 100644 --- a/include/dt-bindings/clock/rk3066a-cru.h +++ b/include/dt-bindings/clock/rk3066a-cru.h @@ -1,7 +1,16 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * Copyright (c) 2014 MundoReader S.L. 
* Author: Heiko Stuebner + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. */ #ifndef _DT_BINDINGS_CLK_ROCKCHIP_RK3066A_H diff --git a/include/dt-bindings/clock/rk3188-cru-common.h b/include/dt-bindings/clock/rk3188-cru-common.h index afad90680f..4f53e70f68 100644 --- a/include/dt-bindings/clock/rk3188-cru-common.h +++ b/include/dt-bindings/clock/rk3188-cru-common.h @@ -1,7 +1,16 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * Copyright (c) 2014 MundoReader S.L. * Author: Heiko Stuebner + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
*/ #ifndef _DT_BINDINGS_CLK_ROCKCHIP_RK3188_COMMON_H @@ -59,14 +68,10 @@ #define ACLK_LCDC1 196 #define ACLK_GPU 197 #define ACLK_SMC 198 -#define ACLK_CIF1 199 +#define ACLK_CIF 199 #define ACLK_IPP 200 #define ACLK_RGA 201 #define ACLK_CIF0 202 -#define ACLK_CPU 203 -#define ACLK_PERI 204 -#define ACLK_VEPU 205 -#define ACLK_VDPU 206 /* pclk gates */ #define PCLK_GRF 320 @@ -99,10 +104,6 @@ #define PCLK_EFUSE 347 #define PCLK_TZPC 348 #define PCLK_TSADC 349 -#define PCLK_CPU 350 -#define PCLK_PERI 351 -#define PCLK_DDRUPCTL 352 -#define PCLK_PUBL 353 /* hclk gates */ #define HCLK_SDMMC 448 @@ -125,14 +126,8 @@ #define HCLK_IPP 465 #define HCLK_RGA 466 #define HCLK_NANDC0 467 -#define HCLK_CPU 468 -#define HCLK_PERI 469 -#define HCLK_CIF1 470 -#define HCLK_VEPU 471 -#define HCLK_VDPU 472 -#define HCLK_HDMI 473 -#define CLK_NR_CLKS (HCLK_HDMI + 1) +#define CLK_NR_CLKS (HCLK_NANDC0 + 1) /* soft-reset indices */ #define SRST_MCORE 2 diff --git a/include/dt-bindings/clock/rk3188-cru.h b/include/dt-bindings/clock/rk3188-cru.h index c45916ae68..9f2e631f26 100644 --- a/include/dt-bindings/clock/rk3188-cru.h +++ b/include/dt-bindings/clock/rk3188-cru.h @@ -1,7 +1,16 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * Copyright (c) 2014 MundoReader S.L. * Author: Heiko Stuebner + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
*/ #ifndef _DT_BINDINGS_CLK_ROCKCHIP_RK3188_H diff --git a/include/dt-bindings/clock/rk3228-cru.h b/include/dt-bindings/clock/rk3228-cru.h index de550ea56e..b27e2b1a65 100644 --- a/include/dt-bindings/clock/rk3228-cru.h +++ b/include/dt-bindings/clock/rk3228-cru.h @@ -1,7 +1,16 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * Copyright (c) 2015 Rockchip Electronics Co. Ltd. * Author: Jeffy Chen + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. */ #ifndef _DT_BINDINGS_CLK_ROCKCHIP_RK3228_H @@ -40,7 +49,6 @@ #define SCLK_EMMC_DRV 117 #define SCLK_SDMMC_SAMPLE 118 #define SCLK_SDIO_SAMPLE 119 -#define SCLK_SDIO_SRC 120 #define SCLK_EMMC_SAMPLE 121 #define SCLK_VOP 122 #define SCLK_HDMI_HDCP 123 @@ -53,18 +61,6 @@ #define SCLK_MAC_TX 130 #define SCLK_MAC_PHY 131 #define SCLK_MAC_OUT 132 -#define SCLK_VDEC_CABAC 133 -#define SCLK_VDEC_CORE 134 -#define SCLK_RGA 135 -#define SCLK_HDCP 136 -#define SCLK_HDMI_CEC 137 -#define SCLK_CRYPTO 138 -#define SCLK_TSP 139 -#define SCLK_HSADC 140 -#define SCLK_WIFI 141 -#define SCLK_OTGPHY0 142 -#define SCLK_OTGPHY1 143 -#define SCLK_HDMI_PHY 144 /* dclk gates */ #define DCLK_VOP 190 @@ -72,32 +68,15 @@ /* aclk gates */ #define ACLK_DMAC 194 -#define ACLK_CPU 195 -#define ACLK_VPU_PRE 196 -#define ACLK_RKVDEC_PRE 197 -#define ACLK_RGA_PRE 198 -#define ACLK_IEP_PRE 199 -#define ACLK_HDCP_PRE 200 -#define ACLK_VOP_PRE 201 -#define ACLK_VPU 202 -#define ACLK_RKVDEC 203 -#define ACLK_IEP 204 -#define ACLK_RGA 205 -#define ACLK_HDCP 206 #define ACLK_PERI 210 #define ACLK_VOP 
211 #define ACLK_GMAC 212 -#define ACLK_GPU 213 /* pclk gates */ #define PCLK_GPIO0 320 #define PCLK_GPIO1 321 #define PCLK_GPIO2 322 #define PCLK_GPIO3 323 -#define PCLK_VIO_H2P 324 -#define PCLK_HDCP 325 -#define PCLK_EFUSE_1024 326 -#define PCLK_EFUSE_256 327 #define PCLK_GRF 329 #define PCLK_I2C0 332 #define PCLK_I2C1 333 @@ -110,7 +89,6 @@ #define PCLK_TSADC 344 #define PCLK_PWM 350 #define PCLK_TIMER 353 -#define PCLK_CPU 354 #define PCLK_PERI 363 #define PCLK_HDMI_CTRL 364 #define PCLK_HDMI_PHY 365 @@ -126,24 +104,6 @@ #define HCLK_SDMMC 456 #define HCLK_SDIO 457 #define HCLK_EMMC 459 -#define HCLK_CPU 460 -#define HCLK_VPU_PRE 461 -#define HCLK_RKVDEC_PRE 462 -#define HCLK_VIO_PRE 463 -#define HCLK_VPU 464 -#define HCLK_RKVDEC 465 -#define HCLK_VIO 466 -#define HCLK_RGA 467 -#define HCLK_IEP 468 -#define HCLK_VIO_H2P 469 -#define HCLK_HDCP_MMU 470 -#define HCLK_HOST0 471 -#define HCLK_HOST1 472 -#define HCLK_HOST2 473 -#define HCLK_OTG 474 -#define HCLK_TSP 475 -#define HCLK_M_CRYPTO 476 -#define HCLK_S_CRYPTO 477 #define HCLK_PERI 478 #define CLK_NR_CLKS (HCLK_PERI + 1) diff --git a/include/dt-bindings/clock/rk3288-cru.h b/include/dt-bindings/clock/rk3288-cru.h index 33819acbfc..9a586e2d9c 100644 --- a/include/dt-bindings/clock/rk3288-cru.h +++ b/include/dt-bindings/clock/rk3288-cru.h @@ -1,7 +1,16 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * Copyright (c) 2014 MundoReader S.L. * Author: Heiko Stuebner + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
*/ #ifndef _DT_BINDINGS_CLK_ROCKCHIP_RK3288_H @@ -79,7 +88,6 @@ #define SCLK_PVTM_GPU 124 #define SCLK_CRYPTO 125 #define SCLK_MIPIDSI_24M 126 -#define SCLK_VIP_OUT 127 #define SCLK_MAC 151 #define SCLK_MACREF_OUT 152 @@ -160,7 +168,6 @@ #define PCLK_WDT 368 #define PCLK_EFUSE256 369 #define PCLK_EFUSE1024 370 -#define PCLK_ISP_IN 371 /* hclk gates */ #define HCLK_GPS 448 diff --git a/include/dt-bindings/clock/rk3368-cru.h b/include/dt-bindings/clock/rk3368-cru.h index 83c72a163f..9c5dd9ba2f 100644 --- a/include/dt-bindings/clock/rk3368-cru.h +++ b/include/dt-bindings/clock/rk3368-cru.h @@ -1,6 +1,15 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * Copyright (c) 2015 Heiko Stuebner + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
*/ #ifndef _DT_BINDINGS_CLK_ROCKCHIP_RK3368_H @@ -35,12 +44,13 @@ #define SCLK_I2S_8CH 82 #define SCLK_SPDIF_8CH 83 #define SCLK_I2S_2CH 84 -#define SCLK_TIMER00 85 -#define SCLK_TIMER01 86 -#define SCLK_TIMER02 87 -#define SCLK_TIMER03 88 -#define SCLK_TIMER04 89 -#define SCLK_TIMER05 90 +#define SCLK_TIMER0 85 +#define SCLK_TIMER1 86 +#define SCLK_TIMER2 87 +#define SCLK_TIMER3 88 +#define SCLK_TIMER4 89 +#define SCLK_TIMER5 90 +#define SCLK_TIMER6 91 #define SCLK_OTGPHY0 93 #define SCLK_OTG_ADP 96 #define SCLK_HSICPHY480M 97 @@ -72,13 +82,6 @@ #define SCLK_SFC 126 #define SCLK_MAC 127 #define SCLK_MACREF_OUT 128 -#define SCLK_TIMER10 133 -#define SCLK_TIMER11 134 -#define SCLK_TIMER12 135 -#define SCLK_TIMER13 136 -#define SCLK_TIMER14 137 -#define SCLK_TIMER15 138 -#define SCLK_VIP_OUT 139 #define DCLK_VOP 190 #define MCLK_CRYPTO 191 @@ -148,9 +151,6 @@ #define PCLK_ISP 366 #define PCLK_VIP 367 #define PCLK_WDT 368 -#define PCLK_EFUSE256 369 -#define PCLK_DPHYRX 370 -#define PCLK_DPHYTX0 371 /* hclk gates */ #define HCLK_SFC 448 diff --git a/include/dt-bindings/clock/rk3399-cru.h b/include/dt-bindings/clock/rk3399-cru.h index 44e0a319f0..220a60f20d 100644 --- a/include/dt-bindings/clock/rk3399-cru.h +++ b/include/dt-bindings/clock/rk3399-cru.h @@ -1,7 +1,16 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * Copyright (c) 2016 Rockchip Electronics Co. Ltd. * Author: Xing Zheng + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
*/ #ifndef _DT_BINDINGS_CLK_ROCKCHIP_RK3399_H @@ -123,8 +132,6 @@ #define SCLK_RMII_SRC 166 #define SCLK_PCIEPHY_REF100M 167 #define SCLK_DDRC 168 -#define SCLK_TESTCLKOUT1 169 -#define SCLK_TESTCLKOUT2 170 #define DCLK_VOP0 180 #define DCLK_VOP1 181 diff --git a/include/dt-bindings/clock/rockchip,rk808.h b/include/dt-bindings/clock/rockchip,rk808.h index 75dabfc6ad..1a873432f9 100644 --- a/include/dt-bindings/clock/rockchip,rk808.h +++ b/include/dt-bindings/clock/rockchip,rk808.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ /* * This header provides constants clk index RK808 pmic clkout */ diff --git a/include/dt-bindings/clock/s3c2410.h b/include/dt-bindings/clock/s3c2410.h index 0fb65c3f2f..352a7673fc 100644 --- a/include/dt-bindings/clock/s3c2410.h +++ b/include/dt-bindings/clock/s3c2410.h @@ -1,7 +1,10 @@ -/* SPDX-License-Identifier: GPL-2.0 */ /* * Copyright (c) 2013 Heiko Stuebner * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * * Device Tree binding constants clock controllers of Samsung S3C2410 and later. */ diff --git a/include/dt-bindings/clock/s3c2412.h b/include/dt-bindings/clock/s3c2412.h index b4656156cc..aac1dcfda8 100644 --- a/include/dt-bindings/clock/s3c2412.h +++ b/include/dt-bindings/clock/s3c2412.h @@ -1,7 +1,10 @@ -/* SPDX-License-Identifier: GPL-2.0 */ /* * Copyright (c) 2013 Heiko Stuebner * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * * Device Tree binding constants clock controllers of Samsung S3C2412. 
*/ diff --git a/include/dt-bindings/clock/s3c2443.h b/include/dt-bindings/clock/s3c2443.h index a9d2f105d5..37e66b054d 100644 --- a/include/dt-bindings/clock/s3c2443.h +++ b/include/dt-bindings/clock/s3c2443.h @@ -1,7 +1,10 @@ -/* SPDX-License-Identifier: GPL-2.0 */ /* * Copyright (c) 2013 Heiko Stuebner * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * * Device Tree binding constants clock controllers of Samsung S3C2443 and later. */ @@ -23,8 +26,6 @@ #define ARMCLK 4 #define HCLK 5 #define PCLK 6 -#define MPLL 7 -#define EPLL 8 /* Special clocks */ #define SCLK_HSSPI0 16 diff --git a/include/dt-bindings/clock/s5pv210-audss.h b/include/dt-bindings/clock/s5pv210-audss.h index 84d62fe7a7..fe57406e24 100644 --- a/include/dt-bindings/clock/s5pv210-audss.h +++ b/include/dt-bindings/clock/s5pv210-audss.h @@ -1,7 +1,10 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright (c) 2014 Tomasz Figa * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * * This header provides constants for Samsung audio subsystem * clock controller. * diff --git a/include/dt-bindings/clock/s5pv210.h b/include/dt-bindings/clock/s5pv210.h index c36699c2fa..e88986b7c6 100644 --- a/include/dt-bindings/clock/s5pv210.h +++ b/include/dt-bindings/clock/s5pv210.h @@ -1,8 +1,11 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright (c) 2013 Samsung Electronics Co., Ltd. * Author: Mateusz Krawczuk * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * * Device Tree binding constants for Samsung S5PV210 clock controller. 
*/ diff --git a/include/dt-bindings/clock/samsung,s2mps11.h b/include/dt-bindings/clock/samsung,s2mps11.h index 5ece35d429..b903d7de27 100644 --- a/include/dt-bindings/clock/samsung,s2mps11.h +++ b/include/dt-bindings/clock/samsung,s2mps11.h @@ -1,7 +1,10 @@ -/* SPDX-License-Identifier: GPL-2.0 */ /* * Copyright (C) 2015 Markus Reichl * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * * Device Tree binding constants clocks for the Samsung S2MPS11 PMIC. */ diff --git a/include/dt-bindings/clock/samsung,s3c64xx-clock.h b/include/dt-bindings/clock/samsung,s3c64xx-clock.h index 19d233f37e..ad95c7f500 100644 --- a/include/dt-bindings/clock/samsung,s3c64xx-clock.h +++ b/include/dt-bindings/clock/samsung,s3c64xx-clock.h @@ -1,9 +1,12 @@ -/* SPDX-License-Identifier: GPL-2.0 */ /* * Copyright (c) 2013 Tomasz Figa * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * * Device Tree binding constants for Samsung S3C64xx clock controller. - */ +*/ #ifndef _DT_BINDINGS_CLOCK_SAMSUNG_S3C64XX_CLOCK_H #define _DT_BINDINGS_CLOCK_SAMSUNG_S3C64XX_CLOCK_H diff --git a/include/dt-bindings/clock/sh73a0-clock.h b/include/dt-bindings/clock/sh73a0-clock.h index 5b544ad7f9..2eca353a29 100644 --- a/include/dt-bindings/clock/sh73a0-clock.h +++ b/include/dt-bindings/clock/sh73a0-clock.h @@ -1,6 +1,10 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * Copyright 2014 Ulrich Hecht + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. 
*/ #ifndef __DT_BINDINGS_CLOCK_SH73A0_H__ diff --git a/include/dt-bindings/clock/stih407-clks.h b/include/dt-bindings/clock/stih407-clks.h index f0936c1337..082edd9bad 100644 --- a/include/dt-bindings/clock/stih407-clks.h +++ b/include/dt-bindings/clock/stih407-clks.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ /* * This header provides constants clk index STMicroelectronics * STiH407 SoC. diff --git a/include/dt-bindings/clock/stih410-clks.h b/include/dt-bindings/clock/stih410-clks.h index 90cbe6154c..2097a4bbe1 100644 --- a/include/dt-bindings/clock/stih410-clks.h +++ b/include/dt-bindings/clock/stih410-clks.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ /* * This header provides constants clk index STMicroelectronics * STiH410 SoC. diff --git a/include/dt-bindings/clock/stih415-clks.h b/include/dt-bindings/clock/stih415-clks.h new file mode 100644 index 0000000000..d80caa68ae --- /dev/null +++ b/include/dt-bindings/clock/stih415-clks.h @@ -0,0 +1,16 @@ +/* + * This header provides constants clk index STMicroelectronics + * STiH415 SoC. + */ +#ifndef _CLK_STIH415 +#define _CLK_STIH415 + +/* CLOCKGEN A0 */ +#define CLK_ICN_REG 0 +#define CLK_ETH1_PHY 4 + +/* CLOCKGEN A1 */ +#define CLK_ICN_IF_2 0 +#define CLK_GMAC0_PHY 3 + +#endif diff --git a/include/dt-bindings/clock/stih416-clks.h b/include/dt-bindings/clock/stih416-clks.h index 7430227802..f9bdbd1356 100644 --- a/include/dt-bindings/clock/stih416-clks.h +++ b/include/dt-bindings/clock/stih416-clks.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ /* * This header provides constants clk index STMicroelectronics * STiH416 SoC. 
diff --git a/include/dt-bindings/clock/stih418-clks.h b/include/dt-bindings/clock/stih418-clks.h index 0e7fba0c52..b62aa0b202 100644 --- a/include/dt-bindings/clock/stih418-clks.h +++ b/include/dt-bindings/clock/stih418-clks.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ /* * This header provides constants clk index STMicroelectronics * STiH418 SoC. diff --git a/include/dt-bindings/clock/sun6i-a31-ccu.h b/include/dt-bindings/clock/sun6i-a31-ccu.h index 39878d9dce..4482530fb6 100644 --- a/include/dt-bindings/clock/sun6i-a31-ccu.h +++ b/include/dt-bindings/clock/sun6i-a31-ccu.h @@ -43,14 +43,8 @@ #ifndef _DT_BINDINGS_CLK_SUN6I_A31_H_ #define _DT_BINDINGS_CLK_SUN6I_A31_H_ -#define CLK_PLL_VIDEO0_2X 7 - #define CLK_PLL_PERIPH 10 -#define CLK_PLL_VIDEO1_2X 13 - -#define CLK_PLL_MIPI 15 - #define CLK_CPU 18 #define CLK_AHB1_MIPIDSI 23 diff --git a/include/dt-bindings/clock/sun8i-a23-a33-ccu.h b/include/dt-bindings/clock/sun8i-a23-a33-ccu.h index eb524d0bbd..f8222b6b2c 100644 --- a/include/dt-bindings/clock/sun8i-a23-a33-ccu.h +++ b/include/dt-bindings/clock/sun8i-a23-a33-ccu.h @@ -43,8 +43,6 @@ #ifndef _DT_BINDINGS_CLK_SUN8I_A23_A33_H_ #define _DT_BINDINGS_CLK_SUN8I_A23_A33_H_ -#define CLK_PLL_MIPI 13 - #define CLK_CPUX 18 #define CLK_BUS_MIPI_DSI 23 diff --git a/include/dt-bindings/clock/sun8i-h3-ccu.h b/include/dt-bindings/clock/sun8i-h3-ccu.h index 30d2d15373..efb7ba2bd5 100644 --- a/include/dt-bindings/clock/sun8i-h3-ccu.h +++ b/include/dt-bindings/clock/sun8i-h3-ccu.h @@ -43,10 +43,6 @@ #ifndef _DT_BINDINGS_CLK_SUN8I_H3_H_ #define _DT_BINDINGS_CLK_SUN8I_H3_H_ -#define CLK_PLL_VIDEO 6 - -#define CLK_PLL_PERIPH0 9 - #define CLK_CPUX 14 #define CLK_BUS_CE 20 @@ -95,7 +91,7 @@ #define CLK_BUS_UART1 63 #define CLK_BUS_UART2 64 #define CLK_BUS_UART3 65 -#define CLK_BUS_SCR0 66 +#define CLK_BUS_SCR 66 #define CLK_BUS_EPHY 67 #define CLK_BUS_DBG 68 @@ -143,10 +139,7 @@ #define CLK_AVS 110 #define CLK_HDMI 111 #define CLK_HDMI_DDC 112 -#define CLK_MBUS 113 + 
#define CLK_GPU 114 -/* New clocks imported in H5 */ -#define CLK_BUS_SCR1 115 - #endif /* _DT_BINDINGS_CLK_SUN8I_H3_H_ */ diff --git a/include/dt-bindings/clock/tegra114-car.h b/include/dt-bindings/clock/tegra114-car.h index a93426f008..534c03f8ad 100644 --- a/include/dt-bindings/clock/tegra114-car.h +++ b/include/dt-bindings/clock/tegra114-car.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ /* * This header provides constants for binding nvidia,tegra114-car. * @@ -157,7 +156,7 @@ /* 133 */ /* 134 */ /* 135 */ -#define TEGRA114_CLK_CEC 136 +/* 136 */ /* 137 */ /* 138 */ /* 139 */ @@ -228,8 +227,6 @@ #define TEGRA114_CLK_CLK_M 201 #define TEGRA114_CLK_CLK_M_DIV2 202 #define TEGRA114_CLK_CLK_M_DIV4 203 -#define TEGRA114_CLK_OSC_DIV2 202 -#define TEGRA114_CLK_OSC_DIV4 203 #define TEGRA114_CLK_PLL_REF 204 #define TEGRA114_CLK_PLL_C 205 #define TEGRA114_CLK_PLL_C_OUT1 206 @@ -272,11 +269,11 @@ #define TEGRA114_CLK_AUDIO3 242 #define TEGRA114_CLK_AUDIO4 243 #define TEGRA114_CLK_SPDIF 244 -/* 245 */ -/* 246 */ -/* 247 */ -/* 248 */ -#define TEGRA114_CLK_OSC 249 +#define TEGRA114_CLK_CLK_OUT_1 245 +#define TEGRA114_CLK_CLK_OUT_2 246 +#define TEGRA114_CLK_CLK_OUT_3 247 +#define TEGRA114_CLK_BLINK 248 +/* 249 */ /* 250 */ /* 251 */ #define TEGRA114_CLK_XUSB_HOST_SRC 252 @@ -335,9 +332,9 @@ #define TEGRA114_CLK_AUDIO3_MUX 303 #define TEGRA114_CLK_AUDIO4_MUX 304 #define TEGRA114_CLK_SPDIF_MUX 305 -/* 306 */ -/* 307 */ -/* 308 */ +#define TEGRA114_CLK_CLK_OUT_1_MUX 306 +#define TEGRA114_CLK_CLK_OUT_2_MUX 307 +#define TEGRA114_CLK_CLK_OUT_3_MUX 308 #define TEGRA114_CLK_DSIA_MUX 309 #define TEGRA114_CLK_DSIB_MUX 310 #define TEGRA114_CLK_XUSB_SS_DIV2 311 diff --git a/include/dt-bindings/clock/tegra124-car-common.h b/include/dt-bindings/clock/tegra124-car-common.h index c59f9de01b..a215609056 100644 --- a/include/dt-bindings/clock/tegra124-car-common.h +++ b/include/dt-bindings/clock/tegra124-car-common.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ /* * This 
header provides constants for binding nvidia,tegra124-car or * nvidia,tegra132-car. @@ -157,7 +156,7 @@ /* 133 */ /* 134 */ /* 135 */ -#define TEGRA124_CLK_CEC 136 +/* 136 */ /* 137 */ /* 138 */ /* 139 */ @@ -227,8 +226,6 @@ #define TEGRA124_CLK_CLK_M 201 #define TEGRA124_CLK_CLK_M_DIV2 202 #define TEGRA124_CLK_CLK_M_DIV4 203 -#define TEGRA124_CLK_OSC_DIV2 202 -#define TEGRA124_CLK_OSC_DIV4 203 #define TEGRA124_CLK_PLL_REF 204 #define TEGRA124_CLK_PLL_C 205 #define TEGRA124_CLK_PLL_C_OUT1 206 @@ -271,11 +268,11 @@ #define TEGRA124_CLK_AUDIO3 242 #define TEGRA124_CLK_AUDIO4 243 #define TEGRA124_CLK_SPDIF 244 -/* 245 */ -/* 246 */ -/* 247 */ -/* 248 */ -#define TEGRA124_CLK_OSC 249 +#define TEGRA124_CLK_CLK_OUT_1 245 +#define TEGRA124_CLK_CLK_OUT_2 246 +#define TEGRA124_CLK_CLK_OUT_3 247 +#define TEGRA124_CLK_BLINK 248 +/* 249 */ /* 250 */ /* 251 */ #define TEGRA124_CLK_XUSB_HOST_SRC 252 @@ -334,13 +331,12 @@ #define TEGRA124_CLK_AUDIO3_MUX 303 #define TEGRA124_CLK_AUDIO4_MUX 304 #define TEGRA124_CLK_SPDIF_MUX 305 -/* 306 */ -/* 307 */ -/* 308 */ +#define TEGRA124_CLK_CLK_OUT_1_MUX 306 +#define TEGRA124_CLK_CLK_OUT_2_MUX 307 +#define TEGRA124_CLK_CLK_OUT_3_MUX 308 /* 309 */ /* 310 */ -#define TEGRA124_CLK_SOR0_LVDS 311 /* deprecated */ -#define TEGRA124_CLK_SOR0_OUT 311 +#define TEGRA124_CLK_SOR0_LVDS 311 #define TEGRA124_CLK_XUSB_SS_DIV2 312 #define TEGRA124_CLK_PLL_M_UD 313 diff --git a/include/dt-bindings/clock/tegra124-car.h b/include/dt-bindings/clock/tegra124-car.h index c520ee2319..2860737f04 100644 --- a/include/dt-bindings/clock/tegra124-car.h +++ b/include/dt-bindings/clock/tegra124-car.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ /* * This header provides Tegra124-specific constants for binding * nvidia,tegra124-car. 
diff --git a/include/dt-bindings/clock/tegra20-car.h b/include/dt-bindings/clock/tegra20-car.h index fe541f6279..04500b243a 100644 --- a/include/dt-bindings/clock/tegra20-car.h +++ b/include/dt-bindings/clock/tegra20-car.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ /* * This header provides constants for binding nvidia,tegra20-car. * @@ -131,7 +130,7 @@ #define TEGRA20_CLK_CCLK 108 #define TEGRA20_CLK_HCLK 109 #define TEGRA20_CLK_PCLK 110 -/* 111 */ +#define TEGRA20_CLK_BLINK 111 #define TEGRA20_CLK_PLL_A 112 #define TEGRA20_CLK_PLL_A_OUT0 113 #define TEGRA20_CLK_PLL_C 114 diff --git a/include/dt-bindings/clock/tegra210-car.h b/include/dt-bindings/clock/tegra210-car.h index 9cfcc3baa5..35288b20f2 100644 --- a/include/dt-bindings/clock/tegra210-car.h +++ b/include/dt-bindings/clock/tegra210-car.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ /* * This header provides constants for binding nvidia,tegra210-car. * @@ -40,7 +39,7 @@ /* 20 (register bit affects vi and vi_sensor) */ /* 21 */ #define TEGRA210_CLK_USBD 22 -#define TEGRA210_CLK_ISPA 23 +#define TEGRA210_CLK_ISP 23 /* 24 */ /* 25 */ #define TEGRA210_CLK_DISP2 26 @@ -95,7 +94,7 @@ #define TEGRA210_CLK_CSITE 73 /* 74 */ /* 75 */ -#define TEGRA210_CLK_LA 76 +/* 76 */ /* 77 */ #define TEGRA210_CLK_SOC_THERM 78 #define TEGRA210_CLK_DTV 79 @@ -157,7 +156,7 @@ /* 133 */ /* 134 */ /* 135 */ -#define TEGRA210_CLK_CEC 136 +/* 136 */ /* 137 */ /* 138 */ /* 139 */ @@ -174,7 +173,7 @@ #define TEGRA210_CLK_ENTROPY 149 /* 150 */ /* 151 */ -#define TEGRA210_CLK_DP2 152 +/* 152 */ /* 153 */ /* 154 */ /* 155 (bit affects dfll_ref and dfll_soc) */ @@ -211,7 +210,7 @@ #define TEGRA210_CLK_DBGAPB 185 /* 186 */ #define TEGRA210_CLK_PLL_P_OUT_ADSP 187 -/* 188 ((bit affects pll_a_out_adsp and pll_a_out0_out_adsp)*/ +/* 188 */ #define TEGRA210_CLK_PLL_G_REF 189 /* 190 */ /* 191 */ @@ -223,7 +222,7 @@ /* 196 */ #define TEGRA210_CLK_DMIC3 197 #define TEGRA210_CLK_APE 198 -#define TEGRA210_CLK_ADSP 199 +/* 199 */ 
/* 200 */ /* 201 */ #define TEGRA210_CLK_MAUD 202 @@ -242,10 +241,10 @@ /* 215 */ /* 216 */ /* 217 */ -#define TEGRA210_CLK_ADSP_NEON 218 +/* 218 */ #define TEGRA210_CLK_NVENC 219 -#define TEGRA210_CLK_IQC2 220 -#define TEGRA210_CLK_IQC1 221 +/* 220 */ +/* 221 */ #define TEGRA210_CLK_SOR_SAFE 222 #define TEGRA210_CLK_PLL_P_OUT_CPU 223 @@ -262,8 +261,6 @@ #define TEGRA210_CLK_CLK_M 233 #define TEGRA210_CLK_CLK_M_DIV2 234 #define TEGRA210_CLK_CLK_M_DIV4 235 -#define TEGRA210_CLK_OSC_DIV2 234 -#define TEGRA210_CLK_OSC_DIV4 235 #define TEGRA210_CLK_PLL_REF 236 #define TEGRA210_CLK_PLL_C 237 #define TEGRA210_CLK_PLL_C_OUT1 238 @@ -306,13 +303,12 @@ #define TEGRA210_CLK_AUDIO3 274 #define TEGRA210_CLK_AUDIO4 275 #define TEGRA210_CLK_SPDIF 276 -/* 277 */ -#define TEGRA210_CLK_QSPI_PM 278 -/* 279 */ -/* 280 */ -#define TEGRA210_CLK_SOR0_LVDS 281 /* deprecated */ -#define TEGRA210_CLK_SOR0_OUT 281 -#define TEGRA210_CLK_SOR1_OUT 282 +#define TEGRA210_CLK_CLK_OUT_1 277 +#define TEGRA210_CLK_CLK_OUT_2 278 +#define TEGRA210_CLK_CLK_OUT_3 279 +#define TEGRA210_CLK_BLINK 280 +/* 281 */ +#define TEGRA210_CLK_SOR1_SRC 282 /* 283 */ #define TEGRA210_CLK_XUSB_HOST_SRC 284 #define TEGRA210_CLK_XUSB_FALCON_SRC 285 @@ -351,14 +347,14 @@ #define TEGRA210_CLK_PLL_P_OUT_XUSB 317 #define TEGRA210_CLK_XUSB_SSP_SRC 318 #define TEGRA210_CLK_PLL_RE_OUT1 319 -#define TEGRA210_CLK_PLL_MB_UD 320 -#define TEGRA210_CLK_PLL_P_UD 321 -#define TEGRA210_CLK_ISP 322 -#define TEGRA210_CLK_PLL_A_OUT_ADSP 323 -#define TEGRA210_CLK_PLL_A_OUT0_OUT_ADSP 324 +/* 320 */ +/* 321 */ +/* 322 */ +/* 323 */ +/* 324 */ /* 325 */ -#define TEGRA210_CLK_OSC 326 -#define TEGRA210_CLK_CSI_TPG 327 +/* 326 */ +/* 327 */ /* 328 */ /* 329 */ /* 330 */ @@ -388,27 +384,18 @@ #define TEGRA210_CLK_AUDIO3_MUX 353 #define TEGRA210_CLK_AUDIO4_MUX 354 #define TEGRA210_CLK_SPDIF_MUX 355 -/* 356 */ -/* 357 */ -/* 358 */ +#define TEGRA210_CLK_CLK_OUT_1_MUX 356 +#define TEGRA210_CLK_CLK_OUT_2_MUX 357 +#define TEGRA210_CLK_CLK_OUT_3_MUX 
358 #define TEGRA210_CLK_DSIA_MUX 359 #define TEGRA210_CLK_DSIB_MUX 360 -/* 361 */ +#define TEGRA210_CLK_SOR0_LVDS 361 #define TEGRA210_CLK_XUSB_SS_DIV2 362 #define TEGRA210_CLK_PLL_M_UD 363 #define TEGRA210_CLK_PLL_C_UD 364 #define TEGRA210_CLK_SCLK_MUX 365 -#define TEGRA210_CLK_ACLK 370 - -#define TEGRA210_CLK_DMIC1_SYNC_CLK 388 -#define TEGRA210_CLK_DMIC1_SYNC_CLK_MUX 389 -#define TEGRA210_CLK_DMIC2_SYNC_CLK 390 -#define TEGRA210_CLK_DMIC2_SYNC_CLK_MUX 391 -#define TEGRA210_CLK_DMIC3_SYNC_CLK 392 -#define TEGRA210_CLK_DMIC3_SYNC_CLK_MUX 393 - -#define TEGRA210_CLK_CLK_MAX 394 +#define TEGRA210_CLK_CLK_MAX 366 #endif /* _DT_BINDINGS_CLOCK_TEGRA210_CAR_H */ diff --git a/include/dt-bindings/clock/tegra30-car.h b/include/dt-bindings/clock/tegra30-car.h index f193663e6f..889e49ba0a 100644 --- a/include/dt-bindings/clock/tegra30-car.h +++ b/include/dt-bindings/clock/tegra30-car.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ /* * This header provides constants for binding nvidia,tegra30-car. 
* @@ -157,7 +156,7 @@ /* 133 */ /* 134 */ /* 135 */ -#define TEGRA30_CLK_CEC 136 +/* 136 */ /* 137 */ /* 138 */ /* 139 */ @@ -196,8 +195,6 @@ #define TEGRA30_CLK_CLK_M 171 #define TEGRA30_CLK_CLK_M_DIV2 172 #define TEGRA30_CLK_CLK_M_DIV4 173 -#define TEGRA30_CLK_OSC_DIV2 172 -#define TEGRA30_CLK_OSC_DIV4 173 #define TEGRA30_CLK_PLL_REF 174 #define TEGRA30_CLK_PLL_C 175 #define TEGRA30_CLK_PLL_C_OUT1 176 @@ -232,11 +229,11 @@ #define TEGRA30_CLK_AUDIO3 204 #define TEGRA30_CLK_AUDIO4 205 #define TEGRA30_CLK_SPDIF 206 -/* 207 */ -/* 208 */ -/* 209 */ +#define TEGRA30_CLK_CLK_OUT_1 207 /* (extern1) */ +#define TEGRA30_CLK_CLK_OUT_2 208 /* (extern2) */ +#define TEGRA30_CLK_CLK_OUT_3 209 /* (extern3) */ #define TEGRA30_CLK_SCLK 210 -/* 211 */ +#define TEGRA30_CLK_BLINK 211 #define TEGRA30_CLK_CCLK_G 212 #define TEGRA30_CLK_CCLK_LP 213 #define TEGRA30_CLK_TWD 214 @@ -245,7 +242,7 @@ #define TEGRA30_CLK_HCLK 217 #define TEGRA30_CLK_PCLK 218 /* 219 */ -#define TEGRA30_CLK_OSC 220 +/* 220 */ /* 221 */ /* 222 */ /* 223 */ @@ -262,9 +259,9 @@ /* 297 */ /* 298 */ /* 299 */ -/* 300 */ -/* 301 */ -/* 302 */ +#define TEGRA30_CLK_CLK_OUT_1_MUX 300 +#define TEGRA30_CLK_CLK_OUT_2_MUX 301 +#define TEGRA30_CLK_CLK_OUT_3_MUX 302 #define TEGRA30_CLK_AUDIO0_MUX 303 #define TEGRA30_CLK_AUDIO1_MUX 304 #define TEGRA30_CLK_AUDIO2_MUX 305 diff --git a/include/dt-bindings/clock/vf610-clock.h b/include/dt-bindings/clock/vf610-clock.h index 373644e467..45997750c8 100644 --- a/include/dt-bindings/clock/vf610-clock.h +++ b/include/dt-bindings/clock/vf610-clock.h @@ -1,6 +1,10 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * Copyright 2013 Freescale Semiconductor, Inc. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. 
*/ #ifndef __DT_BINDINGS_CLOCK_VF610_H @@ -195,8 +199,6 @@ #define VF610_CLK_WKPU 186 #define VF610_CLK_TCON0 187 #define VF610_CLK_TCON1 188 -#define VF610_CLK_CAAM 189 -#define VF610_CLK_CRC 190 -#define VF610_CLK_END 191 +#define VF610_CLK_END 189 #endif /* __DT_BINDINGS_CLOCK_VF610_H */ diff --git a/include/dt-bindings/clock/zx296702-clock.h b/include/dt-bindings/clock/zx296702-clock.h index e04126111a..26ee564b0e 100644 --- a/include/dt-bindings/clock/zx296702-clock.h +++ b/include/dt-bindings/clock/zx296702-clock.h @@ -1,7 +1,10 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright 2014 Linaro Ltd. * Copyright (C) 2014 ZTE Corporation. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. */ #ifndef __DT_BINDINGS_CLOCK_ZX296702_H diff --git a/include/dt-bindings/clock/zx296718-clock.h b/include/dt-bindings/clock/zx296718-clock.h index bf2ff6d2ee..822d523850 100644 --- a/include/dt-bindings/clock/zx296718-clock.h +++ b/include/dt-bindings/clock/zx296718-clock.h @@ -1,6 +1,9 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright (C) 2015 - 2016 ZTE Corporation. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
*/ #ifndef __DT_BINDINGS_CLOCK_ZX296718_H #define __DT_BINDINGS_CLOCK_ZX296718_H @@ -154,11 +157,7 @@ #define AUDIO_TDM_WCLK 17 #define AUDIO_TDM_PCLK 18 #define AUDIO_TS_PCLK 19 -#define I2S0_WCLK_MUX 20 -#define I2S1_WCLK_MUX 21 -#define I2S2_WCLK_MUX 22 -#define I2S3_WCLK_MUX 23 -#define AUDIO_NR_CLKS 24 +#define AUDIO_NR_CLKS 20 #endif diff --git a/include/dt-bindings/display/tda998x.h b/include/dt-bindings/display/tda998x.h index 746831ff39..34757a3847 100644 --- a/include/dt-bindings/display/tda998x.h +++ b/include/dt-bindings/display/tda998x.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _DT_BINDINGS_TDA998X_H #define _DT_BINDINGS_TDA998X_H diff --git a/include/dt-bindings/dma/at91.h b/include/dt-bindings/dma/at91.h index e7b3e06554..ab6cbba454 100644 --- a/include/dt-bindings/dma/at91.h +++ b/include/dt-bindings/dma/at91.h @@ -1,8 +1,9 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * This header provides macros for at91 dma bindings. * * Copyright (C) 2013 Ludovic Desroches + * + * GPLv2 only */ #ifndef __DT_BINDINGS_AT91_DMA_H__ diff --git a/include/dt-bindings/dma/nbpfaxi.h b/include/dt-bindings/dma/nbpfaxi.h index 88e59acc06..c1a5b9e0d6 100644 --- a/include/dt-bindings/dma/nbpfaxi.h +++ b/include/dt-bindings/dma/nbpfaxi.h @@ -1,7 +1,10 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright (C) 2013-2014 Renesas Electronics Europe Ltd. * Author: Guennadi Liakhovetski + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. */ #ifndef DT_BINDINGS_NBPFAXI_H diff --git a/include/dt-bindings/gpio/gpio.h b/include/dt-bindings/gpio/gpio.h index c029467e82..c673d2c87c 100644 --- a/include/dt-bindings/gpio/gpio.h +++ b/include/dt-bindings/gpio/gpio.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ /* * This header provides constants for most GPIO bindings. 
* @@ -18,25 +17,11 @@ #define GPIO_PUSH_PULL 0 #define GPIO_SINGLE_ENDED 2 -/* Bit 2 express Open drain or open source */ -#define GPIO_LINE_OPEN_SOURCE 0 -#define GPIO_LINE_OPEN_DRAIN 4 - /* - * Open Drain/Collector is the combination of single-ended open drain interface. - * Open Source/Emitter is the combination of single-ended open source interface. + * Open Drain/Collector is the combination of single-ended active low, + * Open Source/Emitter is the combination of single-ended active high. */ -#define GPIO_OPEN_DRAIN (GPIO_SINGLE_ENDED | GPIO_LINE_OPEN_DRAIN) -#define GPIO_OPEN_SOURCE (GPIO_SINGLE_ENDED | GPIO_LINE_OPEN_SOURCE) - -/* Bit 3 express GPIO suspend/resume and reset persistence */ -#define GPIO_PERSISTENT 0 -#define GPIO_TRANSITORY 8 - -/* Bit 4 express pull up */ -#define GPIO_PULL_UP 16 - -/* Bit 5 express pull down */ -#define GPIO_PULL_DOWN 32 +#define GPIO_OPEN_DRAIN (GPIO_SINGLE_ENDED | GPIO_ACTIVE_LOW) +#define GPIO_OPEN_SOURCE (GPIO_SINGLE_ENDED | GPIO_ACTIVE_HIGH) #endif diff --git a/include/dt-bindings/gpio/meson-gxbb-gpio.h b/include/dt-bindings/gpio/meson-gxbb-gpio.h index 489c75b276..58654fd7aa 100644 --- a/include/dt-bindings/gpio/meson-gxbb-gpio.h +++ b/include/dt-bindings/gpio/meson-gxbb-gpio.h @@ -1,9 +1,15 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * GPIO definitions for Amlogic Meson GXBB SoCs * * Copyright (C) 2016 Endless Mobile, Inc. * Author: Carlo Caione + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * version 2 as published by the Free Software Foundation. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . 
*/ #ifndef _DT_BINDINGS_MESON_GXBB_GPIO_H @@ -23,7 +29,6 @@ #define GPIOAO_11 11 #define GPIOAO_12 12 #define GPIOAO_13 13 -#define GPIO_TEST_N 14 #define GPIOZ_0 0 #define GPIOZ_1 1 @@ -144,5 +149,6 @@ #define GPIOCLK_1 116 #define GPIOCLK_2 117 #define GPIOCLK_3 118 +#define GPIO_TEST_N 119 #endif diff --git a/include/dt-bindings/gpio/meson8-gpio.h b/include/dt-bindings/gpio/meson8-gpio.h index e2d083104d..fdaeb5cbf5 100644 --- a/include/dt-bindings/gpio/meson8-gpio.h +++ b/include/dt-bindings/gpio/meson8-gpio.h @@ -1,8 +1,14 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * GPIO definitions for Amlogic Meson8 SoCs * * Copyright (C) 2014 Beniamino Galvani + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * version 2 as published by the Free Software Foundation. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . */ #ifndef _DT_BINDINGS_MESON8_GPIO_H diff --git a/include/dt-bindings/gpio/meson8b-gpio.h b/include/dt-bindings/gpio/meson8b-gpio.h index 7c3bc0782e..c38cb20d71 100644 --- a/include/dt-bindings/gpio/meson8b-gpio.h +++ b/include/dt-bindings/gpio/meson8b-gpio.h @@ -1,121 +1,32 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * GPIO definitions for Amlogic Meson8b SoCs * * Copyright (C) 2015 Endless Mobile, Inc. * Author: Carlo Caione + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * version 2 as published by the Free Software Foundation. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . 
*/ #ifndef _DT_BINDINGS_MESON8B_GPIO_H #define _DT_BINDINGS_MESON8B_GPIO_H -/* EE (CBUS) GPIO chip */ -#define GPIOX_0 0 -#define GPIOX_1 1 -#define GPIOX_2 2 -#define GPIOX_3 3 -#define GPIOX_4 4 -#define GPIOX_5 5 -#define GPIOX_6 6 -#define GPIOX_7 7 -#define GPIOX_8 8 -#define GPIOX_9 9 -#define GPIOX_10 10 -#define GPIOX_11 11 -#define GPIOX_16 12 -#define GPIOX_17 13 -#define GPIOX_18 14 -#define GPIOX_19 15 -#define GPIOX_20 16 -#define GPIOX_21 17 +#include -#define GPIOY_0 18 -#define GPIOY_1 19 -#define GPIOY_3 20 -#define GPIOY_6 21 -#define GPIOY_7 22 -#define GPIOY_8 23 -#define GPIOY_9 24 -#define GPIOY_10 25 -#define GPIOY_11 26 -#define GPIOY_12 27 -#define GPIOY_13 28 -#define GPIOY_14 29 - -#define GPIODV_9 30 -#define GPIODV_24 31 -#define GPIODV_25 32 -#define GPIODV_26 33 -#define GPIODV_27 34 -#define GPIODV_28 35 -#define GPIODV_29 36 - -#define GPIOH_0 37 -#define GPIOH_1 38 -#define GPIOH_2 39 -#define GPIOH_3 40 -#define GPIOH_4 41 -#define GPIOH_5 42 -#define GPIOH_6 43 -#define GPIOH_7 44 -#define GPIOH_8 45 -#define GPIOH_9 46 - -#define CARD_0 47 -#define CARD_1 48 -#define CARD_2 49 -#define CARD_3 50 -#define CARD_4 51 -#define CARD_5 52 -#define CARD_6 53 - -#define BOOT_0 54 -#define BOOT_1 55 -#define BOOT_2 56 -#define BOOT_3 57 -#define BOOT_4 58 -#define BOOT_5 59 -#define BOOT_6 60 -#define BOOT_7 61 -#define BOOT_8 62 -#define BOOT_9 63 -#define BOOT_10 64 -#define BOOT_11 65 -#define BOOT_12 66 -#define BOOT_13 67 -#define BOOT_14 68 -#define BOOT_15 69 -#define BOOT_16 70 -#define BOOT_17 71 -#define BOOT_18 72 - -#define DIF_0_P 73 -#define DIF_0_N 74 -#define DIF_1_P 75 -#define DIF_1_N 76 -#define DIF_2_P 77 -#define DIF_2_N 78 -#define DIF_3_P 79 -#define DIF_3_N 80 -#define DIF_4_P 81 -#define DIF_4_N 82 - -/* AO GPIO chip */ -#define GPIOAO_0 0 -#define GPIOAO_1 1 -#define GPIOAO_2 2 -#define GPIOAO_3 3 -#define GPIOAO_4 4 -#define GPIOAO_5 5 -#define GPIOAO_6 6 -#define GPIOAO_7 7 -#define GPIOAO_8 8 -#define 
GPIOAO_9 9 -#define GPIOAO_10 10 -#define GPIOAO_11 11 -#define GPIOAO_12 12 -#define GPIOAO_13 13 -#define GPIO_BSD_EN 14 -#define GPIO_TEST_N 15 +/* GPIO Bank DIF */ +#define DIF_0_P 120 +#define DIF_0_N 121 +#define DIF_1_P 122 +#define DIF_1_N 123 +#define DIF_2_P 124 +#define DIF_2_N 125 +#define DIF_3_P 126 +#define DIF_3_N 127 +#define DIF_4_P 128 +#define DIF_4_N 129 #endif /* _DT_BINDINGS_MESON8B_GPIO_H */ diff --git a/include/dt-bindings/gpio/tegra-gpio.h b/include/dt-bindings/gpio/tegra-gpio.h index 7625dbc577..a1c09e88e8 100644 --- a/include/dt-bindings/gpio/tegra-gpio.h +++ b/include/dt-bindings/gpio/tegra-gpio.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ /* * This header provides constants for binding nvidia,tegra*-gpio. * diff --git a/include/dt-bindings/gpio/tegra186-gpio.h b/include/dt-bindings/gpio/tegra186-gpio.h index af0d9583be..38001c7023 100644 --- a/include/dt-bindings/gpio/tegra186-gpio.h +++ b/include/dt-bindings/gpio/tegra186-gpio.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ /* * This header provides constants for binding nvidia,tegra186-gpio*. * @@ -8,50 +7,50 @@ * The second cell contains standard flag values specified in gpio.h. 
*/ -#ifndef _DT_BINDINGS_GPIO_TEGRA186_GPIO_H -#define _DT_BINDINGS_GPIO_TEGRA186_GPIO_H +#ifndef _DT_BINDINGS_GPIO_TEGRA_GPIO_H +#define _DT_BINDINGS_GPIO_TEGRA_GPIO_H #include /* GPIOs implemented by main GPIO controller */ -#define TEGRA186_MAIN_GPIO_PORT_A 0 -#define TEGRA186_MAIN_GPIO_PORT_B 1 -#define TEGRA186_MAIN_GPIO_PORT_C 2 -#define TEGRA186_MAIN_GPIO_PORT_D 3 -#define TEGRA186_MAIN_GPIO_PORT_E 4 -#define TEGRA186_MAIN_GPIO_PORT_F 5 -#define TEGRA186_MAIN_GPIO_PORT_G 6 -#define TEGRA186_MAIN_GPIO_PORT_H 7 -#define TEGRA186_MAIN_GPIO_PORT_I 8 -#define TEGRA186_MAIN_GPIO_PORT_J 9 -#define TEGRA186_MAIN_GPIO_PORT_K 10 -#define TEGRA186_MAIN_GPIO_PORT_L 11 -#define TEGRA186_MAIN_GPIO_PORT_M 12 -#define TEGRA186_MAIN_GPIO_PORT_N 13 -#define TEGRA186_MAIN_GPIO_PORT_O 14 -#define TEGRA186_MAIN_GPIO_PORT_P 15 -#define TEGRA186_MAIN_GPIO_PORT_Q 16 -#define TEGRA186_MAIN_GPIO_PORT_R 17 -#define TEGRA186_MAIN_GPIO_PORT_T 18 -#define TEGRA186_MAIN_GPIO_PORT_X 19 -#define TEGRA186_MAIN_GPIO_PORT_Y 20 -#define TEGRA186_MAIN_GPIO_PORT_BB 21 -#define TEGRA186_MAIN_GPIO_PORT_CC 22 +#define TEGRA_MAIN_GPIO_PORT_A 0 +#define TEGRA_MAIN_GPIO_PORT_B 1 +#define TEGRA_MAIN_GPIO_PORT_C 2 +#define TEGRA_MAIN_GPIO_PORT_D 3 +#define TEGRA_MAIN_GPIO_PORT_E 4 +#define TEGRA_MAIN_GPIO_PORT_F 5 +#define TEGRA_MAIN_GPIO_PORT_G 6 +#define TEGRA_MAIN_GPIO_PORT_H 7 +#define TEGRA_MAIN_GPIO_PORT_I 8 +#define TEGRA_MAIN_GPIO_PORT_J 9 +#define TEGRA_MAIN_GPIO_PORT_K 10 +#define TEGRA_MAIN_GPIO_PORT_L 11 +#define TEGRA_MAIN_GPIO_PORT_M 12 +#define TEGRA_MAIN_GPIO_PORT_N 13 +#define TEGRA_MAIN_GPIO_PORT_O 14 +#define TEGRA_MAIN_GPIO_PORT_P 15 +#define TEGRA_MAIN_GPIO_PORT_Q 16 +#define TEGRA_MAIN_GPIO_PORT_R 17 +#define TEGRA_MAIN_GPIO_PORT_T 18 +#define TEGRA_MAIN_GPIO_PORT_X 19 +#define TEGRA_MAIN_GPIO_PORT_Y 20 +#define TEGRA_MAIN_GPIO_PORT_BB 21 +#define TEGRA_MAIN_GPIO_PORT_CC 22 -#define TEGRA186_MAIN_GPIO(port, offset) \ - ((TEGRA186_MAIN_GPIO_PORT_##port * 8) + offset) +#define 
TEGRA_MAIN_GPIO(port, offset) \ + ((TEGRA_MAIN_GPIO_PORT_##port * 8) + offset) /* GPIOs implemented by AON GPIO controller */ -#define TEGRA186_AON_GPIO_PORT_S 0 -#define TEGRA186_AON_GPIO_PORT_U 1 -#define TEGRA186_AON_GPIO_PORT_V 2 -#define TEGRA186_AON_GPIO_PORT_W 3 -#define TEGRA186_AON_GPIO_PORT_Z 4 -#define TEGRA186_AON_GPIO_PORT_AA 5 -#define TEGRA186_AON_GPIO_PORT_EE 6 -#define TEGRA186_AON_GPIO_PORT_FF 7 +#define TEGRA_AON_GPIO_PORT_S 0 +#define TEGRA_AON_GPIO_PORT_U 1 +#define TEGRA_AON_GPIO_PORT_V 2 +#define TEGRA_AON_GPIO_PORT_W 3 +#define TEGRA_AON_GPIO_PORT_Z 4 +#define TEGRA_AON_GPIO_PORT_AA 5 +#define TEGRA_AON_GPIO_PORT_EE 6 +#define TEGRA_AON_GPIO_PORT_FF 7 -#define TEGRA186_AON_GPIO(port, offset) \ - ((TEGRA186_AON_GPIO_PORT_##port * 8) + offset) +#define TEGRA_AON_GPIO(port, offset) \ + ((TEGRA_AON_GPIO_PORT_##port * 8) + offset) #endif diff --git a/include/dt-bindings/i2c/i2c.h b/include/dt-bindings/i2c/i2c.h index 0c12c38dfa..1d5da81d90 100644 --- a/include/dt-bindings/i2c/i2c.h +++ b/include/dt-bindings/i2c/i2c.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * This header provides constants for I2C bindings * @@ -6,6 +5,8 @@ * Copyright (C) 2015 by Renesas Electronics Corporation * * Wolfram Sang + * + * GPLv2 only */ #ifndef _DT_BINDINGS_I2C_I2C_H diff --git a/include/dt-bindings/iio/adc/fsl-imx25-gcq.h b/include/dt-bindings/iio/adc/fsl-imx25-gcq.h index 08ef4d298b..87abdd4a76 100644 --- a/include/dt-bindings/iio/adc/fsl-imx25-gcq.h +++ b/include/dt-bindings/iio/adc/fsl-imx25-gcq.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ /* * This header provides constants for configuring the I.MX25 ADC */ diff --git a/include/dt-bindings/iio/adi,ad5592r.h b/include/dt-bindings/iio/adi,ad5592r.h index 9f8c7b808c..c48aca1dca 100644 --- a/include/dt-bindings/iio/adi,ad5592r.h +++ b/include/dt-bindings/iio/adi,ad5592r.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _DT_BINDINGS_ADI_AD5592R_H #define 
_DT_BINDINGS_ADI_AD5592R_H diff --git a/include/dt-bindings/iio/qcom,spmi-vadc.h b/include/dt-bindings/iio/qcom,spmi-vadc.h index 08adfe2596..42121fa238 100644 --- a/include/dt-bindings/iio/qcom,spmi-vadc.h +++ b/include/dt-bindings/iio/qcom,spmi-vadc.h @@ -1,6 +1,14 @@ -/* SPDX-License-Identifier: GPL-2.0 */ /* - * Copyright (c) 2012-2014,2018,2020 The Linux Foundation. All rights reserved. + * Copyright (c) 2012-2014, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. */ #ifndef _DT_BINDINGS_QCOM_SPMI_VADC_H @@ -108,193 +116,4 @@ #define VADC_LR_MUX10_PU1_PU2_AMUX_USB_ID 0xf9 #define VADC_LR_MUX3_BUF_PU1_PU2_XO_THERM 0xfc -/* ADC channels for SPMI PMIC5 */ - -#define ADC5_REF_GND 0x00 -#define ADC5_1P25VREF 0x01 -#define ADC5_VREF_VADC 0x02 -#define ADC5_VREF_VADC5_DIV_3 0x82 -#define ADC5_VPH_PWR 0x83 -#define ADC5_VBAT_SNS 0x84 -#define ADC5_VCOIN 0x85 -#define ADC5_DIE_TEMP 0x06 -#define ADC5_USB_IN_I 0x07 -#define ADC5_USB_IN_V_16 0x08 -#define ADC5_CHG_TEMP 0x09 -#define ADC5_BAT_THERM 0x0a -#define ADC5_BAT_ID 0x0b -#define ADC5_XO_THERM 0x0c -#define ADC5_AMUX_THM1 0x0d -#define ADC5_AMUX_THM2 0x0e -#define ADC5_AMUX_THM3 0x0f -#define ADC5_AMUX_THM4 0x10 -#define ADC5_AMUX_THM5 0x11 -#define ADC5_GPIO1 0x12 -#define ADC5_GPIO2 0x13 -#define ADC5_GPIO3 0x14 -#define ADC5_GPIO4 0x15 -#define ADC5_GPIO5 0x16 -#define ADC5_GPIO6 0x17 -#define ADC5_GPIO7 0x18 -#define ADC5_SBUx 0x99 -#define ADC5_MID_CHG_DIV6 0x1e -#define ADC5_OFF 0xff - -/* 30k pull-up1 */ -#define ADC5_BAT_THERM_30K_PU 0x2a -#define 
ADC5_BAT_ID_30K_PU 0x2b -#define ADC5_XO_THERM_30K_PU 0x2c -#define ADC5_AMUX_THM1_30K_PU 0x2d -#define ADC5_AMUX_THM2_30K_PU 0x2e -#define ADC5_AMUX_THM3_30K_PU 0x2f -#define ADC5_AMUX_THM4_30K_PU 0x30 -#define ADC5_AMUX_THM5_30K_PU 0x31 -#define ADC5_GPIO1_30K_PU 0x32 -#define ADC5_GPIO2_30K_PU 0x33 -#define ADC5_GPIO3_30K_PU 0x34 -#define ADC5_GPIO4_30K_PU 0x35 -#define ADC5_GPIO5_30K_PU 0x36 -#define ADC5_GPIO6_30K_PU 0x37 -#define ADC5_GPIO7_30K_PU 0x38 -#define ADC5_SBUx_30K_PU 0x39 - -/* 100k pull-up2 */ -#define ADC5_BAT_THERM_100K_PU 0x4a -#define ADC5_BAT_ID_100K_PU 0x4b -#define ADC5_XO_THERM_100K_PU 0x4c -#define ADC5_AMUX_THM1_100K_PU 0x4d -#define ADC5_AMUX_THM2_100K_PU 0x4e -#define ADC5_AMUX_THM3_100K_PU 0x4f -#define ADC5_AMUX_THM4_100K_PU 0x50 -#define ADC5_AMUX_THM5_100K_PU 0x51 -#define ADC5_GPIO1_100K_PU 0x52 -#define ADC5_GPIO2_100K_PU 0x53 -#define ADC5_GPIO3_100K_PU 0x54 -#define ADC5_GPIO4_100K_PU 0x55 -#define ADC5_GPIO5_100K_PU 0x56 -#define ADC5_GPIO6_100K_PU 0x57 -#define ADC5_GPIO7_100K_PU 0x58 -#define ADC5_SBUx_100K_PU 0x59 - -/* 400k pull-up3 */ -#define ADC5_BAT_THERM_400K_PU 0x6a -#define ADC5_BAT_ID_400K_PU 0x6b -#define ADC5_XO_THERM_400K_PU 0x6c -#define ADC5_AMUX_THM1_400K_PU 0x6d -#define ADC5_AMUX_THM2_400K_PU 0x6e -#define ADC5_AMUX_THM3_400K_PU 0x6f -#define ADC5_AMUX_THM4_400K_PU 0x70 -#define ADC5_AMUX_THM5_400K_PU 0x71 -#define ADC5_GPIO1_400K_PU 0x72 -#define ADC5_GPIO2_400K_PU 0x73 -#define ADC5_GPIO3_400K_PU 0x74 -#define ADC5_GPIO4_400K_PU 0x75 -#define ADC5_GPIO5_400K_PU 0x76 -#define ADC5_GPIO6_400K_PU 0x77 -#define ADC5_GPIO7_400K_PU 0x78 -#define ADC5_SBUx_400K_PU 0x79 - -/* 1/3 Divider */ -#define ADC5_GPIO1_DIV3 0x92 -#define ADC5_GPIO2_DIV3 0x93 -#define ADC5_GPIO3_DIV3 0x94 -#define ADC5_GPIO4_DIV3 0x95 -#define ADC5_GPIO5_DIV3 0x96 -#define ADC5_GPIO6_DIV3 0x97 -#define ADC5_GPIO7_DIV3 0x98 -#define ADC5_SBUx_DIV3 0x99 - -/* Current and combined current/voltage channels */ -#define ADC5_INT_EXT_ISENSE 0xa1 
-#define ADC5_PARALLEL_ISENSE 0xa5 -#define ADC5_CUR_REPLICA_VDS 0xa7 -#define ADC5_CUR_SENS_BATFET_VDS_OFFSET 0xa9 -#define ADC5_CUR_SENS_REPLICA_VDS_OFFSET 0xab -#define ADC5_EXT_SENS_OFFSET 0xad - -#define ADC5_INT_EXT_ISENSE_VBAT_VDATA 0xb0 -#define ADC5_INT_EXT_ISENSE_VBAT_IDATA 0xb1 -#define ADC5_EXT_ISENSE_VBAT_VDATA 0xb2 -#define ADC5_EXT_ISENSE_VBAT_IDATA 0xb3 -#define ADC5_PARALLEL_ISENSE_VBAT_VDATA 0xb4 -#define ADC5_PARALLEL_ISENSE_VBAT_IDATA 0xb5 - -#define ADC5_MAX_CHANNEL 0xc0 - -/* ADC channels for ADC for PMIC7 */ - -#define ADC7_REF_GND 0x00 -#define ADC7_1P25VREF 0x01 -#define ADC7_VREF_VADC 0x02 -#define ADC7_DIE_TEMP 0x03 - -#define ADC7_AMUX_THM1 0x04 -#define ADC7_AMUX_THM2 0x05 -#define ADC7_AMUX_THM3 0x06 -#define ADC7_AMUX_THM4 0x07 -#define ADC7_AMUX_THM5 0x08 -#define ADC7_AMUX_THM6 0x09 -#define ADC7_GPIO1 0x0a -#define ADC7_GPIO2 0x0b -#define ADC7_GPIO3 0x0c -#define ADC7_GPIO4 0x0d - -#define ADC7_CHG_TEMP 0x10 -#define ADC7_USB_IN_V_16 0x11 -#define ADC7_VDC_16 0x12 -#define ADC7_CC1_ID 0x13 -#define ADC7_VREF_BAT_THERM 0x15 -#define ADC7_IIN_FB 0x17 - -/* 30k pull-up1 */ -#define ADC7_AMUX_THM1_30K_PU 0x24 -#define ADC7_AMUX_THM2_30K_PU 0x25 -#define ADC7_AMUX_THM3_30K_PU 0x26 -#define ADC7_AMUX_THM4_30K_PU 0x27 -#define ADC7_AMUX_THM5_30K_PU 0x28 -#define ADC7_AMUX_THM6_30K_PU 0x29 -#define ADC7_GPIO1_30K_PU 0x2a -#define ADC7_GPIO2_30K_PU 0x2b -#define ADC7_GPIO3_30K_PU 0x2c -#define ADC7_GPIO4_30K_PU 0x2d -#define ADC7_CC1_ID_30K_PU 0x33 - -/* 100k pull-up2 */ -#define ADC7_AMUX_THM1_100K_PU 0x44 -#define ADC7_AMUX_THM2_100K_PU 0x45 -#define ADC7_AMUX_THM3_100K_PU 0x46 -#define ADC7_AMUX_THM4_100K_PU 0x47 -#define ADC7_AMUX_THM5_100K_PU 0x48 -#define ADC7_AMUX_THM6_100K_PU 0x49 -#define ADC7_GPIO1_100K_PU 0x4a -#define ADC7_GPIO2_100K_PU 0x4b -#define ADC7_GPIO3_100K_PU 0x4c -#define ADC7_GPIO4_100K_PU 0x4d -#define ADC7_CC1_ID_100K_PU 0x53 - -/* 400k pull-up3 */ -#define ADC7_AMUX_THM1_400K_PU 0x64 -#define 
ADC7_AMUX_THM2_400K_PU 0x65 -#define ADC7_AMUX_THM3_400K_PU 0x66 -#define ADC7_AMUX_THM4_400K_PU 0x67 -#define ADC7_AMUX_THM5_400K_PU 0x68 -#define ADC7_AMUX_THM6_400K_PU 0x69 -#define ADC7_GPIO1_400K_PU 0x6a -#define ADC7_GPIO2_400K_PU 0x6b -#define ADC7_GPIO3_400K_PU 0x6c -#define ADC7_GPIO4_400K_PU 0x6d -#define ADC7_CC1_ID_400K_PU 0x73 - -/* 1/3 Divider */ -#define ADC7_GPIO1_DIV3 0x8a -#define ADC7_GPIO2_DIV3 0x8b -#define ADC7_GPIO3_DIV3 0x8c -#define ADC7_GPIO4_DIV3 0x8d - -#define ADC7_VPH_PWR 0x8e -#define ADC7_VBAT_SNS 0x8f - -#define ADC7_SBUx 0x94 -#define ADC7_VBAT_2S_MID 0x96 - #endif /* _DT_BINDINGS_QCOM_SPMI_VADC_H */ diff --git a/include/dt-bindings/input/input.h b/include/dt-bindings/input/input.h index bcf0ae100f..a21413324a 100644 --- a/include/dt-bindings/input/input.h +++ b/include/dt-bindings/input/input.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ /* * This header provides constants for most input bindings. * diff --git a/include/dt-bindings/input/linux-event-codes.h b/include/dt-bindings/input/linux-event-codes.h index 225ec87d4f..693bbcd267 100644 --- a/include/dt-bindings/input/linux-event-codes.h +++ b/include/dt-bindings/input/linux-event-codes.h @@ -1,950 +1 @@ -/* SPDX-License-Identifier: GPL-2.0-only WITH Linux-syscall-note */ -/* - * Input event codes - * - * *** IMPORTANT *** - * This file is not only included from C-code but also from devicetree source - * files. As such this file MUST only contain comments and defines. - * - * Copyright (c) 1999-2002 Vojtech Pavlik - * Copyright (c) 2015 Hans de Goede - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 as published by - * the Free Software Foundation. 
- */ -#ifndef _UAPI_INPUT_EVENT_CODES_H -#define _UAPI_INPUT_EVENT_CODES_H - -/* - * Device properties and quirks - */ - -#define INPUT_PROP_POINTER 0x00 /* needs a pointer */ -#define INPUT_PROP_DIRECT 0x01 /* direct input devices */ -#define INPUT_PROP_BUTTONPAD 0x02 /* has button(s) under pad */ -#define INPUT_PROP_SEMI_MT 0x03 /* touch rectangle only */ -#define INPUT_PROP_TOPBUTTONPAD 0x04 /* softbuttons at top of pad */ -#define INPUT_PROP_POINTING_STICK 0x05 /* is a pointing stick */ -#define INPUT_PROP_ACCELEROMETER 0x06 /* has accelerometer */ - -#define INPUT_PROP_MAX 0x1f -#define INPUT_PROP_CNT (INPUT_PROP_MAX + 1) - -/* - * Event types - */ - -#define EV_SYN 0x00 -#define EV_KEY 0x01 -#define EV_REL 0x02 -#define EV_ABS 0x03 -#define EV_MSC 0x04 -#define EV_SW 0x05 -#define EV_LED 0x11 -#define EV_SND 0x12 -#define EV_REP 0x14 -#define EV_FF 0x15 -#define EV_PWR 0x16 -#define EV_FF_STATUS 0x17 -#define EV_MAX 0x1f -#define EV_CNT (EV_MAX+1) - -/* - * Synchronization events. - */ - -#define SYN_REPORT 0 -#define SYN_CONFIG 1 -#define SYN_MT_REPORT 2 -#define SYN_DROPPED 3 -#define SYN_MAX 0xf -#define SYN_CNT (SYN_MAX+1) - -/* - * Keys and buttons - * - * Most of the keys/buttons are modeled after USB HUT 1.12 - * (see http://www.usb.org/developers/hidpage). 
- * Abbreviations in the comments: - * AC - Application Control - * AL - Application Launch Button - * SC - System Control - */ - -#define KEY_RESERVED 0 -#define KEY_ESC 1 -#define KEY_1 2 -#define KEY_2 3 -#define KEY_3 4 -#define KEY_4 5 -#define KEY_5 6 -#define KEY_6 7 -#define KEY_7 8 -#define KEY_8 9 -#define KEY_9 10 -#define KEY_0 11 -#define KEY_MINUS 12 -#define KEY_EQUAL 13 -#define KEY_BACKSPACE 14 -#define KEY_TAB 15 -#define KEY_Q 16 -#define KEY_W 17 -#define KEY_E 18 -#define KEY_R 19 -#define KEY_T 20 -#define KEY_Y 21 -#define KEY_U 22 -#define KEY_I 23 -#define KEY_O 24 -#define KEY_P 25 -#define KEY_LEFTBRACE 26 -#define KEY_RIGHTBRACE 27 -#define KEY_ENTER 28 -#define KEY_LEFTCTRL 29 -#define KEY_A 30 -#define KEY_S 31 -#define KEY_D 32 -#define KEY_F 33 -#define KEY_G 34 -#define KEY_H 35 -#define KEY_J 36 -#define KEY_K 37 -#define KEY_L 38 -#define KEY_SEMICOLON 39 -#define KEY_APOSTROPHE 40 -#define KEY_GRAVE 41 -#define KEY_LEFTSHIFT 42 -#define KEY_BACKSLASH 43 -#define KEY_Z 44 -#define KEY_X 45 -#define KEY_C 46 -#define KEY_V 47 -#define KEY_B 48 -#define KEY_N 49 -#define KEY_M 50 -#define KEY_COMMA 51 -#define KEY_DOT 52 -#define KEY_SLASH 53 -#define KEY_RIGHTSHIFT 54 -#define KEY_KPASTERISK 55 -#define KEY_LEFTALT 56 -#define KEY_SPACE 57 -#define KEY_CAPSLOCK 58 -#define KEY_F1 59 -#define KEY_F2 60 -#define KEY_F3 61 -#define KEY_F4 62 -#define KEY_F5 63 -#define KEY_F6 64 -#define KEY_F7 65 -#define KEY_F8 66 -#define KEY_F9 67 -#define KEY_F10 68 -#define KEY_NUMLOCK 69 -#define KEY_SCROLLLOCK 70 -#define KEY_KP7 71 -#define KEY_KP8 72 -#define KEY_KP9 73 -#define KEY_KPMINUS 74 -#define KEY_KP4 75 -#define KEY_KP5 76 -#define KEY_KP6 77 -#define KEY_KPPLUS 78 -#define KEY_KP1 79 -#define KEY_KP2 80 -#define KEY_KP3 81 -#define KEY_KP0 82 -#define KEY_KPDOT 83 - -#define KEY_ZENKAKUHANKAKU 85 -#define KEY_102ND 86 -#define KEY_F11 87 -#define KEY_F12 88 -#define KEY_RO 89 -#define KEY_KATAKANA 90 -#define KEY_HIRAGANA 91 
-#define KEY_HENKAN 92 -#define KEY_KATAKANAHIRAGANA 93 -#define KEY_MUHENKAN 94 -#define KEY_KPJPCOMMA 95 -#define KEY_KPENTER 96 -#define KEY_RIGHTCTRL 97 -#define KEY_KPSLASH 98 -#define KEY_SYSRQ 99 -#define KEY_RIGHTALT 100 -#define KEY_LINEFEED 101 -#define KEY_HOME 102 -#define KEY_UP 103 -#define KEY_PAGEUP 104 -#define KEY_LEFT 105 -#define KEY_RIGHT 106 -#define KEY_END 107 -#define KEY_DOWN 108 -#define KEY_PAGEDOWN 109 -#define KEY_INSERT 110 -#define KEY_DELETE 111 -#define KEY_MACRO 112 -#define KEY_MUTE 113 -#define KEY_VOLUMEDOWN 114 -#define KEY_VOLUMEUP 115 -#define KEY_POWER 116 /* SC System Power Down */ -#define KEY_KPEQUAL 117 -#define KEY_KPPLUSMINUS 118 -#define KEY_PAUSE 119 -#define KEY_SCALE 120 /* AL Compiz Scale (Expose) */ - -#define KEY_KPCOMMA 121 -#define KEY_HANGEUL 122 -#define KEY_HANGUEL KEY_HANGEUL -#define KEY_HANJA 123 -#define KEY_YEN 124 -#define KEY_LEFTMETA 125 -#define KEY_RIGHTMETA 126 -#define KEY_COMPOSE 127 - -#define KEY_STOP 128 /* AC Stop */ -#define KEY_AGAIN 129 -#define KEY_PROPS 130 /* AC Properties */ -#define KEY_UNDO 131 /* AC Undo */ -#define KEY_FRONT 132 -#define KEY_COPY 133 /* AC Copy */ -#define KEY_OPEN 134 /* AC Open */ -#define KEY_PASTE 135 /* AC Paste */ -#define KEY_FIND 136 /* AC Search */ -#define KEY_CUT 137 /* AC Cut */ -#define KEY_HELP 138 /* AL Integrated Help Center */ -#define KEY_MENU 139 /* Menu (show menu) */ -#define KEY_CALC 140 /* AL Calculator */ -#define KEY_SETUP 141 -#define KEY_SLEEP 142 /* SC System Sleep */ -#define KEY_WAKEUP 143 /* System Wake Up */ -#define KEY_FILE 144 /* AL Local Machine Browser */ -#define KEY_SENDFILE 145 -#define KEY_DELETEFILE 146 -#define KEY_XFER 147 -#define KEY_PROG1 148 -#define KEY_PROG2 149 -#define KEY_WWW 150 /* AL Internet Browser */ -#define KEY_MSDOS 151 -#define KEY_COFFEE 152 /* AL Terminal Lock/Screensaver */ -#define KEY_SCREENLOCK KEY_COFFEE -#define KEY_ROTATE_DISPLAY 153 /* Display orientation for e.g. 
tablets */ -#define KEY_DIRECTION KEY_ROTATE_DISPLAY -#define KEY_CYCLEWINDOWS 154 -#define KEY_MAIL 155 -#define KEY_BOOKMARKS 156 /* AC Bookmarks */ -#define KEY_COMPUTER 157 -#define KEY_BACK 158 /* AC Back */ -#define KEY_FORWARD 159 /* AC Forward */ -#define KEY_CLOSECD 160 -#define KEY_EJECTCD 161 -#define KEY_EJECTCLOSECD 162 -#define KEY_NEXTSONG 163 -#define KEY_PLAYPAUSE 164 -#define KEY_PREVIOUSSONG 165 -#define KEY_STOPCD 166 -#define KEY_RECORD 167 -#define KEY_REWIND 168 -#define KEY_PHONE 169 /* Media Select Telephone */ -#define KEY_ISO 170 -#define KEY_CONFIG 171 /* AL Consumer Control Configuration */ -#define KEY_HOMEPAGE 172 /* AC Home */ -#define KEY_REFRESH 173 /* AC Refresh */ -#define KEY_EXIT 174 /* AC Exit */ -#define KEY_MOVE 175 -#define KEY_EDIT 176 -#define KEY_SCROLLUP 177 -#define KEY_SCROLLDOWN 178 -#define KEY_KPLEFTPAREN 179 -#define KEY_KPRIGHTPAREN 180 -#define KEY_NEW 181 /* AC New */ -#define KEY_REDO 182 /* AC Redo/Repeat */ - -#define KEY_F13 183 -#define KEY_F14 184 -#define KEY_F15 185 -#define KEY_F16 186 -#define KEY_F17 187 -#define KEY_F18 188 -#define KEY_F19 189 -#define KEY_F20 190 -#define KEY_F21 191 -#define KEY_F22 192 -#define KEY_F23 193 -#define KEY_F24 194 - -#define KEY_PLAYCD 200 -#define KEY_PAUSECD 201 -#define KEY_PROG3 202 -#define KEY_PROG4 203 -#define KEY_DASHBOARD 204 /* AL Dashboard */ -#define KEY_SUSPEND 205 -#define KEY_CLOSE 206 /* AC Close */ -#define KEY_PLAY 207 -#define KEY_FASTFORWARD 208 -#define KEY_BASSBOOST 209 -#define KEY_PRINT 210 /* AC Print */ -#define KEY_HP 211 -#define KEY_CAMERA 212 -#define KEY_SOUND 213 -#define KEY_QUESTION 214 -#define KEY_EMAIL 215 -#define KEY_CHAT 216 -#define KEY_SEARCH 217 -#define KEY_CONNECT 218 -#define KEY_FINANCE 219 /* AL Checkbook/Finance */ -#define KEY_SPORT 220 -#define KEY_SHOP 221 -#define KEY_ALTERASE 222 -#define KEY_CANCEL 223 /* AC Cancel */ -#define KEY_BRIGHTNESSDOWN 224 -#define KEY_BRIGHTNESSUP 225 -#define KEY_MEDIA 226 - 
-#define KEY_SWITCHVIDEOMODE 227 /* Cycle between available video - outputs (Monitor/LCD/TV-out/etc) */ -#define KEY_KBDILLUMTOGGLE 228 -#define KEY_KBDILLUMDOWN 229 -#define KEY_KBDILLUMUP 230 - -#define KEY_SEND 231 /* AC Send */ -#define KEY_REPLY 232 /* AC Reply */ -#define KEY_FORWARDMAIL 233 /* AC Forward Msg */ -#define KEY_SAVE 234 /* AC Save */ -#define KEY_DOCUMENTS 235 - -#define KEY_BATTERY 236 - -#define KEY_BLUETOOTH 237 -#define KEY_WLAN 238 -#define KEY_UWB 239 - -#define KEY_UNKNOWN 240 - -#define KEY_VIDEO_NEXT 241 /* drive next video source */ -#define KEY_VIDEO_PREV 242 /* drive previous video source */ -#define KEY_BRIGHTNESS_CYCLE 243 /* brightness up, after max is min */ -#define KEY_BRIGHTNESS_AUTO 244 /* Set Auto Brightness: manual - brightness control is off, - rely on ambient */ -#define KEY_BRIGHTNESS_ZERO KEY_BRIGHTNESS_AUTO -#define KEY_DISPLAY_OFF 245 /* display device to off state */ - -#define KEY_WWAN 246 /* Wireless WAN (LTE, UMTS, GSM, etc.) */ -#define KEY_WIMAX KEY_WWAN -#define KEY_RFKILL 247 /* Key that controls all radios */ - -#define KEY_MICMUTE 248 /* Mute / unmute the microphone */ - -/* Code 255 is reserved for special needs of AT keyboard driver */ - -#define BTN_MISC 0x100 -#define BTN_0 0x100 -#define BTN_1 0x101 -#define BTN_2 0x102 -#define BTN_3 0x103 -#define BTN_4 0x104 -#define BTN_5 0x105 -#define BTN_6 0x106 -#define BTN_7 0x107 -#define BTN_8 0x108 -#define BTN_9 0x109 - -#define BTN_MOUSE 0x110 -#define BTN_LEFT 0x110 -#define BTN_RIGHT 0x111 -#define BTN_MIDDLE 0x112 -#define BTN_SIDE 0x113 -#define BTN_EXTRA 0x114 -#define BTN_FORWARD 0x115 -#define BTN_BACK 0x116 -#define BTN_TASK 0x117 - -#define BTN_JOYSTICK 0x120 -#define BTN_TRIGGER 0x120 -#define BTN_THUMB 0x121 -#define BTN_THUMB2 0x122 -#define BTN_TOP 0x123 -#define BTN_TOP2 0x124 -#define BTN_PINKIE 0x125 -#define BTN_BASE 0x126 -#define BTN_BASE2 0x127 -#define BTN_BASE3 0x128 -#define BTN_BASE4 0x129 -#define BTN_BASE5 0x12a -#define BTN_BASE6 
0x12b -#define BTN_DEAD 0x12f - -#define BTN_GAMEPAD 0x130 -#define BTN_SOUTH 0x130 -#define BTN_A BTN_SOUTH -#define BTN_EAST 0x131 -#define BTN_B BTN_EAST -#define BTN_C 0x132 -#define BTN_NORTH 0x133 -#define BTN_X BTN_NORTH -#define BTN_WEST 0x134 -#define BTN_Y BTN_WEST -#define BTN_Z 0x135 -#define BTN_TL 0x136 -#define BTN_TR 0x137 -#define BTN_TL2 0x138 -#define BTN_TR2 0x139 -#define BTN_SELECT 0x13a -#define BTN_START 0x13b -#define BTN_MODE 0x13c -#define BTN_THUMBL 0x13d -#define BTN_THUMBR 0x13e - -#define BTN_DIGI 0x140 -#define BTN_TOOL_PEN 0x140 -#define BTN_TOOL_RUBBER 0x141 -#define BTN_TOOL_BRUSH 0x142 -#define BTN_TOOL_PENCIL 0x143 -#define BTN_TOOL_AIRBRUSH 0x144 -#define BTN_TOOL_FINGER 0x145 -#define BTN_TOOL_MOUSE 0x146 -#define BTN_TOOL_LENS 0x147 -#define BTN_TOOL_QUINTTAP 0x148 /* Five fingers on trackpad */ -#define BTN_STYLUS3 0x149 -#define BTN_TOUCH 0x14a -#define BTN_STYLUS 0x14b -#define BTN_STYLUS2 0x14c -#define BTN_TOOL_DOUBLETAP 0x14d -#define BTN_TOOL_TRIPLETAP 0x14e -#define BTN_TOOL_QUADTAP 0x14f /* Four fingers on trackpad */ - -#define BTN_WHEEL 0x150 -#define BTN_GEAR_DOWN 0x150 -#define BTN_GEAR_UP 0x151 - -#define KEY_OK 0x160 -#define KEY_SELECT 0x161 -#define KEY_GOTO 0x162 -#define KEY_CLEAR 0x163 -#define KEY_POWER2 0x164 -#define KEY_OPTION 0x165 -#define KEY_INFO 0x166 /* AL OEM Features/Tips/Tutorial */ -#define KEY_TIME 0x167 -#define KEY_VENDOR 0x168 -#define KEY_ARCHIVE 0x169 -#define KEY_PROGRAM 0x16a /* Media Select Program Guide */ -#define KEY_CHANNEL 0x16b -#define KEY_FAVORITES 0x16c -#define KEY_EPG 0x16d -#define KEY_PVR 0x16e /* Media Select Home */ -#define KEY_MHP 0x16f -#define KEY_LANGUAGE 0x170 -#define KEY_TITLE 0x171 -#define KEY_SUBTITLE 0x172 -#define KEY_ANGLE 0x173 -#define KEY_FULL_SCREEN 0x174 /* AC View Toggle */ -#define KEY_ZOOM KEY_FULL_SCREEN -#define KEY_MODE 0x175 -#define KEY_KEYBOARD 0x176 -#define KEY_ASPECT_RATIO 0x177 /* HUTRR37: Aspect */ -#define KEY_SCREEN KEY_ASPECT_RATIO 
-#define KEY_PC 0x178 /* Media Select Computer */ -#define KEY_TV 0x179 /* Media Select TV */ -#define KEY_TV2 0x17a /* Media Select Cable */ -#define KEY_VCR 0x17b /* Media Select VCR */ -#define KEY_VCR2 0x17c /* VCR Plus */ -#define KEY_SAT 0x17d /* Media Select Satellite */ -#define KEY_SAT2 0x17e -#define KEY_CD 0x17f /* Media Select CD */ -#define KEY_TAPE 0x180 /* Media Select Tape */ -#define KEY_RADIO 0x181 -#define KEY_TUNER 0x182 /* Media Select Tuner */ -#define KEY_PLAYER 0x183 -#define KEY_TEXT 0x184 -#define KEY_DVD 0x185 /* Media Select DVD */ -#define KEY_AUX 0x186 -#define KEY_MP3 0x187 -#define KEY_AUDIO 0x188 /* AL Audio Browser */ -#define KEY_VIDEO 0x189 /* AL Movie Browser */ -#define KEY_DIRECTORY 0x18a -#define KEY_LIST 0x18b -#define KEY_MEMO 0x18c /* Media Select Messages */ -#define KEY_CALENDAR 0x18d -#define KEY_RED 0x18e -#define KEY_GREEN 0x18f -#define KEY_YELLOW 0x190 -#define KEY_BLUE 0x191 -#define KEY_CHANNELUP 0x192 /* Channel Increment */ -#define KEY_CHANNELDOWN 0x193 /* Channel Decrement */ -#define KEY_FIRST 0x194 -#define KEY_LAST 0x195 /* Recall Last */ -#define KEY_AB 0x196 -#define KEY_NEXT 0x197 -#define KEY_RESTART 0x198 -#define KEY_SLOW 0x199 -#define KEY_SHUFFLE 0x19a -#define KEY_BREAK 0x19b -#define KEY_PREVIOUS 0x19c -#define KEY_DIGITS 0x19d -#define KEY_TEEN 0x19e -#define KEY_TWEN 0x19f -#define KEY_VIDEOPHONE 0x1a0 /* Media Select Video Phone */ -#define KEY_GAMES 0x1a1 /* Media Select Games */ -#define KEY_ZOOMIN 0x1a2 /* AC Zoom In */ -#define KEY_ZOOMOUT 0x1a3 /* AC Zoom Out */ -#define KEY_ZOOMRESET 0x1a4 /* AC Zoom */ -#define KEY_WORDPROCESSOR 0x1a5 /* AL Word Processor */ -#define KEY_EDITOR 0x1a6 /* AL Text Editor */ -#define KEY_SPREADSHEET 0x1a7 /* AL Spreadsheet */ -#define KEY_GRAPHICSEDITOR 0x1a8 /* AL Graphics Editor */ -#define KEY_PRESENTATION 0x1a9 /* AL Presentation App */ -#define KEY_DATABASE 0x1aa /* AL Database App */ -#define KEY_NEWS 0x1ab /* AL Newsreader */ -#define KEY_VOICEMAIL 
0x1ac /* AL Voicemail */ -#define KEY_ADDRESSBOOK 0x1ad /* AL Contacts/Address Book */ -#define KEY_MESSENGER 0x1ae /* AL Instant Messaging */ -#define KEY_DISPLAYTOGGLE 0x1af /* Turn display (LCD) on and off */ -#define KEY_BRIGHTNESS_TOGGLE KEY_DISPLAYTOGGLE -#define KEY_SPELLCHECK 0x1b0 /* AL Spell Check */ -#define KEY_LOGOFF 0x1b1 /* AL Logoff */ - -#define KEY_DOLLAR 0x1b2 -#define KEY_EURO 0x1b3 - -#define KEY_FRAMEBACK 0x1b4 /* Consumer - transport controls */ -#define KEY_FRAMEFORWARD 0x1b5 -#define KEY_CONTEXT_MENU 0x1b6 /* GenDesc - system context menu */ -#define KEY_MEDIA_REPEAT 0x1b7 /* Consumer - transport control */ -#define KEY_10CHANNELSUP 0x1b8 /* 10 channels up (10+) */ -#define KEY_10CHANNELSDOWN 0x1b9 /* 10 channels down (10-) */ -#define KEY_IMAGES 0x1ba /* AL Image Browser */ -#define KEY_NOTIFICATION_CENTER 0x1bc /* Show/hide the notification center */ -#define KEY_PICKUP_PHONE 0x1bd /* Answer incoming call */ -#define KEY_HANGUP_PHONE 0x1be /* Decline incoming call */ - -#define KEY_DEL_EOL 0x1c0 -#define KEY_DEL_EOS 0x1c1 -#define KEY_INS_LINE 0x1c2 -#define KEY_DEL_LINE 0x1c3 - -#define KEY_FN 0x1d0 -#define KEY_FN_ESC 0x1d1 -#define KEY_FN_F1 0x1d2 -#define KEY_FN_F2 0x1d3 -#define KEY_FN_F3 0x1d4 -#define KEY_FN_F4 0x1d5 -#define KEY_FN_F5 0x1d6 -#define KEY_FN_F6 0x1d7 -#define KEY_FN_F7 0x1d8 -#define KEY_FN_F8 0x1d9 -#define KEY_FN_F9 0x1da -#define KEY_FN_F10 0x1db -#define KEY_FN_F11 0x1dc -#define KEY_FN_F12 0x1dd -#define KEY_FN_1 0x1de -#define KEY_FN_2 0x1df -#define KEY_FN_D 0x1e0 -#define KEY_FN_E 0x1e1 -#define KEY_FN_F 0x1e2 -#define KEY_FN_S 0x1e3 -#define KEY_FN_B 0x1e4 -#define KEY_FN_RIGHT_SHIFT 0x1e5 - -#define KEY_BRL_DOT1 0x1f1 -#define KEY_BRL_DOT2 0x1f2 -#define KEY_BRL_DOT3 0x1f3 -#define KEY_BRL_DOT4 0x1f4 -#define KEY_BRL_DOT5 0x1f5 -#define KEY_BRL_DOT6 0x1f6 -#define KEY_BRL_DOT7 0x1f7 -#define KEY_BRL_DOT8 0x1f8 -#define KEY_BRL_DOT9 0x1f9 -#define KEY_BRL_DOT10 0x1fa - -#define KEY_NUMERIC_0 0x200 /* used 
by phones, remote controls, */ -#define KEY_NUMERIC_1 0x201 /* and other keypads */ -#define KEY_NUMERIC_2 0x202 -#define KEY_NUMERIC_3 0x203 -#define KEY_NUMERIC_4 0x204 -#define KEY_NUMERIC_5 0x205 -#define KEY_NUMERIC_6 0x206 -#define KEY_NUMERIC_7 0x207 -#define KEY_NUMERIC_8 0x208 -#define KEY_NUMERIC_9 0x209 -#define KEY_NUMERIC_STAR 0x20a -#define KEY_NUMERIC_POUND 0x20b -#define KEY_NUMERIC_A 0x20c /* Phone key A - HUT Telephony 0xb9 */ -#define KEY_NUMERIC_B 0x20d -#define KEY_NUMERIC_C 0x20e -#define KEY_NUMERIC_D 0x20f - -#define KEY_CAMERA_FOCUS 0x210 -#define KEY_WPS_BUTTON 0x211 /* WiFi Protected Setup key */ - -#define KEY_TOUCHPAD_TOGGLE 0x212 /* Request switch touchpad on or off */ -#define KEY_TOUCHPAD_ON 0x213 -#define KEY_TOUCHPAD_OFF 0x214 - -#define KEY_CAMERA_ZOOMIN 0x215 -#define KEY_CAMERA_ZOOMOUT 0x216 -#define KEY_CAMERA_UP 0x217 -#define KEY_CAMERA_DOWN 0x218 -#define KEY_CAMERA_LEFT 0x219 -#define KEY_CAMERA_RIGHT 0x21a - -#define KEY_ATTENDANT_ON 0x21b -#define KEY_ATTENDANT_OFF 0x21c -#define KEY_ATTENDANT_TOGGLE 0x21d /* Attendant call on or off */ -#define KEY_LIGHTS_TOGGLE 0x21e /* Reading light on or off */ - -#define BTN_DPAD_UP 0x220 -#define BTN_DPAD_DOWN 0x221 -#define BTN_DPAD_LEFT 0x222 -#define BTN_DPAD_RIGHT 0x223 - -#define KEY_ALS_TOGGLE 0x230 /* Ambient light sensor */ -#define KEY_ROTATE_LOCK_TOGGLE 0x231 /* Display rotation lock */ - -#define KEY_BUTTONCONFIG 0x240 /* AL Button Configuration */ -#define KEY_TASKMANAGER 0x241 /* AL Task/Project Manager */ -#define KEY_JOURNAL 0x242 /* AL Log/Journal/Timecard */ -#define KEY_CONTROLPANEL 0x243 /* AL Control Panel */ -#define KEY_APPSELECT 0x244 /* AL Select Task/Application */ -#define KEY_SCREENSAVER 0x245 /* AL Screen Saver */ -#define KEY_VOICECOMMAND 0x246 /* Listening Voice Command */ -#define KEY_ASSISTANT 0x247 /* AL Context-aware desktop assistant */ -#define KEY_KBD_LAYOUT_NEXT 0x248 /* AC Next Keyboard Layout Select */ -#define KEY_EMOJI_PICKER 0x249 /* 
Show/hide emoji picker (HUTRR101) */ - -#define KEY_BRIGHTNESS_MIN 0x250 /* Set Brightness to Minimum */ -#define KEY_BRIGHTNESS_MAX 0x251 /* Set Brightness to Maximum */ - -#define KEY_KBDINPUTASSIST_PREV 0x260 -#define KEY_KBDINPUTASSIST_NEXT 0x261 -#define KEY_KBDINPUTASSIST_PREVGROUP 0x262 -#define KEY_KBDINPUTASSIST_NEXTGROUP 0x263 -#define KEY_KBDINPUTASSIST_ACCEPT 0x264 -#define KEY_KBDINPUTASSIST_CANCEL 0x265 - -/* Diagonal movement keys */ -#define KEY_RIGHT_UP 0x266 -#define KEY_RIGHT_DOWN 0x267 -#define KEY_LEFT_UP 0x268 -#define KEY_LEFT_DOWN 0x269 - -#define KEY_ROOT_MENU 0x26a /* Show Device's Root Menu */ -/* Show Top Menu of the Media (e.g. DVD) */ -#define KEY_MEDIA_TOP_MENU 0x26b -#define KEY_NUMERIC_11 0x26c -#define KEY_NUMERIC_12 0x26d -/* - * Toggle Audio Description: refers to an audio service that helps blind and - * visually impaired consumers understand the action in a program. Note: in - * some countries this is referred to as "Video Description". - */ -#define KEY_AUDIO_DESC 0x26e -#define KEY_3D_MODE 0x26f -#define KEY_NEXT_FAVORITE 0x270 -#define KEY_STOP_RECORD 0x271 -#define KEY_PAUSE_RECORD 0x272 -#define KEY_VOD 0x273 /* Video on Demand */ -#define KEY_UNMUTE 0x274 -#define KEY_FASTREVERSE 0x275 -#define KEY_SLOWREVERSE 0x276 -/* - * Control a data application associated with the currently viewed channel, - * e.g. teletext or data broadcast application (MHEG, MHP, HbbTV, etc.) - */ -#define KEY_DATA 0x277 -#define KEY_ONSCREEN_KEYBOARD 0x278 -/* Electronic privacy screen control */ -#define KEY_PRIVACY_SCREEN_TOGGLE 0x279 - -/* Select an area of screen to be copied */ -#define KEY_SELECTIVE_SCREENSHOT 0x27a - -/* - * Some keyboards have keys which do not have a defined meaning, these keys - * are intended to be programmed / bound to macros by the user. For most - * keyboards with these macro-keys the key-sequence to inject, or action to - * take, is all handled by software on the host side. 
So from the kernel's - * point of view these are just normal keys. - * - * The KEY_MACRO# codes below are intended for such keys, which may be labeled - * e.g. G1-G18, or S1 - S30. The KEY_MACRO# codes MUST NOT be used for keys - * where the marking on the key does indicate a defined meaning / purpose. - * - * The KEY_MACRO# codes MUST also NOT be used as fallback for when no existing - * KEY_FOO define matches the marking / purpose. In this case a new KEY_FOO - * define MUST be added. - */ -#define KEY_MACRO1 0x290 -#define KEY_MACRO2 0x291 -#define KEY_MACRO3 0x292 -#define KEY_MACRO4 0x293 -#define KEY_MACRO5 0x294 -#define KEY_MACRO6 0x295 -#define KEY_MACRO7 0x296 -#define KEY_MACRO8 0x297 -#define KEY_MACRO9 0x298 -#define KEY_MACRO10 0x299 -#define KEY_MACRO11 0x29a -#define KEY_MACRO12 0x29b -#define KEY_MACRO13 0x29c -#define KEY_MACRO14 0x29d -#define KEY_MACRO15 0x29e -#define KEY_MACRO16 0x29f -#define KEY_MACRO17 0x2a0 -#define KEY_MACRO18 0x2a1 -#define KEY_MACRO19 0x2a2 -#define KEY_MACRO20 0x2a3 -#define KEY_MACRO21 0x2a4 -#define KEY_MACRO22 0x2a5 -#define KEY_MACRO23 0x2a6 -#define KEY_MACRO24 0x2a7 -#define KEY_MACRO25 0x2a8 -#define KEY_MACRO26 0x2a9 -#define KEY_MACRO27 0x2aa -#define KEY_MACRO28 0x2ab -#define KEY_MACRO29 0x2ac -#define KEY_MACRO30 0x2ad - -/* - * Some keyboards with the macro-keys described above have some extra keys - * for controlling the host-side software responsible for the macro handling: - * -A macro recording start/stop key. Note that not all keyboards which emit - * KEY_MACRO_RECORD_START will also emit KEY_MACRO_RECORD_STOP if - * KEY_MACRO_RECORD_STOP is not advertised, then KEY_MACRO_RECORD_START - * should be interpreted as a recording start/stop toggle; - * -Keys for switching between different macro (pre)sets, either a key for - * cycling through the configured presets or keys to directly select a preset. 
- */ -#define KEY_MACRO_RECORD_START 0x2b0 -#define KEY_MACRO_RECORD_STOP 0x2b1 -#define KEY_MACRO_PRESET_CYCLE 0x2b2 -#define KEY_MACRO_PRESET1 0x2b3 -#define KEY_MACRO_PRESET2 0x2b4 -#define KEY_MACRO_PRESET3 0x2b5 - -/* - * Some keyboards have a buildin LCD panel where the contents are controlled - * by the host. Often these have a number of keys directly below the LCD - * intended for controlling a menu shown on the LCD. These keys often don't - * have any labeling so we just name them KEY_KBD_LCD_MENU# - */ -#define KEY_KBD_LCD_MENU1 0x2b8 -#define KEY_KBD_LCD_MENU2 0x2b9 -#define KEY_KBD_LCD_MENU3 0x2ba -#define KEY_KBD_LCD_MENU4 0x2bb -#define KEY_KBD_LCD_MENU5 0x2bc - -#define BTN_TRIGGER_HAPPY 0x2c0 -#define BTN_TRIGGER_HAPPY1 0x2c0 -#define BTN_TRIGGER_HAPPY2 0x2c1 -#define BTN_TRIGGER_HAPPY3 0x2c2 -#define BTN_TRIGGER_HAPPY4 0x2c3 -#define BTN_TRIGGER_HAPPY5 0x2c4 -#define BTN_TRIGGER_HAPPY6 0x2c5 -#define BTN_TRIGGER_HAPPY7 0x2c6 -#define BTN_TRIGGER_HAPPY8 0x2c7 -#define BTN_TRIGGER_HAPPY9 0x2c8 -#define BTN_TRIGGER_HAPPY10 0x2c9 -#define BTN_TRIGGER_HAPPY11 0x2ca -#define BTN_TRIGGER_HAPPY12 0x2cb -#define BTN_TRIGGER_HAPPY13 0x2cc -#define BTN_TRIGGER_HAPPY14 0x2cd -#define BTN_TRIGGER_HAPPY15 0x2ce -#define BTN_TRIGGER_HAPPY16 0x2cf -#define BTN_TRIGGER_HAPPY17 0x2d0 -#define BTN_TRIGGER_HAPPY18 0x2d1 -#define BTN_TRIGGER_HAPPY19 0x2d2 -#define BTN_TRIGGER_HAPPY20 0x2d3 -#define BTN_TRIGGER_HAPPY21 0x2d4 -#define BTN_TRIGGER_HAPPY22 0x2d5 -#define BTN_TRIGGER_HAPPY23 0x2d6 -#define BTN_TRIGGER_HAPPY24 0x2d7 -#define BTN_TRIGGER_HAPPY25 0x2d8 -#define BTN_TRIGGER_HAPPY26 0x2d9 -#define BTN_TRIGGER_HAPPY27 0x2da -#define BTN_TRIGGER_HAPPY28 0x2db -#define BTN_TRIGGER_HAPPY29 0x2dc -#define BTN_TRIGGER_HAPPY30 0x2dd -#define BTN_TRIGGER_HAPPY31 0x2de -#define BTN_TRIGGER_HAPPY32 0x2df -#define BTN_TRIGGER_HAPPY33 0x2e0 -#define BTN_TRIGGER_HAPPY34 0x2e1 -#define BTN_TRIGGER_HAPPY35 0x2e2 -#define BTN_TRIGGER_HAPPY36 0x2e3 -#define BTN_TRIGGER_HAPPY37 
0x2e4 -#define BTN_TRIGGER_HAPPY38 0x2e5 -#define BTN_TRIGGER_HAPPY39 0x2e6 -#define BTN_TRIGGER_HAPPY40 0x2e7 - -/* We avoid low common keys in module aliases so they don't get huge. */ -#define KEY_MIN_INTERESTING KEY_MUTE -#define KEY_MAX 0x2ff -#define KEY_CNT (KEY_MAX+1) - -/* - * Relative axes - */ - -#define REL_X 0x00 -#define REL_Y 0x01 -#define REL_Z 0x02 -#define REL_RX 0x03 -#define REL_RY 0x04 -#define REL_RZ 0x05 -#define REL_HWHEEL 0x06 -#define REL_DIAL 0x07 -#define REL_WHEEL 0x08 -#define REL_MISC 0x09 -/* - * 0x0a is reserved and should not be used in input drivers. - * It was used by HID as REL_MISC+1 and userspace needs to detect if - * the next REL_* event is correct or is just REL_MISC + n. - * We define here REL_RESERVED so userspace can rely on it and detect - * the situation described above. - */ -#define REL_RESERVED 0x0a -#define REL_WHEEL_HI_RES 0x0b -#define REL_HWHEEL_HI_RES 0x0c -#define REL_MAX 0x0f -#define REL_CNT (REL_MAX+1) - -/* - * Absolute axes - */ - -#define ABS_X 0x00 -#define ABS_Y 0x01 -#define ABS_Z 0x02 -#define ABS_RX 0x03 -#define ABS_RY 0x04 -#define ABS_RZ 0x05 -#define ABS_THROTTLE 0x06 -#define ABS_RUDDER 0x07 -#define ABS_WHEEL 0x08 -#define ABS_GAS 0x09 -#define ABS_BRAKE 0x0a -#define ABS_HAT0X 0x10 -#define ABS_HAT0Y 0x11 -#define ABS_HAT1X 0x12 -#define ABS_HAT1Y 0x13 -#define ABS_HAT2X 0x14 -#define ABS_HAT2Y 0x15 -#define ABS_HAT3X 0x16 -#define ABS_HAT3Y 0x17 -#define ABS_PRESSURE 0x18 -#define ABS_DISTANCE 0x19 -#define ABS_TILT_X 0x1a -#define ABS_TILT_Y 0x1b -#define ABS_TOOL_WIDTH 0x1c - -#define ABS_VOLUME 0x20 - -#define ABS_MISC 0x28 - -/* - * 0x2e is reserved and should not be used in input drivers. - * It was used by HID as ABS_MISC+6 and userspace needs to detect if - * the next ABS_* event is correct or is just ABS_MISC + n. - * We define here ABS_RESERVED so userspace can rely on it and detect - * the situation described above. 
- */ -#define ABS_RESERVED 0x2e - -#define ABS_MT_SLOT 0x2f /* MT slot being modified */ -#define ABS_MT_TOUCH_MAJOR 0x30 /* Major axis of touching ellipse */ -#define ABS_MT_TOUCH_MINOR 0x31 /* Minor axis (omit if circular) */ -#define ABS_MT_WIDTH_MAJOR 0x32 /* Major axis of approaching ellipse */ -#define ABS_MT_WIDTH_MINOR 0x33 /* Minor axis (omit if circular) */ -#define ABS_MT_ORIENTATION 0x34 /* Ellipse orientation */ -#define ABS_MT_POSITION_X 0x35 /* Center X touch position */ -#define ABS_MT_POSITION_Y 0x36 /* Center Y touch position */ -#define ABS_MT_TOOL_TYPE 0x37 /* Type of touching device */ -#define ABS_MT_BLOB_ID 0x38 /* Group a set of packets as a blob */ -#define ABS_MT_TRACKING_ID 0x39 /* Unique ID of initiated contact */ -#define ABS_MT_PRESSURE 0x3a /* Pressure on contact area */ -#define ABS_MT_DISTANCE 0x3b /* Contact hover distance */ -#define ABS_MT_TOOL_X 0x3c /* Center X tool position */ -#define ABS_MT_TOOL_Y 0x3d /* Center Y tool position */ - - -#define ABS_MAX 0x3f -#define ABS_CNT (ABS_MAX+1) - -/* - * Switch events - */ - -#define SW_LID 0x00 /* set = lid shut */ -#define SW_TABLET_MODE 0x01 /* set = tablet mode */ -#define SW_HEADPHONE_INSERT 0x02 /* set = inserted */ -#define SW_RFKILL_ALL 0x03 /* rfkill master switch, type "any" - set = radio enabled */ -#define SW_RADIO SW_RFKILL_ALL /* deprecated */ -#define SW_MICROPHONE_INSERT 0x04 /* set = inserted */ -#define SW_DOCK 0x05 /* set = plugged into dock */ -#define SW_LINEOUT_INSERT 0x06 /* set = inserted */ -#define SW_JACK_PHYSICAL_INSERT 0x07 /* set = mechanical switch set */ -#define SW_VIDEOOUT_INSERT 0x08 /* set = inserted */ -#define SW_CAMERA_LENS_COVER 0x09 /* set = lens covered */ -#define SW_KEYPAD_SLIDE 0x0a /* set = keypad slide out */ -#define SW_FRONT_PROXIMITY 0x0b /* set = front proximity sensor active */ -#define SW_ROTATE_LOCK 0x0c /* set = rotate locked/disabled */ -#define SW_LINEIN_INSERT 0x0d /* set = inserted */ -#define SW_MUTE_DEVICE 0x0e /* set = 
device disabled */ -#define SW_PEN_INSERTED 0x0f /* set = pen inserted */ -#define SW_MACHINE_COVER 0x10 /* set = cover closed */ -#define SW_MAX 0x10 -#define SW_CNT (SW_MAX+1) - -/* - * Misc events - */ - -#define MSC_SERIAL 0x00 -#define MSC_PULSELED 0x01 -#define MSC_GESTURE 0x02 -#define MSC_RAW 0x03 -#define MSC_SCAN 0x04 -#define MSC_TIMESTAMP 0x05 -#define MSC_MAX 0x07 -#define MSC_CNT (MSC_MAX+1) - -/* - * LEDs - */ - -#define LED_NUML 0x00 -#define LED_CAPSL 0x01 -#define LED_SCROLLL 0x02 -#define LED_COMPOSE 0x03 -#define LED_KANA 0x04 -#define LED_SLEEP 0x05 -#define LED_SUSPEND 0x06 -#define LED_MUTE 0x07 -#define LED_MISC 0x08 -#define LED_MAIL 0x09 -#define LED_CHARGING 0x0a -#define LED_MAX 0x0f -#define LED_CNT (LED_MAX+1) - -/* - * Autorepeat values - */ - -#define REP_DELAY 0x00 -#define REP_PERIOD 0x01 -#define REP_MAX 0x01 -#define REP_CNT (REP_MAX+1) - -/* - * Sounds - */ - -#define SND_CLICK 0x00 -#define SND_BELL 0x01 -#define SND_TONE 0x02 -#define SND_MAX 0x07 -#define SND_CNT (SND_MAX+1) - -#endif +../../uapi/linux/input-event-codes.h \ No newline at end of file diff --git a/include/dt-bindings/input/ti-drv260x.h b/include/dt-bindings/input/ti-drv260x.h index af71082dd1..2626e6d9f7 100644 --- a/include/dt-bindings/input/ti-drv260x.h +++ b/include/dt-bindings/input/ti-drv260x.h @@ -1,10 +1,18 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * DRV260X haptics driver family * * Author: Dan Murphy * * Copyright: (C) 2014 Texas Instruments, Inc. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. 
*/ #ifndef _DT_BINDINGS_TI_DRV260X_H diff --git a/include/dt-bindings/interrupt-controller/arm-gic.h b/include/dt-bindings/interrupt-controller/arm-gic.h index 35b6f69b7d..d4110d5caa 100644 --- a/include/dt-bindings/interrupt-controller/arm-gic.h +++ b/include/dt-bindings/interrupt-controller/arm-gic.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 OR MIT */ /* * This header provides constants for the ARM GIC. */ diff --git a/include/dt-bindings/interrupt-controller/irq-st.h b/include/dt-bindings/interrupt-controller/irq-st.h index 9c9c8e2b80..4c59aceb9b 100644 --- a/include/dt-bindings/interrupt-controller/irq-st.h +++ b/include/dt-bindings/interrupt-controller/irq-st.h @@ -1,10 +1,13 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * include/linux/irqchip/irq-st.h * * Copyright (C) 2014 STMicroelectronics – All Rights Reserved * * Author: Lee Jones + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. */ #ifndef _DT_BINDINGS_INTERRUPT_CONTROLLER_ST_H diff --git a/include/dt-bindings/interrupt-controller/irq.h b/include/dt-bindings/interrupt-controller/irq.h index 9e3d183e13..33a1003c55 100644 --- a/include/dt-bindings/interrupt-controller/irq.h +++ b/include/dt-bindings/interrupt-controller/irq.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 OR MIT */ /* * This header provides constants for most IRQ bindings. 
* diff --git a/include/dt-bindings/interrupt-controller/mips-gic.h b/include/dt-bindings/interrupt-controller/mips-gic.h index bd45cee0c3..cf35a577e3 100644 --- a/include/dt-bindings/interrupt-controller/mips-gic.h +++ b/include/dt-bindings/interrupt-controller/mips-gic.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _DT_BINDINGS_INTERRUPT_CONTROLLER_MIPS_GIC_H #define _DT_BINDINGS_INTERRUPT_CONTROLLER_MIPS_GIC_H diff --git a/include/dt-bindings/leds/common.h b/include/dt-bindings/leds/common.h index 52b619d44b..7958bec7de 100644 --- a/include/dt-bindings/leds/common.h +++ b/include/dt-bindings/leds/common.h @@ -1,12 +1,9 @@ -/* SPDX-License-Identifier: GPL-2.0 */ /* * This header provides macros for the common LEDs device tree bindings. * * Copyright (C) 2015, Samsung Electronics Co., Ltd. - * Author: Jacek Anaszewski * - * Copyright (C) 2019 Jacek Anaszewski - * Copyright (C) 2020 Pavel Machek + * Author: Jacek Anaszewski */ #ifndef __DT_BINDINGS_LEDS_H @@ -21,74 +18,4 @@ #define LEDS_BOOST_ADAPTIVE 1 #define LEDS_BOOST_FIXED 2 -/* Standard LED colors */ -#define LED_COLOR_ID_WHITE 0 -#define LED_COLOR_ID_RED 1 -#define LED_COLOR_ID_GREEN 2 -#define LED_COLOR_ID_BLUE 3 -#define LED_COLOR_ID_AMBER 4 -#define LED_COLOR_ID_VIOLET 5 -#define LED_COLOR_ID_YELLOW 6 -#define LED_COLOR_ID_IR 7 -#define LED_COLOR_ID_MULTI 8 /* For multicolor LEDs */ -#define LED_COLOR_ID_RGB 9 /* For multicolor LEDs that can do arbitrary color, - so this would include RGBW and similar */ -#define LED_COLOR_ID_MAX 10 - -/* Standard LED functions */ -/* Keyboard LEDs, usually it would be input4::capslock etc. 
*/ -/* Obsolete equivalent: "shift-key-light" */ -#define LED_FUNCTION_CAPSLOCK "capslock" -#define LED_FUNCTION_SCROLLLOCK "scrolllock" -#define LED_FUNCTION_NUMLOCK "numlock" -/* Obsolete equivalents: "tpacpi::thinklight" (IBM/Lenovo Thinkpads), - "lp5523:kb{1,2,3,4,5,6}" (Nokia N900) */ -#define LED_FUNCTION_KBD_BACKLIGHT "kbd_backlight" - -/* System LEDs, usually found on system body. - platform::mute (etc) is sometimes seen, :mute would be better */ -#define LED_FUNCTION_POWER "power" -#define LED_FUNCTION_DISK "disk" - -/* Obsolete: "platform:*:charging" (allwinner sun50i) */ -#define LED_FUNCTION_CHARGING "charging" -/* Used RGB notification LEDs common on phones. - Obsolete equivalents: "status-led:{red,green,blue}" (Motorola Droid 4), - "lp5523:{r,g,b}" (Nokia N900) */ -#define LED_FUNCTION_STATUS "status" - -#define LED_FUNCTION_MICMUTE "micmute" -#define LED_FUNCTION_MUTE "mute" - -/* Miscelleaus functions. Use functions above if you can. */ -#define LED_FUNCTION_ACTIVITY "activity" -#define LED_FUNCTION_ALARM "alarm" -#define LED_FUNCTION_BACKLIGHT "backlight" -#define LED_FUNCTION_BLUETOOTH "bluetooth" -#define LED_FUNCTION_BOOT "boot" -#define LED_FUNCTION_CPU "cpu" -#define LED_FUNCTION_DEBUG "debug" -#define LED_FUNCTION_DISK_ACTIVITY "disk-activity" -#define LED_FUNCTION_DISK_ERR "disk-err" -#define LED_FUNCTION_DISK_READ "disk-read" -#define LED_FUNCTION_DISK_WRITE "disk-write" -#define LED_FUNCTION_FAULT "fault" -#define LED_FUNCTION_FLASH "flash" -#define LED_FUNCTION_HEARTBEAT "heartbeat" -#define LED_FUNCTION_INDICATOR "indicator" -#define LED_FUNCTION_LAN "lan" -#define LED_FUNCTION_MAIL "mail" -#define LED_FUNCTION_MTD "mtd" -#define LED_FUNCTION_PANIC "panic" -#define LED_FUNCTION_PROGRAMMING "programming" -#define LED_FUNCTION_RX "rx" -#define LED_FUNCTION_SD "sd" -#define LED_FUNCTION_STANDBY "standby" -#define LED_FUNCTION_TORCH "torch" -#define LED_FUNCTION_TX "tx" -#define LED_FUNCTION_USB "usb" -#define LED_FUNCTION_WAN "wan" -#define 
LED_FUNCTION_WLAN "wlan" -#define LED_FUNCTION_WPS "wps" - #endif /* __DT_BINDINGS_LEDS_H */ diff --git a/include/dt-bindings/leds/leds-ns2.h b/include/dt-bindings/leds/leds-ns2.h index fd615749e7..491c5f974a 100644 --- a/include/dt-bindings/leds/leds-ns2.h +++ b/include/dt-bindings/leds/leds-ns2.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _DT_BINDINGS_LEDS_NS2_H #define _DT_BINDINGS_LEDS_NS2_H diff --git a/include/dt-bindings/media/c8sectpfe.h b/include/dt-bindings/media/c8sectpfe.h index 6b1fb6f541..a0b5c7be68 100644 --- a/include/dt-bindings/media/c8sectpfe.h +++ b/include/dt-bindings/media/c8sectpfe.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef __DT_C8SECTPFE_H #define __DT_C8SECTPFE_H diff --git a/include/dt-bindings/media/omap3-isp.h b/include/dt-bindings/media/omap3-isp.h index 436c71210e..b18c60e468 100644 --- a/include/dt-bindings/media/omap3-isp.h +++ b/include/dt-bindings/media/omap3-isp.h @@ -1,8 +1,16 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * include/dt-bindings/media/omap3-isp.h * * Copyright (C) 2015 Sakari Ailus + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. 
*/ #ifndef __DT_BINDINGS_OMAP3_ISP_H__ diff --git a/include/dt-bindings/media/tvp5150.h b/include/dt-bindings/media/tvp5150.h index dda00c0385..c852a35e91 100644 --- a/include/dt-bindings/media/tvp5150.h +++ b/include/dt-bindings/media/tvp5150.h @@ -1,9 +1,21 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ /* tvp5150.h - definition for tvp5150 inputs Copyright (C) 2006 Hans Verkuil (hverkuil@xs4all.nl) + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ #ifndef _DT_BINDINGS_MEDIA_TVP5150_H @@ -14,6 +26,8 @@ #define TVP5150_COMPOSITE1 1 #define TVP5150_SVIDEO 2 +#define TVP5150_INPUT_NUM 3 + /* TVP5150 HW outputs */ #define TVP5150_NORMAL 0 #define TVP5150_BLACK_SCREEN 1 diff --git a/include/dt-bindings/media/xilinx-vip.h b/include/dt-bindings/media/xilinx-vip.h index 94ed3edfcc..6298fec006 100644 --- a/include/dt-bindings/media/xilinx-vip.h +++ b/include/dt-bindings/media/xilinx-vip.h @@ -1,4 +1,3 @@ -// SPDX-License-Identifier: GPL-2.0 /* * Xilinx Video IP Core * @@ -7,6 +6,10 @@ * * Contacts: Hyun Kwon * Laurent Pinchart + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
*/ #ifndef __DT_BINDINGS_MEDIA_XILINX_VIP_H__ diff --git a/include/dt-bindings/memory/mt2701-larb-port.h b/include/dt-bindings/memory/mt2701-larb-port.h index 25d03526f1..6764d74474 100644 --- a/include/dt-bindings/memory/mt2701-larb-port.h +++ b/include/dt-bindings/memory/mt2701-larb-port.h @@ -1,11 +1,19 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright (c) 2015 MediaTek Inc. * Author: Honghui Zhang + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. */ -#ifndef _DT_BINDINGS_MEMORY_MT2701_LARB_PORT_H_ -#define _DT_BINDINGS_MEMORY_MT2701_LARB_PORT_H_ +#ifndef _MT2701_LARB_PORT_H_ +#define _MT2701_LARB_PORT_H_ /* * Mediatek m4u generation 1 such as mt2701 has flat m4u port numbers, diff --git a/include/dt-bindings/memory/mt8173-larb-port.h b/include/dt-bindings/memory/mt8173-larb-port.h index 167a7fc518..5fef5d1f8f 100644 --- a/include/dt-bindings/memory/mt8173-larb-port.h +++ b/include/dt-bindings/memory/mt8173-larb-port.h @@ -1,12 +1,24 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright (c) 2015-2016 MediaTek Inc. * Author: Yong Wu + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
*/ -#ifndef _DT_BINDINGS_MEMORY_MT8173_LARB_PORT_H_ -#define _DT_BINDINGS_MEMORY_MT8173_LARB_PORT_H_ +#ifndef __DTS_IOMMU_PORT_MT8173_H +#define __DTS_IOMMU_PORT_MT8173_H -#include +#define MTK_M4U_ID(larb, port) (((larb) << 5) | (port)) +/* Local arbiter ID */ +#define MTK_M4U_TO_LARB(id) (((id) >> 5) & 0x7) +/* PortID within the local arbiter */ +#define MTK_M4U_TO_PORT(id) ((id) & 0x1f) #define M4U_LARB0_ID 0 #define M4U_LARB1_ID 1 diff --git a/include/dt-bindings/memory/tegra114-mc.h b/include/dt-bindings/memory/tegra114-mc.h index dfe99c8a5b..8f48985a31 100644 --- a/include/dt-bindings/memory/tegra114-mc.h +++ b/include/dt-bindings/memory/tegra114-mc.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef DT_BINDINGS_MEMORY_TEGRA114_MC_H #define DT_BINDINGS_MEMORY_TEGRA114_MC_H @@ -23,21 +22,4 @@ #define TEGRA_SWGROUP_EMUCIF 18 #define TEGRA_SWGROUP_TSEC 19 -#define TEGRA114_MC_RESET_AVPC 0 -#define TEGRA114_MC_RESET_DC 1 -#define TEGRA114_MC_RESET_DCB 2 -#define TEGRA114_MC_RESET_EPP 3 -#define TEGRA114_MC_RESET_2D 4 -#define TEGRA114_MC_RESET_HC 5 -#define TEGRA114_MC_RESET_HDA 6 -#define TEGRA114_MC_RESET_ISP 7 -#define TEGRA114_MC_RESET_MPCORE 8 -#define TEGRA114_MC_RESET_MPCORELP 9 -#define TEGRA114_MC_RESET_MPE 10 -#define TEGRA114_MC_RESET_3D 11 -#define TEGRA114_MC_RESET_3D2 12 -#define TEGRA114_MC_RESET_PPCS 13 -#define TEGRA114_MC_RESET_VDE 14 -#define TEGRA114_MC_RESET_VI 15 - #endif diff --git a/include/dt-bindings/memory/tegra124-mc.h b/include/dt-bindings/memory/tegra124-mc.h index 7e73bb400e..7d8ee798f3 100644 --- a/include/dt-bindings/memory/tegra124-mc.h +++ b/include/dt-bindings/memory/tegra124-mc.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef DT_BINDINGS_MEMORY_TEGRA124_MC_H #define DT_BINDINGS_MEMORY_TEGRA124_MC_H @@ -29,97 +28,4 @@ #define TEGRA_SWGROUP_VIC 24 #define TEGRA_SWGROUP_VI 25 -#define TEGRA124_MC_RESET_AFI 0 -#define TEGRA124_MC_RESET_AVPC 1 -#define TEGRA124_MC_RESET_DC 2 -#define 
TEGRA124_MC_RESET_DCB 3 -#define TEGRA124_MC_RESET_HC 4 -#define TEGRA124_MC_RESET_HDA 5 -#define TEGRA124_MC_RESET_ISP2 6 -#define TEGRA124_MC_RESET_MPCORE 7 -#define TEGRA124_MC_RESET_MPCORELP 8 -#define TEGRA124_MC_RESET_MSENC 9 -#define TEGRA124_MC_RESET_PPCS 10 -#define TEGRA124_MC_RESET_SATA 11 -#define TEGRA124_MC_RESET_VDE 12 -#define TEGRA124_MC_RESET_VI 13 -#define TEGRA124_MC_RESET_VIC 14 -#define TEGRA124_MC_RESET_XUSB_HOST 15 -#define TEGRA124_MC_RESET_XUSB_DEV 16 -#define TEGRA124_MC_RESET_TSEC 17 -#define TEGRA124_MC_RESET_SDMMC1 18 -#define TEGRA124_MC_RESET_SDMMC2 19 -#define TEGRA124_MC_RESET_SDMMC3 20 -#define TEGRA124_MC_RESET_SDMMC4 21 -#define TEGRA124_MC_RESET_ISP2B 22 -#define TEGRA124_MC_RESET_GPU 23 - -#define TEGRA124_MC_PTCR 0 -#define TEGRA124_MC_DISPLAY0A 1 -#define TEGRA124_MC_DISPLAY0AB 2 -#define TEGRA124_MC_DISPLAY0B 3 -#define TEGRA124_MC_DISPLAY0BB 4 -#define TEGRA124_MC_DISPLAY0C 5 -#define TEGRA124_MC_DISPLAY0CB 6 -#define TEGRA124_MC_AFIR 14 -#define TEGRA124_MC_AVPCARM7R 15 -#define TEGRA124_MC_DISPLAYHC 16 -#define TEGRA124_MC_DISPLAYHCB 17 -#define TEGRA124_MC_HDAR 21 -#define TEGRA124_MC_HOST1XDMAR 22 -#define TEGRA124_MC_HOST1XR 23 -#define TEGRA124_MC_MSENCSRD 28 -#define TEGRA124_MC_PPCSAHBDMAR 29 -#define TEGRA124_MC_PPCSAHBSLVR 30 -#define TEGRA124_MC_SATAR 31 -#define TEGRA124_MC_VDEBSEVR 34 -#define TEGRA124_MC_VDEMBER 35 -#define TEGRA124_MC_VDEMCER 36 -#define TEGRA124_MC_VDETPER 37 -#define TEGRA124_MC_MPCORELPR 38 -#define TEGRA124_MC_MPCORER 39 -#define TEGRA124_MC_MSENCSWR 43 -#define TEGRA124_MC_AFIW 49 -#define TEGRA124_MC_AVPCARM7W 50 -#define TEGRA124_MC_HDAW 53 -#define TEGRA124_MC_HOST1XW 54 -#define TEGRA124_MC_MPCORELPW 56 -#define TEGRA124_MC_MPCOREW 57 -#define TEGRA124_MC_PPCSAHBDMAW 59 -#define TEGRA124_MC_PPCSAHBSLVW 60 -#define TEGRA124_MC_SATAW 61 -#define TEGRA124_MC_VDEBSEVW 62 -#define TEGRA124_MC_VDEDBGW 63 -#define TEGRA124_MC_VDEMBEW 64 -#define TEGRA124_MC_VDETPMW 65 -#define 
TEGRA124_MC_ISPRA 68 -#define TEGRA124_MC_ISPWA 70 -#define TEGRA124_MC_ISPWB 71 -#define TEGRA124_MC_XUSB_HOSTR 74 -#define TEGRA124_MC_XUSB_HOSTW 75 -#define TEGRA124_MC_XUSB_DEVR 76 -#define TEGRA124_MC_XUSB_DEVW 77 -#define TEGRA124_MC_ISPRAB 78 -#define TEGRA124_MC_ISPWAB 80 -#define TEGRA124_MC_ISPWBB 81 -#define TEGRA124_MC_TSECSRD 84 -#define TEGRA124_MC_TSECSWR 85 -#define TEGRA124_MC_A9AVPSCR 86 -#define TEGRA124_MC_A9AVPSCW 87 -#define TEGRA124_MC_GPUSRD 88 -#define TEGRA124_MC_GPUSWR 89 -#define TEGRA124_MC_DISPLAYT 90 -#define TEGRA124_MC_SDMMCRA 96 -#define TEGRA124_MC_SDMMCRAA 97 -#define TEGRA124_MC_SDMMCR 98 -#define TEGRA124_MC_SDMMCRAB 99 -#define TEGRA124_MC_SDMMCWA 100 -#define TEGRA124_MC_SDMMCWAA 101 -#define TEGRA124_MC_SDMMCW 102 -#define TEGRA124_MC_SDMMCWAB 103 -#define TEGRA124_MC_VICSRD 108 -#define TEGRA124_MC_VICSWR 109 -#define TEGRA124_MC_VIW 114 -#define TEGRA124_MC_DISPLAYD 115 - #endif diff --git a/include/dt-bindings/memory/tegra210-mc.h b/include/dt-bindings/memory/tegra210-mc.h index 5e082547f1..d1731bc14d 100644 --- a/include/dt-bindings/memory/tegra210-mc.h +++ b/include/dt-bindings/memory/tegra210-mc.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef DT_BINDINGS_MEMORY_TEGRA210_MC_H #define DT_BINDINGS_MEMORY_TEGRA210_MC_H @@ -33,46 +32,5 @@ #define TEGRA_SWGROUP_AXIAP 28 #define TEGRA_SWGROUP_ETR 29 #define TEGRA_SWGROUP_TSECB 30 -#define TEGRA_SWGROUP_NV 31 -#define TEGRA_SWGROUP_NV2 32 -#define TEGRA_SWGROUP_PPCS1 33 -#define TEGRA_SWGROUP_DC1 34 -#define TEGRA_SWGROUP_PPCS2 35 -#define TEGRA_SWGROUP_HC1 36 -#define TEGRA_SWGROUP_SE1 37 -#define TEGRA_SWGROUP_TSEC1 38 -#define TEGRA_SWGROUP_TSECB1 39 -#define TEGRA_SWGROUP_NVDEC1 40 - -#define TEGRA210_MC_RESET_AFI 0 -#define TEGRA210_MC_RESET_AVPC 1 -#define TEGRA210_MC_RESET_DC 2 -#define TEGRA210_MC_RESET_DCB 3 -#define TEGRA210_MC_RESET_HC 4 -#define TEGRA210_MC_RESET_HDA 5 -#define TEGRA210_MC_RESET_ISP2 6 -#define TEGRA210_MC_RESET_MPCORE 7 -#define 
TEGRA210_MC_RESET_NVENC 8 -#define TEGRA210_MC_RESET_PPCS 9 -#define TEGRA210_MC_RESET_SATA 10 -#define TEGRA210_MC_RESET_VI 11 -#define TEGRA210_MC_RESET_VIC 12 -#define TEGRA210_MC_RESET_XUSB_HOST 13 -#define TEGRA210_MC_RESET_XUSB_DEV 14 -#define TEGRA210_MC_RESET_A9AVP 15 -#define TEGRA210_MC_RESET_TSEC 16 -#define TEGRA210_MC_RESET_SDMMC1 17 -#define TEGRA210_MC_RESET_SDMMC2 18 -#define TEGRA210_MC_RESET_SDMMC3 19 -#define TEGRA210_MC_RESET_SDMMC4 20 -#define TEGRA210_MC_RESET_ISP2B 21 -#define TEGRA210_MC_RESET_GPU 22 -#define TEGRA210_MC_RESET_NVDEC 23 -#define TEGRA210_MC_RESET_APE 24 -#define TEGRA210_MC_RESET_SE 25 -#define TEGRA210_MC_RESET_NVJPG 26 -#define TEGRA210_MC_RESET_AXIAP 27 -#define TEGRA210_MC_RESET_ETR 28 -#define TEGRA210_MC_RESET_TSECB 29 #endif diff --git a/include/dt-bindings/memory/tegra30-mc.h b/include/dt-bindings/memory/tegra30-mc.h index 930f708aca..502beb03d7 100644 --- a/include/dt-bindings/memory/tegra30-mc.h +++ b/include/dt-bindings/memory/tegra30-mc.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef DT_BINDINGS_MEMORY_TEGRA30_MC_H #define DT_BINDINGS_MEMORY_TEGRA30_MC_H @@ -22,90 +21,4 @@ #define TEGRA_SWGROUP_MPCORE 17 #define TEGRA_SWGROUP_ISP 18 -#define TEGRA30_MC_RESET_AFI 0 -#define TEGRA30_MC_RESET_AVPC 1 -#define TEGRA30_MC_RESET_DC 2 -#define TEGRA30_MC_RESET_DCB 3 -#define TEGRA30_MC_RESET_EPP 4 -#define TEGRA30_MC_RESET_2D 5 -#define TEGRA30_MC_RESET_HC 6 -#define TEGRA30_MC_RESET_HDA 7 -#define TEGRA30_MC_RESET_ISP 8 -#define TEGRA30_MC_RESET_MPCORE 9 -#define TEGRA30_MC_RESET_MPCORELP 10 -#define TEGRA30_MC_RESET_MPE 11 -#define TEGRA30_MC_RESET_3D 12 -#define TEGRA30_MC_RESET_3D2 13 -#define TEGRA30_MC_RESET_PPCS 14 -#define TEGRA30_MC_RESET_SATA 15 -#define TEGRA30_MC_RESET_VDE 16 -#define TEGRA30_MC_RESET_VI 17 - -#define TEGRA30_MC_PTCR 0 -#define TEGRA30_MC_DISPLAY0A 1 -#define TEGRA30_MC_DISPLAY0AB 2 -#define TEGRA30_MC_DISPLAY0B 3 -#define TEGRA30_MC_DISPLAY0BB 4 -#define TEGRA30_MC_DISPLAY0C 
5 -#define TEGRA30_MC_DISPLAY0CB 6 -#define TEGRA30_MC_DISPLAY1B 7 -#define TEGRA30_MC_DISPLAY1BB 8 -#define TEGRA30_MC_EPPUP 9 -#define TEGRA30_MC_G2PR 10 -#define TEGRA30_MC_G2SR 11 -#define TEGRA30_MC_MPEUNIFBR 12 -#define TEGRA30_MC_VIRUV 13 -#define TEGRA30_MC_AFIR 14 -#define TEGRA30_MC_AVPCARM7R 15 -#define TEGRA30_MC_DISPLAYHC 16 -#define TEGRA30_MC_DISPLAYHCB 17 -#define TEGRA30_MC_FDCDRD 18 -#define TEGRA30_MC_FDCDRD2 19 -#define TEGRA30_MC_G2DR 20 -#define TEGRA30_MC_HDAR 21 -#define TEGRA30_MC_HOST1XDMAR 22 -#define TEGRA30_MC_HOST1XR 23 -#define TEGRA30_MC_IDXSRD 24 -#define TEGRA30_MC_IDXSRD2 25 -#define TEGRA30_MC_MPE_IPRED 26 -#define TEGRA30_MC_MPEAMEMRD 27 -#define TEGRA30_MC_MPECSRD 28 -#define TEGRA30_MC_PPCSAHBDMAR 29 -#define TEGRA30_MC_PPCSAHBSLVR 30 -#define TEGRA30_MC_SATAR 31 -#define TEGRA30_MC_TEXSRD 32 -#define TEGRA30_MC_TEXSRD2 33 -#define TEGRA30_MC_VDEBSEVR 34 -#define TEGRA30_MC_VDEMBER 35 -#define TEGRA30_MC_VDEMCER 36 -#define TEGRA30_MC_VDETPER 37 -#define TEGRA30_MC_MPCORELPR 38 -#define TEGRA30_MC_MPCORER 39 -#define TEGRA30_MC_EPPU 40 -#define TEGRA30_MC_EPPV 41 -#define TEGRA30_MC_EPPY 42 -#define TEGRA30_MC_MPEUNIFBW 43 -#define TEGRA30_MC_VIWSB 44 -#define TEGRA30_MC_VIWU 45 -#define TEGRA30_MC_VIWV 46 -#define TEGRA30_MC_VIWY 47 -#define TEGRA30_MC_G2DW 48 -#define TEGRA30_MC_AFIW 49 -#define TEGRA30_MC_AVPCARM7W 50 -#define TEGRA30_MC_FDCDWR 51 -#define TEGRA30_MC_FDCDWR2 52 -#define TEGRA30_MC_HDAW 53 -#define TEGRA30_MC_HOST1XW 54 -#define TEGRA30_MC_ISPW 55 -#define TEGRA30_MC_MPCORELPW 56 -#define TEGRA30_MC_MPCOREW 57 -#define TEGRA30_MC_MPECSWR 58 -#define TEGRA30_MC_PPCSAHBDMAW 59 -#define TEGRA30_MC_PPCSAHBSLVW 60 -#define TEGRA30_MC_SATAW 61 -#define TEGRA30_MC_VDEBSEVW 62 -#define TEGRA30_MC_VDEDBGW 63 -#define TEGRA30_MC_VDEMBEW 64 -#define TEGRA30_MC_VDETPMW 65 - #endif diff --git a/include/dt-bindings/mfd/arizona.h b/include/dt-bindings/mfd/arizona.h index 1056108c95..dedf46ffdb 100644 --- 
a/include/dt-bindings/mfd/arizona.h +++ b/include/dt-bindings/mfd/arizona.h @@ -1,10 +1,13 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * Device Tree defines for Arizona devices * * Copyright 2015 Cirrus Logic Inc. * * Author: Charles Keepax + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. */ #ifndef _DT_BINDINGS_MFD_ARIZONA_H diff --git a/include/dt-bindings/mfd/as3722.h b/include/dt-bindings/mfd/as3722.h index 9ef0cba904..e66c0898c5 100644 --- a/include/dt-bindings/mfd/as3722.h +++ b/include/dt-bindings/mfd/as3722.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ /* * This header provides macros for ams AS3722 device bindings. * diff --git a/include/dt-bindings/mfd/atmel-flexcom.h b/include/dt-bindings/mfd/atmel-flexcom.h index 4e2fc32363..a266fe4ee9 100644 --- a/include/dt-bindings/mfd/atmel-flexcom.h +++ b/include/dt-bindings/mfd/atmel-flexcom.h @@ -1,8 +1,19 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * This header provides macros for Atmel Flexcom DT bindings. * * Copyright (C) 2015 Cyrille Pitchen + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License along with + * this program. If not, see . 
*/ #ifndef __DT_BINDINGS_ATMEL_FLEXCOM_H__ diff --git a/include/dt-bindings/mfd/dbx500-prcmu.h b/include/dt-bindings/mfd/dbx500-prcmu.h index 0404bcc47d..552a2d174f 100644 --- a/include/dt-bindings/mfd/dbx500-prcmu.h +++ b/include/dt-bindings/mfd/dbx500-prcmu.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ /* * This header provides constants for the PRCMU bindings. * diff --git a/include/dt-bindings/mfd/max77620.h b/include/dt-bindings/mfd/max77620.h index 1e19c5f908..b911a0720c 100644 --- a/include/dt-bindings/mfd/max77620.h +++ b/include/dt-bindings/mfd/max77620.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ /* * This header provides macros for MAXIM MAX77620 device bindings. * diff --git a/include/dt-bindings/mfd/palmas.h b/include/dt-bindings/mfd/palmas.h index c4f1d57ff4..cdb075aae4 100644 --- a/include/dt-bindings/mfd/palmas.h +++ b/include/dt-bindings/mfd/palmas.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ /* * This header provides macros for Palmas device bindings. * diff --git a/include/dt-bindings/mfd/qcom-rpm.h b/include/dt-bindings/mfd/qcom-rpm.h index c9204c4df5..54aef5e217 100644 --- a/include/dt-bindings/mfd/qcom-rpm.h +++ b/include/dt-bindings/mfd/qcom-rpm.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ /* * This header provides constants for the Qualcomm RPM bindings. 
*/ diff --git a/include/dt-bindings/mfd/st-lpc.h b/include/dt-bindings/mfd/st-lpc.h index 88a7f56843..d05894afa7 100644 --- a/include/dt-bindings/mfd/st-lpc.h +++ b/include/dt-bindings/mfd/st-lpc.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ /* * This header provides shared DT/Driver defines for ST's LPC device * diff --git a/include/dt-bindings/mfd/stm32f4-rcc.h b/include/dt-bindings/mfd/stm32f4-rcc.h index 309e8c79f2..e98942dc0d 100644 --- a/include/dt-bindings/mfd/stm32f4-rcc.h +++ b/include/dt-bindings/mfd/stm32f4-rcc.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ /* * This header provides constants for the STM32F4 RCC IP */ @@ -19,20 +18,14 @@ #define STM32F4_RCC_AHB1_GPIOJ 9 #define STM32F4_RCC_AHB1_GPIOK 10 #define STM32F4_RCC_AHB1_CRC 12 -#define STM32F4_RCC_AHB1_BKPSRAM 18 -#define STM32F4_RCC_AHB1_CCMDATARAM 20 #define STM32F4_RCC_AHB1_DMA1 21 #define STM32F4_RCC_AHB1_DMA2 22 #define STM32F4_RCC_AHB1_DMA2D 23 #define STM32F4_RCC_AHB1_ETHMAC 25 -#define STM32F4_RCC_AHB1_ETHMACTX 26 -#define STM32F4_RCC_AHB1_ETHMACRX 27 -#define STM32F4_RCC_AHB1_ETHMACPTP 28 -#define STM32F4_RCC_AHB1_OTGHS 29 -#define STM32F4_RCC_AHB1_OTGHSULPI 30 +#define STM32F4_RCC_AHB1_OTGHS 29 #define STM32F4_AHB1_RESET(bit) (STM32F4_RCC_AHB1_##bit + (0x10 * 8)) -#define STM32F4_AHB1_CLOCK(bit) (STM32F4_RCC_AHB1_##bit) +#define STM32F4_AHB1_CLOCK(bit) (STM32F4_RCC_AHB1_##bit + (0x30 * 8)) /* AHB2 */ @@ -43,14 +36,13 @@ #define STM32F4_RCC_AHB2_OTGFS 7 #define STM32F4_AHB2_RESET(bit) (STM32F4_RCC_AHB2_##bit + (0x14 * 8)) -#define STM32F4_AHB2_CLOCK(bit) (STM32F4_RCC_AHB2_##bit + 0x20) +#define STM32F4_AHB2_CLOCK(bit) (STM32F4_RCC_AHB2_##bit + (0x34 * 8)) /* AHB3 */ #define STM32F4_RCC_AHB3_FMC 0 -#define STM32F4_RCC_AHB3_QSPI 1 #define STM32F4_AHB3_RESET(bit) (STM32F4_RCC_AHB3_##bit + (0x18 * 8)) -#define STM32F4_AHB3_CLOCK(bit) (STM32F4_RCC_AHB3_##bit + 0x40) +#define STM32F4_AHB3_CLOCK(bit) (STM32F4_RCC_AHB3_##bit + (0x38 * 8)) /* APB1 */ #define 
STM32F4_RCC_APB1_TIM2 0 @@ -80,16 +72,14 @@ #define STM32F4_RCC_APB1_UART8 31 #define STM32F4_APB1_RESET(bit) (STM32F4_RCC_APB1_##bit + (0x20 * 8)) -#define STM32F4_APB1_CLOCK(bit) (STM32F4_RCC_APB1_##bit + 0x80) +#define STM32F4_APB1_CLOCK(bit) (STM32F4_RCC_APB1_##bit + (0x40 * 8)) /* APB2 */ #define STM32F4_RCC_APB2_TIM1 0 #define STM32F4_RCC_APB2_TIM8 1 #define STM32F4_RCC_APB2_USART1 4 #define STM32F4_RCC_APB2_USART6 5 -#define STM32F4_RCC_APB2_ADC1 8 -#define STM32F4_RCC_APB2_ADC2 9 -#define STM32F4_RCC_APB2_ADC3 10 +#define STM32F4_RCC_APB2_ADC 8 #define STM32F4_RCC_APB2_SDIO 11 #define STM32F4_RCC_APB2_SPI1 12 #define STM32F4_RCC_APB2_SPI4 13 @@ -101,9 +91,8 @@ #define STM32F4_RCC_APB2_SPI6 21 #define STM32F4_RCC_APB2_SAI1 22 #define STM32F4_RCC_APB2_LTDC 26 -#define STM32F4_RCC_APB2_DSI 27 #define STM32F4_APB2_RESET(bit) (STM32F4_RCC_APB2_##bit + (0x24 * 8)) -#define STM32F4_APB2_CLOCK(bit) (STM32F4_RCC_APB2_##bit + 0xA0) +#define STM32F4_APB2_CLOCK(bit) (STM32F4_RCC_APB2_##bit + (0x44 * 8)) #endif /* _DT_BINDINGS_MFD_STM32F4_RCC_H */ diff --git a/include/dt-bindings/net/mscc-phy-vsc8531.h b/include/dt-bindings/net/mscc-phy-vsc8531.h index 9eb2ec2b2e..2383dd20ff 100644 --- a/include/dt-bindings/net/mscc-phy-vsc8531.h +++ b/include/dt-bindings/net/mscc-phy-vsc8531.h @@ -4,28 +4,18 @@ * Author: Nagaraju Lakkaraju * * License: Dual MIT/GPL - * Copyright (c) 2017 Microsemi Corporation + * Copyright (c) 2016 Microsemi Corporation */ #ifndef _DT_BINDINGS_MSCC_VSC8531_H #define _DT_BINDINGS_MSCC_VSC8531_H -/* PHY LED Modes */ -#define VSC8531_LINK_ACTIVITY 0 -#define VSC8531_LINK_1000_ACTIVITY 1 -#define VSC8531_LINK_100_ACTIVITY 2 -#define VSC8531_LINK_10_ACTIVITY 3 -#define VSC8531_LINK_100_1000_ACTIVITY 4 -#define VSC8531_LINK_10_1000_ACTIVITY 5 -#define VSC8531_LINK_10_100_ACTIVITY 6 -#define VSC8584_LINK_100FX_1000X_ACTIVITY 7 -#define VSC8531_DUPLEX_COLLISION 8 -#define VSC8531_COLLISION 9 -#define VSC8531_ACTIVITY 10 -#define VSC8584_100FX_1000X_ACTIVITY 11 
-#define VSC8531_AUTONEG_FAULT 12 -#define VSC8531_SERIAL_MODE 13 -#define VSC8531_FORCE_LED_OFF 14 -#define VSC8531_FORCE_LED_ON 15 +/* MAC interface Edge rate control VDDMAC in milli Volts */ +#define MSCC_VDDMAC_3300 3300 +#define MSCC_VDDMAC_2500 2500 +#define MSCC_VDDMAC_1800 1800 +#define MSCC_VDDMAC_1500 1500 +#define MSCC_VDDMAC_MAX 4 +#define MSCC_SLOWDOWN_MAX 8 #endif diff --git a/include/dt-bindings/net/ti-dp83867.h b/include/dt-bindings/net/ti-dp83867.h index 6fc4b445d3..172744a72e 100644 --- a/include/dt-bindings/net/ti-dp83867.h +++ b/include/dt-bindings/net/ti-dp83867.h @@ -1,10 +1,18 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * Device Tree constants for the Texas Instruments DP83867 PHY * * Author: Dan Murphy * * Copyright: (C) 2015 Texas Instruments, Inc. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. 
*/ #ifndef _DT_BINDINGS_TI_DP83867_H @@ -34,20 +42,4 @@ #define DP83867_RGMIIDCTL_3_75_NS 0xe #define DP83867_RGMIIDCTL_4_00_NS 0xf -/* IO_MUX_CFG - Clock output selection */ -#define DP83867_CLK_O_SEL_CHN_A_RCLK 0x0 -#define DP83867_CLK_O_SEL_CHN_B_RCLK 0x1 -#define DP83867_CLK_O_SEL_CHN_C_RCLK 0x2 -#define DP83867_CLK_O_SEL_CHN_D_RCLK 0x3 -#define DP83867_CLK_O_SEL_CHN_A_RCLK_DIV5 0x4 -#define DP83867_CLK_O_SEL_CHN_B_RCLK_DIV5 0x5 -#define DP83867_CLK_O_SEL_CHN_C_RCLK_DIV5 0x6 -#define DP83867_CLK_O_SEL_CHN_D_RCLK_DIV5 0x7 -#define DP83867_CLK_O_SEL_CHN_A_TCLK 0x8 -#define DP83867_CLK_O_SEL_CHN_B_TCLK 0x9 -#define DP83867_CLK_O_SEL_CHN_C_TCLK 0xA -#define DP83867_CLK_O_SEL_CHN_D_TCLK 0xB -#define DP83867_CLK_O_SEL_REF_CLK 0xC -/* Special flag to indicate clock should be off */ -#define DP83867_CLK_O_SEL_OFF 0xFFFFFFFF #endif diff --git a/include/dt-bindings/phy/phy-pistachio-usb.h b/include/dt-bindings/phy/phy-pistachio-usb.h index 3542a67daf..d1877aa0a3 100644 --- a/include/dt-bindings/phy/phy-pistachio-usb.h +++ b/include/dt-bindings/phy/phy-pistachio-usb.h @@ -1,6 +1,9 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright (C) 2015 Google, Inc. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. 
*/ #ifndef _DT_BINDINGS_PHY_PISTACHIO diff --git a/include/dt-bindings/phy/phy.h b/include/dt-bindings/phy/phy.h index f48c9acf25..6c901930eb 100644 --- a/include/dt-bindings/phy/phy.h +++ b/include/dt-bindings/phy/phy.h @@ -1,10 +1,10 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * * This header provides constants for the phy framework * * Copyright (C) 2014 STMicroelectronics * Author: Gabriel Fernandez + * License terms: GNU General Public License (GPL), version 2 */ #ifndef _DT_BINDINGS_PHY @@ -15,12 +15,5 @@ #define PHY_TYPE_PCIE 2 #define PHY_TYPE_USB2 3 #define PHY_TYPE_USB3 4 -#define PHY_TYPE_UFS 5 -#define PHY_TYPE_DP 6 -#define PHY_TYPE_XPCS 7 -#define PHY_TYPE_SGMII 8 -#define PHY_TYPE_QSGMII 9 -#define PHY_TYPE_DPHY 10 -#define PHY_TYPE_CPHY 11 #endif /* _DT_BINDINGS_PHY */ diff --git a/include/dt-bindings/pinctrl/am33xx.h b/include/dt-bindings/pinctrl/am33xx.h index 17877e8598..226f77246a 100644 --- a/include/dt-bindings/pinctrl/am33xx.h +++ b/include/dt-bindings/pinctrl/am33xx.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ /* * This header provides constants specific to AM33XX pinctrl bindings. 
*/ @@ -40,133 +39,5 @@ #undef PIN_OFF_INPUT_PULLDOWN #undef PIN_OFF_WAKEUPENABLE -#define AM335X_PIN_OFFSET_MIN 0x0800U - -#define AM335X_PIN_GPMC_AD0 0x800 -#define AM335X_PIN_GPMC_AD1 0x804 -#define AM335X_PIN_GPMC_AD2 0x808 -#define AM335X_PIN_GPMC_AD3 0x80c -#define AM335X_PIN_GPMC_AD4 0x810 -#define AM335X_PIN_GPMC_AD5 0x814 -#define AM335X_PIN_GPMC_AD6 0x818 -#define AM335X_PIN_GPMC_AD7 0x81c -#define AM335X_PIN_GPMC_AD8 0x820 -#define AM335X_PIN_GPMC_AD9 0x824 -#define AM335X_PIN_GPMC_AD10 0x828 -#define AM335X_PIN_GPMC_AD11 0x82c -#define AM335X_PIN_GPMC_AD12 0x830 -#define AM335X_PIN_GPMC_AD13 0x834 -#define AM335X_PIN_GPMC_AD14 0x838 -#define AM335X_PIN_GPMC_AD15 0x83c -#define AM335X_PIN_GPMC_A0 0x840 -#define AM335X_PIN_GPMC_A1 0x844 -#define AM335X_PIN_GPMC_A2 0x848 -#define AM335X_PIN_GPMC_A3 0x84c -#define AM335X_PIN_GPMC_A4 0x850 -#define AM335X_PIN_GPMC_A5 0x854 -#define AM335X_PIN_GPMC_A6 0x858 -#define AM335X_PIN_GPMC_A7 0x85c -#define AM335X_PIN_GPMC_A8 0x860 -#define AM335X_PIN_GPMC_A9 0x864 -#define AM335X_PIN_GPMC_A10 0x868 -#define AM335X_PIN_GPMC_A11 0x86c -#define AM335X_PIN_GPMC_WAIT0 0x870 -#define AM335X_PIN_GPMC_WPN 0x874 -#define AM335X_PIN_GPMC_BEN1 0x878 -#define AM335X_PIN_GPMC_CSN0 0x87c -#define AM335X_PIN_GPMC_CSN1 0x880 -#define AM335X_PIN_GPMC_CSN2 0x884 -#define AM335X_PIN_GPMC_CSN3 0x888 -#define AM335X_PIN_GPMC_CLK 0x88c -#define AM335X_PIN_GPMC_ADVN_ALE 0x890 -#define AM335X_PIN_GPMC_OEN_REN 0x894 -#define AM335X_PIN_GPMC_WEN 0x898 -#define AM335X_PIN_GPMC_BEN0_CLE 0x89c -#define AM335X_PIN_LCD_DATA0 0x8a0 -#define AM335X_PIN_LCD_DATA1 0x8a4 -#define AM335X_PIN_LCD_DATA2 0x8a8 -#define AM335X_PIN_LCD_DATA3 0x8ac -#define AM335X_PIN_LCD_DATA4 0x8b0 -#define AM335X_PIN_LCD_DATA5 0x8b4 -#define AM335X_PIN_LCD_DATA6 0x8b8 -#define AM335X_PIN_LCD_DATA7 0x8bc -#define AM335X_PIN_LCD_DATA8 0x8c0 -#define AM335X_PIN_LCD_DATA9 0x8c4 -#define AM335X_PIN_LCD_DATA10 0x8c8 -#define AM335X_PIN_LCD_DATA11 0x8cc -#define 
AM335X_PIN_LCD_DATA12 0x8d0 -#define AM335X_PIN_LCD_DATA13 0x8d4 -#define AM335X_PIN_LCD_DATA14 0x8d8 -#define AM335X_PIN_LCD_DATA15 0x8dc -#define AM335X_PIN_LCD_VSYNC 0x8e0 -#define AM335X_PIN_LCD_HSYNC 0x8e4 -#define AM335X_PIN_LCD_PCLK 0x8e8 -#define AM335X_PIN_LCD_AC_BIAS_EN 0x8ec -#define AM335X_PIN_MMC0_DAT3 0x8f0 -#define AM335X_PIN_MMC0_DAT2 0x8f4 -#define AM335X_PIN_MMC0_DAT1 0x8f8 -#define AM335X_PIN_MMC0_DAT0 0x8fc -#define AM335X_PIN_MMC0_CLK 0x900 -#define AM335X_PIN_MMC0_CMD 0x904 -#define AM335X_PIN_MII1_COL 0x908 -#define AM335X_PIN_MII1_CRS 0x90c -#define AM335X_PIN_MII1_RX_ER 0x910 -#define AM335X_PIN_MII1_TX_EN 0x914 -#define AM335X_PIN_MII1_RX_DV 0x918 -#define AM335X_PIN_MII1_TXD3 0x91c -#define AM335X_PIN_MII1_TXD2 0x920 -#define AM335X_PIN_MII1_TXD1 0x924 -#define AM335X_PIN_MII1_TXD0 0x928 -#define AM335X_PIN_MII1_TX_CLK 0x92c -#define AM335X_PIN_MII1_RX_CLK 0x930 -#define AM335X_PIN_MII1_RXD3 0x934 -#define AM335X_PIN_MII1_RXD2 0x938 -#define AM335X_PIN_MII1_RXD1 0x93c -#define AM335X_PIN_MII1_RXD0 0x940 -#define AM335X_PIN_RMII1_REF_CLK 0x944 -#define AM335X_PIN_MDIO 0x948 -#define AM335X_PIN_MDC 0x94c -#define AM335X_PIN_SPI0_SCLK 0x950 -#define AM335X_PIN_SPI0_D0 0x954 -#define AM335X_PIN_SPI0_D1 0x958 -#define AM335X_PIN_SPI0_CS0 0x95c -#define AM335X_PIN_SPI0_CS1 0x960 -#define AM335X_PIN_ECAP0_IN_PWM0_OUT 0x964 -#define AM335X_PIN_UART0_CTSN 0x968 -#define AM335X_PIN_UART0_RTSN 0x96c -#define AM335X_PIN_UART0_RXD 0x970 -#define AM335X_PIN_UART0_TXD 0x974 -#define AM335X_PIN_UART1_CTSN 0x978 -#define AM335X_PIN_UART1_RTSN 0x97c -#define AM335X_PIN_UART1_RXD 0x980 -#define AM335X_PIN_UART1_TXD 0x984 -#define AM335X_PIN_I2C0_SDA 0x988 -#define AM335X_PIN_I2C0_SCL 0x98c -#define AM335X_PIN_MCASP0_ACLKX 0x990 -#define AM335X_PIN_MCASP0_FSX 0x994 -#define AM335X_PIN_MCASP0_AXR0 0x998 -#define AM335X_PIN_MCASP0_AHCLKR 0x99c -#define AM335X_PIN_MCASP0_ACLKR 0x9a0 -#define AM335X_PIN_MCASP0_FSR 0x9a4 -#define AM335X_PIN_MCASP0_AXR1 0x9a8 
-#define AM335X_PIN_MCASP0_AHCLKX 0x9ac -#define AM335X_PIN_XDMA_EVENT_INTR0 0x9b0 -#define AM335X_PIN_XDMA_EVENT_INTR1 0x9b4 -#define AM335X_PIN_WARMRSTN 0x9b8 -#define AM335X_PIN_NNMI 0x9c0 -#define AM335X_PIN_TMS 0x9d0 -#define AM335X_PIN_TDI 0x9d4 -#define AM335X_PIN_TDO 0x9d8 -#define AM335X_PIN_TCK 0x9dc -#define AM335X_PIN_TRSTN 0x9e0 -#define AM335X_PIN_EMU0 0x9e4 -#define AM335X_PIN_EMU1 0x9e8 -#define AM335X_PIN_RTC_PWRONRSTN 0x9f8 -#define AM335X_PIN_PMIC_POWER_EN 0x9fc -#define AM335X_PIN_EXT_WAKEUP 0xa00 -#define AM335X_PIN_USB0_DRVVBUS 0xa1c -#define AM335X_PIN_USB1_DRVVBUS 0xa34 - -#define AM335X_PIN_OFFSET_MAX 0x0a34U - #endif + diff --git a/include/dt-bindings/pinctrl/am43xx.h b/include/dt-bindings/pinctrl/am43xx.h index 6ce4a32f77..344bd1eb33 100644 --- a/include/dt-bindings/pinctrl/am43xx.h +++ b/include/dt-bindings/pinctrl/am43xx.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ /* * This header provides constants specific to AM43XX pinctrl bindings. */ @@ -22,22 +21,9 @@ #define INPUT_EN (1 << 18) #define SLEWCTRL_SLOW (1 << 19) #define SLEWCTRL_FAST 0 -#define DS0_FORCE_OFF_MODE (1 << 24) -#define DS0_INPUT (1 << 25) -#define DS0_FORCE_OUT_HIGH (1 << 26) -#define DS0_PULL_UP_DOWN_EN (0 << 27) -#define DS0_PULL_UP_DOWN_DIS (1 << 27) -#define DS0_PULL_UP_SEL (1 << 28) +#define DS0_PULL_UP_DOWN_EN (1 << 27) #define WAKEUP_ENABLE (1 << 29) -#define DS0_PIN_OUTPUT (DS0_FORCE_OFF_MODE) -#define DS0_PIN_OUTPUT_HIGH (DS0_FORCE_OFF_MODE | DS0_FORCE_OUT_HIGH) -#define DS0_PIN_OUTPUT_PULLUP (DS0_FORCE_OFF_MODE | DS0_PULL_UP_DOWN_EN | DS0_PULL_UP_SEL) -#define DS0_PIN_OUTPUT_PULLDOWN (DS0_FORCE_OFF_MODE | DS0_PULL_UP_DOWN_EN) -#define DS0_PIN_INPUT (DS0_FORCE_OFF_MODE | DS0_INPUT) -#define DS0_PIN_INPUT_PULLUP (DS0_FORCE_OFF_MODE | DS0_INPUT | DS0_PULL_UP_DOWN_EN | DS0_PULL_UP_SEL) -#define DS0_PIN_INPUT_PULLDOWN (DS0_FORCE_OFF_MODE | DS0_INPUT | DS0_PULL_UP_DOWN_EN) - #define PIN_OUTPUT (PULL_DISABLE) #define PIN_OUTPUT_PULLUP (PULL_UP) #define 
PIN_OUTPUT_PULLDOWN 0 diff --git a/include/dt-bindings/pinctrl/at91.h b/include/dt-bindings/pinctrl/at91.h index e8e117306b..bbca3d0389 100644 --- a/include/dt-bindings/pinctrl/at91.h +++ b/include/dt-bindings/pinctrl/at91.h @@ -1,8 +1,9 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * This header provides constants for most at91 pinctrl bindings. * * Copyright (C) 2013 Jean-Christophe PLAGNIOL-VILLARD + * + * GPLv2 only */ #ifndef __DT_BINDINGS_AT91_PINCTRL_H__ @@ -14,9 +15,6 @@ #define AT91_PINCTRL_DEGLITCH (1 << 2) #define AT91_PINCTRL_PULL_DOWN (1 << 3) #define AT91_PINCTRL_DIS_SCHMIT (1 << 4) -#define AT91_PINCTRL_OUTPUT (1 << 7) -#define AT91_PINCTRL_OUTPUT_VAL(x) ((x & 0x1) << 8) -#define AT91_PINCTRL_SLEWRATE (1 << 9) #define AT91_PINCTRL_DEBOUNCE (1 << 16) #define AT91_PINCTRL_DEBOUNCE_VAL(x) (x << 17) @@ -27,9 +25,6 @@ #define AT91_PINCTRL_DRIVE_STRENGTH_MED (0x2 << 5) #define AT91_PINCTRL_DRIVE_STRENGTH_HI (0x3 << 5) -#define AT91_PINCTRL_SLEWRATE_ENA (0x0 << 9) -#define AT91_PINCTRL_SLEWRATE_DIS (0x1 << 9) - #define AT91_PIOA 0 #define AT91_PIOB 1 #define AT91_PIOC 2 @@ -42,8 +37,4 @@ #define AT91_PERIPH_C 3 #define AT91_PERIPH_D 4 -#define ATMEL_PIO_DRVSTR_LO 1 -#define ATMEL_PIO_DRVSTR_ME 2 -#define ATMEL_PIO_DRVSTR_HI 3 - #endif /* __DT_BINDINGS_AT91_PINCTRL_H__ */ diff --git a/include/dt-bindings/pinctrl/bcm2835.h b/include/dt-bindings/pinctrl/bcm2835.h index b5b2654a0e..6f0bc37af3 100644 --- a/include/dt-bindings/pinctrl/bcm2835.h +++ b/include/dt-bindings/pinctrl/bcm2835.h @@ -1,8 +1,14 @@ -/* SPDX-License-Identifier: GPL-2.0 */ /* * Header providing constants for bcm2835 pinctrl bindings. * * Copyright (C) 2015 Stefan Wahren + * + * The code contained herein is licensed under the GNU General Public + * License. 
You may obtain a copy of the GNU General Public License + * Version 2 at the following locations: + * + * http://www.opensource.org/licenses/gpl-license.html + * http://www.gnu.org/copyleft/gpl.html */ #ifndef __DT_BINDINGS_PINCTRL_BCM2835_H__ @@ -18,9 +24,4 @@ #define BCM2835_FSEL_ALT2 6 #define BCM2835_FSEL_ALT3 7 -/* brcm,pull property */ -#define BCM2835_PUD_OFF 0 -#define BCM2835_PUD_DOWN 1 -#define BCM2835_PUD_UP 2 - #endif /* __DT_BINDINGS_PINCTRL_BCM2835_H__ */ diff --git a/include/dt-bindings/pinctrl/dm814x.h b/include/dt-bindings/pinctrl/dm814x.h index afbabbc4dd..0f484273da 100644 --- a/include/dt-bindings/pinctrl/dm814x.h +++ b/include/dt-bindings/pinctrl/dm814x.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ /* * This header provides constants specific to DM814X pinctrl bindings. */ diff --git a/include/dt-bindings/pinctrl/dra.h b/include/dt-bindings/pinctrl/dra.h index 252cdfd0d8..5c75e80915 100644 --- a/include/dt-bindings/pinctrl/dra.h +++ b/include/dt-bindings/pinctrl/dra.h @@ -1,9 +1,12 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * This header provides constants for DRA pinctrl bindings. * * Copyright (C) 2013 Texas Instruments Incorporated - http://www.ti.com/ * Author: Rajendra Nayak + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
*/ #ifndef _DT_BINDINGS_PINCTRL_DRA_H @@ -70,8 +73,5 @@ */ #define DRA7XX_CORE_IOPAD(pa, val) (((pa) & 0xffff) - 0x3400) (val) -/* DRA7 IODELAY configuration parameters */ -#define A_DELAY_PS(val) ((val) & 0xffff) -#define G_DELAY_PS(val) ((val) & 0xffff) #endif diff --git a/include/dt-bindings/pinctrl/hisi.h b/include/dt-bindings/pinctrl/hisi.h index 0359bfdc91..38f1ea879e 100644 --- a/include/dt-bindings/pinctrl/hisi.h +++ b/include/dt-bindings/pinctrl/hisi.h @@ -56,19 +56,4 @@ #define DRIVE4_08MA (4 << 4) #define DRIVE4_10MA (6 << 4) -/* drive strength definition for hi3660 */ -#define DRIVE6_MASK (15 << 4) -#define DRIVE6_04MA (0 << 4) -#define DRIVE6_12MA (4 << 4) -#define DRIVE6_19MA (8 << 4) -#define DRIVE6_27MA (10 << 4) -#define DRIVE6_32MA (15 << 4) -#define DRIVE7_02MA (0 << 4) -#define DRIVE7_04MA (1 << 4) -#define DRIVE7_06MA (2 << 4) -#define DRIVE7_08MA (3 << 4) -#define DRIVE7_10MA (4 << 4) -#define DRIVE7_12MA (5 << 4) -#define DRIVE7_14MA (6 << 4) -#define DRIVE7_16MA (7 << 4) #endif diff --git a/include/dt-bindings/pinctrl/mt6397-pinfunc.h b/include/dt-bindings/pinctrl/mt6397-pinfunc.h index f393fbd689..85739b308c 100644 --- a/include/dt-bindings/pinctrl/mt6397-pinfunc.h +++ b/include/dt-bindings/pinctrl/mt6397-pinfunc.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef __DTS_MT6397_PINFUNC_H #define __DTS_MT6397_PINFUNC_H diff --git a/include/dt-bindings/pinctrl/mt65xx.h b/include/dt-bindings/pinctrl/mt65xx.h index 7e16e58fe1..1198f45413 100644 --- a/include/dt-bindings/pinctrl/mt65xx.h +++ b/include/dt-bindings/pinctrl/mt65xx.h @@ -1,7 +1,15 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright (c) 2014 MediaTek Inc. * Author: Hongzhou.Yang + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. */ #ifndef _DT_BINDINGS_PINCTRL_MT65XX_H diff --git a/include/dt-bindings/pinctrl/mt7623-pinfunc.h b/include/dt-bindings/pinctrl/mt7623-pinfunc.h index 604fe781c4..2f00bdc424 100644 --- a/include/dt-bindings/pinctrl/mt7623-pinfunc.h +++ b/include/dt-bindings/pinctrl/mt7623-pinfunc.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef __DTS_MT7623_PINFUNC_H #define __DTS_MT7623_PINFUNC_H @@ -23,26 +22,20 @@ #define MT7623_PIN_5_PWRAP_SPI0_CK2_FUNC_GPIO5 (MTK_PIN_NO(5) | 0) #define MT7623_PIN_5_PWRAP_SPI0_CK2_FUNC_PWRAP_SPICK2_I (MTK_PIN_NO(5) | 1) -#define MT7623_PIN_5_PWRAP_SPI0_CK2_FUNC_ANT_SEL1 (MTK_PIN_NO(5) | 5) #define MT7623_PIN_6_PWRAP_SPI0_CSN2_FUNC_GPIO6 (MTK_PIN_NO(6) | 0) #define MT7623_PIN_6_PWRAP_SPI0_CSN2_FUNC_PWRAP_SPICS2_B_I (MTK_PIN_NO(6) | 1) -#define MT7623_PIN_6_PWRAP_SPI0_CSN2_FUNC_ANT_SEL0 (MTK_PIN_NO(6) | 5) #define MT7623_PIN_7_SPI1_CSN_FUNC_GPIO7 (MTK_PIN_NO(7) | 0) #define MT7623_PIN_7_SPI1_CSN_FUNC_SPI1_CS (MTK_PIN_NO(7) | 1) -#define MT7623_PIN_7_SPI1_CSN_FUNC_KCOL0 (MTK_PIN_NO(7) | 4) #define MT7623_PIN_8_SPI1_MI_FUNC_GPIO8 (MTK_PIN_NO(8) | 0) #define MT7623_PIN_8_SPI1_MI_FUNC_SPI1_MI (MTK_PIN_NO(8) | 1) #define MT7623_PIN_8_SPI1_MI_FUNC_SPI1_MO (MTK_PIN_NO(8) | 2) -#define MT7623_PIN_8_SPI1_MI_FUNC_KCOL1 (MTK_PIN_NO(8) | 4) #define MT7623_PIN_9_SPI1_MO_FUNC_GPIO9 (MTK_PIN_NO(9) | 0) #define MT7623_PIN_9_SPI1_MO_FUNC_SPI1_MO (MTK_PIN_NO(9) | 1) #define MT7623_PIN_9_SPI1_MO_FUNC_SPI1_MI (MTK_PIN_NO(9) | 2) -#define MT7623_PIN_9_SPI1_MO_FUNC_EXT_FRAME_SYNC (MTK_PIN_NO(9) | 3) -#define MT7623_PIN_9_SPI1_MO_FUNC_KCOL2 (MTK_PIN_NO(9) | 4) #define MT7623_PIN_10_RTC32K_CK_FUNC_GPIO10 (MTK_PIN_NO(10) | 0) #define MT7623_PIN_10_RTC32K_CK_FUNC_RTC32K_CK (MTK_PIN_NO(10) | 1) @@ -59,7 
+52,6 @@ #define MT7623_PIN_14_GPIO14_FUNC_GPIO14 (MTK_PIN_NO(14) | 0) #define MT7623_PIN_14_GPIO14_FUNC_URXD2 (MTK_PIN_NO(14) | 1) #define MT7623_PIN_14_GPIO14_FUNC_UTXD2 (MTK_PIN_NO(14) | 2) -#define MT7623_PIN_14_GPIO14_FUNC_SRCCLKENAI2 (MTK_PIN_NO(14) | 5) #define MT7623_PIN_15_GPIO15_FUNC_GPIO15 (MTK_PIN_NO(15) | 0) #define MT7623_PIN_15_GPIO15_FUNC_UTXD2 (MTK_PIN_NO(15) | 1) @@ -67,139 +59,88 @@ #define MT7623_PIN_18_PCM_CLK_FUNC_GPIO18 (MTK_PIN_NO(18) | 0) #define MT7623_PIN_18_PCM_CLK_FUNC_PCM_CLK0 (MTK_PIN_NO(18) | 1) -#define MT7623_PIN_18_PCM_CLK_FUNC_MRG_CLK (MTK_PIN_NO(18) | 2) -#define MT7623_PIN_18_PCM_CLK_FUNC_MM_TEST_CK (MTK_PIN_NO(18) | 4) -#define MT7623_PIN_18_PCM_CLK_FUNC_CONN_DSP_JCK (MTK_PIN_NO(18) | 5) #define MT7623_PIN_18_PCM_CLK_FUNC_AP_PCM_CLKO (MTK_PIN_NO(18) | 6) #define MT7623_PIN_19_PCM_SYNC_FUNC_GPIO19 (MTK_PIN_NO(19) | 0) #define MT7623_PIN_19_PCM_SYNC_FUNC_PCM_SYNC (MTK_PIN_NO(19) | 1) -#define MT7623_PIN_19_PCM_SYNC_FUNC_MRG_SYNC (MTK_PIN_NO(19) | 2) -#define MT7623_PIN_19_PCM_SYNC_FUNC_CONN_DSP_JINTP (MTK_PIN_NO(19) | 5) #define MT7623_PIN_19_PCM_SYNC_FUNC_AP_PCM_SYNC (MTK_PIN_NO(19) | 6) #define MT7623_PIN_20_PCM_RX_FUNC_GPIO20 (MTK_PIN_NO(20) | 0) #define MT7623_PIN_20_PCM_RX_FUNC_PCM_RX (MTK_PIN_NO(20) | 1) -#define MT7623_PIN_20_PCM_RX_FUNC_MRG_RX (MTK_PIN_NO(20) | 2) -#define MT7623_PIN_20_PCM_RX_FUNC_MRG_TX (MTK_PIN_NO(20) | 3) #define MT7623_PIN_20_PCM_RX_FUNC_PCM_TX (MTK_PIN_NO(20) | 4) -#define MT7623_PIN_20_PCM_RX_FUNC_CONN_DSP_JDI (MTK_PIN_NO(20) | 5) #define MT7623_PIN_20_PCM_RX_FUNC_AP_PCM_RX (MTK_PIN_NO(20) | 6) #define MT7623_PIN_21_PCM_TX_FUNC_GPIO21 (MTK_PIN_NO(21) | 0) #define MT7623_PIN_21_PCM_TX_FUNC_PCM_TX (MTK_PIN_NO(21) | 1) -#define MT7623_PIN_21_PCM_TX_FUNC_MRG_TX (MTK_PIN_NO(21) | 2) -#define MT7623_PIN_21_PCM_TX_FUNC_MRG_RX (MTK_PIN_NO(21) | 3) #define MT7623_PIN_21_PCM_TX_FUNC_PCM_RX (MTK_PIN_NO(21) | 4) -#define MT7623_PIN_21_PCM_TX_FUNC_CONN_DSP_JMS (MTK_PIN_NO(21) | 5) #define 
MT7623_PIN_21_PCM_TX_FUNC_AP_PCM_TX (MTK_PIN_NO(21) | 6) #define MT7623_PIN_22_EINT0_FUNC_GPIO22 (MTK_PIN_NO(22) | 0) #define MT7623_PIN_22_EINT0_FUNC_UCTS0 (MTK_PIN_NO(22) | 1) #define MT7623_PIN_22_EINT0_FUNC_PCIE0_PERST_N (MTK_PIN_NO(22) | 2) -#define MT7623_PIN_22_EINT0_FUNC_KCOL3 (MTK_PIN_NO(22) | 3) -#define MT7623_PIN_22_EINT0_FUNC_CONN_DSP_JDO (MTK_PIN_NO(22) | 4) -#define MT7623_PIN_22_EINT0_FUNC_EXT_FRAME_SYNC (MTK_PIN_NO(22) | 5) #define MT7623_PIN_23_EINT1_FUNC_GPIO23 (MTK_PIN_NO(23) | 0) #define MT7623_PIN_23_EINT1_FUNC_URTS0 (MTK_PIN_NO(23) | 1) #define MT7623_PIN_23_EINT1_FUNC_PCIE1_PERST_N (MTK_PIN_NO(23) | 2) -#define MT7623_PIN_23_EINT1_FUNC_KCOL2 (MTK_PIN_NO(23) | 3) -#define MT7623_PIN_23_EINT1_FUNC_CONN_MCU_TDO (MTK_PIN_NO(23) | 4) -#define MT7623_PIN_23_EINT1_FUNC_EXT_FRAME_SYNC (MTK_PIN_NO(23) | 5) #define MT7623_PIN_24_EINT2_FUNC_GPIO24 (MTK_PIN_NO(24) | 0) #define MT7623_PIN_24_EINT2_FUNC_UCTS1 (MTK_PIN_NO(24) | 1) #define MT7623_PIN_24_EINT2_FUNC_PCIE2_PERST_N (MTK_PIN_NO(24) | 2) -#define MT7623_PIN_24_EINT2_FUNC_KCOL1 (MTK_PIN_NO(24) | 3) -#define MT7623_PIN_24_EINT2_FUNC_CONN_MCU_DBGACK_N (MTK_PIN_NO(24) | 4) #define MT7623_PIN_25_EINT3_FUNC_GPIO25 (MTK_PIN_NO(25) | 0) #define MT7623_PIN_25_EINT3_FUNC_URTS1 (MTK_PIN_NO(25) | 1) -#define MT7623_PIN_25_EINT3_FUNC_KCOL0 (MTK_PIN_NO(25) | 3) -#define MT7623_PIN_25_EINT3_FUNC_CONN_MCU_DBGI_N (MTK_PIN_NO(25) | 4) #define MT7623_PIN_26_EINT4_FUNC_GPIO26 (MTK_PIN_NO(26) | 0) #define MT7623_PIN_26_EINT4_FUNC_UCTS3 (MTK_PIN_NO(26) | 1) -#define MT7623_PIN_26_EINT4_FUNC_DRV_VBUS_P1 (MTK_PIN_NO(26) | 2) -#define MT7623_PIN_26_EINT4_FUNC_KROW3 (MTK_PIN_NO(26) | 3) -#define MT7623_PIN_26_EINT4_FUNC_CONN_MCU_TCK0 (MTK_PIN_NO(26) | 4) -#define MT7623_PIN_26_EINT4_FUNC_CONN_MCU_AICE_JCKC (MTK_PIN_NO(26) | 5) #define MT7623_PIN_26_EINT4_FUNC_PCIE2_WAKE_N (MTK_PIN_NO(26) | 6) #define MT7623_PIN_27_EINT5_FUNC_GPIO27 (MTK_PIN_NO(27) | 0) #define MT7623_PIN_27_EINT5_FUNC_URTS3 (MTK_PIN_NO(27) | 1) -#define 
MT7623_PIN_27_EINT5_FUNC_IDDIG_P1 (MTK_PIN_NO(27) | 2) -#define MT7623_PIN_27_EINT5_FUNC_KROW2 (MTK_PIN_NO(27) | 3) -#define MT7623_PIN_27_EINT5_FUNC_CONN_MCU_TDI (MTK_PIN_NO(27) | 4) #define MT7623_PIN_27_EINT5_FUNC_PCIE1_WAKE_N (MTK_PIN_NO(27) | 6) #define MT7623_PIN_28_EINT6_FUNC_GPIO28 (MTK_PIN_NO(28) | 0) #define MT7623_PIN_28_EINT6_FUNC_DRV_VBUS (MTK_PIN_NO(28) | 1) -#define MT7623_PIN_28_EINT6_FUNC_KROW1 (MTK_PIN_NO(28) | 3) -#define MT7623_PIN_28_EINT6_FUNC_CONN_MCU_TRST_B (MTK_PIN_NO(28) | 4) #define MT7623_PIN_28_EINT6_FUNC_PCIE0_WAKE_N (MTK_PIN_NO(28) | 6) #define MT7623_PIN_29_EINT7_FUNC_GPIO29 (MTK_PIN_NO(29) | 0) #define MT7623_PIN_29_EINT7_FUNC_IDDIG (MTK_PIN_NO(29) | 1) #define MT7623_PIN_29_EINT7_FUNC_MSDC1_WP (MTK_PIN_NO(29) | 2) -#define MT7623_PIN_29_EINT7_FUNC_KROW0 (MTK_PIN_NO(29) | 3) -#define MT7623_PIN_29_EINT7_FUNC_CONN_MCU_TMS (MTK_PIN_NO(29) | 4) -#define MT7623_PIN_29_EINT7_FUNC_CONN_MCU_AICE_JMSC (MTK_PIN_NO(29) | 5) #define MT7623_PIN_29_EINT7_FUNC_PCIE2_PERST_N (MTK_PIN_NO(29) | 6) #define MT7623_PIN_33_I2S1_DATA_FUNC_GPIO33 (MTK_PIN_NO(33) | 0) #define MT7623_PIN_33_I2S1_DATA_FUNC_I2S1_DATA (MTK_PIN_NO(33) | 1) -#define MT7623_PIN_33_I2S1_DATA_FUNC_I2S1_DATA_BYPS (MTK_PIN_NO(33) | 2) #define MT7623_PIN_33_I2S1_DATA_FUNC_PCM_TX (MTK_PIN_NO(33) | 3) -#define MT7623_PIN_33_I2S1_DATA_FUNC_IMG_TEST_CK (MTK_PIN_NO(33) | 4) -#define MT7623_PIN_33_I2S1_DATA_FUNC_G1_RXD0 (MTK_PIN_NO(33) | 5) #define MT7623_PIN_33_I2S1_DATA_FUNC_AP_PCM_TX (MTK_PIN_NO(33) | 6) #define MT7623_PIN_34_I2S1_DATA_IN_FUNC_GPIO34 (MTK_PIN_NO(34) | 0) #define MT7623_PIN_34_I2S1_DATA_IN_FUNC_I2S1_DATA_IN (MTK_PIN_NO(34) | 1) #define MT7623_PIN_34_I2S1_DATA_IN_FUNC_PCM_RX (MTK_PIN_NO(34) | 3) -#define MT7623_PIN_34_I2S1_DATA_IN_FUNC_VDEC_TEST_CK (MTK_PIN_NO(34) | 4) -#define MT7623_PIN_34_I2S1_DATA_IN_FUNC_G1_RXD1 (MTK_PIN_NO(34) | 5) #define MT7623_PIN_34_I2S1_DATA_IN_FUNC_AP_PCM_RX (MTK_PIN_NO(34) | 6) #define MT7623_PIN_35_I2S1_BCK_FUNC_GPIO35 (MTK_PIN_NO(35) | 0) 
#define MT7623_PIN_35_I2S1_BCK_FUNC_I2S1_BCK (MTK_PIN_NO(35) | 1) #define MT7623_PIN_35_I2S1_BCK_FUNC_PCM_CLK0 (MTK_PIN_NO(35) | 3) -#define MT7623_PIN_35_I2S1_BCK_FUNC_G1_RXD2 (MTK_PIN_NO(35) | 5) #define MT7623_PIN_35_I2S1_BCK_FUNC_AP_PCM_CLKO (MTK_PIN_NO(35) | 6) #define MT7623_PIN_36_I2S1_LRCK_FUNC_GPIO36 (MTK_PIN_NO(36) | 0) #define MT7623_PIN_36_I2S1_LRCK_FUNC_I2S1_LRCK (MTK_PIN_NO(36) | 1) #define MT7623_PIN_36_I2S1_LRCK_FUNC_PCM_SYNC (MTK_PIN_NO(36) | 3) -#define MT7623_PIN_36_I2S1_LRCK_FUNC_G1_RXD3 (MTK_PIN_NO(36) | 5) #define MT7623_PIN_36_I2S1_LRCK_FUNC_AP_PCM_SYNC (MTK_PIN_NO(36) | 6) #define MT7623_PIN_37_I2S1_MCLK_FUNC_GPIO37 (MTK_PIN_NO(37) | 0) #define MT7623_PIN_37_I2S1_MCLK_FUNC_I2S1_MCLK (MTK_PIN_NO(37) | 1) -#define MT7623_PIN_37_I2S1_MCLK_FUNC_G1_RXDV (MTK_PIN_NO(37) | 5) #define MT7623_PIN_39_JTMS_FUNC_GPIO39 (MTK_PIN_NO(39) | 0) #define MT7623_PIN_39_JTMS_FUNC_JTMS (MTK_PIN_NO(39) | 1) -#define MT7623_PIN_39_JTMS_FUNC_CONN_MCU_TMS (MTK_PIN_NO(39) | 2) -#define MT7623_PIN_39_JTMS_FUNC_CONN_MCU_AICE_JMSC (MTK_PIN_NO(39) | 3) -#define MT7623_PIN_39_JTMS_FUNC_DFD_TMS_XI (MTK_PIN_NO(39) | 4) #define MT7623_PIN_40_JTCK_FUNC_GPIO40 (MTK_PIN_NO(40) | 0) #define MT7623_PIN_40_JTCK_FUNC_JTCK (MTK_PIN_NO(40) | 1) -#define MT7623_PIN_40_JTCK_FUNC_CONN_MCU_TCK1 (MTK_PIN_NO(40) | 2) -#define MT7623_PIN_40_JTCK_FUNC_CONN_MCU_AICE_JCKC (MTK_PIN_NO(40) | 3) -#define MT7623_PIN_40_JTCK_FUNC_DFD_TCK_XI (MTK_PIN_NO(40) | 4) #define MT7623_PIN_41_JTDI_FUNC_GPIO41 (MTK_PIN_NO(41) | 0) #define MT7623_PIN_41_JTDI_FUNC_JTDI (MTK_PIN_NO(41) | 1) -#define MT7623_PIN_41_JTDI_FUNC_CONN_MCU_TDI (MTK_PIN_NO(41) | 2) -#define MT7623_PIN_41_JTDI_FUNC_DFD_TDI_XI (MTK_PIN_NO(41) | 4) #define MT7623_PIN_42_JTDO_FUNC_GPIO42 (MTK_PIN_NO(42) | 0) #define MT7623_PIN_42_JTDO_FUNC_JTDO (MTK_PIN_NO(42) | 1) -#define MT7623_PIN_42_JTDO_FUNC_CONN_MCU_TDO (MTK_PIN_NO(42) | 2) -#define MT7623_PIN_42_JTDO_FUNC_DFD_TDO (MTK_PIN_NO(42) | 4) #define MT7623_PIN_43_NCLE_FUNC_GPIO43 
(MTK_PIN_NO(43) | 0) #define MT7623_PIN_43_NCLE_FUNC_NCLE (MTK_PIN_NO(43) | 1) @@ -218,46 +159,31 @@ #define MT7623_PIN_47_NREB_FUNC_GPIO47 (MTK_PIN_NO(47) | 0) #define MT7623_PIN_47_NREB_FUNC_NREB (MTK_PIN_NO(47) | 1) -#define MT7623_PIN_47_NREB_FUNC_IDDIG_P1 (MTK_PIN_NO(47) | 2) #define MT7623_PIN_48_NRNB_FUNC_GPIO48 (MTK_PIN_NO(48) | 0) #define MT7623_PIN_48_NRNB_FUNC_NRNB (MTK_PIN_NO(48) | 1) -#define MT7623_PIN_48_NRNB_FUNC_DRV_VBUS_P1 (MTK_PIN_NO(48) | 2) #define MT7623_PIN_49_I2S0_DATA_FUNC_GPIO49 (MTK_PIN_NO(49) | 0) #define MT7623_PIN_49_I2S0_DATA_FUNC_I2S0_DATA (MTK_PIN_NO(49) | 1) -#define MT7623_PIN_49_I2S0_DATA_FUNC_I2S0_DATA_BYPS (MTK_PIN_NO(49) | 2) #define MT7623_PIN_49_I2S0_DATA_FUNC_PCM_TX (MTK_PIN_NO(49) | 3) #define MT7623_PIN_49_I2S0_DATA_FUNC_AP_I2S_DO (MTK_PIN_NO(49) | 6) #define MT7623_PIN_53_SPI0_CSN_FUNC_GPIO53 (MTK_PIN_NO(53) | 0) #define MT7623_PIN_53_SPI0_CSN_FUNC_SPI0_CS (MTK_PIN_NO(53) | 1) -#define MT7623_PIN_53_SPI0_CSN_FUNC_SPDIF (MTK_PIN_NO(53) | 3) -#define MT7623_PIN_53_SPI0_CSN_FUNC_ADC_CK (MTK_PIN_NO(53) | 4) #define MT7623_PIN_53_SPI0_CSN_FUNC_PWM1 (MTK_PIN_NO(53) | 5) #define MT7623_PIN_54_SPI0_CK_FUNC_GPIO54 (MTK_PIN_NO(54) | 0) #define MT7623_PIN_54_SPI0_CK_FUNC_SPI0_CK (MTK_PIN_NO(54) | 1) -#define MT7623_PIN_54_SPI0_CK_FUNC_SPDIF_IN1 (MTK_PIN_NO(54) | 3) -#define MT7623_PIN_54_SPI0_CK_FUNC_ADC_DAT_IN (MTK_PIN_NO(54) | 4) #define MT7623_PIN_55_SPI0_MI_FUNC_GPIO55 (MTK_PIN_NO(55) | 0) #define MT7623_PIN_55_SPI0_MI_FUNC_SPI0_MI (MTK_PIN_NO(55) | 1) #define MT7623_PIN_55_SPI0_MI_FUNC_SPI0_MO (MTK_PIN_NO(55) | 2) #define MT7623_PIN_55_SPI0_MI_FUNC_MSDC1_WP (MTK_PIN_NO(55) | 3) -#define MT7623_PIN_55_SPI0_MI_FUNC_ADC_WS (MTK_PIN_NO(55) | 4) #define MT7623_PIN_55_SPI0_MI_FUNC_PWM2 (MTK_PIN_NO(55) | 5) #define MT7623_PIN_56_SPI0_MO_FUNC_GPIO56 (MTK_PIN_NO(56) | 0) #define MT7623_PIN_56_SPI0_MO_FUNC_SPI0_MO (MTK_PIN_NO(56) | 1) #define MT7623_PIN_56_SPI0_MO_FUNC_SPI0_MI (MTK_PIN_NO(56) | 2) -#define 
MT7623_PIN_56_SPI0_MO_FUNC_SPDIF_IN0 (MTK_PIN_NO(56) | 3) - -#define MT7623_PIN_57_SDA1_FUNC_GPIO57 (MTK_PIN_NO(57) | 0) -#define MT7623_PIN_57_SDA1_FUNC_SDA1 (MTK_PIN_NO(57) | 1) - -#define MT7623_PIN_58_SCL1_FUNC_GPIO58 (MTK_PIN_NO(58) | 0) -#define MT7623_PIN_58_SCL1_FUNC_SCL1 (MTK_PIN_NO(58) | 1) #define MT7623_PIN_60_WB_RSTB_FUNC_GPIO60 (MTK_PIN_NO(60) | 0) #define MT7623_PIN_60_WB_RSTB_FUNC_WB_RSTB (MTK_PIN_NO(60) | 1) @@ -318,47 +244,12 @@ #define MT7623_PIN_76_SCL0_FUNC_GPIO76 (MTK_PIN_NO(76) | 0) #define MT7623_PIN_76_SCL0_FUNC_SCL0 (MTK_PIN_NO(76) | 1) -#define MT7623_PIN_77_SDA2_FUNC_GPIO77 (MTK_PIN_NO(77) | 0) -#define MT7623_PIN_77_SDA2_FUNC_SDA2 (MTK_PIN_NO(77) | 1) - -#define MT7623_PIN_78_SCL2_FUNC_GPIO78 (MTK_PIN_NO(78) | 0) -#define MT7623_PIN_78_SCL2_FUNC_SCL2 (MTK_PIN_NO(78) | 1) - -#define MT7623_PIN_79_URXD0_FUNC_GPIO79 (MTK_PIN_NO(79) | 0) -#define MT7623_PIN_79_URXD0_FUNC_URXD0 (MTK_PIN_NO(79) | 1) -#define MT7623_PIN_79_URXD0_FUNC_UTXD0 (MTK_PIN_NO(79) | 2) - -#define MT7623_PIN_80_UTXD0_FUNC_GPIO80 (MTK_PIN_NO(80) | 0) -#define MT7623_PIN_80_UTXD0_FUNC_UTXD0 (MTK_PIN_NO(80) | 1) -#define MT7623_PIN_80_UTXD0_FUNC_URXD0 (MTK_PIN_NO(80) | 2) - -#define MT7623_PIN_81_URXD1_FUNC_GPIO81 (MTK_PIN_NO(81) | 0) -#define MT7623_PIN_81_URXD1_FUNC_URXD1 (MTK_PIN_NO(81) | 1) -#define MT7623_PIN_81_URXD1_FUNC_UTXD1 (MTK_PIN_NO(81) | 2) - -#define MT7623_PIN_82_UTXD1_FUNC_GPIO82 (MTK_PIN_NO(82) | 0) -#define MT7623_PIN_82_UTXD1_FUNC_UTXD1 (MTK_PIN_NO(82) | 1) -#define MT7623_PIN_82_UTXD1_FUNC_URXD1 (MTK_PIN_NO(82) | 2) - #define MT7623_PIN_83_LCM_RST_FUNC_GPIO83 (MTK_PIN_NO(83) | 0) #define MT7623_PIN_83_LCM_RST_FUNC_LCM_RST (MTK_PIN_NO(83) | 1) -#define MT7623_PIN_83_LCM_RST_FUNC_VDAC_CK_XI (MTK_PIN_NO(83) | 2) #define MT7623_PIN_84_DSI_TE_FUNC_GPIO84 (MTK_PIN_NO(84) | 0) #define MT7623_PIN_84_DSI_TE_FUNC_DSI_TE (MTK_PIN_NO(84) | 1) -#define MT7623_PIN_91_MIPI_TDN3_FUNC_GPIO91 (MTK_PIN_NO(91) | 0) -#define MT7623_PIN_91_MIPI_TDN3_FUNC_TDN3 
(MTK_PIN_NO(91) | 1) - -#define MT7623_PIN_92_MIPI_TDP3_FUNC_GPIO92 (MTK_PIN_NO(92) | 0) -#define MT7623_PIN_92_MIPI_TDP3_FUNC_TDP3 (MTK_PIN_NO(92) | 1) - -#define MT7623_PIN_93_MIPI_TDN2_FUNC_GPIO93 (MTK_PIN_NO(93) | 0) -#define MT7623_PIN_93_MIPI_TDN2_FUNC_TDN2 (MTK_PIN_NO(93) | 1) - -#define MT7623_PIN_94_MIPI_TDP2_FUNC_GPIO94 (MTK_PIN_NO(94) | 0) -#define MT7623_PIN_94_MIPI_TDP2_FUNC_TDP2 (MTK_PIN_NO(94) | 1) - #define MT7623_PIN_95_MIPI_TCN_FUNC_GPIO95 (MTK_PIN_NO(95) | 0) #define MT7623_PIN_95_MIPI_TCN_FUNC_TCN (MTK_PIN_NO(95) | 1) @@ -377,28 +268,6 @@ #define MT7623_PIN_100_MIPI_TDP0_FUNC_GPIO100 (MTK_PIN_NO(100) | 0) #define MT7623_PIN_100_MIPI_TDP0_FUNC_TDP0 (MTK_PIN_NO(100) | 1) -#define MT7623_PIN_101_SPI2_CSN_FUNC_GPIO101 (MTK_PIN_NO(101) | 0) -#define MT7623_PIN_101_SPI2_CSN_FUNC_SPI2_CS (MTK_PIN_NO(101) | 1) -#define MT7623_PIN_101_SPI2_CSN_FUNC_SCL3 (MTK_PIN_NO(101) | 3) -#define MT7623_PIN_101_SPI2_CSN_FUNC_KROW0 (MTK_PIN_NO(101) | 4) - -#define MT7623_PIN_102_SPI2_MI_FUNC_GPIO102 (MTK_PIN_NO(102) | 0) -#define MT7623_PIN_102_SPI2_MI_FUNC_SPI2_MI (MTK_PIN_NO(102) | 1) -#define MT7623_PIN_102_SPI2_MI_FUNC_SPI2_MO (MTK_PIN_NO(102) | 2) -#define MT7623_PIN_102_SPI2_MI_FUNC_SDA3 (MTK_PIN_NO(102) | 3) -#define MT7623_PIN_102_SPI2_MI_FUNC_KROW1 (MTK_PIN_NO(102) | 4) - -#define MT7623_PIN_103_SPI2_MO_FUNC_GPIO103 (MTK_PIN_NO(103) | 0) -#define MT7623_PIN_103_SPI2_MO_FUNC_SPI2_MO (MTK_PIN_NO(103) | 1) -#define MT7623_PIN_103_SPI2_MO_FUNC_SPI2_MI (MTK_PIN_NO(103) | 2) -#define MT7623_PIN_103_SPI2_MO_FUNC_SCL3 (MTK_PIN_NO(103) | 3) -#define MT7623_PIN_103_SPI2_MO_FUNC_KROW2 (MTK_PIN_NO(103) | 4) - -#define MT7623_PIN_104_SPI2_CK_FUNC_GPIO104 (MTK_PIN_NO(104) | 0) -#define MT7623_PIN_104_SPI2_CK_FUNC_SPI2_CK (MTK_PIN_NO(104) | 1) -#define MT7623_PIN_104_SPI2_CK_FUNC_SDA3 (MTK_PIN_NO(104) | 3) -#define MT7623_PIN_104_SPI2_CK_FUNC_KROW3 (MTK_PIN_NO(104) | 4) - #define MT7623_PIN_105_MSDC1_CMD_FUNC_GPIO105 (MTK_PIN_NO(105) | 0) #define 
MT7623_PIN_105_MSDC1_CMD_FUNC_MSDC1_CMD (MTK_PIN_NO(105) | 1) #define MT7623_PIN_105_MSDC1_CMD_FUNC_SDA1 (MTK_PIN_NO(105) | 3) @@ -478,22 +347,22 @@ #define MT7623_PIN_121_MSDC0_DAT0_FUNC_WATCHDOG (MTK_PIN_NO(121) | 5) #define MT7623_PIN_122_GPIO122_FUNC_GPIO122 (MTK_PIN_NO(122) | 0) -#define MT7623_PIN_122_GPIO122_FUNC_CEC (MTK_PIN_NO(122) | 1) +#define MT7623_PIN_122_GPIO122_FUNC_TEST (MTK_PIN_NO(122) | 1) #define MT7623_PIN_122_GPIO122_FUNC_SDA2 (MTK_PIN_NO(122) | 4) #define MT7623_PIN_122_GPIO122_FUNC_URXD0 (MTK_PIN_NO(122) | 5) -#define MT7623_PIN_123_HTPLG_FUNC_GPIO123 (MTK_PIN_NO(123) | 0) -#define MT7623_PIN_123_HTPLG_FUNC_HTPLG (MTK_PIN_NO(123) | 1) -#define MT7623_PIN_123_HTPLG_FUNC_SCL2 (MTK_PIN_NO(123) | 4) -#define MT7623_PIN_123_HTPLG_FUNC_UTXD0 (MTK_PIN_NO(123) | 5) +#define MT7623_PIN_123_GPIO123_FUNC_GPIO123 (MTK_PIN_NO(123) | 0) +#define MT7623_PIN_123_GPIO123_FUNC_TEST (MTK_PIN_NO(123) | 1) +#define MT7623_PIN_123_GPIO123_FUNC_SCL2 (MTK_PIN_NO(123) | 4) +#define MT7623_PIN_123_GPIO123_FUNC_UTXD0 (MTK_PIN_NO(123) | 5) #define MT7623_PIN_124_GPIO124_FUNC_GPIO124 (MTK_PIN_NO(124) | 0) -#define MT7623_PIN_124_GPIO124_FUNC_HDMISCK (MTK_PIN_NO(124) | 1) +#define MT7623_PIN_124_GPIO124_FUNC_TEST (MTK_PIN_NO(124) | 1) #define MT7623_PIN_124_GPIO124_FUNC_SDA1 (MTK_PIN_NO(124) | 4) #define MT7623_PIN_124_GPIO124_FUNC_PWM3 (MTK_PIN_NO(124) | 5) #define MT7623_PIN_125_GPIO125_FUNC_GPIO125 (MTK_PIN_NO(125) | 0) -#define MT7623_PIN_125_GPIO125_FUNC_HDMISD (MTK_PIN_NO(125) | 1) +#define MT7623_PIN_125_GPIO125_FUNC_TEST (MTK_PIN_NO(125) | 1) #define MT7623_PIN_125_GPIO125_FUNC_SCL1 (MTK_PIN_NO(125) | 4) #define MT7623_PIN_125_GPIO125_FUNC_PWM4 (MTK_PIN_NO(125) | 5) diff --git a/include/dt-bindings/pinctrl/nomadik.h b/include/dt-bindings/pinctrl/nomadik.h index fa24565e00..638fb321a1 100644 --- a/include/dt-bindings/pinctrl/nomadik.h +++ b/include/dt-bindings/pinctrl/nomadik.h @@ -1,9 +1,9 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * nomadik.h * * 
Copyright (C) ST-Ericsson SA 2013 * Author: Gabriel Fernandez for ST-Ericsson. + * License terms: GNU General Public License (GPL), version 2 */ #define INPUT_NOPULL 0 diff --git a/include/dt-bindings/pinctrl/omap.h b/include/dt-bindings/pinctrl/omap.h index f48245ff87..effadd0569 100644 --- a/include/dt-bindings/pinctrl/omap.h +++ b/include/dt-bindings/pinctrl/omap.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ /* * This header provides constants for OMAP pinctrl bindings. * @@ -46,8 +45,8 @@ #define PIN_OFF_NONE 0 #define PIN_OFF_OUTPUT_HIGH (OFF_EN | OFFOUT_EN | OFFOUT_VAL) #define PIN_OFF_OUTPUT_LOW (OFF_EN | OFFOUT_EN) -#define PIN_OFF_INPUT_PULLUP (OFF_EN | OFFOUT_EN | OFF_PULL_EN | OFF_PULL_UP) -#define PIN_OFF_INPUT_PULLDOWN (OFF_EN | OFFOUT_EN | OFF_PULL_EN) +#define PIN_OFF_INPUT_PULLUP (OFF_EN | OFF_PULL_EN | OFF_PULL_UP) +#define PIN_OFF_INPUT_PULLDOWN (OFF_EN | OFF_PULL_EN) #define PIN_OFF_WAKEUPENABLE WAKEUP_EN /* @@ -64,8 +63,7 @@ #define OMAP3_WKUP_IOPAD(pa, val) OMAP_IOPAD_OFFSET((pa), 0x2a00) (val) #define DM814X_IOPAD(pa, val) OMAP_IOPAD_OFFSET((pa), 0x0800) (val) #define DM816X_IOPAD(pa, val) OMAP_IOPAD_OFFSET((pa), 0x0800) (val) -#define AM33XX_IOPAD(pa, val) OMAP_IOPAD_OFFSET((pa), 0x0800) (val) (0) -#define AM33XX_PADCONF(pa, conf, mux) OMAP_IOPAD_OFFSET((pa), 0x0800) (conf) (mux) +#define AM33XX_IOPAD(pa, val) OMAP_IOPAD_OFFSET((pa), 0x0800) (val) /* * Macros to allow using the offset from the padconf physical address diff --git a/include/dt-bindings/pinctrl/pinctrl-tegra-xusb.h b/include/dt-bindings/pinctrl/pinctrl-tegra-xusb.h index ac63c399b4..914d56da93 100644 --- a/include/dt-bindings/pinctrl/pinctrl-tegra-xusb.h +++ b/include/dt-bindings/pinctrl/pinctrl-tegra-xusb.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _DT_BINDINGS_PINCTRL_TEGRA_XUSB_H #define _DT_BINDINGS_PINCTRL_TEGRA_XUSB_H 1 diff --git a/include/dt-bindings/pinctrl/pinctrl-tegra.h b/include/dt-bindings/pinctrl/pinctrl-tegra.h index 
d9b18bf264..ebafa498be 100644 --- a/include/dt-bindings/pinctrl/pinctrl-tegra.h +++ b/include/dt-bindings/pinctrl/pinctrl-tegra.h @@ -1,10 +1,18 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * This header provides constants for Tegra pinctrl bindings. * * Copyright (c) 2013, NVIDIA CORPORATION. All rights reserved. * * Author: Laxman Dewangan + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. */ #ifndef _DT_BINDINGS_PINCTRL_TEGRA_H diff --git a/include/dt-bindings/pinctrl/qcom,pmic-gpio.h b/include/dt-bindings/pinctrl/qcom,pmic-gpio.h index e5df5ce45a..aafa76cb56 100644 --- a/include/dt-bindings/pinctrl/qcom,pmic-gpio.h +++ b/include/dt-bindings/pinctrl/qcom,pmic-gpio.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ /* * This header provides constants for the Qualcomm PMIC GPIO binding. 
*/ @@ -90,17 +89,11 @@ #define PMA8084_GPIO_S4 2 #define PMA8084_GPIO_L6 3 -#define PM8994_GPIO_VPH 0 -#define PM8994_GPIO_S4 2 -#define PM8994_GPIO_L12 3 - /* To be used with "function" */ #define PMIC_GPIO_FUNC_NORMAL "normal" #define PMIC_GPIO_FUNC_PAIRED "paired" #define PMIC_GPIO_FUNC_FUNC1 "func1" #define PMIC_GPIO_FUNC_FUNC2 "func2" -#define PMIC_GPIO_FUNC_FUNC3 "func3" -#define PMIC_GPIO_FUNC_FUNC4 "func4" #define PMIC_GPIO_FUNC_DTEST1 "dtest1" #define PMIC_GPIO_FUNC_DTEST2 "dtest2" #define PMIC_GPIO_FUNC_DTEST3 "dtest3" diff --git a/include/dt-bindings/pinctrl/qcom,pmic-mpp.h b/include/dt-bindings/pinctrl/qcom,pmic-mpp.h index 32e66ee7e8..a15c1704d0 100644 --- a/include/dt-bindings/pinctrl/qcom,pmic-mpp.h +++ b/include/dt-bindings/pinctrl/qcom,pmic-mpp.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ /* * This header provides constants for the Qualcomm PMIC's * Multi-Purpose Pin binding. @@ -66,12 +65,6 @@ #define PMA8084_MPP_S4 2 #define PMA8084_MPP_L6 3 -#define PM8994_MPP_VPH 0 -/* Only supported for MPP_05-MPP_08 */ -#define PM8994_MPP_L19 1 -#define PM8994_MPP_S4 2 -#define PM8994_MPP_L12 3 - /* * Analog Input - Set the source for analog input. * To be used with "qcom,amux-route" property diff --git a/include/dt-bindings/pinctrl/rockchip.h b/include/dt-bindings/pinctrl/rockchip.h index 5f291045e8..743e66a95e 100644 --- a/include/dt-bindings/pinctrl/rockchip.h +++ b/include/dt-bindings/pinctrl/rockchip.h @@ -1,47 +1,34 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * Header providing constants for Rockchip pinctrl bindings. * * Copyright (c) 2013 MundoReader S.L. * Author: Heiko Stuebner + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. 
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. */ #ifndef __DT_BINDINGS_ROCKCHIP_PINCTRL_H__ #define __DT_BINDINGS_ROCKCHIP_PINCTRL_H__ -#define RK_PA0 0 -#define RK_PA1 1 -#define RK_PA2 2 -#define RK_PA3 3 -#define RK_PA4 4 -#define RK_PA5 5 -#define RK_PA6 6 -#define RK_PA7 7 -#define RK_PB0 8 -#define RK_PB1 9 -#define RK_PB2 10 -#define RK_PB3 11 -#define RK_PB4 12 -#define RK_PB5 13 -#define RK_PB6 14 -#define RK_PB7 15 -#define RK_PC0 16 -#define RK_PC1 17 -#define RK_PC2 18 -#define RK_PC3 19 -#define RK_PC4 20 -#define RK_PC5 21 -#define RK_PC6 22 -#define RK_PC7 23 -#define RK_PD0 24 -#define RK_PD1 25 -#define RK_PD2 26 -#define RK_PD3 27 -#define RK_PD4 28 -#define RK_PD5 29 -#define RK_PD6 30 -#define RK_PD7 31 +#define RK_GPIO0 0 +#define RK_GPIO1 1 +#define RK_GPIO2 2 +#define RK_GPIO3 3 +#define RK_GPIO4 4 +#define RK_GPIO6 6 #define RK_FUNC_GPIO 0 +#define RK_FUNC_1 1 +#define RK_FUNC_2 2 +#define RK_FUNC_3 3 +#define RK_FUNC_4 4 #endif diff --git a/include/dt-bindings/pinctrl/samsung.h b/include/dt-bindings/pinctrl/samsung.h index b1832506b9..6276eb785e 100644 --- a/include/dt-bindings/pinctrl/samsung.h +++ b/include/dt-bindings/pinctrl/samsung.h @@ -1,11 +1,14 @@ -/* SPDX-License-Identifier: GPL-2.0 */ /* * Samsung's Exynos pinctrl bindings * * Copyright (c) 2016 Samsung Electronics Co., Ltd. * http://www.samsung.com * Author: Krzysztof Kozlowski - */ + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+*/ #ifndef __DT_BINDINGS_PINCTRL_SAMSUNG_H__ #define __DT_BINDINGS_PINCTRL_SAMSUNG_H__ @@ -42,20 +45,6 @@ #define EXYNOS5420_PIN_DRV_LV3 2 #define EXYNOS5420_PIN_DRV_LV4 3 -/* Drive strengths for Exynos5433 */ -#define EXYNOS5433_PIN_DRV_FAST_SR1 0 -#define EXYNOS5433_PIN_DRV_FAST_SR2 1 -#define EXYNOS5433_PIN_DRV_FAST_SR3 2 -#define EXYNOS5433_PIN_DRV_FAST_SR4 3 -#define EXYNOS5433_PIN_DRV_FAST_SR5 4 -#define EXYNOS5433_PIN_DRV_FAST_SR6 5 -#define EXYNOS5433_PIN_DRV_SLOW_SR1 8 -#define EXYNOS5433_PIN_DRV_SLOW_SR2 9 -#define EXYNOS5433_PIN_DRV_SLOW_SR3 0xa -#define EXYNOS5433_PIN_DRV_SLOW_SR4 0xb -#define EXYNOS5433_PIN_DRV_SLOW_SR5 0xc -#define EXYNOS5433_PIN_DRV_SLOW_SR6 0xf - #define EXYNOS_PIN_FUNC_INPUT 0 #define EXYNOS_PIN_FUNC_OUTPUT 1 #define EXYNOS_PIN_FUNC_2 2 @@ -63,15 +52,6 @@ #define EXYNOS_PIN_FUNC_4 4 #define EXYNOS_PIN_FUNC_5 5 #define EXYNOS_PIN_FUNC_6 6 -#define EXYNOS_PIN_FUNC_EINT 0xf -#define EXYNOS_PIN_FUNC_F EXYNOS_PIN_FUNC_EINT - -/* Drive strengths for Exynos7 FSYS1 block */ -#define EXYNOS7_FSYS1_PIN_DRV_LV1 0 -#define EXYNOS7_FSYS1_PIN_DRV_LV2 4 -#define EXYNOS7_FSYS1_PIN_DRV_LV3 2 -#define EXYNOS7_FSYS1_PIN_DRV_LV4 6 -#define EXYNOS7_FSYS1_PIN_DRV_LV5 1 -#define EXYNOS7_FSYS1_PIN_DRV_LV6 5 +#define EXYNOS_PIN_FUNC_F 0xf #endif /* __DT_BINDINGS_PINCTRL_SAMSUNG_H__ */ diff --git a/include/dt-bindings/pinctrl/stm32f429-pinfunc.h b/include/dt-bindings/pinctrl/stm32f429-pinfunc.h new file mode 100644 index 0000000000..26f18798d9 --- /dev/null +++ b/include/dt-bindings/pinctrl/stm32f429-pinfunc.h @@ -0,0 +1,1239 @@ +#ifndef _DT_BINDINGS_STM32F429_PINFUNC_H +#define _DT_BINDINGS_STM32F429_PINFUNC_H + +#define STM32F429_PA0_FUNC_GPIO 0x0 +#define STM32F429_PA0_FUNC_TIM2_CH1_TIM2_ETR 0x2 +#define STM32F429_PA0_FUNC_TIM5_CH1 0x3 +#define STM32F429_PA0_FUNC_TIM8_ETR 0x4 +#define STM32F429_PA0_FUNC_USART2_CTS 0x8 +#define STM32F429_PA0_FUNC_UART4_TX 0x9 +#define STM32F429_PA0_FUNC_ETH_MII_CRS 0xc +#define STM32F429_PA0_FUNC_EVENTOUT 0x10 +#define 
STM32F429_PA0_FUNC_ANALOG 0x11 + +#define STM32F429_PA1_FUNC_GPIO 0x100 +#define STM32F429_PA1_FUNC_TIM2_CH2 0x102 +#define STM32F429_PA1_FUNC_TIM5_CH2 0x103 +#define STM32F429_PA1_FUNC_USART2_RTS 0x108 +#define STM32F429_PA1_FUNC_UART4_RX 0x109 +#define STM32F429_PA1_FUNC_ETH_MII_RX_CLK_ETH_RMII_REF_CLK 0x10c +#define STM32F429_PA1_FUNC_EVENTOUT 0x110 +#define STM32F429_PA1_FUNC_ANALOG 0x111 + +#define STM32F429_PA2_FUNC_GPIO 0x200 +#define STM32F429_PA2_FUNC_TIM2_CH3 0x202 +#define STM32F429_PA2_FUNC_TIM5_CH3 0x203 +#define STM32F429_PA2_FUNC_TIM9_CH1 0x204 +#define STM32F429_PA2_FUNC_USART2_TX 0x208 +#define STM32F429_PA2_FUNC_ETH_MDIO 0x20c +#define STM32F429_PA2_FUNC_EVENTOUT 0x210 +#define STM32F429_PA2_FUNC_ANALOG 0x211 + +#define STM32F429_PA3_FUNC_GPIO 0x300 +#define STM32F429_PA3_FUNC_TIM2_CH4 0x302 +#define STM32F429_PA3_FUNC_TIM5_CH4 0x303 +#define STM32F429_PA3_FUNC_TIM9_CH2 0x304 +#define STM32F429_PA3_FUNC_USART2_RX 0x308 +#define STM32F429_PA3_FUNC_OTG_HS_ULPI_D0 0x30b +#define STM32F429_PA3_FUNC_ETH_MII_COL 0x30c +#define STM32F429_PA3_FUNC_LCD_B5 0x30f +#define STM32F429_PA3_FUNC_EVENTOUT 0x310 +#define STM32F429_PA3_FUNC_ANALOG 0x311 + +#define STM32F429_PA4_FUNC_GPIO 0x400 +#define STM32F429_PA4_FUNC_SPI1_NSS 0x406 +#define STM32F429_PA4_FUNC_SPI3_NSS_I2S3_WS 0x407 +#define STM32F429_PA4_FUNC_USART2_CK 0x408 +#define STM32F429_PA4_FUNC_OTG_HS_SOF 0x40d +#define STM32F429_PA4_FUNC_DCMI_HSYNC 0x40e +#define STM32F429_PA4_FUNC_LCD_VSYNC 0x40f +#define STM32F429_PA4_FUNC_EVENTOUT 0x410 +#define STM32F429_PA4_FUNC_ANALOG 0x411 + +#define STM32F429_PA5_FUNC_GPIO 0x500 +#define STM32F429_PA5_FUNC_TIM2_CH1_TIM2_ETR 0x502 +#define STM32F429_PA5_FUNC_TIM8_CH1N 0x504 +#define STM32F429_PA5_FUNC_SPI1_SCK 0x506 +#define STM32F429_PA5_FUNC_OTG_HS_ULPI_CK 0x50b +#define STM32F429_PA5_FUNC_EVENTOUT 0x510 +#define STM32F429_PA5_FUNC_ANALOG 0x511 + +#define STM32F429_PA6_FUNC_GPIO 0x600 +#define STM32F429_PA6_FUNC_TIM1_BKIN 0x602 +#define 
STM32F429_PA6_FUNC_TIM3_CH1 0x603 +#define STM32F429_PA6_FUNC_TIM8_BKIN 0x604 +#define STM32F429_PA6_FUNC_SPI1_MISO 0x606 +#define STM32F429_PA6_FUNC_TIM13_CH1 0x60a +#define STM32F429_PA6_FUNC_DCMI_PIXCLK 0x60e +#define STM32F429_PA6_FUNC_LCD_G2 0x60f +#define STM32F429_PA6_FUNC_EVENTOUT 0x610 +#define STM32F429_PA6_FUNC_ANALOG 0x611 + +#define STM32F429_PA7_FUNC_GPIO 0x700 +#define STM32F429_PA7_FUNC_TIM1_CH1N 0x702 +#define STM32F429_PA7_FUNC_TIM3_CH2 0x703 +#define STM32F429_PA7_FUNC_TIM8_CH1N 0x704 +#define STM32F429_PA7_FUNC_SPI1_MOSI 0x706 +#define STM32F429_PA7_FUNC_TIM14_CH1 0x70a +#define STM32F429_PA7_FUNC_ETH_MII_RX_DV_ETH_RMII_CRS_DV 0x70c +#define STM32F429_PA7_FUNC_EVENTOUT 0x710 +#define STM32F429_PA7_FUNC_ANALOG 0x711 + +#define STM32F429_PA8_FUNC_GPIO 0x800 +#define STM32F429_PA8_FUNC_MCO1 0x801 +#define STM32F429_PA8_FUNC_TIM1_CH1 0x802 +#define STM32F429_PA8_FUNC_I2C3_SCL 0x805 +#define STM32F429_PA8_FUNC_USART1_CK 0x808 +#define STM32F429_PA8_FUNC_OTG_FS_SOF 0x80b +#define STM32F429_PA8_FUNC_LCD_R6 0x80f +#define STM32F429_PA8_FUNC_EVENTOUT 0x810 +#define STM32F429_PA8_FUNC_ANALOG 0x811 + +#define STM32F429_PA9_FUNC_GPIO 0x900 +#define STM32F429_PA9_FUNC_TIM1_CH2 0x902 +#define STM32F429_PA9_FUNC_I2C3_SMBA 0x905 +#define STM32F429_PA9_FUNC_USART1_TX 0x908 +#define STM32F429_PA9_FUNC_DCMI_D0 0x90e +#define STM32F429_PA9_FUNC_EVENTOUT 0x910 +#define STM32F429_PA9_FUNC_ANALOG 0x911 + +#define STM32F429_PA10_FUNC_GPIO 0xa00 +#define STM32F429_PA10_FUNC_TIM1_CH3 0xa02 +#define STM32F429_PA10_FUNC_USART1_RX 0xa08 +#define STM32F429_PA10_FUNC_OTG_FS_ID 0xa0b +#define STM32F429_PA10_FUNC_DCMI_D1 0xa0e +#define STM32F429_PA10_FUNC_EVENTOUT 0xa10 +#define STM32F429_PA10_FUNC_ANALOG 0xa11 + +#define STM32F429_PA11_FUNC_GPIO 0xb00 +#define STM32F429_PA11_FUNC_TIM1_CH4 0xb02 +#define STM32F429_PA11_FUNC_USART1_CTS 0xb08 +#define STM32F429_PA11_FUNC_CAN1_RX 0xb0a +#define STM32F429_PA11_FUNC_OTG_FS_DM 0xb0b +#define STM32F429_PA11_FUNC_LCD_R4 0xb0f +#define 
STM32F429_PA11_FUNC_EVENTOUT 0xb10 +#define STM32F429_PA11_FUNC_ANALOG 0xb11 + +#define STM32F429_PA12_FUNC_GPIO 0xc00 +#define STM32F429_PA12_FUNC_TIM1_ETR 0xc02 +#define STM32F429_PA12_FUNC_USART1_RTS 0xc08 +#define STM32F429_PA12_FUNC_CAN1_TX 0xc0a +#define STM32F429_PA12_FUNC_OTG_FS_DP 0xc0b +#define STM32F429_PA12_FUNC_LCD_R5 0xc0f +#define STM32F429_PA12_FUNC_EVENTOUT 0xc10 +#define STM32F429_PA12_FUNC_ANALOG 0xc11 + +#define STM32F429_PA13_FUNC_GPIO 0xd00 +#define STM32F429_PA13_FUNC_JTMS_SWDIO 0xd01 +#define STM32F429_PA13_FUNC_EVENTOUT 0xd10 +#define STM32F429_PA13_FUNC_ANALOG 0xd11 + +#define STM32F429_PA14_FUNC_GPIO 0xe00 +#define STM32F429_PA14_FUNC_JTCK_SWCLK 0xe01 +#define STM32F429_PA14_FUNC_EVENTOUT 0xe10 +#define STM32F429_PA14_FUNC_ANALOG 0xe11 + +#define STM32F429_PA15_FUNC_GPIO 0xf00 +#define STM32F429_PA15_FUNC_JTDI 0xf01 +#define STM32F429_PA15_FUNC_TIM2_CH1_TIM2_ETR 0xf02 +#define STM32F429_PA15_FUNC_SPI1_NSS 0xf06 +#define STM32F429_PA15_FUNC_SPI3_NSS_I2S3_WS 0xf07 +#define STM32F429_PA15_FUNC_EVENTOUT 0xf10 +#define STM32F429_PA15_FUNC_ANALOG 0xf11 + + + +#define STM32F429_PB0_FUNC_GPIO 0x1000 +#define STM32F429_PB0_FUNC_TIM1_CH2N 0x1002 +#define STM32F429_PB0_FUNC_TIM3_CH3 0x1003 +#define STM32F429_PB0_FUNC_TIM8_CH2N 0x1004 +#define STM32F429_PB0_FUNC_LCD_R3 0x100a +#define STM32F429_PB0_FUNC_OTG_HS_ULPI_D1 0x100b +#define STM32F429_PB0_FUNC_ETH_MII_RXD2 0x100c +#define STM32F429_PB0_FUNC_EVENTOUT 0x1010 +#define STM32F429_PB0_FUNC_ANALOG 0x1011 + +#define STM32F429_PB1_FUNC_GPIO 0x1100 +#define STM32F429_PB1_FUNC_TIM1_CH3N 0x1102 +#define STM32F429_PB1_FUNC_TIM3_CH4 0x1103 +#define STM32F429_PB1_FUNC_TIM8_CH3N 0x1104 +#define STM32F429_PB1_FUNC_LCD_R6 0x110a +#define STM32F429_PB1_FUNC_OTG_HS_ULPI_D2 0x110b +#define STM32F429_PB1_FUNC_ETH_MII_RXD3 0x110c +#define STM32F429_PB1_FUNC_EVENTOUT 0x1110 +#define STM32F429_PB1_FUNC_ANALOG 0x1111 + +#define STM32F429_PB2_FUNC_GPIO 0x1200 +#define STM32F429_PB2_FUNC_EVENTOUT 0x1210 +#define 
STM32F429_PB2_FUNC_ANALOG 0x1211 + +#define STM32F429_PB3_FUNC_GPIO 0x1300 +#define STM32F429_PB3_FUNC_JTDO_TRACESWO 0x1301 +#define STM32F429_PB3_FUNC_TIM2_CH2 0x1302 +#define STM32F429_PB3_FUNC_SPI1_SCK 0x1306 +#define STM32F429_PB3_FUNC_SPI3_SCK_I2S3_CK 0x1307 +#define STM32F429_PB3_FUNC_EVENTOUT 0x1310 +#define STM32F429_PB3_FUNC_ANALOG 0x1311 + +#define STM32F429_PB4_FUNC_GPIO 0x1400 +#define STM32F429_PB4_FUNC_NJTRST 0x1401 +#define STM32F429_PB4_FUNC_TIM3_CH1 0x1403 +#define STM32F429_PB4_FUNC_SPI1_MISO 0x1406 +#define STM32F429_PB4_FUNC_SPI3_MISO 0x1407 +#define STM32F429_PB4_FUNC_I2S3EXT_SD 0x1408 +#define STM32F429_PB4_FUNC_EVENTOUT 0x1410 +#define STM32F429_PB4_FUNC_ANALOG 0x1411 + +#define STM32F429_PB5_FUNC_GPIO 0x1500 +#define STM32F429_PB5_FUNC_TIM3_CH2 0x1503 +#define STM32F429_PB5_FUNC_I2C1_SMBA 0x1505 +#define STM32F429_PB5_FUNC_SPI1_MOSI 0x1506 +#define STM32F429_PB5_FUNC_SPI3_MOSI_I2S3_SD 0x1507 +#define STM32F429_PB5_FUNC_CAN2_RX 0x150a +#define STM32F429_PB5_FUNC_OTG_HS_ULPI_D7 0x150b +#define STM32F429_PB5_FUNC_ETH_PPS_OUT 0x150c +#define STM32F429_PB5_FUNC_FMC_SDCKE1 0x150d +#define STM32F429_PB5_FUNC_DCMI_D10 0x150e +#define STM32F429_PB5_FUNC_EVENTOUT 0x1510 +#define STM32F429_PB5_FUNC_ANALOG 0x1511 + +#define STM32F429_PB6_FUNC_GPIO 0x1600 +#define STM32F429_PB6_FUNC_TIM4_CH1 0x1603 +#define STM32F429_PB6_FUNC_I2C1_SCL 0x1605 +#define STM32F429_PB6_FUNC_USART1_TX 0x1608 +#define STM32F429_PB6_FUNC_CAN2_TX 0x160a +#define STM32F429_PB6_FUNC_FMC_SDNE1 0x160d +#define STM32F429_PB6_FUNC_DCMI_D5 0x160e +#define STM32F429_PB6_FUNC_EVENTOUT 0x1610 +#define STM32F429_PB6_FUNC_ANALOG 0x1611 + +#define STM32F429_PB7_FUNC_GPIO 0x1700 +#define STM32F429_PB7_FUNC_TIM4_CH2 0x1703 +#define STM32F429_PB7_FUNC_I2C1_SDA 0x1705 +#define STM32F429_PB7_FUNC_USART1_RX 0x1708 +#define STM32F429_PB7_FUNC_FMC_NL 0x170d +#define STM32F429_PB7_FUNC_DCMI_VSYNC 0x170e +#define STM32F429_PB7_FUNC_EVENTOUT 0x1710 +#define STM32F429_PB7_FUNC_ANALOG 0x1711 + +#define 
STM32F429_PB8_FUNC_GPIO 0x1800 +#define STM32F429_PB8_FUNC_TIM4_CH3 0x1803 +#define STM32F429_PB8_FUNC_TIM10_CH1 0x1804 +#define STM32F429_PB8_FUNC_I2C1_SCL 0x1805 +#define STM32F429_PB8_FUNC_CAN1_RX 0x180a +#define STM32F429_PB8_FUNC_ETH_MII_TXD3 0x180c +#define STM32F429_PB8_FUNC_SDIO_D4 0x180d +#define STM32F429_PB8_FUNC_DCMI_D6 0x180e +#define STM32F429_PB8_FUNC_LCD_B6 0x180f +#define STM32F429_PB8_FUNC_EVENTOUT 0x1810 +#define STM32F429_PB8_FUNC_ANALOG 0x1811 + +#define STM32F429_PB9_FUNC_GPIO 0x1900 +#define STM32F429_PB9_FUNC_TIM4_CH4 0x1903 +#define STM32F429_PB9_FUNC_TIM11_CH1 0x1904 +#define STM32F429_PB9_FUNC_I2C1_SDA 0x1905 +#define STM32F429_PB9_FUNC_SPI2_NSS_I2S2_WS 0x1906 +#define STM32F429_PB9_FUNC_CAN1_TX 0x190a +#define STM32F429_PB9_FUNC_SDIO_D5 0x190d +#define STM32F429_PB9_FUNC_DCMI_D7 0x190e +#define STM32F429_PB9_FUNC_LCD_B7 0x190f +#define STM32F429_PB9_FUNC_EVENTOUT 0x1910 +#define STM32F429_PB9_FUNC_ANALOG 0x1911 + +#define STM32F429_PB10_FUNC_GPIO 0x1a00 +#define STM32F429_PB10_FUNC_TIM2_CH3 0x1a02 +#define STM32F429_PB10_FUNC_I2C2_SCL 0x1a05 +#define STM32F429_PB10_FUNC_SPI2_SCK_I2S2_CK 0x1a06 +#define STM32F429_PB10_FUNC_USART3_TX 0x1a08 +#define STM32F429_PB10_FUNC_OTG_HS_ULPI_D3 0x1a0b +#define STM32F429_PB10_FUNC_ETH_MII_RX_ER 0x1a0c +#define STM32F429_PB10_FUNC_LCD_G4 0x1a0f +#define STM32F429_PB10_FUNC_EVENTOUT 0x1a10 +#define STM32F429_PB10_FUNC_ANALOG 0x1a11 + +#define STM32F429_PB11_FUNC_GPIO 0x1b00 +#define STM32F429_PB11_FUNC_TIM2_CH4 0x1b02 +#define STM32F429_PB11_FUNC_I2C2_SDA 0x1b05 +#define STM32F429_PB11_FUNC_USART3_RX 0x1b08 +#define STM32F429_PB11_FUNC_OTG_HS_ULPI_D4 0x1b0b +#define STM32F429_PB11_FUNC_ETH_MII_TX_EN_ETH_RMII_TX_EN 0x1b0c +#define STM32F429_PB11_FUNC_LCD_G5 0x1b0f +#define STM32F429_PB11_FUNC_EVENTOUT 0x1b10 +#define STM32F429_PB11_FUNC_ANALOG 0x1b11 + +#define STM32F429_PB12_FUNC_GPIO 0x1c00 +#define STM32F429_PB12_FUNC_TIM1_BKIN 0x1c02 +#define STM32F429_PB12_FUNC_I2C2_SMBA 0x1c05 +#define 
STM32F429_PB12_FUNC_SPI2_NSS_I2S2_WS 0x1c06 +#define STM32F429_PB12_FUNC_USART3_CK 0x1c08 +#define STM32F429_PB12_FUNC_CAN2_RX 0x1c0a +#define STM32F429_PB12_FUNC_OTG_HS_ULPI_D5 0x1c0b +#define STM32F429_PB12_FUNC_ETH_MII_TXD0_ETH_RMII_TXD0 0x1c0c +#define STM32F429_PB12_FUNC_OTG_HS_ID 0x1c0d +#define STM32F429_PB12_FUNC_EVENTOUT 0x1c10 +#define STM32F429_PB12_FUNC_ANALOG 0x1c11 + +#define STM32F429_PB13_FUNC_GPIO 0x1d00 +#define STM32F429_PB13_FUNC_TIM1_CH1N 0x1d02 +#define STM32F429_PB13_FUNC_SPI2_SCK_I2S2_CK 0x1d06 +#define STM32F429_PB13_FUNC_USART3_CTS 0x1d08 +#define STM32F429_PB13_FUNC_CAN2_TX 0x1d0a +#define STM32F429_PB13_FUNC_OTG_HS_ULPI_D6 0x1d0b +#define STM32F429_PB13_FUNC_ETH_MII_TXD1_ETH_RMII_TXD1 0x1d0c +#define STM32F429_PB13_FUNC_EVENTOUT 0x1d10 +#define STM32F429_PB13_FUNC_ANALOG 0x1d11 + +#define STM32F429_PB14_FUNC_GPIO 0x1e00 +#define STM32F429_PB14_FUNC_TIM1_CH2N 0x1e02 +#define STM32F429_PB14_FUNC_TIM8_CH2N 0x1e04 +#define STM32F429_PB14_FUNC_SPI2_MISO 0x1e06 +#define STM32F429_PB14_FUNC_I2S2EXT_SD 0x1e07 +#define STM32F429_PB14_FUNC_USART3_RTS 0x1e08 +#define STM32F429_PB14_FUNC_TIM12_CH1 0x1e0a +#define STM32F429_PB14_FUNC_OTG_HS_DM 0x1e0d +#define STM32F429_PB14_FUNC_EVENTOUT 0x1e10 +#define STM32F429_PB14_FUNC_ANALOG 0x1e11 + +#define STM32F429_PB15_FUNC_GPIO 0x1f00 +#define STM32F429_PB15_FUNC_RTC_REFIN 0x1f01 +#define STM32F429_PB15_FUNC_TIM1_CH3N 0x1f02 +#define STM32F429_PB15_FUNC_TIM8_CH3N 0x1f04 +#define STM32F429_PB15_FUNC_SPI2_MOSI_I2S2_SD 0x1f06 +#define STM32F429_PB15_FUNC_TIM12_CH2 0x1f0a +#define STM32F429_PB15_FUNC_OTG_HS_DP 0x1f0d +#define STM32F429_PB15_FUNC_EVENTOUT 0x1f10 +#define STM32F429_PB15_FUNC_ANALOG 0x1f11 + + + +#define STM32F429_PC0_FUNC_GPIO 0x2000 +#define STM32F429_PC0_FUNC_OTG_HS_ULPI_STP 0x200b +#define STM32F429_PC0_FUNC_FMC_SDNWE 0x200d +#define STM32F429_PC0_FUNC_EVENTOUT 0x2010 +#define STM32F429_PC0_FUNC_ANALOG 0x2011 + +#define STM32F429_PC1_FUNC_GPIO 0x2100 +#define STM32F429_PC1_FUNC_ETH_MDC 0x210c 
+#define STM32F429_PC1_FUNC_EVENTOUT 0x2110 +#define STM32F429_PC1_FUNC_ANALOG 0x2111 + +#define STM32F429_PC2_FUNC_GPIO 0x2200 +#define STM32F429_PC2_FUNC_SPI2_MISO 0x2206 +#define STM32F429_PC2_FUNC_I2S2EXT_SD 0x2207 +#define STM32F429_PC2_FUNC_OTG_HS_ULPI_DIR 0x220b +#define STM32F429_PC2_FUNC_ETH_MII_TXD2 0x220c +#define STM32F429_PC2_FUNC_FMC_SDNE0 0x220d +#define STM32F429_PC2_FUNC_EVENTOUT 0x2210 +#define STM32F429_PC2_FUNC_ANALOG 0x2211 + +#define STM32F429_PC3_FUNC_GPIO 0x2300 +#define STM32F429_PC3_FUNC_SPI2_MOSI_I2S2_SD 0x2306 +#define STM32F429_PC3_FUNC_OTG_HS_ULPI_NXT 0x230b +#define STM32F429_PC3_FUNC_ETH_MII_TX_CLK 0x230c +#define STM32F429_PC3_FUNC_FMC_SDCKE0 0x230d +#define STM32F429_PC3_FUNC_EVENTOUT 0x2310 +#define STM32F429_PC3_FUNC_ANALOG 0x2311 + +#define STM32F429_PC4_FUNC_GPIO 0x2400 +#define STM32F429_PC4_FUNC_ETH_MII_RXD0_ETH_RMII_RXD0 0x240c +#define STM32F429_PC4_FUNC_EVENTOUT 0x2410 +#define STM32F429_PC4_FUNC_ANALOG 0x2411 + +#define STM32F429_PC5_FUNC_GPIO 0x2500 +#define STM32F429_PC5_FUNC_ETH_MII_RXD1_ETH_RMII_RXD1 0x250c +#define STM32F429_PC5_FUNC_EVENTOUT 0x2510 +#define STM32F429_PC5_FUNC_ANALOG 0x2511 + +#define STM32F429_PC6_FUNC_GPIO 0x2600 +#define STM32F429_PC6_FUNC_TIM3_CH1 0x2603 +#define STM32F429_PC6_FUNC_TIM8_CH1 0x2604 +#define STM32F429_PC6_FUNC_I2S2_MCK 0x2606 +#define STM32F429_PC6_FUNC_USART6_TX 0x2609 +#define STM32F429_PC6_FUNC_SDIO_D6 0x260d +#define STM32F429_PC6_FUNC_DCMI_D0 0x260e +#define STM32F429_PC6_FUNC_LCD_HSYNC 0x260f +#define STM32F429_PC6_FUNC_EVENTOUT 0x2610 +#define STM32F429_PC6_FUNC_ANALOG 0x2611 + +#define STM32F429_PC7_FUNC_GPIO 0x2700 +#define STM32F429_PC7_FUNC_TIM3_CH2 0x2703 +#define STM32F429_PC7_FUNC_TIM8_CH2 0x2704 +#define STM32F429_PC7_FUNC_I2S3_MCK 0x2707 +#define STM32F429_PC7_FUNC_USART6_RX 0x2709 +#define STM32F429_PC7_FUNC_SDIO_D7 0x270d +#define STM32F429_PC7_FUNC_DCMI_D1 0x270e +#define STM32F429_PC7_FUNC_LCD_G6 0x270f +#define STM32F429_PC7_FUNC_EVENTOUT 0x2710 +#define 
STM32F429_PC7_FUNC_ANALOG 0x2711 + +#define STM32F429_PC8_FUNC_GPIO 0x2800 +#define STM32F429_PC8_FUNC_TIM3_CH3 0x2803 +#define STM32F429_PC8_FUNC_TIM8_CH3 0x2804 +#define STM32F429_PC8_FUNC_USART6_CK 0x2809 +#define STM32F429_PC8_FUNC_SDIO_D0 0x280d +#define STM32F429_PC8_FUNC_DCMI_D2 0x280e +#define STM32F429_PC8_FUNC_EVENTOUT 0x2810 +#define STM32F429_PC8_FUNC_ANALOG 0x2811 + +#define STM32F429_PC9_FUNC_GPIO 0x2900 +#define STM32F429_PC9_FUNC_MCO2 0x2901 +#define STM32F429_PC9_FUNC_TIM3_CH4 0x2903 +#define STM32F429_PC9_FUNC_TIM8_CH4 0x2904 +#define STM32F429_PC9_FUNC_I2C3_SDA 0x2905 +#define STM32F429_PC9_FUNC_I2S_CKIN 0x2906 +#define STM32F429_PC9_FUNC_SDIO_D1 0x290d +#define STM32F429_PC9_FUNC_DCMI_D3 0x290e +#define STM32F429_PC9_FUNC_EVENTOUT 0x2910 +#define STM32F429_PC9_FUNC_ANALOG 0x2911 + +#define STM32F429_PC10_FUNC_GPIO 0x2a00 +#define STM32F429_PC10_FUNC_SPI3_SCK_I2S3_CK 0x2a07 +#define STM32F429_PC10_FUNC_USART3_TX 0x2a08 +#define STM32F429_PC10_FUNC_UART4_TX 0x2a09 +#define STM32F429_PC10_FUNC_SDIO_D2 0x2a0d +#define STM32F429_PC10_FUNC_DCMI_D8 0x2a0e +#define STM32F429_PC10_FUNC_LCD_R2 0x2a0f +#define STM32F429_PC10_FUNC_EVENTOUT 0x2a10 +#define STM32F429_PC10_FUNC_ANALOG 0x2a11 + +#define STM32F429_PC11_FUNC_GPIO 0x2b00 +#define STM32F429_PC11_FUNC_I2S3EXT_SD 0x2b06 +#define STM32F429_PC11_FUNC_SPI3_MISO 0x2b07 +#define STM32F429_PC11_FUNC_USART3_RX 0x2b08 +#define STM32F429_PC11_FUNC_UART4_RX 0x2b09 +#define STM32F429_PC11_FUNC_SDIO_D3 0x2b0d +#define STM32F429_PC11_FUNC_DCMI_D4 0x2b0e +#define STM32F429_PC11_FUNC_EVENTOUT 0x2b10 +#define STM32F429_PC11_FUNC_ANALOG 0x2b11 + +#define STM32F429_PC12_FUNC_GPIO 0x2c00 +#define STM32F429_PC12_FUNC_SPI3_MOSI_I2S3_SD 0x2c07 +#define STM32F429_PC12_FUNC_USART3_CK 0x2c08 +#define STM32F429_PC12_FUNC_UART5_TX 0x2c09 +#define STM32F429_PC12_FUNC_SDIO_CK 0x2c0d +#define STM32F429_PC12_FUNC_DCMI_D9 0x2c0e +#define STM32F429_PC12_FUNC_EVENTOUT 0x2c10 +#define STM32F429_PC12_FUNC_ANALOG 0x2c11 + +#define 
STM32F429_PC13_FUNC_GPIO 0x2d00 +#define STM32F429_PC13_FUNC_EVENTOUT 0x2d10 +#define STM32F429_PC13_FUNC_ANALOG 0x2d11 + +#define STM32F429_PC14_FUNC_GPIO 0x2e00 +#define STM32F429_PC14_FUNC_EVENTOUT 0x2e10 +#define STM32F429_PC14_FUNC_ANALOG 0x2e11 + +#define STM32F429_PC15_FUNC_GPIO 0x2f00 +#define STM32F429_PC15_FUNC_EVENTOUT 0x2f10 +#define STM32F429_PC15_FUNC_ANALOG 0x2f11 + + + +#define STM32F429_PD0_FUNC_GPIO 0x3000 +#define STM32F429_PD0_FUNC_CAN1_RX 0x300a +#define STM32F429_PD0_FUNC_FMC_D2 0x300d +#define STM32F429_PD0_FUNC_EVENTOUT 0x3010 +#define STM32F429_PD0_FUNC_ANALOG 0x3011 + +#define STM32F429_PD1_FUNC_GPIO 0x3100 +#define STM32F429_PD1_FUNC_CAN1_TX 0x310a +#define STM32F429_PD1_FUNC_FMC_D3 0x310d +#define STM32F429_PD1_FUNC_EVENTOUT 0x3110 +#define STM32F429_PD1_FUNC_ANALOG 0x3111 + +#define STM32F429_PD2_FUNC_GPIO 0x3200 +#define STM32F429_PD2_FUNC_TIM3_ETR 0x3203 +#define STM32F429_PD2_FUNC_UART5_RX 0x3209 +#define STM32F429_PD2_FUNC_SDIO_CMD 0x320d +#define STM32F429_PD2_FUNC_DCMI_D11 0x320e +#define STM32F429_PD2_FUNC_EVENTOUT 0x3210 +#define STM32F429_PD2_FUNC_ANALOG 0x3211 + +#define STM32F429_PD3_FUNC_GPIO 0x3300 +#define STM32F429_PD3_FUNC_SPI2_SCK_I2S2_CK 0x3306 +#define STM32F429_PD3_FUNC_USART2_CTS 0x3308 +#define STM32F429_PD3_FUNC_FMC_CLK 0x330d +#define STM32F429_PD3_FUNC_DCMI_D5 0x330e +#define STM32F429_PD3_FUNC_LCD_G7 0x330f +#define STM32F429_PD3_FUNC_EVENTOUT 0x3310 +#define STM32F429_PD3_FUNC_ANALOG 0x3311 + +#define STM32F429_PD4_FUNC_GPIO 0x3400 +#define STM32F429_PD4_FUNC_USART2_RTS 0x3408 +#define STM32F429_PD4_FUNC_FMC_NOE 0x340d +#define STM32F429_PD4_FUNC_EVENTOUT 0x3410 +#define STM32F429_PD4_FUNC_ANALOG 0x3411 + +#define STM32F429_PD5_FUNC_GPIO 0x3500 +#define STM32F429_PD5_FUNC_USART2_TX 0x3508 +#define STM32F429_PD5_FUNC_FMC_NWE 0x350d +#define STM32F429_PD5_FUNC_EVENTOUT 0x3510 +#define STM32F429_PD5_FUNC_ANALOG 0x3511 + +#define STM32F429_PD6_FUNC_GPIO 0x3600 +#define STM32F429_PD6_FUNC_SPI3_MOSI_I2S3_SD 0x3606 
+#define STM32F429_PD6_FUNC_SAI1_SD_A 0x3607 +#define STM32F429_PD6_FUNC_USART2_RX 0x3608 +#define STM32F429_PD6_FUNC_FMC_NWAIT 0x360d +#define STM32F429_PD6_FUNC_DCMI_D10 0x360e +#define STM32F429_PD6_FUNC_LCD_B2 0x360f +#define STM32F429_PD6_FUNC_EVENTOUT 0x3610 +#define STM32F429_PD6_FUNC_ANALOG 0x3611 + +#define STM32F429_PD7_FUNC_GPIO 0x3700 +#define STM32F429_PD7_FUNC_USART2_CK 0x3708 +#define STM32F429_PD7_FUNC_FMC_NE1_FMC_NCE2 0x370d +#define STM32F429_PD7_FUNC_EVENTOUT 0x3710 +#define STM32F429_PD7_FUNC_ANALOG 0x3711 + +#define STM32F429_PD8_FUNC_GPIO 0x3800 +#define STM32F429_PD8_FUNC_USART3_TX 0x3808 +#define STM32F429_PD8_FUNC_FMC_D13 0x380d +#define STM32F429_PD8_FUNC_EVENTOUT 0x3810 +#define STM32F429_PD8_FUNC_ANALOG 0x3811 + +#define STM32F429_PD9_FUNC_GPIO 0x3900 +#define STM32F429_PD9_FUNC_USART3_RX 0x3908 +#define STM32F429_PD9_FUNC_FMC_D14 0x390d +#define STM32F429_PD9_FUNC_EVENTOUT 0x3910 +#define STM32F429_PD9_FUNC_ANALOG 0x3911 + +#define STM32F429_PD10_FUNC_GPIO 0x3a00 +#define STM32F429_PD10_FUNC_USART3_CK 0x3a08 +#define STM32F429_PD10_FUNC_FMC_D15 0x3a0d +#define STM32F429_PD10_FUNC_LCD_B3 0x3a0f +#define STM32F429_PD10_FUNC_EVENTOUT 0x3a10 +#define STM32F429_PD10_FUNC_ANALOG 0x3a11 + +#define STM32F429_PD11_FUNC_GPIO 0x3b00 +#define STM32F429_PD11_FUNC_USART3_CTS 0x3b08 +#define STM32F429_PD11_FUNC_FMC_A16 0x3b0d +#define STM32F429_PD11_FUNC_EVENTOUT 0x3b10 +#define STM32F429_PD11_FUNC_ANALOG 0x3b11 + +#define STM32F429_PD12_FUNC_GPIO 0x3c00 +#define STM32F429_PD12_FUNC_TIM4_CH1 0x3c03 +#define STM32F429_PD12_FUNC_USART3_RTS 0x3c08 +#define STM32F429_PD12_FUNC_FMC_A17 0x3c0d +#define STM32F429_PD12_FUNC_EVENTOUT 0x3c10 +#define STM32F429_PD12_FUNC_ANALOG 0x3c11 + +#define STM32F429_PD13_FUNC_GPIO 0x3d00 +#define STM32F429_PD13_FUNC_TIM4_CH2 0x3d03 +#define STM32F429_PD13_FUNC_FMC_A18 0x3d0d +#define STM32F429_PD13_FUNC_EVENTOUT 0x3d10 +#define STM32F429_PD13_FUNC_ANALOG 0x3d11 + +#define STM32F429_PD14_FUNC_GPIO 0x3e00 +#define 
STM32F429_PD14_FUNC_TIM4_CH3 0x3e03 +#define STM32F429_PD14_FUNC_FMC_D0 0x3e0d +#define STM32F429_PD14_FUNC_EVENTOUT 0x3e10 +#define STM32F429_PD14_FUNC_ANALOG 0x3e11 + +#define STM32F429_PD15_FUNC_GPIO 0x3f00 +#define STM32F429_PD15_FUNC_TIM4_CH4 0x3f03 +#define STM32F429_PD15_FUNC_FMC_D1 0x3f0d +#define STM32F429_PD15_FUNC_EVENTOUT 0x3f10 +#define STM32F429_PD15_FUNC_ANALOG 0x3f11 + + + +#define STM32F429_PE0_FUNC_GPIO 0x4000 +#define STM32F429_PE0_FUNC_TIM4_ETR 0x4003 +#define STM32F429_PE0_FUNC_UART8_RX 0x4009 +#define STM32F429_PE0_FUNC_FMC_NBL0 0x400d +#define STM32F429_PE0_FUNC_DCMI_D2 0x400e +#define STM32F429_PE0_FUNC_EVENTOUT 0x4010 +#define STM32F429_PE0_FUNC_ANALOG 0x4011 + +#define STM32F429_PE1_FUNC_GPIO 0x4100 +#define STM32F429_PE1_FUNC_UART8_TX 0x4109 +#define STM32F429_PE1_FUNC_FMC_NBL1 0x410d +#define STM32F429_PE1_FUNC_DCMI_D3 0x410e +#define STM32F429_PE1_FUNC_EVENTOUT 0x4110 +#define STM32F429_PE1_FUNC_ANALOG 0x4111 + +#define STM32F429_PE2_FUNC_GPIO 0x4200 +#define STM32F429_PE2_FUNC_TRACECLK 0x4201 +#define STM32F429_PE2_FUNC_SPI4_SCK 0x4206 +#define STM32F429_PE2_FUNC_SAI1_MCLK_A 0x4207 +#define STM32F429_PE2_FUNC_ETH_MII_TXD3 0x420c +#define STM32F429_PE2_FUNC_FMC_A23 0x420d +#define STM32F429_PE2_FUNC_EVENTOUT 0x4210 +#define STM32F429_PE2_FUNC_ANALOG 0x4211 + +#define STM32F429_PE3_FUNC_GPIO 0x4300 +#define STM32F429_PE3_FUNC_TRACED0 0x4301 +#define STM32F429_PE3_FUNC_SAI1_SD_B 0x4307 +#define STM32F429_PE3_FUNC_FMC_A19 0x430d +#define STM32F429_PE3_FUNC_EVENTOUT 0x4310 +#define STM32F429_PE3_FUNC_ANALOG 0x4311 + +#define STM32F429_PE4_FUNC_GPIO 0x4400 +#define STM32F429_PE4_FUNC_TRACED1 0x4401 +#define STM32F429_PE4_FUNC_SPI4_NSS 0x4406 +#define STM32F429_PE4_FUNC_SAI1_FS_A 0x4407 +#define STM32F429_PE4_FUNC_FMC_A20 0x440d +#define STM32F429_PE4_FUNC_DCMI_D4 0x440e +#define STM32F429_PE4_FUNC_LCD_B0 0x440f +#define STM32F429_PE4_FUNC_EVENTOUT 0x4410 +#define STM32F429_PE4_FUNC_ANALOG 0x4411 + +#define STM32F429_PE5_FUNC_GPIO 0x4500 
+#define STM32F429_PE5_FUNC_TRACED2 0x4501 +#define STM32F429_PE5_FUNC_TIM9_CH1 0x4504 +#define STM32F429_PE5_FUNC_SPI4_MISO 0x4506 +#define STM32F429_PE5_FUNC_SAI1_SCK_A 0x4507 +#define STM32F429_PE5_FUNC_FMC_A21 0x450d +#define STM32F429_PE5_FUNC_DCMI_D6 0x450e +#define STM32F429_PE5_FUNC_LCD_G0 0x450f +#define STM32F429_PE5_FUNC_EVENTOUT 0x4510 +#define STM32F429_PE5_FUNC_ANALOG 0x4511 + +#define STM32F429_PE6_FUNC_GPIO 0x4600 +#define STM32F429_PE6_FUNC_TRACED3 0x4601 +#define STM32F429_PE6_FUNC_TIM9_CH2 0x4604 +#define STM32F429_PE6_FUNC_SPI4_MOSI 0x4606 +#define STM32F429_PE6_FUNC_SAI1_SD_A 0x4607 +#define STM32F429_PE6_FUNC_FMC_A22 0x460d +#define STM32F429_PE6_FUNC_DCMI_D7 0x460e +#define STM32F429_PE6_FUNC_LCD_G1 0x460f +#define STM32F429_PE6_FUNC_EVENTOUT 0x4610 +#define STM32F429_PE6_FUNC_ANALOG 0x4611 + +#define STM32F429_PE7_FUNC_GPIO 0x4700 +#define STM32F429_PE7_FUNC_TIM1_ETR 0x4702 +#define STM32F429_PE7_FUNC_UART7_RX 0x4709 +#define STM32F429_PE7_FUNC_FMC_D4 0x470d +#define STM32F429_PE7_FUNC_EVENTOUT 0x4710 +#define STM32F429_PE7_FUNC_ANALOG 0x4711 + +#define STM32F429_PE8_FUNC_GPIO 0x4800 +#define STM32F429_PE8_FUNC_TIM1_CH1N 0x4802 +#define STM32F429_PE8_FUNC_UART7_TX 0x4809 +#define STM32F429_PE8_FUNC_FMC_D5 0x480d +#define STM32F429_PE8_FUNC_EVENTOUT 0x4810 +#define STM32F429_PE8_FUNC_ANALOG 0x4811 + +#define STM32F429_PE9_FUNC_GPIO 0x4900 +#define STM32F429_PE9_FUNC_TIM1_CH1 0x4902 +#define STM32F429_PE9_FUNC_FMC_D6 0x490d +#define STM32F429_PE9_FUNC_EVENTOUT 0x4910 +#define STM32F429_PE9_FUNC_ANALOG 0x4911 + +#define STM32F429_PE10_FUNC_GPIO 0x4a00 +#define STM32F429_PE10_FUNC_TIM1_CH2N 0x4a02 +#define STM32F429_PE10_FUNC_FMC_D7 0x4a0d +#define STM32F429_PE10_FUNC_EVENTOUT 0x4a10 +#define STM32F429_PE10_FUNC_ANALOG 0x4a11 + +#define STM32F429_PE11_FUNC_GPIO 0x4b00 +#define STM32F429_PE11_FUNC_TIM1_CH2 0x4b02 +#define STM32F429_PE11_FUNC_SPI4_NSS 0x4b06 +#define STM32F429_PE11_FUNC_FMC_D8 0x4b0d +#define STM32F429_PE11_FUNC_LCD_G3 0x4b0f 
+#define STM32F429_PE11_FUNC_EVENTOUT 0x4b10 +#define STM32F429_PE11_FUNC_ANALOG 0x4b11 + +#define STM32F429_PE12_FUNC_GPIO 0x4c00 +#define STM32F429_PE12_FUNC_TIM1_CH3N 0x4c02 +#define STM32F429_PE12_FUNC_SPI4_SCK 0x4c06 +#define STM32F429_PE12_FUNC_FMC_D9 0x4c0d +#define STM32F429_PE12_FUNC_LCD_B4 0x4c0f +#define STM32F429_PE12_FUNC_EVENTOUT 0x4c10 +#define STM32F429_PE12_FUNC_ANALOG 0x4c11 + +#define STM32F429_PE13_FUNC_GPIO 0x4d00 +#define STM32F429_PE13_FUNC_TIM1_CH3 0x4d02 +#define STM32F429_PE13_FUNC_SPI4_MISO 0x4d06 +#define STM32F429_PE13_FUNC_FMC_D10 0x4d0d +#define STM32F429_PE13_FUNC_LCD_DE 0x4d0f +#define STM32F429_PE13_FUNC_EVENTOUT 0x4d10 +#define STM32F429_PE13_FUNC_ANALOG 0x4d11 + +#define STM32F429_PE14_FUNC_GPIO 0x4e00 +#define STM32F429_PE14_FUNC_TIM1_CH4 0x4e02 +#define STM32F429_PE14_FUNC_SPI4_MOSI 0x4e06 +#define STM32F429_PE14_FUNC_FMC_D11 0x4e0d +#define STM32F429_PE14_FUNC_LCD_CLK 0x4e0f +#define STM32F429_PE14_FUNC_EVENTOUT 0x4e10 +#define STM32F429_PE14_FUNC_ANALOG 0x4e11 + +#define STM32F429_PE15_FUNC_GPIO 0x4f00 +#define STM32F429_PE15_FUNC_TIM1_BKIN 0x4f02 +#define STM32F429_PE15_FUNC_FMC_D12 0x4f0d +#define STM32F429_PE15_FUNC_LCD_R7 0x4f0f +#define STM32F429_PE15_FUNC_EVENTOUT 0x4f10 +#define STM32F429_PE15_FUNC_ANALOG 0x4f11 + + + +#define STM32F429_PF0_FUNC_GPIO 0x5000 +#define STM32F429_PF0_FUNC_I2C2_SDA 0x5005 +#define STM32F429_PF0_FUNC_FMC_A0 0x500d +#define STM32F429_PF0_FUNC_EVENTOUT 0x5010 +#define STM32F429_PF0_FUNC_ANALOG 0x5011 + +#define STM32F429_PF1_FUNC_GPIO 0x5100 +#define STM32F429_PF1_FUNC_I2C2_SCL 0x5105 +#define STM32F429_PF1_FUNC_FMC_A1 0x510d +#define STM32F429_PF1_FUNC_EVENTOUT 0x5110 +#define STM32F429_PF1_FUNC_ANALOG 0x5111 + +#define STM32F429_PF2_FUNC_GPIO 0x5200 +#define STM32F429_PF2_FUNC_I2C2_SMBA 0x5205 +#define STM32F429_PF2_FUNC_FMC_A2 0x520d +#define STM32F429_PF2_FUNC_EVENTOUT 0x5210 +#define STM32F429_PF2_FUNC_ANALOG 0x5211 + +#define STM32F429_PF3_FUNC_GPIO 0x5300 +#define 
STM32F429_PF3_FUNC_FMC_A3 0x530d +#define STM32F429_PF3_FUNC_EVENTOUT 0x5310 +#define STM32F429_PF3_FUNC_ANALOG 0x5311 + +#define STM32F429_PF4_FUNC_GPIO 0x5400 +#define STM32F429_PF4_FUNC_FMC_A4 0x540d +#define STM32F429_PF4_FUNC_EVENTOUT 0x5410 +#define STM32F429_PF4_FUNC_ANALOG 0x5411 + +#define STM32F429_PF5_FUNC_GPIO 0x5500 +#define STM32F429_PF5_FUNC_FMC_A5 0x550d +#define STM32F429_PF5_FUNC_EVENTOUT 0x5510 +#define STM32F429_PF5_FUNC_ANALOG 0x5511 + +#define STM32F429_PF6_FUNC_GPIO 0x5600 +#define STM32F429_PF6_FUNC_TIM10_CH1 0x5604 +#define STM32F429_PF6_FUNC_SPI5_NSS 0x5606 +#define STM32F429_PF6_FUNC_SAI1_SD_B 0x5607 +#define STM32F429_PF6_FUNC_UART7_RX 0x5609 +#define STM32F429_PF6_FUNC_FMC_NIORD 0x560d +#define STM32F429_PF6_FUNC_EVENTOUT 0x5610 +#define STM32F429_PF6_FUNC_ANALOG 0x5611 + +#define STM32F429_PF7_FUNC_GPIO 0x5700 +#define STM32F429_PF7_FUNC_TIM11_CH1 0x5704 +#define STM32F429_PF7_FUNC_SPI5_SCK 0x5706 +#define STM32F429_PF7_FUNC_SAI1_MCLK_B 0x5707 +#define STM32F429_PF7_FUNC_UART7_TX 0x5709 +#define STM32F429_PF7_FUNC_FMC_NREG 0x570d +#define STM32F429_PF7_FUNC_EVENTOUT 0x5710 +#define STM32F429_PF7_FUNC_ANALOG 0x5711 + +#define STM32F429_PF8_FUNC_GPIO 0x5800 +#define STM32F429_PF8_FUNC_SPI5_MISO 0x5806 +#define STM32F429_PF8_FUNC_SAI1_SCK_B 0x5807 +#define STM32F429_PF8_FUNC_TIM13_CH1 0x580a +#define STM32F429_PF8_FUNC_FMC_NIOWR 0x580d +#define STM32F429_PF8_FUNC_EVENTOUT 0x5810 +#define STM32F429_PF8_FUNC_ANALOG 0x5811 + +#define STM32F429_PF9_FUNC_GPIO 0x5900 +#define STM32F429_PF9_FUNC_SPI5_MOSI 0x5906 +#define STM32F429_PF9_FUNC_SAI1_FS_B 0x5907 +#define STM32F429_PF9_FUNC_TIM14_CH1 0x590a +#define STM32F429_PF9_FUNC_FMC_CD 0x590d +#define STM32F429_PF9_FUNC_EVENTOUT 0x5910 +#define STM32F429_PF9_FUNC_ANALOG 0x5911 + +#define STM32F429_PF10_FUNC_GPIO 0x5a00 +#define STM32F429_PF10_FUNC_FMC_INTR 0x5a0d +#define STM32F429_PF10_FUNC_DCMI_D11 0x5a0e +#define STM32F429_PF10_FUNC_LCD_DE 0x5a0f +#define STM32F429_PF10_FUNC_EVENTOUT 0x5a10 
+#define STM32F429_PF10_FUNC_ANALOG 0x5a11 + +#define STM32F429_PF11_FUNC_GPIO 0x5b00 +#define STM32F429_PF11_FUNC_SPI5_MOSI 0x5b06 +#define STM32F429_PF11_FUNC_FMC_SDNRAS 0x5b0d +#define STM32F429_PF11_FUNC_DCMI_D12 0x5b0e +#define STM32F429_PF11_FUNC_EVENTOUT 0x5b10 +#define STM32F429_PF11_FUNC_ANALOG 0x5b11 + +#define STM32F429_PF12_FUNC_GPIO 0x5c00 +#define STM32F429_PF12_FUNC_FMC_A6 0x5c0d +#define STM32F429_PF12_FUNC_EVENTOUT 0x5c10 +#define STM32F429_PF12_FUNC_ANALOG 0x5c11 + +#define STM32F429_PF13_FUNC_GPIO 0x5d00 +#define STM32F429_PF13_FUNC_FMC_A7 0x5d0d +#define STM32F429_PF13_FUNC_EVENTOUT 0x5d10 +#define STM32F429_PF13_FUNC_ANALOG 0x5d11 + +#define STM32F429_PF14_FUNC_GPIO 0x5e00 +#define STM32F429_PF14_FUNC_FMC_A8 0x5e0d +#define STM32F429_PF14_FUNC_EVENTOUT 0x5e10 +#define STM32F429_PF14_FUNC_ANALOG 0x5e11 + +#define STM32F429_PF15_FUNC_GPIO 0x5f00 +#define STM32F429_PF15_FUNC_FMC_A9 0x5f0d +#define STM32F429_PF15_FUNC_EVENTOUT 0x5f10 +#define STM32F429_PF15_FUNC_ANALOG 0x5f11 + + + +#define STM32F429_PG0_FUNC_GPIO 0x6000 +#define STM32F429_PG0_FUNC_FMC_A10 0x600d +#define STM32F429_PG0_FUNC_EVENTOUT 0x6010 +#define STM32F429_PG0_FUNC_ANALOG 0x6011 + +#define STM32F429_PG1_FUNC_GPIO 0x6100 +#define STM32F429_PG1_FUNC_FMC_A11 0x610d +#define STM32F429_PG1_FUNC_EVENTOUT 0x6110 +#define STM32F429_PG1_FUNC_ANALOG 0x6111 + +#define STM32F429_PG2_FUNC_GPIO 0x6200 +#define STM32F429_PG2_FUNC_FMC_A12 0x620d +#define STM32F429_PG2_FUNC_EVENTOUT 0x6210 +#define STM32F429_PG2_FUNC_ANALOG 0x6211 + +#define STM32F429_PG3_FUNC_GPIO 0x6300 +#define STM32F429_PG3_FUNC_FMC_A13 0x630d +#define STM32F429_PG3_FUNC_EVENTOUT 0x6310 +#define STM32F429_PG3_FUNC_ANALOG 0x6311 + +#define STM32F429_PG4_FUNC_GPIO 0x6400 +#define STM32F429_PG4_FUNC_FMC_A14_FMC_BA0 0x640d +#define STM32F429_PG4_FUNC_EVENTOUT 0x6410 +#define STM32F429_PG4_FUNC_ANALOG 0x6411 + +#define STM32F429_PG5_FUNC_GPIO 0x6500 +#define STM32F429_PG5_FUNC_FMC_A15_FMC_BA1 0x650d +#define 
STM32F429_PG5_FUNC_EVENTOUT 0x6510 +#define STM32F429_PG5_FUNC_ANALOG 0x6511 + +#define STM32F429_PG6_FUNC_GPIO 0x6600 +#define STM32F429_PG6_FUNC_FMC_INT2 0x660d +#define STM32F429_PG6_FUNC_DCMI_D12 0x660e +#define STM32F429_PG6_FUNC_LCD_R7 0x660f +#define STM32F429_PG6_FUNC_EVENTOUT 0x6610 +#define STM32F429_PG6_FUNC_ANALOG 0x6611 + +#define STM32F429_PG7_FUNC_GPIO 0x6700 +#define STM32F429_PG7_FUNC_USART6_CK 0x6709 +#define STM32F429_PG7_FUNC_FMC_INT3 0x670d +#define STM32F429_PG7_FUNC_DCMI_D13 0x670e +#define STM32F429_PG7_FUNC_LCD_CLK 0x670f +#define STM32F429_PG7_FUNC_EVENTOUT 0x6710 +#define STM32F429_PG7_FUNC_ANALOG 0x6711 + +#define STM32F429_PG8_FUNC_GPIO 0x6800 +#define STM32F429_PG8_FUNC_SPI6_NSS 0x6806 +#define STM32F429_PG8_FUNC_USART6_RTS 0x6809 +#define STM32F429_PG8_FUNC_ETH_PPS_OUT 0x680c +#define STM32F429_PG8_FUNC_FMC_SDCLK 0x680d +#define STM32F429_PG8_FUNC_EVENTOUT 0x6810 +#define STM32F429_PG8_FUNC_ANALOG 0x6811 + +#define STM32F429_PG9_FUNC_GPIO 0x6900 +#define STM32F429_PG9_FUNC_USART6_RX 0x6909 +#define STM32F429_PG9_FUNC_FMC_NE2_FMC_NCE3 0x690d +#define STM32F429_PG9_FUNC_DCMI_VSYNC 0x690e +#define STM32F429_PG9_FUNC_EVENTOUT 0x6910 +#define STM32F429_PG9_FUNC_ANALOG 0x6911 + +#define STM32F429_PG10_FUNC_GPIO 0x6a00 +#define STM32F429_PG10_FUNC_LCD_G3 0x6a0a +#define STM32F429_PG10_FUNC_FMC_NCE4_1_FMC_NE3 0x6a0d +#define STM32F429_PG10_FUNC_DCMI_D2 0x6a0e +#define STM32F429_PG10_FUNC_LCD_B2 0x6a0f +#define STM32F429_PG10_FUNC_EVENTOUT 0x6a10 +#define STM32F429_PG10_FUNC_ANALOG 0x6a11 + +#define STM32F429_PG11_FUNC_GPIO 0x6b00 +#define STM32F429_PG11_FUNC_ETH_MII_TX_EN_ETH_RMII_TX_EN 0x6b0c +#define STM32F429_PG11_FUNC_FMC_NCE4_2 0x6b0d +#define STM32F429_PG11_FUNC_DCMI_D3 0x6b0e +#define STM32F429_PG11_FUNC_LCD_B3 0x6b0f +#define STM32F429_PG11_FUNC_EVENTOUT 0x6b10 +#define STM32F429_PG11_FUNC_ANALOG 0x6b11 + +#define STM32F429_PG12_FUNC_GPIO 0x6c00 +#define STM32F429_PG12_FUNC_SPI6_MISO 0x6c06 +#define STM32F429_PG12_FUNC_USART6_RTS 
0x6c09 +#define STM32F429_PG12_FUNC_LCD_B4 0x6c0a +#define STM32F429_PG12_FUNC_FMC_NE4 0x6c0d +#define STM32F429_PG12_FUNC_LCD_B1 0x6c0f +#define STM32F429_PG12_FUNC_EVENTOUT 0x6c10 +#define STM32F429_PG12_FUNC_ANALOG 0x6c11 + +#define STM32F429_PG13_FUNC_GPIO 0x6d00 +#define STM32F429_PG13_FUNC_SPI6_SCK 0x6d06 +#define STM32F429_PG13_FUNC_USART6_CTS 0x6d09 +#define STM32F429_PG13_FUNC_ETH_MII_TXD0_ETH_RMII_TXD0 0x6d0c +#define STM32F429_PG13_FUNC_FMC_A24 0x6d0d +#define STM32F429_PG13_FUNC_EVENTOUT 0x6d10 +#define STM32F429_PG13_FUNC_ANALOG 0x6d11 + +#define STM32F429_PG14_FUNC_GPIO 0x6e00 +#define STM32F429_PG14_FUNC_SPI6_MOSI 0x6e06 +#define STM32F429_PG14_FUNC_USART6_TX 0x6e09 +#define STM32F429_PG14_FUNC_ETH_MII_TXD1_ETH_RMII_TXD1 0x6e0c +#define STM32F429_PG14_FUNC_FMC_A25 0x6e0d +#define STM32F429_PG14_FUNC_EVENTOUT 0x6e10 +#define STM32F429_PG14_FUNC_ANALOG 0x6e11 + +#define STM32F429_PG15_FUNC_GPIO 0x6f00 +#define STM32F429_PG15_FUNC_USART6_CTS 0x6f09 +#define STM32F429_PG15_FUNC_FMC_SDNCAS 0x6f0d +#define STM32F429_PG15_FUNC_DCMI_D13 0x6f0e +#define STM32F429_PG15_FUNC_EVENTOUT 0x6f10 +#define STM32F429_PG15_FUNC_ANALOG 0x6f11 + + + +#define STM32F429_PH0_FUNC_GPIO 0x7000 +#define STM32F429_PH0_FUNC_EVENTOUT 0x7010 +#define STM32F429_PH0_FUNC_ANALOG 0x7011 + +#define STM32F429_PH1_FUNC_GPIO 0x7100 +#define STM32F429_PH1_FUNC_EVENTOUT 0x7110 +#define STM32F429_PH1_FUNC_ANALOG 0x7111 + +#define STM32F429_PH2_FUNC_GPIO 0x7200 +#define STM32F429_PH2_FUNC_ETH_MII_CRS 0x720c +#define STM32F429_PH2_FUNC_FMC_SDCKE0 0x720d +#define STM32F429_PH2_FUNC_LCD_R0 0x720f +#define STM32F429_PH2_FUNC_EVENTOUT 0x7210 +#define STM32F429_PH2_FUNC_ANALOG 0x7211 + +#define STM32F429_PH3_FUNC_GPIO 0x7300 +#define STM32F429_PH3_FUNC_ETH_MII_COL 0x730c +#define STM32F429_PH3_FUNC_FMC_SDNE0 0x730d +#define STM32F429_PH3_FUNC_LCD_R1 0x730f +#define STM32F429_PH3_FUNC_EVENTOUT 0x7310 +#define STM32F429_PH3_FUNC_ANALOG 0x7311 + +#define STM32F429_PH4_FUNC_GPIO 0x7400 +#define 
STM32F429_PH4_FUNC_I2C2_SCL 0x7405 +#define STM32F429_PH4_FUNC_OTG_HS_ULPI_NXT 0x740b +#define STM32F429_PH4_FUNC_EVENTOUT 0x7410 +#define STM32F429_PH4_FUNC_ANALOG 0x7411 + +#define STM32F429_PH5_FUNC_GPIO 0x7500 +#define STM32F429_PH5_FUNC_I2C2_SDA 0x7505 +#define STM32F429_PH5_FUNC_SPI5_NSS 0x7506 +#define STM32F429_PH5_FUNC_FMC_SDNWE 0x750d +#define STM32F429_PH5_FUNC_EVENTOUT 0x7510 +#define STM32F429_PH5_FUNC_ANALOG 0x7511 + +#define STM32F429_PH6_FUNC_GPIO 0x7600 +#define STM32F429_PH6_FUNC_I2C2_SMBA 0x7605 +#define STM32F429_PH6_FUNC_SPI5_SCK 0x7606 +#define STM32F429_PH6_FUNC_TIM12_CH1 0x760a +#define STM32F429_PH6_FUNC_ETH_MII_RXD2 0x760c +#define STM32F429_PH6_FUNC_FMC_SDNE1 0x760d +#define STM32F429_PH6_FUNC_DCMI_D8 0x760e +#define STM32F429_PH6_FUNC_EVENTOUT 0x7610 +#define STM32F429_PH6_FUNC_ANALOG 0x7611 + +#define STM32F429_PH7_FUNC_GPIO 0x7700 +#define STM32F429_PH7_FUNC_I2C3_SCL 0x7705 +#define STM32F429_PH7_FUNC_SPI5_MISO 0x7706 +#define STM32F429_PH7_FUNC_ETH_MII_RXD3 0x770c +#define STM32F429_PH7_FUNC_FMC_SDCKE1 0x770d +#define STM32F429_PH7_FUNC_DCMI_D9 0x770e +#define STM32F429_PH7_FUNC_EVENTOUT 0x7710 +#define STM32F429_PH7_FUNC_ANALOG 0x7711 + +#define STM32F429_PH8_FUNC_GPIO 0x7800 +#define STM32F429_PH8_FUNC_I2C3_SDA 0x7805 +#define STM32F429_PH8_FUNC_FMC_D16 0x780d +#define STM32F429_PH8_FUNC_DCMI_HSYNC 0x780e +#define STM32F429_PH8_FUNC_LCD_R2 0x780f +#define STM32F429_PH8_FUNC_EVENTOUT 0x7810 +#define STM32F429_PH8_FUNC_ANALOG 0x7811 + +#define STM32F429_PH9_FUNC_GPIO 0x7900 +#define STM32F429_PH9_FUNC_I2C3_SMBA 0x7905 +#define STM32F429_PH9_FUNC_TIM12_CH2 0x790a +#define STM32F429_PH9_FUNC_FMC_D17 0x790d +#define STM32F429_PH9_FUNC_DCMI_D0 0x790e +#define STM32F429_PH9_FUNC_LCD_R3 0x790f +#define STM32F429_PH9_FUNC_EVENTOUT 0x7910 +#define STM32F429_PH9_FUNC_ANALOG 0x7911 + +#define STM32F429_PH10_FUNC_GPIO 0x7a00 +#define STM32F429_PH10_FUNC_TIM5_CH1 0x7a03 +#define STM32F429_PH10_FUNC_FMC_D18 0x7a0d +#define 
STM32F429_PH10_FUNC_DCMI_D1 0x7a0e +#define STM32F429_PH10_FUNC_LCD_R4 0x7a0f +#define STM32F429_PH10_FUNC_EVENTOUT 0x7a10 +#define STM32F429_PH10_FUNC_ANALOG 0x7a11 + +#define STM32F429_PH11_FUNC_GPIO 0x7b00 +#define STM32F429_PH11_FUNC_TIM5_CH2 0x7b03 +#define STM32F429_PH11_FUNC_FMC_D19 0x7b0d +#define STM32F429_PH11_FUNC_DCMI_D2 0x7b0e +#define STM32F429_PH11_FUNC_LCD_R5 0x7b0f +#define STM32F429_PH11_FUNC_EVENTOUT 0x7b10 +#define STM32F429_PH11_FUNC_ANALOG 0x7b11 + +#define STM32F429_PH12_FUNC_GPIO 0x7c00 +#define STM32F429_PH12_FUNC_TIM5_CH3 0x7c03 +#define STM32F429_PH12_FUNC_FMC_D20 0x7c0d +#define STM32F429_PH12_FUNC_DCMI_D3 0x7c0e +#define STM32F429_PH12_FUNC_LCD_R6 0x7c0f +#define STM32F429_PH12_FUNC_EVENTOUT 0x7c10 +#define STM32F429_PH12_FUNC_ANALOG 0x7c11 + +#define STM32F429_PH13_FUNC_GPIO 0x7d00 +#define STM32F429_PH13_FUNC_TIM8_CH1N 0x7d04 +#define STM32F429_PH13_FUNC_CAN1_TX 0x7d0a +#define STM32F429_PH13_FUNC_FMC_D21 0x7d0d +#define STM32F429_PH13_FUNC_LCD_G2 0x7d0f +#define STM32F429_PH13_FUNC_EVENTOUT 0x7d10 +#define STM32F429_PH13_FUNC_ANALOG 0x7d11 + +#define STM32F429_PH14_FUNC_GPIO 0x7e00 +#define STM32F429_PH14_FUNC_TIM8_CH2N 0x7e04 +#define STM32F429_PH14_FUNC_FMC_D22 0x7e0d +#define STM32F429_PH14_FUNC_DCMI_D4 0x7e0e +#define STM32F429_PH14_FUNC_LCD_G3 0x7e0f +#define STM32F429_PH14_FUNC_EVENTOUT 0x7e10 +#define STM32F429_PH14_FUNC_ANALOG 0x7e11 + +#define STM32F429_PH15_FUNC_GPIO 0x7f00 +#define STM32F429_PH15_FUNC_TIM8_CH3N 0x7f04 +#define STM32F429_PH15_FUNC_FMC_D23 0x7f0d +#define STM32F429_PH15_FUNC_DCMI_D11 0x7f0e +#define STM32F429_PH15_FUNC_LCD_G4 0x7f0f +#define STM32F429_PH15_FUNC_EVENTOUT 0x7f10 +#define STM32F429_PH15_FUNC_ANALOG 0x7f11 + + + +#define STM32F429_PI0_FUNC_GPIO 0x8000 +#define STM32F429_PI0_FUNC_TIM5_CH4 0x8003 +#define STM32F429_PI0_FUNC_SPI2_NSS_I2S2_WS 0x8006 +#define STM32F429_PI0_FUNC_FMC_D24 0x800d +#define STM32F429_PI0_FUNC_DCMI_D13 0x800e +#define STM32F429_PI0_FUNC_LCD_G5 0x800f +#define 
STM32F429_PI0_FUNC_EVENTOUT 0x8010 +#define STM32F429_PI0_FUNC_ANALOG 0x8011 + +#define STM32F429_PI1_FUNC_GPIO 0x8100 +#define STM32F429_PI1_FUNC_SPI2_SCK_I2S2_CK 0x8106 +#define STM32F429_PI1_FUNC_FMC_D25 0x810d +#define STM32F429_PI1_FUNC_DCMI_D8 0x810e +#define STM32F429_PI1_FUNC_LCD_G6 0x810f +#define STM32F429_PI1_FUNC_EVENTOUT 0x8110 +#define STM32F429_PI1_FUNC_ANALOG 0x8111 + +#define STM32F429_PI2_FUNC_GPIO 0x8200 +#define STM32F429_PI2_FUNC_TIM8_CH4 0x8204 +#define STM32F429_PI2_FUNC_SPI2_MISO 0x8206 +#define STM32F429_PI2_FUNC_I2S2EXT_SD 0x8207 +#define STM32F429_PI2_FUNC_FMC_D26 0x820d +#define STM32F429_PI2_FUNC_DCMI_D9 0x820e +#define STM32F429_PI2_FUNC_LCD_G7 0x820f +#define STM32F429_PI2_FUNC_EVENTOUT 0x8210 +#define STM32F429_PI2_FUNC_ANALOG 0x8211 + +#define STM32F429_PI3_FUNC_GPIO 0x8300 +#define STM32F429_PI3_FUNC_TIM8_ETR 0x8304 +#define STM32F429_PI3_FUNC_SPI2_MOSI_I2S2_SD 0x8306 +#define STM32F429_PI3_FUNC_FMC_D27 0x830d +#define STM32F429_PI3_FUNC_DCMI_D10 0x830e +#define STM32F429_PI3_FUNC_EVENTOUT 0x8310 +#define STM32F429_PI3_FUNC_ANALOG 0x8311 + +#define STM32F429_PI4_FUNC_GPIO 0x8400 +#define STM32F429_PI4_FUNC_TIM8_BKIN 0x8404 +#define STM32F429_PI4_FUNC_FMC_NBL2 0x840d +#define STM32F429_PI4_FUNC_DCMI_D5 0x840e +#define STM32F429_PI4_FUNC_LCD_B4 0x840f +#define STM32F429_PI4_FUNC_EVENTOUT 0x8410 +#define STM32F429_PI4_FUNC_ANALOG 0x8411 + +#define STM32F429_PI5_FUNC_GPIO 0x8500 +#define STM32F429_PI5_FUNC_TIM8_CH1 0x8504 +#define STM32F429_PI5_FUNC_FMC_NBL3 0x850d +#define STM32F429_PI5_FUNC_DCMI_VSYNC 0x850e +#define STM32F429_PI5_FUNC_LCD_B5 0x850f +#define STM32F429_PI5_FUNC_EVENTOUT 0x8510 +#define STM32F429_PI5_FUNC_ANALOG 0x8511 + +#define STM32F429_PI6_FUNC_GPIO 0x8600 +#define STM32F429_PI6_FUNC_TIM8_CH2 0x8604 +#define STM32F429_PI6_FUNC_FMC_D28 0x860d +#define STM32F429_PI6_FUNC_DCMI_D6 0x860e +#define STM32F429_PI6_FUNC_LCD_B6 0x860f +#define STM32F429_PI6_FUNC_EVENTOUT 0x8610 +#define STM32F429_PI6_FUNC_ANALOG 0x8611 + 
+#define STM32F429_PI7_FUNC_GPIO 0x8700 +#define STM32F429_PI7_FUNC_TIM8_CH3 0x8704 +#define STM32F429_PI7_FUNC_FMC_D29 0x870d +#define STM32F429_PI7_FUNC_DCMI_D7 0x870e +#define STM32F429_PI7_FUNC_LCD_B7 0x870f +#define STM32F429_PI7_FUNC_EVENTOUT 0x8710 +#define STM32F429_PI7_FUNC_ANALOG 0x8711 + +#define STM32F429_PI8_FUNC_GPIO 0x8800 +#define STM32F429_PI8_FUNC_EVENTOUT 0x8810 +#define STM32F429_PI8_FUNC_ANALOG 0x8811 + +#define STM32F429_PI9_FUNC_GPIO 0x8900 +#define STM32F429_PI9_FUNC_CAN1_RX 0x890a +#define STM32F429_PI9_FUNC_FMC_D30 0x890d +#define STM32F429_PI9_FUNC_LCD_VSYNC 0x890f +#define STM32F429_PI9_FUNC_EVENTOUT 0x8910 +#define STM32F429_PI9_FUNC_ANALOG 0x8911 + +#define STM32F429_PI10_FUNC_GPIO 0x8a00 +#define STM32F429_PI10_FUNC_ETH_MII_RX_ER 0x8a0c +#define STM32F429_PI10_FUNC_FMC_D31 0x8a0d +#define STM32F429_PI10_FUNC_LCD_HSYNC 0x8a0f +#define STM32F429_PI10_FUNC_EVENTOUT 0x8a10 +#define STM32F429_PI10_FUNC_ANALOG 0x8a11 + +#define STM32F429_PI11_FUNC_GPIO 0x8b00 +#define STM32F429_PI11_FUNC_OTG_HS_ULPI_DIR 0x8b0b +#define STM32F429_PI11_FUNC_EVENTOUT 0x8b10 +#define STM32F429_PI11_FUNC_ANALOG 0x8b11 + +#define STM32F429_PI12_FUNC_GPIO 0x8c00 +#define STM32F429_PI12_FUNC_LCD_HSYNC 0x8c0f +#define STM32F429_PI12_FUNC_EVENTOUT 0x8c10 +#define STM32F429_PI12_FUNC_ANALOG 0x8c11 + +#define STM32F429_PI13_FUNC_GPIO 0x8d00 +#define STM32F429_PI13_FUNC_LCD_VSYNC 0x8d0f +#define STM32F429_PI13_FUNC_EVENTOUT 0x8d10 +#define STM32F429_PI13_FUNC_ANALOG 0x8d11 + +#define STM32F429_PI14_FUNC_GPIO 0x8e00 +#define STM32F429_PI14_FUNC_LCD_CLK 0x8e0f +#define STM32F429_PI14_FUNC_EVENTOUT 0x8e10 +#define STM32F429_PI14_FUNC_ANALOG 0x8e11 + +#define STM32F429_PI15_FUNC_GPIO 0x8f00 +#define STM32F429_PI15_FUNC_LCD_R0 0x8f0f +#define STM32F429_PI15_FUNC_EVENTOUT 0x8f10 +#define STM32F429_PI15_FUNC_ANALOG 0x8f11 + + + +#define STM32F429_PJ0_FUNC_GPIO 0x9000 +#define STM32F429_PJ0_FUNC_LCD_R1 0x900f +#define STM32F429_PJ0_FUNC_EVENTOUT 0x9010 +#define 
STM32F429_PJ0_FUNC_ANALOG 0x9011 + +#define STM32F429_PJ1_FUNC_GPIO 0x9100 +#define STM32F429_PJ1_FUNC_LCD_R2 0x910f +#define STM32F429_PJ1_FUNC_EVENTOUT 0x9110 +#define STM32F429_PJ1_FUNC_ANALOG 0x9111 + +#define STM32F429_PJ2_FUNC_GPIO 0x9200 +#define STM32F429_PJ2_FUNC_LCD_R3 0x920f +#define STM32F429_PJ2_FUNC_EVENTOUT 0x9210 +#define STM32F429_PJ2_FUNC_ANALOG 0x9211 + +#define STM32F429_PJ3_FUNC_GPIO 0x9300 +#define STM32F429_PJ3_FUNC_LCD_R4 0x930f +#define STM32F429_PJ3_FUNC_EVENTOUT 0x9310 +#define STM32F429_PJ3_FUNC_ANALOG 0x9311 + +#define STM32F429_PJ4_FUNC_GPIO 0x9400 +#define STM32F429_PJ4_FUNC_LCD_R5 0x940f +#define STM32F429_PJ4_FUNC_EVENTOUT 0x9410 +#define STM32F429_PJ4_FUNC_ANALOG 0x9411 + +#define STM32F429_PJ5_FUNC_GPIO 0x9500 +#define STM32F429_PJ5_FUNC_LCD_R6 0x950f +#define STM32F429_PJ5_FUNC_EVENTOUT 0x9510 +#define STM32F429_PJ5_FUNC_ANALOG 0x9511 + +#define STM32F429_PJ6_FUNC_GPIO 0x9600 +#define STM32F429_PJ6_FUNC_LCD_R7 0x960f +#define STM32F429_PJ6_FUNC_EVENTOUT 0x9610 +#define STM32F429_PJ6_FUNC_ANALOG 0x9611 + +#define STM32F429_PJ7_FUNC_GPIO 0x9700 +#define STM32F429_PJ7_FUNC_LCD_G0 0x970f +#define STM32F429_PJ7_FUNC_EVENTOUT 0x9710 +#define STM32F429_PJ7_FUNC_ANALOG 0x9711 + +#define STM32F429_PJ8_FUNC_GPIO 0x9800 +#define STM32F429_PJ8_FUNC_LCD_G1 0x980f +#define STM32F429_PJ8_FUNC_EVENTOUT 0x9810 +#define STM32F429_PJ8_FUNC_ANALOG 0x9811 + +#define STM32F429_PJ9_FUNC_GPIO 0x9900 +#define STM32F429_PJ9_FUNC_LCD_G2 0x990f +#define STM32F429_PJ9_FUNC_EVENTOUT 0x9910 +#define STM32F429_PJ9_FUNC_ANALOG 0x9911 + +#define STM32F429_PJ10_FUNC_GPIO 0x9a00 +#define STM32F429_PJ10_FUNC_LCD_G3 0x9a0f +#define STM32F429_PJ10_FUNC_EVENTOUT 0x9a10 +#define STM32F429_PJ10_FUNC_ANALOG 0x9a11 + +#define STM32F429_PJ11_FUNC_GPIO 0x9b00 +#define STM32F429_PJ11_FUNC_LCD_G4 0x9b0f +#define STM32F429_PJ11_FUNC_EVENTOUT 0x9b10 +#define STM32F429_PJ11_FUNC_ANALOG 0x9b11 + +#define STM32F429_PJ12_FUNC_GPIO 0x9c00 +#define STM32F429_PJ12_FUNC_LCD_B0 0x9c0f 
+#define STM32F429_PJ12_FUNC_EVENTOUT 0x9c10 +#define STM32F429_PJ12_FUNC_ANALOG 0x9c11 + +#define STM32F429_PJ13_FUNC_GPIO 0x9d00 +#define STM32F429_PJ13_FUNC_LCD_B1 0x9d0f +#define STM32F429_PJ13_FUNC_EVENTOUT 0x9d10 +#define STM32F429_PJ13_FUNC_ANALOG 0x9d11 + +#define STM32F429_PJ14_FUNC_GPIO 0x9e00 +#define STM32F429_PJ14_FUNC_LCD_B2 0x9e0f +#define STM32F429_PJ14_FUNC_EVENTOUT 0x9e10 +#define STM32F429_PJ14_FUNC_ANALOG 0x9e11 + +#define STM32F429_PJ15_FUNC_GPIO 0x9f00 +#define STM32F429_PJ15_FUNC_LCD_B3 0x9f0f +#define STM32F429_PJ15_FUNC_EVENTOUT 0x9f10 +#define STM32F429_PJ15_FUNC_ANALOG 0x9f11 + + + +#define STM32F429_PK0_FUNC_GPIO 0xa000 +#define STM32F429_PK0_FUNC_LCD_G5 0xa00f +#define STM32F429_PK0_FUNC_EVENTOUT 0xa010 +#define STM32F429_PK0_FUNC_ANALOG 0xa011 + +#define STM32F429_PK1_FUNC_GPIO 0xa100 +#define STM32F429_PK1_FUNC_LCD_G6 0xa10f +#define STM32F429_PK1_FUNC_EVENTOUT 0xa110 +#define STM32F429_PK1_FUNC_ANALOG 0xa111 + +#define STM32F429_PK2_FUNC_GPIO 0xa200 +#define STM32F429_PK2_FUNC_LCD_G7 0xa20f +#define STM32F429_PK2_FUNC_EVENTOUT 0xa210 +#define STM32F429_PK2_FUNC_ANALOG 0xa211 + +#define STM32F429_PK3_FUNC_GPIO 0xa300 +#define STM32F429_PK3_FUNC_LCD_B4 0xa30f +#define STM32F429_PK3_FUNC_EVENTOUT 0xa310 +#define STM32F429_PK3_FUNC_ANALOG 0xa311 + +#define STM32F429_PK4_FUNC_GPIO 0xa400 +#define STM32F429_PK4_FUNC_LCD_B5 0xa40f +#define STM32F429_PK4_FUNC_EVENTOUT 0xa410 +#define STM32F429_PK4_FUNC_ANALOG 0xa411 + +#define STM32F429_PK5_FUNC_GPIO 0xa500 +#define STM32F429_PK5_FUNC_LCD_B6 0xa50f +#define STM32F429_PK5_FUNC_EVENTOUT 0xa510 +#define STM32F429_PK5_FUNC_ANALOG 0xa511 + +#define STM32F429_PK6_FUNC_GPIO 0xa600 +#define STM32F429_PK6_FUNC_LCD_B7 0xa60f +#define STM32F429_PK6_FUNC_EVENTOUT 0xa610 +#define STM32F429_PK6_FUNC_ANALOG 0xa611 + +#define STM32F429_PK7_FUNC_GPIO 0xa700 +#define STM32F429_PK7_FUNC_LCD_DE 0xa70f +#define STM32F429_PK7_FUNC_EVENTOUT 0xa710 +#define STM32F429_PK7_FUNC_ANALOG 0xa711 + +#endif /* 
_DT_BINDINGS_STM32F429_PINFUNC_H */ diff --git a/include/dt-bindings/pinctrl/stm32f746-pinfunc.h b/include/dt-bindings/pinctrl/stm32f746-pinfunc.h new file mode 100644 index 0000000000..6348c6a830 --- /dev/null +++ b/include/dt-bindings/pinctrl/stm32f746-pinfunc.h @@ -0,0 +1,1324 @@ +#ifndef _DT_BINDINGS_STM32F746_PINFUNC_H +#define _DT_BINDINGS_STM32F746_PINFUNC_H + +#define STM32F746_PA0_FUNC_GPIO 0x0 +#define STM32F746_PA0_FUNC_TIM2_CH1_TIM2_ETR 0x2 +#define STM32F746_PA0_FUNC_TIM5_CH1 0x3 +#define STM32F746_PA0_FUNC_TIM8_ETR 0x4 +#define STM32F746_PA0_FUNC_USART2_CTS 0x8 +#define STM32F746_PA0_FUNC_UART4_TX 0x9 +#define STM32F746_PA0_FUNC_SAI2_SD_B 0xb +#define STM32F746_PA0_FUNC_ETH_MII_CRS 0xc +#define STM32F746_PA0_FUNC_EVENTOUT 0x10 +#define STM32F746_PA0_FUNC_ANALOG 0x11 + +#define STM32F746_PA1_FUNC_GPIO 0x100 +#define STM32F746_PA1_FUNC_TIM2_CH2 0x102 +#define STM32F746_PA1_FUNC_TIM5_CH2 0x103 +#define STM32F746_PA1_FUNC_USART2_RTS 0x108 +#define STM32F746_PA1_FUNC_UART4_RX 0x109 +#define STM32F746_PA1_FUNC_QUADSPI_BK1_IO3 0x10a +#define STM32F746_PA1_FUNC_SAI2_MCLK_B 0x10b +#define STM32F746_PA1_FUNC_ETH_MII_RX_CLK_ETH_RMII_REF_CLK 0x10c +#define STM32F746_PA1_FUNC_LCD_R2 0x10f +#define STM32F746_PA1_FUNC_EVENTOUT 0x110 +#define STM32F746_PA1_FUNC_ANALOG 0x111 + +#define STM32F746_PA2_FUNC_GPIO 0x200 +#define STM32F746_PA2_FUNC_TIM2_CH3 0x202 +#define STM32F746_PA2_FUNC_TIM5_CH3 0x203 +#define STM32F746_PA2_FUNC_TIM9_CH1 0x204 +#define STM32F746_PA2_FUNC_USART2_TX 0x208 +#define STM32F746_PA2_FUNC_SAI2_SCK_B 0x209 +#define STM32F746_PA2_FUNC_ETH_MDIO 0x20c +#define STM32F746_PA2_FUNC_LCD_R1 0x20f +#define STM32F746_PA2_FUNC_EVENTOUT 0x210 +#define STM32F746_PA2_FUNC_ANALOG 0x211 + +#define STM32F746_PA3_FUNC_GPIO 0x300 +#define STM32F746_PA3_FUNC_TIM2_CH4 0x302 +#define STM32F746_PA3_FUNC_TIM5_CH4 0x303 +#define STM32F746_PA3_FUNC_TIM9_CH2 0x304 +#define STM32F746_PA3_FUNC_USART2_RX 0x308 +#define STM32F746_PA3_FUNC_OTG_HS_ULPI_D0 0x30b +#define 
STM32F746_PA3_FUNC_ETH_MII_COL 0x30c +#define STM32F746_PA3_FUNC_LCD_B5 0x30f +#define STM32F746_PA3_FUNC_EVENTOUT 0x310 +#define STM32F746_PA3_FUNC_ANALOG 0x311 + +#define STM32F746_PA4_FUNC_GPIO 0x400 +#define STM32F746_PA4_FUNC_SPI1_NSS_I2S1_WS 0x406 +#define STM32F746_PA4_FUNC_SPI3_NSS_I2S3_WS 0x407 +#define STM32F746_PA4_FUNC_USART2_CK 0x408 +#define STM32F746_PA4_FUNC_OTG_HS_SOF 0x40d +#define STM32F746_PA4_FUNC_DCMI_HSYNC 0x40e +#define STM32F746_PA4_FUNC_LCD_VSYNC 0x40f +#define STM32F746_PA4_FUNC_EVENTOUT 0x410 +#define STM32F746_PA4_FUNC_ANALOG 0x411 + +#define STM32F746_PA5_FUNC_GPIO 0x500 +#define STM32F746_PA5_FUNC_TIM2_CH1_TIM2_ETR 0x502 +#define STM32F746_PA5_FUNC_TIM8_CH1N 0x504 +#define STM32F746_PA5_FUNC_SPI1_SCK_I2S1_CK 0x506 +#define STM32F746_PA5_FUNC_OTG_HS_ULPI_CK 0x50b +#define STM32F746_PA5_FUNC_LCD_R4 0x50f +#define STM32F746_PA5_FUNC_EVENTOUT 0x510 +#define STM32F746_PA5_FUNC_ANALOG 0x511 + +#define STM32F746_PA6_FUNC_GPIO 0x600 +#define STM32F746_PA6_FUNC_TIM1_BKIN 0x602 +#define STM32F746_PA6_FUNC_TIM3_CH1 0x603 +#define STM32F746_PA6_FUNC_TIM8_BKIN 0x604 +#define STM32F746_PA6_FUNC_SPI1_MISO 0x606 +#define STM32F746_PA6_FUNC_TIM13_CH1 0x60a +#define STM32F746_PA6_FUNC_DCMI_PIXCLK 0x60e +#define STM32F746_PA6_FUNC_LCD_G2 0x60f +#define STM32F746_PA6_FUNC_EVENTOUT 0x610 +#define STM32F746_PA6_FUNC_ANALOG 0x611 + +#define STM32F746_PA7_FUNC_GPIO 0x700 +#define STM32F746_PA7_FUNC_TIM1_CH1N 0x702 +#define STM32F746_PA7_FUNC_TIM3_CH2 0x703 +#define STM32F746_PA7_FUNC_TIM8_CH1N 0x704 +#define STM32F746_PA7_FUNC_SPI1_MOSI_I2S1_SD 0x706 +#define STM32F746_PA7_FUNC_TIM14_CH1 0x70a +#define STM32F746_PA7_FUNC_ETH_MII_RX_DV_ETH_RMII_CRS_DV 0x70c +#define STM32F746_PA7_FUNC_FMC_SDNWE 0x70d +#define STM32F746_PA7_FUNC_EVENTOUT 0x710 +#define STM32F746_PA7_FUNC_ANALOG 0x711 + +#define STM32F746_PA8_FUNC_GPIO 0x800 +#define STM32F746_PA8_FUNC_MCO1 0x801 +#define STM32F746_PA8_FUNC_TIM1_CH1 0x802 +#define STM32F746_PA8_FUNC_TIM8_BKIN2 0x804 +#define 
STM32F746_PA8_FUNC_I2C3_SCL 0x805 +#define STM32F746_PA8_FUNC_USART1_CK 0x808 +#define STM32F746_PA8_FUNC_OTG_FS_SOF 0x80b +#define STM32F746_PA8_FUNC_LCD_R6 0x80f +#define STM32F746_PA8_FUNC_EVENTOUT 0x810 +#define STM32F746_PA8_FUNC_ANALOG 0x811 + +#define STM32F746_PA9_FUNC_GPIO 0x900 +#define STM32F746_PA9_FUNC_TIM1_CH2 0x902 +#define STM32F746_PA9_FUNC_I2C3_SMBA 0x905 +#define STM32F746_PA9_FUNC_SPI2_SCK_I2S2_CK 0x906 +#define STM32F746_PA9_FUNC_USART1_TX 0x908 +#define STM32F746_PA9_FUNC_DCMI_D0 0x90e +#define STM32F746_PA9_FUNC_EVENTOUT 0x910 +#define STM32F746_PA9_FUNC_ANALOG 0x911 + +#define STM32F746_PA10_FUNC_GPIO 0xa00 +#define STM32F746_PA10_FUNC_TIM1_CH3 0xa02 +#define STM32F746_PA10_FUNC_USART1_RX 0xa08 +#define STM32F746_PA10_FUNC_OTG_FS_ID 0xa0b +#define STM32F746_PA10_FUNC_DCMI_D1 0xa0e +#define STM32F746_PA10_FUNC_EVENTOUT 0xa10 +#define STM32F746_PA10_FUNC_ANALOG 0xa11 + +#define STM32F746_PA11_FUNC_GPIO 0xb00 +#define STM32F746_PA11_FUNC_TIM1_CH4 0xb02 +#define STM32F746_PA11_FUNC_USART1_CTS 0xb08 +#define STM32F746_PA11_FUNC_CAN1_RX 0xb0a +#define STM32F746_PA11_FUNC_OTG_FS_DM 0xb0b +#define STM32F746_PA11_FUNC_LCD_R4 0xb0f +#define STM32F746_PA11_FUNC_EVENTOUT 0xb10 +#define STM32F746_PA11_FUNC_ANALOG 0xb11 + +#define STM32F746_PA12_FUNC_GPIO 0xc00 +#define STM32F746_PA12_FUNC_TIM1_ETR 0xc02 +#define STM32F746_PA12_FUNC_USART1_RTS 0xc08 +#define STM32F746_PA12_FUNC_SAI2_FS_B 0xc09 +#define STM32F746_PA12_FUNC_CAN1_TX 0xc0a +#define STM32F746_PA12_FUNC_OTG_FS_DP 0xc0b +#define STM32F746_PA12_FUNC_LCD_R5 0xc0f +#define STM32F746_PA12_FUNC_EVENTOUT 0xc10 +#define STM32F746_PA12_FUNC_ANALOG 0xc11 + +#define STM32F746_PA13_FUNC_GPIO 0xd00 +#define STM32F746_PA13_FUNC_JTMS_SWDIO 0xd01 +#define STM32F746_PA13_FUNC_EVENTOUT 0xd10 +#define STM32F746_PA13_FUNC_ANALOG 0xd11 + +#define STM32F746_PA14_FUNC_GPIO 0xe00 +#define STM32F746_PA14_FUNC_JTCK_SWCLK 0xe01 +#define STM32F746_PA14_FUNC_EVENTOUT 0xe10 +#define STM32F746_PA14_FUNC_ANALOG 0xe11 + 
+#define STM32F746_PA15_FUNC_GPIO 0xf00 +#define STM32F746_PA15_FUNC_JTDI 0xf01 +#define STM32F746_PA15_FUNC_TIM2_CH1_TIM2_ETR 0xf02 +#define STM32F746_PA15_FUNC_HDMI_CEC 0xf05 +#define STM32F746_PA15_FUNC_SPI1_NSS_I2S1_WS 0xf06 +#define STM32F746_PA15_FUNC_SPI3_NSS_I2S3_WS 0xf07 +#define STM32F746_PA15_FUNC_UART4_RTS 0xf09 +#define STM32F746_PA15_FUNC_EVENTOUT 0xf10 +#define STM32F746_PA15_FUNC_ANALOG 0xf11 + + +#define STM32F746_PB0_FUNC_GPIO 0x1000 +#define STM32F746_PB0_FUNC_TIM1_CH2N 0x1002 +#define STM32F746_PB0_FUNC_TIM3_CH3 0x1003 +#define STM32F746_PB0_FUNC_TIM8_CH2N 0x1004 +#define STM32F746_PB0_FUNC_UART4_CTS 0x1009 +#define STM32F746_PB0_FUNC_LCD_R3 0x100a +#define STM32F746_PB0_FUNC_OTG_HS_ULPI_D1 0x100b +#define STM32F746_PB0_FUNC_ETH_MII_RXD2 0x100c +#define STM32F746_PB0_FUNC_EVENTOUT 0x1010 +#define STM32F746_PB0_FUNC_ANALOG 0x1011 + +#define STM32F746_PB1_FUNC_GPIO 0x1100 +#define STM32F746_PB1_FUNC_TIM1_CH3N 0x1102 +#define STM32F746_PB1_FUNC_TIM3_CH4 0x1103 +#define STM32F746_PB1_FUNC_TIM8_CH3N 0x1104 +#define STM32F746_PB1_FUNC_LCD_R6 0x110a +#define STM32F746_PB1_FUNC_OTG_HS_ULPI_D2 0x110b +#define STM32F746_PB1_FUNC_ETH_MII_RXD3 0x110c +#define STM32F746_PB1_FUNC_EVENTOUT 0x1110 +#define STM32F746_PB1_FUNC_ANALOG 0x1111 + +#define STM32F746_PB2_FUNC_GPIO 0x1200 +#define STM32F746_PB2_FUNC_SAI1_SD_A 0x1207 +#define STM32F746_PB2_FUNC_SPI3_MOSI_I2S3_SD 0x1208 +#define STM32F746_PB2_FUNC_QUADSPI_CLK 0x120a +#define STM32F746_PB2_FUNC_EVENTOUT 0x1210 +#define STM32F746_PB2_FUNC_ANALOG 0x1211 + +#define STM32F746_PB3_FUNC_GPIO 0x1300 +#define STM32F746_PB3_FUNC_JTDO_TRACESWO 0x1301 +#define STM32F746_PB3_FUNC_TIM2_CH2 0x1302 +#define STM32F746_PB3_FUNC_SPI1_SCK_I2S1_CK 0x1306 +#define STM32F746_PB3_FUNC_SPI3_SCK_I2S3_CK 0x1307 +#define STM32F746_PB3_FUNC_EVENTOUT 0x1310 +#define STM32F746_PB3_FUNC_ANALOG 0x1311 + +#define STM32F746_PB4_FUNC_GPIO 0x1400 +#define STM32F746_PB4_FUNC_NJTRST 0x1401 +#define STM32F746_PB4_FUNC_TIM3_CH1 0x1403 +#define 
STM32F746_PB4_FUNC_SPI1_MISO 0x1406 +#define STM32F746_PB4_FUNC_SPI3_MISO 0x1407 +#define STM32F746_PB4_FUNC_SPI2_NSS_I2S2_WS 0x1408 +#define STM32F746_PB4_FUNC_EVENTOUT 0x1410 +#define STM32F746_PB4_FUNC_ANALOG 0x1411 + +#define STM32F746_PB5_FUNC_GPIO 0x1500 +#define STM32F746_PB5_FUNC_TIM3_CH2 0x1503 +#define STM32F746_PB5_FUNC_I2C1_SMBA 0x1505 +#define STM32F746_PB5_FUNC_SPI1_MOSI_I2S1_SD 0x1506 +#define STM32F746_PB5_FUNC_SPI3_MOSI_I2S3_SD 0x1507 +#define STM32F746_PB5_FUNC_CAN2_RX 0x150a +#define STM32F746_PB5_FUNC_OTG_HS_ULPI_D7 0x150b +#define STM32F746_PB5_FUNC_ETH_PPS_OUT 0x150c +#define STM32F746_PB5_FUNC_FMC_SDCKE1 0x150d +#define STM32F746_PB5_FUNC_DCMI_D10 0x150e +#define STM32F746_PB5_FUNC_EVENTOUT 0x1510 +#define STM32F746_PB5_FUNC_ANALOG 0x1511 + +#define STM32F746_PB6_FUNC_GPIO 0x1600 +#define STM32F746_PB6_FUNC_TIM4_CH1 0x1603 +#define STM32F746_PB6_FUNC_HDMI_CEC 0x1604 +#define STM32F746_PB6_FUNC_I2C1_SCL 0x1605 +#define STM32F746_PB6_FUNC_USART1_TX 0x1608 +#define STM32F746_PB6_FUNC_CAN2_TX 0x160a +#define STM32F746_PB6_FUNC_QUADSPI_BK1_NCS 0x160b +#define STM32F746_PB6_FUNC_FMC_SDNE1 0x160d +#define STM32F746_PB6_FUNC_DCMI_D5 0x160e +#define STM32F746_PB6_FUNC_EVENTOUT 0x1610 +#define STM32F746_PB6_FUNC_ANALOG 0x1611 + +#define STM32F746_PB7_FUNC_GPIO 0x1700 +#define STM32F746_PB7_FUNC_TIM4_CH2 0x1703 +#define STM32F746_PB7_FUNC_I2C1_SDA 0x1705 +#define STM32F746_PB7_FUNC_USART1_RX 0x1708 +#define STM32F746_PB7_FUNC_FMC_NL 0x170d +#define STM32F746_PB7_FUNC_DCMI_VSYNC 0x170e +#define STM32F746_PB7_FUNC_EVENTOUT 0x1710 +#define STM32F746_PB7_FUNC_ANALOG 0x1711 + +#define STM32F746_PB8_FUNC_GPIO 0x1800 +#define STM32F746_PB8_FUNC_TIM4_CH3 0x1803 +#define STM32F746_PB8_FUNC_TIM10_CH1 0x1804 +#define STM32F746_PB8_FUNC_I2C1_SCL 0x1805 +#define STM32F746_PB8_FUNC_CAN1_RX 0x180a +#define STM32F746_PB8_FUNC_ETH_MII_TXD3 0x180c +#define STM32F746_PB8_FUNC_SDMMC1_D4 0x180d +#define STM32F746_PB8_FUNC_DCMI_D6 0x180e +#define STM32F746_PB8_FUNC_LCD_B6 
0x180f +#define STM32F746_PB8_FUNC_EVENTOUT 0x1810 +#define STM32F746_PB8_FUNC_ANALOG 0x1811 + +#define STM32F746_PB9_FUNC_GPIO 0x1900 +#define STM32F746_PB9_FUNC_TIM4_CH4 0x1903 +#define STM32F746_PB9_FUNC_TIM11_CH1 0x1904 +#define STM32F746_PB9_FUNC_I2C1_SDA 0x1905 +#define STM32F746_PB9_FUNC_SPI2_NSS_I2S2_WS 0x1906 +#define STM32F746_PB9_FUNC_CAN1_TX 0x190a +#define STM32F746_PB9_FUNC_SDMMC1_D5 0x190d +#define STM32F746_PB9_FUNC_DCMI_D7 0x190e +#define STM32F746_PB9_FUNC_LCD_B7 0x190f +#define STM32F746_PB9_FUNC_EVENTOUT 0x1910 +#define STM32F746_PB9_FUNC_ANALOG 0x1911 + +#define STM32F746_PB10_FUNC_GPIO 0x1a00 +#define STM32F746_PB10_FUNC_TIM2_CH3 0x1a02 +#define STM32F746_PB10_FUNC_I2C2_SCL 0x1a05 +#define STM32F746_PB10_FUNC_SPI2_SCK_I2S2_CK 0x1a06 +#define STM32F746_PB10_FUNC_USART3_TX 0x1a08 +#define STM32F746_PB10_FUNC_OTG_HS_ULPI_D3 0x1a0b +#define STM32F746_PB10_FUNC_ETH_MII_RX_ER 0x1a0c +#define STM32F746_PB10_FUNC_LCD_G4 0x1a0f +#define STM32F746_PB10_FUNC_EVENTOUT 0x1a10 +#define STM32F746_PB10_FUNC_ANALOG 0x1a11 + +#define STM32F746_PB11_FUNC_GPIO 0x1b00 +#define STM32F746_PB11_FUNC_TIM2_CH4 0x1b02 +#define STM32F746_PB11_FUNC_I2C2_SDA 0x1b05 +#define STM32F746_PB11_FUNC_USART3_RX 0x1b08 +#define STM32F746_PB11_FUNC_OTG_HS_ULPI_D4 0x1b0b +#define STM32F746_PB11_FUNC_ETH_MII_TX_EN_ETH_RMII_TX_EN 0x1b0c +#define STM32F746_PB11_FUNC_LCD_G5 0x1b0f +#define STM32F746_PB11_FUNC_EVENTOUT 0x1b10 +#define STM32F746_PB11_FUNC_ANALOG 0x1b11 + +#define STM32F746_PB12_FUNC_GPIO 0x1c00 +#define STM32F746_PB12_FUNC_TIM1_BKIN 0x1c02 +#define STM32F746_PB12_FUNC_I2C2_SMBA 0x1c05 +#define STM32F746_PB12_FUNC_SPI2_NSS_I2S2_WS 0x1c06 +#define STM32F746_PB12_FUNC_USART3_CK 0x1c08 +#define STM32F746_PB12_FUNC_CAN2_RX 0x1c0a +#define STM32F746_PB12_FUNC_OTG_HS_ULPI_D5 0x1c0b +#define STM32F746_PB12_FUNC_ETH_MII_TXD0_ETH_RMII_TXD0 0x1c0c +#define STM32F746_PB12_FUNC_OTG_HS_ID 0x1c0d +#define STM32F746_PB12_FUNC_EVENTOUT 0x1c10 +#define STM32F746_PB12_FUNC_ANALOG 0x1c11 + 
+#define STM32F746_PB13_FUNC_GPIO 0x1d00 +#define STM32F746_PB13_FUNC_TIM1_CH1N 0x1d02 +#define STM32F746_PB13_FUNC_SPI2_SCK_I2S2_CK 0x1d06 +#define STM32F746_PB13_FUNC_USART3_CTS 0x1d08 +#define STM32F746_PB13_FUNC_CAN2_TX 0x1d0a +#define STM32F746_PB13_FUNC_OTG_HS_ULPI_D6 0x1d0b +#define STM32F746_PB13_FUNC_ETH_MII_TXD1_ETH_RMII_TXD1 0x1d0c +#define STM32F746_PB13_FUNC_EVENTOUT 0x1d10 +#define STM32F746_PB13_FUNC_ANALOG 0x1d11 + +#define STM32F746_PB14_FUNC_GPIO 0x1e00 +#define STM32F746_PB14_FUNC_TIM1_CH2N 0x1e02 +#define STM32F746_PB14_FUNC_TIM8_CH2N 0x1e04 +#define STM32F746_PB14_FUNC_SPI2_MISO 0x1e06 +#define STM32F746_PB14_FUNC_USART3_RTS 0x1e08 +#define STM32F746_PB14_FUNC_TIM12_CH1 0x1e0a +#define STM32F746_PB14_FUNC_OTG_HS_DM 0x1e0d +#define STM32F746_PB14_FUNC_EVENTOUT 0x1e10 +#define STM32F746_PB14_FUNC_ANALOG 0x1e11 + +#define STM32F746_PB15_FUNC_GPIO 0x1f00 +#define STM32F746_PB15_FUNC_RTC_REFIN 0x1f01 +#define STM32F746_PB15_FUNC_TIM1_CH3N 0x1f02 +#define STM32F746_PB15_FUNC_TIM8_CH3N 0x1f04 +#define STM32F746_PB15_FUNC_SPI2_MOSI_I2S2_SD 0x1f06 +#define STM32F746_PB15_FUNC_TIM12_CH2 0x1f0a +#define STM32F746_PB15_FUNC_OTG_HS_DP 0x1f0d +#define STM32F746_PB15_FUNC_EVENTOUT 0x1f10 +#define STM32F746_PB15_FUNC_ANALOG 0x1f11 + + +#define STM32F746_PC0_FUNC_GPIO 0x2000 +#define STM32F746_PC0_FUNC_SAI2_FS_B 0x2009 +#define STM32F746_PC0_FUNC_OTG_HS_ULPI_STP 0x200b +#define STM32F746_PC0_FUNC_FMC_SDNWE 0x200d +#define STM32F746_PC0_FUNC_LCD_R5 0x200f +#define STM32F746_PC0_FUNC_EVENTOUT 0x2010 +#define STM32F746_PC0_FUNC_ANALOG 0x2011 + +#define STM32F746_PC1_FUNC_GPIO 0x2100 +#define STM32F746_PC1_FUNC_TRACED0 0x2101 +#define STM32F746_PC1_FUNC_SPI2_MOSI_I2S2_SD 0x2106 +#define STM32F746_PC1_FUNC_SAI1_SD_A 0x2107 +#define STM32F746_PC1_FUNC_ETH_MDC 0x210c +#define STM32F746_PC1_FUNC_EVENTOUT 0x2110 +#define STM32F746_PC1_FUNC_ANALOG 0x2111 + +#define STM32F746_PC2_FUNC_GPIO 0x2200 +#define STM32F746_PC2_FUNC_SPI2_MISO 0x2206 +#define 
STM32F746_PC2_FUNC_OTG_HS_ULPI_DIR 0x220b +#define STM32F746_PC2_FUNC_ETH_MII_TXD2 0x220c +#define STM32F746_PC2_FUNC_FMC_SDNE0 0x220d +#define STM32F746_PC2_FUNC_EVENTOUT 0x2210 +#define STM32F746_PC2_FUNC_ANALOG 0x2211 + +#define STM32F746_PC3_FUNC_GPIO 0x2300 +#define STM32F746_PC3_FUNC_SPI2_MOSI_I2S2_SD 0x2306 +#define STM32F746_PC3_FUNC_OTG_HS_ULPI_NXT 0x230b +#define STM32F746_PC3_FUNC_ETH_MII_TX_CLK 0x230c +#define STM32F746_PC3_FUNC_FMC_SDCKE0 0x230d +#define STM32F746_PC3_FUNC_EVENTOUT 0x2310 +#define STM32F746_PC3_FUNC_ANALOG 0x2311 + +#define STM32F746_PC4_FUNC_GPIO 0x2400 +#define STM32F746_PC4_FUNC_I2S1_MCK 0x2406 +#define STM32F746_PC4_FUNC_SPDIFRX_IN2 0x2409 +#define STM32F746_PC4_FUNC_ETH_MII_RXD0_ETH_RMII_RXD0 0x240c +#define STM32F746_PC4_FUNC_FMC_SDNE0 0x240d +#define STM32F746_PC4_FUNC_EVENTOUT 0x2410 +#define STM32F746_PC4_FUNC_ANALOG 0x2411 + +#define STM32F746_PC5_FUNC_GPIO 0x2500 +#define STM32F746_PC5_FUNC_SPDIFRX_IN3 0x2509 +#define STM32F746_PC5_FUNC_ETH_MII_RXD1_ETH_RMII_RXD1 0x250c +#define STM32F746_PC5_FUNC_FMC_SDCKE0 0x250d +#define STM32F746_PC5_FUNC_EVENTOUT 0x2510 +#define STM32F746_PC5_FUNC_ANALOG 0x2511 + +#define STM32F746_PC6_FUNC_GPIO 0x2600 +#define STM32F746_PC6_FUNC_TIM3_CH1 0x2603 +#define STM32F746_PC6_FUNC_TIM8_CH1 0x2604 +#define STM32F746_PC6_FUNC_I2S2_MCK 0x2606 +#define STM32F746_PC6_FUNC_USART6_TX 0x2609 +#define STM32F746_PC6_FUNC_SDMMC1_D6 0x260d +#define STM32F746_PC6_FUNC_DCMI_D0 0x260e +#define STM32F746_PC6_FUNC_LCD_HSYNC 0x260f +#define STM32F746_PC6_FUNC_EVENTOUT 0x2610 +#define STM32F746_PC6_FUNC_ANALOG 0x2611 + +#define STM32F746_PC7_FUNC_GPIO 0x2700 +#define STM32F746_PC7_FUNC_TIM3_CH2 0x2703 +#define STM32F746_PC7_FUNC_TIM8_CH2 0x2704 +#define STM32F746_PC7_FUNC_I2S3_MCK 0x2707 +#define STM32F746_PC7_FUNC_USART6_RX 0x2709 +#define STM32F746_PC7_FUNC_SDMMC1_D7 0x270d +#define STM32F746_PC7_FUNC_DCMI_D1 0x270e +#define STM32F746_PC7_FUNC_LCD_G6 0x270f +#define STM32F746_PC7_FUNC_EVENTOUT 0x2710 +#define 
STM32F746_PC7_FUNC_ANALOG 0x2711 + +#define STM32F746_PC8_FUNC_GPIO 0x2800 +#define STM32F746_PC8_FUNC_TRACED1 0x2801 +#define STM32F746_PC8_FUNC_TIM3_CH3 0x2803 +#define STM32F746_PC8_FUNC_TIM8_CH3 0x2804 +#define STM32F746_PC8_FUNC_UART5_RTS 0x2808 +#define STM32F746_PC8_FUNC_USART6_CK 0x2809 +#define STM32F746_PC8_FUNC_SDMMC1_D0 0x280d +#define STM32F746_PC8_FUNC_DCMI_D2 0x280e +#define STM32F746_PC8_FUNC_EVENTOUT 0x2810 +#define STM32F746_PC8_FUNC_ANALOG 0x2811 + +#define STM32F746_PC9_FUNC_GPIO 0x2900 +#define STM32F746_PC9_FUNC_MCO2 0x2901 +#define STM32F746_PC9_FUNC_TIM3_CH4 0x2903 +#define STM32F746_PC9_FUNC_TIM8_CH4 0x2904 +#define STM32F746_PC9_FUNC_I2C3_SDA 0x2905 +#define STM32F746_PC9_FUNC_I2S_CKIN 0x2906 +#define STM32F746_PC9_FUNC_UART5_CTS 0x2908 +#define STM32F746_PC9_FUNC_QUADSPI_BK1_IO0 0x290a +#define STM32F746_PC9_FUNC_SDMMC1_D1 0x290d +#define STM32F746_PC9_FUNC_DCMI_D3 0x290e +#define STM32F746_PC9_FUNC_EVENTOUT 0x2910 +#define STM32F746_PC9_FUNC_ANALOG 0x2911 + +#define STM32F746_PC10_FUNC_GPIO 0x2a00 +#define STM32F746_PC10_FUNC_SPI3_SCK_I2S3_CK 0x2a07 +#define STM32F746_PC10_FUNC_USART3_TX 0x2a08 +#define STM32F746_PC10_FUNC_UART4_TX 0x2a09 +#define STM32F746_PC10_FUNC_QUADSPI_BK1_IO1 0x2a0a +#define STM32F746_PC10_FUNC_SDMMC1_D2 0x2a0d +#define STM32F746_PC10_FUNC_DCMI_D8 0x2a0e +#define STM32F746_PC10_FUNC_LCD_R2 0x2a0f +#define STM32F746_PC10_FUNC_EVENTOUT 0x2a10 +#define STM32F746_PC10_FUNC_ANALOG 0x2a11 + +#define STM32F746_PC11_FUNC_GPIO 0x2b00 +#define STM32F746_PC11_FUNC_SPI3_MISO 0x2b07 +#define STM32F746_PC11_FUNC_USART3_RX 0x2b08 +#define STM32F746_PC11_FUNC_UART4_RX 0x2b09 +#define STM32F746_PC11_FUNC_QUADSPI_BK2_NCS 0x2b0a +#define STM32F746_PC11_FUNC_SDMMC1_D3 0x2b0d +#define STM32F746_PC11_FUNC_DCMI_D4 0x2b0e +#define STM32F746_PC11_FUNC_EVENTOUT 0x2b10 +#define STM32F746_PC11_FUNC_ANALOG 0x2b11 + +#define STM32F746_PC12_FUNC_GPIO 0x2c00 +#define STM32F746_PC12_FUNC_TRACED3 0x2c01 +#define 
STM32F746_PC12_FUNC_SPI3_MOSI_I2S3_SD 0x2c07 +#define STM32F746_PC12_FUNC_USART3_CK 0x2c08 +#define STM32F746_PC12_FUNC_UART5_TX 0x2c09 +#define STM32F746_PC12_FUNC_SDMMC1_CK 0x2c0d +#define STM32F746_PC12_FUNC_DCMI_D9 0x2c0e +#define STM32F746_PC12_FUNC_EVENTOUT 0x2c10 +#define STM32F746_PC12_FUNC_ANALOG 0x2c11 + +#define STM32F746_PC13_FUNC_GPIO 0x2d00 +#define STM32F746_PC13_FUNC_EVENTOUT 0x2d10 +#define STM32F746_PC13_FUNC_ANALOG 0x2d11 + +#define STM32F746_PC14_FUNC_GPIO 0x2e00 +#define STM32F746_PC14_FUNC_EVENTOUT 0x2e10 +#define STM32F746_PC14_FUNC_ANALOG 0x2e11 + +#define STM32F746_PC15_FUNC_GPIO 0x2f00 +#define STM32F746_PC15_FUNC_EVENTOUT 0x2f10 +#define STM32F746_PC15_FUNC_ANALOG 0x2f11 + + +#define STM32F746_PD0_FUNC_GPIO 0x3000 +#define STM32F746_PD0_FUNC_CAN1_RX 0x300a +#define STM32F746_PD0_FUNC_FMC_D2 0x300d +#define STM32F746_PD0_FUNC_EVENTOUT 0x3010 +#define STM32F746_PD0_FUNC_ANALOG 0x3011 + +#define STM32F746_PD1_FUNC_GPIO 0x3100 +#define STM32F746_PD1_FUNC_CAN1_TX 0x310a +#define STM32F746_PD1_FUNC_FMC_D3 0x310d +#define STM32F746_PD1_FUNC_EVENTOUT 0x3110 +#define STM32F746_PD1_FUNC_ANALOG 0x3111 + +#define STM32F746_PD2_FUNC_GPIO 0x3200 +#define STM32F746_PD2_FUNC_TRACED2 0x3201 +#define STM32F746_PD2_FUNC_TIM3_ETR 0x3203 +#define STM32F746_PD2_FUNC_UART5_RX 0x3209 +#define STM32F746_PD2_FUNC_SDMMC1_CMD 0x320d +#define STM32F746_PD2_FUNC_DCMI_D11 0x320e +#define STM32F746_PD2_FUNC_EVENTOUT 0x3210 +#define STM32F746_PD2_FUNC_ANALOG 0x3211 + +#define STM32F746_PD3_FUNC_GPIO 0x3300 +#define STM32F746_PD3_FUNC_SPI2_SCK_I2S2_CK 0x3306 +#define STM32F746_PD3_FUNC_USART2_CTS 0x3308 +#define STM32F746_PD3_FUNC_FMC_CLK 0x330d +#define STM32F746_PD3_FUNC_DCMI_D5 0x330e +#define STM32F746_PD3_FUNC_LCD_G7 0x330f +#define STM32F746_PD3_FUNC_EVENTOUT 0x3310 +#define STM32F746_PD3_FUNC_ANALOG 0x3311 + +#define STM32F746_PD4_FUNC_GPIO 0x3400 +#define STM32F746_PD4_FUNC_USART2_RTS 0x3408 +#define STM32F746_PD4_FUNC_FMC_NOE 0x340d +#define 
STM32F746_PD4_FUNC_EVENTOUT 0x3410 +#define STM32F746_PD4_FUNC_ANALOG 0x3411 + +#define STM32F746_PD5_FUNC_GPIO 0x3500 +#define STM32F746_PD5_FUNC_USART2_TX 0x3508 +#define STM32F746_PD5_FUNC_FMC_NWE 0x350d +#define STM32F746_PD5_FUNC_EVENTOUT 0x3510 +#define STM32F746_PD5_FUNC_ANALOG 0x3511 + +#define STM32F746_PD6_FUNC_GPIO 0x3600 +#define STM32F746_PD6_FUNC_SPI3_MOSI_I2S3_SD 0x3606 +#define STM32F746_PD6_FUNC_SAI1_SD_A 0x3607 +#define STM32F746_PD6_FUNC_USART2_RX 0x3608 +#define STM32F746_PD6_FUNC_FMC_NWAIT 0x360d +#define STM32F746_PD6_FUNC_DCMI_D10 0x360e +#define STM32F746_PD6_FUNC_LCD_B2 0x360f +#define STM32F746_PD6_FUNC_EVENTOUT 0x3610 +#define STM32F746_PD6_FUNC_ANALOG 0x3611 + +#define STM32F746_PD7_FUNC_GPIO 0x3700 +#define STM32F746_PD7_FUNC_USART2_CK 0x3708 +#define STM32F746_PD7_FUNC_SPDIFRX_IN0 0x3709 +#define STM32F746_PD7_FUNC_FMC_NE1 0x370d +#define STM32F746_PD7_FUNC_EVENTOUT 0x3710 +#define STM32F746_PD7_FUNC_ANALOG 0x3711 + +#define STM32F746_PD8_FUNC_GPIO 0x3800 +#define STM32F746_PD8_FUNC_USART3_TX 0x3808 +#define STM32F746_PD8_FUNC_SPDIFRX_IN1 0x3809 +#define STM32F746_PD8_FUNC_FMC_D13 0x380d +#define STM32F746_PD8_FUNC_EVENTOUT 0x3810 +#define STM32F746_PD8_FUNC_ANALOG 0x3811 + +#define STM32F746_PD9_FUNC_GPIO 0x3900 +#define STM32F746_PD9_FUNC_USART3_RX 0x3908 +#define STM32F746_PD9_FUNC_FMC_D14 0x390d +#define STM32F746_PD9_FUNC_EVENTOUT 0x3910 +#define STM32F746_PD9_FUNC_ANALOG 0x3911 + +#define STM32F746_PD10_FUNC_GPIO 0x3a00 +#define STM32F746_PD10_FUNC_USART3_CK 0x3a08 +#define STM32F746_PD10_FUNC_FMC_D15 0x3a0d +#define STM32F746_PD10_FUNC_LCD_B3 0x3a0f +#define STM32F746_PD10_FUNC_EVENTOUT 0x3a10 +#define STM32F746_PD10_FUNC_ANALOG 0x3a11 + +#define STM32F746_PD11_FUNC_GPIO 0x3b00 +#define STM32F746_PD11_FUNC_I2C4_SMBA 0x3b05 +#define STM32F746_PD11_FUNC_USART3_CTS 0x3b08 +#define STM32F746_PD11_FUNC_QUADSPI_BK1_IO0 0x3b0a +#define STM32F746_PD11_FUNC_SAI2_SD_A 0x3b0b +#define STM32F746_PD11_FUNC_FMC_A16_FMC_CLE 0x3b0d +#define 
STM32F746_PD11_FUNC_EVENTOUT 0x3b10 +#define STM32F746_PD11_FUNC_ANALOG 0x3b11 + +#define STM32F746_PD12_FUNC_GPIO 0x3c00 +#define STM32F746_PD12_FUNC_TIM4_CH1 0x3c03 +#define STM32F746_PD12_FUNC_LPTIM1_IN1 0x3c04 +#define STM32F746_PD12_FUNC_I2C4_SCL 0x3c05 +#define STM32F746_PD12_FUNC_USART3_RTS 0x3c08 +#define STM32F746_PD12_FUNC_QUADSPI_BK1_IO1 0x3c0a +#define STM32F746_PD12_FUNC_SAI2_FS_A 0x3c0b +#define STM32F746_PD12_FUNC_FMC_A17_FMC_ALE 0x3c0d +#define STM32F746_PD12_FUNC_EVENTOUT 0x3c10 +#define STM32F746_PD12_FUNC_ANALOG 0x3c11 + +#define STM32F746_PD13_FUNC_GPIO 0x3d00 +#define STM32F746_PD13_FUNC_TIM4_CH2 0x3d03 +#define STM32F746_PD13_FUNC_LPTIM1_OUT 0x3d04 +#define STM32F746_PD13_FUNC_I2C4_SDA 0x3d05 +#define STM32F746_PD13_FUNC_QUADSPI_BK1_IO3 0x3d0a +#define STM32F746_PD13_FUNC_SAI2_SCK_A 0x3d0b +#define STM32F746_PD13_FUNC_FMC_A18 0x3d0d +#define STM32F746_PD13_FUNC_EVENTOUT 0x3d10 +#define STM32F746_PD13_FUNC_ANALOG 0x3d11 + +#define STM32F746_PD14_FUNC_GPIO 0x3e00 +#define STM32F746_PD14_FUNC_TIM4_CH3 0x3e03 +#define STM32F746_PD14_FUNC_UART8_CTS 0x3e09 +#define STM32F746_PD14_FUNC_FMC_D0 0x3e0d +#define STM32F746_PD14_FUNC_EVENTOUT 0x3e10 +#define STM32F746_PD14_FUNC_ANALOG 0x3e11 + +#define STM32F746_PD15_FUNC_GPIO 0x3f00 +#define STM32F746_PD15_FUNC_TIM4_CH4 0x3f03 +#define STM32F746_PD15_FUNC_UART8_RTS 0x3f09 +#define STM32F746_PD15_FUNC_FMC_D1 0x3f0d +#define STM32F746_PD15_FUNC_EVENTOUT 0x3f10 +#define STM32F746_PD15_FUNC_ANALOG 0x3f11 + + +#define STM32F746_PE0_FUNC_GPIO 0x4000 +#define STM32F746_PE0_FUNC_TIM4_ETR 0x4003 +#define STM32F746_PE0_FUNC_LPTIM1_ETR 0x4004 +#define STM32F746_PE0_FUNC_UART8_RX 0x4009 +#define STM32F746_PE0_FUNC_SAI2_MCLK_A 0x400b +#define STM32F746_PE0_FUNC_FMC_NBL0 0x400d +#define STM32F746_PE0_FUNC_DCMI_D2 0x400e +#define STM32F746_PE0_FUNC_EVENTOUT 0x4010 +#define STM32F746_PE0_FUNC_ANALOG 0x4011 + +#define STM32F746_PE1_FUNC_GPIO 0x4100 +#define STM32F746_PE1_FUNC_LPTIM1_IN2 0x4104 +#define 
STM32F746_PE1_FUNC_UART8_TX 0x4109 +#define STM32F746_PE1_FUNC_FMC_NBL1 0x410d +#define STM32F746_PE1_FUNC_DCMI_D3 0x410e +#define STM32F746_PE1_FUNC_EVENTOUT 0x4110 +#define STM32F746_PE1_FUNC_ANALOG 0x4111 + +#define STM32F746_PE2_FUNC_GPIO 0x4200 +#define STM32F746_PE2_FUNC_TRACECLK 0x4201 +#define STM32F746_PE2_FUNC_SPI4_SCK 0x4206 +#define STM32F746_PE2_FUNC_SAI1_MCLK_A 0x4207 +#define STM32F746_PE2_FUNC_QUADSPI_BK1_IO2 0x420a +#define STM32F746_PE2_FUNC_ETH_MII_TXD3 0x420c +#define STM32F746_PE2_FUNC_FMC_A23 0x420d +#define STM32F746_PE2_FUNC_EVENTOUT 0x4210 +#define STM32F746_PE2_FUNC_ANALOG 0x4211 + +#define STM32F746_PE3_FUNC_GPIO 0x4300 +#define STM32F746_PE3_FUNC_TRACED0 0x4301 +#define STM32F746_PE3_FUNC_SAI1_SD_B 0x4307 +#define STM32F746_PE3_FUNC_FMC_A19 0x430d +#define STM32F746_PE3_FUNC_EVENTOUT 0x4310 +#define STM32F746_PE3_FUNC_ANALOG 0x4311 + +#define STM32F746_PE4_FUNC_GPIO 0x4400 +#define STM32F746_PE4_FUNC_TRACED1 0x4401 +#define STM32F746_PE4_FUNC_SPI4_NSS 0x4406 +#define STM32F746_PE4_FUNC_SAI1_FS_A 0x4407 +#define STM32F746_PE4_FUNC_FMC_A20 0x440d +#define STM32F746_PE4_FUNC_DCMI_D4 0x440e +#define STM32F746_PE4_FUNC_LCD_B0 0x440f +#define STM32F746_PE4_FUNC_EVENTOUT 0x4410 +#define STM32F746_PE4_FUNC_ANALOG 0x4411 + +#define STM32F746_PE5_FUNC_GPIO 0x4500 +#define STM32F746_PE5_FUNC_TRACED2 0x4501 +#define STM32F746_PE5_FUNC_TIM9_CH1 0x4504 +#define STM32F746_PE5_FUNC_SPI4_MISO 0x4506 +#define STM32F746_PE5_FUNC_SAI1_SCK_A 0x4507 +#define STM32F746_PE5_FUNC_FMC_A21 0x450d +#define STM32F746_PE5_FUNC_DCMI_D6 0x450e +#define STM32F746_PE5_FUNC_LCD_G0 0x450f +#define STM32F746_PE5_FUNC_EVENTOUT 0x4510 +#define STM32F746_PE5_FUNC_ANALOG 0x4511 + +#define STM32F746_PE6_FUNC_GPIO 0x4600 +#define STM32F746_PE6_FUNC_TRACED3 0x4601 +#define STM32F746_PE6_FUNC_TIM1_BKIN2 0x4602 +#define STM32F746_PE6_FUNC_TIM9_CH2 0x4604 +#define STM32F746_PE6_FUNC_SPI4_MOSI 0x4606 +#define STM32F746_PE6_FUNC_SAI1_SD_A 0x4607 +#define STM32F746_PE6_FUNC_SAI2_MCLK_B 
0x460b +#define STM32F746_PE6_FUNC_FMC_A22 0x460d +#define STM32F746_PE6_FUNC_DCMI_D7 0x460e +#define STM32F746_PE6_FUNC_LCD_G1 0x460f +#define STM32F746_PE6_FUNC_EVENTOUT 0x4610 +#define STM32F746_PE6_FUNC_ANALOG 0x4611 + +#define STM32F746_PE7_FUNC_GPIO 0x4700 +#define STM32F746_PE7_FUNC_TIM1_ETR 0x4702 +#define STM32F746_PE7_FUNC_UART7_RX 0x4709 +#define STM32F746_PE7_FUNC_QUADSPI_BK2_IO0 0x470b +#define STM32F746_PE7_FUNC_FMC_D4 0x470d +#define STM32F746_PE7_FUNC_EVENTOUT 0x4710 +#define STM32F746_PE7_FUNC_ANALOG 0x4711 + +#define STM32F746_PE8_FUNC_GPIO 0x4800 +#define STM32F746_PE8_FUNC_TIM1_CH1N 0x4802 +#define STM32F746_PE8_FUNC_UART7_TX 0x4809 +#define STM32F746_PE8_FUNC_QUADSPI_BK2_IO1 0x480b +#define STM32F746_PE8_FUNC_FMC_D5 0x480d +#define STM32F746_PE8_FUNC_EVENTOUT 0x4810 +#define STM32F746_PE8_FUNC_ANALOG 0x4811 + +#define STM32F746_PE9_FUNC_GPIO 0x4900 +#define STM32F746_PE9_FUNC_TIM1_CH1 0x4902 +#define STM32F746_PE9_FUNC_UART7_RTS 0x4909 +#define STM32F746_PE9_FUNC_QUADSPI_BK2_IO2 0x490b +#define STM32F746_PE9_FUNC_FMC_D6 0x490d +#define STM32F746_PE9_FUNC_EVENTOUT 0x4910 +#define STM32F746_PE9_FUNC_ANALOG 0x4911 + +#define STM32F746_PE10_FUNC_GPIO 0x4a00 +#define STM32F746_PE10_FUNC_TIM1_CH2N 0x4a02 +#define STM32F746_PE10_FUNC_UART7_CTS 0x4a09 +#define STM32F746_PE10_FUNC_QUADSPI_BK2_IO3 0x4a0b +#define STM32F746_PE10_FUNC_FMC_D7 0x4a0d +#define STM32F746_PE10_FUNC_EVENTOUT 0x4a10 +#define STM32F746_PE10_FUNC_ANALOG 0x4a11 + +#define STM32F746_PE11_FUNC_GPIO 0x4b00 +#define STM32F746_PE11_FUNC_TIM1_CH2 0x4b02 +#define STM32F746_PE11_FUNC_SPI4_NSS 0x4b06 +#define STM32F746_PE11_FUNC_SAI2_SD_B 0x4b0b +#define STM32F746_PE11_FUNC_FMC_D8 0x4b0d +#define STM32F746_PE11_FUNC_LCD_G3 0x4b0f +#define STM32F746_PE11_FUNC_EVENTOUT 0x4b10 +#define STM32F746_PE11_FUNC_ANALOG 0x4b11 + +#define STM32F746_PE12_FUNC_GPIO 0x4c00 +#define STM32F746_PE12_FUNC_TIM1_CH3N 0x4c02 +#define STM32F746_PE12_FUNC_SPI4_SCK 0x4c06 +#define STM32F746_PE12_FUNC_SAI2_SCK_B 
0x4c0b +#define STM32F746_PE12_FUNC_FMC_D9 0x4c0d +#define STM32F746_PE12_FUNC_LCD_B4 0x4c0f +#define STM32F746_PE12_FUNC_EVENTOUT 0x4c10 +#define STM32F746_PE12_FUNC_ANALOG 0x4c11 + +#define STM32F746_PE13_FUNC_GPIO 0x4d00 +#define STM32F746_PE13_FUNC_TIM1_CH3 0x4d02 +#define STM32F746_PE13_FUNC_SPI4_MISO 0x4d06 +#define STM32F746_PE13_FUNC_SAI2_FS_B 0x4d0b +#define STM32F746_PE13_FUNC_FMC_D10 0x4d0d +#define STM32F746_PE13_FUNC_LCD_DE 0x4d0f +#define STM32F746_PE13_FUNC_EVENTOUT 0x4d10 +#define STM32F746_PE13_FUNC_ANALOG 0x4d11 + +#define STM32F746_PE14_FUNC_GPIO 0x4e00 +#define STM32F746_PE14_FUNC_TIM1_CH4 0x4e02 +#define STM32F746_PE14_FUNC_SPI4_MOSI 0x4e06 +#define STM32F746_PE14_FUNC_SAI2_MCLK_B 0x4e0b +#define STM32F746_PE14_FUNC_FMC_D11 0x4e0d +#define STM32F746_PE14_FUNC_LCD_CLK 0x4e0f +#define STM32F746_PE14_FUNC_EVENTOUT 0x4e10 +#define STM32F746_PE14_FUNC_ANALOG 0x4e11 + +#define STM32F746_PE15_FUNC_GPIO 0x4f00 +#define STM32F746_PE15_FUNC_TIM1_BKIN 0x4f02 +#define STM32F746_PE15_FUNC_FMC_D12 0x4f0d +#define STM32F746_PE15_FUNC_LCD_R7 0x4f0f +#define STM32F746_PE15_FUNC_EVENTOUT 0x4f10 +#define STM32F746_PE15_FUNC_ANALOG 0x4f11 + + +#define STM32F746_PF0_FUNC_GPIO 0x5000 +#define STM32F746_PF0_FUNC_I2C2_SDA 0x5005 +#define STM32F746_PF0_FUNC_FMC_A0 0x500d +#define STM32F746_PF0_FUNC_EVENTOUT 0x5010 +#define STM32F746_PF0_FUNC_ANALOG 0x5011 + +#define STM32F746_PF1_FUNC_GPIO 0x5100 +#define STM32F746_PF1_FUNC_I2C2_SCL 0x5105 +#define STM32F746_PF1_FUNC_FMC_A1 0x510d +#define STM32F746_PF1_FUNC_EVENTOUT 0x5110 +#define STM32F746_PF1_FUNC_ANALOG 0x5111 + +#define STM32F746_PF2_FUNC_GPIO 0x5200 +#define STM32F746_PF2_FUNC_I2C2_SMBA 0x5205 +#define STM32F746_PF2_FUNC_FMC_A2 0x520d +#define STM32F746_PF2_FUNC_EVENTOUT 0x5210 +#define STM32F746_PF2_FUNC_ANALOG 0x5211 + +#define STM32F746_PF3_FUNC_GPIO 0x5300 +#define STM32F746_PF3_FUNC_FMC_A3 0x530d +#define STM32F746_PF3_FUNC_EVENTOUT 0x5310 +#define STM32F746_PF3_FUNC_ANALOG 0x5311 + +#define 
STM32F746_PF4_FUNC_GPIO 0x5400 +#define STM32F746_PF4_FUNC_FMC_A4 0x540d +#define STM32F746_PF4_FUNC_EVENTOUT 0x5410 +#define STM32F746_PF4_FUNC_ANALOG 0x5411 + +#define STM32F746_PF5_FUNC_GPIO 0x5500 +#define STM32F746_PF5_FUNC_FMC_A5 0x550d +#define STM32F746_PF5_FUNC_EVENTOUT 0x5510 +#define STM32F746_PF5_FUNC_ANALOG 0x5511 + +#define STM32F746_PF6_FUNC_GPIO 0x5600 +#define STM32F746_PF6_FUNC_TIM10_CH1 0x5604 +#define STM32F746_PF6_FUNC_SPI5_NSS 0x5606 +#define STM32F746_PF6_FUNC_SAI1_SD_B 0x5607 +#define STM32F746_PF6_FUNC_UART7_RX 0x5609 +#define STM32F746_PF6_FUNC_QUADSPI_BK1_IO3 0x560a +#define STM32F746_PF6_FUNC_EVENTOUT 0x5610 +#define STM32F746_PF6_FUNC_ANALOG 0x5611 + +#define STM32F746_PF7_FUNC_GPIO 0x5700 +#define STM32F746_PF7_FUNC_TIM11_CH1 0x5704 +#define STM32F746_PF7_FUNC_SPI5_SCK 0x5706 +#define STM32F746_PF7_FUNC_SAI1_MCLK_B 0x5707 +#define STM32F746_PF7_FUNC_UART7_TX 0x5709 +#define STM32F746_PF7_FUNC_QUADSPI_BK1_IO2 0x570a +#define STM32F746_PF7_FUNC_EVENTOUT 0x5710 +#define STM32F746_PF7_FUNC_ANALOG 0x5711 + +#define STM32F746_PF8_FUNC_GPIO 0x5800 +#define STM32F746_PF8_FUNC_SPI5_MISO 0x5806 +#define STM32F746_PF8_FUNC_SAI1_SCK_B 0x5807 +#define STM32F746_PF8_FUNC_UART7_RTS 0x5809 +#define STM32F746_PF8_FUNC_TIM13_CH1 0x580a +#define STM32F746_PF8_FUNC_QUADSPI_BK1_IO0 0x580b +#define STM32F746_PF8_FUNC_EVENTOUT 0x5810 +#define STM32F746_PF8_FUNC_ANALOG 0x5811 + +#define STM32F746_PF9_FUNC_GPIO 0x5900 +#define STM32F746_PF9_FUNC_SPI5_MOSI 0x5906 +#define STM32F746_PF9_FUNC_SAI1_FS_B 0x5907 +#define STM32F746_PF9_FUNC_UART7_CTS 0x5909 +#define STM32F746_PF9_FUNC_TIM14_CH1 0x590a +#define STM32F746_PF9_FUNC_QUADSPI_BK1_IO1 0x590b +#define STM32F746_PF9_FUNC_EVENTOUT 0x5910 +#define STM32F746_PF9_FUNC_ANALOG 0x5911 + +#define STM32F746_PF10_FUNC_GPIO 0x5a00 +#define STM32F746_PF10_FUNC_DCMI_D11 0x5a0e +#define STM32F746_PF10_FUNC_LCD_DE 0x5a0f +#define STM32F746_PF10_FUNC_EVENTOUT 0x5a10 +#define STM32F746_PF10_FUNC_ANALOG 0x5a11 + +#define 
STM32F746_PF11_FUNC_GPIO 0x5b00 +#define STM32F746_PF11_FUNC_SPI5_MOSI 0x5b06 +#define STM32F746_PF11_FUNC_SAI2_SD_B 0x5b0b +#define STM32F746_PF11_FUNC_FMC_SDNRAS 0x5b0d +#define STM32F746_PF11_FUNC_DCMI_D12 0x5b0e +#define STM32F746_PF11_FUNC_EVENTOUT 0x5b10 +#define STM32F746_PF11_FUNC_ANALOG 0x5b11 + +#define STM32F746_PF12_FUNC_GPIO 0x5c00 +#define STM32F746_PF12_FUNC_FMC_A6 0x5c0d +#define STM32F746_PF12_FUNC_EVENTOUT 0x5c10 +#define STM32F746_PF12_FUNC_ANALOG 0x5c11 + +#define STM32F746_PF13_FUNC_GPIO 0x5d00 +#define STM32F746_PF13_FUNC_I2C4_SMBA 0x5d05 +#define STM32F746_PF13_FUNC_FMC_A7 0x5d0d +#define STM32F746_PF13_FUNC_EVENTOUT 0x5d10 +#define STM32F746_PF13_FUNC_ANALOG 0x5d11 + +#define STM32F746_PF14_FUNC_GPIO 0x5e00 +#define STM32F746_PF14_FUNC_I2C4_SCL 0x5e05 +#define STM32F746_PF14_FUNC_FMC_A8 0x5e0d +#define STM32F746_PF14_FUNC_EVENTOUT 0x5e10 +#define STM32F746_PF14_FUNC_ANALOG 0x5e11 + +#define STM32F746_PF15_FUNC_GPIO 0x5f00 +#define STM32F746_PF15_FUNC_I2C4_SDA 0x5f05 +#define STM32F746_PF15_FUNC_FMC_A9 0x5f0d +#define STM32F746_PF15_FUNC_EVENTOUT 0x5f10 +#define STM32F746_PF15_FUNC_ANALOG 0x5f11 + + +#define STM32F746_PG0_FUNC_GPIO 0x6000 +#define STM32F746_PG0_FUNC_FMC_A10 0x600d +#define STM32F746_PG0_FUNC_EVENTOUT 0x6010 +#define STM32F746_PG0_FUNC_ANALOG 0x6011 + +#define STM32F746_PG1_FUNC_GPIO 0x6100 +#define STM32F746_PG1_FUNC_FMC_A11 0x610d +#define STM32F746_PG1_FUNC_EVENTOUT 0x6110 +#define STM32F746_PG1_FUNC_ANALOG 0x6111 + +#define STM32F746_PG2_FUNC_GPIO 0x6200 +#define STM32F746_PG2_FUNC_FMC_A12 0x620d +#define STM32F746_PG2_FUNC_EVENTOUT 0x6210 +#define STM32F746_PG2_FUNC_ANALOG 0x6211 + +#define STM32F746_PG3_FUNC_GPIO 0x6300 +#define STM32F746_PG3_FUNC_FMC_A13 0x630d +#define STM32F746_PG3_FUNC_EVENTOUT 0x6310 +#define STM32F746_PG3_FUNC_ANALOG 0x6311 + +#define STM32F746_PG4_FUNC_GPIO 0x6400 +#define STM32F746_PG4_FUNC_FMC_A14_FMC_BA0 0x640d +#define STM32F746_PG4_FUNC_EVENTOUT 0x6410 +#define STM32F746_PG4_FUNC_ANALOG 
0x6411 + +#define STM32F746_PG5_FUNC_GPIO 0x6500 +#define STM32F746_PG5_FUNC_FMC_A15_FMC_BA1 0x650d +#define STM32F746_PG5_FUNC_EVENTOUT 0x6510 +#define STM32F746_PG5_FUNC_ANALOG 0x6511 + +#define STM32F746_PG6_FUNC_GPIO 0x6600 +#define STM32F746_PG6_FUNC_DCMI_D12 0x660e +#define STM32F746_PG6_FUNC_LCD_R7 0x660f +#define STM32F746_PG6_FUNC_EVENTOUT 0x6610 +#define STM32F746_PG6_FUNC_ANALOG 0x6611 + +#define STM32F746_PG7_FUNC_GPIO 0x6700 +#define STM32F746_PG7_FUNC_USART6_CK 0x6709 +#define STM32F746_PG7_FUNC_FMC_INT 0x670d +#define STM32F746_PG7_FUNC_DCMI_D13 0x670e +#define STM32F746_PG7_FUNC_LCD_CLK 0x670f +#define STM32F746_PG7_FUNC_EVENTOUT 0x6710 +#define STM32F746_PG7_FUNC_ANALOG 0x6711 + +#define STM32F746_PG8_FUNC_GPIO 0x6800 +#define STM32F746_PG8_FUNC_SPI6_NSS 0x6806 +#define STM32F746_PG8_FUNC_SPDIFRX_IN2 0x6808 +#define STM32F746_PG8_FUNC_USART6_RTS 0x6809 +#define STM32F746_PG8_FUNC_ETH_PPS_OUT 0x680c +#define STM32F746_PG8_FUNC_FMC_SDCLK 0x680d +#define STM32F746_PG8_FUNC_EVENTOUT 0x6810 +#define STM32F746_PG8_FUNC_ANALOG 0x6811 + +#define STM32F746_PG9_FUNC_GPIO 0x6900 +#define STM32F746_PG9_FUNC_SPDIFRX_IN3 0x6908 +#define STM32F746_PG9_FUNC_USART6_RX 0x6909 +#define STM32F746_PG9_FUNC_QUADSPI_BK2_IO2 0x690a +#define STM32F746_PG9_FUNC_SAI2_FS_B 0x690b +#define STM32F746_PG9_FUNC_FMC_NE2_FMC_NCE 0x690d +#define STM32F746_PG9_FUNC_DCMI_VSYNC 0x690e +#define STM32F746_PG9_FUNC_EVENTOUT 0x6910 +#define STM32F746_PG9_FUNC_ANALOG 0x6911 + +#define STM32F746_PG10_FUNC_GPIO 0x6a00 +#define STM32F746_PG10_FUNC_LCD_G3 0x6a0a +#define STM32F746_PG10_FUNC_SAI2_SD_B 0x6a0b +#define STM32F746_PG10_FUNC_FMC_NE3 0x6a0d +#define STM32F746_PG10_FUNC_DCMI_D2 0x6a0e +#define STM32F746_PG10_FUNC_LCD_B2 0x6a0f +#define STM32F746_PG10_FUNC_EVENTOUT 0x6a10 +#define STM32F746_PG10_FUNC_ANALOG 0x6a11 + +#define STM32F746_PG11_FUNC_GPIO 0x6b00 +#define STM32F746_PG11_FUNC_SPDIFRX_IN0 0x6b08 +#define STM32F746_PG11_FUNC_ETH_MII_TX_EN_ETH_RMII_TX_EN 0x6b0c +#define 
STM32F746_PG11_FUNC_DCMI_D3 0x6b0e +#define STM32F746_PG11_FUNC_LCD_B3 0x6b0f +#define STM32F746_PG11_FUNC_EVENTOUT 0x6b10 +#define STM32F746_PG11_FUNC_ANALOG 0x6b11 + +#define STM32F746_PG12_FUNC_GPIO 0x6c00 +#define STM32F746_PG12_FUNC_LPTIM1_IN1 0x6c04 +#define STM32F746_PG12_FUNC_SPI6_MISO 0x6c06 +#define STM32F746_PG12_FUNC_SPDIFRX_IN1 0x6c08 +#define STM32F746_PG12_FUNC_USART6_RTS 0x6c09 +#define STM32F746_PG12_FUNC_LCD_B4 0x6c0a +#define STM32F746_PG12_FUNC_FMC_NE4 0x6c0d +#define STM32F746_PG12_FUNC_LCD_B1 0x6c0f +#define STM32F746_PG12_FUNC_EVENTOUT 0x6c10 +#define STM32F746_PG12_FUNC_ANALOG 0x6c11 + +#define STM32F746_PG13_FUNC_GPIO 0x6d00 +#define STM32F746_PG13_FUNC_TRACED0 0x6d01 +#define STM32F746_PG13_FUNC_LPTIM1_OUT 0x6d04 +#define STM32F746_PG13_FUNC_SPI6_SCK 0x6d06 +#define STM32F746_PG13_FUNC_USART6_CTS 0x6d09 +#define STM32F746_PG13_FUNC_ETH_MII_TXD0_ETH_RMII_TXD0 0x6d0c +#define STM32F746_PG13_FUNC_FMC_A24 0x6d0d +#define STM32F746_PG13_FUNC_LCD_R0 0x6d0f +#define STM32F746_PG13_FUNC_EVENTOUT 0x6d10 +#define STM32F746_PG13_FUNC_ANALOG 0x6d11 + +#define STM32F746_PG14_FUNC_GPIO 0x6e00 +#define STM32F746_PG14_FUNC_TRACED1 0x6e01 +#define STM32F746_PG14_FUNC_LPTIM1_ETR 0x6e04 +#define STM32F746_PG14_FUNC_SPI6_MOSI 0x6e06 +#define STM32F746_PG14_FUNC_USART6_TX 0x6e09 +#define STM32F746_PG14_FUNC_QUADSPI_BK2_IO3 0x6e0a +#define STM32F746_PG14_FUNC_ETH_MII_TXD1_ETH_RMII_TXD1 0x6e0c +#define STM32F746_PG14_FUNC_FMC_A25 0x6e0d +#define STM32F746_PG14_FUNC_LCD_B0 0x6e0f +#define STM32F746_PG14_FUNC_EVENTOUT 0x6e10 +#define STM32F746_PG14_FUNC_ANALOG 0x6e11 + +#define STM32F746_PG15_FUNC_GPIO 0x6f00 +#define STM32F746_PG15_FUNC_USART6_CTS 0x6f09 +#define STM32F746_PG15_FUNC_FMC_SDNCAS 0x6f0d +#define STM32F746_PG15_FUNC_DCMI_D13 0x6f0e +#define STM32F746_PG15_FUNC_EVENTOUT 0x6f10 +#define STM32F746_PG15_FUNC_ANALOG 0x6f11 + + +#define STM32F746_PH0_FUNC_GPIO 0x7000 +#define STM32F746_PH0_FUNC_EVENTOUT 0x7010 +#define STM32F746_PH0_FUNC_ANALOG 0x7011 + 
+#define STM32F746_PH1_FUNC_GPIO 0x7100 +#define STM32F746_PH1_FUNC_EVENTOUT 0x7110 +#define STM32F746_PH1_FUNC_ANALOG 0x7111 + +#define STM32F746_PH2_FUNC_GPIO 0x7200 +#define STM32F746_PH2_FUNC_LPTIM1_IN2 0x7204 +#define STM32F746_PH2_FUNC_QUADSPI_BK2_IO0 0x720a +#define STM32F746_PH2_FUNC_SAI2_SCK_B 0x720b +#define STM32F746_PH2_FUNC_ETH_MII_CRS 0x720c +#define STM32F746_PH2_FUNC_FMC_SDCKE0 0x720d +#define STM32F746_PH2_FUNC_LCD_R0 0x720f +#define STM32F746_PH2_FUNC_EVENTOUT 0x7210 +#define STM32F746_PH2_FUNC_ANALOG 0x7211 + +#define STM32F746_PH3_FUNC_GPIO 0x7300 +#define STM32F746_PH3_FUNC_QUADSPI_BK2_IO1 0x730a +#define STM32F746_PH3_FUNC_SAI2_MCLK_B 0x730b +#define STM32F746_PH3_FUNC_ETH_MII_COL 0x730c +#define STM32F746_PH3_FUNC_FMC_SDNE0 0x730d +#define STM32F746_PH3_FUNC_LCD_R1 0x730f +#define STM32F746_PH3_FUNC_EVENTOUT 0x7310 +#define STM32F746_PH3_FUNC_ANALOG 0x7311 + +#define STM32F746_PH4_FUNC_GPIO 0x7400 +#define STM32F746_PH4_FUNC_I2C2_SCL 0x7405 +#define STM32F746_PH4_FUNC_OTG_HS_ULPI_NXT 0x740b +#define STM32F746_PH4_FUNC_EVENTOUT 0x7410 +#define STM32F746_PH4_FUNC_ANALOG 0x7411 + +#define STM32F746_PH5_FUNC_GPIO 0x7500 +#define STM32F746_PH5_FUNC_I2C2_SDA 0x7505 +#define STM32F746_PH5_FUNC_SPI5_NSS 0x7506 +#define STM32F746_PH5_FUNC_FMC_SDNWE 0x750d +#define STM32F746_PH5_FUNC_EVENTOUT 0x7510 +#define STM32F746_PH5_FUNC_ANALOG 0x7511 + +#define STM32F746_PH6_FUNC_GPIO 0x7600 +#define STM32F746_PH6_FUNC_I2C2_SMBA 0x7605 +#define STM32F746_PH6_FUNC_SPI5_SCK 0x7606 +#define STM32F746_PH6_FUNC_TIM12_CH1 0x760a +#define STM32F746_PH6_FUNC_ETH_MII_RXD2 0x760c +#define STM32F746_PH6_FUNC_FMC_SDNE1 0x760d +#define STM32F746_PH6_FUNC_DCMI_D8 0x760e +#define STM32F746_PH6_FUNC_EVENTOUT 0x7610 +#define STM32F746_PH6_FUNC_ANALOG 0x7611 + +#define STM32F746_PH7_FUNC_GPIO 0x7700 +#define STM32F746_PH7_FUNC_I2C3_SCL 0x7705 +#define STM32F746_PH7_FUNC_SPI5_MISO 0x7706 +#define STM32F746_PH7_FUNC_ETH_MII_RXD3 0x770c +#define STM32F746_PH7_FUNC_FMC_SDCKE1 0x770d 
+#define STM32F746_PH7_FUNC_DCMI_D9 0x770e +#define STM32F746_PH7_FUNC_EVENTOUT 0x7710 +#define STM32F746_PH7_FUNC_ANALOG 0x7711 + +#define STM32F746_PH8_FUNC_GPIO 0x7800 +#define STM32F746_PH8_FUNC_I2C3_SDA 0x7805 +#define STM32F746_PH8_FUNC_FMC_D16 0x780d +#define STM32F746_PH8_FUNC_DCMI_HSYNC 0x780e +#define STM32F746_PH8_FUNC_LCD_R2 0x780f +#define STM32F746_PH8_FUNC_EVENTOUT 0x7810 +#define STM32F746_PH8_FUNC_ANALOG 0x7811 + +#define STM32F746_PH9_FUNC_GPIO 0x7900 +#define STM32F746_PH9_FUNC_I2C3_SMBA 0x7905 +#define STM32F746_PH9_FUNC_TIM12_CH2 0x790a +#define STM32F746_PH9_FUNC_FMC_D17 0x790d +#define STM32F746_PH9_FUNC_DCMI_D0 0x790e +#define STM32F746_PH9_FUNC_LCD_R3 0x790f +#define STM32F746_PH9_FUNC_EVENTOUT 0x7910 +#define STM32F746_PH9_FUNC_ANALOG 0x7911 + +#define STM32F746_PH10_FUNC_GPIO 0x7a00 +#define STM32F746_PH10_FUNC_TIM5_CH1 0x7a03 +#define STM32F746_PH10_FUNC_I2C4_SMBA 0x7a05 +#define STM32F746_PH10_FUNC_FMC_D18 0x7a0d +#define STM32F746_PH10_FUNC_DCMI_D1 0x7a0e +#define STM32F746_PH10_FUNC_LCD_R4 0x7a0f +#define STM32F746_PH10_FUNC_EVENTOUT 0x7a10 +#define STM32F746_PH10_FUNC_ANALOG 0x7a11 + +#define STM32F746_PH11_FUNC_GPIO 0x7b00 +#define STM32F746_PH11_FUNC_TIM5_CH2 0x7b03 +#define STM32F746_PH11_FUNC_I2C4_SCL 0x7b05 +#define STM32F746_PH11_FUNC_FMC_D19 0x7b0d +#define STM32F746_PH11_FUNC_DCMI_D2 0x7b0e +#define STM32F746_PH11_FUNC_LCD_R5 0x7b0f +#define STM32F746_PH11_FUNC_EVENTOUT 0x7b10 +#define STM32F746_PH11_FUNC_ANALOG 0x7b11 + +#define STM32F746_PH12_FUNC_GPIO 0x7c00 +#define STM32F746_PH12_FUNC_TIM5_CH3 0x7c03 +#define STM32F746_PH12_FUNC_I2C4_SDA 0x7c05 +#define STM32F746_PH12_FUNC_FMC_D20 0x7c0d +#define STM32F746_PH12_FUNC_DCMI_D3 0x7c0e +#define STM32F746_PH12_FUNC_LCD_R6 0x7c0f +#define STM32F746_PH12_FUNC_EVENTOUT 0x7c10 +#define STM32F746_PH12_FUNC_ANALOG 0x7c11 + +#define STM32F746_PH13_FUNC_GPIO 0x7d00 +#define STM32F746_PH13_FUNC_TIM8_CH1N 0x7d04 +#define STM32F746_PH13_FUNC_CAN1_TX 0x7d0a +#define 
STM32F746_PH13_FUNC_FMC_D21 0x7d0d +#define STM32F746_PH13_FUNC_LCD_G2 0x7d0f +#define STM32F746_PH13_FUNC_EVENTOUT 0x7d10 +#define STM32F746_PH13_FUNC_ANALOG 0x7d11 + +#define STM32F746_PH14_FUNC_GPIO 0x7e00 +#define STM32F746_PH14_FUNC_TIM8_CH2N 0x7e04 +#define STM32F746_PH14_FUNC_FMC_D22 0x7e0d +#define STM32F746_PH14_FUNC_DCMI_D4 0x7e0e +#define STM32F746_PH14_FUNC_LCD_G3 0x7e0f +#define STM32F746_PH14_FUNC_EVENTOUT 0x7e10 +#define STM32F746_PH14_FUNC_ANALOG 0x7e11 + +#define STM32F746_PH15_FUNC_GPIO 0x7f00 +#define STM32F746_PH15_FUNC_TIM8_CH3N 0x7f04 +#define STM32F746_PH15_FUNC_FMC_D23 0x7f0d +#define STM32F746_PH15_FUNC_DCMI_D11 0x7f0e +#define STM32F746_PH15_FUNC_LCD_G4 0x7f0f +#define STM32F746_PH15_FUNC_EVENTOUT 0x7f10 +#define STM32F746_PH15_FUNC_ANALOG 0x7f11 + + +#define STM32F746_PI0_FUNC_GPIO 0x8000 +#define STM32F746_PI0_FUNC_TIM5_CH4 0x8003 +#define STM32F746_PI0_FUNC_SPI2_NSS_I2S2_WS 0x8006 +#define STM32F746_PI0_FUNC_FMC_D24 0x800d +#define STM32F746_PI0_FUNC_DCMI_D13 0x800e +#define STM32F746_PI0_FUNC_LCD_G5 0x800f +#define STM32F746_PI0_FUNC_EVENTOUT 0x8010 +#define STM32F746_PI0_FUNC_ANALOG 0x8011 + +#define STM32F746_PI1_FUNC_GPIO 0x8100 +#define STM32F746_PI1_FUNC_TIM8_BKIN2 0x8104 +#define STM32F746_PI1_FUNC_SPI2_SCK_I2S2_CK 0x8106 +#define STM32F746_PI1_FUNC_FMC_D25 0x810d +#define STM32F746_PI1_FUNC_DCMI_D8 0x810e +#define STM32F746_PI1_FUNC_LCD_G6 0x810f +#define STM32F746_PI1_FUNC_EVENTOUT 0x8110 +#define STM32F746_PI1_FUNC_ANALOG 0x8111 + +#define STM32F746_PI2_FUNC_GPIO 0x8200 +#define STM32F746_PI2_FUNC_TIM8_CH4 0x8204 +#define STM32F746_PI2_FUNC_SPI2_MISO 0x8206 +#define STM32F746_PI2_FUNC_FMC_D26 0x820d +#define STM32F746_PI2_FUNC_DCMI_D9 0x820e +#define STM32F746_PI2_FUNC_LCD_G7 0x820f +#define STM32F746_PI2_FUNC_EVENTOUT 0x8210 +#define STM32F746_PI2_FUNC_ANALOG 0x8211 + +#define STM32F746_PI3_FUNC_GPIO 0x8300 +#define STM32F746_PI3_FUNC_TIM8_ETR 0x8304 +#define STM32F746_PI3_FUNC_SPI2_MOSI_I2S2_SD 0x8306 +#define 
STM32F746_PI3_FUNC_FMC_D27 0x830d +#define STM32F746_PI3_FUNC_DCMI_D10 0x830e +#define STM32F746_PI3_FUNC_EVENTOUT 0x8310 +#define STM32F746_PI3_FUNC_ANALOG 0x8311 + +#define STM32F746_PI4_FUNC_GPIO 0x8400 +#define STM32F746_PI4_FUNC_TIM8_BKIN 0x8404 +#define STM32F746_PI4_FUNC_SAI2_MCLK_A 0x840b +#define STM32F746_PI4_FUNC_FMC_NBL2 0x840d +#define STM32F746_PI4_FUNC_DCMI_D5 0x840e +#define STM32F746_PI4_FUNC_LCD_B4 0x840f +#define STM32F746_PI4_FUNC_EVENTOUT 0x8410 +#define STM32F746_PI4_FUNC_ANALOG 0x8411 + +#define STM32F746_PI5_FUNC_GPIO 0x8500 +#define STM32F746_PI5_FUNC_TIM8_CH1 0x8504 +#define STM32F746_PI5_FUNC_SAI2_SCK_A 0x850b +#define STM32F746_PI5_FUNC_FMC_NBL3 0x850d +#define STM32F746_PI5_FUNC_DCMI_VSYNC 0x850e +#define STM32F746_PI5_FUNC_LCD_B5 0x850f +#define STM32F746_PI5_FUNC_EVENTOUT 0x8510 +#define STM32F746_PI5_FUNC_ANALOG 0x8511 + +#define STM32F746_PI6_FUNC_GPIO 0x8600 +#define STM32F746_PI6_FUNC_TIM8_CH2 0x8604 +#define STM32F746_PI6_FUNC_SAI2_SD_A 0x860b +#define STM32F746_PI6_FUNC_FMC_D28 0x860d +#define STM32F746_PI6_FUNC_DCMI_D6 0x860e +#define STM32F746_PI6_FUNC_LCD_B6 0x860f +#define STM32F746_PI6_FUNC_EVENTOUT 0x8610 +#define STM32F746_PI6_FUNC_ANALOG 0x8611 + +#define STM32F746_PI7_FUNC_GPIO 0x8700 +#define STM32F746_PI7_FUNC_TIM8_CH3 0x8704 +#define STM32F746_PI7_FUNC_SAI2_FS_A 0x870b +#define STM32F746_PI7_FUNC_FMC_D29 0x870d +#define STM32F746_PI7_FUNC_DCMI_D7 0x870e +#define STM32F746_PI7_FUNC_LCD_B7 0x870f +#define STM32F746_PI7_FUNC_EVENTOUT 0x8710 +#define STM32F746_PI7_FUNC_ANALOG 0x8711 + +#define STM32F746_PI8_FUNC_GPIO 0x8800 +#define STM32F746_PI8_FUNC_EVENTOUT 0x8810 +#define STM32F746_PI8_FUNC_ANALOG 0x8811 + +#define STM32F746_PI9_FUNC_GPIO 0x8900 +#define STM32F746_PI9_FUNC_CAN1_RX 0x890a +#define STM32F746_PI9_FUNC_FMC_D30 0x890d +#define STM32F746_PI9_FUNC_LCD_VSYNC 0x890f +#define STM32F746_PI9_FUNC_EVENTOUT 0x8910 +#define STM32F746_PI9_FUNC_ANALOG 0x8911 + +#define STM32F746_PI10_FUNC_GPIO 0x8a00 +#define 
STM32F746_PI10_FUNC_ETH_MII_RX_ER 0x8a0c +#define STM32F746_PI10_FUNC_FMC_D31 0x8a0d +#define STM32F746_PI10_FUNC_LCD_HSYNC 0x8a0f +#define STM32F746_PI10_FUNC_EVENTOUT 0x8a10 +#define STM32F746_PI10_FUNC_ANALOG 0x8a11 + +#define STM32F746_PI11_FUNC_GPIO 0x8b00 +#define STM32F746_PI11_FUNC_OTG_HS_ULPI_DIR 0x8b0b +#define STM32F746_PI11_FUNC_EVENTOUT 0x8b10 +#define STM32F746_PI11_FUNC_ANALOG 0x8b11 + +#define STM32F746_PI12_FUNC_GPIO 0x8c00 +#define STM32F746_PI12_FUNC_LCD_HSYNC 0x8c0f +#define STM32F746_PI12_FUNC_EVENTOUT 0x8c10 +#define STM32F746_PI12_FUNC_ANALOG 0x8c11 + +#define STM32F746_PI13_FUNC_GPIO 0x8d00 +#define STM32F746_PI13_FUNC_LCD_VSYNC 0x8d0f +#define STM32F746_PI13_FUNC_EVENTOUT 0x8d10 +#define STM32F746_PI13_FUNC_ANALOG 0x8d11 + +#define STM32F746_PI14_FUNC_GPIO 0x8e00 +#define STM32F746_PI14_FUNC_LCD_CLK 0x8e0f +#define STM32F746_PI14_FUNC_EVENTOUT 0x8e10 +#define STM32F746_PI14_FUNC_ANALOG 0x8e11 + +#define STM32F746_PI15_FUNC_GPIO 0x8f00 +#define STM32F746_PI15_FUNC_LCD_R0 0x8f0f +#define STM32F746_PI15_FUNC_EVENTOUT 0x8f10 +#define STM32F746_PI15_FUNC_ANALOG 0x8f11 + + +#define STM32F746_PJ0_FUNC_GPIO 0x9000 +#define STM32F746_PJ0_FUNC_LCD_R1 0x900f +#define STM32F746_PJ0_FUNC_EVENTOUT 0x9010 +#define STM32F746_PJ0_FUNC_ANALOG 0x9011 + +#define STM32F746_PJ1_FUNC_GPIO 0x9100 +#define STM32F746_PJ1_FUNC_LCD_R2 0x910f +#define STM32F746_PJ1_FUNC_EVENTOUT 0x9110 +#define STM32F746_PJ1_FUNC_ANALOG 0x9111 + +#define STM32F746_PJ2_FUNC_GPIO 0x9200 +#define STM32F746_PJ2_FUNC_LCD_R3 0x920f +#define STM32F746_PJ2_FUNC_EVENTOUT 0x9210 +#define STM32F746_PJ2_FUNC_ANALOG 0x9211 + +#define STM32F746_PJ3_FUNC_GPIO 0x9300 +#define STM32F746_PJ3_FUNC_LCD_R4 0x930f +#define STM32F746_PJ3_FUNC_EVENTOUT 0x9310 +#define STM32F746_PJ3_FUNC_ANALOG 0x9311 + +#define STM32F746_PJ4_FUNC_GPIO 0x9400 +#define STM32F746_PJ4_FUNC_LCD_R5 0x940f +#define STM32F746_PJ4_FUNC_EVENTOUT 0x9410 +#define STM32F746_PJ4_FUNC_ANALOG 0x9411 + +#define STM32F746_PJ5_FUNC_GPIO 0x9500 
+#define STM32F746_PJ5_FUNC_LCD_R6 0x950f +#define STM32F746_PJ5_FUNC_EVENTOUT 0x9510 +#define STM32F746_PJ5_FUNC_ANALOG 0x9511 + +#define STM32F746_PJ6_FUNC_GPIO 0x9600 +#define STM32F746_PJ6_FUNC_LCD_R7 0x960f +#define STM32F746_PJ6_FUNC_EVENTOUT 0x9610 +#define STM32F746_PJ6_FUNC_ANALOG 0x9611 + +#define STM32F746_PJ7_FUNC_GPIO 0x9700 +#define STM32F746_PJ7_FUNC_LCD_G0 0x970f +#define STM32F746_PJ7_FUNC_EVENTOUT 0x9710 +#define STM32F746_PJ7_FUNC_ANALOG 0x9711 + +#define STM32F746_PJ8_FUNC_GPIO 0x9800 +#define STM32F746_PJ8_FUNC_LCD_G1 0x980f +#define STM32F746_PJ8_FUNC_EVENTOUT 0x9810 +#define STM32F746_PJ8_FUNC_ANALOG 0x9811 + +#define STM32F746_PJ9_FUNC_GPIO 0x9900 +#define STM32F746_PJ9_FUNC_LCD_G2 0x990f +#define STM32F746_PJ9_FUNC_EVENTOUT 0x9910 +#define STM32F746_PJ9_FUNC_ANALOG 0x9911 + +#define STM32F746_PJ10_FUNC_GPIO 0x9a00 +#define STM32F746_PJ10_FUNC_LCD_G3 0x9a0f +#define STM32F746_PJ10_FUNC_EVENTOUT 0x9a10 +#define STM32F746_PJ10_FUNC_ANALOG 0x9a11 + +#define STM32F746_PJ11_FUNC_GPIO 0x9b00 +#define STM32F746_PJ11_FUNC_LCD_G4 0x9b0f +#define STM32F746_PJ11_FUNC_EVENTOUT 0x9b10 +#define STM32F746_PJ11_FUNC_ANALOG 0x9b11 + +#define STM32F746_PJ12_FUNC_GPIO 0x9c00 +#define STM32F746_PJ12_FUNC_LCD_B0 0x9c0f +#define STM32F746_PJ12_FUNC_EVENTOUT 0x9c10 +#define STM32F746_PJ12_FUNC_ANALOG 0x9c11 + +#define STM32F746_PJ13_FUNC_GPIO 0x9d00 +#define STM32F746_PJ13_FUNC_LCD_B1 0x9d0f +#define STM32F746_PJ13_FUNC_EVENTOUT 0x9d10 +#define STM32F746_PJ13_FUNC_ANALOG 0x9d11 + +#define STM32F746_PJ14_FUNC_GPIO 0x9e00 +#define STM32F746_PJ14_FUNC_LCD_B2 0x9e0f +#define STM32F746_PJ14_FUNC_EVENTOUT 0x9e10 +#define STM32F746_PJ14_FUNC_ANALOG 0x9e11 + +#define STM32F746_PJ15_FUNC_GPIO 0x9f00 +#define STM32F746_PJ15_FUNC_LCD_B3 0x9f0f +#define STM32F746_PJ15_FUNC_EVENTOUT 0x9f10 +#define STM32F746_PJ15_FUNC_ANALOG 0x9f11 + + +#define STM32F746_PK0_FUNC_GPIO 0xa000 +#define STM32F746_PK0_FUNC_LCD_G5 0xa00f +#define STM32F746_PK0_FUNC_EVENTOUT 0xa010 +#define 
STM32F746_PK0_FUNC_ANALOG 0xa011 + +#define STM32F746_PK1_FUNC_GPIO 0xa100 +#define STM32F746_PK1_FUNC_LCD_G6 0xa10f +#define STM32F746_PK1_FUNC_EVENTOUT 0xa110 +#define STM32F746_PK1_FUNC_ANALOG 0xa111 + +#define STM32F746_PK2_FUNC_GPIO 0xa200 +#define STM32F746_PK2_FUNC_LCD_G7 0xa20f +#define STM32F746_PK2_FUNC_EVENTOUT 0xa210 +#define STM32F746_PK2_FUNC_ANALOG 0xa211 + +#define STM32F746_PK3_FUNC_GPIO 0xa300 +#define STM32F746_PK3_FUNC_LCD_B4 0xa30f +#define STM32F746_PK3_FUNC_EVENTOUT 0xa310 +#define STM32F746_PK3_FUNC_ANALOG 0xa311 + +#define STM32F746_PK4_FUNC_GPIO 0xa400 +#define STM32F746_PK4_FUNC_LCD_B5 0xa40f +#define STM32F746_PK4_FUNC_EVENTOUT 0xa410 +#define STM32F746_PK4_FUNC_ANALOG 0xa411 + +#define STM32F746_PK5_FUNC_GPIO 0xa500 +#define STM32F746_PK5_FUNC_LCD_B6 0xa50f +#define STM32F746_PK5_FUNC_EVENTOUT 0xa510 +#define STM32F746_PK5_FUNC_ANALOG 0xa511 + +#define STM32F746_PK6_FUNC_GPIO 0xa600 +#define STM32F746_PK6_FUNC_LCD_B7 0xa60f +#define STM32F746_PK6_FUNC_EVENTOUT 0xa610 +#define STM32F746_PK6_FUNC_ANALOG 0xa611 + +#define STM32F746_PK7_FUNC_GPIO 0xa700 +#define STM32F746_PK7_FUNC_LCD_DE 0xa70f +#define STM32F746_PK7_FUNC_EVENTOUT 0xa710 +#define STM32F746_PK7_FUNC_ANALOG 0xa711 + +#endif /* _DT_BINDINGS_STM32F746_PINFUNC_H */ diff --git a/include/dt-bindings/power/mt8173-power.h b/include/dt-bindings/power/mt8173-power.h index ef4a7f9448..b34cee95aa 100644 --- a/include/dt-bindings/power/mt8173-power.h +++ b/include/dt-bindings/power/mt8173-power.h @@ -1,6 +1,5 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -#ifndef _DT_BINDINGS_POWER_MT8173_POWER_H -#define _DT_BINDINGS_POWER_MT8173_POWER_H +#ifndef _DT_BINDINGS_POWER_MT8183_POWER_H +#define _DT_BINDINGS_POWER_MT8183_POWER_H #define MT8173_POWER_DOMAIN_VDEC 0 #define MT8173_POWER_DOMAIN_VENC 1 @@ -13,4 +12,4 @@ #define MT8173_POWER_DOMAIN_MFG_2D 8 #define MT8173_POWER_DOMAIN_MFG 9 -#endif /* _DT_BINDINGS_POWER_MT8173_POWER_H */ +#endif /* _DT_BINDINGS_POWER_MT8183_POWER_H */ diff --git 
a/include/dt-bindings/power/r8a7779-sysc.h b/include/dt-bindings/power/r8a7779-sysc.h index c4f528b6cc..183571da50 100644 --- a/include/dt-bindings/power/r8a7779-sysc.h +++ b/include/dt-bindings/power/r8a7779-sysc.h @@ -1,6 +1,9 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright (C) 2016 Glider bvba + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; version 2 of the License. */ #ifndef __DT_BINDINGS_POWER_R8A7779_SYSC_H__ #define __DT_BINDINGS_POWER_R8A7779_SYSC_H__ diff --git a/include/dt-bindings/power/r8a7790-sysc.h b/include/dt-bindings/power/r8a7790-sysc.h index bcb4905706..6af4e9929b 100644 --- a/include/dt-bindings/power/r8a7790-sysc.h +++ b/include/dt-bindings/power/r8a7790-sysc.h @@ -1,6 +1,9 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright (C) 2016 Glider bvba + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; version 2 of the License. */ #ifndef __DT_BINDINGS_POWER_R8A7790_SYSC_H__ #define __DT_BINDINGS_POWER_R8A7790_SYSC_H__ diff --git a/include/dt-bindings/power/r8a7791-sysc.h b/include/dt-bindings/power/r8a7791-sysc.h index 1d20fae424..1403baa051 100644 --- a/include/dt-bindings/power/r8a7791-sysc.h +++ b/include/dt-bindings/power/r8a7791-sysc.h @@ -1,6 +1,9 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright (C) 2016 Glider bvba + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; version 2 of the License. 
*/ #ifndef __DT_BINDINGS_POWER_R8A7791_SYSC_H__ #define __DT_BINDINGS_POWER_R8A7791_SYSC_H__ diff --git a/include/dt-bindings/power/r8a7792-sysc.h b/include/dt-bindings/power/r8a7792-sysc.h index dd3a4667ca..74f4a78e29 100644 --- a/include/dt-bindings/power/r8a7792-sysc.h +++ b/include/dt-bindings/power/r8a7792-sysc.h @@ -1,6 +1,9 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright (C) 2016 Cogent Embedded Inc. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; version 2 of the License. */ #ifndef __DT_BINDINGS_POWER_R8A7792_SYSC_H__ #define __DT_BINDINGS_POWER_R8A7792_SYSC_H__ diff --git a/include/dt-bindings/power/r8a7793-sysc.h b/include/dt-bindings/power/r8a7793-sysc.h index 056998c635..b5693df3d8 100644 --- a/include/dt-bindings/power/r8a7793-sysc.h +++ b/include/dt-bindings/power/r8a7793-sysc.h @@ -1,6 +1,9 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright (C) 2016 Glider bvba + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; version 2 of the License. */ #ifndef __DT_BINDINGS_POWER_R8A7793_SYSC_H__ #define __DT_BINDINGS_POWER_R8A7793_SYSC_H__ diff --git a/include/dt-bindings/power/r8a7794-sysc.h b/include/dt-bindings/power/r8a7794-sysc.h index 4d6c708e6f..862241c2d2 100644 --- a/include/dt-bindings/power/r8a7794-sysc.h +++ b/include/dt-bindings/power/r8a7794-sysc.h @@ -1,6 +1,9 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright (C) 2016 Glider bvba + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; version 2 of the License. 
*/ #ifndef __DT_BINDINGS_POWER_R8A7794_SYSC_H__ #define __DT_BINDINGS_POWER_R8A7794_SYSC_H__ diff --git a/include/dt-bindings/power/r8a7795-sysc.h b/include/dt-bindings/power/r8a7795-sysc.h index eea6ad69f0..ee2e26ba60 100644 --- a/include/dt-bindings/power/r8a7795-sysc.h +++ b/include/dt-bindings/power/r8a7795-sysc.h @@ -1,6 +1,9 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright (C) 2016 Glider bvba + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; version 2 of the License. */ #ifndef __DT_BINDINGS_POWER_R8A7795_SYSC_H__ #define __DT_BINDINGS_POWER_R8A7795_SYSC_H__ @@ -30,7 +33,7 @@ #define R8A7795_PD_CA53_SCU 21 #define R8A7795_PD_3DG_E 22 #define R8A7795_PD_A3IR 24 -#define R8A7795_PD_A2VC0 25 /* ES1.x only */ +#define R8A7795_PD_A2VC0 25 #define R8A7795_PD_A2VC1 26 /* Always-on power area */ diff --git a/include/dt-bindings/power/r8a7796-sysc.h b/include/dt-bindings/power/r8a7796-sysc.h index 7e6fc06ebf..5b4daab44d 100644 --- a/include/dt-bindings/power/r8a7796-sysc.h +++ b/include/dt-bindings/power/r8a7796-sysc.h @@ -1,6 +1,9 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright (C) 2016 Glider bvba + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; version 2 of the License. 
*/ #ifndef __DT_BINDINGS_POWER_R8A7796_SYSC_H__ #define __DT_BINDINGS_POWER_R8A7796_SYSC_H__ diff --git a/include/dt-bindings/power/raspberrypi-power.h b/include/dt-bindings/power/raspberrypi-power.h index 3575f9f4b0..b3ff8e09a7 100644 --- a/include/dt-bindings/power/raspberrypi-power.h +++ b/include/dt-bindings/power/raspberrypi-power.h @@ -1,6 +1,9 @@ -/* SPDX-License-Identifier: GPL-2.0 */ /* * Copyright © 2015 Broadcom + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. */ #ifndef _DT_BINDINGS_ARM_BCM2835_RPI_POWER_H diff --git a/include/dt-bindings/power/rk3288-power.h b/include/dt-bindings/power/rk3288-power.h index f710b56ccd..b8b1045f3d 100644 --- a/include/dt-bindings/power/rk3288-power.h +++ b/include/dt-bindings/power/rk3288-power.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef __DT_BINDINGS_POWER_RK3288_POWER_H__ #define __DT_BINDINGS_POWER_RK3288_POWER_H__ diff --git a/include/dt-bindings/power/rk3368-power.h b/include/dt-bindings/power/rk3368-power.h index 5e602dbd64..93633d57ed 100644 --- a/include/dt-bindings/power/rk3368-power.h +++ b/include/dt-bindings/power/rk3368-power.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef __DT_BINDINGS_POWER_RK3368_POWER_H__ #define __DT_BINDINGS_POWER_RK3368_POWER_H__ diff --git a/include/dt-bindings/power/rk3399-power.h b/include/dt-bindings/power/rk3399-power.h index aedd8b180f..168b3bfbd6 100644 --- a/include/dt-bindings/power/rk3399-power.h +++ b/include/dt-bindings/power/rk3399-power.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef __DT_BINDINGS_POWER_RK3399_POWER_H__ #define __DT_BINDINGS_POWER_RK3399_POWER_H__ diff --git a/include/dt-bindings/pwm/pwm.h b/include/dt-bindings/pwm/pwm.h index ab9a077e3c..96f49e8225 100644 --- a/include/dt-bindings/pwm/pwm.h +++ b/include/dt-bindings/pwm/pwm.h @@ -1,4 +1,3 @@ -/* 
SPDX-License-Identifier: GPL-2.0 */ /* * This header provides constants for most PWM bindings. * diff --git a/include/dt-bindings/regulator/maxim,max77802.h b/include/dt-bindings/regulator/maxim,max77802.h index d0baba1973..cf28631d71 100644 --- a/include/dt-bindings/regulator/maxim,max77802.h +++ b/include/dt-bindings/regulator/maxim,max77802.h @@ -1,7 +1,10 @@ -/* SPDX-License-Identifier: GPL-2.0 */ /* * Copyright (C) 2014 Google, Inc * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * * Device Tree binding constants for the Maxim 77802 PMIC regulators */ diff --git a/include/dt-bindings/reset/altr,rst-mgr-a10.h b/include/dt-bindings/reset/altr,rst-mgr-a10.h index 5d8a494c98..acb0bbf4f9 100644 --- a/include/dt-bindings/reset/altr,rst-mgr-a10.h +++ b/include/dt-bindings/reset/altr,rst-mgr-a10.h @@ -1,6 +1,14 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright (c) 2014, Steffen Trumtrar + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
*/ #ifndef _DT_BINDINGS_RESET_ALTR_RST_MGR_A10_H diff --git a/include/dt-bindings/reset/altr,rst-mgr.h b/include/dt-bindings/reset/altr,rst-mgr.h index 9b6ce14f62..3f04908fb8 100644 --- a/include/dt-bindings/reset/altr,rst-mgr.h +++ b/include/dt-bindings/reset/altr,rst-mgr.h @@ -1,6 +1,14 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright (c) 2014, Steffen Trumtrar + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. */ #ifndef _DT_BINDINGS_RESET_ALTR_RST_MGR_H diff --git a/include/dt-bindings/reset/amlogic,meson-gxbb-reset.h b/include/dt-bindings/reset/amlogic,meson-gxbb-reset.h index 883bfd3bcb..524d6077ac 100644 --- a/include/dt-bindings/reset/amlogic,meson-gxbb-reset.h +++ b/include/dt-bindings/reset/amlogic,meson-gxbb-reset.h @@ -1,7 +1,56 @@ -/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ /* + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * * Copyright (c) 2016 BayLibre, SAS. * Author: Neil Armstrong + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, see . + * The full GNU General Public License is included in this distribution + * in the file called COPYING. + * + * BSD LICENSE + * + * Copyright (c) 2016 BayLibre, SAS. + * Author: Neil Armstrong + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ #ifndef _DT_BINDINGS_AMLOGIC_MESON_GXBB_RESET_H #define _DT_BINDINGS_AMLOGIC_MESON_GXBB_RESET_H @@ -69,7 +118,7 @@ #define RESET_SYS_CPU_L2 58 #define RESET_SYS_CPU_P 59 #define RESET_SYS_CPU_MBIST 60 -#define RESET_ACODEC 61 +/* 61 */ /* 62 */ /* 63 */ /* RESET2 */ diff --git a/include/dt-bindings/reset/amlogic,meson8b-reset.h b/include/dt-bindings/reset/amlogic,meson8b-reset.h index fbc524a900..614aff2c7a 100644 --- a/include/dt-bindings/reset/amlogic,meson8b-reset.h +++ b/include/dt-bindings/reset/amlogic,meson8b-reset.h @@ -1,7 +1,56 @@ -/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ /* + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * * Copyright (c) 2016 BayLibre, SAS. * Author: Neil Armstrong + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, see . + * The full GNU General Public License is included in this distribution + * in the file called COPYING. + * + * BSD LICENSE + * + * Copyright (c) 2016 BayLibre, SAS. + * Author: Neil Armstrong + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. 
+ * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ #ifndef _DT_BINDINGS_AMLOGIC_MESON8B_RESET_H #define _DT_BINDINGS_AMLOGIC_MESON8B_RESET_H @@ -46,9 +95,9 @@ #define RESET_VD_RMEM 64 #define RESET_AUDIN 65 #define RESET_DBLK 66 #define RESET_PIC_DC 67 #define RESET_PSC 68 #define RESET_NAND 69 #define RESET_GE2D 70 #define RESET_PARSER_REG 71 #define RESET_PARSER_FETCH 72 diff --git a/include/dt-bindings/reset/hisi,hi6220-resets.h b/include/dt-bindings/reset/hisi,hi6220-resets.h index 63aff7d8aa..322ec5335b 100644 --- a/include/dt-bindings/reset/hisi,hi6220-resets.h +++ b/include/dt-bindings/reset/hisi,hi6220-resets.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ /** * This header provides index for the reset controller * based on hi6220 SoC. @@ -73,11 +72,4 @@ #define MEDIA_MMU 6 #define MEDIA_XG2RAM1 7 -#define AO_G3D 1 -#define AO_CODECISP 2 -#define AO_MCPU 4 -#define AO_BBPHARQMEM 5 -#define AO_HIFI 8 -#define AO_ACPUSCUL2C 12 - #endif /*_DT_BINDINGS_RESET_CONTROLLER_HI6220*/ diff --git a/include/dt-bindings/reset/mt2701-resets.h b/include/dt-bindings/reset/mt2701-resets.h index 91e4200fd7..aaf03057f7 100644 --- a/include/dt-bindings/reset/mt2701-resets.h +++ b/include/dt-bindings/reset/mt2701-resets.h @@ -1,6 +1,14 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright (c) 2015 MediaTek, Shunli Wang + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
*/ #ifndef _DT_BINDINGS_RESET_CONTROLLER_MT2701 @@ -72,14 +80,4 @@ #define MT2701_HIFSYS_PCIE1_RST 25 #define MT2701_HIFSYS_PCIE2_RST 26 -/* ETHSYS resets */ -#define MT2701_ETHSYS_SYS_RST 0 -#define MT2701_ETHSYS_MCM_RST 2 -#define MT2701_ETHSYS_FE_RST 6 -#define MT2701_ETHSYS_GMAC_RST 23 -#define MT2701_ETHSYS_PPE_RST 31 - -/* G3DSYS resets */ -#define MT2701_G3DSYS_CORE_RST 0 - #endif /* _DT_BINDINGS_RESET_CONTROLLER_MT2701 */ diff --git a/include/dt-bindings/reset/mt8135-resets.h b/include/dt-bindings/reset/mt8135-resets.h index 8c060d0871..1fb629508d 100644 --- a/include/dt-bindings/reset/mt8135-resets.h +++ b/include/dt-bindings/reset/mt8135-resets.h @@ -1,7 +1,15 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright (c) 2014 MediaTek Inc. * Author: Flora Fu, MediaTek + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. */ #ifndef _DT_BINDINGS_RESET_CONTROLLER_MT8135 diff --git a/include/dt-bindings/reset/mt8173-resets.h b/include/dt-bindings/reset/mt8173-resets.h index ba8636eda5..9464b37cf6 100644 --- a/include/dt-bindings/reset/mt8173-resets.h +++ b/include/dt-bindings/reset/mt8173-resets.h @@ -1,7 +1,15 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright (c) 2014 MediaTek Inc. * Author: Flora Fu, MediaTek + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. */ #ifndef _DT_BINDINGS_RESET_CONTROLLER_MT8173 diff --git a/include/dt-bindings/reset/pistachio-resets.h b/include/dt-bindings/reset/pistachio-resets.h index 5bb4dd0d63..60a189b1fa 100644 --- a/include/dt-bindings/reset/pistachio-resets.h +++ b/include/dt-bindings/reset/pistachio-resets.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ /* * This header provides constants for the reset controller * present in the Pistachio SoC diff --git a/include/dt-bindings/reset/qcom,gcc-apq8084.h b/include/dt-bindings/reset/qcom,gcc-apq8084.h index e76be38342..527caaf48e 100644 --- a/include/dt-bindings/reset/qcom,gcc-apq8084.h +++ b/include/dt-bindings/reset/qcom,gcc-apq8084.h @@ -1,6 +1,14 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright (c) 2014, The Linux Foundation. All rights reserved. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. */ #ifndef _DT_BINDINGS_RESET_APQ_GCC_8084_H diff --git a/include/dt-bindings/reset/qcom,gcc-ipq806x.h b/include/dt-bindings/reset/qcom,gcc-ipq806x.h index 26b6f92006..de9c814093 100644 --- a/include/dt-bindings/reset/qcom,gcc-ipq806x.h +++ b/include/dt-bindings/reset/qcom,gcc-ipq806x.h @@ -1,6 +1,14 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright (c) 2014, The Linux Foundation. All rights reserved. 
+ * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. */ #ifndef _DT_BINDINGS_RESET_IPQ_806X_H diff --git a/include/dt-bindings/reset/qcom,gcc-mdm9615.h b/include/dt-bindings/reset/qcom,gcc-mdm9615.h index 5faf02d7e2..7f86e9a59d 100644 --- a/include/dt-bindings/reset/qcom,gcc-mdm9615.h +++ b/include/dt-bindings/reset/qcom,gcc-mdm9615.h @@ -1,8 +1,16 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright (c) 2013, The Linux Foundation. All rights reserved. * Copyright (c) BayLibre, SAS. * Author : Neil Armstrong + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. */ #ifndef _DT_BINDINGS_RESET_GCC_MDM9615_H diff --git a/include/dt-bindings/reset/qcom,gcc-msm8660.h b/include/dt-bindings/reset/qcom,gcc-msm8660.h index f6d2b3cbe7..a83282fe54 100644 --- a/include/dt-bindings/reset/qcom,gcc-msm8660.h +++ b/include/dt-bindings/reset/qcom,gcc-msm8660.h @@ -1,6 +1,14 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright (c) 2013, The Linux Foundation. All rights reserved. 
+ * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. */ #ifndef _DT_BINDINGS_RESET_MSM_GCC_8660_H diff --git a/include/dt-bindings/reset/qcom,gcc-msm8916.h b/include/dt-bindings/reset/qcom,gcc-msm8916.h index 1f9be10872..3d90410f09 100644 --- a/include/dt-bindings/reset/qcom,gcc-msm8916.h +++ b/include/dt-bindings/reset/qcom,gcc-msm8916.h @@ -1,6 +1,14 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright 2015 Linaro Limited + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. */ #ifndef _DT_BINDINGS_RESET_MSM_GCC_8916_H diff --git a/include/dt-bindings/reset/qcom,gcc-msm8960.h b/include/dt-bindings/reset/qcom,gcc-msm8960.h index c7ebae7bb2..47c8686955 100644 --- a/include/dt-bindings/reset/qcom,gcc-msm8960.h +++ b/include/dt-bindings/reset/qcom,gcc-msm8960.h @@ -1,6 +1,14 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright (c) 2013, The Linux Foundation. All rights reserved. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. 
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. */ #ifndef _DT_BINDINGS_RESET_MSM_GCC_8960_H diff --git a/include/dt-bindings/reset/qcom,gcc-msm8974.h b/include/dt-bindings/reset/qcom,gcc-msm8974.h index 23777e5ca4..9bdf543229 100644 --- a/include/dt-bindings/reset/qcom,gcc-msm8974.h +++ b/include/dt-bindings/reset/qcom,gcc-msm8974.h @@ -1,6 +1,14 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright (c) 2013, The Linux Foundation. All rights reserved. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. */ #ifndef _DT_BINDINGS_RESET_MSM_GCC_8974_H diff --git a/include/dt-bindings/reset/qcom,mmcc-apq8084.h b/include/dt-bindings/reset/qcom,mmcc-apq8084.h index faaeb40959..c167139653 100644 --- a/include/dt-bindings/reset/qcom,mmcc-apq8084.h +++ b/include/dt-bindings/reset/qcom,mmcc-apq8084.h @@ -1,6 +1,14 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright (c) 2014, The Linux Foundation. All rights reserved. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the + * GNU General Public License for more details. */ #ifndef _DT_BINDINGS_RESET_APQ_MMCC_8084_H diff --git a/include/dt-bindings/reset/qcom,mmcc-msm8960.h b/include/dt-bindings/reset/qcom,mmcc-msm8960.h index eb4186aa2c..11741113a8 100644 --- a/include/dt-bindings/reset/qcom,mmcc-msm8960.h +++ b/include/dt-bindings/reset/qcom,mmcc-msm8960.h @@ -1,6 +1,14 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright (c) 2013, The Linux Foundation. All rights reserved. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. */ #ifndef _DT_BINDINGS_RESET_MSM_MMCC_8960_H diff --git a/include/dt-bindings/reset/qcom,mmcc-msm8974.h b/include/dt-bindings/reset/qcom,mmcc-msm8974.h index d61b077e91..da3ec37f1b 100644 --- a/include/dt-bindings/reset/qcom,mmcc-msm8974.h +++ b/include/dt-bindings/reset/qcom,mmcc-msm8974.h @@ -1,6 +1,14 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright (c) 2013, The Linux Foundation. All rights reserved. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
*/ #ifndef _DT_BINDINGS_RESET_MSM_MMCC_8974_H diff --git a/include/dt-bindings/reset/stih407-resets.h b/include/dt-bindings/reset/stih407-resets.h index f2a2c4f7f0..4ab3a1c949 100644 --- a/include/dt-bindings/reset/stih407-resets.h +++ b/include/dt-bindings/reset/stih407-resets.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ /* * This header provides constants for the reset controller * based peripheral powerdown requests on the STMicroelectronics diff --git a/include/dt-bindings/reset/stih415-resets.h b/include/dt-bindings/reset/stih415-resets.h index 96f7831a1d..c2329fe29c 100644 --- a/include/dt-bindings/reset/stih415-resets.h +++ b/include/dt-bindings/reset/stih415-resets.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ /* * This header provides constants for the reset controller * based peripheral powerdown requests on the STMicroelectronics diff --git a/include/dt-bindings/reset/stih416-resets.h b/include/dt-bindings/reset/stih416-resets.h index f682c906ed..fcf9af1ac0 100644 --- a/include/dt-bindings/reset/stih416-resets.h +++ b/include/dt-bindings/reset/stih416-resets.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ /* * This header provides constants for the reset controller * based peripheral powerdown requests on the STMicroelectronics diff --git a/include/dt-bindings/reset/sun8i-h3-ccu.h b/include/dt-bindings/reset/sun8i-h3-ccu.h index 484c2a2291..6b7af80c26 100644 --- a/include/dt-bindings/reset/sun8i-h3-ccu.h +++ b/include/dt-bindings/reset/sun8i-h3-ccu.h @@ -98,9 +98,6 @@ #define RST_BUS_UART1 50 #define RST_BUS_UART2 51 #define RST_BUS_UART3 52 -#define RST_BUS_SCR0 53 - -/* New resets imported in H5 */ -#define RST_BUS_SCR1 54 +#define RST_BUS_SCR 53 #endif /* _DT_BINDINGS_RST_SUN8I_H3_H_ */ diff --git a/include/dt-bindings/reset/tegra124-car.h b/include/dt-bindings/reset/tegra124-car.h index 97d2f3db82..070e4f6e74 100644 --- a/include/dt-bindings/reset/tegra124-car.h +++ b/include/dt-bindings/reset/tegra124-car.h @@ -1,4 
+1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ /* * This header provides Tegra124-specific constants for binding * nvidia,tegra124-car. diff --git a/include/dt-bindings/reset/ti-syscon.h b/include/dt-bindings/reset/ti-syscon.h index eacc0f1808..884fd91df8 100644 --- a/include/dt-bindings/reset/ti-syscon.h +++ b/include/dt-bindings/reset/ti-syscon.h @@ -1,8 +1,17 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * TI Syscon Reset definitions * - * Copyright (C) 2015-2016 Texas Instruments Incorporated - https://www.ti.com/ + * Copyright (C) 2015-2016 Texas Instruments Incorporated - http://www.ti.com/ + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. */ #ifndef __DT_BINDINGS_RESET_TI_SYSCON_H__ diff --git a/include/dt-bindings/soc/qcom,gsbi.h b/include/dt-bindings/soc/qcom,gsbi.h index c00ab8c5f3..7ac4292333 100644 --- a/include/dt-bindings/soc/qcom,gsbi.h +++ b/include/dt-bindings/soc/qcom,gsbi.h @@ -1,5 +1,13 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* Copyright (c) 2013, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
*/ #ifndef __DT_BINDINGS_QCOM_GSBI_H #define __DT_BINDINGS_QCOM_GSBI_H diff --git a/include/dt-bindings/soc/rockchip,boot-mode.h b/include/dt-bindings/soc/rockchip,boot-mode.h index 4b0914c098..ae7c867e73 100644 --- a/include/dt-bindings/soc/rockchip,boot-mode.h +++ b/include/dt-bindings/soc/rockchip,boot-mode.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef __ROCKCHIP_BOOT_MODE_H #define __ROCKCHIP_BOOT_MODE_H diff --git a/include/dt-bindings/sound/apq8016-lpass.h b/include/dt-bindings/sound/apq8016-lpass.h index dc605c4bc2..499076e980 100644 --- a/include/dt-bindings/sound/apq8016-lpass.h +++ b/include/dt-bindings/sound/apq8016-lpass.h @@ -1,9 +1,9 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef __DT_APQ8016_LPASS_H #define __DT_APQ8016_LPASS_H -#include - -/* NOTE: Use qcom,lpass.h to define any AIF ID's for LPASS */ +#define MI2S_PRIMARY 0 +#define MI2S_SECONDARY 1 +#define MI2S_TERTIARY 2 +#define MI2S_QUATERNARY 3 #endif /* __DT_APQ8016_LPASS_H */ diff --git a/include/dt-bindings/sound/audio-jack-events.h b/include/dt-bindings/sound/audio-jack-events.h index 1b29b29512..378349f280 100644 --- a/include/dt-bindings/sound/audio-jack-events.h +++ b/include/dt-bindings/sound/audio-jack-events.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef __AUDIO_JACK_EVENTS_H #define __AUDIO_JACK_EVENTS_H diff --git a/include/dt-bindings/sound/cs35l32.h b/include/dt-bindings/sound/cs35l32.h index 7549d5019e..0c6d6a3c15 100644 --- a/include/dt-bindings/sound/cs35l32.h +++ b/include/dt-bindings/sound/cs35l32.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef __DT_CS35L32_H #define __DT_CS35L32_H diff --git a/include/dt-bindings/sound/fsl-imx-audmux.h b/include/dt-bindings/sound/fsl-imx-audmux.h index 15f138bebe..50b09e96f2 100644 --- a/include/dt-bindings/sound/fsl-imx-audmux.h +++ b/include/dt-bindings/sound/fsl-imx-audmux.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef __DT_FSL_IMX_AUDMUX_H #define 
__DT_FSL_IMX_AUDMUX_H @@ -25,13 +24,6 @@ #define MX51_AUDMUX_PORT6 5 #define MX51_AUDMUX_PORT7 6 -/* - * TFCSEL/RFCSEL (i.MX27) or TFSEL/TCSEL/RFSEL/RCSEL (i.MX31/51/53/6Q) - * can be sourced from Rx/Tx. - */ -#define IMX_AUDMUX_RXFS 0x8 -#define IMX_AUDMUX_RXCLK 0x8 - /* Register definitions for the i.MX21/27 Digital Audio Multiplexer */ #define IMX_AUDMUX_V1_PCR_INMMASK(x) ((x) & 0xff) #define IMX_AUDMUX_V1_PCR_INMEN (1 << 8) diff --git a/include/dt-bindings/sound/samsung-i2s.h b/include/dt-bindings/sound/samsung-i2s.h index 250de0d6c7..0c69818d53 100644 --- a/include/dt-bindings/sound/samsung-i2s.h +++ b/include/dt-bindings/sound/samsung-i2s.h @@ -1,15 +1,8 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _DT_BINDINGS_SAMSUNG_I2S_H #define _DT_BINDINGS_SAMSUNG_I2S_H -#define CLK_I2S_CDCLK 0 /* the CDCLK (CODECLKO) gate clock */ - -#define CLK_I2S_RCLK_SRC 1 /* the RCLKSRC mux clock (corresponding to - * RCLKSRC bit in IISMOD register) - */ - -#define CLK_I2S_RCLK_PSR 2 /* the RCLK prescaler divider clock - * (corresponding to the IISPSR register) - */ +#define CLK_I2S_CDCLK 0 +#define CLK_I2S_RCLK_SRC 1 +#define CLK_I2S_RCLK_PSR 2 #endif /* _DT_BINDINGS_SAMSUNG_I2S_H */ diff --git a/include/dt-bindings/sound/tas2552.h b/include/dt-bindings/sound/tas2552.h index 0daeb83858..a4e1a07998 100644 --- a/include/dt-bindings/sound/tas2552.h +++ b/include/dt-bindings/sound/tas2552.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef __DT_TAS2552_H #define __DT_TAS2552_H diff --git a/include/dt-bindings/sound/tlv320aic31xx-micbias.h b/include/dt-bindings/sound/tlv320aic31xx-micbias.h index c6895a18a4..f5cb772ab9 100644 --- a/include/dt-bindings/sound/tlv320aic31xx-micbias.h +++ b/include/dt-bindings/sound/tlv320aic31xx-micbias.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef __DT_TLV320AIC31XX_MICBIAS_H #define __DT_TLV320AIC31XX_MICBIAS_H diff --git a/include/dt-bindings/spmi/spmi.h b/include/dt-bindings/spmi/spmi.h index 
ad4a43481d..d11e1e5438 100644 --- a/include/dt-bindings/spmi/spmi.h +++ b/include/dt-bindings/spmi/spmi.h @@ -1,5 +1,13 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* Copyright (c) 2013, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. */ #ifndef __DT_BINDINGS_SPMI_H #define __DT_BINDINGS_SPMI_H diff --git a/include/dt-bindings/thermal/tegra124-soctherm.h b/include/dt-bindings/thermal/tegra124-soctherm.h index 444c7bdde1..2a99f1d52b 100644 --- a/include/dt-bindings/thermal/tegra124-soctherm.h +++ b/include/dt-bindings/thermal/tegra124-soctherm.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ /* * This header provides constants for binding nvidia,tegra124-soctherm. */ @@ -12,9 +11,9 @@ #define TEGRA124_SOCTHERM_SENSOR_PLLX 3 #define TEGRA124_SOCTHERM_SENSOR_NUM 4 -#define TEGRA_SOCTHERM_THROT_LEVEL_NONE 0 -#define TEGRA_SOCTHERM_THROT_LEVEL_LOW 1 -#define TEGRA_SOCTHERM_THROT_LEVEL_MED 2 -#define TEGRA_SOCTHERM_THROT_LEVEL_HIGH 3 +#define TEGRA_SOCTHERM_THROT_LEVEL_LOW 0 +#define TEGRA_SOCTHERM_THROT_LEVEL_MED 1 +#define TEGRA_SOCTHERM_THROT_LEVEL_HIGH 2 +#define TEGRA_SOCTHERM_THROT_LEVEL_NONE -1 #endif diff --git a/include/dt-bindings/thermal/thermal.h b/include/dt-bindings/thermal/thermal.h index bc7babb1a6..b5e6b0069a 100644 --- a/include/dt-bindings/thermal/thermal.h +++ b/include/dt-bindings/thermal/thermal.h @@ -1,9 +1,10 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * This header provides constants for most thermal bindings. 
* * Copyright (C) 2013 Texas Instruments * Eduardo Valentin + * + * GPLv2 only */ #ifndef _DT_BINDINGS_THERMAL_THERMAL_H diff --git a/include/dt-bindings/thermal/thermal_exynos.h b/include/dt-bindings/thermal/thermal_exynos.h index 52fcb51dda..0646500bca 100644 --- a/include/dt-bindings/thermal/thermal_exynos.h +++ b/include/dt-bindings/thermal/thermal_exynos.h @@ -1,9 +1,19 @@ -/* SPDX-License-Identifier: GPL-2.0+ */ /* - * thermal_exynos.h - Samsung Exynos TMU device tree definitions + * thermal_exynos.h - Samsung EXYNOS TMU device tree definitions * * Copyright (C) 2014 Samsung Electronics * Lukasz Majewski + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * */ #ifndef _EXYNOS_THERMAL_TMU_DT_H diff --git a/include/keys/asymmetric-parser.h b/include/keys/asymmetric-parser.h index c47dc5405f..09b3b4807f 100644 --- a/include/keys/asymmetric-parser.h +++ b/include/keys/asymmetric-parser.h @@ -1,10 +1,14 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ /* Asymmetric public-key cryptography data parser * - * See Documentation/crypto/asymmetric-keys.rst + * See Documentation/crypto/asymmetric-keys.txt * * Copyright (C) 2012 Red Hat, Inc. All Rights Reserved. * Written by David Howells (dhowells@redhat.com) + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public Licence + * as published by the Free Software Foundation; either version + * 2 of the Licence, or (at your option) any later version. 
*/ #ifndef _KEYS_ASYMMETRIC_PARSER_H diff --git a/include/keys/asymmetric-subtype.h b/include/keys/asymmetric-subtype.h index d55171f640..afcbfd4baa 100644 --- a/include/keys/asymmetric-subtype.h +++ b/include/keys/asymmetric-subtype.h @@ -1,10 +1,14 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ /* Asymmetric public-key cryptography key subtype * - * See Documentation/crypto/asymmetric-keys.rst + * See Documentation/security/asymmetric-keys.txt * * Copyright (C) 2012 Red Hat, Inc. All Rights Reserved. * Written by David Howells (dhowells@redhat.com) + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public Licence + * as published by the Free Software Foundation; either version + * 2 of the Licence, or (at your option) any later version. */ #ifndef _KEYS_ASYMMETRIC_SUBTYPE_H @@ -13,8 +17,6 @@ #include #include -struct kernel_pkey_query; -struct kernel_pkey_params; struct public_key_signature; /* @@ -32,17 +34,10 @@ struct asymmetric_key_subtype { /* Destroy a key of this subtype */ void (*destroy)(void *payload_crypto, void *payload_auth); - int (*query)(const struct kernel_pkey_params *params, - struct kernel_pkey_query *info); - - /* Encrypt/decrypt/sign data */ - int (*eds_op)(struct kernel_pkey_params *params, - const void *in, void *out); - /* Verify the signature on a key of this subtype (optional) */ int (*verify_signature)(const struct key *key, const struct public_key_signature *sig); -}; +} __do_const; /** * asymmetric_key_subtype - Get the subtype from an asymmetric key diff --git a/include/keys/asymmetric-type.h b/include/keys/asymmetric-type.h index c432fdb854..b38240716d 100644 --- a/include/keys/asymmetric-type.h +++ b/include/keys/asymmetric-type.h @@ -1,10 +1,14 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ /* Asymmetric Public-key cryptography key type interface * - * See Documentation/crypto/asymmetric-keys.rst + * See Documentation/security/asymmetric-keys.txt * * 
Copyright (C) 2012 Red Hat, Inc. All Rights Reserved. * Written by David Howells (dhowells@redhat.com) + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public Licence + * as published by the Free Software Foundation; either version + * 2 of the Licence, or (at your option) any later version. */ #ifndef _KEYS_ASYMMETRIC_TYPE_H @@ -72,12 +76,6 @@ const struct asymmetric_key_ids *asymmetric_key_ids(const struct key *key) return key->payload.data[asym_key_ids]; } -static inline -const struct public_key *asymmetric_key_public_key(const struct key *key) -{ - return key->payload.data[asym_crypto]; -} - extern struct key *find_asymmetric_key(struct key *keyring, const struct asymmetric_key_id *id_0, const struct asymmetric_key_id *id_1, diff --git a/include/keys/big_key-type.h b/include/keys/big_key-type.h index 988d90d77f..e0970a5781 100644 --- a/include/keys/big_key-type.h +++ b/include/keys/big_key-type.h @@ -1,8 +1,12 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ /* Big capacity key type. * * Copyright (C) 2013 Red Hat, Inc. All Rights Reserved. * Written by David Howells (dhowells@redhat.com) + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. 
*/ #ifndef _KEYS_BIG_KEY_TYPE_H @@ -17,7 +21,6 @@ extern void big_key_free_preparse(struct key_preparsed_payload *prep); extern void big_key_revoke(struct key *key); extern void big_key_destroy(struct key *key); extern void big_key_describe(const struct key *big_key, struct seq_file *m); -extern long big_key_read(const struct key *key, char *buffer, size_t buflen); -extern int big_key_update(struct key *key, struct key_preparsed_payload *prep); +extern long big_key_read(const struct key *key, char __user *buffer, size_t buflen); #endif /* _KEYS_BIG_KEY_TYPE_H */ diff --git a/include/keys/ceph-type.h b/include/keys/ceph-type.h index aa6d3e050c..f69c4ac197 100644 --- a/include/keys/ceph-type.h +++ b/include/keys/ceph-type.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _KEYS_CEPH_TYPE_H #define _KEYS_CEPH_TYPE_H diff --git a/include/keys/dns_resolver-type.h b/include/keys/dns_resolver-type.h index 218ca22fb0..9284a19393 100644 --- a/include/keys/dns_resolver-type.h +++ b/include/keys/dns_resolver-type.h @@ -1,8 +1,12 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ /* DNS resolver key type * * Copyright (C) 2010 Wang Lei. All Rights Reserved. * Written by Wang Lei (wang840925@gmail.com) + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. 
*/ #ifndef _KEYS_DNS_RESOLVER_TYPE_H diff --git a/include/keys/encrypted-type.h b/include/keys/encrypted-type.h index abfcbe0200..377bc278b1 100644 --- a/include/keys/encrypted-type.h +++ b/include/keys/encrypted-type.h @@ -1,18 +1,21 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright (C) 2010 IBM Corporation * Copyright (C) 2010 Politecnico di Torino, Italy - * TORSEC group -- https://security.polito.it + * TORSEC group -- http://security.polito.it * * Authors: * Mimi Zohar * Roberto Sassu + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, version 2 of the License. */ #ifndef _KEYS_ENCRYPTED_TYPE_H #define _KEYS_ENCRYPTED_TYPE_H -#include +#include #include struct encrypted_key_payload { @@ -27,7 +30,7 @@ struct encrypted_key_payload { unsigned short payload_datalen; /* payload data length */ unsigned short encrypted_key_format; /* encrypted key format */ u8 *decrypted_data; /* decrypted data */ - u8 payload_data[]; /* payload data + datablob + hmac */ + u8 payload_data[0]; /* payload data + datablob + hmac */ }; extern struct key_type key_type_encrypted; diff --git a/include/keys/keyring-type.h b/include/keys/keyring-type.h index 1dc83862f5..fca5c62340 100644 --- a/include/keys/keyring-type.h +++ b/include/keys/keyring-type.h @@ -1,8 +1,12 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ /* Keyring key type * * Copyright (C) 2008, 2013 Red Hat, Inc. All Rights Reserved. * Written by David Howells (dhowells@redhat.com) + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. 
*/ #ifndef _KEYS_KEYRING_TYPE_H diff --git a/include/keys/rxrpc-type.h b/include/keys/rxrpc-type.h index 333c0f49a9..5e8f2c5f3d 100644 --- a/include/keys/rxrpc-type.h +++ b/include/keys/rxrpc-type.h @@ -1,14 +1,18 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ /* RxRPC key type * * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved. * Written by David Howells (dhowells@redhat.com) + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. */ #ifndef _KEYS_RXRPC_TYPE_H #define _KEYS_RXRPC_TYPE_H -#include +#include /* * key type for AF_RXRPC keys @@ -28,7 +32,55 @@ struct rxkad_key { u8 primary_flag; /* T if key for primary cell for this user */ u16 ticket_len; /* length of ticket[] */ u8 session_key[8]; /* DES session key */ - u8 ticket[]; /* the encrypted ticket */ + u8 ticket[0]; /* the encrypted ticket */ +}; + +/* + * Kerberos 5 principal + * name/name/name@realm + */ +struct krb5_principal { + u8 n_name_parts; /* N of parts of the name part of the principal */ + char **name_parts; /* parts of the name part of the principal */ + char *realm; /* parts of the realm part of the principal */ +}; + +/* + * Kerberos 5 tagged data + */ +struct krb5_tagged_data { + /* for tag value, see /usr/include/krb5/krb5.h + * - KRB5_AUTHDATA_* for auth data + * - + */ + s32 tag; + u32 data_len; + u8 *data; +}; + +/* + * RxRPC key for Kerberos V (type-5 security) + */ +struct rxk5_key { + u64 authtime; /* time at which auth token generated */ + u64 starttime; /* time at which auth token starts */ + u64 endtime; /* time at which auth token expired */ + u64 renew_till; /* time to which auth token can be renewed */ + s32 is_skey; /* T if ticket is encrypted in another ticket's + * skey */ + s32 flags; /* mask of TKT_FLG_* bits (krb5/krb5.h) */ + struct krb5_principal client; /* 
client principal name */ + struct krb5_principal server; /* server principal name */ + u16 ticket_len; /* length of ticket */ + u16 ticket2_len; /* length of second ticket */ + u8 n_authdata; /* number of authorisation data elements */ + u8 n_addresses; /* number of addresses */ + struct krb5_tagged_data session; /* session data; tag is enctype */ + struct krb5_tagged_data *addresses; /* addresses */ + u8 *ticket; /* krb5 ticket */ + u8 *ticket2; /* second krb5 ticket, if related to ticket (via + * DUPLICATE-SKEY or ENC-TKT-IN-SKEY) */ + struct krb5_tagged_data *authdata; /* authorisation data */ }; /* @@ -36,10 +88,10 @@ struct rxkad_key { */ struct rxrpc_key_token { u16 security_index; /* RxRPC header security index */ - bool no_leak_key; /* Don't copy the key to userspace */ struct rxrpc_key_token *next; /* the next token in the list */ union { struct rxkad_key *kad; + struct rxk5_key *k5; }; }; @@ -52,7 +104,7 @@ struct rxrpc_key_data_v1 { u32 expiry; /* time_t */ u32 kvno; u8 session_key[8]; - u8 ticket[]; + u8 ticket[0]; }; /* @@ -68,28 +120,11 @@ struct rxrpc_key_data_v1 { #define AFSTOKEN_RK_TIX_MAX 12000 /* max RxKAD ticket size */ #define AFSTOKEN_GK_KEY_MAX 64 /* max GSSAPI key size */ #define AFSTOKEN_GK_TOKEN_MAX 16384 /* max GSSAPI token size */ - -/* - * Truncate a time64_t to the range from 1970 to 2106 as in the network - * protocol. - */ -static inline u32 rxrpc_time64_to_u32(time64_t time) -{ - if (time < 0) - return 0; - - if (time > UINT_MAX) - return UINT_MAX; - - return (u32)time; -} - -/* - * Extend u32 back to time64_t using the same 1970-2106 range. 
- */ -static inline time64_t rxrpc_u32_to_time64(u32 time) -{ - return (time64_t)time; -} +#define AFSTOKEN_K5_COMPONENTS_MAX 16 /* max K5 components */ +#define AFSTOKEN_K5_NAME_MAX 128 /* max K5 name length */ +#define AFSTOKEN_K5_REALM_MAX 64 /* max K5 realm name length */ +#define AFSTOKEN_K5_TIX_MAX 16384 /* max K5 ticket size */ +#define AFSTOKEN_K5_ADDRESSES_MAX 16 /* max K5 addresses */ +#define AFSTOKEN_K5_AUTHDATA_MAX 16 /* max K5 pieces of auth data */ #endif /* _KEYS_RXRPC_TYPE_H */ diff --git a/include/keys/system_keyring.h b/include/keys/system_keyring.h index 6acd3cf13a..fbd4647767 100644 --- a/include/keys/system_keyring.h +++ b/include/keys/system_keyring.h @@ -1,8 +1,12 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ /* System keyring containing trusted public keys. * * Copyright (C) 2013 Red Hat, Inc. All Rights Reserved. * Written by David Howells (dhowells@redhat.com) + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public Licence + * as published by the Free Software Foundation; either version + * 2 of the Licence, or (at your option) any later version. 
*/ #ifndef _KEYS_SYSTEM_KEYRING_H @@ -14,63 +18,21 @@ extern int restrict_link_by_builtin_trusted(struct key *keyring, const struct key_type *type, - const union key_payload *payload, - struct key *restriction_key); -extern __init int load_module_cert(struct key *keyring); + const union key_payload *payload); #else #define restrict_link_by_builtin_trusted restrict_link_reject - -static inline __init int load_module_cert(struct key *keyring) -{ - return 0; -} - #endif #ifdef CONFIG_SECONDARY_TRUSTED_KEYRING extern int restrict_link_by_builtin_and_secondary_trusted( struct key *keyring, const struct key_type *type, - const union key_payload *payload, - struct key *restriction_key); + const union key_payload *payload); #else #define restrict_link_by_builtin_and_secondary_trusted restrict_link_by_builtin_trusted #endif -extern struct pkcs7_message *pkcs7; -#ifdef CONFIG_SYSTEM_BLACKLIST_KEYRING -extern int mark_hash_blacklisted(const char *hash); -extern int is_hash_blacklisted(const u8 *hash, size_t hash_len, - const char *type); -extern int is_binary_blacklisted(const u8 *hash, size_t hash_len); -#else -static inline int is_hash_blacklisted(const u8 *hash, size_t hash_len, - const char *type) -{ - return 0; -} - -static inline int is_binary_blacklisted(const u8 *hash, size_t hash_len) -{ - return 0; -} -#endif - -#ifdef CONFIG_SYSTEM_REVOCATION_LIST -extern int add_key_to_revocation_list(const char *data, size_t size); -extern int is_key_on_revocation_list(struct pkcs7_message *pkcs7); -#else -static inline int add_key_to_revocation_list(const char *data, size_t size) -{ - return 0; -} -static inline int is_key_on_revocation_list(struct pkcs7_message *pkcs7) -{ - return -ENOKEY; -} -#endif - #ifdef CONFIG_IMA_BLACKLIST_KEYRING extern struct key *ima_blacklist_keyring; @@ -85,13 +47,5 @@ static inline struct key *get_ima_blacklist_keyring(void) } #endif /* CONFIG_IMA_BLACKLIST_KEYRING */ -#if defined(CONFIG_INTEGRITY_PLATFORM_KEYRING) && \ - 
defined(CONFIG_SYSTEM_TRUSTED_KEYRING) -extern void __init set_platform_trusted_keys(struct key *keyring); -#else -static inline void set_platform_trusted_keys(struct key *keyring) -{ -} -#endif #endif /* _KEYS_SYSTEM_KEYRING_H */ diff --git a/include/keys/trusted-type.h b/include/keys/trusted-type.h index d89fa2579a..4ea7e55f20 100644 --- a/include/keys/trusted-type.h +++ b/include/keys/trusted-type.h @@ -1,7 +1,10 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright (C) 2010 IBM Corporation * Author: David Safford + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, version 2 of the License. */ #ifndef _KEYS_TRUSTED_TYPE_H @@ -11,12 +14,6 @@ #include #include -#ifdef pr_fmt -#undef pr_fmt -#endif - -#define pr_fmt(fmt) "trusted_key: " fmt - #define MIN_KEY_SIZE 32 #define MAX_KEY_SIZE 128 #define MAX_BLOB_SIZE 512 @@ -28,7 +25,6 @@ struct trusted_key_payload { unsigned int key_len; unsigned int blob_len; unsigned char migratable; - unsigned char old_format; unsigned char key[MAX_KEY_SIZE + 1]; unsigned char blob[MAX_BLOB_SIZE]; }; @@ -37,7 +33,6 @@ struct trusted_key_options { uint16_t keytype; uint32_t keyhandle; unsigned char keyauth[TPM_DIGEST_SIZE]; - uint32_t blobauth_len; unsigned char blobauth[TPM_DIGEST_SIZE]; uint32_t pcrinfo_len; unsigned char pcrinfo[MAX_PCRINFO_SIZE]; @@ -48,53 +43,6 @@ struct trusted_key_options { uint32_t policyhandle; }; -struct trusted_key_ops { - /* - * flag to indicate if trusted key implementation supports migration - * or not. - */ - unsigned char migratable; - - /* Initialize key interface. */ - int (*init)(void); - - /* Seal a key. */ - int (*seal)(struct trusted_key_payload *p, char *datablob); - - /* Unseal a key. */ - int (*unseal)(struct trusted_key_payload *p, char *datablob); - - /* Get a randomized key. 
*/ - int (*get_random)(unsigned char *key, size_t key_len); - - /* Exit key interface. */ - void (*exit)(void); -}; - -struct trusted_key_source { - char *name; - struct trusted_key_ops *ops; -}; - extern struct key_type key_type_trusted; -#define TRUSTED_DEBUG 0 - -#if TRUSTED_DEBUG -static inline void dump_payload(struct trusted_key_payload *p) -{ - pr_info("key_len %d\n", p->key_len); - print_hex_dump(KERN_INFO, "key ", DUMP_PREFIX_NONE, - 16, 1, p->key, p->key_len, 0); - pr_info("bloblen %d\n", p->blob_len); - print_hex_dump(KERN_INFO, "blob ", DUMP_PREFIX_NONE, - 16, 1, p->blob, p->blob_len, 0); - pr_info("migratable %d\n", p->migratable); -} -#else -static inline void dump_payload(struct trusted_key_payload *p) -{ -} -#endif - #endif /* _KEYS_TRUSTED_TYPE_H */ diff --git a/include/keys/user-type.h b/include/keys/user-type.h index 386c314327..c9ebdc7b93 100644 --- a/include/keys/user-type.h +++ b/include/keys/user-type.h @@ -1,14 +1,18 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ /* user-type.h: User-defined key type * * Copyright (C) 2005 Red Hat, Inc. All Rights Reserved. * Written by David Howells (dhowells@redhat.com) + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. 
*/ #ifndef _KEYS_USER_TYPE_H #define _KEYS_USER_TYPE_H -#include +#include #include #ifdef CONFIG_KEYS @@ -27,7 +31,7 @@ struct user_key_payload { struct rcu_head rcu; /* RCU destructor */ unsigned short datalen; /* length of this data */ - char data[] __aligned(__alignof__(u64)); /* actual data */ + char data[0]; /* actual data */ }; extern struct key_type key_type_user; @@ -41,16 +45,12 @@ extern int user_update(struct key *key, struct key_preparsed_payload *prep); extern void user_revoke(struct key *key); extern void user_destroy(struct key *key); extern void user_describe(const struct key *user, struct seq_file *m); -extern long user_read(const struct key *key, char *buffer, size_t buflen); +extern long user_read(const struct key *key, + char __user *buffer, size_t buflen); -static inline const struct user_key_payload *user_key_payload_rcu(const struct key *key) +static inline const struct user_key_payload *user_key_payload(const struct key *key) { - return (struct user_key_payload *)dereference_key_rcu(key); -} - -static inline struct user_key_payload *user_key_payload_locked(const struct key *key) -{ - return (struct user_key_payload *)dereference_key_locked((struct key *)key); + return (struct user_key_payload *)rcu_dereference_key(key); } #endif /* CONFIG_KEYS */ diff --git a/include/kvm/arm_arch_timer.h b/include/kvm/arm_arch_timer.h index 51c1938110..dda39d8fa1 100644 --- a/include/kvm/arm_arch_timer.h +++ b/include/kvm/arm_arch_timer.h @@ -1,7 +1,19 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright (C) 2012 ARM Ltd. * Author: Marc Zyngier + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #ifndef __ASM_ARM_KVM_ARCH_TIMER_H @@ -9,101 +21,59 @@ #include #include +#include -enum kvm_arch_timers { - TIMER_PTIMER, - TIMER_VTIMER, - NR_KVM_TIMERS +struct arch_timer_kvm { + /* Virtual offset */ + cycle_t cntvoff; }; -enum kvm_arch_timer_regs { - TIMER_REG_CNT, - TIMER_REG_CVAL, - TIMER_REG_TVAL, - TIMER_REG_CTL, -}; +struct arch_timer_cpu { + /* Registers: control register, timer value */ + u32 cntv_ctl; /* Saved/restored */ + cycle_t cntv_cval; /* Saved/restored */ -struct arch_timer_context { - struct kvm_vcpu *vcpu; + /* + * Anything that is not used directly from assembly code goes + * here. + */ + + /* Background timer used when the guest is not running */ + struct hrtimer timer; + + /* Work queued with the above timer expires */ + struct work_struct expired; + + /* Background timer active */ + bool armed; /* Timer IRQ */ struct kvm_irq_level irq; - /* Emulated Timer (may be unused) */ - struct hrtimer hrtimer; - - /* - * We have multiple paths which can save/restore the timer state onto - * the hardware, so we need some way of keeping track of where the - * latest state is. 
- */ - bool loaded; - - /* Duplicated state from arch_timer.c for convenience */ - u32 host_timer_irq; - u32 host_timer_irq_flags; -}; - -struct timer_map { - struct arch_timer_context *direct_vtimer; - struct arch_timer_context *direct_ptimer; - struct arch_timer_context *emul_ptimer; -}; - -struct arch_timer_cpu { - struct arch_timer_context timers[NR_KVM_TIMERS]; - - /* Background timer used when the guest is not running */ - struct hrtimer bg_timer; + /* Active IRQ state caching */ + bool active_cleared_last; /* Is the timer enabled */ bool enabled; }; -int kvm_timer_hyp_init(bool); +int kvm_timer_hyp_init(void); int kvm_timer_enable(struct kvm_vcpu *vcpu); -int kvm_timer_vcpu_reset(struct kvm_vcpu *vcpu); +void kvm_timer_init(struct kvm *kvm); +int kvm_timer_vcpu_reset(struct kvm_vcpu *vcpu, + const struct kvm_irq_level *irq); void kvm_timer_vcpu_init(struct kvm_vcpu *vcpu); -void kvm_timer_sync_user(struct kvm_vcpu *vcpu); -bool kvm_timer_should_notify_user(struct kvm_vcpu *vcpu); -void kvm_timer_update_run(struct kvm_vcpu *vcpu); +void kvm_timer_flush_hwstate(struct kvm_vcpu *vcpu); +void kvm_timer_sync_hwstate(struct kvm_vcpu *vcpu); void kvm_timer_vcpu_terminate(struct kvm_vcpu *vcpu); u64 kvm_arm_timer_get_reg(struct kvm_vcpu *, u64 regid); int kvm_arm_timer_set_reg(struct kvm_vcpu *, u64 regid, u64 value); -int kvm_arm_timer_set_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr); -int kvm_arm_timer_get_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr); -int kvm_arm_timer_has_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr); +bool kvm_timer_should_fire(struct kvm_vcpu *vcpu); +void kvm_timer_schedule(struct kvm_vcpu *vcpu); +void kvm_timer_unschedule(struct kvm_vcpu *vcpu); -bool kvm_timer_is_pending(struct kvm_vcpu *vcpu); - -u64 kvm_phys_timer_read(void); - -void kvm_timer_vcpu_load(struct kvm_vcpu *vcpu); void kvm_timer_vcpu_put(struct kvm_vcpu *vcpu); -void kvm_timer_init_vhe(void); - -bool kvm_arch_timer_get_input_level(int 
vintid); - -#define vcpu_timer(v) (&(v)->arch.timer_cpu) -#define vcpu_get_timer(v,t) (&vcpu_timer(v)->timers[(t)]) -#define vcpu_vtimer(v) (&(v)->arch.timer_cpu.timers[TIMER_VTIMER]) -#define vcpu_ptimer(v) (&(v)->arch.timer_cpu.timers[TIMER_PTIMER]) - -#define arch_timer_ctx_index(ctx) ((ctx) - vcpu_timer((ctx)->vcpu)->timers) - -u64 kvm_arm_timer_read_sysreg(struct kvm_vcpu *vcpu, - enum kvm_arch_timers tmr, - enum kvm_arch_timer_regs treg); -void kvm_arm_timer_write_sysreg(struct kvm_vcpu *vcpu, - enum kvm_arch_timers tmr, - enum kvm_arch_timer_regs treg, - u64 val); - -/* Needed for tracing */ -u32 timer_get_ctl(struct arch_timer_context *ctxt); -u64 timer_get_cval(struct arch_timer_context *ctxt); - #endif diff --git a/include/kvm/arm_pmu.h b/include/kvm/arm_pmu.h index 90f21898aa..92e7e97ca8 100644 --- a/include/kvm/arm_pmu.h +++ b/include/kvm/arm_pmu.h @@ -1,7 +1,18 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright (C) 2015 Linaro Ltd. * Author: Shannon Zhao + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . 
*/ #ifndef __ASM_ARM_KVM_PMU_H @@ -11,60 +22,50 @@ #include #define ARMV8_PMU_CYCLE_IDX (ARMV8_PMU_MAX_COUNTERS - 1) -#define ARMV8_PMU_MAX_COUNTER_PAIRS ((ARMV8_PMU_MAX_COUNTERS + 1) >> 1) -DECLARE_STATIC_KEY_FALSE(kvm_arm_pmu_available); - -static __always_inline bool kvm_arm_support_pmu_v3(void) -{ - return static_branch_likely(&kvm_arm_pmu_available); -} - -#ifdef CONFIG_HW_PERF_EVENTS +#ifdef CONFIG_KVM_ARM_PMU struct kvm_pmc { u8 idx; /* index into the pmu->pmc array */ struct perf_event *perf_event; + u64 bitmask; }; struct kvm_pmu { int irq_num; struct kvm_pmc pmc[ARMV8_PMU_MAX_COUNTERS]; - DECLARE_BITMAP(chained, ARMV8_PMU_MAX_COUNTER_PAIRS); - bool created; + bool ready; bool irq_level; - struct irq_work overflow_work; }; +#define kvm_arm_pmu_v3_ready(v) ((v)->arch.pmu.ready) #define kvm_arm_pmu_irq_initialized(v) ((v)->arch.pmu.irq_num >= VGIC_NR_SGIS) u64 kvm_pmu_get_counter_value(struct kvm_vcpu *vcpu, u64 select_idx); void kvm_pmu_set_counter_value(struct kvm_vcpu *vcpu, u64 select_idx, u64 val); u64 kvm_pmu_valid_counter_mask(struct kvm_vcpu *vcpu); -u64 kvm_pmu_get_pmceid(struct kvm_vcpu *vcpu, bool pmceid1); -void kvm_pmu_vcpu_init(struct kvm_vcpu *vcpu); void kvm_pmu_vcpu_reset(struct kvm_vcpu *vcpu); void kvm_pmu_vcpu_destroy(struct kvm_vcpu *vcpu); -void kvm_pmu_disable_counter_mask(struct kvm_vcpu *vcpu, u64 val); -void kvm_pmu_enable_counter_mask(struct kvm_vcpu *vcpu, u64 val); +void kvm_pmu_disable_counter(struct kvm_vcpu *vcpu, u64 val); +void kvm_pmu_enable_counter(struct kvm_vcpu *vcpu, u64 val); +void kvm_pmu_overflow_set(struct kvm_vcpu *vcpu, u64 val); void kvm_pmu_flush_hwstate(struct kvm_vcpu *vcpu); void kvm_pmu_sync_hwstate(struct kvm_vcpu *vcpu); -bool kvm_pmu_should_notify_user(struct kvm_vcpu *vcpu); -void kvm_pmu_update_run(struct kvm_vcpu *vcpu); void kvm_pmu_software_increment(struct kvm_vcpu *vcpu, u64 val); void kvm_pmu_handle_pmcr(struct kvm_vcpu *vcpu, u64 val); void kvm_pmu_set_counter_event_type(struct kvm_vcpu *vcpu, 
u64 data, u64 select_idx); +bool kvm_arm_support_pmu_v3(void); int kvm_arm_pmu_v3_set_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr); int kvm_arm_pmu_v3_get_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr); int kvm_arm_pmu_v3_has_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr); -int kvm_arm_pmu_v3_enable(struct kvm_vcpu *vcpu); #else struct kvm_pmu { }; +#define kvm_arm_pmu_v3_ready(v) (false) #define kvm_arm_pmu_irq_initialized(v) (false) static inline u64 kvm_pmu_get_counter_value(struct kvm_vcpu *vcpu, u64 select_idx) @@ -77,22 +78,18 @@ static inline u64 kvm_pmu_valid_counter_mask(struct kvm_vcpu *vcpu) { return 0; } -static inline void kvm_pmu_vcpu_init(struct kvm_vcpu *vcpu) {} static inline void kvm_pmu_vcpu_reset(struct kvm_vcpu *vcpu) {} static inline void kvm_pmu_vcpu_destroy(struct kvm_vcpu *vcpu) {} -static inline void kvm_pmu_disable_counter_mask(struct kvm_vcpu *vcpu, u64 val) {} -static inline void kvm_pmu_enable_counter_mask(struct kvm_vcpu *vcpu, u64 val) {} +static inline void kvm_pmu_disable_counter(struct kvm_vcpu *vcpu, u64 val) {} +static inline void kvm_pmu_enable_counter(struct kvm_vcpu *vcpu, u64 val) {} +static inline void kvm_pmu_overflow_set(struct kvm_vcpu *vcpu, u64 val) {} static inline void kvm_pmu_flush_hwstate(struct kvm_vcpu *vcpu) {} static inline void kvm_pmu_sync_hwstate(struct kvm_vcpu *vcpu) {} -static inline bool kvm_pmu_should_notify_user(struct kvm_vcpu *vcpu) -{ - return false; -} -static inline void kvm_pmu_update_run(struct kvm_vcpu *vcpu) {} static inline void kvm_pmu_software_increment(struct kvm_vcpu *vcpu, u64 val) {} static inline void kvm_pmu_handle_pmcr(struct kvm_vcpu *vcpu, u64 val) {} static inline void kvm_pmu_set_counter_event_type(struct kvm_vcpu *vcpu, u64 data, u64 select_idx) {} +static inline bool kvm_arm_support_pmu_v3(void) { return false; } static inline int kvm_arm_pmu_v3_set_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr) { @@ -108,15 +105,6 @@ static inline 
int kvm_arm_pmu_v3_has_attr(struct kvm_vcpu *vcpu, { return -ENXIO; } -static inline int kvm_arm_pmu_v3_enable(struct kvm_vcpu *vcpu) -{ - return 0; -} -static inline u64 kvm_pmu_get_pmceid(struct kvm_vcpu *vcpu, bool pmceid1) -{ - return 0; -} - #endif #endif diff --git a/include/kvm/arm_vgic.h b/include/kvm/arm_vgic.h index e602d848fc..002f0922cd 100644 --- a/include/kvm/arm_vgic.h +++ b/include/kvm/arm_vgic.h @@ -1,6 +1,17 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright (C) 2015, 2016 ARM Ltd. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . 
*/ #ifndef __KVM_ARM_VGIC_H #define __KVM_ARM_VGIC_H @@ -15,9 +26,7 @@ #include #include -#include - -#define VGIC_V3_MAX_CPUS 512 +#define VGIC_V3_MAX_CPUS 255 #define VGIC_V2_MAX_CPUS 8 #define VGIC_NR_IRQS_LEGACY 256 #define VGIC_NR_SGIS 16 @@ -29,10 +38,6 @@ #define VGIC_MIN_LPI 8192 #define KVM_IRQCHIP_NUM_PINS (1020 - 32) -#define irq_is_ppi(irq) ((irq) >= VGIC_NR_SGIS && (irq) < VGIC_NR_PRIVATE_IRQS) -#define irq_is_spi(irq) ((irq) >= VGIC_NR_PRIVATE_IRQS && \ - (irq) <= VGIC_MAX_SPI) - enum vgic_type { VGIC_V2, /* Good ol' GICv2 */ VGIC_V3, /* New fancy GICv3 */ @@ -46,15 +51,11 @@ struct vgic_global { /* Physical address of vgic virtual cpu interface */ phys_addr_t vcpu_base; - /* GICV mapping, kernel VA */ + /* GICV mapping */ void __iomem *vcpu_base_va; - /* GICV mapping, HYP VA */ - void __iomem *vcpu_hyp_va; - /* virtual control interface mapping, kernel VA */ + /* virtual control interface mapping */ void __iomem *vctrl_base; - /* virtual control interface mapping, HYP VA */ - void __iomem *vctrl_hyp; /* Number of implemented list registers */ int nr_lr; @@ -68,17 +69,8 @@ struct vgic_global { /* Only needed for the legacy KVM_CREATE_IRQCHIP */ bool can_emulate_gicv2; - /* Hardware has GICv4? */ - bool has_gicv4; - bool has_gicv4_1; - - /* Pseudo GICv3 from outer space */ - bool no_hw_deactivation; - /* GIC system register CPU interface */ struct static_key_false gicv3_cpuif; - - u32 ich_vtr_el2; }; extern struct vgic_global kvm_vgic_global_state; @@ -92,28 +84,8 @@ enum vgic_irq_config { VGIC_CONFIG_LEVEL }; -/* - * Per-irq ops overriding some common behavious. - * - * Always called in non-preemptible section and the functions can use - * kvm_arm_get_running_vcpu() to get the vcpu pointer for private IRQs. 
- */ -struct irq_ops { - /* Per interrupt flags for special-cased interrupts */ - unsigned long flags; - -#define VGIC_IRQ_SW_RESAMPLE BIT(0) /* Clear the active state for resampling */ - - /* - * Callback function pointer to in-kernel devices that can tell us the - * state of the input level of mapped level-triggered IRQ faster than - * peaking into the physical GIC. - */ - bool (*get_input_level)(int vintid); -}; - struct vgic_irq { - raw_spinlock_t irq_lock; /* Protects the content of the struct */ + spinlock_t irq_lock; /* Protects the content of the struct */ struct list_head lpi_list; /* Used to link all LPIs together */ struct list_head ap_list; @@ -129,37 +101,23 @@ struct vgic_irq { */ u32 intid; /* Guest visible INTID */ + bool pending; bool line_level; /* Level only */ - bool pending_latch; /* The pending latch state used to calculate - * the pending state for both level - * and edge triggered IRQs. */ + bool soft_pending; /* Level only */ bool active; /* not used for LPIs */ bool enabled; bool hw; /* Tied to HW IRQ */ struct kref refcount; /* Used for LPIs */ u32 hwintid; /* HW INTID number */ - unsigned int host_irq; /* linux irq corresponding to hwintid */ union { u8 targets; /* GICv2 target VCPUs mask */ u32 mpidr; /* GICv3 target VCPU */ }; u8 source; /* GICv2 SGIs only */ - u8 active_source; /* GICv2 SGIs only */ u8 priority; - u8 group; /* 0 == group 0, 1 == group 1 */ enum vgic_irq_config config; /* Level or edge */ - - struct irq_ops *ops; - - void *owner; /* Opaque pointer to reserve an interrupt - for in-kernel devices. 
*/ }; -static inline bool vgic_irq_needs_resampling(struct vgic_irq *irq) -{ - return irq->ops && (irq->ops->flags & VGIC_IRQ_SW_RESAMPLE); -} - struct vgic_register_region; struct vgic_its; @@ -187,6 +145,7 @@ struct vgic_its { gpa_t vgic_its_base; bool enabled; + bool initialized; struct vgic_io_device iodev; struct kvm_device *dev; @@ -200,25 +159,12 @@ struct vgic_its { u32 creadr; u32 cwriter; - /* migration ABI revision in use */ - u32 abi_rev; - /* Protects the device and collection lists */ struct mutex its_lock; struct list_head device_list; struct list_head collection_list; }; -struct vgic_state_iter; - -struct vgic_redist_region { - u32 index; - gpa_t base; - u32 count; /* number of redistributors or 0 if single region */ - u32 free_index; /* index of the next free redistributor */ - struct list_head list; -}; - struct vgic_dist { bool in_kernel; bool ready; @@ -227,32 +173,27 @@ struct vgic_dist { /* vGIC model the kernel emulates for the guest (GICv2 or GICv3) */ u32 vgic_model; - /* Implementation revision as reported in the GICD_IIDR */ - u32 implementation_rev; - - /* Userspace can write to GICv2 IGROUPR */ - bool v2_groups_user_writable; - /* Do injected MSIs require an additional device ID? */ bool msis_require_devid; int nr_spis; + /* TODO: Consider moving to global state */ + /* Virtual control interface mapping */ + void __iomem *vctrl_base; + /* base addresses in guest physical address space: */ gpa_t vgic_dist_base; /* distributor */ union { /* either a GICv2 CPU interface */ gpa_t vgic_cpu_base; /* or a number of GICv3 redistributor regions */ - struct list_head rd_regions; + gpa_t vgic_redist_base; }; /* distributor enabled */ bool enabled; - /* Wants SGIs without active state */ - bool nassgireq; - struct vgic_irq *spis; struct vgic_io_device dist_iodev; @@ -263,57 +204,36 @@ struct vgic_dist { * Contains the attributes and gpa of the LPI configuration table. 
* Since we report GICR_TYPER.CommonLPIAff as 0b00, we can share * one address across all redistributors. - * GICv3 spec: IHI 0069E 6.1.1 "LPI Configuration tables" + * GICv3 spec: 6.1.2 "LPI Configuration tables" */ u64 propbaser; /* Protects the lpi_list and the count value below. */ - raw_spinlock_t lpi_list_lock; + spinlock_t lpi_list_lock; struct list_head lpi_list_head; int lpi_list_count; - - /* LPI translation cache */ - struct list_head lpi_translation_cache; - - /* used by vgic-debug */ - struct vgic_state_iter *iter; - - /* - * GICv4 ITS per-VM data, containing the IRQ domain, the VPE - * array, the property table pointer as well as allocation - * data. This essentially ties the Linux IRQ core and ITS - * together, and avoids leaking KVM's data structures anywhere - * else. - */ - struct its_vm its_vm; }; struct vgic_v2_cpu_if { u32 vgic_hcr; u32 vgic_vmcr; + u32 vgic_misr; /* Saved only */ + u64 vgic_eisr; /* Saved only */ + u64 vgic_elrsr; /* Saved only */ u32 vgic_apr; u32 vgic_lr[VGIC_V2_MAX_LRS]; - - unsigned int used_lrs; }; struct vgic_v3_cpu_if { u32 vgic_hcr; u32 vgic_vmcr; u32 vgic_sre; /* Restored only, change ignored */ + u32 vgic_misr; /* Saved only */ + u32 vgic_eisr; /* Saved only */ + u32 vgic_elrsr; /* Saved only */ u32 vgic_ap0r[4]; u32 vgic_ap1r[4]; u64 vgic_lr[VGIC_V3_MAX_LRS]; - - /* - * GICv4 ITS per-VPE data, containing the doorbell IRQ, the - * pending table pointer, the its_vm pointer and a few other - * HW specific things. As for the its_vm structure, this is - * linking the Linux IRQ subsystem and the ITS together. 
- */ - struct its_vpe its_vpe; - - unsigned int used_lrs; }; struct vgic_cpu { @@ -323,9 +243,10 @@ struct vgic_cpu { struct vgic_v3_cpu_if vgic_v3; }; + unsigned int used_lrs; struct vgic_irq private_irqs[VGIC_NR_PRIVATE_IRQS]; - raw_spinlock_t ap_list_lock; /* Protects the ap_list */ + spinlock_t ap_list_lock; /* Protects the ap_list */ /* * List of IRQs that this VCPU should consider because they are either @@ -335,52 +256,42 @@ struct vgic_cpu { */ struct list_head ap_list_head; + u64 live_lrs; + /* * Members below are used with GICv3 emulation only and represent * parts of the redistributor. */ struct vgic_io_device rd_iodev; - struct vgic_redist_region *rdreg; - u32 rdreg_index; + struct vgic_io_device sgi_iodev; /* Contains the attributes and gpa of the LPI pending tables. */ u64 pendbaser; bool lpis_enabled; - - /* Cache guest priority bits */ - u32 num_pri_bits; - - /* Cache guest interrupt ID bits */ - u32 num_id_bits; }; extern struct static_key_false vgic_v2_cpuif_trap; -extern struct static_key_false vgic_v3_cpuif_trap; int kvm_vgic_addr(struct kvm *kvm, unsigned long type, u64 *addr, bool write); void kvm_vgic_early_init(struct kvm *kvm); -int kvm_vgic_vcpu_init(struct kvm_vcpu *vcpu); int kvm_vgic_create(struct kvm *kvm, u32 type); void kvm_vgic_destroy(struct kvm *kvm); +void kvm_vgic_vcpu_early_init(struct kvm_vcpu *vcpu); void kvm_vgic_vcpu_destroy(struct kvm_vcpu *vcpu); int kvm_vgic_map_resources(struct kvm *kvm); int kvm_vgic_hyp_init(void); -void kvm_vgic_init_cpu_hardware(void); int kvm_vgic_inject_irq(struct kvm *kvm, int cpuid, unsigned int intid, - bool level, void *owner); -int kvm_vgic_map_phys_irq(struct kvm_vcpu *vcpu, unsigned int host_irq, - u32 vintid, struct irq_ops *ops); -int kvm_vgic_unmap_phys_irq(struct kvm_vcpu *vcpu, unsigned int vintid); -bool kvm_vgic_map_is_active(struct kvm_vcpu *vcpu, unsigned int vintid); + bool level); +int kvm_vgic_inject_mapped_irq(struct kvm *kvm, int cpuid, unsigned int intid, + bool level); +int 
kvm_vgic_map_phys_irq(struct kvm_vcpu *vcpu, u32 virt_irq, u32 phys_irq); +int kvm_vgic_unmap_phys_irq(struct kvm_vcpu *vcpu, unsigned int virt_irq); +bool kvm_vgic_map_is_active(struct kvm_vcpu *vcpu, unsigned int virt_irq); int kvm_vgic_vcpu_pending_irq(struct kvm_vcpu *vcpu); -void kvm_vgic_load(struct kvm_vcpu *vcpu); -void kvm_vgic_put(struct kvm_vcpu *vcpu); -void kvm_vgic_vmcr_sync(struct kvm_vcpu *vcpu); - #define irqchip_in_kernel(k) (!!((k)->arch.vgic.in_kernel)) #define vgic_initialized(k) ((k)->arch.vgic.initialized) #define vgic_ready(k) ((k)->arch.vgic.ready) @@ -390,9 +301,8 @@ void kvm_vgic_vmcr_sync(struct kvm_vcpu *vcpu); bool kvm_vcpu_has_pending_irqs(struct kvm_vcpu *vcpu); void kvm_vgic_sync_hwstate(struct kvm_vcpu *vcpu); void kvm_vgic_flush_hwstate(struct kvm_vcpu *vcpu); -void kvm_vgic_reset_mapped_irq(struct kvm_vcpu *vcpu, u32 vintid); -void vgic_v3_dispatch_sgi(struct kvm_vcpu *vcpu, u64 reg, bool allow_group1); +void vgic_v3_dispatch_sgi(struct kvm_vcpu *vcpu, u64 reg); /** * kvm_vgic_get_max_vcpus - Get the maximum number of VCPUs allowed by HW @@ -405,24 +315,12 @@ static inline int kvm_vgic_get_max_vcpus(void) return kvm_vgic_global_state.max_gic_vcpus; } +int kvm_send_userspace_msi(struct kvm *kvm, struct kvm_msi *msi); + /** * kvm_vgic_setup_default_irq_routing: * Setup a default flat gsi routing table mapping all SPIs */ int kvm_vgic_setup_default_irq_routing(struct kvm *kvm); -int kvm_vgic_set_owner(struct kvm_vcpu *vcpu, unsigned int intid, void *owner); - -struct kvm_kernel_irq_routing_entry; - -int kvm_vgic_v4_set_forwarding(struct kvm *kvm, int irq, - struct kvm_kernel_irq_routing_entry *irq_entry); - -int kvm_vgic_v4_unset_forwarding(struct kvm *kvm, int irq, - struct kvm_kernel_irq_routing_entry *irq_entry); - -int vgic_v4_load(struct kvm_vcpu *vcpu); -void vgic_v4_commit(struct kvm_vcpu *vcpu); -int vgic_v4_put(struct kvm_vcpu *vcpu, bool need_db); - #endif /* __KVM_ARM_VGIC_H */ diff --git a/include/kvm/iodev.h 
b/include/kvm/iodev.h index d75fc43657..a6d208b916 100644 --- a/include/kvm/iodev.h +++ b/include/kvm/iodev.h @@ -1,4 +1,16 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ #ifndef __KVM_IODEV_H__ #define __KVM_IODEV_H__ diff --git a/include/linux/8250_pci.h b/include/linux/8250_pci.h index 9c777d2c98..b24ff086a6 100644 --- a/include/linux/8250_pci.h +++ b/include/linux/8250_pci.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ /* * Definitions for PCI support. 
*/ diff --git a/include/linux/a.out.h b/include/linux/a.out.h index 600cf45645..ee88416898 100644 --- a/include/linux/a.out.h +++ b/include/linux/a.out.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef __A_OUT_GNU_H__ #define __A_OUT_GNU_H__ diff --git a/include/linux/acct.h b/include/linux/acct.h index bc70e81895..dccc2d4fe7 100644 --- a/include/linux/acct.h +++ b/include/linux/acct.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ /* * BSD Process Accounting for Linux - Definitions * @@ -20,6 +19,9 @@ #ifdef CONFIG_BSD_PROCESS_ACCT +struct vfsmount; +struct super_block; +struct pacct_struct; struct pid_namespace; extern int acct_parm[]; /* for sysctl */ extern void acct_collect(long exitcode, int group_dead); diff --git a/include/linux/acpi.h b/include/linux/acpi.h index 974d497a89..61a3d90f32 100644 --- a/include/linux/acpi.h +++ b/include/linux/acpi.h @@ -1,8 +1,21 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * acpi.h - ACPI Interface * * Copyright (C) 2001 Paul Diefenbaugh + * + * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ */ #ifndef _LINUX_ACPI_H @@ -10,11 +23,9 @@ #include #include /* for struct resource */ -#include #include #include #include -#include #ifndef _LINUX #define _LINUX @@ -44,29 +55,6 @@ static inline acpi_handle acpi_device_handle(struct acpi_device *adev) #define ACPI_COMPANION_SET(dev, adev) set_primary_fwnode(dev, (adev) ? \ acpi_fwnode_handle(adev) : NULL) #define ACPI_HANDLE(dev) acpi_device_handle(ACPI_COMPANION(dev)) -#define ACPI_HANDLE_FWNODE(fwnode) \ - acpi_device_handle(to_acpi_device_node(fwnode)) - -static inline struct fwnode_handle *acpi_alloc_fwnode_static(void) -{ - struct fwnode_handle *fwnode; - - fwnode = kzalloc(sizeof(struct fwnode_handle), GFP_KERNEL); - if (!fwnode) - return NULL; - - fwnode_init(fwnode, &acpi_static_fwnode_ops); - - return fwnode; -} - -static inline void acpi_free_fwnode_static(struct fwnode_handle *fwnode) -{ - if (WARN_ON(!is_acpi_static_node(fwnode))) - return; - - kfree(fwnode); -} /** * ACPI_DEVICE_CLASS - macro used to describe an ACPI device with @@ -89,7 +77,7 @@ static inline bool has_acpi_companion(struct device *dev) static inline void acpi_preset_companion(struct device *dev, struct acpi_device *parent, u64 addr) { - ACPI_COMPANION_SET(dev, acpi_find_child_device(parent, addr, false)); + ACPI_COMPANION_SET(dev, acpi_find_child_device(parent, addr, NULL)); } static inline const char *acpi_dev_name(struct acpi_device *adev) @@ -129,15 +117,10 @@ enum acpi_address_range_id { /* Table Handlers */ -union acpi_subtable_headers { - struct acpi_subtable_header common; - struct acpi_hmat_structure hmat; - struct acpi_prmt_module_header prmt; -}; typedef int (*acpi_tbl_table_handler)(struct acpi_table_header *table); -typedef int (*acpi_tbl_entry_handler)(union acpi_subtable_headers *header, +typedef int (*acpi_tbl_entry_handler)(struct acpi_subtable_header *header, const unsigned long end); /* Debugger support */ @@ -219,20 +202,24 @@ 
struct acpi_subtable_proc { int count; }; -void __iomem *__acpi_map_table(unsigned long phys, unsigned long size); -void __acpi_unmap_table(void __iomem *map, unsigned long size); +char * __acpi_map_table (unsigned long phys_addr, unsigned long size); +void __acpi_unmap_table(char *map, unsigned long size); int early_acpi_boot_init(void); int acpi_boot_init (void); -void acpi_boot_table_prepare (void); void acpi_boot_table_init (void); int acpi_mps_check (void); int acpi_numa_init (void); -int acpi_locate_initial_tables (void); -void acpi_reserve_initial_tables (void); -void acpi_table_init_complete (void); int acpi_table_init (void); int acpi_table_parse(char *id, acpi_tbl_table_handler handler); +int __init acpi_parse_entries(char *id, unsigned long table_size, + acpi_tbl_entry_handler handler, + struct acpi_table_header *table_header, + int entry_id, unsigned int max_entries); +int __init acpi_table_parse_entries(char *id, unsigned long table_size, + int entry_id, + acpi_tbl_entry_handler handler, + unsigned int max_entries); int __init acpi_table_parse_entries(char *id, unsigned long table_size, int entry_id, acpi_tbl_entry_handler handler, @@ -249,7 +236,7 @@ void acpi_table_print_madt_entry (struct acpi_subtable_header *madt); /* the following numa functions are architecture-dependent */ void acpi_numa_slit_init (struct acpi_table_slit *slit); -#if defined(CONFIG_X86) || defined(CONFIG_IA64) || defined(CONFIG_LOONGARCH) +#if defined(CONFIG_X86) || defined(CONFIG_IA64) void acpi_numa_processor_affinity_init (struct acpi_srat_cpu_affinity *pa); #else static inline void @@ -260,12 +247,9 @@ void acpi_numa_x2apic_affinity_init(struct acpi_srat_x2apic_cpu_affinity *pa); #ifdef CONFIG_ARM64 void acpi_numa_gicc_affinity_init(struct acpi_srat_gicc_affinity *pa); -void acpi_arch_dma_setup(struct device *dev, u64 *dma_addr, u64 *dma_size); #else static inline void acpi_numa_gicc_affinity_init(struct acpi_srat_gicc_affinity *pa) { } -static inline void 
-acpi_arch_dma_setup(struct device *dev, u64 *dma_addr, u64 *dma_size) { } #endif int acpi_numa_memory_affinity_init (struct acpi_srat_mem_affinity *ma); @@ -286,30 +270,17 @@ static inline bool invalid_phys_cpuid(phys_cpuid_t phys_id) } /* Validate the processor object's proc_id */ -bool acpi_duplicate_processor_id(int proc_id); -/* Processor _CTS control */ -struct acpi_processor_power; - -#ifdef CONFIG_ACPI_PROCESSOR_CSTATE -bool acpi_processor_claim_cst_control(void); -int acpi_processor_evaluate_cst(acpi_handle handle, u32 cpu, - struct acpi_processor_power *info); -#else -static inline bool acpi_processor_claim_cst_control(void) { return false; } -static inline int acpi_processor_evaluate_cst(acpi_handle handle, u32 cpu, - struct acpi_processor_power *info) -{ - return -ENODEV; -} -#endif +bool acpi_processor_validate_proc_id(int proc_id); #ifdef CONFIG_ACPI_HOTPLUG_CPU /* Arch dependent functions for cpu hotplug support */ -int acpi_map_cpu(acpi_handle handle, phys_cpuid_t physid, u32 acpi_id, - int *pcpu); +int acpi_map_cpu(acpi_handle handle, phys_cpuid_t physid, int *pcpu); int acpi_unmap_cpu(int cpu); +int acpi_map_cpu2node(acpi_handle handle, int cpu, int physid); #endif /* CONFIG_ACPI_HOTPLUG_CPU */ +void acpi_set_processor_mapping(void); + #ifdef CONFIG_ACPI_HOTPLUG_IOAPIC int acpi_get_ioapic_id(acpi_handle handle, u32 gsi_base, u64 *phys_addr); #endif @@ -338,19 +309,10 @@ int acpi_isa_irq_to_gsi (unsigned isa_irq, u32 *gsi); void acpi_set_irq_model(enum acpi_irq_model_id model, struct fwnode_handle *fwnode); -struct irq_domain *acpi_irq_create_hierarchy(unsigned int flags, - unsigned int size, - struct fwnode_handle *fwnode, - const struct irq_domain_ops *ops, - void *host_data); - #ifdef CONFIG_X86_IO_APIC extern int acpi_get_override_irq(u32 gsi, int *trigger, int *polarity); #else -static inline int acpi_get_override_irq(u32 gsi, int *trigger, int *polarity) -{ - return -1; -} +#define acpi_get_override_irq(gsi, trigger, polarity) (-1) #endif /* 
* This function undoes the effect of one call to acpi_register_gsi(). @@ -364,14 +326,7 @@ struct pci_dev; int acpi_pci_irq_enable (struct pci_dev *dev); void acpi_penalize_isa_irq(int irq, int active); bool acpi_isa_irq_available(int irq); -#ifdef CONFIG_PCI void acpi_penalize_sci_irq(int irq, int trigger, int polarity); -#else -static inline void acpi_penalize_sci_irq(int irq, int trigger, - int polarity) -{ -} -#endif void acpi_pci_irq_disable (struct pci_dev *dev); extern int ec_read(u8 addr, u8 *val); @@ -400,7 +355,6 @@ extern acpi_status wmi_install_notify_handler(const char *guid, extern acpi_status wmi_remove_notify_handler(const char *guid); extern acpi_status wmi_get_event_data(u32 event, struct acpi_buffer *out); extern bool wmi_has_guid(const char *guid); -extern char *wmi_get_acpi_device_uid(const char *guid); #endif /* CONFIG_ACPI_WMI */ @@ -424,35 +378,10 @@ extern void acpi_osi_setup(char *str); extern bool acpi_osi_is_win8(void); #ifdef CONFIG_ACPI_NUMA -int acpi_map_pxm_to_node(int pxm); +int acpi_map_pxm_to_online_node(int pxm); int acpi_get_node(acpi_handle handle); - -/** - * pxm_to_online_node - Map proximity ID to online node - * @pxm: ACPI proximity ID - * - * This is similar to pxm_to_node(), but always returns an online - * node. When the mapped node from a given proximity ID is offline, it - * looks up the node distance table and returns the nearest online node. - * - * ACPI device drivers, which are called after the NUMA initialization has - * completed in the kernel, can call this interface to obtain their device - * NUMA topology from ACPI tables. Such drivers do not have to deal with - * offline nodes. A node may be offline when SRAT memory entry does not exist, - * or NUMA is disabled, ex. "numa=off" on x86. 
- */ -static inline int pxm_to_online_node(int pxm) -{ - int node = pxm_to_node(pxm); - - return numa_map_to_online_node(node); -} #else -static inline int pxm_to_online_node(int pxm) -{ - return 0; -} -static inline int acpi_map_pxm_to_node(int pxm) +static inline int acpi_map_pxm_to_online_node(int pxm) { return 0; } @@ -482,8 +411,6 @@ void acpi_dev_free_resource_list(struct list_head *list); int acpi_dev_get_resources(struct acpi_device *adev, struct list_head *list, int (*preproc)(struct acpi_resource *, void *), void *preproc_data); -int acpi_dev_get_dma_resources(struct acpi_device *adev, - struct list_head *list); int acpi_dev_filter_resource_type(struct acpi_resource *ares, unsigned long types); @@ -493,16 +420,11 @@ static inline int acpi_dev_filter_resource_type_cb(struct acpi_resource *ares, return acpi_dev_filter_resource_type(ares, (unsigned long)arg); } -struct acpi_device *acpi_resource_consumer(struct resource *res); - int acpi_check_resource_conflict(const struct resource *res); int acpi_check_region(resource_size_t start, resource_size_t n, const char *name); -acpi_status acpi_release_memory(acpi_handle handle, struct resource *res, - u32 level); - int acpi_resources_are_enforced(void); #ifdef CONFIG_HIBERNATION @@ -513,14 +435,8 @@ void __init acpi_no_s4_hw_signature(void); void __init acpi_old_suspend_ordering(void); void __init acpi_nvs_nosave(void); void __init acpi_nvs_nosave_s3(void); -void __init acpi_sleep_no_blacklist(void); #endif /* CONFIG_PM_SLEEP */ -int acpi_register_wakeup_handler( - int wake_irq, bool (*wakeup)(void *context), void *context); -void acpi_unregister_wakeup_handler( - bool (*wakeup)(void *context), void *context); - struct acpi_osc_context { char *uuid_str; /* UUID string */ int rev; @@ -528,6 +444,7 @@ struct acpi_osc_context { struct acpi_buffer ret; /* free by caller if success */ }; +acpi_status acpi_str_to_uuid(char *str, u8 *uuid); acpi_status acpi_run_osc(acpi_handle handle, struct acpi_osc_context *context); 
/* Indexes into _OSC Capabilities Buffer (DWORDs 2 & 3 are device-specific) */ @@ -552,22 +469,9 @@ acpi_status acpi_run_osc(acpi_handle handle, struct acpi_osc_context *context); #define OSC_SB_CPCV2_SUPPORT 0x00000040 #define OSC_SB_PCLPI_SUPPORT 0x00000080 #define OSC_SB_OSLPI_SUPPORT 0x00000100 -#define OSC_SB_CPC_DIVERSE_HIGH_SUPPORT 0x00001000 -#define OSC_SB_GENERIC_INITIATOR_SUPPORT 0x00002000 -#define OSC_SB_NATIVE_USB4_SUPPORT 0x00040000 -#define OSC_SB_PRM_SUPPORT 0x00200000 extern bool osc_sb_apei_support_acked; extern bool osc_pc_lpi_support_confirmed; -extern bool osc_sb_native_usb4_support_confirmed; - -/* USB4 Capabilities */ -#define OSC_USB_USB3_TUNNELING 0x00000001 -#define OSC_USB_DP_TUNNELING 0x00000002 -#define OSC_USB_PCIE_TUNNELING 0x00000004 -#define OSC_USB_XDOMAIN 0x00000008 - -extern u32 osc_sb_native_usb4_control; /* PCI Host Bridge _OSC: Capabilities DWORD 2: Support Field */ #define OSC_PCI_EXT_CONFIG_SUPPORT 0x00000001 @@ -575,9 +479,7 @@ extern u32 osc_sb_native_usb4_control; #define OSC_PCI_CLOCK_PM_SUPPORT 0x00000004 #define OSC_PCI_SEGMENT_GROUPS_SUPPORT 0x00000008 #define OSC_PCI_MSI_SUPPORT 0x00000010 -#define OSC_PCI_EDR_SUPPORT 0x00000080 -#define OSC_PCI_HPX_TYPE_3_SUPPORT 0x00000100 -#define OSC_PCI_SUPPORT_MASKS 0x0000019f +#define OSC_PCI_SUPPORT_MASKS 0x0000001f /* PCI Host Bridge _OSC: Capabilities DWORD 3: Control Field */ #define OSC_PCI_EXPRESS_NATIVE_HP_CONTROL 0x00000001 @@ -585,9 +487,7 @@ extern u32 osc_sb_native_usb4_control; #define OSC_PCI_EXPRESS_PME_CONTROL 0x00000004 #define OSC_PCI_EXPRESS_AER_CONTROL 0x00000008 #define OSC_PCI_EXPRESS_CAPABILITY_CONTROL 0x00000010 -#define OSC_PCI_EXPRESS_LTR_CONTROL 0x00000020 -#define OSC_PCI_EXPRESS_DPC_CONTROL 0x00000080 -#define OSC_PCI_CONTROL_MASKS 0x000000bf +#define OSC_PCI_CONTROL_MASKS 0x0000001f #define ACPI_GSB_ACCESS_ATTRIB_QUICK 0x00000002 #define ACPI_GSB_ACCESS_ATTRIB_SEND_RCV 0x00000004 @@ -600,6 +500,9 @@ extern u32 osc_sb_native_usb4_control; #define 
ACPI_GSB_ACCESS_ATTRIB_RAW_BYTES 0x0000000E #define ACPI_GSB_ACCESS_ATTRIB_RAW_PROCESS 0x0000000F +extern acpi_status acpi_pci_osc_control_set(acpi_handle handle, + u32 *mask, u32 req); + /* Enable _OST when all relevant hotplug operations are enabled */ #if defined(CONFIG_ACPI_HOTPLUG_CPU) && \ defined(CONFIG_ACPI_HOTPLUG_MEMORY) && \ @@ -635,28 +538,8 @@ extern u32 osc_sb_native_usb4_control; #define ACPI_OST_SC_DRIVER_LOAD_FAILURE 0x81 #define ACPI_OST_SC_INSERT_NOT_SUPPORTED 0x82 -enum acpi_predicate { - all_versions, - less_than_or_equal, - equal, - greater_than_or_equal, -}; - -/* Table must be terminted by a NULL entry */ -struct acpi_platform_list { - char oem_id[ACPI_OEM_ID_SIZE+1]; - char oem_table_id[ACPI_OEM_TABLE_ID_SIZE+1]; - u32 oem_revision; - char *table; - enum acpi_predicate pred; - char *reason; - u32 data; -}; -int acpi_match_platform_list(const struct acpi_platform_list *plat); - extern void acpi_early_init(void); extern void acpi_subsystem_init(void); -extern void arch_post_acpi_subsys_init(void); extern int acpi_nvs_register(__u64 start, __u64 size); @@ -666,11 +549,11 @@ extern int acpi_nvs_for_each_region(int (*func)(__u64, __u64, void *), const struct acpi_device_id *acpi_match_device(const struct acpi_device_id *ids, const struct device *dev); -const void *acpi_device_get_match_data(const struct device *dev); extern bool acpi_driver_match_device(struct device *dev, const struct device_driver *drv); int acpi_device_uevent_modalias(struct device *, struct kobj_uevent_env *); int acpi_device_modalias(struct device *, char *, int); +void acpi_walk_dep_device_list(acpi_handle handle); struct platform_device *acpi_create_platform_device(struct acpi_device *, struct property_entry *); @@ -694,28 +577,6 @@ enum acpi_reconfig_event { int acpi_reconfig_notifier_register(struct notifier_block *nb); int acpi_reconfig_notifier_unregister(struct notifier_block *nb); -#ifdef CONFIG_ACPI_GTDT -int acpi_gtdt_init(struct acpi_table_header *table, int 
*platform_timer_count); -int acpi_gtdt_map_ppi(int type); -bool acpi_gtdt_c3stop(int type); -int acpi_arch_timer_mem_init(struct arch_timer_mem *timer_mem, int *timer_count); -#endif - -#ifndef ACPI_HAVE_ARCH_SET_ROOT_POINTER -static inline void acpi_arch_set_root_pointer(u64 addr) -{ -} -#endif - -#ifndef ACPI_HAVE_ARCH_GET_ROOT_POINTER -static inline u64 acpi_arch_get_root_pointer(void) -{ - return 0; -} -#endif - -int acpi_get_local_address(acpi_handle handle, u32 *addr); - #else /* !CONFIG_ACPI */ #define acpi_disabled 1 @@ -723,11 +584,8 @@ int acpi_get_local_address(acpi_handle handle, u32 *addr); #define ACPI_COMPANION(dev) (NULL) #define ACPI_COMPANION_SET(dev, adev) do { } while (0) #define ACPI_HANDLE(dev) (NULL) -#define ACPI_HANDLE_FWNODE(fwnode) (NULL) #define ACPI_DEVICE_CLASS(_cls, _msk) .cls = (0), .cls_msk = (0), -#include - struct fwnode_handle; static inline bool acpi_dev_found(const char *hid) @@ -735,58 +593,32 @@ static inline bool acpi_dev_found(const char *hid) return false; } -static inline bool acpi_dev_present(const char *hid, const char *uid, s64 hrv) +static inline bool is_acpi_node(struct fwnode_handle *fwnode) { return false; } -struct acpi_device; - -static inline bool -acpi_dev_hid_uid_match(struct acpi_device *adev, const char *hid2, const char *uid2) +static inline bool is_acpi_device_node(struct fwnode_handle *fwnode) { return false; } -static inline struct acpi_device * -acpi_dev_get_first_match_dev(const char *hid, const char *uid, s64 hrv) +static inline struct acpi_device *to_acpi_device_node(struct fwnode_handle *fwnode) { return NULL; } -static inline bool acpi_reduced_hardware(void) +static inline bool is_acpi_data_node(struct fwnode_handle *fwnode) { return false; } -static inline void acpi_dev_put(struct acpi_device *adev) {} - -static inline bool is_acpi_node(const struct fwnode_handle *fwnode) -{ - return false; -} - -static inline bool is_acpi_device_node(const struct fwnode_handle *fwnode) -{ - return false; -} - 
-static inline struct acpi_device *to_acpi_device_node(const struct fwnode_handle *fwnode) +static inline struct acpi_data_node *to_acpi_data_node(struct fwnode_handle *fwnode) { return NULL; } -static inline bool is_acpi_data_node(const struct fwnode_handle *fwnode) -{ - return false; -} - -static inline struct acpi_data_node *to_acpi_data_node(const struct fwnode_handle *fwnode) -{ - return NULL; -} - -static inline bool acpi_data_node_match(const struct fwnode_handle *fwnode, +static inline bool acpi_data_node_match(struct fwnode_handle *fwnode, const char *name) { return false; @@ -829,12 +661,9 @@ static inline int acpi_boot_init(void) return 0; } -static inline void acpi_boot_table_prepare(void) -{ -} - static inline void acpi_boot_table_init(void) { + return; } static inline int acpi_mps_check(void) @@ -879,11 +708,6 @@ static inline const struct acpi_device_id *acpi_match_device( return NULL; } -static inline const void *acpi_device_get_match_data(const struct device *dev) -{ - return NULL; -} - static inline bool acpi_driver_match_device(struct device *dev, const struct device_driver *drv) { @@ -891,8 +715,8 @@ static inline bool acpi_driver_match_device(struct device *dev, } static inline union acpi_object *acpi_evaluate_dsm(acpi_handle handle, - const guid_t *guid, - u64 rev, u64 func, + const u8 *uuid, + int rev, int func, union acpi_object *argv4) { return NULL; @@ -910,14 +734,7 @@ static inline int acpi_device_modalias(struct device *dev, return -ENODEV; } -static inline struct platform_device * -acpi_create_platform_device(struct acpi_device *adev, - struct property_entry *properties) -{ - return NULL; -} - -static inline bool acpi_dma_supported(const struct acpi_device *adev) +static inline bool acpi_dma_supported(struct acpi_device *adev) { return false; } @@ -927,25 +744,6 @@ static inline enum dev_dma_attr acpi_get_dma_attr(struct acpi_device *adev) return DEV_DMA_NOT_SUPPORTED; } -static inline int acpi_dma_get_range(struct device *dev, u64 
*dma_addr, - u64 *offset, u64 *size) -{ - return -ENODEV; -} - -static inline int acpi_dma_configure(struct device *dev, - enum dev_dma_attr attr) -{ - return 0; -} - -static inline int acpi_dma_configure_id(struct device *dev, - enum dev_dma_attr attr, - const u32 *input_id) -{ - return 0; -} - #define ACPI_PTR(_ptr) (NULL) static inline void acpi_device_set_enumerated(struct acpi_device *adev) @@ -966,16 +764,6 @@ static inline int acpi_reconfig_notifier_unregister(struct notifier_block *nb) return -EINVAL; } -static inline struct acpi_device *acpi_resource_consumer(struct resource *res) -{ - return NULL; -} - -static inline int acpi_get_local_address(acpi_handle handle, u32 *addr) -{ - return -ENODEV; -} - #endif /* !CONFIG_ACPI */ #ifdef CONFIG_ACPI_HOTPLUG_IOAPIC @@ -997,7 +785,7 @@ void acpi_os_set_prepare_extended_sleep(int (*func)(u8 sleep_state, acpi_status acpi_os_prepare_extended_sleep(u8 sleep_state, u32 val_a, u32 val_b); -#ifndef CONFIG_IA64 +#ifdef CONFIG_X86 void arch_reserve_mem_area(acpi_physical_address addr, size_t size); #else static inline void arch_reserve_mem_area(acpi_physical_address addr, @@ -1010,64 +798,62 @@ static inline void arch_reserve_mem_area(acpi_physical_address addr, #endif #if defined(CONFIG_ACPI) && defined(CONFIG_PM) -int acpi_dev_suspend(struct device *dev, bool wakeup); -int acpi_dev_resume(struct device *dev); +int acpi_dev_runtime_suspend(struct device *dev); +int acpi_dev_runtime_resume(struct device *dev); int acpi_subsys_runtime_suspend(struct device *dev); int acpi_subsys_runtime_resume(struct device *dev); +struct acpi_device *acpi_dev_pm_get_node(struct device *dev); int acpi_dev_pm_attach(struct device *dev, bool power_on); -bool acpi_storage_d3(struct device *dev); #else +static inline int acpi_dev_runtime_suspend(struct device *dev) { return 0; } +static inline int acpi_dev_runtime_resume(struct device *dev) { return 0; } static inline int acpi_subsys_runtime_suspend(struct device *dev) { return 0; } static 
inline int acpi_subsys_runtime_resume(struct device *dev) { return 0; } +static inline struct acpi_device *acpi_dev_pm_get_node(struct device *dev) +{ + return NULL; +} static inline int acpi_dev_pm_attach(struct device *dev, bool power_on) { - return 0; -} -static inline bool acpi_storage_d3(struct device *dev) -{ - return false; + return -ENODEV; } #endif #if defined(CONFIG_ACPI) && defined(CONFIG_PM_SLEEP) +int acpi_dev_suspend_late(struct device *dev); +int acpi_dev_resume_early(struct device *dev); int acpi_subsys_prepare(struct device *dev); void acpi_subsys_complete(struct device *dev); int acpi_subsys_suspend_late(struct device *dev); -int acpi_subsys_suspend_noirq(struct device *dev); +int acpi_subsys_resume_early(struct device *dev); int acpi_subsys_suspend(struct device *dev); int acpi_subsys_freeze(struct device *dev); -int acpi_subsys_poweroff(struct device *dev); -void acpi_ec_mark_gpe_for_wake(void); -void acpi_ec_set_gpe_wake_mask(u8 action); #else +static inline int acpi_dev_suspend_late(struct device *dev) { return 0; } +static inline int acpi_dev_resume_early(struct device *dev) { return 0; } static inline int acpi_subsys_prepare(struct device *dev) { return 0; } static inline void acpi_subsys_complete(struct device *dev) {} static inline int acpi_subsys_suspend_late(struct device *dev) { return 0; } -static inline int acpi_subsys_suspend_noirq(struct device *dev) { return 0; } +static inline int acpi_subsys_resume_early(struct device *dev) { return 0; } static inline int acpi_subsys_suspend(struct device *dev) { return 0; } static inline int acpi_subsys_freeze(struct device *dev) { return 0; } -static inline int acpi_subsys_poweroff(struct device *dev) { return 0; } -static inline void acpi_ec_mark_gpe_for_wake(void) {} -static inline void acpi_ec_set_gpe_wake_mask(u8 action) {} #endif #ifdef CONFIG_ACPI __printf(3, 4) void acpi_handle_printk(const char *level, acpi_handle handle, const char *fmt, ...); -void 
acpi_evaluation_failure_warn(acpi_handle handle, const char *name, - acpi_status status); #else /* !CONFIG_ACPI */ static inline __printf(3, 4) void acpi_handle_printk(const char *level, void *handle, const char *fmt, ...) {} -static inline void acpi_evaluation_failure_warn(acpi_handle handle, - const char *name, - acpi_status status) {} #endif /* !CONFIG_ACPI */ #if defined(CONFIG_ACPI) && defined(CONFIG_DYNAMIC_DEBUG) __printf(3, 4) void __acpi_handle_debug(struct _ddebug *descriptor, acpi_handle handle, const char *fmt, ...); +#else +#define __acpi_handle_debug(descriptor, handle, fmt, ...) \ + acpi_handle_printk(KERN_DEBUG, handle, fmt, ##__VA_ARGS__); #endif /* @@ -1097,8 +883,12 @@ void __acpi_handle_debug(struct _ddebug *descriptor, acpi_handle handle, const c #else #if defined(CONFIG_DYNAMIC_DEBUG) #define acpi_handle_debug(handle, fmt, ...) \ - _dynamic_func_call(fmt, __acpi_handle_debug, \ - handle, pr_fmt(fmt), ##__VA_ARGS__) +do { \ + DEFINE_DYNAMIC_DEBUG_METADATA(descriptor, fmt); \ + if (unlikely(descriptor.flags & _DPRINTK_FLAGS_PRINT)) \ + __acpi_handle_debug(&descriptor, handle, pr_fmt(fmt), \ + ##__VA_ARGS__); \ +} while (0) #else #define acpi_handle_debug(handle, fmt, ...) 
\ ({ \ @@ -1109,68 +899,78 @@ void __acpi_handle_debug(struct _ddebug *descriptor, acpi_handle handle, const c #endif #endif +struct acpi_gpio_params { + unsigned int crs_entry_index; + unsigned int line_index; + bool active_low; +}; + +struct acpi_gpio_mapping { + const char *name; + const struct acpi_gpio_params *data; + unsigned int size; +}; + #if defined(CONFIG_ACPI) && defined(CONFIG_GPIOLIB) -bool acpi_gpio_get_irq_resource(struct acpi_resource *ares, - struct acpi_resource_gpio **agpio); -bool acpi_gpio_get_io_resource(struct acpi_resource *ares, - struct acpi_resource_gpio **agpio); -int acpi_dev_gpio_irq_get_by(struct acpi_device *adev, const char *name, int index); +int acpi_dev_add_driver_gpios(struct acpi_device *adev, + const struct acpi_gpio_mapping *gpios); + +static inline void acpi_dev_remove_driver_gpios(struct acpi_device *adev) +{ + if (adev) + adev->driver_gpios = NULL; +} + +int acpi_dev_gpio_irq_get(struct acpi_device *adev, int index); #else -static inline bool acpi_gpio_get_irq_resource(struct acpi_resource *ares, - struct acpi_resource_gpio **agpio) +static inline int acpi_dev_add_driver_gpios(struct acpi_device *adev, + const struct acpi_gpio_mapping *gpios) { - return false; + return -ENXIO; } -static inline bool acpi_gpio_get_io_resource(struct acpi_resource *ares, - struct acpi_resource_gpio **agpio) -{ - return false; -} -static inline int acpi_dev_gpio_irq_get_by(struct acpi_device *adev, - const char *name, int index) +static inline void acpi_dev_remove_driver_gpios(struct acpi_device *adev) {} + +static inline int acpi_dev_gpio_irq_get(struct acpi_device *adev, int index) { return -ENXIO; } #endif -static inline int acpi_dev_gpio_irq_get(struct acpi_device *adev, int index) -{ - return acpi_dev_gpio_irq_get_by(adev, NULL, index); -} - /* Device properties */ -#ifdef CONFIG_ACPI -int acpi_dev_get_property(const struct acpi_device *adev, const char *name, - acpi_object_type type, const union acpi_object **obj); -int 
__acpi_node_get_property_reference(const struct fwnode_handle *fwnode, - const char *name, size_t index, size_t num_args, - struct fwnode_reference_args *args); +#define MAX_ACPI_REFERENCE_ARGS 8 +struct acpi_reference_args { + struct acpi_device *adev; + size_t nargs; + u64 args[MAX_ACPI_REFERENCE_ARGS]; +}; -static inline int acpi_node_get_property_reference( - const struct fwnode_handle *fwnode, +#ifdef CONFIG_ACPI +int acpi_dev_get_property(struct acpi_device *adev, const char *name, + acpi_object_type type, const union acpi_object **obj); +int __acpi_node_get_property_reference(struct fwnode_handle *fwnode, + const char *name, size_t index, size_t num_args, + struct acpi_reference_args *args); + +static inline int acpi_node_get_property_reference(struct fwnode_handle *fwnode, const char *name, size_t index, - struct fwnode_reference_args *args) + struct acpi_reference_args *args) { return __acpi_node_get_property_reference(fwnode, name, index, - NR_FWNODE_REFERENCE_ARGS, args); + MAX_ACPI_REFERENCE_ARGS, args); } -static inline bool acpi_dev_has_props(const struct acpi_device *adev) -{ - return !list_empty(&adev->data.properties); -} - -struct acpi_device_properties * -acpi_data_add_props(struct acpi_device_data *data, const guid_t *guid, - const union acpi_object *properties); - -int acpi_node_prop_get(const struct fwnode_handle *fwnode, const char *propname, +int acpi_node_prop_get(struct fwnode_handle *fwnode, const char *propname, void **valptr); +int acpi_dev_prop_read_single(struct acpi_device *adev, const char *propname, + enum dev_prop_type proptype, void *val); +int acpi_node_prop_read(struct fwnode_handle *fwnode, const char *propname, + enum dev_prop_type proptype, void *val, size_t nval); +int acpi_dev_prop_read(struct acpi_device *adev, const char *propname, + enum dev_prop_type proptype, void *val, size_t nval); -struct fwnode_handle *acpi_get_next_subnode(const struct fwnode_handle *fwnode, - struct fwnode_handle *child); -struct fwnode_handle 
*acpi_node_get_parent(const struct fwnode_handle *fwnode); +struct fwnode_handle *acpi_get_next_subnode(struct device *dev, + struct fwnode_handle *subnode); struct acpi_probe_entry; typedef bool (*acpi_probe_entry_validate_subtbl)(struct acpi_subtable_header *, @@ -1202,27 +1002,16 @@ struct acpi_probe_entry { kernel_ulong_t driver_data; }; -#define ACPI_DECLARE_PROBE_ENTRY(table, name, table_id, subtable, \ - valid, data, fn) \ +#define ACPI_DECLARE_PROBE_ENTRY(table, name, table_id, subtable, valid, data, fn) \ static const struct acpi_probe_entry __acpi_probe_##name \ - __used __section("__" #table "_acpi_probe_table") = { \ + __used __section(__##table##_acpi_probe_table) \ + = { \ .id = table_id, \ .type = subtable, \ .subtable_valid = valid, \ - .probe_table = fn, \ - .driver_data = data, \ - } - -#define ACPI_DECLARE_SUBTABLE_PROBE_ENTRY(table, name, table_id, \ - subtable, valid, data, fn) \ - static const struct acpi_probe_entry __acpi_probe_##name \ - __used __section("__" #table "_acpi_probe_table") = { \ - .id = table_id, \ - .type = subtable, \ - .subtable_valid = valid, \ - .probe_subtbl = fn, \ - .driver_data = data, \ - } + .probe_table = (acpi_tbl_table_handler)fn, \ + .driver_data = data, \ + } #define ACPI_PROBE_TABLE(name) __##name##_acpi_probe_table #define ACPI_PROBE_TABLE_END(name) __##name##_acpi_probe_table_end @@ -1246,57 +1035,64 @@ static inline int acpi_dev_get_property(struct acpi_device *adev, } static inline int -__acpi_node_get_property_reference(const struct fwnode_handle *fwnode, +__acpi_node_get_property_reference(struct fwnode_handle *fwnode, const char *name, size_t index, size_t num_args, - struct fwnode_reference_args *args) + struct acpi_reference_args *args) { return -ENXIO; } -static inline int -acpi_node_get_property_reference(const struct fwnode_handle *fwnode, - const char *name, size_t index, - struct fwnode_reference_args *args) +static inline int acpi_node_get_property_reference(struct fwnode_handle *fwnode, + const 
char *name, size_t index, + struct acpi_reference_args *args) { return -ENXIO; } -static inline int acpi_node_prop_get(const struct fwnode_handle *fwnode, +static inline int acpi_node_prop_get(struct fwnode_handle *fwnode, const char *propname, void **valptr) { return -ENXIO; } -static inline struct fwnode_handle * -acpi_get_next_subnode(const struct fwnode_handle *fwnode, - struct fwnode_handle *child) -{ - return NULL; -} - -static inline struct fwnode_handle * -acpi_node_get_parent(const struct fwnode_handle *fwnode) -{ - return NULL; -} - -static inline struct fwnode_handle * -acpi_graph_get_next_endpoint(const struct fwnode_handle *fwnode, - struct fwnode_handle *prev) -{ - return ERR_PTR(-ENXIO); -} - -static inline int -acpi_graph_get_remote_endpoint(const struct fwnode_handle *fwnode, - struct fwnode_handle **remote, - struct fwnode_handle **port, - struct fwnode_handle **endpoint) +static inline int acpi_dev_prop_get(struct acpi_device *adev, + const char *propname, + void **valptr) { return -ENXIO; } +static inline int acpi_dev_prop_read_single(struct acpi_device *adev, + const char *propname, + enum dev_prop_type proptype, + void *val) +{ + return -ENXIO; +} + +static inline int acpi_node_prop_read(struct fwnode_handle *fwnode, + const char *propname, + enum dev_prop_type proptype, + void *val, size_t nval) +{ + return -ENXIO; +} + +static inline int acpi_dev_prop_read(struct acpi_device *adev, + const char *propname, + enum dev_prop_type proptype, + void *val, size_t nval) +{ + return -ENXIO; +} + +static inline struct fwnode_handle *acpi_get_next_subnode(struct device *dev, + struct fwnode_handle *subnode) +{ + return NULL; +} + #define ACPI_DECLARE_PROBE_ENTRY(table, name, table_id, subtable, valid, data, fn) \ static const void * __acpi_table_##name[] \ __attribute__((unused)) \ @@ -1322,69 +1118,9 @@ static inline bool acpi_has_watchdog(void) { return false; } #endif #ifdef CONFIG_ACPI_SPCR_TABLE -extern bool qdf2400_e44_present; -int 
acpi_parse_spcr(bool enable_earlycon, bool enable_console); +int parse_spcr(bool earlycon); #else -static inline int acpi_parse_spcr(bool enable_earlycon, bool enable_console) -{ - return 0; -} -#endif - -#if IS_ENABLED(CONFIG_ACPI_GENERIC_GSI) -int acpi_irq_get(acpi_handle handle, unsigned int index, struct resource *res); -#else -static inline -int acpi_irq_get(acpi_handle handle, unsigned int index, struct resource *res) -{ - return -EINVAL; -} -#endif - -#ifdef CONFIG_ACPI_LPIT -int lpit_read_residency_count_address(u64 *address); -#else -static inline int lpit_read_residency_count_address(u64 *address) -{ - return -EINVAL; -} -#endif - -#ifdef CONFIG_ACPI_PPTT -int acpi_pptt_cpu_is_thread(unsigned int cpu); -int find_acpi_cpu_topology(unsigned int cpu, int level); -int find_acpi_cpu_topology_package(unsigned int cpu); -int find_acpi_cpu_topology_hetero_id(unsigned int cpu); -int find_acpi_cpu_cache_topology(unsigned int cpu, int level); -#else -static inline int acpi_pptt_cpu_is_thread(unsigned int cpu) -{ - return -EINVAL; -} -static inline int find_acpi_cpu_topology(unsigned int cpu, int level) -{ - return -EINVAL; -} -static inline int find_acpi_cpu_topology_package(unsigned int cpu) -{ - return -EINVAL; -} -static inline int find_acpi_cpu_topology_hetero_id(unsigned int cpu) -{ - return -EINVAL; -} -static inline int find_acpi_cpu_cache_topology(unsigned int cpu, int level) -{ - return -EINVAL; -} -#endif - -#ifdef CONFIG_ACPI -extern void acpi_device_notify(struct device *dev); -extern void acpi_device_notify_remove(struct device *dev); -#else -static inline void acpi_device_notify(struct device *dev) { } -static inline void acpi_device_notify_remove(struct device *dev) { } +static inline int parse_spcr(bool earlycon) { return 0; } #endif #endif /*_LINUX_ACPI_H*/ diff --git a/include/linux/acpi_dma.h b/include/linux/acpi_dma.h index 72cedb916a..329436d38e 100644 --- a/include/linux/acpi_dma.h +++ b/include/linux/acpi_dma.h @@ -1,4 +1,3 @@ -/* 
SPDX-License-Identifier: GPL-2.0-only */ /* * ACPI helpers for DMA request / controller * @@ -6,6 +5,10 @@ * * Copyright (C) 2013, Intel Corporation * Author: Andy Shevchenko + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. */ #ifndef __LINUX_ACPI_DMA_H diff --git a/include/linux/acpi_iort.h b/include/linux/acpi_iort.h index f1f0842a2c..0e32dac8fd 100644 --- a/include/linux/acpi_iort.h +++ b/include/linux/acpi_iort.h @@ -1,7 +1,19 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright (C) 2016, Semihalf * Author: Tomasz Nowicki + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., 59 Temple + * Place - Suite 330, Boston, MA 02111-1307 USA. */ #ifndef __ACPI_IORT_H__ @@ -11,52 +23,20 @@ #include #include -#define IORT_IRQ_MASK(irq) (irq & 0xffffffffULL) -#define IORT_IRQ_TRIGGER_MASK(irq) ((irq >> 32) & 0xffffffffULL) - -/* - * PMCG model identifiers for use in smmu pmu driver. Please note - * that this is purely for the use of software and has nothing to - * do with hardware or with IORT specification. 
- */ -#define IORT_SMMU_V3_PMCG_GENERIC 0x00000000 /* Generic SMMUv3 PMCG */ -#define IORT_SMMU_V3_PMCG_HISI_HIP08 0x00000001 /* HiSilicon HIP08 PMCG */ - -int iort_register_domain_token(int trans_id, phys_addr_t base, - struct fwnode_handle *fw_node); +int iort_register_domain_token(int trans_id, struct fwnode_handle *fw_node); void iort_deregister_domain_token(int trans_id); struct fwnode_handle *iort_find_domain_token(int trans_id); #ifdef CONFIG_ACPI_IORT void acpi_iort_init(void); -u32 iort_msi_map_id(struct device *dev, u32 id); -struct irq_domain *iort_get_device_domain(struct device *dev, u32 id, - enum irq_domain_bus_token bus_token); -void acpi_configure_pmsi_domain(struct device *dev); -int iort_pmsi_get_dev_id(struct device *dev, u32 *dev_id); -/* IOMMU interface */ -int iort_dma_get_ranges(struct device *dev, u64 *size); -int iort_iommu_configure_id(struct device *dev, const u32 *id_in); -int iort_iommu_msi_get_resv_regions(struct device *dev, struct list_head *head); -phys_addr_t acpi_iort_dma_get_max_cpu_address(void); +u32 iort_msi_map_rid(struct device *dev, u32 req_id); +struct irq_domain *iort_get_device_domain(struct device *dev, u32 req_id); #else static inline void acpi_iort_init(void) { } -static inline u32 iort_msi_map_id(struct device *dev, u32 id) -{ return id; } -static inline struct irq_domain *iort_get_device_domain( - struct device *dev, u32 id, enum irq_domain_bus_token bus_token) +static inline u32 iort_msi_map_rid(struct device *dev, u32 req_id) +{ return req_id; } +static inline struct irq_domain *iort_get_device_domain(struct device *dev, + u32 req_id) { return NULL; } -static inline void acpi_configure_pmsi_domain(struct device *dev) { } -/* IOMMU interface */ -static inline int iort_dma_get_ranges(struct device *dev, u64 *size) -{ return -ENODEV; } -static inline int iort_iommu_configure_id(struct device *dev, const u32 *id_in) -{ return -ENODEV; } -static inline -int iort_iommu_msi_get_resv_regions(struct device *dev, struct 
list_head *head) -{ return 0; } - -static inline phys_addr_t acpi_iort_dma_get_max_cpu_address(void) -{ return PHYS_ADDR_MAX; } #endif #endif /* __ACPI_IORT_H__ */ diff --git a/include/linux/acpi_pmtmr.h b/include/linux/acpi_pmtmr.h index 50d88bf149..1d0ef1ae80 100644 --- a/include/linux/acpi_pmtmr.h +++ b/include/linux/acpi_pmtmr.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _ACPI_PMTMR_H_ #define _ACPI_PMTMR_H_ diff --git a/include/linux/adb.h b/include/linux/adb.h index f6306fc860..cde41300c7 100644 --- a/include/linux/adb.h +++ b/include/linux/adb.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ /* * Definitions for ADB (Apple Desktop Bus) support. */ diff --git a/include/linux/adfs_fs.h b/include/linux/adfs_fs.h index 4836e382ad..0d991071a9 100644 --- a/include/linux/adfs_fs.h +++ b/include/linux/adfs_fs.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _ADFS_FS_H #define _ADFS_FS_H diff --git a/include/linux/aer.h b/include/linux/aer.h index 97f64ba1b3..04602cbe85 100644 --- a/include/linux/aer.h +++ b/include/linux/aer.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ /* * Copyright (C) 2006 Intel Corp. 
* Tom Long Nguyen (tom.l.nguyen@intel.com) @@ -14,7 +13,6 @@ #define AER_NONFATAL 0 #define AER_FATAL 1 #define AER_CORRECTABLE 2 -#define DPC_FATAL 3 struct pci_dev; @@ -41,12 +39,11 @@ struct aer_capability_regs { }; #if defined(CONFIG_PCIEAER) -/* PCIe port driver needs this function to enable AER */ +/* pci-e port driver needs this function to enable aer */ int pci_enable_pcie_error_reporting(struct pci_dev *dev); int pci_disable_pcie_error_reporting(struct pci_dev *dev); -int pci_aer_clear_nonfatal_status(struct pci_dev *dev); -void pci_save_aer_state(struct pci_dev *dev); -void pci_restore_aer_state(struct pci_dev *dev); +int pci_cleanup_aer_uncorrect_error_status(struct pci_dev *dev); +int pci_cleanup_aer_error_status_regs(struct pci_dev *dev); #else static inline int pci_enable_pcie_error_reporting(struct pci_dev *dev) { @@ -56,18 +53,21 @@ static inline int pci_disable_pcie_error_reporting(struct pci_dev *dev) { return -EINVAL; } -static inline int pci_aer_clear_nonfatal_status(struct pci_dev *dev) +static inline int pci_cleanup_aer_uncorrect_error_status(struct pci_dev *dev) +{ + return -EINVAL; +} +static inline int pci_cleanup_aer_error_status_regs(struct pci_dev *dev) { return -EINVAL; } -static inline void pci_save_aer_state(struct pci_dev *dev) {} -static inline void pci_restore_aer_state(struct pci_dev *dev) {} #endif void cper_print_aer(struct pci_dev *dev, int aer_severity, struct aer_capability_regs *aer); int cper_severity_to_aer(int cper_severity); void aer_recover_queue(int domain, unsigned int bus, unsigned int devfn, - int severity, struct aer_capability_regs *aer_regs); + int severity, + struct aer_capability_regs *aer_regs); #endif //_AER_H_ diff --git a/include/linux/agpgart.h b/include/linux/agpgart.h index 21b34a96cf..c6b61ca970 100644 --- a/include/linux/agpgart.h +++ b/include/linux/agpgart.h @@ -30,6 +30,8 @@ #include #include +#define AGPGART_MINOR 175 + struct agp_info { struct agp_version version; /* version of the driver */ u32 
bridge_id; /* bridge vendor/device */ diff --git a/include/linux/ahci_platform.h b/include/linux/ahci_platform.h index 49e5383d42..a270f25ee7 100644 --- a/include/linux/ahci_platform.h +++ b/include/linux/ahci_platform.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * AHCI SATA platform driver * @@ -6,6 +5,11 @@ * Jeff Garzik * Copyright 2010 MontaVista Software, LLC. * Anton Vorontsov + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2, or (at your option) + * any later version. */ #ifndef _AHCI_PLATFORM_H @@ -19,8 +23,6 @@ struct ahci_host_priv; struct platform_device; struct scsi_host_template; -int ahci_platform_enable_phys(struct ahci_host_priv *hpriv); -void ahci_platform_disable_phys(struct ahci_host_priv *hpriv); int ahci_platform_enable_clks(struct ahci_host_priv *hpriv); void ahci_platform_disable_clks(struct ahci_host_priv *hpriv); int ahci_platform_enable_regulators(struct ahci_host_priv *hpriv); @@ -28,19 +30,15 @@ void ahci_platform_disable_regulators(struct ahci_host_priv *hpriv); int ahci_platform_enable_resources(struct ahci_host_priv *hpriv); void ahci_platform_disable_resources(struct ahci_host_priv *hpriv); struct ahci_host_priv *ahci_platform_get_resources( - struct platform_device *pdev, unsigned int flags); + struct platform_device *pdev); int ahci_platform_init_host(struct platform_device *pdev, struct ahci_host_priv *hpriv, const struct ata_port_info *pi_template, struct scsi_host_template *sht); -void ahci_platform_shutdown(struct platform_device *pdev); - int ahci_platform_suspend_host(struct device *dev); int ahci_platform_resume_host(struct device *dev); int ahci_platform_suspend(struct device *dev); int ahci_platform_resume(struct device *dev); -#define AHCI_PLATFORM_GET_RESETS 0x01 - #endif /* _AHCI_PLATFORM_H */ diff --git a/include/linux/aio.h b/include/linux/aio.h 
index b83e68dd00..9eb42dbc55 100644 --- a/include/linux/aio.h +++ b/include/linux/aio.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef __LINUX__AIO_H #define __LINUX__AIO_H @@ -8,14 +7,21 @@ struct kioctx; struct kiocb; struct mm_struct; +#define KIOCB_KEY 0 + typedef int (kiocb_cancel_fn)(struct kiocb *); /* prototypes */ #ifdef CONFIG_AIO extern void exit_aio(struct mm_struct *mm); +extern long do_io_submit(aio_context_t ctx_id, long nr, + struct iocb __user *__user *iocbpp, bool compat); void kiocb_set_cancel_fn(struct kiocb *req, kiocb_cancel_fn *cancel); #else static inline void exit_aio(struct mm_struct *mm) { } +static inline long do_io_submit(aio_context_t ctx_id, long nr, + struct iocb __user * __user *iocbpp, + bool compat) { return 0; } static inline void kiocb_set_cancel_fn(struct kiocb *req, kiocb_cancel_fn *cancel) { } #endif /* CONFIG_AIO */ diff --git a/include/linux/alarmtimer.h b/include/linux/alarmtimer.h index 05e758b8b8..9d8031257a 100644 --- a/include/linux/alarmtimer.h +++ b/include/linux/alarmtimer.h @@ -1,23 +1,16 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_ALARMTIMER_H #define _LINUX_ALARMTIMER_H #include #include #include - -struct rtc_device; +#include enum alarmtimer_type { ALARM_REALTIME, ALARM_BOOTTIME, - /* Supported types end here */ ALARM_NUMTYPE, - - /* Used for tracing information. No usable types. 
*/ - ALARM_REALTIME_FREEZER, - ALARM_BOOTTIME_FREEZER, }; enum alarmtimer_restart { @@ -60,11 +53,7 @@ u64 alarm_forward(struct alarm *alarm, ktime_t now, ktime_t interval); u64 alarm_forward_now(struct alarm *alarm, ktime_t interval); ktime_t alarm_expires_remaining(const struct alarm *alarm); -#ifdef CONFIG_RTC_CLASS /* Provide way to access the rtc device being used by alarmtimers */ struct rtc_device *alarmtimer_get_rtcdev(void); -#else -static inline struct rtc_device *alarmtimer_get_rtcdev(void) { return NULL; } -#endif #endif diff --git a/include/linux/altera_jtaguart.h b/include/linux/altera_jtaguart.h index 527a142cd5..953b178a16 100644 --- a/include/linux/altera_jtaguart.h +++ b/include/linux/altera_jtaguart.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ /* * altera_jtaguart.h -- Altera JTAG UART driver defines. */ diff --git a/include/linux/altera_uart.h b/include/linux/altera_uart.h index 3eb73b8c49..c022c82db7 100644 --- a/include/linux/altera_uart.h +++ b/include/linux/altera_uart.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ /* * altera_uart.h -- Altera UART driver defines. */ diff --git a/include/linux/amba/bus.h b/include/linux/amba/bus.h index c68d87b872..d143c13bed 100644 --- a/include/linux/amba/bus.h +++ b/include/linux/amba/bus.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * linux/include/amba/bus.h * @@ -7,6 +6,10 @@ * region or that is derived from a PrimeCell. * * Copyright (C) 2003 Deep Blue Solutions Ltd, All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. */ #ifndef ASMARM_AMBA_H #define ASMARM_AMBA_H @@ -22,53 +25,13 @@ #define AMBA_CID 0xb105f00d #define CORESIGHT_CID 0xb105900d -/* - * CoreSight Architecture specification updates the ID specification - * for components on the AMBA bus. 
(ARM IHI 0029E) - * - * Bits 15:12 of the CID are the device class. - * - * Class 0xF remains for PrimeCell and legacy components. (AMBA_CID above) - * Class 0x9 defines the component as CoreSight (CORESIGHT_CID above) - * Class 0x0, 0x1, 0xB, 0xE define components that do not have driver support - * at present. - * Class 0x2-0x8,0xA and 0xD-0xD are presently reserved. - * - * Remaining CID bits stay as 0xb105-00d - */ - -/** - * Class 0x9 components use additional values to form a Unique Component - * Identifier (UCI), where peripheral ID values are identical for different - * components. Passed to the amba bus code from the component driver via - * the amba_id->data pointer. - * @devarch : coresight devarch register value - * @devarch_mask: mask bits used for matching. 0 indicates UCI not used. - * @devtype : coresight device type value - * @data : additional driver data. As we have usurped the original - * pointer some devices may still need additional data - */ -struct amba_cs_uci_id { - unsigned int devarch; - unsigned int devarch_mask; - unsigned int devtype; - void *data; -}; - -/* define offsets for registers used by UCI */ -#define UCI_REG_DEVTYPE_OFFSET 0xFCC -#define UCI_REG_DEVARCH_OFFSET 0xFBC - struct clk; struct amba_device { struct device dev; struct resource res; struct clk *pclk; - struct device_dma_parameters dma_parms; unsigned int periphid; - unsigned int cid; - struct amba_cs_uci_id uci; unsigned int irq[AMBA_NR_IRQS]; char *driver_override; }; @@ -76,7 +39,7 @@ struct amba_device { struct amba_driver { struct device_driver drv; int (*probe)(struct amba_device *, const struct amba_id *); - void (*remove)(struct amba_device *); + int (*remove)(struct amba_device *); void (*shutdown)(struct amba_device *); const struct amba_id *id_table; }; @@ -105,19 +68,8 @@ extern struct bus_type amba_bustype; #define amba_get_drvdata(d) dev_get_drvdata(&d->dev) #define amba_set_drvdata(d,p) dev_set_drvdata(&d->dev, p) -#ifdef CONFIG_ARM_AMBA int 
amba_driver_register(struct amba_driver *); void amba_driver_unregister(struct amba_driver *); -#else -static inline int amba_driver_register(struct amba_driver *drv) -{ - return -EINVAL; -} -static inline void amba_driver_unregister(struct amba_driver *drv) -{ -} -#endif - struct amba_device *amba_device_alloc(const char *, resource_size_t, size_t); void amba_device_put(struct amba_device *); int amba_device_add(struct amba_device *, struct resource *); diff --git a/include/linux/amba/clcd.h b/include/linux/amba/clcd.h index b6e0cbeaf5..1035879b32 100644 --- a/include/linux/amba/clcd.h +++ b/include/linux/amba/clcd.h @@ -10,7 +10,73 @@ * for more details. */ #include -#include + +/* + * CLCD Controller Internal Register addresses + */ +#define CLCD_TIM0 0x00000000 +#define CLCD_TIM1 0x00000004 +#define CLCD_TIM2 0x00000008 +#define CLCD_TIM3 0x0000000c +#define CLCD_UBAS 0x00000010 +#define CLCD_LBAS 0x00000014 + +#define CLCD_PL110_IENB 0x00000018 +#define CLCD_PL110_CNTL 0x0000001c +#define CLCD_PL110_STAT 0x00000020 +#define CLCD_PL110_INTR 0x00000024 +#define CLCD_PL110_UCUR 0x00000028 +#define CLCD_PL110_LCUR 0x0000002C + +#define CLCD_PL111_CNTL 0x00000018 +#define CLCD_PL111_IENB 0x0000001c +#define CLCD_PL111_RIS 0x00000020 +#define CLCD_PL111_MIS 0x00000024 +#define CLCD_PL111_ICR 0x00000028 +#define CLCD_PL111_UCUR 0x0000002c +#define CLCD_PL111_LCUR 0x00000030 + +#define CLCD_PALL 0x00000200 +#define CLCD_PALETTE 0x00000200 + +#define TIM2_CLKSEL (1 << 5) +#define TIM2_IVS (1 << 11) +#define TIM2_IHS (1 << 12) +#define TIM2_IPC (1 << 13) +#define TIM2_IOE (1 << 14) +#define TIM2_BCD (1 << 26) + +#define CNTL_LCDEN (1 << 0) +#define CNTL_LCDBPP1 (0 << 1) +#define CNTL_LCDBPP2 (1 << 1) +#define CNTL_LCDBPP4 (2 << 1) +#define CNTL_LCDBPP8 (3 << 1) +#define CNTL_LCDBPP16 (4 << 1) +#define CNTL_LCDBPP16_565 (6 << 1) +#define CNTL_LCDBPP16_444 (7 << 1) +#define CNTL_LCDBPP24 (5 << 1) +#define CNTL_LCDBW (1 << 4) +#define CNTL_LCDTFT (1 << 5) +#define 
CNTL_LCDMONO8 (1 << 6) +#define CNTL_LCDDUAL (1 << 7) +#define CNTL_BGR (1 << 8) +#define CNTL_BEBO (1 << 9) +#define CNTL_BEPO (1 << 10) +#define CNTL_LCDPWR (1 << 11) +#define CNTL_LCDVCOMP(x) ((x) << 12) +#define CNTL_LDMAFIFOTIME (1 << 15) +#define CNTL_WATERMARK (1 << 16) + +/* ST Microelectronics variant bits */ +#define CNTL_ST_1XBPP_444 0x0 +#define CNTL_ST_1XBPP_5551 (1 << 17) +#define CNTL_ST_1XBPP_565 (1 << 18) +#define CNTL_ST_CDWID_12 0x0 +#define CNTL_ST_CDWID_16 (1 << 19) +#define CNTL_ST_CDWID_18 (1 << 20) +#define CNTL_ST_CDWID_24 ((1 << 19)|(1 << 20)) +#define CNTL_ST_CEAEN (1 << 21) +#define CNTL_ST_LCDBPP24_PACKED (6 << 1) enum { /* individual formats */ @@ -124,11 +190,38 @@ struct clcd_board { struct amba_device; struct clk; +/** + * struct clcd_vendor_data - holds hardware (IP-block) vendor-specific + * variant information + * + * @clock_timregs: the CLCD needs to be clocked when accessing the + * timer registers, or the hardware will hang. + * @packed_24_bit_pixels: this variant supports 24bit packed pixel data, + * so that RGB accesses 3 bytes at a time, not just on even 32bit + * boundaries, packing the pixel data in memory. ST Microelectronics + * have this. + * @st_bitmux_control: ST Microelectronics have implemented output + * bit line multiplexing into the CLCD control register. This indicates + * that we need to use this. 
+ * @init_board: custom board init function for this variant + * @init_panel: custom panel init function for this variant + */ +struct clcd_vendor_data { + bool clock_timregs; + bool packed_24_bit_pixels; + bool st_bitmux_control; + int (*init_board)(struct amba_device *adev, + struct clcd_board *board); + int (*init_panel)(struct clcd_fb *fb, + struct device_node *panel); +}; + /* this data structure describes each frame buffer device we find */ struct clcd_fb { struct fb_info fb; struct amba_device *dev; struct clk *clk; + struct clcd_vendor_data *vendor; struct clcd_panel *panel; struct clcd_board *board; void *board_data; @@ -230,6 +323,10 @@ static inline void clcdfb_decode(struct clcd_fb *fb, struct clcd_regs *regs) else val |= CNTL_LCDBPP16_444; break; + case 24: + /* Modified variant supporting 24 bit packed pixels */ + val |= CNTL_ST_LCDBPP24_PACKED; + break; case 32: val |= CNTL_LCDBPP24; break; diff --git a/include/linux/amba/kmi.h b/include/linux/amba/kmi.h index 94dd727f1a..a39e5be751 100644 --- a/include/linux/amba/kmi.h +++ b/include/linux/amba/kmi.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * linux/include/asm-arm/hardware/amba_kmi.h * @@ -6,6 +5,21 @@ * * Copyright (C) 2000 Deep Blue Solutions Ltd. * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + * + * * --------------------------------------------------------------------------- * From ARM PrimeCell(tm) PS2 Keyboard/Mouse Interface (PL050) Technical * Reference Manual - ARM DDI 0143B - see http://www.arm.com/ diff --git a/include/linux/amba/mmci.h b/include/linux/amba/mmci.h index c92ebc39fc..8c98113069 100644 --- a/include/linux/amba/mmci.h +++ b/include/linux/amba/mmci.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ /* * include/linux/amba/mmci.h */ @@ -18,13 +17,20 @@ * mask into a value to be binary (or set some other custom bits * in MMCIPWR) or:ed and written into the MMCIPWR register of the * block. May also control external power based on the power_mode. - * @status: if no GPIO line was given to the block in this function will - * be called to determine whether a card is present in the MMC slot or not + * @status: if no GPIO read function was given to the block in + * gpio_wp (below) this function will be called to determine + * whether a card is present in the MMC slot or not + * @gpio_wp: read this GPIO pin to see if the card is write protected + * @gpio_cd: read this GPIO pin to detect card insertion + * @cd_invert: true if the gpio_cd pin value is active low */ struct mmci_platform_data { unsigned int ocr_mask; int (*ios_handler)(struct device *, struct mmc_ios *); unsigned int (*status)(struct device *); + int gpio_wp; + int gpio_cd; + bool cd_invert; }; #endif diff --git a/include/linux/amba/pl022.h b/include/linux/amba/pl022.h index 9bf58aac0d..854b7294f6 100644 --- a/include/linux/amba/pl022.h +++ b/include/linux/amba/pl022.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * include/linux/amba/pl022.h * @@ -11,6 +10,16 @@ * linux-2.6.17-rc3-mm1/drivers/spi/pxa2xx_spi.c * Initial adoption to PL022 by: 
* Sachin Verma + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. */ #ifndef _SSP_PL022_H @@ -223,6 +232,10 @@ struct dma_chan; /** * struct pl022_ssp_master - device.platform_data for SPI controller devices. * @bus_id: identifier for this bus + * @num_chipselect: chipselects are used to distinguish individual + * SPI slaves, and are numbered from zero to num_chipselects - 1. + * each slave has a chipselect signal, but it's common that not + * every chipselect is connected to a slave. * @enable_dma: if true enables DMA driven transfers. * @dma_rx_param: parameter to locate an RX DMA channel. * @dma_tx_param: parameter to locate a TX DMA channel. @@ -231,15 +244,18 @@ struct dma_chan; * indicates no delay and the device will be suspended immediately. * @rt: indicates the controller should run the message pump with realtime * priority to minimise the transfer latency on the bus. + * @chipselects: list of chip select gpios */ struct pl022_ssp_controller { u16 bus_id; + u8 num_chipselect; u8 enable_dma:1; bool (*dma_filter)(struct dma_chan *chan, void *filter_param); void *dma_rx_param; void *dma_tx_param; int autosuspend_delay; bool rt; + int *chipselects; }; /** @@ -258,6 +274,8 @@ struct pl022_ssp_controller { * @duplex: Microwire interface: Full/Half duplex * @clkdelay: on the PL023 variant, the delay in feeback clock cycles * before sampling the incoming line + * @cs_control: function pointer to board-specific function to + * assert/deassert I/O port to control HW generation of devices chip-select. 
*/ struct pl022_config_chip { enum ssp_interface iface; @@ -271,6 +289,7 @@ struct pl022_config_chip { enum ssp_microwire_wait_state wait_state; enum ssp_duplex duplex; enum ssp_clkdelay clkdelay; + void (*cs_control) (u32 control); }; #endif /* _SSP_PL022_H */ diff --git a/include/linux/amba/pl061.h b/include/linux/amba/pl061.h new file mode 100644 index 0000000000..fb83c04534 --- /dev/null +++ b/include/linux/amba/pl061.h @@ -0,0 +1,16 @@ +#include + +/* platform data for the PL061 GPIO driver */ + +struct pl061_platform_data { + /* number of the first GPIO */ + unsigned gpio_base; + + /* number of the first IRQ. + * If the IRQ functionality in not desired this must be set to 0. + */ + unsigned irq_base; + + u8 directions; /* startup directions, 1: out, 0: in */ + u8 values; /* startup values */ +}; diff --git a/include/linux/amba/pl080.h b/include/linux/amba/pl080.h index e192d54663..91b84a7f05 100644 --- a/include/linux/amba/pl080.h +++ b/include/linux/amba/pl080.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* include/linux/amba/pl080.h * * Copyright 2008 Openmoko, Inc. @@ -7,6 +6,10 @@ * Ben Dooks * * ARM PrimeCell PL080 DMA controller + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
*/ /* Note, there are some Samsung updates to this controller block which @@ -35,23 +38,24 @@ #define PL080_SOFT_LSREQ (0x2C) #define PL080_CONFIG (0x30) -#define PL080_CONFIG_M2_BE BIT(2) -#define PL080_CONFIG_M1_BE BIT(1) -#define PL080_CONFIG_ENABLE BIT(0) +#define PL080_CONFIG_M2_BE (1 << 2) +#define PL080_CONFIG_M1_BE (1 << 1) +#define PL080_CONFIG_ENABLE (1 << 0) #define PL080_SYNC (0x34) -/* The Faraday Technology FTDMAC020 variant registers */ -#define FTDMAC020_CH_BUSY (0x20) -/* Identical to PL080_CONFIG */ -#define FTDMAC020_CSR (0x24) -/* Identical to PL080_SYNC */ -#define FTDMAC020_SYNC (0x2C) -#define FTDMAC020_REVISION (0x30) -#define FTDMAC020_FEATURE (0x34) - /* Per channel configuration registers */ + +#define PL080_Cx_STRIDE (0x20) #define PL080_Cx_BASE(x) ((0x100 + (x * 0x20))) +#define PL080_Cx_SRC_ADDR(x) ((0x100 + (x * 0x20))) +#define PL080_Cx_DST_ADDR(x) ((0x104 + (x * 0x20))) +#define PL080_Cx_LLI(x) ((0x108 + (x * 0x20))) +#define PL080_Cx_CONTROL(x) ((0x10C + (x * 0x20))) +#define PL080_Cx_CONFIG(x) ((0x110 + (x * 0x20))) +#define PL080S_Cx_CONTROL2(x) ((0x110 + (x * 0x20))) +#define PL080S_Cx_CONFIG(x) ((0x114 + (x * 0x20))) + #define PL080_CH_SRC_ADDR (0x00) #define PL080_CH_DST_ADDR (0x04) #define PL080_CH_LLI (0x08) @@ -59,38 +63,31 @@ #define PL080_CH_CONFIG (0x10) #define PL080S_CH_CONTROL2 (0x10) #define PL080S_CH_CONFIG (0x14) -/* The Faraday FTDMAC020 derivative shuffles the registers around */ -#define FTDMAC020_CH_CSR (0x00) -#define FTDMAC020_CH_CFG (0x04) -#define FTDMAC020_CH_SRC_ADDR (0x08) -#define FTDMAC020_CH_DST_ADDR (0x0C) -#define FTDMAC020_CH_LLP (0x10) -#define FTDMAC020_CH_SIZE (0x14) -#define PL080_LLI_ADDR_MASK GENMASK(31, 2) +#define PL080_LLI_ADDR_MASK (0x3fffffff << 2) #define PL080_LLI_ADDR_SHIFT (2) -#define PL080_LLI_LM_AHB2 BIT(0) +#define PL080_LLI_LM_AHB2 (1 << 0) -#define PL080_CONTROL_TC_IRQ_EN BIT(31) -#define PL080_CONTROL_PROT_MASK GENMASK(30, 28) +#define PL080_CONTROL_TC_IRQ_EN (1 << 31) 
+#define PL080_CONTROL_PROT_MASK (0x7 << 28) #define PL080_CONTROL_PROT_SHIFT (28) -#define PL080_CONTROL_PROT_CACHE BIT(30) -#define PL080_CONTROL_PROT_BUFF BIT(29) -#define PL080_CONTROL_PROT_SYS BIT(28) -#define PL080_CONTROL_DST_INCR BIT(27) -#define PL080_CONTROL_SRC_INCR BIT(26) -#define PL080_CONTROL_DST_AHB2 BIT(25) -#define PL080_CONTROL_SRC_AHB2 BIT(24) -#define PL080_CONTROL_DWIDTH_MASK GENMASK(23, 21) +#define PL080_CONTROL_PROT_CACHE (1 << 30) +#define PL080_CONTROL_PROT_BUFF (1 << 29) +#define PL080_CONTROL_PROT_SYS (1 << 28) +#define PL080_CONTROL_DST_INCR (1 << 27) +#define PL080_CONTROL_SRC_INCR (1 << 26) +#define PL080_CONTROL_DST_AHB2 (1 << 25) +#define PL080_CONTROL_SRC_AHB2 (1 << 24) +#define PL080_CONTROL_DWIDTH_MASK (0x7 << 21) #define PL080_CONTROL_DWIDTH_SHIFT (21) -#define PL080_CONTROL_SWIDTH_MASK GENMASK(20, 18) +#define PL080_CONTROL_SWIDTH_MASK (0x7 << 18) #define PL080_CONTROL_SWIDTH_SHIFT (18) -#define PL080_CONTROL_DB_SIZE_MASK GENMASK(17, 15) +#define PL080_CONTROL_DB_SIZE_MASK (0x7 << 15) #define PL080_CONTROL_DB_SIZE_SHIFT (15) -#define PL080_CONTROL_SB_SIZE_MASK GENMASK(14, 12) +#define PL080_CONTROL_SB_SIZE_MASK (0x7 << 12) #define PL080_CONTROL_SB_SIZE_SHIFT (12) -#define PL080_CONTROL_TRANSFER_SIZE_MASK GENMASK(11, 0) -#define PL080S_CONTROL_TRANSFER_SIZE_MASK GENMASK(24, 0) +#define PL080_CONTROL_TRANSFER_SIZE_MASK (0xfff << 0) +#define PL080S_CONTROL_TRANSFER_SIZE_MASK (0x1ffffff << 0) #define PL080_CONTROL_TRANSFER_SIZE_SHIFT (0) #define PL080_BSIZE_1 (0x0) @@ -106,20 +103,20 @@ #define PL080_WIDTH_16BIT (0x1) #define PL080_WIDTH_32BIT (0x2) -#define PL080N_CONFIG_ITPROT BIT(20) -#define PL080N_CONFIG_SECPROT BIT(19) -#define PL080_CONFIG_HALT BIT(18) -#define PL080_CONFIG_ACTIVE BIT(17) /* RO */ -#define PL080_CONFIG_LOCK BIT(16) -#define PL080_CONFIG_TC_IRQ_MASK BIT(15) -#define PL080_CONFIG_ERR_IRQ_MASK BIT(14) -#define PL080_CONFIG_FLOW_CONTROL_MASK GENMASK(13, 11) +#define PL080N_CONFIG_ITPROT (1 << 20) +#define 
PL080N_CONFIG_SECPROT (1 << 19) +#define PL080_CONFIG_HALT (1 << 18) +#define PL080_CONFIG_ACTIVE (1 << 17) /* RO */ +#define PL080_CONFIG_LOCK (1 << 16) +#define PL080_CONFIG_TC_IRQ_MASK (1 << 15) +#define PL080_CONFIG_ERR_IRQ_MASK (1 << 14) +#define PL080_CONFIG_FLOW_CONTROL_MASK (0x7 << 11) #define PL080_CONFIG_FLOW_CONTROL_SHIFT (11) -#define PL080_CONFIG_DST_SEL_MASK GENMASK(9, 6) +#define PL080_CONFIG_DST_SEL_MASK (0xf << 6) #define PL080_CONFIG_DST_SEL_SHIFT (6) -#define PL080_CONFIG_SRC_SEL_MASK GENMASK(4, 1) +#define PL080_CONFIG_SRC_SEL_MASK (0xf << 1) #define PL080_CONFIG_SRC_SEL_SHIFT (1) -#define PL080_CONFIG_ENABLE BIT(0) +#define PL080_CONFIG_ENABLE (1 << 0) #define PL080_FLOW_MEM2MEM (0x0) #define PL080_FLOW_MEM2PER (0x1) @@ -130,73 +127,6 @@ #define PL080_FLOW_PER2MEM_PER (0x6) #define PL080_FLOW_SRC2DST_SRC (0x7) -#define FTDMAC020_CH_CSR_TC_MSK BIT(31) -/* Later versions have a threshold in bits 24..26, */ -#define FTDMAC020_CH_CSR_FIFOTH_MSK GENMASK(26, 24) -#define FTDMAC020_CH_CSR_FIFOTH_SHIFT (24) -#define FTDMAC020_CH_CSR_CHPR1_MSK GENMASK(23, 22) -#define FTDMAC020_CH_CSR_PROT3 BIT(21) -#define FTDMAC020_CH_CSR_PROT2 BIT(20) -#define FTDMAC020_CH_CSR_PROT1 BIT(19) -#define FTDMAC020_CH_CSR_SRC_SIZE_MSK GENMASK(18, 16) -#define FTDMAC020_CH_CSR_SRC_SIZE_SHIFT (16) -#define FTDMAC020_CH_CSR_ABT BIT(15) -#define FTDMAC020_CH_CSR_SRC_WIDTH_MSK GENMASK(13, 11) -#define FTDMAC020_CH_CSR_SRC_WIDTH_SHIFT (11) -#define FTDMAC020_CH_CSR_DST_WIDTH_MSK GENMASK(10, 8) -#define FTDMAC020_CH_CSR_DST_WIDTH_SHIFT (8) -#define FTDMAC020_CH_CSR_MODE BIT(7) -/* 00 = increase, 01 = decrease, 10 = fix */ -#define FTDMAC020_CH_CSR_SRCAD_CTL_MSK GENMASK(6, 5) -#define FTDMAC020_CH_CSR_SRCAD_CTL_SHIFT (5) -#define FTDMAC020_CH_CSR_DSTAD_CTL_MSK GENMASK(4, 3) -#define FTDMAC020_CH_CSR_DSTAD_CTL_SHIFT (3) -#define FTDMAC020_CH_CSR_SRC_SEL BIT(2) -#define FTDMAC020_CH_CSR_DST_SEL BIT(1) -#define FTDMAC020_CH_CSR_EN BIT(0) - -/* FIFO threshold setting */ -#define 
FTDMAC020_CH_CSR_FIFOTH_1 (0x0) -#define FTDMAC020_CH_CSR_FIFOTH_2 (0x1) -#define FTDMAC020_CH_CSR_FIFOTH_4 (0x2) -#define FTDMAC020_CH_CSR_FIFOTH_8 (0x3) -#define FTDMAC020_CH_CSR_FIFOTH_16 (0x4) -/* The FTDMAC020 supports 64bit wide transfers */ -#define FTDMAC020_WIDTH_64BIT (0x3) -/* Address can be increased, decreased or fixed */ -#define FTDMAC020_CH_CSR_SRCAD_CTL_INC (0x0) -#define FTDMAC020_CH_CSR_SRCAD_CTL_DEC (0x1) -#define FTDMAC020_CH_CSR_SRCAD_CTL_FIXED (0x2) - -#define FTDMAC020_CH_CFG_LLP_CNT_MASK GENMASK(19, 16) -#define FTDMAC020_CH_CFG_LLP_CNT_SHIFT (16) -#define FTDMAC020_CH_CFG_BUSY BIT(8) -#define FTDMAC020_CH_CFG_INT_ABT_MASK BIT(2) -#define FTDMAC020_CH_CFG_INT_ERR_MASK BIT(1) -#define FTDMAC020_CH_CFG_INT_TC_MASK BIT(0) - -/* Inside the LLIs, the applicable CSR fields are mapped differently */ -#define FTDMAC020_LLI_TC_MSK BIT(28) -#define FTDMAC020_LLI_SRC_WIDTH_MSK GENMASK(27, 25) -#define FTDMAC020_LLI_SRC_WIDTH_SHIFT (25) -#define FTDMAC020_LLI_DST_WIDTH_MSK GENMASK(24, 22) -#define FTDMAC020_LLI_DST_WIDTH_SHIFT (22) -#define FTDMAC020_LLI_SRCAD_CTL_MSK GENMASK(21, 20) -#define FTDMAC020_LLI_SRCAD_CTL_SHIFT (20) -#define FTDMAC020_LLI_DSTAD_CTL_MSK GENMASK(19, 18) -#define FTDMAC020_LLI_DSTAD_CTL_SHIFT (18) -#define FTDMAC020_LLI_SRC_SEL BIT(17) -#define FTDMAC020_LLI_DST_SEL BIT(16) -#define FTDMAC020_LLI_TRANSFER_SIZE_MASK GENMASK(11, 0) -#define FTDMAC020_LLI_TRANSFER_SIZE_SHIFT (0) - -#define FTDMAC020_CFG_LLP_CNT_MASK GENMASK(19, 16) -#define FTDMAC020_CFG_LLP_CNT_SHIFT (16) -#define FTDMAC020_CFG_BUSY BIT(8) -#define FTDMAC020_CFG_INT_ABT_MSK BIT(2) -#define FTDMAC020_CFG_INT_ERR_MSK BIT(1) -#define FTDMAC020_CFG_INT_TC_MSK BIT(0) - /* DMA linked list chain structure */ struct pl080_lli { diff --git a/include/linux/amba/pl08x.h b/include/linux/amba/pl08x.h index 3100e0debc..27e9ec8778 100644 --- a/include/linux/amba/pl08x.h +++ b/include/linux/amba/pl08x.h @@ -1,10 +1,13 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * 
linux/amba/pl08x.h - ARM PrimeCell DMA Controller driver * * Copyright (C) 2005 ARM Ltd * Copyright (C) 2010 ST-Ericsson SA * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * * pl08x information required by platform code * * Please credit ARM.com @@ -44,6 +47,8 @@ enum { * devices with static assignments * @muxval: a number usually used to poke into some mux regiser to * mux in the signal to this channel + * @cctl_memcpy: options for the channel control register for memcpy + * *** not used for slave channels *** * @addr: source/target address in physical memory for this DMA channel, * can be the address of a FIFO register for burst requests for example. * This can be left undefined if the PrimeCell API is used for configuring @@ -58,28 +63,12 @@ struct pl08x_channel_data { int min_signal; int max_signal; u32 muxval; + u32 cctl_memcpy; dma_addr_t addr; bool single; u8 periph_buses; }; -enum pl08x_burst_size { - PL08X_BURST_SZ_1, - PL08X_BURST_SZ_4, - PL08X_BURST_SZ_8, - PL08X_BURST_SZ_16, - PL08X_BURST_SZ_32, - PL08X_BURST_SZ_64, - PL08X_BURST_SZ_128, - PL08X_BURST_SZ_256, -}; - -enum pl08x_bus_width { - PL08X_BUS_WIDTH_8_BITS, - PL08X_BUS_WIDTH_16_BITS, - PL08X_BUS_WIDTH_32_BITS, -}; - /** * struct pl08x_platform_data - the platform configuration for the PL08x * PrimeCells. @@ -87,11 +76,6 @@ enum pl08x_bus_width { * platform, all inclusive, including multiplexed channels. The available * physical channels will be multiplexed around these signals as they are * requested, just enumerate all possible channels. 
- * @num_slave_channels: number of elements in the slave channel array - * @memcpy_burst_size: the appropriate burst size for memcpy operations - * @memcpy_bus_width: memory bus width - * @memcpy_prot_buff: whether memcpy DMA is bufferable - * @memcpy_prot_cache: whether memcpy DMA is cacheable * @get_xfer_signal: request a physical signal to be used for a DMA transfer * immediately: if there is some multiplexing or similar blocking the use * of the channel the transfer can be denied by returning less than zero, @@ -100,22 +84,15 @@ enum pl08x_bus_width { * running any DMA transfer and multiplexing can be recycled * @lli_buses: buses which LLIs can be fetched from: PL08X_AHB1 | PL08X_AHB2 * @mem_buses: buses which memory can be accessed from: PL08X_AHB1 | PL08X_AHB2 - * @slave_map: DMA slave matching table - * @slave_map_len: number of elements in @slave_map */ struct pl08x_platform_data { struct pl08x_channel_data *slave_channels; unsigned int num_slave_channels; - enum pl08x_burst_size memcpy_burst_size; - enum pl08x_bus_width memcpy_bus_width; - bool memcpy_prot_buff; - bool memcpy_prot_cache; + struct pl08x_channel_data memcpy_channel; int (*get_xfer_signal)(const struct pl08x_channel_data *); void (*put_xfer_signal)(const struct pl08x_channel_data *, int); u8 lli_buses; u8 mem_buses; - const struct dma_slave_map *slave_map; - int slave_map_len; }; #ifdef CONFIG_AMBA_PL08X diff --git a/include/linux/amba/pl093.h b/include/linux/amba/pl093.h index b17166e3b4..2983e3671a 100644 --- a/include/linux/amba/pl093.h +++ b/include/linux/amba/pl093.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* linux/amba/pl093.h * * Copyright (c) 2008 Simtec Electronics @@ -7,6 +6,10 @@ * * AMBA PL093 SSMC (synchronous static memory controller) * See DDI0236.pdf (r0p4) for more details + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software 
Foundation. */ #define SMB_BANK(x) ((x) * 0x20) /* each bank control set is 0x20 apart */ diff --git a/include/linux/amba/pl330.h b/include/linux/amba/pl330.h new file mode 100644 index 0000000000..fe93758e84 --- /dev/null +++ b/include/linux/amba/pl330.h @@ -0,0 +1,35 @@ +/* linux/include/linux/amba/pl330.h + * + * Copyright (C) 2010 Samsung Electronics Co. Ltd. + * Jaswinder Singh + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + */ + +#ifndef __AMBA_PL330_H_ +#define __AMBA_PL330_H_ + +#include + +struct dma_pl330_platdata { + /* + * Number of valid peripherals connected to DMAC. + * This may be different from the value read from + * CR0, as the PL330 implementation might have 'holes' + * in the peri list or the peri could also be reached + * from another DMAC which the platform prefers. + */ + u8 nr_valid_peri; + /* Array of valid peripherals */ + u8 *peri_id; + /* Operational capabilities */ + dma_cap_mask_t cap_mask; + /* Bytes to allocate for MC buffer */ + unsigned mcbuf_sz; +}; + +extern bool pl330_filter(struct dma_chan *chan, void *param); +#endif /* __AMBA_PL330_H_ */ diff --git a/include/linux/amba/serial.h b/include/linux/amba/serial.h index a1307b58cc..ad0965e21a 100644 --- a/include/linux/amba/serial.h +++ b/include/linux/amba/serial.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * linux/include/asm-arm/hardware/serial_amba.h * @@ -6,6 +5,20 @@ * * Copyright (C) ARM Limited * Copyright (C) 2000 Deep Blue Solutions Ltd. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. 
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #ifndef ASM_ARM_HARDWARE_SERIAL_AMBA_H #define ASM_ARM_HARDWARE_SERIAL_AMBA_H diff --git a/include/linux/amd-iommu.h b/include/linux/amd-iommu.h index 58e6c3806c..09751d3499 100644 --- a/include/linux/amd-iommu.h +++ b/include/linux/amd-iommu.h @@ -1,8 +1,20 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright (C) 2007-2010 Advanced Micro Devices, Inc. * Author: Joerg Roedel * Leo Duran + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #ifndef _ASM_X86_AMD_IOMMU_H @@ -10,8 +22,6 @@ #include -struct amd_iommu; - /* * This is mainly used to communicate information back-and-forth * between SVM and IOMMU for setting up and tearing down posted @@ -34,6 +44,24 @@ struct pci_dev; extern int amd_iommu_detect(void); extern int amd_iommu_init_hardware(void); +/** + * amd_iommu_enable_device_erratum() - Enable erratum workaround for device + * in the IOMMUv2 driver + * @pdev: The PCI device the workaround is necessary for + * @erratum: The erratum workaround to enable + * + * The function needs to be called before amd_iommu_init_device(). + * Possible values for the erratum number are for now: + * - AMD_PRI_DEV_ERRATUM_ENABLE_RESET - Reset PRI capability when PRI + * is enabled + * - AMD_PRI_DEV_ERRATUM_LIMIT_REQ_ONE - Limit number of outstanding PRI + * requests to one + */ +#define AMD_PRI_DEV_ERRATUM_ENABLE_RESET 0 +#define AMD_PRI_DEV_ERRATUM_LIMIT_REQ_ONE 1 + +extern void amd_iommu_enable_device_erratum(struct pci_dev *pdev, u32 erratum); + /** * amd_iommu_init_device() - Init device for use with IOMMUv2 driver * @pdev: The PCI device to initialize @@ -60,7 +88,7 @@ extern void amd_iommu_free_device(struct pci_dev *pdev); * * The function returns 0 on success or a negative value on error. */ -extern int amd_iommu_bind_pasid(struct pci_dev *pdev, u32 pasid, +extern int amd_iommu_bind_pasid(struct pci_dev *pdev, int pasid, struct task_struct *task); /** @@ -72,7 +100,7 @@ extern int amd_iommu_bind_pasid(struct pci_dev *pdev, u32 pasid, * When this function returns the device is no longer using the PASID * and the PASID is no longer bound to its task. 
*/ -extern void amd_iommu_unbind_pasid(struct pci_dev *pdev, u32 pasid); +extern void amd_iommu_unbind_pasid(struct pci_dev *pdev, int pasid); /** * amd_iommu_set_invalid_ppr_cb() - Register a call-back for failed @@ -98,7 +126,7 @@ extern void amd_iommu_unbind_pasid(struct pci_dev *pdev, u32 pasid); #define AMD_IOMMU_INV_PRI_RSP_FAIL 2 typedef int (*amd_iommu_invalid_ppr_cb)(struct pci_dev *pdev, - u32 pasid, + int pasid, unsigned long address, u16); @@ -150,7 +178,7 @@ extern int amd_iommu_device_info(struct pci_dev *pdev, * @cb: The call-back function */ -typedef void (*amd_iommu_invalidate_ctx)(struct pci_dev *pdev, u32 pasid); +typedef void (*amd_iommu_invalidate_ctx)(struct pci_dev *pdev, int pasid); extern int amd_iommu_set_invalidate_ctx_cb(struct pci_dev *pdev, amd_iommu_invalidate_ctx cb); @@ -168,9 +196,6 @@ extern int amd_iommu_register_ga_log_notifier(int (*notifier)(u32)); extern int amd_iommu_update_ga(int cpu, bool is_run, void *data); -extern int amd_iommu_activate_guest_mode(void *data); -extern int amd_iommu_deactivate_guest_mode(void *data); - #else /* defined(CONFIG_AMD_IOMMU) && defined(CONFIG_IRQ_REMAP) */ static inline int @@ -185,25 +210,6 @@ amd_iommu_update_ga(int cpu, bool is_run, void *data) return 0; } -static inline int amd_iommu_activate_guest_mode(void *data) -{ - return 0; -} - -static inline int amd_iommu_deactivate_guest_mode(void *data) -{ - return 0; -} #endif /* defined(CONFIG_AMD_IOMMU) && defined(CONFIG_IRQ_REMAP) */ -int amd_iommu_get_num_iommus(void); -bool amd_iommu_pc_supported(void); -u8 amd_iommu_pc_get_max_banks(unsigned int idx); -u8 amd_iommu_pc_get_max_counters(unsigned int idx); -int amd_iommu_pc_set_reg(struct amd_iommu *iommu, u8 bank, u8 cntr, u8 fxn, - u64 *value); -int amd_iommu_pc_get_reg(struct amd_iommu *iommu, u8 bank, u8 cntr, u8 fxn, - u64 *value); -struct amd_iommu *get_amd_iommu(unsigned int idx); - #endif /* _ASM_X86_AMD_IOMMU_H */ diff --git a/include/linux/amifd.h b/include/linux/amifd.h new file 
mode 100644 index 0000000000..346993268b --- /dev/null +++ b/include/linux/amifd.h @@ -0,0 +1,62 @@ +#ifndef _AMIFD_H +#define _AMIFD_H + +/* Definitions for the Amiga floppy driver */ + +#include + +#define FD_MAX_UNITS 4 /* Max. Number of drives */ +#define FLOPPY_MAX_SECTORS 22 /* Max. Number of sectors per track */ + +#ifndef ASSEMBLER + +struct fd_data_type { + char *name; /* description of data type */ + int sects; /* sectors per track */ +#ifdef __STDC__ + int (*read_fkt)(int); + void (*write_fkt)(int); +#else + int (*read_fkt)(); /* read whole track */ + void (*write_fkt)(); /* write whole track */ +#endif +}; + +/* +** Floppy type descriptions +*/ + +struct fd_drive_type { + unsigned long code; /* code returned from drive */ + char *name; /* description of drive */ + unsigned int tracks; /* number of tracks */ + unsigned int heads; /* number of heads */ + unsigned int read_size; /* raw read size for one track */ + unsigned int write_size; /* raw write size for one track */ + unsigned int sect_mult; /* sectors and gap multiplier (HD = 2) */ + unsigned int precomp1; /* start track for precomp 1 */ + unsigned int precomp2; /* start track for precomp 2 */ + unsigned int step_delay; /* time (in ms) for delay after step */ + unsigned int settle_time; /* time to settle after dir change */ + unsigned int side_time; /* time needed to change sides */ +}; + +struct amiga_floppy_struct { + struct fd_drive_type *type; /* type of floppy for this unit */ + struct fd_data_type *dtype; /* type of floppy for this unit */ + int track; /* current track (-1 == unknown) */ + unsigned char *trackbuf; /* current track (kmaloc()'d */ + + int blocks; /* total # blocks on disk */ + + int changed; /* true when not known */ + int disk; /* disk in drive (-1 == unknown) */ + int motor; /* true when motor is at speed */ + int busy; /* true when drive is active */ + int dirty; /* true when trackbuf is not on disk */ + int status; /* current error code for unit */ + struct gendisk 
*gendisk; +}; +#endif + +#endif diff --git a/include/linux/amifdreg.h b/include/linux/amifdreg.h new file mode 100644 index 0000000000..76188bf48d --- /dev/null +++ b/include/linux/amifdreg.h @@ -0,0 +1,81 @@ +#ifndef _LINUX_AMIFDREG_H +#define _LINUX_AMIFDREG_H + +/* +** CIAAPRA bits (read only) +*/ + +#define DSKRDY (0x1<<5) /* disk ready when low */ +#define DSKTRACK0 (0x1<<4) /* head at track zero when low */ +#define DSKPROT (0x1<<3) /* disk protected when low */ +#define DSKCHANGE (0x1<<2) /* low when disk removed */ + +/* +** CIAAPRB bits (read/write) +*/ + +#define DSKMOTOR (0x1<<7) /* motor on when low */ +#define DSKSEL3 (0x1<<6) /* select drive 3 when low */ +#define DSKSEL2 (0x1<<5) /* select drive 2 when low */ +#define DSKSEL1 (0x1<<4) /* select drive 1 when low */ +#define DSKSEL0 (0x1<<3) /* select drive 0 when low */ +#define DSKSIDE (0x1<<2) /* side selection: 0 = upper, 1 = lower */ +#define DSKDIREC (0x1<<1) /* step direction: 0=in, 1=out (to trk 0) */ +#define DSKSTEP (0x1) /* pulse low to step head 1 track */ + +/* +** DSKBYTR bits (read only) +*/ + +#define DSKBYT (1<<15) /* register contains valid byte when set */ +#define DMAON (1<<14) /* disk DMA enabled */ +#define DISKWRITE (1<<13) /* disk write bit in DSKLEN enabled */ +#define WORDEQUAL (1<<12) /* DSKSYNC register match when true */ +/* bits 7-0 are data */ + +/* +** ADKCON/ADKCONR bits +*/ + +#ifndef SETCLR +#define ADK_SETCLR (1<<15) /* control bit */ +#endif +#define ADK_PRECOMP1 (1<<14) /* precompensation selection */ +#define ADK_PRECOMP0 (1<<13) /* 00=none, 01=140ns, 10=280ns, 11=500ns */ +#define ADK_MFMPREC (1<<12) /* 0=GCR precomp., 1=MFM precomp. 
*/ +#define ADK_WORDSYNC (1<<10) /* enable DSKSYNC auto DMA */ +#define ADK_MSBSYNC (1<<9) /* when 1, enable sync on MSbit (for GCR) */ +#define ADK_FAST (1<<8) /* bit cell: 0=2us (GCR), 1=1us (MFM) */ + +/* +** DSKLEN bits +*/ + +#define DSKLEN_DMAEN (1<<15) +#define DSKLEN_WRITE (1<<14) + +/* +** INTENA/INTREQ bits +*/ + +#define DSKINDEX (0x1<<4) /* DSKINDEX bit */ + +/* +** Misc +*/ + +#define MFM_SYNC 0x4489 /* standard MFM sync value */ + +/* Values for FD_COMMAND */ +#define FD_RECALIBRATE 0x07 /* move to track 0 */ +#define FD_SEEK 0x0F /* seek track */ +#define FD_READ 0xE6 /* read with MT, MFM, SKip deleted */ +#define FD_WRITE 0xC5 /* write with MT, MFM */ +#define FD_SENSEI 0x08 /* Sense Interrupt Status */ +#define FD_SPECIFY 0x03 /* specify HUT etc */ +#define FD_FORMAT 0x4D /* format one track */ +#define FD_VERSION 0x10 /* get version code */ +#define FD_CONFIGURE 0x13 /* configure FIFO operation */ +#define FD_PERPENDICULAR 0x12 /* perpendicular r/w mode */ + +#endif /* _LINUX_AMIFDREG_H */ diff --git a/include/linux/amigaffs.h b/include/linux/amigaffs.h new file mode 100644 index 0000000000..43b41c06aa --- /dev/null +++ b/include/linux/amigaffs.h @@ -0,0 +1,144 @@ +#ifndef AMIGAFFS_H +#define AMIGAFFS_H + +#include +#include + +#define FS_OFS 0x444F5300 +#define FS_FFS 0x444F5301 +#define FS_INTLOFS 0x444F5302 +#define FS_INTLFFS 0x444F5303 +#define FS_DCOFS 0x444F5304 +#define FS_DCFFS 0x444F5305 +#define MUFS_FS 0x6d754653 /* 'muFS' */ +#define MUFS_OFS 0x6d754600 /* 'muF\0' */ +#define MUFS_FFS 0x6d754601 /* 'muF\1' */ +#define MUFS_INTLOFS 0x6d754602 /* 'muF\2' */ +#define MUFS_INTLFFS 0x6d754603 /* 'muF\3' */ +#define MUFS_DCOFS 0x6d754604 /* 'muF\4' */ +#define MUFS_DCFFS 0x6d754605 /* 'muF\5' */ + +#define T_SHORT 2 +#define T_LIST 16 +#define T_DATA 8 + +#define ST_LINKFILE -4 +#define ST_FILE -3 +#define ST_ROOT 1 +#define ST_USERDIR 2 +#define ST_SOFTLINK 3 +#define ST_LINKDIR 4 + +#define AFFS_ROOT_BMAPS 25 + +struct affs_date { + 
__be32 days; + __be32 mins; + __be32 ticks; +}; + +struct affs_short_date { + __be16 days; + __be16 mins; + __be16 ticks; +}; + +struct affs_root_head { + __be32 ptype; + __be32 spare1; + __be32 spare2; + __be32 hash_size; + __be32 spare3; + __be32 checksum; + __be32 hashtable[1]; +}; + +struct affs_root_tail { + __be32 bm_flag; + __be32 bm_blk[AFFS_ROOT_BMAPS]; + __be32 bm_ext; + struct affs_date root_change; + u8 disk_name[32]; + __be32 spare1; + __be32 spare2; + struct affs_date disk_change; + struct affs_date disk_create; + __be32 spare3; + __be32 spare4; + __be32 dcache; + __be32 stype; +}; + +struct affs_head { + __be32 ptype; + __be32 key; + __be32 block_count; + __be32 spare1; + __be32 first_data; + __be32 checksum; + __be32 table[1]; +}; + +struct affs_tail { + __be32 spare1; + __be16 uid; + __be16 gid; + __be32 protect; + __be32 size; + u8 comment[92]; + struct affs_date change; + u8 name[32]; + __be32 spare2; + __be32 original; + __be32 link_chain; + __be32 spare[5]; + __be32 hash_chain; + __be32 parent; + __be32 extension; + __be32 stype; +}; + +struct slink_front +{ + __be32 ptype; + __be32 key; + __be32 spare1[3]; + __be32 checksum; + u8 symname[1]; /* depends on block size */ +}; + +struct affs_data_head +{ + __be32 ptype; + __be32 key; + __be32 sequence; + __be32 size; + __be32 next; + __be32 checksum; + u8 data[1]; /* depends on block size */ +}; + +/* Permission bits */ + +#define FIBF_OTR_READ 0x8000 +#define FIBF_OTR_WRITE 0x4000 +#define FIBF_OTR_EXECUTE 0x2000 +#define FIBF_OTR_DELETE 0x1000 +#define FIBF_GRP_READ 0x0800 +#define FIBF_GRP_WRITE 0x0400 +#define FIBF_GRP_EXECUTE 0x0200 +#define FIBF_GRP_DELETE 0x0100 + +#define FIBF_HIDDEN 0x0080 +#define FIBF_SCRIPT 0x0040 +#define FIBF_PURE 0x0020 /* no use under linux */ +#define FIBF_ARCHIVED 0x0010 /* never set, always cleared on write */ +#define FIBF_NOREAD 0x0008 /* 0 means allowed */ +#define FIBF_NOWRITE 0x0004 /* 0 means allowed */ +#define FIBF_NOEXECUTE 0x0002 /* 0 means allowed, 
ignored under linux */ +#define FIBF_NODELETE 0x0001 /* 0 means allowed */ + +#define FIBF_OWNER 0x000F /* Bits pertaining to owner */ +#define FIBF_MASK 0xEE0E /* Bits modified by Linux */ + +#endif diff --git a/include/linux/anon_inodes.h b/include/linux/anon_inodes.h index 71881a2b6f..8013a45242 100644 --- a/include/linux/anon_inodes.h +++ b/include/linux/anon_inodes.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ /* * include/linux/anon_inodes.h * @@ -10,17 +9,12 @@ #define _LINUX_ANON_INODES_H struct file_operations; -struct inode; struct file *anon_inode_getfile(const char *name, const struct file_operations *fops, void *priv, int flags); int anon_inode_getfd(const char *name, const struct file_operations *fops, void *priv, int flags); -int anon_inode_getfd_secure(const char *name, - const struct file_operations *fops, - void *priv, int flags, - const struct inode *context_inode); #endif /* _LINUX_ANON_INODES_H */ diff --git a/include/linux/apm-emulation.h b/include/linux/apm-emulation.h index 94c0369579..e6d800358d 100644 --- a/include/linux/apm-emulation.h +++ b/include/linux/apm-emulation.h @@ -1,8 +1,9 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* -*- linux-c -*- * * (C) 2003 zecke@handhelds.org * + * GPL version 2 + * * based on arch/arm/kernel/apm.c * factor out the information needed by architectures to provide * apm status diff --git a/include/linux/apm_bios.h b/include/linux/apm_bios.h index 7554192c3a..9c3a87184f 100644 --- a/include/linux/apm_bios.h +++ b/include/linux/apm_bios.h @@ -1,7 +1,16 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * Include file for the interface to an APM BIOS * Copyright 1994-2001 Stephen Rothwell (sfr@canb.auug.org.au) + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2, or (at your option) any + * later version. 
+ * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. */ #ifndef _LINUX_APM_H #define _LINUX_APM_H diff --git a/include/linux/apple-gmux.h b/include/linux/apple-gmux.h index ddb10aa67b..714186de8c 100644 --- a/include/linux/apple-gmux.h +++ b/include/linux/apple-gmux.h @@ -1,7 +1,18 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * apple-gmux.h - microcontroller built into dual GPU MacBook Pro & Mac Pro * Copyright (C) 2015 Lukas Wunner + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License (version 2) as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, see . */ #ifndef LINUX_APPLE_GMUX_H diff --git a/include/linux/apple_bl.h b/include/linux/apple_bl.h index 445af2e3cc..0a95e730fc 100644 --- a/include/linux/apple_bl.h +++ b/include/linux/apple_bl.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ /* * apple_bl exported symbols */ diff --git a/include/linux/arm-cci.h b/include/linux/arm-cci.h index d0e44201d8..521ec1f2e6 100644 --- a/include/linux/arm-cci.h +++ b/include/linux/arm-cci.h @@ -1,8 +1,21 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * CCI cache coherent interconnect support * * Copyright (C) 2013 ARM Ltd. 
+ * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #ifndef __LINUX_ARM_CCI_H diff --git a/include/linux/arm-smccc.h b/include/linux/arm-smccc.h index 63ccb52521..4c5bca38c6 100644 --- a/include/linux/arm-smccc.h +++ b/include/linux/arm-smccc.h @@ -1,23 +1,27 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright (c) 2015, Linaro Limited + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * */ #ifndef __LINUX_ARM_SMCCC_H #define __LINUX_ARM_SMCCC_H -#include -#include - /* * This file provides common defines for ARM SMC Calling Convention as * specified in - * https://developer.arm.com/docs/den0028/latest - * - * This code is up-to-date with version DEN 0028 C + * http://infocenter.arm.com/help/topic/com.arm.doc.den0028a/index.html */ -#define ARM_SMCCC_STD_CALL _AC(0,U) -#define ARM_SMCCC_FAST_CALL _AC(1,U) +#define ARM_SMCCC_STD_CALL 0 +#define ARM_SMCCC_FAST_CALL 1 #define ARM_SMCCC_TYPE_SHIFT 31 #define ARM_SMCCC_SMC_32 0 @@ -48,179 +52,18 @@ #define ARM_SMCCC_OWNER_SIP 2 #define ARM_SMCCC_OWNER_OEM 3 #define ARM_SMCCC_OWNER_STANDARD 4 -#define ARM_SMCCC_OWNER_STANDARD_HYP 5 -#define ARM_SMCCC_OWNER_VENDOR_HYP 6 #define ARM_SMCCC_OWNER_TRUSTED_APP 48 #define ARM_SMCCC_OWNER_TRUSTED_APP_END 49 #define ARM_SMCCC_OWNER_TRUSTED_OS 50 #define ARM_SMCCC_OWNER_TRUSTED_OS_END 63 -#define ARM_SMCCC_FUNC_QUERY_CALL_UID 0xff01 - #define ARM_SMCCC_QUIRK_NONE 0 #define ARM_SMCCC_QUIRK_QCOM_A6 1 /* Save/restore register a6 */ -#define ARM_SMCCC_VERSION_1_0 0x10000 -#define ARM_SMCCC_VERSION_1_1 0x10001 -#define ARM_SMCCC_VERSION_1_2 0x10002 -#define ARM_SMCCC_VERSION_1_3 0x10003 - -#define ARM_SMCCC_1_3_SVE_HINT 0x10000 - -#define ARM_SMCCC_VERSION_FUNC_ID \ - ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, \ - ARM_SMCCC_SMC_32, \ - 0, 0) - -#define ARM_SMCCC_ARCH_FEATURES_FUNC_ID \ - ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, \ - ARM_SMCCC_SMC_32, \ - 0, 1) - -#define ARM_SMCCC_ARCH_SOC_ID \ - ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, \ - ARM_SMCCC_SMC_32, \ - 0, 2) - -#define ARM_SMCCC_ARCH_WORKAROUND_1 \ - ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, \ - ARM_SMCCC_SMC_32, \ - 0, 0x8000) - -#define ARM_SMCCC_ARCH_WORKAROUND_2 \ - ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, \ - ARM_SMCCC_SMC_32, \ - 0, 0x7fff) - -#define ARM_SMCCC_VENDOR_HYP_CALL_UID_FUNC_ID \ - ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, \ - ARM_SMCCC_SMC_32, \ - ARM_SMCCC_OWNER_VENDOR_HYP, \ - 
ARM_SMCCC_FUNC_QUERY_CALL_UID) - -/* KVM UID value: 28b46fb6-2ec5-11e9-a9ca-4b564d003a74 */ -#define ARM_SMCCC_VENDOR_HYP_UID_KVM_REG_0 0xb66fb428U -#define ARM_SMCCC_VENDOR_HYP_UID_KVM_REG_1 0xe911c52eU -#define ARM_SMCCC_VENDOR_HYP_UID_KVM_REG_2 0x564bcaa9U -#define ARM_SMCCC_VENDOR_HYP_UID_KVM_REG_3 0x743a004dU - -/* KVM "vendor specific" services */ -#define ARM_SMCCC_KVM_FUNC_FEATURES 0 -#define ARM_SMCCC_KVM_FUNC_PTP 1 -#define ARM_SMCCC_KVM_FUNC_FEATURES_2 127 -#define ARM_SMCCC_KVM_NUM_FUNCS 128 - -#define ARM_SMCCC_VENDOR_HYP_KVM_FEATURES_FUNC_ID \ - ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, \ - ARM_SMCCC_SMC_32, \ - ARM_SMCCC_OWNER_VENDOR_HYP, \ - ARM_SMCCC_KVM_FUNC_FEATURES) - -#define SMCCC_ARCH_WORKAROUND_RET_UNAFFECTED 1 - -/* - * ptp_kvm is a feature used for time sync between vm and host. - * ptp_kvm module in guest kernel will get service from host using - * this hypercall ID. - */ -#define ARM_SMCCC_VENDOR_HYP_KVM_PTP_FUNC_ID \ - ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, \ - ARM_SMCCC_SMC_32, \ - ARM_SMCCC_OWNER_VENDOR_HYP, \ - ARM_SMCCC_KVM_FUNC_PTP) - -/* ptp_kvm counter type ID */ -#define KVM_PTP_VIRT_COUNTER 0 -#define KVM_PTP_PHYS_COUNTER 1 - -/* Paravirtualised time calls (defined by ARM DEN0057A) */ -#define ARM_SMCCC_HV_PV_TIME_FEATURES \ - ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, \ - ARM_SMCCC_SMC_64, \ - ARM_SMCCC_OWNER_STANDARD_HYP, \ - 0x20) - -#define ARM_SMCCC_HV_PV_TIME_ST \ - ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, \ - ARM_SMCCC_SMC_64, \ - ARM_SMCCC_OWNER_STANDARD_HYP, \ - 0x21) - -/* TRNG entropy source calls (defined by ARM DEN0098) */ -#define ARM_SMCCC_TRNG_VERSION \ - ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, \ - ARM_SMCCC_SMC_32, \ - ARM_SMCCC_OWNER_STANDARD, \ - 0x50) - -#define ARM_SMCCC_TRNG_FEATURES \ - ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, \ - ARM_SMCCC_SMC_32, \ - ARM_SMCCC_OWNER_STANDARD, \ - 0x51) - -#define ARM_SMCCC_TRNG_GET_UUID \ - ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, \ - ARM_SMCCC_SMC_32, \ - 
ARM_SMCCC_OWNER_STANDARD, \ - 0x52) - -#define ARM_SMCCC_TRNG_RND32 \ - ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, \ - ARM_SMCCC_SMC_32, \ - ARM_SMCCC_OWNER_STANDARD, \ - 0x53) - -#define ARM_SMCCC_TRNG_RND64 \ - ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, \ - ARM_SMCCC_SMC_64, \ - ARM_SMCCC_OWNER_STANDARD, \ - 0x53) - -/* - * Return codes defined in ARM DEN 0070A - * ARM DEN 0070A is now merged/consolidated into ARM DEN 0028 C - */ -#define SMCCC_RET_SUCCESS 0 -#define SMCCC_RET_NOT_SUPPORTED -1 -#define SMCCC_RET_NOT_REQUIRED -2 -#define SMCCC_RET_INVALID_PARAMETER -3 - #ifndef __ASSEMBLY__ #include #include - -enum arm_smccc_conduit { - SMCCC_CONDUIT_NONE, - SMCCC_CONDUIT_SMC, - SMCCC_CONDUIT_HVC, -}; - -/** - * arm_smccc_1_1_get_conduit() - * - * Returns the conduit to be used for SMCCCv1.1 or later. - * - * When SMCCCv1.1 is not present, returns SMCCC_CONDUIT_NONE. - */ -enum arm_smccc_conduit arm_smccc_1_1_get_conduit(void); - -/** - * arm_smccc_get_version() - * - * Returns the version to be used for SMCCCv1.1 or later. - * - * When SMCCCv1.1 or above is not present, returns SMCCCv1.0, but this - * does not imply the presence of firmware or a valid conduit. Caller - * handling SMCCCv1.0 must determine the conduit by other means. 
- */ -u32 arm_smccc_get_version(void); - -void __init arm_smccc_version_init(u32 version, enum arm_smccc_conduit conduit); - -extern u64 smccc_has_sve_hint; - /** * struct arm_smccc_res - Result from SMC/HVC call * @a0-a3 result values from registers 0 to 3 @@ -232,61 +75,6 @@ struct arm_smccc_res { unsigned long a3; }; -#ifdef CONFIG_ARM64 -/** - * struct arm_smccc_1_2_regs - Arguments for or Results from SMC/HVC call - * @a0-a17 argument values from registers 0 to 17 - */ -struct arm_smccc_1_2_regs { - unsigned long a0; - unsigned long a1; - unsigned long a2; - unsigned long a3; - unsigned long a4; - unsigned long a5; - unsigned long a6; - unsigned long a7; - unsigned long a8; - unsigned long a9; - unsigned long a10; - unsigned long a11; - unsigned long a12; - unsigned long a13; - unsigned long a14; - unsigned long a15; - unsigned long a16; - unsigned long a17; -}; - -/** - * arm_smccc_1_2_hvc() - make HVC calls - * @args: arguments passed via struct arm_smccc_1_2_regs - * @res: result values via struct arm_smccc_1_2_regs - * - * This function is used to make HVC calls following SMC Calling Convention - * v1.2 or above. The content of the supplied param are copied from the - * structure to registers prior to the HVC instruction. The return values - * are updated with the content from registers on return from the HVC - * instruction. - */ -asmlinkage void arm_smccc_1_2_hvc(const struct arm_smccc_1_2_regs *args, - struct arm_smccc_1_2_regs *res); - -/** - * arm_smccc_1_2_smc() - make SMC calls - * @args: arguments passed via struct arm_smccc_1_2_regs - * @res: result values via struct arm_smccc_1_2_regs - * - * This function is used to make SMC calls following SMC Calling Convention - * v1.2 or above. The content of the supplied param are copied from the - * structure to registers prior to the SMC instruction. The return values - * are updated with the content from registers on return from the SMC - * instruction. 
- */ -asmlinkage void arm_smccc_1_2_smc(const struct arm_smccc_1_2_regs *args, - struct arm_smccc_1_2_regs *res); -#endif - /** * struct arm_smccc_quirk - Contains quirk information * @id: quirk identification @@ -300,15 +88,6 @@ struct arm_smccc_quirk { } state; }; -/** - * __arm_smccc_sve_check() - Set the SVE hint bit when doing SMC calls - * - * Sets the SMCCC hint bit to indicate if there is live state in the SVE - * registers, this modifies x0 in place and should never be called from C - * code. - */ -asmlinkage unsigned long __arm_smccc_sve_check(unsigned long x0); - /** * __arm_smccc_smc() - make SMC calls * @a0-a7: arguments passed in registers 0 to 7 @@ -321,20 +100,10 @@ asmlinkage unsigned long __arm_smccc_sve_check(unsigned long x0); * from register 0 to 3 on return from the SMC instruction. An optional * quirk structure provides vendor specific behavior. */ -#ifdef CONFIG_HAVE_ARM_SMCCC asmlinkage void __arm_smccc_smc(unsigned long a0, unsigned long a1, unsigned long a2, unsigned long a3, unsigned long a4, unsigned long a5, unsigned long a6, unsigned long a7, struct arm_smccc_res *res, struct arm_smccc_quirk *quirk); -#else -static inline void __arm_smccc_smc(unsigned long a0, unsigned long a1, - unsigned long a2, unsigned long a3, unsigned long a4, - unsigned long a5, unsigned long a6, unsigned long a7, - struct arm_smccc_res *res, struct arm_smccc_quirk *quirk) -{ - *res = (struct arm_smccc_res){}; -} -#endif /** * __arm_smccc_hvc() - make HVC calls @@ -361,201 +130,5 @@ asmlinkage void __arm_smccc_hvc(unsigned long a0, unsigned long a1, #define arm_smccc_hvc_quirk(...) 
__arm_smccc_hvc(__VA_ARGS__) -/* SMCCC v1.1 implementation madness follows */ -#ifdef CONFIG_ARM64 - -#define SMCCC_SMC_INST "smc #0" -#define SMCCC_HVC_INST "hvc #0" - -#elif defined(CONFIG_ARM) -#include -#include - -#define SMCCC_SMC_INST __SMC(0) -#define SMCCC_HVC_INST __HVC(0) - -#endif - -/* nVHE hypervisor doesn't have a current thread so needs separate checks */ -#if defined(CONFIG_ARM64_SVE) && !defined(__KVM_NVHE_HYPERVISOR__) - -#define SMCCC_SVE_CHECK ALTERNATIVE("nop \n", "bl __arm_smccc_sve_check \n", \ - ARM64_SVE) -#define smccc_sve_clobbers "x16", "x30", "cc", - -#else - -#define SMCCC_SVE_CHECK -#define smccc_sve_clobbers - -#endif - -#define ___count_args(_0, _1, _2, _3, _4, _5, _6, _7, _8, x, ...) x - -#define __count_args(...) \ - ___count_args(__VA_ARGS__, 7, 6, 5, 4, 3, 2, 1, 0) - -#define __constraint_read_0 "r" (arg0) -#define __constraint_read_1 __constraint_read_0, "r" (arg1) -#define __constraint_read_2 __constraint_read_1, "r" (arg2) -#define __constraint_read_3 __constraint_read_2, "r" (arg3) -#define __constraint_read_4 __constraint_read_3, "r" (arg4) -#define __constraint_read_5 __constraint_read_4, "r" (arg5) -#define __constraint_read_6 __constraint_read_5, "r" (arg6) -#define __constraint_read_7 __constraint_read_6, "r" (arg7) - -#define __declare_arg_0(a0, res) \ - struct arm_smccc_res *___res = res; \ - register unsigned long arg0 asm("r0") = (u32)a0 - -#define __declare_arg_1(a0, a1, res) \ - typeof(a1) __a1 = a1; \ - struct arm_smccc_res *___res = res; \ - register unsigned long arg0 asm("r0") = (u32)a0; \ - register typeof(a1) arg1 asm("r1") = __a1 - -#define __declare_arg_2(a0, a1, a2, res) \ - typeof(a1) __a1 = a1; \ - typeof(a2) __a2 = a2; \ - struct arm_smccc_res *___res = res; \ - register unsigned long arg0 asm("r0") = (u32)a0; \ - register typeof(a1) arg1 asm("r1") = __a1; \ - register typeof(a2) arg2 asm("r2") = __a2 - -#define __declare_arg_3(a0, a1, a2, a3, res) \ - typeof(a1) __a1 = a1; \ - typeof(a2) __a2 = a2; \ 
- typeof(a3) __a3 = a3; \ - struct arm_smccc_res *___res = res; \ - register unsigned long arg0 asm("r0") = (u32)a0; \ - register typeof(a1) arg1 asm("r1") = __a1; \ - register typeof(a2) arg2 asm("r2") = __a2; \ - register typeof(a3) arg3 asm("r3") = __a3 - -#define __declare_arg_4(a0, a1, a2, a3, a4, res) \ - typeof(a4) __a4 = a4; \ - __declare_arg_3(a0, a1, a2, a3, res); \ - register typeof(a4) arg4 asm("r4") = __a4 - -#define __declare_arg_5(a0, a1, a2, a3, a4, a5, res) \ - typeof(a5) __a5 = a5; \ - __declare_arg_4(a0, a1, a2, a3, a4, res); \ - register typeof(a5) arg5 asm("r5") = __a5 - -#define __declare_arg_6(a0, a1, a2, a3, a4, a5, a6, res) \ - typeof(a6) __a6 = a6; \ - __declare_arg_5(a0, a1, a2, a3, a4, a5, res); \ - register typeof(a6) arg6 asm("r6") = __a6 - -#define __declare_arg_7(a0, a1, a2, a3, a4, a5, a6, a7, res) \ - typeof(a7) __a7 = a7; \ - __declare_arg_6(a0, a1, a2, a3, a4, a5, a6, res); \ - register typeof(a7) arg7 asm("r7") = __a7 - -#define ___declare_args(count, ...) __declare_arg_ ## count(__VA_ARGS__) -#define __declare_args(count, ...) ___declare_args(count, __VA_ARGS__) - -#define ___constraints(count) \ - : __constraint_read_ ## count \ - : smccc_sve_clobbers "memory" -#define __constraints(count) ___constraints(count) - -/* - * We have an output list that is not necessarily used, and GCC feels - * entitled to optimise the whole sequence away. "volatile" is what - * makes it stick. - */ -#define __arm_smccc_1_1(inst, ...) 
\ - do { \ - register unsigned long r0 asm("r0"); \ - register unsigned long r1 asm("r1"); \ - register unsigned long r2 asm("r2"); \ - register unsigned long r3 asm("r3"); \ - __declare_args(__count_args(__VA_ARGS__), __VA_ARGS__); \ - asm volatile(SMCCC_SVE_CHECK \ - inst "\n" : \ - "=r" (r0), "=r" (r1), "=r" (r2), "=r" (r3) \ - __constraints(__count_args(__VA_ARGS__))); \ - if (___res) \ - *___res = (typeof(*___res)){r0, r1, r2, r3}; \ - } while (0) - -/* - * arm_smccc_1_1_smc() - make an SMCCC v1.1 compliant SMC call - * - * This is a variadic macro taking one to eight source arguments, and - * an optional return structure. - * - * @a0-a7: arguments passed in registers 0 to 7 - * @res: result values from registers 0 to 3 - * - * This macro is used to make SMC calls following SMC Calling Convention v1.1. - * The content of the supplied param are copied to registers 0 to 7 prior - * to the SMC instruction. The return values are updated with the content - * from register 0 to 3 on return from the SMC instruction if not NULL. - */ -#define arm_smccc_1_1_smc(...) __arm_smccc_1_1(SMCCC_SMC_INST, __VA_ARGS__) - -/* - * arm_smccc_1_1_hvc() - make an SMCCC v1.1 compliant HVC call - * - * This is a variadic macro taking one to eight source arguments, and - * an optional return structure. - * - * @a0-a7: arguments passed in registers 0 to 7 - * @res: result values from registers 0 to 3 - * - * This macro is used to make HVC calls following SMC Calling Convention v1.1. - * The content of the supplied param are copied to registers 0 to 7 prior - * to the HVC instruction. The return values are updated with the content - * from register 0 to 3 on return from the HVC instruction if not NULL. - */ -#define arm_smccc_1_1_hvc(...) __arm_smccc_1_1(SMCCC_HVC_INST, __VA_ARGS__) - -/* - * Like arm_smccc_1_1* but always returns SMCCC_RET_NOT_SUPPORTED. - * Used when the SMCCC conduit is not defined. The empty asm statement - * avoids compiler warnings about unused variables. 
- */ -#define __fail_smccc_1_1(...) \ - do { \ - __declare_args(__count_args(__VA_ARGS__), __VA_ARGS__); \ - asm ("" : __constraints(__count_args(__VA_ARGS__))); \ - if (___res) \ - ___res->a0 = SMCCC_RET_NOT_SUPPORTED; \ - } while (0) - -/* - * arm_smccc_1_1_invoke() - make an SMCCC v1.1 compliant call - * - * This is a variadic macro taking one to eight source arguments, and - * an optional return structure. - * - * @a0-a7: arguments passed in registers 0 to 7 - * @res: result values from registers 0 to 3 - * - * This macro will make either an HVC call or an SMC call depending on the - * current SMCCC conduit. If no valid conduit is available then -1 - * (SMCCC_RET_NOT_SUPPORTED) is returned in @res.a0 (if supplied). - * - * The return value also provides the conduit that was used. - */ -#define arm_smccc_1_1_invoke(...) ({ \ - int method = arm_smccc_1_1_get_conduit(); \ - switch (method) { \ - case SMCCC_CONDUIT_HVC: \ - arm_smccc_1_1_hvc(__VA_ARGS__); \ - break; \ - case SMCCC_CONDUIT_SMC: \ - arm_smccc_1_1_smc(__VA_ARGS__); \ - break; \ - default: \ - __fail_smccc_1_1(__VA_ARGS__); \ - method = SMCCC_CONDUIT_NONE; \ - break; \ - } \ - method; \ - }) - #endif /*__ASSEMBLY__*/ #endif /*__LINUX_ARM_SMCCC_H*/ diff --git a/include/linux/asn1.h b/include/linux/asn1.h index a4d0bdd107..eed6982860 100644 --- a/include/linux/asn1.h +++ b/include/linux/asn1.h @@ -1,8 +1,12 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ /* ASN.1 BER/DER/CER encoding definitions * * Copyright (C) 2012 Red Hat, Inc. All Rights Reserved. * Written by David Howells (dhowells@redhat.com) + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public Licence + * as published by the Free Software Foundation; either version + * 2 of the Licence, or (at your option) any later version. 
*/ #ifndef _LINUX_ASN1_H diff --git a/include/linux/asn1_ber_bytecode.h b/include/linux/asn1_ber_bytecode.h index b38361953a..ab3a6c002f 100644 --- a/include/linux/asn1_ber_bytecode.h +++ b/include/linux/asn1_ber_bytecode.h @@ -1,8 +1,12 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ /* ASN.1 BER/DER/CER parsing state machine internal definitions * * Copyright (C) 2012 Red Hat, Inc. All Rights Reserved. * Written by David Howells (dhowells@redhat.com) + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public Licence + * as published by the Free Software Foundation; either version + * 2 of the Licence, or (at your option) any later version. */ #ifndef _LINUX_ASN1_BER_BYTECODE_H diff --git a/include/linux/asn1_decoder.h b/include/linux/asn1_decoder.h index 83f9c6e1e5..fa2ff5bc04 100644 --- a/include/linux/asn1_decoder.h +++ b/include/linux/asn1_decoder.h @@ -1,8 +1,12 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ /* ASN.1 decoder * * Copyright (C) 2012 Red Hat, Inc. All Rights Reserved. * Written by David Howells (dhowells@redhat.com) + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public Licence + * as published by the Free Software Foundation; either version + * 2 of the Licence, or (at your option) any later version. */ #ifndef _LINUX_ASN1_DECODER_H diff --git a/include/linux/assoc_array.h b/include/linux/assoc_array.h index 8b3f230ce8..a89df3be16 100644 --- a/include/linux/assoc_array.h +++ b/include/linux/assoc_array.h @@ -1,10 +1,14 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ /* Generic associative array implementation. * - * See Documentation/core-api/assoc_array.rst for information. + * See Documentation/assoc_array.txt for information. * * Copyright (C) 2013 Red Hat, Inc. All Rights Reserved. 
* Written by David Howells (dhowells@redhat.com) + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public Licence + * as published by the Free Software Foundation; either version + * 2 of the Licence, or (at your option) any later version. */ #ifndef _LINUX_ASSOC_ARRAY_H diff --git a/include/linux/assoc_array_priv.h b/include/linux/assoc_array_priv.h index dca733ef67..711275e668 100644 --- a/include/linux/assoc_array_priv.h +++ b/include/linux/assoc_array_priv.h @@ -1,10 +1,14 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ /* Private definitions for the generic associative array implementation. * - * See Documentation/core-api/assoc_array.rst for information. + * See Documentation/assoc_array.txt for information. * * Copyright (C) 2013 Red Hat, Inc. All Rights Reserved. * Written by David Howells (dhowells@redhat.com) + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public Licence + * as published by the Free Software Foundation; either version + * 2 of the Licence, or (at your option) any later version. */ #ifndef _LINUX_ASSOC_ARRAY_PRIV_H diff --git a/include/linux/async.h b/include/linux/async.h index cce4ad31e8..6b0226bdaa 100644 --- a/include/linux/async.h +++ b/include/linux/async.h @@ -1,17 +1,19 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * async.h: Asynchronous function calls for boot performance * * (C) Copyright 2009 Intel Corporation * Author: Arjan van de Ven + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; version 2 + * of the License. 
*/ #ifndef __ASYNC_H__ #define __ASYNC_H__ #include #include -#include -#include typedef u64 async_cookie_t; typedef void (*async_func_t) (void *data, async_cookie_t cookie); @@ -35,83 +37,10 @@ struct async_domain { struct async_domain _name = { .pending = LIST_HEAD_INIT(_name.pending), \ .registered = 0 } -async_cookie_t async_schedule_node(async_func_t func, void *data, - int node); -async_cookie_t async_schedule_node_domain(async_func_t func, void *data, - int node, - struct async_domain *domain); - -/** - * async_schedule - schedule a function for asynchronous execution - * @func: function to execute asynchronously - * @data: data pointer to pass to the function - * - * Returns an async_cookie_t that may be used for checkpointing later. - * Note: This function may be called from atomic or non-atomic contexts. - */ -static inline async_cookie_t async_schedule(async_func_t func, void *data) -{ - return async_schedule_node(func, data, NUMA_NO_NODE); -} - -/** - * async_schedule_domain - schedule a function for asynchronous execution within a certain domain - * @func: function to execute asynchronously - * @data: data pointer to pass to the function - * @domain: the domain - * - * Returns an async_cookie_t that may be used for checkpointing later. - * @domain may be used in the async_synchronize_*_domain() functions to - * wait within a certain synchronization domain rather than globally. - * Note: This function may be called from atomic or non-atomic contexts. - */ -static inline async_cookie_t -async_schedule_domain(async_func_t func, void *data, - struct async_domain *domain) -{ - return async_schedule_node_domain(func, data, NUMA_NO_NODE, domain); -} - -/** - * async_schedule_dev - A device specific version of async_schedule - * @func: function to execute asynchronously - * @dev: device argument to be passed to function - * - * Returns an async_cookie_t that may be used for checkpointing later. 
- * @dev is used as both the argument for the function and to provide NUMA - * context for where to run the function. By doing this we can try to - * provide for the best possible outcome by operating on the device on the - * CPUs closest to the device. - * Note: This function may be called from atomic or non-atomic contexts. - */ -static inline async_cookie_t -async_schedule_dev(async_func_t func, struct device *dev) -{ - return async_schedule_node(func, dev, dev_to_node(dev)); -} - -/** - * async_schedule_dev_domain - A device specific version of async_schedule_domain - * @func: function to execute asynchronously - * @dev: device argument to be passed to function - * @domain: the domain - * - * Returns an async_cookie_t that may be used for checkpointing later. - * @dev is used as both the argument for the function and to provide NUMA - * context for where to run the function. By doing this we can try to - * provide for the best possible outcome by operating on the device on the - * CPUs closest to the device. - * @domain may be used in the async_synchronize_*_domain() functions to - * wait within a certain synchronization domain rather than globally. - * Note: This function may be called from atomic or non-atomic contexts. 
- */ -static inline async_cookie_t -async_schedule_dev_domain(async_func_t func, struct device *dev, - struct async_domain *domain) -{ - return async_schedule_node_domain(func, dev, dev_to_node(dev), domain); -} - +extern async_cookie_t async_schedule(async_func_t func, void *data); +extern async_cookie_t async_schedule_domain(async_func_t func, void *data, + struct async_domain *domain); +void async_unregister_domain(struct async_domain *domain); extern void async_synchronize_full(void); extern void async_synchronize_full_domain(struct async_domain *domain); extern void async_synchronize_cookie(async_cookie_t cookie); diff --git a/include/linux/async_tx.h b/include/linux/async_tx.h index 5cc73d7e5b..388574ea38 100644 --- a/include/linux/async_tx.h +++ b/include/linux/async_tx.h @@ -1,6 +1,19 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright © 2006, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + * */ #ifndef _ASYNC_TX_H_ #define _ASYNC_TX_H_ @@ -36,7 +49,7 @@ struct dma_chan_ref { /** * async_tx_flags - modifiers for the async_* calls * @ASYNC_TX_XOR_ZERO_DST: this flag must be used for xor operations where the - * destination address is not a source. The asynchronous case handles this + * the destination address is not a source. 
The asynchronous case handles this * implicitly, the synchronous case needs to zero the destination block. * @ASYNC_TX_XOR_DROP_DST: this flag must be used if the destination address is * also one of the source addresses. In the synchronous case the destination @@ -74,7 +87,7 @@ struct async_submit_ctl { void *scribble; }; -#if defined(CONFIG_DMA_ENGINE) && !defined(CONFIG_ASYNC_TX_CHANNEL_SWITCH) +#ifdef CONFIG_DMA_ENGINE #define async_tx_issue_pending_all dma_issue_pending_all /** @@ -162,22 +175,11 @@ struct dma_async_tx_descriptor * async_xor(struct page *dest, struct page **src_list, unsigned int offset, int src_cnt, size_t len, struct async_submit_ctl *submit); -struct dma_async_tx_descriptor * -async_xor_offs(struct page *dest, unsigned int offset, - struct page **src_list, unsigned int *src_offset, - int src_cnt, size_t len, struct async_submit_ctl *submit); - struct dma_async_tx_descriptor * async_xor_val(struct page *dest, struct page **src_list, unsigned int offset, int src_cnt, size_t len, enum sum_check_flags *result, struct async_submit_ctl *submit); -struct dma_async_tx_descriptor * -async_xor_val_offs(struct page *dest, unsigned int offset, - struct page **src_list, unsigned int *src_offset, - int src_cnt, size_t len, enum sum_check_flags *result, - struct async_submit_ctl *submit); - struct dma_async_tx_descriptor * async_memcpy(struct page *dest, struct page *src, unsigned int dest_offset, unsigned int src_offset, size_t len, @@ -186,23 +188,21 @@ async_memcpy(struct page *dest, struct page *src, unsigned int dest_offset, struct dma_async_tx_descriptor *async_trigger_callback(struct async_submit_ctl *submit); struct dma_async_tx_descriptor * -async_gen_syndrome(struct page **blocks, unsigned int *offsets, int src_cnt, +async_gen_syndrome(struct page **blocks, unsigned int offset, int src_cnt, size_t len, struct async_submit_ctl *submit); struct dma_async_tx_descriptor * -async_syndrome_val(struct page **blocks, unsigned int *offsets, int src_cnt, 
+async_syndrome_val(struct page **blocks, unsigned int offset, int src_cnt, size_t len, enum sum_check_flags *pqres, struct page *spare, - unsigned int s_off, struct async_submit_ctl *submit); + struct async_submit_ctl *submit); struct dma_async_tx_descriptor * async_raid6_2data_recov(int src_num, size_t bytes, int faila, int failb, - struct page **ptrs, unsigned int *offs, - struct async_submit_ctl *submit); + struct page **ptrs, struct async_submit_ctl *submit); struct dma_async_tx_descriptor * async_raid6_datap_recov(int src_num, size_t bytes, int faila, - struct page **ptrs, unsigned int *offs, - struct async_submit_ctl *submit); + struct page **ptrs, struct async_submit_ctl *submit); void async_tx_quiesce(struct dma_async_tx_descriptor **tx); #endif /* _ASYNC_TX_H_ */ diff --git a/include/linux/ata.h b/include/linux/ata.h index 1b44f40c77..fdb180367b 100644 --- a/include/linux/ata.h +++ b/include/linux/ata.h @@ -1,19 +1,35 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * Copyright 2003-2004 Red Hat, Inc. All rights reserved. * Copyright 2003-2004 Jeff Garzik * + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2, or (at your option) + * any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; see the file COPYING. If not, write to + * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. 
+ * + * * libata documentation is available via 'make {ps|pdf}docs', - * as Documentation/driver-api/libata.rst + * as Documentation/DocBook/libata.* * * Hardware documentation available from http://www.t13.org/ + * */ #ifndef __LINUX_ATA_H__ #define __LINUX_ATA_H__ -#include +#include #include #include #include @@ -44,8 +60,7 @@ enum { ATA_ID_FW_REV = 23, ATA_ID_PROD = 27, ATA_ID_MAX_MULTSECT = 47, - ATA_ID_DWORD_IO = 48, /* before ATA-8 */ - ATA_ID_TRUSTED = 48, /* ATA-8 and later */ + ATA_ID_DWORD_IO = 48, ATA_ID_CAPABILITY = 49, ATA_ID_OLD_PIO_MODES = 51, ATA_ID_OLD_DMA_MODES = 52, @@ -321,16 +336,11 @@ enum { /* READ_LOG_EXT pages */ ATA_LOG_DIRECTORY = 0x0, ATA_LOG_SATA_NCQ = 0x10, - ATA_LOG_NCQ_NON_DATA = 0x12, - ATA_LOG_NCQ_SEND_RECV = 0x13, - ATA_LOG_IDENTIFY_DEVICE = 0x30, - - /* Identify device log pages: */ - ATA_LOG_SECURITY = 0x06, + ATA_LOG_NCQ_NON_DATA = 0x12, + ATA_LOG_NCQ_SEND_RECV = 0x13, + ATA_LOG_SATA_ID_DEV_DATA = 0x30, ATA_LOG_SATA_SETTINGS = 0x08, ATA_LOG_ZONED_INFORMATION = 0x09, - - /* Identify device SATA settings log:*/ ATA_LOG_DEVSLP_OFFSET = 0x30, ATA_LOG_DEVSLP_SIZE = 0x08, ATA_LOG_DEVSLP_MDAT = 0x00, @@ -338,7 +348,6 @@ enum { ATA_LOG_DEVSLP_DETO = 0x01, ATA_LOG_DEVSLP_VALID = 0x07, ATA_LOG_DEVSLP_VALID_MASK = 0x80, - ATA_LOG_NCQ_PRIO_OFFSET = 0x09, /* NCQ send and receive log */ ATA_LOG_NCQ_SEND_RECV_SUBCMDS_OFFSET = 0x00, @@ -432,8 +441,6 @@ enum { ATA_SET_MAX_LOCK = 0x02, ATA_SET_MAX_UNLOCK = 0x03, ATA_SET_MAX_FREEZE_LOCK = 0x04, - ATA_SET_MAX_PASSWD_DMA = 0x05, - ATA_SET_MAX_UNLOCK_DMA = 0x06, /* feature values for DEVICE CONFIGURATION OVERLAY */ ATA_DCO_RESTORE = 0xC0, @@ -809,6 +816,11 @@ static inline bool ata_id_sct_error_recovery_ctrl(const u16 *id) return id[ATA_ID_SCT_CMD_XPORT] & (1 << 3) ? true : false; } +static inline bool ata_id_sct_write_same(const u16 *id) +{ + return id[ATA_ID_SCT_CMD_XPORT] & (1 << 2) ? 
true : false; +} + static inline bool ata_id_sct_long_sector_access(const u16 *id) { return id[ATA_ID_SCT_CMD_XPORT] & (1 << 1) ? true : false; @@ -876,13 +888,6 @@ static inline bool ata_id_has_dword_io(const u16 *id) return id[ATA_ID_DWORD_IO] & (1 << 0); } -static inline bool ata_id_has_trusted(const u16 *id) -{ - if (ata_id_major_version(id) <= 7) - return false; - return id[ATA_ID_TRUSTED] & (1 << 0); -} - static inline bool ata_id_has_unload(const u16 *id) { if (ata_id_major_version(id) >= 7 && @@ -935,11 +940,6 @@ static inline bool ata_id_has_ncq_non_data(const u16 *id) return id[ATA_ID_SATA_CAPABILITY_2] & BIT(5); } -static inline bool ata_id_has_ncq_prio(const u16 *id) -{ - return id[ATA_ID_SATA_CAPABILITY] & BIT(12); -} - static inline bool ata_id_has_trim(const u16 *id) { if (ata_id_major_version(id) >= 7 && diff --git a/include/linux/ata_platform.h b/include/linux/ata_platform.h index 9cafec9228..619d9e78e6 100644 --- a/include/linux/ata_platform.h +++ b/include/linux/ata_platform.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef __LINUX_ATA_PLATFORM_H #define __LINUX_ATA_PLATFORM_H @@ -19,8 +18,7 @@ extern int __pata_platform_probe(struct device *dev, struct resource *irq_res, unsigned int ioport_shift, int __pio_mask, - struct scsi_host_template *sht, - bool use16bit); + struct scsi_host_template *sht); /* * Marvell SATA private data diff --git a/include/linux/atalk.h b/include/linux/atalk.h index f6034ba774..73fd8b7e95 100644 --- a/include/linux/atalk.h +++ b/include/linux/atalk.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef __LINUX_ATALK_H__ #define __LINUX_ATALK_H__ @@ -108,17 +107,15 @@ static __inline__ struct elapaarp *aarp_hdr(struct sk_buff *skb) #define AARP_RESOLVE_TIME (10 * HZ) extern struct datalink_proto *ddp_dl, *aarp_dl; -extern int aarp_proto_init(void); +extern void aarp_proto_init(void); /* Inter module exports */ /* Give a device find its atif control structure */ -#if IS_ENABLED(CONFIG_IRDA) || 
IS_ENABLED(CONFIG_ATALK) static inline struct atalk_iface *atalk_find_dev(struct net_device *dev) { return dev->atalk_ptr; } -#endif extern struct atalk_addr *atalk_find_dev_addr(struct net_device *dev); extern struct net_device *atrtr_get_dev(struct atalk_addr *sa); @@ -145,12 +142,7 @@ extern rwlock_t atalk_interfaces_lock; extern struct atalk_route atrtr_default; -struct aarp_iter_state { - int bucket; - struct aarp_entry **table; -}; - -extern const struct seq_operations aarp_seq_ops; +extern const struct file_operations atalk_seq_arp_fops; extern int sysctl_aarp_expiry_time; extern int sysctl_aarp_tick_time; @@ -158,29 +150,19 @@ extern int sysctl_aarp_retransmit_limit; extern int sysctl_aarp_resolve_time; #ifdef CONFIG_SYSCTL -extern int atalk_register_sysctl(void); +extern void atalk_register_sysctl(void); extern void atalk_unregister_sysctl(void); #else -static inline int atalk_register_sysctl(void) -{ - return 0; -} -static inline void atalk_unregister_sysctl(void) -{ -} +#define atalk_register_sysctl() do { } while(0) +#define atalk_unregister_sysctl() do { } while(0) #endif #ifdef CONFIG_PROC_FS extern int atalk_proc_init(void); extern void atalk_proc_exit(void); #else -static inline int atalk_proc_init(void) -{ - return 0; -} -static inline void atalk_proc_exit(void) -{ -} +#define atalk_proc_init() ({ 0; }) +#define atalk_proc_exit() do { } while(0) #endif /* CONFIG_PROC_FS */ #endif /* __LINUX_ATALK_H__ */ diff --git a/include/linux/atm.h b/include/linux/atm.h index 4b50fd0a6e..30006c4359 100644 --- a/include/linux/atm.h +++ b/include/linux/atm.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ /* atm.h - general ATM declarations */ #ifndef _LINUX_ATM_H #define _LINUX_ATM_H diff --git a/include/linux/atm_tcp.h b/include/linux/atm_tcp.h index c8ecf6f68f..db6b65fc0a 100644 --- a/include/linux/atm_tcp.h +++ b/include/linux/atm_tcp.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ /* atm_tcp.h - Driver-specific declarations of the ATMTCP 
driver (for use by driver-specific utilities) */ diff --git a/include/linux/atmdev.h b/include/linux/atmdev.h index 9b02961d65..1dcec5522f 100644 --- a/include/linux/atmdev.h +++ b/include/linux/atmdev.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ /* atmdev.h - ATM device driver declarations and various related items */ #ifndef LINUX_ATMDEV_H #define LINUX_ATMDEV_H @@ -12,7 +11,6 @@ #include #include #include -#include #include #ifdef CONFIG_PROC_FS @@ -30,7 +28,7 @@ struct compat_atm_iobuf { #endif struct k_atm_aal_stats { -#define __HANDLE_ITEM(i) atomic_t i +#define __HANDLE_ITEM(i) atomic_unchecked_t i __AAL_STAT_ITEMS #undef __HANDLE_ITEM }; @@ -151,7 +149,7 @@ struct atm_dev { const char *type; /* device type name */ int number; /* device index */ void *dev_data; /* per-device data */ - void *phy_data; /* private PHY data */ + void *phy_data; /* private PHY date */ unsigned long flags; /* device flags (ATM_DF_*) */ struct list_head local; /* local ATM addresses */ struct list_head lecs; /* LECS ATM addresses learned via ILMI */ @@ -160,7 +158,7 @@ struct atm_dev { struct k_atm_dev_stats stats; /* statistics */ char signal; /* signal status (ATM_PHY_SIG_*) */ int link_rate; /* link rate (default: OC3) */ - refcount_t refcnt; /* reference count */ + atomic_t refcnt; /* reference count */ spinlock_t lock; /* protect internal members */ #ifdef CONFIG_PROC_FS struct proc_dir_entry *proc_entry; /* proc entry */ @@ -176,6 +174,11 @@ struct atm_dev { #define ATM_OF_IMMED 1 /* Attempt immediate delivery */ #define ATM_OF_INRATE 2 /* Attempt in-rate delivery */ + +/* + * ioctl, getsockopt, and setsockopt are optional and can be set to NULL. 
+ */ + struct atmdev_ops { /* only send is required */ void (*dev_close)(struct atm_dev *dev); int (*open)(struct atm_vcc *vcc); @@ -185,8 +188,11 @@ struct atmdev_ops { /* only send is required */ int (*compat_ioctl)(struct atm_dev *dev,unsigned int cmd, void __user *arg); #endif + int (*getsockopt)(struct atm_vcc *vcc,int level,int optname, + void __user *optval,int optlen); + int (*setsockopt)(struct atm_vcc *vcc,int level,int optname, + void __user *optval,unsigned int optlen); int (*send)(struct atm_vcc *vcc,struct sk_buff *skb); - int (*send_bh)(struct atm_vcc *vcc, struct sk_buff *skb); int (*send_oam)(struct atm_vcc *vcc,void *cell,int flags); void (*phy_put)(struct atm_dev *dev,unsigned char value, unsigned long addr); @@ -194,7 +200,7 @@ struct atmdev_ops { /* only send is required */ int (*change_qos)(struct atm_vcc *vcc,struct atm_qos *qos,int flags); int (*proc_read)(struct atm_dev *dev,loff_t *pos,char *page); struct module *owner; -}; +} __do_const ; struct atmphy_ops { int (*start)(struct atm_dev *dev); @@ -206,8 +212,7 @@ struct atmphy_ops { struct atm_skb_data { struct atm_vcc *vcc; /* ATM VCC */ unsigned long atm_options; /* ATM layer options */ - unsigned int acct_truesize; /* truesize accounted to vcc */ -} __packed; +}; #define VCC_HTABLE_SIZE 32 @@ -234,20 +239,6 @@ void vcc_insert_socket(struct sock *sk); void atm_dev_release_vccs(struct atm_dev *dev); -static inline void atm_account_tx(struct atm_vcc *vcc, struct sk_buff *skb) -{ - /* - * Because ATM skbs may not belong to a sock (and we don't - * necessarily want to), skb->truesize may be adjusted, - * escaping the hack in pskb_expand_head() which avoids - * doing so for some cases. So stash the value of truesize - * at the time we accounted it, and atm_pop_raw() can use - * that value later, in case it changes. 
- */ - refcount_add(skb->truesize, &sk_atm(vcc)->sk_wmem_alloc); - ATM_SKB(skb)->acct_truesize = skb->truesize; - ATM_SKB(skb)->atm_options = vcc->atm_options; -} static inline void atm_force_charge(struct atm_vcc *vcc,int truesize) { @@ -263,20 +254,20 @@ static inline void atm_return(struct atm_vcc *vcc,int truesize) static inline int atm_may_send(struct atm_vcc *vcc,unsigned int size) { - return (size + refcount_read(&sk_atm(vcc)->sk_wmem_alloc)) < + return (size + atomic_read(&sk_atm(vcc)->sk_wmem_alloc)) < sk_atm(vcc)->sk_sndbuf; } static inline void atm_dev_hold(struct atm_dev *dev) { - refcount_inc(&dev->refcnt); + atomic_inc(&dev->refcnt); } static inline void atm_dev_put(struct atm_dev *dev) { - if (refcount_dec_and_test(&dev->refcnt)) { + if (atomic_dec_and_test(&dev->refcnt)) { BUG_ON(!test_bit(ATM_DF_REMOVED, &dev->flags)); if (dev->ops->dev_close) dev->ops->dev_close(dev); diff --git a/include/linux/atmel-mci.h b/include/linux/atmel-mci.h index 1491af38cc..42a9e18848 100644 --- a/include/linux/atmel-mci.h +++ b/include/linux/atmel-mci.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef __LINUX_ATMEL_MCI_H #define __LINUX_ATMEL_MCI_H diff --git a/include/linux/atmel-ssc.h b/include/linux/atmel-ssc.h index 6091d2abc1..7c0f654989 100644 --- a/include/linux/atmel-ssc.h +++ b/include/linux/atmel-ssc.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef __INCLUDE_ATMEL_SSC_H #define __INCLUDE_ATMEL_SSC_H @@ -21,7 +20,6 @@ struct ssc_device { int user; int irq; bool clk_from_rk_pin; - bool sound_dai; }; struct ssc_device * __must_check ssc_request(unsigned int ssc_num); diff --git a/include/linux/atmel_pdc.h b/include/linux/atmel_pdc.h index 00a766b5ee..63499ce806 100644 --- a/include/linux/atmel_pdc.h +++ b/include/linux/atmel_pdc.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * include/linux/atmel_pdc.h * @@ -7,6 +6,11 @@ * * Peripheral Data Controller (PDC) registers. 
* Based on AT91RM9200 datasheet revision E. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. */ #ifndef ATMEL_PDC_H diff --git a/include/linux/atmel_serial.h b/include/linux/atmel_serial.h new file mode 100644 index 0000000000..bd2560502f --- /dev/null +++ b/include/linux/atmel_serial.h @@ -0,0 +1,169 @@ +/* + * include/linux/atmel_serial.h + * + * Copyright (C) 2005 Ivan Kokshaysky + * Copyright (C) SAN People + * + * USART registers. + * Based on AT91RM9200 datasheet revision E. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + */ + +#ifndef ATMEL_SERIAL_H +#define ATMEL_SERIAL_H + +#define ATMEL_US_CR 0x00 /* Control Register */ +#define ATMEL_US_RSTRX BIT(2) /* Reset Receiver */ +#define ATMEL_US_RSTTX BIT(3) /* Reset Transmitter */ +#define ATMEL_US_RXEN BIT(4) /* Receiver Enable */ +#define ATMEL_US_RXDIS BIT(5) /* Receiver Disable */ +#define ATMEL_US_TXEN BIT(6) /* Transmitter Enable */ +#define ATMEL_US_TXDIS BIT(7) /* Transmitter Disable */ +#define ATMEL_US_RSTSTA BIT(8) /* Reset Status Bits */ +#define ATMEL_US_STTBRK BIT(9) /* Start Break */ +#define ATMEL_US_STPBRK BIT(10) /* Stop Break */ +#define ATMEL_US_STTTO BIT(11) /* Start Time-out */ +#define ATMEL_US_SENDA BIT(12) /* Send Address */ +#define ATMEL_US_RSTIT BIT(13) /* Reset Iterations */ +#define ATMEL_US_RSTNACK BIT(14) /* Reset Non Acknowledge */ +#define ATMEL_US_RETTO BIT(15) /* Rearm Time-out */ +#define ATMEL_US_DTREN BIT(16) /* Data Terminal Ready Enable */ +#define ATMEL_US_DTRDIS BIT(17) /* Data Terminal Ready Disable */ +#define ATMEL_US_RTSEN BIT(18) /* Request To 
Send Enable */ +#define ATMEL_US_RTSDIS BIT(19) /* Request To Send Disable */ +#define ATMEL_US_TXFCLR BIT(24) /* Transmit FIFO Clear */ +#define ATMEL_US_RXFCLR BIT(25) /* Receive FIFO Clear */ +#define ATMEL_US_TXFLCLR BIT(26) /* Transmit FIFO Lock Clear */ +#define ATMEL_US_FIFOEN BIT(30) /* FIFO enable */ +#define ATMEL_US_FIFODIS BIT(31) /* FIFO disable */ + +#define ATMEL_US_MR 0x04 /* Mode Register */ +#define ATMEL_US_USMODE GENMASK(3, 0) /* Mode of the USART */ +#define ATMEL_US_USMODE_NORMAL 0 +#define ATMEL_US_USMODE_RS485 1 +#define ATMEL_US_USMODE_HWHS 2 +#define ATMEL_US_USMODE_MODEM 3 +#define ATMEL_US_USMODE_ISO7816_T0 4 +#define ATMEL_US_USMODE_ISO7816_T1 6 +#define ATMEL_US_USMODE_IRDA 8 +#define ATMEL_US_USCLKS GENMASK(5, 4) /* Clock Selection */ +#define ATMEL_US_USCLKS_MCK (0 << 4) +#define ATMEL_US_USCLKS_MCK_DIV8 (1 << 4) +#define ATMEL_US_USCLKS_SCK (3 << 4) +#define ATMEL_US_CHRL GENMASK(7, 6) /* Character Length */ +#define ATMEL_US_CHRL_5 (0 << 6) +#define ATMEL_US_CHRL_6 (1 << 6) +#define ATMEL_US_CHRL_7 (2 << 6) +#define ATMEL_US_CHRL_8 (3 << 6) +#define ATMEL_US_SYNC BIT(8) /* Synchronous Mode Select */ +#define ATMEL_US_PAR GENMASK(11, 9) /* Parity Type */ +#define ATMEL_US_PAR_EVEN (0 << 9) +#define ATMEL_US_PAR_ODD (1 << 9) +#define ATMEL_US_PAR_SPACE (2 << 9) +#define ATMEL_US_PAR_MARK (3 << 9) +#define ATMEL_US_PAR_NONE (4 << 9) +#define ATMEL_US_PAR_MULTI_DROP (6 << 9) +#define ATMEL_US_NBSTOP GENMASK(13, 12) /* Number of Stop Bits */ +#define ATMEL_US_NBSTOP_1 (0 << 12) +#define ATMEL_US_NBSTOP_1_5 (1 << 12) +#define ATMEL_US_NBSTOP_2 (2 << 12) +#define ATMEL_US_CHMODE GENMASK(15, 14) /* Channel Mode */ +#define ATMEL_US_CHMODE_NORMAL (0 << 14) +#define ATMEL_US_CHMODE_ECHO (1 << 14) +#define ATMEL_US_CHMODE_LOC_LOOP (2 << 14) +#define ATMEL_US_CHMODE_REM_LOOP (3 << 14) +#define ATMEL_US_MSBF BIT(16) /* Bit Order */ +#define ATMEL_US_MODE9 BIT(17) /* 9-bit Character Length */ +#define ATMEL_US_CLKO BIT(18) /* Clock Output Select 
*/ +#define ATMEL_US_OVER BIT(19) /* Oversampling Mode */ +#define ATMEL_US_INACK BIT(20) /* Inhibit Non Acknowledge */ +#define ATMEL_US_DSNACK BIT(21) /* Disable Successive NACK */ +#define ATMEL_US_MAX_ITER GENMASK(26, 24) /* Max Iterations */ +#define ATMEL_US_FILTER BIT(28) /* Infrared Receive Line Filter */ + +#define ATMEL_US_IER 0x08 /* Interrupt Enable Register */ +#define ATMEL_US_RXRDY BIT(0) /* Receiver Ready */ +#define ATMEL_US_TXRDY BIT(1) /* Transmitter Ready */ +#define ATMEL_US_RXBRK BIT(2) /* Break Received / End of Break */ +#define ATMEL_US_ENDRX BIT(3) /* End of Receiver Transfer */ +#define ATMEL_US_ENDTX BIT(4) /* End of Transmitter Transfer */ +#define ATMEL_US_OVRE BIT(5) /* Overrun Error */ +#define ATMEL_US_FRAME BIT(6) /* Framing Error */ +#define ATMEL_US_PARE BIT(7) /* Parity Error */ +#define ATMEL_US_TIMEOUT BIT(8) /* Receiver Time-out */ +#define ATMEL_US_TXEMPTY BIT(9) /* Transmitter Empty */ +#define ATMEL_US_ITERATION BIT(10) /* Max number of Repetitions Reached */ +#define ATMEL_US_TXBUFE BIT(11) /* Transmission Buffer Empty */ +#define ATMEL_US_RXBUFF BIT(12) /* Reception Buffer Full */ +#define ATMEL_US_NACK BIT(13) /* Non Acknowledge */ +#define ATMEL_US_RIIC BIT(16) /* Ring Indicator Input Change */ +#define ATMEL_US_DSRIC BIT(17) /* Data Set Ready Input Change */ +#define ATMEL_US_DCDIC BIT(18) /* Data Carrier Detect Input Change */ +#define ATMEL_US_CTSIC BIT(19) /* Clear to Send Input Change */ +#define ATMEL_US_RI BIT(20) /* RI */ +#define ATMEL_US_DSR BIT(21) /* DSR */ +#define ATMEL_US_DCD BIT(22) /* DCD */ +#define ATMEL_US_CTS BIT(23) /* CTS */ + +#define ATMEL_US_IDR 0x0c /* Interrupt Disable Register */ +#define ATMEL_US_IMR 0x10 /* Interrupt Mask Register */ +#define ATMEL_US_CSR 0x14 /* Channel Status Register */ +#define ATMEL_US_RHR 0x18 /* Receiver Holding Register */ +#define ATMEL_US_THR 0x1c /* Transmitter Holding Register */ +#define ATMEL_US_SYNH BIT(15) /* Transmit/Receive Sync */ + +#define 
ATMEL_US_BRGR 0x20 /* Baud Rate Generator Register */ +#define ATMEL_US_CD GENMASK(15, 0) /* Clock Divider */ +#define ATMEL_US_FP_OFFSET 16 /* Fractional Part */ +#define ATMEL_US_FP_MASK 0x7 + +#define ATMEL_US_RTOR 0x24 /* Receiver Time-out Register for USART */ +#define ATMEL_UA_RTOR 0x28 /* Receiver Time-out Register for UART */ +#define ATMEL_US_TO GENMASK(15, 0) /* Time-out Value */ + +#define ATMEL_US_TTGR 0x28 /* Transmitter Timeguard Register */ +#define ATMEL_US_TG GENMASK(7, 0) /* Timeguard Value */ + +#define ATMEL_US_FIDI 0x40 /* FI DI Ratio Register */ +#define ATMEL_US_NER 0x44 /* Number of Errors Register */ +#define ATMEL_US_IF 0x4c /* IrDA Filter Register */ + +#define ATMEL_US_CMPR 0x90 /* Comparaison Register */ +#define ATMEL_US_FMR 0xa0 /* FIFO Mode Register */ +#define ATMEL_US_TXRDYM(data) (((data) & 0x3) << 0) /* TX Ready Mode */ +#define ATMEL_US_RXRDYM(data) (((data) & 0x3) << 4) /* RX Ready Mode */ +#define ATMEL_US_ONE_DATA 0x0 +#define ATMEL_US_TWO_DATA 0x1 +#define ATMEL_US_FOUR_DATA 0x2 +#define ATMEL_US_FRTSC BIT(7) /* FIFO RTS pin Control */ +#define ATMEL_US_TXFTHRES(thr) (((thr) & 0x3f) << 8) /* TX FIFO Threshold */ +#define ATMEL_US_RXFTHRES(thr) (((thr) & 0x3f) << 16) /* RX FIFO Threshold */ +#define ATMEL_US_RXFTHRES2(thr) (((thr) & 0x3f) << 24) /* RX FIFO Threshold2 */ + +#define ATMEL_US_FLR 0xa4 /* FIFO Level Register */ +#define ATMEL_US_TXFL(reg) (((reg) >> 0) & 0x3f) /* TX FIFO Level */ +#define ATMEL_US_RXFL(reg) (((reg) >> 16) & 0x3f) /* RX FIFO Level */ + +#define ATMEL_US_FIER 0xa8 /* FIFO Interrupt Enable Register */ +#define ATMEL_US_FIDR 0xac /* FIFO Interrupt Disable Register */ +#define ATMEL_US_FIMR 0xb0 /* FIFO Interrupt Mask Register */ +#define ATMEL_US_FESR 0xb4 /* FIFO Event Status Register */ +#define ATMEL_US_TXFEF BIT(0) /* Transmit FIFO Empty Flag */ +#define ATMEL_US_TXFFF BIT(1) /* Transmit FIFO Full Flag */ +#define ATMEL_US_TXFTHF BIT(2) /* Transmit FIFO Threshold Flag */ +#define ATMEL_US_RXFEF 
BIT(3) /* Receive FIFO Empty Flag */ +#define ATMEL_US_RXFFF BIT(4) /* Receive FIFO Full Flag */ +#define ATMEL_US_RXFTHF BIT(5) /* Receive FIFO Threshold Flag */ +#define ATMEL_US_TXFPTEF BIT(6) /* Transmit FIFO Pointer Error Flag */ +#define ATMEL_US_RXFPTEF BIT(7) /* Receive FIFO Pointer Error Flag */ +#define ATMEL_US_TXFLOCK BIT(8) /* Transmit FIFO Lock (FESR only) */ +#define ATMEL_US_RXFTHF2 BIT(9) /* Receive FIFO Threshold Flag 2 */ + +#define ATMEL_US_NAME 0xf0 /* Ip Name */ +#define ATMEL_US_VERSION 0xfc /* Ip Version */ + +#endif diff --git a/include/linux/atmel_tc.h b/include/linux/atmel_tc.h new file mode 100644 index 0000000000..468fdfa643 --- /dev/null +++ b/include/linux/atmel_tc.h @@ -0,0 +1,270 @@ +/* + * Timer/Counter Unit (TC) registers. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + */ + +#ifndef ATMEL_TC_H +#define ATMEL_TC_H + +#include +#include + +/* + * Many 32-bit Atmel SOCs include one or more TC blocks, each of which holds + * three general-purpose 16-bit timers. These timers share one register bank. + * Depending on the SOC, each timer may have its own clock and IRQ, or those + * may be shared by the whole TC block. + * + * These TC blocks may have up to nine external pins: TCLK0..2 signals for + * clocks or clock gates, and per-timer TIOA and TIOB signals used for PWM + * or triggering. Those pins need to be set up for use with the TC block, + * else they will be used as GPIOs or for a different controller. + * + * Although we expect each TC block to have a platform_device node, those + * nodes are not what drivers bind to. Instead, they ask for a specific + * TC block, by number ... which is a common approach on systems with many + * timers. Then they use clk_get() and platform_get_irq() to get clock and + * IRQ resources. 
+ */ + +struct clk; + +/** + * struct atmel_tcb_config - SoC data for a Timer/Counter Block + * @counter_width: size in bits of a timer counter register + */ +struct atmel_tcb_config { + size_t counter_width; +}; + +/** + * struct atmel_tc - information about a Timer/Counter Block + * @pdev: physical device + * @regs: mapping through which the I/O registers can be accessed + * @id: block id + * @tcb_config: configuration data from SoC + * @irq: irq for each of the three channels + * @clk: internal clock source for each of the three channels + * @node: list node, for tclib internal use + * @allocated: if already used, for tclib internal use + * + * On some platforms, each TC channel has its own clocks and IRQs, + * while on others, all TC channels share the same clock and IRQ. + * Drivers should clk_enable() all the clocks they need even though + * all the entries in @clk may point to the same physical clock. + * Likewise, drivers should request irqs independently for each + * channel, but they must use IRQF_SHARED in case some of the entries + * in @irq are actually the same IRQ. + */ +struct atmel_tc { + struct platform_device *pdev; + void __iomem *regs; + int id; + const struct atmel_tcb_config *tcb_config; + int irq[3]; + struct clk *clk[3]; + struct clk *slow_clk; + struct list_head node; + bool allocated; +}; + +extern struct atmel_tc *atmel_tc_alloc(unsigned block); +extern void atmel_tc_free(struct atmel_tc *tc); + +/* platform-specific ATMEL_TC_TIMER_CLOCKx divisors (0 means 32KiHz) */ +extern const u8 atmel_tc_divisors[5]; + + +/* + * Two registers have block-wide controls. These are: configuring the three + * "external" clocks (or event sources) used by the timer channels; and + * synchronizing the timers by resetting them all at once. + * + * "External" can mean "external to chip" using the TCLK0, TCLK1, or TCLK2 + * signals. 
Or, it can mean "external to timer", using the TIOA output from + * one of the other two timers that's being run in waveform mode. + */ + +#define ATMEL_TC_BCR 0xc0 /* TC Block Control Register */ +#define ATMEL_TC_SYNC (1 << 0) /* synchronize timers */ + +#define ATMEL_TC_BMR 0xc4 /* TC Block Mode Register */ +#define ATMEL_TC_TC0XC0S (3 << 0) /* external clock 0 source */ +#define ATMEL_TC_TC0XC0S_TCLK0 (0 << 0) +#define ATMEL_TC_TC0XC0S_NONE (1 << 0) +#define ATMEL_TC_TC0XC0S_TIOA1 (2 << 0) +#define ATMEL_TC_TC0XC0S_TIOA2 (3 << 0) +#define ATMEL_TC_TC1XC1S (3 << 2) /* external clock 1 source */ +#define ATMEL_TC_TC1XC1S_TCLK1 (0 << 2) +#define ATMEL_TC_TC1XC1S_NONE (1 << 2) +#define ATMEL_TC_TC1XC1S_TIOA0 (2 << 2) +#define ATMEL_TC_TC1XC1S_TIOA2 (3 << 2) +#define ATMEL_TC_TC2XC2S (3 << 4) /* external clock 2 source */ +#define ATMEL_TC_TC2XC2S_TCLK2 (0 << 4) +#define ATMEL_TC_TC2XC2S_NONE (1 << 4) +#define ATMEL_TC_TC2XC2S_TIOA0 (2 << 4) +#define ATMEL_TC_TC2XC2S_TIOA1 (3 << 4) + + +/* + * Each TC block has three "channels", each with one counter and controls. + * + * Note that the semantics of ATMEL_TC_TIMER_CLOCKx (input clock selection + * when it's not "external") is silicon-specific. AT91 platforms use one + * set of definitions; AVR32 platforms use a different set. Don't hard-wire + * such knowledge into your code, use the global "atmel_tc_divisors" ... + * where index N is the divisor for clock N+1, else zero to indicate it uses + * the 32 KiHz clock. + * + * The timers can be chained in various ways, and operated in "waveform" + * generation mode (including PWM) or "capture" mode (to time events). In + * both modes, behavior can be configured in many ways. + * + * Each timer has two I/O pins, TIOA and TIOB. Waveform mode uses TIOA as a + * PWM output, and TIOB as either another PWM or as a trigger. Capture mode + * uses them only as inputs. 
+ */ +#define ATMEL_TC_CHAN(idx) ((idx)*0x40) +#define ATMEL_TC_REG(idx, reg) (ATMEL_TC_CHAN(idx) + ATMEL_TC_ ## reg) + +#define ATMEL_TC_CCR 0x00 /* Channel Control Register */ +#define ATMEL_TC_CLKEN (1 << 0) /* clock enable */ +#define ATMEL_TC_CLKDIS (1 << 1) /* clock disable */ +#define ATMEL_TC_SWTRG (1 << 2) /* software trigger */ + +#define ATMEL_TC_CMR 0x04 /* Channel Mode Register */ + +/* Both modes share some CMR bits */ +#define ATMEL_TC_TCCLKS (7 << 0) /* clock source */ +#define ATMEL_TC_TIMER_CLOCK1 (0 << 0) +#define ATMEL_TC_TIMER_CLOCK2 (1 << 0) +#define ATMEL_TC_TIMER_CLOCK3 (2 << 0) +#define ATMEL_TC_TIMER_CLOCK4 (3 << 0) +#define ATMEL_TC_TIMER_CLOCK5 (4 << 0) +#define ATMEL_TC_XC0 (5 << 0) +#define ATMEL_TC_XC1 (6 << 0) +#define ATMEL_TC_XC2 (7 << 0) +#define ATMEL_TC_CLKI (1 << 3) /* clock invert */ +#define ATMEL_TC_BURST (3 << 4) /* clock gating */ +#define ATMEL_TC_GATE_NONE (0 << 4) +#define ATMEL_TC_GATE_XC0 (1 << 4) +#define ATMEL_TC_GATE_XC1 (2 << 4) +#define ATMEL_TC_GATE_XC2 (3 << 4) +#define ATMEL_TC_WAVE (1 << 15) /* true = Waveform mode */ + +/* CAPTURE mode CMR bits */ +#define ATMEL_TC_LDBSTOP (1 << 6) /* counter stops on RB load */ +#define ATMEL_TC_LDBDIS (1 << 7) /* counter disable on RB load */ +#define ATMEL_TC_ETRGEDG (3 << 8) /* external trigger edge */ +#define ATMEL_TC_ETRGEDG_NONE (0 << 8) +#define ATMEL_TC_ETRGEDG_RISING (1 << 8) +#define ATMEL_TC_ETRGEDG_FALLING (2 << 8) +#define ATMEL_TC_ETRGEDG_BOTH (3 << 8) +#define ATMEL_TC_ABETRG (1 << 10) /* external trigger is TIOA? 
*/ +#define ATMEL_TC_CPCTRG (1 << 14) /* RC compare trigger enable */ +#define ATMEL_TC_LDRA (3 << 16) /* RA loading edge (of TIOA) */ +#define ATMEL_TC_LDRA_NONE (0 << 16) +#define ATMEL_TC_LDRA_RISING (1 << 16) +#define ATMEL_TC_LDRA_FALLING (2 << 16) +#define ATMEL_TC_LDRA_BOTH (3 << 16) +#define ATMEL_TC_LDRB (3 << 18) /* RB loading edge (of TIOA) */ +#define ATMEL_TC_LDRB_NONE (0 << 18) +#define ATMEL_TC_LDRB_RISING (1 << 18) +#define ATMEL_TC_LDRB_FALLING (2 << 18) +#define ATMEL_TC_LDRB_BOTH (3 << 18) + +/* WAVEFORM mode CMR bits */ +#define ATMEL_TC_CPCSTOP (1 << 6) /* RC compare stops counter */ +#define ATMEL_TC_CPCDIS (1 << 7) /* RC compare disables counter */ +#define ATMEL_TC_EEVTEDG (3 << 8) /* external event edge */ +#define ATMEL_TC_EEVTEDG_NONE (0 << 8) +#define ATMEL_TC_EEVTEDG_RISING (1 << 8) +#define ATMEL_TC_EEVTEDG_FALLING (2 << 8) +#define ATMEL_TC_EEVTEDG_BOTH (3 << 8) +#define ATMEL_TC_EEVT (3 << 10) /* external event source */ +#define ATMEL_TC_EEVT_TIOB (0 << 10) +#define ATMEL_TC_EEVT_XC0 (1 << 10) +#define ATMEL_TC_EEVT_XC1 (2 << 10) +#define ATMEL_TC_EEVT_XC2 (3 << 10) +#define ATMEL_TC_ENETRG (1 << 12) /* external event is trigger */ +#define ATMEL_TC_WAVESEL (3 << 13) /* waveform type */ +#define ATMEL_TC_WAVESEL_UP (0 << 13) +#define ATMEL_TC_WAVESEL_UPDOWN (1 << 13) +#define ATMEL_TC_WAVESEL_UP_AUTO (2 << 13) +#define ATMEL_TC_WAVESEL_UPDOWN_AUTO (3 << 13) +#define ATMEL_TC_ACPA (3 << 16) /* RA compare changes TIOA */ +#define ATMEL_TC_ACPA_NONE (0 << 16) +#define ATMEL_TC_ACPA_SET (1 << 16) +#define ATMEL_TC_ACPA_CLEAR (2 << 16) +#define ATMEL_TC_ACPA_TOGGLE (3 << 16) +#define ATMEL_TC_ACPC (3 << 18) /* RC compare changes TIOA */ +#define ATMEL_TC_ACPC_NONE (0 << 18) +#define ATMEL_TC_ACPC_SET (1 << 18) +#define ATMEL_TC_ACPC_CLEAR (2 << 18) +#define ATMEL_TC_ACPC_TOGGLE (3 << 18) +#define ATMEL_TC_AEEVT (3 << 20) /* external event changes TIOA */ +#define ATMEL_TC_AEEVT_NONE (0 << 20) +#define ATMEL_TC_AEEVT_SET (1 << 20) 
+#define ATMEL_TC_AEEVT_CLEAR (2 << 20) +#define ATMEL_TC_AEEVT_TOGGLE (3 << 20) +#define ATMEL_TC_ASWTRG (3 << 22) /* software trigger changes TIOA */ +#define ATMEL_TC_ASWTRG_NONE (0 << 22) +#define ATMEL_TC_ASWTRG_SET (1 << 22) +#define ATMEL_TC_ASWTRG_CLEAR (2 << 22) +#define ATMEL_TC_ASWTRG_TOGGLE (3 << 22) +#define ATMEL_TC_BCPB (3 << 24) /* RB compare changes TIOB */ +#define ATMEL_TC_BCPB_NONE (0 << 24) +#define ATMEL_TC_BCPB_SET (1 << 24) +#define ATMEL_TC_BCPB_CLEAR (2 << 24) +#define ATMEL_TC_BCPB_TOGGLE (3 << 24) +#define ATMEL_TC_BCPC (3 << 26) /* RC compare changes TIOB */ +#define ATMEL_TC_BCPC_NONE (0 << 26) +#define ATMEL_TC_BCPC_SET (1 << 26) +#define ATMEL_TC_BCPC_CLEAR (2 << 26) +#define ATMEL_TC_BCPC_TOGGLE (3 << 26) +#define ATMEL_TC_BEEVT (3 << 28) /* external event changes TIOB */ +#define ATMEL_TC_BEEVT_NONE (0 << 28) +#define ATMEL_TC_BEEVT_SET (1 << 28) +#define ATMEL_TC_BEEVT_CLEAR (2 << 28) +#define ATMEL_TC_BEEVT_TOGGLE (3 << 28) +#define ATMEL_TC_BSWTRG (3 << 30) /* software trigger changes TIOB */ +#define ATMEL_TC_BSWTRG_NONE (0 << 30) +#define ATMEL_TC_BSWTRG_SET (1 << 30) +#define ATMEL_TC_BSWTRG_CLEAR (2 << 30) +#define ATMEL_TC_BSWTRG_TOGGLE (3 << 30) + +#define ATMEL_TC_CV 0x10 /* counter Value */ +#define ATMEL_TC_RA 0x14 /* register A */ +#define ATMEL_TC_RB 0x18 /* register B */ +#define ATMEL_TC_RC 0x1c /* register C */ + +#define ATMEL_TC_SR 0x20 /* status (read-only) */ +/* Status-only flags */ +#define ATMEL_TC_CLKSTA (1 << 16) /* clock enabled */ +#define ATMEL_TC_MTIOA (1 << 17) /* TIOA mirror */ +#define ATMEL_TC_MTIOB (1 << 18) /* TIOB mirror */ + +#define ATMEL_TC_IER 0x24 /* interrupt enable (write-only) */ +#define ATMEL_TC_IDR 0x28 /* interrupt disable (write-only) */ +#define ATMEL_TC_IMR 0x2c /* interrupt mask (read-only) */ + +/* Status and IRQ flags */ +#define ATMEL_TC_COVFS (1 << 0) /* counter overflow */ +#define ATMEL_TC_LOVRS (1 << 1) /* load overrun */ +#define ATMEL_TC_CPAS (1 << 2) /* RA compare */ 
+#define ATMEL_TC_CPBS (1 << 3) /* RB compare */ +#define ATMEL_TC_CPCS (1 << 4) /* RC compare */ +#define ATMEL_TC_LDRAS (1 << 5) /* RA loading */ +#define ATMEL_TC_LDRBS (1 << 6) /* RB loading */ +#define ATMEL_TC_ETRGS (1 << 7) /* external trigger */ +#define ATMEL_TC_ALL_IRQ (ATMEL_TC_COVFS | ATMEL_TC_LOVRS | \ + ATMEL_TC_CPAS | ATMEL_TC_CPBS | \ + ATMEL_TC_CPCS | ATMEL_TC_LDRAS | \ + ATMEL_TC_LDRBS | ATMEL_TC_ETRGS) \ + /* all IRQs */ + +#endif diff --git a/include/linux/atomic.h b/include/linux/atomic.h index 8dd57c3a99..957f2d63ed 100644 --- a/include/linux/atomic.h +++ b/include/linux/atomic.h @@ -1,9 +1,6 @@ -/* SPDX-License-Identifier: GPL-2.0 */ /* Atomic operations usable in machine independent code */ #ifndef _LINUX_ATOMIC_H #define _LINUX_ATOMIC_H -#include - #include #include @@ -25,60 +22,1042 @@ * See Documentation/memory-barriers.txt for ACQUIRE/RELEASE definitions. */ -#define atomic_cond_read_acquire(v, c) smp_cond_load_acquire(&(v)->counter, (c)) -#define atomic_cond_read_relaxed(v, c) smp_cond_load_relaxed(&(v)->counter, (c)) +#ifndef atomic_read_acquire +#define atomic_read_acquire(v) smp_load_acquire(&(v)->counter) +#endif -#define atomic64_cond_read_acquire(v, c) smp_cond_load_acquire(&(v)->counter, (c)) -#define atomic64_cond_read_relaxed(v, c) smp_cond_load_relaxed(&(v)->counter, (c)) +#ifndef atomic_set_release +#define atomic_set_release(v, i) smp_store_release(&(v)->counter, (i)) +#endif /* * The idea here is to build acquire/release variants by adding explicit * barriers on top of the relaxed variant. In the case where the relaxed * variant is already fully ordered, no additional barriers are needed. * - * If an architecture overrides __atomic_acquire_fence() it will probably - * want to define smp_mb__after_spinlock(). 
+ * Besides, if an arch has a special barrier for acquire/release, it could + * implement its own __atomic_op_* and use the same framework for building + * variants */ -#ifndef __atomic_acquire_fence -#define __atomic_acquire_fence smp_mb__after_atomic -#endif - -#ifndef __atomic_release_fence -#define __atomic_release_fence smp_mb__before_atomic -#endif - -#ifndef __atomic_pre_full_fence -#define __atomic_pre_full_fence smp_mb__before_atomic -#endif - -#ifndef __atomic_post_full_fence -#define __atomic_post_full_fence smp_mb__after_atomic -#endif - +#ifndef __atomic_op_acquire #define __atomic_op_acquire(op, args...) \ ({ \ typeof(op##_relaxed(args)) __ret = op##_relaxed(args); \ - __atomic_acquire_fence(); \ + smp_mb__after_atomic(); \ __ret; \ }) +#endif +#ifndef __atomic_op_release #define __atomic_op_release(op, args...) \ ({ \ - __atomic_release_fence(); \ + smp_mb__before_atomic(); \ op##_relaxed(args); \ }) +#endif +#ifndef __atomic_op_fence #define __atomic_op_fence(op, args...) \ ({ \ typeof(op##_relaxed(args)) __ret; \ - __atomic_pre_full_fence(); \ + smp_mb__before_atomic(); \ __ret = op##_relaxed(args); \ - __atomic_post_full_fence(); \ + smp_mb__after_atomic(); \ __ret; \ }) +#endif -#include -#include -#include +/* atomic_add_return_relaxed */ +#ifndef atomic_add_return_relaxed +#define atomic_add_return_relaxed atomic_add_return +#define atomic_add_return_acquire atomic_add_return +#define atomic_add_return_release atomic_add_return +#define atomic_add_return_unchecked_relaxed atomic_add_return_unchecked + +#else /* atomic_add_return_relaxed */ + +#ifndef atomic_add_return_acquire +#define atomic_add_return_acquire(...) \ + __atomic_op_acquire(atomic_add_return, __VA_ARGS__) +#endif + +#ifndef atomic_add_return_release +#define atomic_add_return_release(...) \ + __atomic_op_release(atomic_add_return, __VA_ARGS__) +#endif + +#ifndef atomic_add_return +#define atomic_add_return(...) 
\ + __atomic_op_fence(atomic_add_return, __VA_ARGS__) +#endif + +#ifndef atomic_add_return_unchecked +#define atomic_add_return_unchecked(...) \ + __atomic_op_fence(atomic_add_return_unchecked, __VA_ARGS__) +#endif +#endif /* atomic_add_return_relaxed */ + +/* atomic_inc_return_relaxed */ +#ifndef atomic_inc_return_relaxed +#define atomic_inc_return_relaxed atomic_inc_return +#define atomic_inc_return_acquire atomic_inc_return +#define atomic_inc_return_release atomic_inc_return + +#else /* atomic_inc_return_relaxed */ + +#ifndef atomic_inc_return_acquire +#define atomic_inc_return_acquire(...) \ + __atomic_op_acquire(atomic_inc_return, __VA_ARGS__) +#endif + +#ifndef atomic_inc_return_release +#define atomic_inc_return_release(...) \ + __atomic_op_release(atomic_inc_return, __VA_ARGS__) +#endif + +#ifndef atomic_inc_return +#define atomic_inc_return(...) \ + __atomic_op_fence(atomic_inc_return, __VA_ARGS__) +#endif + +#ifndef atomic_inc_return_unchecked +#define atomic_inc_return_unchecked(...) \ + __atomic_op_fence(atomic_inc_return_unchecked, __VA_ARGS__) +#endif +#endif /* atomic_inc_return_relaxed */ + +/* atomic_sub_return_relaxed */ +#ifndef atomic_sub_return_relaxed +#define atomic_sub_return_relaxed atomic_sub_return +#define atomic_sub_return_acquire atomic_sub_return +#define atomic_sub_return_release atomic_sub_return + +#else /* atomic_sub_return_relaxed */ + +#ifndef atomic_sub_return_acquire +#define atomic_sub_return_acquire(...) \ + __atomic_op_acquire(atomic_sub_return, __VA_ARGS__) +#endif + +#ifndef atomic_sub_return_release +#define atomic_sub_return_release(...) \ + __atomic_op_release(atomic_sub_return, __VA_ARGS__) +#endif + +#ifndef atomic_sub_return +#define atomic_sub_return(...) 
\ + __atomic_op_fence(atomic_sub_return, __VA_ARGS__) +#endif +#endif /* atomic_sub_return_relaxed */ + +/* atomic_dec_return_relaxed */ +#ifndef atomic_dec_return_relaxed +#define atomic_dec_return_relaxed atomic_dec_return +#define atomic_dec_return_acquire atomic_dec_return +#define atomic_dec_return_release atomic_dec_return + +#else /* atomic_dec_return_relaxed */ + +#ifndef atomic_dec_return_acquire +#define atomic_dec_return_acquire(...) \ + __atomic_op_acquire(atomic_dec_return, __VA_ARGS__) +#endif + +#ifndef atomic_dec_return_release +#define atomic_dec_return_release(...) \ + __atomic_op_release(atomic_dec_return, __VA_ARGS__) +#endif + +#ifndef atomic_dec_return +#define atomic_dec_return(...) \ + __atomic_op_fence(atomic_dec_return, __VA_ARGS__) +#endif +#endif /* atomic_dec_return_relaxed */ + + +/* atomic_fetch_add_relaxed */ +#ifndef atomic_fetch_add_relaxed +#define atomic_fetch_add_relaxed atomic_fetch_add +#define atomic_fetch_add_acquire atomic_fetch_add +#define atomic_fetch_add_release atomic_fetch_add + +#else /* atomic_fetch_add_relaxed */ + +#ifndef atomic_fetch_add_acquire +#define atomic_fetch_add_acquire(...) \ + __atomic_op_acquire(atomic_fetch_add, __VA_ARGS__) +#endif + +#ifndef atomic_fetch_add_release +#define atomic_fetch_add_release(...) \ + __atomic_op_release(atomic_fetch_add, __VA_ARGS__) +#endif + +#ifndef atomic_fetch_add +#define atomic_fetch_add(...) 
\ + __atomic_op_fence(atomic_fetch_add, __VA_ARGS__) +#endif +#endif /* atomic_fetch_add_relaxed */ + +/* atomic_fetch_inc_relaxed */ +#ifndef atomic_fetch_inc_relaxed + +#ifndef atomic_fetch_inc +#define atomic_fetch_inc(v) atomic_fetch_add(1, (v)) +#define atomic_fetch_inc_relaxed(v) atomic_fetch_add_relaxed(1, (v)) +#define atomic_fetch_inc_acquire(v) atomic_fetch_add_acquire(1, (v)) +#define atomic_fetch_inc_release(v) atomic_fetch_add_release(1, (v)) +#else /* atomic_fetch_inc */ +#define atomic_fetch_inc_relaxed atomic_fetch_inc +#define atomic_fetch_inc_acquire atomic_fetch_inc +#define atomic_fetch_inc_release atomic_fetch_inc +#endif /* atomic_fetch_inc */ + +#else /* atomic_fetch_inc_relaxed */ + +#ifndef atomic_fetch_inc_acquire +#define atomic_fetch_inc_acquire(...) \ + __atomic_op_acquire(atomic_fetch_inc, __VA_ARGS__) +#endif + +#ifndef atomic_fetch_inc_release +#define atomic_fetch_inc_release(...) \ + __atomic_op_release(atomic_fetch_inc, __VA_ARGS__) +#endif + +#ifndef atomic_fetch_inc +#define atomic_fetch_inc(...) \ + __atomic_op_fence(atomic_fetch_inc, __VA_ARGS__) +#endif +#endif /* atomic_fetch_inc_relaxed */ + +/* atomic_fetch_sub_relaxed */ +#ifndef atomic_fetch_sub_relaxed +#define atomic_fetch_sub_relaxed atomic_fetch_sub +#define atomic_fetch_sub_acquire atomic_fetch_sub +#define atomic_fetch_sub_release atomic_fetch_sub + +#else /* atomic_fetch_sub_relaxed */ + +#ifndef atomic_fetch_sub_acquire +#define atomic_fetch_sub_acquire(...) \ + __atomic_op_acquire(atomic_fetch_sub, __VA_ARGS__) +#endif + +#ifndef atomic_fetch_sub_release +#define atomic_fetch_sub_release(...) \ + __atomic_op_release(atomic_fetch_sub, __VA_ARGS__) +#endif + +#ifndef atomic_fetch_sub +#define atomic_fetch_sub(...) 
\ + __atomic_op_fence(atomic_fetch_sub, __VA_ARGS__) +#endif +#endif /* atomic_fetch_sub_relaxed */ + +/* atomic_fetch_dec_relaxed */ +#ifndef atomic_fetch_dec_relaxed + +#ifndef atomic_fetch_dec +#define atomic_fetch_dec(v) atomic_fetch_sub(1, (v)) +#define atomic_fetch_dec_relaxed(v) atomic_fetch_sub_relaxed(1, (v)) +#define atomic_fetch_dec_acquire(v) atomic_fetch_sub_acquire(1, (v)) +#define atomic_fetch_dec_release(v) atomic_fetch_sub_release(1, (v)) +#else /* atomic_fetch_dec */ +#define atomic_fetch_dec_relaxed atomic_fetch_dec +#define atomic_fetch_dec_acquire atomic_fetch_dec +#define atomic_fetch_dec_release atomic_fetch_dec +#endif /* atomic_fetch_dec */ + +#else /* atomic_fetch_dec_relaxed */ + +#ifndef atomic_fetch_dec_acquire +#define atomic_fetch_dec_acquire(...) \ + __atomic_op_acquire(atomic_fetch_dec, __VA_ARGS__) +#endif + +#ifndef atomic_fetch_dec_release +#define atomic_fetch_dec_release(...) \ + __atomic_op_release(atomic_fetch_dec, __VA_ARGS__) +#endif + +#ifndef atomic_fetch_dec +#define atomic_fetch_dec(...) \ + __atomic_op_fence(atomic_fetch_dec, __VA_ARGS__) +#endif +#endif /* atomic_fetch_dec_relaxed */ + +/* atomic_fetch_or_relaxed */ +#ifndef atomic_fetch_or_relaxed +#define atomic_fetch_or_relaxed atomic_fetch_or +#define atomic_fetch_or_acquire atomic_fetch_or +#define atomic_fetch_or_release atomic_fetch_or + +#else /* atomic_fetch_or_relaxed */ + +#ifndef atomic_fetch_or_acquire +#define atomic_fetch_or_acquire(...) \ + __atomic_op_acquire(atomic_fetch_or, __VA_ARGS__) +#endif + +#ifndef atomic_fetch_or_release +#define atomic_fetch_or_release(...) \ + __atomic_op_release(atomic_fetch_or, __VA_ARGS__) +#endif + +#ifndef atomic_fetch_or +#define atomic_fetch_or(...) 
\ + __atomic_op_fence(atomic_fetch_or, __VA_ARGS__) +#endif +#endif /* atomic_fetch_or_relaxed */ + +/* atomic_fetch_and_relaxed */ +#ifndef atomic_fetch_and_relaxed +#define atomic_fetch_and_relaxed atomic_fetch_and +#define atomic_fetch_and_acquire atomic_fetch_and +#define atomic_fetch_and_release atomic_fetch_and + +#else /* atomic_fetch_and_relaxed */ + +#ifndef atomic_fetch_and_acquire +#define atomic_fetch_and_acquire(...) \ + __atomic_op_acquire(atomic_fetch_and, __VA_ARGS__) +#endif + +#ifndef atomic_fetch_and_release +#define atomic_fetch_and_release(...) \ + __atomic_op_release(atomic_fetch_and, __VA_ARGS__) +#endif + +#ifndef atomic_fetch_and +#define atomic_fetch_and(...) \ + __atomic_op_fence(atomic_fetch_and, __VA_ARGS__) +#endif +#endif /* atomic_fetch_and_relaxed */ + +#ifdef atomic_andnot +/* atomic_fetch_andnot_relaxed */ +#ifndef atomic_fetch_andnot_relaxed +#define atomic_fetch_andnot_relaxed atomic_fetch_andnot +#define atomic_fetch_andnot_acquire atomic_fetch_andnot +#define atomic_fetch_andnot_release atomic_fetch_andnot + +#else /* atomic_fetch_andnot_relaxed */ + +#ifndef atomic_fetch_andnot_acquire +#define atomic_fetch_andnot_acquire(...) \ + __atomic_op_acquire(atomic_fetch_andnot, __VA_ARGS__) +#endif + +#ifndef atomic_fetch_andnot_release +#define atomic_fetch_andnot_release(...) \ + __atomic_op_release(atomic_fetch_andnot, __VA_ARGS__) +#endif + +#ifndef atomic_fetch_andnot +#define atomic_fetch_andnot(...) \ + __atomic_op_fence(atomic_fetch_andnot, __VA_ARGS__) +#endif +#endif /* atomic_fetch_andnot_relaxed */ +#endif /* atomic_andnot */ + +/* atomic_fetch_xor_relaxed */ +#ifndef atomic_fetch_xor_relaxed +#define atomic_fetch_xor_relaxed atomic_fetch_xor +#define atomic_fetch_xor_acquire atomic_fetch_xor +#define atomic_fetch_xor_release atomic_fetch_xor + +#else /* atomic_fetch_xor_relaxed */ + +#ifndef atomic_fetch_xor_acquire +#define atomic_fetch_xor_acquire(...) 
\ + __atomic_op_acquire(atomic_fetch_xor, __VA_ARGS__) +#endif + +#ifndef atomic_fetch_xor_release +#define atomic_fetch_xor_release(...) \ + __atomic_op_release(atomic_fetch_xor, __VA_ARGS__) +#endif + +#ifndef atomic_fetch_xor +#define atomic_fetch_xor(...) \ + __atomic_op_fence(atomic_fetch_xor, __VA_ARGS__) +#endif +#endif /* atomic_fetch_xor_relaxed */ + + +/* atomic_xchg_relaxed */ +#ifndef atomic_xchg_relaxed +#define atomic_xchg_relaxed atomic_xchg +#define atomic_xchg_acquire atomic_xchg +#define atomic_xchg_release atomic_xchg + +#else /* atomic_xchg_relaxed */ + +#ifndef atomic_xchg_acquire +#define atomic_xchg_acquire(...) \ + __atomic_op_acquire(atomic_xchg, __VA_ARGS__) +#endif + +#ifndef atomic_xchg_release +#define atomic_xchg_release(...) \ + __atomic_op_release(atomic_xchg, __VA_ARGS__) +#endif + +#ifndef atomic_xchg +#define atomic_xchg(...) \ + __atomic_op_fence(atomic_xchg, __VA_ARGS__) +#endif +#endif /* atomic_xchg_relaxed */ + +/* atomic_cmpxchg_relaxed */ +#ifndef atomic_cmpxchg_relaxed +#define atomic_cmpxchg_relaxed atomic_cmpxchg +#define atomic_cmpxchg_acquire atomic_cmpxchg +#define atomic_cmpxchg_release atomic_cmpxchg + +#else /* atomic_cmpxchg_relaxed */ + +#ifndef atomic_cmpxchg_acquire +#define atomic_cmpxchg_acquire(...) \ + __atomic_op_acquire(atomic_cmpxchg, __VA_ARGS__) +#endif + +#ifndef atomic_cmpxchg_release +#define atomic_cmpxchg_release(...) \ + __atomic_op_release(atomic_cmpxchg, __VA_ARGS__) +#endif + +#ifndef atomic_cmpxchg +#define atomic_cmpxchg(...) \ + __atomic_op_fence(atomic_cmpxchg, __VA_ARGS__) +#endif +#endif /* atomic_cmpxchg_relaxed */ + +/* cmpxchg_relaxed */ +#ifndef cmpxchg_relaxed +#define cmpxchg_relaxed cmpxchg +#define cmpxchg_acquire cmpxchg +#define cmpxchg_release cmpxchg + +#else /* cmpxchg_relaxed */ + +#ifndef cmpxchg_acquire +#define cmpxchg_acquire(...) \ + __atomic_op_acquire(cmpxchg, __VA_ARGS__) +#endif + +#ifndef cmpxchg_release +#define cmpxchg_release(...) 
\ + __atomic_op_release(cmpxchg, __VA_ARGS__) +#endif + +#ifndef cmpxchg +#define cmpxchg(...) \ + __atomic_op_fence(cmpxchg, __VA_ARGS__) +#endif +#endif /* cmpxchg_relaxed */ + +/* cmpxchg64_relaxed */ +#ifndef cmpxchg64_relaxed +#define cmpxchg64_relaxed cmpxchg64 +#define cmpxchg64_acquire cmpxchg64 +#define cmpxchg64_release cmpxchg64 + +#else /* cmpxchg64_relaxed */ + +#ifndef cmpxchg64_acquire +#define cmpxchg64_acquire(...) \ + __atomic_op_acquire(cmpxchg64, __VA_ARGS__) +#endif + +#ifndef cmpxchg64_release +#define cmpxchg64_release(...) \ + __atomic_op_release(cmpxchg64, __VA_ARGS__) +#endif + +#ifndef cmpxchg64 +#define cmpxchg64(...) \ + __atomic_op_fence(cmpxchg64, __VA_ARGS__) +#endif +#endif /* cmpxchg64_relaxed */ + +/* xchg_relaxed */ +#ifndef xchg_relaxed +#define xchg_relaxed xchg +#define xchg_acquire xchg +#define xchg_release xchg + +#else /* xchg_relaxed */ + +#ifndef xchg_acquire +#define xchg_acquire(...) __atomic_op_acquire(xchg, __VA_ARGS__) +#endif + +#ifndef xchg_release +#define xchg_release(...) __atomic_op_release(xchg, __VA_ARGS__) +#endif + +#ifndef xchg +#define xchg(...) __atomic_op_fence(xchg, __VA_ARGS__) +#endif + +#ifndef xchg_unchecked +#define xchg_unchecked(...) __atomic_op_fence(xchg_unchecked, __VA_ARGS__) +#endif +#endif /* xchg_relaxed */ + +/** + * atomic_add_unless - add unless the number is already a given value + * @v: pointer of type atomic_t + * @a: the amount to add to v... + * @u: ...unless v is equal to u. + * + * Atomically adds @a to @v, so long as @v was not already @u. + * Returns non-zero if @v was not @u, and zero otherwise. + */ +static inline int __intentional_overflow(-1) atomic_add_unless(atomic_t *v, int a, int u) +{ + return __atomic_add_unless(v, a, u) != u; +} + +/** + * atomic_inc_not_zero - increment unless the number is zero + * @v: pointer of type atomic_t + * + * Atomically increments @v by 1, so long as @v is non-zero. + * Returns non-zero if @v was non-zero, and zero otherwise. 
+ */ +#ifndef atomic_inc_not_zero +#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0) +#endif + +#ifndef atomic_andnot +static inline void atomic_andnot(int i, atomic_t *v) +{ + atomic_and(~i, v); +} + +static inline int atomic_fetch_andnot(int i, atomic_t *v) +{ + return atomic_fetch_and(~i, v); +} + +static inline int atomic_fetch_andnot_relaxed(int i, atomic_t *v) +{ + return atomic_fetch_and_relaxed(~i, v); +} + +static inline int atomic_fetch_andnot_acquire(int i, atomic_t *v) +{ + return atomic_fetch_and_acquire(~i, v); +} + +static inline int atomic_fetch_andnot_release(int i, atomic_t *v) +{ + return atomic_fetch_and_release(~i, v); +} +#endif + +/** + * atomic_inc_not_zero_hint - increment if not null + * @v: pointer of type atomic_t + * @hint: probable value of the atomic before the increment + * + * This version of atomic_inc_not_zero() gives a hint of probable + * value of the atomic. This helps processor to not read the memory + * before doing the atomic read/modify/write cycle, lowering + * number of bus transactions on some arches. + * + * Returns: 0 if increment was not done, 1 otherwise. 
+ */ +#ifndef atomic_inc_not_zero_hint +static inline int atomic_inc_not_zero_hint(atomic_t *v, int hint) +{ + int val, c = hint; + + /* sanity test, should be removed by compiler if hint is a constant */ + if (!hint) + return atomic_inc_not_zero(v); + + do { + val = atomic_cmpxchg(v, c, c + 1); + if (val == c) + return 1; + c = val; + } while (c); + + return 0; +} +#endif + +#ifndef atomic_inc_unless_negative +static inline int atomic_inc_unless_negative(atomic_t *p) +{ + int v, v1; + for (v = 0; v >= 0; v = v1) { + v1 = atomic_cmpxchg(p, v, v + 1); + if (likely(v1 == v)) + return 1; + } + return 0; +} +#endif + +#ifndef atomic_dec_unless_positive +static inline int atomic_dec_unless_positive(atomic_t *p) +{ + int v, v1; + for (v = 0; v <= 0; v = v1) { + v1 = atomic_cmpxchg(p, v, v - 1); + if (likely(v1 == v)) + return 1; + } + return 0; +} +#endif + +/* + * atomic_dec_if_positive - decrement by 1 if old value positive + * @v: pointer of type atomic_t + * + * The function returns the old value of *v minus 1, even if + * the atomic variable, v, was not decremented. 
+ */ +#ifndef atomic_dec_if_positive +static inline int atomic_dec_if_positive(atomic_t *v) +{ + int c, old, dec; + c = atomic_read(v); + for (;;) { + dec = c - 1; + if (unlikely(dec < 0)) + break; + old = atomic_cmpxchg(v, c, dec); + if (likely(old == c)) + break; + c = old; + } + return dec; +} +#endif + +#ifdef CONFIG_GENERIC_ATOMIC64 +#include +#endif + +#ifndef atomic64_read_acquire +#define atomic64_read_acquire(v) smp_load_acquire(&(v)->counter) +#endif + +#ifndef atomic64_set_release +#define atomic64_set_release(v, i) smp_store_release(&(v)->counter, (i)) +#endif + +/* atomic64_add_return_relaxed */ +#ifndef atomic64_add_return_relaxed +#define atomic64_add_return_relaxed atomic64_add_return +#define atomic64_add_return_acquire atomic64_add_return +#define atomic64_add_return_release atomic64_add_return + +#else /* atomic64_add_return_relaxed */ + +#ifndef atomic64_add_return_acquire +#define atomic64_add_return_acquire(...) \ + __atomic_op_acquire(atomic64_add_return, __VA_ARGS__) +#endif + +#ifndef atomic64_add_return_release +#define atomic64_add_return_release(...) \ + __atomic_op_release(atomic64_add_return, __VA_ARGS__) +#endif + +#ifndef atomic64_add_return +#define atomic64_add_return(...) \ + __atomic_op_fence(atomic64_add_return, __VA_ARGS__) +#endif + +#ifndef atomic64_add_return_unchecked +#define atomic64_add_return_unchecked(...) \ + __atomic_op_fence(atomic64_add_return_unchecked, __VA_ARGS__) +#endif +#endif /* atomic64_add_return_relaxed */ + +/* atomic64_inc_return_relaxed */ +#ifndef atomic64_inc_return_relaxed +#define atomic64_inc_return_relaxed atomic64_inc_return +#define atomic64_inc_return_acquire atomic64_inc_return +#define atomic64_inc_return_release atomic64_inc_return + +#else /* atomic64_inc_return_relaxed */ + +#ifndef atomic64_inc_return_acquire +#define atomic64_inc_return_acquire(...) 
\ + __atomic_op_acquire(atomic64_inc_return, __VA_ARGS__) +#endif + +#ifndef atomic64_inc_return_release +#define atomic64_inc_return_release(...) \ + __atomic_op_release(atomic64_inc_return, __VA_ARGS__) +#endif + +#ifndef atomic64_inc_return +#define atomic64_inc_return(...) \ + __atomic_op_fence(atomic64_inc_return, __VA_ARGS__) +#endif + +#ifndef atomic64_inc_return_unchecked +#define atomic64_inc_return_unchecked(...) \ + __atomic_op_fence(atomic64_inc_return_unchecked, __VA_ARGS__) +#endif +#endif /* atomic64_inc_return_relaxed */ + + +/* atomic64_sub_return_relaxed */ +#ifndef atomic64_sub_return_relaxed +#define atomic64_sub_return_relaxed atomic64_sub_return +#define atomic64_sub_return_acquire atomic64_sub_return +#define atomic64_sub_return_release atomic64_sub_return + +#else /* atomic64_sub_return_relaxed */ + +#ifndef atomic64_sub_return_acquire +#define atomic64_sub_return_acquire(...) \ + __atomic_op_acquire(atomic64_sub_return, __VA_ARGS__) +#endif + +#ifndef atomic64_sub_return_release +#define atomic64_sub_return_release(...) \ + __atomic_op_release(atomic64_sub_return, __VA_ARGS__) +#endif + +#ifndef atomic64_sub_return +#define atomic64_sub_return(...) \ + __atomic_op_fence(atomic64_sub_return, __VA_ARGS__) +#endif +#endif /* atomic64_sub_return_relaxed */ + +/* atomic64_dec_return_relaxed */ +#ifndef atomic64_dec_return_relaxed +#define atomic64_dec_return_relaxed atomic64_dec_return +#define atomic64_dec_return_acquire atomic64_dec_return +#define atomic64_dec_return_release atomic64_dec_return + +#else /* atomic64_dec_return_relaxed */ + +#ifndef atomic64_dec_return_acquire +#define atomic64_dec_return_acquire(...) \ + __atomic_op_acquire(atomic64_dec_return, __VA_ARGS__) +#endif + +#ifndef atomic64_dec_return_release +#define atomic64_dec_return_release(...) \ + __atomic_op_release(atomic64_dec_return, __VA_ARGS__) +#endif + +#ifndef atomic64_dec_return +#define atomic64_dec_return(...) 
\ + __atomic_op_fence(atomic64_dec_return, __VA_ARGS__) +#endif +#endif /* atomic64_dec_return_relaxed */ + + +/* atomic64_fetch_add_relaxed */ +#ifndef atomic64_fetch_add_relaxed +#define atomic64_fetch_add_relaxed atomic64_fetch_add +#define atomic64_fetch_add_acquire atomic64_fetch_add +#define atomic64_fetch_add_release atomic64_fetch_add + +#else /* atomic64_fetch_add_relaxed */ + +#ifndef atomic64_fetch_add_acquire +#define atomic64_fetch_add_acquire(...) \ + __atomic_op_acquire(atomic64_fetch_add, __VA_ARGS__) +#endif + +#ifndef atomic64_fetch_add_release +#define atomic64_fetch_add_release(...) \ + __atomic_op_release(atomic64_fetch_add, __VA_ARGS__) +#endif + +#ifndef atomic64_fetch_add +#define atomic64_fetch_add(...) \ + __atomic_op_fence(atomic64_fetch_add, __VA_ARGS__) +#endif +#endif /* atomic64_fetch_add_relaxed */ + +/* atomic64_fetch_inc_relaxed */ +#ifndef atomic64_fetch_inc_relaxed + +#ifndef atomic64_fetch_inc +#define atomic64_fetch_inc(v) atomic64_fetch_add(1, (v)) +#define atomic64_fetch_inc_relaxed(v) atomic64_fetch_add_relaxed(1, (v)) +#define atomic64_fetch_inc_acquire(v) atomic64_fetch_add_acquire(1, (v)) +#define atomic64_fetch_inc_release(v) atomic64_fetch_add_release(1, (v)) +#else /* atomic64_fetch_inc */ +#define atomic64_fetch_inc_relaxed atomic64_fetch_inc +#define atomic64_fetch_inc_acquire atomic64_fetch_inc +#define atomic64_fetch_inc_release atomic64_fetch_inc +#endif /* atomic64_fetch_inc */ + +#else /* atomic64_fetch_inc_relaxed */ + +#ifndef atomic64_fetch_inc_acquire +#define atomic64_fetch_inc_acquire(...) \ + __atomic_op_acquire(atomic64_fetch_inc, __VA_ARGS__) +#endif + +#ifndef atomic64_fetch_inc_release +#define atomic64_fetch_inc_release(...) \ + __atomic_op_release(atomic64_fetch_inc, __VA_ARGS__) +#endif + +#ifndef atomic64_fetch_inc +#define atomic64_fetch_inc(...) 
\ + __atomic_op_fence(atomic64_fetch_inc, __VA_ARGS__) +#endif +#endif /* atomic64_fetch_inc_relaxed */ + +/* atomic64_fetch_sub_relaxed */ +#ifndef atomic64_fetch_sub_relaxed +#define atomic64_fetch_sub_relaxed atomic64_fetch_sub +#define atomic64_fetch_sub_acquire atomic64_fetch_sub +#define atomic64_fetch_sub_release atomic64_fetch_sub + +#else /* atomic64_fetch_sub_relaxed */ + +#ifndef atomic64_fetch_sub_acquire +#define atomic64_fetch_sub_acquire(...) \ + __atomic_op_acquire(atomic64_fetch_sub, __VA_ARGS__) +#endif + +#ifndef atomic64_fetch_sub_release +#define atomic64_fetch_sub_release(...) \ + __atomic_op_release(atomic64_fetch_sub, __VA_ARGS__) +#endif + +#ifndef atomic64_fetch_sub +#define atomic64_fetch_sub(...) \ + __atomic_op_fence(atomic64_fetch_sub, __VA_ARGS__) +#endif +#endif /* atomic64_fetch_sub_relaxed */ + +/* atomic64_fetch_dec_relaxed */ +#ifndef atomic64_fetch_dec_relaxed + +#ifndef atomic64_fetch_dec +#define atomic64_fetch_dec(v) atomic64_fetch_sub(1, (v)) +#define atomic64_fetch_dec_relaxed(v) atomic64_fetch_sub_relaxed(1, (v)) +#define atomic64_fetch_dec_acquire(v) atomic64_fetch_sub_acquire(1, (v)) +#define atomic64_fetch_dec_release(v) atomic64_fetch_sub_release(1, (v)) +#else /* atomic64_fetch_dec */ +#define atomic64_fetch_dec_relaxed atomic64_fetch_dec +#define atomic64_fetch_dec_acquire atomic64_fetch_dec +#define atomic64_fetch_dec_release atomic64_fetch_dec +#endif /* atomic64_fetch_dec */ + +#else /* atomic64_fetch_dec_relaxed */ + +#ifndef atomic64_fetch_dec_acquire +#define atomic64_fetch_dec_acquire(...) \ + __atomic_op_acquire(atomic64_fetch_dec, __VA_ARGS__) +#endif + +#ifndef atomic64_fetch_dec_release +#define atomic64_fetch_dec_release(...) \ + __atomic_op_release(atomic64_fetch_dec, __VA_ARGS__) +#endif + +#ifndef atomic64_fetch_dec +#define atomic64_fetch_dec(...) 
\ + __atomic_op_fence(atomic64_fetch_dec, __VA_ARGS__) +#endif +#endif /* atomic64_fetch_dec_relaxed */ + +/* atomic64_fetch_or_relaxed */ +#ifndef atomic64_fetch_or_relaxed +#define atomic64_fetch_or_relaxed atomic64_fetch_or +#define atomic64_fetch_or_acquire atomic64_fetch_or +#define atomic64_fetch_or_release atomic64_fetch_or + +#else /* atomic64_fetch_or_relaxed */ + +#ifndef atomic64_fetch_or_acquire +#define atomic64_fetch_or_acquire(...) \ + __atomic_op_acquire(atomic64_fetch_or, __VA_ARGS__) +#endif + +#ifndef atomic64_fetch_or_release +#define atomic64_fetch_or_release(...) \ + __atomic_op_release(atomic64_fetch_or, __VA_ARGS__) +#endif + +#ifndef atomic64_fetch_or +#define atomic64_fetch_or(...) \ + __atomic_op_fence(atomic64_fetch_or, __VA_ARGS__) +#endif +#endif /* atomic64_fetch_or_relaxed */ + +/* atomic64_fetch_and_relaxed */ +#ifndef atomic64_fetch_and_relaxed +#define atomic64_fetch_and_relaxed atomic64_fetch_and +#define atomic64_fetch_and_acquire atomic64_fetch_and +#define atomic64_fetch_and_release atomic64_fetch_and + +#else /* atomic64_fetch_and_relaxed */ + +#ifndef atomic64_fetch_and_acquire +#define atomic64_fetch_and_acquire(...) \ + __atomic_op_acquire(atomic64_fetch_and, __VA_ARGS__) +#endif + +#ifndef atomic64_fetch_and_release +#define atomic64_fetch_and_release(...) \ + __atomic_op_release(atomic64_fetch_and, __VA_ARGS__) +#endif + +#ifndef atomic64_fetch_and +#define atomic64_fetch_and(...) \ + __atomic_op_fence(atomic64_fetch_and, __VA_ARGS__) +#endif +#endif /* atomic64_fetch_and_relaxed */ + +#ifdef atomic64_andnot +/* atomic64_fetch_andnot_relaxed */ +#ifndef atomic64_fetch_andnot_relaxed +#define atomic64_fetch_andnot_relaxed atomic64_fetch_andnot +#define atomic64_fetch_andnot_acquire atomic64_fetch_andnot +#define atomic64_fetch_andnot_release atomic64_fetch_andnot + +#else /* atomic64_fetch_andnot_relaxed */ + +#ifndef atomic64_fetch_andnot_acquire +#define atomic64_fetch_andnot_acquire(...) 
\ + __atomic_op_acquire(atomic64_fetch_andnot, __VA_ARGS__) +#endif + +#ifndef atomic64_fetch_andnot_release +#define atomic64_fetch_andnot_release(...) \ + __atomic_op_release(atomic64_fetch_andnot, __VA_ARGS__) +#endif + +#ifndef atomic64_fetch_andnot +#define atomic64_fetch_andnot(...) \ + __atomic_op_fence(atomic64_fetch_andnot, __VA_ARGS__) +#endif +#endif /* atomic64_fetch_andnot_relaxed */ +#endif /* atomic64_andnot */ + +/* atomic64_fetch_xor_relaxed */ +#ifndef atomic64_fetch_xor_relaxed +#define atomic64_fetch_xor_relaxed atomic64_fetch_xor +#define atomic64_fetch_xor_acquire atomic64_fetch_xor +#define atomic64_fetch_xor_release atomic64_fetch_xor + +#else /* atomic64_fetch_xor_relaxed */ + +#ifndef atomic64_fetch_xor_acquire +#define atomic64_fetch_xor_acquire(...) \ + __atomic_op_acquire(atomic64_fetch_xor, __VA_ARGS__) +#endif + +#ifndef atomic64_fetch_xor_release +#define atomic64_fetch_xor_release(...) \ + __atomic_op_release(atomic64_fetch_xor, __VA_ARGS__) +#endif + +#ifndef atomic64_fetch_xor +#define atomic64_fetch_xor(...) \ + __atomic_op_fence(atomic64_fetch_xor, __VA_ARGS__) +#endif +#endif /* atomic64_fetch_xor_relaxed */ + + +/* atomic64_xchg_relaxed */ +#ifndef atomic64_xchg_relaxed +#define atomic64_xchg_relaxed atomic64_xchg +#define atomic64_xchg_acquire atomic64_xchg +#define atomic64_xchg_release atomic64_xchg + +#else /* atomic64_xchg_relaxed */ + +#ifndef atomic64_xchg_acquire +#define atomic64_xchg_acquire(...) \ + __atomic_op_acquire(atomic64_xchg, __VA_ARGS__) +#endif + +#ifndef atomic64_xchg_release +#define atomic64_xchg_release(...) \ + __atomic_op_release(atomic64_xchg, __VA_ARGS__) +#endif + +#ifndef atomic64_xchg +#define atomic64_xchg(...) \ + __atomic_op_fence(atomic64_xchg, __VA_ARGS__) +#endif + +#ifndef atomic64_xchg_unchecked +#define atomic64_xchg_unchecked(...) 
\ + __atomic_op_fence(atomic64_xchg_unchecked, __VA_ARGS__) +#endif +#endif /* atomic64_xchg_relaxed */ + +/* atomic64_cmpxchg_relaxed */ +#ifndef atomic64_cmpxchg_relaxed +#define atomic64_cmpxchg_relaxed atomic64_cmpxchg +#define atomic64_cmpxchg_acquire atomic64_cmpxchg +#define atomic64_cmpxchg_release atomic64_cmpxchg + +#else /* atomic64_cmpxchg_relaxed */ + +#ifndef atomic64_cmpxchg_acquire +#define atomic64_cmpxchg_acquire(...) \ + __atomic_op_acquire(atomic64_cmpxchg, __VA_ARGS__) +#endif + +#ifndef atomic64_cmpxchg_release +#define atomic64_cmpxchg_release(...) \ + __atomic_op_release(atomic64_cmpxchg, __VA_ARGS__) +#endif + +#ifndef atomic64_cmpxchg +#define atomic64_cmpxchg(...) \ + __atomic_op_fence(atomic64_cmpxchg, __VA_ARGS__) +#endif + +#ifndef atomic64_cmpxchg_unchecked +#define atomic64_cmpxchg_unchecked(...) \ + __atomic_op_fence(atomic64_cmpxchg_unchecked, __VA_ARGS__) +#endif +#endif /* atomic64_cmpxchg_relaxed */ + +#ifndef atomic64_andnot +static inline void atomic64_andnot(long long i, atomic64_t *v) +{ + atomic64_and(~i, v); +} + +static inline long long atomic64_fetch_andnot(long long i, atomic64_t *v) +{ + return atomic64_fetch_and(~i, v); +} + +static inline long long atomic64_fetch_andnot_relaxed(long long i, atomic64_t *v) +{ + return atomic64_fetch_and_relaxed(~i, v); +} + +static inline long long atomic64_fetch_andnot_acquire(long long i, atomic64_t *v) +{ + return atomic64_fetch_and_acquire(~i, v); +} + +static inline long long atomic64_fetch_andnot_release(long long i, atomic64_t *v) +{ + return atomic64_fetch_and_release(~i, v); +} +#endif + +#include #endif /* _LINUX_ATOMIC_H */ diff --git a/include/linux/attribute_container.h b/include/linux/attribute_container.h index e4004d1e67..896c6892f3 100644 --- a/include/linux/attribute_container.h +++ b/include/linux/attribute_container.h @@ -1,8 +1,9 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * attribute_container.h - a generic container for all classes * * Copyright (c) 2005 
- James Bottomley + * + * This file is licensed under GPLv2 */ #ifndef _ATTRIBUTE_CONTAINER_H_ @@ -54,13 +55,6 @@ void attribute_container_device_trigger(struct device *dev, int (*fn)(struct attribute_container *, struct device *, struct device *)); -int attribute_container_device_trigger_safe(struct device *dev, - int (*fn)(struct attribute_container *, - struct device *, - struct device *), - int (*undo)(struct attribute_container *, - struct device *, - struct device *)); void attribute_container_trigger(struct device *dev, int (*fn)(struct attribute_container *, struct device *)); diff --git a/include/linux/audit.h b/include/linux/audit.h index 82b7c1116a..b0b3fef029 100644 --- a/include/linux/audit.h +++ b/include/linux/audit.h @@ -1,10 +1,24 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ /* audit.h -- Auditing support * * Copyright 2003-2004 Red Hat Inc., Durham, North Carolina. * All Rights Reserved. * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + * * Written by Rickard E. 
(Rik) Faith + * */ #ifndef _LINUX_AUDIT_H_ #define _LINUX_AUDIT_H_ @@ -12,7 +26,6 @@ #include #include #include -#include #define AUDIT_INO_UNSET ((unsigned long)-1) #define AUDIT_DEV_UNSET ((dev_t)-1) @@ -20,7 +33,7 @@ struct audit_sig_info { uid_t uid; pid_t pid; - char ctx[]; + char ctx[0]; }; struct audit_buffer; @@ -72,52 +85,6 @@ struct audit_field { u32 op; }; -enum audit_ntp_type { - AUDIT_NTP_OFFSET, - AUDIT_NTP_FREQ, - AUDIT_NTP_STATUS, - AUDIT_NTP_TAI, - AUDIT_NTP_TICK, - AUDIT_NTP_ADJUST, - - AUDIT_NTP_NVALS /* count */ -}; - -#ifdef CONFIG_AUDITSYSCALL -struct audit_ntp_val { - long long oldval, newval; -}; - -struct audit_ntp_data { - struct audit_ntp_val vals[AUDIT_NTP_NVALS]; -}; -#else -struct audit_ntp_data {}; -#endif - -enum audit_nfcfgop { - AUDIT_XT_OP_REGISTER, - AUDIT_XT_OP_REPLACE, - AUDIT_XT_OP_UNREGISTER, - AUDIT_NFT_OP_TABLE_REGISTER, - AUDIT_NFT_OP_TABLE_UNREGISTER, - AUDIT_NFT_OP_CHAIN_REGISTER, - AUDIT_NFT_OP_CHAIN_UNREGISTER, - AUDIT_NFT_OP_RULE_REGISTER, - AUDIT_NFT_OP_RULE_UNREGISTER, - AUDIT_NFT_OP_SET_REGISTER, - AUDIT_NFT_OP_SET_UNREGISTER, - AUDIT_NFT_OP_SETELEM_REGISTER, - AUDIT_NFT_OP_SETELEM_UNREGISTER, - AUDIT_NFT_OP_GEN_REGISTER, - AUDIT_NFT_OP_OBJ_REGISTER, - AUDIT_NFT_OP_OBJ_UNREGISTER, - AUDIT_NFT_OP_OBJ_RESET, - AUDIT_NFT_OP_FLOWTABLE_REGISTER, - AUDIT_NFT_OP_FLOWTABLE_UNREGISTER, - AUDIT_NFT_OP_INVALID, -}; - extern int is_audit_feature_set(int which); extern int __init audit_register_class(int class, unsigned *list); @@ -148,9 +115,8 @@ extern int audit_classify_compat_syscall(int abi, unsigned syscall); struct filename; -#define AUDIT_OFF 0 -#define AUDIT_ON 1 -#define AUDIT_LOCKED 2 +extern void audit_log_session_info(struct audit_buffer *ab); + #ifdef CONFIG_AUDIT /* These are defined in audit.c */ /* Public API */ @@ -169,7 +135,7 @@ extern void audit_log_n_hex(struct audit_buffer *ab, size_t len); extern void audit_log_n_string(struct audit_buffer *ab, const char *buf, - size_t n); + size_t n) __nocapture(2); 
extern void audit_log_n_untrustedstring(struct audit_buffer *ab, const char *string, size_t n); @@ -180,35 +146,28 @@ extern void audit_log_d_path(struct audit_buffer *ab, const struct path *path); extern void audit_log_key(struct audit_buffer *ab, char *key); -extern void audit_log_path_denied(int type, - const char *operation); +extern void audit_log_link_denied(const char *operation, + struct path *link); extern void audit_log_lost(const char *message); +#ifdef CONFIG_SECURITY +extern void audit_log_secctx(struct audit_buffer *ab, u32 secid); +#else +static inline void audit_log_secctx(struct audit_buffer *ab, u32 secid) +{ } +#endif extern int audit_log_task_context(struct audit_buffer *ab); -extern void audit_log_task_info(struct audit_buffer *ab); +extern void audit_log_task_info(struct audit_buffer *ab, + struct task_struct *tsk); extern int audit_update_lsm_rules(void); /* Private API (for audit.c only) */ -extern int audit_rule_change(int type, int seq, void *data, size_t datasz); +extern int audit_rule_change(int type, __u32 portid, int seq, + void *data, size_t datasz); extern int audit_list_rules_send(struct sk_buff *request_skb, int seq); -extern int audit_set_loginuid(kuid_t loginuid); - -static inline kuid_t audit_get_loginuid(struct task_struct *tsk) -{ - return tsk->loginuid; -} - -static inline unsigned int audit_get_sessionid(struct task_struct *tsk) -{ - return tsk->sessionid; -} - extern u32 audit_enabled; - -extern int audit_signal_info(int sig, struct task_struct *t); - #else /* CONFIG_AUDIT */ static inline __printf(4, 5) void audit_log(struct audit_context *ctx, gfp_t gfp_mask, int type, @@ -242,32 +201,19 @@ static inline void audit_log_d_path(struct audit_buffer *ab, { } static inline void audit_log_key(struct audit_buffer *ab, char *key) { } -static inline void audit_log_path_denied(int type, const char *operation) +static inline void audit_log_link_denied(const char *string, + const struct path *link) +{ } +static inline void 
audit_log_secctx(struct audit_buffer *ab, u32 secid) { } static inline int audit_log_task_context(struct audit_buffer *ab) { return 0; } -static inline void audit_log_task_info(struct audit_buffer *ab) +static inline void audit_log_task_info(struct audit_buffer *ab, + struct task_struct *tsk) { } - -static inline kuid_t audit_get_loginuid(struct task_struct *tsk) -{ - return INVALID_UID; -} - -static inline unsigned int audit_get_sessionid(struct task_struct *tsk) -{ - return AUDIT_SID_UNSET; -} - -#define audit_enabled AUDIT_OFF - -static inline int audit_signal_info(int sig, struct task_struct *t) -{ - return 0; -} - +#define audit_enabled 0 #endif /* CONFIG_AUDIT */ #ifdef CONFIG_AUDIT_COMPAT_GENERIC @@ -276,10 +222,6 @@ static inline int audit_signal_info(int sig, struct task_struct *t) #define audit_is_compat(arch) false #endif -#define AUDIT_INODE_PARENT 1 /* dentry represents the parent */ -#define AUDIT_INODE_HIDDEN 2 /* audit record should be hidden */ -#define AUDIT_INODE_NOEVAL 4 /* audit record incomplete */ - #ifdef CONFIG_AUDITSYSCALL #include /* for syscall_get_arch() */ @@ -292,30 +234,21 @@ extern void __audit_syscall_entry(int major, unsigned long a0, unsigned long a1, extern void __audit_syscall_exit(int ret_success, long ret_value); extern struct filename *__audit_reusename(const __user char *uptr); extern void __audit_getname(struct filename *name); + +#define AUDIT_INODE_PARENT 1 /* dentry represents the parent */ +#define AUDIT_INODE_HIDDEN 2 /* audit record should be hidden */ extern void __audit_inode(struct filename *name, const struct dentry *dentry, unsigned int flags); extern void __audit_file(const struct file *); extern void __audit_inode_child(struct inode *parent, const struct dentry *dentry, const unsigned char type); -extern void audit_seccomp(unsigned long syscall, long signr, int code); -extern void audit_seccomp_actions_logged(const char *names, - const char *old_names, int res); +extern void __audit_seccomp(unsigned long 
syscall, long signr, int code); extern void __audit_ptrace(struct task_struct *t); -static inline void audit_set_context(struct task_struct *task, struct audit_context *ctx) -{ - task->audit_context = ctx; -} - -static inline struct audit_context *audit_context(void) -{ - return current->audit_context; -} - static inline bool audit_dummy_context(void) { - void *p = audit_context(); + void *p = current->audit_context; return !p || *(int *)p; } static inline void audit_free(struct task_struct *task) @@ -327,12 +260,12 @@ static inline void audit_syscall_entry(int major, unsigned long a0, unsigned long a1, unsigned long a2, unsigned long a3) { - if (unlikely(audit_context())) + if (unlikely(current->audit_context)) __audit_syscall_entry(major, a0, a1, a2, a3); } static inline void audit_syscall_exit(void *pt_regs) { - if (unlikely(audit_context())) { + if (unlikely(current->audit_context)) { int success = is_syscall_success(pt_regs); long return_code = regs_return_value(pt_regs); @@ -352,9 +285,13 @@ static inline void audit_getname(struct filename *name) } static inline void audit_inode(struct filename *name, const struct dentry *dentry, - unsigned int aflags) { - if (unlikely(!audit_dummy_context())) - __audit_inode(name, dentry, aflags); + unsigned int parent) { + if (unlikely(!audit_dummy_context())) { + unsigned int flags = 0; + if (parent) + flags |= AUDIT_INODE_PARENT; + __audit_inode(name, dentry, flags); + } } static inline void audit_file(struct file *file) { @@ -376,6 +313,16 @@ static inline void audit_inode_child(struct inode *parent, } void audit_core_dumps(long signr); +static inline void audit_seccomp(unsigned long syscall, long signr, int code) +{ + if (!audit_enabled) + return; + + /* Force a record to be reported if a signal was delivered. 
*/ + if (signr || unlikely(!audit_dummy_context())) + __audit_seccomp(syscall, signr, code); +} + static inline void audit_ptrace(struct task_struct *t) { if (unlikely(!audit_dummy_context())) @@ -383,6 +330,21 @@ static inline void audit_ptrace(struct task_struct *t) } /* Private API (for audit.c only) */ +extern unsigned int audit_serial(void); +extern int auditsc_get_stamp(struct audit_context *ctx, + struct timespec *t, unsigned int *serial); +extern int __intentional_overflow(-1) audit_set_loginuid(kuid_t loginuid); + +static inline kuid_t audit_get_loginuid(struct task_struct *tsk) +{ + return tsk->loginuid; +} + +static inline unsigned int audit_get_sessionid(struct task_struct *tsk) +{ + return tsk->sessionid; +} + extern void __audit_ipc_obj(struct kern_ipc_perm *ipcp); extern void __audit_ipc_set_perm(unsigned long qbytes, uid_t uid, gid_t gid, umode_t mode); extern void __audit_bprm(struct linux_binprm *bprm); @@ -390,7 +352,7 @@ extern int __audit_socketcall(int nargs, unsigned long *args); extern int __audit_sockaddr(int len, void *addr); extern void __audit_fd_pair(int fd1, int fd2); extern void __audit_mq_open(int oflag, umode_t mode, struct mq_attr *attr); -extern void __audit_mq_sendrecv(mqd_t mqdes, size_t msg_len, unsigned int msg_prio, const struct timespec64 *abs_timeout); +extern void __audit_mq_sendrecv(mqd_t mqdes, size_t msg_len, unsigned int msg_prio, const struct timespec *abs_timeout); extern void __audit_mq_notify(mqd_t mqdes, const struct sigevent *notification); extern void __audit_mq_getsetattr(mqd_t mqdes, struct mq_attr *mqstat); extern int __audit_log_bprm_fcaps(struct linux_binprm *bprm, @@ -398,12 +360,6 @@ extern int __audit_log_bprm_fcaps(struct linux_binprm *bprm, const struct cred *old); extern void __audit_log_capset(const struct cred *new, const struct cred *old); extern void __audit_mmap_fd(int fd, int flags); -extern void __audit_log_kern_module(char *name); -extern void __audit_fanotify(unsigned int response); -extern 
void __audit_tk_injoffset(struct timespec64 offset); -extern void __audit_ntp_log(const struct audit_ntp_data *ad); -extern void __audit_log_nfcfg(const char *name, u8 af, unsigned int nentries, - enum audit_nfcfgop op, gfp_t gfp); static inline void audit_ipc_obj(struct kern_ipc_perm *ipcp) { @@ -431,20 +387,6 @@ static inline int audit_socketcall(int nargs, unsigned long *args) return __audit_socketcall(nargs, args); return 0; } - -static inline int audit_socketcall_compat(int nargs, u32 *args) -{ - unsigned long a[AUDITSC_ARGS]; - int i; - - if (audit_dummy_context()) - return 0; - - for (i = 0; i < nargs; i++) - a[i] = (unsigned long)args[i]; - return __audit_socketcall(nargs, a); -} - static inline int audit_sockaddr(int len, void *addr) { if (unlikely(!audit_dummy_context())) @@ -456,7 +398,7 @@ static inline void audit_mq_open(int oflag, umode_t mode, struct mq_attr *attr) if (unlikely(!audit_dummy_context())) __audit_mq_open(oflag, mode, attr); } -static inline void audit_mq_sendrecv(mqd_t mqdes, size_t msg_len, unsigned int msg_prio, const struct timespec64 *abs_timeout) +static inline void audit_mq_sendrecv(mqd_t mqdes, size_t msg_len, unsigned int msg_prio, const struct timespec *abs_timeout) { if (unlikely(!audit_dummy_context())) __audit_mq_sendrecv(mqdes, msg_len, msg_prio, abs_timeout); @@ -494,59 +436,6 @@ static inline void audit_mmap_fd(int fd, int flags) __audit_mmap_fd(fd, flags); } -static inline void audit_log_kern_module(char *name) -{ - if (!audit_dummy_context()) - __audit_log_kern_module(name); -} - -static inline void audit_fanotify(unsigned int response) -{ - if (!audit_dummy_context()) - __audit_fanotify(response); -} - -static inline void audit_tk_injoffset(struct timespec64 offset) -{ - /* ignore no-op events */ - if (offset.tv_sec == 0 && offset.tv_nsec == 0) - return; - - if (!audit_dummy_context()) - __audit_tk_injoffset(offset); -} - -static inline void audit_ntp_init(struct audit_ntp_data *ad) -{ - memset(ad, 0, sizeof(*ad)); -} 
- -static inline void audit_ntp_set_old(struct audit_ntp_data *ad, - enum audit_ntp_type type, long long val) -{ - ad->vals[type].oldval = val; -} - -static inline void audit_ntp_set_new(struct audit_ntp_data *ad, - enum audit_ntp_type type, long long val) -{ - ad->vals[type].newval = val; -} - -static inline void audit_ntp_log(const struct audit_ntp_data *ad) -{ - if (!audit_dummy_context()) - __audit_ntp_log(ad); -} - -static inline void audit_log_nfcfg(const char *name, u8 af, - unsigned int nentries, - enum audit_nfcfgop op, gfp_t gfp) -{ - if (audit_enabled) - __audit_log_nfcfg(name, af, nentries, op, gfp); -} - extern int audit_n_rules; extern int audit_signals; #else /* CONFIG_AUDITSYSCALL */ @@ -566,21 +455,23 @@ static inline bool audit_dummy_context(void) { return true; } -static inline void audit_set_context(struct task_struct *task, struct audit_context *ctx) -{ } -static inline struct audit_context *audit_context(void) -{ - return NULL; -} static inline struct filename *audit_reusename(const __user char *name) { return NULL; } static inline void audit_getname(struct filename *name) { } +static inline void __audit_inode(struct filename *name, + const struct dentry *dentry, + unsigned int flags) +{ } +static inline void __audit_inode_child(struct inode *parent, + const struct dentry *dentry, + const unsigned char type) +{ } static inline void audit_inode(struct filename *name, const struct dentry *dentry, - unsigned int aflags) + unsigned int parent) { } static inline void audit_file(struct file *file) { @@ -594,11 +485,23 @@ static inline void audit_inode_child(struct inode *parent, { } static inline void audit_core_dumps(long signr) { } +static inline void __audit_seccomp(unsigned long syscall, long signr, int code) +{ } static inline void audit_seccomp(unsigned long syscall, long signr, int code) { } -static inline void audit_seccomp_actions_logged(const char *names, - const char *old_names, int res) -{ } +static inline int auditsc_get_stamp(struct 
audit_context *ctx, + struct timespec *t, unsigned int *serial) +{ + return 0; +} +static inline kuid_t audit_get_loginuid(struct task_struct *tsk) +{ + return INVALID_UID; +} +static inline unsigned int audit_get_sessionid(struct task_struct *tsk) +{ + return -1; +} static inline void audit_ipc_obj(struct kern_ipc_perm *ipcp) { } static inline void audit_ipc_set_perm(unsigned long qbytes, uid_t uid, @@ -610,12 +513,6 @@ static inline int audit_socketcall(int nargs, unsigned long *args) { return 0; } - -static inline int audit_socketcall_compat(int nargs, u32 *args) -{ - return 0; -} - static inline void audit_fd_pair(int fd1, int fd2) { } static inline int audit_sockaddr(int len, void *addr) @@ -626,7 +523,7 @@ static inline void audit_mq_open(int oflag, umode_t mode, struct mq_attr *attr) { } static inline void audit_mq_sendrecv(mqd_t mqdes, size_t msg_len, unsigned int msg_prio, - const struct timespec64 *abs_timeout) + const struct timespec *abs_timeout) { } static inline void audit_mq_notify(mqd_t mqdes, const struct sigevent *notification) @@ -644,39 +541,8 @@ static inline void audit_log_capset(const struct cred *new, { } static inline void audit_mmap_fd(int fd, int flags) { } - -static inline void audit_log_kern_module(char *name) -{ -} - -static inline void audit_fanotify(unsigned int response) -{ } - -static inline void audit_tk_injoffset(struct timespec64 offset) -{ } - -static inline void audit_ntp_init(struct audit_ntp_data *ad) -{ } - -static inline void audit_ntp_set_old(struct audit_ntp_data *ad, - enum audit_ntp_type type, long long val) -{ } - -static inline void audit_ntp_set_new(struct audit_ntp_data *ad, - enum audit_ntp_type type, long long val) -{ } - -static inline void audit_ntp_log(const struct audit_ntp_data *ad) -{ } - static inline void audit_ptrace(struct task_struct *t) { } - -static inline void audit_log_nfcfg(const char *name, u8 af, - unsigned int nentries, - enum audit_nfcfgop op, gfp_t gfp) -{ } - #define audit_n_rules 0 #define 
audit_signals 0 #endif /* CONFIG_AUDITSYSCALL */ @@ -686,4 +552,10 @@ static inline bool audit_loginuid_set(struct task_struct *tsk) return uid_valid(audit_get_loginuid(tsk)); } +static inline __nocapture(2) +void audit_log_string(struct audit_buffer *ab, const char *buf) +{ + audit_log_n_string(ab, buf, strlen(buf)); +} + #endif diff --git a/include/linux/auto_dev-ioctl.h b/include/linux/auto_dev-ioctl.h index 6e1ca6f95f..28c15050eb 100644 --- a/include/linux/auto_dev-ioctl.h +++ b/include/linux/auto_dev-ioctl.h @@ -1,7 +1,10 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * Copyright 2008 Red Hat, Inc. All rights reserved. * Copyright 2008 Ian Kent + * + * This file is part of the Linux kernel and is made available under + * the terms of the GNU General Public License, version 2, or at your + * option, any later version, incorporated herein by reference. */ #ifndef _LINUX_AUTO_DEV_IOCTL_H diff --git a/include/linux/auto_fs.h b/include/linux/auto_fs.h index 893f952ca4..b8f814c95c 100644 --- a/include/linux/auto_fs.h +++ b/include/linux/auto_fs.h @@ -1,6 +1,9 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * Copyright 1997 Transmeta Corporation - All Rights Reserved + * + * This file is part of the Linux kernel and is made available under + * the terms of the GNU General Public License, version 2, or at your + * option, any later version, incorporated herein by reference. 
*/ #ifndef _LINUX_AUTO_FS_H diff --git a/include/linux/auxvec.h b/include/linux/auxvec.h index f68d0ec2d7..3e0fbe4417 100644 --- a/include/linux/auxvec.h +++ b/include/linux/auxvec.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_AUXVEC_H #define _LINUX_AUXVEC_H diff --git a/include/linux/average.h b/include/linux/average.h index a1a8f09631..3de0da882d 100644 --- a/include/linux/average.h +++ b/include/linux/average.h @@ -1,71 +1,45 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_AVERAGE_H #define _LINUX_AVERAGE_H -#include -#include -#include +/* Exponentially weighted moving average (EWMA) */ -/* - * Exponentially weighted moving average (EWMA) - * - * This implements a fixed-precision EWMA algorithm, with both the - * precision and fall-off coefficient determined at compile-time - * and built into the generated helper funtions. - * - * The first argument to the macro is the name that will be used - * for the struct and helper functions. - * - * The second argument, the precision, expresses how many bits are - * used for the fractional part of the fixed-precision values. - * - * The third argument, the weight reciprocal, determines how the - * new values will be weighed vs. the old state, new values will - * get weight 1/weight_rcp and old values 1-1/weight_rcp. Note - * that this parameter must be a power of two for efficiency. - */ - -#define DECLARE_EWMA(name, _precision, _weight_rcp) \ +#define DECLARE_EWMA(name, _factor, _weight) \ struct ewma_##name { \ unsigned long internal; \ }; \ static inline void ewma_##name##_init(struct ewma_##name *e) \ { \ - BUILD_BUG_ON(!__builtin_constant_p(_precision)); \ - BUILD_BUG_ON(!__builtin_constant_p(_weight_rcp)); \ - /* \ - * Even if you want to feed it just 0/1 you should have \ - * some bits for the non-fractional part... 
\ - */ \ - BUILD_BUG_ON((_precision) > 30); \ - BUILD_BUG_ON_NOT_POWER_OF_2(_weight_rcp); \ + BUILD_BUG_ON(!__builtin_constant_p(_factor)); \ + BUILD_BUG_ON(!__builtin_constant_p(_weight)); \ + BUILD_BUG_ON_NOT_POWER_OF_2(_factor); \ + BUILD_BUG_ON_NOT_POWER_OF_2(_weight); \ e->internal = 0; \ } \ static inline unsigned long \ ewma_##name##_read(struct ewma_##name *e) \ { \ - BUILD_BUG_ON(!__builtin_constant_p(_precision)); \ - BUILD_BUG_ON(!__builtin_constant_p(_weight_rcp)); \ - BUILD_BUG_ON((_precision) > 30); \ - BUILD_BUG_ON_NOT_POWER_OF_2(_weight_rcp); \ - return e->internal >> (_precision); \ + BUILD_BUG_ON(!__builtin_constant_p(_factor)); \ + BUILD_BUG_ON(!__builtin_constant_p(_weight)); \ + BUILD_BUG_ON_NOT_POWER_OF_2(_factor); \ + BUILD_BUG_ON_NOT_POWER_OF_2(_weight); \ + return e->internal >> ilog2(_factor); \ } \ static inline void ewma_##name##_add(struct ewma_##name *e, \ unsigned long val) \ { \ - unsigned long internal = READ_ONCE(e->internal); \ - unsigned long weight_rcp = ilog2(_weight_rcp); \ - unsigned long precision = _precision; \ + unsigned long internal = ACCESS_ONCE(e->internal); \ + unsigned long weight = ilog2(_weight); \ + unsigned long factor = ilog2(_factor); \ \ - BUILD_BUG_ON(!__builtin_constant_p(_precision)); \ - BUILD_BUG_ON(!__builtin_constant_p(_weight_rcp)); \ - BUILD_BUG_ON((_precision) > 30); \ - BUILD_BUG_ON_NOT_POWER_OF_2(_weight_rcp); \ + BUILD_BUG_ON(!__builtin_constant_p(_factor)); \ + BUILD_BUG_ON(!__builtin_constant_p(_weight)); \ + BUILD_BUG_ON_NOT_POWER_OF_2(_factor); \ + BUILD_BUG_ON_NOT_POWER_OF_2(_weight); \ \ - WRITE_ONCE(e->internal, internal ? \ - (((internal << weight_rcp) - internal) + \ - (val << precision)) >> weight_rcp : \ - (val << precision)); \ + ACCESS_ONCE_RW(e->internal) = internal ? 
\ + (((internal << weight) - internal) + \ + (val << factor)) >> weight : \ + (val << factor); \ } #endif /* _LINUX_AVERAGE_H */ diff --git a/include/linux/b1pcmcia.h b/include/linux/b1pcmcia.h new file mode 100644 index 0000000000..12a867c606 --- /dev/null +++ b/include/linux/b1pcmcia.h @@ -0,0 +1,21 @@ +/* $Id: b1pcmcia.h,v 1.1.8.2 2001/09/23 22:25:05 kai Exp $ + * + * Exported functions of module b1pcmcia to be called by + * avm_cs card services module. + * + * Copyright 1999 by Carsten Paeth (calle@calle.in-berlin.de) + * + * This software may be used and distributed according to the terms + * of the GNU General Public License, incorporated herein by reference. + * + */ + +#ifndef _B1PCMCIA_H_ +#define _B1PCMCIA_H_ + +int b1pcmcia_addcard_b1(unsigned int port, unsigned irq); +int b1pcmcia_addcard_m1(unsigned int port, unsigned irq); +int b1pcmcia_addcard_m2(unsigned int port, unsigned irq); +int b1pcmcia_delcard(unsigned int port, unsigned irq); + +#endif /* _B1PCMCIA_H_ */ diff --git a/include/linux/backing-dev-defs.h b/include/linux/backing-dev-defs.h index 33207004cf..c357f27d54 100644 --- a/include/linux/backing-dev-defs.h +++ b/include/linux/backing-dev-defs.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef __LINUX_BACKING_DEV_DEFS_H #define __LINUX_BACKING_DEV_DEFS_H @@ -11,8 +10,6 @@ #include #include #include -#include -#include struct page; struct device; @@ -25,7 +22,6 @@ enum wb_state { WB_registered, /* bdi_register() was done */ WB_writeback_running, /* Writeback is in progress */ WB_has_dirty_io, /* Dirty inodes on ->b_{dirty|io|more_io} */ - WB_start_all, /* nr_pages == 0 (all) work pending */ }; enum wb_congested_state { @@ -33,6 +29,8 @@ enum wb_congested_state { WB_sync_congested, /* The sync queue is getting full */ }; +typedef int (congested_fn)(void *, int); + enum wb_stat_item { WB_RECLAIMABLE, WB_WRITEBACK, @@ -44,47 +42,23 @@ enum wb_stat_item { #define WB_STAT_BATCH (8*(1+ilog2(nr_cpu_ids))) /* - * why some writeback 
work was initiated + * For cgroup writeback, multiple wb's may map to the same blkcg. Those + * wb's can operate mostly independently but should share the congested + * state. To facilitate such sharing, the congested state is tracked using + * the following struct which is created on demand, indexed by blkcg ID on + * its bdi, and refcounted. */ -enum wb_reason { - WB_REASON_BACKGROUND, - WB_REASON_VMSCAN, - WB_REASON_SYNC, - WB_REASON_PERIODIC, - WB_REASON_LAPTOP_TIMER, - WB_REASON_FS_FREE_SPACE, - /* - * There is no bdi forker thread any more and works are done - * by emergency worker, however, this is TPs userland visible - * and we'll be exposing exactly the same information, - * so it has a mismatch name. - */ - WB_REASON_FORKER_THREAD, - WB_REASON_FOREIGN_FLUSH, +struct bdi_writeback_congested { + unsigned long state; /* WB_[a]sync_congested flags */ + atomic_t refcnt; /* nr of attached wb's and blkg */ - WB_REASON_MAX, +#ifdef CONFIG_CGROUP_WRITEBACK + struct backing_dev_info *bdi; /* the associated bdi */ + int blkcg_id; /* ID of the associated blkcg */ + struct rb_node rb_node; /* on bdi->cgwb_congestion_tree */ +#endif }; -struct wb_completion { - atomic_t cnt; - wait_queue_head_t *waitq; -}; - -#define __WB_COMPLETION_INIT(_waitq) \ - (struct wb_completion){ .cnt = ATOMIC_INIT(1), .waitq = (_waitq) } - -/* - * If one wants to wait for one or more wb_writeback_works, each work's - * ->done should be set to a wb_completion defined using the following - * macro. Once all work items are issued with wb_queue_work(), the caller - * can wait for the completion of all using wb_wait_for_completion(). Work - * items which are waited upon aren't freed automatically on completion. - */ -#define WB_COMPLETION_INIT(bdi) __WB_COMPLETION_INIT(&(bdi)->wb_waitq) - -#define DEFINE_WB_COMPLETION(cmpl, bdi) \ - struct wb_completion cmpl = WB_COMPLETION_INIT(bdi) - /* * Each wb (bdi_writeback) can perform writeback operations, is measured * and throttled, independently. 
Without cgroup writeback, each bdi @@ -116,10 +90,9 @@ struct bdi_writeback { struct list_head b_dirty_time; /* time stamps are dirty */ spinlock_t list_lock; /* protects the b_* lists */ - atomic_t writeback_inodes; /* number of inodes under writeback */ struct percpu_counter stat[NR_WB_STAT_ITEMS]; - unsigned long congested; /* WB_[a]sync_congested flags */ + struct bdi_writeback_congested *congested; unsigned long bw_time_stamp; /* last time write bw is updated */ unsigned long dirtied_stamp; @@ -138,14 +111,10 @@ struct bdi_writeback { struct fprop_local_percpu completions; int dirty_exceeded; - enum wb_reason start_all_reason; spinlock_t work_lock; /* protects work_list & dwork scheduling */ struct list_head work_list; struct delayed_work dwork; /* work item used for writeback */ - struct delayed_work bw_dwork; /* work item used for bandwidth estimate */ - - unsigned long dirty_sleep; /* last wait */ struct list_head bdi_node; /* anchored at bdi->wb_list */ @@ -156,8 +125,6 @@ struct bdi_writeback { struct cgroup_subsys_state *blkcg_css; /* and blkcg */ struct list_head memcg_node; /* anchored at memcg->cgwb_list */ struct list_head blkcg_node; /* anchored at blkcg->cgwb_list */ - struct list_head b_attached; /* attached inodes, protected by list_lock */ - struct list_head offline_node; /* anchored at offline_cgwbs */ union { struct work_struct release_work; @@ -167,14 +134,14 @@ struct bdi_writeback { }; struct backing_dev_info { - u64 id; - struct rb_node rb_node; /* keyed by ->id */ struct list_head bdi_list; unsigned long ra_pages; /* max readahead in PAGE_SIZE units */ - unsigned long io_pages; /* max allowed IO size */ - - struct kref refcnt; /* Reference counter for the structure */ unsigned int capabilities; /* Device capabilities */ + congested_fn *congested_fn; /* Function pointer if device is md/dm */ + void *congested_data; /* Pointer to aux data for congested func */ + + char *name; + unsigned int min_ratio; unsigned int max_ratio, max_prop_frac; 
@@ -188,19 +155,21 @@ struct backing_dev_info { struct list_head wb_list; /* list of all wbs */ #ifdef CONFIG_CGROUP_WRITEBACK struct radix_tree_root cgwb_tree; /* radix tree of active cgroup wbs */ - struct mutex cgwb_release_mutex; /* protect shutdown of wb structs */ - struct rw_semaphore wb_switch_rwsem; /* no cgwb switch while syncing */ + struct rb_root cgwb_congested_tree; /* their congested states */ + atomic_t usage_cnt; /* counts both cgwbs and cgwb_contested's */ +#else + struct bdi_writeback_congested *wb_congested; #endif wait_queue_head_t wb_waitq; struct device *dev; - char dev_name[64]; struct device *owner; struct timer_list laptop_mode_wb_timer; #ifdef CONFIG_DEBUG_FS struct dentry *debug_dir; + struct dentry *debug_stats; #endif }; @@ -209,13 +178,18 @@ enum { BLK_RW_SYNC = 1, }; -void clear_bdi_congested(struct backing_dev_info *bdi, int sync); -void set_bdi_congested(struct backing_dev_info *bdi, int sync); +void clear_wb_congested(struct bdi_writeback_congested *congested, int sync); +void set_wb_congested(struct bdi_writeback_congested *congested, int sync); -struct wb_lock_cookie { - bool locked; - unsigned long flags; -}; +static inline void clear_bdi_congested(struct backing_dev_info *bdi, int sync) +{ + clear_wb_congested(bdi->wb.congested, sync); +} + +static inline void set_bdi_congested(struct backing_dev_info *bdi, int sync) +{ + set_wb_congested(bdi->wb.congested, sync); +} #ifdef CONFIG_CGROUP_WRITEBACK @@ -240,32 +214,14 @@ static inline void wb_get(struct bdi_writeback *wb) percpu_ref_get(&wb->refcnt); } -/** - * wb_put - decrement a wb's refcount - * @wb: bdi_writeback to put - * @nr: number of references to put - */ -static inline void wb_put_many(struct bdi_writeback *wb, unsigned long nr) -{ - if (WARN_ON_ONCE(!wb->bdi)) { - /* - * A driver bug might cause a file to be removed before bdi was - * initialized. 
- */ - return; - } - - if (wb != &wb->bdi->wb) - percpu_ref_put_many(&wb->refcnt, nr); -} - /** * wb_put - decrement a wb's refcount * @wb: bdi_writeback to put */ static inline void wb_put(struct bdi_writeback *wb) { - wb_put_many(wb, 1); + if (wb != &wb->bdi->wb) + percpu_ref_put(&wb->refcnt); } /** @@ -294,10 +250,6 @@ static inline void wb_put(struct bdi_writeback *wb) { } -static inline void wb_put_many(struct bdi_writeback *wb, unsigned long nr) -{ -} - static inline bool wb_dying(struct bdi_writeback *wb) { return false; diff --git a/include/linux/backing-dev.h b/include/linux/backing-dev.h index ac7f231b88..43b93a947e 100644 --- a/include/linux/backing-dev.h +++ b/include/linux/backing-dev.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ /* * include/linux/backing-dev.h * @@ -13,42 +12,34 @@ #include #include #include -#include #include #include #include #include -static inline struct backing_dev_info *bdi_get(struct backing_dev_info *bdi) -{ - kref_get(&bdi->refcnt); - return bdi; -} +int __must_check bdi_init(struct backing_dev_info *bdi); +void bdi_exit(struct backing_dev_info *bdi); -struct backing_dev_info *bdi_get_by_id(u64 id); -void bdi_put(struct backing_dev_info *bdi); - -__printf(2, 3) -int bdi_register(struct backing_dev_info *bdi, const char *fmt, ...); -__printf(2, 0) -int bdi_register_va(struct backing_dev_info *bdi, const char *fmt, - va_list args); -void bdi_set_owner(struct backing_dev_info *bdi, struct device *owner); +__printf(3, 4) +int bdi_register(struct backing_dev_info *bdi, struct device *parent, + const char *fmt, ...); +int bdi_register_dev(struct backing_dev_info *bdi, dev_t dev); +int bdi_register_owner(struct backing_dev_info *bdi, struct device *owner); void bdi_unregister(struct backing_dev_info *bdi); -struct backing_dev_info *bdi_alloc(int node_id); +int __must_check bdi_setup_and_register(struct backing_dev_info *, char *); +void bdi_destroy(struct backing_dev_info *bdi); +void wb_start_writeback(struct 
bdi_writeback *wb, long nr_pages, + bool range_cyclic, enum wb_reason reason); void wb_start_background_writeback(struct bdi_writeback *wb); void wb_workfn(struct work_struct *work); void wb_wakeup_delayed(struct bdi_writeback *wb); -void wb_wait_for_completion(struct wb_completion *done); - extern spinlock_t bdi_lock; extern struct list_head bdi_list; extern struct workqueue_struct *bdi_wq; -extern struct workqueue_struct *bdi_async_bio_wq; static inline bool wb_has_dirty_io(struct bdi_writeback *wb) { @@ -67,35 +58,68 @@ static inline bool bdi_has_dirty_io(struct backing_dev_info *bdi) static inline void __add_wb_stat(struct bdi_writeback *wb, enum wb_stat_item item, s64 amount) { - percpu_counter_add_batch(&wb->stat[item], amount, WB_STAT_BATCH); + __percpu_counter_add(&wb->stat[item], amount, WB_STAT_BATCH); } -static inline void inc_wb_stat(struct bdi_writeback *wb, enum wb_stat_item item) +static inline void __inc_wb_stat(struct bdi_writeback *wb, + enum wb_stat_item item) { __add_wb_stat(wb, item, 1); } -static inline void dec_wb_stat(struct bdi_writeback *wb, enum wb_stat_item item) +static inline void inc_wb_stat(struct bdi_writeback *wb, enum wb_stat_item item) +{ + unsigned long flags; + + local_irq_save(flags); + __inc_wb_stat(wb, item); + local_irq_restore(flags); +} + +static inline void __dec_wb_stat(struct bdi_writeback *wb, + enum wb_stat_item item) { __add_wb_stat(wb, item, -1); } +static inline void dec_wb_stat(struct bdi_writeback *wb, enum wb_stat_item item) +{ + unsigned long flags; + + local_irq_save(flags); + __dec_wb_stat(wb, item); + local_irq_restore(flags); +} + static inline s64 wb_stat(struct bdi_writeback *wb, enum wb_stat_item item) { return percpu_counter_read_positive(&wb->stat[item]); } -static inline s64 wb_stat_sum(struct bdi_writeback *wb, enum wb_stat_item item) +static inline s64 __wb_stat_sum(struct bdi_writeback *wb, + enum wb_stat_item item) { return percpu_counter_sum_positive(&wb->stat[item]); } +static inline s64 
wb_stat_sum(struct bdi_writeback *wb, enum wb_stat_item item) +{ + s64 sum; + unsigned long flags; + + local_irq_save(flags); + sum = __wb_stat_sum(wb, item); + local_irq_restore(flags); + + return sum; +} + extern void wb_writeout_inc(struct bdi_writeback *wb); /* * maximal error of a stat counter. */ -static inline unsigned long wb_stat_error(void) +static inline unsigned long wb_stat_error(struct bdi_writeback *wb) { #ifdef CONFIG_SMP return nr_cpu_ids * WB_STAT_BATCH; @@ -110,14 +134,30 @@ int bdi_set_max_ratio(struct backing_dev_info *bdi, unsigned int max_ratio); /* * Flags in backing_dev_info::capability * - * BDI_CAP_WRITEBACK: Supports dirty page writeback, and dirty pages - * should contribute to accounting - * BDI_CAP_WRITEBACK_ACCT: Automatically account writeback pages - * BDI_CAP_STRICTLIMIT: Keep number of dirty pages below bdi threshold + * The first three flags control whether dirty pages will contribute to the + * VM's accounting and whether writepages() should be called for dirty pages + * (something that would not, for example, be appropriate for ramfs) + * + * WARNING: these flags are closely related and should not normally be + * used separately. The BDI_CAP_NO_ACCT_AND_WRITEBACK combines these + * three flags into a single convenience macro. + * + * BDI_CAP_NO_ACCT_DIRTY: Dirty pages shouldn't contribute to accounting + * BDI_CAP_NO_WRITEBACK: Don't write pages back + * BDI_CAP_NO_ACCT_WB: Don't automatically account writeback pages + * BDI_CAP_STRICTLIMIT: Keep number of dirty pages below bdi threshold. + * + * BDI_CAP_CGROUP_WRITEBACK: Supports cgroup-aware writeback. 
*/ -#define BDI_CAP_WRITEBACK (1 << 0) -#define BDI_CAP_WRITEBACK_ACCT (1 << 1) -#define BDI_CAP_STRICTLIMIT (1 << 2) +#define BDI_CAP_NO_ACCT_DIRTY 0x00000001 +#define BDI_CAP_NO_WRITEBACK 0x00000002 +#define BDI_CAP_NO_ACCT_WB 0x00000004 +#define BDI_CAP_STABLE_WRITES 0x00000008 +#define BDI_CAP_STRICTLIMIT 0x00000010 +#define BDI_CAP_CGROUP_WRITEBACK 0x00000020 + +#define BDI_CAP_NO_ACCT_AND_WRITEBACK \ + (BDI_CAP_NO_WRITEBACK | BDI_CAP_NO_ACCT_DIRTY | BDI_CAP_NO_ACCT_WB) extern struct backing_dev_info noop_backing_dev_info; @@ -143,22 +183,55 @@ static inline struct backing_dev_info *inode_to_bdi(struct inode *inode) sb = inode->i_sb; #ifdef CONFIG_BLOCK if (sb_is_blkdev_sb(sb)) - return I_BDEV(inode)->bd_disk->bdi; + return blk_get_backing_dev_info(I_BDEV(inode)); #endif return sb->s_bdi; } static inline int wb_congested(struct bdi_writeback *wb, int cong_bits) { - return wb->congested & cong_bits; + struct backing_dev_info *bdi = wb->bdi; + + if (bdi->congested_fn) + return bdi->congested_fn(bdi->congested_data, cong_bits); + return wb->congested->state & cong_bits; } long congestion_wait(int sync, long timeout); -long wait_iff_congested(int sync, long timeout); +long wait_iff_congested(struct pglist_data *pgdat, int sync, long timeout); +int pdflush_proc_obsolete(struct ctl_table *table, int write, + void __user *buffer, size_t *lenp, loff_t *ppos); -static inline bool mapping_can_writeback(struct address_space *mapping) +static inline bool bdi_cap_stable_pages_required(struct backing_dev_info *bdi) { - return inode_to_bdi(mapping->host)->capabilities & BDI_CAP_WRITEBACK; + return bdi->capabilities & BDI_CAP_STABLE_WRITES; +} + +static inline bool bdi_cap_writeback_dirty(struct backing_dev_info *bdi) +{ + return !(bdi->capabilities & BDI_CAP_NO_WRITEBACK); +} + +static inline bool bdi_cap_account_dirty(struct backing_dev_info *bdi) +{ + return !(bdi->capabilities & BDI_CAP_NO_ACCT_DIRTY); +} + +static inline bool bdi_cap_account_writeback(struct 
backing_dev_info *bdi) +{ + /* Paranoia: BDI_CAP_NO_WRITEBACK implies BDI_CAP_NO_ACCT_WB */ + return !(bdi->capabilities & (BDI_CAP_NO_ACCT_WB | + BDI_CAP_NO_WRITEBACK)); +} + +static inline bool mapping_cap_writeback_dirty(struct address_space *mapping) +{ + return bdi_cap_writeback_dirty(inode_to_bdi(mapping->host)); +} + +static inline bool mapping_cap_account_dirty(struct address_space *mapping) +{ + return bdi_cap_account_dirty(inode_to_bdi(mapping->host)); } static inline int bdi_sched_wait(void *word) @@ -169,8 +242,9 @@ static inline int bdi_sched_wait(void *word) #ifdef CONFIG_CGROUP_WRITEBACK -struct bdi_writeback *wb_get_lookup(struct backing_dev_info *bdi, - struct cgroup_subsys_state *memcg_css); +struct bdi_writeback_congested * +wb_congested_get_create(struct backing_dev_info *bdi, int blkcg_id, gfp_t gfp); +void wb_congested_put(struct bdi_writeback_congested *congested); struct bdi_writeback *wb_get_create(struct backing_dev_info *bdi, struct cgroup_subsys_state *memcg_css, gfp_t gfp); @@ -182,9 +256,9 @@ int inode_congested(struct inode *inode, int cong_bits); * inode_cgwb_enabled - test whether cgroup writeback is enabled on an inode * @inode: inode of interest * - * Cgroup writeback requires support from the filesystem. Also, both memcg and - * iocg have to be on the default hierarchy. Test whether all conditions are - * met. + * cgroup writeback requires support from both the bdi and filesystem. + * Also, both memcg and iocg have to be on the default hierarchy. Test + * whether all conditions are met. * * Note that the test result may change dynamically on the same inode * depending on how memcg and iocg are configured. 
@@ -195,7 +269,8 @@ static inline bool inode_cgwb_enabled(struct inode *inode) return cgroup_subsys_on_dfl(memory_cgrp_subsys) && cgroup_subsys_on_dfl(io_cgrp_subsys) && - (bdi->capabilities & BDI_CAP_WRITEBACK) && + bdi_cap_account_dirty(bdi) && + (bdi->capabilities & BDI_CAP_CGROUP_WRITEBACK) && (inode->i_sb->s_iflags & SB_I_CGROUPWB); } @@ -274,63 +349,52 @@ static inline bool inode_to_wb_is_valid(struct inode *inode) * @inode: inode of interest * * Returns the wb @inode is currently associated with. The caller must be - * holding either @inode->i_lock, the i_pages lock, or the + * holding either @inode->i_lock, @inode->i_mapping->tree_lock, or the * associated wb's list_lock. */ -static inline struct bdi_writeback *inode_to_wb(const struct inode *inode) +static inline struct bdi_writeback *inode_to_wb(struct inode *inode) { #ifdef CONFIG_LOCKDEP WARN_ON_ONCE(debug_locks && (!lockdep_is_held(&inode->i_lock) && - !lockdep_is_held(&inode->i_mapping->i_pages.xa_lock) && + !lockdep_is_held(&inode->i_mapping->tree_lock) && !lockdep_is_held(&inode->i_wb->list_lock))); #endif return inode->i_wb; } -static inline struct bdi_writeback *inode_to_wb_wbc( - struct inode *inode, - struct writeback_control *wbc) -{ - /* - * If wbc does not have inode attached, it means cgroup writeback was - * disabled when wbc started. Just use the default wb in that case. - */ - return wbc->wb ? wbc->wb : &inode_to_bdi(inode)->wb; -} - /** * unlocked_inode_to_wb_begin - begin unlocked inode wb access transaction * @inode: target inode - * @cookie: output param, to be passed to the end function + * @lockedp: temp bool output param, to be passed to the end function * * The caller wants to access the wb associated with @inode but isn't - * holding inode->i_lock, the i_pages lock or wb->list_lock. This + * holding inode->i_lock, mapping->tree_lock or wb->list_lock. 
This * function determines the wb associated with @inode and ensures that the * association doesn't change until the transaction is finished with * unlocked_inode_to_wb_end(). * - * The caller must call unlocked_inode_to_wb_end() with *@cookie afterwards and - * can't sleep during the transaction. IRQs may or may not be disabled on - * return. + * The caller must call unlocked_inode_to_wb_end() with *@lockdep + * afterwards and can't sleep during transaction. IRQ may or may not be + * disabled on return. */ static inline struct bdi_writeback * -unlocked_inode_to_wb_begin(struct inode *inode, struct wb_lock_cookie *cookie) +unlocked_inode_to_wb_begin(struct inode *inode, bool *lockedp) { rcu_read_lock(); /* - * Paired with store_release in inode_switch_wbs_work_fn() and + * Paired with store_release in inode_switch_wb_work_fn() and * ensures that we see the new wb if we see cleared I_WB_SWITCH. */ - cookie->locked = smp_load_acquire(&inode->i_state) & I_WB_SWITCH; + *lockedp = smp_load_acquire(&inode->i_state) & I_WB_SWITCH; - if (unlikely(cookie->locked)) - xa_lock_irqsave(&inode->i_mapping->i_pages, cookie->flags); + if (unlikely(*lockedp)) + spin_lock_irq(&inode->i_mapping->tree_lock); /* - * Protected by either !I_WB_SWITCH + rcu_read_lock() or the i_pages - * lock. inode_to_wb() will bark. Deref directly. + * Protected by either !I_WB_SWITCH + rcu_read_lock() or tree_lock. + * inode_to_wb() will bark. Deref directly. 
*/ return inode->i_wb; } @@ -338,13 +402,12 @@ unlocked_inode_to_wb_begin(struct inode *inode, struct wb_lock_cookie *cookie) /** * unlocked_inode_to_wb_end - end inode wb access transaction * @inode: target inode - * @cookie: @cookie from unlocked_inode_to_wb_begin() + * @locked: *@lockedp from unlocked_inode_to_wb_begin() */ -static inline void unlocked_inode_to_wb_end(struct inode *inode, - struct wb_lock_cookie *cookie) +static inline void unlocked_inode_to_wb_end(struct inode *inode, bool locked) { - if (unlikely(cookie->locked)) - xa_unlock_irqrestore(&inode->i_mapping->i_pages, cookie->flags); + if (unlikely(locked)) + spin_unlock_irq(&inode->i_mapping->tree_lock); rcu_read_unlock(); } @@ -356,6 +419,19 @@ static inline bool inode_cgwb_enabled(struct inode *inode) return false; } +static inline struct bdi_writeback_congested * +wb_congested_get_create(struct backing_dev_info *bdi, int blkcg_id, gfp_t gfp) +{ + atomic_inc(&bdi->wb_congested->refcnt); + return bdi->wb_congested; +} + +static inline void wb_congested_put(struct bdi_writeback_congested *congested) +{ + if (atomic_dec_and_test(&congested->refcnt)) + kfree(congested); +} + static inline struct bdi_writeback *wb_find_current(struct backing_dev_info *bdi) { return &bdi->wb; @@ -377,22 +453,13 @@ static inline struct bdi_writeback *inode_to_wb(struct inode *inode) return &inode_to_bdi(inode)->wb; } -static inline struct bdi_writeback *inode_to_wb_wbc( - struct inode *inode, - struct writeback_control *wbc) -{ - return inode_to_wb(inode); -} - - static inline struct bdi_writeback * -unlocked_inode_to_wb_begin(struct inode *inode, struct wb_lock_cookie *cookie) +unlocked_inode_to_wb_begin(struct inode *inode, bool *lockedp) { return inode_to_wb(inode); } -static inline void unlocked_inode_to_wb_end(struct inode *inode, - struct wb_lock_cookie *cookie) +static inline void unlocked_inode_to_wb_end(struct inode *inode, bool locked) { } @@ -448,6 +515,4 @@ static inline int bdi_rw_congested(struct 
backing_dev_info *bdi) (1 << WB_async_congested)); } -const char *bdi_dev_name(struct backing_dev_info *bdi); - #endif /* _LINUX_BACKING_DEV_H */ diff --git a/include/linux/backlight.h b/include/linux/backlight.h index 614653e07e..5f2fd61ef4 100644 --- a/include/linux/backlight.h +++ b/include/linux/backlight.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ /* * Backlight Lowlevel Control Abstraction * @@ -14,336 +13,110 @@ #include #include -/** - * enum backlight_update_reason - what method was used to update backlight +/* Notes on locking: * - * A driver indicates the method (reason) used for updating the backlight - * when calling backlight_force_update(). + * backlight_device->ops_lock is an internal backlight lock protecting the + * ops pointer and no code outside the core should need to touch it. + * + * Access to update_status() is serialised by the update_lock mutex since + * most drivers seem to need this and historically get it wrong. + * + * Most drivers don't need locking on their get_brightness() method. + * If yours does, you need to implement it in the driver. You can use the + * update_lock mutex if appropriate. + * + * Any other use of the locks below is probably wrong. */ -enum backlight_update_reason { - /** - * @BACKLIGHT_UPDATE_HOTKEY: The backlight was updated using a hot-key. - */ - BACKLIGHT_UPDATE_HOTKEY, - /** - * @BACKLIGHT_UPDATE_SYSFS: The backlight was updated using sysfs. - */ +enum backlight_update_reason { + BACKLIGHT_UPDATE_HOTKEY, BACKLIGHT_UPDATE_SYSFS, }; -/** - * enum backlight_type - the type of backlight control - * - * The type of interface used to control the backlight. - */ enum backlight_type { - /** - * @BACKLIGHT_RAW: - * - * The backlight is controlled using hardware registers. - */ BACKLIGHT_RAW = 1, - - /** - * @BACKLIGHT_PLATFORM: - * - * The backlight is controlled using a platform-specific interface. 
- */ BACKLIGHT_PLATFORM, - - /** - * @BACKLIGHT_FIRMWARE: - * - * The backlight is controlled using a standard firmware interface. - */ BACKLIGHT_FIRMWARE, - - /** - * @BACKLIGHT_TYPE_MAX: Number of entries. - */ BACKLIGHT_TYPE_MAX, }; -/** - * enum backlight_notification - the type of notification - * - * The notifications that is used for notification sent to the receiver - * that registered notifications using backlight_register_notifier(). - */ enum backlight_notification { - /** - * @BACKLIGHT_REGISTERED: The backlight device is registered. - */ BACKLIGHT_REGISTERED, - - /** - * @BACKLIGHT_UNREGISTERED: The backlight revice is unregistered. - */ BACKLIGHT_UNREGISTERED, }; -/** enum backlight_scale - the type of scale used for brightness values - * - * The type of scale used for brightness values. - */ -enum backlight_scale { - /** - * @BACKLIGHT_SCALE_UNKNOWN: The scale is unknown. - */ - BACKLIGHT_SCALE_UNKNOWN = 0, - - /** - * @BACKLIGHT_SCALE_LINEAR: The scale is linear. - * - * The linear scale will increase brightness the same for each step. - */ - BACKLIGHT_SCALE_LINEAR, - - /** - * @BACKLIGHT_SCALE_NON_LINEAR: The scale is not linear. - * - * This is often used when the brightness values tries to adjust to - * the relative perception of the eye demanding a non-linear scale. - */ - BACKLIGHT_SCALE_NON_LINEAR, -}; - struct backlight_device; struct fb_info; -/** - * struct backlight_ops - backlight operations - * - * The backlight operations are specified when the backlight device is registered. - */ struct backlight_ops { - /** - * @options: Configure how operations are called from the core. - * - * The options parameter is used to adjust the behaviour of the core. - * Set BL_CORE_SUSPENDRESUME to get the update_status() operation called - * upon suspend and resume. - */ unsigned int options; #define BL_CORE_SUSPENDRESUME (1 << 0) - /** - * @update_status: Operation called when properties have changed. 
- * - * Notify the backlight driver some property has changed. - * The update_status operation is protected by the update_lock. - * - * The backlight driver is expected to use backlight_is_blank() - * to check if the display is blanked and set brightness accordingly. - * update_status() is called when any of the properties has changed. - * - * RETURNS: - * - * 0 on success, negative error code if any failure occurred. - */ + /* Notify the backlight driver some property has changed */ int (*update_status)(struct backlight_device *); - - /** - * @get_brightness: Return the current backlight brightness. - * - * The driver may implement this as a readback from the HW. - * This operation is optional and if not present then the current - * brightness property value is used. - * - * RETURNS: - * - * A brightness value which is 0 or a positive number. - * On failure a negative error code is returned. - */ + /* Return the current backlight brightness (accounting for power, + fb_blank etc.) */ int (*get_brightness)(struct backlight_device *); - - /** - * @check_fb: Check the framebuffer device. - * - * Check if given framebuffer device is the one bound to this backlight. - * This operation is optional and if not implemented it is assumed that the - * fbdev is always the one bound to the backlight. - * - * RETURNS: - * - * If info is NULL or the info matches the fbdev bound to the backlight return true. - * If info does not match the fbdev bound to the backlight return false. - */ - int (*check_fb)(struct backlight_device *bd, struct fb_info *info); + /* Check if given framebuffer device is the one bound to this backlight; + return 0 if not, !=0 if it is. If NULL, backlight always matches the fb. */ + int (*check_fb)(struct backlight_device *, struct fb_info *); }; -/** - * struct backlight_properties - backlight properties - * - * This structure defines all the properties of a backlight. 
- */ +/* This structure defines all the properties of a backlight */ struct backlight_properties { - /** - * @brightness: The current brightness requested by the user. - * - * The backlight core makes sure the range is (0 to max_brightness) - * when the brightness is set via the sysfs attribute: - * /sys/class/backlight//brightness. - * - * This value can be set in the backlight_properties passed - * to devm_backlight_device_register() to set a default brightness - * value. - */ + /* Current User requested brightness (0 - max_brightness) */ int brightness; - - /** - * @max_brightness: The maximum brightness value. - * - * This value must be set in the backlight_properties passed to - * devm_backlight_device_register() and shall not be modified by the - * driver after registration. - */ + /* Maximal value for brightness (read-only) */ int max_brightness; - - /** - * @power: The current power mode. - * - * User space can configure the power mode using the sysfs - * attribute: /sys/class/backlight//bl_power - * When the power property is updated update_status() is called. - * - * The possible values are: (0: full on, 1 to 3: power saving - * modes; 4: full off), see FB_BLANK_XXX. - * - * When the backlight device is enabled @power is set - * to FB_BLANK_UNBLANK. When the backlight device is disabled - * @power is set to FB_BLANK_POWERDOWN. - */ + /* Current FB Power mode (0: full on, 1..3: power saving + modes; 4: full off), see FB_BLANK_XXX */ int power; - - /** - * @fb_blank: The power state from the FBIOBLANK ioctl. - * - * When the FBIOBLANK ioctl is called @fb_blank is set to the - * blank parameter and the update_status() operation is called. - * - * When the backlight device is enabled @fb_blank is set - * to FB_BLANK_UNBLANK. When the backlight device is disabled - * @fb_blank is set to FB_BLANK_POWERDOWN. - * - * Backlight drivers should avoid using this property. 
It has been - * replaced by state & BL_CORE_FBLANK (although most drivers should - * use backlight_is_blank() as the preferred means to get the blank - * state). - * - * fb_blank is deprecated and will be removed. - */ + /* FB Blanking active? (values as for power) */ + /* Due to be removed, please use (state & BL_CORE_FBBLANK) */ int fb_blank; - - /** - * @type: The type of backlight supported. - * - * The backlight type allows userspace to make appropriate - * policy decisions based on the backlight type. - * - * This value must be set in the backlight_properties - * passed to devm_backlight_device_register(). - */ + /* Backlight type */ enum backlight_type type; - - /** - * @state: The state of the backlight core. - * - * The state is a bitmask. BL_CORE_FBBLANK is set when the display - * is expected to be blank. BL_CORE_SUSPENDED is set when the - * driver is suspended. - * - * backlight drivers are expected to use backlight_is_blank() - * in their update_status() operation rather than reading the - * state property. - * - * The state is maintained by the core and drivers may not modify it. - */ + /* Flags used to signal drivers of state changes */ + /* Upper 4 bits are reserved for driver internal use */ unsigned int state; #define BL_CORE_SUSPENDED (1 << 0) /* backlight is suspended */ #define BL_CORE_FBBLANK (1 << 1) /* backlight is under an fb blank event */ +#define BL_CORE_DRIVER4 (1 << 28) /* reserved for driver specific use */ +#define BL_CORE_DRIVER3 (1 << 29) /* reserved for driver specific use */ +#define BL_CORE_DRIVER2 (1 << 30) /* reserved for driver specific use */ +#define BL_CORE_DRIVER1 (1 << 31) /* reserved for driver specific use */ - /** - * @scale: The type of the brightness scale. - */ - enum backlight_scale scale; }; -/** - * struct backlight_device - backlight device data - * - * This structure holds all data required by a backlight device. 
- */ struct backlight_device { - /** - * @props: Backlight properties - */ + /* Backlight properties */ struct backlight_properties props; - /** - * @update_lock: The lock used when calling the update_status() operation. - * - * update_lock is an internal backlight lock that serialise access - * to the update_status() operation. The backlight core holds the update_lock - * when calling the update_status() operation. The update_lock shall not - * be used by backlight drivers. - */ + /* Serialise access to update_status method */ struct mutex update_lock; - /** - * @ops_lock: The lock used around everything related to backlight_ops. - * - * ops_lock is an internal backlight lock that protects the ops pointer - * and is used around all accesses to ops and when the operations are - * invoked. The ops_lock shall not be used by backlight drivers. - */ + /* This protects the 'ops' field. If 'ops' is NULL, the driver that + registered this device has been unloaded, and if class_get_devdata() + points to something in the body of that driver, it is also invalid. */ struct mutex ops_lock; - - /** - * @ops: Pointer to the backlight operations. - * - * If ops is NULL, the driver that registered this device has been unloaded, - * and if class_get_devdata() points to something in the body of that driver, - * it is also invalid. - */ const struct backlight_ops *ops; - /** - * @fb_notif: The framebuffer notifier block - */ + /* The framebuffer notifier block */ struct notifier_block fb_notif; - /** - * @entry: List entry of all registered backlight devices - */ + /* list entry of all registered backlight devices */ struct list_head entry; - /** - * @dev: Parent device. - */ struct device dev; - /** - * @fb_bl_on: The state of individual fbdev's. - * - * Multiple fbdev's may share one backlight device. The fb_bl_on - * records the state of the individual fbdev. 
- */ + /* Multiple framebuffers may share one backlight device */ bool fb_bl_on[FB_MAX]; - /** - * @use_count: The number of uses of fb_bl_on. - */ int use_count; }; -/** - * backlight_update_status - force an update of the backlight device status - * @bd: the backlight device - */ static inline int backlight_update_status(struct backlight_device *bd) { int ret = -ENOENT; @@ -356,116 +129,39 @@ static inline int backlight_update_status(struct backlight_device *bd) return ret; } -/** - * backlight_enable - Enable backlight - * @bd: the backlight device to enable - */ -static inline int backlight_enable(struct backlight_device *bd) -{ - if (!bd) - return 0; - - bd->props.power = FB_BLANK_UNBLANK; - bd->props.fb_blank = FB_BLANK_UNBLANK; - bd->props.state &= ~BL_CORE_FBBLANK; - - return backlight_update_status(bd); -} - -/** - * backlight_disable - Disable backlight - * @bd: the backlight device to disable - */ -static inline int backlight_disable(struct backlight_device *bd) -{ - if (!bd) - return 0; - - bd->props.power = FB_BLANK_POWERDOWN; - bd->props.fb_blank = FB_BLANK_POWERDOWN; - bd->props.state |= BL_CORE_FBBLANK; - - return backlight_update_status(bd); -} - -/** - * backlight_is_blank - Return true if display is expected to be blank - * @bd: the backlight device - * - * Display is expected to be blank if any of these is true:: - * - * 1) if power in not UNBLANK - * 2) if fb_blank is not UNBLANK - * 3) if state indicate BLANK or SUSPENDED - * - * Returns true if display is expected to be blank, false otherwise. - */ -static inline bool backlight_is_blank(const struct backlight_device *bd) -{ - return bd->props.power != FB_BLANK_UNBLANK || - bd->props.fb_blank != FB_BLANK_UNBLANK || - bd->props.state & (BL_CORE_SUSPENDED | BL_CORE_FBBLANK); -} - -/** - * backlight_get_brightness - Returns the current brightness value - * @bd: the backlight device - * - * Returns the current brightness value, taking in consideration the current - * state. 
If backlight_is_blank() returns true then return 0 as brightness - * otherwise return the current brightness property value. - * - * Backlight drivers are expected to use this function in their update_status() - * operation to get the brightness value. - */ -static inline int backlight_get_brightness(const struct backlight_device *bd) -{ - if (backlight_is_blank(bd)) - return 0; - else - return bd->props.brightness; -} - -struct backlight_device * -backlight_device_register(const char *name, struct device *dev, void *devdata, - const struct backlight_ops *ops, - const struct backlight_properties *props); -struct backlight_device * -devm_backlight_device_register(struct device *dev, const char *name, - struct device *parent, void *devdata, - const struct backlight_ops *ops, - const struct backlight_properties *props); -void backlight_device_unregister(struct backlight_device *bd); -void devm_backlight_device_unregister(struct device *dev, - struct backlight_device *bd); -void backlight_force_update(struct backlight_device *bd, - enum backlight_update_reason reason); -int backlight_register_notifier(struct notifier_block *nb); -int backlight_unregister_notifier(struct notifier_block *nb); -struct backlight_device *backlight_device_get_by_name(const char *name); -struct backlight_device *backlight_device_get_by_type(enum backlight_type type); -int backlight_device_set_brightness(struct backlight_device *bd, - unsigned long brightness); +extern struct backlight_device *backlight_device_register(const char *name, + struct device *dev, void *devdata, const struct backlight_ops *ops, + const struct backlight_properties *props); +extern struct backlight_device *devm_backlight_device_register( + struct device *dev, const char *name, struct device *parent, + void *devdata, const struct backlight_ops *ops, + const struct backlight_properties *props); +extern void backlight_device_unregister(struct backlight_device *bd); +extern void devm_backlight_device_unregister(struct 
device *dev, + struct backlight_device *bd); +extern void backlight_force_update(struct backlight_device *bd, + enum backlight_update_reason reason); +extern int backlight_register_notifier(struct notifier_block *nb); +extern int backlight_unregister_notifier(struct notifier_block *nb); +extern struct backlight_device *backlight_device_get_by_type(enum backlight_type type); +extern int backlight_device_set_brightness(struct backlight_device *bd, unsigned long brightness); #define to_backlight_device(obj) container_of(obj, struct backlight_device, dev) -/** - * bl_get_data - access devdata - * @bl_dev: pointer to backlight device - * - * When a backlight device is registered the driver has the possibility - * to supply a void * devdata. bl_get_data() return a pointer to the - * devdata. - * - * RETURNS: - * - * pointer to devdata stored while registering the backlight device. - */ static inline void * bl_get_data(struct backlight_device *bl_dev) { return dev_get_drvdata(&bl_dev->dev); } +struct generic_bl_info { + const char *name; + int max_intensity; + int default_intensity; + int limit_mask; + void (*set_bl_intensity)(int intensity); + void (*kick_battery)(void); +}; + #ifdef CONFIG_OF struct backlight_device *of_find_backlight_by_node(struct device_node *node); #else @@ -476,14 +172,4 @@ of_find_backlight_by_node(struct device_node *node) } #endif -#if IS_ENABLED(CONFIG_BACKLIGHT_CLASS_DEVICE) -struct backlight_device *devm_of_find_backlight(struct device *dev); -#else -static inline struct backlight_device * -devm_of_find_backlight(struct device *dev) -{ - return NULL; -} -#endif - #endif diff --git a/include/linux/badblocks.h b/include/linux/badblocks.h index 2426276b9b..c3bdf8c594 100644 --- a/include/linux/badblocks.h +++ b/include/linux/badblocks.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_BADBLOCKS_H #define _LINUX_BADBLOCKS_H diff --git a/include/linux/balloon_compaction.h b/include/linux/balloon_compaction.h index 
338aa27e47..79542b2698 100644 --- a/include/linux/balloon_compaction.h +++ b/include/linux/balloon_compaction.h @@ -1,21 +1,17 @@ -/* SPDX-License-Identifier: GPL-2.0 */ /* * include/linux/balloon_compaction.h * * Common interface definitions for making balloon pages movable by compaction. * - * Balloon page migration makes use of the general non-lru movable page - * feature. - * - * page->private is used to reference the responsible balloon device. - * page->mapping is used in context of non-lru page migration to reference - * the address space operations for page isolation/migration/compaction. + * Despite being perfectly possible to perform ballooned pages migration, they + * make a special corner case to compaction scans because balloon pages are not + * enlisted at any LRU list like the other pages we do compact / migrate. * * As the page isolation scanning step a compaction thread does is a lockless * procedure (from a page standpoint), it might bring some racy situations while * performing balloon page compaction. In order to sort out these racy scenarios * and safely perform balloon's page compaction and migration we must, always, - * ensure following these simple rules: + * ensure following these three simple rules: * * i. when updating a balloon's page ->mapping element, strictly do it under * the following lock order, independently of the far superior @@ -24,8 +20,19 @@ * +--spin_lock_irq(&b_dev_info->pages_lock); * ... page->mapping updates here ... * - * ii. isolation or dequeueing procedure must remove the page from balloon - * device page list under b_dev_info->pages_lock. + * ii. before isolating or dequeueing a balloon page from the balloon device + * pages list, the page reference counter must be raised by one and the + * extra refcount must be dropped when the page is enqueued back into + * the balloon device page list, thus a balloon page keeps its reference + * counter raised only while it is under our special handling; + * + * iii. 
after the lockless scan step have selected a potential balloon page for + * isolation, re-test the PageBalloon mark and the PagePrivate flag + * under the proper page lock, to ensure isolating a valid balloon page + * (not yet isolated, nor under release procedure) + * + * iv. isolation or dequeueing procedure must clear PagePrivate flag under + * page lock together with removing page from balloon device page list. * * The functions provided by this interface are placed to help on coping with * the aforementioned balloon page corner case, as well as to ensure the simple @@ -42,7 +49,6 @@ #include #include #include -#include /* * Balloon device information descriptor. @@ -60,14 +66,8 @@ struct balloon_dev_info { struct inode *inode; }; -extern struct page *balloon_page_alloc(void); -extern void balloon_page_enqueue(struct balloon_dev_info *b_dev_info, - struct page *page); +extern struct page *balloon_page_enqueue(struct balloon_dev_info *b_dev_info); extern struct page *balloon_page_dequeue(struct balloon_dev_info *b_dev_info); -extern size_t balloon_page_list_enqueue(struct balloon_dev_info *b_dev_info, - struct list_head *pages); -extern size_t balloon_page_list_dequeue(struct balloon_dev_info *b_dev_info, - struct list_head *pages, size_t n_req_pages); static inline void balloon_devinfo_init(struct balloon_dev_info *balloon) { @@ -99,7 +99,7 @@ extern int balloon_page_migrate(struct address_space *mapping, static inline void balloon_page_insert(struct balloon_dev_info *balloon, struct page *page) { - __SetPageOffline(page); + __SetPageBalloon(page); __SetPageMovable(page, balloon->inode->i_mapping); set_page_private(page, (unsigned long)balloon); list_add(&page->lru, &balloon->pages); @@ -115,7 +115,7 @@ static inline void balloon_page_insert(struct balloon_dev_info *balloon, */ static inline void balloon_page_delete(struct page *page) { - __ClearPageOffline(page); + __ClearPageBalloon(page); __ClearPageMovable(page); set_page_private(page, 0); /* @@ -145,16 
+145,31 @@ static inline gfp_t balloon_mapping_gfp_mask(void) static inline void balloon_page_insert(struct balloon_dev_info *balloon, struct page *page) { - __SetPageOffline(page); + __SetPageBalloon(page); list_add(&page->lru, &balloon->pages); } static inline void balloon_page_delete(struct page *page) { - __ClearPageOffline(page); + __ClearPageBalloon(page); list_del(&page->lru); } +static inline bool __is_movable_balloon_page(struct page *page) +{ + return false; +} + +static inline bool balloon_page_movable(struct page *page) +{ + return false; +} + +static inline bool isolated_balloon_page(struct page *page) +{ + return false; +} + static inline bool balloon_page_isolate(struct page *page) { return false; @@ -177,34 +192,4 @@ static inline gfp_t balloon_mapping_gfp_mask(void) } #endif /* CONFIG_BALLOON_COMPACTION */ - -/* - * balloon_page_push - insert a page into a page list. - * @head : pointer to list - * @page : page to be added - * - * Caller must ensure the page is private and protect the list. - */ -static inline void balloon_page_push(struct list_head *pages, struct page *page) -{ - list_add(&page->lru, pages); -} - -/* - * balloon_page_pop - remove a page from a page list. - * @head : pointer to list - * @page : page to be added - * - * Caller must ensure the page is private and protect the list. 
- */ -static inline struct page *balloon_page_pop(struct list_head *pages) -{ - struct page *page = list_first_entry_or_null(pages, struct page, lru); - - if (!page) - return NULL; - - list_del(&page->lru); - return page; -} #endif /* _LINUX_BALLOON_COMPACTION_H */ diff --git a/include/linux/bcd.h b/include/linux/bcd.h index 118bea36d7..18fff11fb3 100644 --- a/include/linux/bcd.h +++ b/include/linux/bcd.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _BCD_H #define _BCD_H diff --git a/include/linux/bch.h b/include/linux/bch.h index 85fdce83d4..295b4ef153 100644 --- a/include/linux/bch.h +++ b/include/linux/bch.h @@ -1,7 +1,19 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * Generic binary BCH encoding/decoding library * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published by + * the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., 51 + * Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * * Copyright © 2011 Parrot S.A. 
* * Author: Ivan Djelic @@ -33,7 +45,6 @@ * @cache: log-based polynomial representation buffer * @elp: error locator polynomial * @poly_2t: temporary polynomials of degree 2t - * @swap_bits: swap bits within data and syndrome bytes */ struct bch_control { unsigned int m; @@ -52,18 +63,16 @@ struct bch_control { int *cache; struct gf_poly *elp; struct gf_poly *poly_2t[4]; - bool swap_bits; }; -struct bch_control *bch_init(int m, int t, unsigned int prim_poly, - bool swap_bits); +struct bch_control *init_bch(int m, int t, unsigned int prim_poly); -void bch_free(struct bch_control *bch); +void free_bch(struct bch_control *bch); -void bch_encode(struct bch_control *bch, const uint8_t *data, +void encode_bch(struct bch_control *bch, const uint8_t *data, unsigned int len, uint8_t *ecc); -int bch_decode(struct bch_control *bch, const uint8_t *data, unsigned int len, +int decode_bch(struct bch_control *bch, const uint8_t *data, unsigned int len, const uint8_t *recv_ecc, const uint8_t *calc_ecc, const unsigned int *syn, unsigned int *errloc); diff --git a/include/linux/bcm47xx_nvram.h b/include/linux/bcm47xx_nvram.h index 53b31f69b7..2793652fbf 100644 --- a/include/linux/bcm47xx_nvram.h +++ b/include/linux/bcm47xx_nvram.h @@ -1,11 +1,13 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ /* + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. 
*/ #ifndef __BCM47XX_NVRAM_H #define __BCM47XX_NVRAM_H -#include #include #include #include diff --git a/include/linux/bcm47xx_sprom.h b/include/linux/bcm47xx_sprom.h index f8254fd53e..c06b47c84e 100644 --- a/include/linux/bcm47xx_sprom.h +++ b/include/linux/bcm47xx_sprom.h @@ -1,5 +1,8 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ /* + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. */ #ifndef __BCM47XX_SPROM_H @@ -9,19 +12,9 @@ #include #include -struct ssb_sprom; - #ifdef CONFIG_BCM47XX_SPROM -void bcm47xx_fill_sprom(struct ssb_sprom *sprom, const char *prefix, - bool fallback); int bcm47xx_sprom_register_fallbacks(void); #else -static inline void bcm47xx_fill_sprom(struct ssb_sprom *sprom, - const char *prefix, - bool fallback) -{ -} - static inline int bcm47xx_sprom_register_fallbacks(void) { return -ENOTSUPP; diff --git a/include/linux/bcm47xx_wdt.h b/include/linux/bcm47xx_wdt.h index fc9dcdb4b9..8d9d07ec22 100644 --- a/include/linux/bcm47xx_wdt.h +++ b/include/linux/bcm47xx_wdt.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef LINUX_BCM47XX_WDT_H_ #define LINUX_BCM47XX_WDT_H_ diff --git a/include/linux/bcm963xx_nvram.h b/include/linux/bcm963xx_nvram.h index c8c7f01159..290c231b8c 100644 --- a/include/linux/bcm963xx_nvram.h +++ b/include/linux/bcm963xx_nvram.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef __LINUX_BCM963XX_NVRAM_H__ #define __LINUX_BCM963XX_NVRAM_H__ diff --git a/include/linux/bcm963xx_tag.h b/include/linux/bcm963xx_tag.h index 7edb809a25..161c7b37a7 100644 --- a/include/linux/bcm963xx_tag.h +++ b/include/linux/bcm963xx_tag.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef __LINUX_BCM963XX_TAG_H__ #define __LINUX_BCM963XX_TAG_H__ @@ -84,7 +83,7 @@ struct bcm_tag { char 
flash_layout_ver[FLASHLAYOUTVER_LEN]; /* 196-199: kernel+rootfs CRC32 */ __u32 fskernel_crc; - /* 200-215: Unused except on Alice Gate where it is information */ + /* 200-215: Unused except on Alice Gate where is is information */ char information2[TAGINFO2_LEN]; /* 216-219: CRC32 of image less imagetag (kernel for Alice Gate) */ __u32 image_crc; diff --git a/include/linux/bcma/bcma.h b/include/linux/bcma/bcma.h index 60b94b944e..8eeedb2db9 100644 --- a/include/linux/bcma/bcma.h +++ b/include/linux/bcma/bcma.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef LINUX_BCMA_H_ #define LINUX_BCMA_H_ @@ -332,8 +331,6 @@ extern int bcma_arch_register_fallback_sprom( struct ssb_sprom *out)); struct bcma_bus { - struct device *dev; - /* The MMIO area. */ void __iomem *mmio; @@ -341,7 +338,14 @@ struct bcma_bus { enum bcma_hosttype hosttype; bool host_is_pcie2; /* Used for BCMA_HOSTTYPE_PCI only */ - struct pci_dev *host_pci; /* PCI bus pointer (BCMA_HOSTTYPE_PCI only) */ + union { + /* Pointer to the PCI bus (only for BCMA_HOSTTYPE_PCI) */ + struct pci_dev *host_pci; + /* Pointer to the SDIO device (only for BCMA_HOSTTYPE_SDIO) */ + struct sdio_func *host_sdio; + /* Pointer to platform device (only for BCMA_HOSTTYPE_SOC) */ + struct platform_device *host_pdev; + }; struct bcma_chipinfo chipinfo; diff --git a/include/linux/bcma/bcma_driver_arm_c9.h b/include/linux/bcma/bcma_driver_arm_c9.h index 688cf590c9..93bd73d670 100644 --- a/include/linux/bcma/bcma_driver_arm_c9.h +++ b/include/linux/bcma/bcma_driver_arm_c9.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef LINUX_BCMA_DRIVER_ARM_C9_H_ #define LINUX_BCMA_DRIVER_ARM_C9_H_ diff --git a/include/linux/bcma/bcma_driver_chipcommon.h b/include/linux/bcma/bcma_driver_chipcommon.h index d35b920609..b20e3d5625 100644 --- a/include/linux/bcma/bcma_driver_chipcommon.h +++ b/include/linux/bcma/bcma_driver_chipcommon.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef LINUX_BCMA_DRIVER_CC_H_ 
#define LINUX_BCMA_DRIVER_CC_H_ @@ -594,6 +593,9 @@ struct bcma_sflash { u32 blocksize; u16 numblocks; u32 size; + + struct mtd_info *mtd; + void *priv; }; #endif diff --git a/include/linux/bcma/bcma_driver_gmac_cmn.h b/include/linux/bcma/bcma_driver_gmac_cmn.h index 420e222d7a..4354d4ea67 100644 --- a/include/linux/bcma/bcma_driver_gmac_cmn.h +++ b/include/linux/bcma/bcma_driver_gmac_cmn.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef LINUX_BCMA_DRIVER_GMAC_CMN_H_ #define LINUX_BCMA_DRIVER_GMAC_CMN_H_ diff --git a/include/linux/bcma/bcma_driver_mips.h b/include/linux/bcma/bcma_driver_mips.h index 798013fab5..8eea7f9e33 100644 --- a/include/linux/bcma/bcma_driver_mips.h +++ b/include/linux/bcma/bcma_driver_mips.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef LINUX_BCMA_DRIVER_MIPS_H_ #define LINUX_BCMA_DRIVER_MIPS_H_ diff --git a/include/linux/bcma/bcma_driver_pci.h b/include/linux/bcma/bcma_driver_pci.h index 68da8dba51..9657f11d48 100644 --- a/include/linux/bcma/bcma_driver_pci.h +++ b/include/linux/bcma/bcma_driver_pci.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef LINUX_BCMA_DRIVER_PCI_H_ #define LINUX_BCMA_DRIVER_PCI_H_ @@ -81,7 +80,7 @@ struct pci_dev; #define BCMA_CORE_PCI_MDIODATA_DEV_TX 0x1e /* SERDES TX Dev */ #define BCMA_CORE_PCI_MDIODATA_DEV_RX 0x1f /* SERDES RX Dev */ #define BCMA_CORE_PCI_PCIEIND_ADDR 0x0130 /* indirect access to the internal register */ -#define BCMA_CORE_PCI_PCIEIND_DATA 0x0134 /* Data to/from the internal register */ +#define BCMA_CORE_PCI_PCIEIND_DATA 0x0134 /* Data to/from the internal regsiter */ #define BCMA_CORE_PCI_CLKREQENCTRL 0x0138 /* >= rev 6, Clkreq rdma control */ #define BCMA_CORE_PCI_PCICFG0 0x0400 /* PCI config space 0 (rev >= 8) */ #define BCMA_CORE_PCI_PCICFG1 0x0500 /* PCI config space 1 (rev >= 8) */ diff --git a/include/linux/bcma/bcma_driver_pcie2.h b/include/linux/bcma/bcma_driver_pcie2.h index 91ce515e3a..31e6d17ab7 100644 --- 
a/include/linux/bcma/bcma_driver_pcie2.h +++ b/include/linux/bcma/bcma_driver_pcie2.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef LINUX_BCMA_DRIVER_PCIE2_H_ #define LINUX_BCMA_DRIVER_PCIE2_H_ diff --git a/include/linux/bcma/bcma_regs.h b/include/linux/bcma/bcma_regs.h index 944105cbd6..9986f8288d 100644 --- a/include/linux/bcma/bcma_regs.h +++ b/include/linux/bcma/bcma_regs.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef LINUX_BCMA_REGS_H_ #define LINUX_BCMA_REGS_H_ diff --git a/include/linux/bcma/bcma_soc.h b/include/linux/bcma/bcma_soc.h index f3c43519ba..1b5fc0c3b1 100644 --- a/include/linux/bcma/bcma_soc.h +++ b/include/linux/bcma/bcma_soc.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef LINUX_BCMA_SOC_H_ #define LINUX_BCMA_SOC_H_ @@ -6,7 +5,6 @@ struct bcma_soc { struct bcma_bus bus; - struct device *dev; }; int __init bcma_host_soc_register(struct bcma_soc *soc); diff --git a/include/linux/bfin_mac.h b/include/linux/bfin_mac.h new file mode 100644 index 0000000000..a69554ef84 --- /dev/null +++ b/include/linux/bfin_mac.h @@ -0,0 +1,30 @@ +/* + * Blackfin On-Chip MAC Driver + * + * Copyright 2004-2010 Analog Devices Inc. + * + * Enter bugs at http://blackfin.uclinux.org/ + * + * Licensed under the GPL-2 or later. 
+ */ + +#ifndef _LINUX_BFIN_MAC_H_ +#define _LINUX_BFIN_MAC_H_ + +#include + +struct bfin_phydev_platform_data { + unsigned short addr; + int irq; +}; + +struct bfin_mii_bus_platform_data { + int phydev_number; + struct bfin_phydev_platform_data *phydev_data; + const unsigned short *mac_peripherals; + int phy_mode; + unsigned int phy_mask; + unsigned short vlan1_mask, vlan2_mask; +}; + +#endif diff --git a/include/linux/binfmts.h b/include/linux/binfmts.h index 049cf9421d..c8196d8055 100644 --- a/include/linux/binfmts.h +++ b/include/linux/binfmts.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_BINFMTS_H #define _LINUX_BINFMTS_H @@ -7,14 +6,13 @@ #include #include -struct filename; - #define CORENAME_MAX_SIZE 128 /* * This structure is used to hold the arguments that are used when loading binaries. */ struct linux_binprm { + char buf[BINPRM_BUF_SIZE]; #ifdef CONFIG_MMU struct vm_area_struct *vma; unsigned long vma_pages; @@ -24,69 +22,50 @@ struct linux_binprm { #endif struct mm_struct *mm; unsigned long p; /* current top of mem */ - unsigned long argmin; /* rlimit marker for copy_strings() */ unsigned int - /* Should an execfd be passed to userspace? */ - have_execfd:1, - - /* Use the creds of a script (see binfmt_misc) */ - execfd_creds:1, - /* - * Set by bprm_creds_for_exec hook to indicate a - * privilege-gaining exec has happened. Used to set - * AT_SECURE auxv for glibc. - */ - secureexec:1, - /* - * Set when errors can no longer be returned to the - * original userspace. 
- */ - point_of_no_return:1; + cred_prepared:1,/* true if creds already prepared (multiple + * preps happen for interpreters) */ + cap_effective:1;/* true if has elevated effective capabilities, + * false if not; except for init which inherits + * its parent's caps anyway */ #ifdef __alpha__ unsigned int taso:1; #endif - struct file *executable; /* Executable to pass to the interpreter */ - struct file *interpreter; - struct file *file; + unsigned int recursion_depth; /* only for search_binary_handler() */ + struct file * file; struct cred *cred; /* new credentials */ int unsafe; /* how unsafe this exec is (mask of LSM_UNSAFE_*) */ unsigned int per_clear; /* bits to clear in current->personality */ int argc, envc; - const char *filename; /* Name of binary as seen by procps */ - const char *interp; /* Name of the binary really executed. Most + const char * filename; /* Name of binary as seen by procps */ + const char * interp; /* Name of the binary really executed. Most of the time same as filename, but could be different for binfmt_{misc,script} */ - const char *fdpath; /* generated filename for execveat */ unsigned interp_flags; - int execfd; /* File descriptor of the executable */ + unsigned interp_data; unsigned long loader, exec; - - struct rlimit rlim_stack; /* Saved RLIMIT_STACK used during exec. 
*/ - - char buf[BINPRM_BUF_SIZE]; } __randomize_layout; #define BINPRM_FLAGS_ENFORCE_NONDUMP_BIT 0 #define BINPRM_FLAGS_ENFORCE_NONDUMP (1 << BINPRM_FLAGS_ENFORCE_NONDUMP_BIT) +/* fd of the binary should be passed to the interpreter */ +#define BINPRM_FLAGS_EXECFD_BIT 1 +#define BINPRM_FLAGS_EXECFD (1 << BINPRM_FLAGS_EXECFD_BIT) + /* filename of the binary will be inaccessible after exec */ #define BINPRM_FLAGS_PATH_INACCESSIBLE_BIT 2 #define BINPRM_FLAGS_PATH_INACCESSIBLE (1 << BINPRM_FLAGS_PATH_INACCESSIBLE_BIT) -/* preserve argv0 for the interpreter */ -#define BINPRM_FLAGS_PRESERVE_ARGV0_BIT 3 -#define BINPRM_FLAGS_PRESERVE_ARGV0 (1 << BINPRM_FLAGS_PRESERVE_ARGV0_BIT) - /* Function parameter for binfmt->coredump */ struct coredump_params { - const kernel_siginfo_t *siginfo; + const siginfo_t *siginfo; struct pt_regs *regs; struct file *file; unsigned long limit; unsigned long mm_flags; loff_t written; loff_t pos; - loff_t to_skip; }; /* @@ -99,8 +78,10 @@ struct linux_binfmt { int (*load_binary)(struct linux_binprm *); int (*load_shlib)(struct file *); int (*core_dump)(struct coredump_params *cprm); + void (*handle_mprotect)(struct vm_area_struct *vma, unsigned long newflags); + void (*handle_mmap)(struct file *); unsigned long min_coredump; /* minimal dump size */ -} __randomize_layout; +} __do_const __randomize_layout; extern void __register_binfmt(struct linux_binfmt *fmt, int insert); @@ -117,10 +98,11 @@ static inline void insert_binfmt(struct linux_binfmt *fmt) extern void unregister_binfmt(struct linux_binfmt *); +extern int prepare_binprm(struct linux_binprm *); extern int __must_check remove_arg_zero(struct linux_binprm *); -extern int begin_new_exec(struct linux_binprm * bprm); +extern int search_binary_handler(struct linux_binprm *); +extern int flush_old_exec(struct linux_binprm * bprm); extern void setup_new_exec(struct linux_binprm * bprm); -extern void finalize_exec(struct linux_binprm *bprm); extern void would_dump(struct linux_binprm *, struct 
file *); extern int suid_dumpable; @@ -135,12 +117,12 @@ extern int setup_arg_pages(struct linux_binprm * bprm, int executable_stack); extern int transfer_args_to_stack(struct linux_binprm *bprm, unsigned long *sp_location); -extern int bprm_change_interp(const char *interp, struct linux_binprm *bprm); -int copy_string_kernel(const char *arg, struct linux_binprm *bprm); +extern int bprm_change_interp(char *interp, struct linux_binprm *bprm); +extern int copy_strings_kernel(int argc, const char *const *argv, + struct linux_binprm *bprm); +extern int prepare_bprm_creds(struct linux_binprm *bprm); +extern void install_exec_creds(struct linux_binprm *bprm); extern void set_binfmt(struct linux_binfmt *new); extern ssize_t read_code(struct file *, unsigned long, loff_t, size_t); -int kernel_execve(const char *filename, - const char *const *argv, const char *const *envp); - #endif /* _LINUX_BINFMTS_H */ diff --git a/include/linux/bio.h b/include/linux/bio.h index 00952e92ea..cd9664a519 100644 --- a/include/linux/bio.h +++ b/include/linux/bio.h @@ -1,15 +1,34 @@ -/* SPDX-License-Identifier: GPL-2.0 */ /* * Copyright (C) 2001 Jens Axboe + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public Licens + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111- */ #ifndef __LINUX_BIO_H #define __LINUX_BIO_H +#include #include #include +#include + +#ifdef CONFIG_BLOCK + +#include + /* struct bio, bio_vec and BIO_* flags are defined in blk_types.h */ #include -#include #define BIO_DEBUG @@ -19,12 +38,7 @@ #define BIO_BUG_ON #endif -#define BIO_MAX_VECS 256U - -static inline unsigned int bio_max_segs(unsigned int nr_segs) -{ - return min(nr_segs, BIO_MAX_VECS); -} +#define BIO_MAX_PAGES 256 #define bio_prio(bio) (bio)->bi_ioprio #define bio_set_prio(bio, prio) ((bio)->bi_ioprio = prio) @@ -43,17 +57,10 @@ static inline unsigned int bio_max_segs(unsigned int nr_segs) #define bio_offset(bio) bio_iter_offset((bio), (bio)->bi_iter) #define bio_iovec(bio) bio_iter_iovec((bio), (bio)->bi_iter) -#define bvec_iter_sectors(iter) ((iter).bi_size >> 9) -#define bvec_iter_end_sector(iter) ((iter).bi_sector + bvec_iter_sectors((iter))) - -#define bio_sectors(bio) bvec_iter_sectors((bio)->bi_iter) -#define bio_end_sector(bio) bvec_iter_end_sector((bio)->bi_iter) - -/* - * Return the data direction, READ or WRITE. - */ -#define bio_data_dir(bio) \ - (op_is_write(bio_op(bio)) ? WRITE : READ) +#define bio_multiple_segments(bio) \ + ((bio)->bi_iter.bi_size != bio_iovec(bio).bv_len) +#define bio_sectors(bio) ((bio)->bi_iter.bi_size >> 9) +#define bio_end_sector(bio) ((bio)->bi_iter.bi_sector + bio_sectors((bio))) /* * Check whether this bio carries any data or not. A NULL bio is allowed. 
@@ -63,19 +70,28 @@ static inline bool bio_has_data(struct bio *bio) if (bio && bio->bi_iter.bi_size && bio_op(bio) != REQ_OP_DISCARD && - bio_op(bio) != REQ_OP_SECURE_ERASE && - bio_op(bio) != REQ_OP_WRITE_ZEROES) + bio_op(bio) != REQ_OP_SECURE_ERASE) return true; return false; } -static inline bool bio_no_advance_iter(const struct bio *bio) +static inline bool bio_no_advance_iter(struct bio *bio) { return bio_op(bio) == REQ_OP_DISCARD || bio_op(bio) == REQ_OP_SECURE_ERASE || - bio_op(bio) == REQ_OP_WRITE_SAME || - bio_op(bio) == REQ_OP_WRITE_ZEROES; + bio_op(bio) == REQ_OP_WRITE_SAME; +} + +static inline bool bio_is_rw(struct bio *bio) +{ + if (!bio_has_data(bio)) + return false; + + if (bio_no_advance_iter(bio)) + return false; + + return true; } static inline bool bio_mergeable(struct bio *bio) @@ -102,44 +118,54 @@ static inline void *bio_data(struct bio *bio) return NULL; } -/** - * bio_full - check if the bio is full - * @bio: bio to check - * @len: length of one segment to be added - * - * Return true if @bio is full and one segment with @len bytes can't be - * added to the bio, otherwise return false +/* + * will die */ -static inline bool bio_full(struct bio *bio, unsigned len) -{ - if (bio->bi_vcnt >= bio->bi_max_vecs) - return true; +#define bio_to_phys(bio) (page_to_phys(bio_page((bio))) + (unsigned long) bio_offset((bio))) +#define bvec_to_phys(bv) (page_to_phys((bv)->bv_page) + (unsigned long) (bv)->bv_offset) - if (bio->bi_iter.bi_size > UINT_MAX - len) - return true; +/* + * queues that have highmem support enabled may still need to revert to + * PIO transfers occasionally and thus map high pages temporarily. 
For + * permanent PIO fall back, user is probably better off disabling highmem + * I/O completely on that queue (see ide-dma for example) + */ +#define __bio_kmap_atomic(bio, iter) \ + (kmap_atomic(bio_iter_iovec((bio), (iter)).bv_page) + \ + bio_iter_iovec((bio), (iter)).bv_offset) - return false; -} +#define __bio_kunmap_atomic(addr) kunmap_atomic(addr) -static inline bool bio_next_segment(const struct bio *bio, - struct bvec_iter_all *iter) -{ - if (iter->idx >= bio->bi_vcnt) - return false; +/* + * merge helpers etc + */ - bvec_advance(&bio->bi_io_vec[iter->idx], iter); - return true; -} +/* Default implementation of BIOVEC_PHYS_MERGEABLE */ +#define __BIOVEC_PHYS_MERGEABLE(vec1, vec2) \ + ((bvec_to_phys((vec1)) + (vec1)->bv_len) == bvec_to_phys((vec2))) + +/* + * allow arch override, for eg virtualized architectures (put in asm/io.h) + */ +#ifndef BIOVEC_PHYS_MERGEABLE +#define BIOVEC_PHYS_MERGEABLE(vec1, vec2) \ + __BIOVEC_PHYS_MERGEABLE(vec1, vec2) +#endif + +#define __BIO_SEG_BOUNDARY(addr1, addr2, mask) \ + (((addr1) | (mask)) == (((addr2) - 1) | (mask))) +#define BIOVEC_SEG_BOUNDARY(q, b1, b2) \ + __BIO_SEG_BOUNDARY(bvec_to_phys((b1)), bvec_to_phys((b2)) + (b2)->bv_len, queue_segment_boundary((q))) /* * drivers should _never_ use the all version - the bio may have been split * before it got to the driver and the driver won't own all of it */ -#define bio_for_each_segment_all(bvl, bio, iter) \ - for (bvl = bvec_init_iter_all(&iter); bio_next_segment((bio), &iter); ) +#define bio_for_each_segment_all(bvl, bio, i) \ + for (i = 0, bvl = (bio)->bi_io_vec; i < (bio)->bi_vcnt; i++, bvl++) -static inline void bio_advance_iter(const struct bio *bio, - struct bvec_iter *iter, unsigned int bytes) +static inline void bio_advance_iter(struct bio *bio, struct bvec_iter *iter, + unsigned bytes) { iter->bi_sector += bytes >> 9; @@ -147,49 +173,17 @@ static inline void bio_advance_iter(const struct bio *bio, iter->bi_size -= bytes; else bvec_iter_advance(bio->bi_io_vec, 
iter, bytes); - /* TODO: It is reasonable to complete bio with error here. */ -} - -/* @bytes should be less or equal to bvec[i->bi_idx].bv_len */ -static inline void bio_advance_iter_single(const struct bio *bio, - struct bvec_iter *iter, - unsigned int bytes) -{ - iter->bi_sector += bytes >> 9; - - if (bio_no_advance_iter(bio)) - iter->bi_size -= bytes; - else - bvec_iter_advance_single(bio->bi_io_vec, iter, bytes); } #define __bio_for_each_segment(bvl, bio, iter, start) \ for (iter = (start); \ (iter).bi_size && \ ((bvl = bio_iter_iovec((bio), (iter))), 1); \ - bio_advance_iter_single((bio), &(iter), (bvl).bv_len)) + bio_advance_iter((bio), &(iter), (bvl).bv_len)) #define bio_for_each_segment(bvl, bio, iter) \ __bio_for_each_segment(bvl, bio, iter, (bio)->bi_iter) -#define __bio_for_each_bvec(bvl, bio, iter, start) \ - for (iter = (start); \ - (iter).bi_size && \ - ((bvl = mp_bvec_iter_bvec((bio)->bi_io_vec, (iter))), 1); \ - bio_advance_iter_single((bio), &(iter), (bvl).bv_len)) - -/* iterate over multi-page bvec */ -#define bio_for_each_bvec(bvl, bio, iter) \ - __bio_for_each_bvec(bvl, bio, iter, (bio)->bi_iter) - -/* - * Iterate over all multi-page bvecs. Drivers shouldn't use this version for the - * same reasons as bio_for_each_segment_all(). 
- */ -#define bio_for_each_bvec_all(bvl, bio, i) \ - for (i = 0, bvl = bio_first_bvec_all(bio); \ - i < (bio)->bi_vcnt; i++, bvl++) \ - #define bio_iter_last(bvec, iter) ((iter).bi_size == (bvec).bv_len) static inline unsigned bio_segments(struct bio *bio) @@ -199,20 +193,18 @@ static inline unsigned bio_segments(struct bio *bio) struct bvec_iter iter; /* - * We special case discard/write same/write zeroes, because they - * interpret bi_size differently: + * We special case discard/write same, because they interpret bi_size + * differently: */ - switch (bio_op(bio)) { - case REQ_OP_DISCARD: - case REQ_OP_SECURE_ERASE: - case REQ_OP_WRITE_ZEROES: - return 0; - case REQ_OP_WRITE_SAME: + if (bio_op(bio) == REQ_OP_DISCARD) + return 1; + + if (bio_op(bio) == REQ_OP_SECURE_ERASE) + return 1; + + if (bio_op(bio) == REQ_OP_WRITE_SAME) return 1; - default: - break; - } bio_for_each_segment(bv, bio, iter) segs++; @@ -245,7 +237,7 @@ static inline void bio_cnt_set(struct bio *bio, unsigned int count) { if (count != 1) { bio->bi_flags |= (1 << BIO_REFFED); - smp_mb(); + smp_mb__before_atomic(); } atomic_set(&bio->__bi_cnt, count); } @@ -267,7 +259,7 @@ static inline void bio_clear_flag(struct bio *bio, unsigned int bit) static inline void bio_get_first_bvec(struct bio *bio, struct bio_vec *bv) { - *bv = mp_bvec_iter_bvec(bio->bi_io_vec, bio->bi_iter); + *bv = bio_iovec(bio); } static inline void bio_get_last_bvec(struct bio *bio, struct bio_vec *bv) @@ -275,9 +267,10 @@ static inline void bio_get_last_bvec(struct bio *bio, struct bio_vec *bv) struct bvec_iter iter = bio->bi_iter; int idx; - bio_get_first_bvec(bio, bv); - if (bv->bv_len == bio->bi_iter.bi_size) - return; /* this bio only has a single bvec */ + if (unlikely(!bio_multiple_segments(bio))) { + *bv = bio_iovec(bio); + return; + } bio_advance_iter(bio, &iter, iter.bi_size); @@ -296,23 +289,6 @@ static inline void bio_get_last_bvec(struct bio *bio, struct bio_vec *bv) bv->bv_len = iter.bi_bvec_done; } -static inline 
struct bio_vec *bio_first_bvec_all(struct bio *bio) -{ - WARN_ON_ONCE(bio_flagged(bio, BIO_CLONED)); - return bio->bi_io_vec; -} - -static inline struct page *bio_first_page_all(struct bio *bio) -{ - return bio_first_bvec_all(bio)->bv_page; -} - -static inline struct bio_vec *bio_last_bvec_all(struct bio *bio) -{ - WARN_ON_ONCE(bio_flagged(bio, BIO_CLONED)); - return &bio->bi_io_vec[bio->bi_vcnt - 1]; -} - enum bip_flags { BIP_BLOCK_INTEGRITY = 1 << 0, /* block layer owns integrity data */ BIP_MAPPED_INTEGRITY = 1 << 1, /* ref tag has been remapped */ @@ -329,16 +305,17 @@ struct bio_integrity_payload { struct bvec_iter bip_iter; + bio_end_io_t *bip_end_io; /* saved I/O completion fn */ + + unsigned short bip_slab; /* slab the bip came from */ unsigned short bip_vcnt; /* # of integrity bio_vecs */ unsigned short bip_max_vcnt; /* integrity bio_vec slots */ unsigned short bip_flags; /* control flags */ - struct bvec_iter bio_iter; /* for rewinding parent bio */ - struct work_struct bip_work; /* I/O completion */ struct bio_vec *bip_vec; - struct bio_vec bip_inline_vecs[];/* embedded bvec array */ + struct bio_vec bip_inline_vecs[0];/* embedded bvec array */ }; #if defined(CONFIG_BLK_DEV_INTEGRITY) @@ -374,8 +351,8 @@ static inline void bip_set_seed(struct bio_integrity_payload *bip, #endif /* CONFIG_BLK_DEV_INTEGRITY */ -void bio_trim(struct bio *bio, sector_t offset, sector_t size); -extern struct bio *bio_split(struct bio *bio, int sectors, +extern void bio_trim(struct bio *bio, int offset, int size); +extern struct bio *bio_split(struct bio *bio, unsigned int sectors, gfp_t gfp, struct bio_set *bs); /** @@ -388,7 +365,7 @@ extern struct bio *bio_split(struct bio *bio, int sectors, * Returns a bio representing the next @sectors of @bio - if the bio is smaller * than @sectors, returns the original bio unchanged. 
*/ -static inline struct bio *bio_next_split(struct bio *bio, int sectors, +static inline struct bio *bio_next_split(struct bio *bio, unsigned int sectors, gfp_t gfp, struct bio_set *bs) { if (sectors >= bio_sectors(bio)) @@ -397,130 +374,169 @@ static inline struct bio *bio_next_split(struct bio *bio, int sectors, return bio_split(bio, sectors, gfp, bs); } -enum { - BIOSET_NEED_BVECS = BIT(0), - BIOSET_NEED_RESCUER = BIT(1), - BIOSET_PERCPU_CACHE = BIT(2), -}; -extern int bioset_init(struct bio_set *, unsigned int, unsigned int, int flags); -extern void bioset_exit(struct bio_set *); -extern int biovec_init_pool(mempool_t *pool, int pool_entries); -extern int bioset_init_from_src(struct bio_set *bs, struct bio_set *src); +extern struct bio_set *bioset_create(unsigned int, unsigned int); +extern struct bio_set *bioset_create_nobvec(unsigned int, unsigned int); +extern void bioset_free(struct bio_set *); +extern mempool_t *biovec_create_pool(int pool_entries); -struct bio *bio_alloc_bioset(gfp_t gfp, unsigned short nr_iovecs, - struct bio_set *bs); -struct bio *bio_alloc_kiocb(struct kiocb *kiocb, unsigned short nr_vecs, - struct bio_set *bs); -struct bio *bio_kmalloc(gfp_t gfp_mask, unsigned short nr_iovecs); +extern struct bio *bio_alloc_bioset(gfp_t, int, struct bio_set *); extern void bio_put(struct bio *); extern void __bio_clone_fast(struct bio *, struct bio *); extern struct bio *bio_clone_fast(struct bio *, gfp_t, struct bio_set *); +extern struct bio *bio_clone_bioset(struct bio *, gfp_t, struct bio_set *bs); -extern struct bio_set fs_bio_set; +extern struct bio_set *fs_bio_set; -static inline struct bio *bio_alloc(gfp_t gfp_mask, unsigned short nr_iovecs) +static inline struct bio *bio_alloc(gfp_t gfp_mask, unsigned int nr_iovecs) { - return bio_alloc_bioset(gfp_mask, nr_iovecs, &fs_bio_set); + return bio_alloc_bioset(gfp_mask, nr_iovecs, fs_bio_set); } -extern blk_qc_t submit_bio(struct bio *); +static inline struct bio *bio_clone(struct bio *bio, gfp_t 
gfp_mask) +{ + return bio_clone_bioset(bio, gfp_mask, fs_bio_set); +} + +static inline struct bio *bio_kmalloc(gfp_t gfp_mask, unsigned int nr_iovecs) +{ + return bio_alloc_bioset(gfp_mask, nr_iovecs, NULL); +} + +static inline struct bio *bio_clone_kmalloc(struct bio *bio, gfp_t gfp_mask) +{ + return bio_clone_bioset(bio, gfp_mask, NULL); + +} extern void bio_endio(struct bio *); static inline void bio_io_error(struct bio *bio) { - bio->bi_status = BLK_STS_IOERR; + bio->bi_error = -EIO; bio_endio(bio); } -static inline void bio_wouldblock_error(struct bio *bio) -{ - bio_set_flag(bio, BIO_QUIET); - bio->bi_status = BLK_STS_AGAIN; - bio_endio(bio); -} - -/* - * Calculate number of bvec segments that should be allocated to fit data - * pointed by @iter. If @iter is backed by bvec it's going to be reused - * instead of allocating a new one. - */ -static inline int bio_iov_vecs_to_alloc(struct iov_iter *iter, int max_segs) -{ - if (iov_iter_is_bvec(iter)) - return 0; - return iov_iter_npages(iter, max_segs); -} - struct request_queue; +extern int bio_phys_segments(struct request_queue *, struct bio *); extern int submit_bio_wait(struct bio *bio); extern void bio_advance(struct bio *, unsigned); -extern void bio_init(struct bio *bio, struct bio_vec *table, - unsigned short max_vecs); -extern void bio_uninit(struct bio *); +extern void bio_init(struct bio *); extern void bio_reset(struct bio *); void bio_chain(struct bio *, struct bio *); extern int bio_add_page(struct bio *, struct page *, unsigned int,unsigned int); extern int bio_add_pc_page(struct request_queue *, struct bio *, struct page *, unsigned int, unsigned int); -int bio_add_zone_append_page(struct bio *bio, struct page *page, - unsigned int len, unsigned int offset); -bool __bio_try_merge_page(struct bio *bio, struct page *page, - unsigned int len, unsigned int off, bool *same_page); -void __bio_add_page(struct bio *bio, struct page *page, - unsigned int len, unsigned int off); -int 
bio_iov_iter_get_pages(struct bio *bio, struct iov_iter *iter); -void bio_release_pages(struct bio *bio, bool mark_dirty); +struct rq_map_data; +extern struct bio *bio_map_user_iov(struct request_queue *, + const struct iov_iter *, gfp_t); +extern void bio_unmap_user(struct bio *); +extern struct bio *bio_map_kern(struct request_queue *, void *, unsigned int, + gfp_t); +extern struct bio *bio_copy_kern(struct request_queue *, void *, unsigned int, + gfp_t, int); extern void bio_set_pages_dirty(struct bio *bio); extern void bio_check_pages_dirty(struct bio *bio); -extern void bio_copy_data_iter(struct bio *dst, struct bvec_iter *dst_iter, - struct bio *src, struct bvec_iter *src_iter); +void generic_start_io_acct(int rw, unsigned long sectors, + struct hd_struct *part); +void generic_end_io_acct(int rw, struct hd_struct *part, + unsigned long start_time); + +#ifndef ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE +# error "You should define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE for your platform" +#endif +#if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE +extern void bio_flush_dcache_pages(struct bio *bi); +#else +static inline void bio_flush_dcache_pages(struct bio *bi) +{ +} +#endif + extern void bio_copy_data(struct bio *dst, struct bio *src); +extern int bio_alloc_pages(struct bio *bio, gfp_t gfp); extern void bio_free_pages(struct bio *bio); -void bio_truncate(struct bio *bio, unsigned new_size); -void guard_bio_eod(struct bio *bio); + +extern struct bio *bio_copy_user_iov(struct request_queue *, + struct rq_map_data *, + const struct iov_iter *, + gfp_t); +extern int bio_uncopy_user(struct bio *); void zero_fill_bio(struct bio *bio); - -extern const char *bio_devname(struct bio *bio, char *buffer); - -#define bio_set_dev(bio, bdev) \ -do { \ - bio_clear_flag(bio, BIO_REMAPPED); \ - if ((bio)->bi_bdev != (bdev)) \ - bio_clear_flag(bio, BIO_THROTTLED); \ - (bio)->bi_bdev = (bdev); \ - bio_associate_blkg(bio); \ -} while (0) - -#define bio_copy_dev(dst, src) \ -do { \ - bio_clear_flag(dst, 
BIO_REMAPPED); \ - (dst)->bi_bdev = (src)->bi_bdev; \ - bio_clone_blkg_association(dst, src); \ -} while (0) - -#define bio_dev(bio) \ - disk_devt((bio)->bi_bdev->bd_disk) +extern struct bio_vec *bvec_alloc(gfp_t, int, unsigned long *, mempool_t *); +extern void bvec_free(mempool_t *, struct bio_vec *, unsigned int); +extern unsigned int bvec_nr_vecs(unsigned short idx); #ifdef CONFIG_BLK_CGROUP -void bio_associate_blkg(struct bio *bio); -void bio_associate_blkg_from_css(struct bio *bio, - struct cgroup_subsys_state *css); -void bio_clone_blkg_association(struct bio *dst, struct bio *src); +int bio_associate_blkcg(struct bio *bio, struct cgroup_subsys_state *blkcg_css); +int bio_associate_current(struct bio *bio); +void bio_disassociate_task(struct bio *bio); +void bio_clone_blkcg_association(struct bio *dst, struct bio *src); #else /* CONFIG_BLK_CGROUP */ -static inline void bio_associate_blkg(struct bio *bio) { } -static inline void bio_associate_blkg_from_css(struct bio *bio, - struct cgroup_subsys_state *css) -{ } -static inline void bio_clone_blkg_association(struct bio *dst, - struct bio *src) { } +static inline int bio_associate_blkcg(struct bio *bio, + struct cgroup_subsys_state *blkcg_css) { return 0; } +static inline int bio_associate_current(struct bio *bio) { return -ENOENT; } +static inline void bio_disassociate_task(struct bio *bio) { } +static inline void bio_clone_blkcg_association(struct bio *dst, + struct bio *src) { } #endif /* CONFIG_BLK_CGROUP */ +#ifdef CONFIG_HIGHMEM +/* + * remember never ever reenable interrupts between a bvec_kmap_irq and + * bvec_kunmap_irq! 
+ */ +static inline char *bvec_kmap_irq(struct bio_vec *bvec, unsigned long *flags) +{ + unsigned long addr; + + /* + * might not be a highmem page, but the preempt/irq count + * balancing is a lot nicer this way + */ + local_irq_save(*flags); + addr = (unsigned long) kmap_atomic(bvec->bv_page); + + BUG_ON(addr & ~PAGE_MASK); + + return (char *) addr + bvec->bv_offset; +} + +static inline void bvec_kunmap_irq(char *buffer, unsigned long *flags) +{ + unsigned long ptr = (unsigned long) buffer & PAGE_MASK; + + kunmap_atomic((void *) ptr); + local_irq_restore(*flags); +} + +#else +static inline char *bvec_kmap_irq(struct bio_vec *bvec, unsigned long *flags) +{ + return page_address(bvec->bv_page) + bvec->bv_offset; +} + +static inline void bvec_kunmap_irq(char *buffer, unsigned long *flags) +{ + *flags = 0; +} +#endif + +static inline char *__bio_kmap_irq(struct bio *bio, struct bvec_iter iter, + unsigned long *flags) +{ + return bvec_kmap_irq(&bio_iter_iovec(bio, iter), flags); +} +#define __bio_kunmap_irq(buf, flags) bvec_kunmap_irq(buf, flags) + +#define bio_kmap_irq(bio, flags) \ + __bio_kmap_irq((bio), (bio)->bi_iter, (flags)) +#define bio_kunmap_irq(buf,flags) __bio_kunmap_irq(buf, flags) + /* * BIO list management for use by remapping drivers (e.g. DM or MD) and loop. 
* @@ -660,19 +676,13 @@ struct bio_set { struct kmem_cache *bio_slab; unsigned int front_pad; - /* - * per-cpu bio alloc cache - */ - struct bio_alloc_cache __percpu *cache; - - mempool_t bio_pool; - mempool_t bvec_pool; + mempool_t *bio_pool; + mempool_t *bvec_pool; #if defined(CONFIG_BLK_DEV_INTEGRITY) - mempool_t bio_integrity_pool; - mempool_t bvec_integrity_pool; + mempool_t *bio_integrity_pool; + mempool_t *bvec_integrity_pool; #endif - unsigned int back_pad; /* * Deadlock avoidance for stacking block drivers: see comments in * bio_alloc_bioset() for details @@ -681,17 +691,19 @@ struct bio_set { struct bio_list rescue_list; struct work_struct rescue_work; struct workqueue_struct *rescue_workqueue; - - /* - * Hot un-plug notifier for the per-cpu cache, if used - */ - struct hlist_node cpuhp_dead; }; -static inline bool bioset_initialized(struct bio_set *bs) -{ - return bs->bio_slab != NULL; -} +struct biovec_slab { + int nr_vecs; + char *name; + struct kmem_cache *slab; +}; + +/* + * a small number of entries is fine, not going to be performance critical. 
+ * basically we just need to survive + */ +#define BIO_SPLIT_ENTRIES 2 #if defined(CONFIG_BLK_DEV_INTEGRITY) @@ -703,10 +715,13 @@ static inline bool bioset_initialized(struct bio_set *bs) bip_for_each_vec(_bvl, _bio->bi_integrity, _iter) extern struct bio_integrity_payload *bio_integrity_alloc(struct bio *, gfp_t, unsigned int); +extern void bio_integrity_free(struct bio *); extern int bio_integrity_add_page(struct bio *, struct page *, unsigned int, unsigned int); -extern bool bio_integrity_prep(struct bio *); +extern bool bio_integrity_enabled(struct bio *bio); +extern int bio_integrity_prep(struct bio *); +extern void bio_integrity_endio(struct bio *); extern void bio_integrity_advance(struct bio *, unsigned int); -extern void bio_integrity_trim(struct bio *); +extern void bio_integrity_trim(struct bio *, unsigned int, unsigned int); extern int bio_integrity_clone(struct bio *, struct bio *, gfp_t); extern int bioset_integrity_create(struct bio_set *, int); extern void bioset_integrity_free(struct bio_set *); @@ -719,6 +734,11 @@ static inline void *bio_integrity(struct bio *bio) return NULL; } +static inline bool bio_integrity_enabled(struct bio *bio) +{ + return false; +} + static inline int bioset_integrity_create(struct bio_set *bs, int pool_size) { return 0; @@ -729,9 +749,14 @@ static inline void bioset_integrity_free (struct bio_set *bs) return; } -static inline bool bio_integrity_prep(struct bio *bio) +static inline int bio_integrity_prep(struct bio *bio) { - return true; + return 0; +} + +static inline void bio_integrity_free(struct bio *bio) +{ + return; } static inline int bio_integrity_clone(struct bio *bio, struct bio *bio_src, @@ -746,7 +771,8 @@ static inline void bio_integrity_advance(struct bio *bio, return; } -static inline void bio_integrity_trim(struct bio *bio) +static inline void bio_integrity_trim(struct bio *bio, unsigned int offset, + unsigned int sectors) { return; } @@ -775,20 +801,5 @@ static inline int bio_integrity_add_page(struct 
bio *bio, struct page *page, #endif /* CONFIG_BLK_DEV_INTEGRITY */ -/* - * Mark a bio as polled. Note that for async polled IO, the caller must - * expect -EWOULDBLOCK if we cannot allocate a request (or other resources). - * We cannot block waiting for requests on polled IO, as those completions - * must be found by the caller. This is different than IRQ driven IO, where - * it's safe to wait for IO to complete. - */ -static inline void bio_set_polled(struct bio *bio, struct kiocb *kiocb) -{ - bio->bi_opf |= REQ_HIPRI; - if (!is_sync_kiocb(kiocb)) - bio->bi_opf |= REQ_NOWAIT; -} - -struct bio *blk_next_bio(struct bio *bio, unsigned int nr_pages, gfp_t gfp); - +#endif /* CONFIG_BLOCK */ #endif /* __LINUX_BIO_H */ diff --git a/include/linux/bit_spinlock.h b/include/linux/bit_spinlock.h index bbc4730a65..3b5bafce43 100644 --- a/include/linux/bit_spinlock.h +++ b/include/linux/bit_spinlock.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef __LINUX_BIT_SPINLOCK_H #define __LINUX_BIT_SPINLOCK_H diff --git a/include/linux/bitfield.h b/include/linux/bitfield.h index 4e035aca6f..f6505d8306 100644 --- a/include/linux/bitfield.h +++ b/include/linux/bitfield.h @@ -1,14 +1,21 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright (C) 2014 Felix Fietkau * Copyright (C) 2004 - 2009 Ivo van Doorn + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
*/ #ifndef _LINUX_BITFIELD_H #define _LINUX_BITFIELD_H -#include -#include +#include /* * Bitfield access macros @@ -45,7 +52,7 @@ ({ \ BUILD_BUG_ON_MSG(!__builtin_constant_p(_mask), \ _pfx "mask is not constant"); \ - BUILD_BUG_ON_MSG((_mask) == 0, _pfx "mask is zero"); \ + BUILD_BUG_ON_MSG(!(_mask), _pfx "mask is zero"); \ BUILD_BUG_ON_MSG(__builtin_constant_p(_val) ? \ ~((_mask) >> __bf_shf(_mask)) & (_val) : 0, \ _pfx "value too large for the field"); \ @@ -55,32 +62,6 @@ (1ULL << __bf_shf(_mask))); \ }) -/** - * FIELD_MAX() - produce the maximum value representable by a field - * @_mask: shifted mask defining the field's length and position - * - * FIELD_MAX() returns the maximum value that can be held in the field - * specified by @_mask. - */ -#define FIELD_MAX(_mask) \ - ({ \ - __BF_FIELD_CHECK(_mask, 0ULL, 0ULL, "FIELD_MAX: "); \ - (typeof(_mask))((_mask) >> __bf_shf(_mask)); \ - }) - -/** - * FIELD_FIT() - check if value fits in the field - * @_mask: shifted mask defining the field's length and position - * @_val: value to test against the field - * - * Return: true if @_val can fit inside @_mask, false if @_val is too big. - */ -#define FIELD_FIT(_mask, _val) \ - ({ \ - __BF_FIELD_CHECK(_mask, 0ULL, 0ULL, "FIELD_FIT: "); \ - !((((typeof(_mask))_val) << __bf_shf(_mask)) & ~(_mask)); \ - }) - /** * FIELD_PREP() - prepare a bitfield element * @_mask: shifted mask defining the field's length and position @@ -98,7 +79,7 @@ /** * FIELD_GET() - extract a bitfield element * @_mask: shifted mask defining the field's length and position - * @_reg: value of entire bitfield + * @_reg: 32bit value of entire bitfield * * FIELD_GET() extracts the field specified by @_mask from the * bitfield passed in as @_reg by masking and shifting it down. 
@@ -109,51 +90,4 @@ (typeof(_mask))(((_reg) & (_mask)) >> __bf_shf(_mask)); \ }) -extern void __compiletime_error("value doesn't fit into mask") -__field_overflow(void); -extern void __compiletime_error("bad bitfield mask") -__bad_mask(void); -static __always_inline u64 field_multiplier(u64 field) -{ - if ((field | (field - 1)) & ((field | (field - 1)) + 1)) - __bad_mask(); - return field & -field; -} -static __always_inline u64 field_mask(u64 field) -{ - return field / field_multiplier(field); -} -#define field_max(field) ((typeof(field))field_mask(field)) -#define ____MAKE_OP(type,base,to,from) \ -static __always_inline __##type type##_encode_bits(base v, base field) \ -{ \ - if (__builtin_constant_p(v) && (v & ~field_mask(field))) \ - __field_overflow(); \ - return to((v & field_mask(field)) * field_multiplier(field)); \ -} \ -static __always_inline __##type type##_replace_bits(__##type old, \ - base val, base field) \ -{ \ - return (old & ~to(field)) | type##_encode_bits(val, field); \ -} \ -static __always_inline void type##p_replace_bits(__##type *p, \ - base val, base field) \ -{ \ - *p = (*p & ~to(field)) | type##_encode_bits(val, field); \ -} \ -static __always_inline base type##_get_bits(__##type v, base field) \ -{ \ - return (from(v) & field)/field_multiplier(field); \ -} -#define __MAKE_OP(size) \ - ____MAKE_OP(le##size,u##size,cpu_to_le##size,le##size##_to_cpu) \ - ____MAKE_OP(be##size,u##size,cpu_to_be##size,be##size##_to_cpu) \ - ____MAKE_OP(u##size,u##size,,) -____MAKE_OP(u8,u8,,) -__MAKE_OP(16) -__MAKE_OP(32) -__MAKE_OP(64) -#undef __MAKE_OP -#undef ____MAKE_OP - #endif diff --git a/include/linux/bitmap.h b/include/linux/bitmap.h index 37f36dad18..7e677c29df 100644 --- a/include/linux/bitmap.h +++ b/include/linux/bitmap.h @@ -1,16 +1,12 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef __LINUX_BITMAP_H #define __LINUX_BITMAP_H #ifndef __ASSEMBLY__ -#include -#include -#include -#include #include - -struct device; +#include +#include +#include /* 
* bitmaps provide bit arrays that consume one or more unsigned @@ -25,155 +21,107 @@ struct device; * See lib/bitmap.c for more details. */ -/** - * DOC: bitmap overview - * +/* * The available bitmap operations and their rough meaning in the * case that the bitmap is a single unsigned long are thus: * - * The generated code is more efficient when nbits is known at - * compile-time and at most BITS_PER_LONG. + * Note that nbits should be always a compile time evaluable constant. + * Otherwise many inlines will generate horrible code. * - * :: - * - * bitmap_zero(dst, nbits) *dst = 0UL - * bitmap_fill(dst, nbits) *dst = ~0UL - * bitmap_copy(dst, src, nbits) *dst = *src - * bitmap_and(dst, src1, src2, nbits) *dst = *src1 & *src2 - * bitmap_or(dst, src1, src2, nbits) *dst = *src1 | *src2 - * bitmap_xor(dst, src1, src2, nbits) *dst = *src1 ^ *src2 - * bitmap_andnot(dst, src1, src2, nbits) *dst = *src1 & ~(*src2) - * bitmap_complement(dst, src, nbits) *dst = ~(*src) - * bitmap_equal(src1, src2, nbits) Are *src1 and *src2 equal? - * bitmap_intersects(src1, src2, nbits) Do *src1 and *src2 overlap? - * bitmap_subset(src1, src2, nbits) Is *src1 a subset of *src2? - * bitmap_empty(src, nbits) Are all bits zero in *src? - * bitmap_full(src, nbits) Are all bits set in *src? 
- * bitmap_weight(src, nbits) Hamming Weight: number set bits - * bitmap_set(dst, pos, nbits) Set specified bit area - * bitmap_clear(dst, pos, nbits) Clear specified bit area - * bitmap_find_next_zero_area(buf, len, pos, n, mask) Find bit free area - * bitmap_find_next_zero_area_off(buf, len, pos, n, mask, mask_off) as above - * bitmap_next_clear_region(map, &start, &end, nbits) Find next clear region - * bitmap_next_set_region(map, &start, &end, nbits) Find next set region - * bitmap_for_each_clear_region(map, rs, re, start, end) - * Iterate over all clear regions - * bitmap_for_each_set_region(map, rs, re, start, end) - * Iterate over all set regions - * bitmap_shift_right(dst, src, n, nbits) *dst = *src >> n - * bitmap_shift_left(dst, src, n, nbits) *dst = *src << n - * bitmap_cut(dst, src, first, n, nbits) Cut n bits from first, copy rest - * bitmap_replace(dst, old, new, mask, nbits) *dst = (*old & ~(*mask)) | (*new & *mask) - * bitmap_remap(dst, src, old, new, nbits) *dst = map(old, new)(src) - * bitmap_bitremap(oldbit, old, new, nbits) newbit = map(old, new)(oldbit) - * bitmap_onto(dst, orig, relmap, nbits) *dst = orig relative to relmap - * bitmap_fold(dst, orig, sz, nbits) dst bits = orig bits mod sz - * bitmap_parse(buf, buflen, dst, nbits) Parse bitmap dst from kernel buf - * bitmap_parse_user(ubuf, ulen, dst, nbits) Parse bitmap dst from user buf - * bitmap_parselist(buf, dst, nbits) Parse bitmap dst from kernel buf - * bitmap_parselist_user(buf, dst, nbits) Parse bitmap dst from user buf - * bitmap_find_free_region(bitmap, bits, order) Find and allocate bit region - * bitmap_release_region(bitmap, pos, order) Free specified bit region - * bitmap_allocate_region(bitmap, pos, order) Allocate specified bit region - * bitmap_from_arr32(dst, buf, nbits) Copy nbits from u32[] buf to dst - * bitmap_to_arr32(buf, src, nbits) Copy nbits from buf to u32[] dst - * bitmap_get_value8(map, start) Get 8bit value from map at start - * bitmap_set_value8(map, value, 
start) Set 8bit value to map at start - * - * Note, bitmap_zero() and bitmap_fill() operate over the region of - * unsigned longs, that is, bits behind bitmap till the unsigned long - * boundary will be zeroed or filled as well. Consider to use - * bitmap_clear() or bitmap_set() to make explicit zeroing or filling - * respectively. + * bitmap_zero(dst, nbits) *dst = 0UL + * bitmap_fill(dst, nbits) *dst = ~0UL + * bitmap_copy(dst, src, nbits) *dst = *src + * bitmap_and(dst, src1, src2, nbits) *dst = *src1 & *src2 + * bitmap_or(dst, src1, src2, nbits) *dst = *src1 | *src2 + * bitmap_xor(dst, src1, src2, nbits) *dst = *src1 ^ *src2 + * bitmap_andnot(dst, src1, src2, nbits) *dst = *src1 & ~(*src2) + * bitmap_complement(dst, src, nbits) *dst = ~(*src) + * bitmap_equal(src1, src2, nbits) Are *src1 and *src2 equal? + * bitmap_intersects(src1, src2, nbits) Do *src1 and *src2 overlap? + * bitmap_subset(src1, src2, nbits) Is *src1 a subset of *src2? + * bitmap_empty(src, nbits) Are all bits zero in *src? + * bitmap_full(src, nbits) Are all bits set in *src? 
+ * bitmap_weight(src, nbits) Hamming Weight: number set bits + * bitmap_set(dst, pos, nbits) Set specified bit area + * bitmap_clear(dst, pos, nbits) Clear specified bit area + * bitmap_find_next_zero_area(buf, len, pos, n, mask) Find bit free area + * bitmap_find_next_zero_area_off(buf, len, pos, n, mask) as above + * bitmap_shift_right(dst, src, n, nbits) *dst = *src >> n + * bitmap_shift_left(dst, src, n, nbits) *dst = *src << n + * bitmap_remap(dst, src, old, new, nbits) *dst = map(old, new)(src) + * bitmap_bitremap(oldbit, old, new, nbits) newbit = map(old, new)(oldbit) + * bitmap_onto(dst, orig, relmap, nbits) *dst = orig relative to relmap + * bitmap_fold(dst, orig, sz, nbits) dst bits = orig bits mod sz + * bitmap_parse(buf, buflen, dst, nbits) Parse bitmap dst from kernel buf + * bitmap_parse_user(ubuf, ulen, dst, nbits) Parse bitmap dst from user buf + * bitmap_parselist(buf, dst, nbits) Parse bitmap dst from kernel buf + * bitmap_parselist_user(buf, dst, nbits) Parse bitmap dst from user buf + * bitmap_find_free_region(bitmap, bits, order) Find and allocate bit region + * bitmap_release_region(bitmap, pos, order) Free specified bit region + * bitmap_allocate_region(bitmap, pos, order) Allocate specified bit region + * bitmap_from_u32array(dst, nbits, buf, nwords) *dst = *buf (nwords 32b words) + * bitmap_to_u32array(buf, nwords, src, nbits) *buf = *dst (nwords 32b words) */ -/** - * DOC: bitmap bitops - * - * Also the following operations in asm/bitops.h apply to bitmaps.:: - * - * set_bit(bit, addr) *addr |= bit - * clear_bit(bit, addr) *addr &= ~bit - * change_bit(bit, addr) *addr ^= bit - * test_bit(bit, addr) Is bit set in *addr? 
- * test_and_set_bit(bit, addr) Set bit and return old value - * test_and_clear_bit(bit, addr) Clear bit and return old value - * test_and_change_bit(bit, addr) Change bit and return old value - * find_first_zero_bit(addr, nbits) Position first zero bit in *addr - * find_first_bit(addr, nbits) Position first set bit in *addr - * find_next_zero_bit(addr, nbits, bit) - * Position next zero bit in *addr >= bit - * find_next_bit(addr, nbits, bit) Position next set bit in *addr >= bit - * find_next_and_bit(addr1, addr2, nbits, bit) - * Same as find_next_bit, but in - * (*addr1 & *addr2) +/* + * Also the following operations in asm/bitops.h apply to bitmaps. * + * set_bit(bit, addr) *addr |= bit + * clear_bit(bit, addr) *addr &= ~bit + * change_bit(bit, addr) *addr ^= bit + * test_bit(bit, addr) Is bit set in *addr? + * test_and_set_bit(bit, addr) Set bit and return old value + * test_and_clear_bit(bit, addr) Clear bit and return old value + * test_and_change_bit(bit, addr) Change bit and return old value + * find_first_zero_bit(addr, nbits) Position first zero bit in *addr + * find_first_bit(addr, nbits) Position first set bit in *addr + * find_next_zero_bit(addr, nbits, bit) Position next zero bit in *addr >= bit + * find_next_bit(addr, nbits, bit) Position next set bit in *addr >= bit */ -/** - * DOC: declare bitmap +/* * The DECLARE_BITMAP(name,bits) macro, in linux/types.h, can be used * to declare an array named 'name' of just enough unsigned longs to * contain all bit positions from 0 to 'bits' - 1. */ -/* - * Allocation and deallocation of bitmap. - * Provided in lib/bitmap.c to avoid circular dependency. - */ -unsigned long *bitmap_alloc(unsigned int nbits, gfp_t flags); -unsigned long *bitmap_zalloc(unsigned int nbits, gfp_t flags); -void bitmap_free(const unsigned long *bitmap); - -/* Managed variants of the above. 
*/ -unsigned long *devm_bitmap_alloc(struct device *dev, - unsigned int nbits, gfp_t flags); -unsigned long *devm_bitmap_zalloc(struct device *dev, - unsigned int nbits, gfp_t flags); - /* * lib/bitmap.c provides these functions: */ -int __bitmap_equal(const unsigned long *bitmap1, - const unsigned long *bitmap2, unsigned int nbits); -bool __pure __bitmap_or_equal(const unsigned long *src1, - const unsigned long *src2, - const unsigned long *src3, - unsigned int nbits); -void __bitmap_complement(unsigned long *dst, const unsigned long *src, - unsigned int nbits); -void __bitmap_shift_right(unsigned long *dst, const unsigned long *src, - unsigned int shift, unsigned int nbits); -void __bitmap_shift_left(unsigned long *dst, const unsigned long *src, - unsigned int shift, unsigned int nbits); -void bitmap_cut(unsigned long *dst, const unsigned long *src, - unsigned int first, unsigned int cut, unsigned int nbits); -int __bitmap_and(unsigned long *dst, const unsigned long *bitmap1, - const unsigned long *bitmap2, unsigned int nbits); -void __bitmap_or(unsigned long *dst, const unsigned long *bitmap1, - const unsigned long *bitmap2, unsigned int nbits); -void __bitmap_xor(unsigned long *dst, const unsigned long *bitmap1, - const unsigned long *bitmap2, unsigned int nbits); -int __bitmap_andnot(unsigned long *dst, const unsigned long *bitmap1, - const unsigned long *bitmap2, unsigned int nbits); -void __bitmap_replace(unsigned long *dst, - const unsigned long *old, const unsigned long *new, - const unsigned long *mask, unsigned int nbits); -int __bitmap_intersects(const unsigned long *bitmap1, +extern int __bitmap_empty(const unsigned long *bitmap, unsigned int nbits); +extern int __bitmap_full(const unsigned long *bitmap, unsigned int nbits); +extern int __bitmap_equal(const unsigned long *bitmap1, + const unsigned long *bitmap2, unsigned int nbits); +extern void __bitmap_complement(unsigned long *dst, const unsigned long *src, + unsigned int nbits); +extern void 
__bitmap_shift_right(unsigned long *dst, const unsigned long *src, + unsigned int shift, unsigned int nbits); +extern void __bitmap_shift_left(unsigned long *dst, const unsigned long *src, + unsigned int shift, unsigned int nbits); +extern int __bitmap_and(unsigned long *dst, const unsigned long *bitmap1, const unsigned long *bitmap2, unsigned int nbits); -int __bitmap_subset(const unsigned long *bitmap1, - const unsigned long *bitmap2, unsigned int nbits); -int __bitmap_weight(const unsigned long *bitmap, unsigned int nbits); -void __bitmap_set(unsigned long *map, unsigned int start, int len); -void __bitmap_clear(unsigned long *map, unsigned int start, int len); +extern void __bitmap_or(unsigned long *dst, const unsigned long *bitmap1, + const unsigned long *bitmap2, unsigned int nbits); +extern void __bitmap_xor(unsigned long *dst, const unsigned long *bitmap1, + const unsigned long *bitmap2, unsigned int nbits); +extern int __bitmap_andnot(unsigned long *dst, const unsigned long *bitmap1, + const unsigned long *bitmap2, unsigned int nbits); +extern int __bitmap_intersects(const unsigned long *bitmap1, + const unsigned long *bitmap2, unsigned int nbits); +extern int __bitmap_subset(const unsigned long *bitmap1, + const unsigned long *bitmap2, unsigned int nbits); +extern int __bitmap_weight(const unsigned long *bitmap, unsigned int nbits); -unsigned long bitmap_find_next_zero_area_off(unsigned long *map, - unsigned long size, - unsigned long start, - unsigned int nr, - unsigned long align_mask, - unsigned long align_offset); +extern void bitmap_set(unsigned long *map, unsigned int start, int len); +extern void bitmap_clear(unsigned long *map, unsigned int start, int len); + +extern unsigned long bitmap_find_next_zero_area_off(unsigned long *map, + unsigned long size, + unsigned long start, + unsigned int nr, + unsigned long align_mask, + unsigned long align_offset); /** * bitmap_find_next_zero_area - find a contiguous aligned zero area @@ -198,92 +146,79 @@ 
bitmap_find_next_zero_area(unsigned long *map, align_mask, 0); } -int bitmap_parse(const char *buf, unsigned int buflen, +extern int __bitmap_parse(const char *buf, unsigned int buflen, int is_user, unsigned long *dst, int nbits); -int bitmap_parse_user(const char __user *ubuf, unsigned int ulen, +extern int bitmap_parse_user(const char __user *ubuf, unsigned int ulen, unsigned long *dst, int nbits); -int bitmap_parselist(const char *buf, unsigned long *maskp, +extern int bitmap_parselist(const char *buf, unsigned long *maskp, int nmaskbits); -int bitmap_parselist_user(const char __user *ubuf, unsigned int ulen, +extern int bitmap_parselist_user(const char __user *ubuf, unsigned int ulen, unsigned long *dst, int nbits); -void bitmap_remap(unsigned long *dst, const unsigned long *src, +extern void bitmap_remap(unsigned long *dst, const unsigned long *src, const unsigned long *old, const unsigned long *new, unsigned int nbits); -int bitmap_bitremap(int oldbit, +extern int bitmap_bitremap(int oldbit, const unsigned long *old, const unsigned long *new, int bits); -void bitmap_onto(unsigned long *dst, const unsigned long *orig, +extern void bitmap_onto(unsigned long *dst, const unsigned long *orig, const unsigned long *relmap, unsigned int bits); -void bitmap_fold(unsigned long *dst, const unsigned long *orig, +extern void bitmap_fold(unsigned long *dst, const unsigned long *orig, unsigned int sz, unsigned int nbits); -int bitmap_find_free_region(unsigned long *bitmap, unsigned int bits, int order); -void bitmap_release_region(unsigned long *bitmap, unsigned int pos, int order); -int bitmap_allocate_region(unsigned long *bitmap, unsigned int pos, int order); - +extern int bitmap_find_free_region(unsigned long *bitmap, unsigned int bits, int order); +extern void bitmap_release_region(unsigned long *bitmap, unsigned int pos, int order); +extern int bitmap_allocate_region(unsigned long *bitmap, unsigned int pos, int order); +extern unsigned int 
bitmap_from_u32array(unsigned long *bitmap, + unsigned int nbits, + const u32 *buf, + unsigned int nwords); +extern unsigned int bitmap_to_u32array(u32 *buf, + unsigned int nwords, + const unsigned long *bitmap, + unsigned int nbits); #ifdef __BIG_ENDIAN -void bitmap_copy_le(unsigned long *dst, const unsigned long *src, unsigned int nbits); +extern void bitmap_copy_le(unsigned long *dst, const unsigned long *src, unsigned int nbits); #else #define bitmap_copy_le bitmap_copy #endif -unsigned int bitmap_ord_to_pos(const unsigned long *bitmap, unsigned int ord, unsigned int nbits); -int bitmap_print_to_pagebuf(bool list, char *buf, +extern unsigned int bitmap_ord_to_pos(const unsigned long *bitmap, unsigned int ord, unsigned int nbits); +extern int bitmap_print_to_pagebuf(bool list, char *buf, const unsigned long *maskp, int nmaskbits); -extern int bitmap_print_bitmask_to_buf(char *buf, const unsigned long *maskp, - int nmaskbits, loff_t off, size_t count); - -extern int bitmap_print_list_to_buf(char *buf, const unsigned long *maskp, - int nmaskbits, loff_t off, size_t count); - #define BITMAP_FIRST_WORD_MASK(start) (~0UL << ((start) & (BITS_PER_LONG - 1))) #define BITMAP_LAST_WORD_MASK(nbits) (~0UL >> (-(nbits) & (BITS_PER_LONG - 1))) +#define small_const_nbits(nbits) \ + (__builtin_constant_p(nbits) && (nbits) <= BITS_PER_LONG) + static inline void bitmap_zero(unsigned long *dst, unsigned int nbits) { - unsigned int len = BITS_TO_LONGS(nbits) * sizeof(unsigned long); - memset(dst, 0, len); + if (small_const_nbits(nbits)) + *dst = 0UL; + else { + unsigned int len = BITS_TO_LONGS(nbits) * sizeof(unsigned long); + memset(dst, 0, len); + } } static inline void bitmap_fill(unsigned long *dst, unsigned int nbits) { - unsigned int len = BITS_TO_LONGS(nbits) * sizeof(unsigned long); - memset(dst, 0xff, len); + unsigned int nlongs = BITS_TO_LONGS(nbits); + if (!small_const_nbits(nbits)) { + unsigned int len = (nlongs - 1) * sizeof(unsigned long); + memset(dst, 0xff, len); + 
} + dst[nlongs - 1] = BITMAP_LAST_WORD_MASK(nbits); } static inline void bitmap_copy(unsigned long *dst, const unsigned long *src, unsigned int nbits) { - unsigned int len = BITS_TO_LONGS(nbits) * sizeof(unsigned long); - memcpy(dst, src, len); + if (small_const_nbits(nbits)) + *dst = *src; + else { + unsigned int len = BITS_TO_LONGS(nbits) * sizeof(unsigned long); + memcpy(dst, src, len); + } } -/* - * Copy bitmap and clear tail bits in last word. - */ -static inline void bitmap_copy_clear_tail(unsigned long *dst, - const unsigned long *src, unsigned int nbits) -{ - bitmap_copy(dst, src, nbits); - if (nbits % BITS_PER_LONG) - dst[nbits / BITS_PER_LONG] &= BITMAP_LAST_WORD_MASK(nbits); -} - -/* - * On 32-bit systems bitmaps are represented as u32 arrays internally, and - * therefore conversion is not needed when copying data from/to arrays of u32. - */ -#if BITS_PER_LONG == 64 -void bitmap_from_arr32(unsigned long *bitmap, const u32 *buf, - unsigned int nbits); -void bitmap_to_arr32(u32 *buf, const unsigned long *bitmap, - unsigned int nbits); -#else -#define bitmap_from_arr32(bitmap, buf, nbits) \ - bitmap_copy_clear_tail((unsigned long *) (bitmap), \ - (const unsigned long *) (buf), (nbits)) -#define bitmap_to_arr32(buf, bitmap, nbits) \ - bitmap_copy_clear_tail((unsigned long *) (buf), \ - (const unsigned long *) (bitmap), (nbits)) -#endif - static inline int bitmap_and(unsigned long *dst, const unsigned long *src1, const unsigned long *src2, unsigned int nbits) { @@ -327,44 +262,18 @@ static inline void bitmap_complement(unsigned long *dst, const unsigned long *sr __bitmap_complement(dst, src, nbits); } -#ifdef __LITTLE_ENDIAN -#define BITMAP_MEM_ALIGNMENT 8 -#else -#define BITMAP_MEM_ALIGNMENT (8 * sizeof(unsigned long)) -#endif -#define BITMAP_MEM_MASK (BITMAP_MEM_ALIGNMENT - 1) - static inline int bitmap_equal(const unsigned long *src1, const unsigned long *src2, unsigned int nbits) { if (small_const_nbits(nbits)) return !((*src1 ^ *src2) & 
BITMAP_LAST_WORD_MASK(nbits)); - if (__builtin_constant_p(nbits & BITMAP_MEM_MASK) && - IS_ALIGNED(nbits, BITMAP_MEM_ALIGNMENT)) +#ifdef CONFIG_S390 + if (__builtin_constant_p(nbits) && (nbits % BITS_PER_LONG) == 0) return !memcmp(src1, src2, nbits / 8); +#endif return __bitmap_equal(src1, src2, nbits); } -/** - * bitmap_or_equal - Check whether the or of two bitmaps is equal to a third - * @src1: Pointer to bitmap 1 - * @src2: Pointer to bitmap 2 will be or'ed with bitmap 1 - * @src3: Pointer to bitmap 3. Compare to the result of *@src1 | *@src2 - * @nbits: number of bits in each of these bitmaps - * - * Returns: True if (*@src1 | *@src2) == *@src3, false otherwise - */ -static inline bool bitmap_or_equal(const unsigned long *src1, - const unsigned long *src2, - const unsigned long *src3, - unsigned int nbits) -{ - if (!small_const_nbits(nbits)) - return __bitmap_or_equal(src1, src2, src3, nbits); - - return !(((*src1 | *src2) ^ *src3) & BITMAP_LAST_WORD_MASK(nbits)); -} - static inline int bitmap_intersects(const unsigned long *src1, const unsigned long *src2, unsigned int nbits) { @@ -383,7 +292,7 @@ static inline int bitmap_subset(const unsigned long *src1, return __bitmap_subset(src1, src2, nbits); } -static inline bool bitmap_empty(const unsigned long *src, unsigned nbits) +static inline int bitmap_empty(const unsigned long *src, unsigned nbits) { if (small_const_nbits(nbits)) return ! (*src & BITMAP_LAST_WORD_MASK(nbits)); @@ -391,7 +300,7 @@ static inline bool bitmap_empty(const unsigned long *src, unsigned nbits) return find_first_bit(src, nbits) == nbits; } -static inline bool bitmap_full(const unsigned long *src, unsigned int nbits) +static inline int bitmap_full(const unsigned long *src, unsigned int nbits) { if (small_const_nbits(nbits)) return ! 
(~(*src) & BITMAP_LAST_WORD_MASK(nbits)); @@ -399,43 +308,15 @@ static inline bool bitmap_full(const unsigned long *src, unsigned int nbits) return find_first_zero_bit(src, nbits) == nbits; } -static __always_inline int bitmap_weight(const unsigned long *src, unsigned int nbits) +static __always_inline int __intentional_overflow(-1) bitmap_weight(const unsigned long *src, unsigned int nbits) { if (small_const_nbits(nbits)) return hweight_long(*src & BITMAP_LAST_WORD_MASK(nbits)); return __bitmap_weight(src, nbits); } -static __always_inline void bitmap_set(unsigned long *map, unsigned int start, - unsigned int nbits) -{ - if (__builtin_constant_p(nbits) && nbits == 1) - __set_bit(start, map); - else if (__builtin_constant_p(start & BITMAP_MEM_MASK) && - IS_ALIGNED(start, BITMAP_MEM_ALIGNMENT) && - __builtin_constant_p(nbits & BITMAP_MEM_MASK) && - IS_ALIGNED(nbits, BITMAP_MEM_ALIGNMENT)) - memset((char *)map + start / 8, 0xff, nbits / 8); - else - __bitmap_set(map, start, nbits); -} - -static __always_inline void bitmap_clear(unsigned long *map, unsigned int start, - unsigned int nbits) -{ - if (__builtin_constant_p(nbits) && nbits == 1) - __clear_bit(start, map); - else if (__builtin_constant_p(start & BITMAP_MEM_MASK) && - IS_ALIGNED(start, BITMAP_MEM_ALIGNMENT) && - __builtin_constant_p(nbits & BITMAP_MEM_MASK) && - IS_ALIGNED(nbits, BITMAP_MEM_ALIGNMENT)) - memset((char *)map + start / 8, 0, nbits / 8); - else - __bitmap_clear(map, start, nbits); -} - static inline void bitmap_shift_right(unsigned long *dst, const unsigned long *src, - unsigned int shift, unsigned int nbits) + unsigned int shift, int nbits) { if (small_const_nbits(nbits)) *dst = (*src & BITMAP_LAST_WORD_MASK(nbits)) >> shift; @@ -452,94 +333,20 @@ static inline void bitmap_shift_left(unsigned long *dst, const unsigned long *sr __bitmap_shift_left(dst, src, shift, nbits); } -static inline void bitmap_replace(unsigned long *dst, - const unsigned long *old, - const unsigned long *new, - const 
unsigned long *mask, - unsigned int nbits) +static inline int bitmap_parse(const char *buf, unsigned int buflen, + unsigned long *maskp, int nmaskbits) { - if (small_const_nbits(nbits)) - *dst = (*old & ~(*mask)) | (*new & *mask); - else - __bitmap_replace(dst, old, new, mask, nbits); -} - -static inline void bitmap_next_clear_region(unsigned long *bitmap, - unsigned int *rs, unsigned int *re, - unsigned int end) -{ - *rs = find_next_zero_bit(bitmap, end, *rs); - *re = find_next_bit(bitmap, end, *rs + 1); -} - -static inline void bitmap_next_set_region(unsigned long *bitmap, - unsigned int *rs, unsigned int *re, - unsigned int end) -{ - *rs = find_next_bit(bitmap, end, *rs); - *re = find_next_zero_bit(bitmap, end, *rs + 1); + return __bitmap_parse(buf, buflen, 0, maskp, nmaskbits); } /* - * Bitmap region iterators. Iterates over the bitmap between [@start, @end). - * @rs and @re should be integer variables and will be set to start and end - * index of the current clear or set region. - */ -#define bitmap_for_each_clear_region(bitmap, rs, re, start, end) \ - for ((rs) = (start), \ - bitmap_next_clear_region((bitmap), &(rs), &(re), (end)); \ - (rs) < (re); \ - (rs) = (re) + 1, \ - bitmap_next_clear_region((bitmap), &(rs), &(re), (end))) - -#define bitmap_for_each_set_region(bitmap, rs, re, start, end) \ - for ((rs) = (start), \ - bitmap_next_set_region((bitmap), &(rs), &(re), (end)); \ - (rs) < (re); \ - (rs) = (re) + 1, \ - bitmap_next_set_region((bitmap), &(rs), &(re), (end))) - -/** - * BITMAP_FROM_U64() - Represent u64 value in the format suitable for bitmap. - * @n: u64 value - * - * Linux bitmaps are internally arrays of unsigned longs, i.e. 32-bit - * integers in 32-bit environment, and 64-bit integers in 64-bit one. - * - * There are four combinations of endianness and length of the word in linux - * ABIs: LE64, BE64, LE32 and BE32. 
- * - * On 64-bit kernels 64-bit LE and BE numbers are naturally ordered in - * bitmaps and therefore don't require any special handling. - * - * On 32-bit kernels 32-bit LE ABI orders lo word of 64-bit number in memory - * prior to hi, and 32-bit BE orders hi word prior to lo. The bitmap on the - * other hand is represented as an array of 32-bit words and the position of - * bit N may therefore be calculated as: word #(N/32) and bit #(N%32) in that - * word. For example, bit #42 is located at 10th position of 2nd word. - * It matches 32-bit LE ABI, and we can simply let the compiler store 64-bit - * values in memory as it usually does. But for BE we need to swap hi and lo - * words manually. - * - * With all that, the macro BITMAP_FROM_U64() does explicit reordering of hi and - * lo parts of u64. For LE32 it does nothing, and for BE environment it swaps - * hi and lo words, as is expected by bitmap. - */ -#if __BITS_PER_LONG == 64 -#define BITMAP_FROM_U64(n) (n) -#else -#define BITMAP_FROM_U64(n) ((unsigned long) ((u64)(n) & ULONG_MAX)), \ - ((unsigned long) ((u64)(n) >> 32)) -#endif - -/** * bitmap_from_u64 - Check and swap words within u64. * @mask: source bitmap * @dst: destination bitmap * - * In 32-bit Big Endian kernel, when using ``(u32 *)(&val)[*]`` + * In 32-bit Big Endian kernel, when using (u32 *)(&val)[*] * to read u64 mask, we will get the wrong word. - * That is ``(u32 *)(&val)[0]`` gets the upper 32 bits, + * That is "(u32 *)(&val)[0]" gets the upper 32 bits, * but we expect the lower 32-bits of u64. 
*/ static inline void bitmap_from_u64(unsigned long *dst, u64 mask) @@ -550,39 +357,6 @@ static inline void bitmap_from_u64(unsigned long *dst, u64 mask) dst[1] = mask >> 32; } -/** - * bitmap_get_value8 - get an 8-bit value within a memory region - * @map: address to the bitmap memory region - * @start: bit offset of the 8-bit value; must be a multiple of 8 - * - * Returns the 8-bit value located at the @start bit offset within the @src - * memory region. - */ -static inline unsigned long bitmap_get_value8(const unsigned long *map, - unsigned long start) -{ - const size_t index = BIT_WORD(start); - const unsigned long offset = start % BITS_PER_LONG; - - return (map[index] >> offset) & 0xFF; -} - -/** - * bitmap_set_value8 - set an 8-bit value within a memory region - * @map: address to the bitmap memory region - * @value: the 8-bit value; values wider than 8 bits may clobber bitmap - * @start: bit offset of the 8-bit value; must be a multiple of 8 - */ -static inline void bitmap_set_value8(unsigned long *map, unsigned long value, - unsigned long start) -{ - const size_t index = BIT_WORD(start); - const unsigned long offset = start % BITS_PER_LONG; - - map[index] &= ~(0xFFUL << offset); - map[index] |= value << offset; -} - #endif /* __ASSEMBLY__ */ #endif /* __LINUX_BITMAP_H */ diff --git a/include/linux/bitops.h b/include/linux/bitops.h index 5e62e2383b..20d61d787a 100644 --- a/include/linux/bitops.h +++ b/include/linux/bitops.h @@ -1,25 +1,28 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_BITOPS_H #define _LINUX_BITOPS_H - #include -#include -#include -#include - -/* Set bits in the first 'n' bytes when loaded from memory */ -#ifdef __LITTLE_ENDIAN -# define aligned_byte_mask(n) ((1UL << 8*(n))-1) -#else -# define aligned_byte_mask(n) (~0xffUL << (BITS_PER_LONG - 8 - 8*(n))) +#ifdef __KERNEL__ +#define BIT(nr) (1UL << (nr)) +#define BIT_ULL(nr) (1ULL << (nr)) +#define BIT_MASK(nr) (1UL << ((nr) % BITS_PER_LONG)) +#define BIT_WORD(nr) ((nr) / 
BITS_PER_LONG) +#define BIT_ULL_MASK(nr) (1ULL << ((nr) % BITS_PER_LONG_LONG)) +#define BIT_ULL_WORD(nr) ((nr) / BITS_PER_LONG_LONG) +#define BITS_PER_BYTE 8 +#define BITS_TO_LONGS(nr) DIV_ROUND_UP(nr, BITS_PER_BYTE * sizeof(long)) #endif -#define BITS_PER_TYPE(type) (sizeof(type) * BITS_PER_BYTE) -#define BITS_TO_LONGS(nr) __KERNEL_DIV_ROUND_UP(nr, BITS_PER_TYPE(long)) -#define BITS_TO_U64(nr) __KERNEL_DIV_ROUND_UP(nr, BITS_PER_TYPE(u64)) -#define BITS_TO_U32(nr) __KERNEL_DIV_ROUND_UP(nr, BITS_PER_TYPE(u32)) -#define BITS_TO_BYTES(nr) __KERNEL_DIV_ROUND_UP(nr, BITS_PER_TYPE(char)) +/* + * Create a contiguous bitmask starting at bit position @l and ending at + * position @h. For example + * GENMASK_ULL(39, 21) gives us the 64bit vector 0x000000ffffe00000. + */ +#define GENMASK(h, l) \ + (((~0UL) << (l)) & (~0UL >> (BITS_PER_LONG - 1 - (h)))) + +#define GENMASK_ULL(h, l) \ + (((~0ULL) << (l)) & (~0ULL >> (BITS_PER_LONG_LONG - 1 - (h)))) extern unsigned int __sw_hweight8(unsigned int w); extern unsigned int __sw_hweight16(unsigned int w); @@ -54,18 +57,6 @@ extern unsigned long __sw_hweight64(__u64 w); (bit) < (size); \ (bit) = find_next_zero_bit((addr), (size), (bit) + 1)) -/** - * for_each_set_clump8 - iterate over bitmap for each 8-bit clump with set bits - * @start: bit offset to start search and to store the current iteration offset - * @clump: location to store copy of current 8-bit clump - * @bits: bitmap address to base the search on - * @size: bitmap size in number of bits - */ -#define for_each_set_clump8(start, clump, bits, size) \ - for ((start) = find_first_clump8(&(clump), (bits), (size)); \ - (start) < (size); \ - (start) = find_next_clump8(&(clump), (bits), (size), (start) + 8)) - static inline int get_bitmask_order(unsigned int count) { int order; @@ -74,9 +65,9 @@ static inline int get_bitmask_order(unsigned int count) return order; /* We could be slightly more clever with -1 here... 
*/ } -static __always_inline unsigned long hweight_long(unsigned long w) +static __always_inline unsigned long __intentional_overflow(-1) hweight_long(unsigned long w) { - return sizeof(w) == 4 ? hweight32(w) : hweight64((__u64)w); + return sizeof(w) == 4 ? hweight32(w) : hweight64(w); } /** @@ -86,7 +77,7 @@ static __always_inline unsigned long hweight_long(unsigned long w) */ static inline __u64 rol64(__u64 word, unsigned int shift) { - return (word << (shift & 63)) | (word >> ((-shift) & 63)); + return (word << shift) | (word >> (64 - shift)); } /** @@ -96,7 +87,7 @@ static inline __u64 rol64(__u64 word, unsigned int shift) */ static inline __u64 ror64(__u64 word, unsigned int shift) { - return (word >> (shift & 63)) | (word << ((-shift) & 63)); + return (word >> shift) | (word << (64 - shift)); } /** @@ -104,9 +95,9 @@ static inline __u64 ror64(__u64 word, unsigned int shift) * @word: value to rotate * @shift: bits to roll */ -static inline __u32 rol32(__u32 word, unsigned int shift) +static inline __u32 __intentional_overflow(-1) rol32(__u32 word, unsigned int shift) { - return (word << (shift & 31)) | (word >> ((-shift) & 31)); + return (word << shift) | (word >> ((-shift) & 31)); } /** @@ -114,9 +105,9 @@ static inline __u32 rol32(__u32 word, unsigned int shift) * @word: value to rotate * @shift: bits to roll */ -static inline __u32 ror32(__u32 word, unsigned int shift) +static inline __u32 __intentional_overflow(-1) ror32(__u32 word, unsigned int shift) { - return (word >> (shift & 31)) | (word << ((-shift) & 31)); + return (word >> shift) | (word << (32 - shift)); } /** @@ -126,7 +117,7 @@ static inline __u32 ror32(__u32 word, unsigned int shift) */ static inline __u16 rol16(__u16 word, unsigned int shift) { - return (word << (shift & 15)) | (word >> ((-shift) & 15)); + return (word << shift) | (word >> (16 - shift)); } /** @@ -136,7 +127,7 @@ static inline __u16 rol16(__u16 word, unsigned int shift) */ static inline __u16 ror16(__u16 word, unsigned int 
shift) { - return (word >> (shift & 15)) | (word << ((-shift) & 15)); + return (word >> shift) | (word << (16 - shift)); } /** @@ -146,7 +137,7 @@ static inline __u16 ror16(__u16 word, unsigned int shift) */ static inline __u8 rol8(__u8 word, unsigned int shift) { - return (word << (shift & 7)) | (word >> ((-shift) & 7)); + return (word << shift) | (word >> (8 - shift)); } /** @@ -156,7 +147,7 @@ static inline __u8 rol8(__u8 word, unsigned int shift) */ static inline __u8 ror8(__u8 word, unsigned int shift) { - return (word >> (shift & 7)) | (word << ((-shift) & 7)); + return (word >> shift) | (word << (8 - shift)); } /** @@ -166,7 +157,7 @@ static inline __u8 ror8(__u8 word, unsigned int shift) * * This is safe to use for 16- and 8-bit types as well. */ -static __always_inline __s32 sign_extend32(__u32 value, int index) +static inline __s32 sign_extend32(__u32 value, int index) { __u8 shift = 31 - index; return (__s32)(value << shift) >> shift; @@ -177,13 +168,13 @@ static __always_inline __s32 sign_extend32(__u32 value, int index) * @value: value to sign extend * @index: 0 based bit index (0<=index<64) to sign bit */ -static __always_inline __s64 sign_extend64(__u64 value, int index) +static inline __s64 sign_extend64(__u64 value, int index) { __u8 shift = 63 - index; return (__s64)(value << shift) >> shift; } -static inline unsigned fls_long(unsigned long l) +static inline unsigned __intentional_overflow(-1) fls_long(unsigned long l) { if (sizeof(l) == 4) return fls(l); @@ -192,10 +183,12 @@ static inline unsigned fls_long(unsigned long l) static inline int get_count_order(unsigned int count) { - if (count == 0) - return -1; + int order; - return fls(--count); + order = fls(count) - 1; + if (count & (count - 1)) + order++; + return order; } /** @@ -208,14 +201,17 @@ static inline int get_count_order_long(unsigned long l) { if (l == 0UL) return -1; - return (int)fls_long(--l); + else if (l & (l - 1UL)) + return (int)fls_long(l); + else + return (int)fls_long(l) - 
1; } /** * __ffs64 - find first set bit in a 64 bit word * @word: The 64 bit word * - * On 64 bit arches this is a synonym for __ffs + * On 64 bit arches this is a synomyn for __ffs * The result is not defined if no bits are set, so check that @word * is non-zero before calling this. */ @@ -230,111 +226,50 @@ static inline unsigned long __ffs64(u64 word) return __ffs((unsigned long)word); } -/** - * assign_bit - Assign value to a bit in memory - * @nr: the bit to set - * @addr: the address to start counting from - * @value: the value to assign - */ -static __always_inline void assign_bit(long nr, volatile unsigned long *addr, - bool value) -{ - if (value) - set_bit(nr, addr); - else - clear_bit(nr, addr); -} - -static __always_inline void __assign_bit(long nr, volatile unsigned long *addr, - bool value) -{ - if (value) - __set_bit(nr, addr); - else - __clear_bit(nr, addr); -} - -/** - * __ptr_set_bit - Set bit in a pointer's value - * @nr: the bit to set - * @addr: the address of the pointer variable - * - * Example: - * void *p = foo(); - * __ptr_set_bit(bit, &p); - */ -#define __ptr_set_bit(nr, addr) \ - ({ \ - typecheck_pointer(*(addr)); \ - __set_bit(nr, (unsigned long *)(addr)); \ - }) - -/** - * __ptr_clear_bit - Clear bit in a pointer's value - * @nr: the bit to clear - * @addr: the address of the pointer variable - * - * Example: - * void *p = foo(); - * __ptr_clear_bit(bit, &p); - */ -#define __ptr_clear_bit(nr, addr) \ - ({ \ - typecheck_pointer(*(addr)); \ - __clear_bit(nr, (unsigned long *)(addr)); \ - }) - -/** - * __ptr_test_bit - Test bit in a pointer's value - * @nr: the bit to test - * @addr: the address of the pointer variable - * - * Example: - * void *p = foo(); - * if (__ptr_test_bit(bit, &p)) { - * ... - * } else { - * ... 
- * } - */ -#define __ptr_test_bit(nr, addr) \ - ({ \ - typecheck_pointer(*(addr)); \ - test_bit(nr, (unsigned long *)(addr)); \ - }) - #ifdef __KERNEL__ #ifndef set_mask_bits -#define set_mask_bits(ptr, mask, bits) \ +#define set_mask_bits(ptr, _mask, _bits) \ ({ \ - const typeof(*(ptr)) mask__ = (mask), bits__ = (bits); \ - typeof(*(ptr)) old__, new__; \ + const typeof(*ptr) mask = (_mask), bits = (_bits); \ + typeof(*ptr) old, new; \ \ do { \ - old__ = READ_ONCE(*(ptr)); \ - new__ = (old__ & ~mask__) | bits__; \ - } while (cmpxchg(ptr, old__, new__) != old__); \ + old = ACCESS_ONCE(*ptr); \ + new = (old & ~mask) | bits; \ + } while (cmpxchg(ptr, old, new) != old); \ \ - old__; \ + new; \ }) #endif #ifndef bit_clear_unless -#define bit_clear_unless(ptr, clear, test) \ +#define bit_clear_unless(ptr, _clear, _test) \ ({ \ - const typeof(*(ptr)) clear__ = (clear), test__ = (test);\ - typeof(*(ptr)) old__, new__; \ + const typeof(*ptr) clear = (_clear), test = (_test); \ + typeof(*ptr) old, new; \ \ do { \ - old__ = READ_ONCE(*(ptr)); \ - new__ = old__ & ~clear__; \ - } while (!(old__ & test__) && \ - cmpxchg(ptr, old__, new__) != old__); \ + old = ACCESS_ONCE(*ptr); \ + new = old & ~clear; \ + } while (!(old & test) && \ + cmpxchg(ptr, old, new) != old); \ \ - !(old__ & test__); \ + !(old & test); \ }) #endif +#ifndef find_last_bit +/** + * find_last_bit - find the last set bit in a memory region + * @addr: The address to start the search at + * @size: The number of bits to search + * + * Returns the bit number of the last set bit, or size. 
+ */ +extern unsigned long find_last_bit(const unsigned long *addr, + unsigned long size); +#endif + #endif /* __KERNEL__ */ #endif diff --git a/include/linux/bitrev.h b/include/linux/bitrev.h index d35b8ec1c4..fb790b8449 100644 --- a/include/linux/bitrev.h +++ b/include/linux/bitrev.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_BITREV_H #define _LINUX_BITREV_H @@ -30,45 +29,34 @@ static inline u32 __bitrev32(u32 x) #endif /* CONFIG_HAVE_ARCH_BITREVERSE */ -#define __bitrev8x4(x) (__bitrev32(swab32(x))) - #define __constant_bitrev32(x) \ ({ \ - u32 ___x = x; \ - ___x = (___x >> 16) | (___x << 16); \ - ___x = ((___x & (u32)0xFF00FF00UL) >> 8) | ((___x & (u32)0x00FF00FFUL) << 8); \ - ___x = ((___x & (u32)0xF0F0F0F0UL) >> 4) | ((___x & (u32)0x0F0F0F0FUL) << 4); \ - ___x = ((___x & (u32)0xCCCCCCCCUL) >> 2) | ((___x & (u32)0x33333333UL) << 2); \ - ___x = ((___x & (u32)0xAAAAAAAAUL) >> 1) | ((___x & (u32)0x55555555UL) << 1); \ - ___x; \ + u32 __x = x; \ + __x = (__x >> 16) | (__x << 16); \ + __x = ((__x & (u32)0xFF00FF00UL) >> 8) | ((__x & (u32)0x00FF00FFUL) << 8); \ + __x = ((__x & (u32)0xF0F0F0F0UL) >> 4) | ((__x & (u32)0x0F0F0F0FUL) << 4); \ + __x = ((__x & (u32)0xCCCCCCCCUL) >> 2) | ((__x & (u32)0x33333333UL) << 2); \ + __x = ((__x & (u32)0xAAAAAAAAUL) >> 1) | ((__x & (u32)0x55555555UL) << 1); \ + __x; \ }) #define __constant_bitrev16(x) \ ({ \ - u16 ___x = x; \ - ___x = (___x >> 8) | (___x << 8); \ - ___x = ((___x & (u16)0xF0F0U) >> 4) | ((___x & (u16)0x0F0FU) << 4); \ - ___x = ((___x & (u16)0xCCCCU) >> 2) | ((___x & (u16)0x3333U) << 2); \ - ___x = ((___x & (u16)0xAAAAU) >> 1) | ((___x & (u16)0x5555U) << 1); \ - ___x; \ -}) - -#define __constant_bitrev8x4(x) \ -({ \ - u32 ___x = x; \ - ___x = ((___x & (u32)0xF0F0F0F0UL) >> 4) | ((___x & (u32)0x0F0F0F0FUL) << 4); \ - ___x = ((___x & (u32)0xCCCCCCCCUL) >> 2) | ((___x & (u32)0x33333333UL) << 2); \ - ___x = ((___x & (u32)0xAAAAAAAAUL) >> 1) | ((___x & (u32)0x55555555UL) << 1); \ - ___x; \ + 
u16 __x = x; \ + __x = (__x >> 8) | (__x << 8); \ + __x = ((__x & (u16)0xF0F0U) >> 4) | ((__x & (u16)0x0F0FU) << 4); \ + __x = ((__x & (u16)0xCCCCU) >> 2) | ((__x & (u16)0x3333U) << 2); \ + __x = ((__x & (u16)0xAAAAU) >> 1) | ((__x & (u16)0x5555U) << 1); \ + __x; \ }) #define __constant_bitrev8(x) \ ({ \ - u8 ___x = x; \ - ___x = (___x >> 4) | (___x << 4); \ - ___x = ((___x & (u8)0xCCU) >> 2) | ((___x & (u8)0x33U) << 2); \ - ___x = ((___x & (u8)0xAAU) >> 1) | ((___x & (u8)0x55U) << 1); \ - ___x; \ + u8 __x = x; \ + __x = (__x >> 4) | (__x << 4); \ + __x = ((__x & (u8)0xCCU) >> 2) | ((__x & (u8)0x33U) << 2); \ + __x = ((__x & (u8)0xAAU) >> 1) | ((__x & (u8)0x55U) << 1); \ + __x; \ }) #define bitrev32(x) \ @@ -87,14 +75,6 @@ static inline u32 __bitrev32(u32 x) __bitrev16(__x); \ }) -#define bitrev8x4(x) \ -({ \ - u32 __x = x; \ - __builtin_constant_p(__x) ? \ - __constant_bitrev8x4(__x) : \ - __bitrev8x4(__x); \ - }) - #define bitrev8(x) \ ({ \ u8 __x = x; \ diff --git a/include/linux/blk-cgroup.h b/include/linux/blk-cgroup.h index b4de2010fb..1c78e6c37d 100644 --- a/include/linux/blk-cgroup.h +++ b/include/linux/blk-cgroup.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _BLK_CGROUP_H #define _BLK_CGROUP_H /* @@ -15,32 +14,28 @@ */ #include -#include #include -#include #include #include #include #include -#include -#include /* percpu_counter batch for blkg_[rw]stats, per-cpu drift doesn't matter */ #define BLKG_STAT_CPU_BATCH (INT_MAX / 2) /* Max limits for throttle policy */ #define THROTL_IOPS_MAX UINT_MAX -#define FC_APPID_LEN 129 - #ifdef CONFIG_BLK_CGROUP -enum blkg_iostat_type { - BLKG_IOSTAT_READ, - BLKG_IOSTAT_WRITE, - BLKG_IOSTAT_DISCARD, +enum blkg_rwstat_type { + BLKG_RWSTAT_READ, + BLKG_RWSTAT_WRITE, + BLKG_RWSTAT_SYNC, + BLKG_RWSTAT_ASYNC, - BLKG_IOSTAT_NR, + BLKG_RWSTAT_NR, + BLKG_RWSTAT_TOTAL = BLKG_RWSTAT_NR, }; struct blkcg_gq; @@ -48,7 +43,6 @@ struct blkcg_gq; struct blkcg { struct cgroup_subsys_state css; spinlock_t lock; - 
refcount_t online_pin; struct radix_tree_root blkg_tree; struct blkcg_gq __rcu *blkg_hint; @@ -57,23 +51,24 @@ struct blkcg { struct blkcg_policy_data *cpd[BLKCG_MAX_POLS]; struct list_head all_blkcgs_node; -#ifdef CONFIG_BLK_CGROUP_FC_APPID - char fc_app_id[FC_APPID_LEN]; -#endif #ifdef CONFIG_CGROUP_WRITEBACK struct list_head cgwb_list; #endif }; -struct blkg_iostat { - u64 bytes[BLKG_IOSTAT_NR]; - u64 ios[BLKG_IOSTAT_NR]; +/* + * blkg_[rw]stat->aux_cnt is excluded for local stats but included for + * recursive. Used to carry stats of dead children, and, for blkg_rwstat, + * to carry result values from read and sum operations. + */ +struct blkg_stat { + struct percpu_counter cpu_cnt; + atomic64_unchecked_t aux_cnt; }; -struct blkg_iostat_set { - struct u64_stats_sync sync; - struct blkg_iostat cur; - struct blkg_iostat last; +struct blkg_rwstat { + struct percpu_counter cpu_cnt[BLKG_RWSTAT_NR]; + atomic64_unchecked_t aux_cnt[BLKG_RWSTAT_NR]; }; /* @@ -114,30 +109,29 @@ struct blkcg_gq { struct hlist_node blkcg_node; struct blkcg *blkcg; + /* + * Each blkg gets congested separately and the congestion state is + * propagated to the matching bdi_writeback_congested. + */ + struct bdi_writeback_congested *wb_congested; + /* all non-root blkcg_gq's are guaranteed to have access to parent */ struct blkcg_gq *parent; + /* request allocation list for this blkcg-q pair */ + struct request_list rl; + /* reference count */ - struct percpu_ref refcnt; + atomic_t refcnt; /* is this blkg online? 
protected by both blkcg and q locks */ bool online; - struct blkg_iostat_set __percpu *iostat_cpu; - struct blkg_iostat_set iostat; + struct blkg_rwstat stat_bytes; + struct blkg_rwstat stat_ios; struct blkg_policy_data *pd[BLKCG_MAX_POLS]; - spinlock_t async_bio_lock; - struct bio_list async_bios; - struct work_struct async_bio_work; - - atomic_t use_delay; - atomic64_t delay_nsec; - atomic64_t delay_start; - u64 last_delay; - int last_use; - struct rcu_head rcu_head; }; @@ -145,15 +139,12 @@ typedef struct blkcg_policy_data *(blkcg_pol_alloc_cpd_fn)(gfp_t gfp); typedef void (blkcg_pol_init_cpd_fn)(struct blkcg_policy_data *cpd); typedef void (blkcg_pol_free_cpd_fn)(struct blkcg_policy_data *cpd); typedef void (blkcg_pol_bind_cpd_fn)(struct blkcg_policy_data *cpd); -typedef struct blkg_policy_data *(blkcg_pol_alloc_pd_fn)(gfp_t gfp, - struct request_queue *q, struct blkcg *blkcg); +typedef struct blkg_policy_data *(blkcg_pol_alloc_pd_fn)(gfp_t gfp, int node); typedef void (blkcg_pol_init_pd_fn)(struct blkg_policy_data *pd); typedef void (blkcg_pol_online_pd_fn)(struct blkg_policy_data *pd); typedef void (blkcg_pol_offline_pd_fn)(struct blkg_policy_data *pd); typedef void (blkcg_pol_free_pd_fn)(struct blkg_policy_data *pd); typedef void (blkcg_pol_reset_pd_stats_fn)(struct blkg_policy_data *pd); -typedef bool (blkcg_pol_stat_pd_fn)(struct blkg_policy_data *pd, - struct seq_file *s); struct blkcg_policy { int plid; @@ -173,16 +164,17 @@ struct blkcg_policy { blkcg_pol_offline_pd_fn *pd_offline_fn; blkcg_pol_free_pd_fn *pd_free_fn; blkcg_pol_reset_pd_stats_fn *pd_reset_stats_fn; - blkcg_pol_stat_pd_fn *pd_stat_fn; }; extern struct blkcg blkcg_root; extern struct cgroup_subsys_state * const blkcg_root_css; -extern bool blkcg_debug_stats; struct blkcg_gq *blkg_lookup_slowpath(struct blkcg *blkcg, struct request_queue *q, bool update_hint); +struct blkcg_gq *blkg_lookup_create(struct blkcg *blkcg, + struct request_queue *q); int blkcg_init_queue(struct request_queue 
*q); +void blkcg_drain_queue(struct request_queue *q); void blkcg_exit_queue(struct request_queue *q); /* Blkio controller policy registration */ @@ -200,110 +192,53 @@ void blkcg_print_blkgs(struct seq_file *sf, struct blkcg *blkcg, const struct blkcg_policy *pol, int data, bool show_total); u64 __blkg_prfill_u64(struct seq_file *sf, struct blkg_policy_data *pd, u64 v); +u64 __blkg_prfill_rwstat(struct seq_file *sf, struct blkg_policy_data *pd, + const struct blkg_rwstat *rwstat); +u64 blkg_prfill_stat(struct seq_file *sf, struct blkg_policy_data *pd, int off); +u64 blkg_prfill_rwstat(struct seq_file *sf, struct blkg_policy_data *pd, + int off); +int blkg_print_stat_bytes(struct seq_file *sf, void *v); +int blkg_print_stat_ios(struct seq_file *sf, void *v); +int blkg_print_stat_bytes_recursive(struct seq_file *sf, void *v); +int blkg_print_stat_ios_recursive(struct seq_file *sf, void *v); + +u64 blkg_stat_recursive_sum(struct blkcg_gq *blkg, + struct blkcg_policy *pol, int off); +struct blkg_rwstat blkg_rwstat_recursive_sum(struct blkcg_gq *blkg, + struct blkcg_policy *pol, int off); struct blkg_conf_ctx { - struct block_device *bdev; + struct gendisk *disk; struct blkcg_gq *blkg; char *body; }; -struct block_device *blkcg_conf_open_bdev(char **inputp); int blkg_conf_prep(struct blkcg *blkcg, const struct blkcg_policy *pol, char *input, struct blkg_conf_ctx *ctx); void blkg_conf_finish(struct blkg_conf_ctx *ctx); -/** - * blkcg_css - find the current css - * - * Find the css associated with either the kthread or the current task. - * This may return a dying css, so it is up to the caller to use tryget logic - * to confirm it is alive and well. - */ -static inline struct cgroup_subsys_state *blkcg_css(void) -{ - struct cgroup_subsys_state *css; - - css = kthread_blkcg(); - if (css) - return css; - return task_css(current, io_cgrp_id); -} static inline struct blkcg *css_to_blkcg(struct cgroup_subsys_state *css) { return css ? 
container_of(css, struct blkcg, css) : NULL; } -/** - * __bio_blkcg - internal, inconsistent version to get blkcg - * - * DO NOT USE. - * This function is inconsistent and consequently is dangerous to use. The - * first part of the function returns a blkcg where a reference is owned by the - * bio. This means it does not need to be rcu protected as it cannot go away - * with the bio owning a reference to it. However, the latter potentially gets - * it from task_css(). This can race against task migration and the cgroup - * dying. It is also semantically different as it must be called rcu protected - * and is susceptible to failure when trying to get a reference to it. - * Therefore, it is not ok to assume that *_get() will always succeed on the - * blkcg returned here. - */ -static inline struct blkcg *__bio_blkcg(struct bio *bio) +static inline struct blkcg *task_blkcg(struct task_struct *tsk) { - if (bio && bio->bi_blkg) - return bio->bi_blkg->blkcg; - return css_to_blkcg(blkcg_css()); + return css_to_blkcg(task_css(tsk, io_cgrp_id)); } -/** - * bio_blkcg - grab the blkcg associated with a bio - * @bio: target bio - * - * This returns the blkcg associated with a bio, %NULL if not associated. - * Callers are expected to either handle %NULL or know association has been - * done prior to calling this. 
- */ static inline struct blkcg *bio_blkcg(struct bio *bio) { - if (bio && bio->bi_blkg) - return bio->bi_blkg->blkcg; - return NULL; + if (bio && bio->bi_css) + return css_to_blkcg(bio->bi_css); + return task_blkcg(current); } -static inline bool blk_cgroup_congested(void) +static inline struct cgroup_subsys_state * +task_get_blkcg_css(struct task_struct *task) { - struct cgroup_subsys_state *css; - bool ret = false; - - rcu_read_lock(); - css = kthread_blkcg(); - if (!css) - css = task_css(current, io_cgrp_id); - while (css) { - if (atomic_read(&css->cgroup->congestion_count)) { - ret = true; - break; - } - css = css->parent; - } - rcu_read_unlock(); - return ret; -} - -/** - * bio_issue_as_root_blkg - see if this bio needs to be issued as root blkg - * @return: true if this bio needs to be submitted with the root blkg context. - * - * In order to avoid priority inversions we sometimes need to issue a bio as if - * it were attached to the root blkg, and then backcharge to the actual owning - * blkg. The idea is we do bio_blkcg() to look up the actual context for the - * bio and attach the appropriate blkg to the bio. Then we call this helper and - * if it is true run with the root blkg for that queue and then do any - * backcharging to the originating cgroup once the io is complete. - */ -static inline bool bio_issue_as_root_blkg(struct bio *bio) -{ - return (bio->bi_opf & (REQ_META | REQ_SWAP)) != 0; + return task_get_css(task, io_cgrp_id); } /** @@ -350,24 +285,17 @@ static inline struct blkcg_gq *__blkg_lookup(struct blkcg *blkcg, * @q: request_queue of interest * * Lookup blkg for the @blkcg - @q pair. This function should be called - * under RCU read lock. + * under RCU read lock and is guaranteed to return %NULL if @q is bypassing + * - see blk_queue_bypass_start() for details. 
*/ static inline struct blkcg_gq *blkg_lookup(struct blkcg *blkcg, struct request_queue *q) { WARN_ON_ONCE(!rcu_read_lock_held()); - return __blkg_lookup(blkcg, q, false); -} -/** - * blk_queue_root_blkg - return blkg for the (blkcg_root, @q) pair - * @q: request_queue of interest - * - * Lookup blkg for @q at the root level. See also blkg_lookup(). - */ -static inline struct blkcg_gq *blk_queue_root_blkg(struct request_queue *q) -{ - return q->root_blkg; + if (unlikely(blk_queue_bypass(q))) + return NULL; + return __blkg_lookup(blkcg, q, false); } /** @@ -405,40 +333,6 @@ static inline struct blkcg *cpd_to_blkcg(struct blkcg_policy_data *cpd) return cpd ? cpd->blkcg : NULL; } -extern void blkcg_destroy_blkgs(struct blkcg *blkcg); - -/** - * blkcg_pin_online - pin online state - * @blkcg: blkcg of interest - * - * While pinned, a blkcg is kept online. This is primarily used to - * impedance-match blkg and cgwb lifetimes so that blkg doesn't go offline - * while an associated cgwb is still active. - */ -static inline void blkcg_pin_online(struct blkcg *blkcg) -{ - refcount_inc(&blkcg->online_pin); -} - -/** - * blkcg_unpin_online - unpin online state - * @blkcg: blkcg of interest - * - * This is primarily used to impedance-match blkg and cgwb lifetimes so - * that blkg doesn't go offline while an associated cgwb is still active. - * When this count goes to zero, all active cgwbs have finished so the - * blkcg can continue destruction by calling blkcg_destroy_blkgs(). 
- */ -static inline void blkcg_unpin_online(struct blkcg *blkcg) -{ - do { - if (!refcount_dec_and_test(&blkcg->online_pin)) - break; - blkcg_destroy_blkgs(blkcg); - blkcg = blkcg_parent(blkcg); - } while (blkcg); -} - /** * blkg_path - format cgroup path of blkg * @blkg: blkg of interest @@ -460,20 +354,11 @@ static inline int blkg_path(struct blkcg_gq *blkg, char *buf, int buflen) */ static inline void blkg_get(struct blkcg_gq *blkg) { - percpu_ref_get(&blkg->refcnt); + WARN_ON_ONCE(atomic_read(&blkg->refcnt) <= 0); + atomic_inc(&blkg->refcnt); } -/** - * blkg_tryget - try and get a blkg reference - * @blkg: blkg to get - * - * This is for use when doing an RCU lookup of the blkg. We may be in the midst - * of freeing this blkg, so we can only use it if the refcnt is not zero. - */ -static inline bool blkg_tryget(struct blkcg_gq *blkg) -{ - return blkg && percpu_ref_tryget(&blkg->refcnt); -} +void __blkg_release_rcu(struct rcu_head *rcu); /** * blkg_put - put a blkg reference @@ -481,7 +366,9 @@ static inline bool blkg_tryget(struct blkcg_gq *blkg) */ static inline void blkg_put(struct blkcg_gq *blkg) { - percpu_ref_put(&blkg->refcnt); + WARN_ON_ONCE(atomic_read(&blkg->refcnt) <= 0); + if (atomic_dec_and_test(&blkg->refcnt)) + call_rcu(&blkg->rcu_head, __blkg_release_rcu); } /** @@ -516,98 +403,317 @@ static inline void blkg_put(struct blkcg_gq *blkg) if (((d_blkg) = __blkg_lookup(css_to_blkcg(pos_css), \ (p_blkg)->q, false))) -bool __blkcg_punt_bio_submit(struct bio *bio); - -static inline bool blkcg_punt_bio_submit(struct bio *bio) +/** + * blk_get_rl - get request_list to use + * @q: request_queue of interest + * @bio: bio which will be attached to the allocated request (may be %NULL) + * + * The caller wants to allocate a request from @q to use for @bio. Find + * the request_list to use and obtain a reference on it. Should be called + * under queue_lock. This function is guaranteed to return non-%NULL + * request_list. 
+ */ +static inline struct request_list *blk_get_rl(struct request_queue *q, + struct bio *bio) { - if (bio->bi_opf & REQ_CGROUP_PUNT) - return __blkcg_punt_bio_submit(bio); - else - return false; -} + struct blkcg *blkcg; + struct blkcg_gq *blkg; -static inline void blkcg_bio_issue_init(struct bio *bio) -{ - bio_issue_init(&bio->bi_issue, bio_sectors(bio)); -} + rcu_read_lock(); -static inline void blkcg_use_delay(struct blkcg_gq *blkg) -{ - if (WARN_ON_ONCE(atomic_read(&blkg->use_delay) < 0)) - return; - if (atomic_add_return(1, &blkg->use_delay) == 1) - atomic_inc(&blkg->blkcg->css.cgroup->congestion_count); -} + blkcg = bio_blkcg(bio); -static inline int blkcg_unuse_delay(struct blkcg_gq *blkg) -{ - int old = atomic_read(&blkg->use_delay); - - if (WARN_ON_ONCE(old < 0)) - return 0; - if (old == 0) - return 0; + /* bypass blkg lookup and use @q->root_rl directly for root */ + if (blkcg == &blkcg_root) + goto root_rl; /* - * We do this song and dance because we can race with somebody else - * adding or removing delay. If we just did an atomic_dec we'd end up - * negative and we'd already be in trouble. We need to subtract 1 and - * then check to see if we were the last delay so we can drop the - * congestion count on the cgroup. + * Try to use blkg->rl. blkg lookup may fail under memory pressure + * or if either the blkcg or queue is going away. Fall back to + * root_rl in such cases. */ - while (old) { - int cur = atomic_cmpxchg(&blkg->use_delay, old, old - 1); - if (cur == old) - break; - old = cur; + blkg = blkg_lookup(blkcg, q); + if (unlikely(!blkg)) + goto root_rl; + + blkg_get(blkg); + rcu_read_unlock(); + return &blkg->rl; +root_rl: + rcu_read_unlock(); + return &q->root_rl; +} + +/** + * blk_put_rl - put request_list + * @rl: request_list to put + * + * Put the reference acquired by blk_get_rl(). Should be called under + * queue_lock. 
+ */ +static inline void blk_put_rl(struct request_list *rl) +{ + if (rl->blkg->blkcg != &blkcg_root) + blkg_put(rl->blkg); +} + +/** + * blk_rq_set_rl - associate a request with a request_list + * @rq: request of interest + * @rl: target request_list + * + * Associate @rq with @rl so that accounting and freeing can know the + * request_list @rq came from. + */ +static inline void blk_rq_set_rl(struct request *rq, struct request_list *rl) +{ + rq->rl = rl; +} + +/** + * blk_rq_rl - return the request_list a request came from + * @rq: request of interest + * + * Return the request_list @rq is allocated from. + */ +static inline struct request_list *blk_rq_rl(struct request *rq) +{ + return rq->rl; +} + +struct request_list *__blk_queue_next_rl(struct request_list *rl, + struct request_queue *q); +/** + * blk_queue_for_each_rl - iterate through all request_lists of a request_queue + * + * Should be used under queue_lock. + */ +#define blk_queue_for_each_rl(rl, q) \ + for ((rl) = &(q)->root_rl; (rl); (rl) = __blk_queue_next_rl((rl), (q))) + +static inline int blkg_stat_init(struct blkg_stat *stat, gfp_t gfp) +{ + int ret; + + ret = percpu_counter_init(&stat->cpu_cnt, 0, gfp); + if (ret) + return ret; + + atomic64_set_unchecked(&stat->aux_cnt, 0); + return 0; +} + +static inline void blkg_stat_exit(struct blkg_stat *stat) +{ + percpu_counter_destroy(&stat->cpu_cnt); +} + +/** + * blkg_stat_add - add a value to a blkg_stat + * @stat: target blkg_stat + * @val: value to add + * + * Add @val to @stat. The caller must ensure that IRQ on the same CPU + * don't re-enter this function for the same counter. 
+ */ +static inline void blkg_stat_add(struct blkg_stat *stat, uint64_t val) +{ + __percpu_counter_add(&stat->cpu_cnt, val, BLKG_STAT_CPU_BATCH); +} + +/** + * blkg_stat_read - read the current value of a blkg_stat + * @stat: blkg_stat to read + */ +static inline uint64_t blkg_stat_read(struct blkg_stat *stat) +{ + return percpu_counter_sum_positive(&stat->cpu_cnt); +} + +/** + * blkg_stat_reset - reset a blkg_stat + * @stat: blkg_stat to reset + */ +static inline void blkg_stat_reset(struct blkg_stat *stat) +{ + percpu_counter_set(&stat->cpu_cnt, 0); + atomic64_set_unchecked(&stat->aux_cnt, 0); +} + +/** + * blkg_stat_add_aux - add a blkg_stat into another's aux count + * @to: the destination blkg_stat + * @from: the source + * + * Add @from's count including the aux one to @to's aux count. + */ +static inline void blkg_stat_add_aux(struct blkg_stat *to, + struct blkg_stat *from) +{ + atomic64_add_unchecked(blkg_stat_read(from) + atomic64_read_unchecked(&from->aux_cnt), + &to->aux_cnt); +} + +static inline int blkg_rwstat_init(struct blkg_rwstat *rwstat, gfp_t gfp) +{ + int i, ret; + + for (i = 0; i < BLKG_RWSTAT_NR; i++) { + ret = percpu_counter_init(&rwstat->cpu_cnt[i], 0, gfp); + if (ret) { + while (--i >= 0) + percpu_counter_destroy(&rwstat->cpu_cnt[i]); + return ret; + } + atomic64_set_unchecked(&rwstat->aux_cnt[i], 0); + } + return 0; +} + +static inline void blkg_rwstat_exit(struct blkg_rwstat *rwstat) +{ + int i; + + for (i = 0; i < BLKG_RWSTAT_NR; i++) + percpu_counter_destroy(&rwstat->cpu_cnt[i]); +} + +/** + * blkg_rwstat_add - add a value to a blkg_rwstat + * @rwstat: target blkg_rwstat + * @op: REQ_OP + * @op_flags: rq_flag_bits + * @val: value to add + * + * Add @val to @rwstat. The counters are chosen according to @rw. The + * caller is responsible for synchronizing calls to this function. 
+ */ +static inline void blkg_rwstat_add(struct blkg_rwstat *rwstat, + int op, int op_flags, uint64_t val) +{ + struct percpu_counter *cnt; + + if (op_is_write(op)) + cnt = &rwstat->cpu_cnt[BLKG_RWSTAT_WRITE]; + else + cnt = &rwstat->cpu_cnt[BLKG_RWSTAT_READ]; + + __percpu_counter_add(cnt, val, BLKG_STAT_CPU_BATCH); + + if (op_flags & REQ_SYNC) + cnt = &rwstat->cpu_cnt[BLKG_RWSTAT_SYNC]; + else + cnt = &rwstat->cpu_cnt[BLKG_RWSTAT_ASYNC]; + + __percpu_counter_add(cnt, val, BLKG_STAT_CPU_BATCH); +} + +/** + * blkg_rwstat_read - read the current values of a blkg_rwstat + * @rwstat: blkg_rwstat to read + * + * Read the current snapshot of @rwstat and return it in the aux counts. + */ +static inline struct blkg_rwstat blkg_rwstat_read(struct blkg_rwstat *rwstat) +{ + struct blkg_rwstat result; + int i; + + for (i = 0; i < BLKG_RWSTAT_NR; i++) + atomic64_set_unchecked(&result.aux_cnt[i], + percpu_counter_sum_positive(&rwstat->cpu_cnt[i])); + return result; +} + +/** + * blkg_rwstat_total - read the total count of a blkg_rwstat + * @rwstat: blkg_rwstat to read + * + * Return the total count of @rwstat regardless of the IO direction. This + * function can be called without synchronization and takes care of u64 + * atomicity. 
+ */ +static inline uint64_t blkg_rwstat_total(struct blkg_rwstat *rwstat) +{ + struct blkg_rwstat tmp = blkg_rwstat_read(rwstat); + + return atomic64_read_unchecked(&tmp.aux_cnt[BLKG_RWSTAT_READ]) + + atomic64_read_unchecked(&tmp.aux_cnt[BLKG_RWSTAT_WRITE]); +} + +/** + * blkg_rwstat_reset - reset a blkg_rwstat + * @rwstat: blkg_rwstat to reset + */ +static inline void blkg_rwstat_reset(struct blkg_rwstat *rwstat) +{ + int i; + + for (i = 0; i < BLKG_RWSTAT_NR; i++) { + percpu_counter_set(&rwstat->cpu_cnt[i], 0); + atomic64_set_unchecked(&rwstat->aux_cnt[i], 0); + } +} + +/** + * blkg_rwstat_add_aux - add a blkg_rwstat into another's aux count + * @to: the destination blkg_rwstat + * @from: the source + * + * Add @from's count including the aux one to @to's aux count. + */ +static inline void blkg_rwstat_add_aux(struct blkg_rwstat *to, + struct blkg_rwstat *from) +{ + struct blkg_rwstat v = blkg_rwstat_read(from); + int i; + + for (i = 0; i < BLKG_RWSTAT_NR; i++) + atomic64_add_unchecked(atomic64_read_unchecked(&v.aux_cnt[i]) + + atomic64_read_unchecked(&from->aux_cnt[i]), + &to->aux_cnt[i]); +} + +#ifdef CONFIG_BLK_DEV_THROTTLING +extern bool blk_throtl_bio(struct request_queue *q, struct blkcg_gq *blkg, + struct bio *bio); +#else +static inline bool blk_throtl_bio(struct request_queue *q, struct blkcg_gq *blkg, + struct bio *bio) { return false; } +#endif + +static inline bool blkcg_bio_issue_check(struct request_queue *q, + struct bio *bio) +{ + struct blkcg *blkcg; + struct blkcg_gq *blkg; + bool throtl = false; + + rcu_read_lock(); + blkcg = bio_blkcg(bio); + + blkg = blkg_lookup(blkcg, q); + if (unlikely(!blkg)) { + spin_lock_irq(q->queue_lock); + blkg = blkg_lookup_create(blkcg, q); + if (IS_ERR(blkg)) + blkg = NULL; + spin_unlock_irq(q->queue_lock); } - if (old == 0) - return 0; - if (old == 1) - atomic_dec(&blkg->blkcg->css.cgroup->congestion_count); - return 1; + throtl = blk_throtl_bio(q, blkg, bio); + + if (!throtl) { + blkg = blkg ?: q->root_blkg; + 
blkg_rwstat_add(&blkg->stat_bytes, bio_op(bio), bio->bi_opf, + bio->bi_iter.bi_size); + blkg_rwstat_add(&blkg->stat_ios, bio_op(bio), bio->bi_opf, 1); + } + + rcu_read_unlock(); + return !throtl; } -/** - * blkcg_set_delay - Enable allocator delay mechanism with the specified delay amount - * @blkg: target blkg - * @delay: delay duration in nsecs - * - * When enabled with this function, the delay is not decayed and must be - * explicitly cleared with blkcg_clear_delay(). Must not be mixed with - * blkcg_[un]use_delay() and blkcg_add_delay() usages. - */ -static inline void blkcg_set_delay(struct blkcg_gq *blkg, u64 delay) -{ - int old = atomic_read(&blkg->use_delay); - - /* We only want 1 person setting the congestion count for this blkg. */ - if (!old && atomic_cmpxchg(&blkg->use_delay, old, -1) == old) - atomic_inc(&blkg->blkcg->css.cgroup->congestion_count); - - atomic64_set(&blkg->delay_nsec, delay); -} - -/** - * blkcg_clear_delay - Disable allocator delay mechanism - * @blkg: target blkg - * - * Disable use_delay mechanism. See blkcg_set_delay(). - */ -static inline void blkcg_clear_delay(struct blkcg_gq *blkg) -{ - int old = atomic_read(&blkg->use_delay); - - /* We only want 1 person clearing the congestion count for this blkg. 
*/ - if (old && atomic_cmpxchg(&blkg->use_delay, old, 0) == old) - atomic_dec(&blkg->blkcg->css.cgroup->congestion_count); -} - -void blk_cgroup_bio_start(struct bio *bio); -void blkcg_add_delay(struct blkcg_gq *blkg, u64 now, u64 delta); -void blkcg_schedule_throttle(struct request_queue *q, bool use_memdelay); -void blkcg_maybe_throttle_current(void); #else /* CONFIG_BLK_CGROUP */ struct blkcg { @@ -627,17 +733,17 @@ struct blkcg_policy { #define blkcg_root_css ((struct cgroup_subsys_state *)ERR_PTR(-EINVAL)) -static inline void blkcg_maybe_throttle_current(void) { } -static inline bool blk_cgroup_congested(void) { return false; } +static inline struct cgroup_subsys_state * +task_get_blkcg_css(struct task_struct *task) +{ + return NULL; +} #ifdef CONFIG_BLOCK -static inline void blkcg_schedule_throttle(struct request_queue *q, bool use_memdelay) { } - static inline struct blkcg_gq *blkg_lookup(struct blkcg *blkcg, void *key) { return NULL; } -static inline struct blkcg_gq *blk_queue_root_blkg(struct request_queue *q) -{ return NULL; } static inline int blkcg_init_queue(struct request_queue *q) { return 0; } +static inline void blkcg_drain_queue(struct request_queue *q) { } static inline void blkcg_exit_queue(struct request_queue *q) { } static inline int blkcg_policy_register(struct blkcg_policy *pol) { return 0; } static inline void blkcg_policy_unregister(struct blkcg_policy *pol) { } @@ -646,7 +752,6 @@ static inline int blkcg_activate_policy(struct request_queue *q, static inline void blkcg_deactivate_policy(struct request_queue *q, const struct blkcg_policy *pol) { } -static inline struct blkcg *__bio_blkcg(struct bio *bio) { return NULL; } static inline struct blkcg *bio_blkcg(struct bio *bio) { return NULL; } static inline struct blkg_policy_data *blkg_to_pd(struct blkcg_gq *blkg, @@ -656,71 +761,18 @@ static inline char *blkg_path(struct blkcg_gq *blkg) { return NULL; } static inline void blkg_get(struct blkcg_gq *blkg) { } static inline void 
blkg_put(struct blkcg_gq *blkg) { } -static inline bool blkcg_punt_bio_submit(struct bio *bio) { return false; } -static inline void blkcg_bio_issue_init(struct bio *bio) { } -static inline void blk_cgroup_bio_start(struct bio *bio) { } +static inline struct request_list *blk_get_rl(struct request_queue *q, + struct bio *bio) { return &q->root_rl; } +static inline void blk_put_rl(struct request_list *rl) { } +static inline void blk_rq_set_rl(struct request *rq, struct request_list *rl) { } +static inline struct request_list *blk_rq_rl(struct request *rq) { return &rq->q->root_rl; } + +static inline bool blkcg_bio_issue_check(struct request_queue *q, + struct bio *bio) { return true; } #define blk_queue_for_each_rl(rl, q) \ for ((rl) = &(q)->root_rl; (rl); (rl) = NULL) #endif /* CONFIG_BLOCK */ #endif /* CONFIG_BLK_CGROUP */ - -#ifdef CONFIG_BLK_CGROUP_FC_APPID -/* - * Sets the fc_app_id field associted to blkcg - * @app_id: application identifier - * @cgrp_id: cgroup id - * @app_id_len: size of application identifier - */ -static inline int blkcg_set_fc_appid(char *app_id, u64 cgrp_id, size_t app_id_len) -{ - struct cgroup *cgrp; - struct cgroup_subsys_state *css; - struct blkcg *blkcg; - int ret = 0; - - if (app_id_len > FC_APPID_LEN) - return -EINVAL; - - cgrp = cgroup_get_from_id(cgrp_id); - if (!cgrp) - return -ENOENT; - css = cgroup_get_e_css(cgrp, &io_cgrp_subsys); - if (!css) { - ret = -ENOENT; - goto out_cgrp_put; - } - blkcg = css_to_blkcg(css); - /* - * There is a slight race condition on setting the appid. - * Worst case an I/O may not find the right id. - * This is no different from the I/O we let pass while obtaining - * the vmid from the fabric. - * Adding the overhead of a lock is not necessary. 
- */ - strlcpy(blkcg->fc_app_id, app_id, app_id_len); - css_put(css); -out_cgrp_put: - cgroup_put(cgrp); - return ret; -} - -/** - * blkcg_get_fc_appid - get the fc app identifier associated with a bio - * @bio: target bio - * - * On success return the fc_app_id, on failure return NULL - */ -static inline char *blkcg_get_fc_appid(struct bio *bio) -{ - if (bio && bio->bi_blkg && - (bio->bi_blkg->blkcg->fc_app_id[0] != '\0')) - return bio->bi_blkg->blkcg->fc_app_id; - return NULL; -} -#else -static inline int blkcg_set_fc_appid(char *buf, u64 id, size_t len) { return -EINVAL; } -static inline char *blkcg_get_fc_appid(struct bio *bio) { return NULL; } -#endif /*CONFIG_BLK_CGROUP_FC_APPID*/ #endif /* _BLK_CGROUP_H */ diff --git a/include/linux/blk-mq-pci.h b/include/linux/blk-mq-pci.h index 0b1f45c626..6ab5952591 100644 --- a/include/linux/blk-mq-pci.h +++ b/include/linux/blk-mq-pci.h @@ -1,11 +1,9 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_BLK_MQ_PCI_H #define _LINUX_BLK_MQ_PCI_H -struct blk_mq_queue_map; +struct blk_mq_tag_set; struct pci_dev; -int blk_mq_pci_map_queues(struct blk_mq_queue_map *qmap, struct pci_dev *pdev, - int offset); +int blk_mq_pci_map_queues(struct blk_mq_tag_set *set, struct pci_dev *pdev); #endif /* _LINUX_BLK_MQ_PCI_H */ diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h index 13ba1861e6..535ab2e13d 100644 --- a/include/linux/blk-mq.h +++ b/include/linux/blk-mq.h @@ -1,425 +1,158 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef BLK_MQ_H #define BLK_MQ_H #include #include -#include -#include struct blk_mq_tags; struct blk_flush_queue; -/** - * struct blk_mq_hw_ctx - State for a hardware queue facing the hardware - * block device - */ struct blk_mq_hw_ctx { struct { - /** @lock: Protects the dispatch list. */ spinlock_t lock; - /** - * @dispatch: Used for requests that are ready to be - * dispatched to the hardware but for some reason (e.g. lack of - * resources) could not be sent to the hardware. 
As soon as the - * driver can send new requests, requests at this list will - * be sent first for a fairer dispatch. - */ struct list_head dispatch; - /** - * @state: BLK_MQ_S_* flags. Defines the state of the hw - * queue (active, scheduled to restart, stopped). - */ - unsigned long state; + unsigned long state; /* BLK_MQ_S_* flags */ } ____cacheline_aligned_in_smp; - /** - * @run_work: Used for scheduling a hardware queue run at a later time. - */ - struct delayed_work run_work; - /** @cpumask: Map of available CPUs where this hctx can run. */ + struct work_struct run_work; cpumask_var_t cpumask; - /** - * @next_cpu: Used by blk_mq_hctx_next_cpu() for round-robin CPU - * selection from @cpumask. - */ int next_cpu; - /** - * @next_cpu_batch: Counter of how many works left in the batch before - * changing to the next CPU. - */ int next_cpu_batch; - /** @flags: BLK_MQ_F_* flags. Defines the behaviour of the queue. */ - unsigned long flags; + unsigned long flags; /* BLK_MQ_F_* flags */ - /** - * @sched_data: Pointer owned by the IO scheduler attached to a request - * queue. It's up to the IO scheduler how to use this pointer. - */ - void *sched_data; - /** - * @queue: Pointer to the request queue that owns this hardware context. - */ struct request_queue *queue; - /** @fq: Queue of requests that need to perform a flush operation. */ struct blk_flush_queue *fq; - /** - * @driver_data: Pointer to data owned by the block driver that created - * this hctx - */ void *driver_data; - /** - * @ctx_map: Bitmap for each software queue. If bit is on, there is a - * pending request in that software queue. - */ struct sbitmap ctx_map; - /** - * @dispatch_from: Software queue to be used when no scheduler was - * selected. - */ - struct blk_mq_ctx *dispatch_from; - /** - * @dispatch_busy: Number used by blk_mq_update_dispatch_busy() to - * decide if the hw_queue is busy using Exponential Weighted Moving - * Average algorithm. 
- */ - unsigned int dispatch_busy; - - /** @type: HCTX_TYPE_* flags. Type of hardware queue. */ - unsigned short type; - /** @nr_ctx: Number of software queues. */ - unsigned short nr_ctx; - /** @ctxs: Array of software queues. */ struct blk_mq_ctx **ctxs; + unsigned int nr_ctx; - /** @dispatch_wait_lock: Lock for dispatch_wait queue. */ - spinlock_t dispatch_wait_lock; - /** - * @dispatch_wait: Waitqueue to put requests when there is no tag - * available at the moment, to wait for another try in the future. - */ - wait_queue_entry_t dispatch_wait; - - /** - * @wait_index: Index of next available dispatch_wait queue to insert - * requests. - */ atomic_t wait_index; - /** - * @tags: Tags owned by the block driver. A tag at this set is only - * assigned when a request is dispatched from a hardware queue. - */ struct blk_mq_tags *tags; - /** - * @sched_tags: Tags owned by I/O scheduler. If there is an I/O - * scheduler associated with a request queue, a tag is assigned when - * that request is allocated. Else, this member is not used. - */ - struct blk_mq_tags *sched_tags; - /** @queued: Number of queued requests. */ unsigned long queued; - /** @run: Number of dispatched requests. */ unsigned long run; #define BLK_MQ_MAX_DISPATCH_ORDER 7 - /** @dispatched: Number of dispatch requests by queue. */ unsigned long dispatched[BLK_MQ_MAX_DISPATCH_ORDER]; - /** @numa_node: NUMA node the storage adapter has been connected to. */ unsigned int numa_node; - /** @queue_num: Index of this hardware queue. */ unsigned int queue_num; - /** - * @nr_active: Number of active requests. Only used when a tag set is - * shared across request queues. - */ atomic_t nr_active; - /** @cpuhp_online: List to store request if CPU is going to die */ - struct hlist_node cpuhp_online; - /** @cpuhp_dead: List to store request if some CPU die. */ + struct delayed_work delay_work; + struct hlist_node cpuhp_dead; - /** @kobj: Kernel object for sysfs. 
*/ struct kobject kobj; - /** @poll_considered: Count times blk_poll() was called. */ unsigned long poll_considered; - /** @poll_invoked: Count how many requests blk_poll() polled. */ unsigned long poll_invoked; - /** @poll_success: Count how many polled requests were completed. */ unsigned long poll_success; - -#ifdef CONFIG_BLK_DEBUG_FS - /** - * @debugfs_dir: debugfs directory for this hardware queue. Named - * as cpu. - */ - struct dentry *debugfs_dir; - /** @sched_debugfs_dir: debugfs directory for the scheduler. */ - struct dentry *sched_debugfs_dir; -#endif - - /** - * @hctx_list: if this hctx is not in use, this is an entry in - * q->unused_hctx_list. - */ - struct list_head hctx_list; - - /** - * @srcu: Sleepable RCU. Use as lock when type of the hardware queue is - * blocking (BLK_MQ_F_BLOCKING). Must be the last member - see also - * blk_mq_hw_ctx_size(). - */ - struct srcu_struct srcu[]; }; -/** - * struct blk_mq_queue_map - Map software queues to hardware queues - * @mq_map: CPU ID to hardware queue index map. This is an array - * with nr_cpu_ids elements. Each element has a value in the range - * [@queue_offset, @queue_offset + @nr_queues). - * @nr_queues: Number of hardware queues to map CPU IDs onto. - * @queue_offset: First hardware queue to map onto. Used by the PCIe NVMe - * driver to map each hardware queue type (enum hctx_type) onto a distinct - * set of hardware queues. - */ -struct blk_mq_queue_map { - unsigned int *mq_map; - unsigned int nr_queues; - unsigned int queue_offset; -}; - -/** - * enum hctx_type - Type of hardware queue - * @HCTX_TYPE_DEFAULT: All I/O not otherwise accounted for. - * @HCTX_TYPE_READ: Just for READ I/O. - * @HCTX_TYPE_POLL: Polled I/O of any kind. - * @HCTX_MAX_TYPES: Number of types of hctx. 
- */ -enum hctx_type { - HCTX_TYPE_DEFAULT, - HCTX_TYPE_READ, - HCTX_TYPE_POLL, - - HCTX_MAX_TYPES, -}; - -/** - * struct blk_mq_tag_set - tag set that can be shared between request queues - * @map: One or more ctx -> hctx mappings. One map exists for each - * hardware queue type (enum hctx_type) that the driver wishes - * to support. There are no restrictions on maps being of the - * same size, and it's perfectly legal to share maps between - * types. - * @nr_maps: Number of elements in the @map array. A number in the range - * [1, HCTX_MAX_TYPES]. - * @ops: Pointers to functions that implement block driver behavior. - * @nr_hw_queues: Number of hardware queues supported by the block driver that - * owns this data structure. - * @queue_depth: Number of tags per hardware queue, reserved tags included. - * @reserved_tags: Number of tags to set aside for BLK_MQ_REQ_RESERVED tag - * allocations. - * @cmd_size: Number of additional bytes to allocate per request. The block - * driver owns these additional bytes. - * @numa_node: NUMA node the storage adapter has been connected to. - * @timeout: Request processing timeout in jiffies. - * @flags: Zero or more BLK_MQ_F_* flags. - * @driver_data: Pointer to data owned by the block driver that created this - * tag set. - * @active_queues_shared_sbitmap: - * number of active request queues per tag set. - * @__bitmap_tags: A shared tags sbitmap, used over all hctx's - * @__breserved_tags: - * A shared reserved tags sbitmap, used over all hctx's - * @tags: Tag sets. One tag set per hardware queue. Has @nr_hw_queues - * elements. - * @tag_list_lock: Serializes tag_list accesses. - * @tag_list: List of the request queues that use this tag set. See also - * request_queue.tag_set_list. 
- */ struct blk_mq_tag_set { - struct blk_mq_queue_map map[HCTX_MAX_TYPES]; - unsigned int nr_maps; - const struct blk_mq_ops *ops; + unsigned int *mq_map; + struct blk_mq_ops *ops; unsigned int nr_hw_queues; - unsigned int queue_depth; + unsigned int queue_depth; /* max hw supported */ unsigned int reserved_tags; - unsigned int cmd_size; + unsigned int cmd_size; /* per-request extra data */ int numa_node; unsigned int timeout; - unsigned int flags; + unsigned int flags; /* BLK_MQ_F_* */ void *driver_data; - atomic_t active_queues_shared_sbitmap; - struct sbitmap_queue __bitmap_tags; - struct sbitmap_queue __breserved_tags; struct blk_mq_tags **tags; struct mutex tag_list_lock; struct list_head tag_list; }; -/** - * struct blk_mq_queue_data - Data about a request inserted in a queue - * - * @rq: Request pointer. - * @last: If it is the last request in the queue. - */ struct blk_mq_queue_data { struct request *rq; + struct list_head *list; bool last; }; -typedef bool (busy_iter_fn)(struct blk_mq_hw_ctx *, struct request *, void *, +typedef int (queue_rq_fn)(struct blk_mq_hw_ctx *, const struct blk_mq_queue_data *); +typedef enum blk_eh_timer_return (timeout_fn)(struct request *, bool); +typedef int (init_hctx_fn)(struct blk_mq_hw_ctx *, void *, unsigned int); +typedef void (exit_hctx_fn)(struct blk_mq_hw_ctx *, unsigned int); +typedef int (init_request_fn)(void *, struct request *, unsigned int, + unsigned int, unsigned int); +typedef void (exit_request_fn)(void *, struct request *, unsigned int, + unsigned int); +typedef int (reinit_request_fn)(void *, struct request *); + +typedef void (busy_iter_fn)(struct blk_mq_hw_ctx *, struct request *, void *, bool); -typedef bool (busy_tag_iter_fn)(struct request *, void *, bool); +typedef void (busy_tag_iter_fn)(struct request *, void *, bool); +typedef int (poll_fn)(struct blk_mq_hw_ctx *, unsigned int); +typedef int (map_queues_fn)(struct blk_mq_tag_set *set); + -/** - * struct blk_mq_ops - Callback functions that 
implements block driver - * behaviour. - */ struct blk_mq_ops { - /** - * @queue_rq: Queue a new request from block IO. + /* + * Queue request */ - blk_status_t (*queue_rq)(struct blk_mq_hw_ctx *, - const struct blk_mq_queue_data *); + queue_rq_fn *queue_rq; - /** - * @commit_rqs: If a driver uses bd->last to judge when to submit - * requests to hardware, it must define this function. In case of errors - * that make us stop issuing further requests, this hook serves the - * purpose of kicking the hardware (which the last request otherwise - * would have done). + /* + * Called on request timeout */ - void (*commit_rqs)(struct blk_mq_hw_ctx *); + timeout_fn *timeout; - /** - * @get_budget: Reserve budget before queue request, once .queue_rq is - * run, it is driver's responsibility to release the - * reserved budget. Also we have to handle failure case - * of .get_budget for avoiding I/O deadlock. + /* + * Called to poll for completion of a specific tag. */ - int (*get_budget)(struct request_queue *); + poll_fn *poll; - /** - * @put_budget: Release the reserved budget. - */ - void (*put_budget)(struct request_queue *, int); + softirq_done_fn *complete; - /** - * @set_rq_budget_token: store rq's budget token + /* + * Called when the block layer side of a hardware queue has been + * set up, allowing the driver to allocate/init matching structures. + * Ditto for exit/teardown. */ - void (*set_rq_budget_token)(struct request *, int); - /** - * @get_rq_budget_token: retrieve rq's budget token - */ - int (*get_rq_budget_token)(struct request *); + init_hctx_fn *init_hctx; + exit_hctx_fn *exit_hctx; - /** - * @timeout: Called on request timeout. - */ - enum blk_eh_timer_return (*timeout)(struct request *, bool); - - /** - * @poll: Called to poll for completion of a specific tag. - */ - int (*poll)(struct blk_mq_hw_ctx *); - - /** - * @complete: Mark the request as complete. 
- */ - void (*complete)(struct request *); - - /** - * @init_hctx: Called when the block layer side of a hardware queue has - * been set up, allowing the driver to allocate/init matching - * structures. - */ - int (*init_hctx)(struct blk_mq_hw_ctx *, void *, unsigned int); - /** - * @exit_hctx: Ditto for exit/teardown. - */ - void (*exit_hctx)(struct blk_mq_hw_ctx *, unsigned int); - - /** - * @init_request: Called for every command allocated by the block layer - * to allow the driver to set up driver specific data. + /* + * Called for every command allocated by the block layer to allow + * the driver to set up driver specific data. * * Tag greater than or equal to queue_depth is for setting up * flush request. + * + * Ditto for exit/teardown. */ - int (*init_request)(struct blk_mq_tag_set *set, struct request *, - unsigned int, unsigned int); - /** - * @exit_request: Ditto for exit/teardown. - */ - void (*exit_request)(struct blk_mq_tag_set *set, struct request *, - unsigned int); + init_request_fn *init_request; + exit_request_fn *exit_request; + reinit_request_fn *reinit_request; - /** - * @initialize_rq_fn: Called from inside blk_get_request(). - */ - void (*initialize_rq_fn)(struct request *rq); - - /** - * @cleanup_rq: Called before freeing one request which isn't completed - * yet, and usually for freeing the driver private data. - */ - void (*cleanup_rq)(struct request *); - - /** - * @busy: If set, returns whether or not this queue currently is busy. - */ - bool (*busy)(struct request_queue *); - - /** - * @map_queues: This allows drivers specify their own queue mapping by - * overriding the setup-time function that builds the mq_map. - */ - int (*map_queues)(struct blk_mq_tag_set *set); - -#ifdef CONFIG_BLK_DEBUG_FS - /** - * @show_rq: Used by the debugfs implementation to show driver-specific - * information about a request. 
- */ - void (*show_rq)(struct seq_file *m, struct request *rq); -#endif + map_queues_fn *map_queues; }; enum { + BLK_MQ_RQ_QUEUE_OK = 0, /* queued fine */ + BLK_MQ_RQ_QUEUE_BUSY = 1, /* requeue IO for later */ + BLK_MQ_RQ_QUEUE_ERROR = 2, /* end IO with error */ + BLK_MQ_F_SHOULD_MERGE = 1 << 0, - BLK_MQ_F_TAG_QUEUE_SHARED = 1 << 1, - /* - * Set when this device requires underlying blk-mq device for - * completing IO: - */ - BLK_MQ_F_STACKING = 1 << 2, - BLK_MQ_F_TAG_HCTX_SHARED = 1 << 3, + BLK_MQ_F_TAG_SHARED = 1 << 1, + BLK_MQ_F_SG_MERGE = 1 << 2, + BLK_MQ_F_DEFER_ISSUE = 1 << 4, BLK_MQ_F_BLOCKING = 1 << 5, - /* Do not allow an I/O scheduler to be configured. */ - BLK_MQ_F_NO_SCHED = 1 << 6, - /* - * Select 'none' during queue registration in case of a single hwq - * or shared hwqs instead of 'mq-deadline'. - */ - BLK_MQ_F_NO_SCHED_BY_DEFAULT = 1 << 7, BLK_MQ_F_ALLOC_POLICY_START_BIT = 8, BLK_MQ_F_ALLOC_POLICY_BITS = 1, BLK_MQ_S_STOPPED = 0, BLK_MQ_S_TAG_ACTIVE = 1, - BLK_MQ_S_SCHED_RESTART = 2, - - /* hw queue is inactive after all its CPUs become offline */ - BLK_MQ_S_INACTIVE = 3, BLK_MQ_MAX_DEPTH = 10240, @@ -432,45 +165,31 @@ enum { ((policy & ((1 << BLK_MQ_F_ALLOC_POLICY_BITS) - 1)) \ << BLK_MQ_F_ALLOC_POLICY_START_BIT) -struct gendisk *__blk_mq_alloc_disk(struct blk_mq_tag_set *set, void *queuedata, - struct lock_class_key *lkclass); -#define blk_mq_alloc_disk(set, queuedata) \ -({ \ - static struct lock_class_key __key; \ - \ - __blk_mq_alloc_disk(set, queuedata, &__key); \ -}) struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *); -int blk_mq_init_allocated_queue(struct blk_mq_tag_set *set, - struct request_queue *q); +struct request_queue *blk_mq_init_allocated_queue(struct blk_mq_tag_set *set, + struct request_queue *q); +int blk_mq_register_dev(struct device *, struct request_queue *); void blk_mq_unregister_dev(struct device *, struct request_queue *); int blk_mq_alloc_tag_set(struct blk_mq_tag_set *set); -int 
blk_mq_alloc_sq_tag_set(struct blk_mq_tag_set *set, - const struct blk_mq_ops *ops, unsigned int queue_depth, - unsigned int set_flags); void blk_mq_free_tag_set(struct blk_mq_tag_set *set); void blk_mq_flush_plug_list(struct blk_plug *plug, bool from_schedule); +void blk_mq_insert_request(struct request *, bool, bool, bool); void blk_mq_free_request(struct request *rq); - -bool blk_mq_queue_inflight(struct request_queue *q); +void blk_mq_free_hctx_request(struct blk_mq_hw_ctx *, struct request *rq); +bool blk_mq_can_queue(struct blk_mq_hw_ctx *); enum { - /* return when out of requests */ - BLK_MQ_REQ_NOWAIT = (__force blk_mq_req_flags_t)(1 << 0), - /* allocate from reserved pool */ - BLK_MQ_REQ_RESERVED = (__force blk_mq_req_flags_t)(1 << 1), - /* set RQF_PM */ - BLK_MQ_REQ_PM = (__force blk_mq_req_flags_t)(1 << 2), + BLK_MQ_REQ_NOWAIT = (1 << 0), /* return when out of requests */ + BLK_MQ_REQ_RESERVED = (1 << 1), /* allocate from reserved pool */ }; -struct request *blk_mq_alloc_request(struct request_queue *q, unsigned int op, - blk_mq_req_flags_t flags); -struct request *blk_mq_alloc_request_hctx(struct request_queue *q, - unsigned int op, blk_mq_req_flags_t flags, - unsigned int hctx_idx); +struct request *blk_mq_alloc_request(struct request_queue *q, int rw, + unsigned int flags); +struct request *blk_mq_alloc_request_hctx(struct request_queue *q, int op, + unsigned int flags, unsigned int hctx_idx); struct request *blk_mq_tag_to_rq(struct blk_mq_tags *tags, unsigned int tag); enum { @@ -490,108 +209,44 @@ static inline u16 blk_mq_unique_tag_to_tag(u32 unique_tag) return unique_tag & BLK_MQ_UNIQUE_TAG_MASK; } -/** - * blk_mq_rq_state() - read the current MQ_RQ_* state of a request - * @rq: target request. 
- */ -static inline enum mq_rq_state blk_mq_rq_state(struct request *rq) -{ - return READ_ONCE(rq->state); -} - -static inline int blk_mq_request_started(struct request *rq) -{ - return blk_mq_rq_state(rq) != MQ_RQ_IDLE; -} - -static inline int blk_mq_request_completed(struct request *rq) -{ - return blk_mq_rq_state(rq) == MQ_RQ_COMPLETE; -} - -/* - * - * Set the state to complete when completing a request from inside ->queue_rq. - * This is used by drivers that want to ensure special complete actions that - * need access to the request are called on failure, e.g. by nvme for - * multipathing. - */ -static inline void blk_mq_set_request_complete(struct request *rq) -{ - WRITE_ONCE(rq->state, MQ_RQ_COMPLETE); -} +int blk_mq_request_started(struct request *rq); void blk_mq_start_request(struct request *rq); -void blk_mq_end_request(struct request *rq, blk_status_t error); -void __blk_mq_end_request(struct request *rq, blk_status_t error); +void blk_mq_end_request(struct request *rq, int error); +void __blk_mq_end_request(struct request *rq, int error); -void blk_mq_requeue_request(struct request *rq, bool kick_requeue_list); +void blk_mq_requeue_request(struct request *rq); +void blk_mq_add_to_requeue_list(struct request *rq, bool at_head); +void blk_mq_cancel_requeue_work(struct request_queue *q); void blk_mq_kick_requeue_list(struct request_queue *q); void blk_mq_delay_kick_requeue_list(struct request_queue *q, unsigned long msecs); -void blk_mq_complete_request(struct request *rq); -bool blk_mq_complete_request_remote(struct request *rq); -bool blk_mq_queue_stopped(struct request_queue *q); +void blk_mq_abort_requeue_list(struct request_queue *q); +void blk_mq_complete_request(struct request *rq, int error); + void blk_mq_stop_hw_queue(struct blk_mq_hw_ctx *hctx); void blk_mq_start_hw_queue(struct blk_mq_hw_ctx *hctx); void blk_mq_stop_hw_queues(struct request_queue *q); void blk_mq_start_hw_queues(struct request_queue *q); -void 
blk_mq_start_stopped_hw_queue(struct blk_mq_hw_ctx *hctx, bool async); void blk_mq_start_stopped_hw_queues(struct request_queue *q, bool async); -void blk_mq_quiesce_queue(struct request_queue *q); -void blk_mq_unquiesce_queue(struct request_queue *q); -void blk_mq_delay_run_hw_queue(struct blk_mq_hw_ctx *hctx, unsigned long msecs); -void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async); void blk_mq_run_hw_queues(struct request_queue *q, bool async); -void blk_mq_delay_run_hw_queues(struct request_queue *q, unsigned long msecs); +void blk_mq_delay_queue(struct blk_mq_hw_ctx *hctx, unsigned long msecs); void blk_mq_tagset_busy_iter(struct blk_mq_tag_set *tagset, busy_tag_iter_fn *fn, void *priv); -void blk_mq_tagset_wait_completed_request(struct blk_mq_tag_set *tagset); void blk_mq_freeze_queue(struct request_queue *q); void blk_mq_unfreeze_queue(struct request_queue *q); -void blk_freeze_queue_start(struct request_queue *q); -void blk_mq_freeze_queue_wait(struct request_queue *q); -int blk_mq_freeze_queue_wait_timeout(struct request_queue *q, - unsigned long timeout); +void blk_mq_freeze_queue_start(struct request_queue *q); +int blk_mq_reinit_tagset(struct blk_mq_tag_set *set); -int blk_mq_map_queues(struct blk_mq_queue_map *qmap); void blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set, int nr_hw_queues); -void blk_mq_quiesce_queue_nowait(struct request_queue *q); - -unsigned int blk_mq_rq_cpu(struct request *rq); - -bool __blk_should_fake_timeout(struct request_queue *q); -static inline bool blk_should_fake_timeout(struct request_queue *q) -{ - if (IS_ENABLED(CONFIG_FAIL_IO_TIMEOUT) && - test_bit(QUEUE_FLAG_FAIL_IO, &q->queue_flags)) - return __blk_should_fake_timeout(q); - return false; -} - -/** - * blk_mq_rq_from_pdu - cast a PDU to a request - * @pdu: the PDU (Protocol Data Unit) to be casted - * - * Return: request - * +/* * Driver command data is immediately after the request. So subtract request - * size to get back to the original request. 
+ * size to get back to the original request, add request size to get the PDU. */ static inline struct request *blk_mq_rq_from_pdu(void *pdu) { return pdu - sizeof(struct request); } - -/** - * blk_mq_rq_to_pdu - cast a request to a PDU - * @rq: the request to be casted - * - * Return: pointer to the PDU - * - * Driver command data is immediately after the request. So add request to get - * the PDU. - */ static inline void *blk_mq_rq_to_pdu(struct request *rq) { return rq + 1; @@ -605,36 +260,4 @@ static inline void *blk_mq_rq_to_pdu(struct request *rq) for ((i) = 0; (i) < (hctx)->nr_ctx && \ ({ ctx = (hctx)->ctxs[(i)]; 1; }); (i)++) -static inline blk_qc_t request_to_qc_t(struct blk_mq_hw_ctx *hctx, - struct request *rq) -{ - if (rq->tag != -1) - return rq->tag | (hctx->queue_num << BLK_QC_T_SHIFT); - - return rq->internal_tag | (hctx->queue_num << BLK_QC_T_SHIFT) | - BLK_QC_T_INTERNAL; -} - -static inline void blk_mq_cleanup_rq(struct request *rq) -{ - if (rq->q->mq_ops->cleanup_rq) - rq->q->mq_ops->cleanup_rq(rq); -} - -static inline void blk_rq_bio_prep(struct request *rq, struct bio *bio, - unsigned int nr_segs) -{ - rq->nr_phys_segments = nr_segs; - rq->__data_len = bio->bi_iter.bi_size; - rq->bio = rq->biotail = bio; - rq->ioprio = bio_prio(bio); - - if (bio->bi_bdev) - rq->rq_disk = bio->bi_bdev->bd_disk; -} - -blk_qc_t blk_mq_submit_bio(struct bio *bio); -void blk_mq_hctx_set_fq_lock_class(struct blk_mq_hw_ctx *hctx, - struct lock_class_key *key); - #endif diff --git a/include/linux/blk_types.h b/include/linux/blk_types.h index be622b5a21..cd395ecec9 100644 --- a/include/linux/blk_types.h +++ b/include/linux/blk_types.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ /* * Block data types and constants. Directly include this file only to * break include dependency loop. 
@@ -8,206 +7,17 @@ #include #include -#include -#include struct bio_set; struct bio; struct bio_integrity_payload; struct page; +struct block_device; struct io_context; struct cgroup_subsys_state; typedef void (bio_end_io_t) (struct bio *); -struct bio_crypt_ctx; - -struct block_device { - sector_t bd_start_sect; - struct disk_stats __percpu *bd_stats; - unsigned long bd_stamp; - bool bd_read_only; /* read-only policy */ - dev_t bd_dev; - int bd_openers; - struct inode * bd_inode; /* will die */ - struct super_block * bd_super; - void * bd_claiming; - struct device bd_device; - void * bd_holder; - int bd_holders; - bool bd_write_holder; - struct kobject *bd_holder_dir; - u8 bd_partno; - spinlock_t bd_size_lock; /* for bd_inode->i_size updates */ - struct gendisk * bd_disk; - - /* The counter of freeze processes */ - int bd_fsfreeze_count; - /* Mutex for freeze */ - struct mutex bd_fsfreeze_mutex; - struct super_block *bd_fsfreeze_sb; - - struct partition_meta_info *bd_meta_info; -#ifdef CONFIG_FAIL_MAKE_REQUEST - bool bd_make_it_fail; -#endif -} __randomize_layout; - -#define bdev_whole(_bdev) \ - ((_bdev)->bd_disk->part0) - -#define dev_to_bdev(device) \ - container_of((device), struct block_device, bd_device) - -#define bdev_kobj(_bdev) \ - (&((_bdev)->bd_device.kobj)) - -/* - * Block error status values. See block/blk-core:blk_errors for the details. - * Alpha cannot write a byte atomically, so we need to use 32-bit value. 
- */ -#if defined(CONFIG_ALPHA) && !defined(__alpha_bwx__) -typedef u32 __bitwise blk_status_t; -#else -typedef u8 __bitwise blk_status_t; -#endif -#define BLK_STS_OK 0 -#define BLK_STS_NOTSUPP ((__force blk_status_t)1) -#define BLK_STS_TIMEOUT ((__force blk_status_t)2) -#define BLK_STS_NOSPC ((__force blk_status_t)3) -#define BLK_STS_TRANSPORT ((__force blk_status_t)4) -#define BLK_STS_TARGET ((__force blk_status_t)5) -#define BLK_STS_NEXUS ((__force blk_status_t)6) -#define BLK_STS_MEDIUM ((__force blk_status_t)7) -#define BLK_STS_PROTECTION ((__force blk_status_t)8) -#define BLK_STS_RESOURCE ((__force blk_status_t)9) -#define BLK_STS_IOERR ((__force blk_status_t)10) - -/* hack for device mapper, don't use elsewhere: */ -#define BLK_STS_DM_REQUEUE ((__force blk_status_t)11) - -#define BLK_STS_AGAIN ((__force blk_status_t)12) - -/* - * BLK_STS_DEV_RESOURCE is returned from the driver to the block layer if - * device related resources are unavailable, but the driver can guarantee - * that the queue will be rerun in the future once resources become - * available again. This is typically the case for device specific - * resources that are consumed for IO. If the driver fails allocating these - * resources, we know that inflight (or pending) IO will free these - * resource upon completion. - * - * This is different from BLK_STS_RESOURCE in that it explicitly references - * a device specific resource. For resources of wider scope, allocation - * failure can happen without having pending IO. This means that we can't - * rely on request completions freeing these resources, as IO may not be in - * flight. Examples of that are kernel memory allocations, DMA mappings, or - * any other system wide resources. 
- */ -#define BLK_STS_DEV_RESOURCE ((__force blk_status_t)13) - -/* - * BLK_STS_ZONE_RESOURCE is returned from the driver to the block layer if zone - * related resources are unavailable, but the driver can guarantee the queue - * will be rerun in the future once the resources become available again. - * - * This is different from BLK_STS_DEV_RESOURCE in that it explicitly references - * a zone specific resource and IO to a different zone on the same device could - * still be served. Examples of that are zones that are write-locked, but a read - * to the same zone could be served. - */ -#define BLK_STS_ZONE_RESOURCE ((__force blk_status_t)14) - -/* - * BLK_STS_ZONE_OPEN_RESOURCE is returned from the driver in the completion - * path if the device returns a status indicating that too many zone resources - * are currently open. The same command should be successful if resubmitted - * after the number of open zones decreases below the device's limits, which is - * reported in the request_queue's max_open_zones. - */ -#define BLK_STS_ZONE_OPEN_RESOURCE ((__force blk_status_t)15) - -/* - * BLK_STS_ZONE_ACTIVE_RESOURCE is returned from the driver in the completion - * path if the device returns a status indicating that too many zone resources - * are currently active. The same command should be successful if resubmitted - * after the number of active zones decreases below the device's limits, which - * is reported in the request_queue's max_active_zones. - */ -#define BLK_STS_ZONE_ACTIVE_RESOURCE ((__force blk_status_t)16) - -/** - * blk_path_error - returns true if error may be path related - * @error: status the request was completed with - * - * Description: - * This classifies block error status into non-retryable errors and ones - * that may be successful if retried on a failover path. 
- * - * Return: - * %false - retrying failover path will not help - * %true - may succeed if retried - */ -static inline bool blk_path_error(blk_status_t error) -{ - switch (error) { - case BLK_STS_NOTSUPP: - case BLK_STS_NOSPC: - case BLK_STS_TARGET: - case BLK_STS_NEXUS: - case BLK_STS_MEDIUM: - case BLK_STS_PROTECTION: - return false; - } - - /* Anything else could be a path failure, so should be retried */ - return true; -} - -/* - * From most significant bit: - * 1 bit: reserved for other usage, see below - * 12 bits: original size of bio - * 51 bits: issue time of bio - */ -#define BIO_ISSUE_RES_BITS 1 -#define BIO_ISSUE_SIZE_BITS 12 -#define BIO_ISSUE_RES_SHIFT (64 - BIO_ISSUE_RES_BITS) -#define BIO_ISSUE_SIZE_SHIFT (BIO_ISSUE_RES_SHIFT - BIO_ISSUE_SIZE_BITS) -#define BIO_ISSUE_TIME_MASK ((1ULL << BIO_ISSUE_SIZE_SHIFT) - 1) -#define BIO_ISSUE_SIZE_MASK \ - (((1ULL << BIO_ISSUE_SIZE_BITS) - 1) << BIO_ISSUE_SIZE_SHIFT) -#define BIO_ISSUE_RES_MASK (~((1ULL << BIO_ISSUE_RES_SHIFT) - 1)) - -/* Reserved bit for blk-throtl */ -#define BIO_ISSUE_THROTL_SKIP_LATENCY (1ULL << 63) - -struct bio_issue { - u64 value; -}; - -static inline u64 __bio_issue_time(u64 time) -{ - return time & BIO_ISSUE_TIME_MASK; -} - -static inline u64 bio_issue_time(struct bio_issue *issue) -{ - return __bio_issue_time(issue->value); -} - -static inline sector_t bio_issue_size(struct bio_issue *issue) -{ - return ((issue->value & BIO_ISSUE_SIZE_MASK) >> BIO_ISSUE_SIZE_SHIFT); -} - -static inline void bio_issue_init(struct bio_issue *issue, - sector_t size) -{ - size &= (1ULL << BIO_ISSUE_SIZE_BITS) - 1; - issue->value = ((issue->value & BIO_ISSUE_RES_MASK) | - (ktime_get_ns() & BIO_ISSUE_TIME_MASK) | - ((u64)size << BIO_ISSUE_SIZE_SHIFT)); -} +#ifdef CONFIG_BLOCK /* * main unit of I/O for the block layer and lower layers (ie drivers and * stacking drivers) @@ -215,39 +25,41 @@ static inline void bio_issue_init(struct bio_issue *issue, struct bio { struct bio *bi_next; /* request queue link 
*/ struct block_device *bi_bdev; + int bi_error; unsigned int bi_opf; /* bottom bits req flags, * top bits REQ_OP. Use * accessors. */ - unsigned short bi_flags; /* BIO_* below */ + unsigned short bi_flags; /* status, command, etc */ unsigned short bi_ioprio; - unsigned short bi_write_hint; - blk_status_t bi_status; - atomic_t __bi_remaining; struct bvec_iter bi_iter; + /* Number of segments in this BIO after + * physical address coalescing is performed. + */ + unsigned int bi_phys_segments; + + /* + * To keep track of the max segment size, we account for the + * sizes of the first and last mergeable segments in this bio. + */ + unsigned int bi_seg_front_size; + unsigned int bi_seg_back_size; + + atomic_t __bi_remaining; + bio_end_io_t *bi_end_io; void *bi_private; #ifdef CONFIG_BLK_CGROUP /* - * Represents the association of the css and request_queue for the bio. - * If a bio goes direct to device, it will not have a blkg as it will - * not have a request_queue associated with it. The reference is put - * on release of the bio. + * Optional ioc and css associated with this bio. Put on bio + * release. Read comment on top of bio_associate_current(). */ - struct blkcg_gq *bi_blkg; - struct bio_issue bi_issue; -#ifdef CONFIG_BLK_CGROUP_IOCOST - u64 bi_iocost_cost; + struct io_context *bi_ioc; + struct cgroup_subsys_state *bi_css; #endif -#endif - -#ifdef CONFIG_BLK_INLINE_ENCRYPTION - struct bio_crypt_ctx *bi_crypt_context; -#endif - union { #if defined(CONFIG_BLK_DEV_INTEGRITY) struct bio_integrity_payload *bi_integrity; /* data integrity */ @@ -273,122 +85,110 @@ struct bio { * double allocations for a small number of bio_vecs. This member * MUST obviously be kept at the very end of the bio. 
*/ - struct bio_vec bi_inline_vecs[]; + struct bio_vec bi_inline_vecs[0]; }; +#define BIO_OP_SHIFT (8 * FIELD_SIZEOF(struct bio, bi_opf) - REQ_OP_BITS) +#define bio_flags(bio) ((bio)->bi_opf & ((1 << BIO_OP_SHIFT) - 1)) +#define bio_op(bio) ((bio)->bi_opf >> BIO_OP_SHIFT) + +#define bio_set_op_attrs(bio, op, op_flags) do { \ + if (__builtin_constant_p(op)) \ + BUILD_BUG_ON((op) + 0U >= (1U << REQ_OP_BITS)); \ + else \ + WARN_ON_ONCE((op) + 0U >= (1U << REQ_OP_BITS)); \ + if (__builtin_constant_p(op_flags)) \ + BUILD_BUG_ON((op_flags) + 0U >= (1U << BIO_OP_SHIFT)); \ + else \ + WARN_ON_ONCE((op_flags) + 0U >= (1U << BIO_OP_SHIFT)); \ + (bio)->bi_opf = bio_flags(bio); \ + (bio)->bi_opf |= (((op) + 0U) << BIO_OP_SHIFT); \ + (bio)->bi_opf |= (op_flags); \ +} while (0) + #define BIO_RESET_BYTES offsetof(struct bio, bi_max_vecs) -#define BIO_MAX_SECTORS (UINT_MAX >> SECTOR_SHIFT) /* * bio flags */ -enum { - BIO_NO_PAGE_REF, /* don't put release vec pages */ - BIO_CLONED, /* doesn't own data */ - BIO_BOUNCED, /* bio is a bounce bio */ - BIO_WORKINGSET, /* contains userspace workingset pages */ - BIO_QUIET, /* Make BIO Quiet */ - BIO_CHAIN, /* chained bio, ->bi_remaining in effect */ - BIO_REFFED, /* bio has elevated ->bi_cnt */ - BIO_THROTTLED, /* This bio has already been subjected to - * throttling rules. Don't do it again. */ - BIO_TRACE_COMPLETION, /* bio_endio() should trace the final completion - * of this bio. 
*/ - BIO_CGROUP_ACCT, /* has been accounted to a cgroup */ - BIO_TRACKED, /* set if bio goes through the rq_qos path */ - BIO_REMAPPED, - BIO_ZONE_WRITE_LOCKED, /* Owns a zoned device zone write lock */ - BIO_PERCPU_CACHE, /* can participate in per-cpu alloc cache */ - BIO_FLAG_LAST -}; - -typedef __u32 __bitwise blk_mq_req_flags_t; +#define BIO_SEG_VALID 1 /* bi_phys_segments valid */ +#define BIO_CLONED 2 /* doesn't own data */ +#define BIO_BOUNCED 3 /* bio is a bounce bio */ +#define BIO_USER_MAPPED 4 /* contains user pages */ +#define BIO_NULL_MAPPED 5 /* contains invalid user pages */ +#define BIO_QUIET 6 /* Make BIO Quiet */ +#define BIO_CHAIN 7 /* chained bio, ->bi_remaining in effect */ +#define BIO_REFFED 8 /* bio has elevated ->bi_cnt */ /* - * Operations and flags common to the bio and request structures. - * We use 8 bits for encoding the operation, and the remaining 24 for flags. - * - * The least significant bit of the operation number indicates the data - * transfer direction: - * - * - if the least significant bit is set transfers are TO the device - * - if the least significant bit is not set transfers are FROM the device - * - * If a operation does not transfer data the least significant bit has no - * meaning. 
+ * Flags starting here get preserved by bio_reset() - this includes + * BVEC_POOL_IDX() */ -#define REQ_OP_BITS 8 -#define REQ_OP_MASK ((1 << REQ_OP_BITS) - 1) -#define REQ_FLAG_BITS 24 +#define BIO_RESET_BITS 10 -enum req_opf { - /* read sectors from the device */ - REQ_OP_READ = 0, - /* write sectors to the device */ - REQ_OP_WRITE = 1, - /* flush the volatile write cache */ - REQ_OP_FLUSH = 2, - /* discard sectors */ - REQ_OP_DISCARD = 3, - /* securely erase sectors */ - REQ_OP_SECURE_ERASE = 5, - /* write the same sector many times */ - REQ_OP_WRITE_SAME = 7, - /* write the zero filled sector many times */ - REQ_OP_WRITE_ZEROES = 9, - /* Open a zone */ - REQ_OP_ZONE_OPEN = 10, - /* Close a zone */ - REQ_OP_ZONE_CLOSE = 11, - /* Transition a zone to full */ - REQ_OP_ZONE_FINISH = 12, - /* write data at the current zone write pointer */ - REQ_OP_ZONE_APPEND = 13, - /* reset a zone write pointer */ - REQ_OP_ZONE_RESET = 15, - /* reset all the zone present on the device */ - REQ_OP_ZONE_RESET_ALL = 17, +/* + * We support 6 different bvec pools, the last one is magic in that it + * is backed by a mempool. + */ +#define BVEC_POOL_NR 6 +#define BVEC_POOL_MAX (BVEC_POOL_NR - 1) - /* Driver private requests */ - REQ_OP_DRV_IN = 34, - REQ_OP_DRV_OUT = 35, +/* + * Top 4 bits of bio flags indicate the pool the bvecs came from. We add + * 1 to the actual index so that 0 indicates that there are no bvecs to be + * freed. + */ +#define BVEC_POOL_BITS (4) +#define BVEC_POOL_OFFSET (16 - BVEC_POOL_BITS) +#define BVEC_POOL_IDX(bio) ((bio)->bi_flags >> BVEC_POOL_OFFSET) - REQ_OP_LAST, -}; +#endif /* CONFIG_BLOCK */ -enum req_flag_bits { - __REQ_FAILFAST_DEV = /* no driver retries of device errors */ - REQ_OP_BITS, +/* + * Request flags. For use in the cmd_flags field of struct request, and in + * bi_opf of struct bio. Note that some flags are only valid in either one. 
+ */ +enum rq_flag_bits { + /* common flags */ + __REQ_FAILFAST_DEV, /* no driver retries of device errors */ __REQ_FAILFAST_TRANSPORT, /* no driver retries of transport errors */ __REQ_FAILFAST_DRIVER, /* no driver retries of driver errors */ + __REQ_SYNC, /* request is sync (sync write or read) */ __REQ_META, /* metadata io request */ __REQ_PRIO, /* boost priority in cfq */ - __REQ_NOMERGE, /* don't touch this for merging */ - __REQ_IDLE, /* anticipate more IO after this one */ + + __REQ_NOIDLE, /* don't anticipate more IO after this one */ __REQ_INTEGRITY, /* I/O includes block integrity payload */ __REQ_FUA, /* forced unit access */ __REQ_PREFLUSH, /* request for cache flush */ + + /* bio only flags */ __REQ_RAHEAD, /* read ahead, can fail anytime */ - __REQ_BACKGROUND, /* background IO */ - __REQ_NOWAIT, /* Don't wait if request will block */ - /* - * When a shared kthread needs to issue a bio for a cgroup, doing - * so synchronously can lead to priority inversions as the kthread - * can be trapped waiting for that cgroup. CGROUP_PUNT flag makes - * submit_bio() punt the actual issuing to a dedicated per-blkcg - * work item to avoid such priority inversions. - */ - __REQ_CGROUP_PUNT, + __REQ_THROTTLED, /* This bio has already been subjected to + * throttling rules. Don't do it again. */ - /* command specific flags for REQ_OP_WRITE_ZEROES: */ - __REQ_NOUNMAP, /* do not free blocks when zeroing */ - - __REQ_HIPRI, - - /* for driver use */ - __REQ_DRV, - __REQ_SWAP, /* swapping request. 
*/ + /* request only flags */ + __REQ_SORTED, /* elevator knows about this request */ + __REQ_SOFTBARRIER, /* may not be passed by ioscheduler */ + __REQ_NOMERGE, /* don't touch this for merging */ + __REQ_STARTED, /* drive already may have started this one */ + __REQ_DONTPREP, /* don't call prep for this one */ + __REQ_QUEUED, /* uses queueing */ + __REQ_ELVPRIV, /* elevator private data attached */ + __REQ_FAILED, /* set if the request failed */ + __REQ_QUIET, /* don't worry about errors */ + __REQ_PREEMPT, /* set for "ide_preempt" requests and also + for requests for which the SCSI "quiesce" + state must be ignored. */ + __REQ_ALLOCED, /* request came from our alloc pool */ + __REQ_COPY_USER, /* contains copies of user pages */ + __REQ_FLUSH_SEQ, /* request for flush sequence */ + __REQ_IO_STAT, /* account I/O stat */ + __REQ_MIXED_MERGE, /* merge of different types, fail separately */ + __REQ_PM, /* runtime pm request */ + __REQ_HASHED, /* on IO scheduler merge hash */ + __REQ_MQ_INFLIGHT, /* track inflight for MQ */ __REQ_NR_BITS, /* stops here */ }; @@ -398,118 +198,72 @@ enum req_flag_bits { #define REQ_SYNC (1ULL << __REQ_SYNC) #define REQ_META (1ULL << __REQ_META) #define REQ_PRIO (1ULL << __REQ_PRIO) -#define REQ_NOMERGE (1ULL << __REQ_NOMERGE) -#define REQ_IDLE (1ULL << __REQ_IDLE) +#define REQ_NOIDLE (1ULL << __REQ_NOIDLE) #define REQ_INTEGRITY (1ULL << __REQ_INTEGRITY) -#define REQ_FUA (1ULL << __REQ_FUA) -#define REQ_PREFLUSH (1ULL << __REQ_PREFLUSH) -#define REQ_RAHEAD (1ULL << __REQ_RAHEAD) -#define REQ_BACKGROUND (1ULL << __REQ_BACKGROUND) -#define REQ_NOWAIT (1ULL << __REQ_NOWAIT) -#define REQ_CGROUP_PUNT (1ULL << __REQ_CGROUP_PUNT) - -#define REQ_NOUNMAP (1ULL << __REQ_NOUNMAP) -#define REQ_HIPRI (1ULL << __REQ_HIPRI) - -#define REQ_DRV (1ULL << __REQ_DRV) -#define REQ_SWAP (1ULL << __REQ_SWAP) #define REQ_FAILFAST_MASK \ (REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT | REQ_FAILFAST_DRIVER) +#define REQ_COMMON_MASK \ + (REQ_FAILFAST_MASK | REQ_SYNC | 
REQ_META | REQ_PRIO | REQ_NOIDLE | \ + REQ_PREFLUSH | REQ_FUA | REQ_INTEGRITY | REQ_NOMERGE) +#define REQ_CLONE_MASK REQ_COMMON_MASK +/* This mask is used for both bio and request merge checking */ #define REQ_NOMERGE_FLAGS \ - (REQ_NOMERGE | REQ_PREFLUSH | REQ_FUA) + (REQ_NOMERGE | REQ_STARTED | REQ_SOFTBARRIER | REQ_PREFLUSH | REQ_FUA | REQ_FLUSH_SEQ) -enum stat_group { - STAT_READ, - STAT_WRITE, - STAT_DISCARD, - STAT_FLUSH, +#define REQ_RAHEAD (1ULL << __REQ_RAHEAD) +#define REQ_THROTTLED (1ULL << __REQ_THROTTLED) - NR_STAT_GROUPS +#define REQ_SORTED (1ULL << __REQ_SORTED) +#define REQ_SOFTBARRIER (1ULL << __REQ_SOFTBARRIER) +#define REQ_FUA (1ULL << __REQ_FUA) +#define REQ_NOMERGE (1ULL << __REQ_NOMERGE) +#define REQ_STARTED (1ULL << __REQ_STARTED) +#define REQ_DONTPREP (1ULL << __REQ_DONTPREP) +#define REQ_QUEUED (1ULL << __REQ_QUEUED) +#define REQ_ELVPRIV (1ULL << __REQ_ELVPRIV) +#define REQ_FAILED (1ULL << __REQ_FAILED) +#define REQ_QUIET (1ULL << __REQ_QUIET) +#define REQ_PREEMPT (1ULL << __REQ_PREEMPT) +#define REQ_ALLOCED (1ULL << __REQ_ALLOCED) +#define REQ_COPY_USER (1ULL << __REQ_COPY_USER) +#define REQ_PREFLUSH (1ULL << __REQ_PREFLUSH) +#define REQ_FLUSH_SEQ (1ULL << __REQ_FLUSH_SEQ) +#define REQ_IO_STAT (1ULL << __REQ_IO_STAT) +#define REQ_MIXED_MERGE (1ULL << __REQ_MIXED_MERGE) +#define REQ_PM (1ULL << __REQ_PM) +#define REQ_HASHED (1ULL << __REQ_HASHED) +#define REQ_MQ_INFLIGHT (1ULL << __REQ_MQ_INFLIGHT) + +enum req_op { + REQ_OP_READ, + REQ_OP_WRITE, + REQ_OP_DISCARD, /* request to discard sectors */ + REQ_OP_SECURE_ERASE, /* request to securely erase sectors */ + REQ_OP_WRITE_SAME, /* write same block many times */ + REQ_OP_FLUSH, /* request for cache flush */ }; -#define bio_op(bio) \ - ((bio)->bi_opf & REQ_OP_MASK) -#define req_op(req) \ - ((req)->cmd_flags & REQ_OP_MASK) - -/* obsolete, don't use in new code */ -static inline void bio_set_op_attrs(struct bio *bio, unsigned op, - unsigned op_flags) -{ - bio->bi_opf = op | op_flags; -} - 
-static inline bool op_is_write(unsigned int op) -{ - return (op & 1); -} - -/* - * Check if the bio or request is one that needs special treatment in the - * flush state machine. - */ -static inline bool op_is_flush(unsigned int op) -{ - return op & (REQ_FUA | REQ_PREFLUSH); -} - -/* - * Reads are always treated as synchronous, as are requests with the FUA or - * PREFLUSH flag. Other operations may be marked as synchronous using the - * REQ_SYNC flag. - */ -static inline bool op_is_sync(unsigned int op) -{ - return (op & REQ_OP_MASK) == REQ_OP_READ || - (op & (REQ_SYNC | REQ_FUA | REQ_PREFLUSH)); -} - -static inline bool op_is_discard(unsigned int op) -{ - return (op & REQ_OP_MASK) == REQ_OP_DISCARD; -} - -/* - * Check if a bio or request operation is a zone management operation, with - * the exception of REQ_OP_ZONE_RESET_ALL which is treated as a special case - * due to its different handling in the block layer and device response in - * case of command failure. - */ -static inline bool op_is_zone_mgmt(enum req_opf op) -{ - switch (op & REQ_OP_MASK) { - case REQ_OP_ZONE_RESET: - case REQ_OP_ZONE_OPEN: - case REQ_OP_ZONE_CLOSE: - case REQ_OP_ZONE_FINISH: - return true; - default: - return false; - } -} - -static inline int op_stat_group(unsigned int op) -{ - if (op_is_discard(op)) - return STAT_DISCARD; - return op_is_write(op); -} +#define REQ_OP_BITS 3 typedef unsigned int blk_qc_t; -#define BLK_QC_T_NONE -1U -#define BLK_QC_T_SHIFT 16 -#define BLK_QC_T_INTERNAL (1U << 31) +#define BLK_QC_T_NONE -1U +#define BLK_QC_T_SHIFT 16 static inline bool blk_qc_t_valid(blk_qc_t cookie) { return cookie != BLK_QC_T_NONE; } +static inline blk_qc_t blk_tag_to_qc_t(unsigned int tag, unsigned int queue_num) +{ + return tag | (queue_num << BLK_QC_T_SHIFT); +} + static inline unsigned int blk_qc_t_to_queue_num(blk_qc_t cookie) { - return (cookie & ~BLK_QC_T_INTERNAL) >> BLK_QC_T_SHIFT; + return cookie >> BLK_QC_T_SHIFT; } static inline unsigned int blk_qc_t_to_tag(blk_qc_t 
cookie) @@ -517,17 +271,4 @@ static inline unsigned int blk_qc_t_to_tag(blk_qc_t cookie) return cookie & ((1u << BLK_QC_T_SHIFT) - 1); } -static inline bool blk_qc_t_is_internal(blk_qc_t cookie) -{ - return (cookie & BLK_QC_T_INTERNAL) != 0; -} - -struct blk_rq_stat { - u64 mean; - u64 min; - u64 max; - u32 nr_samples; - u64 batch; -}; - #endif /* __LINUX_BLK_TYPES_H */ diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index 12b9dbcc98..e26e3a7d57 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h @@ -1,114 +1,84 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_BLKDEV_H #define _LINUX_BLKDEV_H #include -#include + +#ifdef CONFIG_BLOCK + #include #include #include #include -#include #include #include +#include +#include #include #include #include #include #include #include +#include #include #include #include #include -#include -#include -#include struct module; +struct scsi_ioctl_command; + struct request_queue; struct elevator_queue; struct blk_trace; struct request; struct sg_io_hdr; +struct bsg_job; struct blkcg_gq; struct blk_flush_queue; struct pr_ops; -struct rq_qos; -struct blk_queue_stats; -struct blk_stat_callback; -struct blk_keyslot_manager; #define BLKDEV_MIN_RQ 4 #define BLKDEV_MAX_RQ 128 /* Default maximum */ -/* Must be consistent with blk_mq_poll_stats_bkt() */ -#define BLK_MQ_POLL_STATS_BKTS 16 - -/* Doing classic polling */ -#define BLK_MQ_POLL_CLASSIC -1 - /* * Maximum number of blkcg policies allowed to be registered concurrently. * Defined here to simplify include dependency. 
*/ -#define BLKCG_MAX_POLS 6 +#define BLKCG_MAX_POLS 2 -typedef void (rq_end_io_fn)(struct request *, blk_status_t); +typedef void (rq_end_io_fn)(struct request *, int); -/* - * request flags */ -typedef __u32 __bitwise req_flags_t; +#define BLK_RL_SYNCFULL (1U << 0) +#define BLK_RL_ASYNCFULL (1U << 1) -/* drive already may have started this one */ -#define RQF_STARTED ((__force req_flags_t)(1 << 1)) -/* may not be passed by ioscheduler */ -#define RQF_SOFTBARRIER ((__force req_flags_t)(1 << 3)) -/* request for flush sequence */ -#define RQF_FLUSH_SEQ ((__force req_flags_t)(1 << 4)) -/* merge of different types, fail separately */ -#define RQF_MIXED_MERGE ((__force req_flags_t)(1 << 5)) -/* track inflight for MQ */ -#define RQF_MQ_INFLIGHT ((__force req_flags_t)(1 << 6)) -/* don't call prep for this one */ -#define RQF_DONTPREP ((__force req_flags_t)(1 << 7)) -/* vaguely specified driver internal error. Ignored by the block layer */ -#define RQF_FAILED ((__force req_flags_t)(1 << 10)) -/* don't warn about errors */ -#define RQF_QUIET ((__force req_flags_t)(1 << 11)) -/* elevator private data attached */ -#define RQF_ELVPRIV ((__force req_flags_t)(1 << 12)) -/* account into disk and partition IO statistics */ -#define RQF_IO_STAT ((__force req_flags_t)(1 << 13)) -/* runtime pm request */ -#define RQF_PM ((__force req_flags_t)(1 << 15)) -/* on IO scheduler merge hash */ -#define RQF_HASHED ((__force req_flags_t)(1 << 16)) -/* track IO completion time */ -#define RQF_STATS ((__force req_flags_t)(1 << 17)) -/* Look at ->special_vec for the actual data payload instead of the - bio chain. 
*/ -#define RQF_SPECIAL_PAYLOAD ((__force req_flags_t)(1 << 18)) -/* The per-zone write lock is held for this request */ -#define RQF_ZONE_WRITE_LOCKED ((__force req_flags_t)(1 << 19)) -/* already slept for hybrid poll */ -#define RQF_MQ_POLL_SLEPT ((__force req_flags_t)(1 << 20)) -/* ->timeout has been called, don't expire again */ -#define RQF_TIMED_OUT ((__force req_flags_t)(1 << 21)) - -/* flags that prevent us from merging requests: */ -#define RQF_NOMERGE_FLAGS \ - (RQF_STARTED | RQF_SOFTBARRIER | RQF_FLUSH_SEQ | RQF_SPECIAL_PAYLOAD) - -/* - * Request state for blk-mq. - */ -enum mq_rq_state { - MQ_RQ_IDLE = 0, - MQ_RQ_IN_FLIGHT = 1, - MQ_RQ_COMPLETE = 2, +struct request_list { + struct request_queue *q; /* the queue this rl belongs to */ +#ifdef CONFIG_BLK_CGROUP + struct blkcg_gq *blkg; /* blkg this request pool belongs to */ +#endif + /* + * count[], starved[], and wait[] are indexed by + * BLK_RW_SYNC/BLK_RW_ASYNC + */ + int count[2]; + int starved[2]; + mempool_t *rq_pool; + wait_queue_head_t wait[2]; + unsigned int flags; }; +/* + * request command types + */ +enum rq_cmd_type_bits { + REQ_TYPE_FS = 1, /* fs request */ + REQ_TYPE_BLOCK_PC, /* scsi command */ + REQ_TYPE_DRV_PRIV, /* driver defined types from here */ +}; + +#define BLK_MAX_CDB 16 + /* * Try to put the fields that are referenced together in the same cacheline. * @@ -116,15 +86,19 @@ enum mq_rq_state { * especially blk_mq_rq_ctx_init() to take care of the added fields. 
*/ struct request { + struct list_head queuelist; + union { + struct call_single_data csd; + u64 fifo_time; + }; + struct request_queue *q; struct blk_mq_ctx *mq_ctx; - struct blk_mq_hw_ctx *mq_hctx; - unsigned int cmd_flags; /* op and common flags */ - req_flags_t rq_flags; - - int tag; - int internal_tag; + int cpu; + unsigned cmd_type; + u64 cmd_flags; + unsigned long atomic_flags; /* the following two fields are internal, NEVER access directly */ unsigned int __data_len; /* total data len */ @@ -133,8 +107,6 @@ struct request { struct bio *bio; struct bio *biotail; - struct list_head queuelist; - /* * The hash is used inside the scheduler, and killed once the * request reaches the dispatch list. The ipi_list is only used @@ -144,7 +116,7 @@ struct request { */ union { struct hlist_node hash; /* merge hash */ - struct llist_node ipi_list; + struct list_head ipi_list; }; /* @@ -154,9 +126,7 @@ struct request { */ union { struct rb_node rb_node; /* sort/lookup */ - struct bio_vec special_vec; void *completion_data; - int error_count; /* for legacy drivers, don't use */ }; /* @@ -179,72 +149,68 @@ struct request { }; struct gendisk *rq_disk; - struct block_device *part; -#ifdef CONFIG_BLK_RQ_ALLOC_TIME - /* Time that the first bio started allocating this request. */ - u64 alloc_time_ns; + struct hd_struct *part; + unsigned long start_time; +#ifdef CONFIG_BLK_CGROUP + struct request_list *rl; /* rl this rq is alloced from */ + unsigned long long start_time_ns; + unsigned long long io_start_time_ns; /* when passed to hardware */ #endif - /* Time that this request was allocated for this IO. */ - u64 start_time_ns; - /* Time that I/O was submitted to the device. */ - u64 io_start_time_ns; - -#ifdef CONFIG_BLK_WBT - unsigned short wbt_flags; -#endif - /* - * rq sectors used for blk stats. It has the same value - * with blk_rq_sectors(rq), except that it never be zeroed - * by completion. 
- */ - unsigned short stats_sectors; - - /* - * Number of scatter-gather DMA addr+len pairs after + /* Number of scatter-gather DMA addr+len pairs after * physical address coalescing is performed. */ unsigned short nr_phys_segments; - #if defined(CONFIG_BLK_DEV_INTEGRITY) unsigned short nr_integrity_segments; #endif -#ifdef CONFIG_BLK_INLINE_ENCRYPTION - struct bio_crypt_ctx *crypt_ctx; - struct blk_ksm_keyslot *crypt_keyslot; -#endif - - unsigned short write_hint; unsigned short ioprio; - enum mq_rq_state state; - refcount_t ref; + void *special; /* opaque pointer available for LLD use */ + + int tag; + int errors; + + /* + * when request is used as a packet command carrier + */ + unsigned char __cmd[BLK_MAX_CDB]; + unsigned char *cmd; + unsigned short cmd_len; + + unsigned int extra_len; /* length of alignment and padding */ + unsigned int sense_len; + unsigned int resid_len; /* residual count */ + void *sense; - unsigned int timeout; unsigned long deadline; - - union { - struct __call_single_data csd; - u64 fifo_time; - }; + struct list_head timeout_list; + unsigned int timeout; + int retries; /* * completion callback. 
*/ rq_end_io_fn *end_io; void *end_io_data; + + /* for bidi */ + struct request *next_rq; }; -static inline bool blk_op_is_passthrough(unsigned int op) -{ - op &= REQ_OP_MASK; - return op == REQ_OP_DRV_IN || op == REQ_OP_DRV_OUT; -} +#define REQ_OP_SHIFT (8 * sizeof(u64) - REQ_OP_BITS) +#define req_op(req) ((req)->cmd_flags >> REQ_OP_SHIFT) -static inline bool blk_rq_is_passthrough(struct request *rq) -{ - return blk_op_is_passthrough(req_op(rq)); -} +#define req_set_op(req, op) do { \ + WARN_ON(op >= (1 << REQ_OP_BITS)); \ + (req)->cmd_flags &= ((1ULL << REQ_OP_SHIFT) - 1); \ + (req)->cmd_flags |= ((u64) (op) << REQ_OP_SHIFT); \ +} while (0) + +#define req_set_op_attrs(req, op, flags) do { \ + req_set_op(req, op); \ + (req)->cmd_flags |= flags; \ +} while (0) static inline unsigned short req_get_ioprio(struct request *req) { @@ -255,44 +221,48 @@ static inline unsigned short req_get_ioprio(struct request *req) struct blk_queue_ctx; +typedef void (request_fn_proc) (struct request_queue *q); +typedef blk_qc_t (make_request_fn) (struct request_queue *q, struct bio *bio); +typedef int (prep_rq_fn) (struct request_queue *, struct request *); +typedef void (unprep_rq_fn) (struct request_queue *, struct request *); + struct bio_vec; +typedef void (softirq_done_fn)(struct request *); +typedef int (dma_drain_needed_fn)(struct request *); +typedef int (lld_busy_fn) (struct request_queue *q); +typedef int (bsg_job_fn) (struct bsg_job *); enum blk_eh_timer_return { - BLK_EH_DONE, /* drivers has completed the command */ - BLK_EH_RESET_TIMER, /* reset timer and try again */ + BLK_EH_NOT_HANDLED, + BLK_EH_HANDLED, + BLK_EH_RESET_TIMER, }; +typedef enum blk_eh_timer_return (rq_timed_out_fn)(struct request *); + enum blk_queue_state { Queue_down, Queue_up, }; +struct blk_queue_tag { + struct request **tag_index; /* map of busy tags */ + unsigned long *tag_map; /* bit map of free/busy tags */ + int busy; /* current depth */ + int max_depth; /* what we will send to device */ + int 
real_max_depth; /* what the array can hold */ + atomic_t refcnt; /* map can be shared */ + int alloc_policy; /* tag allocation policy */ + int next_tag; /* next tag */ +}; #define BLK_TAG_ALLOC_FIFO 0 /* allocate starting from 0 */ #define BLK_TAG_ALLOC_RR 1 /* allocate starting from last allocated tag */ -/* - * Zoned block device models (zoned limit). - * - * Note: This needs to be ordered from the least to the most severe - * restrictions for the inheritance in blk_stack_limits() to work. - */ -enum blk_zoned_model { - BLK_ZONED_NONE = 0, /* Regular block device */ - BLK_ZONED_HA, /* Host-aware zoned block device */ - BLK_ZONED_HM, /* Host-managed zoned block device */ -}; - -/* - * BLK_BOUNCE_NONE: never bounce (default) - * BLK_BOUNCE_HIGH: bounce all highmem pages - */ -enum blk_bounce { - BLK_BOUNCE_NONE, - BLK_BOUNCE_HIGH, -}; +#define BLK_SCSI_MAX_CMDS (256) +#define BLK_SCSI_CMD_PER_LONG (BLK_SCSI_MAX_CMDS / (sizeof(long) * 8)) struct queue_limits { - enum blk_bounce bounce; + unsigned long bounce_pfn; unsigned long seg_boundary_mask; unsigned long virt_boundary_mask; @@ -302,94 +272,78 @@ struct queue_limits { unsigned int max_sectors; unsigned int max_segment_size; unsigned int physical_block_size; - unsigned int logical_block_size; unsigned int alignment_offset; unsigned int io_min; unsigned int io_opt; unsigned int max_discard_sectors; unsigned int max_hw_discard_sectors; unsigned int max_write_same_sectors; - unsigned int max_write_zeroes_sectors; - unsigned int max_zone_append_sectors; unsigned int discard_granularity; unsigned int discard_alignment; - unsigned int zone_write_granularity; + unsigned short logical_block_size; unsigned short max_segments; unsigned short max_integrity_segments; - unsigned short max_discard_segments; unsigned char misaligned; unsigned char discard_misaligned; + unsigned char cluster; + unsigned char discard_zeroes_data; unsigned char raid_partial_stripes_expensive; - enum blk_zoned_model zoned; }; -typedef int 
(*report_zones_cb)(struct blk_zone *zone, unsigned int idx, - void *data); - -void blk_queue_set_zoned(struct gendisk *disk, enum blk_zoned_model model); - -#ifdef CONFIG_BLK_DEV_ZONED - -#define BLK_ALL_ZONES ((unsigned int)-1) -int blkdev_report_zones(struct block_device *bdev, sector_t sector, - unsigned int nr_zones, report_zones_cb cb, void *data); -unsigned int blkdev_nr_zones(struct gendisk *disk); -extern int blkdev_zone_mgmt(struct block_device *bdev, enum req_opf op, - sector_t sectors, sector_t nr_sectors, - gfp_t gfp_mask); -int blk_revalidate_disk_zones(struct gendisk *disk, - void (*update_driver_data)(struct gendisk *disk)); - -extern int blkdev_report_zones_ioctl(struct block_device *bdev, fmode_t mode, - unsigned int cmd, unsigned long arg); -extern int blkdev_zone_mgmt_ioctl(struct block_device *bdev, fmode_t mode, - unsigned int cmd, unsigned long arg); - -#else /* CONFIG_BLK_DEV_ZONED */ - -static inline unsigned int blkdev_nr_zones(struct gendisk *disk) -{ - return 0; -} - -static inline int blkdev_report_zones_ioctl(struct block_device *bdev, - fmode_t mode, unsigned int cmd, - unsigned long arg) -{ - return -ENOTTY; -} - -static inline int blkdev_zone_mgmt_ioctl(struct block_device *bdev, - fmode_t mode, unsigned int cmd, - unsigned long arg) -{ - return -ENOTTY; -} - -#endif /* CONFIG_BLK_DEV_ZONED */ - struct request_queue { + /* + * Together with queue_head for cacheline sharing + */ + struct list_head queue_head; struct request *last_merge; struct elevator_queue *elevator; + int nr_rqs[2]; /* # allocated [a]sync rqs */ + int nr_rqs_elvpriv; /* # allocated rqs w/ elvpriv */ - struct percpu_ref q_usage_counter; + /* + * If blkcg is not used, @q->root_rl serves all requests. If blkcg + * is used, root blkg allocates from @q->root_rl and all other + * blkgs from their own blkg->rl. Which one to use should be + * determined using bio_request_list(). 
+ */ + struct request_list root_rl; - struct blk_queue_stats *stats; - struct rq_qos *rq_qos; + request_fn_proc *request_fn; + make_request_fn *make_request_fn; + prep_rq_fn *prep_rq_fn; + unprep_rq_fn *unprep_rq_fn; + softirq_done_fn *softirq_done_fn; + rq_timed_out_fn *rq_timed_out_fn; + dma_drain_needed_fn *dma_drain_needed; + lld_busy_fn *lld_busy_fn; - const struct blk_mq_ops *mq_ops; + struct blk_mq_ops *mq_ops; + + unsigned int *mq_map; /* sw queues */ struct blk_mq_ctx __percpu *queue_ctx; - - unsigned int queue_depth; + unsigned int nr_queues; /* hw dispatch queues */ struct blk_mq_hw_ctx **queue_hw_ctx; unsigned int nr_hw_queues; + /* + * Dispatch queue sorting + */ + sector_t end_sector; + struct request *boundary_rq; + + /* + * Delayed queue handling + */ + struct delayed_work delay_work; + + struct backing_dev_info backing_dev_info; + /* * The queue owner gets to use this for whatever they like. * ll_rw_blk doesn't touch it. @@ -400,11 +354,6 @@ struct request_queue { * various queue flags, see QUEUE_* below */ unsigned long queue_flags; - /* - * Number of contexts that have called blk_set_pm_only(). If this - * counter is above zero then only RQF_PM requests are processed. - */ - atomic_t pm_only; /* * ida allocated id for this queue. Used to index queues from @@ -412,9 +361,18 @@ struct request_queue { */ int id; - spinlock_t queue_lock; + /* + * queue needs bounce pages for pages above this limit + */ + gfp_t bounce_gfp; - struct gendisk *disk; + /* + * protects queue structures from reentrancy. ->__queue_lock should + * _never_ be used directly, it is queue private. always use + * ->queue_lock. 
+ */ + spinlock_t __queue_lock; + spinlock_t *queue_lock; /* * queue kobject @@ -424,7 +382,7 @@ struct request_queue { /* * mq queue kobject */ - struct kobject *mq_kobj; + struct kobject mq_kobj; #ifdef CONFIG_BLK_DEV_INTEGRITY struct blk_integrity integrity; @@ -432,35 +390,39 @@ struct request_queue { #ifdef CONFIG_PM struct device *dev; - enum rpm_status rpm_status; + int rpm_status; + unsigned int nr_pending; #endif /* * queue settings */ unsigned long nr_requests; /* Max # of requests */ + unsigned int nr_congestion_on; + unsigned int nr_congestion_off; + unsigned int nr_batching; + unsigned int dma_drain_size; + void *dma_drain_buffer; unsigned int dma_pad_mask; unsigned int dma_alignment; -#ifdef CONFIG_BLK_INLINE_ENCRYPTION - /* Inline crypto capabilities */ - struct blk_keyslot_manager *ksm; -#endif + struct blk_queue_tag *queue_tags; + struct list_head tag_busy_list; + + unsigned int nr_sorted; + unsigned int in_flight[2]; + /* + * Number of active block driver functions for which blk_drain_queue() + * must wait. Must be incremented around functions that unlock the + * queue_lock internally, e.g. scsi_request_fn(). + */ + unsigned int request_fn_active; unsigned int rq_timeout; - int poll_nsec; - - struct blk_stat_callback *poll_cb; - struct blk_rq_stat poll_stat[BLK_MQ_POLL_STATS_BKTS]; - struct timer_list timeout; struct work_struct timeout_work; - - atomic_t nr_active_requests_shared_sbitmap; - - struct sbitmap_queue sched_bitmap_tags; - struct sbitmap_queue sched_breserved_tags; + struct list_head timeout_list; struct list_head icq_list; #ifdef CONFIG_BLK_CGROUP @@ -471,37 +433,14 @@ struct request_queue { struct queue_limits limits; - unsigned int required_elevator_features; - -#ifdef CONFIG_BLK_DEV_ZONED /* - * Zoned block device information for request dispatch control. - * nr_zones is the total number of zones of the device. This is always - * 0 for regular block devices. 
conv_zones_bitmap is a bitmap of nr_zones - * bits which indicates if a zone is conventional (bit set) or - * sequential (bit clear). seq_zones_wlock is a bitmap of nr_zones - * bits which indicates if a zone is write locked, that is, if a write - * request targeting the zone was dispatched. All three fields are - * initialized by the low level device driver (e.g. scsi/sd.c). - * Stacking drivers (device mappers) may or may not initialize - * these fields. - * - * Reads of this information must be protected with blk_queue_enter() / - * blk_queue_exit(). Modifying this information is only allowed while - * no requests are being processed. See also blk_mq_freeze_queue() and - * blk_mq_unfreeze_queue(). + * sg stuff */ - unsigned int nr_zones; - unsigned long *conv_zones_bitmap; - unsigned long *seq_zones_wlock; - unsigned int max_open_zones; - unsigned int max_active_zones; -#endif /* CONFIG_BLK_DEV_ZONED */ - + unsigned int sg_timeout; + unsigned int sg_reserved_size; int node; - struct mutex debugfs_mutex; #ifdef CONFIG_BLK_DEV_IO_TRACE - struct blk_trace __rcu *blk_trace; + struct blk_trace *blk_trace; #endif /* * for flush operations @@ -513,16 +452,15 @@ struct request_queue { struct delayed_work requeue_work; struct mutex sysfs_lock; - struct mutex sysfs_dir_lock; - /* - * for reusing dead hctx instance in case of updating - * nr_hw_queues - */ - struct list_head unused_hctx_list; - spinlock_t unused_hctx_lock; + int bypass_depth; + atomic_t mq_freeze_depth; - int mq_freeze_depth; +#if defined(CONFIG_BLK_DEV_BSG) + bsg_job_fn *bsg_job_fn; + int bsg_job_size; + struct bsg_class_device bsg_dev; +#endif #ifdef CONFIG_BLK_DEV_THROTTLING /* Throttle data */ @@ -530,277 +468,248 @@ struct request_queue { #endif struct rcu_head rcu_head; wait_queue_head_t mq_freeze_wq; - /* - * Protect concurrent access to q_usage_counter by - * percpu_ref_kill() and percpu_ref_reinit(). 
- */ - struct mutex mq_freeze_lock; + struct percpu_ref q_usage_counter; + struct list_head all_q_node; struct blk_mq_tag_set *tag_set; struct list_head tag_set_list; - struct bio_set bio_split; - - struct dentry *debugfs_dir; - -#ifdef CONFIG_BLK_DEBUG_FS - struct dentry *sched_debugfs_dir; - struct dentry *rqos_debugfs_dir; -#endif + struct bio_set *bio_split; bool mq_sysfs_init_done; - - size_t cmd_size; - -#define BLK_MAX_WRITE_HINTS 5 - u64 write_hints[BLK_MAX_WRITE_HINTS]; }; -/* Keep blk_queue_flag_name[] in sync with the definitions below */ -#define QUEUE_FLAG_STOPPED 0 /* queue is stopped */ -#define QUEUE_FLAG_DYING 1 /* queue being torn down */ -#define QUEUE_FLAG_NOMERGES 3 /* disable merge attempts */ -#define QUEUE_FLAG_SAME_COMP 4 /* complete on same CPU-group */ -#define QUEUE_FLAG_FAIL_IO 5 /* fake timeout */ -#define QUEUE_FLAG_NONROT 6 /* non-rotational device (SSD) */ -#define QUEUE_FLAG_VIRT QUEUE_FLAG_NONROT /* paravirt device */ -#define QUEUE_FLAG_IO_STAT 7 /* do disk/partitions IO accounting */ -#define QUEUE_FLAG_DISCARD 8 /* supports DISCARD */ -#define QUEUE_FLAG_NOXMERGES 9 /* No extended merges */ -#define QUEUE_FLAG_ADD_RANDOM 10 /* Contributes to random pool */ -#define QUEUE_FLAG_SECERASE 11 /* supports secure erase */ -#define QUEUE_FLAG_SAME_FORCE 12 /* force complete on same CPU */ -#define QUEUE_FLAG_DEAD 13 /* queue tear-down finished */ -#define QUEUE_FLAG_INIT_DONE 14 /* queue is initialized */ -#define QUEUE_FLAG_STABLE_WRITES 15 /* don't modify blks until WB is done */ -#define QUEUE_FLAG_POLL 16 /* IO polling enabled if set */ -#define QUEUE_FLAG_WC 17 /* Write back caching */ -#define QUEUE_FLAG_FUA 18 /* device supports FUA writes */ -#define QUEUE_FLAG_DAX 19 /* device supports DAX */ -#define QUEUE_FLAG_STATS 20 /* track IO start and completion times */ -#define QUEUE_FLAG_POLL_STATS 21 /* collecting stats for hybrid polling */ -#define QUEUE_FLAG_REGISTERED 22 /* queue has been registered to a disk */ -#define 
QUEUE_FLAG_SCSI_PASSTHROUGH 23 /* queue supports SCSI commands */ -#define QUEUE_FLAG_QUIESCED 24 /* queue has been quiesced */ -#define QUEUE_FLAG_PCI_P2PDMA 25 /* device supports PCI p2p requests */ -#define QUEUE_FLAG_ZONE_RESETALL 26 /* supports Zone Reset All */ -#define QUEUE_FLAG_RQ_ALLOC_TIME 27 /* record rq->alloc_time_ns */ -#define QUEUE_FLAG_HCTX_ACTIVE 28 /* at least one blk-mq hctx is active */ -#define QUEUE_FLAG_NOWAIT 29 /* device supports NOWAIT */ +#define QUEUE_FLAG_QUEUED 1 /* uses generic tag queueing */ +#define QUEUE_FLAG_STOPPED 2 /* queue is stopped */ +#define QUEUE_FLAG_SYNCFULL 3 /* read queue has been filled */ +#define QUEUE_FLAG_ASYNCFULL 4 /* write queue has been filled */ +#define QUEUE_FLAG_DYING 5 /* queue being torn down */ +#define QUEUE_FLAG_BYPASS 6 /* act as dumb FIFO queue */ +#define QUEUE_FLAG_BIDI 7 /* queue supports bidi requests */ +#define QUEUE_FLAG_NOMERGES 8 /* disable merge attempts */ +#define QUEUE_FLAG_SAME_COMP 9 /* complete on same CPU-group */ +#define QUEUE_FLAG_FAIL_IO 10 /* fake timeout */ +#define QUEUE_FLAG_STACKABLE 11 /* supports request stacking */ +#define QUEUE_FLAG_NONROT 12 /* non-rotational device (SSD) */ +#define QUEUE_FLAG_VIRT QUEUE_FLAG_NONROT /* paravirt device */ +#define QUEUE_FLAG_IO_STAT 13 /* do IO stats */ +#define QUEUE_FLAG_DISCARD 14 /* supports DISCARD */ +#define QUEUE_FLAG_NOXMERGES 15 /* No extended merges */ +#define QUEUE_FLAG_ADD_RANDOM 16 /* Contributes to random pool */ +#define QUEUE_FLAG_SECERASE 17 /* supports secure erase */ +#define QUEUE_FLAG_SAME_FORCE 18 /* force complete on same CPU */ +#define QUEUE_FLAG_DEAD 19 /* queue tear-down finished */ +#define QUEUE_FLAG_INIT_DONE 20 /* queue is initialized */ +#define QUEUE_FLAG_NO_SG_MERGE 21 /* don't attempt to merge SG segments*/ +#define QUEUE_FLAG_POLL 22 /* IO polling enabled if set */ +#define QUEUE_FLAG_WC 23 /* Write back caching */ +#define QUEUE_FLAG_FUA 24 /* device supports FUA writes */ +#define 
QUEUE_FLAG_FLUSH_NQ 25 /* flush not queueuable */ +#define QUEUE_FLAG_DAX 26 /* device supports DAX */ + +#define QUEUE_FLAG_DEFAULT ((1 << QUEUE_FLAG_IO_STAT) | \ + (1 << QUEUE_FLAG_STACKABLE) | \ + (1 << QUEUE_FLAG_SAME_COMP) | \ + (1 << QUEUE_FLAG_ADD_RANDOM)) #define QUEUE_FLAG_MQ_DEFAULT ((1 << QUEUE_FLAG_IO_STAT) | \ - (1 << QUEUE_FLAG_SAME_COMP) | \ - (1 << QUEUE_FLAG_NOWAIT)) + (1 << QUEUE_FLAG_STACKABLE) | \ + (1 << QUEUE_FLAG_SAME_COMP) | \ + (1 << QUEUE_FLAG_POLL)) -void blk_queue_flag_set(unsigned int flag, struct request_queue *q); -void blk_queue_flag_clear(unsigned int flag, struct request_queue *q); -bool blk_queue_flag_test_and_set(unsigned int flag, struct request_queue *q); +static inline void queue_lockdep_assert_held(struct request_queue *q) +{ + if (q->queue_lock) + lockdep_assert_held(q->queue_lock); +} +static inline void queue_flag_set_unlocked(unsigned int flag, + struct request_queue *q) +{ + __set_bit(flag, &q->queue_flags); +} + +static inline int queue_flag_test_and_clear(unsigned int flag, + struct request_queue *q) +{ + queue_lockdep_assert_held(q); + + if (test_bit(flag, &q->queue_flags)) { + __clear_bit(flag, &q->queue_flags); + return 1; + } + + return 0; +} + +static inline int queue_flag_test_and_set(unsigned int flag, + struct request_queue *q) +{ + queue_lockdep_assert_held(q); + + if (!test_bit(flag, &q->queue_flags)) { + __set_bit(flag, &q->queue_flags); + return 0; + } + + return 1; +} + +static inline void queue_flag_set(unsigned int flag, struct request_queue *q) +{ + queue_lockdep_assert_held(q); + __set_bit(flag, &q->queue_flags); +} + +static inline void queue_flag_clear_unlocked(unsigned int flag, + struct request_queue *q) +{ + __clear_bit(flag, &q->queue_flags); +} + +static inline int queue_in_flight(struct request_queue *q) +{ + return q->in_flight[0] + q->in_flight[1]; +} + +static inline void queue_flag_clear(unsigned int flag, struct request_queue *q) +{ + queue_lockdep_assert_held(q); + __clear_bit(flag, 
&q->queue_flags); +} + +#define blk_queue_tagged(q) test_bit(QUEUE_FLAG_QUEUED, &(q)->queue_flags) #define blk_queue_stopped(q) test_bit(QUEUE_FLAG_STOPPED, &(q)->queue_flags) #define blk_queue_dying(q) test_bit(QUEUE_FLAG_DYING, &(q)->queue_flags) #define blk_queue_dead(q) test_bit(QUEUE_FLAG_DEAD, &(q)->queue_flags) +#define blk_queue_bypass(q) test_bit(QUEUE_FLAG_BYPASS, &(q)->queue_flags) #define blk_queue_init_done(q) test_bit(QUEUE_FLAG_INIT_DONE, &(q)->queue_flags) #define blk_queue_nomerges(q) test_bit(QUEUE_FLAG_NOMERGES, &(q)->queue_flags) #define blk_queue_noxmerges(q) \ test_bit(QUEUE_FLAG_NOXMERGES, &(q)->queue_flags) #define blk_queue_nonrot(q) test_bit(QUEUE_FLAG_NONROT, &(q)->queue_flags) -#define blk_queue_stable_writes(q) \ - test_bit(QUEUE_FLAG_STABLE_WRITES, &(q)->queue_flags) #define blk_queue_io_stat(q) test_bit(QUEUE_FLAG_IO_STAT, &(q)->queue_flags) #define blk_queue_add_random(q) test_bit(QUEUE_FLAG_ADD_RANDOM, &(q)->queue_flags) +#define blk_queue_stackable(q) \ + test_bit(QUEUE_FLAG_STACKABLE, &(q)->queue_flags) #define blk_queue_discard(q) test_bit(QUEUE_FLAG_DISCARD, &(q)->queue_flags) -#define blk_queue_zone_resetall(q) \ - test_bit(QUEUE_FLAG_ZONE_RESETALL, &(q)->queue_flags) #define blk_queue_secure_erase(q) \ (test_bit(QUEUE_FLAG_SECERASE, &(q)->queue_flags)) #define blk_queue_dax(q) test_bit(QUEUE_FLAG_DAX, &(q)->queue_flags) -#define blk_queue_scsi_passthrough(q) \ - test_bit(QUEUE_FLAG_SCSI_PASSTHROUGH, &(q)->queue_flags) -#define blk_queue_pci_p2pdma(q) \ - test_bit(QUEUE_FLAG_PCI_P2PDMA, &(q)->queue_flags) -#ifdef CONFIG_BLK_RQ_ALLOC_TIME -#define blk_queue_rq_alloc_time(q) \ - test_bit(QUEUE_FLAG_RQ_ALLOC_TIME, &(q)->queue_flags) -#else -#define blk_queue_rq_alloc_time(q) false -#endif #define blk_noretry_request(rq) \ ((rq)->cmd_flags & (REQ_FAILFAST_DEV|REQ_FAILFAST_TRANSPORT| \ REQ_FAILFAST_DRIVER)) -#define blk_queue_quiesced(q) test_bit(QUEUE_FLAG_QUIESCED, &(q)->queue_flags) -#define blk_queue_pm_only(q) 
atomic_read(&(q)->pm_only) -#define blk_queue_fua(q) test_bit(QUEUE_FLAG_FUA, &(q)->queue_flags) -#define blk_queue_registered(q) test_bit(QUEUE_FLAG_REGISTERED, &(q)->queue_flags) -#define blk_queue_nowait(q) test_bit(QUEUE_FLAG_NOWAIT, &(q)->queue_flags) -extern void blk_set_pm_only(struct request_queue *q); -extern void blk_clear_pm_only(struct request_queue *q); +#define blk_account_rq(rq) \ + (((rq)->cmd_flags & REQ_STARTED) && \ + ((rq)->cmd_type == REQ_TYPE_FS)) + +#define blk_rq_cpu_valid(rq) ((rq)->cpu != -1) +#define blk_bidi_rq(rq) ((rq)->next_rq != NULL) +/* rq->queuelist of dequeued request must be list_empty() */ +#define blk_queued_rq(rq) (!list_empty(&(rq)->queuelist)) #define list_entry_rq(ptr) list_entry((ptr), struct request, queuelist) #define rq_data_dir(rq) (op_is_write(req_op(rq)) ? WRITE : READ) -#define rq_dma_dir(rq) \ - (op_is_write(req_op(rq)) ? DMA_TO_DEVICE : DMA_FROM_DEVICE) - -#define dma_map_bvec(dev, bv, dir, attrs) \ - dma_map_page_attrs(dev, (bv)->bv_page, (bv)->bv_offset, (bv)->bv_len, \ - (dir), (attrs)) - -static inline bool queue_is_mq(struct request_queue *q) +/* + * Driver can handle struct request, if it either has an old style + * request_fn defined, or is blk-mq based. 
+ */ +static inline bool queue_is_rq_based(struct request_queue *q) { - return q->mq_ops; + return q->request_fn || q->mq_ops; } -#ifdef CONFIG_PM -static inline enum rpm_status queue_rpm_status(struct request_queue *q) +static inline unsigned int blk_queue_cluster(struct request_queue *q) { - return q->rpm_status; -} -#else -static inline enum rpm_status queue_rpm_status(struct request_queue *q) -{ - return RPM_ACTIVE; -} -#endif - -static inline enum blk_zoned_model -blk_queue_zoned_model(struct request_queue *q) -{ - if (IS_ENABLED(CONFIG_BLK_DEV_ZONED)) - return q->limits.zoned; - return BLK_ZONED_NONE; + return q->limits.cluster; } -static inline bool blk_queue_is_zoned(struct request_queue *q) +/* + * We regard a request as sync, if either a read or a sync write + */ +static inline bool rw_is_sync(int op, unsigned int rw_flags) { - switch (blk_queue_zoned_model(q)) { - case BLK_ZONED_HA: - case BLK_ZONED_HM: - return true; - default: - return false; - } + return op == REQ_OP_READ || (rw_flags & REQ_SYNC); } -static inline sector_t blk_queue_zone_sectors(struct request_queue *q) -{ - return blk_queue_is_zoned(q) ? q->limits.chunk_sectors : 0; -} - -#ifdef CONFIG_BLK_DEV_ZONED -static inline unsigned int blk_queue_nr_zones(struct request_queue *q) -{ - return blk_queue_is_zoned(q) ? 
q->nr_zones : 0; -} - -static inline unsigned int blk_queue_zone_no(struct request_queue *q, - sector_t sector) -{ - if (!blk_queue_is_zoned(q)) - return 0; - return sector >> ilog2(q->limits.chunk_sectors); -} - -static inline bool blk_queue_zone_is_seq(struct request_queue *q, - sector_t sector) -{ - if (!blk_queue_is_zoned(q)) - return false; - if (!q->conv_zones_bitmap) - return true; - return !test_bit(blk_queue_zone_no(q, sector), q->conv_zones_bitmap); -} - -static inline void blk_queue_max_open_zones(struct request_queue *q, - unsigned int max_open_zones) -{ - q->max_open_zones = max_open_zones; -} - -static inline unsigned int queue_max_open_zones(const struct request_queue *q) -{ - return q->max_open_zones; -} - -static inline void blk_queue_max_active_zones(struct request_queue *q, - unsigned int max_active_zones) -{ - q->max_active_zones = max_active_zones; -} - -static inline unsigned int queue_max_active_zones(const struct request_queue *q) -{ - return q->max_active_zones; -} -#else /* CONFIG_BLK_DEV_ZONED */ -static inline unsigned int blk_queue_nr_zones(struct request_queue *q) -{ - return 0; -} -static inline bool blk_queue_zone_is_seq(struct request_queue *q, - sector_t sector) -{ - return false; -} -static inline unsigned int blk_queue_zone_no(struct request_queue *q, - sector_t sector) -{ - return 0; -} -static inline unsigned int queue_max_open_zones(const struct request_queue *q) -{ - return 0; -} -static inline unsigned int queue_max_active_zones(const struct request_queue *q) -{ - return 0; -} -#endif /* CONFIG_BLK_DEV_ZONED */ - static inline bool rq_is_sync(struct request *rq) { - return op_is_sync(rq->cmd_flags); + return rw_is_sync(req_op(rq), rq->cmd_flags); +} + +static inline bool blk_rl_full(struct request_list *rl, bool sync) +{ + unsigned int flag = sync ? BLK_RL_SYNCFULL : BLK_RL_ASYNCFULL; + + return rl->flags & flag; +} + +static inline void blk_set_rl_full(struct request_list *rl, bool sync) +{ + unsigned int flag = sync ? 
BLK_RL_SYNCFULL : BLK_RL_ASYNCFULL; + + rl->flags |= flag; +} + +static inline void blk_clear_rl_full(struct request_list *rl, bool sync) +{ + unsigned int flag = sync ? BLK_RL_SYNCFULL : BLK_RL_ASYNCFULL; + + rl->flags &= ~flag; } static inline bool rq_mergeable(struct request *rq) { - if (blk_rq_is_passthrough(rq)) + if (rq->cmd_type != REQ_TYPE_FS) return false; if (req_op(rq) == REQ_OP_FLUSH) return false; - if (req_op(rq) == REQ_OP_WRITE_ZEROES) - return false; - - if (req_op(rq) == REQ_OP_ZONE_APPEND) - return false; - if (rq->cmd_flags & REQ_NOMERGE_FLAGS) return false; - if (rq->rq_flags & RQF_NOMERGE_FLAGS) - return false; return true; } static inline bool blk_write_same_mergeable(struct bio *a, struct bio *b) { - if (bio_page(a) == bio_page(b) && - bio_offset(a) == bio_offset(b)) + if (bio_data(a) == bio_data(b)) return true; return false; } -static inline unsigned int blk_queue_depth(struct request_queue *q) -{ - if (q->queue_depth) - return q->queue_depth; +/* + * q->prep_rq_fn return values + */ +enum { + BLKPREP_OK, /* serve it */ + BLKPREP_KILL, /* fatal error, kill, return -EIO */ + BLKPREP_DEFER, /* leave on queue */ + BLKPREP_INVALID, /* invalid command, kill, return -EREMOTEIO */ +}; - return q->nr_requests; -} +extern unsigned long blk_max_low_pfn, blk_max_pfn; + +/* + * standard bounce addresses: + * + * BLK_BOUNCE_HIGH : bounce all highmem pages + * BLK_BOUNCE_ANY : don't bounce anything + * BLK_BOUNCE_ISA : bounce pages above ISA DMA boundary + */ + +#if BITS_PER_LONG == 32 +#define BLK_BOUNCE_HIGH ((u64)blk_max_low_pfn << PAGE_SHIFT) +#else +#define BLK_BOUNCE_HIGH -1ULL +#endif +#define BLK_BOUNCE_ANY (-1ULL) +#define BLK_BOUNCE_ISA (DMA_BIT_MASK(24)) /* * default timeout for SG_IO if none specified @@ -808,6 +717,19 @@ static inline unsigned int blk_queue_depth(struct request_queue *q) #define BLK_DEFAULT_SG_TIMEOUT (60 * HZ) #define BLK_MIN_SG_TIMEOUT (7 * HZ) +#ifdef CONFIG_BOUNCE +extern int init_emergency_isa_pool(void); +extern void 
blk_queue_bounce(struct request_queue *q, struct bio **bio); +#else +static inline int init_emergency_isa_pool(void) +{ + return 0; +} +static inline void blk_queue_bounce(struct request_queue *q, struct bio **bio) +{ +} +#endif /* CONFIG_MMU */ + struct rq_map_data { struct page **pages; int page_order; @@ -833,10 +755,6 @@ struct req_iterator { __rq_for_each_bio(_iter.bio, _rq) \ bio_for_each_segment(bvl, _iter.bio, _iter.iter) -#define rq_for_each_bvec(bvl, _rq, _iter) \ - __rq_for_each_bio(_iter.bio, _rq) \ - bio_for_each_bvec(bvl, _iter.bio, _iter.iter) - #define rq_iter_last(bvec, _iter) \ (_iter.bio->bi_next == NULL && \ bio_iter_last(bvec, _iter.iter)) @@ -852,26 +770,60 @@ static inline void rq_flush_dcache_pages(struct request *rq) } #endif +#ifdef CONFIG_PRINTK +#define vfs_msg(sb, level, fmt, ...) \ + __vfs_msg(sb, level, fmt, ##__VA_ARGS__) +#else +#define vfs_msg(sb, level, fmt, ...) \ +do { \ + no_printk(fmt, ##__VA_ARGS__); \ + __vfs_msg(sb, "", " "); \ +} while (0) +#endif + extern int blk_register_queue(struct gendisk *disk); extern void blk_unregister_queue(struct gendisk *disk); -blk_qc_t submit_bio_noacct(struct bio *bio); +extern blk_qc_t generic_make_request(struct bio *bio); extern void blk_rq_init(struct request_queue *q, struct request *rq); extern void blk_put_request(struct request *); -extern struct request *blk_get_request(struct request_queue *, unsigned int op, - blk_mq_req_flags_t flags); +extern void __blk_put_request(struct request_queue *, struct request *); +extern struct request *blk_get_request(struct request_queue *, int, gfp_t); +extern void blk_rq_set_block_pc(struct request *); +extern void blk_requeue_request(struct request_queue *, struct request *); +extern void blk_add_request_payload(struct request *rq, struct page *page, + int offset, unsigned int len); extern int blk_lld_busy(struct request_queue *q); extern int blk_rq_prep_clone(struct request *rq, struct request *rq_src, struct bio_set *bs, gfp_t gfp_mask, int 
(*bio_ctr)(struct bio *, struct bio *, void *), void *data); extern void blk_rq_unprep_clone(struct request *rq); -extern blk_status_t blk_insert_cloned_request(struct request_queue *q, +extern int blk_insert_cloned_request(struct request_queue *q, struct request *rq); -int blk_rq_append_bio(struct request *rq, struct bio *bio); -extern void blk_queue_split(struct bio **); -extern int blk_queue_enter(struct request_queue *q, blk_mq_req_flags_t flags); +extern int blk_rq_append_bio(struct request *rq, struct bio *bio); +extern void blk_delay_queue(struct request_queue *, unsigned long); +extern void blk_queue_split(struct request_queue *, struct bio **, + struct bio_set *); +extern void blk_recount_segments(struct request_queue *, struct bio *); +extern int scsi_verify_blk_ioctl(struct block_device *, unsigned int); +extern int scsi_cmd_blk_ioctl(struct block_device *, fmode_t, + unsigned int, void __user *); +extern int scsi_cmd_ioctl(struct request_queue *, struct gendisk *, fmode_t, + unsigned int, void __user *); +extern int sg_scsi_ioctl(struct request_queue *, struct gendisk *, fmode_t, + struct scsi_ioctl_command __user *); + +extern int blk_queue_enter(struct request_queue *q, bool nowait); extern void blk_queue_exit(struct request_queue *q); +extern void blk_start_queue(struct request_queue *q); +extern void blk_start_queue_async(struct request_queue *q); +extern void blk_stop_queue(struct request_queue *q); extern void blk_sync_queue(struct request_queue *q); +extern void __blk_stop_queue(struct request_queue *q); +extern void __blk_run_queue(struct request_queue *q); +extern void __blk_run_queue_uncond(struct request_queue *q); +extern void blk_run_queue(struct request_queue *); +extern void blk_run_queue_async(struct request_queue *q); extern int blk_rq_map_user(struct request_queue *, struct request *, struct rq_map_data *, void __user *, unsigned long, gfp_t); @@ -880,42 +832,18 @@ extern int blk_rq_map_kern(struct request_queue *, struct request *, 
void *, uns extern int blk_rq_map_user_iov(struct request_queue *, struct request *, struct rq_map_data *, const struct iov_iter *, gfp_t); -extern void blk_execute_rq_nowait(struct gendisk *, +extern int blk_execute_rq(struct request_queue *, struct gendisk *, + struct request *, int); +extern void blk_execute_rq_nowait(struct request_queue *, struct gendisk *, struct request *, int, rq_end_io_fn *); -blk_status_t blk_execute_rq(struct gendisk *bd_disk, struct request *rq, - int at_head); - -/* Helper to convert REQ_OP_XXX to its string format XXX */ -extern const char *blk_op_str(unsigned int op); - -int blk_status_to_errno(blk_status_t status); -blk_status_t errno_to_blk_status(int errno); - -int blk_poll(struct request_queue *q, blk_qc_t cookie, bool spin); +bool blk_poll(struct request_queue *q, blk_qc_t cookie); static inline struct request_queue *bdev_get_queue(struct block_device *bdev) { return bdev->bd_disk->queue; /* this is never NULL */ } -/* - * The basic unit of block I/O is a sector. It is used in a number of contexts - * in Linux (blk, bio, genhd). The size of one sector is 512 = 2**9 - * bytes. Variables of type sector_t represent an offset or size that is a - * multiple of 512 bytes. Hence these two constants. 
- */ -#ifndef SECTOR_SHIFT -#define SECTOR_SHIFT 9 -#endif -#ifndef SECTOR_SIZE -#define SECTOR_SIZE (1 << SECTOR_SHIFT) -#endif - -#define PAGE_SECTORS_SHIFT (PAGE_SHIFT - SECTOR_SHIFT) -#define PAGE_SECTORS (1 << PAGE_SECTORS_SHIFT) -#define SECTOR_MASK (PAGE_SECTORS - 1) - /* * blk_rq_pos() : the current sector * blk_rq_bytes() : bytes left in the entire request @@ -923,7 +851,6 @@ static inline struct request_queue *bdev_get_queue(struct block_device *bdev) * blk_rq_err_bytes() : bytes left till the next error boundary * blk_rq_sectors() : sectors left in the entire request * blk_rq_cur_sectors() : sectors left in the current segment - * blk_rq_stats_sectors() : sectors of the entire request used for stats */ static inline sector_t blk_rq_pos(const struct request *rq) { @@ -944,84 +871,23 @@ extern unsigned int blk_rq_err_bytes(const struct request *rq); static inline unsigned int blk_rq_sectors(const struct request *rq) { - return blk_rq_bytes(rq) >> SECTOR_SHIFT; + return blk_rq_bytes(rq) >> 9; } static inline unsigned int blk_rq_cur_sectors(const struct request *rq) { - return blk_rq_cur_bytes(rq) >> SECTOR_SHIFT; -} - -static inline unsigned int blk_rq_stats_sectors(const struct request *rq) -{ - return rq->stats_sectors; -} - -#ifdef CONFIG_BLK_DEV_ZONED - -/* Helper to convert BLK_ZONE_ZONE_XXX to its string format XXX */ -const char *blk_zone_cond_str(enum blk_zone_cond zone_cond); - -static inline unsigned int bio_zone_no(struct bio *bio) -{ - return blk_queue_zone_no(bdev_get_queue(bio->bi_bdev), - bio->bi_iter.bi_sector); -} - -static inline unsigned int bio_zone_is_seq(struct bio *bio) -{ - return blk_queue_zone_is_seq(bdev_get_queue(bio->bi_bdev), - bio->bi_iter.bi_sector); -} - -static inline unsigned int blk_rq_zone_no(struct request *rq) -{ - return blk_queue_zone_no(rq->q, blk_rq_pos(rq)); -} - -static inline unsigned int blk_rq_zone_is_seq(struct request *rq) -{ - return blk_queue_zone_is_seq(rq->q, blk_rq_pos(rq)); -} -#endif /* 
CONFIG_BLK_DEV_ZONED */ - -/* - * Some commands like WRITE SAME have a payload or data transfer size which - * is different from the size of the request. Any driver that supports such - * commands using the RQF_SPECIAL_PAYLOAD flag needs to use this helper to - * calculate the data transfer size. - */ -static inline unsigned int blk_rq_payload_bytes(struct request *rq) -{ - if (rq->rq_flags & RQF_SPECIAL_PAYLOAD) - return rq->special_vec.bv_len; - return blk_rq_bytes(rq); -} - -/* - * Return the first full biovec in the request. The caller needs to check that - * there are any bvecs before calling this helper. - */ -static inline struct bio_vec req_bvec(struct request *rq) -{ - if (rq->rq_flags & RQF_SPECIAL_PAYLOAD) - return rq->special_vec; - return mp_bvec_iter_bvec(rq->bio->bi_io_vec, rq->bio->bi_iter); + return blk_rq_cur_bytes(rq) >> 9; } static inline unsigned int blk_queue_get_max_sectors(struct request_queue *q, int op) { if (unlikely(op == REQ_OP_DISCARD || op == REQ_OP_SECURE_ERASE)) - return min(q->limits.max_discard_sectors, - UINT_MAX >> SECTOR_SHIFT); + return min(q->limits.max_discard_sectors, UINT_MAX >> 9); if (unlikely(op == REQ_OP_WRITE_SAME)) return q->limits.max_write_same_sectors; - if (unlikely(op == REQ_OP_WRITE_ZEROES)) - return q->limits.max_write_zeroes_sectors; - return q->limits.max_sectors; } @@ -1030,22 +896,13 @@ static inline unsigned int blk_queue_get_max_sectors(struct request_queue *q, * file system requests. 
*/ static inline unsigned int blk_max_size_offset(struct request_queue *q, - sector_t offset, - unsigned int chunk_sectors) + sector_t offset) { - if (!chunk_sectors) { - if (q->limits.chunk_sectors) - chunk_sectors = q->limits.chunk_sectors; - else - return q->limits.max_sectors; - } + if (!q->limits.chunk_sectors) + return q->limits.max_sectors; - if (likely(is_power_of_2(chunk_sectors))) - chunk_sectors -= offset & (chunk_sectors - 1); - else - chunk_sectors -= sector_div(offset, chunk_sectors); - - return min(q->limits.max_sectors, chunk_sectors); + return q->limits.chunk_sectors - + (offset & (q->limits.chunk_sectors - 1)); } static inline unsigned int blk_rq_get_max_sectors(struct request *rq, @@ -1053,7 +910,7 @@ static inline unsigned int blk_rq_get_max_sectors(struct request *rq, { struct request_queue *q = rq->q; - if (blk_rq_is_passthrough(rq)) + if (unlikely(rq->cmd_type != REQ_TYPE_FS)) return q->limits.max_hw_sectors; if (!q->limits.chunk_sectors || @@ -1061,7 +918,7 @@ static inline unsigned int blk_rq_get_max_sectors(struct request *rq, req_op(rq) == REQ_OP_SECURE_ERASE) return blk_queue_get_max_sectors(q, req_op(rq)); - return min(blk_max_size_offset(q, offset, 0), + return min(blk_max_size_offset(q, offset), blk_queue_get_max_sectors(q, req_op(rq))); } @@ -1076,109 +933,133 @@ static inline unsigned int blk_rq_count_bios(struct request *rq) return nr_bios; } -void blk_steal_bios(struct bio_list *list, struct request *rq); +/* + * Request issue related functions. + */ +extern struct request *blk_peek_request(struct request_queue *q); +extern void blk_start_request(struct request *rq); +extern struct request *blk_fetch_request(struct request_queue *q); /* * Request completion related functions. * * blk_update_request() completes given number of bytes and updates * the request without completing it. + * + * blk_end_request() and friends. __blk_end_request() must be called + * with the request queue spinlock acquired. 
+ * + * Several drivers define their own end_request and call + * blk_end_request() for parts of the original function. + * This prevents code duplication in drivers. */ -extern bool blk_update_request(struct request *rq, blk_status_t error, +extern bool blk_update_request(struct request *rq, int error, unsigned int nr_bytes); +extern void blk_finish_request(struct request *rq, int error); +extern bool blk_end_request(struct request *rq, int error, + unsigned int nr_bytes); +extern void blk_end_request_all(struct request *rq, int error); +extern bool blk_end_request_cur(struct request *rq, int error); +extern bool blk_end_request_err(struct request *rq, int error); +extern bool __blk_end_request(struct request *rq, int error, + unsigned int nr_bytes); +extern void __blk_end_request_all(struct request *rq, int error); +extern bool __blk_end_request_cur(struct request *rq, int error); +extern bool __blk_end_request_err(struct request *rq, int error); +extern void blk_complete_request(struct request *); +extern void __blk_complete_request(struct request *); extern void blk_abort_request(struct request *); +extern void blk_unprep_request(struct request *); /* * Access functions for manipulating queue properties */ +extern struct request_queue *blk_init_queue_node(request_fn_proc *rfn, + spinlock_t *lock, int node_id); +extern struct request_queue *blk_init_queue(request_fn_proc *, spinlock_t *); +extern struct request_queue *blk_init_allocated_queue(struct request_queue *, + request_fn_proc *, spinlock_t *); extern void blk_cleanup_queue(struct request_queue *); -void blk_queue_bounce_limit(struct request_queue *q, enum blk_bounce limit); +extern void blk_queue_make_request(struct request_queue *, make_request_fn *); +extern void blk_queue_bounce_limit(struct request_queue *, u64); extern void blk_queue_max_hw_sectors(struct request_queue *, unsigned int); extern void blk_queue_chunk_sectors(struct request_queue *, unsigned int); extern void 
blk_queue_max_segments(struct request_queue *, unsigned short); -extern void blk_queue_max_discard_segments(struct request_queue *, - unsigned short); extern void blk_queue_max_segment_size(struct request_queue *, unsigned int); extern void blk_queue_max_discard_sectors(struct request_queue *q, unsigned int max_discard_sectors); extern void blk_queue_max_write_same_sectors(struct request_queue *q, unsigned int max_write_same_sectors); -extern void blk_queue_max_write_zeroes_sectors(struct request_queue *q, - unsigned int max_write_same_sectors); -extern void blk_queue_logical_block_size(struct request_queue *, unsigned int); -extern void blk_queue_max_zone_append_sectors(struct request_queue *q, - unsigned int max_zone_append_sectors); +extern void blk_queue_logical_block_size(struct request_queue *, unsigned short); extern void blk_queue_physical_block_size(struct request_queue *, unsigned int); -void blk_queue_zone_write_granularity(struct request_queue *q, - unsigned int size); extern void blk_queue_alignment_offset(struct request_queue *q, unsigned int alignment); -void disk_update_readahead(struct gendisk *disk); extern void blk_limits_io_min(struct queue_limits *limits, unsigned int min); extern void blk_queue_io_min(struct request_queue *q, unsigned int min); extern void blk_limits_io_opt(struct queue_limits *limits, unsigned int opt); extern void blk_queue_io_opt(struct request_queue *q, unsigned int opt); -extern void blk_set_queue_depth(struct request_queue *q, unsigned int depth); extern void blk_set_default_limits(struct queue_limits *lim); extern void blk_set_stacking_limits(struct queue_limits *lim); extern int blk_stack_limits(struct queue_limits *t, struct queue_limits *b, sector_t offset); +extern int bdev_stack_limits(struct queue_limits *t, struct block_device *bdev, + sector_t offset); extern void disk_stack_limits(struct gendisk *disk, struct block_device *bdev, sector_t offset); +extern void blk_queue_stack_limits(struct request_queue *t, 
struct request_queue *b); +extern void blk_queue_dma_pad(struct request_queue *, unsigned int); extern void blk_queue_update_dma_pad(struct request_queue *, unsigned int); +extern int blk_queue_dma_drain(struct request_queue *q, + dma_drain_needed_fn *dma_drain_needed, + void *buf, unsigned int size); +extern void blk_queue_lld_busy(struct request_queue *q, lld_busy_fn *fn); extern void blk_queue_segment_boundary(struct request_queue *, unsigned long); extern void blk_queue_virt_boundary(struct request_queue *, unsigned long); +extern void blk_queue_prep_rq(struct request_queue *, prep_rq_fn *pfn); +extern void blk_queue_unprep_rq(struct request_queue *, unprep_rq_fn *ufn); extern void blk_queue_dma_alignment(struct request_queue *, int); extern void blk_queue_update_dma_alignment(struct request_queue *, int); +extern void blk_queue_softirq_done(struct request_queue *, softirq_done_fn *); +extern void blk_queue_rq_timed_out(struct request_queue *, rq_timed_out_fn *); extern void blk_queue_rq_timeout(struct request_queue *, unsigned int); +extern void blk_queue_flush_queueable(struct request_queue *q, bool queueable); extern void blk_queue_write_cache(struct request_queue *q, bool enabled, bool fua); -extern void blk_queue_required_elevator_features(struct request_queue *q, - unsigned int features); -extern bool blk_queue_can_use_dma_map_merging(struct request_queue *q, - struct device *dev); +extern struct backing_dev_info *blk_get_backing_dev_info(struct block_device *bdev); -/* - * Number of physical segments as sent to the device. - * - * Normally this is the number of discontiguous data segments sent by the - * submitter. But for data-less command like discard we might have no - * actual data segments submitted, but the driver might have to add it's - * own special payload. In that case we still return 1 here so that this - * special payload will be mapped. 
- */ -static inline unsigned short blk_rq_nr_phys_segments(struct request *rq) -{ - if (rq->rq_flags & RQF_SPECIAL_PAYLOAD) - return 1; - return rq->nr_phys_segments; -} - -/* - * Number of discard segments (or ranges) the driver needs to fill in. - * Each discard bio merged into a request is counted as one segment. - */ -static inline unsigned short blk_rq_nr_discard_segments(struct request *rq) -{ - return max_t(unsigned short, rq->nr_phys_segments, 1); -} - -int __blk_rq_map_sg(struct request_queue *q, struct request *rq, - struct scatterlist *sglist, struct scatterlist **last_sg); -static inline int blk_rq_map_sg(struct request_queue *q, struct request *rq, - struct scatterlist *sglist) -{ - struct scatterlist *last_sg = NULL; - - return __blk_rq_map_sg(q, rq, sglist, &last_sg); -} +extern int blk_rq_map_sg(struct request_queue *, struct request *, struct scatterlist *); extern void blk_dump_rq_flags(struct request *, char *); +extern long nr_blockdev_pages(void); bool __must_check blk_get_queue(struct request_queue *); +struct request_queue *blk_alloc_queue(gfp_t); +struct request_queue *blk_alloc_queue_node(gfp_t, int); extern void blk_put_queue(struct request_queue *); extern void blk_set_queue_dying(struct request_queue *); -#ifdef CONFIG_BLOCK +/* + * block layer runtime pm functions + */ +#ifdef CONFIG_PM +extern void blk_pm_runtime_init(struct request_queue *q, struct device *dev); +extern int blk_pre_runtime_suspend(struct request_queue *q); +extern void blk_post_runtime_suspend(struct request_queue *q, int err); +extern void blk_pre_runtime_resume(struct request_queue *q); +extern void blk_post_runtime_resume(struct request_queue *q, int err); +extern void blk_set_runtime_active(struct request_queue *q); +#else +static inline void blk_pm_runtime_init(struct request_queue *q, + struct device *dev) {} +static inline int blk_pre_runtime_suspend(struct request_queue *q) +{ + return -ENOSYS; +} +static inline void blk_post_runtime_suspend(struct 
request_queue *q, int err) {} +static inline void blk_pre_runtime_resume(struct request_queue *q) {} +static inline void blk_post_runtime_resume(struct request_queue *q, int err) {} +static inline void blk_set_runtime_active(struct request_queue *q) {} +#endif + /* * blk_plug permits building a queue of related requests by holding the I/O * fragments for a short period. This allows merging of sequential requests @@ -1192,14 +1073,11 @@ extern void blk_set_queue_dying(struct request_queue *); * schedule() where blk_schedule_flush_plug() is called. */ struct blk_plug { + struct list_head list; /* requests */ struct list_head mq_list; /* blk-mq requests */ struct list_head cb_list; /* md requires an unplug callback */ - unsigned short rq_count; - bool multiple_queues; - bool nowait; }; #define BLK_MAX_REQUEST_COUNT 16 -#define BLK_PLUG_FLUSH_SIZE (128 * 1024) struct blk_plug_cb; typedef void (*blk_plug_cb_fn)(struct blk_plug_cb *, bool); @@ -1235,96 +1113,63 @@ static inline bool blk_needs_flush_plug(struct task_struct *tsk) struct blk_plug *plug = tsk->plug; return plug && - (!list_empty(&plug->mq_list) || + (!list_empty(&plug->list) || + !list_empty(&plug->mq_list) || !list_empty(&plug->cb_list)); } -int blkdev_issue_flush(struct block_device *bdev); -long nr_blockdev_pages(void); -#else /* CONFIG_BLOCK */ -struct blk_plug { -}; +/* + * tag stuff + */ +extern int blk_queue_start_tag(struct request_queue *, struct request *); +extern struct request *blk_queue_find_tag(struct request_queue *, int); +extern void blk_queue_end_tag(struct request_queue *, struct request *); +extern int blk_queue_init_tags(struct request_queue *, int, struct blk_queue_tag *, int); +extern void blk_queue_free_tags(struct request_queue *); +extern int blk_queue_resize_tags(struct request_queue *, int); +extern void blk_queue_invalidate_tags(struct request_queue *); +extern struct blk_queue_tag *blk_init_tags(int, int); +extern void blk_free_tags(struct blk_queue_tag *); -static inline void 
blk_start_plug(struct blk_plug *plug) +static inline struct request *blk_map_queue_find_tag(struct blk_queue_tag *bqt, + int tag) { + if (unlikely(bqt == NULL || tag >= bqt->real_max_depth)) + return NULL; + return bqt->tag_index[tag]; } -static inline void blk_finish_plug(struct blk_plug *plug) -{ -} - -static inline void blk_flush_plug(struct task_struct *task) -{ -} - -static inline void blk_schedule_flush_plug(struct task_struct *task) -{ -} - - -static inline bool blk_needs_flush_plug(struct task_struct *tsk) -{ - return false; -} - -static inline int blkdev_issue_flush(struct block_device *bdev) -{ - return 0; -} - -static inline long nr_blockdev_pages(void) -{ - return 0; -} -#endif /* CONFIG_BLOCK */ - -extern void blk_io_schedule(void); - -extern int blkdev_issue_write_same(struct block_device *bdev, sector_t sector, - sector_t nr_sects, gfp_t gfp_mask, struct page *page); #define BLKDEV_DISCARD_SECURE (1 << 0) /* issue a secure erase */ +#define BLKDEV_DISCARD_ZERO (1 << 1) /* must reliably zero data */ +extern int blkdev_issue_flush(struct block_device *, gfp_t, sector_t *); extern int blkdev_issue_discard(struct block_device *bdev, sector_t sector, sector_t nr_sects, gfp_t gfp_mask, unsigned long flags); extern int __blkdev_issue_discard(struct block_device *bdev, sector_t sector, sector_t nr_sects, gfp_t gfp_mask, int flags, struct bio **biop); - -#define BLKDEV_ZERO_NOUNMAP (1 << 0) /* do not free blocks */ -#define BLKDEV_ZERO_NOFALLBACK (1 << 1) /* don't write explicit zeroes */ - -extern int __blkdev_issue_zeroout(struct block_device *bdev, sector_t sector, - sector_t nr_sects, gfp_t gfp_mask, struct bio **biop, - unsigned flags); +extern int blkdev_issue_write_same(struct block_device *bdev, sector_t sector, + sector_t nr_sects, gfp_t gfp_mask, struct page *page); extern int blkdev_issue_zeroout(struct block_device *bdev, sector_t sector, - sector_t nr_sects, gfp_t gfp_mask, unsigned flags); - + sector_t nr_sects, gfp_t gfp_mask, bool discard); 
static inline int sb_issue_discard(struct super_block *sb, sector_t block, sector_t nr_blocks, gfp_t gfp_mask, unsigned long flags) { - return blkdev_issue_discard(sb->s_bdev, - block << (sb->s_blocksize_bits - - SECTOR_SHIFT), - nr_blocks << (sb->s_blocksize_bits - - SECTOR_SHIFT), + return blkdev_issue_discard(sb->s_bdev, block << (sb->s_blocksize_bits - 9), + nr_blocks << (sb->s_blocksize_bits - 9), gfp_mask, flags); } static inline int sb_issue_zeroout(struct super_block *sb, sector_t block, sector_t nr_blocks, gfp_t gfp_mask) { return blkdev_issue_zeroout(sb->s_bdev, - block << (sb->s_blocksize_bits - - SECTOR_SHIFT), - nr_blocks << (sb->s_blocksize_bits - - SECTOR_SHIFT), - gfp_mask, 0); + block << (sb->s_blocksize_bits - 9), + nr_blocks << (sb->s_blocksize_bits - 9), + gfp_mask, true); } -static inline bool bdev_is_partition(struct block_device *bdev) -{ - return bdev->bd_partno; -} +extern int blk_verify_command(unsigned char *cmd, fmode_t has_write_perm); enum blk_default_limits { BLK_MAX_SEGMENTS = 128, @@ -1334,55 +1179,44 @@ enum blk_default_limits { BLK_SEG_BOUNDARY_MASK = 0xFFFFFFFFUL, }; -static inline unsigned long queue_segment_boundary(const struct request_queue *q) +#define blkdev_entry_to_request(entry) list_entry((entry), struct request, queuelist) + +static inline unsigned long queue_bounce_pfn(struct request_queue *q) +{ + return q->limits.bounce_pfn; +} + +static inline unsigned long queue_segment_boundary(struct request_queue *q) { return q->limits.seg_boundary_mask; } -static inline unsigned long queue_virt_boundary(const struct request_queue *q) +static inline unsigned long queue_virt_boundary(struct request_queue *q) { return q->limits.virt_boundary_mask; } -static inline unsigned int queue_max_sectors(const struct request_queue *q) +static inline unsigned int queue_max_sectors(struct request_queue *q) { return q->limits.max_sectors; } -static inline unsigned int queue_max_bytes(struct request_queue *q) -{ - return min_t(unsigned int, 
queue_max_sectors(q), INT_MAX >> 9) << 9; -} - -static inline unsigned int queue_max_hw_sectors(const struct request_queue *q) +static inline unsigned int queue_max_hw_sectors(struct request_queue *q) { return q->limits.max_hw_sectors; } -static inline unsigned short queue_max_segments(const struct request_queue *q) +static inline unsigned short queue_max_segments(struct request_queue *q) { return q->limits.max_segments; } -static inline unsigned short queue_max_discard_segments(const struct request_queue *q) -{ - return q->limits.max_discard_segments; -} - -static inline unsigned int queue_max_segment_size(const struct request_queue *q) +static inline unsigned int queue_max_segment_size(struct request_queue *q) { return q->limits.max_segment_size; } -static inline unsigned int queue_max_zone_append_sectors(const struct request_queue *q) -{ - - const struct queue_limits *l = &q->limits; - - return min(l->max_zone_append_sectors, l->max_sectors); -} - -static inline unsigned queue_logical_block_size(const struct request_queue *q) +static inline unsigned short queue_logical_block_size(struct request_queue *q) { int retval = 512; @@ -1392,12 +1226,12 @@ static inline unsigned queue_logical_block_size(const struct request_queue *q) return retval; } -static inline unsigned int bdev_logical_block_size(struct block_device *bdev) +static inline unsigned short bdev_logical_block_size(struct block_device *bdev) { return queue_logical_block_size(bdev_get_queue(bdev)); } -static inline unsigned int queue_physical_block_size(const struct request_queue *q) +static inline unsigned int queue_physical_block_size(struct request_queue *q) { return q->limits.physical_block_size; } @@ -1407,7 +1241,7 @@ static inline unsigned int bdev_physical_block_size(struct block_device *bdev) return queue_physical_block_size(bdev_get_queue(bdev)); } -static inline unsigned int queue_io_min(const struct request_queue *q) +static inline unsigned int queue_io_min(struct request_queue *q) { return 
q->limits.io_min; } @@ -1417,7 +1251,7 @@ static inline int bdev_io_min(struct block_device *bdev) return queue_io_min(bdev_get_queue(bdev)); } -static inline unsigned int queue_io_opt(const struct request_queue *q) +static inline unsigned int queue_io_opt(struct request_queue *q) { return q->limits.io_opt; } @@ -1427,19 +1261,7 @@ static inline int bdev_io_opt(struct block_device *bdev) return queue_io_opt(bdev_get_queue(bdev)); } -static inline unsigned int -queue_zone_write_granularity(const struct request_queue *q) -{ - return q->limits.zone_write_granularity; -} - -static inline unsigned int -bdev_zone_write_granularity(struct block_device *bdev) -{ - return queue_zone_write_granularity(bdev_get_queue(bdev)); -} - -static inline int queue_alignment_offset(const struct request_queue *q) +static inline int queue_alignment_offset(struct request_queue *q) { if (q->limits.misaligned) return -1; @@ -1450,8 +1272,7 @@ static inline int queue_alignment_offset(const struct request_queue *q) static inline int queue_limit_alignment_offset(struct queue_limits *lim, sector_t sector) { unsigned int granularity = max(lim->physical_block_size, lim->io_min); - unsigned int alignment = sector_div(sector, granularity >> SECTOR_SHIFT) - << SECTOR_SHIFT; + unsigned int alignment = sector_div(sector, granularity >> 9) << 9; return (granularity + lim->alignment_offset - alignment) % granularity; } @@ -1462,13 +1283,14 @@ static inline int bdev_alignment_offset(struct block_device *bdev) if (q->limits.misaligned) return -1; - if (bdev_is_partition(bdev)) - return queue_limit_alignment_offset(&q->limits, - bdev->bd_start_sect); + + if (bdev != bdev->bd_contains) + return bdev->bd_part->alignment_offset; + return q->limits.alignment_offset; } -static inline int queue_discard_alignment(const struct request_queue *q) +static inline int queue_discard_alignment(struct request_queue *q) { if (q->limits.discard_misaligned) return -1; @@ -1484,8 +1306,8 @@ static inline int 
queue_limit_discard_alignment(struct queue_limits *lim, sector return 0; /* Why are these in bytes, not sectors? */ - alignment = lim->discard_alignment >> SECTOR_SHIFT; - granularity = lim->discard_granularity >> SECTOR_SHIFT; + alignment = lim->discard_alignment >> 9; + granularity = lim->discard_granularity >> 9; if (!granularity) return 0; @@ -1496,35 +1318,32 @@ static inline int queue_limit_discard_alignment(struct queue_limits *lim, sector offset = (granularity + alignment - offset) % granularity; /* Turn it back into bytes, gaah */ - return offset << SECTOR_SHIFT; -} - -/* - * Two cases of handling DISCARD merge: - * If max_discard_segments > 1, the driver takes every bio - * as a range and send them to controller together. The ranges - * needn't to be contiguous. - * Otherwise, the bios/requests will be handled as same as - * others which should be contiguous. - */ -static inline bool blk_discard_mergable(struct request *req) -{ - if (req_op(req) == REQ_OP_DISCARD && - queue_max_discard_segments(req->q) > 1) - return true; - return false; + return offset << 9; } static inline int bdev_discard_alignment(struct block_device *bdev) { struct request_queue *q = bdev_get_queue(bdev); - if (bdev_is_partition(bdev)) - return queue_limit_discard_alignment(&q->limits, - bdev->bd_start_sect); + if (bdev != bdev->bd_contains) + return bdev->bd_part->discard_alignment; + return q->limits.discard_alignment; } +static inline unsigned int queue_discard_zeroes_data(struct request_queue *q) +{ + if (q->limits.max_discard_sectors && q->limits.discard_zeroes_data == 1) + return 1; + + return 0; +} + +static inline unsigned int bdev_discard_zeroes_data(struct block_device *bdev) +{ + return queue_discard_zeroes_data(bdev_get_queue(bdev)); +} + static inline unsigned int bdev_write_same(struct block_device *bdev) { struct request_queue *q = bdev_get_queue(bdev); @@ -1535,64 +1354,7 @@ static inline unsigned int bdev_write_same(struct block_device *bdev) return 0; } -static 
inline unsigned int bdev_write_zeroes_sectors(struct block_device *bdev) -{ - struct request_queue *q = bdev_get_queue(bdev); - - if (q) - return q->limits.max_write_zeroes_sectors; - - return 0; -} - -static inline enum blk_zoned_model bdev_zoned_model(struct block_device *bdev) -{ - struct request_queue *q = bdev_get_queue(bdev); - - if (q) - return blk_queue_zoned_model(q); - - return BLK_ZONED_NONE; -} - -static inline bool bdev_is_zoned(struct block_device *bdev) -{ - struct request_queue *q = bdev_get_queue(bdev); - - if (q) - return blk_queue_is_zoned(q); - - return false; -} - -static inline sector_t bdev_zone_sectors(struct block_device *bdev) -{ - struct request_queue *q = bdev_get_queue(bdev); - - if (q) - return blk_queue_zone_sectors(q); - return 0; -} - -static inline unsigned int bdev_max_open_zones(struct block_device *bdev) -{ - struct request_queue *q = bdev_get_queue(bdev); - - if (q) - return queue_max_open_zones(q); - return 0; -} - -static inline unsigned int bdev_max_active_zones(struct block_device *bdev) -{ - struct request_queue *q = bdev_get_queue(bdev); - - if (q) - return queue_max_active_zones(q); - return 0; -} - -static inline int queue_dma_alignment(const struct request_queue *q) +static inline int queue_dma_alignment(struct request_queue *q) { return q ? 
q->dma_alignment : 511; } @@ -1617,11 +1379,113 @@ static inline unsigned int blksize_bits(unsigned int size) static inline unsigned int block_size(struct block_device *bdev) { - return 1 << bdev->bd_inode->i_blkbits; + return bdev->bd_block_size; +} + +static inline bool queue_flush_queueable(struct request_queue *q) +{ + return !test_bit(QUEUE_FLAG_FLUSH_NQ, &q->queue_flags); +} + +typedef struct {struct page *v;} Sector; + +unsigned char *read_dev_sector(struct block_device *, sector_t, Sector *); + +static inline void put_dev_sector(Sector p) +{ + put_page(p.v); +} + +static inline bool __bvec_gap_to_prev(struct request_queue *q, + struct bio_vec *bprv, unsigned int offset) +{ + return offset || + ((bprv->bv_offset + bprv->bv_len) & queue_virt_boundary(q)); +} + +/* + * Check if adding a bio_vec after bprv with offset would create a gap in + * the SG list. Most drivers don't care about this, but some do. + */ +static inline bool bvec_gap_to_prev(struct request_queue *q, + struct bio_vec *bprv, unsigned int offset) +{ + if (!queue_virt_boundary(q)) + return false; + return __bvec_gap_to_prev(q, bprv, offset); +} + +static inline bool bio_will_gap(struct request_queue *q, struct bio *prev, + struct bio *next) +{ + if (bio_has_data(prev) && queue_virt_boundary(q)) { + struct bio_vec pb, nb; + + bio_get_last_bvec(prev, &pb); + bio_get_first_bvec(next, &nb); + + return __bvec_gap_to_prev(q, &pb, nb.bv_offset); + } + + return false; +} + +static inline bool req_gap_back_merge(struct request *req, struct bio *bio) +{ + return bio_will_gap(req->q, req->biotail, bio); +} + +static inline bool req_gap_front_merge(struct request *req, struct bio *bio) +{ + return bio_will_gap(req->q, bio, req->bio); } int kblockd_schedule_work(struct work_struct *work); -int kblockd_mod_delayed_work_on(int cpu, struct delayed_work *dwork, unsigned long delay); +int kblockd_schedule_work_on(int cpu, struct work_struct *work); +int kblockd_schedule_delayed_work(struct delayed_work *dwork, 
unsigned long delay); +int kblockd_schedule_delayed_work_on(int cpu, struct delayed_work *dwork, unsigned long delay); + +#ifdef CONFIG_BLK_CGROUP +/* + * This should not be using sched_clock(). A real patch is in progress + * to fix this up, until that is in place we need to disable preemption + * around sched_clock() in this function and set_io_start_time_ns(). + */ +static inline void set_start_time_ns(struct request *req) +{ + preempt_disable(); + req->start_time_ns = sched_clock(); + preempt_enable(); +} + +static inline void set_io_start_time_ns(struct request *req) +{ + preempt_disable(); + req->io_start_time_ns = sched_clock(); + preempt_enable(); +} + +static inline uint64_t rq_start_time_ns(struct request *req) +{ + return req->start_time_ns; +} + +static inline uint64_t rq_io_start_time_ns(struct request *req) +{ + return req->io_start_time_ns; +} +#else +static inline void set_start_time_ns(struct request *req) {} +static inline void set_io_start_time_ns(struct request *req) {} +static inline uint64_t rq_start_time_ns(struct request *req) +{ + return 0; +} +static inline uint64_t rq_io_start_time_ns(struct request *req) +{ + return 0; +} +#endif #define MODULE_ALIAS_BLOCKDEV(major,minor) \ MODULE_ALIAS("block-major-" __stringify(major) "-" __stringify(minor)) @@ -1646,15 +1510,11 @@ struct blk_integrity_iter { const char *disk_name; }; -typedef blk_status_t (integrity_processing_fn) (struct blk_integrity_iter *); -typedef void (integrity_prepare_fn) (struct request *); -typedef void (integrity_complete_fn) (struct request *, unsigned int); +typedef int (integrity_processing_fn) (struct blk_integrity_iter *); struct blk_integrity_profile { integrity_processing_fn *generate_fn; integrity_processing_fn *verify_fn; - integrity_prepare_fn *prepare_fn; - integrity_complete_fn *complete_fn; const char *name; }; @@ -1664,6 +1524,10 @@ extern int blk_integrity_compare(struct gendisk *, struct gendisk *); extern int blk_rq_map_integrity_sg(struct request_queue *, 
struct bio *, struct scatterlist *); extern int blk_rq_count_integrity_sg(struct request_queue *, struct bio *); +extern bool blk_integrity_merge_rq(struct request_queue *, struct request *, + struct request *); +extern bool blk_integrity_merge_bio(struct request_queue *, struct request *, + struct bio *); static inline struct blk_integrity *blk_get_integrity(struct gendisk *disk) { @@ -1681,12 +1545,6 @@ struct blk_integrity *bdev_get_integrity(struct block_device *bdev) return blk_get_integrity(bdev->bd_disk); } -static inline bool -blk_integrity_queue_supports_integrity(struct request_queue *q) -{ - return q->integrity.profile; -} - static inline bool blk_integrity_rq(struct request *rq) { return rq->cmd_flags & REQ_INTEGRITY; @@ -1699,42 +1557,29 @@ static inline void blk_queue_max_integrity_segments(struct request_queue *q, } static inline unsigned short -queue_max_integrity_segments(const struct request_queue *q) +queue_max_integrity_segments(struct request_queue *q) { return q->limits.max_integrity_segments; } -/** - * bio_integrity_intervals - Return number of integrity intervals for a bio - * @bi: blk_integrity profile for device - * @sectors: Size of the bio in 512-byte sectors - * - * Description: The block layer calculates everything in 512 byte - * sectors but integrity metadata is done in terms of the data integrity - * interval size of the storage device. Convert the block layer sectors - * to the appropriate number of integrity intervals. 
- */ -static inline unsigned int bio_integrity_intervals(struct blk_integrity *bi, - unsigned int sectors) +static inline bool integrity_req_gap_back_merge(struct request *req, + struct bio *next) { - return sectors >> (bi->interval_exp - 9); + struct bio_integrity_payload *bip = bio_integrity(req->bio); + struct bio_integrity_payload *bip_next = bio_integrity(next); + + return bvec_gap_to_prev(req->q, &bip->bip_vec[bip->bip_vcnt - 1], + bip_next->bip_vec[0].bv_offset); } -static inline unsigned int bio_integrity_bytes(struct blk_integrity *bi, - unsigned int sectors) +static inline bool integrity_req_gap_front_merge(struct request *req, + struct bio *bio) { - return bio_integrity_intervals(bi, sectors) * bi->tuple_size; -} + struct bio_integrity_payload *bip = bio_integrity(bio); + struct bio_integrity_payload *bip_next = bio_integrity(req->bio); -/* - * Return the first bvec that contains integrity data. Only drivers that are - * limited to a single integrity segment should use this helper. 
- */ -static inline struct bio_vec *rq_integrity_vec(struct request *rq) -{ - if (WARN_ON_ONCE(queue_max_integrity_segments(rq->q) > 1)) - return NULL; - return rq->bio->bi_integrity->bip_vec; + return bvec_gap_to_prev(req->q, &bip->bip_vec[bip->bip_vcnt - 1], + bip_next->bip_vec[0].bv_offset); } #else /* CONFIG_BLK_DEV_INTEGRITY */ @@ -1767,11 +1612,6 @@ static inline struct blk_integrity *blk_get_integrity(struct gendisk *disk) { return NULL; } -static inline bool -blk_integrity_queue_supports_integrity(struct request_queue *q) -{ - return false; -} static inline int blk_integrity_compare(struct gendisk *a, struct gendisk *b) { return 0; @@ -1787,222 +1627,124 @@ static inline void blk_queue_max_integrity_segments(struct request_queue *q, unsigned int segs) { } -static inline unsigned short queue_max_integrity_segments(const struct request_queue *q) +static inline unsigned short queue_max_integrity_segments(struct request_queue *q) { return 0; } - -static inline unsigned int bio_integrity_intervals(struct blk_integrity *bi, - unsigned int sectors) +static inline bool blk_integrity_merge_rq(struct request_queue *rq, + struct request *r1, + struct request *r2) { - return 0; + return true; +} +static inline bool blk_integrity_merge_bio(struct request_queue *rq, + struct request *r, + struct bio *b) +{ + return true; } -static inline unsigned int bio_integrity_bytes(struct blk_integrity *bi, - unsigned int sectors) +static inline bool integrity_req_gap_back_merge(struct request *req, + struct bio *next) { - return 0; + return false; } - -static inline struct bio_vec *rq_integrity_vec(struct request *rq) +static inline bool integrity_req_gap_front_merge(struct request *req, + struct bio *bio) { - return NULL; + return false; } #endif /* CONFIG_BLK_DEV_INTEGRITY */ -#ifdef CONFIG_BLK_INLINE_ENCRYPTION - -bool blk_ksm_register(struct blk_keyslot_manager *ksm, struct request_queue *q); - -void blk_ksm_unregister(struct request_queue *q); - -#else /* 
CONFIG_BLK_INLINE_ENCRYPTION */ - -static inline bool blk_ksm_register(struct blk_keyslot_manager *ksm, - struct request_queue *q) -{ - return true; -} - -static inline void blk_ksm_unregister(struct request_queue *q) { } - -#endif /* CONFIG_BLK_INLINE_ENCRYPTION */ - - -struct block_device_operations { - blk_qc_t (*submit_bio) (struct bio *bio); - int (*open) (struct block_device *, fmode_t); - void (*release) (struct gendisk *, fmode_t); - int (*rw_page)(struct block_device *, sector_t, struct page *, unsigned int); - int (*ioctl) (struct block_device *, fmode_t, unsigned, unsigned long); - int (*compat_ioctl) (struct block_device *, fmode_t, unsigned, unsigned long); - unsigned int (*check_events) (struct gendisk *disk, - unsigned int clearing); - void (*unlock_native_capacity) (struct gendisk *); - int (*getgeo)(struct block_device *, struct hd_geometry *); - int (*set_read_only)(struct block_device *bdev, bool ro); - /* this callback is with swap_lock and sometimes page table lock held */ - void (*swap_slot_free_notify) (struct block_device *, unsigned long); - int (*report_zones)(struct gendisk *, sector_t sector, - unsigned int nr_zones, report_zones_cb cb, void *data); - char *(*devnode)(struct gendisk *disk, umode_t *mode); - struct module *owner; - const struct pr_ops *pr_ops; - - /* - * Special callback for probing GPT entry at a given sector. - * Needed by Android devices, used by GPT scanner and MMC blk - * driver. 
- */ - int (*alternative_gpt_sector)(struct gendisk *disk, sector_t *sector); +/** + * struct blk_dax_ctl - control and output parameters for ->direct_access + * @sector: (input) offset relative to a block_device + * @addr: (output) kernel virtual address for @sector populated by driver + * @pfn: (output) page frame number for @addr populated by driver + * @size: (input) number of bytes requested + */ +struct blk_dax_ctl { + sector_t sector; + void *addr; + long size; + pfn_t pfn; }; -#ifdef CONFIG_COMPAT -extern int blkdev_compat_ptr_ioctl(struct block_device *, fmode_t, - unsigned int, unsigned long); -#else -#define blkdev_compat_ptr_ioctl NULL -#endif +struct block_device_operations { + int (*open) (struct block_device *, fmode_t); + void (*release) (struct gendisk *, fmode_t); + int (*rw_page)(struct block_device *, sector_t, struct page *, bool); + int (*ioctl) (struct block_device *, fmode_t, unsigned, unsigned long); + int (*compat_ioctl) (struct block_device *, fmode_t, unsigned, unsigned long); + long (*direct_access)(struct block_device *, sector_t, void **, pfn_t *, + long); + unsigned int (*check_events) (struct gendisk *disk, + unsigned int clearing); + /* ->media_changed() is DEPRECATED, use ->check_events() instead */ + int (*media_changed) (struct gendisk *); + void (*unlock_native_capacity) (struct gendisk *); + int (*revalidate_disk) (struct gendisk *); + int (*getgeo)(struct block_device *, struct hd_geometry *); + /* this callback is with swap_lock and sometimes page table lock held */ + void (*swap_slot_free_notify) (struct block_device *, unsigned long); + struct module *owner; + const struct pr_ops *pr_ops; +} __do_const; +extern int __blkdev_driver_ioctl(struct block_device *, fmode_t, unsigned int, + unsigned long); extern int bdev_read_page(struct block_device *, sector_t, struct page *); extern int bdev_write_page(struct block_device *, sector_t, struct page *, struct writeback_control *); +extern long bdev_direct_access(struct 
block_device *, struct blk_dax_ctl *); +extern int bdev_dax_supported(struct super_block *, int); +extern bool bdev_dax_capable(struct block_device *); +#else /* CONFIG_BLOCK */ -#ifdef CONFIG_BLK_DEV_ZONED -bool blk_req_needs_zone_write_lock(struct request *rq); -bool blk_req_zone_write_trylock(struct request *rq); -void __blk_req_zone_write_lock(struct request *rq); -void __blk_req_zone_write_unlock(struct request *rq); +struct block_device; -static inline void blk_req_zone_write_lock(struct request *rq) -{ - if (blk_req_needs_zone_write_lock(rq)) - __blk_req_zone_write_lock(rq); -} - -static inline void blk_req_zone_write_unlock(struct request *rq) -{ - if (rq->rq_flags & RQF_ZONE_WRITE_LOCKED) - __blk_req_zone_write_unlock(rq); -} - -static inline bool blk_req_zone_is_write_locked(struct request *rq) -{ - return rq->q->seq_zones_wlock && - test_bit(blk_rq_zone_no(rq), rq->q->seq_zones_wlock); -} - -static inline bool blk_req_can_dispatch_to_zone(struct request *rq) -{ - if (!blk_req_needs_zone_write_lock(rq)) - return true; - return !blk_req_zone_is_write_locked(rq); -} -#else -static inline bool blk_req_needs_zone_write_lock(struct request *rq) -{ - return false; -} - -static inline void blk_req_zone_write_lock(struct request *rq) -{ -} - -static inline void blk_req_zone_write_unlock(struct request *rq) -{ -} -static inline bool blk_req_zone_is_write_locked(struct request *rq) -{ - return false; -} - -static inline bool blk_req_can_dispatch_to_zone(struct request *rq) -{ - return true; -} -#endif /* CONFIG_BLK_DEV_ZONED */ - -static inline void blk_wake_io_task(struct task_struct *waiter) -{ - /* - * If we're polling, the task itself is doing the completions. For - * that case, we don't need to signal a wakeup, it's enough to just - * mark us as RUNNING. 
- */ - if (waiter == current) - __set_current_state(TASK_RUNNING); - else - wake_up_process(waiter); -} - -unsigned long disk_start_io_acct(struct gendisk *disk, unsigned int sectors, - unsigned int op); -void disk_end_io_acct(struct gendisk *disk, unsigned int op, - unsigned long start_time); - -unsigned long bio_start_io_acct(struct bio *bio); -void bio_end_io_acct_remapped(struct bio *bio, unsigned long start_time, - struct block_device *orig_bdev); - -/** - * bio_end_io_acct - end I/O accounting for bio based drivers - * @bio: bio to end account for - * @start: start time returned by bio_start_io_acct() +/* + * stubs for when the block layer is configured out */ -static inline void bio_end_io_acct(struct bio *bio, unsigned long start_time) -{ - return bio_end_io_acct_remapped(bio, start_time, bio->bi_bdev); -} +#define buffer_heads_over_limit 0 -int bdev_read_only(struct block_device *bdev); -int set_blocksize(struct block_device *bdev, int size); - -const char *bdevname(struct block_device *bdev, char *buffer); -int lookup_bdev(const char *pathname, dev_t *dev); - -void blkdev_show(struct seq_file *seqf, off_t offset); - -#define BDEVNAME_SIZE 32 /* Largest string for a blockdev identifier */ -#define BDEVT_SIZE 10 /* Largest string for MAJ:MIN for blkdev */ -#ifdef CONFIG_BLOCK -#define BLKDEV_MAJOR_MAX 512 -#else -#define BLKDEV_MAJOR_MAX 0 -#endif - -struct block_device *blkdev_get_by_path(const char *path, fmode_t mode, - void *holder); -struct block_device *blkdev_get_by_dev(dev_t dev, fmode_t mode, void *holder); -int bd_prepare_to_claim(struct block_device *bdev, void *holder); -void bd_abort_claiming(struct block_device *bdev, void *holder); -void blkdev_put(struct block_device *bdev, fmode_t mode); - -/* just for blk-cgroup, don't use elsewhere */ -struct block_device *blkdev_get_no_open(dev_t dev); -void blkdev_put_no_open(struct block_device *bdev); - -struct block_device *bdev_alloc(struct gendisk *disk, u8 partno); -void bdev_add(struct 
block_device *bdev, dev_t dev); -struct block_device *I_BDEV(struct inode *inode); -int truncate_bdev_range(struct block_device *bdev, fmode_t mode, loff_t lstart, - loff_t lend); - -#ifdef CONFIG_BLOCK -void invalidate_bdev(struct block_device *bdev); -int sync_blockdev(struct block_device *bdev); -#else -static inline void invalidate_bdev(struct block_device *bdev) -{ -} -static inline int sync_blockdev(struct block_device *bdev) +static inline long nr_blockdev_pages(void) { return 0; } + +struct blk_plug { +}; + +static inline void blk_start_plug(struct blk_plug *plug) +{ +} + +static inline void blk_finish_plug(struct blk_plug *plug) +{ +} + +static inline void blk_flush_plug(struct task_struct *task) +{ +} + +static inline void blk_schedule_flush_plug(struct task_struct *task) +{ +} + + +static inline bool blk_needs_flush_plug(struct task_struct *tsk) +{ + return false; +} + +static inline int blkdev_issue_flush(struct block_device *bdev, gfp_t gfp_mask, + sector_t *error_sector) +{ + return 0; +} + +#endif /* CONFIG_BLOCK */ + #endif -int fsync_bdev(struct block_device *bdev); - -int freeze_bdev(struct block_device *bdev); -int thaw_bdev(struct block_device *bdev); - -#endif /* _LINUX_BLKDEV_H */ diff --git a/include/linux/blkpg.h b/include/linux/blkpg.h index 1c91753c3c..bef124fde6 100644 --- a/include/linux/blkpg.h +++ b/include/linux/blkpg.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_BLKPG_H #define _LINUX_BLKPG_H diff --git a/include/linux/blktrace_api.h b/include/linux/blktrace_api.h index a083e15df6..c9f287a26c 100644 --- a/include/linux/blktrace_api.h +++ b/include/linux/blktrace_api.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef BLKTRACE_H #define BLKTRACE_H @@ -23,16 +22,19 @@ struct blk_trace { u32 pid; u32 dev; struct dentry *dir; + struct dentry *dropped_file; + struct dentry *msg_file; struct list_head running_list; - atomic_t dropped; + atomic_unchecked_t dropped; }; -struct blkcg; - extern int 
blk_trace_ioctl(struct block_device *, unsigned, char __user *); extern void blk_trace_shutdown(struct request_queue *); -extern __printf(3, 4) -void __trace_note_message(struct blk_trace *, struct blkcg *blkcg, const char *fmt, ...); +extern int do_blk_trace_setup(struct request_queue *q, char *name, + dev_t dev, struct block_device *bdev, + struct blk_user_trace_setup *buts); +extern __printf(2, 3) +void __trace_note_message(struct blk_trace *, const char *fmt, ...); /** * blk_add_trace_msg - Add a (simple) message to the blktrace stream @@ -47,33 +49,24 @@ void __trace_note_message(struct blk_trace *, struct blkcg *blkcg, const char *f * NOTE: Can not use 'static inline' due to presence of var args... * **/ -#define blk_add_cgroup_trace_msg(q, cg, fmt, ...) \ - do { \ - struct blk_trace *bt; \ - \ - rcu_read_lock(); \ - bt = rcu_dereference((q)->blk_trace); \ - if (unlikely(bt)) \ - __trace_note_message(bt, cg, fmt, ##__VA_ARGS__);\ - rcu_read_unlock(); \ - } while (0) #define blk_add_trace_msg(q, fmt, ...) 
\ - blk_add_cgroup_trace_msg(q, NULL, fmt, ##__VA_ARGS__) + do { \ + struct blk_trace *bt = (q)->blk_trace; \ + if (unlikely(bt)) \ + __trace_note_message(bt, fmt, ##__VA_ARGS__); \ + } while (0) #define BLK_TN_MAX_MSG 128 static inline bool blk_trace_note_message_enabled(struct request_queue *q) { - struct blk_trace *bt; - bool ret; - - rcu_read_lock(); - bt = rcu_dereference(q->blk_trace); - ret = bt && (bt->act_mask & BLK_TC_NOTIFY); - rcu_read_unlock(); - return ret; + struct blk_trace *bt = q->blk_trace; + if (likely(!bt)) + return false; + return bt->act_mask & BLK_TC_NOTIFY; } -extern void blk_add_driver_data(struct request *rq, void *data, size_t len); +extern void blk_add_driver_data(struct request_queue *q, struct request *rq, + void *data, size_t len); extern int blk_trace_setup(struct request_queue *q, char *name, dev_t dev, struct block_device *bdev, char __user *arg); @@ -87,12 +80,12 @@ extern struct attribute_group blk_trace_attr_group; #else /* !CONFIG_BLK_DEV_IO_TRACE */ # define blk_trace_ioctl(bdev, cmd, arg) (-ENOTTY) # define blk_trace_shutdown(q) do { } while (0) -# define blk_add_driver_data(rq, data, len) do {} while (0) +# define do_blk_trace_setup(q, name, dev, bdev, buts) (-ENOTTY) +# define blk_add_driver_data(q, rq, data, len) do {} while (0) # define blk_trace_setup(q, name, dev, bdev, arg) (-ENOTTY) # define blk_trace_startstop(q, start) (-ENOTTY) # define blk_trace_remove(q) (-ENOTTY) # define blk_add_trace_msg(q, fmt, ...) do { } while (0) -# define blk_add_cgroup_trace_msg(q, cg, fmt, ...) 
do { } while (0) # define blk_trace_remove_sysfs(dev) do { } while (0) # define blk_trace_note_message_enabled(q) (false) static inline int blk_trace_init_sysfs(struct device *dev) @@ -117,22 +110,16 @@ struct compat_blk_user_trace_setup { #endif -void blk_fill_rwbs(char *rwbs, unsigned int op); +#if defined(CONFIG_EVENT_TRACING) && defined(CONFIG_BLOCK) -static inline sector_t blk_rq_trace_sector(struct request *rq) +static inline int blk_cmd_buf_len(struct request *rq) { - /* - * Tracing should ignore starting sector for passthrough requests and - * requests where starting sector didn't get set. - */ - if (blk_rq_is_passthrough(rq) || blk_rq_pos(rq) == (sector_t)-1) - return 0; - return blk_rq_pos(rq); + return (rq->cmd_type == REQ_TYPE_BLOCK_PC) ? rq->cmd_len * 3 : 1; } -static inline unsigned int blk_rq_trace_nr_sectors(struct request *rq) -{ - return blk_rq_is_passthrough(rq) ? 0 : blk_rq_sectors(rq); -} +extern void blk_dump_cmd(char *buf, struct request *rq); +extern void blk_fill_rwbs(char *rwbs, int op, u32 rw, int bytes); + +#endif /* CONFIG_EVENT_TRACING && CONFIG_BLOCK */ #endif diff --git a/include/linux/blockgroup_lock.h b/include/linux/blockgroup_lock.h index 511ab123a8..225bdb7dae 100644 --- a/include/linux/blockgroup_lock.h +++ b/include/linux/blockgroup_lock.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_BLOCKGROUP_LOCK_H #define _LINUX_BLOCKGROUP_LOCK_H /* diff --git a/include/linux/bma150.h b/include/linux/bma150.h index 31c9e323a3..97ade7cdc8 100644 --- a/include/linux/bma150.h +++ b/include/linux/bma150.h @@ -1,7 +1,20 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * Copyright (c) 2011 Bosch Sensortec GmbH * Copyright (c) 2011 Unixphere + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. 
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ #ifndef _BMA150_H_ diff --git a/include/linux/bootmem.h b/include/linux/bootmem.h new file mode 100644 index 0000000000..962164d365 --- /dev/null +++ b/include/linux/bootmem.h @@ -0,0 +1,373 @@ +/* + * Discontiguous memory support, Kanoj Sarcar, SGI, Nov 1999 + */ +#ifndef _LINUX_BOOTMEM_H +#define _LINUX_BOOTMEM_H + +#include +#include +#include +#include + +/* + * simple boot-time physical memory area allocator. + */ + +extern unsigned long max_low_pfn; +extern unsigned long min_low_pfn; + +/* + * highest page + */ +extern unsigned long max_pfn; +/* + * highest possible page + */ +extern unsigned long long max_possible_pfn; + +#ifndef CONFIG_NO_BOOTMEM +/* + * node_bootmem_map is a map pointer - the bits represent all physical + * memory pages (including holes) on the node. 
+ */ +typedef struct bootmem_data { + unsigned long node_min_pfn; + unsigned long node_low_pfn; + void *node_bootmem_map; + unsigned long last_end_off; + unsigned long hint_idx; + struct list_head list; +} bootmem_data_t; + +extern bootmem_data_t bootmem_node_data[]; +#endif + +extern unsigned long bootmem_bootmap_pages(unsigned long); + +extern unsigned long init_bootmem_node(pg_data_t *pgdat, + unsigned long freepfn, + unsigned long startpfn, + unsigned long endpfn); +extern unsigned long init_bootmem(unsigned long addr, unsigned long memend); + +extern unsigned long free_all_bootmem(void); +extern void reset_node_managed_pages(pg_data_t *pgdat); +extern void reset_all_zones_managed_pages(void); + +extern void free_bootmem_node(pg_data_t *pgdat, + unsigned long addr, + unsigned long size); +extern void free_bootmem(unsigned long physaddr, unsigned long size); +extern void free_bootmem_late(unsigned long physaddr, unsigned long size); + +/* + * Flags for reserve_bootmem (also if CONFIG_HAVE_ARCH_BOOTMEM_NODE, + * the architecture-specific code should honor this). + * + * If flags is BOOTMEM_DEFAULT, then the return value is always 0 (success). + * If flags contains BOOTMEM_EXCLUSIVE, then -EBUSY is returned if the memory + * already was reserved. 
+ */ +#define BOOTMEM_DEFAULT 0 +#define BOOTMEM_EXCLUSIVE (1<<0) + +extern int reserve_bootmem(unsigned long addr, + unsigned long size, + int flags); +extern int reserve_bootmem_node(pg_data_t *pgdat, + unsigned long physaddr, + unsigned long size, + int flags); + +extern void *__alloc_bootmem(unsigned long size, + unsigned long align, + unsigned long goal); +extern void *__alloc_bootmem_nopanic(unsigned long size, + unsigned long align, + unsigned long goal) __malloc; +extern void *__alloc_bootmem_node(pg_data_t *pgdat, + unsigned long size, + unsigned long align, + unsigned long goal) __malloc; +void *__alloc_bootmem_node_high(pg_data_t *pgdat, + unsigned long size, + unsigned long align, + unsigned long goal) __malloc; +extern void *__alloc_bootmem_node_nopanic(pg_data_t *pgdat, + unsigned long size, + unsigned long align, + unsigned long goal) __malloc; +void *___alloc_bootmem_node_nopanic(pg_data_t *pgdat, + unsigned long size, + unsigned long align, + unsigned long goal, + unsigned long limit) __malloc; +extern void *__alloc_bootmem_low(unsigned long size, + unsigned long align, + unsigned long goal) __malloc; +void *__alloc_bootmem_low_nopanic(unsigned long size, + unsigned long align, + unsigned long goal) __malloc; +extern void *__alloc_bootmem_low_node(pg_data_t *pgdat, + unsigned long size, + unsigned long align, + unsigned long goal) __malloc; + +#ifdef CONFIG_NO_BOOTMEM +/* We are using top down, so it is safe to use 0 here */ +#define BOOTMEM_LOW_LIMIT 0 +#else +#define BOOTMEM_LOW_LIMIT __pa(MAX_DMA_ADDRESS) +#endif + +#ifndef ARCH_LOW_ADDRESS_LIMIT +#define ARCH_LOW_ADDRESS_LIMIT 0xffffffffUL +#endif + +#define alloc_bootmem(x) \ + __alloc_bootmem(x, SMP_CACHE_BYTES, BOOTMEM_LOW_LIMIT) +#define alloc_bootmem_align(x, align) \ + __alloc_bootmem(x, align, BOOTMEM_LOW_LIMIT) +#define alloc_bootmem_nopanic(x) \ + __alloc_bootmem_nopanic(x, SMP_CACHE_BYTES, BOOTMEM_LOW_LIMIT) +#define alloc_bootmem_pages(x) \ + __alloc_bootmem(x, PAGE_SIZE, 
BOOTMEM_LOW_LIMIT) +#define alloc_bootmem_pages_nopanic(x) \ + __alloc_bootmem_nopanic(x, PAGE_SIZE, BOOTMEM_LOW_LIMIT) +#define alloc_bootmem_node(pgdat, x) \ + __alloc_bootmem_node(pgdat, x, SMP_CACHE_BYTES, BOOTMEM_LOW_LIMIT) +#define alloc_bootmem_node_nopanic(pgdat, x) \ + __alloc_bootmem_node_nopanic(pgdat, x, SMP_CACHE_BYTES, BOOTMEM_LOW_LIMIT) +#define alloc_bootmem_pages_node(pgdat, x) \ + __alloc_bootmem_node(pgdat, x, PAGE_SIZE, BOOTMEM_LOW_LIMIT) +#define alloc_bootmem_pages_node_nopanic(pgdat, x) \ + __alloc_bootmem_node_nopanic(pgdat, x, PAGE_SIZE, BOOTMEM_LOW_LIMIT) + +#define alloc_bootmem_low(x) \ + __alloc_bootmem_low(x, SMP_CACHE_BYTES, 0) +#define alloc_bootmem_low_pages_nopanic(x) \ + __alloc_bootmem_low_nopanic(x, PAGE_SIZE, 0) +#define alloc_bootmem_low_pages(x) \ + __alloc_bootmem_low(x, PAGE_SIZE, 0) +#define alloc_bootmem_low_pages_node(pgdat, x) \ + __alloc_bootmem_low_node(pgdat, x, PAGE_SIZE, 0) + + +#if defined(CONFIG_HAVE_MEMBLOCK) && defined(CONFIG_NO_BOOTMEM) + +/* FIXME: use MEMBLOCK_ALLOC_* variants here */ +#define BOOTMEM_ALLOC_ACCESSIBLE 0 +#define BOOTMEM_ALLOC_ANYWHERE (~(phys_addr_t)0) + +/* FIXME: Move to memblock.h at a point where we remove nobootmem.c */ +void *memblock_virt_alloc_try_nid_nopanic(phys_addr_t size, + phys_addr_t align, phys_addr_t min_addr, + phys_addr_t max_addr, int nid); +void *memblock_virt_alloc_try_nid(phys_addr_t size, phys_addr_t align, + phys_addr_t min_addr, phys_addr_t max_addr, int nid); +void __memblock_free_early(phys_addr_t base, phys_addr_t size); +void __memblock_free_late(phys_addr_t base, phys_addr_t size); + +static inline void * __init memblock_virt_alloc( + phys_addr_t size, phys_addr_t align) +{ + return memblock_virt_alloc_try_nid(size, align, BOOTMEM_LOW_LIMIT, + BOOTMEM_ALLOC_ACCESSIBLE, + NUMA_NO_NODE); +} + +static inline void * __init memblock_virt_alloc_nopanic( + phys_addr_t size, phys_addr_t align) +{ + return memblock_virt_alloc_try_nid_nopanic(size, align, + 
BOOTMEM_LOW_LIMIT, + BOOTMEM_ALLOC_ACCESSIBLE, + NUMA_NO_NODE); +} + +static inline void * __init memblock_virt_alloc_low( + phys_addr_t size, phys_addr_t align) +{ + return memblock_virt_alloc_try_nid(size, align, + BOOTMEM_LOW_LIMIT, + ARCH_LOW_ADDRESS_LIMIT, + NUMA_NO_NODE); +} +static inline void * __init memblock_virt_alloc_low_nopanic( + phys_addr_t size, phys_addr_t align) +{ + return memblock_virt_alloc_try_nid_nopanic(size, align, + BOOTMEM_LOW_LIMIT, + ARCH_LOW_ADDRESS_LIMIT, + NUMA_NO_NODE); +} + +static inline void * __init memblock_virt_alloc_from_nopanic( + phys_addr_t size, phys_addr_t align, phys_addr_t min_addr) +{ + return memblock_virt_alloc_try_nid_nopanic(size, align, min_addr, + BOOTMEM_ALLOC_ACCESSIBLE, + NUMA_NO_NODE); +} + +static inline void * __init memblock_virt_alloc_node( + phys_addr_t size, int nid) +{ + return memblock_virt_alloc_try_nid(size, 0, BOOTMEM_LOW_LIMIT, + BOOTMEM_ALLOC_ACCESSIBLE, nid); +} + +static inline void * __init memblock_virt_alloc_node_nopanic( + phys_addr_t size, int nid) +{ + return memblock_virt_alloc_try_nid_nopanic(size, 0, BOOTMEM_LOW_LIMIT, + BOOTMEM_ALLOC_ACCESSIBLE, + nid); +} + +static inline void __init memblock_free_early( + phys_addr_t base, phys_addr_t size) +{ + __memblock_free_early(base, size); +} + +static inline void __init memblock_free_early_nid( + phys_addr_t base, phys_addr_t size, int nid) +{ + __memblock_free_early(base, size); +} + +static inline void __init memblock_free_late( + phys_addr_t base, phys_addr_t size) +{ + __memblock_free_late(base, size); +} + +#else + +#define BOOTMEM_ALLOC_ACCESSIBLE 0 + + +/* Fall back to all the existing bootmem APIs */ +static inline void * __init memblock_virt_alloc( + phys_addr_t size, phys_addr_t align) +{ + if (!align) + align = SMP_CACHE_BYTES; + return __alloc_bootmem(size, align, BOOTMEM_LOW_LIMIT); +} + +static inline void * __init memblock_virt_alloc_nopanic( + phys_addr_t size, phys_addr_t align) +{ + if (!align) + align = SMP_CACHE_BYTES; + 
return __alloc_bootmem_nopanic(size, align, BOOTMEM_LOW_LIMIT); +} + +static inline void * __init memblock_virt_alloc_low( + phys_addr_t size, phys_addr_t align) +{ + if (!align) + align = SMP_CACHE_BYTES; + return __alloc_bootmem_low(size, align, 0); +} + +static inline void * __init memblock_virt_alloc_low_nopanic( + phys_addr_t size, phys_addr_t align) +{ + if (!align) + align = SMP_CACHE_BYTES; + return __alloc_bootmem_low_nopanic(size, align, 0); +} + +static inline void * __init memblock_virt_alloc_from_nopanic( + phys_addr_t size, phys_addr_t align, phys_addr_t min_addr) +{ + return __alloc_bootmem_nopanic(size, align, min_addr); +} + +static inline void * __init memblock_virt_alloc_node( + phys_addr_t size, int nid) +{ + return __alloc_bootmem_node(NODE_DATA(nid), size, SMP_CACHE_BYTES, + BOOTMEM_LOW_LIMIT); +} + +static inline void * __init memblock_virt_alloc_node_nopanic( + phys_addr_t size, int nid) +{ + return __alloc_bootmem_node_nopanic(NODE_DATA(nid), size, + SMP_CACHE_BYTES, + BOOTMEM_LOW_LIMIT); +} + +static inline void * __init memblock_virt_alloc_try_nid(phys_addr_t size, + phys_addr_t align, phys_addr_t min_addr, phys_addr_t max_addr, int nid) +{ + return __alloc_bootmem_node_high(NODE_DATA(nid), size, align, + min_addr); +} + +static inline void * __init memblock_virt_alloc_try_nid_nopanic( + phys_addr_t size, phys_addr_t align, + phys_addr_t min_addr, phys_addr_t max_addr, int nid) +{ + return ___alloc_bootmem_node_nopanic(NODE_DATA(nid), size, align, + min_addr, max_addr); +} + +static inline void __init memblock_free_early( + phys_addr_t base, phys_addr_t size) +{ + free_bootmem(base, size); +} + +static inline void __init memblock_free_early_nid( + phys_addr_t base, phys_addr_t size, int nid) +{ + free_bootmem_node(NODE_DATA(nid), base, size); +} + +static inline void __init memblock_free_late( + phys_addr_t base, phys_addr_t size) +{ + free_bootmem_late(base, size); +} +#endif /* defined(CONFIG_HAVE_MEMBLOCK) && defined(CONFIG_NO_BOOTMEM) 
*/ + +#ifdef CONFIG_HAVE_ARCH_ALLOC_REMAP +extern void *alloc_remap(int nid, unsigned long size); +#else +static inline void *alloc_remap(int nid, unsigned long size) +{ + return NULL; +} +#endif /* CONFIG_HAVE_ARCH_ALLOC_REMAP */ + +extern void *alloc_large_system_hash(const char *tablename, + unsigned long bucketsize, + unsigned long numentries, + int scale, + int flags, + unsigned int *_hash_shift, + unsigned int *_hash_mask, + unsigned long low_limit, + unsigned long high_limit); + +#define HASH_EARLY 0x00000001 /* Allocating during early boot? */ +#define HASH_SMALL 0x00000002 /* sub-page allocation allowed, min + * shift passed via *_hash_shift */ + +/* Only NUMA needs hash distribution. 64bit NUMA architectures have + * sufficient vmalloc space. + */ +#ifdef CONFIG_NUMA +#define HASHDIST_DEFAULT IS_ENABLED(CONFIG_64BIT) +extern int hashdist; /* Distribute hashes across NUMA nodes? */ +#else +#define hashdist (0) +#endif + + +#endif /* _LINUX_BOOTMEM_H */ diff --git a/include/linux/bottom_half.h b/include/linux/bottom_half.h index eed86eb0a1..8fdcb78319 100644 --- a/include/linux/bottom_half.h +++ b/include/linux/bottom_half.h @@ -1,10 +1,9 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_BH_H #define _LINUX_BH_H #include -#if defined(CONFIG_PREEMPT_RT) || defined(CONFIG_TRACE_IRQFLAGS) +#ifdef CONFIG_TRACE_IRQFLAGS extern void __local_bh_disable_ip(unsigned long ip, unsigned int cnt); #else static __always_inline void __local_bh_disable_ip(unsigned long ip, unsigned int cnt) @@ -32,10 +31,4 @@ static inline void local_bh_enable(void) __local_bh_enable_ip(_THIS_IP_, SOFTIRQ_DISABLE_OFFSET); } -#ifdef CONFIG_PREEMPT_RT -extern bool local_bh_blocked(void); -#else -static inline bool local_bh_blocked(void) { return false; } -#endif - #endif /* _LINUX_BH_H */ diff --git a/include/linux/bpf.h b/include/linux/bpf.h index 3db6f6c954..c201017b57 100644 --- a/include/linux/bpf.h +++ b/include/linux/bpf.h @@ -1,299 +1,60 @@ -/* SPDX-License-Identifier: 
GPL-2.0-only */ /* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of version 2 of the GNU General Public + * License as published by the Free Software Foundation. */ #ifndef _LINUX_BPF_H #define _LINUX_BPF_H 1 #include - #include #include #include #include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -struct bpf_verifier_env; -struct bpf_verifier_log; struct perf_event; -struct bpf_prog; -struct bpf_prog_aux; struct bpf_map; -struct sock; -struct seq_file; -struct btf; -struct btf_type; -struct exception_table_entry; -struct seq_operations; -struct bpf_iter_aux_info; -struct bpf_local_storage; -struct bpf_local_storage_map; -struct kobject; -struct mem_cgroup; -struct module; -struct bpf_func_state; -extern struct idr btf_idr; -extern spinlock_t btf_idr_lock; -extern struct kobject *btf_kobj; - -typedef int (*bpf_iter_init_seq_priv_t)(void *private_data, - struct bpf_iter_aux_info *aux); -typedef void (*bpf_iter_fini_seq_priv_t)(void *private_data); -struct bpf_iter_seq_info { - const struct seq_operations *seq_ops; - bpf_iter_init_seq_priv_t init_seq_private; - bpf_iter_fini_seq_priv_t fini_seq_private; - u32 seq_priv_size; -}; - -/* map is generic key/value storage optionally accessible by eBPF programs */ +/* map is generic key/value storage optionally accesible by eBPF programs */ struct bpf_map_ops { /* funcs callable from userspace (via syscall) */ - int (*map_alloc_check)(union bpf_attr *attr); struct bpf_map *(*map_alloc)(union bpf_attr *attr); void (*map_release)(struct bpf_map *map, struct file *map_file); void (*map_free)(struct bpf_map *map); int (*map_get_next_key)(struct bpf_map *map, void *key, void *next_key); - void (*map_release_uref)(struct bpf_map *map); - void *(*map_lookup_elem_sys_only)(struct bpf_map *map, void *key); - int (*map_lookup_batch)(struct 
bpf_map *map, const union bpf_attr *attr, - union bpf_attr __user *uattr); - int (*map_lookup_and_delete_elem)(struct bpf_map *map, void *key, - void *value, u64 flags); - int (*map_lookup_and_delete_batch)(struct bpf_map *map, - const union bpf_attr *attr, - union bpf_attr __user *uattr); - int (*map_update_batch)(struct bpf_map *map, const union bpf_attr *attr, - union bpf_attr __user *uattr); - int (*map_delete_batch)(struct bpf_map *map, const union bpf_attr *attr, - union bpf_attr __user *uattr); /* funcs callable from userspace and from eBPF programs */ void *(*map_lookup_elem)(struct bpf_map *map, void *key); int (*map_update_elem)(struct bpf_map *map, void *key, void *value, u64 flags); int (*map_delete_elem)(struct bpf_map *map, void *key); - int (*map_push_elem)(struct bpf_map *map, void *value, u64 flags); - int (*map_pop_elem)(struct bpf_map *map, void *value); - int (*map_peek_elem)(struct bpf_map *map, void *value); /* funcs called by prog_array and perf_event_array map */ void *(*map_fd_get_ptr)(struct bpf_map *map, struct file *map_file, int fd); void (*map_fd_put_ptr)(void *ptr); - int (*map_gen_lookup)(struct bpf_map *map, struct bpf_insn *insn_buf); - u32 (*map_fd_sys_lookup_elem)(void *ptr); - void (*map_seq_show_elem)(struct bpf_map *map, void *key, - struct seq_file *m); - int (*map_check_btf)(const struct bpf_map *map, - const struct btf *btf, - const struct btf_type *key_type, - const struct btf_type *value_type); - - /* Prog poke tracking helpers. */ - int (*map_poke_track)(struct bpf_map *map, struct bpf_prog_aux *aux); - void (*map_poke_untrack)(struct bpf_map *map, struct bpf_prog_aux *aux); - void (*map_poke_run)(struct bpf_map *map, u32 key, struct bpf_prog *old, - struct bpf_prog *new); - - /* Direct value access helpers. 
*/ - int (*map_direct_value_addr)(const struct bpf_map *map, - u64 *imm, u32 off); - int (*map_direct_value_meta)(const struct bpf_map *map, - u64 imm, u32 *off); - int (*map_mmap)(struct bpf_map *map, struct vm_area_struct *vma); - __poll_t (*map_poll)(struct bpf_map *map, struct file *filp, - struct poll_table_struct *pts); - - /* Functions called by bpf_local_storage maps */ - int (*map_local_storage_charge)(struct bpf_local_storage_map *smap, - void *owner, u32 size); - void (*map_local_storage_uncharge)(struct bpf_local_storage_map *smap, - void *owner, u32 size); - struct bpf_local_storage __rcu ** (*map_owner_storage_ptr)(void *owner); - - /* Misc helpers.*/ - int (*map_redirect)(struct bpf_map *map, u32 ifindex, u64 flags); - - /* map_meta_equal must be implemented for maps that can be - * used as an inner map. It is a runtime check to ensure - * an inner map can be inserted to an outer map. - * - * Some properties of the inner map has been used during the - * verification time. When inserting an inner map at the runtime, - * map_meta_equal has to ensure the inserting map has the same - * properties that the verifier has used earlier. - */ - bool (*map_meta_equal)(const struct bpf_map *meta0, - const struct bpf_map *meta1); - - - int (*map_set_for_each_callback_args)(struct bpf_verifier_env *env, - struct bpf_func_state *caller, - struct bpf_func_state *callee); - int (*map_for_each_callback)(struct bpf_map *map, void *callback_fn, - void *callback_ctx, u64 flags); - - /* BTF name and id of struct allocated by map_alloc */ - const char * const map_btf_name; - int *map_btf_id; - - /* bpf_iter info used to open a seq_file */ - const struct bpf_iter_seq_info *iter_seq_info; }; struct bpf_map { - /* The first two cachelines with read-mostly members of which some - * are also accessed in fast-path (e.g. ops, max_entries). 
- */ - const struct bpf_map_ops *ops ____cacheline_aligned; - struct bpf_map *inner_map_meta; -#ifdef CONFIG_SECURITY - void *security; -#endif + atomic_t refcnt; enum bpf_map_type map_type; u32 key_size; u32 value_size; u32 max_entries; u32 map_flags; - int spin_lock_off; /* >=0 valid offset, <0 error */ - int timer_off; /* >=0 valid offset, <0 error */ - u32 id; - int numa_node; - u32 btf_key_type_id; - u32 btf_value_type_id; - struct btf *btf; -#ifdef CONFIG_MEMCG_KMEM - struct mem_cgroup *memcg; -#endif - char name[BPF_OBJ_NAME_LEN]; - u32 btf_vmlinux_value_type_id; - bool bypass_spec_v1; - bool frozen; /* write-once; write-protected by freeze_mutex */ - /* 22 bytes hole */ - - /* The 3rd and 4th cacheline with misc members to avoid false sharing - * particularly with refcounting. - */ - atomic64_t refcnt ____cacheline_aligned; - atomic64_t usercnt; + u32 pages; + struct user_struct *user; + const struct bpf_map_ops *ops; struct work_struct work; - struct mutex freeze_mutex; - u64 writecnt; /* writable mmap cnt; protected by freeze_mutex */ + atomic_t usercnt; }; -static inline bool map_value_has_spin_lock(const struct bpf_map *map) -{ - return map->spin_lock_off >= 0; -} - -static inline bool map_value_has_timer(const struct bpf_map *map) -{ - return map->timer_off >= 0; -} - -static inline void check_and_init_map_value(struct bpf_map *map, void *dst) -{ - if (unlikely(map_value_has_spin_lock(map))) - *(struct bpf_spin_lock *)(dst + map->spin_lock_off) = - (struct bpf_spin_lock){}; - if (unlikely(map_value_has_timer(map))) - *(struct bpf_timer *)(dst + map->timer_off) = - (struct bpf_timer){}; -} - -/* copy everything but bpf_spin_lock and bpf_timer. There could be one of each. 
*/ -static inline void copy_map_value(struct bpf_map *map, void *dst, void *src) -{ - u32 s_off = 0, s_sz = 0, t_off = 0, t_sz = 0; - - if (unlikely(map_value_has_spin_lock(map))) { - s_off = map->spin_lock_off; - s_sz = sizeof(struct bpf_spin_lock); - } else if (unlikely(map_value_has_timer(map))) { - t_off = map->timer_off; - t_sz = sizeof(struct bpf_timer); - } - - if (unlikely(s_sz || t_sz)) { - if (s_off < t_off || !s_sz) { - swap(s_off, t_off); - swap(s_sz, t_sz); - } - memcpy(dst, src, t_off); - memcpy(dst + t_off + t_sz, - src + t_off + t_sz, - s_off - t_off - t_sz); - memcpy(dst + s_off + s_sz, - src + s_off + s_sz, - map->value_size - s_off - s_sz); - } else { - memcpy(dst, src, map->value_size); - } -} -void copy_map_value_locked(struct bpf_map *map, void *dst, void *src, - bool lock_src); -void bpf_timer_cancel_and_free(void *timer); -int bpf_obj_name_cpy(char *dst, const char *src, unsigned int size); - -struct bpf_offload_dev; -struct bpf_offloaded_map; - -struct bpf_map_dev_ops { - int (*map_get_next_key)(struct bpf_offloaded_map *map, - void *key, void *next_key); - int (*map_lookup_elem)(struct bpf_offloaded_map *map, - void *key, void *value); - int (*map_update_elem)(struct bpf_offloaded_map *map, - void *key, void *value, u64 flags); - int (*map_delete_elem)(struct bpf_offloaded_map *map, void *key); +struct bpf_map_type_list { + struct list_head list_node; + const struct bpf_map_ops *ops; + enum bpf_map_type type; }; -struct bpf_offloaded_map { - struct bpf_map map; - struct net_device *netdev; - const struct bpf_map_dev_ops *dev_ops; - void *dev_priv; - struct list_head offloads; -}; - -static inline struct bpf_offloaded_map *map_to_offmap(struct bpf_map *map) -{ - return container_of(map, struct bpf_offloaded_map, map); -} - -static inline bool bpf_map_offload_neutral(const struct bpf_map *map) -{ - return map->map_type == BPF_MAP_TYPE_PERF_EVENT_ARRAY; -} - -static inline bool bpf_map_support_seq_show(const struct bpf_map *map) -{ - return 
(map->btf_value_type_id || map->btf_vmlinux_value_type_id) && - map->ops->map_seq_show_elem; -} - -int map_check_no_btf(const struct bpf_map *map, - const struct btf *btf, - const struct btf_type *key_type, - const struct btf_type *value_type); - -bool bpf_map_meta_equal(const struct bpf_map *meta0, - const struct bpf_map *meta1); - -extern const struct bpf_map_ops bpf_map_offload_ops; - /* function argument constraints */ enum bpf_arg_type { ARG_DONTCARE = 0, /* unused argument in helper function */ @@ -304,58 +65,28 @@ enum bpf_arg_type { ARG_CONST_MAP_PTR, /* const argument used as pointer to bpf_map */ ARG_PTR_TO_MAP_KEY, /* pointer to stack used as map key */ ARG_PTR_TO_MAP_VALUE, /* pointer to stack used as map value */ - ARG_PTR_TO_UNINIT_MAP_VALUE, /* pointer to valid memory used to store a map value */ - ARG_PTR_TO_MAP_VALUE_OR_NULL, /* pointer to stack used as map value or NULL */ /* the following constraints used to prototype bpf_memcmp() and other * functions that access data on eBPF program stack */ - ARG_PTR_TO_MEM, /* pointer to valid memory (stack, packet, map value) */ - ARG_PTR_TO_MEM_OR_NULL, /* pointer to valid memory or NULL */ - ARG_PTR_TO_UNINIT_MEM, /* pointer to memory does not need to be initialized, - * helper function must fill all bytes or clear - * them in error case. + ARG_PTR_TO_STACK, /* any pointer to eBPF program stack */ + ARG_PTR_TO_RAW_STACK, /* any pointer to eBPF program stack, area does not + * need to be initialized, helper function must fill + * all bytes or clear them in error case. 
*/ - ARG_CONST_SIZE, /* number of bytes accessed from memory */ - ARG_CONST_SIZE_OR_ZERO, /* number of bytes accessed from memory or 0 */ + ARG_CONST_STACK_SIZE, /* number of bytes accessed from stack */ + ARG_CONST_STACK_SIZE_OR_ZERO, /* number of bytes accessed from stack or 0 */ ARG_PTR_TO_CTX, /* pointer to context */ - ARG_PTR_TO_CTX_OR_NULL, /* pointer to context or NULL */ ARG_ANYTHING, /* any (initialized) argument is ok */ - ARG_PTR_TO_SPIN_LOCK, /* pointer to bpf_spin_lock */ - ARG_PTR_TO_SOCK_COMMON, /* pointer to sock_common */ - ARG_PTR_TO_INT, /* pointer to int */ - ARG_PTR_TO_LONG, /* pointer to long */ - ARG_PTR_TO_SOCKET, /* pointer to bpf_sock (fullsock) */ - ARG_PTR_TO_SOCKET_OR_NULL, /* pointer to bpf_sock (fullsock) or NULL */ - ARG_PTR_TO_BTF_ID, /* pointer to in-kernel struct */ - ARG_PTR_TO_ALLOC_MEM, /* pointer to dynamically allocated memory */ - ARG_PTR_TO_ALLOC_MEM_OR_NULL, /* pointer to dynamically allocated memory or NULL */ - ARG_CONST_ALLOC_SIZE_OR_ZERO, /* number of allocated bytes requested */ - ARG_PTR_TO_BTF_ID_SOCK_COMMON, /* pointer to in-kernel sock_common or bpf-mirrored bpf_sock */ - ARG_PTR_TO_PERCPU_BTF_ID, /* pointer to in-kernel percpu type */ - ARG_PTR_TO_FUNC, /* pointer to a bpf program function */ - ARG_PTR_TO_STACK_OR_NULL, /* pointer to stack or NULL */ - ARG_PTR_TO_CONST_STR, /* pointer to a null terminated read-only string */ - ARG_PTR_TO_TIMER, /* pointer to bpf_timer */ - __BPF_ARG_TYPE_MAX, }; /* type of values returned from helper functions */ enum bpf_return_type { RET_INTEGER, /* function returns integer */ RET_VOID, /* function doesn't return anything */ - RET_PTR_TO_MAP_VALUE, /* returns a pointer to map elem value */ RET_PTR_TO_MAP_VALUE_OR_NULL, /* returns a pointer to map elem value or NULL */ - RET_PTR_TO_SOCKET_OR_NULL, /* returns a pointer to a socket or NULL */ - RET_PTR_TO_TCP_SOCK_OR_NULL, /* returns a pointer to a tcp_sock or NULL */ - RET_PTR_TO_SOCK_COMMON_OR_NULL, /* returns a pointer to a 
sock_common or NULL */ - RET_PTR_TO_ALLOC_MEM_OR_NULL, /* returns a pointer to dynamically allocated memory or NULL */ - RET_PTR_TO_BTF_ID_OR_NULL, /* returns a pointer to a btf_id or NULL */ - RET_PTR_TO_MEM_OR_BTF_ID_OR_NULL, /* returns a pointer to a valid memory or a btf_id or NULL */ - RET_PTR_TO_MEM_OR_BTF_ID, /* returns a pointer to a valid memory or a btf_id */ - RET_PTR_TO_BTF_ID, /* returns a pointer to a btf_id */ }; /* eBPF function prototype used by verifier to allow BPF_CALLs from eBPF programs @@ -367,28 +98,11 @@ struct bpf_func_proto { bool gpl_only; bool pkt_access; enum bpf_return_type ret_type; - union { - struct { - enum bpf_arg_type arg1_type; - enum bpf_arg_type arg2_type; - enum bpf_arg_type arg3_type; - enum bpf_arg_type arg4_type; - enum bpf_arg_type arg5_type; - }; - enum bpf_arg_type arg_type[5]; - }; - union { - struct { - u32 *arg1_btf_id; - u32 *arg2_btf_id; - u32 *arg3_btf_id; - u32 *arg4_btf_id; - u32 *arg5_btf_id; - }; - u32 *arg_btf_id[5]; - }; - int *ret_btf_id; /* return value btf_id */ - bool (*allowed)(const struct bpf_prog *prog); + enum bpf_arg_type arg1_type; + enum bpf_arg_type arg2_type; + enum bpf_arg_type arg3_type; + enum bpf_arg_type arg4_type; + enum bpf_arg_type arg5_type; }; /* bpf_context is intentionally undefined structure. 
Pointer to bpf_context is @@ -403,645 +117,85 @@ enum bpf_access_type { }; /* types of values stored in eBPF registers */ -/* Pointer types represent: - * pointer - * pointer + imm - * pointer + (u16) var - * pointer + (u16) var + imm - * if (range > 0) then [ptr, ptr + range - off) is safe to access - * if (id > 0) means that some 'var' was added - * if (off > 0) means that 'imm' was added - */ enum bpf_reg_type { NOT_INIT = 0, /* nothing was written into register */ - SCALAR_VALUE, /* reg doesn't contain a valid pointer */ + UNKNOWN_VALUE, /* reg doesn't contain a valid pointer */ PTR_TO_CTX, /* reg points to bpf_context */ CONST_PTR_TO_MAP, /* reg points to struct bpf_map */ PTR_TO_MAP_VALUE, /* reg points to map element value */ PTR_TO_MAP_VALUE_OR_NULL,/* points to map elem value or NULL */ - PTR_TO_STACK, /* reg == frame_pointer + offset */ - PTR_TO_PACKET_META, /* skb->data - meta_len */ - PTR_TO_PACKET, /* reg points to skb->data */ + FRAME_PTR, /* reg == frame_pointer */ + PTR_TO_STACK, /* reg == frame_pointer + imm */ + CONST_IMM, /* constant integer value */ + + /* PTR_TO_PACKET represents: + * skb->data + * skb->data + imm + * skb->data + (u16) var + * skb->data + (u16) var + imm + * if (range > 0) then [ptr, ptr + range - off) is safe to access + * if (id > 0) means that some 'var' was added + * if (off > 0) menas that 'imm' was added + */ + PTR_TO_PACKET, PTR_TO_PACKET_END, /* skb->data + headlen */ - PTR_TO_FLOW_KEYS, /* reg points to bpf_flow_keys */ - PTR_TO_SOCKET, /* reg points to struct bpf_sock */ - PTR_TO_SOCKET_OR_NULL, /* reg points to struct bpf_sock or NULL */ - PTR_TO_SOCK_COMMON, /* reg points to sock_common */ - PTR_TO_SOCK_COMMON_OR_NULL, /* reg points to sock_common or NULL */ - PTR_TO_TCP_SOCK, /* reg points to struct tcp_sock */ - PTR_TO_TCP_SOCK_OR_NULL, /* reg points to struct tcp_sock or NULL */ - PTR_TO_TP_BUFFER, /* reg points to a writable raw tp's buffer */ - PTR_TO_XDP_SOCK, /* reg points to struct xdp_sock */ - /* 
PTR_TO_BTF_ID points to a kernel struct that does not need - * to be null checked by the BPF program. This does not imply the - * pointer is _not_ null and in practice this can easily be a null - * pointer when reading pointer chains. The assumption is program - * context will handle null pointer dereference typically via fault - * handling. The verifier must keep this in mind and can make no - * assumptions about null or non-null when doing branch analysis. - * Further, when passed into helpers the helpers can not, without - * additional context, assume the value is non-null. + + /* PTR_TO_MAP_VALUE_ADJ is used for doing pointer math inside of a map + * elem value. We only allow this if we can statically verify that + * access from this register are going to fall within the size of the + * map element. */ - PTR_TO_BTF_ID, - /* PTR_TO_BTF_ID_OR_NULL points to a kernel struct that has not - * been checked for null. Used primarily to inform the verifier - * an explicit null check is required for this struct. - */ - PTR_TO_BTF_ID_OR_NULL, - PTR_TO_MEM, /* reg points to valid memory region */ - PTR_TO_MEM_OR_NULL, /* reg points to valid memory region or NULL */ - PTR_TO_RDONLY_BUF, /* reg points to a readonly buffer */ - PTR_TO_RDONLY_BUF_OR_NULL, /* reg points to a readonly buffer or NULL */ - PTR_TO_RDWR_BUF, /* reg points to a read/write buffer */ - PTR_TO_RDWR_BUF_OR_NULL, /* reg points to a read/write buffer or NULL */ - PTR_TO_PERCPU_BTF_ID, /* reg points to a percpu kernel variable */ - PTR_TO_FUNC, /* reg points to a bpf program function */ - PTR_TO_MAP_KEY, /* reg points to a map element key */ - __BPF_REG_TYPE_MAX, + PTR_TO_MAP_VALUE_ADJ, }; -/* The information passed from prog-specific *_is_valid_access - * back to the verifier. 
- */ -struct bpf_insn_access_aux { - enum bpf_reg_type reg_type; - union { - int ctx_field_size; - struct { - struct btf *btf; - u32 btf_id; - }; - }; - struct bpf_verifier_log *log; /* for verbose logs */ -}; - -static inline void -bpf_ctx_record_field_size(struct bpf_insn_access_aux *aux, u32 size) -{ - aux->ctx_field_size = size; -} - -struct bpf_prog_ops { - int (*test_run)(struct bpf_prog *prog, const union bpf_attr *kattr, - union bpf_attr __user *uattr); -}; +struct bpf_prog; struct bpf_verifier_ops { /* return eBPF function prototype for verification */ - const struct bpf_func_proto * - (*get_func_proto)(enum bpf_func_id func_id, - const struct bpf_prog *prog); + const struct bpf_func_proto *(*get_func_proto)(enum bpf_func_id func_id); /* return true if 'size' wide access at offset 'off' within bpf_context * with 'type' (read or write) is allowed */ bool (*is_valid_access)(int off, int size, enum bpf_access_type type, - const struct bpf_prog *prog, - struct bpf_insn_access_aux *info); + enum bpf_reg_type *reg_type); int (*gen_prologue)(struct bpf_insn *insn, bool direct_write, const struct bpf_prog *prog); - int (*gen_ld_abs)(const struct bpf_insn *orig, - struct bpf_insn *insn_buf); - u32 (*convert_ctx_access)(enum bpf_access_type type, - const struct bpf_insn *src, - struct bpf_insn *dst, - struct bpf_prog *prog, u32 *target_size); - int (*btf_struct_access)(struct bpf_verifier_log *log, - const struct btf *btf, - const struct btf_type *t, int off, int size, - enum bpf_access_type atype, - u32 *next_btf_id); - bool (*check_kfunc_call)(u32 kfunc_btf_id); + u32 (*convert_ctx_access)(enum bpf_access_type type, int dst_reg, + int src_reg, int ctx_off, + struct bpf_insn *insn, struct bpf_prog *prog); }; -struct bpf_prog_offload_ops { - /* verifier basic callbacks */ - int (*insn_hook)(struct bpf_verifier_env *env, - int insn_idx, int prev_insn_idx); - int (*finalize)(struct bpf_verifier_env *env); - /* verifier optimization callbacks (called after .finalize) 
*/ - int (*replace_insn)(struct bpf_verifier_env *env, u32 off, - struct bpf_insn *insn); - int (*remove_insns)(struct bpf_verifier_env *env, u32 off, u32 cnt); - /* program management callbacks */ - int (*prepare)(struct bpf_prog *prog); - int (*translate)(struct bpf_prog *prog); - void (*destroy)(struct bpf_prog *prog); +struct bpf_prog_type_list { + struct list_head list_node; + const struct bpf_verifier_ops *ops; + enum bpf_prog_type type; }; -struct bpf_prog_offload { - struct bpf_prog *prog; - struct net_device *netdev; - struct bpf_offload_dev *offdev; - void *dev_priv; - struct list_head offloads; - bool dev_state; - bool opt_failed; - void *jited_image; - u32 jited_len; -}; - -enum bpf_cgroup_storage_type { - BPF_CGROUP_STORAGE_SHARED, - BPF_CGROUP_STORAGE_PERCPU, - __BPF_CGROUP_STORAGE_MAX -}; - -#define MAX_BPF_CGROUP_STORAGE_TYPE __BPF_CGROUP_STORAGE_MAX - -/* The longest tracepoint has 12 args. - * See include/trace/bpf_probe.h - */ -#define MAX_BPF_FUNC_ARGS 12 - -/* The maximum number of arguments passed through registers - * a single function may have. - */ -#define MAX_BPF_FUNC_REG_ARGS 5 - -struct btf_func_model { - u8 ret_size; - u8 nr_args; - u8 arg_size[MAX_BPF_FUNC_ARGS]; -}; - -/* Restore arguments before returning from trampoline to let original function - * continue executing. This flag is used for fentry progs when there are no - * fexit progs. - */ -#define BPF_TRAMP_F_RESTORE_REGS BIT(0) -/* Call original function after fentry progs, but before fexit progs. - * Makes sense for fentry/fexit, normal calls and indirect calls. - */ -#define BPF_TRAMP_F_CALL_ORIG BIT(1) -/* Skip current frame and return to parent. Makes sense for fentry/fexit - * programs only. Should not be used with normal calls and indirect calls. - */ -#define BPF_TRAMP_F_SKIP_FRAME BIT(2) -/* Store IP address of the caller on the trampoline stack, - * so it's available for trampoline's programs. 
- */ -#define BPF_TRAMP_F_IP_ARG BIT(3) -/* Return the return value of fentry prog. Only used by bpf_struct_ops. */ -#define BPF_TRAMP_F_RET_FENTRY_RET BIT(4) - -/* Each call __bpf_prog_enter + call bpf_func + call __bpf_prog_exit is ~50 - * bytes on x86. Pick a number to fit into BPF_IMAGE_SIZE / 2 - */ -#define BPF_MAX_TRAMP_PROGS 38 - -struct bpf_tramp_progs { - struct bpf_prog *progs[BPF_MAX_TRAMP_PROGS]; - int nr_progs; -}; - -/* Different use cases for BPF trampoline: - * 1. replace nop at the function entry (kprobe equivalent) - * flags = BPF_TRAMP_F_RESTORE_REGS - * fentry = a set of programs to run before returning from trampoline - * - * 2. replace nop at the function entry (kprobe + kretprobe equivalent) - * flags = BPF_TRAMP_F_CALL_ORIG | BPF_TRAMP_F_SKIP_FRAME - * orig_call = fentry_ip + MCOUNT_INSN_SIZE - * fentry = a set of program to run before calling original function - * fexit = a set of program to run after original function - * - * 3. replace direct call instruction anywhere in the function body - * or assign a function pointer for indirect call (like tcp_congestion_ops->cong_avoid) - * With flags = 0 - * fentry = a set of programs to run before returning from trampoline - * With flags = BPF_TRAMP_F_CALL_ORIG - * orig_call = original callback addr or direct function addr - * fentry = a set of program to run before calling original function - * fexit = a set of program to run after original function - */ -struct bpf_tramp_image; -int arch_prepare_bpf_trampoline(struct bpf_tramp_image *tr, void *image, void *image_end, - const struct btf_func_model *m, u32 flags, - struct bpf_tramp_progs *tprogs, - void *orig_call); -/* these two functions are called from generated trampoline */ -u64 notrace __bpf_prog_enter(struct bpf_prog *prog); -void notrace __bpf_prog_exit(struct bpf_prog *prog, u64 start); -u64 notrace __bpf_prog_enter_sleepable(struct bpf_prog *prog); -void notrace __bpf_prog_exit_sleepable(struct bpf_prog *prog, u64 start); -void notrace 
__bpf_tramp_enter(struct bpf_tramp_image *tr); -void notrace __bpf_tramp_exit(struct bpf_tramp_image *tr); - -struct bpf_ksym { - unsigned long start; - unsigned long end; - char name[KSYM_NAME_LEN]; - struct list_head lnode; - struct latch_tree_node tnode; - bool prog; -}; - -enum bpf_tramp_prog_type { - BPF_TRAMP_FENTRY, - BPF_TRAMP_FEXIT, - BPF_TRAMP_MODIFY_RETURN, - BPF_TRAMP_MAX, - BPF_TRAMP_REPLACE, /* more than MAX */ -}; - -struct bpf_tramp_image { - void *image; - struct bpf_ksym ksym; - struct percpu_ref pcref; - void *ip_after_call; - void *ip_epilogue; - union { - struct rcu_head rcu; - struct work_struct work; - }; -}; - -struct bpf_trampoline { - /* hlist for trampoline_table */ - struct hlist_node hlist; - /* serializes access to fields of this trampoline */ - struct mutex mutex; - refcount_t refcnt; - u64 key; - struct { - struct btf_func_model model; - void *addr; - bool ftrace_managed; - } func; - /* if !NULL this is BPF_PROG_TYPE_EXT program that extends another BPF - * program by replacing one of its functions. func.addr is the address - * of the function it replaced. - */ - struct bpf_prog *extension_prog; - /* list of BPF programs using this trampoline */ - struct hlist_head progs_hlist[BPF_TRAMP_MAX]; - /* Number of attached programs. A counter per kind. 
*/ - int progs_cnt[BPF_TRAMP_MAX]; - /* Executable image of trampoline */ - struct bpf_tramp_image *cur_image; - u64 selector; - struct module *mod; -}; - -struct bpf_attach_target_info { - struct btf_func_model fmodel; - long tgt_addr; - const char *tgt_name; - const struct btf_type *tgt_type; -}; - -#define BPF_DISPATCHER_MAX 48 /* Fits in 2048B */ - -struct bpf_dispatcher_prog { - struct bpf_prog *prog; - refcount_t users; -}; - -struct bpf_dispatcher { - /* dispatcher mutex */ - struct mutex mutex; - void *func; - struct bpf_dispatcher_prog progs[BPF_DISPATCHER_MAX]; - int num_progs; - void *image; - u32 image_off; - struct bpf_ksym ksym; -}; - -static __always_inline __nocfi unsigned int bpf_dispatcher_nop_func( - const void *ctx, - const struct bpf_insn *insnsi, - unsigned int (*bpf_func)(const void *, - const struct bpf_insn *)) -{ - return bpf_func(ctx, insnsi); -} -#ifdef CONFIG_BPF_JIT -int bpf_trampoline_link_prog(struct bpf_prog *prog, struct bpf_trampoline *tr); -int bpf_trampoline_unlink_prog(struct bpf_prog *prog, struct bpf_trampoline *tr); -struct bpf_trampoline *bpf_trampoline_get(u64 key, - struct bpf_attach_target_info *tgt_info); -void bpf_trampoline_put(struct bpf_trampoline *tr); -#define BPF_DISPATCHER_INIT(_name) { \ - .mutex = __MUTEX_INITIALIZER(_name.mutex), \ - .func = &_name##_func, \ - .progs = {}, \ - .num_progs = 0, \ - .image = NULL, \ - .image_off = 0, \ - .ksym = { \ - .name = #_name, \ - .lnode = LIST_HEAD_INIT(_name.ksym.lnode), \ - }, \ -} - -#define DEFINE_BPF_DISPATCHER(name) \ - noinline __nocfi unsigned int bpf_dispatcher_##name##_func( \ - const void *ctx, \ - const struct bpf_insn *insnsi, \ - unsigned int (*bpf_func)(const void *, \ - const struct bpf_insn *)) \ - { \ - return bpf_func(ctx, insnsi); \ - } \ - EXPORT_SYMBOL(bpf_dispatcher_##name##_func); \ - struct bpf_dispatcher bpf_dispatcher_##name = \ - BPF_DISPATCHER_INIT(bpf_dispatcher_##name); -#define DECLARE_BPF_DISPATCHER(name) \ - unsigned int 
bpf_dispatcher_##name##_func( \ - const void *ctx, \ - const struct bpf_insn *insnsi, \ - unsigned int (*bpf_func)(const void *, \ - const struct bpf_insn *)); \ - extern struct bpf_dispatcher bpf_dispatcher_##name; -#define BPF_DISPATCHER_FUNC(name) bpf_dispatcher_##name##_func -#define BPF_DISPATCHER_PTR(name) (&bpf_dispatcher_##name) -void bpf_dispatcher_change_prog(struct bpf_dispatcher *d, struct bpf_prog *from, - struct bpf_prog *to); -/* Called only from JIT-enabled code, so there's no need for stubs. */ -void *bpf_jit_alloc_exec_page(void); -void bpf_image_ksym_add(void *data, struct bpf_ksym *ksym); -void bpf_image_ksym_del(struct bpf_ksym *ksym); -void bpf_ksym_add(struct bpf_ksym *ksym); -void bpf_ksym_del(struct bpf_ksym *ksym); -int bpf_jit_charge_modmem(u32 pages); -void bpf_jit_uncharge_modmem(u32 pages); -#else -static inline int bpf_trampoline_link_prog(struct bpf_prog *prog, - struct bpf_trampoline *tr) -{ - return -ENOTSUPP; -} -static inline int bpf_trampoline_unlink_prog(struct bpf_prog *prog, - struct bpf_trampoline *tr) -{ - return -ENOTSUPP; -} -static inline struct bpf_trampoline *bpf_trampoline_get(u64 key, - struct bpf_attach_target_info *tgt_info) -{ - return ERR_PTR(-EOPNOTSUPP); -} -static inline void bpf_trampoline_put(struct bpf_trampoline *tr) {} -#define DEFINE_BPF_DISPATCHER(name) -#define DECLARE_BPF_DISPATCHER(name) -#define BPF_DISPATCHER_FUNC(name) bpf_dispatcher_nop_func -#define BPF_DISPATCHER_PTR(name) NULL -static inline void bpf_dispatcher_change_prog(struct bpf_dispatcher *d, - struct bpf_prog *from, - struct bpf_prog *to) {} -static inline bool is_bpf_image_address(unsigned long address) -{ - return false; -} -#endif - -struct bpf_func_info_aux { - u16 linkage; - bool unreliable; -}; - -enum bpf_jit_poke_reason { - BPF_POKE_REASON_TAIL_CALL, -}; - -/* Descriptor of pokes pointing /into/ the JITed image. 
*/ -struct bpf_jit_poke_descriptor { - void *tailcall_target; - void *tailcall_bypass; - void *bypass_addr; - void *aux; - union { - struct { - struct bpf_map *map; - u32 key; - } tail_call; - }; - bool tailcall_target_stable; - u8 adj_off; - u16 reason; - u32 insn_idx; -}; - -/* reg_type info for ctx arguments */ -struct bpf_ctx_arg_aux { - u32 offset; - enum bpf_reg_type reg_type; - u32 btf_id; -}; - -struct btf_mod_pair { - struct btf *btf; - struct module *module; -}; - -struct bpf_kfunc_desc_tab; - struct bpf_prog_aux { - atomic64_t refcnt; + atomic_t refcnt; u32 used_map_cnt; - u32 used_btf_cnt; u32 max_ctx_offset; - u32 max_pkt_offset; - u32 max_tp_access; - u32 stack_depth; - u32 id; - u32 func_cnt; /* used by non-func prog as the number of func progs */ - u32 func_idx; /* 0 for non-func prog, the index in func array for func prog */ - u32 attach_btf_id; /* in-kernel BTF type id to attach to */ - u32 ctx_arg_info_size; - u32 max_rdonly_access; - u32 max_rdwr_access; - struct btf *attach_btf; - const struct bpf_ctx_arg_aux *ctx_arg_info; - struct mutex dst_mutex; /* protects dst_* pointers below, *after* prog becomes visible */ - struct bpf_prog *dst_prog; - struct bpf_trampoline *dst_trampoline; - enum bpf_prog_type saved_dst_prog_type; - enum bpf_attach_type saved_dst_attach_type; - bool verifier_zext; /* Zero extensions has been inserted by verifier. */ - bool offload_requested; - bool attach_btf_trace; /* true if attaching to BTF-enabled raw tp */ - bool func_proto_unreliable; - bool sleepable; - bool tail_call_reachable; - struct hlist_node tramp_hlist; - /* BTF_KIND_FUNC_PROTO for valid attach_btf_id */ - const struct btf_type *attach_func_proto; - /* function name for valid attach_btf_id */ - const char *attach_func_name; - struct bpf_prog **func; - void *jit_data; /* JIT specific data. 
arch dependent */ - struct bpf_jit_poke_descriptor *poke_tab; - struct bpf_kfunc_desc_tab *kfunc_tab; - u32 size_poke_tab; - struct bpf_ksym ksym; - const struct bpf_prog_ops *ops; + const struct bpf_verifier_ops *ops; struct bpf_map **used_maps; - struct mutex used_maps_mutex; /* mutex for used_maps and used_map_cnt */ - struct btf_mod_pair *used_btfs; struct bpf_prog *prog; struct user_struct *user; - u64 load_time; /* ns since boottime */ - struct bpf_map *cgroup_storage[MAX_BPF_CGROUP_STORAGE_TYPE]; - char name[BPF_OBJ_NAME_LEN]; -#ifdef CONFIG_SECURITY - void *security; -#endif - struct bpf_prog_offload *offload; - struct btf *btf; - struct bpf_func_info *func_info; - struct bpf_func_info_aux *func_info_aux; - /* bpf_line_info loaded from userspace. linfo->insn_off - * has the xlated insn offset. - * Both the main and sub prog share the same linfo. - * The subprog can access its first linfo by - * using the linfo_idx. - */ - struct bpf_line_info *linfo; - /* jited_linfo is the jited addr of the linfo. It has a - * one to one mapping to linfo: - * jited_linfo[i] is the jited addr for the linfo[i]->insn_off. - * Both the main and sub prog share the same jited_linfo. - * The subprog can access its first jited_linfo by - * using the linfo_idx. - */ - void **jited_linfo; - u32 func_info_cnt; - u32 nr_linfo; - /* subprog can use linfo_idx to access its first linfo and - * jited_linfo. - * main prog always has linfo_idx == 0 - */ - u32 linfo_idx; - u32 num_exentries; - struct exception_table_entry *extable; union { struct work_struct work; struct rcu_head rcu; }; }; -struct bpf_array_aux { - /* 'Ownership' of prog array is claimed by the first program that - * is going to use this map or by the first program which FD is - * stored in the map to make sure that all callers and callees have - * the same prog type and JITed flag. 
- */ - struct { - spinlock_t lock; - enum bpf_prog_type type; - bool jited; - } owner; - /* Programs with direct jumps into programs part of this array. */ - struct list_head poke_progs; - struct bpf_map *map; - struct mutex poke_mutex; - struct work_struct work; -}; - -struct bpf_link { - atomic64_t refcnt; - u32 id; - enum bpf_link_type type; - const struct bpf_link_ops *ops; - struct bpf_prog *prog; - struct work_struct work; -}; - -struct bpf_link_ops { - void (*release)(struct bpf_link *link); - void (*dealloc)(struct bpf_link *link); - int (*detach)(struct bpf_link *link); - int (*update_prog)(struct bpf_link *link, struct bpf_prog *new_prog, - struct bpf_prog *old_prog); - void (*show_fdinfo)(const struct bpf_link *link, struct seq_file *seq); - int (*fill_link_info)(const struct bpf_link *link, - struct bpf_link_info *info); -}; - -struct bpf_link_primer { - struct bpf_link *link; - struct file *file; - int fd; - u32 id; -}; - -struct bpf_struct_ops_value; -struct btf_member; - -#define BPF_STRUCT_OPS_MAX_NR_MEMBERS 64 -struct bpf_struct_ops { - const struct bpf_verifier_ops *verifier_ops; - int (*init)(struct btf *btf); - int (*check_member)(const struct btf_type *t, - const struct btf_member *member); - int (*init_member)(const struct btf_type *t, - const struct btf_member *member, - void *kdata, const void *udata); - int (*reg)(void *kdata); - void (*unreg)(void *kdata); - const struct btf_type *type; - const struct btf_type *value_type; - const char *name; - struct btf_func_model func_models[BPF_STRUCT_OPS_MAX_NR_MEMBERS]; - u32 type_id; - u32 value_id; -}; - -#if defined(CONFIG_BPF_JIT) && defined(CONFIG_BPF_SYSCALL) -#define BPF_MODULE_OWNER ((void *)((0xeB9FUL << 2) + POISON_POINTER_DELTA)) -const struct bpf_struct_ops *bpf_struct_ops_find(u32 type_id); -void bpf_struct_ops_init(struct btf *btf, struct bpf_verifier_log *log); -bool bpf_struct_ops_get(const void *kdata); -void bpf_struct_ops_put(const void *kdata); -int 
bpf_struct_ops_map_sys_lookup_elem(struct bpf_map *map, void *key, - void *value); -static inline bool bpf_try_module_get(const void *data, struct module *owner) -{ - if (owner == BPF_MODULE_OWNER) - return bpf_struct_ops_get(data); - else - return try_module_get(owner); -} -static inline void bpf_module_put(const void *data, struct module *owner) -{ - if (owner == BPF_MODULE_OWNER) - bpf_struct_ops_put(data); - else - module_put(owner); -} -#else -static inline const struct bpf_struct_ops *bpf_struct_ops_find(u32 type_id) -{ - return NULL; -} -static inline void bpf_struct_ops_init(struct btf *btf, - struct bpf_verifier_log *log) -{ -} -static inline bool bpf_try_module_get(const void *data, struct module *owner) -{ - return try_module_get(owner); -} -static inline void bpf_module_put(const void *data, struct module *owner) -{ - module_put(owner); -} -static inline int bpf_struct_ops_map_sys_lookup_elem(struct bpf_map *map, - void *key, - void *value) -{ - return -EINVAL; -} -#endif - struct bpf_array { struct bpf_map map; u32 elem_size; - u32 index_mask; - struct bpf_array_aux *aux; + /* 'ownership' of prog_array is claimed by the first program that + * is going to use this map or by the first program which FD is stored + * in the map to make sure that all callers and callees have the same + * prog_type and JITed flag + */ + enum bpf_prog_type owner_prog_type; + bool owner_jited; union { char value[0] __aligned(8); void *ptrs[0] __aligned(8); @@ -1049,38 +203,8 @@ struct bpf_array { }; }; -#define BPF_COMPLEXITY_LIMIT_INSNS 1000000 /* yes. 
1M insns */ #define MAX_TAIL_CALL_CNT 32 -#define BPF_F_ACCESS_MASK (BPF_F_RDONLY | \ - BPF_F_RDONLY_PROG | \ - BPF_F_WRONLY | \ - BPF_F_WRONLY_PROG) - -#define BPF_MAP_CAN_READ BIT(0) -#define BPF_MAP_CAN_WRITE BIT(1) - -static inline u32 bpf_map_flags_to_cap(struct bpf_map *map) -{ - u32 access_flags = map->map_flags & (BPF_F_RDONLY_PROG | BPF_F_WRONLY_PROG); - - /* Combination of BPF_F_RDONLY_PROG | BPF_F_WRONLY_PROG is - * not possible. - */ - if (access_flags & BPF_F_RDONLY_PROG) - return BPF_MAP_CAN_READ; - else if (access_flags & BPF_F_WRONLY_PROG) - return BPF_MAP_CAN_WRITE; - else - return BPF_MAP_CAN_READ | BPF_MAP_CAN_WRITE; -} - -static inline bool bpf_map_flags_access_ok(u32 access_flags) -{ - return (access_flags & (BPF_F_RDONLY_PROG | BPF_F_WRONLY_PROG)) != - (BPF_F_RDONLY_PROG | BPF_F_WRONLY_PROG); -} - struct bpf_event_entry { struct perf_event *event; struct file *perf_file; @@ -1088,460 +212,45 @@ struct bpf_event_entry { struct rcu_head rcu; }; +u64 bpf_tail_call(u64 ctx, u64 r2, u64 index, u64 r4, u64 r5); +u64 bpf_get_stackid(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5); + bool bpf_prog_array_compatible(struct bpf_array *array, const struct bpf_prog *fp); -int bpf_prog_calc_tag(struct bpf_prog *fp); const struct bpf_func_proto *bpf_get_trace_printk_proto(void); typedef unsigned long (*bpf_ctx_copy_t)(void *dst, const void *src, unsigned long off, unsigned long len); -typedef u32 (*bpf_convert_ctx_access_t)(enum bpf_access_type type, - const struct bpf_insn *src, - struct bpf_insn *dst, - struct bpf_prog *prog, - u32 *target_size); u64 bpf_event_output(struct bpf_map *map, u64 flags, void *meta, u64 meta_size, void *ctx, u64 ctx_size, bpf_ctx_copy_t ctx_copy); -/* an array of programs to be executed under rcu_lock. - * - * Typical usage: - * ret = BPF_PROG_RUN_ARRAY(&bpf_prog_array, ctx, bpf_prog_run); - * - * the structure returned by bpf_prog_array_alloc() should be populated - * with program pointers and the last pointer must be NULL. 
- * The user has to keep refcnt on the program and make sure the program - * is removed from the array before bpf_prog_put(). - * The 'struct bpf_prog_array *' should only be replaced with xchg() - * since other cpus are walking the array of pointers in parallel. - */ -struct bpf_prog_array_item { - struct bpf_prog *prog; - union { - struct bpf_cgroup_storage *cgroup_storage[MAX_BPF_CGROUP_STORAGE_TYPE]; - u64 bpf_cookie; - }; -}; - -struct bpf_prog_array { - struct rcu_head rcu; - struct bpf_prog_array_item items[]; -}; - -struct bpf_prog_array *bpf_prog_array_alloc(u32 prog_cnt, gfp_t flags); -void bpf_prog_array_free(struct bpf_prog_array *progs); -int bpf_prog_array_length(struct bpf_prog_array *progs); -bool bpf_prog_array_is_empty(struct bpf_prog_array *array); -int bpf_prog_array_copy_to_user(struct bpf_prog_array *progs, - __u32 __user *prog_ids, u32 cnt); - -void bpf_prog_array_delete_safe(struct bpf_prog_array *progs, - struct bpf_prog *old_prog); -int bpf_prog_array_delete_safe_at(struct bpf_prog_array *array, int index); -int bpf_prog_array_update_at(struct bpf_prog_array *array, int index, - struct bpf_prog *prog); -int bpf_prog_array_copy_info(struct bpf_prog_array *array, - u32 *prog_ids, u32 request_cnt, - u32 *prog_cnt); -int bpf_prog_array_copy(struct bpf_prog_array *old_array, - struct bpf_prog *exclude_prog, - struct bpf_prog *include_prog, - u64 bpf_cookie, - struct bpf_prog_array **new_array); - -struct bpf_run_ctx {}; - -struct bpf_cg_run_ctx { - struct bpf_run_ctx run_ctx; - const struct bpf_prog_array_item *prog_item; -}; - -struct bpf_trace_run_ctx { - struct bpf_run_ctx run_ctx; - u64 bpf_cookie; -}; - -static inline struct bpf_run_ctx *bpf_set_run_ctx(struct bpf_run_ctx *new_ctx) -{ - struct bpf_run_ctx *old_ctx = NULL; - -#ifdef CONFIG_BPF_SYSCALL - old_ctx = current->bpf_ctx; - current->bpf_ctx = new_ctx; -#endif - return old_ctx; -} - -static inline void bpf_reset_run_ctx(struct bpf_run_ctx *old_ctx) -{ -#ifdef CONFIG_BPF_SYSCALL - 
current->bpf_ctx = old_ctx; -#endif -} - -/* BPF program asks to bypass CAP_NET_BIND_SERVICE in bind. */ -#define BPF_RET_BIND_NO_CAP_NET_BIND_SERVICE (1 << 0) -/* BPF program asks to set CN on the packet. */ -#define BPF_RET_SET_CN (1 << 0) - -typedef u32 (*bpf_prog_run_fn)(const struct bpf_prog *prog, const void *ctx); - -static __always_inline u32 -BPF_PROG_RUN_ARRAY_CG_FLAGS(const struct bpf_prog_array __rcu *array_rcu, - const void *ctx, bpf_prog_run_fn run_prog, - u32 *ret_flags) -{ - const struct bpf_prog_array_item *item; - const struct bpf_prog *prog; - const struct bpf_prog_array *array; - struct bpf_run_ctx *old_run_ctx; - struct bpf_cg_run_ctx run_ctx; - u32 ret = 1; - u32 func_ret; - - migrate_disable(); - rcu_read_lock(); - array = rcu_dereference(array_rcu); - item = &array->items[0]; - old_run_ctx = bpf_set_run_ctx(&run_ctx.run_ctx); - while ((prog = READ_ONCE(item->prog))) { - run_ctx.prog_item = item; - func_ret = run_prog(prog, ctx); - ret &= (func_ret & 1); - *(ret_flags) |= (func_ret >> 1); - item++; - } - bpf_reset_run_ctx(old_run_ctx); - rcu_read_unlock(); - migrate_enable(); - return ret; -} - -static __always_inline u32 -BPF_PROG_RUN_ARRAY_CG(const struct bpf_prog_array __rcu *array_rcu, - const void *ctx, bpf_prog_run_fn run_prog) -{ - const struct bpf_prog_array_item *item; - const struct bpf_prog *prog; - const struct bpf_prog_array *array; - struct bpf_run_ctx *old_run_ctx; - struct bpf_cg_run_ctx run_ctx; - u32 ret = 1; - - migrate_disable(); - rcu_read_lock(); - array = rcu_dereference(array_rcu); - item = &array->items[0]; - old_run_ctx = bpf_set_run_ctx(&run_ctx.run_ctx); - while ((prog = READ_ONCE(item->prog))) { - run_ctx.prog_item = item; - ret &= run_prog(prog, ctx); - item++; - } - bpf_reset_run_ctx(old_run_ctx); - rcu_read_unlock(); - migrate_enable(); - return ret; -} - -static __always_inline u32 -BPF_PROG_RUN_ARRAY(const struct bpf_prog_array __rcu *array_rcu, - const void *ctx, bpf_prog_run_fn run_prog) -{ - const struct 
bpf_prog_array_item *item; - const struct bpf_prog *prog; - const struct bpf_prog_array *array; - struct bpf_run_ctx *old_run_ctx; - struct bpf_trace_run_ctx run_ctx; - u32 ret = 1; - - migrate_disable(); - rcu_read_lock(); - array = rcu_dereference(array_rcu); - if (unlikely(!array)) - goto out; - old_run_ctx = bpf_set_run_ctx(&run_ctx.run_ctx); - item = &array->items[0]; - while ((prog = READ_ONCE(item->prog))) { - run_ctx.bpf_cookie = item->bpf_cookie; - ret &= run_prog(prog, ctx); - item++; - } - bpf_reset_run_ctx(old_run_ctx); -out: - rcu_read_unlock(); - migrate_enable(); - return ret; -} - -/* To be used by __cgroup_bpf_run_filter_skb for EGRESS BPF progs - * so BPF programs can request cwr for TCP packets. - * - * Current cgroup skb programs can only return 0 or 1 (0 to drop the - * packet. This macro changes the behavior so the low order bit - * indicates whether the packet should be dropped (0) or not (1) - * and the next bit is a congestion notification bit. This could be - * used by TCP to call tcp_enter_cwr() - * - * Hence, new allowed return values of CGROUP EGRESS BPF programs are: - * 0: drop packet - * 1: keep packet - * 2: drop packet and cn - * 3: keep packet and cn - * - * This macro then converts it to one of the NET_XMIT or an error - * code that is then interpreted as drop packet (and no cn): - * 0: NET_XMIT_SUCCESS skb should be transmitted - * 1: NET_XMIT_DROP skb should be dropped and cn - * 2: NET_XMIT_CN skb should be transmitted and cn - * 3: -EPERM skb should be dropped - */ -#define BPF_PROG_CGROUP_INET_EGRESS_RUN_ARRAY(array, ctx, func) \ - ({ \ - u32 _flags = 0; \ - bool _cn; \ - u32 _ret; \ - _ret = BPF_PROG_RUN_ARRAY_CG_FLAGS(array, ctx, func, &_flags); \ - _cn = _flags & BPF_RET_SET_CN; \ - if (_ret) \ - _ret = (_cn ? NET_XMIT_CN : NET_XMIT_SUCCESS); \ - else \ - _ret = (_cn ? 
NET_XMIT_DROP : -EPERM); \ - _ret; \ - }) - #ifdef CONFIG_BPF_SYSCALL DECLARE_PER_CPU(int, bpf_prog_active); -extern struct mutex bpf_stats_enabled_mutex; -/* - * Block execution of BPF programs attached to instrumentation (perf, - * kprobes, tracepoints) to prevent deadlocks on map operations as any of - * these events can happen inside a region which holds a map bucket lock - * and can deadlock on it. - * - * Use the preemption safe inc/dec variants on RT because migrate disable - * is preemptible on RT and preemption in the middle of the RMW operation - * might lead to inconsistent state. Use the raw variants for non RT - * kernels as migrate_disable() maps to preempt_disable() so the slightly - * more expensive save operation can be avoided. - */ -static inline void bpf_disable_instrumentation(void) -{ - migrate_disable(); - if (IS_ENABLED(CONFIG_PREEMPT_RT)) - this_cpu_inc(bpf_prog_active); - else - __this_cpu_inc(bpf_prog_active); -} - -static inline void bpf_enable_instrumentation(void) -{ - if (IS_ENABLED(CONFIG_PREEMPT_RT)) - this_cpu_dec(bpf_prog_active); - else - __this_cpu_dec(bpf_prog_active); - migrate_enable(); -} - -extern const struct file_operations bpf_map_fops; -extern const struct file_operations bpf_prog_fops; -extern const struct file_operations bpf_iter_fops; - -#define BPF_PROG_TYPE(_id, _name, prog_ctx_type, kern_ctx_type) \ - extern const struct bpf_prog_ops _name ## _prog_ops; \ - extern const struct bpf_verifier_ops _name ## _verifier_ops; -#define BPF_MAP_TYPE(_id, _ops) \ - extern const struct bpf_map_ops _ops; -#define BPF_LINK_TYPE(_id, _name) -#include -#undef BPF_PROG_TYPE -#undef BPF_MAP_TYPE -#undef BPF_LINK_TYPE - -extern const struct bpf_prog_ops bpf_offload_prog_ops; -extern const struct bpf_verifier_ops tc_cls_act_analyzer_ops; -extern const struct bpf_verifier_ops xdp_analyzer_ops; +void bpf_register_prog_type(struct bpf_prog_type_list *tl); +void bpf_register_map_type(struct bpf_map_type_list *tl); struct bpf_prog 
*bpf_prog_get(u32 ufd); -struct bpf_prog *bpf_prog_get_type_dev(u32 ufd, enum bpf_prog_type type, - bool attach_drv); -void bpf_prog_add(struct bpf_prog *prog, int i); -void bpf_prog_sub(struct bpf_prog *prog, int i); -void bpf_prog_inc(struct bpf_prog *prog); -struct bpf_prog * __must_check bpf_prog_inc_not_zero(struct bpf_prog *prog); +struct bpf_prog *bpf_prog_get_type(u32 ufd, enum bpf_prog_type type); +struct bpf_prog *bpf_prog_add(struct bpf_prog *prog, int i); +struct bpf_prog *bpf_prog_inc(struct bpf_prog *prog); void bpf_prog_put(struct bpf_prog *prog); -void bpf_prog_free_id(struct bpf_prog *prog, bool do_idr_lock); -void bpf_map_free_id(struct bpf_map *map, bool do_idr_lock); - -struct bpf_map *bpf_map_get(u32 ufd); struct bpf_map *bpf_map_get_with_uref(u32 ufd); struct bpf_map *__bpf_map_get(struct fd f); -void bpf_map_inc(struct bpf_map *map); -void bpf_map_inc_with_uref(struct bpf_map *map); -struct bpf_map * __must_check bpf_map_inc_not_zero(struct bpf_map *map); +struct bpf_map *bpf_map_inc(struct bpf_map *map, bool uref); void bpf_map_put_with_uref(struct bpf_map *map); void bpf_map_put(struct bpf_map *map); -void *bpf_map_area_alloc(u64 size, int numa_node); -void *bpf_map_area_mmapable_alloc(u64 size, int numa_node); -void bpf_map_area_free(void *base); -void bpf_map_init_from_attr(struct bpf_map *map, union bpf_attr *attr); -int generic_map_lookup_batch(struct bpf_map *map, - const union bpf_attr *attr, - union bpf_attr __user *uattr); -int generic_map_update_batch(struct bpf_map *map, - const union bpf_attr *attr, - union bpf_attr __user *uattr); -int generic_map_delete_batch(struct bpf_map *map, - const union bpf_attr *attr, - union bpf_attr __user *uattr); -struct bpf_map *bpf_map_get_curr_or_next(u32 *id); -struct bpf_prog *bpf_prog_get_curr_or_next(u32 *id); - -#ifdef CONFIG_MEMCG_KMEM -void *bpf_map_kmalloc_node(const struct bpf_map *map, size_t size, gfp_t flags, - int node); -void *bpf_map_kzalloc(const struct bpf_map *map, size_t size, 
gfp_t flags); -void __percpu *bpf_map_alloc_percpu(const struct bpf_map *map, size_t size, - size_t align, gfp_t flags); -#else -static inline void * -bpf_map_kmalloc_node(const struct bpf_map *map, size_t size, gfp_t flags, - int node) -{ - return kmalloc_node(size, flags, node); -} - -static inline void * -bpf_map_kzalloc(const struct bpf_map *map, size_t size, gfp_t flags) -{ - return kzalloc(size, flags); -} - -static inline void __percpu * -bpf_map_alloc_percpu(const struct bpf_map *map, size_t size, size_t align, - gfp_t flags) -{ - return __alloc_percpu_gfp(size, align, flags); -} -#endif +int bpf_map_precharge_memlock(u32 pages); extern int sysctl_unprivileged_bpf_disabled; -static inline bool bpf_allow_ptr_leaks(void) -{ - return perfmon_capable(); -} - -static inline bool bpf_allow_uninit_stack(void) -{ - return perfmon_capable(); -} - -static inline bool bpf_allow_ptr_to_map_access(void) -{ - return perfmon_capable(); -} - -static inline bool bpf_bypass_spec_v1(void) -{ - return perfmon_capable(); -} - -static inline bool bpf_bypass_spec_v4(void) -{ - return perfmon_capable(); -} - -int bpf_map_new_fd(struct bpf_map *map, int flags); +int bpf_map_new_fd(struct bpf_map *map); int bpf_prog_new_fd(struct bpf_prog *prog); -void bpf_link_init(struct bpf_link *link, enum bpf_link_type type, - const struct bpf_link_ops *ops, struct bpf_prog *prog); -int bpf_link_prime(struct bpf_link *link, struct bpf_link_primer *primer); -int bpf_link_settle(struct bpf_link_primer *primer); -void bpf_link_cleanup(struct bpf_link_primer *primer); -void bpf_link_inc(struct bpf_link *link); -void bpf_link_put(struct bpf_link *link); -int bpf_link_new_fd(struct bpf_link *link); -struct file *bpf_link_new_file(struct bpf_link *link, int *reserved_fd); -struct bpf_link *bpf_link_get_from_fd(u32 ufd); - int bpf_obj_pin_user(u32 ufd, const char __user *pathname); -int bpf_obj_get_user(const char __user *pathname, int flags); - -#define BPF_ITER_FUNC_PREFIX "bpf_iter_" -#define 
DEFINE_BPF_ITER_FUNC(target, args...) \ - extern int bpf_iter_ ## target(args); \ - int __init bpf_iter_ ## target(args) { return 0; } - -struct bpf_iter_aux_info { - struct bpf_map *map; -}; - -typedef int (*bpf_iter_attach_target_t)(struct bpf_prog *prog, - union bpf_iter_link_info *linfo, - struct bpf_iter_aux_info *aux); -typedef void (*bpf_iter_detach_target_t)(struct bpf_iter_aux_info *aux); -typedef void (*bpf_iter_show_fdinfo_t) (const struct bpf_iter_aux_info *aux, - struct seq_file *seq); -typedef int (*bpf_iter_fill_link_info_t)(const struct bpf_iter_aux_info *aux, - struct bpf_link_info *info); -typedef const struct bpf_func_proto * -(*bpf_iter_get_func_proto_t)(enum bpf_func_id func_id, - const struct bpf_prog *prog); - -enum bpf_iter_feature { - BPF_ITER_RESCHED = BIT(0), -}; - -#define BPF_ITER_CTX_ARG_MAX 2 -struct bpf_iter_reg { - const char *target; - bpf_iter_attach_target_t attach_target; - bpf_iter_detach_target_t detach_target; - bpf_iter_show_fdinfo_t show_fdinfo; - bpf_iter_fill_link_info_t fill_link_info; - bpf_iter_get_func_proto_t get_func_proto; - u32 ctx_arg_info_size; - u32 feature; - struct bpf_ctx_arg_aux ctx_arg_info[BPF_ITER_CTX_ARG_MAX]; - const struct bpf_iter_seq_info *seq_info; -}; - -struct bpf_iter_meta { - __bpf_md_ptr(struct seq_file *, seq); - u64 session_id; - u64 seq_num; -}; - -struct bpf_iter__bpf_map_elem { - __bpf_md_ptr(struct bpf_iter_meta *, meta); - __bpf_md_ptr(struct bpf_map *, map); - __bpf_md_ptr(void *, key); - __bpf_md_ptr(void *, value); -}; - -int bpf_iter_reg_target(const struct bpf_iter_reg *reg_info); -void bpf_iter_unreg_target(const struct bpf_iter_reg *reg_info); -bool bpf_iter_prog_supported(struct bpf_prog *prog); -const struct bpf_func_proto * -bpf_iter_get_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog); -int bpf_iter_link_attach(const union bpf_attr *attr, bpfptr_t uattr, struct bpf_prog *prog); -int bpf_iter_new_fd(struct bpf_link *link); -bool bpf_link_is_iter(struct 
bpf_link *link); -struct bpf_prog *bpf_iter_get_info(struct bpf_iter_meta *meta, bool in_stop); -int bpf_iter_run_prog(struct bpf_prog *prog, void *ctx); -void bpf_iter_map_show_fdinfo(const struct bpf_iter_aux_info *aux, - struct seq_file *seq); -int bpf_iter_map_fill_link_info(const struct bpf_iter_aux_info *aux, - struct bpf_link_info *info); - -int map_set_for_each_callback_args(struct bpf_verifier_env *env, - struct bpf_func_state *caller, - struct bpf_func_state *callee); +int bpf_obj_get_user(const char __user *pathname); int bpf_percpu_hash_copy(struct bpf_map *map, void *key, void *value); int bpf_percpu_array_copy(struct bpf_map *map, void *key, void *value); @@ -1554,14 +263,7 @@ int bpf_stackmap_copy(struct bpf_map *map, void *key, void *value); int bpf_fd_array_map_update_elem(struct bpf_map *map, struct file *map_file, void *key, void *value, u64 map_flags); -int bpf_fd_array_map_lookup_elem(struct bpf_map *map, void *key, u32 *value); -int bpf_fd_htab_map_update_elem(struct bpf_map *map, struct file *map_file, - void *key, void *value, u64 map_flags); -int bpf_fd_htab_map_lookup_elem(struct bpf_map *map, void *key, u32 *value); - -int bpf_get_file_flag(int flags); -int bpf_check_uarg_tail_zero(bpfptr_t uaddr, size_t expected_size, - size_t actual_size); +void bpf_fd_array_map_clear(struct bpf_map *map); /* memcpy that is used with 8-byte aligned pointers, power-of-8 size and * forced to use 'long' read/writes to try to atomically copy long counters. 
@@ -1580,648 +282,54 @@ static inline void bpf_long_memcpy(void *dst, const void *src, u32 size) } /* verify correctness of eBPF program */ -int bpf_check(struct bpf_prog **fp, union bpf_attr *attr, bpfptr_t uattr); - -#ifndef CONFIG_BPF_JIT_ALWAYS_ON -void bpf_patch_call_args(struct bpf_insn *insn, u32 stack_depth); -#endif - -struct btf *bpf_get_btf_vmlinux(void); - -/* Map specifics */ -struct xdp_buff; -struct sk_buff; -struct bpf_dtab_netdev; -struct bpf_cpu_map_entry; - -void __dev_flush(void); -int dev_xdp_enqueue(struct net_device *dev, struct xdp_buff *xdp, - struct net_device *dev_rx); -int dev_map_enqueue(struct bpf_dtab_netdev *dst, struct xdp_buff *xdp, - struct net_device *dev_rx); -int dev_map_enqueue_multi(struct xdp_buff *xdp, struct net_device *dev_rx, - struct bpf_map *map, bool exclude_ingress); -int dev_map_generic_redirect(struct bpf_dtab_netdev *dst, struct sk_buff *skb, - struct bpf_prog *xdp_prog); -int dev_map_redirect_multi(struct net_device *dev, struct sk_buff *skb, - struct bpf_prog *xdp_prog, struct bpf_map *map, - bool exclude_ingress); - -void __cpu_map_flush(void); -int cpu_map_enqueue(struct bpf_cpu_map_entry *rcpu, struct xdp_buff *xdp, - struct net_device *dev_rx); -int cpu_map_generic_redirect(struct bpf_cpu_map_entry *rcpu, - struct sk_buff *skb); - -/* Return map's numa specified by userspace */ -static inline int bpf_map_attr_numa_node(const union bpf_attr *attr) +int bpf_check(struct bpf_prog **fp, union bpf_attr *attr); +#else +static inline void bpf_register_prog_type(struct bpf_prog_type_list *tl) { - return (attr->map_flags & BPF_F_NUMA_NODE) ? 
- attr->numa_node : NUMA_NO_NODE; } -struct bpf_prog *bpf_prog_get_type_path(const char *name, enum bpf_prog_type type); -int array_map_alloc_check(union bpf_attr *attr); - -int bpf_prog_test_run_xdp(struct bpf_prog *prog, const union bpf_attr *kattr, - union bpf_attr __user *uattr); -int bpf_prog_test_run_skb(struct bpf_prog *prog, const union bpf_attr *kattr, - union bpf_attr __user *uattr); -int bpf_prog_test_run_tracing(struct bpf_prog *prog, - const union bpf_attr *kattr, - union bpf_attr __user *uattr); -int bpf_prog_test_run_flow_dissector(struct bpf_prog *prog, - const union bpf_attr *kattr, - union bpf_attr __user *uattr); -int bpf_prog_test_run_raw_tp(struct bpf_prog *prog, - const union bpf_attr *kattr, - union bpf_attr __user *uattr); -int bpf_prog_test_run_sk_lookup(struct bpf_prog *prog, - const union bpf_attr *kattr, - union bpf_attr __user *uattr); -bool bpf_prog_test_check_kfunc_call(u32 kfunc_id); -bool btf_ctx_access(int off, int size, enum bpf_access_type type, - const struct bpf_prog *prog, - struct bpf_insn_access_aux *info); -int btf_struct_access(struct bpf_verifier_log *log, const struct btf *btf, - const struct btf_type *t, int off, int size, - enum bpf_access_type atype, - u32 *next_btf_id); -bool btf_struct_ids_match(struct bpf_verifier_log *log, - const struct btf *btf, u32 id, int off, - const struct btf *need_btf, u32 need_type_id); - -int btf_distill_func_proto(struct bpf_verifier_log *log, - struct btf *btf, - const struct btf_type *func_proto, - const char *func_name, - struct btf_func_model *m); - -struct bpf_reg_state; -int btf_check_subprog_arg_match(struct bpf_verifier_env *env, int subprog, - struct bpf_reg_state *regs); -int btf_check_kfunc_arg_match(struct bpf_verifier_env *env, - const struct btf *btf, u32 func_id, - struct bpf_reg_state *regs); -int btf_prepare_func_args(struct bpf_verifier_env *env, int subprog, - struct bpf_reg_state *reg); -int btf_check_type_match(struct bpf_verifier_log *log, const struct bpf_prog 
*prog, - struct btf *btf, const struct btf_type *t); - -struct bpf_prog *bpf_prog_by_id(u32 id); -struct bpf_link *bpf_link_by_id(u32 id); - -const struct bpf_func_proto *bpf_base_func_proto(enum bpf_func_id func_id); -void bpf_task_storage_free(struct task_struct *task); -bool bpf_prog_has_kfunc_call(const struct bpf_prog *prog); -const struct btf_func_model * -bpf_jit_find_kfunc_model(const struct bpf_prog *prog, - const struct bpf_insn *insn); -#else /* !CONFIG_BPF_SYSCALL */ static inline struct bpf_prog *bpf_prog_get(u32 ufd) { return ERR_PTR(-EOPNOTSUPP); } -static inline struct bpf_prog *bpf_prog_get_type_dev(u32 ufd, - enum bpf_prog_type type, - bool attach_drv) +static inline struct bpf_prog *bpf_prog_get_type(u32 ufd, + enum bpf_prog_type type) { return ERR_PTR(-EOPNOTSUPP); } - -static inline void bpf_prog_add(struct bpf_prog *prog, int i) -{ -} - -static inline void bpf_prog_sub(struct bpf_prog *prog, int i) +static inline struct bpf_prog *bpf_prog_add(struct bpf_prog *prog, int i) { + return ERR_PTR(-EOPNOTSUPP); } static inline void bpf_prog_put(struct bpf_prog *prog) { } - -static inline void bpf_prog_inc(struct bpf_prog *prog) -{ -} - -static inline struct bpf_prog *__must_check -bpf_prog_inc_not_zero(struct bpf_prog *prog) +static inline struct bpf_prog *bpf_prog_inc(struct bpf_prog *prog) { return ERR_PTR(-EOPNOTSUPP); } - -static inline void bpf_link_init(struct bpf_link *link, enum bpf_link_type type, - const struct bpf_link_ops *ops, - struct bpf_prog *prog) -{ -} - -static inline int bpf_link_prime(struct bpf_link *link, - struct bpf_link_primer *primer) -{ - return -EOPNOTSUPP; -} - -static inline int bpf_link_settle(struct bpf_link_primer *primer) -{ - return -EOPNOTSUPP; -} - -static inline void bpf_link_cleanup(struct bpf_link_primer *primer) -{ -} - -static inline void bpf_link_inc(struct bpf_link *link) -{ -} - -static inline void bpf_link_put(struct bpf_link *link) -{ -} - -static inline int bpf_obj_get_user(const char __user *pathname, 
int flags) -{ - return -EOPNOTSUPP; -} - -static inline bool dev_map_can_have_prog(struct bpf_map *map) -{ - return false; -} - -static inline void __dev_flush(void) -{ -} - -struct xdp_buff; -struct bpf_dtab_netdev; -struct bpf_cpu_map_entry; - -static inline -int dev_xdp_enqueue(struct net_device *dev, struct xdp_buff *xdp, - struct net_device *dev_rx) -{ - return 0; -} - -static inline -int dev_map_enqueue(struct bpf_dtab_netdev *dst, struct xdp_buff *xdp, - struct net_device *dev_rx) -{ - return 0; -} - -static inline -int dev_map_enqueue_multi(struct xdp_buff *xdp, struct net_device *dev_rx, - struct bpf_map *map, bool exclude_ingress) -{ - return 0; -} - -struct sk_buff; - -static inline int dev_map_generic_redirect(struct bpf_dtab_netdev *dst, - struct sk_buff *skb, - struct bpf_prog *xdp_prog) -{ - return 0; -} - -static inline -int dev_map_redirect_multi(struct net_device *dev, struct sk_buff *skb, - struct bpf_prog *xdp_prog, struct bpf_map *map, - bool exclude_ingress) -{ - return 0; -} - -static inline void __cpu_map_flush(void) -{ -} - -static inline int cpu_map_enqueue(struct bpf_cpu_map_entry *rcpu, - struct xdp_buff *xdp, - struct net_device *dev_rx) -{ - return 0; -} - -static inline int cpu_map_generic_redirect(struct bpf_cpu_map_entry *rcpu, - struct sk_buff *skb) -{ - return -EOPNOTSUPP; -} - -static inline bool cpu_map_prog_allowed(struct bpf_map *map) -{ - return false; -} - -static inline struct bpf_prog *bpf_prog_get_type_path(const char *name, - enum bpf_prog_type type) -{ - return ERR_PTR(-EOPNOTSUPP); -} - -static inline int bpf_prog_test_run_xdp(struct bpf_prog *prog, - const union bpf_attr *kattr, - union bpf_attr __user *uattr) -{ - return -ENOTSUPP; -} - -static inline int bpf_prog_test_run_skb(struct bpf_prog *prog, - const union bpf_attr *kattr, - union bpf_attr __user *uattr) -{ - return -ENOTSUPP; -} - -static inline int bpf_prog_test_run_tracing(struct bpf_prog *prog, - const union bpf_attr *kattr, - union bpf_attr __user *uattr) 
-{ - return -ENOTSUPP; -} - -static inline int bpf_prog_test_run_flow_dissector(struct bpf_prog *prog, - const union bpf_attr *kattr, - union bpf_attr __user *uattr) -{ - return -ENOTSUPP; -} - -static inline int bpf_prog_test_run_sk_lookup(struct bpf_prog *prog, - const union bpf_attr *kattr, - union bpf_attr __user *uattr) -{ - return -ENOTSUPP; -} - -static inline bool bpf_prog_test_check_kfunc_call(u32 kfunc_id) -{ - return false; -} - -static inline void bpf_map_put(struct bpf_map *map) -{ -} - -static inline struct bpf_prog *bpf_prog_by_id(u32 id) -{ - return ERR_PTR(-ENOTSUPP); -} - -static inline const struct bpf_func_proto * -bpf_base_func_proto(enum bpf_func_id func_id) -{ - return NULL; -} - -static inline void bpf_task_storage_free(struct task_struct *task) -{ -} - -static inline bool bpf_prog_has_kfunc_call(const struct bpf_prog *prog) -{ - return false; -} - -static inline const struct btf_func_model * -bpf_jit_find_kfunc_model(const struct bpf_prog *prog, - const struct bpf_insn *insn) -{ - return NULL; -} #endif /* CONFIG_BPF_SYSCALL */ -void __bpf_free_used_btfs(struct bpf_prog_aux *aux, - struct btf_mod_pair *used_btfs, u32 len); - -static inline struct bpf_prog *bpf_prog_get_type(u32 ufd, - enum bpf_prog_type type) -{ - return bpf_prog_get_type_dev(ufd, type, false); -} - -void __bpf_free_used_maps(struct bpf_prog_aux *aux, - struct bpf_map **used_maps, u32 len); - -bool bpf_prog_get_ok(struct bpf_prog *, enum bpf_prog_type *, bool); - -int bpf_prog_offload_compile(struct bpf_prog *prog); -void bpf_prog_offload_destroy(struct bpf_prog *prog); -int bpf_prog_offload_info_fill(struct bpf_prog_info *info, - struct bpf_prog *prog); - -int bpf_map_offload_info_fill(struct bpf_map_info *info, struct bpf_map *map); - -int bpf_map_offload_lookup_elem(struct bpf_map *map, void *key, void *value); -int bpf_map_offload_update_elem(struct bpf_map *map, - void *key, void *value, u64 flags); -int bpf_map_offload_delete_elem(struct bpf_map *map, void *key); -int 
bpf_map_offload_get_next_key(struct bpf_map *map, - void *key, void *next_key); - -bool bpf_offload_prog_map_match(struct bpf_prog *prog, struct bpf_map *map); - -struct bpf_offload_dev * -bpf_offload_dev_create(const struct bpf_prog_offload_ops *ops, void *priv); -void bpf_offload_dev_destroy(struct bpf_offload_dev *offdev); -void *bpf_offload_dev_priv(struct bpf_offload_dev *offdev); -int bpf_offload_dev_netdev_register(struct bpf_offload_dev *offdev, - struct net_device *netdev); -void bpf_offload_dev_netdev_unregister(struct bpf_offload_dev *offdev, - struct net_device *netdev); -bool bpf_offload_dev_match(struct bpf_prog *prog, struct net_device *netdev); - -#if defined(CONFIG_NET) && defined(CONFIG_BPF_SYSCALL) -int bpf_prog_offload_init(struct bpf_prog *prog, union bpf_attr *attr); - -static inline bool bpf_prog_is_dev_bound(const struct bpf_prog_aux *aux) -{ - return aux->offload_requested; -} - -static inline bool bpf_map_is_dev_bound(struct bpf_map *map) -{ - return unlikely(map->ops == &bpf_map_offload_ops); -} - -struct bpf_map *bpf_map_offload_map_alloc(union bpf_attr *attr); -void bpf_map_offload_map_free(struct bpf_map *map); -int bpf_prog_test_run_syscall(struct bpf_prog *prog, - const union bpf_attr *kattr, - union bpf_attr __user *uattr); - -int sock_map_get_from_fd(const union bpf_attr *attr, struct bpf_prog *prog); -int sock_map_prog_detach(const union bpf_attr *attr, enum bpf_prog_type ptype); -int sock_map_update_elem_sys(struct bpf_map *map, void *key, void *value, u64 flags); -void sock_map_unhash(struct sock *sk); -void sock_map_close(struct sock *sk, long timeout); -#else -static inline int bpf_prog_offload_init(struct bpf_prog *prog, - union bpf_attr *attr) -{ - return -EOPNOTSUPP; -} - -static inline bool bpf_prog_is_dev_bound(struct bpf_prog_aux *aux) -{ - return false; -} - -static inline bool bpf_map_is_dev_bound(struct bpf_map *map) -{ - return false; -} - -static inline struct bpf_map *bpf_map_offload_map_alloc(union bpf_attr *attr) 
-{ - return ERR_PTR(-EOPNOTSUPP); -} - -static inline void bpf_map_offload_map_free(struct bpf_map *map) -{ -} - -static inline int bpf_prog_test_run_syscall(struct bpf_prog *prog, - const union bpf_attr *kattr, - union bpf_attr __user *uattr) -{ - return -ENOTSUPP; -} - -#ifdef CONFIG_BPF_SYSCALL -static inline int sock_map_get_from_fd(const union bpf_attr *attr, - struct bpf_prog *prog) -{ - return -EINVAL; -} - -static inline int sock_map_prog_detach(const union bpf_attr *attr, - enum bpf_prog_type ptype) -{ - return -EOPNOTSUPP; -} - -static inline int sock_map_update_elem_sys(struct bpf_map *map, void *key, void *value, - u64 flags) -{ - return -EOPNOTSUPP; -} -#endif /* CONFIG_BPF_SYSCALL */ -#endif /* CONFIG_NET && CONFIG_BPF_SYSCALL */ - -#if defined(CONFIG_INET) && defined(CONFIG_BPF_SYSCALL) -void bpf_sk_reuseport_detach(struct sock *sk); -int bpf_fd_reuseport_array_lookup_elem(struct bpf_map *map, void *key, - void *value); -int bpf_fd_reuseport_array_update_elem(struct bpf_map *map, void *key, - void *value, u64 map_flags); -#else -static inline void bpf_sk_reuseport_detach(struct sock *sk) -{ -} - -#ifdef CONFIG_BPF_SYSCALL -static inline int bpf_fd_reuseport_array_lookup_elem(struct bpf_map *map, - void *key, void *value) -{ - return -EOPNOTSUPP; -} - -static inline int bpf_fd_reuseport_array_update_elem(struct bpf_map *map, - void *key, void *value, - u64 map_flags) -{ - return -EOPNOTSUPP; -} -#endif /* CONFIG_BPF_SYSCALL */ -#endif /* defined(CONFIG_INET) && defined(CONFIG_BPF_SYSCALL) */ - /* verifier prototypes for helper functions called from eBPF programs */ extern const struct bpf_func_proto bpf_map_lookup_elem_proto; extern const struct bpf_func_proto bpf_map_update_elem_proto; extern const struct bpf_func_proto bpf_map_delete_elem_proto; -extern const struct bpf_func_proto bpf_map_push_elem_proto; -extern const struct bpf_func_proto bpf_map_pop_elem_proto; -extern const struct bpf_func_proto bpf_map_peek_elem_proto; extern const struct 
bpf_func_proto bpf_get_prandom_u32_proto; extern const struct bpf_func_proto bpf_get_smp_processor_id_proto; -extern const struct bpf_func_proto bpf_get_numa_node_id_proto; extern const struct bpf_func_proto bpf_tail_call_proto; extern const struct bpf_func_proto bpf_ktime_get_ns_proto; -extern const struct bpf_func_proto bpf_ktime_get_boot_ns_proto; extern const struct bpf_func_proto bpf_get_current_pid_tgid_proto; extern const struct bpf_func_proto bpf_get_current_uid_gid_proto; extern const struct bpf_func_proto bpf_get_current_comm_proto; +extern const struct bpf_func_proto bpf_skb_vlan_push_proto; +extern const struct bpf_func_proto bpf_skb_vlan_pop_proto; extern const struct bpf_func_proto bpf_get_stackid_proto; -extern const struct bpf_func_proto bpf_get_stack_proto; -extern const struct bpf_func_proto bpf_get_task_stack_proto; -extern const struct bpf_func_proto bpf_get_stackid_proto_pe; -extern const struct bpf_func_proto bpf_get_stack_proto_pe; -extern const struct bpf_func_proto bpf_sock_map_update_proto; -extern const struct bpf_func_proto bpf_sock_hash_update_proto; -extern const struct bpf_func_proto bpf_get_current_cgroup_id_proto; -extern const struct bpf_func_proto bpf_get_current_ancestor_cgroup_id_proto; -extern const struct bpf_func_proto bpf_msg_redirect_hash_proto; -extern const struct bpf_func_proto bpf_msg_redirect_map_proto; -extern const struct bpf_func_proto bpf_sk_redirect_hash_proto; -extern const struct bpf_func_proto bpf_sk_redirect_map_proto; -extern const struct bpf_func_proto bpf_spin_lock_proto; -extern const struct bpf_func_proto bpf_spin_unlock_proto; -extern const struct bpf_func_proto bpf_get_local_storage_proto; -extern const struct bpf_func_proto bpf_strtol_proto; -extern const struct bpf_func_proto bpf_strtoul_proto; -extern const struct bpf_func_proto bpf_tcp_sock_proto; -extern const struct bpf_func_proto bpf_jiffies64_proto; -extern const struct bpf_func_proto bpf_get_ns_current_pid_tgid_proto; -extern const struct 
bpf_func_proto bpf_event_output_data_proto; -extern const struct bpf_func_proto bpf_ringbuf_output_proto; -extern const struct bpf_func_proto bpf_ringbuf_reserve_proto; -extern const struct bpf_func_proto bpf_ringbuf_submit_proto; -extern const struct bpf_func_proto bpf_ringbuf_discard_proto; -extern const struct bpf_func_proto bpf_ringbuf_query_proto; -extern const struct bpf_func_proto bpf_skc_to_tcp6_sock_proto; -extern const struct bpf_func_proto bpf_skc_to_tcp_sock_proto; -extern const struct bpf_func_proto bpf_skc_to_tcp_timewait_sock_proto; -extern const struct bpf_func_proto bpf_skc_to_tcp_request_sock_proto; -extern const struct bpf_func_proto bpf_skc_to_udp6_sock_proto; -extern const struct bpf_func_proto bpf_copy_from_user_proto; -extern const struct bpf_func_proto bpf_snprintf_btf_proto; -extern const struct bpf_func_proto bpf_snprintf_proto; -extern const struct bpf_func_proto bpf_per_cpu_ptr_proto; -extern const struct bpf_func_proto bpf_this_cpu_ptr_proto; -extern const struct bpf_func_proto bpf_ktime_get_coarse_ns_proto; -extern const struct bpf_func_proto bpf_sock_from_file_proto; -extern const struct bpf_func_proto bpf_get_socket_ptr_cookie_proto; -extern const struct bpf_func_proto bpf_task_storage_get_proto; -extern const struct bpf_func_proto bpf_task_storage_delete_proto; -extern const struct bpf_func_proto bpf_for_each_map_elem_proto; -extern const struct bpf_func_proto bpf_btf_find_by_name_kind_proto; -extern const struct bpf_func_proto bpf_sk_setsockopt_proto; -extern const struct bpf_func_proto bpf_sk_getsockopt_proto; - -const struct bpf_func_proto *tracing_prog_func_proto( - enum bpf_func_id func_id, const struct bpf_prog *prog); /* Shared helpers among cBPF and eBPF. 
*/ void bpf_user_rnd_init_once(void); u64 bpf_user_rnd_u32(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5); -u64 bpf_get_raw_cpu_id(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5); - -#if defined(CONFIG_NET) -bool bpf_sock_common_is_valid_access(int off, int size, - enum bpf_access_type type, - struct bpf_insn_access_aux *info); -bool bpf_sock_is_valid_access(int off, int size, enum bpf_access_type type, - struct bpf_insn_access_aux *info); -u32 bpf_sock_convert_ctx_access(enum bpf_access_type type, - const struct bpf_insn *si, - struct bpf_insn *insn_buf, - struct bpf_prog *prog, - u32 *target_size); -#else -static inline bool bpf_sock_common_is_valid_access(int off, int size, - enum bpf_access_type type, - struct bpf_insn_access_aux *info) -{ - return false; -} -static inline bool bpf_sock_is_valid_access(int off, int size, - enum bpf_access_type type, - struct bpf_insn_access_aux *info) -{ - return false; -} -static inline u32 bpf_sock_convert_ctx_access(enum bpf_access_type type, - const struct bpf_insn *si, - struct bpf_insn *insn_buf, - struct bpf_prog *prog, - u32 *target_size) -{ - return 0; -} -#endif - -#ifdef CONFIG_INET -struct sk_reuseport_kern { - struct sk_buff *skb; - struct sock *sk; - struct sock *selected_sk; - struct sock *migrating_sk; - void *data_end; - u32 hash; - u32 reuseport_id; - bool bind_inany; -}; -bool bpf_tcp_sock_is_valid_access(int off, int size, enum bpf_access_type type, - struct bpf_insn_access_aux *info); - -u32 bpf_tcp_sock_convert_ctx_access(enum bpf_access_type type, - const struct bpf_insn *si, - struct bpf_insn *insn_buf, - struct bpf_prog *prog, - u32 *target_size); - -bool bpf_xdp_sock_is_valid_access(int off, int size, enum bpf_access_type type, - struct bpf_insn_access_aux *info); - -u32 bpf_xdp_sock_convert_ctx_access(enum bpf_access_type type, - const struct bpf_insn *si, - struct bpf_insn *insn_buf, - struct bpf_prog *prog, - u32 *target_size); -#else -static inline bool bpf_tcp_sock_is_valid_access(int off, int size, - enum 
bpf_access_type type, - struct bpf_insn_access_aux *info) -{ - return false; -} - -static inline u32 bpf_tcp_sock_convert_ctx_access(enum bpf_access_type type, - const struct bpf_insn *si, - struct bpf_insn *insn_buf, - struct bpf_prog *prog, - u32 *target_size) -{ - return 0; -} -static inline bool bpf_xdp_sock_is_valid_access(int off, int size, - enum bpf_access_type type, - struct bpf_insn_access_aux *info) -{ - return false; -} - -static inline u32 bpf_xdp_sock_convert_ctx_access(enum bpf_access_type type, - const struct bpf_insn *si, - struct bpf_insn *insn_buf, - struct bpf_prog *prog, - u32 *target_size) -{ - return 0; -} -#endif /* CONFIG_INET */ - -enum bpf_text_poke_type { - BPF_MOD_CALL, - BPF_MOD_JUMP, -}; - -int bpf_arch_text_poke(void *ip, enum bpf_text_poke_type t, - void *addr1, void *addr2); - -struct btf_id_set; -bool btf_id_set_contains(const struct btf_id_set *set, u32 id); - -int bpf_bprintf_prepare(char *fmt, u32 fmt_size, const u64 *raw_args, - u32 **bin_buf, u32 num_args); -void bpf_bprintf_cleanup(void); #endif /* _LINUX_BPF_H */ diff --git a/include/linux/bpf_verifier.h b/include/linux/bpf_verifier.h index 5424124dbe..a13b031dc6 100644 --- a/include/linux/bpf_verifier.h +++ b/include/linux/bpf_verifier.h @@ -1,531 +1,103 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of version 2 of the GNU General Public + * License as published by the Free Software Foundation. */ #ifndef _LINUX_BPF_VERIFIER_H #define _LINUX_BPF_VERIFIER_H 1 #include /* for enum bpf_reg_type */ -#include /* for struct btf and btf_id() */ #include /* for MAX_BPF_STACK */ -#include -/* Maximum variable offset umax_value permitted when resolving memory accesses. - * In practice this is far bigger than any realistic pointer offset; this limit - * ensures that umax_value + (int)off + (int)size cannot overflow a u64. 
- */ -#define BPF_MAX_VAR_OFF (1 << 29) -/* Maximum variable size permitted for ARG_CONST_SIZE[_OR_ZERO]. This ensures - * that converting umax_value to int cannot overflow. - */ -#define BPF_MAX_VAR_SIZ (1 << 29) - -/* Liveness marks, used for registers and spilled-regs (in stack slots). - * Read marks propagate upwards until they find a write mark; they record that - * "one of this state's descendants read this reg" (and therefore the reg is - * relevant for states_equal() checks). - * Write marks collect downwards and do not propagate; they record that "the - * straight-line code that reached this state (from its parent) wrote this reg" - * (and therefore that reads propagated from this state or its descendants - * should not propagate to its parent). - * A state with a write mark can receive read marks; it just won't propagate - * them to its parent, since the write mark is a property, not of the state, - * but of the link between it and its parent. See mark_reg_read() and - * mark_stack_slot_read() in kernel/bpf/verifier.c. - */ -enum bpf_reg_liveness { - REG_LIVE_NONE = 0, /* reg hasn't been read or written this branch */ - REG_LIVE_READ32 = 0x1, /* reg was read, so we're sensitive to initial value */ - REG_LIVE_READ64 = 0x2, /* likewise, but full 64-bit content matters */ - REG_LIVE_READ = REG_LIVE_READ32 | REG_LIVE_READ64, - REG_LIVE_WRITTEN = 0x4, /* reg was written first, screening off later reads */ - REG_LIVE_DONE = 0x8, /* liveness won't be updating this register anymore */ -}; + /* Just some arbitrary values so we can safely do math without overflowing and + * are obviously wrong for any sort of memory access. + */ +#define BPF_REGISTER_MAX_RANGE (1024 * 1024 * 1024) +#define BPF_REGISTER_MIN_RANGE -1 struct bpf_reg_state { - /* Ordering of fields matters. 
See states_equal() */ enum bpf_reg_type type; - /* Fixed part of pointer offset, pointer types only */ - s32 off; union { - /* valid when type == PTR_TO_PACKET */ - int range; + /* valid when type == CONST_IMM | PTR_TO_STACK | UNKNOWN_VALUE */ + s64 imm; + + /* valid when type == PTR_TO_PACKET* */ + struct { + u16 off; + u16 range; + }; /* valid when type == CONST_PTR_TO_MAP | PTR_TO_MAP_VALUE | * PTR_TO_MAP_VALUE_OR_NULL */ - struct { - struct bpf_map *map_ptr; - /* To distinguish map lookups from outer map - * the map_uid is non-zero for registers - * pointing to inner maps. - */ - u32 map_uid; - }; - - /* for PTR_TO_BTF_ID */ - struct { - struct btf *btf; - u32 btf_id; - }; - - u32 mem_size; /* for PTR_TO_MEM | PTR_TO_MEM_OR_NULL */ - - /* Max size from any of the above. */ - struct { - unsigned long raw1; - unsigned long raw2; - } raw; - - u32 subprogno; /* for PTR_TO_FUNC */ + struct bpf_map *map_ptr; }; - /* For PTR_TO_PACKET, used to find other pointers with the same variable - * offset, so they can share range knowledge. - * For PTR_TO_MAP_VALUE_OR_NULL this is used to share which map value we - * came from, when one is tested for != NULL. - * For PTR_TO_MEM_OR_NULL this is used to identify memory allocation - * for the purpose of tracking that it's freed. - * For PTR_TO_SOCKET this is used to share which pointers retain the - * same reference to the socket, to determine proper reference freeing. - */ u32 id; - /* PTR_TO_SOCKET and PTR_TO_TCP_SOCK could be a ptr returned - * from a pointer-cast helper, bpf_sk_fullsock() and - * bpf_tcp_sock(). 
- * - * Consider the following where "sk" is a reference counted - * pointer returned from "sk = bpf_sk_lookup_tcp();": - * - * 1: sk = bpf_sk_lookup_tcp(); - * 2: if (!sk) { return 0; } - * 3: fullsock = bpf_sk_fullsock(sk); - * 4: if (!fullsock) { bpf_sk_release(sk); return 0; } - * 5: tp = bpf_tcp_sock(fullsock); - * 6: if (!tp) { bpf_sk_release(sk); return 0; } - * 7: bpf_sk_release(sk); - * 8: snd_cwnd = tp->snd_cwnd; // verifier will complain - * - * After bpf_sk_release(sk) at line 7, both "fullsock" ptr and - * "tp" ptr should be invalidated also. In order to do that, - * the reg holding "fullsock" and "sk" need to remember - * the original refcounted ptr id (i.e. sk_reg->id) in ref_obj_id - * such that the verifier can reset all regs which have - * ref_obj_id matching the sk_reg->id. - * - * sk_reg->ref_obj_id is set to sk_reg->id at line 1. - * sk_reg->id will stay as NULL-marking purpose only. - * After NULL-marking is done, sk_reg->id can be reset to 0. - * - * After "fullsock = bpf_sk_fullsock(sk);" at line 3, - * fullsock_reg->ref_obj_id is set to sk_reg->ref_obj_id. - * - * After "tp = bpf_tcp_sock(fullsock);" at line 5, - * tp_reg->ref_obj_id is set to fullsock_reg->ref_obj_id - * which is the same as sk_reg->ref_obj_id. - * - * From the verifier perspective, if sk, fullsock and tp - * are not NULL, they are the same ptr with different - * reg->type. In particular, bpf_sk_release(tp) is also - * allowed and has the same effect as bpf_sk_release(sk). - */ - u32 ref_obj_id; - /* For scalar types (SCALAR_VALUE), this represents our knowledge of - * the actual value. - * For pointer types, this represents the variable part of the offset - * from the pointed-to object, and is shared with all bpf_reg_states - * with the same id as us. - */ - struct tnum var_off; /* Used to determine if any memory access using this register will - * result in a bad access. 
- * These refer to the same value as var_off, not necessarily the actual - * contents of the register. + * result in a bad access. These two fields must be last. + * See states_equal() */ - s64 smin_value; /* minimum possible (s64)value */ - s64 smax_value; /* maximum possible (s64)value */ - u64 umin_value; /* minimum possible (u64)value */ - u64 umax_value; /* maximum possible (u64)value */ - s32 s32_min_value; /* minimum possible (s32)value */ - s32 s32_max_value; /* maximum possible (s32)value */ - u32 u32_min_value; /* minimum possible (u32)value */ - u32 u32_max_value; /* maximum possible (u32)value */ - /* parentage chain for liveness checking */ - struct bpf_reg_state *parent; - /* Inside the callee two registers can be both PTR_TO_STACK like - * R1=fp-8 and R2=fp-8, but one of them points to this function stack - * while another to the caller's stack. To differentiate them 'frameno' - * is used which is an index in bpf_verifier_state->frame[] array - * pointing to bpf_func_state. - */ - u32 frameno; - /* Tracks subreg definition. The stored value is the insn_idx of the - * writing insn. This is safe because subreg_def is used before any insn - * patching which only happens after main verification finished. 
- */ - s32 subreg_def; - enum bpf_reg_liveness live; - /* if (!precise && SCALAR_VALUE) min/max/tnum don't affect safety */ - bool precise; + s64 min_value; + u64 max_value; }; enum bpf_stack_slot_type { STACK_INVALID, /* nothing was stored in this stack slot */ STACK_SPILL, /* register spilled into stack */ - STACK_MISC, /* BPF program wrote some data into this slot */ - STACK_ZERO, /* BPF program wrote constant zero */ + STACK_MISC /* BPF program wrote some data into this slot */ }; #define BPF_REG_SIZE 8 /* size of eBPF register in bytes */ -struct bpf_stack_state { - struct bpf_reg_state spilled_ptr; - u8 slot_type[BPF_REG_SIZE]; -}; - -struct bpf_reference_state { - /* Track each reference created with a unique id, even if the same - * instruction creates the reference multiple times (eg, via CALL). - */ - int id; - /* Instruction where the allocation of this reference occurred. This - * is used purely to inform the user of a reference leak. - */ - int insn_idx; -}; - /* state of the program: * type of all registers and stack info */ -struct bpf_func_state { - struct bpf_reg_state regs[MAX_BPF_REG]; - /* index of call instruction that called into this func */ - int callsite; - /* stack frame number of this function state from pov of - * enclosing bpf_verifier_state. - * 0 = main function, 1 = first callee. - */ - u32 frameno; - /* subprog number == index within subprog_info - * zero == main subprog - */ - u32 subprogno; - /* Every bpf_timer_start will increment async_entry_cnt. - * It's used to distinguish: - * void foo(void) { for(;;); } - * void foo(void) { bpf_timer_set_callback(,foo); } - */ - u32 async_entry_cnt; - bool in_callback_fn; - bool in_async_callback_fn; - - /* The following fields should be last. 
See copy_func_state() */ - int acquired_refs; - struct bpf_reference_state *refs; - int allocated_stack; - struct bpf_stack_state *stack; -}; - -struct bpf_idx_pair { - u32 prev_idx; - u32 idx; -}; - -struct bpf_id_pair { - u32 old; - u32 cur; -}; - -/* Maximum number of register states that can exist at once */ -#define BPF_ID_MAP_SIZE (MAX_BPF_REG + MAX_BPF_STACK / BPF_REG_SIZE) -#define MAX_CALL_FRAMES 8 struct bpf_verifier_state { - /* call stack tracking */ - struct bpf_func_state *frame[MAX_CALL_FRAMES]; - struct bpf_verifier_state *parent; - /* - * 'branches' field is the number of branches left to explore: - * 0 - all possible paths from this state reached bpf_exit or - * were safely pruned - * 1 - at least one path is being explored. - * This state hasn't reached bpf_exit - * 2 - at least two paths are being explored. - * This state is an immediate parent of two children. - * One is fallthrough branch with branches==1 and another - * state is pushed into stack (to be explored later) also with - * branches==1. The parent of this state has branches==1. - * The verifier state tree connected via 'parent' pointer looks like: - * 1 - * 1 - * 2 -> 1 (first 'if' pushed into stack) - * 1 - * 2 -> 1 (second 'if' pushed into stack) - * 1 - * 1 - * 1 bpf_exit. - * - * Once do_check() reaches bpf_exit, it calls update_branch_counts() - * and the verifier state tree will look: - * 1 - * 1 - * 2 -> 1 (first 'if' pushed into stack) - * 1 - * 1 -> 1 (second 'if' pushed into stack) - * 0 - * 0 - * 0 bpf_exit. - * After pop_stack() the do_check() will resume at second 'if'. - * - * If is_state_visited() sees a state with branches > 0 it means - * there is a loop. If such state is exactly equal to the current state - * it's an infinite loop. Note states_equal() checks for states - * equvalency, so two states being 'states_equal' does not mean - * infinite loop. The exact comparison is provided by - * states_maybe_looping() function. 
It's a stronger pre-check and - * much faster than states_equal(). - * - * This algorithm may not find all possible infinite loops or - * loop iteration count may be too high. - * In such cases BPF_COMPLEXITY_LIMIT_INSNS limit kicks in. - */ - u32 branches; - u32 insn_idx; - u32 curframe; - u32 active_spin_lock; - bool speculative; - - /* first and last insn idx of this verifier state */ - u32 first_insn_idx; - u32 last_insn_idx; - /* jmp history recorded from first to last. - * backtracking is using it to go from last to first. - * For most states jmp_history_cnt is [0-3]. - * For loops can go up to ~40. - */ - struct bpf_idx_pair *jmp_history; - u32 jmp_history_cnt; + struct bpf_reg_state regs[MAX_BPF_REG]; + u8 stack_slot_type[MAX_BPF_STACK]; + struct bpf_reg_state spilled_regs[MAX_BPF_STACK / BPF_REG_SIZE]; }; -#define bpf_get_spilled_reg(slot, frame) \ - (((slot < frame->allocated_stack / BPF_REG_SIZE) && \ - (frame->stack[slot].slot_type[0] == STACK_SPILL)) \ - ? &frame->stack[slot].spilled_ptr : NULL) - -/* Iterate over 'frame', setting 'reg' to either NULL or a spilled register. */ -#define bpf_for_each_spilled_reg(iter, frame, reg) \ - for (iter = 0, reg = bpf_get_spilled_reg(iter, frame); \ - iter < frame->allocated_stack / BPF_REG_SIZE; \ - iter++, reg = bpf_get_spilled_reg(iter, frame)) - /* linked list of verifier states used to prune search */ struct bpf_verifier_state_list { struct bpf_verifier_state state; struct bpf_verifier_state_list *next; - int miss_cnt, hit_cnt; }; -/* Possible states for alu_state member. 
*/ -#define BPF_ALU_SANITIZE_SRC (1U << 0) -#define BPF_ALU_SANITIZE_DST (1U << 1) -#define BPF_ALU_NEG_VALUE (1U << 2) -#define BPF_ALU_NON_POINTER (1U << 3) -#define BPF_ALU_IMMEDIATE (1U << 4) -#define BPF_ALU_SANITIZE (BPF_ALU_SANITIZE_SRC | \ - BPF_ALU_SANITIZE_DST) - struct bpf_insn_aux_data { - union { - enum bpf_reg_type ptr_type; /* pointer type for load/store insns */ - unsigned long map_ptr_state; /* pointer/poison value for maps */ - s32 call_imm; /* saved imm field of call insn */ - u32 alu_limit; /* limit for add/sub register with pointer */ - struct { - u32 map_index; /* index into used_maps[] */ - u32 map_off; /* offset from value base address */ - }; - struct { - enum bpf_reg_type reg_type; /* type of pseudo_btf_id */ - union { - struct { - struct btf *btf; - u32 btf_id; /* btf_id for struct typed var */ - }; - u32 mem_size; /* mem_size for non-struct typed var */ - }; - } btf_var; - }; - u64 map_key_state; /* constant (32 bit) key tracking for maps */ - int ctx_field_size; /* the ctx field size for load insn, maybe 0 */ - u32 seen; /* this insn was processed by the verifier at env->pass_cnt */ - bool sanitize_stack_spill; /* subject to Spectre v4 sanitation */ - bool zext_dst; /* this insn zero extends dst reg */ - u8 alu_state; /* used in combination with alu_limit */ - - /* below fields are initialized once */ - unsigned int orig_idx; /* original instruction index */ - bool prune_point; + enum bpf_reg_type ptr_type; /* pointer type for load/store insns */ }; #define MAX_USED_MAPS 64 /* max number of maps accessed by one eBPF program */ -#define MAX_USED_BTFS 64 /* max number of BTFs accessed by one BPF program */ -#define BPF_VERIFIER_TMP_LOG_SIZE 1024 - -struct bpf_verifier_log { - u32 level; - char kbuf[BPF_VERIFIER_TMP_LOG_SIZE]; - char __user *ubuf; - u32 len_used; - u32 len_total; -}; - -static inline bool bpf_verifier_log_full(const struct bpf_verifier_log *log) -{ - return log->len_used >= log->len_total - 1; -} - -#define BPF_LOG_LEVEL1 
1 -#define BPF_LOG_LEVEL2 2 -#define BPF_LOG_STATS 4 -#define BPF_LOG_LEVEL (BPF_LOG_LEVEL1 | BPF_LOG_LEVEL2) -#define BPF_LOG_MASK (BPF_LOG_LEVEL | BPF_LOG_STATS) -#define BPF_LOG_KERNEL (BPF_LOG_MASK + 1) /* kernel internal flag */ - -static inline bool bpf_verifier_log_needed(const struct bpf_verifier_log *log) -{ - return log && - ((log->level && log->ubuf && !bpf_verifier_log_full(log)) || - log->level == BPF_LOG_KERNEL); -} - -#define BPF_MAX_SUBPROGS 256 - -struct bpf_subprog_info { - /* 'start' has to be the first field otherwise find_subprog() won't work */ - u32 start; /* insn idx of function entry point */ - u32 linfo_idx; /* The idx to the main_prog->aux->linfo */ - u16 stack_depth; /* max. stack depth used by this function */ - bool has_tail_call; - bool tail_call_reachable; - bool has_ld_abs; - bool is_async_cb; +struct bpf_verifier_env; +struct bpf_ext_analyzer_ops { + int (*insn_hook)(struct bpf_verifier_env *env, + int insn_idx, int prev_insn_idx); }; /* single container for all structs * one verifier_env per bpf_check() call */ struct bpf_verifier_env { - u32 insn_idx; - u32 prev_insn_idx; struct bpf_prog *prog; /* eBPF program being verified */ - const struct bpf_verifier_ops *ops; struct bpf_verifier_stack_elem *head; /* stack of verifier states to be processed */ int stack_size; /* number of states to be processed */ - bool strict_alignment; /* perform strict pointer alignment checks */ - bool test_state_freq; /* test verifier with different pruning frequency */ - struct bpf_verifier_state *cur_state; /* current verifier state */ + struct bpf_verifier_state cur_state; /* current verifier state */ struct bpf_verifier_state_list **explored_states; /* search pruning optimization */ - struct bpf_verifier_state_list *free_list; + const struct bpf_ext_analyzer_ops *analyzer_ops; /* external analyzer ops */ + void *analyzer_priv; /* pointer to external analyzer's private data */ struct bpf_map *used_maps[MAX_USED_MAPS]; /* array of map's used by eBPF 
program */ - struct btf_mod_pair used_btfs[MAX_USED_BTFS]; /* array of BTF's used by BPF program */ u32 used_map_cnt; /* number of used maps */ - u32 used_btf_cnt; /* number of used BTF objects */ u32 id_gen; /* used to generate unique reg IDs */ - bool explore_alu_limits; bool allow_ptr_leaks; - bool allow_uninit_stack; - bool allow_ptr_to_map_access; - bool bpf_capable; - bool bypass_spec_v1; - bool bypass_spec_v4; bool seen_direct_write; + bool varlen_map_value_access; struct bpf_insn_aux_data *insn_aux_data; /* array of per-insn state */ - const struct bpf_line_info *prev_linfo; - struct bpf_verifier_log log; - struct bpf_subprog_info subprog_info[BPF_MAX_SUBPROGS + 1]; - struct bpf_id_pair idmap_scratch[BPF_ID_MAP_SIZE]; - struct { - int *insn_state; - int *insn_stack; - int cur_stack; - } cfg; - u32 pass_cnt; /* number of times do_check() was called */ - u32 subprog_cnt; - /* number of instructions analyzed by the verifier */ - u32 prev_insn_processed, insn_processed; - /* number of jmps, calls, exits analyzed so far */ - u32 prev_jmps_processed, jmps_processed; - /* total verification time */ - u64 verification_time; - /* maximum number of verifier states kept in 'branching' instructions */ - u32 max_states_per_insn; - /* total number of allocated verifier states */ - u32 total_states; - /* some states are freed during program analysis. - * this is peak number of states. 
this number dominates kernel - * memory consumption during verification - */ - u32 peak_states; - /* longest register parentage chain walked for liveness marking */ - u32 longest_mark_read_walk; - bpfptr_t fd_array; }; -__printf(2, 0) void bpf_verifier_vlog(struct bpf_verifier_log *log, - const char *fmt, va_list args); -__printf(2, 3) void bpf_verifier_log_write(struct bpf_verifier_env *env, - const char *fmt, ...); -__printf(2, 3) void bpf_log(struct bpf_verifier_log *log, - const char *fmt, ...); - -static inline struct bpf_func_state *cur_func(struct bpf_verifier_env *env) -{ - struct bpf_verifier_state *cur = env->cur_state; - - return cur->frame[cur->curframe]; -} - -static inline struct bpf_reg_state *cur_regs(struct bpf_verifier_env *env) -{ - return cur_func(env)->regs; -} - -int bpf_prog_offload_verifier_prep(struct bpf_prog *prog); -int bpf_prog_offload_verify_insn(struct bpf_verifier_env *env, - int insn_idx, int prev_insn_idx); -int bpf_prog_offload_finalize(struct bpf_verifier_env *env); -void -bpf_prog_offload_replace_insn(struct bpf_verifier_env *env, u32 off, - struct bpf_insn *insn); -void -bpf_prog_offload_remove_insns(struct bpf_verifier_env *env, u32 off, u32 cnt); - -int check_ctx_reg(struct bpf_verifier_env *env, - const struct bpf_reg_state *reg, int regno); -int check_mem_reg(struct bpf_verifier_env *env, struct bpf_reg_state *reg, - u32 regno, u32 mem_size); - -/* this lives here instead of in bpf.h because it needs to dereference tgt_prog */ -static inline u64 bpf_trampoline_compute_key(const struct bpf_prog *tgt_prog, - struct btf *btf, u32 btf_id) -{ - if (tgt_prog) - return ((u64)tgt_prog->aux->id << 32) | btf_id; - else - return ((u64)btf_obj_id(btf) << 32) | 0x80000000 | btf_id; -} - -/* unpack the IDs from the key as constructed above */ -static inline void bpf_trampoline_unpack_key(u64 key, u32 *obj_id, u32 *btf_id) -{ - if (obj_id) - *obj_id = key >> 32; - if (btf_id) - *btf_id = key & 0x7FFFFFFF; -} - -int 
bpf_check_attach_target(struct bpf_verifier_log *log, - const struct bpf_prog *prog, - const struct bpf_prog *tgt_prog, - u32 btf_id, - struct bpf_attach_target_info *tgt_info); +int bpf_analyzer(struct bpf_prog *prog, const struct bpf_ext_analyzer_ops *ops, + void *priv); #endif /* _LINUX_BPF_VERIFIER_H */ diff --git a/include/linux/brcmphy.h b/include/linux/brcmphy.h index ea2f8ea92d..e3354b7428 100644 --- a/include/linux/brcmphy.h +++ b/include/linux/brcmphy.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_BRCMPHY_H #define _LINUX_BRCMPHY_H @@ -14,31 +13,15 @@ #define PHY_ID_BCM5241 0x0143bc30 #define PHY_ID_BCMAC131 0x0143bc70 #define PHY_ID_BCM5481 0x0143bca0 -#define PHY_ID_BCM5395 0x0143bcf0 -#define PHY_ID_BCM53125 0x03625f20 -#define PHY_ID_BCM54810 0x03625d00 -#define PHY_ID_BCM54811 0x03625cc0 #define PHY_ID_BCM5482 0x0143bcb0 #define PHY_ID_BCM5411 0x00206070 #define PHY_ID_BCM5421 0x002060e0 -#define PHY_ID_BCM54210E 0x600d84a0 -#define PHY_ID_BCM54213PE 0x600d84a2 #define PHY_ID_BCM5464 0x002060b0 #define PHY_ID_BCM5461 0x002060c0 -#define PHY_ID_BCM54612E 0x03625e60 #define PHY_ID_BCM54616S 0x03625d10 -#define PHY_ID_BCM54140 0xae025009 #define PHY_ID_BCM57780 0x03625d90 -#define PHY_ID_BCM89610 0x03625cd0 -#define PHY_ID_BCM72113 0x35905310 -#define PHY_ID_BCM72116 0x35905350 #define PHY_ID_BCM7250 0xae025280 -#define PHY_ID_BCM7255 0xae025120 -#define PHY_ID_BCM7260 0xae025190 -#define PHY_ID_BCM7268 0xae025090 -#define PHY_ID_BCM7271 0xae0253b0 -#define PHY_ID_BCM7278 0xae0251a0 #define PHY_ID_BCM7364 0xae025260 #define PHY_ID_BCM7366 0x600d8490 #define PHY_ID_BCM7346 0x600d8650 @@ -46,13 +29,11 @@ #define PHY_ID_BCM7425 0x600d86b0 #define PHY_ID_BCM7429 0x600d8730 #define PHY_ID_BCM7435 0x600d8750 -#define PHY_ID_BCM74371 0xae0252e0 #define PHY_ID_BCM7439 0x600d8480 #define PHY_ID_BCM7439_2 0xae025080 #define PHY_ID_BCM7445 0x600d8510 #define PHY_ID_BCM_CYGNUS 0xae025200 -#define PHY_ID_BCM_OMEGA 0xae025100 #define 
PHY_BCM_OUI_MASK 0xfffffc00 #define PHY_BCM_OUI_1 0x00206000 @@ -62,12 +43,18 @@ #define PHY_BCM_OUI_5 0x03625e00 #define PHY_BCM_OUI_6 0xae025000 -#define PHY_BRCM_AUTO_PWRDWN_ENABLE 0x00000001 -#define PHY_BRCM_RX_REFCLK_UNUSED 0x00000002 -#define PHY_BRCM_CLEAR_RGMII_MODE 0x00000004 -#define PHY_BRCM_DIS_TXCRXC_NOENRGY 0x00000008 -#define PHY_BRCM_EN_MASTER_MODE 0x00000010 - +#define PHY_BCM_FLAGS_MODE_COPPER 0x00000001 +#define PHY_BCM_FLAGS_MODE_1000BX 0x00000002 +#define PHY_BCM_FLAGS_INTF_SGMII 0x00000010 +#define PHY_BCM_FLAGS_INTF_XAUI 0x00000020 +#define PHY_BRCM_WIRESPEED_ENABLE 0x00000100 +#define PHY_BRCM_AUTO_PWRDWN_ENABLE 0x00000200 +#define PHY_BRCM_RX_REFCLK_UNUSED 0x00000400 +#define PHY_BRCM_STD_IBND_DISABLE 0x00000800 +#define PHY_BRCM_EXT_IBND_RX_ENABLE 0x00001000 +#define PHY_BRCM_EXT_IBND_TX_ENABLE 0x00002000 +#define PHY_BRCM_CLEAR_RGMII_MODE 0x00004000 +#define PHY_BRCM_DIS_TXCRXC_NOENRGY 0x00008000 /* Broadcom BCM7xxx specific workarounds */ #define PHY_BRCM_7XXX_REV(x) (((x) >> 8) & 0xff) #define PHY_BRCM_7XXX_PATCH(x) ((x) & 0xff) @@ -77,7 +64,6 @@ #define MII_BCM54XX_ECR 0x10 /* BCM54xx extended control register */ #define MII_BCM54XX_ECR_IM 0x1000 /* Interrupt mask */ #define MII_BCM54XX_ECR_IF 0x0800 /* Interrupt force */ -#define MII_BCM54XX_ECR_FIFOE 0x0001 /* FIFO elasticity */ #define MII_BCM54XX_ESR 0x11 /* BCM54xx extended status register */ #define MII_BCM54XX_ESR_IS 0x1000 /* Interrupt status */ @@ -86,7 +72,6 @@ #define MII_BCM54XX_EXP_SEL 0x17 /* Expansion register select */ #define MII_BCM54XX_EXP_SEL_SSD 0x0e00 /* Secondary SerDes select */ #define MII_BCM54XX_EXP_SEL_ER 0x0f00 /* Expansion register select */ -#define MII_BCM54XX_EXP_SEL_ETC 0x0d00 /* Expansion register spare + 2k mem */ #define MII_BCM54XX_AUX_CTL 0x18 /* Auxiliary control register */ #define MII_BCM54XX_ISR 0x1a /* BCM54xx interrupt status register */ @@ -112,31 +97,19 @@ #define MII_BCM54XX_SHD_VAL(x) ((x & 0x1f) << 10) #define MII_BCM54XX_SHD_DATA(x) 
((x & 0x3ff) << 0) -#define MII_BCM54XX_RDB_ADDR 0x1e -#define MII_BCM54XX_RDB_DATA 0x1f - -/* legacy access control via rdb/expansion register */ -#define BCM54XX_RDB_REG0087 0x0087 -#define BCM54XX_EXP_REG7E (MII_BCM54XX_EXP_SEL_ER + 0x7E) -#define BCM54XX_ACCESS_MODE_LEGACY_EN BIT(15) - /* * AUXILIARY CONTROL SHADOW ACCESS REGISTERS. (PHY REG 0x18) */ -#define MII_BCM54XX_AUXCTL_SHDWSEL_AUXCTL 0x00 +#define MII_BCM54XX_AUXCTL_SHDWSEL_AUXCTL 0x0000 #define MII_BCM54XX_AUXCTL_ACTL_TX_6DB 0x0400 #define MII_BCM54XX_AUXCTL_ACTL_SMDSP_ENA 0x0800 -#define MII_BCM54XX_AUXCTL_ACTL_EXT_PKT_LEN 0x4000 -#define MII_BCM54XX_AUXCTL_SHDWSEL_MISC 0x07 -#define MII_BCM54XX_AUXCTL_SHDWSEL_MISC_WIRESPEED_EN 0x0010 -#define MII_BCM54XX_AUXCTL_SHDWSEL_MISC_RGMII_EN 0x0080 -#define MII_BCM54XX_AUXCTL_SHDWSEL_MISC_RGMII_SKEW_EN 0x0100 -#define MII_BCM54XX_AUXCTL_MISC_FORCE_AMDIX 0x0200 -#define MII_BCM54XX_AUXCTL_MISC_WREN 0x8000 +#define MII_BCM54XX_AUXCTL_MISC_WREN 0x8000 +#define MII_BCM54XX_AUXCTL_MISC_FORCE_AMDIX 0x0200 +#define MII_BCM54XX_AUXCTL_MISC_RDSEL_MISC 0x7000 +#define MII_BCM54XX_AUXCTL_SHDWSEL_MISC 0x0007 -#define MII_BCM54XX_AUXCTL_SHDWSEL_READ_SHIFT 12 -#define MII_BCM54XX_AUXCTL_SHDWSEL_MASK 0x0007 +#define MII_BCM54XX_AUXCTL_SHDWSEL_AUXCTL 0x0000 /* * Broadcom LED source encodings. 
These are used in BCM5461, BCM5481, @@ -151,48 +124,22 @@ #define BCM_LED_SRC_INTR 0x6 #define BCM_LED_SRC_QUALITY 0x7 #define BCM_LED_SRC_RCVLED 0x8 -#define BCM_LED_SRC_WIRESPEED 0x9 #define BCM_LED_SRC_MULTICOLOR1 0xa #define BCM_LED_SRC_OPENSHORT 0xb #define BCM_LED_SRC_OFF 0xe /* Tied high */ #define BCM_LED_SRC_ON 0xf /* Tied low */ -/* - * Broadcom Multicolor LED configurations (expansion register 4) - */ -#define BCM_EXP_MULTICOLOR (MII_BCM54XX_EXP_SEL_ER + 0x04) -#define BCM_LED_MULTICOLOR_IN_PHASE BIT(8) -#define BCM_LED_MULTICOLOR_LINK_ACT 0x0 -#define BCM_LED_MULTICOLOR_SPEED 0x1 -#define BCM_LED_MULTICOLOR_ACT_FLASH 0x2 -#define BCM_LED_MULTICOLOR_FDX 0x3 -#define BCM_LED_MULTICOLOR_OFF 0x4 -#define BCM_LED_MULTICOLOR_ON 0x5 -#define BCM_LED_MULTICOLOR_ALT 0x6 -#define BCM_LED_MULTICOLOR_FLASH 0x7 -#define BCM_LED_MULTICOLOR_LINK 0x8 -#define BCM_LED_MULTICOLOR_ACT 0x9 -#define BCM_LED_MULTICOLOR_PROGRAM 0xa /* * BCM5482: Shadow registers * Shadow values go into bits [14:10] of register 0x1c to select a shadow * register to access. 
*/ - -/* 00100: Reserved control register 2 */ -#define BCM54XX_SHD_SCR2 0x04 -#define BCM54XX_SHD_SCR2_WSPD_RTRY_DIS 0x100 -#define BCM54XX_SHD_SCR2_WSPD_RTRY_LMT_SHIFT 2 -#define BCM54XX_SHD_SCR2_WSPD_RTRY_LMT_OFFSET 2 -#define BCM54XX_SHD_SCR2_WSPD_RTRY_LMT_MASK 0x7 - /* 00101: Spare Control Register 3 */ #define BCM54XX_SHD_SCR3 0x05 #define BCM54XX_SHD_SCR3_DEF_CLK125 0x0001 #define BCM54XX_SHD_SCR3_DLLAPD_DIS 0x0002 #define BCM54XX_SHD_SCR3_TRDDAPD 0x0004 -#define BCM54XX_SHD_SCR3_RXCTXC_DIS 0x0100 /* 01010: Auto Power-Down */ #define BCM54XX_SHD_APD 0x0a @@ -210,18 +157,9 @@ #define BCM5482_SHD_SSD 0x14 /* 10100: Secondary SerDes control */ #define BCM5482_SHD_SSD_LEDM 0x0008 /* SSD LED Mode enable */ #define BCM5482_SHD_SSD_EN 0x0001 /* SSD enable */ +#define BCM5482_SHD_MODE 0x1f /* 11111: Mode Control Register */ +#define BCM5482_SHD_MODE_1000BX 0x0001 /* Enable 1000BASE-X registers */ -/* 10011: SerDes 100-FX Control Register */ -#define BCM54616S_SHD_100FX_CTRL 0x13 -#define BCM54616S_100FX_MODE BIT(0) /* 100-FX SerDes Enable */ - -/* 11111: Mode Control Register */ -#define BCM54XX_SHD_MODE 0x1f -#define BCM54XX_SHD_INTF_SEL_MASK GENMASK(2, 1) /* INTERF_SEL[1:0] */ -#define BCM54XX_SHD_INTF_SEL_RGMII 0x02 -#define BCM54XX_SHD_INTF_SEL_SGMII 0x04 -#define BCM54XX_SHD_INTF_SEL_GBIC 0x06 -#define BCM54XX_SHD_MODE_1000BX BIT(0) /* Enable 1000-X registers */ /* * EXPANSION SHADOW ACCESS REGISTERS. 
(PHY REG 0x15, 0x16, and 0x17) @@ -251,15 +189,6 @@ #define BCM5482_SSD_SGMII_SLAVE_EN 0x0002 /* Slave mode enable */ #define BCM5482_SSD_SGMII_SLAVE_AD 0x0001 /* Slave auto-detection */ -/* BCM54810 Registers */ -#define BCM54810_EXP_BROADREACH_LRE_MISC_CTL (MII_BCM54XX_EXP_SEL_ER + 0x90) -#define BCM54810_EXP_BROADREACH_LRE_MISC_CTL_EN (1 << 0) -#define BCM54810_SHD_CLK_CTL 0x3 -#define BCM54810_SHD_CLK_CTL_GTXCLK_EN (1 << 9) - -/* BCM54612E Registers */ -#define BCM54612E_EXP_SPARE0 (MII_BCM54XX_EXP_SEL_ETC + 0x34) -#define BCM54612E_LED4_CLK125OUT_EN (1 << 1) /*****************************************************************************/ /* Fast Ethernet Transceiver definitions. */ @@ -293,58 +222,8 @@ #define LPI_FEATURE_EN_DIG1000X 0x4000 /* Core register definitions*/ -#define MII_BRCM_CORE_BASE12 0x12 -#define MII_BRCM_CORE_BASE13 0x13 -#define MII_BRCM_CORE_BASE14 0x14 #define MII_BRCM_CORE_BASE1E 0x1E #define MII_BRCM_CORE_EXPB0 0xB0 #define MII_BRCM_CORE_EXPB1 0xB1 -/* Enhanced Cable Diagnostics */ -#define BCM54XX_RDB_ECD_CTRL 0x2a0 -#define BCM54XX_EXP_ECD_CTRL (MII_BCM54XX_EXP_SEL_ER + 0xc0) - -#define BCM54XX_ECD_CTRL_CABLE_TYPE_CAT3 1 /* CAT3 or worse */ -#define BCM54XX_ECD_CTRL_CABLE_TYPE_CAT5 0 /* CAT5 or better */ -#define BCM54XX_ECD_CTRL_CABLE_TYPE_MASK BIT(0) /* cable type */ -#define BCM54XX_ECD_CTRL_INVALID BIT(3) /* invalid result */ -#define BCM54XX_ECD_CTRL_UNIT_CM 0 /* centimeters */ -#define BCM54XX_ECD_CTRL_UNIT_M 1 /* meters */ -#define BCM54XX_ECD_CTRL_UNIT_MASK BIT(10) /* cable length unit */ -#define BCM54XX_ECD_CTRL_IN_PROGRESS BIT(11) /* test in progress */ -#define BCM54XX_ECD_CTRL_BREAK_LINK BIT(12) /* unconnect link - * during test - */ -#define BCM54XX_ECD_CTRL_CROSS_SHORT_DIS BIT(13) /* disable inter-pair - * short check - */ -#define BCM54XX_ECD_CTRL_RUN BIT(15) /* run immediate */ - -#define BCM54XX_RDB_ECD_FAULT_TYPE 0x2a1 -#define BCM54XX_EXP_ECD_FAULT_TYPE (MII_BCM54XX_EXP_SEL_ER + 0xc1) -#define 
BCM54XX_ECD_FAULT_TYPE_INVALID 0x0 -#define BCM54XX_ECD_FAULT_TYPE_OK 0x1 -#define BCM54XX_ECD_FAULT_TYPE_OPEN 0x2 -#define BCM54XX_ECD_FAULT_TYPE_SAME_SHORT 0x3 /* short same pair */ -#define BCM54XX_ECD_FAULT_TYPE_CROSS_SHORT 0x4 /* short different pairs */ -#define BCM54XX_ECD_FAULT_TYPE_BUSY 0x9 -#define BCM54XX_ECD_FAULT_TYPE_PAIR_D_MASK GENMASK(3, 0) -#define BCM54XX_ECD_FAULT_TYPE_PAIR_C_MASK GENMASK(7, 4) -#define BCM54XX_ECD_FAULT_TYPE_PAIR_B_MASK GENMASK(11, 8) -#define BCM54XX_ECD_FAULT_TYPE_PAIR_A_MASK GENMASK(15, 12) -#define BCM54XX_ECD_PAIR_A_LENGTH_RESULTS 0x2a2 -#define BCM54XX_ECD_PAIR_B_LENGTH_RESULTS 0x2a3 -#define BCM54XX_ECD_PAIR_C_LENGTH_RESULTS 0x2a4 -#define BCM54XX_ECD_PAIR_D_LENGTH_RESULTS 0x2a5 - -#define BCM54XX_RDB_ECD_PAIR_A_LENGTH_RESULTS 0x2a2 -#define BCM54XX_EXP_ECD_PAIR_A_LENGTH_RESULTS (MII_BCM54XX_EXP_SEL_ER + 0xc2) -#define BCM54XX_RDB_ECD_PAIR_B_LENGTH_RESULTS 0x2a3 -#define BCM54XX_EXP_ECD_PAIR_B_LENGTH_RESULTS (MII_BCM54XX_EXP_SEL_ER + 0xc3) -#define BCM54XX_RDB_ECD_PAIR_C_LENGTH_RESULTS 0x2a4 -#define BCM54XX_EXP_ECD_PAIR_C_LENGTH_RESULTS (MII_BCM54XX_EXP_SEL_ER + 0xc4) -#define BCM54XX_RDB_ECD_PAIR_D_LENGTH_RESULTS 0x2a5 -#define BCM54XX_EXP_ECD_PAIR_D_LENGTH_RESULTS (MII_BCM54XX_EXP_SEL_ER + 0xc5) -#define BCM54XX_ECD_LENGTH_RESULTS_INVALID 0xffff - #endif /* _LINUX_BRCMPHY_H */ diff --git a/include/linux/bsearch.h b/include/linux/bsearch.h index e66b711d09..90b1aa8672 100644 --- a/include/linux/bsearch.h +++ b/include/linux/bsearch.h @@ -1,32 +1,9 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_BSEARCH_H #define _LINUX_BSEARCH_H #include -static __always_inline -void *__inline_bsearch(const void *key, const void *base, size_t num, size_t size, cmp_func_t cmp) -{ - const char *pivot; - int result; - - while (num > 0) { - pivot = base + (num >> 1) * size; - result = cmp(key, pivot); - - if (result == 0) - return (void *)pivot; - - if (result > 0) { - base = pivot + size; - num--; - } - num >>= 1; - } - - return 
NULL; -} - -extern void *bsearch(const void *key, const void *base, size_t num, size_t size, cmp_func_t cmp); +void *bsearch(const void *key, const void *base, size_t num, size_t size, + int (*cmp)(const void *key, const void *elt)); #endif /* _LINUX_BSEARCH_H */ diff --git a/include/linux/bsg-lib.h b/include/linux/bsg-lib.h index 6b211323a4..a226652a5a 100644 --- a/include/linux/bsg-lib.h +++ b/include/linux/bsg-lib.h @@ -1,26 +1,35 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * BSG helper library * * Copyright (C) 2008 James Smart, Emulex Corporation * Copyright (C) 2011 Red Hat, Inc. All rights reserved. * Copyright (C) 2011 Mike Christie + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + * */ #ifndef _BLK_BSG_ #define _BLK_BSG_ #include -#include -struct bsg_job; struct request; struct device; struct scatterlist; struct request_queue; -typedef int (bsg_job_fn) (struct bsg_job *); -typedef enum blk_eh_timer_return (bsg_timeout_fn)(struct request *); - struct bsg_buffer { unsigned int payload_len; int sg_cnt; @@ -29,10 +38,7 @@ struct bsg_buffer { struct bsg_job { struct device *dev; - - struct kref kref; - - unsigned int timeout; + struct request *req; /* Transport/driver specific request/reply structs */ void *request; @@ -53,22 +59,13 @@ struct bsg_job { struct bsg_buffer request_payload; struct bsg_buffer reply_payload; - int result; - unsigned int reply_payload_rcv_len; - - /* BIDI support */ - struct request *bidi_rq; - struct bio *bidi_bio; - void *dd_data; /* Used for driver-specific storage */ }; void bsg_job_done(struct bsg_job *job, int result, unsigned int reply_payload_rcv_len); -struct request_queue *bsg_setup_queue(struct device *dev, const char *name, - bsg_job_fn *job_fn, bsg_timeout_fn *timeout, int dd_job_size); -void bsg_remove_queue(struct request_queue *q); -void bsg_job_put(struct bsg_job *job); -int __must_check bsg_job_get(struct bsg_job *job); +int bsg_setup_queue(struct device *dev, struct request_queue *q, char *name, + bsg_job_fn *job_fn, int dd_job_size); +void bsg_request_fn(struct request_queue *q); #endif diff --git a/include/linux/bsg.h b/include/linux/bsg.h index 1ac81c809d..7173f6e9d2 100644 --- a/include/linux/bsg.h +++ b/include/linux/bsg.h @@ -1,19 +1,33 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -#ifndef _LINUX_BSG_H -#define _LINUX_BSG_H +#ifndef BSG_H +#define BSG_H #include -struct bsg_device; -struct device; -struct request_queue; -typedef int (bsg_sg_io_fn)(struct request_queue *, struct 
sg_io_v4 *hdr, - fmode_t mode, unsigned int timeout); +#if defined(CONFIG_BLK_DEV_BSG) +struct bsg_class_device { + struct device *class_dev; + struct device *parent; + int minor; + struct request_queue *queue; + struct kref ref; + void (*release)(struct device *); +}; -struct bsg_device *bsg_register_queue(struct request_queue *q, - struct device *parent, const char *name, - bsg_sg_io_fn *sg_io_fn); -void bsg_unregister_queue(struct bsg_device *bcd); +extern int bsg_register_queue(struct request_queue *q, + struct device *parent, const char *name, + void (*release)(struct device *)); +extern void bsg_unregister_queue(struct request_queue *); +#else +static inline int bsg_register_queue(struct request_queue *q, + struct device *parent, const char *name, + void (*release)(struct device *)) +{ + return 0; +} +static inline void bsg_unregister_queue(struct request_queue *q) +{ +} +#endif -#endif /* _LINUX_BSG_H */ +#endif diff --git a/include/linux/btree-128.h b/include/linux/btree-128.h index 22c09f5c3c..0b3414c4c9 100644 --- a/include/linux/btree-128.h +++ b/include/linux/btree-128.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ extern struct btree_geo btree_geo128; struct btree_head128 { struct btree_head h; }; diff --git a/include/linux/btree-type.h b/include/linux/btree-type.h index fb34a52c78..9a1147ef85 100644 --- a/include/linux/btree-type.h +++ b/include/linux/btree-type.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #define __BTREE_TP(pfx, type, sfx) pfx ## type ## sfx #define _BTREE_TP(pfx, type, sfx) __BTREE_TP(pfx, type, sfx) #define BTREE_TP(pfx) _BTREE_TP(pfx, BTREE_TYPE_SUFFIX,) diff --git a/include/linux/btree.h b/include/linux/btree.h index 243ee54439..65b5bb0583 100644 --- a/include/linux/btree.h +++ b/include/linux/btree.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef BTREE_H #define BTREE_H @@ -10,7 +9,7 @@ * * A B+Tree is a data structure for looking up arbitrary (currently allowing * unsigned long, u32, u64 
and 2 * u64) keys into pointers. The data structure - * is described at https://en.wikipedia.org/wiki/B-tree, we currently do not + * is described at http://en.wikipedia.org/wiki/B-tree, we currently do not * use binary search to find the key on lookups. * * Each B+Tree consists of a head, that contains bookkeeping information and diff --git a/include/linux/btrfs.h b/include/linux/btrfs.h index 9a37a45ec8..22d799147d 100644 --- a/include/linux/btrfs.h +++ b/include/linux/btrfs.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_BTRFS_H #define _LINUX_BTRFS_H diff --git a/include/linux/buffer_head.h b/include/linux/buffer_head.h index 36f33685c8..ebbacd14d4 100644 --- a/include/linux/buffer_head.h +++ b/include/linux/buffer_head.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ /* * include/linux/buffer_head.h * @@ -22,6 +21,9 @@ enum bh_state_bits { BH_Dirty, /* Is dirty */ BH_Lock, /* Is locked */ BH_Req, /* Has been submitted for I/O */ + BH_Uptodate_Lock,/* Used by the first bh in a page, to serialise + * IO completion of other buffers in the page + */ BH_Mapped, /* Has a disk mapping */ BH_New, /* Disk mapping was newly created by get_block */ @@ -73,22 +75,16 @@ struct buffer_head { struct address_space *b_assoc_map; /* mapping this buffer is associated with */ atomic_t b_count; /* users using this buffer_head */ - spinlock_t b_uptodate_lock; /* Used by the first bh in a page, to - * serialise IO completion of other - * buffers in the page */ }; /* * macro tricks to expand the set_buffer_foo(), clear_buffer_foo() * and buffer_foo() functions. - * To avoid reset buffer flags that are already set, because that causes - * a costly cache line transition, check the flag first. 
*/ #define BUFFER_FNS(bit, name) \ static __always_inline void set_buffer_##name(struct buffer_head *bh) \ { \ - if (!test_bit(BH_##bit, &(bh)->b_state)) \ - set_bit(BH_##bit, &(bh)->b_state); \ + set_bit(BH_##bit, &(bh)->b_state); \ } \ static __always_inline void clear_buffer_##name(struct buffer_head *bh) \ { \ @@ -153,13 +149,13 @@ void buffer_check_dirty_writeback(struct page *page, */ void mark_buffer_dirty(struct buffer_head *bh); -void mark_buffer_write_io_error(struct buffer_head *bh); +void init_buffer(struct buffer_head *, bh_end_io_t *, void *); void touch_buffer(struct buffer_head *bh); void set_bh_page(struct buffer_head *bh, struct page *page, unsigned long offset); int try_to_free_buffers(struct page *); struct buffer_head *alloc_page_buffers(struct page *page, unsigned long size, - bool retry); + int retry); void create_empty_buffers(struct page *, unsigned long, unsigned long b_state); void end_buffer_read_sync(struct buffer_head *bh, int uptodate); @@ -172,12 +168,7 @@ int inode_has_buffers(struct inode *); void invalidate_inode_buffers(struct inode *); int remove_inode_buffers(struct inode *inode); int sync_mapping_buffers(struct address_space *mapping); -void clean_bdev_aliases(struct block_device *bdev, sector_t block, - sector_t len); -static inline void clean_bdev_bh_alias(struct buffer_head *bh) -{ - clean_bdev_aliases(bh->b_bdev, bh->b_blocknr, 1); -} +void unmap_underlying_metadata(struct block_device *bdev, sector_t block); void mark_buffer_async_write(struct buffer_head *bh); void __wait_on_buffer(struct buffer_head *); @@ -189,13 +180,9 @@ struct buffer_head *__getblk_gfp(struct block_device *bdev, sector_t block, void __brelse(struct buffer_head *); void __bforget(struct buffer_head *); void __breadahead(struct block_device *, sector_t block, unsigned int size); -void __breadahead_gfp(struct block_device *, sector_t block, unsigned int size, - gfp_t gfp); struct buffer_head *__bread_gfp(struct block_device *, sector_t block, unsigned 
size, gfp_t gfp); void invalidate_bh_lrus(void); -void invalidate_bh_lrus_cpu(void); -bool has_bh_in_lru(int cpu, void *dummy); struct buffer_head *alloc_buffer_head(gfp_t gfp_flags); void free_buffer_head(struct buffer_head * bh); void unlock_buffer(struct buffer_head *bh); @@ -204,6 +191,8 @@ void ll_rw_block(int, int, int, struct buffer_head * bh[]); int sync_dirty_buffer(struct buffer_head *bh); int __sync_dirty_buffer(struct buffer_head *bh, int op_flags); void write_dirty_buffer(struct buffer_head *bh, int op_flags); +int _submit_bh(int op, int op_flags, struct buffer_head *bh, + unsigned long bio_flags); int submit_bh(int, int, struct buffer_head *); void write_boundary_block(struct block_device *bdev, sector_t bblock, unsigned blocksize); @@ -237,7 +226,6 @@ int generic_write_end(struct file *, struct address_space *, loff_t, unsigned, unsigned, struct page *, void *); void page_zero_new_buffers(struct page *page, unsigned from, unsigned to); -void clean_page_buffers(struct page *page); int cont_write_begin(struct file *, struct address_space *, loff_t, unsigned, unsigned, struct page **, void **, get_block_t *, loff_t *); @@ -246,14 +234,16 @@ int block_commit_write(struct page *page, unsigned from, unsigned to); int block_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf, get_block_t get_block); /* Convert errno to return value from ->page_mkwrite() call */ -static inline vm_fault_t block_page_mkwrite_return(int err) +static inline int block_page_mkwrite_return(int err) { if (err == 0) return VM_FAULT_LOCKED; - if (err == -EFAULT || err == -EAGAIN) + if (err == -EFAULT) return VM_FAULT_NOPAGE; if (err == -ENOMEM) return VM_FAULT_OOM; + if (err == -EAGAIN) + return VM_FAULT_RETRY; /* -ENOSPC, -EDQUOT, -EIO ... 
*/ return VM_FAULT_SIGBUS; } @@ -274,6 +264,14 @@ void buffer_init(void); * inline definitions */ +static inline void attach_page_buffers(struct page *page, + struct buffer_head *head) +{ + get_page(page); + SetPagePrivate(page); + set_page_private(page, (unsigned long)head); +} + static inline void get_bh(struct buffer_head *bh) { atomic_inc(&bh->b_count); @@ -315,12 +313,6 @@ sb_breadahead(struct super_block *sb, sector_t block) __breadahead(sb->s_bdev, block, sb->s_blocksize); } -static inline void -sb_breadahead_unmovable(struct super_block *sb, sector_t block) -{ - __breadahead_gfp(sb->s_bdev, block, sb->s_blocksize, 0); -} - static inline struct buffer_head * sb_getblk(struct super_block *sb, sector_t block) { @@ -408,9 +400,6 @@ static inline int inode_has_buffers(struct inode *inode) { return 0; } static inline void invalidate_inode_buffers(struct inode *inode) {} static inline int remove_inode_buffers(struct inode *inode) { return 1; } static inline int sync_mapping_buffers(struct address_space *mapping) { return 0; } -static inline void invalidate_bh_lrus_cpu(void) {} -static inline bool has_bh_in_lru(int cpu, void *dummy) { return false; } -#define buffer_heads_over_limit 0 #endif /* CONFIG_BLOCK */ #endif /* _LINUX_BUFFER_HEAD_H */ diff --git a/include/linux/bug.h b/include/linux/bug.h index 348acf2558..292d6a10b0 100644 --- a/include/linux/bug.h +++ b/include/linux/bug.h @@ -1,10 +1,8 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_BUG_H #define _LINUX_BUG_H #include #include -#include enum bug_trap_type { BUG_TRAP_TYPE_NONE = 0, @@ -15,9 +13,80 @@ enum bug_trap_type { struct pt_regs; #ifdef __CHECKER__ +#define __BUILD_BUG_ON_NOT_POWER_OF_2(n) (0) +#define BUILD_BUG_ON_NOT_POWER_OF_2(n) (0) +#define BUILD_BUG_ON_ZERO(e) (0) +#define BUILD_BUG_ON_NULL(e) ((void*)0) +#define BUILD_BUG_ON_INVALID(e) (0) +#define BUILD_BUG_ON_MSG(cond, msg) (0) +#define BUILD_BUG_ON(condition) (0) +#define BUILD_BUG() (0) #define MAYBE_BUILD_BUG_ON(cond) (0) 
#else /* __CHECKER__ */ +/* Force a compilation error if a constant expression is not a power of 2 */ +#define __BUILD_BUG_ON_NOT_POWER_OF_2(n) \ + BUILD_BUG_ON(((n) & ((n) - 1)) != 0) +#define BUILD_BUG_ON_NOT_POWER_OF_2(n) \ + BUILD_BUG_ON((n) == 0 || (((n) & ((n) - 1)) != 0)) + +/* Force a compilation error if condition is true, but also produce a + result (of value 0 and type size_t), so the expression can be used + e.g. in a structure initializer (or where-ever else comma expressions + aren't permitted). */ +#define BUILD_BUG_ON_ZERO(e) (sizeof(struct { int:-!!(e); })) +#define BUILD_BUG_ON_NULL(e) ((void *)sizeof(struct { int:-!!(e); })) + +/* + * BUILD_BUG_ON_INVALID() permits the compiler to check the validity of the + * expression but avoids the generation of any code, even if that expression + * has side-effects. + */ +#define BUILD_BUG_ON_INVALID(e) ((void)(sizeof((__force long)(e)))) + +/** + * BUILD_BUG_ON_MSG - break compile if a condition is true & emit supplied + * error message. + * @condition: the condition which the compiler should know is false. + * + * See BUILD_BUG_ON for description. + */ +#define BUILD_BUG_ON_MSG(cond, msg) compiletime_assert(!(cond), msg) + +/** + * BUILD_BUG_ON - break compile if a condition is true. + * @condition: the condition which the compiler should know is false. + * + * If you have some code which relies on certain constants being equal, or + * some other compile-time-evaluated condition, you should use BUILD_BUG_ON to + * detect if someone changes it. + * + * The implementation uses gcc's reluctance to create a negative array, but gcc + * (as of 4.4) only emits that error for obvious cases (e.g. not arguments to + * inline functions). Luckily, in 4.3 they added the "error" function + * attribute just for this type of case. 
Thus, we use a negative sized array + * (should always create an error on gcc versions older than 4.4) and then call + * an undefined function with the error attribute (should always create an + * error on gcc 4.3 and later). If for some reason, neither creates a + * compile-time error, we'll still have a link-time error, which is harder to + * track down. + */ +#ifndef __OPTIMIZE__ +#define BUILD_BUG_ON(condition) ((void)sizeof(char[1 - 2*!!(condition)])) +#else +#define BUILD_BUG_ON(condition) \ + BUILD_BUG_ON_MSG(condition, "BUILD_BUG_ON failed: " #condition) +#endif + +/** + * BUILD_BUG - break compile if used. + * + * If you have some code that you expect the compiler to eliminate at + * build time, you should use BUILD_BUG to detect if it is + * unexpectedly used. + */ +#define BUILD_BUG() BUILD_BUG_ON_MSG(1, "BUILD_BUG failed") + #define MAYBE_BUILD_BUG_ON(cond) \ do { \ if (__builtin_constant_p((cond))) \ @@ -36,59 +105,20 @@ static inline int is_warning_bug(const struct bug_entry *bug) return bug->flags & BUGFLAG_WARNING; } -void bug_get_file_line(struct bug_entry *bug, const char **file, - unsigned int *line); - -struct bug_entry *find_bug(unsigned long bugaddr); +const struct bug_entry *find_bug(unsigned long bugaddr); enum bug_trap_type report_bug(unsigned long bug_addr, struct pt_regs *regs); /* These are defined by the architecture */ int is_valid_bugaddr(unsigned long addr); -void generic_bug_clear_once(void); - #else /* !CONFIG_GENERIC_BUG */ -static inline void *find_bug(unsigned long bugaddr) -{ - return NULL; -} - static inline enum bug_trap_type report_bug(unsigned long bug_addr, struct pt_regs *regs) { return BUG_TRAP_TYPE_BUG; } -struct bug_entry; -static inline void bug_get_file_line(struct bug_entry *bug, const char **file, - unsigned int *line) -{ - *file = NULL; - *line = 0; -} - -static inline void generic_bug_clear_once(void) {} - #endif /* CONFIG_GENERIC_BUG */ - -/* - * Since detected data corruption should stop operation on the 
affected - * structures. Return value must be checked and sanely acted on by caller. - */ -static inline __must_check bool check_data_corruption(bool v) { return v; } -#define CHECK_DATA_CORRUPTION(condition, fmt, ...) \ - check_data_corruption(({ \ - bool corruption = unlikely(condition); \ - if (corruption) { \ - if (IS_ENABLED(CONFIG_BUG_ON_DATA_CORRUPTION)) { \ - pr_err(fmt, ##__VA_ARGS__); \ - BUG(); \ - } else \ - WARN(1, fmt, ##__VA_ARGS__); \ - } \ - corruption; \ - })) - #endif /* _LINUX_BUG_H */ diff --git a/include/linux/bvec.h b/include/linux/bvec.h index 0e9bdd42da..89b65b82d9 100644 --- a/include/linux/bvec.h +++ b/include/linux/bvec.h @@ -1,33 +1,30 @@ -/* SPDX-License-Identifier: GPL-2.0 */ /* * bvec iterator * * Copyright (C) 2001 Ming Lei + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public Licens + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111- */ -#ifndef __LINUX_BVEC_H -#define __LINUX_BVEC_H +#ifndef __LINUX_BVEC_ITER_H +#define __LINUX_BVEC_ITER_H -#include +#include #include -#include -#include -#include -#include -#include -struct page; - -/** - * struct bio_vec - a contiguous range of physical memory addresses - * @bv_page: First page associated with the address range. - * @bv_len: Number of bytes in the address range. - * @bv_offset: Start of the address range relative to the start of @bv_page. 
- * - * The following holds for a bvec if n * PAGE_SIZE < bv_offset + bv_len: - * - * nth_page(@bv_page, n) == @bv_page + n - * - * This holds because page_is_mergeable() checks the above property. +/* + * was unsigned short, but we might as well be ready for > 64kB I/O pages */ struct bio_vec { struct page *bv_page; @@ -46,51 +43,22 @@ struct bvec_iter { current bvec */ }; -struct bvec_iter_all { - struct bio_vec bv; - int idx; - unsigned done; -}; - /* * various member access, note that bio_data should of course not be used * on highmem page vectors */ #define __bvec_iter_bvec(bvec, iter) (&(bvec)[(iter).bi_idx]) -/* multi-page (mp_bvec) helpers */ -#define mp_bvec_iter_page(bvec, iter) \ +#define bvec_iter_page(bvec, iter) \ (__bvec_iter_bvec((bvec), (iter))->bv_page) -#define mp_bvec_iter_len(bvec, iter) \ +#define bvec_iter_len(bvec, iter) \ min((iter).bi_size, \ __bvec_iter_bvec((bvec), (iter))->bv_len - (iter).bi_bvec_done) -#define mp_bvec_iter_offset(bvec, iter) \ +#define bvec_iter_offset(bvec, iter) \ (__bvec_iter_bvec((bvec), (iter))->bv_offset + (iter).bi_bvec_done) -#define mp_bvec_iter_page_idx(bvec, iter) \ - (mp_bvec_iter_offset((bvec), (iter)) / PAGE_SIZE) - -#define mp_bvec_iter_bvec(bvec, iter) \ -((struct bio_vec) { \ - .bv_page = mp_bvec_iter_page((bvec), (iter)), \ - .bv_len = mp_bvec_iter_len((bvec), (iter)), \ - .bv_offset = mp_bvec_iter_offset((bvec), (iter)), \ -}) - -/* For building single-page bvec in flight */ - #define bvec_iter_offset(bvec, iter) \ - (mp_bvec_iter_offset((bvec), (iter)) % PAGE_SIZE) - -#define bvec_iter_len(bvec, iter) \ - min_t(unsigned, mp_bvec_iter_len((bvec), (iter)), \ - PAGE_SIZE - bvec_iter_offset((bvec), (iter))) - -#define bvec_iter_page(bvec, iter) \ - (mp_bvec_iter_page((bvec), (iter)) + \ - mp_bvec_iter_page_idx((bvec), (iter))) - #define bvec_iter_bvec(bvec, iter) \ ((struct bio_vec) { \ .bv_page = bvec_iter_page((bvec), (iter)), \ @@ -98,147 +66,32 @@ struct bvec_iter_all { .bv_offset = 
bvec_iter_offset((bvec), (iter)), \ }) -static inline bool bvec_iter_advance(const struct bio_vec *bv, - struct bvec_iter *iter, unsigned bytes) +static inline void bvec_iter_advance(const struct bio_vec *bv, + struct bvec_iter *iter, + unsigned bytes) { - unsigned int idx = iter->bi_idx; + WARN_ONCE(bytes > iter->bi_size, + "Attempted to advance past end of bvec iter\n"); - if (WARN_ONCE(bytes > iter->bi_size, - "Attempted to advance past end of bvec iter\n")) { - iter->bi_size = 0; - return false; + while (bytes) { + unsigned iter_len = bvec_iter_len(bv, *iter); + unsigned len = min(bytes, iter_len); + + bytes -= len; + iter->bi_size -= len; + iter->bi_bvec_done += len; + + if (iter->bi_bvec_done == __bvec_iter_bvec(bv, *iter)->bv_len) { + iter->bi_bvec_done = 0; + iter->bi_idx++; + } } - - iter->bi_size -= bytes; - bytes += iter->bi_bvec_done; - - while (bytes && bytes >= bv[idx].bv_len) { - bytes -= bv[idx].bv_len; - idx++; - } - - iter->bi_idx = idx; - iter->bi_bvec_done = bytes; - return true; -} - -/* - * A simpler version of bvec_iter_advance(), @bytes should not span - * across multiple bvec entries, i.e. 
bytes <= bv[i->bi_idx].bv_len - */ -static inline void bvec_iter_advance_single(const struct bio_vec *bv, - struct bvec_iter *iter, unsigned int bytes) -{ - unsigned int done = iter->bi_bvec_done + bytes; - - if (done == bv[iter->bi_idx].bv_len) { - done = 0; - iter->bi_idx++; - } - iter->bi_bvec_done = done; - iter->bi_size -= bytes; } #define for_each_bvec(bvl, bio_vec, iter, start) \ for (iter = (start); \ (iter).bi_size && \ ((bvl = bvec_iter_bvec((bio_vec), (iter))), 1); \ - bvec_iter_advance_single((bio_vec), &(iter), (bvl).bv_len)) + bvec_iter_advance((bio_vec), &(iter), (bvl).bv_len)) -/* for iterating one bio from start to end */ -#define BVEC_ITER_ALL_INIT (struct bvec_iter) \ -{ \ - .bi_sector = 0, \ - .bi_size = UINT_MAX, \ - .bi_idx = 0, \ - .bi_bvec_done = 0, \ -} - -static inline struct bio_vec *bvec_init_iter_all(struct bvec_iter_all *iter_all) -{ - iter_all->done = 0; - iter_all->idx = 0; - - return &iter_all->bv; -} - -static inline void bvec_advance(const struct bio_vec *bvec, - struct bvec_iter_all *iter_all) -{ - struct bio_vec *bv = &iter_all->bv; - - if (iter_all->done) { - bv->bv_page++; - bv->bv_offset = 0; - } else { - bv->bv_page = bvec->bv_page + (bvec->bv_offset >> PAGE_SHIFT); - bv->bv_offset = bvec->bv_offset & ~PAGE_MASK; - } - bv->bv_len = min_t(unsigned int, PAGE_SIZE - bv->bv_offset, - bvec->bv_len - iter_all->done); - iter_all->done += bv->bv_len; - - if (iter_all->done == bvec->bv_len) { - iter_all->idx++; - iter_all->done = 0; - } -} - -/** - * bvec_kmap_local - map a bvec into the kernel virtual address space - * @bvec: bvec to map - * - * Must be called on single-page bvecs only. Call kunmap_local on the returned - * address to unmap. - */ -static inline void *bvec_kmap_local(struct bio_vec *bvec) -{ - return kmap_local_page(bvec->bv_page) + bvec->bv_offset; -} - -/** - * memcpy_from_bvec - copy data from a bvec - * @bvec: bvec to copy from - * - * Must be called on single-page bvecs only. 
- */ -static inline void memcpy_from_bvec(char *to, struct bio_vec *bvec) -{ - memcpy_from_page(to, bvec->bv_page, bvec->bv_offset, bvec->bv_len); -} - -/** - * memcpy_to_bvec - copy data to a bvec - * @bvec: bvec to copy to - * - * Must be called on single-page bvecs only. - */ -static inline void memcpy_to_bvec(struct bio_vec *bvec, const char *from) -{ - memcpy_to_page(bvec->bv_page, bvec->bv_offset, from, bvec->bv_len); -} - -/** - * memzero_bvec - zero all data in a bvec - * @bvec: bvec to zero - * - * Must be called on single-page bvecs only. - */ -static inline void memzero_bvec(struct bio_vec *bvec) -{ - memzero_page(bvec->bv_page, bvec->bv_offset, bvec->bv_len); -} - -/** - * bvec_virt - return the virtual address for a bvec - * @bvec: bvec to return the virtual address for - * - * Note: the caller must ensure that @bvec->bv_page is not a highmem page. - */ -static inline void *bvec_virt(struct bio_vec *bvec) -{ - WARN_ON_ONCE(PageHighMem(bvec->bv_page)); - return page_address(bvec->bv_page) + bvec->bv_offset; -} - -#endif /* __LINUX_BVEC_H */ +#endif /* __LINUX_BVEC_ITER_H */ diff --git a/include/linux/byteorder/big_endian.h b/include/linux/byteorder/big_endian.h index d64a524d3c..392041475c 100644 --- a/include/linux/byteorder/big_endian.h +++ b/include/linux/byteorder/big_endian.h @@ -1,12 +1,7 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_BYTEORDER_BIG_ENDIAN_H #define _LINUX_BYTEORDER_BIG_ENDIAN_H #include -#ifndef CONFIG_CPU_BIG_ENDIAN -#warning inconsistent configuration, needs CONFIG_CPU_BIG_ENDIAN -#endif - #include #endif /* _LINUX_BYTEORDER_BIG_ENDIAN_H */ diff --git a/include/linux/byteorder/generic.h b/include/linux/byteorder/generic.h index 4b13e0a3e1..89f67c1c31 100644 --- a/include/linux/byteorder/generic.h +++ b/include/linux/byteorder/generic.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_BYTEORDER_GENERIC_H #define _LINUX_BYTEORDER_GENERIC_H @@ -156,23 +155,6 @@ static inline void le64_add_cpu(__le64 
*var, u64 val) *var = cpu_to_le64(le64_to_cpu(*var) + val); } -/* XXX: this stuff can be optimized */ -static inline void le32_to_cpu_array(u32 *buf, unsigned int words) -{ - while (words--) { - __le32_to_cpus(buf); - buf++; - } -} - -static inline void cpu_to_le32_array(u32 *buf, unsigned int words) -{ - while (words--) { - __cpu_to_le32s(buf); - buf++; - } -} - static inline void be16_add_cpu(__be16 *var, u16 val) { *var = cpu_to_be16(be16_to_cpu(*var) + val); @@ -188,20 +170,4 @@ static inline void be64_add_cpu(__be64 *var, u64 val) *var = cpu_to_be64(be64_to_cpu(*var) + val); } -static inline void cpu_to_be32_array(__be32 *dst, const u32 *src, size_t len) -{ - int i; - - for (i = 0; i < len; i++) - dst[i] = cpu_to_be32(src[i]); -} - -static inline void be32_to_cpu_array(u32 *dst, const __be32 *src, size_t len) -{ - int i; - - for (i = 0; i < len; i++) - dst[i] = be32_to_cpu(src[i]); -} - #endif /* _LINUX_BYTEORDER_GENERIC_H */ diff --git a/include/linux/byteorder/little_endian.h b/include/linux/byteorder/little_endian.h index 1ec650ff76..08057377aa 100644 --- a/include/linux/byteorder/little_endian.h +++ b/include/linux/byteorder/little_endian.h @@ -1,12 +1,7 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_BYTEORDER_LITTLE_ENDIAN_H #define _LINUX_BYTEORDER_LITTLE_ENDIAN_H #include -#ifdef CONFIG_CPU_BIG_ENDIAN -#warning inconsistent configuration, CONFIG_CPU_BIG_ENDIAN is set -#endif - #include #endif /* _LINUX_BYTEORDER_LITTLE_ENDIAN_H */ diff --git a/include/linux/c2port.h b/include/linux/c2port.h index 4e93bc63c2..4efabcb513 100644 --- a/include/linux/c2port.h +++ b/include/linux/c2port.h @@ -1,11 +1,16 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * Silicon Labs C2 port Linux support * * Copyright (c) 2007 Rodolfo Giometti * Copyright (c) 2007 Eurotech S.p.A. 
+ * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published by + * the Free Software Foundation */ +#include + #define C2PORT_NAME_LEN 32 struct device; @@ -17,8 +22,10 @@ struct device; /* Main struct */ struct c2port_ops; struct c2port_device { + kmemcheck_bitfield_begin(flags); unsigned int access:1; unsigned int flash_access:1; + kmemcheck_bitfield_end(flags); int id; char name[C2PORT_NAME_LEN]; diff --git a/include/linux/cache.h b/include/linux/cache.h index d742c57eae..f28266d9b8 100644 --- a/include/linux/cache.h +++ b/include/linux/cache.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef __LINUX_CACHE_H #define __LINUX_CACHE_H @@ -15,14 +14,8 @@ /* * __read_mostly is used to keep rarely changing variables out of frequently - * updated cachelines. Its use should be reserved for data that is used - * frequently in hot paths. Performance traces can help decide when to use - * this. You want __read_mostly data to be tightly packed, so that in the - * best case multiple frequently read variables for a hot path will be next - * to each other in order to reduce the number of cachelines needed to - * execute a critical path. We should be mindful and selective of its use. - * ie: if you're going to use it please supply a *good* justification in your - * commit log + * updated cachelines. If an architecture doesn't support it, ignore the + * hint. */ #ifndef __read_mostly #define __read_mostly @@ -33,8 +26,18 @@ * after mark_rodata_ro() has been called). These are effectively read-only, * but may get written to during init, so can't live in .rodata (via "const"). 
*/ +#ifdef CONFIG_PAX_KERNEXEC +# ifdef __ro_after_init +# error KERNEXEC requires __read_only +# endif +# define __read_only __attribute__((__section__(".data..read_only"))) +# define __ro_after_init __read_only +#else +# define __read_only __read_mostly +#endif + #ifndef __ro_after_init -#define __ro_after_init __section(".data..ro_after_init") +#define __ro_after_init __attribute__((__section__(".data..ro_after_init"))) #endif #ifndef ____cacheline_aligned diff --git a/include/linux/cacheinfo.h b/include/linux/cacheinfo.h index 2f909ed084..2189935075 100644 --- a/include/linux/cacheinfo.h +++ b/include/linux/cacheinfo.h @@ -1,9 +1,7 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_CACHEINFO_H #define _LINUX_CACHEINFO_H #include -#include #include #include @@ -18,11 +16,8 @@ enum cache_type { CACHE_TYPE_UNIFIED = BIT(2), }; -extern unsigned int coherency_max_size; - /** * struct cacheinfo - represent a cache leaf node - * @id: This cache's id. It is unique among caches with the same (type, level). * @type: type of the cache - data, inst or unified * @level: represents the hierarchy in the multi-level cache * @coherency_line_size: size of each cache line usually representing @@ -37,8 +32,9 @@ extern unsigned int coherency_max_size; * @shared_cpu_map: logical cpumask representing all the cpus sharing * this cache node * @attributes: bitfield representing various cache attributes - * @fw_token: Unique value used to determine if different cacheinfo - * structures represent a single hardware cache instance. 
+ * @of_node: if devicetree is used, this represents either the cpu node in + * case there's no explicit cache node or the cache node itself in the + * device tree * @disable_sysfs: indicates whether this node is visible to the user via * sysfs or not * @priv: pointer to any private data structure specific to particular @@ -48,7 +44,6 @@ extern unsigned int coherency_max_size; * keeping, the remaining members form the core properties of the cache */ struct cacheinfo { - unsigned int id; enum cache_type type; unsigned int level; unsigned int coherency_line_size; @@ -66,8 +61,8 @@ struct cacheinfo { #define CACHE_WRITE_ALLOCATE BIT(3) #define CACHE_ALLOCATE_POLICY_MASK \ (CACHE_READ_ALLOCATE | CACHE_WRITE_ALLOCATE) -#define CACHE_ID BIT(4) - void *fw_token; + + struct device_node *of_node; bool disable_sysfs; void *priv; }; @@ -76,50 +71,30 @@ struct cpu_cacheinfo { struct cacheinfo *info_list; unsigned int num_levels; unsigned int num_leaves; - bool cpu_map_populated; }; +/* + * Helpers to make sure "func" is executed on the cpu whose cache + * attributes are being detected + */ +#define DEFINE_SMP_CALL_CACHE_FUNCTION(func) \ +static inline void _##func(void *ret) \ +{ \ + int cpu = smp_processor_id(); \ + *(int *)ret = __##func(cpu); \ +} \ + \ +int func(unsigned int cpu) \ +{ \ + int ret; \ + smp_call_function_single(cpu, _##func, &ret, true); \ + return ret; \ +} + struct cpu_cacheinfo *get_cpu_cacheinfo(unsigned int cpu); int init_cache_level(unsigned int cpu); int populate_cache_leaves(unsigned int cpu); -int cache_setup_acpi(unsigned int cpu); -#ifndef CONFIG_ACPI_PPTT -/* - * acpi_find_last_cache_level is only called on ACPI enabled - * platforms using the PPTT for topology. This means that if - * the platform supports other firmware configuration methods - * we need to stub out the call when ACPI is disabled. - * ACPI enabled platforms not using PPTT won't be making calls - * to this function so we need not worry about them. 
- */ -static inline int acpi_find_last_cache_level(unsigned int cpu) -{ - return 0; -} -#else -int acpi_find_last_cache_level(unsigned int cpu); -#endif const struct attribute_group *cache_get_priv_group(struct cacheinfo *this_leaf); -/* - * Get the id of the cache associated with @cpu at level @level. - * cpuhp lock must be held. - */ -static inline int get_cpu_cacheinfo_id(int cpu, int level) -{ - struct cpu_cacheinfo *ci = get_cpu_cacheinfo(cpu); - int i; - - for (i = 0; i < ci->num_leaves; i++) { - if (ci->info_list[i].level == level) { - if (ci->info_list[i].attributes & CACHE_ID) - return ci->info_list[i].id; - return -1; - } - } - - return -1; -} - #endif /* _LINUX_CACHEINFO_H */ diff --git a/include/linux/can/core.h b/include/linux/can/core.h index 5fb8d0e3f9..df08a41d5b 100644 --- a/include/linux/can/core.h +++ b/include/linux/can/core.h @@ -1,12 +1,11 @@ -/* SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause) */ /* * linux/can/core.h * - * Prototypes and definitions for CAN protocol modules using the PF_CAN core + * Protoypes and definitions for CAN protocol modules using the PF_CAN core * * Authors: Oliver Hartkopp * Urs Thuermann - * Copyright (c) 2002-2017 Volkswagen Group Electronic Research + * Copyright (c) 2002-2007 Volkswagen Group Electronic Research * All rights reserved. * */ @@ -18,6 +17,13 @@ #include #include +#define CAN_VERSION "20120528" + +/* increment this number each time you change some user-space interface */ +#define CAN_ABI_VERSION "9" + +#define CAN_VERSION_STRING "rev " CAN_VERSION " abi " CAN_ABI_VERSION + #define DNAME(dev) ((dev) ? 
(dev)->name : "any") /** @@ -34,30 +40,21 @@ struct can_proto { struct proto *prot; }; -/* required_size - * macro to find the minimum size of a struct - * that includes a requested member - */ -#define CAN_REQUIRED_SIZE(struct_type, member) \ - (offsetof(typeof(struct_type), member) + \ - sizeof(((typeof(struct_type) *)(NULL))->member)) - /* function prototypes for the CAN networklayer core (af_can.c) */ extern int can_proto_register(const struct can_proto *cp); extern void can_proto_unregister(const struct can_proto *cp); -int can_rx_register(struct net *net, struct net_device *dev, - canid_t can_id, canid_t mask, +int can_rx_register(struct net_device *dev, canid_t can_id, canid_t mask, void (*func)(struct sk_buff *, void *), void *data, char *ident, struct sock *sk); -extern void can_rx_unregister(struct net *net, struct net_device *dev, - canid_t can_id, canid_t mask, +extern void can_rx_unregister(struct net_device *dev, canid_t can_id, + canid_t mask, void (*func)(struct sk_buff *, void *), void *data); extern int can_send(struct sk_buff *skb, int loop); -void can_sock_destruct(struct sock *sk); +extern int can_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg); #endif /* !_CAN_CORE_H */ diff --git a/include/linux/can/dev.h b/include/linux/can/dev.h index 2413253e54..5f5270941b 100644 --- a/include/linux/can/dev.h +++ b/include/linux/can/dev.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ /* * linux/can/dev.h * @@ -15,12 +14,9 @@ #define _CAN_DEV_H #include -#include #include #include -#include #include -#include #include /* @@ -32,12 +28,6 @@ enum can_mode { CAN_MODE_SLEEP }; -enum can_termination_gpio { - CAN_TERMINATION_GPIO_DISABLED = 0, - CAN_TERMINATION_GPIO_ENABLED, - CAN_TERMINATION_GPIO_MAX, -}; - /* * CAN common private data */ @@ -45,25 +35,11 @@ struct can_priv { struct net_device *dev; struct can_device_stats can_stats; + struct can_bittiming bittiming, data_bittiming; const struct can_bittiming_const *bittiming_const, 
*data_bittiming_const; - struct can_bittiming bittiming, data_bittiming; - const struct can_tdc_const *tdc_const; - struct can_tdc tdc; - - unsigned int bitrate_const_cnt; - const u32 *bitrate_const; - const u32 *data_bitrate_const; - unsigned int data_bitrate_const_cnt; - u32 bitrate_max; struct can_clock clock; - unsigned int termination_const_cnt; - const u16 *termination_const; - u16 termination; - struct gpio_desc *termination_gpio; - u16 termination_gpio_ohms[CAN_TERMINATION_GPIO_MAX]; - enum can_state state; /* CAN controller features - see include/uapi/linux/can/netlink.h */ @@ -77,7 +53,6 @@ struct can_priv { int (*do_set_bittiming)(struct net_device *dev); int (*do_set_data_bittiming)(struct net_device *dev); int (*do_set_mode)(struct net_device *dev, enum can_mode mode); - int (*do_set_termination)(struct net_device *dev, u16 term); int (*do_get_state)(const struct net_device *dev, enum can_state *state); int (*do_get_berr_counter)(const struct net_device *dev, @@ -96,6 +71,46 @@ struct can_priv { #endif }; +/* + * get_can_dlc(value) - helper macro to cast a given data length code (dlc) + * to __u8 and ensure the dlc value to be max. 8 bytes. + * + * To be used in the CAN netdriver receive path to ensure conformance with + * ISO 11898-1 Chapter 8.4.2.3 (DLC field) + */ +#define get_can_dlc(i) (min_t(__u8, (i), CAN_MAX_DLC)) +#define get_canfd_dlc(i) (min_t(__u8, (i), CANFD_MAX_DLC)) + +/* Drop a given socketbuffer if it does not contain a valid CAN frame. 
*/ +static inline bool can_dropped_invalid_skb(struct net_device *dev, + struct sk_buff *skb) +{ + const struct canfd_frame *cfd = (struct canfd_frame *)skb->data; + + if (skb->protocol == htons(ETH_P_CAN)) { + if (unlikely(skb->len != CAN_MTU || + cfd->len > CAN_MAX_DLEN)) + goto inval_skb; + } else if (skb->protocol == htons(ETH_P_CANFD)) { + if (unlikely(skb->len != CANFD_MTU || + cfd->len > CANFD_MAX_DLEN)) + goto inval_skb; + } else + goto inval_skb; + + return false; + +inval_skb: + kfree_skb(skb); + dev->stats.tx_dropped++; + return true; +} + +static inline bool can_is_canfd_skb(const struct sk_buff *skb) +{ + /* the CAN specific type of skb is identified by its data length */ + return skb->len == CANFD_MTU; +} /* helper to define static CAN controller features at device creation time */ static inline void can_set_static_ctrlmode(struct net_device *dev, @@ -112,14 +127,13 @@ static inline void can_set_static_ctrlmode(struct net_device *dev, dev->mtu = CANFD_MTU; } -void can_setup(struct net_device *dev); +/* get data length from can_dlc with sanitized can_dlc */ +u8 can_dlc2len(u8 can_dlc); -struct net_device *alloc_candev_mqs(int sizeof_priv, unsigned int echo_skb_max, - unsigned int txqs, unsigned int rxqs); -#define alloc_candev(sizeof_priv, echo_skb_max) \ - alloc_candev_mqs(sizeof_priv, echo_skb_max, 1, 1) -#define alloc_candev_mq(sizeof_priv, echo_skb_max, count) \ - alloc_candev_mqs(sizeof_priv, echo_skb_max, count, count) +/* map the sanitized data length to an appropriate data length code */ +u8 can_len2dlc(u8 len); + +struct net_device *alloc_candev(int sizeof_priv, unsigned int echo_skb_max); void free_candev(struct net_device *dev); /* a candev safe wrapper around netdev_priv */ @@ -135,18 +149,18 @@ void unregister_candev(struct net_device *dev); int can_restart_now(struct net_device *dev); void can_bus_off(struct net_device *dev); -const char *can_get_state_str(const enum can_state state); void can_change_state(struct net_device *dev, struct 
can_frame *cf, enum can_state tx_state, enum can_state rx_state); -#ifdef CONFIG_OF -void of_can_transceiver(struct net_device *dev); -#else -static inline void of_can_transceiver(struct net_device *dev) { } -#endif +void can_put_echo_skb(struct sk_buff *skb, struct net_device *dev, + unsigned int idx); +unsigned int can_get_echo_skb(struct net_device *dev, unsigned int idx); +void can_free_echo_skb(struct net_device *dev, unsigned int idx); -extern struct rtnl_link_ops can_link_ops; -int can_netlink_register(void); -void can_netlink_unregister(void); +struct sk_buff *alloc_can_skb(struct net_device *dev, struct can_frame **cf); +struct sk_buff *alloc_canfd_skb(struct net_device *dev, + struct canfd_frame **cfd); +struct sk_buff *alloc_can_err_skb(struct net_device *dev, + struct can_frame **cf); #endif /* !_CAN_DEV_H */ diff --git a/include/linux/can/led.h b/include/linux/can/led.h index 7c3cfd798c..2746f7c2f8 100644 --- a/include/linux/can/led.h +++ b/include/linux/can/led.h @@ -1,6 +1,9 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright 2012, Fabio Baltieri + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
*/ #ifndef _CAN_LED_H diff --git a/include/linux/can/platform/cc770.h b/include/linux/can/platform/cc770.h index 9587d68829..78b2d44f04 100644 --- a/include/linux/can/platform/cc770.h +++ b/include/linux/can/platform/cc770.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _CAN_PLATFORM_CC770_H #define _CAN_PLATFORM_CC770_H diff --git a/include/linux/can/platform/mcp251x.h b/include/linux/can/platform/mcp251x.h new file mode 100644 index 0000000000..d44fcae274 --- /dev/null +++ b/include/linux/can/platform/mcp251x.h @@ -0,0 +1,21 @@ +#ifndef _CAN_PLATFORM_MCP251X_H +#define _CAN_PLATFORM_MCP251X_H + +/* + * + * CAN bus driver for Microchip 251x CAN Controller with SPI Interface + * + */ + +#include + +/* + * struct mcp251x_platform_data - MCP251X SPI CAN controller platform data + * @oscillator_frequency: - oscillator frequency in Hz + */ + +struct mcp251x_platform_data { + unsigned long oscillator_frequency; +}; + +#endif /* !_CAN_PLATFORM_MCP251X_H */ diff --git a/include/linux/can/platform/rcar_can.h b/include/linux/can/platform/rcar_can.h new file mode 100644 index 0000000000..0f4a2f3df5 --- /dev/null +++ b/include/linux/can/platform/rcar_can.h @@ -0,0 +1,17 @@ +#ifndef _CAN_PLATFORM_RCAR_CAN_H_ +#define _CAN_PLATFORM_RCAR_CAN_H_ + +#include + +/* Clock Select Register settings */ +enum CLKR { + CLKR_CLKP1 = 0, /* Peripheral clock (clkp1) */ + CLKR_CLKP2 = 1, /* Peripheral clock (clkp2) */ + CLKR_CLKEXT = 3 /* Externally input clock */ +}; + +struct rcar_can_platform_data { + enum CLKR clock_select; /* Clock source select */ +}; + +#endif /* !_CAN_PLATFORM_RCAR_CAN_H_ */ diff --git a/include/linux/can/platform/sja1000.h b/include/linux/can/platform/sja1000.h index 5755ae5a47..93570b61ec 100644 --- a/include/linux/can/platform/sja1000.h +++ b/include/linux/can/platform/sja1000.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _CAN_PLATFORM_SJA1000_H #define _CAN_PLATFORM_SJA1000_H diff --git a/include/linux/can/platform/ti_hecc.h 
b/include/linux/can/platform/ti_hecc.h new file mode 100644 index 0000000000..a52f47ca6c --- /dev/null +++ b/include/linux/can/platform/ti_hecc.h @@ -0,0 +1,44 @@ +#ifndef _CAN_PLATFORM_TI_HECC_H +#define _CAN_PLATFORM_TI_HECC_H + +/* + * TI HECC (High End CAN Controller) driver platform header + * + * Copyright (C) 2009 Texas Instruments Incorporated - http://www.ti.com/ + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation version 2. + * + * This program is distributed as is WITHOUT ANY WARRANTY of any + * kind, whether express or implied; without even the implied warranty + * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +/** + * struct hecc_platform_data - HECC Platform Data + * + * @scc_hecc_offset: mostly 0 - should really never change + * @scc_ram_offset: SCC RAM offset + * @hecc_ram_offset: HECC RAM offset + * @mbx_offset: Mailbox RAM offset + * @int_line: Interrupt line to use - 0 or 1 + * @version: version for future use + * @transceiver_switch: platform specific callback fn for transceiver control + * + * Platform data structure to get all platform specific settings. 
+ * this structure also accounts the fact that the IP may have different + * RAM and mailbox offsets for different SOC's + */ +struct ti_hecc_platform_data { + u32 scc_hecc_offset; + u32 scc_ram_offset; + u32 hecc_ram_offset; + u32 mbx_offset; + u32 int_line; + u32 version; + void (*transceiver_switch) (int); +}; +#endif /* !_CAN_PLATFORM_TI_HECC_H */ diff --git a/include/linux/can/skb.h b/include/linux/can/skb.h index d311bc369a..51bb653278 100644 --- a/include/linux/can/skb.h +++ b/include/linux/can/skb.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause) */ /* * linux/can/skb.h * @@ -16,21 +15,6 @@ #include #include -void can_flush_echo_skb(struct net_device *dev); -int can_put_echo_skb(struct sk_buff *skb, struct net_device *dev, - unsigned int idx, unsigned int frame_len); -struct sk_buff *__can_get_echo_skb(struct net_device *dev, unsigned int idx, - u8 *len_ptr, unsigned int *frame_len_ptr); -unsigned int can_get_echo_skb(struct net_device *dev, unsigned int idx, - unsigned int *frame_len_ptr); -void can_free_echo_skb(struct net_device *dev, unsigned int idx, - unsigned int *frame_len_ptr); -struct sk_buff *alloc_can_skb(struct net_device *dev, struct can_frame **cf); -struct sk_buff *alloc_canfd_skb(struct net_device *dev, - struct canfd_frame **cfd); -struct sk_buff *alloc_can_err_skb(struct net_device *dev, - struct can_frame **cf); - /* * The struct can_skb_priv is used to transport additional information along * with the stored struct can(fd)_frame that can not be contained in existing @@ -44,14 +28,12 @@ struct sk_buff *alloc_can_err_skb(struct net_device *dev, * struct can_skb_priv - private additional data inside CAN sk_buffs * @ifindex: ifindex of the first interface the CAN frame appeared on * @skbcnt: atomic counter to have an unique id together with skb pointer - * @frame_len: length of CAN frame in data link layer * @cf: align to the following CAN frame at skb->data */ struct can_skb_priv { int ifindex; int skbcnt; - unsigned 
int frame_len; - struct can_frame cf[]; + struct can_frame cf[0]; }; static inline struct can_skb_priv *can_skb_prv(struct sk_buff *skb) @@ -66,12 +48,8 @@ static inline void can_skb_reserve(struct sk_buff *skb) static inline void can_skb_set_owner(struct sk_buff *skb, struct sock *sk) { - /* If the socket has already been closed by user space, the - * refcount may already be 0 (and the socket will be freed - * after the last TX skb has been freed). So only increase - * socket refcount if the refcount is > 0. - */ - if (sk && refcount_inc_not_zero(&sk->sk_refcnt)) { + if (sk) { + sock_hold(sk); skb->destructor = sock_efree; skb->sk = sk; } @@ -82,81 +60,21 @@ static inline void can_skb_set_owner(struct sk_buff *skb, struct sock *sk) */ static inline struct sk_buff *can_create_echo_skb(struct sk_buff *skb) { - struct sk_buff *nskb; + if (skb_shared(skb)) { + struct sk_buff *nskb = skb_clone(skb, GFP_ATOMIC); - nskb = skb_clone(skb, GFP_ATOMIC); - if (unlikely(!nskb)) { - kfree_skb(skb); - return NULL; + if (likely(nskb)) { + can_skb_set_owner(nskb, skb->sk); + consume_skb(skb); + return nskb; + } else { + kfree_skb(skb); + return NULL; + } } - can_skb_set_owner(nskb, skb->sk); - consume_skb(skb); - return nskb; -} - -/* Check for outgoing skbs that have not been created by the CAN subsystem */ -static inline bool can_skb_headroom_valid(struct net_device *dev, - struct sk_buff *skb) -{ - /* af_packet creates a headroom of HH_DATA_MOD bytes which is fine */ - if (WARN_ON_ONCE(skb_headroom(skb) < sizeof(struct can_skb_priv))) - return false; - - /* af_packet does not apply CAN skb specific settings */ - if (skb->ip_summed == CHECKSUM_NONE) { - /* init headroom */ - can_skb_prv(skb)->ifindex = dev->ifindex; - can_skb_prv(skb)->skbcnt = 0; - - skb->ip_summed = CHECKSUM_UNNECESSARY; - - /* perform proper loopback on capable devices */ - if (dev->flags & IFF_ECHO) - skb->pkt_type = PACKET_LOOPBACK; - else - skb->pkt_type = PACKET_HOST; - - skb_reset_mac_header(skb); - 
skb_reset_network_header(skb); - skb_reset_transport_header(skb); - } - - return true; -} - -/* Drop a given socketbuffer if it does not contain a valid CAN frame. */ -static inline bool can_dropped_invalid_skb(struct net_device *dev, - struct sk_buff *skb) -{ - const struct canfd_frame *cfd = (struct canfd_frame *)skb->data; - - if (skb->protocol == htons(ETH_P_CAN)) { - if (unlikely(skb->len != CAN_MTU || - cfd->len > CAN_MAX_DLEN)) - goto inval_skb; - } else if (skb->protocol == htons(ETH_P_CANFD)) { - if (unlikely(skb->len != CANFD_MTU || - cfd->len > CANFD_MAX_DLEN)) - goto inval_skb; - } else - goto inval_skb; - - if (!can_skb_headroom_valid(dev, skb)) - goto inval_skb; - - return false; - -inval_skb: - kfree_skb(skb); - dev->stats.tx_dropped++; - return true; -} - -static inline bool can_is_canfd_skb(const struct sk_buff *skb) -{ - /* the CAN specific type of skb is identified by its data length */ - return skb->len == CANFD_MTU; + /* we can assume to have an unshared skb with proper owner */ + return skb; } #endif /* !_CAN_SKB_H */ diff --git a/include/linux/capability.h b/include/linux/capability.h index 65efb74c35..ef1ec2aa02 100644 --- a/include/linux/capability.h +++ b/include/linux/capability.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ /* * This is * @@ -14,7 +13,7 @@ #define _LINUX_CAPABILITY_H #include -#include + #define _KERNEL_CAPABILITY_VERSION _LINUX_CAPABILITY_VERSION_3 #define _KERNEL_CAPABILITY_U32S _LINUX_CAPABILITY_U32S_3 @@ -25,12 +24,11 @@ typedef struct kernel_cap_struct { __u32 cap[_KERNEL_CAPABILITY_U32S]; } kernel_cap_t; -/* same as vfs_ns_cap_data but in cpu endian and always filled completely */ +/* exact same as vfs_cap_data but in cpu endian and always filled completely */ struct cpu_vfs_cap_data { __u32 magic_etc; kernel_cap_t permitted; kernel_cap_t inheritable; - kuid_t rootid; }; #define _USER_CAP_HEADER_SIZE (sizeof(struct __user_cap_header_struct)) @@ -210,7 +208,6 @@ extern bool 
has_ns_capability_noaudit(struct task_struct *t, extern bool capable(int cap); extern bool ns_capable(struct user_namespace *ns, int cap); extern bool ns_capable_noaudit(struct user_namespace *ns, int cap); -extern bool ns_capable_setid(struct user_namespace *ns, int cap); #else static inline bool has_capability(struct task_struct *t, int cap) { @@ -234,6 +231,10 @@ static inline bool capable(int cap) { return true; } +static inline bool capable_nolog(int cap) +{ + return true; +} static inline bool ns_capable(struct user_namespace *ns, int cap) { return true; @@ -242,40 +243,17 @@ static inline bool ns_capable_noaudit(struct user_namespace *ns, int cap) { return true; } -static inline bool ns_capable_setid(struct user_namespace *ns, int cap) -{ - return true; -} #endif /* CONFIG_MULTIUSER */ -bool privileged_wrt_inode_uidgid(struct user_namespace *ns, - struct user_namespace *mnt_userns, - const struct inode *inode); -bool capable_wrt_inode_uidgid(struct user_namespace *mnt_userns, - const struct inode *inode, int cap); +extern bool privileged_wrt_inode_uidgid(struct user_namespace *ns, const struct inode *inode); +extern bool capable_wrt_inode_uidgid(const struct inode *inode, int cap); +extern bool capable_wrt_inode_uidgid_nolog(const struct inode *inode, int cap); extern bool file_ns_capable(const struct file *file, struct user_namespace *ns, int cap); +extern bool capable_nolog(int cap); extern bool ptracer_capable(struct task_struct *tsk, struct user_namespace *ns); -static inline bool perfmon_capable(void) -{ - return capable(CAP_PERFMON) || capable(CAP_SYS_ADMIN); -} - -static inline bool bpf_capable(void) -{ - return capable(CAP_BPF) || capable(CAP_SYS_ADMIN); -} - -static inline bool checkpoint_restore_ns_capable(struct user_namespace *ns) -{ - return ns_capable(ns, CAP_CHECKPOINT_RESTORE) || - ns_capable(ns, CAP_SYS_ADMIN); -} /* audit system wants to get cap info from files as well */ -int get_vfs_caps_from_disk(struct user_namespace *mnt_userns, - 
const struct dentry *dentry, - struct cpu_vfs_cap_data *cpu_caps); +extern int get_vfs_caps_from_disk(const struct dentry *dentry, struct cpu_vfs_cap_data *cpu_caps); -int cap_convert_nscap(struct user_namespace *mnt_userns, struct dentry *dentry, - const void **ivalue, size_t size); +extern int is_privileged_binary(const struct dentry *dentry); #endif /* !_LINUX_CAPABILITY_H */ diff --git a/include/linux/cb710.h b/include/linux/cb710.h index 405657a9a0..8cc10411ba 100644 --- a/include/linux/cb710.h +++ b/include/linux/cb710.h @@ -1,8 +1,11 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * cb710/cb710.h * * Copyright by Michał Mirosław, 2008-2009 + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. */ #ifndef LINUX_CB710_DRIVER_H #define LINUX_CB710_DRIVER_H @@ -36,7 +39,7 @@ struct cb710_chip { unsigned slot_mask; unsigned slots; spinlock_t irq_lock; - struct cb710_slot slot[]; + struct cb710_slot slot[0]; }; /* NOTE: cb710_chip.slots is modified only during device init/exit and @@ -126,6 +129,10 @@ void cb710_dump_regs(struct cb710_chip *chip, unsigned dump); * cb710/sgbuf2.h * * Copyright by Michał Mirosław, 2008-2009 + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
*/ #ifndef LINUX_CB710_SG_H #define LINUX_CB710_SG_H diff --git a/include/linux/cciss_ioctl.h b/include/linux/cciss_ioctl.h index 1d5229200a..84b6e2d0f4 100644 --- a/include/linux/cciss_ioctl.h +++ b/include/linux/cciss_ioctl.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef CCISS_IOCTLH #define CCISS_IOCTLH diff --git a/include/linux/ccp.h b/include/linux/ccp.h index 868924dec5..edc5d04b96 100644 --- a/include/linux/ccp.h +++ b/include/linux/ccp.h @@ -1,27 +1,31 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * AMD Cryptographic Coprocessor (CCP) driver * - * Copyright (C) 2013,2017 Advanced Micro Devices, Inc. + * Copyright (C) 2013,2016 Advanced Micro Devices, Inc. * * Author: Tom Lendacky * Author: Gary R Hook + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. */ -#ifndef __CCP_H__ -#define __CCP_H__ +#ifndef __CPP_H__ +#define __CPP_H__ #include #include #include #include -#include -#include +#include + struct ccp_device; struct ccp_cmd; -#if defined(CONFIG_CRYPTO_DEV_SP_CCP) +#if defined(CONFIG_CRYPTO_DEV_CCP_DD) || \ + defined(CONFIG_CRYPTO_DEV_CCP_DD_MODULE) /** * ccp_present - check if a CCP device is present @@ -67,7 +71,7 @@ unsigned int ccp_version(void); */ int ccp_enqueue_cmd(struct ccp_cmd *cmd); -#else /* CONFIG_CRYPTO_DEV_CCP_SP_DEV is not enabled */ +#else /* CONFIG_CRYPTO_DEV_CCP_DD is not enabled */ static inline int ccp_present(void) { @@ -84,7 +88,7 @@ static inline int ccp_enqueue_cmd(struct ccp_cmd *cmd) return -ENODEV; } -#endif /* CONFIG_CRYPTO_DEV_SP_CCP */ +#endif /* CONFIG_CRYPTO_DEV_CCP_DD */ /***** AES engine *****/ @@ -119,10 +123,6 @@ enum ccp_aes_mode { CCP_AES_MODE_CFB, CCP_AES_MODE_CTR, CCP_AES_MODE_CMAC, - CCP_AES_MODE_GHASH, - CCP_AES_MODE_GCTR, - CCP_AES_MODE_GCM, - CCP_AES_MODE_GMAC, CCP_AES_MODE__LAST, }; @@ -137,9 +137,6 @@ enum ccp_aes_action { 
CCP_AES_ACTION_ENCRYPT, CCP_AES_ACTION__LAST, }; -/* Overloaded field */ -#define CCP_AES_GHASHAAD CCP_AES_ACTION_DECRYPT -#define CCP_AES_GHASHFINAL CCP_AES_ACTION_ENCRYPT /** * struct ccp_aes_engine - CCP AES operation @@ -171,8 +168,6 @@ struct ccp_aes_engine { enum ccp_aes_mode mode; enum ccp_aes_action action; - u32 authsize; - struct scatterlist *key; u32 key_len; /* In bytes */ @@ -186,8 +181,6 @@ struct ccp_aes_engine { struct scatterlist *cmac_key; /* K1/K2 cmac key required for * final cmac cmd */ u32 cmac_key_len; /* In bytes */ - - u32 aad_len; /* In bytes */ }; /***** XTS-AES engine *****/ @@ -229,7 +222,6 @@ enum ccp_xts_aes_unit_size { * AES operation the new IV overwrites the old IV. */ struct ccp_xts_aes_engine { - enum ccp_aes_type type; enum ccp_aes_action action; enum ccp_xts_aes_unit_size unit_size; @@ -257,8 +249,6 @@ enum ccp_sha_type { CCP_SHA_TYPE_1 = 1, CCP_SHA_TYPE_224, CCP_SHA_TYPE_256, - CCP_SHA_TYPE_384, - CCP_SHA_TYPE_512, CCP_SHA_TYPE__LAST, }; @@ -300,60 +290,6 @@ struct ccp_sha_engine { * final sha cmd */ }; -/***** 3DES engine *****/ -enum ccp_des3_mode { - CCP_DES3_MODE_ECB = 0, - CCP_DES3_MODE_CBC, - CCP_DES3_MODE_CFB, - CCP_DES3_MODE__LAST, -}; - -enum ccp_des3_type { - CCP_DES3_TYPE_168 = 1, - CCP_DES3_TYPE__LAST, - }; - -enum ccp_des3_action { - CCP_DES3_ACTION_DECRYPT = 0, - CCP_DES3_ACTION_ENCRYPT, - CCP_DES3_ACTION__LAST, -}; - -/** - * struct ccp_des3_engine - CCP SHA operation - * @type: Type of 3DES operation - * @mode: cipher mode - * @action: 3DES operation (decrypt/encrypt) - * @key: key to be used for this 3DES operation - * @key_len: length of key (in bytes) - * @iv: IV to be used for this AES operation - * @iv_len: length in bytes of iv - * @src: input data to be used for this operation - * @src_len: length of input data used for this operation (in bytes) - * @dst: output data produced by this operation - * - * Variables required to be set when calling ccp_enqueue_cmd(): - * - type, mode, action, key, key_len, 
src, dst, src_len - * - iv, iv_len for any mode other than ECB - * - * The iv variable is used as both input and output. On completion of the - * 3DES operation the new IV overwrites the old IV. - */ -struct ccp_des3_engine { - enum ccp_des3_type type; - enum ccp_des3_mode mode; - enum ccp_des3_action action; - - struct scatterlist *key; - u32 key_len; /* In bytes */ - - struct scatterlist *iv; - u32 iv_len; /* In bytes */ - - struct scatterlist *src, *dst; - u64 src_len; /* In bytes */ -}; - /***** RSA engine *****/ /** * struct ccp_rsa_engine - CCP RSA operation @@ -603,7 +539,7 @@ struct ccp_ecc_engine { enum ccp_engine { CCP_ENGINE_AES = 0, CCP_ENGINE_XTS_AES_128, - CCP_ENGINE_DES3, + CCP_ENGINE_RSVD1, CCP_ENGINE_SHA, CCP_ENGINE_RSA, CCP_ENGINE_PASSTHRU, @@ -617,7 +553,7 @@ enum ccp_engine { #define CCP_CMD_PASSTHRU_NO_DMA_MAP 0x00000002 /** - * struct ccp_cmd - CCP operation request + * struct ccp_cmd - CCP operation request * @entry: list element (ccp driver use only) * @work: work element used for callbacks (ccp driver use only) * @ccp: CCP device to be run on @@ -651,7 +587,6 @@ struct ccp_cmd { union { struct ccp_aes_engine aes; struct ccp_xts_aes_engine xts; - struct ccp_des3_engine des3; struct ccp_sha_engine sha; struct ccp_rsa_engine rsa; struct ccp_passthru_engine passthru; diff --git a/include/linux/cdev.h b/include/linux/cdev.h index 0e8cd6293d..7c05fd9dd8 100644 --- a/include/linux/cdev.h +++ b/include/linux/cdev.h @@ -1,11 +1,9 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_CDEV_H #define _LINUX_CDEV_H #include #include #include -#include struct file_operations; struct inode; @@ -28,10 +26,6 @@ void cdev_put(struct cdev *p); int cdev_add(struct cdev *, dev_t, unsigned); -void cdev_set_parent(struct cdev *p, struct kobject *kobj); -int cdev_device_add(struct cdev *cdev, struct device *dev); -void cdev_device_del(struct cdev *cdev, struct device *dev); - void cdev_del(struct cdev *); void cd_forget(struct inode *); diff --git 
a/include/linux/cdrom.h b/include/linux/cdrom.h index c4fef00abd..86e4d795e3 100644 --- a/include/linux/cdrom.h +++ b/include/linux/cdrom.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ /* * -- * General header file for linux CD-ROM drivers @@ -13,7 +12,6 @@ #include /* not really needed, later.. */ #include -#include #include struct packet_command @@ -22,7 +20,7 @@ struct packet_command unsigned char *buffer; unsigned int buflen; int stat; - struct scsi_sense_hdr *sshdr; + struct request_sense *sense; unsigned char data_direction; int quiet; int timeout; @@ -38,7 +36,7 @@ struct packet_command /* Uniform cdrom data structures for cdrom.c */ struct cdrom_device_info { - const struct cdrom_device_ops *ops; /* link to device_ops */ + struct cdrom_device_ops *ops; /* link to device_ops */ struct list_head list; /* linked list of all device_info */ struct gendisk *disk; /* matching block layer disk */ void *handle; /* driver-dependent data */ @@ -73,6 +71,7 @@ struct cdrom_device_ops { int (*drive_status) (struct cdrom_device_info *, int); unsigned int (*check_events) (struct cdrom_device_info *cdi, unsigned int clearing, int slot); + int (*media_changed) (struct cdrom_device_info *, int); int (*tray_move) (struct cdrom_device_info *, int); int (*lock_door) (struct cdrom_device_info *, int); int (*select_speed) (struct cdrom_device_info *, int); @@ -86,20 +85,13 @@ struct cdrom_device_ops { /* play stuff */ int (*audio_ioctl) (struct cdrom_device_info *,unsigned int, void *); +/* driver specifications */ + const int capability; /* capability flags */ /* handle uniform packets for scsi type devices (scsi,atapi) */ int (*generic_packet) (struct cdrom_device_info *, struct packet_command *); - int (*read_cdda_bpc)(struct cdrom_device_info *cdi, void __user *ubuf, - u32 lba, u32 nframes, u8 *last_sense); -/* driver specifications */ - const int capability; /* capability flags */ }; -int cdrom_multisession(struct cdrom_device_info *cdi, - struct cdrom_multisession 
*info); -int cdrom_read_tocentry(struct cdrom_device_info *cdi, - struct cdrom_tocentry *entry); - /* the general block_device operations structure: */ extern int cdrom_open(struct cdrom_device_info *cdi, struct block_device *bdev, fmode_t mode); @@ -108,8 +100,9 @@ extern int cdrom_ioctl(struct cdrom_device_info *cdi, struct block_device *bdev, fmode_t mode, unsigned int cmd, unsigned long arg); extern unsigned int cdrom_check_events(struct cdrom_device_info *cdi, unsigned int clearing); +extern int cdrom_media_changed(struct cdrom_device_info *); -extern int register_cdrom(struct gendisk *disk, struct cdrom_device_info *cdi); +extern int register_cdrom(struct cdrom_device_info *cdi); extern void unregister_cdrom(struct cdrom_device_info *cdi); typedef struct { @@ -129,8 +122,6 @@ extern int cdrom_mode_sense(struct cdrom_device_info *cdi, int page_code, int page_control); extern void init_cdrom_command(struct packet_command *cgc, void *buffer, int len, int type); -extern int cdrom_dummy_generic_packet(struct cdrom_device_info *cdi, - struct packet_command *cgc); /* The SCSI spec says there could be 256 slots. */ #define CDROM_MAX_SLOTS 256 diff --git a/include/linux/cec-funcs.h b/include/linux/cec-funcs.h new file mode 100644 index 0000000000..138bbf721e --- /dev/null +++ b/include/linux/cec-funcs.h @@ -0,0 +1,1971 @@ +/* + * cec - HDMI Consumer Electronics Control message functions + * + * Copyright 2016 Cisco Systems, Inc. and/or its affiliates. All rights reserved. + * + * This program is free software; you may redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; version 2 of the License. + * + * Alternatively you can redistribute this file under the terms of the + * BSD license as stated below: + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. 
Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * 3. The names of its contributors may not be used to endorse or promote + * products derived from this software without specific prior written + * permission. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +/* + * Note: this framework is still in staging and it is likely the API + * will change before it goes out of staging. + * + * Once it is moved out of staging this header will move to uapi. 
+ */ +#ifndef _CEC_UAPI_FUNCS_H +#define _CEC_UAPI_FUNCS_H + +#include + +/* One Touch Play Feature */ +static inline void cec_msg_active_source(struct cec_msg *msg, __u16 phys_addr) +{ + msg->len = 4; + msg->msg[0] |= 0xf; /* broadcast */ + msg->msg[1] = CEC_MSG_ACTIVE_SOURCE; + msg->msg[2] = phys_addr >> 8; + msg->msg[3] = phys_addr & 0xff; +} + +static inline void cec_ops_active_source(const struct cec_msg *msg, + __u16 *phys_addr) +{ + *phys_addr = (msg->msg[2] << 8) | msg->msg[3]; +} + +static inline void cec_msg_image_view_on(struct cec_msg *msg) +{ + msg->len = 2; + msg->msg[1] = CEC_MSG_IMAGE_VIEW_ON; +} + +static inline void cec_msg_text_view_on(struct cec_msg *msg) +{ + msg->len = 2; + msg->msg[1] = CEC_MSG_TEXT_VIEW_ON; +} + + +/* Routing Control Feature */ +static inline void cec_msg_inactive_source(struct cec_msg *msg, + __u16 phys_addr) +{ + msg->len = 4; + msg->msg[1] = CEC_MSG_INACTIVE_SOURCE; + msg->msg[2] = phys_addr >> 8; + msg->msg[3] = phys_addr & 0xff; +} + +static inline void cec_ops_inactive_source(const struct cec_msg *msg, + __u16 *phys_addr) +{ + *phys_addr = (msg->msg[2] << 8) | msg->msg[3]; +} + +static inline void cec_msg_request_active_source(struct cec_msg *msg, + bool reply) +{ + msg->len = 2; + msg->msg[0] |= 0xf; /* broadcast */ + msg->msg[1] = CEC_MSG_REQUEST_ACTIVE_SOURCE; + msg->reply = reply ? 
CEC_MSG_ACTIVE_SOURCE : 0; +} + +static inline void cec_msg_routing_information(struct cec_msg *msg, + __u16 phys_addr) +{ + msg->len = 4; + msg->msg[0] |= 0xf; /* broadcast */ + msg->msg[1] = CEC_MSG_ROUTING_INFORMATION; + msg->msg[2] = phys_addr >> 8; + msg->msg[3] = phys_addr & 0xff; +} + +static inline void cec_ops_routing_information(const struct cec_msg *msg, + __u16 *phys_addr) +{ + *phys_addr = (msg->msg[2] << 8) | msg->msg[3]; +} + +static inline void cec_msg_routing_change(struct cec_msg *msg, + bool reply, + __u16 orig_phys_addr, + __u16 new_phys_addr) +{ + msg->len = 6; + msg->msg[0] |= 0xf; /* broadcast */ + msg->msg[1] = CEC_MSG_ROUTING_CHANGE; + msg->msg[2] = orig_phys_addr >> 8; + msg->msg[3] = orig_phys_addr & 0xff; + msg->msg[4] = new_phys_addr >> 8; + msg->msg[5] = new_phys_addr & 0xff; + msg->reply = reply ? CEC_MSG_ROUTING_INFORMATION : 0; +} + +static inline void cec_ops_routing_change(const struct cec_msg *msg, + __u16 *orig_phys_addr, + __u16 *new_phys_addr) +{ + *orig_phys_addr = (msg->msg[2] << 8) | msg->msg[3]; + *new_phys_addr = (msg->msg[4] << 8) | msg->msg[5]; +} + +static inline void cec_msg_set_stream_path(struct cec_msg *msg, __u16 phys_addr) +{ + msg->len = 4; + msg->msg[0] |= 0xf; /* broadcast */ + msg->msg[1] = CEC_MSG_SET_STREAM_PATH; + msg->msg[2] = phys_addr >> 8; + msg->msg[3] = phys_addr & 0xff; +} + +static inline void cec_ops_set_stream_path(const struct cec_msg *msg, + __u16 *phys_addr) +{ + *phys_addr = (msg->msg[2] << 8) | msg->msg[3]; +} + + +/* Standby Feature */ +static inline void cec_msg_standby(struct cec_msg *msg) +{ + msg->len = 2; + msg->msg[1] = CEC_MSG_STANDBY; +} + + +/* One Touch Record Feature */ +static inline void cec_msg_record_off(struct cec_msg *msg, bool reply) +{ + msg->len = 2; + msg->msg[1] = CEC_MSG_RECORD_OFF; + msg->reply = reply ? 
CEC_MSG_RECORD_STATUS : 0; +} + +struct cec_op_arib_data { + __u16 transport_id; + __u16 service_id; + __u16 orig_network_id; +}; + +struct cec_op_atsc_data { + __u16 transport_id; + __u16 program_number; +}; + +struct cec_op_dvb_data { + __u16 transport_id; + __u16 service_id; + __u16 orig_network_id; +}; + +struct cec_op_channel_data { + __u8 channel_number_fmt; + __u16 major; + __u16 minor; +}; + +struct cec_op_digital_service_id { + __u8 service_id_method; + __u8 dig_bcast_system; + union { + struct cec_op_arib_data arib; + struct cec_op_atsc_data atsc; + struct cec_op_dvb_data dvb; + struct cec_op_channel_data channel; + }; +}; + +struct cec_op_record_src { + __u8 type; + union { + struct cec_op_digital_service_id digital; + struct { + __u8 ana_bcast_type; + __u16 ana_freq; + __u8 bcast_system; + } analog; + struct { + __u8 plug; + } ext_plug; + struct { + __u16 phys_addr; + } ext_phys_addr; + }; +}; + +static inline void cec_set_digital_service_id(__u8 *msg, + const struct cec_op_digital_service_id *digital) +{ + *msg++ = (digital->service_id_method << 7) | digital->dig_bcast_system; + if (digital->service_id_method == CEC_OP_SERVICE_ID_METHOD_BY_CHANNEL) { + *msg++ = (digital->channel.channel_number_fmt << 2) | + (digital->channel.major >> 8); + *msg++ = digital->channel.major & 0xff; + *msg++ = digital->channel.minor >> 8; + *msg++ = digital->channel.minor & 0xff; + *msg++ = 0; + *msg++ = 0; + return; + } + switch (digital->dig_bcast_system) { + case CEC_OP_DIG_SERVICE_BCAST_SYSTEM_ATSC_GEN: + case CEC_OP_DIG_SERVICE_BCAST_SYSTEM_ATSC_CABLE: + case CEC_OP_DIG_SERVICE_BCAST_SYSTEM_ATSC_SAT: + case CEC_OP_DIG_SERVICE_BCAST_SYSTEM_ATSC_T: + *msg++ = digital->atsc.transport_id >> 8; + *msg++ = digital->atsc.transport_id & 0xff; + *msg++ = digital->atsc.program_number >> 8; + *msg++ = digital->atsc.program_number & 0xff; + *msg++ = 0; + *msg++ = 0; + break; + default: + *msg++ = digital->dvb.transport_id >> 8; + *msg++ = digital->dvb.transport_id & 0xff; + 
*msg++ = digital->dvb.service_id >> 8; + *msg++ = digital->dvb.service_id & 0xff; + *msg++ = digital->dvb.orig_network_id >> 8; + *msg++ = digital->dvb.orig_network_id & 0xff; + break; + } +} + +static inline void cec_get_digital_service_id(const __u8 *msg, + struct cec_op_digital_service_id *digital) +{ + digital->service_id_method = msg[0] >> 7; + digital->dig_bcast_system = msg[0] & 0x7f; + if (digital->service_id_method == CEC_OP_SERVICE_ID_METHOD_BY_CHANNEL) { + digital->channel.channel_number_fmt = msg[1] >> 2; + digital->channel.major = ((msg[1] & 3) << 6) | msg[2]; + digital->channel.minor = (msg[3] << 8) | msg[4]; + return; + } + digital->dvb.transport_id = (msg[1] << 8) | msg[2]; + digital->dvb.service_id = (msg[3] << 8) | msg[4]; + digital->dvb.orig_network_id = (msg[5] << 8) | msg[6]; +} + +static inline void cec_msg_record_on_own(struct cec_msg *msg) +{ + msg->len = 3; + msg->msg[1] = CEC_MSG_RECORD_ON; + msg->msg[2] = CEC_OP_RECORD_SRC_OWN; +} + +static inline void cec_msg_record_on_digital(struct cec_msg *msg, + const struct cec_op_digital_service_id *digital) +{ + msg->len = 10; + msg->msg[1] = CEC_MSG_RECORD_ON; + msg->msg[2] = CEC_OP_RECORD_SRC_DIGITAL; + cec_set_digital_service_id(msg->msg + 3, digital); +} + +static inline void cec_msg_record_on_analog(struct cec_msg *msg, + __u8 ana_bcast_type, + __u16 ana_freq, + __u8 bcast_system) +{ + msg->len = 7; + msg->msg[1] = CEC_MSG_RECORD_ON; + msg->msg[2] = CEC_OP_RECORD_SRC_ANALOG; + msg->msg[3] = ana_bcast_type; + msg->msg[4] = ana_freq >> 8; + msg->msg[5] = ana_freq & 0xff; + msg->msg[6] = bcast_system; +} + +static inline void cec_msg_record_on_plug(struct cec_msg *msg, + __u8 plug) +{ + msg->len = 4; + msg->msg[1] = CEC_MSG_RECORD_ON; + msg->msg[2] = CEC_OP_RECORD_SRC_EXT_PLUG; + msg->msg[3] = plug; +} + +static inline void cec_msg_record_on_phys_addr(struct cec_msg *msg, + __u16 phys_addr) +{ + msg->len = 5; + msg->msg[1] = CEC_MSG_RECORD_ON; + msg->msg[2] = CEC_OP_RECORD_SRC_EXT_PHYS_ADDR; + 
msg->msg[3] = phys_addr >> 8; + msg->msg[4] = phys_addr & 0xff; +} + +static inline void cec_msg_record_on(struct cec_msg *msg, + bool reply, + const struct cec_op_record_src *rec_src) +{ + switch (rec_src->type) { + case CEC_OP_RECORD_SRC_OWN: + cec_msg_record_on_own(msg); + break; + case CEC_OP_RECORD_SRC_DIGITAL: + cec_msg_record_on_digital(msg, &rec_src->digital); + break; + case CEC_OP_RECORD_SRC_ANALOG: + cec_msg_record_on_analog(msg, + rec_src->analog.ana_bcast_type, + rec_src->analog.ana_freq, + rec_src->analog.bcast_system); + break; + case CEC_OP_RECORD_SRC_EXT_PLUG: + cec_msg_record_on_plug(msg, rec_src->ext_plug.plug); + break; + case CEC_OP_RECORD_SRC_EXT_PHYS_ADDR: + cec_msg_record_on_phys_addr(msg, + rec_src->ext_phys_addr.phys_addr); + break; + } + msg->reply = reply ? CEC_MSG_RECORD_STATUS : 0; +} + +static inline void cec_ops_record_on(const struct cec_msg *msg, + struct cec_op_record_src *rec_src) +{ + rec_src->type = msg->msg[2]; + switch (rec_src->type) { + case CEC_OP_RECORD_SRC_OWN: + break; + case CEC_OP_RECORD_SRC_DIGITAL: + cec_get_digital_service_id(msg->msg + 3, &rec_src->digital); + break; + case CEC_OP_RECORD_SRC_ANALOG: + rec_src->analog.ana_bcast_type = msg->msg[3]; + rec_src->analog.ana_freq = + (msg->msg[4] << 8) | msg->msg[5]; + rec_src->analog.bcast_system = msg->msg[6]; + break; + case CEC_OP_RECORD_SRC_EXT_PLUG: + rec_src->ext_plug.plug = msg->msg[3]; + break; + case CEC_OP_RECORD_SRC_EXT_PHYS_ADDR: + rec_src->ext_phys_addr.phys_addr = + (msg->msg[3] << 8) | msg->msg[4]; + break; + } +} + +static inline void cec_msg_record_status(struct cec_msg *msg, __u8 rec_status) +{ + msg->len = 3; + msg->msg[1] = CEC_MSG_RECORD_STATUS; + msg->msg[2] = rec_status; +} + +static inline void cec_ops_record_status(const struct cec_msg *msg, + __u8 *rec_status) +{ + *rec_status = msg->msg[2]; +} + +static inline void cec_msg_record_tv_screen(struct cec_msg *msg, + bool reply) +{ + msg->len = 2; + msg->msg[1] = CEC_MSG_RECORD_TV_SCREEN; + 
msg->reply = reply ? CEC_MSG_RECORD_ON : 0; +} + + +/* Timer Programming Feature */ +static inline void cec_msg_timer_status(struct cec_msg *msg, + __u8 timer_overlap_warning, + __u8 media_info, + __u8 prog_info, + __u8 prog_error, + __u8 duration_hr, + __u8 duration_min) +{ + msg->len = 3; + msg->msg[1] = CEC_MSG_TIMER_STATUS; + msg->msg[2] = (timer_overlap_warning << 7) | + (media_info << 5) | + (prog_info ? 0x10 : 0) | + (prog_info ? prog_info : prog_error); + if (prog_info == CEC_OP_PROG_INFO_NOT_ENOUGH_SPACE || + prog_info == CEC_OP_PROG_INFO_MIGHT_NOT_BE_ENOUGH_SPACE || + prog_error == CEC_OP_PROG_ERROR_DUPLICATE) { + msg->len += 2; + msg->msg[3] = ((duration_hr / 10) << 4) | (duration_hr % 10); + msg->msg[4] = ((duration_min / 10) << 4) | (duration_min % 10); + } +} + +static inline void cec_ops_timer_status(const struct cec_msg *msg, + __u8 *timer_overlap_warning, + __u8 *media_info, + __u8 *prog_info, + __u8 *prog_error, + __u8 *duration_hr, + __u8 *duration_min) +{ + *timer_overlap_warning = msg->msg[2] >> 7; + *media_info = (msg->msg[2] >> 5) & 3; + if (msg->msg[2] & 0x10) { + *prog_info = msg->msg[2] & 0xf; + *prog_error = 0; + } else { + *prog_info = 0; + *prog_error = msg->msg[2] & 0xf; + } + if (*prog_info == CEC_OP_PROG_INFO_NOT_ENOUGH_SPACE || + *prog_info == CEC_OP_PROG_INFO_MIGHT_NOT_BE_ENOUGH_SPACE || + *prog_error == CEC_OP_PROG_ERROR_DUPLICATE) { + *duration_hr = (msg->msg[3] >> 4) * 10 + (msg->msg[3] & 0xf); + *duration_min = (msg->msg[4] >> 4) * 10 + (msg->msg[4] & 0xf); + } else { + *duration_hr = *duration_min = 0; + } +} + +static inline void cec_msg_timer_cleared_status(struct cec_msg *msg, + __u8 timer_cleared_status) +{ + msg->len = 3; + msg->msg[1] = CEC_MSG_TIMER_CLEARED_STATUS; + msg->msg[2] = timer_cleared_status; +} + +static inline void cec_ops_timer_cleared_status(const struct cec_msg *msg, + __u8 *timer_cleared_status) +{ + *timer_cleared_status = msg->msg[2]; +} + +static inline void cec_msg_clear_analogue_timer(struct cec_msg 
*msg, + bool reply, + __u8 day, + __u8 month, + __u8 start_hr, + __u8 start_min, + __u8 duration_hr, + __u8 duration_min, + __u8 recording_seq, + __u8 ana_bcast_type, + __u16 ana_freq, + __u8 bcast_system) +{ + msg->len = 13; + msg->msg[1] = CEC_MSG_CLEAR_ANALOGUE_TIMER; + msg->msg[2] = day; + msg->msg[3] = month; + /* Hours and minutes are in BCD format */ + msg->msg[4] = ((start_hr / 10) << 4) | (start_hr % 10); + msg->msg[5] = ((start_min / 10) << 4) | (start_min % 10); + msg->msg[6] = ((duration_hr / 10) << 4) | (duration_hr % 10); + msg->msg[7] = ((duration_min / 10) << 4) | (duration_min % 10); + msg->msg[8] = recording_seq; + msg->msg[9] = ana_bcast_type; + msg->msg[10] = ana_freq >> 8; + msg->msg[11] = ana_freq & 0xff; + msg->msg[12] = bcast_system; + msg->reply = reply ? CEC_MSG_TIMER_CLEARED_STATUS : 0; +} + +static inline void cec_ops_clear_analogue_timer(const struct cec_msg *msg, + __u8 *day, + __u8 *month, + __u8 *start_hr, + __u8 *start_min, + __u8 *duration_hr, + __u8 *duration_min, + __u8 *recording_seq, + __u8 *ana_bcast_type, + __u16 *ana_freq, + __u8 *bcast_system) +{ + *day = msg->msg[2]; + *month = msg->msg[3]; + /* Hours and minutes are in BCD format */ + *start_hr = (msg->msg[4] >> 4) * 10 + (msg->msg[4] & 0xf); + *start_min = (msg->msg[5] >> 4) * 10 + (msg->msg[5] & 0xf); + *duration_hr = (msg->msg[6] >> 4) * 10 + (msg->msg[6] & 0xf); + *duration_min = (msg->msg[7] >> 4) * 10 + (msg->msg[7] & 0xf); + *recording_seq = msg->msg[8]; + *ana_bcast_type = msg->msg[9]; + *ana_freq = (msg->msg[10] << 8) | msg->msg[11]; + *bcast_system = msg->msg[12]; +} + +static inline void cec_msg_clear_digital_timer(struct cec_msg *msg, + bool reply, + __u8 day, + __u8 month, + __u8 start_hr, + __u8 start_min, + __u8 duration_hr, + __u8 duration_min, + __u8 recording_seq, + const struct cec_op_digital_service_id *digital) +{ + msg->len = 16; + msg->reply = reply ? 
CEC_MSG_TIMER_CLEARED_STATUS : 0; + msg->msg[1] = CEC_MSG_CLEAR_DIGITAL_TIMER; + msg->msg[2] = day; + msg->msg[3] = month; + /* Hours and minutes are in BCD format */ + msg->msg[4] = ((start_hr / 10) << 4) | (start_hr % 10); + msg->msg[5] = ((start_min / 10) << 4) | (start_min % 10); + msg->msg[6] = ((duration_hr / 10) << 4) | (duration_hr % 10); + msg->msg[7] = ((duration_min / 10) << 4) | (duration_min % 10); + msg->msg[8] = recording_seq; + cec_set_digital_service_id(msg->msg + 9, digital); +} + +static inline void cec_ops_clear_digital_timer(const struct cec_msg *msg, + __u8 *day, + __u8 *month, + __u8 *start_hr, + __u8 *start_min, + __u8 *duration_hr, + __u8 *duration_min, + __u8 *recording_seq, + struct cec_op_digital_service_id *digital) +{ + *day = msg->msg[2]; + *month = msg->msg[3]; + /* Hours and minutes are in BCD format */ + *start_hr = (msg->msg[4] >> 4) * 10 + (msg->msg[4] & 0xf); + *start_min = (msg->msg[5] >> 4) * 10 + (msg->msg[5] & 0xf); + *duration_hr = (msg->msg[6] >> 4) * 10 + (msg->msg[6] & 0xf); + *duration_min = (msg->msg[7] >> 4) * 10 + (msg->msg[7] & 0xf); + *recording_seq = msg->msg[8]; + cec_get_digital_service_id(msg->msg + 9, digital); +} + +static inline void cec_msg_clear_ext_timer(struct cec_msg *msg, + bool reply, + __u8 day, + __u8 month, + __u8 start_hr, + __u8 start_min, + __u8 duration_hr, + __u8 duration_min, + __u8 recording_seq, + __u8 ext_src_spec, + __u8 plug, + __u16 phys_addr) +{ + msg->len = 13; + msg->msg[1] = CEC_MSG_CLEAR_EXT_TIMER; + msg->msg[2] = day; + msg->msg[3] = month; + /* Hours and minutes are in BCD format */ + msg->msg[4] = ((start_hr / 10) << 4) | (start_hr % 10); + msg->msg[5] = ((start_min / 10) << 4) | (start_min % 10); + msg->msg[6] = ((duration_hr / 10) << 4) | (duration_hr % 10); + msg->msg[7] = ((duration_min / 10) << 4) | (duration_min % 10); + msg->msg[8] = recording_seq; + msg->msg[9] = ext_src_spec; + msg->msg[10] = plug; + msg->msg[11] = phys_addr >> 8; + msg->msg[12] = phys_addr & 0xff; + 
msg->reply = reply ? CEC_MSG_TIMER_CLEARED_STATUS : 0; +} + +static inline void cec_ops_clear_ext_timer(const struct cec_msg *msg, + __u8 *day, + __u8 *month, + __u8 *start_hr, + __u8 *start_min, + __u8 *duration_hr, + __u8 *duration_min, + __u8 *recording_seq, + __u8 *ext_src_spec, + __u8 *plug, + __u16 *phys_addr) +{ + *day = msg->msg[2]; + *month = msg->msg[3]; + /* Hours and minutes are in BCD format */ + *start_hr = (msg->msg[4] >> 4) * 10 + (msg->msg[4] & 0xf); + *start_min = (msg->msg[5] >> 4) * 10 + (msg->msg[5] & 0xf); + *duration_hr = (msg->msg[6] >> 4) * 10 + (msg->msg[6] & 0xf); + *duration_min = (msg->msg[7] >> 4) * 10 + (msg->msg[7] & 0xf); + *recording_seq = msg->msg[8]; + *ext_src_spec = msg->msg[9]; + *plug = msg->msg[10]; + *phys_addr = (msg->msg[11] << 8) | msg->msg[12]; +} + +static inline void cec_msg_set_analogue_timer(struct cec_msg *msg, + bool reply, + __u8 day, + __u8 month, + __u8 start_hr, + __u8 start_min, + __u8 duration_hr, + __u8 duration_min, + __u8 recording_seq, + __u8 ana_bcast_type, + __u16 ana_freq, + __u8 bcast_system) +{ + msg->len = 13; + msg->msg[1] = CEC_MSG_SET_ANALOGUE_TIMER; + msg->msg[2] = day; + msg->msg[3] = month; + /* Hours and minutes are in BCD format */ + msg->msg[4] = ((start_hr / 10) << 4) | (start_hr % 10); + msg->msg[5] = ((start_min / 10) << 4) | (start_min % 10); + msg->msg[6] = ((duration_hr / 10) << 4) | (duration_hr % 10); + msg->msg[7] = ((duration_min / 10) << 4) | (duration_min % 10); + msg->msg[8] = recording_seq; + msg->msg[9] = ana_bcast_type; + msg->msg[10] = ana_freq >> 8; + msg->msg[11] = ana_freq & 0xff; + msg->msg[12] = bcast_system; + msg->reply = reply ? 
CEC_MSG_TIMER_STATUS : 0; +} + +static inline void cec_ops_set_analogue_timer(const struct cec_msg *msg, + __u8 *day, + __u8 *month, + __u8 *start_hr, + __u8 *start_min, + __u8 *duration_hr, + __u8 *duration_min, + __u8 *recording_seq, + __u8 *ana_bcast_type, + __u16 *ana_freq, + __u8 *bcast_system) +{ + *day = msg->msg[2]; + *month = msg->msg[3]; + /* Hours and minutes are in BCD format */ + *start_hr = (msg->msg[4] >> 4) * 10 + (msg->msg[4] & 0xf); + *start_min = (msg->msg[5] >> 4) * 10 + (msg->msg[5] & 0xf); + *duration_hr = (msg->msg[6] >> 4) * 10 + (msg->msg[6] & 0xf); + *duration_min = (msg->msg[7] >> 4) * 10 + (msg->msg[7] & 0xf); + *recording_seq = msg->msg[8]; + *ana_bcast_type = msg->msg[9]; + *ana_freq = (msg->msg[10] << 8) | msg->msg[11]; + *bcast_system = msg->msg[12]; +} + +static inline void cec_msg_set_digital_timer(struct cec_msg *msg, + bool reply, + __u8 day, + __u8 month, + __u8 start_hr, + __u8 start_min, + __u8 duration_hr, + __u8 duration_min, + __u8 recording_seq, + const struct cec_op_digital_service_id *digital) +{ + msg->len = 16; + msg->reply = reply ? 
CEC_MSG_TIMER_STATUS : 0; + msg->msg[1] = CEC_MSG_SET_DIGITAL_TIMER; + msg->msg[2] = day; + msg->msg[3] = month; + /* Hours and minutes are in BCD format */ + msg->msg[4] = ((start_hr / 10) << 4) | (start_hr % 10); + msg->msg[5] = ((start_min / 10) << 4) | (start_min % 10); + msg->msg[6] = ((duration_hr / 10) << 4) | (duration_hr % 10); + msg->msg[7] = ((duration_min / 10) << 4) | (duration_min % 10); + msg->msg[8] = recording_seq; + cec_set_digital_service_id(msg->msg + 9, digital); +} + +static inline void cec_ops_set_digital_timer(const struct cec_msg *msg, + __u8 *day, + __u8 *month, + __u8 *start_hr, + __u8 *start_min, + __u8 *duration_hr, + __u8 *duration_min, + __u8 *recording_seq, + struct cec_op_digital_service_id *digital) +{ + *day = msg->msg[2]; + *month = msg->msg[3]; + /* Hours and minutes are in BCD format */ + *start_hr = (msg->msg[4] >> 4) * 10 + (msg->msg[4] & 0xf); + *start_min = (msg->msg[5] >> 4) * 10 + (msg->msg[5] & 0xf); + *duration_hr = (msg->msg[6] >> 4) * 10 + (msg->msg[6] & 0xf); + *duration_min = (msg->msg[7] >> 4) * 10 + (msg->msg[7] & 0xf); + *recording_seq = msg->msg[8]; + cec_get_digital_service_id(msg->msg + 9, digital); +} + +static inline void cec_msg_set_ext_timer(struct cec_msg *msg, + bool reply, + __u8 day, + __u8 month, + __u8 start_hr, + __u8 start_min, + __u8 duration_hr, + __u8 duration_min, + __u8 recording_seq, + __u8 ext_src_spec, + __u8 plug, + __u16 phys_addr) +{ + msg->len = 13; + msg->msg[1] = CEC_MSG_SET_EXT_TIMER; + msg->msg[2] = day; + msg->msg[3] = month; + /* Hours and minutes are in BCD format */ + msg->msg[4] = ((start_hr / 10) << 4) | (start_hr % 10); + msg->msg[5] = ((start_min / 10) << 4) | (start_min % 10); + msg->msg[6] = ((duration_hr / 10) << 4) | (duration_hr % 10); + msg->msg[7] = ((duration_min / 10) << 4) | (duration_min % 10); + msg->msg[8] = recording_seq; + msg->msg[9] = ext_src_spec; + msg->msg[10] = plug; + msg->msg[11] = phys_addr >> 8; + msg->msg[12] = phys_addr & 0xff; + msg->reply = reply 
? CEC_MSG_TIMER_STATUS : 0; +} + +static inline void cec_ops_set_ext_timer(const struct cec_msg *msg, + __u8 *day, + __u8 *month, + __u8 *start_hr, + __u8 *start_min, + __u8 *duration_hr, + __u8 *duration_min, + __u8 *recording_seq, + __u8 *ext_src_spec, + __u8 *plug, + __u16 *phys_addr) +{ + *day = msg->msg[2]; + *month = msg->msg[3]; + /* Hours and minutes are in BCD format */ + *start_hr = (msg->msg[4] >> 4) * 10 + (msg->msg[4] & 0xf); + *start_min = (msg->msg[5] >> 4) * 10 + (msg->msg[5] & 0xf); + *duration_hr = (msg->msg[6] >> 4) * 10 + (msg->msg[6] & 0xf); + *duration_min = (msg->msg[7] >> 4) * 10 + (msg->msg[7] & 0xf); + *recording_seq = msg->msg[8]; + *ext_src_spec = msg->msg[9]; + *plug = msg->msg[10]; + *phys_addr = (msg->msg[11] << 8) | msg->msg[12]; +} + +static inline void cec_msg_set_timer_program_title(struct cec_msg *msg, + const char *prog_title) +{ + unsigned int len = strlen(prog_title); + + if (len > 14) + len = 14; + msg->len = 2 + len; + msg->msg[1] = CEC_MSG_SET_TIMER_PROGRAM_TITLE; + memcpy(msg->msg + 2, prog_title, len); +} + +static inline void cec_ops_set_timer_program_title(const struct cec_msg *msg, + char *prog_title) +{ + unsigned int len = msg->len > 2 ? msg->len - 2 : 0; + + if (len > 14) + len = 14; + memcpy(prog_title, msg->msg + 2, len); + prog_title[len] = '\0'; +} + +/* System Information Feature */ +static inline void cec_msg_cec_version(struct cec_msg *msg, __u8 cec_version) +{ + msg->len = 3; + msg->msg[1] = CEC_MSG_CEC_VERSION; + msg->msg[2] = cec_version; +} + +static inline void cec_ops_cec_version(const struct cec_msg *msg, + __u8 *cec_version) +{ + *cec_version = msg->msg[2]; +} + +static inline void cec_msg_get_cec_version(struct cec_msg *msg, + bool reply) +{ + msg->len = 2; + msg->msg[1] = CEC_MSG_GET_CEC_VERSION; + msg->reply = reply ? 
CEC_MSG_CEC_VERSION : 0; +} + +static inline void cec_msg_report_physical_addr(struct cec_msg *msg, + __u16 phys_addr, __u8 prim_devtype) +{ + msg->len = 5; + msg->msg[0] |= 0xf; /* broadcast */ + msg->msg[1] = CEC_MSG_REPORT_PHYSICAL_ADDR; + msg->msg[2] = phys_addr >> 8; + msg->msg[3] = phys_addr & 0xff; + msg->msg[4] = prim_devtype; +} + +static inline void cec_ops_report_physical_addr(const struct cec_msg *msg, + __u16 *phys_addr, __u8 *prim_devtype) +{ + *phys_addr = (msg->msg[2] << 8) | msg->msg[3]; + *prim_devtype = msg->msg[4]; +} + +static inline void cec_msg_give_physical_addr(struct cec_msg *msg, + bool reply) +{ + msg->len = 2; + msg->msg[1] = CEC_MSG_GIVE_PHYSICAL_ADDR; + msg->reply = reply ? CEC_MSG_REPORT_PHYSICAL_ADDR : 0; +} + +static inline void cec_msg_set_menu_language(struct cec_msg *msg, + const char *language) +{ + msg->len = 5; + msg->msg[0] |= 0xf; /* broadcast */ + msg->msg[1] = CEC_MSG_SET_MENU_LANGUAGE; + memcpy(msg->msg + 2, language, 3); +} + +static inline void cec_ops_set_menu_language(const struct cec_msg *msg, + char *language) +{ + memcpy(language, msg->msg + 2, 3); + language[3] = '\0'; +} + +static inline void cec_msg_get_menu_language(struct cec_msg *msg, + bool reply) +{ + msg->len = 2; + msg->msg[1] = CEC_MSG_GET_MENU_LANGUAGE; + msg->reply = reply ? CEC_MSG_SET_MENU_LANGUAGE : 0; +} + +/* + * Assumes a single RC Profile byte and a single Device Features byte, + * i.e. no extended features are supported by this helper function. + * + * As of CEC 2.0 no extended features are defined, should those be added + * in the future, then this function needs to be adapted or a new function + * should be added. 
+ */ +static inline void cec_msg_report_features(struct cec_msg *msg, + __u8 cec_version, __u8 all_device_types, + __u8 rc_profile, __u8 dev_features) +{ + msg->len = 6; + msg->msg[0] |= 0xf; /* broadcast */ + msg->msg[1] = CEC_MSG_REPORT_FEATURES; + msg->msg[2] = cec_version; + msg->msg[3] = all_device_types; + msg->msg[4] = rc_profile; + msg->msg[5] = dev_features; +} + +static inline void cec_ops_report_features(const struct cec_msg *msg, + __u8 *cec_version, __u8 *all_device_types, + const __u8 **rc_profile, const __u8 **dev_features) +{ + const __u8 *p = &msg->msg[4]; + + *cec_version = msg->msg[2]; + *all_device_types = msg->msg[3]; + *rc_profile = p; + while (p < &msg->msg[14] && (*p & CEC_OP_FEAT_EXT)) + p++; + if (!(*p & CEC_OP_FEAT_EXT)) { + *dev_features = p + 1; + while (p < &msg->msg[15] && (*p & CEC_OP_FEAT_EXT)) + p++; + } + if (*p & CEC_OP_FEAT_EXT) + *rc_profile = *dev_features = NULL; +} + +static inline void cec_msg_give_features(struct cec_msg *msg, + bool reply) +{ + msg->len = 2; + msg->msg[1] = CEC_MSG_GIVE_FEATURES; + msg->reply = reply ? CEC_MSG_REPORT_FEATURES : 0; +} + +/* Deck Control Feature */ +static inline void cec_msg_deck_control(struct cec_msg *msg, + __u8 deck_control_mode) +{ + msg->len = 3; + msg->msg[1] = CEC_MSG_DECK_CONTROL; + msg->msg[2] = deck_control_mode; +} + +static inline void cec_ops_deck_control(const struct cec_msg *msg, + __u8 *deck_control_mode) +{ + *deck_control_mode = msg->msg[2]; +} + +static inline void cec_msg_deck_status(struct cec_msg *msg, + __u8 deck_info) +{ + msg->len = 3; + msg->msg[1] = CEC_MSG_DECK_STATUS; + msg->msg[2] = deck_info; +} + +static inline void cec_ops_deck_status(const struct cec_msg *msg, + __u8 *deck_info) +{ + *deck_info = msg->msg[2]; +} + +static inline void cec_msg_give_deck_status(struct cec_msg *msg, + bool reply, + __u8 status_req) +{ + msg->len = 3; + msg->msg[1] = CEC_MSG_GIVE_DECK_STATUS; + msg->msg[2] = status_req; + msg->reply = reply ? 
CEC_MSG_DECK_STATUS : 0; +} + +static inline void cec_ops_give_deck_status(const struct cec_msg *msg, + __u8 *status_req) +{ + *status_req = msg->msg[2]; +} + +static inline void cec_msg_play(struct cec_msg *msg, + __u8 play_mode) +{ + msg->len = 3; + msg->msg[1] = CEC_MSG_PLAY; + msg->msg[2] = play_mode; +} + +static inline void cec_ops_play(const struct cec_msg *msg, + __u8 *play_mode) +{ + *play_mode = msg->msg[2]; +} + + +/* Tuner Control Feature */ +struct cec_op_tuner_device_info { + __u8 rec_flag; + __u8 tuner_display_info; + bool is_analog; + union { + struct cec_op_digital_service_id digital; + struct { + __u8 ana_bcast_type; + __u16 ana_freq; + __u8 bcast_system; + } analog; + }; +}; + +static inline void cec_msg_tuner_device_status_analog(struct cec_msg *msg, + __u8 rec_flag, + __u8 tuner_display_info, + __u8 ana_bcast_type, + __u16 ana_freq, + __u8 bcast_system) +{ + msg->len = 7; + msg->msg[1] = CEC_MSG_TUNER_DEVICE_STATUS; + msg->msg[2] = (rec_flag << 7) | tuner_display_info; + msg->msg[3] = ana_bcast_type; + msg->msg[4] = ana_freq >> 8; + msg->msg[5] = ana_freq & 0xff; + msg->msg[6] = bcast_system; +} + +static inline void cec_msg_tuner_device_status_digital(struct cec_msg *msg, + __u8 rec_flag, __u8 tuner_display_info, + const struct cec_op_digital_service_id *digital) +{ + msg->len = 10; + msg->msg[1] = CEC_MSG_TUNER_DEVICE_STATUS; + msg->msg[2] = (rec_flag << 7) | tuner_display_info; + cec_set_digital_service_id(msg->msg + 3, digital); +} + +static inline void cec_msg_tuner_device_status(struct cec_msg *msg, + const struct cec_op_tuner_device_info *tuner_dev_info) +{ + if (tuner_dev_info->is_analog) + cec_msg_tuner_device_status_analog(msg, + tuner_dev_info->rec_flag, + tuner_dev_info->tuner_display_info, + tuner_dev_info->analog.ana_bcast_type, + tuner_dev_info->analog.ana_freq, + tuner_dev_info->analog.bcast_system); + else + cec_msg_tuner_device_status_digital(msg, + tuner_dev_info->rec_flag, + tuner_dev_info->tuner_display_info, + 
&tuner_dev_info->digital); +} + +static inline void cec_ops_tuner_device_status(const struct cec_msg *msg, + struct cec_op_tuner_device_info *tuner_dev_info) +{ + tuner_dev_info->is_analog = msg->len < 10; + tuner_dev_info->rec_flag = msg->msg[2] >> 7; + tuner_dev_info->tuner_display_info = msg->msg[2] & 0x7f; + if (tuner_dev_info->is_analog) { + tuner_dev_info->analog.ana_bcast_type = msg->msg[3]; + tuner_dev_info->analog.ana_freq = (msg->msg[4] << 8) | msg->msg[5]; + tuner_dev_info->analog.bcast_system = msg->msg[6]; + return; + } + cec_get_digital_service_id(msg->msg + 3, &tuner_dev_info->digital); +} + +static inline void cec_msg_give_tuner_device_status(struct cec_msg *msg, + bool reply, + __u8 status_req) +{ + msg->len = 3; + msg->msg[1] = CEC_MSG_GIVE_TUNER_DEVICE_STATUS; + msg->msg[2] = status_req; + msg->reply = reply ? CEC_MSG_TUNER_DEVICE_STATUS : 0; +} + +static inline void cec_ops_give_tuner_device_status(const struct cec_msg *msg, + __u8 *status_req) +{ + *status_req = msg->msg[2]; +} + +static inline void cec_msg_select_analogue_service(struct cec_msg *msg, + __u8 ana_bcast_type, + __u16 ana_freq, + __u8 bcast_system) +{ + msg->len = 6; + msg->msg[1] = CEC_MSG_SELECT_ANALOGUE_SERVICE; + msg->msg[2] = ana_bcast_type; + msg->msg[3] = ana_freq >> 8; + msg->msg[4] = ana_freq & 0xff; + msg->msg[5] = bcast_system; +} + +static inline void cec_ops_select_analogue_service(const struct cec_msg *msg, + __u8 *ana_bcast_type, + __u16 *ana_freq, + __u8 *bcast_system) +{ + *ana_bcast_type = msg->msg[2]; + *ana_freq = (msg->msg[3] << 8) | msg->msg[4]; + *bcast_system = msg->msg[5]; +} + +static inline void cec_msg_select_digital_service(struct cec_msg *msg, + const struct cec_op_digital_service_id *digital) +{ + msg->len = 9; + msg->msg[1] = CEC_MSG_SELECT_DIGITAL_SERVICE; + cec_set_digital_service_id(msg->msg + 2, digital); +} + +static inline void cec_ops_select_digital_service(const struct cec_msg *msg, + struct cec_op_digital_service_id *digital) +{ + 
cec_get_digital_service_id(msg->msg + 2, digital); +} + +static inline void cec_msg_tuner_step_decrement(struct cec_msg *msg) +{ + msg->len = 2; + msg->msg[1] = CEC_MSG_TUNER_STEP_DECREMENT; +} + +static inline void cec_msg_tuner_step_increment(struct cec_msg *msg) +{ + msg->len = 2; + msg->msg[1] = CEC_MSG_TUNER_STEP_INCREMENT; +} + + +/* Vendor Specific Commands Feature */ +static inline void cec_msg_device_vendor_id(struct cec_msg *msg, __u32 vendor_id) +{ + msg->len = 5; + msg->msg[0] |= 0xf; /* broadcast */ + msg->msg[1] = CEC_MSG_DEVICE_VENDOR_ID; + msg->msg[2] = vendor_id >> 16; + msg->msg[3] = (vendor_id >> 8) & 0xff; + msg->msg[4] = vendor_id & 0xff; +} + +static inline void cec_ops_device_vendor_id(const struct cec_msg *msg, + __u32 *vendor_id) +{ + *vendor_id = (msg->msg[2] << 16) | (msg->msg[3] << 8) | msg->msg[4]; +} + +static inline void cec_msg_give_device_vendor_id(struct cec_msg *msg, + bool reply) +{ + msg->len = 2; + msg->msg[1] = CEC_MSG_GIVE_DEVICE_VENDOR_ID; + msg->reply = reply ? 
CEC_MSG_DEVICE_VENDOR_ID : 0; +} + +static inline void cec_msg_vendor_command(struct cec_msg *msg, + __u8 size, const __u8 *vendor_cmd) +{ + if (size > 14) + size = 14; + msg->len = 2 + size; + msg->msg[1] = CEC_MSG_VENDOR_COMMAND; + memcpy(msg->msg + 2, vendor_cmd, size); +} + +static inline void cec_ops_vendor_command(const struct cec_msg *msg, + __u8 *size, + const __u8 **vendor_cmd) +{ + *size = msg->len - 2; + + if (*size > 14) + *size = 14; + *vendor_cmd = msg->msg + 2; +} + +static inline void cec_msg_vendor_command_with_id(struct cec_msg *msg, + __u32 vendor_id, __u8 size, + const __u8 *vendor_cmd) +{ + if (size > 11) + size = 11; + msg->len = 5 + size; + msg->msg[1] = CEC_MSG_VENDOR_COMMAND_WITH_ID; + msg->msg[2] = vendor_id >> 16; + msg->msg[3] = (vendor_id >> 8) & 0xff; + msg->msg[4] = vendor_id & 0xff; + memcpy(msg->msg + 5, vendor_cmd, size); +} + +static inline void cec_ops_vendor_command_with_id(const struct cec_msg *msg, + __u32 *vendor_id, __u8 *size, + const __u8 **vendor_cmd) +{ + *size = msg->len - 5; + + if (*size > 11) + *size = 11; + *vendor_id = (msg->msg[2] << 16) | (msg->msg[3] << 8) | msg->msg[4]; + *vendor_cmd = msg->msg + 5; +} + +static inline void cec_msg_vendor_remote_button_down(struct cec_msg *msg, + __u8 size, + const __u8 *rc_code) +{ + if (size > 14) + size = 14; + msg->len = 2 + size; + msg->msg[1] = CEC_MSG_VENDOR_REMOTE_BUTTON_DOWN; + memcpy(msg->msg + 2, rc_code, size); +} + +static inline void cec_ops_vendor_remote_button_down(const struct cec_msg *msg, + __u8 *size, + const __u8 **rc_code) +{ + *size = msg->len - 2; + + if (*size > 14) + *size = 14; + *rc_code = msg->msg + 2; +} + +static inline void cec_msg_vendor_remote_button_up(struct cec_msg *msg) +{ + msg->len = 2; + msg->msg[1] = CEC_MSG_VENDOR_REMOTE_BUTTON_UP; +} + + +/* OSD Display Feature */ +static inline void cec_msg_set_osd_string(struct cec_msg *msg, + __u8 disp_ctl, + const char *osd) +{ + unsigned int len = strlen(osd); + + if (len > 13) + len = 13; + 
msg->len = 3 + len; + msg->msg[1] = CEC_MSG_SET_OSD_STRING; + msg->msg[2] = disp_ctl; + memcpy(msg->msg + 3, osd, len); +} + +static inline void cec_ops_set_osd_string(const struct cec_msg *msg, + __u8 *disp_ctl, + char *osd) +{ + unsigned int len = msg->len > 3 ? msg->len - 3 : 0; + + *disp_ctl = msg->msg[2]; + if (len > 13) + len = 13; + memcpy(osd, msg->msg + 3, len); + osd[len] = '\0'; +} + + +/* Device OSD Transfer Feature */ +static inline void cec_msg_set_osd_name(struct cec_msg *msg, const char *name) +{ + unsigned int len = strlen(name); + + if (len > 14) + len = 14; + msg->len = 2 + len; + msg->msg[1] = CEC_MSG_SET_OSD_NAME; + memcpy(msg->msg + 2, name, len); +} + +static inline void cec_ops_set_osd_name(const struct cec_msg *msg, + char *name) +{ + unsigned int len = msg->len > 2 ? msg->len - 2 : 0; + + if (len > 14) + len = 14; + memcpy(name, msg->msg + 2, len); + name[len] = '\0'; +} + +static inline void cec_msg_give_osd_name(struct cec_msg *msg, + bool reply) +{ + msg->len = 2; + msg->msg[1] = CEC_MSG_GIVE_OSD_NAME; + msg->reply = reply ? CEC_MSG_SET_OSD_NAME : 0; +} + + +/* Device Menu Control Feature */ +static inline void cec_msg_menu_status(struct cec_msg *msg, + __u8 menu_state) +{ + msg->len = 3; + msg->msg[1] = CEC_MSG_MENU_STATUS; + msg->msg[2] = menu_state; +} + +static inline void cec_ops_menu_status(const struct cec_msg *msg, + __u8 *menu_state) +{ + *menu_state = msg->msg[2]; +} + +static inline void cec_msg_menu_request(struct cec_msg *msg, + bool reply, + __u8 menu_req) +{ + msg->len = 3; + msg->msg[1] = CEC_MSG_MENU_REQUEST; + msg->msg[2] = menu_req; + msg->reply = reply ? 
CEC_MSG_MENU_STATUS : 0; +} + +static inline void cec_ops_menu_request(const struct cec_msg *msg, + __u8 *menu_req) +{ + *menu_req = msg->msg[2]; +} + +struct cec_op_ui_command { + __u8 ui_cmd; + bool has_opt_arg; + union { + struct cec_op_channel_data channel_identifier; + __u8 ui_broadcast_type; + __u8 ui_sound_presentation_control; + __u8 play_mode; + __u8 ui_function_media; + __u8 ui_function_select_av_input; + __u8 ui_function_select_audio_input; + }; +}; + +static inline void cec_msg_user_control_pressed(struct cec_msg *msg, + const struct cec_op_ui_command *ui_cmd) +{ + msg->len = 3; + msg->msg[1] = CEC_MSG_USER_CONTROL_PRESSED; + msg->msg[2] = ui_cmd->ui_cmd; + if (!ui_cmd->has_opt_arg) + return; + switch (ui_cmd->ui_cmd) { + case 0x56: + case 0x57: + case 0x60: + case 0x68: + case 0x69: + case 0x6a: + /* The optional operand is one byte for all these ui commands */ + msg->len++; + msg->msg[3] = ui_cmd->play_mode; + break; + case 0x67: + msg->len += 4; + msg->msg[3] = (ui_cmd->channel_identifier.channel_number_fmt << 2) | + (ui_cmd->channel_identifier.major >> 8); + msg->msg[4] = ui_cmd->channel_identifier.major & 0xff; + msg->msg[5] = ui_cmd->channel_identifier.minor >> 8; + msg->msg[6] = ui_cmd->channel_identifier.minor & 0xff; + break; + } +} + +static inline void cec_ops_user_control_pressed(const struct cec_msg *msg, + struct cec_op_ui_command *ui_cmd) +{ + ui_cmd->ui_cmd = msg->msg[2]; + ui_cmd->has_opt_arg = false; + if (msg->len == 3) + return; + switch (ui_cmd->ui_cmd) { + case 0x56: + case 0x57: + case 0x60: + case 0x68: + case 0x69: + case 0x6a: + /* The optional operand is one byte for all these ui commands */ + ui_cmd->play_mode = msg->msg[3]; + ui_cmd->has_opt_arg = true; + break; + case 0x67: + if (msg->len < 7) + break; + ui_cmd->has_opt_arg = true; + ui_cmd->channel_identifier.channel_number_fmt = msg->msg[3] >> 2; + /* msg[3] low 2 bits are the TOP two of the 10-bit major (encoder does major >> 8), so shift by 8, not 6 */ + ui_cmd->channel_identifier.major = ((msg->msg[3] & 3) << 8) | msg->msg[4]; + ui_cmd->channel_identifier.minor = (msg->msg[5] 
<< 8) | msg->msg[6]; + break; + } +} + +static inline void cec_msg_user_control_released(struct cec_msg *msg) +{ + msg->len = 2; + msg->msg[1] = CEC_MSG_USER_CONTROL_RELEASED; +} + +/* Remote Control Passthrough Feature */ + +/* Power Status Feature */ +static inline void cec_msg_report_power_status(struct cec_msg *msg, + __u8 pwr_state) +{ + msg->len = 3; + msg->msg[1] = CEC_MSG_REPORT_POWER_STATUS; + msg->msg[2] = pwr_state; +} + +static inline void cec_ops_report_power_status(const struct cec_msg *msg, + __u8 *pwr_state) +{ + *pwr_state = msg->msg[2]; +} + +static inline void cec_msg_give_device_power_status(struct cec_msg *msg, + bool reply) +{ + msg->len = 2; + msg->msg[1] = CEC_MSG_GIVE_DEVICE_POWER_STATUS; + msg->reply = reply ? CEC_MSG_REPORT_POWER_STATUS : 0; +} + +/* General Protocol Messages */ +static inline void cec_msg_feature_abort(struct cec_msg *msg, + __u8 abort_msg, __u8 reason) +{ + msg->len = 4; + msg->msg[1] = CEC_MSG_FEATURE_ABORT; + msg->msg[2] = abort_msg; + msg->msg[3] = reason; +} + +static inline void cec_ops_feature_abort(const struct cec_msg *msg, + __u8 *abort_msg, __u8 *reason) +{ + *abort_msg = msg->msg[2]; + *reason = msg->msg[3]; +} + +/* This changes the current message into a feature abort message */ +static inline void cec_msg_reply_feature_abort(struct cec_msg *msg, __u8 reason) +{ + cec_msg_set_reply_to(msg, msg); + msg->len = 4; + msg->msg[2] = msg->msg[1]; + msg->msg[3] = reason; + msg->msg[1] = CEC_MSG_FEATURE_ABORT; +} + +static inline void cec_msg_abort(struct cec_msg *msg) +{ + msg->len = 2; + msg->msg[1] = CEC_MSG_ABORT; +} + + +/* System Audio Control Feature */ +static inline void cec_msg_report_audio_status(struct cec_msg *msg, + __u8 aud_mute_status, + __u8 aud_vol_status) +{ + msg->len = 3; + msg->msg[1] = CEC_MSG_REPORT_AUDIO_STATUS; + msg->msg[2] = (aud_mute_status << 7) | (aud_vol_status & 0x7f); +} + +static inline void cec_ops_report_audio_status(const struct cec_msg *msg, + __u8 *aud_mute_status, + __u8 
*aud_vol_status) +{ + *aud_mute_status = msg->msg[2] >> 7; + *aud_vol_status = msg->msg[2] & 0x7f; +} + +static inline void cec_msg_give_audio_status(struct cec_msg *msg, + bool reply) +{ + msg->len = 2; + msg->msg[1] = CEC_MSG_GIVE_AUDIO_STATUS; + msg->reply = reply ? CEC_MSG_REPORT_AUDIO_STATUS : 0; +} + +static inline void cec_msg_set_system_audio_mode(struct cec_msg *msg, + __u8 sys_aud_status) +{ + msg->len = 3; + msg->msg[1] = CEC_MSG_SET_SYSTEM_AUDIO_MODE; + msg->msg[2] = sys_aud_status; +} + +static inline void cec_ops_set_system_audio_mode(const struct cec_msg *msg, + __u8 *sys_aud_status) +{ + *sys_aud_status = msg->msg[2]; +} + +static inline void cec_msg_system_audio_mode_request(struct cec_msg *msg, + bool reply, + __u16 phys_addr) +{ + msg->len = phys_addr == 0xffff ? 2 : 4; + msg->msg[1] = CEC_MSG_SYSTEM_AUDIO_MODE_REQUEST; + msg->msg[2] = phys_addr >> 8; + msg->msg[3] = phys_addr & 0xff; + msg->reply = reply ? CEC_MSG_SET_SYSTEM_AUDIO_MODE : 0; + +} + +static inline void cec_ops_system_audio_mode_request(const struct cec_msg *msg, + __u16 *phys_addr) +{ + if (msg->len < 4) + *phys_addr = 0xffff; + else + *phys_addr = (msg->msg[2] << 8) | msg->msg[3]; +} + +static inline void cec_msg_system_audio_mode_status(struct cec_msg *msg, + __u8 sys_aud_status) +{ + msg->len = 3; + msg->msg[1] = CEC_MSG_SYSTEM_AUDIO_MODE_STATUS; + msg->msg[2] = sys_aud_status; +} + +static inline void cec_ops_system_audio_mode_status(const struct cec_msg *msg, + __u8 *sys_aud_status) +{ + *sys_aud_status = msg->msg[2]; +} + +static inline void cec_msg_give_system_audio_mode_status(struct cec_msg *msg, + bool reply) +{ + msg->len = 2; + msg->msg[1] = CEC_MSG_GIVE_SYSTEM_AUDIO_MODE_STATUS; + msg->reply = reply ? 
CEC_MSG_SYSTEM_AUDIO_MODE_STATUS : 0; +} + +static inline void cec_msg_report_short_audio_descriptor(struct cec_msg *msg, + __u8 num_descriptors, + const __u32 *descriptors) +{ + unsigned int i; + + if (num_descriptors > 4) + num_descriptors = 4; + msg->len = 2 + num_descriptors * 3; + msg->msg[1] = CEC_MSG_REPORT_SHORT_AUDIO_DESCRIPTOR; + for (i = 0; i < num_descriptors; i++) { + msg->msg[2 + i * 3] = (descriptors[i] >> 16) & 0xff; + msg->msg[3 + i * 3] = (descriptors[i] >> 8) & 0xff; + msg->msg[4 + i * 3] = descriptors[i] & 0xff; + } +} + +static inline void cec_ops_report_short_audio_descriptor(const struct cec_msg *msg, + __u8 *num_descriptors, + __u32 *descriptors) +{ + unsigned int i; + + *num_descriptors = (msg->len - 2) / 3; + if (*num_descriptors > 4) + *num_descriptors = 4; + for (i = 0; i < *num_descriptors; i++) + descriptors[i] = (msg->msg[2 + i * 3] << 16) | + (msg->msg[3 + i * 3] << 8) | + msg->msg[4 + i * 3]; +} + +static inline void cec_msg_request_short_audio_descriptor(struct cec_msg *msg, + bool reply, + __u8 num_descriptors, + const __u8 *audio_format_id, + const __u8 *audio_format_code) +{ + unsigned int i; + + if (num_descriptors > 4) + num_descriptors = 4; + msg->len = 2 + num_descriptors; + msg->msg[1] = CEC_MSG_REQUEST_SHORT_AUDIO_DESCRIPTOR; + msg->reply = reply ? 
CEC_MSG_REPORT_SHORT_AUDIO_DESCRIPTOR : 0; + for (i = 0; i < num_descriptors; i++) + msg->msg[2 + i] = (audio_format_id[i] << 6) | + (audio_format_code[i] & 0x3f); +} + +static inline void cec_ops_request_short_audio_descriptor(const struct cec_msg *msg, + __u8 *num_descriptors, + __u8 *audio_format_id, + __u8 *audio_format_code) +{ + unsigned int i; + + *num_descriptors = msg->len - 2; + if (*num_descriptors > 4) + *num_descriptors = 4; + for (i = 0; i < *num_descriptors; i++) { + audio_format_id[i] = msg->msg[2 + i] >> 6; + audio_format_code[i] = msg->msg[2 + i] & 0x3f; + } +} + + +/* Audio Rate Control Feature */ +static inline void cec_msg_set_audio_rate(struct cec_msg *msg, + __u8 audio_rate) +{ + msg->len = 3; + msg->msg[1] = CEC_MSG_SET_AUDIO_RATE; + msg->msg[2] = audio_rate; +} + +static inline void cec_ops_set_audio_rate(const struct cec_msg *msg, + __u8 *audio_rate) +{ + *audio_rate = msg->msg[2]; +} + + +/* Audio Return Channel Control Feature */ +static inline void cec_msg_report_arc_initiated(struct cec_msg *msg) +{ + msg->len = 2; + msg->msg[1] = CEC_MSG_REPORT_ARC_INITIATED; +} + +static inline void cec_msg_initiate_arc(struct cec_msg *msg, + bool reply) +{ + msg->len = 2; + msg->msg[1] = CEC_MSG_INITIATE_ARC; + msg->reply = reply ? CEC_MSG_REPORT_ARC_INITIATED : 0; +} + +static inline void cec_msg_request_arc_initiation(struct cec_msg *msg, + bool reply) +{ + msg->len = 2; + msg->msg[1] = CEC_MSG_REQUEST_ARC_INITIATION; + msg->reply = reply ? CEC_MSG_INITIATE_ARC : 0; +} + +static inline void cec_msg_report_arc_terminated(struct cec_msg *msg) +{ + msg->len = 2; + msg->msg[1] = CEC_MSG_REPORT_ARC_TERMINATED; +} + +static inline void cec_msg_terminate_arc(struct cec_msg *msg, + bool reply) +{ + msg->len = 2; + msg->msg[1] = CEC_MSG_TERMINATE_ARC; + msg->reply = reply ? 
CEC_MSG_REPORT_ARC_TERMINATED : 0; +} + +static inline void cec_msg_request_arc_termination(struct cec_msg *msg, + bool reply) +{ + msg->len = 2; + msg->msg[1] = CEC_MSG_REQUEST_ARC_TERMINATION; + msg->reply = reply ? CEC_MSG_TERMINATE_ARC : 0; +} + + +/* Dynamic Audio Lipsync Feature */ +/* Only for CEC 2.0 and up */ +static inline void cec_msg_report_current_latency(struct cec_msg *msg, + __u16 phys_addr, + __u8 video_latency, + __u8 low_latency_mode, + __u8 audio_out_compensated, + __u8 audio_out_delay) +{ + msg->len = 7; + msg->msg[0] |= 0xf; /* broadcast */ + msg->msg[1] = CEC_MSG_REPORT_CURRENT_LATENCY; + msg->msg[2] = phys_addr >> 8; + msg->msg[3] = phys_addr & 0xff; + msg->msg[4] = video_latency; + msg->msg[5] = (low_latency_mode << 2) | audio_out_compensated; + msg->msg[6] = audio_out_delay; +} + +static inline void cec_ops_report_current_latency(const struct cec_msg *msg, + __u16 *phys_addr, + __u8 *video_latency, + __u8 *low_latency_mode, + __u8 *audio_out_compensated, + __u8 *audio_out_delay) +{ + *phys_addr = (msg->msg[2] << 8) | msg->msg[3]; + *video_latency = msg->msg[4]; + *low_latency_mode = (msg->msg[5] >> 2) & 1; + *audio_out_compensated = msg->msg[5] & 3; + *audio_out_delay = msg->msg[6]; +} + +static inline void cec_msg_request_current_latency(struct cec_msg *msg, + bool reply, + __u16 phys_addr) +{ + msg->len = 4; + msg->msg[0] |= 0xf; /* broadcast */ + msg->msg[1] = CEC_MSG_REQUEST_CURRENT_LATENCY; + msg->msg[2] = phys_addr >> 8; + msg->msg[3] = phys_addr & 0xff; + msg->reply = reply ? 
CEC_MSG_REPORT_CURRENT_LATENCY : 0; +} + +static inline void cec_ops_request_current_latency(const struct cec_msg *msg, + __u16 *phys_addr) +{ + *phys_addr = (msg->msg[2] << 8) | msg->msg[3]; +} + + +/* Capability Discovery and Control Feature */ +static inline void cec_msg_cdc_hec_inquire_state(struct cec_msg *msg, + __u16 phys_addr1, + __u16 phys_addr2) +{ + msg->len = 9; + msg->msg[0] |= 0xf; /* broadcast */ + msg->msg[1] = CEC_MSG_CDC_MESSAGE; + /* msg[2] and msg[3] (phys_addr) are filled in by the CEC framework */ + msg->msg[4] = CEC_MSG_CDC_HEC_INQUIRE_STATE; + msg->msg[5] = phys_addr1 >> 8; + msg->msg[6] = phys_addr1 & 0xff; + msg->msg[7] = phys_addr2 >> 8; + msg->msg[8] = phys_addr2 & 0xff; +} + +static inline void cec_ops_cdc_hec_inquire_state(const struct cec_msg *msg, + __u16 *phys_addr, + __u16 *phys_addr1, + __u16 *phys_addr2) +{ + *phys_addr = (msg->msg[2] << 8) | msg->msg[3]; + *phys_addr1 = (msg->msg[5] << 8) | msg->msg[6]; + *phys_addr2 = (msg->msg[7] << 8) | msg->msg[8]; +} + +static inline void cec_msg_cdc_hec_report_state(struct cec_msg *msg, + __u16 target_phys_addr, + __u8 hec_func_state, + __u8 host_func_state, + __u8 enc_func_state, + __u8 cdc_errcode, + __u8 has_field, + __u16 hec_field) +{ + msg->len = has_field ? 
10 : 8; + msg->msg[0] |= 0xf; /* broadcast */ + msg->msg[1] = CEC_MSG_CDC_MESSAGE; + /* msg[2] and msg[3] (phys_addr) are filled in by the CEC framework */ + msg->msg[4] = CEC_MSG_CDC_HEC_REPORT_STATE; + msg->msg[5] = target_phys_addr >> 8; + msg->msg[6] = target_phys_addr & 0xff; + msg->msg[7] = (hec_func_state << 6) | + (host_func_state << 4) | + (enc_func_state << 2) | + cdc_errcode; + if (has_field) { + msg->msg[8] = hec_field >> 8; + msg->msg[9] = hec_field & 0xff; + } +} + +static inline void cec_ops_cdc_hec_report_state(const struct cec_msg *msg, + __u16 *phys_addr, + __u16 *target_phys_addr, + __u8 *hec_func_state, + __u8 *host_func_state, + __u8 *enc_func_state, + __u8 *cdc_errcode, + __u8 *has_field, + __u16 *hec_field) +{ + *phys_addr = (msg->msg[2] << 8) | msg->msg[3]; + *target_phys_addr = (msg->msg[5] << 8) | msg->msg[6]; + *hec_func_state = msg->msg[7] >> 6; + *host_func_state = (msg->msg[7] >> 4) & 3; + /* was >> 4 (a copy of host_func_state); encoder packs enc_func_state << 2, so decode with >> 2 */ + *enc_func_state = (msg->msg[7] >> 2) & 3; + *cdc_errcode = msg->msg[7] & 3; + *has_field = msg->len >= 10; + *hec_field = *has_field ? 
((msg->msg[8] << 8) | msg->msg[9]) : 0; +} + +static inline void cec_msg_cdc_hec_set_state(struct cec_msg *msg, + __u16 phys_addr1, + __u16 phys_addr2, + __u8 hec_set_state, + __u16 phys_addr3, + __u16 phys_addr4, + __u16 phys_addr5) +{ + msg->len = 10; + msg->msg[0] |= 0xf; /* broadcast */ + msg->msg[1] = CEC_MSG_CDC_MESSAGE; + /* msg[2] and msg[3] (phys_addr) are filled in by the CEC framework */ + /* was CEC_MSG_CDC_HEC_INQUIRE_STATE: this builder emits the CDC Set State message (msg[9] = hec_set_state, len 10) */ + msg->msg[4] = CEC_MSG_CDC_HEC_SET_STATE; + msg->msg[5] = phys_addr1 >> 8; + msg->msg[6] = phys_addr1 & 0xff; + msg->msg[7] = phys_addr2 >> 8; + msg->msg[8] = phys_addr2 & 0xff; + msg->msg[9] = hec_set_state; + if (phys_addr3 != CEC_PHYS_ADDR_INVALID) { + msg->msg[msg->len++] = phys_addr3 >> 8; + msg->msg[msg->len++] = phys_addr3 & 0xff; + if (phys_addr4 != CEC_PHYS_ADDR_INVALID) { + msg->msg[msg->len++] = phys_addr4 >> 8; + msg->msg[msg->len++] = phys_addr4 & 0xff; + if (phys_addr5 != CEC_PHYS_ADDR_INVALID) { + msg->msg[msg->len++] = phys_addr5 >> 8; + msg->msg[msg->len++] = phys_addr5 & 0xff; + } + } + } +} + +static inline void cec_ops_cdc_hec_set_state(const struct cec_msg *msg, + __u16 *phys_addr, + __u16 *phys_addr1, + __u16 *phys_addr2, + __u8 *hec_set_state, + __u16 *phys_addr3, + __u16 *phys_addr4, + __u16 *phys_addr5) +{ + *phys_addr = (msg->msg[2] << 8) | msg->msg[3]; + *phys_addr1 = (msg->msg[5] << 8) | msg->msg[6]; + *phys_addr2 = (msg->msg[7] << 8) | msg->msg[8]; + *hec_set_state = msg->msg[9]; + *phys_addr3 = *phys_addr4 = *phys_addr5 = CEC_PHYS_ADDR_INVALID; + if (msg->len >= 12) + *phys_addr3 = (msg->msg[10] << 8) | msg->msg[11]; + if (msg->len >= 14) + *phys_addr4 = (msg->msg[12] << 8) | msg->msg[13]; + if (msg->len >= 16) + *phys_addr5 = (msg->msg[14] << 8) | msg->msg[15]; +} + +static inline void cec_msg_cdc_hec_set_state_adjacent(struct cec_msg *msg, + __u16 phys_addr1, + __u8 hec_set_state) +{ + msg->len = 8; + msg->msg[0] |= 0xf; /* broadcast */ + msg->msg[1] = CEC_MSG_CDC_MESSAGE; + /* msg[2] and msg[3] (phys_addr) are filled in by the CEC 
framework */ + msg->msg[4] = CEC_MSG_CDC_HEC_SET_STATE_ADJACENT; + msg->msg[5] = phys_addr1 >> 8; + msg->msg[6] = phys_addr1 & 0xff; + msg->msg[7] = hec_set_state; +} + +static inline void cec_ops_cdc_hec_set_state_adjacent(const struct cec_msg *msg, + __u16 *phys_addr, + __u16 *phys_addr1, + __u8 *hec_set_state) +{ + *phys_addr = (msg->msg[2] << 8) | msg->msg[3]; + *phys_addr1 = (msg->msg[5] << 8) | msg->msg[6]; + *hec_set_state = msg->msg[7]; +} + +static inline void cec_msg_cdc_hec_request_deactivation(struct cec_msg *msg, + __u16 phys_addr1, + __u16 phys_addr2, + __u16 phys_addr3) +{ + msg->len = 11; + msg->msg[0] |= 0xf; /* broadcast */ + msg->msg[1] = CEC_MSG_CDC_MESSAGE; + /* msg[2] and msg[3] (phys_addr) are filled in by the CEC framework */ + msg->msg[4] = CEC_MSG_CDC_HEC_REQUEST_DEACTIVATION; + msg->msg[5] = phys_addr1 >> 8; + msg->msg[6] = phys_addr1 & 0xff; + msg->msg[7] = phys_addr2 >> 8; + msg->msg[8] = phys_addr2 & 0xff; + msg->msg[9] = phys_addr3 >> 8; + msg->msg[10] = phys_addr3 & 0xff; +} + +static inline void cec_ops_cdc_hec_request_deactivation(const struct cec_msg *msg, + __u16 *phys_addr, + __u16 *phys_addr1, + __u16 *phys_addr2, + __u16 *phys_addr3) +{ + *phys_addr = (msg->msg[2] << 8) | msg->msg[3]; + *phys_addr1 = (msg->msg[5] << 8) | msg->msg[6]; + *phys_addr2 = (msg->msg[7] << 8) | msg->msg[8]; + *phys_addr3 = (msg->msg[9] << 8) | msg->msg[10]; +} + +static inline void cec_msg_cdc_hec_notify_alive(struct cec_msg *msg) +{ + msg->len = 5; + msg->msg[0] |= 0xf; /* broadcast */ + msg->msg[1] = CEC_MSG_CDC_MESSAGE; + /* msg[2] and msg[3] (phys_addr) are filled in by the CEC framework */ + msg->msg[4] = CEC_MSG_CDC_HEC_NOTIFY_ALIVE; +} + +static inline void cec_ops_cdc_hec_notify_alive(const struct cec_msg *msg, + __u16 *phys_addr) +{ + *phys_addr = (msg->msg[2] << 8) | msg->msg[3]; +} + +static inline void cec_msg_cdc_hec_discover(struct cec_msg *msg) +{ + msg->len = 5; + msg->msg[0] |= 0xf; /* broadcast */ + msg->msg[1] = CEC_MSG_CDC_MESSAGE; 
+ /* msg[2] and msg[3] (phys_addr) are filled in by the CEC framework */ + msg->msg[4] = CEC_MSG_CDC_HEC_DISCOVER; +} + +static inline void cec_ops_cdc_hec_discover(const struct cec_msg *msg, + __u16 *phys_addr) +{ + *phys_addr = (msg->msg[2] << 8) | msg->msg[3]; +} + +static inline void cec_msg_cdc_hpd_set_state(struct cec_msg *msg, + __u8 input_port, + __u8 hpd_state) +{ + msg->len = 6; + msg->msg[0] |= 0xf; /* broadcast */ + msg->msg[1] = CEC_MSG_CDC_MESSAGE; + /* msg[2] and msg[3] (phys_addr) are filled in by the CEC framework */ + msg->msg[4] = CEC_MSG_CDC_HPD_SET_STATE; + msg->msg[5] = (input_port << 4) | hpd_state; +} + +static inline void cec_ops_cdc_hpd_set_state(const struct cec_msg *msg, + __u16 *phys_addr, + __u8 *input_port, + __u8 *hpd_state) +{ + *phys_addr = (msg->msg[2] << 8) | msg->msg[3]; + *input_port = msg->msg[5] >> 4; + *hpd_state = msg->msg[5] & 0xf; +} + +static inline void cec_msg_cdc_hpd_report_state(struct cec_msg *msg, + __u8 hpd_state, + __u8 hpd_error) +{ + msg->len = 6; + msg->msg[0] |= 0xf; /* broadcast */ + msg->msg[1] = CEC_MSG_CDC_MESSAGE; + /* msg[2] and msg[3] (phys_addr) are filled in by the CEC framework */ + msg->msg[4] = CEC_MSG_CDC_HPD_REPORT_STATE; + msg->msg[5] = (hpd_state << 4) | hpd_error; +} + +static inline void cec_ops_cdc_hpd_report_state(const struct cec_msg *msg, + __u16 *phys_addr, + __u8 *hpd_state, + __u8 *hpd_error) +{ + *phys_addr = (msg->msg[2] << 8) | msg->msg[3]; + *hpd_state = msg->msg[5] >> 4; + *hpd_error = msg->msg[5] & 0xf; +} + +#endif diff --git a/include/linux/cec.h b/include/linux/cec.h new file mode 100644 index 0000000000..851968e803 --- /dev/null +++ b/include/linux/cec.h @@ -0,0 +1,1014 @@ +/* + * cec - HDMI Consumer Electronics Control public header + * + * Copyright 2016 Cisco Systems, Inc. and/or its affiliates. All rights reserved. 
 + * + * This program is free software; you may redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; version 2 of the License. + * + * Alternatively you can redistribute this file under the terms of the + * BSD license as stated below: + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * 3. The names of its contributors may not be used to endorse or promote + * products derived from this software without specific prior written + * permission. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +/* + * Note: this framework is still in staging and it is likely the API + * will change before it goes out of staging. + * + * Once it is moved out of staging this header will move to uapi. + */ +#ifndef _CEC_UAPI_H +#define _CEC_UAPI_H + +#include <linux/types.h> + +#define CEC_MAX_MSG_SIZE 16 + +/** + * struct cec_msg - CEC message structure. + * @tx_ts: Timestamp in nanoseconds using CLOCK_MONOTONIC. Set by the + * driver when the message transmission has finished. 
+ * @rx_ts: Timestamp in nanoseconds using CLOCK_MONOTONIC. Set by the + * driver when the message was received. + * @len: Length in bytes of the message. + * @timeout: The timeout (in ms) that is used to timeout CEC_RECEIVE. + * Set to 0 if you want to wait forever. This timeout can also be + * used with CEC_TRANSMIT as the timeout for waiting for a reply. + * If 0, then it will use a 1 second timeout instead of waiting + * forever as is done with CEC_RECEIVE. + * @sequence: The framework assigns a sequence number to messages that are + * sent. This can be used to track replies to previously sent + * messages. + * @flags: Set to 0. + * @msg: The message payload. + * @reply: This field is ignored with CEC_RECEIVE and is only used by + * CEC_TRANSMIT. If non-zero, then wait for a reply with this + * opcode. Set to CEC_MSG_FEATURE_ABORT if you want to wait for + * a possible ABORT reply. If there was an error when sending the + * msg or FeatureAbort was returned, then reply is set to 0. + * If reply is non-zero upon return, then len/msg are set to + * the received message. + * If reply is zero upon return and status has the + * CEC_TX_STATUS_FEATURE_ABORT bit set, then len/msg are set to + * the received feature abort message. + * If reply is zero upon return and status has the + * CEC_TX_STATUS_MAX_RETRIES bit set, then no reply was seen at + * all. If reply is non-zero for CEC_TRANSMIT and the message is a + * broadcast, then -EINVAL is returned. + * if reply is non-zero, then timeout is set to 1000 (the required + * maximum response time). + * @rx_status: The message receive status bits. Set by the driver. + * @tx_status: The message transmit status bits. Set by the driver. + * @tx_arb_lost_cnt: The number of 'Arbitration Lost' events. Set by the driver. + * @tx_nack_cnt: The number of 'Not Acknowledged' events. Set by the driver. + * @tx_low_drive_cnt: The number of 'Low Drive Detected' events. Set by the + * driver. 
+ * @tx_error_cnt: The number of 'Error' events. Set by the driver. + */ +struct cec_msg { + __u64 tx_ts; + __u64 rx_ts; + __u32 len; + __u32 timeout; + __u32 sequence; + __u32 flags; + __u8 msg[CEC_MAX_MSG_SIZE]; + __u8 reply; + __u8 rx_status; + __u8 tx_status; + __u8 tx_arb_lost_cnt; + __u8 tx_nack_cnt; + __u8 tx_low_drive_cnt; + __u8 tx_error_cnt; +}; + +/** + * cec_msg_initiator - return the initiator's logical address. + * @msg: the message structure + */ +static inline __u8 cec_msg_initiator(const struct cec_msg *msg) +{ + return msg->msg[0] >> 4; +} + +/** + * cec_msg_destination - return the destination's logical address. + * @msg: the message structure + */ +static inline __u8 cec_msg_destination(const struct cec_msg *msg) +{ + return msg->msg[0] & 0xf; +} + +/** + * cec_msg_opcode - return the opcode of the message, -1 for poll + * @msg: the message structure + */ +static inline int cec_msg_opcode(const struct cec_msg *msg) +{ + return msg->len > 1 ? msg->msg[1] : -1; +} + +/** + * cec_msg_is_broadcast - return true if this is a broadcast message. + * @msg: the message structure + */ +static inline bool cec_msg_is_broadcast(const struct cec_msg *msg) +{ + return (msg->msg[0] & 0xf) == 0xf; +} + +/** + * cec_msg_init - initialize the message structure. + * @msg: the message structure + * @initiator: the logical address of the initiator + * @destination:the logical address of the destination (0xf for broadcast) + * + * The whole structure is zeroed, the len field is set to 1 (i.e. a poll + * message) and the initiator and destination are filled in. + */ +static inline void cec_msg_init(struct cec_msg *msg, + __u8 initiator, __u8 destination) +{ + memset(msg, 0, sizeof(*msg)); + msg->msg[0] = (initiator << 4) | destination; + msg->len = 1; +} + +/** + * cec_msg_set_reply_to - fill in destination/initiator in a reply message. 
+ * @msg: the message structure for the reply + * @orig: the original message structure + * + * Set the msg destination to the orig initiator and the msg initiator to the + * orig destination. Note that msg and orig may be the same pointer, in which + * case the change is done in place. + */ +static inline void cec_msg_set_reply_to(struct cec_msg *msg, + struct cec_msg *orig) +{ + /* The destination becomes the initiator and vice versa */ + msg->msg[0] = (cec_msg_destination(orig) << 4) | + cec_msg_initiator(orig); + msg->reply = msg->timeout = 0; +} + +/* cec status field */ +#define CEC_TX_STATUS_OK (1 << 0) +#define CEC_TX_STATUS_ARB_LOST (1 << 1) +#define CEC_TX_STATUS_NACK (1 << 2) +#define CEC_TX_STATUS_LOW_DRIVE (1 << 3) +#define CEC_TX_STATUS_ERROR (1 << 4) +#define CEC_TX_STATUS_MAX_RETRIES (1 << 5) + +#define CEC_RX_STATUS_OK (1 << 0) +#define CEC_RX_STATUS_TIMEOUT (1 << 1) +#define CEC_RX_STATUS_FEATURE_ABORT (1 << 2) + +static inline bool cec_msg_status_is_ok(const struct cec_msg *msg) +{ + if (msg->tx_status && !(msg->tx_status & CEC_TX_STATUS_OK)) + return false; + if (msg->rx_status && !(msg->rx_status & CEC_RX_STATUS_OK)) + return false; + if (!msg->tx_status && !msg->rx_status) + return false; + return !(msg->rx_status & CEC_RX_STATUS_FEATURE_ABORT); +} + +#define CEC_LOG_ADDR_INVALID 0xff +#define CEC_PHYS_ADDR_INVALID 0xffff + +/* + * The maximum number of logical addresses one device can be assigned to. + * The CEC 2.0 spec allows for only 2 logical addresses at the moment. The + * Analog Devices CEC hardware supports 3. So let's go wild and go for 4. 
 + */ +#define CEC_MAX_LOG_ADDRS 4 + +/* The logical addresses defined by CEC 2.0 */ +#define CEC_LOG_ADDR_TV 0 +#define CEC_LOG_ADDR_RECORD_1 1 +#define CEC_LOG_ADDR_RECORD_2 2 +#define CEC_LOG_ADDR_TUNER_1 3 +#define CEC_LOG_ADDR_PLAYBACK_1 4 +#define CEC_LOG_ADDR_AUDIOSYSTEM 5 +#define CEC_LOG_ADDR_TUNER_2 6 +#define CEC_LOG_ADDR_TUNER_3 7 +#define CEC_LOG_ADDR_PLAYBACK_2 8 +#define CEC_LOG_ADDR_RECORD_3 9 +#define CEC_LOG_ADDR_TUNER_4 10 +#define CEC_LOG_ADDR_PLAYBACK_3 11 +#define CEC_LOG_ADDR_BACKUP_1 12 +#define CEC_LOG_ADDR_BACKUP_2 13 +#define CEC_LOG_ADDR_SPECIFIC 14 +#define CEC_LOG_ADDR_UNREGISTERED 15 /* as initiator address */ +#define CEC_LOG_ADDR_BROADCAST 15 /* as destination address */ + +/* The logical address types that the CEC device wants to claim */ +#define CEC_LOG_ADDR_TYPE_TV 0 +#define CEC_LOG_ADDR_TYPE_RECORD 1 +#define CEC_LOG_ADDR_TYPE_TUNER 2 +#define CEC_LOG_ADDR_TYPE_PLAYBACK 3 +#define CEC_LOG_ADDR_TYPE_AUDIOSYSTEM 4 +#define CEC_LOG_ADDR_TYPE_SPECIFIC 5 +#define CEC_LOG_ADDR_TYPE_UNREGISTERED 6 +/* + * Switches should use UNREGISTERED. + * Processors should use SPECIFIC. 
+ */ + +#define CEC_LOG_ADDR_MASK_TV (1 << CEC_LOG_ADDR_TV) +#define CEC_LOG_ADDR_MASK_RECORD ((1 << CEC_LOG_ADDR_RECORD_1) | \ + (1 << CEC_LOG_ADDR_RECORD_2) | \ + (1 << CEC_LOG_ADDR_RECORD_3)) +#define CEC_LOG_ADDR_MASK_TUNER ((1 << CEC_LOG_ADDR_TUNER_1) | \ + (1 << CEC_LOG_ADDR_TUNER_2) | \ + (1 << CEC_LOG_ADDR_TUNER_3) | \ + (1 << CEC_LOG_ADDR_TUNER_4)) +#define CEC_LOG_ADDR_MASK_PLAYBACK ((1 << CEC_LOG_ADDR_PLAYBACK_1) | \ + (1 << CEC_LOG_ADDR_PLAYBACK_2) | \ + (1 << CEC_LOG_ADDR_PLAYBACK_3)) +#define CEC_LOG_ADDR_MASK_AUDIOSYSTEM (1 << CEC_LOG_ADDR_AUDIOSYSTEM) +#define CEC_LOG_ADDR_MASK_BACKUP ((1 << CEC_LOG_ADDR_BACKUP_1) | \ + (1 << CEC_LOG_ADDR_BACKUP_2)) +#define CEC_LOG_ADDR_MASK_SPECIFIC (1 << CEC_LOG_ADDR_SPECIFIC) +#define CEC_LOG_ADDR_MASK_UNREGISTERED (1 << CEC_LOG_ADDR_UNREGISTERED) + +static inline bool cec_has_tv(__u16 log_addr_mask) +{ + return log_addr_mask & CEC_LOG_ADDR_MASK_TV; +} + +static inline bool cec_has_record(__u16 log_addr_mask) +{ + return log_addr_mask & CEC_LOG_ADDR_MASK_RECORD; +} + +static inline bool cec_has_tuner(__u16 log_addr_mask) +{ + return log_addr_mask & CEC_LOG_ADDR_MASK_TUNER; +} + +static inline bool cec_has_playback(__u16 log_addr_mask) +{ + return log_addr_mask & CEC_LOG_ADDR_MASK_PLAYBACK; +} + +static inline bool cec_has_audiosystem(__u16 log_addr_mask) +{ + return log_addr_mask & CEC_LOG_ADDR_MASK_AUDIOSYSTEM; +} + +static inline bool cec_has_backup(__u16 log_addr_mask) +{ + return log_addr_mask & CEC_LOG_ADDR_MASK_BACKUP; +} + +static inline bool cec_has_specific(__u16 log_addr_mask) +{ + return log_addr_mask & CEC_LOG_ADDR_MASK_SPECIFIC; +} + +static inline bool cec_is_unregistered(__u16 log_addr_mask) +{ + return log_addr_mask & CEC_LOG_ADDR_MASK_UNREGISTERED; +} + +static inline bool cec_is_unconfigured(__u16 log_addr_mask) +{ + return log_addr_mask == 0; +} + +/* + * Use this if there is no vendor ID (CEC_G_VENDOR_ID) or if the vendor ID + * should be disabled (CEC_S_VENDOR_ID) + */ +#define 
CEC_VENDOR_ID_NONE 0xffffffff + +/* The message handling modes */ +/* Modes for initiator */ +#define CEC_MODE_NO_INITIATOR (0x0 << 0) +#define CEC_MODE_INITIATOR (0x1 << 0) +#define CEC_MODE_EXCL_INITIATOR (0x2 << 0) +#define CEC_MODE_INITIATOR_MSK 0x0f + +/* Modes for follower */ +#define CEC_MODE_NO_FOLLOWER (0x0 << 4) +#define CEC_MODE_FOLLOWER (0x1 << 4) +#define CEC_MODE_EXCL_FOLLOWER (0x2 << 4) +#define CEC_MODE_EXCL_FOLLOWER_PASSTHRU (0x3 << 4) +#define CEC_MODE_MONITOR (0xe << 4) +#define CEC_MODE_MONITOR_ALL (0xf << 4) +#define CEC_MODE_FOLLOWER_MSK 0xf0 + +/* Userspace has to configure the physical address */ +#define CEC_CAP_PHYS_ADDR (1 << 0) +/* Userspace has to configure the logical addresses */ +#define CEC_CAP_LOG_ADDRS (1 << 1) +/* Userspace can transmit messages (and thus become follower as well) */ +#define CEC_CAP_TRANSMIT (1 << 2) +/* + * Passthrough all messages instead of processing them. + */ +#define CEC_CAP_PASSTHROUGH (1 << 3) +/* Supports remote control */ +#define CEC_CAP_RC (1 << 4) +/* Hardware can monitor all messages, not just directed and broadcast. */ +#define CEC_CAP_MONITOR_ALL (1 << 5) + +/** + * struct cec_caps - CEC capabilities structure. + * @driver: name of the CEC device driver. + * @name: name of the CEC device. @driver + @name must be unique. + * @available_log_addrs: number of available logical addresses. + * @capabilities: capabilities of the CEC adapter. + * @version: version of the CEC adapter framework. + */ +struct cec_caps { + char driver[32]; + char name[32]; + __u32 available_log_addrs; + __u32 capabilities; + __u32 version; +}; + +/** + * struct cec_log_addrs - CEC logical addresses structure. + * @log_addr: the claimed logical addresses. Set by the driver. + * @log_addr_mask: current logical address mask. Set by the driver. + * @cec_version: the CEC version that the adapter should implement. Set by the + * caller. + * @num_log_addrs: how many logical addresses should be claimed. Set by the + * caller. 
+ * @vendor_id: the vendor ID of the device. Set by the caller. + * @flags: flags. + * @osd_name: the OSD name of the device. Set by the caller. + * @primary_device_type: the primary device type for each logical address. + * Set by the caller. + * @log_addr_type: the logical address types. Set by the caller. + * @all_device_types: CEC 2.0: all device types represented by the logical + * address. Set by the caller. + * @features: CEC 2.0: The logical address features. Set by the caller. + */ +struct cec_log_addrs { + __u8 log_addr[CEC_MAX_LOG_ADDRS]; + __u16 log_addr_mask; + __u8 cec_version; + __u8 num_log_addrs; + __u32 vendor_id; + __u32 flags; + char osd_name[15]; + __u8 primary_device_type[CEC_MAX_LOG_ADDRS]; + __u8 log_addr_type[CEC_MAX_LOG_ADDRS]; + + /* CEC 2.0 */ + __u8 all_device_types[CEC_MAX_LOG_ADDRS]; + __u8 features[CEC_MAX_LOG_ADDRS][12]; +}; + +/* Allow a fallback to unregistered */ +#define CEC_LOG_ADDRS_FL_ALLOW_UNREG_FALLBACK (1 << 0) + +/* Events */ + +/* Event that occurs when the adapter state changes */ +#define CEC_EVENT_STATE_CHANGE 1 +/* + * This event is sent when messages are lost because the application + * didn't empty the message queue in time + */ +#define CEC_EVENT_LOST_MSGS 2 + +#define CEC_EVENT_FL_INITIAL_STATE (1 << 0) + +/** + * struct cec_event_state_change - used when the CEC adapter changes state. + * @phys_addr: the current physical address + * @log_addr_mask: the current logical address mask + */ +struct cec_event_state_change { + __u16 phys_addr; + __u16 log_addr_mask; +}; + +/** + * struct cec_event_lost_msgs - tells you how many messages were lost due. + * @lost_msgs: how many messages were lost. + */ +struct cec_event_lost_msgs { + __u32 lost_msgs; +}; + +/** + * struct cec_event - CEC event structure + * @ts: the timestamp of when the event was sent. + * @event: the event. + * array. + * @state_change: the event payload for CEC_EVENT_STATE_CHANGE. + * @lost_msgs: the event payload for CEC_EVENT_LOST_MSGS. 
+ * @raw: array to pad the union. + */ +struct cec_event { + __u64 ts; + __u32 event; + __u32 flags; + union { + struct cec_event_state_change state_change; + struct cec_event_lost_msgs lost_msgs; + __u32 raw[16]; + }; +}; + +/* ioctls */ + +/* Adapter capabilities */ +#define CEC_ADAP_G_CAPS _IOWR('a', 0, struct cec_caps) + +/* + * phys_addr is either 0 (if this is the CEC root device) + * or a valid physical address obtained from the sink's EDID + * as read by this CEC device (if this is a source device) + * or a physical address obtained and modified from a sink + * EDID and used for a sink CEC device. + * If nothing is connected, then phys_addr is 0xffff. + * See HDMI 1.4b, section 8.7 (Physical Address). + * + * The CEC_ADAP_S_PHYS_ADDR ioctl may not be available if that is handled + * internally. + */ +#define CEC_ADAP_G_PHYS_ADDR _IOR('a', 1, __u16) +#define CEC_ADAP_S_PHYS_ADDR _IOW('a', 2, __u16) + +/* + * Configure the CEC adapter. It sets the device type and which + * logical types it will try to claim. It will return which + * logical addresses it could actually claim. + * An error is returned if the adapter is disabled or if there + * is no physical address assigned. + */ + +#define CEC_ADAP_G_LOG_ADDRS _IOR('a', 3, struct cec_log_addrs) +#define CEC_ADAP_S_LOG_ADDRS _IOWR('a', 4, struct cec_log_addrs) + +/* Transmit/receive a CEC command */ +#define CEC_TRANSMIT _IOWR('a', 5, struct cec_msg) +#define CEC_RECEIVE _IOWR('a', 6, struct cec_msg) + +/* Dequeue CEC events */ +#define CEC_DQEVENT _IOWR('a', 7, struct cec_event) + +/* + * Get and set the message handling mode for this filehandle. + */ +#define CEC_G_MODE _IOR('a', 8, __u32) +#define CEC_S_MODE _IOW('a', 9, __u32) + +/* + * The remainder of this header defines all CEC messages and operands. + * The format matters since it the cec-ctl utility parses it to generate + * code for implementing all these messages. + * + * Comments ending with 'Feature' group messages for each feature. 
+ * If messages are part of multiple features, then the "Has also" + * comment is used to list the previously defined messages that are + * supported by the feature. + * + * Before operands are defined a comment is added that gives the + * name of the operand and in brackets the variable name of the + * corresponding argument in the cec-funcs.h function. + */ + +/* Messages */ + +/* One Touch Play Feature */ +#define CEC_MSG_ACTIVE_SOURCE 0x82 +#define CEC_MSG_IMAGE_VIEW_ON 0x04 +#define CEC_MSG_TEXT_VIEW_ON 0x0d + + +/* Routing Control Feature */ + +/* + * Has also: + * CEC_MSG_ACTIVE_SOURCE + */ + +#define CEC_MSG_INACTIVE_SOURCE 0x9d +#define CEC_MSG_REQUEST_ACTIVE_SOURCE 0x85 +#define CEC_MSG_ROUTING_CHANGE 0x80 +#define CEC_MSG_ROUTING_INFORMATION 0x81 +#define CEC_MSG_SET_STREAM_PATH 0x86 + + +/* Standby Feature */ +#define CEC_MSG_STANDBY 0x36 + + +/* One Touch Record Feature */ +#define CEC_MSG_RECORD_OFF 0x0b +#define CEC_MSG_RECORD_ON 0x09 +/* Record Source Type Operand (rec_src_type) */ +#define CEC_OP_RECORD_SRC_OWN 1 +#define CEC_OP_RECORD_SRC_DIGITAL 2 +#define CEC_OP_RECORD_SRC_ANALOG 3 +#define CEC_OP_RECORD_SRC_EXT_PLUG 4 +#define CEC_OP_RECORD_SRC_EXT_PHYS_ADDR 5 +/* Service Identification Method Operand (service_id_method) */ +#define CEC_OP_SERVICE_ID_METHOD_BY_DIG_ID 0 +#define CEC_OP_SERVICE_ID_METHOD_BY_CHANNEL 1 +/* Digital Service Broadcast System Operand (dig_bcast_system) */ +#define CEC_OP_DIG_SERVICE_BCAST_SYSTEM_ARIB_GEN 0x00 +#define CEC_OP_DIG_SERVICE_BCAST_SYSTEM_ATSC_GEN 0x01 +#define CEC_OP_DIG_SERVICE_BCAST_SYSTEM_DVB_GEN 0x02 +#define CEC_OP_DIG_SERVICE_BCAST_SYSTEM_ARIB_BS 0x08 +#define CEC_OP_DIG_SERVICE_BCAST_SYSTEM_ARIB_CS 0x09 +#define CEC_OP_DIG_SERVICE_BCAST_SYSTEM_ARIB_T 0x0a +#define CEC_OP_DIG_SERVICE_BCAST_SYSTEM_ATSC_CABLE 0x10 +#define CEC_OP_DIG_SERVICE_BCAST_SYSTEM_ATSC_SAT 0x11 +#define CEC_OP_DIG_SERVICE_BCAST_SYSTEM_ATSC_T 0x12 +#define CEC_OP_DIG_SERVICE_BCAST_SYSTEM_DVB_C 0x18 +#define 
CEC_OP_DIG_SERVICE_BCAST_SYSTEM_DVB_S 0x19 +#define CEC_OP_DIG_SERVICE_BCAST_SYSTEM_DVB_S2 0x1a +#define CEC_OP_DIG_SERVICE_BCAST_SYSTEM_DVB_T 0x1b +/* Analogue Broadcast Type Operand (ana_bcast_type) */ +#define CEC_OP_ANA_BCAST_TYPE_CABLE 0 +#define CEC_OP_ANA_BCAST_TYPE_SATELLITE 1 +#define CEC_OP_ANA_BCAST_TYPE_TERRESTRIAL 2 +/* Broadcast System Operand (bcast_system) */ +#define CEC_OP_BCAST_SYSTEM_PAL_BG 0x00 +#define CEC_OP_BCAST_SYSTEM_SECAM_LQ 0x01 /* SECAM L' */ +#define CEC_OP_BCAST_SYSTEM_PAL_M 0x02 +#define CEC_OP_BCAST_SYSTEM_NTSC_M 0x03 +#define CEC_OP_BCAST_SYSTEM_PAL_I 0x04 +#define CEC_OP_BCAST_SYSTEM_SECAM_DK 0x05 +#define CEC_OP_BCAST_SYSTEM_SECAM_BG 0x06 +#define CEC_OP_BCAST_SYSTEM_SECAM_L 0x07 +#define CEC_OP_BCAST_SYSTEM_PAL_DK 0x08 +#define CEC_OP_BCAST_SYSTEM_OTHER 0x1f +/* Channel Number Format Operand (channel_number_fmt) */ +#define CEC_OP_CHANNEL_NUMBER_FMT_1_PART 0x01 +#define CEC_OP_CHANNEL_NUMBER_FMT_2_PART 0x02 + +#define CEC_MSG_RECORD_STATUS 0x0a +/* Record Status Operand (rec_status) */ +#define CEC_OP_RECORD_STATUS_CUR_SRC 0x01 +#define CEC_OP_RECORD_STATUS_DIG_SERVICE 0x02 +#define CEC_OP_RECORD_STATUS_ANA_SERVICE 0x03 +#define CEC_OP_RECORD_STATUS_EXT_INPUT 0x04 +#define CEC_OP_RECORD_STATUS_NO_DIG_SERVICE 0x05 +#define CEC_OP_RECORD_STATUS_NO_ANA_SERVICE 0x06 +#define CEC_OP_RECORD_STATUS_NO_SERVICE 0x07 +#define CEC_OP_RECORD_STATUS_INVALID_EXT_PLUG 0x09 +#define CEC_OP_RECORD_STATUS_INVALID_EXT_PHYS_ADDR 0x0a +#define CEC_OP_RECORD_STATUS_UNSUP_CA 0x0b +#define CEC_OP_RECORD_STATUS_NO_CA_ENTITLEMENTS 0x0c +#define CEC_OP_RECORD_STATUS_CANT_COPY_SRC 0x0d +#define CEC_OP_RECORD_STATUS_NO_MORE_COPIES 0x0e +#define CEC_OP_RECORD_STATUS_NO_MEDIA 0x10 +#define CEC_OP_RECORD_STATUS_PLAYING 0x11 +#define CEC_OP_RECORD_STATUS_ALREADY_RECORDING 0x12 +#define CEC_OP_RECORD_STATUS_MEDIA_PROT 0x13 +#define CEC_OP_RECORD_STATUS_NO_SIGNAL 0x14 +#define CEC_OP_RECORD_STATUS_MEDIA_PROBLEM 0x15 +#define CEC_OP_RECORD_STATUS_NO_SPACE 0x16 
+#define CEC_OP_RECORD_STATUS_PARENTAL_LOCK 0x17 +#define CEC_OP_RECORD_STATUS_TERMINATED_OK 0x1a +#define CEC_OP_RECORD_STATUS_ALREADY_TERM 0x1b +#define CEC_OP_RECORD_STATUS_OTHER 0x1f + +#define CEC_MSG_RECORD_TV_SCREEN 0x0f + + +/* Timer Programming Feature */ +#define CEC_MSG_CLEAR_ANALOGUE_TIMER 0x33 +/* Recording Sequence Operand (recording_seq) */ +#define CEC_OP_REC_SEQ_SUNDAY 0x01 +#define CEC_OP_REC_SEQ_MONDAY 0x02 +#define CEC_OP_REC_SEQ_TUESDAY 0x04 +#define CEC_OP_REC_SEQ_WEDNESDAY 0x08 +#define CEC_OP_REC_SEQ_THURSDAY 0x10 +#define CEC_OP_REC_SEQ_FRIDAY 0x20 +#define CEC_OP_REC_SEQ_SATERDAY 0x40 +#define CEC_OP_REC_SEQ_ONCE_ONLY 0x00 + +#define CEC_MSG_CLEAR_DIGITAL_TIMER 0x99 + +#define CEC_MSG_CLEAR_EXT_TIMER 0xa1 +/* External Source Specifier Operand (ext_src_spec) */ +#define CEC_OP_EXT_SRC_PLUG 0x04 +#define CEC_OP_EXT_SRC_PHYS_ADDR 0x05 + +#define CEC_MSG_SET_ANALOGUE_TIMER 0x34 +#define CEC_MSG_SET_DIGITAL_TIMER 0x97 +#define CEC_MSG_SET_EXT_TIMER 0xa2 + +#define CEC_MSG_SET_TIMER_PROGRAM_TITLE 0x67 +#define CEC_MSG_TIMER_CLEARED_STATUS 0x43 +/* Timer Cleared Status Data Operand (timer_cleared_status) */ +#define CEC_OP_TIMER_CLR_STAT_RECORDING 0x00 +#define CEC_OP_TIMER_CLR_STAT_NO_MATCHING 0x01 +#define CEC_OP_TIMER_CLR_STAT_NO_INFO 0x02 +#define CEC_OP_TIMER_CLR_STAT_CLEARED 0x80 + +#define CEC_MSG_TIMER_STATUS 0x35 +/* Timer Overlap Warning Operand (timer_overlap_warning) */ +#define CEC_OP_TIMER_OVERLAP_WARNING_NO_OVERLAP 0 +#define CEC_OP_TIMER_OVERLAP_WARNING_OVERLAP 1 +/* Media Info Operand (media_info) */ +#define CEC_OP_MEDIA_INFO_UNPROT_MEDIA 0 +#define CEC_OP_MEDIA_INFO_PROT_MEDIA 1 +#define CEC_OP_MEDIA_INFO_NO_MEDIA 2 +/* Programmed Indicator Operand (prog_indicator) */ +#define CEC_OP_PROG_IND_NOT_PROGRAMMED 0 +#define CEC_OP_PROG_IND_PROGRAMMED 1 +/* Programmed Info Operand (prog_info) */ +#define CEC_OP_PROG_INFO_ENOUGH_SPACE 0x08 +#define CEC_OP_PROG_INFO_NOT_ENOUGH_SPACE 0x09 +#define 
CEC_OP_PROG_INFO_MIGHT_NOT_BE_ENOUGH_SPACE 0x0b +#define CEC_OP_PROG_INFO_NONE_AVAILABLE 0x0a +/* Not Programmed Error Info Operand (prog_error) */ +#define CEC_OP_PROG_ERROR_NO_FREE_TIMER 0x01 +#define CEC_OP_PROG_ERROR_DATE_OUT_OF_RANGE 0x02 +#define CEC_OP_PROG_ERROR_REC_SEQ_ERROR 0x03 +#define CEC_OP_PROG_ERROR_INV_EXT_PLUG 0x04 +#define CEC_OP_PROG_ERROR_INV_EXT_PHYS_ADDR 0x05 +#define CEC_OP_PROG_ERROR_CA_UNSUPP 0x06 +#define CEC_OP_PROG_ERROR_INSUF_CA_ENTITLEMENTS 0x07 +#define CEC_OP_PROG_ERROR_RESOLUTION_UNSUPP 0x08 +#define CEC_OP_PROG_ERROR_PARENTAL_LOCK 0x09 +#define CEC_OP_PROG_ERROR_CLOCK_FAILURE 0x0a +#define CEC_OP_PROG_ERROR_DUPLICATE 0x0e + + +/* System Information Feature */ +#define CEC_MSG_CEC_VERSION 0x9e +/* CEC Version Operand (cec_version) */ +#define CEC_OP_CEC_VERSION_1_3A 4 +#define CEC_OP_CEC_VERSION_1_4 5 +#define CEC_OP_CEC_VERSION_2_0 6 + +#define CEC_MSG_GET_CEC_VERSION 0x9f +#define CEC_MSG_GIVE_PHYSICAL_ADDR 0x83 +#define CEC_MSG_GET_MENU_LANGUAGE 0x91 +#define CEC_MSG_REPORT_PHYSICAL_ADDR 0x84 +/* Primary Device Type Operand (prim_devtype) */ +#define CEC_OP_PRIM_DEVTYPE_TV 0 +#define CEC_OP_PRIM_DEVTYPE_RECORD 1 +#define CEC_OP_PRIM_DEVTYPE_TUNER 3 +#define CEC_OP_PRIM_DEVTYPE_PLAYBACK 4 +#define CEC_OP_PRIM_DEVTYPE_AUDIOSYSTEM 5 +#define CEC_OP_PRIM_DEVTYPE_SWITCH 6 +#define CEC_OP_PRIM_DEVTYPE_PROCESSOR 7 + +#define CEC_MSG_SET_MENU_LANGUAGE 0x32 +#define CEC_MSG_REPORT_FEATURES 0xa6 /* HDMI 2.0 */ +/* All Device Types Operand (all_device_types) */ +#define CEC_OP_ALL_DEVTYPE_TV 0x80 +#define CEC_OP_ALL_DEVTYPE_RECORD 0x40 +#define CEC_OP_ALL_DEVTYPE_TUNER 0x20 +#define CEC_OP_ALL_DEVTYPE_PLAYBACK 0x10 +#define CEC_OP_ALL_DEVTYPE_AUDIOSYSTEM 0x08 +#define CEC_OP_ALL_DEVTYPE_SWITCH 0x04 +/* + * And if you wondering what happened to PROCESSOR devices: those should + * be mapped to a SWITCH. 
+ */ + +/* Valid for RC Profile and Device Feature operands */ +#define CEC_OP_FEAT_EXT 0x80 /* Extension bit */ +/* RC Profile Operand (rc_profile) */ +#define CEC_OP_FEAT_RC_TV_PROFILE_NONE 0x00 +#define CEC_OP_FEAT_RC_TV_PROFILE_1 0x02 +#define CEC_OP_FEAT_RC_TV_PROFILE_2 0x06 +#define CEC_OP_FEAT_RC_TV_PROFILE_3 0x0a +#define CEC_OP_FEAT_RC_TV_PROFILE_4 0x0e +#define CEC_OP_FEAT_RC_SRC_HAS_DEV_ROOT_MENU 0x50 +#define CEC_OP_FEAT_RC_SRC_HAS_DEV_SETUP_MENU 0x48 +#define CEC_OP_FEAT_RC_SRC_HAS_CONTENTS_MENU 0x44 +#define CEC_OP_FEAT_RC_SRC_HAS_MEDIA_TOP_MENU 0x42 +#define CEC_OP_FEAT_RC_SRC_HAS_MEDIA_CONTEXT_MENU 0x41 +/* Device Feature Operand (dev_features) */ +#define CEC_OP_FEAT_DEV_HAS_RECORD_TV_SCREEN 0x40 +#define CEC_OP_FEAT_DEV_HAS_SET_OSD_STRING 0x20 +#define CEC_OP_FEAT_DEV_HAS_DECK_CONTROL 0x10 +#define CEC_OP_FEAT_DEV_HAS_SET_AUDIO_RATE 0x08 +#define CEC_OP_FEAT_DEV_SINK_HAS_ARC_TX 0x04 +#define CEC_OP_FEAT_DEV_SOURCE_HAS_ARC_RX 0x02 + +#define CEC_MSG_GIVE_FEATURES 0xa5 /* HDMI 2.0 */ + + +/* Deck Control Feature */ +#define CEC_MSG_DECK_CONTROL 0x42 +/* Deck Control Mode Operand (deck_control_mode) */ +#define CEC_OP_DECK_CTL_MODE_SKIP_FWD 1 +#define CEC_OP_DECK_CTL_MODE_SKIP_REV 2 +#define CEC_OP_DECK_CTL_MODE_STOP 3 +#define CEC_OP_DECK_CTL_MODE_EJECT 4 + +#define CEC_MSG_DECK_STATUS 0x1b +/* Deck Info Operand (deck_info) */ +#define CEC_OP_DECK_INFO_PLAY 0x11 +#define CEC_OP_DECK_INFO_RECORD 0x12 +#define CEC_OP_DECK_INFO_PLAY_REV 0x13 +#define CEC_OP_DECK_INFO_STILL 0x14 +#define CEC_OP_DECK_INFO_SLOW 0x15 +#define CEC_OP_DECK_INFO_SLOW_REV 0x16 +#define CEC_OP_DECK_INFO_FAST_FWD 0x17 +#define CEC_OP_DECK_INFO_FAST_REV 0x18 +#define CEC_OP_DECK_INFO_NO_MEDIA 0x19 +#define CEC_OP_DECK_INFO_STOP 0x1a +#define CEC_OP_DECK_INFO_SKIP_FWD 0x1b +#define CEC_OP_DECK_INFO_SKIP_REV 0x1c +#define CEC_OP_DECK_INFO_INDEX_SEARCH_FWD 0x1d +#define CEC_OP_DECK_INFO_INDEX_SEARCH_REV 0x1e +#define CEC_OP_DECK_INFO_OTHER 0x1f + +#define CEC_MSG_GIVE_DECK_STATUS 
0x1a +/* Status Request Operand (status_req) */ +#define CEC_OP_STATUS_REQ_ON 1 +#define CEC_OP_STATUS_REQ_OFF 2 +#define CEC_OP_STATUS_REQ_ONCE 3 + +#define CEC_MSG_PLAY 0x41 +/* Play Mode Operand (play_mode) */ +#define CEC_OP_PLAY_MODE_PLAY_FWD 0x24 +#define CEC_OP_PLAY_MODE_PLAY_REV 0x20 +#define CEC_OP_PLAY_MODE_PLAY_STILL 0x25 +#define CEC_OP_PLAY_MODE_PLAY_FAST_FWD_MIN 0x05 +#define CEC_OP_PLAY_MODE_PLAY_FAST_FWD_MED 0x06 +#define CEC_OP_PLAY_MODE_PLAY_FAST_FWD_MAX 0x07 +#define CEC_OP_PLAY_MODE_PLAY_FAST_REV_MIN 0x09 +#define CEC_OP_PLAY_MODE_PLAY_FAST_REV_MED 0x0a +#define CEC_OP_PLAY_MODE_PLAY_FAST_REV_MAX 0x0b +#define CEC_OP_PLAY_MODE_PLAY_SLOW_FWD_MIN 0x15 +#define CEC_OP_PLAY_MODE_PLAY_SLOW_FWD_MED 0x16 +#define CEC_OP_PLAY_MODE_PLAY_SLOW_FWD_MAX 0x17 +#define CEC_OP_PLAY_MODE_PLAY_SLOW_REV_MIN 0x19 +#define CEC_OP_PLAY_MODE_PLAY_SLOW_REV_MED 0x1a +#define CEC_OP_PLAY_MODE_PLAY_SLOW_REV_MAX 0x1b + + +/* Tuner Control Feature */ +#define CEC_MSG_GIVE_TUNER_DEVICE_STATUS 0x08 +#define CEC_MSG_SELECT_ANALOGUE_SERVICE 0x92 +#define CEC_MSG_SELECT_DIGITAL_SERVICE 0x93 +#define CEC_MSG_TUNER_DEVICE_STATUS 0x07 +/* Recording Flag Operand (rec_flag) */ +#define CEC_OP_REC_FLAG_USED 0 +#define CEC_OP_REC_FLAG_NOT_USED 1 +/* Tuner Display Info Operand (tuner_display_info) */ +#define CEC_OP_TUNER_DISPLAY_INFO_DIGITAL 0 +#define CEC_OP_TUNER_DISPLAY_INFO_NONE 1 +#define CEC_OP_TUNER_DISPLAY_INFO_ANALOGUE 2 + +#define CEC_MSG_TUNER_STEP_DECREMENT 0x06 +#define CEC_MSG_TUNER_STEP_INCREMENT 0x05 + + +/* Vendor Specific Commands Feature */ + +/* + * Has also: + * CEC_MSG_CEC_VERSION + * CEC_MSG_GET_CEC_VERSION + */ +#define CEC_MSG_DEVICE_VENDOR_ID 0x87 +#define CEC_MSG_GIVE_DEVICE_VENDOR_ID 0x8c +#define CEC_MSG_VENDOR_COMMAND 0x89 +#define CEC_MSG_VENDOR_COMMAND_WITH_ID 0xa0 +#define CEC_MSG_VENDOR_REMOTE_BUTTON_DOWN 0x8a +#define CEC_MSG_VENDOR_REMOTE_BUTTON_UP 0x8b + + +/* OSD Display Feature */ +#define CEC_MSG_SET_OSD_STRING 0x64 +/* Display Control Operand 
(disp_ctl) */ +#define CEC_OP_DISP_CTL_DEFAULT 0x00 +#define CEC_OP_DISP_CTL_UNTIL_CLEARED 0x40 +#define CEC_OP_DISP_CTL_CLEAR 0x80 + + +/* Device OSD Transfer Feature */ +#define CEC_MSG_GIVE_OSD_NAME 0x46 +#define CEC_MSG_SET_OSD_NAME 0x47 + + +/* Device Menu Control Feature */ +#define CEC_MSG_MENU_REQUEST 0x8d +/* Menu Request Type Operand (menu_req) */ +#define CEC_OP_MENU_REQUEST_ACTIVATE 0x00 +#define CEC_OP_MENU_REQUEST_DEACTIVATE 0x01 +#define CEC_OP_MENU_REQUEST_QUERY 0x02 + +#define CEC_MSG_MENU_STATUS 0x8e +/* Menu State Operand (menu_state) */ +#define CEC_OP_MENU_STATE_ACTIVATED 0x00 +#define CEC_OP_MENU_STATE_DEACTIVATED 0x01 + +#define CEC_MSG_USER_CONTROL_PRESSED 0x44 +/* UI Broadcast Type Operand (ui_bcast_type) */ +#define CEC_OP_UI_BCAST_TYPE_TOGGLE_ALL 0x00 +#define CEC_OP_UI_BCAST_TYPE_TOGGLE_DIG_ANA 0x01 +#define CEC_OP_UI_BCAST_TYPE_ANALOGUE 0x10 +#define CEC_OP_UI_BCAST_TYPE_ANALOGUE_T 0x20 +#define CEC_OP_UI_BCAST_TYPE_ANALOGUE_CABLE 0x30 +#define CEC_OP_UI_BCAST_TYPE_ANALOGUE_SAT 0x40 +#define CEC_OP_UI_BCAST_TYPE_DIGITAL 0x50 +#define CEC_OP_UI_BCAST_TYPE_DIGITAL_T 0x60 +#define CEC_OP_UI_BCAST_TYPE_DIGITAL_CABLE 0x70 +#define CEC_OP_UI_BCAST_TYPE_DIGITAL_SAT 0x80 +#define CEC_OP_UI_BCAST_TYPE_DIGITAL_COM_SAT 0x90 +#define CEC_OP_UI_BCAST_TYPE_DIGITAL_COM_SAT2 0x91 +#define CEC_OP_UI_BCAST_TYPE_IP 0xa0 +/* UI Sound Presentation Control Operand (ui_snd_pres_ctl) */ +#define CEC_OP_UI_SND_PRES_CTL_DUAL_MONO 0x10 +#define CEC_OP_UI_SND_PRES_CTL_KARAOKE 0x20 +#define CEC_OP_UI_SND_PRES_CTL_DOWNMIX 0x80 +#define CEC_OP_UI_SND_PRES_CTL_REVERB 0x90 +#define CEC_OP_UI_SND_PRES_CTL_EQUALIZER 0xa0 +#define CEC_OP_UI_SND_PRES_CTL_BASS_UP 0xb1 +#define CEC_OP_UI_SND_PRES_CTL_BASS_NEUTRAL 0xb2 +#define CEC_OP_UI_SND_PRES_CTL_BASS_DOWN 0xb3 +#define CEC_OP_UI_SND_PRES_CTL_TREBLE_UP 0xc1 +#define CEC_OP_UI_SND_PRES_CTL_TREBLE_NEUTRAL 0xc2 +#define CEC_OP_UI_SND_PRES_CTL_TREBLE_DOWN 0xc3 + +#define CEC_MSG_USER_CONTROL_RELEASED 0x45 + + +/* Remote 
Control Passthrough Feature */ + +/* + * Has also: + * CEC_MSG_USER_CONTROL_PRESSED + * CEC_MSG_USER_CONTROL_RELEASED + */ + + +/* Power Status Feature */ +#define CEC_MSG_GIVE_DEVICE_POWER_STATUS 0x8f +#define CEC_MSG_REPORT_POWER_STATUS 0x90 +/* Power Status Operand (pwr_state) */ +#define CEC_OP_POWER_STATUS_ON 0 +#define CEC_OP_POWER_STATUS_STANDBY 1 +#define CEC_OP_POWER_STATUS_TO_ON 2 +#define CEC_OP_POWER_STATUS_TO_STANDBY 3 + + +/* General Protocol Messages */ +#define CEC_MSG_FEATURE_ABORT 0x00 +/* Abort Reason Operand (reason) */ +#define CEC_OP_ABORT_UNRECOGNIZED_OP 0 +#define CEC_OP_ABORT_INCORRECT_MODE 1 +#define CEC_OP_ABORT_NO_SOURCE 2 +#define CEC_OP_ABORT_INVALID_OP 3 +#define CEC_OP_ABORT_REFUSED 4 +#define CEC_OP_ABORT_UNDETERMINED 5 + +#define CEC_MSG_ABORT 0xff + + +/* System Audio Control Feature */ + +/* + * Has also: + * CEC_MSG_USER_CONTROL_PRESSED + * CEC_MSG_USER_CONTROL_RELEASED + */ +#define CEC_MSG_GIVE_AUDIO_STATUS 0x71 +#define CEC_MSG_GIVE_SYSTEM_AUDIO_MODE_STATUS 0x7d +#define CEC_MSG_REPORT_AUDIO_STATUS 0x7a +/* Audio Mute Status Operand (aud_mute_status) */ +#define CEC_OP_AUD_MUTE_STATUS_OFF 0 +#define CEC_OP_AUD_MUTE_STATUS_ON 1 + +#define CEC_MSG_REPORT_SHORT_AUDIO_DESCRIPTOR 0xa3 +#define CEC_MSG_REQUEST_SHORT_AUDIO_DESCRIPTOR 0xa4 +#define CEC_MSG_SET_SYSTEM_AUDIO_MODE 0x72 +/* System Audio Status Operand (sys_aud_status) */ +#define CEC_OP_SYS_AUD_STATUS_OFF 0 +#define CEC_OP_SYS_AUD_STATUS_ON 1 + +#define CEC_MSG_SYSTEM_AUDIO_MODE_REQUEST 0x70 +#define CEC_MSG_SYSTEM_AUDIO_MODE_STATUS 0x7e +/* Audio Format ID Operand (audio_format_id) */ +#define CEC_OP_AUD_FMT_ID_CEA861 0 +#define CEC_OP_AUD_FMT_ID_CEA861_CXT 1 + + +/* Audio Rate Control Feature */ +#define CEC_MSG_SET_AUDIO_RATE 0x9a +/* Audio Rate Operand (audio_rate) */ +#define CEC_OP_AUD_RATE_OFF 0 +#define CEC_OP_AUD_RATE_WIDE_STD 1 +#define CEC_OP_AUD_RATE_WIDE_FAST 2 +#define CEC_OP_AUD_RATE_WIDE_SLOW 3 +#define CEC_OP_AUD_RATE_NARROW_STD 4 +#define 
CEC_OP_AUD_RATE_NARROW_FAST 5 +#define CEC_OP_AUD_RATE_NARROW_SLOW 6 + + +/* Audio Return Channel Control Feature */ +#define CEC_MSG_INITIATE_ARC 0xc0 +#define CEC_MSG_REPORT_ARC_INITIATED 0xc1 +#define CEC_MSG_REPORT_ARC_TERMINATED 0xc2 +#define CEC_MSG_REQUEST_ARC_INITIATION 0xc3 +#define CEC_MSG_REQUEST_ARC_TERMINATION 0xc4 +#define CEC_MSG_TERMINATE_ARC 0xc5 + + +/* Dynamic Audio Lipsync Feature */ +/* Only for CEC 2.0 and up */ +#define CEC_MSG_REQUEST_CURRENT_LATENCY 0xa7 +#define CEC_MSG_REPORT_CURRENT_LATENCY 0xa8 +/* Low Latency Mode Operand (low_latency_mode) */ +#define CEC_OP_LOW_LATENCY_MODE_OFF 0 +#define CEC_OP_LOW_LATENCY_MODE_ON 1 +/* Audio Output Compensated Operand (audio_out_compensated) */ +#define CEC_OP_AUD_OUT_COMPENSATED_NA 0 +#define CEC_OP_AUD_OUT_COMPENSATED_DELAY 1 +#define CEC_OP_AUD_OUT_COMPENSATED_NO_DELAY 2 +#define CEC_OP_AUD_OUT_COMPENSATED_PARTIAL_DELAY 3 + + +/* Capability Discovery and Control Feature */ +#define CEC_MSG_CDC_MESSAGE 0xf8 +/* Ethernet-over-HDMI: nobody ever does this... 
*/ +#define CEC_MSG_CDC_HEC_INQUIRE_STATE 0x00 +#define CEC_MSG_CDC_HEC_REPORT_STATE 0x01 +/* HEC Functionality State Operand (hec_func_state) */ +#define CEC_OP_HEC_FUNC_STATE_NOT_SUPPORTED 0 +#define CEC_OP_HEC_FUNC_STATE_INACTIVE 1 +#define CEC_OP_HEC_FUNC_STATE_ACTIVE 2 +#define CEC_OP_HEC_FUNC_STATE_ACTIVATION_FIELD 3 +/* Host Functionality State Operand (host_func_state) */ +#define CEC_OP_HOST_FUNC_STATE_NOT_SUPPORTED 0 +#define CEC_OP_HOST_FUNC_STATE_INACTIVE 1 +#define CEC_OP_HOST_FUNC_STATE_ACTIVE 2 +/* ENC Functionality State Operand (enc_func_state) */ +#define CEC_OP_ENC_FUNC_STATE_EXT_CON_NOT_SUPPORTED 0 +#define CEC_OP_ENC_FUNC_STATE_EXT_CON_INACTIVE 1 +#define CEC_OP_ENC_FUNC_STATE_EXT_CON_ACTIVE 2 +/* CDC Error Code Operand (cdc_errcode) */ +#define CEC_OP_CDC_ERROR_CODE_NONE 0 +#define CEC_OP_CDC_ERROR_CODE_CAP_UNSUPPORTED 1 +#define CEC_OP_CDC_ERROR_CODE_WRONG_STATE 2 +#define CEC_OP_CDC_ERROR_CODE_OTHER 3 +/* HEC Support Operand (hec_support) */ +#define CEC_OP_HEC_SUPPORT_NO 0 +#define CEC_OP_HEC_SUPPORT_YES 1 +/* HEC Activation Operand (hec_activation) */ +#define CEC_OP_HEC_ACTIVATION_ON 0 +#define CEC_OP_HEC_ACTIVATION_OFF 1 + +#define CEC_MSG_CDC_HEC_SET_STATE_ADJACENT 0x02 +#define CEC_MSG_CDC_HEC_SET_STATE 0x03 +/* HEC Set State Operand (hec_set_state) */ +#define CEC_OP_HEC_SET_STATE_DEACTIVATE 0 +#define CEC_OP_HEC_SET_STATE_ACTIVATE 1 + +#define CEC_MSG_CDC_HEC_REQUEST_DEACTIVATION 0x04 +#define CEC_MSG_CDC_HEC_NOTIFY_ALIVE 0x05 +#define CEC_MSG_CDC_HEC_DISCOVER 0x06 +/* Hotplug Detect messages */ +#define CEC_MSG_CDC_HPD_SET_STATE 0x10 +/* HPD State Operand (hpd_state) */ +#define CEC_OP_HPD_STATE_CP_EDID_DISABLE 0 +#define CEC_OP_HPD_STATE_CP_EDID_ENABLE 1 +#define CEC_OP_HPD_STATE_CP_EDID_DISABLE_ENABLE 2 +#define CEC_OP_HPD_STATE_EDID_DISABLE 3 +#define CEC_OP_HPD_STATE_EDID_ENABLE 4 +#define CEC_OP_HPD_STATE_EDID_DISABLE_ENABLE 5 +#define CEC_MSG_CDC_HPD_REPORT_STATE 0x11 +/* HPD Error Code Operand (hpd_error) */ +#define 
CEC_OP_HPD_ERROR_NONE 0 +#define CEC_OP_HPD_ERROR_INITIATOR_NOT_CAPABLE 1 +#define CEC_OP_HPD_ERROR_INITIATOR_WRONG_STATE 2 +#define CEC_OP_HPD_ERROR_OTHER 3 +#define CEC_OP_HPD_ERROR_NONE_NO_VIDEO 4 + +#endif diff --git a/include/linux/ceph/auth.h b/include/linux/ceph/auth.h index 6b138fa97d..374bb1c4ef 100644 --- a/include/linux/ceph/auth.h +++ b/include/linux/ceph/auth.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _FS_CEPH_AUTH_H #define _FS_CEPH_AUTH_H @@ -32,6 +31,8 @@ struct ceph_auth_handshake { }; struct ceph_auth_client_ops { + const char *name; + /* * true if we are authenticated and can connect to * services. @@ -50,10 +51,8 @@ struct ceph_auth_client_ops { * another request. */ int (*build_request)(struct ceph_auth_client *ac, void *buf, void *end); - int (*handle_reply)(struct ceph_auth_client *ac, u64 global_id, - void *buf, void *end, u8 *session_key, - int *session_key_len, u8 *con_secret, - int *con_secret_len); + int (*handle_reply)(struct ceph_auth_client *ac, int result, + void *buf, void *end); /* * Create authorizer for connecting to a service, and verify @@ -64,15 +63,8 @@ struct ceph_auth_client_ops { /* ensure that an existing authorizer is up to date */ int (*update_authorizer)(struct ceph_auth_client *ac, int peer_type, struct ceph_auth_handshake *auth); - int (*add_authorizer_challenge)(struct ceph_auth_client *ac, - struct ceph_authorizer *a, - void *challenge_buf, - int challenge_buf_len); int (*verify_authorizer_reply)(struct ceph_auth_client *ac, - struct ceph_authorizer *a, - void *reply, int reply_len, - u8 *session_key, int *session_key_len, - u8 *con_secret, int *con_secret_len); + struct ceph_authorizer *a, size_t len); void (*invalidate_authorizer)(struct ceph_auth_client *ac, int peer_type); @@ -98,17 +90,11 @@ struct ceph_auth_client { const struct ceph_crypto_key *key; /* our secret key */ unsigned want_keys; /* which services we want */ - int preferred_mode; /* CEPH_CON_MODE_* */ - int fallback_mode; /* 
ditto */ - struct mutex mutex; }; -void ceph_auth_set_global_id(struct ceph_auth_client *ac, u64 global_id); - -struct ceph_auth_client *ceph_auth_init(const char *name, - const struct ceph_crypto_key *key, - const int *con_modes); +extern struct ceph_auth_client *ceph_auth_init(const char *name, + const struct ceph_crypto_key *key); extern void ceph_auth_destroy(struct ceph_auth_client *ac); extern void ceph_auth_reset(struct ceph_auth_client *ac); @@ -122,22 +108,18 @@ int ceph_auth_entity_name_encode(const char *name, void **p, void *end); extern int ceph_build_auth(struct ceph_auth_client *ac, void *msg_buf, size_t msg_len); -extern int ceph_auth_is_authenticated(struct ceph_auth_client *ac); -int __ceph_auth_get_authorizer(struct ceph_auth_client *ac, - struct ceph_auth_handshake *auth, - int peer_type, bool force_new, - int *proto, int *pref_mode, int *fallb_mode); +extern int ceph_auth_is_authenticated(struct ceph_auth_client *ac); +extern int ceph_auth_create_authorizer(struct ceph_auth_client *ac, + int peer_type, + struct ceph_auth_handshake *auth); void ceph_auth_destroy_authorizer(struct ceph_authorizer *a); -int ceph_auth_add_authorizer_challenge(struct ceph_auth_client *ac, - struct ceph_authorizer *a, - void *challenge_buf, - int challenge_buf_len); -int ceph_auth_verify_authorizer_reply(struct ceph_auth_client *ac, - struct ceph_authorizer *a, - void *reply, int reply_len, - u8 *session_key, int *session_key_len, - u8 *con_secret, int *con_secret_len); +extern int ceph_auth_update_authorizer(struct ceph_auth_client *ac, + int peer_type, + struct ceph_auth_handshake *a); +extern int ceph_auth_verify_authorizer_reply(struct ceph_auth_client *ac, + struct ceph_authorizer *a, + size_t len); extern void ceph_auth_invalidate_authorizer(struct ceph_auth_client *ac, int peer_type); @@ -157,34 +139,4 @@ int ceph_auth_check_message_signature(struct ceph_auth_handshake *auth, return auth->check_message_signature(auth, msg); return 0; } - -int 
ceph_auth_get_request(struct ceph_auth_client *ac, void *buf, int buf_len); -int ceph_auth_handle_reply_more(struct ceph_auth_client *ac, void *reply, - int reply_len, void *buf, int buf_len); -int ceph_auth_handle_reply_done(struct ceph_auth_client *ac, - u64 global_id, void *reply, int reply_len, - u8 *session_key, int *session_key_len, - u8 *con_secret, int *con_secret_len); -bool ceph_auth_handle_bad_method(struct ceph_auth_client *ac, - int used_proto, int result, - const int *allowed_protos, int proto_cnt, - const int *allowed_modes, int mode_cnt); - -int ceph_auth_get_authorizer(struct ceph_auth_client *ac, - struct ceph_auth_handshake *auth, - int peer_type, void *buf, int *buf_len); -int ceph_auth_handle_svc_reply_more(struct ceph_auth_client *ac, - struct ceph_auth_handshake *auth, - void *reply, int reply_len, - void *buf, int *buf_len); -int ceph_auth_handle_svc_reply_done(struct ceph_auth_client *ac, - struct ceph_auth_handshake *auth, - void *reply, int reply_len, - u8 *session_key, int *session_key_len, - u8 *con_secret, int *con_secret_len); -bool ceph_auth_handle_bad_authorizer(struct ceph_auth_client *ac, - int peer_type, int used_proto, int result, - const int *allowed_protos, int proto_cnt, - const int *allowed_modes, int mode_cnt); - #endif diff --git a/include/linux/ceph/buffer.h b/include/linux/ceph/buffer.h index 11cdc7c604..07ca15e761 100644 --- a/include/linux/ceph/buffer.h +++ b/include/linux/ceph/buffer.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef __FS_CEPH_BUFFER_H #define __FS_CEPH_BUFFER_H @@ -30,8 +29,7 @@ static inline struct ceph_buffer *ceph_buffer_get(struct ceph_buffer *b) static inline void ceph_buffer_put(struct ceph_buffer *b) { - if (b) - kref_put(&b->kref, ceph_buffer_release); + kref_put(&b->kref, ceph_buffer_release); } extern int ceph_decode_buffer(struct ceph_buffer **b, void **p, void *end); diff --git a/include/linux/ceph/ceph_debug.h b/include/linux/ceph/ceph_debug.h index d5a5da838c..aa2e19182d 
100644 --- a/include/linux/ceph/ceph_debug.h +++ b/include/linux/ceph/ceph_debug.h @@ -1,11 +1,8 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _FS_CEPH_DEBUG_H #define _FS_CEPH_DEBUG_H #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt -#include - #ifdef CONFIG_CEPH_LIB_PRETTYDEBUG /* @@ -15,10 +12,12 @@ */ # if defined(DEBUG) || defined(CONFIG_DYNAMIC_DEBUG) +extern const char *ceph_file_part(const char *s, int len); # define dout(fmt, ...) \ pr_debug("%.*s %12.12s:%-4d : " fmt, \ 8 - (int)sizeof(KBUILD_MODNAME), " ", \ - kbasename(__FILE__), __LINE__, ##__VA_ARGS__) + ceph_file_part(__FILE__, sizeof(__FILE__)), \ + __LINE__, ##__VA_ARGS__) # else /* faux printk call just to see any compiler warnings. */ # define dout(fmt, ...) do { \ diff --git a/include/linux/ceph/ceph_features.h b/include/linux/ceph/ceph_features.h index 3a47acd9cc..ae2f668337 100644 --- a/include/linux/ceph/ceph_features.h +++ b/include/linux/ceph/ceph_features.h @@ -1,224 +1,137 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef __CEPH_FEATURES #define __CEPH_FEATURES /* - * Each time we reclaim bits for reuse we need to specify another bit - * that, if present, indicates we have the new incarnation of that - * feature. Base case is 1 (first use). + * feature bits */ -#define CEPH_FEATURE_INCARNATION_1 (0ull) -#define CEPH_FEATURE_INCARNATION_2 (1ull<<57) // SERVER_JEWEL -#define CEPH_FEATURE_INCARNATION_3 ((1ull<<57)|(1ull<<28)) // SERVER_MIMIC - -#define DEFINE_CEPH_FEATURE(bit, incarnation, name) \ - static const uint64_t __maybe_unused CEPH_FEATURE_##name = (1ULL<= 10) */ -#define CEPH_CLIENT_CAPS_SYNC (1<<0) -#define CEPH_CLIENT_CAPS_NO_CAPSNAP (1<<1) -#define CEPH_CLIENT_CAPS_PENDING_CAPSNAP (1<<2) - /* * caps message, used for capability callbacks, acks, requests, etc. 
*/ @@ -873,20 +784,4 @@ struct ceph_mds_snap_realm { } __attribute__ ((packed)); /* followed by my snap list, then prior parent snap list */ -/* - * quotas - */ -struct ceph_mds_quota { - __le64 ino; /* ino */ - struct ceph_timespec rctime; - __le64 rbytes; /* dir stats */ - __le64 rfiles; - __le64 rsubdirs; - __u8 struct_v; /* compat */ - __u8 struct_compat; - __le32 struct_len; - __le64 max_bytes; /* quota max. bytes */ - __le64 max_files; /* quota max. files */ -} __attribute__ ((packed)); - #endif diff --git a/include/linux/ceph/ceph_hash.h b/include/linux/ceph/ceph_hash.h index fda474c7a5..d099c3f902 100644 --- a/include/linux/ceph/ceph_hash.h +++ b/include/linux/ceph/ceph_hash.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef FS_CEPH_HASH_H #define FS_CEPH_HASH_H diff --git a/include/linux/ceph/cls_lock_client.h b/include/linux/ceph/cls_lock_client.h index 17bc7584d1..84884d8d47 100644 --- a/include/linux/ceph/cls_lock_client.h +++ b/include/linux/ceph/cls_lock_client.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_CEPH_CLS_LOCK_CLIENT_H #define _LINUX_CEPH_CLS_LOCK_CLIENT_H @@ -38,11 +37,6 @@ int ceph_cls_break_lock(struct ceph_osd_client *osdc, struct ceph_object_locator *oloc, char *lock_name, char *cookie, struct ceph_entity_name *locker); -int ceph_cls_set_cookie(struct ceph_osd_client *osdc, - struct ceph_object_id *oid, - struct ceph_object_locator *oloc, - char *lock_name, u8 type, char *old_cookie, - char *tag, char *new_cookie); void ceph_free_lockers(struct ceph_locker *lockers, u32 num_lockers); @@ -52,7 +46,4 @@ int ceph_cls_lock_info(struct ceph_osd_client *osdc, char *lock_name, u8 *type, char **tag, struct ceph_locker **lockers, u32 *num_lockers); -int ceph_cls_assert_locked(struct ceph_osd_request *req, int which, - char *lock_name, u8 type, char *cookie, char *tag); - #endif diff --git a/include/linux/ceph/debugfs.h b/include/linux/ceph/debugfs.h index 8b3a1a7a95..29cf897cc5 100644 --- 
a/include/linux/ceph/debugfs.h +++ b/include/linux/ceph/debugfs.h @@ -1,13 +1,26 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _FS_CEPH_DEBUGFS_H #define _FS_CEPH_DEBUGFS_H +#include #include +#define CEPH_DEFINE_SHOW_FUNC(name) \ +static int name##_open(struct inode *inode, struct file *file) \ +{ \ + return single_open(file, name, inode->i_private); \ +} \ + \ +static const struct file_operations name##_fops = { \ + .open = name##_open, \ + .read = seq_read, \ + .llseek = seq_lseek, \ + .release = single_release, \ +}; + /* debugfs.c */ -extern void ceph_debugfs_init(void); +extern int ceph_debugfs_init(void); extern void ceph_debugfs_cleanup(void); -extern void ceph_debugfs_client_init(struct ceph_client *client); +extern int ceph_debugfs_client_init(struct ceph_client *client); extern void ceph_debugfs_client_cleanup(struct ceph_client *client); #endif diff --git a/include/linux/ceph/decode.h b/include/linux/ceph/decode.h index 04f3ace578..f990f2cc90 100644 --- a/include/linux/ceph/decode.h +++ b/include/linux/ceph/decode.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef __CEPH_DECODE_H #define __CEPH_DECODE_H @@ -134,82 +133,16 @@ static inline char *ceph_extract_encoded_string(void **p, void *end, } /* - * skip helpers + * struct ceph_timespec <-> struct timespec */ -#define ceph_decode_skip_n(p, end, n, bad) \ - do { \ - ceph_decode_need(p, end, n, bad); \ - *p += n; \ - } while (0) - -#define ceph_decode_skip_64(p, end, bad) \ -ceph_decode_skip_n(p, end, sizeof(u64), bad) - -#define ceph_decode_skip_32(p, end, bad) \ -ceph_decode_skip_n(p, end, sizeof(u32), bad) - -#define ceph_decode_skip_16(p, end, bad) \ -ceph_decode_skip_n(p, end, sizeof(u16), bad) - -#define ceph_decode_skip_8(p, end, bad) \ -ceph_decode_skip_n(p, end, sizeof(u8), bad) - -#define ceph_decode_skip_string(p, end, bad) \ - do { \ - u32 len; \ - \ - ceph_decode_32_safe(p, end, len, bad); \ - ceph_decode_skip_n(p, end, len, bad); \ - } while (0) - -#define 
ceph_decode_skip_set(p, end, type, bad) \ - do { \ - u32 len; \ - \ - ceph_decode_32_safe(p, end, len, bad); \ - while (len--) \ - ceph_decode_skip_##type(p, end, bad); \ - } while (0) - -#define ceph_decode_skip_map(p, end, ktype, vtype, bad) \ - do { \ - u32 len; \ - \ - ceph_decode_32_safe(p, end, len, bad); \ - while (len--) { \ - ceph_decode_skip_##ktype(p, end, bad); \ - ceph_decode_skip_##vtype(p, end, bad); \ - } \ - } while (0) - -#define ceph_decode_skip_map_of_map(p, end, ktype1, ktype2, vtype2, bad) \ - do { \ - u32 len; \ - \ - ceph_decode_32_safe(p, end, len, bad); \ - while (len--) { \ - ceph_decode_skip_##ktype1(p, end, bad); \ - ceph_decode_skip_map(p, end, ktype2, vtype2, bad); \ - } \ - } while (0) - -/* - * struct ceph_timespec <-> struct timespec64 - */ -static inline void ceph_decode_timespec64(struct timespec64 *ts, - const struct ceph_timespec *tv) +static inline void ceph_decode_timespec(struct timespec *ts, + const struct ceph_timespec *tv) { - /* - * This will still overflow in year 2106. We could extend - * the protocol to steal two more bits from tv_nsec to - * add three more 136 year epochs after that the way ext4 - * does if necessary. 
- */ - ts->tv_sec = (time64_t)le32_to_cpu(tv->tv_sec); + ts->tv_sec = (__kernel_time_t)le32_to_cpu(tv->tv_sec); ts->tv_nsec = (long)le32_to_cpu(tv->tv_nsec); } -static inline void ceph_encode_timespec64(struct ceph_timespec *tv, - const struct timespec64 *ts) +static inline void ceph_encode_timespec(struct ceph_timespec *tv, + const struct timespec *ts) { tv->tv_sec = cpu_to_le32((u32)ts->tv_sec); tv->tv_nsec = cpu_to_le32((u32)ts->tv_nsec); @@ -218,35 +151,18 @@ static inline void ceph_encode_timespec64(struct ceph_timespec *tv, /* * sockaddr_storage <-> ceph_sockaddr */ -#define CEPH_ENTITY_ADDR_TYPE_NONE 0 -#define CEPH_ENTITY_ADDR_TYPE_LEGACY __cpu_to_le32(1) -#define CEPH_ENTITY_ADDR_TYPE_MSGR2 __cpu_to_le32(2) -#define CEPH_ENTITY_ADDR_TYPE_ANY __cpu_to_le32(3) - -static inline void ceph_encode_banner_addr(struct ceph_entity_addr *a) +static inline void ceph_encode_addr(struct ceph_entity_addr *a) { __be16 ss_family = htons(a->in_addr.ss_family); a->in_addr.ss_family = *(__u16 *)&ss_family; - - /* Banner addresses require TYPE_NONE */ - a->type = CEPH_ENTITY_ADDR_TYPE_NONE; } -static inline void ceph_decode_banner_addr(struct ceph_entity_addr *a) +static inline void ceph_decode_addr(struct ceph_entity_addr *a) { __be16 ss_family = *(__be16 *)&a->in_addr.ss_family; a->in_addr.ss_family = ntohs(ss_family); WARN_ON(a->in_addr.ss_family == 512); - a->type = CEPH_ENTITY_ADDR_TYPE_LEGACY; } -extern int ceph_decode_entity_addr(void **p, void *end, - struct ceph_entity_addr *addr); -int ceph_decode_entity_addrvec(void **p, void *end, bool msgr2, - struct ceph_entity_addr *addr); - -int ceph_entity_addr_encoding_len(const struct ceph_entity_addr *addr); -void ceph_encode_entity_addr(void **p, const struct ceph_entity_addr *addr); - /* * encoders */ diff --git a/include/linux/ceph/libceph.h b/include/linux/ceph/libceph.h index 409d8c29bc..1816c5e265 100644 --- a/include/linux/ceph/libceph.h +++ b/include/linux/ceph/libceph.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: 
GPL-2.0 */ #ifndef _FS_CEPH_LIBCEPH_H #define _FS_CEPH_LIBCEPH_H @@ -15,7 +14,6 @@ #include #include #include -#include #include #include @@ -31,10 +29,10 @@ #define CEPH_OPT_FSID (1<<0) #define CEPH_OPT_NOSHARE (1<<1) /* don't share client with other sbs */ #define CEPH_OPT_MYIP (1<<2) /* specified my ip */ -#define CEPH_OPT_NOCRC (1<<3) /* no data crc on writes (msgr1) */ -#define CEPH_OPT_TCP_NODELAY (1<<4) /* TCP_NODELAY on TCP sockets */ -#define CEPH_OPT_NOMSGSIGN (1<<5) /* don't sign msgs (msgr1) */ -#define CEPH_OPT_ABORT_ON_FULL (1<<6) /* abort w/ ENOSPC when full */ +#define CEPH_OPT_NOCRC (1<<3) /* no data crc on writes */ +#define CEPH_OPT_NOMSGAUTH (1<<4) /* don't require msg signing feat */ +#define CEPH_OPT_TCP_NODELAY (1<<5) /* TCP_NODELAY on TCP sockets */ +#define CEPH_OPT_NOMSGSIGN (1<<6) /* don't sign msgs */ #define CEPH_OPT_DEFAULT (CEPH_OPT_TCP_NODELAY) @@ -50,12 +48,9 @@ struct ceph_options { unsigned long mount_timeout; /* jiffies */ unsigned long osd_idle_ttl; /* jiffies */ unsigned long osd_keepalive_timeout; /* jiffies */ - unsigned long osd_request_timeout; /* jiffies */ - u32 read_from_replica; /* CEPH_OSD_FLAG_BALANCE/LOCALIZE_READS */ - int con_modes[2]; /* CEPH_CON_MODE_* */ /* - * any type that can't be simply compared or doesn't need + * any type that can't be simply compared or doesn't need need * to be compared should go beyond this point, * ceph_compare_options() should be updated accordingly */ @@ -65,7 +60,6 @@ struct ceph_options { int num_mon; char *name; struct ceph_crypto_key *key; - struct rb_root crush_locs; }; /* @@ -74,8 +68,6 @@ struct ceph_options { #define CEPH_MOUNT_TIMEOUT_DEFAULT msecs_to_jiffies(60 * 1000) #define CEPH_OSD_KEEPALIVE_DEFAULT msecs_to_jiffies(5 * 1000) #define CEPH_OSD_IDLE_TTL_DEFAULT msecs_to_jiffies(60 * 1000) -#define CEPH_OSD_REQUEST_TIMEOUT_DEFAULT 0 /* no timeout */ -#define CEPH_READ_FROM_REPLICA_DEFAULT 0 /* read from primary */ #define CEPH_MONC_HUNT_INTERVAL msecs_to_jiffies(3 * 1000) 
#define CEPH_MONC_PING_INTERVAL msecs_to_jiffies(10 * 1000) @@ -83,21 +75,23 @@ struct ceph_options { #define CEPH_MONC_HUNT_BACKOFF 2 #define CEPH_MONC_HUNT_MAX_MULT 10 -#define CEPH_MSG_MAX_CONTROL_LEN (16*1024*1024) #define CEPH_MSG_MAX_FRONT_LEN (16*1024*1024) #define CEPH_MSG_MAX_MIDDLE_LEN (16*1024*1024) - -/* - * The largest possible rbd data object is 32M. - * The largest possible rbd object map object is 64M. - * - * There is no limit on the size of cephfs objects, but it has to obey - * rsize and wsize mount options anyway. - */ -#define CEPH_MSG_MAX_DATA_LEN (64*1024*1024) +#define CEPH_MSG_MAX_DATA_LEN (16*1024*1024) #define CEPH_AUTH_NAME_DEFAULT "guest" +/* + * Delay telling the MDS we no longer want caps, in case we reopen + * the file. Delay a minimum amount of time, even if we send a cap + * message for some other reason. Otherwise, take the oppotunity to + * update the mds to avoid sending another message later. + */ +#define CEPH_CAPS_WANTED_DELAY_MIN_DEFAULT 5 /* cap release delay */ +#define CEPH_CAPS_WANTED_DELAY_MAX_DEFAULT 60 /* cap release delay */ + +#define CEPH_CAP_RELEASE_SAFETY_DEFAULT (CEPH_CAPS_PER_RELEASE * 4) + /* mount state */ enum { CEPH_MOUNT_MOUNTING, @@ -105,7 +99,6 @@ enum { CEPH_MOUNT_UNMOUNTING, CEPH_MOUNT_UNMOUNTED, CEPH_MOUNT_SHUTDOWN, - CEPH_MOUNT_RECOVER, }; static inline unsigned long ceph_timeout_jiffies(unsigned long timeout) @@ -152,10 +145,6 @@ struct ceph_client { #define from_msgr(ms) container_of(ms, struct ceph_client, msgr) -static inline bool ceph_msgr2(struct ceph_client *client) -{ - return client->options->con_modes[0] != CEPH_CON_MODE_UNKNOWN; -} /* * snapshots @@ -170,7 +159,7 @@ static inline bool ceph_msgr2(struct ceph_client *client) * dirtied. 
*/ struct ceph_snap_context { - refcount_t nref; + atomic_t nref; u64 seq; u32 num_snaps; u64 snaps[]; @@ -192,12 +181,11 @@ static inline int calc_pages_for(u64 off, u64 len) (off >> PAGE_SHIFT); } -#define RB_BYVAL(a) (a) -#define RB_BYPTR(a) (&(a)) -#define RB_CMP3WAY(a, b) ((a) < (b) ? -1 : (a) > (b)) - -#define DEFINE_RB_INSDEL_FUNCS2(name, type, keyfld, cmpexp, keyexp, nodefld) \ -static bool __insert_##name(struct rb_root *root, type *t) \ +/* + * These are not meant to be generic - an integer key is assumed. + */ +#define DEFINE_RB_INSDEL_FUNCS(name, type, keyfld, nodefld) \ +static void insert_##name(struct rb_root *root, type *t) \ { \ struct rb_node **n = &root->rb_node; \ struct rb_node *parent = NULL; \ @@ -206,26 +194,18 @@ static bool __insert_##name(struct rb_root *root, type *t) \ \ while (*n) { \ type *cur = rb_entry(*n, type, nodefld); \ - int cmp; \ \ parent = *n; \ - cmp = cmpexp(keyexp(t->keyfld), keyexp(cur->keyfld)); \ - if (cmp < 0) \ + if (t->keyfld < cur->keyfld) \ n = &(*n)->rb_left; \ - else if (cmp > 0) \ + else if (t->keyfld > cur->keyfld) \ n = &(*n)->rb_right; \ else \ - return false; \ + BUG(); \ } \ \ rb_link_node(&t->nodefld, parent, n); \ rb_insert_color(&t->nodefld, root); \ - return true; \ -} \ -static void __maybe_unused insert_##name(struct rb_root *root, type *t) \ -{ \ - if (!__insert_##name(root, t)) \ - BUG(); \ } \ static void erase_##name(struct rb_root *root, type *t) \ { \ @@ -234,24 +214,19 @@ static void erase_##name(struct rb_root *root, type *t) \ RB_CLEAR_NODE(&t->nodefld); \ } -/* - * @lookup_param_type is a parameter and not constructed from (@type, - * @keyfld) with typeof() because adding const is too unwieldy. 
- */ -#define DEFINE_RB_LOOKUP_FUNC2(name, type, keyfld, cmpexp, keyexp, \ - lookup_param_type, nodefld) \ -static type *lookup_##name(struct rb_root *root, lookup_param_type key) \ +#define DEFINE_RB_LOOKUP_FUNC(name, type, keyfld, nodefld) \ +extern type __lookup_##name##_key; \ +static type *lookup_##name(struct rb_root *root, \ + typeof(__lookup_##name##_key.keyfld) key) \ { \ struct rb_node *n = root->rb_node; \ \ while (n) { \ type *cur = rb_entry(n, type, nodefld); \ - int cmp; \ \ - cmp = cmpexp(key, keyexp(cur->keyfld)); \ - if (cmp < 0) \ + if (key < cur->keyfld) \ n = n->rb_left; \ - else if (cmp > 0) \ + else if (key > cur->keyfld) \ n = n->rb_right; \ else \ return cur; \ @@ -260,23 +235,6 @@ static type *lookup_##name(struct rb_root *root, lookup_param_type key) \ return NULL; \ } -#define DEFINE_RB_FUNCS2(name, type, keyfld, cmpexp, keyexp, \ - lookup_param_type, nodefld) \ -DEFINE_RB_INSDEL_FUNCS2(name, type, keyfld, cmpexp, keyexp, nodefld) \ -DEFINE_RB_LOOKUP_FUNC2(name, type, keyfld, cmpexp, keyexp, \ - lookup_param_type, nodefld) - -/* - * Shorthands for integer keys. 
- */ -#define DEFINE_RB_INSDEL_FUNCS(name, type, keyfld, nodefld) \ -DEFINE_RB_INSDEL_FUNCS2(name, type, keyfld, RB_CMP3WAY, RB_BYVAL, nodefld) - -#define DEFINE_RB_LOOKUP_FUNC(name, type, keyfld, nodefld) \ -extern type __lookup_##name##_key; \ -DEFINE_RB_LOOKUP_FUNC2(name, type, keyfld, RB_CMP3WAY, RB_BYVAL, \ - typeof(__lookup_##name##_key.keyfld), nodefld) - #define DEFINE_RB_FUNCS(name, type, keyfld, nodefld) \ DEFINE_RB_INSDEL_FUNCS(name, type, keyfld, nodefld) \ DEFINE_RB_LOOKUP_FUNC(name, type, keyfld, nodefld) @@ -286,9 +244,6 @@ extern struct kmem_cache *ceph_cap_cachep; extern struct kmem_cache *ceph_cap_flush_cachep; extern struct kmem_cache *ceph_dentry_cachep; extern struct kmem_cache *ceph_file_cachep; -extern struct kmem_cache *ceph_dir_file_cachep; -extern struct kmem_cache *ceph_mds_request_cachep; -extern mempool_t *ceph_wb_pagevec_pool; /* ceph_common.c */ extern bool libceph_compatible(void *data); @@ -297,31 +252,31 @@ extern const char *ceph_msg_type_name(int type); extern int ceph_check_fsid(struct ceph_client *client, struct ceph_fsid *fsid); extern void *ceph_kvmalloc(size_t size, gfp_t flags); -struct fs_parameter; -struct fc_log; -struct ceph_options *ceph_alloc_options(void); -int ceph_parse_mon_ips(const char *buf, size_t len, struct ceph_options *opt, - struct fc_log *l); -int ceph_parse_param(struct fs_parameter *param, struct ceph_options *opt, - struct fc_log *l); -int ceph_print_client_options(struct seq_file *m, struct ceph_client *client, - bool show_all); +extern struct ceph_options *ceph_parse_options(char *options, + const char *dev_name, const char *dev_name_end, + int (*parse_extra_token)(char *c, void *private), + void *private); +int ceph_print_client_options(struct seq_file *m, struct ceph_client *client); extern void ceph_destroy_options(struct ceph_options *opt); extern int ceph_compare_options(struct ceph_options *new_opt, struct ceph_client *client); -struct ceph_client *ceph_create_client(struct ceph_options *opt, 
void *private); +extern struct ceph_client *ceph_create_client(struct ceph_options *opt, + void *private, + u64 supported_features, + u64 required_features); struct ceph_entity_addr *ceph_client_addr(struct ceph_client *client); u64 ceph_client_gid(struct ceph_client *client); extern void ceph_destroy_client(struct ceph_client *client); -extern void ceph_reset_client_addr(struct ceph_client *client); extern int __ceph_open_session(struct ceph_client *client, unsigned long started); extern int ceph_open_session(struct ceph_client *client); -int ceph_wait_for_latest_osdmap(struct ceph_client *client, - unsigned long timeout); /* pagevec.c */ extern void ceph_release_page_vector(struct page **pages, int num_pages); + +extern struct page **ceph_get_direct_page_vector(const void __user *data, + int num_pages, + bool write_page); extern void ceph_put_page_vector(struct page **pages, int num_pages, bool dirty); extern struct page **ceph_alloc_page_vector(int num_pages, gfp_t flags); diff --git a/include/linux/ceph/mdsmap.h b/include/linux/ceph/mdsmap.h index 523fd04528..87ed09f548 100644 --- a/include/linux/ceph/mdsmap.h +++ b/include/linux/ceph/mdsmap.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _FS_CEPH_MDSMAP_H #define _FS_CEPH_MDSMAP_H @@ -25,25 +24,19 @@ struct ceph_mdsmap { u32 m_session_timeout; /* seconds */ u32 m_session_autoclose; /* seconds */ u64 m_max_file_size; - u32 m_max_mds; /* expected up:active mds number */ - u32 m_num_active_mds; /* actual up:active mds number */ - u32 possible_max_rank; /* possible max rank index */ + u32 m_max_mds; /* size of m_addr, m_state arrays */ struct ceph_mds_info *m_info; /* which object pools file data can be stored in */ int m_num_data_pg_pools; u64 *m_data_pg_pools; u64 m_cas_pg_pool; - - bool m_enabled; - bool m_damaged; - int m_num_laggy; }; static inline struct ceph_entity_addr * ceph_mdsmap_get_addr(struct ceph_mdsmap *m, int w) { - if (w >= m->possible_max_rank) + if (w >= m->m_max_mds) return 
NULL; return &m->m_info[w].addr; } @@ -51,21 +44,20 @@ ceph_mdsmap_get_addr(struct ceph_mdsmap *m, int w) static inline int ceph_mdsmap_get_state(struct ceph_mdsmap *m, int w) { BUG_ON(w < 0); - if (w >= m->possible_max_rank) + if (w >= m->m_max_mds) return CEPH_MDS_STATE_DNE; return m->m_info[w].state; } static inline bool ceph_mdsmap_is_laggy(struct ceph_mdsmap *m, int w) { - if (w >= 0 && w < m->possible_max_rank) + if (w >= 0 && w < m->m_max_mds) return m->m_info[w].laggy; return false; } extern int ceph_mdsmap_get_random_mds(struct ceph_mdsmap *m); -struct ceph_mdsmap *ceph_mdsmap_decode(void **p, void *end, bool msgr2); +extern struct ceph_mdsmap *ceph_mdsmap_decode(void **p, void *end); extern void ceph_mdsmap_destroy(struct ceph_mdsmap *m); -extern bool ceph_mdsmap_is_cluster_available(struct ceph_mdsmap *m); #endif diff --git a/include/linux/ceph/messenger.h b/include/linux/ceph/messenger.h index 0e6e9ad3c3..8dbd7879fd 100644 --- a/include/linux/ceph/messenger.h +++ b/include/linux/ceph/messenger.h @@ -1,9 +1,7 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef __FS_CEPH_MESSENGER_H #define __FS_CEPH_MESSENGER_H -#include -#include +#include #include #include #include @@ -32,10 +30,7 @@ struct ceph_connection_operations { struct ceph_auth_handshake *(*get_authorizer) ( struct ceph_connection *con, int *proto, int force_new); - int (*add_authorizer_challenge)(struct ceph_connection *con, - void *challenge_buf, - int challenge_buf_len); - int (*verify_authorizer_reply) (struct ceph_connection *con); + int (*verify_authorizer_reply) (struct ceph_connection *con, int len); int (*invalidate_authorizer)(struct ceph_connection *con); /* there was some error on the socket (disconnect, whatever) */ @@ -49,30 +44,11 @@ struct ceph_connection_operations { struct ceph_msg_header *hdr, int *skip); - void (*reencode_message) (struct ceph_msg *msg); - int (*sign_message) (struct ceph_msg *msg); int (*check_message_signature) (struct ceph_msg *msg); - - /* msgr2 
authentication exchange */ - int (*get_auth_request)(struct ceph_connection *con, - void *buf, int *buf_len, - void **authorizer, int *authorizer_len); - int (*handle_auth_reply_more)(struct ceph_connection *con, - void *reply, int reply_len, - void *buf, int *buf_len, - void **authorizer, int *authorizer_len); - int (*handle_auth_done)(struct ceph_connection *con, - u64 global_id, void *reply, int reply_len, - u8 *session_key, int *session_key_len, - u8 *con_secret, int *con_secret_len); - int (*handle_auth_bad_method)(struct ceph_connection *con, - int used_proto, int result, - const int *allowed_protos, int proto_cnt, - const int *allowed_modes, int mode_cnt); }; -/* use format string %s%lld */ +/* use format string %s%d */ #define ENTITY_NAME(n) ceph_entity_type_name((n).type), le64_to_cpu((n).num) struct ceph_messenger { @@ -97,106 +73,37 @@ enum ceph_msg_data_type { #ifdef CONFIG_BLOCK CEPH_MSG_DATA_BIO, /* data source/destination is a bio list */ #endif /* CONFIG_BLOCK */ - CEPH_MSG_DATA_BVECS, /* data source/destination is a bio_vec array */ }; +static __inline__ bool ceph_msg_data_type_valid(enum ceph_msg_data_type type) +{ + switch (type) { + case CEPH_MSG_DATA_NONE: + case CEPH_MSG_DATA_PAGES: + case CEPH_MSG_DATA_PAGELIST: #ifdef CONFIG_BLOCK - -struct ceph_bio_iter { - struct bio *bio; - struct bvec_iter iter; -}; - -#define __ceph_bio_iter_advance_step(it, n, STEP) do { \ - unsigned int __n = (n), __cur_n; \ - \ - while (__n) { \ - BUG_ON(!(it)->iter.bi_size); \ - __cur_n = min((it)->iter.bi_size, __n); \ - (void)(STEP); \ - bio_advance_iter((it)->bio, &(it)->iter, __cur_n); \ - if (!(it)->iter.bi_size && (it)->bio->bi_next) { \ - dout("__ceph_bio_iter_advance_step next bio\n"); \ - (it)->bio = (it)->bio->bi_next; \ - (it)->iter = (it)->bio->bi_iter; \ - } \ - __n -= __cur_n; \ - } \ -} while (0) - -/* - * Advance @it by @n bytes. 
- */ -#define ceph_bio_iter_advance(it, n) \ - __ceph_bio_iter_advance_step(it, n, 0) - -/* - * Advance @it by @n bytes, executing BVEC_STEP for each bio_vec. - */ -#define ceph_bio_iter_advance_step(it, n, BVEC_STEP) \ - __ceph_bio_iter_advance_step(it, n, ({ \ - struct bio_vec bv; \ - struct bvec_iter __cur_iter; \ - \ - __cur_iter = (it)->iter; \ - __cur_iter.bi_size = __cur_n; \ - __bio_for_each_segment(bv, (it)->bio, __cur_iter, __cur_iter) \ - (void)(BVEC_STEP); \ - })) - + case CEPH_MSG_DATA_BIO: #endif /* CONFIG_BLOCK */ - -struct ceph_bvec_iter { - struct bio_vec *bvecs; - struct bvec_iter iter; -}; - -#define __ceph_bvec_iter_advance_step(it, n, STEP) do { \ - BUG_ON((n) > (it)->iter.bi_size); \ - (void)(STEP); \ - bvec_iter_advance((it)->bvecs, &(it)->iter, (n)); \ -} while (0) - -/* - * Advance @it by @n bytes. - */ -#define ceph_bvec_iter_advance(it, n) \ - __ceph_bvec_iter_advance_step(it, n, 0) - -/* - * Advance @it by @n bytes, executing BVEC_STEP for each bio_vec. - */ -#define ceph_bvec_iter_advance_step(it, n, BVEC_STEP) \ - __ceph_bvec_iter_advance_step(it, n, ({ \ - struct bio_vec bv; \ - struct bvec_iter __cur_iter; \ - \ - __cur_iter = (it)->iter; \ - __cur_iter.bi_size = (n); \ - for_each_bvec(bv, (it)->bvecs, __cur_iter, __cur_iter) \ - (void)(BVEC_STEP); \ - })) - -#define ceph_bvec_iter_shorten(it, n) do { \ - BUG_ON((n) > (it)->iter.bi_size); \ - (it)->iter.bi_size = (n); \ -} while (0) + return true; + default: + return false; + } +} struct ceph_msg_data { + struct list_head links; /* ceph_msg->data */ enum ceph_msg_data_type type; union { #ifdef CONFIG_BLOCK struct { - struct ceph_bio_iter bio_pos; - u32 bio_length; + struct bio *bio; + size_t bio_length; }; #endif /* CONFIG_BLOCK */ - struct ceph_bvec_iter bvec_pos; struct { - struct page **pages; + struct page **pages; /* NOT OWNER. 
*/ size_t length; /* total # bytes */ unsigned int alignment; /* first page */ - bool own_pages; }; struct ceph_pagelist *pagelist; }; @@ -204,6 +111,7 @@ struct ceph_msg_data { struct ceph_msg_data_cursor { size_t total_resid; /* across all data items */ + struct list_head *data_head; /* = &ceph_msg->data */ struct ceph_msg_data *data; /* current data item */ size_t resid; /* bytes not yet consumed */ @@ -211,9 +119,11 @@ struct ceph_msg_data_cursor { bool need_crc; /* crc update needed */ union { #ifdef CONFIG_BLOCK - struct ceph_bio_iter bio_iter; + struct { /* bio */ + struct bio *bio; /* bio from list */ + struct bvec_iter bvec_iter; + }; #endif /* CONFIG_BLOCK */ - struct bvec_iter bvec_iter; struct { /* pages */ unsigned int page_offset; /* offset in page */ unsigned short page_index; /* index in array */ @@ -241,9 +151,7 @@ struct ceph_msg { struct ceph_buffer *middle; size_t data_length; - struct ceph_msg_data *data; - int num_data_items; - int max_data_items; + struct list_head data; struct ceph_msg_data_cursor cursor; struct ceph_connection *con; @@ -253,171 +161,14 @@ struct ceph_msg { bool more_to_follow; bool needs_out_seq; int front_alloc_len; + unsigned long ack_stamp; /* tx: when we were acked */ struct ceph_msgpool *pool; }; -/* - * connection states - */ -#define CEPH_CON_S_CLOSED 1 -#define CEPH_CON_S_PREOPEN 2 -#define CEPH_CON_S_V1_BANNER 3 -#define CEPH_CON_S_V1_CONNECT_MSG 4 -#define CEPH_CON_S_V2_BANNER_PREFIX 5 -#define CEPH_CON_S_V2_BANNER_PAYLOAD 6 -#define CEPH_CON_S_V2_HELLO 7 -#define CEPH_CON_S_V2_AUTH 8 -#define CEPH_CON_S_V2_AUTH_SIGNATURE 9 -#define CEPH_CON_S_V2_SESSION_CONNECT 10 -#define CEPH_CON_S_V2_SESSION_RECONNECT 11 -#define CEPH_CON_S_OPEN 12 -#define CEPH_CON_S_STANDBY 13 - -/* - * ceph_connection flag bits - */ -#define CEPH_CON_F_LOSSYTX 0 /* we can close channel or drop - messages on errors */ -#define CEPH_CON_F_KEEPALIVE_PENDING 1 /* we need to send a keepalive */ -#define CEPH_CON_F_WRITE_PENDING 2 /* we have data 
ready to send */ -#define CEPH_CON_F_SOCK_CLOSED 3 /* socket state changed to closed */ -#define CEPH_CON_F_BACKOFF 4 /* need to retry queuing delayed - work */ - /* ceph connection fault delay defaults, for exponential backoff */ -#define BASE_DELAY_INTERVAL (HZ / 4) -#define MAX_DELAY_INTERVAL (15 * HZ) - -struct ceph_connection_v1_info { - struct kvec out_kvec[8], /* sending header/footer data */ - *out_kvec_cur; - int out_kvec_left; /* kvec's left in out_kvec */ - int out_skip; /* skip this many bytes */ - int out_kvec_bytes; /* total bytes left */ - bool out_more; /* there is more data after the kvecs */ - bool out_msg_done; - - struct ceph_auth_handshake *auth; - int auth_retry; /* true if we need a newer authorizer */ - - /* connection negotiation temps */ - u8 in_banner[CEPH_BANNER_MAX_LEN]; - struct ceph_entity_addr actual_peer_addr; - struct ceph_entity_addr peer_addr_for_me; - struct ceph_msg_connect out_connect; - struct ceph_msg_connect_reply in_reply; - - int in_base_pos; /* bytes read */ - - /* message in temps */ - u8 in_tag; /* protocol control byte */ - struct ceph_msg_header in_hdr; - __le64 in_temp_ack; /* for reading an ack */ - - /* message out temps */ - struct ceph_msg_header out_hdr; - __le64 out_temp_ack; /* for writing an ack */ - struct ceph_timespec out_temp_keepalive2; /* for writing keepalive2 - stamp */ - - u32 connect_seq; /* identify the most recent connection - attempt for this session */ - u32 peer_global_seq; /* peer's global seq for this connection */ -}; - -#define CEPH_CRC_LEN 4 -#define CEPH_GCM_KEY_LEN 16 -#define CEPH_GCM_IV_LEN sizeof(struct ceph_gcm_nonce) -#define CEPH_GCM_BLOCK_LEN 16 -#define CEPH_GCM_TAG_LEN 16 - -#define CEPH_PREAMBLE_LEN 32 -#define CEPH_PREAMBLE_INLINE_LEN 48 -#define CEPH_PREAMBLE_PLAIN_LEN CEPH_PREAMBLE_LEN -#define CEPH_PREAMBLE_SECURE_LEN (CEPH_PREAMBLE_LEN + \ - CEPH_PREAMBLE_INLINE_LEN + \ - CEPH_GCM_TAG_LEN) -#define CEPH_EPILOGUE_PLAIN_LEN (1 + 3 * CEPH_CRC_LEN) -#define 
CEPH_EPILOGUE_SECURE_LEN (CEPH_GCM_BLOCK_LEN + CEPH_GCM_TAG_LEN) - -#define CEPH_FRAME_MAX_SEGMENT_COUNT 4 - -struct ceph_frame_desc { - int fd_tag; /* FRAME_TAG_* */ - int fd_seg_cnt; - int fd_lens[CEPH_FRAME_MAX_SEGMENT_COUNT]; /* logical */ - int fd_aligns[CEPH_FRAME_MAX_SEGMENT_COUNT]; -}; - -struct ceph_gcm_nonce { - __le32 fixed; - __le64 counter __packed; -}; - -struct ceph_connection_v2_info { - struct iov_iter in_iter; - struct kvec in_kvecs[5]; /* recvmsg */ - struct bio_vec in_bvec; /* recvmsg (in_cursor) */ - int in_kvec_cnt; - int in_state; /* IN_S_* */ - - struct iov_iter out_iter; - struct kvec out_kvecs[8]; /* sendmsg */ - struct bio_vec out_bvec; /* sendpage (out_cursor, out_zero), - sendmsg (out_enc_pages) */ - int out_kvec_cnt; - int out_state; /* OUT_S_* */ - - int out_zero; /* # of zero bytes to send */ - bool out_iter_sendpage; /* use sendpage if possible */ - - struct ceph_frame_desc in_desc; - struct ceph_msg_data_cursor in_cursor; - struct ceph_msg_data_cursor out_cursor; - - struct crypto_shash *hmac_tfm; /* post-auth signature */ - struct crypto_aead *gcm_tfm; /* on-wire encryption */ - struct aead_request *gcm_req; - struct crypto_wait gcm_wait; - struct ceph_gcm_nonce in_gcm_nonce; - struct ceph_gcm_nonce out_gcm_nonce; - - struct page **out_enc_pages; - int out_enc_page_cnt; - int out_enc_resid; - int out_enc_i; - - int con_mode; /* CEPH_CON_MODE_* */ - - void *conn_bufs[16]; - int conn_buf_cnt; - - struct kvec in_sign_kvecs[8]; - struct kvec out_sign_kvecs[8]; - int in_sign_kvec_cnt; - int out_sign_kvec_cnt; - - u64 client_cookie; - u64 server_cookie; - u64 global_seq; - u64 connect_seq; - u64 peer_global_seq; - - u8 in_buf[CEPH_PREAMBLE_SECURE_LEN]; - u8 out_buf[CEPH_PREAMBLE_SECURE_LEN]; - struct { - u8 late_status; /* FRAME_LATE_STATUS_* */ - union { - struct { - u32 front_crc; - u32 middle_crc; - u32 data_crc; - } __packed; - u8 pad[CEPH_GCM_BLOCK_LEN - 1]; - }; - } out_epil; -}; +#define BASE_DELAY_INTERVAL (HZ/2) +#define 
MAX_DELAY_INTERVAL (5 * 60 * HZ) /* * A single connection with another host. @@ -433,16 +184,25 @@ struct ceph_connection { struct ceph_messenger *msgr; - int state; /* CEPH_CON_S_* */ atomic_t sock_state; struct socket *sock; + struct ceph_entity_addr peer_addr; /* peer address */ + struct ceph_entity_addr peer_addr_for_me; - unsigned long flags; /* CEPH_CON_F_* */ + unsigned long flags; + unsigned long state; const char *error_msg; /* error message, if any */ struct ceph_entity_name peer_name; /* peer name */ - struct ceph_entity_addr peer_addr; /* peer address */ + u64 peer_features; + u32 connect_seq; /* identify the most recent connection + attempt for this connection, client */ + u32 peer_global_seq; /* peer's global seq for this connection */ + + int auth_retry; /* true if we need a newer authorizer */ + void *auth_reply_buf; /* where to put the authorizer reply */ + int auth_reply_buf_len; struct mutex mutex; @@ -453,87 +213,50 @@ struct ceph_connection { u64 in_seq, in_seq_acked; /* last message received, acked */ - struct ceph_msg *in_msg; + /* connection negotiation temps */ + char in_banner[CEPH_BANNER_MAX_LEN]; + struct ceph_msg_connect out_connect; + struct ceph_msg_connect_reply in_reply; + struct ceph_entity_addr actual_peer_addr; + + /* message out temps */ + struct ceph_msg_header out_hdr; struct ceph_msg *out_msg; /* sending message (== tail of out_sent) */ + bool out_msg_done; + struct kvec out_kvec[8], /* sending header/footer data */ + *out_kvec_cur; + int out_kvec_left; /* kvec's left in out_kvec */ + int out_skip; /* skip this many bytes */ + int out_kvec_bytes; /* total bytes left */ + int out_more; /* there is more data after the kvecs */ + __le64 out_temp_ack; /* for writing an ack */ + struct ceph_timespec out_temp_keepalive2; /* for writing keepalive2 + stamp */ + + /* message in temps */ + struct ceph_msg_header in_hdr; + struct ceph_msg *in_msg; u32 in_front_crc, in_middle_crc, in_data_crc; /* calculated crc */ - struct timespec64 
last_keepalive_ack; /* keepalive2 ack stamp */ + char in_tag; /* protocol control byte */ + int in_base_pos; /* bytes read */ + __le64 in_temp_ack; /* for reading an ack */ + + struct timespec last_keepalive_ack; /* keepalive2 ack stamp */ struct delayed_work work; /* send|recv work */ unsigned long delay; /* current delay interval */ - - union { - struct ceph_connection_v1_info v1; - struct ceph_connection_v2_info v2; - }; }; -extern struct page *ceph_zero_page; - -void ceph_con_flag_clear(struct ceph_connection *con, unsigned long con_flag); -void ceph_con_flag_set(struct ceph_connection *con, unsigned long con_flag); -bool ceph_con_flag_test(struct ceph_connection *con, unsigned long con_flag); -bool ceph_con_flag_test_and_clear(struct ceph_connection *con, - unsigned long con_flag); -bool ceph_con_flag_test_and_set(struct ceph_connection *con, - unsigned long con_flag); - -void ceph_encode_my_addr(struct ceph_messenger *msgr); - -int ceph_tcp_connect(struct ceph_connection *con); -int ceph_con_close_socket(struct ceph_connection *con); -void ceph_con_reset_session(struct ceph_connection *con); - -u32 ceph_get_global_seq(struct ceph_messenger *msgr, u32 gt); -void ceph_con_discard_sent(struct ceph_connection *con, u64 ack_seq); -void ceph_con_discard_requeued(struct ceph_connection *con, u64 reconnect_seq); - -void ceph_msg_data_cursor_init(struct ceph_msg_data_cursor *cursor, - struct ceph_msg *msg, size_t length); -struct page *ceph_msg_data_next(struct ceph_msg_data_cursor *cursor, - size_t *page_offset, size_t *length, - bool *last_piece); -void ceph_msg_data_advance(struct ceph_msg_data_cursor *cursor, size_t bytes); - -u32 ceph_crc32c_page(u32 crc, struct page *page, unsigned int page_offset, - unsigned int length); - -bool ceph_addr_is_blank(const struct ceph_entity_addr *addr); -int ceph_addr_port(const struct ceph_entity_addr *addr); -void ceph_addr_set_port(struct ceph_entity_addr *addr, int p); - -void ceph_con_process_message(struct ceph_connection 
*con); -int ceph_con_in_msg_alloc(struct ceph_connection *con, - struct ceph_msg_header *hdr, int *skip); -void ceph_con_get_out_msg(struct ceph_connection *con); - -/* messenger_v1.c */ -int ceph_con_v1_try_read(struct ceph_connection *con); -int ceph_con_v1_try_write(struct ceph_connection *con); -void ceph_con_v1_revoke(struct ceph_connection *con); -void ceph_con_v1_revoke_incoming(struct ceph_connection *con); -bool ceph_con_v1_opened(struct ceph_connection *con); -void ceph_con_v1_reset_session(struct ceph_connection *con); -void ceph_con_v1_reset_protocol(struct ceph_connection *con); - -/* messenger_v2.c */ -int ceph_con_v2_try_read(struct ceph_connection *con); -int ceph_con_v2_try_write(struct ceph_connection *con); -void ceph_con_v2_revoke(struct ceph_connection *con); -void ceph_con_v2_revoke_incoming(struct ceph_connection *con); -bool ceph_con_v2_opened(struct ceph_connection *con); -void ceph_con_v2_reset_session(struct ceph_connection *con); -void ceph_con_v2_reset_protocol(struct ceph_connection *con); - - -extern const char *ceph_pr_addr(const struct ceph_entity_addr *addr); +extern const char *ceph_pr_addr(const struct sockaddr_storage *ss); extern int ceph_parse_ips(const char *c, const char *end, struct ceph_entity_addr *addr, int max_count, int *count); + extern int ceph_msgr_init(void); extern void ceph_msgr_exit(void); extern void ceph_msgr_flush(void); @@ -541,7 +264,6 @@ extern void ceph_msgr_flush(void); extern void ceph_messenger_init(struct ceph_messenger *msgr, struct ceph_entity_addr *myaddr); extern void ceph_messenger_fini(struct ceph_messenger *msgr); -extern void ceph_messenger_reset_nonce(struct ceph_messenger *msgr); extern void ceph_con_init(struct ceph_connection *con, void *private, const struct ceph_connection_operations *ops, @@ -560,19 +282,15 @@ extern void ceph_con_keepalive(struct ceph_connection *con); extern bool ceph_con_keepalive_expired(struct ceph_connection *con, unsigned long interval); -void 
ceph_msg_data_add_pages(struct ceph_msg *msg, struct page **pages, - size_t length, size_t alignment, bool own_pages); +extern void ceph_msg_data_add_pages(struct ceph_msg *msg, struct page **pages, + size_t length, size_t alignment); extern void ceph_msg_data_add_pagelist(struct ceph_msg *msg, struct ceph_pagelist *pagelist); #ifdef CONFIG_BLOCK -void ceph_msg_data_add_bio(struct ceph_msg *msg, struct ceph_bio_iter *bio_pos, - u32 length); +extern void ceph_msg_data_add_bio(struct ceph_msg *msg, struct bio *bio, + size_t length); #endif /* CONFIG_BLOCK */ -void ceph_msg_data_add_bvecs(struct ceph_msg *msg, - struct ceph_bvec_iter *bvec_pos); -struct ceph_msg *ceph_msg_new2(int type, int front_len, int max_data_items, - gfp_t flags, bool can_fail); extern struct ceph_msg *ceph_msg_new(int type, int front_len, gfp_t flags, bool can_fail); diff --git a/include/linux/ceph/mon_client.h b/include/linux/ceph/mon_client.h index b658961156..d5a3ecea57 100644 --- a/include/linux/ceph/mon_client.h +++ b/include/linux/ceph/mon_client.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _FS_CEPH_MON_CLIENT_H #define _FS_CEPH_MON_CLIENT_H @@ -19,7 +18,7 @@ struct ceph_monmap { struct ceph_fsid fsid; u32 epoch; u32 num_mon; - struct ceph_entity_inst mon_inst[]; + struct ceph_entity_inst mon_inst[0]; }; struct ceph_mon_client; @@ -104,12 +103,12 @@ struct ceph_mon_client { #endif }; +extern struct ceph_monmap *ceph_monmap_decode(void *p, void *end); extern int ceph_monmap_contains(struct ceph_monmap *m, struct ceph_entity_addr *addr); extern int ceph_monc_init(struct ceph_mon_client *monc, struct ceph_client *cl); extern void ceph_monc_stop(struct ceph_mon_client *monc); -extern void ceph_monc_reopen_session(struct ceph_mon_client *monc); enum { CEPH_SUB_MONMAP = 0, @@ -134,15 +133,15 @@ void ceph_monc_renew_subs(struct ceph_mon_client *monc); extern int ceph_monc_wait_osdmap(struct ceph_mon_client *monc, u32 epoch, unsigned long timeout); -int 
ceph_monc_do_statfs(struct ceph_mon_client *monc, u64 data_pool, - struct ceph_statfs *buf); +extern int ceph_monc_do_statfs(struct ceph_mon_client *monc, + struct ceph_statfs *buf); int ceph_monc_get_version(struct ceph_mon_client *monc, const char *what, u64 *newest); int ceph_monc_get_version_async(struct ceph_mon_client *monc, const char *what, ceph_monc_callback_t cb, u64 private_data); -int ceph_monc_blocklist_add(struct ceph_mon_client *monc, +int ceph_monc_blacklist_add(struct ceph_mon_client *monc, struct ceph_entity_addr *client_addr); extern int ceph_monc_open_session(struct ceph_mon_client *monc); diff --git a/include/linux/ceph/msgpool.h b/include/linux/ceph/msgpool.h index 729cdf700e..ddd0d48d03 100644 --- a/include/linux/ceph/msgpool.h +++ b/include/linux/ceph/msgpool.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _FS_CEPH_MSGPOOL #define _FS_CEPH_MSGPOOL @@ -13,15 +12,14 @@ struct ceph_msgpool { mempool_t *pool; int type; /* preallocated message type */ int front_len; /* preallocated payload size */ - int max_data_items; }; -int ceph_msgpool_init(struct ceph_msgpool *pool, int type, - int front_len, int max_data_items, int size, - const char *name); +extern int ceph_msgpool_init(struct ceph_msgpool *pool, int type, + int front_len, int size, bool blocking, + const char *name); extern void ceph_msgpool_destroy(struct ceph_msgpool *pool); -struct ceph_msg *ceph_msgpool_get(struct ceph_msgpool *pool, int front_len, - int max_data_items); +extern struct ceph_msg *ceph_msgpool_get(struct ceph_msgpool *, + int front_len); extern void ceph_msgpool_put(struct ceph_msgpool *, struct ceph_msg *); #endif diff --git a/include/linux/ceph/msgr.h b/include/linux/ceph/msgr.h index 3989dcb94d..0fe2656ac4 100644 --- a/include/linux/ceph/msgr.h +++ b/include/linux/ceph/msgr.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef CEPH_MSGR_H #define CEPH_MSGR_H @@ -8,45 +7,24 @@ #define CEPH_MON_PORT 6789 /* default monitor port */ +/* + * 
client-side processes will try to bind to ports in this + * range, simply for the benefit of tools like nmap or wireshark + * that would like to identify the protocol. + */ +#define CEPH_PORT_FIRST 6789 +#define CEPH_PORT_START 6800 /* non-monitors start here */ +#define CEPH_PORT_LAST 6900 + /* * tcp connection banner. include a protocol version. and adjust * whenever the wire protocol changes. try to keep this string length * constant. */ #define CEPH_BANNER "ceph v027" -#define CEPH_BANNER_LEN 9 #define CEPH_BANNER_MAX_LEN 30 -/* - * messenger V2 connection banner prefix. - * The full banner string should have the form: "ceph v2\n" - * the 2 bytes are the length of the remaining banner. - */ -#define CEPH_BANNER_V2 "ceph v2\n" -#define CEPH_BANNER_V2_LEN 8 -#define CEPH_BANNER_V2_PREFIX_LEN (CEPH_BANNER_V2_LEN + sizeof(__le16)) - -/* - * messenger V2 features - */ -#define CEPH_MSGR2_INCARNATION_1 (0ull) - -#define DEFINE_MSGR2_FEATURE(bit, incarnation, name) \ - static const uint64_t __maybe_unused CEPH_MSGR2_FEATURE_##name = (1ULL << bit); \ - static const uint64_t __maybe_unused CEPH_MSGR2_FEATUREMASK_##name = \ - (1ULL << bit | CEPH_MSGR2_INCARNATION_##incarnation); - -#define HAVE_MSGR2_FEATURE(x, name) \ - (((x) & (CEPH_MSGR2_FEATUREMASK_##name)) == (CEPH_MSGR2_FEATUREMASK_##name)) - -DEFINE_MSGR2_FEATURE( 0, 1, REVISION_1) // msgr2.1 - -#define CEPH_MSGR2_SUPPORTED_FEATURES (CEPH_MSGR2_FEATURE_REVISION_1) - -#define CEPH_MSGR2_REQUIRED_FEATURES (CEPH_MSGR2_FEATURE_REVISION_1) - - /* * Rollover-safe type and comparator for 32-bit sequence numbers. * Comparator returns -1, 0, or 1. @@ -82,18 +60,11 @@ extern const char *ceph_entity_type_name(int type); * entity_addr -- network address */ struct ceph_entity_addr { - __le32 type; /* CEPH_ENTITY_ADDR_TYPE_* */ + __le32 type; __le32 nonce; /* unique id for process (e.g. 
pid) */ struct sockaddr_storage in_addr; } __attribute__ ((packed)); -static inline bool ceph_addr_equal_no_type(const struct ceph_entity_addr *lhs, - const struct ceph_entity_addr *rhs) -{ - return !memcmp(&lhs->in_addr, &rhs->in_addr, sizeof(lhs->in_addr)) && - lhs->nonce == rhs->nonce; -} - struct ceph_entity_inst { struct ceph_entity_name name; struct ceph_entity_addr addr; @@ -119,7 +90,7 @@ struct ceph_entity_inst { #define CEPH_MSGR_TAG_SEQ 13 /* 64-bit int follows with seen seq number */ #define CEPH_MSGR_TAG_KEEPALIVE2 14 /* keepalive2 byte + ceph_timespec */ #define CEPH_MSGR_TAG_KEEPALIVE2_ACK 15 /* keepalive2 reply */ -#define CEPH_MSGR_TAG_CHALLENGE_AUTHORIZER 16 /* cephx v2 doing server challenge */ + /* * connection negotiation @@ -188,24 +159,6 @@ struct ceph_msg_header { __le32 crc; /* header crc32c */ } __attribute__ ((packed)); -struct ceph_msg_header2 { - __le64 seq; /* message seq# for this session */ - __le64 tid; /* transaction id */ - __le16 type; /* message type */ - __le16 priority; /* priority. higher value == higher priority */ - __le16 version; /* version of message encoding */ - - __le32 data_pre_padding_len; - __le16 data_off; /* sender: include full offset; - receiver: mask against ~PAGE_MASK */ - - __le64 ack_seq; - __u8 flags; - /* oldest code we think can decode this. unknown if zero. 
*/ - __le16 compat_version; - __le16 reserved; -} __attribute__ ((packed)); - #define CEPH_MSG_PRIO_LOW 64 #define CEPH_MSG_PRIO_DEFAULT 127 #define CEPH_MSG_PRIO_HIGH 196 diff --git a/include/linux/ceph/osd_client.h b/include/linux/ceph/osd_client.h index 83fa08a065..a8e66344ba 100644 --- a/include/linux/ceph/osd_client.h +++ b/include/linux/ceph/osd_client.h @@ -1,14 +1,10 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _FS_CEPH_OSD_CLIENT_H #define _FS_CEPH_OSD_CLIENT_H -#include #include #include #include #include -#include -#include #include #include @@ -26,12 +22,13 @@ struct ceph_osd_client; * completion callback for async writepages */ typedef void (*ceph_osdc_callback_t)(struct ceph_osd_request *); +typedef void (*ceph_osdc_unsafe_callback_t)(struct ceph_osd_request *, bool); #define CEPH_HOMELESS_OSD -1 /* a given osd we're communicating with */ struct ceph_osd { - refcount_t o_ref; + atomic_t o_ref; struct ceph_osd_client *o_osdc; int o_osd; int o_incarnation; @@ -39,8 +36,6 @@ struct ceph_osd { struct ceph_connection o_con; struct rb_root o_requests; struct rb_root o_linger_requests; - struct rb_root o_backoff_mappings; - struct rb_root o_backoffs_by_id; struct list_head o_osd_lru; struct ceph_auth_handshake o_auth; unsigned long lru_ttl; @@ -58,7 +53,6 @@ enum ceph_osd_data_type { #ifdef CONFIG_BLOCK CEPH_OSD_DATA_TYPE_BIO, #endif /* CONFIG_BLOCK */ - CEPH_OSD_DATA_TYPE_BVECS, }; struct ceph_osd_data { @@ -74,14 +68,10 @@ struct ceph_osd_data { struct ceph_pagelist *pagelist; #ifdef CONFIG_BLOCK struct { - struct ceph_bio_iter bio_pos; - u32 bio_length; + struct bio *bio; /* list of bios */ + size_t bio_length; /* total in list */ }; #endif /* CONFIG_BLOCK */ - struct { - struct ceph_bvec_iter bvec_pos; - u32 num_bvecs; - }; }; }; @@ -136,15 +126,7 @@ struct ceph_osd_req_op { struct { u64 expected_object_size; u64 expected_write_size; - u32 flags; /* CEPH_OSD_OP_ALLOC_HINT_FLAG_* */ } alloc_hint; - struct { - u64 snapid; - u64 src_version; - u8 
flags; - u32 src_fadvise_flags; - struct ceph_osd_data osd_data; - } copy_from; }; }; @@ -154,8 +136,7 @@ struct ceph_osd_request_target { struct ceph_object_id target_oid; struct ceph_object_locator target_oloc; - struct ceph_pg pgid; /* last raw pg we mapped to */ - struct ceph_spg spgid; /* last actual spg we mapped to */ + struct ceph_pg pgid; u32 pg_num; u32 pg_num_mask; struct ceph_osds acting; @@ -163,15 +144,10 @@ struct ceph_osd_request_target { int size; int min_size; bool sort_bitwise; - bool recovery_deletes; unsigned int flags; /* CEPH_OSD_FLAG_* */ - bool used_replica; bool paused; - u32 epoch; - u32 last_force_resend; - int osd; }; @@ -180,7 +156,6 @@ struct ceph_osd_request { u64 r_tid; /* unique for this client */ struct rb_node r_node; struct rb_node r_mc_node; /* map check */ - struct work_struct r_complete_work; struct ceph_osd *r_osd; struct ceph_osd_request_target r_t; @@ -195,30 +170,32 @@ struct ceph_osd_request { unsigned int r_num_ops; int r_result; + bool r_got_reply; struct ceph_osd_client *r_osdc; struct kref r_kref; bool r_mempool; - struct completion r_completion; /* private to osd_client.c */ + struct completion r_completion; + struct completion r_safe_completion; /* fsync waiter */ ceph_osdc_callback_t r_callback; + ceph_osdc_unsafe_callback_t r_unsafe_callback; + struct list_head r_unsafe_item; struct inode *r_inode; /* for use by callbacks */ - struct list_head r_private_item; /* ditto */ void *r_priv; /* ditto */ /* set by submitter */ u64 r_snapid; /* for reads, CEPH_NOSNAP o/w */ struct ceph_snap_context *r_snapc; /* for writes */ - struct timespec64 r_mtime; /* ditto */ + struct timespec r_mtime; /* ditto */ u64 r_data_offset; /* ditto */ bool r_linger; /* don't resend on failure */ /* internal */ unsigned long r_stamp; /* jiffies, send or check time */ - unsigned long r_start_stamp; /* jiffies */ - ktime_t r_start_latency; /* ktime_t */ - ktime_t r_end_latency; /* ktime_t */ int r_attempts; + struct ceph_eversion 
r_replay_version; /* aka reassert_version */ + u32 r_last_force_resend; u32 r_map_dne_bound; struct ceph_osd_req_op r_ops[]; @@ -228,23 +205,6 @@ struct ceph_request_redirect { struct ceph_object_locator oloc; }; -/* - * osd request identifier - * - * caller name + incarnation# + tid to unique identify this request - */ -struct ceph_osd_reqid { - struct ceph_entity_name name; - __le64 tid; - __le32 inc; -} __packed; - -struct ceph_blkin_trace_info { - __le64 trace_id; - __le64 span_id; - __le64 parent_span_id; -} __packed; - typedef void (*rados_watchcb2_t)(void *arg, u64 notify_id, u64 cookie, u64 notifier_id, void *data, size_t data_len); typedef void (*rados_watcherrcb_t)(void *arg, u64 cookie, int err); @@ -263,9 +223,10 @@ struct ceph_osd_linger_request { struct list_head pending_lworks; struct ceph_osd_request_target t; + u32 last_force_resend; u32 map_dne_bound; - struct timespec64 mtime; + struct timespec mtime; struct kref kref; struct mutex lock; @@ -297,48 +258,6 @@ struct ceph_watch_item { struct ceph_entity_addr addr; }; -struct ceph_spg_mapping { - struct rb_node node; - struct ceph_spg spgid; - - struct rb_root backoffs; -}; - -struct ceph_hobject_id { - void *key; - size_t key_len; - void *oid; - size_t oid_len; - u64 snapid; - u32 hash; - u8 is_max; - void *nspace; - size_t nspace_len; - s64 pool; - - /* cache */ - u32 hash_reverse_bits; -}; - -static inline void ceph_hoid_build_hash_cache(struct ceph_hobject_id *hoid) -{ - hoid->hash_reverse_bits = bitrev32(hoid->hash); -} - -/* - * PG-wide backoff: [begin, end) - * per-object backoff: begin == end - */ -struct ceph_osd_backoff { - struct rb_node spg_node; - struct rb_node id_node; - - struct ceph_spg spgid; - u64 id; - struct ceph_hobject_id *begin; - struct ceph_hobject_id *end; -}; - #define CEPH_LINGER_ID_START 0xffff000000000000ULL struct ceph_osd_client { @@ -350,7 +269,6 @@ struct ceph_osd_client { struct rb_root osds; /* osds */ struct list_head osd_lru; /* idle osds */ spinlock_t 
osd_lru_lock; - u32 epoch_barrier; struct ceph_osd homeless_osd; atomic64_t last_tid; /* tid of last request */ u64 last_linger_id; @@ -359,7 +277,6 @@ struct ceph_osd_client { struct rb_root linger_map_checks; atomic_t num_requests; atomic_t num_homeless; - int abort_err; struct delayed_work timeout_work; struct delayed_work osds_timeout_work; #ifdef CONFIG_DEBUG_FS @@ -372,7 +289,6 @@ struct ceph_osd_client { struct ceph_msgpool msgpool_op_reply; struct workqueue_struct *notify_wq; - struct workqueue_struct *completion_wq; }; static inline bool ceph_osdmap_flag(struct ceph_osd_client *osdc, int flag) @@ -386,25 +302,13 @@ extern void ceph_osdc_cleanup(void); extern int ceph_osdc_init(struct ceph_osd_client *osdc, struct ceph_client *client); extern void ceph_osdc_stop(struct ceph_osd_client *osdc); -extern void ceph_osdc_reopen_osds(struct ceph_osd_client *osdc); extern void ceph_osdc_handle_reply(struct ceph_osd_client *osdc, struct ceph_msg *msg); extern void ceph_osdc_handle_map(struct ceph_osd_client *osdc, struct ceph_msg *msg); -void ceph_osdc_update_epoch_barrier(struct ceph_osd_client *osdc, u32 eb); -void ceph_osdc_abort_requests(struct ceph_osd_client *osdc, int err); -void ceph_osdc_clear_abort_err(struct ceph_osd_client *osdc); -#define osd_req_op_data(oreq, whch, typ, fld) \ -({ \ - struct ceph_osd_request *__oreq = (oreq); \ - unsigned int __whch = (whch); \ - BUG_ON(__whch >= __oreq->r_num_ops); \ - &__oreq->r_ops[__whch].typ.fld; \ -}) - -struct ceph_osd_req_op *osd_req_op_init(struct ceph_osd_request *osd_req, +extern void osd_req_op_init(struct ceph_osd_request *osd_req, unsigned int which, u16 opcode, u32 flags); extern void osd_req_op_raw_data_in_pages(struct ceph_osd_request *, @@ -435,18 +339,10 @@ extern void osd_req_op_extent_osd_data_pagelist(struct ceph_osd_request *, unsigned int which, struct ceph_pagelist *pagelist); #ifdef CONFIG_BLOCK -void osd_req_op_extent_osd_data_bio(struct ceph_osd_request *osd_req, - unsigned int which, - 
struct ceph_bio_iter *bio_pos, - u32 bio_length); +extern void osd_req_op_extent_osd_data_bio(struct ceph_osd_request *, + unsigned int which, + struct bio *bio, size_t bio_length); #endif /* CONFIG_BLOCK */ -void osd_req_op_extent_osd_data_bvecs(struct ceph_osd_request *osd_req, - unsigned int which, - struct bio_vec *bvecs, u32 num_bvecs, - u32 bytes); -void osd_req_op_extent_osd_data_bvec_pos(struct ceph_osd_request *osd_req, - unsigned int which, - struct ceph_bvec_iter *bvec_pos); extern void osd_req_op_cls_request_data_pagelist(struct ceph_osd_request *, unsigned int which, @@ -456,25 +352,21 @@ extern void osd_req_op_cls_request_data_pages(struct ceph_osd_request *, struct page **pages, u64 length, u32 alignment, bool pages_from_pool, bool own_pages); -void osd_req_op_cls_request_data_bvecs(struct ceph_osd_request *osd_req, - unsigned int which, - struct bio_vec *bvecs, u32 num_bvecs, - u32 bytes); extern void osd_req_op_cls_response_data_pages(struct ceph_osd_request *, unsigned int which, struct page **pages, u64 length, u32 alignment, bool pages_from_pool, bool own_pages); -int osd_req_op_cls_init(struct ceph_osd_request *osd_req, unsigned int which, - const char *class, const char *method); +extern void osd_req_op_cls_init(struct ceph_osd_request *osd_req, + unsigned int which, u16 opcode, + const char *class, const char *method); extern int osd_req_op_xattr_init(struct ceph_osd_request *osd_req, unsigned int which, u16 opcode, const char *name, const void *value, size_t size, u8 cmp_op, u8 cmp_mode); extern void osd_req_op_alloc_hint_init(struct ceph_osd_request *osd_req, unsigned int which, u64 expected_object_size, - u64 expected_write_size, - u32 flags); + u64 expected_write_size); extern struct ceph_osd_request *ceph_osdc_alloc_request(struct ceph_osd_client *osdc, struct ceph_snap_context *snapc, @@ -513,18 +405,24 @@ int ceph_osdc_call(struct ceph_osd_client *osdc, const char *class, const char *method, unsigned int flags, struct page *req_page, 
size_t req_len, - struct page **resp_pages, size_t *resp_len); + struct page *resp_page, size_t *resp_len); -int ceph_osdc_copy_from(struct ceph_osd_client *osdc, - u64 src_snapid, u64 src_version, - struct ceph_object_id *src_oid, - struct ceph_object_locator *src_oloc, - u32 src_fadvise_flags, - struct ceph_object_id *dst_oid, - struct ceph_object_locator *dst_oloc, - u32 dst_fadvise_flags, - u32 truncate_seq, u64 truncate_size, - u8 copy_from_flags); +extern int ceph_osdc_readpages(struct ceph_osd_client *osdc, + struct ceph_vino vino, + struct ceph_file_layout *layout, + u64 off, u64 *plen, + u32 truncate_seq, u64 truncate_size, + struct page **pages, int nr_pages, + int page_align); + +extern int ceph_osdc_writepages(struct ceph_osd_client *osdc, + struct ceph_vino vino, + struct ceph_file_layout *layout, + struct ceph_snap_context *sc, + u64 off, u64 len, + u32 truncate_seq, u64 truncate_size, + struct timespec *mtime, + struct page **pages, int nr_pages); /* watch/notify */ struct ceph_osd_linger_request * @@ -543,12 +441,12 @@ int ceph_osdc_notify_ack(struct ceph_osd_client *osdc, u64 notify_id, u64 cookie, void *payload, - u32 payload_len); + size_t payload_len); int ceph_osdc_notify(struct ceph_osd_client *osdc, struct ceph_object_id *oid, struct ceph_object_locator *oloc, void *payload, - u32 payload_len, + size_t payload_len, u32 timeout, struct page ***preply_pages, size_t *preply_len); diff --git a/include/linux/ceph/osdmap.h b/include/linux/ceph/osdmap.h index 5553019c3f..4129066099 100644 --- a/include/linux/ceph/osdmap.h +++ b/include/linux/ceph/osdmap.h @@ -1,10 +1,10 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _FS_CEPH_OSDMAP_H #define _FS_CEPH_OSDMAP_H #include #include #include +#include #include /* @@ -24,22 +24,11 @@ struct ceph_pg { uint32_t seed; }; -#define CEPH_SPG_NOSHARD -1 - -struct ceph_spg { - struct ceph_pg pgid; - s8 shard; -}; - int ceph_pg_compare(const struct ceph_pg *lhs, const struct ceph_pg *rhs); -int 
ceph_spg_compare(const struct ceph_spg *lhs, const struct ceph_spg *rhs); #define CEPH_POOL_FLAG_HASHPSPOOL (1ULL << 0) /* hash pg seed and pool id together */ #define CEPH_POOL_FLAG_FULL (1ULL << 1) /* pool is full */ -#define CEPH_POOL_FLAG_FULL_QUOTA (1ULL << 10) /* pool ran out of quota, - will set FULL too */ -#define CEPH_POOL_FLAG_NEARFULL (1ULL << 11) /* pool is nearfull */ struct ceph_pg_pool_info { struct rb_node node; @@ -92,6 +81,13 @@ void ceph_oloc_copy(struct ceph_object_locator *dest, const struct ceph_object_locator *src); void ceph_oloc_destroy(struct ceph_object_locator *oloc); +/* + * Maximum supported by kernel client object name length + * + * (probably outdated: must be >= RBD_MAX_MD_NAME_LEN -- currently 100) + */ +#define CEPH_MAX_OID_NAME_LEN 100 + /* * 51-char inline_name is long enough for all cephfs and all but one * rbd requests: in ".rbd"/"rbd_id." can be @@ -113,16 +109,17 @@ struct ceph_object_id { int name_len; }; -#define __CEPH_OID_INITIALIZER(oid) { .name = (oid).inline_name } - -#define CEPH_DEFINE_OID_ONSTACK(oid) \ - struct ceph_object_id oid = __CEPH_OID_INITIALIZER(oid) - static inline void ceph_oid_init(struct ceph_object_id *oid) { - *oid = (struct ceph_object_id) __CEPH_OID_INITIALIZER(*oid); + oid->name = oid->inline_name; + oid->name_len = 0; } +#define CEPH_OID_INIT_ONSTACK(oid) \ + ({ ceph_oid_init(&oid); oid; }) +#define CEPH_DEFINE_OID_ONSTACK(oid) \ + struct ceph_object_id oid = CEPH_OID_INIT_ONSTACK(oid) + static inline bool ceph_oid_empty(const struct ceph_object_id *oid) { return oid->name == oid->inline_name && !oid->name_len; @@ -137,17 +134,6 @@ int ceph_oid_aprintf(struct ceph_object_id *oid, gfp_t gfp, const char *fmt, ...); void ceph_oid_destroy(struct ceph_object_id *oid); -struct workspace_manager { - struct list_head idle_ws; - spinlock_t ws_lock; - /* Number of free workspaces */ - int free_ws; - /* Total number of allocated workspaces */ - atomic_t total_ws; - /* Waiters for a free workspace */ - 
wait_queue_head_t ws_wait; -}; - struct ceph_pg_mapping { struct rb_node node; struct ceph_pg pgid; @@ -156,14 +142,10 @@ struct ceph_pg_mapping { struct { int len; int osds[]; - } pg_temp, pg_upmap; + } pg_temp; struct { int osd; } primary_temp; - struct { - int len; - int from_to[][2]; - } pg_upmap_items; }; }; @@ -175,17 +157,13 @@ struct ceph_osdmap { u32 flags; /* CEPH_OSDMAP_* */ u32 max_osd; /* size of osd_state, _offload, _addr arrays */ - u32 *osd_state; /* CEPH_OSD_* */ + u8 *osd_state; /* CEPH_OSD_* */ u32 *osd_weight; /* 0 = failed, 0x10000 = 100% normal */ struct ceph_entity_addr *osd_addr; struct rb_root pg_temp; struct rb_root primary_temp; - /* remap (post-CRUSH, pre-up) */ - struct rb_root pg_upmap; /* PG := raw set */ - struct rb_root pg_upmap_items; /* from -> to within raw set */ - u32 *osd_primary_affinity; struct rb_root pg_pools; @@ -195,7 +173,8 @@ struct ceph_osdmap { * the list of osds that store+replicate them. */ struct crush_map *crush; - struct workspace_manager crush_wsm; + struct mutex crush_scratch_mutex; + int crush_scratch_ary[CEPH_PG_MAX_SIZE * 3]; }; static inline bool ceph_osd_exists(struct ceph_osdmap *map, int osd) @@ -215,7 +194,7 @@ static inline bool ceph_osd_is_down(struct ceph_osdmap *map, int osd) return !ceph_osd_is_up(map, osd); } -char *ceph_osdmap_state_str(char *str, int len, u32 state); +extern char *ceph_osdmap_state_str(char *str, int len, int state); extern u32 ceph_get_primary_affinity(struct ceph_osdmap *map, int osd); static inline struct ceph_entity_addr *ceph_osd_addr(struct ceph_osdmap *map, @@ -226,13 +205,11 @@ static inline struct ceph_entity_addr *ceph_osd_addr(struct ceph_osdmap *map, return &map->osd_addr[osd]; } -#define CEPH_PGID_ENCODING_LEN (1 + 8 + 4 + 4) - static inline int ceph_decode_pgid(void **p, void *end, struct ceph_pg *pgid) { __u8 version; - if (!ceph_has_room(p, end, CEPH_PGID_ENCODING_LEN)) { + if (!ceph_has_room(p, end, 1 + 8 + 4 + 4)) { pr_warn("incomplete pg encoding\n"); return 
-EINVAL; } @@ -251,8 +228,8 @@ static inline int ceph_decode_pgid(void **p, void *end, struct ceph_pg *pgid) } struct ceph_osdmap *ceph_osdmap_alloc(void); -struct ceph_osdmap *ceph_osdmap_decode(void **p, void *end, bool msgr2); -struct ceph_osdmap *osdmap_apply_incremental(void **p, void *end, bool msgr2, +extern struct ceph_osdmap *ceph_osdmap_decode(void **p, void *end); +struct ceph_osdmap *osdmap_apply_incremental(void **p, void *end, struct ceph_osdmap *map); extern void ceph_osdmap_destroy(struct ceph_osdmap *map); @@ -270,8 +247,6 @@ static inline void ceph_osds_init(struct ceph_osds *set) void ceph_osds_copy(struct ceph_osds *dest, const struct ceph_osds *src); -bool ceph_pg_is_split(const struct ceph_pg *pgid, u32 old_pg_num, - u32 new_pg_num); bool ceph_is_new_interval(const struct ceph_osds *old_acting, const struct ceph_osds *new_acting, const struct ceph_osds *old_up, @@ -284,56 +259,32 @@ bool ceph_is_new_interval(const struct ceph_osds *old_acting, u32 new_pg_num, bool old_sort_bitwise, bool new_sort_bitwise, - bool old_recovery_deletes, - bool new_recovery_deletes, const struct ceph_pg *pgid); bool ceph_osds_changed(const struct ceph_osds *old_acting, const struct ceph_osds *new_acting, bool any_change); -void __ceph_object_locator_to_pg(struct ceph_pg_pool_info *pi, - const struct ceph_object_id *oid, - const struct ceph_object_locator *oloc, - struct ceph_pg *raw_pgid); +/* calculate mapping of a file extent to an object */ +extern int ceph_calc_file_object_mapping(struct ceph_file_layout *layout, + u64 off, u64 len, + u64 *bno, u64 *oxoff, u64 *oxlen); + int ceph_object_locator_to_pg(struct ceph_osdmap *osdmap, - const struct ceph_object_id *oid, - const struct ceph_object_locator *oloc, + struct ceph_object_id *oid, + struct ceph_object_locator *oloc, struct ceph_pg *raw_pgid); void ceph_pg_to_up_acting_osds(struct ceph_osdmap *osdmap, - struct ceph_pg_pool_info *pi, const struct ceph_pg *raw_pgid, struct ceph_osds *up, struct ceph_osds 
*acting); -bool ceph_pg_to_primary_shard(struct ceph_osdmap *osdmap, - struct ceph_pg_pool_info *pi, - const struct ceph_pg *raw_pgid, - struct ceph_spg *spgid); int ceph_pg_to_acting_primary(struct ceph_osdmap *osdmap, const struct ceph_pg *raw_pgid); -struct crush_loc { - char *cl_type_name; - char *cl_name; -}; - -struct crush_loc_node { - struct rb_node cl_node; - struct crush_loc cl_loc; /* pointers into cl_data */ - char cl_data[]; -}; - -int ceph_parse_crush_location(char *crush_location, struct rb_root *locs); -int ceph_compare_crush_locs(struct rb_root *locs1, struct rb_root *locs2); -void ceph_clear_crush_locs(struct rb_root *locs); - -int ceph_get_crush_locality(struct ceph_osdmap *osdmap, int id, - struct rb_root *locs); - extern struct ceph_pg_pool_info *ceph_pg_pool_by_id(struct ceph_osdmap *map, u64 id); + extern const char *ceph_pg_pool_name_by_id(struct ceph_osdmap *map, u64 id); extern int ceph_pg_poolid_by_name(struct ceph_osdmap *map, const char *name); -u64 ceph_pg_pool_flags(struct ceph_osdmap *map, u64 id); #endif diff --git a/include/linux/ceph/pagelist.h b/include/linux/ceph/pagelist.h index 5dead8486f..13d71fe18b 100644 --- a/include/linux/ceph/pagelist.h +++ b/include/linux/ceph/pagelist.h @@ -1,9 +1,8 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef __FS_CEPH_PAGELIST_H #define __FS_CEPH_PAGELIST_H #include -#include +#include #include #include @@ -14,7 +13,7 @@ struct ceph_pagelist { size_t room; struct list_head free_list; size_t num_pages_free; - refcount_t refcnt; + atomic_t refcnt; }; struct ceph_pagelist_cursor { @@ -23,7 +22,16 @@ struct ceph_pagelist_cursor { size_t room; /* room remaining to reset to */ }; -struct ceph_pagelist *ceph_pagelist_alloc(gfp_t gfp_flags); +static inline void ceph_pagelist_init(struct ceph_pagelist *pl) +{ + INIT_LIST_HEAD(&pl->head); + pl->mapped_tail = NULL; + pl->length = 0; + pl->room = 0; + INIT_LIST_HEAD(&pl->free_list); + pl->num_pages_free = 0; + atomic_set(&pl->refcnt, 1); +} extern void 
ceph_pagelist_release(struct ceph_pagelist *pl); @@ -59,7 +67,7 @@ static inline int ceph_pagelist_encode_8(struct ceph_pagelist *pl, u8 v) return ceph_pagelist_append(pl, &v, 1); } static inline int ceph_pagelist_encode_string(struct ceph_pagelist *pl, - char *s, u32 len) + char *s, size_t len) { int ret = ceph_pagelist_encode_32(pl, len); if (ret) diff --git a/include/linux/ceph/rados.h b/include/linux/ceph/rados.h index 43a7a1573b..5c0da61cb7 100644 --- a/include/linux/ceph/rados.h +++ b/include/linux/ceph/rados.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef CEPH_RADOS_H #define CEPH_RADOS_H @@ -51,7 +50,7 @@ struct ceph_timespec { #define CEPH_PG_LAYOUT_LINEAR 2 #define CEPH_PG_LAYOUT_HYBRID 3 -#define CEPH_PG_MAX_SIZE 32 /* max # osds in a single pg */ +#define CEPH_PG_MAX_SIZE 16 /* max # osds in a single pg */ /* * placement group. @@ -143,10 +142,8 @@ extern const char *ceph_osd_state_name(int s); /* * osd map flag bits */ -#define CEPH_OSDMAP_NEARFULL (1<<0) /* sync writes (near ENOSPC), - not set since ~luminous */ -#define CEPH_OSDMAP_FULL (1<<1) /* no data writes (ENOSPC), - not set since ~luminous */ +#define CEPH_OSDMAP_NEARFULL (1<<0) /* sync writes (near ENOSPC) */ +#define CEPH_OSDMAP_FULL (1<<1) /* no data writes (ENOSPC) */ #define CEPH_OSDMAP_PAUSERD (1<<2) /* pause all reads */ #define CEPH_OSDMAP_PAUSEWR (1<<3) /* pause all writes */ #define CEPH_OSDMAP_PAUSEREC (1<<4) /* pause recovery */ @@ -161,10 +158,6 @@ extern const char *ceph_osd_state_name(int s); #define CEPH_OSDMAP_NOTIERAGENT (1<<13) /* disable tiering agent */ #define CEPH_OSDMAP_NOREBALANCE (1<<14) /* block osd backfill unless pg is degraded */ #define CEPH_OSDMAP_SORTBITWISE (1<<15) /* use bitwise hobject_t sort */ -#define CEPH_OSDMAP_REQUIRE_JEWEL (1<<16) /* require jewel for booting osds */ -#define CEPH_OSDMAP_REQUIRE_KRAKEN (1<<17) /* require kraken for booting osds */ -#define CEPH_OSDMAP_REQUIRE_LUMINOUS (1<<18) /* require l for booting osds */ -#define 
CEPH_OSDMAP_RECOVERY_DELETES (1<<19) /* deletes performed during recovery instead of peering */ /* * The error code to return when an OSD can't handle a write @@ -233,6 +226,7 @@ extern const char *ceph_osd_state_name(int s); \ /* fancy write */ \ f(APPEND, __CEPH_OSD_OP(WR, DATA, 6), "append") \ + f(STARTSYNC, __CEPH_OSD_OP(WR, DATA, 7), "startsync") \ f(SETTRUNC, __CEPH_OSD_OP(WR, DATA, 8), "settrunc") \ f(TRIMTRUNC, __CEPH_OSD_OP(WR, DATA, 9), "trimtrunc") \ \ @@ -258,7 +252,6 @@ extern const char *ceph_osd_state_name(int s); \ /* tiering */ \ f(COPY_FROM, __CEPH_OSD_OP(WR, DATA, 26), "copy-from") \ - f(COPY_FROM2, __CEPH_OSD_OP(WR, DATA, 45), "copy-from2") \ f(COPY_GET_CLASSIC, __CEPH_OSD_OP(RD, DATA, 27), "copy-get-classic") \ f(UNDIRTY, __CEPH_OSD_OP(WR, DATA, 28), "undirty") \ f(ISDIRTY, __CEPH_OSD_OP(RD, DATA, 29), "isdirty") \ @@ -413,18 +406,10 @@ enum { enum { CEPH_OSD_OP_FLAG_EXCL = 1, /* EXCL object create */ CEPH_OSD_OP_FLAG_FAILOK = 2, /* continue despite failure */ - CEPH_OSD_OP_FLAG_FADVISE_RANDOM = 0x4, /* the op is random */ - CEPH_OSD_OP_FLAG_FADVISE_SEQUENTIAL = 0x8, /* the op is sequential */ - CEPH_OSD_OP_FLAG_FADVISE_WILLNEED = 0x10,/* data will be accessed in - the near future */ - CEPH_OSD_OP_FLAG_FADVISE_DONTNEED = 0x20,/* data will not be accessed - in the near future */ - CEPH_OSD_OP_FLAG_FADVISE_NOCACHE = 0x40,/* data will be accessed only - once by this client */ }; #define EOLDSNAPC ERESTART /* ORDERSNAP flag set; writer has old snapc*/ -#define EBLOCKLISTED ESHUTDOWN /* blocklisted */ +#define EBLACKLISTED ESHUTDOWN /* blacklisted */ /* xattr comparison */ enum { @@ -442,16 +427,6 @@ enum { CEPH_OSD_CMPXATTR_MODE_U64 = 2 }; -enum { - CEPH_OSD_COPY_FROM_FLAG_FLUSH = 1, /* part of a flush operation */ - CEPH_OSD_COPY_FROM_FLAG_IGNORE_OVERLAY = 2, /* ignore pool overlay */ - CEPH_OSD_COPY_FROM_FLAG_IGNORE_CACHE = 4, /* ignore osd cache logic */ - CEPH_OSD_COPY_FROM_FLAG_MAP_SNAP_CLONE = 8, /* map snap direct to - * cloneid */ - 
CEPH_OSD_COPY_FROM_FLAG_RWORDERED = 16, /* order with write */ - CEPH_OSD_COPY_FROM_FLAG_TRUNCATE_SEQ = 32, /* send truncate_{seq,size} */ -}; - enum { CEPH_OSD_WATCH_OP_UNWATCH = 0, CEPH_OSD_WATCH_OP_LEGACY_WATCH = 1, @@ -464,25 +439,6 @@ enum { const char *ceph_osd_watch_op_name(int o); -enum { - CEPH_OSD_ALLOC_HINT_FLAG_SEQUENTIAL_WRITE = 1, - CEPH_OSD_ALLOC_HINT_FLAG_RANDOM_WRITE = 2, - CEPH_OSD_ALLOC_HINT_FLAG_SEQUENTIAL_READ = 4, - CEPH_OSD_ALLOC_HINT_FLAG_RANDOM_READ = 8, - CEPH_OSD_ALLOC_HINT_FLAG_APPEND_ONLY = 16, - CEPH_OSD_ALLOC_HINT_FLAG_IMMUTABLE = 32, - CEPH_OSD_ALLOC_HINT_FLAG_SHORTLIVED = 64, - CEPH_OSD_ALLOC_HINT_FLAG_LONGLIVED = 128, - CEPH_OSD_ALLOC_HINT_FLAG_COMPRESSIBLE = 256, - CEPH_OSD_ALLOC_HINT_FLAG_INCOMPRESSIBLE = 512, -}; - -enum { - CEPH_OSD_BACKOFF_OP_BLOCK = 1, - CEPH_OSD_BACKOFF_OP_ACK_BLOCK = 2, - CEPH_OSD_BACKOFF_OP_UNBLOCK = 3, -}; - /* * an individual object operation. each may be accompanied by some data * payload @@ -530,19 +486,7 @@ struct ceph_osd_op { struct { __le64 expected_object_size; __le64 expected_write_size; - __le32 flags; /* CEPH_OSD_OP_ALLOC_HINT_FLAG_* */ } __attribute__ ((packed)) alloc_hint; - struct { - __le64 snapid; - __le64 src_version; - __u8 flags; /* CEPH_OSD_COPY_FROM_FLAG_* */ - /* - * CEPH_OSD_OP_FLAG_FADVISE_*: fadvise flags - * for src object, flags for dest object are in - * ceph_osd_op::flags. 
- */ - __le32 src_fadvise_flags; - } __attribute__ ((packed)) copy_from; }; __le32 payload_len; } __attribute__ ((packed)); diff --git a/include/linux/ceph/string_table.h b/include/linux/ceph/string_table.h index a4a9962d1e..1b02c96daf 100644 --- a/include/linux/ceph/string_table.h +++ b/include/linux/ceph/string_table.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _FS_CEPH_STRING_TABLE_H #define _FS_CEPH_STRING_TABLE_H diff --git a/include/linux/ceph/types.h b/include/linux/ceph/types.h index bd3d532902..d3ff1cf2d2 100644 --- a/include/linux/ceph/types.h +++ b/include/linux/ceph/types.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _FS_CEPH_TYPES_H #define _FS_CEPH_TYPES_H @@ -24,7 +23,6 @@ struct ceph_vino { /* context for the caps reservation mechanism */ struct ceph_cap_reservation { int count; - int used; }; diff --git a/include/linux/cfag12864b.h b/include/linux/cfag12864b.h index 6617d9c68d..b454dfce60 100644 --- a/include/linux/cfag12864b.h +++ b/include/linux/cfag12864b.h @@ -1,11 +1,25 @@ -/* SPDX-License-Identifier: GPL-2.0 */ /* * Filename: cfag12864b.h * Version: 0.1.0 * Description: cfag12864b LCD driver header + * License: GPLv2 * - * Author: Copyright (C) Miguel Ojeda + * Author: Copyright (C) Miguel Ojeda Sandonis * Date: 2006-10-12 + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + * */ #ifndef _CFAG12864B_H_ diff --git a/include/linux/cgroup-defs.h b/include/linux/cgroup-defs.h index db2e147e06..d75785bc61 100644 --- a/include/linux/cgroup-defs.h +++ b/include/linux/cgroup-defs.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ /* * linux/cgroup-defs.h - basic definitions for cgroup * @@ -14,13 +13,9 @@ #include #include #include -#include #include #include -#include #include -#include -#include #ifdef CONFIG_CGROUPS @@ -32,7 +27,6 @@ struct kernfs_node; struct kernfs_ops; struct kernfs_open_file; struct seq_file; -struct poll_table_struct; #define MAX_CGROUP_TYPE_NAMELEN 32 #define MAX_CGROUP_ROOT_NAMELEN 64 @@ -52,7 +46,6 @@ enum { CSS_ONLINE = (1 << 1), /* between ->css_online() and ->css_offline() */ CSS_RELEASED = (1 << 2), /* refcnt reached zero, released */ CSS_VISIBLE = (1 << 3), /* css is visible to userland */ - CSS_DYING = (1 << 4), /* css is dying */ }; /* bits in struct cgroup flags field */ @@ -65,55 +58,20 @@ enum { * specified at mount time and thus is implemented here. */ CGRP_CPUSET_CLONE_CHILDREN, - - /* Control group has to be frozen. */ - CGRP_FREEZE, - - /* Cgroup is frozen. */ - CGRP_FROZEN, - - /* Control group has to be killed. */ - CGRP_KILL, }; /* cgroup_root->flags */ enum { CGRP_ROOT_NOPREFIX = (1 << 1), /* mounted subsystems have no named prefix */ CGRP_ROOT_XATTR = (1 << 2), /* supports extended attributes */ - - /* - * Consider namespaces as delegation boundaries. If this flag is - * set, controller specific interface files in a namespace root - * aren't writeable from inside the namespace. - */ - CGRP_ROOT_NS_DELEGATE = (1 << 3), - - /* - * Enable cpuset controller in v1 cgroup to use v2 behavior. - */ - CGRP_ROOT_CPUSET_V2_MODE = (1 << 4), - - /* - * Enable legacy local memory.events. 
- */ - CGRP_ROOT_MEMORY_LOCAL_EVENTS = (1 << 5), - - /* - * Enable recursive subtree protection - */ - CGRP_ROOT_MEMORY_RECURSIVE_PROT = (1 << 6), }; /* cftype->flags */ enum { CFTYPE_ONLY_ON_ROOT = (1 << 0), /* only create on root cgrp */ CFTYPE_NOT_ON_ROOT = (1 << 1), /* don't create on root cgrp */ - CFTYPE_NS_DELEGATABLE = (1 << 2), /* writeable beyond delegation boundaries */ - CFTYPE_NO_PREFIX = (1 << 3), /* (DON'T USE FOR NEW FILES) no subsys prefix */ CFTYPE_WORLD_WRITABLE = (1 << 4), /* (DON'T USE FOR NEW FILES) S_IWUGO */ - CFTYPE_DEBUG = (1 << 5), /* create when cgroup_debug */ - CFTYPE_PRESSURE = (1 << 6), /* only if pressure feature is enabled */ /* internal flags, do not use outside cgroup core proper */ __CFTYPE_ONLY_ON_DFL = (1 << 16), /* only on default hierarchy */ @@ -128,8 +86,6 @@ enum { struct cgroup_file { /* do not access any fields from outside cgroup core */ struct kernfs_node *kn; - unsigned long notified_at; - struct timer_list notify_timer; }; /* @@ -149,13 +105,13 @@ struct cgroup_subsys_state { /* reference count - access via css_[try]get() and css_put() */ struct percpu_ref refcnt; + /* PI: the parent css */ + struct cgroup_subsys_state *parent; + /* siblings list anchored at the parent's ->children */ struct list_head sibling; struct list_head children; - /* flush target list anchored at cgrp->rstat_css_list */ - struct list_head rstat_css_node; - /* * PI: Subsys-unique ID. 0 is unused and root is always 1. The * matching css can be looked up using css_from_id(). @@ -179,14 +135,8 @@ struct cgroup_subsys_state { atomic_t online_cnt; /* percpu_ref killing and RCU release */ + struct rcu_head rcu_head; struct work_struct destroy_work; - struct rcu_work destroy_rwork; - - /* - * PI: the parent css. Placed here for cache proximity to following - * fields of the containing structure. - */ - struct cgroup_subsys_state *parent; }; /* @@ -197,29 +147,14 @@ struct cgroup_subsys_state { * set for a task. 
*/ struct css_set { - /* - * Set of subsystem states, one for each subsystem. This array is - * immutable after creation apart from the init_css_set during - * subsystem registration (at boot time). - */ - struct cgroup_subsys_state *subsys[CGROUP_SUBSYS_COUNT]; - - /* reference count */ - refcount_t refcount; + /* Reference count */ + atomic_t refcount; /* - * For a domain cgroup, the following points to self. If threaded, - * to the matching cset of the nearest domain ancestor. The - * dom_cset provides access to the domain cgroup and its csses to - * which domain level resource consumptions should be charged. + * List running through all cgroup groups in the same hash + * slot. Protected by css_set_lock */ - struct css_set *dom_cset; - - /* the default cgroup associated with this css_set */ - struct cgroup *dfl_cgrp; - - /* internal task count, protected by css_set_lock */ - int nr_tasks; + struct hlist_node hlist; /* * Lists running through all tasks using this cgroup group. @@ -230,29 +165,6 @@ struct css_set { */ struct list_head tasks; struct list_head mg_tasks; - struct list_head dying_tasks; - - /* all css_task_iters currently walking this cset */ - struct list_head task_iters; - - /* - * On the default hierarchy, ->subsys[ssid] may point to a css - * attached to an ancestor instead of the cgroup this css_set is - * associated with. The following node is anchored at - * ->subsys[ssid]->cgroup->e_csets[ssid] and provides a way to - * iterate through all css's attached to a given cgroup. - */ - struct list_head e_cset_node[CGROUP_SUBSYS_COUNT]; - - /* all threaded csets whose ->dom_cset points to this cset */ - struct list_head threaded_csets; - struct list_head threaded_csets_node; - - /* - * List running through all cgroup groups in the same hash - * slot. 
Protected by css_set_lock - */ - struct hlist_node hlist; /* * List of cgrp_cset_links pointing at cgroups referenced from this @@ -260,6 +172,16 @@ struct css_set { */ struct list_head cgrp_links; + /* the default cgroup associated with this css_set */ + struct cgroup *dfl_cgrp; + + /* + * Set of subsystem states, one for each subsystem. This array is + * immutable after creation apart from the init_css_set during + * subsystem registration (at boot time). + */ + struct cgroup_subsys_state *subsys[CGROUP_SUBSYS_COUNT]; + /* * List of csets participating in the on-going migration either as * source or destination. Protected by cgroup_mutex. @@ -278,6 +200,18 @@ struct css_set { struct cgroup *mg_dst_cgrp; struct css_set *mg_dst_cset; + /* + * On the default hierarhcy, ->subsys[ssid] may point to a css + * attached to an ancestor instead of the cgroup this css_set is + * associated with. The following node is anchored at + * ->subsys[ssid]->cgroup->e_csets[ssid] and provides a way to + * iterate through all css's attached to a given cgroup. + */ + struct list_head e_cset_node[CGROUP_SUBSYS_COUNT]; + + /* all css_task_iters currently walking this cset */ + struct list_head task_iters; + /* dead and being drained, ignore for migration */ bool dead; @@ -285,84 +219,22 @@ struct css_set { struct rcu_head rcu_head; }; -struct cgroup_base_stat { - struct task_cputime cputime; -}; - -/* - * rstat - cgroup scalable recursive statistics. Accounting is done - * per-cpu in cgroup_rstat_cpu which is then lazily propagated up the - * hierarchy on reads. - * - * When a stat gets updated, the cgroup_rstat_cpu and its ancestors are - * linked into the updated tree. On the following read, propagation only - * considers and consumes the updated tree. This makes reading O(the - * number of descendants which have been active since last read) instead of - * O(the total number of descendants). 
- * - * This is important because there can be a lot of (draining) cgroups which - * aren't active and stat may be read frequently. The combination can - * become very expensive. By propagating selectively, increasing reading - * frequency decreases the cost of each read. - * - * This struct hosts both the fields which implement the above - - * updated_children and updated_next - and the fields which track basic - * resource statistics on top of it - bsync, bstat and last_bstat. - */ -struct cgroup_rstat_cpu { - /* - * ->bsync protects ->bstat. These are the only fields which get - * updated in the hot path. - */ - struct u64_stats_sync bsync; - struct cgroup_base_stat bstat; - - /* - * Snapshots at the last reading. These are used to calculate the - * deltas to propagate to the global counters. - */ - struct cgroup_base_stat last_bstat; - - /* - * Child cgroups with stat updates on this cpu since the last read - * are linked on the parent's ->updated_children through - * ->updated_next. - * - * In addition to being more compact, singly-linked list pointing - * to the cgroup makes it unnecessary for each per-cpu struct to - * point back to the associated cgroup. - * - * Protected by per-cpu cgroup_rstat_cpu_lock. - */ - struct cgroup *updated_children; /* terminated by self cgroup */ - struct cgroup *updated_next; /* NULL iff not on the list */ -}; - -struct cgroup_freezer_state { - /* Should the cgroup and its descendants be frozen. */ - bool freeze; - - /* Should the cgroup actually be frozen? */ - int e_freeze; - - /* Fields below are protected by css_set_lock */ - - /* Number of frozen descendant cgroups */ - int nr_frozen_descendants; - - /* - * Number of tasks, which are counted as frozen: - * frozen, SIGSTOPped, and PTRACEd. 
- */ - int nr_frozen_tasks; -}; - struct cgroup { /* self css with NULL ->ss, points back to this cgroup */ struct cgroup_subsys_state self; unsigned long flags; /* "unsigned long" so bitops work */ + /* + * idr allocated in-hierarchy ID. + * + * ID 0 is not used, the ID of the root cgroup is always 1, and a + * new cgroup will be assigned with a smallest available ID. + * + * Allocating/Removing ID must be protected by cgroup_mutex. + */ + int id; + /* * The depth this cgroup is at. The root is at depth zero and each * step down the hierarchy increments the level. This along with @@ -371,40 +243,13 @@ struct cgroup { */ int level; - /* Maximum allowed descent tree depth */ - int max_depth; - - /* - * Keep track of total numbers of visible and dying descent cgroups. - * Dying cgroups are cgroups which were deleted by a user, - * but are still existing because someone else is holding a reference. - * max_descendants is a maximum allowed number of descent cgroups. - * - * nr_descendants and nr_dying_descendants are protected - * by cgroup_mutex and css_set_lock. It's fine to read them holding - * any of cgroup_mutex and css_set_lock; for writing both locks - * should be held. - */ - int nr_descendants; - int nr_dying_descendants; - int max_descendants; - /* * Each non-empty css_set associated with this cgroup contributes - * one to nr_populated_csets. The counter is zero iff this cgroup - * doesn't have any tasks. - * - * All children which have non-zero nr_populated_csets and/or - * nr_populated_children of their own contribute one to either - * nr_populated_domain_children or nr_populated_threaded_children - * depending on their type. Each counter is zero iff all cgroups - * of the type in the subtree proper don't have any tasks. + * one to populated_cnt. All children with non-zero popuplated_cnt + * of their own contribute one. The count is zero iff there's no + * task in this cgroup or its subtree. 
*/ - int nr_populated_csets; - int nr_populated_domain_children; - int nr_populated_threaded_children; - - int nr_threaded_children; /* # of live threaded child cgroups */ + int populated_cnt; struct kernfs_node *kn; /* cgroup kernfs entry */ struct cgroup_file procs_file; /* handle for "cgroup.procs" */ @@ -442,25 +287,6 @@ struct cgroup { */ struct list_head e_csets[CGROUP_SUBSYS_COUNT]; - /* - * If !threaded, self. If threaded, it points to the nearest - * domain ancestor. Inside a threaded subtree, cgroups are exempt - * from process granularity and no-internal-task constraint. - * Domain level resource consumptions which aren't tied to a - * specific task are charged to the dom_cgrp. - */ - struct cgroup *dom_cgrp; - struct cgroup *old_dom_cgrp; /* used while enabling threaded */ - - /* per-cpu recursive resource statistics */ - struct cgroup_rstat_cpu __percpu *rstat_cpu; - struct list_head rstat_css_list; - - /* cgroup basic resource statistics */ - struct cgroup_base_stat last_bstat; - struct cgroup_base_stat bstat; - struct prev_cputime prev_cputime; /* for printing out cputime */ - /* * list of pidlists, up to two for each namespace (one for procs, one * for tasks); created on demand. @@ -474,20 +300,8 @@ struct cgroup { /* used to schedule release agent */ struct work_struct release_agent_work; - /* used to track pressure stalls */ - struct psi_group psi; - - /* used to store eBPF programs */ - struct cgroup_bpf bpf; - - /* If there is block congestion on this cgroup. 
*/ - atomic_t congestion_count; - - /* Used to store internal freezer state */ - struct cgroup_freezer_state freezer; - /* ids of the ancestors at each level including self */ - u64 ancestor_ids[]; + int ancestor_ids[]; }; /* @@ -508,7 +322,7 @@ struct cgroup_root { struct cgroup cgrp; /* for cgrp->ancestor_ids[0] */ - u64 cgrp_ancestor_id_storage; + int cgrp_ancestor_id_storage; /* Number of cgroups in the hierarchy, used only for /proc/cgroups */ atomic_t nr_cgrps; @@ -519,6 +333,9 @@ struct cgroup_root { /* Hierarchy-specific flags */ unsigned int flags; + /* IDs for cgroups in this hierarchy */ + struct idr cgroup_idr; + /* The path to use for release notifications. */ char release_agent_path[PATH_MAX]; @@ -567,9 +384,6 @@ struct cftype { struct list_head node; /* anchored at ss->cfts */ struct kernfs_ops *kf_ops; - int (*open)(struct kernfs_open_file *of); - void (*release)(struct kernfs_open_file *of); - /* * read_u64() is a shortcut for the common case of returning a * single integer. Use it in place of read() @@ -610,17 +424,14 @@ struct cftype { ssize_t (*write)(struct kernfs_open_file *of, char *buf, size_t nbytes, loff_t off); - __poll_t (*poll)(struct kernfs_open_file *of, - struct poll_table_struct *pt); - #ifdef CONFIG_DEBUG_LOCK_ALLOC struct lock_class_key lockdep_key; #endif -}; +} __do_const; /* * Control Group subsystem type. 
- * See Documentation/admin-guide/cgroup-v1/cgroups.rst for details + * See Documentation/cgroups/cgroups.txt for details */ struct cgroup_subsys { struct cgroup_subsys_state *(*css_alloc)(struct cgroup_subsys_state *parent_css); @@ -629,20 +440,16 @@ struct cgroup_subsys { void (*css_released)(struct cgroup_subsys_state *css); void (*css_free)(struct cgroup_subsys_state *css); void (*css_reset)(struct cgroup_subsys_state *css); - void (*css_rstat_flush)(struct cgroup_subsys_state *css, int cpu); - int (*css_extra_stat_show)(struct seq_file *seq, - struct cgroup_subsys_state *css); int (*can_attach)(struct cgroup_taskset *tset); void (*cancel_attach)(struct cgroup_taskset *tset); void (*attach)(struct cgroup_taskset *tset); void (*post_attach)(void); - int (*can_fork)(struct task_struct *task, - struct css_set *cset); - void (*cancel_fork)(struct task_struct *task, struct css_set *cset); + int (*can_fork)(struct task_struct *task); + void (*cancel_fork)(struct task_struct *task); void (*fork)(struct task_struct *task); void (*exit)(struct task_struct *task); - void (*release)(struct task_struct *task); + void (*free)(struct task_struct *task); void (*bind)(struct cgroup_subsys_state *root_css); bool early_init:1; @@ -661,18 +468,21 @@ struct cgroup_subsys { bool implicit_on_dfl:1; /* - * If %true, the controller, supports threaded mode on the default - * hierarchy. In a threaded subtree, both process granularity and - * no-internal-process constraint are ignored and a threaded - * controllers should be able to handle that. + * If %false, this subsystem is properly hierarchical - + * configuration, resource accounting and restriction on a parent + * cgroup cover those of its children. If %true, hierarchy support + * is broken in some ways - some subsystems ignore hierarchy + * completely while others are only implemented half-way. 
* - * Note that as an implicit controller is automatically enabled on - * all cgroups on the default hierarchy, it should also be - * threaded. implicit && !threaded is not supported. + * It's now disallowed to create nested cgroups if the subsystem is + * broken and cgroup core will emit a warning message on such + * cases. Eventually, all subsystems will be made properly + * hierarchical and this will go away. */ - bool threaded:1; + bool broken_hierarchy:1; + bool warned_broken_hierarchy:1; - /* the following two fields are initialized automatically during boot */ + /* the following two fields are initialized automtically during boot */ int id; const char *name; @@ -714,8 +524,8 @@ extern struct percpu_rw_semaphore cgroup_threadgroup_rwsem; * cgroup_threadgroup_change_begin - threadgroup exclusion for cgroups * @tsk: target task * - * Allows cgroup operations to synchronize against threadgroup changes - * using a percpu_rw_semaphore. + * Called from threadgroup_change_begin() and allows cgroup operations to + * synchronize against threadgroup changes using a percpu_rw_semaphore. */ static inline void cgroup_threadgroup_change_begin(struct task_struct *tsk) { @@ -726,7 +536,8 @@ static inline void cgroup_threadgroup_change_begin(struct task_struct *tsk) * cgroup_threadgroup_change_end - threadgroup exclusion for cgroups * @tsk: target task * - * Counterpart of cgroup_threadcgroup_change_begin(). + * Called from threadgroup_change_end(). Counterpart of + * cgroup_threadcgroup_change_begin(). 
*/ static inline void cgroup_threadgroup_change_end(struct task_struct *tsk) { @@ -737,11 +548,7 @@ static inline void cgroup_threadgroup_change_end(struct task_struct *tsk) #define CGROUP_SUBSYS_COUNT 0 -static inline void cgroup_threadgroup_change_begin(struct task_struct *tsk) -{ - might_sleep(); -} - +static inline void cgroup_threadgroup_change_begin(struct task_struct *tsk) {} static inline void cgroup_threadgroup_change_end(struct task_struct *tsk) {} #endif /* CONFIG_CGROUPS */ @@ -752,54 +559,103 @@ static inline void cgroup_threadgroup_change_end(struct task_struct *tsk) {} * sock_cgroup_data is embedded at sock->sk_cgrp_data and contains * per-socket cgroup information except for memcg association. * - * On legacy hierarchies, net_prio and net_cls controllers directly - * set attributes on each sock which can then be tested by the network - * layer. On the default hierarchy, each sock is associated with the - * cgroup it was created in and the networking layer can match the - * cgroup directly. + * On legacy hierarchies, net_prio and net_cls controllers directly set + * attributes on each sock which can then be tested by the network layer. + * On the default hierarchy, each sock is associated with the cgroup it was + * created in and the networking layer can match the cgroup directly. + * + * To avoid carrying all three cgroup related fields separately in sock, + * sock_cgroup_data overloads (prioidx, classid) and the cgroup pointer. + * On boot, sock_cgroup_data records the cgroup that the sock was created + * in so that cgroup2 matches can be made; however, once either net_prio or + * net_cls starts being used, the area is overriden to carry prioidx and/or + * classid. The two modes are distinguished by whether the lowest bit is + * set. Clear bit indicates cgroup pointer while set bit prioidx and + * classid. + * + * While userland may start using net_prio or net_cls at any time, once + * either is used, cgroup2 matching no longer works. 
There is no reason to + * mix the two and this is in line with how legacy and v2 compatibility is + * handled. On mode switch, cgroup references which are already being + * pointed to by socks may be leaked. While this can be remedied by adding + * synchronization around sock_cgroup_data, given that the number of leaked + * cgroups is bound and highly unlikely to be high, this seems to be the + * better trade-off. */ struct sock_cgroup_data { - struct cgroup *cgroup; /* v2 */ -#ifdef CONFIG_CGROUP_NET_CLASSID - u32 classid; /* v1 */ -#endif -#ifdef CONFIG_CGROUP_NET_PRIO - u16 prioidx; /* v1 */ + union { +#ifdef __LITTLE_ENDIAN + struct { + u8 is_data; + u8 padding; + u16 prioidx; + u32 classid; + } __packed; +#else + struct { + u32 classid; + u16 prioidx; + u8 padding; + u8 is_data; + } __packed; #endif + u64 val; + }; }; -static inline u16 sock_cgroup_prioidx(const struct sock_cgroup_data *skcd) +/* + * There's a theoretical window where the following accessors race with + * updaters and return part of the previous pointer as the prioidx or + * classid. Such races are short-lived and the result isn't critical. + */ +static inline u16 sock_cgroup_prioidx(struct sock_cgroup_data *skcd) { -#ifdef CONFIG_CGROUP_NET_PRIO - return READ_ONCE(skcd->prioidx); -#else - return 1; -#endif + /* fallback to 1 which is always the ID of the root cgroup */ + return (skcd->is_data & 1) ? skcd->prioidx : 1; } -static inline u32 sock_cgroup_classid(const struct sock_cgroup_data *skcd) +static inline u32 sock_cgroup_classid(struct sock_cgroup_data *skcd) { -#ifdef CONFIG_CGROUP_NET_CLASSID - return READ_ONCE(skcd->classid); -#else - return 0; -#endif + /* fallback to 0 which is the unconfigured default classid */ + return (skcd->is_data & 1) ? skcd->classid : 0; } +/* + * If invoked concurrently, the updaters may clobber each other. The + * caller is responsible for synchronization. 
+ */ static inline void sock_cgroup_set_prioidx(struct sock_cgroup_data *skcd, u16 prioidx) { -#ifdef CONFIG_CGROUP_NET_PRIO - WRITE_ONCE(skcd->prioidx, prioidx); -#endif + struct sock_cgroup_data skcd_buf = {{ .val = READ_ONCE(skcd->val) }}; + + if (sock_cgroup_prioidx(&skcd_buf) == prioidx) + return; + + if (!(skcd_buf.is_data & 1)) { + skcd_buf.val = 0; + skcd_buf.is_data = 1; + } + + skcd_buf.prioidx = prioidx; + WRITE_ONCE(skcd->val, skcd_buf.val); /* see sock_cgroup_ptr() */ } static inline void sock_cgroup_set_classid(struct sock_cgroup_data *skcd, u32 classid) { -#ifdef CONFIG_CGROUP_NET_CLASSID - WRITE_ONCE(skcd->classid, classid); -#endif + struct sock_cgroup_data skcd_buf = {{ .val = READ_ONCE(skcd->val) }}; + + if (sock_cgroup_classid(&skcd_buf) == classid) + return; + + if (!(skcd_buf.is_data & 1)) { + skcd_buf.val = 0; + skcd_buf.is_data = 1; + } + + skcd_buf.classid = classid; + WRITE_ONCE(skcd->val, skcd_buf.val); /* see sock_cgroup_ptr() */ } #else /* CONFIG_SOCK_CGROUP_DATA */ diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h index 75c151413f..307ae63ef2 100644 --- a/include/linux/cgroup.h +++ b/include/linux/cgroup.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_CGROUP_H #define _LINUX_CGROUP_H /* @@ -18,21 +17,18 @@ #include #include #include +#include #include #include #include #include -#include -#include #include -struct kernel_clone_args; - #ifdef CONFIG_CGROUPS /* - * All weight knobs on the default hierarchy should use the following min, + * All weight knobs on the default hierarhcy should use the following min, * default and max values. The default value is the logarithmic center of * MIN and MAX and allows 100x to be expressed in both directions. 
*/ @@ -40,30 +36,18 @@ struct kernel_clone_args; #define CGROUP_WEIGHT_DFL 100 #define CGROUP_WEIGHT_MAX 10000 -/* walk only threadgroup leaders */ -#define CSS_TASK_ITER_PROCS (1U << 0) -/* walk all threaded css_sets in the domain */ -#define CSS_TASK_ITER_THREADED (1U << 1) - -/* internal flags */ -#define CSS_TASK_ITER_SKIPPED (1U << 16) - /* a css_task_iter should be treated as an opaque object */ struct css_task_iter { struct cgroup_subsys *ss; - unsigned int flags; struct list_head *cset_pos; struct list_head *cset_head; - struct list_head *tcset_pos; - struct list_head *tcset_head; - struct list_head *task_pos; + struct list_head *tasks_head; + struct list_head *mg_tasks_head; - struct list_head *cur_tasks_head; struct css_set *cur_cset; - struct css_set *cur_dcset; struct task_struct *cur_task; struct list_head iters_node; /* css_set->task_iters */ }; @@ -97,8 +81,6 @@ extern struct css_set init_css_set; bool css_has_online_children(struct cgroup_subsys_state *css); struct cgroup_subsys_state *css_from_id(int id, struct cgroup_subsys *ss); -struct cgroup_subsys_state *cgroup_e_css(struct cgroup *cgroup, - struct cgroup_subsys *ss); struct cgroup_subsys_state *cgroup_get_e_css(struct cgroup *cgroup, struct cgroup_subsys *ss); struct cgroup_subsys_state *css_tryget_online_from_dir(struct dentry *dentry, @@ -121,21 +103,15 @@ int proc_cgroup_show(struct seq_file *m, struct pid_namespace *ns, struct pid *pid, struct task_struct *tsk); void cgroup_fork(struct task_struct *p); -extern int cgroup_can_fork(struct task_struct *p, - struct kernel_clone_args *kargs); -extern void cgroup_cancel_fork(struct task_struct *p, - struct kernel_clone_args *kargs); -extern void cgroup_post_fork(struct task_struct *p, - struct kernel_clone_args *kargs); +extern int cgroup_can_fork(struct task_struct *p); +extern void cgroup_cancel_fork(struct task_struct *p); +extern void cgroup_post_fork(struct task_struct *p); void cgroup_exit(struct task_struct *p); -void 
cgroup_release(struct task_struct *p); void cgroup_free(struct task_struct *p); int cgroup_init_early(void); int cgroup_init(void); -int cgroup_parse_float(const char *input, unsigned dec_shift, s64 *v); - /* * Iteration helpers and macros. */ @@ -153,7 +129,7 @@ struct task_struct *cgroup_taskset_first(struct cgroup_taskset *tset, struct task_struct *cgroup_taskset_next(struct cgroup_taskset *tset, struct cgroup_subsys_state **dst_cssp); -void css_task_iter_start(struct cgroup_subsys_state *css, unsigned int flags, +void css_task_iter_start(struct cgroup_subsys_state *css, struct css_task_iter *it); struct task_struct *css_task_iter_next(struct css_task_iter *it); void css_task_iter_end(struct css_task_iter *it); @@ -290,7 +266,7 @@ void css_task_iter_end(struct css_task_iter *it); * cgroup_taskset_for_each_leader - iterate group leaders in a cgroup_taskset * @leader: the loop cursor * @dst_css: the destination css - * @tset: taskset to iterate + * @tset: takset to iterate * * Iterate threadgroup leaders of @tset. For single-task migrations, @tset * may not contain any. @@ -307,11 +283,6 @@ void css_task_iter_end(struct css_task_iter *it); * Inline functions. */ -static inline u64 cgroup_id(const struct cgroup *cgrp) -{ - return cgrp->kn->id; -} - /** * css_get - obtain a reference on the specified css * @css: target css @@ -372,26 +343,6 @@ static inline bool css_tryget_online(struct cgroup_subsys_state *css) return true; } -/** - * css_is_dying - test whether the specified css is dying - * @css: target css - * - * Test whether @css is in the process of offlining or already offline. In - * most cases, ->css_online() and ->css_offline() callbacks should be - * enough; however, the actual offline operations are RCU delayed and this - * test returns %true also when @css is scheduled to be offlined. - * - * This is useful, for example, when the use case requires synchronous - * behavior with respect to cgroup removal. 
cgroup removal schedules css - * offlining but the css can seem alive while the operation is being - * delayed. If the delay affects user visible semantics, this test can be - * used to resolve the situation. - */ -static inline bool css_is_dying(struct cgroup_subsys_state *css) -{ - return !(css->flags & CSS_NO_REF) && percpu_ref_is_dying(&css->refcnt); -} - /** * css_put - put a css reference * @css: target css @@ -417,16 +368,6 @@ static inline void css_put_many(struct cgroup_subsys_state *css, unsigned int n) percpu_ref_put_many(&css->refcnt, n); } -static inline void cgroup_get(struct cgroup *cgrp) -{ - css_get(&cgrp->self); -} - -static inline bool cgroup_tryget(struct cgroup *cgrp) -{ - return css_tryget(&cgrp->self); -} - static inline void cgroup_put(struct cgroup *cgrp) { css_put(&cgrp->self); @@ -501,7 +442,7 @@ static inline struct cgroup_subsys_state *task_css(struct task_struct *task, * * Find the css for the (@task, @subsys_id) combination, increment a * reference on and return it. This function is guaranteed to return a - * valid css. The returned css may already have been offlined. + * valid css. */ static inline struct cgroup_subsys_state * task_get_css(struct task_struct *task, int subsys_id) @@ -511,13 +452,7 @@ task_get_css(struct task_struct *task, int subsys_id) rcu_read_lock(); while (true) { css = task_css(task, subsys_id); - /* - * Can't use css_tryget_online() here. A task which has - * PF_EXITING set may stay associated with an offline css. - * If such task calls this function, css_tryget_online() - * will keep failing. 
- */ - if (likely(css_tryget(css))) + if (likely(css_tryget_online(css))) break; cpu_relax(); } @@ -545,20 +480,6 @@ static inline struct cgroup *task_cgroup(struct task_struct *task, return task_css(task, subsys_id)->cgroup; } -static inline struct cgroup *task_dfl_cgroup(struct task_struct *task) -{ - return task_css_set(task)->dfl_cgrp; -} - -static inline struct cgroup *cgroup_parent(struct cgroup *cgrp) -{ - struct cgroup_subsys_state *parent_css = cgrp->self.parent; - - if (parent_css) - return container_of(parent_css, struct cgroup, self); - return NULL; -} - /** * cgroup_is_descendant - test ancestry * @cgrp: the cgroup to be tested @@ -573,28 +494,7 @@ static inline bool cgroup_is_descendant(struct cgroup *cgrp, { if (cgrp->root != ancestor->root || cgrp->level < ancestor->level) return false; - return cgrp->ancestor_ids[ancestor->level] == cgroup_id(ancestor); -} - -/** - * cgroup_ancestor - find ancestor of cgroup - * @cgrp: cgroup to find ancestor of - * @ancestor_level: level of ancestor to find starting from root - * - * Find ancestor of cgroup at specified level starting from root if it exists - * and return pointer to it. Return NULL if @cgrp doesn't have ancestor at - * @ancestor_level. - * - * This function is safe to call as long as @cgrp is accessible. 
- */ -static inline struct cgroup *cgroup_ancestor(struct cgroup *cgrp, - int ancestor_level) -{ - if (cgrp->level < ancestor_level) - return NULL; - while (cgrp && cgrp->level > ancestor_level) - cgrp = cgroup_parent(cgrp); - return cgrp; + return cgrp->ancestor_ids[ancestor->level] == ancestor->id; } /** @@ -617,14 +517,13 @@ static inline bool task_under_cgroup_hierarchy(struct task_struct *task, /* no synchronization, the result can only be used as a hint */ static inline bool cgroup_is_populated(struct cgroup *cgrp) { - return cgrp->nr_populated_csets + cgrp->nr_populated_domain_children + - cgrp->nr_populated_threaded_children; + return cgrp->populated_cnt; } /* returns ino associated with a cgroup */ static inline ino_t cgroup_ino(struct cgroup *cgrp) { - return kernfs_ino(cgrp->kn); + return cgrp->kn->ino; } /* cft/css accessors for cftype->write() operation */ @@ -671,13 +570,6 @@ static inline void pr_cont_cgroup_path(struct cgroup *cgrp) pr_cont_kernfs_path(cgrp->kn); } -static inline struct psi_group *cgroup_psi(struct cgroup *cgrp) -{ - return &cgrp->psi; -} - -bool cgroup_psi_enabled(void); - static inline void cgroup_init_kthreadd(void) { /* @@ -697,15 +589,11 @@ static inline void cgroup_kthread_ready(void) current->no_cgroup_migration = 0; } -void cgroup_path_from_kernfs_id(u64 id, char *buf, size_t buflen); -struct cgroup *cgroup_get_from_id(u64 id); #else /* !CONFIG_CGROUPS */ struct cgroup_subsys_state; struct cgroup; -static inline u64 cgroup_id(const struct cgroup *cgrp) { return 1; } -static inline void css_get(struct cgroup_subsys_state *css) {} static inline void css_put(struct cgroup_subsys_state *css) {} static inline int cgroup_attach_task_all(struct task_struct *from, struct task_struct *t) { return 0; } @@ -713,14 +601,10 @@ static inline int cgroupstats_build(struct cgroupstats *stats, struct dentry *dentry) { return -EINVAL; } static inline void cgroup_fork(struct task_struct *p) {} -static inline int cgroup_can_fork(struct 
task_struct *p, - struct kernel_clone_args *kargs) { return 0; } -static inline void cgroup_cancel_fork(struct task_struct *p, - struct kernel_clone_args *kargs) {} -static inline void cgroup_post_fork(struct task_struct *p, - struct kernel_clone_args *kargs) {} +static inline int cgroup_can_fork(struct task_struct *p) { return 0; } +static inline void cgroup_cancel_fork(struct task_struct *p) {} +static inline void cgroup_post_fork(struct task_struct *p) {} static inline void cgroup_exit(struct task_struct *p) {} -static inline void cgroup_release(struct task_struct *p) {} static inline void cgroup_free(struct task_struct *p) {} static inline int cgroup_init_early(void) { return 0; } @@ -728,125 +612,56 @@ static inline int cgroup_init(void) { return 0; } static inline void cgroup_init_kthreadd(void) {} static inline void cgroup_kthread_ready(void) {} -static inline struct cgroup *cgroup_parent(struct cgroup *cgrp) -{ - return NULL; -} - -static inline struct psi_group *cgroup_psi(struct cgroup *cgrp) -{ - return NULL; -} - -static inline bool cgroup_psi_enabled(void) -{ - return false; -} - static inline bool task_under_cgroup_hierarchy(struct task_struct *task, struct cgroup *ancestor) { return true; } - -static inline void cgroup_path_from_kernfs_id(u64 id, char *buf, size_t buflen) -{} - -static inline struct cgroup *cgroup_get_from_id(u64 id) -{ - return NULL; -} #endif /* !CONFIG_CGROUPS */ -#ifdef CONFIG_CGROUPS -/* - * cgroup scalable recursive statistics. - */ -void cgroup_rstat_updated(struct cgroup *cgrp, int cpu); -void cgroup_rstat_flush(struct cgroup *cgrp); -void cgroup_rstat_flush_irqsafe(struct cgroup *cgrp); -void cgroup_rstat_flush_hold(struct cgroup *cgrp); -void cgroup_rstat_flush_release(void); - -/* - * Basic resource stats. 
- */ -#ifdef CONFIG_CGROUP_CPUACCT -void cpuacct_charge(struct task_struct *tsk, u64 cputime); -void cpuacct_account_field(struct task_struct *tsk, int index, u64 val); -#else -static inline void cpuacct_charge(struct task_struct *tsk, u64 cputime) {} -static inline void cpuacct_account_field(struct task_struct *tsk, int index, - u64 val) {} -#endif - -void __cgroup_account_cputime(struct cgroup *cgrp, u64 delta_exec); -void __cgroup_account_cputime_field(struct cgroup *cgrp, - enum cpu_usage_stat index, u64 delta_exec); - -static inline void cgroup_account_cputime(struct task_struct *task, - u64 delta_exec) -{ - struct cgroup *cgrp; - - cpuacct_charge(task, delta_exec); - - rcu_read_lock(); - cgrp = task_dfl_cgroup(task); - if (cgroup_parent(cgrp)) - __cgroup_account_cputime(cgrp, delta_exec); - rcu_read_unlock(); -} - -static inline void cgroup_account_cputime_field(struct task_struct *task, - enum cpu_usage_stat index, - u64 delta_exec) -{ - struct cgroup *cgrp; - - cpuacct_account_field(task, index, delta_exec); - - rcu_read_lock(); - cgrp = task_dfl_cgroup(task); - if (cgroup_parent(cgrp)) - __cgroup_account_cputime_field(cgrp, index, delta_exec); - rcu_read_unlock(); -} - -#else /* CONFIG_CGROUPS */ - -static inline void cgroup_account_cputime(struct task_struct *task, - u64 delta_exec) {} -static inline void cgroup_account_cputime_field(struct task_struct *task, - enum cpu_usage_stat index, - u64 delta_exec) {} - -#endif /* CONFIG_CGROUPS */ - /* * sock->sk_cgrp_data handling. For more info, see sock_cgroup_data * definition in cgroup-defs.h. 
*/ #ifdef CONFIG_SOCK_CGROUP_DATA +#if defined(CONFIG_CGROUP_NET_PRIO) || defined(CONFIG_CGROUP_NET_CLASSID) +extern spinlock_t cgroup_sk_update_lock; +#endif + +void cgroup_sk_alloc_disable(void); void cgroup_sk_alloc(struct sock_cgroup_data *skcd); -void cgroup_sk_clone(struct sock_cgroup_data *skcd); void cgroup_sk_free(struct sock_cgroup_data *skcd); static inline struct cgroup *sock_cgroup_ptr(struct sock_cgroup_data *skcd) { - return skcd->cgroup; +#if defined(CONFIG_CGROUP_NET_PRIO) || defined(CONFIG_CGROUP_NET_CLASSID) + unsigned long v; + + /* + * @skcd->val is 64bit but the following is safe on 32bit too as we + * just need the lower ulong to be written and read atomically. + */ + v = READ_ONCE(skcd->val); + + if (v & 1) + return &cgrp_dfl_root.cgrp; + + return (struct cgroup *)(unsigned long)v ?: &cgrp_dfl_root.cgrp; +#else + return (struct cgroup *)(unsigned long)skcd->val; +#endif } #else /* CONFIG_CGROUP_DATA */ static inline void cgroup_sk_alloc(struct sock_cgroup_data *skcd) {} -static inline void cgroup_sk_clone(struct sock_cgroup_data *skcd) {} static inline void cgroup_sk_free(struct sock_cgroup_data *skcd) {} #endif /* CONFIG_CGROUP_DATA */ struct cgroup_namespace { + atomic_t count; struct ns_common ns; struct user_namespace *user_ns; struct ucounts *ucounts; @@ -881,56 +696,13 @@ copy_cgroup_ns(unsigned long flags, struct user_namespace *user_ns, static inline void get_cgroup_ns(struct cgroup_namespace *ns) { if (ns) - refcount_inc(&ns->ns.count); + atomic_inc(&ns->count); } static inline void put_cgroup_ns(struct cgroup_namespace *ns) { - if (ns && refcount_dec_and_test(&ns->ns.count)) + if (ns && atomic_dec_and_test(&ns->count)) free_cgroup_ns(ns); } -#ifdef CONFIG_CGROUPS - -void cgroup_enter_frozen(void); -void cgroup_leave_frozen(bool always_leave); -void cgroup_update_frozen(struct cgroup *cgrp); -void cgroup_freeze(struct cgroup *cgrp, bool freeze); -void cgroup_freezer_migrate_task(struct task_struct *task, struct cgroup *src, - struct 
cgroup *dst); - -static inline bool cgroup_task_frozen(struct task_struct *task) -{ - return task->frozen; -} - -#else /* !CONFIG_CGROUPS */ - -static inline void cgroup_enter_frozen(void) { } -static inline void cgroup_leave_frozen(bool always_leave) { } -static inline bool cgroup_task_frozen(struct task_struct *task) -{ - return false; -} - -#endif /* !CONFIG_CGROUPS */ - -#ifdef CONFIG_CGROUP_BPF -static inline void cgroup_bpf_get(struct cgroup *cgrp) -{ - percpu_ref_get(&cgrp->bpf.refcnt); -} - -static inline void cgroup_bpf_put(struct cgroup *cgrp) -{ - percpu_ref_put(&cgrp->bpf.refcnt); -} - -#else /* CONFIG_CGROUP_BPF */ - -static inline void cgroup_bpf_get(struct cgroup *cgrp) {} -static inline void cgroup_bpf_put(struct cgroup *cgrp) {} - -#endif /* CONFIG_CGROUP_BPF */ - #endif /* _LINUX_CGROUP_H */ diff --git a/include/linux/cgroup_subsys.h b/include/linux/cgroup_subsys.h index 4452354872..0df0336ace 100644 --- a/include/linux/cgroup_subsys.h +++ b/include/linux/cgroup_subsys.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ /* * List of cgroup subsystems. * @@ -57,14 +56,6 @@ SUBSYS(hugetlb) SUBSYS(pids) #endif -#if IS_ENABLED(CONFIG_CGROUP_RDMA) -SUBSYS(rdma) -#endif - -#if IS_ENABLED(CONFIG_CGROUP_MISC) -SUBSYS(misc) -#endif - /* * The following subsystems are not supported on the default hierarchy. */ diff --git a/include/linux/circ_buf.h b/include/linux/circ_buf.h index b3233e8202..90f2471dc6 100644 --- a/include/linux/circ_buf.h +++ b/include/linux/circ_buf.h @@ -1,6 +1,5 @@ -/* SPDX-License-Identifier: GPL-2.0 */ /* - * See Documentation/core-api/circular-buffers.rst for more information. + * See Documentation/circular-buffers.txt for more information. 
*/ #ifndef _LINUX_CIRC_BUF_H diff --git a/include/linux/cleancache.h b/include/linux/cleancache.h index 5f5730c1d3..1d5925e990 100644 --- a/include/linux/cleancache.h +++ b/include/linux/cleancache.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_CLEANCACHE_H #define _LINUX_CLEANCACHE_H @@ -28,7 +27,7 @@ struct cleancache_filekey { struct cleancache_ops { int (*init_fs)(size_t); - int (*init_shared_fs)(uuid_t *uuid, size_t); + int (*init_shared_fs)(char *uuid, size_t); int (*get_page)(int, struct cleancache_filekey, pgoff_t, struct page *); void (*put_page)(int, struct cleancache_filekey, @@ -36,7 +35,7 @@ struct cleancache_ops { void (*invalidate_page)(int, struct cleancache_filekey, pgoff_t); void (*invalidate_inode)(int, struct cleancache_filekey); void (*invalidate_fs)(int); -}; +} __no_const; extern int cleancache_register_ops(const struct cleancache_ops *ops); extern void __cleancache_init_fs(struct super_block *); diff --git a/include/linux/clk-provider.h b/include/linux/clk-provider.h index f59c875271..24e7490e28 100644 --- a/include/linux/clk-provider.h +++ b/include/linux/clk-provider.h @@ -1,27 +1,32 @@ -/* SPDX-License-Identifier: GPL-2.0 */ /* + * linux/include/linux/clk-provider.h + * * Copyright (c) 2010-2011 Jeremy Kerr * Copyright (C) 2011-2012 Linaro Ltd + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. */ #ifndef __LINUX_CLK_PROVIDER_H #define __LINUX_CLK_PROVIDER_H +#include #include -#include + +#ifdef CONFIG_COMMON_CLK /* * flags used across common struct clk. these flags should only affect the * top-level framework. custom flags for dealing with hardware specifics * belong in struct clk_foo - * - * Please update clk_flags[] in drivers/clk/clk.c when making changes here! 
*/ #define CLK_SET_RATE_GATE BIT(0) /* must be gated across rate change */ #define CLK_SET_PARENT_GATE BIT(1) /* must be gated across re-parent */ #define CLK_SET_RATE_PARENT BIT(2) /* propagate rate change up one level */ #define CLK_IGNORE_UNUSED BIT(3) /* do not gate even if unused */ /* unused */ - /* unused */ +#define CLK_IS_BASIC BIT(5) /* Basic clk, can't do a to_clk_foo() */ #define CLK_GET_RATE_NOCACHE BIT(6) /* do not use the cached clk rate */ #define CLK_SET_RATE_NO_REPARENT BIT(7) /* don't re-parent on rate change */ #define CLK_GET_ACCURACY_NOCACHE BIT(8) /* do not use the cached clk accuracy */ @@ -30,8 +35,6 @@ #define CLK_IS_CRITICAL BIT(11) /* do not gate, ever */ /* parents need enable during gate/ungate, set rate and re-parent */ #define CLK_OPS_PARENT_ENABLE BIT(12) -/* duty cycle call may be forwarded to the parent clock */ -#define CLK_DUTY_CYCLE_PARENT BIT(13) struct clk; struct clk_hw; @@ -60,17 +63,6 @@ struct clk_rate_request { struct clk_hw *best_parent_hw; }; -/** - * struct clk_duty - Struture encoding the duty cycle ratio of a clock - * - * @num: Numerator of the duty cycle ratio - * @den: Denominator of the duty cycle ratio - */ -struct clk_duty { - unsigned int num; - unsigned int den; -}; - /** * struct clk_ops - Callback operations for hardware clocks; these are to * be provided by the clock implementation, and will be called by drivers @@ -111,11 +103,6 @@ struct clk_duty { * Called with enable_lock held. This function must not * sleep. * - * @save_context: Save the context of the clock in prepration for poweroff. - * - * @restore_context: Restore the context of the clock after a restoration - * of power. - * * @recalc_rate Recalculate the rate of this clock, by querying hardware. The * parent rate is an input parameter. It is up to the caller to * ensure that the prepare_mutex is held across this call. @@ -179,25 +166,10 @@ struct clk_duty { * by the second argument. Valid values for degrees are * 0-359. 
Return 0 on success, otherwise -EERROR. * - * @get_duty_cycle: Queries the hardware to get the current duty cycle ratio - * of a clock. Returned values denominator cannot be 0 and must be - * superior or equal to the numerator. - * - * @set_duty_cycle: Apply the duty cycle ratio to this clock signal specified by - * the numerator (2nd argurment) and denominator (3rd argument). - * Argument must be a valid ratio (denominator > 0 - * and >= numerator) Return 0 on success, otherwise -EERROR. - * * @init: Perform platform-specific initialization magic. - * This is not used by any of the basic clock types. - * This callback exist for HW which needs to perform some - * initialisation magic for CCF to get an accurate view of the - * clock. It may also be used dynamic resource allocation is - * required. It shall not used to deal with clock parameters, - * such as rate or parents. - * Returns 0 on success, -EERROR otherwise. - * - * @terminate: Free any resource allocated by init. + * This is not not used by any of the basic clock types. + * Please consider other ways of solving initialization problems + * before using this callback, as its use is discouraged. * * @debug_init: Set up type-specific debugfs entries for this clock. 
This * is called once, after the debugfs directory entry for this @@ -226,8 +198,6 @@ struct clk_ops { void (*disable)(struct clk_hw *hw); int (*is_enabled)(struct clk_hw *hw); void (*disable_unused)(struct clk_hw *hw); - int (*save_context)(struct clk_hw *hw); - void (*restore_context)(struct clk_hw *hw); unsigned long (*recalc_rate)(struct clk_hw *hw, unsigned long parent_rate); long (*round_rate)(struct clk_hw *hw, unsigned long rate, @@ -245,28 +215,10 @@ struct clk_ops { unsigned long parent_accuracy); int (*get_phase)(struct clk_hw *hw); int (*set_phase)(struct clk_hw *hw, int degrees); - int (*get_duty_cycle)(struct clk_hw *hw, - struct clk_duty *duty); - int (*set_duty_cycle)(struct clk_hw *hw, - struct clk_duty *duty); - int (*init)(struct clk_hw *hw); - void (*terminate)(struct clk_hw *hw); - void (*debug_init)(struct clk_hw *hw, struct dentry *dentry); -}; - -/** - * struct clk_parent_data - clk parent information - * @hw: parent clk_hw pointer (used for clk providers with internal clks) - * @fw_name: parent name local to provider registering clk - * @name: globally unique parent name (used as a fallback) - * @index: parent index local to provider registering clk (if @fw_name absent) - */ -struct clk_parent_data { - const struct clk_hw *hw; - const char *fw_name; - const char *name; - int index; + void (*init)(struct clk_hw *hw); + int (*debug_init)(struct clk_hw *hw, struct dentry *dentry); }; +typedef struct clk_ops __no_const clk_ops_no_const; /** * struct clk_init_data - holds init data that's common to all clocks and is @@ -275,20 +227,13 @@ struct clk_parent_data { * @name: clock name * @ops: operations this clock supports * @parent_names: array of string names for all possible parents - * @parent_data: array of parent data for all possible parents (when some - * parents are external to the clk controller) - * @parent_hws: array of pointers to all possible parents (when all parents - * are internal to the clk controller) * @num_parents: number of 
possible parents * @flags: framework-level hints and quirks */ struct clk_init_data { const char *name; const struct clk_ops *ops; - /* Only one of the following three should be assigned */ const char * const *parent_names; - const struct clk_parent_data *parent_data; - const struct clk_hw **parent_hws; u8 num_parents; unsigned long flags; }; @@ -306,8 +251,7 @@ struct clk_init_data { * into the clk API * * @init: pointer to struct clk_init_data that contains the init data shared - * with the common clock framework. This pointer will be set to NULL once - * a clk_register() variant is called on this clk_hw pointer. + * with the common clock framework. */ struct clk_hw { struct clk_core *core; @@ -328,119 +272,30 @@ struct clk_hw { * struct clk_fixed_rate - fixed-rate clock * @hw: handle between common and hardware-specific interfaces * @fixed_rate: constant frequency of clock - * @fixed_accuracy: constant accuracy of clock in ppb (parts per billion) - * @flags: hardware specific flags - * - * Flags: - * * CLK_FIXED_RATE_PARENT_ACCURACY - Use the accuracy of the parent clk - * instead of what's set in @fixed_accuracy. 
*/ struct clk_fixed_rate { struct clk_hw hw; unsigned long fixed_rate; unsigned long fixed_accuracy; - unsigned long flags; + u8 flags; }; -#define CLK_FIXED_RATE_PARENT_ACCURACY BIT(0) +#define to_clk_fixed_rate(_hw) container_of(_hw, struct clk_fixed_rate, hw) extern const struct clk_ops clk_fixed_rate_ops; -struct clk_hw *__clk_hw_register_fixed_rate(struct device *dev, - struct device_node *np, const char *name, - const char *parent_name, const struct clk_hw *parent_hw, - const struct clk_parent_data *parent_data, unsigned long flags, - unsigned long fixed_rate, unsigned long fixed_accuracy, - unsigned long clk_fixed_flags); struct clk *clk_register_fixed_rate(struct device *dev, const char *name, const char *parent_name, unsigned long flags, unsigned long fixed_rate); -/** - * clk_hw_register_fixed_rate - register fixed-rate clock with the clock - * framework - * @dev: device that is registering this clock - * @name: name of this clock - * @parent_name: name of clock's parent - * @flags: framework-specific flags - * @fixed_rate: non-adjustable clock rate - */ -#define clk_hw_register_fixed_rate(dev, name, parent_name, flags, fixed_rate) \ - __clk_hw_register_fixed_rate((dev), NULL, (name), (parent_name), NULL, \ - NULL, (flags), (fixed_rate), 0, 0) -/** - * clk_hw_register_fixed_rate_parent_hw - register fixed-rate clock with - * the clock framework - * @dev: device that is registering this clock - * @name: name of this clock - * @parent_hw: pointer to parent clk - * @flags: framework-specific flags - * @fixed_rate: non-adjustable clock rate - */ -#define clk_hw_register_fixed_rate_parent_hw(dev, name, parent_hw, flags, \ - fixed_rate) \ - __clk_hw_register_fixed_rate((dev), NULL, (name), NULL, (parent_hw), \ - NULL, (flags), (fixed_rate), 0, 0) -/** - * clk_hw_register_fixed_rate_parent_data - register fixed-rate clock with - * the clock framework - * @dev: device that is registering this clock - * @name: name of this clock - * @parent_data: parent clk data - 
* @flags: framework-specific flags - * @fixed_rate: non-adjustable clock rate - */ -#define clk_hw_register_fixed_rate_parent_data(dev, name, parent_hw, flags, \ - fixed_rate) \ - __clk_hw_register_fixed_rate((dev), NULL, (name), NULL, NULL, \ - (parent_data), (flags), (fixed_rate), 0, \ - 0) -/** - * clk_hw_register_fixed_rate_with_accuracy - register fixed-rate clock with - * the clock framework - * @dev: device that is registering this clock - * @name: name of this clock - * @parent_name: name of clock's parent - * @flags: framework-specific flags - * @fixed_rate: non-adjustable clock rate - * @fixed_accuracy: non-adjustable clock accuracy - */ -#define clk_hw_register_fixed_rate_with_accuracy(dev, name, parent_name, \ - flags, fixed_rate, \ - fixed_accuracy) \ - __clk_hw_register_fixed_rate((dev), NULL, (name), (parent_name), \ - NULL, NULL, (flags), (fixed_rate), \ - (fixed_accuracy), 0) -/** - * clk_hw_register_fixed_rate_with_accuracy_parent_hw - register fixed-rate - * clock with the clock framework - * @dev: device that is registering this clock - * @name: name of this clock - * @parent_hw: pointer to parent clk - * @flags: framework-specific flags - * @fixed_rate: non-adjustable clock rate - * @fixed_accuracy: non-adjustable clock accuracy - */ -#define clk_hw_register_fixed_rate_with_accuracy_parent_hw(dev, name, \ - parent_hw, flags, fixed_rate, fixed_accuracy) \ - __clk_hw_register_fixed_rate((dev), NULL, (name), NULL, (parent_hw) \ - NULL, NULL, (flags), (fixed_rate), \ - (fixed_accuracy), 0) -/** - * clk_hw_register_fixed_rate_with_accuracy_parent_data - register fixed-rate - * clock with the clock framework - * @dev: device that is registering this clock - * @name: name of this clock - * @parent_name: name of clock's parent - * @flags: framework-specific flags - * @fixed_rate: non-adjustable clock rate - * @fixed_accuracy: non-adjustable clock accuracy - */ -#define clk_hw_register_fixed_rate_with_accuracy_parent_data(dev, name, \ - parent_data, 
flags, fixed_rate, fixed_accuracy) \ - __clk_hw_register_fixed_rate((dev), NULL, (name), NULL, NULL, \ - (parent_data), NULL, (flags), \ - (fixed_rate), (fixed_accuracy), 0) - +struct clk_hw *clk_hw_register_fixed_rate(struct device *dev, const char *name, + const char *parent_name, unsigned long flags, + unsigned long fixed_rate); +struct clk *clk_register_fixed_rate_with_accuracy(struct device *dev, + const char *name, const char *parent_name, unsigned long flags, + unsigned long fixed_rate, unsigned long fixed_accuracy); void clk_unregister_fixed_rate(struct clk *clk); +struct clk_hw *clk_hw_register_fixed_rate_with_accuracy(struct device *dev, + const char *name, const char *parent_name, unsigned long flags, + unsigned long fixed_rate, unsigned long fixed_accuracy); void clk_hw_unregister_fixed_rate(struct clk_hw *hw); void of_fixed_clk_setup(struct device_node *np); @@ -464,9 +319,6 @@ void of_fixed_clk_setup(struct device_node *np); * of this register, and mask of gate bits are in higher 16-bit of this * register. While setting the gate bits, higher 16-bit should also be * updated to indicate changing gate bits. - * CLK_GATE_BIG_ENDIAN - by default little endian register accesses are used for - * the gate register. Setting this flag makes the register accesses big - * endian. 
*/ struct clk_gate { struct clk_hw hw; @@ -480,73 +332,18 @@ struct clk_gate { #define CLK_GATE_SET_TO_DISABLE BIT(0) #define CLK_GATE_HIWORD_MASK BIT(1) -#define CLK_GATE_BIG_ENDIAN BIT(2) extern const struct clk_ops clk_gate_ops; -struct clk_hw *__clk_hw_register_gate(struct device *dev, - struct device_node *np, const char *name, - const char *parent_name, const struct clk_hw *parent_hw, - const struct clk_parent_data *parent_data, - unsigned long flags, - void __iomem *reg, u8 bit_idx, - u8 clk_gate_flags, spinlock_t *lock); struct clk *clk_register_gate(struct device *dev, const char *name, const char *parent_name, unsigned long flags, void __iomem *reg, u8 bit_idx, u8 clk_gate_flags, spinlock_t *lock); -/** - * clk_hw_register_gate - register a gate clock with the clock framework - * @dev: device that is registering this clock - * @name: name of this clock - * @parent_name: name of this clock's parent - * @flags: framework-specific flags for this clock - * @reg: register address to control gating of this clock - * @bit_idx: which bit in the register controls gating of this clock - * @clk_gate_flags: gate-specific flags for this clock - * @lock: shared register lock for this clock - */ -#define clk_hw_register_gate(dev, name, parent_name, flags, reg, bit_idx, \ - clk_gate_flags, lock) \ - __clk_hw_register_gate((dev), NULL, (name), (parent_name), NULL, \ - NULL, (flags), (reg), (bit_idx), \ - (clk_gate_flags), (lock)) -/** - * clk_hw_register_gate_parent_hw - register a gate clock with the clock - * framework - * @dev: device that is registering this clock - * @name: name of this clock - * @parent_hw: pointer to parent clk - * @flags: framework-specific flags for this clock - * @reg: register address to control gating of this clock - * @bit_idx: which bit in the register controls gating of this clock - * @clk_gate_flags: gate-specific flags for this clock - * @lock: shared register lock for this clock - */ -#define clk_hw_register_gate_parent_hw(dev, name, 
parent_hw, flags, reg, \ - bit_idx, clk_gate_flags, lock) \ - __clk_hw_register_gate((dev), NULL, (name), NULL, (parent_hw), \ - NULL, (flags), (reg), (bit_idx), \ - (clk_gate_flags), (lock)) -/** - * clk_hw_register_gate_parent_data - register a gate clock with the clock - * framework - * @dev: device that is registering this clock - * @name: name of this clock - * @parent_data: parent clk data - * @flags: framework-specific flags for this clock - * @reg: register address to control gating of this clock - * @bit_idx: which bit in the register controls gating of this clock - * @clk_gate_flags: gate-specific flags for this clock - * @lock: shared register lock for this clock - */ -#define clk_hw_register_gate_parent_data(dev, name, parent_data, flags, reg, \ - bit_idx, clk_gate_flags, lock) \ - __clk_hw_register_gate((dev), NULL, (name), NULL, NULL, (parent_data), \ - (flags), (reg), (bit_idx), \ - (clk_gate_flags), (lock)) +struct clk_hw *clk_hw_register_gate(struct device *dev, const char *name, + const char *parent_name, unsigned long flags, + void __iomem *reg, u8 bit_idx, + u8 clk_gate_flags, spinlock_t *lock); void clk_unregister_gate(struct clk *clk); void clk_hw_unregister_gate(struct clk_hw *hw); -int clk_gate_is_enabled(struct clk_hw *hw); struct clk_div_table { unsigned int val; @@ -589,9 +386,6 @@ struct clk_div_table { * CLK_DIVIDER_MAX_AT_ZERO - For dividers which are like CLK_DIVIDER_ONE_BASED * except when the value read from the register is zero, the divisor is * 2^width of the field. - * CLK_DIVIDER_BIG_ENDIAN - By default little endian register accesses are used - * for the divider register. Setting this flag makes the register accesses - * big endian. 
*/ struct clk_divider { struct clk_hw hw; @@ -603,7 +397,6 @@ struct clk_divider { spinlock_t *lock; }; -#define clk_div_mask(width) ((1 << (width)) - 1) #define to_clk_divider(_hw) container_of(_hw, struct clk_divider, hw) #define CLK_DIVIDER_ONE_BASED BIT(0) @@ -613,223 +406,38 @@ struct clk_divider { #define CLK_DIVIDER_ROUND_CLOSEST BIT(4) #define CLK_DIVIDER_READ_ONLY BIT(5) #define CLK_DIVIDER_MAX_AT_ZERO BIT(6) -#define CLK_DIVIDER_BIG_ENDIAN BIT(7) extern const struct clk_ops clk_divider_ops; extern const struct clk_ops clk_divider_ro_ops; unsigned long divider_recalc_rate(struct clk_hw *hw, unsigned long parent_rate, unsigned int val, const struct clk_div_table *table, - unsigned long flags, unsigned long width); -long divider_round_rate_parent(struct clk_hw *hw, struct clk_hw *parent, - unsigned long rate, unsigned long *prate, - const struct clk_div_table *table, - u8 width, unsigned long flags); -long divider_ro_round_rate_parent(struct clk_hw *hw, struct clk_hw *parent, - unsigned long rate, unsigned long *prate, - const struct clk_div_table *table, u8 width, - unsigned long flags, unsigned int val); -int divider_determine_rate(struct clk_hw *hw, struct clk_rate_request *req, - const struct clk_div_table *table, u8 width, - unsigned long flags); -int divider_ro_determine_rate(struct clk_hw *hw, struct clk_rate_request *req, - const struct clk_div_table *table, u8 width, - unsigned long flags, unsigned int val); + unsigned long flags); +long divider_round_rate(struct clk_hw *hw, unsigned long rate, + unsigned long *prate, const struct clk_div_table *table, + u8 width, unsigned long flags); int divider_get_val(unsigned long rate, unsigned long parent_rate, const struct clk_div_table *table, u8 width, unsigned long flags); -struct clk_hw *__clk_hw_register_divider(struct device *dev, - struct device_node *np, const char *name, - const char *parent_name, const struct clk_hw *parent_hw, - const struct clk_parent_data *parent_data, unsigned long flags, - 
void __iomem *reg, u8 shift, u8 width, u8 clk_divider_flags, - const struct clk_div_table *table, spinlock_t *lock); -struct clk_hw *__devm_clk_hw_register_divider(struct device *dev, - struct device_node *np, const char *name, - const char *parent_name, const struct clk_hw *parent_hw, - const struct clk_parent_data *parent_data, unsigned long flags, - void __iomem *reg, u8 shift, u8 width, u8 clk_divider_flags, - const struct clk_div_table *table, spinlock_t *lock); +struct clk *clk_register_divider(struct device *dev, const char *name, + const char *parent_name, unsigned long flags, + void __iomem *reg, u8 shift, u8 width, + u8 clk_divider_flags, spinlock_t *lock); +struct clk_hw *clk_hw_register_divider(struct device *dev, const char *name, + const char *parent_name, unsigned long flags, + void __iomem *reg, u8 shift, u8 width, + u8 clk_divider_flags, spinlock_t *lock); struct clk *clk_register_divider_table(struct device *dev, const char *name, const char *parent_name, unsigned long flags, void __iomem *reg, u8 shift, u8 width, u8 clk_divider_flags, const struct clk_div_table *table, spinlock_t *lock); -/** - * clk_register_divider - register a divider clock with the clock framework - * @dev: device registering this clock - * @name: name of this clock - * @parent_name: name of clock's parent - * @flags: framework-specific flags - * @reg: register address to adjust divider - * @shift: number of bits to shift the bitfield - * @width: width of the bitfield - * @clk_divider_flags: divider-specific flags for this clock - * @lock: shared register lock for this clock - */ -#define clk_register_divider(dev, name, parent_name, flags, reg, shift, width, \ - clk_divider_flags, lock) \ - clk_register_divider_table((dev), (name), (parent_name), (flags), \ - (reg), (shift), (width), \ - (clk_divider_flags), NULL, (lock)) -/** - * clk_hw_register_divider - register a divider clock with the clock framework - * @dev: device registering this clock - * @name: name of this clock - 
* @parent_name: name of clock's parent - * @flags: framework-specific flags - * @reg: register address to adjust divider - * @shift: number of bits to shift the bitfield - * @width: width of the bitfield - * @clk_divider_flags: divider-specific flags for this clock - * @lock: shared register lock for this clock - */ -#define clk_hw_register_divider(dev, name, parent_name, flags, reg, shift, \ - width, clk_divider_flags, lock) \ - __clk_hw_register_divider((dev), NULL, (name), (parent_name), NULL, \ - NULL, (flags), (reg), (shift), (width), \ - (clk_divider_flags), NULL, (lock)) -/** - * clk_hw_register_divider_parent_hw - register a divider clock with the clock - * framework - * @dev: device registering this clock - * @name: name of this clock - * @parent_hw: pointer to parent clk - * @flags: framework-specific flags - * @reg: register address to adjust divider - * @shift: number of bits to shift the bitfield - * @width: width of the bitfield - * @clk_divider_flags: divider-specific flags for this clock - * @lock: shared register lock for this clock - */ -#define clk_hw_register_divider_parent_hw(dev, name, parent_hw, flags, reg, \ - shift, width, clk_divider_flags, \ - lock) \ - __clk_hw_register_divider((dev), NULL, (name), NULL, (parent_hw), \ - NULL, (flags), (reg), (shift), (width), \ - (clk_divider_flags), NULL, (lock)) -/** - * clk_hw_register_divider_parent_data - register a divider clock with the clock - * framework - * @dev: device registering this clock - * @name: name of this clock - * @parent_data: parent clk data - * @flags: framework-specific flags - * @reg: register address to adjust divider - * @shift: number of bits to shift the bitfield - * @width: width of the bitfield - * @clk_divider_flags: divider-specific flags for this clock - * @lock: shared register lock for this clock - */ -#define clk_hw_register_divider_parent_data(dev, name, parent_data, flags, \ - reg, shift, width, \ - clk_divider_flags, lock) \ - __clk_hw_register_divider((dev), 
NULL, (name), NULL, NULL, \ - (parent_data), (flags), (reg), (shift), \ - (width), (clk_divider_flags), NULL, (lock)) -/** - * clk_hw_register_divider_table - register a table based divider clock with - * the clock framework - * @dev: device registering this clock - * @name: name of this clock - * @parent_name: name of clock's parent - * @flags: framework-specific flags - * @reg: register address to adjust divider - * @shift: number of bits to shift the bitfield - * @width: width of the bitfield - * @clk_divider_flags: divider-specific flags for this clock - * @table: array of divider/value pairs ending with a div set to 0 - * @lock: shared register lock for this clock - */ -#define clk_hw_register_divider_table(dev, name, parent_name, flags, reg, \ - shift, width, clk_divider_flags, table, \ - lock) \ - __clk_hw_register_divider((dev), NULL, (name), (parent_name), NULL, \ - NULL, (flags), (reg), (shift), (width), \ - (clk_divider_flags), (table), (lock)) -/** - * clk_hw_register_divider_table_parent_hw - register a table based divider - * clock with the clock framework - * @dev: device registering this clock - * @name: name of this clock - * @parent_hw: pointer to parent clk - * @flags: framework-specific flags - * @reg: register address to adjust divider - * @shift: number of bits to shift the bitfield - * @width: width of the bitfield - * @clk_divider_flags: divider-specific flags for this clock - * @table: array of divider/value pairs ending with a div set to 0 - * @lock: shared register lock for this clock - */ -#define clk_hw_register_divider_table_parent_hw(dev, name, parent_hw, flags, \ - reg, shift, width, \ - clk_divider_flags, table, \ - lock) \ - __clk_hw_register_divider((dev), NULL, (name), NULL, (parent_hw), \ - NULL, (flags), (reg), (shift), (width), \ - (clk_divider_flags), (table), (lock)) -/** - * clk_hw_register_divider_table_parent_data - register a table based divider - * clock with the clock framework - * @dev: device registering this clock - 
* @name: name of this clock - * @parent_data: parent clk data - * @flags: framework-specific flags - * @reg: register address to adjust divider - * @shift: number of bits to shift the bitfield - * @width: width of the bitfield - * @clk_divider_flags: divider-specific flags for this clock - * @table: array of divider/value pairs ending with a div set to 0 - * @lock: shared register lock for this clock - */ -#define clk_hw_register_divider_table_parent_data(dev, name, parent_data, \ - flags, reg, shift, width, \ - clk_divider_flags, table, \ - lock) \ - __clk_hw_register_divider((dev), NULL, (name), NULL, NULL, \ - (parent_data), (flags), (reg), (shift), \ - (width), (clk_divider_flags), (table), \ - (lock)) -/** - * devm_clk_hw_register_divider - register a divider clock with the clock framework - * @dev: device registering this clock - * @name: name of this clock - * @parent_name: name of clock's parent - * @flags: framework-specific flags - * @reg: register address to adjust divider - * @shift: number of bits to shift the bitfield - * @width: width of the bitfield - * @clk_divider_flags: divider-specific flags for this clock - * @lock: shared register lock for this clock - */ -#define devm_clk_hw_register_divider(dev, name, parent_name, flags, reg, shift, \ - width, clk_divider_flags, lock) \ - __devm_clk_hw_register_divider((dev), NULL, (name), (parent_name), NULL, \ - NULL, (flags), (reg), (shift), (width), \ - (clk_divider_flags), NULL, (lock)) -/** - * devm_clk_hw_register_divider_table - register a table based divider clock - * with the clock framework (devres variant) - * @dev: device registering this clock - * @name: name of this clock - * @parent_name: name of clock's parent - * @flags: framework-specific flags - * @reg: register address to adjust divider - * @shift: number of bits to shift the bitfield - * @width: width of the bitfield - * @clk_divider_flags: divider-specific flags for this clock - * @table: array of divider/value pairs ending with a div 
set to 0 - * @lock: shared register lock for this clock - */ -#define devm_clk_hw_register_divider_table(dev, name, parent_name, flags, \ - reg, shift, width, \ - clk_divider_flags, table, lock) \ - __devm_clk_hw_register_divider((dev), NULL, (name), (parent_name), \ - NULL, NULL, (flags), (reg), (shift), \ - (width), (clk_divider_flags), (table), \ - (lock)) - +struct clk_hw *clk_hw_register_divider_table(struct device *dev, + const char *name, const char *parent_name, unsigned long flags, + void __iomem *reg, u8 shift, u8 width, + u8 clk_divider_flags, const struct clk_div_table *table, + spinlock_t *lock); void clk_unregister_divider(struct clk *clk); void clk_hw_unregister_divider(struct clk_hw *hw); @@ -838,9 +446,8 @@ void clk_hw_unregister_divider(struct clk_hw *hw); * * @hw: handle between common and hardware-specific interfaces * @reg: register controlling multiplexer - * @table: array of register values corresponding to the parent index * @shift: shift to multiplexer bit field - * @mask: mask of mutliplexer bit field + * @width: width of mutliplexer bit field * @flags: hardware-specific flags * @lock: register lock * @@ -854,13 +461,8 @@ void clk_hw_unregister_divider(struct clk_hw *hw); * register, and mask of mux bits are in higher 16-bit of this register. * While setting the mux bits, higher 16-bit should also be updated to * indicate changing mux bits. - * CLK_MUX_READ_ONLY - The mux registers can't be written, only read in the - * .get_parent clk_op. * CLK_MUX_ROUND_CLOSEST - Use the parent rate that is closest to the desired * frequency. - * CLK_MUX_BIG_ENDIAN - By default little endian register accesses are used for - * the mux register. Setting this flag makes the register accesses big - * endian. 
*/ struct clk_mux { struct clk_hw hw; @@ -879,69 +481,31 @@ struct clk_mux { #define CLK_MUX_HIWORD_MASK BIT(2) #define CLK_MUX_READ_ONLY BIT(3) /* mux can't be changed */ #define CLK_MUX_ROUND_CLOSEST BIT(4) -#define CLK_MUX_BIG_ENDIAN BIT(5) extern const struct clk_ops clk_mux_ops; extern const struct clk_ops clk_mux_ro_ops; -struct clk_hw *__clk_hw_register_mux(struct device *dev, struct device_node *np, - const char *name, u8 num_parents, - const char * const *parent_names, - const struct clk_hw **parent_hws, - const struct clk_parent_data *parent_data, - unsigned long flags, void __iomem *reg, u8 shift, u32 mask, - u8 clk_mux_flags, u32 *table, spinlock_t *lock); -struct clk_hw *__devm_clk_hw_register_mux(struct device *dev, struct device_node *np, - const char *name, u8 num_parents, - const char * const *parent_names, - const struct clk_hw **parent_hws, - const struct clk_parent_data *parent_data, - unsigned long flags, void __iomem *reg, u8 shift, u32 mask, - u8 clk_mux_flags, u32 *table, spinlock_t *lock); +struct clk *clk_register_mux(struct device *dev, const char *name, + const char * const *parent_names, u8 num_parents, + unsigned long flags, + void __iomem *reg, u8 shift, u8 width, + u8 clk_mux_flags, spinlock_t *lock); +struct clk_hw *clk_hw_register_mux(struct device *dev, const char *name, + const char * const *parent_names, u8 num_parents, + unsigned long flags, + void __iomem *reg, u8 shift, u8 width, + u8 clk_mux_flags, spinlock_t *lock); + struct clk *clk_register_mux_table(struct device *dev, const char *name, const char * const *parent_names, u8 num_parents, - unsigned long flags, void __iomem *reg, u8 shift, u32 mask, + unsigned long flags, + void __iomem *reg, u8 shift, u32 mask, + u8 clk_mux_flags, u32 *table, spinlock_t *lock); +struct clk_hw *clk_hw_register_mux_table(struct device *dev, const char *name, + const char * const *parent_names, u8 num_parents, + unsigned long flags, + void __iomem *reg, u8 shift, u32 mask, u8 clk_mux_flags, 
u32 *table, spinlock_t *lock); - -#define clk_register_mux(dev, name, parent_names, num_parents, flags, reg, \ - shift, width, clk_mux_flags, lock) \ - clk_register_mux_table((dev), (name), (parent_names), (num_parents), \ - (flags), (reg), (shift), BIT((width)) - 1, \ - (clk_mux_flags), NULL, (lock)) -#define clk_hw_register_mux_table(dev, name, parent_names, num_parents, \ - flags, reg, shift, mask, clk_mux_flags, \ - table, lock) \ - __clk_hw_register_mux((dev), NULL, (name), (num_parents), \ - (parent_names), NULL, NULL, (flags), (reg), \ - (shift), (mask), (clk_mux_flags), (table), \ - (lock)) -#define clk_hw_register_mux(dev, name, parent_names, num_parents, flags, reg, \ - shift, width, clk_mux_flags, lock) \ - __clk_hw_register_mux((dev), NULL, (name), (num_parents), \ - (parent_names), NULL, NULL, (flags), (reg), \ - (shift), BIT((width)) - 1, (clk_mux_flags), \ - NULL, (lock)) -#define clk_hw_register_mux_hws(dev, name, parent_hws, num_parents, flags, \ - reg, shift, width, clk_mux_flags, lock) \ - __clk_hw_register_mux((dev), NULL, (name), (num_parents), NULL, \ - (parent_hws), NULL, (flags), (reg), (shift), \ - BIT((width)) - 1, (clk_mux_flags), NULL, (lock)) -#define clk_hw_register_mux_parent_data(dev, name, parent_data, num_parents, \ - flags, reg, shift, width, \ - clk_mux_flags, lock) \ - __clk_hw_register_mux((dev), NULL, (name), (num_parents), NULL, NULL, \ - (parent_data), (flags), (reg), (shift), \ - BIT((width)) - 1, (clk_mux_flags), NULL, (lock)) -#define devm_clk_hw_register_mux(dev, name, parent_names, num_parents, flags, reg, \ - shift, width, clk_mux_flags, lock) \ - __devm_clk_hw_register_mux((dev), NULL, (name), (num_parents), \ - (parent_names), NULL, NULL, (flags), (reg), \ - (shift), BIT((width)) - 1, (clk_mux_flags), \ - NULL, (lock)) - -int clk_mux_val_to_index(struct clk_hw *hw, u32 *table, unsigned int flags, - unsigned int val); -unsigned int clk_mux_index_to_val(u32 *table, unsigned int flags, u8 index); void 
clk_unregister_mux(struct clk *clk); void clk_hw_unregister_mux(struct clk_hw *hw); @@ -977,9 +541,7 @@ struct clk_hw *clk_hw_register_fixed_factor(struct device *dev, const char *name, const char *parent_name, unsigned long flags, unsigned int mult, unsigned int div); void clk_hw_unregister_fixed_factor(struct clk_hw *hw); -struct clk_hw *devm_clk_hw_register_fixed_factor(struct device *dev, - const char *name, const char *parent_name, unsigned long flags, - unsigned int mult, unsigned int div); + /** * struct clk_fractional_divider - adjustable fractional divider clock * @@ -992,21 +554,6 @@ struct clk_hw *devm_clk_hw_register_fixed_factor(struct device *dev, * @lock: register lock * * Clock with adjustable fractional divider affecting its output frequency. - * - * Flags: - * CLK_FRAC_DIVIDER_ZERO_BASED - by default the numerator and denominator - * is the value read from the register. If CLK_FRAC_DIVIDER_ZERO_BASED - * is set then the numerator and denominator are both the value read - * plus one. - * CLK_FRAC_DIVIDER_BIG_ENDIAN - By default little endian register accesses are - * used for the divider register. Setting this flag makes the register - * accesses big endian. - * CLK_FRAC_DIVIDER_POWER_OF_TWO_PS - By default the resulting fraction might - * be saturated and the caller will get quite far from the good enough - * approximation. Instead the caller may require, by setting this flag, - * to shift left by a few bits in case, when the asked one is quite small - * to satisfy the desired range of denominator. It assumes that on the - * caller's side the power-of-two capable prescaler exists. 
*/ struct clk_fractional_divider { struct clk_hw hw; @@ -1018,18 +565,12 @@ struct clk_fractional_divider { u8 nwidth; u32 nmask; u8 flags; - void (*approximation)(struct clk_hw *hw, - unsigned long rate, unsigned long *parent_rate, - unsigned long *m, unsigned long *n); spinlock_t *lock; }; #define to_clk_fd(_hw) container_of(_hw, struct clk_fractional_divider, hw) -#define CLK_FRAC_DIVIDER_ZERO_BASED BIT(0) -#define CLK_FRAC_DIVIDER_BIG_ENDIAN BIT(1) -#define CLK_FRAC_DIVIDER_POWER_OF_TWO_PS BIT(2) - +extern const struct clk_ops clk_fractional_divider_ops; struct clk *clk_register_fractional_divider(struct device *dev, const char *name, const char *parent_name, unsigned long flags, void __iomem *reg, u8 mshift, u8 mwidth, u8 nshift, u8 nwidth, @@ -1060,9 +601,6 @@ void clk_hw_unregister_fractional_divider(struct clk_hw *hw); * leaving the parent rate unmodified. * CLK_MULTIPLIER_ROUND_CLOSEST - Makes the best calculated divider to be * rounded to the closest integer instead of the down one. - * CLK_MULTIPLIER_BIG_ENDIAN - By default little endian register accesses are - * used for the multiplier register. Setting this flag makes the register - * accesses big endian. 
*/ struct clk_multiplier { struct clk_hw hw; @@ -1075,9 +613,8 @@ struct clk_multiplier { #define to_clk_multiplier(_hw) container_of(_hw, struct clk_multiplier, hw) -#define CLK_MULTIPLIER_ZERO_BYPASS BIT(0) +#define CLK_MULTIPLIER_ZERO_BYPASS BIT(0) #define CLK_MULTIPLIER_ROUND_CLOSEST BIT(1) -#define CLK_MULTIPLIER_BIG_ENDIAN BIT(2) extern const struct clk_ops clk_multiplier_ops; @@ -1113,12 +650,6 @@ struct clk *clk_register_composite(struct device *dev, const char *name, struct clk_hw *rate_hw, const struct clk_ops *rate_ops, struct clk_hw *gate_hw, const struct clk_ops *gate_ops, unsigned long flags); -struct clk *clk_register_composite_pdata(struct device *dev, const char *name, - const struct clk_parent_data *parent_data, int num_parents, - struct clk_hw *mux_hw, const struct clk_ops *mux_ops, - struct clk_hw *rate_hw, const struct clk_ops *rate_ops, - struct clk_hw *gate_hw, const struct clk_ops *gate_ops, - unsigned long flags); void clk_unregister_composite(struct clk *clk); struct clk_hw *clk_hw_register_composite(struct device *dev, const char *name, const char * const *parent_names, int num_parents, @@ -1126,28 +657,69 @@ struct clk_hw *clk_hw_register_composite(struct device *dev, const char *name, struct clk_hw *rate_hw, const struct clk_ops *rate_ops, struct clk_hw *gate_hw, const struct clk_ops *gate_ops, unsigned long flags); -struct clk_hw *clk_hw_register_composite_pdata(struct device *dev, - const char *name, - const struct clk_parent_data *parent_data, int num_parents, - struct clk_hw *mux_hw, const struct clk_ops *mux_ops, - struct clk_hw *rate_hw, const struct clk_ops *rate_ops, - struct clk_hw *gate_hw, const struct clk_ops *gate_ops, - unsigned long flags); -struct clk_hw *devm_clk_hw_register_composite_pdata(struct device *dev, - const char *name, const struct clk_parent_data *parent_data, - int num_parents, - struct clk_hw *mux_hw, const struct clk_ops *mux_ops, - struct clk_hw *rate_hw, const struct clk_ops *rate_ops, - struct clk_hw 
*gate_hw, const struct clk_ops *gate_ops, - unsigned long flags); void clk_hw_unregister_composite(struct clk_hw *hw); +/*** + * struct clk_gpio_gate - gpio gated clock + * + * @hw: handle between common and hardware-specific interfaces + * @gpiod: gpio descriptor + * + * Clock with a gpio control for enabling and disabling the parent clock. + * Implements .enable, .disable and .is_enabled + */ + +struct clk_gpio { + struct clk_hw hw; + struct gpio_desc *gpiod; +}; + +#define to_clk_gpio(_hw) container_of(_hw, struct clk_gpio, hw) + +extern const struct clk_ops clk_gpio_gate_ops; +struct clk *clk_register_gpio_gate(struct device *dev, const char *name, + const char *parent_name, unsigned gpio, bool active_low, + unsigned long flags); +struct clk_hw *clk_hw_register_gpio_gate(struct device *dev, const char *name, + const char *parent_name, unsigned gpio, bool active_low, + unsigned long flags); +void clk_hw_unregister_gpio_gate(struct clk_hw *hw); + +/** + * struct clk_gpio_mux - gpio controlled clock multiplexer + * + * @hw: see struct clk_gpio + * @gpiod: gpio descriptor to select the parent of this clock multiplexer + * + * Clock with a gpio control for selecting the parent clock. 
+ * Implements .get_parent, .set_parent and .determine_rate + */ + +extern const struct clk_ops clk_gpio_mux_ops; +struct clk *clk_register_gpio_mux(struct device *dev, const char *name, + const char * const *parent_names, u8 num_parents, unsigned gpio, + bool active_low, unsigned long flags); +struct clk_hw *clk_hw_register_gpio_mux(struct device *dev, const char *name, + const char * const *parent_names, u8 num_parents, unsigned gpio, + bool active_low, unsigned long flags); +void clk_hw_unregister_gpio_mux(struct clk_hw *hw); + +/** + * clk_register - allocate a new clock, register it and return an opaque cookie + * @dev: device that is registering this clock + * @hw: link to hardware-specific clock data + * + * clk_register is the primary interface for populating the clock tree with new + * clock nodes. It returns a pointer to the newly allocated struct clk which + * cannot be dereferenced by driver code but may be used in conjuction with the + * rest of the clock API. In the event of an error clk_register will return an + * error code; drivers must test for an error code after calling clk_register. 
+ */ struct clk *clk_register(struct device *dev, struct clk_hw *hw); struct clk *devm_clk_register(struct device *dev, struct clk_hw *hw); int __must_check clk_hw_register(struct device *dev, struct clk_hw *hw); int __must_check devm_clk_hw_register(struct device *dev, struct clk_hw *hw); -int __must_check of_clk_hw_register(struct device_node *node, struct clk_hw *hw); void clk_unregister(struct clk *clk); void devm_clk_unregister(struct device *dev, struct clk *clk); @@ -1158,33 +730,16 @@ void devm_clk_hw_unregister(struct device *dev, struct clk_hw *hw); /* helper functions */ const char *__clk_get_name(const struct clk *clk); const char *clk_hw_get_name(const struct clk_hw *hw); -#ifdef CONFIG_COMMON_CLK struct clk_hw *__clk_get_hw(struct clk *clk); -#else -static inline struct clk_hw *__clk_get_hw(struct clk *clk) -{ - return (struct clk_hw *)clk; -} -#endif - -struct clk *clk_hw_get_clk(struct clk_hw *hw, const char *con_id); -struct clk *devm_clk_hw_get_clk(struct device *dev, struct clk_hw *hw, - const char *con_id); - unsigned int clk_hw_get_num_parents(const struct clk_hw *hw); struct clk_hw *clk_hw_get_parent(const struct clk_hw *hw); struct clk_hw *clk_hw_get_parent_by_index(const struct clk_hw *hw, unsigned int index); -int clk_hw_get_parent_index(struct clk_hw *hw); -int clk_hw_set_parent(struct clk_hw *hw, struct clk_hw *new_parent); unsigned int __clk_get_enable_count(struct clk *clk); unsigned long clk_hw_get_rate(const struct clk_hw *hw); +unsigned long __clk_get_flags(struct clk *clk); unsigned long clk_hw_get_flags(const struct clk_hw *hw); -#define clk_hw_can_set_rate_parent(hw) \ - (clk_hw_get_flags((hw)) & CLK_SET_RATE_PARENT) - bool clk_hw_is_prepared(const struct clk_hw *hw); -bool clk_hw_rate_is_protected(const struct clk_hw *hw); bool clk_hw_is_enabled(const struct clk_hw *hw); bool __clk_is_enabled(struct clk *clk); struct clk *__clk_lookup(const char *name); @@ -1193,9 +748,6 @@ int __clk_mux_determine_rate(struct clk_hw *hw, int 
__clk_determine_rate(struct clk_hw *core, struct clk_rate_request *req); int __clk_mux_determine_rate_closest(struct clk_hw *hw, struct clk_rate_request *req); -int clk_mux_determine_rate_flags(struct clk_hw *hw, - struct clk_rate_request *req, - unsigned long flags); void clk_hw_reparent(struct clk_hw *hw, struct clk_hw *new_parent); void clk_hw_set_rate_range(struct clk_hw *hw, unsigned long min_rate, unsigned long max_rate); @@ -1206,31 +758,15 @@ static inline void __clk_hw_set_clk(struct clk_hw *dst, struct clk_hw *src) dst->core = src->core; } -static inline long divider_round_rate(struct clk_hw *hw, unsigned long rate, - unsigned long *prate, - const struct clk_div_table *table, - u8 width, unsigned long flags) -{ - return divider_round_rate_parent(hw, clk_hw_get_parent(hw), - rate, prate, table, width, flags); -} - -static inline long divider_ro_round_rate(struct clk_hw *hw, unsigned long rate, - unsigned long *prate, - const struct clk_div_table *table, - u8 width, unsigned long flags, - unsigned int val) -{ - return divider_ro_round_rate_parent(hw, clk_hw_get_parent(hw), - rate, prate, table, width, flags, - val); -} - /* * FIXME clock api without lock protection */ unsigned long clk_hw_round_rate(struct clk_hw *hw, unsigned long rate); +struct of_device_id; + +typedef void (*of_clk_init_cb_t)(struct device_node *); + struct clk_onecell_data { struct clk **clks; unsigned int clk_num; @@ -1241,6 +777,8 @@ struct clk_hw_onecell_data { struct clk_hw *hws[]; }; +extern struct of_device_id __clk_of_table; + #define CLK_OF_DECLARE(name, compat, fn) OF_DECLARE_1(clk, name, compat, fn) /* @@ -1255,133 +793,6 @@ struct clk_hw_onecell_data { } \ OF_DECLARE_1(clk, name, compat, name##_of_clk_init_driver) -#define CLK_HW_INIT(_name, _parent, _ops, _flags) \ - (&(struct clk_init_data) { \ - .flags = _flags, \ - .name = _name, \ - .parent_names = (const char *[]) { _parent }, \ - .num_parents = 1, \ - .ops = _ops, \ - }) - -#define CLK_HW_INIT_HW(_name, _parent, _ops, 
_flags) \ - (&(struct clk_init_data) { \ - .flags = _flags, \ - .name = _name, \ - .parent_hws = (const struct clk_hw*[]) { _parent }, \ - .num_parents = 1, \ - .ops = _ops, \ - }) - -/* - * This macro is intended for drivers to be able to share the otherwise - * individual struct clk_hw[] compound literals created by the compiler - * when using CLK_HW_INIT_HW. It does NOT support multiple parents. - */ -#define CLK_HW_INIT_HWS(_name, _parent, _ops, _flags) \ - (&(struct clk_init_data) { \ - .flags = _flags, \ - .name = _name, \ - .parent_hws = _parent, \ - .num_parents = 1, \ - .ops = _ops, \ - }) - -#define CLK_HW_INIT_FW_NAME(_name, _parent, _ops, _flags) \ - (&(struct clk_init_data) { \ - .flags = _flags, \ - .name = _name, \ - .parent_data = (const struct clk_parent_data[]) { \ - { .fw_name = _parent }, \ - }, \ - .num_parents = 1, \ - .ops = _ops, \ - }) - -#define CLK_HW_INIT_PARENTS(_name, _parents, _ops, _flags) \ - (&(struct clk_init_data) { \ - .flags = _flags, \ - .name = _name, \ - .parent_names = _parents, \ - .num_parents = ARRAY_SIZE(_parents), \ - .ops = _ops, \ - }) - -#define CLK_HW_INIT_PARENTS_HW(_name, _parents, _ops, _flags) \ - (&(struct clk_init_data) { \ - .flags = _flags, \ - .name = _name, \ - .parent_hws = _parents, \ - .num_parents = ARRAY_SIZE(_parents), \ - .ops = _ops, \ - }) - -#define CLK_HW_INIT_PARENTS_DATA(_name, _parents, _ops, _flags) \ - (&(struct clk_init_data) { \ - .flags = _flags, \ - .name = _name, \ - .parent_data = _parents, \ - .num_parents = ARRAY_SIZE(_parents), \ - .ops = _ops, \ - }) - -#define CLK_HW_INIT_NO_PARENT(_name, _ops, _flags) \ - (&(struct clk_init_data) { \ - .flags = _flags, \ - .name = _name, \ - .parent_names = NULL, \ - .num_parents = 0, \ - .ops = _ops, \ - }) - -#define CLK_FIXED_FACTOR(_struct, _name, _parent, \ - _div, _mult, _flags) \ - struct clk_fixed_factor _struct = { \ - .div = _div, \ - .mult = _mult, \ - .hw.init = CLK_HW_INIT(_name, \ - _parent, \ - &clk_fixed_factor_ops, \ - _flags), 
\ - } - -#define CLK_FIXED_FACTOR_HW(_struct, _name, _parent, \ - _div, _mult, _flags) \ - struct clk_fixed_factor _struct = { \ - .div = _div, \ - .mult = _mult, \ - .hw.init = CLK_HW_INIT_HW(_name, \ - _parent, \ - &clk_fixed_factor_ops, \ - _flags), \ - } - -/* - * This macro allows the driver to reuse the _parent array for multiple - * fixed factor clk declarations. - */ -#define CLK_FIXED_FACTOR_HWS(_struct, _name, _parent, \ - _div, _mult, _flags) \ - struct clk_fixed_factor _struct = { \ - .div = _div, \ - .mult = _mult, \ - .hw.init = CLK_HW_INIT_HWS(_name, \ - _parent, \ - &clk_fixed_factor_ops, \ - _flags), \ - } - -#define CLK_FIXED_FACTOR_FW_NAME(_struct, _name, _parent, \ - _div, _mult, _flags) \ - struct clk_fixed_factor _struct = { \ - .div = _div, \ - .mult = _mult, \ - .hw.init = CLK_HW_INIT_FW_NAME(_name, \ - _parent, \ - &clk_fixed_factor_ops, \ - _flags), \ - } - #ifdef CONFIG_OF int of_clk_add_provider(struct device_node *np, struct clk *(*clk_src_get)(struct of_phandle_args *args, @@ -1391,12 +802,7 @@ int of_clk_add_hw_provider(struct device_node *np, struct clk_hw *(*get)(struct of_phandle_args *clkspec, void *data), void *data); -int devm_of_clk_add_hw_provider(struct device *dev, - struct clk_hw *(*get)(struct of_phandle_args *clkspec, - void *data), - void *data); void of_clk_del_provider(struct device_node *np); -void devm_of_clk_del_provider(struct device *dev); struct clk *of_clk_src_simple_get(struct of_phandle_args *clkspec, void *data); struct clk_hw *of_clk_hw_simple_get(struct of_phandle_args *clkspec, @@ -1404,10 +810,13 @@ struct clk_hw *of_clk_hw_simple_get(struct of_phandle_args *clkspec, struct clk *of_clk_src_onecell_get(struct of_phandle_args *clkspec, void *data); struct clk_hw *of_clk_hw_onecell_get(struct of_phandle_args *clkspec, void *data); +unsigned int of_clk_get_parent_count(struct device_node *np); int of_clk_parent_fill(struct device_node *np, const char **parents, unsigned int size); +const char 
*of_clk_get_parent_name(struct device_node *np, int index); int of_clk_detect_critical(struct device_node *np, int index, unsigned long *flags); +void of_clk_init(const struct of_device_id *matches); #else /* !CONFIG_OF */ @@ -1425,15 +834,7 @@ static inline int of_clk_add_hw_provider(struct device_node *np, { return 0; } -static inline int devm_of_clk_add_hw_provider(struct device *dev, - struct clk_hw *(*get)(struct of_phandle_args *clkspec, - void *data), - void *data) -{ - return 0; -} static inline void of_clk_del_provider(struct device_node *np) {} -static inline void devm_of_clk_del_provider(struct device *dev) {} static inline struct clk *of_clk_src_simple_get( struct of_phandle_args *clkspec, void *data) { @@ -1454,18 +855,63 @@ of_clk_hw_onecell_get(struct of_phandle_args *clkspec, void *data) { return ERR_PTR(-ENOENT); } +static inline unsigned int of_clk_get_parent_count(struct device_node *np) +{ + return 0; +} static inline int of_clk_parent_fill(struct device_node *np, const char **parents, unsigned int size) { return 0; } +static inline const char *of_clk_get_parent_name(struct device_node *np, + int index) +{ + return NULL; +} static inline int of_clk_detect_critical(struct device_node *np, int index, unsigned long *flags) { return 0; } +static inline void of_clk_init(const struct of_device_id *matches) {} #endif /* CONFIG_OF */ -void clk_gate_restore_context(struct clk_hw *hw); +/* + * wrap access to peripherals in accessor routines + * for improved portability across platforms + */ +#if IS_ENABLED(CONFIG_PPC) + +static inline u32 clk_readl(u32 __iomem *reg) +{ + return ioread32be(reg); +} + +static inline void clk_writel(u32 val, u32 __iomem *reg) +{ + iowrite32be(val, reg); +} + +#else /* platform dependent I/O accessors */ + +static inline u32 clk_readl(u32 __iomem *reg) +{ + return readl(reg); +} + +static inline void clk_writel(u32 val, u32 __iomem *reg) +{ + writel(val, reg); +} + +#endif /* platform dependent I/O accessors */ + +#ifdef 
CONFIG_DEBUG_FS +struct dentry *clk_debugfs_add_file(struct clk_hw *hw, char *name, umode_t mode, + void *data, const struct file_operations *fops); +#endif + +#endif /* CONFIG_COMMON_CLK */ #endif /* CLK_PROVIDER_H */ diff --git a/include/linux/clk.h b/include/linux/clk.h index 2aa52140d8..123c027888 100644 --- a/include/linux/clk.h +++ b/include/linux/clk.h @@ -1,10 +1,13 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * linux/include/linux/clk.h * * Copyright (C) 2004 ARM Limited. * Written by Deep Blue Solutions Limited. * Copyright (C) 2011-2012 Linaro Ltd + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. */ #ifndef __LINUX_CLK_H #define __LINUX_CLK_H @@ -14,10 +17,8 @@ #include struct device; + struct clk; -struct clk_request; -struct device_node; -struct of_phandle_args; /** * DOC: clk notifier callback types @@ -75,25 +76,10 @@ struct clk_notifier_data { unsigned long new_rate; }; -/** - * struct clk_bulk_data - Data used for bulk clk operations. - * - * @id: clock consumer ID - * @clk: struct clk * to store the associated clock - * - * The CLK APIs provide a series of clk_bulk_() API calls as - * a convenience to consumers which require multiple clks. This - * structure is used to manage data for these calls. 
- */ -struct clk_bulk_data { - const char *id; - struct clk *clk; -}; - #ifdef CONFIG_COMMON_CLK /** - * clk_notifier_register - register a clock rate-change notifier callback + * clk_notifier_register: register a clock rate-change notifier callback * @clk: clock whose rate we are interested in * @nb: notifier block with callback function pointer * @@ -104,23 +90,12 @@ struct clk_bulk_data { int clk_notifier_register(struct clk *clk, struct notifier_block *nb); /** - * clk_notifier_unregister - unregister a clock rate-change notifier callback + * clk_notifier_unregister: unregister a clock rate-change notifier callback * @clk: clock whose rate we are no longer interested in * @nb: notifier block which will be unregistered */ int clk_notifier_unregister(struct clk *clk, struct notifier_block *nb); -/** - * devm_clk_notifier_register - register a managed rate-change notifier callback - * @dev: device for clock "consumer" - * @clk: clock whose rate we are interested in - * @nb: notifier block with callback function pointer - * - * Returns 0 on success, -EERROR otherwise - */ -int devm_clk_notifier_register(struct device *dev, struct clk *clk, - struct notifier_block *nb); - /** * clk_get_accuracy - obtain the clock accuracy in ppb (parts per billion) * for a clock source. @@ -150,35 +125,14 @@ int clk_set_phase(struct clk *clk, int degrees); */ int clk_get_phase(struct clk *clk); -/** - * clk_set_duty_cycle - adjust the duty cycle ratio of a clock signal - * @clk: clock signal source - * @num: numerator of the duty cycle ratio to be applied - * @den: denominator of the duty cycle ratio to be applied - * - * Adjust the duty cycle of a clock signal by the specified ratio. Returns 0 on - * success, -EERROR otherwise. 
- */ -int clk_set_duty_cycle(struct clk *clk, unsigned int num, unsigned int den); - -/** - * clk_get_scaled_duty_cycle - return the duty cycle ratio of a clock signal - * @clk: clock signal source - * @scale: scaling factor to be applied to represent the ratio as an integer - * - * Returns the duty cycle ratio multiplied by the scale provided, otherwise - * returns -EERROR. - */ -int clk_get_scaled_duty_cycle(struct clk *clk, unsigned int scale); - /** * clk_is_match - check if two clk's point to the same hardware clock * @p: clk compared against q * @q: clk compared against p * * Returns true if the two struct clk pointers both point to the same hardware - * clock node. Put differently, returns true if @p and @q - * share the same &struct clk_core object. + * clock node. Put differently, returns true if struct clk *p and struct clk *q + * share the same struct clk_core object. * * Returns false otherwise. Note that two NULL clks are treated as matching. */ @@ -198,13 +152,6 @@ static inline int clk_notifier_unregister(struct clk *clk, return -ENOTSUPP; } -static inline int devm_clk_notifier_register(struct device *dev, - struct clk *clk, - struct notifier_block *nb) -{ - return -ENOTSUPP; -} - static inline long clk_get_accuracy(struct clk *clk) { return -ENOTSUPP; @@ -220,18 +167,6 @@ static inline long clk_get_phase(struct clk *clk) return -ENOTSUPP; } -static inline int clk_set_duty_cycle(struct clk *clk, unsigned int num, - unsigned int den) -{ - return -ENOTSUPP; -} - -static inline unsigned int clk_get_scaled_duty_cycle(struct clk *clk, - unsigned int scale) -{ - return 0; -} - static inline bool clk_is_match(const struct clk *p, const struct clk *q) { return p == q; @@ -239,7 +174,6 @@ static inline bool clk_is_match(const struct clk *p, const struct clk *q) #endif -#ifdef CONFIG_HAVE_CLK_PREPARE /** * clk_prepare - prepare a clock source * @clk: clock source @@ -248,44 +182,14 @@ static inline bool clk_is_match(const struct clk *p, const struct clk *q) * 
* Must not be called from within atomic context. */ +#ifdef CONFIG_HAVE_CLK_PREPARE int clk_prepare(struct clk *clk); -int __must_check clk_bulk_prepare(int num_clks, - const struct clk_bulk_data *clks); - -/** - * clk_is_enabled_when_prepared - indicate if preparing a clock also enables it. - * @clk: clock source - * - * Returns true if clk_prepare() implicitly enables the clock, effectively - * making clk_enable()/clk_disable() no-ops, false otherwise. - * - * This is of interest mainly to the power management code where actually - * disabling the clock also requires unpreparing it to have any material - * effect. - * - * Regardless of the value returned here, the caller must always invoke - * clk_enable() or clk_prepare_enable() and counterparts for usage counts - * to be right. - */ -bool clk_is_enabled_when_prepared(struct clk *clk); #else static inline int clk_prepare(struct clk *clk) { might_sleep(); return 0; } - -static inline int __must_check -clk_bulk_prepare(int num_clks, const struct clk_bulk_data *clks) -{ - might_sleep(); - return 0; -} - -static inline bool clk_is_enabled_when_prepared(struct clk *clk) -{ - return false; -} #endif /** @@ -299,17 +203,11 @@ static inline bool clk_is_enabled_when_prepared(struct clk *clk) */ #ifdef CONFIG_HAVE_CLK_PREPARE void clk_unprepare(struct clk *clk); -void clk_bulk_unprepare(int num_clks, const struct clk_bulk_data *clks); #else static inline void clk_unprepare(struct clk *clk) { might_sleep(); } -static inline void clk_bulk_unprepare(int num_clks, - const struct clk_bulk_data *clks) -{ - might_sleep(); -} #endif #ifdef CONFIG_HAVE_CLK @@ -330,115 +228,6 @@ static inline void clk_bulk_unprepare(int num_clks, */ struct clk *clk_get(struct device *dev, const char *id); -/** - * clk_bulk_get - lookup and obtain a number of references to clock producer. 
- * @dev: device for clock "consumer" - * @num_clks: the number of clk_bulk_data - * @clks: the clk_bulk_data table of consumer - * - * This helper function allows drivers to get several clk consumers in one - * operation. If any of the clk cannot be acquired then any clks - * that were obtained will be freed before returning to the caller. - * - * Returns 0 if all clocks specified in clk_bulk_data table are obtained - * successfully, or valid IS_ERR() condition containing errno. - * The implementation uses @dev and @clk_bulk_data.id to determine the - * clock consumer, and thereby the clock producer. - * The clock returned is stored in each @clk_bulk_data.clk field. - * - * Drivers must assume that the clock source is not enabled. - * - * clk_bulk_get should not be called from within interrupt context. - */ -int __must_check clk_bulk_get(struct device *dev, int num_clks, - struct clk_bulk_data *clks); -/** - * clk_bulk_get_all - lookup and obtain all available references to clock - * producer. - * @dev: device for clock "consumer" - * @clks: pointer to the clk_bulk_data table of consumer - * - * This helper function allows drivers to get all clk consumers in one - * operation. If any of the clk cannot be acquired then any clks - * that were obtained will be freed before returning to the caller. - * - * Returns a positive value for the number of clocks obtained while the - * clock references are stored in the clk_bulk_data table in @clks field. - * Returns 0 if there're none and a negative value if something failed. - * - * Drivers must assume that the clock source is not enabled. - * - * clk_bulk_get should not be called from within interrupt context. 
- */ -int __must_check clk_bulk_get_all(struct device *dev, - struct clk_bulk_data **clks); - -/** - * clk_bulk_get_optional - lookup and obtain a number of references to clock producer - * @dev: device for clock "consumer" - * @num_clks: the number of clk_bulk_data - * @clks: the clk_bulk_data table of consumer - * - * Behaves the same as clk_bulk_get() except where there is no clock producer. - * In this case, instead of returning -ENOENT, the function returns 0 and - * NULL for a clk for which a clock producer could not be determined. - */ -int __must_check clk_bulk_get_optional(struct device *dev, int num_clks, - struct clk_bulk_data *clks); -/** - * devm_clk_bulk_get - managed get multiple clk consumers - * @dev: device for clock "consumer" - * @num_clks: the number of clk_bulk_data - * @clks: the clk_bulk_data table of consumer - * - * Return 0 on success, an errno on failure. - * - * This helper function allows drivers to get several clk - * consumers in one operation with management, the clks will - * automatically be freed when the device is unbound. - */ -int __must_check devm_clk_bulk_get(struct device *dev, int num_clks, - struct clk_bulk_data *clks); -/** - * devm_clk_bulk_get_optional - managed get multiple optional consumer clocks - * @dev: device for clock "consumer" - * @num_clks: the number of clk_bulk_data - * @clks: pointer to the clk_bulk_data table of consumer - * - * Behaves the same as devm_clk_bulk_get() except where there is no clock - * producer. In this case, instead of returning -ENOENT, the function returns - * NULL for given clk. It is assumed all clocks in clk_bulk_data are optional. - * - * Returns 0 if all clocks specified in clk_bulk_data table are obtained - * successfully or for any clk there was no clk provider available, otherwise - * returns valid IS_ERR() condition containing errno. - * The implementation uses @dev and @clk_bulk_data.id to determine the - * clock consumer, and thereby the clock producer. 
- * The clock returned is stored in each @clk_bulk_data.clk field. - * - * Drivers must assume that the clock source is not enabled. - * - * clk_bulk_get should not be called from within interrupt context. - */ -int __must_check devm_clk_bulk_get_optional(struct device *dev, int num_clks, - struct clk_bulk_data *clks); -/** - * devm_clk_bulk_get_all - managed get multiple clk consumers - * @dev: device for clock "consumer" - * @clks: pointer to the clk_bulk_data table of consumer - * - * Returns a positive value for the number of clocks obtained while the - * clock references are stored in the clk_bulk_data table in @clks field. - * Returns 0 if there're none and a negative value if something failed. - * - * This helper function allows drivers to get several clk - * consumers in one operation with management, the clks will - * automatically be freed when the device is unbound. - */ - -int __must_check devm_clk_bulk_get_all(struct device *dev, - struct clk_bulk_data **clks); - /** * devm_clk_get - lookup and obtain a managed reference to a clock producer. * @dev: device for clock "consumer" @@ -459,66 +248,6 @@ int __must_check devm_clk_bulk_get_all(struct device *dev, */ struct clk *devm_clk_get(struct device *dev, const char *id); -/** - * devm_clk_get_optional - lookup and obtain a managed reference to an optional - * clock producer. - * @dev: device for clock "consumer" - * @id: clock consumer ID - * - * Behaves the same as devm_clk_get() except where there is no clock producer. - * In this case, instead of returning -ENOENT, the function returns NULL. - */ -struct clk *devm_clk_get_optional(struct device *dev, const char *id); - -/** - * devm_get_clk_from_child - lookup and obtain a managed reference to a - * clock producer from child node. 
- * @dev: device for clock "consumer" - * @np: pointer to clock consumer node - * @con_id: clock consumer ID - * - * This function parses the clocks, and uses them to look up the - * struct clk from the registered list of clock providers by using - * @np and @con_id - * - * The clock will automatically be freed when the device is unbound - * from the bus. - */ -struct clk *devm_get_clk_from_child(struct device *dev, - struct device_node *np, const char *con_id); -/** - * clk_rate_exclusive_get - get exclusivity over the rate control of a - * producer - * @clk: clock source - * - * This function allows drivers to get exclusive control over the rate of a - * provider. It prevents any other consumer to execute, even indirectly, - * opereation which could alter the rate of the provider or cause glitches - * - * If exlusivity is claimed more than once on clock, even by the same driver, - * the rate effectively gets locked as exclusivity can't be preempted. - * - * Must not be called from within atomic context. - * - * Returns success (0) or negative errno. - */ -int clk_rate_exclusive_get(struct clk *clk); - -/** - * clk_rate_exclusive_put - release exclusivity over the rate control of a - * producer - * @clk: clock source - * - * This function allows drivers to release the exclusivity it previously got - * from clk_rate_exclusive_get() - * - * The caller must balance the number of clk_rate_exclusive_get() and - * clk_rate_exclusive_put() calls. - * - * Must not be called from within atomic context. - */ -void clk_rate_exclusive_put(struct clk *clk); - /** * clk_enable - inform the system when the clock source should be running. * @clk: clock source @@ -531,18 +260,6 @@ void clk_rate_exclusive_put(struct clk *clk); */ int clk_enable(struct clk *clk); -/** - * clk_bulk_enable - inform the system when the set of clks should be running. - * @num_clks: the number of clk_bulk_data - * @clks: the clk_bulk_data table of consumer - * - * May be called from atomic contexts. 
- * - * Returns success (0) or negative errno. - */ -int __must_check clk_bulk_enable(int num_clks, - const struct clk_bulk_data *clks); - /** * clk_disable - inform the system when the clock source is no longer required. * @clk: clock source @@ -559,24 +276,6 @@ int __must_check clk_bulk_enable(int num_clks, */ void clk_disable(struct clk *clk); -/** - * clk_bulk_disable - inform the system when the set of clks is no - * longer required. - * @num_clks: the number of clk_bulk_data - * @clks: the clk_bulk_data table of consumer - * - * Inform the system that a set of clks is no longer required by - * a driver and may be shut down. - * - * May be called from atomic contexts. - * - * Implementation detail: if the set of clks is shared between - * multiple drivers, clk_bulk_enable() calls must be balanced by the - * same number of clk_bulk_disable() calls for the clock source to be - * disabled. - */ -void clk_bulk_disable(int num_clks, const struct clk_bulk_data *clks); - /** * clk_get_rate - obtain the current clock rate (in Hz) for a clock source. * This is only valid once the clock source has been enabled. @@ -596,32 +295,6 @@ unsigned long clk_get_rate(struct clk *clk); */ void clk_put(struct clk *clk); -/** - * clk_bulk_put - "free" the clock source - * @num_clks: the number of clk_bulk_data - * @clks: the clk_bulk_data table of consumer - * - * Note: drivers must ensure that all clk_bulk_enable calls made on this - * clock source are balanced by clk_bulk_disable calls prior to calling - * this function. - * - * clk_bulk_put should not be called from within interrupt context. - */ -void clk_bulk_put(int num_clks, struct clk_bulk_data *clks); - -/** - * clk_bulk_put_all - "free" all the clock source - * @num_clks: the number of clk_bulk_data - * @clks: the clk_bulk_data table of consumer - * - * Note: drivers must ensure that all clk_bulk_enable calls made on this - * clock source are balanced by clk_bulk_disable calls prior to calling - * this function. 
- * - * clk_bulk_put_all should not be called from within interrupt context. - */ -void clk_bulk_put_all(int num_clks, struct clk_bulk_data *clks); - /** * devm_clk_put - "free" a managed clock source * @dev: device used to acquire the clock @@ -668,30 +341,10 @@ long clk_round_rate(struct clk *clk, unsigned long rate); * @clk: clock source * @rate: desired clock rate in Hz * - * Updating the rate starts at the top-most affected clock and then - * walks the tree down to the bottom-most clock that needs updating. - * * Returns success (0) or negative errno. */ int clk_set_rate(struct clk *clk, unsigned long rate); -/** - * clk_set_rate_exclusive- set the clock rate and claim exclusivity over - * clock source - * @clk: clock source - * @rate: desired clock rate in Hz - * - * This helper function allows drivers to atomically set the rate of a producer - * and claim exclusivity over the rate control of the producer. - * - * It is essentially a combination of clk_set_rate() and - * clk_rate_exclusite_get(). Caller must balance this call with a call to - * clk_rate_exclusive_put() - * - * Returns success (0) or negative errno. - */ -int clk_set_rate_exclusive(struct clk *clk, unsigned long rate); - /** * clk_has_parent - check if a clock is a possible parent for another * @clk: clock source @@ -767,26 +420,6 @@ struct clk *clk_get_parent(struct clk *clk); */ struct clk *clk_get_sys(const char *dev_id, const char *con_id); -/** - * clk_save_context - save clock context for poweroff - * - * Saves the context of the clock register for powerstates in which the - * contents of the registers will be lost. Occurs deep within the suspend - * code so locking is not necessary. - */ -int clk_save_context(void); - -/** - * clk_restore_context - restore clock context after poweroff - * - * This occurs with all clocks enabled. Occurs deep within the resume code - * so locking is not necessary. 
- */ -void clk_restore_context(void); - -struct clk_request *clk_request_start(struct clk *clk, unsigned long rate); -void clk_request_done(struct clk_request *req); - #else /* !CONFIG_HAVE_CLK */ static inline struct clk *clk_get(struct device *dev, const char *id) @@ -794,93 +427,22 @@ static inline struct clk *clk_get(struct device *dev, const char *id) return NULL; } -static inline int __must_check clk_bulk_get(struct device *dev, int num_clks, - struct clk_bulk_data *clks) -{ - return 0; -} - -static inline int __must_check clk_bulk_get_optional(struct device *dev, - int num_clks, struct clk_bulk_data *clks) -{ - return 0; -} - -static inline int __must_check clk_bulk_get_all(struct device *dev, - struct clk_bulk_data **clks) -{ - return 0; -} - static inline struct clk *devm_clk_get(struct device *dev, const char *id) { return NULL; } -static inline struct clk *devm_clk_get_optional(struct device *dev, - const char *id) -{ - return NULL; -} - -static inline int __must_check devm_clk_bulk_get(struct device *dev, int num_clks, - struct clk_bulk_data *clks) -{ - return 0; -} - -static inline int __must_check devm_clk_bulk_get_optional(struct device *dev, - int num_clks, struct clk_bulk_data *clks) -{ - return 0; -} - -static inline int __must_check devm_clk_bulk_get_all(struct device *dev, - struct clk_bulk_data **clks) -{ - - return 0; -} - -static inline struct clk *devm_get_clk_from_child(struct device *dev, - struct device_node *np, const char *con_id) -{ - return NULL; -} - static inline void clk_put(struct clk *clk) {} -static inline void clk_bulk_put(int num_clks, struct clk_bulk_data *clks) {} - -static inline void clk_bulk_put_all(int num_clks, struct clk_bulk_data *clks) {} - static inline void devm_clk_put(struct device *dev, struct clk *clk) {} - -static inline int clk_rate_exclusive_get(struct clk *clk) -{ - return 0; -} - -static inline void clk_rate_exclusive_put(struct clk *clk) {} - static inline int clk_enable(struct clk *clk) { return 0; } 
-static inline int __must_check clk_bulk_enable(int num_clks, - const struct clk_bulk_data *clks) -{ - return 0; -} - static inline void clk_disable(struct clk *clk) {} - -static inline void clk_bulk_disable(int num_clks, - const struct clk_bulk_data *clks) {} - static inline unsigned long clk_get_rate(struct clk *clk) { return 0; @@ -891,11 +453,6 @@ static inline int clk_set_rate(struct clk *clk, unsigned long rate) return 0; } -static inline int clk_set_rate_exclusive(struct clk *clk, unsigned long rate) -{ - return 0; -} - static inline long clk_round_rate(struct clk *clk, unsigned long rate) { return 0; @@ -906,22 +463,6 @@ static inline bool clk_has_parent(struct clk *clk, struct clk *parent) return true; } -static inline int clk_set_rate_range(struct clk *clk, unsigned long min, - unsigned long max) -{ - return 0; -} - -static inline int clk_set_min_rate(struct clk *clk, unsigned long rate) -{ - return 0; -} - -static inline int clk_set_max_rate(struct clk *clk, unsigned long rate) -{ - return 0; -} - static inline int clk_set_parent(struct clk *clk, struct clk *parent) { return 0; @@ -936,14 +477,6 @@ static inline struct clk *clk_get_sys(const char *dev_id, const char *con_id) { return NULL; } - -static inline int clk_save_context(void) -{ - return 0; -} - -static inline void clk_restore_context(void) {} - #endif /* clk_prepare_enable helps cases using clk_enable in non-atomic context. 
*/ @@ -968,46 +501,8 @@ static inline void clk_disable_unprepare(struct clk *clk) clk_unprepare(clk); } -static inline int __must_check -clk_bulk_prepare_enable(int num_clks, const struct clk_bulk_data *clks) -{ - int ret; - - ret = clk_bulk_prepare(num_clks, clks); - if (ret) - return ret; - ret = clk_bulk_enable(num_clks, clks); - if (ret) - clk_bulk_unprepare(num_clks, clks); - - return ret; -} - -static inline void clk_bulk_disable_unprepare(int num_clks, - const struct clk_bulk_data *clks) -{ - clk_bulk_disable(num_clks, clks); - clk_bulk_unprepare(num_clks, clks); -} - -/** - * clk_get_optional - lookup and obtain a reference to an optional clock - * producer. - * @dev: device for clock "consumer" - * @id: clock consumer ID - * - * Behaves the same as clk_get() except where there is no clock producer. In - * this case, instead of returning -ENOENT, the function returns NULL. - */ -static inline struct clk *clk_get_optional(struct device *dev, const char *id) -{ - struct clk *clk = clk_get(dev, id); - - if (clk == ERR_PTR(-ENOENT)) - return NULL; - - return clk; -} +struct device_node; +struct of_phandle_args; #if defined(CONFIG_OF) && defined(CONFIG_COMMON_CLK) struct clk *of_clk_get(struct device_node *np, int index); @@ -1023,10 +518,6 @@ static inline struct clk *of_clk_get_by_name(struct device_node *np, { return ERR_PTR(-ENOENT); } -static inline struct clk *of_clk_get_from_provider(struct of_phandle_args *clkspec) -{ - return ERR_PTR(-ENOENT); -} #endif #endif diff --git a/include/linux/clk/at91_pmc.h b/include/linux/clk/at91_pmc.h index ccb3f034bf..17f413bbbe 100644 --- a/include/linux/clk/at91_pmc.h +++ b/include/linux/clk/at91_pmc.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * include/linux/clk/at91_pmc.h * @@ -7,14 +6,16 @@ * * Power Management Controller (PMC) - System peripherals registers. * Based on AT91RM9200 datasheet revision E. 
+ * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. */ #ifndef AT91_PMC_H #define AT91_PMC_H -#define AT91_PMC_V1 (1) /* PMC version 1 */ -#define AT91_PMC_V2 (2) /* PMC version 2 [SAM9X60] */ - #define AT91_PMC_SCER 0x00 /* System Clock Enable Register */ #define AT91_PMC_SCDR 0x04 /* System Clock Disable Register */ @@ -33,42 +34,21 @@ #define AT91_PMC_HCK0 (1 << 16) /* AHB Clock (USB host) [AT91SAM9261 only] */ #define AT91_PMC_HCK1 (1 << 17) /* AHB Clock (LCD) [AT91SAM9261 only] */ -#define AT91_PMC_PLL_CTRL0 0x0C /* PLL Control Register 0 [for SAM9X60] */ -#define AT91_PMC_PLL_CTRL0_ENPLL (1 << 28) /* Enable PLL */ -#define AT91_PMC_PLL_CTRL0_ENPLLCK (1 << 29) /* Enable PLL clock for PMC */ -#define AT91_PMC_PLL_CTRL0_ENLOCK (1 << 31) /* Enable PLL lock */ - -#define AT91_PMC_PLL_CTRL1 0x10 /* PLL Control Register 1 [for SAM9X60] */ - #define AT91_PMC_PCER 0x10 /* Peripheral Clock Enable Register */ #define AT91_PMC_PCDR 0x14 /* Peripheral Clock Disable Register */ #define AT91_PMC_PCSR 0x18 /* Peripheral Clock Status Register */ -#define AT91_PMC_PLL_ACR 0x18 /* PLL Analog Control Register [for SAM9X60] */ -#define AT91_PMC_PLL_ACR_DEFAULT_UPLL 0x12020010UL /* Default PLL ACR value for UPLL */ -#define AT91_PMC_PLL_ACR_DEFAULT_PLLA 0x00020010UL /* Default PLL ACR value for PLLA */ -#define AT91_PMC_PLL_ACR_UTMIVR (1 << 12) /* UPLL Voltage regulator Control */ -#define AT91_PMC_PLL_ACR_UTMIBG (1 << 13) /* UPLL Bandgap Control */ - #define AT91_CKGR_UCKR 0x1C /* UTMI Clock Register [some SAM9] */ #define AT91_PMC_UPLLEN (1 << 16) /* UTMI PLL Enable */ #define AT91_PMC_UPLLCOUNT (0xf << 20) /* UTMI PLL Start-up Time */ #define AT91_PMC_BIASEN (1 << 24) /* UTMI BIAS Enable */ #define AT91_PMC_BIASCOUNT (0xf << 28) /* UTMI BIAS Start-up Time */ -#define 
AT91_PMC_PLL_UPDT 0x1C /* PMC PLL update register [for SAM9X60] */ -#define AT91_PMC_PLL_UPDT_UPDATE (1 << 8) /* Update PLL settings */ -#define AT91_PMC_PLL_UPDT_ID (1 << 0) /* PLL ID */ -#define AT91_PMC_PLL_UPDT_ID_MSK (0xf) /* PLL ID mask */ -#define AT91_PMC_PLL_UPDT_STUPTIM (0xff << 16) /* Startup time */ - #define AT91_CKGR_MOR 0x20 /* Main Oscillator Register [not on SAM9RL] */ #define AT91_PMC_MOSCEN (1 << 0) /* Main Oscillator Enable */ #define AT91_PMC_OSCBYPASS (1 << 1) /* Oscillator Bypass */ -#define AT91_PMC_WAITMODE (1 << 2) /* Wait Mode Command */ #define AT91_PMC_MOSCRCEN (1 << 3) /* Main On-Chip RC Oscillator Enable [some SAM9] */ #define AT91_PMC_OSCOUNT (0xff << 8) /* Main Oscillator Start-up Time */ -#define AT91_PMC_KEY_MASK (0xff << 16) #define AT91_PMC_KEY (0x37 << 16) /* MOR Writing Key */ #define AT91_PMC_MOSCSEL (1 << 24) /* Main Oscillator Selection [some SAM9] */ #define AT91_PMC_CFDEN (1 << 25) /* Clock Failure Detector Enable [some SAM9] */ @@ -92,8 +72,6 @@ #define AT91_PMC_USBDIV_4 (2 << 28) #define AT91_PMC_USB96M (1 << 28) /* Divider by 2 Enable (PLLB only) */ -#define AT91_PMC_CPU_CKR 0x28 /* CPU Clock Register */ - #define AT91_PMC_MCKR 0x30 /* Master Clock Register */ #define AT91_PMC_CSS (3 << 0) /* Master Clock Selection */ #define AT91_PMC_CSS_SLOW (0 << 0) @@ -137,34 +115,6 @@ #define AT91_PMC_PLLADIV2_ON (1 << 12) #define AT91_PMC_H32MXDIV BIT(24) -#define AT91_PMC_MCR_V2 0x30 /* Master Clock Register [SAMA7G5 only] */ -#define AT91_PMC_MCR_V2_ID_MSK (0xF) -#define AT91_PMC_MCR_V2_ID(_id) ((_id) & AT91_PMC_MCR_V2_ID_MSK) -#define AT91_PMC_MCR_V2_CMD (1 << 7) -#define AT91_PMC_MCR_V2_DIV (7 << 8) -#define AT91_PMC_MCR_V2_DIV1 (0 << 8) -#define AT91_PMC_MCR_V2_DIV2 (1 << 8) -#define AT91_PMC_MCR_V2_DIV4 (2 << 8) -#define AT91_PMC_MCR_V2_DIV8 (3 << 8) -#define AT91_PMC_MCR_V2_DIV16 (4 << 8) -#define AT91_PMC_MCR_V2_DIV32 (5 << 8) -#define AT91_PMC_MCR_V2_DIV64 (6 << 8) -#define AT91_PMC_MCR_V2_DIV3 (7 << 8) -#define 
AT91_PMC_MCR_V2_CSS (0x1F << 16) -#define AT91_PMC_MCR_V2_CSS_MD_SLCK (0 << 16) -#define AT91_PMC_MCR_V2_CSS_TD_SLCK (1 << 16) -#define AT91_PMC_MCR_V2_CSS_MAINCK (2 << 16) -#define AT91_PMC_MCR_V2_CSS_MCK0 (3 << 16) -#define AT91_PMC_MCR_V2_CSS_SYSPLL (5 << 16) -#define AT91_PMC_MCR_V2_CSS_DDRPLL (6 << 16) -#define AT91_PMC_MCR_V2_CSS_IMGPLL (7 << 16) -#define AT91_PMC_MCR_V2_CSS_BAUDPLL (8 << 16) -#define AT91_PMC_MCR_V2_CSS_AUDIOPLL (9 << 16) -#define AT91_PMC_MCR_V2_CSS_ETHPLL (10 << 16) -#define AT91_PMC_MCR_V2_EN (1 << 28) - -#define AT91_PMC_XTALF 0x34 /* Main XTAL Frequency Register [SAMA7G5 only] */ - #define AT91_PMC_USB 0x38 /* USB Clock Register [some SAM9 only] */ #define AT91_PMC_USBS (0x1 << 0) /* USB OHCI Input clock selection */ #define AT91_PMC_USBS_PLLA (0 << 0) @@ -203,23 +153,8 @@ #define AT91_PMC_MOSCRCS (1 << 17) /* Main On-Chip RC [some SAM9] */ #define AT91_PMC_CFDEV (1 << 18) /* Clock Failure Detector Event [some SAM9] */ #define AT91_PMC_GCKRDY (1 << 24) /* Generated Clocks */ -#define AT91_PMC_MCKXRDY (1 << 26) /* Master Clock x [x=1..4] Ready Status */ #define AT91_PMC_IMR 0x6c /* Interrupt Mask Register */ -#define AT91_PMC_FSMR 0x70 /* Fast Startup Mode Register */ -#define AT91_PMC_FSTT(n) BIT(n) -#define AT91_PMC_RTTAL BIT(16) -#define AT91_PMC_RTCAL BIT(17) /* RTC Alarm Enable */ -#define AT91_PMC_USBAL BIT(18) /* USB Resume Enable */ -#define AT91_PMC_SDMMC_CD BIT(19) /* SDMMC Card Detect Enable */ -#define AT91_PMC_LPM BIT(20) /* Low-power Mode */ -#define AT91_PMC_RXLP_MCE BIT(24) /* Backup UART Receive Enable */ -#define AT91_PMC_ACC_CE BIT(25) /* ACC Enable */ - -#define AT91_PMC_FSPR 0x74 /* Fast Startup Polarity Reg */ - -#define AT91_PMC_FS_INPUT_MASK 0x7ff - #define AT91_PMC_PLLICPR 0x80 /* PLL Charge Pump Current Register */ #define AT91_PMC_PROT 0xe4 /* Write Protect Mode Register [some SAM9] */ @@ -231,42 +166,23 @@ #define AT91_PMC_WPVS (0x1 << 0) /* Write Protect Violation Status */ #define AT91_PMC_WPVSRC (0xffff << 
8) /* Write Protect Violation Source */ -#define AT91_PMC_PLL_ISR0 0xEC /* PLL Interrupt Status Register 0 [SAM9X60 only] */ - #define AT91_PMC_PCER1 0x100 /* Peripheral Clock Enable Register 1 [SAMA5 only]*/ #define AT91_PMC_PCDR1 0x104 /* Peripheral Clock Enable Register 1 */ #define AT91_PMC_PCSR1 0x108 /* Peripheral Clock Enable Register 1 */ #define AT91_PMC_PCR 0x10c /* Peripheral Control Register [some SAM9 and SAMA5] */ #define AT91_PMC_PCR_PID_MASK 0x3f +#define AT91_PMC_PCR_GCKCSS_OFFSET 8 +#define AT91_PMC_PCR_GCKCSS_MASK (0x7 << AT91_PMC_PCR_GCKCSS_OFFSET) +#define AT91_PMC_PCR_GCKCSS(n) ((n) << AT91_PMC_PCR_GCKCSS_OFFSET) /* GCK Clock Source Selection */ #define AT91_PMC_PCR_CMD (0x1 << 12) /* Command (read=0, write=1) */ -#define AT91_PMC_PCR_GCKDIV_MASK GENMASK(27, 20) +#define AT91_PMC_PCR_DIV_OFFSET 16 +#define AT91_PMC_PCR_DIV_MASK (0x3 << AT91_PMC_PCR_DIV_OFFSET) +#define AT91_PMC_PCR_DIV(n) ((n) << AT91_PMC_PCR_DIV_OFFSET) /* Divisor Value */ +#define AT91_PMC_PCR_GCKDIV_OFFSET 20 +#define AT91_PMC_PCR_GCKDIV_MASK (0xff << AT91_PMC_PCR_GCKDIV_OFFSET) +#define AT91_PMC_PCR_GCKDIV(n) ((n) << AT91_PMC_PCR_GCKDIV_OFFSET) /* Generated Clock Divisor Value */ #define AT91_PMC_PCR_EN (0x1 << 28) /* Enable */ #define AT91_PMC_PCR_GCKEN (0x1 << 29) /* GCK Enable */ -#define AT91_PMC_AUDIO_PLL0 0x14c -#define AT91_PMC_AUDIO_PLL_PLLEN (1 << 0) -#define AT91_PMC_AUDIO_PLL_PADEN (1 << 1) -#define AT91_PMC_AUDIO_PLL_PMCEN (1 << 2) -#define AT91_PMC_AUDIO_PLL_RESETN (1 << 3) -#define AT91_PMC_AUDIO_PLL_ND_OFFSET 8 -#define AT91_PMC_AUDIO_PLL_ND_MASK (0x7f << AT91_PMC_AUDIO_PLL_ND_OFFSET) -#define AT91_PMC_AUDIO_PLL_ND(n) ((n) << AT91_PMC_AUDIO_PLL_ND_OFFSET) -#define AT91_PMC_AUDIO_PLL_QDPMC_OFFSET 16 -#define AT91_PMC_AUDIO_PLL_QDPMC_MASK (0x7f << AT91_PMC_AUDIO_PLL_QDPMC_OFFSET) -#define AT91_PMC_AUDIO_PLL_QDPMC(n) ((n) << AT91_PMC_AUDIO_PLL_QDPMC_OFFSET) - -#define AT91_PMC_AUDIO_PLL1 0x150 -#define AT91_PMC_AUDIO_PLL_FRACR_MASK 0x3fffff -#define 
AT91_PMC_AUDIO_PLL_QDPAD_OFFSET 24 -#define AT91_PMC_AUDIO_PLL_QDPAD_MASK (0x7f << AT91_PMC_AUDIO_PLL_QDPAD_OFFSET) -#define AT91_PMC_AUDIO_PLL_QDPAD(n) ((n) << AT91_PMC_AUDIO_PLL_QDPAD_OFFSET) -#define AT91_PMC_AUDIO_PLL_QDPAD_DIV_OFFSET AT91_PMC_AUDIO_PLL_QDPAD_OFFSET -#define AT91_PMC_AUDIO_PLL_QDPAD_DIV_MASK (0x3 << AT91_PMC_AUDIO_PLL_QDPAD_DIV_OFFSET) -#define AT91_PMC_AUDIO_PLL_QDPAD_DIV(n) ((n) << AT91_PMC_AUDIO_PLL_QDPAD_DIV_OFFSET) -#define AT91_PMC_AUDIO_PLL_QDPAD_EXTDIV_OFFSET 26 -#define AT91_PMC_AUDIO_PLL_QDPAD_EXTDIV_MAX 0x1f -#define AT91_PMC_AUDIO_PLL_QDPAD_EXTDIV_MASK (AT91_PMC_AUDIO_PLL_QDPAD_EXTDIV_MAX << AT91_PMC_AUDIO_PLL_QDPAD_EXTDIV_OFFSET) -#define AT91_PMC_AUDIO_PLL_QDPAD_EXTDIV(n) ((n) << AT91_PMC_AUDIO_PLL_QDPAD_EXTDIV_OFFSET) - #endif diff --git a/include/linux/clk/bcm2835.h b/include/linux/clk/bcm2835.h new file mode 100644 index 0000000000..aa937f6c17 --- /dev/null +++ b/include/linux/clk/bcm2835.h @@ -0,0 +1,24 @@ +/* + * Copyright (C) 2010 Broadcom + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ + +#ifndef __LINUX_CLK_BCM2835_H_ +#define __LINUX_CLK_BCM2835_H_ + +void __init bcm2835_init_clocks(void); + +#endif diff --git a/include/linux/clk/clk-conf.h b/include/linux/clk/clk-conf.h index eae9652c70..e0c362363c 100644 --- a/include/linux/clk/clk-conf.h +++ b/include/linux/clk/clk-conf.h @@ -1,12 +1,12 @@ -/* SPDX-License-Identifier: GPL-2.0 */ /* * Copyright (C) 2014 Samsung Electronics Co., Ltd. * Sylwester Nawrocki + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. */ -#ifndef __CLK_CONF_H -#define __CLK_CONF_H - #include struct device_node; @@ -20,5 +20,3 @@ static inline int of_clk_set_defaults(struct device_node *node, return 0; } #endif - -#endif /* __CLK_CONF_H */ diff --git a/include/linux/clk/mmp.h b/include/linux/clk/mmp.h index 4451304603..607321fa2c 100644 --- a/include/linux/clk/mmp.h +++ b/include/linux/clk/mmp.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef __CLK_MMP_H #define __CLK_MMP_H diff --git a/include/linux/clk/mxs.h b/include/linux/clk/mxs.h index 2674e607ff..5138a90e01 100644 --- a/include/linux/clk/mxs.h +++ b/include/linux/clk/mxs.h @@ -1,6 +1,9 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright (C) 2013 Freescale Semiconductor, Inc. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
*/ #ifndef __LINUX_CLK_MXS_H diff --git a/include/linux/clk/renesas.h b/include/linux/clk/renesas.h index 0ebbe2f0b4..ba6fa41485 100644 --- a/include/linux/clk/renesas.h +++ b/include/linux/clk/renesas.h @@ -1,10 +1,14 @@ -/* SPDX-License-Identifier: GPL-2.0+ - * +/* * Copyright 2013 Ideas On Board SPRL * Copyright 2013, 2014 Horms Solutions Ltd. * * Contact: Laurent Pinchart * Contact: Simon Horman + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. */ #ifndef __LINUX_CLK_RENESAS_H_ @@ -16,6 +20,10 @@ struct device; struct device_node; struct generic_pm_domain; +void r8a7778_clocks_init(u32 mode); +void r8a7779_clocks_init(u32 mode); +void rcar_gen2_clocks_init(u32 mode); + void cpg_mstp_add_clk_domain(struct device_node *np); #ifdef CONFIG_CLK_RENESAS_CPG_MSTP int cpg_mstp_attach_dev(struct generic_pm_domain *unused, struct device *dev); diff --git a/include/linux/clk/tegra.h b/include/linux/clk/tegra.h index d128ad1570..7007a5f480 100644 --- a/include/linux/clk/tegra.h +++ b/include/linux/clk/tegra.h @@ -1,6 +1,17 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* - * Copyright (c) 2012-2020, NVIDIA CORPORATION. All rights reserved. + * Copyright (c) 2012, NVIDIA CORPORATION. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. 
If not, see . */ #ifndef __LINUX_CLK_TEGRA_H_ @@ -108,134 +119,11 @@ static inline void tegra_cpu_clock_resume(void) tegra_cpu_car_ops->resume(); } -#else -static inline bool tegra_cpu_rail_off_ready(void) -{ - return false; -} - -static inline void tegra_cpu_clock_suspend(void) -{ -} - -static inline void tegra_cpu_clock_resume(void) -{ -} #endif -struct clk; -struct tegra_emc; - -typedef long (tegra20_clk_emc_round_cb)(unsigned long rate, - unsigned long min_rate, - unsigned long max_rate, - void *arg); -typedef int (tegra124_emc_prepare_timing_change_cb)(struct tegra_emc *emc, - unsigned long rate); -typedef void (tegra124_emc_complete_timing_change_cb)(struct tegra_emc *emc, - unsigned long rate); - -struct tegra210_clk_emc_config { - unsigned long rate; - bool same_freq; - u32 value; - - unsigned long parent_rate; - u8 parent; -}; - -struct tegra210_clk_emc_provider { - struct module *owner; - struct device *dev; - - struct tegra210_clk_emc_config *configs; - unsigned int num_configs; - - int (*set_rate)(struct device *dev, - const struct tegra210_clk_emc_config *config); -}; - -#if defined(CONFIG_ARCH_TEGRA_2x_SOC) || defined(CONFIG_ARCH_TEGRA_3x_SOC) -void tegra20_clk_set_emc_round_callback(tegra20_clk_emc_round_cb *round_cb, - void *cb_arg); -int tegra20_clk_prepare_emc_mc_same_freq(struct clk *emc_clk, bool same); -#else -static inline void -tegra20_clk_set_emc_round_callback(tegra20_clk_emc_round_cb *round_cb, - void *cb_arg) -{ -} - -static inline int -tegra20_clk_prepare_emc_mc_same_freq(struct clk *emc_clk, bool same) -{ - return 0; -} -#endif - -#ifdef CONFIG_TEGRA124_CLK_EMC -void tegra124_clk_set_emc_callbacks(tegra124_emc_prepare_timing_change_cb *prep_cb, - tegra124_emc_complete_timing_change_cb *complete_cb); -#else -static inline void -tegra124_clk_set_emc_callbacks(tegra124_emc_prepare_timing_change_cb *prep_cb, - tegra124_emc_complete_timing_change_cb *complete_cb) -{ -} -#endif - -#ifdef CONFIG_ARCH_TEGRA_210_SOC -int 
tegra210_plle_hw_sequence_start(void); -bool tegra210_plle_hw_sequence_is_enabled(void); -void tegra210_xusb_pll_hw_control_enable(void); -void tegra210_xusb_pll_hw_sequence_start(void); -void tegra210_sata_pll_hw_control_enable(void); -void tegra210_sata_pll_hw_sequence_start(void); -void tegra210_set_sata_pll_seq_sw(bool state); -void tegra210_put_utmipll_in_iddq(void); -void tegra210_put_utmipll_out_iddq(void); -int tegra210_clk_handle_mbist_war(unsigned int id); -void tegra210_clk_emc_dll_enable(bool flag); -void tegra210_clk_emc_dll_update_setting(u32 emc_dll_src_value); -void tegra210_clk_emc_update_setting(u32 emc_src_value); - -int tegra210_clk_emc_attach(struct clk *clk, - struct tegra210_clk_emc_provider *provider); -void tegra210_clk_emc_detach(struct clk *clk); -#else -static inline int tegra210_plle_hw_sequence_start(void) -{ - return 0; -} - -static inline bool tegra210_plle_hw_sequence_is_enabled(void) -{ - return false; -} - -static inline int tegra210_clk_handle_mbist_war(unsigned int id) -{ - return 0; -} - -static inline int -tegra210_clk_emc_attach(struct clk *clk, - struct tegra210_clk_emc_provider *provider) -{ - return 0; -} - -static inline void tegra210_xusb_pll_hw_control_enable(void) {} -static inline void tegra210_xusb_pll_hw_sequence_start(void) {} -static inline void tegra210_sata_pll_hw_control_enable(void) {} -static inline void tegra210_sata_pll_hw_sequence_start(void) {} -static inline void tegra210_set_sata_pll_seq_sw(bool state) {} -static inline void tegra210_put_utmipll_in_iddq(void) {} -static inline void tegra210_put_utmipll_out_iddq(void) {} -static inline void tegra210_clk_emc_dll_enable(bool flag) {} -static inline void tegra210_clk_emc_dll_update_setting(u32 emc_dll_src_value) {} -static inline void tegra210_clk_emc_update_setting(u32 emc_src_value) {} -static inline void tegra210_clk_emc_detach(struct clk *clk) {} -#endif +extern void tegra210_xusb_pll_hw_control_enable(void); +extern void 
tegra210_xusb_pll_hw_sequence_start(void); +extern void tegra210_sata_pll_hw_control_enable(void); +extern void tegra210_sata_pll_hw_sequence_start(void); #endif /* __LINUX_CLK_TEGRA_H_ */ diff --git a/include/linux/clk/ti.h b/include/linux/clk/ti.h index 3486f20a37..6110fe09ed 100644 --- a/include/linux/clk/ti.h +++ b/include/linux/clk/ti.h @@ -18,18 +18,6 @@ #include #include -/** - * struct clk_omap_reg - OMAP register declaration - * @offset: offset from the master IP module base address - * @index: index of the master IP module - */ -struct clk_omap_reg { - void __iomem *ptr; - u16 offset; - u8 index; - u8 flags; -}; - /** * struct dpll_data - DPLL registers and integration data * @mult_div1_reg: register containing the DPLL M and N bitfields @@ -63,17 +51,6 @@ struct clk_omap_reg { * @auto_recal_bit: bitshift of the driftguard enable bit in @control_reg * @recal_en_bit: bitshift of the PRM_IRQENABLE_* bit for recalibration IRQs * @recal_st_bit: bitshift of the PRM_IRQSTATUS_* bit for recalibration IRQs - * @ssc_deltam_reg: register containing the DPLL SSC frequency spreading - * @ssc_modfreq_reg: register containing the DPLL SSC modulation frequency - * @ssc_modfreq_mant_mask: mask of the mantissa component in @ssc_modfreq_reg - * @ssc_modfreq_exp_mask: mask of the exponent component in @ssc_modfreq_reg - * @ssc_enable_mask: mask of the DPLL SSC enable bit in @control_reg - * @ssc_downspread_mask: mask of the DPLL SSC low frequency only bit in - * @control_reg - * @ssc_modfreq: the DPLL SSC frequency modulation in kHz - * @ssc_deltam: the DPLL SSC frequency spreading in permille (10th of percent) - * @ssc_downspread: require the only low frequency spread of the DPLL in SSC - * mode * @flags: DPLL type/features (see below) * * Possible values for @flags: @@ -90,12 +67,12 @@ struct clk_omap_reg { * can be placed into read-only space. 
*/ struct dpll_data { - struct clk_omap_reg mult_div1_reg; + void __iomem *mult_div1_reg; u32 mult_mask; u32 div1_mask; struct clk_hw *clk_bypass; struct clk_hw *clk_ref; - struct clk_omap_reg control_reg; + void __iomem *control_reg; u32 enable_mask; unsigned long last_rounded_rate; u16 last_rounded_m; @@ -107,8 +84,8 @@ struct dpll_data { u16 max_divider; unsigned long max_rate; u8 modes; - struct clk_omap_reg autoidle_reg; - struct clk_omap_reg idlest_reg; + void __iomem *autoidle_reg; + void __iomem *idlest_reg; u32 autoidle_mask; u32 freqsel_mask; u32 idlest_mask; @@ -121,17 +98,6 @@ struct dpll_data { u8 auto_recal_bit; u8 recal_en_bit; u8 recal_st_bit; - struct clk_omap_reg ssc_deltam_reg; - struct clk_omap_reg ssc_modfreq_reg; - u32 ssc_deltam_int_mask; - u32 ssc_deltam_frac_mask; - u32 ssc_modfreq_mant_mask; - u32 ssc_modfreq_exp_mask; - u32 ssc_enable_mask; - u32 ssc_downspread_mask; - u32 ssc_modfreq; - u32 ssc_deltam; - bool ssc_downspread; u8 flags; }; @@ -147,10 +113,10 @@ struct clk_hw_omap; */ struct clk_hw_omap_ops { void (*find_idlest)(struct clk_hw_omap *oclk, - struct clk_omap_reg *idlest_reg, + void __iomem **idlest_reg, u8 *idlest_bit, u8 *idlest_val); void (*find_companion)(struct clk_hw_omap *oclk, - struct clk_omap_reg *other_reg, + void __iomem **other_reg, u8 *other_bit); void (*allow_idle)(struct clk_hw_omap *oclk); void (*deny_idle)(struct clk_hw_omap *oclk); @@ -163,6 +129,8 @@ struct clk_hw_omap_ops { * @enable_bit: bitshift to write to enable/disable the clock (see @enable_reg) * @flags: see "struct clk.flags possibilities" above * @clksel_reg: for clksel clks, register va containing src/divisor select + * @clksel_mask: bitmask in @clksel_reg for the src/divisor selector + * @clksel: for clksel clks, pointer to struct clksel for this clock * @dpll_data: for DPLLs, pointer to struct dpll_data for this clock * @clkdm_name: clockdomain name that this clock is contained in * @clkdm: pointer to struct clockdomain, resolved from 
@clkdm_name at runtime @@ -173,16 +141,16 @@ struct clk_hw_omap { struct list_head node; unsigned long fixed_rate; u8 fixed_div; - struct clk_omap_reg enable_reg; + void __iomem *enable_reg; u8 enable_bit; - unsigned long flags; - struct clk_omap_reg clksel_reg; + u8 flags; + void __iomem *clksel_reg; + u32 clksel_mask; + const struct clksel *clksel; struct dpll_data *dpll_data; const char *clkdm_name; struct clockdomain *clkdm; const struct clk_hw_omap_ops *ops; - u32 context; - int autoidle_count; }; /* @@ -204,6 +172,7 @@ struct clk_hw_omap { * should be used. This is a temporary solution - a better approach * would be to associate clock type-specific data with the clock, * similar to the struct dpll_data approach. + * MEMMAP_ADDRESSING: Use memmap addressing to access clock registers. */ #define ENABLE_REG_32BIT (1 << 0) /* Use 32-bit access */ #define CLOCK_IDLE_CONTROL (1 << 1) @@ -211,6 +180,7 @@ struct clk_hw_omap { #define ENABLE_ON_INIT (1 << 3) /* Enable upon framework init */ #define INVERT_ENABLE (1 << 4) /* 0 enables, 1 disables */ #define CLOCK_CLKOUTX2 (1 << 5) +#define MEMMAP_ADDRESSING (1 << 6) /* CM_CLKEN_PLL*.EN* bit values - not all are available for every DPLL */ #define DPLL_LOW_POWER_STOP 0x1 @@ -227,19 +197,26 @@ enum { TI_CLKM_PRM, TI_CLKM_SCRM, TI_CLKM_CTRL, - TI_CLKM_CTRL_AUX, TI_CLKM_PLLSS, CLK_MAX_MEMMAPS }; +/** + * struct clk_omap_reg - OMAP register declaration + * @offset: offset from the master IP module base address + * @index: index of the master IP module + */ +struct clk_omap_reg { + u16 offset; + u16 index; +}; + /** * struct ti_clk_ll_ops - low-level ops for clocks * @clk_readl: pointer to register read function * @clk_writel: pointer to register write function - * @clk_rmw: pointer to register read-modify-write function * @clkdm_clk_enable: pointer to clockdomain enable function * @clkdm_clk_disable: pointer to clockdomain disable function - * @clkdm_lookup: pointer to clockdomain lookup function * @cm_wait_module_ready: 
pointer to CM module wait ready function * @cm_split_idlest_reg: pointer to CM module function to split idlest reg * @@ -250,22 +227,20 @@ enum { * operations not provided directly by clock drivers. */ struct ti_clk_ll_ops { - u32 (*clk_readl)(const struct clk_omap_reg *reg); - void (*clk_writel)(u32 val, const struct clk_omap_reg *reg); - void (*clk_rmw)(u32 val, u32 mask, const struct clk_omap_reg *reg); + u32 (*clk_readl)(void __iomem *reg); + void (*clk_writel)(u32 val, void __iomem *reg); int (*clkdm_clk_enable)(struct clockdomain *clkdm, struct clk *clk); int (*clkdm_clk_disable)(struct clockdomain *clkdm, struct clk *clk); - struct clockdomain * (*clkdm_lookup)(const char *name); int (*cm_wait_module_ready)(u8 part, s16 prcm_mod, u16 idlest_reg, u8 idlest_shift); - int (*cm_split_idlest_reg)(struct clk_omap_reg *idlest_reg, - s16 *prcm_inst, u8 *idlest_reg_id); + int (*cm_split_idlest_reg)(void __iomem *idlest_reg, s16 *prcm_inst, + u8 *idlest_reg_id); }; #define to_clk_hw_omap(_hw) container_of(_hw, struct clk_hw_omap, hw) -bool omap2_clk_is_hw_omap(struct clk_hw *hw); +void omap2_init_clk_clkdm(struct clk_hw *clk); int omap2_clk_disable_autoidle_all(void); int omap2_clk_enable_autoidle_all(void); int omap2_clk_allow_idle(struct clk *clk); @@ -315,17 +290,9 @@ struct ti_clk_features { #define TI_CLK_DPLL4_DENY_REPROGRAM BIT(1) #define TI_CLK_DISABLE_CLKDM_CONTROL BIT(2) #define TI_CLK_ERRATA_I810 BIT(3) -#define TI_CLK_CLKCTRL_COMPAT BIT(4) -#define TI_CLK_DEVICE_TYPE_GP BIT(5) void ti_clk_setup_features(struct ti_clk_features *features); const struct ti_clk_features *ti_clk_get_features(void); -bool ti_clk_is_in_standby(struct clk *clk); -int omap3_noncore_dpll_save_context(struct clk_hw *hw); -void omap3_noncore_dpll_restore_context(struct clk_hw *hw); - -int omap3_core_dpll_save_context(struct clk_hw *hw); -void omap3_core_dpll_restore_context(struct clk_hw *hw); extern const struct clk_hw_omap_ops clkhwops_omap2xxx_dpll; diff --git 
a/include/linux/clk/zynq.h b/include/linux/clk/zynq.h index a198dd9255..7a5633b715 100644 --- a/include/linux/clk/zynq.h +++ b/include/linux/clk/zynq.h @@ -1,7 +1,20 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * Copyright (C) 2013 Xilinx Inc. * Copyright (C) 2012 National Instruments + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #ifndef __LINUX_CLK_ZYNQ_H_ diff --git a/include/linux/clkdev.h b/include/linux/clkdev.h index 8a8423eb8e..2eabc862ab 100644 --- a/include/linux/clkdev.h +++ b/include/linux/clkdev.h @@ -1,15 +1,18 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * include/linux/clkdev.h * * Copyright (C) 2008 Russell King. * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * * Helper for the clk API to assist looking up a struct clk. */ #ifndef __CLKDEV_H #define __CLKDEV_H -#include +#include struct clk; struct clk_hw; @@ -30,6 +33,11 @@ struct clk_lookup { .clk = c, \ } +struct clk_lookup *clkdev_alloc(struct clk *clk, const char *con_id, + const char *dev_fmt, ...) __printf(3, 4); +struct clk_lookup *clkdev_hw_alloc(struct clk_hw *hw, const char *con_id, + const char *dev_fmt, ...) 
__printf(3, 4); + void clkdev_add(struct clk_lookup *cl); void clkdev_drop(struct clk_lookup *cl); @@ -44,8 +52,9 @@ int clk_add_alias(const char *, const char *, const char *, struct device *); int clk_register_clkdev(struct clk *, const char *, const char *); int clk_hw_register_clkdev(struct clk_hw *, const char *, const char *); -int devm_clk_hw_register_clkdev(struct device *dev, struct clk_hw *hw, - const char *con_id, const char *dev_id); -void devm_clk_release_clkdev(struct device *dev, const char *con_id, - const char *dev_id); +#ifdef CONFIG_COMMON_CLK +int __clk_get(struct clk *clk); +void __clk_put(struct clk *clk); +#endif + #endif diff --git a/include/linux/clock_cooling.h b/include/linux/clock_cooling.h new file mode 100644 index 0000000000..4d1019d56f --- /dev/null +++ b/include/linux/clock_cooling.h @@ -0,0 +1,65 @@ +/* + * linux/include/linux/clock_cooling.h + * + * Copyright (C) 2014 Eduardo Valentin + * + * Copyright (C) 2013 Texas Instruments Inc. + * Contact: Eduardo Valentin + * + * Highly based on cpu_cooling.c. + * Copyright (C) 2012 Samsung Electronics Co., Ltd(http://www.samsung.com) + * Copyright (C) 2012 Amit Daniel + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; version 2 of the License. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + */ + +#ifndef __CPU_COOLING_H__ +#define __CPU_COOLING_H__ + +#include +#include +#include + +#ifdef CONFIG_CLOCK_THERMAL +/** + * clock_cooling_register - function to create clock cooling device. + * @dev: struct device pointer to the device used as clock cooling device. + * @clock_name: string containing the clock used as cooling mechanism. 
+ */ +struct thermal_cooling_device * +clock_cooling_register(struct device *dev, const char *clock_name); + +/** + * clock_cooling_unregister - function to remove clock cooling device. + * @cdev: thermal cooling device pointer. + */ +void clock_cooling_unregister(struct thermal_cooling_device *cdev); + +unsigned long clock_cooling_get_level(struct thermal_cooling_device *cdev, + unsigned long freq); +#else /* !CONFIG_CLOCK_THERMAL */ +static inline struct thermal_cooling_device * +clock_cooling_register(struct device *dev, const char *clock_name) +{ + return NULL; +} +static inline +void clock_cooling_unregister(struct thermal_cooling_device *cdev) +{ +} +static inline +unsigned long clock_cooling_get_level(struct thermal_cooling_device *cdev, + unsigned long freq) +{ + return THERMAL_CSTATE_INVALID; +} +#endif /* CONFIG_CLOCK_THERMAL */ + +#endif /* __CPU_COOLING_H__ */ diff --git a/include/linux/clockchips.h b/include/linux/clockchips.h index 8ae9a95ebf..0d442e34c3 100644 --- a/include/linux/clockchips.h +++ b/include/linux/clockchips.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ /* linux/include/linux/clockchips.h * * This file contains the structure definitions for clockchips. 
@@ -183,6 +182,7 @@ extern u64 clockevent_delta2ns(unsigned long latch, struct clock_event_device *e extern void clockevents_register_device(struct clock_event_device *dev); extern int clockevents_unbind_device(struct clock_event_device *ced, int cpu); +extern void clockevents_config(struct clock_event_device *dev, u32 freq); extern void clockevents_config_and_register(struct clock_event_device *dev, u32 freq, unsigned long min_delta, unsigned long max_delta); diff --git a/include/linux/clocksource.h b/include/linux/clocksource.h index 1d42d4b173..08398182f5 100644 --- a/include/linux/clocksource.h +++ b/include/linux/clocksource.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ /* linux/include/linux/clocksource.h * * This file contains the structure definitions for clocksources. @@ -17,40 +16,24 @@ #include #include #include -#include #include #include struct clocksource; struct module; -#if defined(CONFIG_ARCH_CLOCKSOURCE_DATA) || \ - defined(CONFIG_GENERIC_GETTIMEOFDAY) +#ifdef CONFIG_ARCH_CLOCKSOURCE_DATA #include #endif -#include - /** * struct clocksource - hardware abstraction for a free running counter * Provides mostly state-free accessors to the underlying hardware. * This is the structure used for system time. * - * @read: Returns a cycle value, passes clocksource as argument - * @mask: Bitmask for two's complement - * subtraction of non 64 bit counters - * @mult: Cycle to nanosecond multiplier - * @shift: Cycle to nanosecond divisor (power of two) - * @max_idle_ns: Maximum idle time permitted by the clocksource (nsecs) - * @maxadj: Maximum adjustment value to mult (~11%) - * @uncertainty_margin: Maximum uncertainty in nanoseconds per half second. - * Zero says to use default WATCHDOG_THRESHOLD. 
- * @archdata: Optional arch-specific data - * @max_cycles: Maximum safe cycle value which won't overflow on - * multiplication - * @name: Pointer to clocksource name - * @list: List head for registration (internal) - * @rating: Rating value for selection (higher is better) + * @name: ptr to clocksource name + * @list: list head for registration + * @rating: rating value for selection (higher is better) * To avoid rating inflation the following * list should give you a guide as to how * to assign your clocksource a rating @@ -65,27 +48,25 @@ struct module; * 400-499: Perfect * The ideal clocksource. A must-use where * available. - * @id: Defaults to CSID_GENERIC. The id value is captured - * in certain snapshot functions to allow callers to - * validate the clocksource from which the snapshot was - * taken. - * @flags: Flags describing special properties - * @enable: Optional function to enable the clocksource - * @disable: Optional function to disable the clocksource - * @suspend: Optional suspend function for the clocksource - * @resume: Optional resume function for the clocksource - * @mark_unstable: Optional function to inform the clocksource driver that - * the watchdog marked the clocksource unstable - * @tick_stable: Optional function called periodically from the watchdog - * code to provide stable synchronization points - * @wd_list: List head to enqueue into the watchdog list (internal) - * @cs_last: Last clocksource value for clocksource watchdog - * @wd_last: Last watchdog value corresponding to @cs_last - * @owner: Module reference, must be set by clocksource in modules + * @read: returns a cycle value, passes clocksource as argument + * @enable: optional function to enable the clocksource + * @disable: optional function to disable the clocksource + * @mask: bitmask for two's complement + * subtraction of non 64 bit counters + * @mult: cycle to nanosecond multiplier + * @shift: cycle to nanosecond divisor (power of two) + * @max_idle_ns: max idle time 
permitted by the clocksource (nsecs) + * @maxadj: maximum adjustment value to mult (~11%) + * @max_cycles: maximum safe cycle value which won't overflow on multiplication + * @flags: flags describing special properties + * @archdata: arch-specific data + * @suspend: suspend function for the clocksource, if necessary + * @resume: resume function for the clocksource, if necessary + * @owner: module reference, must be set by clocksource in modules * * Note: This struct is not used in hotpathes of the timekeeping code * because the timekeeper caches the hot path fields in its own data - * structure, so no cache line alignment is required, + * structure, so no line cache alignment is required, * * The pointer to the clocksource itself is handed to the read * callback. If you need extra information there you can wrap struct @@ -94,39 +75,33 @@ struct module; * structure. */ struct clocksource { - u64 (*read)(struct clocksource *cs); - u64 mask; - u32 mult; - u32 shift; - u64 max_idle_ns; - u32 maxadj; - u32 uncertainty_margin; + cycle_t (*read)(struct clocksource *cs); + cycle_t mask; + u32 mult; + u32 shift; + u64 max_idle_ns; + u32 maxadj; #ifdef CONFIG_ARCH_CLOCKSOURCE_DATA struct arch_clocksource_data archdata; #endif - u64 max_cycles; - const char *name; - struct list_head list; - int rating; - enum clocksource_ids id; - enum vdso_clock_mode vdso_clock_mode; - unsigned long flags; - - int (*enable)(struct clocksource *cs); - void (*disable)(struct clocksource *cs); - void (*suspend)(struct clocksource *cs); - void (*resume)(struct clocksource *cs); - void (*mark_unstable)(struct clocksource *cs); - void (*tick_stable)(struct clocksource *cs); + u64 max_cycles; + const char *name; + struct list_head list; + int rating; + int (*enable)(struct clocksource *cs); + void (*disable)(struct clocksource *cs); + unsigned long flags; + void (*suspend)(struct clocksource *cs); + void (*resume)(struct clocksource *cs); /* private: */ #ifdef CONFIG_CLOCKSOURCE_WATCHDOG /* 
Watchdog related data, used by the framework */ - struct list_head wd_list; - u64 cs_last; - u64 wd_last; + struct list_head wd_list; + cycle_t cs_last; + cycle_t wd_last; #endif - struct module *owner; + struct module *owner; }; /* @@ -140,9 +115,9 @@ struct clocksource { #define CLOCK_SOURCE_UNSTABLE 0x40 #define CLOCK_SOURCE_SUSPEND_NONSTOP 0x80 #define CLOCK_SOURCE_RESELECT 0x100 -#define CLOCK_SOURCE_VERIFY_PERCPU 0x200 + /* simplify initialization of mask field */ -#define CLOCKSOURCE_MASK(bits) GENMASK_ULL((bits) - 1, 0) +#define CLOCKSOURCE_MASK(bits) (cycle_t)((bits) < 64 ? ((1ULL<<(bits))-1) : -1) static inline u32 clocksource_freq2mult(u32 freq, u32 shift_constant, u64 from) { @@ -194,14 +169,11 @@ static inline u32 clocksource_hz2mult(u32 hz, u32 shift_constant) * @mult: cycle to nanosecond multiplier * @shift: cycle to nanosecond divisor (power of two) * - * Converts clocksource cycles to nanoseconds, using the given @mult and @shift. - * The code is optimized for performance and is not intended to work - * with absolute clocksource cycles (as those will easily overflow), - * but is only intended to be used with relative (delta) clocksource cycles. + * Converts cycles to nanoseconds, using the given mult and shift. 
* * XXX - This could use some mult_lxl_ll() asm optimization */ -static inline s64 clocksource_cyc2ns(u64 cycles, u32 mult, u32 shift) +static inline s64 clocksource_cyc2ns(cycle_t cycles, u32 mult, u32 shift) { return ((u64) cycles * mult) >> shift; } @@ -214,9 +186,6 @@ extern void clocksource_suspend(void); extern void clocksource_resume(void); extern struct clocksource * __init clocksource_default_clock(void); extern void clocksource_mark_unstable(struct clocksource *cs); -extern void -clocksource_start_suspend_timing(struct clocksource *cs, u64 start_cycles); -extern u64 clocksource_stop_suspend_timing(struct clocksource *cs, u64 now); extern u64 clocks_calc_max_nsecs(u32 mult, u32 shift, u32 maxadj, u64 mask, u64 *max_cycles); @@ -261,37 +230,29 @@ static inline void __clocksource_update_freq_khz(struct clocksource *cs, u32 khz __clocksource_update_freq_scale(cs, 1000, khz); } -#ifdef CONFIG_ARCH_CLOCKSOURCE_INIT -extern void clocksource_arch_init(struct clocksource *cs); -#else -static inline void clocksource_arch_init(struct clocksource *cs) { } -#endif extern int timekeeping_notify(struct clocksource *clock); -extern u64 clocksource_mmio_readl_up(struct clocksource *); -extern u64 clocksource_mmio_readl_down(struct clocksource *); -extern u64 clocksource_mmio_readw_up(struct clocksource *); -extern u64 clocksource_mmio_readw_down(struct clocksource *); +extern cycle_t clocksource_mmio_readl_up(struct clocksource *); +extern cycle_t clocksource_mmio_readl_down(struct clocksource *); +extern cycle_t clocksource_mmio_readw_up(struct clocksource *); +extern cycle_t clocksource_mmio_readw_down(struct clocksource *); extern int clocksource_mmio_init(void __iomem *, const char *, - unsigned long, int, unsigned, u64 (*)(struct clocksource *)); + unsigned long, int, unsigned, cycle_t (*)(struct clocksource *)); extern int clocksource_i8253_init(void); -#define TIMER_OF_DECLARE(name, compat, fn) \ - OF_DECLARE_1_RET(timer, name, compat, fn) +#define 
CLOCKSOURCE_OF_DECLARE(name, compat, fn) \ + OF_DECLARE_1_RET(clksrc, name, compat, fn) -#ifdef CONFIG_TIMER_PROBE -extern void timer_probe(void); +#ifdef CONFIG_CLKSRC_PROBE +extern void clocksource_probe(void); #else -static inline void timer_probe(void) {} +static inline void clocksource_probe(void) {} #endif -#define TIMER_ACPI_DECLARE(name, table_id, fn) \ - ACPI_DECLARE_PROBE_ENTRY(timer, name, table_id, 0, NULL, 0, fn) - -extern ulong max_cswd_read_retries; -void clocksource_verify_percpu(struct clocksource *cs); +#define CLOCKSOURCE_ACPI_DECLARE(name, table_id, fn) \ + ACPI_DECLARE_PROBE_ENTRY(clksrc, name, table_id, 0, NULL, 0, fn) #endif /* _LINUX_CLOCKSOURCE_H */ diff --git a/include/linux/cm4000_cs.h b/include/linux/cm4000_cs.h index ea4958e07a..88bee3a330 100644 --- a/include/linux/cm4000_cs.h +++ b/include/linux/cm4000_cs.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _CM4000_H_ #define _CM4000_H_ diff --git a/include/linux/cma.h b/include/linux/cma.h index 53fd8c3cdb..29f9e774ab 100644 --- a/include/linux/cma.h +++ b/include/linux/cma.h @@ -1,11 +1,6 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef __CMA_H__ #define __CMA_H__ -#include -#include -#include - /* * There is always at least global CMA area and a few optional * areas configured in kernel .config. 
@@ -18,35 +13,19 @@ #endif -#define CMA_MAX_NAME 64 - struct cma; extern unsigned long totalcma_pages; extern phys_addr_t cma_get_base(const struct cma *cma); extern unsigned long cma_get_size(const struct cma *cma); -extern const char *cma_get_name(const struct cma *cma); -extern int __init cma_declare_contiguous_nid(phys_addr_t base, +extern int __init cma_declare_contiguous(phys_addr_t base, phys_addr_t size, phys_addr_t limit, phys_addr_t alignment, unsigned int order_per_bit, - bool fixed, const char *name, struct cma **res_cma, - int nid); -static inline int __init cma_declare_contiguous(phys_addr_t base, - phys_addr_t size, phys_addr_t limit, - phys_addr_t alignment, unsigned int order_per_bit, - bool fixed, const char *name, struct cma **res_cma) -{ - return cma_declare_contiguous_nid(base, size, limit, alignment, - order_per_bit, fixed, name, res_cma, NUMA_NO_NODE); -} + bool fixed, struct cma **res_cma); extern int cma_init_reserved_mem(phys_addr_t base, phys_addr_t size, unsigned int order_per_bit, - const char *name, struct cma **res_cma); -extern struct page *cma_alloc(struct cma *cma, unsigned long count, unsigned int align, - bool no_warn); -extern bool cma_release(struct cma *cma, const struct page *pages, unsigned long count); - -extern int cma_for_each_area(int (*it)(struct cma *cma, void *data), void *data); +extern struct page *cma_alloc(struct cma *cma, size_t count, unsigned int align); +extern bool cma_release(struct cma *cma, const struct page *pages, unsigned int count); #endif diff --git a/include/linux/cmdline-parser.h b/include/linux/cmdline-parser.h index 68a541807b..2e6dce6e5c 100644 --- a/include/linux/cmdline-parser.h +++ b/include/linux/cmdline-parser.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ /* * Parsing command line, get the partitions information. 
* diff --git a/include/linux/cnt32_to_63.h b/include/linux/cnt32_to_63.h index 064428479f..aa629bce90 100644 --- a/include/linux/cnt32_to_63.h +++ b/include/linux/cnt32_to_63.h @@ -1,10 +1,13 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * Extend a 32-bit counter to 63 bits * * Author: Nicolas Pitre * Created: December 3, 2006 * Copyright: MontaVista Software, Inc. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation. */ #ifndef __LINUX_CNT32_TO_63_H__ diff --git a/include/linux/coda.h b/include/linux/coda.h index 0ca0c83fdb..d30209b9ce 100644 --- a/include/linux/coda.h +++ b/include/linux/coda.h @@ -58,7 +58,8 @@ Mellon the rights to redistribute these changes without encumbrance. #ifndef _CODA_HEADER_ #define _CODA_HEADER_ +#if defined(__linux__) typedef unsigned long long u_quad_t; - +#endif #include #endif diff --git a/include/linux/coda_psdev.h b/include/linux/coda_psdev.h new file mode 100644 index 0000000000..5b8721efa9 --- /dev/null +++ b/include/linux/coda_psdev.h @@ -0,0 +1,72 @@ +#ifndef __CODA_PSDEV_H +#define __CODA_PSDEV_H + +#include +#include +#include + +struct kstatfs; + +/* communication pending/processing queues */ +struct venus_comm { + u_long vc_seq; + wait_queue_head_t vc_waitq; /* Venus wait queue */ + struct list_head vc_pending; + struct list_head vc_processing; + int vc_inuse; + struct super_block *vc_sb; + struct backing_dev_info bdi; + struct mutex vc_mutex; +}; + + +static inline struct venus_comm *coda_vcp(struct super_block *sb) +{ + return (struct venus_comm *)((sb)->s_fs_info); +} + +/* upcalls */ +int venus_rootfid(struct super_block *sb, struct CodaFid *fidp); +int venus_getattr(struct super_block *sb, struct CodaFid *fid, + struct coda_vattr *attr); +int venus_setattr(struct super_block *, struct CodaFid *, struct coda_vattr *); +int venus_lookup(struct super_block *sb, struct CodaFid 
*fid, + const char *name, int length, int *type, + struct CodaFid *resfid); +int venus_close(struct super_block *sb, struct CodaFid *fid, int flags, + kuid_t uid); +int venus_open(struct super_block *sb, struct CodaFid *fid, int flags, + struct file **f); +int venus_mkdir(struct super_block *sb, struct CodaFid *dirfid, + const char *name, int length, + struct CodaFid *newfid, struct coda_vattr *attrs); +int venus_create(struct super_block *sb, struct CodaFid *dirfid, + const char *name, int length, int excl, int mode, + struct CodaFid *newfid, struct coda_vattr *attrs) ; +int venus_rmdir(struct super_block *sb, struct CodaFid *dirfid, + const char *name, int length); +int venus_remove(struct super_block *sb, struct CodaFid *dirfid, + const char *name, int length); +int venus_readlink(struct super_block *sb, struct CodaFid *fid, + char *buffer, int *length); +int venus_rename(struct super_block *, struct CodaFid *new_fid, + struct CodaFid *old_fid, size_t old_length, + size_t new_length, const char *old_name, + const char *new_name); +int venus_link(struct super_block *sb, struct CodaFid *fid, + struct CodaFid *dirfid, const char *name, int len ); +int venus_symlink(struct super_block *sb, struct CodaFid *fid, + const char *name, int len, const char *symname, int symlen); +int venus_access(struct super_block *sb, struct CodaFid *fid, int mask); +int venus_pioctl(struct super_block *sb, struct CodaFid *fid, + unsigned int cmd, struct PioctlData *data); +int coda_downcall(struct venus_comm *vcp, int opcode, union outputArgs *out); +int venus_fsync(struct super_block *sb, struct CodaFid *fid); +int venus_statfs(struct dentry *dentry, struct kstatfs *sfs); + +/* + * Statistics + */ + +extern struct venus_comm coda_comms[]; +#endif diff --git a/include/linux/compaction.h b/include/linux/compaction.h index 34bce35c80..0d8415820f 100644 --- a/include/linux/compaction.h +++ b/include/linux/compaction.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef 
_LINUX_COMPACTION_H #define _LINUX_COMPACTION_H @@ -29,18 +28,21 @@ enum compact_result { /* compaction didn't start as it was deferred due to past failures */ COMPACT_DEFERRED, + /* compaction not active last round */ + COMPACT_INACTIVE = COMPACT_DEFERRED, + /* For more detailed tracepoint output - internal to compaction */ COMPACT_NO_SUITABLE_PAGE, /* compaction should continue to another pageblock */ COMPACT_CONTINUE, /* - * The full zone was compacted scanned but wasn't successful to compact + * The full zone was compacted scanned but wasn't successfull to compact * suitable pages. */ COMPACT_COMPLETE, /* - * direct compaction has scanned part of the zone but wasn't successful + * direct compaction has scanned part of the zone but wasn't successfull * to compact suitable pages. */ COMPACT_PARTIAL_SKIPPED, @@ -81,26 +83,27 @@ static inline unsigned long compact_gap(unsigned int order) } #ifdef CONFIG_COMPACTION -extern unsigned int sysctl_compaction_proactiveness; +extern int sysctl_compact_memory; extern int sysctl_compaction_handler(struct ctl_table *table, int write, - void *buffer, size_t *length, loff_t *ppos); -extern int compaction_proactiveness_sysctl_handler(struct ctl_table *table, - int write, void *buffer, size_t *length, loff_t *ppos); + void __user *buffer, size_t *length, loff_t *ppos); extern int sysctl_extfrag_threshold; +extern int sysctl_extfrag_handler(struct ctl_table *table, int write, + void __user *buffer, size_t *length, loff_t *ppos); extern int sysctl_compact_unevictable_allowed; -extern unsigned int extfrag_for_order(struct zone *zone, unsigned int order); extern int fragmentation_index(struct zone *zone, unsigned int order); extern enum compact_result try_to_compact_pages(gfp_t gfp_mask, unsigned int order, unsigned int alloc_flags, - const struct alloc_context *ac, enum compact_priority prio, - struct page **page); + const struct alloc_context *ac, enum compact_priority prio); extern void reset_isolation_suitable(pg_data_t *pgdat); 
extern enum compact_result compaction_suitable(struct zone *zone, int order, - unsigned int alloc_flags, int highest_zoneidx); + unsigned int alloc_flags, int classzone_idx); +extern void defer_compaction(struct zone *zone, int order); +extern bool compaction_deferred(struct zone *zone, int order); extern void compaction_defer_reset(struct zone *zone, int order, bool alloc_success); +extern bool compaction_restarting(struct zone *zone, int order); /* Compaction has made some progress and retrying makes sense */ static inline bool compaction_made_progress(enum compact_result result) @@ -126,8 +129,11 @@ static inline bool compaction_failed(enum compact_result result) return false; } -/* Compaction needs reclaim to be performed first, so it can continue. */ -static inline bool compaction_needs_reclaim(enum compact_result result) +/* + * Compaction has backed off for some reason. It might be throttling or + * lock contention. Retrying is still worthwhile. + */ +static inline bool compaction_withdrawn(enum compact_result result) { /* * Compaction backed off due to watermark checks for order-0 @@ -136,16 +142,6 @@ static inline bool compaction_needs_reclaim(enum compact_result result) if (result == COMPACT_SKIPPED) return true; - return false; -} - -/* - * Compaction has backed off for some reason after doing some work or none - * at all. It might be throttling or lock contention. Retrying might be still - * worthwhile, but with a higher priority if allowed. - */ -static inline bool compaction_withdrawn(enum compact_result result) -{ /* * If compaction is deferred for high-order allocations, it is * because sync compaction recently failed. 
If this is the case @@ -179,7 +175,7 @@ bool compaction_zonelist_suitable(struct alloc_context *ac, int order, extern int kcompactd_run(int nid); extern void kcompactd_stop(int nid); -extern void wakeup_kcompactd(pg_data_t *pgdat, int order, int highest_zoneidx); +extern void wakeup_kcompactd(pg_data_t *pgdat, int order, int classzone_idx); #else static inline void reset_isolation_suitable(pg_data_t *pgdat) @@ -187,11 +183,20 @@ static inline void reset_isolation_suitable(pg_data_t *pgdat) } static inline enum compact_result compaction_suitable(struct zone *zone, int order, - int alloc_flags, int highest_zoneidx) + int alloc_flags, int classzone_idx) { return COMPACT_SKIPPED; } +static inline void defer_compaction(struct zone *zone, int order) +{ +} + +static inline bool compaction_deferred(struct zone *zone, int order) +{ + return true; +} + static inline bool compaction_made_progress(enum compact_result result) { return false; @@ -202,11 +207,6 @@ static inline bool compaction_failed(enum compact_result result) return false; } -static inline bool compaction_needs_reclaim(enum compact_result result) -{ - return false; -} - static inline bool compaction_withdrawn(enum compact_result result) { return true; @@ -220,15 +220,14 @@ static inline void kcompactd_stop(int nid) { } -static inline void wakeup_kcompactd(pg_data_t *pgdat, - int order, int highest_zoneidx) +static inline void wakeup_kcompactd(pg_data_t *pgdat, int order, int classzone_idx) { } #endif /* CONFIG_COMPACTION */ -struct node; #if defined(CONFIG_COMPACTION) && defined(CONFIG_SYSFS) && defined(CONFIG_NUMA) +struct node; extern int compaction_register_node(struct node *node); extern void compaction_unregister_node(struct node *node); diff --git a/include/linux/compat.h b/include/linux/compat.h index 1c758b0e03..b81d592a24 100644 --- a/include/linux/compat.h +++ b/include/linux/compat.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_COMPAT_H #define _LINUX_COMPAT_H /* @@ -7,7 
+6,8 @@ */ #include -#include + +#ifdef CONFIG_COMPAT #include #include /* for HZ */ @@ -16,81 +16,98 @@ #include #include #include /* for aio_context_t */ -#include #include #include #include #include -#ifdef CONFIG_ARCH_HAS_SYSCALL_WRAPPER -/* - * It may be useful for an architecture to override the definitions of the - * COMPAT_SYSCALL_DEFINE0 and COMPAT_SYSCALL_DEFINEx() macros, in particular - * to use a different calling convention for syscalls. To allow for that, - + the prototypes for the compat_sys_*() functions below will *not* be included - * if CONFIG_ARCH_HAS_SYSCALL_WRAPPER is enabled. - */ -#include -#endif /* CONFIG_ARCH_HAS_SYSCALL_WRAPPER */ - #ifndef COMPAT_USE_64BIT_TIME #define COMPAT_USE_64BIT_TIME 0 #endif #ifndef __SC_DELOUSE -#define __SC_DELOUSE(t,v) ((__force t)(unsigned long)(v)) +#define __SC_DELOUSE(t,v) ((t)(unsigned long)(v)) #endif -#ifndef COMPAT_SYSCALL_DEFINE0 -#define COMPAT_SYSCALL_DEFINE0(name) \ - asmlinkage long compat_sys_##name(void); \ - ALLOW_ERROR_INJECTION(compat_sys_##name, ERRNO); \ +#ifdef CONFIG_PAX_RAP +#define RAP_SYS32_SYSCALL_DEFINE0(name) \ + asmlinkage long rap_sys32_##name(unsigned long a, unsigned long b, unsigned long c, unsigned long d, unsigned long e, unsigned long f)\ + { \ + return sys32_##name(); \ + } +#else +#define RAP_SYS32_SYSCALL_DEFINE0(name) +#endif + +#define SYS32_SYSCALL_DEFINE0(name) \ + asmlinkage long sys32_##name(void); \ + RAP_SYS32_SYSCALL_DEFINE0(name) \ + asmlinkage long sys32_##name(void) + +#define SYS32_SYSCALL_DEFINE1(name, ...) \ + COMPAT_SYSCALL_DEFINEx(1, sys32, , _##name, __VA_ARGS__) +#define SYS32_SYSCALL_DEFINE2(name, ...) \ + COMPAT_SYSCALL_DEFINEx(2, sys32, , _##name, __VA_ARGS__) +#define SYS32_SYSCALL_DEFINE3(name, ...) \ + COMPAT_SYSCALL_DEFINEx(3, sys32, , _##name, __VA_ARGS__) +#define SYS32_SYSCALL_DEFINE4(name, ...) \ + COMPAT_SYSCALL_DEFINEx(4, sys32, , _##name, __VA_ARGS__) +#define SYS32_SYSCALL_DEFINE5(name, ...) 
\ + COMPAT_SYSCALL_DEFINEx(5, sys32, , _##name, __VA_ARGS__) +#define SYS32_SYSCALL_DEFINE6(name, ...) \ + COMPAT_SYSCALL_DEFINEx(6, sys32, , _##name, __VA_ARGS__) + +#ifdef CONFIG_PAX_RAP +#define RAP_COMPAT_SYSCALL_DEFINE0(name) \ + asmlinkage long rap_compat_sys_##name(unsigned long a, unsigned long b, unsigned long c, unsigned long d, unsigned long e, unsigned long f)\ + { \ + return compat_sys_##name(); \ + } +#else +#define RAP_COMPAT_SYSCALL_DEFINE0(name) +#endif + +#define COMPAT_SYSCALL_DEFINE0(name) \ + RAP_COMPAT_SYSCALL_DEFINE0(name) \ asmlinkage long compat_sys_##name(void) -#endif /* COMPAT_SYSCALL_DEFINE0 */ #define COMPAT_SYSCALL_DEFINE1(name, ...) \ - COMPAT_SYSCALL_DEFINEx(1, _##name, __VA_ARGS__) + COMPAT_SYSCALL_DEFINEx(1, compat, _sys, _##name, __VA_ARGS__) #define COMPAT_SYSCALL_DEFINE2(name, ...) \ - COMPAT_SYSCALL_DEFINEx(2, _##name, __VA_ARGS__) + COMPAT_SYSCALL_DEFINEx(2, compat, _sys, _##name, __VA_ARGS__) #define COMPAT_SYSCALL_DEFINE3(name, ...) \ - COMPAT_SYSCALL_DEFINEx(3, _##name, __VA_ARGS__) + COMPAT_SYSCALL_DEFINEx(3, compat, _sys, _##name, __VA_ARGS__) #define COMPAT_SYSCALL_DEFINE4(name, ...) \ - COMPAT_SYSCALL_DEFINEx(4, _##name, __VA_ARGS__) + COMPAT_SYSCALL_DEFINEx(4, compat, _sys, _##name, __VA_ARGS__) #define COMPAT_SYSCALL_DEFINE5(name, ...) \ - COMPAT_SYSCALL_DEFINEx(5, _##name, __VA_ARGS__) + COMPAT_SYSCALL_DEFINEx(5, compat, _sys, _##name, __VA_ARGS__) #define COMPAT_SYSCALL_DEFINE6(name, ...) \ - COMPAT_SYSCALL_DEFINEx(6, _##name, __VA_ARGS__) + COMPAT_SYSCALL_DEFINEx(6, compat, _sys, _##name, __VA_ARGS__) -/* - * The asmlinkage stub is aliased to a function named __se_compat_sys_*() which - * sign-extends 32-bit ints to longs whenever needed. The actual work is - * done within __do_compat_sys_*(). - */ -#ifndef COMPAT_SYSCALL_DEFINEx -#define COMPAT_SYSCALL_DEFINEx(x, name, ...) 
\ - __diag_push(); \ - __diag_ignore(GCC, 8, "-Wattribute-alias", \ - "Type aliasing is used to sanitize syscall arguments");\ - asmlinkage long compat_sys##name(__MAP(x,__SC_DECL,__VA_ARGS__)) \ - __attribute__((alias(__stringify(__se_compat_sys##name)))); \ - ALLOW_ERROR_INJECTION(compat_sys##name, ERRNO); \ - static inline long __do_compat_sys##name(__MAP(x,__SC_DECL,__VA_ARGS__));\ - asmlinkage long __se_compat_sys##name(__MAP(x,__SC_LONG,__VA_ARGS__)); \ - asmlinkage long __se_compat_sys##name(__MAP(x,__SC_LONG,__VA_ARGS__)) \ - { \ - long ret = __do_compat_sys##name(__MAP(x,__SC_DELOUSE,__VA_ARGS__));\ - __MAP(x,__SC_TEST,__VA_ARGS__); \ - return ret; \ - } \ - __diag_pop(); \ - static inline long __do_compat_sys##name(__MAP(x,__SC_DECL,__VA_ARGS__)) -#endif /* COMPAT_SYSCALL_DEFINEx */ +#ifdef CONFIG_PAX_RAP +#define RAP_COMPAT_SYSCALL_DEFINEx(x, prefix, sys, name, ...) \ + asmlinkage __intentional_overflow(-1) \ + long rap_##prefix##sys##name(__RAP_MAP(x,__RAP_SC_LONG,__VA_ARGS__))\ + { \ + return prefix##sys##name(__MAP(x,__SC_CAST,__VA_ARGS__));\ + } +#else +#define RAP_COMPAT_SYSCALL_DEFINEx(x, prefix, sys, name, ...) +#endif -struct compat_iovec { - compat_uptr_t iov_base; - compat_size_t iov_len; -}; +#define COMPAT_SYSCALL_DEFINEx(x, prefix, sys, name, ...) 
\ + static inline long C_SYSC##name(__MAP(x,__SC_DECL,__VA_ARGS__));\ + static inline asmlinkage long prefix##_SyS##name(__MAP(x,__SC_LONG,__VA_ARGS__))\ + { \ + return C_SYSC##name(__MAP(x,__SC_DELOUSE,__VA_ARGS__)); \ + } \ + asmlinkage long prefix##sys##name(__MAP(x,__SC_DECL,__VA_ARGS__))\ + { \ + return prefix##_SyS##name(__MAP(x,__SC_ARGS,__VA_ARGS__));\ + } \ + RAP_COMPAT_SYSCALL_DEFINEx(x,prefix,sys,name,__VA_ARGS__) \ + static inline long C_SYSC##name(__MAP(x,__SC_DECL,__VA_ARGS__)) #ifndef compat_user_stack_pointer #define compat_user_stack_pointer() current_user_stack_pointer() @@ -102,9 +119,6 @@ typedef struct compat_sigaltstack { compat_size_t ss_size; } compat_stack_t; #endif -#ifndef COMPAT_MINSIGSTKSZ -#define COMPAT_MINSIGSTKSZ MINSIGSTKSZ -#endif #define compat_jiffies_to_clock_t(x) \ (((unsigned long)(x) * COMPAT_USER_HZ) / HZ) @@ -112,10 +126,25 @@ typedef struct compat_sigaltstack { typedef __compat_uid32_t compat_uid_t; typedef __compat_gid32_t compat_gid_t; +typedef compat_ulong_t compat_aio_context_t; + struct compat_sel_arg_struct; struct rusage; -struct old_itimerval32; +struct compat_itimerspec { + struct compat_timespec it_interval; + struct compat_timespec it_value; +}; + +struct compat_utimbuf { + compat_time_t actime; + compat_time_t modtime; +}; + +struct compat_itimerval { + struct compat_timeval it_interval; + struct compat_timeval it_value; +}; struct compat_tms { compat_clock_t tms_utime; @@ -124,16 +153,38 @@ struct compat_tms { compat_clock_t tms_cstime; }; +struct compat_timex { + compat_uint_t modes; + compat_long_t offset; + compat_long_t freq; + compat_long_t maxerror; + compat_long_t esterror; + compat_int_t status; + compat_long_t constant; + compat_long_t precision; + compat_long_t tolerance; + struct compat_timeval time; + compat_long_t tick; + compat_long_t ppsfreq; + compat_long_t jitter; + compat_int_t shift; + compat_long_t stabil; + compat_long_t jitcnt; + compat_long_t calcnt; + compat_long_t errcnt; + 
compat_long_t stbcnt; + compat_int_t tai; + + compat_int_t:32; compat_int_t:32; compat_int_t:32; compat_int_t:32; + compat_int_t:32; compat_int_t:32; compat_int_t:32; compat_int_t:32; + compat_int_t:32; compat_int_t:32; compat_int_t:32; +}; + #define _COMPAT_NSIG_WORDS (_COMPAT_NSIG / _COMPAT_NSIG_BPW) -#ifndef compat_sigset_t typedef struct { compat_sigset_word sig[_COMPAT_NSIG_WORDS]; } compat_sigset_t; -#endif - -int set_compat_user_sigmask(const compat_sigset_t __user *umask, - size_t sigsetsize); struct compat_sigaction { #ifndef __ARCH_HAS_IRIX_SIGACTION @@ -149,109 +200,28 @@ struct compat_sigaction { compat_sigset_t sa_mask __packed; }; -typedef union compat_sigval { - compat_int_t sival_int; - compat_uptr_t sival_ptr; -} compat_sigval_t; +/* + * These functions operate on 32- or 64-bit specs depending on + * COMPAT_USE_64BIT_TIME, hence the void user pointer arguments. + */ +extern int compat_get_timespec(struct timespec *, const void __user *); +extern int compat_put_timespec(const struct timespec *, void __user *); +extern int compat_get_timeval(struct timeval *, const void __user *); +extern int compat_put_timeval(const struct timeval *, void __user *); -typedef struct compat_siginfo { - int si_signo; -#ifndef __ARCH_HAS_SWAPPED_SIGINFO - int si_errno; - int si_code; -#else - int si_code; - int si_errno; -#endif +/* + * This function convert a timespec if necessary and returns a *user + * space* pointer. If no conversion is necessary, it returns the + * initial pointer. NULL is a legitimate argument and will always + * output NULL. 
+ */ +extern int compat_convert_timespec(struct timespec __user **, + const void __user *); - union { - int _pad[128/sizeof(int) - 3]; - - /* kill() */ - struct { - compat_pid_t _pid; /* sender's pid */ - __compat_uid32_t _uid; /* sender's uid */ - } _kill; - - /* POSIX.1b timers */ - struct { - compat_timer_t _tid; /* timer id */ - int _overrun; /* overrun count */ - compat_sigval_t _sigval; /* same as below */ - } _timer; - - /* POSIX.1b signals */ - struct { - compat_pid_t _pid; /* sender's pid */ - __compat_uid32_t _uid; /* sender's uid */ - compat_sigval_t _sigval; - } _rt; - - /* SIGCHLD */ - struct { - compat_pid_t _pid; /* which child */ - __compat_uid32_t _uid; /* sender's uid */ - int _status; /* exit code */ - compat_clock_t _utime; - compat_clock_t _stime; - } _sigchld; - -#ifdef CONFIG_X86_X32_ABI - /* SIGCHLD (x32 version) */ - struct { - compat_pid_t _pid; /* which child */ - __compat_uid32_t _uid; /* sender's uid */ - int _status; /* exit code */ - compat_s64 _utime; - compat_s64 _stime; - } _sigchld_x32; -#endif - - /* SIGILL, SIGFPE, SIGSEGV, SIGBUS, SIGTRAP, SIGEMT */ - struct { - compat_uptr_t _addr; /* faulting insn/memory ref. */ -#define __COMPAT_ADDR_BND_PKEY_PAD (__alignof__(compat_uptr_t) < sizeof(short) ? \ - sizeof(short) : __alignof__(compat_uptr_t)) - union { - /* used on alpha and sparc */ - int _trapno; /* TRAP # which caused the signal */ - /* - * used when si_code=BUS_MCEERR_AR or - * used when si_code=BUS_MCEERR_AO - */ - short int _addr_lsb; /* Valid LSB of the reported address. 
*/ - /* used when si_code=SEGV_BNDERR */ - struct { - char _dummy_bnd[__COMPAT_ADDR_BND_PKEY_PAD]; - compat_uptr_t _lower; - compat_uptr_t _upper; - } _addr_bnd; - /* used when si_code=SEGV_PKUERR */ - struct { - char _dummy_pkey[__COMPAT_ADDR_BND_PKEY_PAD]; - u32 _pkey; - } _addr_pkey; - /* used when si_code=TRAP_PERF */ - struct { - compat_ulong_t _data; - u32 _type; - } _perf; - }; - } _sigfault; - - /* SIGPOLL */ - struct { - compat_long_t _band; /* POLL_IN, POLL_OUT, POLL_MSG */ - int _fd; - } _sigpoll; - - struct { - compat_uptr_t _call_addr; /* calling user insn */ - int _syscall; /* triggering system call number */ - unsigned int _arch; /* AUDIT_ARCH_* of syscall */ - } _sigsys; - } _sifields; -} compat_siginfo_t; +struct compat_iovec { + compat_uptr_t iov_base; + compat_size_t iov_len; +}; struct compat_rlimit { compat_ulong_t rlim_cur; @@ -259,8 +229,8 @@ struct compat_rlimit { }; struct compat_rusage { - struct old_timeval32 ru_utime; - struct old_timeval32 ru_stime; + struct compat_timeval ru_utime; + struct compat_timeval ru_stime; compat_long_t ru_maxrss; compat_long_t ru_ixrss; compat_long_t ru_idrss; @@ -281,7 +251,10 @@ extern int put_compat_rusage(const struct rusage *, struct compat_rusage __user *); struct compat_siginfo; -struct __compat_aio_sigset; + +extern asmlinkage long compat_sys_waitid(int, compat_pid_t, + struct compat_siginfo __user *, int, + struct compat_rusage __user *); struct compat_dirent { u32 d_ino; @@ -374,14 +347,6 @@ struct compat_old_sigaction { }; #endif -struct compat_keyctl_kdf_params { - compat_uptr_t hashname; - compat_uptr_t otherinfo; - __u32 otherinfolen; - __u32 __spare[8]; -}; - -struct compat_stat; struct compat_statfs; struct compat_statfs64; struct compat_old_linux_dirent; @@ -395,282 +360,8 @@ struct compat_kexec_segment; struct compat_mq_attr; struct compat_msgbuf; -void copy_siginfo_to_external32(struct compat_siginfo *to, - const struct kernel_siginfo *from); -int copy_siginfo_from_user32(kernel_siginfo_t 
*to, - const struct compat_siginfo __user *from); -int __copy_siginfo_to_user32(struct compat_siginfo __user *to, - const kernel_siginfo_t *from); -#ifndef copy_siginfo_to_user32 -#define copy_siginfo_to_user32 __copy_siginfo_to_user32 -#endif -int get_compat_sigevent(struct sigevent *event, - const struct compat_sigevent __user *u_event); +extern void compat_exit_robust_list(struct task_struct *curr); -extern int get_compat_sigset(sigset_t *set, const compat_sigset_t __user *compat); - -/* - * Defined inline such that size can be compile time constant, which avoids - * CONFIG_HARDENED_USERCOPY complaining about copies from task_struct - */ -static inline int -put_compat_sigset(compat_sigset_t __user *compat, const sigset_t *set, - unsigned int size) -{ - /* size <= sizeof(compat_sigset_t) <= sizeof(sigset_t) */ -#if defined(__BIG_ENDIAN) && defined(CONFIG_64BIT) - compat_sigset_t v; - switch (_NSIG_WORDS) { - case 4: v.sig[7] = (set->sig[3] >> 32); v.sig[6] = set->sig[3]; - fallthrough; - case 3: v.sig[5] = (set->sig[2] >> 32); v.sig[4] = set->sig[2]; - fallthrough; - case 2: v.sig[3] = (set->sig[1] >> 32); v.sig[2] = set->sig[1]; - fallthrough; - case 1: v.sig[1] = (set->sig[0] >> 32); v.sig[0] = set->sig[0]; - } - return copy_to_user(compat, &v, size) ? -EFAULT : 0; -#else - return copy_to_user(compat, set, size) ? 
-EFAULT : 0; -#endif -} - -#ifdef CONFIG_CPU_BIG_ENDIAN -#define unsafe_put_compat_sigset(compat, set, label) do { \ - compat_sigset_t __user *__c = compat; \ - const sigset_t *__s = set; \ - \ - switch (_NSIG_WORDS) { \ - case 4: \ - unsafe_put_user(__s->sig[3] >> 32, &__c->sig[7], label); \ - unsafe_put_user(__s->sig[3], &__c->sig[6], label); \ - fallthrough; \ - case 3: \ - unsafe_put_user(__s->sig[2] >> 32, &__c->sig[5], label); \ - unsafe_put_user(__s->sig[2], &__c->sig[4], label); \ - fallthrough; \ - case 2: \ - unsafe_put_user(__s->sig[1] >> 32, &__c->sig[3], label); \ - unsafe_put_user(__s->sig[1], &__c->sig[2], label); \ - fallthrough; \ - case 1: \ - unsafe_put_user(__s->sig[0] >> 32, &__c->sig[1], label); \ - unsafe_put_user(__s->sig[0], &__c->sig[0], label); \ - } \ -} while (0) - -#define unsafe_get_compat_sigset(set, compat, label) do { \ - const compat_sigset_t __user *__c = compat; \ - compat_sigset_word hi, lo; \ - sigset_t *__s = set; \ - \ - switch (_NSIG_WORDS) { \ - case 4: \ - unsafe_get_user(lo, &__c->sig[7], label); \ - unsafe_get_user(hi, &__c->sig[6], label); \ - __s->sig[3] = hi | (((long)lo) << 32); \ - fallthrough; \ - case 3: \ - unsafe_get_user(lo, &__c->sig[5], label); \ - unsafe_get_user(hi, &__c->sig[4], label); \ - __s->sig[2] = hi | (((long)lo) << 32); \ - fallthrough; \ - case 2: \ - unsafe_get_user(lo, &__c->sig[3], label); \ - unsafe_get_user(hi, &__c->sig[2], label); \ - __s->sig[1] = hi | (((long)lo) << 32); \ - fallthrough; \ - case 1: \ - unsafe_get_user(lo, &__c->sig[1], label); \ - unsafe_get_user(hi, &__c->sig[0], label); \ - __s->sig[0] = hi | (((long)lo) << 32); \ - } \ -} while (0) -#else -#define unsafe_put_compat_sigset(compat, set, label) do { \ - compat_sigset_t __user *__c = compat; \ - const sigset_t *__s = set; \ - \ - unsafe_copy_to_user(__c, __s, sizeof(*__c), label); \ -} while (0) - -#define unsafe_get_compat_sigset(set, compat, label) do { \ - const compat_sigset_t __user *__c = compat; \ - sigset_t *__s 
= set; \ - \ - unsafe_copy_from_user(__s, __c, sizeof(*__c), label); \ -} while (0) -#endif - -extern int compat_ptrace_request(struct task_struct *child, - compat_long_t request, - compat_ulong_t addr, compat_ulong_t data); - -extern long compat_arch_ptrace(struct task_struct *child, compat_long_t request, - compat_ulong_t addr, compat_ulong_t data); - -struct epoll_event; /* fortunately, this one is fixed-layout */ - -int compat_restore_altstack(const compat_stack_t __user *uss); -int __compat_save_altstack(compat_stack_t __user *, unsigned long); -#define unsafe_compat_save_altstack(uss, sp, label) do { \ - compat_stack_t __user *__uss = uss; \ - struct task_struct *t = current; \ - unsafe_put_user(ptr_to_compat((void __user *)t->sas_ss_sp), \ - &__uss->ss_sp, label); \ - unsafe_put_user(t->sas_ss_flags, &__uss->ss_flags, label); \ - unsafe_put_user(t->sas_ss_size, &__uss->ss_size, label); \ -} while (0); - -/* - * These syscall function prototypes are kept in the same order as - * include/uapi/asm-generic/unistd.h. Deprecated or obsolete system calls - * go below. - * - * Please note that these prototypes here are only provided for information - * purposes, for static analysis, and for linking from the syscall table. - * These functions should not be called elsewhere from kernel code. - * - * As the syscall calling convention may be different from the default - * for architectures overriding the syscall calling convention, do not - * include the prototypes if CONFIG_ARCH_HAS_SYSCALL_WRAPPER is enabled. 
- */ -#ifndef CONFIG_ARCH_HAS_SYSCALL_WRAPPER -asmlinkage long compat_sys_io_setup(unsigned nr_reqs, u32 __user *ctx32p); -asmlinkage long compat_sys_io_submit(compat_aio_context_t ctx_id, int nr, - u32 __user *iocb); -asmlinkage long compat_sys_io_pgetevents(compat_aio_context_t ctx_id, - compat_long_t min_nr, - compat_long_t nr, - struct io_event __user *events, - struct old_timespec32 __user *timeout, - const struct __compat_aio_sigset __user *usig); -asmlinkage long compat_sys_io_pgetevents_time64(compat_aio_context_t ctx_id, - compat_long_t min_nr, - compat_long_t nr, - struct io_event __user *events, - struct __kernel_timespec __user *timeout, - const struct __compat_aio_sigset __user *usig); - -/* fs/cookies.c */ -asmlinkage long compat_sys_lookup_dcookie(u32, u32, char __user *, compat_size_t); - -/* fs/eventpoll.c */ -asmlinkage long compat_sys_epoll_pwait(int epfd, - struct epoll_event __user *events, - int maxevents, int timeout, - const compat_sigset_t __user *sigmask, - compat_size_t sigsetsize); -asmlinkage long compat_sys_epoll_pwait2(int epfd, - struct epoll_event __user *events, - int maxevents, - const struct __kernel_timespec __user *timeout, - const compat_sigset_t __user *sigmask, - compat_size_t sigsetsize); - -/* fs/fcntl.c */ -asmlinkage long compat_sys_fcntl(unsigned int fd, unsigned int cmd, - compat_ulong_t arg); -asmlinkage long compat_sys_fcntl64(unsigned int fd, unsigned int cmd, - compat_ulong_t arg); - -/* fs/ioctl.c */ -asmlinkage long compat_sys_ioctl(unsigned int fd, unsigned int cmd, - compat_ulong_t arg); - -/* fs/open.c */ -asmlinkage long compat_sys_statfs(const char __user *pathname, - struct compat_statfs __user *buf); -asmlinkage long compat_sys_statfs64(const char __user *pathname, - compat_size_t sz, - struct compat_statfs64 __user *buf); -asmlinkage long compat_sys_fstatfs(unsigned int fd, - struct compat_statfs __user *buf); -asmlinkage long compat_sys_fstatfs64(unsigned int fd, compat_size_t sz, - struct 
compat_statfs64 __user *buf); -asmlinkage long compat_sys_truncate(const char __user *, compat_off_t); -asmlinkage long compat_sys_ftruncate(unsigned int, compat_ulong_t); -/* No generic prototype for truncate64, ftruncate64, fallocate */ -asmlinkage long compat_sys_openat(int dfd, const char __user *filename, - int flags, umode_t mode); - -/* fs/readdir.c */ -asmlinkage long compat_sys_getdents(unsigned int fd, - struct compat_linux_dirent __user *dirent, - unsigned int count); - -/* fs/read_write.c */ -asmlinkage long compat_sys_lseek(unsigned int, compat_off_t, unsigned int); -/* No generic prototype for pread64 and pwrite64 */ -asmlinkage ssize_t compat_sys_preadv(compat_ulong_t fd, - const struct iovec __user *vec, - compat_ulong_t vlen, u32 pos_low, u32 pos_high); -asmlinkage ssize_t compat_sys_pwritev(compat_ulong_t fd, - const struct iovec __user *vec, - compat_ulong_t vlen, u32 pos_low, u32 pos_high); -#ifdef __ARCH_WANT_COMPAT_SYS_PREADV64 -asmlinkage long compat_sys_preadv64(unsigned long fd, - const struct iovec __user *vec, - unsigned long vlen, loff_t pos); -#endif - -#ifdef __ARCH_WANT_COMPAT_SYS_PWRITEV64 -asmlinkage long compat_sys_pwritev64(unsigned long fd, - const struct iovec __user *vec, - unsigned long vlen, loff_t pos); -#endif - -/* fs/sendfile.c */ -asmlinkage long compat_sys_sendfile(int out_fd, int in_fd, - compat_off_t __user *offset, compat_size_t count); -asmlinkage long compat_sys_sendfile64(int out_fd, int in_fd, - compat_loff_t __user *offset, compat_size_t count); - -/* fs/select.c */ -asmlinkage long compat_sys_pselect6_time32(int n, compat_ulong_t __user *inp, - compat_ulong_t __user *outp, - compat_ulong_t __user *exp, - struct old_timespec32 __user *tsp, - void __user *sig); -asmlinkage long compat_sys_pselect6_time64(int n, compat_ulong_t __user *inp, - compat_ulong_t __user *outp, - compat_ulong_t __user *exp, - struct __kernel_timespec __user *tsp, - void __user *sig); -asmlinkage long compat_sys_ppoll_time32(struct pollfd 
__user *ufds, - unsigned int nfds, - struct old_timespec32 __user *tsp, - const compat_sigset_t __user *sigmask, - compat_size_t sigsetsize); -asmlinkage long compat_sys_ppoll_time64(struct pollfd __user *ufds, - unsigned int nfds, - struct __kernel_timespec __user *tsp, - const compat_sigset_t __user *sigmask, - compat_size_t sigsetsize); - -/* fs/signalfd.c */ -asmlinkage long compat_sys_signalfd4(int ufd, - const compat_sigset_t __user *sigmask, - compat_size_t sigsetsize, int flags); - -/* fs/stat.c */ -asmlinkage long compat_sys_newfstatat(unsigned int dfd, - const char __user *filename, - struct compat_stat __user *statbuf, - int flag); -asmlinkage long compat_sys_newfstat(unsigned int fd, - struct compat_stat __user *statbuf); - -/* fs/sync.c: No generic prototype for sync_file_range and sync_file_range2 */ - -/* kernel/exit.c */ -asmlinkage long compat_sys_waitid(int, compat_pid_t, - struct compat_siginfo __user *, int, - struct compat_rusage __user *); - - - -/* kernel/futex.c */ asmlinkage long compat_sys_set_robust_list(struct compat_robust_list_head __user *head, compat_size_t len); @@ -678,206 +369,385 @@ asmlinkage long compat_sys_get_robust_list(int pid, compat_uptr_t __user *head_ptr, compat_size_t __user *len_ptr); -/* kernel/itimer.c */ -asmlinkage long compat_sys_getitimer(int which, - struct old_itimerval32 __user *it); -asmlinkage long compat_sys_setitimer(int which, - struct old_itimerval32 __user *in, - struct old_itimerval32 __user *out); +asmlinkage long compat_sys_ipc(u32, int, int, u32, compat_uptr_t, u32); +asmlinkage long compat_sys_shmat(int shmid, compat_uptr_t shmaddr, int shmflg) __intentional_overflow(0); +asmlinkage long compat_sys_semctl(int semid, int semnum, int cmd, int arg); +asmlinkage long compat_sys_msgsnd(int msqid, compat_uptr_t msgp, + compat_ssize_t msgsz, int msgflg); +asmlinkage long compat_sys_msgrcv(int msqid, compat_uptr_t msgp, + compat_ssize_t msgsz, compat_long_t msgtyp, int msgflg); +long compat_sys_msgctl(int 
first, int second, void __user *uptr); +long compat_sys_shmctl(int first, int second, void __user *uptr); +long compat_sys_semtimedop(int semid, struct sembuf __user *tsems, + compat_long_t nsems, const struct compat_timespec __user *timeout); +asmlinkage long compat_sys_keyctl(u32 option, + u32 arg2, u32 arg3, u32 arg4, u32 arg5); +asmlinkage long compat_sys_ustat(unsigned dev, struct compat_ustat __user *u32); -/* kernel/kexec.c */ -asmlinkage long compat_sys_kexec_load(compat_ulong_t entry, - compat_ulong_t nr_segments, - struct compat_kexec_segment __user *, - compat_ulong_t flags); +asmlinkage ssize_t compat_sys_readv(compat_ulong_t fd, + const struct compat_iovec __user *vec, compat_ulong_t vlen); +asmlinkage ssize_t compat_sys_writev(compat_ulong_t fd, + const struct compat_iovec __user *vec, compat_ulong_t vlen); +asmlinkage ssize_t compat_sys_preadv(compat_ulong_t fd, + const struct compat_iovec __user *vec, + compat_ulong_t vlen, u32 pos_low, u32 pos_high); +asmlinkage ssize_t compat_sys_pwritev(compat_ulong_t fd, + const struct compat_iovec __user *vec, + compat_ulong_t vlen, u32 pos_low, u32 pos_high); +asmlinkage ssize_t compat_sys_preadv2(compat_ulong_t fd, + const struct compat_iovec __user *vec, + compat_ulong_t vlen, u32 pos_low, u32 pos_high, int flags); +asmlinkage ssize_t compat_sys_pwritev2(compat_ulong_t fd, + const struct compat_iovec __user *vec, + compat_ulong_t vlen, u32 pos_low, u32 pos_high, int flags); -/* kernel/posix-timers.c */ -asmlinkage long compat_sys_timer_create(clockid_t which_clock, - struct compat_sigevent __user *timer_event_spec, - timer_t __user *created_timer_id); +#ifdef __ARCH_WANT_COMPAT_SYS_PREADV64 +asmlinkage long compat_sys_preadv64(unsigned long fd, + const struct compat_iovec __user *vec, + unsigned long vlen, loff_t pos); +#endif -/* kernel/ptrace.c */ +#ifdef __ARCH_WANT_COMPAT_SYS_PWRITEV64 +asmlinkage long compat_sys_pwritev64(unsigned long fd, + const struct compat_iovec __user *vec, + unsigned long vlen, 
loff_t pos); +#endif + +asmlinkage long compat_sys_lseek(unsigned int, compat_off_t, unsigned int); + +asmlinkage long compat_sys_execve(const char __user *filename, const compat_uptr_t __user *argv, + const compat_uptr_t __user *envp); +asmlinkage long compat_sys_execveat(int dfd, const char __user *filename, + const compat_uptr_t __user *argv, + const compat_uptr_t __user *envp, int flags); + +asmlinkage long compat_sys_select(int n, compat_ulong_t __user *inp, + compat_ulong_t __user *outp, compat_ulong_t __user *exp, + struct compat_timeval __user *tvp); + +asmlinkage long compat_sys_old_select(struct compat_sel_arg_struct __user *arg); + +asmlinkage long compat_sys_wait4(compat_pid_t pid, + compat_uint_t __user *stat_addr, int options, + struct compat_rusage __user *ru); + +#define BITS_PER_COMPAT_LONG (8*sizeof(compat_long_t)) + +#define BITS_TO_COMPAT_LONGS(bits) \ + (((bits)+BITS_PER_COMPAT_LONG-1)/BITS_PER_COMPAT_LONG) + +long compat_get_bitmap(unsigned long *mask, const compat_ulong_t __user *umask, + unsigned long bitmap_size); +long compat_put_bitmap(compat_ulong_t __user *umask, unsigned long *mask, + unsigned long bitmap_size); +int copy_siginfo_from_user32(siginfo_t *to, struct compat_siginfo __user *from); +int copy_siginfo_to_user32(struct compat_siginfo __user *to, const siginfo_t *from); +int get_compat_sigevent(struct sigevent *event, + const struct compat_sigevent __user *u_event); +long compat_sys_rt_tgsigqueueinfo(compat_pid_t tgid, compat_pid_t pid, int sig, + struct compat_siginfo __user *uinfo); +#ifdef CONFIG_COMPAT_OLD_SIGACTION +asmlinkage long compat_sys_sigaction(int sig, + const struct compat_old_sigaction __user *act, + struct compat_old_sigaction __user *oact); +#endif + +static inline int compat_timeval_compare(struct compat_timeval *lhs, + struct compat_timeval *rhs) +{ + if (lhs->tv_sec < rhs->tv_sec) + return -1; + if (lhs->tv_sec > rhs->tv_sec) + return 1; + return lhs->tv_usec - rhs->tv_usec; +} + +static inline int 
compat_timespec_compare(struct compat_timespec *lhs, + struct compat_timespec *rhs) +{ + if (lhs->tv_sec < rhs->tv_sec) + return -1; + if (lhs->tv_sec > rhs->tv_sec) + return 1; + return lhs->tv_nsec - rhs->tv_nsec; +} + +extern int get_compat_itimerspec(struct itimerspec *dst, + const struct compat_itimerspec __user *src); +extern int put_compat_itimerspec(struct compat_itimerspec __user *dst, + const struct itimerspec *src); + +asmlinkage long compat_sys_gettimeofday(struct compat_timeval __user *tv, + struct timezone __user *tz); +asmlinkage long compat_sys_settimeofday(struct compat_timeval __user *tv, + struct timezone __user *tz); + +asmlinkage long compat_sys_adjtimex(struct compat_timex __user *utp); + +extern void sigset_from_compat(sigset_t *set, const compat_sigset_t *compat); +extern void sigset_to_compat(compat_sigset_t *compat, const sigset_t *set); + +asmlinkage long compat_sys_migrate_pages(compat_pid_t pid, + compat_ulong_t maxnode, const compat_ulong_t __user *old_nodes, + const compat_ulong_t __user *new_nodes); + +extern int compat_ptrace_request(struct task_struct *child, + compat_long_t request, + compat_ulong_t addr, compat_ulong_t data); + +extern long compat_arch_ptrace(struct task_struct *child, compat_long_t request, + compat_ulong_t addr, compat_ulong_t data); asmlinkage long compat_sys_ptrace(compat_long_t request, compat_long_t pid, - compat_long_t addr, compat_long_t data); + compat_ulong_t addr, compat_ulong_t data); -/* kernel/sched/core.c */ +asmlinkage long compat_sys_lookup_dcookie(u32, u32, char __user *, compat_size_t); +/* + * epoll (fs/eventpoll.c) compat bits follow ... 
+ */ +struct epoll_event; /* fortunately, this one is fixed-layout */ +asmlinkage long compat_sys_epoll_pwait(int epfd, + struct epoll_event __user *events, + int maxevents, int timeout, + const compat_sigset_t __user *sigmask, + compat_size_t sigsetsize); + +asmlinkage long compat_sys_utime(const char __user *filename, + struct compat_utimbuf __user *t); +asmlinkage long compat_sys_utimensat(unsigned int dfd, + const char __user *filename, + struct compat_timespec __user *t, + int flags); + +asmlinkage long compat_sys_time(compat_time_t __user *tloc); +asmlinkage long compat_sys_stime(compat_time_t __user *tptr); +asmlinkage long compat_sys_signalfd(int ufd, + const compat_sigset_t __user *sigmask, + compat_size_t sigsetsize); +asmlinkage long compat_sys_timerfd_settime(int ufd, int flags, + const struct compat_itimerspec __user *utmr, + struct compat_itimerspec __user *otmr); +asmlinkage long compat_sys_timerfd_gettime(int ufd, + struct compat_itimerspec __user *otmr); + +asmlinkage long compat_sys_move_pages(pid_t pid, compat_ulong_t nr_pages, + __u32 __user *pages, + const int __user *nodes, + int __user *status, + int flags); +asmlinkage long compat_sys_futimesat(unsigned int dfd, + const char __user *filename, + struct compat_timeval __user *t); +asmlinkage long compat_sys_utimes(const char __user *filename, + struct compat_timeval __user *t); +asmlinkage long compat_sys_newstat(const char __user *filename, + struct compat_stat __user *statbuf); +asmlinkage long compat_sys_newlstat(const char __user *filename, + struct compat_stat __user *statbuf); +asmlinkage long compat_sys_newfstatat(unsigned int dfd, + const char __user *filename, + struct compat_stat __user *statbuf, + int flag); +asmlinkage long compat_sys_newfstat(unsigned int fd, + struct compat_stat __user *statbuf); +asmlinkage long compat_sys_statfs(const char __user *pathname, + struct compat_statfs __user *buf); +asmlinkage long compat_sys_fstatfs(unsigned int fd, + struct compat_statfs __user 
*buf); +asmlinkage long compat_sys_statfs64(const char __user *pathname, + compat_size_t sz, + struct compat_statfs64 __user *buf); +asmlinkage long compat_sys_fstatfs64(unsigned int fd, compat_size_t sz, + struct compat_statfs64 __user *buf); +asmlinkage long compat_sys_fcntl64(unsigned int fd, unsigned int cmd, + compat_ulong_t arg); +asmlinkage long compat_sys_fcntl(unsigned int fd, unsigned int cmd, + compat_ulong_t arg); +asmlinkage long compat_sys_io_setup(unsigned nr_reqs, u32 __user *ctx32p); +asmlinkage long compat_sys_io_getevents(compat_aio_context_t ctx_id, + compat_long_t min_nr, + compat_long_t nr, + struct io_event __user *events, + struct compat_timespec __user *timeout); +asmlinkage long compat_sys_io_submit(compat_aio_context_t ctx_id, int nr, + u32 __user *iocb); +asmlinkage long compat_sys_mount(const char __user *dev_name, + const char __user *dir_name, + const char __user *type, compat_ulong_t flags, + const void __user *data); +asmlinkage long compat_sys_old_readdir(unsigned int fd, + struct compat_old_linux_dirent __user *, + unsigned int count); +asmlinkage long compat_sys_getdents(unsigned int fd, + struct compat_linux_dirent __user *dirent, + unsigned int count); +#ifdef __ARCH_WANT_COMPAT_SYS_GETDENTS64 +asmlinkage long compat_sys_getdents64(unsigned int fd, + struct linux_dirent64 __user *dirent, + unsigned int count); +#endif +asmlinkage long compat_sys_vmsplice(int fd, const struct compat_iovec __user *, + unsigned int nr_segs, unsigned int flags); +asmlinkage long compat_sys_open(const char __user *filename, int flags, + umode_t mode); +asmlinkage long compat_sys_openat(int dfd, const char __user *filename, + int flags, umode_t mode); +asmlinkage long compat_sys_open_by_handle_at(int mountdirfd, + struct file_handle __user *handle, + int flags); +asmlinkage long compat_sys_truncate(const char __user *, compat_off_t); +asmlinkage long compat_sys_ftruncate(unsigned int, compat_ulong_t); +asmlinkage long compat_sys_pselect6(int n, 
compat_ulong_t __user *inp, + compat_ulong_t __user *outp, + compat_ulong_t __user *exp, + struct compat_timespec __user *tsp, + void __user *sig); +asmlinkage long compat_sys_ppoll(struct pollfd __user *ufds, + unsigned int nfds, + struct compat_timespec __user *tsp, + const compat_sigset_t __user *sigmask, + compat_size_t sigsetsize); +asmlinkage long compat_sys_signalfd4(int ufd, + const compat_sigset_t __user *sigmask, + compat_size_t sigsetsize, int flags); +asmlinkage long compat_sys_get_mempolicy(int __user *policy, + compat_ulong_t __user *nmask, + compat_ulong_t maxnode, + compat_ulong_t addr, + compat_ulong_t flags); +asmlinkage long compat_sys_set_mempolicy(int mode, compat_ulong_t __user *nmask, + compat_ulong_t maxnode); +asmlinkage long compat_sys_mbind(compat_ulong_t start, compat_ulong_t len, + compat_ulong_t mode, + compat_ulong_t __user *nmask, + compat_ulong_t maxnode, compat_ulong_t flags); + +asmlinkage long compat_sys_setsockopt(int fd, int level, int optname, + char __user *optval, unsigned int optlen); +asmlinkage long compat_sys_sendmsg(int fd, struct compat_msghdr __user *msg, + unsigned flags); +asmlinkage long compat_sys_sendmmsg(int fd, struct compat_mmsghdr __user *mmsg, + unsigned vlen, unsigned int flags); +asmlinkage long compat_sys_recvmsg(int fd, struct compat_msghdr __user *msg, + unsigned int flags); +asmlinkage long compat_sys_recv(int fd, void __user *buf, compat_size_t len, + unsigned flags); +asmlinkage long compat_sys_recvfrom(int fd, void __user *buf, compat_size_t len, + unsigned flags, struct sockaddr __user *addr, + int __user *addrlen); +asmlinkage long compat_sys_recvmmsg(int fd, struct compat_mmsghdr __user *mmsg, + unsigned vlen, unsigned int flags, + struct compat_timespec __user *timeout); +asmlinkage long compat_sys_nanosleep(struct compat_timespec __user *rqtp, + struct compat_timespec __user *rmtp); +asmlinkage long compat_sys_getitimer(int which, + struct compat_itimerval __user *it); +asmlinkage long 
compat_sys_setitimer(int which, + struct compat_itimerval __user *in, + struct compat_itimerval __user *out); +asmlinkage long compat_sys_times(struct compat_tms __user *tbuf); +asmlinkage long compat_sys_setrlimit(unsigned int resource, + struct compat_rlimit __user *rlim); +asmlinkage long compat_sys_getrlimit(unsigned int resource, + struct compat_rlimit __user *rlim); +asmlinkage long compat_sys_getrusage(int who, struct compat_rusage __user *ru); asmlinkage long compat_sys_sched_setaffinity(compat_pid_t pid, unsigned int len, compat_ulong_t __user *user_mask_ptr); asmlinkage long compat_sys_sched_getaffinity(compat_pid_t pid, unsigned int len, compat_ulong_t __user *user_mask_ptr); - -/* kernel/signal.c */ -asmlinkage long compat_sys_sigaltstack(const compat_stack_t __user *uss_ptr, - compat_stack_t __user *uoss_ptr); +asmlinkage long compat_sys_timer_create(clockid_t which_clock, + struct compat_sigevent __user *timer_event_spec, + timer_t __user *created_timer_id); +asmlinkage long compat_sys_timer_settime(timer_t timer_id, int flags, + struct compat_itimerspec __user *new, + struct compat_itimerspec __user *old); +asmlinkage long compat_sys_timer_gettime(timer_t timer_id, + struct compat_itimerspec __user *setting); +asmlinkage long compat_sys_clock_settime(clockid_t which_clock, + struct compat_timespec __user *tp); +asmlinkage long compat_sys_clock_gettime(clockid_t which_clock, + struct compat_timespec __user *tp); +asmlinkage long compat_sys_clock_adjtime(clockid_t which_clock, + struct compat_timex __user *tp); +asmlinkage long compat_sys_clock_getres(clockid_t which_clock, + struct compat_timespec __user *tp); +asmlinkage long compat_sys_clock_nanosleep(clockid_t which_clock, int flags, + struct compat_timespec __user *rqtp, + struct compat_timespec __user *rmtp); +asmlinkage long compat_sys_rt_sigtimedwait(compat_sigset_t __user *uthese, + struct compat_siginfo __user *uinfo, + struct compat_timespec __user *uts, compat_size_t sigsetsize); asmlinkage 
long compat_sys_rt_sigsuspend(compat_sigset_t __user *unewset, compat_size_t sigsetsize); +asmlinkage long compat_sys_rt_sigprocmask(int how, compat_sigset_t __user *set, + compat_sigset_t __user *oset, + compat_size_t sigsetsize); +asmlinkage long compat_sys_rt_sigpending(compat_sigset_t __user *uset, + compat_size_t sigsetsize); #ifndef CONFIG_ODD_RT_SIGACTION asmlinkage long compat_sys_rt_sigaction(int, const struct compat_sigaction __user *, struct compat_sigaction __user *, compat_size_t); #endif -asmlinkage long compat_sys_rt_sigprocmask(int how, compat_sigset_t __user *set, - compat_sigset_t __user *oset, - compat_size_t sigsetsize); -asmlinkage long compat_sys_rt_sigpending(compat_sigset_t __user *uset, - compat_size_t sigsetsize); -asmlinkage long compat_sys_rt_sigtimedwait_time32(compat_sigset_t __user *uthese, - struct compat_siginfo __user *uinfo, - struct old_timespec32 __user *uts, compat_size_t sigsetsize); -asmlinkage long compat_sys_rt_sigtimedwait_time64(compat_sigset_t __user *uthese, - struct compat_siginfo __user *uinfo, - struct __kernel_timespec __user *uts, compat_size_t sigsetsize); asmlinkage long compat_sys_rt_sigqueueinfo(compat_pid_t pid, int sig, struct compat_siginfo __user *uinfo); -/* No generic prototype for rt_sigreturn */ - -/* kernel/sys.c */ -asmlinkage long compat_sys_times(struct compat_tms __user *tbuf); -asmlinkage long compat_sys_getrlimit(unsigned int resource, - struct compat_rlimit __user *rlim); -asmlinkage long compat_sys_setrlimit(unsigned int resource, - struct compat_rlimit __user *rlim); -asmlinkage long compat_sys_getrusage(int who, struct compat_rusage __user *ru); - -/* kernel/time.c */ -asmlinkage long compat_sys_gettimeofday(struct old_timeval32 __user *tv, - struct timezone __user *tz); -asmlinkage long compat_sys_settimeofday(struct old_timeval32 __user *tv, - struct timezone __user *tz); - -/* kernel/timer.c */ asmlinkage long compat_sys_sysinfo(struct compat_sysinfo __user *info); - -/* ipc/mqueue.c */ 
-asmlinkage long compat_sys_mq_open(const char __user *u_name, - int oflag, compat_mode_t mode, - struct compat_mq_attr __user *u_attr); -asmlinkage long compat_sys_mq_notify(mqd_t mqdes, - const struct compat_sigevent __user *u_notification); +asmlinkage long compat_sys_ioctl(unsigned int fd, unsigned int cmd, + compat_ulong_t arg); +asmlinkage long compat_sys_futex(u32 __user *uaddr, int op, u32 val, + struct compat_timespec __user *utime, u32 __user *uaddr2, + u32 val3); +asmlinkage long compat_sys_getsockopt(int fd, int level, int optname, + char __user *optval, int __user *optlen); +asmlinkage long compat_sys_kexec_load(compat_ulong_t entry, + compat_ulong_t nr_segments, + struct compat_kexec_segment __user *, + compat_ulong_t flags); asmlinkage long compat_sys_mq_getsetattr(mqd_t mqdes, const struct compat_mq_attr __user *u_mqstat, struct compat_mq_attr __user *u_omqstat); +asmlinkage long compat_sys_mq_notify(mqd_t mqdes, + const struct compat_sigevent __user *u_notification); +asmlinkage long compat_sys_mq_open(const char __user *u_name, + int oflag, compat_mode_t mode, + struct compat_mq_attr __user *u_attr); +asmlinkage long compat_sys_mq_timedsend(mqd_t mqdes, + const char __user *u_msg_ptr, + compat_size_t msg_len, unsigned int msg_prio, + const struct compat_timespec __user *u_abs_timeout); +asmlinkage ssize_t compat_sys_mq_timedreceive(mqd_t mqdes, + char __user *u_msg_ptr, + compat_size_t msg_len, unsigned int __user *u_msg_prio, + const struct compat_timespec __user *u_abs_timeout); +asmlinkage long compat_sys_socketcall(int call, u32 __user *args); +asmlinkage long compat_sys_sysctl(struct compat_sysctl_args __user *args); -/* ipc/msg.c */ -asmlinkage long compat_sys_msgctl(int first, int second, void __user *uptr); -asmlinkage long compat_sys_msgrcv(int msqid, compat_uptr_t msgp, - compat_ssize_t msgsz, compat_long_t msgtyp, int msgflg); -asmlinkage long compat_sys_msgsnd(int msqid, compat_uptr_t msgp, - compat_ssize_t msgsz, int msgflg); +extern 
ssize_t compat_rw_copy_check_uvector(int type, + const struct compat_iovec __user *uvector, + unsigned long nr_segs, + unsigned long fast_segs, struct iovec *fast_pointer, + struct iovec **ret_pointer); -/* ipc/sem.c */ -asmlinkage long compat_sys_semctl(int semid, int semnum, int cmd, int arg); +extern void __user *compat_alloc_user_space(unsigned long len); -/* ipc/shm.c */ -asmlinkage long compat_sys_shmctl(int first, int second, void __user *uptr); -asmlinkage long compat_sys_shmat(int shmid, compat_uptr_t shmaddr, int shmflg); +asmlinkage ssize_t compat_sys_process_vm_readv(compat_pid_t pid, + const struct compat_iovec __user *lvec, + compat_ulong_t liovcnt, const struct compat_iovec __user *rvec, + compat_ulong_t riovcnt, compat_ulong_t flags); +asmlinkage ssize_t compat_sys_process_vm_writev(compat_pid_t pid, + const struct compat_iovec __user *lvec, + compat_ulong_t liovcnt, const struct compat_iovec __user *rvec, + compat_ulong_t riovcnt, compat_ulong_t flags); -/* net/socket.c */ -asmlinkage long compat_sys_recvfrom(int fd, void __user *buf, compat_size_t len, - unsigned flags, struct sockaddr __user *addr, - int __user *addrlen); -asmlinkage long compat_sys_sendmsg(int fd, struct compat_msghdr __user *msg, - unsigned flags); -asmlinkage long compat_sys_recvmsg(int fd, struct compat_msghdr __user *msg, - unsigned int flags); +asmlinkage long compat_sys_sendfile(int out_fd, int in_fd, + compat_off_t __user *offset, compat_size_t count); +asmlinkage long compat_sys_sendfile64(int out_fd, int in_fd, + compat_loff_t __user *offset, compat_size_t count); +asmlinkage long compat_sys_sigaltstack(const compat_stack_t __user *uss_ptr, + compat_stack_t __user *uoss_ptr); -/* mm/filemap.c: No generic prototype for readahead */ - -/* security/keys/keyctl.c */ -asmlinkage long compat_sys_keyctl(u32 option, - u32 arg2, u32 arg3, u32 arg4, u32 arg5); - -/* arch/example/kernel/sys_example.c */ -asmlinkage long compat_sys_execve(const char __user *filename, const 
compat_uptr_t __user *argv, - const compat_uptr_t __user *envp); - -/* mm/fadvise.c: No generic prototype for fadvise64_64 */ - -/* mm/, CONFIG_MMU only */ -asmlinkage long compat_sys_rt_tgsigqueueinfo(compat_pid_t tgid, - compat_pid_t pid, int sig, - struct compat_siginfo __user *uinfo); -asmlinkage long compat_sys_recvmmsg_time64(int fd, struct compat_mmsghdr __user *mmsg, - unsigned vlen, unsigned int flags, - struct __kernel_timespec __user *timeout); -asmlinkage long compat_sys_recvmmsg_time32(int fd, struct compat_mmsghdr __user *mmsg, - unsigned vlen, unsigned int flags, - struct old_timespec32 __user *timeout); -asmlinkage long compat_sys_wait4(compat_pid_t pid, - compat_uint_t __user *stat_addr, int options, - struct compat_rusage __user *ru); -asmlinkage long compat_sys_fanotify_mark(int, unsigned int, __u32, __u32, - int, const char __user *); -asmlinkage long compat_sys_open_by_handle_at(int mountdirfd, - struct file_handle __user *handle, - int flags); -asmlinkage long compat_sys_sendmmsg(int fd, struct compat_mmsghdr __user *mmsg, - unsigned vlen, unsigned int flags); -asmlinkage long compat_sys_execveat(int dfd, const char __user *filename, - const compat_uptr_t __user *argv, - const compat_uptr_t __user *envp, int flags); -asmlinkage ssize_t compat_sys_preadv2(compat_ulong_t fd, - const struct iovec __user *vec, - compat_ulong_t vlen, u32 pos_low, u32 pos_high, rwf_t flags); -asmlinkage ssize_t compat_sys_pwritev2(compat_ulong_t fd, - const struct iovec __user *vec, - compat_ulong_t vlen, u32 pos_low, u32 pos_high, rwf_t flags); -#ifdef __ARCH_WANT_COMPAT_SYS_PREADV64V2 -asmlinkage long compat_sys_preadv64v2(unsigned long fd, - const struct iovec __user *vec, - unsigned long vlen, loff_t pos, rwf_t flags); -#endif - -#ifdef __ARCH_WANT_COMPAT_SYS_PWRITEV64V2 -asmlinkage long compat_sys_pwritev64v2(unsigned long fd, - const struct iovec __user *vec, - unsigned long vlen, loff_t pos, rwf_t flags); -#endif - - -/* - * Deprecated system calls which are 
still defined in - * include/uapi/asm-generic/unistd.h and wanted by >= 1 arch - */ - -/* __ARCH_WANT_SYSCALL_NO_AT */ -asmlinkage long compat_sys_open(const char __user *filename, int flags, - umode_t mode); - -/* __ARCH_WANT_SYSCALL_NO_FLAGS */ -asmlinkage long compat_sys_signalfd(int ufd, - const compat_sigset_t __user *sigmask, - compat_size_t sigsetsize); - -/* __ARCH_WANT_SYSCALL_OFF_T */ -asmlinkage long compat_sys_newstat(const char __user *filename, - struct compat_stat __user *statbuf); -asmlinkage long compat_sys_newlstat(const char __user *filename, - struct compat_stat __user *statbuf); - -/* __ARCH_WANT_SYSCALL_DEPRECATED */ -asmlinkage long compat_sys_select(int n, compat_ulong_t __user *inp, - compat_ulong_t __user *outp, compat_ulong_t __user *exp, - struct old_timeval32 __user *tvp); -asmlinkage long compat_sys_ustat(unsigned dev, struct compat_ustat __user *u32); -asmlinkage long compat_sys_recv(int fd, void __user *buf, compat_size_t len, - unsigned flags); - -/* obsolete: fs/readdir.c */ -asmlinkage long compat_sys_old_readdir(unsigned int fd, - struct compat_old_linux_dirent __user *, - unsigned int count); - -/* obsolete: fs/select.c */ -asmlinkage long compat_sys_old_select(struct compat_sel_arg_struct __user *arg); - -/* obsolete: ipc */ -asmlinkage long compat_sys_ipc(u32, int, int, u32, compat_uptr_t, u32); - -/* obsolete: kernel/signal.c */ #ifdef __ARCH_WANT_SYS_SIGPENDING asmlinkage long compat_sys_sigpending(compat_old_sigset_t __user *set); #endif @@ -886,100 +756,40 @@ asmlinkage long compat_sys_sigpending(compat_old_sigset_t __user *set); asmlinkage long compat_sys_sigprocmask(int how, compat_old_sigset_t __user *nset, compat_old_sigset_t __user *oset); #endif -#ifdef CONFIG_COMPAT_OLD_SIGACTION -asmlinkage long compat_sys_sigaction(int sig, - const struct compat_old_sigaction __user *act, - struct compat_old_sigaction __user *oact); -#endif -/* obsolete: net/socket.c */ -asmlinkage long compat_sys_socketcall(int call, u32 __user 
*args); +int compat_restore_altstack(const compat_stack_t __user *uss); +int __compat_save_altstack(compat_stack_t __user *, unsigned long); +#define compat_save_altstack_ex(uss, sp) do { \ + compat_stack_t __user *__uss = uss; \ + struct task_struct *t = current; \ + put_user_ex(ptr_to_compat((void __user *)t->sas_ss_sp), &__uss->ss_sp); \ + put_user_ex(t->sas_ss_flags, &__uss->ss_flags); \ + put_user_ex(t->sas_ss_size, &__uss->ss_size); \ + if (t->sas_ss_flags & SS_AUTODISARM) \ + sas_ss_reset(t); \ +} while (0); -#endif /* CONFIG_ARCH_HAS_SYSCALL_WRAPPER */ +asmlinkage long compat_sys_sched_rr_get_interval(compat_pid_t pid, + struct compat_timespec __user *interval); -/** - * ns_to_old_timeval32 - Compat version of ns_to_timeval - * @nsec: the nanoseconds value to be converted - * - * Returns the old_timeval32 representation of the nsec parameter. - */ -static inline struct old_timeval32 ns_to_old_timeval32(s64 nsec) -{ - struct __kernel_old_timeval tv; - struct old_timeval32 ctv; - - tv = ns_to_kernel_old_timeval(nsec); - ctv.tv_sec = tv.tv_sec; - ctv.tv_usec = tv.tv_usec; - - return ctv; -} - -/* - * Kernel code should not call compat syscalls (i.e., compat_sys_xyzyyz()) - * directly. Instead, use one of the functions which work equivalently, such - * as the kcompat_sys_xyzyyz() functions prototyped below. - */ - -int kcompat_sys_statfs64(const char __user * pathname, compat_size_t sz, - struct compat_statfs64 __user * buf); -int kcompat_sys_fstatfs64(unsigned int fd, compat_size_t sz, - struct compat_statfs64 __user * buf); - -#ifdef CONFIG_COMPAT +asmlinkage long compat_sys_fanotify_mark(int, unsigned int, __u32, __u32, + int, const char __user *); /* * For most but not all architectures, "am I in a compat syscall?" and * "am I a compat task?" are the same question. For architectures on which * they aren't the same question, arch code can override in_compat_syscall. 
*/ + #ifndef in_compat_syscall static inline bool in_compat_syscall(void) { return is_compat_task(); } #endif -#else /* !CONFIG_COMPAT */ +#else #define is_compat_task() (0) -/* Ensure no one redefines in_compat_syscall() under !CONFIG_COMPAT */ -#define in_compat_syscall in_compat_syscall static inline bool in_compat_syscall(void) { return false; } #endif /* CONFIG_COMPAT */ -#define BITS_PER_COMPAT_LONG (8*sizeof(compat_long_t)) - -#define BITS_TO_COMPAT_LONGS(bits) DIV_ROUND_UP(bits, BITS_PER_COMPAT_LONG) - -long compat_get_bitmap(unsigned long *mask, const compat_ulong_t __user *umask, - unsigned long bitmap_size); -long compat_put_bitmap(compat_ulong_t __user *umask, unsigned long *mask, - unsigned long bitmap_size); - -/* - * Some legacy ABIs like the i386 one use less than natural alignment for 64-bit - * types, and will need special compat treatment for that. Most architectures - * don't need that special handling even for compat syscalls. - */ -#ifndef compat_need_64bit_alignment_fixup -#define compat_need_64bit_alignment_fixup() false -#endif - -/* - * A pointer passed in from user mode. This should not - * be used for syscall parameters, just declare them - * as pointers because the syscall entry code will have - * appropriately converted them already. - */ -#ifndef compat_ptr -static inline void __user *compat_ptr(compat_uptr_t uptr) -{ - return (void __user *)(unsigned long)uptr; -} -#endif - -static inline compat_uptr_t ptr_to_compat(void __user *uptr) -{ - return (u32)(unsigned long)uptr; -} - #endif /* _LINUX_COMPAT_H */ diff --git a/include/linux/compiler-clang.h b/include/linux/compiler-clang.h index 3c4de9b6c6..de179993e0 100644 --- a/include/linux/compiler-clang.h +++ b/include/linux/compiler-clang.h @@ -1,70 +1,17 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -#ifndef __LINUX_COMPILER_TYPES_H +#ifndef __LINUX_COMPILER_H #error "Please don't include directly, include instead." 
#endif -/* Compiler specific definitions for Clang compiler */ +/* Some compiler specific definitions are overwritten here + * for Clang compiler + */ + +#ifdef uninitialized_var +#undef uninitialized_var +#define uninitialized_var(x) x = *(&(x)) +#endif /* same as gcc, this was present in clang-2.6 so we can assume it works * with any version that can compile the kernel */ #define __UNIQUE_ID(prefix) __PASTE(__PASTE(__UNIQUE_ID_, prefix), __COUNTER__) - -/* all clang versions usable with the kernel support KASAN ABI version 5 */ -#define KASAN_ABI_VERSION 5 - -/* - * Note: Checking __has_feature(*_sanitizer) is only true if the feature is - * enabled. Therefore it is not required to additionally check defined(CONFIG_*) - * to avoid adding redundant attributes in other configurations. - */ - -#if __has_feature(address_sanitizer) || __has_feature(hwaddress_sanitizer) -/* Emulate GCC's __SANITIZE_ADDRESS__ flag */ -#define __SANITIZE_ADDRESS__ -#define __no_sanitize_address \ - __attribute__((no_sanitize("address", "hwaddress"))) -#else -#define __no_sanitize_address -#endif - -#if __has_feature(thread_sanitizer) -/* emulate gcc's __SANITIZE_THREAD__ flag */ -#define __SANITIZE_THREAD__ -#define __no_sanitize_thread \ - __attribute__((no_sanitize("thread"))) -#else -#define __no_sanitize_thread -#endif - -#if defined(CONFIG_ARCH_USE_BUILTIN_BSWAP) -#define __HAVE_BUILTIN_BSWAP32__ -#define __HAVE_BUILTIN_BSWAP64__ -#define __HAVE_BUILTIN_BSWAP16__ -#endif /* CONFIG_ARCH_USE_BUILTIN_BSWAP */ - -#if __has_feature(undefined_behavior_sanitizer) -/* GCC does not have __SANITIZE_UNDEFINED__ */ -#define __no_sanitize_undefined \ - __attribute__((no_sanitize("undefined"))) -#else -#define __no_sanitize_undefined -#endif - -/* - * Support for __has_feature(coverage_sanitizer) was added in Clang 13 together - * with no_sanitize("coverage"). Prior versions of Clang support coverage - * instrumentation, but cannot be queried for support by the preprocessor. 
- */ -#if __has_feature(coverage_sanitizer) -#define __no_sanitize_coverage __attribute__((no_sanitize("coverage"))) -#else -#define __no_sanitize_coverage -#endif - -#if __has_feature(shadow_call_stack) -# define __noscs __attribute__((__no_sanitize__("shadow-call-stack"))) -#endif - -#define __nocfi __attribute__((__no_sanitize__("cfi"))) -#define __cficanonical __attribute__((__cfi_canonical_jump_table__)) diff --git a/include/linux/compiler-gcc.h b/include/linux/compiler-gcc.h index bd2b881c6b..dc13b5a92a 100644 --- a/include/linux/compiler-gcc.h +++ b/include/linux/compiler-gcc.h @@ -1,5 +1,4 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -#ifndef __LINUX_COMPILER_TYPES_H +#ifndef __LINUX_COMPILER_H #error "Please don't include directly, include instead." #endif @@ -10,6 +9,25 @@ + __GNUC_MINOR__ * 100 \ + __GNUC_PATCHLEVEL__) +/* Optimization barrier */ + +/* The "volatile" is due to gcc bugs */ +#define barrier() __asm__ __volatile__("": : :"memory") +/* + * This version is i.e. to prevent dead stores elimination on @ptr + * where gcc and llvm may behave differently when otherwise using + * normal barrier(): while gcc behavior gets along with a normal + * barrier(), llvm needs an explicit input variable to be assumed + * clobbered. The issue is as follows: while the inline asm might + * access any memory it wants, the compiler could have fit all of + * @ptr into memory registers instead, and since @ptr never escaped + * from that, it proved that the inline asm wasn't touching any of + * it. This version works well with both compilers, i.e. we're telling + * the compiler that the inline asm absolutely may see the contents + * of @ptr. See also: https://llvm.org/bugs/show_bug.cgi?id=15495 + */ +#define barrier_data(ptr) __asm__ __volatile__("": :"r"(ptr) :"memory") + /* * This macro obfuscates arithmetic on a variable address so that gcc * shouldn't recognize the original var, and make assumptions about it. 
@@ -35,45 +53,233 @@ (typeof(ptr)) (__ptr + (off)); \ }) -#ifdef CONFIG_RETPOLINE -#define __noretpoline __attribute__((__indirect_branch__("keep"))) -#endif +/* Make the optimizer believe the variable can be manipulated arbitrarily. */ +#define OPTIMIZER_HIDE_VAR(var) \ + __asm__ ("" : "=r" (var) : "0" (var)) -#define __UNIQUE_ID(prefix) __PASTE(__PASTE(__UNIQUE_ID_, prefix), __COUNTER__) - -#define __compiletime_object_size(obj) __builtin_object_size(obj, 0) - -#if defined(LATENT_ENTROPY_PLUGIN) && !defined(__CHECKER__) -#define __latent_entropy __attribute__((latent_entropy)) +#ifdef __CHECKER__ +#define __must_be_array(a) 0 +#else +/* &a[0] degrades to a pointer: a different type from an array */ +#define __must_be_array(a) BUILD_BUG_ON_ZERO(__same_type((a), &(a)[0])) #endif /* - * calling noreturn functions, __builtin_unreachable() and __builtin_trap() - * confuse the stack allocation in gcc, leading to overly large stack - * frames, see https://gcc.gnu.org/bugzilla/show_bug.cgi?id=82365 - * - * Adding an empty inline assembly before it works around the problem + * Force always-inline if the user requests it so via the .config, + * or if gcc is too old: */ -#define barrier_before_unreachable() asm volatile("") +#if !defined(CONFIG_ARCH_SUPPORTS_OPTIMIZED_INLINING) || \ + !defined(CONFIG_OPTIMIZE_INLINING) || (__GNUC__ < 4) +#define inline inline __attribute__((always_inline)) notrace +#define __inline__ __inline__ __attribute__((always_inline)) notrace +#define __inline __inline __attribute__((always_inline)) notrace +#else +/* A lot of inline functions can cause havoc with function tracing */ +#define inline inline notrace +#define __inline__ __inline__ notrace +#define __inline __inline notrace +#endif + +#define __always_inline inline __attribute__((always_inline)) +#define noinline __attribute__((noinline)) + +#define __deprecated __attribute__((deprecated)) +#define __packed __attribute__((packed)) +#define __weak __attribute__((weak)) +#define 
__alias(symbol) __attribute__((alias(#symbol))) + +/* + * it doesn't make sense on ARM (currently the only user of __naked) + * to trace naked functions because then mcount is called without + * stack and frame pointer being set up and there is no chance to + * restore the lr register to the value before mcount was called. + * + * The asm() bodies of naked functions often depend on standard calling + * conventions, therefore they must be noinline and noclone. + * + * GCC 4.[56] currently fail to enforce this, so we must do so ourselves. + * See GCC PR44290. + */ +#define __naked __attribute__((naked)) noinline __noclone notrace + +#define __noreturn __attribute__((noreturn)) + +/* + * From the GCC manual: + * + * Many functions have no effects except the return value and their + * return value depends only on the parameters and/or global + * variables. Such a function can be subject to common subexpression + * elimination and loop optimization just as an arithmetic operator + * would be. + * [...] + */ +#define __pure __attribute__((pure)) +#define __aligned(x) __attribute__((aligned(x))) +#define __printf(a, b) __attribute__((format(printf, a, b))) __nocapture(a, b) +#define __scanf(a, b) __attribute__((format(scanf, a, b))) __nocapture(a, b) +#define __attribute_const__ __attribute__((const)) +#define __maybe_unused __attribute__((unused)) +#define __always_unused __attribute__((unused)) + +/* gcc version specific checks */ + +#if GCC_VERSION < 30200 +# error Sorry, your compiler is too old - please upgrade it. 
+#endif + +#if GCC_VERSION < 30300 +# define __used __attribute__((__unused__)) +#else +# define __used __attribute__((__used__)) +#endif + +#ifdef CONFIG_GCOV_KERNEL +# if GCC_VERSION < 30400 +# error "GCOV profiling support for gcc versions below 3.4 not included" +# endif /* __GNUC_MINOR__ */ +#endif /* CONFIG_GCOV_KERNEL */ + +#if GCC_VERSION >= 30400 +#define __must_check __attribute__((warn_unused_result)) +#define __malloc __attribute__((__malloc__)) +#endif + +#if GCC_VERSION >= 40000 + +/* GCC 4.1.[01] miscompiles __weak */ +#ifdef __KERNEL__ +# if GCC_VERSION >= 40100 && GCC_VERSION <= 40101 +# error Your version of gcc miscompiles the __weak directive +# endif +#endif + +#define __used __attribute__((__used__)) +#define __compiler_offsetof(a, b) \ + __builtin_offsetof(a, b) + +#if GCC_VERSION >= 40100 +# define __compiletime_object_size(obj) __builtin_object_size(obj, 0) +#endif + +#if GCC_VERSION >= 40300 +/* Mark functions as cold. gcc will assume any path leading to a call + * to them will be unlikely. This means a lot of manual unlikely()s + * are unnecessary now for any paths leading to the usual suspects + * like BUG(), printk(), panic() etc. [but let's keep them for now for + * older compilers] + * + * Early snapshots of gcc 4.3 don't support this and we can't detect this + * in the preprocessor, but we can live with this because they're unreleased. + * Maketime probing would be overkill here. + * + * gcc also has a __attribute__((__hot__)) to move hot functions into + * a special section, but I don't see any sense in this right now in + * the kernel context + */ +#define __cold __attribute__((__cold__)) + +#define __UNIQUE_ID(prefix) __PASTE(__PASTE(__UNIQUE_ID_, prefix), __COUNTER__) + +#ifndef __CHECKER__ +# define __compiletime_warning(message) __attribute__((warning(message))) +# define __compiletime_error(message) __attribute__((error(message))) +#endif /* __CHECKER__ */ + +#define __alloc_size(...) 
__attribute((alloc_size(__VA_ARGS__))) +#define __bos(ptr, arg) __builtin_object_size((ptr), (arg)) +#define __bos0(ptr) __bos((ptr), 0) +#define __bos1(ptr) __bos((ptr), 1) +#endif /* GCC_VERSION >= 40300 */ + +#if GCC_VERSION >= 40500 + +#ifdef RANDSTRUCT_PLUGIN +#define __randomize_layout __attribute__((randomize_layout)) +#define __no_randomize_layout __attribute__((no_randomize_layout)) +#endif + +#ifdef CONSTIFY_PLUGIN +#define __no_const __attribute__((no_const)) +#define __do_const __attribute__((do_const)) +#define const_cast(x) (*(typeof((typeof(x))0) *)&(x)) +#endif + +#ifdef SIZE_OVERFLOW_PLUGIN +#define __size_overflow(...) __attribute__((size_overflow(__VA_ARGS__))) +#define __intentional_overflow(...) __attribute__((intentional_overflow(__VA_ARGS__))) +#endif + +#ifndef __CHECKER__ +#ifdef LATENT_ENTROPY_PLUGIN +#define __latent_entropy __attribute__((latent_entropy)) +#endif +#endif + +#ifdef INITIFY_PLUGIN +#define __nocapture(...) __attribute__((nocapture(__VA_ARGS__))) +#endif + +/* + * The initify gcc-plugin attempts to identify const arguments that are only + * used during init (see __init and __exit), so they can be moved to the + * .init.rodata/.exit.rodata section. If an argument is passed to a non-init + * function, it must normally be assumed that such an argument has been + * captured by that function and may be used in the future when .init/.exit has + * been unmapped from memory. In order to identify functions that are confirmed + * to not capture their arguments, the __nocapture() attribute is used so that + * initify can better identify candidate variables. + */ +#ifdef INITIFY_PLUGIN +#define __nocapture(...) __attribute__((nocapture(__VA_ARGS__))) +#define __unverified_nocapture(...) __attribute__((unverified_nocapture(__VA_ARGS__))) +#endif + +#ifdef RAP_PLUGIN +#define __rap_hash __attribute__((rap_hash)) +#endif /* * Mark a position in code as unreachable. 
This can be used to * suppress control flow warnings after asm blocks that transfer * control elsewhere. + * + * Early snapshots of gcc 4.5 don't support this and we can't detect + * this in the preprocessor, but we can live with this because they're + * unreleased. Really, we need to have autoconf for the kernel. */ -#define unreachable() \ - do { \ - annotate_unreachable(); \ - barrier_before_unreachable(); \ - __builtin_unreachable(); \ - } while (0) +#define unreachable() __builtin_unreachable() -#if defined(RANDSTRUCT_PLUGIN) && !defined(__CHECKER__) -#define __randomize_layout __attribute__((randomize_layout)) -#define __no_randomize_layout __attribute__((no_randomize_layout)) -/* This anon struct can add padding, so only enable it under randstruct. */ -#define randomized_struct_fields_start struct { -#define randomized_struct_fields_end } __randomize_layout; +/* Mark a function definition as prohibited from being cloned. */ +#define __noclone __attribute__((__noclone__, __optimize__("no-tracer"))) + +#endif /* GCC_VERSION >= 40500 */ + +#if GCC_VERSION >= 40600 +/* + * When used with Link Time Optimization, gcc can optimize away C functions or + * variables which are referenced only from assembly code. __visible tells the + * optimizer that something else uses this function or variable, thus preventing + * this. + */ +#define __visible __attribute__((externally_visible)) +#endif + + +#if GCC_VERSION >= 40900 && !defined(__CHECKER__) +/* + * __assume_aligned(n, k): Tell the optimizer that the returned + * pointer can be assumed to be k modulo n. The second argument is + * optional (default 0), so we use a variadic macro to make the + * shorthand. + * + * Beware: Do not apply this to functions which may return + * ERR_PTRs. 
Also, it is probably unwise to apply it to functions + * returning extra information in the low bits (but in that case the + * compiler should see some alignment anyway, when the return value is + * massaged by 'flags = ptr & 3; ptr &= ~3;'). + */ +#define __assume_aligned(a, ...) __attribute__((__assume_aligned__(a, ## __VA_ARGS__))) #endif /* @@ -87,60 +293,49 @@ */ #define asm_volatile_goto(x...) do { asm goto(x); asm (""); } while (0) -#if defined(CONFIG_ARCH_USE_BUILTIN_BSWAP) +/* + * sparse (__CHECKER__) pretends to be gcc, but can't do constant + * folding in __builtin_bswap*() (yet), so don't set these for it. + */ +#if defined(CONFIG_ARCH_USE_BUILTIN_BSWAP) && !defined(__CHECKER__) +#if GCC_VERSION >= 40400 #define __HAVE_BUILTIN_BSWAP32__ #define __HAVE_BUILTIN_BSWAP64__ +#endif +#if GCC_VERSION >= 40800 #define __HAVE_BUILTIN_BSWAP16__ -#endif /* CONFIG_ARCH_USE_BUILTIN_BSWAP */ +#endif +#endif /* CONFIG_ARCH_USE_BUILTIN_BSWAP && !__CHECKER__ */ #if GCC_VERSION >= 70000 #define KASAN_ABI_VERSION 5 -#else +#elif GCC_VERSION >= 50000 #define KASAN_ABI_VERSION 4 +#elif GCC_VERSION >= 40902 +#define KASAN_ABI_VERSION 3 #endif -#if __has_attribute(__no_sanitize_address__) +#if GCC_VERSION >= 40902 +/* + * Tell the compiler that address safety instrumentation (KASAN) + * should not be applied to that function. 
+ * Conflicts with inlining: https://gcc.gnu.org/bugzilla/show_bug.cgi?id=67368 + */ #define __no_sanitize_address __attribute__((no_sanitize_address)) -#else +#endif + +#endif /* gcc version >= 40000 specific checks */ + +#if !defined(__noclone) +#define __noclone /* not needed */ +#endif + +#if !defined(__no_sanitize_address) #define __no_sanitize_address #endif -#if defined(__SANITIZE_THREAD__) && __has_attribute(__no_sanitize_thread__) -#define __no_sanitize_thread __attribute__((no_sanitize_thread)) -#else -#define __no_sanitize_thread -#endif - -#if __has_attribute(__no_sanitize_undefined__) -#define __no_sanitize_undefined __attribute__((no_sanitize_undefined)) -#else -#define __no_sanitize_undefined -#endif - -#if defined(CONFIG_KCOV) && __has_attribute(__no_sanitize_coverage__) -#define __no_sanitize_coverage __attribute__((no_sanitize_coverage)) -#else -#define __no_sanitize_coverage -#endif - /* - * Turn individual warnings and errors on and off locally, depending - * on version. + * A trick to suppress uninitialized variable warning without generating any + * code */ -#define __diag_GCC(version, severity, s) \ - __diag_GCC_ ## version(__diag_GCC_ ## severity s) - -/* Severity used in pragma directives */ -#define __diag_GCC_ignore ignored -#define __diag_GCC_warn warning -#define __diag_GCC_error error - -#define __diag_str1(s) #s -#define __diag_str(s) __diag_str1(s) -#define __diag(s) _Pragma(__diag_str(GCC diagnostic s)) - -#if GCC_VERSION >= 80000 -#define __diag_GCC_8(s) __diag(s) -#else -#define __diag_GCC_8(s) -#endif +#define uninitialized_var(x) x = x diff --git a/include/linux/compiler-intel.h b/include/linux/compiler-intel.h index b17f3cd183..d4c71132d0 100644 --- a/include/linux/compiler-intel.h +++ b/include/linux/compiler-intel.h @@ -1,17 +1,22 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -#ifndef __LINUX_COMPILER_TYPES_H +#ifndef __LINUX_COMPILER_H #error "Please don't include directly, include instead." 
#endif #ifdef __ECC -/* Compiler specific definitions for Intel ECC compiler */ +/* Some compiler specific definitions are overwritten here + * for Intel ECC compiler + */ #include /* Intel ECC compiler doesn't support gcc specific asm stmts. * It uses intrinsics to do the equivalent things. */ +#undef barrier +#undef barrier_data +#undef RELOC_HIDE +#undef OPTIMIZER_HIDE_VAR #define barrier() __memory_barrier() #define barrier_data(ptr) barrier() @@ -27,8 +32,14 @@ */ #define OPTIMIZER_HIDE_VAR(var) barrier() +/* Intel ECC compiler doesn't support __builtin_types_compatible_p() */ +#define __must_be_array(a) 0 + #endif +#ifndef __HAVE_BUILTIN_BSWAP16__ /* icc has this, but it's called _bswap16 */ #define __HAVE_BUILTIN_BSWAP16__ #define __builtin_bswap16 _bswap16 +#endif + diff --git a/include/linux/compiler.h b/include/linux/compiler.h index 3d5af56337..a7444a7b2e 100644 --- a/include/linux/compiler.h +++ b/include/linux/compiler.h @@ -1,38 +1,175 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef __LINUX_COMPILER_H #define __LINUX_COMPILER_H -#include - #ifndef __ASSEMBLY__ +#ifdef __CHECKER__ +# define __user __attribute__((noderef, address_space(1))) +# define __force_user __force __user +# define __kernel __attribute__((address_space(0))) +# define __force_kernel __force __kernel +# define __safe __attribute__((safe)) +# define __force __attribute__((force)) +# define __nocast __attribute__((nocast)) +# define __iomem __attribute__((noderef, address_space(2))) +# define __force_iomem __force __iomem +# define __must_hold(x) __attribute__((context(x,1,1))) +# define __acquires(x) __attribute__((context(x,0,1))) +# define __releases(x) __attribute__((context(x,1,0))) +# define __acquire(x) __context__(x,1) +# define __release(x) __context__(x,-1) +# define __cond_lock(x,c) ((c) ? 
({ __acquire(x); 1; }) : 0) +# define __percpu __attribute__((noderef, address_space(3))) +# define __force_percpu __force __percpu +#ifdef CONFIG_SPARSE_RCU_POINTER +# define __rcu __attribute__((noderef, address_space(4))) +# define __force_rcu __force __rcu +#else /* CONFIG_SPARSE_RCU_POINTER */ +# define __rcu +# define __force_rcu +#endif /* CONFIG_SPARSE_RCU_POINTER */ +# define __private __attribute__((noderef)) +extern void __chk_user_ptr(const volatile void __user *); +extern void __chk_io_ptr(const volatile void __iomem *); +# define ACCESS_PRIVATE(p, member) (*((typeof((p)->member) __force *) &(p)->member)) +#else /* __CHECKER__ */ +# ifdef CHECKER_PLUGIN +# ifdef CHECKER_PLUGIN_USER +//# define __user +//# define __force_user +//# define __kernel +//# define __force_kernel +# else +# define __user +# define __force_user +# define __kernel +# define __force_kernel +# endif +# ifdef CHECKER_PLUGIN_CONTEXT +# define __must_hold(x) __attribute__((context(#x,1,1))) +# define __acquires(x) __attribute__((context(#x,0,1))) +# define __releases(x) __attribute__((context(#x,1,0))) +# define __acquire(x) __context__(#x,1) +# define __release(x) __context__(#x,-1) +# define __cond_lock(x,c) ((c) ? ({ __acquire(x); 1; }) : 0) +# define __cond_unlock(x,c) ((c) ? 
({ __release(x); 1; }) : 0) +# else +# define __must_hold(x) +# define __acquires(x) +# define __releases(x) +# define __acquire(x) (void)0 +# define __release(x) (void)0 +# define __cond_lock(x,c) (c) +# define __cond_unlock(x,c) (c) +# endif +# else +# ifdef STRUCTLEAK_PLUGIN +# define __user __attribute__((user)) +# else +# define __user +# endif +# define __force_user +# define __kernel +# define __force_kernel +# define __must_hold(x) +# define __acquires(x) +# define __releases(x) +# define __acquire(x) (void)0 +# define __release(x) (void)0 +# define __cond_lock(x,c) (c) +# endif +# define __safe +# define __force +# define __nocast +# define __iomem +# define __force_iomem +# define __chk_user_ptr(x) (void)0 +# define __chk_io_ptr(x) (void)0 +# define __builtin_warning(x, y...) (1) +# define __percpu +# define __force_percpu +# define __rcu +# define __force_rcu +# define __private +# define ACCESS_PRIVATE(p, member) ((p)->member) +#endif /* __CHECKER__ */ + +/* Indirect macros required for expanded argument pasting, eg. __LINE__. */ +#define ___PASTE(a,b) a##b +#define __PASTE(a,b) ___PASTE(a,b) + #ifdef __KERNEL__ +#ifdef __GNUC__ +#include +#endif + +#if defined(CC_USING_HOTPATCH) && !defined(__CHECKER__) +#define notrace __attribute__((hotpatch(0,0))) +#else +#define notrace __attribute__((no_instrument_function)) +#endif + +/* Intel compiler defines __GNUC__. So we will overwrite implementations + * coming from above header files here + */ +#ifdef __INTEL_COMPILER +# include +#endif + +/* Clang compiler defines __GNUC__. So we will overwrite implementations + * coming from above header files here + */ +#ifdef __clang__ +#include +#endif + +/* + * Generic compiler-dependent macros required for kernel + * build go below this comment. 
Actual compiler/compiler version + * specific implementations come from the above header files + */ + +struct ftrace_branch_data { + const char *func; + const char *file; + unsigned line; + union { + struct { + unsigned long correct; + unsigned long incorrect; + }; + struct { + unsigned long miss; + unsigned long hit; + }; + unsigned long miss_hit[2]; + }; +}; + /* * Note: DISABLE_BRANCH_PROFILING can be used by special lowlevel code * to disable branch tracing on a per file basis. */ #if defined(CONFIG_TRACE_BRANCH_PROFILING) \ && !defined(DISABLE_BRANCH_PROFILING) && !defined(__CHECKER__) -void ftrace_likely_update(struct ftrace_likely_data *f, int val, - int expect, int is_constant); +void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect); #define likely_notrace(x) __builtin_expect(!!(x), 1) #define unlikely_notrace(x) __builtin_expect(!!(x), 0) -#define __branch_check__(x, expect, is_constant) ({ \ - long ______r; \ - static struct ftrace_likely_data \ - __aligned(4) \ - __section("_ftrace_annotated_branch") \ +#define __branch_check__(x, expect) ({ \ + int ______r; \ + static struct ftrace_branch_data \ + __attribute__((__aligned__(4))) \ + __attribute__((section("_ftrace_annotated_branch"))) \ ______f = { \ - .data.func = __func__, \ - .data.file = __FILE__, \ - .data.line = __LINE__, \ + .func = __func__, \ + .file = __FILE__, \ + .line = __LINE__, \ }; \ - ______r = __builtin_expect(!!(x), expect); \ - ftrace_likely_update(&______f, ______r, \ - expect, is_constant); \ + ______r = likely_notrace(x); \ + ftrace_likely_update(&______f, ______r, expect); \ ______r; \ }) @@ -42,10 +179,10 @@ void ftrace_likely_update(struct ftrace_likely_data *f, int val, * written by Daniel Walker. */ # ifndef likely -# define likely(x) (__branch_check__(x, 1, __builtin_constant_p(x))) +# define likely(x) (__builtin_constant_p(x) ? 
!!(x) : __branch_check__(x, 1)) # endif # ifndef unlikely -# define unlikely(x) (__branch_check__(x, 0, __builtin_constant_p(x))) +# define unlikely(x) (__builtin_constant_p(x) ? !!(x) : __branch_check__(x, 0)) # endif #ifdef CONFIG_PROFILE_ALL_BRANCHES @@ -53,109 +190,42 @@ void ftrace_likely_update(struct ftrace_likely_data *f, int val, * "Define 'is'", Bill Clinton * "Define 'if'", Steven Rostedt */ -#define if(cond, ...) if ( __trace_if_var( !!(cond , ## __VA_ARGS__) ) ) - -#define __trace_if_var(cond) (__builtin_constant_p(cond) ? (cond) : __trace_if_value(cond)) - -#define __trace_if_value(cond) ({ \ - static struct ftrace_branch_data \ - __aligned(4) \ - __section("_ftrace_branch") \ - __if_trace = { \ - .func = __func__, \ - .file = __FILE__, \ - .line = __LINE__, \ - }; \ - (cond) ? \ - (__if_trace.miss_hit[1]++,1) : \ - (__if_trace.miss_hit[0]++,0); \ -}) - +#define if(cond, ...) __trace_if( (cond , ## __VA_ARGS__) ) +#define __trace_if(cond) \ + if (__builtin_constant_p(!!(cond)) ? !!(cond) : \ + ({ \ + int ______r; \ + static struct ftrace_branch_data \ + __attribute__((__aligned__(4))) \ + __attribute__((section("_ftrace_branch"))) \ + ______f = { \ + .func = __func__, \ + .file = __FILE__, \ + .line = __LINE__, \ + }; \ + ______r = !!(cond); \ + ______f.miss_hit[______r]++; \ + ______r; \ + })) #endif /* CONFIG_PROFILE_ALL_BRANCHES */ #else # define likely(x) __builtin_expect(!!(x), 1) # define unlikely(x) __builtin_expect(!!(x), 0) -# define likely_notrace(x) likely(x) -# define unlikely_notrace(x) unlikely(x) #endif /* Optimization barrier */ #ifndef barrier -/* The "volatile" is due to gcc bugs */ -# define barrier() __asm__ __volatile__("": : :"memory") +# define barrier() __memory_barrier() #endif #ifndef barrier_data -/* - * This version is i.e. 
to prevent dead stores elimination on @ptr - * where gcc and llvm may behave differently when otherwise using - * normal barrier(): while gcc behavior gets along with a normal - * barrier(), llvm needs an explicit input variable to be assumed - * clobbered. The issue is as follows: while the inline asm might - * access any memory it wants, the compiler could have fit all of - * @ptr into memory registers instead, and since @ptr never escaped - * from that, it proved that the inline asm wasn't touching any of - * it. This version works well with both compilers, i.e. we're telling - * the compiler that the inline asm absolutely may see the contents - * of @ptr. See also: https://llvm.org/bugs/show_bug.cgi?id=15495 - */ -# define barrier_data(ptr) __asm__ __volatile__("": :"r"(ptr) :"memory") -#endif - -/* workaround for GCC PR82365 if needed */ -#ifndef barrier_before_unreachable -# define barrier_before_unreachable() do { } while (0) +# define barrier_data(ptr) barrier() #endif /* Unreachable code */ -#ifdef CONFIG_STACK_VALIDATION -/* - * These macros help objtool understand GCC code flow for unreachable code. - * The __COUNTER__ based labels are a hack to make each instance of the macros - * unique, to convince GCC not to merge duplicate inline asm statements. 
- */ -#define __stringify_label(n) #n - -#define __annotate_reachable(c) ({ \ - asm volatile(__stringify_label(c) ":\n\t" \ - ".pushsection .discard.reachable\n\t" \ - ".long " __stringify_label(c) "b - .\n\t" \ - ".popsection\n\t"); \ -}) -#define annotate_reachable() __annotate_reachable(__COUNTER__) - -#define __annotate_unreachable(c) ({ \ - asm volatile(__stringify_label(c) ":\n\t" \ - ".pushsection .discard.unreachable\n\t" \ - ".long " __stringify_label(c) "b - .\n\t" \ - ".popsection\n\t"); \ -}) -#define annotate_unreachable() __annotate_unreachable(__COUNTER__) - -#define ASM_UNREACHABLE \ - "999:\n\t" \ - ".pushsection .discard.unreachable\n\t" \ - ".long 999b - .\n\t" \ - ".popsection\n\t" - -/* Annotate a C jump table to allow objtool to follow the code flow */ -#define __annotate_jump_table __section(".rodata..c_jump_table") - -#else -#define annotate_reachable() -#define annotate_unreachable() -#define __annotate_jump_table -#endif - -#ifndef ASM_UNREACHABLE -# define ASM_UNREACHABLE -#endif #ifndef unreachable -# define unreachable() do { \ - annotate_unreachable(); \ - __builtin_unreachable(); \ -} while (0) +# define unreachable() do { } while (1) #endif /* @@ -177,7 +247,7 @@ void ftrace_likely_update(struct ftrace_likely_data *f, int val, extern typeof(sym) sym; \ static const unsigned long __kentry_##sym \ __used \ - __attribute__((__section__("___kentry+" #sym))) \ + __attribute__((section("___kentry" "+" #sym ), used)) \ = (unsigned long)&sym; #endif @@ -188,12 +258,8 @@ void ftrace_likely_update(struct ftrace_likely_data *f, int val, (typeof(ptr)) (__ptr + (off)); }) #endif -#define absolute_pointer(val) RELOC_HIDE((void *)(val), 0) - #ifndef OPTIMIZER_HIDE_VAR -/* Make the optimizer believe the variable can be manipulated arbitrarily. */ -#define OPTIMIZER_HIDE_VAR(var) \ - __asm__ ("" : "=r" (var) : "0" (var)) +#define OPTIMIZER_HIDE_VAR(var) barrier() #endif /* Not-quite-unique ID. 
*/ @@ -201,68 +267,384 @@ void ftrace_likely_update(struct ftrace_likely_data *f, int val, # define __UNIQUE_ID(prefix) __PASTE(__PASTE(__UNIQUE_ID_, prefix), __LINE__) #endif -/** - * data_race - mark an expression as containing intentional data races - * - * This data_race() macro is useful for situations in which data races - * should be forgiven. One example is diagnostic code that accesses - * shared variables but is not a part of the core synchronization design. - * - * This macro *does not* affect normal code generation, but is a hint - * to tooling that data races here are to be ignored. +#include + +#ifdef CONFIG_KASAN +/* + * Use READ_ONCE_NOCHECK() instead of READ_ONCE() if you need + * to hide memory access from KASAN. */ -#define data_race(expr) \ -({ \ - __unqual_scalar_typeof(({ expr; })) __v = ({ \ - __kcsan_disable_current(); \ - expr; \ - }); \ - __kcsan_enable_current(); \ - __v; \ +#define READ_ONCE_NOCHECK(x) \ +({ \ + union { typeof(x) __val; char __c[sizeof(x)]; } __u; \ + __read_once_size_nocheck(&(x), __u.__c, sizeof(x)); \ + __u.__val; \ }) /* - * With CONFIG_CFI_CLANG, the compiler replaces function addresses in - * instrumented C code with jump table addresses. Architectures that - * support CFI can define this macro to return the actual function address - * when needed. + * This function is not 'inline' because __no_sanitize_address conflicts + * with inlining. Attempt to inline it may cause a build failure. + * https://gcc.gnu.org/bugzilla/show_bug.cgi?id=67368 + * '__maybe_unused' allows us to avoid defined-but-not-used warnings. 
*/ -#ifndef function_nocfi -#define function_nocfi(x) (x) +static __no_sanitize_address __maybe_unused +void __read_once_size_nocheck(const volatile void *p, void *res, int size) +{ + switch (size) { + case 1: *(__u8 *)res = *(const volatile __u8 *)p; break; + case 2: *(__u16 *)res = *(const volatile __u16 *)p; break; + case 4: *(__u32 *)res = *(const volatile __u32 *)p; break; + case 8: *(__u64 *)res = *(const volatile __u64 *)p; break; + default: + barrier(); + __builtin_memcpy(res, (const void *)p, size); + barrier(); + } +} +#else +#define READ_ONCE_NOCHECK(x) READ_ONCE(x) +#endif + +/* + * Prevent the compiler from merging or refetching reads or writes. The + * compiler is also forbidden from reordering successive instances of + * READ_ONCE, WRITE_ONCE and ACCESS_ONCE (see below), but only when the + * compiler is aware of some particular ordering. One way to make the + * compiler aware of ordering is to put the two invocations of READ_ONCE, + * WRITE_ONCE or ACCESS_ONCE() in different C statements. + * + * In contrast to ACCESS_ONCE these two macros will also work on aggregate + * data types like structs or unions. If the size of the accessed data + * type exceeds the word size of the machine (e.g., 32 bits or 64 bits) + * READ_ONCE() and WRITE_ONCE() will fall back to memcpy(). There's at + * least two memcpy()s: one for the __builtin_memcpy() and then one for + * the macro doing the copy of variable - '__u' allocated on the stack. + * + * Their two major use cases are: (1) Mediating communication between + * process-level code and irq/NMI handlers, all running on the same CPU, + * and (2) Ensuring that the compiler does not fold, spindle, or otherwise + * mutilate accesses that either do not require ordering or that interact + * with an explicit memory barrier or atomic instruction that provides the + * required ordering. 
+ */ + +#define READ_ONCE(x) ({ \ + typeof(x) __val = *(volatile typeof(x) *)&(x); \ + __val; \ +}) + +#define WRITE_ONCE(x, val) ({ \ + typeof(x) __val = (val); \ + (x) = *(volatile typeof(x) *)&__val; \ + __val; \ +}) + +#endif /* __KERNEL__ */ + +#endif /* __ASSEMBLY__ */ + +#ifdef __KERNEL__ +/* + * Allow us to mark functions as 'deprecated' and have gcc emit a nice + * warning for each use, in hopes of speeding the functions removal. + * Usage is: + * int __deprecated foo(void) + */ +#ifndef __deprecated +# define __deprecated /* unimplemented */ +#endif + +#ifdef MODULE +#define __deprecated_for_modules __deprecated +#else +#define __deprecated_for_modules +#endif + +#ifndef __must_check +#define __must_check +#endif + +#ifndef CONFIG_ENABLE_MUST_CHECK +#undef __must_check +#define __must_check +#endif +#ifndef CONFIG_ENABLE_WARN_DEPRECATED +#undef __deprecated +#undef __deprecated_for_modules +#define __deprecated +#define __deprecated_for_modules +#endif + +#ifndef __malloc +#define __malloc +#endif + +/* + * Allow us to avoid 'defined but not used' warnings on functions and data, + * as well as force them to be emitted to the assembly file. + * + * As of gcc 3.4, static functions that are not marked with attribute((used)) + * may be elided from the assembly file. As of gcc 3.4, static data not so + * marked will not be elided, but this may change in a future gcc version. + * + * NOTE: Because distributions shipped with a backported unit-at-a-time + * compiler in gcc 3.3, we must define __used to be __attribute__((used)) + * for gcc >=3.3 instead of 3.4. + * + * In prior versions of gcc, such functions and data would be emitted, but + * would be warned about except with attribute((unused)). + * + * Mark functions that are referenced only in inline assembly as __used so + * the code is emitted even though it appears to be unreferenced. 
+ */ +#ifndef __used +# define __used /* unimplemented */ +#endif + +#ifndef __maybe_unused +# define __maybe_unused /* unimplemented */ +#endif + +#ifndef __always_unused +# define __always_unused /* unimplemented */ +#endif + +#ifndef noinline +#define noinline +#endif + +/* + * Rather then using noinline to prevent stack consumption, use + * noinline_for_stack instead. For documentation reasons. + */ +#define noinline_for_stack noinline + +#ifndef __always_inline +#define __always_inline inline #endif #endif /* __KERNEL__ */ /* - * Force the compiler to emit 'sym' as a symbol, so that we can reference - * it from inline assembler. Necessary in case 'sym' could be inlined - * otherwise, or eliminated entirely due to lack of references that are - * visible to the compiler. + * From the GCC manual: + * + * Many functions do not examine any values except their arguments, + * and have no effects except the return value. Basically this is + * just slightly more strict class than the `pure' attribute above, + * since function is not allowed to read global memory. + * + * Note that a function that has pointer arguments and examines the + * data pointed to must _not_ be declared `const'. Likewise, a + * function that calls a non-`const' function usually must not be + * `const'. It does not make sense for a `const' function to return + * `void'. 
*/ -#define __ADDRESSABLE(sym) \ - static void * __section(".discard.addressable") __used \ - __UNIQUE_ID(__PASTE(__addressable_,sym)) = (void *)&sym; +#ifndef __attribute_const__ +# define __attribute_const__ /* unimplemented */ +#endif -/** - * offset_to_ptr - convert a relative memory offset to an absolute pointer - * @off: the address of the 32-bit offset value - */ -static inline void *offset_to_ptr(const int *off) -{ - return (void *)((unsigned long)off + *off); -} +#ifndef __latent_entropy +# define __latent_entropy +#endif -#endif /* __ASSEMBLY__ */ +#ifndef __randomize_layout +# define __randomize_layout +#endif -/* &a[0] degrades to a pointer: a different type from an array */ -#define __must_be_array(a) BUILD_BUG_ON_ZERO(__same_type((a), &(a)[0])) +#ifndef __no_randomize_layout +# define __no_randomize_layout +#endif + +#ifndef __no_const +# define __no_const +#endif + +#ifndef __do_const +# define __do_const +#endif + +#ifndef __size_overflow +# define __size_overflow(...) +#endif + +#ifndef __intentional_overflow +# define __intentional_overflow(...) +#endif + +#ifndef const_cast +# define const_cast(x) (x) +#endif + +#ifndef __nocapture +# define __nocapture(...) +#endif + +#ifndef __unverified_nocapture +# define __unverified_nocapture(...) +#endif + +#ifndef __rap_hash +#define __rap_hash +#endif /* - * This is needed in functions which generate the stack canary, see - * arch/x86/kernel/smpboot.c::start_secondary() for an example. + * Tell gcc if a function is cold. The compiler will assume any path + * directly leading to the call is unlikely. */ -#define prevent_tail_call_optimization() mb() -#include +#ifndef __cold +#define __cold +#endif +#ifndef __alloc_size +#define __alloc_size(...) 
+#endif + +#ifndef __bos +#define __bos(ptr, arg) +#endif + +#ifndef __bos0 +#define __bos0(ptr) +#endif + +#ifndef __bos1 +#define __bos1(ptr) +#endif + +/* Simple shorthand for a section definition */ +#ifndef __section +# define __section(S) __attribute__ ((__section__(#S))) +#endif + +#ifndef __visible +#define __visible +#endif + +/* + * Assume alignment of return value. + */ +#ifndef __assume_aligned +#define __assume_aligned(a, ...) +#endif + + +/* Are two types/vars the same type (ignoring qualifiers)? */ +#ifndef __same_type +# define __same_type(a, b) __builtin_types_compatible_p(typeof(a), typeof(b)) +#endif + +#define __type_is_unsigned(t) (__same_type((t)0, 0UL) || __same_type((t)0, 0U) || __same_type((t)0, (unsigned short)0) || __same_type((t)0, (unsigned char)0)) + +/* Is this type a native word size -- useful for atomic operations */ +#ifndef __native_word +# define __native_word(t) (sizeof(t) == sizeof(char) || sizeof(t) == sizeof(short) || sizeof(t) == sizeof(int) || sizeof(t) == sizeof(long)) +#endif + +/* Compile time object size, -1 for unknown */ +#ifndef __compiletime_object_size +# define __compiletime_object_size(obj) -1 +#endif +#ifndef __compiletime_warning +# define __compiletime_warning(message) +#endif +#ifndef __compiletime_error +# define __compiletime_error(message) +/* + * Sparse complains of variable sized arrays due to the temporary variable in + * __compiletime_assert. Unfortunately we can't just expand it out to make + * sparse see a constant array size without breaking compiletime_assert on old + * versions of GCC (e.g. 4.2.4), so hide the array from sparse altogether. 
+ */ +# ifndef __CHECKER__ +# define __compiletime_error_fallback(condition) \ + do { ((void)sizeof(char[1 - 2 * condition])); } while (0) +# endif +#endif +#ifndef __compiletime_error_fallback +# define __compiletime_error_fallback(condition) do { } while (0) +#endif + +#define __compiletime_assert(condition, msg, prefix, suffix) \ + do { \ + bool __cond = !(condition); \ + extern void prefix ## suffix(void) __compiletime_error(msg); \ + if (__cond) \ + prefix ## suffix(); \ + __compiletime_error_fallback(__cond); \ + } while (0) + +#define _compiletime_assert(condition, msg, prefix, suffix) \ + __compiletime_assert(condition, msg, prefix, suffix) + +/** + * compiletime_assert - break build and emit msg if condition is false + * @condition: a compile-time constant condition to check + * @msg: a message to emit if condition is false + * + * In tradition of POSIX assert, this macro will break the build if the + * supplied condition is *false*, emitting the supplied error message if the + * compiler has support to do so. + */ +#define compiletime_assert(condition, msg) \ + _compiletime_assert(condition, msg, __compiletime_assert_, __LINE__) + +#define compiletime_assert_atomic_type(t) \ + compiletime_assert(__native_word(t), \ + "Need native word sized stores/loads for atomicity.") + +/* + * Prevent the compiler from merging or refetching accesses. The compiler + * is also forbidden from reordering successive instances of ACCESS_ONCE(), + * but only when the compiler is aware of some particular ordering. One way + * to make the compiler aware of ordering is to put the two invocations of + * ACCESS_ONCE() in different C statements. + * + * ACCESS_ONCE will only work on scalar types. For union types, ACCESS_ONCE + * on a union member will work as long as the size of the member matches the + * size of the union and the size is smaller than word size. 
+ * + * The major use cases of ACCESS_ONCE used to be (1) Mediating communication + * between process-level code and irq/NMI handlers, all running on the same CPU, + * and (2) Ensuring that the compiler does not fold, spindle, or otherwise + * mutilate accesses that either do not require ordering or that interact + * with an explicit memory barrier or atomic instruction that provides the + * required ordering. + * + * If possible use READ_ONCE()/WRITE_ONCE() instead. + */ +#define __ACCESS_ONCE(x) ({ \ + __maybe_unused typeof(x) __var = (__force typeof(x)) 0; \ + (volatile const typeof(x) *)&(x); }) +#define ACCESS_ONCE(x) (*__ACCESS_ONCE(x)) +#define ACCESS_ONCE_RW(x) (*(volatile typeof(x) *)&(x)) + +/** + * lockless_dereference() - safely load a pointer for later dereference + * @p: The pointer to load + * + * Similar to rcu_dereference(), but for situations where the pointed-to + * object's lifetime is managed by something other than RCU. That + * "something other" might be reference counting or simple immortality. + * + * The seemingly unused variable ___typecheck_p validates that @p is + * indeed a pointer type by using a pointer to typeof(*p) as the type. + * Taking a pointer to typeof(*p) again is needed in case p is void *. + */ +#define lockless_dereference(p) \ +({ \ + typeof(p) _________p1 = READ_ONCE(p); \ + typeof(*(p)) *___typecheck_p __maybe_unused; \ + smp_read_barrier_depends(); /* Dependency order vs. p above. 
*/ \ + (_________p1); \ +}) + +/* Ignore/forbid kprobes attach on very low level functions marked by this attribute: */ +#ifdef CONFIG_KPROBES +# define __kprobes __attribute__((__section__(".kprobes.text"))) +# define nokprobe_inline __always_inline +#else +# define __kprobes +# define nokprobe_inline inline +#endif #endif /* __LINUX_COMPILER_H */ diff --git a/include/linux/completion.h b/include/linux/completion.h index 51d9ab0796..5d5aaae3af 100644 --- a/include/linux/completion.h +++ b/include/linux/completion.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef __LINUX_COMPLETION_H #define __LINUX_COMPLETION_H @@ -9,7 +8,7 @@ * See kernel/sched/completion.c for details. */ -#include +#include /* * struct completion - structure used to maintain state for a "completion" @@ -25,21 +24,14 @@ */ struct completion { unsigned int done; - struct swait_queue_head wait; + wait_queue_head_t wait; }; -#define init_completion_map(x, m) init_completion(x) -static inline void complete_acquire(struct completion *x) {} -static inline void complete_release(struct completion *x) {} - #define COMPLETION_INITIALIZER(work) \ - { 0, __SWAIT_QUEUE_HEAD_INITIALIZER((work).wait) } - -#define COMPLETION_INITIALIZER_ONSTACK_MAP(work, map) \ - (*({ init_completion_map(&(work), &(map)); &(work); })) + { 0, __WAIT_QUEUE_HEAD_INITIALIZER((work).wait) } #define COMPLETION_INITIALIZER_ONSTACK(work) \ - (*({ init_completion(&work); &work; })) + ({ init_completion(&work); work; }) /** * DECLARE_COMPLETION - declare and initialize a completion structure @@ -67,11 +59,8 @@ static inline void complete_release(struct completion *x) {} #ifdef CONFIG_LOCKDEP # define DECLARE_COMPLETION_ONSTACK(work) \ struct completion work = COMPLETION_INITIALIZER_ONSTACK(work) -# define DECLARE_COMPLETION_ONSTACK_MAP(work, map) \ - struct completion work = COMPLETION_INITIALIZER_ONSTACK_MAP(work, map) #else # define DECLARE_COMPLETION_ONSTACK(work) DECLARE_COMPLETION(work) -# define 
DECLARE_COMPLETION_ONSTACK_MAP(work, map) DECLARE_COMPLETION(work) #endif /** @@ -84,7 +73,7 @@ static inline void complete_release(struct completion *x) {} static inline void init_completion(struct completion *x) { x->done = 0; - init_swait_queue_head(&x->wait); + init_waitqueue_head(&x->wait); } /** diff --git a/include/linux/component.h b/include/linux/component.h index 16de18f473..a559eebc0e 100644 --- a/include/linux/component.h +++ b/include/linux/component.h @@ -1,41 +1,18 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef COMPONENT_H #define COMPONENT_H #include - struct device; -/** - * struct component_ops - callbacks for component drivers - * - * Components are registered with component_add() and unregistered with - * component_del(). - */ struct component_ops { - /** - * @bind: - * - * Called through component_bind_all() when the aggregate driver is - * ready to bind the overall driver. - */ int (*bind)(struct device *comp, struct device *master, void *master_data); - /** - * @unbind: - * - * Called through component_unbind_all() when the aggregate driver is - * ready to bind the overall driver, or when component_bind_all() fails - * part-ways through and needs to unbind some already bound components. - */ void (*unbind)(struct device *comp, struct device *master, void *master_data); }; int component_add(struct device *, const struct component_ops *); -int component_add_typed(struct device *dev, const struct component_ops *ops, - int subcomponent); void component_del(struct device *, const struct component_ops *); int component_bind_all(struct device *master, void *master_data); @@ -43,42 +20,8 @@ void component_unbind_all(struct device *master, void *master_data); struct master; -/** - * struct component_master_ops - callback for the aggregate driver - * - * Aggregate drivers are registered with component_master_add_with_match() and - * unregistered with component_master_del(). 
- */ struct component_master_ops { - /** - * @bind: - * - * Called when all components or the aggregate driver, as specified in - * the match list passed to component_master_add_with_match(), are - * ready. Usually there are 3 steps to bind an aggregate driver: - * - * 1. Allocate a structure for the aggregate driver. - * - * 2. Bind all components to the aggregate driver by calling - * component_bind_all() with the aggregate driver structure as opaque - * pointer data. - * - * 3. Register the aggregate driver with the subsystem to publish its - * interfaces. - * - * Note that the lifetime of the aggregate driver does not align with - * any of the underlying &struct device instances. Therefore devm cannot - * be used and all resources acquired or allocated in this callback must - * be explicitly released in the @unbind callback. - */ int (*bind)(struct device *master); - /** - * @unbind: - * - * Called when either the aggregate driver, using - * component_master_del(), or one of its components, using - * component_del(), is unregistered. - */ void (*unbind)(struct device *master); }; @@ -93,27 +36,7 @@ void component_match_add_release(struct device *master, struct component_match **matchptr, void (*release)(struct device *, void *), int (*compare)(struct device *, void *), void *compare_data); -void component_match_add_typed(struct device *master, - struct component_match **matchptr, - int (*compare_typed)(struct device *, int, void *), void *compare_data); -/** - * component_match_add - add a component match entry - * @master: device with the aggregate driver - * @matchptr: pointer to the list of component matches - * @compare: compare function to match against all components - * @compare_data: opaque pointer passed to the @compare function - * - * Adds a new component match to the list stored in @matchptr, which the @master - * aggregate driver needs to function. 
The list of component matches pointed to - * by @matchptr must be initialized to NULL before adding the first match. This - * only matches against components added with component_add(). - * - * The allocated match list in @matchptr is automatically released using devm - * actions. - * - * See also component_match_add_release() and component_match_add_typed(). - */ static inline void component_match_add(struct device *master, struct component_match **matchptr, int (*compare)(struct device *, void *), void *compare_data) diff --git a/include/linux/concap.h b/include/linux/concap.h new file mode 100644 index 0000000000..977acb3d1f --- /dev/null +++ b/include/linux/concap.h @@ -0,0 +1,112 @@ +/* $Id: concap.h,v 1.3.2.2 2004/01/12 23:08:35 keil Exp $ + * + * Copyright 1997 by Henner Eisen + * + * This software may be used and distributed according to the terms + * of the GNU General Public License, incorporated herein by reference. + */ + +#ifndef _LINUX_CONCAP_H +#define _LINUX_CONCAP_H + +#include +#include + +/* Stuff to support encapsulation protocols genericly. The encapsulation + protocol is processed at the uppermost layer of the network interface. + + Based on a ideas developed in a 'synchronous device' thread in the + linux-x25 mailing list contributed by Alan Cox, Thomasz Motylewski + and Jonathan Naylor. + + For more documetation on this refer to Documentation/isdn/README.concap +*/ + +struct concap_proto_ops; +struct concap_device_ops; + +/* this manages all data needed by the encapsulation protocol + */ +struct concap_proto{ + struct net_device *net_dev; /* net device using our service */ + struct concap_device_ops *dops; /* callbacks provided by device */ + struct concap_proto_ops *pops; /* callbacks provided by us */ + spinlock_t lock; + int flags; + void *proto_data; /* protocol specific private data, to + be accessed via *pops methods only*/ + /* + : + whatever + : + */ +}; + +/* Operations to be supported by the net device. 
Called by the encapsulation + * protocol entity. No receive method is offered because the encapsulation + * protocol directly calls netif_rx(). + */ +struct concap_device_ops{ + + /* to request data is submitted by device*/ + int (*data_req)(struct concap_proto *, struct sk_buff *); + + /* Control methods must be set to NULL by devices which do not + support connection control.*/ + /* to request a connection is set up */ + int (*connect_req)(struct concap_proto *); + + /* to request a connection is released */ + int (*disconn_req)(struct concap_proto *); +}; + +/* Operations to be supported by the encapsulation protocol. Called by + * device driver. + */ +struct concap_proto_ops{ + + /* create a new encapsulation protocol instance of same type */ + struct concap_proto * (*proto_new) (void); + + /* delete encapsulation protocol instance and free all its resources. + cprot may no loger be referenced after calling this */ + void (*proto_del)(struct concap_proto *cprot); + + /* initialize the protocol's data. To be called at interface startup + or when the device driver resets the interface. All services of the + encapsulation protocol may be used after this*/ + int (*restart)(struct concap_proto *cprot, + struct net_device *ndev, + struct concap_device_ops *dops); + + /* inactivate an encapsulation protocol instance. The encapsulation + protocol may not call any *dops methods after this. */ + int (*close)(struct concap_proto *cprot); + + /* process a frame handed down to us by upper layer */ + int (*encap_and_xmit)(struct concap_proto *cprot, struct sk_buff *skb); + + /* to be called for each data entity received from lower layer*/ + int (*data_ind)(struct concap_proto *cprot, struct sk_buff *skb); + + /* to be called when a connection was set up/down. 
+ Protocols that don't process these primitives might fill in + dummy methods here */ + int (*connect_ind)(struct concap_proto *cprot); + int (*disconn_ind)(struct concap_proto *cprot); + /* + Some network device support functions, like net_header(), rebuild_header(), + and others, that depend solely on the encapsulation protocol, might + be provided here, too. The net device would just fill them in its + corresponding fields when it is opened. + */ +}; + +/* dummy restart/close/connect/reset/disconn methods + */ +extern int concap_nop(struct concap_proto *cprot); + +/* dummy submit method + */ +extern int concap_drop_skb(struct concap_proto *cprot, struct sk_buff *skb); +#endif diff --git a/include/linux/configfs.h b/include/linux/configfs.h index 97cfd13bae..489772cc16 100644 --- a/include/linux/configfs.h +++ b/include/linux/configfs.h @@ -1,7 +1,23 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ -/* +/* -*- mode: c; c-basic-offset: 8; -*- + * vim: noexpandtab sw=8 ts=8 sts=0: + * * configfs.h - definitions for the device driver filesystem * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public + * License along with this program; if not, write to the + * Free Software Foundation, Inc., 59 Temple Place - Suite 330, + * Boston, MA 021110-1307, USA. + * * Based on sysfs: * sysfs is Copyright (C) 2001, 2002, 2003 Patrick Mochel * @@ -11,7 +27,7 @@ * * configfs Copyright (C) 2005 Oracle. All rights reserved. 
* - * Please read Documentation/filesystems/configfs.rst before using + * Please read Documentation/filesystems/configfs/configfs.txt before using * the configfs interface, ESPECIALLY the parts about reference counts and * item destructors. */ @@ -19,11 +35,14 @@ #ifndef _CONFIGFS_H_ #define _CONFIGFS_H_ -#include /* S_IRUGO */ -#include /* ssize_t */ -#include /* struct list_head */ -#include /* struct kref */ -#include /* struct mutex */ +#include +#include +#include +#include +#include +#include + +#include #define CONFIGFS_ITEM_NAME_LEN 20 @@ -42,7 +61,7 @@ struct config_item { struct list_head ci_entry; struct config_item *ci_parent; struct config_group *ci_group; - const struct config_item_type *ci_type; + struct config_item_type *ci_type; struct dentry *ci_dentry; }; @@ -56,10 +75,9 @@ static inline char *config_item_name(struct config_item * item) extern void config_item_init_type_name(struct config_item *item, const char *name, - const struct config_item_type *type); + struct config_item_type *type); -extern struct config_item *config_item_get(struct config_item *); -extern struct config_item *config_item_get_unless_zero(struct config_item *); +extern struct config_item * config_item_get(struct config_item *); extern void config_item_put(struct config_item *); struct config_item_type { @@ -85,7 +103,7 @@ struct config_group { extern void config_group_init(struct config_group *group); extern void config_group_init_type_name(struct config_group *group, const char *name, - const struct config_item_type *type); + struct config_item_type *type); static inline struct config_group *to_config_group(struct config_item *item) { @@ -118,7 +136,7 @@ struct configfs_attribute { umode_t ca_mode; ssize_t (*show)(struct config_item *, char *); ssize_t (*store)(struct config_item *, const char *, size_t); -}; +} __do_const; #define CONFIGFS_ATTR(_pfx, _name) \ static struct configfs_attribute _pfx##attr_##_name = { \ @@ -210,7 +228,7 @@ static struct configfs_bin_attribute 
_pfx##attr_##_name = { \ struct configfs_item_operations { void (*release)(struct config_item *); int (*allow_link)(struct config_item *src, struct config_item *target); - void (*drop_link)(struct config_item *src, struct config_item *target); + int (*drop_link)(struct config_item *src, struct config_item *target); }; struct configfs_group_operations { @@ -245,7 +263,7 @@ void configfs_remove_default_groups(struct config_group *group); struct config_group * configfs_register_default_group(struct config_group *parent_group, const char *name, - const struct config_item_type *item_type); + struct config_item_type *item_type); void configfs_unregister_default_group(struct config_group *group); /* These functions can sleep and can alloc with GFP_KERNEL */ diff --git a/include/linux/connector.h b/include/linux/connector.h index 487350bb19..f8fe8637d7 100644 --- a/include/linux/connector.h +++ b/include/linux/connector.h @@ -1,15 +1,28 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * connector.h * * 2004-2005 Copyright (c) Evgeniy Polyakov * All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #ifndef __CONNECTOR_H #define __CONNECTOR_H -#include +#include #include #include @@ -36,7 +49,7 @@ struct cn_callback_id { struct cn_callback_entry { struct list_head callback_entry; - refcount_t refcnt; + atomic_t refcnt; struct cn_queue_dev *pdev; struct cn_callback_id id; @@ -50,86 +63,26 @@ struct cn_dev { u32 seq, groups; struct sock *nls; + void (*input) (struct sk_buff *skb); struct cn_queue_dev *cbdev; }; -/** - * cn_add_callback() - Registers new callback with connector core. - * - * @id: unique connector's user identifier. - * It must be registered in connector.h for legal - * in-kernel users. - * @name: connector's callback symbolic name. - * @callback: connector's callback. - * parameters are %cn_msg and the sender's credentials - */ -int cn_add_callback(const struct cb_id *id, const char *name, +int cn_add_callback(struct cb_id *id, const char *name, void (*callback)(struct cn_msg *, struct netlink_skb_parms *)); -/** - * cn_del_callback() - Unregisters new callback with connector core. - * - * @id: unique connector's user identifier. - */ -void cn_del_callback(const struct cb_id *id); - - -/** - * cn_netlink_send_mult - Sends message to the specified groups. - * - * @msg: message header(with attached data). - * @len: Number of @msg to be sent. - * @portid: destination port. - * If non-zero the message will be sent to the given port, - * which should be set to the original sender. - * @group: destination group. - * If @portid and @group is zero, then appropriate group will - * be searched through all registered connector users, and - * message will be delivered to the group which was created - * for user with the same ID as in @msg. - * If @group is not zero, then message will be delivered - * to the specified group. 
- * @gfp_mask: GFP mask. - * - * It can be safely called from softirq context, but may silently - * fail under strong memory pressure. - * - * If there are no listeners for given group %-ESRCH can be returned. - */ +void cn_del_callback(struct cb_id *); int cn_netlink_send_mult(struct cn_msg *msg, u16 len, u32 portid, u32 group, gfp_t gfp_mask); - -/** - * cn_netlink_send - Sends message to the specified groups. - * - * @msg: message header(with attached data). - * @portid: destination port. - * If non-zero the message will be sent to the given port, - * which should be set to the original sender. - * @group: destination group. - * If @portid and @group is zero, then appropriate group will - * be searched through all registered connector users, and - * message will be delivered to the group which was created - * for user with the same ID as in @msg. - * If @group is not zero, then message will be delivered - * to the specified group. - * @gfp_mask: GFP mask. - * - * It can be safely called from softirq context, but may silently - * fail under strong memory pressure. - * - * If there are no listeners for given group %-ESRCH can be returned. 
- */ int cn_netlink_send(struct cn_msg *msg, u32 portid, u32 group, gfp_t gfp_mask); int cn_queue_add_callback(struct cn_queue_dev *dev, const char *name, - const struct cb_id *id, + struct cb_id *id, void (*callback)(struct cn_msg *, struct netlink_skb_parms *)); -void cn_queue_del_callback(struct cn_queue_dev *dev, const struct cb_id *id); +void cn_queue_del_callback(struct cn_queue_dev *dev, struct cb_id *id); void cn_queue_release_callback(struct cn_callback_entry *); struct cn_queue_dev *cn_queue_alloc_dev(const char *name, struct sock *); void cn_queue_free_dev(struct cn_queue_dev *dev); -int cn_cb_equal(const struct cb_id *, const struct cb_id *); +int cn_cb_equal(struct cb_id *, struct cb_id *); #endif /* __CONNECTOR_H */ diff --git a/include/linux/console.h b/include/linux/console.h index 20874db50b..d530c4627e 100644 --- a/include/linux/console.h +++ b/include/linux/console.h @@ -14,7 +14,6 @@ #ifndef _LINUX_CONSOLE_H_ #define _LINUX_CONSOLE_H_ 1 -#include #include struct vc_data; @@ -22,21 +21,16 @@ struct console_font_op; struct console_font; struct module; struct tty_struct; -struct notifier_block; -enum con_scroll { - SM_UP, - SM_DOWN, -}; - -enum vc_intensity; +/* + * this is what the terminal answers to a ESC-Z or csi0c query. + */ +#define VT100ID "\033[?1;2c" +#define VT102ID "\033[?6c" /** * struct consw - callbacks for consoles * - * @con_scroll: move lines from @top to @bottom in direction @dir by @lines. - * Return true if no generic handling should be done. - * Invoked by csi_M and printing to the console. * @con_set_palette: sets the palette of the console to @table (optional) * @con_scrolldelta: the contents of the console should be scrolled by @lines. * Invoked by user. 
(optional) @@ -44,52 +38,40 @@ enum vc_intensity; struct consw { struct module *owner; const char *(*con_startup)(void); - void (*con_init)(struct vc_data *vc, int init); - void (*con_deinit)(struct vc_data *vc); - void (*con_clear)(struct vc_data *vc, int sy, int sx, int height, - int width); - void (*con_putc)(struct vc_data *vc, int c, int ypos, int xpos); - void (*con_putcs)(struct vc_data *vc, const unsigned short *s, - int count, int ypos, int xpos); - void (*con_cursor)(struct vc_data *vc, int mode); - bool (*con_scroll)(struct vc_data *vc, unsigned int top, - unsigned int bottom, enum con_scroll dir, - unsigned int lines); - int (*con_switch)(struct vc_data *vc); - int (*con_blank)(struct vc_data *vc, int blank, int mode_switch); - int (*con_font_set)(struct vc_data *vc, struct console_font *font, - unsigned int flags); - int (*con_font_get)(struct vc_data *vc, struct console_font *font); - int (*con_font_default)(struct vc_data *vc, - struct console_font *font, char *name); - int (*con_resize)(struct vc_data *vc, unsigned int width, - unsigned int height, unsigned int user); - void (*con_set_palette)(struct vc_data *vc, + void (*con_init)(struct vc_data *, int); + void (*con_deinit)(struct vc_data *); + void (*con_clear)(struct vc_data *, int, int, int, int); + void (*con_putc)(struct vc_data *, int, int, int); + void (*con_putcs)(struct vc_data *, const unsigned short *, int, int, int); + void (*con_cursor)(struct vc_data *, int); + int (*con_scroll)(struct vc_data *, int, int, int, int); + int (*con_switch)(struct vc_data *); + int (*con_blank)(struct vc_data *, int, int); + int (*con_font_set)(struct vc_data *, struct console_font *, unsigned); + int (*con_font_get)(struct vc_data *, struct console_font *); + int (*con_font_default)(struct vc_data *, struct console_font *, char *); + int (*con_font_copy)(struct vc_data *, int); + int (*con_resize)(struct vc_data *, unsigned int, unsigned int, + unsigned int); + void (*con_set_palette)(struct vc_data *, 
const unsigned char *table); - void (*con_scrolldelta)(struct vc_data *vc, int lines); - int (*con_set_origin)(struct vc_data *vc); - void (*con_save_screen)(struct vc_data *vc); - u8 (*con_build_attr)(struct vc_data *vc, u8 color, - enum vc_intensity intensity, - bool blink, bool underline, bool reverse, bool italic); - void (*con_invert_region)(struct vc_data *vc, u16 *p, int count); - u16 *(*con_screen_pos)(const struct vc_data *vc, int offset); - unsigned long (*con_getxy)(struct vc_data *vc, unsigned long position, - int *px, int *py); - /* - * Flush the video console driver's scrollback buffer - */ - void (*con_flush_scrollback)(struct vc_data *vc); + void (*con_scrolldelta)(struct vc_data *, int lines); + int (*con_set_origin)(struct vc_data *); + void (*con_save_screen)(struct vc_data *); + u8 (*con_build_attr)(struct vc_data *, u8, u8, u8, u8, u8, u8); + void (*con_invert_region)(struct vc_data *, u16 *, int); + u16 *(*con_screen_pos)(struct vc_data *, int); + unsigned long (*con_getxy)(struct vc_data *, unsigned long, int *, int *); /* * Prepare the console for the debugger. This includes, but is not * limited to, unblanking the console, loading an appropriate * palette, and allowing debugger generated output. */ - int (*con_debug_enter)(struct vc_data *vc); + int (*con_debug_enter)(struct vc_data *); /* * Restore the console to its pre-debug state as closely as possible. 
*/ - int (*con_debug_leave)(struct vc_data *vc); + int (*con_debug_leave)(struct vc_data *); }; extern const struct consw *conswitchp; @@ -97,6 +79,7 @@ extern const struct consw *conswitchp; extern const struct consw dummy_con; /* dummy console buffer */ extern const struct consw vga_con; /* VGA text console */ extern const struct consw newport_con; /* SGI Newport console */ +extern const struct consw prom_con; /* SPARC PROM console */ int con_is_bound(const struct consw *csw); int do_unregister_con_driver(const struct consw *csw); @@ -116,6 +99,10 @@ static inline int con_debug_leave(void) } #endif +/* scroll */ +#define SM_UP (1) +#define SM_DOWN (2) + /* cursor */ #define CM_DRAW (1) #define CM_ERASE (2) @@ -130,7 +117,7 @@ static inline int con_debug_leave(void) */ #define CON_PRINTBUFFER (1) -#define CON_CONSDEV (2) /* Preferred console, /dev/console */ +#define CON_CONSDEV (2) /* Last on the command line */ #define CON_ENABLED (4) #define CON_BOOT (8) #define CON_ANYTIME (16) /* Safe to call when cpu is offline */ @@ -144,7 +131,6 @@ struct console { struct tty_driver *(*device)(struct console *, int *); void (*unblank)(void); int (*setup)(struct console *, char *); - int (*exit)(struct console *); int (*match)(struct console *, char *name, int idx, char *options); short flags; short index; @@ -162,11 +148,6 @@ struct console { extern int console_set_on_cmdline; extern struct console *early_console; -enum con_flush_mode { - CONSOLE_FLUSH_PENDING, - CONSOLE_REPLAY_ALL, -}; - extern int add_preferred_console(char *name, int idx, char *options); extern void register_console(struct console *); extern int unregister_console(struct console *); @@ -176,7 +157,7 @@ extern int console_trylock(void); extern void console_unlock(void); extern void console_conditional_schedule(void); extern void console_unblank(void); -extern void console_flush_on_panic(enum con_flush_mode mode); +extern void console_flush_on_panic(void); extern struct tty_driver *console_device(int *); 
extern void console_stop(struct console *); extern void console_start(struct console *); @@ -197,19 +178,17 @@ extern void suspend_console(void); extern void resume_console(void); int mda_console_init(void); +void prom_con_init(void); void vcs_make_sysfs(int index); void vcs_remove_sysfs(int index); /* Some debug stub to catch some of the obvious races in the VT code */ -#define WARN_CONSOLE_UNLOCKED() \ - WARN_ON(!atomic_read(&ignore_console_lock_warning) && \ - !is_console_locked() && !oops_in_progress) -/* - * Increment ignore_console_lock_warning if you need to quiet - * WARN_CONSOLE_UNLOCKED() for debugging purposes. - */ -extern atomic_t ignore_console_lock_warning; +#if 1 +#define WARN_CONSOLE_UNLOCKED() WARN_ON(!is_console_locked() && !oops_in_progress) +#else +#define WARN_CONSOLE_UNLOCKED() +#endif /* VESA Blanking Levels */ #define VESA_NO_BLANKING 0 @@ -223,10 +202,4 @@ extern bool vgacon_text_force(void); static inline bool vgacon_text_force(void) { return false; } #endif -extern void console_init(void); - -/* For deferred console takeover */ -void dummycon_register_output_notifier(struct notifier_block *nb); -void dummycon_unregister_output_notifier(struct notifier_block *nb); - #endif /* _LINUX_CONSOLE_H */ diff --git a/include/linux/console_struct.h b/include/linux/console_struct.h index d5b9c8d40c..6fd3c908a3 100644 --- a/include/linux/console_struct.h +++ b/include/linux/console_struct.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ /* * console_struct.h * @@ -17,47 +16,10 @@ #include #include +struct vt_struct; struct uni_pagedir; -struct uni_screen; #define NPAR 16 -#define VC_TABSTOPS_COUNT 256U - -enum vc_intensity { - VCI_HALF_BRIGHT, - VCI_NORMAL, - VCI_BOLD, - VCI_MASK = 0x3, -}; - -/** - * struct vc_state -- state of a VC - * @x: cursor's x-position - * @y: cursor's y-position - * @color: foreground & background colors - * @Gx_charset: what's G0/G1 slot set to (like GRAF_MAP, LAT1_MAP) - * @charset: what character set to use 
(0=G0 or 1=G1) - * @intensity: see enum vc_intensity for values - * @reverse: reversed foreground/background colors - * - * These members are defined separately from struct vc_data as we save & - * restore them at times. - */ -struct vc_state { - unsigned int x, y; - - unsigned char color; - - unsigned char Gx_charset[2]; - unsigned int charset : 1; - - /* attribute flags */ - enum vc_intensity intensity; - bool italic; - bool underline; - bool blink; - bool reverse; -}; /* * Example: vc_data of a console that was scrolled 3 lines down. @@ -94,14 +56,11 @@ struct vc_state { struct vc_data { struct tty_port port; /* Upper level data */ - struct vc_state state, saved_state; - unsigned short vc_num; /* Console number */ unsigned int vc_cols; /* [#] Console size */ unsigned int vc_rows; unsigned int vc_size_row; /* Bytes per row */ unsigned int vc_scan_lines; /* # of scan lines */ - unsigned int vc_cell_height; /* CRTC character cell height */ unsigned long vc_origin; /* [!] Start of real screen */ unsigned long vc_scr_end; /* [!] End of real screen */ unsigned long vc_visible_origin; /* [!] 
Top of visible window */ @@ -113,6 +72,8 @@ struct vc_data { /* attributes for all characters on screen */ unsigned char vc_attr; /* Current attributes */ unsigned char vc_def_color; /* Default colors */ + unsigned char vc_color; /* Foreground & background */ + unsigned char vc_s_color; /* Saved foreground & background */ unsigned char vc_ulcolor; /* Color for underline mode */ unsigned char vc_itcolor; unsigned char vc_halfcolor; /* Color for half intensity mode */ @@ -120,6 +81,8 @@ struct vc_data { unsigned int vc_cursor_type; unsigned short vc_complement_mask; /* [#] Xor mask for mouse pointer */ unsigned short vc_s_complement_mask; /* Saved mouse pointer mask */ + unsigned int vc_x, vc_y; /* Cursor position */ + unsigned int vc_saved_x, vc_saved_y; unsigned long vc_pos; /* Cursor address */ /* fonts */ unsigned short vc_hi_font_mask; /* [#] Attribute set for upper 256 chars of font or 0 if not supported */ @@ -134,6 +97,8 @@ struct vc_data { int vt_newvt; wait_queue_head_t paste_wait; /* mode flags */ + unsigned int vc_charset : 1; /* Character set G0 / G1 */ + unsigned int vc_s_charset : 1; /* Saved character set */ unsigned int vc_disp_ctrl : 1; /* Display chars < 32? */ unsigned int vc_toggle_meta : 1; /* Toggle high bit? 
*/ unsigned int vc_decscnm : 1; /* Screen Mode */ @@ -141,17 +106,32 @@ struct vc_data { unsigned int vc_decawm : 1; /* Autowrap Mode */ unsigned int vc_deccm : 1; /* Cursor Visible */ unsigned int vc_decim : 1; /* Insert Mode */ + /* attribute flags */ + unsigned int vc_intensity : 2; /* 0=half-bright, 1=normal, 2=bold */ + unsigned int vc_italic:1; + unsigned int vc_underline : 1; + unsigned int vc_blink : 1; + unsigned int vc_reverse : 1; + unsigned int vc_s_intensity : 2; /* saved rendition */ + unsigned int vc_s_italic:1; + unsigned int vc_s_underline : 1; + unsigned int vc_s_blink : 1; + unsigned int vc_s_reverse : 1; /* misc */ - unsigned int vc_priv : 3; + unsigned int vc_ques : 1; unsigned int vc_need_wrap : 1; unsigned int vc_can_do_color : 1; unsigned int vc_report_mouse : 2; unsigned char vc_utf : 1; /* Unicode UTF-8 encoding */ unsigned char vc_utf_count; int vc_utf_char; - DECLARE_BITMAP(vc_tab_stop, VC_TABSTOPS_COUNT); /* Tab stops. 256 columns. */ + unsigned int vc_tab_stop[8]; /* Tab stops. 256 columns. */ unsigned char vc_palette[16*3]; /* Colour palette for VGA+ */ unsigned short * vc_translate; + unsigned char vc_G0_charset; + unsigned char vc_G1_charset; + unsigned char vc_saved_G0; + unsigned char vc_saved_G1; unsigned int vc_resize_user; /* resize request from user */ unsigned int vc_bell_pitch; /* Console bell pitch */ unsigned int vc_bell_duration; /* Console bell duration */ @@ -159,7 +139,7 @@ struct vc_data { struct vc_data **vc_display_fg; /* [!] Ptr to var holding fg console for this display */ struct uni_pagedir *vc_uni_pagedir; struct uni_pagedir **vc_uni_pagedir_loc; /* [!] 
Location of uni_pagedir variable for this console */ - struct uni_screen *vc_uni_screen; /* unicode screen content */ + bool vc_panic_force_write; /* when oops/panic this VC can accept forced output/blanking */ /* additional information is in vt_kern.h */ }; @@ -167,31 +147,29 @@ struct vc { struct vc_data *d; struct work_struct SAK_work; - /* might add scrmem, kbd at some time, - to have everything in one place */ + /* might add scrmem, vt_struct, kbd at some time, + to have everything in one place - the disadvantage + would be that vc_cons etc can no longer be static */ }; extern struct vc vc_cons [MAX_NR_CONSOLES]; extern void vc_SAK(struct work_struct *work); -#define CUR_MAKE(size, change, set) ((size) | ((change) << 8) | \ - ((set) << 16)) -#define CUR_SIZE(c) ((c) & 0x00000f) -# define CUR_DEF 0 -# define CUR_NONE 1 -# define CUR_UNDERLINE 2 -# define CUR_LOWER_THIRD 3 -# define CUR_LOWER_HALF 4 -# define CUR_TWO_THIRDS 5 -# define CUR_BLOCK 6 -#define CUR_SW 0x000010 -#define CUR_ALWAYS_BG 0x000020 -#define CUR_INVERT_FG_BG 0x000040 -#define CUR_FG 0x000700 -#define CUR_BG 0x007000 -#define CUR_CHANGE(c) ((c) & 0x00ff00) -#define CUR_SET(c) (((c) & 0xff0000) >> 8) +#define CUR_DEF 0 +#define CUR_NONE 1 +#define CUR_UNDERLINE 2 +#define CUR_LOWER_THIRD 3 +#define CUR_LOWER_HALF 4 +#define CUR_TWO_THIRDS 5 +#define CUR_BLOCK 6 +#define CUR_HWMASK 0x0f +#define CUR_SWMASK 0xfff0 -bool con_is_visible(const struct vc_data *vc); +#define CUR_DEFAULT CUR_UNDERLINE + +static inline bool con_is_visible(const struct vc_data *vc) +{ + return *vc->vc_display_fg == vc; +} #endif /* _LINUX_CONSOLE_STRUCT_H */ diff --git a/include/linux/consolemap.h b/include/linux/consolemap.h index bcfce748c9..c4811da133 100644 --- a/include/linux/consolemap.h +++ b/include/linux/consolemap.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ /* * consolemap.h * @@ -17,8 +16,7 @@ #ifdef CONFIG_CONSOLE_TRANSLATIONS struct vc_data; -extern u16 inverse_translate(const struct vc_data 
*conp, int glyph, - int use_unicode); +extern u16 inverse_translate(struct vc_data *conp, int glyph, int use_unicode); extern unsigned short *set_translate(int m, struct vc_data *vc); extern int conv_uni_to_pc(struct vc_data *conp, long ucs); extern u32 conv_8bit_to_uni(unsigned char c); diff --git a/include/linux/container.h b/include/linux/container.h index 2566a1baa7..3c03e6fd20 100644 --- a/include/linux/container.h +++ b/include/linux/container.h @@ -1,14 +1,14 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * Definitions for container bus type. * * Copyright (C) 2013, Intel Corporation * Author: Rafael J. Wysocki + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. */ -#ifndef _LINUX_CONTAINER_H -#define _LINUX_CONTAINER_H - #include /* drivers/base/power/container.c */ @@ -23,5 +23,3 @@ static inline struct container_dev *to_container_dev(struct device *dev) { return container_of(dev, struct container_dev, dev); } - -#endif /* _LINUX_CONTAINER_H */ diff --git a/include/linux/context_tracking.h b/include/linux/context_tracking.h index 4d7fced3a3..c78fc27418 100644 --- a/include/linux/context_tracking.h +++ b/include/linux/context_tracking.h @@ -1,12 +1,9 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_CONTEXT_TRACKING_H #define _LINUX_CONTEXT_TRACKING_H #include #include #include -#include - #include @@ -24,26 +21,26 @@ extern void context_tracking_user_exit(void); static inline void user_enter(void) { - if (context_tracking_enabled()) + if (context_tracking_is_enabled()) context_tracking_enter(CONTEXT_USER); } static inline void user_exit(void) { - if (context_tracking_enabled()) + if (context_tracking_is_enabled()) context_tracking_exit(CONTEXT_USER); } /* Called with interrupts disabled. 
*/ -static __always_inline void user_enter_irqoff(void) +static inline void user_enter_irqoff(void) { - if (context_tracking_enabled()) + if (context_tracking_is_enabled()) __context_tracking_enter(CONTEXT_USER); } -static __always_inline void user_exit_irqoff(void) +static inline void user_exit_irqoff(void) { - if (context_tracking_enabled()) + if (context_tracking_is_enabled()) __context_tracking_exit(CONTEXT_USER); } @@ -51,8 +48,7 @@ static inline enum ctx_state exception_enter(void) { enum ctx_state prev_ctx; - if (IS_ENABLED(CONFIG_HAVE_CONTEXT_TRACKING_OFFSTACK) || - !context_tracking_enabled()) + if (!context_tracking_is_enabled()) return 0; prev_ctx = this_cpu_read(context_tracking.state); @@ -64,26 +60,12 @@ static inline enum ctx_state exception_enter(void) static inline void exception_exit(enum ctx_state prev_ctx) { - if (!IS_ENABLED(CONFIG_HAVE_CONTEXT_TRACKING_OFFSTACK) && - context_tracking_enabled()) { + if (context_tracking_is_enabled()) { if (prev_ctx != CONTEXT_KERNEL) context_tracking_enter(prev_ctx); } } -static __always_inline bool context_tracking_guest_enter(void) -{ - if (context_tracking_enabled()) - __context_tracking_enter(CONTEXT_GUEST); - - return context_tracking_enabled_this_cpu(); -} - -static __always_inline void context_tracking_guest_exit(void) -{ - if (context_tracking_enabled()) - __context_tracking_exit(CONTEXT_GUEST); -} /** * ct_state() - return the current context tracking state if known @@ -92,9 +74,9 @@ static __always_inline void context_tracking_guest_exit(void) * is enabled. If context tracking is disabled, returns * CONTEXT_DISABLED. This should be used primarily for debugging. */ -static __always_inline enum ctx_state ct_state(void) +static inline enum ctx_state ct_state(void) { - return context_tracking_enabled() ? + return context_tracking_is_enabled() ? 
this_cpu_read(context_tracking.state) : CONTEXT_DISABLED; } #else @@ -105,12 +87,9 @@ static inline void user_exit_irqoff(void) { } static inline enum ctx_state exception_enter(void) { return 0; } static inline void exception_exit(enum ctx_state prev_ctx) { } static inline enum ctx_state ct_state(void) { return CONTEXT_DISABLED; } -static inline bool context_tracking_guest_enter(void) { return false; } -static inline void context_tracking_guest_exit(void) { } - #endif /* !CONFIG_CONTEXT_TRACKING */ -#define CT_WARN_ON(cond) WARN_ON(context_tracking_enabled() && (cond)) +#define CT_WARN_ON(cond) WARN_ON(context_tracking_is_enabled() && (cond)) #ifdef CONFIG_CONTEXT_TRACKING_FORCE extern void context_tracking_init(void); @@ -118,4 +97,78 @@ extern void context_tracking_init(void); static inline void context_tracking_init(void) { } #endif /* CONFIG_CONTEXT_TRACKING_FORCE */ + +#ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN +/* must be called with irqs disabled */ +static inline void guest_enter_irqoff(void) +{ + if (vtime_accounting_cpu_enabled()) + vtime_guest_enter(current); + else + current->flags |= PF_VCPU; + + if (context_tracking_is_enabled()) + __context_tracking_enter(CONTEXT_GUEST); + + /* KVM does not hold any references to rcu protected data when it + * switches CPU into a guest mode. In fact switching to a guest mode + * is very similar to exiting to userspace from rcu point of view. In + * addition CPU may stay in a guest mode for quite a long time (up to + * one time slice). Lets treat guest mode as quiescent state, just like + * we do with user-mode execution. 
+ */ + if (!context_tracking_cpu_is_enabled()) + rcu_virt_note_context_switch(smp_processor_id()); +} + +static inline void guest_exit_irqoff(void) +{ + if (context_tracking_is_enabled()) + __context_tracking_exit(CONTEXT_GUEST); + + if (vtime_accounting_cpu_enabled()) + vtime_guest_exit(current); + else + current->flags &= ~PF_VCPU; +} + +#else +static inline void guest_enter_irqoff(void) +{ + /* + * This is running in ioctl context so its safe + * to assume that it's the stime pending cputime + * to flush. + */ + vtime_account_system(current); + current->flags |= PF_VCPU; + rcu_virt_note_context_switch(smp_processor_id()); +} + +static inline void guest_exit_irqoff(void) +{ + /* Flush the guest cputime we spent on the guest */ + vtime_account_system(current); + current->flags &= ~PF_VCPU; +} +#endif /* CONFIG_VIRT_CPU_ACCOUNTING_GEN */ + +static inline void guest_enter(void) +{ + unsigned long flags; + + local_irq_save(flags); + guest_enter_irqoff(); + local_irq_restore(flags); +} + +static inline void guest_exit(void) +{ + unsigned long flags; + + local_irq_save(flags); + guest_exit_irqoff(); + local_irq_restore(flags); +} + #endif diff --git a/include/linux/context_tracking_state.h b/include/linux/context_tracking_state.h index 65a60d3313..1d34fe68f4 100644 --- a/include/linux/context_tracking_state.h +++ b/include/linux/context_tracking_state.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_CONTEXT_TRACKING_STATE_H #define _LINUX_CONTEXT_TRACKING_STATE_H @@ -23,33 +22,28 @@ struct context_tracking { }; #ifdef CONFIG_CONTEXT_TRACKING -extern struct static_key_false context_tracking_key; +extern struct static_key_false context_tracking_enabled; DECLARE_PER_CPU(struct context_tracking, context_tracking); -static __always_inline bool context_tracking_enabled(void) +static inline bool context_tracking_is_enabled(void) { - return static_branch_unlikely(&context_tracking_key); + return static_branch_unlikely(&context_tracking_enabled); } 
-static __always_inline bool context_tracking_enabled_cpu(int cpu) +static inline bool context_tracking_cpu_is_enabled(void) { - return context_tracking_enabled() && per_cpu(context_tracking.active, cpu); + return __this_cpu_read(context_tracking.active); } -static inline bool context_tracking_enabled_this_cpu(void) -{ - return context_tracking_enabled() && __this_cpu_read(context_tracking.active); -} - -static __always_inline bool context_tracking_in_user(void) +static inline bool context_tracking_in_user(void) { return __this_cpu_read(context_tracking.state) == CONTEXT_USER; } #else static inline bool context_tracking_in_user(void) { return false; } -static inline bool context_tracking_enabled(void) { return false; } -static inline bool context_tracking_enabled_cpu(int cpu) { return false; } -static inline bool context_tracking_enabled_this_cpu(void) { return false; } +static inline bool context_tracking_active(void) { return false; } +static inline bool context_tracking_is_enabled(void) { return false; } +static inline bool context_tracking_cpu_is_enabled(void) { return false; } #endif /* CONFIG_CONTEXT_TRACKING */ #endif diff --git a/include/linux/cordic.h b/include/linux/cordic.h index 3d656f54d6..cf68ca4a50 100644 --- a/include/linux/cordic.h +++ b/include/linux/cordic.h @@ -18,15 +18,6 @@ #include -#define CORDIC_ANGLE_GEN 39797 -#define CORDIC_PRECISION_SHIFT 16 -#define CORDIC_NUM_ITER (CORDIC_PRECISION_SHIFT + 2) - -#define CORDIC_FIXED(X) ((s32)((X) << CORDIC_PRECISION_SHIFT)) -#define CORDIC_FLOAT(X) (((X) >= 0) \ - ? ((((X) >> (CORDIC_PRECISION_SHIFT - 1)) + 1) >> 1) \ - : -((((-(X)) >> (CORDIC_PRECISION_SHIFT - 1)) + 1) >> 1)) - /** * struct cordic_iq - i/q coordinate. 
* diff --git a/include/linux/coredump.h b/include/linux/coredump.h index 78fcd776b1..d016a121a8 100644 --- a/include/linux/coredump.h +++ b/include/linux/coredump.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_COREDUMP_H #define _LINUX_COREDUMP_H @@ -7,34 +6,18 @@ #include #include -#ifdef CONFIG_COREDUMP -struct core_vma_metadata { - unsigned long start, end; - unsigned long flags; - unsigned long dump_size; -}; - -extern int core_uses_pid; -extern char core_pattern[]; -extern unsigned int core_pipe_limit; - /* * These are the only things you should do on a core-file: use only these * functions to write out all the necessary info. */ struct coredump_params; -extern void dump_skip_to(struct coredump_params *cprm, unsigned long to); -extern void dump_skip(struct coredump_params *cprm, size_t nr); +extern int dump_skip(struct coredump_params *cprm, size_t nr); extern int dump_emit(struct coredump_params *cprm, const void *addr, int nr); extern int dump_align(struct coredump_params *cprm, int align); -int dump_user_range(struct coredump_params *cprm, unsigned long start, - unsigned long len); -int dump_vma_snapshot(struct coredump_params *cprm, int *vma_count, - struct core_vma_metadata **vma_meta, - size_t *vma_data_size_ptr); -extern void do_coredump(const kernel_siginfo_t *siginfo); +#ifdef CONFIG_COREDUMP +extern void do_coredump(const siginfo_t *siginfo); #else -static inline void do_coredump(const kernel_siginfo_t *siginfo) {} +static inline void do_coredump(const siginfo_t *siginfo) {} #endif #endif /* _LINUX_COREDUMP_H */ diff --git a/include/linux/coresight-pmu.h b/include/linux/coresight-pmu.h index 4ac5c081af..7d41026066 100644 --- a/include/linux/coresight-pmu.h +++ b/include/linux/coresight-pmu.h @@ -1,7 +1,18 @@ -/* SPDX-License-Identifier: GPL-2.0 */ /* * Copyright(C) 2015 Linaro Limited. All rights reserved. 
* Author: Mathieu Poirier + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published by + * the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program. If not, see . */ #ifndef _LINUX_CORESIGHT_PMU_H @@ -10,27 +21,9 @@ #define CORESIGHT_ETM_PMU_NAME "cs_etm" #define CORESIGHT_ETM_PMU_SEED 0x10 -/* - * Below are the definition of bit offsets for perf option, and works as - * arbitrary values for all ETM versions. - * - * Most of them are orignally from ETMv3.5/PTM's ETMCR config, therefore, - * ETMv3.5/PTM doesn't define ETMCR config bits with prefix "ETM3_" and - * directly use below macros as config bits. 
- */ -#define ETM_OPT_CYCACC 12 -#define ETM_OPT_CTXTID 14 -#define ETM_OPT_CTXTID2 15 -#define ETM_OPT_TS 28 -#define ETM_OPT_RETSTK 29 - -/* ETMv4 CONFIGR programming bits for the ETM OPTs */ -#define ETM4_CFG_BIT_CYCACC 4 -#define ETM4_CFG_BIT_CTXTID 6 -#define ETM4_CFG_BIT_VMID 7 -#define ETM4_CFG_BIT_TS 11 -#define ETM4_CFG_BIT_RETSTK 12 -#define ETM4_CFG_BIT_VMID_OPT 15 +/* ETMv3.5/PTM's ETMCR config bit */ +#define ETM_OPT_CYCACC 12 +#define ETM_OPT_TS 28 static inline int coresight_get_trace_id(int cpu) { diff --git a/include/linux/coresight-stm.h b/include/linux/coresight-stm.h index 74714b59f9..a978bb8559 100644 --- a/include/linux/coresight-stm.h +++ b/include/linux/coresight-stm.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef __LINUX_CORESIGHT_STM_H_ #define __LINUX_CORESIGHT_STM_H_ diff --git a/include/linux/coresight.h b/include/linux/coresight.h index 93a2922b76..2a5982c37d 100644 --- a/include/linux/coresight.h +++ b/include/linux/coresight.h @@ -1,13 +1,19 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* - * Copyright (c) 2012, The Linux Foundation. All rights reserved. +/* Copyright (c) 2012, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
*/ #ifndef _LINUX_CORESIGHT_H #define _LINUX_CORESIGHT_H #include -#include #include #include @@ -41,16 +47,12 @@ enum coresight_dev_type { CORESIGHT_DEV_TYPE_LINK, CORESIGHT_DEV_TYPE_LINKSINK, CORESIGHT_DEV_TYPE_SOURCE, - CORESIGHT_DEV_TYPE_HELPER, - CORESIGHT_DEV_TYPE_ECT, }; enum coresight_dev_subtype_sink { CORESIGHT_DEV_SUBTYPE_SINK_NONE, CORESIGHT_DEV_SUBTYPE_SINK_PORT, CORESIGHT_DEV_SUBTYPE_SINK_BUFFER, - CORESIGHT_DEV_SUBTYPE_SINK_SYSMEM, - CORESIGHT_DEV_SUBTYPE_SINK_PERCPU_SYSMEM, }; enum coresight_dev_subtype_link { @@ -67,220 +69,115 @@ enum coresight_dev_subtype_source { CORESIGHT_DEV_SUBTYPE_SOURCE_SOFTWARE, }; -enum coresight_dev_subtype_helper { - CORESIGHT_DEV_SUBTYPE_HELPER_NONE, - CORESIGHT_DEV_SUBTYPE_HELPER_CATU, -}; - -/* Embedded Cross Trigger (ECT) sub-types */ -enum coresight_dev_subtype_ect { - CORESIGHT_DEV_SUBTYPE_ECT_NONE, - CORESIGHT_DEV_SUBTYPE_ECT_CTI, -}; - /** - * union coresight_dev_subtype - further characterisation of a type + * struct coresight_dev_subtype - further characterisation of a type * @sink_subtype: type of sink this component is, as defined - * by @coresight_dev_subtype_sink. + by @coresight_dev_subtype_sink. * @link_subtype: type of link this component is, as defined - * by @coresight_dev_subtype_link. + by @coresight_dev_subtype_link. * @source_subtype: type of source this component is, as defined - * by @coresight_dev_subtype_source. - * @helper_subtype: type of helper this component is, as defined - * by @coresight_dev_subtype_helper. - * @ect_subtype: type of cross trigger this component is, as - * defined by @coresight_dev_subtype_ect + by @coresight_dev_subtype_source. 
*/ -union coresight_dev_subtype { - /* We have some devices which acts as LINK and SINK */ - struct { - enum coresight_dev_subtype_sink sink_subtype; - enum coresight_dev_subtype_link link_subtype; - }; +struct coresight_dev_subtype { + enum coresight_dev_subtype_sink sink_subtype; + enum coresight_dev_subtype_link link_subtype; enum coresight_dev_subtype_source source_subtype; - enum coresight_dev_subtype_helper helper_subtype; - enum coresight_dev_subtype_ect ect_subtype; }; /** - * struct coresight_platform_data - data harvested from the firmware - * specification. - * - * @nr_inport: Number of elements for the input connections. - * @nr_outport: Number of elements for the output connections. - * @conns: Sparse array of nr_outport connections from this component. + * struct coresight_platform_data - data harvested from the DT specification + * @cpu: the CPU a source belongs to. Only applicable for ETM/PTMs. + * @name: name of the component as shown under sysfs. + * @nr_inport: number of input ports for this component. + * @outports: list of remote endpoint port number. + * @child_names:name of all child components connected to this device. + * @child_ports:child component port number the current component is + connected to. + * @nr_outport: number of output ports for this component. + * @clk: The clock this component is associated to. */ struct coresight_platform_data { + int cpu; + const char *name; int nr_inport; + int *outports; + const char **child_names; + int *child_ports; int nr_outport; - struct coresight_connection *conns; + struct clk *clk; }; -/** - * struct csdev_access - Abstraction of a CoreSight device access. - * - * @io_mem : True if the device has memory mapped I/O - * @base : When io_mem == true, base address of the component - * @read : Read from the given "offset" of the given instance. - * @write : Write "val" to the given "offset". 
- */ -struct csdev_access { - bool io_mem; - union { - void __iomem *base; - struct { - u64 (*read)(u32 offset, bool relaxed, bool _64bit); - void (*write)(u64 val, u32 offset, bool relaxed, - bool _64bit); - }; - }; -}; - -#define CSDEV_ACCESS_IOMEM(_addr) \ - ((struct csdev_access) { \ - .io_mem = true, \ - .base = (_addr), \ - }) - /** * struct coresight_desc - description of a component required from drivers * @type: as defined by @coresight_dev_type. * @subtype: as defined by @coresight_dev_subtype. * @ops: generic operations for this component, as defined - * by @coresight_ops. + by @coresight_ops. * @pdata: platform data collected from DT. * @dev: The device entity associated to this component. * @groups: operations specific to this component. These will end up - * in the component's sysfs sub-directory. - * @name: name for the coresight device, also shown under sysfs. - * @access: Describe access to the device + in the component's sysfs sub-directory. */ struct coresight_desc { enum coresight_dev_type type; - union coresight_dev_subtype subtype; + struct coresight_dev_subtype subtype; const struct coresight_ops *ops; struct coresight_platform_data *pdata; struct device *dev; const struct attribute_group **groups; - const char *name; - struct csdev_access access; }; /** * struct coresight_connection - representation of a single connection * @outport: a connection's output port number. + * @chid_name: remote component's name. * @child_port: remote component's port number @output is connected to. - * @chid_fwnode: remote component's fwnode handle. * @child_dev: a @coresight_device representation of the component connected to @outport. - * @link: Representation of the connection as a sysfs link. 
*/ struct coresight_connection { int outport; + const char *child_name; int child_port; - struct fwnode_handle *child_fwnode; struct coresight_device *child_dev; - struct coresight_sysfs_link *link; -}; - -/** - * struct coresight_sysfs_link - representation of a connection in sysfs. - * @orig: Originating (master) coresight device for the link. - * @orig_name: Name to use for the link orig->target. - * @target: Target (slave) coresight device for the link. - * @target_name: Name to use for the link target->orig. - */ -struct coresight_sysfs_link { - struct coresight_device *orig; - const char *orig_name; - struct coresight_device *target; - const char *target_name; }; /** * struct coresight_device - representation of a device as used by the framework - * @pdata: Platform data with device connections associated to this device. + * @conns: array of coresight_connections associated to this component. + * @nr_inport: number of input port associated to this component. + * @nr_outport: number of output port associated to this component. * @type: as defined by @coresight_dev_type. * @subtype: as defined by @coresight_dev_subtype. * @ops: generic operations for this component, as defined - * by @coresight_ops. - * @access: Device i/o access abstraction for this device. + by @coresight_ops. * @dev: The device entity associated to this component. * @refcnt: keep track of what is in use. * @orphan: true if the component has connections that haven't been linked. * @enable: 'true' if component is currently part of an active path. * @activated: 'true' only if a _sink_ has been activated. A sink can be - * activated but not yet enabled. Enabling for a _sink_ - * happens when a source has been selected and a path is enabled - * from source to that sink. - * @ea: Device attribute for sink representation under PMU directory. - * @def_sink: cached reference to default sink found for this device. - * @ect_dev: Associated cross trigger device. 
Not part of the trace data - * path or connections. - * @nr_links: number of sysfs links created to other components from this - * device. These will appear in the "connections" group. - * @has_conns_grp: Have added a "connections" group for sysfs links. - * @feature_csdev_list: List of complex feature programming added to the device. - * @config_csdev_list: List of system configurations added to the device. - * @cscfg_csdev_lock: Protect the lists of configurations and features. - * @active_cscfg_ctxt: Context information for current active system configuration. + activated but not yet enabled. Enabling for a _sink_ + happens when a source has been selected for that it. */ struct coresight_device { - struct coresight_platform_data *pdata; + struct coresight_connection *conns; + int nr_inport; + int nr_outport; enum coresight_dev_type type; - union coresight_dev_subtype subtype; + struct coresight_dev_subtype subtype; const struct coresight_ops *ops; - struct csdev_access access; struct device dev; atomic_t *refcnt; bool orphan; bool enable; /* true only if configured as part of a path */ - /* sink specific fields */ bool activated; /* true only if a sink is part of a path */ - struct dev_ext_attribute *ea; - struct coresight_device *def_sink; - /* cross trigger handling */ - struct coresight_device *ect_dev; - /* sysfs links between components */ - int nr_links; - bool has_conns_grp; - bool ect_enabled; /* true only if associated ect device is enabled */ - /* system configuration and feature lists */ - struct list_head feature_csdev_list; - struct list_head config_csdev_list; - spinlock_t cscfg_csdev_lock; - void *active_cscfg_ctxt; }; -/* - * coresight_dev_list - Mapping for devices to "name" index for device - * names. - * - * @nr_idx: Number of entries already allocated. - * @pfx: Prefix pattern for device name. - * @fwnode_list: Array of fwnode_handles associated with each allocated - * index, upto nr_idx entries. 
- */ -struct coresight_dev_list { - int nr_idx; - const char *pfx; - struct fwnode_handle **fwnode_list; -}; - -#define DEFINE_CORESIGHT_DEVLIST(var, dev_pfx) \ -static struct coresight_dev_list (var) = { \ - .pfx = dev_pfx, \ - .nr_idx = 0, \ - .fwnode_list = NULL, \ -} - #define to_coresight_device(d) container_of(d, struct coresight_device, dev) #define source_ops(csdev) csdev->ops->source_ops #define sink_ops(csdev) csdev->ops->sink_ops #define link_ops(csdev) csdev->ops->link_ops -#define helper_ops(csdev) csdev->ops->helper_ops -#define ect_ops(csdev) csdev->ops->ect_ops /** * struct coresight_ops_sink - basic operations for a sink @@ -289,16 +186,23 @@ static struct coresight_dev_list (var) = { \ * @disable: disables the sink. * @alloc_buffer: initialises perf's ring buffer for trace collection. * @free_buffer: release memory allocated in @get_config. + * @set_buffer: initialises buffer mechanic before a trace session. + * @reset_buffer: finalises buffer mechanic after a trace session. * @update_buffer: update buffer pointers after a trace session. 
*/ struct coresight_ops_sink { - int (*enable)(struct coresight_device *csdev, u32 mode, void *data); - int (*disable)(struct coresight_device *csdev); - void *(*alloc_buffer)(struct coresight_device *csdev, - struct perf_event *event, void **pages, - int nr_pages, bool overwrite); + int (*enable)(struct coresight_device *csdev, u32 mode); + void (*disable)(struct coresight_device *csdev); + void *(*alloc_buffer)(struct coresight_device *csdev, int cpu, + void **pages, int nr_pages, bool overwrite); void (*free_buffer)(void *config); - unsigned long (*update_buffer)(struct coresight_device *csdev, + int (*set_buffer)(struct coresight_device *csdev, + struct perf_output_handle *handle, + void *sink_config); + unsigned long (*reset_buffer)(struct coresight_device *csdev, + struct perf_output_handle *handle, + void *sink_config, bool *lost); + void (*update_buffer)(struct coresight_device *csdev, struct perf_output_handle *handle, void *sink_config); }; @@ -333,179 +237,20 @@ struct coresight_ops_source { struct perf_event *event); }; -/** - * struct coresight_ops_helper - Operations for a helper device. - * - * All operations could pass in a device specific data, which could - * help the helper device to determine what to do. 
- * - * @enable : Enable the device - * @disable : Disable the device - */ -struct coresight_ops_helper { - int (*enable)(struct coresight_device *csdev, void *data); - int (*disable)(struct coresight_device *csdev, void *data); -}; - -/** - * struct coresight_ops_ect - Ops for an embedded cross trigger device - * - * @enable : Enable the device - * @disable : Disable the device - */ -struct coresight_ops_ect { - int (*enable)(struct coresight_device *csdev); - int (*disable)(struct coresight_device *csdev); -}; - struct coresight_ops { const struct coresight_ops_sink *sink_ops; const struct coresight_ops_link *link_ops; const struct coresight_ops_source *source_ops; - const struct coresight_ops_helper *helper_ops; - const struct coresight_ops_ect *ect_ops; }; -#if IS_ENABLED(CONFIG_CORESIGHT) - -static inline u32 csdev_access_relaxed_read32(struct csdev_access *csa, - u32 offset) -{ - if (likely(csa->io_mem)) - return readl_relaxed(csa->base + offset); - - return csa->read(offset, true, false); -} - -static inline u32 csdev_access_read32(struct csdev_access *csa, u32 offset) -{ - if (likely(csa->io_mem)) - return readl(csa->base + offset); - - return csa->read(offset, false, false); -} - -static inline void csdev_access_relaxed_write32(struct csdev_access *csa, - u32 val, u32 offset) -{ - if (likely(csa->io_mem)) - writel_relaxed(val, csa->base + offset); - else - csa->write(val, offset, true, false); -} - -static inline void csdev_access_write32(struct csdev_access *csa, u32 val, u32 offset) -{ - if (likely(csa->io_mem)) - writel(val, csa->base + offset); - else - csa->write(val, offset, false, false); -} - -#ifdef CONFIG_64BIT - -static inline u64 csdev_access_relaxed_read64(struct csdev_access *csa, - u32 offset) -{ - if (likely(csa->io_mem)) - return readq_relaxed(csa->base + offset); - - return csa->read(offset, true, true); -} - -static inline u64 csdev_access_read64(struct csdev_access *csa, u32 offset) -{ - if (likely(csa->io_mem)) - return readq(csa->base 
+ offset); - - return csa->read(offset, false, true); -} - -static inline void csdev_access_relaxed_write64(struct csdev_access *csa, - u64 val, u32 offset) -{ - if (likely(csa->io_mem)) - writeq_relaxed(val, csa->base + offset); - else - csa->write(val, offset, true, true); -} - -static inline void csdev_access_write64(struct csdev_access *csa, u64 val, u32 offset) -{ - if (likely(csa->io_mem)) - writeq(val, csa->base + offset); - else - csa->write(val, offset, false, true); -} - -#else /* !CONFIG_64BIT */ - -static inline u64 csdev_access_relaxed_read64(struct csdev_access *csa, - u32 offset) -{ - WARN_ON(1); - return 0; -} - -static inline u64 csdev_access_read64(struct csdev_access *csa, u32 offset) -{ - WARN_ON(1); - return 0; -} - -static inline void csdev_access_relaxed_write64(struct csdev_access *csa, - u64 val, u32 offset) -{ - WARN_ON(1); -} - -static inline void csdev_access_write64(struct csdev_access *csa, u64 val, u32 offset) -{ - WARN_ON(1); -} -#endif /* CONFIG_64BIT */ - -static inline bool coresight_is_percpu_source(struct coresight_device *csdev) -{ - return csdev && (csdev->type == CORESIGHT_DEV_TYPE_SOURCE) && - (csdev->subtype.source_subtype == CORESIGHT_DEV_SUBTYPE_SOURCE_PROC); -} - -static inline bool coresight_is_percpu_sink(struct coresight_device *csdev) -{ - return csdev && (csdev->type == CORESIGHT_DEV_TYPE_SINK) && - (csdev->subtype.sink_subtype == CORESIGHT_DEV_SUBTYPE_SINK_PERCPU_SYSMEM); -} - +#ifdef CONFIG_CORESIGHT extern struct coresight_device * coresight_register(struct coresight_desc *desc); extern void coresight_unregister(struct coresight_device *csdev); extern int coresight_enable(struct coresight_device *csdev); extern void coresight_disable(struct coresight_device *csdev); -extern int coresight_timeout(struct csdev_access *csa, u32 offset, +extern int coresight_timeout(void __iomem *addr, u32 offset, int position, int value); - -extern int coresight_claim_device(struct coresight_device *csdev); -extern int 
coresight_claim_device_unlocked(struct coresight_device *csdev); - -extern void coresight_disclaim_device(struct coresight_device *csdev); -extern void coresight_disclaim_device_unlocked(struct coresight_device *csdev); -extern char *coresight_alloc_device_name(struct coresight_dev_list *devs, - struct device *dev); - -extern bool coresight_loses_context_with_cpu(struct device *dev); - -u32 coresight_relaxed_read32(struct coresight_device *csdev, u32 offset); -u32 coresight_read32(struct coresight_device *csdev, u32 offset); -void coresight_write32(struct coresight_device *csdev, u32 val, u32 offset); -void coresight_relaxed_write32(struct coresight_device *csdev, - u32 val, u32 offset); -u64 coresight_relaxed_read64(struct coresight_device *csdev, u32 offset); -u64 coresight_read64(struct coresight_device *csdev, u32 offset); -void coresight_relaxed_write64(struct coresight_device *csdev, - u64 val, u32 offset); -void coresight_write64(struct coresight_device *csdev, u64 val, u32 offset); - #else static inline struct coresight_device * coresight_register(struct coresight_desc *desc) { return NULL; } @@ -513,78 +258,36 @@ static inline void coresight_unregister(struct coresight_device *csdev) {} static inline int coresight_enable(struct coresight_device *csdev) { return -ENOSYS; } static inline void coresight_disable(struct coresight_device *csdev) {} +static inline int coresight_timeout(void __iomem *addr, u32 offset, + int position, int value) { return 1; } +#endif -static inline int coresight_timeout(struct csdev_access *csa, u32 offset, - int position, int value) +#ifdef CONFIG_OF +extern struct coresight_platform_data *of_get_coresight_platform_data( + struct device *dev, struct device_node *node); +#else +static inline struct coresight_platform_data *of_get_coresight_platform_data( + struct device *dev, struct device_node *node) { return NULL; } +#endif + +#ifdef CONFIG_PID_NS +static inline unsigned long +coresight_vpid_to_pid(unsigned long vpid) { - return 
1; + struct task_struct *task = NULL; + unsigned long pid = 0; + + rcu_read_lock(); + task = find_task_by_vpid(vpid); + if (task) + pid = task_pid_nr(task); + rcu_read_unlock(); + + return pid; } +#else +static inline unsigned long +coresight_vpid_to_pid(unsigned long vpid) { return vpid; } +#endif -static inline int coresight_claim_device_unlocked(struct coresight_device *csdev) -{ - return -EINVAL; -} - -static inline int coresight_claim_device(struct coresight_device *csdev) -{ - return -EINVAL; -} - -static inline void coresight_disclaim_device(struct coresight_device *csdev) {} -static inline void coresight_disclaim_device_unlocked(struct coresight_device *csdev) {} - -static inline bool coresight_loses_context_with_cpu(struct device *dev) -{ - return false; -} - -static inline u32 coresight_relaxed_read32(struct coresight_device *csdev, u32 offset) -{ - WARN_ON_ONCE(1); - return 0; -} - -static inline u32 coresight_read32(struct coresight_device *csdev, u32 offset) -{ - WARN_ON_ONCE(1); - return 0; -} - -static inline void coresight_write32(struct coresight_device *csdev, u32 val, u32 offset) -{ -} - -static inline void coresight_relaxed_write32(struct coresight_device *csdev, - u32 val, u32 offset) -{ -} - -static inline u64 coresight_relaxed_read64(struct coresight_device *csdev, - u32 offset) -{ - WARN_ON_ONCE(1); - return 0; -} - -static inline u64 coresight_read64(struct coresight_device *csdev, u32 offset) -{ - WARN_ON_ONCE(1); - return 0; -} - -static inline void coresight_relaxed_write64(struct coresight_device *csdev, - u64 val, u32 offset) -{ -} - -static inline void coresight_write64(struct coresight_device *csdev, u64 val, u32 offset) -{ -} - -#endif /* IS_ENABLED(CONFIG_CORESIGHT) */ - -extern int coresight_get_cpu(struct device *dev); - -struct coresight_platform_data *coresight_get_platform_data(struct device *dev); - -#endif /* _LINUX_COREISGHT_H */ +#endif diff --git a/include/linux/count_zeros.h b/include/linux/count_zeros.h index 
5b8ff5ac66..363da78c4f 100644 --- a/include/linux/count_zeros.h +++ b/include/linux/count_zeros.h @@ -1,8 +1,12 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ /* Count leading and trailing zeros functions * * Copyright (C) 2012 Red Hat, Inc. All Rights Reserved. * Written by David Howells (dhowells@redhat.com) + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public Licence + * as published by the Free Software Foundation; either version + * 2 of the Licence, or (at your option) any later version. */ #ifndef _LINUX_BITOPS_COUNT_ZEROS_H_ diff --git a/include/linux/cper.h b/include/linux/cper.h index 6a511a1078..dcacb1a72e 100644 --- a/include/linux/cper.h +++ b/include/linux/cper.h @@ -1,9 +1,21 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * UEFI Common Platform Error Record * * Copyright (C) 2010, Intel Corp. * Author: Huang Ying + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License version + * 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #ifndef LINUX_CPER_H @@ -32,7 +44,7 @@ */ #define CPER_REC_LEN 256 /* - * Severity definition for error_severity in struct cper_record_header + * Severity difinition for error_severity in struct cper_record_header * and section_severity in struct cper_section_descriptor */ enum { @@ -43,52 +55,55 @@ enum { }; /* - * Validation bits definition for validation_bits in struct + * Validation bits difinition for validation_bits in struct * cper_record_header. If set, corresponding fields in struct * cper_record_header contain valid information. + * + * corresponds platform_id */ #define CPER_VALID_PLATFORM_ID 0x0001 +/* corresponds timestamp */ #define CPER_VALID_TIMESTAMP 0x0002 +/* corresponds partition_id */ #define CPER_VALID_PARTITION_ID 0x0004 /* * Notification type used to generate error record, used in - * notification_type in struct cper_record_header. These UUIDs are defined - * in the UEFI spec v2.7, sec N.2.1. 
+ * notification_type in struct cper_record_header + * + * Corrected Machine Check */ - -/* Corrected Machine Check */ #define CPER_NOTIFY_CMC \ - GUID_INIT(0x2DCE8BB1, 0xBDD7, 0x450e, 0xB9, 0xAD, 0x9C, 0xF4, \ - 0xEB, 0xD4, 0xF8, 0x90) + UUID_LE(0x2DCE8BB1, 0xBDD7, 0x450e, 0xB9, 0xAD, 0x9C, 0xF4, \ + 0xEB, 0xD4, 0xF8, 0x90) /* Corrected Platform Error */ #define CPER_NOTIFY_CPE \ - GUID_INIT(0x4E292F96, 0xD843, 0x4a55, 0xA8, 0xC2, 0xD4, 0x81, \ - 0xF2, 0x7E, 0xBE, 0xEE) + UUID_LE(0x4E292F96, 0xD843, 0x4a55, 0xA8, 0xC2, 0xD4, 0x81, \ + 0xF2, 0x7E, 0xBE, 0xEE) /* Machine Check Exception */ #define CPER_NOTIFY_MCE \ - GUID_INIT(0xE8F56FFE, 0x919C, 0x4cc5, 0xBA, 0x88, 0x65, 0xAB, \ - 0xE1, 0x49, 0x13, 0xBB) + UUID_LE(0xE8F56FFE, 0x919C, 0x4cc5, 0xBA, 0x88, 0x65, 0xAB, \ + 0xE1, 0x49, 0x13, 0xBB) /* PCI Express Error */ #define CPER_NOTIFY_PCIE \ - GUID_INIT(0xCF93C01F, 0x1A16, 0x4dfc, 0xB8, 0xBC, 0x9C, 0x4D, \ - 0xAF, 0x67, 0xC1, 0x04) + UUID_LE(0xCF93C01F, 0x1A16, 0x4dfc, 0xB8, 0xBC, 0x9C, 0x4D, \ + 0xAF, 0x67, 0xC1, 0x04) /* INIT Record (for IPF) */ #define CPER_NOTIFY_INIT \ - GUID_INIT(0xCC5263E8, 0x9308, 0x454a, 0x89, 0xD0, 0x34, 0x0B, \ - 0xD3, 0x9B, 0xC9, 0x8E) + UUID_LE(0xCC5263E8, 0x9308, 0x454a, 0x89, 0xD0, 0x34, 0x0B, \ + 0xD3, 0x9B, 0xC9, 0x8E) /* Non-Maskable Interrupt */ #define CPER_NOTIFY_NMI \ - GUID_INIT(0x5BAD89FF, 0xB7E6, 0x42c9, 0x81, 0x4A, 0xCF, 0x24, \ - 0x85, 0xD6, 0xE9, 0x8A) + UUID_LE(0x5BAD89FF, 0xB7E6, 0x42c9, 0x81, 0x4A, 0xCF, 0x24, \ + 0x85, 0xD6, 0xE9, 0x8A) /* BOOT Error Record */ #define CPER_NOTIFY_BOOT \ - GUID_INIT(0x3D61A466, 0xAB40, 0x409a, 0xA6, 0x98, 0xF3, 0x62, \ - 0xD4, 0x64, 0xB3, 0x8F) + UUID_LE(0x3D61A466, 0xAB40, 0x409a, 0xA6, 0x98, 0xF3, 0x62, \ + 0xD4, 0x64, 0xB3, 0x8F) /* DMA Remapping Error */ #define CPER_NOTIFY_DMAR \ - GUID_INIT(0x667DD791, 0xC6B3, 0x4c27, 0x8A, 0x6B, 0x0F, 0x8E, \ - 0x72, 0x2D, 0xEB, 0x41) + UUID_LE(0x667DD791, 0xC6B3, 0x4c27, 0x8A, 0x6B, 0x0F, 0x8E, \ + 0x72, 0x2D, 0xEB, 0x41) /* * Flags bits 
definitions for flags in struct cper_record_header @@ -107,11 +122,14 @@ enum { #define CPER_SEC_REV 0x0100 /* - * Validation bits definition for validation_bits in struct + * Validation bits difinition for validation_bits in struct * cper_section_descriptor. If set, corresponding fields in struct * cper_section_descriptor contain valid information. + * + * corresponds fru_id */ #define CPER_SEC_VALID_FRU_ID 0x1 +/* corresponds fru_text */ #define CPER_SEC_VALID_FRU_TEXT 0x2 /* @@ -147,56 +165,51 @@ enum { /* * Section type definitions, used in section_type field in struct - * cper_section_descriptor. These UUIDs are defined in the UEFI spec - * v2.7, sec N.2.2. + * cper_section_descriptor + * + * Processor Generic */ - -/* Processor Generic */ #define CPER_SEC_PROC_GENERIC \ - GUID_INIT(0x9876CCAD, 0x47B4, 0x4bdb, 0xB6, 0x5E, 0x16, 0xF1, \ - 0x93, 0xC4, 0xF3, 0xDB) + UUID_LE(0x9876CCAD, 0x47B4, 0x4bdb, 0xB6, 0x5E, 0x16, 0xF1, \ + 0x93, 0xC4, 0xF3, 0xDB) /* Processor Specific: X86/X86_64 */ #define CPER_SEC_PROC_IA \ - GUID_INIT(0xDC3EA0B0, 0xA144, 0x4797, 0xB9, 0x5B, 0x53, 0xFA, \ - 0x24, 0x2B, 0x6E, 0x1D) + UUID_LE(0xDC3EA0B0, 0xA144, 0x4797, 0xB9, 0x5B, 0x53, 0xFA, \ + 0x24, 0x2B, 0x6E, 0x1D) /* Processor Specific: IA64 */ #define CPER_SEC_PROC_IPF \ - GUID_INIT(0xE429FAF1, 0x3CB7, 0x11D4, 0x0B, 0xCA, 0x07, 0x00, \ - 0x80, 0xC7, 0x3C, 0x88, 0x81) -/* Processor Specific: ARM */ -#define CPER_SEC_PROC_ARM \ - GUID_INIT(0xE19E3D16, 0xBC11, 0x11E4, 0x9C, 0xAA, 0xC2, 0x05, \ - 0x1D, 0x5D, 0x46, 0xB0) + UUID_LE(0xE429FAF1, 0x3CB7, 0x11D4, 0x0B, 0xCA, 0x07, 0x00, \ + 0x80, 0xC7, 0x3C, 0x88, 0x81) /* Platform Memory */ #define CPER_SEC_PLATFORM_MEM \ - GUID_INIT(0xA5BC1114, 0x6F64, 0x4EDE, 0xB8, 0x63, 0x3E, 0x83, \ - 0xED, 0x7C, 0x83, 0xB1) + UUID_LE(0xA5BC1114, 0x6F64, 0x4EDE, 0xB8, 0x63, 0x3E, 0x83, \ + 0xED, 0x7C, 0x83, 0xB1) #define CPER_SEC_PCIE \ - GUID_INIT(0xD995E954, 0xBBC1, 0x430F, 0xAD, 0x91, 0xB4, 0x4D, \ - 0xCB, 0x3C, 0x6F, 0x35) + UUID_LE(0xD995E954, 
0xBBC1, 0x430F, 0xAD, 0x91, 0xB4, 0x4D, \ + 0xCB, 0x3C, 0x6F, 0x35) /* Firmware Error Record Reference */ #define CPER_SEC_FW_ERR_REC_REF \ - GUID_INIT(0x81212A96, 0x09ED, 0x4996, 0x94, 0x71, 0x8D, 0x72, \ - 0x9C, 0x8E, 0x69, 0xED) + UUID_LE(0x81212A96, 0x09ED, 0x4996, 0x94, 0x71, 0x8D, 0x72, \ + 0x9C, 0x8E, 0x69, 0xED) /* PCI/PCI-X Bus */ #define CPER_SEC_PCI_X_BUS \ - GUID_INIT(0xC5753963, 0x3B84, 0x4095, 0xBF, 0x78, 0xED, 0xDA, \ - 0xD3, 0xF9, 0xC9, 0xDD) + UUID_LE(0xC5753963, 0x3B84, 0x4095, 0xBF, 0x78, 0xED, 0xDA, \ + 0xD3, 0xF9, 0xC9, 0xDD) /* PCI Component/Device */ #define CPER_SEC_PCI_DEV \ - GUID_INIT(0xEB5E4685, 0xCA66, 0x4769, 0xB6, 0xA2, 0x26, 0x06, \ - 0x8B, 0x00, 0x13, 0x26) + UUID_LE(0xEB5E4685, 0xCA66, 0x4769, 0xB6, 0xA2, 0x26, 0x06, \ + 0x8B, 0x00, 0x13, 0x26) #define CPER_SEC_DMAR_GENERIC \ - GUID_INIT(0x5B51FEF7, 0xC79D, 0x4434, 0x8F, 0x1B, 0xAA, 0x62, \ - 0xDE, 0x3E, 0x2C, 0x64) + UUID_LE(0x5B51FEF7, 0xC79D, 0x4434, 0x8F, 0x1B, 0xAA, 0x62, \ + 0xDE, 0x3E, 0x2C, 0x64) /* Intel VT for Directed I/O specific DMAr */ #define CPER_SEC_DMAR_VT \ - GUID_INIT(0x71761D37, 0x32B2, 0x45cd, 0xA7, 0xD0, 0xB0, 0xFE, \ - 0xDD, 0x93, 0xE8, 0xCF) + UUID_LE(0x71761D37, 0x32B2, 0x45cd, 0xA7, 0xD0, 0xB0, 0xFE, \ + 0xDD, 0x93, 0xE8, 0xCF) /* IOMMU specific DMAr */ #define CPER_SEC_DMAR_IOMMU \ - GUID_INIT(0x036F84E1, 0x7F37, 0x428c, 0xA7, 0x9E, 0x57, 0x5F, \ - 0xDF, 0xAA, 0x84, 0xEC) + UUID_LE(0x036F84E1, 0x7F37, 0x428c, 0xA7, 0x9E, 0x57, 0x5F, \ + 0xDF, 0xAA, 0x84, 0xEC) #define CPER_PROC_VALID_TYPE 0x0001 #define CPER_PROC_VALID_ISA 0x0002 @@ -230,18 +243,6 @@ enum { #define CPER_MEM_VALID_RANK_NUMBER 0x8000 #define CPER_MEM_VALID_CARD_HANDLE 0x10000 #define CPER_MEM_VALID_MODULE_HANDLE 0x20000 -#define CPER_MEM_VALID_ROW_EXT 0x40000 -#define CPER_MEM_VALID_BANK_GROUP 0x80000 -#define CPER_MEM_VALID_BANK_ADDRESS 0x100000 -#define CPER_MEM_VALID_CHIP_ID 0x200000 - -#define CPER_MEM_EXT_ROW_MASK 0x3 -#define CPER_MEM_EXT_ROW_SHIFT 16 - -#define 
CPER_MEM_BANK_ADDRESS_MASK 0xff -#define CPER_MEM_BANK_GROUP_SHIFT 8 - -#define CPER_MEM_CHIP_ID_SHIFT 5 #define CPER_PCIE_VALID_PORT_TYPE 0x0001 #define CPER_PCIE_VALID_VERSION 0x0002 @@ -254,307 +255,191 @@ enum { #define CPER_PCIE_SLOT_SHIFT 3 -#define CPER_ARM_VALID_MPIDR BIT(0) -#define CPER_ARM_VALID_AFFINITY_LEVEL BIT(1) -#define CPER_ARM_VALID_RUNNING_STATE BIT(2) -#define CPER_ARM_VALID_VENDOR_INFO BIT(3) - -#define CPER_ARM_INFO_VALID_MULTI_ERR BIT(0) -#define CPER_ARM_INFO_VALID_FLAGS BIT(1) -#define CPER_ARM_INFO_VALID_ERR_INFO BIT(2) -#define CPER_ARM_INFO_VALID_VIRT_ADDR BIT(3) -#define CPER_ARM_INFO_VALID_PHYSICAL_ADDR BIT(4) - -#define CPER_ARM_INFO_FLAGS_FIRST BIT(0) -#define CPER_ARM_INFO_FLAGS_LAST BIT(1) -#define CPER_ARM_INFO_FLAGS_PROPAGATED BIT(2) -#define CPER_ARM_INFO_FLAGS_OVERFLOW BIT(3) - -#define CPER_ARM_CACHE_ERROR 0 -#define CPER_ARM_TLB_ERROR 1 -#define CPER_ARM_BUS_ERROR 2 -#define CPER_ARM_VENDOR_ERROR 3 -#define CPER_ARM_MAX_TYPE CPER_ARM_VENDOR_ERROR - -#define CPER_ARM_ERR_VALID_TRANSACTION_TYPE BIT(0) -#define CPER_ARM_ERR_VALID_OPERATION_TYPE BIT(1) -#define CPER_ARM_ERR_VALID_LEVEL BIT(2) -#define CPER_ARM_ERR_VALID_PROC_CONTEXT_CORRUPT BIT(3) -#define CPER_ARM_ERR_VALID_CORRECTED BIT(4) -#define CPER_ARM_ERR_VALID_PRECISE_PC BIT(5) -#define CPER_ARM_ERR_VALID_RESTARTABLE_PC BIT(6) -#define CPER_ARM_ERR_VALID_PARTICIPATION_TYPE BIT(7) -#define CPER_ARM_ERR_VALID_TIME_OUT BIT(8) -#define CPER_ARM_ERR_VALID_ADDRESS_SPACE BIT(9) -#define CPER_ARM_ERR_VALID_MEM_ATTRIBUTES BIT(10) -#define CPER_ARM_ERR_VALID_ACCESS_MODE BIT(11) - -#define CPER_ARM_ERR_TRANSACTION_SHIFT 16 -#define CPER_ARM_ERR_TRANSACTION_MASK GENMASK(1,0) -#define CPER_ARM_ERR_OPERATION_SHIFT 18 -#define CPER_ARM_ERR_OPERATION_MASK GENMASK(3,0) -#define CPER_ARM_ERR_LEVEL_SHIFT 22 -#define CPER_ARM_ERR_LEVEL_MASK GENMASK(2,0) -#define CPER_ARM_ERR_PC_CORRUPT_SHIFT 25 -#define CPER_ARM_ERR_PC_CORRUPT_MASK GENMASK(0,0) -#define CPER_ARM_ERR_CORRECTED_SHIFT 26 
-#define CPER_ARM_ERR_CORRECTED_MASK GENMASK(0,0) -#define CPER_ARM_ERR_PRECISE_PC_SHIFT 27 -#define CPER_ARM_ERR_PRECISE_PC_MASK GENMASK(0,0) -#define CPER_ARM_ERR_RESTARTABLE_PC_SHIFT 28 -#define CPER_ARM_ERR_RESTARTABLE_PC_MASK GENMASK(0,0) -#define CPER_ARM_ERR_PARTICIPATION_TYPE_SHIFT 29 -#define CPER_ARM_ERR_PARTICIPATION_TYPE_MASK GENMASK(1,0) -#define CPER_ARM_ERR_TIME_OUT_SHIFT 31 -#define CPER_ARM_ERR_TIME_OUT_MASK GENMASK(0,0) -#define CPER_ARM_ERR_ADDRESS_SPACE_SHIFT 32 -#define CPER_ARM_ERR_ADDRESS_SPACE_MASK GENMASK(1,0) -#define CPER_ARM_ERR_MEM_ATTRIBUTES_SHIFT 34 -#define CPER_ARM_ERR_MEM_ATTRIBUTES_MASK GENMASK(8,0) -#define CPER_ARM_ERR_ACCESS_MODE_SHIFT 43 -#define CPER_ARM_ERR_ACCESS_MODE_MASK GENMASK(0,0) - /* * All tables and structs must be byte-packed to match CPER * specification, since the tables are provided by the system BIOS */ #pragma pack(1) -/* Record Header, UEFI v2.7 sec N.2.1 */ struct cper_record_header { char signature[CPER_SIG_SIZE]; /* must be CPER_SIG_RECORD */ - u16 revision; /* must be CPER_RECORD_REV */ - u32 signature_end; /* must be CPER_SIG_END */ - u16 section_count; - u32 error_severity; - u32 validation_bits; - u32 record_length; - u64 timestamp; - guid_t platform_id; - guid_t partition_id; - guid_t creator_id; - guid_t notification_type; - u64 record_id; - u32 flags; - u64 persistence_information; - u8 reserved[12]; /* must be zero */ + __u16 revision; /* must be CPER_RECORD_REV */ + __u32 signature_end; /* must be CPER_SIG_END */ + __u16 section_count; + __u32 error_severity; + __u32 validation_bits; + __u32 record_length; + __u64 timestamp; + uuid_le platform_id; + uuid_le partition_id; + uuid_le creator_id; + uuid_le notification_type; + __u64 record_id; + __u32 flags; + __u64 persistence_information; + __u8 reserved[12]; /* must be zero */ }; -/* Section Descriptor, UEFI v2.7 sec N.2.2 */ struct cper_section_descriptor { - u32 section_offset; /* Offset in bytes of the + __u32 section_offset; /* Offset in bytes 
of the * section body from the base * of the record header */ - u32 section_length; - u16 revision; /* must be CPER_RECORD_REV */ - u8 validation_bits; - u8 reserved; /* must be zero */ - u32 flags; - guid_t section_type; - guid_t fru_id; - u32 section_severity; - u8 fru_text[20]; + __u32 section_length; + __u16 revision; /* must be CPER_RECORD_REV */ + __u8 validation_bits; + __u8 reserved; /* must be zero */ + __u32 flags; + uuid_le section_type; + uuid_le fru_id; + __u32 section_severity; + __u8 fru_text[20]; }; -/* Generic Processor Error Section, UEFI v2.7 sec N.2.4.1 */ +/* Generic Processor Error Section */ struct cper_sec_proc_generic { - u64 validation_bits; - u8 proc_type; - u8 proc_isa; - u8 proc_error_type; - u8 operation; - u8 flags; - u8 level; - u16 reserved; - u64 cpu_version; + __u64 validation_bits; + __u8 proc_type; + __u8 proc_isa; + __u8 proc_error_type; + __u8 operation; + __u8 flags; + __u8 level; + __u16 reserved; + __u64 cpu_version; char cpu_brand[128]; - u64 proc_id; - u64 target_addr; - u64 requestor_id; - u64 responder_id; - u64 ip; + __u64 proc_id; + __u64 target_addr; + __u64 requestor_id; + __u64 responder_id; + __u64 ip; }; -/* IA32/X64 Processor Error Section, UEFI v2.7 sec N.2.4.2 */ +/* IA32/X64 Processor Error Section */ struct cper_sec_proc_ia { - u64 validation_bits; - u64 lapic_id; - u8 cpuid[48]; + __u64 validation_bits; + __u8 lapic_id; + __u8 cpuid[48]; }; -/* IA32/X64 Processor Error Information Structure, UEFI v2.7 sec N.2.4.2.1 */ +/* IA32/X64 Processor Error Information Structure */ struct cper_ia_err_info { - guid_t err_type; - u64 validation_bits; - u64 check_info; - u64 target_id; - u64 requestor_id; - u64 responder_id; - u64 ip; + uuid_le err_type; + __u64 validation_bits; + __u64 check_info; + __u64 target_id; + __u64 requestor_id; + __u64 responder_id; + __u64 ip; }; -/* IA32/X64 Processor Context Information Structure, UEFI v2.7 sec N.2.4.2.2 */ +/* IA32/X64 Processor Context Information Structure */ struct 
cper_ia_proc_ctx { - u16 reg_ctx_type; - u16 reg_arr_size; - u32 msr_addr; - u64 mm_reg_addr; + __u16 reg_ctx_type; + __u16 reg_arr_size; + __u32 msr_addr; + __u64 mm_reg_addr; }; -/* ARM Processor Error Section, UEFI v2.7 sec N.2.4.4 */ -struct cper_sec_proc_arm { - u32 validation_bits; - u16 err_info_num; /* Number of Processor Error Info */ - u16 context_info_num; /* Number of Processor Context Info Records*/ - u32 section_length; - u8 affinity_level; - u8 reserved[3]; /* must be zero */ - u64 mpidr; - u64 midr; - u32 running_state; /* Bit 0 set - Processor running. PSCI = 0 */ - u32 psci_state; -}; - -/* ARM Processor Error Information Structure, UEFI v2.7 sec N.2.4.4.1 */ -struct cper_arm_err_info { - u8 version; - u8 length; - u16 validation_bits; - u8 type; - u16 multiple_error; - u8 flags; - u64 error_info; - u64 virt_fault_addr; - u64 physical_fault_addr; -}; - -/* ARM Processor Context Information Structure, UEFI v2.7 sec N.2.4.4.2 */ -struct cper_arm_ctx_info { - u16 version; - u16 type; - u32 size; -}; - -/* Old Memory Error Section, UEFI v2.1, v2.2 */ +/* Old Memory Error Section UEFI 2.1, 2.2 */ struct cper_sec_mem_err_old { - u64 validation_bits; - u64 error_status; - u64 physical_addr; - u64 physical_addr_mask; - u16 node; - u16 card; - u16 module; - u16 bank; - u16 device; - u16 row; - u16 column; - u16 bit_pos; - u64 requestor_id; - u64 responder_id; - u64 target_id; - u8 error_type; + __u64 validation_bits; + __u64 error_status; + __u64 physical_addr; + __u64 physical_addr_mask; + __u16 node; + __u16 card; + __u16 module; + __u16 bank; + __u16 device; + __u16 row; + __u16 column; + __u16 bit_pos; + __u64 requestor_id; + __u64 responder_id; + __u64 target_id; + __u8 error_type; }; -/* Memory Error Section (UEFI >= v2.3), UEFI v2.8 sec N.2.5 */ +/* Memory Error Section UEFI >= 2.3 */ struct cper_sec_mem_err { - u64 validation_bits; - u64 error_status; - u64 physical_addr; - u64 physical_addr_mask; - u16 node; - u16 card; - u16 module; - u16 bank; - 
u16 device; - u16 row; - u16 column; - u16 bit_pos; - u64 requestor_id; - u64 responder_id; - u64 target_id; - u8 error_type; - u8 extended; - u16 rank; - u16 mem_array_handle; /* "card handle" in UEFI 2.4 */ - u16 mem_dev_handle; /* "module handle" in UEFI 2.4 */ + __u64 validation_bits; + __u64 error_status; + __u64 physical_addr; + __u64 physical_addr_mask; + __u16 node; + __u16 card; + __u16 module; + __u16 bank; + __u16 device; + __u16 row; + __u16 column; + __u16 bit_pos; + __u64 requestor_id; + __u64 responder_id; + __u64 target_id; + __u8 error_type; + __u8 reserved; + __u16 rank; + __u16 mem_array_handle; /* card handle in UEFI 2.4 */ + __u16 mem_dev_handle; /* module handle in UEFI 2.4 */ }; struct cper_mem_err_compact { - u64 validation_bits; - u16 node; - u16 card; - u16 module; - u16 bank; - u16 device; - u16 row; - u16 column; - u16 bit_pos; - u64 requestor_id; - u64 responder_id; - u64 target_id; - u16 rank; - u16 mem_array_handle; - u16 mem_dev_handle; - u8 extended; + __u64 validation_bits; + __u16 node; + __u16 card; + __u16 module; + __u16 bank; + __u16 device; + __u16 row; + __u16 column; + __u16 bit_pos; + __u64 requestor_id; + __u64 responder_id; + __u64 target_id; + __u16 rank; + __u16 mem_array_handle; + __u16 mem_dev_handle; }; -static inline u32 cper_get_mem_extension(u64 mem_valid, u8 mem_extended) -{ - if (!(mem_valid & CPER_MEM_VALID_ROW_EXT)) - return 0; - return (mem_extended & CPER_MEM_EXT_ROW_MASK) << CPER_MEM_EXT_ROW_SHIFT; -} - -/* PCI Express Error Section, UEFI v2.7 sec N.2.7 */ struct cper_sec_pcie { - u64 validation_bits; - u32 port_type; + __u64 validation_bits; + __u32 port_type; struct { - u8 minor; - u8 major; - u8 reserved[2]; + __u8 minor; + __u8 major; + __u8 reserved[2]; } version; - u16 command; - u16 status; - u32 reserved; + __u16 command; + __u16 status; + __u32 reserved; struct { - u16 vendor_id; - u16 device_id; - u8 class_code[3]; - u8 function; - u8 device; - u16 segment; - u8 bus; - u8 secondary_bus; - u16 
slot; - u8 reserved; + __u16 vendor_id; + __u16 device_id; + __u8 class_code[3]; + __u8 function; + __u8 device; + __u16 segment; + __u8 bus; + __u8 secondary_bus; + __u16 slot; + __u8 reserved; } device_id; struct { - u32 lower; - u32 upper; + __u32 lower; + __u32 upper; } serial_number; struct { - u16 secondary_status; - u16 control; + __u16 secondary_status; + __u16 control; } bridge; - u8 capability[60]; - u8 aer_info[96]; -}; - -/* Firmware Error Record Reference, UEFI v2.7 sec N.2.10 */ -struct cper_sec_fw_err_rec_ref { - u8 record_type; - u8 revision; - u8 reserved[6]; - u64 record_identifier; - guid_t record_identifier_guid; + __u8 capability[60]; + __u8 aer_info[96]; }; /* Reset to default packing */ #pragma pack() -extern const char *const cper_proc_error_type_strs[4]; - u64 cper_next_record_id(void); const char *cper_severity_str(unsigned int); const char *cper_mem_err_type_str(unsigned int); @@ -564,9 +449,5 @@ void cper_mem_err_pack(const struct cper_sec_mem_err *, struct cper_mem_err_compact *); const char *cper_mem_err_unpack(struct trace_seq *, struct cper_mem_err_compact *); -void cper_print_proc_arm(const char *pfx, - const struct cper_sec_proc_arm *proc); -void cper_print_proc_ia(const char *pfx, - const struct cper_sec_proc_ia *proc); #endif diff --git a/include/linux/cpu.h b/include/linux/cpu.h index 9cf51e41e6..e571128ad9 100644 --- a/include/linux/cpu.h +++ b/include/linux/cpu.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ /* * include/linux/cpu.h - generic cpu definition * @@ -30,9 +29,7 @@ struct cpu { }; extern void boot_cpu_init(void); -extern void boot_cpu_hotplug_init(void); -extern void cpu_init(void); -extern void trap_init(void); +extern void boot_cpu_state_init(void); extern int register_cpu(struct cpu *cpu, int num); extern struct device *get_cpu_device(unsigned cpu); @@ -47,25 +44,6 @@ extern void cpu_remove_dev_attr(struct device_attribute *attr); extern int cpu_add_dev_attr_group(struct attribute_group *attrs); extern 
void cpu_remove_dev_attr_group(struct attribute_group *attrs); -extern ssize_t cpu_show_meltdown(struct device *dev, - struct device_attribute *attr, char *buf); -extern ssize_t cpu_show_spectre_v1(struct device *dev, - struct device_attribute *attr, char *buf); -extern ssize_t cpu_show_spectre_v2(struct device *dev, - struct device_attribute *attr, char *buf); -extern ssize_t cpu_show_spec_store_bypass(struct device *dev, - struct device_attribute *attr, char *buf); -extern ssize_t cpu_show_l1tf(struct device *dev, - struct device_attribute *attr, char *buf); -extern ssize_t cpu_show_mds(struct device *dev, - struct device_attribute *attr, char *buf); -extern ssize_t cpu_show_tsx_async_abort(struct device *dev, - struct device_attribute *attr, - char *buf); -extern ssize_t cpu_show_itlb_multihit(struct device *dev, - struct device_attribute *attr, char *buf); -extern ssize_t cpu_show_srbds(struct device *dev, struct device_attribute *attr, char *buf); - extern __printf(4, 5) struct device *cpu_device_create(struct device *parent, void *drvdata, const struct attribute_group **groups, @@ -75,31 +53,107 @@ extern void unregister_cpu(struct cpu *cpu); extern ssize_t arch_cpu_probe(const char *, size_t); extern ssize_t arch_cpu_release(const char *, size_t); #endif +struct notifier_block; -/* - * These states are not related to the core CPU hotplug mechanism. 
They are - * used by various (sub)architectures to track internal state +#define CPU_ONLINE 0x0002 /* CPU (unsigned)v is up */ +#define CPU_UP_PREPARE 0x0003 /* CPU (unsigned)v coming up */ +#define CPU_UP_CANCELED 0x0004 /* CPU (unsigned)v NOT coming up */ +#define CPU_DOWN_PREPARE 0x0005 /* CPU (unsigned)v going down */ +#define CPU_DOWN_FAILED 0x0006 /* CPU (unsigned)v NOT going down */ +#define CPU_DEAD 0x0007 /* CPU (unsigned)v dead */ +#define CPU_POST_DEAD 0x0009 /* CPU (unsigned)v dead, cpu_hotplug + * lock is dropped */ +#define CPU_BROKEN 0x000B /* CPU (unsigned)v did not die properly, + * perhaps due to preemption. */ + +/* Used for CPU hotplug events occurring while tasks are frozen due to a suspend + * operation in progress */ -#define CPU_ONLINE 0x0002 /* CPU is up */ -#define CPU_UP_PREPARE 0x0003 /* CPU coming up */ -#define CPU_DEAD 0x0007 /* CPU dead */ -#define CPU_DEAD_FROZEN 0x0008 /* CPU timed out on unplug */ -#define CPU_POST_DEAD 0x0009 /* CPU successfully unplugged */ -#define CPU_BROKEN 0x000B /* CPU did not die properly */ +#define CPU_TASKS_FROZEN 0x0010 + +#define CPU_ONLINE_FROZEN (CPU_ONLINE | CPU_TASKS_FROZEN) +#define CPU_UP_PREPARE_FROZEN (CPU_UP_PREPARE | CPU_TASKS_FROZEN) +#define CPU_UP_CANCELED_FROZEN (CPU_UP_CANCELED | CPU_TASKS_FROZEN) +#define CPU_DOWN_PREPARE_FROZEN (CPU_DOWN_PREPARE | CPU_TASKS_FROZEN) +#define CPU_DOWN_FAILED_FROZEN (CPU_DOWN_FAILED | CPU_TASKS_FROZEN) +#define CPU_DEAD_FROZEN (CPU_DEAD | CPU_TASKS_FROZEN) #ifdef CONFIG_SMP extern bool cpuhp_tasks_frozen; -int add_cpu(unsigned int cpu); -int cpu_device_up(struct device *dev); +/* Need to know about CPUs going up/down? 
*/ +#if defined(CONFIG_HOTPLUG_CPU) || !defined(MODULE) +#define cpu_notifier(fn, pri) { \ + static struct notifier_block fn##_nb = \ + { .notifier_call = fn, .priority = pri }; \ + register_cpu_notifier(&fn##_nb); \ +} + +#define __cpu_notifier(fn, pri) { \ + static struct notifier_block fn##_nb = \ + { .notifier_call = fn, .priority = pri }; \ + __register_cpu_notifier(&fn##_nb); \ +} + +extern int register_cpu_notifier(struct notifier_block *nb); +extern int __register_cpu_notifier(struct notifier_block *nb); +extern void unregister_cpu_notifier(struct notifier_block *nb); +extern void __unregister_cpu_notifier(struct notifier_block *nb); + +#else /* #if defined(CONFIG_HOTPLUG_CPU) || !defined(MODULE) */ +#define cpu_notifier(fn, pri) do { (void)(fn); } while (0) +#define __cpu_notifier(fn, pri) do { (void)(fn); } while (0) + +static inline int register_cpu_notifier(struct notifier_block *nb) +{ + return 0; +} + +static inline int __register_cpu_notifier(struct notifier_block *nb) +{ + return 0; +} + +static inline void unregister_cpu_notifier(struct notifier_block *nb) +{ +} + +static inline void __unregister_cpu_notifier(struct notifier_block *nb) +{ +} +#endif + +int cpu_up(unsigned int cpu); void notify_cpu_starting(unsigned int cpu); extern void cpu_maps_update_begin(void); extern void cpu_maps_update_done(void); -int bringup_hibernate_cpu(unsigned int sleep_cpu); -void bringup_nonboot_cpus(unsigned int setup_max_cpus); + +#define cpu_notifier_register_begin cpu_maps_update_begin +#define cpu_notifier_register_done cpu_maps_update_done #else /* CONFIG_SMP */ #define cpuhp_tasks_frozen 0 +#define cpu_notifier(fn, pri) do { (void)(fn); } while (0) +#define __cpu_notifier(fn, pri) do { (void)(fn); } while (0) + +static inline int register_cpu_notifier(struct notifier_block *nb) +{ + return 0; +} + +static inline int __register_cpu_notifier(struct notifier_block *nb) +{ + return 0; +} + +static inline void unregister_cpu_notifier(struct notifier_block *nb) +{ 
+} + +static inline void __unregister_cpu_notifier(struct notifier_block *nb) +{ +} + static inline void cpu_maps_update_begin(void) { } @@ -108,63 +162,62 @@ static inline void cpu_maps_update_done(void) { } -static inline int add_cpu(unsigned int cpu) { return 0;} +static inline void cpu_notifier_register_begin(void) +{ +} + +static inline void cpu_notifier_register_done(void) +{ +} #endif /* CONFIG_SMP */ extern struct bus_type cpu_subsys; -extern int lockdep_is_cpus_held(void); - #ifdef CONFIG_HOTPLUG_CPU -extern void cpus_write_lock(void); -extern void cpus_write_unlock(void); -extern void cpus_read_lock(void); -extern void cpus_read_unlock(void); -extern int cpus_read_trylock(void); -extern void lockdep_assert_cpus_held(void); +/* Stop CPUs going up and down. */ + +extern void cpu_hotplug_begin(void); +extern void cpu_hotplug_done(void); +extern void get_online_cpus(void); +extern void put_online_cpus(void); extern void cpu_hotplug_disable(void); extern void cpu_hotplug_enable(void); +#define hotcpu_notifier(fn, pri) cpu_notifier(fn, pri) +#define __hotcpu_notifier(fn, pri) __cpu_notifier(fn, pri) +#define register_hotcpu_notifier(nb) register_cpu_notifier(nb) +#define __register_hotcpu_notifier(nb) __register_cpu_notifier(nb) +#define unregister_hotcpu_notifier(nb) unregister_cpu_notifier(nb) +#define __unregister_hotcpu_notifier(nb) __unregister_cpu_notifier(nb) void clear_tasks_mm_cpumask(int cpu); -int remove_cpu(unsigned int cpu); -int cpu_device_down(struct device *dev); -extern void smp_shutdown_nonboot_cpus(unsigned int primary_cpu); +int cpu_down(unsigned int cpu); -#else /* CONFIG_HOTPLUG_CPU */ +#else /* CONFIG_HOTPLUG_CPU */ -static inline void cpus_write_lock(void) { } -static inline void cpus_write_unlock(void) { } -static inline void cpus_read_lock(void) { } -static inline void cpus_read_unlock(void) { } -static inline int cpus_read_trylock(void) { return true; } -static inline void lockdep_assert_cpus_held(void) { } -static inline void 
cpu_hotplug_disable(void) { } -static inline void cpu_hotplug_enable(void) { } -static inline int remove_cpu(unsigned int cpu) { return -EPERM; } -static inline void smp_shutdown_nonboot_cpus(unsigned int primary_cpu) { } -#endif /* !CONFIG_HOTPLUG_CPU */ +static inline void cpu_hotplug_begin(void) {} +static inline void cpu_hotplug_done(void) {} +#define get_online_cpus() do { } while (0) +#define put_online_cpus() do { } while (0) +#define cpu_hotplug_disable() do { } while (0) +#define cpu_hotplug_enable() do { } while (0) +#define hotcpu_notifier(fn, pri) do { (void)(fn); } while (0) +#define __hotcpu_notifier(fn, pri) do { (void)(fn); } while (0) +/* These aren't inline functions due to a GCC bug. */ +#define register_hotcpu_notifier(nb) ({ (void)(nb); 0; }) +#define __register_hotcpu_notifier(nb) ({ (void)(nb); 0; }) +#define unregister_hotcpu_notifier(nb) ({ (void)(nb); }) +#define __unregister_hotcpu_notifier(nb) ({ (void)(nb); }) +#endif /* CONFIG_HOTPLUG_CPU */ #ifdef CONFIG_PM_SLEEP_SMP extern int freeze_secondary_cpus(int primary); -extern void thaw_secondary_cpus(void); - -static inline int suspend_disable_secondary_cpus(void) +static inline int disable_nonboot_cpus(void) { - int cpu = 0; - - if (IS_ENABLED(CONFIG_PM_SLEEP_SMP_NONZERO_CPU)) - cpu = -1; - - return freeze_secondary_cpus(cpu); + return freeze_secondary_cpus(0); } -static inline void suspend_enable_secondary_cpus(void) -{ - return thaw_secondary_cpus(); -} - +extern void enable_nonboot_cpus(void); #else /* !CONFIG_PM_SLEEP_SMP */ -static inline void thaw_secondary_cpus(void) {} -static inline int suspend_disable_secondary_cpus(void) { return 0; } -static inline void suspend_enable_secondary_cpus(void) { } +static inline int disable_nonboot_cpus(void) { return 0; } +static inline void enable_nonboot_cpus(void) {} #endif /* !CONFIG_PM_SLEEP_SMP */ void cpu_startup_entry(enum cpuhp_state state); @@ -172,7 +225,7 @@ void cpu_startup_entry(enum cpuhp_state state); void cpu_idle_poll_ctrl(bool 
enable); /* Attach to any functions which should be considered cpuidle. */ -#define __cpuidle __section(".cpuidle.text") +#define __cpuidle __attribute__((__section__(".cpuidle.text"))) bool cpu_in_idle(unsigned long pc); @@ -185,13 +238,6 @@ void arch_cpu_idle_dead(void); int cpu_report_state(int cpu); int cpu_check_up_prepare(int cpu); void cpu_set_state_online(int cpu); -void play_idle_precise(u64 duration_ns, u64 latency_ns); - -static inline void play_idle(unsigned long duration_us) -{ - play_idle_precise(duration_us * NSEC_PER_USEC, U64_MAX); -} - #ifdef CONFIG_HOTPLUG_CPU bool cpu_wait_death(unsigned int cpu, int seconds); bool cpu_report_death(void); @@ -200,31 +246,4 @@ void cpuhp_report_idle_dead(void); static inline void cpuhp_report_idle_dead(void) { } #endif /* #ifdef CONFIG_HOTPLUG_CPU */ -enum cpuhp_smt_control { - CPU_SMT_ENABLED, - CPU_SMT_DISABLED, - CPU_SMT_FORCE_DISABLED, - CPU_SMT_NOT_SUPPORTED, - CPU_SMT_NOT_IMPLEMENTED, -}; - -#if defined(CONFIG_SMP) && defined(CONFIG_HOTPLUG_SMT) -extern enum cpuhp_smt_control cpu_smt_control; -extern void cpu_smt_disable(bool force); -extern void cpu_smt_check_topology(void); -extern bool cpu_smt_possible(void); -extern int cpuhp_smt_enable(void); -extern int cpuhp_smt_disable(enum cpuhp_smt_control ctrlval); -#else -# define cpu_smt_control (CPU_SMT_NOT_IMPLEMENTED) -static inline void cpu_smt_disable(bool force) { } -static inline void cpu_smt_check_topology(void) { } -static inline bool cpu_smt_possible(void) { return false; } -static inline int cpuhp_smt_enable(void) { return 0; } -static inline int cpuhp_smt_disable(enum cpuhp_smt_control ctrlval) { return 0; } -#endif - -extern bool cpu_mitigations_off(void); -extern bool cpu_mitigations_auto_nosmt(void); - #endif /* _LINUX_CPU_H_ */ diff --git a/include/linux/cpu_cooling.h b/include/linux/cpu_cooling.h index a3bdc8a98f..c156f50827 100644 --- a/include/linux/cpu_cooling.h +++ b/include/linux/cpu_cooling.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: 
GPL-2.0-only */ /* * linux/include/linux/cpu_cooling.h * @@ -6,6 +5,18 @@ * Copyright (C) 2012 Amit Daniel * * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; version 2 of the License. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write to the Free Software Foundation, Inc., + * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. * * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ */ @@ -17,15 +28,53 @@ #include #include -struct cpufreq_policy; +typedef int (*get_static_t)(cpumask_t *cpumask, int interval, + unsigned long voltage, u32 *power); -#ifdef CONFIG_CPU_FREQ_THERMAL +#ifdef CONFIG_CPU_THERMAL /** * cpufreq_cooling_register - function to create cpufreq cooling device. - * @policy: cpufreq policy. + * @clip_cpus: cpumask of cpus where the frequency constraints will happen */ struct thermal_cooling_device * -cpufreq_cooling_register(struct cpufreq_policy *policy); +cpufreq_cooling_register(const struct cpumask *clip_cpus); + +struct thermal_cooling_device * +cpufreq_power_cooling_register(const struct cpumask *clip_cpus, + u32 capacitance, get_static_t plat_static_func); + +/** + * of_cpufreq_cooling_register - create cpufreq cooling device based on DT. + * @np: a valid struct device_node to the cooling device device tree node. 
+ * @clip_cpus: cpumask of cpus where the frequency constraints will happen + */ +#ifdef CONFIG_THERMAL_OF +struct thermal_cooling_device * +of_cpufreq_cooling_register(struct device_node *np, + const struct cpumask *clip_cpus); + +struct thermal_cooling_device * +of_cpufreq_power_cooling_register(struct device_node *np, + const struct cpumask *clip_cpus, + u32 capacitance, + get_static_t plat_static_func); +#else +static inline struct thermal_cooling_device * +of_cpufreq_cooling_register(struct device_node *np, + const struct cpumask *clip_cpus) +{ + return ERR_PTR(-ENOSYS); +} + +static inline struct thermal_cooling_device * +of_cpufreq_power_cooling_register(struct device_node *np, + const struct cpumask *clip_cpus, + u32 capacitance, + get_static_t plat_static_func) +{ + return NULL; +} +#endif /** * cpufreq_cooling_unregister - function to remove cpufreq cooling device. @@ -33,41 +82,46 @@ cpufreq_cooling_register(struct cpufreq_policy *policy); */ void cpufreq_cooling_unregister(struct thermal_cooling_device *cdev); -/** - * of_cpufreq_cooling_register - create cpufreq cooling device based on DT. - * @policy: cpufreq policy. 
- */ -struct thermal_cooling_device * -of_cpufreq_cooling_register(struct cpufreq_policy *policy); - -#else /* !CONFIG_CPU_FREQ_THERMAL */ +unsigned long cpufreq_cooling_get_level(unsigned int cpu, unsigned int freq); +#else /* !CONFIG_CPU_THERMAL */ static inline struct thermal_cooling_device * -cpufreq_cooling_register(struct cpufreq_policy *policy) +cpufreq_cooling_register(const struct cpumask *clip_cpus) { return ERR_PTR(-ENOSYS); } +static inline struct thermal_cooling_device * +cpufreq_power_cooling_register(const struct cpumask *clip_cpus, + u32 capacitance, get_static_t plat_static_func) +{ + return NULL; +} + +static inline struct thermal_cooling_device * +of_cpufreq_cooling_register(struct device_node *np, + const struct cpumask *clip_cpus) +{ + return ERR_PTR(-ENOSYS); +} + +static inline struct thermal_cooling_device * +of_cpufreq_power_cooling_register(struct device_node *np, + const struct cpumask *clip_cpus, + u32 capacitance, + get_static_t plat_static_func) +{ + return NULL; +} static inline void cpufreq_cooling_unregister(struct thermal_cooling_device *cdev) { return; } - -static inline struct thermal_cooling_device * -of_cpufreq_cooling_register(struct cpufreq_policy *policy) +static inline +unsigned long cpufreq_cooling_get_level(unsigned int cpu, unsigned int freq) { - return NULL; + return THERMAL_CSTATE_INVALID; } -#endif /* CONFIG_CPU_FREQ_THERMAL */ - -struct cpuidle_driver; - -#ifdef CONFIG_CPU_IDLE_THERMAL -void cpuidle_cooling_register(struct cpuidle_driver *drv); -#else /* CONFIG_CPU_IDLE_THERMAL */ -static inline void cpuidle_cooling_register(struct cpuidle_driver *drv) -{ -} -#endif /* CONFIG_CPU_IDLE_THERMAL */ +#endif /* CONFIG_CPU_THERMAL */ #endif /* __CPU_COOLING_H__ */ diff --git a/include/linux/cpu_pm.h b/include/linux/cpu_pm.h index 552b8f9ea0..455b233dd3 100644 --- a/include/linux/cpu_pm.h +++ b/include/linux/cpu_pm.h @@ -1,9 +1,18 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright (C) 2011 Google, Inc. 
* * Author: * Colin Cross + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * */ #ifndef _LINUX_CPU_PM_H diff --git a/include/linux/cpu_rmap.h b/include/linux/cpu_rmap.h index be8aea04d0..bdd18caa6c 100644 --- a/include/linux/cpu_rmap.h +++ b/include/linux/cpu_rmap.h @@ -1,10 +1,13 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ #ifndef __LINUX_CPU_RMAP_H #define __LINUX_CPU_RMAP_H /* * cpu_rmap.c: CPU affinity reverse-map support * Copyright 2011 Solarflare Communications Inc. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation, incorporated herein by reference. */ #include @@ -28,7 +31,7 @@ struct cpu_rmap { struct { u16 index; u16 dist; - } near[]; + } near[0]; }; #define CPU_RMAP_DIST_INF 0xffff diff --git a/include/linux/cpufeature.h b/include/linux/cpufeature.h index 6aff540ee9..986c06c88d 100644 --- a/include/linux/cpufeature.h +++ b/include/linux/cpufeature.h @@ -1,6 +1,9 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright (C) 2014 Linaro Ltd. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. */ #ifndef __LINUX_CPUFEATURE_H @@ -42,7 +45,7 @@ * 'asm/cpufeature.h' of your favorite architecture. 
*/ #define module_cpu_feature_match(x, __initfunc) \ -static struct cpu_feature const __maybe_unused cpu_feature_match_ ## x[] = \ +static struct cpu_feature const cpu_feature_match_ ## x[] = \ { { .feature = cpu_feature(x) }, { } }; \ MODULE_DEVICE_TABLE(cpu, cpu_feature_match_ ## x); \ \ diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h index ff88bb3e44..6e18583497 100644 --- a/include/linux/cpufreq.h +++ b/include/linux/cpufreq.h @@ -1,23 +1,21 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * linux/include/linux/cpufreq.h * * Copyright (C) 2001 Russell King * (C) 2002 - 2003 Dominik Brodowski + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. */ #ifndef _LINUX_CPUFREQ_H #define _LINUX_CPUFREQ_H #include -#include #include #include #include #include -#include -#include -#include -#include #include #include @@ -33,7 +31,7 @@ #define CPUFREQ_ETERNAL (-1) #define CPUFREQ_NAME_LEN 16 -/* Print length for names. Extra 1 space for accommodating '\n' in prints */ +/* Print length for names. Extra 1 space for accomodating '\n' in prints */ #define CPUFREQ_NAME_PLEN (CPUFREQ_NAME_LEN + 1) struct cpufreq_governor; @@ -44,6 +42,13 @@ enum cpufreq_table_sorting { CPUFREQ_TABLE_SORTED_DESCENDING }; +struct cpufreq_freqs { + unsigned int cpu; /* cpu nr */ + unsigned int old; + unsigned int new; + u8 flags; /* flags of cpufreq_driver, see below. 
*/ +}; + struct cpufreq_cpuinfo { unsigned int max_freq; unsigned int min_freq; @@ -52,6 +57,11 @@ struct cpufreq_cpuinfo { unsigned int transition_latency; }; +struct cpufreq_user_policy { + unsigned int min; /* in kHz */ + unsigned int max; /* in kHz */ +}; + struct cpufreq_policy { /* CPUs sharing clock, require sw coordination */ cpumask_var_t cpus; /* Online CPUs only */ @@ -69,6 +79,7 @@ struct cpufreq_policy { unsigned int max; /* in kHz */ unsigned int cur; /* in kHz, only needed if cpufreq * governors are used */ + unsigned int restore_freq; /* = policy->cur before transition */ unsigned int suspend_freq; /* freq to set during suspend */ unsigned int policy; /* see above */ @@ -80,10 +91,7 @@ struct cpufreq_policy { struct work_struct update; /* if update_policy() needs to be * called, but you're in IRQ context */ - struct freq_constraints constraints; - struct freq_qos_request *min_freq_req; - struct freq_qos_request *max_freq_req; - + struct cpufreq_user_policy user_policy; struct cpufreq_frequency_table *freq_table; enum cpufreq_table_sorting freq_table_sorted; @@ -107,36 +115,14 @@ struct cpufreq_policy { * guarantee that frequency can be changed on any CPU sharing the * policy and that the change will affect all of the policy CPUs then. * - fast_switch_enabled is to be set by governors that support fast - * frequency switching with the help of cpufreq_enable_fast_switch(). + * freqnency switching with the help of cpufreq_enable_fast_switch(). */ bool fast_switch_possible; bool fast_switch_enabled; - /* - * Set if the CPUFREQ_GOV_STRICT_TARGET flag is set for the current - * governor. - */ - bool strict_target; - - /* - * Preferred average time interval between consecutive invocations of - * the driver to set the frequency for this policy. To be set by the - * scaling driver (0, which is the default, means no preference). 
- */ - unsigned int transition_delay_us; - - /* - * Remote DVFS flag (Not added to the driver structure as we don't want - * to access another structure from scheduler hotpath). - * - * Should be set if CPUs can do DVFS on behalf of other CPUs from - * different cpufreq policies. - */ - bool dvfs_possible_from_any_cpu; - /* Cached frequency lookup from cpufreq_driver_resolve_freq. */ unsigned int cached_target_freq; - unsigned int cached_resolved_idx; + int cached_resolved_idx; /* Synchronization for frequency transitions */ bool transition_ongoing; /* Tracks transition status */ @@ -149,33 +135,6 @@ struct cpufreq_policy { /* For cpufreq driver's internal use */ void *driver_data; - - /* Pointer to the cooling device if used for thermal mitigation */ - struct thermal_cooling_device *cdev; - - struct notifier_block nb_min; - struct notifier_block nb_max; -}; - -/* - * Used for passing new cpufreq policy data to the cpufreq driver's ->verify() - * callback for sanitization. That callback is only expected to modify the min - * and max values, if necessary, and specifically it must not update the - * frequency table. - */ -struct cpufreq_policy_data { - struct cpufreq_cpuinfo cpuinfo; - struct cpufreq_frequency_table *freq_table; - unsigned int cpu; - unsigned int min; /* in kHz */ - unsigned int max; /* in kHz */ -}; - -struct cpufreq_freqs { - struct cpufreq_policy *policy; - unsigned int old; - unsigned int new; - u8 flags; /* flags of cpufreq_driver, see below. 
*/ }; /* Only for ACPI */ @@ -200,33 +159,24 @@ static inline struct cpufreq_policy *cpufreq_cpu_get(unsigned int cpu) static inline void cpufreq_cpu_put(struct cpufreq_policy *policy) { } #endif -static inline bool policy_is_inactive(struct cpufreq_policy *policy) -{ - return cpumask_empty(policy->cpus); -} - static inline bool policy_is_shared(struct cpufreq_policy *policy) { return cpumask_weight(policy->cpus) > 1; } +/* /sys/devices/system/cpu/cpufreq: entry point for global variables */ +extern struct kobject *cpufreq_global_kobject; + #ifdef CONFIG_CPU_FREQ unsigned int cpufreq_get(unsigned int cpu); unsigned int cpufreq_quick_get(unsigned int cpu); unsigned int cpufreq_quick_get_max(unsigned int cpu); -unsigned int cpufreq_get_hw_max_freq(unsigned int cpu); void disable_cpufreq(void); u64 get_cpu_idle_time(unsigned int cpu, u64 *wall, int io_busy); - -struct cpufreq_policy *cpufreq_cpu_acquire(unsigned int cpu); -void cpufreq_cpu_release(struct cpufreq_policy *policy); int cpufreq_get_policy(struct cpufreq_policy *policy, unsigned int cpu); -void refresh_frequency_limits(struct cpufreq_policy *policy); -void cpufreq_update_policy(unsigned int cpu); -void cpufreq_update_limits(unsigned int cpu); +int cpufreq_update_policy(unsigned int cpu); bool have_governor_per_policy(void); -bool cpufreq_supports_freq_invariance(void); struct kobject *get_governor_parent_kobj(struct cpufreq_policy *policy); void cpufreq_enable_fast_switch(struct cpufreq_policy *policy); void cpufreq_disable_fast_switch(struct cpufreq_policy *policy); @@ -243,14 +193,6 @@ static inline unsigned int cpufreq_quick_get_max(unsigned int cpu) { return 0; } -static inline unsigned int cpufreq_get_hw_max_freq(unsigned int cpu) -{ - return 0; -} -static inline bool cpufreq_supports_freq_invariance(void) -{ - return false; -} static inline void disable_cpufreq(void) { } #endif @@ -292,31 +234,40 @@ __ATTR(_name, _perm, show_##_name, NULL) static struct freq_attr _name = \ __ATTR(_name, 0644, 
show_##_name, store_##_name) -#define cpufreq_freq_attr_wo(_name) \ -static struct freq_attr _name = \ -__ATTR(_name, 0200, NULL, store_##_name) +struct global_attr { + struct attribute attr; + ssize_t (*show)(struct kobject *kobj, + struct kobj_attribute *attr, char *buf); + ssize_t (*store)(struct kobject *a, struct kobj_attribute *b, + const char *c, size_t count); +}; +typedef struct global_attr __no_const global_attr_no_const; #define define_one_global_ro(_name) \ -static struct kobj_attribute _name = \ +static struct global_attr _name = \ __ATTR(_name, 0444, show_##_name, NULL) #define define_one_global_rw(_name) \ -static struct kobj_attribute _name = \ +static struct global_attr _name = \ __ATTR(_name, 0644, show_##_name, store_##_name) struct cpufreq_driver { char name[CPUFREQ_NAME_LEN]; - u16 flags; + u8 flags; void *driver_data; /* needed by all drivers */ int (*init)(struct cpufreq_policy *policy); - int (*verify)(struct cpufreq_policy_data *policy); + int (*verify)(struct cpufreq_policy *policy); /* define one out of two */ int (*setpolicy)(struct cpufreq_policy *policy); + /* + * On failure, should always restore frequency to policy->restore_freq + * (i.e. old freq). + */ int (*target)(struct cpufreq_policy *policy, unsigned int target_freq, unsigned int relation); /* Deprecated */ @@ -324,15 +275,15 @@ struct cpufreq_driver { unsigned int index); unsigned int (*fast_switch)(struct cpufreq_policy *policy, unsigned int target_freq); + /* - * ->fast_switch() replacement for drivers that use an internal - * representation of performance levels and can pass hints other than - * the target performance level to the hardware. + * Caches and returns the lowest driver-supported frequency greater than + * or equal to the target frequency, subject to any driver limitations. + * Does not set the frequency. Only to be implemented for drivers with + * target(). 
*/ - void (*adjust_perf)(unsigned int cpu, - unsigned long min_perf, - unsigned long target_perf, - unsigned long capacity); + unsigned int (*resolve_freq)(struct cpufreq_policy *policy, + unsigned int target_freq); /* * Only for drivers with target_index() and CPUFREQ_ASYNC_NOTIFICATION @@ -340,7 +291,7 @@ struct cpufreq_driver { * * get_intermediate should return a stable intermediate frequency * platform wants to switch to and target_intermediate() should set CPU - * to that frequency, before jumping to the frequency corresponding + * to to that frequency, before jumping to the frequency corresponding * to 'index'. Core will take care of sending notifications and driver * doesn't have to handle them in target_intermediate() or * target_index(). @@ -357,49 +308,33 @@ struct cpufreq_driver { /* should be defined, if possible */ unsigned int (*get)(unsigned int cpu); - /* Called to update policy limits on firmware notifications. */ - void (*update_limits)(unsigned int cpu); - /* optional */ int (*bios_limit)(int cpu, unsigned int *limit); - int (*online)(struct cpufreq_policy *policy); - int (*offline)(struct cpufreq_policy *policy); int (*exit)(struct cpufreq_policy *policy); + void (*stop_cpu)(struct cpufreq_policy *policy); int (*suspend)(struct cpufreq_policy *policy); int (*resume)(struct cpufreq_policy *policy); + /* Will be called after the driver is fully initialized */ + void (*ready)(struct cpufreq_policy *policy); + struct freq_attr **attr; /* platform specific boost support code */ bool boost_enabled; - int (*set_boost)(struct cpufreq_policy *policy, int state); - - /* - * Set by drivers that want to register with the energy model after the - * policy is properly initialized, but before the governor is started. 
- */ - void (*register_em)(struct cpufreq_policy *policy); -}; + int (*set_boost)(int state); +} __do_const; /* flags */ - -/* - * Set by drivers that need to update internale upper and lower boundaries along - * with the target frequency and so the core and governors should also invoke - * the diver if the target frequency does not change, but the policy min or max - * may have changed. - */ -#define CPUFREQ_NEED_UPDATE_LIMITS BIT(0) - -/* loops_per_jiffy or other kernel "constants" aren't affected by frequency transitions */ -#define CPUFREQ_CONST_LOOPS BIT(1) - -/* - * Set by drivers that want the core to automatically register the cpufreq - * driver as a thermal cooling device. - */ -#define CPUFREQ_IS_COOLING_DEV BIT(2) +#define CPUFREQ_STICKY (1 << 0) /* driver isn't removed even if + all ->init() calls failed */ +#define CPUFREQ_CONST_LOOPS (1 << 1) /* loops_per_jiffy or other + kernel "constants" aren't + affected by frequency + transitions */ +#define CPUFREQ_PM_NO_WARN (1 << 2) /* don't warn on suspend/resume + speed mismatches */ /* * This should be set by platforms having multiple clock-domains, i.e. @@ -407,14 +342,14 @@ struct cpufreq_driver { * be created in cpu/cpu/cpufreq/ directory and so they can use the same * governor with different tunables for different clusters. */ -#define CPUFREQ_HAVE_GOVERNOR_PER_POLICY BIT(3) +#define CPUFREQ_HAVE_GOVERNOR_PER_POLICY (1 << 3) /* * Driver will do POSTCHANGE notifications from outside of their ->target() * routine and so must set cpufreq_driver->flags with this flag, so that core * can handle them specially. */ -#define CPUFREQ_ASYNC_NOTIFICATION BIT(4) +#define CPUFREQ_ASYNC_NOTIFICATION (1 << 4) /* * Set by drivers which want cpufreq core to check if CPU is running at a @@ -423,30 +358,16 @@ struct cpufreq_driver { * from the table. And if that fails, we will stop further boot process by * issuing a BUG_ON(). 
*/ -#define CPUFREQ_NEED_INITIAL_FREQ_CHECK BIT(5) - -/* - * Set by drivers to disallow use of governors with "dynamic_switching" flag - * set. - */ -#define CPUFREQ_NO_AUTO_DYNAMIC_SWITCHING BIT(6) +#define CPUFREQ_NEED_INITIAL_FREQ_CHECK (1 << 5) int cpufreq_register_driver(struct cpufreq_driver *driver_data); int cpufreq_unregister_driver(struct cpufreq_driver *driver_data); -bool cpufreq_driver_test_flags(u16 flags); const char *cpufreq_get_current_driver(void); void *cpufreq_get_driver_data(void); -static inline int cpufreq_thermal_control_enabled(struct cpufreq_driver *drv) -{ - return IS_ENABLED(CONFIG_CPU_THERMAL) && - (drv->flags & CPUFREQ_IS_COOLING_DEV); -} - -static inline void cpufreq_verify_within_limits(struct cpufreq_policy_data *policy, - unsigned int min, - unsigned int max) +static inline void cpufreq_verify_within_limits(struct cpufreq_policy *policy, + unsigned int min, unsigned int max) { if (policy->min < min) policy->min = min; @@ -462,10 +383,10 @@ static inline void cpufreq_verify_within_limits(struct cpufreq_policy_data *poli } static inline void -cpufreq_verify_within_cpu_limits(struct cpufreq_policy_data *policy) +cpufreq_verify_within_cpu_limits(struct cpufreq_policy *policy) { cpufreq_verify_within_limits(policy, policy->cpuinfo.min_freq, - policy->cpuinfo.max_freq); + policy->cpuinfo.max_freq); } #ifdef CONFIG_CPU_FREQ @@ -489,8 +410,11 @@ static inline void cpufreq_resume(void) {} #define CPUFREQ_POSTCHANGE (1) /* Policy Notifiers */ -#define CPUFREQ_CREATE_POLICY (0) -#define CPUFREQ_REMOVE_POLICY (1) +#define CPUFREQ_ADJUST (0) +#define CPUFREQ_NOTIFY (1) +#define CPUFREQ_START (2) +#define CPUFREQ_CREATE_POLICY (3) +#define CPUFREQ_REMOVE_POLICY (4) #ifdef CONFIG_CPU_FREQ int cpufreq_register_notifier(struct notifier_block *nb, unsigned int list); @@ -543,7 +467,6 @@ static inline unsigned long cpufreq_scale(unsigned long old, u_int div, * CPUFREQ GOVERNORS * *********************************************************************/ 
-#define CPUFREQ_POLICY_UNKNOWN (0) /* * If (cpufreq_driver->target) exists, the ->governor decides what frequency * within the limits is used. If (cpufreq_driver->setpolicy> exists, these @@ -557,8 +480,14 @@ static inline unsigned long cpufreq_scale(unsigned long old, u_int div, * polling frequency is 1000 times the transition latency of the processor. The * ondemand governor will work on any processor with transition latency <= 10ms, * using appropriate sampling rate. + * + * For CPUs with transition latency > 10ms (mostly drivers with CPUFREQ_ETERNAL) + * the ondemand governor will not work. All times here are in us (microseconds). */ +#define MIN_SAMPLING_RATE_RATIO (2) #define LATENCY_MULTIPLIER (1000) +#define MIN_LATENCY_MULTIPLIER (20) +#define TRANSITION_LATENCY_LIMIT (10 * 1000 * 1000) struct cpufreq_governor { char name[CPUFREQ_NAME_LEN]; @@ -571,28 +500,16 @@ struct cpufreq_governor { char *buf); int (*store_setspeed) (struct cpufreq_policy *policy, unsigned int freq); + unsigned int max_transition_latency; /* HW must be able to switch to + next freq faster than this value in nano secs or we + will fallback to performance governor */ struct list_head governor_list; struct module *owner; - u8 flags; }; -/* Governor flags */ - -/* For governors which change frequency dynamically by themselves */ -#define CPUFREQ_GOV_DYNAMIC_SWITCHING BIT(0) - -/* For governors wanting the target frequency to be set exactly */ -#define CPUFREQ_GOV_STRICT_TARGET BIT(1) - - /* Pass a target to the cpufreq driver */ unsigned int cpufreq_driver_fast_switch(struct cpufreq_policy *policy, unsigned int target_freq); -void cpufreq_driver_adjust_perf(unsigned int cpu, - unsigned long min_perf, - unsigned long target_perf, - unsigned long capacity); -bool cpufreq_driver_has_adjust_perf(void); int cpufreq_driver_target(struct cpufreq_policy *policy, unsigned int target_freq, unsigned int relation); @@ -601,25 +518,8 @@ int __cpufreq_driver_target(struct cpufreq_policy *policy, 
unsigned int relation); unsigned int cpufreq_driver_resolve_freq(struct cpufreq_policy *policy, unsigned int target_freq); -unsigned int cpufreq_policy_transition_delay_us(struct cpufreq_policy *policy); int cpufreq_register_governor(struct cpufreq_governor *governor); void cpufreq_unregister_governor(struct cpufreq_governor *governor); -int cpufreq_start_governor(struct cpufreq_policy *policy); -void cpufreq_stop_governor(struct cpufreq_policy *policy); - -#define cpufreq_governor_init(__governor) \ -static int __init __governor##_init(void) \ -{ \ - return cpufreq_register_governor(&__governor); \ -} \ -core_initcall(__governor##_init) - -#define cpufreq_governor_exit(__governor) \ -static void __exit __governor##_exit(void) \ -{ \ - return cpufreq_unregister_governor(&__governor); \ -} \ -module_exit(__governor##_exit) struct cpufreq_governor *cpufreq_default_governor(void); struct cpufreq_governor *cpufreq_fallback_governor(void); @@ -701,18 +601,6 @@ static inline void dev_pm_opp_free_cpufreq_table(struct device *dev, #define cpufreq_for_each_entry(pos, table) \ for (pos = table; pos->frequency != CPUFREQ_TABLE_END; pos++) -/* - * cpufreq_for_each_entry_idx - iterate over a cpufreq_frequency_table - * with index - * @pos: the cpufreq_frequency_table * to use as a loop cursor. - * @table: the cpufreq_frequency_table * to iterate over. - * @idx: the table entry currently being processed - */ - -#define cpufreq_for_each_entry_idx(pos, table, idx) \ - for (pos = table, idx = 0; pos->frequency != CPUFREQ_TABLE_END; \ - pos++, idx++) - /* * cpufreq_for_each_valid_entry - iterate over a cpufreq_frequency_table * excluding CPUFREQ_ENTRY_INVALID frequencies. @@ -726,27 +614,12 @@ static inline void dev_pm_opp_free_cpufreq_table(struct device *dev, continue; \ else -/* - * cpufreq_for_each_valid_entry_idx - iterate with index over a cpufreq - * frequency_table excluding CPUFREQ_ENTRY_INVALID frequencies. - * @pos: the cpufreq_frequency_table * to use as a loop cursor. 
- * @table: the cpufreq_frequency_table * to iterate over. - * @idx: the table entry currently being processed - */ - -#define cpufreq_for_each_valid_entry_idx(pos, table, idx) \ - cpufreq_for_each_entry_idx(pos, table, idx) \ - if (pos->frequency == CPUFREQ_ENTRY_INVALID) \ - continue; \ - else - - int cpufreq_frequency_table_cpuinfo(struct cpufreq_policy *policy, struct cpufreq_frequency_table *table); -int cpufreq_frequency_table_verify(struct cpufreq_policy_data *policy, +int cpufreq_frequency_table_verify(struct cpufreq_policy *policy, struct cpufreq_frequency_table *table); -int cpufreq_generic_frequency_table_verify(struct cpufreq_policy_data *policy); +int cpufreq_generic_frequency_table_verify(struct cpufreq_policy *policy); int cpufreq_table_index_unsorted(struct cpufreq_policy *policy, unsigned int target_freq, @@ -767,20 +640,19 @@ static inline int cpufreq_table_find_index_al(struct cpufreq_policy *policy, unsigned int target_freq) { struct cpufreq_frequency_table *table = policy->freq_table; - struct cpufreq_frequency_table *pos; + struct cpufreq_frequency_table *pos, *best = table - 1; unsigned int freq; - int idx, best = -1; - cpufreq_for_each_valid_entry_idx(pos, table, idx) { + cpufreq_for_each_valid_entry(pos, table) { freq = pos->frequency; if (freq >= target_freq) - return idx; + return pos - table; - best = idx; + best = pos; } - return best; + return best - table; } /* Find lowest freq at or above target in a table in descending order */ @@ -788,29 +660,28 @@ static inline int cpufreq_table_find_index_dl(struct cpufreq_policy *policy, unsigned int target_freq) { struct cpufreq_frequency_table *table = policy->freq_table; - struct cpufreq_frequency_table *pos; + struct cpufreq_frequency_table *pos, *best = table - 1; unsigned int freq; - int idx, best = -1; - cpufreq_for_each_valid_entry_idx(pos, table, idx) { + cpufreq_for_each_valid_entry(pos, table) { freq = pos->frequency; if (freq == target_freq) - return idx; + return pos - table; if 
(freq > target_freq) { - best = idx; + best = pos; continue; } /* No freq found above target_freq */ - if (best == -1) - return idx; + if (best == table - 1) + return pos - table; - return best; + return best - table; } - return best; + return best - table; } /* Works only on sorted freq-tables */ @@ -830,29 +701,28 @@ static inline int cpufreq_table_find_index_ah(struct cpufreq_policy *policy, unsigned int target_freq) { struct cpufreq_frequency_table *table = policy->freq_table; - struct cpufreq_frequency_table *pos; + struct cpufreq_frequency_table *pos, *best = table - 1; unsigned int freq; - int idx, best = -1; - cpufreq_for_each_valid_entry_idx(pos, table, idx) { + cpufreq_for_each_valid_entry(pos, table) { freq = pos->frequency; if (freq == target_freq) - return idx; + return pos - table; if (freq < target_freq) { - best = idx; + best = pos; continue; } /* No freq found below target_freq */ - if (best == -1) - return idx; + if (best == table - 1) + return pos - table; - return best; + return best - table; } - return best; + return best - table; } /* Find highest freq at or below target in a table in descending order */ @@ -860,20 +730,19 @@ static inline int cpufreq_table_find_index_dh(struct cpufreq_policy *policy, unsigned int target_freq) { struct cpufreq_frequency_table *table = policy->freq_table; - struct cpufreq_frequency_table *pos; + struct cpufreq_frequency_table *pos, *best = table - 1; unsigned int freq; - int idx, best = -1; - cpufreq_for_each_valid_entry_idx(pos, table, idx) { + cpufreq_for_each_valid_entry(pos, table) { freq = pos->frequency; if (freq <= target_freq) - return idx; + return pos - table; - best = idx; + best = pos; } - return best; + return best - table; } /* Works only on sorted freq-tables */ @@ -893,33 +762,32 @@ static inline int cpufreq_table_find_index_ac(struct cpufreq_policy *policy, unsigned int target_freq) { struct cpufreq_frequency_table *table = policy->freq_table; - struct cpufreq_frequency_table *pos; + struct 
cpufreq_frequency_table *pos, *best = table - 1; unsigned int freq; - int idx, best = -1; - cpufreq_for_each_valid_entry_idx(pos, table, idx) { + cpufreq_for_each_valid_entry(pos, table) { freq = pos->frequency; if (freq == target_freq) - return idx; + return pos - table; if (freq < target_freq) { - best = idx; + best = pos; continue; } /* No freq found below target_freq */ - if (best == -1) - return idx; + if (best == table - 1) + return pos - table; /* Choose the closest freq */ - if (target_freq - table[best].frequency > freq - target_freq) - return idx; + if (target_freq - best->frequency > freq - target_freq) + return pos - table; - return best; + return best - table; } - return best; + return best - table; } /* Find closest freq to target in a table in descending order */ @@ -927,33 +795,32 @@ static inline int cpufreq_table_find_index_dc(struct cpufreq_policy *policy, unsigned int target_freq) { struct cpufreq_frequency_table *table = policy->freq_table; - struct cpufreq_frequency_table *pos; + struct cpufreq_frequency_table *pos, *best = table - 1; unsigned int freq; - int idx, best = -1; - cpufreq_for_each_valid_entry_idx(pos, table, idx) { + cpufreq_for_each_valid_entry(pos, table) { freq = pos->frequency; if (freq == target_freq) - return idx; + return pos - table; if (freq > target_freq) { - best = idx; + best = pos; continue; } /* No freq found above target_freq */ - if (best == -1) - return idx; + if (best == table - 1) + return pos - table; /* Choose the closest freq */ - if (table[best].frequency - target_freq > target_freq - freq) - return idx; + if (best->frequency - target_freq > target_freq - freq) + return pos - table; - return best; + return best - table; } - return best; + return best - table; } /* Works only on sorted freq-tables */ @@ -984,73 +851,10 @@ static inline int cpufreq_frequency_table_target(struct cpufreq_policy *policy, case CPUFREQ_RELATION_C: return cpufreq_table_find_index_c(policy, target_freq); default: - WARN_ON_ONCE(1); - 
return 0; + pr_err("%s: Invalid relation: %d\n", __func__, relation); + return -EINVAL; } } - -static inline int cpufreq_table_count_valid_entries(const struct cpufreq_policy *policy) -{ - struct cpufreq_frequency_table *pos; - int count = 0; - - if (unlikely(!policy->freq_table)) - return 0; - - cpufreq_for_each_valid_entry(pos, policy->freq_table) - count++; - - return count; -} - -static inline int parse_perf_domain(int cpu, const char *list_name, - const char *cell_name) -{ - struct device_node *cpu_np; - struct of_phandle_args args; - int ret; - - cpu_np = of_cpu_device_node_get(cpu); - if (!cpu_np) - return -ENODEV; - - ret = of_parse_phandle_with_args(cpu_np, list_name, cell_name, 0, - &args); - if (ret < 0) - return ret; - - of_node_put(cpu_np); - - return args.args[0]; -} - -static inline int of_perf_domain_get_sharing_cpumask(int pcpu, const char *list_name, - const char *cell_name, struct cpumask *cpumask) -{ - int target_idx; - int cpu, ret; - - ret = parse_perf_domain(pcpu, list_name, cell_name); - if (ret < 0) - return ret; - - target_idx = ret; - cpumask_set_cpu(pcpu, cpumask); - - for_each_possible_cpu(cpu) { - if (cpu == pcpu) - continue; - - ret = parse_perf_domain(pcpu, list_name, cell_name); - if (ret < 0) - continue; - - if (target_idx == ret) - cpumask_set_cpu(cpu, cpumask); - } - - return target_idx; -} #else static inline int cpufreq_boost_trigger_state(int state) { @@ -1070,47 +874,17 @@ static inline bool policy_has_boost_freq(struct cpufreq_policy *policy) { return false; } - -static inline int of_perf_domain_get_sharing_cpumask(int pcpu, const char *list_name, - const char *cell_name, struct cpumask *cpumask) -{ - return -EOPNOTSUPP; -} #endif -#if defined(CONFIG_ENERGY_MODEL) && defined(CONFIG_CPU_FREQ_GOV_SCHEDUTIL) -void sched_cpufreq_governor_change(struct cpufreq_policy *policy, - struct cpufreq_governor *old_gov); -#else -static inline void sched_cpufreq_governor_change(struct cpufreq_policy *policy, - struct cpufreq_governor 
*old_gov) { } -#endif - -extern void arch_freq_prepare_all(void); -extern unsigned int arch_freq_get_on_cpu(int cpu); - -#ifndef arch_set_freq_scale -static __always_inline -void arch_set_freq_scale(const struct cpumask *cpus, - unsigned long cur_freq, - unsigned long max_freq) -{ -} -#endif /* the following are really really optional */ extern struct freq_attr cpufreq_freq_attr_scaling_available_freqs; extern struct freq_attr cpufreq_freq_attr_scaling_boost_freqs; extern struct freq_attr *cpufreq_generic_attr[]; -int cpufreq_table_validate_and_sort(struct cpufreq_policy *policy); +int cpufreq_table_validate_and_show(struct cpufreq_policy *policy, + struct cpufreq_frequency_table *table); unsigned int cpufreq_generic_get(unsigned int cpu); -void cpufreq_generic_init(struct cpufreq_policy *policy, +int cpufreq_generic_init(struct cpufreq_policy *policy, struct cpufreq_frequency_table *table, unsigned int transition_latency); - -static inline void cpufreq_register_em_with_opp(struct cpufreq_policy *policy) -{ - dev_pm_opp_of_register_em(get_cpu_device(policy->cpu), - policy->related_cpus); -} #endif /* _LINUX_CPUFREQ_H */ diff --git a/include/linux/cpuhotplug.h b/include/linux/cpuhotplug.h index 9919110488..4217ad81bc 100644 --- a/include/linux/cpuhotplug.h +++ b/include/linux/cpuhotplug.h @@ -1,80 +1,24 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef __CPUHOTPLUG_H #define __CPUHOTPLUG_H #include -/* - * CPU-up CPU-down - * - * BP AP BP AP - * - * OFFLINE OFFLINE - * | ^ - * v | - * BRINGUP_CPU->AP_OFFLINE BRINGUP_CPU <- AP_IDLE_DEAD (idle thread/play_dead) - * | AP_OFFLINE - * v (IRQ-off) ,---------------^ - * AP_ONLNE | (stop_machine) - * | TEARDOWN_CPU <- AP_ONLINE_IDLE - * | ^ - * v | - * AP_ACTIVE AP_ACTIVE - */ - -/* - * CPU hotplug states. The state machine invokes the installed state - * startup callbacks sequentially from CPUHP_OFFLINE + 1 to CPUHP_ONLINE - * during a CPU online operation. 
During a CPU offline operation the - * installed teardown callbacks are invoked in the reverse order from - * CPU_ONLINE - 1 down to CPUHP_OFFLINE. - * - * The state space has three sections: PREPARE, STARTING and ONLINE. - * - * PREPARE: The callbacks are invoked on a control CPU before the - * hotplugged CPU is started up or after the hotplugged CPU has died. - * - * STARTING: The callbacks are invoked on the hotplugged CPU from the low level - * hotplug startup/teardown code with interrupts disabled. - * - * ONLINE: The callbacks are invoked on the hotplugged CPU from the per CPU - * hotplug thread with interrupts and preemption enabled. - * - * Adding explicit states to this enum is only necessary when: - * - * 1) The state is within the STARTING section - * - * 2) The state has ordering constraints vs. other states in the - * same section. - * - * If neither #1 nor #2 apply, please use the dynamic state space when - * setting up a state by using CPUHP_PREPARE_DYN or CPUHP_PREPARE_ONLINE - * for the @state argument of the setup function. - * - * See Documentation/core-api/cpu_hotplug.rst for further information and - * examples. 
- */ enum cpuhp_state { - CPUHP_INVALID = -1, - - /* PREPARE section invoked on a control CPU */ - CPUHP_OFFLINE = 0, + CPUHP_OFFLINE, CPUHP_CREATE_THREADS, CPUHP_PERF_PREPARE, CPUHP_PERF_X86_PREPARE, + CPUHP_PERF_X86_UNCORE_PREP, CPUHP_PERF_X86_AMD_UNCORE_PREP, + CPUHP_PERF_X86_RAPL_PREP, + CPUHP_PERF_BFIN, CPUHP_PERF_POWER, CPUHP_PERF_SUPERH, CPUHP_X86_HPET_DEAD, CPUHP_X86_APB_DEAD, - CPUHP_X86_MCE_DEAD, CPUHP_VIRT_NET_DEAD, CPUHP_SLUB_DEAD, - CPUHP_DEBUG_OBJ_DEAD, CPUHP_MM_WRITEBACK_DEAD, - /* Must be after CPUHP_MM_VMSTAT_DEAD */ - CPUHP_MM_DEMOTION_DEAD, - CPUHP_MM_VMSTAT_DEAD, CPUHP_SOFTIRQ_DEAD, CPUHP_NET_MVNETA_DEAD, CPUHP_CPUIDLE_DEAD, @@ -82,23 +26,10 @@ enum cpuhp_state { CPUHP_ARM_OMAP_WAKE_DEAD, CPUHP_IRQ_POLL_DEAD, CPUHP_BLOCK_SOFTIRQ_DEAD, - CPUHP_BIO_DEAD, + CPUHP_VIRT_SCSI_DEAD, CPUHP_ACPI_CPUDRV_DEAD, CPUHP_S390_PFAULT_DEAD, CPUHP_BLK_MQ_DEAD, - CPUHP_FS_BUFF_DEAD, - CPUHP_PRINTK_DEAD, - CPUHP_MM_MEMCQ_DEAD, - CPUHP_XFS_DEAD, - CPUHP_PERCPU_CNT_DEAD, - CPUHP_RADIX_DEAD, - CPUHP_PAGE_ALLOC, - CPUHP_NET_DEV_DEAD, - CPUHP_PCI_XGENE_DEAD, - CPUHP_IOMMU_IOVA_DEAD, - CPUHP_LUSTRE_CFS_DEAD, - CPUHP_AP_ARM_CACHE_B15_RAC_DEAD, - CPUHP_PADATA_DEAD, CPUHP_WORKQUEUE_PREP, CPUHP_POWER_NUMA_PREPARE, CPUHP_HRTIMERS_PREPARE, @@ -114,73 +45,50 @@ enum cpuhp_state { CPUHP_POWERPC_MMU_CTX_PREPARE, CPUHP_XEN_PREPARE, CPUHP_XEN_EVTCHN_PREPARE, + CPUHP_NOTIFY_PREPARE, CPUHP_ARM_SHMOBILE_SCU_PREPARE, CPUHP_SH_SH3X_PREPARE, - CPUHP_NET_FLOW_PREPARE, - CPUHP_TOPOLOGY_PREPARE, - CPUHP_NET_IUCV_PREPARE, - CPUHP_ARM_BL_PREPARE, - CPUHP_TRACE_RB_PREPARE, - CPUHP_MM_ZS_PREPARE, - CPUHP_MM_ZSWP_MEM_PREPARE, - CPUHP_MM_ZSWP_POOL_PREPARE, - CPUHP_KVM_PPC_BOOK3S_PREPARE, - CPUHP_ZCOMP_PREPARE, - CPUHP_TIMERS_PREPARE, + CPUHP_BLK_MQ_PREPARE, + CPUHP_TIMERS_DEAD, + CPUHP_NOTF_ERR_INJ_PREPARE, CPUHP_MIPS_SOC_PREPARE, - CPUHP_BP_PREPARE_DYN, - CPUHP_BP_PREPARE_DYN_END = CPUHP_BP_PREPARE_DYN + 20, CPUHP_BRINGUP_CPU, - - /* - * STARTING section invoked on the hotplugged CPU in low level 
- * bringup and teardown code. - */ CPUHP_AP_IDLE_DEAD, CPUHP_AP_OFFLINE, CPUHP_AP_SCHED_STARTING, CPUHP_AP_RCUTREE_DYING, - CPUHP_AP_CPU_PM_STARTING, CPUHP_AP_IRQ_GIC_STARTING, + CPUHP_AP_IRQ_GICV3_STARTING, CPUHP_AP_IRQ_HIP04_STARTING, - CPUHP_AP_IRQ_APPLE_AIC_STARTING, CPUHP_AP_IRQ_ARMADA_XP_STARTING, + CPUHP_AP_IRQ_ARMADA_CASC_STARTING, CPUHP_AP_IRQ_BCM2836_STARTING, - CPUHP_AP_IRQ_MIPS_GIC_STARTING, - CPUHP_AP_IRQ_RISCV_STARTING, - CPUHP_AP_IRQ_SIFIVE_PLIC_STARTING, CPUHP_AP_ARM_MVEBU_COHERENCY, - CPUHP_AP_MICROCODE_LOADER, + CPUHP_AP_PERF_X86_UNCORE_STARTING, CPUHP_AP_PERF_X86_AMD_UNCORE_STARTING, CPUHP_AP_PERF_X86_STARTING, CPUHP_AP_PERF_X86_AMD_IBS_STARTING, CPUHP_AP_PERF_X86_CQM_STARTING, CPUHP_AP_PERF_X86_CSTATE_STARTING, CPUHP_AP_PERF_XTENSA_STARTING, + CPUHP_AP_PERF_METAG_STARTING, CPUHP_AP_MIPS_OP_LOONGSON3_STARTING, - CPUHP_AP_ARM_SDEI_STARTING, CPUHP_AP_ARM_VFP_STARTING, CPUHP_AP_ARM64_DEBUG_MONITORS_STARTING, CPUHP_AP_PERF_ARM_HW_BREAKPOINT_STARTING, - CPUHP_AP_PERF_ARM_ACPI_STARTING, CPUHP_AP_PERF_ARM_STARTING, CPUHP_AP_ARM_L2X0_STARTING, - CPUHP_AP_EXYNOS4_MCT_TIMER_STARTING, CPUHP_AP_ARM_ARCH_TIMER_STARTING, CPUHP_AP_ARM_GLOBAL_TIMER_STARTING, CPUHP_AP_JCORE_TIMER_STARTING, + CPUHP_AP_EXYNOS4_MCT_TIMER_STARTING, CPUHP_AP_ARM_TWD_STARTING, + CPUHP_AP_METAG_TIMER_STARTING, CPUHP_AP_QCOM_TIMER_STARTING, - CPUHP_AP_TEGRA_TIMER_STARTING, CPUHP_AP_ARMADA_TIMER_STARTING, CPUHP_AP_MARCO_TIMER_STARTING, CPUHP_AP_MIPS_GIC_TIMER_STARTING, CPUHP_AP_ARC_TIMER_STARTING, - CPUHP_AP_RISCV_TIMER_STARTING, - CPUHP_AP_CLINT_TIMER_STARTING, - CPUHP_AP_CSKY_TIMER_STARTING, - CPUHP_AP_TI_GP_TIMER_STARTING, - CPUHP_AP_HYPERV_TIMER_STARTING, CPUHP_AP_KVM_STARTING, CPUHP_AP_KVM_ARM_VGIC_INIT_STARTING, CPUHP_AP_KVM_ARM_VGIC_STARTING, @@ -189,23 +97,15 @@ enum cpuhp_state { CPUHP_AP_DUMMY_TIMER_STARTING, CPUHP_AP_ARM_XEN_STARTING, CPUHP_AP_ARM_CORESIGHT_STARTING, - CPUHP_AP_ARM_CORESIGHT_CTI_STARTING, + CPUHP_AP_ARM_CORESIGHT4_STARTING, CPUHP_AP_ARM64_ISNDEP_STARTING, 
CPUHP_AP_SMPCFD_DYING, CPUHP_AP_X86_TBOOT_DYING, - CPUHP_AP_ARM_CACHE_B15_RAC_DYING, CPUHP_AP_ONLINE, CPUHP_TEARDOWN_CPU, - - /* Online section invoked on the hotplugged CPU from the hotplug thread */ CPUHP_AP_ONLINE_IDLE, - CPUHP_AP_SCHED_WAIT_EMPTY, CPUHP_AP_SMPBOOT_THREADS, CPUHP_AP_X86_VDSO_VMA_ONLINE, - CPUHP_AP_IRQ_AFFINITY_ONLINE, - CPUHP_AP_BLK_MQ_ONLINE, - CPUHP_AP_ARM_MVEBU_SYNC_CLOCKS, - CPUHP_AP_X86_INTEL_EPB_ONLINE, CPUHP_AP_PERF_ONLINE, CPUHP_AP_PERF_X86_ONLINE, CPUHP_AP_PERF_X86_UNCORE_ONLINE, @@ -214,162 +114,107 @@ enum cpuhp_state { CPUHP_AP_PERF_X86_RAPL_ONLINE, CPUHP_AP_PERF_X86_CQM_ONLINE, CPUHP_AP_PERF_X86_CSTATE_ONLINE, - CPUHP_AP_PERF_X86_IDXD_ONLINE, CPUHP_AP_PERF_S390_CF_ONLINE, CPUHP_AP_PERF_S390_SF_ONLINE, CPUHP_AP_PERF_ARM_CCI_ONLINE, CPUHP_AP_PERF_ARM_CCN_ONLINE, - CPUHP_AP_PERF_ARM_HISI_DDRC_ONLINE, - CPUHP_AP_PERF_ARM_HISI_HHA_ONLINE, - CPUHP_AP_PERF_ARM_HISI_L3_ONLINE, - CPUHP_AP_PERF_ARM_HISI_PA_ONLINE, - CPUHP_AP_PERF_ARM_HISI_SLLC_ONLINE, CPUHP_AP_PERF_ARM_L2X0_ONLINE, - CPUHP_AP_PERF_ARM_QCOM_L2_ONLINE, - CPUHP_AP_PERF_ARM_QCOM_L3_ONLINE, - CPUHP_AP_PERF_ARM_APM_XGENE_ONLINE, - CPUHP_AP_PERF_ARM_CAVIUM_TX2_UNCORE_ONLINE, - CPUHP_AP_PERF_POWERPC_NEST_IMC_ONLINE, - CPUHP_AP_PERF_POWERPC_CORE_IMC_ONLINE, - CPUHP_AP_PERF_POWERPC_THREAD_IMC_ONLINE, - CPUHP_AP_PERF_POWERPC_TRACE_IMC_ONLINE, - CPUHP_AP_PERF_POWERPC_HV_24x7_ONLINE, - CPUHP_AP_PERF_POWERPC_HV_GPCI_ONLINE, - CPUHP_AP_PERF_CSKY_ONLINE, - CPUHP_AP_WATCHDOG_ONLINE, CPUHP_AP_WORKQUEUE_ONLINE, CPUHP_AP_RCUTREE_ONLINE, - CPUHP_AP_BASE_CACHEINFO_ONLINE, + CPUHP_AP_NOTIFY_ONLINE, CPUHP_AP_ONLINE_DYN, CPUHP_AP_ONLINE_DYN_END = CPUHP_AP_ONLINE_DYN + 30, - /* Must be after CPUHP_AP_ONLINE_DYN for node_states[N_CPU] update */ - CPUHP_AP_MM_DEMOTION_ONLINE, CPUHP_AP_X86_HPET_ONLINE, CPUHP_AP_X86_KVM_CLK_ONLINE, - CPUHP_AP_DTPM_CPU_ONLINE, CPUHP_AP_ACTIVE, CPUHP_ONLINE, }; -int __cpuhp_setup_state(enum cpuhp_state state, const char *name, bool invoke, - int (*startup)(unsigned int 
cpu), - int (*teardown)(unsigned int cpu), bool multi_instance); +union cpuhp_step_startup { + int (*single)(unsigned int cpu); + int (*multi)(unsigned int cpu, struct hlist_node *node); +} __no_const; + +union cpuhp_step_teardown { + int (*single)(unsigned int cpu); + int (*multi)(unsigned int cpu, struct hlist_node *node); +} __no_const; + +int __cpuhp_setup_state(enum cpuhp_state state, const char *name, bool invoke, + union cpuhp_step_startup startup, + union cpuhp_step_teardown teardown, bool multi_instance); -int __cpuhp_setup_state_cpuslocked(enum cpuhp_state state, const char *name, - bool invoke, - int (*startup)(unsigned int cpu), - int (*teardown)(unsigned int cpu), - bool multi_instance); /** - * cpuhp_setup_state - Setup hotplug state callbacks with calling the @startup - * callback + * cpuhp_setup_state - Setup hotplug state callbacks with calling the callbacks * @state: The state for which the calls are installed * @name: Name of the callback (will be used in debug output) - * @startup: startup callback function or NULL if not required - * @teardown: teardown callback function or NULL if not required + * @startup: startup callback function + * @teardown: teardown callback function * - * Installs the callback functions and invokes the @startup callback on - * the online cpus which have already reached the @state. + * Installs the callback functions and invokes the startup callback on + * the present cpus which have already reached the @state. 
*/ static inline int cpuhp_setup_state(enum cpuhp_state state, const char *name, - int (*startup)(unsigned int cpu), - int (*teardown)(unsigned int cpu)) + int (*_startup)(unsigned int cpu), + int (*_teardown)(unsigned int cpu)) { + union cpuhp_step_startup startup = { .single = _startup }; + union cpuhp_step_teardown teardown = { .single = _teardown }; + return __cpuhp_setup_state(state, name, true, startup, teardown, false); } -/** - * cpuhp_setup_state_cpuslocked - Setup hotplug state callbacks with calling - * @startup callback from a cpus_read_lock() - * held region - * @state: The state for which the calls are installed - * @name: Name of the callback (will be used in debug output) - * @startup: startup callback function or NULL if not required - * @teardown: teardown callback function or NULL if not required - * - * Same as cpuhp_setup_state() except that it must be invoked from within a - * cpus_read_lock() held region. - */ -static inline int cpuhp_setup_state_cpuslocked(enum cpuhp_state state, - const char *name, - int (*startup)(unsigned int cpu), - int (*teardown)(unsigned int cpu)) -{ - return __cpuhp_setup_state_cpuslocked(state, name, true, startup, - teardown, false); -} - /** * cpuhp_setup_state_nocalls - Setup hotplug state callbacks without calling the - * @startup callback - * @state: The state for which the calls are installed - * @name: Name of the callback. - * @startup: startup callback function or NULL if not required - * @teardown: teardown callback function or NULL if not required - * - * Same as cpuhp_setup_state() except that the @startup callback is not - * invoked during installation. NOP if SMP=n or HOTPLUG_CPU=n. 
- */ -static inline int cpuhp_setup_state_nocalls(enum cpuhp_state state, - const char *name, - int (*startup)(unsigned int cpu), - int (*teardown)(unsigned int cpu)) -{ - return __cpuhp_setup_state(state, name, false, startup, teardown, - false); -} - -/** - * cpuhp_setup_state_nocalls_cpuslocked - Setup hotplug state callbacks without - * invoking the @startup callback from - * a cpus_read_lock() held region * callbacks * @state: The state for which the calls are installed * @name: Name of the callback. - * @startup: startup callback function or NULL if not required - * @teardown: teardown callback function or NULL if not required + * @startup: startup callback function + * @teardown: teardown callback function * - * Same as cpuhp_setup_state_nocalls() except that it must be invoked from - * within a cpus_read_lock() held region. + * Same as @cpuhp_setup_state except that no calls are executed are invoked + * during installation of this callback. NOP if SMP=n or HOTPLUG_CPU=n. */ -static inline int cpuhp_setup_state_nocalls_cpuslocked(enum cpuhp_state state, - const char *name, - int (*startup)(unsigned int cpu), - int (*teardown)(unsigned int cpu)) +static inline int cpuhp_setup_state_nocalls(enum cpuhp_state state, + const char *name, + int (*_startup)(unsigned int cpu), + int (*_teardown)(unsigned int cpu)) { - return __cpuhp_setup_state_cpuslocked(state, name, false, startup, - teardown, false); + union cpuhp_step_startup startup = { .single = _startup }; + union cpuhp_step_teardown teardown = { .single = _teardown }; + + return __cpuhp_setup_state(state, name, false, startup, teardown, + false); } /** * cpuhp_setup_state_multi - Add callbacks for multi state * @state: The state for which the calls are installed * @name: Name of the callback. 
- * @startup: startup callback function or NULL if not required - * @teardown: teardown callback function or NULL if not required + * @startup: startup callback function + * @teardown: teardown callback function * * Sets the internal multi_instance flag and prepares a state to work as a multi * instance callback. No callbacks are invoked at this point. The callbacks are * invoked once an instance for this state are registered via - * cpuhp_state_add_instance() or cpuhp_state_add_instance_nocalls() + * @cpuhp_state_add_instance or @cpuhp_state_add_instance_nocalls. */ static inline int cpuhp_setup_state_multi(enum cpuhp_state state, const char *name, - int (*startup)(unsigned int cpu, + int (*_startup)(unsigned int cpu, struct hlist_node *node), - int (*teardown)(unsigned int cpu, + int (*_teardown)(unsigned int cpu, struct hlist_node *node)) { - return __cpuhp_setup_state(state, name, false, - (void *) startup, - (void *) teardown, true); + union cpuhp_step_startup startup = { .multi = _startup }; + union cpuhp_step_teardown teardown = { .multi = _teardown }; + + return __cpuhp_setup_state(state, name, false, startup, teardown, true); } int __cpuhp_state_add_instance(enum cpuhp_state state, struct hlist_node *node, bool invoke); -int __cpuhp_state_add_instance_cpuslocked(enum cpuhp_state state, - struct hlist_node *node, bool invoke); /** * cpuhp_state_add_instance - Add an instance for a state and invoke startup @@ -377,10 +222,9 @@ int __cpuhp_state_add_instance_cpuslocked(enum cpuhp_state state, * @state: The state for which the instance is installed * @node: The node for this individual state. * - * Installs the instance for the @state and invokes the registered startup - * callback on the online cpus which have already reached the @state. The - * @state must have been earlier marked as multi-instance by - * cpuhp_setup_state_multi(). 
+ * Installs the instance for the @state and invokes the startup callback on + * the present cpus which have already reached the @state. The @state must have + * been earlier marked as multi-instance by @cpuhp_setup_state_multi. */ static inline int cpuhp_state_add_instance(enum cpuhp_state state, struct hlist_node *node) @@ -394,9 +238,8 @@ static inline int cpuhp_state_add_instance(enum cpuhp_state state, * @state: The state for which the instance is installed * @node: The node for this individual state. * - * Installs the instance for the @state. The @state must have been earlier - * marked as multi-instance by cpuhp_setup_state_multi. NOP if SMP=n or - * HOTPLUG_CPU=n. + * Installs the instance for the @state The @state must have been earlier + * marked as multi-instance by @cpuhp_setup_state_multi. */ static inline int cpuhp_state_add_instance_nocalls(enum cpuhp_state state, struct hlist_node *node) @@ -404,33 +247,14 @@ static inline int cpuhp_state_add_instance_nocalls(enum cpuhp_state state, return __cpuhp_state_add_instance(state, node, false); } -/** - * cpuhp_state_add_instance_nocalls_cpuslocked - Add an instance for a state - * without invoking the startup - * callback from a cpus_read_lock() - * held region. - * @state: The state for which the instance is installed - * @node: The node for this individual state. - * - * Same as cpuhp_state_add_instance_nocalls() except that it must be - * invoked from within a cpus_read_lock() held region. 
- */ -static inline int -cpuhp_state_add_instance_nocalls_cpuslocked(enum cpuhp_state state, - struct hlist_node *node) -{ - return __cpuhp_state_add_instance_cpuslocked(state, node, false); -} - void __cpuhp_remove_state(enum cpuhp_state state, bool invoke); -void __cpuhp_remove_state_cpuslocked(enum cpuhp_state state, bool invoke); /** * cpuhp_remove_state - Remove hotplug state callbacks and invoke the teardown * @state: The state for which the calls are removed * * Removes the callback functions and invokes the teardown callback on - * the online cpus which have already reached the @state. + * the present cpus which have already reached the @state. */ static inline void cpuhp_remove_state(enum cpuhp_state state) { @@ -439,7 +263,7 @@ static inline void cpuhp_remove_state(enum cpuhp_state state) /** * cpuhp_remove_state_nocalls - Remove hotplug state callbacks without invoking - * the teardown callback + * teardown * @state: The state for which the calls are removed */ static inline void cpuhp_remove_state_nocalls(enum cpuhp_state state) @@ -447,19 +271,6 @@ static inline void cpuhp_remove_state_nocalls(enum cpuhp_state state) __cpuhp_remove_state(state, false); } -/** - * cpuhp_remove_state_nocalls_cpuslocked - Remove hotplug state callbacks without invoking - * teardown from a cpus_read_lock() held region. - * @state: The state for which the calls are removed - * - * Same as cpuhp_remove_state nocalls() except that it must be invoked - * from within a cpus_read_lock() held region. - */ -static inline void cpuhp_remove_state_nocalls_cpuslocked(enum cpuhp_state state) -{ - __cpuhp_remove_state_cpuslocked(state, false); -} - /** * cpuhp_remove_multi_state - Remove hotplug multi state callback * @state: The state for which the calls are removed @@ -482,8 +293,8 @@ int __cpuhp_state_remove_instance(enum cpuhp_state state, * @state: The state from which the instance is removed * @node: The node for this individual state. 
* - * Removes the instance and invokes the teardown callback on the online cpus - * which have already reached @state. + * Removes the instance and invokes the teardown callback on the present cpus + * which have already reached the @state. */ static inline int cpuhp_state_remove_instance(enum cpuhp_state state, struct hlist_node *node) @@ -493,7 +304,7 @@ static inline int cpuhp_state_remove_instance(enum cpuhp_state state, /** * cpuhp_state_remove_instance_nocalls - Remove hotplug instance from state - * without invoking the teardown callback + * without invoking the reatdown callback * @state: The state from which the instance is removed * @node: The node for this individual state. * diff --git a/include/linux/cpuidle.h b/include/linux/cpuidle.h index bd605b5585..e85eb5fb01 100644 --- a/include/linux/cpuidle.h +++ b/include/linux/cpuidle.h @@ -29,32 +29,21 @@ struct cpuidle_driver; * CPUIDLE DEVICE INTERFACE * ****************************/ -#define CPUIDLE_STATE_DISABLED_BY_USER BIT(0) -#define CPUIDLE_STATE_DISABLED_BY_DRIVER BIT(1) - struct cpuidle_state_usage { unsigned long long disable; unsigned long long usage; - u64 time_ns; - unsigned long long above; /* Number of times it's been too deep */ - unsigned long long below; /* Number of times it's been too shallow */ - unsigned long long rejected; /* Number of times idle entry was rejected */ -#ifdef CONFIG_SUSPEND - unsigned long long s2idle_usage; - unsigned long long s2idle_time; /* in US */ -#endif + unsigned long long time; /* in US */ }; struct cpuidle_state { char name[CPUIDLE_NAME_LEN]; char desc[CPUIDLE_DESC_LEN]; - u64 exit_latency_ns; - u64 target_residency_ns; unsigned int flags; unsigned int exit_latency; /* in US */ int power_usage; /* in mW */ unsigned int target_residency; /* in US */ + bool disabled; /* disabled on all CPUs */ int (*enter) (struct cpuidle_device *dev, struct cpuidle_driver *drv, @@ -63,27 +52,21 @@ struct cpuidle_state { int (*enter_dead) (struct cpuidle_device *dev, int 
index); /* - * CPUs execute ->enter_s2idle with the local tick or entire timekeeping + * CPUs execute ->enter_freeze with the local tick or entire timekeeping * suspended, so it must not re-enable interrupts at any point (even * temporarily) or attempt to change states of clock event devices. - * - * This callback may point to the same function as ->enter if all of - * the above requirements are met by it. */ - int (*enter_s2idle)(struct cpuidle_device *dev, - struct cpuidle_driver *drv, - int index); -}; + void (*enter_freeze) (struct cpuidle_device *dev, + struct cpuidle_driver *drv, + int index); +} __do_const; +typedef struct cpuidle_state __no_const cpuidle_state_no_const; /* Idle State Flags */ -#define CPUIDLE_FLAG_NONE (0x00) -#define CPUIDLE_FLAG_POLLING BIT(0) /* polling state */ -#define CPUIDLE_FLAG_COUPLED BIT(1) /* state applies to multiple cpus */ -#define CPUIDLE_FLAG_TIMER_STOP BIT(2) /* timer is stopped on this state */ -#define CPUIDLE_FLAG_UNUSABLE BIT(3) /* avoid using this state */ -#define CPUIDLE_FLAG_OFF BIT(4) /* disable this state by default */ -#define CPUIDLE_FLAG_TLB_FLUSHED BIT(5) /* idle-state flushes TLBs */ -#define CPUIDLE_FLAG_RCU_IDLE BIT(6) /* idle-state takes care of RCU */ +#define CPUIDLE_FLAG_COUPLED (0x02) /* state applies to multiple cpus */ +#define CPUIDLE_FLAG_TIMER_STOP (0x04) /* timer is stopped on this state */ + +#define CPUIDLE_DRIVER_FLAGS_MASK (0xFFFF0000) struct cpuidle_device_kobj; struct cpuidle_state_kobj; @@ -92,14 +75,9 @@ struct cpuidle_driver_kobj; struct cpuidle_device { unsigned int registered:1; unsigned int enabled:1; - unsigned int poll_time_limit:1; unsigned int cpu; - ktime_t next_hrtimer; - int last_state_idx; - u64 last_residency_ns; - u64 poll_limit_ns; - u64 forced_idle_latency_limit_ns; + int last_residency; struct cpuidle_state_usage states_usage[CPUIDLE_STATE_MAX]; struct cpuidle_state_kobj *kobjs[CPUIDLE_STATE_MAX]; struct cpuidle_driver_kobj *kobj_driver; @@ -115,6 +93,16 @@ struct 
cpuidle_device { DECLARE_PER_CPU(struct cpuidle_device *, cpuidle_devices); DECLARE_PER_CPU(struct cpuidle_device, cpuidle_dev); +/** + * cpuidle_get_last_residency - retrieves the last state's residency time + * @dev: the target CPU + */ +static inline int cpuidle_get_last_residency(struct cpuidle_device *dev) +{ + return dev->last_residency; +} + + /**************************** * CPUIDLE DRIVER INTERFACE * ****************************/ @@ -122,6 +110,7 @@ DECLARE_PER_CPU(struct cpuidle_device, cpuidle_dev); struct cpuidle_driver { const char *name; struct module *owner; + int refcnt; /* used by the cpuidle framework to setup the broadcast timer */ unsigned int bctimer:1; @@ -132,9 +121,6 @@ struct cpuidle_driver { /* the driver handles the cpus in cpumask */ struct cpumask *cpumask; - - /* preferred governor to switch at register time */ - const char *governor; }; #ifdef CONFIG_CPU_IDLE @@ -143,18 +129,15 @@ extern bool cpuidle_not_available(struct cpuidle_driver *drv, struct cpuidle_device *dev); extern int cpuidle_select(struct cpuidle_driver *drv, - struct cpuidle_device *dev, - bool *stop_tick); + struct cpuidle_device *dev); extern int cpuidle_enter(struct cpuidle_driver *drv, struct cpuidle_device *dev, int index); extern void cpuidle_reflect(struct cpuidle_device *dev, int index); -extern u64 cpuidle_poll_time(struct cpuidle_driver *drv, - struct cpuidle_device *dev); extern int cpuidle_register_driver(struct cpuidle_driver *drv); extern struct cpuidle_driver *cpuidle_get_driver(void); -extern void cpuidle_driver_state_disabled(struct cpuidle_driver *drv, int idx, - bool disable); +extern struct cpuidle_driver *cpuidle_driver_ref(void); +extern void cpuidle_driver_unref(void); extern void cpuidle_unregister_driver(struct cpuidle_driver *drv); extern int cpuidle_register_device(struct cpuidle_device *dev); extern void cpuidle_unregister_device(struct cpuidle_device *dev); @@ -178,20 +161,17 @@ static inline bool cpuidle_not_available(struct cpuidle_driver 
*drv, struct cpuidle_device *dev) {return true; } static inline int cpuidle_select(struct cpuidle_driver *drv, - struct cpuidle_device *dev, bool *stop_tick) + struct cpuidle_device *dev) {return -ENODEV; } static inline int cpuidle_enter(struct cpuidle_driver *drv, struct cpuidle_device *dev, int index) {return -ENODEV; } static inline void cpuidle_reflect(struct cpuidle_device *dev, int index) { } -static inline u64 cpuidle_poll_time(struct cpuidle_driver *drv, - struct cpuidle_device *dev) -{return 0; } static inline int cpuidle_register_driver(struct cpuidle_driver *drv) {return -ENODEV; } static inline struct cpuidle_driver *cpuidle_get_driver(void) {return NULL; } -static inline void cpuidle_driver_state_disabled(struct cpuidle_driver *drv, - int idx, bool disable) { } +static inline struct cpuidle_driver *cpuidle_driver_ref(void) {return NULL; } +static inline void cpuidle_driver_unref(void) {} static inline void cpuidle_unregister_driver(struct cpuidle_driver *drv) { } static inline int cpuidle_register_device(struct cpuidle_device *dev) {return -ENODEV; } @@ -213,24 +193,18 @@ static inline struct cpuidle_driver *cpuidle_get_cpu_driver( static inline struct cpuidle_device *cpuidle_get_device(void) {return NULL; } #endif -#ifdef CONFIG_CPU_IDLE +#if defined(CONFIG_CPU_IDLE) && defined(CONFIG_SUSPEND) extern int cpuidle_find_deepest_state(struct cpuidle_driver *drv, - struct cpuidle_device *dev, - u64 latency_limit_ns); -extern int cpuidle_enter_s2idle(struct cpuidle_driver *drv, + struct cpuidle_device *dev); +extern int cpuidle_enter_freeze(struct cpuidle_driver *drv, struct cpuidle_device *dev); -extern void cpuidle_use_deepest_state(u64 latency_limit_ns); #else static inline int cpuidle_find_deepest_state(struct cpuidle_driver *drv, - struct cpuidle_device *dev, - u64 latency_limit_ns) + struct cpuidle_device *dev) {return -ENODEV; } -static inline int cpuidle_enter_s2idle(struct cpuidle_driver *drv, +static inline int cpuidle_enter_freeze(struct 
cpuidle_driver *drv, struct cpuidle_device *dev) {return -ENODEV; } -static inline void cpuidle_use_deepest_state(u64 latency_limit_ns) -{ -} #endif /* kernel/sched/idle.c */ @@ -245,12 +219,6 @@ static inline void cpuidle_coupled_parallel_barrier(struct cpuidle_device *dev, } #endif -#if defined(CONFIG_CPU_IDLE) && defined(CONFIG_ARCH_HAS_CPU_RELAX) -void cpuidle_poll_state_init(struct cpuidle_driver *drv); -#else -static inline void cpuidle_poll_state_init(struct cpuidle_driver *drv) {} -#endif - /****************************** * CPUIDLE GOVERNOR INTERFACE * ******************************/ @@ -266,47 +234,41 @@ struct cpuidle_governor { struct cpuidle_device *dev); int (*select) (struct cpuidle_driver *drv, - struct cpuidle_device *dev, - bool *stop_tick); + struct cpuidle_device *dev); void (*reflect) (struct cpuidle_device *dev, int index); -}; + struct module *owner; +} __do_const; + +#ifdef CONFIG_CPU_IDLE extern int cpuidle_register_governor(struct cpuidle_governor *gov); -extern s64 cpuidle_governor_latency_req(unsigned int cpu); +#else +static inline int cpuidle_register_governor(struct cpuidle_governor *gov) +{return 0;} +#endif -#define __CPU_PM_CPU_IDLE_ENTER(low_level_idle_enter, \ - idx, \ - state, \ - is_retention) \ -({ \ - int __ret = 0; \ - \ - if (!idx) { \ - cpu_do_idle(); \ - return idx; \ - } \ - \ - if (!is_retention) \ - __ret = cpu_pm_enter(); \ - if (!__ret) { \ - __ret = low_level_idle_enter(state); \ - if (!is_retention) \ - cpu_pm_exit(); \ - } \ - \ - __ret ? 
-1 : idx; \ -}) +#ifdef CONFIG_ARCH_HAS_CPU_RELAX +#define CPUIDLE_DRIVER_STATE_START 1 +#else +#define CPUIDLE_DRIVER_STATE_START 0 +#endif #define CPU_PM_CPU_IDLE_ENTER(low_level_idle_enter, idx) \ - __CPU_PM_CPU_IDLE_ENTER(low_level_idle_enter, idx, idx, 0) - -#define CPU_PM_CPU_IDLE_ENTER_RETENTION(low_level_idle_enter, idx) \ - __CPU_PM_CPU_IDLE_ENTER(low_level_idle_enter, idx, idx, 1) - -#define CPU_PM_CPU_IDLE_ENTER_PARAM(low_level_idle_enter, idx, state) \ - __CPU_PM_CPU_IDLE_ENTER(low_level_idle_enter, idx, state, 0) - -#define CPU_PM_CPU_IDLE_ENTER_RETENTION_PARAM(low_level_idle_enter, idx, state) \ - __CPU_PM_CPU_IDLE_ENTER(low_level_idle_enter, idx, state, 1) +({ \ + int __ret; \ + \ + if (!idx) { \ + cpu_do_idle(); \ + return idx; \ + } \ + \ + __ret = cpu_pm_enter(); \ + if (!__ret) { \ + __ret = low_level_idle_enter(idx); \ + cpu_pm_exit(); \ + } \ + \ + __ret ? -1 : idx; \ +}) #endif /* _LINUX_CPUIDLE_H */ diff --git a/include/linux/cpumask.h b/include/linux/cpumask.h index 1e7399fc69..258dde58d9 100644 --- a/include/linux/cpumask.h +++ b/include/linux/cpumask.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef __LINUX_CPUMASK_H #define __LINUX_CPUMASK_H @@ -10,7 +9,6 @@ #include #include #include -#include #include /* Don't assign or return these: may not be this big! */ @@ -34,9 +32,9 @@ typedef struct cpumask { DECLARE_BITMAP(bits, NR_CPUS); } cpumask_t; #define cpumask_pr_args(maskp) nr_cpu_ids, cpumask_bits(maskp) #if NR_CPUS == 1 -#define nr_cpu_ids 1U +#define nr_cpu_ids 1 #else -extern unsigned int nr_cpu_ids; +extern int nr_cpu_ids; #endif #ifdef CONFIG_CPUMASK_OFFSTACK @@ -44,7 +42,7 @@ extern unsigned int nr_cpu_ids; * not all bits may be allocated. 
*/ #define nr_cpumask_bits nr_cpu_ids #else -#define nr_cpumask_bits ((unsigned int)NR_CPUS) +#define nr_cpumask_bits NR_CPUS #endif /* @@ -91,28 +89,37 @@ extern struct cpumask __cpu_possible_mask; extern struct cpumask __cpu_online_mask; extern struct cpumask __cpu_present_mask; extern struct cpumask __cpu_active_mask; -extern struct cpumask __cpu_dying_mask; #define cpu_possible_mask ((const struct cpumask *)&__cpu_possible_mask) #define cpu_online_mask ((const struct cpumask *)&__cpu_online_mask) #define cpu_present_mask ((const struct cpumask *)&__cpu_present_mask) #define cpu_active_mask ((const struct cpumask *)&__cpu_active_mask) -#define cpu_dying_mask ((const struct cpumask *)&__cpu_dying_mask) -extern atomic_t __num_online_cpus; - -extern cpumask_t cpus_booted_once_mask; - -static inline void cpu_max_bits_warn(unsigned int cpu, unsigned int bits) -{ -#ifdef CONFIG_DEBUG_PER_CPU_MAPS - WARN_ON_ONCE(cpu >= bits); -#endif /* CONFIG_DEBUG_PER_CPU_MAPS */ -} +#if NR_CPUS > 1 +#define num_online_cpus() cpumask_weight(cpu_online_mask) +#define num_possible_cpus() cpumask_weight(cpu_possible_mask) +#define num_present_cpus() cpumask_weight(cpu_present_mask) +#define num_active_cpus() cpumask_weight(cpu_active_mask) +#define cpu_online(cpu) cpumask_test_cpu((cpu), cpu_online_mask) +#define cpu_possible(cpu) cpumask_test_cpu((cpu), cpu_possible_mask) +#define cpu_present(cpu) cpumask_test_cpu((cpu), cpu_present_mask) +#define cpu_active(cpu) cpumask_test_cpu((cpu), cpu_active_mask) +#else +#define num_online_cpus() 1U +#define num_possible_cpus() 1U +#define num_present_cpus() 1U +#define num_active_cpus() 1U +#define cpu_online(cpu) ((cpu) == 0) +#define cpu_possible(cpu) ((cpu) == 0) +#define cpu_present(cpu) ((cpu) == 0) +#define cpu_active(cpu) ((cpu) == 0) +#endif /* verify cpu argument to cpumask_* operators */ static inline unsigned int cpumask_check(unsigned int cpu) { - cpu_max_bits_warn(cpu, nr_cpumask_bits); +#ifdef CONFIG_DEBUG_PER_CPU_MAPS + 
WARN_ON_ONCE(cpu >= nr_cpumask_bits); +#endif /* CONFIG_DEBUG_PER_CPU_MAPS */ return cpu; } @@ -123,36 +130,24 @@ static inline unsigned int cpumask_first(const struct cpumask *srcp) return 0; } -static inline unsigned int cpumask_last(const struct cpumask *srcp) -{ - return 0; -} - /* Valid inputs for n are -1 and 0. */ -static inline unsigned int cpumask_next(int n, const struct cpumask *srcp) +static inline unsigned int __intentional_overflow(-1) cpumask_next(int n, const struct cpumask *srcp) { return n+1; } -static inline unsigned int cpumask_next_zero(int n, const struct cpumask *srcp) +static inline unsigned int __intentional_overflow(-1) cpumask_next_zero(int n, const struct cpumask *srcp) { return n+1; } -static inline unsigned int cpumask_next_and(int n, +static inline unsigned int __intentional_overflow(-1) cpumask_next_and(int n, const struct cpumask *srcp, const struct cpumask *andp) { return n+1; } -static inline unsigned int cpumask_next_wrap(int n, const struct cpumask *mask, - int start, bool wrap) -{ - /* cpu0 unless stop condition, wrap and at cpu0, then nr_cpumask_bits */ - return (wrap && n == 0); -} - /* cpu must be a valid cpu, ie 0, so there's no other choice. 
*/ static inline unsigned int cpumask_any_but(const struct cpumask *mask, unsigned int cpu) @@ -165,24 +160,12 @@ static inline unsigned int cpumask_local_spread(unsigned int i, int node) return 0; } -static inline int cpumask_any_and_distribute(const struct cpumask *src1p, - const struct cpumask *src2p) { - return cpumask_next_and(-1, src1p, src2p); -} - -static inline int cpumask_any_distribute(const struct cpumask *srcp) -{ - return cpumask_first(srcp); -} - #define for_each_cpu(cpu, mask) \ for ((cpu) = 0; (cpu) < 1; (cpu)++, (void)mask) #define for_each_cpu_not(cpu, mask) \ for ((cpu) = 0; (cpu) < 1; (cpu)++, (void)mask) -#define for_each_cpu_wrap(cpu, mask, start) \ - for ((cpu) = 0; (cpu) < 1; (cpu)++, (void)mask, (void)(start)) -#define for_each_cpu_and(cpu, mask1, mask2) \ - for ((cpu) = 0; (cpu) < 1; (cpu)++, (void)mask1, (void)mask2) +#define for_each_cpu_and(cpu, mask, and) \ + for ((cpu) = 0; (cpu) < 1; (cpu)++, (void)mask, (void)and) #else /** * cpumask_first - get the first cpu in a cpumask @@ -196,18 +179,20 @@ static inline unsigned int cpumask_first(const struct cpumask *srcp) } /** - * cpumask_last - get the last CPU in a cpumask - * @srcp: - the cpumask pointer + * cpumask_next - get the next cpu in a cpumask + * @n: the cpu prior to the place to search (ie. return will be > @n) + * @srcp: the cpumask pointer * - * Returns >= nr_cpumask_bits if no CPUs set. + * Returns >= nr_cpu_ids if no further cpus set. */ -static inline unsigned int cpumask_last(const struct cpumask *srcp) +static inline unsigned int __intentional_overflow(-1) cpumask_next(int n, const struct cpumask *srcp) { - return find_last_bit(cpumask_bits(srcp), nr_cpumask_bits); + /* -1 is a legal arg here. 
*/ + if (n != -1) + cpumask_check(n); + return find_next_bit(cpumask_bits(srcp), nr_cpumask_bits, n+1); } -unsigned int __pure cpumask_next(int n, const struct cpumask *srcp); - /** * cpumask_next_zero - get the next unset cpu in a cpumask * @n: the cpu prior to the place to search (ie. return will be > @n) @@ -215,7 +200,7 @@ unsigned int __pure cpumask_next(int n, const struct cpumask *srcp); * * Returns >= nr_cpu_ids if no further cpus unset. */ -static inline unsigned int cpumask_next_zero(int n, const struct cpumask *srcp) +static inline unsigned int __intentional_overflow(-1) cpumask_next_zero(int n, const struct cpumask *srcp) { /* -1 is a legal arg here. */ if (n != -1) @@ -223,12 +208,9 @@ static inline unsigned int cpumask_next_zero(int n, const struct cpumask *srcp) return find_next_zero_bit(cpumask_bits(srcp), nr_cpumask_bits, n+1); } -int __pure cpumask_next_and(int n, const struct cpumask *, const struct cpumask *); -int __pure cpumask_any_but(const struct cpumask *mask, unsigned int cpu); +int cpumask_next_and(int n, const struct cpumask *, const struct cpumask *) __intentional_overflow(-1); +int cpumask_any_but(const struct cpumask *mask, unsigned int cpu); unsigned int cpumask_local_spread(unsigned int i, int node); -int cpumask_any_and_distribute(const struct cpumask *src1p, - const struct cpumask *src2p); -int cpumask_any_distribute(const struct cpumask *srcp); /** * for_each_cpu - iterate over every cpu in a mask @@ -254,40 +236,23 @@ int cpumask_any_distribute(const struct cpumask *srcp); (cpu) = cpumask_next_zero((cpu), (mask)), \ (cpu) < nr_cpu_ids;) -extern int cpumask_next_wrap(int n, const struct cpumask *mask, int start, bool wrap); - -/** - * for_each_cpu_wrap - iterate over every cpu in a mask, starting at a specified location - * @cpu: the (optionally unsigned) integer iterator - * @mask: the cpumask pointer - * @start: the start location - * - * The implementation does not assume any bit in @mask is set (including @start). 
- * - * After the loop, cpu is >= nr_cpu_ids. - */ -#define for_each_cpu_wrap(cpu, mask, start) \ - for ((cpu) = cpumask_next_wrap((start)-1, (mask), (start), false); \ - (cpu) < nr_cpumask_bits; \ - (cpu) = cpumask_next_wrap((cpu), (mask), (start), true)) - /** * for_each_cpu_and - iterate over every cpu in both masks * @cpu: the (optionally unsigned) integer iterator - * @mask1: the first cpumask pointer - * @mask2: the second cpumask pointer + * @mask: the first cpumask pointer + * @and: the second cpumask pointer * * This saves a temporary CPU mask in many places. It is equivalent to: * struct cpumask tmp; - * cpumask_and(&tmp, &mask1, &mask2); + * cpumask_and(&tmp, &mask, &and); * for_each_cpu(cpu, &tmp) * ... * * After the loop, cpu is >= nr_cpu_ids. */ -#define for_each_cpu_and(cpu, mask1, mask2) \ +#define for_each_cpu_and(cpu, mask, and) \ for ((cpu) = -1; \ - (cpu) = cpumask_next_and((cpu), (mask1), (mask2)), \ + (cpu) = cpumask_next_and((cpu), (mask), (and)), \ (cpu) < nr_cpu_ids;) #endif /* SMP */ @@ -311,12 +276,6 @@ static inline void cpumask_set_cpu(unsigned int cpu, struct cpumask *dstp) set_bit(cpumask_check(cpu), cpumask_bits(dstp)); } -static inline void __cpumask_set_cpu(unsigned int cpu, struct cpumask *dstp) -{ - __set_bit(cpumask_check(cpu), cpumask_bits(dstp)); -} - - /** * cpumask_clear_cpu - clear a cpu in a cpumask * @cpu: cpu number (< nr_cpu_ids) @@ -327,11 +286,6 @@ static inline void cpumask_clear_cpu(int cpu, struct cpumask *dstp) clear_bit(cpumask_check(cpu), cpumask_bits(dstp)); } -static inline void __cpumask_clear_cpu(int cpu, struct cpumask *dstp) -{ - __clear_bit(cpumask_check(cpu), cpumask_bits(dstp)); -} - /** * cpumask_test_cpu - test for a cpu in a cpumask * @cpu: cpu number (< nr_cpu_ids) @@ -473,20 +427,6 @@ static inline bool cpumask_equal(const struct cpumask *src1p, nr_cpumask_bits); } -/** - * cpumask_or_equal - *src1p | *src2p == *src3p - * @src1p: the first input - * @src2p: the second input - * @src3p: the third 
input - */ -static inline bool cpumask_or_equal(const struct cpumask *src1p, - const struct cpumask *src2p, - const struct cpumask *src3p) -{ - return bitmap_or_equal(cpumask_bits(src1p), cpumask_bits(src2p), - cpumask_bits(src3p), nr_cpumask_bits); -} - /** * cpumask_intersects - (*src1p & *src2p) != 0 * @src1p: the first input @@ -535,7 +475,7 @@ static inline bool cpumask_full(const struct cpumask *srcp) * cpumask_weight - Count of bits in *srcp * @srcp: the cpumask to count bits (< nr_cpu_ids) in. */ -static inline unsigned int cpumask_weight(const struct cpumask *srcp) +static inline unsigned int __intentional_overflow(-1) cpumask_weight(const struct cpumask *srcp) { return bitmap_weight(cpumask_bits(srcp), nr_cpumask_bits); } @@ -647,7 +587,10 @@ static inline int cpumask_parselist_user(const char __user *buf, int len, */ static inline int cpumask_parse(const char *buf, struct cpumask *dstp) { - return bitmap_parse(buf, UINT_MAX, cpumask_bits(dstp), nr_cpumask_bits); + char *nl = strchr(buf, '\n'); + unsigned int len = nl ? (unsigned int)(nl - buf) : strlen(buf); + + return bitmap_parse(buf, len, cpumask_bits(dstp), nr_cpumask_bits); } /** @@ -665,7 +608,7 @@ static inline int cpulist_parse(const char *buf, struct cpumask *dstp) /** * cpumask_size - size to allocate for a 'struct cpumask' in bytes */ -static inline unsigned int cpumask_size(void) +static inline size_t cpumask_size(void) { return BITS_TO_LONGS(nr_cpumask_bits) * sizeof(long); } @@ -706,15 +649,11 @@ static inline unsigned int cpumask_size(void) * used. Please use this_cpu_cpumask_var_t in those cases. The direct use * of this_cpu_ptr() or this_cpu_read() will lead to failures when the * other type of cpumask_var_t implementation is configured. - * - * Please also note that __cpumask_var_read_mostly can be used to declare - * a cpumask_var_t variable itself (not its content) as read mostly. 
*/ #ifdef CONFIG_CPUMASK_OFFSTACK typedef struct cpumask *cpumask_var_t; -#define this_cpu_cpumask_var_ptr(x) this_cpu_read(x) -#define __cpumask_var_read_mostly __read_mostly +#define this_cpu_cpumask_var_ptr(x) this_cpu_read(x) bool alloc_cpumask_var_node(cpumask_var_t *mask, gfp_t flags, int node); bool alloc_cpumask_var(cpumask_var_t *mask, gfp_t flags); @@ -724,16 +663,10 @@ void alloc_bootmem_cpumask_var(cpumask_var_t *mask); void free_cpumask_var(cpumask_var_t mask); void free_bootmem_cpumask_var(cpumask_var_t mask); -static inline bool cpumask_available(cpumask_var_t mask) -{ - return mask != NULL; -} - #else typedef struct cpumask cpumask_var_t[1]; #define this_cpu_cpumask_var_ptr(x) this_cpu_ptr(x) -#define __cpumask_var_read_mostly static inline bool alloc_cpumask_var(cpumask_var_t *mask, gfp_t flags) { @@ -770,11 +703,6 @@ static inline void free_cpumask_var(cpumask_var_t mask) static inline void free_bootmem_cpumask_var(cpumask_var_t mask) { } - -static inline bool cpumask_available(cpumask_var_t mask) -{ - return true; -} #endif /* CONFIG_CPUMASK_OFFSTACK */ /* It's common to want to use cpu_all_mask in struct member initializers, @@ -794,11 +722,6 @@ void init_cpu_present(const struct cpumask *src); void init_cpu_possible(const struct cpumask *src); void init_cpu_online(const struct cpumask *src); -static inline void reset_cpu_possible_mask(void) -{ - bitmap_zero(cpumask_bits(&__cpu_possible_mask), NR_CPUS); -} - static inline void set_cpu_possible(unsigned int cpu, bool possible) { @@ -817,7 +740,14 @@ set_cpu_present(unsigned int cpu, bool present) cpumask_clear_cpu(cpu, &__cpu_present_mask); } -void set_cpu_online(unsigned int cpu, bool online); +static inline void +set_cpu_online(unsigned int cpu, bool online) +{ + if (online) + cpumask_set_cpu(cpu, &__cpu_online_mask); + else + cpumask_clear_cpu(cpu, &__cpu_online_mask); +} static inline void set_cpu_active(unsigned int cpu, bool active) @@ -828,14 +758,6 @@ set_cpu_active(unsigned int cpu, bool 
active) cpumask_clear_cpu(cpu, &__cpu_active_mask); } -static inline void -set_cpu_dying(unsigned int cpu, bool dying) -{ - if (dying) - cpumask_set_cpu(cpu, &__cpu_dying_mask); - else - cpumask_clear_cpu(cpu, &__cpu_dying_mask); -} /** * to_cpumask - convert an NR_CPUS bitmap to a struct cpumask * @@ -873,82 +795,6 @@ static inline const struct cpumask *get_cpu_mask(unsigned int cpu) return to_cpumask(p); } -#if NR_CPUS > 1 -/** - * num_online_cpus() - Read the number of online CPUs - * - * Despite the fact that __num_online_cpus is of type atomic_t, this - * interface gives only a momentary snapshot and is not protected against - * concurrent CPU hotplug operations unless invoked from a cpuhp_lock held - * region. - */ -static inline unsigned int num_online_cpus(void) -{ - return atomic_read(&__num_online_cpus); -} -#define num_possible_cpus() cpumask_weight(cpu_possible_mask) -#define num_present_cpus() cpumask_weight(cpu_present_mask) -#define num_active_cpus() cpumask_weight(cpu_active_mask) - -static inline bool cpu_online(unsigned int cpu) -{ - return cpumask_test_cpu(cpu, cpu_online_mask); -} - -static inline bool cpu_possible(unsigned int cpu) -{ - return cpumask_test_cpu(cpu, cpu_possible_mask); -} - -static inline bool cpu_present(unsigned int cpu) -{ - return cpumask_test_cpu(cpu, cpu_present_mask); -} - -static inline bool cpu_active(unsigned int cpu) -{ - return cpumask_test_cpu(cpu, cpu_active_mask); -} - -static inline bool cpu_dying(unsigned int cpu) -{ - return cpumask_test_cpu(cpu, cpu_dying_mask); -} - -#else - -#define num_online_cpus() 1U -#define num_possible_cpus() 1U -#define num_present_cpus() 1U -#define num_active_cpus() 1U - -static inline bool cpu_online(unsigned int cpu) -{ - return cpu == 0; -} - -static inline bool cpu_possible(unsigned int cpu) -{ - return cpu == 0; -} - -static inline bool cpu_present(unsigned int cpu) -{ - return cpu == 0; -} - -static inline bool cpu_active(unsigned int cpu) -{ - return cpu == 0; -} - -static 
inline bool cpu_dying(unsigned int cpu) -{ - return false; -} - -#endif /* NR_CPUS > 1 */ - #define cpu_is_offline(cpu) unlikely(!cpu_online(cpu)) #if NR_CPUS <= BITS_PER_LONG @@ -983,45 +829,6 @@ cpumap_print_to_pagebuf(bool list, char *buf, const struct cpumask *mask) nr_cpu_ids); } -/** - * cpumap_print_bitmask_to_buf - copies the cpumask into the buffer as - * hex values of cpumask - * - * @buf: the buffer to copy into - * @mask: the cpumask to copy - * @off: in the string from which we are copying, we copy to @buf - * @count: the maximum number of bytes to print - * - * The function prints the cpumask into the buffer as hex values of - * cpumask; Typically used by bin_attribute to export cpumask bitmask - * ABI. - * - * Returns the length of how many bytes have been copied, excluding - * terminating '\0'. - */ -static inline ssize_t -cpumap_print_bitmask_to_buf(char *buf, const struct cpumask *mask, - loff_t off, size_t count) -{ - return bitmap_print_bitmask_to_buf(buf, cpumask_bits(mask), - nr_cpu_ids, off, count) - 1; -} - -/** - * cpumap_print_list_to_buf - copies the cpumask into the buffer as - * comma-separated list of cpus - * - * Everything is same with the above cpumap_print_bitmask_to_buf() - * except the print format. 
- */ -static inline ssize_t -cpumap_print_list_to_buf(char *buf, const struct cpumask *mask, - loff_t off, size_t count) -{ - return bitmap_print_list_to_buf(buf, cpumask_bits(mask), - nr_cpu_ids, off, count) - 1; -} - #if NR_CPUS <= BITS_PER_LONG #define CPU_MASK_ALL \ (cpumask_t) { { \ diff --git a/include/linux/cpuset.h b/include/linux/cpuset.h index d2b9c41c8e..bfc204e703 100644 --- a/include/linux/cpuset.h +++ b/include/linux/cpuset.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_CPUSET_H #define _LINUX_CPUSET_H /* @@ -10,56 +9,40 @@ */ #include -#include -#include #include #include #include -#include #include #ifdef CONFIG_CPUSETS -/* - * Static branch rewrites can happen in an arbitrary order for a given - * key. In code paths where we need to loop with read_mems_allowed_begin() and - * read_mems_allowed_retry() to get a consistent view of mems_allowed, we need - * to ensure that begin() always gets rewritten before retry() in the - * disabled -> enabled transition. If not, then if local irqs are disabled - * around the loop, we can deadlock since retry() would always be - * comparing the latest value of the mems_allowed seqcount against 0 as - * begin() still would see cpusets_enabled() as false. The enabled -> disabled - * transition should happen in reverse order for the same reasons (want to stop - * looking at real value of mems_allowed.sequence in retry() first). 
- */ -extern struct static_key_false cpusets_pre_enable_key; extern struct static_key_false cpusets_enabled_key; static inline bool cpusets_enabled(void) { return static_branch_unlikely(&cpusets_enabled_key); } +static inline int nr_cpusets(void) +{ + /* jump label reference count + the top-level cpuset */ + return static_key_count(&cpusets_enabled_key.key) + 1; +} + static inline void cpuset_inc(void) { - static_branch_inc_cpuslocked(&cpusets_pre_enable_key); - static_branch_inc_cpuslocked(&cpusets_enabled_key); + static_branch_inc(&cpusets_enabled_key); } static inline void cpuset_dec(void) { - static_branch_dec_cpuslocked(&cpusets_enabled_key); - static_branch_dec_cpuslocked(&cpusets_pre_enable_key); + static_branch_dec(&cpusets_enabled_key); } extern int cpuset_init(void); extern void cpuset_init_smp(void); -extern void cpuset_force_rebuild(void); -extern void cpuset_update_active_cpus(void); -extern void cpuset_wait_for_hotplug(void); -extern void cpuset_read_lock(void); -extern void cpuset_read_unlock(void); +extern void cpuset_update_active_cpus(bool cpu_online); extern void cpuset_cpus_allowed(struct task_struct *p, struct cpumask *mask); -extern bool cpuset_cpus_allowed_fallback(struct task_struct *p); +extern void cpuset_cpus_allowed_fallback(struct task_struct *p); extern nodemask_t cpuset_mems_allowed(struct task_struct *p); #define cpuset_current_mems_allowed (current->mems_allowed) void cpuset_init_current_mems_allowed(void); @@ -115,7 +98,7 @@ static inline int cpuset_do_slab_mem_spread(void) return task_spread_slab(current); } -extern bool current_cpuset_is_being_rebound(void); +extern int current_cpuset_is_being_rebound(void); extern void rebuild_sched_domains(void); @@ -130,7 +113,7 @@ extern void cpuset_print_current_mems_allowed(void); */ static inline unsigned int read_mems_allowed_begin(void) { - if (!static_branch_unlikely(&cpusets_pre_enable_key)) + if (!cpusets_enabled()) return 0; return read_seqcount_begin(¤t->mems_allowed_seq); @@ -144,7 
+127,7 @@ static inline unsigned int read_mems_allowed_begin(void) */ static inline bool read_mems_allowed_retry(unsigned int seq) { - if (!static_branch_unlikely(&cpusets_enabled_key)) + if (!cpusets_enabled()) return false; return read_seqcount_retry(¤t->mems_allowed_seq, seq); @@ -170,27 +153,19 @@ static inline bool cpusets_enabled(void) { return false; } static inline int cpuset_init(void) { return 0; } static inline void cpuset_init_smp(void) {} -static inline void cpuset_force_rebuild(void) { } - -static inline void cpuset_update_active_cpus(void) +static inline void cpuset_update_active_cpus(bool cpu_online) { partition_sched_domains(1, NULL, NULL); } -static inline void cpuset_wait_for_hotplug(void) { } - -static inline void cpuset_read_lock(void) { } -static inline void cpuset_read_unlock(void) { } - static inline void cpuset_cpus_allowed(struct task_struct *p, struct cpumask *mask) { - cpumask_copy(mask, task_cpu_possible_mask(p)); + cpumask_copy(mask, cpu_possible_mask); } -static inline bool cpuset_cpus_allowed_fallback(struct task_struct *p) +static inline void cpuset_cpus_allowed_fallback(struct task_struct *p) { - return false; } static inline nodemask_t cpuset_mems_allowed(struct task_struct *p) @@ -254,9 +229,9 @@ static inline int cpuset_do_slab_mem_spread(void) return 0; } -static inline bool current_cpuset_is_being_rebound(void) +static inline int current_cpuset_is_being_rebound(void) { - return false; + return 0; } static inline void rebuild_sched_domains(void) diff --git a/include/linux/cputime.h b/include/linux/cputime.h new file mode 100644 index 0000000000..f2eb2ee535 --- /dev/null +++ b/include/linux/cputime.h @@ -0,0 +1,16 @@ +#ifndef __LINUX_CPUTIME_H +#define __LINUX_CPUTIME_H + +#include + +#ifndef cputime_to_nsecs +# define cputime_to_nsecs(__ct) \ + (cputime_to_usecs(__ct) * NSEC_PER_USEC) +#endif + +#ifndef nsecs_to_cputime +# define nsecs_to_cputime(__nsecs) \ + usecs_to_cputime((__nsecs) / NSEC_PER_USEC) +#endif + +#endif /* 
__LINUX_CPUTIME_H */ diff --git a/include/linux/crash_dump.h b/include/linux/crash_dump.h index 2618577a4d..3873697ba2 100644 --- a/include/linux/crash_dump.h +++ b/include/linux/crash_dump.h @@ -1,23 +1,19 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef LINUX_CRASH_DUMP_H #define LINUX_CRASH_DUMP_H +#ifdef CONFIG_CRASH_DUMP #include #include #include -#include -#include -#include /* for pgprot_t */ +#include /* for pgprot_t */ -/* For IS_ENABLED(CONFIG_CRASH_DUMP) */ #define ELFCORE_ADDR_MAX (-1ULL) #define ELFCORE_ADDR_ERR (-2ULL) extern unsigned long long elfcorehdr_addr; extern unsigned long long elfcorehdr_size; -#ifdef CONFIG_CRASH_DUMP extern int elfcorehdr_alloc(unsigned long long *addr, unsigned long long *size); extern void elfcorehdr_free(unsigned long long addr); extern ssize_t elfcorehdr_read(char *buf, size_t count, u64 *ppos); @@ -28,10 +24,6 @@ extern int remap_oldmem_pfn_range(struct vm_area_struct *vma, extern ssize_t copy_oldmem_page(unsigned long, char *, size_t, unsigned long, int); -extern ssize_t copy_oldmem_page_encrypted(unsigned long pfn, char *buf, - size_t csize, unsigned long offset, - int userbuf); - void vmcore_cleanup(void); /* Architecture code defines this if there are other possible ELF @@ -59,13 +51,13 @@ void vmcore_cleanup(void); * has passed the elf core header address on command line. * * This is not just a test if CONFIG_CRASH_DUMP is enabled or not. It will - * return true if CONFIG_CRASH_DUMP=y and if kernel is booting after a panic - * of previous kernel. + * return 1 if CONFIG_CRASH_DUMP=y and if kernel is booting after a panic of + * previous kernel. */ -static inline bool is_kdump_kernel(void) +static inline int is_kdump_kernel(void) { - return elfcorehdr_addr != ELFCORE_ADDR_MAX; + return (elfcorehdr_addr != ELFCORE_ADDR_MAX) ? 
1 : 0; } /* is_vmcore_usable() checks if the kernel is booting after a panic and @@ -96,37 +88,8 @@ extern int register_oldmem_pfn_is_ram(int (*fn)(unsigned long pfn)); extern void unregister_oldmem_pfn_is_ram(void); #else /* !CONFIG_CRASH_DUMP */ -static inline bool is_kdump_kernel(void) { return 0; } +static inline int is_kdump_kernel(void) { return 0; } #endif /* CONFIG_CRASH_DUMP */ -/* Device Dump information to be filled by drivers */ -struct vmcoredd_data { - char dump_name[VMCOREDD_MAX_NAME_BYTES]; /* Unique name of the dump */ - unsigned int size; /* Size of the dump */ - /* Driver's registered callback to be invoked to collect dump */ - int (*vmcoredd_callback)(struct vmcoredd_data *data, void *buf); -}; - -#ifdef CONFIG_PROC_VMCORE_DEVICE_DUMP -int vmcore_add_device_dump(struct vmcoredd_data *data); -#else -static inline int vmcore_add_device_dump(struct vmcoredd_data *data) -{ - return -EOPNOTSUPP; -} -#endif /* CONFIG_PROC_VMCORE_DEVICE_DUMP */ - -#ifdef CONFIG_PROC_VMCORE -ssize_t read_from_oldmem(char *buf, size_t count, - u64 *ppos, int userbuf, - bool encrypted); -#else -static inline ssize_t read_from_oldmem(char *buf, size_t count, - u64 *ppos, int userbuf, - bool encrypted) -{ - return -EOPNOTSUPP; -} -#endif /* CONFIG_PROC_VMCORE */ - +extern unsigned long saved_max_pfn; #endif /* LINUX_CRASHDUMP_H */ diff --git a/include/linux/crc-ccitt.h b/include/linux/crc-ccitt.h index 72c92c396b..f52696a1ff 100644 --- a/include/linux/crc-ccitt.h +++ b/include/linux/crc-ccitt.h @@ -1,23 +1,15 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_CRC_CCITT_H #define _LINUX_CRC_CCITT_H #include extern u16 const crc_ccitt_table[256]; -extern u16 const crc_ccitt_false_table[256]; extern u16 crc_ccitt(u16 crc, const u8 *buffer, size_t len); -extern u16 crc_ccitt_false(u16 crc, const u8 *buffer, size_t len); static inline u16 crc_ccitt_byte(u16 crc, const u8 c) { return (crc >> 8) ^ crc_ccitt_table[(crc ^ c) & 0xff]; } -static inline u16 
crc_ccitt_false_byte(u16 crc, const u8 c) -{ - return (crc << 8) ^ crc_ccitt_false_table[(crc >> 8) ^ c]; -} - #endif /* _LINUX_CRC_CCITT_H */ diff --git a/include/linux/crc-itu-t.h b/include/linux/crc-itu-t.h index a4367051e1..a9953c762e 100644 --- a/include/linux/crc-itu-t.h +++ b/include/linux/crc-itu-t.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * crc-itu-t.h - CRC ITU-T V.41 routine * @@ -6,6 +5,9 @@ * Width 16 * Poly 0x1021 (x^16 + x^12 + x^15 + 1) * Init 0 + * + * This source code is licensed under the GNU General Public License, + * Version 2. See the file COPYING for more details. */ #ifndef CRC_ITU_T_H diff --git a/include/linux/crc-t10dif.h b/include/linux/crc-t10dif.h index 6bb0c0bf35..d81961e9e3 100644 --- a/include/linux/crc-t10dif.h +++ b/include/linux/crc-t10dif.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_CRC_T10DIF_H #define _LINUX_CRC_T10DIF_H @@ -6,7 +5,6 @@ #define CRC_T10DIF_DIGEST_SIZE 2 #define CRC_T10DIF_BLOCK_SIZE 1 -#define CRC_T10DIF_STRING "crct10dif" extern __u16 crc_t10dif_generic(__u16 crc, const unsigned char *buffer, size_t len); diff --git a/include/linux/crc16.h b/include/linux/crc16.h index 9fa74529b3..9443c084f8 100644 --- a/include/linux/crc16.h +++ b/include/linux/crc16.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * crc16.h - CRC-16 routine * @@ -8,6 +7,9 @@ * Init 0 * * Copyright (c) 2005 Ben Gardner + * + * This source code is licensed under the GNU General Public License, + * Version 2. See the file COPYING for more details. 
*/ #ifndef __CRC16_H diff --git a/include/linux/crc32c.h b/include/linux/crc32c.h index bd21af828f..bd8b44d96b 100644 --- a/include/linux/crc32c.h +++ b/include/linux/crc32c.h @@ -1,11 +1,9 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_CRC32C_H #define _LINUX_CRC32C_H #include extern u32 crc32c(u32 crc, const void *address, unsigned int length); -extern const char *crc32c_impl(void); /* This macro exists for backwards-compatibility. */ #define crc32c_le crc32c diff --git a/include/linux/crc7.h b/include/linux/crc7.h index b462842f3c..d590765106 100644 --- a/include/linux/crc7.h +++ b/include/linux/crc7.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_CRC7_H #define _LINUX_CRC7_H #include diff --git a/include/linux/crc8.h b/include/linux/crc8.h index 674045c59a..13c8dabb04 100644 --- a/include/linux/crc8.h +++ b/include/linux/crc8.h @@ -96,6 +96,6 @@ void crc8_populate_msb(u8 table[CRC8_TABLE_SIZE], u8 polynomial); * Williams, Ross N., rossross.net * (see URL http://www.ross.net/crc/download/crc_v3.txt). */ -u8 crc8(const u8 table[CRC8_TABLE_SIZE], const u8 *pdata, size_t nbytes, u8 crc); +u8 crc8(const u8 table[CRC8_TABLE_SIZE], u8 *pdata, size_t nbytes, u8 crc); #endif /* __CRC8_H_ */ diff --git a/include/linux/cred.h b/include/linux/cred.h index fcbc6885cc..0de3207d3e 100644 --- a/include/linux/cred.h +++ b/include/linux/cred.h @@ -1,8 +1,12 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ -/* Credentials management - see Documentation/security/credentials.rst +/* Credentials management - see Documentation/security/credentials.txt * * Copyright (C) 2008 Red Hat, Inc. All Rights Reserved. * Written by David Howells (dhowells@redhat.com) + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public Licence + * as published by the Free Software Foundation; either version + * 2 of the Licence, or (at your option) any later version. 
*/ #ifndef _LINUX_CRED_H @@ -11,11 +15,11 @@ #include #include #include +#include #include #include -#include -#include +struct user_struct; struct cred; struct inode; @@ -25,7 +29,7 @@ struct inode; struct group_info { atomic_t usage; int ngroups; - kgid_t gid[]; + kgid_t gid[0]; } __randomize_layout; /** @@ -53,18 +57,13 @@ do { \ groups_free(group_info); \ } while (0) +extern struct group_info init_groups; #ifdef CONFIG_MULTIUSER extern struct group_info *groups_alloc(int); extern void groups_free(struct group_info *); extern int in_group_p(kgid_t); extern int in_egroup_p(kgid_t); -extern int groups_search(const struct group_info *, kgid_t); - -extern int set_current_groups(struct group_info *); -extern void set_groups(struct cred *, struct group_info *); -extern bool may_setgroups(void); -extern void groups_sort(struct group_info *); #else static inline void groups_free(struct group_info *group_info) { @@ -78,11 +77,11 @@ static inline int in_egroup_p(kgid_t grp) { return 1; } -static inline int groups_search(const struct group_info *group_info, kgid_t grp) -{ - return 1; -} #endif +extern int set_current_groups(struct group_info *); +extern void set_groups(struct cred *, struct group_info *); +extern int groups_search(const struct group_info *, kgid_t); +extern bool may_setgroups(void); /* * The security context of a task @@ -133,23 +132,18 @@ struct cred { #ifdef CONFIG_KEYS unsigned char jit_keyring; /* default keyring to attach requested * keys to */ - struct key *session_keyring; /* keyring inherited over fork */ + struct key __rcu *session_keyring; /* keyring inherited over fork */ struct key *process_keyring; /* keyring private to this process */ struct key *thread_keyring; /* keyring private to this thread */ struct key *request_key_auth; /* assumed request_key authority */ #endif #ifdef CONFIG_SECURITY - void *security; /* LSM security */ + void *security; /* subjective LSM security */ #endif struct user_struct *user; /* real user ID subscription */ 
struct user_namespace *user_ns; /* user_ns the caps and keyrings are relative to. */ - struct ucounts *ucounts; struct group_info *group_info; /* supplementary groups for euid/fsgid */ - /* RCU deletion */ - union { - int non_rcu; /* Can we skip RCU deletion? */ - struct rcu_head rcu; /* RCU deletion hook */ - }; + struct rcu_head rcu; /* RCU deletion hook */ } __randomize_layout; extern void __put_cred(struct cred *); @@ -168,9 +162,7 @@ extern int change_create_files_as(struct cred *, struct inode *); extern int set_security_override(struct cred *, u32); extern int set_security_override_from_ctx(struct cred *, const char *); extern int set_create_files_as(struct cred *, struct inode *); -extern int cred_fscmp(const struct cred *, const struct cred *); extern void __init cred_init(void); -extern int set_cred_ucounts(struct cred *); /* * check for validity of credentials @@ -210,6 +202,9 @@ static inline void validate_creds_for_do_exit(struct task_struct *tsk) static inline void validate_process_creds(void) { } +static inline void validate_task_creds(struct task_struct *task) +{ +} #endif static inline bool cap_ambient_invariant_ok(const struct cred *cred) @@ -237,7 +232,7 @@ static inline struct cred *get_new_cred(struct cred *cred) * @cred: The credentials to reference * * Get a reference on the specified set of credentials. The caller must - * release the reference. If %NULL is passed, it is returned with no action. + * release the reference. * * This is used to deal with a committed set of credentials. 
Although the * pointer is const, this will temporarily discard the const and increment the @@ -248,31 +243,16 @@ static inline struct cred *get_new_cred(struct cred *cred) static inline const struct cred *get_cred(const struct cred *cred) { struct cred *nonconst_cred = (struct cred *) cred; - if (!cred) - return cred; validate_creds(cred); - nonconst_cred->non_rcu = 0; return get_new_cred(nonconst_cred); } -static inline const struct cred *get_cred_rcu(const struct cred *cred) -{ - struct cred *nonconst_cred = (struct cred *) cred; - if (!cred) - return NULL; - if (!atomic_inc_not_zero(&nonconst_cred->usage)) - return NULL; - validate_creds(cred); - nonconst_cred->non_rcu = 0; - return cred; -} - /** * put_cred - Release a reference to a set of credentials * @cred: The credentials to release * * Release a reference to a set of credentials, deleting them when the last ref - * is released. If %NULL is passed, nothing is done. + * is released. * * This takes a const pointer to a set of credentials because the credentials * on task_struct are attached by const pointers to prevent accidental @@ -282,11 +262,9 @@ static inline void put_cred(const struct cred *_cred) { struct cred *cred = (struct cred *) _cred; - if (cred) { - validate_creds(cred); - if (atomic_dec_and_test(&(cred)->usage)) - __put_cred(cred); - } + validate_creds(cred); + if (atomic_dec_and_test(&(cred)->usage)) + __put_cred(cred); } /** @@ -371,7 +349,7 @@ static inline void put_cred(const struct cred *_cred) #define task_uid(task) (task_cred_xxx((task), uid)) #define task_euid(task) (task_cred_xxx((task), euid)) -#define task_ucounts(task) (task_cred_xxx((task), ucounts)) +#define task_securebits(task) (task_cred_xxx((task), securebits)) #define current_cred_xxx(xxx) \ ({ \ @@ -388,7 +366,7 @@ static inline void put_cred(const struct cred *_cred) #define current_fsgid() (current_cred_xxx(fsgid)) #define current_cap() (current_cred_xxx(cap_effective)) #define current_user() (current_cred_xxx(user)) 
-#define current_ucounts() (current_cred_xxx(ucounts)) +#define current_security() (current_cred_xxx(security)) extern struct user_namespace init_user_ns; #ifdef CONFIG_USER_NS diff --git a/include/linux/crush/crush.h b/include/linux/crush/crush.h index 30dba392b7..be8f12b8f1 100644 --- a/include/linux/crush/crush.h +++ b/include/linux/crush/crush.h @@ -1,9 +1,7 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef CEPH_CRUSH_CRUSH_H #define CEPH_CRUSH_CRUSH_H #ifdef __KERNEL__ -# include # include #else # include "crush_compat.h" @@ -17,7 +15,7 @@ * The algorithm was originally described in detail in this paper * (although the algorithm has evolved somewhat since then): * - * https://www.ssrc.ucsc.edu/Papers/weil-sc06.pdf + * http://www.ssrc.ucsc.edu/Papers/weil-sc06.pdf * * LGPL2 */ @@ -87,7 +85,7 @@ struct crush_rule_mask { struct crush_rule { __u32 len; struct crush_rule_mask mask; - struct crush_rule_step steps[]; + struct crush_rule_step steps[0]; }; #define crush_rule_size(len) (sizeof(struct crush_rule) + \ @@ -137,68 +135,13 @@ struct crush_bucket { __u32 size; /* num items */ __s32 *items; -}; - -/** @ingroup API - * - * Replacement weights for each item in a bucket. The size of the - * array must be exactly the size of the straw2 bucket, just as the - * item_weights array. - * - */ -struct crush_weight_set { - __u32 *weights; /*!< 16.16 fixed point weights - in the same order as items */ - __u32 size; /*!< size of the __weights__ array */ -}; - -/** @ingroup API - * - * Replacement weights and ids for a given straw2 bucket, for - * placement purposes. - * - * When crush_do_rule() chooses the Nth item from a straw2 bucket, the - * replacement weights found at __weight_set[N]__ are used instead of - * the weights from __item_weights__. If __N__ is greater than - * __weight_set_size__, the weights found at __weight_set_size-1__ are - * used instead. 
For instance if __weight_set__ is: - * - * [ [ 0x10000, 0x20000 ], // position 0 - * [ 0x20000, 0x40000 ] ] // position 1 - * - * choosing the 0th item will use position 0 weights [ 0x10000, 0x20000 ] - * choosing the 1th item will use position 1 weights [ 0x20000, 0x40000 ] - * choosing the 2th item will use position 1 weights [ 0x20000, 0x40000 ] - * etc. - * - */ -struct crush_choose_arg { - __s32 *ids; /*!< values to use instead of items */ - __u32 ids_size; /*!< size of the __ids__ array */ - struct crush_weight_set *weight_set; /*!< weight replacements for - a given position */ - __u32 weight_set_size; /*!< size of the __weight_set__ array */ -}; - -/** @ingroup API - * - * Replacement weights and ids for each bucket in the crushmap. The - * __size__ of the __args__ array must be exactly the same as the - * __map->max_buckets__. - * - * The __crush_choose_arg__ at index N will be used when choosing - * an item from the bucket __map->buckets[N]__ bucket, provided it - * is a straw2 bucket. - * - */ -struct crush_choose_arg_map { -#ifdef __KERNEL__ - struct rb_node node; - s64 choose_args_index; -#endif - struct crush_choose_arg *args; /*!< replacement for each bucket - in the crushmap */ - __u32 size; /*!< size of the __args__ array */ + /* + * cached random permutation: used for uniform bucket and for + * the linear search fallback for the other bucket types. + */ + __u32 perm_x; /* @x for which *perm is defined */ + __u32 perm_n; /* num elements of *perm that are permuted/defined */ + __u32 *perm; }; struct crush_bucket_uniform { @@ -268,21 +211,6 @@ struct crush_map { * device fails. */ __u8 chooseleaf_stable; - /* - * This value is calculated after decode or construction by - * the builder. It is exposed here (rather than having a - * 'build CRUSH working space' function) so that callers can - * reserve a static buffer, allocate space on the stack, or - * otherwise avoid calling into the heap allocator if they - * want to. 
The size of the working space depends on the map, - * while the size of the scratch vector passed to the mapper - * depends on the size of the desired result set. - * - * Nothing stops the caller from allocating both in one swell - * foop and passing in two points, though. - */ - size_t working_size; - #ifndef __KERNEL__ /* * version 0 (original) of straw_calc has various flaws. version 1 @@ -300,15 +228,6 @@ struct crush_map { __u32 allowed_bucket_algs; __u32 *choose_tries; -#else - /* device/bucket type id -> type name (CrushWrapper::type_map) */ - struct rb_root type_names; - - /* device/bucket id -> name (CrushWrapper::name_map) */ - struct rb_root names; - - /* CrushWrapper::choose_args */ - struct rb_root choose_args; #endif }; @@ -329,32 +248,4 @@ static inline int crush_calc_tree_node(int i) return ((i+1) << 1)-1; } -/* - * These data structures are private to the CRUSH implementation. They - * are exposed in this header file because builder needs their - * definitions to calculate the total working size. - * - * Moving this out of the crush map allow us to treat the CRUSH map as - * immutable within the mapper and removes the requirement for a CRUSH - * map lock. 
- */ -struct crush_work_bucket { - __u32 perm_x; /* @x for which *perm is defined */ - __u32 perm_n; /* num elements of *perm that are permuted/defined */ - __u32 *perm; /* Permutation of the bucket's items */ -}; - -struct crush_work { - struct crush_work_bucket **work; /* Per-bucket working store */ -#ifdef __KERNEL__ - struct list_head item; -#endif -}; - -#ifdef __KERNEL__ -/* osdmap.c */ -void clear_crush_names(struct rb_root *root); -void clear_choose_args(struct crush_map *c); -#endif - #endif diff --git a/include/linux/crush/hash.h b/include/linux/crush/hash.h index 904df41f78..d1d9025824 100644 --- a/include/linux/crush/hash.h +++ b/include/linux/crush/hash.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef CEPH_CRUSH_HASH_H #define CEPH_CRUSH_HASH_H diff --git a/include/linux/crush/mapper.h b/include/linux/crush/mapper.h index f9b99232f5..5dfd5b1125 100644 --- a/include/linux/crush/mapper.h +++ b/include/linux/crush/mapper.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef CEPH_CRUSH_MAPPER_H #define CEPH_CRUSH_MAPPER_H @@ -12,23 +11,10 @@ #include "crush.h" extern int crush_find_rule(const struct crush_map *map, int ruleset, int type, int size); -int crush_do_rule(const struct crush_map *map, - int ruleno, int x, int *result, int result_max, - const __u32 *weight, int weight_max, - void *cwin, const struct crush_choose_arg *choose_args); - -/* - * Returns the exact amount of workspace that will need to be used - * for a given combination of crush_map and result_max. The caller can - * then allocate this much on its own, either on the stack, in a - * per-thread long-lived buffer, or however it likes. 
- */ -static inline size_t crush_work_size(const struct crush_map *map, - int result_max) -{ - return map->working_size + result_max * 3 * sizeof(__u32); -} - -void crush_init_workspace(const struct crush_map *map, void *v); +extern int crush_do_rule(const struct crush_map *map, + int ruleno, + int x, int *result, int result_max, + const __u32 *weights, int weight_max, + int *scratch); #endif diff --git a/include/linux/crypto.h b/include/linux/crypto.h index 855869e1fd..65ead504d0 100644 --- a/include/linux/crypto.h +++ b/include/linux/crypto.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * Scatterlist Cryptographic API. * @@ -8,6 +7,12 @@ * * Portions derived from Cryptoapi, by Alexander Kjeldaas * and Nettle, by Niels Möller. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 2 of the License, or (at your option) + * any later version. 
+ * */ #ifndef _LINUX_CRYPTO_H #define _LINUX_CRYPTO_H @@ -16,9 +21,9 @@ #include #include #include -#include #include -#include +#include +#include /* * Autoloaded crypto modules should only use a prefixed name to avoid allowing @@ -40,19 +45,21 @@ #define CRYPTO_ALG_TYPE_CIPHER 0x00000001 #define CRYPTO_ALG_TYPE_COMPRESS 0x00000002 #define CRYPTO_ALG_TYPE_AEAD 0x00000003 +#define CRYPTO_ALG_TYPE_BLKCIPHER 0x00000004 +#define CRYPTO_ALG_TYPE_ABLKCIPHER 0x00000005 #define CRYPTO_ALG_TYPE_SKCIPHER 0x00000005 +#define CRYPTO_ALG_TYPE_GIVCIPHER 0x00000006 #define CRYPTO_ALG_TYPE_KPP 0x00000008 -#define CRYPTO_ALG_TYPE_ACOMPRESS 0x0000000a -#define CRYPTO_ALG_TYPE_SCOMPRESS 0x0000000b #define CRYPTO_ALG_TYPE_RNG 0x0000000c #define CRYPTO_ALG_TYPE_AKCIPHER 0x0000000d +#define CRYPTO_ALG_TYPE_DIGEST 0x0000000e #define CRYPTO_ALG_TYPE_HASH 0x0000000e #define CRYPTO_ALG_TYPE_SHASH 0x0000000e #define CRYPTO_ALG_TYPE_AHASH 0x0000000f #define CRYPTO_ALG_TYPE_HASH_MASK 0x0000000e #define CRYPTO_ALG_TYPE_AHASH_MASK 0x0000000e -#define CRYPTO_ALG_TYPE_ACOMPRESS_MASK 0x0000000e +#define CRYPTO_ALG_TYPE_BLKCIPHER_MASK 0x0000000c #define CRYPTO_ALG_LARVAL 0x00000010 #define CRYPTO_ALG_DEAD 0x00000020 @@ -60,11 +67,17 @@ #define CRYPTO_ALG_ASYNC 0x00000080 /* - * Set if the algorithm (or an algorithm which it uses) requires another - * algorithm of the same type to handle corner cases. + * Set this bit if and only if the algorithm requires another algorithm of + * the same type to handle corner cases. */ #define CRYPTO_ALG_NEED_FALLBACK 0x00000100 +/* + * This bit is set for symmetric key ciphers that have already been wrapped + * with a generic IV generator to prevent them from being wrapped again. + */ +#define CRYPTO_ALG_GENIV 0x00000200 + /* * Set if the algorithm has passed automated run-time testing. 
Note that * if there is no run-time testing for a given algorithm it is considered @@ -74,7 +87,7 @@ #define CRYPTO_ALG_TESTED 0x00000400 /* - * Set if the algorithm is an instance that is built from templates. + * Set if the algorithm is an instance that is build from templates. */ #define CRYPTO_ALG_INSTANCE 0x00000800 @@ -89,83 +102,45 @@ */ #define CRYPTO_ALG_INTERNAL 0x00002000 -/* - * Set if the algorithm has a ->setkey() method but can be used without - * calling it first, i.e. there is a default key. - */ -#define CRYPTO_ALG_OPTIONAL_KEY 0x00004000 - -/* - * Don't trigger module loading - */ -#define CRYPTO_NOLOAD 0x00008000 - -/* - * The algorithm may allocate memory during request processing, i.e. during - * encryption, decryption, or hashing. Users can request an algorithm with this - * flag unset if they can't handle memory allocation failures. - * - * This flag is currently only implemented for algorithms of type "skcipher", - * "aead", "ahash", "shash", and "cipher". Algorithms of other types might not - * have this flag set even if they allocate memory. - * - * In some edge cases, algorithms can allocate memory regardless of this flag. - * To avoid these cases, users must obey the following usage constraints: - * skcipher: - * - The IV buffer and all scatterlist elements must be aligned to the - * algorithm's alignmask. - * - If the data were to be divided into chunks of size - * crypto_skcipher_walksize() (with any remainder going at the end), no - * chunk can cross a page boundary or a scatterlist element boundary. - * aead: - * - The IV buffer and all scatterlist elements must be aligned to the - * algorithm's alignmask. - * - The first scatterlist element must contain all the associated data, - * and its pages must be !PageHighMem. - * - If the plaintext/ciphertext were to be divided into chunks of size - * crypto_aead_walksize() (with the remainder going at the end), no chunk - * can cross a page boundary or a scatterlist element boundary. 
- * ahash: - * - The result buffer must be aligned to the algorithm's alignmask. - * - crypto_ahash_finup() must not be used unless the algorithm implements - * ->finup() natively. - */ -#define CRYPTO_ALG_ALLOCATES_MEMORY 0x00010000 - /* * Transform masks and values (for crt_flags). */ -#define CRYPTO_TFM_NEED_KEY 0x00000001 - #define CRYPTO_TFM_REQ_MASK 0x000fff00 -#define CRYPTO_TFM_REQ_FORBID_WEAK_KEYS 0x00000100 +#define CRYPTO_TFM_RES_MASK 0xfff00000 + +#define CRYPTO_TFM_REQ_WEAK_KEY 0x00000100 #define CRYPTO_TFM_REQ_MAY_SLEEP 0x00000200 #define CRYPTO_TFM_REQ_MAY_BACKLOG 0x00000400 +#define CRYPTO_TFM_RES_WEAK_KEY 0x00100000 +#define CRYPTO_TFM_RES_BAD_KEY_LEN 0x00200000 +#define CRYPTO_TFM_RES_BAD_KEY_SCHED 0x00400000 +#define CRYPTO_TFM_RES_BAD_BLOCK_LEN 0x00800000 +#define CRYPTO_TFM_RES_BAD_FLAGS 0x01000000 /* * Miscellaneous stuff. */ -#define CRYPTO_MAX_ALG_NAME 128 +#define CRYPTO_MAX_ALG_NAME 64 /* * The macro CRYPTO_MINALIGN_ATTR (along with the void * type in the actual * declaration) is used to ensure that the crypto_tfm context structure is * aligned correctly for the given architecture so that there are no alignment - * faults for C data types. On architectures that support non-cache coherent - * DMA, such as ARM or arm64, it also takes into account the minimal alignment - * that is required to ensure that the context struct member does not share any - * cachelines with the rest of the struct. This is needed to ensure that cache - * maintenance for non-coherent DMA (cache invalidation in particular) does not - * affect data that may be accessed by the CPU concurrently. + * faults for C data types. In particular, this is required on platforms such + * as arm where pointers are 32-bit aligned but there are data types such as + * u64 which require 64-bit alignment. 
*/ #define CRYPTO_MINALIGN ARCH_KMALLOC_MINALIGN #define CRYPTO_MINALIGN_ATTR __attribute__ ((__aligned__(CRYPTO_MINALIGN))) struct scatterlist; +struct crypto_ablkcipher; struct crypto_async_request; +struct crypto_blkcipher; struct crypto_tfm; struct crypto_type; +struct skcipher_givcrypt_request; typedef void (*crypto_completion_t)(struct crypto_async_request *req, int err); @@ -185,6 +160,33 @@ struct crypto_async_request { u32 flags; }; +struct ablkcipher_request { + struct crypto_async_request base; + + unsigned int nbytes; + + void *info; + + struct scatterlist *src; + struct scatterlist *dst; + + void *__ctx[] CRYPTO_MINALIGN_ATTR; +}; + +struct blkcipher_desc { + struct crypto_blkcipher *tfm; + void *info; + u32 flags; +}; + +struct cipher_desc { + struct crypto_tfm *tfm; + void (*crfn)(struct crypto_tfm *tfm, u8 *dst, const u8 *src); + unsigned int (*prfn)(const struct cipher_desc *desc, u8 *dst, + const u8 *src, unsigned int nbytes); + void *info; +}; + /** * DOC: Block Cipher Algorithm Definitions * @@ -192,6 +194,101 @@ struct crypto_async_request { * managed via crypto_register_alg() and crypto_unregister_alg(). */ +/** + * struct ablkcipher_alg - asynchronous block cipher definition + * @min_keysize: Minimum key size supported by the transformation. This is the + * smallest key length supported by this transformation algorithm. + * This must be set to one of the pre-defined values as this is + * not hardware specific. Possible values for this field can be + * found via git grep "_MIN_KEY_SIZE" include/crypto/ + * @max_keysize: Maximum key size supported by the transformation. This is the + * largest key length supported by this transformation algorithm. + * This must be set to one of the pre-defined values as this is + * not hardware specific. Possible values for this field can be + * found via git grep "_MAX_KEY_SIZE" include/crypto/ + * @setkey: Set key for the transformation. 
This function is used to either + * program a supplied key into the hardware or store the key in the + * transformation context for programming it later. Note that this + * function does modify the transformation context. This function can + * be called multiple times during the existence of the transformation + * object, so one must make sure the key is properly reprogrammed into + * the hardware. This function is also responsible for checking the key + * length for validity. In case a software fallback was put in place in + * the @cra_init call, this function might need to use the fallback if + * the algorithm doesn't support all of the key sizes. + * @encrypt: Encrypt a scatterlist of blocks. This function is used to encrypt + * the supplied scatterlist containing the blocks of data. The crypto + * API consumer is responsible for aligning the entries of the + * scatterlist properly and making sure the chunks are correctly + * sized. In case a software fallback was put in place in the + * @cra_init call, this function might need to use the fallback if + * the algorithm doesn't support all of the key sizes. In case the + * key was stored in transformation context, the key might need to be + * re-programmed into the hardware in this function. This function + * shall not modify the transformation context, as this function may + * be called in parallel with the same transformation object. + * @decrypt: Decrypt a single block. This is a reverse counterpart to @encrypt + * and the conditions are exactly the same. + * @givencrypt: Update the IV for encryption. With this function, a cipher + * implementation may provide the function on how to update the IV + * for encryption. + * @givdecrypt: Update the IV for decryption. This is the reverse of + * @givencrypt . + * @geniv: The transformation implementation may use an "IV generator" provided + * by the kernel crypto API. Several use cases have a predefined + * approach how IVs are to be updated. 
For such use cases, the kernel + * crypto API provides ready-to-use implementations that can be + * referenced with this variable. + * @ivsize: IV size applicable for transformation. The consumer must provide an + * IV of exactly that size to perform the encrypt or decrypt operation. + * + * All fields except @givencrypt , @givdecrypt , @geniv and @ivsize are + * mandatory and must be filled. + */ +struct ablkcipher_alg { + int (*setkey)(struct crypto_ablkcipher *tfm, const u8 *key, + unsigned int keylen); + int (*encrypt)(struct ablkcipher_request *req); + int (*decrypt)(struct ablkcipher_request *req); + int (*givencrypt)(struct skcipher_givcrypt_request *req); + int (*givdecrypt)(struct skcipher_givcrypt_request *req); + + const char *geniv; + + unsigned int min_keysize; + unsigned int max_keysize; + unsigned int ivsize; +}; + +/** + * struct blkcipher_alg - synchronous block cipher definition + * @min_keysize: see struct ablkcipher_alg + * @max_keysize: see struct ablkcipher_alg + * @setkey: see struct ablkcipher_alg + * @encrypt: see struct ablkcipher_alg + * @decrypt: see struct ablkcipher_alg + * @geniv: see struct ablkcipher_alg + * @ivsize: see struct ablkcipher_alg + * + * All fields except @geniv and @ivsize are mandatory and must be filled. + */ +struct blkcipher_alg { + int (*setkey)(struct crypto_tfm *tfm, const u8 *key, + unsigned int keylen); + int (*encrypt)(struct blkcipher_desc *desc, + struct scatterlist *dst, struct scatterlist *src, + unsigned int nbytes); + int (*decrypt)(struct blkcipher_desc *desc, + struct scatterlist *dst, struct scatterlist *src, + unsigned int nbytes); + + const char *geniv; + + unsigned int min_keysize; + unsigned int max_keysize; + unsigned int ivsize; +}; + /** * struct cipher_alg - single-block symmetric ciphers definition * @cia_min_keysize: Minimum key size supported by the transformation. 
This is @@ -248,17 +345,6 @@ struct cipher_alg { void (*cia_decrypt)(struct crypto_tfm *tfm, u8 *dst, const u8 *src); }; -/** - * struct compress_alg - compression/decompression algorithm - * @coa_compress: Compress a buffer of specified length, storing the resulting - * data in the specified buffer. Return the length of the - * compressed data in dlen. - * @coa_decompress: Decompress the source buffer, storing the uncompressed - * data in the specified buffer. The length of the data is - * returned in dlen. - * - * All fields are mandatory. - */ struct compress_alg { int (*coa_compress)(struct crypto_tfm *tfm, const u8 *src, unsigned int slen, u8 *dst, unsigned int *dlen); @@ -266,116 +352,9 @@ struct compress_alg { unsigned int slen, u8 *dst, unsigned int *dlen); }; -#ifdef CONFIG_CRYPTO_STATS -/* - * struct crypto_istat_aead - statistics for AEAD algorithm - * @encrypt_cnt: number of encrypt requests - * @encrypt_tlen: total data size handled by encrypt requests - * @decrypt_cnt: number of decrypt requests - * @decrypt_tlen: total data size handled by decrypt requests - * @err_cnt: number of error for AEAD requests - */ -struct crypto_istat_aead { - atomic64_t encrypt_cnt; - atomic64_t encrypt_tlen; - atomic64_t decrypt_cnt; - atomic64_t decrypt_tlen; - atomic64_t err_cnt; -}; - -/* - * struct crypto_istat_akcipher - statistics for akcipher algorithm - * @encrypt_cnt: number of encrypt requests - * @encrypt_tlen: total data size handled by encrypt requests - * @decrypt_cnt: number of decrypt requests - * @decrypt_tlen: total data size handled by decrypt requests - * @verify_cnt: number of verify operation - * @sign_cnt: number of sign requests - * @err_cnt: number of error for akcipher requests - */ -struct crypto_istat_akcipher { - atomic64_t encrypt_cnt; - atomic64_t encrypt_tlen; - atomic64_t decrypt_cnt; - atomic64_t decrypt_tlen; - atomic64_t verify_cnt; - atomic64_t sign_cnt; - atomic64_t err_cnt; -}; - -/* - * struct crypto_istat_cipher - statistics for 
cipher algorithm - * @encrypt_cnt: number of encrypt requests - * @encrypt_tlen: total data size handled by encrypt requests - * @decrypt_cnt: number of decrypt requests - * @decrypt_tlen: total data size handled by decrypt requests - * @err_cnt: number of error for cipher requests - */ -struct crypto_istat_cipher { - atomic64_t encrypt_cnt; - atomic64_t encrypt_tlen; - atomic64_t decrypt_cnt; - atomic64_t decrypt_tlen; - atomic64_t err_cnt; -}; - -/* - * struct crypto_istat_compress - statistics for compress algorithm - * @compress_cnt: number of compress requests - * @compress_tlen: total data size handled by compress requests - * @decompress_cnt: number of decompress requests - * @decompress_tlen: total data size handled by decompress requests - * @err_cnt: number of error for compress requests - */ -struct crypto_istat_compress { - atomic64_t compress_cnt; - atomic64_t compress_tlen; - atomic64_t decompress_cnt; - atomic64_t decompress_tlen; - atomic64_t err_cnt; -}; - -/* - * struct crypto_istat_hash - statistics for has algorithm - * @hash_cnt: number of hash requests - * @hash_tlen: total data size hashed - * @err_cnt: number of error for hash requests - */ -struct crypto_istat_hash { - atomic64_t hash_cnt; - atomic64_t hash_tlen; - atomic64_t err_cnt; -}; - -/* - * struct crypto_istat_kpp - statistics for KPP algorithm - * @setsecret_cnt: number of setsecrey operation - * @generate_public_key_cnt: number of generate_public_key operation - * @compute_shared_secret_cnt: number of compute_shared_secret operation - * @err_cnt: number of error for KPP requests - */ -struct crypto_istat_kpp { - atomic64_t setsecret_cnt; - atomic64_t generate_public_key_cnt; - atomic64_t compute_shared_secret_cnt; - atomic64_t err_cnt; -}; - -/* - * struct crypto_istat_rng: statistics for RNG algorithm - * @generate_cnt: number of RNG generate requests - * @generate_tlen: total data size of generated data by the RNG - * @seed_cnt: number of times the RNG was seeded - * @err_cnt: 
number of error for RNG requests - */ -struct crypto_istat_rng { - atomic64_t generate_cnt; - atomic64_t generate_tlen; - atomic64_t seed_cnt; - atomic64_t err_cnt; -}; -#endif /* CONFIG_CRYPTO_STATS */ +#define cra_ablkcipher cra_u.ablkcipher +#define cra_blkcipher cra_u.blkcipher #define cra_cipher cra_u.cipher #define cra_compress cra_u.compress @@ -423,8 +402,9 @@ struct crypto_istat_rng { * transformation algorithm. * @cra_type: Type of the cryptographic transformation. This is a pointer to * struct crypto_type, which implements callbacks common for all - * transformation types. There are multiple options, such as - * &crypto_skcipher_type, &crypto_ahash_type, &crypto_rng_type. + * transformation types. There are multiple options: + * &crypto_blkcipher_type, &crypto_ablkcipher_type, + * &crypto_ahash_type, &crypto_rng_type. * This field might be empty. In that case, there are no common * callbacks. This is the case for: cipher, compress, shash. * @cra_u: Callbacks implementing the transformation. This is a union of @@ -443,25 +423,12 @@ struct crypto_istat_rng { * @cra_exit: Deinitialize the cryptographic transformation object. This is a * counterpart to @cra_init, used to remove various changes set in * @cra_init. - * @cra_u.cipher: Union member which contains a single-block symmetric cipher - * definition. See @struct @cipher_alg. - * @cra_u.compress: Union member which contains a (de)compression algorithm. - * See @struct @compress_alg. * @cra_module: Owner of this transformation implementation. 
Set to THIS_MODULE * @cra_list: internally used * @cra_users: internally used * @cra_refcnt: internally used * @cra_destroy: internally used * - * @stats: union of all possible crypto_istat_xxx structures - * @stats.aead: statistics for AEAD algorithm - * @stats.akcipher: statistics for akcipher algorithm - * @stats.cipher: statistics for cipher algorithm - * @stats.compress: statistics for compress algorithm - * @stats.hash: statistics for hash algorithm - * @stats.rng: statistics for rng algorithm - * @stats.kpp: statistics for KPP algorithm - * * The struct crypto_alg describes a generic Crypto API algorithm and is common * for all of the transformations. Any variable not documented here shall not * be used by a cipher implementation as it is internal to the Crypto API. @@ -476,7 +443,7 @@ struct crypto_alg { unsigned int cra_alignmask; int cra_priority; - refcount_t cra_refcnt; + atomic_t cra_refcnt; char cra_name[CRYPTO_MAX_ALG_NAME]; char cra_driver_name[CRYPTO_MAX_ALG_NAME]; @@ -484,6 +451,8 @@ struct crypto_alg { const struct crypto_type *cra_type; union { + struct ablkcipher_alg ablkcipher; + struct blkcipher_alg blkcipher; struct cipher_alg cipher; struct compress_alg compress; } cra_u; @@ -493,127 +462,15 @@ struct crypto_alg { void (*cra_destroy)(struct crypto_alg *alg); struct module *cra_module; - -#ifdef CONFIG_CRYPTO_STATS - union { - struct crypto_istat_aead aead; - struct crypto_istat_akcipher akcipher; - struct crypto_istat_cipher cipher; - struct crypto_istat_compress compress; - struct crypto_istat_hash hash; - struct crypto_istat_rng rng; - struct crypto_istat_kpp kpp; - } stats; -#endif /* CONFIG_CRYPTO_STATS */ - } CRYPTO_MINALIGN_ATTR; -#ifdef CONFIG_CRYPTO_STATS -void crypto_stats_init(struct crypto_alg *alg); -void crypto_stats_get(struct crypto_alg *alg); -void crypto_stats_aead_encrypt(unsigned int cryptlen, struct crypto_alg *alg, int ret); -void crypto_stats_aead_decrypt(unsigned int cryptlen, struct crypto_alg *alg, int ret); -void 
crypto_stats_ahash_update(unsigned int nbytes, int ret, struct crypto_alg *alg); -void crypto_stats_ahash_final(unsigned int nbytes, int ret, struct crypto_alg *alg); -void crypto_stats_akcipher_encrypt(unsigned int src_len, int ret, struct crypto_alg *alg); -void crypto_stats_akcipher_decrypt(unsigned int src_len, int ret, struct crypto_alg *alg); -void crypto_stats_akcipher_sign(int ret, struct crypto_alg *alg); -void crypto_stats_akcipher_verify(int ret, struct crypto_alg *alg); -void crypto_stats_compress(unsigned int slen, int ret, struct crypto_alg *alg); -void crypto_stats_decompress(unsigned int slen, int ret, struct crypto_alg *alg); -void crypto_stats_kpp_set_secret(struct crypto_alg *alg, int ret); -void crypto_stats_kpp_generate_public_key(struct crypto_alg *alg, int ret); -void crypto_stats_kpp_compute_shared_secret(struct crypto_alg *alg, int ret); -void crypto_stats_rng_seed(struct crypto_alg *alg, int ret); -void crypto_stats_rng_generate(struct crypto_alg *alg, unsigned int dlen, int ret); -void crypto_stats_skcipher_encrypt(unsigned int cryptlen, int ret, struct crypto_alg *alg); -void crypto_stats_skcipher_decrypt(unsigned int cryptlen, int ret, struct crypto_alg *alg); -#else -static inline void crypto_stats_init(struct crypto_alg *alg) -{} -static inline void crypto_stats_get(struct crypto_alg *alg) -{} -static inline void crypto_stats_aead_encrypt(unsigned int cryptlen, struct crypto_alg *alg, int ret) -{} -static inline void crypto_stats_aead_decrypt(unsigned int cryptlen, struct crypto_alg *alg, int ret) -{} -static inline void crypto_stats_ahash_update(unsigned int nbytes, int ret, struct crypto_alg *alg) -{} -static inline void crypto_stats_ahash_final(unsigned int nbytes, int ret, struct crypto_alg *alg) -{} -static inline void crypto_stats_akcipher_encrypt(unsigned int src_len, int ret, struct crypto_alg *alg) -{} -static inline void crypto_stats_akcipher_decrypt(unsigned int src_len, int ret, struct crypto_alg *alg) -{} -static inline 
void crypto_stats_akcipher_sign(int ret, struct crypto_alg *alg) -{} -static inline void crypto_stats_akcipher_verify(int ret, struct crypto_alg *alg) -{} -static inline void crypto_stats_compress(unsigned int slen, int ret, struct crypto_alg *alg) -{} -static inline void crypto_stats_decompress(unsigned int slen, int ret, struct crypto_alg *alg) -{} -static inline void crypto_stats_kpp_set_secret(struct crypto_alg *alg, int ret) -{} -static inline void crypto_stats_kpp_generate_public_key(struct crypto_alg *alg, int ret) -{} -static inline void crypto_stats_kpp_compute_shared_secret(struct crypto_alg *alg, int ret) -{} -static inline void crypto_stats_rng_seed(struct crypto_alg *alg, int ret) -{} -static inline void crypto_stats_rng_generate(struct crypto_alg *alg, unsigned int dlen, int ret) -{} -static inline void crypto_stats_skcipher_encrypt(unsigned int cryptlen, int ret, struct crypto_alg *alg) -{} -static inline void crypto_stats_skcipher_decrypt(unsigned int cryptlen, int ret, struct crypto_alg *alg) -{} -#endif -/* - * A helper struct for waiting for completion of async crypto ops - */ -struct crypto_wait { - struct completion completion; - int err; -}; - -/* - * Macro for declaring a crypto op async wait object on stack - */ -#define DECLARE_CRYPTO_WAIT(_wait) \ - struct crypto_wait _wait = { \ - COMPLETION_INITIALIZER_ONSTACK((_wait).completion), 0 } - -/* - * Async ops completion helper functioons - */ -void crypto_req_done(struct crypto_async_request *req, int err); - -static inline int crypto_wait_req(int err, struct crypto_wait *wait) -{ - switch (err) { - case -EINPROGRESS: - case -EBUSY: - wait_for_completion(&wait->completion); - reinit_completion(&wait->completion); - err = wait->err; - break; - } - - return err; -} - -static inline void crypto_init_wait(struct crypto_wait *wait) -{ - init_completion(&wait->completion); -} - /* * Algorithm registration interface. 
*/ int crypto_register_alg(struct crypto_alg *alg); -void crypto_unregister_alg(struct crypto_alg *alg); +int crypto_unregister_alg(struct crypto_alg *alg); int crypto_register_algs(struct crypto_alg *algs, int count); -void crypto_unregister_algs(struct crypto_alg *algs, int count); +int crypto_unregister_algs(struct crypto_alg *algs, int count); /* * Algorithm query interface. @@ -626,12 +483,60 @@ int crypto_has_alg(const char *name, u32 type, u32 mask); * crypto_free_*(), as well as the various helpers below. */ +struct ablkcipher_tfm { + int (*setkey)(struct crypto_ablkcipher *tfm, const u8 *key, + unsigned int keylen); + int (*encrypt)(struct ablkcipher_request *req); + int (*decrypt)(struct ablkcipher_request *req); + + struct crypto_ablkcipher *base; + + unsigned int ivsize; + unsigned int reqsize; +}; + +struct blkcipher_tfm { + void *iv; + int (*setkey)(struct crypto_tfm *tfm, const u8 *key, + unsigned int keylen); + int (*encrypt)(struct blkcipher_desc *desc, struct scatterlist *dst, + struct scatterlist *src, unsigned int nbytes); + int (*decrypt)(struct blkcipher_desc *desc, struct scatterlist *dst, + struct scatterlist *src, unsigned int nbytes); +}; + +struct cipher_tfm { + int (*cit_setkey)(struct crypto_tfm *tfm, + const u8 *key, unsigned int keylen); + void (*cit_encrypt_one)(struct crypto_tfm *tfm, u8 *dst, const u8 *src); + void (*cit_decrypt_one)(struct crypto_tfm *tfm, u8 *dst, const u8 *src); +} __no_const; + +struct compress_tfm { + int (*cot_compress)(struct crypto_tfm *tfm, + const u8 *src, unsigned int slen, + u8 *dst, unsigned int *dlen); + int (*cot_decompress)(struct crypto_tfm *tfm, + const u8 *src, unsigned int slen, + u8 *dst, unsigned int *dlen); +} __no_const; + +#define crt_ablkcipher crt_u.ablkcipher +#define crt_blkcipher crt_u.blkcipher +#define crt_cipher crt_u.cipher +#define crt_compress crt_u.compress + struct crypto_tfm { u32 crt_flags; - - int node; + union { + struct ablkcipher_tfm ablkcipher; + struct blkcipher_tfm 
blkcipher; + struct cipher_tfm cipher; + struct compress_tfm compress; + } crt_u; + void (*exit)(struct crypto_tfm *tfm); struct crypto_alg *__crt_alg; @@ -639,10 +544,48 @@ struct crypto_tfm { void *__crt_ctx[] CRYPTO_MINALIGN_ATTR; }; +struct crypto_ablkcipher { + struct crypto_tfm base; +}; + +struct crypto_blkcipher { + struct crypto_tfm base; +}; + +struct crypto_cipher { + struct crypto_tfm base; +}; + struct crypto_comp { struct crypto_tfm base; }; +enum { + CRYPTOA_UNSPEC, + CRYPTOA_ALG, + CRYPTOA_TYPE, + CRYPTOA_U32, + __CRYPTOA_MAX, +}; + +#define CRYPTOA_MAX (__CRYPTOA_MAX - 1) + +/* Maximum number of (rtattr) parameters for each template. */ +#define CRYPTO_MAX_ATTRS 32 + +struct crypto_attr_alg { + char name[CRYPTO_MAX_ALG_NAME]; +}; + +struct crypto_attr_type { + u32 type; + u32 mask; +}; + +struct crypto_attr_u32 { + u32 num; +}; + /* * Transform user interface. */ @@ -716,11 +659,893 @@ static inline unsigned int crypto_tfm_ctx_alignment(void) return __alignof__(tfm->__crt_ctx); } +/* + * API wrappers. + */ +static inline struct crypto_ablkcipher *__crypto_ablkcipher_cast( + struct crypto_tfm *tfm) +{ + return (struct crypto_ablkcipher *)tfm; +} + +static inline u32 crypto_skcipher_type(u32 type) +{ + type &= ~(CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_GENIV); + type |= CRYPTO_ALG_TYPE_BLKCIPHER; + return type; +} + +static inline u32 crypto_skcipher_mask(u32 mask) +{ + mask &= ~(CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_GENIV); + mask |= CRYPTO_ALG_TYPE_BLKCIPHER_MASK; + return mask; +} + +/** + * DOC: Asynchronous Block Cipher API + * + * Asynchronous block cipher API is used with the ciphers of type + * CRYPTO_ALG_TYPE_ABLKCIPHER (listed as type "ablkcipher" in /proc/crypto). + * + * Asynchronous cipher operations imply that the function invocation for a + * cipher request returns immediately before the completion of the operation. 
+ * The cipher request is scheduled as a separate kernel thread and therefore + * load-balanced on the different CPUs via the process scheduler. To allow + * the kernel crypto API to inform the caller about the completion of a cipher + * request, the caller must provide a callback function. That function is + * invoked with the cipher handle when the request completes. + * + * To support the asynchronous operation, additional information than just the + * cipher handle must be supplied to the kernel crypto API. That additional + * information is given by filling in the ablkcipher_request data structure. + * + * For the asynchronous block cipher API, the state is maintained with the tfm + * cipher handle. A single tfm can be used across multiple calls and in + * parallel. For asynchronous block cipher calls, context data supplied and + * only used by the caller can be referenced the request data structure in + * addition to the IV used for the cipher request. The maintenance of such + * state information would be important for a crypto driver implementer to + * have, because when calling the callback function upon completion of the + * cipher operation, that callback function may need some information about + * which operation just finished if it invoked multiple in parallel. This + * state information is unused by the kernel crypto API. + */ + +static inline struct crypto_tfm *crypto_ablkcipher_tfm( + struct crypto_ablkcipher *tfm) +{ + return &tfm->base; +} + +/** + * crypto_free_ablkcipher() - zeroize and free cipher handle + * @tfm: cipher handle to be freed + */ +static inline void crypto_free_ablkcipher(struct crypto_ablkcipher *tfm) +{ + crypto_free_tfm(crypto_ablkcipher_tfm(tfm)); +} + +/** + * crypto_has_ablkcipher() - Search for the availability of an ablkcipher. 
+ * @alg_name: is the cra_name / name or cra_driver_name / driver name of the + * ablkcipher + * @type: specifies the type of the cipher + * @mask: specifies the mask for the cipher + * + * Return: true when the ablkcipher is known to the kernel crypto API; false + * otherwise + */ +static inline int crypto_has_ablkcipher(const char *alg_name, u32 type, + u32 mask) +{ + return crypto_has_alg(alg_name, crypto_skcipher_type(type), + crypto_skcipher_mask(mask)); +} + +static inline struct ablkcipher_tfm *crypto_ablkcipher_crt( + struct crypto_ablkcipher *tfm) +{ + return &crypto_ablkcipher_tfm(tfm)->crt_ablkcipher; +} + +/** + * crypto_ablkcipher_ivsize() - obtain IV size + * @tfm: cipher handle + * + * The size of the IV for the ablkcipher referenced by the cipher handle is + * returned. This IV size may be zero if the cipher does not need an IV. + * + * Return: IV size in bytes + */ +static inline unsigned int crypto_ablkcipher_ivsize( + struct crypto_ablkcipher *tfm) +{ + return crypto_ablkcipher_crt(tfm)->ivsize; +} + +/** + * crypto_ablkcipher_blocksize() - obtain block size of cipher + * @tfm: cipher handle + * + * The block size for the ablkcipher referenced with the cipher handle is + * returned. 
The caller may use that information to allocate appropriate + * memory for the data returned by the encryption or decryption operation + * + * Return: block size of cipher + */ +static inline unsigned int crypto_ablkcipher_blocksize( + struct crypto_ablkcipher *tfm) +{ + return crypto_tfm_alg_blocksize(crypto_ablkcipher_tfm(tfm)); +} + +static inline unsigned int crypto_ablkcipher_alignmask( + struct crypto_ablkcipher *tfm) +{ + return crypto_tfm_alg_alignmask(crypto_ablkcipher_tfm(tfm)); +} + +static inline u32 crypto_ablkcipher_get_flags(struct crypto_ablkcipher *tfm) +{ + return crypto_tfm_get_flags(crypto_ablkcipher_tfm(tfm)); +} + +static inline void crypto_ablkcipher_set_flags(struct crypto_ablkcipher *tfm, + u32 flags) +{ + crypto_tfm_set_flags(crypto_ablkcipher_tfm(tfm), flags); +} + +static inline void crypto_ablkcipher_clear_flags(struct crypto_ablkcipher *tfm, + u32 flags) +{ + crypto_tfm_clear_flags(crypto_ablkcipher_tfm(tfm), flags); +} + +/** + * crypto_ablkcipher_setkey() - set key for cipher + * @tfm: cipher handle + * @key: buffer holding the key + * @keylen: length of the key in bytes + * + * The caller provided key is set for the ablkcipher referenced by the cipher + * handle. + * + * Note, the key length determines the cipher type. Many block ciphers implement + * different cipher modes depending on the key size, such as AES-128 vs AES-192 + * vs. AES-256. When providing a 16 byte key for an AES cipher handle, AES-128 + * is performed. 
+ * + * Return: 0 if the setting of the key was successful; < 0 if an error occurred + */ +static inline int crypto_ablkcipher_setkey(struct crypto_ablkcipher *tfm, + const u8 *key, unsigned int keylen) +{ + struct ablkcipher_tfm *crt = crypto_ablkcipher_crt(tfm); + + return crt->setkey(crt->base, key, keylen); +} + +/** + * crypto_ablkcipher_reqtfm() - obtain cipher handle from request + * @req: ablkcipher_request out of which the cipher handle is to be obtained + * + * Return the crypto_ablkcipher handle when furnishing an ablkcipher_request + * data structure. + * + * Return: crypto_ablkcipher handle + */ +static inline struct crypto_ablkcipher *crypto_ablkcipher_reqtfm( + struct ablkcipher_request *req) +{ + return __crypto_ablkcipher_cast(req->base.tfm); +} + +/** + * crypto_ablkcipher_encrypt() - encrypt plaintext + * @req: reference to the ablkcipher_request handle that holds all information + * needed to perform the cipher operation + * + * Encrypt plaintext data using the ablkcipher_request handle. That data + * structure and how it is filled with data is discussed with the + * ablkcipher_request_* functions. + * + * Return: 0 if the cipher operation was successful; < 0 if an error occurred + */ +static inline int crypto_ablkcipher_encrypt(struct ablkcipher_request *req) +{ + struct ablkcipher_tfm *crt = + crypto_ablkcipher_crt(crypto_ablkcipher_reqtfm(req)); + return crt->encrypt(req); +} + +/** + * crypto_ablkcipher_decrypt() - decrypt ciphertext + * @req: reference to the ablkcipher_request handle that holds all information + * needed to perform the cipher operation + * + * Decrypt ciphertext data using the ablkcipher_request handle. That data + * structure and how it is filled with data is discussed with the + * ablkcipher_request_* functions. 
+ * + * Return: 0 if the cipher operation was successful; < 0 if an error occurred + */ +static inline int crypto_ablkcipher_decrypt(struct ablkcipher_request *req) +{ + struct ablkcipher_tfm *crt = + crypto_ablkcipher_crt(crypto_ablkcipher_reqtfm(req)); + return crt->decrypt(req); +} + +/** + * DOC: Asynchronous Cipher Request Handle + * + * The ablkcipher_request data structure contains all pointers to data + * required for the asynchronous cipher operation. This includes the cipher + * handle (which can be used by multiple ablkcipher_request instances), pointer + * to plaintext and ciphertext, asynchronous callback function, etc. It acts + * as a handle to the ablkcipher_request_* API calls in a similar way as + * ablkcipher handle to the crypto_ablkcipher_* API calls. + */ + +/** + * crypto_ablkcipher_reqsize() - obtain size of the request data structure + * @tfm: cipher handle + * + * Return: number of bytes + */ +static inline unsigned int crypto_ablkcipher_reqsize( + struct crypto_ablkcipher *tfm) +{ + return crypto_ablkcipher_crt(tfm)->reqsize; +} + +/** + * ablkcipher_request_set_tfm() - update cipher handle reference in request + * @req: request handle to be modified + * @tfm: cipher handle that shall be added to the request handle + * + * Allow the caller to replace the existing ablkcipher handle in the request + * data structure with a different one. + */ +static inline void ablkcipher_request_set_tfm( + struct ablkcipher_request *req, struct crypto_ablkcipher *tfm) +{ + req->base.tfm = crypto_ablkcipher_tfm(crypto_ablkcipher_crt(tfm)->base); +} + +static inline struct ablkcipher_request *ablkcipher_request_cast( + struct crypto_async_request *req) +{ + return container_of(req, struct ablkcipher_request, base); +} + +/** + * ablkcipher_request_alloc() - allocate request data structure + * @tfm: cipher handle to be registered with the request + * @gfp: memory allocation flag that is handed to kmalloc by the API call. 
+ * + * Allocate the request data structure that must be used with the ablkcipher + * encrypt and decrypt API calls. During the allocation, the provided ablkcipher + * handle is registered in the request data structure. + * + * Return: allocated request handle in case of success, or NULL if out of memory + */ +static inline struct ablkcipher_request *ablkcipher_request_alloc( + struct crypto_ablkcipher *tfm, gfp_t gfp) +{ + struct ablkcipher_request *req; + + req = kmalloc(sizeof(struct ablkcipher_request) + + crypto_ablkcipher_reqsize(tfm), gfp); + + if (likely(req)) + ablkcipher_request_set_tfm(req, tfm); + + return req; +} + +/** + * ablkcipher_request_free() - zeroize and free request data structure + * @req: request data structure cipher handle to be freed + */ +static inline void ablkcipher_request_free(struct ablkcipher_request *req) +{ + kzfree(req); +} + +/** + * ablkcipher_request_set_callback() - set asynchronous callback function + * @req: request handle + * @flags: specify zero or an ORing of the flags + * CRYPTO_TFM_REQ_MAY_BACKLOG the request queue may back log and + * increase the wait queue beyond the initial maximum size; + * CRYPTO_TFM_REQ_MAY_SLEEP the request processing may sleep + * @compl: callback function pointer to be registered with the request handle + * @data: The data pointer refers to memory that is not used by the kernel + * crypto API, but provided to the callback function for it to use. Here, + * the caller can provide a reference to memory the callback function can + * operate on. As the callback function is invoked asynchronously to the + * related functionality, it may need to access data structures of the + * related functionality which can be referenced using this pointer. The + * callback function can access the memory via the "data" field in the + * crypto_async_request data structure provided to the callback function. 
+ * + * This function allows setting the callback function that is triggered once the + * cipher operation completes. + * + * The callback function is registered with the ablkcipher_request handle and + * must comply with the following template + * + * void callback_function(struct crypto_async_request *req, int error) + */ +static inline void ablkcipher_request_set_callback( + struct ablkcipher_request *req, + u32 flags, crypto_completion_t compl, void *data) +{ + req->base.complete = compl; + req->base.data = data; + req->base.flags = flags; +} + +/** + * ablkcipher_request_set_crypt() - set data buffers + * @req: request handle + * @src: source scatter / gather list + * @dst: destination scatter / gather list + * @nbytes: number of bytes to process from @src + * @iv: IV for the cipher operation which must comply with the IV size defined + * by crypto_ablkcipher_ivsize + * + * This function allows setting of the source data and destination data + * scatter / gather lists. + * + * For encryption, the source is treated as the plaintext and the + * destination is the ciphertext. For a decryption operation, the use is + * reversed - the source is the ciphertext and the destination is the plaintext. + */ +static inline void ablkcipher_request_set_crypt( + struct ablkcipher_request *req, + struct scatterlist *src, struct scatterlist *dst, + unsigned int nbytes, void *iv) +{ + req->src = src; + req->dst = dst; + req->nbytes = nbytes; + req->info = iv; +} + +/** + * DOC: Synchronous Block Cipher API + * + * The synchronous block cipher API is used with the ciphers of type + * CRYPTO_ALG_TYPE_BLKCIPHER (listed as type "blkcipher" in /proc/crypto) + * + * Synchronous calls, have a context in the tfm. But since a single tfm can be + * used in multiple calls and in parallel, this info should not be changeable + * (unless a lock is used). This applies, for example, to the symmetric key. 
+ * However, the IV is changeable, so there is an iv field in blkcipher_tfm + * structure for synchronous blkcipher api. So, its the only state info that can + * be kept for synchronous calls without using a big lock across a tfm. + * + * The block cipher API allows the use of a complete cipher, i.e. a cipher + * consisting of a template (a block chaining mode) and a single block cipher + * primitive (e.g. AES). + * + * The plaintext data buffer and the ciphertext data buffer are pointed to + * by using scatter/gather lists. The cipher operation is performed + * on all segments of the provided scatter/gather lists. + * + * The kernel crypto API supports a cipher operation "in-place" which means that + * the caller may provide the same scatter/gather list for the plaintext and + * cipher text. After the completion of the cipher operation, the plaintext + * data is replaced with the ciphertext data in case of an encryption and vice + * versa for a decryption. The caller must ensure that the scatter/gather lists + * for the output data point to sufficiently large buffers, i.e. multiples of + * the block size of the cipher. + */ + +static inline struct crypto_blkcipher *__crypto_blkcipher_cast( + struct crypto_tfm *tfm) +{ + return (struct crypto_blkcipher *)tfm; +} + +static inline struct crypto_blkcipher *crypto_blkcipher_cast( + struct crypto_tfm *tfm) +{ + BUG_ON(crypto_tfm_alg_type(tfm) != CRYPTO_ALG_TYPE_BLKCIPHER); + return __crypto_blkcipher_cast(tfm); +} + +/** + * crypto_alloc_blkcipher() - allocate synchronous block cipher handle + * @alg_name: is the cra_name / name or cra_driver_name / driver name of the + * blkcipher cipher + * @type: specifies the type of the cipher + * @mask: specifies the mask for the cipher + * + * Allocate a cipher handle for a block cipher. The returned struct + * crypto_blkcipher is the cipher handle that is required for any subsequent + * API invocation for that block cipher. 
+ * + * Return: allocated cipher handle in case of success; IS_ERR() is true in case + * of an error, PTR_ERR() returns the error code. + */ +static inline struct crypto_blkcipher *crypto_alloc_blkcipher( + const char *alg_name, u32 type, u32 mask) +{ + type &= ~CRYPTO_ALG_TYPE_MASK; + type |= CRYPTO_ALG_TYPE_BLKCIPHER; + mask |= CRYPTO_ALG_TYPE_MASK; + + return __crypto_blkcipher_cast(crypto_alloc_base(alg_name, type, mask)); +} + +static inline struct crypto_tfm *crypto_blkcipher_tfm( + struct crypto_blkcipher *tfm) +{ + return &tfm->base; +} + +/** + * crypto_free_blkcipher() - zeroize and free the block cipher handle + * @tfm: cipher handle to be freed + */ +static inline void crypto_free_blkcipher(struct crypto_blkcipher *tfm) +{ + crypto_free_tfm(crypto_blkcipher_tfm(tfm)); +} + +/** + * crypto_has_blkcipher() - Search for the availability of a block cipher + * @alg_name: is the cra_name / name or cra_driver_name / driver name of the + * block cipher + * @type: specifies the type of the cipher + * @mask: specifies the mask for the cipher + * + * Return: true when the block cipher is known to the kernel crypto API; false + * otherwise + */ +static inline int crypto_has_blkcipher(const char *alg_name, u32 type, u32 mask) +{ + type &= ~CRYPTO_ALG_TYPE_MASK; + type |= CRYPTO_ALG_TYPE_BLKCIPHER; + mask |= CRYPTO_ALG_TYPE_MASK; + + return crypto_has_alg(alg_name, type, mask); +} + +/** + * crypto_blkcipher_name() - return the name / cra_name from the cipher handle + * @tfm: cipher handle + * + * Return: The character string holding the name of the cipher + */ +static inline const char *crypto_blkcipher_name(struct crypto_blkcipher *tfm) +{ + return crypto_tfm_alg_name(crypto_blkcipher_tfm(tfm)); +} + +static inline struct blkcipher_tfm *crypto_blkcipher_crt( + struct crypto_blkcipher *tfm) +{ + return &crypto_blkcipher_tfm(tfm)->crt_blkcipher; +} + +static inline struct blkcipher_alg *crypto_blkcipher_alg( + struct crypto_blkcipher *tfm) +{ + return 
&crypto_blkcipher_tfm(tfm)->__crt_alg->cra_blkcipher; +} + +/** + * crypto_blkcipher_ivsize() - obtain IV size + * @tfm: cipher handle + * + * The size of the IV for the block cipher referenced by the cipher handle is + * returned. This IV size may be zero if the cipher does not need an IV. + * + * Return: IV size in bytes + */ +static inline unsigned int crypto_blkcipher_ivsize(struct crypto_blkcipher *tfm) +{ + return crypto_blkcipher_alg(tfm)->ivsize; +} + +/** + * crypto_blkcipher_blocksize() - obtain block size of cipher + * @tfm: cipher handle + * + * The block size for the block cipher referenced with the cipher handle is + * returned. The caller may use that information to allocate appropriate + * memory for the data returned by the encryption or decryption operation. + * + * Return: block size of cipher + */ +static inline unsigned int crypto_blkcipher_blocksize( + struct crypto_blkcipher *tfm) +{ + return crypto_tfm_alg_blocksize(crypto_blkcipher_tfm(tfm)); +} + +static inline unsigned int crypto_blkcipher_alignmask( + struct crypto_blkcipher *tfm) +{ + return crypto_tfm_alg_alignmask(crypto_blkcipher_tfm(tfm)); +} + +static inline u32 crypto_blkcipher_get_flags(struct crypto_blkcipher *tfm) +{ + return crypto_tfm_get_flags(crypto_blkcipher_tfm(tfm)); +} + +static inline void crypto_blkcipher_set_flags(struct crypto_blkcipher *tfm, + u32 flags) +{ + crypto_tfm_set_flags(crypto_blkcipher_tfm(tfm), flags); +} + +static inline void crypto_blkcipher_clear_flags(struct crypto_blkcipher *tfm, + u32 flags) +{ + crypto_tfm_clear_flags(crypto_blkcipher_tfm(tfm), flags); +} + +/** + * crypto_blkcipher_setkey() - set key for cipher + * @tfm: cipher handle + * @key: buffer holding the key + * @keylen: length of the key in bytes + * + * The caller provided key is set for the block cipher referenced by the cipher + * handle. + * + * Note, the key length determines the cipher type. 
Many block ciphers implement + * different cipher modes depending on the key size, such as AES-128 vs AES-192 + * vs. AES-256. When providing a 16 byte key for an AES cipher handle, AES-128 + * is performed. + * + * Return: 0 if the setting of the key was successful; < 0 if an error occurred + */ +static inline int crypto_blkcipher_setkey(struct crypto_blkcipher *tfm, + const u8 *key, unsigned int keylen) +{ + return crypto_blkcipher_crt(tfm)->setkey(crypto_blkcipher_tfm(tfm), + key, keylen); +} + +/** + * crypto_blkcipher_encrypt() - encrypt plaintext + * @desc: reference to the block cipher handle with meta data + * @dst: scatter/gather list that is filled by the cipher operation with the + * ciphertext + * @src: scatter/gather list that holds the plaintext + * @nbytes: number of bytes of the plaintext to encrypt. + * + * Encrypt plaintext data using the IV set by the caller with a preceding + * call of crypto_blkcipher_set_iv. + * + * The blkcipher_desc data structure must be filled by the caller and can + * reside on the stack. The caller must fill desc as follows: desc.tfm is filled + * with the block cipher handle; desc.flags is filled with either + * CRYPTO_TFM_REQ_MAY_SLEEP or 0. + * + * Return: 0 if the cipher operation was successful; < 0 if an error occurred + */ +static inline int crypto_blkcipher_encrypt(struct blkcipher_desc *desc, + struct scatterlist *dst, + struct scatterlist *src, + unsigned int nbytes) +{ + desc->info = crypto_blkcipher_crt(desc->tfm)->iv; + return crypto_blkcipher_crt(desc->tfm)->encrypt(desc, dst, src, nbytes); +} + +/** + * crypto_blkcipher_encrypt_iv() - encrypt plaintext with dedicated IV + * @desc: reference to the block cipher handle with meta data + * @dst: scatter/gather list that is filled by the cipher operation with the + * ciphertext + * @src: scatter/gather list that holds the plaintext + * @nbytes: number of bytes of the plaintext to encrypt. 
+ * + * Encrypt plaintext data with the use of an IV that is solely used for this + * cipher operation. Any previously set IV is not used. + * + * The blkcipher_desc data structure must be filled by the caller and can + * reside on the stack. The caller must fill desc as follows: desc.tfm is filled + * with the block cipher handle; desc.info is filled with the IV to be used for + * the current operation; desc.flags is filled with either + * CRYPTO_TFM_REQ_MAY_SLEEP or 0. + * + * Return: 0 if the cipher operation was successful; < 0 if an error occurred + */ +static inline int crypto_blkcipher_encrypt_iv(struct blkcipher_desc *desc, + struct scatterlist *dst, + struct scatterlist *src, + unsigned int nbytes) +{ + return crypto_blkcipher_crt(desc->tfm)->encrypt(desc, dst, src, nbytes); +} + +/** + * crypto_blkcipher_decrypt() - decrypt ciphertext + * @desc: reference to the block cipher handle with meta data + * @dst: scatter/gather list that is filled by the cipher operation with the + * plaintext + * @src: scatter/gather list that holds the ciphertext + * @nbytes: number of bytes of the ciphertext to decrypt. + * + * Decrypt ciphertext data using the IV set by the caller with a preceding + * call of crypto_blkcipher_set_iv. + * + * The blkcipher_desc data structure must be filled by the caller as documented + * for the crypto_blkcipher_encrypt call above. 
+ * + * Return: 0 if the cipher operation was successful; < 0 if an error occurred + * + */ +static inline int crypto_blkcipher_decrypt(struct blkcipher_desc *desc, + struct scatterlist *dst, + struct scatterlist *src, + unsigned int nbytes) +{ + desc->info = crypto_blkcipher_crt(desc->tfm)->iv; + return crypto_blkcipher_crt(desc->tfm)->decrypt(desc, dst, src, nbytes); +} + +/** + * crypto_blkcipher_decrypt_iv() - decrypt ciphertext with dedicated IV + * @desc: reference to the block cipher handle with meta data + * @dst: scatter/gather list that is filled by the cipher operation with the + * plaintext + * @src: scatter/gather list that holds the ciphertext + * @nbytes: number of bytes of the ciphertext to decrypt. + * + * Decrypt ciphertext data with the use of an IV that is solely used for this + * cipher operation. Any previously set IV is not used. + * + * The blkcipher_desc data structure must be filled by the caller as documented + * for the crypto_blkcipher_encrypt_iv call above. + * + * Return: 0 if the cipher operation was successful; < 0 if an error occurred + */ +static inline int crypto_blkcipher_decrypt_iv(struct blkcipher_desc *desc, + struct scatterlist *dst, + struct scatterlist *src, + unsigned int nbytes) +{ + return crypto_blkcipher_crt(desc->tfm)->decrypt(desc, dst, src, nbytes); +} + +/** + * crypto_blkcipher_set_iv() - set IV for cipher + * @tfm: cipher handle + * @src: buffer holding the IV + * @len: length of the IV in bytes + * + * The caller provided IV is set for the block cipher referenced by the cipher + * handle. 
+ */ +static inline void crypto_blkcipher_set_iv(struct crypto_blkcipher *tfm, + const u8 *src, unsigned int len) +{ + memcpy(crypto_blkcipher_crt(tfm)->iv, src, len); +} + +/** + * crypto_blkcipher_get_iv() - obtain IV from cipher + * @tfm: cipher handle + * @dst: buffer filled with the IV + * @len: length of the buffer dst + * + * The caller can obtain the IV set for the block cipher referenced by the + * cipher handle and store it into the user-provided buffer. If the buffer + * has an insufficient space, the IV is truncated to fit the buffer. + */ +static inline void crypto_blkcipher_get_iv(struct crypto_blkcipher *tfm, + u8 *dst, unsigned int len) +{ + memcpy(dst, crypto_blkcipher_crt(tfm)->iv, len); +} + +/** + * DOC: Single Block Cipher API + * + * The single block cipher API is used with the ciphers of type + * CRYPTO_ALG_TYPE_CIPHER (listed as type "cipher" in /proc/crypto). + * + * Using the single block cipher API calls, operations with the basic cipher + * primitive can be implemented. These cipher primitives exclude any block + * chaining operations including IV handling. + * + * The purpose of this single block cipher API is to support the implementation + * of templates or other concepts that only need to perform the cipher operation + * on one block at a time. Templates invoke the underlying cipher primitive + * block-wise and process either the input or the output data of these cipher + * operations. 
+ */ + +static inline struct crypto_cipher *__crypto_cipher_cast(struct crypto_tfm *tfm) +{ + return (struct crypto_cipher *)tfm; +} + +static inline struct crypto_cipher *crypto_cipher_cast(struct crypto_tfm *tfm) +{ + BUG_ON(crypto_tfm_alg_type(tfm) != CRYPTO_ALG_TYPE_CIPHER); + return __crypto_cipher_cast(tfm); +} + +/** + * crypto_alloc_cipher() - allocate single block cipher handle + * @alg_name: is the cra_name / name or cra_driver_name / driver name of the + * single block cipher + * @type: specifies the type of the cipher + * @mask: specifies the mask for the cipher + * + * Allocate a cipher handle for a single block cipher. The returned struct + * crypto_cipher is the cipher handle that is required for any subsequent API + * invocation for that single block cipher. + * + * Return: allocated cipher handle in case of success; IS_ERR() is true in case + * of an error, PTR_ERR() returns the error code. + */ +static inline struct crypto_cipher *crypto_alloc_cipher(const char *alg_name, + u32 type, u32 mask) +{ + type &= ~CRYPTO_ALG_TYPE_MASK; + type |= CRYPTO_ALG_TYPE_CIPHER; + mask |= CRYPTO_ALG_TYPE_MASK; + + return __crypto_cipher_cast(crypto_alloc_base(alg_name, type, mask)); +} + +static inline struct crypto_tfm *crypto_cipher_tfm(struct crypto_cipher *tfm) +{ + return &tfm->base; +} + +/** + * crypto_free_cipher() - zeroize and free the single block cipher handle + * @tfm: cipher handle to be freed + */ +static inline void crypto_free_cipher(struct crypto_cipher *tfm) +{ + crypto_free_tfm(crypto_cipher_tfm(tfm)); +} + +/** + * crypto_has_cipher() - Search for the availability of a single block cipher + * @alg_name: is the cra_name / name or cra_driver_name / driver name of the + * single block cipher + * @type: specifies the type of the cipher + * @mask: specifies the mask for the cipher + * + * Return: true when the single block cipher is known to the kernel crypto API; + * false otherwise + */ +static inline int crypto_has_cipher(const char *alg_name, 
u32 type, u32 mask) +{ + type &= ~CRYPTO_ALG_TYPE_MASK; + type |= CRYPTO_ALG_TYPE_CIPHER; + mask |= CRYPTO_ALG_TYPE_MASK; + + return crypto_has_alg(alg_name, type, mask); +} + +static inline struct cipher_tfm *crypto_cipher_crt(struct crypto_cipher *tfm) +{ + return &crypto_cipher_tfm(tfm)->crt_cipher; +} + +/** + * crypto_cipher_blocksize() - obtain block size for cipher + * @tfm: cipher handle + * + * The block size for the single block cipher referenced with the cipher handle + * tfm is returned. The caller may use that information to allocate appropriate + * memory for the data returned by the encryption or decryption operation + * + * Return: block size of cipher + */ +static inline unsigned int crypto_cipher_blocksize(struct crypto_cipher *tfm) +{ + return crypto_tfm_alg_blocksize(crypto_cipher_tfm(tfm)); +} + +static inline unsigned int crypto_cipher_alignmask(struct crypto_cipher *tfm) +{ + return crypto_tfm_alg_alignmask(crypto_cipher_tfm(tfm)); +} + +static inline u32 crypto_cipher_get_flags(struct crypto_cipher *tfm) +{ + return crypto_tfm_get_flags(crypto_cipher_tfm(tfm)); +} + +static inline void crypto_cipher_set_flags(struct crypto_cipher *tfm, + u32 flags) +{ + crypto_tfm_set_flags(crypto_cipher_tfm(tfm), flags); +} + +static inline void crypto_cipher_clear_flags(struct crypto_cipher *tfm, + u32 flags) +{ + crypto_tfm_clear_flags(crypto_cipher_tfm(tfm), flags); +} + +/** + * crypto_cipher_setkey() - set key for cipher + * @tfm: cipher handle + * @key: buffer holding the key + * @keylen: length of the key in bytes + * + * The caller provided key is set for the single block cipher referenced by the + * cipher handle. + * + * Note, the key length determines the cipher type. Many block ciphers implement + * different cipher modes depending on the key size, such as AES-128 vs AES-192 + * vs. AES-256. When providing a 16 byte key for an AES cipher handle, AES-128 + * is performed. 
+ * + * Return: 0 if the setting of the key was successful; < 0 if an error occurred + */ +static inline int crypto_cipher_setkey(struct crypto_cipher *tfm, + const u8 *key, unsigned int keylen) +{ + return crypto_cipher_crt(tfm)->cit_setkey(crypto_cipher_tfm(tfm), + key, keylen); +} + +/** + * crypto_cipher_encrypt_one() - encrypt one block of plaintext + * @tfm: cipher handle + * @dst: points to the buffer that will be filled with the ciphertext + * @src: buffer holding the plaintext to be encrypted + * + * Invoke the encryption operation of one block. The caller must ensure that + * the plaintext and ciphertext buffers are at least one block in size. + */ +static inline void crypto_cipher_encrypt_one(struct crypto_cipher *tfm, + u8 *dst, const u8 *src) +{ + crypto_cipher_crt(tfm)->cit_encrypt_one(crypto_cipher_tfm(tfm), + dst, src); +} + +/** + * crypto_cipher_decrypt_one() - decrypt one block of ciphertext + * @tfm: cipher handle + * @dst: points to the buffer that will be filled with the plaintext + * @src: buffer holding the ciphertext to be decrypted + * + * Invoke the decryption operation of one block. The caller must ensure that + * the plaintext and ciphertext buffers are at least one block in size. 
+ */ +static inline void crypto_cipher_decrypt_one(struct crypto_cipher *tfm, + u8 *dst, const u8 *src) +{ + crypto_cipher_crt(tfm)->cit_decrypt_one(crypto_cipher_tfm(tfm), + dst, src); +} + static inline struct crypto_comp *__crypto_comp_cast(struct crypto_tfm *tfm) { return (struct crypto_comp *)tfm; } +static inline struct crypto_comp *crypto_comp_cast(struct crypto_tfm *tfm) +{ + BUG_ON((crypto_tfm_alg_type(tfm) ^ CRYPTO_ALG_TYPE_COMPRESS) & + CRYPTO_ALG_TYPE_MASK); + return __crypto_comp_cast(tfm); +} + static inline struct crypto_comp *crypto_alloc_comp(const char *alg_name, u32 type, u32 mask) { @@ -755,13 +1580,26 @@ static inline const char *crypto_comp_name(struct crypto_comp *tfm) return crypto_tfm_alg_name(crypto_comp_tfm(tfm)); } -int crypto_comp_compress(struct crypto_comp *tfm, - const u8 *src, unsigned int slen, - u8 *dst, unsigned int *dlen); +static inline struct compress_tfm *crypto_comp_crt(struct crypto_comp *tfm) +{ + return &crypto_comp_tfm(tfm)->crt_compress; +} -int crypto_comp_decompress(struct crypto_comp *tfm, - const u8 *src, unsigned int slen, - u8 *dst, unsigned int *dlen); +static inline int crypto_comp_compress(struct crypto_comp *tfm, + const u8 *src, unsigned int slen, + u8 *dst, unsigned int *dlen) +{ + return crypto_comp_crt(tfm)->cot_compress(crypto_comp_tfm(tfm), + src, slen, dst, dlen); +} + +static inline int crypto_comp_decompress(struct crypto_comp *tfm, + const u8 *src, unsigned int slen, + u8 *dst, unsigned int *dlen) +{ + return crypto_comp_crt(tfm)->cot_decompress(crypto_comp_tfm(tfm), + src, slen, dst, dlen); +} #endif /* _LINUX_CRYPTO_H */ diff --git a/include/linux/cryptohash.h b/include/linux/cryptohash.h new file mode 100644 index 0000000000..f4754282c9 --- /dev/null +++ b/include/linux/cryptohash.h @@ -0,0 +1,20 @@ +#ifndef __CRYPTOHASH_H +#define __CRYPTOHASH_H + +#include + +#define SHA_DIGEST_WORDS 5 +#define SHA_MESSAGE_BYTES (512 /*bits*/ / 8) +#define SHA_WORKSPACE_WORDS 16 + +void sha_init(__u32 *buf); 
+void sha_transform(__u32 *digest, const char *data, __u32 *W); + +#define MD5_DIGEST_WORDS 4 +#define MD5_MESSAGE_BYTES 64 + +void md5_transform(__u32 *hash, __u32 const *in); + +__u32 half_md4_transform(__u32 buf[4], __u32 const in[8]); + +#endif diff --git a/include/linux/cs5535.h b/include/linux/cs5535.h index 2be1120174..cfe83239d7 100644 --- a/include/linux/cs5535.h +++ b/include/linux/cs5535.h @@ -1,8 +1,11 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * AMD CS5535/CS5536 definitions * Copyright (C) 2006 Advanced Micro Devices, Inc. * Copyright (C) 2009 Andres Salomon + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of version 2 of the GNU General Public License + * as published by the Free Software Foundation. */ #ifndef _CS5535_H diff --git a/include/linux/ctype.h b/include/linux/ctype.h index bc95aef221..9dc6f7492d 100644 --- a/include/linux/ctype.h +++ b/include/linux/ctype.h @@ -1,9 +1,6 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_CTYPE_H #define _LINUX_CTYPE_H -#include - /* * NOTE! This ctype does not handle EOF like the standard C * library is required to. 
@@ -25,6 +22,10 @@ extern const unsigned char _ctype[]; #define isalnum(c) ((__ismask(c)&(_U|_L|_D)) != 0) #define isalpha(c) ((__ismask(c)&(_U|_L)) != 0) #define iscntrl(c) ((__ismask(c)&(_C)) != 0) +static inline int isdigit(int c) +{ + return '0' <= c && c <= '9'; +} #define isgraph(c) ((__ismask(c)&(_P|_U|_L|_D)) != 0) #define islower(c) ((__ismask(c)&(_L)) != 0) #define isprint(c) ((__ismask(c)&(_P|_U|_L|_D|_SP)) != 0) @@ -37,15 +38,6 @@ extern const unsigned char _ctype[]; #define isascii(c) (((unsigned char)(c))<=0x7f) #define toascii(c) (((unsigned char)(c))&0x7f) -#if __has_builtin(__builtin_isdigit) -#define isdigit(c) __builtin_isdigit(c) -#else -static inline int isdigit(int c) -{ - return '0' <= c && c <= '9'; -} -#endif - static inline unsigned char __tolower(unsigned char c) { if (isupper(c)) @@ -67,7 +59,7 @@ static inline unsigned char __toupper(unsigned char c) * Fast implementation of tolower() for internal usage. Do not use in your * code. */ -static inline char _tolower(const char c) +static inline unsigned char _tolower(const unsigned char c) { return c | 0x20; } diff --git a/include/linux/cuda.h b/include/linux/cuda.h index 45bfe9d612..b723328238 100644 --- a/include/linux/cuda.h +++ b/include/linux/cuda.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ /* * Definitions for talking to the CUDA. The CUDA is a microcontroller * which controls the ADB, system power, RTC, and various other things. 
@@ -8,7 +7,6 @@ #ifndef _LINUX_CUDA_H #define _LINUX_CUDA_H -#include #include @@ -17,7 +15,4 @@ extern int cuda_request(struct adb_request *req, void (*done)(struct adb_request *), int nbytes, ...); extern void cuda_poll(void); -extern time64_t cuda_get_time(void); -extern int cuda_set_rtc_time(struct rtc_time *tm); - #endif /* _LINUX_CUDA_H */ diff --git a/include/linux/cyclades.h b/include/linux/cyclades.h index 05ee0f1944..19ae518f54 100644 --- a/include/linux/cyclades.h +++ b/include/linux/cyclades.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ /* $Revision: 3.0 $$Date: 1998/11/02 14:20:59 $ * linux/include/linux/cyclades.h * @@ -157,9 +156,6 @@ struct cyclades_port { struct cyclades_icount icount; struct completion shutdown_wait; int throttle; -#ifdef CONFIG_CYZ_INTR - struct timer_list rx_full_timer; -#endif }; #define CLOSING_WAIT_DELAY 30*HZ diff --git a/include/linux/davinci_emac.h b/include/linux/davinci_emac.h index 28e6cf1356..05b97144d3 100644 --- a/include/linux/davinci_emac.h +++ b/include/linux/davinci_emac.h @@ -46,4 +46,5 @@ enum { EMAC_VERSION_2, /* DM646x */ }; +void davinci_get_mac_addr(struct nvmem_device *nvmem, void *context); #endif diff --git a/include/linux/dax.h b/include/linux/dax.h index 2619d94c30..add6c4bc56 100644 --- a/include/linux/dax.h +++ b/include/linux/dax.h @@ -1,227 +1,77 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_DAX_H #define _LINUX_DAX_H #include #include #include - -/* Flag for synchronous flush */ -#define DAXDEV_F_SYNC (1UL << 0) - -typedef unsigned long dax_entry_t; +#include struct iomap_ops; -struct iomap; -struct dax_device; -struct dax_operations { - /* - * direct_access: translate a device-relative - * logical-page-offset into an absolute physical pfn. Return the - * number of pages available for DAX at that pfn. - */ - long (*direct_access)(struct dax_device *, pgoff_t, long, - void **, pfn_t *); - /* - * Validate whether this device is usable as an fsdax backing - * device. 
- */ - bool (*dax_supported)(struct dax_device *, struct block_device *, int, - sector_t, sector_t); - /* copy_from_iter: required operation for fs-dax direct-i/o */ - size_t (*copy_from_iter)(struct dax_device *, pgoff_t, void *, size_t, - struct iov_iter *); - /* copy_to_iter: required operation for fs-dax direct-i/o */ - size_t (*copy_to_iter)(struct dax_device *, pgoff_t, void *, size_t, - struct iov_iter *); - /* zero_page_range: required operation. Zero page range */ - int (*zero_page_range)(struct dax_device *, pgoff_t, size_t); -}; -extern struct attribute_group dax_attribute_group; +/* We use lowest available exceptional entry bit for locking */ +#define RADIX_DAX_ENTRY_LOCK (1 << RADIX_TREE_EXCEPTIONAL_SHIFT) -#if IS_ENABLED(CONFIG_DAX) -struct dax_device *alloc_dax(void *private, const char *host, - const struct dax_operations *ops, unsigned long flags); -void put_dax(struct dax_device *dax_dev); -void kill_dax(struct dax_device *dax_dev); -void dax_write_cache(struct dax_device *dax_dev, bool wc); -bool dax_write_cache_enabled(struct dax_device *dax_dev); -bool __dax_synchronous(struct dax_device *dax_dev); -static inline bool dax_synchronous(struct dax_device *dax_dev) -{ - return __dax_synchronous(dax_dev); -} -void __set_dax_synchronous(struct dax_device *dax_dev); -static inline void set_dax_synchronous(struct dax_device *dax_dev) -{ - __set_dax_synchronous(dax_dev); -} -/* - * Check if given mapping is supported by the file / underlying device. - */ -static inline bool daxdev_mapping_supported(struct vm_area_struct *vma, - struct dax_device *dax_dev) -{ - if (!(vma->vm_flags & VM_SYNC)) - return true; - if (!IS_DAX(file_inode(vma->vm_file))) - return false; - return dax_synchronous(dax_dev); -} -#else -static inline struct dax_device *alloc_dax(void *private, const char *host, - const struct dax_operations *ops, unsigned long flags) -{ - /* - * Callers should check IS_ENABLED(CONFIG_DAX) to know if this - * NULL is an error or expected. 
- */ - return NULL; -} -static inline void put_dax(struct dax_device *dax_dev) -{ -} -static inline void kill_dax(struct dax_device *dax_dev) -{ -} -static inline void dax_write_cache(struct dax_device *dax_dev, bool wc) -{ -} -static inline bool dax_write_cache_enabled(struct dax_device *dax_dev) -{ - return false; -} -static inline bool dax_synchronous(struct dax_device *dax_dev) -{ - return true; -} -static inline void set_dax_synchronous(struct dax_device *dax_dev) -{ -} -static inline bool daxdev_mapping_supported(struct vm_area_struct *vma, - struct dax_device *dax_dev) -{ - return !(vma->vm_flags & VM_SYNC); -} -#endif - -struct writeback_control; -int bdev_dax_pgoff(struct block_device *, sector_t, size_t, pgoff_t *pgoff); -#if IS_ENABLED(CONFIG_FS_DAX) -bool generic_fsdax_supported(struct dax_device *dax_dev, - struct block_device *bdev, int blocksize, sector_t start, - sector_t sectors); - -bool dax_supported(struct dax_device *dax_dev, struct block_device *bdev, - int blocksize, sector_t start, sector_t len); - -static inline void fs_put_dax(struct dax_device *dax_dev) -{ - put_dax(dax_dev); -} - -struct dax_device *fs_dax_get_by_bdev(struct block_device *bdev); -int dax_writeback_mapping_range(struct address_space *mapping, - struct dax_device *dax_dev, struct writeback_control *wbc); - -struct page *dax_layout_busy_page(struct address_space *mapping); -struct page *dax_layout_busy_page_range(struct address_space *mapping, loff_t start, loff_t end); -dax_entry_t dax_lock_page(struct page *page); -void dax_unlock_page(struct page *page, dax_entry_t cookie); -#else -#define generic_fsdax_supported NULL - -static inline bool dax_supported(struct dax_device *dax_dev, - struct block_device *bdev, int blocksize, sector_t start, - sector_t len) -{ - return false; -} - -static inline void fs_put_dax(struct dax_device *dax_dev) -{ -} - -static inline struct dax_device *fs_dax_get_by_bdev(struct block_device *bdev) -{ - return NULL; -} - -static inline struct 
page *dax_layout_busy_page(struct address_space *mapping) -{ - return NULL; -} - -static inline struct page *dax_layout_busy_page_range(struct address_space *mapping, pgoff_t start, pgoff_t nr_pages) -{ - return NULL; -} - -static inline int dax_writeback_mapping_range(struct address_space *mapping, - struct dax_device *dax_dev, struct writeback_control *wbc) -{ - return -EOPNOTSUPP; -} - -static inline dax_entry_t dax_lock_page(struct page *page) -{ - if (IS_DAX(page->mapping->host)) - return ~0UL; - return 0; -} - -static inline void dax_unlock_page(struct page *page, dax_entry_t cookie) -{ -} -#endif - -#if IS_ENABLED(CONFIG_DAX) -int dax_read_lock(void); -void dax_read_unlock(int id); -#else -static inline int dax_read_lock(void) -{ - return 0; -} - -static inline void dax_read_unlock(int id) -{ -} -#endif /* CONFIG_DAX */ -bool dax_alive(struct dax_device *dax_dev); -void *dax_get_private(struct dax_device *dax_dev); -long dax_direct_access(struct dax_device *dax_dev, pgoff_t pgoff, long nr_pages, - void **kaddr, pfn_t *pfn); -size_t dax_copy_from_iter(struct dax_device *dax_dev, pgoff_t pgoff, void *addr, - size_t bytes, struct iov_iter *i); -size_t dax_copy_to_iter(struct dax_device *dax_dev, pgoff_t pgoff, void *addr, - size_t bytes, struct iov_iter *i); -int dax_zero_page_range(struct dax_device *dax_dev, pgoff_t pgoff, - size_t nr_pages); -void dax_flush(struct dax_device *dax_dev, void *addr, size_t size); - -ssize_t dax_iomap_rw(struct kiocb *iocb, struct iov_iter *iter, - const struct iomap_ops *ops); -vm_fault_t dax_iomap_fault(struct vm_fault *vmf, enum page_entry_size pe_size, - pfn_t *pfnp, int *errp, const struct iomap_ops *ops); -vm_fault_t dax_finish_sync_fault(struct vm_fault *vmf, - enum page_entry_size pe_size, pfn_t pfn); +ssize_t iomap_dax_rw(struct kiocb *iocb, struct iov_iter *iter, + struct iomap_ops *ops); +ssize_t dax_do_io(struct kiocb *, struct inode *, struct iov_iter *, + get_block_t, dio_iodone_t, int flags); +int 
dax_zero_page_range(struct inode *, loff_t from, unsigned len, get_block_t); +int dax_truncate_page(struct inode *, loff_t from, get_block_t); +int iomap_dax_fault(struct vm_area_struct *vma, struct vm_fault *vmf, + struct iomap_ops *ops); +int dax_fault(struct vm_area_struct *, struct vm_fault *, get_block_t); int dax_delete_mapping_entry(struct address_space *mapping, pgoff_t index); -int dax_invalidate_mapping_entry_sync(struct address_space *mapping, - pgoff_t index); -s64 dax_iomap_zero(loff_t pos, u64 length, struct iomap *iomap); +void dax_wake_mapping_entry_waiter(struct address_space *mapping, + pgoff_t index, bool wake_all); + +#ifdef CONFIG_FS_DAX +struct page *read_dax_sector(struct block_device *bdev, sector_t n); +void dax_unlock_mapping_entry(struct address_space *mapping, pgoff_t index); +int __dax_zero_page_range(struct block_device *bdev, sector_t sector, + unsigned int offset, unsigned int length); +#else +static inline struct page *read_dax_sector(struct block_device *bdev, + sector_t n) +{ + return ERR_PTR(-ENXIO); +} +/* Shouldn't ever be called when dax is disabled. 
*/ +static inline void dax_unlock_mapping_entry(struct address_space *mapping, + pgoff_t index) +{ + BUG(); +} +static inline int __dax_zero_page_range(struct block_device *bdev, + sector_t sector, unsigned int offset, unsigned int length) +{ + return -ENXIO; +} +#endif + +#if defined(CONFIG_TRANSPARENT_HUGEPAGE) +int dax_pmd_fault(struct vm_area_struct *, unsigned long addr, pmd_t *, + unsigned int flags, get_block_t); +#else +static inline int dax_pmd_fault(struct vm_area_struct *vma, unsigned long addr, + pmd_t *pmd, unsigned int flags, get_block_t gb) +{ + return VM_FAULT_FALLBACK; +} +#endif +int dax_pfn_mkwrite(struct vm_area_struct *, struct vm_fault *); +#define dax_mkwrite(vma, vmf, gb) dax_fault(vma, vmf, gb) + +static inline bool vma_is_dax(struct vm_area_struct *vma) +{ + return vma->vm_file && IS_DAX(vma->vm_file->f_mapping->host); +} + static inline bool dax_mapping(struct address_space *mapping) { return mapping->host && IS_DAX(mapping->host); } -#ifdef CONFIG_DEV_DAX_HMEM_DEVICES -void hmem_register_device(int target_nid, struct resource *r); -#else -static inline void hmem_register_device(int target_nid, struct resource *r) -{ -} -#endif - +struct writeback_control; +int dax_writeback_mapping_range(struct address_space *mapping, + struct block_device *bdev, struct writeback_control *wbc); #endif diff --git a/include/linux/dca.h b/include/linux/dca.h index d6228e334f..ad956c2e07 100644 --- a/include/linux/dca.h +++ b/include/linux/dca.h @@ -1,6 +1,22 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * Copyright(c) 2007 - 2009 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 2 of the License, or (at your option) + * any later version. 
+ * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., 59 + * Temple Place - Suite 330, Boston, MA 02111-1307, USA. + * + * The full GNU General Public License is included in this distribution in the + * file called COPYING. */ #ifndef DCA_H #define DCA_H diff --git a/include/linux/dcache.h b/include/linux/dcache.h index 9e23d33bb6..2b3dcac0b8 100644 --- a/include/linux/dcache.h +++ b/include/linux/dcache.h @@ -1,10 +1,8 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef __LINUX_DCACHE_H #define __LINUX_DCACHE_H #include #include -#include #include #include #include @@ -13,7 +11,6 @@ #include #include #include -#include struct path; struct vfsmount; @@ -57,17 +54,12 @@ struct qstr { #define QSTR_INIT(n,l) { { { .len = l } }, .name = n } -extern const struct qstr empty_name; -extern const struct qstr slash_name; -extern const struct qstr dotdot_name; - struct dentry_stat_t { long nr_dentry; long nr_unused; - long age_limit; /* age in seconds */ - long want_pages; /* pages requested by system */ - long nr_negative; /* # of unused negative dentries */ - long dummy; /* Reserved for future use */ + long age_limit; /* age in seconds */ + long want_pages; /* pages requested by system */ + long dummy[2]; }; extern struct dentry_stat_t dentry_stat; @@ -91,7 +83,7 @@ extern struct dentry_stat_t dentry_stat; struct dentry { /* RCU lookup touched fields */ unsigned int d_flags; /* protected by d_lock */ - seqcount_spinlock_t d_seq; /* per dentry seqlock */ + seqcount_t d_seq; /* per dentry seqlock */ struct hlist_bl_node d_hash; /* lookup hash list */ struct dentry *d_parent; /* parent directory */ struct qstr d_name; @@ -110,6 
+102,9 @@ struct dentry { struct list_head d_lru; /* LRU list */ wait_queue_head_t *d_wait; /* in-lookup ones only */ }; +#ifdef CONFIG_GRKERNSEC_CHROOT_RENAME + atomic_t chroot_refcnt; /* tracks use of directory in chroot */ +#endif struct list_head d_child; /* child of parent list */ struct list_head d_subdirs; /* our children */ /* @@ -147,15 +142,16 @@ struct dentry_operations { void (*d_iput)(struct dentry *, struct inode *); char *(*d_dname)(struct dentry *, char *, int); struct vfsmount *(*d_automount)(struct path *); - int (*d_manage)(const struct path *, bool); - struct dentry *(*d_real)(struct dentry *, const struct inode *); + int (*d_manage)(struct dentry *, bool); + struct dentry *(*d_real)(struct dentry *, const struct inode *, + unsigned int); } ____cacheline_aligned; /* * Locking rules for dentry_operations callbacks are to be found in - * Documentation/filesystems/locking.rst. Keep it updated! + * Documentation/filesystems/Locking. Keep it updated! * - * FUrther descriptions are found in Documentation/filesystems/vfs.rst. + * FUrther descriptions are found in Documentation/filesystems/vfs.txt. * Keep it updated too! */ @@ -178,8 +174,7 @@ struct dentry_operations { * typically using d_splice_alias. */ #define DCACHE_REFERENCED 0x00000040 /* Recently used, don't discard. 
*/ - -#define DCACHE_DONTCACHE 0x00000080 /* Purge from memory on final dput() */ +#define DCACHE_RCUACCESS 0x00000080 /* Entry has ever been RCU-visible */ #define DCACHE_CANT_MOUNT 0x00000100 #define DCACHE_GENOCIDE 0x00000200 @@ -215,12 +210,11 @@ struct dentry_operations { #define DCACHE_MAY_FREE 0x00800000 #define DCACHE_FALLTHRU 0x01000000 /* Fall through to lower layer */ -#define DCACHE_NOKEY_NAME 0x02000000 /* Encrypted name encoded without key */ +#define DCACHE_ENCRYPTED_WITH_KEY 0x02000000 /* dir is encrypted with a valid key */ #define DCACHE_OP_REAL 0x04000000 #define DCACHE_PAR_LOOKUP 0x10000000 /* being looked up (with parent locked shared) */ #define DCACHE_DENTRY_CURSOR 0x20000000 -#define DCACHE_NORCU 0x40000000 /* No RCU delay for freeing */ extern seqlock_t rename_lock; @@ -228,9 +222,8 @@ extern seqlock_t rename_lock; * These are the low-level FS interfaces to the dcache.. */ extern void d_instantiate(struct dentry *, struct inode *); -extern void d_instantiate_new(struct dentry *, struct inode *); extern struct dentry * d_instantiate_unique(struct dentry *, struct inode *); -extern struct dentry * d_instantiate_anon(struct dentry *, struct inode *); +extern int d_instantiate_no_diralias(struct dentry *, struct inode *); extern void __d_drop(struct dentry *dentry); extern void d_drop(struct dentry *dentry); extern void d_delete(struct dentry *); @@ -238,7 +231,7 @@ extern void d_set_d_op(struct dentry *dentry, const struct dentry_operations *op /* allocate/de-allocate */ extern struct dentry * d_alloc(struct dentry *, const struct qstr *); -extern struct dentry * d_alloc_anon(struct super_block *); +extern struct dentry * d_alloc_pseudo(struct super_block *, const struct qstr *); extern struct dentry * d_alloc_parallel(struct dentry *, const struct qstr *, wait_queue_head_t *); extern struct dentry * d_splice_alias(struct inode *, struct dentry *); @@ -263,10 +256,8 @@ extern void d_tmpfile(struct dentry *, struct inode *); extern struct 
dentry *d_find_alias(struct inode *); extern void d_prune_aliases(struct inode *); -extern struct dentry *d_find_alias_rcu(struct inode *); - /* test whether we have any submounts in a subdir tree */ -extern int path_has_submounts(const struct path *); +extern int have_submounts(struct dentry *); /* * This adds the entry to the hash queues. @@ -275,6 +266,8 @@ extern void d_rehash(struct dentry *); extern void d_add(struct dentry *, struct inode *); +extern void dentry_update_name_case(struct dentry *, const struct qstr *); + /* used for rename() and baskets */ extern void d_move(struct dentry *, struct dentry *); extern void d_exchange(struct dentry *, struct dentry *); @@ -289,7 +282,7 @@ extern struct dentry *__d_lookup_rcu(const struct dentry *parent, static inline unsigned d_count(const struct dentry *dentry) { - return dentry->d_lockref.count; + return __lockref_read(&dentry->d_lockref); } /* @@ -297,12 +290,13 @@ static inline unsigned d_count(const struct dentry *dentry) */ extern __printf(4, 5) char *dynamic_dname(struct dentry *, char *, int, const char *, ...); +extern char *simple_dname(struct dentry *, char *, int); extern char *__d_path(const struct path *, const struct path *, char *, int); extern char *d_absolute_path(const struct path *, char *, int); extern char *d_path(const struct path *, char *, int); -extern char *dentry_path_raw(const struct dentry *, char *, int); -extern char *dentry_path(const struct dentry *, char *, int); +extern char *dentry_path_raw(struct dentry *, char *, int); +extern char *dentry_path(struct dentry *, char *, int); /* Allocation counts.. 
*/ @@ -317,7 +311,7 @@ extern char *dentry_path(const struct dentry *, char *, int); static inline struct dentry *dget_dlock(struct dentry *dentry) { if (dentry) - dentry->d_lockref.count++; + __lockref_inc(&dentry->d_lockref); return dentry; } @@ -361,7 +355,7 @@ static inline void dont_mount(struct dentry *dentry) extern void __d_lookup_done(struct dentry *); -static inline int d_in_lookup(const struct dentry *dentry) +static inline int d_in_lookup(struct dentry *dentry) { return dentry->d_flags & DCACHE_PAR_LOOKUP; } @@ -446,11 +440,6 @@ static inline bool d_is_negative(const struct dentry *dentry) return d_is_miss(dentry); } -static inline bool d_flags_negative(unsigned flags) -{ - return (flags & DCACHE_ENTRY_TYPE) == DCACHE_MISS_TYPE; -} - static inline bool d_is_positive(const struct dentry *dentry) { return !d_is_negative(dentry); @@ -494,7 +483,7 @@ static inline bool d_really_is_positive(const struct dentry *dentry) return dentry->d_inode != NULL; } -static inline int simple_positive(const struct dentry *dentry) +static inline int simple_positive(struct dentry *dentry) { return d_really_is_positive(dentry) && !d_unhashed(dentry); } @@ -527,7 +516,7 @@ static inline struct inode *d_inode(const struct dentry *dentry) } /** - * d_inode_rcu - Get the actual inode of this dentry with READ_ONCE() + * d_inode_rcu - Get the actual inode of this dentry with ACCESS_ONCE() * @dentry: The dentry to query * * This is the helper normal filesystems should use to get at their own inodes @@ -535,7 +524,7 @@ static inline struct inode *d_inode(const struct dentry *dentry) */ static inline struct inode *d_inode_rcu(const struct dentry *dentry) { - return READ_ONCE(dentry->d_inode); + return ACCESS_ONCE(dentry->d_inode); } /** @@ -574,17 +563,19 @@ static inline struct dentry *d_backing_dentry(struct dentry *upper) * d_real - Return the real dentry * @dentry: the dentry to query * @inode: inode to select the dentry from multiple layers (can be NULL) + * @flags: open flags to 
control copy-up behavior * - * If dentry is on a union/overlay, then return the underlying, real dentry. + * If dentry is on an union/overlay, then return the underlying, real dentry. * Otherwise return the dentry itself. * - * See also: Documentation/filesystems/vfs.rst + * See also: Documentation/filesystems/vfs.txt */ static inline struct dentry *d_real(struct dentry *dentry, - const struct inode *inode) + const struct inode *inode, + unsigned int flags) { if (unlikely(dentry->d_flags & DCACHE_OP_REAL)) - return dentry->d_op->d_real(dentry, inode); + return dentry->d_op->d_real(dentry, inode, flags); else return dentry; } @@ -593,20 +584,14 @@ static inline struct dentry *d_real(struct dentry *dentry, * d_real_inode - Return the real inode * @dentry: The dentry to query * - * If dentry is on a union/overlay, then return the underlying, real inode. + * If dentry is on an union/overlay, then return the underlying, real inode. * Otherwise return d_inode(). */ static inline struct inode *d_real_inode(const struct dentry *dentry) { /* This usage of d_real() results in const dentry */ - return d_backing_inode(d_real((struct dentry *) dentry, NULL)); + return d_backing_inode(d_real((struct dentry *) dentry, NULL, 0)); } -struct name_snapshot { - struct qstr name; - unsigned char inline_name[DNAME_INLINE_LEN]; -}; -void take_dentry_name_snapshot(struct name_snapshot *, struct dentry *); -void release_dentry_name_snapshot(struct name_snapshot *); #endif /* __LINUX_DCACHE_H */ diff --git a/include/linux/dccp.h b/include/linux/dccp.h index 07e547c02f..68449293c4 100644 --- a/include/linux/dccp.h +++ b/include/linux/dccp.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_DCCP_H #define _LINUX_DCCP_H @@ -198,7 +197,7 @@ enum dccp_role { struct dccp_service_list { __u32 dccpsl_nr; - __be32 dccpsl_list[]; + __be32 dccpsl_list[0]; }; #define DCCP_SERVICE_INVALID_VALUE htonl((__u32)-1) diff --git a/include/linux/dcookies.h b/include/linux/dcookies.h index 
ddfdac20ca..5ac3bdd5ce 100644 --- a/include/linux/dcookies.h +++ b/include/linux/dcookies.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ /* * dcookies.h * @@ -45,7 +44,7 @@ void dcookie_unregister(struct dcookie_user * user); * * Returns 0 on success, with *cookie filled in */ -int get_dcookie(const struct path *path, unsigned long *cookie); +int get_dcookie(struct path *path, unsigned long *cookie); #else @@ -59,7 +58,7 @@ static inline void dcookie_unregister(struct dcookie_user * user) return; } -static inline int get_dcookie(const struct path *path, unsigned long *cookie) +static inline int get_dcookie(struct path *path, unsigned long *cookie) { return -ENOSYS; } diff --git a/include/linux/debug_locks.h b/include/linux/debug_locks.h index 3f49e65169..822c1354f3 100644 --- a/include/linux/debug_locks.h +++ b/include/linux/debug_locks.h @@ -1,17 +1,17 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef __LINUX_DEBUG_LOCKING_H #define __LINUX_DEBUG_LOCKING_H +#include #include -#include +#include struct task_struct; -extern int debug_locks __read_mostly; -extern int debug_locks_silent __read_mostly; +extern int debug_locks; +extern int debug_locks_silent; -static __always_inline int __debug_locks_off(void) +static inline int __debug_locks_off(void) { return xchg(&debug_locks, 0); } @@ -26,10 +26,8 @@ extern int debug_locks_off(void); int __ret = 0; \ \ if (!oops_in_progress && unlikely(c)) { \ - instrumentation_begin(); \ if (debug_locks_off() && !debug_locks_silent) \ WARN(1, "DEBUG_LOCKS_WARN_ON(%s)", #c); \ - instrumentation_end(); \ __ret = 1; \ } \ __ret; \ diff --git a/include/linux/debugfs.h b/include/linux/debugfs.h index c869f1e73d..08201a6179 100644 --- a/include/linux/debugfs.h +++ b/include/linux/debugfs.h @@ -1,12 +1,15 @@ -// SPDX-License-Identifier: GPL-2.0 /* * debugfs.h - a tiny little debug file system * * Copyright (C) 2004 Greg Kroah-Hartman * Copyright (C) 2004 IBM Inc. 
* + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License version + * 2 as published by the Free Software Foundation. + * * debugfs is for people to use instead of /proc or /sys. - * See Documentation/filesystems/ for more details. + * See Documentation/DocBook/filesystems for more details. */ #ifndef _DEBUGFS_H_ @@ -20,6 +23,7 @@ struct device; struct file_operations; +struct srcu_struct; struct debugfs_blob_wrapper { void *data; @@ -35,16 +39,29 @@ struct debugfs_regset32 { const struct debugfs_reg32 *regs; int nregs; void __iomem *base; - struct device *dev; /* Optional device for Runtime PM */ -}; - -struct debugfs_u32_array { - u32 *array; - u32 n_elements; }; extern struct dentry *arch_debugfs_dir; +extern struct srcu_struct debugfs_srcu; + +/** + * debugfs_real_fops - getter for the real file operation + * @filp: a pointer to a struct file + * + * Must only be called under the protection established by + * debugfs_use_file_start(). + */ +static inline const struct file_operations *debugfs_real_fops(struct file *filp) + __must_hold(&debugfs_srcu) +{ + /* + * Neither the pointer to the struct file_operations, nor its + * contents ever change -- srcu_dereference() is not needed here. 
+ */ + return filp->f_path.dentry->d_fsdata; +} + #define DEFINE_DEBUGFS_ATTRIBUTE(__fops, __get, __set, __fmt) \ static int __fops ## _open(struct inode *inode, struct file *file) \ { \ @@ -57,15 +74,11 @@ static const struct file_operations __fops = { \ .release = simple_attr_release, \ .read = debugfs_attr_read, \ .write = debugfs_attr_write, \ - .llseek = no_llseek, \ + .llseek = generic_file_llseek, \ } -typedef struct vfsmount *(*debugfs_automount_t)(struct dentry *, void *); - #if defined(CONFIG_DEBUG_FS) -struct dentry *debugfs_lookup(const char *name, struct dentry *parent); - struct dentry *debugfs_create_file(const char *name, umode_t mode, struct dentry *parent, void *data, const struct file_operations *fops); @@ -73,28 +86,29 @@ struct dentry *debugfs_create_file_unsafe(const char *name, umode_t mode, struct dentry *parent, void *data, const struct file_operations *fops); -void debugfs_create_file_size(const char *name, umode_t mode, - struct dentry *parent, void *data, - const struct file_operations *fops, - loff_t file_size); +struct dentry *debugfs_create_file_size(const char *name, umode_t mode, + struct dentry *parent, void *data, + const struct file_operations *fops, + loff_t file_size); struct dentry *debugfs_create_dir(const char *name, struct dentry *parent); struct dentry *debugfs_create_symlink(const char *name, struct dentry *parent, const char *dest); +typedef struct vfsmount *(*debugfs_automount_t)(struct dentry *, void *); struct dentry *debugfs_create_automount(const char *name, struct dentry *parent, debugfs_automount_t f, void *data); void debugfs_remove(struct dentry *dentry); -#define debugfs_remove_recursive debugfs_remove +void debugfs_remove_recursive(struct dentry *dentry); -const struct file_operations *debugfs_real_fops(const struct file *filp); +int debugfs_use_file_start(const struct dentry *dentry, int *srcu_idx) + __acquires(&debugfs_srcu); -int debugfs_file_get(struct dentry *dentry); -void debugfs_file_put(struct dentry 
*dentry); +void debugfs_use_file_finish(int srcu_idx) __releases(&debugfs_srcu); ssize_t debugfs_attr_read(struct file *file, char __user *buf, size_t len, loff_t *ppos); @@ -104,51 +118,52 @@ ssize_t debugfs_attr_write(struct file *file, const char __user *buf, struct dentry *debugfs_rename(struct dentry *old_dir, struct dentry *old_dentry, struct dentry *new_dir, const char *new_name); -void debugfs_create_u8(const char *name, umode_t mode, struct dentry *parent, - u8 *value); -void debugfs_create_u16(const char *name, umode_t mode, struct dentry *parent, - u16 *value); -void debugfs_create_u32(const char *name, umode_t mode, struct dentry *parent, - u32 *value); -void debugfs_create_u64(const char *name, umode_t mode, struct dentry *parent, - u64 *value); -void debugfs_create_ulong(const char *name, umode_t mode, struct dentry *parent, - unsigned long *value); -void debugfs_create_x8(const char *name, umode_t mode, struct dentry *parent, - u8 *value); -void debugfs_create_x16(const char *name, umode_t mode, struct dentry *parent, - u16 *value); -void debugfs_create_x32(const char *name, umode_t mode, struct dentry *parent, - u32 *value); -void debugfs_create_x64(const char *name, umode_t mode, struct dentry *parent, - u64 *value); -void debugfs_create_size_t(const char *name, umode_t mode, - struct dentry *parent, size_t *value); -void debugfs_create_atomic_t(const char *name, umode_t mode, - struct dentry *parent, atomic_t *value); -void debugfs_create_bool(const char *name, umode_t mode, struct dentry *parent, - bool *value); -void debugfs_create_str(const char *name, umode_t mode, - struct dentry *parent, char **value); +struct dentry *debugfs_create_u8(const char *name, umode_t mode, + struct dentry *parent, u8 *value); +struct dentry *debugfs_create_u16(const char *name, umode_t mode, + struct dentry *parent, u16 *value); +struct dentry *debugfs_create_u32(const char *name, umode_t mode, + struct dentry *parent, u32 *value); +struct dentry 
*debugfs_create_u64(const char *name, umode_t mode, + struct dentry *parent, u64 *value); +struct dentry *debugfs_create_ulong(const char *name, umode_t mode, + struct dentry *parent, unsigned long *value); +struct dentry *debugfs_create_x8(const char *name, umode_t mode, + struct dentry *parent, u8 *value); +struct dentry *debugfs_create_x16(const char *name, umode_t mode, + struct dentry *parent, u16 *value); +struct dentry *debugfs_create_x32(const char *name, umode_t mode, + struct dentry *parent, u32 *value); +struct dentry *debugfs_create_x64(const char *name, umode_t mode, + struct dentry *parent, u64 *value); +struct dentry *debugfs_create_size_t(const char *name, umode_t mode, + struct dentry *parent, size_t *value); +struct dentry *debugfs_create_atomic_t(const char *name, umode_t mode, + struct dentry *parent, atomic_t *value); +struct dentry *debugfs_create_atomic_unchecked_t(const char *name, umode_t mode, + struct dentry *parent, atomic_unchecked_t *value); +struct dentry *debugfs_create_bool(const char *name, umode_t mode, + struct dentry *parent, bool *value); struct dentry *debugfs_create_blob(const char *name, umode_t mode, struct dentry *parent, struct debugfs_blob_wrapper *blob); -void debugfs_create_regset32(const char *name, umode_t mode, - struct dentry *parent, - struct debugfs_regset32 *regset); +struct dentry *debugfs_create_regset32(const char *name, umode_t mode, + struct dentry *parent, + struct debugfs_regset32 *regset); void debugfs_print_regs32(struct seq_file *s, const struct debugfs_reg32 *regs, int nregs, void __iomem *base, char *prefix); -void debugfs_create_u32_array(const char *name, umode_t mode, - struct dentry *parent, - struct debugfs_u32_array *array); +struct dentry *debugfs_create_u32_array(const char *name, umode_t mode, + struct dentry *parent, + u32 *array, u32 elements); -void debugfs_create_devm_seqfile(struct device *dev, const char *name, - struct dentry *parent, - int (*read_fn)(struct seq_file *s, void *data)); 
+struct dentry *debugfs_create_devm_seqfile(struct device *dev, const char *name, + struct dentry *parent, + int (*read_fn)(struct seq_file *s, + void *data)); bool debugfs_initialized(void); @@ -158,9 +173,6 @@ ssize_t debugfs_read_file_bool(struct file *file, char __user *user_buf, ssize_t debugfs_write_file_bool(struct file *file, const char __user *user_buf, size_t count, loff_t *ppos); -ssize_t debugfs_read_file_str(struct file *file, char __user *user_buf, - size_t count, loff_t *ppos); - #else #include @@ -171,12 +183,6 @@ ssize_t debugfs_read_file_str(struct file *file, char __user *user_buf, * want to duplicate the design decision mistakes of procfs and devfs again. */ -static inline struct dentry *debugfs_lookup(const char *name, - struct dentry *parent) -{ - return ERR_PTR(-ENODEV); -} - static inline struct dentry *debugfs_create_file(const char *name, umode_t mode, struct dentry *parent, void *data, const struct file_operations *fops) @@ -184,20 +190,14 @@ static inline struct dentry *debugfs_create_file(const char *name, umode_t mode, return ERR_PTR(-ENODEV); } -static inline struct dentry *debugfs_create_file_unsafe(const char *name, - umode_t mode, struct dentry *parent, - void *data, - const struct file_operations *fops) +static inline struct dentry *debugfs_create_file_size(const char *name, umode_t mode, + struct dentry *parent, void *data, + const struct file_operations *fops, + loff_t file_size) { return ERR_PTR(-ENODEV); } -static inline void debugfs_create_file_size(const char *name, umode_t mode, - struct dentry *parent, void *data, - const struct file_operations *fops, - loff_t file_size) -{ } - static inline struct dentry *debugfs_create_dir(const char *name, struct dentry *parent) { @@ -213,7 +213,7 @@ static inline struct dentry *debugfs_create_symlink(const char *name, static inline struct dentry *debugfs_create_automount(const char *name, struct dentry *parent, - debugfs_automount_t f, + struct vfsmount *(*f)(void *), void *data) { 
return ERR_PTR(-ENODEV); @@ -225,14 +225,15 @@ static inline void debugfs_remove(struct dentry *dentry) static inline void debugfs_remove_recursive(struct dentry *dentry) { } -const struct file_operations *debugfs_real_fops(const struct file *filp); - -static inline int debugfs_file_get(struct dentry *dentry) +static inline int debugfs_use_file_start(const struct dentry *dentry, + int *srcu_idx) + __acquires(&debugfs_srcu) { return 0; } -static inline void debugfs_file_put(struct dentry *dentry) +static inline void debugfs_use_file_finish(int srcu_idx) + __releases(&debugfs_srcu) { } static inline ssize_t debugfs_attr_read(struct file *file, char __user *buf, @@ -254,50 +255,87 @@ static inline struct dentry *debugfs_rename(struct dentry *old_dir, struct dentr return ERR_PTR(-ENODEV); } -static inline void debugfs_create_u8(const char *name, umode_t mode, - struct dentry *parent, u8 *value) { } +static inline struct dentry *debugfs_create_u8(const char *name, umode_t mode, + struct dentry *parent, + u8 *value) +{ + return ERR_PTR(-ENODEV); +} -static inline void debugfs_create_u16(const char *name, umode_t mode, - struct dentry *parent, u16 *value) { } +static inline struct dentry *debugfs_create_u16(const char *name, umode_t mode, + struct dentry *parent, + u16 *value) +{ + return ERR_PTR(-ENODEV); +} -static inline void debugfs_create_u32(const char *name, umode_t mode, - struct dentry *parent, u32 *value) { } +static inline struct dentry *debugfs_create_u32(const char *name, umode_t mode, + struct dentry *parent, + u32 *value) +{ + return ERR_PTR(-ENODEV); +} -static inline void debugfs_create_u64(const char *name, umode_t mode, - struct dentry *parent, u64 *value) { } +static inline struct dentry *debugfs_create_u64(const char *name, umode_t mode, + struct dentry *parent, + u64 *value) +{ + return ERR_PTR(-ENODEV); +} -static inline void debugfs_create_ulong(const char *name, umode_t mode, - struct dentry *parent, - unsigned long *value) { } +static inline 
struct dentry *debugfs_create_x8(const char *name, umode_t mode, + struct dentry *parent, + u8 *value) +{ + return ERR_PTR(-ENODEV); +} -static inline void debugfs_create_x8(const char *name, umode_t mode, - struct dentry *parent, u8 *value) { } +static inline struct dentry *debugfs_create_x16(const char *name, umode_t mode, + struct dentry *parent, + u16 *value) +{ + return ERR_PTR(-ENODEV); +} -static inline void debugfs_create_x16(const char *name, umode_t mode, - struct dentry *parent, u16 *value) { } +static inline struct dentry *debugfs_create_x32(const char *name, umode_t mode, + struct dentry *parent, + u32 *value) +{ + return ERR_PTR(-ENODEV); +} -static inline void debugfs_create_x32(const char *name, umode_t mode, - struct dentry *parent, u32 *value) { } +static inline struct dentry *debugfs_create_x64(const char *name, umode_t mode, + struct dentry *parent, + u64 *value) +{ + return ERR_PTR(-ENODEV); +} -static inline void debugfs_create_x64(const char *name, umode_t mode, - struct dentry *parent, u64 *value) { } +static inline struct dentry *debugfs_create_size_t(const char *name, umode_t mode, + struct dentry *parent, + size_t *value) +{ + return ERR_PTR(-ENODEV); +} -static inline void debugfs_create_size_t(const char *name, umode_t mode, - struct dentry *parent, size_t *value) -{ } +static inline struct dentry *debugfs_create_atomic_t(const char *name, umode_t mode, + struct dentry *parent, atomic_t *value) +{ + return ERR_PTR(-ENODEV); +} -static inline void debugfs_create_atomic_t(const char *name, umode_t mode, - struct dentry *parent, - atomic_t *value) -{ } +static inline struct dentry *debugfs_create_atomic_unchecked_t(const char *name, umode_t mode, + struct dentry *parent, atomic_unchecked_t *value) +{ + return ERR_PTR(-ENODEV); +} -static inline void debugfs_create_bool(const char *name, umode_t mode, - struct dentry *parent, bool *value) { } - -static inline void debugfs_create_str(const char *name, umode_t mode, - struct dentry *parent, - 
char **value) -{ } +static inline struct dentry *debugfs_create_bool(const char *name, umode_t mode, + struct dentry *parent, + bool *value) +{ + return ERR_PTR(-ENODEV); +} static inline struct dentry *debugfs_create_blob(const char *name, umode_t mode, struct dentry *parent, @@ -306,10 +344,11 @@ static inline struct dentry *debugfs_create_blob(const char *name, umode_t mode, return ERR_PTR(-ENODEV); } -static inline void debugfs_create_regset32(const char *name, umode_t mode, - struct dentry *parent, - struct debugfs_regset32 *regset) +static inline struct dentry *debugfs_create_regset32(const char *name, + umode_t mode, struct dentry *parent, + struct debugfs_regset32 *regset) { + return ERR_PTR(-ENODEV); } static inline void debugfs_print_regs32(struct seq_file *s, const struct debugfs_reg32 *regs, @@ -322,18 +361,20 @@ static inline bool debugfs_initialized(void) return false; } -static inline void debugfs_create_u32_array(const char *name, umode_t mode, - struct dentry *parent, - struct debugfs_u32_array *array) +static inline struct dentry *debugfs_create_u32_array(const char *name, umode_t mode, + struct dentry *parent, + u32 *array, u32 elements) { + return ERR_PTR(-ENODEV); } -static inline void debugfs_create_devm_seqfile(struct device *dev, - const char *name, - struct dentry *parent, - int (*read_fn)(struct seq_file *s, - void *data)) +static inline struct dentry *debugfs_create_devm_seqfile(struct device *dev, + const char *name, + struct dentry *parent, + int (*read_fn)(struct seq_file *s, + void *data)) { + return ERR_PTR(-ENODEV); } static inline ssize_t debugfs_read_file_bool(struct file *file, @@ -350,34 +391,6 @@ static inline ssize_t debugfs_write_file_bool(struct file *file, return -ENODEV; } -static inline ssize_t debugfs_read_file_str(struct file *file, - char __user *user_buf, - size_t count, loff_t *ppos) -{ - return -ENODEV; -} - #endif -/** - * debugfs_create_xul - create a debugfs file that is used to read and write an - * unsigned 
long value, formatted in hexadecimal - * @name: a pointer to a string containing the name of the file to create. - * @mode: the permission that the file should have - * @parent: a pointer to the parent dentry for this file. This should be a - * directory dentry if set. If this parameter is %NULL, then the - * file will be created in the root of the debugfs filesystem. - * @value: a pointer to the variable that the file should read to and write - * from. - */ -static inline void debugfs_create_xul(const char *name, umode_t mode, - struct dentry *parent, - unsigned long *value) -{ - if (sizeof(*value) == sizeof(u32)) - debugfs_create_x32(name, mode, parent, (u32 *)value); - else - debugfs_create_x64(name, mode, parent, (u64 *)value); -} - #endif diff --git a/include/linux/debugobjects.h b/include/linux/debugobjects.h index 32444686b6..d82bf19944 100644 --- a/include/linux/debugobjects.h +++ b/include/linux/debugobjects.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_DEBUGOBJECTS_H #define _LINUX_DEBUGOBJECTS_H @@ -18,7 +17,7 @@ enum debug_obj_state { struct debug_obj_descr; /** - * struct debug_obj - representation of an tracked object + * struct debug_obj - representaion of an tracked object * @node: hlist node to link the object into the tracker list * @state: tracked object state * @astate: current active state @@ -30,7 +29,7 @@ struct debug_obj { enum debug_obj_state state; unsigned int astate; void *object; - const struct debug_obj_descr *descr; + struct debug_obj_descr *descr; }; /** @@ -64,14 +63,14 @@ struct debug_obj_descr { }; #ifdef CONFIG_DEBUG_OBJECTS -extern void debug_object_init (void *addr, const struct debug_obj_descr *descr); +extern void debug_object_init (void *addr, struct debug_obj_descr *descr); extern void -debug_object_init_on_stack(void *addr, const struct debug_obj_descr *descr); -extern int debug_object_activate (void *addr, const struct debug_obj_descr *descr); -extern void debug_object_deactivate(void *addr, 
const struct debug_obj_descr *descr); -extern void debug_object_destroy (void *addr, const struct debug_obj_descr *descr); -extern void debug_object_free (void *addr, const struct debug_obj_descr *descr); -extern void debug_object_assert_init(void *addr, const struct debug_obj_descr *descr); +debug_object_init_on_stack(void *addr, struct debug_obj_descr *descr); +extern int debug_object_activate (void *addr, struct debug_obj_descr *descr); +extern void debug_object_deactivate(void *addr, struct debug_obj_descr *descr); +extern void debug_object_destroy (void *addr, struct debug_obj_descr *descr); +extern void debug_object_free (void *addr, struct debug_obj_descr *descr); +extern void debug_object_assert_init(void *addr, struct debug_obj_descr *descr); /* * Active state: @@ -79,26 +78,26 @@ extern void debug_object_assert_init(void *addr, const struct debug_obj_descr *d * - Must return to 0 before deactivation. */ extern void -debug_object_active_state(void *addr, const struct debug_obj_descr *descr, +debug_object_active_state(void *addr, struct debug_obj_descr *descr, unsigned int expect, unsigned int next); extern void debug_objects_early_init(void); extern void debug_objects_mem_init(void); #else static inline void -debug_object_init (void *addr, const struct debug_obj_descr *descr) { } +debug_object_init (void *addr, struct debug_obj_descr *descr) { } static inline void -debug_object_init_on_stack(void *addr, const struct debug_obj_descr *descr) { } +debug_object_init_on_stack(void *addr, struct debug_obj_descr *descr) { } static inline int -debug_object_activate (void *addr, const struct debug_obj_descr *descr) { return 0; } +debug_object_activate (void *addr, struct debug_obj_descr *descr) { return 0; } static inline void -debug_object_deactivate(void *addr, const struct debug_obj_descr *descr) { } +debug_object_deactivate(void *addr, struct debug_obj_descr *descr) { } static inline void -debug_object_destroy (void *addr, const struct debug_obj_descr *descr) { 
} +debug_object_destroy (void *addr, struct debug_obj_descr *descr) { } static inline void -debug_object_free (void *addr, const struct debug_obj_descr *descr) { } +debug_object_free (void *addr, struct debug_obj_descr *descr) { } static inline void -debug_object_assert_init(void *addr, const struct debug_obj_descr *descr) { } +debug_object_assert_init(void *addr, struct debug_obj_descr *descr) { } static inline void debug_objects_early_init(void) { } static inline void debug_objects_mem_init(void) { } diff --git a/include/linux/decompress/bunzip2.h b/include/linux/decompress/bunzip2.h index 5860163942..4d683df898 100644 --- a/include/linux/decompress/bunzip2.h +++ b/include/linux/decompress/bunzip2.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef DECOMPRESS_BUNZIP2_H #define DECOMPRESS_BUNZIP2_H diff --git a/include/linux/decompress/generic.h b/include/linux/decompress/generic.h index 207d80138d..1fcfd64b50 100644 --- a/include/linux/decompress/generic.h +++ b/include/linux/decompress/generic.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef DECOMPRESS_GENERIC_H #define DECOMPRESS_GENERIC_H diff --git a/include/linux/decompress/inflate.h b/include/linux/decompress/inflate.h index b65f24e7d4..e4f411fdbd 100644 --- a/include/linux/decompress/inflate.h +++ b/include/linux/decompress/inflate.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef LINUX_DECOMPRESS_INFLATE_H #define LINUX_DECOMPRESS_INFLATE_H diff --git a/include/linux/decompress/mm.h b/include/linux/decompress/mm.h index 868e9eacd6..d5143d2fb6 100644 --- a/include/linux/decompress/mm.h +++ b/include/linux/decompress/mm.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ /* * linux/compr_mm.h * @@ -78,7 +77,7 @@ static void free(void *where) * warnings when not needed (indeed large_malloc / large_free are not * needed by inflate */ -#define malloc(a) kmalloc(a, GFP_KERNEL) +#define malloc(a) kmalloc((a), GFP_KERNEL) #define free(a) kfree(a) #define 
large_malloc(a) vmalloc(a) diff --git a/include/linux/decompress/unlz4.h b/include/linux/decompress/unlz4.h index 5a235f605d..3273c2f364 100644 --- a/include/linux/decompress/unlz4.h +++ b/include/linux/decompress/unlz4.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef DECOMPRESS_UNLZ4_H #define DECOMPRESS_UNLZ4_H diff --git a/include/linux/decompress/unlzma.h b/include/linux/decompress/unlzma.h index 1c930f1251..8a891a1938 100644 --- a/include/linux/decompress/unlzma.h +++ b/include/linux/decompress/unlzma.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef DECOMPRESS_UNLZMA_H #define DECOMPRESS_UNLZMA_H diff --git a/include/linux/decompress/unlzo.h b/include/linux/decompress/unlzo.h index 550ae8783d..af18f95d65 100644 --- a/include/linux/decompress/unlzo.h +++ b/include/linux/decompress/unlzo.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef DECOMPRESS_UNLZO_H #define DECOMPRESS_UNLZO_H diff --git a/include/linux/delay.h b/include/linux/delay.h index 1d0e2ce6b6..a6ecb34cf5 100644 --- a/include/linux/delay.h +++ b/include/linux/delay.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_DELAY_H #define _LINUX_DELAY_H @@ -6,17 +5,6 @@ * Copyright (C) 1993 Linus Torvalds * * Delay routines, using a pre-computed "loops_per_jiffy" value. - * - * Please note that ndelay(), udelay() and mdelay() may return early for - * several reasons: - * 1. computed loops_per_jiffy too low (due to the time taken to - * execute the timer interrupt.) - * 2. cache behaviour affecting the time it takes to execute the - * loop function. - * 3. CPU clock rate changes. 
- * - * Please see this thread: - * https://lists.openwall.net/linux-kernel/2011/01/09/56 */ #include @@ -55,7 +43,6 @@ static inline void ndelay(unsigned long x) extern unsigned long lpj_fine; void calibrate_delay(void); -void __attribute__((weak)) calibration_delay_done(void); void msleep(unsigned int msecs); unsigned long msleep_interruptible(unsigned int msecs); void usleep_range(unsigned long min, unsigned long max); @@ -65,15 +52,4 @@ static inline void ssleep(unsigned int seconds) msleep(seconds * 1000); } -/* see Documentation/timers/timers-howto.rst for the thresholds */ -static inline void fsleep(unsigned long usecs) -{ - if (usecs <= 10) - udelay(usecs); - else if (usecs <= 20000) - usleep_range(usecs, 2 * usecs); - else - msleep(DIV_ROUND_UP(usecs, 1000)); -} - #endif /* defined(_LINUX_DELAY_H) */ diff --git a/include/linux/delayacct.h b/include/linux/delayacct.h index af7e6eb502..6cee17c223 100644 --- a/include/linux/delayacct.h +++ b/include/linux/delayacct.h @@ -1,13 +1,24 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ /* delayacct.h - per-task delay accounting * * Copyright (C) Shailabh Nagar, IBM Corp. 2006 + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See + * the GNU General Public License for more details. 
+ * */ #ifndef _LINUX_DELAYACCT_H #define _LINUX_DELAYACCT_H -#include +#include +#include /* * Per-task flags relevant to delay accounting @@ -18,67 +29,18 @@ #define DELAYACCT_PF_BLKIO 0x00000002 /* I am waiting on IO */ #ifdef CONFIG_TASK_DELAY_ACCT -struct task_delay_info { - raw_spinlock_t lock; - unsigned int flags; /* Private per-task flags */ - /* For each stat XXX, add following, aligned appropriately - * - * struct timespec XXX_start, XXX_end; - * u64 XXX_delay; - * u32 XXX_count; - * - * Atomicity of updates to XXX_delay, XXX_count protected by - * single lock above (split into XXX_lock if contention is an issue). - */ - - /* - * XXX_count is incremented on every XXX operation, the delay - * associated with the operation is added to XXX_delay. - * XXX_delay contains the accumulated delay time in nanoseconds. - */ - u64 blkio_start; /* Shared by blkio, swapin */ - u64 blkio_delay; /* wait for sync block io completion */ - u64 swapin_delay; /* wait for swapin block io completion */ - u32 blkio_count; /* total count of the number of sync block */ - /* io operations performed */ - u32 swapin_count; /* total count of the number of swapin block */ - /* io operations performed */ - - u64 freepages_start; - u64 freepages_delay; /* wait for memory reclaim */ - - u64 thrashing_start; - u64 thrashing_delay; /* wait for thrashing page */ - - u32 freepages_count; /* total count of memory reclaim */ - u32 thrashing_count; /* total count of thrash waits */ -}; -#endif - -#include -#include -#include - -#ifdef CONFIG_TASK_DELAY_ACCT -DECLARE_STATIC_KEY_FALSE(delayacct_key); extern int delayacct_on; /* Delay accounting turned on/off */ extern struct kmem_cache *delayacct_cache; extern void delayacct_init(void); - -extern int sysctl_delayacct(struct ctl_table *table, int write, void *buffer, - size_t *lenp, loff_t *ppos); - extern void __delayacct_tsk_init(struct task_struct *); extern void __delayacct_tsk_exit(struct task_struct *); extern void 
__delayacct_blkio_start(void); -extern void __delayacct_blkio_end(struct task_struct *); -extern int delayacct_add_tsk(struct taskstats *, struct task_struct *); +extern void __delayacct_blkio_end(void); +extern int __delayacct_add_tsk(struct taskstats *, struct task_struct *); extern __u64 __delayacct_blkio_ticks(struct task_struct *); extern void __delayacct_freepages_start(void); extern void __delayacct_freepages_end(void); -extern void __delayacct_thrashing_start(void); -extern void __delayacct_thrashing_end(void); static inline int delayacct_is_task_waiting_on_io(struct task_struct *p) { @@ -88,16 +50,16 @@ static inline int delayacct_is_task_waiting_on_io(struct task_struct *p) return 0; } -static inline void delayacct_set_flag(struct task_struct *p, int flag) +static inline void delayacct_set_flag(int flag) { - if (p->delays) - p->delays->flags |= flag; + if (current->delays) + current->delays->flags |= flag; } -static inline void delayacct_clear_flag(struct task_struct *p, int flag) +static inline void delayacct_clear_flag(int flag) { - if (p->delays) - p->delays->flags &= ~flag; + if (current->delays) + current->delays->flags &= ~flag; } static inline void delayacct_tsk_init(struct task_struct *tsk) @@ -120,22 +82,24 @@ static inline void delayacct_tsk_free(struct task_struct *tsk) static inline void delayacct_blkio_start(void) { - if (!static_branch_unlikely(&delayacct_key)) - return; - - delayacct_set_flag(current, DELAYACCT_PF_BLKIO); + delayacct_set_flag(DELAYACCT_PF_BLKIO); if (current->delays) __delayacct_blkio_start(); } -static inline void delayacct_blkio_end(struct task_struct *p) +static inline void delayacct_blkio_end(void) { - if (!static_branch_unlikely(&delayacct_key)) - return; + if (current->delays) + __delayacct_blkio_end(); + delayacct_clear_flag(DELAYACCT_PF_BLKIO); +} - if (p->delays) - __delayacct_blkio_end(p); - delayacct_clear_flag(p, DELAYACCT_PF_BLKIO); +static inline int delayacct_add_tsk(struct taskstats *d, + struct task_struct 
*tsk) +{ + if (!delayacct_on || !tsk->delays) + return 0; + return __delayacct_add_tsk(d, tsk); } static inline __u64 delayacct_blkio_ticks(struct task_struct *tsk) @@ -157,22 +121,10 @@ static inline void delayacct_freepages_end(void) __delayacct_freepages_end(); } -static inline void delayacct_thrashing_start(void) -{ - if (current->delays) - __delayacct_thrashing_start(); -} - -static inline void delayacct_thrashing_end(void) -{ - if (current->delays) - __delayacct_thrashing_end(); -} - #else -static inline void delayacct_set_flag(struct task_struct *p, int flag) +static inline void delayacct_set_flag(int flag) {} -static inline void delayacct_clear_flag(struct task_struct *p, int flag) +static inline void delayacct_clear_flag(int flag) {} static inline void delayacct_init(void) {} @@ -182,7 +134,7 @@ static inline void delayacct_tsk_free(struct task_struct *tsk) {} static inline void delayacct_blkio_start(void) {} -static inline void delayacct_blkio_end(struct task_struct *p) +static inline void delayacct_blkio_end(void) {} static inline int delayacct_add_tsk(struct taskstats *d, struct task_struct *tsk) @@ -195,10 +147,6 @@ static inline void delayacct_freepages_start(void) {} static inline void delayacct_freepages_end(void) {} -static inline void delayacct_thrashing_start(void) -{} -static inline void delayacct_thrashing_end(void) -{} #endif /* CONFIG_TASK_DELAY_ACCT */ diff --git a/include/linux/delayed_call.h b/include/linux/delayed_call.h index a26c3b95b5..f7fa76ae1a 100644 --- a/include/linux/delayed_call.h +++ b/include/linux/delayed_call.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _DELAYED_CALL_H #define _DELAYED_CALL_H diff --git a/include/linux/dell-led.h b/include/linux/dell-led.h new file mode 100644 index 0000000000..7009b8bec7 --- /dev/null +++ b/include/linux/dell-led.h @@ -0,0 +1,10 @@ +#ifndef __DELL_LED_H__ +#define __DELL_LED_H__ + +enum { + DELL_LED_MICMUTE, +}; + +int dell_app_wmi_led_set(int whichled, int on); + 
+#endif diff --git a/include/linux/devcoredump.h b/include/linux/devcoredump.h index c008169ed2..269521f143 100644 --- a/include/linux/devcoredump.h +++ b/include/linux/devcoredump.h @@ -1,6 +1,21 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* + * This file is provided under the GPLv2 license. + * + * GPL LICENSE SUMMARY + * * Copyright(c) 2015 Intel Deutschland GmbH + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * The full GNU General Public License is included in this distribution + * in the file called COPYING. */ #ifndef __DEVCOREDUMP_H #define __DEVCOREDUMP_H diff --git a/include/linux/devfreq-event.h b/include/linux/devfreq-event.h index 4a50a5c71a..4db00b02ca 100644 --- a/include/linux/devfreq-event.h +++ b/include/linux/devfreq-event.h @@ -1,9 +1,12 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * devfreq-event: a framework to provide raw data and events of devfreq devices * * Copyright (C) 2014 Samsung Electronics * Author: Chanwoo Choi + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. */ #ifndef __LINUX_DEVFREQ_EVENT_H__ @@ -78,20 +81,14 @@ struct devfreq_event_ops { * struct devfreq_event_desc - the descriptor of devfreq-event device * * @name : the name of devfreq-event device. - * @event_type : the type of the event determined and used by driver * @driver_data : the private data for devfreq-event driver. * @ops : the operation to control devfreq-event device. 
* * Each devfreq-event device is described with a this structure. * This structure contains the various data for devfreq-event device. - * The event_type describes what is going to be counted in the register. - * It might choose to count e.g. read requests, write data in bytes, etc. - * The full supported list of types is present in specyfic header in: - * include/dt-bindings/pmu/. */ struct devfreq_event_desc { const char *name; - u32 event_type; void *driver_data; const struct devfreq_event_ops *ops; @@ -106,11 +103,8 @@ extern int devfreq_event_get_event(struct devfreq_event_dev *edev, struct devfreq_event_data *edata); extern int devfreq_event_reset_event(struct devfreq_event_dev *edev); extern struct devfreq_event_dev *devfreq_event_get_edev_by_phandle( - struct device *dev, - const char *phandle_name, - int index); -extern int devfreq_event_get_edev_count(struct device *dev, - const char *phandle_name); + struct device *dev, int index); +extern int devfreq_event_get_edev_count(struct device *dev); extern struct devfreq_event_dev *devfreq_event_add_edev(struct device *dev, struct devfreq_event_desc *desc); extern int devfreq_event_remove_edev(struct devfreq_event_dev *edev); @@ -155,15 +149,12 @@ static inline int devfreq_event_reset_event(struct devfreq_event_dev *edev) } static inline struct devfreq_event_dev *devfreq_event_get_edev_by_phandle( - struct device *dev, - const char *phandle_name, - int index) + struct device *dev, int index) { return ERR_PTR(-EINVAL); } -static inline int devfreq_event_get_edev_count(struct device *dev, - const char *phandle_name) +static inline int devfreq_event_get_edev_count(struct device *dev) { return -EINVAL; } diff --git a/include/linux/devfreq.h b/include/linux/devfreq.h index 142474b4af..7627e4b15e 100644 --- a/include/linux/devfreq.h +++ b/include/linux/devfreq.h @@ -1,10 +1,13 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * devfreq: Generic Dynamic Voltage and Frequency Scaling (DVFS) Framework * for Non-CPU 
Devices. * * Copyright (C) 2011 Samsung Electronics * MyungJoo Ham + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. */ #ifndef __LINUX_DEVFREQ_H__ @@ -13,14 +16,8 @@ #include #include #include -#include -/* DEVFREQ governor name */ -#define DEVFREQ_GOV_SIMPLE_ONDEMAND "simple_ondemand" -#define DEVFREQ_GOV_PERFORMANCE "performance" -#define DEVFREQ_GOV_POWERSAVE "powersave" -#define DEVFREQ_GOV_USERSPACE "userspace" -#define DEVFREQ_GOV_PASSIVE "passive" +#define DEVFREQ_NAME_LEN 16 /* DEVFREQ notifier interface */ #define DEVFREQ_TRANSITION_NOTIFIER (0) @@ -29,16 +26,7 @@ #define DEVFREQ_PRECHANGE (0) #define DEVFREQ_POSTCHANGE (1) -/* DEVFREQ work timers */ -enum devfreq_timer { - DEVFREQ_TIMER_DEFERRABLE = 0, - DEVFREQ_TIMER_DELAYED, - DEVFREQ_TIMER_NUM, -}; - struct devfreq; -struct devfreq_governor; -struct thermal_cooling_device; /** * struct devfreq_dev_status - Data given from devfreq user device to @@ -76,7 +64,6 @@ struct devfreq_dev_status { * @initial_freq: The operating frequency when devfreq_add_device() is * called. * @polling_ms: The polling interval in ms. 0 disables polling. - * @timer: Timer type is either deferrable or delayed timer. * @target: The device should set its operating frequency at * freq or lowest-upper-than-freq value. If freq is * higher than any operable frequency, set maximum. @@ -96,18 +83,12 @@ struct devfreq_dev_status { * from devfreq_remove_device() call. If the user * has registered devfreq->nb at a notifier-head, * this is the time to unregister it. - * @freq_table: Optional list of frequencies to support statistics - * and freq_table must be generated in ascending order. - * @max_state: The size of freq_table. - * - * @is_cooling_device: A self-explanatory boolean giving the device a - * cooling effect property. + * @freq_table: Optional list of frequencies to support statistics. 
+ * @max_state: The size of freq_table. */ struct devfreq_dev_profile { unsigned long initial_freq; unsigned int polling_ms; - enum devfreq_timer timer; - bool is_cooling_device; int (*target)(struct device *dev, unsigned long *freq, u32 flags); int (*get_dev_status)(struct device *dev, @@ -120,18 +101,33 @@ struct devfreq_dev_profile { }; /** - * struct devfreq_stats - Statistics of devfreq device behavior - * @total_trans: Number of devfreq transitions. - * @trans_table: Statistics of devfreq transitions. - * @time_in_state: Statistics of devfreq states. - * @last_update: The last time stats were updated. + * struct devfreq_governor - Devfreq policy governor + * @node: list node - contains registered devfreq governors + * @name: Governor's name + * @immutable: Immutable flag for governor. If the value is 1, + * this govenror is never changeable to other governor. + * @get_target_freq: Returns desired operating frequency for the device. + * Basically, get_target_freq will run + * devfreq_dev_profile.get_dev_status() to get the + * status of the device (load = busy_time / total_time). + * If no_central_polling is set, this callback is called + * only with update_devfreq() notified by OPP. + * @event_handler: Callback for devfreq core framework to notify events + * to governors. Events include per device governor + * init and exit, opp changes out of devfreq, suspend + * and resume of per device devfreq during device idle. + * + * Note that the callbacks are called with devfreq->lock locked by devfreq. 
*/ -struct devfreq_stats { - unsigned int total_trans; - unsigned int *trans_table; - u64 *time_in_state; - u64 last_update; -}; +struct devfreq_governor { + struct list_head node; + + const char name[DEVFREQ_NAME_LEN]; + const unsigned int immutable; + int (*get_target_freq)(struct devfreq *this, unsigned long *freq); + int (*event_handler)(struct devfreq *devfreq, + unsigned int event, void *data); +} __do_const; /** * struct devfreq - Device devfreq structure @@ -142,36 +138,30 @@ struct devfreq_stats { * using devfreq. * @profile: device-specific devfreq profile * @governor: method how to choose frequency based on the usage. - * @opp_table: Reference to OPP table of dev.parent, if one exists. + * @governor_name: devfreq governor name for use with this devfreq * @nb: notifier block used to notify devfreq object that it should * reevaluate operable frequencies. Devfreq users may use * devfreq.nb to the corresponding register notifier call chain. * @work: delayed work for load monitoring. * @previous_freq: previously configured frequency value. - * @last_status: devfreq user device info, performance statistics * @data: Private data of the governor. The devfreq framework does not * touch this. - * @user_min_freq_req: PM QoS minimum frequency request from user (via sysfs) - * @user_max_freq_req: PM QoS maximum frequency request from user (via sysfs) - * @scaling_min_freq: Limit minimum frequency requested by OPP interface - * @scaling_max_freq: Limit maximum frequency requested by OPP interface + * @min_freq: Limit minimum frequency requested by user (0: none) + * @max_freq: Limit maximum frequency requested by user (0: none) * @stop_polling: devfreq polling status of a device. - * @suspend_freq: frequency of a device set during suspend phase. - * @resume_freq: frequency of a device set in resume phase. - * @suspend_count: suspend requests counter for a device. 
- * @stats: Statistics of devfreq device behavior + * @total_trans: Number of devfreq transitions + * @trans_table: Statistics of devfreq transitions + * @time_in_state: Statistics of devfreq states + * @last_stat_updated: The last time stat updated * @transition_notifier_list: list head of DEVFREQ_TRANSITION_NOTIFIER notifier - * @cdev: Cooling device pointer if the devfreq has cooling property - * @nb_min: Notifier block for DEV_PM_QOS_MIN_FREQUENCY - * @nb_max: Notifier block for DEV_PM_QOS_MAX_FREQUENCY * - * This structure stores the devfreq information for a given device. + * This structure stores the devfreq information for a give device. * * Note that when a governor accesses entries in struct devfreq in its * functions except for the context of callbacks defined in struct * devfreq_governor, the governor should protect its access with the * struct mutex lock in struct devfreq. A governor may use this mutex - * to protect its own private data in ``void *data`` as well. + * to protect its own private data in void *data as well. 
*/ struct devfreq { struct list_head node; @@ -180,7 +170,7 @@ struct devfreq { struct device dev; struct devfreq_dev_profile *profile; const struct devfreq_governor *governor; - struct opp_table *opp_table; + char governor_name[DEVFREQ_NAME_LEN]; struct notifier_block nb; struct delayed_work work; @@ -189,26 +179,17 @@ struct devfreq { void *data; /* private data for governors */ - struct dev_pm_qos_request user_min_freq_req; - struct dev_pm_qos_request user_max_freq_req; - unsigned long scaling_min_freq; - unsigned long scaling_max_freq; + unsigned long min_freq; + unsigned long max_freq; bool stop_polling; - unsigned long suspend_freq; - unsigned long resume_freq; - atomic_t suspend_count; - - /* information for device frequency transitions */ - struct devfreq_stats stats; + /* information for device frequency transition */ + unsigned int total_trans; + unsigned int *trans_table; + unsigned long *time_in_state; + unsigned long last_stat_updated; struct srcu_notifier_head transition_notifier_list; - - /* Pointer to the cooling device if used for thermal mitigation */ - struct thermal_cooling_device *cdev; - - struct notifier_block nb_min; - struct notifier_block nb_max; }; struct devfreq_freqs { @@ -217,59 +198,66 @@ struct devfreq_freqs { }; #if defined(CONFIG_PM_DEVFREQ) -struct devfreq *devfreq_add_device(struct device *dev, - struct devfreq_dev_profile *profile, - const char *governor_name, - void *data); -int devfreq_remove_device(struct devfreq *devfreq); -struct devfreq *devm_devfreq_add_device(struct device *dev, - struct devfreq_dev_profile *profile, - const char *governor_name, - void *data); -void devm_devfreq_remove_device(struct device *dev, struct devfreq *devfreq); +extern struct devfreq *devfreq_add_device(struct device *dev, + struct devfreq_dev_profile *profile, + const char *governor_name, + void *data); +extern int devfreq_remove_device(struct devfreq *devfreq); +extern struct devfreq *devm_devfreq_add_device(struct device *dev, + struct 
devfreq_dev_profile *profile, + const char *governor_name, + void *data); +extern void devm_devfreq_remove_device(struct device *dev, + struct devfreq *devfreq); /* Supposed to be called by PM callbacks */ -int devfreq_suspend_device(struct devfreq *devfreq); -int devfreq_resume_device(struct devfreq *devfreq); - -void devfreq_suspend(void); -void devfreq_resume(void); - -/* update_devfreq() - Reevaluate the device and configure frequency */ -int update_devfreq(struct devfreq *devfreq); +extern int devfreq_suspend_device(struct devfreq *devfreq); +extern int devfreq_resume_device(struct devfreq *devfreq); /* Helper functions for devfreq user device driver with OPP. */ -struct dev_pm_opp *devfreq_recommended_opp(struct device *dev, - unsigned long *freq, u32 flags); -int devfreq_register_opp_notifier(struct device *dev, - struct devfreq *devfreq); -int devfreq_unregister_opp_notifier(struct device *dev, - struct devfreq *devfreq); -int devm_devfreq_register_opp_notifier(struct device *dev, - struct devfreq *devfreq); -void devm_devfreq_unregister_opp_notifier(struct device *dev, - struct devfreq *devfreq); -int devfreq_register_notifier(struct devfreq *devfreq, - struct notifier_block *nb, - unsigned int list); -int devfreq_unregister_notifier(struct devfreq *devfreq, - struct notifier_block *nb, - unsigned int list); -int devm_devfreq_register_notifier(struct device *dev, +extern struct dev_pm_opp *devfreq_recommended_opp(struct device *dev, + unsigned long *freq, u32 flags); +extern int devfreq_register_opp_notifier(struct device *dev, + struct devfreq *devfreq); +extern int devfreq_unregister_opp_notifier(struct device *dev, + struct devfreq *devfreq); +extern int devm_devfreq_register_opp_notifier(struct device *dev, + struct devfreq *devfreq); +extern void devm_devfreq_unregister_opp_notifier(struct device *dev, + struct devfreq *devfreq); +extern int devfreq_register_notifier(struct devfreq *devfreq, + struct notifier_block *nb, + unsigned int list); +extern 
int devfreq_unregister_notifier(struct devfreq *devfreq, + struct notifier_block *nb, + unsigned int list); +extern int devm_devfreq_register_notifier(struct device *dev, struct devfreq *devfreq, struct notifier_block *nb, unsigned int list); -void devm_devfreq_unregister_notifier(struct device *dev, +extern void devm_devfreq_unregister_notifier(struct device *dev, struct devfreq *devfreq, struct notifier_block *nb, unsigned int list); -struct devfreq *devfreq_get_devfreq_by_node(struct device_node *node); -struct devfreq *devfreq_get_devfreq_by_phandle(struct device *dev, - const char *phandle_name, int index); +extern struct devfreq *devfreq_get_devfreq_by_phandle(struct device *dev, + int index); + +/** + * devfreq_update_stats() - update the last_status pointer in struct devfreq + * @df: the devfreq instance whose status needs updating + * + * Governors are recommended to use this function along with last_status, + * which allows other entities to reuse the last_status without affecting + * the values fetched later by governors. + */ +static inline int devfreq_update_stats(struct devfreq *df) +{ + return df->profile->get_dev_status(df->dev.parent, &df->last_status); +} #if IS_ENABLED(CONFIG_DEVFREQ_GOV_SIMPLE_ONDEMAND) /** - * struct devfreq_simple_ondemand_data - ``void *data`` fed to struct devfreq + * struct devfreq_simple_ondemand_data - void *data fed to struct devfreq * and devfreq_add_device * @upthreshold: If the load is over this value, the frequency jumps. * Specify 0 to use the default. Valid value = 0 to 100. @@ -289,7 +277,7 @@ struct devfreq_simple_ondemand_data { #if IS_ENABLED(CONFIG_DEVFREQ_GOV_PASSIVE) /** - * struct devfreq_passive_data - ``void *data`` fed to struct devfreq + * struct devfreq_passive_data - void *data fed to struct devfreq * and devfreq_add_device * @parent: the devfreq instance of parent device. 
* @get_target_freq: Optional callback, Returns desired operating frequency @@ -322,9 +310,9 @@ struct devfreq_passive_data { #else /* !CONFIG_PM_DEVFREQ */ static inline struct devfreq *devfreq_add_device(struct device *dev, - struct devfreq_dev_profile *profile, - const char *governor_name, - void *data) + struct devfreq_dev_profile *profile, + const char *governor_name, + void *data) { return ERR_PTR(-ENOSYS); } @@ -357,35 +345,32 @@ static inline int devfreq_resume_device(struct devfreq *devfreq) return 0; } -static inline void devfreq_suspend(void) {} -static inline void devfreq_resume(void) {} - static inline struct dev_pm_opp *devfreq_recommended_opp(struct device *dev, - unsigned long *freq, u32 flags) + unsigned long *freq, u32 flags) { return ERR_PTR(-EINVAL); } static inline int devfreq_register_opp_notifier(struct device *dev, - struct devfreq *devfreq) + struct devfreq *devfreq) { return -EINVAL; } static inline int devfreq_unregister_opp_notifier(struct device *dev, - struct devfreq *devfreq) + struct devfreq *devfreq) { return -EINVAL; } static inline int devm_devfreq_register_opp_notifier(struct device *dev, - struct devfreq *devfreq) + struct devfreq *devfreq) { return -EINVAL; } static inline void devm_devfreq_unregister_opp_notifier(struct device *dev, - struct devfreq *devfreq) + struct devfreq *devfreq) { } @@ -404,27 +389,22 @@ static inline int devfreq_unregister_notifier(struct devfreq *devfreq, } static inline int devm_devfreq_register_notifier(struct device *dev, - struct devfreq *devfreq, - struct notifier_block *nb, - unsigned int list) + struct devfreq *devfreq, + struct notifier_block *nb, + unsigned int list) { return 0; } static inline void devm_devfreq_unregister_notifier(struct device *dev, - struct devfreq *devfreq, - struct notifier_block *nb, - unsigned int list) + struct devfreq *devfreq, + struct notifier_block *nb, + unsigned int list) { } -static inline struct devfreq *devfreq_get_devfreq_by_node(struct device_node *node) -{ 
- return ERR_PTR(-ENODEV); -} - static inline struct devfreq *devfreq_get_devfreq_by_phandle(struct device *dev, - const char *phandle_name, int index) + int index) { return ERR_PTR(-ENODEV); } diff --git a/include/linux/devfreq_cooling.h b/include/linux/devfreq_cooling.h index 14baa73fc2..7adf6cc4b3 100644 --- a/include/linux/devfreq_cooling.h +++ b/include/linux/devfreq_cooling.h @@ -1,10 +1,17 @@ -/* SPDX-License-Identifier: GPL-2.0 */ /* * devfreq_cooling: Thermal cooling device implementation for devices using * devfreq * * Copyright (C) 2014-2015 ARM Limited * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed "as is" WITHOUT ANY WARRANTY of any + * kind, whether express or implied; without even the implied warranty + * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. */ #ifndef __DEVFREQ_COOLING_H__ @@ -13,34 +20,29 @@ #include #include +#ifdef CONFIG_DEVFREQ_THERMAL /** * struct devfreq_cooling_power - Devfreq cooling power ops - * @get_real_power: When this is set, the framework uses it to ask the - * device driver for the actual power. - * Some devices have more sophisticated methods - * (like power counters) to approximate the actual power - * that they use. - * This function provides more accurate data to the - * thermal governor. When the driver does not provide - * such function, framework just uses pre-calculated - * table and scale the power by 'utilization' - * (based on 'busy_time' and 'total_time' taken from - * devfreq 'last_status'). - * The value returned by this function must be lower - * or equal than the maximum power value - * for the current state - * (which can be found in power_table[state]). 
- * When this interface is used, the power_table holds - * max total (static + dynamic) power value for each OPP. + * @get_static_power: Take voltage, in mV, and return the static power + * in mW. If NULL, the static power is assumed + * to be 0. + * @get_dynamic_power: Take voltage, in mV, and frequency, in HZ, and + * return the dynamic power draw in mW. If NULL, + * a simple power model is used. + * @dyn_power_coeff: Coefficient for the simple dynamic power model in + * mW/(MHz mV mV). + * If get_dynamic_power() is NULL, then the + * dynamic power is calculated as + * @dyn_power_coeff * frequency * voltage^2 */ struct devfreq_cooling_power { - int (*get_real_power)(struct devfreq *df, u32 *power, - unsigned long freq, unsigned long voltage); + unsigned long (*get_static_power)(unsigned long voltage); + unsigned long (*get_dynamic_power)(unsigned long freq, + unsigned long voltage); + unsigned long dyn_power_coeff; }; -#ifdef CONFIG_DEVFREQ_THERMAL - struct thermal_cooling_device * of_devfreq_cooling_register_power(struct device_node *np, struct devfreq *df, struct devfreq_cooling_power *dfc_power); @@ -48,13 +50,10 @@ struct thermal_cooling_device * of_devfreq_cooling_register(struct device_node *np, struct devfreq *df); struct thermal_cooling_device *devfreq_cooling_register(struct devfreq *df); void devfreq_cooling_unregister(struct thermal_cooling_device *dfc); -struct thermal_cooling_device * -devfreq_cooling_em_register(struct devfreq *df, - struct devfreq_cooling_power *dfc_power); #else /* !CONFIG_DEVFREQ_THERMAL */ -static inline struct thermal_cooling_device * +struct thermal_cooling_device * of_devfreq_cooling_register_power(struct device_node *np, struct devfreq *df, struct devfreq_cooling_power *dfc_power) { @@ -73,13 +72,6 @@ devfreq_cooling_register(struct devfreq *df) return ERR_PTR(-EINVAL); } -static inline struct thermal_cooling_device * -devfreq_cooling_em_register(struct devfreq *df, - struct devfreq_cooling_power *dfc_power) -{ - return 
ERR_PTR(-EINVAL); -} - static inline void devfreq_cooling_unregister(struct thermal_cooling_device *dfc) { diff --git a/include/linux/device-mapper.h b/include/linux/device-mapper.h index 114553b487..ef7962e844 100644 --- a/include/linux/device-mapper.h +++ b/include/linux/device-mapper.h @@ -10,28 +10,25 @@ #include #include -#include #include #include struct dm_dev; struct dm_target; struct dm_table; -struct dm_report_zones_args; struct mapped_device; struct bio_vec; /* * Type of table, mapped_device's mempool and request_queue */ -enum dm_queue_mode { - DM_TYPE_NONE = 0, - DM_TYPE_BIO_BASED = 1, - DM_TYPE_REQUEST_BASED = 2, - DM_TYPE_DAX_BIO_BASED = 3, -}; +#define DM_TYPE_NONE 0 +#define DM_TYPE_BIO_BASED 1 +#define DM_TYPE_REQUEST_BASED 2 +#define DM_TYPE_MQ_REQUEST_BASED 3 +#define DM_TYPE_DAX_BIO_BASED 4 -typedef enum { STATUSTYPE_INFO, STATUSTYPE_TABLE, STATUSTYPE_IMA } status_type_t; +typedef enum { STATUSTYPE_INFO, STATUSTYPE_TABLE } status_type_t; union map_info { void *ptr; @@ -58,12 +55,13 @@ typedef void (*dm_dtr_fn) (struct dm_target *ti); * = 2: The target wants to push back the io */ typedef int (*dm_map_fn) (struct dm_target *ti, struct bio *bio); +typedef int (*dm_map_request_fn) (struct dm_target *ti, struct request *clone, + union map_info *map_context); typedef int (*dm_clone_and_map_request_fn) (struct dm_target *ti, struct request *rq, union map_info *map_context, struct request **clone); -typedef void (*dm_release_clone_request_fn) (struct request *clone, - union map_info *map_context); +typedef void (*dm_release_clone_request_fn) (struct request *clone); /* * Returns: @@ -74,9 +72,9 @@ typedef void (*dm_release_clone_request_fn) (struct request *clone, * 2 : The target wants to push back the io */ typedef int (*dm_endio_fn) (struct dm_target *ti, - struct bio *bio, blk_status_t *error); + struct bio *bio, int error); typedef int (*dm_request_endio_fn) (struct dm_target *ti, - struct request *clone, blk_status_t error, + struct request 
*clone, int error, union map_info *map_context); typedef void (*dm_presuspend_fn) (struct dm_target *ti); @@ -88,23 +86,10 @@ typedef void (*dm_resume_fn) (struct dm_target *ti); typedef void (*dm_status_fn) (struct dm_target *ti, status_type_t status_type, unsigned status_flags, char *result, unsigned maxlen); -typedef int (*dm_message_fn) (struct dm_target *ti, unsigned argc, char **argv, - char *result, unsigned maxlen); +typedef int (*dm_message_fn) (struct dm_target *ti, unsigned argc, char **argv); -typedef int (*dm_prepare_ioctl_fn) (struct dm_target *ti, struct block_device **bdev); - -#ifdef CONFIG_BLK_DEV_ZONED -typedef int (*dm_report_zones_fn) (struct dm_target *ti, - struct dm_report_zones_args *args, - unsigned int nr_zones); -#else -/* - * Define dm_report_zones_fn so that targets can assign to NULL if - * CONFIG_BLK_DEV_ZONED disabled. Otherwise each target needs to do - * awkward #ifdefs in their target_type, etc. - */ -typedef int (*dm_report_zones_fn) (struct dm_target *dummy); -#endif +typedef int (*dm_prepare_ioctl_fn) (struct dm_target *ti, + struct block_device **bdev, fmode_t *mode); /* * These iteration functions are typically used to check (and combine) @@ -145,18 +130,13 @@ typedef int (*dm_busy_fn) (struct dm_target *ti); * < 0 : error * >= 0 : the number of bytes accessible at the address */ -typedef long (*dm_dax_direct_access_fn) (struct dm_target *ti, pgoff_t pgoff, - long nr_pages, void **kaddr, pfn_t *pfn); -typedef size_t (*dm_dax_copy_iter_fn)(struct dm_target *ti, pgoff_t pgoff, - void *addr, size_t bytes, struct iov_iter *i); -typedef int (*dm_dax_zero_page_range_fn)(struct dm_target *ti, pgoff_t pgoff, - size_t nr_pages); +typedef long (*dm_direct_access_fn) (struct dm_target *ti, sector_t sector, + void **kaddr, pfn_t *pfn, long size); void dm_error(const char *message); struct dm_dev { struct block_device *bdev; - struct dax_device *dax_dev; fmode_t mode; char name[16]; }; @@ -183,6 +163,7 @@ struct target_type { dm_ctr_fn 
ctr; dm_dtr_fn dtr; dm_map_fn map; + dm_map_request_fn map_rq; dm_clone_and_map_request_fn clone_and_map_rq; dm_release_clone_request_fn release_clone_rq; dm_endio_fn end_io; @@ -195,14 +176,10 @@ struct target_type { dm_status_fn status; dm_message_fn message; dm_prepare_ioctl_fn prepare_ioctl; - dm_report_zones_fn report_zones; dm_busy_fn busy; dm_iterate_devices_fn iterate_devices; dm_io_hints_fn io_hints; - dm_dax_direct_access_fn direct_access; - dm_dax_copy_iter_fn dax_copy_from_iter; - dm_dax_copy_iter_fn dax_copy_to_iter; - dm_dax_zero_page_range_fn dax_zero_page_range; + dm_direct_access_fn direct_access; /* For internal device-mapper use. */ struct list_head list; @@ -240,52 +217,12 @@ struct target_type { #define dm_target_is_wildcard(type) ((type)->features & DM_TARGET_WILDCARD) /* - * A target implements own bio data integrity. + * Some targets need to be sent the same WRITE bio severals times so + * that they can send copies of it to different devices. This function + * examines any supplied bio and returns the number of copies of it the + * target requires. */ -#define DM_TARGET_INTEGRITY 0x00000010 -#define dm_target_has_integrity(type) ((type)->features & DM_TARGET_INTEGRITY) - -/* - * A target passes integrity data to the lower device. - */ -#define DM_TARGET_PASSES_INTEGRITY 0x00000020 -#define dm_target_passes_integrity(type) ((type)->features & DM_TARGET_PASSES_INTEGRITY) - -/* - * Indicates support for zoned block devices: - * - DM_TARGET_ZONED_HM: the target also supports host-managed zoned - * block devices but does not support combining different zoned models. - * - DM_TARGET_MIXED_ZONED_MODEL: the target supports combining multiple - * devices with different zoned models. 
- */ -#ifdef CONFIG_BLK_DEV_ZONED -#define DM_TARGET_ZONED_HM 0x00000040 -#define dm_target_supports_zoned_hm(type) ((type)->features & DM_TARGET_ZONED_HM) -#else -#define DM_TARGET_ZONED_HM 0x00000000 -#define dm_target_supports_zoned_hm(type) (false) -#endif - -/* - * A target handles REQ_NOWAIT - */ -#define DM_TARGET_NOWAIT 0x00000080 -#define dm_target_supports_nowait(type) ((type)->features & DM_TARGET_NOWAIT) - -/* - * A target supports passing through inline crypto support. - */ -#define DM_TARGET_PASSES_CRYPTO 0x00000100 -#define dm_target_passes_crypto(type) ((type)->features & DM_TARGET_PASSES_CRYPTO) - -#ifdef CONFIG_BLK_DEV_ZONED -#define DM_TARGET_MIXED_ZONED_MODEL 0x00000200 -#define dm_target_supports_mixed_zoned_model(type) \ - ((type)->features & DM_TARGET_MIXED_ZONED_MODEL) -#else -#define DM_TARGET_MIXED_ZONED_MODEL 0x00000000 -#define dm_target_supports_mixed_zoned_model(type) (false) -#endif +typedef unsigned (*dm_num_write_bios_fn) (struct dm_target *ti, struct bio *bio); struct dm_target { struct dm_table *table; @@ -314,30 +251,25 @@ struct dm_target { */ unsigned num_discard_bios; - /* - * The number of secure erase bios that will be submitted to the target. - * The bio number can be accessed with dm_bio_get_target_bio_nr. - */ - unsigned num_secure_erase_bios; - /* * The number of WRITE SAME bios that will be submitted to the target. * The bio number can be accessed with dm_bio_get_target_bio_nr. */ unsigned num_write_same_bios; - /* - * The number of WRITE ZEROES bios that will be submitted to the target. - * The bio number can be accessed with dm_bio_get_target_bio_nr. - */ - unsigned num_write_zeroes_bios; - /* * The minimum number of extra bytes allocated in each io for the * target to use. */ unsigned per_io_data_size; + /* + * If defined, this function is called to find out how many + * duplicate bios should be sent to the target when writing + * data. 
+ */ + dm_num_write_bios_fn num_write_bios; + /* target specific data */ void *private; @@ -357,22 +289,52 @@ struct dm_target { bool discards_supported:1; /* - * Set if we need to limit the number of in-flight bios when swapping. + * Set if the target required discard bios to be split + * on max_io_len boundary. */ - bool limit_swap_bios:1; + bool split_discard_bios:1; /* - * Set if this target implements a a zoned device and needs emulation of - * zone append operations using regular writes. + * Set if this target does not return zeroes on discarded blocks. */ - bool emulate_zone_append:1; + bool discard_zeroes_data_unsupported:1; }; -void *dm_per_bio_data(struct bio *bio, size_t data_size); -struct bio *dm_bio_from_per_bio_data(void *data, size_t data_size); -unsigned dm_bio_get_target_bio_nr(const struct bio *bio); +/* Each target can link one of these into the table */ +struct dm_target_callbacks { + struct list_head list; + int (*congested_fn) (struct dm_target_callbacks *, int); +}; -u64 dm_start_time_ns_from_clone(struct bio *bio); +/* + * For bio-based dm. + * One of these is allocated for each bio. + * This structure shouldn't be touched directly by target drivers. 
+ * It is here so that we can inline dm_per_bio_data and + * dm_bio_from_per_bio_data + */ +struct dm_target_io { + struct dm_io *io; + struct dm_target *ti; + unsigned target_bio_nr; + unsigned *len_ptr; + struct bio clone; +}; + +static inline void *dm_per_bio_data(struct bio *bio, size_t data_size) +{ + return (char *)bio - offsetof(struct dm_target_io, clone) - data_size; +} + +static inline struct bio *dm_bio_from_per_bio_data(void *data, size_t data_size) +{ + return (struct bio *)((char *)data + data_size + offsetof(struct dm_target_io, clone)); +} + +static inline unsigned dm_bio_get_target_bio_nr(const struct bio *bio) +{ + return container_of(bio, struct dm_target_io, clone)->target_bio_nr; +} int dm_register_target(struct target_type *t); void dm_unregister_target(struct target_type *t); @@ -399,7 +361,7 @@ struct dm_arg { * Validate the next argument, either returning it as *value or, if invalid, * returning -EINVAL and setting *error. */ -int dm_read_arg(const struct dm_arg *arg, struct dm_arg_set *arg_set, +int dm_read_arg(struct dm_arg *arg, struct dm_arg_set *arg_set, unsigned *value, char **error); /* @@ -407,7 +369,7 @@ int dm_read_arg(const struct dm_arg *arg, struct dm_arg_set *arg_set, * arg->min and arg->max further arguments. Either return the size as * *num_args or, if invalid, return -EINVAL and set *error. 
*/ -int dm_read_arg_group(const struct dm_arg *arg, struct dm_arg_set *arg_set, +int dm_read_arg_group(struct dm_arg *arg, struct dm_arg_set *arg_set, unsigned *num_args, char **error); /* @@ -466,35 +428,10 @@ const char *dm_device_name(struct mapped_device *md); int dm_copy_name_and_uuid(struct mapped_device *md, char *name, char *uuid); struct gendisk *dm_disk(struct mapped_device *md); int dm_suspended(struct dm_target *ti); -int dm_post_suspending(struct dm_target *ti); int dm_noflush_suspending(struct dm_target *ti); void dm_accept_partial_bio(struct bio *bio, unsigned n_sectors); union map_info *dm_get_rq_mapinfo(struct request *rq); -#ifdef CONFIG_BLK_DEV_ZONED -struct dm_report_zones_args { - struct dm_target *tgt; - sector_t next_sector; - - void *orig_data; - report_zones_cb orig_cb; - unsigned int zone_idx; - - /* must be filled by ->report_zones before calling dm_report_zones_cb */ - sector_t start; -}; -int dm_report_zones(struct block_device *bdev, sector_t start, sector_t sector, - struct dm_report_zones_args *args, unsigned int nr_zones); -#endif /* CONFIG_BLK_DEV_ZONED */ - -/* - * Device mapper functions to parse and create devices specified by the - * parameter "dm-mod.create=" - */ -int __init dm_early_create(struct dm_ioctl *dmi, - struct dm_target_spec **spec_array, - char **target_params_array); - struct queue_limits *dm_get_queue_limits(struct mapped_device *md); /* @@ -519,24 +456,24 @@ int dm_table_create(struct dm_table **result, fmode_t mode, int dm_table_add_target(struct dm_table *t, const char *type, sector_t start, sector_t len, char *params); +/* + * Target_ctr should call this if it needs to add any callbacks. + */ +void dm_table_add_target_callbacks(struct dm_table *t, struct dm_target_callbacks *cb); + /* * Target can use this to set the table's type. * Can only ever be called from a target's ctr. * Useful for "hybrid" target (supports both bio-based * and request-based). 
*/ -void dm_table_set_type(struct dm_table *t, enum dm_queue_mode type); +void dm_table_set_type(struct dm_table *t, unsigned type); /* * Finally call this to make the table ready for use. */ int dm_table_complete(struct dm_table *t); -/* - * Destroy the table when finished. - */ -void dm_table_destroy(struct dm_table *t); - /* * Target may require that it is never sent I/O larger than len. */ @@ -556,7 +493,6 @@ sector_t dm_table_get_size(struct dm_table *t); unsigned int dm_table_get_num_targets(struct dm_table *t); fmode_t dm_table_get_mode(struct dm_table *t); struct mapped_device *dm_table_get_md(struct dm_table *t); -const char *dm_table_device_name(struct dm_table *t); /* * Trigger an event. @@ -576,43 +512,77 @@ struct dm_table *dm_swap_table(struct mapped_device *md, struct dm_table *t); /* - * Table keyslot manager functions + * A wrapper around vmalloc. */ -void dm_destroy_keyslot_manager(struct blk_keyslot_manager *ksm); +void *dm_vcalloc(unsigned long nmemb, unsigned long elem_size); /*----------------------------------------------------------------- * Macros. *---------------------------------------------------------------*/ #define DM_NAME "device-mapper" -#define DM_FMT(fmt) DM_NAME ": " DM_MSG_PREFIX ": " fmt "\n" +#ifdef CONFIG_PRINTK +extern struct ratelimit_state dm_ratelimit_state; -#define DMCRIT(fmt, ...) pr_crit(DM_FMT(fmt), ##__VA_ARGS__) +#define dm_ratelimit() __ratelimit(&dm_ratelimit_state) +#else +#define dm_ratelimit() 0 +#endif -#define DMERR(fmt, ...) pr_err(DM_FMT(fmt), ##__VA_ARGS__) -#define DMERR_LIMIT(fmt, ...) pr_err_ratelimited(DM_FMT(fmt), ##__VA_ARGS__) -#define DMWARN(fmt, ...) pr_warn(DM_FMT(fmt), ##__VA_ARGS__) -#define DMWARN_LIMIT(fmt, ...) pr_warn_ratelimited(DM_FMT(fmt), ##__VA_ARGS__) -#define DMINFO(fmt, ...) pr_info(DM_FMT(fmt), ##__VA_ARGS__) -#define DMINFO_LIMIT(fmt, ...) pr_info_ratelimited(DM_FMT(fmt), ##__VA_ARGS__) +#define DMCRIT(f, arg...) 
\ + printk(KERN_CRIT DM_NAME ": " DM_MSG_PREFIX ": " f "\n", ## arg) -#define DMDEBUG(fmt, ...) pr_debug(DM_FMT(fmt), ##__VA_ARGS__) -#define DMDEBUG_LIMIT(fmt, ...) pr_debug_ratelimited(DM_FMT(fmt), ##__VA_ARGS__) +#define DMERR(f, arg...) \ + printk(KERN_ERR DM_NAME ": " DM_MSG_PREFIX ": " f "\n", ## arg) +#define DMERR_LIMIT(f, arg...) \ + do { \ + if (dm_ratelimit()) \ + printk(KERN_ERR DM_NAME ": " DM_MSG_PREFIX ": " \ + f "\n", ## arg); \ + } while (0) + +#define DMWARN(f, arg...) \ + printk(KERN_WARNING DM_NAME ": " DM_MSG_PREFIX ": " f "\n", ## arg) +#define DMWARN_LIMIT(f, arg...) \ + do { \ + if (dm_ratelimit()) \ + printk(KERN_WARNING DM_NAME ": " DM_MSG_PREFIX ": " \ + f "\n", ## arg); \ + } while (0) + +#define DMINFO(f, arg...) \ + printk(KERN_INFO DM_NAME ": " DM_MSG_PREFIX ": " f "\n", ## arg) +#define DMINFO_LIMIT(f, arg...) \ + do { \ + if (dm_ratelimit()) \ + printk(KERN_INFO DM_NAME ": " DM_MSG_PREFIX ": " f \ + "\n", ## arg); \ + } while (0) + +#ifdef CONFIG_DM_DEBUG +# define DMDEBUG(f, arg...) \ + printk(KERN_DEBUG DM_NAME ": " DM_MSG_PREFIX " DEBUG: " f "\n", ## arg) +# define DMDEBUG_LIMIT(f, arg...) \ + do { \ + if (dm_ratelimit()) \ + printk(KERN_DEBUG DM_NAME ": " DM_MSG_PREFIX ": " f \ + "\n", ## arg); \ + } while (0) +#else +# define DMDEBUG(f, arg...) do {} while (0) +# define DMDEBUG_LIMIT(f, arg...) do {} while (0) +#endif #define DMEMIT(x...) sz += ((sz >= maxlen) ? \ 0 : scnprintf(result + sz, maxlen - sz, x)) -#define DMEMIT_TARGET_NAME_VERSION(y) \ - DMEMIT("target_name=%s,target_version=%u.%u.%u", \ - (y)->name, (y)->version[0], (y)->version[1], (y)->version[2]) +#define SECTOR_SHIFT 9 /* * Definitions of return values from target end_io function. */ -#define DM_ENDIO_DONE 0 #define DM_ENDIO_INCOMPLETE 1 #define DM_ENDIO_REQUEUE 2 -#define DM_ENDIO_DELAY_REQUEUE 3 /* * Definitions of return values from target map function. 
@@ -620,8 +590,7 @@ void dm_destroy_keyslot_manager(struct blk_keyslot_manager *ksm); #define DM_MAPIO_SUBMITTED 0 #define DM_MAPIO_REMAPPED 1 #define DM_MAPIO_REQUEUE DM_ENDIO_REQUEUE -#define DM_MAPIO_DELAY_REQUEUE DM_ENDIO_DELAY_REQUEUE -#define DM_MAPIO_KILL 4 +#define DM_MAPIO_DELAY_REQUEUE 3 #define dm_sector_div64(x, y)( \ { \ @@ -649,13 +618,16 @@ void dm_destroy_keyslot_manager(struct blk_keyslot_manager *ksm); */ #define dm_round_up(n, sz) (dm_div_up((n), (sz)) * (sz)) +#define dm_array_too_big(fixed, obj, num) \ + ((num) > (UINT_MAX - (fixed)) / (obj)) + /* * Sector offset taken relative to the start of the target instead of * relative to the start of the device. */ #define dm_target_offset(ti, sector) ((sector) - (ti)->begin) -static inline sector_t to_sector(unsigned long long n) +static inline sector_t to_sector(unsigned long n) { return (n >> SECTOR_SHIFT); } diff --git a/include/linux/device.h b/include/linux/device.h index e270cb740b..88bcdabdab 100644 --- a/include/linux/device.h +++ b/include/linux/device.h @@ -1,4 +1,3 @@ -// SPDX-License-Identifier: GPL-2.0 /* * device.h - generic, centralized driver model * @@ -6,14 +5,14 @@ * Copyright (c) 2004-2009 Greg Kroah-Hartman * Copyright (c) 2008-2009 Novell Inc. * - * See Documentation/driver-api/driver-model/ for more information. + * This file is released under the GPLv2 + * + * See Documentation/driver-model/ for more information. 
*/ #ifndef _DEVICE_H_ #define _DEVICE_H_ -#include -#include #include #include #include @@ -22,14 +21,12 @@ #include #include #include +#include #include #include +#include #include #include -#include -#include -#include -#include #include struct device; @@ -39,17 +36,302 @@ struct driver_private; struct module; struct class; struct subsys_private; +struct bus_type; struct device_node; struct fwnode_handle; struct iommu_ops; struct iommu_group; -struct dev_pin_info; -struct dev_iommu; +struct iommu_fwspec; + +struct bus_attribute { + struct attribute attr; + ssize_t (*show)(struct bus_type *bus, char *buf); + ssize_t (*store)(struct bus_type *bus, const char *buf, size_t count); +}; + +#define BUS_ATTR(_name, _mode, _show, _store) \ + struct bus_attribute bus_attr_##_name = __ATTR(_name, _mode, _show, _store) +#define BUS_ATTR_RW(_name) \ + struct bus_attribute bus_attr_##_name = __ATTR_RW(_name) +#define BUS_ATTR_RO(_name) \ + struct bus_attribute bus_attr_##_name = __ATTR_RO(_name) + +extern int __must_check bus_create_file(struct bus_type *, + struct bus_attribute *); +extern void bus_remove_file(struct bus_type *, struct bus_attribute *); + +/** + * struct bus_type - The bus type of the device + * + * @name: The name of the bus. + * @dev_name: Used for subsystems to enumerate devices like ("foo%u", dev->id). + * @dev_root: Default device to use as the parent. + * @dev_attrs: Default attributes of the devices on the bus. + * @bus_groups: Default attributes of the bus. + * @dev_groups: Default attributes of the devices on the bus. + * @drv_groups: Default attributes of the device drivers on the bus. + * @match: Called, perhaps multiple times, whenever a new device or driver + * is added for this bus. It should return a positive value if the + * given device can be handled by the given driver and zero + * otherwise. It may also return error code if determining that + * the driver supports the device is not possible. 
In case of + * -EPROBE_DEFER it will queue the device for deferred probing. + * @uevent: Called when a device is added, removed, or a few other things + * that generate uevents to add the environment variables. + * @probe: Called when a new device or driver add to this bus, and callback + * the specific driver's probe to initial the matched device. + * @remove: Called when a device removed from this bus. + * @shutdown: Called at shut-down time to quiesce the device. + * + * @online: Called to put the device back online (after offlining it). + * @offline: Called to put the device offline for hot-removal. May fail. + * + * @suspend: Called when a device on this bus wants to go to sleep mode. + * @resume: Called to bring a device on this bus out of sleep mode. + * @pm: Power management operations of this bus, callback the specific + * device driver's pm-ops. + * @iommu_ops: IOMMU specific operations for this bus, used to attach IOMMU + * driver implementations to a bus and allow the driver to do + * bus-specific setup + * @p: The private data of the driver core, only the driver core can + * touch this. + * @lock_key: Lock class key for use by the lock validator + * + * A bus is a channel between the processor and one or more devices. For the + * purposes of the device model, all devices are connected via a bus, even if + * it is an internal, virtual, "platform" bus. Buses can plug into each other. + * A USB controller is usually a PCI device, for example. The device model + * represents the actual connections between buses and the devices they control. + * A bus is represented by the bus_type structure. It contains the name, the + * default attributes, the bus' methods, PM operations, and the driver core's + * private data. 
+ */ +struct bus_type { + const char *name; + const char *dev_name; + struct device *dev_root; + struct device_attribute *dev_attrs; /* use dev_groups instead */ + const struct attribute_group **bus_groups; + const struct attribute_group **dev_groups; + const struct attribute_group **drv_groups; + + int (*match)(struct device *dev, struct device_driver *drv); + int (*uevent)(struct device *dev, struct kobj_uevent_env *env); + int (*probe)(struct device *dev); + int (*remove)(struct device *dev); + void (*shutdown)(struct device *dev); + + int (*online)(struct device *dev); + int (*offline)(struct device *dev); + + int (*suspend)(struct device *dev, pm_message_t state); + int (*resume)(struct device *dev); + + const struct dev_pm_ops *pm; + + const struct iommu_ops *iommu_ops; + + struct subsys_private *p; + struct lock_class_key lock_key; +}; + +extern int __must_check bus_register(struct bus_type *bus); + +extern void bus_unregister(struct bus_type *bus); + +extern int __must_check bus_rescan_devices(struct bus_type *bus); + +/* iterator helpers for buses */ +struct subsys_dev_iter { + struct klist_iter ki; + const struct device_type *type; +}; +void subsys_dev_iter_init(struct subsys_dev_iter *iter, + struct bus_type *subsys, + struct device *start, + const struct device_type *type); +struct device *subsys_dev_iter_next(struct subsys_dev_iter *iter); +void subsys_dev_iter_exit(struct subsys_dev_iter *iter); + +int bus_for_each_dev(struct bus_type *bus, struct device *start, void *data, + int (*fn)(struct device *dev, void *data)); +struct device *bus_find_device(struct bus_type *bus, struct device *start, + void *data, + int (*match)(struct device *dev, void *data)); +struct device *bus_find_device_by_name(struct bus_type *bus, + struct device *start, + const char *name); +struct device *subsys_find_device_by_id(struct bus_type *bus, unsigned int id, + struct device *hint); +int bus_for_each_drv(struct bus_type *bus, struct device_driver *start, + void *data, int 
(*fn)(struct device_driver *, void *)); +void bus_sort_breadthfirst(struct bus_type *bus, + int (*compare)(const struct device *a, + const struct device *b)); +/* + * Bus notifiers: Get notified of addition/removal of devices + * and binding/unbinding of drivers to devices. + * In the long run, it should be a replacement for the platform + * notify hooks. + */ +struct notifier_block; + +extern int bus_register_notifier(struct bus_type *bus, + struct notifier_block *nb); +extern int bus_unregister_notifier(struct bus_type *bus, + struct notifier_block *nb); + +/* All 4 notifers below get called with the target struct device * + * as an argument. Note that those functions are likely to be called + * with the device lock held in the core, so be careful. + */ +#define BUS_NOTIFY_ADD_DEVICE 0x00000001 /* device added */ +#define BUS_NOTIFY_DEL_DEVICE 0x00000002 /* device to be removed */ +#define BUS_NOTIFY_REMOVED_DEVICE 0x00000003 /* device removed */ +#define BUS_NOTIFY_BIND_DRIVER 0x00000004 /* driver about to be + bound */ +#define BUS_NOTIFY_BOUND_DRIVER 0x00000005 /* driver bound to device */ +#define BUS_NOTIFY_UNBIND_DRIVER 0x00000006 /* driver about to be + unbound */ +#define BUS_NOTIFY_UNBOUND_DRIVER 0x00000007 /* driver is unbound + from the device */ +#define BUS_NOTIFY_DRIVER_NOT_BOUND 0x00000008 /* driver fails to be bound */ + +extern struct kset *bus_get_kset(struct bus_type *bus); +extern struct klist *bus_get_device_klist(struct bus_type *bus); + +/** + * enum probe_type - device driver probe type to try + * Device drivers may opt in for special handling of their + * respective probe routines. This tells the core what to + * expect and prefer. + * + * @PROBE_DEFAULT_STRATEGY: Used by drivers that work equally well + * whether probed synchronously or asynchronously. + * @PROBE_PREFER_ASYNCHRONOUS: Drivers for "slow" devices which + * probing order is not essential for booting the system may + * opt into executing their probes asynchronously. 
+ * @PROBE_FORCE_SYNCHRONOUS: Use this to annotate drivers that need + * their probe routines to run synchronously with driver and + * device registration (with the exception of -EPROBE_DEFER + * handling - re-probing always ends up being done asynchronously). + * + * Note that the end goal is to switch the kernel to use asynchronous + * probing by default, so annotating drivers with + * %PROBE_PREFER_ASYNCHRONOUS is a temporary measure that allows us + * to speed up boot process while we are validating the rest of the + * drivers. + */ +enum probe_type { + PROBE_DEFAULT_STRATEGY, + PROBE_PREFER_ASYNCHRONOUS, + PROBE_FORCE_SYNCHRONOUS, +}; + +/** + * struct device_driver - The basic device driver structure + * @name: Name of the device driver. + * @bus: The bus which the device of this driver belongs to. + * @owner: The module owner. + * @mod_name: Used for built-in modules. + * @suppress_bind_attrs: Disables bind/unbind via sysfs. + * @probe_type: Type of the probe (synchronous or asynchronous) to use. + * @of_match_table: The open firmware table. + * @acpi_match_table: The ACPI match table. + * @probe: Called to query the existence of a specific device, + * whether this driver can work with it, and bind the driver + * to a specific device. + * @remove: Called when the device is removed from the system to + * unbind a device from this driver. + * @shutdown: Called at shut-down time to quiesce the device. + * @suspend: Called to put the device to sleep mode. Usually to a + * low power state. + * @resume: Called to bring a device from sleep mode. + * @groups: Default attributes that get created by the driver core + * automatically. + * @pm: Power management operations of the device which matched + * this driver. + * @p: Driver core's private data, no one other than the driver + * core can touch this. + * + * The device driver-model tracks all of the drivers known to the system. 
+ * The main reason for this tracking is to enable the driver core to match + * up drivers with new devices. Once drivers are known objects within the + * system, however, a number of other things become possible. Device drivers + * can export information and configuration variables that are independent + * of any specific device. + */ +struct device_driver { + const char *name; + struct bus_type *bus; + + struct module *owner; + const char *mod_name; /* used for built-in modules */ + + bool suppress_bind_attrs; /* disables bind/unbind via sysfs */ + enum probe_type probe_type; + + const struct of_device_id *of_match_table; + const struct acpi_device_id *acpi_match_table; + + int (*probe) (struct device *dev); + int (*remove) (struct device *dev); + void (*shutdown) (struct device *dev); + int (*suspend) (struct device *dev, pm_message_t state); + int (*resume) (struct device *dev); + const struct attribute_group **groups; + + const struct dev_pm_ops *pm; + + struct driver_private *p; +}; + + +extern int __must_check driver_register(struct device_driver *drv); +extern void driver_unregister(struct device_driver *drv); + +extern struct device_driver *driver_find(const char *name, + struct bus_type *bus); +extern int driver_probe_done(void); +extern void wait_for_device_probe(void); + + +/* sysfs interface for exporting driver attributes */ + +struct driver_attribute { + struct attribute attr; + ssize_t (*show)(struct device_driver *driver, char *buf); + ssize_t (*store)(struct device_driver *driver, const char *buf, + size_t count); +}; + +#define DRIVER_ATTR(_name, _mode, _show, _store) \ + struct driver_attribute driver_attr_##_name = __ATTR(_name, _mode, _show, _store) +#define DRIVER_ATTR_RW(_name) \ + struct driver_attribute driver_attr_##_name = __ATTR_RW(_name) +#define DRIVER_ATTR_RO(_name) \ + struct driver_attribute driver_attr_##_name = __ATTR_RO(_name) +#define DRIVER_ATTR_WO(_name) \ + struct driver_attribute driver_attr_##_name = __ATTR_WO(_name) + 
+extern int __must_check driver_create_file(struct device_driver *driver, + const struct driver_attribute *attr); +extern void driver_remove_file(struct device_driver *driver, + const struct driver_attribute *attr); + +extern int __must_check driver_for_each_device(struct device_driver *drv, + struct device *start, + void *data, + int (*fn)(struct device *dev, + void *)); +struct device *driver_find_device(struct device_driver *drv, + struct device *start, void *data, + int (*match)(struct device *dev, void *data)); /** * struct subsys_interface - interfaces to device functions * @name: name of the device function - * @subsys: subsystem of the devices to attach to + * @subsys: subsytem of the devices to attach to * @node: the list of functions registered at the subsystem * @add_dev: device hookup to device function handler * @remove_dev: device hookup to device function handler @@ -65,7 +347,7 @@ struct subsys_interface { struct list_head node; int (*add_dev)(struct device *dev, struct subsys_interface *sif); void (*remove_dev)(struct device *dev, struct subsys_interface *sif); -}; +} __do_const; int subsys_interface_register(struct subsys_interface *sif); void subsys_interface_unregister(struct subsys_interface *sif); @@ -75,6 +357,174 @@ int subsys_system_register(struct bus_type *subsys, int subsys_virtual_register(struct bus_type *subsys, const struct attribute_group **groups); +/** + * struct class - device classes + * @name: Name of the class. + * @owner: The module owner. + * @class_attrs: Default attributes of this class. + * @dev_groups: Default attributes of the devices that belong to the class. + * @dev_kobj: The kobject that represents this class and links it into the hierarchy. + * @dev_uevent: Called when a device is added, removed from this class, or a + * few other things that generate uevents to add the environment + * variables. + * @devnode: Callback to provide the devtmpfs. + * @class_release: Called to release this class. 
+ * @dev_release: Called to release the device. + * @suspend: Used to put the device to sleep mode, usually to a low power + * state. + * @resume: Used to bring the device from the sleep mode. + * @ns_type: Callbacks so sysfs can detemine namespaces. + * @namespace: Namespace of the device belongs to this class. + * @pm: The default device power management operations of this class. + * @p: The private data of the driver core, no one other than the + * driver core can touch this. + * + * A class is a higher-level view of a device that abstracts out low-level + * implementation details. Drivers may see a SCSI disk or an ATA disk, but, + * at the class level, they are all simply disks. Classes allow user space + * to work with devices based on what they do, rather than how they are + * connected or how they work. + */ +struct class { + const char *name; + struct module *owner; + + struct class_attribute *class_attrs; + const struct attribute_group **dev_groups; + struct kobject *dev_kobj; + + int (*dev_uevent)(struct device *dev, struct kobj_uevent_env *env); + char *(*devnode)(struct device *dev, umode_t *mode); + + void (*class_release)(struct class *class); + void (*dev_release)(struct device *dev); + + int (*suspend)(struct device *dev, pm_message_t state); + int (*resume)(struct device *dev); + + const struct kobj_ns_type_operations *ns_type; + const void *(*namespace)(struct device *dev); + + const struct dev_pm_ops *pm; + + struct subsys_private *p; +}; + +struct class_dev_iter { + struct klist_iter ki; + const struct device_type *type; +}; + +extern struct kobject *sysfs_dev_block_kobj; +extern struct kobject *sysfs_dev_char_kobj; +extern int __must_check __class_register(struct class *class, + struct lock_class_key *key); +extern void class_unregister(struct class *class); + +/* This is a #define to keep the compiler from merging different + * instances of the __key variable */ +#define class_register(class) \ +({ \ + static struct lock_class_key __key; \ + 
__class_register(class, &__key); \ +}) + +struct class_compat; +struct class_compat *class_compat_register(const char *name); +void class_compat_unregister(struct class_compat *cls); +int class_compat_create_link(struct class_compat *cls, struct device *dev, + struct device *device_link); +void class_compat_remove_link(struct class_compat *cls, struct device *dev, + struct device *device_link); + +extern void class_dev_iter_init(struct class_dev_iter *iter, + struct class *class, + struct device *start, + const struct device_type *type); +extern struct device *class_dev_iter_next(struct class_dev_iter *iter); +extern void class_dev_iter_exit(struct class_dev_iter *iter); + +extern int class_for_each_device(struct class *class, struct device *start, + void *data, + int (*fn)(struct device *dev, void *data)); +extern struct device *class_find_device(struct class *class, + struct device *start, const void *data, + int (*match)(struct device *, const void *)); + +struct class_attribute { + struct attribute attr; + ssize_t (*show)(struct class *class, struct class_attribute *attr, + char *buf); + ssize_t (*store)(struct class *class, struct class_attribute *attr, + const char *buf, size_t count); +}; + +#define CLASS_ATTR(_name, _mode, _show, _store) \ + struct class_attribute class_attr_##_name = __ATTR(_name, _mode, _show, _store) +#define CLASS_ATTR_RW(_name) \ + struct class_attribute class_attr_##_name = __ATTR_RW(_name) +#define CLASS_ATTR_RO(_name) \ + struct class_attribute class_attr_##_name = __ATTR_RO(_name) + +extern int __must_check class_create_file_ns(struct class *class, + const struct class_attribute *attr, + const void *ns); +extern void class_remove_file_ns(struct class *class, + const struct class_attribute *attr, + const void *ns); + +static inline int __must_check class_create_file(struct class *class, + const struct class_attribute *attr) +{ + return class_create_file_ns(class, attr, NULL); +} + +static inline void class_remove_file(struct class 
*class, + const struct class_attribute *attr) +{ + return class_remove_file_ns(class, attr, NULL); +} + +/* Simple class attribute that is just a static string */ +struct class_attribute_string { + struct class_attribute attr; + char *str; +}; + +/* Currently read-only only */ +#define _CLASS_ATTR_STRING(_name, _mode, _str) \ + { __ATTR(_name, _mode, show_class_attr_string, NULL), _str } +#define CLASS_ATTR_STRING(_name, _mode, _str) \ + struct class_attribute_string class_attr_##_name = \ + _CLASS_ATTR_STRING(_name, _mode, _str) + +extern ssize_t show_class_attr_string(struct class *class, struct class_attribute *attr, + char *buf); + +struct class_interface { + struct list_head node; + struct class *class; + + int (*add_dev) (struct device *, struct class_interface *); + void (*remove_dev) (struct device *, struct class_interface *); +}; + +extern int __must_check class_interface_register(struct class_interface *); +extern void class_interface_unregister(struct class_interface *); + +extern struct class * __must_check __class_create(struct module *owner, + const char *name, + struct lock_class_key *key); +extern void class_destroy(struct class *cls); + +/* This is a #define to keep the compiler from merging different + * instances of the __key variable */ +#define class_create(owner, name) \ +({ \ + static struct lock_class_key __key; \ + __class_create(owner, name, &__key); \ +}) + /* * The type of device, "struct device" is embedded in. 
A class * or bus can contain devices of different types @@ -93,7 +543,7 @@ struct device_type { void (*release)(struct device *dev); const struct dev_pm_ops *pm; -}; +} __do_const; /* interface for exporting device attributes */ struct device_attribute { @@ -103,11 +553,12 @@ struct device_attribute { ssize_t (*store)(struct device *dev, struct device_attribute *attr, const char *buf, size_t count); }; +typedef struct device_attribute __no_const device_attribute_no_const; struct dev_ext_attribute { struct device_attribute attr; void *var; -}; +} __do_const; ssize_t device_show_ulong(struct device *dev, struct device_attribute *attr, char *buf); @@ -124,17 +575,10 @@ ssize_t device_store_bool(struct device *dev, struct device_attribute *attr, #define DEVICE_ATTR(_name, _mode, _show, _store) \ struct device_attribute dev_attr_##_name = __ATTR(_name, _mode, _show, _store) -#define DEVICE_ATTR_PREALLOC(_name, _mode, _show, _store) \ - struct device_attribute dev_attr_##_name = \ - __ATTR_PREALLOC(_name, _mode, _show, _store) #define DEVICE_ATTR_RW(_name) \ struct device_attribute dev_attr_##_name = __ATTR_RW(_name) -#define DEVICE_ATTR_ADMIN_RW(_name) \ - struct device_attribute dev_attr_##_name = __ATTR_RW_MODE(_name, 0600) #define DEVICE_ATTR_RO(_name) \ struct device_attribute dev_attr_##_name = __ATTR_RO(_name) -#define DEVICE_ATTR_ADMIN_RO(_name) \ - struct device_attribute dev_attr_##_name = __ATTR_RO_MODE(_name, 0400) #define DEVICE_ATTR_WO(_name) \ struct device_attribute dev_attr_##_name = __ATTR_WO(_name) #define DEVICE_ULONG_ATTR(_name, _mode, _var) \ @@ -150,59 +594,68 @@ ssize_t device_store_bool(struct device *dev, struct device_attribute *attr, struct device_attribute dev_attr_##_name = \ __ATTR_IGNORE_LOCKDEP(_name, _mode, _show, _store) -int device_create_file(struct device *device, - const struct device_attribute *entry); -void device_remove_file(struct device *dev, - const struct device_attribute *attr); -bool device_remove_file_self(struct device 
*dev, - const struct device_attribute *attr); -int __must_check device_create_bin_file(struct device *dev, +extern int device_create_file(struct device *device, + const struct device_attribute *entry); +extern void device_remove_file(struct device *dev, + const struct device_attribute *attr); +extern bool device_remove_file_self(struct device *dev, + const struct device_attribute *attr); +extern int __must_check device_create_bin_file(struct device *dev, const struct bin_attribute *attr); -void device_remove_bin_file(struct device *dev, - const struct bin_attribute *attr); +extern void device_remove_bin_file(struct device *dev, + const struct bin_attribute *attr); /* device resource management */ typedef void (*dr_release_t)(struct device *dev, void *res); typedef int (*dr_match_t)(struct device *dev, void *res, void *match_data); -void *__devres_alloc_node(dr_release_t release, size_t size, gfp_t gfp, - int nid, const char *name) __malloc; +#ifdef CONFIG_DEBUG_DEVRES +extern void *__devres_alloc_node(dr_release_t release, size_t size, gfp_t gfp, + int nid, const char *name) __malloc; #define devres_alloc(release, size, gfp) \ __devres_alloc_node(release, size, gfp, NUMA_NO_NODE, #release) #define devres_alloc_node(release, size, gfp, nid) \ __devres_alloc_node(release, size, gfp, nid, #release) +#else +extern void *devres_alloc_node(dr_release_t release, size_t size, gfp_t gfp, + int nid) __malloc; +static inline void *devres_alloc(dr_release_t release, size_t size, gfp_t gfp) +{ + return devres_alloc_node(release, size, gfp, NUMA_NO_NODE); +} +#endif -void devres_for_each_res(struct device *dev, dr_release_t release, - dr_match_t match, void *match_data, - void (*fn)(struct device *, void *, void *), - void *data); -void devres_free(void *res); -void devres_add(struct device *dev, void *res); -void *devres_find(struct device *dev, dr_release_t release, - dr_match_t match, void *match_data); -void *devres_get(struct device *dev, void *new_res, - dr_match_t match, 
void *match_data); -void *devres_remove(struct device *dev, dr_release_t release, - dr_match_t match, void *match_data); -int devres_destroy(struct device *dev, dr_release_t release, - dr_match_t match, void *match_data); -int devres_release(struct device *dev, dr_release_t release, - dr_match_t match, void *match_data); +extern void devres_for_each_res(struct device *dev, dr_release_t release, + dr_match_t match, void *match_data, + void (*fn)(struct device *, void *, void *), + void *data); +extern void devres_free(void *res); +extern void devres_add(struct device *dev, void *res); +extern void *devres_find(struct device *dev, dr_release_t release, + dr_match_t match, void *match_data); +extern void *devres_get(struct device *dev, void *new_res, + dr_match_t match, void *match_data); +extern void *devres_remove(struct device *dev, dr_release_t release, + dr_match_t match, void *match_data); +extern int devres_destroy(struct device *dev, dr_release_t release, + dr_match_t match, void *match_data); +extern int devres_release(struct device *dev, dr_release_t release, + dr_match_t match, void *match_data); /* devres group */ -void * __must_check devres_open_group(struct device *dev, void *id, gfp_t gfp); -void devres_close_group(struct device *dev, void *id); -void devres_remove_group(struct device *dev, void *id); -int devres_release_group(struct device *dev, void *id); +extern void * __must_check devres_open_group(struct device *dev, void *id, + gfp_t gfp); +extern void devres_close_group(struct device *dev, void *id); +extern void devres_remove_group(struct device *dev, void *id); +extern int devres_release_group(struct device *dev, void *id); /* managed devm_k.alloc/kfree for device drivers */ -void *devm_kmalloc(struct device *dev, size_t size, gfp_t gfp) __malloc; -void *devm_krealloc(struct device *dev, void *ptr, size_t size, - gfp_t gfp) __must_check; -__printf(3, 0) char *devm_kvasprintf(struct device *dev, gfp_t gfp, - const char *fmt, va_list ap) 
__malloc; -__printf(3, 4) char *devm_kasprintf(struct device *dev, gfp_t gfp, - const char *fmt, ...) __malloc; +extern void *devm_kmalloc(struct device *dev, size_t size, gfp_t gfp) __malloc; +extern __printf(3, 0) +char *devm_kvasprintf(struct device *dev, gfp_t gfp, const char *fmt, + va_list ap) __malloc; +extern __printf(3, 4) +char *devm_kasprintf(struct device *dev, gfp_t gfp, const char *fmt, ...) __malloc; static inline void *devm_kzalloc(struct device *dev, size_t size, gfp_t gfp) { return devm_kmalloc(dev, size, gfp | __GFP_ZERO); @@ -210,40 +663,29 @@ static inline void *devm_kzalloc(struct device *dev, size_t size, gfp_t gfp) static inline void *devm_kmalloc_array(struct device *dev, size_t n, size_t size, gfp_t flags) { - size_t bytes; - - if (unlikely(check_mul_overflow(n, size, &bytes))) + if (size != 0 && n > SIZE_MAX / size) return NULL; - - return devm_kmalloc(dev, bytes, flags); + return devm_kmalloc(dev, n * size, flags); } static inline void *devm_kcalloc(struct device *dev, size_t n, size_t size, gfp_t flags) { return devm_kmalloc_array(dev, n, size, flags | __GFP_ZERO); } -void devm_kfree(struct device *dev, const void *p); -char *devm_kstrdup(struct device *dev, const char *s, gfp_t gfp) __malloc; -const char *devm_kstrdup_const(struct device *dev, const char *s, gfp_t gfp); -void *devm_kmemdup(struct device *dev, const void *src, size_t len, gfp_t gfp); +extern void devm_kfree(struct device *dev, void *p); +extern char *devm_kstrdup(struct device *dev, const char *s, gfp_t gfp) __malloc; +extern void *devm_kmemdup(struct device *dev, const void *src, size_t len, + gfp_t gfp); -unsigned long devm_get_free_pages(struct device *dev, - gfp_t gfp_mask, unsigned int order); -void devm_free_pages(struct device *dev, unsigned long addr); +extern unsigned long devm_get_free_pages(struct device *dev, + gfp_t gfp_mask, unsigned int order); +extern void devm_free_pages(struct device *dev, unsigned long addr); -void __iomem 
*devm_ioremap_resource(struct device *dev, - const struct resource *res); -void __iomem *devm_ioremap_resource_wc(struct device *dev, - const struct resource *res); - -void __iomem *devm_of_iomap(struct device *dev, - struct device_node *node, int index, - resource_size_t *size); +void __iomem *devm_ioremap_resource(struct device *dev, struct resource *res); /* allows to add/remove a custom action to devres stack */ int devm_add_action(struct device *dev, void (*action)(void *), void *data); void devm_remove_action(struct device *dev, void (*action)(void *), void *data); -void devm_release_action(struct device *dev, void (*action)(void *), void *data); static inline int devm_add_action_or_reset(struct device *dev, void (*action)(void *), void *data) @@ -257,120 +699,15 @@ static inline int devm_add_action_or_reset(struct device *dev, return ret; } -/** - * devm_alloc_percpu - Resource-managed alloc_percpu - * @dev: Device to allocate per-cpu memory for - * @type: Type to allocate per-cpu memory for - * - * Managed alloc_percpu. Per-cpu memory allocated with this function is - * automatically freed on driver detach. - * - * RETURNS: - * Pointer to allocated memory on success, NULL on failure. - */ -#define devm_alloc_percpu(dev, type) \ - ((typeof(type) __percpu *)__devm_alloc_percpu((dev), sizeof(type), \ - __alignof__(type))) - -void __percpu *__devm_alloc_percpu(struct device *dev, size_t size, - size_t align); -void devm_free_percpu(struct device *dev, void __percpu *pdata); - struct device_dma_parameters { /* * a low level driver may set these to teach IOMMU code about * sg limitations. */ unsigned int max_segment_size; - unsigned int min_align_mask; unsigned long segment_boundary_mask; }; -/** - * enum device_link_state - Device link states. - * @DL_STATE_NONE: The presence of the drivers is not being tracked. - * @DL_STATE_DORMANT: None of the supplier/consumer drivers is present. 
- * @DL_STATE_AVAILABLE: The supplier driver is present, but the consumer is not. - * @DL_STATE_CONSUMER_PROBE: The consumer is probing (supplier driver present). - * @DL_STATE_ACTIVE: Both the supplier and consumer drivers are present. - * @DL_STATE_SUPPLIER_UNBIND: The supplier driver is unbinding. - */ -enum device_link_state { - DL_STATE_NONE = -1, - DL_STATE_DORMANT = 0, - DL_STATE_AVAILABLE, - DL_STATE_CONSUMER_PROBE, - DL_STATE_ACTIVE, - DL_STATE_SUPPLIER_UNBIND, -}; - -/* - * Device link flags. - * - * STATELESS: The core will not remove this link automatically. - * AUTOREMOVE_CONSUMER: Remove the link automatically on consumer driver unbind. - * PM_RUNTIME: If set, the runtime PM framework will use this link. - * RPM_ACTIVE: Run pm_runtime_get_sync() on the supplier during link creation. - * AUTOREMOVE_SUPPLIER: Remove the link automatically on supplier driver unbind. - * AUTOPROBE_CONSUMER: Probe consumer driver automatically after supplier binds. - * MANAGED: The core tracks presence of supplier/consumer drivers (internal). - * SYNC_STATE_ONLY: Link only affects sync_state() behavior. - * INFERRED: Inferred from data (eg: firmware) and not from driver actions. - */ -#define DL_FLAG_STATELESS BIT(0) -#define DL_FLAG_AUTOREMOVE_CONSUMER BIT(1) -#define DL_FLAG_PM_RUNTIME BIT(2) -#define DL_FLAG_RPM_ACTIVE BIT(3) -#define DL_FLAG_AUTOREMOVE_SUPPLIER BIT(4) -#define DL_FLAG_AUTOPROBE_CONSUMER BIT(5) -#define DL_FLAG_MANAGED BIT(6) -#define DL_FLAG_SYNC_STATE_ONLY BIT(7) -#define DL_FLAG_INFERRED BIT(8) - -/** - * enum dl_dev_state - Device driver presence tracking information. - * @DL_DEV_NO_DRIVER: There is no driver attached to the device. - * @DL_DEV_PROBING: A driver is probing. - * @DL_DEV_DRIVER_BOUND: The driver has been bound to the device. - * @DL_DEV_UNBINDING: The driver is unbinding from the device. 
- */ -enum dl_dev_state { - DL_DEV_NO_DRIVER = 0, - DL_DEV_PROBING, - DL_DEV_DRIVER_BOUND, - DL_DEV_UNBINDING, -}; - -/** - * enum device_removable - Whether the device is removable. The criteria for a - * device to be classified as removable is determined by its subsystem or bus. - * @DEVICE_REMOVABLE_NOT_SUPPORTED: This attribute is not supported for this - * device (default). - * @DEVICE_REMOVABLE_UNKNOWN: Device location is Unknown. - * @DEVICE_FIXED: Device is not removable by the user. - * @DEVICE_REMOVABLE: Device is removable by the user. - */ -enum device_removable { - DEVICE_REMOVABLE_NOT_SUPPORTED = 0, /* must be 0 */ - DEVICE_REMOVABLE_UNKNOWN, - DEVICE_FIXED, - DEVICE_REMOVABLE, -}; - -/** - * struct dev_links_info - Device data related to device links. - * @suppliers: List of links to supplier devices. - * @consumers: List of links to consumer devices. - * @defer_sync: Hook to global list of devices that have deferred sync_state. - * @status: Driver status information. - */ -struct dev_links_info { - struct list_head suppliers; - struct list_head consumers; - struct list_head defer_sync; - enum dl_dev_state status; -}; - /** * struct device - The basic device structure * @parent: The device's "parent" device, the device to which it is attached. @@ -385,8 +722,6 @@ struct dev_links_info { * This identifies the device type and carries type-specific * information. * @mutex: Mutex to synchronize calls to its driver. - * @lockdep_mutex: An optional debug lock that a subsystem can use as a - * peer lock to gain localized lockdep coverage of the device_lock. * @bus: Type of bus device is on. * @driver: Which driver has allocated this * @platform_data: Platform data specific to the device. @@ -398,33 +733,26 @@ struct dev_links_info { * on. This shrinks the "Board Support Packages" (BSPs) and * minimizes board-specific #ifdefs in drivers. * @driver_data: Private pointer for driver specific info. - * @links: Links to suppliers and consumers of this device. 
* @power: For device power management. - * See Documentation/driver-api/pm/devices.rst for details. + * See Documentation/power/devices.txt for details. * @pm_domain: Provide callbacks that are executed during system suspend, * hibernation, system resume and during runtime PM transitions * along with subsystem-level and driver-level callbacks. - * @em_pd: device's energy model performance domain * @pins: For device pin management. - * See Documentation/driver-api/pin-control.rst for details. - * @msi_lock: Lock to protect MSI mask cache and mask register + * See Documentation/pinctrl.txt for details. * @msi_list: Hosts MSI descriptors * @msi_domain: The generic MSI domain this device is using. * @numa_node: NUMA node this device is close to. - * @dma_ops: DMA mapping operations for this device. * @dma_mask: Dma mask (if dma'ble device). * @coherent_dma_mask: Like dma_mask, but for alloc_coherent mapping as not all * hardware supports 64-bit addresses for consistent allocations * such descriptors. - * @bus_dma_limit: Limit of an upstream bridge or bus which imposes a smaller - * DMA limit than the device itself supports. - * @dma_range_map: map for DMA memory ranges relative to that of RAM + * @dma_pfn_offset: offset of DMA memory range relatively of RAM * @dma_parms: A low level driver may set these to teach IOMMU code about * segment limitations. * @dma_pools: Dma pools (if dma'ble device). * @dma_mem: Internal for coherent mem override. * @cma_area: Contiguous memory area for dma allocations - * @dma_io_tlb_mem: Pointer to the swiotlb pool used. Not for driver use. * @archdata: For arch-specific additions. * @of_node: Associated device tree node. * @fwnode: Associated device node supplied by platform firmware. @@ -439,28 +767,10 @@ struct dev_links_info { * gone away. This should be set by the allocator of the * device (i.e. the bus driver that discovered the device). * @iommu_group: IOMMU group the device belongs to. 
- * @iommu: Per device generic IOMMU runtime data - * @removable: Whether the device can be removed from the system. This - * should be set by the subsystem / bus driver that discovered - * the device. + * @iommu_fwspec: IOMMU-specific properties supplied by firmware. * * @offline_disabled: If set, the device is permanently online. * @offline: Set after successful invocation of bus type's .offline(). - * @of_node_reused: Set if the device-tree node is shared with an ancestor - * device. - * @state_synced: The hardware state of this device has been synced to match - * the software state of this device by calling the driver/bus - * sync_state() callback. - * @can_match: The device has matched with a driver at least once or it is in - * a bus (like AMBA) which can't check for matching drivers until - * other devices probe successfully. - * @dma_coherent: this particular device is dma coherent, even if the - * architecture supports non-coherent devices. - * @dma_ops_bypass: If set to %true then the dma_ops are bypassed for the - * streaming DMA operations (->map_* / ->unmap_* / ->sync_*), - * and optionall (if the coherent mask is large enough) also - * for dma allocations. This flag is managed by the dma ops - * instance from ->dma_supported. * * At the lowest level, every device in a Linux system is represented by an * instance of struct device. The device structure contains the information @@ -471,36 +781,28 @@ struct dev_links_info { * a higher-level representation of the device. */ struct device { - struct kobject kobj; struct device *parent; struct device_private *p; + struct kobject kobj; const char *init_name; /* initial name of the device */ const struct device_type *type; + struct mutex mutex; /* mutex to synchronize calls to + * its driver. 
+ */ + struct bus_type *bus; /* type of bus device is on */ struct device_driver *driver; /* which driver has allocated this device */ void *platform_data; /* Platform specific data, device core doesn't touch it */ void *driver_data; /* Driver data, set and get with - dev_set_drvdata/dev_get_drvdata */ -#ifdef CONFIG_PROVE_LOCKING - struct mutex lockdep_mutex; -#endif - struct mutex mutex; /* mutex to synchronize calls to - * its driver. - */ - - struct dev_links_info links; + dev_set/get_drvdata */ struct dev_pm_info power; struct dev_pm_domain *pm_domain; -#ifdef CONFIG_ENERGY_MODEL - struct em_perf_domain *em_pd; -#endif - #ifdef CONFIG_GENERIC_MSI_IRQ_DOMAIN struct irq_domain *msi_domain; #endif @@ -508,11 +810,11 @@ struct device { struct dev_pin_info *pins; #endif #ifdef CONFIG_GENERIC_MSI_IRQ - raw_spinlock_t msi_lock; struct list_head msi_list; #endif -#ifdef CONFIG_DMA_OPS - const struct dma_map_ops *dma_ops; + +#ifdef CONFIG_NUMA + int numa_node; /* NUMA node this device is close to */ #endif u64 *dma_mask; /* dma mask (if dma'able device) */ u64 coherent_dma_mask;/* Like dma_mask, but for @@ -520,23 +822,17 @@ struct device { not all hardware supports 64 bit addresses for consistent allocations such descriptors. 
*/ - u64 bus_dma_limit; /* upstream dma constraint */ - const struct bus_dma_region *dma_range_map; + unsigned long dma_pfn_offset; struct device_dma_parameters *dma_parms; struct list_head dma_pools; /* dma pools (if dma'ble) */ -#ifdef CONFIG_DMA_DECLARE_COHERENT struct dma_coherent_mem *dma_mem; /* internal for coherent mem override */ -#endif #ifdef CONFIG_DMA_CMA struct cma *cma_area; /* contiguous memory area for dma allocations */ -#endif -#ifdef CONFIG_SWIOTLB - struct io_tlb_mem *dma_io_tlb_mem; #endif /* arch specific additions */ struct dev_archdata archdata; @@ -544,65 +840,22 @@ struct device { struct device_node *of_node; /* associated device tree node */ struct fwnode_handle *fwnode; /* firmware device node */ -#ifdef CONFIG_NUMA - int numa_node; /* NUMA node this device is close to */ -#endif dev_t devt; /* dev_t, creates the sysfs "dev" */ u32 id; /* device instance */ spinlock_t devres_lock; struct list_head devres_head; + struct klist_node knode_class; struct class *class; const struct attribute_group **groups; /* optional groups */ void (*release)(struct device *dev); struct iommu_group *iommu_group; - struct dev_iommu *iommu; - - enum device_removable removable; + struct iommu_fwspec *iommu_fwspec; bool offline_disabled:1; bool offline:1; - bool of_node_reused:1; - bool state_synced:1; - bool can_match:1; -#if defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_DEVICE) || \ - defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU) || \ - defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU_ALL) - bool dma_coherent:1; -#endif -#ifdef CONFIG_DMA_OPS_BYPASS - bool dma_ops_bypass : 1; -#endif -}; - -/** - * struct device_link - Device link representation. - * @supplier: The device on the supplier end of the link. - * @s_node: Hook to the supplier device's list of links to consumers. - * @consumer: The device on the consumer end of the link. - * @c_node: Hook to the consumer device's list of links to suppliers. 
- * @link_dev: device used to expose link details in sysfs - * @status: The state of the link (with respect to the presence of drivers). - * @flags: Link flags. - * @rpm_active: Whether or not the consumer device is runtime-PM-active. - * @kref: Count repeated addition of the same link. - * @rm_work: Work structure used for removing the link. - * @supplier_preactivated: Supplier has been made active before consumer probe. - */ -struct device_link { - struct device *supplier; - struct list_head s_node; - struct device *consumer; - struct list_head c_node; - struct device link_dev; - enum device_link_state status; - u32 flags; - refcount_t rpm_active; - struct kref kref; - struct work_struct rm_work; - bool supplier_preactivated; /* Owned by consumer probe. */ }; static inline struct device *kobj_to_dev(struct kobject *kobj) @@ -610,16 +863,6 @@ static inline struct device *kobj_to_dev(struct kobject *kobj) return container_of(kobj, struct device, kobj); } -/** - * device_iommu_mapped - Returns true when the device DMA is translated - * by an IOMMU - * @dev: Device to perform the check on - */ -static inline bool device_iommu_mapped(struct device *dev) -{ - return (dev->iommu_group != NULL); -} - /* Get the wakeup routines, which depend on struct device */ #include @@ -632,19 +875,8 @@ static inline const char *dev_name(const struct device *dev) return kobject_name(&dev->kobj); } -/** - * dev_bus_name - Return a device's bus/class name, if at all possible - * @dev: struct device to get the bus/class name of - * - * Will return the name of the bus/class the device is attached to. If it is - * not attached to a bus/class, an empty string will be returned. - */ -static inline const char *dev_bus_name(const struct device *dev) -{ - return dev->bus ? dev->bus->name : (dev->class ? 
dev->class->name : ""); -} - -__printf(2, 3) int dev_set_name(struct device *dev, const char *name, ...); +extern __printf(2, 3) +int dev_set_name(struct device *dev, const char *name, ...); #ifdef CONFIG_NUMA static inline int dev_to_node(struct device *dev) @@ -658,7 +890,7 @@ static inline void set_dev_node(struct device *dev, int node) #else static inline int dev_to_node(struct device *dev) { - return NUMA_NO_NODE; + return -1; } static inline void set_dev_node(struct device *dev, int node) { @@ -728,16 +960,6 @@ static inline bool device_async_suspend_enabled(struct device *dev) return !!dev->power.async_suspend; } -static inline bool device_pm_not_required(struct device *dev) -{ - return dev->power.no_pm; -} - -static inline void device_set_pm_not_required(struct device *dev) -{ - dev->power.no_pm = true; -} - static inline void dev_pm_syscore_device(struct device *dev, bool val) { #ifdef CONFIG_PM_SLEEP @@ -745,16 +967,6 @@ static inline void dev_pm_syscore_device(struct device *dev, bool val) #endif } -static inline void dev_pm_set_driver_flags(struct device *dev, u32 flags) -{ - dev->power.driver_flags = flags; -} - -static inline bool dev_pm_test_driver_flags(struct device *dev, u32 flags) -{ - return !!(dev->power.driver_flags & flags); -} - static inline void device_lock(struct device *dev) { mutex_lock(&dev->mutex); @@ -782,94 +994,58 @@ static inline void device_lock_assert(struct device *dev) static inline struct device_node *dev_of_node(struct device *dev) { - if (!IS_ENABLED(CONFIG_OF) || !dev) + if (!IS_ENABLED(CONFIG_OF)) return NULL; return dev->of_node; } -static inline bool dev_has_sync_state(struct device *dev) -{ - if (!dev) - return false; - if (dev->driver && dev->driver->sync_state) - return true; - if (dev->bus && dev->bus->sync_state) - return true; - return false; -} - -static inline void dev_set_removable(struct device *dev, - enum device_removable removable) -{ - dev->removable = removable; -} - -static inline bool 
dev_is_removable(struct device *dev) -{ - return dev->removable == DEVICE_REMOVABLE; -} - -static inline bool dev_removable_is_valid(struct device *dev) -{ - return dev->removable != DEVICE_REMOVABLE_NOT_SUPPORTED; -} +void driver_init(void); /* * High level routines for use by the bus drivers */ -int __must_check device_register(struct device *dev); -void device_unregister(struct device *dev); -void device_initialize(struct device *dev); -int __must_check device_add(struct device *dev); -void device_del(struct device *dev); -int device_for_each_child(struct device *dev, void *data, - int (*fn)(struct device *dev, void *data)); -int device_for_each_child_reverse(struct device *dev, void *data, - int (*fn)(struct device *dev, void *data)); -struct device *device_find_child(struct device *dev, void *data, - int (*match)(struct device *dev, void *data)); -struct device *device_find_child_by_name(struct device *parent, - const char *name); -int device_rename(struct device *dev, const char *new_name); -int device_move(struct device *dev, struct device *new_parent, - enum dpm_order dpm_order); -int device_change_owner(struct device *dev, kuid_t kuid, kgid_t kgid); -const char *device_get_devnode(struct device *dev, umode_t *mode, kuid_t *uid, - kgid_t *gid, const char **tmp); -int device_is_dependent(struct device *dev, void *target); +extern int __must_check device_register(struct device *dev); +extern void device_unregister(struct device *dev); +extern void device_initialize(struct device *dev); +extern int __must_check device_add(struct device *dev); +extern void device_del(struct device *dev); +extern int device_for_each_child(struct device *dev, void *data, + int (*fn)(struct device *dev, void *data)); +extern int device_for_each_child_reverse(struct device *dev, void *data, + int (*fn)(struct device *dev, void *data)); +extern struct device *device_find_child(struct device *dev, void *data, + int (*match)(struct device *dev, void *data)); +extern int 
device_rename(struct device *dev, const char *new_name); +extern int device_move(struct device *dev, struct device *new_parent, + enum dpm_order dpm_order); +extern const char *device_get_devnode(struct device *dev, + umode_t *mode, kuid_t *uid, kgid_t *gid, + const char **tmp); static inline bool device_supports_offline(struct device *dev) { return dev->bus && dev->bus->offline && dev->bus->online; } -void lock_device_hotplug(void); -void unlock_device_hotplug(void); -int lock_device_hotplug_sysfs(void); -int device_offline(struct device *dev); -int device_online(struct device *dev); -void set_primary_fwnode(struct device *dev, struct fwnode_handle *fwnode); -void set_secondary_fwnode(struct device *dev, struct fwnode_handle *fwnode); -void device_set_of_node_from_dev(struct device *dev, const struct device *dev2); -void device_set_node(struct device *dev, struct fwnode_handle *fwnode); - -static inline int dev_num_vf(struct device *dev) -{ - if (dev->bus && dev->bus->num_vf) - return dev->bus->num_vf(dev); - return 0; -} +extern void lock_device_hotplug(void); +extern void unlock_device_hotplug(void); +extern int lock_device_hotplug_sysfs(void); +extern int device_offline(struct device *dev); +extern int device_online(struct device *dev); +extern void set_primary_fwnode(struct device *dev, struct fwnode_handle *fwnode); +extern void set_secondary_fwnode(struct device *dev, struct fwnode_handle *fwnode); /* * Root device objects for grouping under /sys/devices */ -struct device *__root_device_register(const char *name, struct module *owner); +extern struct device *__root_device_register(const char *name, + struct module *owner); /* This is a macro to avoid include problems with THIS_MODULE */ #define root_device_register(name) \ __root_device_register(name, THIS_MODULE) -void root_device_unregister(struct device *root); +extern void root_device_unregister(struct device *root); static inline void *dev_get_platdata(const struct device *dev) { @@ -880,58 +1056,32 @@ 
static inline void *dev_get_platdata(const struct device *dev) * Manual binding of a device to driver. See drivers/base/bus.c * for information on use. */ -int __must_check device_driver_attach(struct device_driver *drv, - struct device *dev); -int __must_check device_bind_driver(struct device *dev); -void device_release_driver(struct device *dev); -int __must_check device_attach(struct device *dev); -int __must_check driver_attach(struct device_driver *drv); -void device_initial_probe(struct device *dev); -int __must_check device_reprobe(struct device *dev); +extern int __must_check device_bind_driver(struct device *dev); +extern void device_release_driver(struct device *dev); +extern int __must_check device_attach(struct device *dev); +extern int __must_check driver_attach(struct device_driver *drv); +extern void device_initial_probe(struct device *dev); +extern int __must_check device_reprobe(struct device *dev); -bool device_is_bound(struct device *dev); +extern bool device_is_bound(struct device *dev); /* * Easy functions for dynamically creating devices on the fly */ -__printf(5, 6) struct device * -device_create(struct class *cls, struct device *parent, dev_t devt, - void *drvdata, const char *fmt, ...); -__printf(6, 7) struct device * -device_create_with_groups(struct class *cls, struct device *parent, dev_t devt, - void *drvdata, const struct attribute_group **groups, - const char *fmt, ...); -void device_destroy(struct class *cls, dev_t devt); - -int __must_check device_add_groups(struct device *dev, - const struct attribute_group **groups); -void device_remove_groups(struct device *dev, - const struct attribute_group **groups); - -static inline int __must_check device_add_group(struct device *dev, - const struct attribute_group *grp) -{ - const struct attribute_group *groups[] = { grp, NULL }; - - return device_add_groups(dev, groups); -} - -static inline void device_remove_group(struct device *dev, - const struct attribute_group *grp) -{ - const struct 
attribute_group *groups[] = { grp, NULL }; - - return device_remove_groups(dev, groups); -} - -int __must_check devm_device_add_groups(struct device *dev, - const struct attribute_group **groups); -void devm_device_remove_groups(struct device *dev, - const struct attribute_group **groups); -int __must_check devm_device_add_group(struct device *dev, - const struct attribute_group *grp); -void devm_device_remove_group(struct device *dev, - const struct attribute_group *grp); +extern __printf(5, 0) +struct device *device_create_vargs(struct class *cls, struct device *parent, + dev_t devt, void *drvdata, + const char *fmt, va_list vargs); +extern __printf(5, 6) +struct device *device_create(struct class *cls, struct device *parent, + dev_t devt, void *drvdata, + const char *fmt, ...); +extern __printf(6, 7) +struct device *device_create_with_groups(struct class *cls, + struct device *parent, dev_t devt, void *drvdata, + const struct attribute_group **groups, + const char *fmt, ...); +extern void device_destroy(struct class *cls, dev_t devt); /* * Platform "fixup" functions - allow the platform to have their say @@ -948,32 +1098,227 @@ extern int (*platform_notify_remove)(struct device *dev); * get_device - atomically increment the reference count for the device. 
* */ -struct device *get_device(struct device *dev); -void put_device(struct device *dev); -bool kill_device(struct device *dev); +extern struct device *get_device(struct device *dev); +extern void put_device(struct device *dev); #ifdef CONFIG_DEVTMPFS -int devtmpfs_mount(void); +extern int devtmpfs_create_node(struct device *dev); +extern int devtmpfs_delete_node(struct device *dev); +extern int devtmpfs_mount(const char *mntdir); #else -static inline int devtmpfs_mount(void) { return 0; } +static inline int devtmpfs_create_node(struct device *dev) { return 0; } +static inline int devtmpfs_delete_node(struct device *dev) { return 0; } +static inline int devtmpfs_mount(const char *mountpoint) { return 0; } #endif /* drivers/base/power/shutdown.c */ -void device_shutdown(void); +extern void device_shutdown(void); /* debugging and troubleshooting/diagnostic helpers. */ -const char *dev_driver_string(const struct device *dev); +extern const char *dev_driver_string(const struct device *dev); -/* Device links interface. 
*/ -struct device_link *device_link_add(struct device *consumer, - struct device *supplier, u32 flags); -void device_link_del(struct device_link *link); -void device_link_remove(void *consumer, struct device *supplier); -void device_links_supplier_sync_state_pause(void); -void device_links_supplier_sync_state_resume(void); + +#ifdef CONFIG_PRINTK + +extern __printf(3, 0) +int dev_vprintk_emit(int level, const struct device *dev, + const char *fmt, va_list args); +extern __printf(3, 4) +int dev_printk_emit(int level, const struct device *dev, const char *fmt, ...); extern __printf(3, 4) -int dev_err_probe(const struct device *dev, int err, const char *fmt, ...); +void dev_printk(const char *level, const struct device *dev, + const char *fmt, ...); +extern __printf(2, 3) +void dev_emerg(const struct device *dev, const char *fmt, ...); +extern __printf(2, 3) +void dev_alert(const struct device *dev, const char *fmt, ...); +extern __printf(2, 3) +void dev_crit(const struct device *dev, const char *fmt, ...); +extern __printf(2, 3) +void dev_err(const struct device *dev, const char *fmt, ...); +extern __printf(2, 3) +void dev_warn(const struct device *dev, const char *fmt, ...); +extern __printf(2, 3) +void dev_notice(const struct device *dev, const char *fmt, ...); +extern __printf(2, 3) +void _dev_info(const struct device *dev, const char *fmt, ...); + +#else + +static inline __printf(3, 0) +int dev_vprintk_emit(int level, const struct device *dev, + const char *fmt, va_list args) +{ return 0; } +static inline __printf(3, 4) +int dev_printk_emit(int level, const struct device *dev, const char *fmt, ...) +{ return 0; } + +static inline void __dev_printk(const char *level, const struct device *dev, + struct va_format *vaf) +{} +static inline __printf(3, 4) +void dev_printk(const char *level, const struct device *dev, + const char *fmt, ...) +{} + +static inline __printf(2, 3) +void dev_emerg(const struct device *dev, const char *fmt, ...) 
+{} +static inline __printf(2, 3) +void dev_crit(const struct device *dev, const char *fmt, ...) +{} +static inline __printf(2, 3) +void dev_alert(const struct device *dev, const char *fmt, ...) +{} +static inline __printf(2, 3) +void dev_err(const struct device *dev, const char *fmt, ...) +{} +static inline __printf(2, 3) +void dev_warn(const struct device *dev, const char *fmt, ...) +{} +static inline __printf(2, 3) +void dev_notice(const struct device *dev, const char *fmt, ...) +{} +static inline __printf(2, 3) +void _dev_info(const struct device *dev, const char *fmt, ...) +{} + +#endif + +/* + * Stupid hackaround for existing uses of non-printk uses dev_info + * + * Note that the definition of dev_info below is actually _dev_info + * and a macro is used to avoid redefining dev_info + */ + +#define dev_info(dev, fmt, arg...) _dev_info(dev, fmt, ##arg) + +#if defined(CONFIG_DYNAMIC_DEBUG) +#define dev_dbg(dev, format, ...) \ +do { \ + dynamic_dev_dbg(dev, format, ##__VA_ARGS__); \ +} while (0) +#elif defined(DEBUG) +#define dev_dbg(dev, format, arg...) \ + dev_printk(KERN_DEBUG, dev, format, ##arg) +#else +#define dev_dbg(dev, format, arg...) \ +({ \ + if (0) \ + dev_printk(KERN_DEBUG, dev, format, ##arg); \ +}) +#endif + +#ifdef CONFIG_PRINTK +#define dev_level_once(dev_level, dev, fmt, ...) \ +do { \ + static bool __print_once __read_mostly; \ + \ + if (!__print_once) { \ + __print_once = true; \ + dev_level(dev, fmt, ##__VA_ARGS__); \ + } \ +} while (0) +#else +#define dev_level_once(dev_level, dev, fmt, ...) \ +do { \ + if (0) \ + dev_level(dev, fmt, ##__VA_ARGS__); \ +} while (0) +#endif + +#define dev_emerg_once(dev, fmt, ...) \ + dev_level_once(dev_emerg, dev, fmt, ##__VA_ARGS__) +#define dev_alert_once(dev, fmt, ...) \ + dev_level_once(dev_alert, dev, fmt, ##__VA_ARGS__) +#define dev_crit_once(dev, fmt, ...) \ + dev_level_once(dev_crit, dev, fmt, ##__VA_ARGS__) +#define dev_err_once(dev, fmt, ...) 
\ + dev_level_once(dev_err, dev, fmt, ##__VA_ARGS__) +#define dev_warn_once(dev, fmt, ...) \ + dev_level_once(dev_warn, dev, fmt, ##__VA_ARGS__) +#define dev_notice_once(dev, fmt, ...) \ + dev_level_once(dev_notice, dev, fmt, ##__VA_ARGS__) +#define dev_info_once(dev, fmt, ...) \ + dev_level_once(dev_info, dev, fmt, ##__VA_ARGS__) +#define dev_dbg_once(dev, fmt, ...) \ + dev_level_once(dev_dbg, dev, fmt, ##__VA_ARGS__) + +#define dev_level_ratelimited(dev_level, dev, fmt, ...) \ +do { \ + static DEFINE_RATELIMIT_STATE(_rs, \ + DEFAULT_RATELIMIT_INTERVAL, \ + DEFAULT_RATELIMIT_BURST); \ + if (__ratelimit(&_rs)) \ + dev_level(dev, fmt, ##__VA_ARGS__); \ +} while (0) + +#define dev_emerg_ratelimited(dev, fmt, ...) \ + dev_level_ratelimited(dev_emerg, dev, fmt, ##__VA_ARGS__) +#define dev_alert_ratelimited(dev, fmt, ...) \ + dev_level_ratelimited(dev_alert, dev, fmt, ##__VA_ARGS__) +#define dev_crit_ratelimited(dev, fmt, ...) \ + dev_level_ratelimited(dev_crit, dev, fmt, ##__VA_ARGS__) +#define dev_err_ratelimited(dev, fmt, ...) \ + dev_level_ratelimited(dev_err, dev, fmt, ##__VA_ARGS__) +#define dev_warn_ratelimited(dev, fmt, ...) \ + dev_level_ratelimited(dev_warn, dev, fmt, ##__VA_ARGS__) +#define dev_notice_ratelimited(dev, fmt, ...) \ + dev_level_ratelimited(dev_notice, dev, fmt, ##__VA_ARGS__) +#define dev_info_ratelimited(dev, fmt, ...) \ + dev_level_ratelimited(dev_info, dev, fmt, ##__VA_ARGS__) +#if defined(CONFIG_DYNAMIC_DEBUG) +/* descriptor check is first to prevent flooding with "callbacks suppressed" */ +#define dev_dbg_ratelimited(dev, fmt, ...) \ +do { \ + static DEFINE_RATELIMIT_STATE(_rs, \ + DEFAULT_RATELIMIT_INTERVAL, \ + DEFAULT_RATELIMIT_BURST); \ + DEFINE_DYNAMIC_DEBUG_METADATA(descriptor, fmt); \ + if (unlikely(descriptor.flags & _DPRINTK_FLAGS_PRINT) && \ + __ratelimit(&_rs)) \ + __dynamic_dev_dbg(&descriptor, dev, fmt, \ + ##__VA_ARGS__); \ +} while (0) +#elif defined(DEBUG) +#define dev_dbg_ratelimited(dev, fmt, ...) 
\ +do { \ + static DEFINE_RATELIMIT_STATE(_rs, \ + DEFAULT_RATELIMIT_INTERVAL, \ + DEFAULT_RATELIMIT_BURST); \ + if (__ratelimit(&_rs)) \ + dev_printk(KERN_DEBUG, dev, fmt, ##__VA_ARGS__); \ +} while (0) +#else +#define dev_dbg_ratelimited(dev, fmt, ...) \ +do { \ + if (0) \ + dev_printk(KERN_DEBUG, dev, fmt, ##__VA_ARGS__); \ +} while (0) +#endif + +#ifdef VERBOSE_DEBUG +#define dev_vdbg dev_dbg +#else +#define dev_vdbg(dev, format, arg...) \ +({ \ + if (0) \ + dev_printk(KERN_DEBUG, dev, format, ##arg); \ +}) +#endif + +/* + * dev_WARN*() acts like dev_printk(), but with the key difference of + * using WARN/WARN_ONCE to include file/line information and a backtrace. + */ +#define dev_WARN(dev, format, arg...) \ + WARN(1, "%s %s: " format, dev_driver_string(dev), dev_name(dev), ## arg); + +#define dev_WARN_ONCE(dev, condition, format, arg...) \ + WARN_ONCE(condition, "%s %s: " format, \ + dev_driver_string(dev), dev_name(dev), ## arg) /* Create alias, so I can be autoloaded. */ #define MODULE_ALIAS_CHARDEV(major,minor) \ @@ -987,4 +1332,52 @@ extern long sysfs_deprecated; #define sysfs_deprecated 0 #endif +/** + * module_driver() - Helper macro for drivers that don't do anything + * special in module init/exit. This eliminates a lot of boilerplate. + * Each module may only use this macro once, and calling it replaces + * module_init() and module_exit(). + * + * @__driver: driver name + * @__register: register function for this driver type + * @__unregister: unregister function for this driver type + * @...: Additional arguments to be passed to __register and __unregister. + * + * Use this macro to construct bus specific macros for registering + * drivers, and do not use it on its own. + */ +#define module_driver(__driver, __register, __unregister, ...) 
\ +static int __init __driver##_init(void) \ +{ \ + return __register(&(__driver) , ##__VA_ARGS__); \ +} \ +module_init(__driver##_init); \ +static void __exit __driver##_exit(void) \ +{ \ + __unregister(&(__driver) , ##__VA_ARGS__); \ +} \ +module_exit(__driver##_exit); + +/** + * builtin_driver() - Helper macro for drivers that don't do anything + * special in init and have no exit. This eliminates some boilerplate. + * Each driver may only use this macro once, and calling it replaces + * device_initcall (or in some cases, the legacy __initcall). This is + * meant to be a direct parallel of module_driver() above but without + * the __exit stuff that is not used for builtin cases. + * + * @__driver: driver name + * @__register: register function for this driver type + * @...: Additional arguments to be passed to __register + * + * Use this macro to construct bus specific macros for registering + * drivers, and do not use it on its own. + */ +#define builtin_driver(__driver, __register, ...) 
\ +static int __init __driver##_init(void) \ +{ \ + return __register(&(__driver) , ##__VA_ARGS__); \ +} \ +device_initcall(__driver##_init); + #endif /* _DEVICE_H_ */ diff --git a/include/linux/device_cgroup.h b/include/linux/device_cgroup.h index d02f32b751..8b64221b43 100644 --- a/include/linux/device_cgroup.h +++ b/include/linux/device_cgroup.h @@ -1,65 +1,17 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #include -#define DEVCG_ACC_MKNOD 1 -#define DEVCG_ACC_READ 2 -#define DEVCG_ACC_WRITE 4 -#define DEVCG_ACC_MASK (DEVCG_ACC_MKNOD | DEVCG_ACC_READ | DEVCG_ACC_WRITE) - -#define DEVCG_DEV_BLOCK 1 -#define DEVCG_DEV_CHAR 2 -#define DEVCG_DEV_ALL 4 /* this represents all devices */ - - -#if defined(CONFIG_CGROUP_DEVICE) || defined(CONFIG_CGROUP_BPF) -int devcgroup_check_permission(short type, u32 major, u32 minor, - short access); +#ifdef CONFIG_CGROUP_DEVICE +extern int __devcgroup_inode_permission(struct inode *inode, int mask); +extern int devcgroup_inode_mknod(int mode, dev_t dev); static inline int devcgroup_inode_permission(struct inode *inode, int mask) { - short type, access = 0; - if (likely(!inode->i_rdev)) return 0; - - if (S_ISBLK(inode->i_mode)) - type = DEVCG_DEV_BLOCK; - else if (S_ISCHR(inode->i_mode)) - type = DEVCG_DEV_CHAR; - else + if (!S_ISBLK(inode->i_mode) && !S_ISCHR(inode->i_mode)) return 0; - - if (mask & MAY_WRITE) - access |= DEVCG_ACC_WRITE; - if (mask & MAY_READ) - access |= DEVCG_ACC_READ; - - return devcgroup_check_permission(type, imajor(inode), iminor(inode), - access); + return __devcgroup_inode_permission(inode, mask); } - -static inline int devcgroup_inode_mknod(int mode, dev_t dev) -{ - short type; - - if (!S_ISBLK(mode) && !S_ISCHR(mode)) - return 0; - - if (S_ISCHR(mode) && dev == WHITEOUT_DEV) - return 0; - - if (S_ISBLK(mode)) - type = DEVCG_DEV_BLOCK; - else - type = DEVCG_DEV_CHAR; - - return devcgroup_check_permission(type, MAJOR(dev), MINOR(dev), - DEVCG_ACC_MKNOD); -} - #else -static inline int 
devcgroup_check_permission(short type, u32 major, u32 minor, - short access) -{ return 0; } static inline int devcgroup_inode_permission(struct inode *inode, int mask) { return 0; } static inline int devcgroup_inode_mknod(int mode, dev_t dev) diff --git a/include/linux/devpts_fs.h b/include/linux/devpts_fs.h index 45f746a48d..277ab9af9a 100644 --- a/include/linux/devpts_fs.h +++ b/include/linux/devpts_fs.h @@ -1,10 +1,13 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ /* -*- linux-c -*- --------------------------------------------------------- * * * linux/include/linux/devpts_fs.h * * Copyright 1998-2004 H. Peter Anvin -- All Rights Reserved * + * This file is part of the Linux kernel and is made available under + * the terms of the GNU General Public License, version 2, or at your + * option, any later version, incorporated herein by reference. + * * ------------------------------------------------------------------------- */ #ifndef _LINUX_DEVPTS_FS_H @@ -16,7 +19,6 @@ struct pts_fs_info; -struct vfsmount *devpts_mntget(struct file *, struct pts_fs_info *); struct pts_fs_info *devpts_acquire(struct file *); void devpts_release(struct pts_fs_info *); @@ -30,15 +32,6 @@ void *devpts_get_priv(struct dentry *); /* unlink */ void devpts_pty_kill(struct dentry *); -/* in pty.c */ -int ptm_open_peer(struct file *master, struct tty_struct *tty, int flags); - -#else -static inline int -ptm_open_peer(struct file *master, struct tty_struct *tty, int flags) -{ - return -EIO; -} #endif diff --git a/include/linux/digsig.h b/include/linux/digsig.h index 2ace69e410..6f85a070bb 100644 --- a/include/linux/digsig.h +++ b/include/linux/digsig.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright (C) 2011 Nokia Corporation * Copyright (C) 2011 Intel Corporation @@ -6,6 +5,11 @@ * Author: * Dmitry Kasatkin * + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + 
* the Free Software Foundation, version 2 of the License. + * */ #ifndef _DIGSIG_H @@ -29,7 +33,7 @@ struct pubkey_hdr { uint32_t timestamp; /* key made, always 0 for now */ uint8_t algo; uint8_t nmpi; - char mpi[]; + char mpi[0]; } __packed; struct signature_hdr { @@ -39,7 +43,7 @@ struct signature_hdr { uint8_t hash; uint8_t keyid[8]; uint8_t nmpi; - char mpi[]; + char mpi[0]; } __packed; #if defined(CONFIG_SIGNATURE) || defined(CONFIG_SIGNATURE_MODULE) diff --git a/include/linux/dio.h b/include/linux/dio.h index 5abd07361e..2cc0fd0046 100644 --- a/include/linux/dio.h +++ b/include/linux/dio.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ /* header file for DIO boards for the HP300 architecture. * Maybe this should handle DIO-II later? * The general structure of this is vaguely based on how @@ -247,6 +246,11 @@ extern int dio_create_sysfs_dev_files(struct dio_dev *); /* New-style probing */ extern int dio_register_driver(struct dio_driver *); extern void dio_unregister_driver(struct dio_driver *); +extern const struct dio_device_id *dio_match_device(const struct dio_device_id *ids, const struct dio_dev *z); +static inline struct dio_driver *dio_dev_driver(const struct dio_dev *d) +{ + return d->driver; +} #define dio_resource_start(d) ((d)->resource.start) #define dio_resource_end(d) ((d)->resource.end) diff --git a/include/linux/dirent.h b/include/linux/dirent.h index 99002220cd..f072fb8d10 100644 --- a/include/linux/dirent.h +++ b/include/linux/dirent.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_DIRENT_H #define _LINUX_DIRENT_H @@ -7,7 +6,7 @@ struct linux_dirent64 { s64 d_off; unsigned short d_reclen; unsigned char d_type; - char d_name[]; + char d_name[0]; }; #endif diff --git a/include/linux/dlm.h b/include/linux/dlm.h index ff951e9f6f..d02da2c6fc 100644 --- a/include/linux/dlm.h +++ b/include/linux/dlm.h @@ -1,10 +1,12 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ 
/****************************************************************************** ******************************************************************************* ** ** Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved. ** Copyright (C) 2004-2011 Red Hat, Inc. All rights reserved. ** +** This copyrighted material is made available to anyone wishing to use, +** modify, copy, or redistribute it subject to the terms and conditions +** of the GNU General Public License v.2. ** ******************************************************************************* ******************************************************************************/ diff --git a/include/linux/dlm_plock.h b/include/linux/dlm_plock.h index e6d76e8715..95ad387a77 100644 --- a/include/linux/dlm_plock.h +++ b/include/linux/dlm_plock.h @@ -1,6 +1,9 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright (C) 2005-2008 Red Hat, Inc. All rights reserved. + * + * This copyrighted material is made available to anyone wishing to use, + * modify, copy, or redistribute it subject to the terms and conditions + * of the GNU General Public License v.2. 
*/ #ifndef __DLM_PLOCK_DOT_H__ #define __DLM_PLOCK_DOT_H__ diff --git a/include/linux/dm-io.h b/include/linux/dm-io.h index a52c6580cc..b91b023def 100644 --- a/include/linux/dm-io.h +++ b/include/linux/dm-io.h @@ -58,7 +58,7 @@ struct dm_io_notify { struct dm_io_client; struct dm_io_request { int bi_op; /* REQ_OP */ - int bi_op_flags; /* req_flag_bits */ + int bi_op_flags; /* rq_flag_bits */ struct dm_io_memory mem; /* Memory to use for io */ struct dm_io_notify notify; /* Synchronous if notify.fn is NULL */ struct dm_io_client *client; /* Client memory handler */ diff --git a/include/linux/dm-kcopyd.h b/include/linux/dm-kcopyd.h index c1707ee5b5..f486d636b8 100644 --- a/include/linux/dm-kcopyd.h +++ b/include/linux/dm-kcopyd.h @@ -20,7 +20,6 @@ #define DM_KCOPYD_MAX_REGIONS 8 #define DM_KCOPYD_IGNORE_ERROR 1 -#define DM_KCOPYD_WRITE_SEQ 2 struct dm_kcopyd_throttle { unsigned throttle; @@ -51,7 +50,6 @@ MODULE_PARM_DESC(name, description) struct dm_kcopyd_client; struct dm_kcopyd_client *dm_kcopyd_client_create(struct dm_kcopyd_throttle *throttle); void dm_kcopyd_client_destroy(struct dm_kcopyd_client *kc); -void dm_kcopyd_client_flush(struct dm_kcopyd_client *kc); /* * Submit a copy job to kcopyd. This is built on top of the @@ -63,9 +61,9 @@ void dm_kcopyd_client_flush(struct dm_kcopyd_client *kc); typedef void (*dm_kcopyd_notify_fn)(int read_err, unsigned long write_err, void *context); -void dm_kcopyd_copy(struct dm_kcopyd_client *kc, struct dm_io_region *from, - unsigned num_dests, struct dm_io_region *dests, - unsigned flags, dm_kcopyd_notify_fn fn, void *context); +int dm_kcopyd_copy(struct dm_kcopyd_client *kc, struct dm_io_region *from, + unsigned num_dests, struct dm_io_region *dests, + unsigned flags, dm_kcopyd_notify_fn fn, void *context); /* * Prepare a callback and submit it via the kcopyd thread. 
@@ -82,9 +80,9 @@ void *dm_kcopyd_prepare_callback(struct dm_kcopyd_client *kc, dm_kcopyd_notify_fn fn, void *context); void dm_kcopyd_do_callback(void *job, int read_err, unsigned long write_err); -void dm_kcopyd_zero(struct dm_kcopyd_client *kc, - unsigned num_dests, struct dm_io_region *dests, - unsigned flags, dm_kcopyd_notify_fn fn, void *context); +int dm_kcopyd_zero(struct dm_kcopyd_client *kc, + unsigned num_dests, struct dm_io_region *dests, + unsigned flags, dm_kcopyd_notify_fn fn, void *context); #endif /* __KERNEL__ */ #endif /* _LINUX_DM_KCOPYD_H */ diff --git a/include/linux/dm9000.h b/include/linux/dm9000.h index df0341dbb4..841925fbfe 100644 --- a/include/linux/dm9000.h +++ b/include/linux/dm9000.h @@ -1,10 +1,14 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* include/linux/dm9000.h * * Copyright (c) 2004 Simtec Electronics * Ben Dooks * * Header file for dm9000 platform data + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * */ #ifndef __DM9000_PLATFORM_DATA diff --git a/include/linux/dma-buf.h b/include/linux/dma-buf.h index 8b32b4bdd5..e0b0741ae6 100644 --- a/include/linux/dma-buf.h +++ b/include/linux/dma-buf.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * Header file for dma buffer sharing framework. * @@ -9,18 +8,29 @@ * Arnd Bergmann , Rob Clark and * Daniel Vetter for their support in creation and * refining of this idea. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published by + * the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+ * + * You should have received a copy of the GNU General Public License along with + * this program. If not, see . */ #ifndef __DMA_BUF_H__ #define __DMA_BUF_H__ -#include #include #include #include #include #include #include -#include +#include #include struct device; @@ -29,491 +39,132 @@ struct dma_buf_attachment; /** * struct dma_buf_ops - operations possible on struct dma_buf + * @attach: [optional] allows different devices to 'attach' themselves to the + * given buffer. It might return -EBUSY to signal that backing storage + * is already allocated and incompatible with the requirements + * of requesting device. + * @detach: [optional] detach a given device from this buffer. + * @map_dma_buf: returns list of scatter pages allocated, increases usecount + * of the buffer. Requires atleast one attach to be called + * before. Returned sg list should already be mapped into + * _device_ address space. This call may sleep. May also return + * -EINTR. Should return -EINVAL if attach hasn't been called yet. + * @unmap_dma_buf: decreases usecount of buffer, might deallocate scatter + * pages. + * @release: release this buffer; to be called after the last dma_buf_put. + * @begin_cpu_access: [optional] called before cpu access to invalidate cpu + * caches and allocate backing storage (if not yet done) + * respectively pin the object into memory. + * @end_cpu_access: [optional] called after cpu access to flush caches. + * @kmap_atomic: maps a page from the buffer into kernel address + * space, users may not block until the subsequent unmap call. + * This callback must not sleep. + * @kunmap_atomic: [optional] unmaps a atomically mapped page from the buffer. + * This Callback must not sleep. + * @kmap: maps a page from the buffer into kernel address space. + * @kunmap: [optional] unmaps a page from the buffer. + * @mmap: used to expose the backing storage to userspace. 
Note that the + * mapping needs to be coherent - if the exporter doesn't directly + * support this, it needs to fake coherency by shooting down any ptes + * when transitioning away from the cpu domain. * @vmap: [optional] creates a virtual mapping for the buffer into kernel * address space. Same restrictions as for vmap and friends apply. * @vunmap: [optional] unmaps a vmap from the buffer */ struct dma_buf_ops { - /** - * @cache_sgt_mapping: - * - * If true the framework will cache the first mapping made for each - * attachment. This avoids creating mappings for attachments multiple - * times. - */ - bool cache_sgt_mapping; + int (*attach)(struct dma_buf *, struct device *, + struct dma_buf_attachment *); - /** - * @attach: - * - * This is called from dma_buf_attach() to make sure that a given - * &dma_buf_attachment.dev can access the provided &dma_buf. Exporters - * which support buffer objects in special locations like VRAM or - * device-specific carveout areas should check whether the buffer could - * be move to system memory (or directly accessed by the provided - * device), and otherwise need to fail the attach operation. - * - * The exporter should also in general check whether the current - * allocation fulfills the DMA constraints of the new device. If this - * is not the case, and the allocation cannot be moved, it should also - * fail the attach operation. - * - * Any exporter-private housekeeping data can be stored in the - * &dma_buf_attachment.priv pointer. - * - * This callback is optional. - * - * Returns: - * - * 0 on success, negative error code on failure. It might return -EBUSY - * to signal that backing storage is already allocated and incompatible - * with the requirements of requesting device. - */ - int (*attach)(struct dma_buf *, struct dma_buf_attachment *); - - /** - * @detach: - * - * This is called by dma_buf_detach() to release a &dma_buf_attachment. 
- * Provided so that exporters can clean up any housekeeping for an - * &dma_buf_attachment. - * - * This callback is optional. - */ void (*detach)(struct dma_buf *, struct dma_buf_attachment *); - /** - * @pin: - * - * This is called by dma_buf_pin() and lets the exporter know that the - * DMA-buf can't be moved any more. The exporter should pin the buffer - * into system memory to make sure it is generally accessible by other - * devices. - * - * This is called with the &dmabuf.resv object locked and is mutual - * exclusive with @cache_sgt_mapping. - * - * This is called automatically for non-dynamic importers from - * dma_buf_attach(). - * - * Note that similar to non-dynamic exporters in their @map_dma_buf - * callback the driver must guarantee that the memory is available for - * use and cleared of any old data by the time this function returns. - * Drivers which pipeline their buffer moves internally must wait for - * all moves and clears to complete. - * - * Returns: - * - * 0 on success, negative error code on failure. - */ - int (*pin)(struct dma_buf_attachment *attach); - - /** - * @unpin: - * - * This is called by dma_buf_unpin() and lets the exporter know that the - * DMA-buf can be moved again. - * - * This is called with the dmabuf->resv object locked and is mutual - * exclusive with @cache_sgt_mapping. - * - * This callback is optional. - */ - void (*unpin)(struct dma_buf_attachment *attach); - - /** - * @map_dma_buf: - * - * This is called by dma_buf_map_attachment() and is used to map a - * shared &dma_buf into device address space, and it is mandatory. It - * can only be called if @attach has been called successfully. - * - * This call may sleep, e.g. when the backing storage first needs to be - * allocated, or moved to a location suitable for all currently attached - * devices. 
- * - * Note that any specific buffer attributes required for this function - * should get added to device_dma_parameters accessible via - * &device.dma_params from the &dma_buf_attachment. The @attach callback - * should also check these constraints. - * - * If this is being called for the first time, the exporter can now - * choose to scan through the list of attachments for this buffer, - * collate the requirements of the attached devices, and choose an - * appropriate backing storage for the buffer. - * - * Based on enum dma_data_direction, it might be possible to have - * multiple users accessing at the same time (for reading, maybe), or - * any other kind of sharing that the exporter might wish to make - * available to buffer-users. - * - * This is always called with the dmabuf->resv object locked when - * the dynamic_mapping flag is true. - * - * Note that for non-dynamic exporters the driver must guarantee that - * that the memory is available for use and cleared of any old data by - * the time this function returns. Drivers which pipeline their buffer - * moves internally must wait for all moves and clears to complete. - * Dynamic exporters do not need to follow this rule: For non-dynamic - * importers the buffer is already pinned through @pin, which has the - * same requirements. Dynamic importers otoh are required to obey the - * dma_resv fences. - * - * Returns: - * - * A &sg_table scatter list of the backing storage of the DMA buffer, - * already mapped into the device address space of the &device attached - * with the provided &dma_buf_attachment. The addresses and lengths in - * the scatter list are PAGE_SIZE aligned. - * - * On failure, returns a negative error value wrapped into a pointer. - * May also return -EINTR when a signal was received while being - * blocked. - * - * Note that exporters should not try to cache the scatter list, or - * return the same one for multiple calls. 
Caching is done either by the - * DMA-BUF code (for non-dynamic importers) or the importer. Ownership - * of the scatter list is transferred to the caller, and returned by - * @unmap_dma_buf. + /* For {map,unmap}_dma_buf below, any specific buffer attributes + * required should get added to device_dma_parameters accessible + * via dev->dma_params. */ struct sg_table * (*map_dma_buf)(struct dma_buf_attachment *, - enum dma_data_direction); - /** - * @unmap_dma_buf: - * - * This is called by dma_buf_unmap_attachment() and should unmap and - * release the &sg_table allocated in @map_dma_buf, and it is mandatory. - * For static dma_buf handling this might also unpin the backing - * storage if this is the last mapping of the DMA buffer. - */ + enum dma_data_direction); void (*unmap_dma_buf)(struct dma_buf_attachment *, - struct sg_table *, - enum dma_data_direction); - + struct sg_table *, + enum dma_data_direction); /* TODO: Add try_map_dma_buf version, to return immed with -EBUSY * if the call would block. */ - /** - * @release: - * - * Called after the last dma_buf_put to release the &dma_buf, and - * mandatory. - */ + /* after final dma_buf_put() */ void (*release)(struct dma_buf *); - /** - * @begin_cpu_access: - * - * This is called from dma_buf_begin_cpu_access() and allows the - * exporter to ensure that the memory is actually coherent for cpu - * access. The exporter also needs to ensure that cpu access is coherent - * for the access direction. The direction can be used by the exporter - * to optimize the cache flushing, i.e. access with a different - * direction (read instead of write) might return stale or even bogus - * data (e.g. when the exporter needs to copy the data to temporary - * storage). - * - * Note that this is both called through the DMA_BUF_IOCTL_SYNC IOCTL - * command for userspace mappings established through @mmap, and also - * for kernel mappings established with @vmap. - * - * This callback is optional. 
- * - * Returns: - * - * 0 on success or a negative error code on failure. This can for - * example fail when the backing storage can't be allocated. Can also - * return -ERESTARTSYS or -EINTR when the call has been interrupted and - * needs to be restarted. - */ int (*begin_cpu_access)(struct dma_buf *, enum dma_data_direction); - - /** - * @end_cpu_access: - * - * This is called from dma_buf_end_cpu_access() when the importer is - * done accessing the CPU. The exporter can use this to flush caches and - * undo anything else done in @begin_cpu_access. - * - * This callback is optional. - * - * Returns: - * - * 0 on success or a negative error code on failure. Can return - * -ERESTARTSYS or -EINTR when the call has been interrupted and needs - * to be restarted. - */ int (*end_cpu_access)(struct dma_buf *, enum dma_data_direction); + void *(*kmap_atomic)(struct dma_buf *, unsigned long); + void (*kunmap_atomic)(struct dma_buf *, unsigned long, void *); + void *(*kmap)(struct dma_buf *, unsigned long); + void (*kunmap)(struct dma_buf *, unsigned long, void *); - /** - * @mmap: - * - * This callback is used by the dma_buf_mmap() function - * - * Note that the mapping needs to be incoherent, userspace is expected - * to bracket CPU access using the DMA_BUF_IOCTL_SYNC interface. - * - * Because dma-buf buffers have invariant size over their lifetime, the - * dma-buf core checks whether a vma is too large and rejects such - * mappings. The exporter hence does not need to duplicate this check. - * Drivers do not need to check this themselves. - * - * If an exporter needs to manually flush caches and hence needs to fake - * coherency for mmap support, it needs to be able to zap all the ptes - * pointing at the backing storage. Now linux mm needs a struct - * address_space associated with the struct file stored in vma->vm_file - * to do that with the function unmap_mapping_range. 
But the dma_buf - * framework only backs every dma_buf fd with the anon_file struct file, - * i.e. all dma_bufs share the same file. - * - * Hence exporters need to setup their own file (and address_space) - * association by setting vma->vm_file and adjusting vma->vm_pgoff in - * the dma_buf mmap callback. In the specific case of a gem driver the - * exporter could use the shmem file already provided by gem (and set - * vm_pgoff = 0). Exporters can then zap ptes by unmapping the - * corresponding range of the struct address_space associated with their - * own file. - * - * This callback is optional. - * - * Returns: - * - * 0 on success or a negative error code on failure. - */ int (*mmap)(struct dma_buf *, struct vm_area_struct *vma); - int (*vmap)(struct dma_buf *dmabuf, struct dma_buf_map *map); - void (*vunmap)(struct dma_buf *dmabuf, struct dma_buf_map *map); + void *(*vmap)(struct dma_buf *); + void (*vunmap)(struct dma_buf *, void *vaddr); }; /** * struct dma_buf - shared buffer object - * - * This represents a shared buffer, created by calling dma_buf_export(). The - * userspace representation is a normal file descriptor, which can be created by - * calling dma_buf_fd(). - * - * Shared dma buffers are reference counted using dma_buf_put() and - * get_dma_buf(). - * - * Device DMA access is handled by the separate &struct dma_buf_attachment. + * @size: size of the buffer + * @file: file pointer used for sharing buffers across, and for refcounting. + * @attachments: list of dma_buf_attachment that denotes all devices attached. + * @ops: dma_buf_ops associated with this buffer object. + * @lock: used internally to serialize list manipulation, attach/detach and vmap/unmap + * @vmapping_counter: used internally to refcnt the vmaps + * @vmap_ptr: the current vmap ptr if vmapping_counter > 0 + * @exp_name: name of the exporter; useful for debugging. + * @owner: pointer to exporter module; used for refcounting when exporter is a + * kernel module. 
+ * @list_node: node for dma_buf accounting and debugging. + * @priv: exporter specific private data for this buffer object. + * @resv: reservation object linked to this dma-buf + * @poll: for userspace poll support + * @cb_excl: for userspace poll support + * @cb_shared: for userspace poll support */ struct dma_buf { - /** - * @size: - * - * Size of the buffer; invariant over the lifetime of the buffer. - */ size_t size; - - /** - * @file: - * - * File pointer used for sharing buffers across, and for refcounting. - * See dma_buf_get() and dma_buf_put(). - */ struct file *file; - - /** - * @attachments: - * - * List of dma_buf_attachment that denotes all devices attached, - * protected by &dma_resv lock @resv. - */ struct list_head attachments; - - /** @ops: dma_buf_ops associated with this buffer object. */ const struct dma_buf_ops *ops; - - /** - * @lock: - * - * Used internally to serialize list manipulation, attach/detach and - * vmap/unmap. Note that in many cases this is superseeded by - * dma_resv_lock() on @resv. - */ struct mutex lock; - - /** - * @vmapping_counter: - * - * Used internally to refcnt the vmaps returned by dma_buf_vmap(). - * Protected by @lock. - */ unsigned vmapping_counter; - - /** - * @vmap_ptr: - * The current vmap ptr if @vmapping_counter > 0. Protected by @lock. - */ - struct dma_buf_map vmap_ptr; - - /** - * @exp_name: - * - * Name of the exporter; useful for debugging. See the - * DMA_BUF_SET_NAME IOCTL. - */ + void *vmap_ptr; const char *exp_name; - - /** - * @name: - * - * Userspace-provided name; useful for accounting and debugging, - * protected by dma_resv_lock() on @resv and @name_lock for read access. - */ - const char *name; - - /** @name_lock: Spinlock to protect name acces for read access. */ - spinlock_t name_lock; - - /** - * @owner: - * - * Pointer to exporter module; used for refcounting when exporter is a - * kernel module. - */ struct module *owner; - - /** @list_node: node for dma_buf accounting and debugging. 
*/ struct list_head list_node; - - /** @priv: exporter specific private data for this buffer object. */ void *priv; + struct reservation_object *resv; - /** - * @resv: - * - * Reservation object linked to this dma-buf. - * - * IMPLICIT SYNCHRONIZATION RULES: - * - * Drivers which support implicit synchronization of buffer access as - * e.g. exposed in `Implicit Fence Poll Support`_ must follow the - * below rules. - * - * - Drivers must add a shared fence through dma_resv_add_shared_fence() - * for anything the userspace API considers a read access. This highly - * depends upon the API and window system. - * - * - Similarly drivers must set the exclusive fence through - * dma_resv_add_excl_fence() for anything the userspace API considers - * write access. - * - * - Drivers may just always set the exclusive fence, since that only - * causes unecessarily synchronization, but no correctness issues. - * - * - Some drivers only expose a synchronous userspace API with no - * pipelining across drivers. These do not set any fences for their - * access. An example here is v4l. - * - * DYNAMIC IMPORTER RULES: - * - * Dynamic importers, see dma_buf_attachment_is_dynamic(), have - * additional constraints on how they set up fences: - * - * - Dynamic importers must obey the exclusive fence and wait for it to - * signal before allowing access to the buffer's underlying storage - * through the device. - * - * - Dynamic importers should set fences for any access that they can't - * disable immediately from their &dma_buf_attach_ops.move_notify - * callback. 
- */ - struct dma_resv *resv; - - /** @poll: for userspace poll support */ + /* poll support */ wait_queue_head_t poll; - /** @cb_excl: for userspace poll support */ - /** @cb_shared: for userspace poll support */ struct dma_buf_poll_cb_t { - struct dma_fence_cb cb; + struct fence_cb cb; wait_queue_head_t *poll; - __poll_t active; + unsigned long active; } cb_excl, cb_shared; -#ifdef CONFIG_DMABUF_SYSFS_STATS - /** - * @sysfs_entry: - * - * For exposing information about this buffer in sysfs. See also - * `DMA-BUF statistics`_ for the uapi this enables. - */ - struct dma_buf_sysfs_entry { - struct kobject kobj; - struct dma_buf *dmabuf; - } *sysfs_entry; -#endif -}; - -/** - * struct dma_buf_attach_ops - importer operations for an attachment - * - * Attachment operations implemented by the importer. - */ -struct dma_buf_attach_ops { - /** - * @allow_peer2peer: - * - * If this is set to true the importer must be able to handle peer - * resources without struct pages. - */ - bool allow_peer2peer; - - /** - * @move_notify: [optional] notification that the DMA-buf is moving - * - * If this callback is provided the framework can avoid pinning the - * backing store while mappings exists. - * - * This callback is called with the lock of the reservation object - * associated with the dma_buf held and the mapping function must be - * called with this lock held as well. This makes sure that no mapping - * is created concurrently with an ongoing move operation. - * - * Mappings stay valid and are not directly affected by this callback. - * But the DMA-buf can now be in a different physical location, so all - * mappings should be destroyed and re-created as soon as possible. - * - * New mappings can be created after this callback returns, and will - * point to the new location of the DMA-buf. - */ - void (*move_notify)(struct dma_buf_attachment *attach); }; /** * struct dma_buf_attachment - holds device-buffer attachment data * @dmabuf: buffer for this attachment. 
* @dev: device attached to the buffer. - * @node: list of dma_buf_attachment, protected by dma_resv lock of the dmabuf. - * @sgt: cached mapping. - * @dir: direction of cached mapping. - * @peer2peer: true if the importer can handle peer resources without pages. + * @node: list of dma_buf_attachment. * @priv: exporter specific attachment data. - * @importer_ops: importer operations for this attachment, if provided - * dma_buf_map/unmap_attachment() must be called with the dma_resv lock held. - * @importer_priv: importer specific attachment data. * * This structure holds the attachment information between the dma_buf buffer * and its user device(s). The list contains one attachment struct per device * attached to the buffer. - * - * An attachment is created by calling dma_buf_attach(), and released again by - * calling dma_buf_detach(). The DMA mapping itself needed to initiate a - * transfer is created by dma_buf_map_attachment() and freed again by calling - * dma_buf_unmap_attachment(). */ struct dma_buf_attachment { struct dma_buf *dmabuf; struct device *dev; struct list_head node; - struct sg_table *sgt; - enum dma_data_direction dir; - bool peer2peer; - const struct dma_buf_attach_ops *importer_ops; - void *importer_priv; void *priv; }; @@ -522,7 +173,7 @@ struct dma_buf_attachment { * @exp_name: name of the exporter - useful for debugging. 
* @owner: pointer to exporter module - used for refcounting kernel module * @ops: Attach allocator-defined dma buf ops to the new buffer - * @size: Size of the buffer - invariant over the lifetime of the buffer + * @size: Size of the buffer * @flags: mode flags for the file * @resv: reservation-object, NULL to allocate default one * @priv: Attach private data of allocator to this buffer @@ -536,16 +187,14 @@ struct dma_buf_export_info { const struct dma_buf_ops *ops; size_t size; int flags; - struct dma_resv *resv; + struct reservation_object *resv; void *priv; }; /** - * DEFINE_DMA_BUF_EXPORT_INFO - helper macro for exporters - * @name: export-info name + * helper macro for exporters; zeros and fills in most common values * - * DEFINE_DMA_BUF_EXPORT_INFO macro defines the &struct dma_buf_export_info, - * zeroes it out and pre-populates exp_name in it. + * @name: export-info name */ #define DEFINE_DMA_BUF_EXPORT_INFO(name) \ struct dma_buf_export_info name = { .exp_name = KBUILD_MODNAME, \ @@ -565,43 +214,10 @@ static inline void get_dma_buf(struct dma_buf *dmabuf) get_file(dmabuf->file); } -/** - * dma_buf_is_dynamic - check if a DMA-buf uses dynamic mappings. - * @dmabuf: the DMA-buf to check - * - * Returns true if a DMA-buf exporter wants to be called with the dma_resv - * locked for the map/unmap callbacks, false if it doesn't wants to be called - * with the lock held. - */ -static inline bool dma_buf_is_dynamic(struct dma_buf *dmabuf) -{ - return !!dmabuf->ops->pin; -} - -/** - * dma_buf_attachment_is_dynamic - check if a DMA-buf attachment uses dynamic - * mappings - * @attach: the DMA-buf attachment to check - * - * Returns true if a DMA-buf importer wants to call the map/unmap functions with - * the dma_resv lock held. 
- */ -static inline bool -dma_buf_attachment_is_dynamic(struct dma_buf_attachment *attach) -{ - return !!attach->importer_ops; -} - struct dma_buf_attachment *dma_buf_attach(struct dma_buf *dmabuf, - struct device *dev); -struct dma_buf_attachment * -dma_buf_dynamic_attach(struct dma_buf *dmabuf, struct device *dev, - const struct dma_buf_attach_ops *importer_ops, - void *importer_priv); + struct device *dev); void dma_buf_detach(struct dma_buf *dmabuf, - struct dma_buf_attachment *attach); -int dma_buf_pin(struct dma_buf_attachment *attach); -void dma_buf_unpin(struct dma_buf_attachment *attach); + struct dma_buf_attachment *dmabuf_attach); struct dma_buf *dma_buf_export(const struct dma_buf_export_info *exp_info); @@ -613,14 +229,17 @@ struct sg_table *dma_buf_map_attachment(struct dma_buf_attachment *, enum dma_data_direction); void dma_buf_unmap_attachment(struct dma_buf_attachment *, struct sg_table *, enum dma_data_direction); -void dma_buf_move_notify(struct dma_buf *dma_buf); int dma_buf_begin_cpu_access(struct dma_buf *dma_buf, enum dma_data_direction dir); int dma_buf_end_cpu_access(struct dma_buf *dma_buf, enum dma_data_direction dir); +void *dma_buf_kmap_atomic(struct dma_buf *, unsigned long); +void dma_buf_kunmap_atomic(struct dma_buf *, unsigned long, void *); +void *dma_buf_kmap(struct dma_buf *, unsigned long); +void dma_buf_kunmap(struct dma_buf *, unsigned long, void *); int dma_buf_mmap(struct dma_buf *, struct vm_area_struct *, unsigned long); -int dma_buf_vmap(struct dma_buf *dmabuf, struct dma_buf_map *map); -void dma_buf_vunmap(struct dma_buf *dmabuf, struct dma_buf_map *map); +void *dma_buf_vmap(struct dma_buf *); +void dma_buf_vunmap(struct dma_buf *, void *vaddr); #endif /* __DMA_BUF_H__ */ diff --git a/include/linux/dma-contiguous.h b/include/linux/dma-contiguous.h new file mode 100644 index 0000000000..fec734df15 --- /dev/null +++ b/include/linux/dma-contiguous.h @@ -0,0 +1,164 @@ +#ifndef __LINUX_CMA_H +#define __LINUX_CMA_H + +/* + * 
Contiguous Memory Allocator for DMA mapping framework + * Copyright (c) 2010-2011 by Samsung Electronics. + * Written by: + * Marek Szyprowski + * Michal Nazarewicz + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License or (at your optional) any later version of the license. + */ + +/* + * Contiguous Memory Allocator + * + * The Contiguous Memory Allocator (CMA) makes it possible to + * allocate big contiguous chunks of memory after the system has + * booted. + * + * Why is it needed? + * + * Various devices on embedded systems have no scatter-getter and/or + * IO map support and require contiguous blocks of memory to + * operate. They include devices such as cameras, hardware video + * coders, etc. + * + * Such devices often require big memory buffers (a full HD frame + * is, for instance, more then 2 mega pixels large, i.e. more than 6 + * MB of memory), which makes mechanisms such as kmalloc() or + * alloc_page() ineffective. + * + * At the same time, a solution where a big memory region is + * reserved for a device is suboptimal since often more memory is + * reserved then strictly required and, moreover, the memory is + * inaccessible to page system even if device drivers don't use it. + * + * CMA tries to solve this issue by operating on memory regions + * where only movable pages can be allocated from. This way, kernel + * can use the memory for pagecache and when device driver requests + * it, allocated pages can be migrated. + * + * Driver usage + * + * CMA should not be used by the device drivers directly. It is + * only a helper framework for dma-mapping subsystem. 
+ * + * For more information, see kernel-docs in drivers/base/dma-contiguous.c + */ + +#ifdef __KERNEL__ + +#include + +struct cma; +struct page; + +#ifdef CONFIG_DMA_CMA + +extern struct cma *dma_contiguous_default_area; + +static inline struct cma *dev_get_cma_area(struct device *dev) +{ + if (dev && dev->cma_area) + return dev->cma_area; + return dma_contiguous_default_area; +} + +static inline void dev_set_cma_area(struct device *dev, struct cma *cma) +{ + if (dev) + dev->cma_area = cma; +} + +static inline void dma_contiguous_set_default(struct cma *cma) +{ + dma_contiguous_default_area = cma; +} + +void dma_contiguous_reserve(phys_addr_t addr_limit); + +int __init dma_contiguous_reserve_area(phys_addr_t size, phys_addr_t base, + phys_addr_t limit, struct cma **res_cma, + bool fixed); + +/** + * dma_declare_contiguous() - reserve area for contiguous memory handling + * for particular device + * @dev: Pointer to device structure. + * @size: Size of the reserved memory. + * @base: Start address of the reserved memory (optional, 0 for any). + * @limit: End address of the reserved memory (optional, 0 for any). + * + * This function reserves memory for specified device. It should be + * called by board specific code when early allocator (memblock or bootmem) + * is still activate. 
+ */ + +static inline int dma_declare_contiguous(struct device *dev, phys_addr_t size, + phys_addr_t base, phys_addr_t limit) +{ + struct cma *cma; + int ret; + ret = dma_contiguous_reserve_area(size, base, limit, &cma, true); + if (ret == 0) + dev_set_cma_area(dev, cma); + + return ret; +} + +struct page *dma_alloc_from_contiguous(struct device *dev, size_t count, + unsigned int order); +bool dma_release_from_contiguous(struct device *dev, struct page *pages, + int count); + +#else + +static inline struct cma *dev_get_cma_area(struct device *dev) +{ + return NULL; +} + +static inline void dev_set_cma_area(struct device *dev, struct cma *cma) { } + +static inline void dma_contiguous_set_default(struct cma *cma) { } + +static inline void dma_contiguous_reserve(phys_addr_t limit) { } + +static inline int dma_contiguous_reserve_area(phys_addr_t size, phys_addr_t base, + phys_addr_t limit, struct cma **res_cma, + bool fixed) +{ + return -ENOSYS; +} + +static inline +int dma_declare_contiguous(struct device *dev, phys_addr_t size, + phys_addr_t base, phys_addr_t limit) +{ + return -ENOSYS; +} + +static inline +struct page *dma_alloc_from_contiguous(struct device *dev, size_t count, + unsigned int order) +{ + return NULL; +} + +static inline +bool dma_release_from_contiguous(struct device *dev, struct page *pages, + int count) +{ + return false; +} + +#endif + +#endif + +#endif diff --git a/include/linux/dma-debug.h b/include/linux/dma-debug.h new file mode 100644 index 0000000000..c7d844f09c --- /dev/null +++ b/include/linux/dma-debug.h @@ -0,0 +1,213 @@ +/* + * Copyright (C) 2008 Advanced Micro Devices, Inc. + * + * Author: Joerg Roedel + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation. 
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ + +#ifndef __DMA_DEBUG_H +#define __DMA_DEBUG_H + +#include + +struct device; +struct scatterlist; +struct bus_type; + +#ifdef CONFIG_DMA_API_DEBUG + +extern void dma_debug_add_bus(struct bus_type *bus); + +extern void dma_debug_init(u32 num_entries); + +extern int dma_debug_resize_entries(u32 num_entries); + +extern void debug_dma_map_page(struct device *dev, struct page *page, + size_t offset, size_t size, + int direction, dma_addr_t dma_addr, + bool map_single); + +extern void debug_dma_mapping_error(struct device *dev, dma_addr_t dma_addr); + +extern void debug_dma_unmap_page(struct device *dev, dma_addr_t addr, + size_t size, int direction, bool map_single); + +extern void debug_dma_map_sg(struct device *dev, struct scatterlist *sg, + int nents, int mapped_ents, int direction); + +extern void debug_dma_unmap_sg(struct device *dev, struct scatterlist *sglist, + int nelems, int dir); + +extern void debug_dma_alloc_coherent(struct device *dev, size_t size, + dma_addr_t dma_addr, void *virt); + +extern void debug_dma_free_coherent(struct device *dev, size_t size, + void *virt, dma_addr_t addr); + +extern void debug_dma_map_resource(struct device *dev, phys_addr_t addr, + size_t size, int direction, + dma_addr_t dma_addr); + +extern void debug_dma_unmap_resource(struct device *dev, dma_addr_t dma_addr, + size_t size, int direction); + +extern void debug_dma_sync_single_for_cpu(struct device *dev, + dma_addr_t dma_handle, size_t size, + int direction); + +extern void 
debug_dma_sync_single_for_device(struct device *dev, + dma_addr_t dma_handle, + size_t size, int direction); + +extern void debug_dma_sync_single_range_for_cpu(struct device *dev, + dma_addr_t dma_handle, + unsigned long offset, + size_t size, + int direction); + +extern void debug_dma_sync_single_range_for_device(struct device *dev, + dma_addr_t dma_handle, + unsigned long offset, + size_t size, int direction); + +extern void debug_dma_sync_sg_for_cpu(struct device *dev, + struct scatterlist *sg, + int nelems, int direction); + +extern void debug_dma_sync_sg_for_device(struct device *dev, + struct scatterlist *sg, + int nelems, int direction); + +extern void debug_dma_dump_mappings(struct device *dev); + +extern void debug_dma_assert_idle(struct page *page); + +#else /* CONFIG_DMA_API_DEBUG */ + +static inline void dma_debug_add_bus(struct bus_type *bus) +{ +} + +static inline void dma_debug_init(u32 num_entries) +{ +} + +static inline int dma_debug_resize_entries(u32 num_entries) +{ + return 0; +} + +static inline void debug_dma_map_page(struct device *dev, struct page *page, + size_t offset, size_t size, + int direction, dma_addr_t dma_addr, + bool map_single) +{ +} + +static inline void debug_dma_mapping_error(struct device *dev, + dma_addr_t dma_addr) +{ +} + +static inline void debug_dma_unmap_page(struct device *dev, dma_addr_t addr, + size_t size, int direction, + bool map_single) +{ +} + +static inline void debug_dma_map_sg(struct device *dev, struct scatterlist *sg, + int nents, int mapped_ents, int direction) +{ +} + +static inline void debug_dma_unmap_sg(struct device *dev, + struct scatterlist *sglist, + int nelems, int dir) +{ +} + +static inline void debug_dma_alloc_coherent(struct device *dev, size_t size, + dma_addr_t dma_addr, void *virt) +{ +} + +static inline void debug_dma_free_coherent(struct device *dev, size_t size, + void *virt, dma_addr_t addr) +{ +} + +static inline void debug_dma_map_resource(struct device *dev, phys_addr_t addr, + 
size_t size, int direction, + dma_addr_t dma_addr) +{ +} + +static inline void debug_dma_unmap_resource(struct device *dev, + dma_addr_t dma_addr, size_t size, + int direction) +{ +} + +static inline void debug_dma_sync_single_for_cpu(struct device *dev, + dma_addr_t dma_handle, + size_t size, int direction) +{ +} + +static inline void debug_dma_sync_single_for_device(struct device *dev, + dma_addr_t dma_handle, + size_t size, int direction) +{ +} + +static inline void debug_dma_sync_single_range_for_cpu(struct device *dev, + dma_addr_t dma_handle, + unsigned long offset, + size_t size, + int direction) +{ +} + +static inline void debug_dma_sync_single_range_for_device(struct device *dev, + dma_addr_t dma_handle, + unsigned long offset, + size_t size, + int direction) +{ +} + +static inline void debug_dma_sync_sg_for_cpu(struct device *dev, + struct scatterlist *sg, + int nelems, int direction) +{ +} + +static inline void debug_dma_sync_sg_for_device(struct device *dev, + struct scatterlist *sg, + int nelems, int direction) +{ +} + +static inline void debug_dma_dump_mappings(struct device *dev) +{ +} + +static inline void debug_dma_assert_idle(struct page *page) +{ +} + +#endif /* CONFIG_DMA_API_DEBUG */ + +#endif /* __DMA_DEBUG_H */ diff --git a/include/linux/dma-direction.h b/include/linux/dma-direction.h index a2fe4571bc..95b6a82f59 100644 --- a/include/linux/dma-direction.h +++ b/include/linux/dma-direction.h @@ -1,18 +1,13 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_DMA_DIRECTION_H #define _LINUX_DMA_DIRECTION_H - +/* + * These definitions mirror those in pci.h, so they can be used + * interchangeably with their PCI_ counterparts. 
+ */ enum dma_data_direction { DMA_BIDIRECTIONAL = 0, DMA_TO_DEVICE = 1, DMA_FROM_DEVICE = 2, DMA_NONE = 3, }; - -static inline int valid_dma_direction(enum dma_data_direction dir) -{ - return dir == DMA_BIDIRECTIONAL || dir == DMA_TO_DEVICE || - dir == DMA_FROM_DEVICE; -} - -#endif /* _LINUX_DMA_DIRECTION_H */ +#endif diff --git a/include/linux/dma-iommu.h b/include/linux/dma-iommu.h index 24607dc3c2..32c589062b 100644 --- a/include/linux/dma-iommu.h +++ b/include/linux/dma-iommu.h @@ -1,63 +1,80 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright (C) 2014-2015 ARM Ltd. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . 
*/ #ifndef __DMA_IOMMU_H #define __DMA_IOMMU_H -#include -#include +#ifdef __KERNEL__ +#include #ifdef CONFIG_IOMMU_DMA -#include #include #include +int iommu_dma_init(void); + /* Domain management interface for IOMMU drivers */ int iommu_get_dma_cookie(struct iommu_domain *domain); -int iommu_get_msi_cookie(struct iommu_domain *domain, dma_addr_t base); void iommu_put_dma_cookie(struct iommu_domain *domain); /* Setup call for arch DMA mapping code */ -void iommu_setup_dma_ops(struct device *dev, u64 dma_base, u64 dma_limit); -int iommu_dma_init_fq(struct iommu_domain *domain); +int iommu_dma_init_domain(struct iommu_domain *domain, dma_addr_t base, + u64 size, struct device *dev); + +/* General helpers for DMA-API <-> IOMMU-API interaction */ +int dma_direction_to_prot(enum dma_data_direction dir, bool coherent); + +/* + * These implement the bulk of the relevant DMA mapping callbacks, but require + * the arch code to take care of attributes and cache maintenance + */ +struct page **iommu_dma_alloc(struct device *dev, size_t size, gfp_t gfp, + unsigned long attrs, int prot, dma_addr_t *handle, + void (*flush_page)(struct device *, const void *, phys_addr_t)); +void iommu_dma_free(struct device *dev, struct page **pages, size_t size, + dma_addr_t *handle); + +int iommu_dma_mmap(struct page **pages, size_t size, struct vm_area_struct *vma); + +dma_addr_t iommu_dma_map_page(struct device *dev, struct page *page, + unsigned long offset, size_t size, int prot); +int iommu_dma_map_sg(struct device *dev, struct scatterlist *sg, + int nents, int prot); + +/* + * Arch code with no special attribute handling may use these + * directly as DMA mapping callbacks for simplicity + */ +void iommu_dma_unmap_page(struct device *dev, dma_addr_t handle, size_t size, + enum dma_data_direction dir, unsigned long attrs); +void iommu_dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents, + enum dma_data_direction dir, unsigned long attrs); +int iommu_dma_supported(struct 
device *dev, u64 mask); +int iommu_dma_mapping_error(struct device *dev, dma_addr_t dma_addr); /* The DMA API isn't _quite_ the whole story, though... */ -/* - * iommu_dma_prepare_msi() - Map the MSI page in the IOMMU device - * - * The MSI page will be stored in @desc. - * - * Return: 0 on success otherwise an error describing the failure. - */ -int iommu_dma_prepare_msi(struct msi_desc *desc, phys_addr_t msi_addr); +void iommu_dma_map_msi_msg(int irq, struct msi_msg *msg); -/* Update the MSI message if required. */ -void iommu_dma_compose_msi_msg(struct msi_desc *desc, - struct msi_msg *msg); - -void iommu_dma_get_resv_regions(struct device *dev, struct list_head *list); - -void iommu_dma_free_cpu_cached_iovas(unsigned int cpu, - struct iommu_domain *domain); - -extern bool iommu_dma_forcedac; - -#else /* CONFIG_IOMMU_DMA */ +#else struct iommu_domain; -struct msi_desc; struct msi_msg; -struct device; -static inline void iommu_setup_dma_ops(struct device *dev, u64 dma_base, - u64 dma_limit) +static inline int iommu_dma_init(void) { -} - -static inline int iommu_dma_init_fq(struct iommu_domain *domain) -{ - return -EINVAL; + return 0; } static inline int iommu_get_dma_cookie(struct iommu_domain *domain) @@ -65,29 +82,14 @@ static inline int iommu_get_dma_cookie(struct iommu_domain *domain) return -ENODEV; } -static inline int iommu_get_msi_cookie(struct iommu_domain *domain, dma_addr_t base) -{ - return -ENODEV; -} - static inline void iommu_put_dma_cookie(struct iommu_domain *domain) { } -static inline int iommu_dma_prepare_msi(struct msi_desc *desc, - phys_addr_t msi_addr) -{ - return 0; -} - -static inline void iommu_dma_compose_msi_msg(struct msi_desc *desc, - struct msi_msg *msg) -{ -} - -static inline void iommu_dma_get_resv_regions(struct device *dev, struct list_head *list) +static inline void iommu_dma_map_msi_msg(int irq, struct msi_msg *msg) { } #endif /* CONFIG_IOMMU_DMA */ +#endif /* __KERNEL__ */ #endif /* __DMA_IOMMU_H */ diff --git 
a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h index dca2b1355b..45b9b691ff 100644 --- a/include/linux/dma-mapping.h +++ b/include/linux/dma-mapping.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_DMA_MAPPING_H #define _LINUX_DMA_MAPPING_H @@ -6,16 +5,20 @@ #include #include #include +#include #include #include +#include #include -#include /** * List of possible attributes associated with a DMA mapping. The semantics - * of each attribute should be defined in Documentation/core-api/dma-attributes.rst. + * of each attribute should be defined in Documentation/DMA-attributes.txt. + * + * DMA_ATTR_WRITE_BARRIER: DMA to a memory region with this attribute + * forces all pending DMA writes to complete. */ - +#define DMA_ATTR_WRITE_BARRIER (1UL << 0) /* * DMA_ATTR_WEAK_ORDERING: Specifies that reads and writes to the mapping * may be weakly ordered, that is that reads and writes may pass each other. @@ -26,6 +29,11 @@ * buffered to improve performance. */ #define DMA_ATTR_WRITE_COMBINE (1UL << 2) +/* + * DMA_ATTR_NON_CONSISTENT: Lets the platform to choose to return either + * consistent or non-consistent memory as it sees fit. + */ +#define DMA_ATTR_NON_CONSISTENT (1UL << 3) /* * DMA_ATTR_NO_KERNEL_MAPPING: Lets the platform to avoid creating a kernel * virtual mapping for the allocated buffer. @@ -55,368 +63,435 @@ #define DMA_ATTR_NO_WARN (1UL << 8) /* - * DMA_ATTR_PRIVILEGED: used to indicate that the buffer is fully - * accessible at an elevated privilege level (and ideally inaccessible or - * at least read-only at lesser-privileged levels). + * A dma_addr_t can hold any valid DMA or bus address for the platform. + * It can be given to a device to use as a DMA source or target. A CPU cannot + * reference a dma_addr_t directly because there may be translation between + * its physical address space and the bus address space. 
*/ -#define DMA_ATTR_PRIVILEGED (1UL << 9) +struct dma_map_ops { + void* (*alloc)(struct device *dev, size_t size, + dma_addr_t *dma_handle, gfp_t gfp, + unsigned long attrs); + void (*free)(struct device *dev, size_t size, + void *vaddr, dma_addr_t dma_handle, + unsigned long attrs); + int (*mmap)(struct device *, struct vm_area_struct *, + void *, dma_addr_t, size_t, + unsigned long attrs); -/* - * A dma_addr_t can hold any valid DMA or bus address for the platform. It can - * be given to a device to use as a DMA source or target. It is specific to a - * given device and there may be a translation between the CPU physical address - * space and the bus address space. - * - * DMA_MAPPING_ERROR is the magic error code if a mapping failed. It should not - * be used directly in drivers, but checked for using dma_mapping_error() - * instead. - */ -#define DMA_MAPPING_ERROR (~(dma_addr_t)0) + int (*get_sgtable)(struct device *dev, struct sg_table *sgt, void *, + dma_addr_t, size_t, unsigned long attrs); + + dma_addr_t (*map_page)(struct device *dev, struct page *page, + unsigned long offset, size_t size, + enum dma_data_direction dir, + unsigned long attrs); + void (*unmap_page)(struct device *dev, dma_addr_t dma_handle, + size_t size, enum dma_data_direction dir, + unsigned long attrs); + /* + * map_sg returns 0 on error and a value > 0 on success. + * It should never return a value < 0. 
+ */ + int (*map_sg)(struct device *dev, struct scatterlist *sg, + int nents, enum dma_data_direction dir, + unsigned long attrs); + void (*unmap_sg)(struct device *dev, + struct scatterlist *sg, int nents, + enum dma_data_direction dir, + unsigned long attrs); + dma_addr_t (*map_resource)(struct device *dev, phys_addr_t phys_addr, + size_t size, enum dma_data_direction dir, + unsigned long attrs); + void (*unmap_resource)(struct device *dev, dma_addr_t dma_handle, + size_t size, enum dma_data_direction dir, + unsigned long attrs); + void (*sync_single_for_cpu)(struct device *dev, + dma_addr_t dma_handle, size_t size, + enum dma_data_direction dir); + void (*sync_single_for_device)(struct device *dev, + dma_addr_t dma_handle, size_t size, + enum dma_data_direction dir); + void (*sync_sg_for_cpu)(struct device *dev, + struct scatterlist *sg, int nents, + enum dma_data_direction dir); + void (*sync_sg_for_device)(struct device *dev, + struct scatterlist *sg, int nents, + enum dma_data_direction dir); + int (*mapping_error)(struct device *dev, dma_addr_t dma_addr); + int (*dma_supported)(struct device *dev, u64 mask); + int (*set_dma_mask)(struct device *dev, u64 mask); +#ifdef ARCH_HAS_DMA_GET_REQUIRED_MASK + u64 (*get_required_mask)(struct device *dev); +#endif + int is_phys; +} __do_const; + +extern struct dma_map_ops dma_noop_ops; #define DMA_BIT_MASK(n) (((n) == 64) ? 
~0ULL : ((1ULL<<(n))-1)) -#ifdef CONFIG_DMA_API_DEBUG -void debug_dma_mapping_error(struct device *dev, dma_addr_t dma_addr); -void debug_dma_map_single(struct device *dev, const void *addr, - unsigned long len); +#define DMA_MASK_NONE 0x0ULL + +static inline int valid_dma_direction(int dma_direction) +{ + return ((dma_direction == DMA_BIDIRECTIONAL) || + (dma_direction == DMA_TO_DEVICE) || + (dma_direction == DMA_FROM_DEVICE)); +} + +static inline int is_device_dma_capable(struct device *dev) +{ + return dev->dma_mask != NULL && *dev->dma_mask != DMA_MASK_NONE; +} + +#ifdef CONFIG_HAVE_GENERIC_DMA_COHERENT +/* + * These three functions are only for dma allocator. + * Don't use them in device drivers. + */ +int dma_alloc_from_coherent(struct device *dev, ssize_t size, + dma_addr_t *dma_handle, void **ret); +int dma_release_from_coherent(struct device *dev, int order, void *vaddr); + +int dma_mmap_from_coherent(struct device *dev, struct vm_area_struct *vma, + void *cpu_addr, size_t size, int *ret); #else -static inline void debug_dma_mapping_error(struct device *dev, - dma_addr_t dma_addr) -{ -} -static inline void debug_dma_map_single(struct device *dev, const void *addr, - unsigned long len) -{ -} -#endif /* CONFIG_DMA_API_DEBUG */ +#define dma_alloc_from_coherent(dev, size, handle, ret) (0) +#define dma_release_from_coherent(dev, order, vaddr) (0) +#define dma_mmap_from_coherent(dev, vma, vaddr, order, ret) (0) +#endif /* CONFIG_HAVE_GENERIC_DMA_COHERENT */ #ifdef CONFIG_HAS_DMA -static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr) +#include +#else +/* + * Define the dma api to allow compilation but not linking of + * dma dependent code. 
Code that depends on the dma-mapping + * API needs to set 'depends on HAS_DMA' in its Kconfig + */ +extern struct dma_map_ops bad_dma_ops; +static inline struct dma_map_ops *get_dma_ops(struct device *dev) { - debug_dma_mapping_error(dev, dma_addr); - - if (unlikely(dma_addr == DMA_MAPPING_ERROR)) - return -ENOMEM; - return 0; -} - -dma_addr_t dma_map_page_attrs(struct device *dev, struct page *page, - size_t offset, size_t size, enum dma_data_direction dir, - unsigned long attrs); -void dma_unmap_page_attrs(struct device *dev, dma_addr_t addr, size_t size, - enum dma_data_direction dir, unsigned long attrs); -unsigned int dma_map_sg_attrs(struct device *dev, struct scatterlist *sg, - int nents, enum dma_data_direction dir, unsigned long attrs); -void dma_unmap_sg_attrs(struct device *dev, struct scatterlist *sg, - int nents, enum dma_data_direction dir, - unsigned long attrs); -int dma_map_sgtable(struct device *dev, struct sg_table *sgt, - enum dma_data_direction dir, unsigned long attrs); -dma_addr_t dma_map_resource(struct device *dev, phys_addr_t phys_addr, - size_t size, enum dma_data_direction dir, unsigned long attrs); -void dma_unmap_resource(struct device *dev, dma_addr_t addr, size_t size, - enum dma_data_direction dir, unsigned long attrs); -void dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr, size_t size, - enum dma_data_direction dir); -void dma_sync_single_for_device(struct device *dev, dma_addr_t addr, - size_t size, enum dma_data_direction dir); -void dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, - int nelems, enum dma_data_direction dir); -void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, - int nelems, enum dma_data_direction dir); -void *dma_alloc_attrs(struct device *dev, size_t size, dma_addr_t *dma_handle, - gfp_t flag, unsigned long attrs); -void dma_free_attrs(struct device *dev, size_t size, void *cpu_addr, - dma_addr_t dma_handle, unsigned long attrs); -void *dmam_alloc_attrs(struct 
device *dev, size_t size, dma_addr_t *dma_handle, - gfp_t gfp, unsigned long attrs); -void dmam_free_coherent(struct device *dev, size_t size, void *vaddr, - dma_addr_t dma_handle); -int dma_get_sgtable_attrs(struct device *dev, struct sg_table *sgt, - void *cpu_addr, dma_addr_t dma_addr, size_t size, - unsigned long attrs); -int dma_mmap_attrs(struct device *dev, struct vm_area_struct *vma, - void *cpu_addr, dma_addr_t dma_addr, size_t size, - unsigned long attrs); -bool dma_can_mmap(struct device *dev); -int dma_supported(struct device *dev, u64 mask); -int dma_set_mask(struct device *dev, u64 mask); -int dma_set_coherent_mask(struct device *dev, u64 mask); -u64 dma_get_required_mask(struct device *dev); -size_t dma_max_mapping_size(struct device *dev); -bool dma_need_sync(struct device *dev, dma_addr_t dma_addr); -unsigned long dma_get_merge_boundary(struct device *dev); -struct sg_table *dma_alloc_noncontiguous(struct device *dev, size_t size, - enum dma_data_direction dir, gfp_t gfp, unsigned long attrs); -void dma_free_noncontiguous(struct device *dev, size_t size, - struct sg_table *sgt, enum dma_data_direction dir); -void *dma_vmap_noncontiguous(struct device *dev, size_t size, - struct sg_table *sgt); -void dma_vunmap_noncontiguous(struct device *dev, void *vaddr); -int dma_mmap_noncontiguous(struct device *dev, struct vm_area_struct *vma, - size_t size, struct sg_table *sgt); -#else /* CONFIG_HAS_DMA */ -static inline dma_addr_t dma_map_page_attrs(struct device *dev, - struct page *page, size_t offset, size_t size, - enum dma_data_direction dir, unsigned long attrs) -{ - return DMA_MAPPING_ERROR; -} -static inline void dma_unmap_page_attrs(struct device *dev, dma_addr_t addr, - size_t size, enum dma_data_direction dir, unsigned long attrs) -{ -} -static inline unsigned int dma_map_sg_attrs(struct device *dev, - struct scatterlist *sg, int nents, enum dma_data_direction dir, - unsigned long attrs) -{ - return 0; -} -static inline void 
dma_unmap_sg_attrs(struct device *dev, - struct scatterlist *sg, int nents, enum dma_data_direction dir, - unsigned long attrs) -{ -} -static inline int dma_map_sgtable(struct device *dev, struct sg_table *sgt, - enum dma_data_direction dir, unsigned long attrs) -{ - return -EOPNOTSUPP; -} -static inline dma_addr_t dma_map_resource(struct device *dev, - phys_addr_t phys_addr, size_t size, enum dma_data_direction dir, - unsigned long attrs) -{ - return DMA_MAPPING_ERROR; -} -static inline void dma_unmap_resource(struct device *dev, dma_addr_t addr, - size_t size, enum dma_data_direction dir, unsigned long attrs) -{ -} -static inline void dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr, - size_t size, enum dma_data_direction dir) -{ -} -static inline void dma_sync_single_for_device(struct device *dev, - dma_addr_t addr, size_t size, enum dma_data_direction dir) -{ -} -static inline void dma_sync_sg_for_cpu(struct device *dev, - struct scatterlist *sg, int nelems, enum dma_data_direction dir) -{ -} -static inline void dma_sync_sg_for_device(struct device *dev, - struct scatterlist *sg, int nelems, enum dma_data_direction dir) -{ -} -static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr) -{ - return -ENOMEM; -} -static inline void *dma_alloc_attrs(struct device *dev, size_t size, - dma_addr_t *dma_handle, gfp_t flag, unsigned long attrs) -{ - return NULL; -} -static void dma_free_attrs(struct device *dev, size_t size, void *cpu_addr, - dma_addr_t dma_handle, unsigned long attrs) -{ -} -static inline void *dmam_alloc_attrs(struct device *dev, size_t size, - dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs) -{ - return NULL; -} -static inline void dmam_free_coherent(struct device *dev, size_t size, - void *vaddr, dma_addr_t dma_handle) -{ -} -static inline int dma_get_sgtable_attrs(struct device *dev, - struct sg_table *sgt, void *cpu_addr, dma_addr_t dma_addr, - size_t size, unsigned long attrs) -{ - return -ENXIO; -} -static 
inline int dma_mmap_attrs(struct device *dev, struct vm_area_struct *vma, - void *cpu_addr, dma_addr_t dma_addr, size_t size, - unsigned long attrs) -{ - return -ENXIO; -} -static inline bool dma_can_mmap(struct device *dev) -{ - return false; -} -static inline int dma_supported(struct device *dev, u64 mask) -{ - return 0; -} -static inline int dma_set_mask(struct device *dev, u64 mask) -{ - return -EIO; -} -static inline int dma_set_coherent_mask(struct device *dev, u64 mask) -{ - return -EIO; -} -static inline u64 dma_get_required_mask(struct device *dev) -{ - return 0; -} -static inline size_t dma_max_mapping_size(struct device *dev) -{ - return 0; -} -static inline bool dma_need_sync(struct device *dev, dma_addr_t dma_addr) -{ - return false; -} -static inline unsigned long dma_get_merge_boundary(struct device *dev) -{ - return 0; -} -static inline struct sg_table *dma_alloc_noncontiguous(struct device *dev, - size_t size, enum dma_data_direction dir, gfp_t gfp, - unsigned long attrs) -{ - return NULL; -} -static inline void dma_free_noncontiguous(struct device *dev, size_t size, - struct sg_table *sgt, enum dma_data_direction dir) -{ -} -static inline void *dma_vmap_noncontiguous(struct device *dev, size_t size, - struct sg_table *sgt) -{ - return NULL; -} -static inline void dma_vunmap_noncontiguous(struct device *dev, void *vaddr) -{ -} -static inline int dma_mmap_noncontiguous(struct device *dev, - struct vm_area_struct *vma, size_t size, struct sg_table *sgt) -{ - return -EINVAL; -} -#endif /* CONFIG_HAS_DMA */ - -struct page *dma_alloc_pages(struct device *dev, size_t size, - dma_addr_t *dma_handle, enum dma_data_direction dir, gfp_t gfp); -void dma_free_pages(struct device *dev, size_t size, struct page *page, - dma_addr_t dma_handle, enum dma_data_direction dir); -int dma_mmap_pages(struct device *dev, struct vm_area_struct *vma, - size_t size, struct page *page); - -static inline void *dma_alloc_noncoherent(struct device *dev, size_t size, - dma_addr_t 
*dma_handle, enum dma_data_direction dir, gfp_t gfp) -{ - struct page *page = dma_alloc_pages(dev, size, dma_handle, dir, gfp); - return page ? page_address(page) : NULL; -} - -static inline void dma_free_noncoherent(struct device *dev, size_t size, - void *vaddr, dma_addr_t dma_handle, enum dma_data_direction dir) -{ - dma_free_pages(dev, size, virt_to_page(vaddr), dma_handle, dir); + return &bad_dma_ops; } +#endif static inline dma_addr_t dma_map_single_attrs(struct device *dev, void *ptr, - size_t size, enum dma_data_direction dir, unsigned long attrs) + size_t size, + enum dma_data_direction dir, + unsigned long attrs) { - /* DMA must never operate on areas that might be remapped. */ - if (dev_WARN_ONCE(dev, is_vmalloc_addr(ptr), - "rejecting DMA map of vmalloc memory\n")) - return DMA_MAPPING_ERROR; - debug_dma_map_single(dev, ptr, size); - return dma_map_page_attrs(dev, virt_to_page(ptr), offset_in_page(ptr), - size, dir, attrs); + struct dma_map_ops *ops = get_dma_ops(dev); + dma_addr_t addr; + + kmemcheck_mark_initialized(ptr, size); + BUG_ON(!valid_dma_direction(dir)); + addr = ops->map_page(dev, virt_to_page(ptr), + offset_in_page(ptr), size, + dir, attrs); + debug_dma_map_page(dev, virt_to_page(ptr), + offset_in_page(ptr), size, + dir, addr, true); + return addr; } static inline void dma_unmap_single_attrs(struct device *dev, dma_addr_t addr, - size_t size, enum dma_data_direction dir, unsigned long attrs) + size_t size, + enum dma_data_direction dir, + unsigned long attrs) { - return dma_unmap_page_attrs(dev, addr, size, dir, attrs); + struct dma_map_ops *ops = get_dma_ops(dev); + + BUG_ON(!valid_dma_direction(dir)); + if (ops->unmap_page) + ops->unmap_page(dev, addr, size, dir, attrs); + debug_dma_unmap_page(dev, addr, size, dir, true); +} + +/* + * dma_maps_sg_attrs returns 0 on error and > 0 on success. + * It should never return a value < 0. 
+ */ +static inline int dma_map_sg_attrs(struct device *dev, struct scatterlist *sg, + int nents, enum dma_data_direction dir, + unsigned long attrs) +{ + struct dma_map_ops *ops = get_dma_ops(dev); + int i, ents; + struct scatterlist *s; + + for_each_sg(sg, s, nents, i) + kmemcheck_mark_initialized(sg_virt(s), s->length); + BUG_ON(!valid_dma_direction(dir)); + ents = ops->map_sg(dev, sg, nents, dir, attrs); + BUG_ON(ents < 0); + debug_dma_map_sg(dev, sg, nents, ents, dir); + + return ents; +} + +static inline void dma_unmap_sg_attrs(struct device *dev, struct scatterlist *sg, + int nents, enum dma_data_direction dir, + unsigned long attrs) +{ + struct dma_map_ops *ops = get_dma_ops(dev); + + BUG_ON(!valid_dma_direction(dir)); + debug_dma_unmap_sg(dev, sg, nents, dir); + if (ops->unmap_sg) + ops->unmap_sg(dev, sg, nents, dir, attrs); +} + +static inline dma_addr_t dma_map_page(struct device *dev, struct page *page, + size_t offset, size_t size, + enum dma_data_direction dir) +{ + struct dma_map_ops *ops = get_dma_ops(dev); + dma_addr_t addr; + + kmemcheck_mark_initialized(page_address(page) + offset, size); + BUG_ON(!valid_dma_direction(dir)); + addr = ops->map_page(dev, page, offset, size, dir, 0); + debug_dma_map_page(dev, page, offset, size, dir, addr, false); + + return addr; +} + +static inline void dma_unmap_page(struct device *dev, dma_addr_t addr, + size_t size, enum dma_data_direction dir) +{ + struct dma_map_ops *ops = get_dma_ops(dev); + + BUG_ON(!valid_dma_direction(dir)); + if (ops->unmap_page) + ops->unmap_page(dev, addr, size, dir, 0); + debug_dma_unmap_page(dev, addr, size, dir, false); +} + +static inline dma_addr_t dma_map_resource(struct device *dev, + phys_addr_t phys_addr, + size_t size, + enum dma_data_direction dir, + unsigned long attrs) +{ + struct dma_map_ops *ops = get_dma_ops(dev); + dma_addr_t addr; + + BUG_ON(!valid_dma_direction(dir)); + + /* Don't allow RAM to be mapped */ + BUG_ON(pfn_valid(PHYS_PFN(phys_addr))); + + addr = 
phys_addr; + if (ops->map_resource) + addr = ops->map_resource(dev, phys_addr, size, dir, attrs); + + debug_dma_map_resource(dev, phys_addr, size, dir, addr); + + return addr; +} + +static inline void dma_unmap_resource(struct device *dev, dma_addr_t addr, + size_t size, enum dma_data_direction dir, + unsigned long attrs) +{ + struct dma_map_ops *ops = get_dma_ops(dev); + + BUG_ON(!valid_dma_direction(dir)); + if (ops->unmap_resource) + ops->unmap_resource(dev, addr, size, dir, attrs); + debug_dma_unmap_resource(dev, addr, size, dir); +} + +static inline void dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr, + size_t size, + enum dma_data_direction dir) +{ + struct dma_map_ops *ops = get_dma_ops(dev); + + BUG_ON(!valid_dma_direction(dir)); + if (ops->sync_single_for_cpu) + ops->sync_single_for_cpu(dev, addr, size, dir); + debug_dma_sync_single_for_cpu(dev, addr, size, dir); +} + +static inline void dma_sync_single_for_device(struct device *dev, + dma_addr_t addr, size_t size, + enum dma_data_direction dir) +{ + struct dma_map_ops *ops = get_dma_ops(dev); + + BUG_ON(!valid_dma_direction(dir)); + if (ops->sync_single_for_device) + ops->sync_single_for_device(dev, addr, size, dir); + debug_dma_sync_single_for_device(dev, addr, size, dir); } static inline void dma_sync_single_range_for_cpu(struct device *dev, - dma_addr_t addr, unsigned long offset, size_t size, - enum dma_data_direction dir) + dma_addr_t addr, + unsigned long offset, + size_t size, + enum dma_data_direction dir) { - return dma_sync_single_for_cpu(dev, addr + offset, size, dir); + const struct dma_map_ops *ops = get_dma_ops(dev); + + BUG_ON(!valid_dma_direction(dir)); + if (ops->sync_single_for_cpu) + ops->sync_single_for_cpu(dev, addr + offset, size, dir); + debug_dma_sync_single_range_for_cpu(dev, addr, offset, size, dir); } static inline void dma_sync_single_range_for_device(struct device *dev, - dma_addr_t addr, unsigned long offset, size_t size, - enum dma_data_direction dir) + 
dma_addr_t addr, + unsigned long offset, + size_t size, + enum dma_data_direction dir) { - return dma_sync_single_for_device(dev, addr + offset, size, dir); + const struct dma_map_ops *ops = get_dma_ops(dev); + + BUG_ON(!valid_dma_direction(dir)); + if (ops->sync_single_for_device) + ops->sync_single_for_device(dev, addr + offset, size, dir); + debug_dma_sync_single_range_for_device(dev, addr, offset, size, dir); } -/** - * dma_unmap_sgtable - Unmap the given buffer for DMA - * @dev: The device for which to perform the DMA operation - * @sgt: The sg_table object describing the buffer - * @dir: DMA direction - * @attrs: Optional DMA attributes for the unmap operation - * - * Unmaps a buffer described by a scatterlist stored in the given sg_table - * object for the @dir DMA operation by the @dev device. After this function - * the ownership of the buffer is transferred back to the CPU domain. - */ -static inline void dma_unmap_sgtable(struct device *dev, struct sg_table *sgt, - enum dma_data_direction dir, unsigned long attrs) +static inline void +dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, + int nelems, enum dma_data_direction dir) { - dma_unmap_sg_attrs(dev, sgt->sgl, sgt->orig_nents, dir, attrs); + struct dma_map_ops *ops = get_dma_ops(dev); + + BUG_ON(!valid_dma_direction(dir)); + if (ops->sync_sg_for_cpu) + ops->sync_sg_for_cpu(dev, sg, nelems, dir); + debug_dma_sync_sg_for_cpu(dev, sg, nelems, dir); } -/** - * dma_sync_sgtable_for_cpu - Synchronize the given buffer for CPU access - * @dev: The device for which to perform the DMA operation - * @sgt: The sg_table object describing the buffer - * @dir: DMA direction - * - * Performs the needed cache synchronization and moves the ownership of the - * buffer back to the CPU domain, so it is safe to perform any access to it - * by the CPU. 
Before doing any further DMA operations, one has to transfer - * the ownership of the buffer back to the DMA domain by calling the - * dma_sync_sgtable_for_device(). - */ -static inline void dma_sync_sgtable_for_cpu(struct device *dev, - struct sg_table *sgt, enum dma_data_direction dir) +static inline void +dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, + int nelems, enum dma_data_direction dir) { - dma_sync_sg_for_cpu(dev, sgt->sgl, sgt->orig_nents, dir); -} + struct dma_map_ops *ops = get_dma_ops(dev); + + BUG_ON(!valid_dma_direction(dir)); + if (ops->sync_sg_for_device) + ops->sync_sg_for_device(dev, sg, nelems, dir); + debug_dma_sync_sg_for_device(dev, sg, nelems, dir); -/** - * dma_sync_sgtable_for_device - Synchronize the given buffer for DMA - * @dev: The device for which to perform the DMA operation - * @sgt: The sg_table object describing the buffer - * @dir: DMA direction - * - * Performs the needed cache synchronization and moves the ownership of the - * buffer back to the DMA domain, so it is safe to perform the DMA operation. - * Once finished, one has to call dma_sync_sgtable_for_cpu() or - * dma_unmap_sgtable(). 
- */ -static inline void dma_sync_sgtable_for_device(struct device *dev, - struct sg_table *sgt, enum dma_data_direction dir) -{ - dma_sync_sg_for_device(dev, sgt->sgl, sgt->orig_nents, dir); } #define dma_map_single(d, a, s, r) dma_map_single_attrs(d, a, s, r, 0) #define dma_unmap_single(d, a, s, r) dma_unmap_single_attrs(d, a, s, r, 0) #define dma_map_sg(d, s, n, r) dma_map_sg_attrs(d, s, n, r, 0) #define dma_unmap_sg(d, s, n, r) dma_unmap_sg_attrs(d, s, n, r, 0) -#define dma_map_page(d, p, o, s, r) dma_map_page_attrs(d, p, o, s, r, 0) -#define dma_unmap_page(d, a, s, r) dma_unmap_page_attrs(d, a, s, r, 0) -#define dma_get_sgtable(d, t, v, h, s) dma_get_sgtable_attrs(d, t, v, h, s, 0) + +extern int dma_common_mmap(struct device *dev, struct vm_area_struct *vma, + void *cpu_addr, dma_addr_t dma_addr, size_t size); + +void *dma_common_contiguous_remap(struct page *page, size_t size, + unsigned long vm_flags, + pgprot_t prot, const void *caller); + +void *dma_common_pages_remap(struct page **pages, size_t size, + unsigned long vm_flags, pgprot_t prot, + const void *caller); +void dma_common_free_remap(void *cpu_addr, size_t size, unsigned long vm_flags); + +/** + * dma_mmap_attrs - map a coherent DMA allocation into user space + * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices + * @vma: vm_area_struct describing requested user mapping + * @cpu_addr: kernel CPU-view address returned from dma_alloc_attrs + * @handle: device-view address returned from dma_alloc_attrs + * @size: size of memory originally requested in dma_alloc_attrs + * @attrs: attributes of mapping properties requested in dma_alloc_attrs + * + * Map a coherent DMA buffer previously allocated by dma_alloc_attrs + * into user space. The coherent DMA buffer must not be freed by the + * driver until the user space mapping has been released. 
+ */ +static inline int +dma_mmap_attrs(struct device *dev, struct vm_area_struct *vma, void *cpu_addr, + dma_addr_t dma_addr, size_t size, unsigned long attrs) +{ + struct dma_map_ops *ops = get_dma_ops(dev); + BUG_ON(!ops); + if (ops->mmap) + return ops->mmap(dev, vma, cpu_addr, dma_addr, size, attrs); + return dma_common_mmap(dev, vma, cpu_addr, dma_addr, size); +} + #define dma_mmap_coherent(d, v, c, h, s) dma_mmap_attrs(d, v, c, h, s, 0) -static inline void *dma_alloc_coherent(struct device *dev, size_t size, - dma_addr_t *dma_handle, gfp_t gfp) +int +dma_common_get_sgtable(struct device *dev, struct sg_table *sgt, + void *cpu_addr, dma_addr_t dma_addr, size_t size); + +static inline int +dma_get_sgtable_attrs(struct device *dev, struct sg_table *sgt, void *cpu_addr, + dma_addr_t dma_addr, size_t size, + unsigned long attrs) { - return dma_alloc_attrs(dev, size, dma_handle, gfp, - (gfp & __GFP_NOWARN) ? DMA_ATTR_NO_WARN : 0); + struct dma_map_ops *ops = get_dma_ops(dev); + BUG_ON(!ops); + if (ops->get_sgtable) + return ops->get_sgtable(dev, sgt, cpu_addr, dma_addr, size, + attrs); + return dma_common_get_sgtable(dev, sgt, cpu_addr, dma_addr, size); +} + +#define dma_get_sgtable(d, t, v, h, s) dma_get_sgtable_attrs(d, t, v, h, s, 0) + +#ifndef arch_dma_alloc_attrs +#define arch_dma_alloc_attrs(dev, flag) (true) +#endif + +static inline void *dma_alloc_attrs(struct device *dev, size_t size, + dma_addr_t *dma_handle, gfp_t flag, + unsigned long attrs) +{ + struct dma_map_ops *ops = get_dma_ops(dev); + void *cpu_addr; + + BUG_ON(!ops); + + if (dma_alloc_from_coherent(dev, size, dma_handle, &cpu_addr)) + return cpu_addr; + + if (!arch_dma_alloc_attrs(&dev, &flag)) + return NULL; + if (!ops->alloc) + return NULL; + + cpu_addr = ops->alloc(dev, size, dma_handle, flag, attrs); + debug_dma_alloc_coherent(dev, size, *dma_handle, cpu_addr); + return cpu_addr; +} + +static inline void dma_free_attrs(struct device *dev, size_t size, + void *cpu_addr, dma_addr_t dma_handle, 
+ unsigned long attrs) +{ + struct dma_map_ops *ops = get_dma_ops(dev); + + BUG_ON(!ops); + WARN_ON(irqs_disabled()); + + if (dma_release_from_coherent(dev, get_order(size), cpu_addr)) + return; + + if (!ops->free || !cpu_addr) + return; + + debug_dma_free_coherent(dev, size, cpu_addr, dma_handle); + ops->free(dev, size, cpu_addr, dma_handle, attrs); +} + +static inline void *dma_alloc_coherent(struct device *dev, size_t size, + dma_addr_t *dma_handle, gfp_t flag) +{ + return dma_alloc_attrs(dev, size, dma_handle, flag, 0); } static inline void dma_free_coherent(struct device *dev, size_t size, @@ -425,14 +500,81 @@ static inline void dma_free_coherent(struct device *dev, size_t size, return dma_free_attrs(dev, size, cpu_addr, dma_handle, 0); } +static inline void *dma_alloc_noncoherent(struct device *dev, size_t size, + dma_addr_t *dma_handle, gfp_t gfp) +{ + return dma_alloc_attrs(dev, size, dma_handle, gfp, + DMA_ATTR_NON_CONSISTENT); +} + +static inline void dma_free_noncoherent(struct device *dev, size_t size, + void *cpu_addr, dma_addr_t dma_handle) +{ + dma_free_attrs(dev, size, cpu_addr, dma_handle, + DMA_ATTR_NON_CONSISTENT); +} + +static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr) +{ + debug_dma_mapping_error(dev, dma_addr); + + if (get_dma_ops(dev)->mapping_error) + return get_dma_ops(dev)->mapping_error(dev, dma_addr); + +#ifdef DMA_ERROR_CODE + return dma_addr == DMA_ERROR_CODE; +#else + return 0; +#endif +} + +#ifndef HAVE_ARCH_DMA_SUPPORTED +static inline int dma_supported(struct device *dev, u64 mask) +{ + struct dma_map_ops *ops = get_dma_ops(dev); + + if (!ops) + return 0; + if (!ops->dma_supported) + return 1; + return ops->dma_supported(dev, mask); +} +#endif + +#ifndef HAVE_ARCH_DMA_SET_MASK +static inline int dma_set_mask(struct device *dev, u64 mask) +{ + struct dma_map_ops *ops = get_dma_ops(dev); + + if (ops->set_dma_mask) + return ops->set_dma_mask(dev, mask); + + if (!dev->dma_mask || !dma_supported(dev, mask)) + 
return -EIO; + *dev->dma_mask = mask; + return 0; +} +#endif static inline u64 dma_get_mask(struct device *dev) { - if (dev->dma_mask && *dev->dma_mask) + if (dev && dev->dma_mask && *dev->dma_mask) return *dev->dma_mask; return DMA_BIT_MASK(32); } +#ifdef CONFIG_ARCH_HAS_DMA_SET_COHERENT_MASK +int dma_set_coherent_mask(struct device *dev, u64 mask); +#else +static inline int dma_set_coherent_mask(struct device *dev, u64 mask) +{ + if (!dma_supported(dev, mask)) + return -EIO; + dev->coherent_dma_mask = mask; + return 0; +} +#endif + /* * Set both the DMA mask and the coherent DMA mask to the same thing. * Note that we don't check the return value from dma_set_coherent_mask() @@ -457,19 +599,17 @@ static inline int dma_coerce_mask_and_coherent(struct device *dev, u64 mask) return dma_set_mask_and_coherent(dev, mask); } -/** - * dma_addressing_limited - return if the device is addressing limited - * @dev: device to check - * - * Return %true if the devices DMA mask is too small to address all memory in - * the system, else %false. Lack of addressing bits is the prime reason for - * bounce buffering, but might not be the only one. 
- */ -static inline bool dma_addressing_limited(struct device *dev) -{ - return min_not_zero(dma_get_mask(dev), dev->bus_dma_limit) < - dma_get_required_mask(dev); -} +extern u64 dma_get_required_mask(struct device *dev); + +#ifndef arch_setup_dma_ops +static inline void arch_setup_dma_ops(struct device *dev, u64 dma_base, + u64 size, const struct iommu_ops *iommu, + bool coherent) { } +#endif + +#ifndef arch_teardown_dma_ops +static inline void arch_teardown_dma_ops(struct device *dev) { } +#endif static inline unsigned int dma_get_max_seg_size(struct device *dev) { @@ -478,7 +618,8 @@ static inline unsigned int dma_get_max_seg_size(struct device *dev) return SZ_64K; } -static inline int dma_set_max_seg_size(struct device *dev, unsigned int size) +static inline unsigned int dma_set_max_seg_size(struct device *dev, + unsigned int size) { if (dev->dma_parms) { dev->dma_parms->max_segment_size = size; @@ -491,26 +632,7 @@ static inline unsigned long dma_get_seg_boundary(struct device *dev) { if (dev->dma_parms && dev->dma_parms->segment_boundary_mask) return dev->dma_parms->segment_boundary_mask; - return ULONG_MAX; -} - -/** - * dma_get_seg_boundary_nr_pages - return the segment boundary in "page" units - * @dev: device to guery the boundary for - * @page_shift: ilog() of the IOMMU page size - * - * Return the segment boundary in IOMMU page units (which may be different from - * the CPU page size) for the passed in device. - * - * If @dev is NULL a boundary of U32_MAX is assumed, this case is just for - * non-DMA API callers. 
- */ -static inline unsigned long dma_get_seg_boundary_nr_pages(struct device *dev, - unsigned int page_shift) -{ - if (!dev) - return (U32_MAX >> page_shift) + 1; - return (dma_get_seg_boundary(dev) >> page_shift) + 1; + return DMA_BIT_MASK(32); } static inline int dma_set_seg_boundary(struct device *dev, unsigned long mask) @@ -522,22 +644,22 @@ static inline int dma_set_seg_boundary(struct device *dev, unsigned long mask) return -EIO; } -static inline unsigned int dma_get_min_align_mask(struct device *dev) +#ifndef dma_max_pfn +static inline unsigned long dma_max_pfn(struct device *dev) { - if (dev->dma_parms) - return dev->dma_parms->min_align_mask; - return 0; -} - -static inline int dma_set_min_align_mask(struct device *dev, - unsigned int min_align_mask) -{ - if (WARN_ON_ONCE(!dev->dma_parms)) - return -EIO; - dev->dma_parms->min_align_mask = min_align_mask; - return 0; + return *dev->dma_mask >> PAGE_SHIFT; +} +#endif + +static inline void *dma_zalloc_coherent(struct device *dev, size_t size, + dma_addr_t *dma_handle, gfp_t flag) +{ + void *ret = dma_alloc_coherent(dev, size, dma_handle, + flag | __GFP_ZERO); + return ret; } +#ifdef CONFIG_HAS_DMA static inline int dma_get_cache_alignment(void) { #ifdef ARCH_DMA_MINALIGN @@ -545,24 +667,80 @@ static inline int dma_get_cache_alignment(void) #endif return 1; } +#endif -static inline void *dmam_alloc_coherent(struct device *dev, size_t size, - dma_addr_t *dma_handle, gfp_t gfp) +/* flags for the coherent memory api */ +#define DMA_MEMORY_MAP 0x01 +#define DMA_MEMORY_IO 0x02 +#define DMA_MEMORY_INCLUDES_CHILDREN 0x04 +#define DMA_MEMORY_EXCLUSIVE 0x08 + +#ifdef CONFIG_HAVE_GENERIC_DMA_COHERENT +int dma_declare_coherent_memory(struct device *dev, phys_addr_t phys_addr, + dma_addr_t device_addr, size_t size, int flags); +void dma_release_declared_memory(struct device *dev); +void *dma_mark_declared_memory_occupied(struct device *dev, + dma_addr_t device_addr, size_t size); +#else +static inline int 
+dma_declare_coherent_memory(struct device *dev, phys_addr_t phys_addr, + dma_addr_t device_addr, size_t size, int flags) { - return dmam_alloc_attrs(dev, size, dma_handle, gfp, - (gfp & __GFP_NOWARN) ? DMA_ATTR_NO_WARN : 0); + return 0; } +static inline void +dma_release_declared_memory(struct device *dev) +{ +} + +static inline void * +dma_mark_declared_memory_occupied(struct device *dev, + dma_addr_t device_addr, size_t size) +{ + return ERR_PTR(-EBUSY); +} +#endif /* CONFIG_HAVE_GENERIC_DMA_COHERENT */ + +/* + * Managed DMA API + */ +extern void *dmam_alloc_coherent(struct device *dev, size_t size, + dma_addr_t *dma_handle, gfp_t gfp); +extern void dmam_free_coherent(struct device *dev, size_t size, void *vaddr, + dma_addr_t dma_handle); +extern void *dmam_alloc_noncoherent(struct device *dev, size_t size, + dma_addr_t *dma_handle, gfp_t gfp); +extern void dmam_free_noncoherent(struct device *dev, size_t size, void *vaddr, + dma_addr_t dma_handle); +#ifdef CONFIG_HAVE_GENERIC_DMA_COHERENT +extern int dmam_declare_coherent_memory(struct device *dev, + phys_addr_t phys_addr, + dma_addr_t device_addr, size_t size, + int flags); +extern void dmam_release_declared_memory(struct device *dev); +#else /* CONFIG_HAVE_GENERIC_DMA_COHERENT */ +static inline int dmam_declare_coherent_memory(struct device *dev, + phys_addr_t phys_addr, dma_addr_t device_addr, + size_t size, gfp_t gfp) +{ + return 0; +} + +static inline void dmam_release_declared_memory(struct device *dev) +{ +} +#endif /* CONFIG_HAVE_GENERIC_DMA_COHERENT */ + static inline void *dma_alloc_wc(struct device *dev, size_t size, dma_addr_t *dma_addr, gfp_t gfp) { - unsigned long attrs = DMA_ATTR_WRITE_COMBINE; - - if (gfp & __GFP_NOWARN) - attrs |= DMA_ATTR_NO_WARN; - - return dma_alloc_attrs(dev, size, dma_addr, gfp, attrs); + return dma_alloc_attrs(dev, size, dma_addr, gfp, + DMA_ATTR_WRITE_COMBINE); } +#ifndef dma_alloc_writecombine +#define dma_alloc_writecombine dma_alloc_wc +#endif static inline void 
dma_free_wc(struct device *dev, size_t size, void *cpu_addr, dma_addr_t dma_addr) @@ -570,6 +748,9 @@ static inline void dma_free_wc(struct device *dev, size_t size, return dma_free_attrs(dev, size, cpu_addr, dma_addr, DMA_ATTR_WRITE_COMBINE); } +#ifndef dma_free_writecombine +#define dma_free_writecombine dma_free_wc +#endif static inline int dma_mmap_wc(struct device *dev, struct vm_area_struct *vma, @@ -579,8 +760,11 @@ static inline int dma_mmap_wc(struct device *dev, return dma_mmap_attrs(dev, vma, cpu_addr, dma_addr, size, DMA_ATTR_WRITE_COMBINE); } +#ifndef dma_mmap_writecombine +#define dma_mmap_writecombine dma_mmap_wc +#endif -#ifdef CONFIG_NEED_DMA_MAP_STATE +#if defined(CONFIG_NEED_DMA_MAP_STATE) || defined(CONFIG_DMA_API_DEBUG) #define DEFINE_DMA_UNMAP_ADDR(ADDR_NAME) dma_addr_t ADDR_NAME #define DEFINE_DMA_UNMAP_LEN(LEN_NAME) __u32 LEN_NAME #define dma_unmap_addr(PTR, ADDR_NAME) ((PTR)->ADDR_NAME) @@ -596,4 +780,4 @@ static inline int dma_mmap_wc(struct device *dev, #define dma_unmap_len_set(PTR, LEN_NAME, VAL) do { } while (0) #endif -#endif /* _LINUX_DMA_MAPPING_H */ +#endif diff --git a/include/linux/dma/dw.h b/include/linux/dma/dw.h index 9752f3745f..ccfd0c3777 100644 --- a/include/linux/dma/dw.h +++ b/include/linux/dma/dw.h @@ -1,10 +1,13 @@ -/* SPDX-License-Identifier: GPL-2.0 */ /* * Driver for the Synopsys DesignWare DMA Controller * * Copyright (C) 2007 Atmel Corporation * Copyright (C) 2010-2011 ST Microelectronics * Copyright (C) 2014 Intel Corporation + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
*/ #ifndef _DMA_DW_H #define _DMA_DW_H @@ -20,7 +23,6 @@ struct dw_dma; /** * struct dw_dma_chip - representation of DesignWare DMA controller hardware * @dev: struct device of the DMA controller - * @id: instance ID * @irq: irq line * @regs: memory mapped I/O space * @clk: hclk clock @@ -29,7 +31,6 @@ struct dw_dma; */ struct dw_dma_chip { struct device *dev; - int id; int irq; void __iomem *regs; struct clk *clk; @@ -42,13 +43,30 @@ struct dw_dma_chip { #if IS_ENABLED(CONFIG_DW_DMAC_CORE) int dw_dma_probe(struct dw_dma_chip *chip); int dw_dma_remove(struct dw_dma_chip *chip); -int idma32_dma_probe(struct dw_dma_chip *chip); -int idma32_dma_remove(struct dw_dma_chip *chip); #else static inline int dw_dma_probe(struct dw_dma_chip *chip) { return -ENODEV; } static inline int dw_dma_remove(struct dw_dma_chip *chip) { return 0; } -static inline int idma32_dma_probe(struct dw_dma_chip *chip) { return -ENODEV; } -static inline int idma32_dma_remove(struct dw_dma_chip *chip) { return 0; } #endif /* CONFIG_DW_DMAC_CORE */ +/* DMA API extensions */ +struct dw_desc; + +struct dw_cyclic_desc { + struct dw_desc **desc; + unsigned long periods; + void (*period_callback)(void *param); + void *period_callback_param; +}; + +struct dw_cyclic_desc *dw_dma_cyclic_prep(struct dma_chan *chan, + dma_addr_t buf_addr, size_t buf_len, size_t period_len, + enum dma_transfer_direction direction); +void dw_dma_cyclic_free(struct dma_chan *chan); +int dw_dma_cyclic_start(struct dma_chan *chan); +void dw_dma_cyclic_stop(struct dma_chan *chan); + +dma_addr_t dw_dma_get_src_addr(struct dma_chan *chan); + +dma_addr_t dw_dma_get_dst_addr(struct dma_chan *chan); + #endif /* _DMA_DW_H */ diff --git a/include/linux/dma/hsu.h b/include/linux/dma/hsu.h index a6b7bc7073..197eec63e5 100644 --- a/include/linux/dma/hsu.h +++ b/include/linux/dma/hsu.h @@ -1,8 +1,11 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * Driver for the High Speed UART DMA * * Copyright (C) 2015 Intel Corporation + * + * This 
program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. */ #ifndef _DMA_HSU_H diff --git a/include/linux/dma/ipu-dma.h b/include/linux/dma/ipu-dma.h index 6969391580..18031115c6 100644 --- a/include/linux/dma/ipu-dma.h +++ b/include/linux/dma/ipu-dma.h @@ -1,9 +1,12 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright (C) 2008 * Guennadi Liakhovetski, DENX Software Engineering, * * Copyright (C) 2005-2007 Freescale Semiconductor, Inc. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. */ #ifndef __LINUX_DMA_IPU_DMA_H diff --git a/include/linux/dma/mmp-pdma.h b/include/linux/dma/mmp-pdma.h index 25cab62a28..2dc9b2bc18 100644 --- a/include/linux/dma/mmp-pdma.h +++ b/include/linux/dma/mmp-pdma.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _MMP_PDMA_H_ #define _MMP_PDMA_H_ diff --git a/include/linux/dma/pxa-dma.h b/include/linux/dma/pxa-dma.h index fceb5df070..3edc99294b 100644 --- a/include/linux/dma/pxa-dma.h +++ b/include/linux/dma/pxa-dma.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _PXA_DMA_H_ #define _PXA_DMA_H_ @@ -9,18 +8,20 @@ enum pxad_chan_prio { PXAD_PRIO_LOWEST, }; -/** - * struct pxad_param - dma channel request parameters - * @drcmr: requestor line number - * @prio: minimal mandatory priority of the channel - * - * If a requested channel is granted, its priority will be at least @prio, - * ie. if PXAD_PRIO_LOW is required, the requested channel will be either - * PXAD_PRIO_LOW, PXAD_PRIO_NORMAL or PXAD_PRIO_HIGHEST. 
- */ struct pxad_param { unsigned int drcmr; enum pxad_chan_prio prio; }; +struct dma_chan; + +#ifdef CONFIG_PXA_DMA +bool pxad_filter_fn(struct dma_chan *chan, void *param); +#else +static inline bool pxad_filter_fn(struct dma_chan *chan, void *param) +{ + return false; +} +#endif + #endif /* _PXA_DMA_H_ */ diff --git a/include/linux/dma/xilinx_dma.h b/include/linux/dma/xilinx_dma.h index 0dde1a46ab..3ae3000525 100644 --- a/include/linux/dma/xilinx_dma.h +++ b/include/linux/dma/xilinx_dma.h @@ -1,8 +1,12 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * Xilinx DMA Engine drivers support header file * * Copyright (C) 2010-2014 Xilinx, Inc. All rights reserved. + * + * This is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. */ #ifndef __DMA_XILINX_DMA_H @@ -23,7 +27,6 @@ * @delay: Delay counter * @reset: Reset Channel * @ext_fsync: External Frame Sync source - * @vflip_en: Vertical Flip enable */ struct xilinx_vdma_config { int frm_dly; @@ -36,7 +39,20 @@ struct xilinx_vdma_config { int delay; int reset; int ext_fsync; - bool vflip_en; +}; + +/** + * enum xdma_ip_type: DMA IP type. + * + * XDMA_TYPE_AXIDMA: Axi dma ip. + * XDMA_TYPE_CDMA: Axi cdma ip. + * XDMA_TYPE_VDMA: Axi vdma ip. + * + */ +enum xdma_ip_type { + XDMA_TYPE_AXIDMA = 0, + XDMA_TYPE_CDMA, + XDMA_TYPE_VDMA, }; int xilinx_vdma_channel_set_config(struct dma_chan *dchan, diff --git a/include/linux/dma_remapping.h b/include/linux/dma_remapping.h new file mode 100644 index 0000000000..187c102997 --- /dev/null +++ b/include/linux/dma_remapping.h @@ -0,0 +1,56 @@ +#ifndef _DMA_REMAPPING_H +#define _DMA_REMAPPING_H + +/* + * VT-d hardware uses 4KiB page size regardless of host page size. 
+ */ +#define VTD_PAGE_SHIFT (12) +#define VTD_PAGE_SIZE (1UL << VTD_PAGE_SHIFT) +#define VTD_PAGE_MASK (((u64)-1) << VTD_PAGE_SHIFT) +#define VTD_PAGE_ALIGN(addr) (((addr) + VTD_PAGE_SIZE - 1) & VTD_PAGE_MASK) + +#define VTD_STRIDE_SHIFT (9) +#define VTD_STRIDE_MASK (((u64)-1) << VTD_STRIDE_SHIFT) + +#define DMA_PTE_READ (1) +#define DMA_PTE_WRITE (2) +#define DMA_PTE_LARGE_PAGE (1 << 7) +#define DMA_PTE_SNP (1 << 11) + +#define CONTEXT_TT_MULTI_LEVEL 0 +#define CONTEXT_TT_DEV_IOTLB 1 +#define CONTEXT_TT_PASS_THROUGH 2 +/* Extended context entry types */ +#define CONTEXT_TT_PT_PASID 4 +#define CONTEXT_TT_PT_PASID_DEV_IOTLB 5 +#define CONTEXT_TT_MASK (7ULL << 2) + +#define CONTEXT_DINVE (1ULL << 8) +#define CONTEXT_PRS (1ULL << 9) +#define CONTEXT_PASIDE (1ULL << 11) + +struct intel_iommu; +struct dmar_domain; +struct root_entry; + + +#ifdef CONFIG_INTEL_IOMMU +extern int iommu_calculate_agaw(struct intel_iommu *iommu); +extern int iommu_calculate_max_sagaw(struct intel_iommu *iommu); +extern int dmar_disabled; +extern int intel_iommu_enabled; +#else +static inline int iommu_calculate_agaw(struct intel_iommu *iommu) +{ + return 0; +} +static inline int iommu_calculate_max_sagaw(struct intel_iommu *iommu) +{ + return 0; +} +#define dmar_disabled (1) +#define intel_iommu_enabled (0) +#endif + + +#endif diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h index e5c2c9e71b..cc535a478b 100644 --- a/include/linux/dmaengine.h +++ b/include/linux/dmaengine.h @@ -1,6 +1,18 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * Copyright(c) 2004 - 2006 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 2 of the License, or (at your option) + * any later version. 
+ * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * The full GNU General Public License is included in this distribution in the + * file called COPYING. */ #ifndef LINUX_DMAENGINE_H #define LINUX_DMAENGINE_H @@ -39,7 +51,6 @@ enum dma_status { DMA_IN_PROGRESS, DMA_PAUSED, DMA_ERROR, - DMA_OUT_OF_ORDER, }; /** @@ -57,14 +68,12 @@ enum dma_transaction_type { DMA_MEMSET, DMA_MEMSET_SG, DMA_INTERRUPT, + DMA_SG, DMA_PRIVATE, DMA_ASYNC_TX, DMA_SLAVE, DMA_CYCLIC, DMA_INTERLEAVE, - DMA_COMPLETION_NO_ORDER, - DMA_REPEAT, - DMA_LOAD_EOT, /* last transaction type for creation of the capabilities mask */ DMA_TX_TYPE_END, }; @@ -87,9 +96,9 @@ enum dma_transfer_direction { /** * Interleaved Transfer Request * ---------------------------- - * A chunk is collection of contiguous bytes to be transferred. + * A chunk is collection of contiguous bytes to be transfered. * The gap(in bytes) between two chunks is called inter-chunk-gap(ICG). - * ICGs may or may not change between chunks. + * ICGs may or maynot change between chunks. * A FRAME is the smallest series of contiguous {chunk,icg} pairs, * that when repeated an integral number of times, specifies the transfer. * A transfer template is specification of a Frame, the number of times @@ -157,7 +166,7 @@ struct dma_interleaved_template { bool dst_sgl; size_t numf; size_t frame_size; - struct data_chunk sgl[]; + struct data_chunk sgl[0]; }; /** @@ -166,7 +175,7 @@ struct dma_interleaved_template { * @DMA_PREP_INTERRUPT - trigger an interrupt (callback) upon completion of * this transaction * @DMA_CTRL_ACK - if clear, the descriptor cannot be reused until the client - * acknowledges receipt, i.e. has a chance to establish any dependency + * acknowledges receipt, i.e. 
has has a chance to establish any dependency * chains * @DMA_PREP_PQ_DISABLE_P - prevent generation of P while generating Q * @DMA_PREP_PQ_DISABLE_Q - prevent generation of Q while generating P @@ -177,19 +186,6 @@ struct dma_interleaved_template { * on the result of this operation * @DMA_CTRL_REUSE: client can reuse the descriptor and submit again till * cleared or freed - * @DMA_PREP_CMD: tell the driver that the data passed to DMA API is command - * data and the descriptor should be in different format from normal - * data descriptors. - * @DMA_PREP_REPEAT: tell the driver that the transaction shall be automatically - * repeated when it ends until a transaction is issued on the same channel - * with the DMA_PREP_LOAD_EOT flag set. This flag is only applicable to - * interleaved transactions and is ignored for all other transaction types. - * @DMA_PREP_LOAD_EOT: tell the driver that the transaction shall replace any - * active repeated (as indicated by DMA_PREP_REPEAT) transaction when the - * repeated transaction ends. Not setting this flag when the previously queued - * transaction is marked with DMA_PREP_REPEAT will cause the new transaction - * to never be processed and stay in the issued queue forever. The flag is - * ignored if the previous transaction is not a repeated transaction. */ enum dma_ctrl_flags { DMA_PREP_INTERRUPT = (1 << 0), @@ -199,9 +195,6 @@ enum dma_ctrl_flags { DMA_PREP_CONTINUE = (1 << 4), DMA_PREP_FENCE = (1 << 5), DMA_CTRL_REUSE = (1 << 6), - DMA_PREP_CMD = (1 << 7), - DMA_PREP_REPEAT = (1 << 8), - DMA_PREP_LOAD_EOT = (1 << 9), }; /** @@ -229,67 +222,12 @@ enum sum_check_flags { */ typedef struct { DECLARE_BITMAP(bits, DMA_TX_TYPE_END); } dma_cap_mask_t; -/** - * enum dma_desc_metadata_mode - per descriptor metadata mode types supported - * @DESC_METADATA_CLIENT - the metadata buffer is allocated/provided by the - * client driver and it is attached (via the dmaengine_desc_attach_metadata() - * helper) to the descriptor. 
- * - * Client drivers interested to use this mode can follow: - * - DMA_MEM_TO_DEV / DEV_MEM_TO_MEM: - * 1. prepare the descriptor (dmaengine_prep_*) - * construct the metadata in the client's buffer - * 2. use dmaengine_desc_attach_metadata() to attach the buffer to the - * descriptor - * 3. submit the transfer - * - DMA_DEV_TO_MEM: - * 1. prepare the descriptor (dmaengine_prep_*) - * 2. use dmaengine_desc_attach_metadata() to attach the buffer to the - * descriptor - * 3. submit the transfer - * 4. when the transfer is completed, the metadata should be available in the - * attached buffer - * - * @DESC_METADATA_ENGINE - the metadata buffer is allocated/managed by the DMA - * driver. The client driver can ask for the pointer, maximum size and the - * currently used size of the metadata and can directly update or read it. - * dmaengine_desc_get_metadata_ptr() and dmaengine_desc_set_metadata_len() is - * provided as helper functions. - * - * Note: the metadata area for the descriptor is no longer valid after the - * transfer has been completed (valid up to the point when the completion - * callback returns if used). - * - * Client drivers interested to use this mode can follow: - * - DMA_MEM_TO_DEV / DEV_MEM_TO_MEM: - * 1. prepare the descriptor (dmaengine_prep_*) - * 2. use dmaengine_desc_get_metadata_ptr() to get the pointer to the engine's - * metadata area - * 3. update the metadata at the pointer - * 4. use dmaengine_desc_set_metadata_len() to tell the DMA engine the amount - * of data the client has placed into the metadata buffer - * 5. submit the transfer - * - DMA_DEV_TO_MEM: - * 1. prepare the descriptor (dmaengine_prep_*) - * 2. submit the transfer - * 3. on transfer completion, use dmaengine_desc_get_metadata_ptr() to get the - * pointer to the engine's metadata area - * 4. Read out the metadata from the pointer - * - * Note: the two mode is not compatible and clients must use one mode for a - * descriptor. 
- */ -enum dma_desc_metadata_mode { - DESC_METADATA_NONE = 0, - DESC_METADATA_CLIENT = BIT(0), - DESC_METADATA_ENGINE = BIT(1), -}; - /** * struct dma_chan_percpu - the per-CPU part of struct dma_chan * @memcpy_count: transaction counter * @bytes_transferred: byte counter */ + struct dma_chan_percpu { /* stats */ unsigned long memcpy_count; @@ -309,14 +247,10 @@ struct dma_router { /** * struct dma_chan - devices supply DMA channels, clients use them * @device: ptr to the dma device who supplies this channel, always !%NULL - * @slave: ptr to the device using this channel * @cookie: last cookie value returned to client * @completed_cookie: last completed cookie for this channel * @chan_id: channel ID for sysfs * @dev: class device for sysfs - * @name: backlink name for sysfs - * @dbg_client_name: slave name for debugfs in format: - * dev_name(requester's dev):channel name, for example: "2b00000.mcasp:tx" * @device_node: used to add this to the device chan list * @local: per-cpu pointer to a struct dma_chan_percpu * @client_count: how many clients are using this channel @@ -327,17 +261,12 @@ struct dma_router { */ struct dma_chan { struct dma_device *device; - struct device *slave; dma_cookie_t cookie; dma_cookie_t completed_cookie; /* sysfs */ int chan_id; struct dma_chan_dev *dev; - const char *name; -#ifdef CONFIG_DEBUG_FS - char *dbg_client_name; -#endif struct list_head device_node; struct dma_chan_percpu __percpu *local; @@ -356,14 +285,13 @@ struct dma_chan { * @chan: driver channel device * @device: sysfs device * @dev_id: parent dma_device dev_id - * @chan_dma_dev: The channel is using custom/different dma-mapping - * compared to the parent dma_device + * @idr_ref: reference count to gate release of dma_device dev_id */ struct dma_chan_dev { struct dma_chan *chan; struct device device; int dev_id; - bool chan_dma_dev; + atomic_t *idr_ref; }; /** @@ -380,7 +308,6 @@ enum dma_slave_buswidth { DMA_SLAVE_BUSWIDTH_16_BYTES = 16, DMA_SLAVE_BUSWIDTH_32_BYTES = 32, 
DMA_SLAVE_BUSWIDTH_64_BYTES = 64, - DMA_SLAVE_BUSWIDTH_128_BYTES = 128, }; /** @@ -399,7 +326,7 @@ enum dma_slave_buswidth { * @src_addr_width: this is the width in bytes of the source (RX) * register where DMA data shall be read. If the source * is memory this may be ignored depending on architecture. - * Legal values: 1, 2, 3, 4, 8, 16, 32, 64, 128. + * Legal values: 1, 2, 4, 8. * @dst_addr_width: same as src_addr_width but for destination * target (TX) mutatis mutandis. * @src_maxburst: the maximum number of words (note: words, as in @@ -409,21 +336,12 @@ enum dma_slave_buswidth { * may or may not be applicable on memory sources. * @dst_maxburst: same as src_maxburst but for destination target * mutatis mutandis. - * @src_port_window_size: The length of the register area in words the data need - * to be accessed on the device side. It is only used for devices which is using - * an area instead of a single register to receive the data. Typically the DMA - * loops in this area in order to transfer the data. - * @dst_port_window_size: same as src_port_window_size but for the destination - * port. * @device_fc: Flow Controller Settings. Only valid for slave channels. Fill * with 'true' if peripheral should be flow controller. Direction will be * selected at Runtime. * @slave_id: Slave requester id. Only valid for slave channels. The dma * slave peripheral will have unique id as dma requester which need to be * pass as slave config. - * @peripheral_config: peripheral configuration for programming peripheral - * for dmaengine transfer - * @peripheral_size: peripheral configuration buffer size * * This struct is passed in as configuration data to a DMA engine * in order to set up a certain channel for DMA transport at runtime. 
@@ -445,12 +363,8 @@ struct dma_slave_config { enum dma_slave_buswidth dst_addr_width; u32 src_maxburst; u32 dst_maxburst; - u32 src_port_window_size; - u32 dst_port_window_size; bool device_fc; unsigned int slave_id; - void *peripheral_config; - size_t peripheral_size; }; /** @@ -479,24 +393,16 @@ enum dma_residue_granularity { DMA_RESIDUE_GRANULARITY_BURST = 2, }; -/** - * struct dma_slave_caps - expose capabilities of a slave channel only - * @src_addr_widths: bit mask of src addr widths the channel supports. - * Width is specified in bytes, e.g. for a channel supporting - * a width of 4 the mask should have BIT(4) set. - * @dst_addr_widths: bit mask of dst addr widths the channel supports - * @directions: bit mask of slave directions the channel supports. - * Since the enum dma_transfer_direction is not defined as bit flag for - * each type, the dma controller should set BIT() and same - * should be checked by controller as well - * @min_burst: min burst capability per-transfer +/* struct dma_slave_caps - expose capabilities of a slave channel only + * + * @src_addr_widths: bit mask of src addr widths the channel supports + * @dst_addr_widths: bit mask of dstn addr widths the channel supports + * @directions: bit mask of slave direction the channel supported + * since the enum dma_transfer_direction is not defined as bits for each + * type of direction, the dma controller should fill (1 << ) and same + * should be checked by controller as well * @max_burst: max burst capability per-transfer - * @max_sg_burst: max number of SG list entries executed in a single burst - * DMA tansaction with no software intervention for reinitialization. - * Zero value means unlimited number of entries. - * @cmd_pause: true, if pause is supported (i.e. 
for reading residue or - * for resume later) - * @cmd_resume: true, if resume is supported + * @cmd_pause: true, if pause and thereby resume is supported * @cmd_terminate: true, if terminate cmd is supported * @residue_granularity: granularity of the reported transfer residue * @descriptor_reuse: if a descriptor can be reused by client and @@ -506,11 +412,8 @@ struct dma_slave_caps { u32 src_addr_widths; u32 dst_addr_widths; u32 directions; - u32 min_burst; u32 max_burst; - u32 max_sg_burst; bool cmd_pause; - bool cmd_resume; bool cmd_terminate; enum dma_residue_granularity residue_granularity; bool descriptor_reuse; @@ -554,30 +457,14 @@ typedef void (*dma_async_tx_callback_result)(void *dma_async_param, const struct dmaengine_result *result); struct dmaengine_unmap_data { -#if IS_ENABLED(CONFIG_DMA_ENGINE_RAID) - u16 map_cnt; -#else u8 map_cnt; -#endif u8 to_cnt; u8 from_cnt; u8 bidi_cnt; struct device *dev; struct kref kref; size_t len; - dma_addr_t addr[]; -}; - -struct dma_async_tx_descriptor; - -struct dma_descriptor_metadata_ops { - int (*attach)(struct dma_async_tx_descriptor *desc, void *data, - size_t len); - - void *(*get_ptr)(struct dma_async_tx_descriptor *desc, - size_t *payload_len, size_t *max_len); - int (*set_len)(struct dma_async_tx_descriptor *desc, - size_t payload_len); + dma_addr_t addr[0]; }; /** @@ -586,18 +473,13 @@ struct dma_descriptor_metadata_ops { * @cookie: tracking cookie for this transaction, set to -EBUSY if * this tx is sitting on a dependency list * @flags: flags to augment operation preparation, control completion, and - * communicate status + * communicate status * @phys: physical address of the descriptor * @chan: target channel for this operation * @tx_submit: accept the descriptor, assign ordered cookie and mark the * descriptor pending. 
To be pushed on .issue_pending() call * @callback: routine to call after this operation is complete * @callback_param: general parameter to pass to the callback routine - * @desc_metadata_mode: core managed metadata mode to protect mixed use of - * DESC_METADATA_CLIENT or DESC_METADATA_ENGINE. Otherwise - * DESC_METADATA_NONE - * @metadata_ops: DMA driver provided metadata mode ops, need to be set by the - * DMA driver if metadata mode is supported with the descriptor * ---async_tx api specific fields--- * @next: at completion submit this descriptor * @parent: pointer to the next level up in the dependency chain @@ -614,8 +496,6 @@ struct dma_async_tx_descriptor { dma_async_tx_callback_result callback_result; void *callback_param; struct dmaengine_unmap_data *unmap; - enum dma_desc_metadata_mode desc_metadata_mode; - struct dma_descriptor_metadata_ops *metadata_ops; #ifdef CONFIG_ASYNC_TX_ENABLE_CHANNEL_SWITCH struct dma_async_tx_descriptor *next; struct dma_async_tx_descriptor *parent; @@ -651,11 +531,10 @@ static inline void dmaengine_unmap_put(struct dmaengine_unmap_data *unmap) static inline void dma_descriptor_unmap(struct dma_async_tx_descriptor *tx) { - if (!tx->unmap) - return; - - dmaengine_unmap_put(tx->unmap); - tx->unmap = NULL; + if (tx->unmap) { + dmaengine_unmap_put(tx->unmap); + tx->unmap = NULL; + } } #ifndef CONFIG_ASYNC_TX_ENABLE_CHANNEL_SWITCH @@ -724,13 +603,11 @@ static inline struct dma_async_tx_descriptor *txd_next(struct dma_async_tx_descr * @residue: the remaining number of bytes left to transmit * on the selected transfer for states DMA_IN_PROGRESS and * DMA_PAUSED if this is implemented in the driver, else 0 - * @in_flight_bytes: amount of data in bytes cached by the DMA. 
*/ struct dma_tx_state { dma_cookie_t last; dma_cookie_t used; u32 residue; - u32 in_flight_bytes; }; /** @@ -745,8 +622,6 @@ enum dmaengine_alignment { DMAENGINE_ALIGN_16_BYTES = 4, DMAENGINE_ALIGN_32_BYTES = 5, DMAENGINE_ALIGN_64_BYTES = 6, - DMAENGINE_ALIGN_128_BYTES = 7, - DMAENGINE_ALIGN_256_BYTES = 8, }; /** @@ -783,7 +658,6 @@ struct dma_filter { * @global_node: list_head for global dma_device_list * @filter: information for device/slave to filter function/param mapping * @cap_mask: one or more dma_capability flags - * @desc_metadata_modes: supported metadata modes by the DMA device * @max_xor: maximum number of xor sources, 0 if no capability * @max_pq: maximum number of PQ sources and PQ-continue capability * @copy_align: alignment shift for memcpy operations @@ -792,25 +666,17 @@ struct dma_filter { * @fill_align: alignment shift for memset operations * @dev_id: unique device ID * @dev: struct device reference for dma mapping api - * @owner: owner module (automatically set based on the provided dev) * @src_addr_widths: bit mask of src addr widths the device supports - * Width is specified in bytes, e.g. for a device supporting - * a width of 4 the mask should have BIT(4) set. * @dst_addr_widths: bit mask of dst addr widths the device supports - * @directions: bit mask of slave directions the device supports. 
- * Since the enum dma_transfer_direction is not defined as bit flag for - * each type, the dma controller should set BIT() and same - * should be checked by controller as well - * @min_burst: min burst capability per-transfer + * @directions: bit mask of slave direction the device supports since + * the enum dma_transfer_direction is not defined as bits for + * each type of direction, the dma controller should fill (1 << + * ) and same should be checked by controller as well * @max_burst: max burst capability per-transfer - * @max_sg_burst: max number of SG list entries executed in a single burst - * DMA tansaction with no software intervention for reinitialization. - * Zero value means unlimited number of entries. * @residue_granularity: granularity of the transfer residue reported * by tx_status * @device_alloc_chan_resources: allocate resources and return the * number of allocated descriptors - * @device_router_config: optional callback for DMA router configuration * @device_free_chan_resources: release DMA channel's resources * @device_prep_dma_memcpy: prepares a memcpy operation * @device_prep_dma_xor: prepares a xor operation @@ -826,8 +692,6 @@ struct dma_filter { * be called after period_len bytes have been transferred. * @device_prep_interleaved_dma: Transfer expression in a generic way. * @device_prep_dma_imm_data: DMA's 8 byte immediate data to the dst address - * @device_caps: May be used to override the generic DMA slave capabilities - * with per-channel specific ones * @device_config: Pushes a new configuration to a channel, return 0 or an error * code * @device_pause: Pauses any transfer happening on a channel. 
Returns @@ -844,23 +708,15 @@ struct dma_filter { * will just return a simple status code * @device_issue_pending: push pending transactions to hardware * @descriptor_reuse: a submitted transfer can be resubmitted after completion - * @device_release: called sometime atfer dma_async_device_unregister() is - * called and there are no further references to this structure. This - * must be implemented to free resources however many existing drivers - * do not and are therefore not safe to unbind while in use. - * @dbg_summary_show: optional routine to show contents in debugfs; default code - * will be used when this is omitted, but custom code can show extra, - * controller specific information. */ struct dma_device { - struct kref ref; + unsigned int chancnt; unsigned int privatecnt; struct list_head channels; struct list_head global_node; struct dma_filter filter; dma_cap_mask_t cap_mask; - enum dma_desc_metadata_mode desc_metadata_modes; unsigned short max_xor; unsigned short max_pq; enum dmaengine_alignment copy_align; @@ -871,21 +727,15 @@ struct dma_device { int dev_id; struct device *dev; - struct module *owner; - struct ida chan_ida; - struct mutex chan_mutex; /* to protect chan_ida */ u32 src_addr_widths; u32 dst_addr_widths; u32 directions; - u32 min_burst; u32 max_burst; - u32 max_sg_burst; bool descriptor_reuse; enum dma_residue_granularity residue_granularity; int (*device_alloc_chan_resources)(struct dma_chan *chan); - int (*device_router_config)(struct dma_chan *chan); void (*device_free_chan_resources)(struct dma_chan *chan); struct dma_async_tx_descriptor *(*device_prep_dma_memcpy)( @@ -913,6 +763,11 @@ struct dma_device { unsigned int nents, int value, unsigned long flags); struct dma_async_tx_descriptor *(*device_prep_dma_interrupt)( struct dma_chan *chan, unsigned long flags); + struct dma_async_tx_descriptor *(*device_prep_dma_sg)( + struct dma_chan *chan, + struct scatterlist *dst_sg, unsigned int dst_nents, + struct scatterlist *src_sg, unsigned 
int src_nents, + unsigned long flags); struct dma_async_tx_descriptor *(*device_prep_slave_sg)( struct dma_chan *chan, struct scatterlist *sgl, @@ -929,8 +784,6 @@ struct dma_device { struct dma_chan *chan, dma_addr_t dst, u64 data, unsigned long flags); - void (*device_caps)(struct dma_chan *chan, - struct dma_slave_caps *caps); int (*device_config)(struct dma_chan *chan, struct dma_slave_config *config); int (*device_pause)(struct dma_chan *chan); @@ -942,12 +795,6 @@ struct dma_device { dma_cookie_t cookie, struct dma_tx_state *txstate); void (*device_issue_pending)(struct dma_chan *chan); - void (*device_release)(struct dma_device *dev); - /* debugfs support */ -#ifdef CONFIG_DEBUG_FS - void (*dbg_summary_show)(struct seq_file *s, struct dma_device *dev); - struct dentry *dbg_dev_root; -#endif }; static inline int dmaengine_slave_config(struct dma_chan *chan, @@ -1024,9 +871,6 @@ static inline struct dma_async_tx_descriptor *dmaengine_prep_interleaved_dma( { if (!chan || !chan->device || !chan->device->device_prep_interleaved_dma) return NULL; - if (flags & DMA_PREP_REPEAT && - !test_bit(DMA_REPEAT, chan->device->cap_mask.bits)) - return NULL; return chan->device->device_prep_interleaved_dma(chan, xt, flags); } @@ -1042,52 +886,19 @@ static inline struct dma_async_tx_descriptor *dmaengine_prep_dma_memset( len, flags); } -static inline struct dma_async_tx_descriptor *dmaengine_prep_dma_memcpy( - struct dma_chan *chan, dma_addr_t dest, dma_addr_t src, - size_t len, unsigned long flags) +static inline struct dma_async_tx_descriptor *dmaengine_prep_dma_sg( + struct dma_chan *chan, + struct scatterlist *dst_sg, unsigned int dst_nents, + struct scatterlist *src_sg, unsigned int src_nents, + unsigned long flags) { - if (!chan || !chan->device || !chan->device->device_prep_dma_memcpy) + if (!chan || !chan->device || !chan->device->device_prep_dma_sg) return NULL; - return chan->device->device_prep_dma_memcpy(chan, dest, src, - len, flags); + return 
chan->device->device_prep_dma_sg(chan, dst_sg, dst_nents, + src_sg, src_nents, flags); } -static inline bool dmaengine_is_metadata_mode_supported(struct dma_chan *chan, - enum dma_desc_metadata_mode mode) -{ - if (!chan) - return false; - - return !!(chan->device->desc_metadata_modes & mode); -} - -#ifdef CONFIG_DMA_ENGINE -int dmaengine_desc_attach_metadata(struct dma_async_tx_descriptor *desc, - void *data, size_t len); -void *dmaengine_desc_get_metadata_ptr(struct dma_async_tx_descriptor *desc, - size_t *payload_len, size_t *max_len); -int dmaengine_desc_set_metadata_len(struct dma_async_tx_descriptor *desc, - size_t payload_len); -#else /* CONFIG_DMA_ENGINE */ -static inline int dmaengine_desc_attach_metadata( - struct dma_async_tx_descriptor *desc, void *data, size_t len) -{ - return -EINVAL; -} -static inline void *dmaengine_desc_get_metadata_ptr( - struct dma_async_tx_descriptor *desc, size_t *payload_len, - size_t *max_len) -{ - return NULL; -} -static inline int dmaengine_desc_set_metadata_len( - struct dma_async_tx_descriptor *desc, size_t payload_len) -{ - return -EINVAL; -} -#endif /* CONFIG_DMA_ENGINE */ - /** * dmaengine_terminate_all() - Terminate all active DMA transfers * @chan: The channel for which to terminate the transfers @@ -1116,7 +927,7 @@ static inline int dmaengine_terminate_all(struct dma_chan *chan) * dmaengine_synchronize() needs to be called before it is safe to free * any memory that is accessed by previously submitted descriptors or before * freeing any resources accessed from within the completion callback of any - * previously submitted descriptors. + * perviously submitted descriptors. * * This function can be called from atomic context as well as from within a * complete callback of a descriptor submitted on the same channel. @@ -1138,7 +949,7 @@ static inline int dmaengine_terminate_async(struct dma_chan *chan) * * Synchronizes to the DMA channel termination to the current context. 
When this * function returns it is guaranteed that all transfers for previously issued - * descriptors have stopped and it is safe to free the memory associated + * descriptors have stopped and and it is safe to free the memory assoicated * with them. Furthermore it is guaranteed that all complete callback functions * for a previously submitted descriptor have finished running and it is safe to * free resources accessed from within the complete callbacks. @@ -1215,7 +1026,14 @@ static inline dma_cookie_t dmaengine_submit(struct dma_async_tx_descriptor *desc static inline bool dmaengine_check_align(enum dmaengine_alignment align, size_t off1, size_t off2, size_t len) { - return !(((1 << align) - 1) & (off1 | off2 | len)); + size_t mask; + + if (!align) + return true; + mask = (1 << align) - 1; + if (mask & (off1 | off2 | len)) + return false; + return true; } static inline bool is_dma_copy_aligned(struct dma_device *dev, size_t off1, @@ -1289,9 +1107,9 @@ static inline int dma_maxpq(struct dma_device *dma, enum dma_ctrl_flags flags) { if (dma_dev_has_pq_continue(dma) || !dmaf_continue(flags)) return dma_dev_to_maxpq(dma); - if (dmaf_p_disabled_continue(flags)) + else if (dmaf_p_disabled_continue(flags)) return dma_dev_to_maxpq(dma) - 1; - if (dmaf_continue(flags)) + else if (dmaf_continue(flags)) return dma_dev_to_maxpq(dma) - 3; BUG(); } @@ -1302,7 +1120,7 @@ static inline size_t dmaengine_get_icg(bool inc, bool sgl, size_t icg, if (inc) { if (dir_icg) return dir_icg; - if (sgl) + else if (sgl) return icg; } @@ -1468,12 +1286,11 @@ static inline enum dma_status dma_async_is_complete(dma_cookie_t cookie, static inline void dma_set_tx_state(struct dma_tx_state *st, dma_cookie_t last, dma_cookie_t used, u32 residue) { - if (!st) - return; - - st->last = last; - st->used = used; - st->residue = residue; + if (st) { + st->last = last; + st->used = used; + st->residue = residue; + } } #ifdef CONFIG_DMA_ENGINE @@ -1482,8 +1299,8 @@ enum dma_status dma_sync_wait(struct 
dma_chan *chan, dma_cookie_t cookie); enum dma_status dma_wait_for_async_tx(struct dma_async_tx_descriptor *tx); void dma_issue_pending_all(void); struct dma_chan *__dma_request_channel(const dma_cap_mask_t *mask, - dma_filter_fn fn, void *fn_param, - struct device_node *np); + dma_filter_fn fn, void *fn_param); +struct dma_chan *dma_request_slave_channel(struct device *dev, const char *name); struct dma_chan *dma_request_chan(struct device *dev, const char *name); struct dma_chan *dma_request_chan_by_mask(const dma_cap_mask_t *mask); @@ -1507,9 +1324,12 @@ static inline void dma_issue_pending_all(void) { } static inline struct dma_chan *__dma_request_channel(const dma_cap_mask_t *mask, - dma_filter_fn fn, - void *fn_param, - struct device_node *np) + dma_filter_fn fn, void *fn_param) +{ + return NULL; +} +static inline struct dma_chan *dma_request_slave_channel(struct device *dev, + const char *name) { return NULL; } @@ -1533,20 +1353,20 @@ static inline int dma_get_slave_caps(struct dma_chan *chan, } #endif +#define dma_request_slave_channel_reason(dev, name) dma_request_chan(dev, name) + static inline int dmaengine_desc_set_reuse(struct dma_async_tx_descriptor *tx) { struct dma_slave_caps caps; - int ret; - ret = dma_get_slave_caps(tx->chan, &caps); - if (ret) - return ret; + dma_get_slave_caps(tx->chan, &caps); - if (!caps.descriptor_reuse) + if (caps.descriptor_reuse) { + tx->flags |= DMA_CTRL_REUSE; + return 0; + } else { return -EPERM; - - tx->flags |= DMA_CTRL_REUSE; - return 0; + } } static inline void dmaengine_desc_clear_reuse(struct dma_async_tx_descriptor *tx) @@ -1562,36 +1382,25 @@ static inline bool dmaengine_desc_test_reuse(struct dma_async_tx_descriptor *tx) static inline int dmaengine_desc_free(struct dma_async_tx_descriptor *desc) { /* this is supported for reusable desc, so check that */ - if (!dmaengine_desc_test_reuse(desc)) + if (dmaengine_desc_test_reuse(desc)) + return desc->desc_free(desc); + else return -EPERM; - - return 
desc->desc_free(desc); } /* --- DMA device --- */ int dma_async_device_register(struct dma_device *device); -int dmaenginem_async_device_register(struct dma_device *device); void dma_async_device_unregister(struct dma_device *device); -int dma_async_device_channel_register(struct dma_device *device, - struct dma_chan *chan); -void dma_async_device_channel_unregister(struct dma_device *device, - struct dma_chan *chan); void dma_run_dependencies(struct dma_async_tx_descriptor *tx); -#define dma_request_channel(mask, x, y) \ - __dma_request_channel(&(mask), x, y, NULL) - -/* Deprecated, please use dma_request_chan() directly */ -static inline struct dma_chan * __deprecated -dma_request_slave_channel(struct device *dev, const char *name) -{ - struct dma_chan *ch = dma_request_chan(dev, name); - - return IS_ERR(ch) ? NULL : ch; -} +struct dma_chan *dma_get_slave_channel(struct dma_chan *chan); +struct dma_chan *dma_get_any_slave_channel(struct dma_device *device); +#define dma_request_channel(mask, x, y) __dma_request_channel(&(mask), x, y) +#define dma_request_slave_channel_compat(mask, x, y, dev, name) \ + __dma_request_slave_channel_compat(&(mask), x, y, dev, name) static inline struct dma_chan -*dma_request_slave_channel_compat(const dma_cap_mask_t mask, +*__dma_request_slave_channel_compat(const dma_cap_mask_t *mask, dma_filter_fn fn, void *fn_param, struct device *dev, const char *name) { @@ -1604,32 +1413,6 @@ static inline struct dma_chan if (!fn || !fn_param) return NULL; - return __dma_request_channel(&mask, fn, fn_param, NULL); + return __dma_request_channel(mask, fn, fn_param); } - -static inline char * -dmaengine_get_direction_text(enum dma_transfer_direction dir) -{ - switch (dir) { - case DMA_DEV_TO_MEM: - return "DEV_TO_MEM"; - case DMA_MEM_TO_DEV: - return "MEM_TO_DEV"; - case DMA_MEM_TO_MEM: - return "MEM_TO_MEM"; - case DMA_DEV_TO_DEV: - return "DEV_TO_DEV"; - default: - return "invalid"; - } -} - -static inline struct device 
*dmaengine_get_dma_device(struct dma_chan *chan) -{ - if (chan->dev->chan_dma_dev) - return &chan->dev->device; - - return chan->device->dev; -} - #endif /* DMAENGINE_H */ diff --git a/include/linux/dmapool.h b/include/linux/dmapool.h index f632ecfb42..53ba737505 100644 --- a/include/linux/dmapool.h +++ b/include/linux/dmapool.h @@ -16,8 +16,6 @@ struct device; -#ifdef CONFIG_HAS_DMA - struct dma_pool *dma_pool_create(const char *name, struct device *dev, size_t size, size_t align, size_t allocation); @@ -25,6 +23,13 @@ void dma_pool_destroy(struct dma_pool *pool); void *dma_pool_alloc(struct dma_pool *pool, gfp_t mem_flags, dma_addr_t *handle); + +static inline void *dma_pool_zalloc(struct dma_pool *pool, gfp_t mem_flags, + dma_addr_t *handle) +{ + return dma_pool_alloc(pool, mem_flags | __GFP_ZERO, handle); +} + void dma_pool_free(struct dma_pool *pool, void *vaddr, dma_addr_t addr); /* @@ -34,26 +39,5 @@ struct dma_pool *dmam_pool_create(const char *name, struct device *dev, size_t size, size_t align, size_t allocation); void dmam_pool_destroy(struct dma_pool *pool); -#else /* !CONFIG_HAS_DMA */ -static inline struct dma_pool *dma_pool_create(const char *name, - struct device *dev, size_t size, size_t align, size_t allocation) -{ return NULL; } -static inline void dma_pool_destroy(struct dma_pool *pool) { } -static inline void *dma_pool_alloc(struct dma_pool *pool, gfp_t mem_flags, - dma_addr_t *handle) { return NULL; } -static inline void dma_pool_free(struct dma_pool *pool, void *vaddr, - dma_addr_t addr) { } -static inline struct dma_pool *dmam_pool_create(const char *name, - struct device *dev, size_t size, size_t align, size_t allocation) -{ return NULL; } -static inline void dmam_pool_destroy(struct dma_pool *pool) { } -#endif /* !CONFIG_HAS_DMA */ - -static inline void *dma_pool_zalloc(struct dma_pool *pool, gfp_t mem_flags, - dma_addr_t *handle) -{ - return dma_pool_alloc(pool, mem_flags | __GFP_ZERO, handle); -} - #endif diff --git 
a/include/linux/dmar.h b/include/linux/dmar.h index e04436a7ff..e9bc9292bd 100644 --- a/include/linux/dmar.h +++ b/include/linux/dmar.h @@ -1,7 +1,19 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright (c) 2006, Intel Corporation. * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., 59 Temple + * Place - Suite 330, Boston, MA 02111-1307 USA. + * * Copyright (C) Ashok Raj * Copyright (C) Shaohua Li */ @@ -14,7 +26,7 @@ #include #include #include -#include +#include struct acpi_dmar_header; @@ -27,7 +39,6 @@ struct acpi_dmar_header; /* DMAR Flags */ #define DMAR_INTR_REMAP 0x1 #define DMAR_X2APIC_OPT_OUT 0x2 -#define DMAR_PLATFORM_OPT_IN 0x4 struct intel_iommu; @@ -48,7 +59,6 @@ struct dmar_drhd_unit { u16 segment; /* PCI domain */ u8 ignored:1; /* ignore drhd */ u8 include_all:1; - u8 gfx_dedicated:1; /* graphic dedicated */ struct intel_iommu *iommu; }; @@ -70,23 +80,19 @@ struct dmar_pci_notify_info { extern struct rw_semaphore dmar_global_lock; extern struct list_head dmar_drhd_units; -#define for_each_drhd_unit(drhd) \ - list_for_each_entry_rcu(drhd, &dmar_drhd_units, list, \ - dmar_rcu_check()) +#define for_each_drhd_unit(drhd) \ + list_for_each_entry_rcu(drhd, &dmar_drhd_units, list) #define for_each_active_drhd_unit(drhd) \ - list_for_each_entry_rcu(drhd, &dmar_drhd_units, list, \ - dmar_rcu_check()) \ + list_for_each_entry_rcu(drhd, &dmar_drhd_units, list) \ if (drhd->ignored) {} else #define 
for_each_active_iommu(i, drhd) \ - list_for_each_entry_rcu(drhd, &dmar_drhd_units, list, \ - dmar_rcu_check()) \ + list_for_each_entry_rcu(drhd, &dmar_drhd_units, list) \ if (i=drhd->iommu, drhd->ignored) {} else #define for_each_iommu(i, drhd) \ - list_for_each_entry_rcu(drhd, &dmar_drhd_units, list, \ - dmar_rcu_check()) \ + list_for_each_entry_rcu(drhd, &dmar_drhd_units, list) \ if (i=drhd->iommu, 0) {} else static inline bool dmar_rcu_check(void) @@ -97,18 +103,15 @@ static inline bool dmar_rcu_check(void) #define dmar_rcu_dereference(p) rcu_dereference_check((p), dmar_rcu_check()) -#define for_each_dev_scope(devs, cnt, i, tmp) \ - for ((i) = 0; ((tmp) = (i) < (cnt) ? \ - dmar_rcu_dereference((devs)[(i)].dev) : NULL, (i) < (cnt)); \ - (i)++) +#define for_each_dev_scope(a, c, p, d) \ + for ((p) = 0; ((d) = (p) < (c) ? dmar_rcu_dereference((a)[(p)].dev) : \ + NULL, (p) < (c)); (p)++) -#define for_each_active_dev_scope(devs, cnt, i, tmp) \ - for_each_dev_scope((devs), (cnt), (i), (tmp)) \ - if (!(tmp)) { continue; } else +#define for_each_active_dev_scope(a, c, p, d) \ + for_each_dev_scope((a), (c), (p), (d)) if (!(d)) { continue; } else extern int dmar_table_init(void); extern int dmar_dev_scope_init(void); -extern void dmar_register_bus_notifier(void); extern int dmar_parse_dev_scope(void *start, void *end, int *cnt, struct dmar_dev_scope **devices, u16 segment); extern void *dmar_alloc_dev_scope(void *start, void *end, int *cnt); @@ -134,23 +137,19 @@ static inline int dmar_res_noop(struct acpi_dmar_header *hdr, void *arg) #ifdef CONFIG_INTEL_IOMMU extern int iommu_detected, no_iommu; extern int intel_iommu_init(void); -extern void intel_iommu_shutdown(void); extern int dmar_parse_one_rmrr(struct acpi_dmar_header *header, void *arg); extern int dmar_parse_one_atsr(struct acpi_dmar_header *header, void *arg); extern int dmar_check_one_atsr(struct acpi_dmar_header *hdr, void *arg); -extern int dmar_parse_one_satc(struct acpi_dmar_header *hdr, void *arg); extern 
int dmar_release_one_atsr(struct acpi_dmar_header *hdr, void *arg); extern int dmar_iommu_hotplug(struct dmar_drhd_unit *dmaru, bool insert); extern int dmar_iommu_notify_scope_dev(struct dmar_pci_notify_info *info); #else /* !CONFIG_INTEL_IOMMU: */ static inline int intel_iommu_init(void) { return -ENODEV; } -static inline void intel_iommu_shutdown(void) { } #define dmar_parse_one_rmrr dmar_res_noop #define dmar_parse_one_atsr dmar_res_noop #define dmar_check_one_atsr dmar_res_noop #define dmar_release_one_atsr dmar_res_noop -#define dmar_parse_one_satc dmar_res_noop static inline int dmar_iommu_notify_scope_dev(struct dmar_pci_notify_info *info) { @@ -170,8 +169,6 @@ static inline int dmar_ir_hotplug(struct dmar_drhd_unit *dmaru, bool insert) { return 0; } #endif /* CONFIG_IRQ_REMAP */ -extern bool dmar_platform_optin(void); - #else /* CONFIG_DMAR_TABLE */ static inline int dmar_device_add(void *handle) @@ -184,11 +181,6 @@ static inline int dmar_device_remove(void *handle) return 0; } -static inline bool dmar_platform_optin(void) -{ - return false; -} - #endif /* CONFIG_DMAR_TABLE */ struct irte { @@ -272,6 +264,11 @@ static inline void dmar_copy_shared_irte(struct irte *dst, struct irte *src) #define PDA_LOW_BIT 26 #define PDA_HIGH_BIT 32 +enum { + IRQ_REMAP_XAPIC_MODE, + IRQ_REMAP_X2APIC_MODE, +}; + /* Can't use the common MSI interrupt functions * since DMAR is not a pci device */ diff --git a/include/linux/dmi.h b/include/linux/dmi.h index 927f8a8b7a..5e9c74cf88 100644 --- a/include/linux/dmi.h +++ b/include/linux/dmi.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef __DMI_H__ #define __DMI_H__ @@ -102,9 +101,10 @@ const struct dmi_system_id *dmi_first_match(const struct dmi_system_id *list); extern const char * dmi_get_system_info(int field); extern const struct dmi_device * dmi_find_device(int type, const char *name, const struct dmi_device *from); -extern void dmi_setup(void); +extern void dmi_scan_machine(void); +extern void 
dmi_memdev_walk(void); +extern void dmi_set_dump_stack_arch_desc(void); extern bool dmi_get_date(int field, int *yearp, int *monthp, int *dayp); -extern int dmi_get_bios_year(void); extern int dmi_name_in_vendors(const char *str); extern int dmi_name_in_serial(const char *str); extern int dmi_available; @@ -112,9 +112,6 @@ extern int dmi_walk(void (*decode)(const struct dmi_header *, void *), void *private_data); extern bool dmi_match(enum dmi_field f, const char *str); extern void dmi_memdev_name(u16 handle, const char **bank, const char **device); -extern u64 dmi_memdev_size(u16 handle); -extern u8 dmi_memdev_type(u16 handle); -extern u16 dmi_memdev_handle(int slot); #else @@ -122,7 +119,9 @@ static inline int dmi_check_system(const struct dmi_system_id *list) { return 0; static inline const char * dmi_get_system_info(int field) { return NULL; } static inline const struct dmi_device * dmi_find_device(int type, const char *name, const struct dmi_device *from) { return NULL; } -static inline void dmi_setup(void) { } +static inline void dmi_scan_machine(void) { return; } +static inline void dmi_memdev_walk(void) { } +static inline void dmi_set_dump_stack_arch_desc(void) { } static inline bool dmi_get_date(int field, int *yearp, int *monthp, int *dayp) { if (yearp) @@ -133,19 +132,15 @@ static inline bool dmi_get_date(int field, int *yearp, int *monthp, int *dayp) *dayp = 0; return false; } -static inline int dmi_get_bios_year(void) { return -ENXIO; } static inline int dmi_name_in_vendors(const char *s) { return 0; } static inline int dmi_name_in_serial(const char *s) { return 0; } #define dmi_available 0 static inline int dmi_walk(void (*decode)(const struct dmi_header *, void *), - void *private_data) { return -ENXIO; } + void *private_data) { return -1; } static inline bool dmi_match(enum dmi_field f, const char *str) { return false; } static inline void dmi_memdev_name(u16 handle, const char **bank, const char **device) { } -static inline u64 dmi_memdev_size(u16 
handle) { return ~0ul; } -static inline u8 dmi_memdev_type(u16 handle) { return 0x0; } -static inline u16 dmi_memdev_handle(int slot) { return 0xffff; } static inline const struct dmi_system_id * dmi_first_match(const struct dmi_system_id *list) { return NULL; } diff --git a/include/linux/dnotify.h b/include/linux/dnotify.h index 0aad774bea..3290555a52 100644 --- a/include/linux/dnotify.h +++ b/include/linux/dnotify.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_DNOTIFY_H #define _LINUX_DNOTIFY_H /* diff --git a/include/linux/dns_resolver.h b/include/linux/dns_resolver.h index 976cbbdb28..6ac3cad9ae 100644 --- a/include/linux/dns_resolver.h +++ b/include/linux/dns_resolver.h @@ -24,11 +24,11 @@ #ifndef _LINUX_DNS_RESOLVER_H #define _LINUX_DNS_RESOLVER_H -#include +#ifdef __KERNEL__ -struct net; -extern int dns_query(struct net *net, const char *type, const char *name, size_t namelen, - const char *options, char **_result, time64_t *_expiry, - bool invalidate); +extern int dns_query(const char *type, const char *name, size_t namelen, + const char *options, char **_result, time64_t *_expiry); + +#endif /* KERNEL */ #endif /* _LINUX_DNS_RESOLVER_H */ diff --git a/include/linux/dqblk_qtree.h b/include/linux/dqblk_qtree.h index 100d22a46b..0de21e9359 100644 --- a/include/linux/dqblk_qtree.h +++ b/include/linux/dqblk_qtree.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ /* * Definitions of structures and functions for quota formats using trie */ diff --git a/include/linux/dqblk_v1.h b/include/linux/dqblk_v1.h index 85d837a148..c0d4d1e2a4 100644 --- a/include/linux/dqblk_v1.h +++ b/include/linux/dqblk_v1.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ /* * File with in-memory structures of old quota format */ diff --git a/include/linux/dqblk_v2.h b/include/linux/dqblk_v2.h index da95932ad9..18000a5426 100644 --- a/include/linux/dqblk_v2.h +++ b/include/linux/dqblk_v2.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ /* 
* Definitions for vfsv0 quota format */ diff --git a/include/linux/drbd.h b/include/linux/drbd.h index 5755537b51..002611c853 100644 --- a/include/linux/drbd.h +++ b/include/linux/drbd.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ /* drbd.h Kernel module for 2.6.x Kernels @@ -9,6 +8,19 @@ Copyright (C) 2001-2008, Philipp Reisner . Copyright (C) 2001-2008, Lars Ellenberg . + drbd is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2, or (at your option) + any later version. + + drbd is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with drbd; see the file COPYING. If not, write to + the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. */ #ifndef DRBD_H @@ -39,7 +51,7 @@ #endif extern const char *drbd_buildtag(void); -#define REL_VERSION "8.4.11" +#define REL_VERSION "8.4.7" #define API_VERSION 1 #define PRO_VERSION_MIN 86 #define PRO_VERSION_MAX 101 diff --git a/include/linux/drbd_genl.h b/include/linux/drbd_genl.h index 53f44b8cd7..c934d3a96b 100644 --- a/include/linux/drbd_genl.h +++ b/include/linux/drbd_genl.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ /* * General overview: * full generic netlink message: @@ -68,7 +67,7 @@ * genl_magic_func.h * generates an entry in the static genl_ops array, * and static register/unregister functions to - * genl_register_family(). + * genl_register_family_with_ops(). 
* * flags and handler: * GENL_op_init( .doit = x, .dumpit = y, .flags = something) @@ -133,8 +132,7 @@ GENL_struct(DRBD_NLA_DISK_CONF, 3, disk_conf, __flg_field_def(18, DRBD_GENLA_F_MANDATORY, disk_drain, DRBD_DISK_DRAIN_DEF) __flg_field_def(19, DRBD_GENLA_F_MANDATORY, md_flushes, DRBD_MD_FLUSHES_DEF) __flg_field_def(23, 0 /* OPTIONAL */, al_updates, DRBD_AL_UPDATES_DEF) - __flg_field_def(24, 0 /* OPTIONAL */, discard_zeroes_if_aligned, DRBD_DISCARD_ZEROES_IF_ALIGNED_DEF) - __flg_field_def(26, 0 /* OPTIONAL */, disable_write_same, DRBD_DISABLE_WRITE_SAME_DEF) + __flg_field_def(24, 0 /* OPTIONAL */, discard_zeroes_if_aligned, DRBD_DISCARD_ZEROES_IF_ALIGNED) ) GENL_struct(DRBD_NLA_RESOURCE_OPTS, 4, res_opts, diff --git a/include/linux/drbd_genl_api.h b/include/linux/drbd_genl_api.h index bd62efc290..9ef50d51e3 100644 --- a/include/linux/drbd_genl_api.h +++ b/include/linux/drbd_genl_api.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef DRBD_GENL_STRUCT_H #define DRBD_GENL_STRUCT_H diff --git a/include/linux/drbd_limits.h b/include/linux/drbd_limits.h index 9e33f7038b..ddac68422a 100644 --- a/include/linux/drbd_limits.h +++ b/include/linux/drbd_limits.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ /* drbd_limits.h This file is part of DRBD by Philipp Reisner and Lars Ellenberg. @@ -210,18 +209,12 @@ #define DRBD_MD_FLUSHES_DEF 1 #define DRBD_TCP_CORK_DEF 1 #define DRBD_AL_UPDATES_DEF 1 - /* We used to ignore the discard_zeroes_data setting. * To not change established (and expected) behaviour, * by default assume that, for discard_zeroes_data=0, * we can make that an effective discard_zeroes_data=1, * if we only explicitly zero-out unaligned partial chunks. */ -#define DRBD_DISCARD_ZEROES_IF_ALIGNED_DEF 1 - -/* Some backends pretend to support WRITE SAME, - * but fail such requests when they are actually submitted. - * This is to tell DRBD to not even try. 
*/ -#define DRBD_DISABLE_WRITE_SAME_DEF 0 +#define DRBD_DISCARD_ZEROES_IF_ALIGNED 1 #define DRBD_ALLOW_TWO_PRIMARIES_DEF 0 #define DRBD_ALWAYS_ASBP_DEF 0 diff --git a/include/linux/ds2782_battery.h b/include/linux/ds2782_battery.h index fb6c97e109..b4e281f65c 100644 --- a/include/linux/ds2782_battery.h +++ b/include/linux/ds2782_battery.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef __LINUX_DS2782_BATTERY_H #define __LINUX_DS2782_BATTERY_H diff --git a/include/linux/dtlk.h b/include/linux/dtlk.h index 27b95e70bd..22a7b9a5f5 100644 --- a/include/linux/dtlk.h +++ b/include/linux/dtlk.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #define DTLK_MINOR 0 #define DTLK_IO_EXTENT 0x02 diff --git a/include/linux/dw_apb_timer.h b/include/linux/dw_apb_timer.h index 82ebf92239..1f79b20918 100644 --- a/include/linux/dw_apb_timer.h +++ b/include/linux/dw_apb_timer.h @@ -1,10 +1,13 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * (C) Copyright 2009 Intel Corporation * Author: Jacob Pan (jacob.jun.pan@intel.com) * * Shared with ARM platforms, Jamie Iles, Picochip 2011 * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * * Support for the Synopsys DesignWare APB Timers. 
*/ #ifndef __DW_APB_TIMER_H__ @@ -25,6 +28,7 @@ struct dw_apb_timer { struct dw_apb_clock_event_device { struct clock_event_device ced; struct dw_apb_timer timer; + struct irqaction irqaction; void (*eoi)(struct dw_apb_timer *); }; @@ -46,6 +50,6 @@ dw_apb_clocksource_init(unsigned rating, const char *name, void __iomem *base, unsigned long freq); void dw_apb_clocksource_register(struct dw_apb_clocksource *dw_cs); void dw_apb_clocksource_start(struct dw_apb_clocksource *dw_cs); -u64 dw_apb_clocksource_read(struct dw_apb_clocksource *dw_cs); +cycle_t dw_apb_clocksource_read(struct dw_apb_clocksource *dw_cs); #endif /* __DW_APB_TIMER_H__ */ diff --git a/include/linux/dynamic_debug.h b/include/linux/dynamic_debug.h index dce631e678..546d68057e 100644 --- a/include/linux/dynamic_debug.h +++ b/include/linux/dynamic_debug.h @@ -1,8 +1,7 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _DYNAMIC_DEBUG_H #define _DYNAMIC_DEBUG_H -#if defined(CONFIG_JUMP_LABEL) +#if defined(CC_HAVE_ASM_GOTO) && defined(CONFIG_JUMP_LABEL) #include #endif @@ -32,18 +31,13 @@ struct _ddebug { #define _DPRINTK_FLAGS_INCL_FUNCNAME (1<<2) #define _DPRINTK_FLAGS_INCL_LINENO (1<<3) #define _DPRINTK_FLAGS_INCL_TID (1<<4) - -#define _DPRINTK_FLAGS_INCL_ANY \ - (_DPRINTK_FLAGS_INCL_MODNAME | _DPRINTK_FLAGS_INCL_FUNCNAME |\ - _DPRINTK_FLAGS_INCL_LINENO | _DPRINTK_FLAGS_INCL_TID) - #if defined DEBUG #define _DPRINTK_FLAGS_DEFAULT _DPRINTK_FLAGS_PRINT #else #define _DPRINTK_FLAGS_DEFAULT 0 #endif unsigned int flags:8; -#ifdef CONFIG_JUMP_LABEL +#ifdef HAVE_JUMP_LABEL union { struct static_key_true dd_key_true; struct static_key_false dd_key_false; @@ -52,14 +46,10 @@ struct _ddebug { } __attribute__((aligned(8))); - -#if defined(CONFIG_DYNAMIC_DEBUG_CORE) - -/* exported for module authors to exercise >control */ -int dynamic_debug_exec_queries(const char *query, const char *modname); - int ddebug_add_module(struct _ddebug *tab, unsigned int n, const char *modname); + +#if defined(CONFIG_DYNAMIC_DEBUG) 
extern int ddebug_remove_module(const char *mod_name); extern __printf(2, 3) void __dynamic_pr_debug(struct _ddebug *descriptor, const char *fmt, ...); @@ -80,43 +70,44 @@ void __dynamic_netdev_dbg(struct _ddebug *descriptor, const struct net_device *dev, const char *fmt, ...); -struct ib_device; - -extern __printf(3, 4) -void __dynamic_ibdev_dbg(struct _ddebug *descriptor, - const struct ib_device *ibdev, - const char *fmt, ...); - -#define DEFINE_DYNAMIC_DEBUG_METADATA(name, fmt) \ +#define DEFINE_DYNAMIC_DEBUG_METADATA_KEY(name, fmt, key, init) \ static struct _ddebug __aligned(8) \ - __section("__dyndbg") name = { \ + __attribute__((section("__verbose"))) name = { \ .modname = KBUILD_MODNAME, \ .function = __func__, \ .filename = __FILE__, \ .format = (fmt), \ .lineno = __LINE__, \ .flags = _DPRINTK_FLAGS_DEFAULT, \ - _DPRINTK_KEY_INIT \ + dd_key_init(key, init) \ } -#ifdef CONFIG_JUMP_LABEL +#ifdef HAVE_JUMP_LABEL + +#define dd_key_init(key, init) key = (init) #ifdef DEBUG - -#define _DPRINTK_KEY_INIT .key.dd_key_true = (STATIC_KEY_TRUE_INIT) +#define DEFINE_DYNAMIC_DEBUG_METADATA(name, fmt) \ + DEFINE_DYNAMIC_DEBUG_METADATA_KEY(name, fmt, .key.dd_key_true, \ + (STATIC_KEY_TRUE_INIT)) #define DYNAMIC_DEBUG_BRANCH(descriptor) \ static_branch_likely(&descriptor.key.dd_key_true) #else -#define _DPRINTK_KEY_INIT .key.dd_key_false = (STATIC_KEY_FALSE_INIT) +#define DEFINE_DYNAMIC_DEBUG_METADATA(name, fmt) \ + DEFINE_DYNAMIC_DEBUG_METADATA_KEY(name, fmt, .key.dd_key_false, \ + (STATIC_KEY_FALSE_INIT)) #define DYNAMIC_DEBUG_BRANCH(descriptor) \ static_branch_unlikely(&descriptor.key.dd_key_false) #endif -#else /* !CONFIG_JUMP_LABEL */ +#else -#define _DPRINTK_KEY_INIT +#define dd_key_init(key, init) + +#define DEFINE_DYNAMIC_DEBUG_METADATA(name, fmt) \ + DEFINE_DYNAMIC_DEBUG_METADATA_KEY(name, fmt, 0, 0) #ifdef DEBUG #define DYNAMIC_DEBUG_BRANCH(descriptor) \ @@ -126,72 +117,47 @@ void __dynamic_ibdev_dbg(struct _ddebug *descriptor, unlikely(descriptor.flags & 
_DPRINTK_FLAGS_PRINT) #endif -#endif /* CONFIG_JUMP_LABEL */ - -#define __dynamic_func_call(id, fmt, func, ...) do { \ - DEFINE_DYNAMIC_DEBUG_METADATA(id, fmt); \ - if (DYNAMIC_DEBUG_BRANCH(id)) \ - func(&id, ##__VA_ARGS__); \ -} while (0) - -#define __dynamic_func_call_no_desc(id, fmt, func, ...) do { \ - DEFINE_DYNAMIC_DEBUG_METADATA(id, fmt); \ - if (DYNAMIC_DEBUG_BRANCH(id)) \ - func(__VA_ARGS__); \ -} while (0) - -/* - * "Factory macro" for generating a call to func, guarded by a - * DYNAMIC_DEBUG_BRANCH. The dynamic debug descriptor will be - * initialized using the fmt argument. The function will be called with - * the address of the descriptor as first argument, followed by all - * the varargs. Note that fmt is repeated in invocations of this - * macro. - */ -#define _dynamic_func_call(fmt, func, ...) \ - __dynamic_func_call(__UNIQUE_ID(ddebug), fmt, func, ##__VA_ARGS__) -/* - * A variant that does the same, except that the descriptor is not - * passed as the first argument to the function; it is only called - * with precisely the macro's varargs. - */ -#define _dynamic_func_call_no_desc(fmt, func, ...) \ - __dynamic_func_call_no_desc(__UNIQUE_ID(ddebug), fmt, func, ##__VA_ARGS__) +#endif #define dynamic_pr_debug(fmt, ...) \ - _dynamic_func_call(fmt, __dynamic_pr_debug, \ - pr_fmt(fmt), ##__VA_ARGS__) +do { \ + DEFINE_DYNAMIC_DEBUG_METADATA(descriptor, fmt); \ + if (DYNAMIC_DEBUG_BRANCH(descriptor)) \ + __dynamic_pr_debug(&descriptor, pr_fmt(fmt), \ + ##__VA_ARGS__); \ +} while (0) #define dynamic_dev_dbg(dev, fmt, ...) \ - _dynamic_func_call(fmt,__dynamic_dev_dbg, \ - dev, fmt, ##__VA_ARGS__) +do { \ + DEFINE_DYNAMIC_DEBUG_METADATA(descriptor, fmt); \ + if (DYNAMIC_DEBUG_BRANCH(descriptor)) \ + __dynamic_dev_dbg(&descriptor, dev, fmt, \ + ##__VA_ARGS__); \ +} while (0) #define dynamic_netdev_dbg(dev, fmt, ...) 
\ - _dynamic_func_call(fmt, __dynamic_netdev_dbg, \ - dev, fmt, ##__VA_ARGS__) +do { \ + DEFINE_DYNAMIC_DEBUG_METADATA(descriptor, fmt); \ + if (DYNAMIC_DEBUG_BRANCH(descriptor)) \ + __dynamic_netdev_dbg(&descriptor, dev, fmt, \ + ##__VA_ARGS__); \ +} while (0) -#define dynamic_ibdev_dbg(dev, fmt, ...) \ - _dynamic_func_call(fmt, __dynamic_ibdev_dbg, \ - dev, fmt, ##__VA_ARGS__) +#define dynamic_hex_dump(prefix_str, prefix_type, rowsize, \ + groupsize, buf, len, ascii) \ +do { \ + DEFINE_DYNAMIC_DEBUG_METADATA(descriptor, \ + __builtin_constant_p(prefix_str) ? prefix_str : "hexdump");\ + if (DYNAMIC_DEBUG_BRANCH(descriptor)) \ + print_hex_dump(KERN_DEBUG, prefix_str, \ + prefix_type, rowsize, groupsize, \ + buf, len, ascii); \ +} while (0) -#define dynamic_hex_dump(prefix_str, prefix_type, rowsize, \ - groupsize, buf, len, ascii) \ - _dynamic_func_call_no_desc(__builtin_constant_p(prefix_str) ? prefix_str : "hexdump", \ - print_hex_dump, \ - KERN_DEBUG, prefix_str, prefix_type, \ - rowsize, groupsize, buf, len, ascii) - -#else /* !CONFIG_DYNAMIC_DEBUG_CORE */ +#else #include #include -#include - -static inline int ddebug_add_module(struct _ddebug *tab, unsigned int n, - const char *modname) -{ - return 0; -} static inline int ddebug_remove_module(const char *mod) { @@ -214,19 +180,6 @@ static inline int ddebug_dyndbg_module_param_cb(char *param, char *val, do { if (0) printk(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__); } while (0) #define dynamic_dev_dbg(dev, fmt, ...) 
\ do { if (0) dev_printk(KERN_DEBUG, dev, fmt, ##__VA_ARGS__); } while (0) -#define dynamic_hex_dump(prefix_str, prefix_type, rowsize, \ - groupsize, buf, len, ascii) \ - do { if (0) \ - print_hex_dump(KERN_DEBUG, prefix_str, prefix_type, \ - rowsize, groupsize, buf, len, ascii); \ - } while (0) - -static inline int dynamic_debug_exec_queries(const char *query, const char *modname) -{ - pr_warn("kernel not built with CONFIG_DYNAMIC_DEBUG_CORE\n"); - return 0; -} - -#endif /* !CONFIG_DYNAMIC_DEBUG_CORE */ +#endif #endif diff --git a/include/linux/dynamic_queue_limits.h b/include/linux/dynamic_queue_limits.h index 407c2f281b..a4be70398c 100644 --- a/include/linux/dynamic_queue_limits.h +++ b/include/linux/dynamic_queue_limits.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ /* * Dynamic queue limits (dql) - Definitions * @@ -38,8 +37,6 @@ #ifdef __KERNEL__ -#include - struct dql { /* Fields accessed in enqueue path (dql_queued) */ unsigned int num_queued; /* Total ever queued */ @@ -91,7 +88,7 @@ static inline void dql_queued(struct dql *dql, unsigned int count) /* Returns how many objects can be queued, < 0 indicates over limit. */ static inline int dql_avail(const struct dql *dql) { - return READ_ONCE(dql->adj_limit) - READ_ONCE(dql->num_queued); + return ACCESS_ONCE(dql->adj_limit) - ACCESS_ONCE(dql->num_queued); } /* Record number of completed objects and recalculate the limit. 
*/ @@ -101,7 +98,7 @@ void dql_completed(struct dql *dql, unsigned int count); void dql_reset(struct dql *dql); /* Initialize dql state */ -void dql_init(struct dql *dql, unsigned int hold_time); +int dql_init(struct dql *dql, unsigned hold_time); #endif /* _KERNEL_ */ diff --git a/include/linux/earlycpio.h b/include/linux/earlycpio.h index c70519267c..111f46d83d 100644 --- a/include/linux/earlycpio.h +++ b/include/linux/earlycpio.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_EARLYCPIO_H #define _LINUX_EARLYCPIO_H diff --git a/include/linux/ecryptfs.h b/include/linux/ecryptfs.h index 91e142abf7..8d5ab998a2 100644 --- a/include/linux/ecryptfs.h +++ b/include/linux/ecryptfs.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_ECRYPTFS_H #define _LINUX_ECRYPTFS_H diff --git a/include/linux/edac.h b/include/linux/edac.h index 4207d06996..9e0d789665 100644 --- a/include/linux/edac.h +++ b/include/linux/edac.h @@ -17,9 +17,6 @@ #include #include #include -#include - -#define EDAC_DEVICE_NAME_LEN 31 struct device; @@ -29,8 +26,40 @@ struct device; #define EDAC_OPSTATE_INT 2 extern int edac_op_state; +extern int edac_err_assert; +extern atomic_t edac_handlers; -struct bus_type *edac_get_sysfs_subsys(void); +extern int edac_handler_set(void); +extern void edac_atomic_assert_error(void); +extern struct bus_type *edac_get_sysfs_subsys(void); + +enum { + EDAC_REPORTING_ENABLED, + EDAC_REPORTING_DISABLED, + EDAC_REPORTING_FORCE +}; + +extern int edac_report_status; +#ifdef CONFIG_EDAC +static inline int get_edac_report_status(void) +{ + return edac_report_status; +} + +static inline void set_edac_report_status(int new) +{ + edac_report_status = new; +} +#else +static inline int get_edac_report_status(void) +{ + return EDAC_REPORTING_DISABLED; +} + +static inline void set_edac_report_status(int new) +{ +} +#endif static inline void opstate_init(void) { @@ -99,21 +128,12 @@ enum dev_type { * fatal (maybe it is on an unused memory 
area, * or the memory controller could recover from * it for example, by re-trying the operation). - * @HW_EVENT_ERR_DEFERRED: Deferred Error - Indicates an uncorrectable - * error whose handling is not urgent. This could - * be due to hardware data poisoning where the - * system can continue operation until the poisoned - * data is consumed. Preemptive measures may also - * be taken, e.g. offlining pages, etc. * @HW_EVENT_ERR_FATAL: Fatal Error - Uncorrected error that could not * be recovered. - * @HW_EVENT_ERR_INFO: Informational - The CPER spec defines a forth - * type of error: informational logs. */ enum hw_event_mc_err_type { HW_EVENT_ERR_CORRECTED, HW_EVENT_ERR_UNCORRECTED, - HW_EVENT_ERR_DEFERRED, HW_EVENT_ERR_FATAL, HW_EVENT_ERR_INFO, }; @@ -125,8 +145,6 @@ static inline char *mc_event_error_type(const unsigned int err_type) return "Corrected"; case HW_EVENT_ERR_UNCORRECTED: return "Uncorrected"; - case HW_EVENT_ERR_DEFERRED: - return "Deferred"; case HW_EVENT_ERR_FATAL: return "Fatal"; default: @@ -139,7 +157,7 @@ static inline char *mc_event_error_type(const unsigned int err_type) * enum mem_type - memory types. For a more detailed reference, please see * http://en.wikipedia.org/wiki/DRAM * - * @MEM_EMPTY: Empty csrow + * @MEM_EMPTY Empty csrow * @MEM_RESERVED: Reserved csrow type * @MEM_UNKNOWN: Unknown csrow type * @MEM_FPM: FPM - Fast Page Mode, used on systems up to 1995. @@ -159,8 +177,8 @@ static inline char *mc_event_error_type(const unsigned int err_type) * part of the memory details to the memory controller. * @MEM_RMBS: Rambus DRAM, used on a few Pentium III/IV controllers. * @MEM_DDR2: DDR2 RAM, as described at JEDEC JESD79-2F. - * Those memories are labeled as "PC2-" instead of "PC" to - * differentiate from DDR. + * Those memories are labed as "PC2-" instead of "PC" to + * differenciate from DDR. * @MEM_FB_DDR2: Fully-Buffered DDR2, as described at JEDEC Std No. 205 * and JESD206. 
* Those memories are accessed per DIMM slot, and not by @@ -174,17 +192,10 @@ static inline char *mc_event_error_type(const unsigned int err_type) * @MEM_DDR3: DDR3 RAM * @MEM_RDDR3: Registered DDR3 RAM * This is a variant of the DDR3 memories. - * @MEM_LRDDR3: Load-Reduced DDR3 memory. - * @MEM_LPDDR3: Low-Power DDR3 memory. + * @MEM_LRDDR3 Load-Reduced DDR3 memory. * @MEM_DDR4: Unbuffered DDR4 RAM * @MEM_RDDR4: Registered DDR4 RAM * This is a variant of the DDR4 memories. - * @MEM_LRDDR4: Load-Reduced DDR4 memory. - * @MEM_LPDDR4: Low-Power DDR4 memory. - * @MEM_DDR5: Unbuffered DDR5 RAM - * @MEM_NVDIMM: Non-volatile RAM - * @MEM_WIO2: Wide I/O 2. - * @MEM_HBM2: High bandwidth Memory Gen 2. */ enum mem_type { MEM_EMPTY = 0, @@ -205,15 +216,8 @@ enum mem_type { MEM_DDR3, MEM_RDDR3, MEM_LRDDR3, - MEM_LPDDR3, MEM_DDR4, MEM_RDDR4, - MEM_LRDDR4, - MEM_LPDDR4, - MEM_DDR5, - MEM_NVDIMM, - MEM_WIO2, - MEM_HBM2, }; #define MEM_FLAG_EMPTY BIT(MEM_EMPTY) @@ -233,18 +237,11 @@ enum mem_type { #define MEM_FLAG_XDR BIT(MEM_XDR) #define MEM_FLAG_DDR3 BIT(MEM_DDR3) #define MEM_FLAG_RDDR3 BIT(MEM_RDDR3) -#define MEM_FLAG_LPDDR3 BIT(MEM_LPDDR3) #define MEM_FLAG_DDR4 BIT(MEM_DDR4) #define MEM_FLAG_RDDR4 BIT(MEM_RDDR4) -#define MEM_FLAG_LRDDR4 BIT(MEM_LRDDR4) -#define MEM_FLAG_LPDDR4 BIT(MEM_LPDDR4) -#define MEM_FLAG_DDR5 BIT(MEM_DDR5) -#define MEM_FLAG_NVDIMM BIT(MEM_NVDIMM) -#define MEM_FLAG_WIO2 BIT(MEM_WIO2) -#define MEM_FLAG_HBM2 BIT(MEM_HBM2) /** - * enum edac_type - Error Detection and Correction capabilities and mode + * enum edac-type - Error Detection and Correction capabilities and mode * @EDAC_UNKNOWN: Unknown if ECC is available * @EDAC_NONE: Doesn't support ECC * @EDAC_RESERVED: Reserved ECC type @@ -281,7 +278,7 @@ enum edac_type { /** * enum scrub_type - scrubbing capabilities - * @SCRUB_UNKNOWN: Unknown if scrubber is available + * @SCRUB_UNKNOWN Unknown if scrubber is available * @SCRUB_NONE: No scrubber * @SCRUB_SW_PROG: SW progressive (sequential) scrubbing * 
@SCRUB_SW_SRC: Software scrub only errors @@ -290,7 +287,7 @@ enum edac_type { * @SCRUB_HW_PROG: HW progressive (sequential) scrubbing * @SCRUB_HW_SRC: Hardware scrub only errors * @SCRUB_HW_PROG_SRC: Progressive hardware scrub from an error - * @SCRUB_HW_TUNABLE: Hardware scrub frequency is tunable + * SCRUB_HW_TUNABLE: Hardware scrub frequency is tunable */ enum scrub_type { SCRUB_UNKNOWN = 0, @@ -323,8 +320,116 @@ enum scrub_type { #define OP_RUNNING_POLL_INTR 0x203 #define OP_OFFLINE 0x300 +/* + * Concepts used at the EDAC subsystem + * + * There are several things to be aware of that aren't at all obvious: + * + * SOCKETS, SOCKET SETS, BANKS, ROWS, CHIP-SELECT ROWS, CHANNELS, etc.. + * + * These are some of the many terms that are thrown about that don't always + * mean what people think they mean (Inconceivable!). In the interest of + * creating a common ground for discussion, terms and their definitions + * will be established. + * + * Memory devices: The individual DRAM chips on a memory stick. These + * devices commonly output 4 and 8 bits each (x4, x8). + * Grouping several of these in parallel provides the + * number of bits that the memory controller expects: + * typically 72 bits, in order to provide 64 bits + + * 8 bits of ECC data. + * + * Memory Stick: A printed circuit board that aggregates multiple + * memory devices in parallel. In general, this is the + * Field Replaceable Unit (FRU) which gets replaced, in + * the case of excessive errors. Most often it is also + * called DIMM (Dual Inline Memory Module). + * + * Memory Socket: A physical connector on the motherboard that accepts + * a single memory stick. Also called as "slot" on several + * datasheets. + * + * Channel: A memory controller channel, responsible to communicate + * with a group of DIMMs. Each channel has its own + * independent control (command) and data bus, and can + * be used independently or grouped with other channels. 
+ * + * Branch: It is typically the highest hierarchy on a + * Fully-Buffered DIMM memory controller. + * Typically, it contains two channels. + * Two channels at the same branch can be used in single + * mode or in lockstep mode. + * When lockstep is enabled, the cacheline is doubled, + * but it generally brings some performance penalty. + * Also, it is generally not possible to point to just one + * memory stick when an error occurs, as the error + * correction code is calculated using two DIMMs instead + * of one. Due to that, it is capable of correcting more + * errors than on single mode. + * + * Single-channel: The data accessed by the memory controller is contained + * into one dimm only. E. g. if the data is 64 bits-wide, + * the data flows to the CPU using one 64 bits parallel + * access. + * Typically used with SDR, DDR, DDR2 and DDR3 memories. + * FB-DIMM and RAMBUS use a different concept for channel, + * so this concept doesn't apply there. + * + * Double-channel: The data size accessed by the memory controller is + * interlaced into two dimms, accessed at the same time. + * E. g. if the DIMM is 64 bits-wide (72 bits with ECC), + * the data flows to the CPU using a 128 bits parallel + * access. + * + * Chip-select row: This is the name of the DRAM signal used to select the + * DRAM ranks to be accessed. Common chip-select rows for + * single channel are 64 bits, for dual channel 128 bits. + * It may not be visible by the memory controller, as some + * DIMM types have a memory buffer that can hide direct + * access to it from the Memory Controller. + * + * Single-Ranked stick: A Single-ranked stick has 1 chip-select row of memory. + * Motherboards commonly drive two chip-select pins to + * a memory stick. A single-ranked stick, will occupy + * only one of those rows. The other will be unused. + * + * Double-Ranked stick: A double-ranked stick has two chip-select rows which + * access different sets of memory devices. 
The two + * rows cannot be accessed concurrently. + * + * Double-sided stick: DEPRECATED TERM, see Double-Ranked stick. + * A double-sided stick has two chip-select rows which + * access different sets of memory devices. The two + * rows cannot be accessed concurrently. "Double-sided" + * is irrespective of the memory devices being mounted + * on both sides of the memory stick. + * + * Socket set: All of the memory sticks that are required for + * a single memory access or all of the memory sticks + * spanned by a chip-select row. A single socket set + * has two chip-select rows and if double-sided sticks + * are used these will occupy those chip-select rows. + * + * Bank: This term is avoided because it is unclear when + * needing to distinguish between chip-select rows and + * socket sets. + * + * Controller pages: + * + * Physical pages: + * + * Virtual pages: + * + * + * STRUCTURE ORGANIZATION AND CHOICES + * + * + * + * PS - I enjoyed writing all that about as much as you enjoyed reading it. + */ + /** - * enum edac_mc_layer_type - memory controller hierarchy layer + * enum edac_mc_layer - memory controller hierarchy layer * * @EDAC_MC_LAYER_BRANCH: memory layer is named "branch" * @EDAC_MC_LAYER_CHANNEL: memory layer is named "channel" @@ -347,7 +452,7 @@ enum edac_mc_layer_type { /** * struct edac_mc_layer - describes the memory controller hierarchy - * @type: layer type + * @layer: layer type * @size: number of components per layer. 
For example, * if the channel layer has two channels, size = 2 * @is_virt_csrow: This layer is part of the "csrow" when old API @@ -369,16 +474,81 @@ struct edac_mc_layer { */ #define EDAC_MAX_LAYERS 3 +/** + * EDAC_DIMM_OFF - Macro responsible to get a pointer offset inside a pointer array + * for the element given by [layer0,layer1,layer2] position + * + * @layers: a struct edac_mc_layer array, describing how many elements + * were allocated for each layer + * @n_layers: Number of layers at the @layers array + * @layer0: layer0 position + * @layer1: layer1 position. Unused if n_layers < 2 + * @layer2: layer2 position. Unused if n_layers < 3 + * + * For 1 layer, this macro returns &var[layer0] - &var + * For 2 layers, this macro is similar to allocate a bi-dimensional array + * and to return "&var[layer0][layer1] - &var" + * For 3 layers, this macro is similar to allocate a tri-dimensional array + * and to return "&var[layer0][layer1][layer2] - &var" + * + * A loop could be used here to make it more generic, but, as we only have + * 3 layers, this is a little faster. + * By design, layers can never be 0 or more than 3. If that ever happens, + * a NULL is returned, causing an OOPS during the memory allocation routine, + * with would point to the developer that he's doing something wrong. 
+ */ +#define EDAC_DIMM_OFF(layers, nlayers, layer0, layer1, layer2) ({ \ + int __i; \ + if ((nlayers) == 1) \ + __i = layer0; \ + else if ((nlayers) == 2) \ + __i = (layer1) + ((layers[1]).size * (layer0)); \ + else if ((nlayers) == 3) \ + __i = (layer2) + ((layers[2]).size * ((layer1) + \ + ((layers[1]).size * (layer0)))); \ + else \ + __i = -EINVAL; \ + __i; \ +}) + +/** + * EDAC_DIMM_PTR - Macro responsible to get a pointer inside a pointer array + * for the element given by [layer0,layer1,layer2] position + * + * @layers: a struct edac_mc_layer array, describing how many elements + * were allocated for each layer + * @var: name of the var where we want to get the pointer + * (like mci->dimms) + * @n_layers: Number of layers at the @layers array + * @layer0: layer0 position + * @layer1: layer1 position. Unused if n_layers < 2 + * @layer2: layer2 position. Unused if n_layers < 3 + * + * For 1 layer, this macro returns &var[layer0] + * For 2 layers, this macro is similar to allocate a bi-dimensional array + * and to return "&var[layer0][layer1]" + * For 3 layers, this macro is similar to allocate a tri-dimensional array + * and to return "&var[layer0][layer1][layer2]" + */ +#define EDAC_DIMM_PTR(layers, var, nlayers, layer0, layer1, layer2) ({ \ + typeof(*var) __p; \ + int ___i = EDAC_DIMM_OFF(layers, nlayers, layer0, layer1, layer2); \ + if (___i < 0) \ + __p = NULL; \ + else \ + __p = (var)[___i]; \ + __p; \ +}) + struct dimm_info { struct device dev; char label[EDAC_MC_LABEL_LEN + 1]; /* DIMM label on motherboard */ /* Memory location data */ - unsigned int location[EDAC_MAX_LAYERS]; + unsigned location[EDAC_MAX_LAYERS]; struct mem_ctl_info *mci; /* the parent */ - unsigned int idx; /* index within the parent dimm array */ u32 grain; /* granularity of reported error in bytes */ enum dev_type dtype; /* memory device type */ @@ -387,12 +557,7 @@ struct dimm_info { u32 nr_pages; /* number of pages on this dimm */ - unsigned int csrow, cschannel; /* Points to the 
old API data */ - - u16 smbios_handle; /* Handle for SMBIOS type 17 */ - - u32 ce_count; - u32 ue_count; + unsigned csrow, cschannel; /* Points to the old API data */ }; /** @@ -449,10 +614,9 @@ struct errcount_attribute_data { }; /** - * struct edac_raw_error_desc - Raw error report structure + * edac_raw_error_desc - Raw error report structure * @grain: minimum granularity for an error report, in bytes * @error_count: number of errors of the same type - * @type: severity of the error (CE/UE/Fatal) * @top_layer: top layer of the error (layer[0]) * @mid_layer: middle layer of the error (layer[1]) * @low_layer: low layer of the error (layer[2]) @@ -464,14 +628,20 @@ struct errcount_attribute_data { * @location: location of the error * @label: label of the affected DIMM(s) * @other_detail: other driver-specific detail about the error + * @enable_per_layer_report: if false, the error affects all layers + * (typically, a memory controller error) */ struct edac_raw_error_desc { + /* + * NOTE: everything before grain won't be cleaned by + * edac_raw_error_desc_clean() + */ char location[LOCATION_SIZE]; char label[(EDAC_MC_LABEL_LEN + 1 + sizeof(OTHER_LABEL)) * EDAC_MAX_LABELS]; long grain; + /* the vars below and grain will be cleaned on every new error report */ u16 error_count; - enum hw_event_mc_err_type type; int top_layer; int mid_layer; int low_layer; @@ -480,6 +650,7 @@ struct edac_raw_error_desc { unsigned long syndrome; const char *msg; const char *other_detail; + bool enable_per_layer_report; }; /* MEMORY controller information structure @@ -530,7 +701,7 @@ struct mem_ctl_info { unsigned long page); int mc_idx; struct csrow_info **csrows; - unsigned int nr_csrows, num_cschannel; + unsigned nr_csrows, num_cschannel; /* * Memory Controller hierarchy @@ -541,14 +712,14 @@ struct mem_ctl_info { * of the recent drivers enumerate memories per DIMM, instead. * When the memory controller is per rank, csbased is true. 
*/ - unsigned int n_layers; + unsigned n_layers; struct edac_mc_layer *layers; bool csbased; /* * DIMM info. Will eventually remove the entire csrows_info some day */ - unsigned int tot_dimms; + unsigned tot_dimms; struct dimm_info **dimms; /* @@ -558,6 +729,7 @@ struct mem_ctl_info { */ struct device *pdev; const char *mod_name; + const char *mod_ver; const char *ctl_name; const char *dev_name; void *pvt_info; @@ -569,6 +741,7 @@ struct mem_ctl_info { */ u32 ce_noinfo_count, ue_noinfo_count; u32 ue_mc, ce_mc; + u32 *ce_per_layer[EDAC_MAX_LAYERS], *ue_per_layer[EDAC_MAX_LAYERS]; struct completion complete; @@ -602,54 +775,9 @@ struct mem_ctl_info { u16 fake_inject_count; }; -#define mci_for_each_dimm(mci, dimm) \ - for ((dimm) = (mci)->dimms[0]; \ - (dimm); \ - (dimm) = (dimm)->idx + 1 < (mci)->tot_dimms \ - ? (mci)->dimms[(dimm)->idx + 1] \ - : NULL) - -/** - * edac_get_dimm - Get DIMM info from a memory controller given by - * [layer0,layer1,layer2] position - * - * @mci: MC descriptor struct mem_ctl_info - * @layer0: layer0 position - * @layer1: layer1 position. Unused if n_layers < 2 - * @layer2: layer2 position. Unused if n_layers < 3 - * - * For 1 layer, this function returns "dimms[layer0]"; - * - * For 2 layers, this function is similar to allocating a two-dimensional - * array and returning "dimms[layer0][layer1]"; - * - * For 3 layers, this function is similar to allocating a tri-dimensional - * array and returning "dimms[layer0][layer1][layer2]"; +/* + * Maximum number of memory controllers in the coherent fabric. 
*/ -static inline struct dimm_info *edac_get_dimm(struct mem_ctl_info *mci, - int layer0, int layer1, int layer2) -{ - int index; +#define EDAC_MAX_MCS 16 - if (layer0 < 0 - || (mci->n_layers > 1 && layer1 < 0) - || (mci->n_layers > 2 && layer2 < 0)) - return NULL; - - index = layer0; - - if (mci->n_layers > 1) - index = index * mci->layers[1].size + layer1; - - if (mci->n_layers > 2) - index = index * mci->layers[2].size + layer2; - - if (index < 0 || index >= mci->tot_dimms) - return NULL; - - if (WARN_ON_ONCE(mci->dimms[index]->idx != index)) - return NULL; - - return mci->dimms[index]; -} -#endif /* _LINUX_EDAC_H_ */ +#endif diff --git a/include/linux/edd.h b/include/linux/edd.h index 1c16fbcb81..83d4371ec9 100644 --- a/include/linux/edd.h +++ b/include/linux/edd.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * linux/include/linux/edd.h * Copyright (C) 2002, 2003, 2004 Dell Inc. @@ -17,6 +16,16 @@ * transferred into the edd structure, and in drivers/firmware/edd.c, that * information is used to identify BIOS boot disk. The code in setup.S * is very sensitive to the size of these structures. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License v2.0 as published by + * the Free Software Foundation + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * */ #ifndef _LINUX_EDD_H #define _LINUX_EDD_H diff --git a/include/linux/edma.h b/include/linux/edma.h new file mode 100644 index 0000000000..a1307e7827 --- /dev/null +++ b/include/linux/edma.h @@ -0,0 +1,29 @@ +/* + * TI EDMA DMA engine driver + * + * Copyright 2012 Texas Instruments + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation version 2. + * + * This program is distributed "as is" WITHOUT ANY WARRANTY of any + * kind, whether express or implied; without even the implied warranty + * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ +#ifndef __LINUX_EDMA_H +#define __LINUX_EDMA_H + +struct dma_chan; + +#if defined(CONFIG_TI_EDMA) || defined(CONFIG_TI_EDMA_MODULE) +bool edma_filter_fn(struct dma_chan *, void *); +#else +static inline bool edma_filter_fn(struct dma_chan *chan, void *param) +{ + return false; +} +#endif + +#endif diff --git a/include/linux/eeprom_93cx6.h b/include/linux/eeprom_93cx6.h index c860c72a92..eb0b198805 100644 --- a/include/linux/eeprom_93cx6.h +++ b/include/linux/eeprom_93cx6.h @@ -1,8 +1,21 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ /* Copyright (C) 2004 - 2006 rt2x00 SourceForge Project + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. 
+ + You should have received a copy of the GNU General Public License + along with this program; if not, write to the + Free Software Foundation, Inc., + 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ /* diff --git a/include/linux/eeprom_93xx46.h b/include/linux/eeprom_93xx46.h index 34c2175e6a..885f587a35 100644 --- a/include/linux/eeprom_93xx46.h +++ b/include/linux/eeprom_93xx46.h @@ -1,26 +1,21 @@ -/* SPDX-License-Identifier: GPL-2.0 */ /* * Module: eeprom_93xx46 * platform description for 93xx46 EEPROMs. */ -#include + +struct gpio_desc; struct eeprom_93xx46_platform_data { unsigned char flags; #define EE_ADDR8 0x01 /* 8 bit addr. cfg */ #define EE_ADDR16 0x02 /* 16 bit addr. cfg */ #define EE_READONLY 0x08 /* forbid writing */ -#define EE_SIZE1K 0x10 /* 1 kb of data, that is a 93xx46 */ -#define EE_SIZE2K 0x20 /* 2 kb of data, that is a 93xx56 */ -#define EE_SIZE4K 0x40 /* 4 kb of data, that is a 93xx66 */ unsigned int quirks; /* Single word read transfers only; no sequential read. */ #define EEPROM_93XX46_QUIRK_SINGLE_WORD_READ (1 << 0) /* Instructions such as EWEN are (addrlen + 2) in length. */ #define EEPROM_93XX46_QUIRK_INSTRUCTION_LENGTH (1 << 1) -/* Add extra cycle after address during a read */ -#define EEPROM_93XX46_QUIRK_EXTRA_READ_CYCLE BIT(2) /* * optional hooks to control additional logic diff --git a/include/linux/efi-bgrt.h b/include/linux/efi-bgrt.h index e6cd510056..051b21fedf 100644 --- a/include/linux/efi-bgrt.h +++ b/include/linux/efi-bgrt.h @@ -1,25 +1,20 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_EFI_BGRT_H #define _LINUX_EFI_BGRT_H -#include - #ifdef CONFIG_ACPI_BGRT -void efi_bgrt_init(struct acpi_table_header *table); -int __init acpi_parse_bgrt(struct acpi_table_header *table); +#include + +void efi_bgrt_init(void); /* The BGRT data itself; only valid if bgrt_image != NULL. 
*/ +extern void *bgrt_image; extern size_t bgrt_image_size; -extern struct acpi_table_bgrt bgrt_tab; +extern struct acpi_table_bgrt *bgrt_tab; #else /* !CONFIG_ACPI_BGRT */ -static inline void efi_bgrt_init(struct acpi_table_header *table) {} -static inline int __init acpi_parse_bgrt(struct acpi_table_header *table) -{ - return 0; -} +static inline void efi_bgrt_init(void) {} #endif /* !CONFIG_ACPI_BGRT */ diff --git a/include/linux/efi.h b/include/linux/efi.h index 6b5d36babf..36d2b6006c 100644 --- a/include/linux/efi.h +++ b/include/linux/efi.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_EFI_H #define _LINUX_EFI_H @@ -29,17 +28,16 @@ #include #define EFI_SUCCESS 0 -#define EFI_LOAD_ERROR ( 1 | (1UL << (BITS_PER_LONG-1))) +#define EFI_LOAD_ERROR ( 1 | (1UL << (BITS_PER_LONG-1))) #define EFI_INVALID_PARAMETER ( 2 | (1UL << (BITS_PER_LONG-1))) #define EFI_UNSUPPORTED ( 3 | (1UL << (BITS_PER_LONG-1))) -#define EFI_BAD_BUFFER_SIZE ( 4 | (1UL << (BITS_PER_LONG-1))) +#define EFI_BAD_BUFFER_SIZE ( 4 | (1UL << (BITS_PER_LONG-1))) #define EFI_BUFFER_TOO_SMALL ( 5 | (1UL << (BITS_PER_LONG-1))) #define EFI_NOT_READY ( 6 | (1UL << (BITS_PER_LONG-1))) #define EFI_DEVICE_ERROR ( 7 | (1UL << (BITS_PER_LONG-1))) #define EFI_WRITE_PROTECTED ( 8 | (1UL << (BITS_PER_LONG-1))) #define EFI_OUT_OF_RESOURCES ( 9 | (1UL << (BITS_PER_LONG-1))) #define EFI_NOT_FOUND (14 | (1UL << (BITS_PER_LONG-1))) -#define EFI_TIMEOUT (18 | (1UL << (BITS_PER_LONG-1))) #define EFI_ABORTED (21 | (1UL << (BITS_PER_LONG-1))) #define EFI_SECURITY_VIOLATION (26 | (1UL << (BITS_PER_LONG-1))) @@ -49,33 +47,10 @@ typedef u16 efi_char16_t; /* UNICODE character */ typedef u64 efi_physical_addr_t; typedef void *efi_handle_t; -#if defined(CONFIG_X86_64) -#define __efiapi __attribute__((ms_abi)) -#elif defined(CONFIG_X86_32) -#define __efiapi __attribute__((regparm(0))) -#else -#define __efiapi -#endif +typedef uuid_le efi_guid_t; -/* - * The UEFI spec and EDK2 reference implementation both 
define EFI_GUID as - * struct { u32 a; u16; b; u16 c; u8 d[8]; }; and so the implied alignment - * is 32 bits not 8 bits like our guid_t. In some cases (i.e., on 32-bit ARM), - * this means that firmware services invoked by the kernel may assume that - * efi_guid_t* arguments are 32-bit aligned, and use memory accessors that - * do not tolerate misalignment. So let's set the minimum alignment to 32 bits. - * - * Note that the UEFI spec as well as some comments in the EDK2 code base - * suggest that EFI_GUID should be 64-bit aligned, but this appears to be - * a mistake, given that no code seems to exist that actually enforces that - * or relies on it. - */ -typedef guid_t efi_guid_t __aligned(__alignof__(u32)); - -#define EFI_GUID(a, b, c, d...) (efi_guid_t){ { \ - (a) & 0xff, ((a) >> 8) & 0xff, ((a) >> 16) & 0xff, ((a) >> 24) & 0xff, \ - (b) & 0xff, ((b) >> 8) & 0xff, \ - (c) & 0xff, ((c) >> 8) & 0xff, d } } +#define EFI_GUID(a,b,c,d0,d1,d2,d3,d4,d5,d6,d7) \ + UUID_LE(a, b, c, d0, d1, d2, d3, d4, d5, d6, d7) /* * Generic EFI table header @@ -123,8 +98,6 @@ typedef struct { #define EFI_MEMORY_MORE_RELIABLE \ ((u64)0x0000000000010000ULL) /* higher reliability */ #define EFI_MEMORY_RO ((u64)0x0000000000020000ULL) /* read-only */ -#define EFI_MEMORY_SP ((u64)0x0000000000040000ULL) /* soft reserved */ -#define EFI_MEMORY_CPU_CRYPTO ((u64)0x0000000000080000ULL) /* supports encryption */ #define EFI_MEMORY_RUNTIME ((u64)0x8000000000000000ULL) /* range requires runtime mapping */ #define EFI_MEMORY_DESCRIPTOR_VERSION 1 @@ -148,6 +121,15 @@ typedef struct { u32 imagesize; } efi_capsule_header_t; +struct efi_boot_memmap { + efi_memory_desc_t **map; + unsigned long *map_size; + unsigned long *desc_size; + u32 *desc_ver; + unsigned long *key_ptr; + unsigned long *buff_size; +}; + /* * EFI capsule flags */ @@ -155,19 +137,15 @@ typedef struct { #define EFI_CAPSULE_POPULATE_SYSTEM_TABLE 0x00020000 #define EFI_CAPSULE_INITIATE_RESET 0x00040000 -struct capsule_info { - 
efi_capsule_header_t header; - efi_capsule_header_t *capsule; - int reset_type; - long index; - size_t count; - size_t total_size; - struct page **pages; - phys_addr_t *phys; - size_t page_bytes_remain; -}; +/* + * Allocation types for calls to boottime->allocate_pages. + */ +#define EFI_ALLOCATE_ANY_PAGES 0 +#define EFI_ALLOCATE_MAX_ADDRESS 1 +#define EFI_ALLOCATE_ADDRESS 2 +#define EFI_MAX_ALLOCATE_TYPE 3 -int __efi_capsule_setup_info(struct capsule_info *cap_info); +typedef int (*efi_freemem_callback_t) (u64 start, u64 end, void *arg); /* * Types and defines for Time Services @@ -196,7 +174,275 @@ typedef struct { u8 sets_to_zero; } efi_time_cap_t; -typedef union efi_boot_services efi_boot_services_t; +typedef struct { + efi_table_hdr_t hdr; + u32 raise_tpl; + u32 restore_tpl; + u32 allocate_pages; + u32 free_pages; + u32 get_memory_map; + u32 allocate_pool; + u32 free_pool; + u32 create_event; + u32 set_timer; + u32 wait_for_event; + u32 signal_event; + u32 close_event; + u32 check_event; + u32 install_protocol_interface; + u32 reinstall_protocol_interface; + u32 uninstall_protocol_interface; + u32 handle_protocol; + u32 __reserved; + u32 register_protocol_notify; + u32 locate_handle; + u32 locate_device_path; + u32 install_configuration_table; + u32 load_image; + u32 start_image; + u32 exit; + u32 unload_image; + u32 exit_boot_services; + u32 get_next_monotonic_count; + u32 stall; + u32 set_watchdog_timer; + u32 connect_controller; + u32 disconnect_controller; + u32 open_protocol; + u32 close_protocol; + u32 open_protocol_information; + u32 protocols_per_handle; + u32 locate_handle_buffer; + u32 locate_protocol; + u32 install_multiple_protocol_interfaces; + u32 uninstall_multiple_protocol_interfaces; + u32 calculate_crc32; + u32 copy_mem; + u32 set_mem; + u32 create_event_ex; +} __packed efi_boot_services_32_t; + +typedef struct { + efi_table_hdr_t hdr; + u64 raise_tpl; + u64 restore_tpl; + u64 allocate_pages; + u64 free_pages; + u64 get_memory_map; + u64 
allocate_pool; + u64 free_pool; + u64 create_event; + u64 set_timer; + u64 wait_for_event; + u64 signal_event; + u64 close_event; + u64 check_event; + u64 install_protocol_interface; + u64 reinstall_protocol_interface; + u64 uninstall_protocol_interface; + u64 handle_protocol; + u64 __reserved; + u64 register_protocol_notify; + u64 locate_handle; + u64 locate_device_path; + u64 install_configuration_table; + u64 load_image; + u64 start_image; + u64 exit; + u64 unload_image; + u64 exit_boot_services; + u64 get_next_monotonic_count; + u64 stall; + u64 set_watchdog_timer; + u64 connect_controller; + u64 disconnect_controller; + u64 open_protocol; + u64 close_protocol; + u64 open_protocol_information; + u64 protocols_per_handle; + u64 locate_handle_buffer; + u64 locate_protocol; + u64 install_multiple_protocol_interfaces; + u64 uninstall_multiple_protocol_interfaces; + u64 calculate_crc32; + u64 copy_mem; + u64 set_mem; + u64 create_event_ex; +} __packed efi_boot_services_64_t; + +/* + * EFI Boot Services table + */ +typedef struct { + efi_table_hdr_t hdr; + void *raise_tpl; + void *restore_tpl; + efi_status_t (*allocate_pages)(int, int, unsigned long, + efi_physical_addr_t *); + efi_status_t (*free_pages)(efi_physical_addr_t, unsigned long); + efi_status_t (*get_memory_map)(unsigned long *, void *, unsigned long *, + unsigned long *, u32 *); + efi_status_t (*allocate_pool)(int, unsigned long, void **); + efi_status_t (*free_pool)(void *); + void *create_event; + void *set_timer; + void *wait_for_event; + void *signal_event; + void *close_event; + void *check_event; + void *install_protocol_interface; + void *reinstall_protocol_interface; + void *uninstall_protocol_interface; + efi_status_t (*handle_protocol)(efi_handle_t, efi_guid_t *, void **); + void *__reserved; + void *register_protocol_notify; + efi_status_t (*locate_handle)(int, efi_guid_t *, void *, + unsigned long *, efi_handle_t *); + void *locate_device_path; + efi_status_t 
(*install_configuration_table)(efi_guid_t *, void *); + void *load_image; + void *start_image; + void *exit; + void *unload_image; + efi_status_t (*exit_boot_services)(efi_handle_t, unsigned long); + void *get_next_monotonic_count; + void *stall; + void *set_watchdog_timer; + void *connect_controller; + void *disconnect_controller; + void *open_protocol; + void *close_protocol; + void *open_protocol_information; + void *protocols_per_handle; + void *locate_handle_buffer; + efi_status_t (*locate_protocol)(efi_guid_t *, void *, void **); + void *install_multiple_protocol_interfaces; + void *uninstall_multiple_protocol_interfaces; + void *calculate_crc32; + void *copy_mem; + void *set_mem; + void *create_event_ex; +} efi_boot_services_t; + +typedef enum { + EfiPciIoWidthUint8, + EfiPciIoWidthUint16, + EfiPciIoWidthUint32, + EfiPciIoWidthUint64, + EfiPciIoWidthFifoUint8, + EfiPciIoWidthFifoUint16, + EfiPciIoWidthFifoUint32, + EfiPciIoWidthFifoUint64, + EfiPciIoWidthFillUint8, + EfiPciIoWidthFillUint16, + EfiPciIoWidthFillUint32, + EfiPciIoWidthFillUint64, + EfiPciIoWidthMaximum +} EFI_PCI_IO_PROTOCOL_WIDTH; + +typedef enum { + EfiPciIoAttributeOperationGet, + EfiPciIoAttributeOperationSet, + EfiPciIoAttributeOperationEnable, + EfiPciIoAttributeOperationDisable, + EfiPciIoAttributeOperationSupported, + EfiPciIoAttributeOperationMaximum +} EFI_PCI_IO_PROTOCOL_ATTRIBUTE_OPERATION; + +typedef struct { + u32 read; + u32 write; +} efi_pci_io_protocol_access_32_t; + +typedef struct { + u64 read; + u64 write; +} efi_pci_io_protocol_access_64_t; + +typedef struct { + void *read; + void *write; +} efi_pci_io_protocol_access_t; + +typedef struct { + u32 poll_mem; + u32 poll_io; + efi_pci_io_protocol_access_32_t mem; + efi_pci_io_protocol_access_32_t io; + efi_pci_io_protocol_access_32_t pci; + u32 copy_mem; + u32 map; + u32 unmap; + u32 allocate_buffer; + u32 free_buffer; + u32 flush; + u32 get_location; + u32 attributes; + u32 get_bar_attributes; + u32 set_bar_attributes; + 
uint64_t romsize; + void *romimage; +} efi_pci_io_protocol_32; + +typedef struct { + u64 poll_mem; + u64 poll_io; + efi_pci_io_protocol_access_64_t mem; + efi_pci_io_protocol_access_64_t io; + efi_pci_io_protocol_access_64_t pci; + u64 copy_mem; + u64 map; + u64 unmap; + u64 allocate_buffer; + u64 free_buffer; + u64 flush; + u64 get_location; + u64 attributes; + u64 get_bar_attributes; + u64 set_bar_attributes; + uint64_t romsize; + void *romimage; +} efi_pci_io_protocol_64; + +typedef struct { + void *poll_mem; + void *poll_io; + efi_pci_io_protocol_access_t mem; + efi_pci_io_protocol_access_t io; + efi_pci_io_protocol_access_t pci; + void *copy_mem; + void *map; + void *unmap; + void *allocate_buffer; + void *free_buffer; + void *flush; + void *get_location; + void *attributes; + void *get_bar_attributes; + void *set_bar_attributes; + uint64_t romsize; + void *romimage; +} efi_pci_io_protocol; + +#define EFI_PCI_IO_ATTRIBUTE_ISA_MOTHERBOARD_IO 0x0001 +#define EFI_PCI_IO_ATTRIBUTE_ISA_IO 0x0002 +#define EFI_PCI_IO_ATTRIBUTE_VGA_PALETTE_IO 0x0004 +#define EFI_PCI_IO_ATTRIBUTE_VGA_MEMORY 0x0008 +#define EFI_PCI_IO_ATTRIBUTE_VGA_IO 0x0010 +#define EFI_PCI_IO_ATTRIBUTE_IDE_PRIMARY_IO 0x0020 +#define EFI_PCI_IO_ATTRIBUTE_IDE_SECONDARY_IO 0x0040 +#define EFI_PCI_IO_ATTRIBUTE_MEMORY_WRITE_COMBINE 0x0080 +#define EFI_PCI_IO_ATTRIBUTE_IO 0x0100 +#define EFI_PCI_IO_ATTRIBUTE_MEMORY 0x0200 +#define EFI_PCI_IO_ATTRIBUTE_BUS_MASTER 0x0400 +#define EFI_PCI_IO_ATTRIBUTE_MEMORY_CACHED 0x0800 +#define EFI_PCI_IO_ATTRIBUTE_MEMORY_DISABLE 0x1000 +#define EFI_PCI_IO_ATTRIBUTE_EMBEDDED_DEVICE 0x2000 +#define EFI_PCI_IO_ATTRIBUTE_EMBEDDED_ROM 0x4000 +#define EFI_PCI_IO_ATTRIBUTE_DUAL_ADDRESS_CYCLE 0x8000 +#define EFI_PCI_IO_ATTRIBUTE_ISA_IO_16 0x10000 +#define EFI_PCI_IO_ATTRIBUTE_VGA_PALETTE_IO_16 0x20000 +#define EFI_PCI_IO_ATTRIBUTE_VGA_IO_16 0x40000 /* * Types and defines for EFI ResetSystem @@ -229,6 +475,42 @@ typedef struct { u32 query_variable_info; } efi_runtime_services_32_t; 
+typedef struct { + efi_table_hdr_t hdr; + u64 get_time; + u64 set_time; + u64 get_wakeup_time; + u64 set_wakeup_time; + u64 set_virtual_address_map; + u64 convert_pointer; + u64 get_variable; + u64 get_next_variable; + u64 set_variable; + u64 get_next_high_mono_count; + u64 reset_system; + u64 update_capsule; + u64 query_capsule_caps; + u64 query_variable_info; +} efi_runtime_services_64_t; + +typedef struct { + efi_table_hdr_t hdr; + void *get_time; + void *set_time; + void *get_wakeup_time; + void *set_wakeup_time; + void *set_virtual_address_map; + void *convert_pointer; + void *get_variable; + void *get_next_variable; + void *set_variable; + void *get_next_high_mono_count; + void *reset_system; + void *update_capsule; + void *query_capsule_caps; + void *query_variable_info; +} efi_runtime_services_t; + typedef efi_status_t efi_get_time_t (efi_time_t *tm, efi_time_cap_t *tc); typedef efi_status_t efi_set_time_t (efi_time_t *tm); typedef efi_status_t efi_get_wakeup_time_t (efi_bool_t *enabled, efi_bool_t *pending, @@ -263,27 +545,6 @@ typedef efi_status_t efi_query_variable_store_t(u32 attributes, unsigned long size, bool nonblocking); -typedef union { - struct { - efi_table_hdr_t hdr; - efi_get_time_t __efiapi *get_time; - efi_set_time_t __efiapi *set_time; - efi_get_wakeup_time_t __efiapi *get_wakeup_time; - efi_set_wakeup_time_t __efiapi *set_wakeup_time; - efi_set_virtual_address_map_t __efiapi *set_virtual_address_map; - void *convert_pointer; - efi_get_variable_t __efiapi *get_variable; - efi_get_next_variable_t __efiapi *get_next_variable; - efi_set_variable_t __efiapi *set_variable; - efi_get_next_high_mono_count_t __efiapi *get_next_high_mono_count; - efi_reset_system_t __efiapi *reset_system; - efi_update_capsule_t __efiapi *update_capsule; - efi_query_capsule_caps_t __efiapi *query_capsule_caps; - efi_query_variable_info_t __efiapi *query_variable_info; - }; - efi_runtime_services_32_t mixed_mode; -} efi_runtime_services_t; - void 
efi_native_runtime_setup(void); /* @@ -329,21 +590,8 @@ void efi_native_runtime_setup(void); #define DEVICE_TREE_GUID EFI_GUID(0xb1b621d5, 0xf19c, 0x41a5, 0x83, 0x0b, 0xd9, 0x15, 0x2c, 0x69, 0xaa, 0xe0) #define EFI_PROPERTIES_TABLE_GUID EFI_GUID(0x880aaca3, 0x4adc, 0x4a04, 0x90, 0x79, 0xb7, 0x47, 0x34, 0x08, 0x25, 0xe5) #define EFI_RNG_PROTOCOL_GUID EFI_GUID(0x3152bca5, 0xeade, 0x433d, 0x86, 0x2e, 0xc0, 0x1c, 0xdc, 0x29, 0x1f, 0x44) -#define EFI_RNG_ALGORITHM_RAW EFI_GUID(0xe43176d7, 0xb6e8, 0x4827, 0xb7, 0x84, 0x7f, 0xfd, 0xc4, 0xb6, 0x85, 0x61) #define EFI_MEMORY_ATTRIBUTES_TABLE_GUID EFI_GUID(0xdcfa911d, 0x26eb, 0x469f, 0xa2, 0x20, 0x38, 0xb7, 0xdc, 0x46, 0x12, 0x20) #define EFI_CONSOLE_OUT_DEVICE_GUID EFI_GUID(0xd3b36f2c, 0xd551, 0x11d4, 0x9a, 0x46, 0x00, 0x90, 0x27, 0x3f, 0xc1, 0x4d) -#define APPLE_PROPERTIES_PROTOCOL_GUID EFI_GUID(0x91bd12fe, 0xf6c3, 0x44fb, 0xa5, 0xb7, 0x51, 0x22, 0xab, 0x30, 0x3a, 0xe0) -#define EFI_TCG2_PROTOCOL_GUID EFI_GUID(0x607f766c, 0x7455, 0x42be, 0x93, 0x0b, 0xe4, 0xd7, 0x6d, 0xb2, 0x72, 0x0f) -#define EFI_LOAD_FILE_PROTOCOL_GUID EFI_GUID(0x56ec3091, 0x954c, 0x11d2, 0x8e, 0x3f, 0x00, 0xa0, 0xc9, 0x69, 0x72, 0x3b) -#define EFI_LOAD_FILE2_PROTOCOL_GUID EFI_GUID(0x4006c0c1, 0xfcb3, 0x403e, 0x99, 0x6d, 0x4a, 0x6c, 0x87, 0x24, 0xe0, 0x6d) -#define EFI_RT_PROPERTIES_TABLE_GUID EFI_GUID(0xeb66918a, 0x7eef, 0x402a, 0x84, 0x2e, 0x93, 0x1d, 0x21, 0xc3, 0x8a, 0xe9) - -#define EFI_IMAGE_SECURITY_DATABASE_GUID EFI_GUID(0xd719b2cb, 0x3d3a, 0x4596, 0xa3, 0xbc, 0xda, 0xd0, 0x0e, 0x67, 0x65, 0x6f) -#define EFI_SHIM_LOCK_GUID EFI_GUID(0x605dab50, 0xe046, 0x4300, 0xab, 0xb6, 0x3d, 0xd8, 0x10, 0xdd, 0x8b, 0x23) - -#define EFI_CERT_SHA256_GUID EFI_GUID(0xc1c41626, 0x504c, 0x4092, 0xac, 0xa9, 0x41, 0xf9, 0x36, 0x93, 0x43, 0x28) -#define EFI_CERT_X509_GUID EFI_GUID(0xa5c059a1, 0x94e4, 0x4aa7, 0x87, 0xb5, 0xab, 0x15, 0x5c, 0x2b, 0xf0, 0x72) -#define EFI_CERT_X509_SHA256_GUID EFI_GUID(0x3bd2a492, 0x96c0, 0x4079, 0xb4, 0x20, 0xfc, 0xf9, 0x8e, 0xf1, 0x03, 
0xed) /* * This GUID is used to pass to the kernel proper the struct screen_info @@ -351,17 +599,7 @@ void efi_native_runtime_setup(void); * associated with ConOut */ #define LINUX_EFI_ARM_SCREEN_INFO_TABLE_GUID EFI_GUID(0xe03fc20a, 0x85dc, 0x406e, 0xb9, 0x0e, 0x4a, 0xb5, 0x02, 0x37, 0x1d, 0x95) -#define LINUX_EFI_ARM_CPU_STATE_TABLE_GUID EFI_GUID(0xef79e4aa, 0x3c3d, 0x4989, 0xb9, 0x02, 0x07, 0xa9, 0x43, 0xe5, 0x50, 0xd2) #define LINUX_EFI_LOADER_ENTRY_GUID EFI_GUID(0x4a67b082, 0x0a4c, 0x41cf, 0xb6, 0xc7, 0x44, 0x0b, 0x29, 0xbb, 0x8c, 0x4f) -#define LINUX_EFI_RANDOM_SEED_TABLE_GUID EFI_GUID(0x1ce1e5bc, 0x7ceb, 0x42f2, 0x81, 0xe5, 0x8a, 0xad, 0xf1, 0x80, 0xf5, 0x7b) -#define LINUX_EFI_TPM_EVENT_LOG_GUID EFI_GUID(0xb7799cb0, 0xeca2, 0x4943, 0x96, 0x67, 0x1f, 0xae, 0x07, 0xb7, 0x47, 0xfa) -#define LINUX_EFI_TPM_FINAL_LOG_GUID EFI_GUID(0x1e2ed096, 0x30e2, 0x4254, 0xbd, 0x89, 0x86, 0x3b, 0xbe, 0xf8, 0x23, 0x25) -#define LINUX_EFI_MEMRESERVE_TABLE_GUID EFI_GUID(0x888eb0c6, 0x8ede, 0x4ff5, 0xa8, 0xf0, 0x9a, 0xee, 0x5c, 0xb9, 0x77, 0xc2) -#define LINUX_EFI_INITRD_MEDIA_GUID EFI_GUID(0x5568e427, 0x68fc, 0x4f3d, 0xac, 0x74, 0xca, 0x55, 0x52, 0x31, 0xcc, 0x68) -#define LINUX_EFI_MOK_VARIABLE_TABLE_GUID EFI_GUID(0xc451ed2b, 0x9694, 0x45d3, 0xba, 0xba, 0xed, 0x9f, 0x89, 0x88, 0xa3, 0x89) - -/* OEM GUIDs */ -#define DELLEMC_EFI_RCI2_TABLE_GUID EFI_GUID(0x2d9f28a2, 0xa886, 0x456a, 0x97, 0xa8, 0xf1, 0x1e, 0xf2, 0x4f, 0xf4, 0x55) typedef struct { efi_guid_t guid; @@ -373,18 +611,15 @@ typedef struct { u32 table; } efi_config_table_32_t; -typedef union { - struct { - efi_guid_t guid; - void *table; - }; - efi_config_table_32_t mixed_mode; +typedef struct { + efi_guid_t guid; + unsigned long table; } efi_config_table_t; typedef struct { efi_guid_t guid; + const char *name; unsigned long *ptr; - const char name[16]; } efi_config_table_type_t; #define EFI_SYSTEM_TABLE_SIGNATURE ((u64)0x5453595320494249ULL) @@ -430,39 +665,32 @@ typedef struct { u32 tables; } efi_system_table_32_t; 
-typedef union efi_simple_text_input_protocol efi_simple_text_input_protocol_t; -typedef union efi_simple_text_output_protocol efi_simple_text_output_protocol_t; - -typedef union { - struct { - efi_table_hdr_t hdr; - unsigned long fw_vendor; /* physical addr of CHAR16 vendor string */ - u32 fw_revision; - unsigned long con_in_handle; - efi_simple_text_input_protocol_t *con_in; - unsigned long con_out_handle; - efi_simple_text_output_protocol_t *con_out; - unsigned long stderr_handle; - unsigned long stderr; - efi_runtime_services_t *runtime; - efi_boot_services_t *boottime; - unsigned long nr_tables; - unsigned long tables; - }; - efi_system_table_32_t mixed_mode; +typedef struct { + efi_table_hdr_t hdr; + unsigned long fw_vendor; /* physical addr of CHAR16 vendor string */ + u32 fw_revision; + unsigned long con_in_handle; + unsigned long con_in; + unsigned long con_out_handle; + unsigned long con_out; + unsigned long stderr_handle; + unsigned long stderr; + efi_runtime_services_t *runtime; + efi_boot_services_t *boottime; + unsigned long nr_tables; + unsigned long tables; } efi_system_table_t; /* * Architecture independent structure for describing a memory map for the - * benefit of efi_memmap_init_early(), and for passing context between - * efi_memmap_alloc() and efi_memmap_install(). + * benefit of efi_memmap_init_early(), saving us the need to pass four + * parameters. 
*/ struct efi_memory_map_data { phys_addr_t phys_map; unsigned long size; unsigned long desc_version; unsigned long desc_size; - unsigned long flags; }; struct efi_memory_map { @@ -472,10 +700,7 @@ struct efi_memory_map { int nr_map; unsigned long desc_version; unsigned long desc_size; -#define EFI_MEMMAP_LATE (1UL << 0) -#define EFI_MEMMAP_MEMBLOCK (1UL << 1) -#define EFI_MEMMAP_SLAB (1UL << 2) - unsigned long flags; + bool late; }; struct efi_mem_range { @@ -483,6 +708,130 @@ struct efi_mem_range { u64 attribute; }; +struct efi_fdt_params { + u64 system_table; + u64 mmap; + u32 mmap_size; + u32 desc_size; + u32 desc_ver; +}; + +typedef struct { + u32 revision; + u32 parent_handle; + u32 system_table; + u32 device_handle; + u32 file_path; + u32 reserved; + u32 load_options_size; + u32 load_options; + u32 image_base; + __aligned_u64 image_size; + unsigned int image_code_type; + unsigned int image_data_type; + unsigned long unload; +} efi_loaded_image_32_t; + +typedef struct { + u32 revision; + u64 parent_handle; + u64 system_table; + u64 device_handle; + u64 file_path; + u64 reserved; + u32 load_options_size; + u64 load_options; + u64 image_base; + __aligned_u64 image_size; + unsigned int image_code_type; + unsigned int image_data_type; + unsigned long unload; +} efi_loaded_image_64_t; + +typedef struct { + u32 revision; + void *parent_handle; + efi_system_table_t *system_table; + void *device_handle; + void *file_path; + void *reserved; + u32 load_options_size; + void *load_options; + void *image_base; + __aligned_u64 image_size; + unsigned int image_code_type; + unsigned int image_data_type; + unsigned long unload; +} efi_loaded_image_t; + + +typedef struct { + u64 size; + u64 file_size; + u64 phys_size; + efi_time_t create_time; + efi_time_t last_access_time; + efi_time_t modification_time; + __aligned_u64 attribute; + efi_char16_t filename[1]; +} efi_file_info_t; + +typedef struct { + u64 revision; + u32 open; + u32 close; + u32 delete; + u32 read; + u32 write; 
+ u32 get_position; + u32 set_position; + u32 get_info; + u32 set_info; + u32 flush; +} efi_file_handle_32_t; + +typedef struct { + u64 revision; + u64 open; + u64 close; + u64 delete; + u64 read; + u64 write; + u64 get_position; + u64 set_position; + u64 get_info; + u64 set_info; + u64 flush; +} efi_file_handle_64_t; + +typedef struct _efi_file_handle { + u64 revision; + efi_status_t (*open)(struct _efi_file_handle *, + struct _efi_file_handle **, + efi_char16_t *, u64, u64); + efi_status_t (*close)(struct _efi_file_handle *); + void *delete; + efi_status_t (*read)(struct _efi_file_handle *, unsigned long *, + void *); + void *write; + void *get_position; + void *set_position; + efi_status_t (*get_info)(struct _efi_file_handle *, efi_guid_t *, + unsigned long *, void *); + void *set_info; + void *flush; +} efi_file_handle_t; + +typedef struct _efi_file_io_interface { + u64 revision; + int (*open_volume)(struct _efi_file_io_interface *, + efi_file_handle_t **); +} efi_file_io_interface_t; + +#define EFI_FILE_MODE_READ 0x0000000000000001 +#define EFI_FILE_MODE_WRITE 0x0000000000000002 +#define EFI_FILE_MODE_CREATE 0x8000000000000000 + typedef struct { u32 version; u32 length; @@ -492,14 +841,6 @@ typedef struct { #define EFI_PROPERTIES_TABLE_VERSION 0x00010000 #define EFI_PROPERTIES_RUNTIME_MEMORY_PROTECTION_NON_EXECUTABLE_PE_DATA 0x1 -typedef struct { - u16 version; - u16 length; - u32 runtime_services_supported; -} efi_rt_properties_table_t; - -#define EFI_RT_PROPERTIES_TABLE_VERSION 0x1 - #define EFI_INVALID_TABLE_ADDR (~0UL) typedef struct { @@ -510,87 +851,47 @@ typedef struct { efi_memory_desc_t entry[0]; } efi_memory_attributes_table_t; -typedef struct { - efi_guid_t signature_owner; - u8 signature_data[]; -} efi_signature_data_t; - -typedef struct { - efi_guid_t signature_type; - u32 signature_list_size; - u32 signature_header_size; - u32 signature_size; - u8 signature_header[]; - /* efi_signature_data_t signatures[][] */ -} efi_signature_list_t; - -typedef 
u8 efi_sha256_hash_t[32]; - -typedef struct { - efi_sha256_hash_t to_be_signed_hash; - efi_time_t time_of_revocation; -} efi_cert_x509_sha256_t; - -extern unsigned long __ro_after_init efi_rng_seed; /* RNG Seed table */ - /* * All runtime access to EFI goes through this structure: */ extern struct efi { - const efi_runtime_services_t *runtime; /* EFI runtime services table */ - unsigned int runtime_version; /* Runtime services version */ - unsigned int runtime_supported_mask; - - unsigned long acpi; /* ACPI table (IA64 ext 0.71) */ - unsigned long acpi20; /* ACPI table (ACPI 2.0) */ - unsigned long smbios; /* SMBIOS table (32 bit entry point) */ - unsigned long smbios3; /* SMBIOS table (64 bit entry point) */ - unsigned long esrt; /* ESRT table */ - unsigned long tpm_log; /* TPM2 Event Log table */ - unsigned long tpm_final_log; /* TPM2 Final Events Log table */ - unsigned long mokvar_table; /* MOK variable config table */ - - efi_get_time_t *get_time; - efi_set_time_t *set_time; - efi_get_wakeup_time_t *get_wakeup_time; - efi_set_wakeup_time_t *set_wakeup_time; - efi_get_variable_t *get_variable; - efi_get_next_variable_t *get_next_variable; - efi_set_variable_t *set_variable; - efi_set_variable_t *set_variable_nonblocking; - efi_query_variable_info_t *query_variable_info; - efi_query_variable_info_t *query_variable_info_nonblocking; - efi_update_capsule_t *update_capsule; - efi_query_capsule_caps_t *query_capsule_caps; - efi_get_next_high_mono_count_t *get_next_high_mono_count; - efi_reset_system_t *reset_system; - - struct efi_memory_map memmap; - unsigned long flags; + efi_system_table_t *systab; /* EFI system table */ + unsigned int runtime_version; /* Runtime services version */ + unsigned long mps; /* MPS table */ + unsigned long acpi; /* ACPI table (IA64 ext 0.71) */ + unsigned long acpi20; /* ACPI table (ACPI 2.0) */ + unsigned long smbios; /* SMBIOS table (32 bit entry point) */ + unsigned long smbios3; /* SMBIOS table (64 bit entry point) */ + unsigned 
long sal_systab; /* SAL system table */ + unsigned long boot_info; /* boot info table */ + unsigned long hcdp; /* HCDP table */ + unsigned long uga; /* UGA table */ + unsigned long uv_systab; /* UV system table */ + unsigned long fw_vendor; /* fw_vendor */ + unsigned long runtime; /* runtime table */ + unsigned long config_table; /* config tables */ + unsigned long esrt; /* ESRT table */ + unsigned long properties_table; /* properties table */ + unsigned long mem_attr_table; /* memory attributes table */ + efi_get_time_t *get_time; + efi_set_time_t *set_time; + efi_get_wakeup_time_t *get_wakeup_time; + efi_set_wakeup_time_t *set_wakeup_time; + efi_get_variable_t *get_variable; + efi_get_next_variable_t *get_next_variable; + efi_set_variable_t *set_variable; + efi_set_variable_t *set_variable_nonblocking; + efi_query_variable_info_t *query_variable_info; + efi_query_variable_info_t *query_variable_info_nonblocking; + efi_update_capsule_t *update_capsule; + efi_query_capsule_caps_t *query_capsule_caps; + efi_get_next_high_mono_count_t *get_next_high_mono_count; + efi_reset_system_t *reset_system; + efi_set_virtual_address_map_t *set_virtual_address_map; + struct efi_memory_map memmap; + unsigned long flags; } efi; -#define EFI_RT_SUPPORTED_GET_TIME 0x0001 -#define EFI_RT_SUPPORTED_SET_TIME 0x0002 -#define EFI_RT_SUPPORTED_GET_WAKEUP_TIME 0x0004 -#define EFI_RT_SUPPORTED_SET_WAKEUP_TIME 0x0008 -#define EFI_RT_SUPPORTED_GET_VARIABLE 0x0010 -#define EFI_RT_SUPPORTED_GET_NEXT_VARIABLE_NAME 0x0020 -#define EFI_RT_SUPPORTED_SET_VARIABLE 0x0040 -#define EFI_RT_SUPPORTED_SET_VIRTUAL_ADDRESS_MAP 0x0080 -#define EFI_RT_SUPPORTED_CONVERT_POINTER 0x0100 -#define EFI_RT_SUPPORTED_GET_NEXT_HIGH_MONOTONIC_COUNT 0x0200 -#define EFI_RT_SUPPORTED_RESET_SYSTEM 0x0400 -#define EFI_RT_SUPPORTED_UPDATE_CAPSULE 0x0800 -#define EFI_RT_SUPPORTED_QUERY_CAPSULE_CAPABILITIES 0x1000 -#define EFI_RT_SUPPORTED_QUERY_VARIABLE_INFO 0x2000 - -#define EFI_RT_SUPPORTED_ALL 0x3fff - -#define 
EFI_RT_SUPPORTED_TIME_SERVICES 0x000f -#define EFI_RT_SUPPORTED_VARIABLE_SERVICES 0x0070 - -extern struct mm_struct efi_mm; - static inline int efi_guidcmp (efi_guid_t left, efi_guid_t right) { @@ -605,16 +906,21 @@ efi_guid_to_str(efi_guid_t *guid, char *out) } extern void efi_init (void); -#ifdef CONFIG_EFI +extern void *efi_get_pal_addr (void); +extern void efi_map_pal_code (void); +extern void efi_memmap_walk (efi_freemem_callback_t callback, void *arg); +extern void efi_gettimeofday (struct timespec64 *ts); extern void efi_enter_virtual_mode (void); /* switch EFI to virtual mode, if possible */ -#else -static inline void efi_enter_virtual_mode (void) {} -#endif #ifdef CONFIG_X86 +extern void efi_late_init(void); +extern void efi_free_boot_services(void); extern efi_status_t efi_query_variable_store(u32 attributes, unsigned long size, bool nonblocking); +extern void efi_find_mirror(void); #else +static inline void efi_late_init(void) {} +static inline void efi_free_boot_services(void) {} static inline efi_status_t efi_query_variable_store(u32 attributes, unsigned long size, @@ -625,43 +931,36 @@ static inline efi_status_t efi_query_variable_store(u32 attributes, #endif extern void __iomem *efi_lookup_mapped_addr(u64 phys_addr); -extern int __init efi_memmap_alloc(unsigned int num_entries, - struct efi_memory_map_data *data); -extern void __efi_memmap_free(u64 phys, unsigned long size, - unsigned long flags); +extern phys_addr_t __init efi_memmap_alloc(unsigned int num_entries); extern int __init efi_memmap_init_early(struct efi_memory_map_data *data); extern int __init efi_memmap_init_late(phys_addr_t addr, unsigned long size); extern void __init efi_memmap_unmap(void); -extern int __init efi_memmap_install(struct efi_memory_map_data *data); +extern int __init efi_memmap_install(phys_addr_t addr, unsigned int nr_map); extern int __init efi_memmap_split_count(efi_memory_desc_t *md, struct range *range); extern void __init efi_memmap_insert(struct efi_memory_map 
*old_memmap, void *buf, struct efi_mem_range *mem); +extern int efi_config_init(efi_config_table_type_t *arch_tables); #ifdef CONFIG_EFI_ESRT extern void __init efi_esrt_init(void); #else static inline void efi_esrt_init(void) { } #endif -extern int efi_config_parse_tables(const efi_config_table_t *config_tables, - int count, - const efi_config_table_type_t *arch_tables); -extern int efi_systab_check_header(const efi_table_hdr_t *systab_hdr, - int min_major_version); -extern void efi_systab_report_header(const efi_table_hdr_t *systab_hdr, - unsigned long fw_vendor); +extern int efi_config_parse_tables(void *config_tables, int count, int sz, + efi_config_table_type_t *arch_tables); extern u64 efi_get_iobase (void); -extern int efi_mem_type(unsigned long phys_addr); +extern u32 efi_mem_type (unsigned long phys_addr); extern u64 efi_mem_attributes (unsigned long phys_addr); extern u64 efi_mem_attribute (unsigned long phys_addr, unsigned long size); extern int __init efi_uart_console_only (void); extern u64 efi_mem_desc_end(efi_memory_desc_t *md); extern int efi_mem_desc_lookup(u64 phys_addr, efi_memory_desc_t *out_md); extern void efi_mem_reserve(phys_addr_t addr, u64 size); -extern int efi_mem_reserve_persistent(phys_addr_t addr, u64 size); extern void efi_initialize_iomem_resources(struct resource *code_resource, struct resource *data_resource, struct resource *bss_resource); -extern u64 efi_get_fdt_params(struct efi_memory_map_data *data); +extern void efi_reserve_boot_services(void); +extern int efi_get_fdt_params(struct efi_fdt_params *params); extern struct kobject *efi_kobj; extern int efi_reboot_quirk_mode; @@ -673,8 +972,6 @@ extern void __init efi_fake_memmap(void); static inline void efi_fake_memmap(void) { } #endif -extern unsigned long efi_mem_attr_table; - /* * efi_memattr_perm_setter - arch specific callback function passed into * efi_memattr_apply_permissions() that updates the @@ -688,28 +985,6 @@ extern int efi_memattr_init(void); extern int 
efi_memattr_apply_permissions(struct mm_struct *mm, efi_memattr_perm_setter fn); -/* - * efi_early_memdesc_ptr - get the n-th EFI memmap descriptor - * @map: the start of efi memmap - * @desc_size: the size of space for each EFI memmap descriptor - * @n: the index of efi memmap descriptor - * - * EFI boot service provides the GetMemoryMap() function to get a copy of the - * current memory map which is an array of memory descriptors, each of - * which describes a contiguous block of memory. It also gets the size of the - * map, and the size of each descriptor, etc. - * - * Note that per section 6.2 of UEFI Spec 2.6 Errata A, the returned size of - * each descriptor might not be equal to sizeof(efi_memory_memdesc_t), - * since efi_memory_memdesc_t may be extended in the future. Thus the OS - * MUST use the returned size of the descriptor to find the start of each - * efi_memory_memdesc_t in the memory map array. This should only be used - * during bootup since for_each_efi_memory_desc_xxx() is available after the - * kernel initializes the EFI subsystem to set up struct efi_memory_map. 
- */ -#define efi_early_memdesc_ptr(map, desc_size, n) \ - (efi_memory_desc_t *)((void *)(map) + ((n) * (desc_size))) - /* Iterate through an efi_memory_map */ #define for_each_efi_memory_desc_in_map(m, md) \ for ((md) = (m)->map; \ @@ -732,15 +1007,6 @@ extern int efi_memattr_apply_permissions(struct mm_struct *mm, char * __init efi_md_typeattr_format(char *buf, size_t size, const efi_memory_desc_t *md); - -typedef void (*efi_element_handler_t)(const char *source, - const void *element_data, - size_t element_size); -extern int __init parse_efi_signature_list( - const char *source, - const void *data, size_t size, - efi_element_handler_t (*get_handler_for_guid)(const efi_guid_t *)); - /** * efi_range_is_wc - check the WC bit on an address range * @start: starting kvirt address @@ -779,9 +1045,6 @@ extern int __init efi_setup_pcdp_console(char *); #define EFI_ARCH_1 7 /* First arch-specific bit */ #define EFI_DBG 8 /* Print additional debug info at runtime */ #define EFI_NX_PE_DATA 9 /* Can runtime data regions be mapped non-executable? */ -#define EFI_MEM_ATTR 10 /* Did firmware publish an EFI_MEMORY_ATTRIBUTES table? */ -#define EFI_MEM_NO_SOFT_RESERVE 11 /* Is the kernel configured to ignore soft reservations? */ -#define EFI_PRESERVE_BS_REGIONS 12 /* Are EFI boot-services memory segments available? 
*/ #ifdef CONFIG_EFI /* @@ -792,19 +1055,6 @@ static inline bool efi_enabled(int feature) return test_bit(feature, &efi.flags) != 0; } extern void efi_reboot(enum reboot_mode reboot_mode, const char *__unused); - -bool __pure __efi_soft_reserve_enabled(void); - -static inline bool __pure efi_soft_reserve_enabled(void) -{ - return IS_ENABLED(CONFIG_EFI_SOFT_RESERVE) - && __efi_soft_reserve_enabled(); -} - -static inline bool efi_rt_services_supported(unsigned int mask) -{ - return (efi.runtime_supported_mask & mask) == mask; -} #else static inline bool efi_enabled(int feature) { @@ -813,12 +1063,8 @@ static inline bool efi_enabled(int feature) static inline void efi_reboot(enum reboot_mode reboot_mode, const char *__unused) {} -static inline bool efi_soft_reserve_enabled(void) -{ - return false; -} - -static inline bool efi_rt_services_supported(unsigned int mask) +static inline bool +efi_capsule_pending(int *reset_type) { return false; } @@ -850,6 +1096,13 @@ extern int efi_status_to_err(efi_status_t status); */ #define EFI_VARIABLE_GUID_LEN UUID_STRING_LEN +/* + * The type of search to perform when calling boottime->locate_handle + */ +#define EFI_LOCATE_ALL_HANDLES 0 +#define EFI_LOCATE_BY_REGISTER_NOTIFY 1 +#define EFI_LOCATE_BY_PROTOCOL 2 + /* * EFI Device Path information */ @@ -889,40 +1142,10 @@ extern int efi_status_to_err(efi_status_t status); #define EFI_DEV_END_ENTIRE 0xFF struct efi_generic_dev_path { - u8 type; - u8 sub_type; - u16 length; -} __packed; - -struct efi_acpi_dev_path { - struct efi_generic_dev_path header; - u32 hid; - u32 uid; -} __packed; - -struct efi_pci_dev_path { - struct efi_generic_dev_path header; - u8 fn; - u8 dev; -} __packed; - -struct efi_vendor_dev_path { - struct efi_generic_dev_path header; - efi_guid_t vendorguid; - u8 vendordata[]; -} __packed; - -struct efi_dev_path { - union { - struct efi_generic_dev_path header; - struct efi_acpi_dev_path acpi; - struct efi_pci_dev_path pci; - struct efi_vendor_dev_path vendor; - }; 
-} __packed; - -struct device *efi_get_device_by_path(const struct efi_dev_path **node, - size_t *len); + u8 type; + u8 sub_type; + u16 length; +} __attribute ((packed)); static inline void memrange_efi_to_native(u64 *addr, u64 *npages) { @@ -944,6 +1167,7 @@ struct efivar_operations { efi_set_variable_t *set_variable_nonblocking; efi_query_variable_store_t *query_variable_store; }; +typedef struct efivar_operations __no_const efivar_operations_no_const; struct efivars { struct kset *kset; @@ -977,6 +1201,100 @@ struct efivar_entry { bool deleting; }; +struct efi_simple_text_output_protocol_32 { + u32 reset; + u32 output_string; + u32 test_string; +}; + +struct efi_simple_text_output_protocol_64 { + u64 reset; + u64 output_string; + u64 test_string; +}; + +struct efi_simple_text_output_protocol { + void *reset; + efi_status_t (*output_string)(void *, void *); + void *test_string; +}; + +#define PIXEL_RGB_RESERVED_8BIT_PER_COLOR 0 +#define PIXEL_BGR_RESERVED_8BIT_PER_COLOR 1 +#define PIXEL_BIT_MASK 2 +#define PIXEL_BLT_ONLY 3 +#define PIXEL_FORMAT_MAX 4 + +struct efi_pixel_bitmask { + u32 red_mask; + u32 green_mask; + u32 blue_mask; + u32 reserved_mask; +}; + +struct efi_graphics_output_mode_info { + u32 version; + u32 horizontal_resolution; + u32 vertical_resolution; + int pixel_format; + struct efi_pixel_bitmask pixel_information; + u32 pixels_per_scan_line; +} __packed; + +struct efi_graphics_output_protocol_mode_32 { + u32 max_mode; + u32 mode; + u32 info; + u32 size_of_info; + u64 frame_buffer_base; + u32 frame_buffer_size; +} __packed; + +struct efi_graphics_output_protocol_mode_64 { + u32 max_mode; + u32 mode; + u64 info; + u64 size_of_info; + u64 frame_buffer_base; + u64 frame_buffer_size; +} __packed; + +struct efi_graphics_output_protocol_mode { + u32 max_mode; + u32 mode; + unsigned long info; + unsigned long size_of_info; + u64 frame_buffer_base; + unsigned long frame_buffer_size; +} __packed; + +struct efi_graphics_output_protocol_32 { + u32 query_mode; 
+ u32 set_mode; + u32 blt; + u32 mode; +}; + +struct efi_graphics_output_protocol_64 { + u64 query_mode; + u64 set_mode; + u64 blt; + u64 mode; +}; + +struct efi_graphics_output_protocol { + unsigned long query_mode; + unsigned long set_mode; + unsigned long blt; + struct efi_graphics_output_protocol_mode *mode; +}; + +typedef efi_status_t (*efi_graphics_output_protocol_query_mode)( + struct efi_graphics_output_protocol *, u32, unsigned long *, + struct efi_graphics_output_mode_info **); + +extern struct list_head efivar_sysfs_list; + static inline void efivar_unregister(struct efivar_entry *var) { @@ -989,7 +1307,6 @@ int efivars_register(struct efivars *efivars, int efivars_unregister(struct efivars *efivars); struct kobject *efivars_kobject(void); -int efivar_supports_writes(void); int efivar_init(int (*func)(efi_char16_t *, efi_guid_t, unsigned long, void *), void *data, bool duplicates, struct list_head *head); @@ -1028,17 +1345,22 @@ bool efivar_validate(efi_guid_t vendor, efi_char16_t *var_name, u8 *data, bool efivar_variable_is_removable(efi_guid_t vendor, const char *name, size_t len); -#if IS_ENABLED(CONFIG_EFI_CAPSULE_LOADER) +extern struct work_struct efivar_work; +void efivar_run_worker(void); + +#if defined(CONFIG_EFI_VARS) || defined(CONFIG_EFI_VARS_MODULE) +int efivars_sysfs_init(void); + +#define EFIVARS_DATA_SIZE_MAX 1024 + +#endif /* CONFIG_EFI_VARS */ extern bool efi_capsule_pending(int *reset_type); extern int efi_capsule_supported(efi_guid_t guid, u32 flags, size_t size, int *reset); extern int efi_capsule_update(efi_capsule_header_t *capsule, - phys_addr_t *pages); -#else -static inline bool efi_capsule_pending(int *reset_type) { return false; } -#endif + struct page **pages); #ifdef CONFIG_EFI_RUNTIME_MAP int efi_runtime_map_init(struct kobject *); @@ -1068,51 +1390,52 @@ static inline int efi_runtime_map_copy(void *buf, size_t bufsz) #endif -#ifdef CONFIG_EFI -extern bool efi_runtime_disabled(void); -#else -static inline bool 
efi_runtime_disabled(void) { return true; } -#endif +/* prototypes shared between arch specific and generic stub code */ +#define pr_efi(sys_table, msg) efi_printk(sys_table, "EFI stub: "msg) +#define pr_efi_err(sys_table, msg) efi_printk(sys_table, "EFI stub: ERROR: "msg) + +void efi_printk(efi_system_table_t *sys_table_arg, char *str); + +void efi_free(efi_system_table_t *sys_table_arg, unsigned long size, + unsigned long addr); + +char *efi_convert_cmdline(efi_system_table_t *sys_table_arg, + efi_loaded_image_t *image, int *cmd_line_len); + +efi_status_t efi_get_memory_map(efi_system_table_t *sys_table_arg, + struct efi_boot_memmap *map); + +efi_status_t efi_low_alloc(efi_system_table_t *sys_table_arg, + unsigned long size, unsigned long align, + unsigned long *addr); + +efi_status_t efi_high_alloc(efi_system_table_t *sys_table_arg, + unsigned long size, unsigned long align, + unsigned long *addr, unsigned long max); + +efi_status_t efi_relocate_kernel(efi_system_table_t *sys_table_arg, + unsigned long *image_addr, + unsigned long image_size, + unsigned long alloc_size, + unsigned long preferred_addr, + unsigned long alignment); + +efi_status_t handle_cmdline_files(efi_system_table_t *sys_table_arg, + efi_loaded_image_t *image, + char *cmd_line, char *option_string, + unsigned long max_addr, + unsigned long *load_addr, + unsigned long *load_size); + +efi_status_t efi_parse_options(char *cmdline); + +efi_status_t efi_setup_gop(efi_system_table_t *sys_table_arg, + struct screen_info *si, efi_guid_t *proto, + unsigned long size); + +bool efi_runtime_disabled(void); extern void efi_call_virt_check_flags(unsigned long flags, const char *call); -extern unsigned long efi_call_virt_save_flags(void); - -enum efi_secureboot_mode { - efi_secureboot_mode_unset, - efi_secureboot_mode_unknown, - efi_secureboot_mode_disabled, - efi_secureboot_mode_enabled, -}; - -static inline -enum efi_secureboot_mode efi_get_secureboot_mode(efi_get_variable_t *get_var) -{ - u8 secboot, 
setupmode = 0; - efi_status_t status; - unsigned long size; - - size = sizeof(secboot); - status = get_var(L"SecureBoot", &EFI_GLOBAL_VARIABLE_GUID, NULL, &size, - &secboot); - if (status == EFI_NOT_FOUND) - return efi_secureboot_mode_disabled; - if (status != EFI_SUCCESS) - return efi_secureboot_mode_unknown; - - size = sizeof(setupmode); - get_var(L"SetupMode", &EFI_GLOBAL_VARIABLE_GUID, NULL, &size, &setupmode); - if (secboot == 0 || setupmode == 1) - return efi_secureboot_mode_disabled; - return efi_secureboot_mode_enabled; -} - -#ifdef CONFIG_EFI_EMBEDDED_FIRMWARE -void efi_check_for_embedded_firmwares(void); -#else -static inline void efi_check_for_embedded_firmwares(void) { } -#endif - -efi_status_t efi_random_get_seed(void); /* * Arch code can implement the following three template macros, avoiding @@ -1141,7 +1464,7 @@ efi_status_t efi_random_get_seed(void); \ arch_efi_call_virt_setup(); \ \ - __flags = efi_call_virt_save_flags(); \ + local_save_flags(__flags); \ __s = arch_efi_call_virt(p, f, args); \ efi_call_virt_check_flags(__flags, __stringify(f)); \ \ @@ -1156,130 +1479,21 @@ efi_status_t efi_random_get_seed(void); \ arch_efi_call_virt_setup(); \ \ - __flags = efi_call_virt_save_flags(); \ + local_save_flags(__flags); \ arch_efi_call_virt(p, f, args); \ efi_call_virt_check_flags(__flags, __stringify(f)); \ \ arch_efi_call_virt_teardown(); \ }) -#define EFI_RANDOM_SEED_SIZE 64U - -struct linux_efi_random_seed { - u32 size; - u8 bits[]; -}; - -struct linux_efi_tpm_eventlog { - u32 size; - u32 final_events_preboot_size; - u8 version; - u8 log[]; -}; - -extern int efi_tpm_eventlog_init(void); - -struct efi_tcg2_final_events_table { - u64 version; - u64 nr_events; - u8 events[]; -}; -extern int efi_tpm_final_log_size; - -extern unsigned long rci2_table_phys; - -/* - * efi_runtime_service() function identifiers. - * "NONE" is used by efi_recover_from_page_fault() to check if the page - * fault happened while executing an efi runtime service. 
- */ -enum efi_rts_ids { - EFI_NONE, - EFI_GET_TIME, - EFI_SET_TIME, - EFI_GET_WAKEUP_TIME, - EFI_SET_WAKEUP_TIME, - EFI_GET_VARIABLE, - EFI_GET_NEXT_VARIABLE, - EFI_SET_VARIABLE, - EFI_QUERY_VARIABLE_INFO, - EFI_GET_NEXT_HIGH_MONO_COUNT, - EFI_RESET_SYSTEM, - EFI_UPDATE_CAPSULE, - EFI_QUERY_CAPSULE_CAPS, -}; - -/* - * efi_runtime_work: Details of EFI Runtime Service work - * @arg<1-5>: EFI Runtime Service function arguments - * @status: Status of executing EFI Runtime Service - * @efi_rts_id: EFI Runtime Service function identifier - * @efi_rts_comp: Struct used for handling completions - */ -struct efi_runtime_work { - void *arg1; - void *arg2; - void *arg3; - void *arg4; - void *arg5; - efi_status_t status; - struct work_struct work; - enum efi_rts_ids efi_rts_id; - struct completion efi_rts_comp; -}; - -extern struct efi_runtime_work efi_rts_work; - -/* Workqueue to queue EFI Runtime Services */ -extern struct workqueue_struct *efi_rts_wq; - -struct linux_efi_memreserve { - int size; // allocated size of the array - atomic_t count; // number of entries used - phys_addr_t next; // pa of next struct instance - struct { - phys_addr_t base; - phys_addr_t size; - } entry[]; -}; - -#define EFI_MEMRESERVE_COUNT(size) (((size) - sizeof(struct linux_efi_memreserve)) \ - / sizeof_field(struct linux_efi_memreserve, entry[0])) - -void __init efi_arch_mem_reserve(phys_addr_t addr, u64 size); - -char *efi_systab_show_arch(char *str); - -/* - * The LINUX_EFI_MOK_VARIABLE_TABLE_GUID config table can be provided - * to the kernel by an EFI boot loader. The table contains a packed - * sequence of these entries, one for each named MOK variable. - * The sequence is terminated by an entry with a completely NULL - * name and 0 data size. 
- */ -struct efi_mokvar_table_entry { - char name[256]; - u64 data_size; - u8 data[]; -} __attribute((packed)); - -#ifdef CONFIG_LOAD_UEFI_KEYS -extern void __init efi_mokvar_table_init(void); -extern struct efi_mokvar_table_entry *efi_mokvar_entry_next( - struct efi_mokvar_table_entry **mokvar_entry); -extern struct efi_mokvar_table_entry *efi_mokvar_entry_find(const char *name); -#else -static inline void efi_mokvar_table_init(void) { } -static inline struct efi_mokvar_table_entry *efi_mokvar_entry_next( - struct efi_mokvar_table_entry **mokvar_entry) -{ - return NULL; -} -static inline struct efi_mokvar_table_entry *efi_mokvar_entry_find( - const char *name) -{ - return NULL; -} -#endif +typedef efi_status_t (*efi_exit_boot_map_processing)( + efi_system_table_t *sys_table_arg, + struct efi_boot_memmap *map, + void *priv); +efi_status_t efi_exit_boot_services(efi_system_table_t *sys_table, + void *handle, + struct efi_boot_memmap *map, + void *priv, + efi_exit_boot_map_processing priv_func); #endif /* _LINUX_EFI_H */ diff --git a/include/linux/efs_vh.h b/include/linux/efs_vh.h index 206c5270f7..8a11150c61 100644 --- a/include/linux/efs_vh.h +++ b/include/linux/efs_vh.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ /* * efs_vh.h * diff --git a/include/linux/eisa.h b/include/linux/eisa.h index b012e30afe..6925249a5a 100644 --- a/include/linux/eisa.h +++ b/include/linux/eisa.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_EISA_H #define _LINUX_EISA_H diff --git a/include/linux/elevator.h b/include/linux/elevator.h index ef9ceead3d..e7f358d2e5 100644 --- a/include/linux/elevator.h +++ b/include/linux/elevator.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_ELEVATOR_H #define _LINUX_ELEVATOR_H @@ -9,47 +8,73 @@ struct io_cq; struct elevator_type; -#ifdef CONFIG_BLK_DEBUG_FS -struct blk_mq_debugfs_attr; -#endif -/* - * Return values from elevator merger - */ -enum elv_merge { - ELEVATOR_NO_MERGE = 0, - 
ELEVATOR_FRONT_MERGE = 1, - ELEVATOR_BACK_MERGE = 2, - ELEVATOR_DISCARD_MERGE = 3, -}; +typedef int (elevator_merge_fn) (struct request_queue *, struct request **, + struct bio *); -struct blk_mq_alloc_data; -struct blk_mq_hw_ctx; +typedef void (elevator_merge_req_fn) (struct request_queue *, struct request *, struct request *); -struct elevator_mq_ops { - int (*init_sched)(struct request_queue *, struct elevator_type *); - void (*exit_sched)(struct elevator_queue *); - int (*init_hctx)(struct blk_mq_hw_ctx *, unsigned int); - void (*exit_hctx)(struct blk_mq_hw_ctx *, unsigned int); - void (*depth_updated)(struct blk_mq_hw_ctx *); +typedef void (elevator_merged_fn) (struct request_queue *, struct request *, int); - bool (*allow_merge)(struct request_queue *, struct request *, struct bio *); - bool (*bio_merge)(struct request_queue *, struct bio *, unsigned int); - int (*request_merge)(struct request_queue *q, struct request **, struct bio *); - void (*request_merged)(struct request_queue *, struct request *, enum elv_merge); - void (*requests_merged)(struct request_queue *, struct request *, struct request *); - void (*limit_depth)(unsigned int, struct blk_mq_alloc_data *); - void (*prepare_request)(struct request *); - void (*finish_request)(struct request *); - void (*insert_requests)(struct blk_mq_hw_ctx *, struct list_head *, bool); - struct request *(*dispatch_request)(struct blk_mq_hw_ctx *); - bool (*has_work)(struct blk_mq_hw_ctx *); - void (*completed_request)(struct request *, u64); - void (*requeue_request)(struct request *); - struct request *(*former_request)(struct request_queue *, struct request *); - struct request *(*next_request)(struct request_queue *, struct request *); - void (*init_icq)(struct io_cq *); - void (*exit_icq)(struct io_cq *); +typedef int (elevator_allow_bio_merge_fn) (struct request_queue *, + struct request *, struct bio *); + +typedef int (elevator_allow_rq_merge_fn) (struct request_queue *, + struct request *, struct request 
*); + +typedef void (elevator_bio_merged_fn) (struct request_queue *, + struct request *, struct bio *); + +typedef int (elevator_dispatch_fn) (struct request_queue *, int); + +typedef void (elevator_add_req_fn) (struct request_queue *, struct request *); +typedef struct request *(elevator_request_list_fn) (struct request_queue *, struct request *); +typedef void (elevator_completed_req_fn) (struct request_queue *, struct request *); +typedef int (elevator_may_queue_fn) (struct request_queue *, int, int); + +typedef void (elevator_init_icq_fn) (struct io_cq *); +typedef void (elevator_exit_icq_fn) (struct io_cq *); +typedef int (elevator_set_req_fn) (struct request_queue *, struct request *, + struct bio *, gfp_t); +typedef void (elevator_put_req_fn) (struct request *); +typedef void (elevator_activate_req_fn) (struct request_queue *, struct request *); +typedef void (elevator_deactivate_req_fn) (struct request_queue *, struct request *); + +typedef int (elevator_init_fn) (struct request_queue *, + struct elevator_type *e); +typedef void (elevator_exit_fn) (struct elevator_queue *); +typedef void (elevator_registered_fn) (struct request_queue *); + +struct elevator_ops +{ + elevator_merge_fn *elevator_merge_fn; + elevator_merged_fn *elevator_merged_fn; + elevator_merge_req_fn *elevator_merge_req_fn; + elevator_allow_bio_merge_fn *elevator_allow_bio_merge_fn; + elevator_allow_rq_merge_fn *elevator_allow_rq_merge_fn; + elevator_bio_merged_fn *elevator_bio_merged_fn; + + elevator_dispatch_fn *elevator_dispatch_fn; + elevator_add_req_fn *elevator_add_req_fn; + elevator_activate_req_fn *elevator_activate_req_fn; + elevator_deactivate_req_fn *elevator_deactivate_req_fn; + + elevator_completed_req_fn *elevator_completed_req_fn; + + elevator_request_list_fn *elevator_former_req_fn; + elevator_request_list_fn *elevator_latter_req_fn; + + elevator_init_icq_fn *elevator_init_icq_fn; /* see iocontext.h */ + elevator_exit_icq_fn *elevator_exit_icq_fn; /* ditto */ + + 
elevator_set_req_fn *elevator_set_req_fn; + elevator_put_req_fn *elevator_put_req_fn; + + elevator_may_queue_fn *elevator_may_queue_fn; + + elevator_init_fn *elevator_init_fn; + elevator_exit_fn *elevator_exit_fn; + elevator_registered_fn *elevator_registered_fn; }; #define ELV_NAME_MAX (16) @@ -69,32 +94,20 @@ struct elevator_type struct kmem_cache *icq_cache; /* fields provided by elevator implementation */ - struct elevator_mq_ops ops; - + struct elevator_ops ops; size_t icq_size; /* see iocontext.h */ size_t icq_align; /* ditto */ struct elv_fs_entry *elevator_attrs; - const char *elevator_name; - const char *elevator_alias; - const unsigned int elevator_features; + char elevator_name[ELV_NAME_MAX]; struct module *elevator_owner; -#ifdef CONFIG_BLK_DEBUG_FS - const struct blk_mq_debugfs_attr *queue_debugfs_attrs; - const struct blk_mq_debugfs_attr *hctx_debugfs_attrs; -#endif /* managed by elevator core */ - char icq_cache_name[ELV_NAME_MAX + 6]; /* elvname + "_io_cq" */ + char icq_cache_name[ELV_NAME_MAX + 5]; /* elvname + "_io_cq" */ struct list_head list; }; #define ELV_HASH_BITS 6 -void elv_rqhash_del(struct request_queue *q, struct request *rq); -void elv_rqhash_add(struct request_queue *q, struct request *rq); -void elv_rqhash_reposition(struct request_queue *q, struct request *rq); -struct request *elv_rqhash_find(struct request_queue *q, sector_t offset); - /* * each queue has an elevator_queue associated with it */ @@ -111,21 +124,32 @@ struct elevator_queue /* * block elevator interface */ -extern enum elv_merge elv_merge(struct request_queue *, struct request **, - struct bio *); +extern void elv_dispatch_sort(struct request_queue *, struct request *); +extern void elv_dispatch_add_tail(struct request_queue *, struct request *); +extern void elv_add_request(struct request_queue *, struct request *, int); +extern void __elv_add_request(struct request_queue *, struct request *, int); +extern int elv_merge(struct request_queue *, struct request **, 
struct bio *); extern void elv_merge_requests(struct request_queue *, struct request *, struct request *); -extern void elv_merged_request(struct request_queue *, struct request *, - enum elv_merge); -extern bool elv_attempt_insert_merge(struct request_queue *, struct request *, - struct list_head *); +extern void elv_merged_request(struct request_queue *, struct request *, int); +extern void elv_bio_merged(struct request_queue *q, struct request *, + struct bio *); +extern void elv_requeue_request(struct request_queue *, struct request *); extern struct request *elv_former_request(struct request_queue *, struct request *); extern struct request *elv_latter_request(struct request_queue *, struct request *); -void elevator_init_mq(struct request_queue *q); +extern int elv_register_queue(struct request_queue *q); +extern void elv_unregister_queue(struct request_queue *q); +extern int elv_may_queue(struct request_queue *, int, int); +extern void elv_completed_request(struct request_queue *, struct request *); +extern int elv_set_request(struct request_queue *q, struct request *rq, + struct bio *bio, gfp_t gfp_mask); +extern void elv_put_request(struct request_queue *, struct request *); +extern void elv_drain_elevator(struct request_queue *); /* * io scheduler registration */ +extern void __init load_default_elevator_module(void); extern int elv_register(struct elevator_type *); extern void elv_unregister(struct elevator_type *); @@ -135,6 +159,9 @@ extern void elv_unregister(struct elevator_type *); extern ssize_t elv_iosched_show(struct request_queue *, char *); extern ssize_t elv_iosched_store(struct request_queue *, const char *, size_t); +extern int elevator_init(struct request_queue *, char *); +extern void elevator_exit(struct elevator_queue *); +extern int elevator_change(struct request_queue *, const char *); extern bool elv_bio_merge_ok(struct request *, struct bio *); extern struct elevator_queue *elevator_alloc(struct request_queue *, struct elevator_type 
*); @@ -152,6 +179,13 @@ extern void elv_rb_add(struct rb_root *, struct request *); extern void elv_rb_del(struct rb_root *, struct request *); extern struct request *elv_rb_find(struct rb_root *, sector_t); +/* + * Return values from elevator merger + */ +#define ELEVATOR_NO_MERGE 0 +#define ELEVATOR_FRONT_MERGE 1 +#define ELEVATOR_BACK_MERGE 2 + /* * Insertion selection */ @@ -162,20 +196,24 @@ extern struct request *elv_rb_find(struct rb_root *, sector_t); #define ELEVATOR_INSERT_FLUSH 5 #define ELEVATOR_INSERT_SORT_MERGE 6 +/* + * return values from elevator_may_queue_fn + */ +enum { + ELV_MQUEUE_MAY, + ELV_MQUEUE_NO, + ELV_MQUEUE_MUST, +}; + #define rq_end_sector(rq) (blk_rq_pos(rq) + blk_rq_sectors(rq)) #define rb_entry_rq(node) rb_entry((node), struct request, rb_node) #define rq_entry_fifo(ptr) list_entry((ptr), struct request, queuelist) #define rq_fifo_clear(rq) list_del_init(&(rq)->queuelist) -/* - * Elevator features. - */ +#else /* CONFIG_BLOCK */ -/* Supports zoned block devices sequential write constraint */ -#define ELEVATOR_F_ZBD_SEQ_WRITE (1U << 0) -/* Supports scheduling on multiple hardware queues */ -#define ELEVATOR_F_MQ_AWARE (1U << 1) +static inline void load_default_elevator_module(void) { } #endif /* CONFIG_BLOCK */ #endif diff --git a/include/linux/elf-fdpic.h b/include/linux/elf-fdpic.h index 3bea95a1af..386440317b 100644 --- a/include/linux/elf-fdpic.h +++ b/include/linux/elf-fdpic.h @@ -1,8 +1,12 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ /* FDPIC ELF load map * * Copyright (C) 2003 Red Hat, Inc. All Rights Reserved. * Written by David Howells (dhowells@redhat.com) + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. 
*/ #ifndef _LINUX_ELF_FDPIC_H diff --git a/include/linux/elf-randomize.h b/include/linux/elf-randomize.h index da0dbb7b6b..b5f0bda947 100644 --- a/include/linux/elf-randomize.h +++ b/include/linux/elf-randomize.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _ELF_RANDOMIZE_H #define _ELF_RANDOMIZE_H diff --git a/include/linux/elf.h b/include/linux/elf.h index c9a46c4e18..3d0dd189a2 100644 --- a/include/linux/elf.h +++ b/include/linux/elf.h @@ -1,8 +1,6 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_ELF_H #define _LINUX_ELF_H -#include #include #include @@ -22,19 +20,6 @@ SET_PERSONALITY(ex) #endif -#ifndef START_THREAD -#define START_THREAD(elf_ex, regs, elf_entry, start_stack) \ - start_thread(regs, elf_entry, start_stack) -#endif - -#if defined(ARCH_HAS_SETUP_ADDITIONAL_PAGES) && !defined(ARCH_SETUP_ADDITIONAL_PAGES) -#define ARCH_SETUP_ADDITIONAL_PAGES(bprm, ex, interpreter) \ - arch_setup_additional_pages(bprm, interpreter) -#endif - -#define ELF32_GNU_PROPERTY_ALIGN 4 -#define ELF64_GNU_PROPERTY_ALIGN 8 - #if ELF_CLASS == ELFCLASS32 extern Elf32_Dyn _DYNAMIC []; @@ -44,8 +29,7 @@ extern Elf32_Dyn _DYNAMIC []; #define elf_note elf32_note #define elf_addr_t Elf32_Off #define Elf_Half Elf32_Half -#define Elf_Word Elf32_Word -#define ELF_GNU_PROPERTY_ALIGN ELF32_GNU_PROPERTY_ALIGN +#define elf_dyn Elf32_Dyn #else @@ -56,8 +40,7 @@ extern Elf64_Dyn _DYNAMIC []; #define elf_note elf64_note #define elf_addr_t Elf64_Off #define Elf_Half Elf64_Half -#define Elf_Word Elf64_Word -#define ELF_GNU_PROPERTY_ALIGN ELF64_GNU_PROPERTY_ALIGN +#define elf_dyn Elf64_Dyn #endif @@ -72,41 +55,4 @@ static inline int elf_coredump_extra_notes_write(struct coredump_params *cprm) { extern int elf_coredump_extra_notes_size(void); extern int elf_coredump_extra_notes_write(struct coredump_params *cprm); #endif - -/* - * NT_GNU_PROPERTY_TYPE_0 header: - * Keep this internal until/unless there is an agreed UAPI definition. 
- * pr_type values (GNU_PROPERTY_*) are public and defined in the UAPI header. - */ -struct gnu_property { - u32 pr_type; - u32 pr_datasz; -}; - -struct arch_elf_state; - -#ifndef CONFIG_ARCH_USE_GNU_PROPERTY -static inline int arch_parse_elf_property(u32 type, const void *data, - size_t datasz, bool compat, - struct arch_elf_state *arch) -{ - return 0; -} -#else -extern int arch_parse_elf_property(u32 type, const void *data, size_t datasz, - bool compat, struct arch_elf_state *arch); -#endif - -#ifdef CONFIG_ARCH_HAVE_ELF_PROT -int arch_elf_adjust_prot(int prot, const struct arch_elf_state *state, - bool has_interp, bool is_interp); -#else -static inline int arch_elf_adjust_prot(int prot, - const struct arch_elf_state *state, - bool has_interp, bool is_interp) -{ - return prot; -} -#endif - #endif /* _LINUX_ELF_H */ diff --git a/include/linux/elfcore-compat.h b/include/linux/elfcore-compat.h index e272c3d452..0a90e1c3a4 100644 --- a/include/linux/elfcore-compat.h +++ b/include/linux/elfcore-compat.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_ELFCORE_COMPAT_H #define _LINUX_ELFCORE_COMPAT_H @@ -17,7 +16,7 @@ struct compat_elf_siginfo compat_int_t si_errno; }; -struct compat_elf_prstatus_common +struct compat_elf_prstatus { struct compat_elf_siginfo pr_info; short pr_cursig; @@ -27,10 +26,16 @@ struct compat_elf_prstatus_common compat_pid_t pr_ppid; compat_pid_t pr_pgrp; compat_pid_t pr_sid; - struct old_timeval32 pr_utime; - struct old_timeval32 pr_stime; - struct old_timeval32 pr_cutime; - struct old_timeval32 pr_cstime; + struct compat_timeval pr_utime; + struct compat_timeval pr_stime; + struct compat_timeval pr_cutime; + struct compat_timeval pr_cstime; + compat_elf_gregset_t pr_reg; +#ifdef CONFIG_BINFMT_ELF_FDPIC + compat_ulong_t pr_exec_fdpic_loadmap; + compat_ulong_t pr_interp_fdpic_loadmap; +#endif + compat_int_t pr_fpvalid; }; struct compat_elf_prpsinfo @@ -47,15 +52,4 @@ struct compat_elf_prpsinfo char pr_psargs[ELF_PRARGSZ]; 
}; -#ifdef CONFIG_ARCH_HAS_ELFCORE_COMPAT -#include -#endif - -struct compat_elf_prstatus -{ - struct compat_elf_prstatus_common common; - compat_elf_gregset_t pr_reg; - compat_int_t pr_fpvalid; -}; - #endif /* _LINUX_ELFCORE_COMPAT_H */ diff --git a/include/linux/elfcore.h b/include/linux/elfcore.h index 957ebec35a..698d51a0ee 100644 --- a/include/linux/elfcore.h +++ b/include/linux/elfcore.h @@ -1,74 +1,13 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_ELFCORE_H #define _LINUX_ELFCORE_H #include #include -#include -#include -#include -#include -#include -#include -#include +#include +#include struct coredump_params; -struct elf_siginfo -{ - int si_signo; /* signal number */ - int si_code; /* extra code */ - int si_errno; /* errno */ -}; - -/* - * Definitions to generate Intel SVR4-like core files. - * These mostly have the same names as the SVR4 types with "elf_" - * tacked on the front to prevent clashes with linux definitions, - * and the typedef forms have been avoided. This is mostly like - * the SVR4 structure, but more Linuxy, with things that Linux does - * not support and which gdb doesn't really use excluded. - */ -struct elf_prstatus_common -{ - struct elf_siginfo pr_info; /* Info associated with signal */ - short pr_cursig; /* Current signal */ - unsigned long pr_sigpend; /* Set of pending signals */ - unsigned long pr_sighold; /* Set of held signals */ - pid_t pr_pid; - pid_t pr_ppid; - pid_t pr_pgrp; - pid_t pr_sid; - struct __kernel_old_timeval pr_utime; /* User time */ - struct __kernel_old_timeval pr_stime; /* System time */ - struct __kernel_old_timeval pr_cutime; /* Cumulative user time */ - struct __kernel_old_timeval pr_cstime; /* Cumulative system time */ -}; - -struct elf_prstatus -{ - struct elf_prstatus_common common; - elf_gregset_t pr_reg; /* GP registers */ - int pr_fpvalid; /* True if math co-processor being used. 
*/ -}; - -#define ELF_PRARGSZ (80) /* Number of chars for args */ - -struct elf_prpsinfo -{ - char pr_state; /* numeric process state */ - char pr_sname; /* char for pr_state */ - char pr_zomb; /* zombie */ - char pr_nice; /* nice val */ - unsigned long pr_flag; /* flags */ - __kernel_uid_t pr_uid; - __kernel_gid_t pr_gid; - pid_t pr_pid, pr_ppid, pr_pgrp, pr_sid; - /* Lots missing */ - char pr_fname[16]; /* filename of executable */ - char pr_psargs[ELF_PRARGSZ]; /* initial part of arg list */ -}; - static inline void elf_core_copy_regs(elf_gregset_t *elfregs, struct pt_regs *regs) { #ifdef ELF_CORE_COPY_REGS @@ -109,7 +48,13 @@ static inline int elf_core_copy_task_fpregs(struct task_struct *t, struct pt_reg #endif } -#if (defined(CONFIG_UML) && defined(CONFIG_X86_32)) || defined(CONFIG_IA64) +#ifdef ELF_CORE_COPY_XFPREGS +static inline int elf_core_copy_task_xfpregs(struct task_struct *t, elf_fpxregset_t *xfpu) +{ + return ELF_CORE_COPY_XFPREGS(t, xfpu); +} +#endif + /* * These functions parameterize elf_core_dump in fs/binfmt_elf.c to write out * extra segments containing the gate DSO contents. 
Dumping its @@ -124,26 +69,5 @@ elf_core_write_extra_phdrs(struct coredump_params *cprm, loff_t offset); extern int elf_core_write_extra_data(struct coredump_params *cprm); extern size_t elf_core_extra_data_size(void); -#else -static inline Elf_Half elf_core_extra_phdrs(void) -{ - return 0; -} - -static inline int elf_core_write_extra_phdrs(struct coredump_params *cprm, loff_t offset) -{ - return 1; -} - -static inline int elf_core_write_extra_data(struct coredump_params *cprm) -{ - return 1; -} - -static inline size_t elf_core_extra_data_size(void) -{ - return 0; -} -#endif #endif /* _LINUX_ELFCORE_H */ diff --git a/include/linux/elfnote.h b/include/linux/elfnote.h index 69b136e4dd..278e3ef053 100644 --- a/include/linux/elfnote.h +++ b/include/linux/elfnote.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_ELFNOTE_H #define _LINUX_ELFNOTE_H /* @@ -54,12 +53,12 @@ .popsection ; #define ELFNOTE(name, type, desc) \ - ELFNOTE_START(name, type, "a") \ + ELFNOTE_START(name, type, "") \ desc ; \ ELFNOTE_END #else /* !__ASSEMBLER__ */ -#include +#include /* * Use an anonymous structure which matches the shape of * Elf{32,64}_Nhdr, but includes the name and desc data. The size and diff --git a/include/linux/enclosure.h b/include/linux/enclosure.h index 1c630e2c27..a4cf57cd0f 100644 --- a/include/linux/enclosure.h +++ b/include/linux/enclosure.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * Enclosure Services * @@ -6,6 +5,18 @@ * **----------------------------------------------------------------------------- ** +** This program is free software; you can redistribute it and/or +** modify it under the terms of the GNU General Public License +** version 2 as published by the Free Software Foundation. +** +** This program is distributed in the hope that it will be useful, +** but WITHOUT ANY WARRANTY; without even the implied warranty of +** MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the +** GNU General Public License for more details. +** +** You should have received a copy of the GNU General Public License +** along with this program; if not, write to the Free Software +** Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. ** **----------------------------------------------------------------------------- */ @@ -101,7 +112,7 @@ struct enclosure_device { struct device edev; struct enclosure_component_callbacks *cb; int components; - struct enclosure_component component[]; + struct enclosure_component component[0]; }; static inline struct enclosure_device * diff --git a/include/linux/err.h b/include/linux/err.h index a139c64aef..ce9721b121 100644 --- a/include/linux/err.h +++ b/include/linux/err.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_ERR_H #define _LINUX_ERR_H @@ -21,12 +20,12 @@ #define IS_ERR_VALUE(x) unlikely((unsigned long)(void *)(x) >= (unsigned long)-MAX_ERRNO) -static inline void * __must_check ERR_PTR(long error) +static inline void * __must_check __intentional_overflow(-1) ERR_PTR(long error) { return (void *) error; } -static inline long __must_check PTR_ERR(__force const void *ptr) +static inline long __must_check __intentional_overflow(-1) PTR_ERR(__force const void *ptr) { return (long) ptr; } @@ -62,6 +61,9 @@ static inline int __must_check PTR_ERR_OR_ZERO(__force const void *ptr) return 0; } +/* Deprecated */ +#define PTR_RET(p) PTR_ERR_OR_ZERO(p) + #endif #endif /* _LINUX_ERR_H */ diff --git a/include/linux/errno.h b/include/linux/errno.h index 8b0c754bab..7ce9fb1b7d 100644 --- a/include/linux/errno.h +++ b/include/linux/errno.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_ERRNO_H #define _LINUX_ERRNO_H @@ -18,7 +17,6 @@ #define ERESTART_RESTARTBLOCK 516 /* restart by calling sys_restart_syscall */ #define EPROBE_DEFER 517 /* Driver requests probe retry */ #define EOPENSTALE 518 /* open found a stale dentry */ -#define ENOPARAM 519 /* Parameter not supported 
*/ /* Defined for the NFSv3 protocol */ #define EBADHANDLE 521 /* Illegal NFS file handle */ @@ -31,6 +29,5 @@ #define EJUKEBOX 528 /* Request initiated, but will not complete before timeout */ #define EIOCBQUEUED 529 /* iocb queued, will get completion event */ #define ERECALLCONFLICT 530 /* conflict with recalled state */ -#define ENOGRACE 531 /* NFS file lock reclaim refused */ #endif diff --git a/include/linux/errqueue.h b/include/linux/errqueue.h index be1cf7291d..9ca23fcfb5 100644 --- a/include/linux/errqueue.h +++ b/include/linux/errqueue.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_ERRQUEUE_H #define _LINUX_ERRQUEUE_H 1 @@ -21,8 +20,6 @@ struct sock_exterr_skb { struct sock_extended_err ee; u16 addr_offset; __be16 port; - u8 opt_stats:1, - unused:7; }; #endif diff --git a/include/linux/etherdevice.h b/include/linux/etherdevice.h index c58d504514..6fec9e81bd 100644 --- a/include/linux/etherdevice.h +++ b/include/linux/etherdevice.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * INET An implementation of the TCP/IP protocol suite for the LINUX * operating system. NET is implemented using the BSD Socket @@ -11,8 +10,14 @@ * Authors: Ross Biro * Fred N. van Kempen, * - * Relocated to include/linux where it belongs by Alan Cox + * Relocated to include/linux where it belongs by Alan Cox * + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. 
+ * */ #ifndef _LINUX_ETHERDEVICE_H #define _LINUX_ETHERDEVICE_H @@ -20,16 +25,14 @@ #include #include #include -#include #include #include #ifdef __KERNEL__ struct device; int eth_platform_get_mac_address(struct device *dev, u8 *mac_addr); -unsigned char *arch_get_platform_mac_address(void); -int nvmem_get_mac_address(struct device *dev, void *addrbuf); -u32 eth_get_headlen(const struct net_device *dev, const void *data, u32 len); +unsigned char *arch_get_platform_get_mac_address(void); +u32 eth_get_headlen(void *data, unsigned int max_len); __be16 eth_type_trans(struct sk_buff *skb, struct net_device *dev); extern const struct header_ops eth_header_ops; @@ -40,10 +43,10 @@ int eth_header_cache(const struct neighbour *neigh, struct hh_cache *hh, __be16 type); void eth_header_cache_update(struct hh_cache *hh, const struct net_device *dev, const unsigned char *haddr); -__be16 eth_header_parse_protocol(const struct sk_buff *skb); int eth_prepare_mac_addr_change(struct net_device *dev, void *p); void eth_commit_mac_addr_change(struct net_device *dev, void *p); int eth_mac_addr(struct net_device *dev, void *p); +int eth_change_mtu(struct net_device *dev, int new_mtu); int eth_validate_addr(struct net_device *dev); struct net_device *alloc_etherdev_mqs(int sizeof_priv, unsigned int txqs, @@ -51,18 +54,13 @@ struct net_device *alloc_etherdev_mqs(int sizeof_priv, unsigned int txqs, #define alloc_etherdev(sizeof_priv) alloc_etherdev_mq(sizeof_priv, 1) #define alloc_etherdev_mq(sizeof_priv, count) alloc_etherdev_mqs(sizeof_priv, count, count) -struct net_device *devm_alloc_etherdev_mqs(struct device *dev, int sizeof_priv, - unsigned int txqs, - unsigned int rxqs); -#define devm_alloc_etherdev(dev, sizeof_priv) devm_alloc_etherdev_mqs(dev, sizeof_priv, 1, 1) - -struct sk_buff *eth_gro_receive(struct list_head *head, struct sk_buff *skb); +struct sk_buff **eth_gro_receive(struct sk_buff **head, + struct sk_buff *skb); int eth_gro_complete(struct sk_buff *skb, int nhoff); /* 
Reserved Ethernet Addresses per IEEE 802.1Q */ static const u8 eth_reserved_addr_base[ETH_ALEN] __aligned(2) = { 0x01, 0x80, 0xc2, 0x00, 0x00, 0x00 }; -#define eth_stp_addr eth_reserved_addr_base /** * is_link_local_ether_addr - Determine if given Ethernet address is link-local @@ -266,17 +264,6 @@ static inline void eth_hw_addr_random(struct net_device *dev) eth_random_addr(dev->dev_addr); } -/** - * eth_hw_addr_crc - Calculate CRC from netdev_hw_addr - * @ha: pointer to hardware address - * - * Calculate CRC from a hardware address as basis for filter hashes. - */ -static inline u32 eth_hw_addr_crc(struct netdev_hw_addr *ha) -{ - return ether_crc(ETH_ALEN, ha->addr); -} - /** * ether_addr_copy - Copy an Ethernet address * @dst: Pointer to a six-byte array Ethernet address destination @@ -299,18 +286,6 @@ static inline void ether_addr_copy(u8 *dst, const u8 *src) #endif } -/** - * eth_hw_addr_set - Assign Ethernet address to a net_device - * @dev: pointer to net_device structure - * @addr: address to assign - * - * Assign given address to the net_device, addr_assign_type is not changed. - */ -static inline void eth_hw_addr_set(struct net_device *dev, const u8 *addr) -{ - __dev_addr_set(dev, addr, ETH_ALEN); -} - /** * eth_hw_addr_inherit - Copy dev_addr from another net_device * @dst: pointer to net_device to copy dev_addr to @@ -421,63 +396,6 @@ static inline bool ether_addr_equal_masked(const u8 *addr1, const u8 *addr2, return true; } -/** - * ether_addr_to_u64 - Convert an Ethernet address into a u64 value. - * @addr: Pointer to a six-byte array containing the Ethernet address - * - * Return a u64 value of the address - */ -static inline u64 ether_addr_to_u64(const u8 *addr) -{ - u64 u = 0; - int i; - - for (i = 0; i < ETH_ALEN; i++) - u = u << 8 | addr[i]; - - return u; -} - -/** - * u64_to_ether_addr - Convert a u64 to an Ethernet address. 
- * @u: u64 to convert to an Ethernet MAC address - * @addr: Pointer to a six-byte array to contain the Ethernet address - */ -static inline void u64_to_ether_addr(u64 u, u8 *addr) -{ - int i; - - for (i = ETH_ALEN - 1; i >= 0; i--) { - addr[i] = u & 0xff; - u = u >> 8; - } -} - -/** - * eth_addr_dec - Decrement the given MAC address - * - * @addr: Pointer to a six-byte array containing Ethernet address to decrement - */ -static inline void eth_addr_dec(u8 *addr) -{ - u64 u = ether_addr_to_u64(addr); - - u--; - u64_to_ether_addr(u, addr); -} - -/** - * eth_addr_inc() - Increment the given MAC address. - * @addr: Pointer to a six-byte array containing Ethernet address to increment. - */ -static inline void eth_addr_inc(u8 *addr) -{ - u64 u = ether_addr_to_u64(addr); - - u++; - u64_to_ether_addr(u, addr); -} - /** * is_etherdev_addr - Tell if given Ethernet address belongs to the device. * @dev: Pointer to a device structure diff --git a/include/linux/ethtool.h b/include/linux/ethtool.h index 849524b55d..e11a245714 100644 --- a/include/linux/ethtool.h +++ b/include/linux/ethtool.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ /* * ethtool.h: Defines for Linux ethtool. 
* @@ -15,9 +14,10 @@ #include #include -#include #include +#ifdef CONFIG_COMPAT + struct compat_ethtool_rx_flow_spec { u32 flow_type; union ethtool_flow_union h_u; @@ -34,9 +34,11 @@ struct compat_ethtool_rxnfc { compat_u64 data; struct compat_ethtool_rx_flow_spec fs; u32 rule_cnt; - u32 rule_locs[]; + u32 rule_locs[0]; }; +#endif /* CONFIG_COMPAT */ + #include /** @@ -58,7 +60,6 @@ enum ethtool_phys_id_state { enum { ETH_RSS_HASH_TOP_BIT, /* Configurable RSS hash function - Toeplitz */ ETH_RSS_HASH_XOR_BIT, /* Configurable RSS hash function - Xor */ - ETH_RSS_HASH_CRC32_BIT, /* Configurable RSS hash function - Crc32 */ /* * Add your fresh new hash function bits above and remember to update @@ -72,32 +73,16 @@ enum { #define ETH_RSS_HASH_TOP __ETH_RSS_HASH(TOP) #define ETH_RSS_HASH_XOR __ETH_RSS_HASH(XOR) -#define ETH_RSS_HASH_CRC32 __ETH_RSS_HASH(CRC32) #define ETH_RSS_HASH_UNKNOWN 0 #define ETH_RSS_HASH_NO_CHANGE 0 struct net_device; -struct netlink_ext_ack; /* Some generic methods drivers may use in their ethtool_ops */ u32 ethtool_op_get_link(struct net_device *dev); int ethtool_op_get_ts_info(struct net_device *dev, struct ethtool_ts_info *eti); - -/* Link extended state and substate. 
*/ -struct ethtool_link_ext_state_info { - enum ethtool_link_ext_state link_ext_state; - union { - enum ethtool_link_ext_substate_autoneg autoneg; - enum ethtool_link_ext_substate_link_training link_training; - enum ethtool_link_ext_substate_link_logical_mismatch link_logical_mismatch; - enum ethtool_link_ext_substate_bad_signal_integrity bad_signal_integrity; - enum ethtool_link_ext_substate_cable_issue cable_issue; - u8 __link_ext_substate; - }; -}; - /** * ethtool_rxfh_indir_default - get default value for RX flow hash indirection * @index: Index in RX flow hash indirection table @@ -110,6 +95,10 @@ static inline u32 ethtool_rxfh_indir_default(u32 index, u32 n_rx_rings) return index % n_rx_rings; } +/* number of link mode bits/ulongs handled internally by kernel */ +#define __ETHTOOL_LINK_MODE_MASK_NBITS \ + (__ETHTOOL_LINK_MODE_LAST + 1) + /* declare a link mode bitmap */ #define __ETHTOOL_DECLARE_LINK_MODE_MASK(name) \ DECLARE_BITMAP(name, __ETHTOOL_LINK_MODE_MASK_NBITS) @@ -124,7 +113,6 @@ struct ethtool_link_ksettings { __ETHTOOL_DECLARE_LINK_MODE_MASK(advertising); __ETHTOOL_DECLARE_LINK_MODE_MASK(lp_advertising); } link_modes; - u32 lanes; }; /** @@ -146,17 +134,6 @@ struct ethtool_link_ksettings { #define ethtool_link_ksettings_add_link_mode(ptr, name, mode) \ __set_bit(ETHTOOL_LINK_MODE_ ## mode ## _BIT, (ptr)->link_modes.name) -/** - * ethtool_link_ksettings_del_link_mode - clear bit in link_ksettings - * link mode mask - * @ptr : pointer to struct ethtool_link_ksettings - * @name : one of supported/advertising/lp_advertising - * @mode : one of the ETHTOOL_LINK_MODE_*_BIT - * (not atomic, no bound checking) - */ -#define ethtool_link_ksettings_del_link_mode(ptr, name, mode) \ - __clear_bit(ETHTOOL_LINK_MODE_ ## mode ## _BIT, (ptr)->link_modes.name) - /** * ethtool_link_ksettings_test_link_mode - test bit in ksettings link mode mask * @ptr : pointer to struct ethtool_link_ksettings @@ -173,21 +150,6 @@ extern int __ethtool_get_link_ksettings(struct 
net_device *dev, struct ethtool_link_ksettings *link_ksettings); -struct kernel_ethtool_coalesce { - u8 use_cqe_mode_tx; - u8 use_cqe_mode_rx; -}; - -/** - * ethtool_intersect_link_masks - Given two link masks, AND them together - * @dst: first mask and where result is stored - * @src: second mask to intersect with - * - * Given two link mode masks, AND them together and save the result in dst. - */ -void ethtool_intersect_link_masks(struct ethtool_link_ksettings *dst, - struct ethtool_link_ksettings *src); - void ethtool_convert_legacy_u32_to_link_mode(unsigned long *dst, u32 legacy_u32); @@ -195,231 +157,16 @@ void ethtool_convert_legacy_u32_to_link_mode(unsigned long *dst, bool ethtool_convert_link_mode_to_legacy_u32(u32 *legacy_u32, const unsigned long *src); -#define ETHTOOL_COALESCE_RX_USECS BIT(0) -#define ETHTOOL_COALESCE_RX_MAX_FRAMES BIT(1) -#define ETHTOOL_COALESCE_RX_USECS_IRQ BIT(2) -#define ETHTOOL_COALESCE_RX_MAX_FRAMES_IRQ BIT(3) -#define ETHTOOL_COALESCE_TX_USECS BIT(4) -#define ETHTOOL_COALESCE_TX_MAX_FRAMES BIT(5) -#define ETHTOOL_COALESCE_TX_USECS_IRQ BIT(6) -#define ETHTOOL_COALESCE_TX_MAX_FRAMES_IRQ BIT(7) -#define ETHTOOL_COALESCE_STATS_BLOCK_USECS BIT(8) -#define ETHTOOL_COALESCE_USE_ADAPTIVE_RX BIT(9) -#define ETHTOOL_COALESCE_USE_ADAPTIVE_TX BIT(10) -#define ETHTOOL_COALESCE_PKT_RATE_LOW BIT(11) -#define ETHTOOL_COALESCE_RX_USECS_LOW BIT(12) -#define ETHTOOL_COALESCE_RX_MAX_FRAMES_LOW BIT(13) -#define ETHTOOL_COALESCE_TX_USECS_LOW BIT(14) -#define ETHTOOL_COALESCE_TX_MAX_FRAMES_LOW BIT(15) -#define ETHTOOL_COALESCE_PKT_RATE_HIGH BIT(16) -#define ETHTOOL_COALESCE_RX_USECS_HIGH BIT(17) -#define ETHTOOL_COALESCE_RX_MAX_FRAMES_HIGH BIT(18) -#define ETHTOOL_COALESCE_TX_USECS_HIGH BIT(19) -#define ETHTOOL_COALESCE_TX_MAX_FRAMES_HIGH BIT(20) -#define ETHTOOL_COALESCE_RATE_SAMPLE_INTERVAL BIT(21) -#define ETHTOOL_COALESCE_USE_CQE_RX BIT(22) -#define ETHTOOL_COALESCE_USE_CQE_TX BIT(23) -#define ETHTOOL_COALESCE_ALL_PARAMS GENMASK(23, 0) - -#define 
ETHTOOL_COALESCE_USECS \ - (ETHTOOL_COALESCE_RX_USECS | ETHTOOL_COALESCE_TX_USECS) -#define ETHTOOL_COALESCE_MAX_FRAMES \ - (ETHTOOL_COALESCE_RX_MAX_FRAMES | ETHTOOL_COALESCE_TX_MAX_FRAMES) -#define ETHTOOL_COALESCE_USECS_IRQ \ - (ETHTOOL_COALESCE_RX_USECS_IRQ | ETHTOOL_COALESCE_TX_USECS_IRQ) -#define ETHTOOL_COALESCE_MAX_FRAMES_IRQ \ - (ETHTOOL_COALESCE_RX_MAX_FRAMES_IRQ | \ - ETHTOOL_COALESCE_TX_MAX_FRAMES_IRQ) -#define ETHTOOL_COALESCE_USE_ADAPTIVE \ - (ETHTOOL_COALESCE_USE_ADAPTIVE_RX | ETHTOOL_COALESCE_USE_ADAPTIVE_TX) -#define ETHTOOL_COALESCE_USECS_LOW_HIGH \ - (ETHTOOL_COALESCE_RX_USECS_LOW | ETHTOOL_COALESCE_TX_USECS_LOW | \ - ETHTOOL_COALESCE_RX_USECS_HIGH | ETHTOOL_COALESCE_TX_USECS_HIGH) -#define ETHTOOL_COALESCE_MAX_FRAMES_LOW_HIGH \ - (ETHTOOL_COALESCE_RX_MAX_FRAMES_LOW | \ - ETHTOOL_COALESCE_TX_MAX_FRAMES_LOW | \ - ETHTOOL_COALESCE_RX_MAX_FRAMES_HIGH | \ - ETHTOOL_COALESCE_TX_MAX_FRAMES_HIGH) -#define ETHTOOL_COALESCE_PKT_RATE_RX_USECS \ - (ETHTOOL_COALESCE_USE_ADAPTIVE_RX | \ - ETHTOOL_COALESCE_RX_USECS_LOW | ETHTOOL_COALESCE_RX_USECS_HIGH | \ - ETHTOOL_COALESCE_PKT_RATE_LOW | ETHTOOL_COALESCE_PKT_RATE_HIGH | \ - ETHTOOL_COALESCE_RATE_SAMPLE_INTERVAL) -#define ETHTOOL_COALESCE_USE_CQE \ - (ETHTOOL_COALESCE_USE_CQE_RX | ETHTOOL_COALESCE_USE_CQE_TX) - -#define ETHTOOL_STAT_NOT_SET (~0ULL) - -static inline void ethtool_stats_init(u64 *stats, unsigned int n) -{ - while (n--) - stats[n] = ETHTOOL_STAT_NOT_SET; -} - -/* Basic IEEE 802.3 MAC statistics (30.3.1.1.*), not otherwise exposed - * via a more targeted API. 
- */ -struct ethtool_eth_mac_stats { - u64 FramesTransmittedOK; - u64 SingleCollisionFrames; - u64 MultipleCollisionFrames; - u64 FramesReceivedOK; - u64 FrameCheckSequenceErrors; - u64 AlignmentErrors; - u64 OctetsTransmittedOK; - u64 FramesWithDeferredXmissions; - u64 LateCollisions; - u64 FramesAbortedDueToXSColls; - u64 FramesLostDueToIntMACXmitError; - u64 CarrierSenseErrors; - u64 OctetsReceivedOK; - u64 FramesLostDueToIntMACRcvError; - u64 MulticastFramesXmittedOK; - u64 BroadcastFramesXmittedOK; - u64 FramesWithExcessiveDeferral; - u64 MulticastFramesReceivedOK; - u64 BroadcastFramesReceivedOK; - u64 InRangeLengthErrors; - u64 OutOfRangeLengthField; - u64 FrameTooLongErrors; -}; - -/* Basic IEEE 802.3 PHY statistics (30.3.2.1.*), not otherwise exposed - * via a more targeted API. - */ -struct ethtool_eth_phy_stats { - u64 SymbolErrorDuringCarrier; -}; - -/* Basic IEEE 802.3 MAC Ctrl statistics (30.3.3.*), not otherwise exposed - * via a more targeted API. - */ -struct ethtool_eth_ctrl_stats { - u64 MACControlFramesTransmitted; - u64 MACControlFramesReceived; - u64 UnsupportedOpcodesReceived; -}; - -/** - * struct ethtool_pause_stats - statistics for IEEE 802.3x pause frames - * @tx_pause_frames: transmitted pause frame count. Reported to user space - * as %ETHTOOL_A_PAUSE_STAT_TX_FRAMES. - * - * Equivalent to `30.3.4.2 aPAUSEMACCtrlFramesTransmitted` - * from the standard. - * - * @rx_pause_frames: received pause frame count. Reported to user space - * as %ETHTOOL_A_PAUSE_STAT_RX_FRAMES. Equivalent to: - * - * Equivalent to `30.3.4.3 aPAUSEMACCtrlFramesReceived` - * from the standard. - */ -struct ethtool_pause_stats { - u64 tx_pause_frames; - u64 rx_pause_frames; -}; - -#define ETHTOOL_MAX_LANES 8 - -/** - * struct ethtool_fec_stats - statistics for IEEE 802.3 FEC - * @corrected_blocks: number of received blocks corrected by FEC - * Reported to user space as %ETHTOOL_A_FEC_STAT_CORRECTED. 
- * - * Equivalent to `30.5.1.1.17 aFECCorrectedBlocks` from the standard. - * - * @uncorrectable_blocks: number of received blocks FEC was not able to correct - * Reported to user space as %ETHTOOL_A_FEC_STAT_UNCORR. - * - * Equivalent to `30.5.1.1.18 aFECUncorrectableBlocks` from the standard. - * - * @corrected_bits: number of bits corrected by FEC - * Similar to @corrected_blocks but counts individual bit changes, - * not entire FEC data blocks. This is a non-standard statistic. - * Reported to user space as %ETHTOOL_A_FEC_STAT_CORR_BITS. - * - * @lane: per-lane/PCS-instance counts as defined by the standard - * @total: error counts for the entire port, for drivers incapable of reporting - * per-lane stats - * - * Drivers should fill in either only total or per-lane statistics, core - * will take care of adding lane values up to produce the total. - */ -struct ethtool_fec_stats { - struct ethtool_fec_stat { - u64 total; - u64 lanes[ETHTOOL_MAX_LANES]; - } corrected_blocks, uncorrectable_blocks, corrected_bits; -}; - -/** - * struct ethtool_rmon_hist_range - byte range for histogram statistics - * @low: low bound of the bucket (inclusive) - * @high: high bound of the bucket (inclusive) - */ -struct ethtool_rmon_hist_range { - u16 low; - u16 high; -}; - -#define ETHTOOL_RMON_HIST_MAX 10 - -/** - * struct ethtool_rmon_stats - selected RMON (RFC 2819) statistics - * @undersize_pkts: Equivalent to `etherStatsUndersizePkts` from the RFC. - * @oversize_pkts: Equivalent to `etherStatsOversizePkts` from the RFC. - * @fragments: Equivalent to `etherStatsFragments` from the RFC. - * @jabbers: Equivalent to `etherStatsJabbers` from the RFC. - * @hist: Packet counter for packet length buckets (e.g. - * `etherStatsPkts128to255Octets` from the RFC). - * @hist_tx: Tx counters in similar form to @hist, not defined in the RFC. - * - * Selection of RMON (RFC 2819) statistics which are not exposed via different - * APIs, primarily the packet-length-based counters. 
- * Unfortunately different designs choose different buckets beyond - * the 1024B mark (jumbo frame teritory), so the definition of the bucket - * ranges is left to the driver. - */ -struct ethtool_rmon_stats { - u64 undersize_pkts; - u64 oversize_pkts; - u64 fragments; - u64 jabbers; - - u64 hist[ETHTOOL_RMON_HIST_MAX]; - u64 hist_tx[ETHTOOL_RMON_HIST_MAX]; -}; - -#define ETH_MODULE_EEPROM_PAGE_LEN 128 -#define ETH_MODULE_MAX_I2C_ADDRESS 0x7f - -/** - * struct ethtool_module_eeprom - EEPROM dump from specified page - * @offset: Offset within the specified EEPROM page to begin read, in bytes. - * @length: Number of bytes to read. - * @page: Page number to read from. - * @bank: Page bank number to read from, if applicable by EEPROM spec. - * @i2c_address: I2C address of a page. Value less than 0x7f expected. Most - * EEPROMs use 0x50 or 0x51. - * @data: Pointer to buffer with EEPROM data of @length size. - * - * This can be used to manage pages during EEPROM dump in ethtool and pass - * required information to the driver. - */ -struct ethtool_module_eeprom { - u32 offset; - u32 length; - u8 page; - u8 bank; - u8 i2c_address; - u8 *data; -}; - /** * struct ethtool_ops - optional netdev operations - * @cap_link_lanes_supported: indicates if the driver supports lanes - * parameter. - * @supported_coalesce_params: supported types of interrupt coalescing. + * @get_settings: DEPRECATED, use %get_link_ksettings/%set_link_ksettings + * API. Get various device settings including Ethernet link + * settings. The @cmd parameter is expected to have been cleared + * before get_settings is called. Returns a negative error code + * or zero. + * @set_settings: DEPRECATED, use %get_link_ksettings/%set_link_ksettings + * API. Set various device settings including Ethernet link + * settings. Returns a negative error code or zero. * @get_drvinfo: Report driver/device information. Should only set the * @driver, @version, @fw_version and @bus_info fields. 
If not * implemented, the @driver and @bus_info fields will be filled in @@ -437,14 +184,6 @@ struct ethtool_module_eeprom { * @get_link: Report whether physical link is up. Will only be called if * the netdev is up. Should usually be set to ethtool_op_get_link(), * which uses netif_carrier_ok(). - * @get_link_ext_state: Report link extended state. Should set link_ext_state and - * link_ext_substate (link_ext_substate of 0 means link_ext_substate is unknown, - * do not attach ext_substate attribute to netlink message). If link_ext_state - * and link_ext_substate are unknown, return -ENODATA. If not implemented, - * link_ext_state and link_ext_substate will not be sent to userspace. - * @get_eeprom_len: Read range of EEPROM addresses for validation of - * @get_eeprom and @set_eeprom requests. - * Returns 0 if device does not support EEPROM access. * @get_eeprom: Read data from the device EEPROM. * Should fill in the magic field. Don't need to check len for zero * or wraparound. Fill in the data argument with the eeprom values @@ -456,14 +195,10 @@ struct ethtool_module_eeprom { * or zero. * @get_coalesce: Get interrupt coalescing parameters. Returns a negative * error code or zero. - * @set_coalesce: Set interrupt coalescing parameters. Supported coalescing - * types should be set in @supported_coalesce_params. - * Returns a negative error code or zero. + * @set_coalesce: Set interrupt coalescing parameters. Returns a negative + * error code or zero. * @get_ringparam: Report ring sizes * @set_ringparam: Set ring sizes. Returns a negative error code or zero. - * @get_pause_stats: Report pause frame statistics. Drivers must not zero - * statistics which they don't report. The stats structure is initialized - * to ETHTOOL_STAT_NOT_SET indicating driver does not report statistics. * @get_pauseparam: Report pause parameters * @set_pauseparam: Set pause parameters. Returns a negative error code * or zero. 
@@ -513,15 +248,6 @@ struct ethtool_module_eeprom { * will remain unchanged. * Returns a negative error code or zero. An error code must be returned * if at least one unsupported change was requested. - * @get_rxfh_context: Get the contents of the RX flow hash indirection table, - * hash key, and/or hash function assiciated to the given rss context. - * Returns a negative error code or zero. - * @set_rxfh_context: Create, remove and configure RSS contexts. Allows setting - * the contents of the RX flow hash indirection table, hash key, and/or - * hash function associated to the given context. Arguments which are set - * to %NULL or zero will remain unchanged. - * Returns a negative error code or zero. An error code must be returned - * if at least one unsupported change was requested. * @get_channels: Get number of channels. * @set_channels: Set number of channels. Returns a negative error code or * zero. @@ -537,8 +263,6 @@ struct ethtool_module_eeprom { * @get_module_eeprom: Get the eeprom information from the plug-in module * @get_eee: Get Energy-Efficient (EEE) supported and status. * @set_eee: Set EEE status (enable/disable) as well as LPI timers. - * @get_tunable: Read the value of a driver / device tunable. - * @set_tunable: Set the value of a driver / device tunable. * @get_per_queue_coalesce: Get interrupt coalescing parameters per queue. * It must check that the given queue number is valid. If neither a RX nor * a TX queue has this number, return -EINVAL. If only a RX queue or a TX @@ -547,39 +271,21 @@ struct ethtool_module_eeprom { * @set_per_queue_coalesce: Set interrupt coalescing parameters per queue. * It must check that the given queue number is valid. If neither a RX nor * a TX queue has this number, return -EINVAL. If only a RX queue or a TX - * queue has this number, ignore the inapplicable fields. Supported - * coalescing types should be set in @supported_coalesce_params. + * queue has this number, ignore the inapplicable fields. 
* Returns a negative error code or zero. - * @get_link_ksettings: Get various device settings including Ethernet link - * settings. The %cmd and %link_mode_masks_nwords fields should be - * ignored (use %__ETHTOOL_LINK_MODE_MASK_NBITS instead of the latter), - * any change to them will be overwritten by kernel. Returns a negative - * error code or zero. - * @set_link_ksettings: Set various device settings including Ethernet link - * settings. The %cmd and %link_mode_masks_nwords fields should be - * ignored (use %__ETHTOOL_LINK_MODE_MASK_NBITS instead of the latter), - * any change to them will be overwritten by kernel. Returns a negative - * error code or zero. - * @get_fec_stats: Report FEC statistics. - * Core will sum up per-lane stats to get the total. - * Drivers must not zero statistics which they don't report. The stats - * structure is initialized to ETHTOOL_STAT_NOT_SET indicating driver does - * not report statistics. - * @get_fecparam: Get the network device Forward Error Correction parameters. - * @set_fecparam: Set the network device Forward Error Correction parameters. - * @get_ethtool_phy_stats: Return extended statistics about the PHY device. - * This is only useful if the device maintains PHY statistics and - * cannot use the standard PHY library helpers. - * @get_phy_tunable: Read the value of a PHY tunable. - * @set_phy_tunable: Set the value of a PHY tunable. - * @get_module_eeprom_by_page: Get a region of plug-in module EEPROM data from - * specified page. Returns a negative error code or the amount of bytes - * read. - * @get_eth_phy_stats: Query some of the IEEE 802.3 PHY statistics. - * @get_eth_mac_stats: Query some of the IEEE 802.3 MAC statistics. - * @get_eth_ctrl_stats: Query some of the IEEE 802.3 MAC Ctrl statistics. - * @get_rmon_stats: Query some of the RMON (RFC 2819) statistics. - * Set %ranges to a pointer to zero-terminated array of byte ranges. 
+ * @get_link_ksettings: When defined, takes precedence over the + * %get_settings method. Get various device settings + * including Ethernet link settings. The %cmd and + * %link_mode_masks_nwords fields should be ignored (use + * %__ETHTOOL_LINK_MODE_MASK_NBITS instead of the latter), any + * change to them will be overwritten by kernel. Returns a + * negative error code or zero. + * @set_link_ksettings: When defined, takes precedence over the + * %set_settings method. Set various device settings including + * Ethernet link settings. The %cmd and %link_mode_masks_nwords + * fields should be ignored (use %__ETHTOOL_LINK_MODE_MASK_NBITS + * instead of the latter), any change to them will be overwritten + * by kernel. Returns a negative error code or zero. * * All operations are optional (i.e. the function pointer may be set * to %NULL) and callers must take this into account. Callers must @@ -594,8 +300,8 @@ struct ethtool_module_eeprom { * of the generic netdev features interface. */ struct ethtool_ops { - u32 cap_link_lanes_supported:1; - u32 supported_coalesce_params; + int (*get_settings)(struct net_device *, struct ethtool_cmd *); + int (*set_settings)(struct net_device *, struct ethtool_cmd *); void (*get_drvinfo)(struct net_device *, struct ethtool_drvinfo *); int (*get_regs_len)(struct net_device *); void (*get_regs)(struct net_device *, struct ethtool_regs *, void *); @@ -605,27 +311,17 @@ struct ethtool_ops { void (*set_msglevel)(struct net_device *, u32); int (*nway_reset)(struct net_device *); u32 (*get_link)(struct net_device *); - int (*get_link_ext_state)(struct net_device *, - struct ethtool_link_ext_state_info *); int (*get_eeprom_len)(struct net_device *); int (*get_eeprom)(struct net_device *, struct ethtool_eeprom *, u8 *); int (*set_eeprom)(struct net_device *, struct ethtool_eeprom *, u8 *); - int (*get_coalesce)(struct net_device *, - struct ethtool_coalesce *, - struct kernel_ethtool_coalesce *, - struct netlink_ext_ack *); - int 
(*set_coalesce)(struct net_device *, - struct ethtool_coalesce *, - struct kernel_ethtool_coalesce *, - struct netlink_ext_ack *); + int (*get_coalesce)(struct net_device *, struct ethtool_coalesce *); + int (*set_coalesce)(struct net_device *, struct ethtool_coalesce *); void (*get_ringparam)(struct net_device *, struct ethtool_ringparam *); int (*set_ringparam)(struct net_device *, struct ethtool_ringparam *); - void (*get_pause_stats)(struct net_device *dev, - struct ethtool_pause_stats *pause_stats); void (*get_pauseparam)(struct net_device *, struct ethtool_pauseparam*); int (*set_pauseparam)(struct net_device *, @@ -651,11 +347,6 @@ struct ethtool_ops { u8 *hfunc); int (*set_rxfh)(struct net_device *, const u32 *indir, const u8 *key, const u8 hfunc); - int (*get_rxfh_context)(struct net_device *, u32 *indir, u8 *key, - u8 *hfunc, u32 rss_context); - int (*set_rxfh_context)(struct net_device *, const u32 *indir, - const u8 *key, const u8 hfunc, - u32 *rss_context, bool delete); void (*get_channels)(struct net_device *, struct ethtool_channels *); int (*set_channels)(struct net_device *, struct ethtool_channels *); int (*get_dump_flag)(struct net_device *, struct ethtool_dump *); @@ -681,111 +372,6 @@ struct ethtool_ops { struct ethtool_link_ksettings *); int (*set_link_ksettings)(struct net_device *, const struct ethtool_link_ksettings *); - void (*get_fec_stats)(struct net_device *dev, - struct ethtool_fec_stats *fec_stats); - int (*get_fecparam)(struct net_device *, - struct ethtool_fecparam *); - int (*set_fecparam)(struct net_device *, - struct ethtool_fecparam *); - void (*get_ethtool_phy_stats)(struct net_device *, - struct ethtool_stats *, u64 *); - int (*get_phy_tunable)(struct net_device *, - const struct ethtool_tunable *, void *); - int (*set_phy_tunable)(struct net_device *, - const struct ethtool_tunable *, const void *); - int (*get_module_eeprom_by_page)(struct net_device *dev, - const struct ethtool_module_eeprom *page, - struct netlink_ext_ack 
*extack); - void (*get_eth_phy_stats)(struct net_device *dev, - struct ethtool_eth_phy_stats *phy_stats); - void (*get_eth_mac_stats)(struct net_device *dev, - struct ethtool_eth_mac_stats *mac_stats); - void (*get_eth_ctrl_stats)(struct net_device *dev, - struct ethtool_eth_ctrl_stats *ctrl_stats); - void (*get_rmon_stats)(struct net_device *dev, - struct ethtool_rmon_stats *rmon_stats, - const struct ethtool_rmon_hist_range **ranges); }; - -int ethtool_check_ops(const struct ethtool_ops *ops); - -struct ethtool_rx_flow_rule { - struct flow_rule *rule; - unsigned long priv[]; -}; - -struct ethtool_rx_flow_spec_input { - const struct ethtool_rx_flow_spec *fs; - u32 rss_ctx; -}; - -struct ethtool_rx_flow_rule * -ethtool_rx_flow_rule_create(const struct ethtool_rx_flow_spec_input *input); -void ethtool_rx_flow_rule_destroy(struct ethtool_rx_flow_rule *rule); - -bool ethtool_virtdev_validate_cmd(const struct ethtool_link_ksettings *cmd); -int ethtool_virtdev_set_link_ksettings(struct net_device *dev, - const struct ethtool_link_ksettings *cmd, - u32 *dev_speed, u8 *dev_duplex); - -struct phy_device; -struct phy_tdr_config; - -/** - * struct ethtool_phy_ops - Optional PHY device options - * @get_sset_count: Get number of strings that @get_strings will write. - * @get_strings: Return a set of strings that describe the requested objects - * @get_stats: Return extended statistics about the PHY device. - * @start_cable_test: Start a cable test - * @start_cable_test_tdr: Start a Time Domain Reflectometry cable test - * - * All operations are optional (i.e. the function pointer may be set to %NULL) - * and callers must take this into account. Callers must hold the RTNL lock. 
- */ -struct ethtool_phy_ops { - int (*get_sset_count)(struct phy_device *dev); - int (*get_strings)(struct phy_device *dev, u8 *data); - int (*get_stats)(struct phy_device *dev, - struct ethtool_stats *stats, u64 *data); - int (*start_cable_test)(struct phy_device *phydev, - struct netlink_ext_ack *extack); - int (*start_cable_test_tdr)(struct phy_device *phydev, - struct netlink_ext_ack *extack, - const struct phy_tdr_config *config); -}; - -/** - * ethtool_set_ethtool_phy_ops - Set the ethtool_phy_ops singleton - * @ops: Ethtool PHY operations to set - */ -void ethtool_set_ethtool_phy_ops(const struct ethtool_phy_ops *ops); - -/** - * ethtool_params_from_link_mode - Derive link parameters from a given link mode - * @link_ksettings: Link parameters to be derived from the link mode - * @link_mode: Link mode - */ -void -ethtool_params_from_link_mode(struct ethtool_link_ksettings *link_ksettings, - enum ethtool_link_mode_bit_indices link_mode); - -/** - * ethtool_get_phc_vclocks - Derive phc vclocks information, and caller - * is responsible to free memory of vclock_index - * @dev: pointer to net_device structure - * @vclock_index: pointer to pointer of vclock index - * - * Return number of phc vclocks - */ -int ethtool_get_phc_vclocks(struct net_device *dev, int **vclock_index); - -/** - * ethtool_sprintf - Write formatted string to ethtool string data - * @data: Pointer to start of string to update - * @fmt: Format of string to write - * - * Write formatted string to data. Update data to point at start of - * next string. 
- */ -extern __printf(2, 3) void ethtool_sprintf(u8 **data, const char *fmt, ...); +typedef struct ethtool_ops __no_const ethtool_ops_no_const; #endif /* _LINUX_ETHTOOL_H */ diff --git a/include/linux/eventfd.h b/include/linux/eventfd.h index 305d5f1909..ff0b981f07 100644 --- a/include/linux/eventfd.h +++ b/include/linux/eventfd.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ /* * include/linux/eventfd.h * @@ -11,10 +10,6 @@ #include #include -#include -#include -#include -#include /* * CAREFUL: Check include/uapi/asm-generic/fcntl.h when defining @@ -30,24 +25,20 @@ #define EFD_SHARED_FCNTL_FLAGS (O_CLOEXEC | O_NONBLOCK) #define EFD_FLAGS_SET (EFD_SHARED_FCNTL_FLAGS | EFD_SEMAPHORE) -struct eventfd_ctx; struct file; #ifdef CONFIG_EVENTFD +struct file *eventfd_file_create(unsigned int count, int flags); +struct eventfd_ctx *eventfd_ctx_get(struct eventfd_ctx *ctx); void eventfd_ctx_put(struct eventfd_ctx *ctx); struct file *eventfd_fget(int fd); struct eventfd_ctx *eventfd_ctx_fdget(int fd); struct eventfd_ctx *eventfd_ctx_fileget(struct file *file); __u64 eventfd_signal(struct eventfd_ctx *ctx, __u64 n); -int eventfd_ctx_remove_wait_queue(struct eventfd_ctx *ctx, wait_queue_entry_t *wait, +ssize_t eventfd_ctx_read(struct eventfd_ctx *ctx, int no_wait, __u64 *cnt); +int eventfd_ctx_remove_wait_queue(struct eventfd_ctx *ctx, wait_queue_t *wait, __u64 *cnt); -void eventfd_ctx_do_read(struct eventfd_ctx *ctx, __u64 *cnt); - -static inline bool eventfd_signal_allowed(void) -{ - return !current->in_eventfd_signal; -} #else /* CONFIG_EVENTFD */ @@ -55,6 +46,10 @@ static inline bool eventfd_signal_allowed(void) * Ugly ugly ugly error layer to support modules that uses eventfd but * pretend to work in !CONFIG_EVENTFD configurations. Namely, AIO. 
*/ +static inline struct file *eventfd_file_create(unsigned int count, int flags) +{ + return ERR_PTR(-ENOSYS); +} static inline struct eventfd_ctx *eventfd_ctx_fdget(int fd) { @@ -71,20 +66,16 @@ static inline void eventfd_ctx_put(struct eventfd_ctx *ctx) } -static inline int eventfd_ctx_remove_wait_queue(struct eventfd_ctx *ctx, - wait_queue_entry_t *wait, __u64 *cnt) +static inline ssize_t eventfd_ctx_read(struct eventfd_ctx *ctx, int no_wait, + __u64 *cnt) { return -ENOSYS; } -static inline bool eventfd_signal_allowed(void) +static inline int eventfd_ctx_remove_wait_queue(struct eventfd_ctx *ctx, + wait_queue_t *wait, __u64 *cnt) { - return true; -} - -static inline void eventfd_ctx_do_read(struct eventfd_ctx *ctx, __u64 *cnt) -{ - + return -ENOSYS; } #endif diff --git a/include/linux/eventpoll.h b/include/linux/eventpoll.h index 3337745d81..6daf6d4971 100644 --- a/include/linux/eventpoll.h +++ b/include/linux/eventpoll.h @@ -1,15 +1,19 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * include/linux/eventpoll.h ( Efficient event polling implementation ) * Copyright (C) 2001,...,2006 Davide Libenzi * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. 
+ * * Davide Libenzi + * */ #ifndef _LINUX_EVENTPOLL_H #define _LINUX_EVENTPOLL_H #include -#include /* Forward declarations to avoid compiler errors */ @@ -18,9 +22,13 @@ struct file; #ifdef CONFIG_EPOLL -#ifdef CONFIG_KCMP -struct file *get_epoll_tfile_raw_ptr(struct file *file, int tfd, unsigned long toff); -#endif +/* Used to initialize the epoll bits inside the "struct file" */ +static inline void eventpoll_init_file(struct file *file) +{ + INIT_LIST_HEAD(&file->f_ep_links); + INIT_LIST_HEAD(&file->f_tfile_llink); +} + /* Used to release the epoll bits inside the "struct file" */ void eventpoll_release_file(struct file *file); @@ -42,7 +50,7 @@ static inline void eventpoll_release(struct file *file) * because the file in on the way to be removed and nobody ( but * eventpoll ) has still a reference to this file. */ - if (likely(!file->f_ep)) + if (likely(list_empty(&file->f_ep_links))) return; /* @@ -53,37 +61,11 @@ static inline void eventpoll_release(struct file *file) eventpoll_release_file(file); } -int do_epoll_ctl(int epfd, int op, int fd, struct epoll_event *epds, - bool nonblock); - -/* Tells if the epoll_ctl(2) operation needs an event copy from userspace */ -static inline int ep_op_has_event(int op) -{ - return op != EPOLL_CTL_DEL; -} - #else +static inline void eventpoll_init_file(struct file *file) {} static inline void eventpoll_release(struct file *file) {} #endif -#if defined(CONFIG_ARM) && defined(CONFIG_OABI_COMPAT) -/* ARM OABI has an incompatible struct layout and needs a special handler */ -extern struct epoll_event __user * -epoll_put_uevent(__poll_t revents, __u64 data, - struct epoll_event __user *uevent); -#else -static inline struct epoll_event __user * -epoll_put_uevent(__poll_t revents, __u64 data, - struct epoll_event __user *uevent) -{ - if (__put_user(revents, &uevent->events) || - __put_user(data, &uevent->data)) - return NULL; - - return uevent+1; -} -#endif - #endif /* #ifndef _LINUX_EVENTPOLL_H */ diff --git 
a/include/linux/evm.h b/include/linux/evm.h index 4c374be702..35ed9a8a40 100644 --- a/include/linux/evm.h +++ b/include/linux/evm.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ /* * evm.h * @@ -23,25 +22,18 @@ extern enum integrity_status evm_verifyxattr(struct dentry *dentry, struct integrity_iint_cache *iint); extern int evm_inode_setattr(struct dentry *dentry, struct iattr *attr); extern void evm_inode_post_setattr(struct dentry *dentry, int ia_valid); -extern int evm_inode_setxattr(struct user_namespace *mnt_userns, - struct dentry *dentry, const char *name, +extern int evm_inode_setxattr(struct dentry *dentry, const char *name, const void *value, size_t size); extern void evm_inode_post_setxattr(struct dentry *dentry, const char *xattr_name, const void *xattr_value, size_t xattr_value_len); -extern int evm_inode_removexattr(struct user_namespace *mnt_userns, - struct dentry *dentry, const char *xattr_name); +extern int evm_inode_removexattr(struct dentry *dentry, const char *xattr_name); extern void evm_inode_post_removexattr(struct dentry *dentry, const char *xattr_name); extern int evm_inode_init_security(struct inode *inode, const struct xattr *xattr_array, struct xattr *evm); -extern bool evm_revalidate_status(const char *xattr_name); -extern int evm_protected_xattr_if_enabled(const char *req_xattr_name); -extern int evm_read_protected_xattrs(struct dentry *dentry, u8 *buffer, - int buffer_size, char type, - bool canonical_fmt); #ifdef CONFIG_FS_POSIX_ACL extern int posix_xattr_acl(const char *xattrname); #else @@ -78,8 +70,7 @@ static inline void evm_inode_post_setattr(struct dentry *dentry, int ia_valid) return; } -static inline int evm_inode_setxattr(struct user_namespace *mnt_userns, - struct dentry *dentry, const char *name, +static inline int evm_inode_setxattr(struct dentry *dentry, const char *name, const void *value, size_t size) { return 0; @@ -93,8 +84,7 @@ static inline void evm_inode_post_setxattr(struct dentry *dentry, return; } 
-static inline int evm_inode_removexattr(struct user_namespace *mnt_userns, - struct dentry *dentry, +static inline int evm_inode_removexattr(struct dentry *dentry, const char *xattr_name) { return 0; @@ -113,22 +103,5 @@ static inline int evm_inode_init_security(struct inode *inode, return 0; } -static inline bool evm_revalidate_status(const char *xattr_name) -{ - return false; -} - -static inline int evm_protected_xattr_if_enabled(const char *req_xattr_name) -{ - return false; -} - -static inline int evm_read_protected_xattrs(struct dentry *dentry, u8 *buffer, - int buffer_size, char type, - bool canonical_fmt) -{ - return -EOPNOTSUPP; -} - #endif /* CONFIG_EVM */ #endif /* LINUX_EVM_H */ diff --git a/include/linux/export.h b/include/linux/export.h index 27d848712b..2a0f61fbc7 100644 --- a/include/linux/export.h +++ b/include/linux/export.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ #ifndef _LINUX_EXPORT_H #define _LINUX_EXPORT_H @@ -11,7 +10,26 @@ * hackers place grumpy comments in header files. */ +/* Some toolchains use a `_' prefix for all user symbols. */ +#ifdef CONFIG_HAVE_UNDERSCORE_SYMBOL_PREFIX +#define __VMLINUX_SYMBOL(x) _##x +#define __VMLINUX_SYMBOL_STR(x) "_" #x +#else +#define __VMLINUX_SYMBOL(x) x +#define __VMLINUX_SYMBOL_STR(x) #x +#endif + +/* Indirect, so macros are expanded before pasting. 
*/ +#define VMLINUX_SYMBOL(x) __VMLINUX_SYMBOL(x) +#define VMLINUX_SYMBOL_STR(x) __VMLINUX_SYMBOL_STR(x) + #ifndef __ASSEMBLY__ +struct kernel_symbol +{ + unsigned long value; + const char *name; +}; + #ifdef MODULE extern struct module __this_module; #define THIS_MODULE (&__this_module) @@ -19,152 +37,90 @@ extern struct module __this_module; #define THIS_MODULE ((struct module *)0) #endif +#ifdef CONFIG_MODULES + +#if defined(__KERNEL__) && !defined(__GENKSYMS__) #ifdef CONFIG_MODVERSIONS /* Mark the CRC weak since genksyms apparently decides not to * generate a checksums for some symbols */ -#if defined(CONFIG_MODULE_REL_CRCS) #define __CRC_SYMBOL(sym, sec) \ - asm(" .section \"___kcrctab" sec "+" #sym "\", \"a\" \n" \ - " .weak __crc_" #sym " \n" \ - " .long __crc_" #sym " - . \n" \ - " .previous \n") -#else -#define __CRC_SYMBOL(sym, sec) \ - asm(" .section \"___kcrctab" sec "+" #sym "\", \"a\" \n" \ - " .weak __crc_" #sym " \n" \ - " .long __crc_" #sym " \n" \ - " .previous \n") -#endif + extern __visible void *__crc_##sym __attribute__((weak)); \ + static const unsigned long __kcrctab_##sym \ + __used \ + __attribute__((section("___kcrctab" sec "+" #sym), used)) \ + = (unsigned long) &__crc_##sym; #else #define __CRC_SYMBOL(sym, sec) #endif -#ifdef CONFIG_HAVE_ARCH_PREL32_RELOCATIONS -#include -/* - * Emit the ksymtab entry as a pair of relative references: this reduces - * the size by half on 64-bit architectures, and eliminates the need for - * absolute relocations that require runtime processing on relocatable - * kernels. - */ -#define __KSYMTAB_ENTRY(sym, sec) \ - __ADDRESSABLE(sym) \ - asm(" .section \"___ksymtab" sec "+" #sym "\", \"a\" \n" \ - " .balign 4 \n" \ - "__ksymtab_" #sym ": \n" \ - " .long " #sym "- . \n" \ - " .long __kstrtab_" #sym "- . \n" \ - " .long __kstrtabns_" #sym "- . 
\n" \ - " .previous \n") - -struct kernel_symbol { - int value_offset; - int name_offset; - int namespace_offset; -}; -#else -#define __KSYMTAB_ENTRY(sym, sec) \ +/* For every exported symbol, place a struct in the __ksymtab section */ +#define ___EXPORT_SYMBOL(sym, sec) \ + extern typeof(sym) sym; \ + __CRC_SYMBOL(sym, sec) \ + static const char __kstrtab_##sym[] \ + __attribute__((section("__ksymtab_strings"), aligned(1))) \ + = VMLINUX_SYMBOL_STR(sym); \ static const struct kernel_symbol __ksymtab_##sym \ + __used \ __attribute__((section("___ksymtab" sec "+" #sym), used)) \ - __aligned(sizeof(void *)) \ - = { (unsigned long)&sym, __kstrtab_##sym, __kstrtabns_##sym } + = { (unsigned long)&sym, __kstrtab_##sym } -struct kernel_symbol { - unsigned long value; - const char *name; - const char *namespace; -}; -#endif - -#ifdef __GENKSYMS__ - -#define ___EXPORT_SYMBOL(sym, sec, ns) __GENKSYMS_EXPORT_SYMBOL(sym) - -#else +#if defined(__KSYM_DEPS__) /* - * For every exported symbol, do the following: - * - * - If applicable, place a CRC entry in the __kcrctab section. - * - Put the name of the symbol and namespace (empty string "" for none) in - * __ksymtab_strings. - * - Place a struct kernel_symbol entry in the __ksymtab section. - * - * note on .section use: we specify progbits since usage of the "M" (SHF_MERGE) - * section flag requires it. Use '%progbits' instead of '@progbits' since the - * former apparently works on all arches according to the binutils source. + * For fine grained build dependencies, we want to tell the build system + * about each possible exported symbol even if they're not actually exported. + * We use a string pattern that is unlikely to be valid code that the build + * system filters out from the preprocessor output (see ksym_dep_filter + * in scripts/Kbuild.include). 
*/ -#define ___EXPORT_SYMBOL(sym, sec, ns) \ - extern typeof(sym) sym; \ - extern const char __kstrtab_##sym[]; \ - extern const char __kstrtabns_##sym[]; \ - __CRC_SYMBOL(sym, sec); \ - asm(" .section \"__ksymtab_strings\",\"aMS\",%progbits,1 \n" \ - "__kstrtab_" #sym ": \n" \ - " .asciz \"" #sym "\" \n" \ - "__kstrtabns_" #sym ": \n" \ - " .asciz \"" ns "\" \n" \ - " .previous \n"); \ - __KSYMTAB_ENTRY(sym, sec) - -#endif - -#if !defined(CONFIG_MODULES) || defined(__DISABLE_EXPORTS) - -/* - * Allow symbol exports to be disabled completely so that C code may - * be reused in other execution contexts such as the UEFI stub or the - * decompressor. - */ -#define __EXPORT_SYMBOL(sym, sec, ns) +#define __EXPORT_SYMBOL(sym, sec) === __KSYM_##sym === #elif defined(CONFIG_TRIM_UNUSED_KSYMS) #include -/* - * For fine grained build dependencies, we want to tell the build system - * about each possible exported symbol even if they're not actually exported. - * We use a symbol pattern __ksym_marker_ that the build system filters - * from the $(NM) output (see scripts/gen_ksymdeps.sh). These symbols are - * discarded in the final link stage. 
- */ -#define __ksym_marker(sym) \ - static int __ksym_marker_##sym[0] __section(".discard.ksym") __used +#define __EXPORT_SYMBOL(sym, sec) \ + __cond_export_sym(sym, sec, __is_defined(__KSYM_##sym)) +#define __cond_export_sym(sym, sec, conf) \ + ___cond_export_sym(sym, sec, conf) +#define ___cond_export_sym(sym, sec, enabled) \ + __cond_export_sym_##enabled(sym, sec) +#define __cond_export_sym_1(sym, sec) ___EXPORT_SYMBOL(sym, sec) +#define __cond_export_sym_0(sym, sec) /* nothing */ -#define __EXPORT_SYMBOL(sym, sec, ns) \ - __ksym_marker(sym); \ - __cond_export_sym(sym, sec, ns, __is_defined(__KSYM_##sym)) -#define __cond_export_sym(sym, sec, ns, conf) \ - ___cond_export_sym(sym, sec, ns, conf) -#define ___cond_export_sym(sym, sec, ns, enabled) \ - __cond_export_sym_##enabled(sym, sec, ns) -#define __cond_export_sym_1(sym, sec, ns) ___EXPORT_SYMBOL(sym, sec, ns) - -#ifdef __GENKSYMS__ -#define __cond_export_sym_0(sym, sec, ns) __GENKSYMS_EXPORT_SYMBOL(sym) #else -#define __cond_export_sym_0(sym, sec, ns) /* nothing */ +#define __EXPORT_SYMBOL ___EXPORT_SYMBOL #endif -#else +#define EXPORT_SYMBOL(sym) \ + __EXPORT_SYMBOL(sym, "") -#define __EXPORT_SYMBOL(sym, sec, ns) ___EXPORT_SYMBOL(sym, sec, ns) +#define EXPORT_SYMBOL_GPL(sym) \ + __EXPORT_SYMBOL(sym, "_gpl") + +#define EXPORT_SYMBOL_GPL_FUTURE(sym) \ + __EXPORT_SYMBOL(sym, "_gpl_future") + +#ifdef CONFIG_UNUSED_SYMBOLS +#define EXPORT_UNUSED_SYMBOL(sym) __EXPORT_SYMBOL(sym, "_unused") +#define EXPORT_UNUSED_SYMBOL_GPL(sym) __EXPORT_SYMBOL(sym, "_unused_gpl") +#else +#define EXPORT_UNUSED_SYMBOL(sym) +#define EXPORT_UNUSED_SYMBOL_GPL(sym) +#endif + +#endif /* __GENKSYMS__ */ + +#else /* !CONFIG_MODULES... 
*/ + +#define EXPORT_SYMBOL(sym) +#define EXPORT_SYMBOL_GPL(sym) +#define EXPORT_SYMBOL_GPL_FUTURE(sym) +#define EXPORT_UNUSED_SYMBOL(sym) +#define EXPORT_UNUSED_SYMBOL_GPL(sym) #endif /* CONFIG_MODULES */ - -#ifdef DEFAULT_SYMBOL_NAMESPACE -#include -#define _EXPORT_SYMBOL(sym, sec) __EXPORT_SYMBOL(sym, sec, __stringify(DEFAULT_SYMBOL_NAMESPACE)) -#else -#define _EXPORT_SYMBOL(sym, sec) __EXPORT_SYMBOL(sym, sec, "") -#endif - -#define EXPORT_SYMBOL(sym) _EXPORT_SYMBOL(sym, "") -#define EXPORT_SYMBOL_GPL(sym) _EXPORT_SYMBOL(sym, "_gpl") -#define EXPORT_SYMBOL_NS(sym, ns) __EXPORT_SYMBOL(sym, "", #ns) -#define EXPORT_SYMBOL_NS_GPL(sym, ns) __EXPORT_SYMBOL(sym, "_gpl", #ns) - #endif /* !__ASSEMBLY__ */ #endif /* _LINUX_EXPORT_H */ diff --git a/include/linux/exportfs.h b/include/linux/exportfs.h index 3260fe7148..5ab958cdc5 100644 --- a/include/linux/exportfs.h +++ b/include/linux/exportfs.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef LINUX_EXPORTFS_H #define LINUX_EXPORTFS_H 1 @@ -104,11 +103,6 @@ enum fid_type { */ FILEID_LUSTRE = 0x97, - /* - * 64 bit unique kernfs id - */ - FILEID_KERNFS = 0xfe, - /* * Filesystems must not use 0xff file ID. */ @@ -144,7 +138,7 @@ struct fid { * @get_parent: find the parent of a given directory * @commit_metadata: commit metadata changes to stable storage * - * See Documentation/filesystems/nfs/exporting.rst for details on how to use + * See Documentation/filesystems/nfs/Exporting for details on how to use * this interface correctly. * * encode_fh: @@ -178,7 +172,7 @@ struct fid { * get_name: * @get_name should find a name for the given @child in the given @parent * directory. The name should be stored in the @name (with the - * understanding that it is already pointing to a %NAME_MAX+1 sized + * understanding that it is already pointing to a a %NAME_MAX+1 sized * buffer. get_name() should return %0 on success, a negative error code * or error. @get_name will be called without @parent->i_mutex held. 
* @@ -213,28 +207,12 @@ struct export_operations { bool write, u32 *device_generation); int (*commit_blocks)(struct inode *inode, struct iomap *iomaps, int nr_iomaps, struct iattr *iattr); - u64 (*fetch_iversion)(struct inode *); -#define EXPORT_OP_NOWCC (0x1) /* don't collect v3 wcc data */ -#define EXPORT_OP_NOSUBTREECHK (0x2) /* no subtree checking */ -#define EXPORT_OP_CLOSE_BEFORE_UNLINK (0x4) /* close files before unlink */ -#define EXPORT_OP_REMOTE_FS (0x8) /* Filesystem is remote */ -#define EXPORT_OP_NOATOMIC_ATTR (0x10) /* Filesystem cannot supply - atomic attribute updates - */ -#define EXPORT_OP_SYNC_LOCKS (0x20) /* Filesystem can't do - asychronous blocking locks */ - unsigned long flags; }; extern int exportfs_encode_inode_fh(struct inode *inode, struct fid *fid, int *max_len, struct inode *parent); extern int exportfs_encode_fh(struct dentry *dentry, struct fid *fid, int *max_len, int connectable); -extern struct dentry *exportfs_decode_fh_raw(struct vfsmount *mnt, - struct fid *fid, int fh_len, - int fileid_type, - int (*acceptable)(void *, struct dentry *), - void *context); extern struct dentry *exportfs_decode_fh(struct vfsmount *mnt, struct fid *fid, int fh_len, int fileid_type, int (*acceptable)(void *, struct dentry *), void *context); diff --git a/include/linux/ext2_fs.h b/include/linux/ext2_fs.h index 1fef885690..2723e715f6 100644 --- a/include/linux/ext2_fs.h +++ b/include/linux/ext2_fs.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ /* * linux/include/linux/ext2_fs.h * diff --git a/include/linux/extable.h b/include/linux/extable.h index 4ab9e78f31..7effea4b25 100644 --- a/include/linux/extable.h +++ b/include/linux/extable.h @@ -1,16 +1,14 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_EXTABLE_H #define _LINUX_EXTABLE_H #include /* for NULL */ -#include struct module; struct exception_table_entry; const struct exception_table_entry * -search_extable(const struct exception_table_entry *base, - const size_t num, 
+search_extable(const struct exception_table_entry *first, + const struct exception_table_entry *last, unsigned long value); void sort_extable(struct exception_table_entry *start, struct exception_table_entry *finish); @@ -19,8 +17,6 @@ void trim_init_extable(struct module *m); /* Given an address, look for it in the exception tables */ const struct exception_table_entry *search_exception_tables(unsigned long add); -const struct exception_table_entry * -search_kernel_exception_table(unsigned long addr); #ifdef CONFIG_MODULES /* For extable.c to search modules' exception tables. */ @@ -33,14 +29,4 @@ search_module_extables(unsigned long addr) } #endif /*CONFIG_MODULES*/ -#ifdef CONFIG_BPF_JIT -const struct exception_table_entry *search_bpf_extables(unsigned long addr); -#else -static inline const struct exception_table_entry * -search_bpf_extables(unsigned long addr) -{ - return NULL; -} -#endif - #endif /* _LINUX_EXTABLE_H */ diff --git a/include/linux/extcon.h b/include/linux/extcon.h index 0c19010da7..3e43dbaee5 100644 --- a/include/linux/extcon.h +++ b/include/linux/extcon.h @@ -1,7 +1,5 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* - * External Connector (extcon) framework - * - linux/include/linux/extcon.h for extcon consumer device driver. + * External connector (extcon) class driver * * Copyright (C) 2015 Samsung Electronics * Author: Chanwoo Choi @@ -13,7 +11,17 @@ * based on switch class driver * Copyright (C) 2008 Google, Inc. * Author: Mike Lockwood - */ + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * +*/ #ifndef __LINUX_EXTCON_H__ #define __LINUX_EXTCON_H__ @@ -38,18 +46,7 @@ #define EXTCON_USB 1 #define EXTCON_USB_HOST 2 -/* - * Charging external connector - * - * When one SDP charger connector was reported, we should also report - * the USB connector, which means EXTCON_CHG_USB_SDP should always - * appear together with EXTCON_USB. The same as ACA charger connector, - * EXTCON_CHG_USB_ACA would normally appear with EXTCON_USB_HOST. - * - * The EXTCON_CHG_USB_SLOW connector can provide at least 500mA of - * current at 5V. The EXTCON_CHG_USB_FAST connector can provide at - * least 1A of current at 5V. - */ +/* Charging external connector */ #define EXTCON_CHG_USB_SDP 5 /* Standard Downstream Port */ #define EXTCON_CHG_USB_DCP 6 /* Dedicated Charging Port */ #define EXTCON_CHG_USB_CDP 7 /* Charging Downstream Port */ @@ -57,7 +54,6 @@ #define EXTCON_CHG_USB_FAST 9 #define EXTCON_CHG_USB_SLOW 10 #define EXTCON_CHG_WPT 11 /* Wireless Power Transfer */ -#define EXTCON_CHG_USB_PD 12 /* USB Power Delivery */ /* Jack external connector */ #define EXTCON_JACK_MICROPHONE 20 @@ -85,7 +81,7 @@ #define EXTCON_NUM 63 /* - * Define the properties of supported external connectors. + * Define the property of supported external connectors. * * When adding the new extcon property, they *must* have * the type/value/default information. Also, you *have to* @@ -163,99 +159,256 @@ union extcon_property_value { int intval; /* type : integer (intval) */ }; -struct extcon_dev; +struct extcon_cable; + +/** + * struct extcon_dev - An extcon device represents one external connector. + * @name: The name of this extcon device. Parent device name is + * used if NULL. + * @supported_cable: Array of supported cable names ending with EXTCON_NONE. + * If supported_cable is NULL, cable name related APIs + * are disabled. + * @mutually_exclusive: Array of mutually exclusive set of cables that cannot + * be attached simultaneously. 
The array should be + * ending with NULL or be NULL (no mutually exclusive + * cables). For example, if it is { 0x7, 0x30, 0}, then, + * {0, 1}, {0, 1, 2}, {0, 2}, {1, 2}, or {4, 5} cannot + * be attached simulataneously. {0x7, 0} is equivalent to + * {0x3, 0x6, 0x5, 0}. If it is {0xFFFFFFFF, 0}, there + * can be no simultaneous connections. + * @dev: Device of this extcon. + * @state: Attach/detach state of this extcon. Do not provide at + * register-time. + * @nh: Notifier for the state change events from this extcon + * @entry: To support list of extcon devices so that users can + * search for extcon devices based on the extcon name. + * @lock: + * @max_supported: Internal value to store the number of cables. + * @extcon_dev_type: Device_type struct to provide attribute_groups + * customized for each extcon device. + * @cables: Sysfs subdirectories. Each represents one cable. + * + * In most cases, users only need to provide "User initializing data" of + * this struct when registering an extcon. In some exceptional cases, + * optional callbacks may be needed. However, the values in "internal data" + * are overwritten by register function. + */ +struct extcon_dev { + /* Optional user initializing data */ + const char *name; + const unsigned int *supported_cable; + const u32 *mutually_exclusive; + + /* Internal data. Please do not set. */ + struct device dev; + struct raw_notifier_head *nh; + struct list_head entry; + int max_supported; + spinlock_t lock; /* could be called by irq handler */ + u32 state; + + /* /sys/class/extcon/.../cable.n/... */ + struct device_type extcon_dev_type; + struct extcon_cable *cables; + + /* /sys/class/extcon/.../mutually_exclusive/... */ + struct attribute_group attr_g_muex; + struct attribute **attrs_muex; + device_attribute_no_const *d_attrs_muex; +}; #if IS_ENABLED(CONFIG_EXTCON) -/* - * Following APIs get the connected state of each external connector. - * The 'id' argument indicates the defined external connector. 
- */ -int extcon_get_state(struct extcon_dev *edev, unsigned int id); /* - * Following APIs get the property of each external connector. - * The 'id' argument indicates the defined external connector - * and the 'prop' indicates the extcon property. - * - * And extcon_get_property_capability() get the capability of the property - * for each external connector. They are used to get the capability of the - * property of each external connector based on the id and property. + * Following APIs are for notifiers or configurations. + * Notifiers are the external port and connection devices. */ -int extcon_get_property(struct extcon_dev *edev, unsigned int id, +extern int extcon_dev_register(struct extcon_dev *edev); +extern void extcon_dev_unregister(struct extcon_dev *edev); +extern int devm_extcon_dev_register(struct device *dev, + struct extcon_dev *edev); +extern void devm_extcon_dev_unregister(struct device *dev, + struct extcon_dev *edev); +extern struct extcon_dev *extcon_get_extcon_dev(const char *extcon_name); + +/* + * Following APIs control the memory of extcon device. + */ +extern struct extcon_dev *extcon_dev_allocate(const unsigned int *cable); +extern void extcon_dev_free(struct extcon_dev *edev); +extern struct extcon_dev *devm_extcon_dev_allocate(struct device *dev, + const unsigned int *cable); +extern void devm_extcon_dev_free(struct device *dev, struct extcon_dev *edev); + +/* + * get/set_state access each bit of the 32b encoded state value. + * They are used to access the status of each cable based on the cable id. + */ +extern int extcon_get_state(struct extcon_dev *edev, unsigned int id); +extern int extcon_set_state(struct extcon_dev *edev, unsigned int id, + bool cable_state); +extern int extcon_set_state_sync(struct extcon_dev *edev, unsigned int id, + bool cable_state); +/* + * Synchronize the state and property data for a specific external connector. 
+ */ +extern int extcon_sync(struct extcon_dev *edev, unsigned int id); + +/* + * get/set_property access the property value of each external connector. + * They are used to access the property of each cable based on the property id. + */ +extern int extcon_get_property(struct extcon_dev *edev, unsigned int id, unsigned int prop, union extcon_property_value *prop_val); -int extcon_get_property_capability(struct extcon_dev *edev, +extern int extcon_set_property(struct extcon_dev *edev, unsigned int id, + unsigned int prop, + union extcon_property_value prop_val); +extern int extcon_set_property_sync(struct extcon_dev *edev, unsigned int id, + unsigned int prop, + union extcon_property_value prop_val); + +/* + * get/set_property_capability set the capability of the property for each + * external connector. They are used to set the capability of the property + * of each external connector based on the id and property. + */ +extern int extcon_get_property_capability(struct extcon_dev *edev, + unsigned int id, unsigned int prop); +extern int extcon_set_property_capability(struct extcon_dev *edev, unsigned int id, unsigned int prop); /* - * Following APIs register the notifier block in order to detect - * the change of both state and property value for each external connector. - * - * extcon_register_notifier(*edev, id, *nb) : Register a notifier block - * for specific external connector of the extcon. - * extcon_register_notifier_all(*edev, *nb) : Register a notifier block - * for all supported external connectors of the extcon. + * Following APIs are to monitor every action of a notifier. + * Registrar gets notified for every external port of a connection device. + * Probably this could be used to debug an action of notifier; however, + * we do not recommend to use this for normal 'notifiee' device drivers who + * want to be notified by a specific external port of the notifier. 
*/ -int extcon_register_notifier(struct extcon_dev *edev, unsigned int id, - struct notifier_block *nb); -int extcon_unregister_notifier(struct extcon_dev *edev, unsigned int id, - struct notifier_block *nb); -int devm_extcon_register_notifier(struct device *dev, +extern int extcon_register_notifier(struct extcon_dev *edev, unsigned int id, + struct notifier_block *nb); +extern int extcon_unregister_notifier(struct extcon_dev *edev, unsigned int id, + struct notifier_block *nb); +extern int devm_extcon_register_notifier(struct device *dev, struct extcon_dev *edev, unsigned int id, struct notifier_block *nb); -void devm_extcon_unregister_notifier(struct device *dev, +extern void devm_extcon_unregister_notifier(struct device *dev, struct extcon_dev *edev, unsigned int id, struct notifier_block *nb); -int extcon_register_notifier_all(struct extcon_dev *edev, - struct notifier_block *nb); -int extcon_unregister_notifier_all(struct extcon_dev *edev, - struct notifier_block *nb); -int devm_extcon_register_notifier_all(struct device *dev, - struct extcon_dev *edev, - struct notifier_block *nb); -void devm_extcon_unregister_notifier_all(struct device *dev, - struct extcon_dev *edev, - struct notifier_block *nb); - /* - * Following APIs get the extcon_dev from devicetree or by through extcon name. + * Following API get the extcon device from devicetree. + * This function use phandle of devicetree to get extcon device directly. */ -struct extcon_dev *extcon_get_extcon_dev(const char *extcon_name); -struct extcon_dev *extcon_find_edev_by_node(struct device_node *node); -struct extcon_dev *extcon_get_edev_by_phandle(struct device *dev, +extern struct extcon_dev *extcon_get_edev_by_phandle(struct device *dev, int index); -/* Following API get the name of extcon device. 
*/ -const char *extcon_get_edev_name(struct extcon_dev *edev); +/* Following API to get information of extcon device */ +extern const char *extcon_get_edev_name(struct extcon_dev *edev); + #else /* CONFIG_EXTCON */ +static inline int extcon_dev_register(struct extcon_dev *edev) +{ + return 0; +} + +static inline void extcon_dev_unregister(struct extcon_dev *edev) { } + +static inline int devm_extcon_dev_register(struct device *dev, + struct extcon_dev *edev) +{ + return -EINVAL; +} + +static inline void devm_extcon_dev_unregister(struct device *dev, + struct extcon_dev *edev) { } + +static inline struct extcon_dev *extcon_dev_allocate(const unsigned int *cable) +{ + return ERR_PTR(-ENOSYS); +} + +static inline void extcon_dev_free(struct extcon_dev *edev) { } + +static inline struct extcon_dev *devm_extcon_dev_allocate(struct device *dev, + const unsigned int *cable) +{ + return ERR_PTR(-ENOSYS); +} + +static inline void devm_extcon_dev_free(struct extcon_dev *edev) { } + + static inline int extcon_get_state(struct extcon_dev *edev, unsigned int id) { return 0; } +static inline int extcon_set_state(struct extcon_dev *edev, unsigned int id, + bool cable_state) +{ + return 0; +} + +static inline int extcon_set_state_sync(struct extcon_dev *edev, unsigned int id, + bool cable_state) +{ + return 0; +} + +static inline int extcon_sync(struct extcon_dev *edev, unsigned int id) +{ + return 0; +} + static inline int extcon_get_property(struct extcon_dev *edev, unsigned int id, - unsigned int prop, - union extcon_property_value *prop_val) + unsigned int prop, + union extcon_property_value *prop_val) +{ + return 0; +} +static inline int extcon_set_property(struct extcon_dev *edev, unsigned int id, + unsigned int prop, + union extcon_property_value prop_val) +{ + return 0; +} + +static inline int extcon_set_property_sync(struct extcon_dev *edev, + unsigned int id, unsigned int prop, + union extcon_property_value prop_val) { return 0; } static inline int 
extcon_get_property_capability(struct extcon_dev *edev, - unsigned int id, unsigned int prop) + unsigned int id, unsigned int prop) { return 0; } +static inline int extcon_set_property_capability(struct extcon_dev *edev, + unsigned int id, unsigned int prop) +{ + return 0; +} + +static inline struct extcon_dev *extcon_get_extcon_dev(const char *extcon_name) +{ + return NULL; +} + static inline int extcon_register_notifier(struct extcon_dev *edev, - unsigned int id, struct notifier_block *nb) + unsigned int id, + struct notifier_block *nb) { return 0; } static inline int extcon_unregister_notifier(struct extcon_dev *edev, - unsigned int id, struct notifier_block *nb) + unsigned int id, + struct notifier_block *nb) { return 0; } @@ -271,49 +424,11 @@ static inline void devm_extcon_unregister_notifier(struct device *dev, struct extcon_dev *edev, unsigned int id, struct notifier_block *nb) { } -static inline int extcon_register_notifier_all(struct extcon_dev *edev, - struct notifier_block *nb) -{ - return 0; -} - -static inline int extcon_unregister_notifier_all(struct extcon_dev *edev, - struct notifier_block *nb) -{ - return 0; -} - -static inline int devm_extcon_register_notifier_all(struct device *dev, - struct extcon_dev *edev, - struct notifier_block *nb) -{ - return 0; -} - -static inline void devm_extcon_unregister_notifier_all(struct device *dev, - struct extcon_dev *edev, - struct notifier_block *nb) { } - -static inline struct extcon_dev *extcon_get_extcon_dev(const char *extcon_name) -{ - return ERR_PTR(-ENODEV); -} - -static inline struct extcon_dev *extcon_find_edev_by_node(struct device_node *node) -{ - return ERR_PTR(-ENODEV); -} - static inline struct extcon_dev *extcon_get_edev_by_phandle(struct device *dev, - int index) + int index) { return ERR_PTR(-ENODEV); } - -static inline const char *extcon_get_edev_name(struct extcon_dev *edev) -{ - return NULL; -} #endif /* CONFIG_EXTCON */ /* @@ -328,14 +443,26 @@ struct extcon_specific_cable_nb { }; static 
inline int extcon_register_interest(struct extcon_specific_cable_nb *obj, - const char *extcon_name, const char *cable_name, - struct notifier_block *nb) + const char *extcon_name, const char *cable_name, + struct notifier_block *nb) { return -EINVAL; } -static inline int extcon_unregister_interest(struct extcon_specific_cable_nb *obj) +static inline int extcon_unregister_interest(struct extcon_specific_cable_nb + *obj) { return -EINVAL; } + +static inline int extcon_get_cable_state_(struct extcon_dev *edev, unsigned int id) +{ + return extcon_get_state(edev, id); +} + +static inline int extcon_set_cable_state_(struct extcon_dev *edev, unsigned int id, + bool cable_state) +{ + return extcon_set_state_sync(edev, id, cable_state); +} #endif /* __LINUX_EXTCON_H__ */ diff --git a/include/linux/extcon/extcon-adc-jack.h b/include/linux/extcon/extcon-adc-jack.h index 19b437e9c0..a0e03b13b4 100644 --- a/include/linux/extcon/extcon-adc-jack.h +++ b/include/linux/extcon/extcon-adc-jack.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * include/linux/extcon/extcon-adc-jack.h * @@ -6,6 +5,11 @@ * * Copyright (C) 2012 Samsung Electronics * MyungJoo Ham + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ * */ #ifndef _EXTCON_ADC_JACK_H_ @@ -55,7 +59,7 @@ struct adc_jack_pdata { const char *name; const char *consumer_channel; - const unsigned int *cable_names; + const enum extcon *cable_names; /* The last entry's state should be 0 */ struct adc_jack_cond *adc_conditions; diff --git a/include/linux/extcon/extcon-gpio.h b/include/linux/extcon/extcon-gpio.h new file mode 100644 index 0000000000..7cacafb78b --- /dev/null +++ b/include/linux/extcon/extcon-gpio.h @@ -0,0 +1,47 @@ +/* + * Single-state GPIO extcon driver based on extcon class + * + * Copyright (C) 2012 Samsung Electronics + * Author: MyungJoo Ham + * + * based on switch class driver + * Copyright (C) 2008 Google, Inc. + * Author: Mike Lockwood + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ +#ifndef __EXTCON_GPIO_H__ +#define __EXTCON_GPIO_H__ __FILE__ + +#include + +/** + * struct gpio_extcon_pdata - A simple GPIO-controlled extcon device. + * @extcon_id: The unique id of specific external connector. + * @gpio: Corresponding GPIO. + * @gpio_active_low: Boolean describing whether gpio active state is 1 or 0 + * If true, low state of gpio means active. + * If false, high state of gpio means active. + * @debounce: Debounce time for GPIO IRQ in ms. + * @irq_flags: IRQ Flags (e.g., IRQF_TRIGGER_LOW). + * @check_on_resume: Boolean describing whether to check the state of gpio + * while resuming from sleep. 
+ */ +struct gpio_extcon_pdata { + unsigned int extcon_id; + unsigned gpio; + bool gpio_active_low; + unsigned long debounce; + unsigned long irq_flags; + + bool check_on_resume; +}; + +#endif /* __EXTCON_GPIO_H__ */ diff --git a/include/linux/f2fs_fs.h b/include/linux/f2fs_fs.h index d445150c53..422630b8e5 100644 --- a/include/linux/f2fs_fs.h +++ b/include/linux/f2fs_fs.h @@ -1,9 +1,12 @@ -// SPDX-License-Identifier: GPL-2.0 /** * include/linux/f2fs_fs.h * * Copyright (c) 2012 Samsung Electronics Co., Ltd. * http://www.samsung.com/ + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. */ #ifndef _LINUX_F2FS_FS_H #define _LINUX_F2FS_FS_H @@ -18,12 +21,10 @@ #define F2FS_BLKSIZE 4096 /* support only 4KB block */ #define F2FS_BLKSIZE_BITS 12 /* bits for F2FS_BLKSIZE */ #define F2FS_MAX_EXTENSION 64 /* # of extension entries */ -#define F2FS_EXTENSION_LEN 8 /* max size of extension */ #define F2FS_BLK_ALIGN(x) (((x) + F2FS_BLKSIZE - 1) >> F2FS_BLKSIZE_BITS) #define NULL_ADDR ((block_t)0) /* used as block_t addresses */ #define NEW_ADDR ((block_t)-1) /* used as block_t addresses */ -#define COMPRESS_ADDR ((block_t)-2) /* used as compressed data flag */ #define F2FS_BYTES_TO_BLK(bytes) ((bytes) >> F2FS_BLKSIZE_BITS) #define F2FS_BLK_TO_BYTES(blk) ((blk) << F2FS_BLKSIZE_BITS) @@ -31,24 +32,13 @@ /* 0, 1(node nid), 2(meta nid) are reserved node id */ #define F2FS_RESERVED_NODE_NUM 3 -#define F2FS_ROOT_INO(sbi) ((sbi)->root_ino_num) -#define F2FS_NODE_INO(sbi) ((sbi)->node_ino_num) -#define F2FS_META_INO(sbi) ((sbi)->meta_ino_num) -#define F2FS_COMPRESS_INO(sbi) (NM_I(sbi)->max_nid) - -#define F2FS_MAX_QUOTAS 3 - -#define F2FS_ENC_UTF8_12_1 1 - -#define F2FS_IO_SIZE(sbi) (1 << F2FS_OPTION(sbi).write_io_size_bits) /* Blocks */ -#define F2FS_IO_SIZE_KB(sbi) (1 << (F2FS_OPTION(sbi).write_io_size_bits + 2)) /* KB */ -#define 
F2FS_IO_SIZE_BYTES(sbi) (1 << (F2FS_OPTION(sbi).write_io_size_bits + 12)) /* B */ -#define F2FS_IO_SIZE_BITS(sbi) (F2FS_OPTION(sbi).write_io_size_bits) /* power of 2 */ -#define F2FS_IO_SIZE_MASK(sbi) (F2FS_IO_SIZE(sbi) - 1) -#define F2FS_IO_ALIGNED(sbi) (F2FS_IO_SIZE(sbi) > 1) +#define F2FS_ROOT_INO(sbi) (sbi->root_ino_num) +#define F2FS_NODE_INO(sbi) (sbi->node_ino_num) +#define F2FS_META_INO(sbi) (sbi->meta_ino_num) /* This flag is used by node and meta inodes, and by recovery */ #define GFP_F2FS_ZERO (GFP_NOFS | __GFP_ZERO) +#define GFP_F2FS_HIGH_ZERO (GFP_NOFS | __GFP_ZERO | __GFP_HIGHMEM) /* * For further optimization on multi-head logs, on-disk layout supports maximum @@ -62,17 +52,10 @@ #define VERSION_LEN 256 #define MAX_VOLUME_NAME 512 -#define MAX_PATH_LEN 64 -#define MAX_DEVICES 8 /* * For superblock */ -struct f2fs_device { - __u8 path[MAX_PATH_LEN]; - __le32 total_segments; -} __packed; - struct f2fs_super_block { __le32 magic; /* Magic Number */ __le16 major_ver; /* Major Version */ @@ -104,33 +87,19 @@ struct f2fs_super_block { __u8 uuid[16]; /* 128-bit uuid for volume */ __le16 volume_name[MAX_VOLUME_NAME]; /* volume name */ __le32 extension_count; /* # of extensions below */ - __u8 extension_list[F2FS_MAX_EXTENSION][F2FS_EXTENSION_LEN];/* extension array */ + __u8 extension_list[F2FS_MAX_EXTENSION][8]; /* extension array */ __le32 cp_payload; __u8 version[VERSION_LEN]; /* the kernel version */ __u8 init_version[VERSION_LEN]; /* the initial kernel version */ __le32 feature; /* defined features */ __u8 encryption_level; /* versioning level for encryption */ __u8 encrypt_pw_salt[16]; /* Salt used for string2key algorithm */ - struct f2fs_device devs[MAX_DEVICES]; /* device list */ - __le32 qf_ino[F2FS_MAX_QUOTAS]; /* quota inode numbers */ - __u8 hot_ext_count; /* # of hot file extension */ - __le16 s_encoding; /* Filename charset encoding */ - __le16 s_encoding_flags; /* Filename charset encoding flags */ - __u8 reserved[306]; /* valid reserved 
region */ - __le32 crc; /* checksum of superblock */ + __u8 reserved[871]; /* valid reserved region */ } __packed; /* * For checkpoint */ -#define CP_RESIZEFS_FLAG 0x00004000 -#define CP_DISABLED_QUICK_FLAG 0x00002000 -#define CP_DISABLED_FLAG 0x00001000 -#define CP_QUOTA_NEED_FSCK_FLAG 0x00000800 -#define CP_LARGE_NAT_BITMAP_FLAG 0x00000400 -#define CP_NOCRC_RECOVERY_FLAG 0x00000200 -#define CP_TRIMMED_FLAG 0x00000100 -#define CP_NAT_BITS_FLAG 0x00000080 #define CP_CRC_RECOVERY_FLAG 0x00000040 #define CP_FASTBOOT_FLAG 0x00000020 #define CP_FSCK_FLAG 0x00000010 @@ -169,19 +138,15 @@ struct f2fs_checkpoint { unsigned char alloc_type[MAX_ACTIVE_LOGS]; /* SIT and NAT version bitmap */ - unsigned char sit_nat_version_bitmap[]; + unsigned char sit_nat_version_bitmap[1]; } __packed; -#define CP_CHKSUM_OFFSET 4092 /* default chksum offset in checkpoint */ -#define CP_MIN_CHKSUM_OFFSET \ - (offsetof(struct f2fs_checkpoint, sit_nat_version_bitmap)) - /* * For orphan inode management */ #define F2FS_ORPHANS_PER_BLOCK 1020 -#define GET_ORPHAN_BLOCKS(n) (((n) + F2FS_ORPHANS_PER_BLOCK - 1) / \ +#define GET_ORPHAN_BLOCKS(n) ((n + F2FS_ORPHANS_PER_BLOCK - 1) / \ F2FS_ORPHANS_PER_BLOCK) struct f2fs_orphan_block { @@ -199,23 +164,19 @@ struct f2fs_orphan_block { struct f2fs_extent { __le32 fofs; /* start file offset of the extent */ __le32 blk; /* start block address of the extent */ - __le32 len; /* length of the extent */ + __le32 len; /* lengh of the extent */ } __packed; #define F2FS_NAME_LEN 255 -/* 200 bytes for inline xattrs by default */ -#define DEFAULT_INLINE_XATTR_ADDRS 50 +#define F2FS_INLINE_XATTR_ADDRS 50 /* 200 bytes for inline xattrs */ #define DEF_ADDRS_PER_INODE 923 /* Address Pointers in an Inode */ -#define CUR_ADDRS_PER_INODE(inode) (DEF_ADDRS_PER_INODE - \ - get_extra_isize(inode)) #define DEF_NIDS_PER_INODE 5 /* Node IDs in an Inode */ #define ADDRS_PER_INODE(inode) addrs_per_inode(inode) -#define DEF_ADDRS_PER_BLOCK 1018 /* Address Pointers in a Direct Block 
*/ -#define ADDRS_PER_BLOCK(inode) addrs_per_block(inode) +#define ADDRS_PER_BLOCK 1018 /* Address Pointers in a Direct Block */ #define NIDS_PER_BLOCK 1018 /* Node IDs in an Indirect Block */ #define ADDRS_PER_PAGE(page, inode) \ - (IS_INODE(page) ? ADDRS_PER_INODE(inode) : ADDRS_PER_BLOCK(inode)) + (IS_INODE(page) ? ADDRS_PER_INODE(inode) : ADDRS_PER_BLOCK) #define NODE_DIR1_BLOCK (DEF_ADDRS_PER_INODE + 1) #define NODE_DIR2_BLOCK (DEF_ADDRS_PER_INODE + 2) @@ -228,9 +189,9 @@ struct f2fs_extent { #define F2FS_INLINE_DENTRY 0x04 /* file inline dentry flag */ #define F2FS_DATA_EXIST 0x08 /* file inline data exist flag */ #define F2FS_INLINE_DOTS 0x10 /* file having implicit dot dentries */ -#define F2FS_EXTRA_ATTR 0x20 /* file having extra attribute */ -#define F2FS_PIN_FILE 0x40 /* file should not be gced */ -#define F2FS_COMPRESS_RELEASED 0x80 /* file released compressed blocks */ + +#define MAX_INLINE_DATA (sizeof(__le32) * (DEF_ADDRS_PER_INODE - \ + F2FS_INLINE_XATTR_ADDRS - 1)) struct f2fs_inode { __le16 i_mode; /* file mode */ @@ -248,13 +209,7 @@ struct f2fs_inode { __le32 i_ctime_nsec; /* change time in nano scale */ __le32 i_mtime_nsec; /* modification time in nano scale */ __le32 i_generation; /* file version (for NFS) */ - union { - __le32 i_current_depth; /* only for directory depth */ - __le16 i_gc_failures; /* - * # of gc failures on pinned file. - * only for regular files. 
- */ - }; + __le32 i_current_depth; /* only for directory depth */ __le32 i_xattr_nid; /* nid to save xattr */ __le32 i_flags; /* file attributes */ __le32 i_pino; /* parent inode number */ @@ -264,31 +219,14 @@ struct f2fs_inode { struct f2fs_extent i_ext; /* caching a largest extent */ - union { - struct { - __le16 i_extra_isize; /* extra inode attribute size */ - __le16 i_inline_xattr_size; /* inline xattr size, unit: 4 bytes */ - __le32 i_projid; /* project id */ - __le32 i_inode_checksum;/* inode meta checksum */ - __le64 i_crtime; /* creation time */ - __le32 i_crtime_nsec; /* creation time in nano scale */ - __le64 i_compr_blocks; /* # of compressed blocks */ - __u8 i_compress_algorithm; /* compress algorithm */ - __u8 i_log_cluster_size; /* log of cluster size */ - __le16 i_compress_flag; /* compress flag */ - /* 0 bit: chksum flag - * [10,15] bits: compress level - */ - __le32 i_extra_end[0]; /* for attribute size calculation */ - } __packed; - __le32 i_addr[DEF_ADDRS_PER_INODE]; /* Pointers to data blocks */ - }; + __le32 i_addr[DEF_ADDRS_PER_INODE]; /* Pointers to data blocks */ + __le32 i_nid[DEF_NIDS_PER_INODE]; /* direct(2), indirect(2), double_indirect(1) node id */ } __packed; struct direct_node { - __le32 addr[DEF_ADDRS_PER_BLOCK]; /* array of data block address */ + __le32 addr[ADDRS_PER_BLOCK]; /* array of data block address */ } __packed; struct indirect_node { @@ -306,7 +244,7 @@ enum { struct node_footer { __le32 nid; /* node id */ - __le32 ino; /* inode number */ + __le32 ino; /* inode nunmber */ __le32 flag; /* include cold/fsync/dentry marks and offset */ __le64 cp_ver; /* checkpoint version */ __le32 next_blkaddr; /* next node page block address */ @@ -347,12 +285,6 @@ struct f2fs_nat_block { #define SIT_VBLOCK_MAP_SIZE 64 #define SIT_ENTRY_PER_BLOCK (PAGE_SIZE / sizeof(struct f2fs_sit_entry)) -/* - * F2FS uses 4 bytes to represent block address. 
As a result, supported size of - * disk is 16 TB and it equals to 16 * 1024 * 1024 / 2 segments. - */ -#define F2FS_MAX_SEGMENT ((16 * 1024 * 1024) / 2) - /* * Note that f2fs_sit_entry->vblocks has the following bit-field information. * [15:10] : allocation type such as CURSEG_XXXX_TYPE @@ -501,7 +433,7 @@ typedef __le32 f2fs_hash_t; #define F2FS_SLOT_LEN 8 #define F2FS_SLOT_LEN_BITS 3 -#define GET_DENTRY_SLOTS(x) (((x) + F2FS_SLOT_LEN - 1) >> F2FS_SLOT_LEN_BITS) +#define GET_DENTRY_SLOTS(x) ((x + F2FS_SLOT_LEN - 1) >> F2FS_SLOT_LEN_BITS) /* MAX level for dir lookup */ #define MAX_DIR_HASH_DEPTH 63 @@ -510,13 +442,13 @@ typedef __le32 f2fs_hash_t; #define MAX_DIR_BUCKETS (1 << ((MAX_DIR_HASH_DEPTH / 2) - 1)) /* - * space utilization of regular dentry and inline dentry (w/o extra reservation) - * regular dentry inline dentry (def) inline dentry (min) - * bitmap 1 * 27 = 27 1 * 23 = 23 1 * 1 = 1 - * reserved 1 * 3 = 3 1 * 7 = 7 1 * 1 = 1 - * dentry 11 * 214 = 2354 11 * 182 = 2002 11 * 2 = 22 - * filename 8 * 214 = 1712 8 * 182 = 1456 8 * 2 = 16 - * total 4096 3488 40 + * space utilization of regular dentry and inline dentry + * regular dentry inline dentry + * bitmap 1 * 27 = 27 1 * 23 = 23 + * reserved 1 * 3 = 3 1 * 7 = 7 + * dentry 11 * 214 = 2354 11 * 182 = 2002 + * filename 8 * 214 = 1712 8 * 182 = 1456 + * total 4096 3488 * * Note: there are more reserved space in inline dentry than in regular * dentry, when converting inline dentry we should handle this carefully. @@ -528,13 +460,12 @@ typedef __le32 f2fs_hash_t; #define SIZE_OF_RESERVED (PAGE_SIZE - ((SIZE_OF_DIR_ENTRY + \ F2FS_SLOT_LEN) * \ NR_DENTRY_IN_BLOCK + SIZE_OF_DENTRY_BITMAP)) -#define MIN_INLINE_DENTRY_SIZE 40 /* just include '.' and '..' 
entries */ /* One directory entry slot representing F2FS_SLOT_LEN-sized file name */ struct f2fs_dir_entry { __le32 hash_code; /* hash code of file name */ __le32 ino; /* inode number */ - __le16 name_len; /* length of file name */ + __le16 name_len; /* lengh of file name */ __u8 file_type; /* file type */ } __packed; @@ -547,6 +478,24 @@ struct f2fs_dentry_block { __u8 filename[NR_DENTRY_IN_BLOCK][F2FS_SLOT_LEN]; } __packed; +/* for inline dir */ +#define NR_INLINE_DENTRY (MAX_INLINE_DATA * BITS_PER_BYTE / \ + ((SIZE_OF_DIR_ENTRY + F2FS_SLOT_LEN) * \ + BITS_PER_BYTE + 1)) +#define INLINE_DENTRY_BITMAP_SIZE ((NR_INLINE_DENTRY + \ + BITS_PER_BYTE - 1) / BITS_PER_BYTE) +#define INLINE_RESERVED_SIZE (MAX_INLINE_DATA - \ + ((SIZE_OF_DIR_ENTRY + F2FS_SLOT_LEN) * \ + NR_INLINE_DENTRY + INLINE_DENTRY_BITMAP_SIZE)) + +/* inline directory entry structure */ +struct f2fs_inline_dentry { + __u8 dentry_bitmap[INLINE_DENTRY_BITMAP_SIZE]; + __u8 reserved[INLINE_RESERVED_SIZE]; + struct f2fs_dir_entry dentry[NR_INLINE_DENTRY]; + __u8 filename[NR_INLINE_DENTRY][F2FS_SLOT_LEN]; +} __packed; + /* file types used in inode_info->flags */ enum { F2FS_FT_UNKNOWN, @@ -562,6 +511,4 @@ enum { #define S_SHIFT 12 -#define F2FS_DEF_PROJID 0 /* default project ID */ - #endif /* _LINUX_F2FS_FS_H */ diff --git a/include/linux/falloc.h b/include/linux/falloc.h index f3f0b97b16..7494dc67c6 100644 --- a/include/linux/falloc.h +++ b/include/linux/falloc.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _FALLOC_H_ #define _FALLOC_H_ @@ -20,10 +19,7 @@ struct space_resv { }; #define FS_IOC_RESVSP _IOW('X', 40, struct space_resv) -#define FS_IOC_UNRESVSP _IOW('X', 41, struct space_resv) #define FS_IOC_RESVSP64 _IOW('X', 42, struct space_resv) -#define FS_IOC_UNRESVSP64 _IOW('X', 43, struct space_resv) -#define FS_IOC_ZERO_RANGE _IOW('X', 57, struct space_resv) #define FALLOC_FL_SUPPORTED_MASK (FALLOC_FL_KEEP_SIZE | \ FALLOC_FL_PUNCH_HOLE | \ @@ -32,25 +28,4 @@ struct space_resv { 
FALLOC_FL_INSERT_RANGE | \ FALLOC_FL_UNSHARE_RANGE) -/* on ia32 l_start is on a 32-bit boundary */ -#if defined(CONFIG_X86_64) -struct space_resv_32 { - __s16 l_type; - __s16 l_whence; - __s64 l_start __attribute__((packed)); - /* len == 0 means until end of file */ - __s64 l_len __attribute__((packed)); - __s32 l_sysid; - __u32 l_pid; - __s32 l_pad[4]; /* reserve area */ -}; - -#define FS_IOC_RESVSP_32 _IOW ('X', 40, struct space_resv_32) -#define FS_IOC_UNRESVSP_32 _IOW ('X', 41, struct space_resv_32) -#define FS_IOC_RESVSP64_32 _IOW ('X', 42, struct space_resv_32) -#define FS_IOC_UNRESVSP64_32 _IOW ('X', 43, struct space_resv_32) -#define FS_IOC_ZERO_RANGE_32 _IOW ('X', 57, struct space_resv_32) - -#endif - #endif /* _FALLOC_H_ */ diff --git a/include/linux/fanotify.h b/include/linux/fanotify.h index eec3b7c408..cef93ddcc5 100644 --- a/include/linux/fanotify.h +++ b/include/linux/fanotify.h @@ -1,118 +1,8 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_FANOTIFY_H #define _LINUX_FANOTIFY_H -#include #include -extern struct ctl_table fanotify_table[]; /* for sysctl */ - -#define FAN_GROUP_FLAG(group, flag) \ - ((group)->fanotify_data.flags & (flag)) - -/* - * Flags allowed to be passed from/to userspace. - * - * We intentionally do not add new bits to the old FAN_ALL_* constants, because - * they are uapi exposed constants. If there are programs out there using - * these constant, the programs may break if re-compiled with new uapi headers - * and then run on an old kernel. - */ - -/* Group classes where permission events are allowed */ -#define FANOTIFY_PERM_CLASSES (FAN_CLASS_CONTENT | \ - FAN_CLASS_PRE_CONTENT) - -#define FANOTIFY_CLASS_BITS (FAN_CLASS_NOTIF | FANOTIFY_PERM_CLASSES) - -#define FANOTIFY_FID_BITS (FAN_REPORT_FID | FAN_REPORT_DFID_NAME) - -#define FANOTIFY_INFO_MODES (FANOTIFY_FID_BITS | FAN_REPORT_PIDFD) - -/* - * fanotify_init() flags that require CAP_SYS_ADMIN. - * We do not allow unprivileged groups to request permission events. 
- * We do not allow unprivileged groups to get other process pid in events. - * We do not allow unprivileged groups to use unlimited resources. - */ -#define FANOTIFY_ADMIN_INIT_FLAGS (FANOTIFY_PERM_CLASSES | \ - FAN_REPORT_TID | \ - FAN_REPORT_PIDFD | \ - FAN_UNLIMITED_QUEUE | \ - FAN_UNLIMITED_MARKS) - -/* - * fanotify_init() flags that are allowed for user without CAP_SYS_ADMIN. - * FAN_CLASS_NOTIF is the only class we allow for unprivileged group. - * We do not allow unprivileged groups to get file descriptors in events, - * so one of the flags for reporting file handles is required. - */ -#define FANOTIFY_USER_INIT_FLAGS (FAN_CLASS_NOTIF | \ - FANOTIFY_FID_BITS | \ - FAN_CLOEXEC | FAN_NONBLOCK) - -#define FANOTIFY_INIT_FLAGS (FANOTIFY_ADMIN_INIT_FLAGS | \ - FANOTIFY_USER_INIT_FLAGS) - -/* Internal group flags */ -#define FANOTIFY_UNPRIV 0x80000000 -#define FANOTIFY_INTERNAL_GROUP_FLAGS (FANOTIFY_UNPRIV) - -#define FANOTIFY_MARK_TYPE_BITS (FAN_MARK_INODE | FAN_MARK_MOUNT | \ - FAN_MARK_FILESYSTEM) - -#define FANOTIFY_MARK_FLAGS (FANOTIFY_MARK_TYPE_BITS | \ - FAN_MARK_ADD | \ - FAN_MARK_REMOVE | \ - FAN_MARK_DONT_FOLLOW | \ - FAN_MARK_ONLYDIR | \ - FAN_MARK_IGNORED_MASK | \ - FAN_MARK_IGNORED_SURV_MODIFY | \ - FAN_MARK_FLUSH) - -/* - * Events that can be reported with data type FSNOTIFY_EVENT_PATH. - * Note that FAN_MODIFY can also be reported with data type - * FSNOTIFY_EVENT_INODE. - */ -#define FANOTIFY_PATH_EVENTS (FAN_ACCESS | FAN_MODIFY | \ - FAN_CLOSE | FAN_OPEN | FAN_OPEN_EXEC) - -/* - * Directory entry modification events - reported only to directory - * where entry is modified and not to a watching parent. 
- */ -#define FANOTIFY_DIRENT_EVENTS (FAN_MOVE | FAN_CREATE | FAN_DELETE) - -/* Events that can only be reported with data type FSNOTIFY_EVENT_INODE */ -#define FANOTIFY_INODE_EVENTS (FANOTIFY_DIRENT_EVENTS | \ - FAN_ATTRIB | FAN_MOVE_SELF | FAN_DELETE_SELF) - -/* Events that user can request to be notified on */ -#define FANOTIFY_EVENTS (FANOTIFY_PATH_EVENTS | \ - FANOTIFY_INODE_EVENTS) - -/* Events that require a permission response from user */ -#define FANOTIFY_PERM_EVENTS (FAN_OPEN_PERM | FAN_ACCESS_PERM | \ - FAN_OPEN_EXEC_PERM) - -/* Extra flags that may be reported with event or control handling of events */ -#define FANOTIFY_EVENT_FLAGS (FAN_EVENT_ON_CHILD | FAN_ONDIR) - -/* Events that may be reported to user */ -#define FANOTIFY_OUTGOING_EVENTS (FANOTIFY_EVENTS | \ - FANOTIFY_PERM_EVENTS | \ - FAN_Q_OVERFLOW | FAN_ONDIR) - -#define ALL_FANOTIFY_EVENT_BITS (FANOTIFY_OUTGOING_EVENTS | \ - FANOTIFY_EVENT_FLAGS) - -/* Do not use these old uapi constants internally */ -#undef FAN_ALL_CLASS_BITS -#undef FAN_ALL_INIT_FLAGS -#undef FAN_ALL_MARK_FLAGS -#undef FAN_ALL_EVENTS -#undef FAN_ALL_PERM_EVENTS -#undef FAN_ALL_OUTGOING_EVENTS - +/* not valid from userspace, only kernel internal */ +#define FAN_MARK_ONDIR 0x00000100 #endif /* _LINUX_FANOTIFY_H */ diff --git a/include/linux/fault-inject.h b/include/linux/fault-inject.h index e525f6957c..9f4956d860 100644 --- a/include/linux/fault-inject.h +++ b/include/linux/fault-inject.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_FAULT_INJECT_H #define _LINUX_FAULT_INJECT_H @@ -11,7 +10,7 @@ /* * For explanation of the elements of this struct, see - * Documentation/fault-injection/fault-injection.rst + * Documentation/fault-injection/fault-injection.txt */ struct fault_attr { unsigned long probability; @@ -62,13 +61,10 @@ static inline struct dentry *fault_create_debugfs_attr(const char *name, #endif /* CONFIG_FAULT_INJECTION */ -struct kmem_cache; - -int should_failslab(struct kmem_cache *s, 
gfp_t gfpflags); #ifdef CONFIG_FAILSLAB -extern bool __should_failslab(struct kmem_cache *s, gfp_t gfpflags); +extern bool should_failslab(struct kmem_cache *s, gfp_t gfpflags); #else -static inline bool __should_failslab(struct kmem_cache *s, gfp_t gfpflags) +static inline bool should_failslab(struct kmem_cache *s, gfp_t gfpflags) { return false; } diff --git a/include/linux/fb.h b/include/linux/fb.h index 5950f8f5dc..09bf71f034 100644 --- a/include/linux/fb.h +++ b/include/linux/fb.h @@ -1,8 +1,6 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_FB_H #define _LINUX_FB_H -#include #include #include @@ -125,17 +123,45 @@ struct fb_cursor_user { * Register/unregister for framebuffer events */ -/* The resolution of the passed in fb_info about to change */ +/* The resolution of the passed in fb_info about to change */ #define FB_EVENT_MODE_CHANGE 0x01 - -#ifdef CONFIG_GUMSTIX_AM200EPD -/* only used by mach-pxa/am200epd.c */ +/* The display on this fb_info is beeing suspended, no access to the + * framebuffer is allowed any more after that call returns + */ +#define FB_EVENT_SUSPEND 0x02 +/* The display on this fb_info was resumed, you can restore the display + * if you own it + */ +#define FB_EVENT_RESUME 0x03 +/* An entry from the modelist was removed */ +#define FB_EVENT_MODE_DELETE 0x04 +/* A driver registered itself */ #define FB_EVENT_FB_REGISTERED 0x05 +/* A driver unregistered itself */ #define FB_EVENT_FB_UNREGISTERED 0x06 -#endif - -/* A display blank is requested */ +/* CONSOLE-SPECIFIC: get console to framebuffer mapping */ +#define FB_EVENT_GET_CONSOLE_MAP 0x07 +/* CONSOLE-SPECIFIC: set console to framebuffer mapping */ +#define FB_EVENT_SET_CONSOLE_MAP 0x08 +/* A hardware display blank change occurred */ #define FB_EVENT_BLANK 0x09 +/* Private modelist is to be replaced */ +#define FB_EVENT_NEW_MODELIST 0x0A +/* The resolution of the passed in fb_info about to change and + all vc's should be changed */ +#define FB_EVENT_MODE_CHANGE_ALL 0x0B +/* A 
software display blank change occurred */ +#define FB_EVENT_CONBLANK 0x0C +/* Get drawing requirements */ +#define FB_EVENT_GET_REQ 0x0D +/* Unbind from the console if possible */ +#define FB_EVENT_FB_UNBIND 0x0E +/* CONSOLE-SPECIFIC: remap all consoles to new fb - for vga_switcheroo */ +#define FB_EVENT_REMAP_ALL_CONSOLE 0x0F +/* A hardware display blank early change occured */ +#define FB_EARLY_EVENT_BLANK 0x10 +/* A hardware display blank revert early change occured */ +#define FB_R_EARLY_EVENT_BLANK 0x11 struct fb_event { struct fb_info *info; @@ -294,7 +320,8 @@ struct fb_ops { /* called at KDB enter and leave time to prepare the console */ int (*fb_debug_enter)(struct fb_info *info); int (*fb_debug_leave)(struct fb_info *info); -}; +} __do_const; +typedef struct fb_ops __no_const fb_ops_no_const; #ifdef CONFIG_FB_TILEBLITTING #define FB_TILE_CURSOR_NONE 0 @@ -374,7 +401,7 @@ struct fb_tile_ops { #endif /* CONFIG_FB_TILEBLITTING */ /* FBINFO_* = fb_info.flags bit flags */ -#define FBINFO_DEFAULT 0 +#define FBINFO_MODULE 0x0001 /* Low-level driver is a module */ #define FBINFO_HWACCEL_DISABLED 0x0002 /* When FBINFO_HWACCEL_DISABLED is set: * Hardware acceleration is turned off. Software implementations @@ -401,6 +428,8 @@ struct fb_tile_ops { #define FBINFO_HWACCEL_YPAN 0x2000 /* optional */ #define FBINFO_HWACCEL_YWRAP 0x4000 /* optional */ +#define FBINFO_MISC_USEREVENT 0x10000 /* event request + from userspace */ #define FBINFO_MISC_TILEBLITTING 0x20000 /* use tile blitting */ /* A driver may set this flag to indicate that it does want a set_par to be @@ -427,23 +456,15 @@ struct fb_tile_ops { * and host endianness. Drivers should not use this flag. */ #define FBINFO_BE_MATH 0x100000 -/* - * Hide smem_start in the FBIOGET_FSCREENINFO IOCTL. This is used by modern DRM - * drivers to stop userspace from trying to share buffers behind the kernel's - * back. Instead dma-buf based buffer sharing should be used. 
- */ -#define FBINFO_HIDE_SMEM_START 0x200000 +/* report to the VT layer that this fb driver can accept forced console + output like oopses */ +#define FBINFO_CAN_FORCE_OUTPUT 0x200000 struct fb_info { - refcount_t count; + atomic_t count; int node; int flags; - /* - * -1 by default, set to a FB_ROTATE_* value by the driver, if it knows - * a lcd is not mounted upright and fbcon should rotate to compensate. - */ - int fbcon_rotate_hint; struct mutex lock; /* Lock for open/release/ioctl funcs */ struct mutex mm_lock; /* Lock for fb_mmap and smem_* fields */ struct fb_var_screeninfo var; /* Current var */ @@ -456,14 +477,14 @@ struct fb_info { struct list_head modelist; /* mode list */ struct fb_videomode *mode; /* current mode */ -#if IS_ENABLED(CONFIG_FB_BACKLIGHT) +#ifdef CONFIG_FB_BACKLIGHT /* assigned backlight device */ - /* set before framebuffer registration, + /* set before framebuffer registration, remove after unregister */ struct backlight_device *bl_dev; /* Backlight level curve */ - struct mutex bl_curve_mutex; + struct mutex bl_curve_mutex; u8 bl_curve[FB_BACKLIGHT_LEVELS]; #endif #ifdef CONFIG_FB_DEFERRED_IO @@ -471,7 +492,7 @@ struct fb_info { struct fb_deferred_io *fbdefio; #endif - const struct fb_ops *fbops; + struct fb_ops *fbops; struct device *device; /* This is the parent */ struct device *dev; /* This is this fb device */ int class_flag; /* private sysfs flags */ @@ -482,8 +503,8 @@ struct fb_info { char __iomem *screen_base; /* Virtual address */ char *screen_buffer; }; - unsigned long screen_size; /* Amount of ioremapped VRAM or 0 */ - void *pseudo_palette; /* Fake palette of 16 colors */ + unsigned long screen_size; /* Amount of ioremapped VRAM or 0 */ + void *pseudo_palette; /* Fake palette of 16 colors */ #define FBINFO_STATE_RUNNING 0 #define FBINFO_STATE_SUSPENDED 1 u32 state; /* Hardware state i.e suspend */ @@ -505,15 +526,22 @@ struct fb_info { }; static inline struct apertures_struct *alloc_apertures(unsigned int max_num) { - 
struct apertures_struct *a; - - a = kzalloc(struct_size(a, ranges, max_num), GFP_KERNEL); + struct apertures_struct *a = kzalloc(sizeof(struct apertures_struct) + + max_num * sizeof(struct aperture), GFP_KERNEL); if (!a) return NULL; a->count = max_num; return a; } +#ifdef MODULE +#define FBINFO_DEFAULT FBINFO_MODULE +#else +#define FBINFO_DEFAULT 0 +#endif + +// This will go away +#define FBINFO_FLAG_MODULE FBINFO_MODULE #define FBINFO_FLAG_DEFAULT FBINFO_DEFAULT /* This will go away @@ -544,9 +572,7 @@ static inline struct apertures_struct *alloc_apertures(unsigned int max_num) { #define fb_memcpy_fromfb sbus_memcpy_fromio #define fb_memcpy_tofb sbus_memcpy_toio -#elif defined(__i386__) || defined(__alpha__) || defined(__x86_64__) || \ - defined(__hppa__) || defined(__sh__) || defined(__powerpc__) || \ - defined(__arm__) || defined(__aarch64__) +#elif defined(__i386__) || defined(__alpha__) || defined(__x86_64__) || defined(__hppa__) || defined(__sh__) || defined(__powerpc__) || defined(__avr32__) || defined(__bfin__) || defined(__arm__) #define fb_readb __raw_readb #define fb_readw __raw_readw @@ -586,11 +612,11 @@ static inline struct apertures_struct *alloc_apertures(unsigned int max_num) { * `Generic' versions of the frame buffer device operations */ -extern int fb_set_var(struct fb_info *info, struct fb_var_screeninfo *var); -extern int fb_pan_display(struct fb_info *info, struct fb_var_screeninfo *var); +extern int fb_set_var(struct fb_info *info, struct fb_var_screeninfo *var); +extern int fb_pan_display(struct fb_info *info, struct fb_var_screeninfo *var); extern int fb_blank(struct fb_info *info, int blank); -extern void cfb_fillrect(struct fb_info *info, const struct fb_fillrect *rect); -extern void cfb_copyarea(struct fb_info *info, const struct fb_copyarea *area); +extern void cfb_fillrect(struct fb_info *info, const struct fb_fillrect *rect); +extern void cfb_copyarea(struct fb_info *info, const struct fb_copyarea *area); extern void 
cfb_imageblit(struct fb_info *info, const struct fb_image *image); /* * Drawing operations where framebuffer is in system RAM @@ -605,9 +631,8 @@ extern ssize_t fb_sys_write(struct fb_info *info, const char __user *buf, /* drivers/video/fbmem.c */ extern int register_framebuffer(struct fb_info *fb_info); -extern void unregister_framebuffer(struct fb_info *fb_info); -extern int remove_conflicting_pci_framebuffers(struct pci_dev *pdev, - const char *name); +extern int unregister_framebuffer(struct fb_info *fb_info); +extern int unlink_framebuffer(struct fb_info *fb_info); extern int remove_conflicting_framebuffers(struct apertures_struct *a, const char *name, bool primary); extern int fb_prepare_logo(struct fb_info *fb_info, int rotate); @@ -624,18 +649,9 @@ extern int fb_new_modelist(struct fb_info *info); extern struct fb_info *registered_fb[FB_MAX]; extern int num_registered_fb; -extern bool fb_center_logo; -extern int fb_logo_count; extern struct class *fb_class; -#define for_each_registered_fb(i) \ - for (i = 0; i < FB_MAX; i++) \ - if (!registered_fb[i]) {} else - -static inline void lock_fb_info(struct fb_info *info) -{ - mutex_lock(&info->lock); -} +extern int lock_fb_info(struct fb_info *info); static inline void unlock_fb_info(struct fb_info *info) { @@ -717,6 +733,8 @@ extern int fb_parse_edid(unsigned char *edid, struct fb_var_screeninfo *var); extern const unsigned char *fb_firmware_edid(struct device *device); extern void fb_edid_to_monspecs(unsigned char *edid, struct fb_monspecs *specs); +extern void fb_edid_add_monspecs(unsigned char *edid, + struct fb_monspecs *specs); extern void fb_destroy_modedb(struct fb_videomode *modedb); extern int fb_find_mode_cvt(struct fb_videomode *mode, int margins, int rb); extern unsigned char *fb_ddc_read(struct i2c_adapter *adapter); @@ -790,6 +808,7 @@ struct dmt_videomode { extern const char *fb_mode_option; extern const struct fb_videomode vesa_modes[]; +extern const struct fb_videomode cea_modes[65]; extern const 
struct dmt_videomode dmt_modes[]; struct fb_modelist { diff --git a/include/linux/fcdevice.h b/include/linux/fcdevice.h index 3d14ebe59d..5009fa16b5 100644 --- a/include/linux/fcdevice.h +++ b/include/linux/fcdevice.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * INET An implementation of the TCP/IP protocol suite for the LINUX * operating system. NET is implemented using the BSD Socket @@ -13,7 +12,13 @@ * Relocated to include/linux where it belongs by Alan Cox * * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. + * * WARNING: This move may well be temporary. This file will get merged with others RSN. + * */ #ifndef _LINUX_FCDEVICE_H #define _LINUX_FCDEVICE_H diff --git a/include/linux/fcntl.h b/include/linux/fcntl.h index a332e79b32..76ce329e65 100644 --- a/include/linux/fcntl.h +++ b/include/linux/fcntl.h @@ -1,28 +1,11 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_FCNTL_H #define _LINUX_FCNTL_H -#include #include -/* List of all valid flags for the open/openat flags argument: */ -#define VALID_OPEN_FLAGS \ - (O_RDONLY | O_WRONLY | O_RDWR | O_CREAT | O_EXCL | O_NOCTTY | O_TRUNC | \ - O_APPEND | O_NDELAY | O_NONBLOCK | __O_SYNC | O_DSYNC | \ - FASYNC | O_DIRECT | O_LARGEFILE | O_DIRECTORY | O_NOFOLLOW | \ - O_NOATIME | O_CLOEXEC | O_PATH | __O_TMPFILE) - -/* List of all valid flags for the how->resolve argument: */ -#define VALID_RESOLVE_FLAGS \ - (RESOLVE_NO_XDEV | RESOLVE_NO_MAGICLINKS | RESOLVE_NO_SYMLINKS | \ - RESOLVE_BENEATH | RESOLVE_IN_ROOT | RESOLVE_CACHED) - -/* List of all open_how "versions". 
*/ -#define OPEN_HOW_SIZE_VER0 24 /* sizeof first published struct */ -#define OPEN_HOW_SIZE_LATEST OPEN_HOW_SIZE_VER0 #ifndef force_o_largefile -#define force_o_largefile() (!IS_ENABLED(CONFIG_ARCH_32BIT_OFF_T)) +#define force_o_largefile() (BITS_PER_LONG != 32) #endif #if BITS_PER_LONG == 32 diff --git a/include/linux/fd.h b/include/linux/fd.h index ece5ea5320..69275bccc3 100644 --- a/include/linux/fd.h +++ b/include/linux/fd.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_FD_H #define _LINUX_FD_H diff --git a/include/linux/fddidevice.h b/include/linux/fddidevice.h index 906ee446db..9a79f0106d 100644 --- a/include/linux/fddidevice.h +++ b/include/linux/fddidevice.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * INET An implementation of the TCP/IP protocol suite for the LINUX * operating system. INET is implemented using the BSD Socket @@ -14,6 +13,11 @@ * Ross Biro * Fred N. van Kempen, * Alan Cox, + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. */ #ifndef _LINUX_FDDIDEVICE_H #define _LINUX_FDDIDEVICE_H @@ -22,6 +26,7 @@ #ifdef __KERNEL__ __be16 fddi_type_trans(struct sk_buff *skb, struct net_device *dev); +int fddi_change_mtu(struct net_device *dev, int new_mtu); struct net_device *alloc_fddidev(int sizeof_priv); #endif diff --git a/include/linux/fdtable.h b/include/linux/fdtable.h index d0e7817487..6e84b2cae6 100644 --- a/include/linux/fdtable.h +++ b/include/linux/fdtable.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ /* * descriptor table internals; you almost certainly want file.h instead. */ @@ -10,7 +9,6 @@ #include #include #include -#include #include #include #include @@ -22,7 +20,6 @@ * as this is the granularity returned by copy_fdset(). 
*/ #define NR_OPEN_DEFAULT BITS_PER_LONG -#define NR_OPEN_MAX ~0U struct fdtable { unsigned int max_fds; @@ -80,54 +77,46 @@ struct dentry; /* * The caller must ensure that fd table isn't shared or hold rcu or file lock */ -static inline struct file *files_lookup_fd_raw(struct files_struct *files, unsigned int fd) +static inline struct file *__fcheck_files(struct files_struct *files, unsigned int fd) { struct fdtable *fdt = rcu_dereference_raw(files->fdt); - if (fd < fdt->max_fds) { - fd = array_index_nospec(fd, fdt->max_fds); + if (fd < fdt->max_fds) return rcu_dereference_raw(fdt->fd[fd]); - } return NULL; } -static inline struct file *files_lookup_fd_locked(struct files_struct *files, unsigned int fd) +static inline struct file *fcheck_files(struct files_struct *files, unsigned int fd) { - RCU_LOCKDEP_WARN(!lockdep_is_held(&files->file_lock), + RCU_LOCKDEP_WARN(!rcu_read_lock_held() && + !lockdep_is_held(&files->file_lock), "suspicious rcu_dereference_check() usage"); - return files_lookup_fd_raw(files, fd); + return __fcheck_files(files, fd); } -static inline struct file *files_lookup_fd_rcu(struct files_struct *files, unsigned int fd) -{ - RCU_LOCKDEP_WARN(!rcu_read_lock_held(), - "suspicious rcu_dereference_check() usage"); - return files_lookup_fd_raw(files, fd); -} - -static inline struct file *lookup_fd_rcu(unsigned int fd) -{ - return files_lookup_fd_rcu(current->files, fd); -} - -struct file *task_lookup_fd_rcu(struct task_struct *task, unsigned int fd); -struct file *task_lookup_next_fd_rcu(struct task_struct *task, unsigned int *fd); +/* + * Check whether the specified fd has an open file. 
+ */ +#define fcheck(fd) fcheck_files(current->files, fd) struct task_struct; +struct files_struct *get_files_struct(struct task_struct *); void put_files_struct(struct files_struct *fs); -int unshare_files(void); -struct files_struct *dup_fd(struct files_struct *, unsigned, int *) __latent_entropy; +void reset_files_struct(struct files_struct *); +int unshare_files(struct files_struct **); +struct files_struct *dup_fd(struct files_struct *, int *) __latent_entropy; void do_close_on_exec(struct files_struct *); int iterate_fd(struct files_struct *, unsigned, int (*)(const void *, struct file *, unsigned), const void *); -extern int close_fd(unsigned int fd); -extern int __close_range(unsigned int fd, unsigned int max_fd, unsigned int flags); -extern int close_fd_get_file(unsigned int fd, struct file **res); -extern int unshare_fd(unsigned long unshare_flags, unsigned int max_fds, - struct files_struct **new_fdp); +extern int __alloc_fd(struct files_struct *files, + unsigned start, unsigned end, unsigned flags); +extern void __fd_install(struct files_struct *files, + unsigned int fd, struct file *file); +extern int __close_fd(struct files_struct *files, + unsigned int fd); extern struct kmem_cache *files_cachep; diff --git a/include/linux/fec.h b/include/linux/fec.h index 9aaf53f072..1454a50362 100644 --- a/include/linux/fec.h +++ b/include/linux/fec.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* include/linux/fec.h * * Copyright (c) 2009 Orex Computed Radiography @@ -7,6 +6,10 @@ * Copyright (C) 2010 Freescale Semiconductor, Inc. * * Header file for the FEC platform data + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
*/ #ifndef __LINUX_FEC_H__ #define __LINUX_FEC_H__ diff --git a/include/linux/fence-array.h b/include/linux/fence-array.h new file mode 100644 index 0000000000..a44794e508 --- /dev/null +++ b/include/linux/fence-array.h @@ -0,0 +1,83 @@ +/* + * fence-array: aggregates fence to be waited together + * + * Copyright (C) 2016 Collabora Ltd + * Copyright (C) 2016 Advanced Micro Devices, Inc. + * Authors: + * Gustavo Padovan + * Christian König + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published by + * the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + */ + +#ifndef __LINUX_FENCE_ARRAY_H +#define __LINUX_FENCE_ARRAY_H + +#include + +/** + * struct fence_array_cb - callback helper for fence array + * @cb: fence callback structure for signaling + * @array: reference to the parent fence array object + */ +struct fence_array_cb { + struct fence_cb cb; + struct fence_array *array; +}; + +/** + * struct fence_array - fence to represent an array of fences + * @base: fence base class + * @lock: spinlock for fence handling + * @num_fences: number of fences in the array + * @num_pending: fences in the array still pending + * @fences: array of the fences + */ +struct fence_array { + struct fence base; + + spinlock_t lock; + unsigned num_fences; + atomic_t num_pending; + struct fence **fences; +}; + +extern const struct fence_ops fence_array_ops; + +/** + * fence_is_array - check if a fence is from the array subsclass + * + * Return true if it is a fence_array and false otherwise. 
+ */ +static inline bool fence_is_array(struct fence *fence) +{ + return fence->ops == &fence_array_ops; +} + +/** + * to_fence_array - cast a fence to a fence_array + * @fence: fence to cast to a fence_array + * + * Returns NULL if the fence is not a fence_array, + * or the fence_array otherwise. + */ +static inline struct fence_array *to_fence_array(struct fence *fence) +{ + if (fence->ops != &fence_array_ops) + return NULL; + + return container_of(fence, struct fence_array, base); +} + +struct fence_array *fence_array_create(int num_fences, struct fence **fences, + u64 context, unsigned seqno, + bool signal_on_any); + +#endif /* __LINUX_FENCE_ARRAY_H */ diff --git a/include/linux/fence.h b/include/linux/fence.h new file mode 100644 index 0000000000..0d763053f9 --- /dev/null +++ b/include/linux/fence.h @@ -0,0 +1,378 @@ +/* + * Fence mechanism for dma-buf to allow for asynchronous dma access + * + * Copyright (C) 2012 Canonical Ltd + * Copyright (C) 2012 Texas Instruments + * + * Authors: + * Rob Clark + * Maarten Lankhorst + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published by + * the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+ */ + +#ifndef __LINUX_FENCE_H +#define __LINUX_FENCE_H + +#include +#include +#include +#include +#include +#include +#include +#include + +struct fence; +struct fence_ops; +struct fence_cb; + +/** + * struct fence - software synchronization primitive + * @refcount: refcount for this fence + * @ops: fence_ops associated with this fence + * @rcu: used for releasing fence with kfree_rcu + * @cb_list: list of all callbacks to call + * @lock: spin_lock_irqsave used for locking + * @context: execution context this fence belongs to, returned by + * fence_context_alloc() + * @seqno: the sequence number of this fence inside the execution context, + * can be compared to decide which fence would be signaled later. + * @flags: A mask of FENCE_FLAG_* defined below + * @timestamp: Timestamp when the fence was signaled. + * @status: Optional, only valid if < 0, must be set before calling + * fence_signal, indicates that the fence has completed with an error. + * + * the flags member must be manipulated and read using the appropriate + * atomic ops (bit_*), so taking the spinlock will not be needed most + * of the time. + * + * FENCE_FLAG_SIGNALED_BIT - fence is already signaled + * FENCE_FLAG_ENABLE_SIGNAL_BIT - enable_signaling might have been called* + * FENCE_FLAG_USER_BITS - start of the unused bits, can be used by the + * implementer of the fence for its own purposes. Can be used in different + * ways by different fence implementers, so do not rely on this. + * + * Since atomic bitops are used, this is not guaranteed to be the case. + * Particularly, if the bit was set, but fence_signal was called right + * before this bit was set, it would have been able to set the + * FENCE_FLAG_SIGNALED_BIT, before enable_signaling was called. 
+ * Adding a check for FENCE_FLAG_SIGNALED_BIT after setting + * FENCE_FLAG_ENABLE_SIGNAL_BIT closes this race, and makes sure that + * after fence_signal was called, any enable_signaling call will have either + * been completed, or never called at all. + */ +struct fence { + struct kref refcount; + const struct fence_ops *ops; + struct rcu_head rcu; + struct list_head cb_list; + spinlock_t *lock; + u64 context; + unsigned seqno; + unsigned long flags; + ktime_t timestamp; + int status; +}; + +enum fence_flag_bits { + FENCE_FLAG_SIGNALED_BIT, + FENCE_FLAG_ENABLE_SIGNAL_BIT, + FENCE_FLAG_USER_BITS, /* must always be last member */ +}; + +typedef void (*fence_func_t)(struct fence *fence, struct fence_cb *cb); + +/** + * struct fence_cb - callback for fence_add_callback + * @node: used by fence_add_callback to append this struct to fence::cb_list + * @func: fence_func_t to call + * + * This struct will be initialized by fence_add_callback, additional + * data can be passed along by embedding fence_cb in another struct. + */ +struct fence_cb { + struct list_head node; + fence_func_t func; +}; + +/** + * struct fence_ops - operations implemented for fence + * @get_driver_name: returns the driver name. + * @get_timeline_name: return the name of the context this fence belongs to. + * @enable_signaling: enable software signaling of fence. + * @signaled: [optional] peek whether the fence is signaled, can be null. + * @wait: custom wait implementation, or fence_default_wait. + * @release: [optional] called on destruction of fence, can be null + * @fill_driver_data: [optional] callback to fill in free-form debug info + * Returns amount of bytes filled, or -errno. 
+ * @fence_value_str: [optional] fills in the value of the fence as a string + * @timeline_value_str: [optional] fills in the current value of the timeline + * as a string + * + * Notes on enable_signaling: + * For fence implementations that have the capability for hw->hw + * signaling, they can implement this op to enable the necessary + * irqs, or insert commands into cmdstream, etc. This is called + * in the first wait() or add_callback() path to let the fence + * implementation know that there is another driver waiting on + * the signal (ie. hw->sw case). + * + * This function can be called called from atomic context, but not + * from irq context, so normal spinlocks can be used. + * + * A return value of false indicates the fence already passed, + * or some failure occurred that made it impossible to enable + * signaling. True indicates successful enabling. + * + * fence->status may be set in enable_signaling, but only when false is + * returned. + * + * Calling fence_signal before enable_signaling is called allows + * for a tiny race window in which enable_signaling is called during, + * before, or after fence_signal. To fight this, it is recommended + * that before enable_signaling returns true an extra reference is + * taken on the fence, to be released when the fence is signaled. + * This will mean fence_signal will still be called twice, but + * the second time will be a noop since it was already signaled. + * + * Notes on signaled: + * May set fence->status if returning true. + * + * Notes on wait: + * Must not be NULL, set to fence_default_wait for default implementation. + * the fence_default_wait implementation should work for any fence, as long + * as enable_signaling works correctly. + * + * Must return -ERESTARTSYS if the wait is intr = true and the wait was + * interrupted, and remaining jiffies if fence has signaled, or 0 if wait + * timed out. 
Can also return other error values on custom implementations, + * which should be treated as if the fence is signaled. For example a hardware + * lockup could be reported like that. + * + * Notes on release: + * Can be NULL, this function allows additional commands to run on + * destruction of the fence. Can be called from irq context. + * If pointer is set to NULL, kfree will get called instead. + */ + +struct fence_ops { + const char * (*get_driver_name)(struct fence *fence); + const char * (*get_timeline_name)(struct fence *fence); + bool (*enable_signaling)(struct fence *fence); + bool (*signaled)(struct fence *fence); + signed long (*wait)(struct fence *fence, bool intr, signed long timeout); + void (*release)(struct fence *fence); + + int (*fill_driver_data)(struct fence *fence, void *data, int size); + void (*fence_value_str)(struct fence *fence, char *str, int size); + void (*timeline_value_str)(struct fence *fence, char *str, int size); +}; + +void fence_init(struct fence *fence, const struct fence_ops *ops, + spinlock_t *lock, u64 context, unsigned seqno); + +void fence_release(struct kref *kref); +void fence_free(struct fence *fence); + +/** + * fence_get - increases refcount of the fence + * @fence: [in] fence to increase refcount of + * + * Returns the same fence, with refcount increased by 1. + */ +static inline struct fence *fence_get(struct fence *fence) +{ + if (fence) + kref_get(&fence->refcount); + return fence; +} + +/** + * fence_get_rcu - get a fence from a reservation_object_list with rcu read lock + * @fence: [in] fence to increase refcount of + * + * Function returns NULL if no refcount could be obtained, or the fence. 
+ */ +static inline struct fence *fence_get_rcu(struct fence *fence) +{ + if (kref_get_unless_zero(&fence->refcount)) + return fence; + else + return NULL; +} + +/** + * fence_put - decreases refcount of the fence + * @fence: [in] fence to reduce refcount of + */ +static inline void fence_put(struct fence *fence) +{ + if (fence) + kref_put(&fence->refcount, fence_release); +} + +int fence_signal(struct fence *fence); +int fence_signal_locked(struct fence *fence); +signed long fence_default_wait(struct fence *fence, bool intr, signed long timeout); +int fence_add_callback(struct fence *fence, struct fence_cb *cb, + fence_func_t func); +bool fence_remove_callback(struct fence *fence, struct fence_cb *cb); +void fence_enable_sw_signaling(struct fence *fence); + +/** + * fence_is_signaled_locked - Return an indication if the fence is signaled yet. + * @fence: [in] the fence to check + * + * Returns true if the fence was already signaled, false if not. Since this + * function doesn't enable signaling, it is not guaranteed to ever return + * true if fence_add_callback, fence_wait or fence_enable_sw_signaling + * haven't been called before. + * + * This function requires fence->lock to be held. + */ +static inline bool +fence_is_signaled_locked(struct fence *fence) +{ + if (test_bit(FENCE_FLAG_SIGNALED_BIT, &fence->flags)) + return true; + + if (fence->ops->signaled && fence->ops->signaled(fence)) { + fence_signal_locked(fence); + return true; + } + + return false; +} + +/** + * fence_is_signaled - Return an indication if the fence is signaled yet. + * @fence: [in] the fence to check + * + * Returns true if the fence was already signaled, false if not. Since this + * function doesn't enable signaling, it is not guaranteed to ever return + * true if fence_add_callback, fence_wait or fence_enable_sw_signaling + * haven't been called before. 
+ * + * It's recommended for seqno fences to call fence_signal when the + * operation is complete, it makes it possible to prevent issues from + * wraparound between time of issue and time of use by checking the return + * value of this function before calling hardware-specific wait instructions. + */ +static inline bool +fence_is_signaled(struct fence *fence) +{ + if (test_bit(FENCE_FLAG_SIGNALED_BIT, &fence->flags)) + return true; + + if (fence->ops->signaled && fence->ops->signaled(fence)) { + fence_signal(fence); + return true; + } + + return false; +} + +/** + * fence_is_later - return if f1 is chronologically later than f2 + * @f1: [in] the first fence from the same context + * @f2: [in] the second fence from the same context + * + * Returns true if f1 is chronologically later than f2. Both fences must be + * from the same context, since a seqno is not re-used across contexts. + */ +static inline bool fence_is_later(struct fence *f1, struct fence *f2) +{ + if (WARN_ON(f1->context != f2->context)) + return false; + + return (int)(f1->seqno - f2->seqno) > 0; +} + +/** + * fence_later - return the chronologically later fence + * @f1: [in] the first fence from the same context + * @f2: [in] the second fence from the same context + * + * Returns NULL if both fences are signaled, otherwise the fence that would be + * signaled last. Both fences must be from the same context, since a seqno is + * not re-used across contexts. + */ +static inline struct fence *fence_later(struct fence *f1, struct fence *f2) +{ + if (WARN_ON(f1->context != f2->context)) + return NULL; + + /* + * can't check just FENCE_FLAG_SIGNALED_BIT here, it may never have been + * set if enable_signaling wasn't called, and enabling that here is + * overkill. + */ + if (fence_is_later(f1, f2)) + return fence_is_signaled(f1) ? NULL : f1; + else + return fence_is_signaled(f2) ? 
NULL : f2; +} + +signed long fence_wait_timeout(struct fence *, bool intr, signed long timeout); +signed long fence_wait_any_timeout(struct fence **fences, uint32_t count, + bool intr, signed long timeout); + +/** + * fence_wait - sleep until the fence gets signaled + * @fence: [in] the fence to wait on + * @intr: [in] if true, do an interruptible wait + * + * This function will return -ERESTARTSYS if interrupted by a signal, + * or 0 if the fence was signaled. Other error values may be + * returned on custom implementations. + * + * Performs a synchronous wait on this fence. It is assumed the caller + * directly or indirectly holds a reference to the fence, otherwise the + * fence might be freed before return, resulting in undefined behavior. + */ +static inline signed long fence_wait(struct fence *fence, bool intr) +{ + signed long ret; + + /* Since fence_wait_timeout cannot timeout with + * MAX_SCHEDULE_TIMEOUT, only valid return values are + * -ERESTARTSYS and MAX_SCHEDULE_TIMEOUT. + */ + ret = fence_wait_timeout(fence, intr, MAX_SCHEDULE_TIMEOUT); + + return ret < 0 ? ret : 0; +} + +u64 fence_context_alloc(unsigned num); + +#define FENCE_TRACE(f, fmt, args...) \ + do { \ + struct fence *__ff = (f); \ + if (IS_ENABLED(CONFIG_FENCE_TRACE)) \ + pr_info("f %llu#%u: " fmt, \ + __ff->context, __ff->seqno, ##args); \ + } while (0) + +#define FENCE_WARN(f, fmt, args...) \ + do { \ + struct fence *__ff = (f); \ + pr_warn("f %llu#%u: " fmt, __ff->context, __ff->seqno, \ + ##args); \ + } while (0) + +#define FENCE_ERR(f, fmt, args...) \ + do { \ + struct fence *__ff = (f); \ + pr_err("f %llu#%u: " fmt, __ff->context, __ff->seqno, \ + ##args); \ + } while (0) + +#endif /* __LINUX_FENCE_H */ diff --git a/include/linux/file.h b/include/linux/file.h index 51e830b4fe..7444f5feda 100644 --- a/include/linux/file.h +++ b/include/linux/file.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ /* * Wrapper functions for accessing the file_struct fd array. 
*/ @@ -9,23 +8,17 @@ #include #include #include -#include struct file; extern void fput(struct file *); -extern void fput_many(struct file *, unsigned int); struct file_operations; -struct task_struct; struct vfsmount; struct dentry; -struct inode; struct path; -extern struct file *alloc_file_pseudo(struct inode *, struct vfsmount *, - const char *, int flags, const struct file_operations *); -extern struct file *alloc_file_clone(struct file *, int flags, - const struct file_operations *); +extern struct file *alloc_file(struct path *, fmode_t mode, + const struct file_operations *fop); static inline void fput_light(struct file *file, int fput_needed) { @@ -47,9 +40,7 @@ static inline void fdput(struct fd fd) } extern struct file *fget(unsigned int fd); -extern struct file *fget_many(unsigned int fd, unsigned int refs); extern struct file *fget_raw(unsigned int fd); -extern struct file *fget_task(struct task_struct *task, unsigned int fd); extern unsigned long __fdget(unsigned int fd); extern unsigned long __fdget_raw(unsigned int fd); extern unsigned long __fdget_pos(unsigned int fd); @@ -86,29 +77,13 @@ extern int f_dupfd(unsigned int from, struct file *file, unsigned flags); extern int replace_fd(unsigned fd, struct file *file, unsigned flags); extern void set_close_on_exec(unsigned int fd, int flag); extern bool get_close_on_exec(unsigned int fd); -extern int __get_unused_fd_flags(unsigned flags, unsigned long nofile); +extern void put_filp(struct file *); extern int get_unused_fd_flags(unsigned flags); extern void put_unused_fd(unsigned int fd); extern void fd_install(unsigned int fd, struct file *file); -extern int __receive_fd(struct file *file, int __user *ufd, - unsigned int o_flags); - -extern int receive_fd(struct file *file, unsigned int o_flags); - -static inline int receive_fd_user(struct file *file, int __user *ufd, - unsigned int o_flags) -{ - if (ufd == NULL) - return -EFAULT; - return __receive_fd(file, ufd, o_flags); -} -int 
receive_fd_replace(int new_fd, struct file *file, unsigned int o_flags); - extern void flush_delayed_fput(void); extern void __fput_sync(struct file *); -extern unsigned int sysctl_nr_open_min, sysctl_nr_open_max; - #endif /* __LINUX_FILE_H */ diff --git a/include/linux/filter.h b/include/linux/filter.h index ef03ff3423..1f09c521ad 100644 --- a/include/linux/filter.h +++ b/include/linux/filter.h @@ -1,12 +1,12 @@ -/* SPDX-License-Identifier: GPL-2.0 */ /* * Linux Socket Filter Data Structures */ #ifndef __LINUX_FILTER_H__ #define __LINUX_FILTER_H__ +#include + #include -#include #include #include #include @@ -14,17 +14,11 @@ #include #include #include -#include -#include -#include -#include -#include -#include -#include #include -#include +#include + #include #include @@ -32,11 +26,6 @@ struct sk_buff; struct sock; struct seccomp_data; struct bpf_prog_aux; -struct xdp_rxq_info; -struct xdp_buff; -struct sock_reuseport; -struct ctl_table; -struct ctl_table_header; /* ArgX, context and stack frame pointer register positions. Note, * Arg1, Arg2, Arg3, etc are used as argument mappings of function @@ -53,34 +42,16 @@ struct ctl_table_header; /* Additional register mappings for converted user programs. */ #define BPF_REG_A BPF_REG_0 #define BPF_REG_X BPF_REG_7 -#define BPF_REG_TMP BPF_REG_2 /* scratch reg */ -#define BPF_REG_D BPF_REG_8 /* data, callee-saved */ -#define BPF_REG_H BPF_REG_9 /* hlen, callee-saved */ +#define BPF_REG_TMP BPF_REG_8 -/* Kernel hidden auxiliary/helper register. */ +/* Kernel hidden auxiliary/helper register for hardening step. + * Only used by eBPF JITs. It's nothing more than a temporary + * register that JITs use internally, only that here it's part + * of eBPF instructions that have been rewritten for blinding + * constants. See JIT pre-step in bpf_jit_blind_constants(). 
+ */ #define BPF_REG_AX MAX_BPF_REG -#define MAX_BPF_EXT_REG (MAX_BPF_REG + 1) -#define MAX_BPF_JIT_REG MAX_BPF_EXT_REG - -/* unused opcode to mark special call to bpf_tail_call() helper */ -#define BPF_TAIL_CALL 0xf0 - -/* unused opcode to mark special load instruction. Same as BPF_ABS */ -#define BPF_PROBE_MEM 0x20 - -/* unused opcode to mark call to interpreter with arguments */ -#define BPF_CALL_ARGS 0xe0 - -/* unused opcode to mark speculation barrier for mitigating - * Speculative Store Bypass - */ -#define BPF_NOSPEC 0xc0 - -/* As per nm, we expose JITed images as text (code) section for - * kallsyms. That way, tools like perf can find it to match - * addresses. - */ -#define BPF_SYM_ELF_TYPE 't' +#define MAX_BPF_JIT_REG (MAX_BPF_REG + 1) /* BPF program can access up to 512 bytes of stack space. */ #define MAX_BPF_STACK 512 @@ -169,20 +140,6 @@ struct ctl_table_header; .off = 0, \ .imm = IMM }) -/* Special form of mov32, used for doing explicit zero extension on dst. */ -#define BPF_ZEXT_REG(DST) \ - ((struct bpf_insn) { \ - .code = BPF_ALU | BPF_MOV | BPF_X, \ - .dst_reg = DST, \ - .src_reg = DST, \ - .off = 0, \ - .imm = 1 }) - -static inline bool insn_is_zext(const struct bpf_insn *insn) -{ - return insn->code == (BPF_ALU | BPF_MOV | BPF_X) && insn->imm == 1; -} - /* BPF_LD_IMM64 macro encodes single 'load 64-bit immediate' insn */ #define BPF_LD_IMM64(DST, IMM) \ BPF_LD_IMM64_RAW(DST, 0, IMM) @@ -263,32 +220,15 @@ static inline bool insn_is_zext(const struct bpf_insn *insn) .off = OFF, \ .imm = 0 }) +/* Atomic memory add, *(uint *)(dst_reg + off16) += src_reg */ -/* - * Atomic operations: - * - * BPF_ADD *(uint *) (dst_reg + off16) += src_reg - * BPF_AND *(uint *) (dst_reg + off16) &= src_reg - * BPF_OR *(uint *) (dst_reg + off16) |= src_reg - * BPF_XOR *(uint *) (dst_reg + off16) ^= src_reg - * BPF_ADD | BPF_FETCH src_reg = atomic_fetch_add(dst_reg + off16, src_reg); - * BPF_AND | BPF_FETCH src_reg = atomic_fetch_and(dst_reg + off16, src_reg); - * 
BPF_OR | BPF_FETCH src_reg = atomic_fetch_or(dst_reg + off16, src_reg); - * BPF_XOR | BPF_FETCH src_reg = atomic_fetch_xor(dst_reg + off16, src_reg); - * BPF_XCHG src_reg = atomic_xchg(dst_reg + off16, src_reg) - * BPF_CMPXCHG r0 = atomic_cmpxchg(dst_reg + off16, r0, src_reg) - */ - -#define BPF_ATOMIC_OP(SIZE, OP, DST, SRC, OFF) \ +#define BPF_STX_XADD(SIZE, DST, SRC, OFF) \ ((struct bpf_insn) { \ - .code = BPF_STX | BPF_SIZE(SIZE) | BPF_ATOMIC, \ + .code = BPF_STX | BPF_SIZE(SIZE) | BPF_XADD, \ .dst_reg = DST, \ .src_reg = SRC, \ .off = OFF, \ - .imm = OP }) - -/* Legacy alias */ -#define BPF_STX_XADD(SIZE, DST, SRC, OFF) BPF_ATOMIC_OP(SIZE, BPF_ADD, DST, SRC, OFF) + .imm = 0 }) /* Memory store, *(uint *) (dst_reg + off16) = imm32 */ @@ -320,51 +260,8 @@ static inline bool insn_is_zext(const struct bpf_insn *insn) .off = OFF, \ .imm = IMM }) -/* Like BPF_JMP_REG, but with 32-bit wide operands for comparison. */ - -#define BPF_JMP32_REG(OP, DST, SRC, OFF) \ - ((struct bpf_insn) { \ - .code = BPF_JMP32 | BPF_OP(OP) | BPF_X, \ - .dst_reg = DST, \ - .src_reg = SRC, \ - .off = OFF, \ - .imm = 0 }) - -/* Like BPF_JMP_IMM, but with 32-bit wide operands for comparison. 
*/ - -#define BPF_JMP32_IMM(OP, DST, IMM, OFF) \ - ((struct bpf_insn) { \ - .code = BPF_JMP32 | BPF_OP(OP) | BPF_K, \ - .dst_reg = DST, \ - .src_reg = 0, \ - .off = OFF, \ - .imm = IMM }) - -/* Unconditional jumps, goto pc + off16 */ - -#define BPF_JMP_A(OFF) \ - ((struct bpf_insn) { \ - .code = BPF_JMP | BPF_JA, \ - .dst_reg = 0, \ - .src_reg = 0, \ - .off = OFF, \ - .imm = 0 }) - -/* Relative call */ - -#define BPF_CALL_REL(TGT) \ - ((struct bpf_insn) { \ - .code = BPF_JMP | BPF_CALL, \ - .dst_reg = 0, \ - .src_reg = BPF_PSEUDO_CALL, \ - .off = 0, \ - .imm = TGT }) - /* Function call */ -#define BPF_CAST_CALL(x) \ - ((u64 (*)(u64, u64, u64, u64, u64))(x)) - #define BPF_EMIT_CALL(FUNC) \ ((struct bpf_insn) { \ .code = BPF_JMP | BPF_CALL, \ @@ -393,16 +290,6 @@ static inline bool insn_is_zext(const struct bpf_insn *insn) .off = 0, \ .imm = 0 }) -/* Speculation barrier */ - -#define BPF_ST_NOSPEC() \ - ((struct bpf_insn) { \ - .code = BPF_ST | BPF_NOSPEC, \ - .dst_reg = 0, \ - .src_reg = 0, \ - .off = 0, \ - .imm = 0 }) - /* Internal classic blocks for direct assignment */ #define __BPF_STMT(CODE, K) \ @@ -427,22 +314,6 @@ static inline bool insn_is_zext(const struct bpf_insn *insn) bpf_size; \ }) -#define bpf_size_to_bytes(bpf_size) \ -({ \ - int bytes = -EINVAL; \ - \ - if (bpf_size == BPF_B) \ - bytes = sizeof(u8); \ - else if (bpf_size == BPF_H) \ - bytes = sizeof(u16); \ - else if (bpf_size == BPF_W) \ - bytes = sizeof(u32); \ - else if (bpf_size == BPF_DW) \ - bytes = sizeof(u64); \ - \ - bytes; \ -}) - #define BPF_SIZEOF(type) \ ({ \ const int __size = bytes_to_bpf_size(sizeof(type)); \ @@ -452,18 +323,11 @@ static inline bool insn_is_zext(const struct bpf_insn *insn) #define BPF_FIELD_SIZEOF(type, field) \ ({ \ - const int __size = bytes_to_bpf_size(sizeof_field(type, field)); \ + const int __size = bytes_to_bpf_size(FIELD_SIZEOF(type, field)); \ BUILD_BUG_ON(__size < 0); \ __size; \ }) -#define BPF_LDST_BYTES(insn) \ - ({ \ - const int __size = 
bpf_size_to_bytes(BPF_SIZE((insn)->code)); \ - WARN_ON(__size < 0); \ - __size; \ - }) - #define __BPF_MAP_0(m, v, ...) v #define __BPF_MAP_1(m, v, t, a, ...) m(t, a) #define __BPF_MAP_2(m, v, t, a, ...) m(t, a), __BPF_MAP_1(m, v, __VA_ARGS__) @@ -499,11 +363,10 @@ static inline bool insn_is_zext(const struct bpf_insn *insn) #define BPF_CALL_x(x, name, ...) \ static __always_inline \ u64 ____##name(__BPF_MAP(x, __BPF_DECL_ARGS, __BPF_V, __VA_ARGS__)); \ - typedef u64 (*btf_##name)(__BPF_MAP(x, __BPF_DECL_ARGS, __BPF_V, __VA_ARGS__)); \ u64 name(__BPF_REG(x, __BPF_DECL_REGS, __BPF_N, __VA_ARGS__)); \ u64 name(__BPF_REG(x, __BPF_DECL_REGS, __BPF_N, __VA_ARGS__)) \ { \ - return ((btf_##name)____##name)(__BPF_MAP(x,__BPF_CAST,__BPF_N,__VA_ARGS__));\ + return ____##name(__BPF_MAP(x,__BPF_CAST,__BPF_N,__VA_ARGS__));\ } \ static __always_inline \ u64 ____##name(__BPF_MAP(x, __BPF_DECL_ARGS, __BPF_V, __VA_ARGS__)) @@ -515,213 +378,77 @@ static inline bool insn_is_zext(const struct bpf_insn *insn) #define BPF_CALL_4(name, ...) BPF_CALL_x(4, name, __VA_ARGS__) #define BPF_CALL_5(name, ...) BPF_CALL_x(5, name, __VA_ARGS__) -#define bpf_ctx_range(TYPE, MEMBER) \ - offsetof(TYPE, MEMBER) ... offsetofend(TYPE, MEMBER) - 1 -#define bpf_ctx_range_till(TYPE, MEMBER1, MEMBER2) \ - offsetof(TYPE, MEMBER1) ... offsetofend(TYPE, MEMBER2) - 1 -#if BITS_PER_LONG == 64 -# define bpf_ctx_range_ptr(TYPE, MEMBER) \ - offsetof(TYPE, MEMBER) ... offsetofend(TYPE, MEMBER) - 1 -#else -# define bpf_ctx_range_ptr(TYPE, MEMBER) \ - offsetof(TYPE, MEMBER) ... offsetof(TYPE, MEMBER) + 8 - 1 -#endif /* BITS_PER_LONG == 64 */ - -#define bpf_target_off(TYPE, MEMBER, SIZE, PTR_SIZE) \ - ({ \ - BUILD_BUG_ON(sizeof_field(TYPE, MEMBER) != (SIZE)); \ - *(PTR_SIZE) = (SIZE); \ - offsetof(TYPE, MEMBER); \ - }) - +#ifdef CONFIG_COMPAT /* A struct sock_filter is architecture independent. 
*/ struct compat_sock_fprog { u16 len; compat_uptr_t filter; /* struct sock_filter * */ }; +#endif struct sock_fprog_kern { u16 len; struct sock_filter *filter; }; -/* Some arches need doubleword alignment for their instructions and/or data */ -#define BPF_IMAGE_ALIGNMENT 8 - struct bpf_binary_header { - u32 pages; - u8 image[] __aligned(BPF_IMAGE_ALIGNMENT); + unsigned int pages; + u8 image[]; }; -struct bpf_prog_stats { - u64 cnt; - u64 nsecs; - u64 misses; - struct u64_stats_sync syncp; -} __aligned(2 * sizeof(u64)); - struct bpf_prog { u16 pages; /* Number of allocated pages */ + kmemcheck_bitfield_begin(meta); u16 jited:1, /* Is our filter JIT'ed? */ - jit_requested:1,/* archs need to JIT the prog */ gpl_compatible:1, /* Is filter GPL compatible? */ cb_access:1, /* Is control block accessed? */ - dst_needed:1, /* Do we need dst entry? */ - blinded:1, /* Was blinded */ - is_func:1, /* program is a bpf function */ - kprobe_override:1, /* Do we override a kprobe? */ - has_callchain_buf:1, /* callchain buffer allocated? */ - enforce_expected_attach_type:1, /* Enforce expected_attach_type checking at attach time */ - call_get_stack:1, /* Do we call bpf_get_stack() or bpf_get_stackid() */ - call_get_func_ip:1; /* Do we call get_func_ip() */ - enum bpf_prog_type type; /* Type of BPF program */ - enum bpf_attach_type expected_attach_type; /* For some prog types */ + dst_needed:1; /* Do we need dst entry? 
*/ + kmemcheck_bitfield_end(meta); u32 len; /* Number of filter blocks */ - u32 jited_len; /* Size of jited insns in bytes */ - u8 tag[BPF_TAG_SIZE]; - struct bpf_prog_stats __percpu *stats; - int __percpu *active; - unsigned int (*bpf_func)(const void *ctx, - const struct bpf_insn *insn); + enum bpf_prog_type type; /* Type of BPF program */ struct bpf_prog_aux *aux; /* Auxiliary fields */ struct sock_fprog_kern *orig_prog; /* Original BPF program */ + unsigned int (*bpf_func)(const struct sk_buff *skb, + const struct bpf_insn *filter); /* Instructions for interpreter */ - struct sock_filter insns[0]; - struct bpf_insn insnsi[]; + union { + struct sock_filter insns[0]; + struct bpf_insn insnsi[0]; + }; }; struct sk_filter { - refcount_t refcnt; + atomic_t refcnt; struct rcu_head rcu; struct bpf_prog *prog; }; -DECLARE_STATIC_KEY_FALSE(bpf_stats_enabled_key); - -typedef unsigned int (*bpf_dispatcher_fn)(const void *ctx, - const struct bpf_insn *insnsi, - unsigned int (*bpf_func)(const void *, - const struct bpf_insn *)); - -static __always_inline u32 __bpf_prog_run(const struct bpf_prog *prog, - const void *ctx, - bpf_dispatcher_fn dfunc) -{ - u32 ret; - - cant_migrate(); - if (static_branch_unlikely(&bpf_stats_enabled_key)) { - struct bpf_prog_stats *stats; - u64 start = sched_clock(); - - ret = dfunc(ctx, prog->insnsi, prog->bpf_func); - stats = this_cpu_ptr(prog->stats); - u64_stats_update_begin(&stats->syncp); - stats->cnt++; - stats->nsecs += sched_clock() - start; - u64_stats_update_end(&stats->syncp); - } else { - ret = dfunc(ctx, prog->insnsi, prog->bpf_func); - } - return ret; -} - -static __always_inline u32 bpf_prog_run(const struct bpf_prog *prog, const void *ctx) -{ - return __bpf_prog_run(prog, ctx, bpf_dispatcher_nop_func); -} - -/* - * Use in preemptible and therefore migratable context to make sure that - * the execution of the BPF program runs on one CPU. 
- * - * This uses migrate_disable/enable() explicitly to document that the - * invocation of a BPF program does not require reentrancy protection - * against a BPF program which is invoked from a preempting task. - * - * For non RT enabled kernels migrate_disable/enable() maps to - * preempt_disable/enable(), i.e. it disables also preemption. - */ -static inline u32 bpf_prog_run_pin_on_cpu(const struct bpf_prog *prog, - const void *ctx) -{ - u32 ret; - - migrate_disable(); - ret = bpf_prog_run(prog, ctx); - migrate_enable(); - return ret; -} +#define BPF_PROG_RUN(filter, ctx) (*filter->bpf_func)(ctx, filter->insnsi) #define BPF_SKB_CB_LEN QDISC_CB_PRIV_LEN struct bpf_skb_data_end { struct qdisc_skb_cb qdisc_cb; - void *data_meta; void *data_end; }; -struct bpf_nh_params { - u32 nh_family; - union { - u32 ipv4_nh; - struct in6_addr ipv6_nh; - }; +struct xdp_buff { + void *data; + void *data_end; }; -struct bpf_redirect_info { - u32 flags; - u32 tgt_index; - void *tgt_value; - struct bpf_map *map; - u32 map_id; - enum bpf_map_type map_type; - u32 kern_flags; - struct bpf_nh_params nh; -}; - -DECLARE_PER_CPU(struct bpf_redirect_info, bpf_redirect_info); - -/* flags for bpf_redirect_info kern_flags */ -#define BPF_RI_F_RF_NO_DIRECT BIT(0) /* no napi_direct on return_frame */ - -/* Compute the linear packet data range [data, data_end) which - * will be accessed by various program types (cls_bpf, act_bpf, - * lwt, ...). Subsystems allowing direct data access must (!) - * ensure that cb[] area can be written to when BPF program is - * invoked (otherwise cb[] save/restore is necessary). 
+/* compute the linear packet data range [data, data_end) which + * will be accessed by cls_bpf and act_bpf programs */ -static inline void bpf_compute_data_pointers(struct sk_buff *skb) +static inline void bpf_compute_data_end(struct sk_buff *skb) { struct bpf_skb_data_end *cb = (struct bpf_skb_data_end *)skb->cb; - BUILD_BUG_ON(sizeof(*cb) > sizeof_field(struct sk_buff, cb)); - cb->data_meta = skb->data - skb_metadata_len(skb); - cb->data_end = skb->data + skb_headlen(skb); + BUILD_BUG_ON(sizeof(*cb) > FIELD_SIZEOF(struct sk_buff, cb)); + cb->data_end = skb->data + skb_headlen(skb); } -/* Similar to bpf_compute_data_pointers(), except that save orginal - * data in cb->data and cb->meta_data for restore. - */ -static inline void bpf_compute_and_save_data_end( - struct sk_buff *skb, void **saved_data_end) -{ - struct bpf_skb_data_end *cb = (struct bpf_skb_data_end *)skb->cb; - - *saved_data_end = cb->data_end; - cb->data_end = skb->data + skb_headlen(skb); -} - -/* Restore data saved by bpf_compute_data_pointers(). */ -static inline void bpf_restore_data_end( - struct sk_buff *skb, void *saved_data_end) -{ - struct bpf_skb_data_end *cb = (struct bpf_skb_data_end *)skb->cb; - - cb->data_end = saved_data_end; -} - -static inline u8 *bpf_skb_cb(const struct sk_buff *skb) +static inline u8 *bpf_skb_cb(struct sk_buff *skb) { /* eBPF programs may read/write skb->cb[] area to transfer meta * data between tail calls. Since this also needs to work with @@ -733,18 +460,16 @@ static inline u8 *bpf_skb_cb(const struct sk_buff *skb) * attached to sockets, we need to clear the bpf_skb_cb() area * to not leak previous contents to user space. 
*/ - BUILD_BUG_ON(sizeof_field(struct __sk_buff, cb) != BPF_SKB_CB_LEN); - BUILD_BUG_ON(sizeof_field(struct __sk_buff, cb) != - sizeof_field(struct qdisc_skb_cb, data)); + BUILD_BUG_ON(FIELD_SIZEOF(struct __sk_buff, cb) != BPF_SKB_CB_LEN); + BUILD_BUG_ON(FIELD_SIZEOF(struct __sk_buff, cb) != + FIELD_SIZEOF(struct qdisc_skb_cb, data)); return qdisc_skb_cb(skb)->data; } -/* Must be invoked with migration disabled */ -static inline u32 __bpf_prog_run_save_cb(const struct bpf_prog *prog, - const void *ctx) +static inline u32 bpf_prog_run_save_cb(const struct bpf_prog *prog, + struct sk_buff *skb) { - const struct sk_buff *skb = ctx; u8 *cb_data = bpf_skb_cb(skb); u8 cb_saved[BPF_SKB_CB_LEN]; u32 res; @@ -754,7 +479,7 @@ static inline u32 __bpf_prog_run_save_cb(const struct bpf_prog *prog, memset(cb_data, 0, sizeof(cb_saved)); } - res = bpf_prog_run(prog, skb); + res = BPF_PROG_RUN(prog, skb); if (unlikely(prog->cb_access)) memcpy(cb_data, cb_saved, sizeof(cb_saved)); @@ -762,64 +487,27 @@ static inline u32 __bpf_prog_run_save_cb(const struct bpf_prog *prog, return res; } -static inline u32 bpf_prog_run_save_cb(const struct bpf_prog *prog, - struct sk_buff *skb) -{ - u32 res; - - migrate_disable(); - res = __bpf_prog_run_save_cb(prog, skb); - migrate_enable(); - return res; -} - static inline u32 bpf_prog_run_clear_cb(const struct bpf_prog *prog, struct sk_buff *skb) { u8 *cb_data = bpf_skb_cb(skb); - u32 res; if (unlikely(prog->cb_access)) memset(cb_data, 0, BPF_SKB_CB_LEN); - res = bpf_prog_run_pin_on_cpu(prog, skb); - return res; + return BPF_PROG_RUN(prog, skb); } -DECLARE_BPF_DISPATCHER(xdp) - -DECLARE_STATIC_KEY_FALSE(bpf_master_redirect_enabled_key); - -u32 xdp_master_redirect(struct xdp_buff *xdp); - -static __always_inline u32 bpf_prog_run_xdp(const struct bpf_prog *prog, - struct xdp_buff *xdp) +static inline u32 bpf_prog_run_xdp(const struct bpf_prog *prog, + struct xdp_buff *xdp) { - /* Driver XDP hooks are invoked within a single NAPI poll cycle and thus - 
* under local_bh_disable(), which provides the needed RCU protection - * for accessing map entries. - */ - u32 act = __bpf_prog_run(prog, xdp, BPF_DISPATCHER_FUNC(xdp)); + u32 ret; - if (static_branch_unlikely(&bpf_master_redirect_enabled_key)) { - if (act == XDP_TX && netif_is_bond_slave(xdp->rxq->dev)) - act = xdp_master_redirect(xdp); - } + rcu_read_lock(); + ret = BPF_PROG_RUN(prog, (void *)xdp); + rcu_read_unlock(); - return act; -} - -void bpf_prog_change_xdp(struct bpf_prog *prev_prog, struct bpf_prog *prog); - -static inline u32 bpf_prog_insn_size(const struct bpf_prog *prog) -{ - return prog->len * sizeof(struct bpf_insn); -} - -static inline u32 bpf_prog_tag_scratch_size(const struct bpf_prog *prog) -{ - return round_up(bpf_prog_insn_size(prog) + - sizeof(__be64) + 1, SHA1_BLOCK_SIZE); + return ret; } static inline unsigned int bpf_prog_size(unsigned int proglen) @@ -838,68 +526,28 @@ static inline bool bpf_prog_was_classic(const struct bpf_prog *prog) return prog->type == BPF_PROG_TYPE_UNSPEC; } -static inline u32 bpf_ctx_off_adjust_machine(u32 size) -{ - const u32 size_machine = sizeof(unsigned long); - - if (size > size_machine && size % size_machine == 0) - size = size_machine; - - return size; -} - -static inline bool -bpf_ctx_narrow_access_ok(u32 off, u32 size, u32 size_default) -{ - return size <= size_default && (size & (size - 1)) == 0; -} - -static inline u8 -bpf_ctx_narrow_access_offset(u32 off, u32 size, u32 size_default) -{ - u8 access_off = off & (size_default - 1); - -#ifdef __LITTLE_ENDIAN - return access_off; -#else - return size_default - (access_off + size); -#endif -} - -#define bpf_ctx_wide_access_ok(off, size, type, field) \ - (size == sizeof(__u64) && \ - off >= offsetof(type, field) && \ - off + sizeof(__u64) <= offsetofend(type, field) && \ - off % sizeof(__u64) == 0) - #define bpf_classic_proglen(fprog) (fprog->len * sizeof(fprog->filter[0])) +#ifdef CONFIG_DEBUG_SET_MODULE_RONX static inline void bpf_prog_lock_ro(struct bpf_prog 
*fp) { -#ifndef CONFIG_BPF_JIT_ALWAYS_ON - if (!fp->jited) { - set_vm_flush_reset_perms(fp); - set_memory_ro((unsigned long)fp, fp->pages); - } -#endif + set_memory_ro((unsigned long)fp, fp->pages); } -static inline void bpf_jit_binary_lock_ro(struct bpf_binary_header *hdr) +static inline void bpf_prog_unlock_ro(struct bpf_prog *fp) { - set_vm_flush_reset_perms(hdr); - set_memory_ro((unsigned long)hdr, hdr->pages); - set_memory_x((unsigned long)hdr, hdr->pages); + set_memory_rw((unsigned long)fp, fp->pages); } - -static inline struct bpf_binary_header * -bpf_jit_binary_hdr(const struct bpf_prog *fp) +#else +static inline void bpf_prog_lock_ro(struct bpf_prog *fp) { - unsigned long real_start = (unsigned long)fp->bpf_func; - unsigned long addr = real_start & PAGE_MASK; - - return (void *)addr; } +static inline void bpf_prog_unlock_ro(struct bpf_prog *fp) +{ +} +#endif /* CONFIG_DEBUG_SET_MODULE_RONX */ + int sk_filter_trim_cap(struct sock *sk, struct sk_buff *skb, unsigned int cap); static inline int sk_filter(struct sock *sk, struct sk_buff *skb) { @@ -909,22 +557,14 @@ static inline int sk_filter(struct sock *sk, struct sk_buff *skb) struct bpf_prog *bpf_prog_select_runtime(struct bpf_prog *fp, int *err); void bpf_prog_free(struct bpf_prog *fp); -bool bpf_opcode_in_insntable(u8 code); - -void bpf_prog_free_linfo(struct bpf_prog *prog); -void bpf_prog_fill_jited_linfo(struct bpf_prog *prog, - const u32 *insn_to_jit_off); -int bpf_prog_alloc_jited_linfo(struct bpf_prog *prog); -void bpf_prog_jit_attempt_done(struct bpf_prog *prog); - struct bpf_prog *bpf_prog_alloc(unsigned int size, gfp_t gfp_extra_flags); -struct bpf_prog *bpf_prog_alloc_no_stats(unsigned int size, gfp_t gfp_extra_flags); struct bpf_prog *bpf_prog_realloc(struct bpf_prog *fp_old, unsigned int size, gfp_t gfp_extra_flags); void __bpf_prog_free(struct bpf_prog *fp); static inline void bpf_prog_unlock_free(struct bpf_prog *fp) { + bpf_prog_unlock_ro(fp); __bpf_prog_free(fp); } @@ -940,7 +580,6 @@ int 
sk_attach_filter(struct sock_fprog *fprog, struct sock *sk); int sk_attach_bpf(u32 ufd, struct sock *sk); int sk_reuseport_attach_filter(struct sock_fprog *fprog, struct sock *sk); int sk_reuseport_attach_bpf(u32 ufd, struct sock *sk); -void sk_reuseport_prog_free(struct bpf_prog *prog); int sk_detach_filter(struct sock *sk); int sk_get_filter(struct sock *sk, struct sock_filter __user *filter, unsigned int len); @@ -949,109 +588,17 @@ bool sk_filter_charge(struct sock *sk, struct sk_filter *fp); void sk_filter_uncharge(struct sock *sk, struct sk_filter *fp); u64 __bpf_call_base(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5); -#define __bpf_call_base_args \ - ((u64 (*)(u64, u64, u64, u64, u64, const struct bpf_insn *)) \ - (void *)__bpf_call_base) struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog); -void bpf_jit_compile(struct bpf_prog *prog); -bool bpf_jit_needs_zext(void); -bool bpf_jit_supports_kfunc_call(void); -bool bpf_helper_changes_pkt_data(void *func); - -static inline bool bpf_dump_raw_ok(const struct cred *cred) -{ - /* Reconstruction of call-sites is dependent on kallsyms, - * thus make dump the same restriction. 
- */ - return kallsyms_show_value(cred); -} +bool bpf_helper_changes_skb_data(void *func); struct bpf_prog *bpf_patch_insn_single(struct bpf_prog *prog, u32 off, const struct bpf_insn *patch, u32 len); -int bpf_remove_insns(struct bpf_prog *prog, u32 off, u32 cnt); - -void bpf_clear_redirect_map(struct bpf_map *map); - -static inline bool xdp_return_frame_no_direct(void) -{ - struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info); - - return ri->kern_flags & BPF_RI_F_RF_NO_DIRECT; -} - -static inline void xdp_set_return_frame_no_direct(void) -{ - struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info); - - ri->kern_flags |= BPF_RI_F_RF_NO_DIRECT; -} - -static inline void xdp_clear_return_frame_no_direct(void) -{ - struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info); - - ri->kern_flags &= ~BPF_RI_F_RF_NO_DIRECT; -} - -static inline int xdp_ok_fwd_dev(const struct net_device *fwd, - unsigned int pktlen) -{ - unsigned int len; - - if (unlikely(!(fwd->flags & IFF_UP))) - return -ENETDOWN; - - len = fwd->mtu + fwd->hard_header_len + VLAN_HLEN; - if (pktlen > len) - return -EMSGSIZE; - - return 0; -} - -/* The pair of xdp_do_redirect and xdp_do_flush MUST be called in the - * same cpu context. Further for best results no more than a single map - * for the do_redirect/do_flush pair should be used. This limitation is - * because we only track one map and force a flush when the map changes. - * This does not appear to be a real limitation for existing software. - */ -int xdp_do_generic_redirect(struct net_device *dev, struct sk_buff *skb, - struct xdp_buff *xdp, struct bpf_prog *prog); -int xdp_do_redirect(struct net_device *dev, - struct xdp_buff *xdp, - struct bpf_prog *prog); -void xdp_do_flush(void); - -/* The xdp_do_flush_map() helper has been renamed to drop the _map suffix, as - * it is no longer only flushing maps. Keep this define for compatibility - * until all drivers are updated - do not use xdp_do_flush_map() in new code! 
- */ -#define xdp_do_flush_map xdp_do_flush - void bpf_warn_invalid_xdp_action(u32 act); -#ifdef CONFIG_INET -struct sock *bpf_run_sk_reuseport(struct sock_reuseport *reuse, struct sock *sk, - struct bpf_prog *prog, struct sk_buff *skb, - struct sock *migrating_sk, - u32 hash); -#else -static inline struct sock * -bpf_run_sk_reuseport(struct sock_reuseport *reuse, struct sock *sk, - struct bpf_prog *prog, struct sk_buff *skb, - struct sock *migrating_sk, - u32 hash) -{ - return NULL; -} -#endif - #ifdef CONFIG_BPF_JIT extern int bpf_jit_enable; extern int bpf_jit_harden; -extern int bpf_jit_kallsyms; -extern long bpf_jit_limit; -extern long bpf_jit_limit_max; typedef void (*bpf_jit_fill_hole_t)(void *area, unsigned int size); @@ -1060,18 +607,10 @@ bpf_jit_binary_alloc(unsigned int proglen, u8 **image_ptr, unsigned int alignment, bpf_jit_fill_hole_t bpf_fill_ill_insns); void bpf_jit_binary_free(struct bpf_binary_header *hdr); -u64 bpf_jit_alloc_exec_limit(void); -void *bpf_jit_alloc_exec(unsigned long size); -void bpf_jit_free_exec(void *addr); + +void bpf_jit_compile(struct bpf_prog *fp); void bpf_jit_free(struct bpf_prog *fp); -int bpf_jit_add_poke_descriptor(struct bpf_prog *prog, - struct bpf_jit_poke_descriptor *poke); - -int bpf_jit_get_func_addr(const struct bpf_prog *prog, - const struct bpf_insn *insn, bool extra_pass, - u64 *func_addr, bool *func_addr_fixed); - struct bpf_prog *bpf_jit_blind_constants(struct bpf_prog *fp); void bpf_jit_prog_release_other(struct bpf_prog *fp, struct bpf_prog *fp_other); @@ -1095,17 +634,7 @@ static inline bool bpf_jit_is_ebpf(void) # endif } -static inline bool ebpf_jit_enabled(void) -{ - return bpf_jit_enable && bpf_jit_is_ebpf(); -} - -static inline bool bpf_prog_ebpf_jited(const struct bpf_prog *fp) -{ - return fp->jited && bpf_jit_is_ebpf(); -} - -static inline bool bpf_jit_blinding_enabled(struct bpf_prog *prog) +static inline bool bpf_jit_blinding_enabled(void) { /* These are the prerequisites, should someone ever 
have the * idea to call blinding outside of them, we make sure to @@ -1113,7 +642,7 @@ static inline bool bpf_jit_blinding_enabled(struct bpf_prog *prog) */ if (!bpf_jit_is_ebpf()) return false; - if (!prog->jit_requested) + if (!bpf_jit_enable) return false; if (!bpf_jit_harden) return false; @@ -1122,113 +651,17 @@ static inline bool bpf_jit_blinding_enabled(struct bpf_prog *prog) return true; } - -static inline bool bpf_jit_kallsyms_enabled(void) +#else +static inline void bpf_jit_compile(struct bpf_prog *fp) { - /* There are a couple of corner cases where kallsyms should - * not be enabled f.e. on hardening. - */ - if (bpf_jit_harden) - return false; - if (!bpf_jit_kallsyms) - return false; - if (bpf_jit_kallsyms == 1) - return true; - - return false; -} - -const char *__bpf_address_lookup(unsigned long addr, unsigned long *size, - unsigned long *off, char *sym); -bool is_bpf_text_address(unsigned long addr); -int bpf_get_kallsym(unsigned int symnum, unsigned long *value, char *type, - char *sym); - -static inline const char * -bpf_address_lookup(unsigned long addr, unsigned long *size, - unsigned long *off, char **modname, char *sym) -{ - const char *ret = __bpf_address_lookup(addr, size, off, sym); - - if (ret && modname) - *modname = NULL; - return ret; -} - -void bpf_prog_kallsyms_add(struct bpf_prog *fp); -void bpf_prog_kallsyms_del(struct bpf_prog *fp); - -#else /* CONFIG_BPF_JIT */ - -static inline bool ebpf_jit_enabled(void) -{ - return false; -} - -static inline bool bpf_jit_blinding_enabled(struct bpf_prog *prog) -{ - return false; -} - -static inline bool bpf_prog_ebpf_jited(const struct bpf_prog *fp) -{ - return false; -} - -static inline int -bpf_jit_add_poke_descriptor(struct bpf_prog *prog, - struct bpf_jit_poke_descriptor *poke) -{ - return -ENOTSUPP; } static inline void bpf_jit_free(struct bpf_prog *fp) { bpf_prog_unlock_free(fp); } - -static inline bool bpf_jit_kallsyms_enabled(void) -{ - return false; -} - -static inline const char * 
-__bpf_address_lookup(unsigned long addr, unsigned long *size, - unsigned long *off, char *sym) -{ - return NULL; -} - -static inline bool is_bpf_text_address(unsigned long addr) -{ - return false; -} - -static inline int bpf_get_kallsym(unsigned int symnum, unsigned long *value, - char *type, char *sym) -{ - return -ERANGE; -} - -static inline const char * -bpf_address_lookup(unsigned long addr, unsigned long *size, - unsigned long *off, char **modname, char *sym) -{ - return NULL; -} - -static inline void bpf_prog_kallsyms_add(struct bpf_prog *fp) -{ -} - -static inline void bpf_prog_kallsyms_del(struct bpf_prog *fp) -{ -} - #endif /* CONFIG_BPF_JIT */ -void bpf_prog_kallsyms_del_all(struct bpf_prog *fp); - #define BPF_ANC BIT(15) static inline bool bpf_needs_clear_a(const struct sock_filter *first) @@ -1278,7 +711,7 @@ static inline u16 bpf_anc_helper(const struct sock_filter *ftest) BPF_ANCILLARY(RANDOM); BPF_ANCILLARY(VLAN_TPID); } - fallthrough; + /* Fallthrough. */ default: return ftest->code; } @@ -1287,260 +720,18 @@ static inline u16 bpf_anc_helper(const struct sock_filter *ftest) void *bpf_internal_load_pointer_neg_helper(const struct sk_buff *skb, int k, unsigned int size); +static inline void *bpf_load_pointer(const struct sk_buff *skb, int k, + unsigned int size, void *buffer) +{ + if (k >= 0) + return skb_header_pointer(skb, k, size, buffer); + + return bpf_internal_load_pointer_neg_helper(skb, k, size); +} + static inline int bpf_tell_extensions(void) { return SKF_AD_MAX; } -struct bpf_sock_addr_kern { - struct sock *sk; - struct sockaddr *uaddr; - /* Temporary "register" to make indirect stores to nested structures - * defined above. We need three registers to make such a store, but - * only two (src and dst) are available at convert_ctx_access time - */ - u64 tmp_reg; - void *t_ctx; /* Attach type specific context. 
*/ -}; - -struct bpf_sock_ops_kern { - struct sock *sk; - union { - u32 args[4]; - u32 reply; - u32 replylong[4]; - }; - struct sk_buff *syn_skb; - struct sk_buff *skb; - void *skb_data_end; - u8 op; - u8 is_fullsock; - u8 remaining_opt_len; - u64 temp; /* temp and everything after is not - * initialized to 0 before calling - * the BPF program. New fields that - * should be initialized to 0 should - * be inserted before temp. - * temp is scratch storage used by - * sock_ops_convert_ctx_access - * as temporary storage of a register. - */ -}; - -struct bpf_sysctl_kern { - struct ctl_table_header *head; - struct ctl_table *table; - void *cur_val; - size_t cur_len; - void *new_val; - size_t new_len; - int new_updated; - int write; - loff_t *ppos; - /* Temporary "register" for indirect stores to ppos. */ - u64 tmp_reg; -}; - -#define BPF_SOCKOPT_KERN_BUF_SIZE 32 -struct bpf_sockopt_buf { - u8 data[BPF_SOCKOPT_KERN_BUF_SIZE]; -}; - -struct bpf_sockopt_kern { - struct sock *sk; - u8 *optval; - u8 *optval_end; - s32 level; - s32 optname; - s32 optlen; - s32 retval; -}; - -int copy_bpf_fprog_from_user(struct sock_fprog *dst, sockptr_t src, int len); - -struct bpf_sk_lookup_kern { - u16 family; - u16 protocol; - __be16 sport; - u16 dport; - struct { - __be32 saddr; - __be32 daddr; - } v4; - struct { - const struct in6_addr *saddr; - const struct in6_addr *daddr; - } v6; - struct sock *selected_sk; - bool no_reuseport; -}; - -extern struct static_key_false bpf_sk_lookup_enabled; - -/* Runners for BPF_SK_LOOKUP programs to invoke on socket lookup. - * - * Allowed return values for a BPF SK_LOOKUP program are SK_PASS and - * SK_DROP. 
Their meaning is as follows: - * - * SK_PASS && ctx.selected_sk != NULL: use selected_sk as lookup result - * SK_PASS && ctx.selected_sk == NULL: continue to htable-based socket lookup - * SK_DROP : terminate lookup with -ECONNREFUSED - * - * This macro aggregates return values and selected sockets from - * multiple BPF programs according to following rules in order: - * - * 1. If any program returned SK_PASS and a non-NULL ctx.selected_sk, - * macro result is SK_PASS and last ctx.selected_sk is used. - * 2. If any program returned SK_DROP return value, - * macro result is SK_DROP. - * 3. Otherwise result is SK_PASS and ctx.selected_sk is NULL. - * - * Caller must ensure that the prog array is non-NULL, and that the - * array as well as the programs it contains remain valid. - */ -#define BPF_PROG_SK_LOOKUP_RUN_ARRAY(array, ctx, func) \ - ({ \ - struct bpf_sk_lookup_kern *_ctx = &(ctx); \ - struct bpf_prog_array_item *_item; \ - struct sock *_selected_sk = NULL; \ - bool _no_reuseport = false; \ - struct bpf_prog *_prog; \ - bool _all_pass = true; \ - u32 _ret; \ - \ - migrate_disable(); \ - _item = &(array)->items[0]; \ - while ((_prog = READ_ONCE(_item->prog))) { \ - /* restore most recent selection */ \ - _ctx->selected_sk = _selected_sk; \ - _ctx->no_reuseport = _no_reuseport; \ - \ - _ret = func(_prog, _ctx); \ - if (_ret == SK_PASS && _ctx->selected_sk) { \ - /* remember last non-NULL socket */ \ - _selected_sk = _ctx->selected_sk; \ - _no_reuseport = _ctx->no_reuseport; \ - } else if (_ret == SK_DROP && _all_pass) { \ - _all_pass = false; \ - } \ - _item++; \ - } \ - _ctx->selected_sk = _selected_sk; \ - _ctx->no_reuseport = _no_reuseport; \ - migrate_enable(); \ - _all_pass || _selected_sk ? 
SK_PASS : SK_DROP; \ - }) - -static inline bool bpf_sk_lookup_run_v4(struct net *net, int protocol, - const __be32 saddr, const __be16 sport, - const __be32 daddr, const u16 dport, - struct sock **psk) -{ - struct bpf_prog_array *run_array; - struct sock *selected_sk = NULL; - bool no_reuseport = false; - - rcu_read_lock(); - run_array = rcu_dereference(net->bpf.run_array[NETNS_BPF_SK_LOOKUP]); - if (run_array) { - struct bpf_sk_lookup_kern ctx = { - .family = AF_INET, - .protocol = protocol, - .v4.saddr = saddr, - .v4.daddr = daddr, - .sport = sport, - .dport = dport, - }; - u32 act; - - act = BPF_PROG_SK_LOOKUP_RUN_ARRAY(run_array, ctx, bpf_prog_run); - if (act == SK_PASS) { - selected_sk = ctx.selected_sk; - no_reuseport = ctx.no_reuseport; - } else { - selected_sk = ERR_PTR(-ECONNREFUSED); - } - } - rcu_read_unlock(); - *psk = selected_sk; - return no_reuseport; -} - -#if IS_ENABLED(CONFIG_IPV6) -static inline bool bpf_sk_lookup_run_v6(struct net *net, int protocol, - const struct in6_addr *saddr, - const __be16 sport, - const struct in6_addr *daddr, - const u16 dport, - struct sock **psk) -{ - struct bpf_prog_array *run_array; - struct sock *selected_sk = NULL; - bool no_reuseport = false; - - rcu_read_lock(); - run_array = rcu_dereference(net->bpf.run_array[NETNS_BPF_SK_LOOKUP]); - if (run_array) { - struct bpf_sk_lookup_kern ctx = { - .family = AF_INET6, - .protocol = protocol, - .v6.saddr = saddr, - .v6.daddr = daddr, - .sport = sport, - .dport = dport, - }; - u32 act; - - act = BPF_PROG_SK_LOOKUP_RUN_ARRAY(run_array, ctx, bpf_prog_run); - if (act == SK_PASS) { - selected_sk = ctx.selected_sk; - no_reuseport = ctx.no_reuseport; - } else { - selected_sk = ERR_PTR(-ECONNREFUSED); - } - } - rcu_read_unlock(); - *psk = selected_sk; - return no_reuseport; -} -#endif /* IS_ENABLED(CONFIG_IPV6) */ - -static __always_inline int __bpf_xdp_redirect_map(struct bpf_map *map, u32 ifindex, - u64 flags, const u64 flag_mask, - void *lookup_elem(struct bpf_map *map, u32 
key)) -{ - struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info); - const u64 action_mask = XDP_ABORTED | XDP_DROP | XDP_PASS | XDP_TX; - - /* Lower bits of the flags are used as return code on lookup failure */ - if (unlikely(flags & ~(action_mask | flag_mask))) - return XDP_ABORTED; - - ri->tgt_value = lookup_elem(map, ifindex); - if (unlikely(!ri->tgt_value) && !(flags & BPF_F_BROADCAST)) { - /* If the lookup fails we want to clear out the state in the - * redirect_info struct completely, so that if an eBPF program - * performs multiple lookups, the last one always takes - * precedence. - */ - ri->map_id = INT_MAX; /* Valid map id idr range: [1,INT_MAX[ */ - ri->map_type = BPF_MAP_TYPE_UNSPEC; - return flags & action_mask; - } - - ri->tgt_index = ifindex; - ri->map_id = map->id; - ri->map_type = map->map_type; - - if (flags & BPF_F_BROADCAST) { - WRITE_ONCE(ri->map, map); - ri->flags = flags; - } else { - WRITE_ONCE(ri->map, NULL); - ri->flags = 0; - } - - return XDP_REDIRECT; -} - #endif /* __LINUX_FILTER_H__ */ diff --git a/include/linux/fips.h b/include/linux/fips.h index c6961e932f..f8fb07b0b6 100644 --- a/include/linux/fips.h +++ b/include/linux/fips.h @@ -1,18 +1,10 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _FIPS_H #define _FIPS_H #ifdef CONFIG_CRYPTO_FIPS extern int fips_enabled; -extern struct atomic_notifier_head fips_fail_notif_chain; - -void fips_fail_notify(void); - #else #define fips_enabled 0 - -static inline void fips_fail_notify(void) {} - #endif #endif diff --git a/include/linux/firewire.h b/include/linux/firewire.h index aec8f30ab2..9feb066729 100644 --- a/include/linux/firewire.h +++ b/include/linux/firewire.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_FIREWIRE_H #define _LINUX_FIREWIRE_H @@ -452,7 +451,7 @@ struct fw_iso_context { struct fw_iso_context *fw_iso_context_create(struct fw_card *card, int type, int channel, int speed, size_t header_size, - fw_iso_callback_t callback, void 
*callback_data); + void *callback, void *callback_data); int fw_iso_context_set_channels(struct fw_iso_context *ctx, u64 *channels); int fw_iso_context_queue(struct fw_iso_context *ctx, struct fw_iso_packet *packet, diff --git a/include/linux/firmware-map.h b/include/linux/firmware-map.h index 3e1077e990..71d4fa721d 100644 --- a/include/linux/firmware-map.h +++ b/include/linux/firmware-map.h @@ -1,8 +1,17 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * include/linux/firmware-map.h: * Copyright (C) 2008 SUSE LINUX Products GmbH * by Bernhard Walle + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License v2.0 as published by + * the Free Software Foundation + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * */ #ifndef _LINUX_FIRMWARE_MAP_H #define _LINUX_FIRMWARE_MAP_H diff --git a/include/linux/firmware.h b/include/linux/firmware.h index 25109192ce..b1f9f0ccb8 100644 --- a/include/linux/firmware.h +++ b/include/linux/firmware.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_FIRMWARE_H #define _LINUX_FIRMWARE_H @@ -6,12 +5,13 @@ #include #include -#define FW_ACTION_NOUEVENT 0 -#define FW_ACTION_UEVENT 1 +#define FW_ACTION_NOHOTPLUG 0 +#define FW_ACTION_HOTPLUG 1 struct firmware { size_t size; const u8 *data; + struct page **pages; /* firmware loader private fields */ void *priv; @@ -36,15 +36,11 @@ struct builtin_fw { #define DECLARE_BUILTIN_FIRMWARE_SIZE(name, blob, size) \ static const struct builtin_fw __fw_concat(__builtin_fw,__COUNTER__) \ - __used __section(".builtin_fw") = { name, blob, size } + __used __section(.builtin_fw) = { name, blob, size } #if defined(CONFIG_FW_LOADER) || (defined(CONFIG_FW_LOADER_MODULE) && defined(MODULE)) int 
request_firmware(const struct firmware **fw, const char *name, struct device *device); -int firmware_request_nowarn(const struct firmware **fw, const char *name, - struct device *device); -int firmware_request_platform(const struct firmware **fw, const char *name, - struct device *device); int request_firmware_nowait( struct module *module, bool uevent, const char *name, struct device *device, gfp_t gfp, void *context, @@ -53,9 +49,6 @@ int request_firmware_direct(const struct firmware **fw, const char *name, struct device *device); int request_firmware_into_buf(const struct firmware **firmware_p, const char *name, struct device *device, void *buf, size_t size); -int request_partial_firmware_into_buf(const struct firmware **firmware_p, - const char *name, struct device *device, - void *buf, size_t size, size_t offset); void release_firmware(const struct firmware *fw); #else @@ -65,21 +58,6 @@ static inline int request_firmware(const struct firmware **fw, { return -EINVAL; } - -static inline int firmware_request_nowarn(const struct firmware **fw, - const char *name, - struct device *device) -{ - return -EINVAL; -} - -static inline int firmware_request_platform(const struct firmware **fw, - const char *name, - struct device *device) -{ - return -EINVAL; -} - static inline int request_firmware_nowait( struct module *module, bool uevent, const char *name, struct device *device, gfp_t gfp, void *context, @@ -105,17 +83,5 @@ static inline int request_firmware_into_buf(const struct firmware **firmware_p, return -EINVAL; } -static inline int request_partial_firmware_into_buf - (const struct firmware **firmware_p, - const char *name, - struct device *device, - void *buf, size_t size, size_t offset) -{ - return -EINVAL; -} - #endif - -int firmware_request_cache(struct device *device, const char *name); - #endif diff --git a/include/linux/firmware/meson/meson_sm.h b/include/linux/firmware/meson/meson_sm.h index 95b0da2326..8e953c6f39 100644 --- 
a/include/linux/firmware/meson/meson_sm.h +++ b/include/linux/firmware/meson/meson_sm.h @@ -1,7 +1,13 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright (C) 2016 Endless Mobile, Inc. * Author: Carlo Caione + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * version 2 as published by the Free Software Foundation. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . */ #ifndef _MESON_SM_FW_H_ @@ -11,21 +17,15 @@ enum { SM_EFUSE_READ, SM_EFUSE_WRITE, SM_EFUSE_USER_MAX, - SM_GET_CHIP_ID, - SM_A1_PWRC_SET, - SM_A1_PWRC_GET, }; struct meson_sm_firmware; -int meson_sm_call(struct meson_sm_firmware *fw, unsigned int cmd_index, - u32 *ret, u32 arg0, u32 arg1, u32 arg2, u32 arg3, u32 arg4); -int meson_sm_call_write(struct meson_sm_firmware *fw, void *buffer, - unsigned int b_size, unsigned int cmd_index, u32 arg0, - u32 arg1, u32 arg2, u32 arg3, u32 arg4); -int meson_sm_call_read(struct meson_sm_firmware *fw, void *buffer, - unsigned int bsize, unsigned int cmd_index, u32 arg0, - u32 arg1, u32 arg2, u32 arg3, u32 arg4); -struct meson_sm_firmware *meson_sm_get(struct device_node *firmware_node); +int meson_sm_call(unsigned int cmd_index, u32 *ret, u32 arg0, u32 arg1, + u32 arg2, u32 arg3, u32 arg4); +int meson_sm_call_write(void *buffer, unsigned int b_size, unsigned int cmd_index, + u32 arg0, u32 arg1, u32 arg2, u32 arg3, u32 arg4); +int meson_sm_call_read(void *buffer, unsigned int cmd_index, u32 arg0, u32 arg1, + u32 arg2, u32 arg3, u32 arg4); #endif /* _MESON_SM_FW_H_ */ diff --git a/include/linux/fixp-arith.h b/include/linux/fixp-arith.h index 281cb4f83d..d4686fe1ca 100644 --- a/include/linux/fixp-arith.h +++ b/include/linux/fixp-arith.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ #ifndef _FIXP_ARITH_H #define _FIXP_ARITH_H @@ -12,6 +11,19 @@ */ /* + * This program is free software; you can 
redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * * Should you need to contact me, the author, you can do so by * e-mail - mail your message to @@ -141,23 +153,4 @@ static inline s32 fixp_sin32_rad(u32 radians, u32 twopi) #define fixp_cos32_rad(rad, twopi) \ fixp_sin32_rad(rad + twopi / 4, twopi) -/** - * fixp_linear_interpolate() - interpolates a value from two known points - * - * @x0: x value of point 0 - * @y0: y value of point 0 - * @x1: x value of point 1 - * @y1: y value of point 1 - * @x: the linear interpolant - */ -static inline int fixp_linear_interpolate(int x0, int y0, int x1, int y1, int x) -{ - if (y0 == y1 || x == x0) - return y0; - if (x1 == x0 || x == x1) - return y1; - - return y0 + ((y1 - y0) * (x - x0) / (x1 - x0)); -} - #endif diff --git a/include/linux/flat.h b/include/linux/flat.h index 83977c0ce3..2c1eb15c4b 100644 --- a/include/linux/flat.h +++ b/include/linux/flat.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ /* * Copyright (C) 2002-2003 David McCullough * Copyright (C) 1998 Kenneth Albanowski @@ -10,41 +9,8 @@ #ifndef _LINUX_FLAT_H #define _LINUX_FLAT_H -#define FLAT_VERSION 0x00000004L - -/* - * To make everything easier to port and manage cross platform - * development, all fields are in network byte order. 
- */ - -struct flat_hdr { - char magic[4]; - __be32 rev; /* version (as above) */ - __be32 entry; /* Offset of first executable instruction - with text segment from beginning of file */ - __be32 data_start; /* Offset of data segment from beginning of - file */ - __be32 data_end; /* Offset of end of data segment from beginning - of file */ - __be32 bss_end; /* Offset of end of bss segment from beginning - of file */ - - /* (It is assumed that data_end through bss_end forms the bss segment.) */ - - __be32 stack_size; /* Size of stack, in bytes */ - __be32 reloc_start; /* Offset of relocation records from beginning of - file */ - __be32 reloc_count; /* Number of relocation records */ - __be32 flags; - __be32 build_date; /* When the program/library was built */ - __u32 filler[5]; /* Reservered, set to zero */ -}; - -#define FLAT_FLAG_RAM 0x0001 /* load program entirely into RAM */ -#define FLAT_FLAG_GOTPIC 0x0002 /* program is PIC with GOT */ -#define FLAT_FLAG_GZIP 0x0004 /* all but the header is compressed */ -#define FLAT_FLAG_GZDATA 0x0008 /* only data/relocs are compressed (for XIP) */ -#define FLAT_FLAG_KTRACE 0x0010 /* output useful kernel trace for debugging */ +#include +#include /* * While it would be nice to keep this header clean, users of older @@ -55,21 +21,28 @@ struct flat_hdr { * with the format above, except to fix bugs with old format support. 
*/ +#include + #define OLD_FLAT_VERSION 0x00000002L #define OLD_FLAT_RELOC_TYPE_TEXT 0 #define OLD_FLAT_RELOC_TYPE_DATA 1 #define OLD_FLAT_RELOC_TYPE_BSS 2 typedef union { - u32 value; + unsigned long value; struct { -#if defined(__LITTLE_ENDIAN_BITFIELD) || \ - (defined(mc68000) && !defined(CONFIG_COLDFIRE)) - s32 offset : 30; - u32 type : 2; +# if defined(mc68000) && !defined(CONFIG_COLDFIRE) + signed long offset : 30; + unsigned long type : 2; +# define OLD_FLAT_FLAG_RAM 0x1 /* load program entirely into RAM */ # elif defined(__BIG_ENDIAN_BITFIELD) - u32 type : 2; - s32 offset : 30; + unsigned long type : 2; + signed long offset : 30; +# define OLD_FLAT_FLAG_RAM 0x1 /* load program entirely into RAM */ +# elif defined(__LITTLE_ENDIAN_BITFIELD) + signed long offset : 30; + unsigned long type : 2; +# define OLD_FLAT_FLAG_RAM 0x1 /* load program entirely into RAM */ # else # error "Unknown bitfield order for flat files." # endif diff --git a/include/linux/flex_array.h b/include/linux/flex_array.h new file mode 100644 index 0000000000..b6efb0c644 --- /dev/null +++ b/include/linux/flex_array.h @@ -0,0 +1,81 @@ +#ifndef _FLEX_ARRAY_H +#define _FLEX_ARRAY_H + +#include +#include +#include + +#define FLEX_ARRAY_PART_SIZE PAGE_SIZE +#define FLEX_ARRAY_BASE_SIZE PAGE_SIZE + +struct flex_array_part; + +/* + * This is meant to replace cases where an array-like + * structure has gotten too big to fit into kmalloc() + * and the developer is getting tempted to use + * vmalloc(). 
+ */ + +struct flex_array { + union { + struct { + int element_size; + int total_nr_elements; + int elems_per_part; + struct reciprocal_value reciprocal_elems; + struct flex_array_part *parts[]; + }; + /* + * This little trick makes sure that + * sizeof(flex_array) == PAGE_SIZE + */ + char padding[FLEX_ARRAY_BASE_SIZE]; + }; +}; + +/* Number of bytes left in base struct flex_array, excluding metadata */ +#define FLEX_ARRAY_BASE_BYTES_LEFT \ + (FLEX_ARRAY_BASE_SIZE - offsetof(struct flex_array, parts)) + +/* Number of pointers in base to struct flex_array_part pages */ +#define FLEX_ARRAY_NR_BASE_PTRS \ + (FLEX_ARRAY_BASE_BYTES_LEFT / sizeof(struct flex_array_part *)) + +/* Number of elements of size that fit in struct flex_array_part */ +#define FLEX_ARRAY_ELEMENTS_PER_PART(size) \ + (FLEX_ARRAY_PART_SIZE / size) + +/* + * Defines a statically allocated flex array and ensures its parameters are + * valid. + */ +#define DEFINE_FLEX_ARRAY(__arrayname, __element_size, __total) \ + struct flex_array __arrayname = { { { \ + .element_size = (__element_size), \ + .total_nr_elements = (__total), \ + } } }; \ + static inline void __arrayname##_invalid_parameter(void) \ + { \ + BUILD_BUG_ON((__total) > FLEX_ARRAY_NR_BASE_PTRS * \ + FLEX_ARRAY_ELEMENTS_PER_PART(__element_size)); \ + } + +struct flex_array *flex_array_alloc(int element_size, unsigned int total, + gfp_t flags); +int flex_array_prealloc(struct flex_array *fa, unsigned int start, + unsigned int nr_elements, gfp_t flags); +void flex_array_free(struct flex_array *fa); +void flex_array_free_parts(struct flex_array *fa); +int flex_array_put(struct flex_array *fa, unsigned int element_nr, void *src, + gfp_t flags); +int flex_array_clear(struct flex_array *fa, unsigned int element_nr); +void *flex_array_get(struct flex_array *fa, unsigned int element_nr); +int flex_array_shrink(struct flex_array *fa); + +#define flex_array_put_ptr(fa, nr, src, gfp) \ + flex_array_put(fa, nr, (void *)&(src), gfp) + +void 
*flex_array_get_ptr(struct flex_array *fa, unsigned int element_nr); + +#endif /* _FLEX_ARRAY_H */ diff --git a/include/linux/flex_proportions.h b/include/linux/flex_proportions.h index c12df59d3f..0d348e011a 100644 --- a/include/linux/flex_proportions.h +++ b/include/linux/flex_proportions.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ /* * Floating proportions with flexible aging period * diff --git a/include/linux/fmc-sdb.h b/include/linux/fmc-sdb.h new file mode 100644 index 0000000000..599bd6bab5 --- /dev/null +++ b/include/linux/fmc-sdb.h @@ -0,0 +1,38 @@ +/* + * This file is separate from sdb.h, because I want that one to remain + * unchanged (as far as possible) from the official sdb distribution + * + * This file and associated functionality are a playground for me to + * understand stuff which will later be implemented in more generic places. + */ +#include + +/* This is the union of all currently defined types */ +union sdb_record { + struct sdb_interconnect ic; + struct sdb_device dev; + struct sdb_bridge bridge; + struct sdb_integration integr; + struct sdb_empty empty; + struct sdb_synthesis synthesis; + struct sdb_repo_url repo_url; +}; + +struct fmc_device; + +/* Every sdb table is turned into this structure */ +struct sdb_array { + int len; + int level; + unsigned long baseaddr; + struct fmc_device *fmc; /* the device that hosts it */ + struct sdb_array *parent; /* NULL at root */ + union sdb_record *record; /* copies of the struct */ + struct sdb_array **subtree; /* only valid for bridge items */ +}; + +extern int fmc_scan_sdb_tree(struct fmc_device *fmc, unsigned long address); +extern void fmc_show_sdb_tree(const struct fmc_device *fmc); +extern signed long fmc_find_sdb_device(struct sdb_array *tree, uint64_t vendor, + uint32_t device, unsigned long *sz); +extern int fmc_free_sdb_tree(struct fmc_device *fmc); diff --git a/include/linux/fmc.h b/include/linux/fmc.h new file mode 100644 index 0000000000..a5f0aa5c2a --- /dev/null +++ 
b/include/linux/fmc.h @@ -0,0 +1,237 @@ +/* + * Copyright (C) 2012 CERN (www.cern.ch) + * Author: Alessandro Rubini + * + * Released according to the GNU GPL, version 2 or any later version. + * + * This work is part of the White Rabbit project, a research effort led + * by CERN, the European Institute for Nuclear Research. + */ +#ifndef __LINUX_FMC_H__ +#define __LINUX_FMC_H__ +#include +#include +#include +#include +#include +#include + +struct fmc_device; +struct fmc_driver; + +/* + * This bus abstraction is developed separately from drivers, so we need + * to check the version of the data structures we receive. + */ + +#define FMC_MAJOR 3 +#define FMC_MINOR 0 +#define FMC_VERSION ((FMC_MAJOR << 16) | FMC_MINOR) +#define __FMC_MAJOR(x) ((x) >> 16) +#define __FMC_MINOR(x) ((x) & 0xffff) + +/* + * The device identification, as defined by the IPMI FRU (Field Replaceable + * Unit) includes four different strings to describe the device. Here we + * only match the "Board Manufacturer" and the "Board Product Name", + * ignoring the "Board Serial Number" and "Board Part Number". All 4 are + * expected to be strings, so they are treated as zero-terminated C strings. + * Unspecified string (NULL) means "any", so if both are unspecified this + * is a catch-all driver. So null entries are allowed and we use array + * and length. This is unlike pci and usb that use null-terminated arrays + */ +struct fmc_fru_id { + char *manufacturer; + char *product_name; +}; + +/* + * If the FPGA is already programmed (think Etherbone or the second + * SVEC slot), we can match on SDB devices in the memory image. This + * match uses an array of devices that must all be present, and the + * match is based on vendor and device only. Further checks are expected + * to happen in the probe function. Zero means "any" and catch-all is allowed. 
+ */ +struct fmc_sdb_one_id { + uint64_t vendor; + uint32_t device; +}; +struct fmc_sdb_id { + struct fmc_sdb_one_id *cores; + int cores_nr; +}; + +struct fmc_device_id { + struct fmc_fru_id *fru_id; + int fru_id_nr; + struct fmc_sdb_id *sdb_id; + int sdb_id_nr; +}; + +/* This sizes the module_param_array used by generic module parameters */ +#define FMC_MAX_CARDS 32 + +/* The driver is a pretty simple thing */ +struct fmc_driver { + unsigned long version; + struct device_driver driver; + int (*probe)(struct fmc_device *); + int (*remove)(struct fmc_device *); + const struct fmc_device_id id_table; + /* What follows is for generic module parameters */ + int busid_n; + int busid_val[FMC_MAX_CARDS]; + int gw_n; + char *gw_val[FMC_MAX_CARDS]; +}; +#define to_fmc_driver(x) container_of((x), struct fmc_driver, driver) + +/* These are the generic parameters, that drivers may instantiate */ +#define FMC_PARAM_BUSID(_d) \ + module_param_array_named(busid, _d.busid_val, int, &_d.busid_n, 0444) +#define FMC_PARAM_GATEWARE(_d) \ + module_param_array_named(gateware, _d.gw_val, charp, &_d.gw_n, 0444) + +/* + * Drivers may need to configure gpio pins in the carrier. 
To read input + * (a very uncommon operation, and definitely not in the hot paths), just + * configure one gpio only and get 0 or 1 as retval of the config method + */ +struct fmc_gpio { + char *carrier_name; /* name or NULL for virtual pins */ + int gpio; + int _gpio; /* internal use by the carrier */ + int mode; /* GPIOF_DIR_OUT etc, from */ + int irqmode; /* IRQF_TRIGGER_LOW and so on */ +}; + +/* The numbering of gpio pins allows access to raw pins or virtual roles */ +#define FMC_GPIO_RAW(x) (x) /* 4096 of them */ +#define __FMC_GPIO_IS_RAW(x) ((x) < 0x1000) +#define FMC_GPIO_IRQ(x) ((x) + 0x1000) /* 256 of them */ +#define FMC_GPIO_LED(x) ((x) + 0x1100) /* 256 of them */ +#define FMC_GPIO_KEY(x) ((x) + 0x1200) /* 256 of them */ +#define FMC_GPIO_TP(x) ((x) + 0x1300) /* 256 of them */ +#define FMC_GPIO_USER(x) ((x) + 0x1400) /* 256 of them */ +/* We may add SCL and SDA, or other roles if the need arises */ + +/* GPIOF_DIR_IN etc are missing before 3.0. copy from */ +#ifndef GPIOF_DIR_IN +# define GPIOF_DIR_OUT (0 << 0) +# define GPIOF_DIR_IN (1 << 0) +# define GPIOF_INIT_LOW (0 << 1) +# define GPIOF_INIT_HIGH (1 << 1) +#endif + +/* + * The operations are offered by each carrier and should make driver + * design completely independent of the carrier. Named GPIO pins may be + * the exception. 
+ */ +struct fmc_operations { + uint32_t (*read32)(struct fmc_device *fmc, int offset); + void (*write32)(struct fmc_device *fmc, uint32_t value, int offset); + int (*validate)(struct fmc_device *fmc, struct fmc_driver *drv); + int (*reprogram)(struct fmc_device *f, struct fmc_driver *d, char *gw); + int (*irq_request)(struct fmc_device *fmc, irq_handler_t h, + char *name, int flags); + void (*irq_ack)(struct fmc_device *fmc); + int (*irq_free)(struct fmc_device *fmc); + int (*gpio_config)(struct fmc_device *fmc, struct fmc_gpio *gpio, + int ngpio); + int (*read_ee)(struct fmc_device *fmc, int pos, void *d, int l); + int (*write_ee)(struct fmc_device *fmc, int pos, const void *d, int l); +}; + +/* Prefer this helper rather than calling of fmc->reprogram directly */ +extern int fmc_reprogram(struct fmc_device *f, struct fmc_driver *d, char *gw, + int sdb_entry); + +/* + * The device reports all information needed to access hw. + * + * If we have eeprom_len and not contents, the core reads it. + * Then, parsing of identifiers is done by the core which fills fmc_fru_id.. + * Similarly a device that must be matched based on SDB cores must + * fill the entry point and the core will scan the bus (FIXME: sdb match) + */ +struct fmc_device { + unsigned long version; + unsigned long flags; + struct module *owner; /* char device must pin it */ + struct fmc_fru_id id; /* for EEPROM-based match */ + struct fmc_operations *op; /* carrier-provided */ + int irq; /* according to host bus. 
0 == none */ + int eeprom_len; /* Usually 8kB, may be less */ + int eeprom_addr; /* 0x50, 0x52 etc */ + uint8_t *eeprom; /* Full contents or leading part */ + char *carrier_name; /* "SPEC" or similar, for special use */ + void *carrier_data; /* "struct spec *" or equivalent */ + __iomem void *fpga_base; /* May be NULL (Etherbone) */ + __iomem void *slot_base; /* Set by the driver */ + struct fmc_device **devarray; /* Allocated by the bus */ + int slot_id; /* Index in the slot array */ + int nr_slots; /* Number of slots in this carrier */ + unsigned long memlen; /* Used for the char device */ + struct device dev; /* For Linux use */ + struct device *hwdev; /* The underlying hardware device */ + unsigned long sdbfs_entry; + struct sdb_array *sdb; + uint32_t device_id; /* Filled by the device */ + char *mezzanine_name; /* Defaults to ``fmc'' */ + void *mezzanine_data; +}; +#define to_fmc_device(x) container_of((x), struct fmc_device, dev) + +#define FMC_DEVICE_HAS_GOLDEN 1 +#define FMC_DEVICE_HAS_CUSTOM 2 +#define FMC_DEVICE_NO_MEZZANINE 4 +#define FMC_DEVICE_MATCH_SDB 8 /* fmc-core must scan sdb in fpga */ + +/* + * If fpga_base can be used, the carrier offers no readl/writel methods, and + * this expands to a single, fast, I/O access. 
+ */ +static inline uint32_t fmc_readl(struct fmc_device *fmc, int offset) +{ + if (unlikely(fmc->op->read32)) + return fmc->op->read32(fmc, offset); + return readl(fmc->fpga_base + offset); +} +static inline void fmc_writel(struct fmc_device *fmc, uint32_t val, int off) +{ + if (unlikely(fmc->op->write32)) + fmc->op->write32(fmc, val, off); + else + writel(val, fmc->fpga_base + off); +} + +/* pci-like naming */ +static inline void *fmc_get_drvdata(const struct fmc_device *fmc) +{ + return dev_get_drvdata(&fmc->dev); +} + +static inline void fmc_set_drvdata(struct fmc_device *fmc, void *data) +{ + dev_set_drvdata(&fmc->dev, data); +} + +/* The 4 access points */ +extern int fmc_driver_register(struct fmc_driver *drv); +extern void fmc_driver_unregister(struct fmc_driver *drv); +extern int fmc_device_register(struct fmc_device *tdev); +extern void fmc_device_unregister(struct fmc_device *tdev); + +/* Two more for device sets, all driven by the same FPGA */ +extern int fmc_device_register_n(struct fmc_device **devs, int n); +extern void fmc_device_unregister_n(struct fmc_device **devs, int n); + +/* Internal cross-calls between files; not exported to other modules */ +extern int fmc_match(struct device *dev, struct device_driver *drv); +extern int fmc_fill_id_info(struct fmc_device *fmc); +extern void fmc_free_id_info(struct fmc_device *fmc); +extern void fmc_dump_eeprom(const struct fmc_device *fmc); +extern void fmc_dump_sdb(const struct fmc_device *fmc); + +#endif /* __LINUX_FMC_H__ */ diff --git a/include/linux/font.h b/include/linux/font.h index abf1442ce7..d6821769dd 100644 --- a/include/linux/font.h +++ b/include/linux/font.h @@ -16,8 +16,7 @@ struct font_desc { int idx; const char *name; - unsigned int width, height; - unsigned int charcount; + int width, height; const void *data; int pref; }; @@ -33,8 +32,6 @@ struct font_desc { #define ACORN8x8_IDX 8 #define MINI4x6_IDX 9 #define FONT6x10_IDX 10 -#define TER16x32_IDX 11 -#define FONT6x8_IDX 12 extern const 
struct font_desc font_vga_8x8, font_vga_8x16, @@ -46,9 +43,7 @@ extern const struct font_desc font_vga_8x8, font_sun_12x22, font_acorn_8x8, font_mini_4x6, - font_6x10, - font_ter_16x32, - font_6x8; + font_6x10; /* Find a font with a specific name */ @@ -62,17 +57,4 @@ extern const struct font_desc *get_default_font(int xres, int yres, /* Max. length for the name of a predefined font */ #define MAX_FONT_NAME 32 -/* Extra word getters */ -#define REFCOUNT(fd) (((int *)(fd))[-1]) -#define FNTSIZE(fd) (((int *)(fd))[-2]) -#define FNTCHARCNT(fd) (((int *)(fd))[-3]) -#define FNTSUM(fd) (((int *)(fd))[-4]) - -#define FONT_EXTRA_WORDS 4 - -struct font_data { - unsigned int extra[FONT_EXTRA_WORDS]; - const unsigned char data[]; -} __packed; - #endif /* _VIDEO_FONT_H */ diff --git a/include/linux/fpga/fpga-mgr.h b/include/linux/fpga/fpga-mgr.h index 474c1f5063..0940bf45e2 100644 --- a/include/linux/fpga/fpga-mgr.h +++ b/include/linux/fpga/fpga-mgr.h @@ -1,18 +1,27 @@ -/* SPDX-License-Identifier: GPL-2.0 */ /* * FPGA Framework * - * Copyright (C) 2013-2016 Altera Corporation - * Copyright (C) 2017 Intel Corporation + * Copyright (C) 2013-2015 Altera Corporation + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program. If not, see . 
*/ -#ifndef _LINUX_FPGA_MGR_H -#define _LINUX_FPGA_MGR_H - #include #include +#ifndef _LINUX_FPGA_MGR_H +#define _LINUX_FPGA_MGR_H + struct fpga_manager; -struct sg_table; /** * enum fpga_mgr_states - fpga framework states @@ -53,105 +62,31 @@ enum fpga_mgr_states { FPGA_MGR_STATE_OPERATING, }; -/** - * DOC: FPGA Manager flags - * - * Flags used in the &fpga_image_info->flags field - * - * %FPGA_MGR_PARTIAL_RECONFIG: do partial reconfiguration if supported - * - * %FPGA_MGR_EXTERNAL_CONFIG: FPGA has been configured prior to Linux booting - * - * %FPGA_MGR_ENCRYPTED_BITSTREAM: indicates bitstream is encrypted - * - * %FPGA_MGR_BITSTREAM_LSB_FIRST: SPI bitstream bit order is LSB first - * - * %FPGA_MGR_COMPRESSED_BITSTREAM: FPGA bitstream is compressed +/* + * FPGA Manager flags + * FPGA_MGR_PARTIAL_RECONFIG: do partial reconfiguration if supported */ #define FPGA_MGR_PARTIAL_RECONFIG BIT(0) -#define FPGA_MGR_EXTERNAL_CONFIG BIT(1) -#define FPGA_MGR_ENCRYPTED_BITSTREAM BIT(2) -#define FPGA_MGR_BITSTREAM_LSB_FIRST BIT(3) -#define FPGA_MGR_COMPRESSED_BITSTREAM BIT(4) - -/** - * struct fpga_image_info - information specific to an FPGA image - * @flags: boolean flags as defined above - * @enable_timeout_us: maximum time to enable traffic through bridge (uSec) - * @disable_timeout_us: maximum time to disable traffic through bridge (uSec) - * @config_complete_timeout_us: maximum time for FPGA to switch to operating - * status in the write_complete op. 
- * @firmware_name: name of FPGA image firmware file - * @sgt: scatter/gather table containing FPGA image - * @buf: contiguous buffer containing FPGA image - * @count: size of buf - * @region_id: id of target region - * @dev: device that owns this - * @overlay: Device Tree overlay - */ -struct fpga_image_info { - u32 flags; - u32 enable_timeout_us; - u32 disable_timeout_us; - u32 config_complete_timeout_us; - char *firmware_name; - struct sg_table *sgt; - const char *buf; - size_t count; - int region_id; - struct device *dev; -#ifdef CONFIG_OF - struct device_node *overlay; -#endif -}; /** * struct fpga_manager_ops - ops for low level fpga manager drivers - * @initial_header_size: Maximum number of bytes that should be passed into write_init * @state: returns an enum value of the FPGA's state - * @status: returns status of the FPGA, including reconfiguration error code - * @write_init: prepare the FPGA to receive configuration data + * @write_init: prepare the FPGA to receive confuration data * @write: write count bytes of configuration data to the FPGA - * @write_sg: write the scatter list of configuration data to the FPGA * @write_complete: set FPGA to operating state after writing is done * @fpga_remove: optional: Set FPGA into a specific state during driver remove - * @groups: optional attribute groups. * * fpga_manager_ops are the low level functions implemented by a specific * fpga manager driver. The optional ones are tested for NULL before being * called, so leaving them out is fine. 
*/ struct fpga_manager_ops { - size_t initial_header_size; enum fpga_mgr_states (*state)(struct fpga_manager *mgr); - u64 (*status)(struct fpga_manager *mgr); - int (*write_init)(struct fpga_manager *mgr, - struct fpga_image_info *info, + int (*write_init)(struct fpga_manager *mgr, u32 flags, const char *buf, size_t count); int (*write)(struct fpga_manager *mgr, const char *buf, size_t count); - int (*write_sg)(struct fpga_manager *mgr, struct sg_table *sgt); - int (*write_complete)(struct fpga_manager *mgr, - struct fpga_image_info *info); + int (*write_complete)(struct fpga_manager *mgr, u32 flags); void (*fpga_remove)(struct fpga_manager *mgr); - const struct attribute_group **groups; -}; - -/* FPGA manager status: Partial/Full Reconfiguration errors */ -#define FPGA_MGR_STATUS_OPERATION_ERR BIT(0) -#define FPGA_MGR_STATUS_CRC_ERR BIT(1) -#define FPGA_MGR_STATUS_INCOMPATIBLE_IMAGE_ERR BIT(2) -#define FPGA_MGR_STATUS_IP_PROTOCOL_ERR BIT(3) -#define FPGA_MGR_STATUS_FIFO_OVERFLOW_ERR BIT(4) - -/** - * struct fpga_compat_id - id for compatibility check - * - * @id_h: high 64bit of the compat_id - * @id_l: low 64bit of the compat_id - */ -struct fpga_compat_id { - u64 id_h; - u64 id_l; }; /** @@ -160,7 +95,6 @@ struct fpga_compat_id { * @dev: fpga manager device * @ref_mutex: only allows one reference to fpga manager * @state: state of fpga manager - * @compat_id: FPGA manager id for compatibility check. 
* @mops: pointer to struct of fpga manager ops * @priv: low level driver private date */ @@ -169,39 +103,25 @@ struct fpga_manager { struct device dev; struct mutex ref_mutex; enum fpga_mgr_states state; - struct fpga_compat_id *compat_id; const struct fpga_manager_ops *mops; void *priv; }; #define to_fpga_manager(d) container_of(d, struct fpga_manager, dev) -struct fpga_image_info *fpga_image_info_alloc(struct device *dev); +int fpga_mgr_buf_load(struct fpga_manager *mgr, u32 flags, + const char *buf, size_t count); -void fpga_image_info_free(struct fpga_image_info *info); - -int fpga_mgr_load(struct fpga_manager *mgr, struct fpga_image_info *info); - -int fpga_mgr_lock(struct fpga_manager *mgr); -void fpga_mgr_unlock(struct fpga_manager *mgr); +int fpga_mgr_firmware_load(struct fpga_manager *mgr, u32 flags, + const char *image_name); struct fpga_manager *of_fpga_mgr_get(struct device_node *node); -struct fpga_manager *fpga_mgr_get(struct device *dev); - void fpga_mgr_put(struct fpga_manager *mgr); -struct fpga_manager *fpga_mgr_create(struct device *dev, const char *name, - const struct fpga_manager_ops *mops, - void *priv); -void fpga_mgr_free(struct fpga_manager *mgr); -int fpga_mgr_register(struct fpga_manager *mgr); -void fpga_mgr_unregister(struct fpga_manager *mgr); +int fpga_mgr_register(struct device *dev, const char *name, + const struct fpga_manager_ops *mops, void *priv); -int devm_fpga_mgr_register(struct device *dev, struct fpga_manager *mgr); - -struct fpga_manager *devm_fpga_mgr_create(struct device *dev, const char *name, - const struct fpga_manager_ops *mops, - void *priv); +void fpga_mgr_unregister(struct device *dev); #endif /*_LINUX_FPGA_MGR_H */ diff --git a/include/linux/frame.h b/include/linux/frame.h new file mode 100644 index 0000000000..e6baaba3f1 --- /dev/null +++ b/include/linux/frame.h @@ -0,0 +1,23 @@ +#ifndef _LINUX_FRAME_H +#define _LINUX_FRAME_H + +#ifdef CONFIG_STACK_VALIDATION +/* + * This macro marks the given function's stack 
frame as "non-standard", which + * tells objtool to ignore the function when doing stack metadata validation. + * It should only be used in special cases where you're 100% sure it won't + * affect the reliability of frame pointers and kernel stack traces. + * + * For more information, see tools/objtool/Documentation/stack-validation.txt. + */ +#define STACK_FRAME_NON_STANDARD(func) \ + static void __used __section(__func_stack_frame_non_standard) \ + *__func_stack_frame_non_standard_##func = func + +#else /* !CONFIG_STACK_VALIDATION */ + +#define STACK_FRAME_NON_STANDARD(func) + +#endif /* CONFIG_STACK_VALIDATION */ + +#endif /* _LINUX_FRAME_H */ diff --git a/include/linux/freezer.h b/include/linux/freezer.h index 0621c5f86c..dd03e837eb 100644 --- a/include/linux/freezer.h +++ b/include/linux/freezer.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ /* Freezer declarations */ #ifndef FREEZER_H_INCLUDED @@ -182,7 +181,7 @@ static inline void freezable_schedule_unsafe(void) } /* - * Like schedule_timeout(), but should not block the freezer. Do not + * Like freezable_schedule_timeout(), but should not block the freezer. Do not * call this with locks held. */ static inline long freezable_schedule_timeout(long timeout) @@ -207,17 +206,6 @@ static inline long freezable_schedule_timeout_interruptible(long timeout) return __retval; } -/* DO NOT ADD ANY NEW CALLERS OF THIS FUNCTION */ -static inline long freezable_schedule_timeout_interruptible_unsafe(long timeout) -{ - long __retval; - - freezer_do_not_count(); - __retval = schedule_timeout_interruptible(timeout); - freezer_count_unsafe(); - return __retval; -} - /* Like schedule_timeout_killable(), but should not block the freezer. 
*/ static inline long freezable_schedule_timeout_killable(long timeout) { @@ -279,6 +267,7 @@ static inline int freeze_kernel_threads(void) { return -ENOSYS; } static inline void thaw_processes(void) {} static inline void thaw_kernel_threads(void) {} +static inline bool try_to_freeze_nowarn(void) { return false; } static inline bool try_to_freeze(void) { return false; } static inline void freezer_do_not_count(void) {} @@ -295,9 +284,6 @@ static inline void set_freezable(void) {} #define freezable_schedule_timeout_interruptible(timeout) \ schedule_timeout_interruptible(timeout) -#define freezable_schedule_timeout_interruptible_unsafe(timeout) \ - schedule_timeout_interruptible(timeout) - #define freezable_schedule_timeout_killable(timeout) \ schedule_timeout_killable(timeout) diff --git a/include/linux/frontswap.h b/include/linux/frontswap.h index b07d88c92b..1d18af0345 100644 --- a/include/linux/frontswap.h +++ b/include/linux/frontswap.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_FRONTSWAP_H #define _LINUX_FRONTSWAP_H @@ -7,13 +6,6 @@ #include #include -/* - * Return code to denote that requested number of - * frontswap pages are unused(moved to page cache). - * Used in shmem_unuse and try_to_unuse. 
- */ -#define FRONTSWAP_PAGES_UNUSED 2 - struct frontswap_ops { void (*init)(unsigned); /* this swap type was just swapon'ed */ int (*store)(unsigned, pgoff_t, struct page *); /* store a page */ diff --git a/include/linux/fs.h b/include/linux/fs.h index e7a633353f..473e4cd78d 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -1,9 +1,8 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_FS_H #define _LINUX_FS_H #include -#include +#include #include #include #include @@ -13,17 +12,15 @@ #include #include #include -#include #include #include #include #include #include #include -#include #include #include -#include +#include #include #include #include @@ -31,25 +28,17 @@ #include #include #include +#include #include +#include #include -#include -#include -#include -#include -#include -#include -#include -#include #include #include struct backing_dev_info; struct bdi_writeback; -struct bio; struct export_operations; -struct fiemap_extent_info; struct hd_geometry; struct iovec; struct kiocb; @@ -66,11 +55,6 @@ struct workqueue_struct; struct iov_iter; struct fscrypt_info; struct fscrypt_operations; -struct fsverity_info; -struct fsverity_operations; -struct fs_context; -struct fs_parameter_spec; -struct fileattr; extern void __init inode_init(void); extern void __init inode_init_early(void); @@ -84,10 +68,6 @@ extern struct inodes_stat_t inodes_stat; extern int leases_enable, lease_break_time; extern int sysctl_protected_symlinks; extern int sysctl_protected_hardlinks; -extern int sysctl_protected_fifos; -extern int sysctl_protected_regular; - -typedef __kernel_rwf_t rwf_t; struct buffer_head; typedef int (get_block_t)(struct inode *inode, sector_t iblock, @@ -107,7 +87,7 @@ typedef int (dio_iodone_t)(struct kiocb *iocb, loff_t offset, /* * flags in file.f_mode. 
Note that FMODE_READ and FMODE_WRITE must correspond - * to O_WRONLY and O_RDWR via the strange trick in do_dentry_open() + * to O_WRONLY and O_RDWR via the strange trick in __dentry_open() */ /* file is open for reading */ @@ -145,7 +125,7 @@ typedef int (dio_iodone_t)(struct kiocb *iocb, loff_t offset, /* Expect random access pattern */ #define FMODE_RANDOM ((__force fmode_t)0x1000) -/* File is huge (eg. /dev/mem): treat loff_t as unsigned */ +/* File is huge (eg. /dev/kmem): treat loff_t as unsigned */ #define FMODE_UNSIGNED_OFFSET ((__force fmode_t)0x2000) /* File is opened with O_PATH; almost nothing can be done with it */ @@ -160,26 +140,68 @@ typedef int (dio_iodone_t)(struct kiocb *iocb, loff_t offset, /* Has write method(s) */ #define FMODE_CAN_WRITE ((__force fmode_t)0x40000) -#define FMODE_OPENED ((__force fmode_t)0x80000) -#define FMODE_CREATED ((__force fmode_t)0x100000) - -/* File is stream-like */ -#define FMODE_STREAM ((__force fmode_t)0x200000) - /* File was opened by fanotify and shouldn't generate fanotify events */ #define FMODE_NONOTIFY ((__force fmode_t)0x4000000) -/* File is capable of returning -EAGAIN if I/O will block */ -#define FMODE_NOWAIT ((__force fmode_t)0x8000000) +/* + * Flag for rw_copy_check_uvector and compat_rw_copy_check_uvector + * that indicates that they should check the contents of the iovec are + * valid, but not check the memory that the iovec elements + * points too. + */ +#define CHECK_IOVEC_ONLY -1 -/* File represents mount that needs unmounting */ -#define FMODE_NEED_UNMOUNT ((__force fmode_t)0x10000000) +/* + * The below are the various read and write flags that we support. Some of + * them include behavioral modifiers that send information down to the + * block layer and IO scheduler. They should be used along with a req_op. + * Terminology: + * + * The block layer uses device plugging to defer IO a little bit, in + * the hope that we will see more IO very shortly. 
This increases + * coalescing of adjacent IO and thus reduces the number of IOs we + * have to send to the device. It also allows for better queuing, + * if the IO isn't mergeable. If the caller is going to be waiting + * for the IO, then he must ensure that the device is unplugged so + * that the IO is dispatched to the driver. + * + * All IO is handled async in Linux. This is fine for background + * writes, but for reads or writes that someone waits for completion + * on, we want to notify the block layer and IO scheduler so that they + * know about it. That allows them to make better scheduling + * decisions. So when the below references 'sync' and 'async', it + * is referencing this priority hint. + * + * With that in mind, the available types are: + * + * READ A normal read operation. Device will be plugged. + * READ_SYNC A synchronous read. Device is not plugged, caller can + * immediately wait on this read without caring about + * unplugging. + * WRITE A normal async write. Device will be plugged. + * WRITE_SYNC Synchronous write. Identical to WRITE, but passes down + * the hint that someone will be waiting on this IO + * shortly. The write equivalent of READ_SYNC. + * WRITE_ODIRECT Special case write for O_DIRECT only. + * WRITE_FLUSH Like WRITE_SYNC but with preceding cache flush. + * WRITE_FUA Like WRITE_SYNC but data is guaranteed to be on + * non-volatile media on completion. + * WRITE_FLUSH_FUA Combination of WRITE_FLUSH and FUA. The IO is preceded + * by a cache flush and data is guaranteed to be on + * non-volatile media on completion. 
+ * + */ +#define RW_MASK REQ_OP_WRITE -/* File does not contribute to nr_files count */ -#define FMODE_NOACCOUNT ((__force fmode_t)0x20000000) +#define READ REQ_OP_READ +#define WRITE REQ_OP_WRITE -/* File supports async buffered reads */ -#define FMODE_BUF_RASYNC ((__force fmode_t)0x40000000) +#define READ_SYNC REQ_SYNC +#define WRITE_SYNC (REQ_SYNC | REQ_NOIDLE) +#define WRITE_ODIRECT REQ_SYNC +#define WRITE_FLUSH (REQ_SYNC | REQ_NOIDLE | REQ_PREFLUSH) +#define WRITE_FUA (REQ_SYNC | REQ_NOIDLE | REQ_FUA) +#define WRITE_FLUSH_FUA (REQ_SYNC | REQ_NOIDLE | REQ_PREFLUSH | REQ_FUA) /* * Attribute flags. These should be or-ed together to figure out what @@ -195,6 +217,7 @@ typedef int (dio_iodone_t)(struct kiocb *iocb, loff_t offset, #define ATTR_ATIME_SET (1 << 7) #define ATTR_MTIME_SET (1 << 8) #define ATTR_FORCE (1 << 9) /* Not a change, but a change it */ +#define ATTR_ATTR_FLAG (1 << 10) #define ATTR_KILL_SUID (1 << 11) #define ATTR_KILL_SGID (1 << 12) #define ATTR_FILE (1 << 13) @@ -225,9 +248,9 @@ struct iattr { kuid_t ia_uid; kgid_t ia_gid; loff_t ia_size; - struct timespec64 ia_atime; - struct timespec64 ia_mtime; - struct timespec64 ia_ctime; + struct timespec ia_atime; + struct timespec ia_mtime; + struct timespec ia_ctime; /* * Not an attribute, but an auxiliary info for filesystems wanting to @@ -279,8 +302,9 @@ enum positive_aop_returns { AOP_TRUNCATED_PAGE = 0x80001, }; -#define AOP_FLAG_CONT_EXPAND 0x0001 /* called from cont_expand */ -#define AOP_FLAG_NOFS 0x0002 /* used by filesystem to direct +#define AOP_FLAG_UNINTERRUPTIBLE 0x0001 /* will not do a short write */ +#define AOP_FLAG_CONT_EXPAND 0x0002 /* called from cont_expand */ +#define AOP_FLAG_NOFS 0x0004 /* used by filesystem to direct * helper code (eg buffer layer) * to clear GFP_FS from alloc */ @@ -290,63 +314,38 @@ enum positive_aop_returns { struct page; struct address_space; struct writeback_control; -struct readahead_control; -/* - * Write life time hint values. 
- * Stored in struct inode as u8. - */ -enum rw_hint { - WRITE_LIFE_NOT_SET = 0, - WRITE_LIFE_NONE = RWH_WRITE_LIFE_NONE, - WRITE_LIFE_SHORT = RWH_WRITE_LIFE_SHORT, - WRITE_LIFE_MEDIUM = RWH_WRITE_LIFE_MEDIUM, - WRITE_LIFE_LONG = RWH_WRITE_LIFE_LONG, - WRITE_LIFE_EXTREME = RWH_WRITE_LIFE_EXTREME, -}; - -/* Match RWF_* bits to IOCB bits */ -#define IOCB_HIPRI (__force int) RWF_HIPRI -#define IOCB_DSYNC (__force int) RWF_DSYNC -#define IOCB_SYNC (__force int) RWF_SYNC -#define IOCB_NOWAIT (__force int) RWF_NOWAIT -#define IOCB_APPEND (__force int) RWF_APPEND - -/* non-RWF related bits - start at 16 */ -#define IOCB_EVENTFD (1 << 16) -#define IOCB_DIRECT (1 << 17) -#define IOCB_WRITE (1 << 18) -/* iocb->ki_waitq is valid */ -#define IOCB_WAITQ (1 << 19) -#define IOCB_NOIO (1 << 20) -/* can use bio alloc cache */ -#define IOCB_ALLOC_CACHE (1 << 21) +#define IOCB_EVENTFD (1 << 0) +#define IOCB_APPEND (1 << 1) +#define IOCB_DIRECT (1 << 2) +#define IOCB_HIPRI (1 << 3) +#define IOCB_DSYNC (1 << 4) +#define IOCB_SYNC (1 << 5) +#define IOCB_WRITE (1 << 6) struct kiocb { struct file *ki_filp; - - /* The 'ki_filp' pointer is shared in a union for aio */ - randomized_struct_fields_start - loff_t ki_pos; void (*ki_complete)(struct kiocb *iocb, long ret, long ret2); void *private; int ki_flags; - u16 ki_hint; - u16 ki_ioprio; /* See linux/ioprio.h */ - union { - unsigned int ki_cookie; /* for ->iopoll */ - struct wait_page_queue *ki_waitq; /* for async buffered IO */ - }; - - randomized_struct_fields_end -}; +} __randomize_layout; static inline bool is_sync_kiocb(struct kiocb *kiocb) { return kiocb->ki_complete == NULL; } +static inline int iocb_flags(struct file *file); + +static inline void init_sync_kiocb(struct kiocb *kiocb, struct file *filp) +{ + *kiocb = (struct kiocb) { + .ki_filp = filp, + .ki_flags = iocb_flags(filp), + }; +} + /* * "descriptor" for what we're up to with a read. 
* This allows us to use the same read code yet @@ -379,13 +378,8 @@ struct address_space_operations { /* Set a page dirty. Return true if this dirtied it */ int (*set_page_dirty)(struct page *page); - /* - * Reads in the requested pages. Unlike ->readpage(), this is - * PURELY used for read-ahead!. - */ int (*readpages)(struct file *filp, struct address_space *mapping, struct list_head *pages, unsigned nr_pages); - void (*readahead)(struct readahead_control *); int (*write_begin)(struct file *, struct address_space *mapping, loff_t pos, unsigned len, unsigned flags, @@ -434,78 +428,83 @@ int pagecache_write_end(struct file *, struct address_space *mapping, loff_t pos, unsigned len, unsigned copied, struct page *page, void *fsdata); -/** - * struct address_space - Contents of a cacheable, mappable object. - * @host: Owner, either the inode or the block_device. - * @i_pages: Cached pages. - * @invalidate_lock: Guards coherency between page cache contents and - * file offset->disk block mappings in the filesystem during invalidates. - * It is also used to block modification of page cache contents through - * memory mappings. - * @gfp_mask: Memory allocation flags to use for allocating pages. - * @i_mmap_writable: Number of VM_SHARED mappings. - * @nr_thps: Number of THPs in the pagecache (non-shmem only). - * @i_mmap: Tree of private and shared mappings. - * @i_mmap_rwsem: Protects @i_mmap and @i_mmap_writable. - * @nrpages: Number of page entries, protected by the i_pages lock. - * @writeback_index: Writeback starts here. - * @a_ops: Methods. - * @flags: Error bits and flags (AS_*). - * @wb_err: The most recent error which has occurred. - * @private_lock: For use by the owner of the address_space. - * @private_list: For use by the owner of the address_space. - * @private_data: For use by the owner of the address_space. 
- */ struct address_space { - struct inode *host; - struct xarray i_pages; - struct rw_semaphore invalidate_lock; - gfp_t gfp_mask; - atomic_t i_mmap_writable; -#ifdef CONFIG_READ_ONLY_THP_FOR_FS - /* number of thp, only for non-shmem files */ - atomic_t nr_thps; -#endif - struct rb_root_cached i_mmap; - struct rw_semaphore i_mmap_rwsem; - unsigned long nrpages; - pgoff_t writeback_index; - const struct address_space_operations *a_ops; - unsigned long flags; - errseq_t wb_err; - spinlock_t private_lock; - struct list_head private_list; - void *private_data; + struct inode *host; /* owner: inode, block_device */ + struct radix_tree_root page_tree; /* radix tree of all pages */ + spinlock_t tree_lock; /* and lock protecting it */ + atomic_t i_mmap_writable;/* count VM_SHARED mappings */ + struct rb_root i_mmap; /* tree of private and shared mappings */ + struct rw_semaphore i_mmap_rwsem; /* protect tree, count, list */ + /* Protected by tree_lock together with the radix tree */ + unsigned long nrpages; /* number of total pages */ + /* number of shadow or DAX exceptional entries */ + unsigned long nrexceptional; + pgoff_t writeback_index;/* writeback starts here */ + const struct address_space_operations *a_ops; /* methods */ + unsigned long flags; /* error bits */ + spinlock_t private_lock; /* for use by the address_space */ + gfp_t gfp_mask; /* implicit gfp mask for allocations */ + struct list_head private_list; /* ditto */ + void *private_data; /* ditto */ } __attribute__((aligned(sizeof(long)))) __randomize_layout; /* * On most architectures that alignment is already the case; but * must be enforced here for CRIS, to let the least significant bit * of struct page's "mapping" pointer be used for PAGE_MAPPING_ANON. */ +struct request_queue; -/* XArray tags, for tagging dirty and writeback pages in the pagecache. 
*/ -#define PAGECACHE_TAG_DIRTY XA_MARK_0 -#define PAGECACHE_TAG_WRITEBACK XA_MARK_1 -#define PAGECACHE_TAG_TOWRITE XA_MARK_2 +struct block_device { + dev_t bd_dev; /* not a kdev_t - it's a search key */ + int bd_openers; + struct inode * bd_inode; /* will die */ + struct super_block * bd_super; + struct mutex bd_mutex; /* open/close mutex */ + void * bd_claiming; + void * bd_holder; + int bd_holders; + bool bd_write_holder; +#ifdef CONFIG_SYSFS + struct list_head bd_holder_disks; +#endif + struct block_device * bd_contains; + unsigned bd_block_size; + struct hd_struct * bd_part; + /* number of times partitions within this device have been opened. */ + unsigned bd_part_count; + int bd_invalidated; + struct gendisk * bd_disk; + struct request_queue * bd_queue; + struct list_head bd_list; + /* + * Private data. You must have bd_claim'ed the block_device + * to use this. NOTE: bd_claim allows an owner to claim + * the same device multiple times, the owner must take special + * care to not mess up bd_private for that case. + */ + unsigned long bd_private; + + /* The counter of freeze processes */ + int bd_fsfreeze_count; + /* Mutex for freeze */ + struct mutex bd_fsfreeze_mutex; +} __randomize_layout; /* - * Returns true if any of the pages in the mapping are marked with the tag. 
+ * Radix-tree tags, for tagging dirty and writeback pages within the pagecache + * radix trees */ -static inline bool mapping_tagged(struct address_space *mapping, xa_mark_t tag) -{ - return xa_marked(&mapping->i_pages, tag); -} +#define PAGECACHE_TAG_DIRTY 0 +#define PAGECACHE_TAG_WRITEBACK 1 +#define PAGECACHE_TAG_TOWRITE 2 + +int mapping_tagged(struct address_space *mapping, int tag); static inline void i_mmap_lock_write(struct address_space *mapping) { down_write(&mapping->i_mmap_rwsem); } -static inline int i_mmap_trylock_write(struct address_space *mapping) -{ - return down_write_trylock(&mapping->i_mmap_rwsem); -} - static inline void i_mmap_unlock_write(struct address_space *mapping) { up_write(&mapping->i_mmap_rwsem); @@ -521,27 +520,17 @@ static inline void i_mmap_unlock_read(struct address_space *mapping) up_read(&mapping->i_mmap_rwsem); } -static inline void i_mmap_assert_locked(struct address_space *mapping) -{ - lockdep_assert_held(&mapping->i_mmap_rwsem); -} - -static inline void i_mmap_assert_write_locked(struct address_space *mapping) -{ - lockdep_assert_held_write(&mapping->i_mmap_rwsem); -} - /* * Might pages of this file be mapped into userspace? */ static inline int mapping_mapped(struct address_space *mapping) { - return !RB_EMPTY_ROOT(&mapping->i_mmap.rb_root); + return !RB_EMPTY_ROOT(&mapping->i_mmap); } /* * Might pages of this file have been modified in userspace? - * Note that i_mmap_writable counts all VM_SHARED vmas: do_mmap + * Note that i_mmap_writable counts all VM_SHARED vmas: do_mmap_pgoff * marks vma as VM_SHARED if it is shared, and the file was opened for * writing i.e. vma may be mprotected writable even if now readonly. * @@ -588,11 +577,6 @@ static inline void mapping_allow_writable(struct address_space *mapping) struct posix_acl; #define ACL_NOT_CACHED ((void *)(-1)) -/* - * ACL_DONT_CACHE is for stacked filesystems, that rely on underlying fs to - * cache the ACL. 
This also means that ->get_acl() can be called in RCU mode - * with the LOOKUP_RCU flag. - */ #define ACL_DONT_CACHE ((void *)(-3)) static inline struct posix_acl * @@ -611,9 +595,6 @@ is_uncached_acl(struct posix_acl *acl) #define IOP_LOOKUP 0x0002 #define IOP_NOFOLLOW 0x0004 #define IOP_XATTR 0x0008 -#define IOP_DEFAULT_READLINK 0x0010 - -struct fsnotify_mark_connector; /* * Keep mostly read-only and often accessed (especially for @@ -655,13 +636,12 @@ struct inode { }; dev_t i_rdev; loff_t i_size; - struct timespec64 i_atime; - struct timespec64 i_mtime; - struct timespec64 i_ctime; + struct timespec i_atime; + struct timespec i_mtime; + struct timespec i_ctime; spinlock_t i_lock; /* i_blocks, i_bytes, maybe i_size */ unsigned short i_bytes; - u8 i_blkbits; - u8 i_write_hint; + unsigned int i_blkbits; blkcnt_t i_blocks; #ifdef __NEED_I_SIZE_ORDERED @@ -692,23 +672,20 @@ struct inode { struct hlist_head i_dentry; struct rcu_head i_rcu; }; - atomic64_t i_version; - atomic64_t i_sequence; /* see futex */ + u64 i_version; atomic_t i_count; atomic_t i_dio_count; atomic_t i_writecount; -#if defined(CONFIG_IMA) || defined(CONFIG_FILE_LOCKING) +#ifdef CONFIG_IMA atomic_t i_readcount; /* struct files open RO */ #endif - union { - const struct file_operations *i_fop; /* former ->i_op->default_file_ops */ - void (*free_inode)(struct inode *); - }; + const struct file_operations *i_fop; /* former ->i_op->default_file_ops */ struct file_lock_context *i_flctx; struct address_space i_data; struct list_head i_devices; union { struct pipe_inode_info *i_pipe; + struct block_device *i_bdev; struct cdev *i_cdev; char *i_link; unsigned i_dir_seq; @@ -718,43 +695,21 @@ struct inode { #ifdef CONFIG_FSNOTIFY __u32 i_fsnotify_mask; /* all events this inode cares about */ - struct fsnotify_mark_connector __rcu *i_fsnotify_marks; + struct hlist_head i_fsnotify_marks; #endif -#ifdef CONFIG_FS_ENCRYPTION +#if IS_ENABLED(CONFIG_FS_ENCRYPTION) struct fscrypt_info *i_crypt_info; #endif -#ifdef 
CONFIG_FS_VERITY - struct fsverity_info *i_verity_info; -#endif - void *i_private; /* fs or device private pointer */ } __randomize_layout; -struct timespec64 timestamp_truncate(struct timespec64 t, struct inode *inode); - -static inline unsigned int i_blocksize(const struct inode *node) -{ - return (1 << node->i_blkbits); -} - static inline int inode_unhashed(struct inode *inode) { return hlist_unhashed(&inode->i_hash); } -/* - * __mark_inode_dirty expects inodes to be hashed. Since we don't - * want special inodes in the fileset inode space, we make them - * appear hashed, but do not put on any lists. hlist_del() - * will work fine and require no locking. - */ -static inline void inode_fake_hash(struct inode *inode) -{ - hlist_add_fake(&inode->i_hash); -} - /* * inode->i_mutex nesting subclasses for the lock validator: * @@ -821,47 +776,9 @@ static inline void inode_lock_nested(struct inode *inode, unsigned subclass) down_write_nested(&inode->i_rwsem, subclass); } -static inline void inode_lock_shared_nested(struct inode *inode, unsigned subclass) -{ - down_read_nested(&inode->i_rwsem, subclass); -} - -static inline void filemap_invalidate_lock(struct address_space *mapping) -{ - down_write(&mapping->invalidate_lock); -} - -static inline void filemap_invalidate_unlock(struct address_space *mapping) -{ - up_write(&mapping->invalidate_lock); -} - -static inline void filemap_invalidate_lock_shared(struct address_space *mapping) -{ - down_read(&mapping->invalidate_lock); -} - -static inline int filemap_invalidate_trylock_shared( - struct address_space *mapping) -{ - return down_read_trylock(&mapping->invalidate_lock); -} - -static inline void filemap_invalidate_unlock_shared( - struct address_space *mapping) -{ - up_read(&mapping->invalidate_lock); -} - void lock_two_nondirectories(struct inode *, struct inode*); void unlock_two_nondirectories(struct inode *, struct inode*); -void filemap_invalidate_lock_two(struct address_space *mapping1, - struct address_space 
*mapping2); -void filemap_invalidate_unlock_two(struct address_space *mapping1, - struct address_space *mapping2); - - /* * NOTE: in a 32bit arch with a preemptable kernel and * an UP compile the i_size_read/write must be atomic @@ -883,7 +800,7 @@ static inline loff_t i_size_read(const struct inode *inode) i_size = inode->i_size; } while (read_seqcount_retry(&inode->i_size_seqcount, seq)); return i_size; -#elif BITS_PER_LONG==32 && defined(CONFIG_PREEMPTION) +#elif BITS_PER_LONG==32 && defined(CONFIG_PREEMPT) loff_t i_size; preempt_disable(); @@ -908,7 +825,7 @@ static inline void i_size_write(struct inode *inode, loff_t i_size) inode->i_size = i_size; write_seqcount_end(&inode->i_size_seqcount); preempt_enable(); -#elif BITS_PER_LONG==32 && defined(CONFIG_PREEMPTION) +#elif BITS_PER_LONG==32 && defined(CONFIG_PREEMPT) preempt_disable(); inode->i_size = i_size; preempt_enable(); @@ -927,6 +844,8 @@ static inline unsigned imajor(const struct inode *inode) return MAJOR(inode->i_rdev); } +extern struct block_device *I_BDEV(struct inode *inode); + struct fown_struct { rwlock_t lock; /* protects pid, uid, euid fields */ struct pid *pid; /* pid or -pgrp where SIGIO should be sent */ @@ -935,22 +854,18 @@ struct fown_struct { int signum; /* posix.1b rt signal to be delivered on IO */ }; -/** - * struct file_ra_state - Track a file's readahead state. - * @start: Where the most recent readahead started. - * @size: Number of pages read in the most recent readahead. - * @async_size: Start next readahead when this many pages are left. - * @ra_pages: Maximum size of a readahead request. - * @mmap_miss: How many mmap accesses missed in the page cache. - * @prev_pos: The last byte in the most recent read request. 
+/* + * Track a single file's readahead state */ struct file_ra_state { - pgoff_t start; - unsigned int size; - unsigned int async_size; - unsigned int ra_pages; - unsigned int mmap_miss; - loff_t prev_pos; + pgoff_t start; /* where readahead started */ + unsigned int size; /* # of readahead pages */ + unsigned int async_size; /* do asynchronous readahead when + there are only # of pages ahead */ + + unsigned int ra_pages; /* Maximum readahead window */ + unsigned int mmap_miss; /* Cache miss stat for mmap accesses */ + loff_t prev_pos; /* Cache last read() position */ }; /* @@ -972,11 +887,10 @@ struct file { const struct file_operations *f_op; /* - * Protects f_ep, f_flags. + * Protects f_ep_links, f_flags. * Must not be taken from IRQ context. */ spinlock_t f_lock; - enum rw_hint f_write_hint; atomic_long_t f_count; unsigned int f_flags; fmode_t f_mode; @@ -995,19 +909,17 @@ struct file { #ifdef CONFIG_EPOLL /* Used by fs/eventpoll.c to link all the hooks to this file */ - struct hlist_head *f_ep; + struct list_head f_ep_links; + struct list_head f_tfile_llink; #endif /* #ifdef CONFIG_EPOLL */ struct address_space *f_mapping; - errseq_t f_wb_err; - errseq_t f_sb_err; /* for syncfs */ -} __randomize_layout - __attribute__((aligned(4))); /* lest something weird decides that 2 is OK */ +} __attribute__((aligned(4))) __randomize_layout; /* lest something weird decides that 2 is OK */ struct file_handle { __u32 handle_bytes; int handle_type; /* file identifier */ - unsigned char f_handle[]; + unsigned char f_handle[0]; }; static inline struct file *get_file(struct file *f) @@ -1015,9 +927,8 @@ static inline struct file *get_file(struct file *f) atomic_long_inc(&f->f_count); return f; } -#define get_file_rcu_many(x, cnt) \ - atomic_long_add_unless(&(x)->f_count, (cnt), 0) -#define get_file_rcu(x) get_file_rcu_many((x), 1) +#define get_file_rcu(x) atomic_long_inc_not_zero(&(x)->f_count) +#define fput_atomic(x) atomic_long_add_unless(&(x)->f_count, -1, 1) #define 
file_count(x) atomic_long_read(&(x)->f_count) #define MAX_NON_LFS ((1UL<<31) - 1) @@ -1025,9 +936,9 @@ static inline struct file *get_file(struct file *f) /* Page cache limit. The filesystems should put that into their s_maxbytes limits, otherwise bad things can happen in VM. */ #if BITS_PER_LONG==32 -#define MAX_LFS_FILESIZE ((loff_t)ULONG_MAX << PAGE_SHIFT) +#define MAX_LFS_FILESIZE (((loff_t)PAGE_SIZE << (BITS_PER_LONG-1))-1) #elif BITS_PER_LONG==64 -#define MAX_LFS_FILESIZE ((loff_t)LLONG_MAX) +#define MAX_LFS_FILESIZE ((loff_t)0x7fffffffffffffffLL) #endif #define FL_POSIX 1 @@ -1042,9 +953,6 @@ static inline struct file *get_file(struct file *f) #define FL_UNLOCK_PENDING 512 /* Lease is being broken */ #define FL_OFDLCK 1024 /* lock is "owned" by struct file */ #define FL_LAYOUT 2048 /* outstanding pNFS layout */ -#define FL_RECLAIM 4096 /* reclaiming from a reboot server */ - -#define FL_CLOSE_POSIX (FL_POSIX | FL_CLOSE) /* * Special return value from posix_lock_file() and vfs_lock_file() for @@ -1063,6 +971,8 @@ struct file_lock_operations { }; struct lock_manager_operations { + int (*lm_compare_owner)(struct file_lock *, struct file_lock *); + unsigned long (*lm_owner_key)(struct file_lock *); fl_owner_t (*lm_get_owner)(fl_owner_t); void (*lm_put_owner)(fl_owner_t); void (*lm_notify)(struct file_lock *); /* unblock callback */ @@ -1070,7 +980,6 @@ struct lock_manager_operations { bool (*lm_break)(struct file_lock *); int (*lm_change)(struct file_lock *, int, struct list_head *); void (*lm_setup)(struct file_lock *, void **); - bool (*lm_breaker_owns_lease)(struct file_lock *); }; struct lock_manager { @@ -1085,8 +994,8 @@ struct lock_manager { struct net; void locks_start_grace(struct net *, struct lock_manager *); void locks_end_grace(struct lock_manager *); -bool locks_in_grace(struct net *); -bool opens_in_grace(struct net *); +int locks_in_grace(struct net *); +int opens_in_grace(struct net *); /* that will die - we need it for nfs_lock_info */ #include 
@@ -1109,20 +1018,16 @@ bool opens_in_grace(struct net *); * Obviously, the last two criteria only matter for POSIX locks. */ struct file_lock { - struct file_lock *fl_blocker; /* The lock, that is blocking us */ + struct file_lock *fl_next; /* singly linked list for this inode */ struct list_head fl_list; /* link into file_lock_context */ struct hlist_node fl_link; /* node in global lists */ - struct list_head fl_blocked_requests; /* list of requests with - * ->fl_blocker pointing here - */ - struct list_head fl_blocked_member; /* node in - * ->fl_blocker->fl_blocked_requests - */ + struct list_head fl_block; /* circular list of blocked processes */ fl_owner_t fl_owner; unsigned int fl_flags; unsigned char fl_type; unsigned int fl_pid; int fl_link_cpu; /* what cpu's list is this on? */ + struct pid *fl_nspid; wait_queue_head_t fl_wait; struct file *fl_file; loff_t fl_start; @@ -1141,7 +1046,6 @@ struct file_lock { struct { struct list_head link; /* link in AFS vnode's pending_locks list */ int state; /* state of grant or error if -ve */ - unsigned int debug_id; } afs; } fl_u; } __randomize_layout; @@ -1160,19 +1064,31 @@ struct file_lock_context { #define OFFT_OFFSET_MAX INT_LIMIT(off_t) #endif +#include + extern void send_sigio(struct fown_struct *fown, int fd, int band); -#define locks_inode(f) file_inode(f) +/* + * Return the inode to use for locking + * + * For overlayfs this should be the overlay inode, not the real inode returned + * by file_inode(). For any other fs file_inode(filp) and locks_inode(filp) are + * equal. 
+ */ +static inline struct inode *locks_inode(const struct file *f) +{ + return f->f_path.dentry->d_inode; +} #ifdef CONFIG_FILE_LOCKING -extern int fcntl_getlk(struct file *, unsigned int, struct flock *); +extern int fcntl_getlk(struct file *, unsigned int, struct flock __user *); extern int fcntl_setlk(unsigned int, struct file *, unsigned int, - struct flock *); + struct flock __user *); #if BITS_PER_LONG == 32 -extern int fcntl_getlk64(struct file *, unsigned int, struct flock64 *); +extern int fcntl_getlk64(struct file *, unsigned int, struct flock64 __user *); extern int fcntl_setlk64(unsigned int, struct file *, unsigned int, - struct flock64 *); + struct flock64 __user *); #endif extern int fcntl_setlease(unsigned int fd, struct file *filp, long arg); @@ -1190,21 +1106,16 @@ extern void locks_remove_file(struct file *); extern void locks_release_private(struct file_lock *); extern void posix_test_lock(struct file *, struct file_lock *); extern int posix_lock_file(struct file *, struct file_lock *, struct file_lock *); -extern int locks_delete_block(struct file_lock *); +extern int posix_unblock_lock(struct file_lock *); extern int vfs_test_lock(struct file *, struct file_lock *); extern int vfs_lock_file(struct file *, unsigned int, struct file_lock *, struct file_lock *); extern int vfs_cancel_lock(struct file *filp, struct file_lock *fl); extern int locks_lock_inode_wait(struct inode *inode, struct file_lock *fl); extern int __break_lease(struct inode *inode, unsigned int flags, unsigned int type); -extern void lease_get_mtime(struct inode *, struct timespec64 *time); +extern void lease_get_mtime(struct inode *, struct timespec *time); extern int generic_setlease(struct file *, long, struct file_lock **, void **priv); extern int vfs_setlease(struct file *, long, struct file_lock **, void **); extern int lease_modify(struct file_lock *, int, struct list_head *); - -struct notifier_block; -extern int lease_register_notifier(struct notifier_block *); 
-extern void lease_unregister_notifier(struct notifier_block *); - struct files_struct; extern void show_fd_locks(struct seq_file *f, struct file *filp, struct files_struct *files); @@ -1285,7 +1196,7 @@ static inline int posix_lock_file(struct file *filp, struct file_lock *fl, return -ENOLCK; } -static inline int locks_delete_block(struct file_lock *waiter) +static inline int posix_unblock_lock(struct file_lock *waiter) { return -ENOENT; } @@ -1316,8 +1227,7 @@ static inline int __break_lease(struct inode *inode, unsigned int mode, unsigned return 0; } -static inline void lease_get_mtime(struct inode *inode, - struct timespec64 *time) +static inline void lease_get_mtime(struct inode *inode, struct timespec *time) { return; } @@ -1352,7 +1262,7 @@ static inline struct inode *file_inode(const struct file *f) static inline struct dentry *file_dentry(const struct file *file) { - return d_real(file->f_path.dentry, file_inode(file)); + return d_real(file->f_path.dentry, file_inode(file), 0); } static inline int locks_lock_file_wait(struct file *filp, struct file_lock *fl) @@ -1361,7 +1271,7 @@ static inline int locks_lock_file_wait(struct file *filp, struct file_lock *fl) } struct fasync_struct { - rwlock_t fa_lock; + spinlock_t fa_lock; int magic; int fa_fd; struct fasync_struct *fa_next; /* singly linked list */ @@ -1382,44 +1292,12 @@ extern void fasync_free(struct fasync_struct *); extern void kill_fasync(struct fasync_struct **, int, int); extern void __f_setown(struct file *filp, struct pid *, enum pid_type, int force); -extern int f_setown(struct file *filp, unsigned long arg, int force); +extern void f_setown(struct file *filp, unsigned long arg, int force); extern void f_delown(struct file *filp); extern pid_t f_getown(struct file *filp); extern int send_sigurg(struct fown_struct *fown); -/* - * sb->s_flags. Note that these mirror the equivalent MS_* flags where - * represented in both. 
- */ -#define SB_RDONLY 1 /* Mount read-only */ -#define SB_NOSUID 2 /* Ignore suid and sgid bits */ -#define SB_NODEV 4 /* Disallow access to device special files */ -#define SB_NOEXEC 8 /* Disallow program execution */ -#define SB_SYNCHRONOUS 16 /* Writes are synced at once */ -#define SB_MANDLOCK 64 /* Allow mandatory locks on an FS */ -#define SB_DIRSYNC 128 /* Directory modifications are synchronous */ -#define SB_NOATIME 1024 /* Do not update access times. */ -#define SB_NODIRATIME 2048 /* Do not update directory access times */ -#define SB_SILENT 32768 -#define SB_POSIXACL (1<<16) /* VFS does not apply the umask */ -#define SB_INLINECRYPT (1<<17) /* Use blk-crypto for encrypted files */ -#define SB_KERNMOUNT (1<<22) /* this is a kern_mount call */ -#define SB_I_VERSION (1<<23) /* Update inode I_version field */ -#define SB_LAZYTIME (1<<25) /* Update the on-disk [acm]times lazily */ - -/* These sb flags are internal to the kernel */ -#define SB_SUBMOUNT (1<<26) -#define SB_FORCE (1<<27) -#define SB_NOSEC (1<<28) -#define SB_BORN (1<<29) -#define SB_ACTIVE (1<<30) -#define SB_NOUSER (1<<31) - -/* These flags relate to encoding and casefolding */ -#define SB_ENC_STRICT_MODE_FL (1 << 0) - -#define sb_has_strict_encoding(sb) \ - (sb->s_encoding_flags & SB_ENC_STRICT_MODE_FL) +struct mm_struct; /* * Umount options @@ -1435,14 +1313,9 @@ extern int send_sigurg(struct fown_struct *fown); #define SB_I_CGROUPWB 0x00000001 /* cgroup-aware writeback enabled */ #define SB_I_NOEXEC 0x00000002 /* Ignore executables on this fs */ #define SB_I_NODEV 0x00000004 /* Ignore devices on this fs */ -#define SB_I_STABLE_WRITES 0x00000008 /* don't modify blks until WB is done */ /* sb->s_iflags to limit user namespace mounts */ #define SB_I_USERNS_VISIBLE 0x00000010 /* fstype already mounted */ -#define SB_I_IMA_UNVERIFIABLE_SIGNATURE 0x00000020 -#define SB_I_UNTRUSTED_MOUNTER 0x00000040 - -#define SB_I_SKIP_SYNC 0x00000100 /* Skip superblock at global sync */ /* Possible states of 
'frozen' field */ enum { @@ -1458,7 +1331,7 @@ enum { struct sb_writers { int frozen; /* Is sb frozen? */ - wait_queue_head_t wait_unfrozen; /* wait for thaw */ + wait_queue_head_t wait_unfrozen; /* for get_super_thawed() */ struct percpu_rw_semaphore rw_sem[SB_FREEZE_LEVELS]; }; @@ -1484,18 +1357,10 @@ struct super_block { void *s_security; #endif const struct xattr_handler **s_xattr; -#ifdef CONFIG_FS_ENCRYPTION + const struct fscrypt_operations *s_cop; - struct key *s_master_keys; /* master crypto keys in use */ -#endif -#ifdef CONFIG_FS_VERITY - const struct fsverity_operations *s_vop; -#endif -#ifdef CONFIG_UNICODE - struct unicode_map *s_encoding; - __u16 s_encoding_flags; -#endif - struct hlist_bl_head s_roots; /* alternate root dentries for NFS */ + + struct hlist_bl_head s_anon; /* anonymous dentries for (nfs) exporting */ struct list_head s_mounts; /* list of mounts; _not_ for fs use */ struct block_device *s_bdev; struct backing_dev_info *s_bdi; @@ -1506,29 +1371,17 @@ struct super_block { struct sb_writers s_writers; - /* - * Keep s_fs_info, s_time_gran, s_fsnotify_mask, and - * s_fsnotify_marks together for cache efficiency. They are frequently - * accessed and rarely modified. - */ - void *s_fs_info; /* Filesystem private info */ - - /* Granularity of c/m/atime in ns (cannot be worse than a second) */ - u32 s_time_gran; - /* Time limits for c/m/atime in seconds */ - time64_t s_time_min; - time64_t s_time_max; -#ifdef CONFIG_FSNOTIFY - __u32 s_fsnotify_mask; - struct fsnotify_mark_connector __rcu *s_fsnotify_marks; -#endif - - char s_id[32]; /* Informational name */ - uuid_t s_uuid; /* UUID */ + char s_id[32]; /* Informational name */ + u8 s_uuid[16]; /* UUID */ + void *s_fs_info; /* Filesystem private info */ unsigned int s_max_links; fmode_t s_mode; + /* Granularity of c/m/atime in ns. + Cannot be worse than a second */ + u32 s_time_gran; + /* * The next field is for VFS *only*. No filesystems have any business * even looking at it. 
You had been warned. @@ -1539,8 +1392,13 @@ struct super_block { * Filesystem subtype. If non-empty the filesystem type field * in /proc/mounts will be "type.subtype" */ - const char *s_subtype; + char *s_subtype; + /* + * Saved mount options for lazy filesystems using + * generic_show_options() + */ + char __rcu *s_options; const struct dentry_operations *s_d_op; /* default d_op for dentries */ /* @@ -1553,18 +1411,9 @@ struct super_block { /* Number of inodes with nlink == 0 but still referenced */ atomic_long_t s_remove_count; - /* - * Number of inode/mount/sb objects that are being watched, note that - * inodes objects are currently double-accounted. - */ - atomic_long_t s_fsnotify_connectors; - /* Being remounted read-only */ int s_readonly_remount; - /* per-sb errseq_t for reporting writeback errors via syncfs */ - errseq_t s_wb_err; - /* AIO completions deferred from interrupt context */ struct workqueue_struct *s_dio_done_wq; struct hlist_head s_pins; @@ -1577,12 +1426,11 @@ struct super_block { struct user_namespace *s_user_ns; /* - * The list_lru structure is essentially just a pointer to a table - * of per-node lru lists, each of which has its own spinlock. - * There is no need to put them into separate cachelines. + * Keep the lru lists last in the structure so they always sit on their + * own individual cachelines. */ - struct list_lru s_dentry_lru; - struct list_lru s_inode_lru; + struct list_lru s_dentry_lru ____cacheline_aligned_in_smp; + struct list_lru s_inode_lru ____cacheline_aligned_in_smp; struct rcu_head rcu; struct work_struct destroy_work; @@ -1626,196 +1474,15 @@ static inline void i_gid_write(struct inode *inode, gid_t gid) inode->i_gid = make_kgid(inode->i_sb->s_user_ns, gid); } -/** - * kuid_into_mnt - map a kuid down into a mnt_userns - * @mnt_userns: user namespace of the relevant mount - * @kuid: kuid to be mapped - * - * Return: @kuid mapped according to @mnt_userns. - * If @kuid has no mapping INVALID_UID is returned. 
- */ -static inline kuid_t kuid_into_mnt(struct user_namespace *mnt_userns, - kuid_t kuid) -{ - return make_kuid(mnt_userns, __kuid_val(kuid)); -} - -/** - * kgid_into_mnt - map a kgid down into a mnt_userns - * @mnt_userns: user namespace of the relevant mount - * @kgid: kgid to be mapped - * - * Return: @kgid mapped according to @mnt_userns. - * If @kgid has no mapping INVALID_GID is returned. - */ -static inline kgid_t kgid_into_mnt(struct user_namespace *mnt_userns, - kgid_t kgid) -{ - return make_kgid(mnt_userns, __kgid_val(kgid)); -} - -/** - * i_uid_into_mnt - map an inode's i_uid down into a mnt_userns - * @mnt_userns: user namespace of the mount the inode was found from - * @inode: inode to map - * - * Return: the inode's i_uid mapped down according to @mnt_userns. - * If the inode's i_uid has no mapping INVALID_UID is returned. - */ -static inline kuid_t i_uid_into_mnt(struct user_namespace *mnt_userns, - const struct inode *inode) -{ - return kuid_into_mnt(mnt_userns, inode->i_uid); -} - -/** - * i_gid_into_mnt - map an inode's i_gid down into a mnt_userns - * @mnt_userns: user namespace of the mount the inode was found from - * @inode: inode to map - * - * Return: the inode's i_gid mapped down according to @mnt_userns. - * If the inode's i_gid has no mapping INVALID_GID is returned. - */ -static inline kgid_t i_gid_into_mnt(struct user_namespace *mnt_userns, - const struct inode *inode) -{ - return kgid_into_mnt(mnt_userns, inode->i_gid); -} - -/** - * kuid_from_mnt - map a kuid up into a mnt_userns - * @mnt_userns: user namespace of the relevant mount - * @kuid: kuid to be mapped - * - * Return: @kuid mapped up according to @mnt_userns. - * If @kuid has no mapping INVALID_UID is returned. 
- */ -static inline kuid_t kuid_from_mnt(struct user_namespace *mnt_userns, - kuid_t kuid) -{ - return KUIDT_INIT(from_kuid(mnt_userns, kuid)); -} - -/** - * kgid_from_mnt - map a kgid up into a mnt_userns - * @mnt_userns: user namespace of the relevant mount - * @kgid: kgid to be mapped - * - * Return: @kgid mapped up according to @mnt_userns. - * If @kgid has no mapping INVALID_GID is returned. - */ -static inline kgid_t kgid_from_mnt(struct user_namespace *mnt_userns, - kgid_t kgid) -{ - return KGIDT_INIT(from_kgid(mnt_userns, kgid)); -} - -/** - * mapped_fsuid - return caller's fsuid mapped up into a mnt_userns - * @mnt_userns: user namespace of the relevant mount - * - * Use this helper to initialize a new vfs or filesystem object based on - * the caller's fsuid. A common example is initializing the i_uid field of - * a newly allocated inode triggered by a creation event such as mkdir or - * O_CREAT. Other examples include the allocation of quotas for a specific - * user. - * - * Return: the caller's current fsuid mapped up according to @mnt_userns. - */ -static inline kuid_t mapped_fsuid(struct user_namespace *mnt_userns) -{ - return kuid_from_mnt(mnt_userns, current_fsuid()); -} - -/** - * mapped_fsgid - return caller's fsgid mapped up into a mnt_userns - * @mnt_userns: user namespace of the relevant mount - * - * Use this helper to initialize a new vfs or filesystem object based on - * the caller's fsgid. A common example is initializing the i_gid field of - * a newly allocated inode triggered by a creation event such as mkdir or - * O_CREAT. Other examples include the allocation of quotas for a specific - * user. - * - * Return: the caller's current fsgid mapped up according to @mnt_userns. 
- */ -static inline kgid_t mapped_fsgid(struct user_namespace *mnt_userns) -{ - return kgid_from_mnt(mnt_userns, current_fsgid()); -} - -/** - * inode_fsuid_set - initialize inode's i_uid field with callers fsuid - * @inode: inode to initialize - * @mnt_userns: user namespace of the mount the inode was found from - * - * Initialize the i_uid field of @inode. If the inode was found/created via - * an idmapped mount map the caller's fsuid according to @mnt_users. - */ -static inline void inode_fsuid_set(struct inode *inode, - struct user_namespace *mnt_userns) -{ - inode->i_uid = mapped_fsuid(mnt_userns); -} - -/** - * inode_fsgid_set - initialize inode's i_gid field with callers fsgid - * @inode: inode to initialize - * @mnt_userns: user namespace of the mount the inode was found from - * - * Initialize the i_gid field of @inode. If the inode was found/created via - * an idmapped mount map the caller's fsgid according to @mnt_users. - */ -static inline void inode_fsgid_set(struct inode *inode, - struct user_namespace *mnt_userns) -{ - inode->i_gid = mapped_fsgid(mnt_userns); -} - -/** - * fsuidgid_has_mapping() - check whether caller's fsuid/fsgid is mapped - * @sb: the superblock we want a mapping in - * @mnt_userns: user namespace of the relevant mount - * - * Check whether the caller's fsuid and fsgid have a valid mapping in the - * s_user_ns of the superblock @sb. If the caller is on an idmapped mount map - * the caller's fsuid and fsgid according to the @mnt_userns first. - * - * Return: true if fsuid and fsgid is mapped, false if not. 
- */ -static inline bool fsuidgid_has_mapping(struct super_block *sb, - struct user_namespace *mnt_userns) -{ - struct user_namespace *s_user_ns = sb->s_user_ns; - - return kuid_has_mapping(s_user_ns, mapped_fsuid(mnt_userns)) && - kgid_has_mapping(s_user_ns, mapped_fsgid(mnt_userns)); -} - -extern struct timespec64 current_time(struct inode *inode); +extern struct timespec current_fs_time(struct super_block *sb); +extern struct timespec current_time(struct inode *inode); /* * Snapshotting support. */ -/* - * These are internal functions, please use sb_start_{write,pagefault,intwrite} - * instead. - */ -static inline void __sb_end_write(struct super_block *sb, int level) -{ - percpu_up_read(sb->s_writers.rw_sem + level-1); -} - -static inline void __sb_start_write(struct super_block *sb, int level) -{ - percpu_down_read(sb->s_writers.rw_sem + level - 1); -} - -static inline bool __sb_start_write_trylock(struct super_block *sb, int level) -{ - return percpu_down_read_trylock(sb->s_writers.rw_sem + level - 1); -} +void __sb_end_write(struct super_block *sb, int level); +int __sb_start_write(struct super_block *sb, int level, bool wait); #define __sb_writers_acquired(sb, lev) \ percpu_rwsem_acquire(&(sb)->s_writers.rw_sem[(lev)-1], 1, _THIS_IP_) @@ -1879,12 +1546,12 @@ static inline void sb_end_intwrite(struct super_block *sb) */ static inline void sb_start_write(struct super_block *sb) { - __sb_start_write(sb, SB_FREEZE_WRITE); + __sb_start_write(sb, SB_FREEZE_WRITE, true); } -static inline bool sb_start_write_trylock(struct super_block *sb) +static inline int sb_start_write_trylock(struct super_block *sb) { - return __sb_start_write_trylock(sb, SB_FREEZE_WRITE); + return __sb_start_write(sb, SB_FREEZE_WRITE, false); } /** @@ -1900,18 +1567,18 @@ static inline bool sb_start_write_trylock(struct super_block *sb) * * Since page fault freeze protection behaves as a lock, users have to preserve * ordering of freeze protection and other filesystem locks. 
It is advised to - * put sb_start_pagefault() close to mmap_lock in lock ordering. Page fault + * put sb_start_pagefault() close to mmap_sem in lock ordering. Page fault * handling code implies lock dependency: * - * mmap_lock + * mmap_sem * -> sb_start_pagefault */ static inline void sb_start_pagefault(struct super_block *sb) { - __sb_start_write(sb, SB_FREEZE_PAGEFAULT); + __sb_start_write(sb, SB_FREEZE_PAGEFAULT, true); } -/** +/* * sb_start_intwrite - get write access to a superblock for internal fs purposes * @sb: the super we write to * @@ -1926,91 +1593,60 @@ static inline void sb_start_pagefault(struct super_block *sb) */ static inline void sb_start_intwrite(struct super_block *sb) { - __sb_start_write(sb, SB_FREEZE_FS); + __sb_start_write(sb, SB_FREEZE_FS, true); } -static inline bool sb_start_intwrite_trylock(struct super_block *sb) -{ - return __sb_start_write_trylock(sb, SB_FREEZE_FS); -} -bool inode_owner_or_capable(struct user_namespace *mnt_userns, - const struct inode *inode); +extern bool inode_owner_or_capable(const struct inode *inode); /* * VFS helper functions.. 
*/ -int vfs_create(struct user_namespace *, struct inode *, - struct dentry *, umode_t, bool); -int vfs_mkdir(struct user_namespace *, struct inode *, - struct dentry *, umode_t); -int vfs_mknod(struct user_namespace *, struct inode *, struct dentry *, - umode_t, dev_t); -int vfs_symlink(struct user_namespace *, struct inode *, - struct dentry *, const char *); -int vfs_link(struct dentry *, struct user_namespace *, struct inode *, - struct dentry *, struct inode **); -int vfs_rmdir(struct user_namespace *, struct inode *, struct dentry *); -int vfs_unlink(struct user_namespace *, struct inode *, struct dentry *, - struct inode **); - -/** - * struct renamedata - contains all information required for renaming - * @old_mnt_userns: old user namespace of the mount the inode was found from - * @old_dir: parent of source - * @old_dentry: source - * @new_mnt_userns: new user namespace of the mount the inode was found from - * @new_dir: parent of destination - * @new_dentry: destination - * @delegated_inode: returns an inode needing a delegation break - * @flags: rename flags - */ -struct renamedata { - struct user_namespace *old_mnt_userns; - struct inode *old_dir; - struct dentry *old_dentry; - struct user_namespace *new_mnt_userns; - struct inode *new_dir; - struct dentry *new_dentry; - struct inode **delegated_inode; - unsigned int flags; -} __randomize_layout; - -int vfs_rename(struct renamedata *); - -static inline int vfs_whiteout(struct user_namespace *mnt_userns, - struct inode *dir, struct dentry *dentry) -{ - return vfs_mknod(mnt_userns, dir, dentry, S_IFCHR | WHITEOUT_MODE, - WHITEOUT_DEV); -} - -struct dentry *vfs_tmpfile(struct user_namespace *mnt_userns, - struct dentry *dentry, umode_t mode, int open_flag); - -int vfs_mkobj(struct dentry *, umode_t, - int (*f)(struct dentry *, umode_t, void *), - void *); - -int vfs_fchown(struct file *file, uid_t user, gid_t group); -int vfs_fchmod(struct file *file, umode_t mode); -int vfs_utimes(const struct path *path, 
struct timespec64 *times); - -extern long vfs_ioctl(struct file *file, unsigned int cmd, unsigned long arg); - -#ifdef CONFIG_COMPAT -extern long compat_ptr_ioctl(struct file *file, unsigned int cmd, - unsigned long arg); -#else -#define compat_ptr_ioctl NULL -#endif +extern int vfs_create(struct inode *, struct dentry *, umode_t, bool); +extern int vfs_mkdir(struct inode *, struct dentry *, umode_t); +extern int vfs_mknod(struct inode *, struct dentry *, umode_t, dev_t); +extern int vfs_symlink(struct inode *, struct dentry *, const char *); +extern int vfs_link(struct dentry *, struct inode *, struct dentry *, struct inode **); +extern int vfs_rmdir(struct inode *, struct dentry *); +extern int vfs_unlink(struct inode *, struct dentry *, struct inode **); +extern int vfs_rename(struct inode *, struct dentry *, struct inode *, struct dentry *, struct inode **, unsigned int); +extern int vfs_whiteout(struct inode *, struct dentry *); /* * VFS file helper functions. */ -void inode_init_owner(struct user_namespace *mnt_userns, struct inode *inode, - const struct inode *dir, umode_t mode); +extern void inode_init_owner(struct inode *inode, const struct inode *dir, + umode_t mode); extern bool may_open_dev(const struct path *path); +/* + * VFS FS_IOC_FIEMAP helper definitions. + */ +struct fiemap_extent_info { + unsigned int fi_flags; /* Flags as passed from user */ + unsigned int fi_extents_mapped; /* Number of mapped extents */ + unsigned int fi_extents_max; /* Size of fiemap_extent array */ + struct fiemap_extent __user *fi_extents_start; /* Start of + fiemap_extent array */ +}; +int fiemap_fill_next_extent(struct fiemap_extent_info *info, u64 logical, + u64 phys, u64 len, u32 flags); +int fiemap_check_flags(struct fiemap_extent_info *fieinfo, u32 fs_flags); + +/* + * File types + * + * NOTE! These match bits 12..15 of stat.st_mode + * (ie "(i_mode >> 12) & 15"). 
+ */ +#define DT_UNKNOWN 0 +#define DT_FIFO 1 +#define DT_CHR 2 +#define DT_DIR 4 +#define DT_BLK 6 +#define DT_REG 8 +#define DT_LNK 10 +#define DT_SOCK 12 +#define DT_WHT 14 /* * This is the "filldir" function type, used by readdir() to let @@ -2023,10 +1659,18 @@ typedef int (*filldir_t)(struct dir_context *, const char *, int, loff_t, u64, unsigned); struct dir_context { - filldir_t actor; + const filldir_t actor; loff_t pos; }; +struct block_device_operations; + +/* These macros are for out of kernel modules to test that + * the kernel supports the unlocked_ioctl and compat_ioctl + * fields in struct file_operations. */ +#define HAVE_COMPAT_IOCTL 1 +#define HAVE_UNLOCKED_IOCTL 1 + /* * These flags let !MMU mmap() govern direct device mapping vs immediate * copying more easily for MAP_PRIVATE, especially for ROM filesystems. @@ -2046,25 +1690,6 @@ struct dir_context { #define NOMMU_VMFLAGS \ (NOMMU_MAP_READ | NOMMU_MAP_WRITE | NOMMU_MAP_EXEC) -/* - * These flags control the behavior of the remap_file_range function pointer. - * If it is called with len == 0 that means "remap to end of source file". - * See Documentation/filesystems/vfs.rst for more details about this call. - * - * REMAP_FILE_DEDUP: only remap if contents identical (i.e. deduplicate) - * REMAP_FILE_CAN_SHORTEN: caller can handle a shortened request - */ -#define REMAP_FILE_DEDUP (1 << 0) -#define REMAP_FILE_CAN_SHORTEN (1 << 1) - -/* - * These flags signal that the caller is ok with altering various aspects of - * the behavior of the remap operation. The changes must be made by the - * implementation; the vfs remap helper functions can take advantage of them. - * Flags in this category exist to preserve the quirky behavior of the hoisted - * btrfs clone/dedupe ioctls. 
- */ -#define REMAP_FILE_ADVISORY (REMAP_FILE_CAN_SHORTEN) struct iov_iter; @@ -2075,14 +1700,12 @@ struct file_operations { ssize_t (*write) (struct file *, const char __user *, size_t, loff_t *); ssize_t (*read_iter) (struct kiocb *, struct iov_iter *); ssize_t (*write_iter) (struct kiocb *, struct iov_iter *); - int (*iopoll)(struct kiocb *kiocb, bool spin); int (*iterate) (struct file *, struct dir_context *); int (*iterate_shared) (struct file *, struct dir_context *); - __poll_t (*poll) (struct file *, struct poll_table_struct *); + unsigned int (*poll) (struct file *, struct poll_table_struct *); long (*unlocked_ioctl) (struct file *, unsigned int, unsigned long); long (*compat_ioctl) (struct file *, unsigned int, unsigned long); int (*mmap) (struct file *, struct vm_area_struct *); - unsigned long mmap_supported_flags; int (*open) (struct inode *, struct file *); int (*flush) (struct file *, fl_owner_t id); int (*release) (struct inode *, struct file *); @@ -2104,98 +1727,66 @@ struct file_operations { #endif ssize_t (*copy_file_range)(struct file *, loff_t, struct file *, loff_t, size_t, unsigned int); - loff_t (*remap_file_range)(struct file *file_in, loff_t pos_in, - struct file *file_out, loff_t pos_out, - loff_t len, unsigned int remap_flags); - int (*fadvise)(struct file *, loff_t, loff_t, int); -} __randomize_layout; + int (*clone_file_range)(struct file *, loff_t, struct file *, loff_t, + u64); + ssize_t (*dedupe_file_range)(struct file *, u64, u64, struct file *, + u64); +} __do_const __randomize_layout; +typedef struct file_operations __no_const file_operations_no_const; struct inode_operations { struct dentry * (*lookup) (struct inode *,struct dentry *, unsigned int); const char * (*get_link) (struct dentry *, struct inode *, struct delayed_call *); - int (*permission) (struct user_namespace *, struct inode *, int); - struct posix_acl * (*get_acl)(struct inode *, int, bool); + int (*permission) (struct inode *, int); + struct posix_acl * 
(*get_acl)(struct inode *, int); int (*readlink) (struct dentry *, char __user *,int); - int (*create) (struct user_namespace *, struct inode *,struct dentry *, - umode_t, bool); + int (*create) (struct inode *,struct dentry *, umode_t, bool); int (*link) (struct dentry *,struct inode *,struct dentry *); int (*unlink) (struct inode *,struct dentry *); - int (*symlink) (struct user_namespace *, struct inode *,struct dentry *, - const char *); - int (*mkdir) (struct user_namespace *, struct inode *,struct dentry *, - umode_t); + int (*symlink) (struct inode *,struct dentry *,const char *); + int (*mkdir) (struct inode *,struct dentry *,umode_t); int (*rmdir) (struct inode *,struct dentry *); - int (*mknod) (struct user_namespace *, struct inode *,struct dentry *, - umode_t,dev_t); - int (*rename) (struct user_namespace *, struct inode *, struct dentry *, + int (*mknod) (struct inode *,struct dentry *,umode_t,dev_t); + int (*rename) (struct inode *, struct dentry *, struct inode *, struct dentry *, unsigned int); - int (*setattr) (struct user_namespace *, struct dentry *, - struct iattr *); - int (*getattr) (struct user_namespace *, const struct path *, - struct kstat *, u32, unsigned int); + int (*setattr) (struct dentry *, struct iattr *); + int (*getattr) (struct vfsmount *mnt, struct dentry *, struct kstat *); ssize_t (*listxattr) (struct dentry *, char *, size_t); int (*fiemap)(struct inode *, struct fiemap_extent_info *, u64 start, u64 len); - int (*update_time)(struct inode *, struct timespec64 *, int); + int (*update_time)(struct inode *, struct timespec *, int); int (*atomic_open)(struct inode *, struct dentry *, struct file *, unsigned open_flag, - umode_t create_mode); - int (*tmpfile) (struct user_namespace *, struct inode *, - struct dentry *, umode_t); - int (*set_acl)(struct user_namespace *, struct inode *, - struct posix_acl *, int); - int (*fileattr_set)(struct user_namespace *mnt_userns, - struct dentry *dentry, struct fileattr *fa); - int 
(*fileattr_get)(struct dentry *dentry, struct fileattr *fa); + umode_t create_mode, int *opened); + int (*tmpfile) (struct inode *, struct dentry *, umode_t); + int (*set_acl)(struct inode *, struct posix_acl *, int); } ____cacheline_aligned; -static inline ssize_t call_read_iter(struct file *file, struct kiocb *kio, - struct iov_iter *iter) -{ - return file->f_op->read_iter(kio, iter); -} - -static inline ssize_t call_write_iter(struct file *file, struct kiocb *kio, - struct iov_iter *iter) -{ - return file->f_op->write_iter(kio, iter); -} - -static inline int call_mmap(struct file *file, struct vm_area_struct *vma) -{ - return file->f_op->mmap(file, vma); -} +ssize_t rw_copy_check_uvector(int type, const struct iovec __user * uvector, + unsigned long nr_segs, unsigned long fast_segs, + struct iovec *fast_pointer, + struct iovec **ret_pointer); +extern ssize_t __vfs_read(struct file *, char __user *, size_t, loff_t *); +extern ssize_t __vfs_write(struct file *, const char __user *, size_t, loff_t *); extern ssize_t vfs_read(struct file *, char __user *, size_t, loff_t *); extern ssize_t vfs_write(struct file *, const char __user *, size_t, loff_t *); +extern ssize_t vfs_readv(struct file *, const struct iovec __user *, + unsigned long, loff_t *, int); +extern ssize_t vfs_writev(struct file *, const struct iovec __user *, + unsigned long, loff_t *, int); extern ssize_t vfs_copy_file_range(struct file *, loff_t , struct file *, loff_t, size_t, unsigned int); -extern ssize_t generic_copy_file_range(struct file *file_in, loff_t pos_in, - struct file *file_out, loff_t pos_out, - size_t len, unsigned int flags); -extern int generic_remap_file_range_prep(struct file *file_in, loff_t pos_in, - struct file *file_out, loff_t pos_out, - loff_t *count, - unsigned int remap_flags); -extern loff_t do_clone_file_range(struct file *file_in, loff_t pos_in, - struct file *file_out, loff_t pos_out, - loff_t len, unsigned int remap_flags); -extern loff_t vfs_clone_file_range(struct 
file *file_in, loff_t pos_in, - struct file *file_out, loff_t pos_out, - loff_t len, unsigned int remap_flags); +extern int vfs_clone_file_range(struct file *file_in, loff_t pos_in, + struct file *file_out, loff_t pos_out, u64 len); extern int vfs_dedupe_file_range(struct file *file, struct file_dedupe_range *same); -extern loff_t vfs_dedupe_file_range_one(struct file *src_file, loff_t src_pos, - struct file *dst_file, loff_t dst_pos, - loff_t len, unsigned int remap_flags); - struct super_operations { struct inode *(*alloc_inode)(struct super_block *sb); void (*destroy_inode)(struct inode *); - void (*free_inode)(struct inode *); void (*dirty_inode) (struct inode *, int flags); int (*write_inode) (struct inode *, struct writeback_control *wbc); @@ -2220,6 +1811,7 @@ struct super_operations { ssize_t (*quota_write)(struct super_block *, int, const char *, size_t, loff_t); struct dquot **(*get_dquots)(struct inode *); #endif + int (*bdev_try_to_free_page)(struct super_block*, struct page*, gfp_t); long (*nr_cached_objects)(struct super_block *, struct shrink_control *); long (*free_cached_objects)(struct super_block *, @@ -2229,27 +1821,24 @@ struct super_operations { /* * Inode flags - they have no relation to superblock flags now */ -#define S_SYNC (1 << 0) /* Writes are synced at once */ -#define S_NOATIME (1 << 1) /* Do not update access times */ -#define S_APPEND (1 << 2) /* Append-only file */ -#define S_IMMUTABLE (1 << 3) /* Immutable file */ -#define S_DEAD (1 << 4) /* removed, but still open directory */ -#define S_NOQUOTA (1 << 5) /* Inode is not counted to quota */ -#define S_DIRSYNC (1 << 6) /* Directory modifications are synchronous */ -#define S_NOCMTIME (1 << 7) /* Do not update file c/mtime */ -#define S_SWAPFILE (1 << 8) /* Do not truncate: swapon got its bmaps */ -#define S_PRIVATE (1 << 9) /* Inode is fs-internal */ -#define S_IMA (1 << 10) /* Inode has an associated IMA struct */ -#define S_AUTOMOUNT (1 << 11) /* Automount/referral 
quasi-directory */ -#define S_NOSEC (1 << 12) /* no suid or xattr security attributes */ +#define S_SYNC 1 /* Writes are synced at once */ +#define S_NOATIME 2 /* Do not update access times */ +#define S_APPEND 4 /* Append-only file */ +#define S_IMMUTABLE 8 /* Immutable file */ +#define S_DEAD 16 /* removed, but still open directory */ +#define S_NOQUOTA 32 /* Inode is not counted to quota */ +#define S_DIRSYNC 64 /* Directory modifications are synchronous */ +#define S_NOCMTIME 128 /* Do not update file c/mtime */ +#define S_SWAPFILE 256 /* Do not truncate: swapon got its bmaps */ +#define S_PRIVATE 512 /* Inode is fs-internal */ +#define S_IMA 1024 /* Inode has an associated IMA struct */ +#define S_AUTOMOUNT 2048 /* Automount/referral quasi-directory */ +#define S_NOSEC 4096 /* no suid or xattr security attributes */ #ifdef CONFIG_FS_DAX -#define S_DAX (1 << 13) /* Direct Access, avoiding the page cache */ +#define S_DAX 8192 /* Direct Access, avoiding the page cache */ #else -#define S_DAX 0 /* Make all the DAX code disappear */ +#define S_DAX 0 /* Make all the DAX code disappear */ #endif -#define S_ENCRYPTED (1 << 14) /* Encrypted file (using fs/crypto/) */ -#define S_CASEFOLD (1 << 15) /* Casefolded file */ -#define S_VERITY (1 << 16) /* Verity file (using fs/verity/) */ /* * Note that nosuid etc flags are inode-specific: setting some file-system @@ -2257,7 +1846,7 @@ struct super_operations { * possible to override it selectively if you really wanted to with some * ioctl() that is not currently implemented. * - * Exception: SB_RDONLY is always applied to the entire file system. + * Exception: MS_RDONLY is always applied to the entire file system. * * Unfortunately, it is possible to change a filesystems flags with it mounted * with files in use. 
This means that all of the inodes will not have their @@ -2266,20 +1855,19 @@ struct super_operations { */ #define __IS_FLG(inode, flg) ((inode)->i_sb->s_flags & (flg)) -static inline bool sb_rdonly(const struct super_block *sb) { return sb->s_flags & SB_RDONLY; } -#define IS_RDONLY(inode) sb_rdonly((inode)->i_sb) -#define IS_SYNC(inode) (__IS_FLG(inode, SB_SYNCHRONOUS) || \ +#define IS_RDONLY(inode) ((inode)->i_sb->s_flags & MS_RDONLY) +#define IS_SYNC(inode) (__IS_FLG(inode, MS_SYNCHRONOUS) || \ ((inode)->i_flags & S_SYNC)) -#define IS_DIRSYNC(inode) (__IS_FLG(inode, SB_SYNCHRONOUS|SB_DIRSYNC) || \ +#define IS_DIRSYNC(inode) (__IS_FLG(inode, MS_SYNCHRONOUS|MS_DIRSYNC) || \ ((inode)->i_flags & (S_SYNC|S_DIRSYNC))) -#define IS_MANDLOCK(inode) __IS_FLG(inode, SB_MANDLOCK) -#define IS_NOATIME(inode) __IS_FLG(inode, SB_RDONLY|SB_NOATIME) -#define IS_I_VERSION(inode) __IS_FLG(inode, SB_I_VERSION) +#define IS_MANDLOCK(inode) __IS_FLG(inode, MS_MANDLOCK) +#define IS_NOATIME(inode) __IS_FLG(inode, MS_RDONLY|MS_NOATIME) +#define IS_I_VERSION(inode) __IS_FLG(inode, MS_I_VERSION) #define IS_NOQUOTA(inode) ((inode)->i_flags & S_NOQUOTA) #define IS_APPEND(inode) ((inode)->i_flags & S_APPEND) #define IS_IMMUTABLE(inode) ((inode)->i_flags & S_IMMUTABLE) -#define IS_POSIXACL(inode) __IS_FLG(inode, SB_POSIXACL) +#define IS_POSIXACL(inode) __IS_FLG(inode, MS_POSIXACL) #define IS_DEADDIR(inode) ((inode)->i_flags & S_DEAD) #define IS_NOCMTIME(inode) ((inode)->i_flags & S_NOCMTIME) @@ -2289,66 +1877,20 @@ static inline bool sb_rdonly(const struct super_block *sb) { return sb->s_flags #define IS_AUTOMOUNT(inode) ((inode)->i_flags & S_AUTOMOUNT) #define IS_NOSEC(inode) ((inode)->i_flags & S_NOSEC) #define IS_DAX(inode) ((inode)->i_flags & S_DAX) -#define IS_ENCRYPTED(inode) ((inode)->i_flags & S_ENCRYPTED) -#define IS_CASEFOLDED(inode) ((inode)->i_flags & S_CASEFOLD) -#define IS_VERITY(inode) ((inode)->i_flags & S_VERITY) #define IS_WHITEOUT(inode) (S_ISCHR(inode->i_mode) && \ 
(inode)->i_rdev == WHITEOUT_DEV) -static inline bool HAS_UNMAPPED_ID(struct user_namespace *mnt_userns, - struct inode *inode) +static inline bool HAS_UNMAPPED_ID(struct inode *inode) { - return !uid_valid(i_uid_into_mnt(mnt_userns, inode)) || - !gid_valid(i_gid_into_mnt(mnt_userns, inode)); -} - -static inline enum rw_hint file_write_hint(struct file *file) -{ - if (file->f_write_hint != WRITE_LIFE_NOT_SET) - return file->f_write_hint; - - return file_inode(file)->i_write_hint; -} - -static inline int iocb_flags(struct file *file); - -static inline u16 ki_hint_validate(enum rw_hint hint) -{ - typeof(((struct kiocb *)0)->ki_hint) max_hint = -1; - - if (hint <= max_hint) - return hint; - return 0; -} - -static inline void init_sync_kiocb(struct kiocb *kiocb, struct file *filp) -{ - *kiocb = (struct kiocb) { - .ki_filp = filp, - .ki_flags = iocb_flags(filp), - .ki_hint = ki_hint_validate(file_write_hint(filp)), - .ki_ioprio = get_current_ioprio(), - }; -} - -static inline void kiocb_clone(struct kiocb *kiocb, struct kiocb *kiocb_src, - struct file *filp) -{ - *kiocb = (struct kiocb) { - .ki_filp = filp, - .ki_flags = kiocb_src->ki_flags, - .ki_hint = kiocb_src->ki_hint, - .ki_ioprio = kiocb_src->ki_ioprio, - .ki_pos = kiocb_src->ki_pos, - }; + return !uid_valid(inode->i_uid) || !gid_valid(inode->i_gid); } /* * Inode state bits. Protected by inode->i_lock * - * Four bits determine the dirty state of the inode: I_DIRTY_SYNC, - * I_DIRTY_DATASYNC, I_DIRTY_PAGES, and I_DIRTY_TIME. + * Three bits determine the dirty state of the inode, I_DIRTY_SYNC, + * I_DIRTY_DATASYNC and I_DIRTY_PAGES. * * Four bits define the lifetime of an inode. Initially, inodes are I_NEW, * until that flag is cleared. I_WILL_FREE, I_FREEING and I_CLEAR are set at @@ -2357,20 +1899,12 @@ static inline void kiocb_clone(struct kiocb *kiocb, struct kiocb *kiocb_src, * Two bits are used for locking and completion notification, I_NEW and I_SYNC. 
* * I_DIRTY_SYNC Inode is dirty, but doesn't have to be written on - * fdatasync() (unless I_DIRTY_DATASYNC is also set). - * Timestamp updates are the usual cause. - * I_DIRTY_DATASYNC Data-related inode changes pending. We keep track of + * fdatasync(). i_atime is the usual cause. + * I_DIRTY_DATASYNC Data-related inode changes pending. We keep track of * these changes separately from I_DIRTY_SYNC so that we * don't have to write inode on fdatasync() when only - * e.g. the timestamps have changed. + * mtime has changed in it. * I_DIRTY_PAGES Inode has dirty pages. Inode itself may be clean. - * I_DIRTY_TIME The inode itself only has dirty timestamps, and the - * lazytime mount option is enabled. We keep track of this - * separately from I_DIRTY_SYNC in order to implement - * lazytime. This gets cleared if I_DIRTY_INODE - * (I_DIRTY_SYNC and/or I_DIRTY_DATASYNC) gets set. I.e. - * either I_DIRTY_TIME *or* I_DIRTY_INODE can be set in - * i_state, but not both. I_DIRTY_PAGES may still be set. * I_NEW Serves as both a mutex and completion notification. * New inodes set I_NEW. If two processes both create * the same inode, one of them will release its inode and @@ -2405,19 +1939,8 @@ static inline void kiocb_clone(struct kiocb *kiocb, struct kiocb *kiocb_src, * * I_WB_SWITCH Cgroup bdi_writeback switching in progress. Used to * synchronize competing switching instances and to tell - * wb stat updates to grab the i_pages lock. See - * inode_switch_wbs_work_fn() for details. - * - * I_OVL_INUSE Used by overlayfs to get exclusive ownership on upper - * and work dirs among overlayfs mounts. - * - * I_CREATING New object's inode in the middle of setting up. - * - * I_DONTCACHE Evict inode as soon as it is not used anymore. - * - * I_SYNC_QUEUED Inode is queued in b_io or b_more_io writeback lists. - * Used to detect that mark_inode_dirty() should not move - * inode between dirty lists. + * wb stat updates to grab mapping->tree_lock. 
See + * inode_switch_wb_work_fn() for details. * * Q: What is the difference between I_WILL_FREE and I_FREEING? */ @@ -2436,14 +1959,11 @@ static inline void kiocb_clone(struct kiocb *kiocb, struct kiocb *kiocb_src, #define I_DIO_WAKEUP (1 << __I_DIO_WAKEUP) #define I_LINKABLE (1 << 10) #define I_DIRTY_TIME (1 << 11) +#define __I_DIRTY_TIME_EXPIRED 12 +#define I_DIRTY_TIME_EXPIRED (1 << __I_DIRTY_TIME_EXPIRED) #define I_WB_SWITCH (1 << 13) -#define I_OVL_INUSE (1 << 14) -#define I_CREATING (1 << 15) -#define I_DONTCACHE (1 << 16) -#define I_SYNC_QUEUED (1 << 17) -#define I_DIRTY_INODE (I_DIRTY_SYNC | I_DIRTY_DATASYNC) -#define I_DIRTY (I_DIRTY_INODE | I_DIRTY_PAGES) +#define I_DIRTY (I_DIRTY_SYNC | I_DIRTY_DATASYNC | I_DIRTY_PAGES) #define I_DIRTY_ALL (I_DIRTY | I_DIRTY_TIME) extern void __mark_inode_dirty(struct inode *, int); @@ -2457,21 +1977,6 @@ static inline void mark_inode_dirty_sync(struct inode *inode) __mark_inode_dirty(inode, I_DIRTY_SYNC); } -/* - * Returns true if the given inode itself only has dirty timestamps (its pages - * may still be dirty) and isn't currently being allocated or freed. - * Filesystems should call this if when writing an inode when lazytime is - * enabled, they want to opportunistically write the timestamps of other inodes - * located very nearby on-disk, e.g. in the same inode block. This returns true - * if the given inode is in need of such an opportunistic update. Requires - * i_lock, or at least later re-checking under i_lock. 
- */ -static inline bool inode_is_dirtytime_only(struct inode *inode) -{ - return (inode->i_state & (I_DIRTY_TIME | I_NEW | - I_FREEING | I_WILL_FREE)) == I_DIRTY_TIME; -} - extern void inc_nlink(struct inode *inode); extern void drop_nlink(struct inode *inode); extern void clear_nlink(struct inode *inode); @@ -2489,6 +1994,21 @@ static inline void inode_dec_link_count(struct inode *inode) mark_inode_dirty(inode); } +/** + * inode_inc_iversion - increments i_version + * @inode: inode that need to be updated + * + * Every time the inode is modified, the i_version field will be incremented. + * The filesystem has to be mounted with i_version flag + */ + +static inline void inode_inc_iversion(struct inode *inode) +{ + spin_lock(&inode->i_lock); + inode->i_version++; + spin_unlock(&inode->i_lock); +} + enum file_time_flags { S_ATIME = 1, S_MTIME = 2, @@ -2496,7 +2016,6 @@ enum file_time_flags { S_VERSION = 8, }; -extern bool atime_needs_update(const struct path *, struct inode *); extern void touch_atime(const struct path *); static inline void file_accessed(struct file *file) { @@ -2504,8 +2023,7 @@ static inline void file_accessed(struct file *file) touch_atime(&file->f_path); } -extern int file_modified(struct file *file); - +int sync_inode(struct inode *inode, struct writeback_control *wbc); int sync_inode_metadata(struct inode *inode, int wait); struct file_system_type { @@ -2515,12 +2033,7 @@ struct file_system_type { #define FS_BINARY_MOUNTDATA 2 #define FS_HAS_SUBTYPE 4 #define FS_USERNS_MOUNT 8 /* Can be mounted by userns root */ -#define FS_DISALLOW_NOTIFY_PERM 16 /* Disable fanotify permission events */ -#define FS_ALLOW_IDMAP 32 /* FS has been updated to handle vfs idmappings. */ -#define FS_THP_SUPPORT 8192 /* Remove once all fs converted */ #define FS_RENAME_DOES_D_MOVE 32768 /* FS will handle d_move() during rename() internally. 
*/ - int (*init_fs_context)(struct fs_context *); - const struct fs_parameter_spec *parameters; struct dentry *(*mount) (struct file_system_type *, int, const char *, void *); void (*kill_sb) (struct super_block *); @@ -2535,12 +2048,14 @@ struct file_system_type { struct lock_class_key i_lock_key; struct lock_class_key i_mutex_key; - struct lock_class_key invalidate_lock_key; struct lock_class_key i_mutex_dir_key; }; #define MODULE_ALIAS_FS(NAME) MODULE_ALIAS("fs-" NAME) +extern struct dentry *mount_ns(struct file_system_type *fs_type, + int flags, void *data, void *ns, struct user_namespace *user_ns, + int (*fill_super)(struct super_block *, void *, int)); extern struct dentry *mount_bdev(struct file_system_type *fs_type, int flags, const char *dev_name, void *data, int (*fill_super)(struct super_block *, void *, int)); @@ -2558,16 +2073,30 @@ void kill_litter_super(struct super_block *sb); void deactivate_super(struct super_block *sb); void deactivate_locked_super(struct super_block *sb); int set_anon_super(struct super_block *s, void *data); -int set_anon_super_fc(struct super_block *s, struct fs_context *fc); int get_anon_bdev(dev_t *); void free_anon_bdev(dev_t); -struct super_block *sget_fc(struct fs_context *fc, - int (*test)(struct super_block *, struct fs_context *), - int (*set)(struct super_block *, struct fs_context *)); +struct super_block *sget_userns(struct file_system_type *type, + int (*test)(struct super_block *,void *), + int (*set)(struct super_block *,void *), + int flags, struct user_namespace *user_ns, + void *data); struct super_block *sget(struct file_system_type *type, int (*test)(struct super_block *,void *), int (*set)(struct super_block *,void *), int flags, void *data); +extern struct dentry *mount_pseudo_xattr(struct file_system_type *, char *, + const struct super_operations *ops, + const struct xattr_handler **xattr, + const struct dentry_operations *dops, + unsigned long); + +static inline struct dentry * +mount_pseudo(struct 
file_system_type *fs_type, char *name, + const struct super_operations *ops, + const struct dentry_operations *dops, unsigned long magic) +{ + return mount_pseudo_xattr(fs_type, name, ops, NULL, dops, magic); +} /* Alas, no aliases. Too much hassle with bringing module.h everywhere */ #define fops_get(fops) \ @@ -2588,37 +2117,120 @@ struct super_block *sget(struct file_system_type *type, extern int register_filesystem(struct file_system_type *); extern int unregister_filesystem(struct file_system_type *); -extern struct vfsmount *kern_mount(struct file_system_type *); +extern struct vfsmount *kern_mount_data(struct file_system_type *, void *data); +#define kern_mount(type) kern_mount_data(type, NULL) extern void kern_unmount(struct vfsmount *mnt); extern int may_umount_tree(struct vfsmount *); extern int may_umount(struct vfsmount *); extern long do_mount(const char *, const char __user *, const char *, unsigned long, void *); -extern struct vfsmount *collect_mounts(const struct path *); +extern struct vfsmount *collect_mounts(struct path *); extern void drop_collected_mounts(struct vfsmount *); extern int iterate_mounts(int (*)(struct vfsmount *, void *), void *, struct vfsmount *); -extern int vfs_statfs(const struct path *, struct kstatfs *); +extern int vfs_statfs(struct path *, struct kstatfs *); extern int user_statfs(const char __user *, struct kstatfs *); extern int fd_statfs(int, struct kstatfs *); +extern int vfs_ustat(dev_t, struct kstatfs *); extern int freeze_super(struct super_block *super); extern int thaw_super(struct super_block *super); extern bool our_mnt(struct vfsmount *mnt); -extern __printf(2, 3) -int super_setup_bdi_name(struct super_block *sb, char *fmt, ...); -extern int super_setup_bdi(struct super_block *sb); extern int current_umask(void); extern void ihold(struct inode * inode); extern void iput(struct inode *); -extern int generic_update_time(struct inode *, struct timespec64 *, int); +extern int generic_update_time(struct inode *, 
struct timespec *, int); /* /sys/fs */ extern struct kobject *fs_kobj; #define MAX_RW_COUNT (INT_MAX & PAGE_MASK) +#ifdef CONFIG_MANDATORY_FILE_LOCKING +extern int locks_mandatory_locked(struct file *); +extern int locks_mandatory_area(struct inode *, struct file *, loff_t, loff_t, unsigned char); + +/* + * Candidates for mandatory locking have the setgid bit set + * but no group execute bit - an otherwise meaningless combination. + */ + +static inline int __mandatory_lock(struct inode *ino) +{ + return (ino->i_mode & (S_ISGID | S_IXGRP)) == S_ISGID; +} + +/* + * ... and these candidates should be on MS_MANDLOCK mounted fs, + * otherwise these will be advisory locks + */ + +static inline int mandatory_lock(struct inode *ino) +{ + return IS_MANDLOCK(ino) && __mandatory_lock(ino); +} + +static inline int locks_verify_locked(struct file *file) +{ + if (mandatory_lock(locks_inode(file))) + return locks_mandatory_locked(file); + return 0; +} + +static inline int locks_verify_truncate(struct inode *inode, + struct file *f, + loff_t size) +{ + if (!inode->i_flctx || !mandatory_lock(inode)) + return 0; + + if (size < inode->i_size) { + return locks_mandatory_area(inode, f, size, inode->i_size - 1, + F_WRLCK); + } else { + return locks_mandatory_area(inode, f, inode->i_size, size - 1, + F_WRLCK); + } +} + +#else /* !CONFIG_MANDATORY_FILE_LOCKING */ + +static inline int locks_mandatory_locked(struct file *file) +{ + return 0; +} + +static inline int locks_mandatory_area(struct inode *inode, struct file *filp, + loff_t start, loff_t end, unsigned char type) +{ + return 0; +} + +static inline int __mandatory_lock(struct inode *inode) +{ + return 0; +} + +static inline int mandatory_lock(struct inode *inode) +{ + return 0; +} + +static inline int locks_verify_locked(struct file *file) +{ + return 0; +} + +static inline int locks_verify_truncate(struct inode *inode, struct file *filp, + size_t size) +{ + return 0; +} + +#endif /* CONFIG_MANDATORY_FILE_LOCKING */ + + #ifdef 
CONFIG_FILE_LOCKING static inline int break_lease(struct inode *inode, unsigned int mode) { @@ -2714,52 +2326,43 @@ struct audit_names; struct filename { const char *name; /* pointer to actual string */ const __user char *uptr; /* original userland pointer */ - int refcnt; struct audit_names *aname; + int refcnt; const char iname[]; }; -static_assert(offsetof(struct filename, iname) % sizeof(long) == 0); -static inline struct user_namespace *file_mnt_user_ns(struct file *file) -{ - return mnt_user_ns(file->f_path.mnt); -} extern long vfs_truncate(const struct path *, loff_t); -int do_truncate(struct user_namespace *, struct dentry *, loff_t start, - unsigned int time_attrs, struct file *filp); +extern int do_truncate(struct dentry *, loff_t start, unsigned int time_attrs, + struct file *filp); extern int vfs_fallocate(struct file *file, int mode, loff_t offset, loff_t len); extern long do_sys_open(int dfd, const char __user *filename, int flags, umode_t mode); extern struct file *file_open_name(struct filename *, int, umode_t); extern struct file *filp_open(const char *, int, umode_t); -extern struct file *file_open_root(const struct path *, +extern struct file *file_open_root(struct dentry *, struct vfsmount *, const char *, int, umode_t); -static inline struct file *file_open_root_mnt(struct vfsmount *mnt, - const char *name, int flags, umode_t mode) -{ - return file_open_root(&(struct path){.mnt = mnt, .dentry = mnt->mnt_root}, - name, flags, mode); -} extern struct file * dentry_open(const struct path *, int, const struct cred *); -extern struct file * open_with_fake_path(const struct path *, int, - struct inode*, const struct cred *); -static inline struct file *file_clone_open(struct file *file) -{ - return dentry_open(&file->f_path, file->f_flags, file->f_cred); -} extern int filp_close(struct file *, fl_owner_t id); extern struct filename *getname_flags(const char __user *, int, int *); -extern struct filename *getname_uflags(const char __user *, int); 
extern struct filename *getname(const char __user *); extern struct filename *getname_kernel(const char *); extern void putname(struct filename *name); +enum { + FILE_CREATED = 1, + FILE_OPENED = 2 +}; extern int finish_open(struct file *file, struct dentry *dentry, - int (*open)(struct inode *, struct file *)); + int (*open)(struct inode *, struct file *), + int *opened); extern int finish_no_open(struct file *file, struct dentry *dentry); +/* fs/ioctl.c */ + +extern int ioctl_preallocate(struct file *filp, void __user *argp); + /* fs/dcache.c */ extern void __init vfs_caches_init_early(void); extern void __init vfs_caches_init(void); @@ -2769,31 +2372,97 @@ extern struct kmem_cache *names_cachep; #define __getname() kmem_cache_alloc(names_cachep, GFP_KERNEL) #define __putname(name) kmem_cache_free(names_cachep, (void *)(name)) +#ifdef CONFIG_BLOCK +extern int register_blkdev(unsigned int, const char *); +extern void unregister_blkdev(unsigned int, const char *); +extern struct block_device *bdget(dev_t); +extern struct block_device *bdgrab(struct block_device *bdev); +extern void bd_set_size(struct block_device *, loff_t size); +extern void bd_forget(struct inode *inode); +extern void bdput(struct block_device *); +extern void invalidate_bdev(struct block_device *); +extern void iterate_bdevs(void (*)(struct block_device *, void *), void *); +extern int sync_blockdev(struct block_device *bdev); +extern void kill_bdev(struct block_device *); +extern struct super_block *freeze_bdev(struct block_device *); +extern void emergency_thaw_all(void); +extern int thaw_bdev(struct block_device *bdev, struct super_block *sb); +extern int fsync_bdev(struct block_device *); + extern struct super_block *blockdev_superblock; + static inline bool sb_is_blkdev_sb(struct super_block *sb) { - return IS_ENABLED(CONFIG_BLOCK) && sb == blockdev_superblock; + return sb == blockdev_superblock; +} +#else +static inline void bd_forget(struct inode *inode) {} +static inline int 
sync_blockdev(struct block_device *bdev) { return 0; } +static inline void kill_bdev(struct block_device *bdev) {} +static inline void invalidate_bdev(struct block_device *bdev) {} + +static inline struct super_block *freeze_bdev(struct block_device *sb) +{ + return NULL; } -void emergency_thaw_all(void); +static inline int thaw_bdev(struct block_device *bdev, struct super_block *sb) +{ + return 0; +} + +static inline void iterate_bdevs(void (*f)(struct block_device *, void *), void *arg) +{ +} + +static inline bool sb_is_blkdev_sb(struct super_block *sb) +{ + return false; +} +#endif extern int sync_filesystem(struct super_block *); extern const struct file_operations def_blk_fops; extern const struct file_operations def_chr_fops; +#ifdef CONFIG_BLOCK +extern int ioctl_by_bdev(struct block_device *, unsigned, unsigned long); +extern int blkdev_ioctl(struct block_device *, fmode_t, unsigned, unsigned long); +extern long compat_blkdev_ioctl(struct file *, unsigned, unsigned long); +extern int blkdev_get(struct block_device *bdev, fmode_t mode, void *holder); +extern struct block_device *blkdev_get_by_path(const char *path, fmode_t mode, + void *holder); +extern struct block_device *blkdev_get_by_dev(dev_t dev, fmode_t mode, + void *holder); +extern void blkdev_put(struct block_device *bdev, fmode_t mode); +extern int __blkdev_reread_part(struct block_device *bdev); +extern int blkdev_reread_part(struct block_device *bdev); + +#ifdef CONFIG_SYSFS +extern int bd_link_disk_holder(struct block_device *bdev, struct gendisk *disk); +extern void bd_unlink_disk_holder(struct block_device *bdev, + struct gendisk *disk); +#else +static inline int bd_link_disk_holder(struct block_device *bdev, + struct gendisk *disk) +{ + return 0; +} +static inline void bd_unlink_disk_holder(struct block_device *bdev, + struct gendisk *disk) +{ +} +#endif +#endif /* fs/char_dev.c */ -#define CHRDEV_MAJOR_MAX 512 +#define CHRDEV_MAJOR_HASH_SIZE 255 /* Marks the bottom of the first segment of 
free char majors */ #define CHRDEV_MAJOR_DYN_END 234 -/* Marks the top and bottom of the second segment of free char majors */ -#define CHRDEV_MAJOR_DYN_EXT_START 511 -#define CHRDEV_MAJOR_DYN_EXT_END 384 - -extern int alloc_chrdev_region(dev_t *, unsigned, unsigned, const char *); +extern __nocapture(4) int alloc_chrdev_region(dev_t *, unsigned, unsigned, const char *); extern int register_chrdev_region(dev_t, unsigned, const char *); extern int __register_chrdev(unsigned int major, unsigned int baseminor, unsigned int count, const char *name, const struct file_operations *fops); -extern void __unregister_chrdev(unsigned int major, unsigned int baseminor, +extern __nocapture(4) void __unregister_chrdev(unsigned int major, unsigned int baseminor, unsigned int count, const char *name); extern void unregister_chrdev_region(dev_t, unsigned); extern void chrdev_show(struct seq_file *,off_t); @@ -2809,19 +2478,51 @@ static inline void unregister_chrdev(unsigned int major, const char *name) __unregister_chrdev(major, 0, 256, name); } +/* fs/block_dev.c */ +#define BDEVNAME_SIZE 32 /* Largest string for a blockdev identifier */ +#define BDEVT_SIZE 10 /* Largest string for MAJ:MIN for blkdev */ + +#ifdef CONFIG_BLOCK +#define BLKDEV_MAJOR_HASH_SIZE 255 +extern const char *__bdevname(dev_t, char *buffer); +extern const char *bdevname(struct block_device *bdev, char *buffer); +extern struct block_device *lookup_bdev(const char *); +extern void blkdev_show(struct seq_file *,off_t); + +#else +#define BLKDEV_MAJOR_HASH_SIZE 0 +#endif + extern void init_special_inode(struct inode *, umode_t, dev_t); /* Invalid inode operations -- fs/bad_inode.c */ extern void make_bad_inode(struct inode *); extern bool is_bad_inode(struct inode *); +#ifdef CONFIG_BLOCK +static inline bool op_is_write(unsigned int op) +{ + return op == REQ_OP_READ ? 
false : true; +} + +/* + * return data direction, READ or WRITE + */ +static inline int bio_data_dir(struct bio *bio) +{ + return op_is_write(bio_op(bio)) ? WRITE : READ; +} + +extern void check_disk_size_change(struct gendisk *disk, + struct block_device *bdev); +extern int revalidate_disk(struct gendisk *); +extern int check_disk_change(struct block_device *); +extern int __invalidate_device(struct block_device *, bool); +extern int invalidate_partition(struct gendisk *, int); +#endif unsigned long invalidate_mapping_pages(struct address_space *mapping, pgoff_t start, pgoff_t end); -void invalidate_mapping_pagevec(struct address_space *mapping, - pgoff_t start, pgoff_t end, - unsigned long *nr_pagevec); - static inline void invalidate_remote_inode(struct inode *inode) { if (S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) || @@ -2834,21 +2535,11 @@ extern int invalidate_inode_pages2_range(struct address_space *mapping, extern int write_inode_now(struct inode *, int); extern int filemap_fdatawrite(struct address_space *); extern int filemap_flush(struct address_space *); -extern int filemap_fdatawait_keep_errors(struct address_space *mapping); +extern int filemap_fdatawait(struct address_space *); +extern void filemap_fdatawait_keep_errors(struct address_space *); extern int filemap_fdatawait_range(struct address_space *, loff_t lstart, loff_t lend); -extern int filemap_fdatawait_range_keep_errors(struct address_space *mapping, - loff_t start_byte, loff_t end_byte); - -static inline int filemap_fdatawait(struct address_space *mapping) -{ - return filemap_fdatawait_range(mapping, 0, LLONG_MAX); -} - -extern bool filemap_range_has_page(struct address_space *, loff_t lstart, - loff_t lend); -extern bool filemap_range_needs_writeback(struct address_space *, - loff_t lstart, loff_t lend); +extern int filemap_write_and_wait(struct address_space *mapping); extern int filemap_write_and_wait_range(struct address_space *mapping, loff_t lstart, loff_t lend); extern int 
__filemap_fdatawrite_range(struct address_space *mapping, @@ -2856,94 +2547,11 @@ extern int __filemap_fdatawrite_range(struct address_space *mapping, extern int filemap_fdatawrite_range(struct address_space *mapping, loff_t start, loff_t end); extern int filemap_check_errors(struct address_space *mapping); -extern void __filemap_set_wb_err(struct address_space *mapping, int err); -int filemap_fdatawrite_wbc(struct address_space *mapping, - struct writeback_control *wbc); - -static inline int filemap_write_and_wait(struct address_space *mapping) -{ - return filemap_write_and_wait_range(mapping, 0, LLONG_MAX); -} - -extern int __must_check file_fdatawait_range(struct file *file, loff_t lstart, - loff_t lend); -extern int __must_check file_check_and_advance_wb_err(struct file *file); -extern int __must_check file_write_and_wait_range(struct file *file, - loff_t start, loff_t end); - -static inline int file_write_and_wait(struct file *file) -{ - return file_write_and_wait_range(file, 0, LLONG_MAX); -} - -/** - * filemap_set_wb_err - set a writeback error on an address_space - * @mapping: mapping in which to set writeback error - * @err: error to be set in mapping - * - * When writeback fails in some way, we must record that error so that - * userspace can be informed when fsync and the like are called. We endeavor - * to report errors on any file that was open at the time of the error. Some - * internal callers also need to know when writeback errors have occurred. - * - * When a writeback error occurs, most filesystems will want to call - * filemap_set_wb_err to record the error in the mapping so that it will be - * automatically reported whenever fsync is called on the file. - */ -static inline void filemap_set_wb_err(struct address_space *mapping, int err) -{ - /* Fastpath for common case of no error */ - if (unlikely(err)) - __filemap_set_wb_err(mapping, err); -} - -/** - * filemap_check_wb_err - has an error occurred since the mark was sampled? 
- * @mapping: mapping to check for writeback errors - * @since: previously-sampled errseq_t - * - * Grab the errseq_t value from the mapping, and see if it has changed "since" - * the given value was sampled. - * - * If it has then report the latest error set, otherwise return 0. - */ -static inline int filemap_check_wb_err(struct address_space *mapping, - errseq_t since) -{ - return errseq_check(&mapping->wb_err, since); -} - -/** - * filemap_sample_wb_err - sample the current errseq_t to test for later errors - * @mapping: mapping to be sampled - * - * Writeback errors are always reported relative to a particular sample point - * in the past. This function provides those sample points. - */ -static inline errseq_t filemap_sample_wb_err(struct address_space *mapping) -{ - return errseq_sample(&mapping->wb_err); -} - -/** - * file_sample_sb_err - sample the current errseq_t to test for later errors - * @file: file pointer to be sampled - * - * Grab the most current superblock-level errseq_t value for the given - * struct file. - */ -static inline errseq_t file_sample_sb_err(struct file *file) -{ - return errseq_sample(&file->f_path.dentry->d_sb->s_wb_err); -} extern int vfs_fsync_range(struct file *file, loff_t start, loff_t end, int datasync); extern int vfs_fsync(struct file *file, int datasync); -extern int sync_file_range(struct file *file, loff_t offset, loff_t nbytes, - unsigned int flags); - /* * Sync the bytes written if this was a synchronous write. 
Expect ki_pos * to already be updated for the write, and will return either the amount @@ -2964,55 +2572,32 @@ static inline ssize_t generic_write_sync(struct kiocb *iocb, ssize_t count) extern void emergency_sync(void); extern void emergency_remount(void); - #ifdef CONFIG_BLOCK -extern int bmap(struct inode *inode, sector_t *block); -#else -static inline int bmap(struct inode *inode, sector_t *block) -{ - return -EINVAL; -} +extern sector_t bmap(struct inode *, sector_t); #endif - -int notify_change(struct user_namespace *, struct dentry *, - struct iattr *, struct inode **); -int inode_permission(struct user_namespace *, struct inode *, int); -int generic_permission(struct user_namespace *, struct inode *, int); -static inline int file_permission(struct file *file, int mask) -{ - return inode_permission(file_mnt_user_ns(file), - file_inode(file), mask); -} -static inline int path_permission(const struct path *path, int mask) -{ - return inode_permission(mnt_user_ns(path->mnt), - d_inode(path->dentry), mask); -} -int __check_sticky(struct user_namespace *mnt_userns, struct inode *dir, - struct inode *inode); +extern int notify_change(struct dentry *, struct iattr *, struct inode **); +extern int inode_permission(struct inode *, int); +extern int __inode_permission(struct inode *, int); +extern int generic_permission(struct inode *, int); +extern int __check_sticky(struct inode *dir, struct inode *inode); static inline bool execute_ok(struct inode *inode) { return (inode->i_mode & S_IXUGO) || S_ISDIR(inode->i_mode); } -static inline bool inode_wrong_type(const struct inode *inode, umode_t mode) -{ - return (inode->i_mode ^ mode) & S_IFMT; -} - static inline void file_start_write(struct file *file) { if (!S_ISREG(file_inode(file)->i_mode)) return; - sb_start_write(file_inode(file)->i_sb); + __sb_start_write(file_inode(file)->i_sb, SB_FREEZE_WRITE, true); } static inline bool file_start_write_trylock(struct file *file) { if (!S_ISREG(file_inode(file)->i_mode)) return 
true; - return sb_start_write_trylock(file_inode(file)->i_sb); + return __sb_start_write(file_inode(file)->i_sb, SB_FREEZE_WRITE, false); } static inline void file_end_write(struct file *file) @@ -3023,20 +2608,15 @@ static inline void file_end_write(struct file *file) } /* - * This is used for regular files where some users -- especially the - * currently executed binary in a process, previously handled via - * VM_DENYWRITE -- cannot handle concurrent write (and maybe mmap - * read-write shared) accesses. - * * get_write_access() gets write permission for a file. * put_write_access() releases this write permission. - * deny_write_access() denies write access to a file. - * allow_write_access() re-enables write access to a file. - * - * The i_writecount field of an inode can have the following values: - * 0: no write access, no denied write access - * < 0: (-i_writecount) users that denied write access to the file. - * > 0: (i_writecount) users that have write access to the file. + * This is used for regular files. + * We cannot support write (and maybe mmap read-write shared) accesses and + * MAP_DENYWRITE mmappings simultaneously. The i_writecount field of an inode + * can have the following values: + * 0: no writers, no VM_DENYWRITE mappings + * < 0: (-i_writecount) vm_area_structs with VM_DENYWRITE set exist + * > 0: (i_writecount) users are writing to the file. * * Normally we operate on that counter with atomic_{inc,dec} and it's safe * except for the cases where we don't hold i_writecount yet. 
Then we need to @@ -3066,7 +2646,7 @@ static inline bool inode_is_open_for_write(const struct inode *inode) return atomic_read(&inode->i_writecount) > 0; } -#if defined(CONFIG_IMA) || defined(CONFIG_FILE_LOCKING) +#ifdef CONFIG_IMA static inline void i_readcount_dec(struct inode *inode) { BUG_ON(!atomic_read(&inode->i_readcount)); @@ -3088,15 +2668,49 @@ static inline void i_readcount_inc(struct inode *inode) #endif extern int do_pipe_flags(int *, int); -extern ssize_t kernel_read(struct file *, void *, size_t, loff_t *); -ssize_t __kernel_read(struct file *file, void *buf, size_t count, loff_t *pos); -extern ssize_t kernel_write(struct file *, const void *, size_t, loff_t *); -extern ssize_t __kernel_write(struct file *, const void *, size_t, loff_t *); +#define __kernel_read_file_id(id) \ + id(UNKNOWN, unknown) \ + id(FIRMWARE, firmware) \ + id(FIRMWARE_PREALLOC_BUFFER, firmware) \ + id(MODULE, kernel-module) \ + id(KEXEC_IMAGE, kexec-image) \ + id(KEXEC_INITRAMFS, kexec-initramfs) \ + id(POLICY, security-policy) \ + id(MAX_ID, ) + +#define __fid_enumify(ENUM, dummy) READING_ ## ENUM, +#define __fid_stringify(dummy, str) #str, + +enum kernel_read_file_id { + __kernel_read_file_id(__fid_enumify) +}; + +static const char * const kernel_read_file_str[] = { + __kernel_read_file_id(__fid_stringify) +}; + +static inline const char *kernel_read_file_id_str(enum kernel_read_file_id id) +{ + if (id < 0 || id >= READING_MAX_ID) + return kernel_read_file_str[READING_UNKNOWN]; + + return kernel_read_file_str[id]; +} + +extern int kernel_read(struct file *, loff_t, char *, unsigned long); +extern int kernel_read_file(struct file *, void **, loff_t *, loff_t, + enum kernel_read_file_id); +extern int kernel_read_file_from_path(char *, void **, loff_t *, loff_t, + enum kernel_read_file_id); +extern int kernel_read_file_from_fd(int, void **, loff_t *, loff_t, + enum kernel_read_file_id); +extern ssize_t kernel_write(struct file *, const char *, size_t, loff_t); +extern ssize_t 
__kernel_write(struct file *, const char *, size_t, loff_t *); extern struct file * open_exec(const char *); /* fs/dcache.c -- generic fs support functions */ extern bool is_subdir(struct dentry *, struct dentry *); -extern bool path_is_under(const struct path *, const struct path *); +extern bool path_is_under(struct path *, struct path *); extern char *file_path(struct file *, char *, int); @@ -3118,7 +2732,6 @@ static inline int generic_drop_inode(struct inode *inode) { return !inode->i_nlink || inode_unhashed(inode); } -extern void d_mark_dontcache(struct inode *inode); extern struct inode *ilookup5_nowait(struct super_block *sb, unsigned long hashval, int (*test)(struct inode *, void *), @@ -3127,10 +2740,6 @@ extern struct inode *ilookup5(struct super_block *sb, unsigned long hashval, int (*test)(struct inode *, void *), void *data); extern struct inode *ilookup(struct super_block *sb, unsigned long ino); -extern struct inode *inode_insert5(struct inode *inode, unsigned long hashval, - int (*test)(struct inode *, void *), - int (*set)(struct inode *, void *), - void *data); extern struct inode * iget5_locked(struct super_block *, unsigned long, int (*test)(struct inode *, void *), int (*set)(struct inode *, void *), void *); extern struct inode * iget_locked(struct super_block *, unsigned long); extern struct inode *find_inode_nowait(struct super_block *, @@ -3138,9 +2747,6 @@ extern struct inode *find_inode_nowait(struct super_block *, int (*match)(struct inode *, unsigned long, void *), void *data); -extern struct inode *find_inode_rcu(struct super_block *, unsigned long, - int (*)(struct inode *, void *), void *); -extern struct inode *find_inode_by_ino_rcu(struct super_block *, unsigned long); extern int insert_inode_locked4(struct inode *, unsigned long, int (*test)(struct inode *, void *), void *); extern int insert_inode_locked(struct inode *); #ifdef CONFIG_DEBUG_LOCK_ALLOC @@ -3149,24 +2755,7 @@ extern void lockdep_annotate_inode_mutex_key(struct 
inode *inode); static inline void lockdep_annotate_inode_mutex_key(struct inode *inode) { }; #endif extern void unlock_new_inode(struct inode *); -extern void discard_new_inode(struct inode *); extern unsigned int get_next_ino(void); -extern void evict_inodes(struct super_block *sb); - -/* - * Userspace may rely on the the inode number being non-zero. For example, glibc - * simply ignores files with zero i_ino in unlink() and other places. - * - * As an additional complication, if userspace was compiled with - * _FILE_OFFSET_BITS=32 on a 64-bit kernel we'll only end up reading out the - * lower 32 bits, so we need to check that those aren't zero explicitly. With - * _FILE_OFFSET_BITS=64, this may cause some harmless false-negatives, but - * better safe than sorry. - */ -static inline bool is_zero_ino(ino_t ino) -{ - return (u32)ino == 0; -} extern void __iget(struct inode * inode); extern void iget_failed(struct inode *); @@ -3193,31 +2782,32 @@ static inline void remove_inode_hash(struct inode *inode) extern void inode_sb_list_add(struct inode *inode); +#ifdef CONFIG_BLOCK +extern blk_qc_t submit_bio(struct bio *); +extern int bdev_read_only(struct block_device *); +#endif +extern int set_blocksize(struct block_device *, int); extern int sb_set_blocksize(struct super_block *, int); extern int sb_min_blocksize(struct super_block *, int); extern int generic_file_mmap(struct file *, struct vm_area_struct *); extern int generic_file_readonly_mmap(struct file *, struct vm_area_struct *); extern ssize_t generic_write_checks(struct kiocb *, struct iov_iter *); -extern int generic_write_check_limits(struct file *file, loff_t pos, - loff_t *count); -extern int generic_file_rw_checks(struct file *file_in, struct file *file_out); -ssize_t filemap_read(struct kiocb *iocb, struct iov_iter *to, - ssize_t already_read); extern ssize_t generic_file_read_iter(struct kiocb *, struct iov_iter *); extern ssize_t __generic_file_write_iter(struct kiocb *, struct iov_iter *); extern 
ssize_t generic_file_write_iter(struct kiocb *, struct iov_iter *); extern ssize_t generic_file_direct_write(struct kiocb *, struct iov_iter *); extern ssize_t generic_perform_write(struct file *, struct iov_iter *, loff_t); -ssize_t vfs_iter_read(struct file *file, struct iov_iter *iter, loff_t *ppos, - rwf_t flags); -ssize_t vfs_iter_write(struct file *file, struct iov_iter *iter, loff_t *ppos, - rwf_t flags); -ssize_t vfs_iocb_iter_read(struct file *file, struct kiocb *iocb, - struct iov_iter *iter); -ssize_t vfs_iocb_iter_write(struct file *file, struct kiocb *iocb, - struct iov_iter *iter); +ssize_t vfs_iter_read(struct file *file, struct iov_iter *iter, loff_t *ppos); +ssize_t vfs_iter_write(struct file *file, struct iov_iter *iter, loff_t *ppos); + +/* fs/block_dev.c */ +extern ssize_t blkdev_read_iter(struct kiocb *iocb, struct iov_iter *to); +extern ssize_t blkdev_write_iter(struct kiocb *iocb, struct iov_iter *from); +extern int blkdev_fsync(struct file *filp, loff_t start, loff_t end, + int datasync); +extern void block_sync_page(struct page *page); /* fs/splice.c */ extern ssize_t generic_file_splice_read(struct file *, loff_t *, @@ -3244,7 +2834,6 @@ extern loff_t no_seek_end_llseek_size(struct file *, loff_t, int, loff_t); extern loff_t no_seek_end_llseek(struct file *, loff_t, int); extern int generic_file_open(struct inode * inode, struct file * filp); extern int nonseekable_open(struct inode * inode, struct file * filp); -extern int stream_open(struct inode * inode, struct file * filp); #ifdef CONFIG_BLOCK typedef void (dio_submit_t)(struct bio *bio, struct inode *inode, @@ -3256,8 +2845,16 @@ enum { /* filesystem does not support filling holes */ DIO_SKIP_HOLES = 0x02, + + /* filesystem can handle aio writes beyond i_size */ + DIO_ASYNC_EXTEND = 0x04, + + /* inode/fs/bdev does not need truncate protection */ + DIO_SKIP_DIO_COUNT = 0x08, }; +void dio_end_io(struct bio *bio, int error); + ssize_t __blockdev_direct_IO(struct kiocb *iocb, struct inode 
*inode, struct block_device *bdev, struct iov_iter *iter, get_block_t get_block, @@ -3276,7 +2873,7 @@ static inline ssize_t blockdev_direct_IO(struct kiocb *iocb, void inode_dio_wait(struct inode *inode); -/** +/* * inode_dio_begin - signal start of a direct I/O requests * @inode: inode the direct I/O happens on * @@ -3288,7 +2885,7 @@ static inline void inode_dio_begin(struct inode *inode) atomic_inc(&inode->i_dio_count); } -/** +/* * inode_dio_end - signal finish of a direct I/O requests * @inode: inode the direct I/O happens on * @@ -3301,11 +2898,6 @@ static inline void inode_dio_end(struct inode *inode) wake_up_bit(&inode->i_state, __I_DIO_WAKEUP); } -/* - * Warn about a page cache invalidation failure diring a direct I/O write. - */ -void dio_warn_stale_pagecache(struct file *filp); - extern void inode_set_flags(struct inode *inode, unsigned int flags, unsigned int mask); @@ -3323,18 +2915,14 @@ extern int __page_symlink(struct inode *inode, const char *symname, int len, extern int page_symlink(struct inode *inode, const char *symname, int len); extern const struct inode_operations page_symlink_inode_operations; extern void kfree_link(void *); -void generic_fillattr(struct user_namespace *, struct inode *, struct kstat *); -void generic_fill_statx_attr(struct inode *inode, struct kstat *stat); -extern int vfs_getattr_nosec(const struct path *, struct kstat *, u32, unsigned int); -extern int vfs_getattr(const struct path *, struct kstat *, u32, unsigned int); +extern int generic_readlink(struct dentry *, char __user *, int); +extern void generic_fillattr(struct inode *, struct kstat *); +int vfs_getattr_nosec(struct path *path, struct kstat *stat); +extern int vfs_getattr(struct path *, struct kstat *); void __inode_add_bytes(struct inode *inode, loff_t bytes); void inode_add_bytes(struct inode *inode, loff_t bytes); void __inode_sub_bytes(struct inode *inode, loff_t bytes); void inode_sub_bytes(struct inode *inode, loff_t bytes); -static inline loff_t 
__inode_get_bytes(struct inode *inode) -{ - return (((loff_t)inode->i_blocks) << 9) + inode->i_bytes; -} loff_t inode_get_bytes(struct inode *inode); void inode_set_bytes(struct inode *inode, loff_t bytes); const char *simple_get_link(struct dentry *, struct inode *, @@ -3343,29 +2931,27 @@ extern const struct inode_operations simple_symlink_inode_operations; extern int iterate_dir(struct file *, struct dir_context *); -int vfs_fstatat(int dfd, const char __user *filename, struct kstat *stat, - int flags); -int vfs_fstat(int fd, struct kstat *stat); - -static inline int vfs_stat(const char __user *filename, struct kstat *stat) -{ - return vfs_fstatat(AT_FDCWD, filename, stat, 0); -} -static inline int vfs_lstat(const char __user *name, struct kstat *stat) -{ - return vfs_fstatat(AT_FDCWD, name, stat, AT_SYMLINK_NOFOLLOW); -} - +extern int vfs_stat(const char __user *, struct kstat *); +extern int vfs_lstat(const char __user *, struct kstat *); +extern int vfs_fstat(unsigned int, struct kstat *); +extern int vfs_fstatat(int , const char __user *, struct kstat *, int); extern const char *vfs_get_link(struct dentry *, struct delayed_call *); -extern int vfs_readlink(struct dentry *, char __user *, int); -extern struct file_system_type *get_filesystem(struct file_system_type *fs); +extern int __generic_block_fiemap(struct inode *inode, + struct fiemap_extent_info *fieinfo, + loff_t start, loff_t len, + get_block_t *get_block); +extern int generic_block_fiemap(struct inode *inode, + struct fiemap_extent_info *fieinfo, u64 start, + u64 len, get_block_t *get_block); + +extern void get_filesystem(struct file_system_type *fs); extern void put_filesystem(struct file_system_type *fs); extern struct file_system_type *get_fs_type(const char *name); extern struct super_block *get_super(struct block_device *); +extern struct super_block *get_super_thawed(struct block_device *); extern struct super_block *get_active_super(struct block_device *bdev); extern void drop_super(struct 
super_block *sb); -extern void drop_super_exclusive(struct super_block *sb); extern void iterate_supers(void (*)(struct super_block *, void *), void *); extern void iterate_supers_type(struct file_system_type *, void (*)(struct super_block *, void *), void *); @@ -3374,29 +2960,24 @@ extern int dcache_dir_open(struct inode *, struct file *); extern int dcache_dir_close(struct inode *, struct file *); extern loff_t dcache_dir_lseek(struct file *, loff_t, int); extern int dcache_readdir(struct file *, struct dir_context *); -extern int simple_setattr(struct user_namespace *, struct dentry *, - struct iattr *); -extern int simple_getattr(struct user_namespace *, const struct path *, - struct kstat *, u32, unsigned int); +extern int simple_setattr(struct dentry *, struct iattr *); +extern int simple_getattr(struct vfsmount *, struct dentry *, struct kstat *); extern int simple_statfs(struct dentry *, struct kstatfs *); extern int simple_open(struct inode *inode, struct file *file); extern int simple_link(struct dentry *, struct inode *, struct dentry *); extern int simple_unlink(struct inode *, struct dentry *); extern int simple_rmdir(struct inode *, struct dentry *); -extern int simple_rename(struct user_namespace *, struct inode *, - struct dentry *, struct inode *, struct dentry *, - unsigned int); -extern void simple_recursive_removal(struct dentry *, - void (*callback)(struct dentry *)); +extern int simple_rename(struct inode *, struct dentry *, + struct inode *, struct dentry *, unsigned int); extern int noop_fsync(struct file *, loff_t, loff_t, int); -extern void noop_invalidatepage(struct page *page, unsigned int offset, - unsigned int length); -extern ssize_t noop_direct_IO(struct kiocb *iocb, struct iov_iter *iter); extern int simple_empty(struct dentry *); +extern int simple_readpage(struct file *file, struct page *page); extern int simple_write_begin(struct file *file, struct address_space *mapping, loff_t pos, unsigned len, unsigned flags, struct page 
**pagep, void **fsdata); -extern const struct address_space_operations ram_aops; +extern int simple_write_end(struct file *file, struct address_space *mapping, + loff_t pos, unsigned len, unsigned copied, + struct page *page, void *fsdata); extern int always_delete_dentry(const struct dentry *); extern struct inode *alloc_anon_inode(struct super_block *); extern int simple_nosetlease(struct file *, long, struct file_lock **, void **); @@ -3408,10 +2989,9 @@ extern const struct file_operations simple_dir_operations; extern const struct inode_operations simple_dir_inode_operations; extern void make_empty_dir_inode(struct inode *inode); extern bool is_empty_dir_inode(struct inode *inode); -struct tree_descr { const char *name; const struct file_operations *ops; int mode; }; +struct tree_descr { char *name; const struct file_operations *ops; int mode; }; struct dentry *d_alloc_name(struct dentry *, const char *); -extern int simple_fill_super(struct super_block *, unsigned long, - const struct tree_descr *); +extern int simple_fill_super(struct super_block *, unsigned long, struct tree_descr *); extern int simple_pin_fs(struct file_system_type *, struct vfsmount **mount, int *count); extern void simple_release_fs(struct vfsmount **mount, int *count); @@ -3425,46 +3005,27 @@ extern int generic_file_fsync(struct file *, loff_t, loff_t, int); extern int generic_check_addressable(unsigned, u64); -extern void generic_set_encrypted_ci_d_ops(struct dentry *dentry); - #ifdef CONFIG_MIGRATION extern int buffer_migrate_page(struct address_space *, struct page *, struct page *, enum migrate_mode); -extern int buffer_migrate_page_norefs(struct address_space *, - struct page *, struct page *, - enum migrate_mode); #else #define buffer_migrate_page NULL -#define buffer_migrate_page_norefs NULL #endif -int may_setattr(struct user_namespace *mnt_userns, struct inode *inode, - unsigned int ia_valid); -int setattr_prepare(struct user_namespace *, struct dentry *, struct iattr *); 
+extern int setattr_prepare(struct dentry *, struct iattr *); extern int inode_newsize_ok(const struct inode *, loff_t offset); -void setattr_copy(struct user_namespace *, struct inode *inode, - const struct iattr *attr); +extern void setattr_copy(struct inode *inode, const struct iattr *attr); extern int file_update_time(struct file *file); -static inline bool vma_is_dax(const struct vm_area_struct *vma) -{ - return vma->vm_file && IS_DAX(vma->vm_file->f_mapping->host); -} +extern int generic_show_options(struct seq_file *m, struct dentry *root); +extern void save_mount_options(struct super_block *sb, char *options); +extern void replace_mount_options(struct super_block *sb, char *options); -static inline bool vma_is_fsdax(struct vm_area_struct *vma) +static inline bool io_is_direct(struct file *filp) { - struct inode *inode; - - if (!IS_ENABLED(CONFIG_FS_DAX) || !vma->vm_file) - return false; - if (!vma_is_dax(vma)) - return false; - inode = file_inode(vma->vm_file); - if (S_ISCHR(inode->i_mode)) - return false; /* device-dax */ - return true; + return (filp->f_flags & O_DIRECT) || IS_DAX(filp->f_mapping->host); } static inline int iocb_flags(struct file *file) @@ -3472,7 +3033,7 @@ static inline int iocb_flags(struct file *file) int res = 0; if (file->f_flags & O_APPEND) res |= IOCB_APPEND; - if (file->f_flags & O_DIRECT) + if (io_is_direct(file)) res |= IOCB_DIRECT; if ((file->f_flags & O_DSYNC) || IS_SYNC(file->f_mapping->host)) res |= IOCB_DSYNC; @@ -3481,31 +3042,6 @@ static inline int iocb_flags(struct file *file) return res; } -static inline int kiocb_set_rw_flags(struct kiocb *ki, rwf_t flags) -{ - int kiocb_flags = 0; - - /* make sure there's no overlap between RWF and private IOCB flags */ - BUILD_BUG_ON((__force int) RWF_SUPPORTED & IOCB_EVENTFD); - - if (!flags) - return 0; - if (unlikely(flags & ~RWF_SUPPORTED)) - return -EOPNOTSUPP; - - if (flags & RWF_NOWAIT) { - if (!(ki->ki_filp->f_mode & FMODE_NOWAIT)) - return -EOPNOTSUPP; - kiocb_flags |= 
IOCB_NOIO; - } - kiocb_flags |= (__force int) (flags & RWF_SUPPORTED); - if (flags & RWF_SYNC) - kiocb_flags |= IOCB_DSYNC; - - ki->ki_flags |= kiocb_flags; - return 0; -} - static inline ino_t parent_ino(struct dentry *dentry) { ino_t res; @@ -3528,7 +3064,7 @@ static inline ino_t parent_ino(struct dentry *dentry) */ struct simple_transaction_argresp { ssize_t size; - char data[]; + char data[0]; }; #define SIMPLE_TRANSACTION_LIMIT (PAGE_SIZE - sizeof(struct simple_transaction_argresp)) @@ -3589,12 +3125,12 @@ ssize_t simple_attr_write(struct file *file, const char __user *buf, struct ctl_table; int proc_nr_files(struct ctl_table *table, int write, - void *buffer, size_t *lenp, loff_t *ppos); + void __user *buffer, size_t *lenp, loff_t *ppos); int proc_nr_dentry(struct ctl_table *table, int write, - void *buffer, size_t *lenp, loff_t *ppos); + void __user *buffer, size_t *lenp, loff_t *ppos); int proc_nr_inodes(struct ctl_table *table, int write, - void *buffer, size_t *lenp, loff_t *ppos); -int __init list_bdev_fs_names(char *buf, size_t size); + void __user *buffer, size_t *lenp, loff_t *ppos); +int __init get_filesystem_list(char *buf); #define __FMODE_EXEC ((__force int) FMODE_EXEC) #define __FMODE_NONOTIFY ((__force int) FMODE_NONOTIFY) @@ -3608,18 +3144,17 @@ static inline bool is_sxid(umode_t mode) return (mode & S_ISUID) || ((mode & S_ISGID) && (mode & S_IXGRP)); } -static inline int check_sticky(struct user_namespace *mnt_userns, - struct inode *dir, struct inode *inode) +static inline int check_sticky(struct inode *dir, struct inode *inode) { if (!(dir->i_mode & S_ISVTX)) return 0; - return __check_sticky(mnt_userns, dir, inode); + return __check_sticky(dir, inode); } static inline void inode_has_no_xattr(struct inode *inode) { - if (!is_sxid(inode->i_mode) && (inode->i_sb->s_flags & SB_NOSEC)) + if (!is_sxid(inode->i_mode) && (inode->i_sb->s_flags & MS_NOSEC)) inode->i_flags |= S_NOSEC; } @@ -3675,21 +3210,14 @@ static inline bool 
dir_relax_shared(struct inode *inode) extern bool path_noexec(const struct path *path); extern void inode_nohighmem(struct inode *inode); -/* mm/fadvise.c */ -extern int vfs_fadvise(struct file *file, loff_t offset, loff_t len, - int advice); -extern int generic_fadvise(struct file *file, loff_t offset, loff_t len, - int advice); - -/* - * Flush file data before changing attributes. Caller must hold any locks - * required to prevent further writes to this file until we're done setting - * flags. - */ -static inline int inode_drain_writes(struct inode *inode) +static inline bool is_sidechannel_device(const struct inode *inode) { - inode_dio_wait(inode); - return filemap_write_and_wait(inode->i_mapping); +#ifdef CONFIG_GRKERNSEC_DEVICE_SIDECHANNEL + umode_t mode = inode->i_mode; + return ((S_ISCHR(mode) || S_ISBLK(mode)) && (mode & (S_IROTH | S_IWOTH))); +#else + return false; +#endif } #endif /* _LINUX_FS_H */ diff --git a/include/linux/fs_pin.h b/include/linux/fs_pin.h index bdd09fd252..3886b3bffd 100644 --- a/include/linux/fs_pin.h +++ b/include/linux/fs_pin.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #include struct fs_pin { @@ -20,5 +19,6 @@ static inline void init_fs_pin(struct fs_pin *p, void (*kill)(struct fs_pin *)) } void pin_remove(struct fs_pin *); +void pin_insert_group(struct fs_pin *, struct vfsmount *, struct hlist_head *); void pin_insert(struct fs_pin *, struct vfsmount *); void pin_kill(struct fs_pin *); diff --git a/include/linux/fs_stack.h b/include/linux/fs_stack.h index 54210a42c3..da317c7163 100644 --- a/include/linux/fs_stack.h +++ b/include/linux/fs_stack.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_FS_STACK_H #define _LINUX_FS_STACK_H diff --git a/include/linux/fs_struct.h b/include/linux/fs_struct.h index 783b48dedb..fd236105ca 100644 --- a/include/linux/fs_struct.h +++ b/include/linux/fs_struct.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_FS_STRUCT_H #define 
_LINUX_FS_STRUCT_H @@ -7,9 +6,9 @@ #include struct fs_struct { - int users; + atomic_t users; spinlock_t lock; - seqcount_spinlock_t seq; + seqcount_t seq; int umask; int in_exec; struct path root, pwd; diff --git a/include/linux/fscache-cache.h b/include/linux/fscache-cache.h index 8d39491c5f..c4db7606b8 100644 --- a/include/linux/fscache-cache.h +++ b/include/linux/fscache-cache.h @@ -1,12 +1,16 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ /* General filesystem caching backing cache interface * * Copyright (C) 2004-2007 Red Hat, Inc. All Rights Reserved. * Written by David Howells (dhowells@redhat.com) * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. + * * NOTE!!! See: * - * Documentation/filesystems/caching/backend-api.rst + * Documentation/filesystems/caching/backend-api.txt * * for a description of the cache backend interface declared here. 
*/ @@ -25,18 +29,6 @@ struct fscache_cache_ops; struct fscache_object; struct fscache_operation; -enum fscache_obj_ref_trace { - fscache_obj_get_add_to_deps, - fscache_obj_get_queue, - fscache_obj_put_alloc_fail, - fscache_obj_put_attach_fail, - fscache_obj_put_drop_obj, - fscache_obj_put_enq_dep, - fscache_obj_put_queue, - fscache_obj_put_work, - fscache_obj_ref__nr_traces -}; - /* * cache tag definition */ @@ -46,7 +38,7 @@ struct fscache_cache_tag { unsigned long flags; #define FSCACHE_TAG_RESERVED 0 /* T if tag is reserved for a cache */ atomic_t usage; - char name[]; /* tag name */ + char name[0]; /* tag name */ }; /* @@ -125,14 +117,13 @@ struct fscache_operation { fscache_operation_release_t release; }; -extern atomic_t fscache_op_debug_id; +extern atomic_unchecked_t fscache_op_debug_id; extern void fscache_op_work_func(struct work_struct *work); extern void fscache_enqueue_operation(struct fscache_operation *); extern void fscache_op_complete(struct fscache_operation *, bool); extern void fscache_put_operation(struct fscache_operation *); -extern void fscache_operation_init(struct fscache_cookie *, - struct fscache_operation *, +extern void fscache_operation_init(struct fscache_operation *, fscache_operation_processor_t, fscache_operation_cancel_t, fscache_operation_release_t); @@ -147,6 +138,7 @@ struct fscache_retrieval { fscache_rw_complete_t end_io_func; /* function to call on I/O completion */ void *context; /* netfs read context (pinned) */ struct list_head to_do; /* list of things to be done by the backend */ + unsigned long start_time; /* time at which retrieval started */ atomic_t n_pages; /* number of pages to be retrieved */ }; @@ -191,8 +183,9 @@ static inline void fscache_enqueue_retrieval(struct fscache_retrieval *op) static inline void fscache_retrieval_complete(struct fscache_retrieval *op, int n_pages) { - if (atomic_sub_return_relaxed(n_pages, &op->n_pages) <= 0) - fscache_op_complete(&op->op, false); + atomic_sub(n_pages, &op->n_pages); + 
if (atomic_read(&op->n_pages) <= 0) + fscache_op_complete(&op->op, true); } /** @@ -238,8 +231,7 @@ struct fscache_cache_ops { void (*lookup_complete)(struct fscache_object *object); /* increment the usage count on this object (may fail if unmounting) */ - struct fscache_object *(*grab_object)(struct fscache_object *object, - enum fscache_obj_ref_trace why); + struct fscache_object *(*grab_object)(struct fscache_object *object); /* pin an object in the cache */ int (*pin_object)(struct fscache_object *object); @@ -262,8 +254,7 @@ struct fscache_cache_ops { void (*drop_object)(struct fscache_object *object); /* dispose of a reference to an object */ - void (*put_object)(struct fscache_object *object, - enum fscache_obj_ref_trace why); + void (*put_object)(struct fscache_object *object); /* sync a cache */ void (*sync_cache)(struct fscache_cache *cache); @@ -303,10 +294,6 @@ struct fscache_cache_ops { /* dissociate a cache from all the pages it was backing */ void (*dissociate_pages)(struct fscache_cache *cache); - - /* Begin a read operation for the netfs lib */ - int (*begin_read_operation)(struct netfs_read_request *rreq, - struct fscache_retrieval *op); }; extern struct fscache_cookie fscache_fsdef_index; @@ -373,7 +360,6 @@ struct fscache_object { #define FSCACHE_OBJECT_IS_AVAILABLE 5 /* T if object has become active */ #define FSCACHE_OBJECT_RETIRED 6 /* T if object was retired on relinquishment */ #define FSCACHE_OBJECT_KILLED_BY_CACHE 7 /* T if object was killed by the cache */ -#define FSCACHE_OBJECT_RUN_AFTER_DEAD 8 /* T if object has been dispatched after death */ struct list_head cache_link; /* link in cache->object_list */ struct hlist_node cookie_link; /* link in cookie->backing_objects */ @@ -384,6 +370,9 @@ struct fscache_object { struct list_head dependents; /* FIFO of dependent objects */ struct list_head dep_link; /* link in parent's dependents list */ struct list_head pending_ops; /* unstarted operations on this object */ +#ifdef 
CONFIG_FSCACHE_OBJECT_LIST + struct rb_node objlist_link; /* link in global object list */ +#endif pgoff_t store_limit; /* current storage limit */ loff_t store_limit_l; /* current storage limit */ }; @@ -454,7 +443,7 @@ static inline void fscache_object_lookup_error(struct fscache_object *object) * Set the maximum size an object is permitted to reach, implying the highest * byte that may be written. Intended to be called by the attr_changed() op. * - * See Documentation/filesystems/caching/backend-api.rst for a complete + * See Documentation/filesystems/caching/backend-api.txt for a complete * description. */ static inline @@ -506,7 +495,7 @@ static inline bool __fscache_unuse_cookie(struct fscache_cookie *cookie) static inline void __fscache_wake_unused_cookie(struct fscache_cookie *cookie) { - wake_up_var(&cookie->n_active); + wake_up_atomic_t(&cookie->n_active); } /** @@ -548,8 +537,7 @@ extern bool fscache_object_sleep_till_congested(signed long *timeoutp); extern enum fscache_checkaux fscache_check_aux(struct fscache_object *object, const void *data, - uint16_t datalen, - loff_t object_size); + uint16_t datalen); extern void fscache_object_retrying_stale(struct fscache_object *object); diff --git a/include/linux/fscache.h b/include/linux/fscache.h index a4dab59986..e7b812b950 100644 --- a/include/linux/fscache.h +++ b/include/linux/fscache.h @@ -1,12 +1,16 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ /* General filesystem caching interface * * Copyright (C) 2004-2007 Red Hat, Inc. All Rights Reserved. * Written by David Howells (dhowells@redhat.com) * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. + * * NOTE!!! 
See: * - * Documentation/filesystems/caching/netfs-api.rst + * Documentation/filesystems/caching/netfs-api.txt * * for a description of the network filesystem interface declared here. */ @@ -18,8 +22,6 @@ #include #include #include -#include -#include #if defined(CONFIG_FSCACHE) || defined(CONFIG_FSCACHE_MODULE) #define fscache_available() (1) @@ -30,6 +32,16 @@ #endif +/* + * overload PG_private_2 to give us PG_fscache - this is used to indicate that + * a page is currently backed by a local disk cache + */ +#define PageFsCache(page) PagePrivate2((page)) +#define SetPageFsCache(page) SetPagePrivate2((page)) +#define ClearPageFsCache(page) ClearPagePrivate2((page)) +#define TestSetPageFsCache(page) TestSetPagePrivate2((page)) +#define TestClearPageFsCache(page) TestClearPagePrivate2((page)) + /* pattern used to fill dead space in an index entry */ #define FSCACHE_INDEX_DEADFILL_PATTERN 0x79 @@ -37,7 +49,6 @@ struct pagevec; struct fscache_cache_tag; struct fscache_cookie; struct fscache_netfs; -struct netfs_read_request; typedef void (*fscache_rw_complete_t)(struct page *page, void *context, @@ -72,15 +83,45 @@ struct fscache_cookie_def { const void *parent_netfs_data, const void *cookie_netfs_data); + /* get an index key + * - should store the key data in the buffer + * - should return the amount of data stored + * - not permitted to return an error + * - the netfs data from the cookie being used as the source is + * presented + */ + uint16_t (*get_key)(const void *cookie_netfs_data, + void *buffer, + uint16_t bufmax); + + /* get certain file attributes from the netfs data + * - this function can be absent for an index + * - not permitted to return an error + * - the netfs data from the cookie being used as the source is + * presented + */ + void (*get_attr)(const void *cookie_netfs_data, uint64_t *size); + + /* get the auxiliary data from netfs data + * - this function can be absent if the index carries no state data + * - should store the auxiliary data in the 
buffer + * - should return the amount of amount stored + * - not permitted to return an error + * - the netfs data from the cookie being used as the source is + * presented + */ + uint16_t (*get_aux)(const void *cookie_netfs_data, + void *buffer, + uint16_t bufmax); + /* consult the netfs about the state of an object * - this function can be absent if the index carries no state data * - the netfs data from the cookie being used as the target is - * presented, as is the auxiliary data and the object size + * presented, as is the auxiliary data */ enum fscache_checkaux (*check_aux)(void *cookie_netfs_data, const void *data, - uint16_t datalen, - loff_t object_size); + uint16_t datalen); /* get an extra reference on a read context * - this function can be absent if the completion function doesn't @@ -102,7 +143,16 @@ struct fscache_cookie_def { void (*mark_page_cached)(void *cookie_netfs_data, struct address_space *mapping, struct page *page); -}; + + /* indicate the cookie is no longer cached + * - this function is called when the backing store currently caching + * a cookie is removed + * - the netfs should use this to clean up any markers indicating + * cached pages + * - this is mandatory for any object that may have data + */ + void (*now_uncached)(void *cookie_netfs_data); +} __do_const; /* * fscache cached network filesystem type @@ -113,6 +163,7 @@ struct fscache_netfs { uint32_t version; /* indexing version */ const char *name; /* filesystem name */ struct fscache_cookie *primary_index; + struct list_head link; /* internal link */ }; /* @@ -123,17 +174,14 @@ struct fscache_netfs { * - indices are created on disk just-in-time */ struct fscache_cookie { - refcount_t ref; /* number of users of this cookie */ + atomic_t usage; /* number of users of this cookie */ atomic_t n_children; /* number of children of this cookie */ atomic_t n_active; /* number of active users of netfs ptrs */ - unsigned int debug_id; spinlock_t lock; spinlock_t stores_lock; /* lock on 
page store tree */ struct hlist_head backing_objects; /* object(s) backing this file/index */ const struct fscache_cookie_def *def; /* definition */ struct fscache_cookie *parent; /* parent of this entry */ - struct hlist_bl_node hash_link; /* Link in hash table */ - struct list_head proc_link; /* Link in proc list */ void *netfs_data; /* back pointer to netfs */ struct radix_tree_root stores; /* pages to be stored on this cookie */ #define FSCACHE_COOKIE_PENDING_TAG 0 /* pages tag: pending write to cache */ @@ -147,22 +195,6 @@ struct fscache_cookie { #define FSCACHE_COOKIE_RELINQUISHED 4 /* T if cookie has been relinquished */ #define FSCACHE_COOKIE_ENABLED 5 /* T if cookie is enabled */ #define FSCACHE_COOKIE_ENABLEMENT_LOCK 6 /* T if cookie is being en/disabled */ -#define FSCACHE_COOKIE_AUX_UPDATED 8 /* T if the auxiliary data was updated */ -#define FSCACHE_COOKIE_ACQUIRED 9 /* T if cookie is in use */ -#define FSCACHE_COOKIE_RELINQUISHING 10 /* T if cookie is being relinquished */ - - u8 type; /* Type of object */ - u8 key_len; /* Length of index key */ - u8 aux_len; /* Length of auxiliary data */ - u32 key_hash; /* Hash of parent, type, key, len */ - union { - void *key; /* Index key */ - u8 inline_key[16]; /* - If the key is short enough */ - }; - union { - void *aux; /* Auxiliary data */ - u8 inline_aux[8]; /* - If the aux data is short enough */ - }; }; static inline bool fscache_cookie_enabled(struct fscache_cookie *cookie) @@ -185,19 +217,13 @@ extern void __fscache_release_cache_tag(struct fscache_cache_tag *); extern struct fscache_cookie *__fscache_acquire_cookie( struct fscache_cookie *, const struct fscache_cookie_def *, - const void *, size_t, - const void *, size_t, - void *, loff_t, bool); -extern void __fscache_relinquish_cookie(struct fscache_cookie *, const void *, bool); -extern int __fscache_check_consistency(struct fscache_cookie *, const void *); -extern void __fscache_update_cookie(struct fscache_cookie *, const void *); + void *, 
bool); +extern void __fscache_relinquish_cookie(struct fscache_cookie *, bool); +extern int __fscache_check_consistency(struct fscache_cookie *); +extern void __fscache_update_cookie(struct fscache_cookie *); extern int __fscache_attr_changed(struct fscache_cookie *); extern void __fscache_invalidate(struct fscache_cookie *); extern void __fscache_wait_on_invalidate(struct fscache_cookie *); - -#ifdef FSCACHE_USE_NEW_IO_API -extern int __fscache_begin_read_operation(struct netfs_read_request *, struct fscache_cookie *); -#else extern int __fscache_read_or_alloc_page(struct fscache_cookie *, struct page *, fscache_rw_complete_t, @@ -211,7 +237,7 @@ extern int __fscache_read_or_alloc_pages(struct fscache_cookie *, void *, gfp_t); extern int __fscache_alloc_page(struct fscache_cookie *, struct page *, gfp_t); -extern int __fscache_write_page(struct fscache_cookie *, struct page *, loff_t, gfp_t); +extern int __fscache_write_page(struct fscache_cookie *, struct page *, gfp_t); extern void __fscache_uncache_page(struct fscache_cookie *, struct page *); extern bool __fscache_check_page_write(struct fscache_cookie *, struct page *); extern void __fscache_wait_on_page_write(struct fscache_cookie *, struct page *); @@ -221,10 +247,8 @@ extern void __fscache_uncache_all_inode_pages(struct fscache_cookie *, struct inode *); extern void __fscache_readpages_cancel(struct fscache_cookie *cookie, struct list_head *pages); -#endif /* FSCACHE_USE_NEW_IO_API */ - -extern void __fscache_disable_cookie(struct fscache_cookie *, const void *, bool); -extern void __fscache_enable_cookie(struct fscache_cookie *, const void *, loff_t, +extern void __fscache_disable_cookie(struct fscache_cookie *, bool); +extern void __fscache_enable_cookie(struct fscache_cookie *, bool (*)(void *), void *); /** @@ -233,7 +257,7 @@ extern void __fscache_enable_cookie(struct fscache_cookie *, const void *, loff_ * * Register a filesystem as desiring caching services if they're available. 
* - * See Documentation/filesystems/caching/netfs-api.rst for a complete + * See Documentation/filesystems/caching/netfs-api.txt for a complete * description. */ static inline @@ -253,7 +277,7 @@ int fscache_register_netfs(struct fscache_netfs *netfs) * Indicate that a filesystem no longer desires caching services for the * moment. * - * See Documentation/filesystems/caching/netfs-api.rst for a complete + * See Documentation/filesystems/caching/netfs-api.txt for a complete * description. */ static inline @@ -270,7 +294,7 @@ void fscache_unregister_netfs(struct fscache_netfs *netfs) * Acquire a specific cache referral tag that can be used to select a specific * cache in which to cache an index. * - * See Documentation/filesystems/caching/netfs-api.rst for a complete + * See Documentation/filesystems/caching/netfs-api.txt for a complete * description. */ static inline @@ -288,7 +312,7 @@ struct fscache_cache_tag *fscache_lookup_cache_tag(const char *name) * * Release a reference to a cache referral tag previously looked up. * - * See Documentation/filesystems/caching/netfs-api.rst for a complete + * See Documentation/filesystems/caching/netfs-api.txt for a complete * description. 
*/ static inline @@ -302,39 +326,27 @@ void fscache_release_cache_tag(struct fscache_cache_tag *tag) * fscache_acquire_cookie - Acquire a cookie to represent a cache object * @parent: The cookie that's to be the parent of this one * @def: A description of the cache object, including callback operations - * @index_key: The index key for this cookie - * @index_key_len: Size of the index key - * @aux_data: The auxiliary data for the cookie (may be NULL) - * @aux_data_len: Size of the auxiliary data buffer * @netfs_data: An arbitrary piece of data to be kept in the cookie to * represent the cache object to the netfs - * @object_size: The initial size of object * @enable: Whether or not to enable a data cookie immediately * * This function is used to inform FS-Cache about part of an index hierarchy * that can be used to locate files. This is done by requesting a cookie for * each index in the path to the file. * - * See Documentation/filesystems/caching/netfs-api.rst for a complete + * See Documentation/filesystems/caching/netfs-api.txt for a complete * description. 
*/ static inline struct fscache_cookie *fscache_acquire_cookie( struct fscache_cookie *parent, const struct fscache_cookie_def *def, - const void *index_key, - size_t index_key_len, - const void *aux_data, - size_t aux_data_len, void *netfs_data, - loff_t object_size, bool enable) { if (fscache_cookie_valid(parent) && fscache_cookie_enabled(parent)) - return __fscache_acquire_cookie(parent, def, - index_key, index_key_len, - aux_data, aux_data_len, - netfs_data, object_size, enable); + return __fscache_acquire_cookie(parent, def, netfs_data, + enable); else return NULL; } @@ -343,44 +355,36 @@ struct fscache_cookie *fscache_acquire_cookie( * fscache_relinquish_cookie - Return the cookie to the cache, maybe discarding * it * @cookie: The cookie being returned - * @aux_data: The updated auxiliary data for the cookie (may be NULL) * @retire: True if the cache object the cookie represents is to be discarded * * This function returns a cookie to the cache, forcibly discarding the - * associated cache object if retire is set to true. The opportunity is - * provided to update the auxiliary data in the cache before the object is - * disconnected. + * associated cache object if retire is set to true. * - * See Documentation/filesystems/caching/netfs-api.rst for a complete + * See Documentation/filesystems/caching/netfs-api.txt for a complete * description. 
*/ static inline -void fscache_relinquish_cookie(struct fscache_cookie *cookie, - const void *aux_data, - bool retire) +void fscache_relinquish_cookie(struct fscache_cookie *cookie, bool retire) { if (fscache_cookie_valid(cookie)) - __fscache_relinquish_cookie(cookie, aux_data, retire); + __fscache_relinquish_cookie(cookie, retire); } /** - * fscache_check_consistency - Request validation of a cache's auxiliary data + * fscache_check_consistency - Request that if the cache is updated * @cookie: The cookie representing the cache object - * @aux_data: The updated auxiliary data for the cookie (may be NULL) * - * Request an consistency check from fscache, which passes the request to the - * backing cache. The auxiliary data on the cookie will be updated first if - * @aux_data is set. + * Request an consistency check from fscache, which passes the request + * to the backing cache. * * Returns 0 if consistent and -ESTALE if inconsistent. May also * return -ENOMEM and -ERESTARTSYS. */ static inline -int fscache_check_consistency(struct fscache_cookie *cookie, - const void *aux_data) +int fscache_check_consistency(struct fscache_cookie *cookie) { if (fscache_cookie_valid(cookie) && fscache_cookie_enabled(cookie)) - return __fscache_check_consistency(cookie, aux_data); + return __fscache_check_consistency(cookie); else return 0; } @@ -388,20 +392,18 @@ int fscache_check_consistency(struct fscache_cookie *cookie, /** * fscache_update_cookie - Request that a cache object be updated * @cookie: The cookie representing the cache object - * @aux_data: The updated auxiliary data for the cookie (may be NULL) * * Request an update of the index data for the cache object associated with the - * cookie. The auxiliary data on the cookie will be updated first if @aux_data - * is set. + * cookie. * - * See Documentation/filesystems/caching/netfs-api.rst for a complete + * See Documentation/filesystems/caching/netfs-api.txt for a complete * description. 
*/ static inline -void fscache_update_cookie(struct fscache_cookie *cookie, const void *aux_data) +void fscache_update_cookie(struct fscache_cookie *cookie) { if (fscache_cookie_valid(cookie) && fscache_cookie_enabled(cookie)) - __fscache_update_cookie(cookie, aux_data); + __fscache_update_cookie(cookie); } /** @@ -410,7 +412,7 @@ void fscache_update_cookie(struct fscache_cookie *cookie, const void *aux_data) * * Permit data-storage cache objects to be pinned in the cache. * - * See Documentation/filesystems/caching/netfs-api.rst for a complete + * See Documentation/filesystems/caching/netfs-api.txt for a complete * description. */ static inline @@ -425,7 +427,7 @@ int fscache_pin_cookie(struct fscache_cookie *cookie) * * Permit data-storage cache objects to be unpinned from the cache. * - * See Documentation/filesystems/caching/netfs-api.rst for a complete + * See Documentation/filesystems/caching/netfs-api.txt for a complete * description. */ static inline @@ -441,7 +443,7 @@ void fscache_unpin_cookie(struct fscache_cookie *cookie) * changed. This includes the data size. These attributes will be obtained * through the get_attr() cookie definition op. * - * See Documentation/filesystems/caching/netfs-api.rst for a complete + * See Documentation/filesystems/caching/netfs-api.txt for a complete * description. */ static inline @@ -463,7 +465,7 @@ int fscache_attr_changed(struct fscache_cookie *cookie) * * This can be called with spinlocks held. * - * See Documentation/filesystems/caching/netfs-api.rst for a complete + * See Documentation/filesystems/caching/netfs-api.txt for a complete * description. */ static inline @@ -479,7 +481,7 @@ void fscache_invalidate(struct fscache_cookie *cookie) * * Wait for the invalidation of an object to complete. * - * See Documentation/filesystems/caching/netfs-api.rst for a complete + * See Documentation/filesystems/caching/netfs-api.txt for a complete * description. 
*/ static inline @@ -498,7 +500,7 @@ void fscache_wait_on_invalidate(struct fscache_cookie *cookie) * cookie so that a write to that object within the space can always be * honoured. * - * See Documentation/filesystems/caching/netfs-api.rst for a complete + * See Documentation/filesystems/caching/netfs-api.txt for a complete * description. */ static inline @@ -507,36 +509,6 @@ int fscache_reserve_space(struct fscache_cookie *cookie, loff_t size) return -ENOBUFS; } -#ifdef FSCACHE_USE_NEW_IO_API - -/** - * fscache_begin_read_operation - Begin a read operation for the netfs lib - * @rreq: The read request being undertaken - * @cookie: The cookie representing the cache object - * - * Begin a read operation on behalf of the netfs helper library. @rreq - * indicates the read request to which the operation state should be attached; - * @cookie indicates the cache object that will be accessed. - * - * This is intended to be called from the ->begin_cache_operation() netfs lib - * operation as implemented by the network filesystem. - * - * Returns: - * * 0 - Success - * * -ENOBUFS - No caching available - * * Other error code from the cache, such as -ENOMEM. - */ -static inline -int fscache_begin_read_operation(struct netfs_read_request *rreq, - struct fscache_cookie *cookie) -{ - if (fscache_cookie_valid(cookie) && fscache_cookie_enabled(cookie)) - return __fscache_begin_read_operation(rreq, cookie); - return -ENOBUFS; -} - -#else /* FSCACHE_USE_NEW_IO_API */ - /** * fscache_read_or_alloc_page - Read a page from the cache or allocate a block * in which to store it @@ -563,7 +535,7 @@ int fscache_begin_read_operation(struct netfs_read_request *rreq, * Else, if the page is unbacked, -ENODATA is returned and a block may have * been allocated in the cache. * - * See Documentation/filesystems/caching/netfs-api.rst for a complete + * See Documentation/filesystems/caching/netfs-api.txt for a complete * description. 
*/ static inline @@ -612,7 +584,7 @@ int fscache_read_or_alloc_page(struct fscache_cookie *cookie, * regard to different pages, the return values are prioritised in that order. * Any pages submitted for reading are removed from the pages list. * - * See Documentation/filesystems/caching/netfs-api.rst for a complete + * See Documentation/filesystems/caching/netfs-api.txt for a complete * description. */ static inline @@ -647,7 +619,7 @@ int fscache_read_or_alloc_pages(struct fscache_cookie *cookie, * Else, a block will be allocated if one wasn't already, and 0 will be * returned * - * See Documentation/filesystems/caching/netfs-api.rst for a complete + * See Documentation/filesystems/caching/netfs-api.txt for a complete * description. */ static inline @@ -685,7 +657,6 @@ void fscache_readpages_cancel(struct fscache_cookie *cookie, * fscache_write_page - Request storage of a page in the cache * @cookie: The cookie representing the cache object * @page: The netfs page to store - * @object_size: Updated size of object * @gfp: The conditions under which memory allocation should be made * * Request the contents of the netfs page be written into the cache. This @@ -697,17 +668,16 @@ void fscache_readpages_cancel(struct fscache_cookie *cookie, * be cleared at the completion of the write to indicate the success or failure * of the operation. Note that the completion may happen before the return. * - * See Documentation/filesystems/caching/netfs-api.rst for a complete + * See Documentation/filesystems/caching/netfs-api.txt for a complete * description. 
*/ static inline int fscache_write_page(struct fscache_cookie *cookie, struct page *page, - loff_t object_size, gfp_t gfp) { if (fscache_cookie_valid(cookie) && fscache_cookie_enabled(cookie)) - return __fscache_write_page(cookie, page, object_size, gfp); + return __fscache_write_page(cookie, page, gfp); else return -ENOBUFS; } @@ -723,7 +693,7 @@ int fscache_write_page(struct fscache_cookie *cookie, * Note that this cannot cancel any outstanding I/O operations between this * page and the cache. * - * See Documentation/filesystems/caching/netfs-api.rst for a complete + * See Documentation/filesystems/caching/netfs-api.txt for a complete * description. */ static inline @@ -741,7 +711,7 @@ void fscache_uncache_page(struct fscache_cookie *cookie, * * Ask the cache if a page is being written to the cache. * - * See Documentation/filesystems/caching/netfs-api.rst for a complete + * See Documentation/filesystems/caching/netfs-api.txt for a complete * description. */ static inline @@ -761,7 +731,7 @@ bool fscache_check_page_write(struct fscache_cookie *cookie, * Ask the cache to wake us up when a page is no longer being written to the * cache. * - * See Documentation/filesystems/caching/netfs-api.rst for a complete + * See Documentation/filesystems/caching/netfs-api.txt for a complete * description. 
*/ static inline @@ -794,7 +764,7 @@ bool fscache_maybe_release_page(struct fscache_cookie *cookie, { if (fscache_cookie_valid(cookie) && PageFsCache(page)) return __fscache_maybe_release_page(cookie, page, gfp); - return true; + return false; } /** @@ -816,12 +786,9 @@ void fscache_uncache_all_inode_pages(struct fscache_cookie *cookie, __fscache_uncache_all_inode_pages(cookie, inode); } -#endif /* FSCACHE_USE_NEW_IO_API */ - /** * fscache_disable_cookie - Disable a cookie * @cookie: The cookie representing the cache object - * @aux_data: The updated auxiliary data for the cookie (may be NULL) * @invalidate: Invalidate the backing object * * Disable a cookie from accepting further alloc, read, write, invalidate, @@ -832,44 +799,34 @@ void fscache_uncache_all_inode_pages(struct fscache_cookie *cookie, * * If @invalidate is set, then the backing object will be invalidated and * detached, otherwise it will just be detached. - * - * If @aux_data is set, then auxiliary data will be updated from that. */ static inline -void fscache_disable_cookie(struct fscache_cookie *cookie, - const void *aux_data, - bool invalidate) +void fscache_disable_cookie(struct fscache_cookie *cookie, bool invalidate) { if (fscache_cookie_valid(cookie) && fscache_cookie_enabled(cookie)) - __fscache_disable_cookie(cookie, aux_data, invalidate); + __fscache_disable_cookie(cookie, invalidate); } /** * fscache_enable_cookie - Reenable a cookie * @cookie: The cookie representing the cache object - * @aux_data: The updated auxiliary data for the cookie (may be NULL) - * @object_size: Current size of object * @can_enable: A function to permit enablement once lock is held * @data: Data for can_enable() * * Reenable a previously disabled cookie, allowing it to accept further alloc, * read, write, invalidate, update or acquire operations. An attempt will be - * made to immediately reattach the cookie to a backing object. If @aux_data - * is set, the auxiliary data attached to the cookie will be updated. 
+ * made to immediately reattach the cookie to a backing object. * * The can_enable() function is called (if not NULL) once the enablement lock * is held to rule on whether enablement is still permitted to go ahead. */ static inline void fscache_enable_cookie(struct fscache_cookie *cookie, - const void *aux_data, - loff_t object_size, bool (*can_enable)(void *data), void *data) { if (fscache_cookie_valid(cookie) && !fscache_cookie_enabled(cookie)) - __fscache_enable_cookie(cookie, aux_data, object_size, - can_enable, data); + __fscache_enable_cookie(cookie, can_enable, data); } #endif /* _LINUX_FSCACHE_H */ diff --git a/include/linux/fscrypto.h b/include/linux/fscrypto.h new file mode 100644 index 0000000000..f6dfc2950f --- /dev/null +++ b/include/linux/fscrypto.h @@ -0,0 +1,409 @@ +/* + * General per-file encryption definition + * + * Copyright (C) 2015, Google, Inc. + * + * Written by Michael Halcrow, 2015. + * Modified by Jaegeuk Kim, 2015. + */ + +#ifndef _LINUX_FSCRYPTO_H +#define _LINUX_FSCRYPTO_H + +#include +#include +#include +#include +#include +#include +#include + +#define FS_KEY_DERIVATION_NONCE_SIZE 16 +#define FS_ENCRYPTION_CONTEXT_FORMAT_V1 1 + +#define FS_POLICY_FLAGS_PAD_4 0x00 +#define FS_POLICY_FLAGS_PAD_8 0x01 +#define FS_POLICY_FLAGS_PAD_16 0x02 +#define FS_POLICY_FLAGS_PAD_32 0x03 +#define FS_POLICY_FLAGS_PAD_MASK 0x03 +#define FS_POLICY_FLAGS_VALID 0x03 + +/* Encryption algorithms */ +#define FS_ENCRYPTION_MODE_INVALID 0 +#define FS_ENCRYPTION_MODE_AES_256_XTS 1 +#define FS_ENCRYPTION_MODE_AES_256_GCM 2 +#define FS_ENCRYPTION_MODE_AES_256_CBC 3 +#define FS_ENCRYPTION_MODE_AES_256_CTS 4 + +/** + * Encryption context for inode + * + * Protector format: + * 1 byte: Protector format (1 = this version) + * 1 byte: File contents encryption mode + * 1 byte: File names encryption mode + * 1 byte: Flags + * 8 bytes: Master Key descriptor + * 16 bytes: Encryption Key derivation nonce + */ +struct fscrypt_context { + u8 format; + u8 
contents_encryption_mode; + u8 filenames_encryption_mode; + u8 flags; + u8 master_key_descriptor[FS_KEY_DESCRIPTOR_SIZE]; + u8 nonce[FS_KEY_DERIVATION_NONCE_SIZE]; +} __packed; + +/* Encryption parameters */ +#define FS_XTS_TWEAK_SIZE 16 +#define FS_AES_128_ECB_KEY_SIZE 16 +#define FS_AES_256_GCM_KEY_SIZE 32 +#define FS_AES_256_CBC_KEY_SIZE 32 +#define FS_AES_256_CTS_KEY_SIZE 32 +#define FS_AES_256_XTS_KEY_SIZE 64 +#define FS_MAX_KEY_SIZE 64 + +#define FS_KEY_DESC_PREFIX "fscrypt:" +#define FS_KEY_DESC_PREFIX_SIZE 8 + +/* This is passed in from userspace into the kernel keyring */ +struct fscrypt_key { + u32 mode; + u8 raw[FS_MAX_KEY_SIZE]; + u32 size; +} __packed; + +struct fscrypt_info { + u8 ci_data_mode; + u8 ci_filename_mode; + u8 ci_flags; + struct crypto_skcipher *ci_ctfm; + u8 ci_master_key[FS_KEY_DESCRIPTOR_SIZE]; +}; + +#define FS_CTX_REQUIRES_FREE_ENCRYPT_FL 0x00000001 +#define FS_WRITE_PATH_FL 0x00000002 + +struct fscrypt_ctx { + union { + struct { + struct page *bounce_page; /* Ciphertext page */ + struct page *control_page; /* Original page */ + } w; + struct { + struct bio *bio; + struct work_struct work; + } r; + struct list_head free_list; /* Free list */ + }; + u8 flags; /* Flags */ + u8 mode; /* Encryption mode for tfm */ +}; + +struct fscrypt_completion_result { + struct completion completion; + int res; +}; + +#define DECLARE_FS_COMPLETION_RESULT(ecr) \ + struct fscrypt_completion_result ecr = { \ + COMPLETION_INITIALIZER((ecr).completion), 0 } + +#define FS_FNAME_NUM_SCATTER_ENTRIES 4 +#define FS_CRYPTO_BLOCK_SIZE 16 +#define FS_FNAME_CRYPTO_DIGEST_SIZE 32 + +/** + * For encrypted symlinks, the ciphertext length is stored at the beginning + * of the string in little-endian format. + */ +struct fscrypt_symlink_data { + __le16 len; + char encrypted_path[1]; +} __packed; + +/** + * This function is used to calculate the disk space required to + * store a filename of length l in encrypted symlink format. 
+ */ +static inline u32 fscrypt_symlink_data_len(u32 l) +{ + if (l < FS_CRYPTO_BLOCK_SIZE) + l = FS_CRYPTO_BLOCK_SIZE; + return (l + sizeof(struct fscrypt_symlink_data) - 1); +} + +struct fscrypt_str { + unsigned char *name; + u32 len; +}; + +struct fscrypt_name { + const struct qstr *usr_fname; + struct fscrypt_str disk_name; + u32 hash; + u32 minor_hash; + struct fscrypt_str crypto_buf; +}; + +#define FSTR_INIT(n, l) { .name = n, .len = l } +#define FSTR_TO_QSTR(f) QSTR_INIT((f)->name, (f)->len) +#define fname_name(p) ((p)->disk_name.name) +#define fname_len(p) ((p)->disk_name.len) + +/* + * crypto opertions for filesystems + */ +struct fscrypt_operations { + int (*get_context)(struct inode *, void *, size_t); + int (*key_prefix)(struct inode *, u8 **); + int (*prepare_context)(struct inode *); + int (*set_context)(struct inode *, const void *, size_t, void *); + int (*dummy_context)(struct inode *); + bool (*is_encrypted)(struct inode *); + bool (*empty_dir)(struct inode *); + unsigned (*max_namelen)(struct inode *); +}; + +static inline bool fscrypt_dummy_context_enabled(struct inode *inode) +{ + if (inode->i_sb->s_cop->dummy_context && + inode->i_sb->s_cop->dummy_context(inode)) + return true; + return false; +} + +static inline bool fscrypt_valid_contents_enc_mode(u32 mode) +{ + return (mode == FS_ENCRYPTION_MODE_AES_256_XTS); +} + +static inline bool fscrypt_valid_filenames_enc_mode(u32 mode) +{ + return (mode == FS_ENCRYPTION_MODE_AES_256_CTS); +} + +static inline bool fscrypt_is_dot_dotdot(const struct qstr *str) +{ + if (str->len == 1 && str->name[0] == '.') + return true; + + if (str->len == 2 && str->name[0] == '.' 
&& str->name[1] == '.') + return true; + + return false; +} + +static inline struct page *fscrypt_control_page(struct page *page) +{ +#if IS_ENABLED(CONFIG_FS_ENCRYPTION) + return ((struct fscrypt_ctx *)page_private(page))->w.control_page; +#else + WARN_ON_ONCE(1); + return ERR_PTR(-EINVAL); +#endif +} + +static inline int fscrypt_has_encryption_key(struct inode *inode) +{ +#if IS_ENABLED(CONFIG_FS_ENCRYPTION) + return (inode->i_crypt_info != NULL); +#else + return 0; +#endif +} + +static inline void fscrypt_set_encrypted_dentry(struct dentry *dentry) +{ +#if IS_ENABLED(CONFIG_FS_ENCRYPTION) + spin_lock(&dentry->d_lock); + dentry->d_flags |= DCACHE_ENCRYPTED_WITH_KEY; + spin_unlock(&dentry->d_lock); +#endif +} + +#if IS_ENABLED(CONFIG_FS_ENCRYPTION) +extern const struct dentry_operations fscrypt_d_ops; +#endif + +static inline void fscrypt_set_d_op(struct dentry *dentry) +{ +#if IS_ENABLED(CONFIG_FS_ENCRYPTION) + d_set_d_op(dentry, &fscrypt_d_ops); +#endif +} + +#if IS_ENABLED(CONFIG_FS_ENCRYPTION) +/* crypto.c */ +extern struct kmem_cache *fscrypt_info_cachep; +int fscrypt_initialize(void); + +extern struct fscrypt_ctx *fscrypt_get_ctx(struct inode *, gfp_t); +extern void fscrypt_release_ctx(struct fscrypt_ctx *); +extern struct page *fscrypt_encrypt_page(struct inode *, struct page *, gfp_t); +extern int fscrypt_decrypt_page(struct page *); +extern void fscrypt_decrypt_bio_pages(struct fscrypt_ctx *, struct bio *); +extern void fscrypt_pullback_bio_page(struct page **, bool); +extern void fscrypt_restore_control_page(struct page *); +extern int fscrypt_zeroout_range(struct inode *, pgoff_t, sector_t, + unsigned int); +/* policy.c */ +extern int fscrypt_process_policy(struct file *, const struct fscrypt_policy *); +extern int fscrypt_get_policy(struct inode *, struct fscrypt_policy *); +extern int fscrypt_has_permitted_context(struct inode *, struct inode *); +extern int fscrypt_inherit_context(struct inode *, struct inode *, + void *, bool); +/* keyinfo.c */ 
+extern int fscrypt_get_encryption_info(struct inode *); +extern void fscrypt_put_encryption_info(struct inode *, struct fscrypt_info *); + +/* fname.c */ +extern int fscrypt_setup_filename(struct inode *, const struct qstr *, + int lookup, struct fscrypt_name *); +extern void fscrypt_free_filename(struct fscrypt_name *); +extern u32 fscrypt_fname_encrypted_size(struct inode *, u32); +extern int fscrypt_fname_alloc_buffer(struct inode *, u32, + struct fscrypt_str *); +extern void fscrypt_fname_free_buffer(struct fscrypt_str *); +extern int fscrypt_fname_disk_to_usr(struct inode *, u32, u32, + const struct fscrypt_str *, struct fscrypt_str *); +extern int fscrypt_fname_usr_to_disk(struct inode *, const struct qstr *, + struct fscrypt_str *); +#endif + +/* crypto.c */ +static inline struct fscrypt_ctx *fscrypt_notsupp_get_ctx(struct inode *i, + gfp_t f) +{ + return ERR_PTR(-EOPNOTSUPP); +} + +static inline void fscrypt_notsupp_release_ctx(struct fscrypt_ctx *c) +{ + return; +} + +static inline struct page *fscrypt_notsupp_encrypt_page(struct inode *i, + struct page *p, gfp_t f) +{ + return ERR_PTR(-EOPNOTSUPP); +} + +static inline int fscrypt_notsupp_decrypt_page(struct page *p) +{ + return -EOPNOTSUPP; +} + +static inline void fscrypt_notsupp_decrypt_bio_pages(struct fscrypt_ctx *c, + struct bio *b) +{ + return; +} + +static inline void fscrypt_notsupp_pullback_bio_page(struct page **p, bool b) +{ + return; +} + +static inline void fscrypt_notsupp_restore_control_page(struct page *p) +{ + return; +} + +static inline int fscrypt_notsupp_zeroout_range(struct inode *i, pgoff_t p, + sector_t s, unsigned int f) +{ + return -EOPNOTSUPP; +} + +/* policy.c */ +static inline int fscrypt_notsupp_process_policy(struct file *f, + const struct fscrypt_policy *p) +{ + return -EOPNOTSUPP; +} + +static inline int fscrypt_notsupp_get_policy(struct inode *i, + struct fscrypt_policy *p) +{ + return -EOPNOTSUPP; +} + +static inline int fscrypt_notsupp_has_permitted_context(struct inode 
*p, + struct inode *i) +{ + return 0; +} + +static inline int fscrypt_notsupp_inherit_context(struct inode *p, + struct inode *i, void *v, bool b) +{ + return -EOPNOTSUPP; +} + +/* keyinfo.c */ +static inline int fscrypt_notsupp_get_encryption_info(struct inode *i) +{ + return -EOPNOTSUPP; +} + +static inline void fscrypt_notsupp_put_encryption_info(struct inode *i, + struct fscrypt_info *f) +{ + return; +} + + /* fname.c */ +static inline int fscrypt_notsupp_setup_filename(struct inode *dir, + const struct qstr *iname, + int lookup, struct fscrypt_name *fname) +{ + if (dir->i_sb->s_cop->is_encrypted(dir)) + return -EOPNOTSUPP; + + memset(fname, 0, sizeof(struct fscrypt_name)); + fname->usr_fname = iname; + fname->disk_name.name = (unsigned char *)iname->name; + fname->disk_name.len = iname->len; + return 0; +} + +static inline void fscrypt_notsupp_free_filename(struct fscrypt_name *fname) +{ + return; +} + +static inline u32 fscrypt_notsupp_fname_encrypted_size(struct inode *i, u32 s) +{ + /* never happens */ + WARN_ON(1); + return 0; +} + +static inline int fscrypt_notsupp_fname_alloc_buffer(struct inode *inode, + u32 ilen, struct fscrypt_str *crypto_str) +{ + return -EOPNOTSUPP; +} + +static inline void fscrypt_notsupp_fname_free_buffer(struct fscrypt_str *c) +{ + return; +} + +static inline int fscrypt_notsupp_fname_disk_to_usr(struct inode *inode, + u32 hash, u32 minor_hash, + const struct fscrypt_str *iname, + struct fscrypt_str *oname) +{ + return -EOPNOTSUPP; +} + +static inline int fscrypt_notsupp_fname_usr_to_disk(struct inode *inode, + const struct qstr *iname, + struct fscrypt_str *oname) +{ + return -EOPNOTSUPP; +} +#endif /* _LINUX_FSCRYPTO_H */ diff --git a/include/linux/fsl-diu-fb.h b/include/linux/fsl-diu-fb.h index 9a55ddc0d2..a1e8277120 100644 --- a/include/linux/fsl-diu-fb.h +++ b/include/linux/fsl-diu-fb.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * Copyright 2008 Freescale Semiconductor, Inc. All Rights Reserved. 
* @@ -10,6 +9,12 @@ * York Sun * * Based on imxfb.c Copyright (C) 2004 S.Hauer, Pengutronix + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + * */ #ifndef __FSL_DIU_FB_H__ @@ -68,7 +73,7 @@ struct diu_ad { /* Word 0(32-bit) in DDR memory */ /* __u16 comp; */ /* __u16 pixel_s:2; */ -/* __u16 palette:1; */ +/* __u16 pallete:1; */ /* __u16 red_c:2; */ /* __u16 green_c:2; */ /* __u16 blue_c:2; */ @@ -137,7 +142,7 @@ struct diu_ad { struct diu { __be32 desc[3]; __be32 gamma; - __be32 palette; + __be32 pallete; __be32 cursor; __be32 curs_pos; __be32 diu_mode; diff --git a/include/linux/fsl/bestcomm/bestcomm.h b/include/linux/fsl/bestcomm/bestcomm.h index 154e541ce5..a0e2e6b19b 100644 --- a/include/linux/fsl/bestcomm/bestcomm.h +++ b/include/linux/fsl/bestcomm/bestcomm.h @@ -27,7 +27,7 @@ */ struct bcom_bd { u32 status; - u32 data[]; /* variable payload size */ + u32 data[0]; /* variable payload size */ }; /* ======================================================================== */ diff --git a/include/linux/fsl/bestcomm/gen_bd.h b/include/linux/fsl/bestcomm/gen_bd.h index aeb312a1cd..de47260e69 100644 --- a/include/linux/fsl/bestcomm/gen_bd.h +++ b/include/linux/fsl/bestcomm/gen_bd.h @@ -1,10 +1,16 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * Header for Bestcomm General Buffer Descriptor tasks driver * + * * Copyright (C) 2007 Sylvain Munaut * Copyright (C) 2006 AppSpec Computer Technologies Corp. * Jeff Gibbons + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation. 
+ * + * */ #ifndef __BESTCOMM_GEN_BD_H__ diff --git a/include/linux/fsl/edac.h b/include/linux/fsl/edac.h index 148a297d75..90d64d4ec1 100644 --- a/include/linux/fsl/edac.h +++ b/include/linux/fsl/edac.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef FSL_EDAC_H #define FSL_EDAC_H diff --git a/include/linux/fsl/guts.h b/include/linux/fsl/guts.h index fdb55ca47a..649e9171a9 100644 --- a/include/linux/fsl/guts.h +++ b/include/linux/fsl/guts.h @@ -1,20 +1,23 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ -/* +/** * Freecale 85xx and 86xx Global Utilties register set * * Authors: Jeff Brown * Timur Tabi * * Copyright 2004,2007,2012 Freescale Semiconductor, Inc + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. */ #ifndef __FSL_GUTS_H__ #define __FSL_GUTS_H__ #include -#include -/* +/** * Global Utility Registers. * * Not all registers defined in this structure are available on all chips, so @@ -26,111 +29,84 @@ * #ifdefs. 
*/ struct ccsr_guts { - u32 porpllsr; /* 0x.0000 - POR PLL Ratio Status Register */ - u32 porbmsr; /* 0x.0004 - POR Boot Mode Status Register */ - u32 porimpscr; /* 0x.0008 - POR I/O Impedance Status and - * Control Register - */ - u32 pordevsr; /* 0x.000c - POR I/O Device Status Register */ - u32 pordbgmsr; /* 0x.0010 - POR Debug Mode Status Register */ - u32 pordevsr2; /* 0x.0014 - POR device status register 2 */ + __be32 porpllsr; /* 0x.0000 - POR PLL Ratio Status Register */ + __be32 porbmsr; /* 0x.0004 - POR Boot Mode Status Register */ + __be32 porimpscr; /* 0x.0008 - POR I/O Impedance Status and Control Register */ + __be32 pordevsr; /* 0x.000c - POR I/O Device Status Register */ + __be32 pordbgmsr; /* 0x.0010 - POR Debug Mode Status Register */ + __be32 pordevsr2; /* 0x.0014 - POR device status register 2 */ u8 res018[0x20 - 0x18]; - u32 porcir; /* 0x.0020 - POR Configuration Information - * Register - */ + __be32 porcir; /* 0x.0020 - POR Configuration Information Register */ u8 res024[0x30 - 0x24]; - u32 gpiocr; /* 0x.0030 - GPIO Control Register */ + __be32 gpiocr; /* 0x.0030 - GPIO Control Register */ u8 res034[0x40 - 0x34]; - u32 gpoutdr; /* 0x.0040 - General-Purpose Output Data - * Register - */ + __be32 gpoutdr; /* 0x.0040 - General-Purpose Output Data Register */ u8 res044[0x50 - 0x44]; - u32 gpindr; /* 0x.0050 - General-Purpose Input Data - * Register - */ + __be32 gpindr; /* 0x.0050 - General-Purpose Input Data Register */ u8 res054[0x60 - 0x54]; - u32 pmuxcr; /* 0x.0060 - Alternate Function Signal - * Multiplex Control - */ - u32 pmuxcr2; /* 0x.0064 - Alternate function signal - * multiplex control 2 - */ - u32 dmuxcr; /* 0x.0068 - DMA Mux Control Register */ + __be32 pmuxcr; /* 0x.0060 - Alternate Function Signal Multiplex Control */ + __be32 pmuxcr2; /* 0x.0064 - Alternate function signal multiplex control 2 */ + __be32 dmuxcr; /* 0x.0068 - DMA Mux Control Register */ u8 res06c[0x70 - 0x6c]; - u32 devdisr; /* 0x.0070 - Device Disable Control */ 
+ __be32 devdisr; /* 0x.0070 - Device Disable Control */ #define CCSR_GUTS_DEVDISR_TB1 0x00001000 #define CCSR_GUTS_DEVDISR_TB0 0x00004000 - u32 devdisr2; /* 0x.0074 - Device Disable Control 2 */ + __be32 devdisr2; /* 0x.0074 - Device Disable Control 2 */ u8 res078[0x7c - 0x78]; - u32 pmjcr; /* 0x.007c - 4 Power Management Jog Control - * Register - */ - u32 powmgtcsr; /* 0x.0080 - Power Management Status and - * Control Register - */ - u32 pmrccr; /* 0x.0084 - Power Management Reset Counter - * Configuration Register - */ - u32 pmpdccr; /* 0x.0088 - Power Management Power Down Counter - * Configuration Register - */ - u32 pmcdr; /* 0x.008c - 4Power management clock disable - * register - */ - u32 mcpsumr; /* 0x.0090 - Machine Check Summary Register */ - u32 rstrscr; /* 0x.0094 - Reset Request Status and - * Control Register - */ - u32 ectrstcr; /* 0x.0098 - Exception reset control register */ - u32 autorstsr; /* 0x.009c - Automatic reset status register */ - u32 pvr; /* 0x.00a0 - Processor Version Register */ - u32 svr; /* 0x.00a4 - System Version Register */ + __be32 pmjcr; /* 0x.007c - 4 Power Management Jog Control Register */ + __be32 powmgtcsr; /* 0x.0080 - Power Management Status and Control Register */ + __be32 pmrccr; /* 0x.0084 - Power Management Reset Counter Configuration Register */ + __be32 pmpdccr; /* 0x.0088 - Power Management Power Down Counter Configuration Register */ + __be32 pmcdr; /* 0x.008c - 4Power management clock disable register */ + __be32 mcpsumr; /* 0x.0090 - Machine Check Summary Register */ + __be32 rstrscr; /* 0x.0094 - Reset Request Status and Control Register */ + __be32 ectrstcr; /* 0x.0098 - Exception reset control register */ + __be32 autorstsr; /* 0x.009c - Automatic reset status register */ + __be32 pvr; /* 0x.00a0 - Processor Version Register */ + __be32 svr; /* 0x.00a4 - System Version Register */ u8 res0a8[0xb0 - 0xa8]; - u32 rstcr; /* 0x.00b0 - Reset Control Register */ + __be32 rstcr; /* 0x.00b0 - Reset Control Register 
*/ u8 res0b4[0xc0 - 0xb4]; - u32 iovselsr; /* 0x.00c0 - I/O voltage select status register + __be32 iovselsr; /* 0x.00c0 - I/O voltage select status register Called 'elbcvselcr' on 86xx SOCs */ u8 res0c4[0x100 - 0xc4]; - u32 rcwsr[16]; /* 0x.0100 - Reset Control Word Status registers + __be32 rcwsr[16]; /* 0x.0100 - Reset Control Word Status registers There are 16 registers */ u8 res140[0x224 - 0x140]; - u32 iodelay1; /* 0x.0224 - IO delay control register 1 */ - u32 iodelay2; /* 0x.0228 - IO delay control register 2 */ + __be32 iodelay1; /* 0x.0224 - IO delay control register 1 */ + __be32 iodelay2; /* 0x.0228 - IO delay control register 2 */ u8 res22c[0x604 - 0x22c]; - u32 pamubypenr; /* 0x.604 - PAMU bypass enable register */ + __be32 pamubypenr; /* 0x.604 - PAMU bypass enable register */ u8 res608[0x800 - 0x608]; - u32 clkdvdr; /* 0x.0800 - Clock Divide Register */ + __be32 clkdvdr; /* 0x.0800 - Clock Divide Register */ u8 res804[0x900 - 0x804]; - u32 ircr; /* 0x.0900 - Infrared Control Register */ + __be32 ircr; /* 0x.0900 - Infrared Control Register */ u8 res904[0x908 - 0x904]; - u32 dmacr; /* 0x.0908 - DMA Control Register */ + __be32 dmacr; /* 0x.0908 - DMA Control Register */ u8 res90c[0x914 - 0x90c]; - u32 elbccr; /* 0x.0914 - eLBC Control Register */ + __be32 elbccr; /* 0x.0914 - eLBC Control Register */ u8 res918[0xb20 - 0x918]; - u32 ddr1clkdr; /* 0x.0b20 - DDR1 Clock Disable Register */ - u32 ddr2clkdr; /* 0x.0b24 - DDR2 Clock Disable Register */ - u32 ddrclkdr; /* 0x.0b28 - DDR Clock Disable Register */ + __be32 ddr1clkdr; /* 0x.0b20 - DDR1 Clock Disable Register */ + __be32 ddr2clkdr; /* 0x.0b24 - DDR2 Clock Disable Register */ + __be32 ddrclkdr; /* 0x.0b28 - DDR Clock Disable Register */ u8 resb2c[0xe00 - 0xb2c]; - u32 clkocr; /* 0x.0e00 - Clock Out Select Register */ + __be32 clkocr; /* 0x.0e00 - Clock Out Select Register */ u8 rese04[0xe10 - 0xe04]; - u32 ddrdllcr; /* 0x.0e10 - DDR DLL Control Register */ + __be32 ddrdllcr; /* 0x.0e10 - DDR DLL 
Control Register */ u8 rese14[0xe20 - 0xe14]; - u32 lbcdllcr; /* 0x.0e20 - LBC DLL Control Register */ - u32 cpfor; /* 0x.0e24 - L2 charge pump fuse override - * register - */ + __be32 lbcdllcr; /* 0x.0e20 - LBC DLL Control Register */ + __be32 cpfor; /* 0x.0e24 - L2 charge pump fuse override register */ u8 rese28[0xf04 - 0xe28]; - u32 srds1cr0; /* 0x.0f04 - SerDes1 Control Register 0 */ - u32 srds1cr1; /* 0x.0f08 - SerDes1 Control Register 0 */ + __be32 srds1cr0; /* 0x.0f04 - SerDes1 Control Register 0 */ + __be32 srds1cr1; /* 0x.0f08 - SerDes1 Control Register 0 */ u8 resf0c[0xf2c - 0xf0c]; - u32 itcr; /* 0x.0f2c - Internal transaction control - * register - */ + __be32 itcr; /* 0x.0f2c - Internal transaction control register */ u8 resf30[0xf40 - 0xf30]; - u32 srds2cr0; /* 0x.0f40 - SerDes2 Control Register 0 */ - u32 srds2cr1; /* 0x.0f44 - SerDes2 Control Register 0 */ + __be32 srds2cr0; /* 0x.0f40 - SerDes2 Control Register 0 */ + __be32 srds2cr1; /* 0x.0f44 - SerDes2 Control Register 0 */ } __attribute__ ((packed)); + /* Alternate function signal multiplex control */ #define MPC85xx_PMUXCR_QE(x) (0x8000 >> (x)) diff --git a/include/linux/fsl_devices.h b/include/linux/fsl_devices.h index 5d231ce870..f291291414 100644 --- a/include/linux/fsl_devices.h +++ b/include/linux/fsl_devices.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * include/linux/fsl_devices.h * @@ -8,6 +7,11 @@ * Maintainer: Kumar Gala * * Copyright 2004,2012 Freescale Semiconductor, Inc + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. 
*/ #ifndef _FSL_DEVICE_H_ @@ -94,12 +98,9 @@ struct fsl_usb2_platform_data { unsigned suspended:1; unsigned already_suspended:1; - unsigned has_fsl_erratum_a007792:1; - unsigned has_fsl_erratum_14:1; - unsigned has_fsl_erratum_a005275:1; - unsigned has_fsl_erratum_a005697:1; - unsigned has_fsl_erratum_a006918:1; - unsigned check_phy_clk_valid:1; + unsigned has_fsl_erratum_a007792:1; + unsigned has_fsl_erratum_a005275:1; + unsigned check_phy_clk_valid:1; /* register save area for suspend/resume */ u32 pm_command; diff --git a/include/linux/fsl_ifc.h b/include/linux/fsl_ifc.h index 0af96a45e9..c332f0a456 100644 --- a/include/linux/fsl_ifc.h +++ b/include/linux/fsl_ifc.h @@ -1,9 +1,22 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ /* Freescale Integrated Flash Controller * * Copyright 2011 Freescale Semiconductor, Inc * * Author: Dipen Dudhat + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #ifndef __ASM_FSL_IFC_H @@ -261,8 +274,6 @@ */ /* Auto Boot Mode */ #define IFC_NAND_NCFGR_BOOT 0x80000000 -/* SRAM Initialization */ -#define IFC_NAND_NCFGR_SRAM_INIT_EN 0x20000000 /* Addressing Mode-ROW0+n/COL0 */ #define IFC_NAND_NCFGR_ADDR_MODE_RC0 0x00000000 /* Addressing Mode-ROW0+n/COL0+n */ @@ -723,7 +734,11 @@ struct fsl_ifc_nand { u32 res19[0x10]; __be32 nand_fsr; u32 res20; - __be32 nand_eccstat[8]; + /* The V1 nand_eccstat is actually 4 words that overlaps the + * V2 nand_eccstat. + */ + __be32 v1_nand_eccstat[2]; + __be32 v2_nand_eccstat[6]; u32 res21[0x1c]; __be32 nanndcr; u32 res22[0x2]; diff --git a/include/linux/fsldma.h b/include/linux/fsldma.h index c523d716eb..b213c02963 100644 --- a/include/linux/fsldma.h +++ b/include/linux/fsldma.h @@ -1,5 +1,8 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ /* + * This is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. */ #ifndef FSL_DMA_H diff --git a/include/linux/fsnotify.h b/include/linux/fsnotify.h index 12d3a7d308..fd880400b4 100644 --- a/include/linux/fsnotify.h +++ b/include/linux/fsnotify.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_FS_NOTIFY_H #define _LINUX_FS_NOTIFY_H @@ -17,111 +16,43 @@ #include #include -/* - * Notify this @dir inode about a change in a child directory entry. - * The directory entry may have turned positive or negative or its inode may - * have changed (i.e. renamed over). 
- * - * Unlike fsnotify_parent(), the event will be reported regardless of the - * FS_EVENT_ON_CHILD mask on the parent inode and will not be reported if only - * the child is interested and not the parent. - */ -static inline void fsnotify_name(struct inode *dir, __u32 mask, - struct inode *child, - const struct qstr *name, u32 cookie) -{ - if (atomic_long_read(&dir->i_sb->s_fsnotify_connectors) == 0) - return; - - fsnotify(mask, child, FSNOTIFY_EVENT_INODE, dir, name, NULL, cookie); -} - -static inline void fsnotify_dirent(struct inode *dir, struct dentry *dentry, - __u32 mask) -{ - fsnotify_name(dir, mask, d_inode(dentry), &dentry->d_name, 0); -} - -static inline void fsnotify_inode(struct inode *inode, __u32 mask) -{ - if (atomic_long_read(&inode->i_sb->s_fsnotify_connectors) == 0) - return; - - if (S_ISDIR(inode->i_mode)) - mask |= FS_ISDIR; - - fsnotify(mask, inode, FSNOTIFY_EVENT_INODE, NULL, NULL, inode, 0); -} - /* Notify this dentry's parent about a child's events. */ -static inline int fsnotify_parent(struct dentry *dentry, __u32 mask, - const void *data, int data_type) +static inline int fsnotify_parent(struct path *path, struct dentry *dentry, __u32 mask) { - struct inode *inode = d_inode(dentry); + if (!dentry) + dentry = path->dentry; - if (atomic_long_read(&inode->i_sb->s_fsnotify_connectors) == 0) - return 0; - - if (S_ISDIR(inode->i_mode)) { - mask |= FS_ISDIR; - - /* sb/mount marks are not interested in name of directory */ - if (!(dentry->d_flags & DCACHE_FSNOTIFY_PARENT_WATCHED)) - goto notify_child; - } - - /* disconnected dentry cannot notify parent */ - if (IS_ROOT(dentry)) - goto notify_child; - - return __fsnotify_parent(dentry, mask, data, data_type); - -notify_child: - return fsnotify(mask, data, data_type, NULL, NULL, inode, 0); + return __fsnotify_parent(path, dentry, mask); } -/* - * Simple wrappers to consolidate calls to fsnotify_parent() when an event - * is on a file/dentry. 
- */ -static inline void fsnotify_dentry(struct dentry *dentry, __u32 mask) +/* simple call site for access decisions */ +static inline int fsnotify_perm(struct file *file, int mask) { - fsnotify_parent(dentry, mask, d_inode(dentry), FSNOTIFY_EVENT_INODE); -} - -static inline int fsnotify_file(struct file *file, __u32 mask) -{ - const struct path *path = &file->f_path; + struct path *path = &file->f_path; + /* + * Do not use file_inode() here or anywhere in this file to get the + * inode. That would break *notity on overlayfs. + */ + struct inode *inode = path->dentry->d_inode; + __u32 fsnotify_mask = 0; + int ret; if (file->f_mode & FMODE_NONOTIFY) return 0; - - return fsnotify_parent(path->dentry, mask, path, FSNOTIFY_EVENT_PATH); -} - -/* Simple call site for access decisions */ -static inline int fsnotify_perm(struct file *file, int mask) -{ - int ret; - __u32 fsnotify_mask = 0; - if (!(mask & (MAY_READ | MAY_OPEN))) return 0; - - if (mask & MAY_OPEN) { + if (mask & MAY_OPEN) fsnotify_mask = FS_OPEN_PERM; - - if (file->f_flags & __FMODE_EXEC) { - ret = fsnotify_file(file, FS_OPEN_EXEC_PERM); - - if (ret) - return ret; - } - } else if (mask & MAY_READ) { + else if (mask & MAY_READ) fsnotify_mask = FS_ACCESS_PERM; - } + else + BUG(); - return fsnotify_file(file, fsnotify_mask); + ret = fsnotify_parent(path, NULL, fsnotify_mask); + if (ret) + return ret; + + return fsnotify(inode, fsnotify_mask, path, FSNOTIFY_EVENT_PATH, NULL, 0); } /* @@ -129,22 +60,21 @@ static inline int fsnotify_perm(struct file *file, int mask) */ static inline void fsnotify_link_count(struct inode *inode) { - fsnotify_inode(inode, FS_ATTRIB); + fsnotify(inode, FS_ATTRIB, inode, FSNOTIFY_EVENT_INODE, NULL, 0); } /* * fsnotify_move - file old_name at old_dir was moved to new_name at new_dir */ static inline void fsnotify_move(struct inode *old_dir, struct inode *new_dir, - const struct qstr *old_name, - int isdir, struct inode *target, - struct dentry *moved) + const unsigned char *old_name, 
+ int isdir, struct inode *target, struct dentry *moved) { struct inode *source = moved->d_inode; u32 fs_cookie = fsnotify_get_cookie(); - __u32 old_dir_mask = FS_MOVED_FROM; - __u32 new_dir_mask = FS_MOVED_TO; - const struct qstr *new_name = &moved->d_name; + __u32 old_dir_mask = (FS_EVENT_ON_CHILD | FS_MOVED_FROM); + __u32 new_dir_mask = (FS_EVENT_ON_CHILD | FS_MOVED_TO); + const unsigned char *new_name = moved->d_name.name; if (old_dir == new_dir) old_dir_mask |= FS_DN_RENAME; @@ -154,12 +84,16 @@ static inline void fsnotify_move(struct inode *old_dir, struct inode *new_dir, new_dir_mask |= FS_ISDIR; } - fsnotify_name(old_dir, old_dir_mask, source, old_name, fs_cookie); - fsnotify_name(new_dir, new_dir_mask, source, new_name, fs_cookie); + fsnotify(old_dir, old_dir_mask, source, FSNOTIFY_EVENT_INODE, old_name, + fs_cookie); + fsnotify(new_dir, new_dir_mask, source, FSNOTIFY_EVENT_INODE, new_name, + fs_cookie); if (target) fsnotify_link_count(target); - fsnotify_inode(source, FS_MOVE_SELF); + + if (source) + fsnotify(source, FS_MOVE_SELF, moved->d_inode, FSNOTIFY_EVENT_INODE, NULL, 0); audit_inode_child(new_dir, moved, AUDIT_TYPE_CHILD_CREATE); } @@ -179,12 +113,25 @@ static inline void fsnotify_vfsmount_delete(struct vfsmount *mnt) __fsnotify_vfsmount_delete(mnt); } +/* + * fsnotify_nameremove - a filename was removed from a directory + */ +static inline void fsnotify_nameremove(struct dentry *dentry, int isdir) +{ + __u32 mask = FS_DELETE; + + if (isdir) + mask |= FS_ISDIR; + + fsnotify_parent(NULL, dentry, mask); +} + /* * fsnotify_inoderemove - an inode is going away */ static inline void fsnotify_inoderemove(struct inode *inode) { - fsnotify_inode(inode, FS_DELETE_SELF); + fsnotify(inode, FS_DELETE_SELF, inode, FSNOTIFY_EVENT_INODE, NULL, 0); __fsnotify_inode_delete(inode); } @@ -195,7 +142,7 @@ static inline void fsnotify_create(struct inode *inode, struct dentry *dentry) { audit_inode_child(inode, dentry, AUDIT_TYPE_CHILD_CREATE); - fsnotify_dirent(inode, 
dentry, FS_CREATE); + fsnotify(inode, FS_CREATE, dentry->d_inode, FSNOTIFY_EVENT_INODE, dentry->d_name.name, 0); } /* @@ -203,26 +150,12 @@ static inline void fsnotify_create(struct inode *inode, struct dentry *dentry) * Note: We have to pass also the linked inode ptr as some filesystems leave * new_dentry->d_inode NULL and instantiate inode pointer later */ -static inline void fsnotify_link(struct inode *dir, struct inode *inode, - struct dentry *new_dentry) +static inline void fsnotify_link(struct inode *dir, struct inode *inode, struct dentry *new_dentry) { fsnotify_link_count(inode); audit_inode_child(dir, new_dentry, AUDIT_TYPE_CHILD_CREATE); - fsnotify_name(dir, FS_CREATE, inode, &new_dentry->d_name, 0); -} - -/* - * fsnotify_unlink - 'name' was unlinked - * - * Caller must make sure that dentry->d_name is stable. - */ -static inline void fsnotify_unlink(struct inode *dir, struct dentry *dentry) -{ - /* Expected to be called before d_delete() */ - WARN_ON_ONCE(d_is_negative(dentry)); - - fsnotify_dirent(dir, dentry, FS_DELETE); + fsnotify(dir, FS_CREATE, inode, FSNOTIFY_EVENT_INODE, new_dentry->d_name.name, 0); } /* @@ -230,22 +163,12 @@ static inline void fsnotify_unlink(struct inode *dir, struct dentry *dentry) */ static inline void fsnotify_mkdir(struct inode *inode, struct dentry *dentry) { + __u32 mask = (FS_CREATE | FS_ISDIR); + struct inode *d_inode = dentry->d_inode; + audit_inode_child(inode, dentry, AUDIT_TYPE_CHILD_CREATE); - fsnotify_dirent(inode, dentry, FS_CREATE | FS_ISDIR); -} - -/* - * fsnotify_rmdir - directory 'name' was removed - * - * Caller must make sure that dentry->d_name is stable. 
- */ -static inline void fsnotify_rmdir(struct inode *dir, struct dentry *dentry) -{ - /* Expected to be called before d_delete() */ - WARN_ON_ONCE(d_is_negative(dentry)); - - fsnotify_dirent(dir, dentry, FS_DELETE | FS_ISDIR); + fsnotify(inode, mask, d_inode, FSNOTIFY_EVENT_INODE, dentry->d_name.name, 0); } /* @@ -253,7 +176,20 @@ static inline void fsnotify_rmdir(struct inode *dir, struct dentry *dentry) */ static inline void fsnotify_access(struct file *file) { - fsnotify_file(file, FS_ACCESS); + struct path *path = &file->f_path; + struct inode *inode = path->dentry->d_inode; + __u32 mask = FS_ACCESS; + + if (is_sidechannel_device(inode)) + return; + + if (S_ISDIR(inode->i_mode)) + mask |= FS_ISDIR; + + if (!(file->f_mode & FMODE_NONOTIFY)) { + fsnotify_parent(path, NULL, mask); + fsnotify(inode, mask, path, FSNOTIFY_EVENT_PATH, NULL, 0); + } } /* @@ -261,7 +197,20 @@ static inline void fsnotify_access(struct file *file) */ static inline void fsnotify_modify(struct file *file) { - fsnotify_file(file, FS_MODIFY); + struct path *path = &file->f_path; + struct inode *inode = path->dentry->d_inode; + __u32 mask = FS_MODIFY; + + if (is_sidechannel_device(inode)) + return; + + if (S_ISDIR(inode->i_mode)) + mask |= FS_ISDIR; + + if (!(file->f_mode & FMODE_NONOTIFY)) { + fsnotify_parent(path, NULL, mask); + fsnotify(inode, mask, path, FSNOTIFY_EVENT_PATH, NULL, 0); + } } /* @@ -269,12 +218,15 @@ static inline void fsnotify_modify(struct file *file) */ static inline void fsnotify_open(struct file *file) { + struct path *path = &file->f_path; + struct inode *inode = path->dentry->d_inode; __u32 mask = FS_OPEN; - if (file->f_flags & __FMODE_EXEC) - mask |= FS_OPEN_EXEC; + if (S_ISDIR(inode->i_mode)) + mask |= FS_ISDIR; - fsnotify_file(file, mask); + fsnotify_parent(path, NULL, mask); + fsnotify(inode, mask, path, FSNOTIFY_EVENT_PATH, NULL, 0); } /* @@ -282,10 +234,18 @@ static inline void fsnotify_open(struct file *file) */ static inline void fsnotify_close(struct file 
*file) { - __u32 mask = (file->f_mode & FMODE_WRITE) ? FS_CLOSE_WRITE : - FS_CLOSE_NOWRITE; + struct path *path = &file->f_path; + struct inode *inode = path->dentry->d_inode; + fmode_t mode = file->f_mode; + __u32 mask = (mode & FMODE_WRITE) ? FS_CLOSE_WRITE : FS_CLOSE_NOWRITE; - fsnotify_file(file, mask); + if (S_ISDIR(inode->i_mode)) + mask |= FS_ISDIR; + + if (!(file->f_mode & FMODE_NONOTIFY)) { + fsnotify_parent(path, NULL, mask); + fsnotify(inode, mask, path, FSNOTIFY_EVENT_PATH, NULL, 0); + } } /* @@ -293,7 +253,14 @@ static inline void fsnotify_close(struct file *file) */ static inline void fsnotify_xattr(struct dentry *dentry) { - fsnotify_dentry(dentry, FS_ATTRIB); + struct inode *inode = dentry->d_inode; + __u32 mask = FS_ATTRIB; + + if (S_ISDIR(inode->i_mode)) + mask |= FS_ISDIR; + + fsnotify_parent(NULL, dentry, mask); + fsnotify(inode, mask, inode, FSNOTIFY_EVENT_INODE, NULL, 0); } /* @@ -302,6 +269,7 @@ static inline void fsnotify_xattr(struct dentry *dentry) */ static inline void fsnotify_change(struct dentry *dentry, unsigned int ia_valid) { + struct inode *inode = dentry->d_inode; __u32 mask = 0; if (ia_valid & ATTR_UID) @@ -322,8 +290,44 @@ static inline void fsnotify_change(struct dentry *dentry, unsigned int ia_valid) if (ia_valid & ATTR_MODE) mask |= FS_ATTRIB; - if (mask) - fsnotify_dentry(dentry, mask); + if (mask) { + if (S_ISDIR(inode->i_mode)) + mask |= FS_ISDIR; + + fsnotify_parent(NULL, dentry, mask); + fsnotify(inode, mask, inode, FSNOTIFY_EVENT_INODE, NULL, 0); + } } +#if defined(CONFIG_FSNOTIFY) /* notify helpers */ + +/* + * fsnotify_oldname_init - save off the old filename before we change it + */ +static inline const unsigned char *fsnotify_oldname_init(const unsigned char *name) +{ + return (const unsigned char *)kstrdup((const char *)name, GFP_KERNEL); +} + +/* + * fsnotify_oldname_free - free the name we got from fsnotify_oldname_init + */ +static inline void fsnotify_oldname_free(const unsigned char *old_name) +{ + 
kfree(old_name); +} + +#else /* CONFIG_FSNOTIFY */ + +static inline const char *fsnotify_oldname_init(const unsigned char *name) +{ + return NULL; +} + +static inline void fsnotify_oldname_free(const unsigned char *old_name) +{ +} + +#endif /* CONFIG_FSNOTIFY */ + #endif /* _LINUX_FS_NOTIFY_H */ diff --git a/include/linux/fsnotify_backend.h b/include/linux/fsnotify_backend.h index 1ce66748a2..79467b239f 100644 --- a/include/linux/fsnotify_backend.h +++ b/include/linux/fsnotify_backend.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ /* * Filesystem access notification for Linux * @@ -17,8 +16,6 @@ #include #include #include -#include -#include /* * IN_* from inotfy.h lines up EXACTLY with FS_*, this is so we can easily @@ -38,7 +35,6 @@ #define FS_DELETE 0x00000200 /* Subfile was deleted */ #define FS_DELETE_SELF 0x00000400 /* Self was deleted */ #define FS_MOVE_SELF 0x00000800 /* Self was moved */ -#define FS_OPEN_EXEC 0x00001000 /* File was opened for exec */ #define FS_UNMOUNT 0x00002000 /* inode on umount fs */ #define FS_Q_OVERFLOW 0x00004000 /* Event queued overflowed */ @@ -46,118 +42,65 @@ #define FS_OPEN_PERM 0x00010000 /* open event in an permission hook */ #define FS_ACCESS_PERM 0x00020000 /* access event in a permissions hook */ -#define FS_OPEN_EXEC_PERM 0x00040000 /* open/exec event in a permission hook */ #define FS_EXCL_UNLINK 0x04000000 /* do not send events if object is unlinked */ -/* - * Set on inode mark that cares about things that happen to its children. - * Always set for dnotify and inotify. - * Set on inode/sb/mount marks that care about parent/name info. 
- */ -#define FS_EVENT_ON_CHILD 0x08000000 - -#define FS_DN_RENAME 0x10000000 /* file renamed */ -#define FS_DN_MULTISHOT 0x20000000 /* dnotify multishot */ #define FS_ISDIR 0x40000000 /* event occurred against dir */ #define FS_IN_ONESHOT 0x80000000 /* only send event once */ +#define FS_DN_RENAME 0x10000000 /* file renamed */ +#define FS_DN_MULTISHOT 0x20000000 /* dnotify multishot */ + +/* This inode cares about things that happen to its children. Always set for + * dnotify and inotify. */ +#define FS_EVENT_ON_CHILD 0x08000000 + +/* This is a list of all events that may get sent to a parernt based on fs event + * happening to inodes inside that directory */ +#define FS_EVENTS_POSS_ON_CHILD (FS_ACCESS | FS_MODIFY | FS_ATTRIB |\ + FS_CLOSE_WRITE | FS_CLOSE_NOWRITE | FS_OPEN |\ + FS_MOVED_FROM | FS_MOVED_TO | FS_CREATE |\ + FS_DELETE | FS_OPEN_PERM | FS_ACCESS_PERM) + #define FS_MOVE (FS_MOVED_FROM | FS_MOVED_TO) -/* - * Directory entry modification events - reported only to directory - * where entry is modified and not to a watching parent. - * The watching parent may get an FS_ATTRIB|FS_EVENT_ON_CHILD event - * when a directory entry inside a child subdir changes. - */ -#define ALL_FSNOTIFY_DIRENT_EVENTS (FS_CREATE | FS_DELETE | FS_MOVE) +#define ALL_FSNOTIFY_PERM_EVENTS (FS_OPEN_PERM | FS_ACCESS_PERM) -#define ALL_FSNOTIFY_PERM_EVENTS (FS_OPEN_PERM | FS_ACCESS_PERM | \ - FS_OPEN_EXEC_PERM) - -/* - * This is a list of all events that may get sent to a parent that is watching - * with flag FS_EVENT_ON_CHILD based on fs event on a child of that directory. - */ -#define FS_EVENTS_POSS_ON_CHILD (ALL_FSNOTIFY_PERM_EVENTS | \ - FS_ACCESS | FS_MODIFY | FS_ATTRIB | \ - FS_CLOSE_WRITE | FS_CLOSE_NOWRITE | \ - FS_OPEN | FS_OPEN_EXEC) - -/* - * This is a list of all events that may get sent with the parent inode as the - * @to_tell argument of fsnotify(). 
- * It may include events that can be sent to an inode/sb/mount mark, but cannot - * be sent to a parent watching children. - */ -#define FS_EVENTS_POSS_TO_PARENT (FS_EVENTS_POSS_ON_CHILD) - -/* Events that can be reported to backends */ -#define ALL_FSNOTIFY_EVENTS (ALL_FSNOTIFY_DIRENT_EVENTS | \ - FS_EVENTS_POSS_ON_CHILD | \ - FS_DELETE_SELF | FS_MOVE_SELF | FS_DN_RENAME | \ - FS_UNMOUNT | FS_Q_OVERFLOW | FS_IN_IGNORED) - -/* Extra flags that may be reported with event or control handling of events */ -#define ALL_FSNOTIFY_FLAGS (FS_EXCL_UNLINK | FS_ISDIR | FS_IN_ONESHOT | \ +#define ALL_FSNOTIFY_EVENTS (FS_ACCESS | FS_MODIFY | FS_ATTRIB | \ + FS_CLOSE_WRITE | FS_CLOSE_NOWRITE | FS_OPEN | \ + FS_MOVED_FROM | FS_MOVED_TO | FS_CREATE | \ + FS_DELETE | FS_DELETE_SELF | FS_MOVE_SELF | \ + FS_UNMOUNT | FS_Q_OVERFLOW | FS_IN_IGNORED | \ + FS_OPEN_PERM | FS_ACCESS_PERM | FS_EXCL_UNLINK | \ + FS_ISDIR | FS_IN_ONESHOT | FS_DN_RENAME | \ FS_DN_MULTISHOT | FS_EVENT_ON_CHILD) -#define ALL_FSNOTIFY_BITS (ALL_FSNOTIFY_EVENTS | ALL_FSNOTIFY_FLAGS) - struct fsnotify_group; struct fsnotify_event; struct fsnotify_mark; struct fsnotify_event_private_data; struct fsnotify_fname; -struct fsnotify_iter_info; - -struct mem_cgroup; /* * Each group much define these ops. The fsnotify infrastructure will call * these operations for each relevant group. 
* * handle_event - main call for a group to handle an fs event - * @group: group to notify - * @mask: event type and flags - * @data: object that event happened on - * @data_type: type of object for fanotify_data_XXX() accessors - * @dir: optional directory associated with event - - * if @file_name is not NULL, this is the directory that - * @file_name is relative to - * @file_name: optional file name associated with event - * @cookie: inotify rename cookie - * @iter_info: array of marks from this group that are interested in the event - * - * handle_inode_event - simple variant of handle_event() for groups that only - * have inode marks and don't have ignore mask - * @mark: mark to notify - * @mask: event type and flags - * @inode: inode that event happened on - * @dir: optional directory associated with event - - * if @file_name is not NULL, this is the directory that - * @file_name is relative to. - * @file_name: optional file name associated with event - * @cookie: inotify rename cookie - * * free_group_priv - called when a group refcnt hits 0 to clean up the private union * freeing_mark - called when a mark is being destroyed for some reason. The group - * MUST be holding a reference on each mark and that reference must be - * dropped in this function. inotify uses this function to send - * userspace messages that marks have been removed. + * MUST be holding a reference on each mark and that reference must be + * dropped in this function. inotify uses this function to send + * userspace messages that marks have been removed. 
*/ struct fsnotify_ops { - int (*handle_event)(struct fsnotify_group *group, u32 mask, - const void *data, int data_type, struct inode *dir, - const struct qstr *file_name, u32 cookie, - struct fsnotify_iter_info *iter_info); - int (*handle_inode_event)(struct fsnotify_mark *mark, u32 mask, - struct inode *inode, struct inode *dir, - const struct qstr *file_name, u32 cookie); + int (*handle_event)(struct fsnotify_group *group, + struct inode *inode, + struct fsnotify_mark *inode_mark, + struct fsnotify_mark *vfsmount_mark, + u32 mask, void *data, int data_type, + const unsigned char *file_name, u32 cookie); void (*free_group_priv)(struct fsnotify_group *group); void (*freeing_mark)(struct fsnotify_mark *mark, struct fsnotify_group *group); void (*free_event)(struct fsnotify_event *event); - /* called on final put+free to free memory */ - void (*free_mark)(struct fsnotify_mark *mark); }; /* @@ -167,6 +110,9 @@ struct fsnotify_ops { */ struct fsnotify_event { struct list_head list; + /* inode may ONLY be dereferenced during handle_event(). */ + struct inode *inode; /* either the inode the event happened to or its parent */ + u32 mask; /* the type of access, bitwise OR for FS_* event types */ }; /* @@ -176,8 +122,6 @@ struct fsnotify_event { * everything will be cleaned up. */ struct fsnotify_group { - const struct fsnotify_ops *ops; /* how this group handles things */ - /* * How the refcnt is used is up to each group. When the refcnt hits 0 * fsnotify will clean up all of the resources associated with this group. @@ -186,7 +130,9 @@ struct fsnotify_group { * inotify_init() and the refcnt will hit 0 only when that fd has been * closed. 
*/ - refcount_t refcnt; /* things with interest in this group */ + atomic_t refcnt; /* things with interest in this group */ + + const struct fsnotify_ops *ops; /* how this group handles things */ /* needed to send notification to userspace */ spinlock_t notification_lock; /* protect the notification_list */ @@ -206,8 +152,9 @@ struct fsnotify_group { /* stores all fastpath marks assoc with this group so they can be cleaned on unregister */ struct mutex mark_mutex; /* protect marks_list */ - atomic_t user_waits; /* Number of tasks waiting for user - * response */ + atomic_t num_marks; /* 1 for each mark and 1 for not being + * past the point of no return when freeing + * a group */ struct list_head marks_list; /* all inode marks for this group */ struct fasync_struct *fsn_fa; /* async notification */ @@ -216,8 +163,6 @@ struct fsnotify_group { * notification list is too * full */ - struct mem_cgroup *memcg; /* memcg to charge allocations */ - /* groups can define private fields here or use the void *private */ union { void *private; @@ -225,143 +170,28 @@ struct fsnotify_group { struct inotify_group_private_data { spinlock_t idr_lock; struct idr idr; - struct ucounts *ucounts; + struct user_struct *user; } inotify_data; #endif #ifdef CONFIG_FANOTIFY struct fanotify_group_private_data { - /* Hash table of events for merge */ - struct hlist_head *merge_hash; +#ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS /* allows a group to block waiting for a userspace response */ struct list_head access_list; wait_queue_head_t access_waitq; - int flags; /* flags from fanotify_init() */ - int f_flags; /* event_f_flags from fanotify_init() */ - struct ucounts *ucounts; +#endif /* CONFIG_FANOTIFY_ACCESS_PERMISSIONS */ + int f_flags; + unsigned int max_marks; + struct user_struct *user; } fanotify_data; #endif /* CONFIG_FANOTIFY */ }; }; -/* When calling fsnotify tell it if the data is a path or inode */ -enum fsnotify_data_type { - FSNOTIFY_EVENT_NONE, - FSNOTIFY_EVENT_PATH, - 
FSNOTIFY_EVENT_INODE, -}; - -static inline struct inode *fsnotify_data_inode(const void *data, int data_type) -{ - switch (data_type) { - case FSNOTIFY_EVENT_INODE: - return (struct inode *)data; - case FSNOTIFY_EVENT_PATH: - return d_inode(((const struct path *)data)->dentry); - default: - return NULL; - } -} - -static inline const struct path *fsnotify_data_path(const void *data, - int data_type) -{ - switch (data_type) { - case FSNOTIFY_EVENT_PATH: - return data; - default: - return NULL; - } -} - -enum fsnotify_obj_type { - FSNOTIFY_OBJ_TYPE_INODE, - FSNOTIFY_OBJ_TYPE_PARENT, - FSNOTIFY_OBJ_TYPE_VFSMOUNT, - FSNOTIFY_OBJ_TYPE_SB, - FSNOTIFY_OBJ_TYPE_COUNT, - FSNOTIFY_OBJ_TYPE_DETACHED = FSNOTIFY_OBJ_TYPE_COUNT -}; - -#define FSNOTIFY_OBJ_TYPE_INODE_FL (1U << FSNOTIFY_OBJ_TYPE_INODE) -#define FSNOTIFY_OBJ_TYPE_PARENT_FL (1U << FSNOTIFY_OBJ_TYPE_PARENT) -#define FSNOTIFY_OBJ_TYPE_VFSMOUNT_FL (1U << FSNOTIFY_OBJ_TYPE_VFSMOUNT) -#define FSNOTIFY_OBJ_TYPE_SB_FL (1U << FSNOTIFY_OBJ_TYPE_SB) -#define FSNOTIFY_OBJ_ALL_TYPES_MASK ((1U << FSNOTIFY_OBJ_TYPE_COUNT) - 1) - -static inline bool fsnotify_valid_obj_type(unsigned int type) -{ - return (type < FSNOTIFY_OBJ_TYPE_COUNT); -} - -struct fsnotify_iter_info { - struct fsnotify_mark *marks[FSNOTIFY_OBJ_TYPE_COUNT]; - unsigned int report_mask; - int srcu_idx; -}; - -static inline bool fsnotify_iter_should_report_type( - struct fsnotify_iter_info *iter_info, int type) -{ - return (iter_info->report_mask & (1U << type)); -} - -static inline void fsnotify_iter_set_report_type( - struct fsnotify_iter_info *iter_info, int type) -{ - iter_info->report_mask |= (1U << type); -} - -static inline void fsnotify_iter_set_report_type_mark( - struct fsnotify_iter_info *iter_info, int type, - struct fsnotify_mark *mark) -{ - iter_info->marks[type] = mark; - iter_info->report_mask |= (1U << type); -} - -#define FSNOTIFY_ITER_FUNCS(name, NAME) \ -static inline struct fsnotify_mark *fsnotify_iter_##name##_mark( \ - struct fsnotify_iter_info 
*iter_info) \ -{ \ - return (iter_info->report_mask & FSNOTIFY_OBJ_TYPE_##NAME##_FL) ? \ - iter_info->marks[FSNOTIFY_OBJ_TYPE_##NAME] : NULL; \ -} - -FSNOTIFY_ITER_FUNCS(inode, INODE) -FSNOTIFY_ITER_FUNCS(parent, PARENT) -FSNOTIFY_ITER_FUNCS(vfsmount, VFSMOUNT) -FSNOTIFY_ITER_FUNCS(sb, SB) - -#define fsnotify_foreach_obj_type(type) \ - for (type = 0; type < FSNOTIFY_OBJ_TYPE_COUNT; type++) - -/* - * fsnotify_connp_t is what we embed in objects which connector can be attached - * to. fsnotify_connp_t * is how we refer from connector back to object. - */ -struct fsnotify_mark_connector; -typedef struct fsnotify_mark_connector __rcu *fsnotify_connp_t; - -/* - * Inode/vfsmount/sb point to this structure which tracks all marks attached to - * the inode/vfsmount/sb. The reference to inode/vfsmount/sb is held by this - * structure. We destroy this structure when there are no more marks attached - * to it. The structure is protected by fsnotify_mark_srcu. - */ -struct fsnotify_mark_connector { - spinlock_t lock; - unsigned short type; /* Type of object [lock] */ -#define FSNOTIFY_CONN_FLAG_HAS_FSID 0x01 - unsigned short flags; /* flags [lock] */ - __kernel_fsid_t fsid; /* fsid of filesystem containing object */ - union { - /* Object pointer [lock] */ - fsnotify_connp_t *obj; - /* Used listing heads to free after srcu period expires */ - struct fsnotify_mark_connector *destroy_next; - }; - struct hlist_head list; -}; +/* when calling fsnotify tell it if the data is a path or inode */ +#define FSNOTIFY_EVENT_NONE 0 +#define FSNOTIFY_EVENT_PATH 1 +#define FSNOTIFY_EVENT_INODE 2 /* * A mark is simply an object attached to an in core inode which allows an @@ -382,26 +212,32 @@ struct fsnotify_mark { __u32 mask; /* We hold one for presence in g_list. Also one ref for each 'thing' * in kernel that found and may be using this mark. */ - refcount_t refcnt; + atomic_t refcnt; /* Group this mark is for. 
Set on mark creation, stable until last ref * is dropped */ struct fsnotify_group *group; - /* List of marks by group->marks_list. Also reused for queueing + /* List of marks by group->i_fsnotify_marks. Also reused for queueing * mark into destroy_list when it's waiting for the end of SRCU period * before it can be freed. [group->mark_mutex] */ struct list_head g_list; /* Protects inode / mnt pointers, flags, masks */ spinlock_t lock; - /* List of marks for inode / vfsmount [connector->lock, mark ref] */ + /* List of marks for inode / vfsmount [obj_lock] */ struct hlist_node obj_list; - /* Head of list of marks for an object [mark ref] */ - struct fsnotify_mark_connector *connector; + union { /* Object pointer [mark->lock, group->mark_mutex] */ + struct inode *inode; /* inode this mark is associated with */ + struct vfsmount *mnt; /* vfsmount this mark is associated with */ + }; /* Events types to ignore [mark->lock, group->mark_mutex] */ __u32 ignored_mask; -#define FSNOTIFY_MARK_FLAG_IGNORED_SURV_MODIFY 0x01 -#define FSNOTIFY_MARK_FLAG_ALIVE 0x02 -#define FSNOTIFY_MARK_FLAG_ATTACHED 0x04 +#define FSNOTIFY_MARK_FLAG_INODE 0x01 +#define FSNOTIFY_MARK_FLAG_VFSMOUNT 0x02 +#define FSNOTIFY_MARK_FLAG_OBJECT_PINNED 0x04 +#define FSNOTIFY_MARK_FLAG_IGNORED_SURV_MODIFY 0x08 +#define FSNOTIFY_MARK_FLAG_ALIVE 0x10 +#define FSNOTIFY_MARK_FLAG_ATTACHED 0x20 unsigned int flags; /* flags [mark->lock] */ + void (*free_mark)(struct fsnotify_mark *mark); /* called on final put+free */ }; #ifdef CONFIG_FSNOTIFY @@ -409,29 +245,13 @@ struct fsnotify_mark { /* called from the vfs helpers */ /* main fsnotify call to send events */ -extern int fsnotify(__u32 mask, const void *data, int data_type, - struct inode *dir, const struct qstr *name, - struct inode *inode, u32 cookie); -extern int __fsnotify_parent(struct dentry *dentry, __u32 mask, const void *data, - int data_type); +extern int fsnotify(struct inode *to_tell, __u32 mask, void *data, int data_is, + const unsigned char *name, 
u32 cookie); +extern int __fsnotify_parent(struct path *path, struct dentry *dentry, __u32 mask); extern void __fsnotify_inode_delete(struct inode *inode); extern void __fsnotify_vfsmount_delete(struct vfsmount *mnt); -extern void fsnotify_sb_delete(struct super_block *sb); extern u32 fsnotify_get_cookie(void); -static inline __u32 fsnotify_parent_needed_mask(__u32 mask) -{ - /* FS_EVENT_ON_CHILD is set on marks that want parent/name info */ - if (!(mask & FS_EVENT_ON_CHILD)) - return 0; - /* - * This object might be watched by a mark that cares about parent/name - * info, does it care about the specific set of events that can be - * reported with parent/name info? - */ - return mask & FS_EVENTS_POSS_TO_PARENT; -} - static inline int fsnotify_inode_watches_children(struct inode *inode) { /* FS_EVENT_ON_CHILD is set if the inode may care */ @@ -467,7 +287,6 @@ static inline void fsnotify_update_flags(struct dentry *dentry) /* create a new group */ extern struct fsnotify_group *fsnotify_alloc_group(const struct fsnotify_ops *ops); -extern struct fsnotify_group *fsnotify_alloc_user_group(const struct fsnotify_ops *ops); /* get reference to a group */ extern void fsnotify_get_group(struct fsnotify_group *group); /* drop reference on a group from fsnotify_alloc_group */ @@ -484,72 +303,37 @@ extern void fsnotify_destroy_event(struct fsnotify_group *group, /* attach the event to the group notification queue */ extern int fsnotify_add_event(struct fsnotify_group *group, struct fsnotify_event *event, - int (*merge)(struct fsnotify_group *, - struct fsnotify_event *), - void (*insert)(struct fsnotify_group *, - struct fsnotify_event *)); -/* Queue overflow event to a notification group */ -static inline void fsnotify_queue_overflow(struct fsnotify_group *group) -{ - fsnotify_add_event(group, group->overflow_event, NULL, NULL); -} - -static inline bool fsnotify_notify_queue_is_empty(struct fsnotify_group *group) -{ - assert_spin_locked(&group->notification_lock); - - return 
list_empty(&group->notification_list); -} - + int (*merge)(struct list_head *, + struct fsnotify_event *)); +/* true if the group notification queue is empty */ extern bool fsnotify_notify_queue_is_empty(struct fsnotify_group *group); /* return, but do not dequeue the first event on the notification queue */ extern struct fsnotify_event *fsnotify_peek_first_event(struct fsnotify_group *group); /* return AND dequeue the first event on the notification queue */ extern struct fsnotify_event *fsnotify_remove_first_event(struct fsnotify_group *group); -/* Remove event queued in the notification list */ -extern void fsnotify_remove_queued_event(struct fsnotify_group *group, - struct fsnotify_event *event); /* functions used to manipulate the marks attached to inodes */ -/* Get mask of events for a list of marks */ -extern __u32 fsnotify_conn_mask(struct fsnotify_mark_connector *conn); -/* Calculate mask of events for a list of marks */ -extern void fsnotify_recalc_mask(struct fsnotify_mark_connector *conn); -extern void fsnotify_init_mark(struct fsnotify_mark *mark, - struct fsnotify_group *group); -/* Find mark belonging to given group in the list of marks */ -extern struct fsnotify_mark *fsnotify_find_mark(fsnotify_connp_t *connp, - struct fsnotify_group *group); -/* Get cached fsid of filesystem containing object */ -extern int fsnotify_get_conn_fsid(const struct fsnotify_mark_connector *conn, - __kernel_fsid_t *fsid); -/* attach the mark to the object */ -extern int fsnotify_add_mark(struct fsnotify_mark *mark, - fsnotify_connp_t *connp, unsigned int type, - int allow_dups, __kernel_fsid_t *fsid); -extern int fsnotify_add_mark_locked(struct fsnotify_mark *mark, - fsnotify_connp_t *connp, - unsigned int type, int allow_dups, - __kernel_fsid_t *fsid); - -/* attach the mark to the inode */ -static inline int fsnotify_add_inode_mark(struct fsnotify_mark *mark, - struct inode *inode, - int allow_dups) -{ - return fsnotify_add_mark(mark, &inode->i_fsnotify_marks, - 
FSNOTIFY_OBJ_TYPE_INODE, allow_dups, NULL); -} -static inline int fsnotify_add_inode_mark_locked(struct fsnotify_mark *mark, - struct inode *inode, - int allow_dups) -{ - return fsnotify_add_mark_locked(mark, &inode->i_fsnotify_marks, - FSNOTIFY_OBJ_TYPE_INODE, allow_dups, - NULL); -} - +/* run all marks associated with a vfsmount and update mnt->mnt_fsnotify_mask */ +extern void fsnotify_recalc_vfsmount_mask(struct vfsmount *mnt); +/* run all marks associated with an inode and update inode->i_fsnotify_mask */ +extern void fsnotify_recalc_inode_mask(struct inode *inode); +extern void fsnotify_init_mark(struct fsnotify_mark *mark, void (*free_mark)(struct fsnotify_mark *mark)); +/* find (and take a reference) to a mark associated with group and inode */ +extern struct fsnotify_mark *fsnotify_find_inode_mark(struct fsnotify_group *group, struct inode *inode); +/* find (and take a reference) to a mark associated with group and vfsmount */ +extern struct fsnotify_mark *fsnotify_find_vfsmount_mark(struct fsnotify_group *group, struct vfsmount *mnt); +/* copy the values from old into new */ +extern void fsnotify_duplicate_mark(struct fsnotify_mark *new, struct fsnotify_mark *old); +/* set the ignored_mask of a mark */ +extern void fsnotify_set_mark_ignored_mask_locked(struct fsnotify_mark *mark, __u32 mask); +/* set the mask of a mark (might pin the object into memory */ +extern void fsnotify_set_mark_mask_locked(struct fsnotify_mark *mark, __u32 mask); +/* attach the mark to both the group and the inode */ +extern int fsnotify_add_mark(struct fsnotify_mark *mark, struct fsnotify_group *group, + struct inode *inode, struct vfsmount *mnt, int allow_dups); +extern int fsnotify_add_mark_locked(struct fsnotify_mark *mark, struct fsnotify_group *group, + struct inode *inode, struct vfsmount *mnt, int allow_dups); /* given a group and a mark, flag mark to be freed when all references are dropped */ extern void fsnotify_destroy_mark(struct fsnotify_mark *mark, struct 
fsnotify_group *group); @@ -557,46 +341,29 @@ extern void fsnotify_destroy_mark(struct fsnotify_mark *mark, extern void fsnotify_detach_mark(struct fsnotify_mark *mark); /* free mark */ extern void fsnotify_free_mark(struct fsnotify_mark *mark); -/* Wait until all marks queued for destruction are destroyed */ -extern void fsnotify_wait_marks_destroyed(void); -/* run all the marks in a group, and clear all of the marks attached to given object type */ -extern void fsnotify_clear_marks_by_group(struct fsnotify_group *group, unsigned int type); /* run all the marks in a group, and clear all of the vfsmount marks */ -static inline void fsnotify_clear_vfsmount_marks_by_group(struct fsnotify_group *group) -{ - fsnotify_clear_marks_by_group(group, FSNOTIFY_OBJ_TYPE_VFSMOUNT_FL); -} +extern void fsnotify_clear_vfsmount_marks_by_group(struct fsnotify_group *group); /* run all the marks in a group, and clear all of the inode marks */ -static inline void fsnotify_clear_inode_marks_by_group(struct fsnotify_group *group) -{ - fsnotify_clear_marks_by_group(group, FSNOTIFY_OBJ_TYPE_INODE_FL); -} -/* run all the marks in a group, and clear all of the sn marks */ -static inline void fsnotify_clear_sb_marks_by_group(struct fsnotify_group *group) -{ - fsnotify_clear_marks_by_group(group, FSNOTIFY_OBJ_TYPE_SB_FL); -} +extern void fsnotify_clear_inode_marks_by_group(struct fsnotify_group *group); +/* run all the marks in a group, and clear all of the marks where mark->flags & flags is true*/ +extern void fsnotify_clear_marks_by_group_flags(struct fsnotify_group *group, unsigned int flags); extern void fsnotify_get_mark(struct fsnotify_mark *mark); extern void fsnotify_put_mark(struct fsnotify_mark *mark); -extern void fsnotify_finish_user_wait(struct fsnotify_iter_info *iter_info); -extern bool fsnotify_prepare_user_wait(struct fsnotify_iter_info *iter_info); +extern void fsnotify_unmount_inodes(struct super_block *sb); -static inline void fsnotify_init_event(struct fsnotify_event 
*event) -{ - INIT_LIST_HEAD(&event->list); -} +/* put here because inotify does some weird stuff when destroying watches */ +extern void fsnotify_init_event(struct fsnotify_event *event, + struct inode *to_tell, u32 mask); #else -static inline int fsnotify(__u32 mask, const void *data, int data_type, - struct inode *dir, const struct qstr *name, - struct inode *inode, u32 cookie) +static inline int fsnotify(struct inode *to_tell, __u32 mask, void *data, int data_is, + const unsigned char *name, u32 cookie) { return 0; } -static inline int __fsnotify_parent(struct dentry *dentry, __u32 mask, - const void *data, int data_type) +static inline int __fsnotify_parent(struct path *path, struct dentry *dentry, __u32 mask) { return 0; } @@ -607,9 +374,6 @@ static inline void __fsnotify_inode_delete(struct inode *inode) static inline void __fsnotify_vfsmount_delete(struct vfsmount *mnt) {} -static inline void fsnotify_sb_delete(struct super_block *sb) -{} - static inline void fsnotify_update_flags(struct dentry *dentry) {} diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h index 832e65f067..b3d34d3e0e 100644 --- a/include/linux/ftrace.h +++ b/include/linux/ftrace.h @@ -1,13 +1,11 @@ -/* SPDX-License-Identifier: GPL-2.0 */ /* * Ftrace header. For implementation details beyond the random comments - * scattered below, see: Documentation/trace/ftrace-design.rst + * scattered below, see: Documentation/trace/ftrace-design.txt */ #ifndef _LINUX_FTRACE_H #define _LINUX_FTRACE_H -#include #include #include #include @@ -33,7 +31,7 @@ /* * If the arch's mcount caller does not support all of ftrace's * features, then it must call an indirect function that - * does. Or at least does enough to prevent any unwelcome side effects. + * does. Or at least does enough to prevent any unwelcomed side effects. 
*/ #if !ARCH_SUPPORTS_FTRACE_OPS # define FTRACE_FORCE_LIST_FUNC 1 @@ -44,93 +42,44 @@ /* Main tracing buffer and events set up */ #ifdef CONFIG_TRACING void trace_init(void); -void early_trace_init(void); #else static inline void trace_init(void) { } -static inline void early_trace_init(void) { } #endif struct module; struct ftrace_hash; -struct ftrace_direct_func; - -#if defined(CONFIG_FUNCTION_TRACER) && defined(CONFIG_MODULES) && \ - defined(CONFIG_DYNAMIC_FTRACE) -const char * -ftrace_mod_address_lookup(unsigned long addr, unsigned long *size, - unsigned long *off, char **modname, char *sym); -#else -static inline const char * -ftrace_mod_address_lookup(unsigned long addr, unsigned long *size, - unsigned long *off, char **modname, char *sym) -{ - return NULL; -} -#endif - -#if defined(CONFIG_FUNCTION_TRACER) && defined(CONFIG_DYNAMIC_FTRACE) -int ftrace_mod_get_kallsym(unsigned int symnum, unsigned long *value, - char *type, char *name, - char *module_name, int *exported); -#else -static inline int ftrace_mod_get_kallsym(unsigned int symnum, unsigned long *value, - char *type, char *name, - char *module_name, int *exported) -{ - return -1; -} -#endif #ifdef CONFIG_FUNCTION_TRACER extern int ftrace_enabled; extern int ftrace_enable_sysctl(struct ctl_table *table, int write, - void *buffer, size_t *lenp, loff_t *ppos); + void __user *buffer, size_t *lenp, + loff_t *ppos); struct ftrace_ops; -#ifndef CONFIG_HAVE_DYNAMIC_FTRACE_WITH_ARGS - -struct ftrace_regs { - struct pt_regs regs; -}; -#define arch_ftrace_get_regs(fregs) (&(fregs)->regs) - -/* - * ftrace_instruction_pointer_set() is to be defined by the architecture - * if to allow setting of the instruction pointer from the ftrace_regs - * when HAVE_DYNAMIC_FTRACE_WITH_ARGS is set and it supports - * live kernel patching. 
- */ -#define ftrace_instruction_pointer_set(fregs, ip) do { } while (0) -#endif /* CONFIG_HAVE_DYNAMIC_FTRACE_WITH_ARGS */ - -static __always_inline struct pt_regs *ftrace_get_regs(struct ftrace_regs *fregs) -{ - if (!fregs) - return NULL; - - return arch_ftrace_get_regs(fregs); -} - typedef void (*ftrace_func_t)(unsigned long ip, unsigned long parent_ip, - struct ftrace_ops *op, struct ftrace_regs *fregs); + struct ftrace_ops *op, struct pt_regs *regs); ftrace_func_t ftrace_ops_get_func(struct ftrace_ops *ops); /* * FTRACE_OPS_FL_* bits denote the state of ftrace_ops struct and are * set in the flags member. - * CONTROL, SAVE_REGS, SAVE_REGS_IF_SUPPORTED, RECURSION, STUB and + * CONTROL, SAVE_REGS, SAVE_REGS_IF_SUPPORTED, RECURSION_SAFE, STUB and * IPMODIFY are a kind of attribute flags which can be set only before * registering the ftrace_ops, and can not be modified while registered. - * Changing those attribute flags after registering ftrace_ops will + * Changing those attribute flags after regsitering ftrace_ops will * cause unexpected results. * * ENABLED - set/unset when ftrace_ops is registered/unregistered * DYNAMIC - set when ftrace_ops is registered to denote dynamically * allocated ftrace_ops which need special care + * PER_CPU - set manualy by ftrace_ops user to denote the ftrace_ops + * could be controlled by following calls: + * ftrace_function_local_enable + * ftrace_function_local_disable * SAVE_REGS - The ftrace_ops wants regs saved at each function called * and passed to the callback. If this flag is set, but the * architecture does not support passing regs @@ -145,10 +94,10 @@ ftrace_func_t ftrace_ops_get_func(struct ftrace_ops *ops); * passing regs to the handler. * Note, if this flag is set, the SAVE_REGS flag will automatically * get set upon registering the ftrace_ops, if the arch supports it. - * RECURSION - The ftrace_ops can set this to tell the ftrace infrastructure - * that the call back needs recursion protection. 
If it does - * not set this, then the ftrace infrastructure will assume - * that the callback can handle recursion on its own. + * RECURSION_SAFE - The ftrace_ops can set this to tell the ftrace infrastructure + * that the call back has its own recursion protection. If it does + * not set this, then the ftrace infrastructure will add recursion + * protection for the caller. * STUB - The ftrace_ops is just a place holder. * INITIALIZED - The ftrace_ops has already been initialized (first use time * register_ftrace_function() is called, it will initialized the ops) @@ -168,47 +117,33 @@ ftrace_func_t ftrace_ops_get_func(struct ftrace_ops *ops); * for any of the functions that this ops will be registered for, then * this ops will fail to register or set_filter_ip. * PID - Is affected by set_ftrace_pid (allows filtering on those pids) - * RCU - Set when the ops can only be called when RCU is watching. - * TRACE_ARRAY - The ops->private points to a trace_array descriptor. - * PERMANENT - Set when the ops is permanent and should not be affected by - * ftrace_enabled. 
- * DIRECT - Used by the direct ftrace_ops helper for direct functions - * (internal ftrace only, should not be used by others) */ enum { - FTRACE_OPS_FL_ENABLED = BIT(0), - FTRACE_OPS_FL_DYNAMIC = BIT(1), - FTRACE_OPS_FL_SAVE_REGS = BIT(2), - FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED = BIT(3), - FTRACE_OPS_FL_RECURSION = BIT(4), - FTRACE_OPS_FL_STUB = BIT(5), - FTRACE_OPS_FL_INITIALIZED = BIT(6), - FTRACE_OPS_FL_DELETED = BIT(7), - FTRACE_OPS_FL_ADDING = BIT(8), - FTRACE_OPS_FL_REMOVING = BIT(9), - FTRACE_OPS_FL_MODIFYING = BIT(10), - FTRACE_OPS_FL_ALLOC_TRAMP = BIT(11), - FTRACE_OPS_FL_IPMODIFY = BIT(12), - FTRACE_OPS_FL_PID = BIT(13), - FTRACE_OPS_FL_RCU = BIT(14), - FTRACE_OPS_FL_TRACE_ARRAY = BIT(15), - FTRACE_OPS_FL_PERMANENT = BIT(16), - FTRACE_OPS_FL_DIRECT = BIT(17), + FTRACE_OPS_FL_ENABLED = 1 << 0, + FTRACE_OPS_FL_DYNAMIC = 1 << 1, + FTRACE_OPS_FL_PER_CPU = 1 << 2, + FTRACE_OPS_FL_SAVE_REGS = 1 << 3, + FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED = 1 << 4, + FTRACE_OPS_FL_RECURSION_SAFE = 1 << 5, + FTRACE_OPS_FL_STUB = 1 << 6, + FTRACE_OPS_FL_INITIALIZED = 1 << 7, + FTRACE_OPS_FL_DELETED = 1 << 8, + FTRACE_OPS_FL_ADDING = 1 << 9, + FTRACE_OPS_FL_REMOVING = 1 << 10, + FTRACE_OPS_FL_MODIFYING = 1 << 11, + FTRACE_OPS_FL_ALLOC_TRAMP = 1 << 12, + FTRACE_OPS_FL_IPMODIFY = 1 << 13, + FTRACE_OPS_FL_PID = 1 << 14, + FTRACE_OPS_FL_RCU = 1 << 15, }; #ifdef CONFIG_DYNAMIC_FTRACE /* The hash used to know what functions callbacks trace */ struct ftrace_ops_hash { - struct ftrace_hash __rcu *notrace_hash; - struct ftrace_hash __rcu *filter_hash; + struct ftrace_hash *notrace_hash; + struct ftrace_hash *filter_hash; struct mutex regex_lock; }; - -void ftrace_free_init_mem(void); -void ftrace_free_mem(struct module *mod, void *start, void *end); -#else -static inline void ftrace_free_init_mem(void) { } -static inline void ftrace_free_mem(struct module *mod, void *start, void *end) { } #endif /* @@ -224,43 +159,20 @@ static inline void ftrace_free_mem(struct module *mod, void *start, 
void *end) { */ struct ftrace_ops { ftrace_func_t func; - struct ftrace_ops __rcu *next; + struct ftrace_ops *next; unsigned long flags; void *private; ftrace_func_t saved_func; + int __percpu *disabled; #ifdef CONFIG_DYNAMIC_FTRACE struct ftrace_ops_hash local_hash; struct ftrace_ops_hash *func_hash; struct ftrace_ops_hash old_hash; unsigned long trampoline; unsigned long trampoline_size; - struct list_head list; #endif }; -extern struct ftrace_ops __rcu *ftrace_ops_list; -extern struct ftrace_ops ftrace_list_end; - -/* - * Traverse the ftrace_ops_list, invoking all entries. The reason that we - * can use rcu_dereference_raw_check() is that elements removed from this list - * are simply leaked, so there is no need to interact with a grace-period - * mechanism. The rcu_dereference_raw_check() calls are needed to handle - * concurrent insertions into the ftrace_ops_list. - * - * Silly Alpha and silly pointer-speculation compiler optimizations! - */ -#define do_for_each_ftrace_op(op, list) \ - op = rcu_dereference_raw_check(list); \ - do - -/* - * Optimized for just a single item in the list (as that is the normal case). - */ -#define while_for_each_ftrace_op(op) \ - while (likely(op = rcu_dereference_raw_check((op)->next)) && \ - unlikely((op) != &ftrace_list_end)) - /* * Type of the current tracing. */ @@ -281,9 +193,59 @@ extern enum ftrace_tracing_type_t ftrace_tracing_type; */ int register_ftrace_function(struct ftrace_ops *ops); int unregister_ftrace_function(struct ftrace_ops *ops); +void clear_ftrace_function(void); + +/** + * ftrace_function_local_enable - enable ftrace_ops on current cpu + * + * This function enables tracing on current cpu by decreasing + * the per cpu control variable. + * It must be called with preemption disabled and only on ftrace_ops + * registered with FTRACE_OPS_FL_PER_CPU. If called without preemption + * disabled, this_cpu_ptr will complain when CONFIG_DEBUG_PREEMPT is enabled. 
+ */ +static inline void ftrace_function_local_enable(struct ftrace_ops *ops) +{ + if (WARN_ON_ONCE(!(ops->flags & FTRACE_OPS_FL_PER_CPU))) + return; + + (*this_cpu_ptr(ops->disabled))--; +} + +/** + * ftrace_function_local_disable - disable ftrace_ops on current cpu + * + * This function disables tracing on current cpu by increasing + * the per cpu control variable. + * It must be called with preemption disabled and only on ftrace_ops + * registered with FTRACE_OPS_FL_PER_CPU. If called without preemption + * disabled, this_cpu_ptr will complain when CONFIG_DEBUG_PREEMPT is enabled. + */ +static inline void ftrace_function_local_disable(struct ftrace_ops *ops) +{ + if (WARN_ON_ONCE(!(ops->flags & FTRACE_OPS_FL_PER_CPU))) + return; + + (*this_cpu_ptr(ops->disabled))++; +} + +/** + * ftrace_function_local_disabled - returns ftrace_ops disabled value + * on current cpu + * + * This function returns value of ftrace_ops::disabled on current cpu. + * It must be called with preemption disabled and only on ftrace_ops + * registered with FTRACE_OPS_FL_PER_CPU. If called without preemption + * disabled, this_cpu_ptr will complain when CONFIG_DEBUG_PREEMPT is enabled. 
+ */ +static inline int ftrace_function_local_disabled(struct ftrace_ops *ops) +{ + WARN_ON_ONCE(!(ops->flags & FTRACE_OPS_FL_PER_CPU)); + return *this_cpu_ptr(ops->disabled); +} extern void ftrace_stub(unsigned long a0, unsigned long a1, - struct ftrace_ops *op, struct ftrace_regs *fregs); + struct ftrace_ops *op, struct pt_regs *regs); #else /* !CONFIG_FUNCTION_TRACER */ /* @@ -292,131 +254,48 @@ extern void ftrace_stub(unsigned long a0, unsigned long a1, */ #define register_ftrace_function(ops) ({ 0; }) #define unregister_ftrace_function(ops) ({ 0; }) -static inline void ftrace_kill(void) { } -static inline void ftrace_free_init_mem(void) { } -static inline void ftrace_free_mem(struct module *mod, void *start, void *end) { } -#endif /* CONFIG_FUNCTION_TRACER */ - -struct ftrace_func_entry { - struct hlist_node hlist; - unsigned long ip; - unsigned long direct; /* for direct lookup only */ -}; - -struct dyn_ftrace; - -#ifdef CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS -extern int ftrace_direct_func_count; -int register_ftrace_direct(unsigned long ip, unsigned long addr); -int unregister_ftrace_direct(unsigned long ip, unsigned long addr); -int modify_ftrace_direct(unsigned long ip, unsigned long old_addr, unsigned long new_addr); -struct ftrace_direct_func *ftrace_find_direct_func(unsigned long addr); -int ftrace_modify_direct_caller(struct ftrace_func_entry *entry, - struct dyn_ftrace *rec, - unsigned long old_addr, - unsigned long new_addr); -unsigned long ftrace_find_rec_direct(unsigned long ip); -#else -# define ftrace_direct_func_count 0 -static inline int register_ftrace_direct(unsigned long ip, unsigned long addr) -{ - return -ENOTSUPP; -} -static inline int unregister_ftrace_direct(unsigned long ip, unsigned long addr) -{ - return -ENOTSUPP; -} -static inline int modify_ftrace_direct(unsigned long ip, - unsigned long old_addr, unsigned long new_addr) -{ - return -ENOTSUPP; -} -static inline struct ftrace_direct_func *ftrace_find_direct_func(unsigned long 
addr) -{ - return NULL; -} -static inline int ftrace_modify_direct_caller(struct ftrace_func_entry *entry, - struct dyn_ftrace *rec, - unsigned long old_addr, - unsigned long new_addr) -{ - return -ENODEV; -} -static inline unsigned long ftrace_find_rec_direct(unsigned long ip) +static inline int ftrace_nr_registered_ops(void) { return 0; } -#endif /* CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS */ - -#ifndef CONFIG_HAVE_DYNAMIC_FTRACE_WITH_DIRECT_CALLS -/* - * This must be implemented by the architecture. - * It is the way the ftrace direct_ops helper, when called - * via ftrace (because there's other callbacks besides the - * direct call), can inform the architecture's trampoline that this - * routine has a direct caller, and what the caller is. - * - * For example, in x86, it returns the direct caller - * callback function via the regs->orig_ax parameter. - * Then in the ftrace trampoline, if this is set, it makes - * the return from the trampoline jump to the direct caller - * instead of going back to the function it just traced. - */ -static inline void arch_ftrace_set_direct_caller(struct pt_regs *regs, - unsigned long addr) { } -#endif /* CONFIG_HAVE_DYNAMIC_FTRACE_WITH_DIRECT_CALLS */ +static inline void clear_ftrace_function(void) { } +static inline void ftrace_kill(void) { } +#endif /* CONFIG_FUNCTION_TRACER */ #ifdef CONFIG_STACK_TRACER +#define STACK_TRACE_ENTRIES 500 + +struct stack_trace; + +extern unsigned stack_trace_index[]; +extern struct stack_trace stack_trace_max; +extern unsigned long stack_trace_max_size; +extern arch_spinlock_t stack_trace_max_lock; + extern int stack_tracer_enabled; - -int stack_trace_sysctl(struct ctl_table *table, int write, void *buffer, - size_t *lenp, loff_t *ppos); - -/* DO NOT MODIFY THIS VARIABLE DIRECTLY! */ -DECLARE_PER_CPU(int, disable_stack_tracer); - -/** - * stack_tracer_disable - temporarily disable the stack tracer - * - * There's a few locations (namely in RCU) where stack tracing - * cannot be executed. 
This function is used to disable stack - * tracing during those critical sections. - * - * This function must be called with preemption or interrupts - * disabled and stack_tracer_enable() must be called shortly after - * while preemption or interrupts are still disabled. - */ -static inline void stack_tracer_disable(void) -{ - /* Preemption or interrupts must be disabled */ - if (IS_ENABLED(CONFIG_DEBUG_PREEMPT)) - WARN_ON_ONCE(!preempt_count() || !irqs_disabled()); - this_cpu_inc(disable_stack_tracer); -} - -/** - * stack_tracer_enable - re-enable the stack tracer - * - * After stack_tracer_disable() is called, stack_tracer_enable() - * must be called shortly afterward. - */ -static inline void stack_tracer_enable(void) -{ - if (IS_ENABLED(CONFIG_DEBUG_PREEMPT)) - WARN_ON_ONCE(!preempt_count() || !irqs_disabled()); - this_cpu_dec(disable_stack_tracer); -} -#else -static inline void stack_tracer_disable(void) { } -static inline void stack_tracer_enable(void) { } +void stack_trace_print(void); +int +stack_trace_sysctl(struct ctl_table *table, int write, + void __user *buffer, size_t *lenp, + loff_t *ppos); #endif +struct ftrace_func_command { + struct list_head list; + char *name; + int (*func)(struct ftrace_hash *hash, + char *func, char *cmd, + char *params, int enable); +}; + #ifdef CONFIG_DYNAMIC_FTRACE int ftrace_arch_code_modify_prepare(void); int ftrace_arch_code_modify_post_process(void); +struct dyn_ftrace; + enum ftrace_bug_type { FTRACE_BUG_UNKNOWN, FTRACE_BUG_INIT, @@ -436,9 +315,33 @@ void ftrace_bug(int err, struct dyn_ftrace *rec); struct seq_file; +struct ftrace_probe_ops { + void (*func)(unsigned long ip, + unsigned long parent_ip, + void **data); + int (*init)(struct ftrace_probe_ops *ops, + unsigned long ip, void **data); + void (*free)(struct ftrace_probe_ops *ops, + unsigned long ip, void **data); + int (*print)(struct seq_file *m, + unsigned long ip, + struct ftrace_probe_ops *ops, + void *data); +}; + +extern int 
+register_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops, + void *data); +extern void +unregister_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops, + void *data); +extern void +unregister_ftrace_function_probe_func(char *glob, struct ftrace_probe_ops *ops); +extern void unregister_ftrace_function_probe_all(char *glob); + extern int ftrace_text_reserved(const void *start, const void *end); -struct ftrace_ops *ftrace_ops_trampoline(unsigned long addr); +extern int ftrace_nr_registered_ops(void); bool is_ftrace_trampoline(unsigned long addr); @@ -454,10 +357,9 @@ bool is_ftrace_trampoline(unsigned long addr); * REGS_EN - the function is set up to save regs. * IPMODIFY - the record allows for the IP address to be changed. * DISABLED - the record is not ready to be touched yet - * DIRECT - there is a direct function to call * * When a new ftrace_ops is registered and wants a function to save - * pt_regs, the rec->flags REGS is set. When the function has been + * pt_regs, the rec->flag REGS is set. When the function has been * set up to save regs, the REG_EN flag is set. Once a function * starts saving regs it will do so until all ftrace_ops are removed * from tracing that function. 
@@ -470,14 +372,15 @@ enum { FTRACE_FL_TRAMP_EN = (1UL << 27), FTRACE_FL_IPMODIFY = (1UL << 26), FTRACE_FL_DISABLED = (1UL << 25), - FTRACE_FL_DIRECT = (1UL << 24), - FTRACE_FL_DIRECT_EN = (1UL << 23), }; -#define FTRACE_REF_MAX_SHIFT 23 +#define FTRACE_REF_MAX_SHIFT 25 +#define FTRACE_FL_BITS 7 +#define FTRACE_FL_MASKED_BITS ((1UL << FTRACE_FL_BITS) - 1) +#define FTRACE_FL_MASK (FTRACE_FL_MASKED_BITS << FTRACE_REF_MAX_SHIFT) #define FTRACE_REF_MAX ((1UL << FTRACE_REF_MAX_SHIFT) - 1) -#define ftrace_rec_count(rec) ((rec)->flags & FTRACE_REF_MAX) +#define ftrace_rec_count(rec) ((rec)->flags & ~FTRACE_FL_MASK) struct dyn_ftrace { unsigned long ip; /* address of mcount call-site */ @@ -485,6 +388,7 @@ struct dyn_ftrace { struct dyn_arch_ftrace arch; }; +int ftrace_force_update(void); int ftrace_set_filter_ip(struct ftrace_ops *ops, unsigned long ip, int remove, int reset); int ftrace_set_filter(struct ftrace_ops *ops, unsigned char *buf, @@ -494,7 +398,9 @@ int ftrace_set_notrace(struct ftrace_ops *ops, unsigned char *buf, void ftrace_set_global_filter(unsigned char *buf, int len, int reset); void ftrace_set_global_notrace(unsigned char *buf, int len, int reset); void ftrace_free_filter(struct ftrace_ops *ops); -void ftrace_ops_set_global_filter(struct ftrace_ops *ops); + +int register_ftrace_command(struct ftrace_func_command *cmd); +int unregister_ftrace_command(struct ftrace_func_command *cmd); enum { FTRACE_UPDATE_CALLS = (1 << 0), @@ -502,7 +408,6 @@ enum { FTRACE_UPDATE_TRACE_FUNC = (1 << 2), FTRACE_START_FUNC_RET = (1 << 3), FTRACE_STOP_FUNC_RET = (1 << 4), - FTRACE_MAY_SLEEP = (1 << 5), }; /* @@ -527,16 +432,12 @@ enum { FTRACE_ITER_FILTER = (1 << 0), FTRACE_ITER_NOTRACE = (1 << 1), FTRACE_ITER_PRINTALL = (1 << 2), - FTRACE_ITER_DO_PROBES = (1 << 3), - FTRACE_ITER_PROBE = (1 << 4), - FTRACE_ITER_MOD = (1 << 5), - FTRACE_ITER_ENABLED = (1 << 6), + FTRACE_ITER_DO_HASH = (1 << 3), + FTRACE_ITER_HASH = (1 << 4), + FTRACE_ITER_ENABLED = (1 << 5), }; void 
arch_ftrace_update_code(int command); -void arch_ftrace_update_trampoline(struct ftrace_ops *ops); -void *arch_ftrace_trampoline_func(struct ftrace_ops *ops, struct dyn_ftrace *rec); -void arch_ftrace_trampoline_free(struct ftrace_ops *ops); struct ftrace_rec_iter; @@ -550,8 +451,8 @@ struct dyn_ftrace *ftrace_rec_iter_record(struct ftrace_rec_iter *iter); iter = ftrace_rec_iter_next(iter)) -int ftrace_update_record(struct dyn_ftrace *rec, bool enable); -int ftrace_test_record(struct dyn_ftrace *rec, bool enable); +int ftrace_update_record(struct dyn_ftrace *rec, int enable); +int ftrace_test_record(struct dyn_ftrace *rec, int enable); void ftrace_run_stop_machine(int command); unsigned long ftrace_location(unsigned long ip); unsigned long ftrace_location_range(unsigned long start, unsigned long end); @@ -622,7 +523,7 @@ static inline int ftrace_disable_ftrace_graph_caller(void) { return 0; } /** * ftrace_make_nop - convert code into nop * @mod: module structure if called by module load initialization - * @rec: the call site record (e.g. mcount/fentry) + * @rec: the mcount call site record * @addr: the address that the call site should be calling * * This is a very sensitive operation and great care needs @@ -643,54 +544,9 @@ static inline int ftrace_disable_ftrace_graph_caller(void) { return 0; } extern int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec, unsigned long addr); -/** - * ftrace_need_init_nop - return whether nop call sites should be initialized - * - * Normally the compiler's -mnop-mcount generates suitable nops, so we don't - * need to call ftrace_init_nop() if the code is built with that flag. - * Architectures where this is not always the case may define their own - * condition. 
- * - * Return must be: - * 0 if ftrace_init_nop() should be called - * Nonzero if ftrace_init_nop() should not be called - */ - -#ifndef ftrace_need_init_nop -#define ftrace_need_init_nop() (!__is_defined(CC_USING_NOP_MCOUNT)) -#endif - -/** - * ftrace_init_nop - initialize a nop call site - * @mod: module structure if called by module load initialization - * @rec: the call site record (e.g. mcount/fentry) - * - * This is a very sensitive operation and great care needs - * to be taken by the arch. The operation should carefully - * read the location, check to see if what is read is indeed - * what we expect it to be, and then on success of the compare, - * it should write to the location. - * - * The code segment at @rec->ip should contain the contents created by - * the compiler - * - * Return must be: - * 0 on success - * -EFAULT on error reading the location - * -EINVAL on a failed compare of the contents - * -EPERM on error writing to the location - * Any other value will be considered a failure. - */ -#ifndef ftrace_init_nop -static inline int ftrace_init_nop(struct module *mod, struct dyn_ftrace *rec) -{ - return ftrace_make_nop(mod, rec, MCOUNT_ADDR); -} -#endif - /** * ftrace_make_call - convert a nop call site into a call to addr - * @rec: the call site record (e.g. mcount/fentry) + * @rec: the mcount call site record * @addr: the address that the call site should call * * This is a very sensitive operation and great care needs @@ -713,7 +569,7 @@ extern int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr); #ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS /** * ftrace_modify_call - convert from one addr to another (no nop) - * @rec: the call site record (e.g. 
mcount/fentry) + * @rec: the mcount call site record * @old_addr: the address expected to be currently called to * @addr: the address to change to * @@ -755,11 +611,20 @@ extern void ftrace_disable_daemon(void); extern void ftrace_enable_daemon(void); #else /* CONFIG_DYNAMIC_FTRACE */ static inline int skip_trace(unsigned long ip) { return 0; } +static inline int ftrace_force_update(void) { return 0; } static inline void ftrace_disable_daemon(void) { } static inline void ftrace_enable_daemon(void) { } static inline void ftrace_module_init(struct module *mod) { } static inline void ftrace_module_enable(struct module *mod) { } static inline void ftrace_release_mod(struct module *mod) { } +static inline __init int register_ftrace_command(struct ftrace_func_command *cmd) +{ + return -EINVAL; +} +static inline __init int unregister_ftrace_command(char *cmd_name) +{ + return -EINVAL; +} static inline int ftrace_text_reserved(const void *start, const void *end) { return 0; @@ -780,7 +645,6 @@ static inline unsigned long ftrace_location(unsigned long ip) #define ftrace_set_filter(ops, buf, len, reset) ({ -ENODEV; }) #define ftrace_set_notrace(ops, buf, len, reset) ({ -ENODEV; }) #define ftrace_free_filter(ops) do { } while (0) -#define ftrace_ops_set_global_filter(ops) do { } while (0) static inline ssize_t ftrace_filter_write(struct file *file, const char __user *ubuf, size_t cnt, loff_t *ppos) { return -ENODEV; } @@ -862,7 +726,15 @@ static inline unsigned long get_lock_parent_ip(void) return CALLER_ADDR2; } -#ifdef CONFIG_TRACE_PREEMPT_TOGGLE +#ifdef CONFIG_IRQSOFF_TRACER + extern void time_hardirqs_on(unsigned long a0, unsigned long a1); + extern void time_hardirqs_off(unsigned long a0, unsigned long a1); +#else + static inline void time_hardirqs_on(unsigned long a0, unsigned long a1) { } + static inline void time_hardirqs_off(unsigned long a0, unsigned long a1) { } +#endif + +#ifdef CONFIG_PREEMPT_TRACER extern void trace_preempt_on(unsigned long a0, unsigned long 
a1); extern void trace_preempt_off(unsigned long a0, unsigned long a1); #else @@ -876,11 +748,6 @@ static inline unsigned long get_lock_parent_ip(void) #ifdef CONFIG_FTRACE_MCOUNT_RECORD extern void ftrace_init(void); -#ifdef CC_USING_PATCHABLE_FUNCTION_ENTRY -#define FTRACE_CALLSITE_SECTION "__patchable_function_entries" -#else -#define FTRACE_CALLSITE_SECTION "__mcount_loc" -#endif #else static inline void ftrace_init(void) { } #endif @@ -902,25 +769,21 @@ struct ftrace_graph_ent { */ struct ftrace_graph_ret { unsigned long func; /* Current function */ - int depth; /* Number of functions that overran the depth limit for current task */ - unsigned int overrun; + unsigned long overrun; unsigned long long calltime; unsigned long long rettime; + int depth; } __packed; /* Type of the callback handlers for tracing function graph*/ typedef void (*trace_func_graph_ret_t)(struct ftrace_graph_ret *); /* return */ typedef int (*trace_func_graph_ent_t)(struct ftrace_graph_ent *); /* entry */ -extern int ftrace_graph_entry_stub(struct ftrace_graph_ent *trace); - #ifdef CONFIG_FUNCTION_GRAPH_TRACER -struct fgraph_ops { - trace_func_graph_ent_t entryfunc; - trace_func_graph_ret_t retfunc; -}; +/* for init task */ +#define INIT_FTRACE_GRAPH .ret_stack = NULL, /* * Stack of return addresses for functions @@ -950,11 +813,8 @@ struct ftrace_ret_stack { extern void return_to_handler(void); extern int -function_graph_enter(unsigned long ret, unsigned long func, - unsigned long frame_pointer, unsigned long *retp); - -struct ftrace_ret_stack * -ftrace_graph_get_ret_stack(struct task_struct *task, int idx); +ftrace_push_return_trace(unsigned long ret, unsigned long func, int *depth, + unsigned long frame_pointer, unsigned long *retp); unsigned long ftrace_graph_ret_addr(struct task_struct *task, int *idx, unsigned long ret, unsigned long *retp); @@ -966,11 +826,11 @@ unsigned long ftrace_graph_ret_addr(struct task_struct *task, int *idx, */ #define __notrace_funcgraph notrace +#define 
FTRACE_NOTRACE_DEPTH 65536 #define FTRACE_RETFUNC_DEPTH 50 #define FTRACE_RETSTACK_ALLOC_SIZE 32 - -extern int register_ftrace_graph(struct fgraph_ops *ops); -extern void unregister_ftrace_graph(struct fgraph_ops *ops); +extern int register_ftrace_graph(trace_func_graph_ret_t retfunc, + trace_func_graph_ent_t entryfunc); extern bool ftrace_graph_is_dead(void); extern void ftrace_graph_stop(void); @@ -979,10 +839,17 @@ extern void ftrace_graph_stop(void); extern trace_func_graph_ret_t ftrace_graph_return; extern trace_func_graph_ent_t ftrace_graph_entry; +extern void unregister_ftrace_graph(void); + extern void ftrace_graph_init_task(struct task_struct *t); extern void ftrace_graph_exit_task(struct task_struct *t); extern void ftrace_graph_init_idle_task(struct task_struct *t, int cpu); +static inline int task_curr_ret_stack(struct task_struct *t) +{ + return t->curr_ret_stack; +} + static inline void pause_graph_tracing(void) { atomic_inc(¤t->tracing_graph_pause); @@ -995,14 +862,23 @@ static inline void unpause_graph_tracing(void) #else /* !CONFIG_FUNCTION_GRAPH_TRACER */ #define __notrace_funcgraph +#define INIT_FTRACE_GRAPH static inline void ftrace_graph_init_task(struct task_struct *t) { } static inline void ftrace_graph_exit_task(struct task_struct *t) { } static inline void ftrace_graph_init_idle_task(struct task_struct *t, int cpu) { } -/* Define as macros as fgraph_ops may not be defined */ -#define register_ftrace_graph(ops) ({ -1; }) -#define unregister_ftrace_graph(ops) do { } while (0) +static inline int register_ftrace_graph(trace_func_graph_ret_t retfunc, + trace_func_graph_ent_t entryfunc) +{ + return -1; +} +static inline void unregister_ftrace_graph(void) { } + +static inline int task_curr_ret_stack(struct task_struct *tsk) +{ + return -1; +} static inline unsigned long ftrace_graph_ret_addr(struct task_struct *task, int *idx, unsigned long ret, @@ -1065,13 +941,18 @@ extern int tracepoint_printk; extern void disable_trace_on_warning(void); extern 
int __disable_trace_on_warning; -int tracepoint_printk_sysctl(struct ctl_table *table, int write, - void *buffer, size_t *lenp, loff_t *ppos); +#ifdef CONFIG_PREEMPT +#define INIT_TRACE_RECURSION .trace_recursion = 0, +#endif #else /* CONFIG_TRACING */ static inline void disable_trace_on_warning(void) { } #endif /* CONFIG_TRACING */ +#ifndef INIT_TRACE_RECURSION +#define INIT_TRACE_RECURSION +#endif + #ifdef CONFIG_FTRACE_SYSCALLS unsigned long arch_syscall_addr(int nr); diff --git a/include/linux/ftrace_irq.h b/include/linux/ftrace_irq.h index f6faa31289..4ec2c9b205 100644 --- a/include/linux/ftrace_irq.h +++ b/include/linux/ftrace_irq.h @@ -1,39 +1,36 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_FTRACE_IRQ_H #define _LINUX_FTRACE_IRQ_H + +#ifdef CONFIG_FTRACE_NMI_ENTER +extern void arch_ftrace_nmi_enter(void); +extern void arch_ftrace_nmi_exit(void); +#else +static inline void arch_ftrace_nmi_enter(void) { } +static inline void arch_ftrace_nmi_exit(void) { } +#endif + #ifdef CONFIG_HWLAT_TRACER extern bool trace_hwlat_callback_enabled; extern void trace_hwlat_callback(bool enter); #endif -#ifdef CONFIG_OSNOISE_TRACER -extern bool trace_osnoise_callback_enabled; -extern void trace_osnoise_callback(bool enter); -#endif - static inline void ftrace_nmi_enter(void) { #ifdef CONFIG_HWLAT_TRACER if (trace_hwlat_callback_enabled) trace_hwlat_callback(true); #endif -#ifdef CONFIG_OSNOISE_TRACER - if (trace_osnoise_callback_enabled) - trace_osnoise_callback(true); -#endif + arch_ftrace_nmi_enter(); } static inline void ftrace_nmi_exit(void) { + arch_ftrace_nmi_exit(); #ifdef CONFIG_HWLAT_TRACER if (trace_hwlat_callback_enabled) trace_hwlat_callback(false); #endif -#ifdef CONFIG_OSNOISE_TRACER - if (trace_osnoise_callback_enabled) - trace_osnoise_callback(false); -#endif } #endif /* _LINUX_FTRACE_IRQ_H */ diff --git a/include/linux/futex.h b/include/linux/futex.h index b70df27d7e..6435f46d6e 100644 --- a/include/linux/futex.h +++ b/include/linux/futex.h @@ 
-1,15 +1,18 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_FUTEX_H #define _LINUX_FUTEX_H -#include -#include - #include struct inode; struct mm_struct; struct task_struct; +union ktime; + +long do_futex(u32 __user *uaddr, int op, u32 val, union ktime *timeout, + u32 __user *uaddr2, u32 val2, u32 val3); + +extern int +handle_futex_death(u32 __user *uaddr, struct task_struct *curr, int pi); /* * Futexes are matched on equal values of this key. @@ -31,63 +34,38 @@ struct task_struct; union futex_key { struct { - u64 i_seq; unsigned long pgoff; - unsigned int offset; + struct inode *inode; + int offset; } shared; struct { - union { - struct mm_struct *mm; - u64 __tmp; - }; unsigned long address; - unsigned int offset; + struct mm_struct *mm; + int offset; } private; struct { - u64 ptr; unsigned long word; - unsigned int offset; + void *ptr; + int offset; } both; }; -#define FUTEX_KEY_INIT (union futex_key) { .both = { .ptr = 0ULL } } +#define FUTEX_KEY_INIT (union futex_key) { .both = { .ptr = NULL } } #ifdef CONFIG_FUTEX -enum { - FUTEX_STATE_OK, - FUTEX_STATE_EXITING, - FUTEX_STATE_DEAD, -}; - -static inline void futex_init_task(struct task_struct *tsk) -{ - tsk->robust_list = NULL; -#ifdef CONFIG_COMPAT - tsk->compat_robust_list = NULL; -#endif - INIT_LIST_HEAD(&tsk->pi_state_list); - tsk->pi_state_cache = NULL; - tsk->futex_state = FUTEX_STATE_OK; - mutex_init(&tsk->futex_exit_mutex); -} - -void futex_exit_recursive(struct task_struct *tsk); -void futex_exit_release(struct task_struct *tsk); -void futex_exec_release(struct task_struct *tsk); - -long do_futex(u32 __user *uaddr, int op, u32 val, ktime_t *timeout, - u32 __user *uaddr2, u32 val2, u32 val3); +extern void exit_robust_list(struct task_struct *curr); +extern void exit_pi_state_list(struct task_struct *curr); +#ifdef CONFIG_HAVE_FUTEX_CMPXCHG +#define futex_cmpxchg_enabled 1 #else -static inline void futex_init_task(struct task_struct *tsk) { } -static inline void futex_exit_recursive(struct 
task_struct *tsk) { } -static inline void futex_exit_release(struct task_struct *tsk) { } -static inline void futex_exec_release(struct task_struct *tsk) { } -static inline long do_futex(u32 __user *uaddr, int op, u32 val, - ktime_t *timeout, u32 __user *uaddr2, - u32 val2, u32 val3) +extern int futex_cmpxchg_enabled; +#endif +#else +static inline void exit_robust_list(struct task_struct *curr) +{ +} +static inline void exit_pi_state_list(struct task_struct *curr) { - return -EINVAL; } #endif - #endif diff --git a/include/linux/fwnode.h b/include/linux/fwnode.h index 9f4ad719bf..8516717427 100644 --- a/include/linux/fwnode.h +++ b/include/linux/fwnode.h @@ -1,197 +1,29 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * fwnode.h - Firmware device node object handle type definition. * * Copyright (C) 2015, Intel Corporation * Author: Rafael J. Wysocki + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. */ #ifndef _LINUX_FWNODE_H_ #define _LINUX_FWNODE_H_ -#include -#include -#include - -struct fwnode_operations; -struct device; - -/* - * fwnode link flags - * - * LINKS_ADDED: The fwnode has already be parsed to add fwnode links. - * NOT_DEVICE: The fwnode will never be populated as a struct device. - * INITIALIZED: The hardware corresponding to fwnode has been initialized. - * NEEDS_CHILD_BOUND_ON_ADD: For this fwnode/device to probe successfully, its - * driver needs its child devices to be bound with - * their respective drivers as soon as they are - * added. 
- */ -#define FWNODE_FLAG_LINKS_ADDED BIT(0) -#define FWNODE_FLAG_NOT_DEVICE BIT(1) -#define FWNODE_FLAG_INITIALIZED BIT(2) -#define FWNODE_FLAG_NEEDS_CHILD_BOUND_ON_ADD BIT(3) +enum fwnode_type { + FWNODE_INVALID = 0, + FWNODE_OF, + FWNODE_ACPI, + FWNODE_ACPI_DATA, + FWNODE_PDATA, + FWNODE_IRQCHIP, +}; struct fwnode_handle { + enum fwnode_type type; struct fwnode_handle *secondary; - const struct fwnode_operations *ops; - struct device *dev; - struct list_head suppliers; - struct list_head consumers; - u8 flags; }; -struct fwnode_link { - struct fwnode_handle *supplier; - struct list_head s_hook; - struct fwnode_handle *consumer; - struct list_head c_hook; -}; - -/** - * struct fwnode_endpoint - Fwnode graph endpoint - * @port: Port number - * @id: Endpoint id - * @local_fwnode: reference to the related fwnode - */ -struct fwnode_endpoint { - unsigned int port; - unsigned int id; - const struct fwnode_handle *local_fwnode; -}; - -/* - * ports and endpoints defined as software_nodes should all follow a common - * naming scheme; use these macros to ensure commonality. - */ -#define SWNODE_GRAPH_PORT_NAME_FMT "port@%u" -#define SWNODE_GRAPH_ENDPOINT_NAME_FMT "endpoint@%u" - -#define NR_FWNODE_REFERENCE_ARGS 8 - -/** - * struct fwnode_reference_args - Fwnode reference with additional arguments - * @fwnode:- A reference to the base fwnode - * @nargs: Number of elements in @args array - * @args: Integer arguments on the fwnode - */ -struct fwnode_reference_args { - struct fwnode_handle *fwnode; - unsigned int nargs; - u64 args[NR_FWNODE_REFERENCE_ARGS]; -}; - -/** - * struct fwnode_operations - Operations for fwnode interface - * @get: Get a reference to an fwnode. - * @put: Put a reference to an fwnode. - * @device_is_available: Return true if the device is available. - * @device_get_match_data: Return the device driver match data. - * @property_present: Return true if a property is present. - * @property_read_int_array: Read an array of integer properties. 
Return zero on - * success, a negative error code otherwise. - * @property_read_string_array: Read an array of string properties. Return zero - * on success, a negative error code otherwise. - * @get_name: Return the name of an fwnode. - * @get_name_prefix: Get a prefix for a node (for printing purposes). - * @get_parent: Return the parent of an fwnode. - * @get_next_child_node: Return the next child node in an iteration. - * @get_named_child_node: Return a child node with a given name. - * @get_reference_args: Return a reference pointed to by a property, with args - * @graph_get_next_endpoint: Return an endpoint node in an iteration. - * @graph_get_remote_endpoint: Return the remote endpoint node of a local - * endpoint node. - * @graph_get_port_parent: Return the parent node of a port node. - * @graph_parse_endpoint: Parse endpoint for port and endpoint id. - * @add_links: Create fwnode links to all the suppliers of the fwnode. Return - * zero on success, a negative error code otherwise. 
- */ -struct fwnode_operations { - struct fwnode_handle *(*get)(struct fwnode_handle *fwnode); - void (*put)(struct fwnode_handle *fwnode); - bool (*device_is_available)(const struct fwnode_handle *fwnode); - const void *(*device_get_match_data)(const struct fwnode_handle *fwnode, - const struct device *dev); - bool (*property_present)(const struct fwnode_handle *fwnode, - const char *propname); - int (*property_read_int_array)(const struct fwnode_handle *fwnode, - const char *propname, - unsigned int elem_size, void *val, - size_t nval); - int - (*property_read_string_array)(const struct fwnode_handle *fwnode_handle, - const char *propname, const char **val, - size_t nval); - const char *(*get_name)(const struct fwnode_handle *fwnode); - const char *(*get_name_prefix)(const struct fwnode_handle *fwnode); - struct fwnode_handle *(*get_parent)(const struct fwnode_handle *fwnode); - struct fwnode_handle * - (*get_next_child_node)(const struct fwnode_handle *fwnode, - struct fwnode_handle *child); - struct fwnode_handle * - (*get_named_child_node)(const struct fwnode_handle *fwnode, - const char *name); - int (*get_reference_args)(const struct fwnode_handle *fwnode, - const char *prop, const char *nargs_prop, - unsigned int nargs, unsigned int index, - struct fwnode_reference_args *args); - struct fwnode_handle * - (*graph_get_next_endpoint)(const struct fwnode_handle *fwnode, - struct fwnode_handle *prev); - struct fwnode_handle * - (*graph_get_remote_endpoint)(const struct fwnode_handle *fwnode); - struct fwnode_handle * - (*graph_get_port_parent)(struct fwnode_handle *fwnode); - int (*graph_parse_endpoint)(const struct fwnode_handle *fwnode, - struct fwnode_endpoint *endpoint); - int (*add_links)(struct fwnode_handle *fwnode); -}; - -#define fwnode_has_op(fwnode, op) \ - ((fwnode) && (fwnode)->ops && (fwnode)->ops->op) -#define fwnode_call_int_op(fwnode, op, ...) \ - (fwnode ? (fwnode_has_op(fwnode, op) ? 
\ - (fwnode)->ops->op(fwnode, ## __VA_ARGS__) : -ENXIO) : \ - -EINVAL) - -#define fwnode_call_bool_op(fwnode, op, ...) \ - (fwnode_has_op(fwnode, op) ? \ - (fwnode)->ops->op(fwnode, ## __VA_ARGS__) : false) - -#define fwnode_call_ptr_op(fwnode, op, ...) \ - (fwnode_has_op(fwnode, op) ? \ - (fwnode)->ops->op(fwnode, ## __VA_ARGS__) : NULL) -#define fwnode_call_void_op(fwnode, op, ...) \ - do { \ - if (fwnode_has_op(fwnode, op)) \ - (fwnode)->ops->op(fwnode, ## __VA_ARGS__); \ - } while (false) -#define get_dev_from_fwnode(fwnode) get_device((fwnode)->dev) - -static inline void fwnode_init(struct fwnode_handle *fwnode, - const struct fwnode_operations *ops) -{ - fwnode->ops = ops; - INIT_LIST_HEAD(&fwnode->consumers); - INIT_LIST_HEAD(&fwnode->suppliers); -} - -static inline void fwnode_dev_initialized(struct fwnode_handle *fwnode, - bool initialized) -{ - if (IS_ERR_OR_NULL(fwnode)) - return; - - if (initialized) - fwnode->flags |= FWNODE_FLAG_INITIALIZED; - else - fwnode->flags &= ~FWNODE_FLAG_INITIALIZED; -} - -extern u32 fw_devlink_get_flags(void); -extern bool fw_devlink_is_strict(void); -int fwnode_link_add(struct fwnode_handle *con, struct fwnode_handle *sup); -void fwnode_links_purge(struct fwnode_handle *fwnode); -void fw_devlink_purge_absent_suppliers(struct fwnode_handle *fwnode); - #endif diff --git a/include/linux/gameport.h b/include/linux/gameport.h index 69081d8994..bb7de09e8d 100644 --- a/include/linux/gameport.h +++ b/include/linux/gameport.h @@ -1,6 +1,9 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright (c) 1999-2002 Vojtech Pavlik + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published by + * the Free Software Foundation. 
*/ #ifndef _GAMEPORT_H #define _GAMEPORT_H diff --git a/include/linux/gcd.h b/include/linux/gcd.h index cb572677fd..69f5e8a01b 100644 --- a/include/linux/gcd.h +++ b/include/linux/gcd.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _GCD_H #define _GCD_H diff --git a/include/linux/genalloc.h b/include/linux/genalloc.h index 0bd581003c..29d4385903 100644 --- a/include/linux/genalloc.h +++ b/include/linux/genalloc.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * Basic general purpose allocator for managing special purpose * memory, for example, memory that is not managed by the regular @@ -22,6 +21,9 @@ * the allocator can NOT be used in NMI handler. So code uses the * allocator in NMI handler should depend on * CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG. + * + * This source code is licensed under the GNU General Public License, + * Version 2. See the file COPYING for more details. */ @@ -30,27 +32,24 @@ #include #include -#include struct device; struct device_node; struct gen_pool; /** - * typedef genpool_algo_t: Allocation callback function type definition + * Allocation callback function type definition * @map: Pointer to bitmap * @size: The bitmap size in bits * @start: The bitnumber to start searching at * @nr: The number of zeroed bits we're looking for - * @data: optional additional data used by the callback - * @pool: the pool being allocated from + * @data: optional additional data used by @genpool_algo_t */ typedef unsigned long (*genpool_algo_t)(unsigned long *map, unsigned long size, unsigned long start, unsigned int nr, - void *data, struct gen_pool *pool, - unsigned long start_addr); + void *data, struct gen_pool *pool); /* * General purpose special memory pool descriptor. 
@@ -71,12 +70,11 @@ struct gen_pool { */ struct gen_pool_chunk { struct list_head next_chunk; /* next chunk in pool */ - atomic_long_t avail; + atomic_t avail; phys_addr_t phys_addr; /* physical starting address of memory chunk */ - void *owner; /* private data to retrieve at alloc time */ unsigned long start_addr; /* start address of memory chunk */ unsigned long end_addr; /* end address of memory chunk (inclusive) */ - unsigned long bits[]; /* bitmap for allocating memory chunk */ + unsigned long bits[0]; /* bitmap for allocating memory chunk */ }; /* @@ -95,15 +93,8 @@ struct genpool_data_fixed { extern struct gen_pool *gen_pool_create(int, int); extern phys_addr_t gen_pool_virt_to_phys(struct gen_pool *pool, unsigned long); -extern int gen_pool_add_owner(struct gen_pool *, unsigned long, phys_addr_t, - size_t, int, void *); - -static inline int gen_pool_add_virt(struct gen_pool *pool, unsigned long addr, - phys_addr_t phys, size_t size, int nid) -{ - return gen_pool_add_owner(pool, addr, phys, size, nid, NULL); -} - +extern int gen_pool_add_virt(struct gen_pool *, unsigned long, phys_addr_t, + size_t, int); /** * gen_pool_add - add a new chunk of special memory to the pool * @pool: pool to add new memory chunk to @@ -122,56 +113,12 @@ static inline int gen_pool_add(struct gen_pool *pool, unsigned long addr, return gen_pool_add_virt(pool, addr, -1, size, nid); } extern void gen_pool_destroy(struct gen_pool *); -unsigned long gen_pool_alloc_algo_owner(struct gen_pool *pool, size_t size, - genpool_algo_t algo, void *data, void **owner); - -static inline unsigned long gen_pool_alloc_owner(struct gen_pool *pool, - size_t size, void **owner) -{ - return gen_pool_alloc_algo_owner(pool, size, pool->algo, pool->data, - owner); -} - -static inline unsigned long gen_pool_alloc_algo(struct gen_pool *pool, - size_t size, genpool_algo_t algo, void *data) -{ - return gen_pool_alloc_algo_owner(pool, size, algo, data, NULL); -} - -/** - * gen_pool_alloc - allocate special 
memory from the pool - * @pool: pool to allocate from - * @size: number of bytes to allocate from the pool - * - * Allocate the requested number of bytes from the specified pool. - * Uses the pool allocation function (with first-fit algorithm by default). - * Can not be used in NMI handler on architectures without - * NMI-safe cmpxchg implementation. - */ -static inline unsigned long gen_pool_alloc(struct gen_pool *pool, size_t size) -{ - return gen_pool_alloc_algo(pool, size, pool->algo, pool->data); -} - +extern unsigned long gen_pool_alloc(struct gen_pool *, size_t); +extern unsigned long gen_pool_alloc_algo(struct gen_pool *, size_t, + genpool_algo_t algo, void *data); extern void *gen_pool_dma_alloc(struct gen_pool *pool, size_t size, dma_addr_t *dma); -extern void *gen_pool_dma_alloc_algo(struct gen_pool *pool, size_t size, - dma_addr_t *dma, genpool_algo_t algo, void *data); -extern void *gen_pool_dma_alloc_align(struct gen_pool *pool, size_t size, - dma_addr_t *dma, int align); -extern void *gen_pool_dma_zalloc(struct gen_pool *pool, size_t size, dma_addr_t *dma); -extern void *gen_pool_dma_zalloc_algo(struct gen_pool *pool, size_t size, - dma_addr_t *dma, genpool_algo_t algo, void *data); -extern void *gen_pool_dma_zalloc_align(struct gen_pool *pool, size_t size, - dma_addr_t *dma, int align); -extern void gen_pool_free_owner(struct gen_pool *pool, unsigned long addr, - size_t size, void **owner); -static inline void gen_pool_free(struct gen_pool *pool, unsigned long addr, - size_t size) -{ - gen_pool_free_owner(pool, addr, size, NULL); -} - +extern void gen_pool_free(struct gen_pool *, unsigned long, size_t); extern void gen_pool_for_each_chunk(struct gen_pool *, void (*)(struct gen_pool *, struct gen_pool_chunk *, void *), void *); extern size_t gen_pool_avail(struct gen_pool *); @@ -182,31 +129,31 @@ extern void gen_pool_set_algo(struct gen_pool *pool, genpool_algo_t algo, extern unsigned long gen_pool_first_fit(unsigned long *map, unsigned long size, 
unsigned long start, unsigned int nr, void *data, - struct gen_pool *pool, unsigned long start_addr); + struct gen_pool *pool); extern unsigned long gen_pool_fixed_alloc(unsigned long *map, unsigned long size, unsigned long start, unsigned int nr, - void *data, struct gen_pool *pool, unsigned long start_addr); + void *data, struct gen_pool *pool); extern unsigned long gen_pool_first_fit_align(unsigned long *map, unsigned long size, unsigned long start, unsigned int nr, - void *data, struct gen_pool *pool, unsigned long start_addr); + void *data, struct gen_pool *pool); extern unsigned long gen_pool_first_fit_order_align(unsigned long *map, unsigned long size, unsigned long start, unsigned int nr, - void *data, struct gen_pool *pool, unsigned long start_addr); + void *data, struct gen_pool *pool); extern unsigned long gen_pool_best_fit(unsigned long *map, unsigned long size, unsigned long start, unsigned int nr, void *data, - struct gen_pool *pool, unsigned long start_addr); + struct gen_pool *pool); extern struct gen_pool *devm_gen_pool_create(struct device *dev, int min_alloc_order, int nid, const char *name); extern struct gen_pool *gen_pool_get(struct device *dev, const char *name); -extern bool gen_pool_has_addr(struct gen_pool *pool, unsigned long start, +bool addr_in_gen_pool(struct gen_pool *pool, unsigned long start, size_t size); #ifdef CONFIG_OF diff --git a/include/linux/genetlink.h b/include/linux/genetlink.h index c285968e43..a4c61cbce7 100644 --- a/include/linux/genetlink.h +++ b/include/linux/genetlink.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef __LINUX_GENERIC_NETLINK_H #define __LINUX_GENERIC_NETLINK_H @@ -8,11 +7,35 @@ /* All generic netlink requests are serialized by a global lock. 
*/ extern void genl_lock(void); extern void genl_unlock(void); +#ifdef CONFIG_LOCKDEP +extern bool lockdep_genl_is_held(void); +#endif /* for synchronisation between af_netlink and genetlink */ extern atomic_t genl_sk_destructing_cnt; extern wait_queue_head_t genl_sk_destructing_waitq; +/** + * rcu_dereference_genl - rcu_dereference with debug checking + * @p: The pointer to read, prior to dereferencing + * + * Do an rcu_dereference(p), but check caller either holds rcu_read_lock() + * or genl mutex. Note : Please prefer genl_dereference() or rcu_dereference() + */ +#define rcu_dereference_genl(p) \ + rcu_dereference_check(p, lockdep_genl_is_held()) + +/** + * genl_dereference - fetch RCU pointer when updates are prevented by genl mutex + * @p: The pointer to read, prior to dereferencing + * + * Return the value of the specified RCU-protected pointer, but omit + * both the smp_read_barrier_depends() and the ACCESS_ONCE(), because + * caller holds genl mutex. + */ +#define genl_dereference(p) \ + rcu_dereference_protected(p, lockdep_genl_is_held()) + #define MODULE_ALIAS_GENL_FAMILY(family)\ MODULE_ALIAS_NET_PF_PROTO_NAME(PF_NETLINK, NETLINK_GENERIC, "-family-" family) diff --git a/include/linux/genhd.h b/include/linux/genhd.h index 0f5315c2b5..b698a758b4 100644 --- a/include/linux/genhd.h +++ b/include/linux/genhd.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_GENHD_H #define _LINUX_GENHD_H @@ -16,13 +15,48 @@ #include #include #include -#include -#include -extern const struct device_type disk_type; +#ifdef CONFIG_BLOCK + +#define dev_to_disk(device) container_of((device), struct gendisk, part0.__dev) +#define dev_to_part(device) container_of((device), struct hd_struct, __dev) +#define disk_to_dev(disk) (&(disk)->part0.__dev) +#define part_to_dev(part) (&((part)->__dev)) + extern struct device_type part_type; +extern struct kobject *block_depr; extern struct class block_class; +enum { +/* These three have identical behaviour; use the 
second one if DOS FDISK gets + confused about extended/logical partitions starting past cylinder 1023. */ + DOS_EXTENDED_PARTITION = 5, + LINUX_EXTENDED_PARTITION = 0x85, + WIN98_EXTENDED_PARTITION = 0x0f, + + SUN_WHOLE_DISK = DOS_EXTENDED_PARTITION, + + LINUX_SWAP_PARTITION = 0x82, + LINUX_DATA_PARTITION = 0x83, + LINUX_LVM_PARTITION = 0x8e, + LINUX_RAID_PARTITION = 0xfd, /* autodetect RAID partition */ + + SOLARIS_X86_PARTITION = LINUX_SWAP_PARTITION, + NEW_SOLARIS_X86_PARTITION = 0xbf, + + DM6_AUX1PARTITION = 0x51, /* no DDO: use xlated geom */ + DM6_AUX3PARTITION = 0x53, /* no DDO: use xlated geom */ + DM6_PARTITION = 0x54, /* has DDO: use xlated geom & offset */ + EZD_PARTITION = 0x55, /* EZ-DRIVE */ + + FREEBSD_PARTITION = 0xa5, /* FreeBSD Partition ID */ + OPENBSD_PARTITION = 0xa6, /* OpenBSD Partition ID */ + NETBSD_PARTITION = 0xa9, /* NetBSD Partition ID */ + BSDI_PARTITION = 0xb7, /* BSDI Partition ID */ + MINIX_PARTITION = 0x81, /* Minix Partition ID */ + UNIXWARE_PARTITION = 0x63, /* Same as GNU_HURD and SCO Unix */ +}; + #define DISK_MAX_PARTS 256 #define DISK_NAME_LEN 32 @@ -32,7 +66,28 @@ extern struct class block_class; #include #include #include -#include + +struct partition { + unsigned char boot_ind; /* 0x80 - active */ + unsigned char head; /* starting head */ + unsigned char sector; /* starting sector */ + unsigned char cyl; /* starting cylinder */ + unsigned char sys_ind; /* What partition type */ + unsigned char end_head; /* end head */ + unsigned char end_sector; /* end sector */ + unsigned char end_cyl; /* end cylinder */ + __le32 start_sect; /* starting sector counting from 0 */ + __le32 nr_sects; /* nr of sectors in partition */ +} __attribute__((packed)); + +struct disk_stats { + unsigned long sectors[2]; /* READs and WRITEs */ + unsigned long ios[2]; + unsigned long merges[2]; + unsigned long ticks[2]; + unsigned long io_ticks; + unsigned long time_in_queue; +}; #define PARTITION_META_INFO_VOLNAMELTH 64 /* @@ -46,84 +101,82 @@ struct 
partition_meta_info { u8 volname[PARTITION_META_INFO_VOLNAMELTH]; }; -/** - * DOC: genhd capability flags - * - * ``GENHD_FL_REMOVABLE`` (0x0001): indicates that the block device - * gives access to removable media. - * When set, the device remains present even when media is not - * inserted. - * Must not be set for devices which are removed entirely when the - * media is removed. - * - * ``GENHD_FL_CD`` (0x0008): the block device is a CD-ROM-style - * device. - * Affects responses to the ``CDROM_GET_CAPABILITY`` ioctl. - * - * ``GENHD_FL_SUPPRESS_PARTITION_INFO`` (0x0020): don't include - * partition information in ``/proc/partitions`` or in the output of - * printk_all_partitions(). - * Used for the null block device and some MMC devices. - * - * ``GENHD_FL_EXT_DEVT`` (0x0040): the driver supports extended - * dynamic ``dev_t``, i.e. it wants extended device numbers - * (``BLOCK_EXT_MAJOR``). - * This affects the maximum number of partitions. - * - * ``GENHD_FL_NATIVE_CAPACITY`` (0x0080): based on information in the - * partition table, the device's capacity has been extended to its - * native capacity; i.e. the device has hidden capacity used by one - * of the partitions (this is a flag used so that native capacity is - * only ever unlocked once). - * - * ``GENHD_FL_BLOCK_EVENTS_ON_EXCL_WRITE`` (0x0100): event polling is - * blocked whenever a writer holds an exclusive lock. - * - * ``GENHD_FL_NO_PART_SCAN`` (0x0200): partition scanning is disabled. - * Used for loop devices in their default settings and some MMC - * devices. - * - * ``GENHD_FL_HIDDEN`` (0x0400): the block device is hidden; it - * doesn't produce events, doesn't appear in sysfs, and doesn't have - * an associated ``bdev``. - * Implies ``GENHD_FL_SUPPRESS_PARTITION_INFO`` and - * ``GENHD_FL_NO_PART_SCAN``. - * Used for multipath devices. 
- */ -#define GENHD_FL_REMOVABLE 0x0001 -/* 2 is unused (used to be GENHD_FL_DRIVERFS) */ -/* 4 is unused (used to be GENHD_FL_MEDIA_CHANGE_NOTIFY) */ -#define GENHD_FL_CD 0x0008 -#define GENHD_FL_SUPPRESS_PARTITION_INFO 0x0020 -#define GENHD_FL_EXT_DEVT 0x0040 -#define GENHD_FL_NATIVE_CAPACITY 0x0080 -#define GENHD_FL_BLOCK_EVENTS_ON_EXCL_WRITE 0x0100 -#define GENHD_FL_NO_PART_SCAN 0x0200 -#define GENHD_FL_HIDDEN 0x0400 +struct hd_struct { + sector_t start_sect; + /* + * nr_sects is protected by sequence counter. One might extend a + * partition while IO is happening to it and update of nr_sects + * can be non-atomic on 32bit machines with 64bit sector_t. + */ + sector_t nr_sects; + seqcount_t nr_sects_seq; + sector_t alignment_offset; + unsigned int discard_alignment; + struct device __dev; + struct kobject *holder_dir; + int policy, partno; + struct partition_meta_info *info; +#ifdef CONFIG_FAIL_MAKE_REQUEST + int make_it_fail; +#endif + unsigned long stamp; + atomic_t in_flight[2]; +#ifdef CONFIG_SMP + struct disk_stats __percpu *dkstats; +#else + struct disk_stats dkstats; +#endif + struct percpu_ref ref; + struct rcu_head rcu_head; +}; + +#define GENHD_FL_REMOVABLE 1 +/* 2 is unused */ +#define GENHD_FL_MEDIA_CHANGE_NOTIFY 4 +#define GENHD_FL_CD 8 +#define GENHD_FL_UP 16 +#define GENHD_FL_SUPPRESS_PARTITION_INFO 32 +#define GENHD_FL_EXT_DEVT 64 /* allow extended devt */ +#define GENHD_FL_NATIVE_CAPACITY 128 +#define GENHD_FL_BLOCK_EVENTS_ON_EXCL_WRITE 256 +#define GENHD_FL_NO_PART_SCAN 512 enum { DISK_EVENT_MEDIA_CHANGE = 1 << 0, /* media changed */ DISK_EVENT_EJECT_REQUEST = 1 << 1, /* eject requested */ }; -enum { - /* Poll even if events_poll_msecs is unset */ - DISK_EVENT_FLAG_POLL = 1 << 0, - /* Forward events to udev */ - DISK_EVENT_FLAG_UEVENT = 1 << 1, +#define BLK_SCSI_MAX_CMDS (256) +#define BLK_SCSI_CMD_PER_LONG (BLK_SCSI_MAX_CMDS / (sizeof(long) * 8)) + +struct blk_scsi_cmd_filter { + unsigned long read_ok[BLK_SCSI_CMD_PER_LONG]; + unsigned long 
write_ok[BLK_SCSI_CMD_PER_LONG]; + struct kobject kobj; +}; + +struct disk_part_tbl { + struct rcu_head rcu_head; + int len; + struct hd_struct __rcu *last_lookup; + struct hd_struct __rcu *part[]; }; struct disk_events; struct badblocks; +#if defined(CONFIG_BLK_DEV_INTEGRITY) + struct blk_integrity { - const struct blk_integrity_profile *profile; - unsigned char flags; - unsigned char tuple_size; - unsigned char interval_exp; - unsigned char tag_size; + struct blk_integrity_profile *profile; + unsigned char flags; + unsigned char tuple_size; + unsigned char interval_exp; + unsigned char tag_size; }; +#endif /* CONFIG_BLK_DEV_INTEGRITY */ + struct gendisk { /* major, first_minor and minors are input parameters only, * don't use directly. Use disk_devt() and disk_max_parts(). @@ -134,65 +187,52 @@ struct gendisk { * disks that can't be partitioned. */ char disk_name[DISK_NAME_LEN]; /* name of major driver */ + char *(*devnode)(struct gendisk *gd, umode_t *mode); - unsigned short events; /* supported events */ - unsigned short event_flags; /* flags related to event processing */ + unsigned int events; /* supported events */ + unsigned int async_events; /* async events, subset of all */ - struct xarray part_tbl; - struct block_device *part0; + /* Array of pointers to partitions indexed by partno. + * Protected with matching bdev lock but stat and other + * non-critical accesses use RCU. Always access through + * helpers. 
+ */ + struct disk_part_tbl __rcu *part_tbl; + struct hd_struct part0; const struct block_device_operations *fops; struct request_queue *queue; void *private_data; int flags; - unsigned long state; -#define GD_NEED_PART_SCAN 0 -#define GD_READ_ONLY 1 -#define GD_DEAD 2 - - struct mutex open_mutex; /* open/close mutex */ - unsigned open_partitions; /* number of open partitions */ - - struct backing_dev_info *bdi; struct kobject *slave_dir; -#ifdef CONFIG_BLOCK_HOLDER_DEPRECATED - struct list_head slave_bdevs; -#endif + struct timer_rand_state *random; - atomic_t sync_io; /* RAID */ + atomic_unchecked_t sync_io; /* RAID */ struct disk_events *ev; #ifdef CONFIG_BLK_DEV_INTEGRITY struct kobject integrity_kobj; #endif /* CONFIG_BLK_DEV_INTEGRITY */ -#if IS_ENABLED(CONFIG_CDROM) - struct cdrom_device_info *cdi; -#endif int node_id; struct badblocks *bb; - struct lockdep_map lockdep_map; - u64 diskseq; }; -static inline bool disk_live(struct gendisk *disk) +static inline struct gendisk *part_to_disk(struct hd_struct *part) { - return !inode_unhashed(disk->part0->bd_inode); + if (likely(part)) { + if (part->partno) + return dev_to_disk(part_to_dev(part)->parent); + else + return dev_to_disk(part_to_dev(part)); + } + return NULL; } -/* - * The gendisk is refcounted by the part0 block_device, and the bd_device - * therein is also used for device model presentation in sysfs. 
- */ -#define dev_to_disk(device) \ - (dev_to_bdev(device)->bd_disk) -#define disk_to_dev(disk) \ - (&((disk)->part0->bd_device)) - -#if IS_REACHABLE(CONFIG_CDROM) -#define disk_to_cdi(disk) ((disk)->cdi) -#else -#define disk_to_cdi(disk) NULL -#endif +static inline int blk_part_pack_uuid(const u8 *uuid_str, u8 *to) +{ + uuid_be_to_bin(uuid_str, (uuid_be *)to); + return 0; +} static inline int disk_max_parts(struct gendisk *disk) { @@ -209,33 +249,192 @@ static inline bool disk_part_scan_enabled(struct gendisk *disk) static inline dev_t disk_devt(struct gendisk *disk) { - return MKDEV(disk->major, disk->first_minor); + return disk_to_dev(disk)->devt; } -void disk_uevent(struct gendisk *disk, enum kobject_action action); +static inline dev_t part_devt(struct hd_struct *part) +{ + return part_to_dev(part)->devt; +} + +extern struct hd_struct *disk_get_part(struct gendisk *disk, int partno); + +static inline void disk_put_part(struct hd_struct *part) +{ + if (likely(part)) + put_device(part_to_dev(part)); +} + +/* + * Smarter partition iterator without context limits. 
+ */ +#define DISK_PITER_REVERSE (1 << 0) /* iterate in the reverse direction */ +#define DISK_PITER_INCL_EMPTY (1 << 1) /* include 0-sized parts */ +#define DISK_PITER_INCL_PART0 (1 << 2) /* include partition 0 */ +#define DISK_PITER_INCL_EMPTY_PART0 (1 << 3) /* include empty partition 0 */ + +struct disk_part_iter { + struct gendisk *disk; + struct hd_struct *part; + int idx; + unsigned int flags; +}; + +extern void disk_part_iter_init(struct disk_part_iter *piter, + struct gendisk *disk, unsigned int flags); +extern struct hd_struct *disk_part_iter_next(struct disk_part_iter *piter); +extern void disk_part_iter_exit(struct disk_part_iter *piter); + +extern struct hd_struct *disk_map_sector_rcu(struct gendisk *disk, + sector_t sector); + +/* + * Macros to operate on percpu disk statistics: + * + * {disk|part|all}_stat_{add|sub|inc|dec}() modify the stat counters + * and should be called between disk_stat_lock() and + * disk_stat_unlock(). + * + * part_stat_read() can be called at any time. + * + * part_stat_{add|set_all}() and {init|free}_part_stats are for + * internal use only. 
+ */ +#ifdef CONFIG_SMP +#define part_stat_lock() ({ rcu_read_lock(); get_cpu(); }) +#define part_stat_unlock() do { put_cpu(); rcu_read_unlock(); } while (0) + +#define __part_stat_add(cpu, part, field, addnd) \ + (per_cpu_ptr((part)->dkstats, (cpu))->field += (addnd)) + +#define part_stat_read(part, field) \ +({ \ + typeof((part)->dkstats->field) res = 0; \ + unsigned int _cpu; \ + for_each_possible_cpu(_cpu) \ + res += per_cpu_ptr((part)->dkstats, _cpu)->field; \ + res; \ +}) + +static inline void part_stat_set_all(struct hd_struct *part, int value) +{ + int i; + + for_each_possible_cpu(i) + memset(per_cpu_ptr(part->dkstats, i), value, + sizeof(struct disk_stats)); +} + +static inline int init_part_stats(struct hd_struct *part) +{ + part->dkstats = alloc_percpu(struct disk_stats); + if (!part->dkstats) + return 0; + return 1; +} + +static inline void free_part_stats(struct hd_struct *part) +{ + free_percpu(part->dkstats); +} + +#else /* !CONFIG_SMP */ +#define part_stat_lock() ({ rcu_read_lock(); 0; }) +#define part_stat_unlock() rcu_read_unlock() + +#define __part_stat_add(cpu, part, field, addnd) \ + ((part)->dkstats.field += addnd) + +#define part_stat_read(part, field) ((part)->dkstats.field) + +static inline void part_stat_set_all(struct hd_struct *part, int value) +{ + memset(&part->dkstats, value, sizeof(struct disk_stats)); +} + +static inline int init_part_stats(struct hd_struct *part) +{ + return 1; +} + +static inline void free_part_stats(struct hd_struct *part) +{ +} + +#endif /* CONFIG_SMP */ + +#define part_stat_add(cpu, part, field, addnd) do { \ + __part_stat_add((cpu), (part), field, addnd); \ + if ((part)->partno) \ + __part_stat_add((cpu), &part_to_disk((part))->part0, \ + field, addnd); \ +} while (0) + +#define part_stat_dec(cpu, gendiskp, field) \ + part_stat_add(cpu, gendiskp, field, -1) +#define part_stat_inc(cpu, gendiskp, field) \ + part_stat_add(cpu, gendiskp, field, 1) +#define part_stat_sub(cpu, gendiskp, field, subnd) \ + 
part_stat_add(cpu, gendiskp, field, -subnd) + +static inline void part_inc_in_flight(struct hd_struct *part, int rw) +{ + atomic_inc(&part->in_flight[rw]); + if (part->partno) + atomic_inc(&part_to_disk(part)->part0.in_flight[rw]); +} + +static inline void part_dec_in_flight(struct hd_struct *part, int rw) +{ + atomic_dec(&part->in_flight[rw]); + if (part->partno) + atomic_dec(&part_to_disk(part)->part0.in_flight[rw]); +} + +static inline int part_in_flight(struct hd_struct *part) +{ + return atomic_read(&part->in_flight[0]) + atomic_read(&part->in_flight[1]); +} + +static inline struct partition_meta_info *alloc_part_info(struct gendisk *disk) +{ + if (disk) + return kzalloc_node(sizeof(struct partition_meta_info), + GFP_KERNEL, disk->node_id); + return kzalloc(sizeof(struct partition_meta_info), GFP_KERNEL); +} + +static inline void free_part_info(struct hd_struct *part) +{ + kfree(part->info); +} + +/* block/blk-core.c */ +extern void part_round_stats(int cpu, struct hd_struct *part); /* block/genhd.c */ -int device_add_disk(struct device *parent, struct gendisk *disk, - const struct attribute_group **groups); -static inline int add_disk(struct gendisk *disk) +extern void device_add_disk(struct device *parent, struct gendisk *disk); +static inline void add_disk(struct gendisk *disk) { - return device_add_disk(NULL, disk, NULL); + device_add_disk(NULL, disk); } -extern void del_gendisk(struct gendisk *gp); -void set_disk_ro(struct gendisk *disk, bool read_only); +extern void del_gendisk(struct gendisk *gp); +extern struct gendisk *get_gendisk(dev_t dev, int *partno); +extern struct block_device *bdget_disk(struct gendisk *disk, int partno); + +extern void set_device_ro(struct block_device *bdev, int flag); +extern void set_disk_ro(struct gendisk *disk, int flag); static inline int get_disk_ro(struct gendisk *disk) { - return disk->part0->bd_read_only || - test_bit(GD_READ_ONLY, &disk->state); + return disk->part0.policy; } extern void disk_block_events(struct 
gendisk *disk); extern void disk_unblock_events(struct gendisk *disk); extern void disk_flush_events(struct gendisk *disk, unsigned int mask); -bool set_capacity_and_notify(struct gendisk *disk, sector_t size); -bool disk_force_media_change(struct gendisk *disk, unsigned int events); +extern unsigned int disk_clear_events(struct gendisk *disk, unsigned int mask); /* drivers/char/random.c */ extern void add_disk_randomness(struct gendisk *disk) __latent_entropy; @@ -243,87 +442,315 @@ extern void rand_initialize_disk(struct gendisk *disk); static inline sector_t get_start_sect(struct block_device *bdev) { - return bdev->bd_start_sect; + return bdev->bd_part->start_sect; } - -static inline sector_t bdev_nr_sectors(struct block_device *bdev) -{ - return i_size_read(bdev->bd_inode) >> 9; -} - static inline sector_t get_capacity(struct gendisk *disk) { - return bdev_nr_sectors(disk->part0); + return disk->part0.nr_sects; +} +static inline void set_capacity(struct gendisk *disk, sector_t size) +{ + disk->part0.nr_sects = size; } -int bdev_disk_changed(struct gendisk *disk, bool invalidate); -void blk_drop_partitions(struct gendisk *disk); +#ifdef CONFIG_SOLARIS_X86_PARTITION -struct gendisk *__alloc_disk_node(struct request_queue *q, int node_id, - struct lock_class_key *lkclass); -extern void put_disk(struct gendisk *disk); -struct gendisk *__blk_alloc_disk(int node, struct lock_class_key *lkclass); +#define SOLARIS_X86_NUMSLICE 16 +#define SOLARIS_X86_VTOC_SANE (0x600DDEEEUL) -/** - * blk_alloc_disk - allocate a gendisk structure - * @node_id: numa node to allocate on - * - * Allocate and pre-initialize a gendisk structure for use with BIO based - * drivers. 
- * - * Context: can sleep +struct solaris_x86_slice { + __le16 s_tag; /* ID tag of partition */ + __le16 s_flag; /* permission flags */ + __le32 s_start; /* start sector no of partition */ + __le32 s_size; /* # of blocks in partition */ +}; + +struct solaris_x86_vtoc { + unsigned int v_bootinfo[3]; /* info needed by mboot (unsupported) */ + __le32 v_sanity; /* to verify vtoc sanity */ + __le32 v_version; /* layout version */ + char v_volume[8]; /* volume name */ + __le16 v_sectorsz; /* sector size in bytes */ + __le16 v_nparts; /* number of partitions */ + unsigned int v_reserved[10]; /* free space */ + struct solaris_x86_slice + v_slice[SOLARIS_X86_NUMSLICE]; /* slice headers */ + unsigned int timestamp[SOLARIS_X86_NUMSLICE]; /* timestamp (unsupported) */ + char v_asciilabel[128]; /* for compatibility */ +}; + +#endif /* CONFIG_SOLARIS_X86_PARTITION */ + +#ifdef CONFIG_BSD_DISKLABEL +/* + * BSD disklabel support by Yossi Gottlieb + * updated by Marc Espie */ -#define blk_alloc_disk(node_id) \ -({ \ - static struct lock_class_key __key; \ - \ - __blk_alloc_disk(node_id, &__key); \ -}) -void blk_cleanup_disk(struct gendisk *disk); -int __register_blkdev(unsigned int major, const char *name, - void (*probe)(dev_t devt)); -#define register_blkdev(major, name) \ - __register_blkdev(major, name, NULL) -void unregister_blkdev(unsigned int major, const char *name); +/* check against BSD src/sys/sys/disklabel.h for consistency */ -bool bdev_check_media_change(struct block_device *bdev); -int __invalidate_device(struct block_device *bdev, bool kill_dirty); -void set_capacity(struct gendisk *disk, sector_t size); +#define BSD_DISKMAGIC (0x82564557UL) /* The disk magic number */ +#define BSD_MAXPARTITIONS 16 +#define OPENBSD_MAXPARTITIONS 16 +#define BSD_FS_UNUSED 0 /* disklabel unused partition entry ID */ +struct bsd_disklabel { + __le32 d_magic; /* the magic number */ + __s16 d_type; /* drive type */ + __s16 d_subtype; /* controller/d_type specific */ + char 
d_typename[16]; /* type name, e.g. "eagle" */ + char d_packname[16]; /* pack identifier */ + __u32 d_secsize; /* # of bytes per sector */ + __u32 d_nsectors; /* # of data sectors per track */ + __u32 d_ntracks; /* # of tracks per cylinder */ + __u32 d_ncylinders; /* # of data cylinders per unit */ + __u32 d_secpercyl; /* # of data sectors per cylinder */ + __u32 d_secperunit; /* # of data sectors per unit */ + __u16 d_sparespertrack; /* # of spare sectors per track */ + __u16 d_sparespercyl; /* # of spare sectors per cylinder */ + __u32 d_acylinders; /* # of alt. cylinders per unit */ + __u16 d_rpm; /* rotational speed */ + __u16 d_interleave; /* hardware sector interleave */ + __u16 d_trackskew; /* sector 0 skew, per track */ + __u16 d_cylskew; /* sector 0 skew, per cylinder */ + __u32 d_headswitch; /* head switch time, usec */ + __u32 d_trkseek; /* track-to-track seek, usec */ + __u32 d_flags; /* generic flags */ +#define NDDATA 5 + __u32 d_drivedata[NDDATA]; /* drive-type specific information */ +#define NSPARE 5 + __u32 d_spare[NSPARE]; /* reserved for future use */ + __le32 d_magic2; /* the magic number (again) */ + __le16 d_checksum; /* xor of data incl. 
partitions */ -/* for drivers/char/raw.c: */ -int blkdev_ioctl(struct block_device *, fmode_t, unsigned, unsigned long); -long compat_blkdev_ioctl(struct file *, unsigned, unsigned long); + /* filesystem and partition information: */ + __le16 d_npartitions; /* number of partitions in following */ + __le32 d_bbsize; /* size of boot area at sn0, bytes */ + __le32 d_sbsize; /* max size of fs superblock, bytes */ + struct bsd_partition { /* the partition table */ + __le32 p_size; /* number of sectors in partition */ + __le32 p_offset; /* starting sector */ + __le32 p_fsize; /* filesystem basic fragment size */ + __u8 p_fstype; /* filesystem type, see below */ + __u8 p_frag; /* filesystem fragments per block */ + __le16 p_cpg; /* filesystem cylinders per group */ + } d_partitions[BSD_MAXPARTITIONS]; /* actually may be more */ +}; -#ifdef CONFIG_BLOCK_HOLDER_DEPRECATED -int bd_link_disk_holder(struct block_device *bdev, struct gendisk *disk); -void bd_unlink_disk_holder(struct block_device *bdev, struct gendisk *disk); -int bd_register_pending_holders(struct gendisk *disk); +#endif /* CONFIG_BSD_DISKLABEL */ + +#ifdef CONFIG_UNIXWARE_DISKLABEL +/* + * Unixware slices support by Andrzej Krzysztofowicz + * and Krzysztof G. 
Baranowski + */ + +#define UNIXWARE_DISKMAGIC (0xCA5E600DUL) /* The disk magic number */ +#define UNIXWARE_DISKMAGIC2 (0x600DDEEEUL) /* The slice table magic nr */ +#define UNIXWARE_NUMSLICE 16 +#define UNIXWARE_FS_UNUSED 0 /* Unused slice entry ID */ + +struct unixware_slice { + __le16 s_label; /* label */ + __le16 s_flags; /* permission flags */ + __le32 start_sect; /* starting sector */ + __le32 nr_sects; /* number of sectors in slice */ +}; + +struct unixware_disklabel { + __le32 d_type; /* drive type */ + __le32 d_magic; /* the magic number */ + __le32 d_version; /* version number */ + char d_serial[12]; /* serial number of the device */ + __le32 d_ncylinders; /* # of data cylinders per device */ + __le32 d_ntracks; /* # of tracks per cylinder */ + __le32 d_nsectors; /* # of data sectors per track */ + __le32 d_secsize; /* # of bytes per sector */ + __le32 d_part_start; /* # of first sector of this partition */ + __le32 d_unknown1[12]; /* ? */ + __le32 d_alt_tbl; /* byte offset of alternate table */ + __le32 d_alt_len; /* byte length of alternate table */ + __le32 d_phys_cyl; /* # of physical cylinders per device */ + __le32 d_phys_trk; /* # of physical tracks per cylinder */ + __le32 d_phys_sec; /* # of physical sectors per track */ + __le32 d_phys_bytes; /* # of physical bytes per sector */ + __le32 d_unknown2; /* ? */ + __le32 d_unknown3; /* ? */ + __le32 d_pad[8]; /* pad */ + + struct unixware_vtoc { + __le32 v_magic; /* the magic number */ + __le32 v_version; /* version number */ + char v_name[8]; /* volume name */ + __le16 v_nslices; /* # of slices */ + __le16 v_unknown1; /* ? 
*/ + __le32 v_reserved[10]; /* reserved */ + struct unixware_slice + v_slice[UNIXWARE_NUMSLICE]; /* slice headers */ + } vtoc; + +}; /* 408 */ + +#endif /* CONFIG_UNIXWARE_DISKLABEL */ + +#ifdef CONFIG_MINIX_SUBPARTITION +# define MINIX_NR_SUBPARTITIONS 4 +#endif /* CONFIG_MINIX_SUBPARTITION */ + +#define ADDPART_FLAG_NONE 0 +#define ADDPART_FLAG_RAID 1 +#define ADDPART_FLAG_WHOLEDISK 2 + +extern int blk_alloc_devt(struct hd_struct *part, dev_t *devt); +extern void blk_free_devt(dev_t devt); +extern dev_t blk_lookup_devt(const char *name, int partno); +extern char *disk_name (struct gendisk *hd, int partno, char *buf); + +extern int disk_expand_part_tbl(struct gendisk *disk, int target); +extern int rescan_partitions(struct gendisk *disk, struct block_device *bdev); +extern int invalidate_partitions(struct gendisk *disk, struct block_device *bdev); +extern struct hd_struct * __must_check add_partition(struct gendisk *disk, + int partno, sector_t start, + sector_t len, int flags, + struct partition_meta_info + *info); +extern void __delete_partition(struct percpu_ref *); +extern void delete_partition(struct gendisk *, int); +extern void printk_all_partitions(void); + +extern struct gendisk *alloc_disk_node(int minors, int node_id); +extern struct gendisk *alloc_disk(int minors); +extern struct kobject *get_disk(struct gendisk *disk); +extern void put_disk(struct gendisk *disk); +extern void blk_register_region(dev_t devt, unsigned long range, + struct module *module, + struct kobject *(*probe)(dev_t, int *, void *), + int (*lock)(dev_t, void *), + void *data); +extern void blk_unregister_region(dev_t devt, unsigned long range); + +extern ssize_t part_size_show(struct device *dev, + struct device_attribute *attr, char *buf); +extern ssize_t part_stat_show(struct device *dev, + struct device_attribute *attr, char *buf); +extern ssize_t part_inflight_show(struct device *dev, + struct device_attribute *attr, char *buf); +#ifdef CONFIG_FAIL_MAKE_REQUEST +extern ssize_t 
part_fail_show(struct device *dev, + struct device_attribute *attr, char *buf); +extern ssize_t part_fail_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count); +#endif /* CONFIG_FAIL_MAKE_REQUEST */ + +static inline int hd_ref_init(struct hd_struct *part) +{ + if (percpu_ref_init(&part->ref, __delete_partition, 0, + GFP_KERNEL)) + return -ENOMEM; + return 0; +} + +static inline void hd_struct_get(struct hd_struct *part) +{ + percpu_ref_get(&part->ref); +} + +static inline int hd_struct_try_get(struct hd_struct *part) +{ + return percpu_ref_tryget_live(&part->ref); +} + +static inline void hd_struct_put(struct hd_struct *part) +{ + percpu_ref_put(&part->ref); +} + +static inline void hd_struct_kill(struct hd_struct *part) +{ + percpu_ref_kill(&part->ref); +} + +static inline void hd_free_part(struct hd_struct *part) +{ + free_part_stats(part); + free_part_info(part); + percpu_ref_exit(&part->ref); +} + +/* + * Any access of part->nr_sects which is not protected by partition + * bd_mutex or gendisk bdev bd_mutex, should be done using this + * accessor function. + * + * Code written along the lines of i_size_read() and i_size_write(). + * CONFIG_PREEMPT case optimizes the case of UP kernel with preemption + * on. 
+ */ +static inline sector_t part_nr_sects_read(struct hd_struct *part) +{ +#if BITS_PER_LONG==32 && defined(CONFIG_LBDAF) && defined(CONFIG_SMP) + sector_t nr_sects; + unsigned seq; + do { + seq = read_seqcount_begin(&part->nr_sects_seq); + nr_sects = part->nr_sects; + } while (read_seqcount_retry(&part->nr_sects_seq, seq)); + return nr_sects; +#elif BITS_PER_LONG==32 && defined(CONFIG_LBDAF) && defined(CONFIG_PREEMPT) + sector_t nr_sects; + + preempt_disable(); + nr_sects = part->nr_sects; + preempt_enable(); + return nr_sects; #else -static inline int bd_link_disk_holder(struct block_device *bdev, - struct gendisk *disk) -{ - return 0; + return part->nr_sects; +#endif } -static inline void bd_unlink_disk_holder(struct block_device *bdev, - struct gendisk *disk) -{ -} -static inline int bd_register_pending_holders(struct gendisk *disk) -{ - return 0; -} -#endif /* CONFIG_BLOCK_HOLDER_DEPRECATED */ -dev_t part_devt(struct gendisk *disk, u8 partno); -void inc_diskseq(struct gendisk *disk); -dev_t blk_lookup_devt(const char *name, int partno); -void blk_request_module(dev_t devt); -#ifdef CONFIG_BLOCK -void printk_all_partitions(void); -#else /* CONFIG_BLOCK */ -static inline void printk_all_partitions(void) +/* + * Should be called with mutex lock held (typically bd_mutex) of partition + * to provide mutual exlusion among writers otherwise seqcount might be + * left in wrong state leaving the readers spinning infinitely. 
+ */ +static inline void part_nr_sects_write(struct hd_struct *part, sector_t size) { +#if BITS_PER_LONG==32 && defined(CONFIG_LBDAF) && defined(CONFIG_SMP) + write_seqcount_begin(&part->nr_sects_seq); + part->nr_sects = size; + write_seqcount_end(&part->nr_sects_seq); +#elif BITS_PER_LONG==32 && defined(CONFIG_LBDAF) && defined(CONFIG_PREEMPT) + preempt_disable(); + part->nr_sects = size; + preempt_enable(); +#else + part->nr_sects = size; +#endif +} + +#if defined(CONFIG_BLK_DEV_INTEGRITY) +extern void blk_integrity_add(struct gendisk *); +extern void blk_integrity_del(struct gendisk *); +extern void blk_integrity_revalidate(struct gendisk *); +#else /* CONFIG_BLK_DEV_INTEGRITY */ +static inline void blk_integrity_add(struct gendisk *disk) { } +static inline void blk_integrity_del(struct gendisk *disk) { } +static inline void blk_integrity_revalidate(struct gendisk *disk) { } +#endif /* CONFIG_BLK_DEV_INTEGRITY */ + +#else /* CONFIG_BLOCK */ + +static inline void printk_all_partitions(void) { } + +static inline dev_t blk_lookup_devt(const char *name, int partno) +{ + dev_t devt = MKDEV(0, 0); + return devt; +} + +static inline int blk_part_pack_uuid(const u8 *uuid_str, u8 *to) +{ + return -EINVAL; } #endif /* CONFIG_BLOCK */ diff --git a/include/linux/genl_magic_func.h b/include/linux/genl_magic_func.h index 939b1a8f57..abac2a74a6 100644 --- a/include/linux/genl_magic_func.h +++ b/include/linux/genl_magic_func.h @@ -1,8 +1,6 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef GENL_MAGIC_FUNC_H #define GENL_MAGIC_FUNC_H -#include #include /* @@ -133,6 +131,17 @@ static void dprint_array(const char *dir, int nla_type, * use one static buffer for parsing of nested attributes */ static struct nlattr *nested_attr_tb[128]; +#ifndef BUILD_BUG_ON +/* Force a compilation error if condition is true */ +#define BUILD_BUG_ON(condition) ((void)BUILD_BUG_ON_ZERO(condition)) +/* Force a compilation error if condition is true, but also produce a + result (of value 0 and type 
size_t), so the expression can be used + e.g. in a structure initializer (or where-ever else comma expressions + aren't permitted). */ +#define BUILD_BUG_ON_ZERO(e) (sizeof(struct { int:-!!(e); })) +#define BUILD_BUG_ON_NULL(e) ((void *)sizeof(struct { int:-!!(e); })) +#endif + #undef GENL_struct #define GENL_struct(tag_name, tag_number, s_name, s_fields) \ /* *_from_attrs functions are static, but potentially unused */ \ @@ -233,10 +242,11 @@ const char *CONCAT_(GENL_MAGIC_FAMILY, _genl_cmd_to_str)(__u8 cmd) { \ handler \ .cmd = op_name, \ + .policy = CONCAT_(GENL_MAGIC_FAMILY, _tla_nl_policy), \ }, #define ZZZ_genl_ops CONCAT_(GENL_MAGIC_FAMILY, _genl_ops) -static struct genl_ops ZZZ_genl_ops[] __read_mostly = { +static struct genl_ops ZZZ_genl_ops[] = { #include GENL_MAGIC_INCLUDE_FILE }; @@ -249,7 +259,16 @@ static struct genl_ops ZZZ_genl_ops[] __read_mostly = { * {{{2 */ #define ZZZ_genl_family CONCAT_(GENL_MAGIC_FAMILY, _genl_family) -static struct genl_family ZZZ_genl_family; +static struct genl_family ZZZ_genl_family __read_mostly = { + .id = GENL_ID_GENERATE, + .name = __stringify(GENL_MAGIC_FAMILY), + .version = GENL_MAGIC_VERSION, +#ifdef GENL_MAGIC_FAMILY_HDRSZ + .hdrsize = NLA_ALIGN(GENL_MAGIC_FAMILY_HDRSZ), +#endif + .maxattr = ARRAY_SIZE(drbd_tla_nl_policy)-1, +}; + /* * Magic: define multicast groups * Magic: define multicast group registration helper @@ -283,24 +302,11 @@ static int CONCAT_(GENL_MAGIC_FAMILY, _genl_multicast_ ## group)( \ #undef GENL_mc_group #define GENL_mc_group(group) -static struct genl_family ZZZ_genl_family __ro_after_init = { - .name = __stringify(GENL_MAGIC_FAMILY), - .version = GENL_MAGIC_VERSION, -#ifdef GENL_MAGIC_FAMILY_HDRSZ - .hdrsize = NLA_ALIGN(GENL_MAGIC_FAMILY_HDRSZ), -#endif - .maxattr = ARRAY_SIZE(CONCAT_(GENL_MAGIC_FAMILY, _tla_nl_policy))-1, - .policy = CONCAT_(GENL_MAGIC_FAMILY, _tla_nl_policy), - .ops = ZZZ_genl_ops, - .n_ops = ARRAY_SIZE(ZZZ_genl_ops), - .mcgrps = ZZZ_genl_mcgrps, - .n_mcgrps = 
ARRAY_SIZE(ZZZ_genl_mcgrps), - .module = THIS_MODULE, -}; - int CONCAT_(GENL_MAGIC_FAMILY, _genl_register)(void) { - return genl_register_family(&ZZZ_genl_family); + return genl_register_family_with_ops_groups(&ZZZ_genl_family, \ + ZZZ_genl_ops, \ + ZZZ_genl_mcgrps); } void CONCAT_(GENL_MAGIC_FAMILY, _genl_unregister)(void) @@ -404,3 +410,4 @@ s_fields \ /* }}}1 */ #endif /* GENL_MAGIC_FUNC_H */ +/* vim: set foldmethod=marker foldlevel=1 nofoldenable : */ diff --git a/include/linux/genl_magic_struct.h b/include/linux/genl_magic_struct.h index f81d489875..ddda3ac067 100644 --- a/include/linux/genl_magic_struct.h +++ b/include/linux/genl_magic_struct.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef GENL_MAGIC_STRUCT_H #define GENL_MAGIC_STRUCT_H @@ -82,14 +81,14 @@ static inline int nla_put_u64_0pad(struct sk_buff *skb, int attrtype, u64 value) __field(attr_nr, attr_flag, name, NLA_U32, __u32, \ nla_get_u32, nla_put_u32, false) #define __s32_field(attr_nr, attr_flag, name) \ - __field(attr_nr, attr_flag, name, NLA_U32, __s32, \ - nla_get_u32, nla_put_u32, true) + __field(attr_nr, attr_flag, name, NLA_S32, __s32, \ + nla_get_s32, nla_put_s32, true) #define __u64_field(attr_nr, attr_flag, name) \ __field(attr_nr, attr_flag, name, NLA_U64, __u64, \ nla_get_u64, nla_put_u64_0pad, false) #define __str_field(attr_nr, attr_flag, name, maxlen) \ __array(attr_nr, attr_flag, name, NLA_NUL_STRING, char, maxlen, \ - nla_strscpy, nla_put, false) + nla_strlcpy, nla_put, false) #define __bin_field(attr_nr, attr_flag, name, maxlen) \ __array(attr_nr, attr_flag, name, NLA_BINARY, char, maxlen, \ nla_memcpy, nla_put, false) @@ -191,7 +190,6 @@ static inline void ct_assert_unique_operations(void) { switch (0) { #include GENL_MAGIC_INCLUDE_FILE - case 0: ; } } @@ -210,7 +208,6 @@ static inline void ct_assert_unique_top_level_attributes(void) { switch (0) { #include GENL_MAGIC_INCLUDE_FILE - case 0: ; } } @@ -220,8 +217,7 @@ static inline void 
ct_assert_unique_top_level_attributes(void) static inline void ct_assert_unique_ ## s_name ## _attributes(void) \ { \ switch (0) { \ - s_fields \ - case 0: \ + s_fields \ ; \ } \ } @@ -283,3 +279,4 @@ enum { \ /* }}}1 */ #endif /* GENL_MAGIC_STRUCT_H */ +/* vim: set foldmethod=marker nofoldenable : */ diff --git a/include/linux/getcpu.h b/include/linux/getcpu.h index c304dcdb4e..c7372d7a97 100644 --- a/include/linux/getcpu.h +++ b/include/linux/getcpu.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_GETCPU_H #define _LINUX_GETCPU_H 1 diff --git a/include/linux/gfp.h b/include/linux/gfp.h index 55b2ec1f96..593a07b92b 100644 --- a/include/linux/gfp.h +++ b/include/linux/gfp.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef __LINUX_GFP_H #define __LINUX_GFP_H @@ -8,20 +7,6 @@ #include #include -/* The typedef is in types.h but we want the documentation here */ -#if 0 -/** - * typedef gfp_t - Memory allocation flags. - * - * GFP flags are commonly used throughout Linux to indicate how memory - * should be allocated. The GFP acronym stands for get_free_pages(), - * the underlying memory allocation function. Not every GFP flag is - * supported by every function which may allocate memory. Most users - * will want to use a plain ``GFP_KERNEL``. 
- */ -typedef unsigned int __bitwise gfp_t; -#endif - struct vm_area_struct; /* @@ -38,28 +23,31 @@ struct vm_area_struct; #define ___GFP_HIGH 0x20u #define ___GFP_IO 0x40u #define ___GFP_FS 0x80u -#define ___GFP_ZERO 0x100u -#define ___GFP_ATOMIC 0x200u -#define ___GFP_DIRECT_RECLAIM 0x400u -#define ___GFP_KSWAPD_RECLAIM 0x800u -#define ___GFP_WRITE 0x1000u -#define ___GFP_NOWARN 0x2000u -#define ___GFP_RETRY_MAYFAIL 0x4000u -#define ___GFP_NOFAIL 0x8000u -#define ___GFP_NORETRY 0x10000u -#define ___GFP_MEMALLOC 0x20000u -#define ___GFP_COMP 0x40000u -#define ___GFP_NOMEMALLOC 0x80000u -#define ___GFP_HARDWALL 0x100000u -#define ___GFP_THISNODE 0x200000u -#define ___GFP_ACCOUNT 0x400000u -#define ___GFP_ZEROTAGS 0x800000u -#define ___GFP_SKIP_KASAN_POISON 0x1000000u -#ifdef CONFIG_LOCKDEP -#define ___GFP_NOLOCKDEP 0x2000000u +#define ___GFP_COLD 0x100u +#define ___GFP_NOWARN 0x200u +#define ___GFP_REPEAT 0x400u +#define ___GFP_NOFAIL 0x800u +#define ___GFP_NORETRY 0x1000u +#define ___GFP_MEMALLOC 0x2000u +#define ___GFP_COMP 0x4000u +#define ___GFP_ZERO 0x8000u +#define ___GFP_NOMEMALLOC 0x10000u +#define ___GFP_HARDWALL 0x20000u +#define ___GFP_THISNODE 0x40000u +#define ___GFP_ATOMIC 0x80000u +#define ___GFP_ACCOUNT 0x100000u +#define ___GFP_NOTRACK 0x200000u +#define ___GFP_DIRECT_RECLAIM 0x400000u +#define ___GFP_OTHER_NODE 0x800000u +#define ___GFP_WRITE 0x1000000u +#define ___GFP_KSWAPD_RECLAIM 0x2000000u + +#ifdef CONFIG_PAX_USERCOPY +#define ___GFP_USERCOPY 0x4000000u #else -#define ___GFP_NOLOCKDEP 0 +#define ___GFP_USERCOPY 0 #endif + /* If the above are modified, __GFP_BITS_SHIFT may need updating */ /* @@ -75,250 +63,195 @@ struct vm_area_struct; #define __GFP_MOVABLE ((__force gfp_t)___GFP_MOVABLE) /* ZONE_MOVABLE allowed */ #define GFP_ZONEMASK (__GFP_DMA|__GFP_HIGHMEM|__GFP_DMA32|__GFP_MOVABLE) -/** - * DOC: Page mobility and placement hints - * +/* * Page mobility and placement hints - * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ * * These flags provide 
hints about how mobile the page is. Pages with similar * mobility are placed within the same pageblocks to minimise problems due * to external fragmentation. * - * %__GFP_MOVABLE (also a zone modifier) indicates that the page can be - * moved by page migration during memory compaction or can be reclaimed. + * __GFP_MOVABLE (also a zone modifier) indicates that the page can be + * moved by page migration during memory compaction or can be reclaimed. * - * %__GFP_RECLAIMABLE is used for slab allocations that specify - * SLAB_RECLAIM_ACCOUNT and whose pages can be freed via shrinkers. + * __GFP_RECLAIMABLE is used for slab allocations that specify + * SLAB_RECLAIM_ACCOUNT and whose pages can be freed via shrinkers. * - * %__GFP_WRITE indicates the caller intends to dirty the page. Where possible, - * these pages will be spread between local zones to avoid all the dirty - * pages being in one zone (fair zone allocation policy). + * __GFP_WRITE indicates the caller intends to dirty the page. Where possible, + * these pages will be spread between local zones to avoid all the dirty + * pages being in one zone (fair zone allocation policy). * - * %__GFP_HARDWALL enforces the cpuset memory allocation policy. + * __GFP_HARDWALL enforces the cpuset memory allocation policy. * - * %__GFP_THISNODE forces the allocation to be satisfied from the requested - * node with no fallbacks or placement policy enforcements. + * __GFP_THISNODE forces the allocation to be satisified from the requested + * node with no fallbacks or placement policy enforcements. * - * %__GFP_ACCOUNT causes the allocation to be accounted to kmemcg. + * __GFP_ACCOUNT causes the allocation to be accounted to kmemcg. 
+ * + * __GFP_USERCOPY indicates that the page will be copied to/from userland */ #define __GFP_RECLAIMABLE ((__force gfp_t)___GFP_RECLAIMABLE) #define __GFP_WRITE ((__force gfp_t)___GFP_WRITE) #define __GFP_HARDWALL ((__force gfp_t)___GFP_HARDWALL) #define __GFP_THISNODE ((__force gfp_t)___GFP_THISNODE) #define __GFP_ACCOUNT ((__force gfp_t)___GFP_ACCOUNT) +#define __GFP_USERCOPY ((__force gfp_t)___GFP_USERCOPY) -/** - * DOC: Watermark modifiers - * +/* * Watermark modifiers -- controls access to emergency reserves - * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ * - * %__GFP_HIGH indicates that the caller is high-priority and that granting - * the request is necessary before the system can make forward progress. - * For example, creating an IO context to clean pages. + * __GFP_HIGH indicates that the caller is high-priority and that granting + * the request is necessary before the system can make forward progress. + * For example, creating an IO context to clean pages. * - * %__GFP_ATOMIC indicates that the caller cannot reclaim or sleep and is - * high priority. Users are typically interrupt handlers. This may be - * used in conjunction with %__GFP_HIGH + * __GFP_ATOMIC indicates that the caller cannot reclaim or sleep and is + * high priority. Users are typically interrupt handlers. This may be + * used in conjunction with __GFP_HIGH * - * %__GFP_MEMALLOC allows access to all memory. This should only be used when - * the caller guarantees the allocation will allow more memory to be freed - * very shortly e.g. process exiting or swapping. Users either should - * be the MM or co-ordinating closely with the VM (e.g. swap over NFS). - * Users of this flag have to be extremely careful to not deplete the reserve - * completely and implement a throttling mechanism which controls the - * consumption of the reserve based on the amount of freed memory. - * Usage of a pre-allocated pool (e.g. mempool) should be always considered - * before using this flag. 
+ * __GFP_MEMALLOC allows access to all memory. This should only be used when + * the caller guarantees the allocation will allow more memory to be freed + * very shortly e.g. process exiting or swapping. Users either should + * be the MM or co-ordinating closely with the VM (e.g. swap over NFS). * - * %__GFP_NOMEMALLOC is used to explicitly forbid access to emergency reserves. - * This takes precedence over the %__GFP_MEMALLOC flag if both are set. + * __GFP_NOMEMALLOC is used to explicitly forbid access to emergency reserves. + * This takes precedence over the __GFP_MEMALLOC flag if both are set. */ #define __GFP_ATOMIC ((__force gfp_t)___GFP_ATOMIC) #define __GFP_HIGH ((__force gfp_t)___GFP_HIGH) #define __GFP_MEMALLOC ((__force gfp_t)___GFP_MEMALLOC) #define __GFP_NOMEMALLOC ((__force gfp_t)___GFP_NOMEMALLOC) -/** - * DOC: Reclaim modifiers - * +/* * Reclaim modifiers - * ~~~~~~~~~~~~~~~~~ - * Please note that all the following flags are only applicable to sleepable - * allocations (e.g. %GFP_NOWAIT and %GFP_ATOMIC will ignore them). * - * %__GFP_IO can start physical IO. + * __GFP_IO can start physical IO. * - * %__GFP_FS can call down to the low-level FS. Clearing the flag avoids the - * allocator recursing into the filesystem which might already be holding - * locks. + * __GFP_FS can call down to the low-level FS. Clearing the flag avoids the + * allocator recursing into the filesystem which might already be holding + * locks. * - * %__GFP_DIRECT_RECLAIM indicates that the caller may enter direct reclaim. - * This flag can be cleared to avoid unnecessary delays when a fallback - * option is available. + * __GFP_DIRECT_RECLAIM indicates that the caller may enter direct reclaim. + * This flag can be cleared to avoid unnecessary delays when a fallback + * option is available. * - * %__GFP_KSWAPD_RECLAIM indicates that the caller wants to wake kswapd when - * the low watermark is reached and have it reclaim pages until the high - * watermark is reached. 
A caller may wish to clear this flag when fallback - * options are available and the reclaim is likely to disrupt the system. The - * canonical example is THP allocation where a fallback is cheap but - * reclaim/compaction may cause indirect stalls. + * __GFP_KSWAPD_RECLAIM indicates that the caller wants to wake kswapd when + * the low watermark is reached and have it reclaim pages until the high + * watermark is reached. A caller may wish to clear this flag when fallback + * options are available and the reclaim is likely to disrupt the system. The + * canonical example is THP allocation where a fallback is cheap but + * reclaim/compaction may cause indirect stalls. * - * %__GFP_RECLAIM is shorthand to allow/forbid both direct and kswapd reclaim. + * __GFP_RECLAIM is shorthand to allow/forbid both direct and kswapd reclaim. * - * The default allocator behavior depends on the request size. We have a concept - * of so called costly allocations (with order > %PAGE_ALLOC_COSTLY_ORDER). - * !costly allocations are too essential to fail so they are implicitly - * non-failing by default (with some exceptions like OOM victims might fail so - * the caller still has to check for failures) while costly requests try to be - * not disruptive and back off even without invoking the OOM killer. - * The following three modifiers might be used to override some of these - * implicit rules + * __GFP_REPEAT: Try hard to allocate the memory, but the allocation attempt + * _might_ fail. This depends upon the particular VM implementation. * - * %__GFP_NORETRY: The VM implementation will try only very lightweight - * memory direct reclaim to get some memory under memory pressure (thus - * it can sleep). It will avoid disruptive actions like OOM killer. The - * caller must handle the failure which is quite likely to happen under - * heavy memory pressure. 
The flag is suitable when failure can easily be - * handled at small cost, such as reduced throughput + * __GFP_NOFAIL: The VM implementation _must_ retry infinitely: the caller + * cannot handle allocation failures. New users should be evaluated carefully + * (and the flag should be used only when there is no reasonable failure + * policy) but it is definitely preferable to use the flag rather than + * opencode endless loop around allocator. * - * %__GFP_RETRY_MAYFAIL: The VM implementation will retry memory reclaim - * procedures that have previously failed if there is some indication - * that progress has been made else where. It can wait for other - * tasks to attempt high level approaches to freeing memory such as - * compaction (which removes fragmentation) and page-out. - * There is still a definite limit to the number of retries, but it is - * a larger limit than with %__GFP_NORETRY. - * Allocations with this flag may fail, but only when there is - * genuinely little unused memory. While these allocations do not - * directly trigger the OOM killer, their failure indicates that - * the system is likely to need to use the OOM killer soon. The - * caller must handle failure, but can reasonably do so by failing - * a higher-level request, or completing it only in a much less - * efficient manner. - * If the allocation does fail, and the caller is in a position to - * free some non-essential memory, doing so could benefit the system - * as a whole. - * - * %__GFP_NOFAIL: The VM implementation _must_ retry infinitely: the caller - * cannot handle allocation failures. The allocation could block - * indefinitely but will never return with failure. Testing for - * failure is pointless. - * New users should be evaluated carefully (and the flag should be - * used only when there is no reasonable failure policy) but it is - * definitely preferable to use the flag rather than opencode endless - * loop around allocator. 
- * Using this flag for costly allocations is _highly_ discouraged. + * __GFP_NORETRY: The VM implementation must not retry indefinitely and will + * return NULL when direct reclaim and memory compaction have failed to allow + * the allocation to succeed. The OOM killer is not called with the current + * implementation. */ #define __GFP_IO ((__force gfp_t)___GFP_IO) #define __GFP_FS ((__force gfp_t)___GFP_FS) #define __GFP_DIRECT_RECLAIM ((__force gfp_t)___GFP_DIRECT_RECLAIM) /* Caller can reclaim */ #define __GFP_KSWAPD_RECLAIM ((__force gfp_t)___GFP_KSWAPD_RECLAIM) /* kswapd can wake */ #define __GFP_RECLAIM ((__force gfp_t)(___GFP_DIRECT_RECLAIM|___GFP_KSWAPD_RECLAIM)) -#define __GFP_RETRY_MAYFAIL ((__force gfp_t)___GFP_RETRY_MAYFAIL) +#define __GFP_REPEAT ((__force gfp_t)___GFP_REPEAT) #define __GFP_NOFAIL ((__force gfp_t)___GFP_NOFAIL) #define __GFP_NORETRY ((__force gfp_t)___GFP_NORETRY) -/** - * DOC: Action modifiers - * +/* * Action modifiers - * ~~~~~~~~~~~~~~~~ * - * %__GFP_NOWARN suppresses allocation failure reports. + * __GFP_COLD indicates that the caller does not expect to be used in the near + * future. Where possible, a cache-cold page will be returned. * - * %__GFP_COMP address compound page metadata. + * __GFP_NOWARN suppresses allocation failure reports. * - * %__GFP_ZERO returns a zeroed page on success. + * __GFP_COMP address compound page metadata. * - * %__GFP_ZEROTAGS returns a page with zeroed memory tags on success, if - * __GFP_ZERO is set. + * __GFP_ZERO returns a zeroed page on success. * - * %__GFP_SKIP_KASAN_POISON returns a page which does not need to be poisoned - * on deallocation. Typically used for userspace pages. Currently only has an - * effect in HW tags mode. + * __GFP_NOTRACK avoids tracking with kmemcheck. + * + * __GFP_NOTRACK_FALSE_POSITIVE is an alias of __GFP_NOTRACK. It's a means of + * distinguishing in the source between false positives and allocations that + * cannot be supported (e.g. page tables). 
+ * + * __GFP_OTHER_NODE is for allocations that are on a remote node but that + * should not be accounted for as a remote allocation in vmstat. A + * typical user would be khugepaged collapsing a huge page on a remote + * node. */ +#define __GFP_COLD ((__force gfp_t)___GFP_COLD) #define __GFP_NOWARN ((__force gfp_t)___GFP_NOWARN) #define __GFP_COMP ((__force gfp_t)___GFP_COMP) #define __GFP_ZERO ((__force gfp_t)___GFP_ZERO) -#define __GFP_ZEROTAGS ((__force gfp_t)___GFP_ZEROTAGS) -#define __GFP_SKIP_KASAN_POISON ((__force gfp_t)___GFP_SKIP_KASAN_POISON) - -/* Disable lockdep for GFP context tracking */ -#define __GFP_NOLOCKDEP ((__force gfp_t)___GFP_NOLOCKDEP) +#define __GFP_NOTRACK ((__force gfp_t)___GFP_NOTRACK) +#define __GFP_NOTRACK_FALSE_POSITIVE (__GFP_NOTRACK) +#define __GFP_OTHER_NODE ((__force gfp_t)___GFP_OTHER_NODE) /* Room for N __GFP_FOO bits */ -#define __GFP_BITS_SHIFT (25 + IS_ENABLED(CONFIG_LOCKDEP)) +#define __GFP_BITS_SHIFT 27 #define __GFP_BITS_MASK ((__force gfp_t)((1 << __GFP_BITS_SHIFT) - 1)) -/** - * DOC: Useful GFP flag combinations - * - * Useful GFP flag combinations - * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - * +/* * Useful GFP flag combinations that are commonly used. It is recommended * that subsystems start with one of these combinations and then set/clear - * %__GFP_FOO flags as necessary. + * __GFP_FOO flags as necessary. * - * %GFP_ATOMIC users can not sleep and need the allocation to succeed. A lower - * watermark is applied to allow access to "atomic reserves". - * The current implementation doesn't support NMI and few other strict - * non-preemptive contexts (e.g. raw_spin_lock). The same applies to %GFP_NOWAIT. + * GFP_ATOMIC users can not sleep and need the allocation to succeed. A lower + * watermark is applied to allow access to "atomic reserves" * - * %GFP_KERNEL is typical for kernel-internal allocations. The caller requires - * %ZONE_NORMAL or a lower zone for direct access but can direct reclaim. 
+ * GFP_KERNEL is typical for kernel-internal allocations. The caller requires + * ZONE_NORMAL or a lower zone for direct access but can direct reclaim. * - * %GFP_KERNEL_ACCOUNT is the same as GFP_KERNEL, except the allocation is - * accounted to kmemcg. + * GFP_KERNEL_ACCOUNT is the same as GFP_KERNEL, except the allocation is + * accounted to kmemcg. * - * %GFP_NOWAIT is for kernel allocations that should not stall for direct - * reclaim, start physical IO or use any filesystem callback. + * GFP_NOWAIT is for kernel allocations that should not stall for direct + * reclaim, start physical IO or use any filesystem callback. * - * %GFP_NOIO will use direct reclaim to discard clean pages or slab pages - * that do not require the starting of any physical IO. - * Please try to avoid using this flag directly and instead use - * memalloc_noio_{save,restore} to mark the whole scope which cannot - * perform any IO with a short explanation why. All allocation requests - * will inherit GFP_NOIO implicitly. + * GFP_NOIO will use direct reclaim to discard clean pages or slab pages + * that do not require the starting of any physical IO. * - * %GFP_NOFS will use direct reclaim but will not use any filesystem interfaces. - * Please try to avoid using this flag directly and instead use - * memalloc_nofs_{save,restore} to mark the whole scope which cannot/shouldn't - * recurse into the FS layer with a short explanation why. All allocation - * requests will inherit GFP_NOFS implicitly. + * GFP_NOFS will use direct reclaim but will not use any filesystem interfaces. * - * %GFP_USER is for userspace allocations that also need to be directly - * accessibly by the kernel or hardware. It is typically used by hardware - * for buffers that are mapped to userspace (e.g. graphics) that hardware - * still must DMA to. cpuset limits are enforced for these allocations. + * GFP_USER is for userspace allocations that also need to be directly + * accessibly by the kernel or hardware. 
It is typically used by hardware + * for buffers that are mapped to userspace (e.g. graphics) that hardware + * still must DMA to. cpuset limits are enforced for these allocations. * - * %GFP_DMA exists for historical reasons and should be avoided where possible. - * The flags indicates that the caller requires that the lowest zone be - * used (%ZONE_DMA or 16M on x86-64). Ideally, this would be removed but - * it would require careful auditing as some users really require it and - * others use the flag to avoid lowmem reserves in %ZONE_DMA and treat the - * lowest zone as a type of emergency reserve. + * GFP_DMA exists for historical reasons and should be avoided where possible. + * The flags indicates that the caller requires that the lowest zone be + * used (ZONE_DMA or 16M on x86-64). Ideally, this would be removed but + * it would require careful auditing as some users really require it and + * others use the flag to avoid lowmem reserves in ZONE_DMA and treat the + * lowest zone as a type of emergency reserve. * - * %GFP_DMA32 is similar to %GFP_DMA except that the caller requires a 32-bit - * address. + * GFP_DMA32 is similar to GFP_DMA except that the caller requires a 32-bit + * address. * - * %GFP_HIGHUSER is for userspace allocations that may be mapped to userspace, - * do not need to be directly accessible by the kernel but that cannot - * move once in use. An example may be a hardware allocation that maps - * data directly into userspace but has no addressing limitations. + * GFP_HIGHUSER is for userspace allocations that may be mapped to userspace, + * do not need to be directly accessible by the kernel but that cannot + * move once in use. An example may be a hardware allocation that maps + * data directly into userspace but has no addressing limitations. * - * %GFP_HIGHUSER_MOVABLE is for userspace allocations that the kernel does not - * need direct access to but can use kmap() when access is required. 
They - * are expected to be movable via page reclaim or page migration. Typically, - * pages on the LRU would also be allocated with %GFP_HIGHUSER_MOVABLE. + * GFP_HIGHUSER_MOVABLE is for userspace allocations that the kernel does not + * need direct access to but can use kmap() when access is required. They + * are expected to be movable via page reclaim or page migration. Typically, + * pages on the LRU would also be allocated with GFP_HIGHUSER_MOVABLE. * - * %GFP_TRANSHUGE and %GFP_TRANSHUGE_LIGHT are used for THP allocations. They - * are compound allocations that will generally fail quickly if memory is not - * available and will not wake kswapd/kcompactd on failure. The _LIGHT - * version does not attempt reclaim/compaction at all and is by default used - * in page fault path, while the non-light is used by khugepaged. + * GFP_TRANSHUGE and GFP_TRANSHUGE_LIGHT are used for THP allocations. They are + * compound allocations that will generally fail quickly if memory is not + * available and will not wake kswapd/kcompactd on failure. The _LIGHT + * version does not attempt reclaim/compaction at all and is by default used + * in page fault path, while the non-light is used by khugepaged. 
*/ #define GFP_ATOMIC (__GFP_HIGH|__GFP_ATOMIC|__GFP_KSWAPD_RECLAIM) #define GFP_KERNEL (__GFP_RECLAIM | __GFP_IO | __GFP_FS) @@ -326,21 +259,24 @@ struct vm_area_struct; #define GFP_NOWAIT (__GFP_KSWAPD_RECLAIM) #define GFP_NOIO (__GFP_RECLAIM) #define GFP_NOFS (__GFP_RECLAIM | __GFP_IO) +#define GFP_TEMPORARY (__GFP_RECLAIM | __GFP_IO | __GFP_FS | \ + __GFP_RECLAIMABLE) #define GFP_USER (__GFP_RECLAIM | __GFP_IO | __GFP_FS | __GFP_HARDWALL) #define GFP_DMA __GFP_DMA #define GFP_DMA32 __GFP_DMA32 #define GFP_HIGHUSER (GFP_USER | __GFP_HIGHMEM) -#define GFP_HIGHUSER_MOVABLE (GFP_HIGHUSER | __GFP_MOVABLE | \ - __GFP_SKIP_KASAN_POISON) +#define GFP_HIGHUSER_MOVABLE (GFP_HIGHUSER | __GFP_MOVABLE) #define GFP_TRANSHUGE_LIGHT ((GFP_HIGHUSER_MOVABLE | __GFP_COMP | \ __GFP_NOMEMALLOC | __GFP_NOWARN) & ~__GFP_RECLAIM) #define GFP_TRANSHUGE (GFP_TRANSHUGE_LIGHT | __GFP_DIRECT_RECLAIM) +#define GFP_USERCOPY __GFP_USERCOPY + /* Convert GFP flags to their corresponding migrate type */ #define GFP_MOVABLE_MASK (__GFP_RECLAIMABLE|__GFP_MOVABLE) #define GFP_MOVABLE_SHIFT 3 -static inline int gfp_migratetype(const gfp_t gfp_flags) +static inline int gfpflags_to_migratetype(const gfp_t gfp_flags) { VM_WARN_ON((gfp_flags & GFP_MOVABLE_MASK) == GFP_MOVABLE_MASK); BUILD_BUG_ON((1UL << GFP_MOVABLE_SHIFT) != ___GFP_MOVABLE); @@ -360,29 +296,6 @@ static inline bool gfpflags_allow_blocking(const gfp_t gfp_flags) return !!(gfp_flags & __GFP_DIRECT_RECLAIM); } -/** - * gfpflags_normal_context - is gfp_flags a normal sleepable context? - * @gfp_flags: gfp_flags to test - * - * Test whether @gfp_flags indicates that the allocation is from the - * %current context and allowed to sleep. - * - * An allocation being allowed to block doesn't mean it owns the %current - * context. When direct reclaim path tries to allocate memory, the - * allocation context is nested inside whatever %current was doing at the - * time of the original allocation. 
The nested allocation may be allowed - * to block but modifying anything %current owns can corrupt the outer - * context's expectations. - * - * %true result from this function indicates that the allocation context - * can sleep and use anything that's associated with %current. - */ -static inline bool gfpflags_normal_context(const gfp_t gfp_flags) -{ - return (gfp_flags & (__GFP_DIRECT_RECLAIM | __GFP_MEMALLOC)) == - __GFP_DIRECT_RECLAIM; -} - #ifdef CONFIG_HIGHMEM #define OPT_ZONE_HIGHMEM ZONE_HIGHMEM #else @@ -403,8 +316,8 @@ static inline bool gfpflags_normal_context(const gfp_t gfp_flags) /* * GFP_ZONE_TABLE is a word size bitstring that is used for looking up the - * zone to use given the lowest 4 bits of gfp_t. Entries are GFP_ZONES_SHIFT - * bits long and there are 16 of them to cover all possible combinations of + * zone to use given the lowest 4 bits of gfp_t. Entries are ZONE_SHIFT long + * and there are 16 of them to cover all possible combinations of * __GFP_DMA, __GFP_DMA32, __GFP_MOVABLE and __GFP_HIGHMEM. * * The zone fallback order is MOVABLE=>HIGHMEM=>NORMAL=>DMA32=>DMA. @@ -418,7 +331,7 @@ static inline bool gfpflags_normal_context(const gfp_t gfp_flags) * 0x1 => DMA or NORMAL * 0x2 => HIGHMEM or NORMAL * 0x3 => BAD (DMA+HIGHMEM) - * 0x4 => DMA32 or NORMAL + * 0x4 => DMA32 or DMA or NORMAL * 0x5 => BAD (DMA+DMA32) * 0x6 => BAD (HIGHMEM+DMA32) * 0x7 => BAD (HIGHMEM+DMA32+DMA) @@ -426,7 +339,7 @@ static inline bool gfpflags_normal_context(const gfp_t gfp_flags) * 0x9 => DMA or NORMAL (MOVABLE+DMA) * 0xa => MOVABLE (Movable is valid only if HIGHMEM is set too) * 0xb => BAD (MOVABLE+HIGHMEM+DMA) - * 0xc => DMA32 or NORMAL (MOVABLE+DMA32) + * 0xc => DMA32 (MOVABLE+DMA32) * 0xd => BAD (MOVABLE+DMA32+DMA) * 0xe => BAD (MOVABLE+DMA32+HIGHMEM) * 0xf => BAD (MOVABLE+DMA32+HIGHMEM+DMA) @@ -502,12 +415,12 @@ static inline int gfp_zonelist(gfp_t flags) /* * We get the zone list from the current node and the gfp_mask. 
- * This zone list contains a maximum of MAX_NUMNODES*MAX_NR_ZONES zones. + * This zone list contains a maximum of MAXNODES*MAX_NR_ZONES zones. * There are two zonelists per node, one for all zones with memory and * one containing just zones from the node the zonelist belongs to. * - * For the case of non-NUMA systems the NODE_DATA() gets optimized to - * &contig_page_data at compile-time. + * For the normal case of non-DISCONTIGMEM systems the NODE_DATA() gets + * optimized to &contig_page_data at compile-time. */ static inline struct zonelist *node_zonelist(int nid, gfp_t flags) { @@ -520,41 +433,16 @@ static inline void arch_free_page(struct page *page, int order) { } #ifndef HAVE_ARCH_ALLOC_PAGE static inline void arch_alloc_page(struct page *page, int order) { } #endif -#ifndef HAVE_ARCH_MAKE_PAGE_ACCESSIBLE -static inline int arch_make_page_accessible(struct page *page) + +struct page * +__alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order, + struct zonelist *zonelist, nodemask_t *nodemask); + +static inline struct page * +__alloc_pages(gfp_t gfp_mask, unsigned int order, + struct zonelist *zonelist) { - return 0; -} -#endif - -struct page *__alloc_pages(gfp_t gfp, unsigned int order, int preferred_nid, - nodemask_t *nodemask); - -unsigned long __alloc_pages_bulk(gfp_t gfp, int preferred_nid, - nodemask_t *nodemask, int nr_pages, - struct list_head *page_list, - struct page **page_array); - -/* Bulk allocate order-0 pages */ -static inline unsigned long -alloc_pages_bulk_list(gfp_t gfp, unsigned long nr_pages, struct list_head *list) -{ - return __alloc_pages_bulk(gfp, numa_mem_id(), NULL, nr_pages, list, NULL); -} - -static inline unsigned long -alloc_pages_bulk_array(gfp_t gfp, unsigned long nr_pages, struct page **page_array) -{ - return __alloc_pages_bulk(gfp, numa_mem_id(), NULL, nr_pages, NULL, page_array); -} - -static inline unsigned long -alloc_pages_bulk_array_node(gfp_t gfp, int nid, unsigned long nr_pages, struct page **page_array) -{ - if 
(nid == NUMA_NO_NODE) - nid = numa_mem_id(); - - return __alloc_pages_bulk(gfp, nid, NULL, nr_pages, NULL, page_array); + return __alloc_pages_nodemask(gfp_mask, order, zonelist, NULL); } /* @@ -565,9 +453,9 @@ static inline struct page * __alloc_pages_node(int nid, gfp_t gfp_mask, unsigned int order) { VM_BUG_ON(nid < 0 || nid >= MAX_NUMNODES); - VM_WARN_ON((gfp_mask & __GFP_THISNODE) && !node_online(nid)); + VM_WARN_ON(!node_online(nid)); - return __alloc_pages(gfp_mask, order, nid, NULL); + return __alloc_pages(gfp_mask, order, node_zonelist(nid, gfp_mask)); } /* @@ -585,25 +473,31 @@ static inline struct page *alloc_pages_node(int nid, gfp_t gfp_mask, } #ifdef CONFIG_NUMA -struct page *alloc_pages(gfp_t gfp, unsigned int order); +extern struct page *alloc_pages_current(gfp_t gfp_mask, unsigned order); + +static inline struct page * +alloc_pages(gfp_t gfp_mask, unsigned int order) +{ + return alloc_pages_current(gfp_mask, order); +} extern struct page *alloc_pages_vma(gfp_t gfp_mask, int order, struct vm_area_struct *vma, unsigned long addr, int node, bool hugepage); -#define alloc_hugepage_vma(gfp_mask, vma, addr, order) \ +#define alloc_hugepage_vma(gfp_mask, vma, addr, order) \ alloc_pages_vma(gfp_mask, order, vma, addr, numa_node_id(), true) #else -static inline struct page *alloc_pages(gfp_t gfp_mask, unsigned int order) -{ - return alloc_pages_node(numa_node_id(), gfp_mask, order); -} +#define alloc_pages(gfp_mask, order) \ + alloc_pages_node(numa_node_id(), gfp_mask, order) #define alloc_pages_vma(gfp_mask, order, vma, addr, node, false)\ alloc_pages(gfp_mask, order) -#define alloc_hugepage_vma(gfp_mask, vma, addr, order) \ +#define alloc_hugepage_vma(gfp_mask, vma, addr, order) \ alloc_pages(gfp_mask, order) #endif #define alloc_page(gfp_mask) alloc_pages(gfp_mask, 0) #define alloc_page_vma(gfp_mask, vma, addr) \ alloc_pages_vma(gfp_mask, 0, vma, addr, numa_node_id(), false) +#define alloc_page_vma_node(gfp_mask, vma, addr, node) \ + 
alloc_pages_vma(gfp_mask, 0, vma, addr, node, false) extern unsigned long __get_free_pages(gfp_t gfp_mask, unsigned int order); extern unsigned long get_zeroed_page(gfp_t gfp_mask); @@ -620,20 +514,13 @@ void * __meminit alloc_pages_exact_nid(int nid, size_t size, gfp_t gfp_mask); extern void __free_pages(struct page *page, unsigned int order); extern void free_pages(unsigned long addr, unsigned int order); +extern void free_hot_cold_page(struct page *page, bool cold); +extern void free_hot_cold_page_list(struct list_head *list, bool cold); struct page_frag_cache; -extern void __page_frag_cache_drain(struct page *page, unsigned int count); -extern void *page_frag_alloc_align(struct page_frag_cache *nc, - unsigned int fragsz, gfp_t gfp_mask, - unsigned int align_mask); - -static inline void *page_frag_alloc(struct page_frag_cache *nc, - unsigned int fragsz, gfp_t gfp_mask) -{ - return page_frag_alloc_align(nc, fragsz, gfp_mask, ~0u); -} - -extern void page_frag_free(void *addr); +extern void *__alloc_page_frag(struct page_frag_cache *nc, + unsigned int fragsz, gfp_t gfp_mask); +extern void __free_page_frag(void *addr); #define __free_page(page) __free_pages((page), 0) #define free_page(addr) free_pages((addr), 0) @@ -641,7 +528,7 @@ extern void page_frag_free(void *addr); void page_alloc_init(void); void drain_zone_pages(struct zone *zone, struct per_cpu_pages *pcp); void drain_all_pages(struct zone *zone); -void drain_local_pages(struct zone *zone); +void drain_local_pages(void *zone); void page_alloc_init_late(void); @@ -660,8 +547,6 @@ bool gfp_pfmemalloc_allowed(gfp_t gfp_mask); extern void pm_restrict_gfp_mask(void); extern void pm_restore_gfp_mask(void); -extern gfp_t vma_thp_gfp_mask(struct vm_area_struct *vma); - #ifdef CONFIG_PM_SLEEP extern bool pm_suspended_storage(void); #else @@ -671,14 +556,12 @@ static inline bool pm_suspended_storage(void) } #endif /* CONFIG_PM_SLEEP */ -#ifdef CONFIG_CONTIG_ALLOC +#if (defined(CONFIG_MEMORY_ISOLATION) && 
defined(CONFIG_COMPACTION)) || defined(CONFIG_CMA) /* The below functions must be run on a range from a single zone. */ extern int alloc_contig_range(unsigned long start, unsigned long end, - unsigned migratetype, gfp_t gfp_mask); -extern struct page *alloc_contig_pages(unsigned long nr_pages, gfp_t gfp_mask, - int nid, nodemask_t *nodemask); + unsigned migratetype); +extern void free_contig_range(unsigned long pfn, unsigned nr_pages); #endif -void free_contig_range(unsigned long pfn, unsigned long nr_pages); #ifdef CONFIG_CMA /* CMA stuff */ diff --git a/include/linux/glob.h b/include/linux/glob.h index 861327b33e..861d8347d0 100644 --- a/include/linux/glob.h +++ b/include/linux/glob.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_GLOB_H #define _LINUX_GLOB_H diff --git a/include/linux/goldfish.h b/include/linux/goldfish.h index 12be1601fd..93e080b39c 100644 --- a/include/linux/goldfish.h +++ b/include/linux/goldfish.h @@ -1,21 +1,14 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef __LINUX_GOLDFISH_H #define __LINUX_GOLDFISH_H -#include -#include -#include - /* Helpers for Goldfish virtual platform */ static inline void gf_write_ptr(const void *ptr, void __iomem *portl, void __iomem *porth) { - const unsigned long addr = (unsigned long)ptr; - - __raw_writel(lower_32_bits(addr), portl); + writel((u32)(unsigned long)ptr, portl); #ifdef CONFIG_64BIT - __raw_writel(upper_32_bits(addr), porth); + writel((unsigned long)ptr >> 32, porth); #endif } @@ -23,9 +16,9 @@ static inline void gf_write_dma_addr(const dma_addr_t addr, void __iomem *portl, void __iomem *porth) { - __raw_writel(lower_32_bits(addr), portl); + writel((u32)addr, portl); #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT - __raw_writel(upper_32_bits(addr), porth); + writel(addr >> 32, porth); #endif } diff --git a/include/linux/gpio-fan.h b/include/linux/gpio-fan.h new file mode 100644 index 0000000000..0966591692 --- /dev/null +++ b/include/linux/gpio-fan.h @@ -0,0 +1,36 @@ +/* + * 
include/linux/gpio-fan.h + * + * Platform data structure for GPIO fan driver + * + * This file is licensed under the terms of the GNU General Public + * License version 2. This program is licensed "as is" without any + * warranty of any kind, whether express or implied. + */ + +#ifndef __LINUX_GPIO_FAN_H +#define __LINUX_GPIO_FAN_H + +struct gpio_fan_alarm { + unsigned gpio; + unsigned active_low; +}; + +struct gpio_fan_speed { + int rpm; + int ctrl_val; +}; + +struct gpio_fan_platform_data { + int num_ctrl; + unsigned *ctrl; /* fan control GPIOs. */ + struct gpio_fan_alarm *alarm; /* fan alarm GPIO. */ + /* + * Speed conversion array: rpm from/to GPIO bit field. + * This array _must_ be sorted in ascending rpm order. + */ + int num_speed; + struct gpio_fan_speed *speed; +}; + +#endif /* __LINUX_GPIO_FAN_H */ diff --git a/include/linux/gpio-pxa.h b/include/linux/gpio-pxa.h index 1e1fa01604..d90ebbe02c 100644 --- a/include/linux/gpio-pxa.h +++ b/include/linux/gpio-pxa.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef __GPIO_PXA_H #define __GPIO_PXA_H diff --git a/include/linux/gpio.h b/include/linux/gpio.h index 008ad3ee56..d12b5d566e 100644 --- a/include/linux/gpio.h +++ b/include/linux/gpio.h @@ -1,20 +1,9 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* - * - * - * This is the LEGACY GPIO bulk include file, including legacy APIs. It is - * used for GPIO drivers still referencing the global GPIO numberspace, - * and should not be included in new code. 
- * - * If you're implementing a GPIO driver, only include - * If you're implementing a GPIO consumer, only include - */ #ifndef __LINUX_GPIO_H #define __LINUX_GPIO_H #include -/* see Documentation/driver-api/gpio/legacy.rst */ +/* see Documentation/gpio/gpio-legacy.txt */ /* make these flag values available regardless of GPIO kconfig options */ #define GPIOF_DIR_OUT (0 << 0) @@ -102,6 +91,7 @@ void devm_gpio_free(struct device *dev, unsigned int gpio); #include #include #include +#include struct device; struct gpio_chip; @@ -219,6 +209,19 @@ static inline int gpio_to_irq(unsigned gpio) return -EINVAL; } +static inline int gpiochip_lock_as_irq(struct gpio_chip *chip, + unsigned int offset) +{ + WARN_ON(1); + return -EINVAL; +} + +static inline void gpiochip_unlock_as_irq(struct gpio_chip *chip, + unsigned int offset) +{ + WARN_ON(1); +} + static inline int irq_to_gpio(unsigned irq) { /* irq can never have been returned from gpio_to_irq() */ @@ -226,6 +229,30 @@ static inline int irq_to_gpio(unsigned irq) return -EINVAL; } +static inline int +gpiochip_add_pin_range(struct gpio_chip *chip, const char *pinctl_name, + unsigned int gpio_offset, unsigned int pin_offset, + unsigned int npins) +{ + WARN_ON(1); + return -EINVAL; +} + +static inline int +gpiochip_add_pingroup_range(struct gpio_chip *chip, + struct pinctrl_dev *pctldev, + unsigned int gpio_offset, const char *pin_group) +{ + WARN_ON(1); + return -EINVAL; +} + +static inline void +gpiochip_remove_pin_ranges(struct gpio_chip *chip) +{ + WARN_ON(1); +} + static inline int devm_gpio_request(struct device *dev, unsigned gpio, const char *label) { diff --git a/include/linux/gpio/consumer.h b/include/linux/gpio/consumer.h index 97a28ad339..fb0fde686c 100644 --- a/include/linux/gpio/consumer.h +++ b/include/linux/gpio/consumer.h @@ -1,11 +1,9 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef __LINUX_GPIO_CONSUMER_H #define __LINUX_GPIO_CONSUMER_H -#include #include -#include #include +#include struct device; @@ 
-18,20 +16,11 @@ struct device; */ struct gpio_desc; -/** - * Opaque descriptor for a structure of GPIO array attributes. This structure - * is attached to struct gpiod_descs obtained from gpiod_get_array() and can be - * passed back to get/set array functions in order to activate fast processing - * path if applicable. - */ -struct gpio_array; - /** * Struct containing an array of descriptors that can be obtained using * gpiod_get_array(). */ struct gpio_descs { - struct gpio_array *info; unsigned int ndescs; struct gpio_desc *desc[]; }; @@ -39,8 +28,6 @@ struct gpio_descs { #define GPIOD_FLAGS_BIT_DIR_SET BIT(0) #define GPIOD_FLAGS_BIT_DIR_OUT BIT(1) #define GPIOD_FLAGS_BIT_DIR_VAL BIT(2) -#define GPIOD_FLAGS_BIT_OPEN_DRAIN BIT(3) -#define GPIOD_FLAGS_BIT_NONEXCLUSIVE BIT(4) /** * Optional flags that can be passed to one of gpiod_* to configure direction @@ -52,8 +39,6 @@ enum gpiod_flags { GPIOD_OUT_LOW = GPIOD_FLAGS_BIT_DIR_SET | GPIOD_FLAGS_BIT_DIR_OUT, GPIOD_OUT_HIGH = GPIOD_FLAGS_BIT_DIR_SET | GPIOD_FLAGS_BIT_DIR_OUT | GPIOD_FLAGS_BIT_DIR_VAL, - GPIOD_OUT_LOW_OPEN_DRAIN = GPIOD_OUT_LOW | GPIOD_FLAGS_BIT_OPEN_DRAIN, - GPIOD_OUT_HIGH_OPEN_DRAIN = GPIOD_OUT_HIGH | GPIOD_FLAGS_BIT_OPEN_DRAIN, }; #ifdef CONFIG_GPIOLIB @@ -105,7 +90,6 @@ struct gpio_descs *__must_check devm_gpiod_get_array_optional(struct device *dev, const char *con_id, enum gpiod_flags flags); void devm_gpiod_put(struct device *dev, struct gpio_desc *desc); -void devm_gpiod_unhinge(struct device *dev, struct gpio_desc *desc); void devm_gpiod_put_array(struct device *dev, struct gpio_descs *descs); int gpiod_get_direction(struct gpio_desc *desc); @@ -115,58 +99,33 @@ int gpiod_direction_output_raw(struct gpio_desc *desc, int value); /* Value get/set from non-sleeping context */ int gpiod_get_value(const struct gpio_desc *desc); -int gpiod_get_array_value(unsigned int array_size, - struct gpio_desc **desc_array, - struct gpio_array *array_info, - unsigned long *value_bitmap); void 
gpiod_set_value(struct gpio_desc *desc, int value); -int gpiod_set_array_value(unsigned int array_size, - struct gpio_desc **desc_array, - struct gpio_array *array_info, - unsigned long *value_bitmap); +void gpiod_set_array_value(unsigned int array_size, + struct gpio_desc **desc_array, int *value_array); int gpiod_get_raw_value(const struct gpio_desc *desc); -int gpiod_get_raw_array_value(unsigned int array_size, - struct gpio_desc **desc_array, - struct gpio_array *array_info, - unsigned long *value_bitmap); void gpiod_set_raw_value(struct gpio_desc *desc, int value); -int gpiod_set_raw_array_value(unsigned int array_size, - struct gpio_desc **desc_array, - struct gpio_array *array_info, - unsigned long *value_bitmap); +void gpiod_set_raw_array_value(unsigned int array_size, + struct gpio_desc **desc_array, + int *value_array); /* Value get/set from sleeping context */ int gpiod_get_value_cansleep(const struct gpio_desc *desc); -int gpiod_get_array_value_cansleep(unsigned int array_size, - struct gpio_desc **desc_array, - struct gpio_array *array_info, - unsigned long *value_bitmap); void gpiod_set_value_cansleep(struct gpio_desc *desc, int value); -int gpiod_set_array_value_cansleep(unsigned int array_size, - struct gpio_desc **desc_array, - struct gpio_array *array_info, - unsigned long *value_bitmap); +void gpiod_set_array_value_cansleep(unsigned int array_size, + struct gpio_desc **desc_array, + int *value_array); int gpiod_get_raw_value_cansleep(const struct gpio_desc *desc); -int gpiod_get_raw_array_value_cansleep(unsigned int array_size, - struct gpio_desc **desc_array, - struct gpio_array *array_info, - unsigned long *value_bitmap); void gpiod_set_raw_value_cansleep(struct gpio_desc *desc, int value); -int gpiod_set_raw_array_value_cansleep(unsigned int array_size, - struct gpio_desc **desc_array, - struct gpio_array *array_info, - unsigned long *value_bitmap); +void gpiod_set_raw_array_value_cansleep(unsigned int array_size, + struct gpio_desc 
**desc_array, + int *value_array); -int gpiod_set_config(struct gpio_desc *desc, unsigned long config); -int gpiod_set_debounce(struct gpio_desc *desc, unsigned int debounce); -int gpiod_set_transitory(struct gpio_desc *desc, bool transitory); -void gpiod_toggle_active_low(struct gpio_desc *desc); +int gpiod_set_debounce(struct gpio_desc *desc, unsigned debounce); int gpiod_is_active_low(const struct gpio_desc *desc); int gpiod_cansleep(const struct gpio_desc *desc); int gpiod_to_irq(const struct gpio_desc *desc); -int gpiod_set_consumer_name(struct gpio_desc *desc, const char *name); /* Convert between the old gpio_ and new gpiod_ interfaces */ struct gpio_desc *gpio_to_desc(unsigned gpio); @@ -176,23 +135,12 @@ int desc_to_gpio(const struct gpio_desc *desc); struct fwnode_handle; struct gpio_desc *fwnode_get_named_gpiod(struct fwnode_handle *fwnode, - const char *propname, int index, - enum gpiod_flags dflags, - const char *label); -struct gpio_desc *fwnode_gpiod_get_index(struct fwnode_handle *fwnode, - const char *con_id, int index, - enum gpiod_flags flags, - const char *label); -struct gpio_desc *devm_fwnode_gpiod_get_index(struct device *dev, - struct fwnode_handle *child, - const char *con_id, int index, - enum gpiod_flags flags, - const char *label); - + const char *propname); +struct gpio_desc *devm_get_gpiod_from_child(struct device *dev, + const char *con_id, + struct fwnode_handle *child); #else /* CONFIG_GPIOLIB */ -#include - static inline int gpiod_count(struct device *dev, const char *con_id) { return 0; @@ -217,14 +165,14 @@ static inline struct gpio_desc *__must_check gpiod_get_optional(struct device *dev, const char *con_id, enum gpiod_flags flags) { - return NULL; + return ERR_PTR(-ENOSYS); } static inline struct gpio_desc *__must_check gpiod_get_index_optional(struct device *dev, const char *con_id, unsigned int index, enum gpiod_flags flags) { - return NULL; + return ERR_PTR(-ENOSYS); } static inline struct gpio_descs *__must_check @@ -238,7 
+186,7 @@ static inline struct gpio_descs *__must_check gpiod_get_array_optional(struct device *dev, const char *con_id, enum gpiod_flags flags) { - return NULL; + return ERR_PTR(-ENOSYS); } static inline void gpiod_put(struct gpio_desc *desc) @@ -246,16 +194,7 @@ static inline void gpiod_put(struct gpio_desc *desc) might_sleep(); /* GPIO can never have been requested */ - WARN_ON(desc); -} - -static inline void devm_gpiod_unhinge(struct device *dev, - struct gpio_desc *desc) -{ - might_sleep(); - - /* GPIO can never have been requested */ - WARN_ON(desc); + WARN_ON(1); } static inline void gpiod_put_array(struct gpio_descs *descs) @@ -263,7 +202,7 @@ static inline void gpiod_put_array(struct gpio_descs *descs) might_sleep(); /* GPIO can never have been requested */ - WARN_ON(descs); + WARN_ON(1); } static inline struct gpio_desc *__must_check @@ -287,14 +226,14 @@ static inline struct gpio_desc *__must_check devm_gpiod_get_optional(struct device *dev, const char *con_id, enum gpiod_flags flags) { - return NULL; + return ERR_PTR(-ENOSYS); } static inline struct gpio_desc *__must_check devm_gpiod_get_index_optional(struct device *dev, const char *con_id, unsigned int index, enum gpiod_flags flags) { - return NULL; + return ERR_PTR(-ENOSYS); } static inline struct gpio_descs *__must_check @@ -308,7 +247,7 @@ static inline struct gpio_descs *__must_check devm_gpiod_get_array_optional(struct device *dev, const char *con_id, enum gpiod_flags flags) { - return NULL; + return ERR_PTR(-ENOSYS); } static inline void devm_gpiod_put(struct device *dev, struct gpio_desc *desc) @@ -316,7 +255,7 @@ static inline void devm_gpiod_put(struct device *dev, struct gpio_desc *desc) might_sleep(); /* GPIO can never have been requested */ - WARN_ON(desc); + WARN_ON(1); } static inline void devm_gpiod_put_array(struct device *dev, @@ -325,32 +264,32 @@ static inline void devm_gpiod_put_array(struct device *dev, might_sleep(); /* GPIO can never have been requested */ - WARN_ON(descs); + 
WARN_ON(1); } static inline int gpiod_get_direction(const struct gpio_desc *desc) { /* GPIO can never have been requested */ - WARN_ON(desc); + WARN_ON(1); return -ENOSYS; } static inline int gpiod_direction_input(struct gpio_desc *desc) { /* GPIO can never have been requested */ - WARN_ON(desc); + WARN_ON(1); return -ENOSYS; } static inline int gpiod_direction_output(struct gpio_desc *desc, int value) { /* GPIO can never have been requested */ - WARN_ON(desc); + WARN_ON(1); return -ENOSYS; } static inline int gpiod_direction_output_raw(struct gpio_desc *desc, int value) { /* GPIO can never have been requested */ - WARN_ON(desc); + WARN_ON(1); return -ENOSYS; } @@ -358,361 +297,134 @@ static inline int gpiod_direction_output_raw(struct gpio_desc *desc, int value) static inline int gpiod_get_value(const struct gpio_desc *desc) { /* GPIO can never have been requested */ - WARN_ON(desc); - return 0; -} -static inline int gpiod_get_array_value(unsigned int array_size, - struct gpio_desc **desc_array, - struct gpio_array *array_info, - unsigned long *value_bitmap) -{ - /* GPIO can never have been requested */ - WARN_ON(desc_array); + WARN_ON(1); return 0; } static inline void gpiod_set_value(struct gpio_desc *desc, int value) { /* GPIO can never have been requested */ - WARN_ON(desc); + WARN_ON(1); } -static inline int gpiod_set_array_value(unsigned int array_size, - struct gpio_desc **desc_array, - struct gpio_array *array_info, - unsigned long *value_bitmap) +static inline void gpiod_set_array_value(unsigned int array_size, + struct gpio_desc **desc_array, + int *value_array) { /* GPIO can never have been requested */ - WARN_ON(desc_array); - return 0; + WARN_ON(1); } static inline int gpiod_get_raw_value(const struct gpio_desc *desc) { /* GPIO can never have been requested */ - WARN_ON(desc); - return 0; -} -static inline int gpiod_get_raw_array_value(unsigned int array_size, - struct gpio_desc **desc_array, - struct gpio_array *array_info, - unsigned long 
*value_bitmap) -{ - /* GPIO can never have been requested */ - WARN_ON(desc_array); + WARN_ON(1); return 0; } static inline void gpiod_set_raw_value(struct gpio_desc *desc, int value) { /* GPIO can never have been requested */ - WARN_ON(desc); + WARN_ON(1); } -static inline int gpiod_set_raw_array_value(unsigned int array_size, - struct gpio_desc **desc_array, - struct gpio_array *array_info, - unsigned long *value_bitmap) +static inline void gpiod_set_raw_array_value(unsigned int array_size, + struct gpio_desc **desc_array, + int *value_array) { /* GPIO can never have been requested */ - WARN_ON(desc_array); - return 0; + WARN_ON(1); } static inline int gpiod_get_value_cansleep(const struct gpio_desc *desc) { /* GPIO can never have been requested */ - WARN_ON(desc); - return 0; -} -static inline int gpiod_get_array_value_cansleep(unsigned int array_size, - struct gpio_desc **desc_array, - struct gpio_array *array_info, - unsigned long *value_bitmap) -{ - /* GPIO can never have been requested */ - WARN_ON(desc_array); + WARN_ON(1); return 0; } static inline void gpiod_set_value_cansleep(struct gpio_desc *desc, int value) { /* GPIO can never have been requested */ - WARN_ON(desc); + WARN_ON(1); } -static inline int gpiod_set_array_value_cansleep(unsigned int array_size, +static inline void gpiod_set_array_value_cansleep(unsigned int array_size, struct gpio_desc **desc_array, - struct gpio_array *array_info, - unsigned long *value_bitmap) + int *value_array) { /* GPIO can never have been requested */ - WARN_ON(desc_array); - return 0; + WARN_ON(1); } static inline int gpiod_get_raw_value_cansleep(const struct gpio_desc *desc) { /* GPIO can never have been requested */ - WARN_ON(desc); - return 0; -} -static inline int gpiod_get_raw_array_value_cansleep(unsigned int array_size, - struct gpio_desc **desc_array, - struct gpio_array *array_info, - unsigned long *value_bitmap) -{ - /* GPIO can never have been requested */ - WARN_ON(desc_array); + WARN_ON(1); return 0; } 
static inline void gpiod_set_raw_value_cansleep(struct gpio_desc *desc, int value) { /* GPIO can never have been requested */ - WARN_ON(desc); + WARN_ON(1); } -static inline int gpiod_set_raw_array_value_cansleep(unsigned int array_size, +static inline void gpiod_set_raw_array_value_cansleep(unsigned int array_size, struct gpio_desc **desc_array, - struct gpio_array *array_info, - unsigned long *value_bitmap) + int *value_array) { /* GPIO can never have been requested */ - WARN_ON(desc_array); - return 0; + WARN_ON(1); } -static inline int gpiod_set_config(struct gpio_desc *desc, unsigned long config) +static inline int gpiod_set_debounce(struct gpio_desc *desc, unsigned debounce) { /* GPIO can never have been requested */ - WARN_ON(desc); + WARN_ON(1); return -ENOSYS; } -static inline int gpiod_set_debounce(struct gpio_desc *desc, unsigned int debounce) -{ - /* GPIO can never have been requested */ - WARN_ON(desc); - return -ENOSYS; -} - -static inline int gpiod_set_transitory(struct gpio_desc *desc, bool transitory) -{ - /* GPIO can never have been requested */ - WARN_ON(desc); - return -ENOSYS; -} - -static inline void gpiod_toggle_active_low(struct gpio_desc *desc) -{ - /* GPIO can never have been requested */ - WARN_ON(desc); -} - static inline int gpiod_is_active_low(const struct gpio_desc *desc) { /* GPIO can never have been requested */ - WARN_ON(desc); + WARN_ON(1); return 0; } static inline int gpiod_cansleep(const struct gpio_desc *desc) { /* GPIO can never have been requested */ - WARN_ON(desc); + WARN_ON(1); return 0; } static inline int gpiod_to_irq(const struct gpio_desc *desc) { /* GPIO can never have been requested */ - WARN_ON(desc); - return -EINVAL; -} - -static inline int gpiod_set_consumer_name(struct gpio_desc *desc, - const char *name) -{ - /* GPIO can never have been requested */ - WARN_ON(desc); + WARN_ON(1); return -EINVAL; } static inline struct gpio_desc *gpio_to_desc(unsigned gpio) { - return NULL; + return ERR_PTR(-EINVAL); } static 
inline int desc_to_gpio(const struct gpio_desc *desc) { /* GPIO can never have been requested */ - WARN_ON(desc); + WARN_ON(1); return -EINVAL; } /* Child properties interface */ struct fwnode_handle; -static inline -struct gpio_desc *fwnode_get_named_gpiod(struct fwnode_handle *fwnode, - const char *propname, int index, - enum gpiod_flags dflags, - const char *label) +static inline struct gpio_desc *fwnode_get_named_gpiod( + struct fwnode_handle *fwnode, const char *propname) { return ERR_PTR(-ENOSYS); } -static inline -struct gpio_desc *fwnode_gpiod_get_index(struct fwnode_handle *fwnode, - const char *con_id, int index, - enum gpiod_flags flags, - const char *label) -{ - return ERR_PTR(-ENOSYS); -} - -static inline -struct gpio_desc *devm_fwnode_gpiod_get_index(struct device *dev, - struct fwnode_handle *fwnode, - const char *con_id, int index, - enum gpiod_flags flags, - const char *label) +static inline struct gpio_desc *devm_get_gpiod_from_child( + struct device *dev, const char *con_id, struct fwnode_handle *child) { return ERR_PTR(-ENOSYS); } #endif /* CONFIG_GPIOLIB */ -static inline -struct gpio_desc *devm_fwnode_gpiod_get(struct device *dev, - struct fwnode_handle *fwnode, - const char *con_id, - enum gpiod_flags flags, - const char *label) -{ - return devm_fwnode_gpiod_get_index(dev, fwnode, con_id, 0, - flags, label); -} - -static inline -struct gpio_desc *devm_fwnode_get_index_gpiod_from_child(struct device *dev, - const char *con_id, int index, - struct fwnode_handle *child, - enum gpiod_flags flags, - const char *label) -{ - return devm_fwnode_gpiod_get_index(dev, child, con_id, index, - flags, label); -} - -static inline -struct gpio_desc *devm_fwnode_get_gpiod_from_child(struct device *dev, - const char *con_id, - struct fwnode_handle *child, - enum gpiod_flags flags, - const char *label) -{ - return devm_fwnode_gpiod_get_index(dev, child, con_id, 0, flags, label); -} - -#if IS_ENABLED(CONFIG_GPIOLIB) && IS_ENABLED(CONFIG_OF_GPIO) -struct 
device_node; - -struct gpio_desc *gpiod_get_from_of_node(const struct device_node *node, - const char *propname, int index, - enum gpiod_flags dflags, - const char *label); - -#else /* CONFIG_GPIOLIB && CONFIG_OF_GPIO */ - -struct device_node; - -static inline -struct gpio_desc *gpiod_get_from_of_node(const struct device_node *node, - const char *propname, int index, - enum gpiod_flags dflags, - const char *label) -{ - return ERR_PTR(-ENOSYS); -} - -#endif /* CONFIG_GPIOLIB && CONFIG_OF_GPIO */ - -#ifdef CONFIG_GPIOLIB -struct device_node; - -struct gpio_desc *devm_gpiod_get_from_of_node(struct device *dev, - const struct device_node *node, - const char *propname, int index, - enum gpiod_flags dflags, - const char *label); - -#else /* CONFIG_GPIOLIB */ - -struct device_node; - -static inline -struct gpio_desc *devm_gpiod_get_from_of_node(struct device *dev, - const struct device_node *node, - const char *propname, int index, - enum gpiod_flags dflags, - const char *label) -{ - return ERR_PTR(-ENOSYS); -} - -#endif /* CONFIG_GPIOLIB */ - -struct acpi_gpio_params { - unsigned int crs_entry_index; - unsigned int line_index; - bool active_low; -}; - -struct acpi_gpio_mapping { - const char *name; - const struct acpi_gpio_params *data; - unsigned int size; - -/* Ignore IoRestriction field */ -#define ACPI_GPIO_QUIRK_NO_IO_RESTRICTION BIT(0) -/* - * When ACPI GPIO mapping table is in use the index parameter inside it - * refers to the GPIO resource in _CRS method. That index has no - * distinction of actual type of the resource. When consumer wants to - * get GpioIo type explicitly, this quirk may be used. 
- */ -#define ACPI_GPIO_QUIRK_ONLY_GPIOIO BIT(1) -/* Use given pin as an absolute GPIO number in the system */ -#define ACPI_GPIO_QUIRK_ABSOLUTE_NUMBER BIT(2) - - unsigned int quirks; -}; - -struct acpi_device; - -#if IS_ENABLED(CONFIG_GPIOLIB) && IS_ENABLED(CONFIG_ACPI) - -int acpi_dev_add_driver_gpios(struct acpi_device *adev, - const struct acpi_gpio_mapping *gpios); -void acpi_dev_remove_driver_gpios(struct acpi_device *adev); - -int devm_acpi_dev_add_driver_gpios(struct device *dev, - const struct acpi_gpio_mapping *gpios); -void devm_acpi_dev_remove_driver_gpios(struct device *dev); - -struct gpio_desc *acpi_get_and_request_gpiod(char *path, int pin, char *label); - -#else /* CONFIG_GPIOLIB && CONFIG_ACPI */ - -static inline int acpi_dev_add_driver_gpios(struct acpi_device *adev, - const struct acpi_gpio_mapping *gpios) -{ - return -ENXIO; -} -static inline void acpi_dev_remove_driver_gpios(struct acpi_device *adev) {} - -static inline int devm_acpi_dev_add_driver_gpios(struct device *dev, - const struct acpi_gpio_mapping *gpios) -{ - return -ENXIO; -} -static inline void devm_acpi_dev_remove_driver_gpios(struct device *dev) {} - -#endif /* CONFIG_GPIOLIB && CONFIG_ACPI */ - - #if IS_ENABLED(CONFIG_GPIOLIB) && IS_ENABLED(CONFIG_GPIO_SYSFS) int gpiod_export(struct gpio_desc *desc, bool direction_may_change); diff --git a/include/linux/gpio/driver.h b/include/linux/gpio/driver.h index a0f9901dca..24e2cc56be 100644 --- a/include/linux/gpio/driver.h +++ b/include/linux/gpio/driver.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef __LINUX_GPIO_DRIVER_H #define __LINUX_GPIO_DRIVER_H @@ -9,7 +8,6 @@ #include #include #include -#include struct gpio_desc; struct of_phandle_args; @@ -17,255 +15,19 @@ struct device_node; struct seq_file; struct gpio_device; struct module; -enum gpiod_flags; -enum gpio_lookup_flags; -struct gpio_chip; - -#define GPIO_LINE_DIRECTION_IN 1 -#define GPIO_LINE_DIRECTION_OUT 0 +#ifdef CONFIG_GPIOLIB /** - * struct gpio_irq_chip 
- GPIO interrupt controller + * enum single_ended_mode - mode for single ended operation + * @LINE_MODE_PUSH_PULL: normal mode for a GPIO line, drive actively high/low + * @LINE_MODE_OPEN_DRAIN: set line to be open drain + * @LINE_MODE_OPEN_SOURCE: set line to be open source */ -struct gpio_irq_chip { - /** - * @chip: - * - * GPIO IRQ chip implementation, provided by GPIO driver. - */ - struct irq_chip *chip; - - /** - * @domain: - * - * Interrupt translation domain; responsible for mapping between GPIO - * hwirq number and Linux IRQ number. - */ - struct irq_domain *domain; - - /** - * @domain_ops: - * - * Table of interrupt domain operations for this IRQ chip. - */ - const struct irq_domain_ops *domain_ops; - -#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY - /** - * @fwnode: - * - * Firmware node corresponding to this gpiochip/irqchip, necessary - * for hierarchical irqdomain support. - */ - struct fwnode_handle *fwnode; - - /** - * @parent_domain: - * - * If non-NULL, will be set as the parent of this GPIO interrupt - * controller's IRQ domain to establish a hierarchical interrupt - * domain. The presence of this will activate the hierarchical - * interrupt support. - */ - struct irq_domain *parent_domain; - - /** - * @child_to_parent_hwirq: - * - * This callback translates a child hardware IRQ offset to a parent - * hardware IRQ offset on a hierarchical interrupt chip. The child - * hardware IRQs correspond to the GPIO index 0..ngpio-1 (see the - * ngpio field of struct gpio_chip) and the corresponding parent - * hardware IRQ and type (such as IRQ_TYPE_*) shall be returned by - * the driver. The driver can calculate this from an offset or using - * a lookup table or whatever method is best for this chip. Return - * 0 on successful translation in the driver. 
- * - * If some ranges of hardware IRQs do not have a corresponding parent - * HWIRQ, return -EINVAL, but also make sure to fill in @valid_mask and - * @need_valid_mask to make these GPIO lines unavailable for - * translation. - */ - int (*child_to_parent_hwirq)(struct gpio_chip *gc, - unsigned int child_hwirq, - unsigned int child_type, - unsigned int *parent_hwirq, - unsigned int *parent_type); - - /** - * @populate_parent_alloc_arg : - * - * This optional callback allocates and populates the specific struct - * for the parent's IRQ domain. If this is not specified, then - * &gpiochip_populate_parent_fwspec_twocell will be used. A four-cell - * variant named &gpiochip_populate_parent_fwspec_fourcell is also - * available. - */ - void *(*populate_parent_alloc_arg)(struct gpio_chip *gc, - unsigned int parent_hwirq, - unsigned int parent_type); - - /** - * @child_offset_to_irq: - * - * This optional callback is used to translate the child's GPIO line - * offset on the GPIO chip to an IRQ number for the GPIO to_irq() - * callback. If this is not specified, then a default callback will be - * provided that returns the line offset. - */ - unsigned int (*child_offset_to_irq)(struct gpio_chip *gc, - unsigned int pin); - - /** - * @child_irq_domain_ops: - * - * The IRQ domain operations that will be used for this GPIO IRQ - * chip. If no operations are provided, then default callbacks will - * be populated to setup the IRQ hierarchy. Some drivers need to - * supply their own translate function. - */ - struct irq_domain_ops child_irq_domain_ops; -#endif - - /** - * @handler: - * - * The IRQ handler to use (often a predefined IRQ core function) for - * GPIO IRQs, provided by GPIO driver. - */ - irq_flow_handler_t handler; - - /** - * @default_type: - * - * Default IRQ triggering type applied during GPIO driver - * initialization, provided by GPIO driver. - */ - unsigned int default_type; - - /** - * @lock_key: - * - * Per GPIO IRQ chip lockdep class for IRQ lock. 
- */ - struct lock_class_key *lock_key; - - /** - * @request_key: - * - * Per GPIO IRQ chip lockdep class for IRQ request. - */ - struct lock_class_key *request_key; - - /** - * @parent_handler: - * - * The interrupt handler for the GPIO chip's parent interrupts, may be - * NULL if the parent interrupts are nested rather than cascaded. - */ - irq_flow_handler_t parent_handler; - - /** - * @parent_handler_data: - * - * Data associated, and passed to, the handler for the parent - * interrupt. - */ - void *parent_handler_data; - - /** - * @num_parents: - * - * The number of interrupt parents of a GPIO chip. - */ - unsigned int num_parents; - - /** - * @parents: - * - * A list of interrupt parents of a GPIO chip. This is owned by the - * driver, so the core will only reference this list, not modify it. - */ - unsigned int *parents; - - /** - * @map: - * - * A list of interrupt parents for each line of a GPIO chip. - */ - unsigned int *map; - - /** - * @threaded: - * - * True if set the interrupt handling uses nested threads. - */ - bool threaded; - - /** - * @init_hw: optional routine to initialize hardware before - * an IRQ chip will be added. This is quite useful when - * a particular driver wants to clear IRQ related registers - * in order to avoid undesired events. - */ - int (*init_hw)(struct gpio_chip *gc); - - /** - * @init_valid_mask: optional routine to initialize @valid_mask, to be - * used if not all GPIO lines are valid interrupts. Sometimes some - * lines just cannot fire interrupts, and this routine, when defined, - * is passed a bitmap in "valid_mask" and it will have ngpios - * bits from 0..(ngpios-1) set to "1" as in valid. The callback can - * then directly set some bits to "0" if they cannot be used for - * interrupts. 
- */ - void (*init_valid_mask)(struct gpio_chip *gc, - unsigned long *valid_mask, - unsigned int ngpios); - - /** - * @valid_mask: - * - * If not %NULL, holds bitmask of GPIOs which are valid to be included - * in IRQ domain of the chip. - */ - unsigned long *valid_mask; - - /** - * @first: - * - * Required for static IRQ allocation. If set, irq_domain_add_simple() - * will allocate and map all IRQs during initialization. - */ - unsigned int first; - - /** - * @irq_enable: - * - * Store old irq_chip irq_enable callback - */ - void (*irq_enable)(struct irq_data *data); - - /** - * @irq_disable: - * - * Store old irq_chip irq_disable callback - */ - void (*irq_disable)(struct irq_data *data); - /** - * @irq_unmask: - * - * Store old irq_chip irq_unmask callback - */ - void (*irq_unmask)(struct irq_data *data); - - /** - * @irq_mask: - * - * Store old irq_chip irq_mask callback - */ - void (*irq_mask)(struct irq_data *data); +enum single_ended_mode { + LINE_MODE_PUSH_PULL, + LINE_MODE_OPEN_DRAIN, + LINE_MODE_OPEN_SOURCE, }; /** @@ -280,30 +42,27 @@ struct gpio_irq_chip { * @free: optional hook for chip-specific deactivation, such as * disabling module power and clock; may sleep * @get_direction: returns direction for signal "offset", 0=out, 1=in, - * (same as GPIO_LINE_DIRECTION_OUT / GPIO_LINE_DIRECTION_IN), - * or negative error. It is recommended to always implement this - * function, even on input-only or output-only gpio chips. + * (same as GPIOF_DIR_XXX), or negative error * @direction_input: configures signal "offset" as input, or returns error - * This can be omitted on input-only or output-only gpio chips. * @direction_output: configures signal "offset" as output, or returns error - * This can be omitted on input-only or output-only gpio chips. 
* @get: returns value for signal "offset", 0=low, 1=high, or negative error - * @get_multiple: reads values for multiple signals defined by "mask" and - * stores them in "bits", returns 0 on success or negative error * @set: assigns output value for signal "offset" * @set_multiple: assigns output values for multiple signals defined by "mask" - * @set_config: optional hook for all kinds of settings. Uses the same - * packed config format as generic pinconf. + * @set_debounce: optional hook for setting debounce time for specified gpio in + * interrupt triggered gpio chips + * @set_single_ended: optional hook for setting a line as open drain, open + * source, or non-single ended (restore from open drain/source to normal + * push-pull mode) this should be implemented if the hardware supports + * open drain or open source settings. The GPIOlib will otherwise try + * to emulate open drain/source by not actively driving lines high/low + * if a consumer request this. The driver may return -ENOTSUPP if e.g. + * it supports just open drain but not open source and is called + * with LINE_MODE_OPEN_SOURCE as mode argument. * @to_irq: optional hook supporting non-static gpio_to_irq() mappings; * implementation may not sleep * @dbg_show: optional routine to show contents in debugfs; default code * will be used when this is omitted, but custom code can show extra * state (such as pullup/pulldown configuration). - * @init_valid_mask: optional routine to initialize @valid_mask, to be used if - * not all GPIOs are valid. - * @add_pin_ranges: optional routine to initialize pin ranges, to be used when - * requires special mapping of the pins that provides GPIO functionality. - * It is called after adding GPIO chip and before adding IRQ chip. * @base: identifies the first GPIO number handled by this chip; * or, if negative during registration, requests dynamic ID allocation. 
* DEPRECATION: providing anything non-negative and nailing the base @@ -312,9 +71,6 @@ struct gpio_irq_chip { * get rid of the static GPIO number space in the long run. * @ngpio: the number of GPIOs handled by this controller; the last GPIO * handled is (base + ngpio - 1). - * @offset: when multiple gpio chips belong to the same device this - * can be used as offset within the device so friendly names can - * be properly assigned. * @names: if set, must be an array of strings to use as alternative * names for the GPIOs in this chip. Any entry in the array * may be NULL if there is no alias for the GPIO, however the @@ -326,18 +82,17 @@ struct gpio_irq_chip { * implies that if the chip supports IRQs, these IRQs need to be threaded * as the chip access may sleep when e.g. reading out the IRQ status * registers. + * @irq_not_threaded: flag must be set if @can_sleep is set but the + * IRQs don't need to be threaded * @read_reg: reader function for generic GPIO * @write_reg: writer function for generic GPIO - * @be_bits: if the generic GPIO has big endian bit order (bit 31 is representing - * line 0, bit 30 is line 1 ... bit 0 is line 31) this is set to true by the - * generic GPIO core. It is for internal housekeeping only. + * @pin2mask: some generic GPIO controllers work with the big-endian bits + * notation, e.g. in a 8-bits register, GPIO7 is the least significant + * bit. This callback assigns the right bit mask. * @reg_dat: data (in) register for generic GPIO * @reg_set: output set register (out=high) for generic GPIO - * @reg_clr: output clear register (out=low) for generic GPIO - * @reg_dir_out: direction out setting register for generic GPIO - * @reg_dir_in: direction in setting register for generic GPIO - * @bgpio_dir_unreadable: indicates that the direction register(s) cannot - * be read and we need to rely on out internal state tracking. 
+ * @reg_clk: output clear register (out=low) for generic GPIO + * @reg_dir: direction setting register for generic GPIO * @bgpio_bits: number of register bits used for a generic GPIO i.e. * * 8 * @bgpio_lock: used to lock chip->bgpio_data. Also, this is needed to keep @@ -345,11 +100,25 @@ struct gpio_irq_chip { * @bgpio_data: shadowed data register for generic GPIO to clear/set bits * safely. * @bgpio_dir: shadowed direction register for generic GPIO to clear/set - * direction safely. A "1" in this word means the line is set as - * output. + * direction safely. + * @irqchip: GPIO IRQ chip impl, provided by GPIO driver + * @irqdomain: Interrupt translation domain; responsible for mapping + * between GPIO hwirq number and linux irq number + * @irq_base: first linux IRQ number assigned to GPIO IRQ chip (deprecated) + * @irq_handler: the irq handler to use (often a predefined irq core function) + * for GPIO IRQs, provided by GPIO driver + * @irq_default_type: default IRQ triggering type applied during GPIO driver + * initialization, provided by GPIO driver + * @irq_parent: GPIO IRQ chip parent/bank linux irq number, + * provided by GPIO driver + * @irq_need_valid_mask: If set core allocates @irq_valid_mask with all + * bits set to one + * @irq_valid_mask: If not %NULL holds bitmask of GPIOs which are valid to + * be included in IRQ domain of the chip + * @lock_key: per GPIO IRQ chip lockdep class * * A gpio_chip can help platforms abstract various sources of GPIOs so - * they can all be accessed through a common programming interface. + * they can all be accessed through a common programing interface. * Example sources would be SOC controllers, FPGAs, multifunction * chips, dedicated GPIO expanders, and so on. 
* @@ -364,211 +133,113 @@ struct gpio_chip { struct device *parent; struct module *owner; - int (*request)(struct gpio_chip *gc, - unsigned int offset); - void (*free)(struct gpio_chip *gc, - unsigned int offset); - int (*get_direction)(struct gpio_chip *gc, - unsigned int offset); - int (*direction_input)(struct gpio_chip *gc, - unsigned int offset); - int (*direction_output)(struct gpio_chip *gc, - unsigned int offset, int value); - int (*get)(struct gpio_chip *gc, - unsigned int offset); - int (*get_multiple)(struct gpio_chip *gc, + int (*request)(struct gpio_chip *chip, + unsigned offset); + void (*free)(struct gpio_chip *chip, + unsigned offset); + int (*get_direction)(struct gpio_chip *chip, + unsigned offset); + int (*direction_input)(struct gpio_chip *chip, + unsigned offset); + int (*direction_output)(struct gpio_chip *chip, + unsigned offset, int value); + int (*get)(struct gpio_chip *chip, + unsigned offset); + void (*set)(struct gpio_chip *chip, + unsigned offset, int value); + void (*set_multiple)(struct gpio_chip *chip, unsigned long *mask, unsigned long *bits); - void (*set)(struct gpio_chip *gc, - unsigned int offset, int value); - void (*set_multiple)(struct gpio_chip *gc, - unsigned long *mask, - unsigned long *bits); - int (*set_config)(struct gpio_chip *gc, - unsigned int offset, - unsigned long config); - int (*to_irq)(struct gpio_chip *gc, - unsigned int offset); + int (*set_debounce)(struct gpio_chip *chip, + unsigned offset, + unsigned debounce); + int (*set_single_ended)(struct gpio_chip *chip, + unsigned offset, + enum single_ended_mode mode); + + int (*to_irq)(struct gpio_chip *chip, + unsigned offset); void (*dbg_show)(struct seq_file *s, - struct gpio_chip *gc); - - int (*init_valid_mask)(struct gpio_chip *gc, - unsigned long *valid_mask, - unsigned int ngpios); - - int (*add_pin_ranges)(struct gpio_chip *gc); - + struct gpio_chip *chip); int base; u16 ngpio; - u16 offset; const char *const *names; bool can_sleep; + bool 
irq_not_threaded; #if IS_ENABLED(CONFIG_GPIO_GENERIC) unsigned long (*read_reg)(void __iomem *reg); void (*write_reg)(void __iomem *reg, unsigned long data); - bool be_bits; + unsigned long (*pin2mask)(struct gpio_chip *gc, unsigned int pin); void __iomem *reg_dat; void __iomem *reg_set; void __iomem *reg_clr; - void __iomem *reg_dir_out; - void __iomem *reg_dir_in; - bool bgpio_dir_unreadable; + void __iomem *reg_dir; int bgpio_bits; spinlock_t bgpio_lock; unsigned long bgpio_data; unsigned long bgpio_dir; -#endif /* CONFIG_GPIO_GENERIC */ +#endif #ifdef CONFIG_GPIOLIB_IRQCHIP /* * With CONFIG_GPIOLIB_IRQCHIP we get an irqchip inside the gpiolib * to handle IRQs for most practical cases. */ - - /** - * @irq: - * - * Integrates interrupt chip functionality with the GPIO chip. Can be - * used to handle IRQs for most practical cases. - */ - struct gpio_irq_chip irq; -#endif /* CONFIG_GPIOLIB_IRQCHIP */ - - /** - * @valid_mask: - * - * If not %NULL, holds bitmask of GPIOs which are valid to be used - * from the chip. - */ - unsigned long *valid_mask; + struct irq_chip *irqchip; + struct irq_domain *irqdomain; + unsigned int irq_base; + irq_flow_handler_t irq_handler; + unsigned int irq_default_type; + int irq_parent; + bool irq_need_valid_mask; + unsigned long *irq_valid_mask; + struct lock_class_key *lock_key; +#endif #if defined(CONFIG_OF_GPIO) /* - * If CONFIG_OF_GPIO is enabled, then all GPIO controllers described in - * the device tree automatically may have an OF translation - */ - - /** - * @of_node: - * - * Pointer to a device tree node representing this GPIO controller. + * If CONFIG_OF is enabled, then all GPIO controllers described in the + * device tree automatically may have an OF translation */ struct device_node *of_node; - - /** - * @of_gpio_n_cells: - * - * Number of cells used to form the GPIO specifier. 
- */ - unsigned int of_gpio_n_cells; - - /** - * @of_xlate: - * - * Callback to translate a device tree GPIO specifier into a chip- - * relative GPIO number and flags. - */ + int of_gpio_n_cells; int (*of_xlate)(struct gpio_chip *gc, const struct of_phandle_args *gpiospec, u32 *flags); -#endif /* CONFIG_OF_GPIO */ +#endif }; -extern const char *gpiochip_is_requested(struct gpio_chip *gc, - unsigned int offset); - -/** - * for_each_requested_gpio_in_range - iterates over requested GPIOs in a given range - * @chip: the chip to query - * @i: loop variable - * @base: first GPIO in the range - * @size: amount of GPIOs to check starting from @base - * @label: label of current GPIO - */ -#define for_each_requested_gpio_in_range(chip, i, base, size, label) \ - for (i = 0; i < size; i++) \ - if ((label = gpiochip_is_requested(chip, base + i)) == NULL) {} else - -/* Iterates over all requested GPIO of the given @chip */ -#define for_each_requested_gpio(chip, i, label) \ - for_each_requested_gpio_in_range(chip, i, 0, chip->ngpio, label) +extern const char *gpiochip_is_requested(struct gpio_chip *chip, + unsigned offset); /* add/remove chips */ -extern int gpiochip_add_data_with_key(struct gpio_chip *gc, void *data, - struct lock_class_key *lock_key, - struct lock_class_key *request_key); - -/** - * gpiochip_add_data() - register a gpio_chip - * @gc: the chip to register, with gc->base initialized - * @data: driver-private data associated with this chip - * - * Context: potentially before irqs will work - * - * When gpiochip_add_data() is called very early during boot, so that GPIOs - * can be freely used, the gc->parent device must be registered before - * the gpio framework's arch_initcall(). Otherwise sysfs initialization - * for GPIOs will fail rudely. - * - * gpiochip_add_data() must only be called after gpiolib initialization, - * i.e. after core_initcall(). - * - * If gc->base is negative, this requests dynamic assignment of - * a range of valid GPIOs. 
- * - * Returns: - * A negative errno if the chip can't be registered, such as because the - * gc->base is invalid or already associated with a different chip. - * Otherwise it returns zero as a success code. - */ -#ifdef CONFIG_LOCKDEP -#define gpiochip_add_data(gc, data) ({ \ - static struct lock_class_key lock_key; \ - static struct lock_class_key request_key; \ - gpiochip_add_data_with_key(gc, data, &lock_key, \ - &request_key); \ - }) -#define devm_gpiochip_add_data(dev, gc, data) ({ \ - static struct lock_class_key lock_key; \ - static struct lock_class_key request_key; \ - devm_gpiochip_add_data_with_key(dev, gc, data, &lock_key, \ - &request_key); \ - }) -#else -#define gpiochip_add_data(gc, data) gpiochip_add_data_with_key(gc, data, NULL, NULL) -#define devm_gpiochip_add_data(dev, gc, data) \ - devm_gpiochip_add_data_with_key(dev, gc, data, NULL, NULL) -#endif /* CONFIG_LOCKDEP */ - -static inline int gpiochip_add(struct gpio_chip *gc) +extern int gpiochip_add_data(struct gpio_chip *chip, void *data); +static inline int gpiochip_add(struct gpio_chip *chip) { - return gpiochip_add_data(gc, NULL); + return gpiochip_add_data(chip, NULL); } -extern void gpiochip_remove(struct gpio_chip *gc); -extern int devm_gpiochip_add_data_with_key(struct device *dev, struct gpio_chip *gc, void *data, - struct lock_class_key *lock_key, - struct lock_class_key *request_key); +extern void gpiochip_remove(struct gpio_chip *chip); +extern int devm_gpiochip_add_data(struct device *dev, struct gpio_chip *chip, + void *data); +extern void devm_gpiochip_remove(struct device *dev, struct gpio_chip *chip); extern struct gpio_chip *gpiochip_find(void *data, - int (*match)(struct gpio_chip *gc, void *data)); + int (*match)(struct gpio_chip *chip, void *data)); -bool gpiochip_line_is_irq(struct gpio_chip *gc, unsigned int offset); -int gpiochip_reqres_irq(struct gpio_chip *gc, unsigned int offset); -void gpiochip_relres_irq(struct gpio_chip *gc, unsigned int offset); -void 
gpiochip_disable_irq(struct gpio_chip *gc, unsigned int offset); -void gpiochip_enable_irq(struct gpio_chip *gc, unsigned int offset); +/* lock/unlock as IRQ */ +int gpiochip_lock_as_irq(struct gpio_chip *chip, unsigned int offset); +void gpiochip_unlock_as_irq(struct gpio_chip *chip, unsigned int offset); +bool gpiochip_line_is_irq(struct gpio_chip *chip, unsigned int offset); /* Line status inquiry for drivers */ -bool gpiochip_line_is_open_drain(struct gpio_chip *gc, unsigned int offset); -bool gpiochip_line_is_open_source(struct gpio_chip *gc, unsigned int offset); - -/* Sleep persistence inquiry for drivers */ -bool gpiochip_line_is_persistent(struct gpio_chip *gc, unsigned int offset); -bool gpiochip_line_is_valid(const struct gpio_chip *gc, unsigned int offset); +bool gpiochip_line_is_open_drain(struct gpio_chip *chip, unsigned int offset); +bool gpiochip_line_is_open_source(struct gpio_chip *chip, unsigned int offset); /* get driver data */ -void *gpiochip_get_data(struct gpio_chip *gc); +void *gpiochip_get_data(struct gpio_chip *chip); + +struct gpio_chip *gpiod_to_chip(const struct gpio_desc *desc); struct bgpio_pdata { const char *label; @@ -576,32 +247,7 @@ struct bgpio_pdata { int ngpio; }; -#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY - -void *gpiochip_populate_parent_fwspec_twocell(struct gpio_chip *gc, - unsigned int parent_hwirq, - unsigned int parent_type); -void *gpiochip_populate_parent_fwspec_fourcell(struct gpio_chip *gc, - unsigned int parent_hwirq, - unsigned int parent_type); - -#else - -static inline void *gpiochip_populate_parent_fwspec_twocell(struct gpio_chip *gc, - unsigned int parent_hwirq, - unsigned int parent_type) -{ - return NULL; -} - -static inline void *gpiochip_populate_parent_fwspec_fourcell(struct gpio_chip *gc, - unsigned int parent_hwirq, - unsigned int parent_type) -{ - return NULL; -} - -#endif /* CONFIG_IRQ_DOMAIN_HIERARCHY */ +#if IS_ENABLED(CONFIG_GPIO_GENERIC) int bgpio_init(struct gpio_chip *gc, struct device *dev, unsigned 
long sz, void __iomem *dat, void __iomem *set, @@ -614,70 +260,75 @@ int bgpio_init(struct gpio_chip *gc, struct device *dev, #define BGPIOF_BIG_ENDIAN_BYTE_ORDER BIT(3) #define BGPIOF_READ_OUTPUT_REG_SET BIT(4) /* reg_set stores output value */ #define BGPIOF_NO_OUTPUT BIT(5) /* only input */ -#define BGPIOF_NO_SET_ON_INPUT BIT(6) -int gpiochip_irq_map(struct irq_domain *d, unsigned int irq, - irq_hw_number_t hwirq); -void gpiochip_irq_unmap(struct irq_domain *d, unsigned int irq); - -int gpiochip_irq_domain_activate(struct irq_domain *domain, - struct irq_data *data, bool reserve); -void gpiochip_irq_domain_deactivate(struct irq_domain *domain, - struct irq_data *data); - -bool gpiochip_irqchip_irq_valid(const struct gpio_chip *gc, - unsigned int offset); - -#ifdef CONFIG_GPIOLIB_IRQCHIP -int gpiochip_irqchip_add_domain(struct gpio_chip *gc, - struct irq_domain *domain); -#else -static inline int gpiochip_irqchip_add_domain(struct gpio_chip *gc, - struct irq_domain *domain) -{ - WARN_ON(1); - return -EINVAL; -} #endif -int gpiochip_generic_request(struct gpio_chip *gc, unsigned int offset); -void gpiochip_generic_free(struct gpio_chip *gc, unsigned int offset); -int gpiochip_generic_config(struct gpio_chip *gc, unsigned int offset, - unsigned long config); +#ifdef CONFIG_GPIOLIB_IRQCHIP + +void gpiochip_set_chained_irqchip(struct gpio_chip *gpiochip, + struct irq_chip *irqchip, + int parent_irq, + irq_flow_handler_t parent_handler); + +int _gpiochip_irqchip_add(struct gpio_chip *gpiochip, + struct irq_chip *irqchip, + unsigned int first_irq, + irq_flow_handler_t handler, + unsigned int type, + struct lock_class_key *lock_key); + +#ifdef CONFIG_LOCKDEP +#define gpiochip_irqchip_add(...) \ +( \ + ({ \ + static struct lock_class_key _key; \ + _gpiochip_irqchip_add(__VA_ARGS__, &_key); \ + }) \ +) +#else +#define gpiochip_irqchip_add(...) 
\ + _gpiochip_irqchip_add(__VA_ARGS__, NULL) +#endif + +#endif /* CONFIG_GPIOLIB_IRQCHIP */ + +int gpiochip_generic_request(struct gpio_chip *chip, unsigned offset); +void gpiochip_generic_free(struct gpio_chip *chip, unsigned offset); + +#ifdef CONFIG_PINCTRL /** * struct gpio_pin_range - pin range controlled by a gpio chip - * @node: list for maintaining set of pin ranges, used internally + * @head: list for maintaining set of pin ranges, used internally * @pctldev: pinctrl device which handles corresponding pins * @range: actual range of pins controlled by a gpio controller */ + struct gpio_pin_range { struct list_head node; struct pinctrl_dev *pctldev; struct pinctrl_gpio_range range; }; -#ifdef CONFIG_PINCTRL - -int gpiochip_add_pin_range(struct gpio_chip *gc, const char *pinctl_name, +int gpiochip_add_pin_range(struct gpio_chip *chip, const char *pinctl_name, unsigned int gpio_offset, unsigned int pin_offset, unsigned int npins); -int gpiochip_add_pingroup_range(struct gpio_chip *gc, +int gpiochip_add_pingroup_range(struct gpio_chip *chip, struct pinctrl_dev *pctldev, unsigned int gpio_offset, const char *pin_group); -void gpiochip_remove_pin_ranges(struct gpio_chip *gc); +void gpiochip_remove_pin_ranges(struct gpio_chip *chip); -#else /* ! 
CONFIG_PINCTRL */ +#else static inline int -gpiochip_add_pin_range(struct gpio_chip *gc, const char *pinctl_name, +gpiochip_add_pin_range(struct gpio_chip *chip, const char *pinctl_name, unsigned int gpio_offset, unsigned int pin_offset, unsigned int npins) { return 0; } static inline int -gpiochip_add_pingroup_range(struct gpio_chip *gc, +gpiochip_add_pingroup_range(struct gpio_chip *chip, struct pinctrl_dev *pctldev, unsigned int gpio_offset, const char *pin_group) { @@ -685,28 +336,16 @@ gpiochip_add_pingroup_range(struct gpio_chip *gc, } static inline void -gpiochip_remove_pin_ranges(struct gpio_chip *gc) +gpiochip_remove_pin_ranges(struct gpio_chip *chip) { } #endif /* CONFIG_PINCTRL */ -struct gpio_desc *gpiochip_request_own_desc(struct gpio_chip *gc, - unsigned int hwnum, - const char *label, - enum gpio_lookup_flags lflags, - enum gpiod_flags dflags); +struct gpio_desc *gpiochip_request_own_desc(struct gpio_chip *chip, u16 hwnum, + const char *label); void gpiochip_free_own_desc(struct gpio_desc *desc); -#ifdef CONFIG_GPIOLIB - -/* lock/unlock as IRQ */ -int gpiochip_lock_as_irq(struct gpio_chip *gc, unsigned int offset); -void gpiochip_unlock_as_irq(struct gpio_chip *gc, unsigned int offset); - - -struct gpio_chip *gpiod_to_chip(const struct gpio_desc *desc); - #else /* CONFIG_GPIOLIB */ static inline struct gpio_chip *gpiod_to_chip(const struct gpio_desc *desc) @@ -716,18 +355,6 @@ static inline struct gpio_chip *gpiod_to_chip(const struct gpio_desc *desc) return ERR_PTR(-ENODEV); } -static inline int gpiochip_lock_as_irq(struct gpio_chip *gc, - unsigned int offset) -{ - WARN_ON(1); - return -EINVAL; -} - -static inline void gpiochip_unlock_as_irq(struct gpio_chip *gc, - unsigned int offset) -{ - WARN_ON(1); -} #endif /* CONFIG_GPIOLIB */ -#endif /* __LINUX_GPIO_DRIVER_H */ +#endif diff --git a/include/linux/gpio/machine.h b/include/linux/gpio/machine.h index d755e529c1..c0d712d22b 100644 --- a/include/linux/gpio/machine.h +++ 
b/include/linux/gpio/machine.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef __LINUX_GPIO_MACHINE_H #define __LINUX_GPIO_MACHINE_H @@ -6,38 +5,29 @@ #include enum gpio_lookup_flags { - GPIO_ACTIVE_HIGH = (0 << 0), - GPIO_ACTIVE_LOW = (1 << 0), - GPIO_OPEN_DRAIN = (1 << 1), - GPIO_OPEN_SOURCE = (1 << 2), - GPIO_PERSISTENT = (0 << 3), - GPIO_TRANSITORY = (1 << 3), - GPIO_PULL_UP = (1 << 4), - GPIO_PULL_DOWN = (1 << 5), - - GPIO_LOOKUP_FLAGS_DEFAULT = GPIO_ACTIVE_HIGH | GPIO_PERSISTENT, + GPIO_ACTIVE_HIGH = (0 << 0), + GPIO_ACTIVE_LOW = (1 << 0), + GPIO_OPEN_DRAIN = (1 << 1), + GPIO_OPEN_SOURCE = (1 << 2), }; /** * struct gpiod_lookup - lookup table - * @key: either the name of the chip the GPIO belongs to, or the GPIO line name - * Note that GPIO line names are not guaranteed to be globally unique, - * so this will use the first match found! - * @chip_hwnum: hardware number (i.e. relative to the chip) of the GPIO, or - * U16_MAX to indicate that @key is a GPIO line name + * @chip_label: name of the chip the GPIO belongs to + * @chip_hwnum: hardware number (i.e. relative to the chip) of the GPIO * @con_id: name of the GPIO from the device's point of view * @idx: index of the GPIO in case several GPIOs share the same name - * @flags: bitmask of gpio_lookup_flags GPIO_* values + * @flags: mask of GPIO_* values * * gpiod_lookup is a lookup table for associating GPIOs to specific devices and * functions using platform data. */ struct gpiod_lookup { - const char *key; + const char *chip_label; u16 chip_hwnum; const char *con_id; unsigned int idx; - unsigned long flags; + enum gpio_lookup_flags flags; }; struct gpiod_lookup_table { @@ -46,68 +36,27 @@ struct gpiod_lookup_table { struct gpiod_lookup table[]; }; -/** - * struct gpiod_hog - GPIO line hog table - * @chip_label: name of the chip the GPIO belongs to - * @chip_hwnum: hardware number (i.e. 
relative to the chip) of the GPIO - * @line_name: consumer name for the hogged line - * @lflags: bitmask of gpio_lookup_flags GPIO_* values - * @dflags: GPIO flags used to specify the direction and value - */ -struct gpiod_hog { - struct list_head list; - const char *chip_label; - u16 chip_hwnum; - const char *line_name; - unsigned long lflags; - int dflags; -}; - /* * Simple definition of a single GPIO under a con_id */ -#define GPIO_LOOKUP(_key, _chip_hwnum, _con_id, _flags) \ - GPIO_LOOKUP_IDX(_key, _chip_hwnum, _con_id, 0, _flags) +#define GPIO_LOOKUP(_chip_label, _chip_hwnum, _con_id, _flags) \ + GPIO_LOOKUP_IDX(_chip_label, _chip_hwnum, _con_id, 0, _flags) /* * Use this macro if you need to have several GPIOs under the same con_id. * Each GPIO needs to use a different index and can be accessed using * gpiod_get_index() */ -#define GPIO_LOOKUP_IDX(_key, _chip_hwnum, _con_id, _idx, _flags) \ -(struct gpiod_lookup) { \ - .key = _key, \ +#define GPIO_LOOKUP_IDX(_chip_label, _chip_hwnum, _con_id, _idx, _flags) \ +{ \ + .chip_label = _chip_label, \ .chip_hwnum = _chip_hwnum, \ .con_id = _con_id, \ .idx = _idx, \ .flags = _flags, \ } -/* - * Simple definition of a single GPIO hog in an array. - */ -#define GPIO_HOG(_chip_label, _chip_hwnum, _line_name, _lflags, _dflags) \ -(struct gpiod_hog) { \ - .chip_label = _chip_label, \ - .chip_hwnum = _chip_hwnum, \ - .line_name = _line_name, \ - .lflags = _lflags, \ - .dflags = _dflags, \ -} - -#ifdef CONFIG_GPIOLIB void gpiod_add_lookup_table(struct gpiod_lookup_table *table); -void gpiod_add_lookup_tables(struct gpiod_lookup_table **tables, size_t n); void gpiod_remove_lookup_table(struct gpiod_lookup_table *table); -void gpiod_add_hogs(struct gpiod_hog *hogs); -#else /* ! 
CONFIG_GPIOLIB */ -static inline -void gpiod_add_lookup_table(struct gpiod_lookup_table *table) {} -static inline -void gpiod_add_lookup_tables(struct gpiod_lookup_table **tables, size_t n) {} -static inline -void gpiod_remove_lookup_table(struct gpiod_lookup_table *table) {} -static inline void gpiod_add_hogs(struct gpiod_hog *hogs) {} -#endif /* CONFIG_GPIOLIB */ #endif /* __LINUX_GPIO_MACHINE_H */ diff --git a/include/linux/gpio_keys.h b/include/linux/gpio_keys.h index 3f84aeb81e..ee2d8c6f91 100644 --- a/include/linux/gpio_keys.h +++ b/include/linux/gpio_keys.h @@ -1,10 +1,8 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _GPIO_KEYS_H #define _GPIO_KEYS_H -#include - struct device; +struct gpio_desc; /** * struct gpio_keys_button - configuration parameters @@ -15,12 +13,12 @@ struct device; * @desc: label that will be attached to button's gpio * @type: input event type (%EV_KEY, %EV_SW, %EV_ABS) * @wakeup: configure the button as a wake-up source - * @wakeup_event_action: event action to trigger wakeup * @debounce_interval: debounce ticks interval in msecs * @can_disable: %true indicates that userspace is allowed to * disable button via sysfs * @value: axis value for %EV_ABS * @irq: Irq number in case of interrupt keys + * @gpiod: GPIO descriptor */ struct gpio_keys_button { unsigned int code; @@ -29,11 +27,11 @@ struct gpio_keys_button { const char *desc; unsigned int type; int wakeup; - int wakeup_event_action; int debounce_interval; bool can_disable; int value; unsigned int irq; + struct gpio_desc *gpiod; }; /** @@ -48,7 +46,7 @@ struct gpio_keys_button { * @name: input device name */ struct gpio_keys_platform_data { - const struct gpio_keys_button *buttons; + struct gpio_keys_button *buttons; int nbuttons; unsigned int poll_interval; unsigned int rep:1; diff --git a/include/linux/gpio_mouse.h b/include/linux/gpio_mouse.h new file mode 100644 index 0000000000..44ed7aa14d --- /dev/null +++ b/include/linux/gpio_mouse.h @@ -0,0 +1,61 @@ +/* + * Driver for 
simulating a mouse on GPIO lines. + * + * Copyright (C) 2007 Atmel Corporation + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#ifndef _GPIO_MOUSE_H +#define _GPIO_MOUSE_H + +#define GPIO_MOUSE_POLARITY_ACT_HIGH 0x00 +#define GPIO_MOUSE_POLARITY_ACT_LOW 0x01 + +#define GPIO_MOUSE_PIN_UP 0 +#define GPIO_MOUSE_PIN_DOWN 1 +#define GPIO_MOUSE_PIN_LEFT 2 +#define GPIO_MOUSE_PIN_RIGHT 3 +#define GPIO_MOUSE_PIN_BLEFT 4 +#define GPIO_MOUSE_PIN_BMIDDLE 5 +#define GPIO_MOUSE_PIN_BRIGHT 6 +#define GPIO_MOUSE_PIN_MAX 7 + +/** + * struct gpio_mouse_platform_data + * @scan_ms: integer in ms specifying the scan periode. + * @polarity: Pin polarity, active high or low. + * @up: GPIO line for up value. + * @down: GPIO line for down value. + * @left: GPIO line for left value. + * @right: GPIO line for right value. + * @bleft: GPIO line for left button. + * @bmiddle: GPIO line for middle button. + * @bright: GPIO line for right button. + * + * This struct must be added to the platform_device in the board code. + * It is used by the gpio_mouse driver to setup GPIO lines and to + * calculate mouse movement. 
+ */ +struct gpio_mouse_platform_data { + int scan_ms; + int polarity; + + union { + struct { + int up; + int down; + int left; + int right; + + int bleft; + int bmiddle; + int bright; + }; + int pins[GPIO_MOUSE_PIN_MAX]; + }; +}; + +#endif /* _GPIO_MOUSE_H */ diff --git a/include/linux/gracl.h b/include/linux/gracl.h new file mode 100644 index 0000000000..a3c4df76dc --- /dev/null +++ b/include/linux/gracl.h @@ -0,0 +1,342 @@ +#ifndef GR_ACL_H +#define GR_ACL_H + +#include +#include +#include +#include +#include + +/* Major status information */ + +#define GR_VERSION "grsecurity 3.1" +#define GRSECURITY_VERSION 0x3100 + +enum { + GR_SHUTDOWN = 0, + GR_ENABLE = 1, + GR_SPROLE = 2, + GR_OLDRELOAD = 3, + GR_SEGVMOD = 4, + GR_STATUS = 5, + GR_UNSPROLE = 6, + GR_PASSSET = 7, + GR_SPROLEPAM = 8, + GR_RELOAD = 9, +}; + +/* Password setup definitions + * kernel/grhash.c */ +enum { + GR_PW_LEN = 128, + GR_SALT_LEN = 16, + GR_SHA_LEN = 32, +}; + +enum { + GR_SPROLE_LEN = 64, +}; + +enum { + GR_NO_GLOB = 0, + GR_REG_GLOB, + GR_CREATE_GLOB +}; + +#define GR_NLIMITS 32 + +/* Begin Data Structures */ + +struct sprole_pw { + unsigned char *rolename; + unsigned char salt[GR_SALT_LEN]; + unsigned char sum[GR_SHA_LEN]; /* 256-bit SHA hash of the password */ +}; + +struct name_entry { + __u32 key; + u64 inode; + dev_t device; + char *name; + __u16 len; + __u8 deleted; + struct name_entry *prev; + struct name_entry *next; +}; + +struct inodev_entry { + struct name_entry *nentry; + struct inodev_entry *prev; + struct inodev_entry *next; +}; + +struct acl_role_db { + struct acl_role_label **r_hash; + __u32 r_size; +}; + +struct inodev_db { + struct inodev_entry **i_hash; + __u32 i_size; +}; + +struct name_db { + struct name_entry **n_hash; + __u32 n_size; +}; + +struct crash_uid { + uid_t uid; + unsigned long expires; +}; + +struct gr_hash_struct { + void **table; + void **nametable; + void *first; + __u32 table_size; + __u32 used_size; + int type; +}; + +/* Userspace Grsecurity ACL 
data structures */ + +struct acl_subject_label { + char *filename; + u64 inode; + dev_t device; + __u32 mode; + kernel_cap_t cap_mask; + kernel_cap_t cap_lower; + kernel_cap_t cap_invert_audit; + + struct rlimit res[GR_NLIMITS]; + __u32 resmask; + + __u8 user_trans_type; + __u8 group_trans_type; + uid_t *user_transitions; + gid_t *group_transitions; + __u16 user_trans_num; + __u16 group_trans_num; + + __u32 sock_families[2]; + __u32 ip_proto[8]; + __u32 ip_type; + struct acl_ip_label **ips; + __u32 ip_num; + __u32 inaddr_any_override; + + __u32 crashes; + unsigned long expires; + + struct acl_subject_label *parent_subject; + struct gr_hash_struct *hash; + struct acl_subject_label *prev; + struct acl_subject_label *next; + + struct acl_object_label **obj_hash; + __u32 obj_hash_size; + __u16 pax_flags; +}; + +struct role_allowed_ip { + __u32 addr; + __u32 netmask; + + struct role_allowed_ip *prev; + struct role_allowed_ip *next; +}; + +struct role_transition { + char *rolename; + + struct role_transition *prev; + struct role_transition *next; +}; + +struct acl_role_label { + char *rolename; + uid_t uidgid; + __u16 roletype; + + __u16 auth_attempts; + unsigned long expires; + + struct acl_subject_label *root_label; + struct gr_hash_struct *hash; + + struct acl_role_label *prev; + struct acl_role_label *next; + + struct role_transition *transitions; + struct role_allowed_ip *allowed_ips; + uid_t *domain_children; + __u16 domain_child_num; + + umode_t umask; + + struct acl_subject_label **subj_hash; + __u32 subj_hash_size; +}; + +struct user_acl_role_db { + struct acl_role_label **r_table; + __u32 num_pointers; /* Number of allocations to track */ + __u32 num_roles; /* Number of roles */ + __u32 num_domain_children; /* Number of domain children */ + __u32 num_subjects; /* Number of subjects */ + __u32 num_objects; /* Number of objects */ +}; + +struct acl_object_label { + char *filename; + u64 inode; + dev_t device; + __u32 mode; + + struct acl_subject_label *nested; + 
struct acl_object_label *globbed; + + /* next two structures not used */ + + struct acl_object_label *prev; + struct acl_object_label *next; +}; + +struct acl_ip_label { + char *iface; + __u32 addr; + __u32 netmask; + __u16 low, high; + __u8 mode; + __u32 type; + __u32 proto[8]; + + /* next two structures not used */ + + struct acl_ip_label *prev; + struct acl_ip_label *next; +}; + +struct gr_arg { + struct user_acl_role_db role_db; + unsigned char pw[GR_PW_LEN]; + unsigned char salt[GR_SALT_LEN]; + unsigned char sum[GR_SHA_LEN]; + unsigned char sp_role[GR_SPROLE_LEN]; + struct sprole_pw *sprole_pws; + dev_t segv_device; + u64 segv_inode; + uid_t segv_uid; + __u16 num_sprole_pws; + __u16 mode; +}; + +struct gr_arg_wrapper { + struct gr_arg *arg; + __u32 version; + __u32 size; +}; + +struct subject_map { + struct acl_subject_label *user; + struct acl_subject_label *kernel; + struct subject_map *prev; + struct subject_map *next; +}; + +struct acl_subj_map_db { + struct subject_map **s_hash; + __u32 s_size; +}; + +struct gr_policy_state { + struct sprole_pw **acl_special_roles; + __u16 num_sprole_pws; + struct acl_role_label *kernel_role; + struct acl_role_label *role_list; + struct acl_role_label *default_role; + struct acl_role_db acl_role_set; + struct acl_subj_map_db subj_map_set; + struct name_db name_set; + struct inodev_db inodev_set; +}; + +struct gr_alloc_state { + unsigned long alloc_stack_next; + unsigned long alloc_stack_size; + void **alloc_stack; +}; + +struct gr_reload_state { + struct gr_policy_state oldpolicy; + struct gr_alloc_state oldalloc; + struct gr_policy_state newpolicy; + struct gr_alloc_state newalloc; + struct gr_policy_state *oldpolicy_ptr; + struct gr_alloc_state *oldalloc_ptr; + unsigned char oldmode; +}; + +/* End Data Structures Section */ + +/* Hash functions generated by empirical testing by Brad Spengler + Makes good use of the low bits of the inode. Generally 0-1 times + in loop for successful match. 0-3 for unsuccessful match. 
+ Shift/add algorithm with modulus of table size and an XOR*/ + +static __inline__ unsigned int +gr_rhash(const uid_t uid, const __u16 type, const unsigned int sz) +{ + return ((((uid + type) << (16 + type)) ^ uid) % sz); +} + + static __inline__ unsigned int +gr_shash(const struct acl_subject_label *userp, const unsigned int sz) +{ + return ((const unsigned long)userp % sz); +} + +static __inline__ unsigned int +gr_fhash(const u64 ino, const dev_t dev, const unsigned int sz) +{ + unsigned int rem; + div_u64_rem((ino + dev) ^ ((ino << 13) + (ino << 23) + (dev << 9)), sz, &rem); + return rem; +} + +static __inline__ unsigned int +gr_nhash(const char *name, const __u16 len, const unsigned int sz) +{ + return full_name_hash(NULL, (const unsigned char *)name, len) % sz; +} + +#define FOR_EACH_SUBJECT_START(role,subj,iter) \ + subj = NULL; \ + iter = 0; \ + while (iter < role->subj_hash_size) { \ + if (subj == NULL) \ + subj = role->subj_hash[iter]; \ + if (subj == NULL) { \ + iter++; \ + continue; \ + } + +#define FOR_EACH_SUBJECT_END(subj,iter) \ + subj = subj->next; \ + if (subj == NULL) \ + iter++; \ + } + + +#define FOR_EACH_NESTED_SUBJECT_START(role,subj) \ + subj = role->hash->first; \ + while (subj != NULL) { + +#define FOR_EACH_NESTED_SUBJECT_END(subj) \ + subj = subj->next; \ + } + +#endif + diff --git a/include/linux/gracl_compat.h b/include/linux/gracl_compat.h new file mode 100644 index 0000000000..af640920c0 --- /dev/null +++ b/include/linux/gracl_compat.h @@ -0,0 +1,156 @@ +#ifndef GR_ACL_COMPAT_H +#define GR_ACL_COMPAT_H + +#include +#include + +struct sprole_pw_compat { + compat_uptr_t rolename; + unsigned char salt[GR_SALT_LEN]; + unsigned char sum[GR_SHA_LEN]; +}; + +struct gr_hash_struct_compat { + compat_uptr_t table; + compat_uptr_t nametable; + compat_uptr_t first; + __u32 table_size; + __u32 used_size; + int type; +}; + +struct acl_subject_label_compat { + compat_uptr_t filename; + compat_u64 inode; + __u32 device; + __u32 mode; + kernel_cap_t 
cap_mask; + kernel_cap_t cap_lower; + kernel_cap_t cap_invert_audit; + + struct compat_rlimit res[GR_NLIMITS]; + __u32 resmask; + + __u8 user_trans_type; + __u8 group_trans_type; + compat_uptr_t user_transitions; + compat_uptr_t group_transitions; + __u16 user_trans_num; + __u16 group_trans_num; + + __u32 sock_families[2]; + __u32 ip_proto[8]; + __u32 ip_type; + compat_uptr_t ips; + __u32 ip_num; + __u32 inaddr_any_override; + + __u32 crashes; + compat_ulong_t expires; + + compat_uptr_t parent_subject; + compat_uptr_t hash; + compat_uptr_t prev; + compat_uptr_t next; + + compat_uptr_t obj_hash; + __u32 obj_hash_size; + __u16 pax_flags; +}; + +struct role_allowed_ip_compat { + __u32 addr; + __u32 netmask; + + compat_uptr_t prev; + compat_uptr_t next; +}; + +struct role_transition_compat { + compat_uptr_t rolename; + + compat_uptr_t prev; + compat_uptr_t next; +}; + +struct acl_role_label_compat { + compat_uptr_t rolename; + uid_t uidgid; + __u16 roletype; + + __u16 auth_attempts; + compat_ulong_t expires; + + compat_uptr_t root_label; + compat_uptr_t hash; + + compat_uptr_t prev; + compat_uptr_t next; + + compat_uptr_t transitions; + compat_uptr_t allowed_ips; + compat_uptr_t domain_children; + __u16 domain_child_num; + + umode_t umask; + + compat_uptr_t subj_hash; + __u32 subj_hash_size; +}; + +struct user_acl_role_db_compat { + compat_uptr_t r_table; + __u32 num_pointers; + __u32 num_roles; + __u32 num_domain_children; + __u32 num_subjects; + __u32 num_objects; +}; + +struct acl_object_label_compat { + compat_uptr_t filename; + compat_u64 inode; + __u32 device; + __u32 mode; + + compat_uptr_t nested; + compat_uptr_t globbed; + + compat_uptr_t prev; + compat_uptr_t next; +}; + +struct acl_ip_label_compat { + compat_uptr_t iface; + __u32 addr; + __u32 netmask; + __u16 low, high; + __u8 mode; + __u32 type; + __u32 proto[8]; + + compat_uptr_t prev; + compat_uptr_t next; +}; + +struct gr_arg_compat { + struct user_acl_role_db_compat role_db; + unsigned char 
pw[GR_PW_LEN]; + unsigned char salt[GR_SALT_LEN]; + unsigned char sum[GR_SHA_LEN]; + unsigned char sp_role[GR_SPROLE_LEN]; + compat_uptr_t sprole_pws; + __u32 segv_device; + compat_u64 segv_inode; + uid_t segv_uid; + __u16 num_sprole_pws; + __u16 mode; +}; + +struct gr_arg_wrapper_compat { + compat_uptr_t arg; + __u32 version; + __u32 size; +}; + +#endif diff --git a/include/linux/gralloc.h b/include/linux/gralloc.h new file mode 100644 index 0000000000..323ecf2802 --- /dev/null +++ b/include/linux/gralloc.h @@ -0,0 +1,9 @@ +#ifndef __GRALLOC_H +#define __GRALLOC_H + +void acl_free_all(void); +int acl_alloc_stack_init(unsigned long size); +void *acl_alloc(unsigned long len); +void *acl_alloc_num(unsigned long num, unsigned long len); + +#endif diff --git a/include/linux/grdefs.h b/include/linux/grdefs.h new file mode 100644 index 0000000000..be66033e66 --- /dev/null +++ b/include/linux/grdefs.h @@ -0,0 +1,140 @@ +#ifndef GRDEFS_H +#define GRDEFS_H + +/* Begin grsecurity status declarations */ + +enum { + GR_READY = 0x01, + GR_STATUS_INIT = 0x00 // disabled state +}; + +/* Begin ACL declarations */ + +/* Role flags */ + +enum { + GR_ROLE_USER = 0x0001, + GR_ROLE_GROUP = 0x0002, + GR_ROLE_DEFAULT = 0x0004, + GR_ROLE_SPECIAL = 0x0008, + GR_ROLE_AUTH = 0x0010, + GR_ROLE_NOPW = 0x0020, + GR_ROLE_GOD = 0x0040, + GR_ROLE_LEARN = 0x0080, + GR_ROLE_TPE = 0x0100, + GR_ROLE_DOMAIN = 0x0200, + GR_ROLE_PAM = 0x0400, + GR_ROLE_PERSIST = 0x0800 +}; + +/* ACL Subject and Object mode flags */ +enum { + GR_DELETED = 0x80000000 +}; + +/* ACL Object-only mode flags */ +enum { + GR_READ = 0x00000001, + GR_APPEND = 0x00000002, + GR_WRITE = 0x00000004, + GR_EXEC = 0x00000008, + GR_FIND = 0x00000010, + GR_INHERIT = 0x00000020, + GR_SETID = 0x00000040, + GR_CREATE = 0x00000080, + GR_DELETE = 0x00000100, + GR_LINK = 0x00000200, + GR_AUDIT_READ = 0x00000400, + GR_AUDIT_APPEND = 0x00000800, + GR_AUDIT_WRITE = 0x00001000, + GR_AUDIT_EXEC = 0x00002000, + GR_AUDIT_FIND = 0x00004000, + 
GR_AUDIT_INHERIT= 0x00008000, + GR_AUDIT_SETID = 0x00010000, + GR_AUDIT_CREATE = 0x00020000, + GR_AUDIT_DELETE = 0x00040000, + GR_AUDIT_LINK = 0x00080000, + GR_PTRACERD = 0x00100000, + GR_NOPTRACE = 0x00200000, + GR_SUPPRESS = 0x00400000, + GR_NOLEARN = 0x00800000, + GR_INIT_TRANSFER= 0x01000000 +}; + +#define GR_AUDITS (GR_AUDIT_READ | GR_AUDIT_WRITE | GR_AUDIT_APPEND | GR_AUDIT_EXEC | \ + GR_AUDIT_FIND | GR_AUDIT_INHERIT | GR_AUDIT_SETID | \ + GR_AUDIT_CREATE | GR_AUDIT_DELETE | GR_AUDIT_LINK) + +/* ACL subject-only mode flags */ +enum { + GR_KILL = 0x00000001, + GR_VIEW = 0x00000002, + GR_PROTECTED = 0x00000004, + GR_LEARN = 0x00000008, + GR_OVERRIDE = 0x00000010, + /* just a placeholder, this mode is only used in userspace */ + GR_DUMMY = 0x00000020, + GR_PROTSHM = 0x00000040, + GR_KILLPROC = 0x00000080, + GR_KILLIPPROC = 0x00000100, + /* just a placeholder, this mode is only used in userspace */ + GR_NOTROJAN = 0x00000200, + GR_PROTPROCFD = 0x00000400, + GR_PROCACCT = 0x00000800, + GR_RELAXPTRACE = 0x00001000, + //GR_NESTED = 0x00002000, + GR_INHERITLEARN = 0x00004000, + GR_PROCFIND = 0x00008000, + GR_POVERRIDE = 0x00010000, + GR_KERNELAUTH = 0x00020000, + GR_ATSECURE = 0x00040000, + GR_SHMEXEC = 0x00080000 +}; + +enum { + GR_PAX_ENABLE_SEGMEXEC = 0x0001, + GR_PAX_ENABLE_PAGEEXEC = 0x0002, + GR_PAX_ENABLE_MPROTECT = 0x0004, + GR_PAX_ENABLE_RANDMMAP = 0x0008, + GR_PAX_ENABLE_EMUTRAMP = 0x0010, + GR_PAX_DISABLE_SEGMEXEC = 0x0100, + GR_PAX_DISABLE_PAGEEXEC = 0x0200, + GR_PAX_DISABLE_MPROTECT = 0x0400, + GR_PAX_DISABLE_RANDMMAP = 0x0800, + GR_PAX_DISABLE_EMUTRAMP = 0x1000, +}; + +enum { + GR_ID_USER = 0x01, + GR_ID_GROUP = 0x02, +}; + +enum { + GR_ID_ALLOW = 0x01, + GR_ID_DENY = 0x02, +}; + +#define GR_CRASH_RES 31 +#define GR_UIDTABLE_MAX 500 + +/* begin resource learning section */ +enum { + GR_RLIM_CPU_BUMP = 60, + GR_RLIM_FSIZE_BUMP = 50000, + GR_RLIM_DATA_BUMP = 10000, + GR_RLIM_STACK_BUMP = 1000, + GR_RLIM_CORE_BUMP = 10000, + GR_RLIM_RSS_BUMP = 500000, + 
GR_RLIM_NPROC_BUMP = 1, + GR_RLIM_NOFILE_BUMP = 5, + GR_RLIM_MEMLOCK_BUMP = 50000, + GR_RLIM_AS_BUMP = 500000, + GR_RLIM_LOCKS_BUMP = 2, + GR_RLIM_SIGPENDING_BUMP = 5, + GR_RLIM_MSGQUEUE_BUMP = 10000, + GR_RLIM_NICE_BUMP = 1, + GR_RLIM_RTPRIO_BUMP = 1, + GR_RLIM_RTTIME_BUMP = 1000000 +}; + +#endif diff --git a/include/linux/grinternal.h b/include/linux/grinternal.h new file mode 100644 index 0000000000..1dbf9c8710 --- /dev/null +++ b/include/linux/grinternal.h @@ -0,0 +1,231 @@ +#ifndef __GRINTERNAL_H +#define __GRINTERNAL_H + +#ifdef CONFIG_GRKERNSEC + +#include +#include +#include +#include +#include +#include + +void gr_add_learn_entry(const char *fmt, ...) + __attribute__ ((format (printf, 1, 2))); +__u32 gr_search_file(const struct dentry *dentry, const __u32 mode, + const struct vfsmount *mnt); +__u32 gr_check_create(const struct dentry *new_dentry, + const struct dentry *parent, + const struct vfsmount *mnt, const __u32 mode); +int gr_check_protected_task(const struct task_struct *task); +__u32 to_gr_audit(const __u32 reqmode); +int gr_set_acls(const int type); +int gr_acl_is_enabled(void); +char gr_roletype_to_char(void); + +void gr_handle_alertkill(struct task_struct *task); +char *gr_to_filename(const struct dentry *dentry, + const struct vfsmount *mnt); +char *gr_to_filename1(const struct dentry *dentry, + const struct vfsmount *mnt); +char *gr_to_filename2(const struct dentry *dentry, + const struct vfsmount *mnt); +char *gr_to_filename3(const struct dentry *dentry, + const struct vfsmount *mnt); + +extern int grsec_enable_ptrace_readexec; +extern int grsec_enable_harden_ptrace; +extern int grsec_enable_link; +extern int grsec_enable_fifo; +extern int grsec_enable_execve; +extern int grsec_enable_shm; +extern int grsec_enable_execlog; +extern int grsec_enable_signal; +extern int grsec_enable_audit_ptrace; +extern int grsec_enable_forkfail; +extern int grsec_enable_time; +extern int grsec_enable_rofs; +extern int grsec_deny_new_usb; +extern int 
grsec_enable_chroot_shmat; +extern int grsec_enable_chroot_mount; +extern int grsec_enable_chroot_double; +extern int grsec_enable_chroot_pivot; +extern int grsec_enable_chroot_chdir; +extern int grsec_enable_chroot_chmod; +extern int grsec_enable_chroot_mknod; +extern int grsec_enable_chroot_fchdir; +extern int grsec_enable_chroot_nice; +extern int grsec_enable_chroot_execlog; +extern int grsec_enable_chroot_caps; +extern int grsec_enable_chroot_rename; +extern int grsec_enable_chroot_sysctl; +extern int grsec_enable_chroot_unix; +extern int grsec_enable_symlinkown; +extern kgid_t grsec_symlinkown_gid; +extern int grsec_enable_tpe; +extern kgid_t grsec_tpe_gid; +extern int grsec_enable_tpe_all; +extern int grsec_enable_tpe_invert; +extern int grsec_enable_socket_all; +extern kgid_t grsec_socket_all_gid; +extern int grsec_enable_socket_client; +extern kgid_t grsec_socket_client_gid; +extern int grsec_enable_socket_server; +extern kgid_t grsec_socket_server_gid; +extern kgid_t grsec_audit_gid; +extern int grsec_enable_group; +extern int grsec_enable_log_rwxmaps; +extern int grsec_enable_mount; +extern int grsec_enable_chdir; +extern int grsec_resource_logging; +extern int grsec_enable_blackhole; +extern int grsec_lastack_retries; +extern int grsec_enable_brute; +extern int grsec_enable_harden_ipc; +extern int grsec_enable_harden_tty; +extern int grsec_lock; + +extern spinlock_t grsec_alert_lock; +extern unsigned long grsec_alert_wtime; +extern unsigned long grsec_alert_fyet; + +extern spinlock_t grsec_audit_lock; + +extern rwlock_t grsec_exec_file_lock; + +#define gr_task_fullpath(tsk) ((tsk)->exec_file ? \ + gr_to_filename2((tsk)->exec_file->f_path.dentry, \ + (tsk)->exec_file->f_path.mnt) : "/") + +#define gr_parent_task_fullpath(tsk) ((tsk)->real_parent->exec_file ? \ + gr_to_filename3((tsk)->real_parent->exec_file->f_path.dentry, \ + (tsk)->real_parent->exec_file->f_path.mnt) : "/") + +#define gr_task_fullpath0(tsk) ((tsk)->exec_file ? 
\ + gr_to_filename((tsk)->exec_file->f_path.dentry, \ + (tsk)->exec_file->f_path.mnt) : "/") + +#define gr_parent_task_fullpath0(tsk) ((tsk)->real_parent->exec_file ? \ + gr_to_filename1((tsk)->real_parent->exec_file->f_path.dentry, \ + (tsk)->real_parent->exec_file->f_path.mnt) : "/") + +#define proc_is_chrooted(tsk_a) ((tsk_a)->gr_is_chrooted) + +#define have_same_root(tsk_a,tsk_b) ((tsk_a)->gr_chroot_dentry == (tsk_b)->gr_chroot_dentry) + +static inline bool gr_is_same_file(const struct file *file1, const struct file *file2) +{ + if (file1 && file2) { + const struct inode *inode1 = file1->f_path.dentry->d_inode; + const struct inode *inode2 = file2->f_path.dentry->d_inode; + if (inode1->i_ino == inode2->i_ino && inode1->i_sb->s_dev == inode2->i_sb->s_dev) + return true; + } + + return false; +} + +#define GR_CHROOT_CAPS {{ \ + CAP_TO_MASK(CAP_LINUX_IMMUTABLE) | CAP_TO_MASK(CAP_NET_ADMIN) | \ + CAP_TO_MASK(CAP_SYS_MODULE) | CAP_TO_MASK(CAP_SYS_RAWIO) | \ + CAP_TO_MASK(CAP_SYS_PACCT) | CAP_TO_MASK(CAP_SYS_ADMIN) | \ + CAP_TO_MASK(CAP_SYS_BOOT) | CAP_TO_MASK(CAP_SYS_TIME) | \ + CAP_TO_MASK(CAP_NET_RAW) | CAP_TO_MASK(CAP_SYS_TTY_CONFIG) | \ + CAP_TO_MASK(CAP_IPC_OWNER) | CAP_TO_MASK(CAP_SETFCAP), \ + CAP_TO_MASK(CAP_SYSLOG) | CAP_TO_MASK(CAP_MAC_ADMIN) }} + +#define security_learn(normal_msg,args...) 
\ +({ \ + read_lock(&grsec_exec_file_lock); \ + gr_add_learn_entry(normal_msg "\n", ## args); \ + read_unlock(&grsec_exec_file_lock); \ +}) + +enum { + GR_DO_AUDIT, + GR_DONT_AUDIT, + /* used for non-audit messages that we shouldn't kill the task on */ + GR_DONT_AUDIT_GOOD +}; + +enum { + GR_TTYSNIFF, + GR_RBAC, + GR_RBAC_STR, + GR_STR_RBAC, + GR_RBAC_MODE2, + GR_RBAC_MODE3, + GR_FILENAME, + GR_SYSCTL_HIDDEN, + GR_NOARGS, + GR_ONE_INT, + GR_ONE_INT_TWO_STR, + GR_ONE_STR, + GR_STR_INT, + GR_TWO_STR_INT, + GR_TWO_INT, + GR_TWO_U64, + GR_THREE_INT, + GR_FIVE_INT_TWO_STR, + GR_TWO_STR, + GR_THREE_STR, + GR_FOUR_STR, + GR_STR_FILENAME, + GR_FILENAME_STR, + GR_FILENAME_TWO_INT, + GR_FILENAME_TWO_INT_STR, + GR_TEXTREL, + GR_PTRACE, + GR_RESOURCE, + GR_CAP, + GR_SIG, + GR_SIG2, + GR_CRASH1, + GR_CRASH2, + GR_PSACCT, + GR_RWXMAP, + GR_RWXMAPVMA +}; + +#define gr_log_hidden_sysctl(audit, msg, str) gr_log_varargs(audit, msg, GR_SYSCTL_HIDDEN, str) +#define gr_log_ttysniff(audit, msg, task) gr_log_varargs(audit, msg, GR_TTYSNIFF, task) +#define gr_log_fs_rbac_generic(audit, msg, dentry, mnt) gr_log_varargs(audit, msg, GR_RBAC, dentry, mnt) +#define gr_log_fs_rbac_str(audit, msg, dentry, mnt, str) gr_log_varargs(audit, msg, GR_RBAC_STR, dentry, mnt, str) +#define gr_log_fs_str_rbac(audit, msg, str, dentry, mnt) gr_log_varargs(audit, msg, GR_STR_RBAC, str, dentry, mnt) +#define gr_log_fs_rbac_mode2(audit, msg, dentry, mnt, str1, str2) gr_log_varargs(audit, msg, GR_RBAC_MODE2, dentry, mnt, str1, str2) +#define gr_log_fs_rbac_mode3(audit, msg, dentry, mnt, str1, str2, str3) gr_log_varargs(audit, msg, GR_RBAC_MODE3, dentry, mnt, str1, str2, str3) +#define gr_log_fs_generic(audit, msg, dentry, mnt) gr_log_varargs(audit, msg, GR_FILENAME, dentry, mnt) +#define gr_log_noargs(audit, msg) gr_log_varargs(audit, msg, GR_NOARGS) +#define gr_log_int(audit, msg, num) gr_log_varargs(audit, msg, GR_ONE_INT, num) +#define gr_log_int_str2(audit, msg, num, str1, str2) gr_log_varargs(audit, msg, 
GR_ONE_INT_TWO_STR, num, str1, str2) +#define gr_log_str(audit, msg, str) gr_log_varargs(audit, msg, GR_ONE_STR, str) +#define gr_log_str_int(audit, msg, str, num) gr_log_varargs(audit, msg, GR_STR_INT, str, num) +#define gr_log_int_int(audit, msg, num1, num2) gr_log_varargs(audit, msg, GR_TWO_INT, num1, num2) +#define gr_log_two_u64(audit, msg, num1, num2) gr_log_varargs(audit, msg, GR_TWO_U64, num1, num2) +#define gr_log_int3(audit, msg, num1, num2, num3) gr_log_varargs(audit, msg, GR_THREE_INT, num1, num2, num3) +#define gr_log_int5_str2(audit, msg, num1, num2, str1, str2) gr_log_varargs(audit, msg, GR_FIVE_INT_TWO_STR, num1, num2, str1, str2) +#define gr_log_str_str(audit, msg, str1, str2) gr_log_varargs(audit, msg, GR_TWO_STR, str1, str2) +#define gr_log_str2_int(audit, msg, str1, str2, num) gr_log_varargs(audit, msg, GR_TWO_STR_INT, str1, str2, num) +#define gr_log_str3(audit, msg, str1, str2, str3) gr_log_varargs(audit, msg, GR_THREE_STR, str1, str2, str3) +#define gr_log_str4(audit, msg, str1, str2, str3, str4) gr_log_varargs(audit, msg, GR_FOUR_STR, str1, str2, str3, str4) +#define gr_log_str_fs(audit, msg, str, dentry, mnt) gr_log_varargs(audit, msg, GR_STR_FILENAME, str, dentry, mnt) +#define gr_log_fs_str(audit, msg, dentry, mnt, str) gr_log_varargs(audit, msg, GR_FILENAME_STR, dentry, mnt, str) +#define gr_log_fs_int2(audit, msg, dentry, mnt, num1, num2) gr_log_varargs(audit, msg, GR_FILENAME_TWO_INT, dentry, mnt, num1, num2) +#define gr_log_fs_int2_str(audit, msg, dentry, mnt, num1, num2, str) gr_log_varargs(audit, msg, GR_FILENAME_TWO_INT_STR, dentry, mnt, num1, num2, str) +#define gr_log_textrel_ulong_ulong(audit, msg, str, file, ulong1, ulong2) gr_log_varargs(audit, msg, GR_TEXTREL, str, file, ulong1, ulong2) +#define gr_log_ptrace(audit, msg, task) gr_log_varargs(audit, msg, GR_PTRACE, task) +#define gr_log_res_ulong2_str(audit, msg, task, ulong1, str, ulong2) gr_log_varargs(audit, msg, GR_RESOURCE, task, ulong1, str, ulong2) +#define 
gr_log_cap(audit, msg, task, str) gr_log_varargs(audit, msg, GR_CAP, task, str) +#define gr_log_sig_addr(audit, msg, str, addr) gr_log_varargs(audit, msg, GR_SIG, str, addr) +#define gr_log_sig_task(audit, msg, task, num) gr_log_varargs(audit, msg, GR_SIG2, task, num) +#define gr_log_crash1(audit, msg, task, ulong) gr_log_varargs(audit, msg, GR_CRASH1, task, ulong) +#define gr_log_crash2(audit, msg, task, ulong1) gr_log_varargs(audit, msg, GR_CRASH2, task, ulong1) +#define gr_log_procacct(audit, msg, task, num1, num2, num3, num4, num5, num6, num7, num8, num9) gr_log_varargs(audit, msg, GR_PSACCT, task, num1, num2, num3, num4, num5, num6, num7, num8, num9) +#define gr_log_rwxmap(audit, msg, str) gr_log_varargs(audit, msg, GR_RWXMAP, str) +#define gr_log_rwxmap_vma(audit, msg, str) gr_log_varargs(audit, msg, GR_RWXMAPVMA, str) + +void gr_log_varargs(int audit, const char *msg, int argtypes, ...); + +#endif + +#endif diff --git a/include/linux/grmsg.h b/include/linux/grmsg.h new file mode 100644 index 0000000000..f1d36054b8 --- /dev/null +++ b/include/linux/grmsg.h @@ -0,0 +1,121 @@ +#define DEFAULTSECMSG "%.256s[%.16s:%d] uid/euid:%u/%u gid/egid:%u/%u, parent %.256s[%.16s:%d] uid/euid:%u/%u gid/egid:%u/%u" +#define GR_ACL_PROCACCT_MSG "%.256s[%.16s:%d] IP:%pI4 TTY:%.64s uid/euid:%u/%u gid/egid:%u/%u run time:[%ud %uh %um %us] cpu time:[%ud %uh %um %us] %s with exit code %ld, parent %.256s[%.16s:%d] IP:%pI4 TTY:%.64s uid/euid:%u/%u gid/egid:%u/%u" +#define GR_PTRACE_ACL_MSG "denied ptrace of %.950s(%.16s:%d) by " +#define GR_STOPMOD_MSG "denied modification of module state by " +#define GR_ROFS_BLOCKWRITE_MSG "denied write to block device %.950s by " +#define GR_ROFS_MOUNT_MSG "denied writable mount of %.950s by " +#define GR_IOPERM_MSG "denied use of ioperm() by " +#define GR_IOPL_MSG "denied use of iopl() by " +#define GR_SHMAT_ACL_MSG "denied attach of shared memory of UID %u, PID %d, ID %u by " +#define GR_UNIX_CHROOT_MSG "denied connect() to abstract AF_UNIX 
socket outside of chroot by " +#define GR_SHMAT_CHROOT_MSG "denied attach of shared memory outside of chroot by " +#define GR_MEM_READWRITE_MSG "denied access of range %Lx -> %Lx in /dev/mem by " +#define GR_SYMLINK_MSG "not following symlink %.950s owned by %d.%d by " +#define GR_LEARN_AUDIT_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%lu\t%lu\t%.4095s\t%lu\t%pI4" +#define GR_ID_LEARN_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%c\t%d\t%d\t%d\t%pI4" +#define GR_HIDDEN_ACL_MSG "%s access to hidden file %.950s by " +#define GR_OPEN_ACL_MSG "%s open of %.950s for%s%s by " +#define GR_CREATE_ACL_MSG "%s create of %.950s for%s%s by " +#define GR_FIFO_MSG "denied writing FIFO %.950s of %d.%d by " +#define GR_MKNOD_CHROOT_MSG "denied mknod of %.950s from chroot by " +#define GR_MKNOD_ACL_MSG "%s mknod of %.950s by " +#define GR_UNIXCONNECT_ACL_MSG "%s connect() to the unix domain socket %.950s by " +#define GR_TTYSNIFF_ACL_MSG "terminal being sniffed by IP:%pI4 %.480s[%.16s:%d], parent %.480s[%.16s:%d] against " +#define GR_MKDIR_ACL_MSG "%s mkdir of %.950s by " +#define GR_RMDIR_ACL_MSG "%s rmdir of %.950s by " +#define GR_UNLINK_ACL_MSG "%s unlink of %.950s by " +#define GR_SYMLINK_ACL_MSG "%s symlink from %.480s to %.480s by " +#define GR_HARDLINK_MSG "denied hardlink of %.930s (owned by %d.%d) to %.30s for " +#define GR_LINK_ACL_MSG "%s link of %.480s to %.480s by " +#define GR_INHERIT_ACL_MSG "successful inherit of %.480s's ACL for %.480s by " +#define GR_RENAME_ACL_MSG "%s rename of %.480s to %.480s by " +#define GR_UNSAFESHARE_EXEC_ACL_MSG "denied exec with cloned fs of %.950s by " +#define GR_PTRACE_EXEC_ACL_MSG "denied ptrace of %.950s by " +#define GR_NNP_EXEC_ACL_MSG "denied exec of %.950s due to NNP by " +#define GR_EXEC_ACL_MSG "%s execution of %.950s by " +#define GR_EXEC_TPE_MSG "denied untrusted exec (due to %.70s) of %.950s by " +#define GR_SEGVSTART_ACL_MSG "possible exploit bruteforcing on " DEFAULTSECMSG " banning uid %u from login for %lu seconds" +#define 
GR_SEGVNOSUID_ACL_MSG "possible exploit bruteforcing on " DEFAULTSECMSG " banning execution for %lu seconds" +#define GR_MOUNT_CHROOT_MSG "denied mount of %.256s as %.930s from chroot by " +#define GR_PIVOT_CHROOT_MSG "denied pivot_root from chroot by " +#define GR_TRUNCATE_ACL_MSG "%s truncate of %.950s by " +#define GR_ATIME_ACL_MSG "%s access time change of %.950s by " +#define GR_ACCESS_ACL_MSG "%s access of %.950s for%s%s%s by " +#define GR_CHROOT_CHROOT_MSG "denied double chroot to %.950s by " +#define GR_CHROOT_RENAME_MSG "denied bad rename of %.950s out of a chroot by " +#define GR_CHMOD_CHROOT_MSG "denied chmod +s of %.950s by " +#define GR_CHMOD_ACL_MSG "%s chmod of %.950s by " +#define GR_CHROOT_FCHDIR_MSG "denied fchdir outside of chroot to %.950s by " +#define GR_CHROOT_PATHAT_MSG "denied relative path access outside of chroot to %.950s by " +#define GR_CHROOT_FHANDLE_MSG "denied use of file handles inside chroot by " +#define GR_CHOWN_ACL_MSG "%s chown of %.950s by " +#define GR_SETXATTR_ACL_MSG "%s setting extended attribute of %.950s by " +#define GR_REMOVEXATTR_ACL_MSG "%s removing extended attribute of %.950s by " +#define GR_WRITLIB_ACL_MSG "denied load of writable library %.950s by " +#define GR_INITF_ACL_MSG "init_variables() failed %s by " +#define GR_DISABLED_ACL_MSG "Error loading %s, trying to run kernel with acls disabled. 
To disable acls at startup use gracl=off from your boot loader" +#define GR_DEV_ACL_MSG "/dev/grsec: %d bytes sent %d required, being fed garbage by " +#define GR_SHUTS_ACL_MSG "shutdown auth success for " +#define GR_SHUTF_ACL_MSG "shutdown auth failure for " +#define GR_SHUTI_ACL_MSG "ignoring shutdown for disabled RBAC system for " +#define GR_SEGVMODS_ACL_MSG "segvmod auth success for " +#define GR_SEGVMODF_ACL_MSG "segvmod auth failure for " +#define GR_SEGVMODI_ACL_MSG "ignoring segvmod for disabled RBAC system for " +#define GR_ENABLE_ACL_MSG "%s RBAC system loaded by " +#define GR_ENABLEF_ACL_MSG "unable to load %s for " +#define GR_RELOADI_ACL_MSG "ignoring reload request for disabled RBAC system" +#define GR_RELOAD_ACL_MSG "%s RBAC system reloaded by " +#define GR_RELOADF_ACL_MSG "failed reload of %s for " +#define GR_SPROLEI_ACL_MSG "ignoring change to special role for disabled RBAC system for " +#define GR_SPROLES_ACL_MSG "successful change to special role %s (id %d) by " +#define GR_SPROLEL_ACL_MSG "special role %s (id %d) exited by " +#define GR_SPROLEF_ACL_MSG "special role %s failure for " +#define GR_UNSPROLEI_ACL_MSG "ignoring unauth of special role for disabled RBAC system for " +#define GR_UNSPROLES_ACL_MSG "successful unauth of special role %s (id %d) by " +#define GR_INVMODE_ACL_MSG "invalid mode %d by " +#define GR_PRIORITY_CHROOT_MSG "denied priority change of process (%.16s:%d) by " +#define GR_FAILFORK_MSG "failed fork with errno %s by " +#define GR_NICE_CHROOT_MSG "denied priority change by " +#define GR_UNISIGLOG_MSG "%.32s occurred at %p in " +#define GR_DUALSIGLOG_MSG "signal %d sent to " DEFAULTSECMSG " by " +#define GR_SIG_ACL_MSG "denied send of signal %d to protected task " DEFAULTSECMSG " by " +#define GR_SYSCTL_MSG "denied modification of grsecurity sysctl value : %.32s by " +#define GR_SYSCTL_ACL_MSG "%s sysctl of %.950s for%s%s by " +#define GR_TIME_MSG "time set by " +#define GR_DEFACL_MSG "fatal: unable to find subject for 
(%.16s:%d), loaded by " +#define GR_MMAP_ACL_MSG "%s executable mmap of %.950s by " +#define GR_MPROTECT_ACL_MSG "%s executable mprotect of %.950s by " +#define GR_SOCK_MSG "denied socket(%.16s,%.16s,%.16s) by " +#define GR_SOCK_NOINET_MSG "denied socket(%.16s,%.16s,%d) by " +#define GR_BIND_MSG "denied bind() by " +#define GR_CONNECT_MSG "denied connect() by " +#define GR_BIND_ACL_MSG "denied bind() to %pI4 port %u sock type %.16s protocol %.16s by " +#define GR_CONNECT_ACL_MSG "denied connect() to %pI4 port %u sock type %.16s protocol %.16s by " +#define GR_IP_LEARN_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%pI4\t%u\t%u\t%u\t%u\t%pI4" +#define GR_EXEC_CHROOT_MSG "exec of %.980s within chroot by process " +#define GR_CAP_ACL_MSG "use of %s denied for " +#define GR_CAP_CHROOT_MSG "use of %s in chroot denied for " +#define GR_CAP_ACL_MSG2 "use of %s permitted for " +#define GR_USRCHANGE_ACL_MSG "change to uid %u denied for " +#define GR_GRPCHANGE_ACL_MSG "change to gid %u denied for " +#define GR_REMOUNT_AUDIT_MSG "remount of %.256s by " +#define GR_UNMOUNT_AUDIT_MSG "unmount of %.256s by " +#define GR_MOUNT_AUDIT_MSG "mount of %.256s to %.256s by " +#define GR_CHDIR_AUDIT_MSG "chdir to %.980s by " +#define GR_EXEC_AUDIT_MSG "exec of %.930s (%.128s) by " +#define GR_RESOURCE_MSG "denied resource overstep by requesting %lu for %.16s against limit %lu for " +#define GR_RWXMMAP_MSG "denied RWX mmap of %.950s by " +#define GR_RWXMPROTECT_MSG "denied RWX mprotect of %.950s by " +#define GR_TEXTREL_AUDIT_MSG "allowed %s text relocation transition in %.950s, VMA:0x%08lx 0x%08lx by " +#define GR_PTGNUSTACK_MSG "denied marking stack executable as requested by PT_GNU_STACK marking in %.950s by " +#define GR_VM86_MSG "denied use of vm86 by " +#define GR_PTRACE_AUDIT_MSG "process %.950s(%.16s:%d) attached to via ptrace by " +#define GR_PTRACE_READEXEC_MSG "denied ptrace of unreadable binary %.950s by " +#define GR_INIT_TRANSFER_MSG "persistent special role transferred privilege to 
init by " +#define GR_BADPROCPID_MSG "denied read of sensitive /proc/pid/%s entry via fd passed across exec by " +#define GR_SYMLINKOWNER_MSG "denied following symlink %.950s since symlink owner %u does not match target owner %u, by " +#define GR_BRUTE_DAEMON_MSG "bruteforce prevention initiated for the next 30 minutes or until service restarted, stalling each fork 30 seconds. Please investigate the crash report for " +#define GR_BRUTE_SUID_MSG "bruteforce prevention initiated due to crash of %.950s against uid %u, banning suid/sgid execs for %u minutes. Please investigate the crash report for " +#define GR_IPC_DENIED_MSG "denied %s of overly-permissive IPC object with creator uid %u by " +#define GR_TIOCSTI_MSG "denied unprivileged use of TIOCSTI by " +#define GR_MSRWRITE_MSG "denied write to CPU MSR by " diff --git a/include/linux/grsecurity.h b/include/linux/grsecurity.h new file mode 100644 index 0000000000..749b915012 --- /dev/null +++ b/include/linux/grsecurity.h @@ -0,0 +1,259 @@ +#ifndef GR_SECURITY_H +#define GR_SECURITY_H +#include +#include +#include +#include +#include + +/* notify of brain-dead configs */ +#if defined(CONFIG_DEBUG_FS) && defined(CONFIG_GRKERNSEC_KMEM) +#error "CONFIG_DEBUG_FS being enabled is a security risk when CONFIG_GRKERNSEC_KMEM is enabled" +#endif +#if defined(CONFIG_PROC_PAGE_MONITOR) && defined(CONFIG_GRKERNSEC) +#error "CONFIG_PROC_PAGE_MONITOR is a security risk" +#endif +#if defined(CONFIG_GRKERNSEC_PROC_USER) && defined(CONFIG_GRKERNSEC_PROC_USERGROUP) +#error "CONFIG_GRKERNSEC_PROC_USER and CONFIG_GRKERNSEC_PROC_USERGROUP cannot both be enabled." 
+#endif +#if defined(CONFIG_GRKERNSEC_PROC) && !defined(CONFIG_GRKERNSEC_PROC_USER) && !defined(CONFIG_GRKERNSEC_PROC_USERGROUP) +#error "CONFIG_GRKERNSEC_PROC enabled, but neither CONFIG_GRKERNSEC_PROC_USER nor CONFIG_GRKERNSEC_PROC_USERGROUP enabled" +#endif +#if defined(CONFIG_PAX_NOEXEC) && !defined(CONFIG_PAX_PAGEEXEC) && !defined(CONFIG_PAX_SEGMEXEC) && !defined(CONFIG_PAX_KERNEXEC) +#error "CONFIG_PAX_NOEXEC enabled, but PAGEEXEC, SEGMEXEC, and KERNEXEC are disabled." +#endif +#if defined(CONFIG_PAX_ASLR) && !defined(CONFIG_PAX_RANDKSTACK) && !defined(CONFIG_PAX_RANDUSTACK) && !defined(CONFIG_PAX_RANDMMAP) +#error "CONFIG_PAX_ASLR enabled, but RANDKSTACK, RANDUSTACK, and RANDMMAP are disabled." +#endif +#if defined(CONFIG_PAX) && !defined(CONFIG_PAX_NOEXEC) && !defined(CONFIG_PAX_ASLR) +#error "CONFIG_PAX enabled, but no PaX options are enabled." +#endif + +int gr_handle_new_usb(void); + +void gr_handle_brute_attach(int dumpable); +void gr_handle_brute_check(void); +void gr_handle_kernel_exploit(void); + +char gr_roletype_to_char(void); + +int gr_proc_is_restricted(void); + +int gr_acl_enable_at_secure(void); + +int gr_check_user_change(kuid_t real, kuid_t effective, kuid_t fs); +int gr_check_group_change(kgid_t real, kgid_t effective, kgid_t fs); + +int gr_learn_cap(const struct task_struct *task, const struct cred *cred, const int cap, bool log); + +void gr_del_task_from_ip_table(struct task_struct *p); + +int gr_pid_is_chrooted(struct task_struct *p); +int gr_handle_chroot_fowner(struct pid *pid, enum pid_type type); +int gr_handle_chroot_nice(void); +int gr_handle_chroot_sysctl(const int op); +int gr_handle_chroot_setpriority(struct task_struct *p, + const int niceval); +int gr_chroot_fchdir(struct dentry *u_dentry, struct vfsmount *u_mnt); +int gr_chroot_pathat(int dfd, struct dentry *u_dentry, struct vfsmount *u_mnt, unsigned flags); +int gr_chroot_fhandle(void); +int gr_handle_chroot_chroot(const struct dentry *dentry, + const struct vfsmount *mnt); 
+void gr_handle_chroot_chdir(const struct path *path); +int gr_handle_chroot_chmod(const struct dentry *dentry, + const struct vfsmount *mnt, const int mode); +int gr_handle_chroot_mknod(const struct dentry *dentry, + const struct vfsmount *mnt, const int mode); +int gr_handle_chroot_mount(const struct dentry *dentry, + const struct vfsmount *mnt, + const char *dev_name); +int gr_handle_chroot_pivot(void); +int gr_handle_chroot_unix(const pid_t pid); + +int gr_handle_rawio(const struct inode *inode); + +void gr_handle_ioperm(void); +void gr_handle_iopl(void); +void gr_handle_msr_write(void); + +umode_t gr_acl_umask(void); + +int gr_tpe_allow(const struct file *file); + +void gr_set_chroot_entries(struct task_struct *task, const struct path *path); +void gr_clear_chroot_entries(struct task_struct *task); + +void gr_log_forkfail(const int retval); +void gr_log_timechange(void); +void gr_log_signal(const int sig, const void *addr, const struct task_struct *t); +void gr_log_chdir(const struct dentry *dentry, + const struct vfsmount *mnt); +void gr_log_chroot_exec(const struct dentry *dentry, + const struct vfsmount *mnt); +void gr_log_remount(const char *devname, const int retval); +void gr_log_unmount(const char *devname, const int retval); +void gr_log_mount(const char *from, struct path *to, const int retval); +void gr_log_textrel(struct vm_area_struct *vma, bool is_textrel_rw); +void gr_log_ptgnustack(struct file *file); +void gr_log_rwxmmap(struct file *file); +void gr_log_rwxmprotect(struct vm_area_struct *vma); + +int gr_handle_follow_link(const struct dentry *dentry, + const struct vfsmount *mnt); +int gr_handle_fifo(const struct dentry *dentry, + const struct vfsmount *mnt, + const struct dentry *dir, const int flag, + const int acc_mode); +int gr_handle_hardlink(const struct dentry *dentry, + const struct vfsmount *mnt, + const struct filename *to); + +int gr_is_capable(const int cap); +int gr_is_capable_nolog(const int cap); +int gr_task_is_capable(const 
struct task_struct *task, const struct cred *cred, const int cap); +int gr_task_is_capable_nolog(const struct task_struct *task, const struct cred *cred, const int cap); + +void gr_copy_label(struct task_struct *tsk); +void gr_handle_crash(struct task_struct *task, const int sig); +int gr_handle_signal(const struct task_struct *p, const int sig); +int gr_check_crash_uid(const kuid_t uid); +int gr_check_protected_task(const struct task_struct *task); +int gr_check_protected_task_fowner(struct pid *pid, enum pid_type type); +int gr_acl_handle_mmap(const struct file *file, + const unsigned long prot); +int gr_acl_handle_mprotect(const struct file *file, + const unsigned long prot); +int gr_check_hidden_task(const struct task_struct *tsk); +__u32 gr_acl_handle_truncate(const struct dentry *dentry, + const struct vfsmount *mnt); +__u32 gr_acl_handle_utime(const struct dentry *dentry, + const struct vfsmount *mnt); +__u32 gr_acl_handle_access(const struct dentry *dentry, + const struct vfsmount *mnt, const int fmode); +__u32 gr_acl_handle_chmod(const struct dentry *dentry, + const struct vfsmount *mnt, umode_t *mode); +__u32 gr_acl_handle_chown(const struct dentry *dentry, + const struct vfsmount *mnt); +__u32 gr_acl_handle_setxattr(const struct dentry *dentry, + const struct vfsmount *mnt); +__u32 gr_acl_handle_removexattr(const struct dentry *dentry, + const struct vfsmount *mnt); +int gr_handle_ptrace(struct task_struct *task, const long request); +int gr_handle_proc_ptrace(struct task_struct *task); +__u32 gr_acl_handle_execve(const struct dentry *dentry, + const struct vfsmount *mnt); +int gr_check_crash_exec(const struct file *filp); +int gr_acl_is_enabled(void); +void gr_set_role_label(struct task_struct *task, const kuid_t uid, + const kgid_t gid); +int gr_set_proc_label(const struct dentry *dentry, + const struct vfsmount *mnt, + const int unsafe_flags); +__u32 gr_acl_handle_hidden_file(const struct dentry *dentry, + const struct vfsmount *mnt); +__u32 
gr_acl_handle_open(const struct dentry *dentry, + const struct vfsmount *mnt, int acc_mode); +__u32 gr_acl_handle_creat(const struct dentry *dentry, + const struct dentry *p_dentry, + const struct vfsmount *p_mnt, + int open_flags, int acc_mode, const int imode); +void gr_handle_create(const struct dentry *dentry, + const struct vfsmount *mnt); +void gr_handle_proc_create(const struct dentry *dentry, + const struct inode *inode); +__u32 gr_acl_handle_mknod(const struct dentry *new_dentry, + const struct dentry *parent_dentry, + const struct vfsmount *parent_mnt, + const int mode); +__u32 gr_acl_handle_mkdir(const struct dentry *new_dentry, + const struct dentry *parent_dentry, + const struct vfsmount *parent_mnt); +__u32 gr_acl_handle_rmdir(const struct dentry *dentry, + const struct vfsmount *mnt); +void gr_handle_delete(const u64 ino, const dev_t dev); +__u32 gr_acl_handle_unlink(const struct dentry *dentry, + const struct vfsmount *mnt); +__u32 gr_acl_handle_symlink(const struct dentry *new_dentry, + const struct dentry *parent_dentry, + const struct vfsmount *parent_mnt, + const struct filename *from); +__u32 gr_acl_handle_link(const struct dentry *new_dentry, + const struct dentry *parent_dentry, + const struct vfsmount *parent_mnt, + const struct dentry *old_dentry, + const struct vfsmount *old_mnt, const struct filename *to); +int gr_handle_symlink_owner(const struct path *link, const struct inode *target); +int gr_acl_handle_rename(struct dentry *new_dentry, + struct dentry *parent_dentry, + const struct vfsmount *parent_mnt, + struct dentry *old_dentry, + struct inode *old_parent_inode, + struct vfsmount *old_mnt, const struct filename *newname, unsigned int flags); +void gr_handle_rename(struct inode *old_dir, struct inode *new_dir, + struct dentry *old_dentry, + struct dentry *new_dentry, + struct vfsmount *mnt, const __u8 replace, unsigned int flags); +__u32 gr_check_link(const struct dentry *new_dentry, + const struct dentry *parent_dentry, + const 
struct vfsmount *parent_mnt, + const struct dentry *old_dentry, + const struct vfsmount *old_mnt); +int gr_acl_handle_filldir(const struct file *file, const char *name, + const unsigned int namelen, const u64 ino); + +__u32 gr_acl_handle_unix(const struct dentry *dentry, + const struct vfsmount *mnt); +void gr_acl_handle_exit(void); +void gr_acl_handle_psacct(struct task_struct *task, const long code); +int gr_acl_handle_procpidmem(const struct task_struct *task); +int gr_handle_rofs_mount(struct dentry *dentry, struct vfsmount *mnt, int mnt_flags); +int gr_handle_rofs_blockwrite(struct dentry *dentry, struct vfsmount *mnt, int acc_mode); +void gr_audit_ptrace(struct task_struct *task); +dev_t gr_get_dev_from_dentry(struct dentry *dentry); +u64 gr_get_ino_from_dentry(struct dentry *dentry); +void gr_put_exec_file(struct task_struct *task); + +int gr_get_symlinkown_enabled(void); + +int gr_ptrace_readexec(struct file *file, int unsafe_flags); + +int gr_handle_tiocsti(struct tty_struct *tty); + +void gr_inc_chroot_refcnts(struct dentry *dentry, struct vfsmount *mnt); +void gr_dec_chroot_refcnts(struct dentry *dentry, struct vfsmount *mnt); +int gr_bad_chroot_rename(struct dentry *olddentry, struct vfsmount *oldmnt, + struct dentry *newdentry, struct vfsmount *newmnt); + +#ifdef CONFIG_GRKERNSEC_RESLOG +extern void gr_log_resource(const struct task_struct *task, const int res, + const unsigned long wanted, const int gt); +#else +static inline void gr_log_resource(const struct task_struct *task, const int res, + const unsigned long wanted, const int gt) +{ +} +#endif + +#ifdef CONFIG_GRKERNSEC +void task_grsec_rbac(struct seq_file *m, struct task_struct *p); +void gr_handle_vm86(void); +void gr_handle_mem_readwrite(u64 from, u64 to); + +void gr_log_badprocpid(const char *entry); + +extern int grsec_enable_dmesg; +extern int grsec_disable_privio; + +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP +extern kgid_t grsec_proc_gid; +#endif + +#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK 
+extern int grsec_enable_chroot_findtask; +#endif +#ifdef CONFIG_GRKERNSEC_SETXID +extern int grsec_enable_setxid; +#endif +#endif + +#endif diff --git a/include/linux/grsock.h b/include/linux/grsock.h new file mode 100644 index 0000000000..e7ffaafc54 --- /dev/null +++ b/include/linux/grsock.h @@ -0,0 +1,19 @@ +#ifndef __GRSOCK_H +#define __GRSOCK_H + +extern void gr_attach_curr_ip(const struct sock *sk); +extern int gr_handle_sock_all(const int family, const int type, + const int protocol); +extern int gr_handle_sock_server(const struct sockaddr *sck); +extern int gr_handle_sock_server_other(const struct sock *sck); +extern int gr_handle_sock_client(const struct sockaddr *sck); +extern int gr_search_connect(struct socket * sock, + struct sockaddr_in * addr); +extern int gr_search_bind(struct socket * sock, + struct sockaddr_in * addr); +extern int gr_search_listen(struct socket * sock); +extern int gr_search_accept(struct socket * sock); +extern int gr_search_socket(const int domain, const int type, + const int protocol); + +#endif diff --git a/include/linux/hardirq.h b/include/linux/hardirq.h index 76878b357f..c683996110 100644 --- a/include/linux/hardirq.h +++ b/include/linux/hardirq.h @@ -1,30 +1,31 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef LINUX_HARDIRQ_H #define LINUX_HARDIRQ_H -#include #include #include #include -#include #include #include + extern void synchronize_irq(unsigned int irq); extern bool synchronize_hardirq(unsigned int irq); -#ifdef CONFIG_NO_HZ_FULL -void __rcu_irq_enter_check_tick(void); -#else -static inline void __rcu_irq_enter_check_tick(void) { } -#endif +#if defined(CONFIG_TINY_RCU) -static __always_inline void rcu_irq_enter_check_tick(void) +static inline void rcu_nmi_enter(void) { - if (context_tracking_enabled()) - __rcu_irq_enter_check_tick(); } +static inline void rcu_nmi_exit(void) +{ +} + +#else +extern void rcu_nmi_enter(void); +extern void rcu_nmi_exit(void); +#endif + /* * It is safe to do non-atomic ops on 
->hardirq_context, * because NMI handlers may not preempt and the ops are @@ -33,119 +34,51 @@ static __always_inline void rcu_irq_enter_check_tick(void) */ #define __irq_enter() \ do { \ + account_irq_enter_time(current); \ preempt_count_add(HARDIRQ_OFFSET); \ - lockdep_hardirq_enter(); \ - account_hardirq_enter(current); \ - } while (0) - -/* - * Like __irq_enter() without time accounting for fast - * interrupts, e.g. reschedule IPI where time accounting - * is more expensive than the actual interrupt. - */ -#define __irq_enter_raw() \ - do { \ - preempt_count_add(HARDIRQ_OFFSET); \ - lockdep_hardirq_enter(); \ + trace_hardirq_enter(); \ } while (0) /* * Enter irq context (on NO_HZ, update jiffies): */ -void irq_enter(void); -/* - * Like irq_enter(), but RCU is already watching. - */ -void irq_enter_rcu(void); +extern void irq_enter(void); /* * Exit irq context without processing softirqs: */ #define __irq_exit() \ do { \ - account_hardirq_exit(current); \ - lockdep_hardirq_exit(); \ - preempt_count_sub(HARDIRQ_OFFSET); \ - } while (0) - -/* - * Like __irq_exit() without time accounting - */ -#define __irq_exit_raw() \ - do { \ - lockdep_hardirq_exit(); \ + trace_hardirq_exit(); \ + account_irq_exit_time(current); \ preempt_count_sub(HARDIRQ_OFFSET); \ } while (0) /* * Exit irq context and process softirqs if needed: */ -void irq_exit(void); - -/* - * Like irq_exit(), but return with RCU watching. - */ -void irq_exit_rcu(void); - -#ifndef arch_nmi_enter -#define arch_nmi_enter() do { } while (0) -#define arch_nmi_exit() do { } while (0) -#endif - -#ifdef CONFIG_TINY_RCU -static inline void rcu_nmi_enter(void) { } -static inline void rcu_nmi_exit(void) { } -#else -extern void rcu_nmi_enter(void); -extern void rcu_nmi_exit(void); -#endif - -/* - * NMI vs Tracing - * -------------- - * - * We must not land in a tracer until (or after) we've changed preempt_count - * such that in_nmi() becomes true. 
To that effect all NMI C entry points must - * be marked 'notrace' and call nmi_enter() as soon as possible. - */ - -/* - * nmi_enter() can nest up to 15 times; see NMI_BITS. - */ -#define __nmi_enter() \ - do { \ - lockdep_off(); \ - arch_nmi_enter(); \ - BUG_ON(in_nmi() == NMI_MASK); \ - __preempt_count_add(NMI_OFFSET + HARDIRQ_OFFSET); \ - } while (0) +extern void irq_exit(void); #define nmi_enter() \ do { \ - __nmi_enter(); \ - lockdep_hardirq_enter(); \ - rcu_nmi_enter(); \ - instrumentation_begin(); \ + printk_nmi_enter(); \ + lockdep_off(); \ ftrace_nmi_enter(); \ - instrumentation_end(); \ - } while (0) - -#define __nmi_exit() \ - do { \ - BUG_ON(!in_nmi()); \ - __preempt_count_sub(NMI_OFFSET + HARDIRQ_OFFSET); \ - arch_nmi_exit(); \ - lockdep_on(); \ + BUG_ON(in_nmi()); \ + preempt_count_add(NMI_OFFSET + HARDIRQ_OFFSET); \ + rcu_nmi_enter(); \ + trace_hardirq_enter(); \ } while (0) #define nmi_exit() \ do { \ - instrumentation_begin(); \ - ftrace_nmi_exit(); \ - instrumentation_end(); \ + trace_hardirq_exit(); \ rcu_nmi_exit(); \ - lockdep_hardirq_exit(); \ - __nmi_exit(); \ + BUG_ON(!in_nmi()); \ + preempt_count_sub(NMI_OFFSET + HARDIRQ_OFFSET); \ + ftrace_nmi_exit(); \ + lockdep_on(); \ + printk_nmi_exit(); \ } while (0) #endif /* LINUX_HARDIRQ_H */ diff --git a/include/linux/hashtable.h b/include/linux/hashtable.h index f6c666730b..661e5c2a8e 100644 --- a/include/linux/hashtable.h +++ b/include/linux/hashtable.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ /* * Statically sized hash table implementation * (C) 2012 Sasha Levin @@ -145,7 +144,7 @@ static inline void hash_del_rcu(struct hlist_node *node) * hash entry * @name: hashtable to iterate * @bkt: integer to use as bucket loop cursor - * @tmp: a &struct hlist_node used for temporary storage + * @tmp: a &struct used for temporary storage * @obj: the type * to use as a loop cursor for each entry * @member: the name of the hlist_node within the struct */ @@ -168,14 +167,15 @@ static inline 
void hash_del_rcu(struct hlist_node *node) /** * hash_for_each_possible_rcu - iterate over all possible objects hashing to the * same bucket in an rcu enabled hashtable + * in a rcu enabled hashtable * @name: hashtable to iterate * @obj: the type * to use as a loop cursor for each entry * @member: the name of the hlist_node within the struct * @key: the key of the objects to iterate over */ -#define hash_for_each_possible_rcu(name, obj, member, key, cond...) \ +#define hash_for_each_possible_rcu(name, obj, member, key) \ hlist_for_each_entry_rcu(obj, &name[hash_min(key, HASH_BITS(name))],\ - member, ## cond) + member) /** * hash_for_each_possible_rcu_notrace - iterate over all possible objects hashing @@ -197,7 +197,7 @@ static inline void hash_del_rcu(struct hlist_node *node) * same bucket safe against removals * @name: hashtable to iterate * @obj: the type * to use as a loop cursor for each entry - * @tmp: a &struct hlist_node used for temporary storage + * @tmp: a &struct used for temporary storage * @member: the name of the hlist_node within the struct * @key: the key of the objects to iterate over */ diff --git a/include/linux/hdlc.h b/include/linux/hdlc.h index 630a388035..e31bcd4c78 100644 --- a/include/linux/hdlc.h +++ b/include/linux/hdlc.h @@ -1,8 +1,11 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * Generic HDLC support routines for Linux * * Copyright (C) 1999-2005 Krzysztof Halasa + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License + * as published by the Free Software Foundation. 
*/ #ifndef __HDLC_H #define __HDLC_H @@ -22,7 +25,7 @@ struct hdlc_proto { void (*start)(struct net_device *dev); /* if open & DCD */ void (*stop)(struct net_device *dev); /* if open & !DCD */ void (*detach)(struct net_device *dev); - int (*ioctl)(struct net_device *dev, struct if_settings *ifs); + int (*ioctl)(struct net_device *dev, struct ifreq *ifr); __be16 (*type_trans)(struct sk_buff *skb, struct net_device *dev); int (*netif_rx)(struct sk_buff *skb); netdev_tx_t (*xmit)(struct sk_buff *skb, struct net_device *dev); @@ -54,7 +57,7 @@ typedef struct hdlc_device { /* Exported from hdlc module */ /* Called by hardware driver when a user requests HDLC service */ -int hdlc_ioctl(struct net_device *dev, struct if_settings *ifs); +int hdlc_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd); /* Must be used by hardware driver on module startup/exit */ #define register_hdlc_device(dev) register_netdev(dev) @@ -90,6 +93,8 @@ static __inline__ void debug_frame(const struct sk_buff *skb) int hdlc_open(struct net_device *dev); /* Must be called by hardware driver when HDLC device is being closed */ void hdlc_close(struct net_device *dev); +/* May be used by hardware driver */ +int hdlc_change_mtu(struct net_device *dev, int new_mtu); /* Must be pointed to by hw driver's dev->netdev_ops->ndo_start_xmit */ netdev_tx_t hdlc_start_xmit(struct sk_buff *skb, struct net_device *dev); diff --git a/include/linux/hdlcdrv.h b/include/linux/hdlcdrv.h index 5d70c3f98f..be3be25bb8 100644 --- a/include/linux/hdlcdrv.h +++ b/include/linux/hdlcdrv.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ /* * hdlcdrv.h -- HDLC packet radio network driver. 
* The Linux soundcard driver for 1200 baud and 9600 baud packet radio @@ -79,7 +78,7 @@ struct hdlcdrv_ops { */ int (*open)(struct net_device *); int (*close)(struct net_device *); - int (*ioctl)(struct net_device *, void __user *, + int (*ioctl)(struct net_device *, struct ifreq *, struct hdlcdrv_ioctl *, int); }; diff --git a/include/linux/hdmi.h b/include/linux/hdmi.h index c8ec982ff4..e9744202fa 100644 --- a/include/linux/hdmi.h +++ b/include/linux/hdmi.h @@ -27,37 +27,18 @@ #include #include -enum hdmi_packet_type { - HDMI_PACKET_TYPE_NULL = 0x00, - HDMI_PACKET_TYPE_AUDIO_CLOCK_REGEN = 0x01, - HDMI_PACKET_TYPE_AUDIO_SAMPLE = 0x02, - HDMI_PACKET_TYPE_GENERAL_CONTROL = 0x03, - HDMI_PACKET_TYPE_ACP = 0x04, - HDMI_PACKET_TYPE_ISRC1 = 0x05, - HDMI_PACKET_TYPE_ISRC2 = 0x06, - HDMI_PACKET_TYPE_ONE_BIT_AUDIO_SAMPLE = 0x07, - HDMI_PACKET_TYPE_DST_AUDIO = 0x08, - HDMI_PACKET_TYPE_HBR_AUDIO_STREAM = 0x09, - HDMI_PACKET_TYPE_GAMUT_METADATA = 0x0a, - /* + enum hdmi_infoframe_type */ -}; - enum hdmi_infoframe_type { HDMI_INFOFRAME_TYPE_VENDOR = 0x81, HDMI_INFOFRAME_TYPE_AVI = 0x82, HDMI_INFOFRAME_TYPE_SPD = 0x83, HDMI_INFOFRAME_TYPE_AUDIO = 0x84, - HDMI_INFOFRAME_TYPE_DRM = 0x87, }; #define HDMI_IEEE_OUI 0x000c03 -#define HDMI_FORUM_IEEE_OUI 0xc45dd8 #define HDMI_INFOFRAME_HEADER_SIZE 4 #define HDMI_AVI_INFOFRAME_SIZE 13 #define HDMI_SPD_INFOFRAME_SIZE 25 #define HDMI_AUDIO_INFOFRAME_SIZE 10 -#define HDMI_DRM_INFOFRAME_SIZE 26 -#define HDMI_VENDOR_INFOFRAME_SIZE 4 #define HDMI_INFOFRAME_SIZE(type) \ (HDMI_INFOFRAME_HEADER_SIZE + HDMI_ ## type ## _INFOFRAME_SIZE) @@ -97,8 +78,6 @@ enum hdmi_picture_aspect { HDMI_PICTURE_ASPECT_NONE, HDMI_PICTURE_ASPECT_4_3, HDMI_PICTURE_ASPECT_16_9, - HDMI_PICTURE_ASPECT_64_27, - HDMI_PICTURE_ASPECT_256_135, HDMI_PICTURE_ASPECT_RESERVED, }; @@ -119,8 +98,8 @@ enum hdmi_extended_colorimetry { HDMI_EXTENDED_COLORIMETRY_XV_YCC_601, HDMI_EXTENDED_COLORIMETRY_XV_YCC_709, HDMI_EXTENDED_COLORIMETRY_S_YCC_601, - HDMI_EXTENDED_COLORIMETRY_OPYCC_601, 
- HDMI_EXTENDED_COLORIMETRY_OPRGB, + HDMI_EXTENDED_COLORIMETRY_ADOBE_YCC_601, + HDMI_EXTENDED_COLORIMETRY_ADOBE_RGB, /* The following EC values are only defined in CEA-861-F. */ HDMI_EXTENDED_COLORIMETRY_BT2020_CONST_LUM, @@ -155,17 +134,6 @@ enum hdmi_content_type { HDMI_CONTENT_TYPE_GAME, }; -enum hdmi_metadata_type { - HDMI_STATIC_METADATA_TYPE1 = 0, -}; - -enum hdmi_eotf { - HDMI_EOTF_TRADITIONAL_GAMMA_SDR, - HDMI_EOTF_TRADITIONAL_GAMMA_HDR, - HDMI_EOTF_SMPTE_ST2084, - HDMI_EOTF_BT_2100_HLG, -}; - struct hdmi_avi_infoframe { enum hdmi_infoframe_type type; unsigned char version; @@ -189,39 +157,9 @@ struct hdmi_avi_infoframe { unsigned short right_bar; }; -/* DRM Infoframe as per CTA 861.G spec */ -struct hdmi_drm_infoframe { - enum hdmi_infoframe_type type; - unsigned char version; - unsigned char length; - enum hdmi_eotf eotf; - enum hdmi_metadata_type metadata_type; - struct { - u16 x, y; - } display_primaries[3]; - struct { - u16 x, y; - } white_point; - u16 max_display_mastering_luminance; - u16 min_display_mastering_luminance; - u16 max_cll; - u16 max_fall; -}; - -void hdmi_avi_infoframe_init(struct hdmi_avi_infoframe *frame); +int hdmi_avi_infoframe_init(struct hdmi_avi_infoframe *frame); ssize_t hdmi_avi_infoframe_pack(struct hdmi_avi_infoframe *frame, void *buffer, size_t size); -ssize_t hdmi_avi_infoframe_pack_only(const struct hdmi_avi_infoframe *frame, - void *buffer, size_t size); -int hdmi_avi_infoframe_check(struct hdmi_avi_infoframe *frame); -int hdmi_drm_infoframe_init(struct hdmi_drm_infoframe *frame); -ssize_t hdmi_drm_infoframe_pack(struct hdmi_drm_infoframe *frame, void *buffer, - size_t size); -ssize_t hdmi_drm_infoframe_pack_only(const struct hdmi_drm_infoframe *frame, - void *buffer, size_t size); -int hdmi_drm_infoframe_check(struct hdmi_drm_infoframe *frame); -int hdmi_drm_infoframe_unpack_only(struct hdmi_drm_infoframe *frame, - const void *buffer, size_t size); enum hdmi_spd_sdi { HDMI_SPD_SDI_UNKNOWN, @@ -253,9 +191,6 @@ int 
hdmi_spd_infoframe_init(struct hdmi_spd_infoframe *frame, const char *vendor, const char *product); ssize_t hdmi_spd_infoframe_pack(struct hdmi_spd_infoframe *frame, void *buffer, size_t size); -ssize_t hdmi_spd_infoframe_pack_only(const struct hdmi_spd_infoframe *frame, - void *buffer, size_t size); -int hdmi_spd_infoframe_check(struct hdmi_spd_infoframe *frame); enum hdmi_audio_coding_type { HDMI_AUDIO_CODING_TYPE_STREAM, @@ -334,9 +269,6 @@ struct hdmi_audio_infoframe { int hdmi_audio_infoframe_init(struct hdmi_audio_infoframe *frame); ssize_t hdmi_audio_infoframe_pack(struct hdmi_audio_infoframe *frame, void *buffer, size_t size); -ssize_t hdmi_audio_infoframe_pack_only(const struct hdmi_audio_infoframe *frame, - void *buffer, size_t size); -int hdmi_audio_infoframe_check(struct hdmi_audio_infoframe *frame); enum hdmi_3d_structure { HDMI_3D_STRUCTURE_INVALID = -1, @@ -361,39 +293,9 @@ struct hdmi_vendor_infoframe { unsigned int s3d_ext_data; }; -/* HDR Metadata as per 861.G spec */ -struct hdr_static_metadata { - __u8 eotf; - __u8 metadata_type; - __u16 max_cll; - __u16 max_fall; - __u16 min_cll; -}; - -/** - * struct hdr_sink_metadata - HDR sink metadata - * - * Metadata Information read from Sink's EDID - */ -struct hdr_sink_metadata { - /** - * @metadata_type: Static_Metadata_Descriptor_ID. - */ - __u32 metadata_type; - /** - * @hdmi_type1: HDR Metadata Infoframe. 
- */ - union { - struct hdr_static_metadata hdmi_type1; - }; -}; - int hdmi_vendor_infoframe_init(struct hdmi_vendor_infoframe *frame); ssize_t hdmi_vendor_infoframe_pack(struct hdmi_vendor_infoframe *frame, void *buffer, size_t size); -ssize_t hdmi_vendor_infoframe_pack_only(const struct hdmi_vendor_infoframe *frame, - void *buffer, size_t size); -int hdmi_vendor_infoframe_check(struct hdmi_vendor_infoframe *frame); union hdmi_vendor_any_infoframe { struct { @@ -412,7 +314,6 @@ union hdmi_vendor_any_infoframe { * @spd: spd infoframe * @vendor: union of all vendor infoframes * @audio: audio infoframe - * @drm: Dynamic Range and Mastering infoframe * * This is used by the generic pack function. This works since all infoframes * have the same header which also indicates which type of infoframe should be @@ -424,17 +325,12 @@ union hdmi_infoframe { struct hdmi_spd_infoframe spd; union hdmi_vendor_any_infoframe vendor; struct hdmi_audio_infoframe audio; - struct hdmi_drm_infoframe drm; }; -ssize_t hdmi_infoframe_pack(union hdmi_infoframe *frame, void *buffer, - size_t size); -ssize_t hdmi_infoframe_pack_only(const union hdmi_infoframe *frame, - void *buffer, size_t size); -int hdmi_infoframe_check(union hdmi_infoframe *frame); -int hdmi_infoframe_unpack(union hdmi_infoframe *frame, - const void *buffer, size_t size); +ssize_t +hdmi_infoframe_pack(union hdmi_infoframe *frame, void *buffer, size_t size); +int hdmi_infoframe_unpack(union hdmi_infoframe *frame, void *buffer); void hdmi_infoframe_log(const char *level, struct device *dev, - const union hdmi_infoframe *frame); + union hdmi_infoframe *frame); #endif /* _DRM_HDMI_H */ diff --git a/include/linux/hid-debug.h b/include/linux/hid-debug.h index ea7b23d13b..8663f216c5 100644 --- a/include/linux/hid-debug.h +++ b/include/linux/hid-debug.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ #ifndef __HID_DEBUG_H #define __HID_DEBUG_H @@ -7,14 +6,25 @@ */ /* + * This program is free software; you can 
redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + * */ #ifdef CONFIG_DEBUG_FS -#include - #define HID_DEBUG_BUFSIZE 512 -#define HID_DEBUG_FIFOSIZE 512 void hid_dump_input(struct hid_device *, struct hid_usage *, __s32); void hid_dump_report(struct hid_device *, int , u8 *, int); @@ -27,8 +37,11 @@ void hid_debug_init(void); void hid_debug_exit(void); void hid_debug_event(struct hid_device *, char *); + struct hid_debug_list { - DECLARE_KFIFO_PTR(hid_debug_fifo, char); + char *hid_debug_buf; + int head; + int tail; struct fasync_struct *fasync; struct hid_device *hdev; struct list_head node; @@ -51,3 +64,4 @@ struct hid_debug_list { #endif #endif + diff --git a/include/linux/hid-roccat.h b/include/linux/hid-roccat.h index 3214fb0815..24e1ca01f9 100644 --- a/include/linux/hid-roccat.h +++ b/include/linux/hid-roccat.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ #ifndef __HID_ROCCAT_H #define __HID_ROCCAT_H @@ -7,6 +6,10 @@ */ /* + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 2 of the License, or (at your option) + * any later version. 
*/ #include diff --git a/include/linux/hid-sensor-hub.h b/include/linux/hid-sensor-hub.h index c27329e2a5..dd85f35034 100644 --- a/include/linux/hid-sensor-hub.h +++ b/include/linux/hid-sensor-hub.h @@ -1,7 +1,20 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * HID Sensors Driver * Copyright (c) 2012, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + * */ #ifndef _HID_SENSORS_HUB_H #define _HID_SENSORS_HUB_H @@ -150,7 +163,7 @@ int sensor_hub_remove_callback(struct hid_sensor_hub_device *hsdev, * @info: return information about attribute after parsing report * * Parses report and returns the attribute information such as report id, -* field index, units and exponent etc. +* field index, units and exponet etc. */ int sensor_hub_input_get_attribute_info(struct hid_sensor_hub_device *hsdev, u8 type, @@ -164,10 +177,9 @@ int sensor_hub_input_get_attribute_info(struct hid_sensor_hub_device *hsdev, * @attr_usage_id: Attribute usage id as per spec * @report_id: Report id to look for * @flag: Synchronous or asynchronous read -* @is_signed: If true then fields < 32 bits will be sign-extended * * Issues a synchronous or asynchronous read request for an input attribute. -* Return: data up to 32 bits. +* Returns data upto 32 bits. 
*/ enum sensor_hub_read_flags { @@ -178,8 +190,7 @@ enum sensor_hub_read_flags { int sensor_hub_input_attr_get_raw_value(struct hid_sensor_hub_device *hsdev, u32 usage_id, u32 attr_usage_id, u32 report_id, - enum sensor_hub_read_flags flag, - bool is_signed + enum sensor_hub_read_flags flag ); /** @@ -205,9 +216,8 @@ int sensor_hub_set_feature(struct hid_sensor_hub_device *hsdev, u32 report_id, * @buffer: buffer to copy output * * Used to get a field in feature report. For example this can get polling -* interval, sensitivity, activate/deactivate state. -* Return: On success, it returns the number of bytes copied to buffer. -* On failure, it returns value < 0. +* interval, sensitivity, activate/deactivate state. On success it returns +* number of bytes copied to buffer. On failure, it returns value < 0. */ int sensor_hub_get_feature(struct hid_sensor_hub_device *hsdev, u32 report_id, u32 field_index, int buffer_size, void *buffer); @@ -221,18 +231,11 @@ struct hid_sensor_common { unsigned usage_id; atomic_t data_ready; atomic_t user_requested_state; - atomic_t runtime_pm_enable; - int poll_interval; - int raw_hystersis; - int latency_ms; struct iio_trigger *trigger; - int timestamp_ns_scale; struct hid_sensor_hub_attribute_info poll; struct hid_sensor_hub_attribute_info report_state; struct hid_sensor_hub_attribute_info power_state; struct hid_sensor_hub_attribute_info sensitivity; - struct hid_sensor_hub_attribute_info sensitivity_rel; - struct hid_sensor_hub_attribute_info report_latency; struct work_struct work; }; @@ -249,17 +252,11 @@ static inline int hid_sensor_convert_exponent(int unit_expo) int hid_sensor_parse_common_attributes(struct hid_sensor_hub_device *hsdev, u32 usage_id, - struct hid_sensor_common *st, - const u32 *sensitivity_addresses, - u32 sensitivity_addresses_len); + struct hid_sensor_common *st); int hid_sensor_write_raw_hyst_value(struct hid_sensor_common *st, int val1, int val2); -int hid_sensor_write_raw_hyst_rel_value(struct 
hid_sensor_common *st, int val1, - int val2); int hid_sensor_read_raw_hyst_value(struct hid_sensor_common *st, int *val1, int *val2); -int hid_sensor_read_raw_hyst_rel_value(struct hid_sensor_common *st, - int *val1, int *val2); int hid_sensor_write_samp_freq_value(struct hid_sensor_common *st, int val1, int val2); int hid_sensor_read_samp_freq_value(struct hid_sensor_common *st, @@ -274,10 +271,4 @@ int hid_sensor_format_scale(u32 usage_id, s32 hid_sensor_read_poll_value(struct hid_sensor_common *st); -int64_t hid_sensor_convert_timestamp(struct hid_sensor_common *st, - int64_t raw_value); -bool hid_sensor_batch_mode_supported(struct hid_sensor_common *st); -int hid_sensor_set_report_latency(struct hid_sensor_common *st, int latency); -int hid_sensor_get_report_latency(struct hid_sensor_common *st); - #endif diff --git a/include/linux/hid-sensor-ids.h b/include/linux/hid-sensor-ids.h index ac63115940..f2ee90aed0 100644 --- a/include/linux/hid-sensor-ids.h +++ b/include/linux/hid-sensor-ids.h @@ -1,7 +1,20 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * HID Sensors Driver * Copyright (c) 2012, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. 
+ * */ #ifndef _HID_SENSORS_IDS_H #define _HID_SENSORS_IDS_H @@ -32,14 +45,6 @@ #define HID_USAGE_SENSOR_DATA_ATMOSPHERIC_PRESSURE 0x200430 #define HID_USAGE_SENSOR_ATMOSPHERIC_PRESSURE 0x200431 -/* Tempreture (200033) */ -#define HID_USAGE_SENSOR_TEMPERATURE 0x200033 -#define HID_USAGE_SENSOR_DATA_ENVIRONMENTAL_TEMPERATURE 0x200434 - -/* humidity */ -#define HID_USAGE_SENSOR_HUMIDITY 0x200032 -#define HID_USAGE_SENSOR_ATMOSPHERIC_HUMIDITY 0x200433 - /* Gyro 3D: (200076) */ #define HID_USAGE_SENSOR_GYRO_3D 0x200076 #define HID_USAGE_SENSOR_DATA_ANGL_VELOCITY 0x200456 @@ -47,9 +52,6 @@ #define HID_USAGE_SENSOR_ANGL_VELOCITY_Y_AXIS 0x200458 #define HID_USAGE_SENSOR_ANGL_VELOCITY_Z_AXIS 0x200459 -/* Gravity vector */ -#define HID_USAGE_SENSOR_GRAVITY_VECTOR 0x20007B - /* ORIENTATION: Compass 3D: (200083) */ #define HID_USAGE_SENSOR_COMPASS_3D 0x200083 #define HID_USAGE_SENSOR_DATA_ORIENTATION 0x200470 @@ -77,8 +79,6 @@ #define HID_USAGE_SENSOR_ORIENT_TILT_Z 0x200481 #define HID_USAGE_SENSOR_DEVICE_ORIENTATION 0x20008A -#define HID_USAGE_SENSOR_RELATIVE_ORIENTATION 0x20008E -#define HID_USAGE_SENSOR_GEOMAGNETIC_ORIENTATION 0x2000C1 #define HID_USAGE_SENSOR_ORIENT_ROTATION_MATRIX 0x200482 #define HID_USAGE_SENSOR_ORIENT_QUATERNION 0x200483 #define HID_USAGE_SENSOR_ORIENT_MAGN_FLUX 0x200484 @@ -95,7 +95,6 @@ #define HID_USAGE_SENSOR_TIME_HOUR 0x200525 #define HID_USAGE_SENSOR_TIME_MINUTE 0x200526 #define HID_USAGE_SENSOR_TIME_SECOND 0x200527 -#define HID_USAGE_SENSOR_TIME_TIMESTAMP 0x200529 /* Units */ #define HID_USAGE_SENSOR_UNITS_NOT_SPECIFIED 0x00 @@ -128,10 +127,6 @@ #define HID_USAGE_SENSOR_UNITS_DEGREES_PER_SECOND 0x15 /* Common selectors */ -#define HID_USAGE_SENSOR_PROP_DESC 0x200300 -#define HID_USAGE_SENSOR_PROP_FRIENDLY_NAME 0x200301 -#define HID_USAGE_SENSOR_PROP_SERIAL_NUM 0x200307 -#define HID_USAGE_SENSOR_PROP_MANUFACTURER 0x200305 #define HID_USAGE_SENSOR_PROP_REPORT_INTERVAL 0x20030E #define HID_USAGE_SENSOR_PROP_SENSITIVITY_ABS 0x20030F #define 
HID_USAGE_SENSOR_PROP_SENSITIVITY_RANGE_PCT 0x200310 @@ -143,13 +138,9 @@ #define HID_USAGE_SENSOR_PROP_REPORT_STATE 0x200316 #define HID_USAGE_SENSOR_PROY_POWER_STATE 0x200319 -/* Batch mode selectors */ -#define HID_USAGE_SENSOR_PROP_REPORT_LATENCY 0x20031B - /* Per data field properties */ #define HID_USAGE_SENSOR_DATA_MOD_NONE 0x00 #define HID_USAGE_SENSOR_DATA_MOD_CHANGE_SENSITIVITY_ABS 0x1000 -#define HID_USAGE_SENSOR_DATA_MOD_CHANGE_SENSITIVITY_REL_PCT 0xE000 /* Power state enumerations */ #define HID_USAGE_SENSOR_PROP_POWER_STATE_UNDEFINED_ENUM 0x200850 @@ -163,14 +154,4 @@ #define HID_USAGE_SENSOR_PROP_REPORTING_STATE_NO_EVENTS_ENUM 0x200840 #define HID_USAGE_SENSOR_PROP_REPORTING_STATE_ALL_EVENTS_ENUM 0x200841 -/* Custom Sensor (2000e1) */ -#define HID_USAGE_SENSOR_HINGE 0x20020B -#define HID_USAGE_SENSOR_DATA_FIELD_LOCATION 0x200400 -#define HID_USAGE_SENSOR_DATA_FIELE_TIME_SINCE_SYS_BOOT 0x20052B -#define HID_USAGE_SENSOR_DATA_FIELD_CUSTOM_USAGE 0x200541 -#define HID_USAGE_SENSOR_DATA_FIELD_CUSTOM_VALUE_BASE 0x200543 -/* Custom Sensor data 28=>x>=0 */ -#define HID_USAGE_SENSOR_DATA_FIELD_CUSTOM_VALUE(x) \ - (HID_USAGE_SENSOR_DATA_FIELD_CUSTOM_VALUE_BASE + (x)) - #endif diff --git a/include/linux/hid.h b/include/linux/hid.h index 9e067f937d..b2ec82712b 100644 --- a/include/linux/hid.h +++ b/include/linux/hid.h @@ -1,10 +1,22 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * Copyright (c) 1999 Andreas Gal * Copyright (c) 2000-2001 Vojtech Pavlik * Copyright (c) 2006-2007 Jiri Kosina */ /* + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * * Should you need to contact me, the author, you can do so either by * e-mail - mail your message to , or by paper mail: @@ -14,7 +26,6 @@ #define __HID_H -#include #include #include #include @@ -23,7 +34,6 @@ #include #include #include -#include #include #include @@ -102,7 +112,6 @@ struct hid_item { #define HID_COLLECTION_PHYSICAL 0 #define HID_COLLECTION_APPLICATION 1 #define HID_COLLECTION_LOGICAL 2 -#define HID_COLLECTION_NAMED_ARRAY 4 /* * HID report descriptor global item tags @@ -154,7 +163,6 @@ struct hid_item { #define HID_UP_CONSUMER 0x000c0000 #define HID_UP_DIGITIZER 0x000d0000 #define HID_UP_PID 0x000f0000 -#define HID_UP_BATTERY 0x00850000 #define HID_UP_HPVENDOR 0xff7f0000 #define HID_UP_HPVENDOR2 0xff010000 #define HID_UP_MSVENDOR 0xff000000 @@ -164,8 +172,6 @@ struct hid_item { #define HID_UP_LOGIVENDOR3 0xff430000 #define HID_UP_LNVENDOR 0xffa00000 #define HID_UP_SENSOR 0x00200000 -#define HID_UP_ASUSVENDOR 0xff310000 -#define HID_UP_GOOGLEVENDOR 0xffd10000 #define HID_USAGE 0x0000ffff @@ -176,17 +182,6 @@ struct hid_item { #define HID_GD_KEYBOARD 0x00010006 #define HID_GD_KEYPAD 0x00010007 #define HID_GD_MULTIAXIS 0x00010008 -/* - * Microsoft Win8 Wireless Radio Controls extensions CA, see: - * http://www.usb.org/developers/hidpage/HUTRR40RadioHIDUsagesFinal.pdf - */ -#define HID_GD_WIRELESS_RADIO_CTLS 0x0001000c -/* - * System Multi-Axis, see: - * http://www.usb.org/developers/hidpage/HUTRR62_-_Generic_Desktop_CA_for_System_Multi-Axis_Controllers.txt - */ -#define HID_GD_SYSTEM_MULTIAXIS 0x0001000e - #define HID_GD_X 0x00010030 #define HID_GD_Y 0x00010031 #define HID_GD_Z 0x00010032 @@ -210,28 +205,21 @@ struct hid_item { #define HID_GD_VBRZ 0x00010045 #define HID_GD_VNO 
0x00010046 #define HID_GD_FEATURE 0x00010047 -#define HID_GD_RESOLUTION_MULTIPLIER 0x00010048 #define HID_GD_SYSTEM_CONTROL 0x00010080 #define HID_GD_UP 0x00010090 #define HID_GD_DOWN 0x00010091 #define HID_GD_RIGHT 0x00010092 #define HID_GD_LEFT 0x00010093 -/* Microsoft Win8 Wireless Radio Controls CA usage codes */ -#define HID_GD_RFKILL_BTN 0x000100c6 -#define HID_GD_RFKILL_LED 0x000100c7 -#define HID_GD_RFKILL_SWITCH 0x000100c8 #define HID_DC_BATTERYSTRENGTH 0x00060020 #define HID_CP_CONSUMER_CONTROL 0x000c0001 -#define HID_CP_AC_PAN 0x000c0238 #define HID_DG_DIGITIZER 0x000d0001 #define HID_DG_PEN 0x000d0002 #define HID_DG_LIGHTPEN 0x000d0003 #define HID_DG_TOUCHSCREEN 0x000d0004 #define HID_DG_TOUCHPAD 0x000d0005 -#define HID_DG_WHITEBOARD 0x000d0006 #define HID_DG_STYLUS 0x000d0020 #define HID_DG_PUCK 0x000d0021 #define HID_DG_FINGER 0x000d0022 @@ -243,11 +231,7 @@ struct hid_item { #define HID_DG_TAP 0x000d0035 #define HID_DG_TABLETFUNCTIONKEY 0x000d0039 #define HID_DG_PROGRAMCHANGEKEY 0x000d003a -#define HID_DG_BATTERYSTRENGTH 0x000d003b #define HID_DG_INVERT 0x000d003c -#define HID_DG_TILT_X 0x000d003d -#define HID_DG_TILT_Y 0x000d003e -#define HID_DG_TWIST 0x000d0041 #define HID_DG_TIPSWITCH 0x000d0042 #define HID_DG_TIPSWITCH2 0x000d0043 #define HID_DG_BARRELSWITCH 0x000d0044 @@ -264,8 +248,6 @@ struct hid_item { #define HID_CP_SELECTION 0x000c0080 #define HID_CP_MEDIASELECTION 0x000c0087 #define HID_CP_SELECTDISC 0x000c00ba -#define HID_CP_VOLUMEUP 0x000c00e9 -#define HID_CP_VOLUMEDOWN 0x000c00ea #define HID_CP_PLAYBACKSPEED 0x000c00f1 #define HID_CP_PROXIMITY 0x000c0109 #define HID_CP_SPEAKERSYSTEM 0x000c0160 @@ -282,9 +264,6 @@ struct hid_item { #define HID_CP_APPLICATIONLAUNCHBUTTONS 0x000c0180 #define HID_CP_GENERICGUIAPPLICATIONCONTROLS 0x000c0200 -#define HID_DG_DEVICECONFIG 0x000d000e -#define HID_DG_DEVICESETTINGS 0x000d0023 -#define HID_DG_AZIMUTH 0x000d003f #define HID_DG_CONFIDENCE 0x000d0047 #define HID_DG_WIDTH 0x000d0048 #define 
HID_DG_HEIGHT 0x000d0049 @@ -293,17 +272,10 @@ struct hid_item { #define HID_DG_DEVICEINDEX 0x000d0053 #define HID_DG_CONTACTCOUNT 0x000d0054 #define HID_DG_CONTACTMAX 0x000d0055 -#define HID_DG_SCANTIME 0x000d0056 -#define HID_DG_SURFACESWITCH 0x000d0057 -#define HID_DG_BUTTONSWITCH 0x000d0058 #define HID_DG_BUTTONTYPE 0x000d0059 #define HID_DG_BARRELSWITCH2 0x000d005a #define HID_DG_TOOLSERIALNUMBER 0x000d005b -#define HID_DG_LATENCYMODE 0x000d0060 -#define HID_BAT_ABSOLUTESTATEOFCHARGE 0x00850065 - -#define HID_VD_ASUS_CUSTOM_MEDIA_KEYS 0xff310076 /* * HID report types --- Ouch! HID spec says 1 2 3! */ @@ -318,13 +290,13 @@ struct hid_item { * HID connect requests */ -#define HID_CONNECT_HIDINPUT BIT(0) -#define HID_CONNECT_HIDINPUT_FORCE BIT(1) -#define HID_CONNECT_HIDRAW BIT(2) -#define HID_CONNECT_HIDDEV BIT(3) -#define HID_CONNECT_HIDDEV_FORCE BIT(4) -#define HID_CONNECT_FF BIT(5) -#define HID_CONNECT_DRIVER BIT(6) +#define HID_CONNECT_HIDINPUT 0x01 +#define HID_CONNECT_HIDINPUT_FORCE 0x02 +#define HID_CONNECT_HIDRAW 0x04 +#define HID_CONNECT_HIDDEV 0x08 +#define HID_CONNECT_HIDDEV_FORCE 0x10 +#define HID_CONNECT_FF 0x20 +#define HID_CONNECT_DRIVER 0x40 #define HID_CONNECT_DEFAULT (HID_CONNECT_HIDINPUT|HID_CONNECT_HIDRAW| \ HID_CONNECT_HIDDEV|HID_CONNECT_FF) @@ -337,27 +309,24 @@ struct hid_item { */ #define MAX_USBHID_BOOT_QUIRKS 4 -#define HID_QUIRK_INVERT BIT(0) -#define HID_QUIRK_NOTOUCH BIT(1) -#define HID_QUIRK_IGNORE BIT(2) -#define HID_QUIRK_NOGET BIT(3) -#define HID_QUIRK_HIDDEV_FORCE BIT(4) -#define HID_QUIRK_BADPAD BIT(5) -#define HID_QUIRK_MULTI_INPUT BIT(6) -#define HID_QUIRK_HIDINPUT_FORCE BIT(7) -/* BIT(8) reserved for backward compatibility, was HID_QUIRK_NO_EMPTY_INPUT */ -/* BIT(9) reserved for backward compatibility, was NO_INIT_INPUT_REPORTS */ -#define HID_QUIRK_ALWAYS_POLL BIT(10) -#define HID_QUIRK_INPUT_PER_APP BIT(11) -#define HID_QUIRK_SKIP_OUTPUT_REPORTS BIT(16) -#define HID_QUIRK_SKIP_OUTPUT_REPORT_ID BIT(17) -#define 
HID_QUIRK_NO_OUTPUT_REPORTS_ON_INTR_EP BIT(18) -#define HID_QUIRK_HAVE_SPECIAL_DRIVER BIT(19) -#define HID_QUIRK_INCREMENT_USAGE_ON_DUPLICATE BIT(20) -#define HID_QUIRK_FULLSPEED_INTERVAL BIT(28) -#define HID_QUIRK_NO_INIT_REPORTS BIT(29) -#define HID_QUIRK_NO_IGNORE BIT(30) -#define HID_QUIRK_NO_INPUT_SYNC BIT(31) +#define HID_QUIRK_INVERT 0x00000001 +#define HID_QUIRK_NOTOUCH 0x00000002 +#define HID_QUIRK_IGNORE 0x00000004 +#define HID_QUIRK_NOGET 0x00000008 +#define HID_QUIRK_HIDDEV_FORCE 0x00000010 +#define HID_QUIRK_BADPAD 0x00000020 +#define HID_QUIRK_MULTI_INPUT 0x00000040 +#define HID_QUIRK_HIDINPUT_FORCE 0x00000080 +#define HID_QUIRK_NO_EMPTY_INPUT 0x00000100 +#define HID_QUIRK_NO_INIT_INPUT_REPORTS 0x00000200 +#define HID_QUIRK_ALWAYS_POLL 0x00000400 +#define HID_QUIRK_SKIP_OUTPUT_REPORTS 0x00010000 +#define HID_QUIRK_SKIP_OUTPUT_REPORT_ID 0x00020000 +#define HID_QUIRK_NO_OUTPUT_REPORTS_ON_INTR_EP 0x00040000 +#define HID_QUIRK_FULLSPEED_INTERVAL 0x10000000 +#define HID_QUIRK_NO_INIT_REPORTS 0x20000000 +#define HID_QUIRK_NO_IGNORE 0x40000000 +#define HID_QUIRK_NO_INPUT_SYNC 0x80000000 /* * HID device groups @@ -376,15 +345,6 @@ struct hid_item { #define HID_GROUP_RMI 0x0100 #define HID_GROUP_WACOM 0x0101 #define HID_GROUP_LOGITECH_DJ_DEVICE 0x0102 -#define HID_GROUP_STEAM 0x0103 -#define HID_GROUP_LOGITECH_27MHZ_DEVICE 0x0104 -#define HID_GROUP_VIVALDI 0x0105 - -/* - * HID protocol status - */ -#define HID_REPORT_PROTOCOL 1 -#define HID_BOOT_PROTOCOL 0 /* * This is the global environment of the parser. 
This information is @@ -414,7 +374,6 @@ struct hid_global { struct hid_local { unsigned usage[HID_MAX_USAGES]; /* usage array */ - u8 usage_size[HID_MAX_USAGES]; /* usage size array */ unsigned collection_index[HID_MAX_USAGES]; /* collection index array */ unsigned usage_index; unsigned usage_minimum; @@ -428,7 +387,6 @@ struct hid_local { */ struct hid_collection { - int parent_idx; /* device->collection */ unsigned type; unsigned usage; unsigned level; @@ -438,16 +396,12 @@ struct hid_usage { unsigned hid; /* hid usage code */ unsigned collection_index; /* index into collection array */ unsigned usage_index; /* index into usage array */ - __s8 resolution_multiplier;/* Effective Resolution Multiplier - (HUT v1.12, 4.3.1), default: 1 */ /* hidinput data */ - __s8 wheel_factor; /* 120/resolution_multiplier */ __u16 code; /* input driver code */ __u8 type; /* input driver type */ __s8 hat_min; /* hat switch fun */ __s8 hat_max; /* ditto */ __s8 hat_dir; /* ditto */ - __s16 wheel_accumulated; /* hi-res wheel */ }; struct hid_input; @@ -481,10 +435,8 @@ struct hid_field { struct hid_report { struct list_head list; - struct list_head hidinput_list; - unsigned int id; /* id of this report */ - unsigned int type; /* report type */ - unsigned int application; /* application usage for this report */ + unsigned id; /* id of this report */ + unsigned type; /* report type */ struct hid_field *field[HID_MAX_FIELDS]; /* fields of the report */ unsigned maxfield; /* maximum valid field index */ unsigned size; /* size of the report (bits) */ @@ -500,7 +452,7 @@ struct hid_report_enum { }; #define HID_MIN_BUFFER_SIZE 64 /* make sure there is at least a packet size of space */ -#define HID_MAX_BUFFER_SIZE 16384 /* 16kb */ +#define HID_MAX_BUFFER_SIZE 4096 /* 4kb */ #define HID_CONTROL_FIFO_SIZE 256 /* to init devices with >100 reports */ #define HID_OUTPUT_FIFO_SIZE 64 @@ -515,24 +467,18 @@ struct hid_output_fifo { char *raw_report; }; -#define HID_CLAIMED_INPUT BIT(0) -#define 
HID_CLAIMED_HIDDEV BIT(1) -#define HID_CLAIMED_HIDRAW BIT(2) -#define HID_CLAIMED_DRIVER BIT(3) +#define HID_CLAIMED_INPUT 1 +#define HID_CLAIMED_HIDDEV 2 +#define HID_CLAIMED_HIDRAW 4 +#define HID_CLAIMED_DRIVER 8 -#define HID_STAT_ADDED BIT(0) -#define HID_STAT_PARSED BIT(1) -#define HID_STAT_DUP_DETECTED BIT(2) -#define HID_STAT_REPROBED BIT(3) +#define HID_STAT_ADDED 1 +#define HID_STAT_PARSED 2 struct hid_input { struct list_head list; struct hid_report *report; struct input_dev *input; - const char *name; - bool registered; - struct list_head reports; /* the list of reports */ - unsigned int application; /* application usage for this input */ }; enum hid_type { @@ -541,12 +487,6 @@ enum hid_type { HID_TYPE_USBNONE }; -enum hid_battery_status { - HID_BATTERY_UNKNOWN = 0, - HID_BATTERY_QUERIED, /* Kernel explicitly queried battery strength */ - HID_BATTERY_REPORTED, /* Device sent unsolicited battery strength report */ -}; - struct hid_driver; struct hid_ll_driver; @@ -569,13 +509,11 @@ struct hid_device { /* device report descriptor */ struct hid_report_enum report_enum[HID_REPORT_TYPES]; struct work_struct led_work; /* delayed LED worker */ + struct semaphore driver_lock; /* protects the current driver, except during input */ struct semaphore driver_input_lock; /* protects the current driver */ struct device dev; /* device */ struct hid_driver *driver; - struct hid_ll_driver *ll_driver; - struct mutex ll_open_lock; - unsigned int ll_open_count; #ifdef CONFIG_HID_BATTERY_STRENGTH /* @@ -584,25 +522,23 @@ struct hid_device { /* device report descriptor */ * battery is non-NULL. 
*/ struct power_supply *battery; - __s32 battery_capacity; __s32 battery_min; __s32 battery_max; __s32 battery_report_type; __s32 battery_report_id; - enum hid_battery_status battery_status; - bool battery_avoid_query; - ktime_t battery_ratelimit_time; #endif - unsigned long status; /* see STAT flags above */ + unsigned int status; /* see STAT flags above */ unsigned claimed; /* Claimed by hidinput, hiddev? */ unsigned quirks; /* Various quirks the device can pull on us */ - bool io_started; /* If IO has started */ + bool io_started; /* Protected by driver_lock. If IO has started */ struct list_head inputs; /* The list of inputs */ void *hiddev; /* The hiddev structure */ void *hidraw; + int minor; /* Hiddev minor number */ + int open; /* is the device open by anyone? */ char name[128]; /* Device name */ char phys[64]; /* Device physical location */ char uniq[64]; /* Device unique identifier (serial #) */ @@ -652,13 +588,12 @@ static inline void hid_set_drvdata(struct hid_device *hdev, void *data) struct hid_parser { struct hid_global global; struct hid_global global_stack[HID_GLOBAL_STACK_SIZE]; - unsigned int global_stack_ptr; + unsigned global_stack_ptr; struct hid_local local; - unsigned int *collection_stack; - unsigned int collection_stack_ptr; - unsigned int collection_stack_size; + unsigned collection_stack[HID_COLLECTION_STACK_SIZE]; + unsigned collection_stack_ptr; struct hid_device *device; - unsigned int scan_flags; + unsigned scan_flags; }; struct hid_class_descriptor { @@ -708,7 +643,6 @@ struct hid_usage_id { * to be called) * @dyn_list: list of dynamically added device ids * @dyn_lock: lock protecting @dyn_list - * @match: check if the given device is handled by this driver * @probe: new device inserted * @remove: device removed (NULL if not a hot-plug capable driver) * @report_table: on which reports to call raw_event (NULL means all) @@ -729,8 +663,8 @@ struct hid_usage_id { * input will not be passed to raw_event unless hid_device_io_start is * 
called. * - * raw_event and event should return negative on error, any other value will - * pass the event on to .event() typically return 0 for success. + * raw_event and event should return 0 on no action performed, 1 when no + * further processing should be done and negative on error * * input_mapping shall return a negative value to completely ignore this usage * (e.g. doubled or invalid usage), zero to continue with parsing of this @@ -749,7 +683,6 @@ struct hid_driver { struct list_head dyn_list; spinlock_t dyn_lock; - bool (*match)(struct hid_device *dev, bool ignore_special_driver); int (*probe)(struct hid_device *dev, const struct hid_device_id *id); void (*remove)(struct hid_device *dev); @@ -793,7 +726,6 @@ struct hid_driver { * @stop: called on remove * @open: called by input layer on open * @close: called by input layer on close - * @power: request underlying hardware to enter requested power mode * @parse: this method is called only once to parse the device data, * shouldn't allocate anything to not leak memory * @request: send report request to device (e.g. feature report) @@ -801,7 +733,6 @@ struct hid_driver { * @raw_request: send raw report request to device (e.g. 
feature report) * @output_report: send output report to device * @idle: send idle request to device - * @may_wakeup: return if device may act as a wakeup source during system-suspend */ struct hid_ll_driver { int (*start)(struct hid_device *hdev); @@ -826,30 +757,14 @@ struct hid_ll_driver { int (*output_report) (struct hid_device *hdev, __u8 *buf, size_t len); int (*idle)(struct hid_device *hdev, int report, int idle, int reqtype); - bool (*may_wakeup)(struct hid_device *hdev); }; -extern struct hid_ll_driver i2c_hid_ll_driver; -extern struct hid_ll_driver hidp_hid_driver; -extern struct hid_ll_driver uhid_hid_driver; -extern struct hid_ll_driver usb_hid_driver; - -static inline bool hid_is_using_ll_driver(struct hid_device *hdev, - struct hid_ll_driver *driver) -{ - return hdev->ll_driver == driver; -} - #define PM_HINT_FULLON 1<<5 #define PM_HINT_NORMAL 1<<1 /* Applications from HID Usage Tables 4/8/99 Version 1.1 */ /* We ignore a few input applications that are not widely used */ -#define IS_INPUT_APPLICATION(a) \ - (((a >= HID_UP_GENDESK) && (a <= HID_GD_MULTIAXIS)) \ - || ((a >= HID_DG_PEN) && (a <= HID_DG_WHITEBOARD)) \ - || (a == HID_GD_SYSTEM_CONTROL) || (a == HID_CP_CONSUMER_CONTROL) \ - || (a == HID_GD_WIRELESS_RADIO_CTLS)) +#define IS_INPUT_APPLICATION(a) (((a >= 0x00010000) && (a <= 0x00010008)) || (a == 0x00010080) || (a == 0x000c0001) || ((a >= 0x000d0002) && (a <= 0x000d0006))) /* HID core API */ @@ -859,8 +774,6 @@ extern bool hid_ignore(struct hid_device *); extern int hid_add_device(struct hid_device *); extern void hid_destroy_device(struct hid_device *); -extern struct bus_type hid_bus_type; - extern int __must_check __hid_register_driver(struct hid_driver *, struct module *, const char *mod_name); @@ -888,37 +801,27 @@ extern int hidinput_connect(struct hid_device *hid, unsigned int force); extern void hidinput_disconnect(struct hid_device *); int hid_set_field(struct hid_field *, unsigned, __s32); -int hid_input_report(struct hid_device *, 
int type, u8 *, u32, int); +int hid_input_report(struct hid_device *, int type, u8 *, int, int); int hidinput_find_field(struct hid_device *hid, unsigned int type, unsigned int code, struct hid_field **field); struct hid_field *hidinput_get_led_field(struct hid_device *hid); unsigned int hidinput_count_leds(struct hid_device *hid); __s32 hidinput_calc_abs_res(const struct hid_field *field, __u16 code); void hid_output_report(struct hid_report *report, __u8 *data); -int __hid_request(struct hid_device *hid, struct hid_report *rep, int reqtype); +void __hid_request(struct hid_device *hid, struct hid_report *rep, int reqtype); u8 *hid_alloc_report_buf(struct hid_report *report, gfp_t flags); struct hid_device *hid_allocate_device(void); -struct hid_report *hid_register_report(struct hid_device *device, - unsigned int type, unsigned int id, - unsigned int application); +struct hid_report *hid_register_report(struct hid_device *device, unsigned type, unsigned id); int hid_parse_report(struct hid_device *hid, __u8 *start, unsigned size); struct hid_report *hid_validate_values(struct hid_device *hid, unsigned int type, unsigned int id, unsigned int field_index, unsigned int report_counts); - -void hid_setup_resolution_multiplier(struct hid_device *hid); int hid_open_report(struct hid_device *device); int hid_check_keys_pressed(struct hid_device *hid); int hid_connect(struct hid_device *hid, unsigned int connect_mask); void hid_disconnect(struct hid_device *hid); -bool hid_match_one_id(const struct hid_device *hdev, - const struct hid_device_id *id); -const struct hid_device_id *hid_match_id(const struct hid_device *hdev, +const struct hid_device_id *hid_match_id(struct hid_device *hdev, const struct hid_device_id *id); -const struct hid_device_id *hid_match_device(struct hid_device *hdev, - struct hid_driver *hdrv); -bool hid_compare_device_paths(struct hid_device *hdev_a, - struct hid_device *hdev_b, char separator); s32 hid_snto32(__u32 value, unsigned n); __u32 
hid_field_extract(const struct hid_device *hid, __u8 *report, unsigned offset, unsigned n); @@ -926,7 +829,7 @@ __u32 hid_field_extract(const struct hid_device *hid, __u8 *report, /** * hid_device_io_start - enable HID input during probe, remove * - * @hid: the device + * @hid - the device * * This should only be called during probe or remove and only be * called by the thread calling probe or remove. It will allow @@ -944,7 +847,7 @@ static inline void hid_device_io_start(struct hid_device *hid) { /** * hid_device_io_stop - disable HID input during probe, remove * - * @hid: the device + * @hid - the device * * Should only be called after hid_device_io_start. It will prevent * incoming packets from going to the driver for the duration of @@ -970,61 +873,39 @@ static inline void hid_device_io_stop(struct hid_device *hid) { * @max: maximal valid usage->code to consider later (out parameter) * @type: input event type (EV_KEY, EV_REL, ...) * @c: code which corresponds to this usage and type - * - * The value pointed to by @bit will be set to NULL if either @type is - * an unhandled event type, or if @c is out of range for @type. This - * can be used as an error condition. 
*/ static inline void hid_map_usage(struct hid_input *hidinput, struct hid_usage *usage, unsigned long **bit, int *max, - __u8 type, unsigned int c) + __u8 type, __u16 c) { struct input_dev *input = hidinput->input; - unsigned long *bmap = NULL; - unsigned int limit = 0; - - switch (type) { - case EV_ABS: - bmap = input->absbit; - limit = ABS_MAX; - break; - case EV_REL: - bmap = input->relbit; - limit = REL_MAX; - break; - case EV_KEY: - bmap = input->keybit; - limit = KEY_MAX; - break; - case EV_LED: - bmap = input->ledbit; - limit = LED_MAX; - break; - } - - if (unlikely(c > limit || !bmap)) { - pr_warn_ratelimited("%s: Invalid code %d type %d\n", - input->name, c, type); - *bit = NULL; - return; - } usage->type = type; usage->code = c; - *max = limit; - *bit = bmap; + + switch (type) { + case EV_ABS: + *bit = input->absbit; + *max = ABS_MAX; + break; + case EV_REL: + *bit = input->relbit; + *max = REL_MAX; + break; + case EV_KEY: + *bit = input->keybit; + *max = KEY_MAX; + break; + case EV_LED: + *bit = input->ledbit; + *max = LED_MAX; + break; + } } /** * hid_map_usage_clear - map usage input bits and clear the input bit * - * @hidinput: hidinput which we are interested in - * @usage: usage to fill in - * @bit: pointer to input->{}bit (out parameter) - * @max: maximal valid usage->code to consider later (out parameter) - * @type: input event type (EV_KEY, EV_REL, ...) - * @c: code which corresponds to this usage and type - * * The same as hid_map_usage, except the @c bit is also cleared in supported * bits (@bit). 
*/ @@ -1033,8 +914,7 @@ static inline void hid_map_usage_clear(struct hid_input *hidinput, __u8 type, __u16 c) { hid_map_usage(hidinput, usage, bit, max, type, c); - if (*bit) - clear_bit(usage->code, *bit); + clear_bit(c, *bit); } /** @@ -1051,11 +931,69 @@ static inline int __must_check hid_parse(struct hid_device *hdev) return hid_open_report(hdev); } -int __must_check hid_hw_start(struct hid_device *hdev, - unsigned int connect_mask); -void hid_hw_stop(struct hid_device *hdev); -int __must_check hid_hw_open(struct hid_device *hdev); -void hid_hw_close(struct hid_device *hdev); +/** + * hid_hw_start - start underlaying HW + * + * @hdev: hid device + * @connect_mask: which outputs to connect, see HID_CONNECT_* + * + * Call this in probe function *after* hid_parse. This will setup HW buffers + * and start the device (if not deffered to device open). hid_hw_stop must be + * called if this was successful. + */ +static inline int __must_check hid_hw_start(struct hid_device *hdev, + unsigned int connect_mask) +{ + int ret = hdev->ll_driver->start(hdev); + if (ret || !connect_mask) + return ret; + ret = hid_connect(hdev, connect_mask); + if (ret) + hdev->ll_driver->stop(hdev); + return ret; +} + +/** + * hid_hw_stop - stop underlaying HW + * + * @hdev: hid device + * + * This is usually called from remove function or from probe when something + * failed and hid_hw_start was called already. + */ +static inline void hid_hw_stop(struct hid_device *hdev) +{ + hid_disconnect(hdev); + hdev->ll_driver->stop(hdev); +} + +/** + * hid_hw_open - signal underlaying HW to start delivering events + * + * @hdev: hid device + * + * Tell underlying HW to start delivering events from the device. + * This function should be called sometime after successful call + * to hid_hiw_start(). 
+ */ +static inline int __must_check hid_hw_open(struct hid_device *hdev) +{ + return hdev->ll_driver->open(hdev); +} + +/** + * hid_hw_close - signal underlaying HW to stop delivering events + * + * @hdev: hid device + * + * This function indicates that we are not interested in the events + * from this device anymore. Delivery of events may or may not stop, + * depending on the number of users still outstanding. + */ +static inline void hid_hw_close(struct hid_device *hdev) +{ + hdev->ll_driver->close(hdev); +} /** * hid_hw_power - requests underlying HW to go into given power mode @@ -1099,7 +1037,7 @@ static inline void hid_hw_request(struct hid_device *hdev, * @rtype: HID report type * @reqtype: HID_REQ_GET_REPORT or HID_REQ_SET_REPORT * - * Return: count of data transferred, negative if error + * @return: count of data transfered, negative if error * * Same behavior as hid_hw_request, but with raw buffers instead. */ @@ -1121,7 +1059,7 @@ static inline int hid_hw_raw_request(struct hid_device *hdev, * @buf: raw data to transfer * @len: length of buf * - * Return: count of data transferred, negative if error + * @return: count of data transfered, negative if error */ static inline int hid_hw_output_report(struct hid_device *hdev, __u8 *buf, size_t len) @@ -1152,22 +1090,6 @@ static inline int hid_hw_idle(struct hid_device *hdev, int report, int idle, return 0; } -/** - * hid_may_wakeup - return if the hid device may act as a wakeup source during system-suspend - * - * @hdev: hid device - */ -static inline bool hid_hw_may_wakeup(struct hid_device *hdev) -{ - if (hdev->ll_driver->may_wakeup) - return hdev->ll_driver->may_wakeup(hdev); - - if (hdev->dev.parent) - return device_may_wakeup(hdev->dev.parent); - - return false; -} - /** * hid_hw_wait - wait for buffered io to complete * @@ -1184,18 +1106,19 @@ static inline void hid_hw_wait(struct hid_device *hdev) * * @report: the report we want to know the length */ -static inline u32 hid_report_len(struct 
hid_report *report) +static inline int hid_report_len(struct hid_report *report) { - return DIV_ROUND_UP(report->size, 8) + (report->id > 0); + /* equivalent to DIV_ROUND_UP(report->size, 8) + !!(report->id > 0) */ + return ((report->size - 1) >> 3) + 1 + (report->id > 0); } -int hid_report_raw_event(struct hid_device *hid, int type, u8 *data, u32 size, +int hid_report_raw_event(struct hid_device *hid, int type, u8 *data, int size, int interrupt); /* HID quirks API */ -unsigned long hid_lookup_quirk(const struct hid_device *hdev); -int hid_quirks_init(char **quirks_param, __u16 bus, int count); -void hid_quirks_exit(__u16 bus); +u32 usbhid_lookup_quirk(const u16 idVendor, const u16 idProduct); +int usbhid_quirks_init(char **quirks_param); +void usbhid_quirks_exit(void); #ifdef CONFIG_HID_PID int hid_pidff_init(struct hid_device *hid); @@ -1203,32 +1126,29 @@ int hid_pidff_init(struct hid_device *hid); #define hid_pidff_init NULL #endif -#define dbg_hid(fmt, ...) \ +#define dbg_hid(format, arg...) \ do { \ if (hid_debug) \ - printk(KERN_DEBUG "%s: " fmt, __FILE__, ##__VA_ARGS__); \ + printk(KERN_DEBUG "%s: " format, __FILE__, ##arg); \ } while (0) -#define hid_err(hid, fmt, ...) \ - dev_err(&(hid)->dev, fmt, ##__VA_ARGS__) -#define hid_notice(hid, fmt, ...) \ - dev_notice(&(hid)->dev, fmt, ##__VA_ARGS__) -#define hid_warn(hid, fmt, ...) \ - dev_warn(&(hid)->dev, fmt, ##__VA_ARGS__) -#define hid_info(hid, fmt, ...) \ - dev_info(&(hid)->dev, fmt, ##__VA_ARGS__) -#define hid_dbg(hid, fmt, ...) \ - dev_dbg(&(hid)->dev, fmt, ##__VA_ARGS__) - -#define hid_err_once(hid, fmt, ...) \ - dev_err_once(&(hid)->dev, fmt, ##__VA_ARGS__) -#define hid_notice_once(hid, fmt, ...) \ - dev_notice_once(&(hid)->dev, fmt, ##__VA_ARGS__) -#define hid_warn_once(hid, fmt, ...) \ - dev_warn_once(&(hid)->dev, fmt, ##__VA_ARGS__) -#define hid_info_once(hid, fmt, ...) \ - dev_info_once(&(hid)->dev, fmt, ##__VA_ARGS__) -#define hid_dbg_once(hid, fmt, ...) 
\ - dev_dbg_once(&(hid)->dev, fmt, ##__VA_ARGS__) +#define hid_printk(level, hid, fmt, arg...) \ + dev_printk(level, &(hid)->dev, fmt, ##arg) +#define hid_emerg(hid, fmt, arg...) \ + dev_emerg(&(hid)->dev, fmt, ##arg) +#define hid_crit(hid, fmt, arg...) \ + dev_crit(&(hid)->dev, fmt, ##arg) +#define hid_alert(hid, fmt, arg...) \ + dev_alert(&(hid)->dev, fmt, ##arg) +#define hid_err(hid, fmt, arg...) \ + dev_err(&(hid)->dev, fmt, ##arg) +#define hid_notice(hid, fmt, arg...) \ + dev_notice(&(hid)->dev, fmt, ##arg) +#define hid_warn(hid, fmt, arg...) \ + dev_warn(&(hid)->dev, fmt, ##arg) +#define hid_info(hid, fmt, arg...) \ + dev_info(&(hid)->dev, fmt, ##arg) +#define hid_dbg(hid, fmt, arg...) \ + dev_dbg(&(hid)->dev, fmt, ##arg) #endif diff --git a/include/linux/hiddev.h b/include/linux/hiddev.h index 2164c03d2c..a5dd814866 100644 --- a/include/linux/hiddev.h +++ b/include/linux/hiddev.h @@ -1,10 +1,22 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * Copyright (c) 1999-2000 Vojtech Pavlik * * Sponsored by SuSE */ /* + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * * Should you need to contact me, the author, you can do so either by * e-mail - mail your message to , or by paper mail: @@ -20,18 +32,6 @@ * In-kernel definitions. 
*/ -struct hiddev { - int minor; - int exist; - int open; - struct mutex existancelock; - wait_queue_head_t wait; - struct hid_device *hid; - struct list_head list; - spinlock_t list_lock; - bool initialized; -}; - struct hid_device; struct hid_usage; struct hid_field; diff --git a/include/linux/hidraw.h b/include/linux/hidraw.h index cd67f4ca55..ddf52612ee 100644 --- a/include/linux/hidraw.h +++ b/include/linux/hidraw.h @@ -1,7 +1,15 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright (c) 2007 Jiri Kosina */ +/* + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + */ #ifndef _HIDRAW_H #define _HIDRAW_H diff --git a/include/linux/highmem.h b/include/linux/highmem.h index b4c49f9cc3..9daed55069 100644 --- a/include/linux/highmem.h +++ b/include/linux/highmem.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_HIGHMEM_H #define _LINUX_HIGHMEM_H @@ -11,126 +10,16 @@ #include -#include "highmem-internal.h" - -/** - * kmap - Map a page for long term usage - * @page: Pointer to the page to be mapped - * - * Returns: The virtual address of the mapping - * - * Can only be invoked from preemptible task context because on 32bit - * systems with CONFIG_HIGHMEM enabled this function might sleep. - * - * For systems with CONFIG_HIGHMEM=n and for pages in the low memory area - * this returns the virtual address of the direct kernel mapping. - * - * The returned virtual address is globally visible and valid up to the - * point where it is unmapped via kunmap(). The pointer can be handed to - * other contexts. 
- * - * For highmem pages on 32bit systems this can be slow as the mapping space - * is limited and protected by a global lock. In case that there is no - * mapping slot available the function blocks until a slot is released via - * kunmap(). - */ -static inline void *kmap(struct page *page); - -/** - * kunmap - Unmap the virtual address mapped by kmap() - * @addr: Virtual address to be unmapped - * - * Counterpart to kmap(). A NOOP for CONFIG_HIGHMEM=n and for mappings of - * pages in the low memory area. - */ -static inline void kunmap(struct page *page); - -/** - * kmap_to_page - Get the page for a kmap'ed address - * @addr: The address to look up - * - * Returns: The page which is mapped to @addr. - */ -static inline struct page *kmap_to_page(void *addr); - -/** - * kmap_flush_unused - Flush all unused kmap mappings in order to - * remove stray mappings - */ -static inline void kmap_flush_unused(void); - -/** - * kmap_local_page - Map a page for temporary usage - * @page: Pointer to the page to be mapped - * - * Returns: The virtual address of the mapping - * - * Can be invoked from any context. - * - * Requires careful handling when nesting multiple mappings because the map - * management is stack based. The unmap has to be in the reverse order of - * the map operation: - * - * addr1 = kmap_local_page(page1); - * addr2 = kmap_local_page(page2); - * ... - * kunmap_local(addr2); - * kunmap_local(addr1); - * - * Unmapping addr1 before addr2 is invalid and causes malfunction. - * - * Contrary to kmap() mappings the mapping is only valid in the context of - * the caller and cannot be handed to other contexts. - * - * On CONFIG_HIGHMEM=n kernels and for low memory pages this returns the - * virtual address of the direct mapping. Only real highmem pages are - * temporarily mapped. - * - * While it is significantly faster than kmap() for the higmem case it - * comes with restrictions about the pointer validity. Only use when really - * necessary. 
- * - * On HIGHMEM enabled systems mapping a highmem page has the side effect of - * disabling migration in order to keep the virtual address stable across - * preemption. No caller of kmap_local_page() can rely on this side effect. - */ -static inline void *kmap_local_page(struct page *page); - -/** - * kmap_atomic - Atomically map a page for temporary usage - Deprecated! - * @page: Pointer to the page to be mapped - * - * Returns: The virtual address of the mapping - * - * Effectively a wrapper around kmap_local_page() which disables pagefaults - * and preemption. - * - * Do not use in new code. Use kmap_local_page() instead. - */ -static inline void *kmap_atomic(struct page *page); - -/** - * kunmap_atomic - Unmap the virtual address mapped by kmap_atomic() - * @addr: Virtual address to be unmapped - * - * Counterpart to kmap_atomic(). - * - * Effectively a wrapper around kunmap_local() which additionally undoes - * the side effects of kmap_atomic(), i.e. reenabling pagefaults and - * preemption. 
- */ - -/* Highmem related interfaces for management code */ -static inline unsigned int nr_free_highpages(void); -static inline unsigned long totalhigh_pages(void); - #ifndef ARCH_HAS_FLUSH_ANON_PAGE static inline void flush_anon_page(struct vm_area_struct *vma, struct page *page, unsigned long vmaddr) { } #endif -#ifndef ARCH_IMPLEMENTS_FLUSH_KERNEL_VMAP_RANGE +#ifndef ARCH_HAS_FLUSH_KERNEL_DCACHE_PAGE +static inline void flush_kernel_dcache_page(struct page *page) +{ +} static inline void flush_kernel_vmap_range(void *vaddr, int size) { } @@ -139,6 +28,106 @@ static inline void invalidate_kernel_vmap_range(void *vaddr, int size) } #endif +#include + +#ifdef CONFIG_HIGHMEM +#include + +/* declarations for linux/mm/highmem.c */ +unsigned int nr_free_highpages(void); +extern unsigned long totalhigh_pages; + +void kmap_flush_unused(void); + +struct page *kmap_to_page(void *addr); + +#else /* CONFIG_HIGHMEM */ + +static inline unsigned int nr_free_highpages(void) { return 0; } + +static inline struct page *kmap_to_page(void *addr) +{ + return virt_to_page(addr); +} + +#define totalhigh_pages 0UL + +#ifndef ARCH_HAS_KMAP +static inline void *kmap(struct page *page) +{ + might_sleep(); + return page_address(page); +} + +static inline void kunmap(struct page *page) +{ +} + +static inline void *kmap_atomic(struct page *page) +{ + preempt_disable(); + pagefault_disable(); + return page_address(page); +} +#define kmap_atomic_prot(page, prot) kmap_atomic(page) + +static inline void __kunmap_atomic(void *addr) +{ + pagefault_enable(); + preempt_enable(); +} + +#define kmap_atomic_pfn(pfn) kmap_atomic(pfn_to_page(pfn)) + +#define kmap_flush_unused() do {} while(0) +#endif + +#endif /* CONFIG_HIGHMEM */ + +#if defined(CONFIG_HIGHMEM) || defined(CONFIG_X86_32) + +DECLARE_PER_CPU(int, __kmap_atomic_idx); + +static inline int kmap_atomic_idx_push(void) +{ + int idx = __this_cpu_inc_return(__kmap_atomic_idx) - 1; + +#ifdef CONFIG_DEBUG_HIGHMEM + WARN_ON_ONCE(in_irq() && 
!irqs_disabled()); + BUG_ON(idx >= KM_TYPE_NR); +#endif + return idx; +} + +static inline int kmap_atomic_idx(void) +{ + return __this_cpu_read(__kmap_atomic_idx) - 1; +} + +static inline void kmap_atomic_idx_pop(void) +{ +#ifdef CONFIG_DEBUG_HIGHMEM + int idx = __this_cpu_dec_return(__kmap_atomic_idx); + + BUG_ON(idx < 0); +#else + __this_cpu_dec(__kmap_atomic_idx); +#endif +} + +#endif + +/* + * Prevent people trying to call kunmap_atomic() as if it were kunmap() + * kunmap_atomic() should get the return value of kmap_atomic, not the page. + */ +#define kunmap_atomic(addr) \ +do { \ + BUILD_BUG_ON(__same_type((addr), struct page *)); \ + __kunmap_atomic(addr); \ +} while (0) + + /* when CONFIG_HIGHMEM is not set these will be plain clear/copy_page */ #ifndef clear_user_highpage static inline void clear_user_highpage(struct page *page, unsigned long vaddr) @@ -149,24 +138,28 @@ static inline void clear_user_highpage(struct page *page, unsigned long vaddr) } #endif -#ifndef __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE_MOVABLE +#ifndef __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE /** - * alloc_zeroed_user_highpage_movable - Allocate a zeroed HIGHMEM page for a VMA that the caller knows can move + * __alloc_zeroed_user_highpage - Allocate a zeroed HIGHMEM page for a VMA with caller-specified movable GFP flags + * @movableflags: The GFP flags related to the pages future ability to move like __GFP_MOVABLE * @vma: The VMA the page is to be allocated for * @vaddr: The virtual address the page will be inserted into * - * This function will allocate a page for a VMA that the caller knows will - * be able to migrate in the future using move_pages() or reclaimed + * This function will allocate a page for a VMA but the caller is expected + * to specify via movableflags whether the page will be movable in the + * future or not * * An architecture may override this function by defining - * __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE_MOVABLE and providing their own + * 
__HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE and providing their own * implementation. */ static inline struct page * -alloc_zeroed_user_highpage_movable(struct vm_area_struct *vma, - unsigned long vaddr) +__alloc_zeroed_user_highpage(gfp_t movableflags, + struct vm_area_struct *vma, + unsigned long vaddr) { - struct page *page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, vaddr); + struct page *page = alloc_page_vma(GFP_HIGHUSER | movableflags, + vma, vaddr); if (page) clear_user_highpage(page, vaddr); @@ -175,6 +168,21 @@ alloc_zeroed_user_highpage_movable(struct vm_area_struct *vma, } #endif +/** + * alloc_zeroed_user_highpage_movable - Allocate a zeroed HIGHMEM page for a VMA that the caller knows can move + * @vma: The VMA the page is to be allocated for + * @vaddr: The virtual address the page will be inserted into + * + * This function will allocate a page for a VMA that the caller knows will + * be able to migrate in the future using move_pages() or reclaimed + */ +static inline struct page * +alloc_zeroed_user_highpage_movable(struct vm_area_struct *vma, + unsigned long vaddr) +{ + return __alloc_zeroed_user_highpage(__GFP_MOVABLE, vma, vaddr); +} + static inline void clear_highpage(struct page *page) { void *kaddr = kmap_atomic(page); @@ -182,30 +190,25 @@ static inline void clear_highpage(struct page *page) kunmap_atomic(kaddr); } -#ifndef __HAVE_ARCH_TAG_CLEAR_HIGHPAGE - -static inline void tag_clear_highpage(struct page *page) +static inline void sanitize_highpage(struct page *page) { + void *kaddr; + unsigned long flags; + + local_irq_save(flags); + kaddr = kmap_atomic(page); + clear_page(kaddr); + kunmap_atomic(kaddr); + local_irq_restore(flags); } -#endif - -/* - * If we pass in a base or tail page, we can zero up to PAGE_SIZE. - * If we pass in a head page, we can zero up to the size of the compound page. 
- */ -#if defined(CONFIG_HIGHMEM) && defined(CONFIG_TRANSPARENT_HUGEPAGE) -void zero_user_segments(struct page *page, unsigned start1, unsigned end1, - unsigned start2, unsigned end2); -#else /* !HIGHMEM || !TRANSPARENT_HUGEPAGE */ static inline void zero_user_segments(struct page *page, - unsigned start1, unsigned end1, - unsigned start2, unsigned end2) + unsigned start1, unsigned end1, + unsigned start2, unsigned end2) { void *kaddr = kmap_atomic(page); - unsigned int i; - BUG_ON(end1 > page_size(page) || end2 > page_size(page)); + BUG_ON(end1 > PAGE_SIZE || end2 > PAGE_SIZE); if (end1 > start1) memset(kaddr + start1, 0, end1 - start1); @@ -214,10 +217,8 @@ static inline void zero_user_segments(struct page *page, memset(kaddr + start2, 0, end2 - start2); kunmap_atomic(kaddr); - for (i = 0; i < compound_nr(page); i++) - flush_dcache_page(page + i); + flush_dcache_page(page); } -#endif /* !HIGHMEM || !TRANSPARENT_HUGEPAGE */ static inline void zero_user_segment(struct page *page, unsigned start, unsigned end) @@ -247,8 +248,6 @@ static inline void copy_user_highpage(struct page *to, struct page *from, #endif -#ifndef __HAVE_ARCH_COPY_HIGHPAGE - static inline void copy_highpage(struct page *to, struct page *from) { char *vfrom, *vto; @@ -260,71 +259,4 @@ static inline void copy_highpage(struct page *to, struct page *from) kunmap_atomic(vfrom); } -#endif - -static inline void memcpy_page(struct page *dst_page, size_t dst_off, - struct page *src_page, size_t src_off, - size_t len) -{ - char *dst = kmap_local_page(dst_page); - char *src = kmap_local_page(src_page); - - VM_BUG_ON(dst_off + len > PAGE_SIZE || src_off + len > PAGE_SIZE); - memcpy(dst + dst_off, src + src_off, len); - kunmap_local(src); - kunmap_local(dst); -} - -static inline void memmove_page(struct page *dst_page, size_t dst_off, - struct page *src_page, size_t src_off, - size_t len) -{ - char *dst = kmap_local_page(dst_page); - char *src = kmap_local_page(src_page); - - VM_BUG_ON(dst_off + len > 
PAGE_SIZE || src_off + len > PAGE_SIZE); - memmove(dst + dst_off, src + src_off, len); - kunmap_local(src); - kunmap_local(dst); -} - -static inline void memset_page(struct page *page, size_t offset, int val, - size_t len) -{ - char *addr = kmap_local_page(page); - - VM_BUG_ON(offset + len > PAGE_SIZE); - memset(addr + offset, val, len); - kunmap_local(addr); -} - -static inline void memcpy_from_page(char *to, struct page *page, - size_t offset, size_t len) -{ - char *from = kmap_local_page(page); - - VM_BUG_ON(offset + len > PAGE_SIZE); - memcpy(to, from + offset, len); - kunmap_local(from); -} - -static inline void memcpy_to_page(struct page *page, size_t offset, - const char *from, size_t len) -{ - char *to = kmap_local_page(page); - - VM_BUG_ON(offset + len > PAGE_SIZE); - memcpy(to + offset, from, len); - flush_dcache_page(page); - kunmap_local(to); -} - -static inline void memzero_page(struct page *page, size_t offset, size_t len) -{ - char *addr = kmap_local_page(page); - memset(addr + offset, 0, len); - flush_dcache_page(page); - kunmap_local(addr); -} - #endif /* _LINUX_HIGHMEM_H */ diff --git a/include/linux/highuid.h b/include/linux/highuid.h index 50d383fd67..434e56246f 100644 --- a/include/linux/highuid.h +++ b/include/linux/highuid.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_HIGHUID_H #define _LINUX_HIGHUID_H diff --git a/include/linux/hil_mlc.h b/include/linux/hil_mlc.h index 369221fd55..394a8405dd 100644 --- a/include/linux/hil_mlc.h +++ b/include/linux/hil_mlc.h @@ -103,7 +103,7 @@ struct hilse_node { /* Methods for back-end drivers, e.g. 
hp_sdc_mlc */ typedef int (hil_mlc_cts) (hil_mlc *mlc); -typedef int (hil_mlc_out) (hil_mlc *mlc); +typedef void (hil_mlc_out) (hil_mlc *mlc); typedef int (hil_mlc_in) (hil_mlc *mlc, suseconds_t timeout); struct hil_mlc_devinfo { @@ -144,12 +144,12 @@ struct hil_mlc { hil_packet ipacket[16]; hil_packet imatch; int icount; - unsigned long instart; - unsigned long intimeout; + struct timeval instart; + suseconds_t intimeout; int ddi; /* Last operational device id */ int lcv; /* LCV to throttle loops */ - time64_t lcv_time; /* Time loop was started */ + struct timeval lcv_tv; /* Time loop was started */ int di_map[7]; /* Maps below items to live devs */ struct hil_mlc_devinfo di[HIL_MLC_DEVMEM]; diff --git a/include/linux/hippidevice.h b/include/linux/hippidevice.h index 9dc01f7ab5..8ec23fb0b4 100644 --- a/include/linux/hippidevice.h +++ b/include/linux/hippidevice.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * INET An implementation of the TCP/IP protocol suite for the LINUX * operating system. INET is implemented using the BSD Socket @@ -15,6 +14,11 @@ * Fred N. van Kempen, * Alan Cox, * Lawrence V. Stefani, + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. 
*/ #ifndef _LINUX_HIPPIDEVICE_H #define _LINUX_HIPPIDEVICE_H @@ -28,6 +32,7 @@ struct hippi_cb { }; __be16 hippi_type_trans(struct sk_buff *skb, struct net_device *dev); +int hippi_change_mtu(struct net_device *dev, int new_mtu); int hippi_mac_addr(struct net_device *dev, void *p); int hippi_neigh_setup_dev(struct net_device *dev, struct neigh_parms *p); struct net_device *alloc_hippi_dev(int sizeof_priv); diff --git a/include/linux/host1x.h b/include/linux/host1x.h index 7bccf589ab..1ffbf2a8cb 100644 --- a/include/linux/host1x.h +++ b/include/linux/host1x.h @@ -1,6 +1,19 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * Copyright (c) 2009-2013, NVIDIA Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. 
*/ #ifndef __LINUX_HOST1X_H @@ -13,54 +26,20 @@ enum host1x_class { HOST1X_CLASS_HOST1X = 0x1, HOST1X_CLASS_GR2D = 0x51, HOST1X_CLASS_GR2D_SB = 0x52, - HOST1X_CLASS_VIC = 0x5D, HOST1X_CLASS_GR3D = 0x60, }; -struct host1x; struct host1x_client; -struct iommu_group; -u64 host1x_get_dma_mask(struct host1x *host1x); - -/** - * struct host1x_client_ops - host1x client operations - * @early_init: host1x client early initialization code - * @init: host1x client initialization code - * @exit: host1x client tear down code - * @late_exit: host1x client late tear down code - * @suspend: host1x client suspend code - * @resume: host1x client resume code - */ struct host1x_client_ops { - int (*early_init)(struct host1x_client *client); int (*init)(struct host1x_client *client); int (*exit)(struct host1x_client *client); - int (*late_exit)(struct host1x_client *client); - int (*suspend)(struct host1x_client *client); - int (*resume)(struct host1x_client *client); }; -/** - * struct host1x_client - host1x client structure - * @list: list node for the host1x client - * @host: pointer to struct device representing the host1x controller - * @dev: pointer to struct device backing this host1x client - * @group: IOMMU group that this client is a member of - * @ops: host1x client operations - * @class: host1x class represented by this client - * @channel: host1x channel associated with this client - * @syncpts: array of syncpoints requested for this client - * @num_syncpts: number of syncpoints requested for this client - * @parent: pointer to parent structure - * @usecount: reference count for this structure - * @lock: mutex for mutually exclusive concurrency - */ struct host1x_client { struct list_head list; - struct device *host; + struct device *parent; struct device *dev; - struct iommu_group *group; const struct host1x_client_ops *ops; @@ -69,10 +48,6 @@ struct host1x_client { struct host1x_syncpt **syncpts; unsigned int num_syncpts; - - struct host1x_client *parent; - unsigned int 
usecount; - struct mutex lock; }; /* @@ -85,11 +60,12 @@ struct sg_table; struct host1x_bo_ops { struct host1x_bo *(*get)(struct host1x_bo *bo); void (*put)(struct host1x_bo *bo); - struct sg_table *(*pin)(struct device *dev, struct host1x_bo *bo, - dma_addr_t *phys); - void (*unpin)(struct device *dev, struct sg_table *sgt); + dma_addr_t (*pin)(struct host1x_bo *bo, struct sg_table **sgt); + void (*unpin)(struct host1x_bo *bo, struct sg_table *sgt); void *(*mmap)(struct host1x_bo *bo); void (*munmap)(struct host1x_bo *bo, void *addr); + void *(*kmap)(struct host1x_bo *bo, unsigned int pagenum); + void (*kunmap)(struct host1x_bo *bo, unsigned int pagenum, void *addr); }; struct host1x_bo { @@ -112,17 +88,15 @@ static inline void host1x_bo_put(struct host1x_bo *bo) bo->ops->put(bo); } -static inline struct sg_table *host1x_bo_pin(struct device *dev, - struct host1x_bo *bo, - dma_addr_t *phys) +static inline dma_addr_t host1x_bo_pin(struct host1x_bo *bo, + struct sg_table **sgt) { - return bo->ops->pin(dev, bo, phys); + return bo->ops->pin(bo, sgt); } -static inline void host1x_bo_unpin(struct device *dev, struct host1x_bo *bo, - struct sg_table *sgt) +static inline void host1x_bo_unpin(struct host1x_bo *bo, struct sg_table *sgt) { - bo->ops->unpin(dev, sgt); + bo->ops->unpin(bo, sgt); } static inline void *host1x_bo_mmap(struct host1x_bo *bo) @@ -135,6 +109,17 @@ static inline void host1x_bo_munmap(struct host1x_bo *bo, void *addr) bo->ops->munmap(bo, addr); } +static inline void *host1x_bo_kmap(struct host1x_bo *bo, unsigned int pagenum) +{ + return bo->ops->kmap(bo, pagenum); +} + +static inline void host1x_bo_kunmap(struct host1x_bo *bo, + unsigned int pagenum, void *addr) +{ + bo->ops->kunmap(bo, pagenum, addr); +} + /* * host1x syncpoints */ @@ -146,9 +131,7 @@ struct host1x_syncpt_base; struct host1x_syncpt; struct host1x; -struct host1x_syncpt *host1x_syncpt_get_by_id(struct host1x *host, u32 id); -struct host1x_syncpt *host1x_syncpt_get_by_id_noref(struct 
host1x *host, u32 id); -struct host1x_syncpt *host1x_syncpt_get(struct host1x_syncpt *sp); +struct host1x_syncpt *host1x_syncpt_get(struct host1x *host, u32 id); u32 host1x_syncpt_id(struct host1x_syncpt *sp); u32 host1x_syncpt_read_min(struct host1x_syncpt *sp); u32 host1x_syncpt_read_max(struct host1x_syncpt *sp); @@ -157,21 +140,13 @@ int host1x_syncpt_incr(struct host1x_syncpt *sp); u32 host1x_syncpt_incr_max(struct host1x_syncpt *sp, u32 incrs); int host1x_syncpt_wait(struct host1x_syncpt *sp, u32 thresh, long timeout, u32 *value); -struct host1x_syncpt *host1x_syncpt_request(struct host1x_client *client, +struct host1x_syncpt *host1x_syncpt_request(struct device *dev, unsigned long flags); -void host1x_syncpt_put(struct host1x_syncpt *sp); -struct host1x_syncpt *host1x_syncpt_alloc(struct host1x *host, - unsigned long flags, - const char *name); +void host1x_syncpt_free(struct host1x_syncpt *sp); struct host1x_syncpt_base *host1x_syncpt_get_base(struct host1x_syncpt *sp); u32 host1x_syncpt_base_id(struct host1x_syncpt_base *base); -void host1x_syncpt_release_vblank_reservation(struct host1x_client *client, - u32 syncpt_id); - -struct dma_fence *host1x_fence_create(struct host1x_syncpt *sp, u32 threshold); - /* * host1x channel */ @@ -179,7 +154,8 @@ struct dma_fence *host1x_fence_create(struct host1x_syncpt *sp, u32 threshold); struct host1x_channel; struct host1x_job; -struct host1x_channel *host1x_channel_request(struct host1x_client *client); +struct host1x_channel *host1x_channel_request(struct device *dev); +void host1x_channel_free(struct host1x_channel *channel); struct host1x_channel *host1x_channel_get(struct host1x_channel *channel); void host1x_channel_put(struct host1x_channel *channel); int host1x_job_submit(struct host1x_job *job); @@ -188,9 +164,6 @@ int host1x_job_submit(struct host1x_job *job); * host1x job */ -#define HOST1X_RELOC_READ (1 << 0) -#define HOST1X_RELOC_WRITE (1 << 1) - struct host1x_reloc { struct { struct host1x_bo *bo; @@ 
-201,7 +174,6 @@ struct host1x_reloc { unsigned long offset; } target; unsigned long shift; - unsigned long flags; }; struct host1x_job { @@ -214,15 +186,19 @@ struct host1x_job { /* Channel where job is submitted to */ struct host1x_channel *channel; - /* client where the job originated */ - struct host1x_client *client; + u32 client; /* Gathers and their memory */ - struct host1x_job_cmd *cmds; - unsigned int num_cmds; + struct host1x_job_gather *gathers; + unsigned int num_gathers; + + /* Wait checks to be processed at submit time */ + struct host1x_waitchk *waitchk; + unsigned int num_waitchk; + u32 waitchk_mask; /* Array of handles to be pinned & unpinned */ - struct host1x_reloc *relocs; + struct host1x_reloc *relocarray; unsigned int num_relocs; struct host1x_job_unpin_data *unpins; unsigned int num_unpins; @@ -232,19 +208,13 @@ struct host1x_job { dma_addr_t *reloc_addr_phys; /* Sync point id, number of increments and end related to the submit */ - struct host1x_syncpt *syncpt; + u32 syncpt_id; u32 syncpt_incrs; u32 syncpt_end; - /* Completion waiter ref */ - void *waiter; - /* Maximum time to wait for this job */ unsigned int timeout; - /* Job has timed out and should be released */ - bool cancelled; - /* Index and number of slots used in the push buffer */ unsigned int first_get; unsigned int num_slots; @@ -255,35 +225,20 @@ struct host1x_job { u8 *gather_copy_mapped; /* Check if register is marked as an address reg */ - int (*is_addr_reg)(struct device *dev, u32 class, u32 reg); - - /* Check if class belongs to the unit */ - int (*is_valid_class)(u32 class); + int (*is_addr_reg)(struct device *dev, u32 reg, u32 class); /* Request a SETCLASS to this class */ u32 class; /* Add a channel wait for previous ops to complete */ bool serialize; - - /* Fast-forward syncpoint increments on job timeout */ - bool syncpt_recovery; - - /* Callback called when job is freed */ - void (*release)(struct host1x_job *job); - void *user_data; - - /* Whether host1x-side 
firewall should be ran for this job or not */ - bool enable_firewall; }; struct host1x_job *host1x_job_alloc(struct host1x_channel *ch, u32 num_cmdbufs, u32 num_relocs, - bool skip_firewall); -void host1x_job_add_gather(struct host1x_job *job, struct host1x_bo *bo, - unsigned int words, unsigned int offset); -void host1x_job_add_wait(struct host1x_job *job, u32 id, u32 thresh, - bool relative, u32 next_class); + u32 num_waitchks); +void host1x_job_add_gather(struct host1x_job *job, struct host1x_bo *mem_id, + u32 words, u32 offset); struct host1x_job *host1x_job_get(struct host1x_job *job); void host1x_job_put(struct host1x_job *job); int host1x_job_pin(struct host1x_job *job, struct device *dev); @@ -295,15 +250,6 @@ void host1x_job_unpin(struct host1x_job *job); struct host1x_device; -/** - * struct host1x_driver - host1x logical device driver - * @driver: core driver - * @subdevs: table of OF device IDs matching subdevices for this driver - * @list: list node for the driver - * @probe: called when the host1x logical device is probed - * @remove: called when the host1x logical device is removed - * @shutdown: called when the host1x logical device is shut down - */ struct host1x_driver { struct device_driver driver; @@ -341,8 +287,6 @@ struct host1x_device { struct list_head clients; bool registered; - - struct device_dma_parameters dma_parms; }; static inline struct host1x_device *to_host1x_device(struct device *dev) @@ -353,45 +297,15 @@ static inline struct host1x_device *to_host1x_device(struct device *dev) int host1x_device_init(struct host1x_device *device); int host1x_device_exit(struct host1x_device *device); -void __host1x_client_init(struct host1x_client *client, struct lock_class_key *key); -void host1x_client_exit(struct host1x_client *client); - -#define host1x_client_init(client) \ - ({ \ - static struct lock_class_key __key; \ - __host1x_client_init(client, &__key); \ - }) - -int __host1x_client_register(struct host1x_client *client); - -/* - * Note 
that this wrapper calls __host1x_client_init() for compatibility - * with existing callers. Callers that want to separately initialize and - * register a host1x client must first initialize using either of the - * __host1x_client_init() or host1x_client_init() functions and then use - * the low-level __host1x_client_register() function to avoid the client - * getting reinitialized. - */ -#define host1x_client_register(client) \ - ({ \ - static struct lock_class_key __key; \ - __host1x_client_init(client, &__key); \ - __host1x_client_register(client); \ - }) - +int host1x_client_register(struct host1x_client *client); int host1x_client_unregister(struct host1x_client *client); -int host1x_client_suspend(struct host1x_client *client); -int host1x_client_resume(struct host1x_client *client); - struct tegra_mipi_device; -struct tegra_mipi_device *tegra_mipi_request(struct device *device, - struct device_node *np); +struct tegra_mipi_device *tegra_mipi_request(struct device *device); void tegra_mipi_free(struct tegra_mipi_device *device); int tegra_mipi_enable(struct tegra_mipi_device *device); int tegra_mipi_disable(struct tegra_mipi_device *device); -int tegra_mipi_start_calibration(struct tegra_mipi_device *device); -int tegra_mipi_finish_calibration(struct tegra_mipi_device *device); +int tegra_mipi_calibrate(struct tegra_mipi_device *device); #endif diff --git a/include/linux/hp_sdc.h b/include/linux/hp_sdc.h index 6f1dee7e67..d392975d88 100644 --- a/include/linux/hp_sdc.h +++ b/include/linux/hp_sdc.h @@ -281,7 +281,7 @@ typedef struct { hp_sdc_transaction *tq[HP_SDC_QUEUE_LEN]; /* All pending read/writes */ int rcurr, rqty; /* Current read transact in process */ - ktime_t rtime; /* Time when current read started */ + struct timeval rtv; /* Time when current read started */ int wcurr; /* Current write transact in process */ int dev_err; /* carries status from registration */ diff --git a/include/linux/hpet.h b/include/linux/hpet.h index 8604564b98..9427ab4e01 
100644 --- a/include/linux/hpet.h +++ b/include/linux/hpet.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef __HPET__ #define __HPET__ 1 diff --git a/include/linux/hrtimer.h b/include/linux/hrtimer.h index 0ee140176f..5e00f80b15 100644 --- a/include/linux/hrtimer.h +++ b/include/linux/hrtimer.h @@ -1,5 +1,6 @@ -// SPDX-License-Identifier: GPL-2.0 /* + * include/linux/hrtimer.h + * * hrtimers - High-resolution kernel timers * * Copyright(C) 2005, Thomas Gleixner @@ -8,16 +9,18 @@ * data type definitions, declarations, prototypes * * Started by: Thomas Gleixner and Ingo Molnar + * + * For licencing details see kernel-base/COPYING */ #ifndef _LINUX_HRTIMER_H #define _LINUX_HRTIMER_H -#include #include +#include #include #include +#include #include -#include #include #include @@ -26,37 +29,13 @@ struct hrtimer_cpu_base; /* * Mode arguments of xxx_hrtimer functions: - * - * HRTIMER_MODE_ABS - Time value is absolute - * HRTIMER_MODE_REL - Time value is relative to now - * HRTIMER_MODE_PINNED - Timer is bound to CPU (is only considered - * when starting the timer) - * HRTIMER_MODE_SOFT - Timer callback function will be executed in - * soft irq context - * HRTIMER_MODE_HARD - Timer callback function will be executed in - * hard irq context even on PREEMPT_RT. 
*/ enum hrtimer_mode { - HRTIMER_MODE_ABS = 0x00, - HRTIMER_MODE_REL = 0x01, - HRTIMER_MODE_PINNED = 0x02, - HRTIMER_MODE_SOFT = 0x04, - HRTIMER_MODE_HARD = 0x08, - - HRTIMER_MODE_ABS_PINNED = HRTIMER_MODE_ABS | HRTIMER_MODE_PINNED, - HRTIMER_MODE_REL_PINNED = HRTIMER_MODE_REL | HRTIMER_MODE_PINNED, - - HRTIMER_MODE_ABS_SOFT = HRTIMER_MODE_ABS | HRTIMER_MODE_SOFT, - HRTIMER_MODE_REL_SOFT = HRTIMER_MODE_REL | HRTIMER_MODE_SOFT, - - HRTIMER_MODE_ABS_PINNED_SOFT = HRTIMER_MODE_ABS_PINNED | HRTIMER_MODE_SOFT, - HRTIMER_MODE_REL_PINNED_SOFT = HRTIMER_MODE_REL_PINNED | HRTIMER_MODE_SOFT, - - HRTIMER_MODE_ABS_HARD = HRTIMER_MODE_ABS | HRTIMER_MODE_HARD, - HRTIMER_MODE_REL_HARD = HRTIMER_MODE_REL | HRTIMER_MODE_HARD, - - HRTIMER_MODE_ABS_PINNED_HARD = HRTIMER_MODE_ABS_PINNED | HRTIMER_MODE_HARD, - HRTIMER_MODE_REL_PINNED_HARD = HRTIMER_MODE_REL_PINNED | HRTIMER_MODE_HARD, + HRTIMER_MODE_ABS = 0x0, /* Time value is absolute */ + HRTIMER_MODE_REL = 0x1, /* Time value is relative to now */ + HRTIMER_MODE_PINNED = 0x02, /* Timer is bound to CPU */ + HRTIMER_MODE_ABS_PINNED = 0x02, + HRTIMER_MODE_REL_PINNED = 0x03, }; /* @@ -109,9 +88,12 @@ enum hrtimer_restart { * @base: pointer to the timer base (per cpu and per clock) * @state: state information (See bit values above) * @is_rel: Set if the timer was armed relative - * @is_soft: Set if hrtimer will be expired in soft interrupt context. - * @is_hard: Set if hrtimer will be expired in hard interrupt context - * even on RT. 
+ * @start_pid: timer statistics field to store the pid of the task which + * started the timer + * @start_site: timer statistics field to store the site where the timer + * was started + * @start_comm: timer statistics field to store the name of the process which + * started the timer * * The hrtimer structure must be initialized by hrtimer_init() */ @@ -122,8 +104,11 @@ struct hrtimer { struct hrtimer_clock_base *base; u8 state; u8 is_rel; - u8 is_soft; - u8 is_hard; +#ifdef CONFIG_TIMER_STATS + int start_pid; + void *start_site; + char start_comm[16]; +#endif }; /** @@ -139,9 +124,9 @@ struct hrtimer_sleeper { }; #ifdef CONFIG_64BIT -# define __hrtimer_clock_base_align ____cacheline_aligned +# define HRTIMER_CLOCK_BASE_ALIGN 64 #else -# define __hrtimer_clock_base_align +# define HRTIMER_CLOCK_BASE_ALIGN 32 #endif /** @@ -150,61 +135,48 @@ struct hrtimer_sleeper { * @index: clock type index for per_cpu support when moving a * timer to a base on another cpu. * @clockid: clock id for per_cpu support - * @seq: seqcount around __run_hrtimer - * @running: pointer to the currently running hrtimer * @active: red black tree root node for the active timers * @get_time: function to retrieve the current time of the clock * @offset: offset of this clock to the monotonic base */ struct hrtimer_clock_base { struct hrtimer_cpu_base *cpu_base; - unsigned int index; + int index; clockid_t clockid; - seqcount_raw_spinlock_t seq; - struct hrtimer *running; struct timerqueue_head active; ktime_t (*get_time)(void); ktime_t offset; -} __hrtimer_clock_base_align; +} __attribute__((__aligned__(HRTIMER_CLOCK_BASE_ALIGN))); enum hrtimer_base_type { HRTIMER_BASE_MONOTONIC, HRTIMER_BASE_REALTIME, HRTIMER_BASE_BOOTTIME, HRTIMER_BASE_TAI, - HRTIMER_BASE_MONOTONIC_SOFT, - HRTIMER_BASE_REALTIME_SOFT, - HRTIMER_BASE_BOOTTIME_SOFT, - HRTIMER_BASE_TAI_SOFT, HRTIMER_MAX_CLOCK_BASES, }; -/** +/* * struct hrtimer_cpu_base - the per cpu clock bases * @lock: lock protecting the base and associated 
clock bases * and timers + * @seq: seqcount around __run_hrtimer + * @running: pointer to the currently running hrtimer * @cpu: cpu number * @active_bases: Bitfield to mark bases with active timers * @clock_was_set_seq: Sequence counter of clock was set events - * @hres_active: State of high resolution mode + * @migration_enabled: The migration of hrtimers to other cpus is enabled + * @nohz_active: The nohz functionality is enabled + * @expires_next: absolute time of the next event which was scheduled + * via clock_set_next_event() + * @next_timer: Pointer to the first expiring timer * @in_hrtirq: hrtimer_interrupt() is currently executing + * @hres_active: State of high resolution mode * @hang_detected: The last hrtimer interrupt detected a hang - * @softirq_activated: displays, if the softirq is raised - update of softirq - * related settings is not required then. * @nr_events: Total number of hrtimer interrupt events * @nr_retries: Total number of hrtimer interrupt retries * @nr_hangs: Total number of hrtimer interrupt hangs * @max_hang_time: Maximum time spent in hrtimer_interrupt - * @softirq_expiry_lock: Lock which is taken while softirq based hrtimer are - * expired - * @timer_waiters: A hrtimer_cancel() invocation waits for the timer - * callback to finish. - * @expires_next: absolute time of the next event, is required for remote - * hrtimer enqueue; it is the total first expiry time (hard - * and soft hrtimer are taken into account) - * @next_timer: Pointer to the first expiring timer - * @softirq_expires_next: Time to check, if soft queues needs also to be expired - * @softirq_next_timer: Pointer to the first expiring softirq based timer * @clock_base: array of clock bases for this cpu * * Note: next_timer is just an optimization for __remove_hrtimer(). 
@@ -213,32 +185,31 @@ enum hrtimer_base_type { */ struct hrtimer_cpu_base { raw_spinlock_t lock; + seqcount_t seq; + struct hrtimer *running; unsigned int cpu; unsigned int active_bases; unsigned int clock_was_set_seq; - unsigned int hres_active : 1, - in_hrtirq : 1, - hang_detected : 1, - softirq_activated : 1; + bool migration_enabled; + bool nohz_active; #ifdef CONFIG_HIGH_RES_TIMERS - unsigned int nr_events; - unsigned short nr_retries; - unsigned short nr_hangs; - unsigned int max_hang_time; -#endif -#ifdef CONFIG_PREEMPT_RT - spinlock_t softirq_expiry_lock; - atomic_t timer_waiters; -#endif + unsigned int in_hrtirq : 1, + hres_active : 1, + hang_detected : 1; ktime_t expires_next; struct hrtimer *next_timer; - ktime_t softirq_expires_next; - struct hrtimer *softirq_next_timer; + unsigned int nr_events; + unsigned int nr_retries; + unsigned int nr_hangs; + unsigned int max_hang_time; +#endif struct hrtimer_clock_base clock_base[HRTIMER_MAX_CLOCK_BASES]; } ____cacheline_aligned; static inline void hrtimer_set_expires(struct hrtimer *timer, ktime_t time) { + BUILD_BUG_ON(sizeof(struct hrtimer_clock_base) > HRTIMER_CLOCK_BASE_ALIGN); + timer->node.expires = time; timer->_softexpires = time; } @@ -257,8 +228,8 @@ static inline void hrtimer_set_expires_range_ns(struct hrtimer *timer, ktime_t t static inline void hrtimer_set_expires_tv64(struct hrtimer *timer, s64 tv64) { - timer->node.expires = tv64; - timer->_softexpires = tv64; + timer->node.expires.tv64 = tv64; + timer->_softexpires.tv64 = tv64; } static inline void hrtimer_add_expires(struct hrtimer *timer, ktime_t time) @@ -285,11 +256,11 @@ static inline ktime_t hrtimer_get_softexpires(const struct hrtimer *timer) static inline s64 hrtimer_get_expires_tv64(const struct hrtimer *timer) { - return timer->node.expires; + return timer->node.expires.tv64; } static inline s64 hrtimer_get_softexpires_tv64(const struct hrtimer *timer) { - return timer->_softexpires; + return timer->_softexpires.tv64; } static inline 
s64 hrtimer_get_expires_ns(const struct hrtimer *timer) @@ -307,23 +278,49 @@ static inline ktime_t hrtimer_cb_get_time(struct hrtimer *timer) return timer->base->get_time(); } -static inline int hrtimer_is_hres_active(struct hrtimer *timer) -{ - return IS_ENABLED(CONFIG_HIGH_RES_TIMERS) ? - timer->base->cpu_base->hres_active : 0; -} - #ifdef CONFIG_HIGH_RES_TIMERS struct clock_event_device; extern void hrtimer_interrupt(struct clock_event_device *dev); +static inline int hrtimer_is_hres_active(struct hrtimer *timer) +{ + return timer->base->cpu_base->hres_active; +} + +extern void hrtimer_peek_ahead_timers(void); + +/* + * The resolution of the clocks. The resolution value is returned in + * the clock_getres() system call to give application programmers an + * idea of the (in)accuracy of timers. Timer values are rounded up to + * this resolution values. + */ +# define HIGH_RES_NSEC 1 +# define KTIME_HIGH_RES (ktime_t) { .tv64 = HIGH_RES_NSEC } +# define MONOTONIC_RES_NSEC HIGH_RES_NSEC +# define KTIME_MONOTONIC_RES KTIME_HIGH_RES + +extern void clock_was_set_delayed(void); + extern unsigned int hrtimer_resolution; #else +# define MONOTONIC_RES_NSEC LOW_RES_NSEC +# define KTIME_MONOTONIC_RES KTIME_LOW_RES + #define hrtimer_resolution (unsigned int)LOW_RES_NSEC +static inline void hrtimer_peek_ahead_timers(void) { } + +static inline int hrtimer_is_hres_active(struct hrtimer *timer) +{ + return 0; +} + +static inline void clock_was_set_delayed(void) { } + #endif static inline ktime_t @@ -336,7 +333,7 @@ __hrtimer_expires_remaining_adjusted(const struct hrtimer *timer, ktime_t now) * hrtimer_start_range_ns() to prevent short timeouts. 
*/ if (IS_ENABLED(CONFIG_TIME_LOW_RES) && timer->is_rel) - rem -= hrtimer_resolution; + rem.tv64 -= hrtimer_resolution; return rem; } @@ -347,39 +344,26 @@ hrtimer_expires_remaining_adjusted(const struct hrtimer *timer) timer->base->get_time()); } +extern void clock_was_set(void); #ifdef CONFIG_TIMERFD extern void timerfd_clock_was_set(void); -extern void timerfd_resume(void); #else static inline void timerfd_clock_was_set(void) { } -static inline void timerfd_resume(void) { } #endif +extern void hrtimers_resume(void); DECLARE_PER_CPU(struct tick_device, tick_cpu_device); -#ifdef CONFIG_PREEMPT_RT -void hrtimer_cancel_wait_running(const struct hrtimer *timer); -#else -static inline void hrtimer_cancel_wait_running(struct hrtimer *timer) -{ - cpu_relax(); -} -#endif /* Exported timer functions: */ /* Initialize timers: */ extern void hrtimer_init(struct hrtimer *timer, clockid_t which_clock, enum hrtimer_mode mode); -extern void hrtimer_init_sleeper(struct hrtimer_sleeper *sl, clockid_t clock_id, - enum hrtimer_mode mode); #ifdef CONFIG_DEBUG_OBJECTS_TIMERS extern void hrtimer_init_on_stack(struct hrtimer *timer, clockid_t which_clock, enum hrtimer_mode mode); -extern void hrtimer_init_sleeper_on_stack(struct hrtimer_sleeper *sl, - clockid_t clock_id, - enum hrtimer_mode mode); extern void destroy_hrtimer_on_stack(struct hrtimer *timer); #else @@ -389,14 +373,6 @@ static inline void hrtimer_init_on_stack(struct hrtimer *timer, { hrtimer_init(timer, which_clock, mode); } - -static inline void hrtimer_init_sleeper_on_stack(struct hrtimer_sleeper *sl, - clockid_t clock_id, - enum hrtimer_mode mode) -{ - hrtimer_init_sleeper(sl, clock_id, mode); -} - static inline void destroy_hrtimer_on_stack(struct hrtimer *timer) { } #endif @@ -405,12 +381,11 @@ extern void hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim, u64 range_ns, const enum hrtimer_mode mode); /** - * hrtimer_start - (re)start an hrtimer + * hrtimer_start - (re)start an hrtimer on the current CPU * 
@timer: the timer to be added * @tim: expiry time - * @mode: timer mode: absolute (HRTIMER_MODE_ABS) or - * relative (HRTIMER_MODE_REL), and pinned (HRTIMER_MODE_PINNED); - * softirq based mode is considered for debug purpose only! + * @mode: expiry mode: absolute (HRTIMER_MODE_ABS) or + * relative (HRTIMER_MODE_REL) */ static inline void hrtimer_start(struct hrtimer *timer, ktime_t tim, const enum hrtimer_mode mode) @@ -432,9 +407,6 @@ static inline void hrtimer_start_expires(struct hrtimer *timer, hrtimer_start_range_ns(timer, soft, delta, mode); } -void hrtimer_sleeper_start_expires(struct hrtimer_sleeper *sl, - enum hrtimer_mode mode); - static inline void hrtimer_restart(struct hrtimer *timer) { hrtimer_start_expires(timer, HRTIMER_MODE_ABS); @@ -443,32 +415,21 @@ static inline void hrtimer_restart(struct hrtimer *timer) /* Query timers: */ extern ktime_t __hrtimer_get_remaining(const struct hrtimer *timer, bool adjust); -/** - * hrtimer_get_remaining - get remaining time for the timer - * @timer: the timer to read - */ static inline ktime_t hrtimer_get_remaining(const struct hrtimer *timer) { return __hrtimer_get_remaining(timer, false); } extern u64 hrtimer_get_next_event(void); -extern u64 hrtimer_next_event_without(const struct hrtimer *exclude); extern bool hrtimer_active(const struct hrtimer *timer); -/** - * hrtimer_is_queued - check, whether the timer is on one of the queues - * @timer: Timer to check - * - * Returns: True if the timer is queued, false otherwise - * - * The function can be used lockless, but it gives only a current snapshot. 
+/* + * Helper function to check, whether the timer is on one of the queues */ -static inline bool hrtimer_is_queued(struct hrtimer *timer) +static inline int hrtimer_is_queued(struct hrtimer *timer) { - /* The READ_ONCE pairs with the update functions of timer->state */ - return !!(READ_ONCE(timer->state) & HRTIMER_STATE_ENQUEUED); + return timer->state & HRTIMER_STATE_ENQUEUED; } /* @@ -477,7 +438,7 @@ static inline bool hrtimer_is_queued(struct hrtimer *timer) */ static inline int hrtimer_callback_running(struct hrtimer *timer) { - return timer->base->running == timer; + return timer->base->cpu_base->running == timer; } /* Forward a hrtimer so it expires after now: */ @@ -507,17 +468,21 @@ static inline u64 hrtimer_forward_now(struct hrtimer *timer, } /* Precise sleep: */ - -extern int nanosleep_copyout(struct restart_block *, struct timespec64 *); -extern long hrtimer_nanosleep(ktime_t rqtp, const enum hrtimer_mode mode, +extern long hrtimer_nanosleep(struct timespec *rqtp, + struct timespec __user *rmtp, + const enum hrtimer_mode mode, const clockid_t clockid); +extern long hrtimer_nanosleep_restart(struct restart_block *restart_block); + +extern void hrtimer_init_sleeper(struct hrtimer_sleeper *sl, + struct task_struct *tsk); extern int schedule_hrtimeout_range(ktime_t *expires, u64 delta, - const enum hrtimer_mode mode); + const enum hrtimer_mode mode); extern int schedule_hrtimeout_range_clock(ktime_t *expires, u64 delta, const enum hrtimer_mode mode, - clockid_t clock_id); + int clock); extern int schedule_hrtimeout(ktime_t *expires, const enum hrtimer_mode mode); /* Soft interrupt function to run the hrtimer queues: */ diff --git a/include/linux/hsi/hsi.h b/include/linux/hsi/hsi.h index 6ca92bff02..57402544b5 100644 --- a/include/linux/hsi/hsi.h +++ b/include/linux/hsi/hsi.h @@ -1,10 +1,23 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * HSI core header file. * * Copyright (C) 2010 Nokia Corporation. All rights reserved. 
* * Contact: Carlos Chinea + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA + * 02110-1301 USA */ #ifndef __LINUX_HSI_H__ diff --git a/include/linux/hsi/ssi_protocol.h b/include/linux/hsi/ssi_protocol.h index 2d6f3cfa7d..1433651be0 100644 --- a/include/linux/hsi/ssi_protocol.h +++ b/include/linux/hsi/ssi_protocol.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * ssip_slave.h * @@ -7,6 +6,20 @@ * Copyright (C) 2010 Nokia Corporation. All rights reserved. * * Contact: Carlos Chinea + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA + * 02110-1301 USA */ #ifndef __LINUX_SSIP_SLAVE_H__ diff --git a/include/linux/htcpld.h b/include/linux/htcpld.h index 842fce69ac..ab3f6cb4dd 100644 --- a/include/linux/htcpld.h +++ b/include/linux/htcpld.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef __LINUX_HTCPLD_H #define __LINUX_HTCPLD_H diff --git a/include/linux/htirq.h b/include/linux/htirq.h new file mode 100644 index 0000000000..d4a527e584 --- /dev/null +++ b/include/linux/htirq.h @@ -0,0 +1,38 @@ +#ifndef LINUX_HTIRQ_H +#define LINUX_HTIRQ_H + +struct pci_dev; +struct irq_data; + +struct ht_irq_msg { + u32 address_lo; /* low 32 bits of the ht irq message */ + u32 address_hi; /* high 32 bits of the it irq message */ +}; + +typedef void (ht_irq_update_t)(struct pci_dev *dev, int irq, + struct ht_irq_msg *msg); + +struct ht_irq_cfg { + struct pci_dev *dev; + /* Update callback used to cope with buggy hardware */ + ht_irq_update_t *update; + unsigned pos; + unsigned idx; + struct ht_irq_msg msg; +}; + +/* Helper functions.. 
*/ +void fetch_ht_irq_msg(unsigned int irq, struct ht_irq_msg *msg); +void write_ht_irq_msg(unsigned int irq, struct ht_irq_msg *msg); +void mask_ht_irq(struct irq_data *data); +void unmask_ht_irq(struct irq_data *data); + +/* The arch hook for getting things started */ +int arch_setup_ht_irq(int idx, int pos, struct pci_dev *dev, + ht_irq_update_t *update); +void arch_teardown_ht_irq(unsigned int irq); + +/* For drivers of buggy hardware */ +int __ht_create_irq(struct pci_dev *dev, int idx, ht_irq_update_t *update); + +#endif /* LINUX_HTIRQ_H */ diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h index f123e15d96..e35e6de633 100644 --- a/include/linux/huge_mm.h +++ b/include/linux/huge_mm.h @@ -1,191 +1,99 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_HUGE_MM_H #define _LINUX_HUGE_MM_H -#include -#include - -#include /* only for vma_is_dax() */ - -vm_fault_t do_huge_pmd_anonymous_page(struct vm_fault *vmf); -int copy_huge_pmd(struct mm_struct *dst_mm, struct mm_struct *src_mm, - pmd_t *dst_pmd, pmd_t *src_pmd, unsigned long addr, - struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma); -void huge_pmd_set_accessed(struct vm_fault *vmf); -int copy_huge_pud(struct mm_struct *dst_mm, struct mm_struct *src_mm, - pud_t *dst_pud, pud_t *src_pud, unsigned long addr, - struct vm_area_struct *vma); - -#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD -void huge_pud_set_accessed(struct vm_fault *vmf, pud_t orig_pud); -#else -static inline void huge_pud_set_accessed(struct vm_fault *vmf, pud_t orig_pud) -{ -} -#endif - -vm_fault_t do_huge_pmd_wp_page(struct vm_fault *vmf); -struct page *follow_trans_huge_pmd(struct vm_area_struct *vma, - unsigned long addr, pmd_t *pmd, - unsigned int flags); -bool madvise_free_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma, - pmd_t *pmd, unsigned long addr, unsigned long next); -int zap_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma, pmd_t *pmd, - unsigned long addr); -int 
zap_huge_pud(struct mmu_gather *tlb, struct vm_area_struct *vma, pud_t *pud, - unsigned long addr); -bool move_huge_pmd(struct vm_area_struct *vma, unsigned long old_addr, - unsigned long new_addr, pmd_t *old_pmd, pmd_t *new_pmd); -int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd, unsigned long addr, - pgprot_t newprot, unsigned long cp_flags); -vm_fault_t vmf_insert_pfn_pmd_prot(struct vm_fault *vmf, pfn_t pfn, - pgprot_t pgprot, bool write); - -/** - * vmf_insert_pfn_pmd - insert a pmd size pfn - * @vmf: Structure describing the fault - * @pfn: pfn to insert - * @pgprot: page protection to use - * @write: whether it's a write fault - * - * Insert a pmd size pfn. See vmf_insert_pfn() for additional info. - * - * Return: vm_fault_t value. - */ -static inline vm_fault_t vmf_insert_pfn_pmd(struct vm_fault *vmf, pfn_t pfn, - bool write) -{ - return vmf_insert_pfn_pmd_prot(vmf, pfn, vmf->vma->vm_page_prot, write); -} -vm_fault_t vmf_insert_pfn_pud_prot(struct vm_fault *vmf, pfn_t pfn, - pgprot_t pgprot, bool write); - -/** - * vmf_insert_pfn_pud - insert a pud size pfn - * @vmf: Structure describing the fault - * @pfn: pfn to insert - * @pgprot: page protection to use - * @write: whether it's a write fault - * - * Insert a pud size pfn. See vmf_insert_pfn() for additional info. - * - * Return: vm_fault_t value. 
- */ -static inline vm_fault_t vmf_insert_pfn_pud(struct vm_fault *vmf, pfn_t pfn, - bool write) -{ - return vmf_insert_pfn_pud_prot(vmf, pfn, vmf->vma->vm_page_prot, write); -} - +extern int do_huge_pmd_anonymous_page(struct fault_env *fe); +extern int copy_huge_pmd(struct mm_struct *dst_mm, struct mm_struct *src_mm, + pmd_t *dst_pmd, pmd_t *src_pmd, unsigned long addr, + struct vm_area_struct *vma); +extern void huge_pmd_set_accessed(struct fault_env *fe, pmd_t orig_pmd); +extern int do_huge_pmd_wp_page(struct fault_env *fe, pmd_t orig_pmd); +extern struct page *follow_trans_huge_pmd(struct vm_area_struct *vma, + unsigned long addr, + pmd_t *pmd, + unsigned int flags); +extern bool madvise_free_huge_pmd(struct mmu_gather *tlb, + struct vm_area_struct *vma, + pmd_t *pmd, unsigned long addr, unsigned long next); +extern int zap_huge_pmd(struct mmu_gather *tlb, + struct vm_area_struct *vma, + pmd_t *pmd, unsigned long addr); +extern int mincore_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd, + unsigned long addr, unsigned long end, + unsigned char *vec); +extern bool move_huge_pmd(struct vm_area_struct *vma, unsigned long old_addr, + unsigned long new_addr, unsigned long old_end, + pmd_t *old_pmd, pmd_t *new_pmd, bool *need_flush); +extern int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd, + unsigned long addr, pgprot_t newprot, + int prot_numa); +int vmf_insert_pfn_pmd(struct vm_area_struct *, unsigned long addr, pmd_t *, + pfn_t pfn, bool write); enum transparent_hugepage_flag { - TRANSPARENT_HUGEPAGE_NEVER_DAX, TRANSPARENT_HUGEPAGE_FLAG, TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG, TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG, - TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG, TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG, TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG, TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG, +#ifdef CONFIG_DEBUG_VM + TRANSPARENT_HUGEPAGE_DEBUG_COW_FLAG, +#endif }; struct kobject; struct kobj_attribute; -ssize_t 
single_hugepage_flag_store(struct kobject *kobj, - struct kobj_attribute *attr, - const char *buf, size_t count, - enum transparent_hugepage_flag flag); -ssize_t single_hugepage_flag_show(struct kobject *kobj, - struct kobj_attribute *attr, char *buf, - enum transparent_hugepage_flag flag); +extern ssize_t single_hugepage_flag_store(struct kobject *kobj, + struct kobj_attribute *attr, + const char *buf, size_t count, + enum transparent_hugepage_flag flag); +extern ssize_t single_hugepage_flag_show(struct kobject *kobj, + struct kobj_attribute *attr, char *buf, + enum transparent_hugepage_flag flag); extern struct kobj_attribute shmem_enabled_attr; #define HPAGE_PMD_ORDER (HPAGE_PMD_SHIFT-PAGE_SHIFT) #define HPAGE_PMD_NR (1<vm_start >> PAGE_SHIFT) - vma->vm_pgoff, - HPAGE_PMD_NR)) - return false; - } - - if (haddr < vma->vm_start || haddr + HPAGE_PMD_SIZE > vma->vm_end) - return false; - return true; -} - -static inline bool transhuge_vma_enabled(struct vm_area_struct *vma, - unsigned long vm_flags) -{ - /* Explicitly disabled through madvise. */ - if ((vm_flags & VM_NOHUGEPAGE) || - test_bit(MMF_DISABLE_THP, &vma->vm_mm->flags)) - return false; - return true; -} - -/* - * to be used on vmas which are known to support THP. - * Use transparent_hugepage_active otherwise - */ -static inline bool __transparent_hugepage_enabled(struct vm_area_struct *vma) -{ - - /* - * If the hardware/firmware marked hugepage support disabled. 
- */ - if (transparent_hugepage_flags & (1 << TRANSPARENT_HUGEPAGE_NEVER_DAX)) - return false; - - if (!transhuge_vma_enabled(vma, vma->vm_flags)) - return false; - - if (vma_is_temporary_stack(vma)) - return false; - - if (transparent_hugepage_flags & (1 << TRANSPARENT_HUGEPAGE_FLAG)) - return true; - - if (vma_is_dax(vma)) - return true; - - if (transparent_hugepage_flags & - (1 << TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG)) - return !!(vma->vm_flags & VM_HUGEPAGE); - - return false; -} - -bool transparent_hugepage_active(struct vm_area_struct *vma); +extern bool is_vma_temporary_stack(struct vm_area_struct *vma); +#define transparent_hugepage_enabled(__vma) \ + ((transparent_hugepage_flags & \ + (1<vm_flags & VM_HUGEPAGE))) && \ + !((__vma)->vm_flags & VM_NOHUGEPAGE) && \ + !is_vma_temporary_stack(__vma)) #define transparent_hugepage_use_zero_page() \ (transparent_hugepage_flags & \ (1<vm_mm->mmap_sem), vma); + if (pmd_trans_huge(*pmd) || pmd_devmap(*pmd)) return __pmd_trans_huge_lock(pmd, vma); else return NULL; } -static inline spinlock_t *pud_trans_huge_lock(pud_t *pud, - struct vm_area_struct *vma) +static inline int hpage_nr_pages(struct page *page) { - if (pud_trans_huge(*pud) || pud_devmap(*pud)) - return __pud_trans_huge_lock(pud, vma); - else - return NULL; -} - -/** - * thp_head - Head page of a transparent huge page. - * @page: Any page (tail, head or regular) found in the page cache. - */ -static inline struct page *thp_head(struct page *page) -{ - return compound_head(page); -} - -/** - * thp_order - Order of a transparent huge page. - * @page: Head page of a transparent huge page. - */ -static inline unsigned int thp_order(struct page *page) -{ - VM_BUG_ON_PGFLAGS(PageTail(page), page); - if (PageHead(page)) - return HPAGE_PMD_ORDER; - return 0; -} - -/** - * thp_nr_pages - The number of regular pages in this huge page. - * @page: The head page of a huge page. 
- */ -static inline int thp_nr_pages(struct page *page) -{ - VM_BUG_ON_PGFLAGS(PageTail(page), page); - if (PageHead(page)) + if (unlikely(PageTransHuge(page))) return HPAGE_PMD_NR; return 1; } -struct page *follow_devmap_pmd(struct vm_area_struct *vma, unsigned long addr, - pmd_t *pmd, int flags, struct dev_pagemap **pgmap); -struct page *follow_devmap_pud(struct vm_area_struct *vma, unsigned long addr, - pud_t *pud, int flags, struct dev_pagemap **pgmap); - -vm_fault_t do_huge_pmd_numa_page(struct vm_fault *vmf); +extern int do_huge_pmd_numa_page(struct fault_env *fe, pmd_t orig_pmd); extern struct page *huge_zero_page; -extern unsigned long huge_zero_pfn; static inline bool is_huge_zero_page(struct page *page) { - return READ_ONCE(huge_zero_page) == page; + return ACCESS_ONCE(huge_zero_page) == page; } static inline bool is_huge_zero_pmd(pmd_t pmd) { - return READ_ONCE(huge_zero_pfn) == pmd_pfn(pmd) && pmd_present(pmd); -} - -static inline bool is_huge_zero_pud(pud_t pud) -{ - return false; + return is_huge_zero_page(pmd_page(pmd)); } struct page *mm_get_huge_zero_page(struct mm_struct *mm); @@ -313,86 +161,21 @@ void mm_put_huge_zero_page(struct mm_struct *mm); #define mk_huge_pmd(page, prot) pmd_mkhuge(mk_pmd(page, prot)) -static inline bool thp_migration_supported(void) -{ - return IS_ENABLED(CONFIG_ARCH_ENABLE_THP_MIGRATION); -} - -static inline struct list_head *page_deferred_list(struct page *page) -{ - /* - * Global or memcg deferred list in the second tail pages is - * occupied by compound_head. 
- */ - return &page[2].deferred_list; -} - #else /* CONFIG_TRANSPARENT_HUGEPAGE */ #define HPAGE_PMD_SHIFT ({ BUILD_BUG(); 0; }) #define HPAGE_PMD_MASK ({ BUILD_BUG(); 0; }) #define HPAGE_PMD_SIZE ({ BUILD_BUG(); 0; }) -#define HPAGE_PUD_SHIFT ({ BUILD_BUG(); 0; }) -#define HPAGE_PUD_MASK ({ BUILD_BUG(); 0; }) -#define HPAGE_PUD_SIZE ({ BUILD_BUG(); 0; }) +#define hpage_nr_pages(x) 1 -static inline struct page *thp_head(struct page *page) -{ - VM_BUG_ON_PGFLAGS(PageTail(page), page); - return page; -} - -static inline unsigned int thp_order(struct page *page) -{ - VM_BUG_ON_PGFLAGS(PageTail(page), page); - return 0; -} - -static inline int thp_nr_pages(struct page *page) -{ - VM_BUG_ON_PGFLAGS(PageTail(page), page); - return 1; -} - -static inline bool __transparent_hugepage_enabled(struct vm_area_struct *vma) -{ - return false; -} - -static inline bool transparent_hugepage_active(struct vm_area_struct *vma) -{ - return false; -} - -static inline bool transhuge_vma_suitable(struct vm_area_struct *vma, - unsigned long haddr) -{ - return false; -} - -static inline bool transhuge_vma_enabled(struct vm_area_struct *vma, - unsigned long vm_flags) -{ - return false; -} +#define transparent_hugepage_enabled(__vma) 0 static inline void prep_transhuge_page(struct page *page) {} -static inline bool is_transparent_hugepage(struct page *page) -{ - return false; -} - #define transparent_hugepage_flags 0UL #define thp_get_unmapped_area NULL -static inline bool -can_split_huge_page(struct page *page, int *pextra_pins) -{ - BUILD_BUG(); - return false; -} static inline int split_huge_page_to_list(struct page *page, struct list_head *list) { @@ -406,14 +189,9 @@ static inline void deferred_split_huge_page(struct page *page) {} #define split_huge_pmd(__vma, __pmd, __address) \ do { } while (0) -static inline void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd, - unsigned long address, bool freeze, struct page *page) {} static inline void split_huge_pmd_address(struct 
vm_area_struct *vma, unsigned long address, bool freeze, struct page *page) {} -#define split_huge_pud(__vma, __pmd, __address) \ - do { } while (0) - static inline int hugepage_madvise(struct vm_area_struct *vma, unsigned long *vm_flags, int advice) { @@ -426,22 +204,13 @@ static inline void vma_adjust_trans_huge(struct vm_area_struct *vma, long adjust_next) { } -static inline int is_swap_pmd(pmd_t pmd) -{ - return 0; -} static inline spinlock_t *pmd_trans_huge_lock(pmd_t *pmd, struct vm_area_struct *vma) { return NULL; } -static inline spinlock_t *pud_trans_huge_lock(pud_t *pud, - struct vm_area_struct *vma) -{ - return NULL; -} -static inline vm_fault_t do_huge_pmd_numa_page(struct vm_fault *vmf) +static inline int do_huge_pmd_numa_page(struct fault_env *fe, pmd_t orig_pmd) { return 0; } @@ -451,48 +220,16 @@ static inline bool is_huge_zero_page(struct page *page) return false; } -static inline bool is_huge_zero_pmd(pmd_t pmd) -{ - return false; -} - -static inline bool is_huge_zero_pud(pud_t pud) -{ - return false; -} - static inline void mm_put_huge_zero_page(struct mm_struct *mm) { return; } static inline struct page *follow_devmap_pmd(struct vm_area_struct *vma, - unsigned long addr, pmd_t *pmd, int flags, struct dev_pagemap **pgmap) + unsigned long addr, pmd_t *pmd, int flags) { return NULL; } - -static inline struct page *follow_devmap_pud(struct vm_area_struct *vma, - unsigned long addr, pud_t *pud, int flags, struct dev_pagemap **pgmap) -{ - return NULL; -} - -static inline bool thp_migration_supported(void) -{ - return false; -} #endif /* CONFIG_TRANSPARENT_HUGEPAGE */ -/** - * thp_size - Size of a transparent huge page. - * @page: Head page of a transparent huge page. - * - * Return: Number of bytes in this page. 
- */ -static inline unsigned long thp_size(struct page *page) -{ - return PAGE_SIZE << thp_order(page); -} - #endif /* _LINUX_HUGE_MM_H */ diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h index 1faebe1cd0..5d503f410a 100644 --- a/include/linux/hugetlb.h +++ b/include/linux/hugetlb.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_HUGETLB_H #define _LINUX_HUGETLB_H @@ -9,53 +8,28 @@ #include #include #include -#include -#include -#include +#include struct ctl_table; struct user_struct; struct mmu_gather; -#ifndef is_hugepd -typedef struct { unsigned long pd; } hugepd_t; -#define is_hugepd(hugepd) (0) -#define __hugepd(x) ((hugepd_t) { (x) }) -#endif - #ifdef CONFIG_HUGETLB_PAGE #include #include #include -/* - * For HugeTLB page, there are more metadata to save in the struct page. But - * the head struct page cannot meet our needs, so we have to abuse other tail - * struct page to store the metadata. In order to avoid conflicts caused by - * subsequent use of more tail struct pages, we gather these discrete indexes - * of tail struct page here. - */ -enum { - SUBPAGE_INDEX_SUBPOOL = 1, /* reuse page->private */ -#ifdef CONFIG_CGROUP_HUGETLB - SUBPAGE_INDEX_CGROUP, /* reuse page->private */ - SUBPAGE_INDEX_CGROUP_RSVD, /* reuse page->private */ - __MAX_CGROUP_SUBPAGE_INDEX = SUBPAGE_INDEX_CGROUP_RSVD, -#endif - __NR_USED_SUBPAGE, -}; - struct hugepage_subpool { spinlock_t lock; long count; long max_hpages; /* Maximum huge pages or -1 if no maximum. */ long used_hpages; /* Used count against maximum, includes */ - /* both allocated and reserved pages. */ + /* both alloced and reserved pages. */ struct hstate *hstate; long min_hpages; /* Minimum huge pages or -1 if no minimum. */ long rsv_hpages; /* Pages reserved against global pool to */ - /* satisfy minimum size. */ + /* sasitfy minimum size. 
*/ }; struct resv_map { @@ -65,52 +39,7 @@ struct resv_map { long adds_in_progress; struct list_head region_cache; long region_cache_count; -#ifdef CONFIG_CGROUP_HUGETLB - /* - * On private mappings, the counter to uncharge reservations is stored - * here. If these fields are 0, then either the mapping is shared, or - * cgroup accounting is disabled for this resv_map. - */ - struct page_counter *reservation_counter; - unsigned long pages_per_hpage; - struct cgroup_subsys_state *css; -#endif }; - -/* - * Region tracking -- allows tracking of reservations and instantiated pages - * across the pages in a mapping. - * - * The region data structures are embedded into a resv_map and protected - * by a resv_map's lock. The set of regions within the resv_map represent - * reservations for huge pages, or huge pages that have already been - * instantiated within the map. The from and to elements are huge page - * indices into the associated mapping. from indicates the starting index - * of the region. to represents the first index past the end of the region. - * - * For example, a file region structure with from == 0 and to == 4 represents - * four huge pages in a mapping. It is important to note that the to element - * represents the first element past the end of the region. This is used in - * arithmetic as 4(to) - 0(from) = 4 huge pages in the region. - * - * Interval notation of the form [from, to) will be used to indicate that - * the endpoint from is inclusive and to is exclusive. - */ -struct file_region { - struct list_head link; - long from; - long to; -#ifdef CONFIG_CGROUP_HUGETLB - /* - * On shared mappings, each reserved region appears as a struct - * file_region in resv_map. These fields hold the info needed to - * uncharge each reservation. 
- */ - struct page_counter *reservation_counter; - struct cgroup_subsys_state *css; -#endif -}; - extern struct resv_map *resv_map_alloc(void); void resv_map_release(struct kref *ref); @@ -124,19 +53,19 @@ struct hugepage_subpool *hugepage_new_subpool(struct hstate *h, long max_hpages, void hugepage_put_subpool(struct hugepage_subpool *spool); void reset_vma_resv_huge_pages(struct vm_area_struct *vma); -int hugetlb_sysctl_handler(struct ctl_table *, int, void *, size_t *, loff_t *); -int hugetlb_overcommit_handler(struct ctl_table *, int, void *, size_t *, - loff_t *); -int hugetlb_treat_movable_handler(struct ctl_table *, int, void *, size_t *, - loff_t *); -int hugetlb_mempolicy_sysctl_handler(struct ctl_table *, int, void *, size_t *, - loff_t *); +int hugetlb_sysctl_handler(struct ctl_table *, int, void __user *, size_t *, loff_t *); +int hugetlb_overcommit_handler(struct ctl_table *, int, void __user *, size_t *, loff_t *); +int hugetlb_treat_movable_handler(struct ctl_table *, int, void __user *, size_t *, loff_t *); + +#ifdef CONFIG_NUMA +int hugetlb_mempolicy_sysctl_handler(struct ctl_table *, int, + void __user *, size_t *, loff_t *); +#endif int copy_hugetlb_page_range(struct mm_struct *, struct mm_struct *, struct vm_area_struct *); long follow_hugetlb_page(struct mm_struct *, struct vm_area_struct *, struct page **, struct vm_area_struct **, - unsigned long *, unsigned long *, long, unsigned int, - int *); + unsigned long *, unsigned long *, long, unsigned int); void unmap_hugepage_range(struct vm_area_struct *, unsigned long, unsigned long, struct page *); void __unmap_hugepage_range_final(struct mmu_gather *tlb, @@ -147,71 +76,50 @@ void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma, unsigned long start, unsigned long end, struct page *ref_page); void hugetlb_report_meminfo(struct seq_file *); -int hugetlb_report_node_meminfo(char *buf, int len, int nid); +int hugetlb_report_node_meminfo(int, char *); void 
hugetlb_show_meminfo(void); unsigned long hugetlb_total_pages(void); -vm_fault_t hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma, +int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma, unsigned long address, unsigned int flags); -#ifdef CONFIG_USERFAULTFD -int hugetlb_mcopy_atomic_pte(struct mm_struct *dst_mm, pte_t *dst_pte, - struct vm_area_struct *dst_vma, - unsigned long dst_addr, - unsigned long src_addr, - enum mcopy_atomic_mode mode, - struct page **pagep); -#endif /* CONFIG_USERFAULTFD */ -bool hugetlb_reserve_pages(struct inode *inode, long from, long to, +int hugetlb_reserve_pages(struct inode *inode, long from, long to, struct vm_area_struct *vma, vm_flags_t vm_flags); long hugetlb_unreserve_pages(struct inode *inode, long start, long end, long freed); +int dequeue_hwpoisoned_huge_page(struct page *page); bool isolate_huge_page(struct page *page, struct list_head *list); -int get_hwpoison_huge_page(struct page *page, bool *hugetlb); void putback_active_hugepage(struct page *page); -void move_hugetlb_state(struct page *oldpage, struct page *newpage, int reason); void free_huge_page(struct page *page); void hugetlb_fix_reserve_counts(struct inode *inode); extern struct mutex *hugetlb_fault_mutex_table; -u32 hugetlb_fault_mutex_hash(struct address_space *mapping, pgoff_t idx); +u32 hugetlb_fault_mutex_hash(struct hstate *h, struct mm_struct *mm, + struct vm_area_struct *vma, + struct address_space *mapping, + pgoff_t idx, unsigned long address); -pte_t *huge_pmd_share(struct mm_struct *mm, struct vm_area_struct *vma, - unsigned long addr, pud_t *pud); - -struct address_space *hugetlb_page_mapping_lock_write(struct page *hpage); +pte_t *huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud); +extern int hugepages_treat_as_movable; extern int sysctl_hugetlb_shm_group; extern struct list_head huge_boot_pages; /* arch callbacks */ -pte_t *huge_pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma, +pte_t 
*huge_pte_alloc(struct mm_struct *mm, unsigned long addr, unsigned long sz); -pte_t *huge_pte_offset(struct mm_struct *mm, - unsigned long addr, unsigned long sz); -int huge_pmd_unshare(struct mm_struct *mm, struct vm_area_struct *vma, - unsigned long *addr, pte_t *ptep); -void adjust_range_if_pmd_sharing_possible(struct vm_area_struct *vma, - unsigned long *start, unsigned long *end); +pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr); +int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep); struct page *follow_huge_addr(struct mm_struct *mm, unsigned long address, int write); -struct page *follow_huge_pd(struct vm_area_struct *vma, - unsigned long address, hugepd_t hpd, - int flags, int pdshift); struct page *follow_huge_pmd(struct mm_struct *mm, unsigned long address, pmd_t *pmd, int flags); struct page *follow_huge_pud(struct mm_struct *mm, unsigned long address, pud_t *pud, int flags); -struct page *follow_huge_pgd(struct mm_struct *mm, unsigned long address, - pgd_t *pgd, int flags); - int pmd_huge(pmd_t pmd); -int pud_huge(pud_t pud); +int pud_huge(pud_t pmd); unsigned long hugetlb_change_protection(struct vm_area_struct *vma, unsigned long address, unsigned long end, pgprot_t newprot); -bool is_hugetlb_entry_migration(pte_t pte); -void hugetlb_unshare_all_pmds(struct vm_area_struct *vma); - #else /* !CONFIG_HUGETLB_PAGE */ static inline void reset_vma_resv_huge_pages(struct vm_area_struct *vma) @@ -223,157 +131,38 @@ static inline unsigned long hugetlb_total_pages(void) return 0; } -static inline struct address_space *hugetlb_page_mapping_lock_write( - struct page *hpage) -{ - return NULL; -} - -static inline int huge_pmd_unshare(struct mm_struct *mm, - struct vm_area_struct *vma, - unsigned long *addr, pte_t *ptep) -{ - return 0; -} - -static inline void adjust_range_if_pmd_sharing_possible( - struct vm_area_struct *vma, - unsigned long *start, unsigned long *end) -{ -} - -static inline long follow_hugetlb_page(struct 
mm_struct *mm, - struct vm_area_struct *vma, struct page **pages, - struct vm_area_struct **vmas, unsigned long *position, - unsigned long *nr_pages, long i, unsigned int flags, - int *nonblocking) -{ - BUG(); - return 0; -} - -static inline struct page *follow_huge_addr(struct mm_struct *mm, - unsigned long address, int write) -{ - return ERR_PTR(-EINVAL); -} - -static inline int copy_hugetlb_page_range(struct mm_struct *dst, - struct mm_struct *src, struct vm_area_struct *vma) -{ - BUG(); - return 0; -} - +#define follow_hugetlb_page(m,v,p,vs,a,b,i,w) ({ BUG(); 0; }) +#define follow_huge_addr(mm, addr, write) ERR_PTR(-EINVAL) +#define copy_hugetlb_page_range(src, dst, vma) ({ BUG(); 0; }) static inline void hugetlb_report_meminfo(struct seq_file *m) { } - -static inline int hugetlb_report_node_meminfo(char *buf, int len, int nid) -{ - return 0; -} - +#define hugetlb_report_node_meminfo(n, buf) 0 static inline void hugetlb_show_meminfo(void) { } - -static inline struct page *follow_huge_pd(struct vm_area_struct *vma, - unsigned long address, hugepd_t hpd, int flags, - int pdshift) -{ - return NULL; -} - -static inline struct page *follow_huge_pmd(struct mm_struct *mm, - unsigned long address, pmd_t *pmd, int flags) -{ - return NULL; -} - -static inline struct page *follow_huge_pud(struct mm_struct *mm, - unsigned long address, pud_t *pud, int flags) -{ - return NULL; -} - -static inline struct page *follow_huge_pgd(struct mm_struct *mm, - unsigned long address, pgd_t *pgd, int flags) -{ - return NULL; -} - -static inline int prepare_hugepage_range(struct file *file, - unsigned long addr, unsigned long len) -{ - return -EINVAL; -} - -static inline int pmd_huge(pmd_t pmd) +#define follow_huge_pmd(mm, addr, pmd, flags) NULL +#define follow_huge_pud(mm, addr, pud, flags) NULL +#define prepare_hugepage_range(file, addr, len) (-EINVAL) +#define pmd_huge(x) 0 +#define pud_huge(x) 0 +#define is_hugepage_only_range(mm, addr, len) 0 +#define hugetlb_free_pgd_range(tlb, 
addr, end, floor, ceiling) ({BUG(); 0; }) +#define hugetlb_fault(mm, vma, addr, flags) ({ BUG(); 0; }) +#define huge_pte_offset(mm, address) 0 +static inline int dequeue_hwpoisoned_huge_page(struct page *page) { return 0; } -static inline int pud_huge(pud_t pud) -{ - return 0; -} - -static inline int is_hugepage_only_range(struct mm_struct *mm, - unsigned long addr, unsigned long len) -{ - return 0; -} - -static inline void hugetlb_free_pgd_range(struct mmu_gather *tlb, - unsigned long addr, unsigned long end, - unsigned long floor, unsigned long ceiling) -{ - BUG(); -} - -#ifdef CONFIG_USERFAULTFD -static inline int hugetlb_mcopy_atomic_pte(struct mm_struct *dst_mm, - pte_t *dst_pte, - struct vm_area_struct *dst_vma, - unsigned long dst_addr, - unsigned long src_addr, - enum mcopy_atomic_mode mode, - struct page **pagep) -{ - BUG(); - return 0; -} -#endif /* CONFIG_USERFAULTFD */ - -static inline pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr, - unsigned long sz) -{ - return NULL; -} - static inline bool isolate_huge_page(struct page *page, struct list_head *list) { return false; } +#define putback_active_hugepage(p) do {} while (0) -static inline int get_hwpoison_huge_page(struct page *page, bool *hugetlb) -{ - return 0; -} - -static inline void putback_active_hugepage(struct page *page) -{ -} - -static inline void move_hugetlb_state(struct page *oldpage, - struct page *newpage, int reason) -{ -} - -static inline unsigned long hugetlb_change_protection( - struct vm_area_struct *vma, unsigned long address, - unsigned long end, pgprot_t newprot) +static inline unsigned long hugetlb_change_protection(struct vm_area_struct *vma, + unsigned long address, unsigned long end, pgprot_t newprot) { return 0; } @@ -392,16 +181,6 @@ static inline void __unmap_hugepage_range(struct mmu_gather *tlb, BUG(); } -static inline vm_fault_t hugetlb_fault(struct mm_struct *mm, - struct vm_area_struct *vma, unsigned long address, - unsigned int flags) -{ - BUG(); - 
return 0; -} - -static inline void hugetlb_unshare_all_pmds(struct vm_area_struct *vma) { } - #endif /* !CONFIG_HUGETLB_PAGE */ /* * hugepages at page global directory. If arch support @@ -410,9 +189,6 @@ static inline void hugetlb_unshare_all_pmds(struct vm_area_struct *vma) { } #ifndef pgd_huge #define pgd_huge(x) 0 #endif -#ifndef p4d_huge -#define p4d_huge(x) 0 -#endif #ifndef pgd_write static inline int pgd_write(pgd_t pgd) @@ -422,6 +198,37 @@ static inline int pgd_write(pgd_t pgd) } #endif +#ifndef pud_write +static inline int pud_write(pud_t pud) +{ + BUG(); + return 0; +} +#endif + +#ifndef is_hugepd +/* + * Some architectures requires a hugepage directory format that is + * required to support multiple hugepage sizes. For example + * a4fe3ce76 "powerpc/mm: Allow more flexible layouts for hugepage pagetables" + * introduced the same on powerpc. This allows for a more flexible hugepage + * pagetable layout. + */ +typedef struct { unsigned long pd; } hugepd_t; +#define is_hugepd(hugepd) (0) +#define __hugepd(x) ((hugepd_t) { (x) }) +static inline int gup_huge_pd(hugepd_t hugepd, unsigned long addr, + unsigned pdshift, unsigned long end, + int write, struct page **pages, int *nr) +{ + return 0; +} +#else +extern int gup_huge_pd(hugepd_t hugepd, unsigned long addr, + unsigned pdshift, unsigned long end, + int write, struct page **pages, int *nr); +#endif + #define HUGETLB_ANON_FILE "anon_hugepage" enum { @@ -444,9 +251,6 @@ struct hugetlbfs_sb_info { spinlock_t stat_lock; struct hstate *hstate; struct hugepage_subpool *spool; - kuid_t uid; - kgid_t gid; - umode_t mode; }; static inline struct hugetlbfs_sb_info *HUGETLBFS_SB(struct super_block *sb) @@ -454,21 +258,10 @@ static inline struct hugetlbfs_sb_info *HUGETLBFS_SB(struct super_block *sb) return sb->s_fs_info; } -struct hugetlbfs_inode_info { - struct shared_policy policy; - struct inode vfs_inode; - unsigned int seals; -}; - -static inline struct hugetlbfs_inode_info *HUGETLBFS_I(struct inode *inode) -{ 
- return container_of(inode, struct hugetlbfs_inode_info, vfs_inode); -} - extern const struct file_operations hugetlbfs_file_operations; extern const struct vm_operations_struct hugetlb_vm_ops; struct file *hugetlb_file_setup(const char *name, size_t size, vm_flags_t acct, - struct ucounts **ucounts, int creat_flags, + struct user_struct **user, int creat_flags, int page_size_log); static inline bool is_file_hugepages(struct file *file) @@ -479,25 +272,18 @@ static inline bool is_file_hugepages(struct file *file) return is_file_shm_hugepages(file); } -static inline struct hstate *hstate_inode(struct inode *i) -{ - return HUGETLBFS_SB(i->i_sb)->hstate; -} + #else /* !CONFIG_HUGETLBFS */ #define is_file_hugepages(file) false static inline struct file * hugetlb_file_setup(const char *name, size_t size, vm_flags_t acctflag, - struct ucounts **ucounts, int creat_flags, + struct user_struct **user, int creat_flags, int page_size_log) { return ERR_PTR(-ENOSYS); } -static inline struct hstate *hstate_inode(struct inode *i) -{ - return NULL; -} #endif /* !CONFIG_HUGETLBFS */ #ifdef HAVE_ARCH_HUGETLB_UNMAPPED_AREA @@ -506,93 +292,11 @@ unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr, unsigned long flags); #endif /* HAVE_ARCH_HUGETLB_UNMAPPED_AREA */ -/* - * huegtlb page specific state flags. These flags are located in page.private - * of the hugetlb head page. Functions created via the below macros should be - * used to manipulate these flags. - * - * HPG_restore_reserve - Set when a hugetlb page consumes a reservation at - * allocation time. Cleared when page is fully instantiated. Free - * routine checks flag to restore a reservation on error paths. - * Synchronization: Examined or modified by code that knows it has - * the only reference to page. i.e. After allocation but before use - * or when the page is being freed. - * HPG_migratable - Set after a newly allocated page is added to the page - * cache and/or page tables. 
Indicates the page is a candidate for - * migration. - * Synchronization: Initially set after new page allocation with no - * locking. When examined and modified during migration processing - * (isolate, migrate, putback) the hugetlb_lock is held. - * HPG_temporary - - Set on a page that is temporarily allocated from the buddy - * allocator. Typically used for migration target pages when no pages - * are available in the pool. The hugetlb free page path will - * immediately free pages with this flag set to the buddy allocator. - * Synchronization: Can be set after huge page allocation from buddy when - * code knows it has only reference. All other examinations and - * modifications require hugetlb_lock. - * HPG_freed - Set when page is on the free lists. - * Synchronization: hugetlb_lock held for examination and modification. - * HPG_vmemmap_optimized - Set when the vmemmap pages of the page are freed. - */ -enum hugetlb_page_flags { - HPG_restore_reserve = 0, - HPG_migratable, - HPG_temporary, - HPG_freed, - HPG_vmemmap_optimized, - __NR_HPAGEFLAGS, -}; - -/* - * Macros to create test, set and clear function definitions for - * hugetlb specific page flags. 
- */ -#ifdef CONFIG_HUGETLB_PAGE -#define TESTHPAGEFLAG(uname, flname) \ -static inline int HPage##uname(struct page *page) \ - { return test_bit(HPG_##flname, &(page->private)); } - -#define SETHPAGEFLAG(uname, flname) \ -static inline void SetHPage##uname(struct page *page) \ - { set_bit(HPG_##flname, &(page->private)); } - -#define CLEARHPAGEFLAG(uname, flname) \ -static inline void ClearHPage##uname(struct page *page) \ - { clear_bit(HPG_##flname, &(page->private)); } -#else -#define TESTHPAGEFLAG(uname, flname) \ -static inline int HPage##uname(struct page *page) \ - { return 0; } - -#define SETHPAGEFLAG(uname, flname) \ -static inline void SetHPage##uname(struct page *page) \ - { } - -#define CLEARHPAGEFLAG(uname, flname) \ -static inline void ClearHPage##uname(struct page *page) \ - { } -#endif - -#define HPAGEFLAG(uname, flname) \ - TESTHPAGEFLAG(uname, flname) \ - SETHPAGEFLAG(uname, flname) \ - CLEARHPAGEFLAG(uname, flname) \ - -/* - * Create functions associated with hugetlb page flags - */ -HPAGEFLAG(RestoreReserve, restore_reserve) -HPAGEFLAG(Migratable, migratable) -HPAGEFLAG(Temporary, temporary) -HPAGEFLAG(Freed, freed) -HPAGEFLAG(VmemmapOptimized, vmemmap_optimized) - #ifdef CONFIG_HUGETLB_PAGE #define HSTATE_NAME_LEN 32 /* Defines one hugetlb page size */ struct hstate { - struct mutex resize_lock; int next_nid_to_alloc; int next_nid_to_free; unsigned int order; @@ -608,13 +312,9 @@ struct hstate { unsigned int nr_huge_pages_node[MAX_NUMNODES]; unsigned int free_huge_pages_node[MAX_NUMNODES]; unsigned int surplus_huge_pages_node[MAX_NUMNODES]; -#ifdef CONFIG_HUGETLB_PAGE_FREE_VMEMMAP - unsigned int nr_free_vmemmap_pages; -#endif #ifdef CONFIG_CGROUP_HUGETLB /* cgroup control files */ - struct cftype cgroup_files_dfl[7]; - struct cftype cgroup_files_legacy[9]; + struct cftype (*cgroup_files)[5]; #endif char name[HSTATE_NAME_LEN]; }; @@ -622,26 +322,24 @@ struct hstate { struct huge_bootmem_page { struct list_head list; struct hstate *hstate; 
+#ifdef CONFIG_HIGHMEM + phys_addr_t phys; +#endif }; -int isolate_or_dissolve_huge_page(struct page *page, struct list_head *list); struct page *alloc_huge_page(struct vm_area_struct *vma, unsigned long addr, int avoid_reserve); -struct page *alloc_huge_page_nodemask(struct hstate *h, int preferred_nid, - nodemask_t *nmask, gfp_t gfp_mask); -struct page *alloc_huge_page_vma(struct hstate *h, struct vm_area_struct *vma, - unsigned long address); +struct page *alloc_huge_page_node(struct hstate *h, int nid); +struct page *alloc_huge_page_noerr(struct vm_area_struct *vma, + unsigned long addr, int avoid_reserve); int huge_add_to_page_cache(struct page *page, struct address_space *mapping, pgoff_t idx); -void restore_reserve_on_error(struct hstate *h, struct vm_area_struct *vma, - unsigned long address, struct page *page); /* arch callback */ -int __init __alloc_bootmem_huge_page(struct hstate *h); int __init alloc_bootmem_huge_page(struct hstate *h); +void __init hugetlb_bad_size(void); void __init hugetlb_add_hstate(unsigned order); -bool __init arch_hugetlb_valid_size(unsigned long size); struct hstate *size_to_hstate(unsigned long size); #ifndef HUGE_MAX_HSTATE @@ -653,18 +351,9 @@ extern unsigned int default_hstate_idx; #define default_hstate (hstates[default_hstate_idx]) -/* - * hugetlb page subpool pointer located in hpage[1].private - */ -static inline struct hugepage_subpool *hugetlb_page_subpool(struct page *hpage) +static inline struct hstate *hstate_inode(struct inode *i) { - return (void *)page_private(hpage + SUBPAGE_INDEX_SUBPOOL); -} - -static inline void hugetlb_set_page_subpool(struct page *hpage, - struct hugepage_subpool *subpool) -{ - set_page_private(hpage + SUBPAGE_INDEX_SUBPOOL, (unsigned long)subpool); + return HUGETLBFS_SB(i->i_sb)->hstate; } static inline struct hstate *hstate_file(struct file *f) @@ -726,23 +415,9 @@ static inline unsigned int blocks_per_huge_page(struct hstate *h) #include -#ifndef is_hugepage_only_range -static inline int 
is_hugepage_only_range(struct mm_struct *mm, - unsigned long addr, unsigned long len) -{ - return 0; -} -#define is_hugepage_only_range is_hugepage_only_range -#endif - -#ifndef arch_clear_hugepage_flags -static inline void arch_clear_hugepage_flags(struct page *page) { } -#define arch_clear_hugepage_flags arch_clear_hugepage_flags -#endif - #ifndef arch_make_huge_pte -static inline pte_t arch_make_huge_pte(pte_t entry, unsigned int shift, - vm_flags_t flags) +static inline pte_t arch_make_huge_pte(pte_t entry, struct vm_area_struct *vma, + struct page *page, int writable) { return entry; } @@ -751,7 +426,7 @@ static inline pte_t arch_make_huge_pte(pte_t entry, unsigned int shift, static inline struct hstate *page_hstate(struct page *page) { VM_BUG_ON_PAGE(!PageHuge(page), page); - return size_to_hstate(page_size(page)); + return size_to_hstate(PAGE_SIZE << compound_order(page)); } static inline unsigned hstate_index_to_shift(unsigned index) @@ -764,78 +439,26 @@ static inline int hstate_index(struct hstate *h) return h - hstates; } -extern int dissolve_free_huge_page(struct page *page); +pgoff_t __basepage_index(struct page *page); + +/* Return page->index in PAGE_SIZE units */ +static inline pgoff_t basepage_index(struct page *page) +{ + if (!PageCompound(page)) + return page->index; + + return __basepage_index(page); +} + extern int dissolve_free_huge_pages(unsigned long start_pfn, unsigned long end_pfn); - -#ifdef CONFIG_ARCH_ENABLE_HUGEPAGE_MIGRATION -#ifndef arch_hugetlb_migration_supported -static inline bool arch_hugetlb_migration_supported(struct hstate *h) -{ - if ((huge_page_shift(h) == PMD_SHIFT) || - (huge_page_shift(h) == PUD_SHIFT) || - (huge_page_shift(h) == PGDIR_SHIFT)) - return true; - else - return false; -} -#endif -#else -static inline bool arch_hugetlb_migration_supported(struct hstate *h) -{ - return false; -} -#endif - static inline bool hugepage_migration_supported(struct hstate *h) { - return arch_hugetlb_migration_supported(h); -} - -/* 
- * Movability check is different as compared to migration check. - * It determines whether or not a huge page should be placed on - * movable zone or not. Movability of any huge page should be - * required only if huge page size is supported for migration. - * There won't be any reason for the huge page to be movable if - * it is not migratable to start with. Also the size of the huge - * page should be large enough to be placed under a movable zone - * and still feasible enough to be migratable. Just the presence - * in movable zone does not make the migration feasible. - * - * So even though large huge page sizes like the gigantic ones - * are migratable they should not be movable because its not - * feasible to migrate them from movable zone. - */ -static inline bool hugepage_movable_supported(struct hstate *h) -{ - if (!hugepage_migration_supported(h)) - return false; - - if (hstate_is_gigantic(h)) - return false; - return true; -} - -/* Movability of hugepages depends on migration support. 
*/ -static inline gfp_t htlb_alloc_mask(struct hstate *h) -{ - if (hugepage_movable_supported(h)) - return GFP_HIGHUSER_MOVABLE; - else - return GFP_HIGHUSER; -} - -static inline gfp_t htlb_modify_alloc_mask(struct hstate *h, gfp_t gfp_mask) -{ - gfp_t modified_mask = htlb_alloc_mask(h); - - /* Some callers might want to enforce node */ - modified_mask |= (gfp_mask & __GFP_THISNODE); - - modified_mask |= (gfp_mask & __GFP_NOWARN); - - return modified_mask; +#ifdef CONFIG_ARCH_ENABLE_HUGEPAGE_MIGRATION + return huge_page_shift(h) == PMD_SHIFT; +#else + return false; +#endif } static inline spinlock_t *huge_pte_lockptr(struct hstate *h, @@ -858,11 +481,6 @@ static inline spinlock_t *huge_pte_lockptr(struct hstate *h, void hugetlb_report_usage(struct seq_file *m, struct mm_struct *mm); -static inline void hugetlb_count_init(struct mm_struct *mm) -{ - atomic_long_set(&mm->hugetlb_usage, 0); -} - static inline void hugetlb_count_add(long l, struct mm_struct *mm) { atomic_long_add(l, &mm->hugetlb_usage); @@ -872,174 +490,36 @@ static inline void hugetlb_count_sub(long l, struct mm_struct *mm) { atomic_long_sub(l, &mm->hugetlb_usage); } - -#ifndef set_huge_swap_pte_at -static inline void set_huge_swap_pte_at(struct mm_struct *mm, unsigned long addr, - pte_t *ptep, pte_t pte, unsigned long sz) -{ - set_huge_pte_at(mm, addr, ptep, pte); -} -#endif - -#ifndef huge_ptep_modify_prot_start -#define huge_ptep_modify_prot_start huge_ptep_modify_prot_start -static inline pte_t huge_ptep_modify_prot_start(struct vm_area_struct *vma, - unsigned long addr, pte_t *ptep) -{ - return huge_ptep_get_and_clear(vma->vm_mm, addr, ptep); -} -#endif - -#ifndef huge_ptep_modify_prot_commit -#define huge_ptep_modify_prot_commit huge_ptep_modify_prot_commit -static inline void huge_ptep_modify_prot_commit(struct vm_area_struct *vma, - unsigned long addr, pte_t *ptep, - pte_t old_pte, pte_t pte) -{ - set_huge_pte_at(vma->vm_mm, addr, ptep, pte); -} -#endif - #else /* CONFIG_HUGETLB_PAGE */ struct 
hstate {}; - -static inline struct hugepage_subpool *hugetlb_page_subpool(struct page *hpage) -{ - return NULL; -} - -static inline int isolate_or_dissolve_huge_page(struct page *page, - struct list_head *list) -{ - return -ENOMEM; -} - -static inline struct page *alloc_huge_page(struct vm_area_struct *vma, - unsigned long addr, - int avoid_reserve) -{ - return NULL; -} - -static inline struct page * -alloc_huge_page_nodemask(struct hstate *h, int preferred_nid, - nodemask_t *nmask, gfp_t gfp_mask) -{ - return NULL; -} - -static inline struct page *alloc_huge_page_vma(struct hstate *h, - struct vm_area_struct *vma, - unsigned long address) -{ - return NULL; -} - -static inline int __alloc_bootmem_huge_page(struct hstate *h) -{ - return 0; -} - -static inline struct hstate *hstate_file(struct file *f) -{ - return NULL; -} - -static inline struct hstate *hstate_sizelog(int page_size_log) -{ - return NULL; -} - -static inline struct hstate *hstate_vma(struct vm_area_struct *vma) -{ - return NULL; -} - -static inline struct hstate *page_hstate(struct page *page) -{ - return NULL; -} - -static inline unsigned long huge_page_size(struct hstate *h) -{ - return PAGE_SIZE; -} - -static inline unsigned long huge_page_mask(struct hstate *h) -{ - return PAGE_MASK; -} - -static inline unsigned long vma_kernel_pagesize(struct vm_area_struct *vma) -{ - return PAGE_SIZE; -} - -static inline unsigned long vma_mmu_pagesize(struct vm_area_struct *vma) -{ - return PAGE_SIZE; -} - -static inline unsigned int huge_page_order(struct hstate *h) -{ - return 0; -} - -static inline unsigned int huge_page_shift(struct hstate *h) -{ - return PAGE_SHIFT; -} - -static inline bool hstate_is_gigantic(struct hstate *h) -{ - return false; -} - +#define alloc_huge_page(v, a, r) NULL +#define alloc_huge_page_node(h, nid) NULL +#define alloc_huge_page_noerr(v, a, r) NULL +#define alloc_bootmem_huge_page(h) NULL +#define hstate_file(f) NULL +#define hstate_sizelog(s) NULL +#define hstate_vma(v) NULL 
+#define hstate_inode(i) NULL +#define page_hstate(page) NULL +#define huge_page_size(h) PAGE_SIZE +#define huge_page_mask(h) PAGE_MASK +#define vma_kernel_pagesize(v) PAGE_SIZE +#define vma_mmu_pagesize(v) PAGE_SIZE +#define huge_page_order(h) 0 +#define huge_page_shift(h) PAGE_SHIFT static inline unsigned int pages_per_huge_page(struct hstate *h) { return 1; } +#define hstate_index_to_shift(index) 0 +#define hstate_index(h) 0 -static inline unsigned hstate_index_to_shift(unsigned index) +static inline pgoff_t basepage_index(struct page *page) { - return 0; -} - -static inline int hstate_index(struct hstate *h) -{ - return 0; -} - -static inline int dissolve_free_huge_page(struct page *page) -{ - return 0; -} - -static inline int dissolve_free_huge_pages(unsigned long start_pfn, - unsigned long end_pfn) -{ - return 0; -} - -static inline bool hugepage_migration_supported(struct hstate *h) -{ - return false; -} - -static inline bool hugepage_movable_supported(struct hstate *h) -{ - return false; -} - -static inline gfp_t htlb_alloc_mask(struct hstate *h) -{ - return 0; -} - -static inline gfp_t htlb_modify_alloc_mask(struct hstate *h, gfp_t gfp_mask) -{ - return 0; + return page->index; } +#define dissolve_free_huge_pages(s, e) 0 +#define hugepage_migration_supported(h) false static inline spinlock_t *huge_pte_lockptr(struct hstate *h, struct mm_struct *mm, pte_t *pte) @@ -1047,10 +527,6 @@ static inline spinlock_t *huge_pte_lockptr(struct hstate *h, return &mm->page_table_lock; } -static inline void hugetlb_count_init(struct mm_struct *mm) -{ -} - static inline void hugetlb_report_usage(struct seq_file *f, struct mm_struct *m) { } @@ -1058,19 +534,8 @@ static inline void hugetlb_report_usage(struct seq_file *f, struct mm_struct *m) static inline void hugetlb_count_sub(long l, struct mm_struct *mm) { } - -static inline void set_huge_swap_pte_at(struct mm_struct *mm, unsigned long addr, - pte_t *ptep, pte_t pte, unsigned long sz) -{ -} #endif /* CONFIG_HUGETLB_PAGE 
*/ -#ifdef CONFIG_HUGETLB_PAGE_FREE_VMEMMAP -extern bool hugetlb_free_vmemmap_enabled; -#else -#define hugetlb_free_vmemmap_enabled false -#endif - static inline spinlock_t *huge_pte_lock(struct hstate *h, struct mm_struct *mm, pte_t *pte) { @@ -1081,26 +546,4 @@ static inline spinlock_t *huge_pte_lock(struct hstate *h, return ptl; } -#if defined(CONFIG_HUGETLB_PAGE) && defined(CONFIG_CMA) -extern void __init hugetlb_cma_reserve(int order); -extern void __init hugetlb_cma_check(void); -#else -static inline __init void hugetlb_cma_reserve(int order) -{ -} -static inline __init void hugetlb_cma_check(void) -{ -} -#endif - -bool want_pmd_share(struct vm_area_struct *vma, unsigned long addr); - -#ifndef __HAVE_ARCH_FLUSH_HUGETLB_TLB_RANGE -/* - * ARCHes with special requirements for evicting HUGETLB backing TLB entries can - * implement this. - */ -#define flush_hugetlb_tlb_range(vma, addr, end) flush_tlb_range(vma, addr, end) -#endif - #endif /* _LINUX_HUGETLB_H */ diff --git a/include/linux/hugetlb_cgroup.h b/include/linux/hugetlb_cgroup.h index c137396129..d34f2daf73 100644 --- a/include/linux/hugetlb_cgroup.h +++ b/include/linux/hugetlb_cgroup.h @@ -18,181 +18,71 @@ #include struct hugetlb_cgroup; -struct resv_map; -struct file_region; - -#ifdef CONFIG_CGROUP_HUGETLB /* * Minimum page order trackable by hugetlb cgroup. - * At least 4 pages are necessary for all the tracking information. - * The second tail page (hpage[SUBPAGE_INDEX_CGROUP]) is the fault - * usage cgroup. The third tail page (hpage[SUBPAGE_INDEX_CGROUP_RSVD]) - * is the reservation usage cgroup. + * At least 3 pages are necessary for all the tracking information. 
*/ -#define HUGETLB_CGROUP_MIN_ORDER order_base_2(__MAX_CGROUP_SUBPAGE_INDEX + 1) +#define HUGETLB_CGROUP_MIN_ORDER 2 -enum hugetlb_memory_event { - HUGETLB_MAX, - HUGETLB_NR_MEMORY_EVENTS, +#ifdef CONFIG_CGROUP_HUGETLB + +enum { + RES_USAGE, + RES_LIMIT, + RES_MAX_USAGE, + RES_FAILCNT, }; -struct hugetlb_cgroup { - struct cgroup_subsys_state css; - - /* - * the counter to account for hugepages from hugetlb. - */ - struct page_counter hugepage[HUGE_MAX_HSTATE]; - - /* - * the counter to account for hugepage reservations from hugetlb. - */ - struct page_counter rsvd_hugepage[HUGE_MAX_HSTATE]; - - atomic_long_t events[HUGE_MAX_HSTATE][HUGETLB_NR_MEMORY_EVENTS]; - atomic_long_t events_local[HUGE_MAX_HSTATE][HUGETLB_NR_MEMORY_EVENTS]; - - /* Handle for "hugetlb.events" */ - struct cgroup_file events_file[HUGE_MAX_HSTATE]; - - /* Handle for "hugetlb.events.local" */ - struct cgroup_file events_local_file[HUGE_MAX_HSTATE]; -}; - -static inline struct hugetlb_cgroup * -__hugetlb_cgroup_from_page(struct page *page, bool rsvd) +static inline struct hugetlb_cgroup *hugetlb_cgroup_from_page(struct page *page) { VM_BUG_ON_PAGE(!PageHuge(page), page); if (compound_order(page) < HUGETLB_CGROUP_MIN_ORDER) return NULL; - if (rsvd) - return (void *)page_private(page + SUBPAGE_INDEX_CGROUP_RSVD); - else - return (void *)page_private(page + SUBPAGE_INDEX_CGROUP); + return (struct hugetlb_cgroup *)page[2].private; } -static inline struct hugetlb_cgroup *hugetlb_cgroup_from_page(struct page *page) -{ - return __hugetlb_cgroup_from_page(page, false); -} - -static inline struct hugetlb_cgroup * -hugetlb_cgroup_from_page_rsvd(struct page *page) -{ - return __hugetlb_cgroup_from_page(page, true); -} - -static inline int __set_hugetlb_cgroup(struct page *page, - struct hugetlb_cgroup *h_cg, bool rsvd) +static inline +int set_hugetlb_cgroup(struct page *page, struct hugetlb_cgroup *h_cg) { VM_BUG_ON_PAGE(!PageHuge(page), page); if (compound_order(page) < HUGETLB_CGROUP_MIN_ORDER) return -1; 
- if (rsvd) - set_page_private(page + SUBPAGE_INDEX_CGROUP_RSVD, - (unsigned long)h_cg); - else - set_page_private(page + SUBPAGE_INDEX_CGROUP, - (unsigned long)h_cg); + page[2].private = (unsigned long)h_cg; return 0; } -static inline int set_hugetlb_cgroup(struct page *page, - struct hugetlb_cgroup *h_cg) -{ - return __set_hugetlb_cgroup(page, h_cg, false); -} - -static inline int set_hugetlb_cgroup_rsvd(struct page *page, - struct hugetlb_cgroup *h_cg) -{ - return __set_hugetlb_cgroup(page, h_cg, true); -} - static inline bool hugetlb_cgroup_disabled(void) { return !cgroup_subsys_enabled(hugetlb_cgrp_subsys); } -static inline void hugetlb_cgroup_put_rsvd_cgroup(struct hugetlb_cgroup *h_cg) -{ - css_put(&h_cg->css); -} - -static inline void resv_map_dup_hugetlb_cgroup_uncharge_info( - struct resv_map *resv_map) -{ - if (resv_map->css) - css_get(resv_map->css); -} - extern int hugetlb_cgroup_charge_cgroup(int idx, unsigned long nr_pages, struct hugetlb_cgroup **ptr); -extern int hugetlb_cgroup_charge_cgroup_rsvd(int idx, unsigned long nr_pages, - struct hugetlb_cgroup **ptr); extern void hugetlb_cgroup_commit_charge(int idx, unsigned long nr_pages, struct hugetlb_cgroup *h_cg, struct page *page); -extern void hugetlb_cgroup_commit_charge_rsvd(int idx, unsigned long nr_pages, - struct hugetlb_cgroup *h_cg, - struct page *page); extern void hugetlb_cgroup_uncharge_page(int idx, unsigned long nr_pages, struct page *page); -extern void hugetlb_cgroup_uncharge_page_rsvd(int idx, unsigned long nr_pages, - struct page *page); - extern void hugetlb_cgroup_uncharge_cgroup(int idx, unsigned long nr_pages, struct hugetlb_cgroup *h_cg); -extern void hugetlb_cgroup_uncharge_cgroup_rsvd(int idx, unsigned long nr_pages, - struct hugetlb_cgroup *h_cg); -extern void hugetlb_cgroup_uncharge_counter(struct resv_map *resv, - unsigned long start, - unsigned long end); - -extern void hugetlb_cgroup_uncharge_file_region(struct resv_map *resv, - struct file_region *rg, - unsigned long 
nr_pages, - bool region_del); - extern void hugetlb_cgroup_file_init(void) __init; extern void hugetlb_cgroup_migrate(struct page *oldhpage, struct page *newhpage); -#else -static inline void hugetlb_cgroup_uncharge_file_region(struct resv_map *resv, - struct file_region *rg, - unsigned long nr_pages, - bool region_del) -{ -} +ssize_t hugetlb_cgroup_reset(struct kernfs_open_file *of, char *buf, size_t nbytes, loff_t off); +ssize_t hugetlb_cgroup_write(struct kernfs_open_file *of, char *buf, size_t nbytes, loff_t off); +u64 hugetlb_cgroup_read_u64(struct cgroup_subsys_state *css, struct cftype *cft); +#else static inline struct hugetlb_cgroup *hugetlb_cgroup_from_page(struct page *page) { return NULL; } -static inline struct hugetlb_cgroup * -hugetlb_cgroup_from_page_resv(struct page *page) -{ - return NULL; -} - -static inline struct hugetlb_cgroup * -hugetlb_cgroup_from_page_rsvd(struct page *page) -{ - return NULL; -} - -static inline int set_hugetlb_cgroup(struct page *page, - struct hugetlb_cgroup *h_cg) -{ - return 0; -} - -static inline int set_hugetlb_cgroup_rsvd(struct page *page, - struct hugetlb_cgroup *h_cg) +static inline +int set_hugetlb_cgroup(struct page *page, struct hugetlb_cgroup *h_cg) { return 0; } @@ -202,66 +92,28 @@ static inline bool hugetlb_cgroup_disabled(void) return true; } -static inline void hugetlb_cgroup_put_rsvd_cgroup(struct hugetlb_cgroup *h_cg) -{ -} - -static inline void resv_map_dup_hugetlb_cgroup_uncharge_info( - struct resv_map *resv_map) -{ -} - -static inline int hugetlb_cgroup_charge_cgroup(int idx, unsigned long nr_pages, - struct hugetlb_cgroup **ptr) +static inline int +hugetlb_cgroup_charge_cgroup(int idx, unsigned long nr_pages, + struct hugetlb_cgroup **ptr) { return 0; } -static inline int hugetlb_cgroup_charge_cgroup_rsvd(int idx, - unsigned long nr_pages, - struct hugetlb_cgroup **ptr) -{ - return 0; -} - -static inline void hugetlb_cgroup_commit_charge(int idx, unsigned long nr_pages, - struct hugetlb_cgroup 
*h_cg, - struct page *page) +static inline void +hugetlb_cgroup_commit_charge(int idx, unsigned long nr_pages, + struct hugetlb_cgroup *h_cg, + struct page *page) { } static inline void -hugetlb_cgroup_commit_charge_rsvd(int idx, unsigned long nr_pages, - struct hugetlb_cgroup *h_cg, - struct page *page) -{ -} - -static inline void hugetlb_cgroup_uncharge_page(int idx, unsigned long nr_pages, - struct page *page) -{ -} - -static inline void hugetlb_cgroup_uncharge_page_rsvd(int idx, - unsigned long nr_pages, - struct page *page) -{ -} -static inline void hugetlb_cgroup_uncharge_cgroup(int idx, - unsigned long nr_pages, - struct hugetlb_cgroup *h_cg) +hugetlb_cgroup_uncharge_page(int idx, unsigned long nr_pages, struct page *page) { } static inline void -hugetlb_cgroup_uncharge_cgroup_rsvd(int idx, unsigned long nr_pages, - struct hugetlb_cgroup *h_cg) -{ -} - -static inline void hugetlb_cgroup_uncharge_counter(struct resv_map *resv, - unsigned long start, - unsigned long end) +hugetlb_cgroup_uncharge_cgroup(int idx, unsigned long nr_pages, + struct hugetlb_cgroup *h_cg) { } diff --git a/include/linux/hugetlb_inline.h b/include/linux/hugetlb_inline.h index 0660a03d37..a4e7ca0f35 100644 --- a/include/linux/hugetlb_inline.h +++ b/include/linux/hugetlb_inline.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_HUGETLB_INLINE_H #define _LINUX_HUGETLB_INLINE_H diff --git a/include/linux/hw_breakpoint.h b/include/linux/hw_breakpoint.h index 78dd7035d1..0464c85e63 100644 --- a/include/linux/hw_breakpoint.h +++ b/include/linux/hw_breakpoint.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_HW_BREAKPOINT_H #define _LINUX_HW_BREAKPOINT_H @@ -53,9 +52,6 @@ register_user_hw_breakpoint(struct perf_event_attr *attr, /* FIXME: only change from the attr, and don't unregister */ extern int modify_user_hw_breakpoint(struct perf_event *bp, struct perf_event_attr *attr); -extern int -modify_user_hw_breakpoint_check(struct perf_event *bp, 
struct perf_event_attr *attr, - bool check); /* * Kernel breakpoints are not associated with any particular thread. @@ -72,6 +68,7 @@ register_wide_hw_breakpoint(struct perf_event_attr *attr, void *context); extern int register_perf_hw_breakpoint(struct perf_event *bp); +extern int __register_perf_hw_breakpoint(struct perf_event *bp); extern void unregister_hw_breakpoint(struct perf_event *bp); extern void unregister_wide_hw_breakpoint(struct perf_event * __percpu *cpu_events); @@ -79,10 +76,6 @@ extern int dbg_reserve_bp_slot(struct perf_event *bp); extern int dbg_release_bp_slot(struct perf_event *bp); extern int reserve_bp_slot(struct perf_event *bp); extern void release_bp_slot(struct perf_event *bp); -int hw_breakpoint_weight(struct perf_event *bp); -int arch_reserve_bp_slot(struct perf_event *bp); -void arch_release_bp_slot(struct perf_event *bp); -void arch_unregister_hw_breakpoint(struct perf_event *bp); extern void flush_ptrace_hw_breakpoint(struct task_struct *tsk); @@ -103,10 +96,6 @@ register_user_hw_breakpoint(struct perf_event_attr *attr, static inline int modify_user_hw_breakpoint(struct perf_event *bp, struct perf_event_attr *attr) { return -ENOSYS; } -static inline int -modify_user_hw_breakpoint_check(struct perf_event *bp, struct perf_event_attr *attr, - bool check) { return -ENOSYS; } - static inline struct perf_event * register_wide_hw_breakpoint_cpu(struct perf_event_attr *attr, perf_overflow_handler_t triggered, @@ -118,6 +107,8 @@ register_wide_hw_breakpoint(struct perf_event_attr *attr, void *context) { return NULL; } static inline int register_perf_hw_breakpoint(struct perf_event *bp) { return -ENOSYS; } +static inline int +__register_perf_hw_breakpoint(struct perf_event *bp) { return -ENOSYS; } static inline void unregister_hw_breakpoint(struct perf_event *bp) { } static inline void unregister_wide_hw_breakpoint(struct perf_event * __percpu *cpu_events) { } diff --git a/include/linux/hw_random.h b/include/linux/hw_random.h index 
8e6dd908da..34a0dc18f3 100644 --- a/include/linux/hw_random.h +++ b/include/linux/hw_random.h @@ -1,7 +1,7 @@ /* Hardware Random Number Generator - Please read Documentation/admin-guide/hw_random.rst for details on use. + Please read Documentation/hw_random.txt for details on use. ---------------------------------------------------------- This software may be used and distributed according to the terms @@ -30,11 +30,11 @@ * Must not be NULL. *OBSOLETE* * @read: New API. drivers can fill up to max bytes of data * into the buffer. The buffer is aligned for any type - * and max is a multiple of 4 and >= 32 bytes. + * and max is guaranteed to be >= to that alignment + * (either 4 or 8 depending on architecture). * @priv: Private data, for use by the RNG driver. * @quality: Estimation of true entropy in RNG's bitstream - * (in bits of entropy per 1024 bits of input; - * valid values: 1 to 1024, or 0 for unknown). + * (per mill). */ struct hwrng { const char *name; diff --git a/include/linux/hwmon-sysfs.h b/include/linux/hwmon-sysfs.h index cb26d02f52..7dda4003a1 100644 --- a/include/linux/hwmon-sysfs.h +++ b/include/linux/hwmon-sysfs.h @@ -1,8 +1,21 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * hwmon-sysfs.h - hardware monitoring chip driver sysfs defines * * Copyright (C) 2005 Yani Ioannou + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ #ifndef _LINUX_HWMON_SYSFS_H #define _LINUX_HWMON_SYSFS_H @@ -12,7 +25,8 @@ struct sensor_device_attribute{ struct device_attribute dev_attr; int index; -}; +} __do_const; +typedef struct sensor_device_attribute __no_const sensor_device_attribute_no_const; #define to_sensor_dev_attr(_dev_attr) \ container_of(_dev_attr, struct sensor_device_attribute, dev_attr) @@ -20,33 +34,16 @@ struct sensor_device_attribute{ { .dev_attr = __ATTR(_name, _mode, _show, _store), \ .index = _index } -#define SENSOR_ATTR_RO(_name, _func, _index) \ - SENSOR_ATTR(_name, 0444, _func##_show, NULL, _index) - -#define SENSOR_ATTR_RW(_name, _func, _index) \ - SENSOR_ATTR(_name, 0644, _func##_show, _func##_store, _index) - -#define SENSOR_ATTR_WO(_name, _func, _index) \ - SENSOR_ATTR(_name, 0200, NULL, _func##_store, _index) - #define SENSOR_DEVICE_ATTR(_name, _mode, _show, _store, _index) \ struct sensor_device_attribute sensor_dev_attr_##_name \ = SENSOR_ATTR(_name, _mode, _show, _store, _index) -#define SENSOR_DEVICE_ATTR_RO(_name, _func, _index) \ - SENSOR_DEVICE_ATTR(_name, 0444, _func##_show, NULL, _index) - -#define SENSOR_DEVICE_ATTR_RW(_name, _func, _index) \ - SENSOR_DEVICE_ATTR(_name, 0644, _func##_show, _func##_store, _index) - -#define SENSOR_DEVICE_ATTR_WO(_name, _func, _index) \ - SENSOR_DEVICE_ATTR(_name, 0200, NULL, _func##_store, _index) - struct sensor_device_attribute_2 { struct device_attribute dev_attr; u8 index; u8 nr; -}; +} __do_const; +typedef struct sensor_device_attribute_2 __no_const sensor_device_attribute_2_no_const; #define to_sensor_dev_attr_2(_dev_attr) \ container_of(_dev_attr, struct sensor_device_attribute_2, dev_attr) @@ -55,29 +52,8 @@ struct sensor_device_attribute_2 { .index = _index, \ .nr = _nr } -#define SENSOR_ATTR_2_RO(_name, _func, _nr, _index) \ 
- SENSOR_ATTR_2(_name, 0444, _func##_show, NULL, _nr, _index) - -#define SENSOR_ATTR_2_RW(_name, _func, _nr, _index) \ - SENSOR_ATTR_2(_name, 0644, _func##_show, _func##_store, _nr, _index) - -#define SENSOR_ATTR_2_WO(_name, _func, _nr, _index) \ - SENSOR_ATTR_2(_name, 0200, NULL, _func##_store, _nr, _index) - #define SENSOR_DEVICE_ATTR_2(_name,_mode,_show,_store,_nr,_index) \ struct sensor_device_attribute_2 sensor_dev_attr_##_name \ = SENSOR_ATTR_2(_name, _mode, _show, _store, _nr, _index) -#define SENSOR_DEVICE_ATTR_2_RO(_name, _func, _nr, _index) \ - SENSOR_DEVICE_ATTR_2(_name, 0444, _func##_show, NULL, \ - _nr, _index) - -#define SENSOR_DEVICE_ATTR_2_RW(_name, _func, _nr, _index) \ - SENSOR_DEVICE_ATTR_2(_name, 0644, _func##_show, _func##_store, \ - _nr, _index) - -#define SENSOR_DEVICE_ATTR_2_WO(_name, _func, _nr, _index) \ - SENSOR_DEVICE_ATTR_2(_name, 0200, NULL, _func##_store, \ - _nr, _index) - #endif /* _LINUX_HWMON_SYSFS_H */ diff --git a/include/linux/hwmon-vid.h b/include/linux/hwmon-vid.h index 9409e1d207..da0a680e2f 100644 --- a/include/linux/hwmon-vid.h +++ b/include/linux/hwmon-vid.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ /* hwmon-vid.h - VID/VRM/VRD voltage conversions @@ -6,6 +5,19 @@ Copyright (c) 2002 Mark D. Studebaker With assistance from Trent Piepho + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. 
+ + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ #ifndef _LINUX_HWMON_VID_H diff --git a/include/linux/hwmon.h b/include/linux/hwmon.h index 1e8d6ea899..9d2f8bde7d 100644 --- a/include/linux/hwmon.h +++ b/include/linux/hwmon.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* hwmon.h - part of lm_sensors, Linux kernel modules for hardware monitoring @@ -7,6 +6,9 @@ Copyright (C) 2005 Mark M. Hoffman + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; version 2 of the License. */ #ifndef _HWMON_H_ @@ -27,8 +29,6 @@ enum hwmon_sensor_types { hwmon_humidity, hwmon_fan, hwmon_pwm, - hwmon_intrusion, - hwmon_max, }; enum hwmon_chip_attributes { @@ -39,11 +39,6 @@ enum hwmon_chip_attributes { hwmon_chip_register_tz, hwmon_chip_update_interval, hwmon_chip_alarms, - hwmon_chip_samples, - hwmon_chip_curr_samples, - hwmon_chip_in_samples, - hwmon_chip_power_samples, - hwmon_chip_temp_samples, }; #define HWMON_C_TEMP_RESET_HISTORY BIT(hwmon_chip_temp_reset_history) @@ -53,15 +48,9 @@ enum hwmon_chip_attributes { #define HWMON_C_REGISTER_TZ BIT(hwmon_chip_register_tz) #define HWMON_C_UPDATE_INTERVAL BIT(hwmon_chip_update_interval) #define HWMON_C_ALARMS BIT(hwmon_chip_alarms) -#define HWMON_C_SAMPLES BIT(hwmon_chip_samples) -#define HWMON_C_CURR_SAMPLES BIT(hwmon_chip_curr_samples) -#define HWMON_C_IN_SAMPLES BIT(hwmon_chip_in_samples) -#define HWMON_C_POWER_SAMPLES BIT(hwmon_chip_power_samples) -#define HWMON_C_TEMP_SAMPLES BIT(hwmon_chip_temp_samples) enum hwmon_temp_attributes { - hwmon_temp_enable, - hwmon_temp_input, + hwmon_temp_input = 0, hwmon_temp_type, hwmon_temp_lcrit, hwmon_temp_lcrit_hyst, @@ -85,11 +74,8 @@ enum hwmon_temp_attributes { hwmon_temp_lowest, hwmon_temp_highest, 
hwmon_temp_reset_history, - hwmon_temp_rated_min, - hwmon_temp_rated_max, }; -#define HWMON_T_ENABLE BIT(hwmon_temp_enable) #define HWMON_T_INPUT BIT(hwmon_temp_input) #define HWMON_T_TYPE BIT(hwmon_temp_type) #define HWMON_T_LCRIT BIT(hwmon_temp_lcrit) @@ -102,11 +88,9 @@ enum hwmon_temp_attributes { #define HWMON_T_CRIT_HYST BIT(hwmon_temp_crit_hyst) #define HWMON_T_EMERGENCY BIT(hwmon_temp_emergency) #define HWMON_T_EMERGENCY_HYST BIT(hwmon_temp_emergency_hyst) -#define HWMON_T_ALARM BIT(hwmon_temp_alarm) #define HWMON_T_MIN_ALARM BIT(hwmon_temp_min_alarm) #define HWMON_T_MAX_ALARM BIT(hwmon_temp_max_alarm) #define HWMON_T_CRIT_ALARM BIT(hwmon_temp_crit_alarm) -#define HWMON_T_LCRIT_ALARM BIT(hwmon_temp_lcrit_alarm) #define HWMON_T_EMERGENCY_ALARM BIT(hwmon_temp_emergency_alarm) #define HWMON_T_FAULT BIT(hwmon_temp_fault) #define HWMON_T_OFFSET BIT(hwmon_temp_offset) @@ -114,11 +98,8 @@ enum hwmon_temp_attributes { #define HWMON_T_LOWEST BIT(hwmon_temp_lowest) #define HWMON_T_HIGHEST BIT(hwmon_temp_highest) #define HWMON_T_RESET_HISTORY BIT(hwmon_temp_reset_history) -#define HWMON_T_RATED_MIN BIT(hwmon_temp_rated_min) -#define HWMON_T_RATED_MAX BIT(hwmon_temp_rated_max) enum hwmon_in_attributes { - hwmon_in_enable, hwmon_in_input, hwmon_in_min, hwmon_in_max, @@ -134,11 +115,8 @@ enum hwmon_in_attributes { hwmon_in_max_alarm, hwmon_in_lcrit_alarm, hwmon_in_crit_alarm, - hwmon_in_rated_min, - hwmon_in_rated_max, }; -#define HWMON_I_ENABLE BIT(hwmon_in_enable) #define HWMON_I_INPUT BIT(hwmon_in_input) #define HWMON_I_MIN BIT(hwmon_in_min) #define HWMON_I_MAX BIT(hwmon_in_max) @@ -154,11 +132,8 @@ enum hwmon_in_attributes { #define HWMON_I_MAX_ALARM BIT(hwmon_in_max_alarm) #define HWMON_I_LCRIT_ALARM BIT(hwmon_in_lcrit_alarm) #define HWMON_I_CRIT_ALARM BIT(hwmon_in_crit_alarm) -#define HWMON_I_RATED_MIN BIT(hwmon_in_rated_min) -#define HWMON_I_RATED_MAX BIT(hwmon_in_rated_max) enum hwmon_curr_attributes { - hwmon_curr_enable, hwmon_curr_input, hwmon_curr_min, 
hwmon_curr_max, @@ -174,11 +149,8 @@ enum hwmon_curr_attributes { hwmon_curr_max_alarm, hwmon_curr_lcrit_alarm, hwmon_curr_crit_alarm, - hwmon_curr_rated_min, - hwmon_curr_rated_max, }; -#define HWMON_C_ENABLE BIT(hwmon_curr_enable) #define HWMON_C_INPUT BIT(hwmon_curr_input) #define HWMON_C_MIN BIT(hwmon_curr_min) #define HWMON_C_MAX BIT(hwmon_curr_max) @@ -194,11 +166,8 @@ enum hwmon_curr_attributes { #define HWMON_C_MAX_ALARM BIT(hwmon_curr_max_alarm) #define HWMON_C_LCRIT_ALARM BIT(hwmon_curr_lcrit_alarm) #define HWMON_C_CRIT_ALARM BIT(hwmon_curr_crit_alarm) -#define HWMON_C_RATED_MIN BIT(hwmon_curr_rated_min) -#define HWMON_C_RATED_MAX BIT(hwmon_curr_rated_max) enum hwmon_power_attributes { - hwmon_power_enable, hwmon_power_average, hwmon_power_average_interval, hwmon_power_average_interval_max, @@ -216,22 +185,15 @@ enum hwmon_power_attributes { hwmon_power_cap_hyst, hwmon_power_cap_max, hwmon_power_cap_min, - hwmon_power_min, hwmon_power_max, hwmon_power_crit, - hwmon_power_lcrit, hwmon_power_label, hwmon_power_alarm, hwmon_power_cap_alarm, - hwmon_power_min_alarm, hwmon_power_max_alarm, - hwmon_power_lcrit_alarm, hwmon_power_crit_alarm, - hwmon_power_rated_min, - hwmon_power_rated_max, }; -#define HWMON_P_ENABLE BIT(hwmon_power_enable) #define HWMON_P_AVERAGE BIT(hwmon_power_average) #define HWMON_P_AVERAGE_INTERVAL BIT(hwmon_power_average_interval) #define HWMON_P_AVERAGE_INTERVAL_MAX BIT(hwmon_power_average_interval_max) @@ -249,32 +211,23 @@ enum hwmon_power_attributes { #define HWMON_P_CAP_HYST BIT(hwmon_power_cap_hyst) #define HWMON_P_CAP_MAX BIT(hwmon_power_cap_max) #define HWMON_P_CAP_MIN BIT(hwmon_power_cap_min) -#define HWMON_P_MIN BIT(hwmon_power_min) #define HWMON_P_MAX BIT(hwmon_power_max) -#define HWMON_P_LCRIT BIT(hwmon_power_lcrit) #define HWMON_P_CRIT BIT(hwmon_power_crit) #define HWMON_P_LABEL BIT(hwmon_power_label) #define HWMON_P_ALARM BIT(hwmon_power_alarm) #define HWMON_P_CAP_ALARM BIT(hwmon_power_cap_alarm) -#define HWMON_P_MIN_ALARM 
BIT(hwmon_power_min_alarm) #define HWMON_P_MAX_ALARM BIT(hwmon_power_max_alarm) -#define HWMON_P_LCRIT_ALARM BIT(hwmon_power_lcrit_alarm) #define HWMON_P_CRIT_ALARM BIT(hwmon_power_crit_alarm) -#define HWMON_P_RATED_MIN BIT(hwmon_power_rated_min) -#define HWMON_P_RATED_MAX BIT(hwmon_power_rated_max) enum hwmon_energy_attributes { - hwmon_energy_enable, hwmon_energy_input, hwmon_energy_label, }; -#define HWMON_E_ENABLE BIT(hwmon_energy_enable) #define HWMON_E_INPUT BIT(hwmon_energy_input) #define HWMON_E_LABEL BIT(hwmon_energy_label) enum hwmon_humidity_attributes { - hwmon_humidity_enable, hwmon_humidity_input, hwmon_humidity_label, hwmon_humidity_min, @@ -283,11 +236,8 @@ enum hwmon_humidity_attributes { hwmon_humidity_max_hyst, hwmon_humidity_alarm, hwmon_humidity_fault, - hwmon_humidity_rated_min, - hwmon_humidity_rated_max, }; -#define HWMON_H_ENABLE BIT(hwmon_humidity_enable) #define HWMON_H_INPUT BIT(hwmon_humidity_input) #define HWMON_H_LABEL BIT(hwmon_humidity_label) #define HWMON_H_MIN BIT(hwmon_humidity_min) @@ -296,11 +246,8 @@ enum hwmon_humidity_attributes { #define HWMON_H_MAX_HYST BIT(hwmon_humidity_max_hyst) #define HWMON_H_ALARM BIT(hwmon_humidity_alarm) #define HWMON_H_FAULT BIT(hwmon_humidity_fault) -#define HWMON_H_RATED_MIN BIT(hwmon_humidity_rated_min) -#define HWMON_H_RATED_MAX BIT(hwmon_humidity_rated_max) enum hwmon_fan_attributes { - hwmon_fan_enable, hwmon_fan_input, hwmon_fan_label, hwmon_fan_min, @@ -314,7 +261,6 @@ enum hwmon_fan_attributes { hwmon_fan_fault, }; -#define HWMON_F_ENABLE BIT(hwmon_fan_enable) #define HWMON_F_INPUT BIT(hwmon_fan_input) #define HWMON_F_LABEL BIT(hwmon_fan_label) #define HWMON_F_MIN BIT(hwmon_fan_min) @@ -339,13 +285,6 @@ enum hwmon_pwm_attributes { #define HWMON_PWM_MODE BIT(hwmon_pwm_mode) #define HWMON_PWM_FREQ BIT(hwmon_pwm_freq) -enum hwmon_intrusion_attributes { - hwmon_intrusion_alarm, - hwmon_intrusion_beep, -}; -#define HWMON_INTRUSION_ALARM BIT(hwmon_intrusion_alarm) -#define HWMON_INTRUSION_BEEP 
BIT(hwmon_intrusion_beep) - /** * struct hwmon_ops - hwmon device operations * @is_visible: Callback to return attribute visibility. Mandatory. @@ -359,8 +298,8 @@ enum hwmon_intrusion_attributes { * Channel number * The function returns the file permissions. * If the return value is 0, no attribute will be created. - * @read: Read callback for data attributes. Mandatory if readable - * data attributes are present. + * @read: Read callback. Optional. If not provided, attributes + * will not be readable. * Parameters are: * @dev: Pointer to hardware monitoring device * @type: Sensor type @@ -369,19 +308,8 @@ enum hwmon_intrusion_attributes { * Channel number * @val: Pointer to returned value * The function returns 0 on success or a negative error number. - * @read_string: - * Read callback for string attributes. Mandatory if string - * attributes are present. - * Parameters are: - * @dev: Pointer to hardware monitoring device - * @type: Sensor type - * @attr: Sensor attribute - * @channel: - * Channel number - * @str: Pointer to returned string - * The function returns 0 on success or a negative error number. - * @write: Write callback for data attributes. Mandatory if writeable - * data attributes are present. + * @write: Write callback. Optional. If not provided, attributes + * will not be writable. * Parameters are: * @dev: Pointer to hardware monitoring device * @type: Sensor type @@ -396,8 +324,6 @@ struct hwmon_ops { u32 attr, int channel); int (*read)(struct device *dev, enum hwmon_sensor_types type, u32 attr, int channel, long *val); - int (*read_string)(struct device *dev, enum hwmon_sensor_types type, - u32 attr, int channel, const char **str); int (*write)(struct device *dev, enum hwmon_sensor_types type, u32 attr, int channel, long val); }; @@ -413,14 +339,6 @@ struct hwmon_channel_info { const u32 *config; }; -#define HWMON_CHANNEL_INFO(stype, ...) 
\ - (&(struct hwmon_channel_info) { \ - .type = hwmon_##stype, \ - .config = (u32 []) { \ - __VA_ARGS__, 0 \ - } \ - }) - /** * Chip configuration * @ops: Pointer to hwmon operations. @@ -431,9 +349,7 @@ struct hwmon_chip_info { const struct hwmon_channel_info **info; }; -/* hwmon_device_register() is deprecated */ struct device *hwmon_device_register(struct device *dev); - struct device * hwmon_device_register_with_groups(struct device *dev, const char *name, void *drvdata, @@ -446,40 +362,14 @@ struct device * hwmon_device_register_with_info(struct device *dev, const char *name, void *drvdata, const struct hwmon_chip_info *info, - const struct attribute_group **extra_groups); + const struct attribute_group **groups); struct device * devm_hwmon_device_register_with_info(struct device *dev, - const char *name, void *drvdata, - const struct hwmon_chip_info *info, - const struct attribute_group **extra_groups); + const char *name, void *drvdata, + const struct hwmon_chip_info *info, + const struct attribute_group **groups); void hwmon_device_unregister(struct device *dev); void devm_hwmon_device_unregister(struct device *dev); -int hwmon_notify_event(struct device *dev, enum hwmon_sensor_types type, - u32 attr, int channel); - -/** - * hwmon_is_bad_char - Is the char invalid in a hwmon name - * @ch: the char to be considered - * - * hwmon_is_bad_char() can be used to determine if the given character - * may not be used in a hwmon name. - * - * Returns true if the char is invalid, false otherwise. 
- */ -static inline bool hwmon_is_bad_char(const char ch) -{ - switch (ch) { - case '-': - case '*': - case ' ': - case '\t': - case '\n': - return true; - default: - return false; - } -} - #endif diff --git a/include/linux/hwspinlock.h b/include/linux/hwspinlock.h index bfe7c1f1ac..859d673d98 100644 --- a/include/linux/hwspinlock.h +++ b/include/linux/hwspinlock.h @@ -1,10 +1,18 @@ -/* SPDX-License-Identifier: GPL-2.0 */ /* * Hardware spinlock public header * * Copyright (C) 2010 Texas Instruments Incorporated - http://www.ti.com * * Contact: Ohad Ben-Cohen + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
*/ #ifndef __LINUX_HWSPINLOCK_H @@ -14,10 +22,8 @@ #include /* hwspinlock mode argument */ -#define HWLOCK_IRQSTATE 0x01 /* Disable interrupts, save state */ -#define HWLOCK_IRQ 0x02 /* Disable interrupts, don't save state */ -#define HWLOCK_RAW 0x03 -#define HWLOCK_IN_ATOMIC 0x04 /* Called while in atomic context */ +#define HWLOCK_IRQSTATE 0x01 /* Disable interrupts, save state */ +#define HWLOCK_IRQ 0x02 /* Disable interrupts, don't save state */ struct device; struct device_node; @@ -53,7 +59,7 @@ struct hwspinlock_pdata { int base_id; }; -#ifdef CONFIG_HWSPINLOCK +#if defined(CONFIG_HWSPINLOCK) || defined(CONFIG_HWSPINLOCK_MODULE) int hwspin_lock_register(struct hwspinlock_device *bank, struct device *dev, const struct hwspinlock_ops *ops, int base_id, int num_locks); @@ -67,17 +73,6 @@ int __hwspin_lock_timeout(struct hwspinlock *, unsigned int, int, unsigned long *); int __hwspin_trylock(struct hwspinlock *, int, unsigned long *); void __hwspin_unlock(struct hwspinlock *, int, unsigned long *); -int of_hwspin_lock_get_id_byname(struct device_node *np, const char *name); -int devm_hwspin_lock_free(struct device *dev, struct hwspinlock *hwlock); -struct hwspinlock *devm_hwspin_lock_request(struct device *dev); -struct hwspinlock *devm_hwspin_lock_request_specific(struct device *dev, - unsigned int id); -int devm_hwspin_lock_unregister(struct device *dev, - struct hwspinlock_device *bank); -int devm_hwspin_lock_register(struct device *dev, - struct hwspinlock_device *bank, - const struct hwspinlock_ops *ops, - int base_id, int num_locks); #else /* !CONFIG_HWSPINLOCK */ @@ -137,30 +132,6 @@ static inline int hwspin_lock_get_id(struct hwspinlock *hwlock) return 0; } -static inline -int of_hwspin_lock_get_id_byname(struct device_node *np, const char *name) -{ - return 0; -} - -static inline -int devm_hwspin_lock_free(struct device *dev, struct hwspinlock *hwlock) -{ - return 0; -} - -static inline struct hwspinlock *devm_hwspin_lock_request(struct device *dev) -{ 
- return ERR_PTR(-ENODEV); -} - -static inline -struct hwspinlock *devm_hwspin_lock_request_specific(struct device *dev, - unsigned int id) -{ - return ERR_PTR(-ENODEV); -} - #endif /* !CONFIG_HWSPINLOCK */ /** @@ -204,42 +175,6 @@ static inline int hwspin_trylock_irq(struct hwspinlock *hwlock) return __hwspin_trylock(hwlock, HWLOCK_IRQ, NULL); } -/** - * hwspin_trylock_raw() - attempt to lock a specific hwspinlock - * @hwlock: an hwspinlock which we want to trylock - * - * This function attempts to lock an hwspinlock, and will immediately fail - * if the hwspinlock is already taken. - * - * Caution: User must protect the routine of getting hardware lock with mutex - * or spinlock to avoid dead-lock, that will let user can do some time-consuming - * or sleepable operations under the hardware lock. - * - * Returns 0 if we successfully locked the hwspinlock, -EBUSY if - * the hwspinlock was already taken, and -EINVAL if @hwlock is invalid. - */ -static inline int hwspin_trylock_raw(struct hwspinlock *hwlock) -{ - return __hwspin_trylock(hwlock, HWLOCK_RAW, NULL); -} - -/** - * hwspin_trylock_in_atomic() - attempt to lock a specific hwspinlock - * @hwlock: an hwspinlock which we want to trylock - * - * This function attempts to lock an hwspinlock, and will immediately fail - * if the hwspinlock is already taken. - * - * This function shall be called only from an atomic context. - * - * Returns 0 if we successfully locked the hwspinlock, -EBUSY if - * the hwspinlock was already taken, and -EINVAL if @hwlock is invalid. 
- */ -static inline int hwspin_trylock_in_atomic(struct hwspinlock *hwlock) -{ - return __hwspin_trylock(hwlock, HWLOCK_IN_ATOMIC, NULL); -} - /** * hwspin_trylock() - attempt to lock a specific hwspinlock * @hwlock: an hwspinlock which we want to trylock @@ -307,51 +242,6 @@ int hwspin_lock_timeout_irq(struct hwspinlock *hwlock, unsigned int to) return __hwspin_lock_timeout(hwlock, to, HWLOCK_IRQ, NULL); } -/** - * hwspin_lock_timeout_raw() - lock an hwspinlock with timeout limit - * @hwlock: the hwspinlock to be locked - * @to: timeout value in msecs - * - * This function locks the underlying @hwlock. If the @hwlock - * is already taken, the function will busy loop waiting for it to - * be released, but give up when @timeout msecs have elapsed. - * - * Caution: User must protect the routine of getting hardware lock with mutex - * or spinlock to avoid dead-lock, that will let user can do some time-consuming - * or sleepable operations under the hardware lock. - * - * Returns 0 when the @hwlock was successfully taken, and an appropriate - * error code otherwise (most notably an -ETIMEDOUT if the @hwlock is still - * busy after @timeout msecs). The function will never sleep. - */ -static inline -int hwspin_lock_timeout_raw(struct hwspinlock *hwlock, unsigned int to) -{ - return __hwspin_lock_timeout(hwlock, to, HWLOCK_RAW, NULL); -} - -/** - * hwspin_lock_timeout_in_atomic() - lock an hwspinlock with timeout limit - * @hwlock: the hwspinlock to be locked - * @to: timeout value in msecs - * - * This function locks the underlying @hwlock. If the @hwlock - * is already taken, the function will busy loop waiting for it to - * be released, but give up when @timeout msecs have elapsed. - * - * This function shall be called only from an atomic context and the timeout - * value shall not exceed a few msecs. 
- * - * Returns 0 when the @hwlock was successfully taken, and an appropriate - * error code otherwise (most notably an -ETIMEDOUT if the @hwlock is still - * busy after @timeout msecs). The function will never sleep. - */ -static inline -int hwspin_lock_timeout_in_atomic(struct hwspinlock *hwlock, unsigned int to) -{ - return __hwspin_lock_timeout(hwlock, to, HWLOCK_IN_ATOMIC, NULL); -} - /** * hwspin_lock_timeout() - lock an hwspinlock with timeout limit * @hwlock: the hwspinlock to be locked @@ -411,36 +301,6 @@ static inline void hwspin_unlock_irq(struct hwspinlock *hwlock) __hwspin_unlock(hwlock, HWLOCK_IRQ, NULL); } -/** - * hwspin_unlock_raw() - unlock hwspinlock - * @hwlock: a previously-acquired hwspinlock which we want to unlock - * - * This function will unlock a specific hwspinlock. - * - * @hwlock must be already locked (e.g. by hwspin_trylock()) before calling - * this function: it is a bug to call unlock on a @hwlock that is already - * unlocked. - */ -static inline void hwspin_unlock_raw(struct hwspinlock *hwlock) -{ - __hwspin_unlock(hwlock, HWLOCK_RAW, NULL); -} - -/** - * hwspin_unlock_in_atomic() - unlock hwspinlock - * @hwlock: a previously-acquired hwspinlock which we want to unlock - * - * This function will unlock a specific hwspinlock. - * - * @hwlock must be already locked (e.g. by hwspin_trylock()) before calling - * this function: it is a bug to call unlock on a @hwlock that is already - * unlocked. - */ -static inline void hwspin_unlock_in_atomic(struct hwspinlock *hwlock) -{ - __hwspin_unlock(hwlock, HWLOCK_IN_ATOMIC, NULL); -} - /** * hwspin_unlock() - unlock hwspinlock * @hwlock: a previously-acquired hwspinlock which we want to unlock diff --git a/include/linux/hyperv.h b/include/linux/hyperv.h index ddc8713ce5..d596a076da 100644 --- a/include/linux/hyperv.h +++ b/include/linux/hyperv.h @@ -1,78 +1,48 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * * Copyright (c) 2011, Microsoft Corporation. 
* + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., 59 Temple + * Place - Suite 330, Boston, MA 02111-1307 USA. + * * Authors: * Haiyang Zhang * Hank Janssen * K. Y. Srinivasan + * */ #ifndef _HYPERV_H #define _HYPERV_H #include +#include -#include #include #include #include #include +#include #include #include #include -#include -#include -#include + #define MAX_PAGE_BUFFER_COUNT 32 #define MAX_MULTIPAGE_BUFFER_COUNT 32 /* 128K */ #pragma pack(push, 1) -/* - * Types for GPADL, decides is how GPADL header is created. - * - * It doesn't make much difference between BUFFER and RING if PAGE_SIZE is the - * same as HV_HYP_PAGE_SIZE. - * - * If PAGE_SIZE is bigger than HV_HYP_PAGE_SIZE, the headers of ring buffers - * will be of PAGE_SIZE, however, only the first HV_HYP_PAGE will be put - * into gpadl, therefore the number for HV_HYP_PAGE and the indexes of each - * HV_HYP_PAGE will be different between different types of GPADL, for example - * if PAGE_SIZE is 64K: - * - * BUFFER: - * - * gva: |-- 64k --|-- 64k --| ... | - * gpa: | 4k | 4k | ... | 4k | 4k | 4k | ... | 4k | - * index: 0 1 2 15 16 17 18 .. 31 32 ... - * | | ... | | | ... | ... - * v V V V V V - * gpadl: | 4k | 4k | ... | 4k | 4k | 4k | ... | 4k | ... | - * index: 0 1 2 ... 15 16 17 18 .. 31 32 ... - * - * RING: - * - * | header | data | header | data | - * gva: |-- 64k --|-- 64k --| ... |-- 64k --|-- 64k --| ... | - * gpa: | 4k | .. | 4k | 4k | ... 
| 4k | ... | 4k | .. | 4k | .. | ... | - * index: 0 1 16 17 18 31 ... n n+1 n+16 ... 2n - * | / / / | / / - * | / / / | / / - * | / / ... / ... | / ... / - * | / / / | / / - * | / / / | / / - * V V V V V V v - * gpadl: | 4k | 4k | ... | ... | 4k | 4k | ... | - * index: 0 1 2 ... 16 ... n-15 n-14 n-13 ... 2n-30 - */ -enum hv_gpadl_type { - HV_GPADL_BUFFER, - HV_GPADL_RING -}; - /* Single-page buffer */ struct hv_page_buffer { u32 len; @@ -120,33 +90,18 @@ struct hv_ring_buffer { u32 interrupt_mask; /* - * WS2012/Win8 and later versions of Hyper-V implement interrupt - * driven flow management. The feature bit feat_pending_send_sz - * is set by the host on the host->guest ring buffer, and by the - * guest on the guest->host ring buffer. + * Win8 uses some of the reserved bits to implement + * interrupt driven flow management. On the send side + * we can request that the receiver interrupt the sender + * when the ring transitions from being full to being able + * to handle a message of size "pending_send_sz". * - * The meaning of the feature bit is a bit complex in that it has - * semantics that apply to both ring buffers. 
If the guest sets - * the feature bit in the guest->host ring buffer, the guest is - * telling the host that: - * 1) It will set the pending_send_sz field in the guest->host ring - * buffer when it is waiting for space to become available, and - * 2) It will read the pending_send_sz field in the host->guest - * ring buffer and interrupt the host when it frees enough space - * - * Similarly, if the host sets the feature bit in the host->guest - * ring buffer, the host is telling the guest that: - * 1) It will set the pending_send_sz field in the host->guest ring - * buffer when it is waiting for space to become available, and - * 2) It will read the pending_send_sz field in the guest->host - * ring buffer and interrupt the guest when it frees enough space - * - * If either the guest or host does not set the feature bit that it - * owns, that guest or host must do polling if it encounters a full - * ring buffer, and not signal the other end with an interrupt. + * Add necessary state for this enhancement. */ u32 pending_send_sz; + u32 reserved1[12]; + union { struct { u32 feat_pending_send_sz:1; @@ -155,40 +110,51 @@ struct hv_ring_buffer { } feature_bits; /* Pad it to PAGE_SIZE so that data starts on page boundary */ - u8 reserved2[PAGE_SIZE - 68]; + u8 reserved2[4028]; /* * Ring data starts here + RingDataStartOffset * !!! DO NOT place any fields below this !!! */ - u8 buffer[]; + u8 buffer[0]; } __packed; -/* Calculate the proper size of a ringbuffer, it must be page-aligned */ -#define VMBUS_RING_SIZE(payload_sz) PAGE_ALIGN(sizeof(struct hv_ring_buffer) + \ - (payload_sz)) - struct hv_ring_buffer_info { struct hv_ring_buffer *ring_buffer; u32 ring_size; /* Include the shared header */ - struct reciprocal_value ring_size_div10_reciprocal; spinlock_t ring_lock; u32 ring_datasize; /* < ring_size */ + u32 ring_data_startoffset; + u32 priv_write_index; u32 priv_read_index; - /* - * The ring buffer mutex lock. 
This lock prevents the ring buffer from - * being freed while the ring buffer is being accessed. - */ - struct mutex ring_buffer_mutex; - - /* Buffer that holds a copy of an incoming host packet */ - void *pkt_buffer; - u32 pkt_buffer_size; + u32 cached_read_index; }; +/* + * + * hv_get_ringbuffer_availbytes() + * + * Get number of bytes available to read and to write to + * for the specified ring buffer + */ +static inline void +hv_get_ringbuffer_availbytes(struct hv_ring_buffer_info *rbi, + u32 *read, u32 *write) +{ + u32 read_loc, write_loc, dsize; -static inline u32 hv_get_bytes_to_read(const struct hv_ring_buffer_info *rbi) + /* Capture the read/write indices before they changed */ + read_loc = rbi->ring_buffer->read_index; + write_loc = rbi->ring_buffer->write_index; + dsize = rbi->ring_datasize; + + *write = write_loc >= read_loc ? dsize - (write_loc - read_loc) : + read_loc - write_loc; + *read = dsize - *write; +} + +static inline u32 hv_get_bytes_to_read(struct hv_ring_buffer_info *rbi) { u32 read_loc, write_loc, dsize, read; @@ -202,7 +168,7 @@ static inline u32 hv_get_bytes_to_read(const struct hv_ring_buffer_info *rbi) return read; } -static inline u32 hv_get_bytes_to_write(const struct hv_ring_buffer_info *rbi) +static inline u32 hv_get_bytes_to_write(struct hv_ring_buffer_info *rbi) { u32 read_loc, write_loc, dsize, write; @@ -215,16 +181,19 @@ static inline u32 hv_get_bytes_to_write(const struct hv_ring_buffer_info *rbi) return write; } -static inline u32 hv_get_avail_to_write_percent( - const struct hv_ring_buffer_info *rbi) +static inline u32 hv_get_cached_bytes_to_write( + const struct hv_ring_buffer_info *rbi) { - u32 avail_write = hv_get_bytes_to_write(rbi); + u32 read_loc, write_loc, dsize, write; - return reciprocal_divide( - (avail_write << 3) + (avail_write << 1), - rbi->ring_size_div10_reciprocal); + dsize = rbi->ring_datasize; + read_loc = rbi->cached_read_index; + write_loc = rbi->ring_buffer->write_index; + + write = write_loc >= 
read_loc ? dsize - (write_loc - read_loc) : + read_loc - write_loc; + return write; } - /* * VMBUS version is 32 bit entity broken up into * two 16 bit quantities: major_number. minor_number. @@ -234,23 +203,17 @@ static inline u32 hv_get_avail_to_write_percent( * 2 . 4 (Windows 8) * 3 . 0 (Windows 8 R2) * 4 . 0 (Windows 10) - * 4 . 1 (Windows 10 RS3) - * 5 . 0 (Newer Windows 10) - * 5 . 1 (Windows 10 RS4) - * 5 . 2 (Windows Server 2019, RS5) - * 5 . 3 (Windows Server 2022) */ #define VERSION_WS2008 ((0 << 16) | (13)) #define VERSION_WIN7 ((1 << 16) | (1)) #define VERSION_WIN8 ((2 << 16) | (4)) #define VERSION_WIN8_1 ((3 << 16) | (0)) -#define VERSION_WIN10 ((4 << 16) | (0)) -#define VERSION_WIN10_V4_1 ((4 << 16) | (1)) -#define VERSION_WIN10_V5 ((5 << 16) | (0)) -#define VERSION_WIN10_V5_1 ((5 << 16) | (1)) -#define VERSION_WIN10_V5_2 ((5 << 16) | (2)) -#define VERSION_WIN10_V5_3 ((5 << 16) | (3)) +#define VERSION_WIN10 ((4 << 16) | (0)) + +#define VERSION_INVAL -1 + +#define VERSION_CURRENT VERSION_WIN10 /* Make maximum size of pipe payload of 16K */ #define MAX_PIPE_DATA_PAYLOAD (sizeof(u8) * 16384) @@ -270,8 +233,8 @@ static inline u32 hv_get_avail_to_write_percent( * struct contains the fundamental information about an offer. */ struct vmbus_channel_offer { - guid_t if_type; - guid_t if_instance; + uuid_le if_type; + uuid_le if_instance; /* * These two fields are not currently used. @@ -290,7 +253,7 @@ struct vmbus_channel_offer { /* * Pipes: - * The following structure is an integrated pipe protocol, which + * The following sructure is an integrated pipe protocol, which * is implemented on top of standard user-defined data. Pipe * clients have MAX_PIPE_USER_DEFINED_BYTES left for their own * use. @@ -301,10 +264,7 @@ struct vmbus_channel_offer { } pipe; } u; /* - * The sub_channel_index is defined in Win8: a value of zero means a - * primary channel and a value of non-zero means a sub-channel. 
- * - * Before Win8, the field is reserved, meaning it's always zero. + * The sub_channel_index is defined in win8. */ u16 sub_channel_index; u16 reserved3; @@ -367,7 +327,7 @@ struct vmadd_remove_transfer_page_set { struct gpa_range { u32 byte_count; u32 byte_offset; - u64 pfn_array[]; + u64 pfn_array[0]; }; /* @@ -479,15 +439,9 @@ enum vmbus_channel_message_type { CHANNELMSG_19 = 19, CHANNELMSG_20 = 20, CHANNELMSG_TL_CONNECT_REQUEST = 21, - CHANNELMSG_MODIFYCHANNEL = 22, - CHANNELMSG_TL_CONNECT_RESULT = 23, - CHANNELMSG_MODIFYCHANNEL_RESPONSE = 24, CHANNELMSG_COUNT }; -/* Hyper-V supports about 2048 channels, and the RELIDs start with 1. */ -#define INVALID_RELID U32_MAX - struct vmbus_channel_message_header { enum vmbus_channel_message_type msgtype; u32 padding; @@ -571,10 +525,10 @@ struct vmbus_channel_open_channel { u32 target_vp; /* - * The upstream ring buffer begins at offset zero in the memory - * described by RingBufferGpadlHandle. The downstream ring buffer - * follows it at this offset (in pages). - */ + * The upstream ring buffer begins at offset zero in the memory + * described by RingBufferGpadlHandle. The downstream ring buffer + * follows it at this offset (in pages). + */ u32 downstream_ringbuffer_pageoffset; /* User-specific data to be passed along to the server endpoint. */ @@ -589,13 +543,6 @@ struct vmbus_channel_open_result { u32 status; } __packed; -/* Modify Channel Result parameters */ -struct vmbus_channel_modifychannel_response { - struct vmbus_channel_message_header header; - u32 child_relid; - u32 status; -} __packed; - /* Close channel parameters; */ struct vmbus_channel_close_channel { struct vmbus_channel_message_header header; @@ -619,7 +566,7 @@ struct vmbus_channel_gpadl_header { u32 gpadl; u16 range_buflen; u16 rangecount; - struct gpa_range range[]; + struct gpa_range range[0]; } __packed; /* This is the followup packet that contains more PFNs. 
*/ @@ -627,7 +574,7 @@ struct vmbus_channel_gpadl_body { struct vmbus_channel_message_header header; u32 msgnumber; u32 gpadl; - u64 pfn[]; + u64 pfn[0]; } __packed; struct vmbus_channel_gpadl_created { @@ -657,14 +604,7 @@ struct vmbus_channel_initiate_contact { struct vmbus_channel_message_header header; u32 vmbus_version_requested; u32 target_vcpu; /* The VCPU the host should respond to */ - union { - u64 interrupt_page; - struct { - u8 msg_sint; - u8 padding1[3]; - u32 padding2; - }; - }; + u64 interrupt_page; u64 monitor_page1; u64 monitor_page2; } __packed; @@ -672,33 +612,13 @@ struct vmbus_channel_initiate_contact { /* Hyper-V socket: guest's connect()-ing to host */ struct vmbus_channel_tl_connect_request { struct vmbus_channel_message_header header; - guid_t guest_endpoint_id; - guid_t host_service_id; -} __packed; - -/* Modify Channel parameters, cf. vmbus_send_modifychannel() */ -struct vmbus_channel_modifychannel { - struct vmbus_channel_message_header header; - u32 child_relid; - u32 target_vp; + uuid_le guest_endpoint_id; + uuid_le host_service_id; } __packed; struct vmbus_channel_version_response { struct vmbus_channel_message_header header; u8 version_supported; - - u8 connection_state; - u16 padding; - - /* - * On new hosts that support VMBus protocol 5.0, we must use - * VMBUS_MESSAGE_CONNECTION_ID_4 for the Initiate Contact Message, - * and for subsequent messages, we must use the Message Connection ID - * field in the host-returned Version Response Message. - * - * On old hosts, we should always use VMBUS_MESSAGE_CONNECTION_ID (1). 
- */ - u32 msg_conn_id; } __packed; enum vmbus_channel_state { @@ -728,7 +648,6 @@ struct vmbus_channel_msginfo { struct vmbus_channel_gpadl_torndown gpadl_torndown; struct vmbus_channel_gpadl_created gpadl_created; struct vmbus_channel_version_response version_response; - struct vmbus_channel_modifychannel_response modify_response; } response; u32 msgsize; @@ -736,7 +655,7 @@ struct vmbus_channel_msginfo { * The channel message that goes out on the "wire". * It will contain at minimum the VMBUS_CHANNEL_MESSAGE_HEADER header */ - unsigned char msg[]; + unsigned char msg[0]; }; struct vmbus_close_msg { @@ -753,6 +672,28 @@ union hv_connection_id { } u; }; +/* Definition of the hv_signal_event hypercall input structure. */ +struct hv_input_signal_event { + union hv_connection_id connectionid; + u16 flag_number; + u16 rsvdz; +}; + +struct hv_input_signal_event_buffer { + u64 align8; + struct hv_input_signal_event event; +}; + +enum hv_signal_policy { + HV_SIGNAL_POLICY_DEFAULT = 0, + HV_SIGNAL_POLICY_EXPLICIT, +}; + +enum hv_numa_policy { + HV_BALANCED = 0, + HV_LOCALIZED, +}; + enum vmbus_device_type { HV_IDE = 0, HV_SCSI, @@ -770,39 +711,15 @@ enum vmbus_device_type { HV_FCOPY, HV_BACKUP, HV_DM, - HV_UNKNOWN, + HV_UNKOWN, }; -/* - * Provides request ids for VMBus. Encapsulates guest memory - * addresses and stores the next available slot in req_arr - * to generate new ids in constant time. - */ -struct vmbus_requestor { - u64 *req_arr; - unsigned long *req_bitmap; /* is a given slot available? 
*/ - u32 size; - u64 next_request_id; - spinlock_t req_lock; /* provides atomicity */ -}; - -#define VMBUS_NO_RQSTOR U64_MAX -#define VMBUS_RQST_ERROR (U64_MAX - 1) -/* NetVSC-specific */ -#define VMBUS_RQST_ID_NO_RESPONSE (U64_MAX - 2) -/* StorVSC-specific */ -#define VMBUS_RQST_INIT (U64_MAX - 2) -#define VMBUS_RQST_RESET (U64_MAX - 3) - struct vmbus_device { u16 dev_type; - guid_t guid; + uuid_le guid; bool perf_device; - bool allowed_in_isolated; }; -#define VMBUS_DEFAULT_MAX_PKT_SIZE 4096 - struct vmbus_channel { struct list_head listentry; @@ -819,90 +736,68 @@ struct vmbus_channel { u8 monitor_bit; bool rescind; /* got rescind msg */ - bool rescind_ref; /* got rescind msg, got channel reference */ - struct completion rescind_event; u32 ringbuffer_gpadlhandle; /* Allocated memory for ring buffer */ - struct page *ringbuffer_page; + void *ringbuffer_pages; u32 ringbuffer_pagecount; - u32 ringbuffer_send_offset; struct hv_ring_buffer_info outbound; /* send to parent */ struct hv_ring_buffer_info inbound; /* receive from parent */ + spinlock_t inbound_lock; struct vmbus_close_msg close_msg; - /* Statistics */ - u64 interrupts; /* Host to Guest interrupts */ - u64 sig_events; /* Guest to Host events */ + /* Channel callback are invoked in this workqueue context */ + /* HANDLE dataWorkQueue; */ - /* - * Guest to host interrupts caused by the outbound ring buffer changing - * from empty to not empty. - */ - u64 intr_out_empty; - - /* - * Indicates that a full outbound ring buffer was encountered. The flag - * is set to true when a full outbound ring buffer is encountered and - * set to false when a write to the outbound ring buffer is completed. 
- */ - bool out_full_flag; - - /* Channel callback's invoked in softirq context */ - struct tasklet_struct callback_event; void (*onchannel_callback)(void *context); void *channel_callback_context; - void (*change_target_cpu_callback)(struct vmbus_channel *channel, - u32 old, u32 new); - /* - * Synchronize channel scheduling and channel removal; see the inline - * comments in vmbus_chan_sched() and vmbus_reset_channel_cb(). + * A channel can be marked for efficient (batched) + * reading: + * If batched_reading is set to "true", we read until the + * channel is empty and hold off interrupts from the host + * during the entire read process. + * If batched_reading is set to "false", the client is not + * going to perform batched reading. + * + * By default we will enable batched reading; specific + * drivers that don't want this behavior can turn it off. */ - spinlock_t sched_lock; - /* - * A channel can be marked for one of three modes of reading: - * BATCHED - callback called from taslket and should read - * channel until empty. Interrupts from the host - * are masked while read is in process (default). - * DIRECT - callback called from tasklet (softirq). - * ISR - callback called in interrupt context and must - * invoke its own deferred processing. - * Host interrupts are disabled and must be re-enabled - * when ring is empty. - */ - enum hv_callback_mode { - HV_CALL_BATCHED, - HV_CALL_DIRECT, - HV_CALL_ISR - } callback_mode; + bool batched_reading; bool is_dedicated_interrupt; - u64 sig_event; + struct hv_input_signal_event_buffer sig_buf; + struct hv_input_signal_event *sig_event; /* - * Starting with win8, this field will be used to specify the - * target CPU on which to deliver the interrupt for the host - * to guest communication. - * - * Prior to win8, incoming channel interrupts would only be - * delivered on CPU 0. Setting this value to 0 would preserve - * the earlier behavior. 
+ * Starting with win8, this field will be used to specify + * the target virtual processor on which to deliver the interrupt for + * the host to guest communication. + * Prior to win8, incoming channel interrupts would only + * be delivered on cpu 0. Setting this value to 0 would + * preserve the earlier behavior. */ + u32 target_vp; + /* The corresponding CPUID in the guest */ u32 target_cpu; + /* + * State to manage the CPU affiliation of channels. + */ + struct cpumask alloced_cpus_in_node; + int numa_node; /* * Support for sub-channels. For high performance devices, * it will be useful to have multiple sub-channels to support * a scalable communication infrastructure with the host. - * The support for sub-channels is implemented as an extension + * The support for sub-channels is implemented as an extention * to the current infrastructure. * The initial offer is considered the primary channel and this * offer message will indicate if the host supports sub-channels. - * The guest is free to ask for sub-channels to be offered and can + * The guest is free to ask for sub-channels to be offerred and can * open these sub-channels as a normal "primary" channel. However, * all sub-channels will have the same type and instance guids as the * primary channel. Requests sent on a given channel will result in a @@ -922,10 +817,25 @@ struct vmbus_channel { */ void (*chn_rescind_callback)(struct vmbus_channel *channel); + /* + * The spinlock to protect the structure. It is being used to protect + * test-and-set access to various attributes of the structure as well + * as all sc_list operations. + */ + spinlock_t lock; /* * All Sub-channels of a primary channel are linked here. */ struct list_head sc_list; + /* + * Current number of sub-channels. + */ + int num_sc; + /* + * Number of a sub-channel (position within sc_list) which is supposed + * to be used as the next outgoing channel. + */ + int next_oc; /* * The primary channel this sub-channel belongs to. 
* This will be NULL for the primary channel. @@ -935,18 +845,28 @@ struct vmbus_channel { * Support per-channel state for use by vmbus drivers. */ void *per_channel_state; - /* - * Defer freeing channel until after all cpu's have - * gone through grace period. + * To support per-cpu lookup mapping of relid to channel, + * link up channels based on their CPU affinity. */ - struct rcu_head rcu; - + struct list_head percpu_list; /* - * For sysfs per-channel properties. + * Host signaling policy: The default policy will be + * based on the ring buffer state. We will also support + * a policy where the client driver can have explicit + * signaling control. */ - struct kobject kobj; - + enum hv_signal_policy signal_policy; + /* + * On the channel send side, many of the VMBUS + * device drivers explicity serialize access to the + * outgoing ring buffer. Give more control to the + * VMBUS device drivers in terms how to serialize + * accesss to the outgoing ring buffer. + * The default behavior will be to aquire the + * ring lock to preserve the current behavior. + */ + bool acquire_ring_lock; /* * For performance critical channels (storage, networking * etc,), Hyper-V has a mechanism to enhance the throughput @@ -966,77 +886,31 @@ struct vmbus_channel { * Clearly, these optimizations improve throughput at the expense of * latency. Furthermore, since the channel is shared for both * control and data messages, control messages currently suffer - * unnecessary latency adversely impacting performance and boot + * unnecessary latency adversley impacting performance and boot * time. To fix this issue, permit tagging the channel as being * in "low latency" mode. In this mode, we will bypass the monitor * mechanism. */ bool low_latency; - bool probe_done; - /* - * Cache the device ID here for easy access; this is useful, in - * particular, in situations where the channel's device_obj has - * not been allocated/initialized yet. 
+ * NUMA distribution policy: + * We support teo policies: + * 1) Balanced: Here all performance critical channels are + * distributed evenly amongst all the NUMA nodes. + * This policy will be the default policy. + * 2) Localized: All channels of a given instance of a + * performance critical service will be assigned CPUs + * within a selected NUMA node. */ - u16 device_id; + enum hv_numa_policy affinity_policy; - /* - * We must offload the handling of the primary/sub channels - * from the single-threaded vmbus_connection.work_queue to - * two different workqueue, otherwise we can block - * vmbus_connection.work_queue and hang: see vmbus_process_offer(). - */ - struct work_struct add_channel_work; - - /* - * Guest to host interrupts caused by the inbound ring buffer changing - * from full to not full while a packet is waiting. - */ - u64 intr_in_full; - - /* - * The total number of write operations that encountered a full - * outbound ring buffer. - */ - u64 out_full_total; - - /* - * The number of write operations that were the first to encounter a - * full outbound ring buffer. - */ - u64 out_full_first; - - /* enabling/disabling fuzz testing on the channel (default is false)*/ - bool fuzz_testing_state; - - /* - * Interrupt delay will delay the guest from emptying the ring buffer - * for a specific amount of time. The delay is in microseconds and will - * be between 1 to a maximum of 1000, its default is 0 (no delay). - * The Message delay will delay guest reading on a per message basis - * in microseconds between 1 to 1000 with the default being 0 - * (no delay). 
- */ - u32 fuzz_testing_interrupt_delay; - u32 fuzz_testing_message_delay; - - /* callback to generate a request ID from a request address */ - u64 (*next_request_id_callback)(struct vmbus_channel *channel, u64 rqst_addr); - /* callback to retrieve a request address from a request ID */ - u64 (*request_addr_callback)(struct vmbus_channel *channel, u64 rqst_id); - - /* request/transaction ids for VMBus */ - struct vmbus_requestor requestor; - u32 rqstor_size; - - /* The max size of a packet on this channel */ - u32 max_pkt_size; }; -u64 vmbus_next_request_id(struct vmbus_channel *channel, u64 rqst_addr); -u64 vmbus_request_addr(struct vmbus_channel *channel, u64 trans_id); +static inline void set_channel_lock_state(struct vmbus_channel *c, bool state) +{ + c->acquire_ring_lock = state; +} static inline bool is_hvsock_channel(const struct vmbus_channel *c) { @@ -1044,15 +918,21 @@ static inline bool is_hvsock_channel(const struct vmbus_channel *c) VMBUS_CHANNEL_TLNPI_PROVIDER_OFFER); } -static inline bool is_sub_channel(const struct vmbus_channel *c) +static inline void set_channel_signal_state(struct vmbus_channel *c, + enum hv_signal_policy policy) { - return c->offermsg.offer.sub_channel_index != 0; + c->signal_policy = policy; } -static inline void set_channel_read_mode(struct vmbus_channel *c, - enum hv_callback_mode mode) +static inline void set_channel_affinity_state(struct vmbus_channel *c, + enum hv_numa_policy policy) { - c->callback_mode = mode; + c->affinity_policy = policy; +} + +static inline void set_channel_read_state(struct vmbus_channel *c, bool state) +{ + c->batched_reading = state; } static inline void set_per_channel_state(struct vmbus_channel *c, void *s) @@ -1068,25 +948,20 @@ static inline void *get_per_channel_state(struct vmbus_channel *c) static inline void set_channel_pending_send_size(struct vmbus_channel *c, u32 size) { - unsigned long flags; - - if (size) { - spin_lock_irqsave(&c->outbound.ring_lock, flags); - ++c->out_full_total; - - 
if (!c->out_full_flag) { - ++c->out_full_first; - c->out_full_flag = true; - } - spin_unlock_irqrestore(&c->outbound.ring_lock, flags); - } else { - c->out_full_flag = false; - } - c->outbound.ring_buffer->pending_send_sz = size; } -void vmbus_onmessage(struct vmbus_channel_message_header *hdr); +static inline void set_low_latency_mode(struct vmbus_channel *c) +{ + c->low_latency = true; +} + +static inline void clear_low_latency_mode(struct vmbus_channel *c) +{ + c->low_latency = false; +} + +void vmbus_onmessage(void *context); int vmbus_request_offers(void); @@ -1100,6 +975,14 @@ void vmbus_set_sc_create_callback(struct vmbus_channel *primary_channel, void vmbus_set_chn_rescind_callback(struct vmbus_channel *channel, void (*chn_rescind_cb)(struct vmbus_channel *)); +/* + * Retrieve the (sub) channel on which to send an outgoing request. + * When a primary channel has multiple sub-channels, we choose a + * channel whose VCPU binding is closest to the VCPU on which + * this call is being made. + */ +struct vmbus_channel *vmbus_get_outgoing_channel(struct vmbus_channel *primary); + /* * Check if sub-channels have already been offerred. This API will be useful * when the driver is unloaded after establishing sub-channels. 
In this case, @@ -1149,21 +1032,13 @@ struct vmbus_packet_mpb_array { struct hv_mpb_array range; } __packed; -int vmbus_alloc_ring(struct vmbus_channel *channel, - u32 send_size, u32 recv_size); -void vmbus_free_ring(struct vmbus_channel *channel); - -int vmbus_connect_ring(struct vmbus_channel *channel, - void (*onchannel_callback)(void *context), - void *context); -int vmbus_disconnect_ring(struct vmbus_channel *channel); extern int vmbus_open(struct vmbus_channel *channel, u32 send_ringbuffersize, u32 recv_ringbuffersize, void *userdata, u32 userdatalen, - void (*onchannel_callback)(void *context), + void(*onchannel_callback)(void *context), void *context); extern void vmbus_close(struct vmbus_channel *channel); @@ -1175,6 +1050,14 @@ extern int vmbus_sendpacket(struct vmbus_channel *channel, enum vmbus_packet_type type, u32 flags); +extern int vmbus_sendpacket_ctl(struct vmbus_channel *channel, + void *buffer, + u32 bufferLen, + u64 requestid, + enum vmbus_packet_type type, + u32 flags, + bool kick_q); + extern int vmbus_sendpacket_pagebuffer(struct vmbus_channel *channel, struct hv_page_buffer pagebuffers[], u32 pagecount, @@ -1182,6 +1065,21 @@ extern int vmbus_sendpacket_pagebuffer(struct vmbus_channel *channel, u32 bufferlen, u64 requestid); +extern int vmbus_sendpacket_pagebuffer_ctl(struct vmbus_channel *channel, + struct hv_page_buffer pagebuffers[], + u32 pagecount, + void *buffer, + u32 bufferlen, + u64 requestid, + u32 flags, + bool kick_q); + +extern int vmbus_sendpacket_multipagebuffer(struct vmbus_channel *channel, + struct hv_multipage_buffer *mpb, + void *buffer, + u32 bufferlen, + u64 requestid); + extern int vmbus_sendpacket_mpb_desc(struct vmbus_channel *channel, struct vmbus_packet_mpb_array *mpb, u32 desc_size, @@ -1197,8 +1095,6 @@ extern int vmbus_establish_gpadl(struct vmbus_channel *channel, extern int vmbus_teardown_gpadl(struct vmbus_channel *channel, u32 gpadl_handle); -void vmbus_reset_channel_cb(struct vmbus_channel *channel); - 
extern int vmbus_recvpacket(struct vmbus_channel *channel, void *buffer, u32 bufferlen, @@ -1233,45 +1129,30 @@ struct hv_driver { bool hvsock; /* the device type supported by this driver */ - guid_t dev_type; + uuid_le dev_type; const struct hv_vmbus_device_id *id_table; struct device_driver driver; - /* dynamic device GUID's */ - struct { - spinlock_t lock; - struct list_head list; - } dynids; - int (*probe)(struct hv_device *, const struct hv_vmbus_device_id *); int (*remove)(struct hv_device *); void (*shutdown)(struct hv_device *); - int (*suspend)(struct hv_device *); - int (*resume)(struct hv_device *); - }; /* Base device object */ struct hv_device { /* the device type id of this device */ - guid_t dev_type; + uuid_le dev_type; /* the device instance id of this device */ - guid_t dev_instance; + uuid_le dev_instance; u16 vendor_id; u16 device_id; struct device device; - char *driver_override; /* Driver name to force a match */ struct vmbus_channel *channel; - struct kset *channels_kset; - - /* place holder to keep track of the dir for hv device in debugfs */ - struct dentry *debug_dir; - }; @@ -1295,18 +1176,6 @@ static inline void *hv_get_drvdata(struct hv_device *dev) return dev_get_drvdata(&dev->device); } -struct hv_ring_buffer_debug_info { - u32 current_interrupt_mask; - u32 current_read_index; - u32 current_write_index; - u32 bytes_avail_toread; - u32 bytes_avail_towrite; -}; - - -int hv_ringbuffer_get_debuginfo(struct hv_ring_buffer_info *ring_info, - struct hv_ring_buffer_debug_info *debug_info); - /* Vmbus interface */ #define vmbus_driver_register(driver) \ __vmbus_driver_register(driver, THIS_MODULE, KBUILD_MODNAME) @@ -1322,6 +1191,8 @@ int vmbus_allocate_mmio(struct resource **new, struct hv_device *device_obj, resource_size_t size, resource_size_t align, bool fb_overlap_ok); void vmbus_free_mmio(resource_size_t start, resource_size_t size); +int vmbus_cpu_number_to_vp_number(int cpu_number); +u64 hv_do_hypercall(u64 control, void *input, void 
*output); /* * GUID definitions of various offer types - services offered to the guest. @@ -1332,102 +1203,102 @@ void vmbus_free_mmio(resource_size_t start, resource_size_t size); * {f8615163-df3e-46c5-913f-f2d2f965ed0e} */ #define HV_NIC_GUID \ - .guid = GUID_INIT(0xf8615163, 0xdf3e, 0x46c5, 0x91, 0x3f, \ - 0xf2, 0xd2, 0xf9, 0x65, 0xed, 0x0e) + .guid = UUID_LE(0xf8615163, 0xdf3e, 0x46c5, 0x91, 0x3f, \ + 0xf2, 0xd2, 0xf9, 0x65, 0xed, 0x0e) /* * IDE GUID * {32412632-86cb-44a2-9b5c-50d1417354f5} */ #define HV_IDE_GUID \ - .guid = GUID_INIT(0x32412632, 0x86cb, 0x44a2, 0x9b, 0x5c, \ - 0x50, 0xd1, 0x41, 0x73, 0x54, 0xf5) + .guid = UUID_LE(0x32412632, 0x86cb, 0x44a2, 0x9b, 0x5c, \ + 0x50, 0xd1, 0x41, 0x73, 0x54, 0xf5) /* * SCSI GUID * {ba6163d9-04a1-4d29-b605-72e2ffb1dc7f} */ #define HV_SCSI_GUID \ - .guid = GUID_INIT(0xba6163d9, 0x04a1, 0x4d29, 0xb6, 0x05, \ - 0x72, 0xe2, 0xff, 0xb1, 0xdc, 0x7f) + .guid = UUID_LE(0xba6163d9, 0x04a1, 0x4d29, 0xb6, 0x05, \ + 0x72, 0xe2, 0xff, 0xb1, 0xdc, 0x7f) /* * Shutdown GUID * {0e0b6031-5213-4934-818b-38d90ced39db} */ #define HV_SHUTDOWN_GUID \ - .guid = GUID_INIT(0x0e0b6031, 0x5213, 0x4934, 0x81, 0x8b, \ - 0x38, 0xd9, 0x0c, 0xed, 0x39, 0xdb) + .guid = UUID_LE(0x0e0b6031, 0x5213, 0x4934, 0x81, 0x8b, \ + 0x38, 0xd9, 0x0c, 0xed, 0x39, 0xdb) /* * Time Synch GUID * {9527E630-D0AE-497b-ADCE-E80AB0175CAF} */ #define HV_TS_GUID \ - .guid = GUID_INIT(0x9527e630, 0xd0ae, 0x497b, 0xad, 0xce, \ - 0xe8, 0x0a, 0xb0, 0x17, 0x5c, 0xaf) + .guid = UUID_LE(0x9527e630, 0xd0ae, 0x497b, 0xad, 0xce, \ + 0xe8, 0x0a, 0xb0, 0x17, 0x5c, 0xaf) /* * Heartbeat GUID * {57164f39-9115-4e78-ab55-382f3bd5422d} */ #define HV_HEART_BEAT_GUID \ - .guid = GUID_INIT(0x57164f39, 0x9115, 0x4e78, 0xab, 0x55, \ - 0x38, 0x2f, 0x3b, 0xd5, 0x42, 0x2d) + .guid = UUID_LE(0x57164f39, 0x9115, 0x4e78, 0xab, 0x55, \ + 0x38, 0x2f, 0x3b, 0xd5, 0x42, 0x2d) /* * KVP GUID * {a9a0f4e7-5a45-4d96-b827-8a841e8c03e6} */ #define HV_KVP_GUID \ - .guid = GUID_INIT(0xa9a0f4e7, 0x5a45, 0x4d96, 0xb8, 
0x27, \ - 0x8a, 0x84, 0x1e, 0x8c, 0x03, 0xe6) + .guid = UUID_LE(0xa9a0f4e7, 0x5a45, 0x4d96, 0xb8, 0x27, \ + 0x8a, 0x84, 0x1e, 0x8c, 0x03, 0xe6) /* * Dynamic memory GUID * {525074dc-8985-46e2-8057-a307dc18a502} */ #define HV_DM_GUID \ - .guid = GUID_INIT(0x525074dc, 0x8985, 0x46e2, 0x80, 0x57, \ - 0xa3, 0x07, 0xdc, 0x18, 0xa5, 0x02) + .guid = UUID_LE(0x525074dc, 0x8985, 0x46e2, 0x80, 0x57, \ + 0xa3, 0x07, 0xdc, 0x18, 0xa5, 0x02) /* * Mouse GUID * {cfa8b69e-5b4a-4cc0-b98b-8ba1a1f3f95a} */ #define HV_MOUSE_GUID \ - .guid = GUID_INIT(0xcfa8b69e, 0x5b4a, 0x4cc0, 0xb9, 0x8b, \ - 0x8b, 0xa1, 0xa1, 0xf3, 0xf9, 0x5a) + .guid = UUID_LE(0xcfa8b69e, 0x5b4a, 0x4cc0, 0xb9, 0x8b, \ + 0x8b, 0xa1, 0xa1, 0xf3, 0xf9, 0x5a) /* * Keyboard GUID * {f912ad6d-2b17-48ea-bd65-f927a61c7684} */ #define HV_KBD_GUID \ - .guid = GUID_INIT(0xf912ad6d, 0x2b17, 0x48ea, 0xbd, 0x65, \ - 0xf9, 0x27, 0xa6, 0x1c, 0x76, 0x84) + .guid = UUID_LE(0xf912ad6d, 0x2b17, 0x48ea, 0xbd, 0x65, \ + 0xf9, 0x27, 0xa6, 0x1c, 0x76, 0x84) /* * VSS (Backup/Restore) GUID */ #define HV_VSS_GUID \ - .guid = GUID_INIT(0x35fa2e29, 0xea23, 0x4236, 0x96, 0xae, \ - 0x3a, 0x6e, 0xba, 0xcb, 0xa4, 0x40) + .guid = UUID_LE(0x35fa2e29, 0xea23, 0x4236, 0x96, 0xae, \ + 0x3a, 0x6e, 0xba, 0xcb, 0xa4, 0x40) /* * Synthetic Video GUID * {DA0A7802-E377-4aac-8E77-0558EB1073F8} */ #define HV_SYNTHVID_GUID \ - .guid = GUID_INIT(0xda0a7802, 0xe377, 0x4aac, 0x8e, 0x77, \ - 0x05, 0x58, 0xeb, 0x10, 0x73, 0xf8) + .guid = UUID_LE(0xda0a7802, 0xe377, 0x4aac, 0x8e, 0x77, \ + 0x05, 0x58, 0xeb, 0x10, 0x73, 0xf8) /* * Synthetic FC GUID * {2f9bcc4a-0069-4af3-b76b-6fd0be528cda} */ #define HV_SYNTHFC_GUID \ - .guid = GUID_INIT(0x2f9bcc4a, 0x0069, 0x4af3, 0xb7, 0x6b, \ - 0x6f, 0xd0, 0xbe, 0x52, 0x8c, 0xda) + .guid = UUID_LE(0x2f9bcc4a, 0x0069, 0x4af3, 0xb7, 0x6b, \ + 0x6f, 0xd0, 0xbe, 0x52, 0x8c, 0xda) /* * Guest File Copy Service @@ -1435,16 +1306,16 @@ void vmbus_free_mmio(resource_size_t start, resource_size_t size); */ #define HV_FCOPY_GUID \ - .guid = 
GUID_INIT(0x34d14be3, 0xdee4, 0x41c8, 0x9a, 0xe7, \ - 0x6b, 0x17, 0x49, 0x77, 0xc1, 0x92) + .guid = UUID_LE(0x34d14be3, 0xdee4, 0x41c8, 0x9a, 0xe7, \ + 0x6b, 0x17, 0x49, 0x77, 0xc1, 0x92) /* * NetworkDirect. This is the guest RDMA service. * {8c2eaf3d-32a7-4b09-ab99-bd1f1c86b501} */ #define HV_ND_GUID \ - .guid = GUID_INIT(0x8c2eaf3d, 0x32a7, 0x4b09, 0xab, 0x99, \ - 0xbd, 0x1f, 0x1c, 0x86, 0xb5, 0x01) + .guid = UUID_LE(0x8c2eaf3d, 0x32a7, 0x4b09, 0xab, 0x99, \ + 0xbd, 0x1f, 0x1c, 0x86, 0xb5, 0x01) /* * PCI Express Pass Through @@ -1452,8 +1323,8 @@ void vmbus_free_mmio(resource_size_t start, resource_size_t size); */ #define HV_PCIE_GUID \ - .guid = GUID_INIT(0x44c4f61d, 0x4444, 0x4400, 0x9d, 0x52, \ - 0x80, 0x2e, 0x27, 0xed, 0xe1, 0x9f) + .guid = UUID_LE(0x44c4f61d, 0x4444, 0x4400, 0x9d, 0x52, \ + 0x80, 0x2e, 0x27, 0xed, 0xe1, 0x9f) /* * Linux doesn't support the 3 devices: the first two are for @@ -1465,16 +1336,16 @@ void vmbus_free_mmio(resource_size_t start, resource_size_t size); */ #define HV_AVMA1_GUID \ - .guid = GUID_INIT(0xf8e65716, 0x3cb3, 0x4a06, 0x9a, 0x60, \ - 0x18, 0x89, 0xc5, 0xcc, 0xca, 0xb5) + .guid = UUID_LE(0xf8e65716, 0x3cb3, 0x4a06, 0x9a, 0x60, \ + 0x18, 0x89, 0xc5, 0xcc, 0xca, 0xb5) #define HV_AVMA2_GUID \ - .guid = GUID_INIT(0x3375baf4, 0x9e15, 0x4b30, 0xb7, 0x65, \ - 0x67, 0xac, 0xb1, 0x0d, 0x60, 0x7b) + .guid = UUID_LE(0x3375baf4, 0x9e15, 0x4b30, 0xb7, 0x65, \ + 0x67, 0xac, 0xb1, 0x0d, 0x60, 0x7b) #define HV_RDV_GUID \ - .guid = GUID_INIT(0x276aacf4, 0xac15, 0x426c, 0x98, 0xdd, \ - 0x75, 0x21, 0xad, 0x3f, 0x01, 0xfe) + .guid = UUID_LE(0x276aacf4, 0xac15, 0x426c, 0x98, 0xdd, \ + 0x75, 0x21, 0xad, 0x3f, 0x01, 0xfe) /* * Common header for Hyper-V ICs @@ -1486,7 +1357,6 @@ void vmbus_free_mmio(resource_size_t start, resource_size_t size); #define ICMSGTYPE_SHUTDOWN 3 #define ICMSGTYPE_TIMESYNC 4 #define ICMSGTYPE_VSS 5 -#define ICMSGTYPE_FCOPY 7 #define ICMSGHDRFLAG_TRANSACTION 1 #define ICMSGHDRFLAG_REQUEST 2 @@ -1505,8 +1375,6 @@ struct 
hv_util_service { void (*util_cb)(void *); int (*util_init)(struct hv_util_service *); void (*util_deinit)(void); - int (*util_pre_suspend)(void); - int (*util_pre_resume)(void); }; struct vmbuspipe_hdr { @@ -1530,17 +1398,11 @@ struct icmsg_hdr { u8 reserved[2]; } __packed; -#define IC_VERSION_NEGOTIATION_MAX_VER_COUNT 100 -#define ICMSG_HDR (sizeof(struct vmbuspipe_hdr) + sizeof(struct icmsg_hdr)) -#define ICMSG_NEGOTIATE_PKT_SIZE(icframe_vercnt, icmsg_vercnt) \ - (ICMSG_HDR + sizeof(struct icmsg_negotiate) + \ - (((icframe_vercnt) + (icmsg_vercnt)) * sizeof(struct ic_version))) - struct icmsg_negotiate { u16 icframe_vercnt; u16 icmsg_vercnt; u32 reserved; - struct ic_version icversion_data[]; /* any size array */ + struct ic_version icversion_data[1]; /* any size array */ } __packed; struct shutdown_msg_data { @@ -1585,18 +1447,20 @@ struct ictimesync_ref_data { struct hyperv_service_callback { u8 msg_type; char *log_msg; - guid_t data; + uuid_le data; struct vmbus_channel *channel; - void (*callback)(void *context); + void (*callback) (void *context); }; #define MAX_SRV_VER 0x7ffffff -extern bool vmbus_prep_negotiate_resp(struct icmsg_hdr *icmsghdrp, u8 *buf, u32 buflen, - const int *fw_version, int fw_vercnt, - const int *srv_version, int srv_vercnt, - int *nego_fw_version, int *nego_srv_version); +extern bool vmbus_prep_negotiate_resp(struct icmsg_hdr *, + struct icmsg_negotiate *, u8 *, int, + int); -void hv_process_channel_removal(struct vmbus_channel *channel); +void hv_event_tasklet_disable(struct vmbus_channel *channel); +void hv_event_tasklet_enable(struct vmbus_channel *channel); + +void hv_process_channel_removal(struct vmbus_channel *channel, u32 relid); void vmbus_setevent(struct vmbus_channel *channel); /* @@ -1605,164 +1469,149 @@ void vmbus_setevent(struct vmbus_channel *channel); extern __u32 vmbus_proto_version; -int vmbus_send_tl_connect_request(const guid_t *shv_guest_servie_id, - const guid_t *shv_host_servie_id); -int 
vmbus_send_modifychannel(struct vmbus_channel *channel, u32 target_vp); +int vmbus_send_tl_connect_request(const uuid_le *shv_guest_servie_id, + const uuid_le *shv_host_servie_id); void vmbus_set_event(struct vmbus_channel *channel); /* Get the start of the ring buffer. */ static inline void * -hv_get_ring_buffer(const struct hv_ring_buffer_info *ring_info) +hv_get_ring_buffer(struct hv_ring_buffer_info *ring_info) { - return ring_info->ring_buffer->buffer; + return (void *)ring_info->ring_buffer->buffer; } /* - * Mask off host interrupt callback notifications + * To optimize the flow management on the send-side, + * when the sender is blocked because of lack of + * sufficient space in the ring buffer, potential the + * consumer of the ring buffer can signal the producer. + * This is controlled by the following parameters: + * + * 1. pending_send_sz: This is the size in bytes that the + * producer is trying to send. + * 2. The feature bit feat_pending_send_sz set to indicate if + * the consumer of the ring will signal when the ring + * state transitions from being full to a state where + * there is room for the producer to send the pending packet. */ -static inline void hv_begin_read(struct hv_ring_buffer_info *rbi) + +static inline void hv_signal_on_read(struct vmbus_channel *channel) { - rbi->ring_buffer->interrupt_mask = 1; - - /* make sure mask update is not reordered */ - virt_mb(); -} - -/* - * Re-enable host callback and return number of outstanding bytes - */ -static inline u32 hv_end_read(struct hv_ring_buffer_info *rbi) -{ - - rbi->ring_buffer->interrupt_mask = 0; - - /* make sure mask update is not reordered */ - virt_mb(); + u32 cur_write_sz, cached_write_sz; + u32 pending_sz; + struct hv_ring_buffer_info *rbi = &channel->inbound; /* - * Now check to see if the ring buffer is still empty. - * If it is not, we raced and we need to process new - * incoming messages. + * Issue a full memory barrier before making the signaling decision. 
+ * Here is the reason for having this barrier: + * If the reading of the pend_sz (in this function) + * were to be reordered and read before we commit the new read + * index (in the calling function) we could + * have a problem. If the host were to set the pending_sz after we + * have sampled pending_sz and go to sleep before we commit the + * read index, we could miss sending the interrupt. Issue a full + * memory barrier to address this. */ - return hv_get_bytes_to_read(rbi); + virt_mb(); + + pending_sz = READ_ONCE(rbi->ring_buffer->pending_send_sz); + /* If the other end is not blocked on write don't bother. */ + if (pending_sz == 0) + return; + + cur_write_sz = hv_get_bytes_to_write(rbi); + + if (cur_write_sz < pending_sz) + return; + + cached_write_sz = hv_get_cached_bytes_to_write(rbi); + if (cached_write_sz < pending_sz) + vmbus_setevent(channel); + + return; +} + +static inline void +init_cached_read_index(struct vmbus_channel *channel) +{ + struct hv_ring_buffer_info *rbi = &channel->inbound; + + rbi->cached_read_index = rbi->ring_buffer->read_index; } /* * An API to support in-place processing of incoming VMBUS packets. 
*/ - -/* Get data payload associated with descriptor */ -static inline void *hv_pkt_data(const struct vmpacket_descriptor *desc) -{ - return (void *)((unsigned long)desc + (desc->offset8 << 3)); -} - -/* Get data size associated with descriptor */ -static inline u32 hv_pkt_datalen(const struct vmpacket_descriptor *desc) -{ - return (desc->len8 << 3) - (desc->offset8 << 3); -} - - -struct vmpacket_descriptor * -hv_pkt_iter_first_raw(struct vmbus_channel *channel); - -struct vmpacket_descriptor * -hv_pkt_iter_first(struct vmbus_channel *channel); - -struct vmpacket_descriptor * -__hv_pkt_iter_next(struct vmbus_channel *channel, - const struct vmpacket_descriptor *pkt, - bool copy); - -void hv_pkt_iter_close(struct vmbus_channel *channel); +#define VMBUS_PKT_TRAILER 8 static inline struct vmpacket_descriptor * -hv_pkt_iter_next_pkt(struct vmbus_channel *channel, - const struct vmpacket_descriptor *pkt, - bool copy) +get_next_pkt_raw(struct vmbus_channel *channel) { - struct vmpacket_descriptor *nxt; + struct hv_ring_buffer_info *ring_info = &channel->inbound; + u32 priv_read_loc = ring_info->priv_read_index; + void *ring_buffer = hv_get_ring_buffer(ring_info); + u32 dsize = ring_info->ring_datasize; + /* + * delta is the difference between what is available to read and + * what was already consumed in place. We commit read index after + * the whole batch is processed. + */ + u32 delta = priv_read_loc >= ring_info->ring_buffer->read_index ? + priv_read_loc - ring_info->ring_buffer->read_index : + (dsize - ring_info->ring_buffer->read_index) + priv_read_loc; + u32 bytes_avail_toread = (hv_get_bytes_to_read(ring_info) - delta); - nxt = __hv_pkt_iter_next(channel, pkt, copy); - if (!nxt) - hv_pkt_iter_close(channel); + if (bytes_avail_toread < sizeof(struct vmpacket_descriptor)) + return NULL; - return nxt; + return ring_buffer + priv_read_loc; } /* - * Get next packet descriptor without copying it out of the ring buffer - * If at end of list, return NULL and update host. 
+ * A helper function to step through packets "in-place" + * This API is to be called after each successful call + * get_next_pkt_raw(). */ -static inline struct vmpacket_descriptor * -hv_pkt_iter_next_raw(struct vmbus_channel *channel, - const struct vmpacket_descriptor *pkt) +static inline void put_pkt_raw(struct vmbus_channel *channel, + struct vmpacket_descriptor *desc) { - return hv_pkt_iter_next_pkt(channel, pkt, false); + struct hv_ring_buffer_info *ring_info = &channel->inbound; + u32 packetlen = desc->len8 << 3; + u32 dsize = ring_info->ring_datasize; + + /* + * Include the packet trailer. + */ + ring_info->priv_read_index += packetlen + VMBUS_PKT_TRAILER; + ring_info->priv_read_index %= dsize; } /* - * Get next packet descriptor from iterator - * If at end of list, return NULL and update host. + * This call commits the read index and potentially signals the host. + * Here is the pattern for using the "in-place" consumption APIs: + * + * init_cached_read_index(); + * + * while (get_next_pkt_raw() { + * process the packet "in-place"; + * put_pkt_raw(); + * } + * if (packets processed in place) + * commit_rd_index(); */ -static inline struct vmpacket_descriptor * -hv_pkt_iter_next(struct vmbus_channel *channel, - const struct vmpacket_descriptor *pkt) +static inline void commit_rd_index(struct vmbus_channel *channel) { - return hv_pkt_iter_next_pkt(channel, pkt, true); + struct hv_ring_buffer_info *ring_info = &channel->inbound; + /* + * Make sure all reads are done before we update the read index since + * the writer may start writing to the read area once the read index + * is updated. + */ + virt_rmb(); + ring_info->ring_buffer->read_index = ring_info->priv_read_index; + + hv_signal_on_read(channel); } -#define foreach_vmbus_pkt(pkt, channel) \ - for (pkt = hv_pkt_iter_first(channel); pkt; \ - pkt = hv_pkt_iter_next(channel, pkt)) - -/* - * Interface for passing data between SR-IOV PF and VF drivers. 
The VF driver - * sends requests to read and write blocks. Each block must be 128 bytes or - * smaller. Optionally, the VF driver can register a callback function which - * will be invoked when the host says that one or more of the first 64 block - * IDs is "invalid" which means that the VF driver should reread them. - */ -#define HV_CONFIG_BLOCK_SIZE_MAX 128 - -int hyperv_read_cfg_blk(struct pci_dev *dev, void *buf, unsigned int buf_len, - unsigned int block_id, unsigned int *bytes_returned); -int hyperv_write_cfg_blk(struct pci_dev *dev, void *buf, unsigned int len, - unsigned int block_id); -int hyperv_reg_block_invalidate(struct pci_dev *dev, void *context, - void (*block_invalidate)(void *context, - u64 block_mask)); - -struct hyperv_pci_block_ops { - int (*read_block)(struct pci_dev *dev, void *buf, unsigned int buf_len, - unsigned int block_id, unsigned int *bytes_returned); - int (*write_block)(struct pci_dev *dev, void *buf, unsigned int len, - unsigned int block_id); - int (*reg_blk_invalidate)(struct pci_dev *dev, void *context, - void (*block_invalidate)(void *context, - u64 block_mask)); -}; - -extern struct hyperv_pci_block_ops hvpci_block_ops; - -static inline unsigned long virt_to_hvpfn(void *addr) -{ - phys_addr_t paddr; - - if (is_vmalloc_addr(addr)) - paddr = page_to_phys(vmalloc_to_page(addr)) + - offset_in_page(addr); - else - paddr = __pa(addr); - - return paddr >> HV_HYP_PAGE_SHIFT; -} - -#define NR_HV_HYP_PAGES_IN_PAGE (PAGE_SIZE / HV_HYP_PAGE_SIZE) -#define offset_in_hvpage(ptr) ((unsigned long)(ptr) & ~HV_HYP_PAGE_MASK) -#define HVPFN_UP(x) (((x) + HV_HYP_PAGE_SIZE-1) >> HV_HYP_PAGE_SHIFT) -#define HVPFN_DOWN(x) ((x) >> HV_HYP_PAGE_SHIFT) -#define page_to_hvpfn(page) (page_to_pfn(page) * NR_HV_HYP_PAGES_IN_PAGE) #endif /* _HYPERV_H */ diff --git a/include/linux/hypervisor.h b/include/linux/hypervisor.h index fc08b433c8..3fa5ef2b37 100644 --- a/include/linux/hypervisor.h +++ b/include/linux/hypervisor.h @@ -1,4 +1,3 @@ -/* 
SPDX-License-Identifier: GPL-2.0 */ #ifndef __LINUX_HYPEVISOR_H #define __LINUX_HYPEVISOR_H @@ -7,29 +6,12 @@ * Juergen Gross */ -#ifdef CONFIG_X86 - -#include -#include - -static inline void hypervisor_pin_vcpu(int cpu) -{ - x86_platform.hyper.pin_vcpu(cpu); -} - -#else /* !CONFIG_X86 */ - -#include - +#ifdef CONFIG_HYPERVISOR_GUEST +#include +#else static inline void hypervisor_pin_vcpu(int cpu) { } - -static inline bool jailhouse_paravirt(void) -{ - return of_find_compatible_node(NULL, NULL, "jailhouse,cell"); -} - -#endif /* !CONFIG_X86 */ +#endif #endif /* __LINUX_HYPEVISOR_H */ diff --git a/include/linux/i2c-algo-bit.h b/include/linux/i2c-algo-bit.h index 7fd5575a36..63904ba688 100644 --- a/include/linux/i2c-algo-bit.h +++ b/include/linux/i2c-algo-bit.h @@ -1,17 +1,30 @@ -/* SPDX-License-Identifier: GPL-2.0+ */ -/* - * i2c-algo-bit.h: i2c driver algorithms for bit-shift adapters - * - * Copyright (C) 1995-99 Simon G. Vogl - * With some changes from Kyösti Mälkki and even - * Frodo Looijaard - */ +/* ------------------------------------------------------------------------- */ +/* i2c-algo-bit.h i2c driver algorithms for bit-shift adapters */ +/* ------------------------------------------------------------------------- */ +/* Copyright (C) 1995-99 Simon G. Vogl + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, + MA 02110-1301 USA. 
*/ +/* ------------------------------------------------------------------------- */ + +/* With some changes from Kyösti Mälkki and even + Frodo Looijaard */ #ifndef _LINUX_I2C_ALGO_BIT_H #define _LINUX_I2C_ALGO_BIT_H -#include - /* --- Defines for bit-adapters --------------------------------------- */ /* * This struct contains the hw-dependent functions of bit-style adapters to @@ -33,7 +46,6 @@ struct i2c_algo_bit_data { minimum 5 us for standard-mode I2C and SMBus, maximum 50 us for SMBus */ int timeout; /* in jiffies */ - bool can_do_atomic; /* callbacks don't sleep, we can be atomic */ }; int i2c_bit_add_bus(struct i2c_adapter *); diff --git a/include/linux/i2c-algo-pca.h b/include/linux/i2c-algo-pca.h index 7c522fdd9e..a3c3ecd59f 100644 --- a/include/linux/i2c-algo-pca.h +++ b/include/linux/i2c-algo-pca.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_I2C_ALGO_PCA_H #define _LINUX_I2C_ALGO_PCA_H @@ -53,20 +52,6 @@ #define I2C_PCA_CON_SI 0x08 /* Serial Interrupt */ #define I2C_PCA_CON_CR 0x07 /* Clock Rate (MASK) */ -/** - * struct pca_i2c_bus_settings - The configured PCA i2c bus settings - * @mode: Configured i2c bus mode - * @tlow: Configured SCL LOW period - * @thi: Configured SCL HIGH period - * @clock_freq: The configured clock frequency - */ -struct pca_i2c_bus_settings { - int mode; - int tlow; - int thi; - int clock_freq; -}; - struct i2c_algo_pca_data { void *data; /* private low level data */ void (*write_byte) (void *data, int reg, int val); @@ -78,7 +63,6 @@ struct i2c_algo_pca_data { * For PCA9665, use the frequency you want here. 
*/ unsigned int i2c_clock; unsigned int chip; - struct pca_i2c_bus_settings bus_settings; }; int i2c_pca_add_bus(struct i2c_adapter *); diff --git a/include/linux/i2c-algo-pcf.h b/include/linux/i2c-algo-pcf.h index 696e7de83c..538e8f41a3 100644 --- a/include/linux/i2c-algo-pcf.h +++ b/include/linux/i2c-algo-pcf.h @@ -1,11 +1,23 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ /* ------------------------------------------------------------------------- */ /* adap-pcf.h i2c driver algorithms for PCF8584 adapters */ /* ------------------------------------------------------------------------- */ /* Copyright (C) 1995-97 Simon G. Vogl 1998-99 Hans Berglund - */ + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, + MA 02110-1301 USA. */ /* ------------------------------------------------------------------------- */ /* With some changes from Kyösti Mälkki and even diff --git a/include/linux/i2c-dev.h b/include/linux/i2c-dev.h index 4c86fce30a..79727144c5 100644 --- a/include/linux/i2c-dev.h +++ b/include/linux/i2c-dev.h @@ -1,10 +1,23 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ /* i2c-dev.h - i2c-bus driver, char device interface Copyright (C) 1995-97 Simon G. 
Vogl Copyright (C) 1998-99 Frodo Looijaard + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, + MA 02110-1301 USA. */ #ifndef _LINUX_I2C_DEV_H #define _LINUX_I2C_DEV_H diff --git a/include/linux/i2c-gpio.h b/include/linux/i2c-gpio.h new file mode 100644 index 0000000000..c1bcb1f1d7 --- /dev/null +++ b/include/linux/i2c-gpio.h @@ -0,0 +1,38 @@ +/* + * i2c-gpio interface to platform code + * + * Copyright (C) 2007 Atmel Corporation + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ +#ifndef _LINUX_I2C_GPIO_H +#define _LINUX_I2C_GPIO_H + +/** + * struct i2c_gpio_platform_data - Platform-dependent data for i2c-gpio + * @sda_pin: GPIO pin ID to use for SDA + * @scl_pin: GPIO pin ID to use for SCL + * @udelay: signal toggle delay. SCL frequency is (500 / udelay) kHz + * @timeout: clock stretching timeout in jiffies. If the slave keeps + * SCL low for longer than this, the transfer will time out. + * @sda_is_open_drain: SDA is configured as open drain, i.e. the pin + * isn't actively driven high when setting the output value high. + * gpio_get_value() must return the actual pin state even if the + * pin is configured as an output. + * @scl_is_open_drain: SCL is set up as open drain. 
Same requirements + * as for sda_is_open_drain apply. + * @scl_is_output_only: SCL output drivers cannot be turned off. + */ +struct i2c_gpio_platform_data { + unsigned int sda_pin; + unsigned int scl_pin; + int udelay; + int timeout; + unsigned int sda_is_open_drain:1; + unsigned int scl_is_open_drain:1; + unsigned int scl_is_output_only:1; +}; + +#endif /* _LINUX_I2C_GPIO_H */ diff --git a/include/linux/i2c-mux-gpio.h b/include/linux/i2c-mux-gpio.h new file mode 100644 index 0000000000..4406108201 --- /dev/null +++ b/include/linux/i2c-mux-gpio.h @@ -0,0 +1,43 @@ +/* + * i2c-mux-gpio interface to platform code + * + * Peter Korsgaard + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#ifndef _LINUX_I2C_MUX_GPIO_H +#define _LINUX_I2C_MUX_GPIO_H + +/* MUX has no specific idle mode */ +#define I2C_MUX_GPIO_NO_IDLE ((unsigned)-1) + +/** + * struct i2c_mux_gpio_platform_data - Platform-dependent data for i2c-mux-gpio + * @parent: Parent I2C bus adapter number + * @base_nr: Base I2C bus number to number adapters from or zero for dynamic + * @values: Array of bitmasks of GPIO settings (low/high) for each + * position + * @n_values: Number of multiplexer positions (busses to instantiate) + * @classes: Optional I2C auto-detection classes + * @gpio_chip: Optional GPIO chip name; if set, GPIO pin numbers are given + * relative to the base GPIO number of that chip + * @gpios: Array of GPIO numbers used to control MUX + * @n_gpios: Number of GPIOs used to control MUX + * @idle: Bitmask to write to MUX when idle or GPIO_I2CMUX_NO_IDLE if not used + */ +struct i2c_mux_gpio_platform_data { + int parent; + int base_nr; + const unsigned *values; + int n_values; + const unsigned *classes; + char *gpio_chip; + const unsigned *gpios; + int n_gpios; + unsigned idle; +}; + +#endif /* _LINUX_I2C_MUX_GPIO_H */ diff --git 
a/include/linux/i2c-mux-pinctrl.h b/include/linux/i2c-mux-pinctrl.h new file mode 100644 index 0000000000..a65c86429e --- /dev/null +++ b/include/linux/i2c-mux-pinctrl.h @@ -0,0 +1,41 @@ +/* + * i2c-mux-pinctrl platform data + * + * Copyright (c) 2012, NVIDIA CORPORATION. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ + +#ifndef _LINUX_I2C_MUX_PINCTRL_H +#define _LINUX_I2C_MUX_PINCTRL_H + +/** + * struct i2c_mux_pinctrl_platform_data - Platform data for i2c-mux-pinctrl + * @parent_bus_num: Parent I2C bus number + * @base_bus_num: Base I2C bus number for the child busses. 0 for dynamic. + * @bus_count: Number of child busses. Also the number of elements in + * @pinctrl_states + * @pinctrl_states: The names of the pinctrl state to select for each child bus + * @pinctrl_state_idle: The pinctrl state to select when no child bus is being + * accessed. If NULL, the most recently used pinctrl state will be left + * selected. 
+ */ +struct i2c_mux_pinctrl_platform_data { + int parent_bus_num; + int base_bus_num; + int bus_count; + const char **pinctrl_states; + const char *pinctrl_state_idle; +}; + +#endif diff --git a/include/linux/i2c-mux.h b/include/linux/i2c-mux.h index 98ef73b7c8..bd74d5706f 100644 --- a/include/linux/i2c-mux.h +++ b/include/linux/i2c-mux.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * * i2c-mux.h - functions for the i2c-bus mux support @@ -6,6 +5,21 @@ * Copyright (c) 2008-2009 Rodolfo Giometti * Copyright (c) 2008-2009 Eurotech S.p.A. * Michael Lawnick + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, + * MA 02110-1301 USA. */ #ifndef _LINUX_I2C_MUX_H @@ -29,7 +43,7 @@ struct i2c_mux_core { int num_adapters; int max_adapters; - struct i2c_adapter *adapter[]; + struct i2c_adapter *adapter[0]; }; struct i2c_mux_core *i2c_mux_alloc(struct i2c_adapter *parent, diff --git a/include/linux/i2c-ocores.h b/include/linux/i2c-ocores.h new file mode 100644 index 0000000000..01edd96fe1 --- /dev/null +++ b/include/linux/i2c-ocores.h @@ -0,0 +1,23 @@ +/* + * i2c-ocores.h - definitions for the i2c-ocores interface + * + * Peter Korsgaard + * + * This file is licensed under the terms of the GNU General Public License + * version 2. 
This program is licensed "as is" without any warranty of any + * kind, whether express or implied. + */ + +#ifndef _LINUX_I2C_OCORES_H +#define _LINUX_I2C_OCORES_H + +struct ocores_i2c_platform_data { + u32 reg_shift; /* register offset shift value */ + u32 reg_io_width; /* register io read/write width */ + u32 clock_khz; /* input clock in kHz */ + bool big_endian; /* registers are big endian */ + u8 num_devices; /* number of devices in the devices list */ + struct i2c_board_info const *devices; /* devices connected to the bus */ +}; + +#endif /* _LINUX_I2C_OCORES_H */ diff --git a/include/linux/i2c-omap.h b/include/linux/i2c-omap.h new file mode 100644 index 0000000000..babe0cf6d5 --- /dev/null +++ b/include/linux/i2c-omap.h @@ -0,0 +1,38 @@ +#ifndef __I2C_OMAP_H__ +#define __I2C_OMAP_H__ + +#include + +/* + * Version 2 of the I2C peripheral unit has a different register + * layout and extra registers. The ID register in the V2 peripheral + * unit on the OMAP4430 reports the same ID as the V1 peripheral + * unit on the OMAP3530, so we must inform the driver which IP + * version we know it is running on from platform / cpu-specific + * code using these constants in the hwmod class definition. 
+ */ + +#define OMAP_I2C_IP_VERSION_1 1 +#define OMAP_I2C_IP_VERSION_2 2 + +/* struct omap_i2c_bus_platform_data .flags meanings */ + +#define OMAP_I2C_FLAG_NO_FIFO BIT(0) +#define OMAP_I2C_FLAG_SIMPLE_CLOCK BIT(1) +#define OMAP_I2C_FLAG_16BIT_DATA_REG BIT(2) +#define OMAP_I2C_FLAG_ALWAYS_ARMXOR_CLK BIT(5) +#define OMAP_I2C_FLAG_FORCE_19200_INT_CLK BIT(6) +/* how the CPU address bus must be translated for I2C unit access */ +#define OMAP_I2C_FLAG_BUS_SHIFT_NONE 0 +#define OMAP_I2C_FLAG_BUS_SHIFT_1 BIT(7) +#define OMAP_I2C_FLAG_BUS_SHIFT_2 BIT(8) +#define OMAP_I2C_FLAG_BUS_SHIFT__SHIFT 7 + +struct omap_i2c_bus_platform_data { + u32 clkrate; + u32 rev; + u32 flags; + void (*set_mpu_wkup_lat)(struct device *dev, long set); +}; + +#endif diff --git a/include/linux/i2c-pca-platform.h b/include/linux/i2c-pca-platform.h new file mode 100644 index 0000000000..aba33759de --- /dev/null +++ b/include/linux/i2c-pca-platform.h @@ -0,0 +1,12 @@ +#ifndef I2C_PCA9564_PLATFORM_H +#define I2C_PCA9564_PLATFORM_H + +struct i2c_pca9564_pf_platform_data { + int gpio; /* pin to reset chip. driver will work when + * not supplied (negative value), but it + * cannot exit some error conditions then */ + int i2c_clock_speed; /* values are defined in linux/i2c-algo-pca.h */ + int timeout; /* timeout in jiffies */ +}; + +#endif /* I2C_PCA9564_PLATFORM_H */ diff --git a/include/linux/i2c-pnx.h b/include/linux/i2c-pnx.h new file mode 100644 index 0000000000..5388326fbb --- /dev/null +++ b/include/linux/i2c-pnx.h @@ -0,0 +1,38 @@ +/* + * Header file for I2C support on PNX010x/4008. + * + * Author: Dennis Kovalev + * + * 2004-2006 (c) MontaVista Software, Inc. This file is licensed under + * the terms of the GNU General Public License version 2. This program + * is licensed "as is" without any warranty of any kind, whether express + * or implied. 
+ */ + +#ifndef __I2C_PNX_H__ +#define __I2C_PNX_H__ + +struct platform_device; +struct clk; + +struct i2c_pnx_mif { + int ret; /* Return value */ + int mode; /* Interface mode */ + struct completion complete; /* I/O completion */ + struct timer_list timer; /* Timeout */ + u8 * buf; /* Data buffer */ + int len; /* Length of data buffer */ + int order; /* RX Bytes to order via TX */ +}; + +struct i2c_pnx_algo_data { + void __iomem *ioaddr; + struct i2c_pnx_mif mif; + int last; + struct clk *clk; + struct i2c_adapter adapter; + int irq; + u32 timeout; +}; + +#endif /* __I2C_PNX_H__ */ diff --git a/include/linux/i2c-pxa.h b/include/linux/i2c-pxa.h new file mode 100644 index 0000000000..41dcdfe7f6 --- /dev/null +++ b/include/linux/i2c-pxa.h @@ -0,0 +1,17 @@ +#ifndef _LINUX_I2C_ALGO_PXA_H +#define _LINUX_I2C_ALGO_PXA_H + +typedef enum i2c_slave_event_e { + I2C_SLAVE_EVENT_START_READ, + I2C_SLAVE_EVENT_START_WRITE, + I2C_SLAVE_EVENT_STOP +} i2c_slave_event_t; + +struct i2c_slave_client { + void *data; + void (*event)(void *ptr, i2c_slave_event_t event); + int (*read) (void *ptr); + void (*write)(void *ptr, unsigned int val); +}; + +#endif /* _LINUX_I2C_ALGO_PXA_H */ diff --git a/include/linux/i2c-smbus.h b/include/linux/i2c-smbus.h index 1ef421818d..c2e3324f94 100644 --- a/include/linux/i2c-smbus.h +++ b/include/linux/i2c-smbus.h @@ -1,8 +1,22 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * i2c-smbus.h - SMBus extensions to the I2C protocol * - * Copyright (C) 2010-2019 Jean Delvare + * Copyright (C) 2010 Jean Delvare + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. 
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, + * MA 02110-1301 USA. */ #ifndef _LINUX_I2C_SMBUS_H @@ -15,46 +29,52 @@ /** * i2c_smbus_alert_setup - platform data for the smbus_alert i2c client + * @alert_edge_triggered: whether the alert interrupt is edge (1) or level (0) + * triggered * @irq: IRQ number, if the smbus_alert driver should take care of interrupt * handling * * If irq is not specified, the smbus_alert driver doesn't take care of * interrupt handling. In that case it is up to the I2C bus driver to either * handle the interrupts or to poll for alerts. + * + * If irq is specified then it it crucial that alert_edge_triggered is + * properly set. 
*/ struct i2c_smbus_alert_setup { + unsigned int alert_edge_triggered:1; int irq; }; -struct i2c_client *i2c_new_smbus_alert_device(struct i2c_adapter *adapter, - struct i2c_smbus_alert_setup *setup); +struct i2c_client *i2c_setup_smbus_alert(struct i2c_adapter *adapter, + struct i2c_smbus_alert_setup *setup); int i2c_handle_smbus_alert(struct i2c_client *ara); -#if IS_ENABLED(CONFIG_I2C_SMBUS) && IS_ENABLED(CONFIG_OF) -int of_i2c_setup_smbus_alert(struct i2c_adapter *adap); -#else -static inline int of_i2c_setup_smbus_alert(struct i2c_adapter *adap) -{ - return 0; -} -#endif -#if IS_ENABLED(CONFIG_I2C_SMBUS) && IS_ENABLED(CONFIG_I2C_SLAVE) -struct i2c_client *i2c_new_slave_host_notify_device(struct i2c_adapter *adapter); -void i2c_free_slave_host_notify_device(struct i2c_client *client); -#else -static inline struct i2c_client *i2c_new_slave_host_notify_device(struct i2c_adapter *adapter) -{ - return ERR_PTR(-ENOSYS); -} -static inline void i2c_free_slave_host_notify_device(struct i2c_client *client) -{ -} -#endif +/** + * smbus_host_notify - internal structure used by the Host Notify mechanism. + * @adapter: the I2C adapter associated with this struct + * @work: worker used to schedule the IRQ in the slave device + * @lock: spinlock to check if a notification is already pending + * @pending: flag set when a notification is pending (any new notification will + * be rejected if pending is true) + * @payload: the actual payload of the Host Notify event + * @addr: the address of the slave device which raised the notification + * + * This struct needs to be allocated by i2c_setup_smbus_host_notify() and does + * not need to be freed. Internally, i2c_setup_smbus_host_notify() uses a + * managed resource to clean this up when the adapter get released. 
+ */ +struct smbus_host_notify { + struct i2c_adapter *adapter; + struct work_struct work; + spinlock_t lock; + bool pending; + u16 payload; + u8 addr; +}; -#if IS_ENABLED(CONFIG_I2C_SMBUS) && IS_ENABLED(CONFIG_DMI) -void i2c_register_spd(struct i2c_adapter *adap); -#else -static inline void i2c_register_spd(struct i2c_adapter *adap) { } -#endif +struct smbus_host_notify *i2c_setup_smbus_host_notify(struct i2c_adapter *adap); +int i2c_handle_smbus_host_notify(struct smbus_host_notify *host_notify, + unsigned short addr, unsigned int data); #endif /* _LINUX_I2C_SMBUS_H */ diff --git a/include/linux/i2c-xiic.h b/include/linux/i2c-xiic.h new file mode 100644 index 0000000000..4f9f2256a9 --- /dev/null +++ b/include/linux/i2c-xiic.h @@ -0,0 +1,43 @@ +/* + * i2c-xiic.h + * Copyright (c) 2009 Intel Corporation + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. + */ + +/* Supports: + * Xilinx IIC + */ + +#ifndef _LINUX_I2C_XIIC_H +#define _LINUX_I2C_XIIC_H + +/** + * struct xiic_i2c_platform_data - Platform data of the Xilinx I2C driver + * @num_devices: Number of devices that shall be added when the driver + * is probed. + * @devices: The actuall devices to add. + * + * This purpose of this platform data struct is to be able to provide a number + * of devices that should be added to the I2C bus. 
The reason is that sometimes + * the I2C board info is not enough, a new PCI board can for instance be + * plugged into a standard PC, and the bus number might be unknown at + * early init time. + */ +struct xiic_i2c_platform_data { + u8 num_devices; + struct i2c_board_info const *devices; +}; + +#endif /* _LINUX_I2C_XIIC_H */ diff --git a/include/linux/i2c.h b/include/linux/i2c.h index 2ce3efbe91..9570baa416 100644 --- a/include/linux/i2c.h +++ b/include/linux/i2c.h @@ -1,30 +1,41 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ -/* - * i2c.h - definitions for the Linux i2c bus interface - * Copyright (C) 1995-2000 Simon G. Vogl - * Copyright (C) 2013-2019 Wolfram Sang - * - * With some changes from Kyösti Mälkki and - * Frodo Looijaard - */ +/* ------------------------------------------------------------------------- */ +/* */ +/* i2c.h - definitions for the i2c-bus interface */ +/* */ +/* ------------------------------------------------------------------------- */ +/* Copyright (C) 1995-2000 Simon G. Vogl + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, + MA 02110-1301 USA. 
*/ +/* ------------------------------------------------------------------------- */ + +/* With some changes from Kyösti Mälkki and + Frodo Looijaard */ #ifndef _LINUX_I2C_H #define _LINUX_I2C_H -#include /* for acpi_handle */ #include #include /* for struct device */ #include /* for completion */ #include -#include -#include -#include /* for Host Notify IRQ */ #include /* for struct device_node */ #include /* for swab16 */ #include extern struct bus_type i2c_bus_type; extern struct device_type i2c_adapter_type; -extern struct device_type i2c_client_type; /* --- General options ------------------------------------------------ */ @@ -33,130 +44,56 @@ struct i2c_algorithm; struct i2c_adapter; struct i2c_client; struct i2c_driver; -struct i2c_device_identity; union i2c_smbus_data; struct i2c_board_info; enum i2c_slave_event; -typedef int (*i2c_slave_cb_t)(struct i2c_client *client, - enum i2c_slave_event event, u8 *val); - -/* I2C Frequency Modes */ -#define I2C_MAX_STANDARD_MODE_FREQ 100000 -#define I2C_MAX_FAST_MODE_FREQ 400000 -#define I2C_MAX_FAST_MODE_PLUS_FREQ 1000000 -#define I2C_MAX_TURBO_MODE_FREQ 1400000 -#define I2C_MAX_HIGH_SPEED_MODE_FREQ 3400000 -#define I2C_MAX_ULTRA_FAST_MODE_FREQ 5000000 +typedef int (*i2c_slave_cb_t)(struct i2c_client *, enum i2c_slave_event, u8 *); struct module; -struct property_entry; - -#if IS_ENABLED(CONFIG_I2C) -/* Return the Frequency mode string based on the bus frequency */ -const char *i2c_freq_mode_string(u32 bus_freq_hz); +#if defined(CONFIG_I2C) || defined(CONFIG_I2C_MODULE) /* * The master routines are the ones normally used to transmit data to devices * on a bus (or read from them). Apart from two basic transfer functions to * transmit one message at a time, a more complex version can be used to * transmit an arbitrary number of messages without interruption. - * @count must be less than 64k since msg.len is u16. + * @count must be be less than 64k since msg.len is u16. 
*/ -int i2c_transfer_buffer_flags(const struct i2c_client *client, - char *buf, int count, u16 flags); - -/** - * i2c_master_recv - issue a single I2C message in master receive mode - * @client: Handle to slave device - * @buf: Where to store data read from slave - * @count: How many bytes to read, must be less than 64k since msg.len is u16 - * - * Returns negative errno, or else the number of bytes read. - */ -static inline int i2c_master_recv(const struct i2c_client *client, - char *buf, int count) -{ - return i2c_transfer_buffer_flags(client, buf, count, I2C_M_RD); -}; - -/** - * i2c_master_recv_dmasafe - issue a single I2C message in master receive mode - * using a DMA safe buffer - * @client: Handle to slave device - * @buf: Where to store data read from slave, must be safe to use with DMA - * @count: How many bytes to read, must be less than 64k since msg.len is u16 - * - * Returns negative errno, or else the number of bytes read. - */ -static inline int i2c_master_recv_dmasafe(const struct i2c_client *client, - char *buf, int count) -{ - return i2c_transfer_buffer_flags(client, buf, count, - I2C_M_RD | I2C_M_DMA_SAFE); -}; - -/** - * i2c_master_send - issue a single I2C message in master transmit mode - * @client: Handle to slave device - * @buf: Data that will be written to the slave - * @count: How many bytes to write, must be less than 64k since msg.len is u16 - * - * Returns negative errno, or else the number of bytes written. 
- */ -static inline int i2c_master_send(const struct i2c_client *client, - const char *buf, int count) -{ - return i2c_transfer_buffer_flags(client, (char *)buf, count, 0); -}; - -/** - * i2c_master_send_dmasafe - issue a single I2C message in master transmit mode - * using a DMA safe buffer - * @client: Handle to slave device - * @buf: Data that will be written to the slave, must be safe to use with DMA - * @count: How many bytes to write, must be less than 64k since msg.len is u16 - * - * Returns negative errno, or else the number of bytes written. - */ -static inline int i2c_master_send_dmasafe(const struct i2c_client *client, - const char *buf, int count) -{ - return i2c_transfer_buffer_flags(client, (char *)buf, count, - I2C_M_DMA_SAFE); -}; +extern int i2c_master_send(const struct i2c_client *client, const char *buf, + int count); +extern int i2c_master_recv(const struct i2c_client *client, char *buf, + int count); /* Transfer num messages. */ -int i2c_transfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num); +extern int i2c_transfer(struct i2c_adapter *adap, struct i2c_msg *msgs, + int num); /* Unlocked flavor */ -int __i2c_transfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num); +extern int __i2c_transfer(struct i2c_adapter *adap, struct i2c_msg *msgs, + int num); /* This is the very generalized SMBus access routine. You probably do not want to use this, though; one of the functions below may be much easier, and probably just as fast. Note that we use i2c_adapter here, because you do not need a specific smbus adapter to call this function. 
*/ -s32 i2c_smbus_xfer(struct i2c_adapter *adapter, u16 addr, - unsigned short flags, char read_write, u8 command, - int protocol, union i2c_smbus_data *data); - -/* Unlocked flavor */ -s32 __i2c_smbus_xfer(struct i2c_adapter *adapter, u16 addr, - unsigned short flags, char read_write, u8 command, - int protocol, union i2c_smbus_data *data); +extern s32 i2c_smbus_xfer(struct i2c_adapter *adapter, u16 addr, + unsigned short flags, char read_write, u8 command, + int size, union i2c_smbus_data *data); /* Now follow the 'nice' access routines. These also document the calling conventions of i2c_smbus_xfer. */ -u8 i2c_smbus_pec(u8 crc, u8 *p, size_t count); -s32 i2c_smbus_read_byte(const struct i2c_client *client); -s32 i2c_smbus_write_byte(const struct i2c_client *client, u8 value); -s32 i2c_smbus_read_byte_data(const struct i2c_client *client, u8 command); -s32 i2c_smbus_write_byte_data(const struct i2c_client *client, - u8 command, u8 value); -s32 i2c_smbus_read_word_data(const struct i2c_client *client, u8 command); -s32 i2c_smbus_write_word_data(const struct i2c_client *client, - u8 command, u16 value); +extern s32 i2c_smbus_read_byte(const struct i2c_client *client); +extern s32 i2c_smbus_write_byte(const struct i2c_client *client, u8 value); +extern s32 i2c_smbus_read_byte_data(const struct i2c_client *client, + u8 command); +extern s32 i2c_smbus_write_byte_data(const struct i2c_client *client, + u8 command, u8 value); +extern s32 i2c_smbus_read_word_data(const struct i2c_client *client, + u8 command); +extern s32 i2c_smbus_write_word_data(const struct i2c_client *client, + u8 command, u16 value); static inline s32 i2c_smbus_read_word_swapped(const struct i2c_client *client, u8 command) @@ -174,49 +111,21 @@ i2c_smbus_write_word_swapped(const struct i2c_client *client, } /* Returns the number of read bytes */ -s32 i2c_smbus_read_block_data(const struct i2c_client *client, - u8 command, u8 *values); -s32 i2c_smbus_write_block_data(const struct i2c_client *client, - 
u8 command, u8 length, const u8 *values); +extern s32 i2c_smbus_read_block_data(const struct i2c_client *client, + u8 command, u8 *values); +extern s32 i2c_smbus_write_block_data(const struct i2c_client *client, + u8 command, u8 length, const u8 *values); /* Returns the number of read bytes */ -s32 i2c_smbus_read_i2c_block_data(const struct i2c_client *client, - u8 command, u8 length, u8 *values); -s32 i2c_smbus_write_i2c_block_data(const struct i2c_client *client, - u8 command, u8 length, const u8 *values); -s32 i2c_smbus_read_i2c_block_data_or_emulated(const struct i2c_client *client, - u8 command, u8 length, - u8 *values); -int i2c_get_device_id(const struct i2c_client *client, - struct i2c_device_identity *id); +extern s32 i2c_smbus_read_i2c_block_data(const struct i2c_client *client, + u8 command, u8 length, u8 *values); +extern s32 i2c_smbus_write_i2c_block_data(const struct i2c_client *client, + u8 command, u8 length, + const u8 *values); +extern s32 +i2c_smbus_read_i2c_block_data_or_emulated(const struct i2c_client *client, + u8 command, u8 length, u8 *values); #endif /* I2C */ -/** - * struct i2c_device_identity - i2c client device identification - * @manufacturer_id: 0 - 4095, database maintained by NXP - * @part_id: 0 - 511, according to manufacturer - * @die_revision: 0 - 7, according to manufacturer - */ -struct i2c_device_identity { - u16 manufacturer_id; -#define I2C_DEVICE_ID_NXP_SEMICONDUCTORS 0 -#define I2C_DEVICE_ID_NXP_SEMICONDUCTORS_1 1 -#define I2C_DEVICE_ID_NXP_SEMICONDUCTORS_2 2 -#define I2C_DEVICE_ID_NXP_SEMICONDUCTORS_3 3 -#define I2C_DEVICE_ID_RAMTRON_INTERNATIONAL 4 -#define I2C_DEVICE_ID_ANALOG_DEVICES 5 -#define I2C_DEVICE_ID_STMICROELECTRONICS 6 -#define I2C_DEVICE_ID_ON_SEMICONDUCTOR 7 -#define I2C_DEVICE_ID_SPRINTEK_CORPORATION 8 -#define I2C_DEVICE_ID_ESPROS_PHOTONICS_AG 9 -#define I2C_DEVICE_ID_FUJITSU_SEMICONDUCTOR 10 -#define I2C_DEVICE_ID_FLIR 11 -#define I2C_DEVICE_ID_O2MICRO 12 -#define I2C_DEVICE_ID_ATMEL 13 -#define 
I2C_DEVICE_ID_NONE 0xffff - u16 part_id; - u8 die_revision; -}; - enum i2c_alert_protocol { I2C_PROTOCOL_SMBUS_ALERT, I2C_PROTOCOL_SMBUS_HOST_NOTIFY, @@ -225,8 +134,8 @@ enum i2c_alert_protocol { /** * struct i2c_driver - represent an I2C device driver * @class: What kind of i2c device we instantiate (for detect) - * @probe: Callback for device binding - soon to be deprecated - * @probe_new: New callback for device binding + * @attach_adapter: Callback for bus addition (deprecated) + * @probe: Callback for device binding * @remove: Callback for device unbinding * @shutdown: Callback for device shutdown * @alert: Alert callback, for example for the SMBus alert protocol @@ -260,17 +169,17 @@ enum i2c_alert_protocol { struct i2c_driver { unsigned int class; - /* Standard driver model interfaces */ - int (*probe)(struct i2c_client *client, const struct i2c_device_id *id); - int (*remove)(struct i2c_client *client); - - /* New driver model interface to aid the seamless removal of the - * current probe()'s, more commonly unused than used second parameter. + /* Notifies the driver that a new bus has appeared. You should avoid + * using this, it will be removed in a near future. */ - int (*probe_new)(struct i2c_client *client); + int (*attach_adapter)(struct i2c_adapter *) __deprecated; + + /* Standard driver model interfaces */ + int (*probe)(struct i2c_client *, const struct i2c_device_id *); + int (*remove)(struct i2c_client *); /* driver model interfaces that don't relate to enumeration */ - void (*shutdown)(struct i2c_client *client); + void (*shutdown)(struct i2c_client *); /* Alert callback, for example for the SMBus alert protocol. * The format and meaning of the data value depends on the protocol. @@ -279,7 +188,7 @@ struct i2c_driver { * For the SMBus Host Notify protocol, the data corresponds to the * 16-bit payload data reported by the slave device acting as master. 
*/ - void (*alert)(struct i2c_client *client, enum i2c_alert_protocol protocol, + void (*alert)(struct i2c_client *, enum i2c_alert_protocol protocol, unsigned int data); /* a ioctl like command that can be used to perform specific functions @@ -291,7 +200,7 @@ struct i2c_driver { const struct i2c_device_id *id_table; /* Device detection callback for automatic device creation */ - int (*detect)(struct i2c_client *client, struct i2c_board_info *info); + int (*detect)(struct i2c_client *, struct i2c_board_info *); const unsigned short *address_list; struct list_head clients; }; @@ -299,20 +208,18 @@ struct i2c_driver { /** * struct i2c_client - represent an I2C slave device - * @flags: see I2C_CLIENT_* for possible flags + * @flags: I2C_CLIENT_TEN indicates the device uses a ten bit chip address; + * I2C_CLIENT_PEC indicates it uses SMBus Packet Error Checking * @addr: Address used on the I2C bus connected to the parent adapter. * @name: Indicates the type of the device, usually a chip name that's * generic enough to hide second-sourcing and compatible revisions. * @adapter: manages the bus segment hosting this I2C device * @dev: Driver model device node for the slave. - * @init_irq: IRQ that was set at initialization * @irq: indicates the IRQ generated by this device (if any) * @detected: member of an i2c_driver.clients list or i2c-core's * userspace_devices list * @slave_cb: Callback when I2C slave mode of an adapter is used. The adapter * calls it to pass on slave events to the slave driver. - * @devres_group_id: id of the devres group that will be created for resources - * acquired when probing this device. * * An i2c_client identifies a single device (i.e. chip) connected to an * i2c bus. 
The behaviour exposed to Linux is defined by the driver @@ -320,49 +227,37 @@ struct i2c_driver { */ struct i2c_client { unsigned short flags; /* div., see below */ -#define I2C_CLIENT_PEC 0x04 /* Use Packet Error Checking */ -#define I2C_CLIENT_TEN 0x10 /* we have a ten bit chip address */ - /* Must equal I2C_M_TEN below */ -#define I2C_CLIENT_SLAVE 0x20 /* we are the slave */ -#define I2C_CLIENT_HOST_NOTIFY 0x40 /* We want to use I2C host notify */ -#define I2C_CLIENT_WAKE 0x80 /* for board_info; true iff can wake */ -#define I2C_CLIENT_SCCB 0x9000 /* Use Omnivision SCCB protocol */ - /* Must match I2C_M_STOP|IGNORE_NAK */ - unsigned short addr; /* chip address - NOTE: 7bit */ /* addresses are stored in the */ /* _LOWER_ 7 bits */ char name[I2C_NAME_SIZE]; struct i2c_adapter *adapter; /* the adapter we sit on */ struct device dev; /* the device structure */ - int init_irq; /* irq set at initialization */ int irq; /* irq issued by device */ struct list_head detected; #if IS_ENABLED(CONFIG_I2C_SLAVE) i2c_slave_cb_t slave_cb; /* callback for slave mode */ #endif - void *devres_group_id; /* ID of probe devres group */ }; #define to_i2c_client(d) container_of(d, struct i2c_client, dev) -struct i2c_adapter *i2c_verify_adapter(struct device *dev); -const struct i2c_device_id *i2c_match_id(const struct i2c_device_id *id, - const struct i2c_client *client); +extern struct i2c_client *i2c_verify_client(struct device *dev); +extern struct i2c_adapter *i2c_verify_adapter(struct device *dev); static inline struct i2c_client *kobj_to_i2c_client(struct kobject *kobj) { - struct device * const dev = kobj_to_dev(kobj); + struct device * const dev = container_of(kobj, struct device, kobj); return to_i2c_client(dev); } -static inline void *i2c_get_clientdata(const struct i2c_client *client) +static inline void *i2c_get_clientdata(const struct i2c_client *dev) { - return dev_get_drvdata(&client->dev); + return dev_get_drvdata(&dev->dev); } -static inline void 
i2c_set_clientdata(struct i2c_client *client, void *data) +static inline void i2c_set_clientdata(struct i2c_client *dev, void *data) { - dev_set_drvdata(&client->dev, data); + dev_set_drvdata(&dev->dev, data); } /* I2C slave support */ @@ -376,17 +271,14 @@ enum i2c_slave_event { I2C_SLAVE_STOP, }; -int i2c_slave_register(struct i2c_client *client, i2c_slave_cb_t slave_cb); -int i2c_slave_unregister(struct i2c_client *client); -bool i2c_detect_slave_mode(struct device *dev); +extern int i2c_slave_register(struct i2c_client *client, i2c_slave_cb_t slave_cb); +extern int i2c_slave_unregister(struct i2c_client *client); static inline int i2c_slave_event(struct i2c_client *client, enum i2c_slave_event event, u8 *val) { return client->slave_cb(client, event, val); } -#else -static inline bool i2c_detect_slave_mode(struct device *dev) { return false; } #endif /** @@ -394,13 +286,10 @@ static inline bool i2c_detect_slave_mode(struct device *dev) { return false; } * @type: chip type, to initialize i2c_client.name * @flags: to initialize i2c_client.flags * @addr: stored in i2c_client.addr - * @dev_name: Overrides the default - dev_name if set * @platform_data: stored in i2c_client.dev.platform_data + * @archdata: copied into i2c_client.dev.archdata * @of_node: pointer to OpenFirmware device node * @fwnode: device node supplied by the platform firmware - * @swnode: software node for the device - * @resources: resources associated with the device - * @num_resources: number of resources in the @resources array * @irq: stored in i2c_client.irq * * I2C doesn't actually support hardware probing, although controllers and @@ -412,19 +301,16 @@ static inline bool i2c_detect_slave_mode(struct device *dev) { return false; } * that are present. This information is used to grow the driver model tree. * For mainboards this is done statically using i2c_register_board_info(); * bus numbers identify adapters that aren't yet available. 
For add-on boards, - * i2c_new_client_device() does this dynamically with the adapter already known. + * i2c_new_device() does this dynamically with the adapter already known. */ struct i2c_board_info { char type[I2C_NAME_SIZE]; unsigned short flags; unsigned short addr; - const char *dev_name; void *platform_data; + struct dev_archdata *archdata; struct device_node *of_node; struct fwnode_handle *fwnode; - const struct software_node *swnode; - const struct resource *resources; - unsigned int num_resources; int irq; }; @@ -442,14 +328,13 @@ struct i2c_board_info { .type = dev_type, .addr = (dev_addr) -#if IS_ENABLED(CONFIG_I2C) -/* - * Add-on boards should register/unregister their devices; e.g. a board +#if defined(CONFIG_I2C) || defined(CONFIG_I2C_MODULE) +/* Add-on boards should register/unregister their devices; e.g. a board * with integrated I2C, a config eeprom, sensors, and a codec that's * used in conjunction with the primary hardware. */ -struct i2c_client * -i2c_new_client_device(struct i2c_adapter *adap, struct i2c_board_info const *info); +extern struct i2c_client * +i2c_new_device(struct i2c_adapter *adap, struct i2c_board_info const *info); /* If you don't know the exact address of an I2C device, use this variant * instead, which can probe for device presence in a list of possible @@ -457,34 +342,27 @@ i2c_new_client_device(struct i2c_adapter *adap, struct i2c_board_info const *inf * it must return 1 on successful probe, 0 otherwise. If it is not provided, * a default probing method is used. 
*/ -struct i2c_client * -i2c_new_scanned_device(struct i2c_adapter *adap, - struct i2c_board_info *info, - unsigned short const *addr_list, - int (*probe)(struct i2c_adapter *adap, unsigned short addr)); +extern struct i2c_client * +i2c_new_probed_device(struct i2c_adapter *adap, + struct i2c_board_info *info, + unsigned short const *addr_list, + int (*probe)(struct i2c_adapter *, unsigned short addr)); /* Common custom probe functions */ -int i2c_probe_func_quick_read(struct i2c_adapter *adap, unsigned short addr); +extern int i2c_probe_func_quick_read(struct i2c_adapter *, unsigned short addr); -struct i2c_client * -i2c_new_dummy_device(struct i2c_adapter *adapter, u16 address); +/* For devices that use several addresses, use i2c_new_dummy() to make + * client handles for the extra addresses. + */ +extern struct i2c_client * +i2c_new_dummy(struct i2c_adapter *adap, u16 address); -struct i2c_client * -devm_i2c_new_dummy_device(struct device *dev, struct i2c_adapter *adap, u16 address); +extern struct i2c_client * +i2c_new_secondary_device(struct i2c_client *client, + const char *name, + u16 default_addr); -struct i2c_client * -i2c_new_ancillary_device(struct i2c_client *client, - const char *name, - u16 default_addr); - -void i2c_unregister_device(struct i2c_client *client); - -struct i2c_client *i2c_verify_client(struct device *dev); -#else -static inline struct i2c_client *i2c_verify_client(struct device *dev) -{ - return NULL; -} +extern void i2c_unregister_device(struct i2c_client *); #endif /* I2C */ /* Mainboard arch_initcall() code should register all its I2C devices. @@ -492,7 +370,7 @@ static inline struct i2c_client *i2c_verify_client(struct device *dev) * Modules for add-on boards must use other calls. 
*/ #ifdef CONFIG_I2C_BOARDINFO -int +extern int i2c_register_board_info(int busnum, struct i2c_board_info const *info, unsigned n); #else @@ -509,15 +387,11 @@ i2c_register_board_info(int busnum, struct i2c_board_info const *info, * @master_xfer: Issue a set of i2c transactions to the given I2C adapter * defined by the msgs array, with num messages available to transfer via * the adapter specified by adap. - * @master_xfer_atomic: same as @master_xfer. Yet, only using atomic context - * so e.g. PMICs can be accessed very late before shutdown. Optional. * @smbus_xfer: Issue smbus transactions to the given I2C adapter. If this * is not present, then the bus layer will try and convert the SMBus calls * into I2C transfers instead. - * @smbus_xfer_atomic: same as @smbus_xfer. Yet, only using atomic context - * so e.g. PMICs can be accessed very late before shutdown. Optional. * @functionality: Return the flags that this algorithm/adapter pair supports - * from the ``I2C_FUNC_*`` flags. + * from the I2C_FUNC_* flags. * @reg_slave: Register given client to I2C slave mode of this adapter * @unreg_slave: Unregister given client from I2C slave mode of this adapter * @@ -526,39 +400,32 @@ i2c_register_board_info(int busnum, struct i2c_board_info const *info, * be addressed using the same bus algorithms - i.e. bit-banging or the PCF8584 * to name two of the most common. * - * The return codes from the ``master_xfer{_atomic}`` fields should indicate the - * type of error code that occurred during the transfer, as documented in the - * Kernel Documentation file Documentation/i2c/fault-codes.rst. + * The return codes from the @master_xfer field should indicate the type of + * error code that occurred during the transfer, as documented in the kernel + * Documentation file Documentation/i2c/fault-codes. */ struct i2c_algorithm { - /* - * If an adapter algorithm can't do I2C-level access, set master_xfer - * to NULL. If an adapter algorithm can do SMBus access, set - * smbus_xfer. 
If set to NULL, the SMBus protocol is simulated - * using common I2C messages. - * - * master_xfer should return the number of messages successfully - * processed, or a negative value on error - */ + /* If an adapter algorithm can't do I2C-level access, set master_xfer + to NULL. If an adapter algorithm can do SMBus access, set + smbus_xfer. If set to NULL, the SMBus protocol is simulated + using common I2C messages */ + /* master_xfer should return the number of messages successfully + processed, or a negative value on error */ int (*master_xfer)(struct i2c_adapter *adap, struct i2c_msg *msgs, int num); - int (*master_xfer_atomic)(struct i2c_adapter *adap, - struct i2c_msg *msgs, int num); - int (*smbus_xfer)(struct i2c_adapter *adap, u16 addr, - unsigned short flags, char read_write, - u8 command, int size, union i2c_smbus_data *data); - int (*smbus_xfer_atomic)(struct i2c_adapter *adap, u16 addr, - unsigned short flags, char read_write, - u8 command, int size, union i2c_smbus_data *data); + int (*smbus_xfer) (struct i2c_adapter *adap, u16 addr, + unsigned short flags, char read_write, + u8 command, int size, union i2c_smbus_data *data); /* To determine what the adapter supports */ - u32 (*functionality)(struct i2c_adapter *adap); + u32 (*functionality) (struct i2c_adapter *); #if IS_ENABLED(CONFIG_I2C_SLAVE) int (*reg_slave)(struct i2c_client *client); int (*unreg_slave)(struct i2c_client *client); #endif }; +typedef struct i2c_algorithm __no_const i2c_algorithm_no_const; /** * struct i2c_lock_operations - represent I2C locking operations @@ -569,9 +436,9 @@ struct i2c_algorithm { * The main operations are wrapped by i2c_lock_bus and i2c_unlock_bus. 
*/ struct i2c_lock_operations { - void (*lock_bus)(struct i2c_adapter *adapter, unsigned int flags); - int (*trylock_bus)(struct i2c_adapter *adapter, unsigned int flags); - void (*unlock_bus)(struct i2c_adapter *adapter, unsigned int flags); + void (*lock_bus)(struct i2c_adapter *, unsigned int flags); + int (*trylock_bus)(struct i2c_adapter *, unsigned int flags); + void (*unlock_bus)(struct i2c_adapter *, unsigned int flags); }; /** @@ -581,11 +448,6 @@ struct i2c_lock_operations { * @scl_fall_ns: time SCL signal takes to fall in ns; t(f) in the I2C specification * @scl_int_delay_ns: time IP core additionally needs to setup SCL in ns * @sda_fall_ns: time SDA signal takes to fall in ns; t(f) in the I2C specification - * @sda_hold_ns: time IP core additionally needs to hold SDA in ns - * @digital_filter_width_ns: width in ns of spikes on i2c lines that the IP core - * digital filter can filter out - * @analog_filter_cutoff_freq_hz: threshold frequency for the low pass IP core - * analog filter */ struct i2c_timings { u32 bus_freq_hz; @@ -593,65 +455,45 @@ struct i2c_timings { u32 scl_fall_ns; u32 scl_int_delay_ns; u32 sda_fall_ns; - u32 sda_hold_ns; - u32 digital_filter_width_ns; - u32 analog_filter_cutoff_freq_hz; }; /** * struct i2c_bus_recovery_info - I2C bus recovery information * @recover_bus: Recover routine. Either pass driver's recover_bus() routine, or - * i2c_generic_scl_recovery(). + * i2c_generic_scl_recovery() or i2c_generic_gpio_recovery(). * @get_scl: This gets current value of SCL line. Mandatory for generic SCL - * recovery. Populated internally for generic GPIO recovery. - * @set_scl: This sets/clears the SCL line. Mandatory for generic SCL recovery. - * Populated internally for generic GPIO recovery. - * @get_sda: This gets current value of SDA line. This or set_sda() is mandatory - * for generic SCL recovery. Populated internally, if sda_gpio is a valid - * GPIO, for generic GPIO recovery. - * @set_sda: This sets/clears the SDA line. 
This or get_sda() is mandatory for - * generic SCL recovery. Populated internally, if sda_gpio is a valid GPIO, - * for generic GPIO recovery. - * @get_bus_free: Returns the bus free state as seen from the IP core in case it - * has a more complex internal logic than just reading SDA. Optional. + * recovery. Used internally for generic GPIO recovery. + * @set_scl: This sets/clears SCL line. Mandatory for generic SCL recovery. Used + * internally for generic GPIO recovery. + * @get_sda: This gets current value of SDA line. Optional for generic SCL + * recovery. Used internally, if sda_gpio is a valid GPIO, for generic GPIO + * recovery. * @prepare_recovery: This will be called before starting recovery. Platform may * configure padmux here for SDA/SCL line or something else they want. * @unprepare_recovery: This will be called after completing recovery. Platform * may configure padmux here for SDA/SCL line or something else they want. - * @scl_gpiod: gpiod of the SCL line. Only required for GPIO recovery. - * @sda_gpiod: gpiod of the SDA line. Only required for GPIO recovery. - * @pinctrl: pinctrl used by GPIO recovery to change the state of the I2C pins. - * Optional. - * @pins_default: default pinctrl state of SCL/SDA lines, when they are assigned - * to the I2C bus. Optional. Populated internally for GPIO recovery, if - * state with the name PINCTRL_STATE_DEFAULT is found and pinctrl is valid. - * @pins_gpio: recovery pinctrl state of SCL/SDA lines, when they are used as - * GPIOs. Optional. Populated internally for GPIO recovery, if this state - * is called "gpio" or "recovery" and pinctrl is valid. + * @scl_gpio: gpio number of the SCL line. Only required for GPIO recovery. + * @sda_gpio: gpio number of the SDA line. Only required for GPIO recovery. 
*/ struct i2c_bus_recovery_info { - int (*recover_bus)(struct i2c_adapter *adap); + int (*recover_bus)(struct i2c_adapter *); - int (*get_scl)(struct i2c_adapter *adap); - void (*set_scl)(struct i2c_adapter *adap, int val); - int (*get_sda)(struct i2c_adapter *adap); - void (*set_sda)(struct i2c_adapter *adap, int val); - int (*get_bus_free)(struct i2c_adapter *adap); + int (*get_scl)(struct i2c_adapter *); + void (*set_scl)(struct i2c_adapter *, int val); + int (*get_sda)(struct i2c_adapter *); - void (*prepare_recovery)(struct i2c_adapter *adap); - void (*unprepare_recovery)(struct i2c_adapter *adap); + void (*prepare_recovery)(struct i2c_adapter *); + void (*unprepare_recovery)(struct i2c_adapter *); /* gpio recovery */ - struct gpio_desc *scl_gpiod; - struct gpio_desc *sda_gpiod; - struct pinctrl *pinctrl; - struct pinctrl_state *pins_default; - struct pinctrl_state *pins_gpio; + int scl_gpio; + int sda_gpio; }; int i2c_recover_bus(struct i2c_adapter *adap); /* Generic recovery routines */ +int i2c_generic_gpio_recovery(struct i2c_adapter *adap); int i2c_generic_scl_recovery(struct i2c_adapter *adap); /** @@ -697,12 +539,6 @@ struct i2c_adapter_quirks { I2C_AQ_COMB_READ_SECOND | I2C_AQ_COMB_SAME_ADDR) /* clock stretching is not supported */ #define I2C_AQ_NO_CLK_STRETCH BIT(4) -/* message cannot have length of 0 */ -#define I2C_AQ_NO_ZERO_LEN_READ BIT(5) -#define I2C_AQ_NO_ZERO_LEN_WRITE BIT(6) -#define I2C_AQ_NO_ZERO_LEN (I2C_AQ_NO_ZERO_LEN_READ | I2C_AQ_NO_ZERO_LEN_WRITE) -/* adapter cannot do repeated START */ -#define I2C_AQ_NO_REP_START BIT(7) /* * i2c_adapter is the structure used to identify a physical i2c bus along @@ -722,9 +558,6 @@ struct i2c_adapter { int timeout; /* in jiffies */ int retries; struct device dev; /* the adapter device */ - unsigned long locked_flags; /* owned by the I2C core */ -#define I2C_ALF_IS_SUSPENDED 0 -#define I2C_ALF_SUSPEND_REPORTED 1 int nr; char name[48]; @@ -735,20 +568,17 @@ struct i2c_adapter { struct 
i2c_bus_recovery_info *bus_recovery_info; const struct i2c_adapter_quirks *quirks; - - struct irq_domain *host_notify_domain; - struct regulator *bus_regulator; }; #define to_i2c_adapter(d) container_of(d, struct i2c_adapter, dev) -static inline void *i2c_get_adapdata(const struct i2c_adapter *adap) +static inline void *i2c_get_adapdata(const struct i2c_adapter *dev) { - return dev_get_drvdata(&adap->dev); + return dev_get_drvdata(&dev->dev); } -static inline void i2c_set_adapdata(struct i2c_adapter *adap, void *data) +static inline void i2c_set_adapdata(struct i2c_adapter *dev, void *data) { - dev_set_drvdata(&adap->dev, data); + dev_set_drvdata(&dev->dev, data); } static inline struct i2c_adapter * @@ -764,7 +594,7 @@ i2c_parent_is_i2c_adapter(const struct i2c_adapter *adapter) return NULL; } -int i2c_for_each_dev(void *data, int (*fn)(struct device *dev, void *data)); +int i2c_for_each_dev(void *data, int (*fn)(struct device *, void *)); /* Adapter locking functions, exported for shared pin cases */ #define I2C_LOCK_ROOT_ADAPTER BIT(0) @@ -808,44 +638,32 @@ i2c_unlock_bus(struct i2c_adapter *adapter, unsigned int flags) adapter->lock_ops->unlock_bus(adapter, flags); } -/** - * i2c_mark_adapter_suspended - Report suspended state of the adapter to the core - * @adap: Adapter to mark as suspended - * - * When using this helper to mark an adapter as suspended, the core will reject - * further transfers to this adapter. The usage of this helper is optional but - * recommended for devices having distinct handlers for system suspend and - * runtime suspend. More complex devices are free to implement custom solutions - * to reject transfers when suspended. 
- */ -static inline void i2c_mark_adapter_suspended(struct i2c_adapter *adap) +static inline void +i2c_lock_adapter(struct i2c_adapter *adapter) { - i2c_lock_bus(adap, I2C_LOCK_ROOT_ADAPTER); - set_bit(I2C_ALF_IS_SUSPENDED, &adap->locked_flags); - i2c_unlock_bus(adap, I2C_LOCK_ROOT_ADAPTER); + i2c_lock_bus(adapter, I2C_LOCK_ROOT_ADAPTER); } -/** - * i2c_mark_adapter_resumed - Report resumed state of the adapter to the core - * @adap: Adapter to mark as resumed - * - * When using this helper to mark an adapter as resumed, the core will allow - * further transfers to this adapter. See also further notes to - * @i2c_mark_adapter_suspended(). - */ -static inline void i2c_mark_adapter_resumed(struct i2c_adapter *adap) +static inline void +i2c_unlock_adapter(struct i2c_adapter *adapter) { - i2c_lock_bus(adap, I2C_LOCK_ROOT_ADAPTER); - clear_bit(I2C_ALF_IS_SUSPENDED, &adap->locked_flags); - i2c_unlock_bus(adap, I2C_LOCK_ROOT_ADAPTER); + i2c_unlock_bus(adapter, I2C_LOCK_ROOT_ADAPTER); } +/*flags for the client struct: */ +#define I2C_CLIENT_PEC 0x04 /* Use Packet Error Checking */ +#define I2C_CLIENT_TEN 0x10 /* we have a ten bit chip address */ + /* Must equal I2C_M_TEN below */ +#define I2C_CLIENT_SLAVE 0x20 /* we are the slave */ +#define I2C_CLIENT_WAKE 0x80 /* for board_info; true iff can wake */ +#define I2C_CLIENT_SCCB 0x9000 /* Use Omnivision SCCB protocol */ + /* Must match I2C_M_STOP|IGNORE_NAK */ + /* i2c adapter classes (bitmask) */ #define I2C_CLASS_HWMON (1<<0) /* lm_sensors, ... 
*/ #define I2C_CLASS_DDC (1<<3) /* DDC bus on graphics adapters */ #define I2C_CLASS_SPD (1<<7) /* Memory modules */ -/* Warn users that the adapter doesn't support classes anymore */ -#define I2C_CLASS_DEPRECATED (1<<8) +#define I2C_CLASS_DEPRECATED (1<<8) /* Warn users that adapter will stop using classes */ /* Internal numbers to terminate lists */ #define I2C_CLIENT_END 0xfffeU @@ -859,32 +677,29 @@ static inline void i2c_mark_adapter_resumed(struct i2c_adapter *adap) /* administration... */ -#if IS_ENABLED(CONFIG_I2C) -int i2c_add_adapter(struct i2c_adapter *adap); -int devm_i2c_add_adapter(struct device *dev, struct i2c_adapter *adapter); -void i2c_del_adapter(struct i2c_adapter *adap); -int i2c_add_numbered_adapter(struct i2c_adapter *adap); +#if defined(CONFIG_I2C) || defined(CONFIG_I2C_MODULE) +extern int i2c_add_adapter(struct i2c_adapter *); +extern void i2c_del_adapter(struct i2c_adapter *); +extern int i2c_add_numbered_adapter(struct i2c_adapter *); -int i2c_register_driver(struct module *owner, struct i2c_driver *driver); -void i2c_del_driver(struct i2c_driver *driver); +extern int i2c_register_driver(struct module *, struct i2c_driver *); +extern void i2c_del_driver(struct i2c_driver *); /* use a define to avoid include chaining to get THIS_MODULE */ #define i2c_add_driver(driver) \ i2c_register_driver(THIS_MODULE, driver) -static inline bool i2c_client_has_driver(struct i2c_client *client) -{ - return !IS_ERR_OR_NULL(client) && client->dev.driver; -} +extern struct i2c_client *i2c_use_client(struct i2c_client *client); +extern void i2c_release_client(struct i2c_client *client); /* call the i2c_client->command() of all attached clients with * the given arguments */ -void i2c_clients_command(struct i2c_adapter *adap, - unsigned int cmd, void *arg); +extern void i2c_clients_command(struct i2c_adapter *adap, + unsigned int cmd, void *arg); -struct i2c_adapter *i2c_get_adapter(int nr); -void i2c_put_adapter(struct i2c_adapter *adap); -unsigned int 
i2c_adapter_depth(struct i2c_adapter *adapter); +extern struct i2c_adapter *i2c_get_adapter(int nr); +extern void i2c_put_adapter(struct i2c_adapter *adap); +extern unsigned int i2c_adapter_depth(struct i2c_adapter *adapter); void i2c_parse_fw_timings(struct device *dev, struct i2c_timings *t, bool use_defaults); @@ -925,10 +740,6 @@ static inline u8 i2c_8bit_addr_from_msg(const struct i2c_msg *msg) return (msg->addr << 1) | (msg->flags & I2C_M_RD ? 1 : 0); } -u8 *i2c_get_dma_safe_msg_buf(struct i2c_msg *msg, unsigned int threshold); -void i2c_put_dma_safe_msg_buf(u8 *buf, struct i2c_msg *msg, bool xferred); - -int i2c_handle_smbus_host_notify(struct i2c_adapter *adap, unsigned short addr); /** * module_i2c_driver() - Helper macro for registering a modular I2C driver * @__i2c_driver: i2c_driver struct @@ -956,21 +767,14 @@ int i2c_handle_smbus_host_notify(struct i2c_adapter *adap, unsigned short addr); #if IS_ENABLED(CONFIG_OF) /* must call put_device() when done with returned i2c_client device */ -struct i2c_client *of_find_i2c_device_by_node(struct device_node *node); +extern struct i2c_client *of_find_i2c_device_by_node(struct device_node *node); /* must call put_device() when done with returned i2c_adapter device */ -struct i2c_adapter *of_find_i2c_adapter_by_node(struct device_node *node); +extern struct i2c_adapter *of_find_i2c_adapter_by_node(struct device_node *node); /* must call i2c_put_adapter() when done with returned i2c_adapter device */ struct i2c_adapter *of_get_i2c_adapter_by_node(struct device_node *node); -const struct of_device_id -*i2c_of_match_device(const struct of_device_id *matches, - struct i2c_client *client); - -int of_i2c_get_board_info(struct device *dev, struct device_node *node, - struct i2c_board_info *info); - #else static inline struct i2c_client *of_find_i2c_device_by_node(struct device_node *node) @@ -987,57 +791,15 @@ static inline struct i2c_adapter *of_get_i2c_adapter_by_node(struct device_node { return NULL; } - -static 
inline const struct of_device_id -*i2c_of_match_device(const struct of_device_id *matches, - struct i2c_client *client) -{ - return NULL; -} - -static inline int of_i2c_get_board_info(struct device *dev, - struct device_node *node, - struct i2c_board_info *info) -{ - return -ENOTSUPP; -} - #endif /* CONFIG_OF */ -struct acpi_resource; -struct acpi_resource_i2c_serialbus; - #if IS_ENABLED(CONFIG_ACPI) -bool i2c_acpi_get_i2c_resource(struct acpi_resource *ares, - struct acpi_resource_i2c_serialbus **i2c); -int i2c_acpi_client_count(struct acpi_device *adev); u32 i2c_acpi_find_bus_speed(struct device *dev); -struct i2c_client *i2c_acpi_new_device(struct device *dev, int index, - struct i2c_board_info *info); -struct i2c_adapter *i2c_acpi_find_adapter_by_handle(acpi_handle handle); #else -static inline bool i2c_acpi_get_i2c_resource(struct acpi_resource *ares, - struct acpi_resource_i2c_serialbus **i2c) -{ - return false; -} -static inline int i2c_acpi_client_count(struct acpi_device *adev) -{ - return 0; -} static inline u32 i2c_acpi_find_bus_speed(struct device *dev) { return 0; } -static inline struct i2c_client *i2c_acpi_new_device(struct device *dev, - int index, struct i2c_board_info *info) -{ - return ERR_PTR(-ENODEV); -} -static inline struct i2c_adapter *i2c_acpi_find_adapter_by_handle(acpi_handle handle) -{ - return NULL; -} #endif /* CONFIG_ACPI */ #endif /* _LINUX_I2C_H */ diff --git a/include/linux/i2c/adp5588.h b/include/linux/i2c/adp5588.h new file mode 100644 index 0000000000..c2153049cf --- /dev/null +++ b/include/linux/i2c/adp5588.h @@ -0,0 +1,172 @@ +/* + * Analog Devices ADP5588 I/O Expander and QWERTY Keypad Controller + * + * Copyright 2009-2010 Analog Devices Inc. + * + * Licensed under the GPL-2 or later. 
+ */ + +#ifndef _ADP5588_H +#define _ADP5588_H + +#define DEV_ID 0x00 /* Device ID */ +#define CFG 0x01 /* Configuration Register1 */ +#define INT_STAT 0x02 /* Interrupt Status Register */ +#define KEY_LCK_EC_STAT 0x03 /* Key Lock and Event Counter Register */ +#define Key_EVENTA 0x04 /* Key Event Register A */ +#define Key_EVENTB 0x05 /* Key Event Register B */ +#define Key_EVENTC 0x06 /* Key Event Register C */ +#define Key_EVENTD 0x07 /* Key Event Register D */ +#define Key_EVENTE 0x08 /* Key Event Register E */ +#define Key_EVENTF 0x09 /* Key Event Register F */ +#define Key_EVENTG 0x0A /* Key Event Register G */ +#define Key_EVENTH 0x0B /* Key Event Register H */ +#define Key_EVENTI 0x0C /* Key Event Register I */ +#define Key_EVENTJ 0x0D /* Key Event Register J */ +#define KP_LCK_TMR 0x0E /* Keypad Lock1 to Lock2 Timer */ +#define UNLOCK1 0x0F /* Unlock Key1 */ +#define UNLOCK2 0x10 /* Unlock Key2 */ +#define GPIO_INT_STAT1 0x11 /* GPIO Interrupt Status */ +#define GPIO_INT_STAT2 0x12 /* GPIO Interrupt Status */ +#define GPIO_INT_STAT3 0x13 /* GPIO Interrupt Status */ +#define GPIO_DAT_STAT1 0x14 /* GPIO Data Status, Read twice to clear */ +#define GPIO_DAT_STAT2 0x15 /* GPIO Data Status, Read twice to clear */ +#define GPIO_DAT_STAT3 0x16 /* GPIO Data Status, Read twice to clear */ +#define GPIO_DAT_OUT1 0x17 /* GPIO DATA OUT */ +#define GPIO_DAT_OUT2 0x18 /* GPIO DATA OUT */ +#define GPIO_DAT_OUT3 0x19 /* GPIO DATA OUT */ +#define GPIO_INT_EN1 0x1A /* GPIO Interrupt Enable */ +#define GPIO_INT_EN2 0x1B /* GPIO Interrupt Enable */ +#define GPIO_INT_EN3 0x1C /* GPIO Interrupt Enable */ +#define KP_GPIO1 0x1D /* Keypad or GPIO Selection */ +#define KP_GPIO2 0x1E /* Keypad or GPIO Selection */ +#define KP_GPIO3 0x1F /* Keypad or GPIO Selection */ +#define GPI_EM1 0x20 /* GPI Event Mode 1 */ +#define GPI_EM2 0x21 /* GPI Event Mode 2 */ +#define GPI_EM3 0x22 /* GPI Event Mode 3 */ +#define GPIO_DIR1 0x23 /* GPIO Data Direction */ +#define GPIO_DIR2 0x24 /* GPIO 
Data Direction */ +#define GPIO_DIR3 0x25 /* GPIO Data Direction */ +#define GPIO_INT_LVL1 0x26 /* GPIO Edge/Level Detect */ +#define GPIO_INT_LVL2 0x27 /* GPIO Edge/Level Detect */ +#define GPIO_INT_LVL3 0x28 /* GPIO Edge/Level Detect */ +#define Debounce_DIS1 0x29 /* Debounce Disable */ +#define Debounce_DIS2 0x2A /* Debounce Disable */ +#define Debounce_DIS3 0x2B /* Debounce Disable */ +#define GPIO_PULL1 0x2C /* GPIO Pull Disable */ +#define GPIO_PULL2 0x2D /* GPIO Pull Disable */ +#define GPIO_PULL3 0x2E /* GPIO Pull Disable */ +#define CMP_CFG_STAT 0x30 /* Comparator Configuration and Status Register */ +#define CMP_CONFG_SENS1 0x31 /* Sensor1 Comparator Configuration Register */ +#define CMP_CONFG_SENS2 0x32 /* L2 Light Sensor Reference Level, Output Falling for Sensor 1 */ +#define CMP1_LVL2_TRIP 0x33 /* L2 Light Sensor Hysteresis (Active when Output Rising) for Sensor 1 */ +#define CMP1_LVL2_HYS 0x34 /* L3 Light Sensor Reference Level, Output Falling For Sensor 1 */ +#define CMP1_LVL3_TRIP 0x35 /* L3 Light Sensor Hysteresis (Active when Output Rising) For Sensor 1 */ +#define CMP1_LVL3_HYS 0x36 /* Sensor 2 Comparator Configuration Register */ +#define CMP2_LVL2_TRIP 0x37 /* L2 Light Sensor Reference Level, Output Falling for Sensor 2 */ +#define CMP2_LVL2_HYS 0x38 /* L2 Light Sensor Hysteresis (Active when Output Rising) for Sensor 2 */ +#define CMP2_LVL3_TRIP 0x39 /* L3 Light Sensor Reference Level, Output Falling For Sensor 2 */ +#define CMP2_LVL3_HYS 0x3A /* L3 Light Sensor Hysteresis (Active when Output Rising) For Sensor 2 */ +#define CMP1_ADC_DAT_R1 0x3B /* Comparator 1 ADC data Register1 */ +#define CMP1_ADC_DAT_R2 0x3C /* Comparator 1 ADC data Register2 */ +#define CMP2_ADC_DAT_R1 0x3D /* Comparator 2 ADC data Register1 */ +#define CMP2_ADC_DAT_R2 0x3E /* Comparator 2 ADC data Register2 */ + +#define ADP5588_DEVICE_ID_MASK 0xF + + /* Configuration Register1 */ +#define ADP5588_AUTO_INC (1 << 7) +#define ADP5588_GPIEM_CFG (1 << 6) +#define 
ADP5588_OVR_FLOW_M (1 << 5) +#define ADP5588_INT_CFG (1 << 4) +#define ADP5588_OVR_FLOW_IEN (1 << 3) +#define ADP5588_K_LCK_IM (1 << 2) +#define ADP5588_GPI_IEN (1 << 1) +#define ADP5588_KE_IEN (1 << 0) + +/* Interrupt Status Register */ +#define ADP5588_CMP2_INT (1 << 5) +#define ADP5588_CMP1_INT (1 << 4) +#define ADP5588_OVR_FLOW_INT (1 << 3) +#define ADP5588_K_LCK_INT (1 << 2) +#define ADP5588_GPI_INT (1 << 1) +#define ADP5588_KE_INT (1 << 0) + +/* Key Lock and Event Counter Register */ +#define ADP5588_K_LCK_EN (1 << 6) +#define ADP5588_LCK21 0x30 +#define ADP5588_KEC 0xF + +#define ADP5588_MAXGPIO 18 +#define ADP5588_BANK(offs) ((offs) >> 3) +#define ADP5588_BIT(offs) (1u << ((offs) & 0x7)) + +/* Put one of these structures in i2c_board_info platform_data */ + +#define ADP5588_KEYMAPSIZE 80 + +#define GPI_PIN_ROW0 97 +#define GPI_PIN_ROW1 98 +#define GPI_PIN_ROW2 99 +#define GPI_PIN_ROW3 100 +#define GPI_PIN_ROW4 101 +#define GPI_PIN_ROW5 102 +#define GPI_PIN_ROW6 103 +#define GPI_PIN_ROW7 104 +#define GPI_PIN_COL0 105 +#define GPI_PIN_COL1 106 +#define GPI_PIN_COL2 107 +#define GPI_PIN_COL3 108 +#define GPI_PIN_COL4 109 +#define GPI_PIN_COL5 110 +#define GPI_PIN_COL6 111 +#define GPI_PIN_COL7 112 +#define GPI_PIN_COL8 113 +#define GPI_PIN_COL9 114 + +#define GPI_PIN_ROW_BASE GPI_PIN_ROW0 +#define GPI_PIN_ROW_END GPI_PIN_ROW7 +#define GPI_PIN_COL_BASE GPI_PIN_COL0 +#define GPI_PIN_COL_END GPI_PIN_COL9 + +#define GPI_PIN_BASE GPI_PIN_ROW_BASE +#define GPI_PIN_END GPI_PIN_COL_END + +#define ADP5588_GPIMAPSIZE_MAX (GPI_PIN_END - GPI_PIN_BASE + 1) + +struct adp5588_gpi_map { + unsigned short pin; + unsigned short sw_evt; +}; + +struct adp5588_kpad_platform_data { + int rows; /* Number of rows */ + int cols; /* Number of columns */ + const unsigned short *keymap; /* Pointer to keymap */ + unsigned short keymapsize; /* Keymap size */ + unsigned repeat:1; /* Enable key repeat */ + unsigned en_keylock:1; /* Enable Key Lock feature */ + unsigned short unlock_key1; /* 
Unlock Key 1 */ + unsigned short unlock_key2; /* Unlock Key 2 */ + const struct adp5588_gpi_map *gpimap; + unsigned short gpimapsize; + const struct adp5588_gpio_platform_data *gpio_data; +}; + +struct i2c_client; /* forward declaration */ + +struct adp5588_gpio_platform_data { + int gpio_start; /* GPIO Chip base # */ + const char *const *names; + unsigned irq_base; /* interrupt base # */ + unsigned pullup_dis_mask; /* Pull-Up Disable Mask */ + int (*setup)(struct i2c_client *client, + unsigned gpio, unsigned ngpio, + void *context); + int (*teardown)(struct i2c_client *client, + unsigned gpio, unsigned ngpio, + void *context); + void *context; +}; + +#endif diff --git a/include/linux/i2c/adp8860.h b/include/linux/i2c/adp8860.h new file mode 100644 index 0000000000..0b4d39855c --- /dev/null +++ b/include/linux/i2c/adp8860.h @@ -0,0 +1,154 @@ +/* + * Definitions and platform data for Analog Devices + * Backlight drivers ADP8860 + * + * Copyright 2009-2010 Analog Devices Inc. + * + * Licensed under the GPL-2 or later. 
+ */ + +#ifndef __LINUX_I2C_ADP8860_H +#define __LINUX_I2C_ADP8860_H + +#include +#include + +#define ID_ADP8860 8860 + +#define ADP8860_MAX_BRIGHTNESS 0x7F +#define FLAG_OFFT_SHIFT 8 + +/* + * LEDs subdevice platform data + */ + +#define ADP8860_LED_DIS_BLINK (0 << FLAG_OFFT_SHIFT) +#define ADP8860_LED_OFFT_600ms (1 << FLAG_OFFT_SHIFT) +#define ADP8860_LED_OFFT_1200ms (2 << FLAG_OFFT_SHIFT) +#define ADP8860_LED_OFFT_1800ms (3 << FLAG_OFFT_SHIFT) + +#define ADP8860_LED_ONT_200ms 0 +#define ADP8860_LED_ONT_600ms 1 +#define ADP8860_LED_ONT_800ms 2 +#define ADP8860_LED_ONT_1200ms 3 + +#define ADP8860_LED_D7 (7) +#define ADP8860_LED_D6 (6) +#define ADP8860_LED_D5 (5) +#define ADP8860_LED_D4 (4) +#define ADP8860_LED_D3 (3) +#define ADP8860_LED_D2 (2) +#define ADP8860_LED_D1 (1) + +/* + * Backlight subdevice platform data + */ + +#define ADP8860_BL_D7 (1 << 6) +#define ADP8860_BL_D6 (1 << 5) +#define ADP8860_BL_D5 (1 << 4) +#define ADP8860_BL_D4 (1 << 3) +#define ADP8860_BL_D3 (1 << 2) +#define ADP8860_BL_D2 (1 << 1) +#define ADP8860_BL_D1 (1 << 0) + +#define ADP8860_FADE_T_DIS 0 /* Fade Timer Disabled */ +#define ADP8860_FADE_T_300ms 1 /* 0.3 Sec */ +#define ADP8860_FADE_T_600ms 2 +#define ADP8860_FADE_T_900ms 3 +#define ADP8860_FADE_T_1200ms 4 +#define ADP8860_FADE_T_1500ms 5 +#define ADP8860_FADE_T_1800ms 6 +#define ADP8860_FADE_T_2100ms 7 +#define ADP8860_FADE_T_2400ms 8 +#define ADP8860_FADE_T_2700ms 9 +#define ADP8860_FADE_T_3000ms 10 +#define ADP8860_FADE_T_3500ms 11 +#define ADP8860_FADE_T_4000ms 12 +#define ADP8860_FADE_T_4500ms 13 +#define ADP8860_FADE_T_5000ms 14 +#define ADP8860_FADE_T_5500ms 15 /* 5.5 Sec */ + +#define ADP8860_FADE_LAW_LINEAR 0 +#define ADP8860_FADE_LAW_SQUARE 1 +#define ADP8860_FADE_LAW_CUBIC1 2 +#define ADP8860_FADE_LAW_CUBIC2 3 + +#define ADP8860_BL_AMBL_FILT_80ms 0 /* Light sensor filter time */ +#define ADP8860_BL_AMBL_FILT_160ms 1 +#define ADP8860_BL_AMBL_FILT_320ms 2 +#define ADP8860_BL_AMBL_FILT_640ms 3 +#define 
ADP8860_BL_AMBL_FILT_1280ms 4 +#define ADP8860_BL_AMBL_FILT_2560ms 5 +#define ADP8860_BL_AMBL_FILT_5120ms 6 +#define ADP8860_BL_AMBL_FILT_10240ms 7 /* 10.24 sec */ + +/* + * Blacklight current 0..30mA + */ +#define ADP8860_BL_CUR_mA(I) ((I * 127) / 30) + +/* + * L2 comparator current 0..1106uA + */ +#define ADP8860_L2_COMP_CURR_uA(I) ((I * 255) / 1106) + +/* + * L3 comparator current 0..138uA + */ +#define ADP8860_L3_COMP_CURR_uA(I) ((I * 255) / 138) + +struct adp8860_backlight_platform_data { + u8 bl_led_assign; /* 1 = Backlight 0 = Individual LED */ + + u8 bl_fade_in; /* Backlight Fade-In Timer */ + u8 bl_fade_out; /* Backlight Fade-Out Timer */ + u8 bl_fade_law; /* fade-on/fade-off transfer characteristic */ + + u8 en_ambl_sens; /* 1 = enable ambient light sensor */ + u8 abml_filt; /* Light sensor filter time */ + + u8 l1_daylight_max; /* use BL_CUR_mA(I) 0 <= I <= 30 mA */ + u8 l1_daylight_dim; /* typ = 0, use BL_CUR_mA(I) 0 <= I <= 30 mA */ + u8 l2_office_max; /* use BL_CUR_mA(I) 0 <= I <= 30 mA */ + u8 l2_office_dim; /* typ = 0, use BL_CUR_mA(I) 0 <= I <= 30 mA */ + u8 l3_dark_max; /* use BL_CUR_mA(I) 0 <= I <= 30 mA */ + u8 l3_dark_dim; /* typ = 0, use BL_CUR_mA(I) 0 <= I <= 30 mA */ + + u8 l2_trip; /* use L2_COMP_CURR_uA(I) 0 <= I <= 1106 uA */ + u8 l2_hyst; /* use L2_COMP_CURR_uA(I) 0 <= I <= 1106 uA */ + u8 l3_trip; /* use L3_COMP_CURR_uA(I) 0 <= I <= 551 uA */ + u8 l3_hyst; /* use L3_COMP_CURR_uA(I) 0 <= I <= 551 uA */ + + /** + * Independent Current Sinks / LEDS + * Sinks not assigned to the Backlight can be exposed to + * user space using the LEDS CLASS interface + */ + + int num_leds; + struct led_info *leds; + u8 led_fade_in; /* LED Fade-In Timer */ + u8 led_fade_out; /* LED Fade-Out Timer */ + u8 led_fade_law; /* fade-on/fade-off transfer characteristic */ + u8 led_on_time; + + /** + * Gain down disable. Setting this option does not allow the + * charge pump to switch to lower gains. 
NOT AVAILABLE on ADP8860 + * 1 = the charge pump doesn't switch down in gain until all LEDs are 0. + * The charge pump switches up in gain as needed. This feature is + * useful if the ADP8863 charge pump is used to drive an external load. + * This feature must be used when utilizing small fly capacitors + * (0402 or smaller). + * 0 = the charge pump automatically switches up and down in gain. + * This provides optimal efficiency, but is not suitable for driving + * loads that are not connected through the ADP8863 diode drivers. + * Additionally, the charge pump fly capacitors should be low ESR + * and sized 0603 or greater. + */ + + u8 gdwn_dis; +}; + +#endif /* __LINUX_I2C_ADP8860_H */ diff --git a/include/linux/i2c/adp8870.h b/include/linux/i2c/adp8870.h new file mode 100644 index 0000000000..624dceccbd --- /dev/null +++ b/include/linux/i2c/adp8870.h @@ -0,0 +1,153 @@ +/* + * Definitions and platform data for Analog Devices + * Backlight drivers ADP8870 + * + * Copyright 2009-2010 Analog Devices Inc. + * + * Licensed under the GPL-2 or later. 
+ */ + +#ifndef __LINUX_I2C_ADP8870_H +#define __LINUX_I2C_ADP8870_H + +#define ID_ADP8870 8870 + +#define ADP8870_MAX_BRIGHTNESS 0x7F +#define FLAG_OFFT_SHIFT 8 + +/* + * LEDs subdevice platform data + */ + +#define ADP8870_LED_DIS_BLINK (0 << FLAG_OFFT_SHIFT) +#define ADP8870_LED_OFFT_600ms (1 << FLAG_OFFT_SHIFT) +#define ADP8870_LED_OFFT_1200ms (2 << FLAG_OFFT_SHIFT) +#define ADP8870_LED_OFFT_1800ms (3 << FLAG_OFFT_SHIFT) + +#define ADP8870_LED_ONT_200ms 0 +#define ADP8870_LED_ONT_600ms 1 +#define ADP8870_LED_ONT_800ms 2 +#define ADP8870_LED_ONT_1200ms 3 + +#define ADP8870_LED_D7 (7) +#define ADP8870_LED_D6 (6) +#define ADP8870_LED_D5 (5) +#define ADP8870_LED_D4 (4) +#define ADP8870_LED_D3 (3) +#define ADP8870_LED_D2 (2) +#define ADP8870_LED_D1 (1) + +/* + * Backlight subdevice platform data + */ + +#define ADP8870_BL_D7 (1 << 6) +#define ADP8870_BL_D6 (1 << 5) +#define ADP8870_BL_D5 (1 << 4) +#define ADP8870_BL_D4 (1 << 3) +#define ADP8870_BL_D3 (1 << 2) +#define ADP8870_BL_D2 (1 << 1) +#define ADP8870_BL_D1 (1 << 0) + +#define ADP8870_FADE_T_DIS 0 /* Fade Timer Disabled */ +#define ADP8870_FADE_T_300ms 1 /* 0.3 Sec */ +#define ADP8870_FADE_T_600ms 2 +#define ADP8870_FADE_T_900ms 3 +#define ADP8870_FADE_T_1200ms 4 +#define ADP8870_FADE_T_1500ms 5 +#define ADP8870_FADE_T_1800ms 6 +#define ADP8870_FADE_T_2100ms 7 +#define ADP8870_FADE_T_2400ms 8 +#define ADP8870_FADE_T_2700ms 9 +#define ADP8870_FADE_T_3000ms 10 +#define ADP8870_FADE_T_3500ms 11 +#define ADP8870_FADE_T_4000ms 12 +#define ADP8870_FADE_T_4500ms 13 +#define ADP8870_FADE_T_5000ms 14 +#define ADP8870_FADE_T_5500ms 15 /* 5.5 Sec */ + +#define ADP8870_FADE_LAW_LINEAR 0 +#define ADP8870_FADE_LAW_SQUARE 1 +#define ADP8870_FADE_LAW_CUBIC1 2 +#define ADP8870_FADE_LAW_CUBIC2 3 + +#define ADP8870_BL_AMBL_FILT_80ms 0 /* Light sensor filter time */ +#define ADP8870_BL_AMBL_FILT_160ms 1 +#define ADP8870_BL_AMBL_FILT_320ms 2 +#define ADP8870_BL_AMBL_FILT_640ms 3 +#define ADP8870_BL_AMBL_FILT_1280ms 4 +#define 
ADP8870_BL_AMBL_FILT_2560ms 5 +#define ADP8870_BL_AMBL_FILT_5120ms 6 +#define ADP8870_BL_AMBL_FILT_10240ms 7 /* 10.24 sec */ + +/* + * Blacklight current 0..30mA + */ +#define ADP8870_BL_CUR_mA(I) ((I * 127) / 30) + +/* + * L2 comparator current 0..1106uA + */ +#define ADP8870_L2_COMP_CURR_uA(I) ((I * 255) / 1106) + +/* + * L3 comparator current 0..551uA + */ +#define ADP8870_L3_COMP_CURR_uA(I) ((I * 255) / 551) + +/* + * L4 comparator current 0..275uA + */ +#define ADP8870_L4_COMP_CURR_uA(I) ((I * 255) / 275) + +/* + * L5 comparator current 0..138uA + */ +#define ADP8870_L5_COMP_CURR_uA(I) ((I * 255) / 138) + +struct adp8870_backlight_platform_data { + u8 bl_led_assign; /* 1 = Backlight 0 = Individual LED */ + u8 pwm_assign; /* 1 = Enables PWM mode */ + + u8 bl_fade_in; /* Backlight Fade-In Timer */ + u8 bl_fade_out; /* Backlight Fade-Out Timer */ + u8 bl_fade_law; /* fade-on/fade-off transfer characteristic */ + + u8 en_ambl_sens; /* 1 = enable ambient light sensor */ + u8 abml_filt; /* Light sensor filter time */ + + u8 l1_daylight_max; /* use BL_CUR_mA(I) 0 <= I <= 30 mA */ + u8 l1_daylight_dim; /* typ = 0, use BL_CUR_mA(I) 0 <= I <= 30 mA */ + u8 l2_bright_max; /* use BL_CUR_mA(I) 0 <= I <= 30 mA */ + u8 l2_bright_dim; /* typ = 0, use BL_CUR_mA(I) 0 <= I <= 30 mA */ + u8 l3_office_max; /* use BL_CUR_mA(I) 0 <= I <= 30 mA */ + u8 l3_office_dim; /* typ = 0, use BL_CUR_mA(I) 0 <= I <= 30 mA */ + u8 l4_indoor_max; /* use BL_CUR_mA(I) 0 <= I <= 30 mA */ + u8 l4_indor_dim; /* typ = 0, use BL_CUR_mA(I) 0 <= I <= 30 mA */ + u8 l5_dark_max; /* use BL_CUR_mA(I) 0 <= I <= 30 mA */ + u8 l5_dark_dim; /* typ = 0, use BL_CUR_mA(I) 0 <= I <= 30 mA */ + + u8 l2_trip; /* use L2_COMP_CURR_uA(I) 0 <= I <= 1106 uA */ + u8 l2_hyst; /* use L2_COMP_CURR_uA(I) 0 <= I <= 1106 uA */ + u8 l3_trip; /* use L3_COMP_CURR_uA(I) 0 <= I <= 551 uA */ + u8 l3_hyst; /* use L3_COMP_CURR_uA(I) 0 <= I <= 551 uA */ + u8 l4_trip; /* use L4_COMP_CURR_uA(I) 0 <= I <= 275 uA */ + u8 l4_hyst; /* use 
L4_COMP_CURR_uA(I) 0 <= I <= 275 uA */ + u8 l5_trip; /* use L5_COMP_CURR_uA(I) 0 <= I <= 138 uA */ + u8 l5_hyst; /* use L6_COMP_CURR_uA(I) 0 <= I <= 138 uA */ + + /** + * Independent Current Sinks / LEDS + * Sinks not assigned to the Backlight can be exposed to + * user space using the LEDS CLASS interface + */ + + int num_leds; + struct led_info *leds; + u8 led_fade_in; /* LED Fade-In Timer */ + u8 led_fade_out; /* LED Fade-Out Timer */ + u8 led_fade_law; /* fade-on/fade-off transfer characteristic */ + u8 led_on_time; +}; + +#endif /* __LINUX_I2C_ADP8870_H */ diff --git a/include/linux/i2c/ads1015.h b/include/linux/i2c/ads1015.h new file mode 100644 index 0000000000..d5aa2a0456 --- /dev/null +++ b/include/linux/i2c/ads1015.h @@ -0,0 +1,36 @@ +/* + * Platform Data for ADS1015 12-bit 4-input ADC + * (C) Copyright 2010 + * Dirk Eibach, Guntermann & Drunck GmbH + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 
+ */ + +#ifndef LINUX_ADS1015_H +#define LINUX_ADS1015_H + +#define ADS1015_CHANNELS 8 + +struct ads1015_channel_data { + bool enabled; + unsigned int pga; + unsigned int data_rate; +}; + +struct ads1015_platform_data { + struct ads1015_channel_data channel_data[ADS1015_CHANNELS]; +}; + +#endif /* LINUX_ADS1015_H */ diff --git a/include/linux/i2c/apds990x.h b/include/linux/i2c/apds990x.h new file mode 100644 index 0000000000..d186fcc5d2 --- /dev/null +++ b/include/linux/i2c/apds990x.h @@ -0,0 +1,79 @@ +/* + * This file is part of the APDS990x sensor driver. + * Chip is combined proximity and ambient light sensor. + * + * Copyright (C) 2010 Nokia Corporation and/or its subsidiary(-ies). + * + * Contact: Samu Onkalo + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA + * 02110-1301 USA + * + */ + +#ifndef __APDS990X_H__ +#define __APDS990X_H__ + + +#define APDS_IRLED_CURR_12mA 0x3 +#define APDS_IRLED_CURR_25mA 0x2 +#define APDS_IRLED_CURR_50mA 0x1 +#define APDS_IRLED_CURR_100mA 0x0 + +/** + * struct apds990x_chip_factors - defines effect of the cover window + * @ga: Total glass attenuation + * @cf1: clear channel factor 1 for raw to lux conversion + * @irf1: IR channel factor 1 for raw to lux conversion + * @cf2: clear channel factor 2 for raw to lux conversion + * @irf2: IR channel factor 2 for raw to lux conversion + * @df: device factor for conversion formulas + * + * Structure for tuning ALS calculation to match with environment. + * Values depend on the material above the sensor and the sensor + * itself. If the GA is zero, driver will use uncovered sensor default values + * format: decimal value * APDS_PARAM_SCALE except df which is plain integer. + */ +#define APDS_PARAM_SCALE 4096 +struct apds990x_chip_factors { + int ga; + int cf1; + int irf1; + int cf2; + int irf2; + int df; +}; + +/** + * struct apds990x_platform_data - platform data for apsd990x.c driver + * @cf: chip factor data + * @pddrive: IR-led driving current + * @ppcount: number of IR pulses used for proximity estimation + * @setup_resources: interrupt line setup call back function + * @release_resources: interrupt line release call back function + * + * Proximity detection result depends heavily on correct ppcount, pdrive + * and cover window. 
+ * + */ + +struct apds990x_platform_data { + struct apds990x_chip_factors cf; + u8 pdrive; + u8 ppcount; + int (*setup_resources)(void); + int (*release_resources)(void); +}; + +#endif diff --git a/include/linux/i2c/bfin_twi.h b/include/linux/i2c/bfin_twi.h new file mode 100644 index 0000000000..135a4e0876 --- /dev/null +++ b/include/linux/i2c/bfin_twi.h @@ -0,0 +1,145 @@ +/* + * i2c-bfin-twi.h - interface to ADI TWI controller + * + * Copyright 2005-2014 Analog Devices Inc. + * + * Licensed under the GPL-2 or later. + */ + +#ifndef __I2C_BFIN_TWI_H__ +#define __I2C_BFIN_TWI_H__ + +#include +#include + +/* + * ADI twi registers layout + */ +struct bfin_twi_regs { + u16 clkdiv; + u16 dummy1; + u16 control; + u16 dummy2; + u16 slave_ctl; + u16 dummy3; + u16 slave_stat; + u16 dummy4; + u16 slave_addr; + u16 dummy5; + u16 master_ctl; + u16 dummy6; + u16 master_stat; + u16 dummy7; + u16 master_addr; + u16 dummy8; + u16 int_stat; + u16 dummy9; + u16 int_mask; + u16 dummy10; + u16 fifo_ctl; + u16 dummy11; + u16 fifo_stat; + u16 dummy12; + u32 __pad[20]; + u16 xmt_data8; + u16 dummy13; + u16 xmt_data16; + u16 dummy14; + u16 rcv_data8; + u16 dummy15; + u16 rcv_data16; + u16 dummy16; +}; + +struct bfin_twi_iface { + int irq; + spinlock_t lock; + char read_write; + u8 command; + u8 *transPtr; + int readNum; + int writeNum; + int cur_mode; + int manual_stop; + int result; + struct i2c_adapter adap; + struct completion complete; + struct i2c_msg *pmsg; + int msg_num; + int cur_msg; + u16 saved_clkdiv; + u16 saved_control; + struct bfin_twi_regs __iomem *regs_base; +}; + +/* ******************** TWO-WIRE INTERFACE (TWI) MASKS ********************/ +/* TWI_CLKDIV Macros (Use: *pTWI_CLKDIV = CLKLOW(x)|CLKHI(y); ) */ +#define CLKLOW(x) ((x) & 0xFF) /* Periods Clock Is Held Low */ +#define CLKHI(y) (((y)&0xFF)<<0x8) /* Periods Before New Clock Low */ + +/* TWI_PRESCALE Masks */ +#define PRESCALE 0x007F /* SCLKs Per Internal Time Reference (10MHz) */ +#define TWI_ENA 0x0080 /* TWI 
Enable */ +#define SCCB 0x0200 /* SCCB Compatibility Enable */ + +/* TWI_SLAVE_CTL Masks */ +#define SEN 0x0001 /* Slave Enable */ +#define SADD_LEN 0x0002 /* Slave Address Length */ +#define STDVAL 0x0004 /* Slave Transmit Data Valid */ +#define NAK 0x0008 /* NAK Generated At Conclusion Of Transfer */ +#define GEN 0x0010 /* General Call Address Matching Enabled */ + +/* TWI_SLAVE_STAT Masks */ +#define SDIR 0x0001 /* Slave Transfer Direction (RX/TX*) */ +#define GCALL 0x0002 /* General Call Indicator */ + +/* TWI_MASTER_CTL Masks */ +#define MEN 0x0001 /* Master Mode Enable */ +#define MADD_LEN 0x0002 /* Master Address Length */ +#define MDIR 0x0004 /* Master Transmit Direction (RX/TX*) */ +#define FAST 0x0008 /* Use Fast Mode Timing Specs */ +#define STOP 0x0010 /* Issue Stop Condition */ +#define RSTART 0x0020 /* Repeat Start or Stop* At End Of Transfer */ +#define DCNT 0x3FC0 /* Data Bytes To Transfer */ +#define SDAOVR 0x4000 /* Serial Data Override */ +#define SCLOVR 0x8000 /* Serial Clock Override */ + +/* TWI_MASTER_STAT Masks */ +#define MPROG 0x0001 /* Master Transfer In Progress */ +#define LOSTARB 0x0002 /* Lost Arbitration Indicator (Xfer Aborted) */ +#define ANAK 0x0004 /* Address Not Acknowledged */ +#define DNAK 0x0008 /* Data Not Acknowledged */ +#define BUFRDERR 0x0010 /* Buffer Read Error */ +#define BUFWRERR 0x0020 /* Buffer Write Error */ +#define SDASEN 0x0040 /* Serial Data Sense */ +#define SCLSEN 0x0080 /* Serial Clock Sense */ +#define BUSBUSY 0x0100 /* Bus Busy Indicator */ + +/* TWI_INT_SRC and TWI_INT_ENABLE Masks */ +#define SINIT 0x0001 /* Slave Transfer Initiated */ +#define SCOMP 0x0002 /* Slave Transfer Complete */ +#define SERR 0x0004 /* Slave Transfer Error */ +#define SOVF 0x0008 /* Slave Overflow */ +#define MCOMP 0x0010 /* Master Transfer Complete */ +#define MERR 0x0020 /* Master Transfer Error */ +#define XMTSERV 0x0040 /* Transmit FIFO Service */ +#define RCVSERV 0x0080 /* Receive FIFO Service */ + +/* TWI_FIFO_CTRL Masks 
*/ +#define XMTFLUSH 0x0001 /* Transmit Buffer Flush */ +#define RCVFLUSH 0x0002 /* Receive Buffer Flush */ +#define XMTINTLEN 0x0004 /* Transmit Buffer Interrupt Length */ +#define RCVINTLEN 0x0008 /* Receive Buffer Interrupt Length */ + +/* TWI_FIFO_STAT Masks */ +#define XMTSTAT 0x0003 /* Transmit FIFO Status */ +#define XMT_EMPTY 0x0000 /* Transmit FIFO Empty */ +#define XMT_HALF 0x0001 /* Transmit FIFO Has 1 Byte To Write */ +#define XMT_FULL 0x0003 /* Transmit FIFO Full (2 Bytes To Write) */ + +#define RCVSTAT 0x000C /* Receive FIFO Status */ +#define RCV_EMPTY 0x0000 /* Receive FIFO Empty */ +#define RCV_HALF 0x0004 /* Receive FIFO Has 1 Byte To Read */ +#define RCV_FULL 0x000C /* Receive FIFO Full (2 Bytes To Read) */ + +#endif diff --git a/include/linux/i2c/bh1770glc.h b/include/linux/i2c/bh1770glc.h new file mode 100644 index 0000000000..8b5e2df36c --- /dev/null +++ b/include/linux/i2c/bh1770glc.h @@ -0,0 +1,53 @@ +/* + * This file is part of the ROHM BH1770GLC / OSRAM SFH7770 sensor driver. + * Chip is combined proximity and ambient light sensor. + * + * Copyright (C) 2010 Nokia Corporation and/or its subsidiary(-ies). + * + * Contact: Samu Onkalo + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA + * 02110-1301 USA + * + */ + +#ifndef __BH1770_H__ +#define __BH1770_H__ + +/** + * struct bh1770_platform_data - platform data for bh1770glc driver + * @led_def_curr: IR led driving current. + * @glass_attenuation: Attenuation factor for covering window. + * @setup_resources: Call back for interrupt line setup function + * @release_resources: Call back for interrupte line release function + * + * Example of glass attenuation: 16384 * 385 / 100 means attenuation factor + * of 3.85. i.e. light_above_sensor = light_above_cover_window / 3.85 + */ + +struct bh1770_platform_data { +#define BH1770_LED_5mA 0 +#define BH1770_LED_10mA 1 +#define BH1770_LED_20mA 2 +#define BH1770_LED_50mA 3 +#define BH1770_LED_100mA 4 +#define BH1770_LED_150mA 5 +#define BH1770_LED_200mA 6 + __u8 led_def_curr; +#define BH1770_NEUTRAL_GA 16384 /* 16384 / 16384 = 1 */ + __u32 glass_attenuation; + int (*setup_resources)(void); + int (*release_resources)(void); +}; +#endif diff --git a/include/linux/i2c/dm355evm_msp.h b/include/linux/i2c/dm355evm_msp.h new file mode 100644 index 0000000000..372470350f --- /dev/null +++ b/include/linux/i2c/dm355evm_msp.h @@ -0,0 +1,79 @@ +/* + * dm355evm_msp.h - support MSP430 microcontroller on DM355EVM board + */ +#ifndef __LINUX_I2C_DM355EVM_MSP +#define __LINUX_I2C_DM355EVM_MSP + +/* + * Written against Spectrum's writeup for the A4 firmware revision, + * and tweaked to match source and rev D2 schematics by removing CPLD + * and NOR flash hooks (which were last appropriate in rev B boards). + * + * Note that the firmware supports a flavor of write posting ... to be + * sure a write completes, issue another read or write. 
+ */ + +/* utilities to access "registers" emulated by msp430 firmware */ +extern int dm355evm_msp_write(u8 value, u8 reg); +extern int dm355evm_msp_read(u8 reg); + + +/* command/control registers */ +#define DM355EVM_MSP_COMMAND 0x00 +# define MSP_COMMAND_NULL 0 +# define MSP_COMMAND_RESET_COLD 1 +# define MSP_COMMAND_RESET_WARM 2 +# define MSP_COMMAND_RESET_WARM_I 3 +# define MSP_COMMAND_POWEROFF 4 +# define MSP_COMMAND_IR_REINIT 5 +#define DM355EVM_MSP_STATUS 0x01 +# define MSP_STATUS_BAD_OFFSET BIT(0) +# define MSP_STATUS_BAD_COMMAND BIT(1) +# define MSP_STATUS_POWER_ERROR BIT(2) +# define MSP_STATUS_RXBUF_OVERRUN BIT(3) +#define DM355EVM_MSP_RESET 0x02 /* 0 bits == in reset */ +# define MSP_RESET_DC5 BIT(0) +# define MSP_RESET_TVP5154 BIT(2) +# define MSP_RESET_IMAGER BIT(3) +# define MSP_RESET_ETHERNET BIT(4) +# define MSP_RESET_SYS BIT(5) +# define MSP_RESET_AIC33 BIT(7) + +/* GPIO registers ... bit patterns mostly match the source MSP ports */ +#define DM355EVM_MSP_LED 0x03 /* active low (MSP P4) */ +#define DM355EVM_MSP_SWITCH1 0x04 /* (MSP P5, masked) */ +# define MSP_SWITCH1_SW6_1 BIT(0) +# define MSP_SWITCH1_SW6_2 BIT(1) +# define MSP_SWITCH1_SW6_3 BIT(2) +# define MSP_SWITCH1_SW6_4 BIT(3) +# define MSP_SWITCH1_J1 BIT(4) /* NTSC/PAL */ +# define MSP_SWITCH1_MSP_INT BIT(5) /* active low */ +#define DM355EVM_MSP_SWITCH2 0x05 /* (MSP P6, masked) */ +# define MSP_SWITCH2_SW10 BIT(3) +# define MSP_SWITCH2_SW11 BIT(4) +# define MSP_SWITCH2_SW12 BIT(5) +# define MSP_SWITCH2_SW13 BIT(6) +# define MSP_SWITCH2_SW14 BIT(7) +#define DM355EVM_MSP_SDMMC 0x06 /* (MSP P2, masked) */ +# define MSP_SDMMC_0_WP BIT(1) +# define MSP_SDMMC_0_CD BIT(2) /* active low */ +# define MSP_SDMMC_1_WP BIT(3) +# define MSP_SDMMC_1_CD BIT(4) /* active low */ +#define DM355EVM_MSP_FIRMREV 0x07 /* not a GPIO (out of order) */ +#define DM355EVM_MSP_VIDEO_IN 0x08 /* (MSP P3, masked) */ +# define MSP_VIDEO_IMAGER BIT(7) /* low == tvp5146 */ + +/* power supply registers are currently omitted 
*/ + +/* RTC registers */ +#define DM355EVM_MSP_RTC_0 0x12 /* LSB */ +#define DM355EVM_MSP_RTC_1 0x13 +#define DM355EVM_MSP_RTC_2 0x14 +#define DM355EVM_MSP_RTC_3 0x15 /* MSB */ + +/* input event queue registers; code == ((HIGH << 8) | LOW) */ +#define DM355EVM_MSP_INPUT_COUNT 0x16 /* decrement by reading LOW */ +#define DM355EVM_MSP_INPUT_HIGH 0x17 +#define DM355EVM_MSP_INPUT_LOW 0x18 + +#endif /* __LINUX_I2C_DM355EVM_MSP */ diff --git a/include/linux/i2c/ds620.h b/include/linux/i2c/ds620.h new file mode 100644 index 0000000000..736bb87ac0 --- /dev/null +++ b/include/linux/i2c/ds620.h @@ -0,0 +1,21 @@ +#ifndef _LINUX_DS620_H +#define _LINUX_DS620_H + +#include +#include + +/* platform data for the DS620 temperature sensor and thermostat */ + +struct ds620_platform_data { + /* + * Thermostat output pin PO mode: + * 0 = always low (default) + * 1 = PO_LOW + * 2 = PO_HIGH + * + * (see Documentation/hwmon/ds620) + */ + int pomode; +}; + +#endif /* _LINUX_DS620_H */ diff --git a/include/linux/i2c/i2c-hid.h b/include/linux/i2c/i2c-hid.h new file mode 100644 index 0000000000..7aa901d920 --- /dev/null +++ b/include/linux/i2c/i2c-hid.h @@ -0,0 +1,36 @@ +/* + * HID over I2C protocol implementation + * + * Copyright (c) 2012 Benjamin Tissoires + * Copyright (c) 2012 Ecole Nationale de l'Aviation Civile, France + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file COPYING in the main directory of this archive for + * more details. + */ + +#ifndef __LINUX_I2C_HID_H +#define __LINUX_I2C_HID_H + +#include + +/** + * struct i2chid_platform_data - used by hid over i2c implementation. + * @hid_descriptor_address: i2c register where the HID descriptor is stored. + * + * Note that it is the responsibility of the platform driver (or the acpi 5.0 + * driver, or the flattened device tree) to setup the irq related to the gpio in + * the struct i2c_board_info. 
+ * The platform driver should also setup the gpio according to the device: + * + * A typical example is the following: + * irq = gpio_to_irq(intr_gpio); + * hkdk4412_i2c_devs5[0].irq = irq; // store the irq in i2c_board_info + * gpio_request(intr_gpio, "elan-irq"); + * s3c_gpio_setpull(intr_gpio, S3C_GPIO_PULL_UP); + */ +struct i2c_hid_platform_data { + u16 hid_descriptor_address; +}; + +#endif /* __LINUX_I2C_HID_H */ diff --git a/include/linux/i2c/i2c-sh_mobile.h b/include/linux/i2c/i2c-sh_mobile.h new file mode 100644 index 0000000000..06e3089795 --- /dev/null +++ b/include/linux/i2c/i2c-sh_mobile.h @@ -0,0 +1,11 @@ +#ifndef __I2C_SH_MOBILE_H__ +#define __I2C_SH_MOBILE_H__ + +#include + +struct i2c_sh_mobile_platform_data { + unsigned long bus_speed; + unsigned int clks_per_count; +}; + +#endif /* __I2C_SH_MOBILE_H__ */ diff --git a/include/linux/i2c/lm8323.h b/include/linux/i2c/lm8323.h new file mode 100644 index 0000000000..478d668bc5 --- /dev/null +++ b/include/linux/i2c/lm8323.h @@ -0,0 +1,46 @@ +/* + * lm8323.h - Configuration for LM8323 keypad driver. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation (version 2 of the License only). + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ + +#ifndef __LINUX_LM8323_H +#define __LINUX_LM8323_H + +#include + +/* + * Largest keycode that the chip can send, plus one, + * so keys can be mapped directly at the index of the + * LM8323 keycode instead of subtracting one. 
+ */ +#define LM8323_KEYMAP_SIZE (0x7f + 1) + +#define LM8323_NUM_PWMS 3 + +struct lm8323_platform_data { + int debounce_time; /* Time to watch for key bouncing, in ms. */ + int active_time; /* Idle time until sleep, in ms. */ + + int size_x; + int size_y; + bool repeat; + const unsigned short *keymap; + + const char *pwm_names[LM8323_NUM_PWMS]; + + const char *name; /* Device name. */ +}; + +#endif /* __LINUX_LM8323_H */ diff --git a/include/linux/i2c/ltc4245.h b/include/linux/i2c/ltc4245.h new file mode 100644 index 0000000000..56bda4be00 --- /dev/null +++ b/include/linux/i2c/ltc4245.h @@ -0,0 +1,21 @@ +/* + * Platform Data for LTC4245 hardware monitor chip + * + * Copyright (c) 2010 Ira W. Snyder + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + */ + +#ifndef LINUX_LTC4245_H +#define LINUX_LTC4245_H + +#include + +struct ltc4245_platform_data { + bool use_extra_gpios; +}; + +#endif /* LINUX_LTC4245_H */ diff --git a/include/linux/i2c/max6639.h b/include/linux/i2c/max6639.h new file mode 100644 index 0000000000..6011c42034 --- /dev/null +++ b/include/linux/i2c/max6639.h @@ -0,0 +1,14 @@ +#ifndef _LINUX_MAX6639_H +#define _LINUX_MAX6639_H + +#include + +/* platform data for the MAX6639 temperature sensor and fan control */ + +struct max6639_platform_data { + bool pwm_polarity; /* Polarity low (0) or high (1, default) */ + int ppr; /* Pulses per rotation 1..4 (default == 2) */ + int rpm_range; /* 2000, 4000 (default), 8000 or 16000 */ +}; + +#endif /* _LINUX_MAX6639_H */ diff --git a/include/linux/i2c/max732x.h b/include/linux/i2c/max732x.h new file mode 100644 index 0000000000..c04bac8bf2 --- /dev/null +++ b/include/linux/i2c/max732x.h @@ -0,0 +1,22 @@ +#ifndef __LINUX_I2C_MAX732X_H +#define __LINUX_I2C_MAX732X_H + +/* platform data for the MAX732x 
8/16-bit I/O expander driver */ + +struct max732x_platform_data { + /* number of the first GPIO */ + unsigned gpio_base; + + /* interrupt base */ + int irq_base; + + void *context; /* param to setup/teardown */ + + int (*setup)(struct i2c_client *client, + unsigned gpio, unsigned ngpio, + void *context); + int (*teardown)(struct i2c_client *client, + unsigned gpio, unsigned ngpio, + void *context); +}; +#endif /* __LINUX_I2C_MAX732X_H */ diff --git a/include/linux/i2c/mcs.h b/include/linux/i2c/mcs.h new file mode 100644 index 0000000000..61bb18a4fd --- /dev/null +++ b/include/linux/i2c/mcs.h @@ -0,0 +1,35 @@ +/* + * Copyright (C) 2009 - 2010 Samsung Electronics Co.Ltd + * Author: Joonyoung Shim + * Author: HeungJun Kim + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + * + */ + +#ifndef __LINUX_MCS_H +#define __LINUX_MCS_H + +#define MCS_KEY_MAP(v, c) ((((v) & 0xff) << 16) | ((c) & 0xffff)) +#define MCS_KEY_VAL(v) (((v) >> 16) & 0xff) +#define MCS_KEY_CODE(v) ((v) & 0xffff) + +struct mcs_platform_data { + void (*poweron)(bool); + void (*cfg_pin)(void); + + /* touchscreen */ + unsigned int x_size; + unsigned int y_size; + + /* touchkey */ + const u32 *keymap; + unsigned int keymap_size; + unsigned int key_maxval; + bool no_autorepeat; +}; + +#endif /* __LINUX_MCS_H */ diff --git a/include/linux/i2c/mms114.h b/include/linux/i2c/mms114.h new file mode 100644 index 0000000000..5722ebfb27 --- /dev/null +++ b/include/linux/i2c/mms114.h @@ -0,0 +1,24 @@ +/* + * Copyright (C) 2012 Samsung Electronics Co.Ltd + * Author: Joonyoung Shim + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundationr + */ + +#ifndef __LINUX_MMS114_H +#define 
__LINUX_MMS114_H + +struct mms114_platform_data { + unsigned int x_size; + unsigned int y_size; + unsigned int contact_threshold; + unsigned int moving_threshold; + bool x_invert; + bool y_invert; + + void (*cfg_pin)(bool); +}; + +#endif /* __LINUX_MMS114_H */ diff --git a/include/linux/i2c/mpr121_touchkey.h b/include/linux/i2c/mpr121_touchkey.h new file mode 100644 index 0000000000..f0bcc38bbb --- /dev/null +++ b/include/linux/i2c/mpr121_touchkey.h @@ -0,0 +1,20 @@ +/* Header file for Freescale MPR121 Capacitive Touch Sensor */ + +#ifndef _MPR121_TOUCHKEY_H +#define _MPR121_TOUCHKEY_H + +/** + * struct mpr121_platform_data - platform data for mpr121 sensor + * @keymap: pointer to array of KEY_* values representing keymap + * @keymap_size: size of the keymap + * @wakeup: configure the button as a wake-up source + * @vdd_uv: VDD voltage in uV + */ +struct mpr121_platform_data { + const unsigned short *keymap; + unsigned int keymap_size; + bool wakeup; + int vdd_uv; +}; + +#endif /* _MPR121_TOUCHKEY_H */ diff --git a/include/linux/i2c/pca954x.h b/include/linux/i2c/pca954x.h new file mode 100644 index 0000000000..1712677d59 --- /dev/null +++ b/include/linux/i2c/pca954x.h @@ -0,0 +1,48 @@ +/* + * + * pca954x.h - I2C multiplexer/switch support + * + * Copyright (c) 2008-2009 Rodolfo Giometti + * Copyright (c) 2008-2009 Eurotech S.p.A. + * Michael Lawnick + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. + */ + + +#ifndef _LINUX_I2C_PCA954X_H +#define _LINUX_I2C_PCA954X_H + +/* Platform data for the PCA954x I2C multiplexers */ + +/* Per channel initialisation data: + * @adap_id: bus number for the adapter. 0 = don't care + * @deselect_on_exit: set this entry to 1, if your H/W needs deselection + * of this channel after transaction. + * + */ +struct pca954x_platform_mode { + int adap_id; + unsigned int deselect_on_exit:1; + unsigned int class; +}; + +/* Per mux/switch data, used with i2c_register_board_info */ +struct pca954x_platform_data { + struct pca954x_platform_mode *modes; + int num_modes; +}; + +#endif /* _LINUX_I2C_PCA954X_H */ diff --git a/include/linux/i2c/pcf857x.h b/include/linux/i2c/pcf857x.h new file mode 100644 index 0000000000..0767a2a6b2 --- /dev/null +++ b/include/linux/i2c/pcf857x.h @@ -0,0 +1,44 @@ +#ifndef __LINUX_PCF857X_H +#define __LINUX_PCF857X_H + +/** + * struct pcf857x_platform_data - data to set up pcf857x driver + * @gpio_base: number of the chip's first GPIO + * @n_latch: optional bit-inverse of initial register value; if + * you leave this initialized to zero the driver will act + * like the chip was just reset + * @setup: optional callback issued once the GPIOs are valid + * @teardown: optional callback issued before the GPIOs are invalidated + * @context: optional parameter passed to setup() and teardown() + * + * In addition to the I2C_BOARD_INFO() state appropriate to each chip, + * the i2c_board_info used with the pcf875x driver must provide its + * platform_data (pointer to one of these structures) with at least + * the gpio_base value initialized. 
+ * + * The @setup callback may be used with the kind of board-specific glue + * which hands the (now-valid) GPIOs to other drivers, or which puts + * devices in their initial states using these GPIOs. + * + * These GPIO chips are only "quasi-bidirectional"; read the chip specs + * to understand the behavior. They don't have separate registers to + * record which pins are used for input or output, record which output + * values are driven, or provide access to input values. That must be + * inferred by reading the chip's value and knowing the last value written + * to it. If you leave n_latch initialized to zero, that last written + * value is presumed to be all ones (as if the chip were just reset). + */ +struct pcf857x_platform_data { + unsigned gpio_base; + unsigned n_latch; + + int (*setup)(struct i2c_client *client, + int gpio, unsigned ngpio, + void *context); + int (*teardown)(struct i2c_client *client, + int gpio, unsigned ngpio, + void *context); + void *context; +}; + +#endif /* __LINUX_PCF857X_H */ diff --git a/include/linux/i2c/pmbus.h b/include/linux/i2c/pmbus.h new file mode 100644 index 0000000000..ee3c2aba2a --- /dev/null +++ b/include/linux/i2c/pmbus.h @@ -0,0 +1,49 @@ +/* + * Hardware monitoring driver for PMBus devices + * + * Copyright (c) 2010, 2011 Ericsson AB. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. + */ + +#ifndef _PMBUS_H_ +#define _PMBUS_H_ + +/* flags */ + +/* + * PMBUS_SKIP_STATUS_CHECK + * + * During register detection, skip checking the status register for + * communication or command errors. + * + * Some PMBus chips respond with valid data when trying to read an unsupported + * register. For such chips, checking the status register is mandatory when + * trying to determine if a chip register exists or not. + * Other PMBus chips don't support the STATUS_CML register, or report + * communication errors for no explicable reason. For such chips, checking + * the status register must be disabled. + */ +#define PMBUS_SKIP_STATUS_CHECK (1 << 0) + +struct pmbus_platform_data { + u32 flags; /* Device specific flags */ + + /* regulator support */ + int num_regulators; + struct regulator_init_data *reg_init_data; +}; + +#endif /* _PMBUS_H_ */ diff --git a/include/linux/i2c/pxa-i2c.h b/include/linux/i2c/pxa-i2c.h new file mode 100644 index 0000000000..53aab243cb --- /dev/null +++ b/include/linux/i2c/pxa-i2c.h @@ -0,0 +1,85 @@ +/* + * i2c_pxa.h + * + * Copyright (C) 2002 Intrinsyc Software Inc. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + */ +#ifndef _I2C_PXA_H_ +#define _I2C_PXA_H_ + +#if 0 +#define DEF_TIMEOUT 3 +#else +/* need a longer timeout if we're dealing with the fact we may well be + * looking at a multi-master environment +*/ +#define DEF_TIMEOUT 32 +#endif + +#define BUS_ERROR (-EREMOTEIO) +#define XFER_NAKED (-ECONNREFUSED) +#define I2C_RETRY (-2000) /* an error has occurred retry transmit */ + +/* ICR initialize bit values +* +* 15. FM 0 (100 Khz operation) +* 14. UR 0 (No unit reset) +* 13. 
SADIE 0 (Disables the unit from interrupting on slave addresses +* matching its slave address) +* 12. ALDIE 0 (Disables the unit from interrupt when it loses arbitration +* in master mode) +* 11. SSDIE 0 (Disables interrupts from a slave stop detected, in slave mode) +* 10. BEIE 1 (Enable interrupts from detected bus errors, no ACK sent) +* 9. IRFIE 1 (Enable interrupts from full buffer received) +* 8. ITEIE 1 (Enables the I2C unit to interrupt when transmit buffer empty) +* 7. GCD 1 (Disables i2c unit response to general call messages as a slave) +* 6. IUE 0 (Disable unit until we change settings) +* 5. SCLE 1 (Enables the i2c clock output for master mode (drives SCL) +* 4. MA 0 (Only send stop with the ICR stop bit) +* 3. TB 0 (We are not transmitting a byte initially) +* 2. ACKNAK 0 (Send an ACK after the unit receives a byte) +* 1. STOP 0 (Do not send a STOP) +* 0. START 0 (Do not send a START) +* +*/ +#define I2C_ICR_INIT (ICR_BEIE | ICR_IRFIE | ICR_ITEIE | ICR_GCD | ICR_SCLE) + +/* I2C status register init values + * + * 10. BED 1 (Clear bus error detected) + * 9. SAD 1 (Clear slave address detected) + * 7. IRF 1 (Clear IDBR Receive Full) + * 6. ITE 1 (Clear IDBR Transmit Empty) + * 5. ALD 1 (Clear Arbitration Loss Detected) + * 4. 
SSD 1 (Clear Slave Stop Detected) + */ +#define I2C_ISR_INIT 0x7FF /* status register init */ + +struct i2c_slave_client; + +struct i2c_pxa_platform_data { + unsigned int slave_addr; + struct i2c_slave_client *slave; + unsigned int class; + unsigned int use_pio :1; + unsigned int fast_mode :1; + unsigned int high_mode:1; + unsigned char master_code; + unsigned long rate; +}; + +extern void pxa_set_i2c_info(struct i2c_pxa_platform_data *info); + +#ifdef CONFIG_PXA27x +extern void pxa27x_set_i2c_power_info(struct i2c_pxa_platform_data *info); +#endif + +#ifdef CONFIG_PXA3xx +extern void pxa3xx_set_i2c_power_info(struct i2c_pxa_platform_data *info); +#endif + +#endif diff --git a/include/linux/i2c/tc35876x.h b/include/linux/i2c/tc35876x.h new file mode 100644 index 0000000000..cd6a51c71e --- /dev/null +++ b/include/linux/i2c/tc35876x.h @@ -0,0 +1,11 @@ + +#ifndef _TC35876X_H +#define _TC35876X_H + +struct tc35876x_platform_data { + int gpio_bridge_reset; + int gpio_panel_bl_en; + int gpio_panel_vadd; +}; + +#endif /* _TC35876X_H */ diff --git a/include/linux/i2c/tps65010.h b/include/linux/i2c/tps65010.h new file mode 100644 index 0000000000..08aa92278d --- /dev/null +++ b/include/linux/i2c/tps65010.h @@ -0,0 +1,205 @@ +/* linux/i2c/tps65010.h + * + * Functions to access TPS65010 power management device. + * + * Copyright (C) 2004 Dirk Behme + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + * + * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN + * NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF + * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON + * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write to the Free Software Foundation, Inc., + * 675 Mass Ave, Cambridge, MA 02139, USA. + */ + +#ifndef __LINUX_I2C_TPS65010_H +#define __LINUX_I2C_TPS65010_H + +/* + * ---------------------------------------------------------------------------- + * Registers, all 8 bits + * ---------------------------------------------------------------------------- + */ + +#define TPS_CHGSTATUS 0x01 +# define TPS_CHG_USB (1 << 7) +# define TPS_CHG_AC (1 << 6) +# define TPS_CHG_THERM (1 << 5) +# define TPS_CHG_TERM (1 << 4) +# define TPS_CHG_TAPER_TMO (1 << 3) +# define TPS_CHG_CHG_TMO (1 << 2) +# define TPS_CHG_PRECHG_TMO (1 << 1) +# define TPS_CHG_TEMP_ERR (1 << 0) +#define TPS_REGSTATUS 0x02 +# define TPS_REG_ONOFF (1 << 7) +# define TPS_REG_COVER (1 << 6) +# define TPS_REG_UVLO (1 << 5) +# define TPS_REG_NO_CHG (1 << 4) /* tps65013 */ +# define TPS_REG_PG_LD02 (1 << 3) +# define TPS_REG_PG_LD01 (1 << 2) +# define TPS_REG_PG_MAIN (1 << 1) +# define TPS_REG_PG_CORE (1 << 0) +#define TPS_MASK1 0x03 +#define TPS_MASK2 0x04 +#define TPS_ACKINT1 0x05 +#define TPS_ACKINT2 0x06 +#define TPS_CHGCONFIG 0x07 +# define TPS_CHARGE_POR (1 << 7) /* 65010/65012 */ +# define TPS65013_AUA (1 << 7) /* 65011/65013 */ +# define TPS_CHARGE_RESET (1 << 6) +# define TPS_CHARGE_FAST (1 << 5) +# define TPS_CHARGE_CURRENT (3 << 3) +# define TPS_VBUS_500MA (1 << 2) +# define 
TPS_VBUS_CHARGING (1 << 1) +# define TPS_CHARGE_ENABLE (1 << 0) +#define TPS_LED1_ON 0x08 +#define TPS_LED1_PER 0x09 +#define TPS_LED2_ON 0x0a +#define TPS_LED2_PER 0x0b +#define TPS_VDCDC1 0x0c +# define TPS_ENABLE_LP (1 << 3) +#define TPS_VDCDC2 0x0d +# define TPS_LP_COREOFF (1 << 7) +# define TPS_VCORE_1_8V (7<<4) +# define TPS_VCORE_1_5V (6 << 4) +# define TPS_VCORE_1_4V (5 << 4) +# define TPS_VCORE_1_3V (4 << 4) +# define TPS_VCORE_1_2V (3 << 4) +# define TPS_VCORE_1_1V (2 << 4) +# define TPS_VCORE_1_0V (1 << 4) +# define TPS_VCORE_0_85V (0 << 4) +# define TPS_VCORE_LP_1_2V (3 << 2) +# define TPS_VCORE_LP_1_1V (2 << 2) +# define TPS_VCORE_LP_1_0V (1 << 2) +# define TPS_VCORE_LP_0_85V (0 << 2) +# define TPS_VIB (1 << 1) +# define TPS_VCORE_DISCH (1 << 0) +#define TPS_VREGS1 0x0e +# define TPS_LDO2_ENABLE (1 << 7) +# define TPS_LDO2_OFF (1 << 6) +# define TPS_VLDO2_3_0V (3 << 4) +# define TPS_VLDO2_2_75V (2 << 4) +# define TPS_VLDO2_2_5V (1 << 4) +# define TPS_VLDO2_1_8V (0 << 4) +# define TPS_LDO1_ENABLE (1 << 3) +# define TPS_LDO1_OFF (1 << 2) +# define TPS_VLDO1_3_0V (3 << 0) +# define TPS_VLDO1_2_75V (2 << 0) +# define TPS_VLDO1_2_5V (1 << 0) +# define TPS_VLDO1_ADJ (0 << 0) +#define TPS_MASK3 0x0f +#define TPS_DEFGPIO 0x10 + +/* + * ---------------------------------------------------------------------------- + * Macros used by exported functions + * ---------------------------------------------------------------------------- + */ + +#define LED1 1 +#define LED2 2 +#define OFF 0 +#define ON 1 +#define BLINK 2 +#define GPIO1 1 +#define GPIO2 2 +#define GPIO3 3 +#define GPIO4 4 +#define LOW 0 +#define HIGH 1 + +/* + * ---------------------------------------------------------------------------- + * Exported functions + * ---------------------------------------------------------------------------- + */ + +/* Draw from VBUS: + * 0 mA -- DON'T DRAW (might supply power instead) + * 100 mA -- usb unit load (slowest charge rate) + * 500 mA -- usb high power (fast 
battery charge) + */ +extern int tps65010_set_vbus_draw(unsigned mA); + +/* tps65010_set_gpio_out_value parameter: + * gpio: GPIO1, GPIO2, GPIO3 or GPIO4 + * value: LOW or HIGH + */ +extern int tps65010_set_gpio_out_value(unsigned gpio, unsigned value); + +/* tps65010_set_led parameter: + * led: LED1 or LED2 + * mode: ON, OFF or BLINK + */ +extern int tps65010_set_led(unsigned led, unsigned mode); + +/* tps65010_set_vib parameter: + * value: ON or OFF + */ +extern int tps65010_set_vib(unsigned value); + +/* tps65010_set_low_pwr parameter: + * mode: ON or OFF + */ +extern int tps65010_set_low_pwr(unsigned mode); + +/* tps65010_config_vregs1 parameter: + * value to be written to VREGS1 register + * Note: The complete register is written, set all bits you need + */ +extern int tps65010_config_vregs1(unsigned value); + +/* tps65013_set_low_pwr parameter: + * mode: ON or OFF + */ +extern int tps65013_set_low_pwr(unsigned mode); + +/* tps65010_set_vdcdc2 + * value to be written to VDCDC2 + */ +extern int tps65010_config_vdcdc2(unsigned value); + +struct i2c_client; + +/** + * struct tps65010_board - packages GPIO and LED lines + * @base: the GPIO number to assign to GPIO-1 + * @outmask: bit (N-1) is set to allow GPIO-N to be used as an + * (open drain) output + * @setup: optional callback issued once the GPIOs are valid + * @teardown: optional callback issued before the GPIOs are invalidated + * @context: optional parameter passed to setup() and teardown() + * + * Board data may be used to package the GPIO (and LED) lines for use + * in by the generic GPIO and LED frameworks. The first four GPIOs + * starting at gpio_base are GPIO1..GPIO4. The next two are LED1/nPG + * and LED2 (with hardware blinking capability, not currently exposed). + * + * The @setup callback may be used with the kind of board-specific glue + * which hands the (now-valid) GPIOs to other drivers, or which puts + * devices in their initial states using these GPIOs. 
+ */ +struct tps65010_board { + int base; + unsigned outmask; + + int (*setup)(struct i2c_client *client, void *context); + int (*teardown)(struct i2c_client *client, void *context); + void *context; +}; + +#endif /* __LINUX_I2C_TPS65010_H */ + diff --git a/include/linux/i2c/tsc2007.h b/include/linux/i2c/tsc2007.h new file mode 100644 index 0000000000..4f35b6ad38 --- /dev/null +++ b/include/linux/i2c/tsc2007.h @@ -0,0 +1,22 @@ +#ifndef __LINUX_I2C_TSC2007_H +#define __LINUX_I2C_TSC2007_H + +/* linux/i2c/tsc2007.h */ + +struct tsc2007_platform_data { + u16 model; /* 2007. */ + u16 x_plate_ohms; /* must be non-zero value */ + u16 max_rt; /* max. resistance above which samples are ignored */ + unsigned long poll_period; /* time (in ms) between samples */ + int fuzzx; /* fuzz factor for X, Y and pressure axes */ + int fuzzy; + int fuzzz; + + int (*get_pendown_state)(struct device *); + /* If needed, clear 2nd level interrupt source */ + void (*clear_penirq)(void); + int (*init_platform_hw)(void); + void (*exit_platform_hw)(void); +}; + +#endif diff --git a/include/linux/i2c/twl.h b/include/linux/i2c/twl.h new file mode 100644 index 0000000000..9ad7828d9d --- /dev/null +++ b/include/linux/i2c/twl.h @@ -0,0 +1,876 @@ +/* + * twl4030.h - header for TWL4030 PM and audio CODEC device + * + * Copyright (C) 2005-2006 Texas Instruments, Inc. + * + * Based on tlv320aic23.c: + * Copyright (c) by Kai Svahn + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + * + */ + +#ifndef __TWL_H_ +#define __TWL_H_ + +#include +#include + +/* + * Using the twl4030 core we address registers using a pair + * { module id, relative register offset } + * which that core then maps to the relevant + * { i2c slave, absolute register address } + * + * The module IDs are meaningful only to the twl4030 core code, + * which uses them as array indices to look up the first register + * address each module uses within a given i2c slave. + */ + +/* Module IDs for similar functionalities found in twl4030/twl6030 */ +enum twl_module_ids { + TWL_MODULE_USB, + TWL_MODULE_PIH, + TWL_MODULE_MAIN_CHARGE, + TWL_MODULE_PM_MASTER, + TWL_MODULE_PM_RECEIVER, + + TWL_MODULE_RTC, + TWL_MODULE_PWM, + TWL_MODULE_LED, + TWL_MODULE_SECURED_REG, + + TWL_MODULE_LAST, +}; + +/* Modules only available in twl4030 series */ +enum twl4030_module_ids { + TWL4030_MODULE_AUDIO_VOICE = TWL_MODULE_LAST, + TWL4030_MODULE_GPIO, + TWL4030_MODULE_INTBR, + TWL4030_MODULE_TEST, + TWL4030_MODULE_KEYPAD, + + TWL4030_MODULE_MADC, + TWL4030_MODULE_INTERRUPTS, + TWL4030_MODULE_PRECHARGE, + TWL4030_MODULE_BACKUP, + TWL4030_MODULE_INT, + + TWL5031_MODULE_ACCESSORY, + TWL5031_MODULE_INTERRUPTS, + + TWL4030_MODULE_LAST, +}; + +/* Modules only available in twl6030 series */ +enum twl6030_module_ids { + TWL6030_MODULE_ID0 = TWL_MODULE_LAST, + TWL6030_MODULE_ID1, + TWL6030_MODULE_ID2, + TWL6030_MODULE_GPADC, + TWL6030_MODULE_GASGAUGE, + + TWL6030_MODULE_LAST, +}; + +/* Until the clients has been converted to use TWL_MODULE_LED */ +#define TWL4030_MODULE_LED TWL_MODULE_LED + +#define GPIO_INTR_OFFSET 0 +#define KEYPAD_INTR_OFFSET 1 +#define BCI_INTR_OFFSET 2 +#define MADC_INTR_OFFSET 3 +#define USB_INTR_OFFSET 4 +#define CHARGERFAULT_INTR_OFFSET 5 +#define BCI_PRES_INTR_OFFSET 9 
+#define USB_PRES_INTR_OFFSET 10 +#define RTC_INTR_OFFSET 11 + +/* + * Offset from TWL6030_IRQ_BASE / pdata->irq_base + */ +#define PWR_INTR_OFFSET 0 +#define HOTDIE_INTR_OFFSET 12 +#define SMPSLDO_INTR_OFFSET 13 +#define BATDETECT_INTR_OFFSET 14 +#define SIMDETECT_INTR_OFFSET 15 +#define MMCDETECT_INTR_OFFSET 16 +#define GASGAUGE_INTR_OFFSET 17 +#define USBOTG_INTR_OFFSET 4 +#define CHARGER_INTR_OFFSET 2 +#define RSV_INTR_OFFSET 0 + +/* INT register offsets */ +#define REG_INT_STS_A 0x00 +#define REG_INT_STS_B 0x01 +#define REG_INT_STS_C 0x02 + +#define REG_INT_MSK_LINE_A 0x03 +#define REG_INT_MSK_LINE_B 0x04 +#define REG_INT_MSK_LINE_C 0x05 + +#define REG_INT_MSK_STS_A 0x06 +#define REG_INT_MSK_STS_B 0x07 +#define REG_INT_MSK_STS_C 0x08 + +/* MASK INT REG GROUP A */ +#define TWL6030_PWR_INT_MASK 0x07 +#define TWL6030_RTC_INT_MASK 0x18 +#define TWL6030_HOTDIE_INT_MASK 0x20 +#define TWL6030_SMPSLDOA_INT_MASK 0xC0 + +/* MASK INT REG GROUP B */ +#define TWL6030_SMPSLDOB_INT_MASK 0x01 +#define TWL6030_BATDETECT_INT_MASK 0x02 +#define TWL6030_SIMDETECT_INT_MASK 0x04 +#define TWL6030_MMCDETECT_INT_MASK 0x08 +#define TWL6030_GPADC_INT_MASK 0x60 +#define TWL6030_GASGAUGE_INT_MASK 0x80 + +/* MASK INT REG GROUP C */ +#define TWL6030_USBOTG_INT_MASK 0x0F +#define TWL6030_CHARGER_CTRL_INT_MASK 0x10 +#define TWL6030_CHARGER_FAULT_INT_MASK 0x60 + +#define TWL6030_MMCCTRL 0xEE +#define VMMC_AUTO_OFF (0x1 << 3) +#define SW_FC (0x1 << 2) +#define STS_MMC 0x1 + +#define TWL6030_CFG_INPUT_PUPD3 0xF2 +#define MMC_PU (0x1 << 3) +#define MMC_PD (0x1 << 2) + +#define TWL_SIL_TYPE(rev) ((rev) & 0x00FFFFFF) +#define TWL_SIL_REV(rev) ((rev) >> 24) +#define TWL_SIL_5030 0x09002F +#define TWL5030_REV_1_0 0x00 +#define TWL5030_REV_1_1 0x10 +#define TWL5030_REV_1_2 0x30 + +#define TWL4030_CLASS_ID 0x4030 +#define TWL6030_CLASS_ID 0x6030 +unsigned int twl_rev(void); +#define GET_TWL_REV (twl_rev()) +#define TWL_CLASS_IS(class, id) \ +static inline int twl_class_is_ ##class(void) \ +{ \ + return 
((id) == (GET_TWL_REV)) ? 1 : 0; \ +} + +TWL_CLASS_IS(4030, TWL4030_CLASS_ID) +TWL_CLASS_IS(6030, TWL6030_CLASS_ID) + +/* Set the regcache bypass for the regmap associated with the nodule */ +int twl_set_regcache_bypass(u8 mod_no, bool enable); + +/* + * Read and write several 8-bit registers at once. + */ +int twl_i2c_write(u8 mod_no, u8 *value, u8 reg, unsigned num_bytes); +int twl_i2c_read(u8 mod_no, u8 *value, u8 reg, unsigned num_bytes); + +/* + * Read and write single 8-bit registers + */ +static inline int twl_i2c_write_u8(u8 mod_no, u8 val, u8 reg) { + return twl_i2c_write(mod_no, &val, reg, 1); +} + +static inline int twl_i2c_read_u8(u8 mod_no, u8 *val, u8 reg) { + return twl_i2c_read(mod_no, val, reg, 1); +} + +static inline int twl_i2c_write_u16(u8 mod_no, u16 val, u8 reg) { + val = cpu_to_le16(val); + return twl_i2c_write(mod_no, (u8*) &val, reg, 2); +} + +static inline int twl_i2c_read_u16(u8 mod_no, u16 *val, u8 reg) { + int ret; + ret = twl_i2c_read(mod_no, (u8*) val, reg, 2); + *val = le16_to_cpu(*val); + return ret; +} + +int twl_get_type(void); +int twl_get_version(void); +int twl_get_hfclk_rate(void); + +int twl6030_interrupt_unmask(u8 bit_mask, u8 offset); +int twl6030_interrupt_mask(u8 bit_mask, u8 offset); + +/* Card detect Configuration for MMC1 Controller on OMAP4 */ +#ifdef CONFIG_TWL4030_CORE +int twl6030_mmc_card_detect_config(void); +#else +static inline int twl6030_mmc_card_detect_config(void) +{ + pr_debug("twl6030_mmc_card_detect_config not supported\n"); + return 0; +} +#endif + +/* MMC1 Controller on OMAP4 uses Phoenix irq for Card detect */ +#ifdef CONFIG_TWL4030_CORE +int twl6030_mmc_card_detect(struct device *dev, int slot); +#else +static inline int twl6030_mmc_card_detect(struct device *dev, int slot) +{ + pr_debug("Call back twl6030_mmc_card_detect not supported\n"); + return -EIO; +} +#endif +/*----------------------------------------------------------------------*/ + +/* + * NOTE: at up to 1024 registers, this is a big chip. 
+ * + * Avoid putting register declarations in this file, instead of into + * a driver-private file, unless some of the registers in a block + * need to be shared with other drivers. One example is blocks that + * have Secondary IRQ Handler (SIH) registers. + */ + +#define TWL4030_SIH_CTRL_EXCLEN_MASK BIT(0) +#define TWL4030_SIH_CTRL_PENDDIS_MASK BIT(1) +#define TWL4030_SIH_CTRL_COR_MASK BIT(2) + +/*----------------------------------------------------------------------*/ + +/* + * GPIO Block Register offsets (use TWL4030_MODULE_GPIO) + */ + +#define REG_GPIODATAIN1 0x0 +#define REG_GPIODATAIN2 0x1 +#define REG_GPIODATAIN3 0x2 +#define REG_GPIODATADIR1 0x3 +#define REG_GPIODATADIR2 0x4 +#define REG_GPIODATADIR3 0x5 +#define REG_GPIODATAOUT1 0x6 +#define REG_GPIODATAOUT2 0x7 +#define REG_GPIODATAOUT3 0x8 +#define REG_CLEARGPIODATAOUT1 0x9 +#define REG_CLEARGPIODATAOUT2 0xA +#define REG_CLEARGPIODATAOUT3 0xB +#define REG_SETGPIODATAOUT1 0xC +#define REG_SETGPIODATAOUT2 0xD +#define REG_SETGPIODATAOUT3 0xE +#define REG_GPIO_DEBEN1 0xF +#define REG_GPIO_DEBEN2 0x10 +#define REG_GPIO_DEBEN3 0x11 +#define REG_GPIO_CTRL 0x12 +#define REG_GPIOPUPDCTR1 0x13 +#define REG_GPIOPUPDCTR2 0x14 +#define REG_GPIOPUPDCTR3 0x15 +#define REG_GPIOPUPDCTR4 0x16 +#define REG_GPIOPUPDCTR5 0x17 +#define REG_GPIO_ISR1A 0x19 +#define REG_GPIO_ISR2A 0x1A +#define REG_GPIO_ISR3A 0x1B +#define REG_GPIO_IMR1A 0x1C +#define REG_GPIO_IMR2A 0x1D +#define REG_GPIO_IMR3A 0x1E +#define REG_GPIO_ISR1B 0x1F +#define REG_GPIO_ISR2B 0x20 +#define REG_GPIO_ISR3B 0x21 +#define REG_GPIO_IMR1B 0x22 +#define REG_GPIO_IMR2B 0x23 +#define REG_GPIO_IMR3B 0x24 +#define REG_GPIO_EDR1 0x28 +#define REG_GPIO_EDR2 0x29 +#define REG_GPIO_EDR3 0x2A +#define REG_GPIO_EDR4 0x2B +#define REG_GPIO_EDR5 0x2C +#define REG_GPIO_SIH_CTRL 0x2D + +/* Up to 18 signals are available as GPIOs, when their + * pins are not assigned to another use (such as ULPI/USB). 
+ */ +#define TWL4030_GPIO_MAX 18 + +/*----------------------------------------------------------------------*/ + +/*Interface Bit Register (INTBR) offsets + *(Use TWL_4030_MODULE_INTBR) + */ + +#define REG_IDCODE_7_0 0x00 +#define REG_IDCODE_15_8 0x01 +#define REG_IDCODE_16_23 0x02 +#define REG_IDCODE_31_24 0x03 +#define REG_GPPUPDCTR1 0x0F +#define REG_UNLOCK_TEST_REG 0x12 + +/*I2C1 and I2C4(SR) SDA/SCL pull-up control bits */ + +#define I2C_SCL_CTRL_PU BIT(0) +#define I2C_SDA_CTRL_PU BIT(2) +#define SR_I2C_SCL_CTRL_PU BIT(4) +#define SR_I2C_SDA_CTRL_PU BIT(6) + +#define TWL_EEPROM_R_UNLOCK 0x49 + +/*----------------------------------------------------------------------*/ + +/* + * Keypad register offsets (use TWL4030_MODULE_KEYPAD) + * ... SIH/interrupt only + */ + +#define TWL4030_KEYPAD_KEYP_ISR1 0x11 +#define TWL4030_KEYPAD_KEYP_IMR1 0x12 +#define TWL4030_KEYPAD_KEYP_ISR2 0x13 +#define TWL4030_KEYPAD_KEYP_IMR2 0x14 +#define TWL4030_KEYPAD_KEYP_SIR 0x15 /* test register */ +#define TWL4030_KEYPAD_KEYP_EDR 0x16 +#define TWL4030_KEYPAD_KEYP_SIH_CTRL 0x17 + +/*----------------------------------------------------------------------*/ + +/* + * Multichannel ADC register offsets (use TWL4030_MODULE_MADC) + * ... 
SIH/interrupt only + */ + +#define TWL4030_MADC_ISR1 0x61 +#define TWL4030_MADC_IMR1 0x62 +#define TWL4030_MADC_ISR2 0x63 +#define TWL4030_MADC_IMR2 0x64 +#define TWL4030_MADC_SIR 0x65 /* test register */ +#define TWL4030_MADC_EDR 0x66 +#define TWL4030_MADC_SIH_CTRL 0x67 + +/*----------------------------------------------------------------------*/ + +/* + * Battery charger register offsets (use TWL4030_MODULE_INTERRUPTS) + */ + +#define TWL4030_INTERRUPTS_BCIISR1A 0x0 +#define TWL4030_INTERRUPTS_BCIISR2A 0x1 +#define TWL4030_INTERRUPTS_BCIIMR1A 0x2 +#define TWL4030_INTERRUPTS_BCIIMR2A 0x3 +#define TWL4030_INTERRUPTS_BCIISR1B 0x4 +#define TWL4030_INTERRUPTS_BCIISR2B 0x5 +#define TWL4030_INTERRUPTS_BCIIMR1B 0x6 +#define TWL4030_INTERRUPTS_BCIIMR2B 0x7 +#define TWL4030_INTERRUPTS_BCISIR1 0x8 /* test register */ +#define TWL4030_INTERRUPTS_BCISIR2 0x9 /* test register */ +#define TWL4030_INTERRUPTS_BCIEDR1 0xa +#define TWL4030_INTERRUPTS_BCIEDR2 0xb +#define TWL4030_INTERRUPTS_BCIEDR3 0xc +#define TWL4030_INTERRUPTS_BCISIHCTRL 0xd + +/*----------------------------------------------------------------------*/ + +/* + * Power Interrupt block register offsets (use TWL4030_MODULE_INT) + */ + +#define TWL4030_INT_PWR_ISR1 0x0 +#define TWL4030_INT_PWR_IMR1 0x1 +#define TWL4030_INT_PWR_ISR2 0x2 +#define TWL4030_INT_PWR_IMR2 0x3 +#define TWL4030_INT_PWR_SIR 0x4 /* test register */ +#define TWL4030_INT_PWR_EDR1 0x5 +#define TWL4030_INT_PWR_EDR2 0x6 +#define TWL4030_INT_PWR_SIH_CTRL 0x7 + +/*----------------------------------------------------------------------*/ + +/* + * Accessory Interrupts + */ +#define TWL5031_ACIIMR_LSB 0x05 +#define TWL5031_ACIIMR_MSB 0x06 +#define TWL5031_ACIIDR_LSB 0x07 +#define TWL5031_ACIIDR_MSB 0x08 +#define TWL5031_ACCISR1 0x0F +#define TWL5031_ACCIMR1 0x10 +#define TWL5031_ACCISR2 0x11 +#define TWL5031_ACCIMR2 0x12 +#define TWL5031_ACCSIR 0x13 +#define TWL5031_ACCEDR1 0x14 +#define TWL5031_ACCSIHCTRL 0x15 + 
+/*----------------------------------------------------------------------*/ + +/* + * Battery Charger Controller + */ + +#define TWL5031_INTERRUPTS_BCIISR1 0x0 +#define TWL5031_INTERRUPTS_BCIIMR1 0x1 +#define TWL5031_INTERRUPTS_BCIISR2 0x2 +#define TWL5031_INTERRUPTS_BCIIMR2 0x3 +#define TWL5031_INTERRUPTS_BCISIR 0x4 +#define TWL5031_INTERRUPTS_BCIEDR1 0x5 +#define TWL5031_INTERRUPTS_BCIEDR2 0x6 +#define TWL5031_INTERRUPTS_BCISIHCTRL 0x7 + +/*----------------------------------------------------------------------*/ + +/* + * PM Master module register offsets (use TWL4030_MODULE_PM_MASTER) + */ + +#define TWL4030_PM_MASTER_CFG_P1_TRANSITION 0x00 +#define TWL4030_PM_MASTER_CFG_P2_TRANSITION 0x01 +#define TWL4030_PM_MASTER_CFG_P3_TRANSITION 0x02 +#define TWL4030_PM_MASTER_CFG_P123_TRANSITION 0x03 +#define TWL4030_PM_MASTER_STS_BOOT 0x04 +#define TWL4030_PM_MASTER_CFG_BOOT 0x05 +#define TWL4030_PM_MASTER_SHUNDAN 0x06 +#define TWL4030_PM_MASTER_BOOT_BCI 0x07 +#define TWL4030_PM_MASTER_CFG_PWRANA1 0x08 +#define TWL4030_PM_MASTER_CFG_PWRANA2 0x09 +#define TWL4030_PM_MASTER_BACKUP_MISC_STS 0x0b +#define TWL4030_PM_MASTER_BACKUP_MISC_CFG 0x0c +#define TWL4030_PM_MASTER_BACKUP_MISC_TST 0x0d +#define TWL4030_PM_MASTER_PROTECT_KEY 0x0e +#define TWL4030_PM_MASTER_STS_HW_CONDITIONS 0x0f +#define TWL4030_PM_MASTER_P1_SW_EVENTS 0x10 +#define TWL4030_PM_MASTER_P2_SW_EVENTS 0x11 +#define TWL4030_PM_MASTER_P3_SW_EVENTS 0x12 +#define TWL4030_PM_MASTER_STS_P123_STATE 0x13 +#define TWL4030_PM_MASTER_PB_CFG 0x14 +#define TWL4030_PM_MASTER_PB_WORD_MSB 0x15 +#define TWL4030_PM_MASTER_PB_WORD_LSB 0x16 +#define TWL4030_PM_MASTER_SEQ_ADD_W2P 0x1c +#define TWL4030_PM_MASTER_SEQ_ADD_P2A 0x1d +#define TWL4030_PM_MASTER_SEQ_ADD_A2W 0x1e +#define TWL4030_PM_MASTER_SEQ_ADD_A2S 0x1f +#define TWL4030_PM_MASTER_SEQ_ADD_S2A12 0x20 +#define TWL4030_PM_MASTER_SEQ_ADD_S2A3 0x21 +#define TWL4030_PM_MASTER_SEQ_ADD_WARM 0x22 +#define TWL4030_PM_MASTER_MEMORY_ADDRESS 0x23 +#define TWL4030_PM_MASTER_MEMORY_DATA 
0x24 + +#define TWL4030_PM_MASTER_KEY_CFG1 0xc0 +#define TWL4030_PM_MASTER_KEY_CFG2 0x0c + +#define TWL4030_PM_MASTER_KEY_TST1 0xe0 +#define TWL4030_PM_MASTER_KEY_TST2 0x0e + +#define TWL4030_PM_MASTER_GLOBAL_TST 0xb6 + +/*----------------------------------------------------------------------*/ + +/* Power bus message definitions */ + +/* The TWL4030/5030 splits its power-management resources (the various + * regulators, clock and reset lines) into 3 processor groups - P1, P2 and + * P3. These groups can then be configured to transition between sleep, wait-on + * and active states by sending messages to the power bus. See Section 5.4.2 + * Power Resources of TWL4030 TRM + */ + +/* Processor groups */ +#define DEV_GRP_NULL 0x0 +#define DEV_GRP_P1 0x1 /* P1: all OMAP devices */ +#define DEV_GRP_P2 0x2 /* P2: all Modem devices */ +#define DEV_GRP_P3 0x4 /* P3: all peripheral devices */ + +/* Resource groups */ +#define RES_GRP_RES 0x0 /* Reserved */ +#define RES_GRP_PP 0x1 /* Power providers */ +#define RES_GRP_RC 0x2 /* Reset and control */ +#define RES_GRP_PP_RC 0x3 +#define RES_GRP_PR 0x4 /* Power references */ +#define RES_GRP_PP_PR 0x5 +#define RES_GRP_RC_PR 0x6 +#define RES_GRP_ALL 0x7 /* All resource groups */ + +#define RES_TYPE2_R0 0x0 +#define RES_TYPE2_R1 0x1 +#define RES_TYPE2_R2 0x2 + +#define RES_TYPE_R0 0x0 +#define RES_TYPE_ALL 0x7 + +/* Resource states */ +#define RES_STATE_WRST 0xF +#define RES_STATE_ACTIVE 0xE +#define RES_STATE_SLEEP 0x8 +#define RES_STATE_OFF 0x0 + +/* Power resources */ + +/* Power providers */ +#define RES_VAUX1 1 +#define RES_VAUX2 2 +#define RES_VAUX3 3 +#define RES_VAUX4 4 +#define RES_VMMC1 5 +#define RES_VMMC2 6 +#define RES_VPLL1 7 +#define RES_VPLL2 8 +#define RES_VSIM 9 +#define RES_VDAC 10 +#define RES_VINTANA1 11 +#define RES_VINTANA2 12 +#define RES_VINTDIG 13 +#define RES_VIO 14 +#define RES_VDD1 15 +#define RES_VDD2 16 +#define RES_VUSB_1V5 17 +#define RES_VUSB_1V8 18 +#define RES_VUSB_3V1 19 +#define RES_VUSBCP 20 
+#define RES_REGEN 21 +/* Reset and control */ +#define RES_NRES_PWRON 22 +#define RES_CLKEN 23 +#define RES_SYSEN 24 +#define RES_HFCLKOUT 25 +#define RES_32KCLKOUT 26 +#define RES_RESET 27 +/* Power Reference */ +#define RES_MAIN_REF 28 + +#define TOTAL_RESOURCES 28 +/* + * Power Bus Message Format ... these can be sent individually by Linux, + * but are usually part of downloaded scripts that are run when various + * power events are triggered. + * + * Broadcast Message (16 Bits): + * DEV_GRP[15:13] MT[12] RES_GRP[11:9] RES_TYPE2[8:7] RES_TYPE[6:4] + * RES_STATE[3:0] + * + * Singular Message (16 Bits): + * DEV_GRP[15:13] MT[12] RES_ID[11:4] RES_STATE[3:0] + */ + +#define MSG_BROADCAST(devgrp, grp, type, type2, state) \ + ( (devgrp) << 13 | 1 << 12 | (grp) << 9 | (type2) << 7 \ + | (type) << 4 | (state)) + +#define MSG_SINGULAR(devgrp, id, state) \ + ((devgrp) << 13 | 0 << 12 | (id) << 4 | (state)) + +#define MSG_BROADCAST_ALL(devgrp, state) \ + ((devgrp) << 5 | (state)) + +#define MSG_BROADCAST_REF MSG_BROADCAST_ALL +#define MSG_BROADCAST_PROV MSG_BROADCAST_ALL +#define MSG_BROADCAST__CLK_RST MSG_BROADCAST_ALL +/*----------------------------------------------------------------------*/ + +struct twl4030_clock_init_data { + bool ck32k_lowpwr_enable; +}; + +struct twl4030_bci_platform_data { + int *battery_tmp_tbl; + unsigned int tblsize; + int bb_uvolt; /* voltage to charge backup battery */ + int bb_uamp; /* current for backup battery charging */ +}; + +/* TWL4030_GPIO_MAX (18) GPIOs, with interrupts */ +struct twl4030_gpio_platform_data { + /* package the two LED signals as output-only GPIOs? */ + bool use_leds; + + /* gpio-n should control VMMC(n+1) if BIT(n) in mmc_cd is set */ + u8 mmc_cd; + + /* if BIT(N) is set, or VMMC(n+1) is linked, debounce GPIO-N */ + u32 debounce; + + /* For gpio-N, bit (1 << N) in "pullups" is set if that pullup + * should be enabled. Else, if that bit is set in "pulldowns", + * that pulldown is enabled. 
Don't waste power by letting any + * digital inputs float... + */ + u32 pullups; + u32 pulldowns; + + int (*setup)(struct device *dev, + unsigned gpio, unsigned ngpio); + int (*teardown)(struct device *dev, + unsigned gpio, unsigned ngpio); +}; + +struct twl4030_madc_platform_data { + int irq_line; +}; + +/* Boards have unique mappings of {row, col} --> keycode. + * Column and row are 8 bits each, but range only from 0..7. + * a PERSISTENT_KEY is "always on" and never reported. + */ +#define PERSISTENT_KEY(r, c) KEY((r), (c), KEY_RESERVED) + +struct twl4030_keypad_data { + const struct matrix_keymap_data *keymap_data; + unsigned rows; + unsigned cols; + bool rep; +}; + +enum twl4030_usb_mode { + T2_USB_MODE_ULPI = 1, + T2_USB_MODE_CEA2011_3PIN = 2, +}; + +struct twl4030_usb_data { + enum twl4030_usb_mode usb_mode; + unsigned long features; + + int (*phy_init)(struct device *dev); + int (*phy_exit)(struct device *dev); + /* Power on/off the PHY */ + int (*phy_power)(struct device *dev, int iD, int on); + /* enable/disable phy clocks */ + int (*phy_set_clock)(struct device *dev, int on); + /* suspend/resume of phy */ + int (*phy_suspend)(struct device *dev, int suspend); +}; + +struct twl4030_ins { + u16 pmb_message; + u8 delay; +}; + +struct twl4030_script { + struct twl4030_ins *script; + unsigned size; + u8 flags; +#define TWL4030_WRST_SCRIPT (1<<0) +#define TWL4030_WAKEUP12_SCRIPT (1<<1) +#define TWL4030_WAKEUP3_SCRIPT (1<<2) +#define TWL4030_SLEEP_SCRIPT (1<<3) +}; + +struct twl4030_resconfig { + u8 resource; + u8 devgroup; /* Processor group that Power resource belongs to */ + u8 type; /* Power resource addressed, 6 / broadcast message */ + u8 type2; /* Power resource addressed, 3 / broadcast message */ + u8 remap_off; /* off state remapping */ + u8 remap_sleep; /* sleep state remapping */ +}; + +struct twl4030_power_data { + struct twl4030_script **scripts; + unsigned num; + struct twl4030_resconfig *resource_config; + struct twl4030_resconfig *board_config; 
+#define TWL4030_RESCONFIG_UNDEF ((u8)-1) + bool use_poweroff; /* Board is wired for TWL poweroff */ + bool ac_charger_quirk; /* Disable AC charger on board */ +}; + +extern int twl4030_remove_script(u8 flags); +extern void twl4030_power_off(void); + +struct twl4030_codec_data { + unsigned int digimic_delay; /* in ms */ + unsigned int ramp_delay_value; + unsigned int offset_cncl_path; + unsigned int hs_extmute:1; + int hs_extmute_gpio; +}; + +struct twl4030_vibra_data { + unsigned int coexist; +}; + +struct twl4030_audio_data { + unsigned int audio_mclk; + struct twl4030_codec_data *codec; + struct twl4030_vibra_data *vibra; + + /* twl6040 */ + int audpwron_gpio; /* audio power-on gpio */ + int naudint_irq; /* audio interrupt */ + unsigned int irq_base; +}; + +struct twl4030_platform_data { + struct twl4030_clock_init_data *clock; + struct twl4030_bci_platform_data *bci; + struct twl4030_gpio_platform_data *gpio; + struct twl4030_madc_platform_data *madc; + struct twl4030_keypad_data *keypad; + struct twl4030_usb_data *usb; + struct twl4030_power_data *power; + struct twl4030_audio_data *audio; + + /* Common LDO regulators for TWL4030/TWL6030 */ + struct regulator_init_data *vdac; + struct regulator_init_data *vaux1; + struct regulator_init_data *vaux2; + struct regulator_init_data *vaux3; + struct regulator_init_data *vdd1; + struct regulator_init_data *vdd2; + struct regulator_init_data *vdd3; + /* TWL4030 LDO regulators */ + struct regulator_init_data *vpll1; + struct regulator_init_data *vpll2; + struct regulator_init_data *vmmc1; + struct regulator_init_data *vmmc2; + struct regulator_init_data *vsim; + struct regulator_init_data *vaux4; + struct regulator_init_data *vio; + struct regulator_init_data *vintana1; + struct regulator_init_data *vintana2; + struct regulator_init_data *vintdig; + /* TWL6030 LDO regulators */ + struct regulator_init_data *vmmc; + struct regulator_init_data *vpp; + struct regulator_init_data *vusim; + struct regulator_init_data *vana; 
+ struct regulator_init_data *vcxio; + struct regulator_init_data *vusb; + struct regulator_init_data *clk32kg; + struct regulator_init_data *v1v8; + struct regulator_init_data *v2v1; + /* TWL6032 LDO regulators */ + struct regulator_init_data *ldo1; + struct regulator_init_data *ldo2; + struct regulator_init_data *ldo3; + struct regulator_init_data *ldo4; + struct regulator_init_data *ldo5; + struct regulator_init_data *ldo6; + struct regulator_init_data *ldo7; + struct regulator_init_data *ldoln; + struct regulator_init_data *ldousb; + /* TWL6032 DCDC regulators */ + struct regulator_init_data *smps3; + struct regulator_init_data *smps4; + struct regulator_init_data *vio6025; +}; + +struct twl_regulator_driver_data { + int (*set_voltage)(void *data, int target_uV); + int (*get_voltage)(void *data); + void *data; + unsigned long features; +}; +/* chip-specific feature flags, for twl_regulator_driver_data.features */ +#define TWL4030_VAUX2 BIT(0) /* pre-5030 voltage ranges */ +#define TPS_SUBSET BIT(1) /* tps659[23]0 have fewer LDOs */ +#define TWL5031 BIT(2) /* twl5031 has different registers */ +#define TWL6030_CLASS BIT(3) /* TWL6030 class */ +#define TWL6032_SUBCLASS BIT(4) /* TWL6032 has changed registers */ +#define TWL4030_ALLOW_UNSUPPORTED BIT(5) /* Some voltages are possible + * but not officially supported. + * This flag is necessary to + * enable them. 
+ */ + +/*----------------------------------------------------------------------*/ + +int twl4030_sih_setup(struct device *dev, int module, int irq_base); + +/* Offsets to Power Registers */ +#define TWL4030_VDAC_DEV_GRP 0x3B +#define TWL4030_VDAC_DEDICATED 0x3E +#define TWL4030_VAUX1_DEV_GRP 0x17 +#define TWL4030_VAUX1_DEDICATED 0x1A +#define TWL4030_VAUX2_DEV_GRP 0x1B +#define TWL4030_VAUX2_DEDICATED 0x1E +#define TWL4030_VAUX3_DEV_GRP 0x1F +#define TWL4030_VAUX3_DEDICATED 0x22 + +static inline int twl4030charger_usb_en(int enable) { return 0; } + +/*----------------------------------------------------------------------*/ + +/* Linux-specific regulator identifiers ... for now, we only support + * the LDOs, and leave the three buck converters alone. VDD1 and VDD2 + * need to tie into hardware based voltage scaling (cpufreq etc), while + * VIO is generally fixed. + */ + +/* TWL4030 SMPS/LDO's */ +/* EXTERNAL dc-to-dc buck converters */ +#define TWL4030_REG_VDD1 0 +#define TWL4030_REG_VDD2 1 +#define TWL4030_REG_VIO 2 + +/* EXTERNAL LDOs */ +#define TWL4030_REG_VDAC 3 +#define TWL4030_REG_VPLL1 4 +#define TWL4030_REG_VPLL2 5 /* not on all chips */ +#define TWL4030_REG_VMMC1 6 +#define TWL4030_REG_VMMC2 7 /* not on all chips */ +#define TWL4030_REG_VSIM 8 /* not on all chips */ +#define TWL4030_REG_VAUX1 9 /* not on all chips */ +#define TWL4030_REG_VAUX2_4030 10 /* (twl4030-specific) */ +#define TWL4030_REG_VAUX2 11 /* (twl5030 and newer) */ +#define TWL4030_REG_VAUX3 12 /* not on all chips */ +#define TWL4030_REG_VAUX4 13 /* not on all chips */ + +/* INTERNAL LDOs */ +#define TWL4030_REG_VINTANA1 14 +#define TWL4030_REG_VINTANA2 15 +#define TWL4030_REG_VINTDIG 16 +#define TWL4030_REG_VUSB1V5 17 +#define TWL4030_REG_VUSB1V8 18 +#define TWL4030_REG_VUSB3V1 19 + +/* TWL6030 SMPS/LDO's */ +/* EXTERNAL dc-to-dc buck convertor controllable via SR */ +#define TWL6030_REG_VDD1 30 +#define TWL6030_REG_VDD2 31 +#define TWL6030_REG_VDD3 32 + +/* Non SR compliant dc-to-dc buck 
convertors */ +#define TWL6030_REG_VMEM 33 +#define TWL6030_REG_V2V1 34 +#define TWL6030_REG_V1V29 35 +#define TWL6030_REG_V1V8 36 + +/* EXTERNAL LDOs */ +#define TWL6030_REG_VAUX1_6030 37 +#define TWL6030_REG_VAUX2_6030 38 +#define TWL6030_REG_VAUX3_6030 39 +#define TWL6030_REG_VMMC 40 +#define TWL6030_REG_VPP 41 +#define TWL6030_REG_VUSIM 42 +#define TWL6030_REG_VANA 43 +#define TWL6030_REG_VCXIO 44 +#define TWL6030_REG_VDAC 45 +#define TWL6030_REG_VUSB 46 + +/* INTERNAL LDOs */ +#define TWL6030_REG_VRTC 47 +#define TWL6030_REG_CLK32KG 48 + +/* LDOs on 6025 have different names */ +#define TWL6032_REG_LDO2 49 +#define TWL6032_REG_LDO4 50 +#define TWL6032_REG_LDO3 51 +#define TWL6032_REG_LDO5 52 +#define TWL6032_REG_LDO1 53 +#define TWL6032_REG_LDO7 54 +#define TWL6032_REG_LDO6 55 +#define TWL6032_REG_LDOLN 56 +#define TWL6032_REG_LDOUSB 57 + +/* 6025 DCDC supplies */ +#define TWL6032_REG_SMPS3 58 +#define TWL6032_REG_SMPS4 59 +#define TWL6032_REG_VIO 60 + + +#endif /* End of __TWL4030_H */ diff --git a/include/linux/i2c/twl4030-madc.h b/include/linux/i2c/twl4030-madc.h new file mode 100644 index 0000000000..1c0134dd32 --- /dev/null +++ b/include/linux/i2c/twl4030-madc.h @@ -0,0 +1,147 @@ +/* + * twl4030_madc.h - Header for TWL4030 MADC + * + * Copyright (C) 2011 Texas Instruments Incorporated - http://www.ti.com/ + * J Keerthy + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA + * 02110-1301 USA + * + */ + +#ifndef _TWL4030_MADC_H +#define _TWL4030_MADC_H + +struct twl4030_madc_conversion_method { + u8 sel; + u8 avg; + u8 rbase; + u8 ctrl; +}; + +#define TWL4030_MADC_MAX_CHANNELS 16 + + +/* + * twl4030_madc_request- madc request packet for channel conversion + * @channels: 16 bit bitmap for individual channels + * @do_avgP: sample the input channel for 4 consecutive cycles + * @method: RT, SW1, SW2 + * @type: Polling or interrupt based method + * @raw: Return raw value, do not convert it + */ + +struct twl4030_madc_request { + unsigned long channels; + bool do_avg; + u16 method; + u16 type; + bool active; + bool result_pending; + bool raw; + int rbuf[TWL4030_MADC_MAX_CHANNELS]; + void (*func_cb)(int len, int channels, int *buf); +}; + +enum conversion_methods { + TWL4030_MADC_RT, + TWL4030_MADC_SW1, + TWL4030_MADC_SW2, + TWL4030_MADC_NUM_METHODS +}; + +enum sample_type { + TWL4030_MADC_WAIT, + TWL4030_MADC_IRQ_ONESHOT, + TWL4030_MADC_IRQ_REARM +}; + +#define TWL4030_MADC_CTRL1 0x00 +#define TWL4030_MADC_CTRL2 0x01 + +#define TWL4030_MADC_RTSELECT_LSB 0x02 +#define TWL4030_MADC_SW1SELECT_LSB 0x06 +#define TWL4030_MADC_SW2SELECT_LSB 0x0A + +#define TWL4030_MADC_RTAVERAGE_LSB 0x04 +#define TWL4030_MADC_SW1AVERAGE_LSB 0x08 +#define TWL4030_MADC_SW2AVERAGE_LSB 0x0C + +#define TWL4030_MADC_CTRL_SW1 0x12 +#define TWL4030_MADC_CTRL_SW2 0x13 + +#define TWL4030_MADC_RTCH0_LSB 0x17 +#define TWL4030_MADC_GPCH0_LSB 0x37 + +#define TWL4030_MADC_MADCON (1 << 0) /* MADC power on */ +#define TWL4030_MADC_BUSY (1 << 0) /* MADC busy */ +/* MADC conversion completion */ +#define TWL4030_MADC_EOC_SW (1 << 1) +/* MADC SWx start conversion */ +#define TWL4030_MADC_SW_START (1 << 5) +#define TWL4030_MADC_ADCIN0 (1 << 0) +#define TWL4030_MADC_ADCIN1 (1 << 1) 
+#define TWL4030_MADC_ADCIN2 (1 << 2) +#define TWL4030_MADC_ADCIN3 (1 << 3) +#define TWL4030_MADC_ADCIN4 (1 << 4) +#define TWL4030_MADC_ADCIN5 (1 << 5) +#define TWL4030_MADC_ADCIN6 (1 << 6) +#define TWL4030_MADC_ADCIN7 (1 << 7) +#define TWL4030_MADC_ADCIN8 (1 << 8) +#define TWL4030_MADC_ADCIN9 (1 << 9) +#define TWL4030_MADC_ADCIN10 (1 << 10) +#define TWL4030_MADC_ADCIN11 (1 << 11) +#define TWL4030_MADC_ADCIN12 (1 << 12) +#define TWL4030_MADC_ADCIN13 (1 << 13) +#define TWL4030_MADC_ADCIN14 (1 << 14) +#define TWL4030_MADC_ADCIN15 (1 << 15) + +/* Fixed channels */ +#define TWL4030_MADC_BTEMP TWL4030_MADC_ADCIN1 +#define TWL4030_MADC_VBUS TWL4030_MADC_ADCIN8 +#define TWL4030_MADC_VBKB TWL4030_MADC_ADCIN9 +#define TWL4030_MADC_ICHG TWL4030_MADC_ADCIN10 +#define TWL4030_MADC_VCHG TWL4030_MADC_ADCIN11 +#define TWL4030_MADC_VBAT TWL4030_MADC_ADCIN12 + +/* Step size and prescaler ratio */ +#define TEMP_STEP_SIZE 147 +#define TEMP_PSR_R 100 +#define CURR_STEP_SIZE 147 +#define CURR_PSR_R1 44 +#define CURR_PSR_R2 88 + +#define TWL4030_BCI_BCICTL1 0x23 +#define TWL4030_BCI_CGAIN 0x020 +#define TWL4030_BCI_MESBAT (1 << 1) +#define TWL4030_BCI_TYPEN (1 << 4) +#define TWL4030_BCI_ITHEN (1 << 3) + +#define REG_BCICTL2 0x024 +#define TWL4030_BCI_ITHSENS 0x007 + +/* Register and bits for GPBR1 register */ +#define TWL4030_REG_GPBR1 0x0c +#define TWL4030_GPBR1_MADC_HFCLK_EN (1 << 7) + +struct twl4030_madc_user_parms { + int channel; + int average; + int status; + u16 result; +}; + +int twl4030_madc_conversion(struct twl4030_madc_request *conv); +int twl4030_get_madc_conversion(int channel_no); +#endif diff --git a/include/linux/i7300_idle.h b/include/linux/i7300_idle.h new file mode 100644 index 0000000000..1587b7dec5 --- /dev/null +++ b/include/linux/i7300_idle.h @@ -0,0 +1,83 @@ + +#ifndef I7300_IDLE_H +#define I7300_IDLE_H + +#include + +/* + * I/O AT controls (PCI bus 0 device 8 function 0) + * DIMM controls (PCI bus 0 device 16 function 1) + */ +#define IOAT_BUS 0 +#define 
IOAT_DEVFN PCI_DEVFN(8, 0) +#define MEMCTL_BUS 0 +#define MEMCTL_DEVFN PCI_DEVFN(16, 1) + +struct fbd_ioat { + unsigned int vendor; + unsigned int ioat_dev; + unsigned int enabled; +}; + +/* + * The i5000 chip-set has the same hooks as the i7300 + * but it is not enabled by default and must be manually + * manually enabled with "forceload=1" because it is + * only lightly validated. + */ + +static const struct fbd_ioat fbd_ioat_list[] = { + {PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT_CNB, 1}, + {PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT, 0}, + {0, 0} +}; + +/* table of devices that work with this driver */ +static const struct pci_device_id pci_tbl[] = { + { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_FBD_CNB) }, + { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_5000_ERR) }, + { } /* Terminating entry */ +}; + +/* Check for known platforms with I/O-AT */ +static inline int i7300_idle_platform_probe(struct pci_dev **fbd_dev, + struct pci_dev **ioat_dev, + int enable_all) +{ + int i; + struct pci_dev *memdev, *dmadev; + + memdev = pci_get_bus_and_slot(MEMCTL_BUS, MEMCTL_DEVFN); + if (!memdev) + return -ENODEV; + + for (i = 0; pci_tbl[i].vendor != 0; i++) { + if (memdev->vendor == pci_tbl[i].vendor && + memdev->device == pci_tbl[i].device) { + break; + } + } + if (pci_tbl[i].vendor == 0) + return -ENODEV; + + dmadev = pci_get_bus_and_slot(IOAT_BUS, IOAT_DEVFN); + if (!dmadev) + return -ENODEV; + + for (i = 0; fbd_ioat_list[i].vendor != 0; i++) { + if (dmadev->vendor == fbd_ioat_list[i].vendor && + dmadev->device == fbd_ioat_list[i].ioat_dev) { + if (!(fbd_ioat_list[i].enabled || enable_all)) + continue; + if (fbd_dev) + *fbd_dev = memdev; + if (ioat_dev) + *ioat_dev = dmadev; + + return 0; + } + } + return -ENODEV; +} + +#endif diff --git a/include/linux/i8042.h b/include/linux/i8042.h index 0261e2fb36..d98780ca96 100644 --- a/include/linux/i8042.h +++ b/include/linux/i8042.h @@ -1,7 +1,11 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ #ifndef 
_LINUX_I8042_H #define _LINUX_I8042_H +/* + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published by + * the Free Software Foundation. + */ #include diff --git a/include/linux/i8253.h b/include/linux/i8253.h index 8336b2f6f8..e6bb36a975 100644 --- a/include/linux/i8253.h +++ b/include/linux/i8253.h @@ -21,7 +21,6 @@ #define PIT_LATCH ((PIT_TICK_RATE + HZ/2) / HZ) extern raw_spinlock_t i8253_lock; -extern bool i8253_clear_counter_on_shutdown; extern struct clock_event_device i8253_clockevent; extern void clockevent_i8253_init(bool oneshot); diff --git a/include/linux/icmp.h b/include/linux/icmp.h index 0af4d210ee..efc1849062 100644 --- a/include/linux/icmp.h +++ b/include/linux/icmp.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * INET An implementation of the TCP/IP protocol suite for the LINUX * operating system. INET is implemented using the BSD Socket @@ -9,35 +8,20 @@ * Version: @(#)icmp.h 1.0.3 04/28/93 * * Author: Fred N. van Kempen, + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. 
*/ #ifndef _LINUX_ICMP_H #define _LINUX_ICMP_H #include #include -#include static inline struct icmphdr *icmp_hdr(const struct sk_buff *skb) { return (struct icmphdr *)skb_transport_header(skb); } - -static inline bool icmp_is_err(int type) -{ - switch (type) { - case ICMP_DEST_UNREACH: - case ICMP_SOURCE_QUENCH: - case ICMP_REDIRECT: - case ICMP_TIME_EXCEEDED: - case ICMP_PARAMETERPROB: - return true; - } - - return false; -} - -void ip_icmp_error_rfc4884(const struct sk_buff *skb, - struct sock_ee_data_rfc4884 *out, - int thlen, int off); - #endif /* _LINUX_ICMP_H */ diff --git a/include/linux/icmpv6.h b/include/linux/icmpv6.h index 9055cb380e..57086e9fc6 100644 --- a/include/linux/icmpv6.h +++ b/include/linux/icmpv6.h @@ -1,9 +1,7 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_ICMPV6_H #define _LINUX_ICMPV6_H #include -#include #include static inline struct icmp6hdr *icmp6_hdr(const struct sk_buff *skb) @@ -14,64 +12,21 @@ static inline struct icmp6hdr *icmp6_hdr(const struct sk_buff *skb) #include #if IS_ENABLED(CONFIG_IPV6) +extern void icmpv6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info); typedef void ip6_icmp_send_t(struct sk_buff *skb, u8 type, u8 code, __u32 info, - const struct in6_addr *force_saddr, - const struct inet6_skb_parm *parm); -void icmp6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info, - const struct in6_addr *force_saddr, - const struct inet6_skb_parm *parm); -#if IS_BUILTIN(CONFIG_IPV6) -static inline void __icmpv6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info, - const struct inet6_skb_parm *parm) -{ - icmp6_send(skb, type, code, info, NULL, parm); -} -static inline int inet6_register_icmp_sender(ip6_icmp_send_t *fn) -{ - BUILD_BUG_ON(fn != icmp6_send); - return 0; -} -static inline int inet6_unregister_icmp_sender(ip6_icmp_send_t *fn) -{ - BUILD_BUG_ON(fn != icmp6_send); - return 0; -} -#else -extern void __icmpv6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info, - const struct inet6_skb_parm 
*parm); + const struct in6_addr *force_saddr); extern int inet6_register_icmp_sender(ip6_icmp_send_t *fn); extern int inet6_unregister_icmp_sender(ip6_icmp_send_t *fn); -#endif - -static inline void icmpv6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info) -{ - __icmpv6_send(skb, type, code, info, IP6CB(skb)); -} - int ip6_err_gen_icmpv6_unreach(struct sk_buff *skb, int nhs, int type, unsigned int data_len); -#if IS_ENABLED(CONFIG_NF_NAT) -void icmpv6_ndo_send(struct sk_buff *skb_in, u8 type, u8 code, __u32 info); -#else -static inline void icmpv6_ndo_send(struct sk_buff *skb_in, u8 type, u8 code, __u32 info) -{ - struct inet6_skb_parm parm = { 0 }; - __icmpv6_send(skb_in, type, code, info, &parm); -} -#endif - #else static inline void icmpv6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info) { -} -static inline void icmpv6_ndo_send(struct sk_buff *skb, - u8 type, u8 code, __u32 info) -{ } #endif @@ -90,18 +45,4 @@ extern void icmpv6_flow_init(struct sock *sk, const struct in6_addr *saddr, const struct in6_addr *daddr, int oif); - -static inline bool icmpv6_is_err(int type) -{ - switch (type) { - case ICMPV6_DEST_UNREACH: - case ICMPV6_PKT_TOOBIG: - case ICMPV6_TIME_EXCEED: - case ICMPV6_PARAMPROB: - return true; - } - - return false; -} - #endif diff --git a/include/linux/ide.h b/include/linux/ide.h index 6265376950..a633898f36 100644 --- a/include/linux/ide.h +++ b/include/linux/ide.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _IDE_H #define _IDE_H /* @@ -10,7 +9,7 @@ #include #include #include -#include +#include #include #include #include @@ -21,14 +20,18 @@ #include /* for request_sense */ #include -#include #include #include +#if defined(CONFIG_CRIS) || defined(CONFIG_FRV) || defined(CONFIG_MN10300) +# define SUPPORT_VLB_SYNC 0 +#else +# define SUPPORT_VLB_SYNC 1 +#endif + /* * Probably not wise to fiddle with these */ -#define SUPPORT_VLB_SYNC 1 #define IDE_DEFAULT_MAX_FAILURES 1 #define ERROR_MAX 8 /* Max read/write errors per 
sector */ #define ERROR_RESET 3 /* Reset controller every 4th retry */ @@ -36,56 +39,20 @@ struct device; -/* values for ide_request.type */ -enum ata_priv_type { - ATA_PRIV_MISC, - ATA_PRIV_TASKFILE, - ATA_PRIV_PC, - ATA_PRIV_SENSE, /* sense request */ - ATA_PRIV_PM_SUSPEND, /* suspend request */ - ATA_PRIV_PM_RESUME, /* resume request */ +/* IDE-specific values for req->cmd_type */ +enum ata_cmd_type_bits { + REQ_TYPE_ATA_TASKFILE = REQ_TYPE_DRV_PRIV + 1, + REQ_TYPE_ATA_PC, + REQ_TYPE_ATA_SENSE, /* sense request */ + REQ_TYPE_ATA_PM_SUSPEND,/* suspend request */ + REQ_TYPE_ATA_PM_RESUME, /* resume request */ }; -struct ide_request { - struct scsi_request sreq; - u8 sense[SCSI_SENSE_BUFFERSIZE]; - u8 type; - void *special; -}; +#define ata_pm_request(rq) \ + ((rq)->cmd_type == REQ_TYPE_ATA_PM_SUSPEND || \ + (rq)->cmd_type == REQ_TYPE_ATA_PM_RESUME) -static inline struct ide_request *ide_req(struct request *rq) -{ - return blk_mq_rq_to_pdu(rq); -} - -static inline bool ata_misc_request(struct request *rq) -{ - return blk_rq_is_private(rq) && ide_req(rq)->type == ATA_PRIV_MISC; -} - -static inline bool ata_taskfile_request(struct request *rq) -{ - return blk_rq_is_private(rq) && ide_req(rq)->type == ATA_PRIV_TASKFILE; -} - -static inline bool ata_pc_request(struct request *rq) -{ - return blk_rq_is_private(rq) && ide_req(rq)->type == ATA_PRIV_PC; -} - -static inline bool ata_sense_request(struct request *rq) -{ - return blk_rq_is_private(rq) && ide_req(rq)->type == ATA_PRIV_SENSE; -} - -static inline bool ata_pm_request(struct request *rq) -{ - return blk_rq_is_private(rq) && - (ide_req(rq)->type == ATA_PRIV_PM_SUSPEND || - ide_req(rq)->type == ATA_PRIV_PM_RESUME); -} - -/* Error codes returned in result to the higher part of the driver. */ +/* Error codes returned in rq->errors to the higher part of the driver. 
*/ enum { IDE_DRV_ERROR_GENERAL = 101, IDE_DRV_ERROR_FILEMARK = 102, @@ -161,6 +128,7 @@ struct ide_io_ports { */ #define PARTN_BITS 6 /* number of minor dev bits for partitions */ #define MAX_DRIVES 2 /* per interface; 2 assumed by lots of code */ +#define SECTOR_SIZE 512 /* * Timeouts for various operations: @@ -253,9 +221,9 @@ static inline void ide_std_init_ports(struct ide_hw *hw, * Special Driver Flags */ enum { - IDE_SFLAG_SET_GEOMETRY = BIT(0), - IDE_SFLAG_RECALIBRATE = BIT(1), - IDE_SFLAG_SET_MULTMODE = BIT(2), + IDE_SFLAG_SET_GEOMETRY = (1 << 0), + IDE_SFLAG_RECALIBRATE = (1 << 1), + IDE_SFLAG_SET_MULTMODE = (1 << 2), }; /* @@ -267,13 +235,13 @@ typedef enum { } ide_startstop_t; enum { - IDE_VALID_ERROR = BIT(1), + IDE_VALID_ERROR = (1 << 1), IDE_VALID_FEATURE = IDE_VALID_ERROR, - IDE_VALID_NSECT = BIT(2), - IDE_VALID_LBAL = BIT(3), - IDE_VALID_LBAM = BIT(4), - IDE_VALID_LBAH = BIT(5), - IDE_VALID_DEVICE = BIT(6), + IDE_VALID_NSECT = (1 << 2), + IDE_VALID_LBAL = (1 << 3), + IDE_VALID_LBAM = (1 << 4), + IDE_VALID_LBAH = (1 << 5), + IDE_VALID_DEVICE = (1 << 6), IDE_VALID_LBA = IDE_VALID_LBAL | IDE_VALID_LBAM | IDE_VALID_LBAH, @@ -289,24 +257,24 @@ enum { }; enum { - IDE_TFLAG_LBA48 = BIT(0), - IDE_TFLAG_WRITE = BIT(1), - IDE_TFLAG_CUSTOM_HANDLER = BIT(2), - IDE_TFLAG_DMA_PIO_FALLBACK = BIT(3), + IDE_TFLAG_LBA48 = (1 << 0), + IDE_TFLAG_WRITE = (1 << 1), + IDE_TFLAG_CUSTOM_HANDLER = (1 << 2), + IDE_TFLAG_DMA_PIO_FALLBACK = (1 << 3), /* force 16-bit I/O operations */ - IDE_TFLAG_IO_16BIT = BIT(4), + IDE_TFLAG_IO_16BIT = (1 << 4), /* struct ide_cmd was allocated using kmalloc() */ - IDE_TFLAG_DYN = BIT(5), - IDE_TFLAG_FS = BIT(6), - IDE_TFLAG_MULTI_PIO = BIT(7), - IDE_TFLAG_SET_XFER = BIT(8), + IDE_TFLAG_DYN = (1 << 5), + IDE_TFLAG_FS = (1 << 6), + IDE_TFLAG_MULTI_PIO = (1 << 7), + IDE_TFLAG_SET_XFER = (1 << 8), }; enum { - IDE_FTFLAG_FLAGGED = BIT(0), - IDE_FTFLAG_SET_IN_FLAGS = BIT(1), - IDE_FTFLAG_OUT_DATA = BIT(2), - IDE_FTFLAG_IN_DATA = BIT(3), + 
IDE_FTFLAG_FLAGGED = (1 << 0), + IDE_FTFLAG_SET_IN_FLAGS = (1 << 1), + IDE_FTFLAG_OUT_DATA = (1 << 2), + IDE_FTFLAG_IN_DATA = (1 << 3), }; struct ide_taskfile { @@ -357,13 +325,13 @@ struct ide_cmd { /* ATAPI packet command flags */ enum { /* set when an error is considered normal - no retry (ide-tape) */ - PC_FLAG_ABORT = BIT(0), - PC_FLAG_SUPPRESS_ERROR = BIT(1), - PC_FLAG_WAIT_FOR_DSC = BIT(2), - PC_FLAG_DMA_OK = BIT(3), - PC_FLAG_DMA_IN_PROGRESS = BIT(4), - PC_FLAG_DMA_ERROR = BIT(5), - PC_FLAG_WRITING = BIT(6), + PC_FLAG_ABORT = (1 << 0), + PC_FLAG_SUPPRESS_ERROR = (1 << 1), + PC_FLAG_WAIT_FOR_DSC = (1 << 2), + PC_FLAG_DMA_OK = (1 << 3), + PC_FLAG_DMA_IN_PROGRESS = (1 << 4), + PC_FLAG_DMA_ERROR = (1 << 5), + PC_FLAG_WRITING = (1 << 6), }; #define ATAPI_WAIT_PC (60 * HZ) @@ -413,115 +381,115 @@ struct ide_disk_ops { sector_t); int (*ioctl)(struct ide_drive_s *, struct block_device *, fmode_t, unsigned int, unsigned long); - int (*compat_ioctl)(struct ide_drive_s *, struct block_device *, - fmode_t, unsigned int, unsigned long); }; /* ATAPI device flags */ enum { - IDE_AFLAG_DRQ_INTERRUPT = BIT(0), + IDE_AFLAG_DRQ_INTERRUPT = (1 << 0), /* ide-cd */ /* Drive cannot eject the disc. */ - IDE_AFLAG_NO_EJECT = BIT(1), + IDE_AFLAG_NO_EJECT = (1 << 1), /* Drive is a pre ATAPI 1.2 drive. */ - IDE_AFLAG_PRE_ATAPI12 = BIT(2), + IDE_AFLAG_PRE_ATAPI12 = (1 << 2), /* TOC addresses are in BCD. */ - IDE_AFLAG_TOCADDR_AS_BCD = BIT(3), + IDE_AFLAG_TOCADDR_AS_BCD = (1 << 3), /* TOC track numbers are in BCD. */ - IDE_AFLAG_TOCTRACKS_AS_BCD = BIT(4), + IDE_AFLAG_TOCTRACKS_AS_BCD = (1 << 4), /* Saved TOC information is current. */ - IDE_AFLAG_TOC_VALID = BIT(6), + IDE_AFLAG_TOC_VALID = (1 << 6), /* We think that the drive door is locked. */ - IDE_AFLAG_DOOR_LOCKED = BIT(7), + IDE_AFLAG_DOOR_LOCKED = (1 << 7), /* SET_CD_SPEED command is unsupported. 
*/ - IDE_AFLAG_NO_SPEED_SELECT = BIT(8), - IDE_AFLAG_VERTOS_300_SSD = BIT(9), - IDE_AFLAG_VERTOS_600_ESD = BIT(10), - IDE_AFLAG_SANYO_3CD = BIT(11), - IDE_AFLAG_FULL_CAPS_PAGE = BIT(12), - IDE_AFLAG_PLAY_AUDIO_OK = BIT(13), - IDE_AFLAG_LE_SPEED_FIELDS = BIT(14), + IDE_AFLAG_NO_SPEED_SELECT = (1 << 8), + IDE_AFLAG_VERTOS_300_SSD = (1 << 9), + IDE_AFLAG_VERTOS_600_ESD = (1 << 10), + IDE_AFLAG_SANYO_3CD = (1 << 11), + IDE_AFLAG_FULL_CAPS_PAGE = (1 << 12), + IDE_AFLAG_PLAY_AUDIO_OK = (1 << 13), + IDE_AFLAG_LE_SPEED_FIELDS = (1 << 14), /* ide-floppy */ /* Avoid commands not supported in Clik drive */ - IDE_AFLAG_CLIK_DRIVE = BIT(15), + IDE_AFLAG_CLIK_DRIVE = (1 << 15), /* Requires BH algorithm for packets */ - IDE_AFLAG_ZIP_DRIVE = BIT(16), + IDE_AFLAG_ZIP_DRIVE = (1 << 16), /* Supports format progress report */ - IDE_AFLAG_SRFP = BIT(17), + IDE_AFLAG_SRFP = (1 << 17), /* ide-tape */ - IDE_AFLAG_IGNORE_DSC = BIT(18), + IDE_AFLAG_IGNORE_DSC = (1 << 18), /* 0 When the tape position is unknown */ - IDE_AFLAG_ADDRESS_VALID = BIT(19), + IDE_AFLAG_ADDRESS_VALID = (1 << 19), /* Device already opened */ - IDE_AFLAG_BUSY = BIT(20), + IDE_AFLAG_BUSY = (1 << 20), /* Attempt to auto-detect the current user block size */ - IDE_AFLAG_DETECT_BS = BIT(21), + IDE_AFLAG_DETECT_BS = (1 << 21), /* Currently on a filemark */ - IDE_AFLAG_FILEMARK = BIT(22), + IDE_AFLAG_FILEMARK = (1 << 22), /* 0 = no tape is loaded, so we don't rewind after ejecting */ - IDE_AFLAG_MEDIUM_PRESENT = BIT(23), + IDE_AFLAG_MEDIUM_PRESENT = (1 << 23), - IDE_AFLAG_NO_AUTOCLOSE = BIT(24), + IDE_AFLAG_NO_AUTOCLOSE = (1 << 24), }; /* device flags */ enum { /* restore settings after device reset */ - IDE_DFLAG_KEEP_SETTINGS = BIT(0), + IDE_DFLAG_KEEP_SETTINGS = (1 << 0), /* device is using DMA for read/write */ - IDE_DFLAG_USING_DMA = BIT(1), + IDE_DFLAG_USING_DMA = (1 << 1), /* okay to unmask other IRQs */ - IDE_DFLAG_UNMASK = BIT(2), + IDE_DFLAG_UNMASK = (1 << 2), /* don't attempt flushes */ - IDE_DFLAG_NOFLUSH = 
BIT(3), + IDE_DFLAG_NOFLUSH = (1 << 3), /* DSC overlap */ - IDE_DFLAG_DSC_OVERLAP = BIT(4), + IDE_DFLAG_DSC_OVERLAP = (1 << 4), /* give potential excess bandwidth */ - IDE_DFLAG_NICE1 = BIT(5), + IDE_DFLAG_NICE1 = (1 << 5), /* device is physically present */ - IDE_DFLAG_PRESENT = BIT(6), + IDE_DFLAG_PRESENT = (1 << 6), /* disable Host Protected Area */ - IDE_DFLAG_NOHPA = BIT(7), + IDE_DFLAG_NOHPA = (1 << 7), /* id read from device (synthetic if not set) */ - IDE_DFLAG_ID_READ = BIT(8), - IDE_DFLAG_NOPROBE = BIT(9), + IDE_DFLAG_ID_READ = (1 << 8), + IDE_DFLAG_NOPROBE = (1 << 9), /* need to do check_media_change() */ - IDE_DFLAG_REMOVABLE = BIT(10), - IDE_DFLAG_FORCED_GEOM = BIT(12), + IDE_DFLAG_REMOVABLE = (1 << 10), + /* needed for removable devices */ + IDE_DFLAG_ATTACH = (1 << 11), + IDE_DFLAG_FORCED_GEOM = (1 << 12), /* disallow setting unmask bit */ - IDE_DFLAG_NO_UNMASK = BIT(13), + IDE_DFLAG_NO_UNMASK = (1 << 13), /* disallow enabling 32-bit I/O */ - IDE_DFLAG_NO_IO_32BIT = BIT(14), + IDE_DFLAG_NO_IO_32BIT = (1 << 14), /* for removable only: door lock/unlock works */ - IDE_DFLAG_DOORLOCKING = BIT(15), + IDE_DFLAG_DOORLOCKING = (1 << 15), /* disallow DMA */ - IDE_DFLAG_NODMA = BIT(16), + IDE_DFLAG_NODMA = (1 << 16), /* powermanagement told us not to do anything, so sleep nicely */ - IDE_DFLAG_BLOCKED = BIT(17), + IDE_DFLAG_BLOCKED = (1 << 17), /* sleeping & sleep field valid */ - IDE_DFLAG_SLEEPING = BIT(18), - IDE_DFLAG_POST_RESET = BIT(19), - IDE_DFLAG_UDMA33_WARNED = BIT(20), - IDE_DFLAG_LBA48 = BIT(21), + IDE_DFLAG_SLEEPING = (1 << 18), + IDE_DFLAG_POST_RESET = (1 << 19), + IDE_DFLAG_UDMA33_WARNED = (1 << 20), + IDE_DFLAG_LBA48 = (1 << 21), /* status of write cache */ - IDE_DFLAG_WCACHE = BIT(22), + IDE_DFLAG_WCACHE = (1 << 22), /* used for ignoring ATA_DF */ - IDE_DFLAG_NOWERR = BIT(23), + IDE_DFLAG_NOWERR = (1 << 23), /* retrying in PIO */ - IDE_DFLAG_DMA_PIO_RETRY = BIT(24), - IDE_DFLAG_LBA = BIT(25), + IDE_DFLAG_DMA_PIO_RETRY = (1 << 24), + 
IDE_DFLAG_LBA = (1 << 25), /* don't unload heads */ - IDE_DFLAG_NO_UNLOAD = BIT(26), + IDE_DFLAG_NO_UNLOAD = (1 << 26), /* heads unloaded, please don't reset port */ - IDE_DFLAG_PARKED = BIT(27), - IDE_DFLAG_MEDIA_CHANGED = BIT(28), + IDE_DFLAG_PARKED = (1 << 27), + IDE_DFLAG_MEDIA_CHANGED = (1 << 28), /* write protect */ - IDE_DFLAG_WP = BIT(29), - IDE_DFLAG_FORMAT_IN_PROGRESS = BIT(30), - IDE_DFLAG_NIEN_QUIRK = BIT(31), + IDE_DFLAG_WP = (1 << 29), + IDE_DFLAG_FORMAT_IN_PROGRESS = (1 << 30), + IDE_DFLAG_NIEN_QUIRK = (1 << 31), }; struct ide_drive_s { @@ -530,10 +498,6 @@ struct ide_drive_s { struct request_queue *queue; /* request queue */ - bool (*prep_rq)(struct ide_drive_s *, struct request *); - - struct blk_mq_tag_set tag_set; - struct request *rq; /* current request */ void *driver_data; /* extra driver data */ u16 *id; /* identification info */ @@ -615,13 +579,8 @@ struct ide_drive_s { /* current sense rq and buffer */ bool sense_rq_armed; - bool sense_rq_active; - struct request *sense_rq; + struct request sense_rq; struct request_sense sense_data; - - /* async sense insertion */ - struct work_struct rq_work; - struct list_head rq_list; }; typedef struct ide_drive_s ide_drive_t; @@ -676,7 +635,7 @@ struct ide_port_ops { void (*init_dev)(ide_drive_t *); void (*set_pio_mode)(struct hwif_s *, ide_drive_t *); void (*set_dma_mode)(struct hwif_s *, ide_drive_t *); - blk_status_t (*reset_poll)(ide_drive_t *); + int (*reset_poll)(ide_drive_t *); void (*pre_reset)(ide_drive_t *); void (*resetproc)(ide_drive_t *); void (*maskproc)(ide_drive_t *, int); @@ -709,7 +668,7 @@ struct ide_dma_ops { }; enum { - IDE_PFLAG_PROBING = BIT(0), + IDE_PFLAG_PROBING = (1 << 0), }; struct ide_host; @@ -862,7 +821,7 @@ extern struct mutex ide_setting_mtx; * configurable drive settings */ -#define DS_SYNC BIT(0) +#define DS_SYNC (1 << 0) struct ide_devset { int (*get)(ide_drive_t *); @@ -943,10 +902,6 @@ ide_devset_get(_name, _field); \ ide_devset_set(_name, _field); \ 
IDE_DEVSET(_name, DS_SYNC, get_##_name, set_##_name) -#define ide_devset_ro_field(_name, _field) \ -ide_devset_get(_name, _field); \ -IDE_DEVSET(_name, 0, get_##_name, NULL) - #define ide_devset_rw_flag(_name, _field) \ ide_devset_get_flag(_name, _field); \ ide_devset_set_flag(_name, _field); \ @@ -975,7 +930,7 @@ __IDE_PROC_DEVSET(_name, _min, _max, NULL, NULL) typedef struct { const char *name; umode_t mode; - int (*show)(struct seq_file *, void *); + const struct file_operations *proc_fops; } ide_proc_entry_t; void proc_ide_create(void); @@ -987,8 +942,8 @@ void ide_proc_unregister_port(ide_hwif_t *); void ide_proc_register_driver(ide_drive_t *, struct ide_driver *); void ide_proc_unregister_driver(ide_drive_t *, struct ide_driver *); -int ide_capacity_proc_show(struct seq_file *m, void *v); -int ide_geometry_proc_show(struct seq_file *m, void *v); +extern const struct file_operations ide_capacity_proc_fops; +extern const struct file_operations ide_geometry_proc_fops; #else static inline void proc_ide_create(void) { ; } static inline void proc_ide_destroy(void) { ; } @@ -1004,15 +959,15 @@ static inline void ide_proc_unregister_driver(ide_drive_t *drive, enum { /* enter/exit functions */ - IDE_DBG_FUNC = BIT(0), + IDE_DBG_FUNC = (1 << 0), /* sense key/asc handling */ - IDE_DBG_SENSE = BIT(1), + IDE_DBG_SENSE = (1 << 1), /* packet commands handling */ - IDE_DBG_PC = BIT(2), + IDE_DBG_PC = (1 << 2), /* request handling */ - IDE_DBG_RQ = BIT(3), + IDE_DBG_RQ = (1 << 3), /* driver probing/setup */ - IDE_DBG_PROBE = BIT(4), + IDE_DBG_PROBE = (1 << 4), }; /* DRV_NAME has to be defined in the driver before using the macro below */ @@ -1101,9 +1056,8 @@ int generic_ide_ioctl(ide_drive_t *, struct block_device *, unsigned, unsigned l extern int ide_vlb_clk; extern int ide_pci_clk; -int ide_end_rq(ide_drive_t *, struct request *, blk_status_t, unsigned int); +int ide_end_rq(ide_drive_t *, struct request *, int, unsigned int); void ide_kill_rq(ide_drive_t *, struct request 
*); -void ide_insert_request_head(ide_drive_t *, struct request *); void __ide_set_handler(ide_drive_t *, ide_handler_t *, unsigned int); void ide_set_handler(ide_drive_t *, ide_handler_t *, unsigned int); @@ -1133,7 +1087,7 @@ extern int ide_devset_execute(ide_drive_t *drive, const struct ide_devset *setting, int arg); void ide_complete_cmd(ide_drive_t *, struct ide_cmd *, u8, u8); -int ide_complete_rq(ide_drive_t *, blk_status_t, unsigned int); +int ide_complete_rq(ide_drive_t *, int, unsigned int); void ide_tf_readback(ide_drive_t *drive, struct ide_cmd *cmd); void ide_tf_dump(const char *, struct ide_cmd *); @@ -1175,10 +1129,10 @@ ssize_t ide_park_store(struct device *dev, struct device_attribute *attr, * the tail of our block device request queue and wait for their completion. */ enum { - REQ_IDETAPE_PC1 = BIT(0), /* packet command (first stage) */ - REQ_IDETAPE_PC2 = BIT(1), /* packet command (second stage) */ - REQ_IDETAPE_READ = BIT(2), - REQ_IDETAPE_WRITE = BIT(3), + REQ_IDETAPE_PC1 = (1 << 0), /* packet command (first stage) */ + REQ_IDETAPE_PC2 = (1 << 1), /* packet command (second stage) */ + REQ_IDETAPE_READ = (1 << 2), + REQ_IDETAPE_WRITE = (1 << 3), }; int ide_queue_pc_tail(ide_drive_t *, struct gendisk *, struct ide_atapi_pc *, @@ -1221,10 +1175,9 @@ extern int ide_wait_not_busy(ide_hwif_t *hwif, unsigned long timeout); extern void ide_stall_queue(ide_drive_t *drive, unsigned long timeout); -extern void ide_timer_expiry(struct timer_list *t); +extern void ide_timer_expiry(unsigned long); extern irqreturn_t ide_intr(int irq, void *dev_id); -extern blk_status_t ide_queue_rq(struct blk_mq_hw_ctx *, const struct blk_mq_queue_data *); -extern blk_status_t ide_issue_rq(ide_drive_t *, struct request *, bool); +extern void do_ide_request(struct request_queue *); extern void ide_requeue_and_plug(ide_drive_t *drive, struct request *rq); void ide_init_disk(struct gendisk *, ide_drive_t *); @@ -1268,71 +1221,71 @@ struct ide_pci_enablebit { enum { /* Uses ISA 
control ports not PCI ones. */ - IDE_HFLAG_ISA_PORTS = BIT(0), + IDE_HFLAG_ISA_PORTS = (1 << 0), /* single port device */ - IDE_HFLAG_SINGLE = BIT(1), + IDE_HFLAG_SINGLE = (1 << 1), /* don't use legacy PIO blacklist */ - IDE_HFLAG_PIO_NO_BLACKLIST = BIT(2), + IDE_HFLAG_PIO_NO_BLACKLIST = (1 << 2), /* set for the second port of QD65xx */ - IDE_HFLAG_QD_2ND_PORT = BIT(3), + IDE_HFLAG_QD_2ND_PORT = (1 << 3), /* use PIO8/9 for prefetch off/on */ - IDE_HFLAG_ABUSE_PREFETCH = BIT(4), + IDE_HFLAG_ABUSE_PREFETCH = (1 << 4), /* use PIO6/7 for fast-devsel off/on */ - IDE_HFLAG_ABUSE_FAST_DEVSEL = BIT(5), + IDE_HFLAG_ABUSE_FAST_DEVSEL = (1 << 5), /* use 100-102 and 200-202 PIO values to set DMA modes */ - IDE_HFLAG_ABUSE_DMA_MODES = BIT(6), + IDE_HFLAG_ABUSE_DMA_MODES = (1 << 6), /* * keep DMA setting when programming PIO mode, may be used only * for hosts which have separate PIO and DMA timings (ie. PMAC) */ - IDE_HFLAG_SET_PIO_MODE_KEEP_DMA = BIT(7), + IDE_HFLAG_SET_PIO_MODE_KEEP_DMA = (1 << 7), /* program host for the transfer mode after programming device */ - IDE_HFLAG_POST_SET_MODE = BIT(8), + IDE_HFLAG_POST_SET_MODE = (1 << 8), /* don't program host/device for the transfer mode ("smart" hosts) */ - IDE_HFLAG_NO_SET_MODE = BIT(9), + IDE_HFLAG_NO_SET_MODE = (1 << 9), /* trust BIOS for programming chipset/device for DMA */ - IDE_HFLAG_TRUST_BIOS_FOR_DMA = BIT(10), + IDE_HFLAG_TRUST_BIOS_FOR_DMA = (1 << 10), /* host is CS5510/CS5520 */ - IDE_HFLAG_CS5520 = BIT(11), + IDE_HFLAG_CS5520 = (1 << 11), /* ATAPI DMA is unsupported */ - IDE_HFLAG_NO_ATAPI_DMA = BIT(12), + IDE_HFLAG_NO_ATAPI_DMA = (1 << 12), /* set if host is a "non-bootable" controller */ - IDE_HFLAG_NON_BOOTABLE = BIT(13), + IDE_HFLAG_NON_BOOTABLE = (1 << 13), /* host doesn't support DMA */ - IDE_HFLAG_NO_DMA = BIT(14), + IDE_HFLAG_NO_DMA = (1 << 14), /* check if host is PCI IDE device before allowing DMA */ - IDE_HFLAG_NO_AUTODMA = BIT(15), + IDE_HFLAG_NO_AUTODMA = (1 << 15), /* host uses MMIO */ - 
IDE_HFLAG_MMIO = BIT(16), + IDE_HFLAG_MMIO = (1 << 16), /* no LBA48 */ - IDE_HFLAG_NO_LBA48 = BIT(17), + IDE_HFLAG_NO_LBA48 = (1 << 17), /* no LBA48 DMA */ - IDE_HFLAG_NO_LBA48_DMA = BIT(18), + IDE_HFLAG_NO_LBA48_DMA = (1 << 18), /* data FIFO is cleared by an error */ - IDE_HFLAG_ERROR_STOPS_FIFO = BIT(19), + IDE_HFLAG_ERROR_STOPS_FIFO = (1 << 19), /* serialize ports */ - IDE_HFLAG_SERIALIZE = BIT(20), + IDE_HFLAG_SERIALIZE = (1 << 20), /* host is DTC2278 */ - IDE_HFLAG_DTC2278 = BIT(21), + IDE_HFLAG_DTC2278 = (1 << 21), /* 4 devices on a single set of I/O ports */ - IDE_HFLAG_4DRIVES = BIT(22), + IDE_HFLAG_4DRIVES = (1 << 22), /* host is TRM290 */ - IDE_HFLAG_TRM290 = BIT(23), + IDE_HFLAG_TRM290 = (1 << 23), /* use 32-bit I/O ops */ - IDE_HFLAG_IO_32BIT = BIT(24), + IDE_HFLAG_IO_32BIT = (1 << 24), /* unmask IRQs */ - IDE_HFLAG_UNMASK_IRQS = BIT(25), - IDE_HFLAG_BROKEN_ALTSTATUS = BIT(26), + IDE_HFLAG_UNMASK_IRQS = (1 << 25), + IDE_HFLAG_BROKEN_ALTSTATUS = (1 << 26), /* serialize ports if DMA is possible (for sl82c105) */ - IDE_HFLAG_SERIALIZE_DMA = BIT(27), + IDE_HFLAG_SERIALIZE_DMA = (1 << 27), /* force host out of "simplex" mode */ - IDE_HFLAG_CLEAR_SIMPLEX = BIT(28), + IDE_HFLAG_CLEAR_SIMPLEX = (1 << 28), /* DSC overlap is unsupported */ - IDE_HFLAG_NO_DSC = BIT(29), + IDE_HFLAG_NO_DSC = (1 << 29), /* never use 32-bit I/O ops */ - IDE_HFLAG_NO_IO_32BIT = BIT(30), + IDE_HFLAG_NO_IO_32BIT = (1 << 30), /* never unmask IRQs */ - IDE_HFLAG_NO_UNMASK_IRQS = BIT(31), + IDE_HFLAG_NO_UNMASK_IRQS = (1 << 31), }; #ifdef CONFIG_BLK_DEV_OFFBOARD @@ -1524,6 +1477,8 @@ static inline void ide_set_hwifdata (ide_hwif_t * hwif, void *data) hwif->hwif_data = data; } +extern void ide_toggle_bounce(ide_drive_t *drive, int on); + u64 ide_get_lba_addr(struct ide_cmd *, int); u8 ide_dump_status(ide_drive_t *, const char *, u8); @@ -1540,16 +1495,16 @@ struct ide_timing { }; enum { - IDE_TIMING_SETUP = BIT(0), - IDE_TIMING_ACT8B = BIT(1), - IDE_TIMING_REC8B = BIT(2), - IDE_TIMING_CYC8B 
= BIT(3), + IDE_TIMING_SETUP = (1 << 0), + IDE_TIMING_ACT8B = (1 << 1), + IDE_TIMING_REC8B = (1 << 2), + IDE_TIMING_CYC8B = (1 << 3), IDE_TIMING_8BIT = IDE_TIMING_ACT8B | IDE_TIMING_REC8B | IDE_TIMING_CYC8B, - IDE_TIMING_ACTIVE = BIT(4), - IDE_TIMING_RECOVER = BIT(5), - IDE_TIMING_CYCLE = BIT(6), - IDE_TIMING_UDMA = BIT(7), + IDE_TIMING_ACTIVE = (1 << 4), + IDE_TIMING_RECOVER = (1 << 5), + IDE_TIMING_CYCLE = (1 << 6), + IDE_TIMING_UDMA = (1 << 7), IDE_TIMING_ALL = IDE_TIMING_SETUP | IDE_TIMING_8BIT | IDE_TIMING_ACTIVE | IDE_TIMING_RECOVER | IDE_TIMING_CYCLE | IDE_TIMING_UDMA, diff --git a/include/linux/idr.h b/include/linux/idr.h index a0dce14090..083d61e927 100644 --- a/include/linux/idr.h +++ b/include/linux/idr.h @@ -1,9 +1,9 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * include/linux/idr.h * * 2002-10-18 written by Jim Houston jim.houston@ccur.com * Copyright (C) 2002 by Concurrent Computer Corporation + * Distributed under the GNU GPL license version 2. * * Small id to pointer translation service avoiding fixed sized * tables. @@ -12,75 +12,49 @@ #ifndef __IDR_H__ #define __IDR_H__ -#include -#include -#include - -struct idr { - struct radix_tree_root idr_rt; - unsigned int idr_base; - unsigned int idr_next; -}; +#include +#include +#include +#include /* - * The IDR API does not expose the tagging functionality of the radix tree - * to users. Use tag 0 to track whether a node has free space below it. + * We want shallower trees and thus more bits covered at each layer. 8 + * bits gives us large enough first layer for most use cases and maximum + * tree depth of 4. Each idr_layer is slightly larger than 2k on 64bit and + * 1k on 32bit. 
*/ -#define IDR_FREE 0 +#define IDR_BITS 8 +#define IDR_SIZE (1 << IDR_BITS) +#define IDR_MASK ((1 << IDR_BITS)-1) -/* Set the IDR flag and the IDR_FREE tag */ -#define IDR_RT_MARKER (ROOT_IS_IDR | (__force gfp_t) \ - (1 << (ROOT_TAG_SHIFT + IDR_FREE))) +struct idr_layer { + int prefix; /* the ID prefix of this idr_layer */ + int layer; /* distance from leaf */ + struct idr_layer __rcu *ary[1<idr_next); -} - -/** - * idr_set_cursor - Set the current position of the cyclic allocator - * @idr: idr handle - * @val: new position - * - * The next call to idr_alloc_cyclic() will return @val if it is free - * (otherwise the search will start from this position). - */ -static inline void idr_set_cursor(struct idr *idr, unsigned int val) -{ - WRITE_ONCE(idr->idr_next, val); -} - /** * DOC: idr sync * idr synchronization (stolen from radix-tree.h) @@ -98,70 +72,22 @@ static inline void idr_set_cursor(struct idr *idr, unsigned int val) * period). */ -#define idr_lock(idr) xa_lock(&(idr)->idr_rt) -#define idr_unlock(idr) xa_unlock(&(idr)->idr_rt) -#define idr_lock_bh(idr) xa_lock_bh(&(idr)->idr_rt) -#define idr_unlock_bh(idr) xa_unlock_bh(&(idr)->idr_rt) -#define idr_lock_irq(idr) xa_lock_irq(&(idr)->idr_rt) -#define idr_unlock_irq(idr) xa_unlock_irq(&(idr)->idr_rt) -#define idr_lock_irqsave(idr, flags) \ - xa_lock_irqsave(&(idr)->idr_rt, flags) -#define idr_unlock_irqrestore(idr, flags) \ - xa_unlock_irqrestore(&(idr)->idr_rt, flags) +/* + * This is what we export. 
+ */ +void *idr_find_slowpath(struct idr *idp, int id); void idr_preload(gfp_t gfp_mask); - -int idr_alloc(struct idr *, void *ptr, int start, int end, gfp_t); -int __must_check idr_alloc_u32(struct idr *, void *ptr, u32 *id, - unsigned long max, gfp_t); -int idr_alloc_cyclic(struct idr *, void *ptr, int start, int end, gfp_t); -void *idr_remove(struct idr *, unsigned long id); -void *idr_find(const struct idr *, unsigned long id); -int idr_for_each(const struct idr *, +int idr_alloc(struct idr *idp, void *ptr, int start, int end, gfp_t gfp_mask); +int idr_alloc_cyclic(struct idr *idr, void *ptr, int start, int end, gfp_t gfp_mask); +int idr_for_each(struct idr *idp, int (*fn)(int id, void *p, void *data), void *data); -void *idr_get_next(struct idr *, int *nextid); -void *idr_get_next_ul(struct idr *, unsigned long *nextid); -void *idr_replace(struct idr *, void *, unsigned long id); -void idr_destroy(struct idr *); - -/** - * idr_init_base() - Initialise an IDR. - * @idr: IDR handle. - * @base: The base value for the IDR. - * - * This variation of idr_init() creates an IDR which will allocate IDs - * starting at %base. - */ -static inline void idr_init_base(struct idr *idr, int base) -{ - INIT_RADIX_TREE(&idr->idr_rt, IDR_RT_MARKER); - idr->idr_base = base; - idr->idr_next = 0; -} - -/** - * idr_init() - Initialise an IDR. - * @idr: IDR handle. - * - * Initialise a dynamically allocated IDR. To initialise a - * statically allocated IDR, use DEFINE_IDR(). - */ -static inline void idr_init(struct idr *idr) -{ - idr_init_base(idr, 0); -} - -/** - * idr_is_empty() - Are there any IDs allocated? - * @idr: IDR handle. - * - * Return: %true if any IDs have been allocated from this IDR. 
- */ -static inline bool idr_is_empty(const struct idr *idr) -{ - return radix_tree_empty(&idr->idr_rt) && - radix_tree_tagged(&idr->idr_rt, IDR_FREE); -} +void *idr_get_next(struct idr *idp, int *nextid); +void *idr_replace(struct idr *idp, void *ptr, int id); +void idr_remove(struct idr *idp, int id); +void idr_destroy(struct idr *idp); +void idr_init(struct idr *idp); +bool idr_is_empty(struct idr *idp); /** * idr_preload_end - end preload section started with idr_preload() @@ -171,159 +97,104 @@ static inline bool idr_is_empty(const struct idr *idr) */ static inline void idr_preload_end(void) { - local_unlock(&radix_tree_preloads.lock); + preempt_enable(); } /** - * idr_for_each_entry() - Iterate over an IDR's elements of a given type. - * @idr: IDR handle. - * @entry: The type * to use as cursor - * @id: Entry ID. + * idr_find - return pointer for given id + * @idr: idr handle + * @id: lookup key * - * @entry and @id do not need to be initialized before the loop, and - * after normal termination @entry is left with the value NULL. This - * is convenient for a "not found" value. + * Return the pointer given the id it has been registered with. A %NULL + * return indicates that @id is not valid or you passed %NULL in + * idr_get_new(). + * + * This function can be called under rcu_read_lock(), given that the leaf + * pointers lifetimes are correctly managed. */ -#define idr_for_each_entry(idr, entry, id) \ - for (id = 0; ((entry) = idr_get_next(idr, &(id))) != NULL; id += 1U) +static inline void *idr_find(struct idr *idr, int id) +{ + struct idr_layer *hint = rcu_dereference_raw(idr->hint); + + if (hint && (id & ~IDR_MASK) == hint->prefix) + return rcu_dereference_raw(hint->ary[id & IDR_MASK]); + + return idr_find_slowpath(idr, id); +} /** - * idr_for_each_entry_ul() - Iterate over an IDR's elements of a given type. - * @idr: IDR handle. - * @entry: The type * to use as cursor. - * @tmp: A temporary placeholder for ID. - * @id: Entry ID. 
+ * idr_for_each_entry - iterate over an idr's elements of a given type + * @idp: idr handle + * @entry: the type * to use as cursor + * @id: id entry's key * * @entry and @id do not need to be initialized before the loop, and - * after normal termination @entry is left with the value NULL. This + * after normal terminatinon @entry is left with the value NULL. This * is convenient for a "not found" value. */ -#define idr_for_each_entry_ul(idr, entry, tmp, id) \ - for (tmp = 0, id = 0; \ - tmp <= id && ((entry) = idr_get_next_ul(idr, &(id))) != NULL; \ - tmp = id, ++id) +#define idr_for_each_entry(idp, entry, id) \ + for (id = 0; ((entry) = idr_get_next(idp, &(id))) != NULL; ++id) /** - * idr_for_each_entry_continue() - Continue iteration over an IDR's elements of a given type - * @idr: IDR handle. - * @entry: The type * to use as a cursor. - * @id: Entry ID. + * idr_for_each_entry - continue iteration over an idr's elements of a given type + * @idp: idr handle + * @entry: the type * to use as cursor + * @id: id entry's key * - * Continue to iterate over entries, continuing after the current position. + * Continue to iterate over list of given type, continuing after + * the current position. */ -#define idr_for_each_entry_continue(idr, entry, id) \ - for ((entry) = idr_get_next((idr), &(id)); \ +#define idr_for_each_entry_continue(idp, entry, id) \ + for ((entry) = idr_get_next((idp), &(id)); \ entry; \ - ++id, (entry) = idr_get_next((idr), &(id))) - -/** - * idr_for_each_entry_continue_ul() - Continue iteration over an IDR's elements of a given type - * @idr: IDR handle. - * @entry: The type * to use as a cursor. - * @tmp: A temporary placeholder for ID. - * @id: Entry ID. - * - * Continue to iterate over entries, continuing after the current position. 
- */ -#define idr_for_each_entry_continue_ul(idr, entry, tmp, id) \ - for (tmp = id; \ - tmp <= id && ((entry) = idr_get_next_ul(idr, &(id))) != NULL; \ - tmp = id, ++id) + ++id, (entry) = idr_get_next((idp), &(id))) /* - * IDA - ID Allocator, use when translation from id to pointer isn't necessary. + * IDA - IDR based id allocator, use when translation from id to + * pointer isn't necessary. + * + * IDA_BITMAP_LONGS is calculated to be one less to accommodate + * ida_bitmap->nr_busy so that the whole struct fits in 128 bytes. */ #define IDA_CHUNK_SIZE 128 /* 128 bytes per chunk */ -#define IDA_BITMAP_LONGS (IDA_CHUNK_SIZE / sizeof(long)) +#define IDA_BITMAP_LONGS (IDA_CHUNK_SIZE / sizeof(long) - 1) #define IDA_BITMAP_BITS (IDA_BITMAP_LONGS * sizeof(long) * 8) struct ida_bitmap { + long nr_busy; unsigned long bitmap[IDA_BITMAP_LONGS]; }; struct ida { - struct xarray xa; + struct idr idr; + struct ida_bitmap *free_bitmap; }; -#define IDA_INIT_FLAGS (XA_FLAGS_LOCK_IRQ | XA_FLAGS_ALLOC) - -#define IDA_INIT(name) { \ - .xa = XARRAY_INIT(name, IDA_INIT_FLAGS) \ -} +#define IDA_INIT(name) { .idr = IDR_INIT((name).idr), .free_bitmap = NULL, } #define DEFINE_IDA(name) struct ida name = IDA_INIT(name) -int ida_alloc_range(struct ida *, unsigned int min, unsigned int max, gfp_t); -void ida_free(struct ida *, unsigned int id); +int ida_pre_get(struct ida *ida, gfp_t gfp_mask); +int ida_get_new_above(struct ida *ida, int starting_id, int *p_id); +void ida_remove(struct ida *ida, int id); void ida_destroy(struct ida *ida); +void ida_init(struct ida *ida); + +int ida_simple_get(struct ida *ida, unsigned int start, unsigned int end, + gfp_t gfp_mask); +void ida_simple_remove(struct ida *ida, unsigned int id); /** - * ida_alloc() - Allocate an unused ID. - * @ida: IDA handle. - * @gfp: Memory allocation flags. + * ida_get_new - allocate new ID + * @ida: idr handle + * @p_id: pointer to the allocated handle * - * Allocate an ID between 0 and %INT_MAX, inclusive. 
- * - * Context: Any context. It is safe to call this function without - * locking in your code. - * Return: The allocated ID, or %-ENOMEM if memory could not be allocated, - * or %-ENOSPC if there are no free IDs. + * Simple wrapper around ida_get_new_above() w/ @starting_id of zero. */ -static inline int ida_alloc(struct ida *ida, gfp_t gfp) +static inline int ida_get_new(struct ida *ida, int *p_id) { - return ida_alloc_range(ida, 0, ~0, gfp); + return ida_get_new_above(ida, 0, p_id); } -/** - * ida_alloc_min() - Allocate an unused ID. - * @ida: IDA handle. - * @min: Lowest ID to allocate. - * @gfp: Memory allocation flags. - * - * Allocate an ID between @min and %INT_MAX, inclusive. - * - * Context: Any context. It is safe to call this function without - * locking in your code. - * Return: The allocated ID, or %-ENOMEM if memory could not be allocated, - * or %-ENOSPC if there are no free IDs. - */ -static inline int ida_alloc_min(struct ida *ida, unsigned int min, gfp_t gfp) -{ - return ida_alloc_range(ida, min, ~0, gfp); -} +void __init idr_init_cache(void); -/** - * ida_alloc_max() - Allocate an unused ID. - * @ida: IDA handle. - * @max: Highest ID to allocate. - * @gfp: Memory allocation flags. - * - * Allocate an ID between 0 and @max, inclusive. - * - * Context: Any context. It is safe to call this function without - * locking in your code. - * Return: The allocated ID, or %-ENOMEM if memory could not be allocated, - * or %-ENOSPC if there are no free IDs. - */ -static inline int ida_alloc_max(struct ida *ida, unsigned int max, gfp_t gfp) -{ - return ida_alloc_range(ida, 0, max, gfp); -} - -static inline void ida_init(struct ida *ida) -{ - xa_init_flags(&ida->xa, IDA_INIT_FLAGS); -} - -/* - * ida_simple_get() and ida_simple_remove() are deprecated. Use - * ida_alloc() and ida_free() instead respectively. 
- */ -#define ida_simple_get(ida, start, end, gfp) \ - ida_alloc_range(ida, start, (end) - 1, gfp) -#define ida_simple_remove(ida, id) ida_free(ida, id) - -static inline bool ida_is_empty(const struct ida *ida) -{ - return xa_empty(&ida->xa); -} #endif /* __IDR_H__ */ diff --git a/include/linux/ieee80211.h b/include/linux/ieee80211.h index 6942645031..a80516fd65 100644 --- a/include/linux/ieee80211.h +++ b/include/linux/ieee80211.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * IEEE 802.11 defines * @@ -8,8 +7,11 @@ * Copyright (c) 2005, Devicescape Software, Inc. * Copyright (c) 2006, Michael Wu * Copyright (c) 2013 - 2014 Intel Mobile Communications GmbH - * Copyright (c) 2016 - 2017 Intel Deutschland GmbH - * Copyright (c) 2018 - 2021 Intel Corporation + * Copyright (c) 2016 Intel Deutschland GmbH + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
*/ #ifndef LINUX_IEEE80211_H @@ -105,54 +107,6 @@ /* extension, added by 802.11ad */ #define IEEE80211_STYPE_DMG_BEACON 0x0000 -#define IEEE80211_STYPE_S1G_BEACON 0x0010 - -/* bits unique to S1G beacon */ -#define IEEE80211_S1G_BCN_NEXT_TBTT 0x100 - -/* see 802.11ah-2016 9.9 NDP CMAC frames */ -#define IEEE80211_S1G_1MHZ_NDP_BITS 25 -#define IEEE80211_S1G_1MHZ_NDP_BYTES 4 -#define IEEE80211_S1G_2MHZ_NDP_BITS 37 -#define IEEE80211_S1G_2MHZ_NDP_BYTES 5 - -#define IEEE80211_NDP_FTYPE_CTS 0 -#define IEEE80211_NDP_FTYPE_CF_END 0 -#define IEEE80211_NDP_FTYPE_PS_POLL 1 -#define IEEE80211_NDP_FTYPE_ACK 2 -#define IEEE80211_NDP_FTYPE_PS_POLL_ACK 3 -#define IEEE80211_NDP_FTYPE_BA 4 -#define IEEE80211_NDP_FTYPE_BF_REPORT_POLL 5 -#define IEEE80211_NDP_FTYPE_PAGING 6 -#define IEEE80211_NDP_FTYPE_PREQ 7 - -#define SM64(f, v) ((((u64)v) << f##_S) & f) - -/* NDP CMAC frame fields */ -#define IEEE80211_NDP_FTYPE 0x0000000000000007 -#define IEEE80211_NDP_FTYPE_S 0x0000000000000000 - -/* 1M Probe Request 11ah 9.9.3.1.1 */ -#define IEEE80211_NDP_1M_PREQ_ANO 0x0000000000000008 -#define IEEE80211_NDP_1M_PREQ_ANO_S 3 -#define IEEE80211_NDP_1M_PREQ_CSSID 0x00000000000FFFF0 -#define IEEE80211_NDP_1M_PREQ_CSSID_S 4 -#define IEEE80211_NDP_1M_PREQ_RTYPE 0x0000000000100000 -#define IEEE80211_NDP_1M_PREQ_RTYPE_S 20 -#define IEEE80211_NDP_1M_PREQ_RSV 0x0000000001E00000 -#define IEEE80211_NDP_1M_PREQ_RSV 0x0000000001E00000 -/* 2M Probe Request 11ah 9.9.3.1.2 */ -#define IEEE80211_NDP_2M_PREQ_ANO 0x0000000000000008 -#define IEEE80211_NDP_2M_PREQ_ANO_S 3 -#define IEEE80211_NDP_2M_PREQ_CSSID 0x0000000FFFFFFFF0 -#define IEEE80211_NDP_2M_PREQ_CSSID_S 4 -#define IEEE80211_NDP_2M_PREQ_RTYPE 0x0000001000000000 -#define IEEE80211_NDP_2M_PREQ_RTYPE_S 36 - -#define IEEE80211_ANO_NETTYPE_WILD 15 - -/* bits unique to S1G beacon */ -#define IEEE80211_S1G_BCN_NEXT_TBTT 0x100 /* control extension - for IEEE80211_FTYPE_CTL | IEEE80211_STYPE_CTL_EXT */ #define IEEE80211_CTL_EXT_POLL 0x2000 @@ -169,21 +123,6 @@ 
#define IEEE80211_MAX_SN IEEE80211_SN_MASK #define IEEE80211_SN_MODULO (IEEE80211_MAX_SN + 1) - -/* PV1 Layout 11ah 9.8.3.1 */ -#define IEEE80211_PV1_FCTL_VERS 0x0003 -#define IEEE80211_PV1_FCTL_FTYPE 0x001c -#define IEEE80211_PV1_FCTL_STYPE 0x00e0 -#define IEEE80211_PV1_FCTL_TODS 0x0100 -#define IEEE80211_PV1_FCTL_MOREFRAGS 0x0200 -#define IEEE80211_PV1_FCTL_PM 0x0400 -#define IEEE80211_PV1_FCTL_MOREDATA 0x0800 -#define IEEE80211_PV1_FCTL_PROTECTED 0x1000 -#define IEEE80211_PV1_FCTL_END_SP 0x2000 -#define IEEE80211_PV1_FCTL_RELAYED 0x4000 -#define IEEE80211_PV1_FCTL_ACK_POLICY 0x8000 -#define IEEE80211_PV1_FCTL_CTL_EXT 0x0f00 - static inline bool ieee80211_sn_less(u16 sn1, u16 sn2) { return ((sn1 - sn2) & IEEE80211_SN_MASK) > (IEEE80211_SN_MODULO >> 1); @@ -211,7 +150,6 @@ static inline u16 ieee80211_sn_sub(u16 sn1, u16 sn2) #define IEEE80211_MAX_FRAG_THRESHOLD 2352 #define IEEE80211_MAX_RTS_THRESHOLD 2353 #define IEEE80211_MAX_AID 2007 -#define IEEE80211_MAX_AID_S1G 8191 #define IEEE80211_MAX_TIM_LEN 251 #define IEEE80211_MAX_MESH_PEERINGS 63 /* Maximum size for the MA-UNITDATA primitive, 802.11 standard section @@ -247,8 +185,6 @@ static inline u16 ieee80211_sn_sub(u16 sn1, u16 sn2) /* number of user priorities 802.11 uses */ #define IEEE80211_NUM_UPS 8 -/* number of ACs */ -#define IEEE80211_NUM_ACS 4 #define IEEE80211_QOS_CTL_LEN 2 /* 1d tag mask */ @@ -435,17 +371,6 @@ static inline bool ieee80211_is_data(__le16 fc) cpu_to_le16(IEEE80211_FTYPE_DATA); } -/** - * ieee80211_is_ext - check if type is IEEE80211_FTYPE_EXT - * @fc: frame control bytes in little-endian byteorder - */ -static inline bool ieee80211_is_ext(__le16 fc) -{ - return (fc & cpu_to_le16(IEEE80211_FCTL_FTYPE)) == - cpu_to_le16(IEEE80211_FTYPE_EXT); -} - - /** * ieee80211_is_data_qos - check if type is IEEE80211_FTYPE_DATA and IEEE80211_STYPE_QOS_DATA is set * @fc: frame control bytes in little-endian byteorder @@ -544,40 +469,6 @@ static inline bool ieee80211_is_beacon(__le16 fc) 
cpu_to_le16(IEEE80211_FTYPE_MGMT | IEEE80211_STYPE_BEACON); } -/** - * ieee80211_is_s1g_beacon - check if IEEE80211_FTYPE_EXT && - * IEEE80211_STYPE_S1G_BEACON - * @fc: frame control bytes in little-endian byteorder - */ -static inline bool ieee80211_is_s1g_beacon(__le16 fc) -{ - return (fc & cpu_to_le16(IEEE80211_FCTL_FTYPE | - IEEE80211_FCTL_STYPE)) == - cpu_to_le16(IEEE80211_FTYPE_EXT | IEEE80211_STYPE_S1G_BEACON); -} - -/** - * ieee80211_next_tbtt_present - check if IEEE80211_FTYPE_EXT && - * IEEE80211_STYPE_S1G_BEACON && IEEE80211_S1G_BCN_NEXT_TBTT - * @fc: frame control bytes in little-endian byteorder - */ -static inline bool ieee80211_next_tbtt_present(__le16 fc) -{ - return (fc & cpu_to_le16(IEEE80211_FCTL_FTYPE | IEEE80211_FCTL_STYPE)) == - cpu_to_le16(IEEE80211_FTYPE_EXT | IEEE80211_STYPE_S1G_BEACON) && - fc & cpu_to_le16(IEEE80211_S1G_BCN_NEXT_TBTT); -} - -/** - * ieee80211_is_s1g_short_beacon - check if next tbtt present bit is set. Only - * true for S1G beacons when they're short. 
- * @fc: frame control bytes in little-endian byteorder - */ -static inline bool ieee80211_is_s1g_short_beacon(__le16 fc) -{ - return ieee80211_is_s1g_beacon(fc) && ieee80211_next_tbtt_present(fc); -} - /** * ieee80211_is_atim - check if IEEE80211_FTYPE_MGMT && IEEE80211_STYPE_ATIM * @fc: frame control bytes in little-endian byteorder @@ -728,15 +619,6 @@ static inline bool ieee80211_is_qos_nullfunc(__le16 fc) cpu_to_le16(IEEE80211_FTYPE_DATA | IEEE80211_STYPE_QOS_NULLFUNC); } -/** - * ieee80211_is_any_nullfunc - check if frame is regular or QoS nullfunc frame - * @fc: frame control bytes in little-endian byteorder - */ -static inline bool ieee80211_is_any_nullfunc(__le16 fc) -{ - return (ieee80211_is_nullfunc(fc) || ieee80211_is_qos_nullfunc(fc)); -} - /** * ieee80211_is_bufferable_mmpdu - check if frame is bufferable MMPDU * @fc: frame control field in little-endian byteorder @@ -825,7 +707,7 @@ struct ieee80211_msrment_ie { u8 token; u8 mode; u8 type; - u8 request[]; + u8 request[0]; } __packed; /** @@ -927,8 +809,6 @@ enum mesh_config_capab_flags { IEEE80211_MESHCONF_CAPAB_POWER_SAVE_LEVEL = 0x40, }; -#define IEEE80211_MESHCONF_FORM_CONNECTED_TO_GATE 0x1 - /** * mesh channel switch parameters element's flag indicator * @@ -968,7 +848,6 @@ enum ieee80211_ht_chanwidth_values { * @IEEE80211_OPMODE_NOTIF_CHANWIDTH_40MHZ: 40 MHz channel width * @IEEE80211_OPMODE_NOTIF_CHANWIDTH_80MHZ: 80 MHz channel width * @IEEE80211_OPMODE_NOTIF_CHANWIDTH_160MHZ: 160 MHz or 80+80 MHz channel width - * @IEEE80211_OPMODE_NOTIF_BW_160_80P80: 160 / 80+80 MHz indicator flag * @IEEE80211_OPMODE_NOTIF_RX_NSS_MASK: number of spatial streams mask * (the NSS value is the value of this field + 1) * @IEEE80211_OPMODE_NOTIF_RX_NSS_SHIFT: number of spatial streams shift @@ -976,36 +855,16 @@ enum ieee80211_ht_chanwidth_values { * using a beamforming steering matrix */ enum ieee80211_vht_opmode_bits { - IEEE80211_OPMODE_NOTIF_CHANWIDTH_MASK = 0x03, + IEEE80211_OPMODE_NOTIF_CHANWIDTH_MASK = 3, 
IEEE80211_OPMODE_NOTIF_CHANWIDTH_20MHZ = 0, IEEE80211_OPMODE_NOTIF_CHANWIDTH_40MHZ = 1, IEEE80211_OPMODE_NOTIF_CHANWIDTH_80MHZ = 2, IEEE80211_OPMODE_NOTIF_CHANWIDTH_160MHZ = 3, - IEEE80211_OPMODE_NOTIF_BW_160_80P80 = 0x04, IEEE80211_OPMODE_NOTIF_RX_NSS_MASK = 0x70, IEEE80211_OPMODE_NOTIF_RX_NSS_SHIFT = 4, IEEE80211_OPMODE_NOTIF_RX_NSS_TYPE_BF = 0x80, }; -/** - * enum ieee80211_s1g_chanwidth - * These are defined in IEEE802.11-2016ah Table 10-20 - * as BSS Channel Width - * - * @IEEE80211_S1G_CHANWIDTH_1MHZ: 1MHz operating channel - * @IEEE80211_S1G_CHANWIDTH_2MHZ: 2MHz operating channel - * @IEEE80211_S1G_CHANWIDTH_4MHZ: 4MHz operating channel - * @IEEE80211_S1G_CHANWIDTH_8MHZ: 8MHz operating channel - * @IEEE80211_S1G_CHANWIDTH_16MHZ: 16MHz operating channel - */ -enum ieee80211_s1g_chanwidth { - IEEE80211_S1G_CHANWIDTH_1MHZ = 0, - IEEE80211_S1G_CHANWIDTH_2MHZ = 1, - IEEE80211_S1G_CHANWIDTH_4MHZ = 3, - IEEE80211_S1G_CHANWIDTH_8MHZ = 7, - IEEE80211_S1G_CHANWIDTH_16MHZ = 15, -}; - #define WLAN_SA_QUERY_TR_ID_LEN 2 #define WLAN_MEMBERSHIP_LEN 8 #define WLAN_USER_POSITION_LEN 16 @@ -1020,116 +879,6 @@ struct ieee80211_tpc_report_ie { u8 link_margin; } __packed; -#define IEEE80211_ADDBA_EXT_FRAG_LEVEL_MASK GENMASK(2, 1) -#define IEEE80211_ADDBA_EXT_FRAG_LEVEL_SHIFT 1 -#define IEEE80211_ADDBA_EXT_NO_FRAG BIT(0) - -struct ieee80211_addba_ext_ie { - u8 data; -} __packed; - -/** - * struct ieee80211_s1g_bcn_compat_ie - * - * S1G Beacon Compatibility element - */ -struct ieee80211_s1g_bcn_compat_ie { - __le16 compat_info; - __le16 beacon_int; - __le32 tsf_completion; -} __packed; - -/** - * struct ieee80211_s1g_oper_ie - * - * S1G Operation element - */ -struct ieee80211_s1g_oper_ie { - u8 ch_width; - u8 oper_class; - u8 primary_ch; - u8 oper_ch; - __le16 basic_mcs_nss; -} __packed; - -/** - * struct ieee80211_aid_response_ie - * - * AID Response element - */ -struct ieee80211_aid_response_ie { - __le16 aid; - u8 switch_count; - __le16 response_int; -} __packed; - -struct 
ieee80211_s1g_cap { - u8 capab_info[10]; - u8 supp_mcs_nss[5]; -} __packed; - -struct ieee80211_ext { - __le16 frame_control; - __le16 duration; - union { - struct { - u8 sa[ETH_ALEN]; - __le32 timestamp; - u8 change_seq; - u8 variable[0]; - } __packed s1g_beacon; - struct { - u8 sa[ETH_ALEN]; - __le32 timestamp; - u8 change_seq; - u8 next_tbtt[3]; - u8 variable[0]; - } __packed s1g_short_beacon; - } u; -} __packed __aligned(2); - -#define IEEE80211_TWT_CONTROL_NDP BIT(0) -#define IEEE80211_TWT_CONTROL_RESP_MODE BIT(1) -#define IEEE80211_TWT_CONTROL_NEG_TYPE_BROADCAST BIT(3) -#define IEEE80211_TWT_CONTROL_RX_DISABLED BIT(4) -#define IEEE80211_TWT_CONTROL_WAKE_DUR_UNIT BIT(5) - -#define IEEE80211_TWT_REQTYPE_REQUEST BIT(0) -#define IEEE80211_TWT_REQTYPE_SETUP_CMD GENMASK(3, 1) -#define IEEE80211_TWT_REQTYPE_TRIGGER BIT(4) -#define IEEE80211_TWT_REQTYPE_IMPLICIT BIT(5) -#define IEEE80211_TWT_REQTYPE_FLOWTYPE BIT(6) -#define IEEE80211_TWT_REQTYPE_FLOWID GENMASK(9, 7) -#define IEEE80211_TWT_REQTYPE_WAKE_INT_EXP GENMASK(14, 10) -#define IEEE80211_TWT_REQTYPE_PROTECTION BIT(15) - -enum ieee80211_twt_setup_cmd { - TWT_SETUP_CMD_REQUEST, - TWT_SETUP_CMD_SUGGEST, - TWT_SETUP_CMD_DEMAND, - TWT_SETUP_CMD_GROUPING, - TWT_SETUP_CMD_ACCEPT, - TWT_SETUP_CMD_ALTERNATE, - TWT_SETUP_CMD_DICTATE, - TWT_SETUP_CMD_REJECT, -}; - -struct ieee80211_twt_params { - __le16 req_type; - __le64 twt; - u8 min_twt_dur; - __le16 mantissa; - u8 channel; -} __packed; - -struct ieee80211_twt_setup { - u8 dialog_token; - u8 element_id; - u8 length; - u8 control; - u8 params[]; -} __packed; - struct ieee80211_mgmt { __le16 frame_control; __le16 duration; @@ -1161,11 +910,6 @@ struct ieee80211_mgmt { /* followed by Supported rates */ u8 variable[0]; } __packed assoc_resp, reassoc_resp; - struct { - __le16 capab_info; - __le16 status_code; - u8 variable[0]; - } __packed s1g_assoc_resp, s1g_reassoc_resp; struct { __le16 capab_info; __le16 listen_interval; @@ -1227,8 +971,6 @@ struct ieee80211_mgmt { 
__le16 capab; __le16 timeout; __le16 start_seq_num; - /* followed by BA Extension */ - u8 variable[0]; } __packed addba_req; struct{ u8 action_code; @@ -1294,20 +1036,13 @@ struct ieee80211_mgmt { __le16 toa_error; u8 variable[0]; } __packed ftm; - struct { - u8 action_code; - u8 variable[]; - } __packed s1g; } u; } __packed action; } u; } __packed __aligned(2); -/* Supported rates membership selectors */ +/* Supported Rates value encodings in 802.11n-2009 7.3.2.2 */ #define BSS_MEMBERSHIP_SELECTOR_HT_PHY 127 -#define BSS_MEMBERSHIP_SELECTOR_VHT_PHY 126 -#define BSS_MEMBERSHIP_SELECTOR_HE_PHY 122 -#define BSS_MEMBERSHIP_SELECTOR_SAE_H2E 123 /* mgmt header + 1 byte category code */ #define IEEE80211_MIN_ACTION_SIZE offsetof(struct ieee80211_mgmt, u.action.u) @@ -1673,8 +1408,6 @@ struct ieee80211_ht_operation { #define IEEE80211_HT_OP_MODE_PROTECTION_NONHT_MIXED 3 #define IEEE80211_HT_OP_MODE_NON_GF_STA_PRSNT 0x0004 #define IEEE80211_HT_OP_MODE_NON_HT_STA_PRSNT 0x0010 -#define IEEE80211_HT_OP_MODE_CCFS2_SHIFT 5 -#define IEEE80211_HT_OP_MODE_CCFS2_MASK 0x1fe0 /* for stbc_param */ #define IEEE80211_HT_STBC_PARAM_DUAL_BEACON 0x0040 @@ -1694,13 +1427,11 @@ struct ieee80211_ht_operation { #define IEEE80211_DELBA_PARAM_INITIATOR_MASK 0x0800 /* - * A-MPDU buffer sizes - * According to HT size varies from 8 to 64 frames - * HE adds the ability to have up to 256 frames. + * A-PMDU buffer sizes + * According to IEEE802.11n spec size varies from 8K to 64K (in powers of 2) */ -#define IEEE80211_MIN_AMPDU_BUF 0x8 -#define IEEE80211_MAX_AMPDU_BUF_HT 0x40 -#define IEEE80211_MAX_AMPDU_BUF 0x100 +#define IEEE80211_MIN_AMPDU_BUF 0x8 +#define IEEE80211_MAX_AMPDU_BUF 0x40 /* Spatial Multiplexing Power Save Modes (for capability) */ @@ -1721,16 +1452,13 @@ struct ieee80211_ht_operation { * STA can receive. Rate expressed in units of 1 Mbps. * If this field is 0 this value should not be used to * consider the highest RX data rate supported. 
- * The top 3 bits of this field indicate the Maximum NSTS,total - * (a beamformee capability.) + * The top 3 bits of this field are reserved. * @tx_mcs_map: TX MCS map 2 bits for each stream, total 8 streams * @tx_highest: Indicates highest long GI VHT PPDU data rate * STA can transmit. Rate expressed in units of 1 Mbps. * If this field is 0 this value should not be used to * consider the highest TX data rate supported. - * The top 2 bits of this field are reserved, the - * 3rd bit from the top indiciates VHT Extended NSS BW - * Capability. + * The top 3 bits of this field are reserved. */ struct ieee80211_vht_mcs_info { __le16 rx_mcs_map; @@ -1739,13 +1467,6 @@ struct ieee80211_vht_mcs_info { __le16 tx_highest; } __packed; -/* for rx_highest */ -#define IEEE80211_VHT_MAX_NSTS_TOTAL_SHIFT 13 -#define IEEE80211_VHT_MAX_NSTS_TOTAL_MASK (7 << IEEE80211_VHT_MAX_NSTS_TOTAL_SHIFT) - -/* for tx_highest */ -#define IEEE80211_VHT_EXT_NSS_BW_CAPABLE (1 << 13) - /** * enum ieee80211_vht_mcs_support - VHT MCS support definitions * @IEEE80211_VHT_MCS_SUPPORT_0_7: MCSes 0-7 are supported for the @@ -1801,129 +1522,17 @@ enum ieee80211_vht_chanwidth { * This structure is the "VHT operation element" as * described in 802.11ac D3.0 8.4.2.161 * @chan_width: Operating channel width - * @center_freq_seg0_idx: center freq segment 0 index * @center_freq_seg1_idx: center freq segment 1 index + * @center_freq_seg2_idx: center freq segment 2 index * @basic_mcs_set: VHT Basic MCS rate set */ struct ieee80211_vht_operation { u8 chan_width; - u8 center_freq_seg0_idx; u8 center_freq_seg1_idx; + u8 center_freq_seg2_idx; __le16 basic_mcs_set; } __packed; -/** - * struct ieee80211_he_cap_elem - HE capabilities element - * - * This structure is the "HE capabilities element" fixed fields as - * described in P802.11ax_D4.0 section 9.4.2.242.2 and 9.4.2.242.3 - */ -struct ieee80211_he_cap_elem { - u8 mac_cap_info[6]; - u8 phy_cap_info[11]; -} __packed; - -#define IEEE80211_TX_RX_MCS_NSS_DESC_MAX_LEN 
5 - -/** - * enum ieee80211_he_mcs_support - HE MCS support definitions - * @IEEE80211_HE_MCS_SUPPORT_0_7: MCSes 0-7 are supported for the - * number of streams - * @IEEE80211_HE_MCS_SUPPORT_0_9: MCSes 0-9 are supported - * @IEEE80211_HE_MCS_SUPPORT_0_11: MCSes 0-11 are supported - * @IEEE80211_HE_MCS_NOT_SUPPORTED: This number of streams isn't supported - * - * These definitions are used in each 2-bit subfield of the rx_mcs_* - * and tx_mcs_* fields of &struct ieee80211_he_mcs_nss_supp, which are - * both split into 8 subfields by number of streams. These values indicate - * which MCSes are supported for the number of streams the value appears - * for. - */ -enum ieee80211_he_mcs_support { - IEEE80211_HE_MCS_SUPPORT_0_7 = 0, - IEEE80211_HE_MCS_SUPPORT_0_9 = 1, - IEEE80211_HE_MCS_SUPPORT_0_11 = 2, - IEEE80211_HE_MCS_NOT_SUPPORTED = 3, -}; - -/** - * struct ieee80211_he_mcs_nss_supp - HE Tx/Rx HE MCS NSS Support Field - * - * This structure holds the data required for the Tx/Rx HE MCS NSS Support Field - * described in P802.11ax_D2.0 section 9.4.2.237.4 - * - * @rx_mcs_80: Rx MCS map 2 bits for each stream, total 8 streams, for channel - * widths less than 80MHz. - * @tx_mcs_80: Tx MCS map 2 bits for each stream, total 8 streams, for channel - * widths less than 80MHz. - * @rx_mcs_160: Rx MCS map 2 bits for each stream, total 8 streams, for channel - * width 160MHz. - * @tx_mcs_160: Tx MCS map 2 bits for each stream, total 8 streams, for channel - * width 160MHz. - * @rx_mcs_80p80: Rx MCS map 2 bits for each stream, total 8 streams, for - * channel width 80p80MHz. - * @tx_mcs_80p80: Tx MCS map 2 bits for each stream, total 8 streams, for - * channel width 80p80MHz. 
- */ -struct ieee80211_he_mcs_nss_supp { - __le16 rx_mcs_80; - __le16 tx_mcs_80; - __le16 rx_mcs_160; - __le16 tx_mcs_160; - __le16 rx_mcs_80p80; - __le16 tx_mcs_80p80; -} __packed; - -/** - * struct ieee80211_he_operation - HE capabilities element - * - * This structure is the "HE operation element" fields as - * described in P802.11ax_D4.0 section 9.4.2.243 - */ -struct ieee80211_he_operation { - __le32 he_oper_params; - __le16 he_mcs_nss_set; - /* Optional 0,1,3,4,5,7 or 8 bytes: depends on @he_oper_params */ - u8 optional[]; -} __packed; - -/** - * struct ieee80211_he_spr - HE spatial reuse element - * - * This structure is the "HE spatial reuse element" element as - * described in P802.11ax_D4.0 section 9.4.2.241 - */ -struct ieee80211_he_spr { - u8 he_sr_control; - /* Optional 0 to 19 bytes: depends on @he_sr_control */ - u8 optional[]; -} __packed; - -/** - * struct ieee80211_he_mu_edca_param_ac_rec - MU AC Parameter Record field - * - * This structure is the "MU AC Parameter Record" fields as - * described in P802.11ax_D4.0 section 9.4.2.245 - */ -struct ieee80211_he_mu_edca_param_ac_rec { - u8 aifsn; - u8 ecw_min_max; - u8 mu_edca_timer; -} __packed; - -/** - * struct ieee80211_mu_edca_param_set - MU EDCA Parameter Set element - * - * This structure is the "MU EDCA Parameter Set element" fields as - * described in P802.11ax_D4.0 section 9.4.2.245 - */ -struct ieee80211_mu_edca_param_set { - u8 mu_qos_info; - struct ieee80211_he_mu_edca_param_ac_rec ac_be; - struct ieee80211_he_mu_edca_param_ac_rec ac_bk; - struct ieee80211_he_mu_edca_param_ac_rec ac_vi; - struct ieee80211_he_mu_edca_param_ac_rec ac_vo; -} __packed; /* 802.11ac VHT Capabilities */ #define IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_3895 0x00000000 @@ -1933,7 +1542,6 @@ struct ieee80211_mu_edca_param_set { #define IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160MHZ 0x00000004 #define IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160_80PLUS80MHZ 0x00000008 #define IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_MASK 0x0000000C -#define 
IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_SHIFT 2 #define IEEE80211_VHT_CAP_RXLDPC 0x00000010 #define IEEE80211_VHT_CAP_SHORT_GI_80 0x00000020 #define IEEE80211_VHT_CAP_SHORT_GI_160 0x00000040 @@ -1943,7 +1551,6 @@ struct ieee80211_mu_edca_param_set { #define IEEE80211_VHT_CAP_RXSTBC_3 0x00000300 #define IEEE80211_VHT_CAP_RXSTBC_4 0x00000400 #define IEEE80211_VHT_CAP_RXSTBC_MASK 0x00000700 -#define IEEE80211_VHT_CAP_RXSTBC_SHIFT 8 #define IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE 0x00000800 #define IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE 0x00001000 #define IEEE80211_VHT_CAP_BEAMFORMEE_STS_SHIFT 13 @@ -1963,617 +1570,12 @@ struct ieee80211_mu_edca_param_set { #define IEEE80211_VHT_CAP_VHT_LINK_ADAPTATION_VHT_MRQ_MFB 0x0c000000 #define IEEE80211_VHT_CAP_RX_ANTENNA_PATTERN 0x10000000 #define IEEE80211_VHT_CAP_TX_ANTENNA_PATTERN 0x20000000 -#define IEEE80211_VHT_CAP_EXT_NSS_BW_SHIFT 30 -#define IEEE80211_VHT_CAP_EXT_NSS_BW_MASK 0xc0000000 - -/** - * ieee80211_get_vht_max_nss - return max NSS for a given bandwidth/MCS - * @cap: VHT capabilities of the peer - * @bw: bandwidth to use - * @mcs: MCS index to use - * @ext_nss_bw_capable: indicates whether or not the local transmitter - * (rate scaling algorithm) can deal with the new logic - * (dot11VHTExtendedNSSBWCapable) - * @max_vht_nss: current maximum NSS as advertised by the STA in - * operating mode notification, can be 0 in which case the - * capability data will be used to derive this (from MCS support) - * - * Due to the VHT Extended NSS Bandwidth Support, the maximum NSS can - * vary for a given BW/MCS. This function parses the data. - * - * Note: This function is exported by cfg80211. 
- */ -int ieee80211_get_vht_max_nss(struct ieee80211_vht_cap *cap, - enum ieee80211_vht_chanwidth bw, - int mcs, bool ext_nss_bw_capable, - unsigned int max_vht_nss); - -/* 802.11ax HE MAC capabilities */ -#define IEEE80211_HE_MAC_CAP0_HTC_HE 0x01 -#define IEEE80211_HE_MAC_CAP0_TWT_REQ 0x02 -#define IEEE80211_HE_MAC_CAP0_TWT_RES 0x04 -#define IEEE80211_HE_MAC_CAP0_DYNAMIC_FRAG_NOT_SUPP 0x00 -#define IEEE80211_HE_MAC_CAP0_DYNAMIC_FRAG_LEVEL_1 0x08 -#define IEEE80211_HE_MAC_CAP0_DYNAMIC_FRAG_LEVEL_2 0x10 -#define IEEE80211_HE_MAC_CAP0_DYNAMIC_FRAG_LEVEL_3 0x18 -#define IEEE80211_HE_MAC_CAP0_DYNAMIC_FRAG_MASK 0x18 -#define IEEE80211_HE_MAC_CAP0_MAX_NUM_FRAG_MSDU_1 0x00 -#define IEEE80211_HE_MAC_CAP0_MAX_NUM_FRAG_MSDU_2 0x20 -#define IEEE80211_HE_MAC_CAP0_MAX_NUM_FRAG_MSDU_4 0x40 -#define IEEE80211_HE_MAC_CAP0_MAX_NUM_FRAG_MSDU_8 0x60 -#define IEEE80211_HE_MAC_CAP0_MAX_NUM_FRAG_MSDU_16 0x80 -#define IEEE80211_HE_MAC_CAP0_MAX_NUM_FRAG_MSDU_32 0xa0 -#define IEEE80211_HE_MAC_CAP0_MAX_NUM_FRAG_MSDU_64 0xc0 -#define IEEE80211_HE_MAC_CAP0_MAX_NUM_FRAG_MSDU_UNLIMITED 0xe0 -#define IEEE80211_HE_MAC_CAP0_MAX_NUM_FRAG_MSDU_MASK 0xe0 - -#define IEEE80211_HE_MAC_CAP1_MIN_FRAG_SIZE_UNLIMITED 0x00 -#define IEEE80211_HE_MAC_CAP1_MIN_FRAG_SIZE_128 0x01 -#define IEEE80211_HE_MAC_CAP1_MIN_FRAG_SIZE_256 0x02 -#define IEEE80211_HE_MAC_CAP1_MIN_FRAG_SIZE_512 0x03 -#define IEEE80211_HE_MAC_CAP1_MIN_FRAG_SIZE_MASK 0x03 -#define IEEE80211_HE_MAC_CAP1_TF_MAC_PAD_DUR_0US 0x00 -#define IEEE80211_HE_MAC_CAP1_TF_MAC_PAD_DUR_8US 0x04 -#define IEEE80211_HE_MAC_CAP1_TF_MAC_PAD_DUR_16US 0x08 -#define IEEE80211_HE_MAC_CAP1_TF_MAC_PAD_DUR_MASK 0x0c -#define IEEE80211_HE_MAC_CAP1_MULTI_TID_AGG_RX_QOS_1 0x00 -#define IEEE80211_HE_MAC_CAP1_MULTI_TID_AGG_RX_QOS_2 0x10 -#define IEEE80211_HE_MAC_CAP1_MULTI_TID_AGG_RX_QOS_3 0x20 -#define IEEE80211_HE_MAC_CAP1_MULTI_TID_AGG_RX_QOS_4 0x30 -#define IEEE80211_HE_MAC_CAP1_MULTI_TID_AGG_RX_QOS_5 0x40 -#define IEEE80211_HE_MAC_CAP1_MULTI_TID_AGG_RX_QOS_6 0x50 
-#define IEEE80211_HE_MAC_CAP1_MULTI_TID_AGG_RX_QOS_7 0x60 -#define IEEE80211_HE_MAC_CAP1_MULTI_TID_AGG_RX_QOS_8 0x70 -#define IEEE80211_HE_MAC_CAP1_MULTI_TID_AGG_RX_QOS_MASK 0x70 - -/* Link adaptation is split between byte HE_MAC_CAP1 and - * HE_MAC_CAP2. It should be set only if IEEE80211_HE_MAC_CAP0_HTC_HE - * in which case the following values apply: - * 0 = No feedback. - * 1 = reserved. - * 2 = Unsolicited feedback. - * 3 = both - */ -#define IEEE80211_HE_MAC_CAP1_LINK_ADAPTATION 0x80 - -#define IEEE80211_HE_MAC_CAP2_LINK_ADAPTATION 0x01 -#define IEEE80211_HE_MAC_CAP2_ALL_ACK 0x02 -#define IEEE80211_HE_MAC_CAP2_TRS 0x04 -#define IEEE80211_HE_MAC_CAP2_BSR 0x08 -#define IEEE80211_HE_MAC_CAP2_BCAST_TWT 0x10 -#define IEEE80211_HE_MAC_CAP2_32BIT_BA_BITMAP 0x20 -#define IEEE80211_HE_MAC_CAP2_MU_CASCADING 0x40 -#define IEEE80211_HE_MAC_CAP2_ACK_EN 0x80 - -#define IEEE80211_HE_MAC_CAP3_OMI_CONTROL 0x02 -#define IEEE80211_HE_MAC_CAP3_OFDMA_RA 0x04 - -/* The maximum length of an A-MDPU is defined by the combination of the Maximum - * A-MDPU Length Exponent field in the HT capabilities, VHT capabilities and the - * same field in the HE capabilities. 
- */ -#define IEEE80211_HE_MAC_CAP3_MAX_AMPDU_LEN_EXP_EXT_0 0x00 -#define IEEE80211_HE_MAC_CAP3_MAX_AMPDU_LEN_EXP_EXT_1 0x08 -#define IEEE80211_HE_MAC_CAP3_MAX_AMPDU_LEN_EXP_EXT_2 0x10 -#define IEEE80211_HE_MAC_CAP3_MAX_AMPDU_LEN_EXP_EXT_3 0x18 -#define IEEE80211_HE_MAC_CAP3_MAX_AMPDU_LEN_EXP_MASK 0x18 -#define IEEE80211_HE_MAC_CAP3_AMSDU_FRAG 0x20 -#define IEEE80211_HE_MAC_CAP3_FLEX_TWT_SCHED 0x40 -#define IEEE80211_HE_MAC_CAP3_RX_CTRL_FRAME_TO_MULTIBSS 0x80 - -#define IEEE80211_HE_MAC_CAP4_BSRP_BQRP_A_MPDU_AGG 0x01 -#define IEEE80211_HE_MAC_CAP4_QTP 0x02 -#define IEEE80211_HE_MAC_CAP4_BQR 0x04 -#define IEEE80211_HE_MAC_CAP4_PSR_RESP 0x08 -#define IEEE80211_HE_MAC_CAP4_NDP_FB_REP 0x10 -#define IEEE80211_HE_MAC_CAP4_OPS 0x20 -#define IEEE80211_HE_MAC_CAP4_AMSDU_IN_AMPDU 0x40 -/* Multi TID agg TX is split between byte #4 and #5 - * The value is a combination of B39,B40,B41 - */ -#define IEEE80211_HE_MAC_CAP4_MULTI_TID_AGG_TX_QOS_B39 0x80 - -#define IEEE80211_HE_MAC_CAP5_MULTI_TID_AGG_TX_QOS_B40 0x01 -#define IEEE80211_HE_MAC_CAP5_MULTI_TID_AGG_TX_QOS_B41 0x02 -#define IEEE80211_HE_MAC_CAP5_SUBCHAN_SELECTIVE_TRANSMISSION 0x04 -#define IEEE80211_HE_MAC_CAP5_UL_2x996_TONE_RU 0x08 -#define IEEE80211_HE_MAC_CAP5_OM_CTRL_UL_MU_DATA_DIS_RX 0x10 -#define IEEE80211_HE_MAC_CAP5_HE_DYNAMIC_SM_PS 0x20 -#define IEEE80211_HE_MAC_CAP5_PUNCTURED_SOUNDING 0x40 -#define IEEE80211_HE_MAC_CAP5_HT_VHT_TRIG_FRAME_RX 0x80 - -#define IEEE80211_HE_VHT_MAX_AMPDU_FACTOR 20 -#define IEEE80211_HE_HT_MAX_AMPDU_FACTOR 16 - -/* 802.11ax HE PHY capabilities */ -#define IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_IN_2G 0x02 -#define IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_80MHZ_IN_5G 0x04 -#define IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_160MHZ_IN_5G 0x08 -#define IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_80PLUS80_MHZ_IN_5G 0x10 -#define IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_RU_MAPPING_IN_2G 0x20 -#define IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_RU_MAPPING_IN_5G 0x40 -#define 
IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_MASK 0xfe - -#define IEEE80211_HE_PHY_CAP1_PREAMBLE_PUNC_RX_80MHZ_ONLY_SECOND_20MHZ 0x01 -#define IEEE80211_HE_PHY_CAP1_PREAMBLE_PUNC_RX_80MHZ_ONLY_SECOND_40MHZ 0x02 -#define IEEE80211_HE_PHY_CAP1_PREAMBLE_PUNC_RX_160MHZ_ONLY_SECOND_20MHZ 0x04 -#define IEEE80211_HE_PHY_CAP1_PREAMBLE_PUNC_RX_160MHZ_ONLY_SECOND_40MHZ 0x08 -#define IEEE80211_HE_PHY_CAP1_PREAMBLE_PUNC_RX_MASK 0x0f -#define IEEE80211_HE_PHY_CAP1_DEVICE_CLASS_A 0x10 -#define IEEE80211_HE_PHY_CAP1_LDPC_CODING_IN_PAYLOAD 0x20 -#define IEEE80211_HE_PHY_CAP1_HE_LTF_AND_GI_FOR_HE_PPDUS_0_8US 0x40 -/* Midamble RX/TX Max NSTS is split between byte #2 and byte #3 */ -#define IEEE80211_HE_PHY_CAP1_MIDAMBLE_RX_TX_MAX_NSTS 0x80 - -#define IEEE80211_HE_PHY_CAP2_MIDAMBLE_RX_TX_MAX_NSTS 0x01 -#define IEEE80211_HE_PHY_CAP2_NDP_4x_LTF_AND_3_2US 0x02 -#define IEEE80211_HE_PHY_CAP2_STBC_TX_UNDER_80MHZ 0x04 -#define IEEE80211_HE_PHY_CAP2_STBC_RX_UNDER_80MHZ 0x08 -#define IEEE80211_HE_PHY_CAP2_DOPPLER_TX 0x10 -#define IEEE80211_HE_PHY_CAP2_DOPPLER_RX 0x20 - -/* Note that the meaning of UL MU below is different between an AP and a non-AP - * sta, where in the AP case it indicates support for Rx and in the non-AP sta - * case it indicates support for Tx. 
- */ -#define IEEE80211_HE_PHY_CAP2_UL_MU_FULL_MU_MIMO 0x40 -#define IEEE80211_HE_PHY_CAP2_UL_MU_PARTIAL_MU_MIMO 0x80 - -#define IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_TX_NO_DCM 0x00 -#define IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_TX_BPSK 0x01 -#define IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_TX_QPSK 0x02 -#define IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_TX_16_QAM 0x03 -#define IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_TX_MASK 0x03 -#define IEEE80211_HE_PHY_CAP3_DCM_MAX_TX_NSS_1 0x00 -#define IEEE80211_HE_PHY_CAP3_DCM_MAX_TX_NSS_2 0x04 -#define IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_RX_NO_DCM 0x00 -#define IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_RX_BPSK 0x08 -#define IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_RX_QPSK 0x10 -#define IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_RX_16_QAM 0x18 -#define IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_RX_MASK 0x18 -#define IEEE80211_HE_PHY_CAP3_DCM_MAX_RX_NSS_1 0x00 -#define IEEE80211_HE_PHY_CAP3_DCM_MAX_RX_NSS_2 0x20 -#define IEEE80211_HE_PHY_CAP3_RX_PARTIAL_BW_SU_IN_20MHZ_MU 0x40 -#define IEEE80211_HE_PHY_CAP3_SU_BEAMFORMER 0x80 - -#define IEEE80211_HE_PHY_CAP4_SU_BEAMFORMEE 0x01 -#define IEEE80211_HE_PHY_CAP4_MU_BEAMFORMER 0x02 - -/* Minimal allowed value of Max STS under 80MHz is 3 */ -#define IEEE80211_HE_PHY_CAP4_BEAMFORMEE_MAX_STS_UNDER_80MHZ_4 0x0c -#define IEEE80211_HE_PHY_CAP4_BEAMFORMEE_MAX_STS_UNDER_80MHZ_5 0x10 -#define IEEE80211_HE_PHY_CAP4_BEAMFORMEE_MAX_STS_UNDER_80MHZ_6 0x14 -#define IEEE80211_HE_PHY_CAP4_BEAMFORMEE_MAX_STS_UNDER_80MHZ_7 0x18 -#define IEEE80211_HE_PHY_CAP4_BEAMFORMEE_MAX_STS_UNDER_80MHZ_8 0x1c -#define IEEE80211_HE_PHY_CAP4_BEAMFORMEE_MAX_STS_UNDER_80MHZ_MASK 0x1c - -/* Minimal allowed value of Max STS above 80MHz is 3 */ -#define IEEE80211_HE_PHY_CAP4_BEAMFORMEE_MAX_STS_ABOVE_80MHZ_4 0x60 -#define IEEE80211_HE_PHY_CAP4_BEAMFORMEE_MAX_STS_ABOVE_80MHZ_5 0x80 -#define IEEE80211_HE_PHY_CAP4_BEAMFORMEE_MAX_STS_ABOVE_80MHZ_6 0xa0 -#define IEEE80211_HE_PHY_CAP4_BEAMFORMEE_MAX_STS_ABOVE_80MHZ_7 0xc0 -#define 
IEEE80211_HE_PHY_CAP4_BEAMFORMEE_MAX_STS_ABOVE_80MHZ_8 0xe0 -#define IEEE80211_HE_PHY_CAP4_BEAMFORMEE_MAX_STS_ABOVE_80MHZ_MASK 0xe0 - -#define IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_UNDER_80MHZ_1 0x00 -#define IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_UNDER_80MHZ_2 0x01 -#define IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_UNDER_80MHZ_3 0x02 -#define IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_UNDER_80MHZ_4 0x03 -#define IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_UNDER_80MHZ_5 0x04 -#define IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_UNDER_80MHZ_6 0x05 -#define IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_UNDER_80MHZ_7 0x06 -#define IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_UNDER_80MHZ_8 0x07 -#define IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_UNDER_80MHZ_MASK 0x07 - -#define IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_ABOVE_80MHZ_1 0x00 -#define IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_ABOVE_80MHZ_2 0x08 -#define IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_ABOVE_80MHZ_3 0x10 -#define IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_ABOVE_80MHZ_4 0x18 -#define IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_ABOVE_80MHZ_5 0x20 -#define IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_ABOVE_80MHZ_6 0x28 -#define IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_ABOVE_80MHZ_7 0x30 -#define IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_ABOVE_80MHZ_8 0x38 -#define IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_ABOVE_80MHZ_MASK 0x38 - -#define IEEE80211_HE_PHY_CAP5_NG16_SU_FEEDBACK 0x40 -#define IEEE80211_HE_PHY_CAP5_NG16_MU_FEEDBACK 0x80 - -#define IEEE80211_HE_PHY_CAP6_CODEBOOK_SIZE_42_SU 0x01 -#define IEEE80211_HE_PHY_CAP6_CODEBOOK_SIZE_75_MU 0x02 -#define IEEE80211_HE_PHY_CAP6_TRIG_SU_BEAMFORMING_FB 0x04 -#define IEEE80211_HE_PHY_CAP6_TRIG_MU_BEAMFORMING_PARTIAL_BW_FB 0x08 -#define IEEE80211_HE_PHY_CAP6_TRIG_CQI_FB 0x10 -#define IEEE80211_HE_PHY_CAP6_PARTIAL_BW_EXT_RANGE 0x20 -#define IEEE80211_HE_PHY_CAP6_PARTIAL_BANDWIDTH_DL_MUMIMO 0x40 -#define 
IEEE80211_HE_PHY_CAP6_PPE_THRESHOLD_PRESENT 0x80 - -#define IEEE80211_HE_PHY_CAP7_PSR_BASED_SR 0x01 -#define IEEE80211_HE_PHY_CAP7_POWER_BOOST_FACTOR_SUPP 0x02 -#define IEEE80211_HE_PHY_CAP7_HE_SU_MU_PPDU_4XLTF_AND_08_US_GI 0x04 -#define IEEE80211_HE_PHY_CAP7_MAX_NC_1 0x08 -#define IEEE80211_HE_PHY_CAP7_MAX_NC_2 0x10 -#define IEEE80211_HE_PHY_CAP7_MAX_NC_3 0x18 -#define IEEE80211_HE_PHY_CAP7_MAX_NC_4 0x20 -#define IEEE80211_HE_PHY_CAP7_MAX_NC_5 0x28 -#define IEEE80211_HE_PHY_CAP7_MAX_NC_6 0x30 -#define IEEE80211_HE_PHY_CAP7_MAX_NC_7 0x38 -#define IEEE80211_HE_PHY_CAP7_MAX_NC_MASK 0x38 -#define IEEE80211_HE_PHY_CAP7_STBC_TX_ABOVE_80MHZ 0x40 -#define IEEE80211_HE_PHY_CAP7_STBC_RX_ABOVE_80MHZ 0x80 - -#define IEEE80211_HE_PHY_CAP8_HE_ER_SU_PPDU_4XLTF_AND_08_US_GI 0x01 -#define IEEE80211_HE_PHY_CAP8_20MHZ_IN_40MHZ_HE_PPDU_IN_2G 0x02 -#define IEEE80211_HE_PHY_CAP8_20MHZ_IN_160MHZ_HE_PPDU 0x04 -#define IEEE80211_HE_PHY_CAP8_80MHZ_IN_160MHZ_HE_PPDU 0x08 -#define IEEE80211_HE_PHY_CAP8_HE_ER_SU_1XLTF_AND_08_US_GI 0x10 -#define IEEE80211_HE_PHY_CAP8_MIDAMBLE_RX_TX_2X_AND_1XLTF 0x20 -#define IEEE80211_HE_PHY_CAP8_DCM_MAX_RU_242 0x00 -#define IEEE80211_HE_PHY_CAP8_DCM_MAX_RU_484 0x40 -#define IEEE80211_HE_PHY_CAP8_DCM_MAX_RU_996 0x80 -#define IEEE80211_HE_PHY_CAP8_DCM_MAX_RU_2x996 0xc0 -#define IEEE80211_HE_PHY_CAP8_DCM_MAX_RU_MASK 0xc0 - -#define IEEE80211_HE_PHY_CAP9_LONGER_THAN_16_SIGB_OFDM_SYM 0x01 -#define IEEE80211_HE_PHY_CAP9_NON_TRIGGERED_CQI_FEEDBACK 0x02 -#define IEEE80211_HE_PHY_CAP9_TX_1024_QAM_LESS_THAN_242_TONE_RU 0x04 -#define IEEE80211_HE_PHY_CAP9_RX_1024_QAM_LESS_THAN_242_TONE_RU 0x08 -#define IEEE80211_HE_PHY_CAP9_RX_FULL_BW_SU_USING_MU_WITH_COMP_SIGB 0x10 -#define IEEE80211_HE_PHY_CAP9_RX_FULL_BW_SU_USING_MU_WITH_NON_COMP_SIGB 0x20 -#define IEEE80211_HE_PHY_CAP9_NOMIMAL_PKT_PADDING_0US 0x00 -#define IEEE80211_HE_PHY_CAP9_NOMIMAL_PKT_PADDING_8US 0x40 -#define IEEE80211_HE_PHY_CAP9_NOMIMAL_PKT_PADDING_16US 0x80 -#define 
IEEE80211_HE_PHY_CAP9_NOMIMAL_PKT_PADDING_RESERVED 0xc0 -#define IEEE80211_HE_PHY_CAP9_NOMIMAL_PKT_PADDING_MASK 0xc0 - -#define IEEE80211_HE_PHY_CAP10_HE_MU_M1RU_MAX_LTF 0x01 - -/* 802.11ax HE TX/RX MCS NSS Support */ -#define IEEE80211_TX_RX_MCS_NSS_SUPP_HIGHEST_MCS_POS (3) -#define IEEE80211_TX_RX_MCS_NSS_SUPP_TX_BITMAP_POS (6) -#define IEEE80211_TX_RX_MCS_NSS_SUPP_RX_BITMAP_POS (11) -#define IEEE80211_TX_RX_MCS_NSS_SUPP_TX_BITMAP_MASK 0x07c0 -#define IEEE80211_TX_RX_MCS_NSS_SUPP_RX_BITMAP_MASK 0xf800 - -/* TX/RX HE MCS Support field Highest MCS subfield encoding */ -enum ieee80211_he_highest_mcs_supported_subfield_enc { - HIGHEST_MCS_SUPPORTED_MCS7 = 0, - HIGHEST_MCS_SUPPORTED_MCS8, - HIGHEST_MCS_SUPPORTED_MCS9, - HIGHEST_MCS_SUPPORTED_MCS10, - HIGHEST_MCS_SUPPORTED_MCS11, -}; - -/* Calculate 802.11ax HE capabilities IE Tx/Rx HE MCS NSS Support Field size */ -static inline u8 -ieee80211_he_mcs_nss_size(const struct ieee80211_he_cap_elem *he_cap) -{ - u8 count = 4; - - if (he_cap->phy_cap_info[0] & - IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_160MHZ_IN_5G) - count += 4; - - if (he_cap->phy_cap_info[0] & - IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_80PLUS80_MHZ_IN_5G) - count += 4; - - return count; -} - -/* 802.11ax HE PPE Thresholds */ -#define IEEE80211_PPE_THRES_NSS_SUPPORT_2NSS (1) -#define IEEE80211_PPE_THRES_NSS_POS (0) -#define IEEE80211_PPE_THRES_NSS_MASK (7) -#define IEEE80211_PPE_THRES_RU_INDEX_BITMASK_2x966_AND_966_RU \ - (BIT(5) | BIT(6)) -#define IEEE80211_PPE_THRES_RU_INDEX_BITMASK_MASK 0x78 -#define IEEE80211_PPE_THRES_RU_INDEX_BITMASK_POS (3) -#define IEEE80211_PPE_THRES_INFO_PPET_SIZE (3) - -/* - * Calculate 802.11ax HE capabilities IE PPE field size - * Input: Header byte of ppe_thres (first byte), and HE capa IE's PHY cap u8* - */ -static inline u8 -ieee80211_he_ppe_size(u8 ppe_thres_hdr, const u8 *phy_cap_info) -{ - u8 n; - - if ((phy_cap_info[6] & - IEEE80211_HE_PHY_CAP6_PPE_THRESHOLD_PRESENT) == 0) - return 0; - - n = hweight8(ppe_thres_hdr & - 
IEEE80211_PPE_THRES_RU_INDEX_BITMASK_MASK); - n *= (1 + ((ppe_thres_hdr & IEEE80211_PPE_THRES_NSS_MASK) >> - IEEE80211_PPE_THRES_NSS_POS)); - - /* - * Each pair is 6 bits, and we need to add the 7 "header" bits to the - * total size. - */ - n = (n * IEEE80211_PPE_THRES_INFO_PPET_SIZE * 2) + 7; - n = DIV_ROUND_UP(n, 8); - - return n; -} - -/* HE Operation defines */ -#define IEEE80211_HE_OPERATION_DFLT_PE_DURATION_MASK 0x00000007 -#define IEEE80211_HE_OPERATION_TWT_REQUIRED 0x00000008 -#define IEEE80211_HE_OPERATION_RTS_THRESHOLD_MASK 0x00003ff0 -#define IEEE80211_HE_OPERATION_RTS_THRESHOLD_OFFSET 4 -#define IEEE80211_HE_OPERATION_VHT_OPER_INFO 0x00004000 -#define IEEE80211_HE_OPERATION_CO_HOSTED_BSS 0x00008000 -#define IEEE80211_HE_OPERATION_ER_SU_DISABLE 0x00010000 -#define IEEE80211_HE_OPERATION_6GHZ_OP_INFO 0x00020000 -#define IEEE80211_HE_OPERATION_BSS_COLOR_MASK 0x3f000000 -#define IEEE80211_HE_OPERATION_BSS_COLOR_OFFSET 24 -#define IEEE80211_HE_OPERATION_PARTIAL_BSS_COLOR 0x40000000 -#define IEEE80211_HE_OPERATION_BSS_COLOR_DISABLED 0x80000000 - -#define IEEE80211_6GHZ_CTRL_REG_LPI_AP 0 -#define IEEE80211_6GHZ_CTRL_REG_SP_AP 1 - -/** - * ieee80211_he_6ghz_oper - HE 6 GHz operation Information field - * @primary: primary channel - * @control: control flags - * @ccfs0: channel center frequency segment 0 - * @ccfs1: channel center frequency segment 1 - * @minrate: minimum rate (in 1 Mbps units) - */ -struct ieee80211_he_6ghz_oper { - u8 primary; -#define IEEE80211_HE_6GHZ_OPER_CTRL_CHANWIDTH 0x3 -#define IEEE80211_HE_6GHZ_OPER_CTRL_CHANWIDTH_20MHZ 0 -#define IEEE80211_HE_6GHZ_OPER_CTRL_CHANWIDTH_40MHZ 1 -#define IEEE80211_HE_6GHZ_OPER_CTRL_CHANWIDTH_80MHZ 2 -#define IEEE80211_HE_6GHZ_OPER_CTRL_CHANWIDTH_160MHZ 3 -#define IEEE80211_HE_6GHZ_OPER_CTRL_DUP_BEACON 0x4 -#define IEEE80211_HE_6GHZ_OPER_CTRL_REG_INFO 0x38 - u8 control; - u8 ccfs0; - u8 ccfs1; - u8 minrate; -} __packed; - -/* - * In "9.4.2.161 Transmit Power Envelope element" of "IEEE Std 802.11ax-2021", 
- * it show four types in "Table 9-275a-Maximum Transmit Power Interpretation - * subfield encoding", and two category for each type in "Table E-12-Regulatory - * Info subfield encoding in the United States". - * So it it totally max 8 Transmit Power Envelope element. - */ -#define IEEE80211_TPE_MAX_IE_COUNT 8 -/* - * In "Table 9-277—Meaning of Maximum Transmit Power Count subfield" - * of "IEEE Std 802.11axâ„¢â€2021", the max power level is 8. - */ -#define IEEE80211_MAX_NUM_PWR_LEVEL 8 - -#define IEEE80211_TPE_MAX_POWER_COUNT 8 - -/* transmit power interpretation type of transmit power envelope element */ -enum ieee80211_tx_power_intrpt_type { - IEEE80211_TPE_LOCAL_EIRP, - IEEE80211_TPE_LOCAL_EIRP_PSD, - IEEE80211_TPE_REG_CLIENT_EIRP, - IEEE80211_TPE_REG_CLIENT_EIRP_PSD, -}; - -/** - * struct ieee80211_tx_pwr_env - * - * This structure represents the "Transmit Power Envelope element" - */ -struct ieee80211_tx_pwr_env { - u8 tx_power_info; - s8 tx_power[IEEE80211_TPE_MAX_POWER_COUNT]; -} __packed; - -#define IEEE80211_TX_PWR_ENV_INFO_COUNT 0x7 -#define IEEE80211_TX_PWR_ENV_INFO_INTERPRET 0x38 -#define IEEE80211_TX_PWR_ENV_INFO_CATEGORY 0xC0 - -/* - * ieee80211_he_oper_size - calculate 802.11ax HE Operations IE size - * @he_oper_ie: byte data of the He Operations IE, stating from the byte - * after the ext ID byte. It is assumed that he_oper_ie has at least - * sizeof(struct ieee80211_he_operation) bytes, the caller must have - * validated this. 
- * @return the actual size of the IE data (not including header), or 0 on error - */ -static inline u8 -ieee80211_he_oper_size(const u8 *he_oper_ie) -{ - struct ieee80211_he_operation *he_oper = (void *)he_oper_ie; - u8 oper_len = sizeof(struct ieee80211_he_operation); - u32 he_oper_params; - - /* Make sure the input is not NULL */ - if (!he_oper_ie) - return 0; - - /* Calc required length */ - he_oper_params = le32_to_cpu(he_oper->he_oper_params); - if (he_oper_params & IEEE80211_HE_OPERATION_VHT_OPER_INFO) - oper_len += 3; - if (he_oper_params & IEEE80211_HE_OPERATION_CO_HOSTED_BSS) - oper_len++; - if (he_oper_params & IEEE80211_HE_OPERATION_6GHZ_OP_INFO) - oper_len += sizeof(struct ieee80211_he_6ghz_oper); - - /* Add the first byte (extension ID) to the total length */ - oper_len++; - - return oper_len; -} - -/** - * ieee80211_he_6ghz_oper - obtain 6 GHz operation field - * @he_oper: HE operation element (must be pre-validated for size) - * but may be %NULL - * - * Return: a pointer to the 6 GHz operation field, or %NULL - */ -static inline const struct ieee80211_he_6ghz_oper * -ieee80211_he_6ghz_oper(const struct ieee80211_he_operation *he_oper) -{ - const u8 *ret = (void *)&he_oper->optional; - u32 he_oper_params; - - if (!he_oper) - return NULL; - - he_oper_params = le32_to_cpu(he_oper->he_oper_params); - - if (!(he_oper_params & IEEE80211_HE_OPERATION_6GHZ_OP_INFO)) - return NULL; - if (he_oper_params & IEEE80211_HE_OPERATION_VHT_OPER_INFO) - ret += 3; - if (he_oper_params & IEEE80211_HE_OPERATION_CO_HOSTED_BSS) - ret++; - - return (void *)ret; -} - -/* HE Spatial Reuse defines */ -#define IEEE80211_HE_SPR_PSR_DISALLOWED BIT(0) -#define IEEE80211_HE_SPR_NON_SRG_OBSS_PD_SR_DISALLOWED BIT(1) -#define IEEE80211_HE_SPR_NON_SRG_OFFSET_PRESENT BIT(2) -#define IEEE80211_HE_SPR_SRG_INFORMATION_PRESENT BIT(3) -#define IEEE80211_HE_SPR_HESIGA_SR_VAL15_ALLOWED BIT(4) - -/* - * ieee80211_he_spr_size - calculate 802.11ax HE Spatial Reuse IE size - * @he_spr_ie: byte 
data of the He Spatial Reuse IE, stating from the byte - * after the ext ID byte. It is assumed that he_spr_ie has at least - * sizeof(struct ieee80211_he_spr) bytes, the caller must have validated - * this - * @return the actual size of the IE data (not including header), or 0 on error - */ -static inline u8 -ieee80211_he_spr_size(const u8 *he_spr_ie) -{ - struct ieee80211_he_spr *he_spr = (void *)he_spr_ie; - u8 spr_len = sizeof(struct ieee80211_he_spr); - u8 he_spr_params; - - /* Make sure the input is not NULL */ - if (!he_spr_ie) - return 0; - - /* Calc required length */ - he_spr_params = he_spr->he_sr_control; - if (he_spr_params & IEEE80211_HE_SPR_NON_SRG_OFFSET_PRESENT) - spr_len++; - if (he_spr_params & IEEE80211_HE_SPR_SRG_INFORMATION_PRESENT) - spr_len += 18; - - /* Add the first byte (extension ID) to the total length */ - spr_len++; - - return spr_len; -} - -/* S1G Capabilities Information field */ -#define IEEE80211_S1G_CAPABILITY_LEN 15 - -#define S1G_CAP0_S1G_LONG BIT(0) -#define S1G_CAP0_SGI_1MHZ BIT(1) -#define S1G_CAP0_SGI_2MHZ BIT(2) -#define S1G_CAP0_SGI_4MHZ BIT(3) -#define S1G_CAP0_SGI_8MHZ BIT(4) -#define S1G_CAP0_SGI_16MHZ BIT(5) -#define S1G_CAP0_SUPP_CH_WIDTH GENMASK(7, 6) - -#define S1G_SUPP_CH_WIDTH_2 0 -#define S1G_SUPP_CH_WIDTH_4 1 -#define S1G_SUPP_CH_WIDTH_8 2 -#define S1G_SUPP_CH_WIDTH_16 3 -#define S1G_SUPP_CH_WIDTH_MAX(cap) ((1 << FIELD_GET(S1G_CAP0_SUPP_CH_WIDTH, \ - cap[0])) << 1) - -#define S1G_CAP1_RX_LDPC BIT(0) -#define S1G_CAP1_TX_STBC BIT(1) -#define S1G_CAP1_RX_STBC BIT(2) -#define S1G_CAP1_SU_BFER BIT(3) -#define S1G_CAP1_SU_BFEE BIT(4) -#define S1G_CAP1_BFEE_STS GENMASK(7, 5) - -#define S1G_CAP2_SOUNDING_DIMENSIONS GENMASK(2, 0) -#define S1G_CAP2_MU_BFER BIT(3) -#define S1G_CAP2_MU_BFEE BIT(4) -#define S1G_CAP2_PLUS_HTC_VHT BIT(5) -#define S1G_CAP2_TRAVELING_PILOT GENMASK(7, 6) - -#define S1G_CAP3_RD_RESPONDER BIT(0) -#define S1G_CAP3_HT_DELAYED_BA BIT(1) -#define S1G_CAP3_MAX_MPDU_LEN BIT(2) -#define 
S1G_CAP3_MAX_AMPDU_LEN_EXP GENMASK(4, 3) -#define S1G_CAP3_MIN_MPDU_START GENMASK(7, 5) - -#define S1G_CAP4_UPLINK_SYNC BIT(0) -#define S1G_CAP4_DYNAMIC_AID BIT(1) -#define S1G_CAP4_BAT BIT(2) -#define S1G_CAP4_TIME_ADE BIT(3) -#define S1G_CAP4_NON_TIM BIT(4) -#define S1G_CAP4_GROUP_AID BIT(5) -#define S1G_CAP4_STA_TYPE GENMASK(7, 6) - -#define S1G_CAP5_CENT_AUTH_CONTROL BIT(0) -#define S1G_CAP5_DIST_AUTH_CONTROL BIT(1) -#define S1G_CAP5_AMSDU BIT(2) -#define S1G_CAP5_AMPDU BIT(3) -#define S1G_CAP5_ASYMMETRIC_BA BIT(4) -#define S1G_CAP5_FLOW_CONTROL BIT(5) -#define S1G_CAP5_SECTORIZED_BEAM GENMASK(7, 6) - -#define S1G_CAP6_OBSS_MITIGATION BIT(0) -#define S1G_CAP6_FRAGMENT_BA BIT(1) -#define S1G_CAP6_NDP_PS_POLL BIT(2) -#define S1G_CAP6_RAW_OPERATION BIT(3) -#define S1G_CAP6_PAGE_SLICING BIT(4) -#define S1G_CAP6_TXOP_SHARING_IMP_ACK BIT(5) -#define S1G_CAP6_VHT_LINK_ADAPT GENMASK(7, 6) - -#define S1G_CAP7_TACK_AS_PS_POLL BIT(0) -#define S1G_CAP7_DUP_1MHZ BIT(1) -#define S1G_CAP7_MCS_NEGOTIATION BIT(2) -#define S1G_CAP7_1MHZ_CTL_RESPONSE_PREAMBLE BIT(3) -#define S1G_CAP7_NDP_BFING_REPORT_POLL BIT(4) -#define S1G_CAP7_UNSOLICITED_DYN_AID BIT(5) -#define S1G_CAP7_SECTOR_TRAINING_OPERATION BIT(6) -#define S1G_CAP7_TEMP_PS_MODE_SWITCH BIT(7) - -#define S1G_CAP8_TWT_GROUPING BIT(0) -#define S1G_CAP8_BDT BIT(1) -#define S1G_CAP8_COLOR GENMASK(4, 2) -#define S1G_CAP8_TWT_REQUEST BIT(5) -#define S1G_CAP8_TWT_RESPOND BIT(6) -#define S1G_CAP8_PV1_FRAME BIT(7) - -#define S1G_CAP9_LINK_ADAPT_PER_CONTROL_RESPONSE BIT(0) - -#define S1G_OPER_CH_WIDTH_PRIMARY_1MHZ BIT(0) -#define S1G_OPER_CH_WIDTH_OPER GENMASK(4, 1) - - -#define LISTEN_INT_USF GENMASK(15, 14) -#define LISTEN_INT_UI GENMASK(13, 0) - -#define IEEE80211_MAX_USF FIELD_MAX(LISTEN_INT_USF) -#define IEEE80211_MAX_UI FIELD_MAX(LISTEN_INT_UI) /* Authentication algorithms */ #define WLAN_AUTH_OPEN 0 #define WLAN_AUTH_SHARED_KEY 1 #define WLAN_AUTH_FT 2 #define WLAN_AUTH_SAE 3 -#define WLAN_AUTH_FILS_SK 4 -#define 
WLAN_AUTH_FILS_SK_PFS 5 -#define WLAN_AUTH_FILS_PK 6 #define WLAN_AUTH_LEAP 128 #define WLAN_AUTH_CHALLENGE_LEN 128 @@ -2629,8 +1631,6 @@ ieee80211_he_spr_size(const u8 *he_spr_ie) #define IEEE80211_SPCT_MSR_RPRT_TYPE_BASIC 0 #define IEEE80211_SPCT_MSR_RPRT_TYPE_CCA 1 #define IEEE80211_SPCT_MSR_RPRT_TYPE_RPI 2 -#define IEEE80211_SPCT_MSR_RPRT_TYPE_LCI 8 -#define IEEE80211_SPCT_MSR_RPRT_TYPE_CIVIC 11 /* 802.11g ERP information element */ #define WLAN_ERP_NON_ERP_PRESENT (1<<0) @@ -2715,11 +1715,6 @@ enum ieee80211_statuscode { WLAN_STATUS_REJECT_DSE_BAND = 96, WLAN_STATUS_DENIED_WITH_SUGGESTED_BAND_AND_CHANNEL = 99, WLAN_STATUS_DENIED_DUE_TO_SPECTRUM_MANAGEMENT = 103, - /* 802.11ai */ - WLAN_STATUS_FILS_AUTHENTICATION_FAILURE = 108, - WLAN_STATUS_UNKNOWN_AUTHENTICATION_SERVER = 109, - WLAN_STATUS_SAE_HASH_TO_ELEMENT = 126, - WLAN_STATUS_SAE_PK = 127, }; @@ -2957,63 +1952,14 @@ enum ieee80211_eid { WLAN_EID_VHT_OPERATION = 192, WLAN_EID_EXTENDED_BSS_LOAD = 193, WLAN_EID_WIDE_BW_CHANNEL_SWITCH = 194, - WLAN_EID_TX_POWER_ENVELOPE = 195, + WLAN_EID_VHT_TX_POWER_ENVELOPE = 195, WLAN_EID_CHANNEL_SWITCH_WRAPPER = 196, WLAN_EID_AID = 197, WLAN_EID_QUIET_CHANNEL = 198, WLAN_EID_OPMODE_NOTIF = 199, - WLAN_EID_REDUCED_NEIGHBOR_REPORT = 201, - - WLAN_EID_AID_REQUEST = 210, - WLAN_EID_AID_RESPONSE = 211, - WLAN_EID_S1G_BCN_COMPAT = 213, - WLAN_EID_S1G_SHORT_BCN_INTERVAL = 214, - WLAN_EID_S1G_TWT = 216, - WLAN_EID_S1G_CAPABILITIES = 217, WLAN_EID_VENDOR_SPECIFIC = 221, WLAN_EID_QOS_PARAMETER = 222, - WLAN_EID_S1G_OPERATION = 232, - WLAN_EID_CAG_NUMBER = 237, - WLAN_EID_AP_CSN = 239, - WLAN_EID_FILS_INDICATION = 240, - WLAN_EID_DILS = 241, - WLAN_EID_FRAGMENT = 242, - WLAN_EID_RSNX = 244, - WLAN_EID_EXTENSION = 255 -}; - -/* Element ID Extensions for Element ID 255 */ -enum ieee80211_eid_ext { - WLAN_EID_EXT_ASSOC_DELAY_INFO = 1, - WLAN_EID_EXT_FILS_REQ_PARAMS = 2, - WLAN_EID_EXT_FILS_KEY_CONFIRM = 3, - WLAN_EID_EXT_FILS_SESSION = 4, - WLAN_EID_EXT_FILS_HLP_CONTAINER = 5, - 
WLAN_EID_EXT_FILS_IP_ADDR_ASSIGN = 6, - WLAN_EID_EXT_KEY_DELIVERY = 7, - WLAN_EID_EXT_FILS_WRAPPED_DATA = 8, - WLAN_EID_EXT_FILS_PUBLIC_KEY = 12, - WLAN_EID_EXT_FILS_NONCE = 13, - WLAN_EID_EXT_FUTURE_CHAN_GUIDANCE = 14, - WLAN_EID_EXT_HE_CAPABILITY = 35, - WLAN_EID_EXT_HE_OPERATION = 36, - WLAN_EID_EXT_UORA = 37, - WLAN_EID_EXT_HE_MU_EDCA = 38, - WLAN_EID_EXT_HE_SPR = 39, - WLAN_EID_EXT_NDP_FEEDBACK_REPORT_PARAMSET = 41, - WLAN_EID_EXT_BSS_COLOR_CHG_ANN = 42, - WLAN_EID_EXT_QUIET_TIME_PERIOD_SETUP = 43, - WLAN_EID_EXT_ESS_REPORT = 45, - WLAN_EID_EXT_OPS = 46, - WLAN_EID_EXT_HE_BSS_LOAD = 47, - WLAN_EID_EXT_MAX_CHANNEL_SWITCH_TIME = 52, - WLAN_EID_EXT_MULTIPLE_BSSID_CONFIGURATION = 55, - WLAN_EID_EXT_NON_INHERITANCE = 56, - WLAN_EID_EXT_KNOWN_BSSID = 57, - WLAN_EID_EXT_SHORT_SSID_LIST = 58, - WLAN_EID_EXT_HE_6GHZ_CAPA = 59, - WLAN_EID_EXT_UL_MU_POWER_CAPA = 60, }; /* Action category code */ @@ -3024,7 +1970,6 @@ enum ieee80211_category { WLAN_CATEGORY_BACK = 3, WLAN_CATEGORY_PUBLIC = 4, WLAN_CATEGORY_RADIO_MEASUREMENT = 5, - WLAN_CATEGORY_FAST_BBS_TRANSITION = 6, WLAN_CATEGORY_HT = 7, WLAN_CATEGORY_SA_QUERY = 8, WLAN_CATEGORY_PROTECTED_DUAL_OF_ACTION = 9, @@ -3039,7 +1984,6 @@ enum ieee80211_category { WLAN_CATEGORY_FST = 18, WLAN_CATEGORY_UNPROT_DMG = 20, WLAN_CATEGORY_VHT = 21, - WLAN_CATEGORY_S1G = 22, WLAN_CATEGORY_VENDOR_SPECIFIC_PROTECTED = 126, WLAN_CATEGORY_VENDOR_SPECIFIC = 127, }; @@ -3113,20 +2057,6 @@ enum ieee80211_key_len { WLAN_KEY_LEN_BIP_GMAC_256 = 32, }; -enum ieee80211_s1g_actioncode { - WLAN_S1G_AID_SWITCH_REQUEST, - WLAN_S1G_AID_SWITCH_RESPONSE, - WLAN_S1G_SYNC_CONTROL, - WLAN_S1G_STA_INFO_ANNOUNCE, - WLAN_S1G_EDCA_PARAM_SET, - WLAN_S1G_EL_OPERATION, - WLAN_S1G_TWT_SETUP, - WLAN_S1G_TWT_TEARDOWN, - WLAN_S1G_SECT_GROUP_ID_LIST, - WLAN_S1G_SECT_ID_FEEDBACK, - WLAN_S1G_TWT_INFORMATION = 11, -}; - #define IEEE80211_WEP_IV_LEN 4 #define IEEE80211_WEP_ICV_LEN 4 #define IEEE80211_CCMP_HDR_LEN 8 @@ -3143,53 +2073,10 @@ enum ieee80211_s1g_actioncode { 
#define IEEE80211_GCMP_MIC_LEN 16 #define IEEE80211_GCMP_PN_LEN 6 -#define FILS_NONCE_LEN 16 -#define FILS_MAX_KEK_LEN 64 - -#define FILS_ERP_MAX_USERNAME_LEN 16 -#define FILS_ERP_MAX_REALM_LEN 253 -#define FILS_ERP_MAX_RRK_LEN 64 - -#define PMK_MAX_LEN 64 -#define SAE_PASSWORD_MAX_LEN 128 - -/* Public action codes (IEEE Std 802.11-2016, 9.6.8.1, Table 9-307) */ +/* Public action codes */ enum ieee80211_pub_actioncode { - WLAN_PUB_ACTION_20_40_BSS_COEX = 0, - WLAN_PUB_ACTION_DSE_ENABLEMENT = 1, - WLAN_PUB_ACTION_DSE_DEENABLEMENT = 2, - WLAN_PUB_ACTION_DSE_REG_LOC_ANN = 3, WLAN_PUB_ACTION_EXT_CHANSW_ANN = 4, - WLAN_PUB_ACTION_DSE_MSMT_REQ = 5, - WLAN_PUB_ACTION_DSE_MSMT_RESP = 6, - WLAN_PUB_ACTION_MSMT_PILOT = 7, - WLAN_PUB_ACTION_DSE_PC = 8, - WLAN_PUB_ACTION_VENDOR_SPECIFIC = 9, - WLAN_PUB_ACTION_GAS_INITIAL_REQ = 10, - WLAN_PUB_ACTION_GAS_INITIAL_RESP = 11, - WLAN_PUB_ACTION_GAS_COMEBACK_REQ = 12, - WLAN_PUB_ACTION_GAS_COMEBACK_RESP = 13, WLAN_PUB_ACTION_TDLS_DISCOVER_RES = 14, - WLAN_PUB_ACTION_LOC_TRACK_NOTI = 15, - WLAN_PUB_ACTION_QAB_REQUEST_FRAME = 16, - WLAN_PUB_ACTION_QAB_RESPONSE_FRAME = 17, - WLAN_PUB_ACTION_QMF_POLICY = 18, - WLAN_PUB_ACTION_QMF_POLICY_CHANGE = 19, - WLAN_PUB_ACTION_QLOAD_REQUEST = 20, - WLAN_PUB_ACTION_QLOAD_REPORT = 21, - WLAN_PUB_ACTION_HCCA_TXOP_ADVERT = 22, - WLAN_PUB_ACTION_HCCA_TXOP_RESPONSE = 23, - WLAN_PUB_ACTION_PUBLIC_KEY = 24, - WLAN_PUB_ACTION_CHANNEL_AVAIL_QUERY = 25, - WLAN_PUB_ACTION_CHANNEL_SCHEDULE_MGMT = 26, - WLAN_PUB_ACTION_CONTACT_VERI_SIGNAL = 27, - WLAN_PUB_ACTION_GDD_ENABLEMENT_REQ = 28, - WLAN_PUB_ACTION_GDD_ENABLEMENT_RESP = 29, - WLAN_PUB_ACTION_NETWORK_CHANNEL_CONTROL = 30, - WLAN_PUB_ACTION_WHITE_SPACE_MAP_ANN = 31, - WLAN_PUB_ACTION_FTM_REQUEST = 32, - WLAN_PUB_ACTION_FTM = 33, - WLAN_PUB_ACTION_FILS_DISCOVERY = 34, }; /* TDLS action codes */ @@ -3212,17 +2099,7 @@ enum ieee80211_tdls_actioncode { */ #define WLAN_EXT_CAPA1_EXT_CHANNEL_SWITCHING BIT(2) -/* Multiple BSSID capability is set in the 6th bit of 
3rd byte of the - * @WLAN_EID_EXT_CAPABILITY information element - */ -#define WLAN_EXT_CAPA3_MULTI_BSSID_SUPPORT BIT(6) - -/* Timing Measurement protocol for time sync is set in the 7th bit of 3rd byte - * of the @WLAN_EID_EXT_CAPABILITY information element - */ -#define WLAN_EXT_CAPA3_TIMING_MEASUREMENT_SUPPORT BIT(7) - -/* TDLS capabilities in the 4th byte of @WLAN_EID_EXT_CAPABILITY */ +/* TDLS capabilities in the the 4th byte of @WLAN_EID_EXT_CAPABILITY */ #define WLAN_EXT_CAPA4_TDLS_BUFFER_STA BIT(4) #define WLAN_EXT_CAPA4_TDLS_PEER_PSM BIT(5) #define WLAN_EXT_CAPA4_TDLS_CHAN_SWITCH BIT(6) @@ -3253,20 +2130,6 @@ enum ieee80211_tdls_actioncode { */ #define WLAN_EXT_CAPA9_FTM_INITIATOR BIT(7) -/* Defines support for TWT Requester and TWT Responder */ -#define WLAN_EXT_CAPA10_TWT_REQUESTER_SUPPORT BIT(5) -#define WLAN_EXT_CAPA10_TWT_RESPONDER_SUPPORT BIT(6) - -/* - * When set, indicates that the AP is able to tolerate 26-tone RU UL - * OFDMA transmissions using HE TB PPDU from OBSS (not falsely classify the - * 26-tone RU UL OFDMA transmissions as radar pulses). 
- */ -#define WLAN_EXT_CAPA10_OBSS_NARROW_BW_RU_TOLERANCE_SUPPORT BIT(7) - -/* Defines support for enhanced multi-bssid advertisement*/ -#define WLAN_EXT_CAPA11_EMA_SUPPORT BIT(3) - /* TDLS specific payload type in the LLC/SNAP header */ #define WLAN_TDLS_SNAP_RFTYPE 0x2 @@ -3274,37 +2137,37 @@ enum ieee80211_tdls_actioncode { #define WLAN_BSS_COEX_INFORMATION_REQUEST BIT(0) /** - * enum ieee80211_mesh_sync_method - mesh synchronization method identifier + * enum - mesh synchronization method identifier * * @IEEE80211_SYNC_METHOD_NEIGHBOR_OFFSET: the default synchronization method * @IEEE80211_SYNC_METHOD_VENDOR: a vendor specific synchronization method * that will be specified in a vendor specific information element */ -enum ieee80211_mesh_sync_method { +enum { IEEE80211_SYNC_METHOD_NEIGHBOR_OFFSET = 1, IEEE80211_SYNC_METHOD_VENDOR = 255, }; /** - * enum ieee80211_mesh_path_protocol - mesh path selection protocol identifier + * enum - mesh path selection protocol identifier * * @IEEE80211_PATH_PROTOCOL_HWMP: the default path selection protocol * @IEEE80211_PATH_PROTOCOL_VENDOR: a vendor specific protocol that will * be specified in a vendor specific information element */ -enum ieee80211_mesh_path_protocol { +enum { IEEE80211_PATH_PROTOCOL_HWMP = 1, IEEE80211_PATH_PROTOCOL_VENDOR = 255, }; /** - * enum ieee80211_mesh_path_metric - mesh path selection metric identifier + * enum - mesh path selection metric identifier * * @IEEE80211_PATH_METRIC_AIRTIME: the default path selection metric * @IEEE80211_PATH_METRIC_VENDOR: a vendor specific metric that will be * specified in a vendor specific information element */ -enum ieee80211_mesh_path_metric { +enum { IEEE80211_PATH_METRIC_AIRTIME = 1, IEEE80211_PATH_METRIC_VENDOR = 255, }; @@ -3413,32 +2276,6 @@ struct ieee80211_timeout_interval_ie { __le32 value; } __packed; -/** - * enum ieee80211_idle_options - BSS idle options - * @WLAN_IDLE_OPTIONS_PROTECTED_KEEP_ALIVE: the station should send an RSN - * protected frame to 
the AP to reset the idle timer at the AP for - * the station. - */ -enum ieee80211_idle_options { - WLAN_IDLE_OPTIONS_PROTECTED_KEEP_ALIVE = BIT(0), -}; - -/** - * struct ieee80211_bss_max_idle_period_ie - * - * This structure refers to "BSS Max idle period element" - * - * @max_idle_period: indicates the time period during which a station can - * refrain from transmitting frames to its associated AP without being - * disassociated. In units of 1000 TUs. - * @idle_options: indicates the options associated with the BSS idle capability - * as specified in &enum ieee80211_idle_options. - */ -struct ieee80211_bss_max_idle_period_ie { - __le16 max_idle_period; - u8 idle_options; -} __packed; - /* BACK action code */ enum ieee80211_back_actioncode { WLAN_ACTION_ADDBA_REQ = 0, @@ -3458,94 +2295,43 @@ enum ieee80211_sa_query_action { WLAN_ACTION_SA_QUERY_RESPONSE = 1, }; -/** - * struct ieee80211_bssid_index - * - * This structure refers to "Multiple BSSID-index element" - * - * @bssid_index: BSSID index - * @dtim_period: optional, overrides transmitted BSS dtim period - * @dtim_count: optional, overrides transmitted BSS dtim count - */ -struct ieee80211_bssid_index { - u8 bssid_index; - u8 dtim_period; - u8 dtim_count; -}; - -/** - * struct ieee80211_multiple_bssid_configuration - * - * This structure refers to "Multiple BSSID Configuration element" - * - * @bssid_count: total number of active BSSIDs in the set - * @profile_periodicity: the least number of beacon frames need to be received - * in order to discover all the nontransmitted BSSIDs in the set. 
- */ -struct ieee80211_multiple_bssid_configuration { - u8 bssid_count; - u8 profile_periodicity; -}; - -#define SUITE(oui, id) (((oui) << 8) | (id)) /* cipher suite selectors */ -#define WLAN_CIPHER_SUITE_USE_GROUP SUITE(0x000FAC, 0) -#define WLAN_CIPHER_SUITE_WEP40 SUITE(0x000FAC, 1) -#define WLAN_CIPHER_SUITE_TKIP SUITE(0x000FAC, 2) -/* reserved: SUITE(0x000FAC, 3) */ -#define WLAN_CIPHER_SUITE_CCMP SUITE(0x000FAC, 4) -#define WLAN_CIPHER_SUITE_WEP104 SUITE(0x000FAC, 5) -#define WLAN_CIPHER_SUITE_AES_CMAC SUITE(0x000FAC, 6) -#define WLAN_CIPHER_SUITE_GCMP SUITE(0x000FAC, 8) -#define WLAN_CIPHER_SUITE_GCMP_256 SUITE(0x000FAC, 9) -#define WLAN_CIPHER_SUITE_CCMP_256 SUITE(0x000FAC, 10) -#define WLAN_CIPHER_SUITE_BIP_GMAC_128 SUITE(0x000FAC, 11) -#define WLAN_CIPHER_SUITE_BIP_GMAC_256 SUITE(0x000FAC, 12) -#define WLAN_CIPHER_SUITE_BIP_CMAC_256 SUITE(0x000FAC, 13) +#define WLAN_CIPHER_SUITE_USE_GROUP 0x000FAC00 +#define WLAN_CIPHER_SUITE_WEP40 0x000FAC01 +#define WLAN_CIPHER_SUITE_TKIP 0x000FAC02 +/* reserved: 0x000FAC03 */ +#define WLAN_CIPHER_SUITE_CCMP 0x000FAC04 +#define WLAN_CIPHER_SUITE_WEP104 0x000FAC05 +#define WLAN_CIPHER_SUITE_AES_CMAC 0x000FAC06 +#define WLAN_CIPHER_SUITE_GCMP 0x000FAC08 +#define WLAN_CIPHER_SUITE_GCMP_256 0x000FAC09 +#define WLAN_CIPHER_SUITE_CCMP_256 0x000FAC0A +#define WLAN_CIPHER_SUITE_BIP_GMAC_128 0x000FAC0B +#define WLAN_CIPHER_SUITE_BIP_GMAC_256 0x000FAC0C +#define WLAN_CIPHER_SUITE_BIP_CMAC_256 0x000FAC0D -#define WLAN_CIPHER_SUITE_SMS4 SUITE(0x001472, 1) +#define WLAN_CIPHER_SUITE_SMS4 0x00147201 /* AKM suite selectors */ -#define WLAN_AKM_SUITE_8021X SUITE(0x000FAC, 1) -#define WLAN_AKM_SUITE_PSK SUITE(0x000FAC, 2) -#define WLAN_AKM_SUITE_FT_8021X SUITE(0x000FAC, 3) -#define WLAN_AKM_SUITE_FT_PSK SUITE(0x000FAC, 4) -#define WLAN_AKM_SUITE_8021X_SHA256 SUITE(0x000FAC, 5) -#define WLAN_AKM_SUITE_PSK_SHA256 SUITE(0x000FAC, 6) -#define WLAN_AKM_SUITE_TDLS SUITE(0x000FAC, 7) -#define WLAN_AKM_SUITE_SAE SUITE(0x000FAC, 8) -#define 
WLAN_AKM_SUITE_FT_OVER_SAE SUITE(0x000FAC, 9) -#define WLAN_AKM_SUITE_AP_PEER_KEY SUITE(0x000FAC, 10) -#define WLAN_AKM_SUITE_8021X_SUITE_B SUITE(0x000FAC, 11) -#define WLAN_AKM_SUITE_8021X_SUITE_B_192 SUITE(0x000FAC, 12) -#define WLAN_AKM_SUITE_FT_8021X_SHA384 SUITE(0x000FAC, 13) -#define WLAN_AKM_SUITE_FILS_SHA256 SUITE(0x000FAC, 14) -#define WLAN_AKM_SUITE_FILS_SHA384 SUITE(0x000FAC, 15) -#define WLAN_AKM_SUITE_FT_FILS_SHA256 SUITE(0x000FAC, 16) -#define WLAN_AKM_SUITE_FT_FILS_SHA384 SUITE(0x000FAC, 17) -#define WLAN_AKM_SUITE_OWE SUITE(0x000FAC, 18) -#define WLAN_AKM_SUITE_FT_PSK_SHA384 SUITE(0x000FAC, 19) -#define WLAN_AKM_SUITE_PSK_SHA384 SUITE(0x000FAC, 20) - -#define WLAN_AKM_SUITE_WFA_DPP SUITE(WLAN_OUI_WFA, 2) +#define WLAN_AKM_SUITE_8021X 0x000FAC01 +#define WLAN_AKM_SUITE_PSK 0x000FAC02 +#define WLAN_AKM_SUITE_8021X_SHA256 0x000FAC05 +#define WLAN_AKM_SUITE_PSK_SHA256 0x000FAC06 +#define WLAN_AKM_SUITE_TDLS 0x000FAC07 +#define WLAN_AKM_SUITE_SAE 0x000FAC08 +#define WLAN_AKM_SUITE_FT_OVER_SAE 0x000FAC09 #define WLAN_MAX_KEY_LEN 32 -#define WLAN_PMK_NAME_LEN 16 #define WLAN_PMKID_LEN 16 -#define WLAN_PMK_LEN_EAP_LEAP 16 -#define WLAN_PMK_LEN 32 -#define WLAN_PMK_LEN_SUITE_B_192 48 #define WLAN_OUI_WFA 0x506f9a #define WLAN_OUI_TYPE_WFA_P2P 9 -#define WLAN_OUI_TYPE_WFA_DPP 0x1A #define WLAN_OUI_MICROSOFT 0x0050f2 #define WLAN_OUI_TYPE_MICROSOFT_WPA 1 #define WLAN_OUI_TYPE_MICROSOFT_WMM 2 #define WLAN_OUI_TYPE_MICROSOFT_WPS 4 -#define WLAN_OUI_TYPE_MICROSOFT_TPC 8 /* * WMM/802.11e Tspec Element @@ -3584,24 +2370,6 @@ struct ieee80211_tspec_ie { __le16 medium_time; } __packed; -struct ieee80211_he_6ghz_capa { - /* uses IEEE80211_HE_6GHZ_CAP_* below */ - __le16 capa; -} __packed; - -/* HE 6 GHz band capabilities */ -/* uses enum ieee80211_min_mpdu_spacing values */ -#define IEEE80211_HE_6GHZ_CAP_MIN_MPDU_START 0x0007 -/* uses enum ieee80211_vht_max_ampdu_length_exp values */ -#define IEEE80211_HE_6GHZ_CAP_MAX_AMPDU_LEN_EXP 0x0038 -/* uses 
IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_* values */ -#define IEEE80211_HE_6GHZ_CAP_MAX_MPDU_LEN 0x00c0 -/* WLAN_HT_CAP_SM_PS_* values */ -#define IEEE80211_HE_6GHZ_CAP_SM_PS 0x0600 -#define IEEE80211_HE_6GHZ_CAP_RD_RESPONDER 0x0800 -#define IEEE80211_HE_6GHZ_CAP_RX_ANTPAT_CONS 0x1000 -#define IEEE80211_HE_6GHZ_CAP_TX_ANTPAT_CONS 0x2000 - /** * ieee80211_get_qos_ctl - get pointer to qos control bytes * @hdr: the frame @@ -3619,17 +2387,6 @@ static inline u8 *ieee80211_get_qos_ctl(struct ieee80211_hdr *hdr) return (u8 *)hdr + 24; } -/** - * ieee80211_get_tid - get qos TID - * @hdr: the frame - */ -static inline u8 ieee80211_get_tid(struct ieee80211_hdr *hdr) -{ - u8 *qc = ieee80211_get_qos_ctl(hdr); - - return qc[0] & IEEE80211_QOS_CTL_TID_MASK; -} - /** * ieee80211_get_SA - get pointer to SA * @hdr: the frame @@ -3826,18 +2583,6 @@ static inline int ieee80211_get_tdls_action(struct sk_buff *skb, u32 hdr_size) #define TU_TO_JIFFIES(x) (usecs_to_jiffies((x) * 1024)) #define TU_TO_EXP_TIME(x) (jiffies + TU_TO_JIFFIES(x)) -/* convert frequencies */ -#define MHZ_TO_KHZ(freq) ((freq) * 1000) -#define KHZ_TO_MHZ(freq) ((freq) / 1000) -#define PR_KHZ(f) KHZ_TO_MHZ(f), f % 1000 -#define KHZ_F "%d.%03d" - -/* convert powers */ -#define DBI_TO_MBI(gain) ((gain) * 100) -#define MBI_TO_DBI(gain) ((gain) / 100) -#define DBM_TO_MBM(gain) ((gain) * 100) -#define MBM_TO_DBM(gain) ((gain) / 100) - /** * ieee80211_action_contains_tpc - checks if the frame contains TPC element * @skb: the skb containing the frame, length will be checked @@ -3885,97 +2630,4 @@ static inline bool ieee80211_action_contains_tpc(struct sk_buff *skb) return true; } -struct element { - u8 id; - u8 datalen; - u8 data[]; -} __packed; - -/* element iteration helpers */ -#define for_each_element(_elem, _data, _datalen) \ - for (_elem = (const struct element *)(_data); \ - (const u8 *)(_data) + (_datalen) - (const u8 *)_elem >= \ - (int)sizeof(*_elem) && \ - (const u8 *)(_data) + (_datalen) - (const u8 *)_elem >= \ - 
(int)sizeof(*_elem) + _elem->datalen; \ - _elem = (const struct element *)(_elem->data + _elem->datalen)) - -#define for_each_element_id(element, _id, data, datalen) \ - for_each_element(element, data, datalen) \ - if (element->id == (_id)) - -#define for_each_element_extid(element, extid, _data, _datalen) \ - for_each_element(element, _data, _datalen) \ - if (element->id == WLAN_EID_EXTENSION && \ - element->datalen > 0 && \ - element->data[0] == (extid)) - -#define for_each_subelement(sub, element) \ - for_each_element(sub, (element)->data, (element)->datalen) - -#define for_each_subelement_id(sub, id, element) \ - for_each_element_id(sub, id, (element)->data, (element)->datalen) - -#define for_each_subelement_extid(sub, extid, element) \ - for_each_element_extid(sub, extid, (element)->data, (element)->datalen) - -/** - * for_each_element_completed - determine if element parsing consumed all data - * @element: element pointer after for_each_element() or friends - * @data: same data pointer as passed to for_each_element() or friends - * @datalen: same data length as passed to for_each_element() or friends - * - * This function returns %true if all the data was parsed or considered - * while walking the elements. Only use this if your for_each_element() - * loop cannot be broken out of, otherwise it always returns %false. - * - * If some data was malformed, this returns %false since the last parsed - * element will not fill the whole remaining data. - */ -static inline bool for_each_element_completed(const struct element *element, - const void *data, size_t datalen) -{ - return (const u8 *)element == (const u8 *)data + datalen; -} - -/** - * RSNX Capabilities: - * bits 0-3: Field length (n-1) - */ -#define WLAN_RSNX_CAPA_PROTECTED_TWT BIT(4) -#define WLAN_RSNX_CAPA_SAE_H2E BIT(5) - -/* - * reduced neighbor report, based on Draft P802.11ax_D6.1, - * section 9.4.2.170 and accepted contributions. 
- */ -#define IEEE80211_AP_INFO_TBTT_HDR_TYPE 0x03 -#define IEEE80211_AP_INFO_TBTT_HDR_FILTERED 0x04 -#define IEEE80211_AP_INFO_TBTT_HDR_COLOC 0x08 -#define IEEE80211_AP_INFO_TBTT_HDR_COUNT 0xF0 -#define IEEE80211_TBTT_INFO_OFFSET_BSSID_BSS_PARAM 9 -#define IEEE80211_TBTT_INFO_OFFSET_BSSID_SSSID_BSS_PARAM 13 - -#define IEEE80211_RNR_TBTT_PARAMS_OCT_RECOMMENDED 0x01 -#define IEEE80211_RNR_TBTT_PARAMS_SAME_SSID 0x02 -#define IEEE80211_RNR_TBTT_PARAMS_MULTI_BSSID 0x04 -#define IEEE80211_RNR_TBTT_PARAMS_TRANSMITTED_BSSID 0x08 -#define IEEE80211_RNR_TBTT_PARAMS_COLOC_ESS 0x10 -#define IEEE80211_RNR_TBTT_PARAMS_PROBE_ACTIVE 0x20 -#define IEEE80211_RNR_TBTT_PARAMS_COLOC_AP 0x40 - -struct ieee80211_neighbor_ap_info { - u8 tbtt_info_hdr; - u8 tbtt_info_len; - u8 op_class; - u8 channel; -} __packed; - -enum ieee80211_range_params_max_total_ltf { - IEEE80211_RANGE_PARAMS_MAX_TOTAL_LTF_4 = 0, - IEEE80211_RANGE_PARAMS_MAX_TOTAL_LTF_8, - IEEE80211_RANGE_PARAMS_MAX_TOTAL_LTF_16, - IEEE80211_RANGE_PARAMS_MAX_TOTAL_LTF_UNSPECIFIED, -}; - #endif /* LINUX_IEEE80211_H */ diff --git a/include/linux/ieee802154.h b/include/linux/ieee802154.h index 95c8311622..ddb890174a 100644 --- a/include/linux/ieee802154.h +++ b/include/linux/ieee802154.h @@ -1,9 +1,17 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * IEEE802.15.4-2003 specification * * Copyright (C) 2007, 2008 Siemens AG * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * * Written by: * Pavel Smolenskiy * Maxim Gorbachyov diff --git a/include/linux/if_arp.h b/include/linux/if_arp.h index b712217f70..f563907ed7 100644 --- a/include/linux/if_arp.h +++ b/include/linux/if_arp.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * INET An implementation of the TCP/IP protocol suite for the LINUX * operating system. INET is implemented using the BSD Socket @@ -15,6 +14,11 @@ * Florian La Roche, * Jonathan Layes * Arnaldo Carvalho de Melo ARPHRD_HWX25 + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. */ #ifndef _LINUX_IF_ARP_H #define _LINUX_IF_ARP_H @@ -27,7 +31,7 @@ static inline struct arphdr *arp_hdr(const struct sk_buff *skb) return (struct arphdr *)skb_network_header(skb); } -static inline unsigned int arp_hdr_len(const struct net_device *dev) +static inline int arp_hdr_len(struct net_device *dev) { switch (dev->type) { #if IS_ENABLED(CONFIG_FIREWIRE_NET) @@ -40,22 +44,4 @@ static inline unsigned int arp_hdr_len(const struct net_device *dev) return sizeof(struct arphdr) + (dev->addr_len + sizeof(u32)) * 2; } } - -static inline bool dev_is_mac_header_xmit(const struct net_device *dev) -{ - switch (dev->type) { - case ARPHRD_TUNNEL: - case ARPHRD_TUNNEL6: - case ARPHRD_SIT: - case ARPHRD_IPGRE: - case ARPHRD_IP6GRE: - case ARPHRD_VOID: - case ARPHRD_NONE: - case ARPHRD_RAWIP: - return false; - default: - return true; - } -} - #endif /* _LINUX_IF_ARP_H */ diff --git a/include/linux/if_bridge.h b/include/linux/if_bridge.h index 509e18c7e7..c6587c01d9 100644 --- a/include/linux/if_bridge.h +++ b/include/linux/if_bridge.h @@ -1,9 +1,13 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * Linux ethernet bridge * * Authors: * Lennert Buytenhek + * + * This program is free software; you can redistribute it 
and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. */ #ifndef _LINUX_IF_BRIDGE_H #define _LINUX_IF_BRIDGE_H @@ -19,14 +23,7 @@ struct br_ip { #if IS_ENABLED(CONFIG_IPV6) struct in6_addr ip6; #endif - } src; - union { - __be32 ip4; -#if IS_ENABLED(CONFIG_IPV6) - struct in6_addr ip6; -#endif - unsigned char mac_addr[ETH_ALEN]; - } dst; + } u; __be16 proto; __u16 vid; }; @@ -49,33 +46,19 @@ struct br_ip_list { #define BR_LEARNING_SYNC BIT(9) #define BR_PROXYARP_WIFI BIT(10) #define BR_MCAST_FLOOD BIT(11) -#define BR_MULTICAST_TO_UNICAST BIT(12) -#define BR_VLAN_TUNNEL BIT(13) -#define BR_BCAST_FLOOD BIT(14) -#define BR_NEIGH_SUPPRESS BIT(15) -#define BR_ISOLATED BIT(16) -#define BR_MRP_AWARE BIT(17) -#define BR_MRP_LOST_CONT BIT(18) -#define BR_MRP_LOST_IN_CONT BIT(19) -#define BR_TX_FWD_OFFLOAD BIT(20) #define BR_DEFAULT_AGEING_TIME (300 * HZ) -struct net_bridge; -void brioctl_set(int (*hook)(struct net *net, struct net_bridge *br, - unsigned int cmd, struct ifreq *ifr, - void __user *uarg)); -int br_ioctl_call(struct net *net, struct net_bridge *br, unsigned int cmd, - struct ifreq *ifr, void __user *uarg); +extern void brioctl_set(int (*ioctl_hook)(struct net *, unsigned int, void __user *)); + +typedef int br_should_route_hook_t(struct sk_buff *skb); +extern br_should_route_hook_t __rcu *br_should_route_hook; #if IS_ENABLED(CONFIG_BRIDGE) && IS_ENABLED(CONFIG_BRIDGE_IGMP_SNOOPING) int br_multicast_list_adjacent(struct net_device *dev, struct list_head *br_ip_list); bool br_multicast_has_querier_anywhere(struct net_device *dev, int proto); bool br_multicast_has_querier_adjacent(struct net_device *dev, int proto); -bool br_multicast_has_router_adjacent(struct net_device *dev, int proto); -bool br_multicast_enabled(const struct net_device *dev); -bool br_multicast_router(const struct net_device *dev); #else static inline 
int br_multicast_list_adjacent(struct net_device *dev, struct list_head *br_ip_list) @@ -92,102 +75,6 @@ static inline bool br_multicast_has_querier_adjacent(struct net_device *dev, { return false; } - -static inline bool br_multicast_has_router_adjacent(struct net_device *dev, - int proto) -{ - return true; -} - -static inline bool br_multicast_enabled(const struct net_device *dev) -{ - return false; -} -static inline bool br_multicast_router(const struct net_device *dev) -{ - return false; -} -#endif - -#if IS_ENABLED(CONFIG_BRIDGE) && IS_ENABLED(CONFIG_BRIDGE_VLAN_FILTERING) -bool br_vlan_enabled(const struct net_device *dev); -int br_vlan_get_pvid(const struct net_device *dev, u16 *p_pvid); -int br_vlan_get_pvid_rcu(const struct net_device *dev, u16 *p_pvid); -int br_vlan_get_proto(const struct net_device *dev, u16 *p_proto); -int br_vlan_get_info(const struct net_device *dev, u16 vid, - struct bridge_vlan_info *p_vinfo); -int br_vlan_get_info_rcu(const struct net_device *dev, u16 vid, - struct bridge_vlan_info *p_vinfo); -#else -static inline bool br_vlan_enabled(const struct net_device *dev) -{ - return false; -} - -static inline int br_vlan_get_pvid(const struct net_device *dev, u16 *p_pvid) -{ - return -EINVAL; -} - -static inline int br_vlan_get_proto(const struct net_device *dev, u16 *p_proto) -{ - return -EINVAL; -} - -static inline int br_vlan_get_pvid_rcu(const struct net_device *dev, u16 *p_pvid) -{ - return -EINVAL; -} - -static inline int br_vlan_get_info(const struct net_device *dev, u16 vid, - struct bridge_vlan_info *p_vinfo) -{ - return -EINVAL; -} - -static inline int br_vlan_get_info_rcu(const struct net_device *dev, u16 vid, - struct bridge_vlan_info *p_vinfo) -{ - return -EINVAL; -} -#endif - -#if IS_ENABLED(CONFIG_BRIDGE) -struct net_device *br_fdb_find_port(const struct net_device *br_dev, - const unsigned char *addr, - __u16 vid); -void br_fdb_clear_offload(const struct net_device *dev, u16 vid); -bool br_port_flag_is_set(const struct 
net_device *dev, unsigned long flag); -u8 br_port_get_stp_state(const struct net_device *dev); -clock_t br_get_ageing_time(const struct net_device *br_dev); -#else -static inline struct net_device * -br_fdb_find_port(const struct net_device *br_dev, - const unsigned char *addr, - __u16 vid) -{ - return NULL; -} - -static inline void br_fdb_clear_offload(const struct net_device *dev, u16 vid) -{ -} - -static inline bool -br_port_flag_is_set(const struct net_device *dev, unsigned long flag) -{ - return false; -} - -static inline u8 br_port_get_stp_state(const struct net_device *dev) -{ - return BR_STATE_DISABLED; -} - -static inline clock_t br_get_ageing_time(const struct net_device *br_dev) -{ - return 0; -} #endif #endif diff --git a/include/linux/if_ether.h b/include/linux/if_ether.h index 8a9792a642..548fd535fd 100644 --- a/include/linux/if_ether.h +++ b/include/linux/if_ether.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * INET An implementation of the TCP/IP protocol suite for the LINUX * operating system. INET is implemented using the BSD Socket @@ -12,6 +11,11 @@ * Donald Becker, * Alan Cox, * Steve Whitehouse, + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. 
*/ #ifndef _LINUX_IF_ETHER_H #define _LINUX_IF_ETHER_H @@ -24,14 +28,6 @@ static inline struct ethhdr *eth_hdr(const struct sk_buff *skb) return (struct ethhdr *)skb_mac_header(skb); } -/* Prefer this version in TX path, instead of - * skb_reset_mac_header() + eth_hdr() - */ -static inline struct ethhdr *skb_eth_hdr(const struct sk_buff *skb) -{ - return (struct ethhdr *)skb->data; -} - static inline struct ethhdr *inner_eth_hdr(const struct sk_buff *skb) { return (struct ethhdr *)skb_inner_mac_header(skb); diff --git a/include/linux/if_fddi.h b/include/linux/if_fddi.h index c796f452d6..f5550b3eee 100644 --- a/include/linux/if_fddi.h +++ b/include/linux/if_fddi.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * INET An implementation of the TCP/IP protocol suite for the LINUX * operating system. INET is implemented using the BSD Socket @@ -16,6 +15,11 @@ * Alan Cox, * Steve Whitehouse, * Peter De Schrijver, + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. */ #ifndef _LINUX_IF_FDDI_H #define _LINUX_IF_FDDI_H diff --git a/include/linux/if_frad.h b/include/linux/if_frad.h index 52224de798..4316aa173d 100644 --- a/include/linux/if_frad.h +++ b/include/linux/if_frad.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * DLCI/FRAD Definitions for Frame Relay Access Devices. DLCI devices are * created for each DLCI associated with a FRAD. The FRAD driver @@ -15,6 +14,11 @@ * 0.15 Mike McLagan changed structure defs (packed) * re-arranged flags * added DLCI_RET vars + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. 
*/ #ifndef _FRAD_H_ #define _FRAD_H_ @@ -62,6 +66,8 @@ struct dlci_local struct frad_local { + struct net_device_stats stats; + /* devices which this FRAD is slaved to */ struct net_device *master[CONFIG_DLCI_MAX]; short dlci[CONFIG_DLCI_MAX]; @@ -79,7 +85,6 @@ struct frad_local /* fields that are used by the Sangoma SDLA cards */ struct timer_list timer; - struct net_device *dev; int type; /* adapter type */ int state; /* state of the S502/8 control latch */ int buffer; /* current buffer for S508 firmware */ diff --git a/include/linux/if_link.h b/include/linux/if_link.h index 622658dfbf..0b17c585b5 100644 --- a/include/linux/if_link.h +++ b/include/linux/if_link.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_IF_LINK_H #define _LINUX_IF_LINK_H @@ -13,8 +12,6 @@ struct ifla_vf_stats { __u64 tx_bytes; __u64 broadcast; __u64 multicast; - __u64 rx_dropped; - __u64 tx_dropped; }; struct ifla_vf_info { diff --git a/include/linux/if_ltalk.h b/include/linux/if_ltalk.h index 4cc1c0b778..81e434c507 100644 --- a/include/linux/if_ltalk.h +++ b/include/linux/if_ltalk.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef __LINUX_LTALK_H #define __LINUX_LTALK_H diff --git a/include/linux/if_macvlan.h b/include/linux/if_macvlan.h index 10c94a3936..a4ccc3122f 100644 --- a/include/linux/if_macvlan.h +++ b/include/linux/if_macvlan.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_IF_MACVLAN_H #define _LINUX_IF_MACVLAN_H @@ -10,7 +9,27 @@ #include #include +#if IS_ENABLED(CONFIG_MACVTAP) +struct socket *macvtap_get_socket(struct file *); +#else +#include +#include +struct file; +struct socket; +static inline struct socket *macvtap_get_socket(struct file *f) +{ + return ERR_PTR(-EINVAL); +} +#endif /* CONFIG_MACVTAP */ + struct macvlan_port; +struct macvtap_queue; + +/* + * Maximum times a macvtap device can be opened. This can be used to + * configure the number of receive queue, e.g. for multiqueue virtio. 
+ */ +#define MAX_MACVTAP_QUEUES 256 #define MACVLAN_MC_FILTER_BITS 8 #define MACVLAN_MC_FILTER_SZ (1 << MACVLAN_MC_FILTER_BITS) @@ -21,7 +40,7 @@ struct macvlan_dev { struct hlist_node hlist; struct macvlan_port *port; struct net_device *lowerdev; - void *accel_priv; + void *fwd_priv; struct vlan_pcpu_stats __percpu *pcpu_stats; DECLARE_BITMAP(mc_filter, MACVLAN_MC_FILTER_SZ); @@ -29,11 +48,19 @@ struct macvlan_dev { netdev_features_t set_features; enum macvlan_mode mode; u16 flags; - unsigned int macaddr_count; - u32 bc_queue_len_req; + /* This array tracks active taps. */ + struct macvtap_queue __rcu *taps[MAX_MACVTAP_QUEUES]; + /* This list tracks all taps (both enabled and disabled) */ + struct list_head queue_list; + int numvtaps; + int numqueues; + netdev_features_t tap_features; + int minor; + int nest_level; #ifdef CONFIG_NET_POLL_CONTROLLER struct netpoll *netpoll; #endif + unsigned int macaddr_count; }; static inline void macvlan_count_rx(const struct macvlan_dev *vlan, @@ -43,14 +70,13 @@ static inline void macvlan_count_rx(const struct macvlan_dev *vlan, if (likely(success)) { struct vlan_pcpu_stats *pcpu_stats; - pcpu_stats = get_cpu_ptr(vlan->pcpu_stats); + pcpu_stats = this_cpu_ptr(vlan->pcpu_stats); u64_stats_update_begin(&pcpu_stats->syncp); pcpu_stats->rx_packets++; pcpu_stats->rx_bytes += len; if (multicast) pcpu_stats->rx_multicast++; u64_stats_update_end(&pcpu_stats->syncp); - put_cpu_ptr(vlan->pcpu_stats); } else { this_cpu_inc(vlan->pcpu_stats->rx_errors); } @@ -59,8 +85,11 @@ static inline void macvlan_count_rx(const struct macvlan_dev *vlan, extern void macvlan_common_setup(struct net_device *dev); extern int macvlan_common_newlink(struct net *src_net, struct net_device *dev, - struct nlattr *tb[], struct nlattr *data[], - struct netlink_ext_ack *extack); + struct nlattr *tb[], struct nlattr *data[]); + +extern void macvlan_count_rx(const struct macvlan_dev *vlan, + unsigned int len, bool success, + bool multicast); extern void 
macvlan_dellink(struct net_device *dev, struct list_head *head); @@ -83,27 +112,4 @@ macvlan_dev_real_dev(const struct net_device *dev) } #endif -static inline void *macvlan_accel_priv(struct net_device *dev) -{ - struct macvlan_dev *macvlan = netdev_priv(dev); - - return macvlan->accel_priv; -} - -static inline bool macvlan_supports_dest_filter(struct net_device *dev) -{ - struct macvlan_dev *macvlan = netdev_priv(dev); - - return macvlan->mode == MACVLAN_MODE_PRIVATE || - macvlan->mode == MACVLAN_MODE_VEPA || - macvlan->mode == MACVLAN_MODE_BRIDGE; -} - -static inline int macvlan_release_l2fw_offload(struct net_device *dev) -{ - struct macvlan_dev *macvlan = netdev_priv(dev); - - macvlan->accel_priv = NULL; - return dev_uc_add(macvlan->lowerdev, dev->dev_addr); -} #endif /* _LINUX_IF_MACVLAN_H */ diff --git a/include/linux/if_phonet.h b/include/linux/if_phonet.h index 2d8486168e..bbcdb0a767 100644 --- a/include/linux/if_phonet.h +++ b/include/linux/if_phonet.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ /* * File: if_phonet.h * @@ -11,5 +10,5 @@ #include -extern const struct header_ops phonet_header_ops; +extern struct header_ops phonet_header_ops; #endif diff --git a/include/linux/if_pppol2tp.h b/include/linux/if_pppol2tp.h index 96d40942e5..0fb71e532b 100644 --- a/include/linux/if_pppol2tp.h +++ b/include/linux/if_pppol2tp.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ /*************************************************************************** * Linux PPP over L2TP (PPPoL2TP) Socket Implementation (RFC 2661) * @@ -6,6 +5,11 @@ * (l2tp_ppp.c). All version information wrt this file is located in l2tp_ppp.c * * License: + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. 
+ * */ #ifndef __LINUX_IF_PPPOL2TP_H #define __LINUX_IF_PPPOL2TP_H diff --git a/include/linux/if_pppox.h b/include/linux/if_pppox.h index 69e813bcb9..33a0237367 100644 --- a/include/linux/if_pppox.h +++ b/include/linux/if_pppox.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ /*************************************************************************** * Linux PPP over X - Generic PPP transport layer sockets * Linux PPP over Ethernet (PPPoE) Socket Implementation (RFC 2516) @@ -7,6 +6,11 @@ * (pppox.c). All version information wrt this file is located in pppox.c * * License: + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. + * */ #ifndef __LINUX_IF_PPPOX_H #define __LINUX_IF_PPPOX_H @@ -74,15 +78,12 @@ struct pppox_proto { int (*ioctl)(struct socket *sock, unsigned int cmd, unsigned long arg); struct module *owner; -}; +} __do_const; extern int register_pppox_proto(int proto_num, const struct pppox_proto *pp); extern void unregister_pppox_proto(int proto_num); extern void pppox_unbind_sock(struct sock *sk);/* delete ppp-channel binding */ extern int pppox_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg); -extern int pppox_compat_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg); - -#define PPPOEIOCSFWD32 _IOW(0xB1 ,0, compat_size_t) /* PPPoX socket states */ enum { diff --git a/include/linux/if_team.h b/include/linux/if_team.h index add607943c..c05216a8fb 100644 --- a/include/linux/if_team.h +++ b/include/linux/if_team.h @@ -1,7 +1,11 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * include/linux/if_team.h - Network team device driver header * Copyright (c) 2011 Jiri Pirko + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as 
published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. */ #ifndef _LINUX_IF_TEAM_H_ #define _LINUX_IF_TEAM_H_ @@ -67,14 +71,9 @@ struct team_port { u16 queue_id; struct list_head qom_list; /* node in queue override mapping list */ struct rcu_head rcu; - long mode_priv[]; + long mode_priv[0]; }; -static inline struct team_port *team_port_get_rcu(const struct net_device *dev) -{ - return rcu_dereference(dev->rx_handler_data); -} - static inline bool team_port_enabled(struct team_port *port) { return port->index != -1; @@ -85,24 +84,14 @@ static inline bool team_port_txable(struct team_port *port) return port->linkup && team_port_enabled(port); } -static inline bool team_port_dev_txable(const struct net_device *port_dev) -{ - struct team_port *port; - bool txable; - - rcu_read_lock(); - port = team_port_get_rcu(port_dev); - txable = port ? team_port_txable(port) : false; - rcu_read_unlock(); - - return txable; -} - #ifdef CONFIG_NET_POLL_CONTROLLER static inline void team_netpoll_send_skb(struct team_port *port, struct sk_buff *skb) { - netpoll_send_skb(port->np, skb); + struct netpoll *np = port->np; + + if (np) + netpoll_send_skb(np, skb); } #else static inline void team_netpoll_send_skb(struct team_port *port, @@ -220,7 +209,6 @@ struct team { atomic_t count_pending; struct delayed_work dw; } mcast_rejoin; - struct lock_class_key team_lock_key; long mode_priv[TEAM_MODE_PRIV_LONGS]; }; @@ -259,7 +247,7 @@ static inline struct team_port *team_get_port_by_index(struct team *team, static inline int team_num_to_port_index(struct team *team, unsigned int num) { - int en_port_count = READ_ONCE(team->en_port_count); + int en_port_count = ACCESS_ONCE(team->en_port_count); if (unlikely(!en_port_count)) return 0; @@ -310,6 +298,4 @@ extern void team_mode_unregister(const struct team_mode *mode); #define TEAM_DEFAULT_NUM_TX_QUEUES 16 #define TEAM_DEFAULT_NUM_RX_QUEUES 16 -#define MODULE_ALIAS_TEAM_MODE(kind) 
MODULE_ALIAS("team-mode-" kind) - #endif /* _LINUX_IF_TEAM_H_ */ diff --git a/include/linux/if_tun.h b/include/linux/if_tun.h index 2a76608434..ed6da2e6df 100644 --- a/include/linux/if_tun.h +++ b/include/linux/if_tun.h @@ -1,45 +1,24 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * Universal TUN/TAP device driver. * Copyright (C) 1999-2000 Maxim Krasnyansky + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. */ #ifndef __IF_TUN_H #define __IF_TUN_H #include -#include - -#define TUN_XDP_FLAG 0x1UL - -#define TUN_MSG_UBUF 1 -#define TUN_MSG_PTR 2 -struct tun_msg_ctl { - unsigned short type; - unsigned short num; - void *ptr; -}; - -struct tun_xdp_hdr { - int buflen; - struct virtio_net_hdr gso; -}; #if defined(CONFIG_TUN) || defined(CONFIG_TUN_MODULE) struct socket *tun_get_socket(struct file *); -struct ptr_ring *tun_get_tx_ring(struct file *file); -static inline bool tun_is_xdp_frame(void *ptr) -{ - return (unsigned long)ptr & TUN_XDP_FLAG; -} -static inline void *tun_xdp_to_ptr(struct xdp_frame *xdp) -{ - return (void *)((unsigned long)xdp | TUN_XDP_FLAG); -} -static inline struct xdp_frame *tun_ptr_to_xdp(void *ptr) -{ - return (void *)((unsigned long)ptr & ~TUN_XDP_FLAG); -} -void tun_ptr_free(void *ptr); #else #include #include @@ -49,24 +28,5 @@ static inline struct socket *tun_get_socket(struct file *f) { return ERR_PTR(-EINVAL); } -static inline struct ptr_ring *tun_get_tx_ring(struct file *f) -{ - return ERR_PTR(-EINVAL); -} -static inline bool tun_is_xdp_frame(void *ptr) -{ - return false; -} 
-static inline void *tun_xdp_to_ptr(struct xdp_frame *xdp) -{ - return NULL; -} -static inline struct xdp_frame *tun_ptr_to_xdp(void *ptr) -{ - return NULL; -} -static inline void tun_ptr_free(void *ptr) -{ -} #endif /* CONFIG_TUN */ #endif /* __IF_TUN_H */ diff --git a/include/linux/if_tunnel.h b/include/linux/if_tunnel.h index 26606523ec..712710bc05 100644 --- a/include/linux/if_tunnel.h +++ b/include/linux/if_tunnel.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _IF_TUNNEL_H_ #define _IF_TUNNEL_H_ diff --git a/include/linux/if_vlan.h b/include/linux/if_vlan.h index 41a5183366..3319d97d78 100644 --- a/include/linux/if_vlan.h +++ b/include/linux/if_vlan.h @@ -1,8 +1,13 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * VLAN An implementation of 802.1Q VLAN tagging. * * Authors: Ben Greear + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. + * */ #ifndef _LINUX_IF_VLAN_H_ #define _LINUX_IF_VLAN_H_ @@ -25,8 +30,6 @@ #define VLAN_ETH_DATA_LEN 1500 /* Max. octets in payload */ #define VLAN_ETH_FRAME_LEN 1518 /* Max. octets in frame sans FCS */ -#define VLAN_MAX_DEPTH 8 /* Max. 
number of nested VLAN tags parsed */ - /* * struct vlan_hdr - vlan header * @h_vlan_TCI: priority and VLAN ID @@ -62,7 +65,8 @@ static inline struct vlan_ethhdr *vlan_eth_hdr(const struct sk_buff *skb) #define VLAN_PRIO_MASK 0xe000 /* Priority Code Point */ #define VLAN_PRIO_SHIFT 13 -#define VLAN_CFI_MASK 0x1000 /* Canonical Format Indicator / Drop Eligible Indicator */ +#define VLAN_CFI_MASK 0x1000 /* Canonical Format Indicator */ +#define VLAN_TAG_PRESENT VLAN_CFI_MASK #define VLAN_VID_MASK 0x0fff /* VLAN Identifier */ #define VLAN_N_VID 4096 @@ -74,35 +78,10 @@ static inline bool is_vlan_dev(const struct net_device *dev) return dev->priv_flags & IFF_802_1Q_VLAN; } -#define skb_vlan_tag_present(__skb) ((__skb)->vlan_present) -#define skb_vlan_tag_get(__skb) ((__skb)->vlan_tci) +#define skb_vlan_tag_present(__skb) ((__skb)->vlan_tci & VLAN_TAG_PRESENT) +#define skb_vlan_tag_get(__skb) ((__skb)->vlan_tci & ~VLAN_TAG_PRESENT) #define skb_vlan_tag_get_id(__skb) ((__skb)->vlan_tci & VLAN_VID_MASK) -#define skb_vlan_tag_get_cfi(__skb) (!!((__skb)->vlan_tci & VLAN_CFI_MASK)) -#define skb_vlan_tag_get_prio(__skb) (((__skb)->vlan_tci & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT) - -static inline int vlan_get_rx_ctag_filter_info(struct net_device *dev) -{ - ASSERT_RTNL(); - return notifier_to_errno(call_netdevice_notifiers(NETDEV_CVLAN_FILTER_PUSH_INFO, dev)); -} - -static inline void vlan_drop_rx_ctag_filter_info(struct net_device *dev) -{ - ASSERT_RTNL(); - call_netdevice_notifiers(NETDEV_CVLAN_FILTER_DROP_INFO, dev); -} - -static inline int vlan_get_rx_stag_filter_info(struct net_device *dev) -{ - ASSERT_RTNL(); - return notifier_to_errno(call_netdevice_notifiers(NETDEV_SVLAN_FILTER_PUSH_INFO, dev)); -} - -static inline void vlan_drop_rx_stag_filter_info(struct net_device *dev) -{ - ASSERT_RTNL(); - call_netdevice_notifiers(NETDEV_SVLAN_FILTER_DROP_INFO, dev); -} +#define skb_vlan_tag_get_prio(__skb) ((__skb)->vlan_tci & VLAN_PRIO_MASK) /** * struct vlan_pcpu_stats - VLAN 
percpu rx/tx stats @@ -130,9 +109,6 @@ struct vlan_pcpu_stats { extern struct net_device *__vlan_find_dev_deep_rcu(struct net_device *real_dev, __be16 vlan_proto, u16 vlan_id); -extern int vlan_for_each(struct net_device *dev, - int (*action)(struct net_device *dev, int vid, - void *arg), void *arg); extern struct net_device *vlan_dev_real_dev(const struct net_device *dev); extern u16 vlan_dev_vlan_id(const struct net_device *dev); extern __be16 vlan_dev_vlan_proto(const struct net_device *dev); @@ -184,6 +160,7 @@ struct vlan_dev_priv { #ifdef CONFIG_NET_POLL_CONTROLLER struct netpoll *netpoll; #endif + unsigned int nest_level; }; static inline struct vlan_dev_priv *vlan_dev_priv(const struct net_device *dev) @@ -222,6 +199,11 @@ extern void vlan_vids_del_by_dev(struct net_device *dev, extern bool vlan_uses_dev(const struct net_device *dev); +static inline int vlan_get_encap_level(struct net_device *dev) +{ + BUG_ON(!is_vlan_dev(dev)); + return vlan_dev_priv(dev)->nest_level; +} #else static inline struct net_device * __vlan_find_dev_deep_rcu(struct net_device *real_dev, @@ -230,14 +212,6 @@ __vlan_find_dev_deep_rcu(struct net_device *real_dev, return NULL; } -static inline int -vlan_for_each(struct net_device *dev, - int (*action)(struct net_device *dev, int vid, void *arg), - void *arg) -{ - return 0; -} - static inline struct net_device *vlan_dev_real_dev(const struct net_device *dev) { BUG(); @@ -291,6 +265,11 @@ static inline bool vlan_uses_dev(const struct net_device *dev) { return false; } +static inline int vlan_get_encap_level(struct net_device *dev) +{ + BUG(); + return 0; +} #endif /** @@ -320,55 +299,6 @@ static inline bool vlan_hw_offload_capable(netdev_features_t features, return false; } -/** - * __vlan_insert_inner_tag - inner VLAN tag inserting - * @skb: skbuff to tag - * @vlan_proto: VLAN encapsulation protocol - * @vlan_tci: VLAN TCI to insert - * @mac_len: MAC header length including outer vlan headers - * - * Inserts the VLAN tag into @skb as 
part of the payload at offset mac_len - * Returns error if skb_cow_head fails. - * - * Does not change skb->protocol so this function can be used during receive. - */ -static inline int __vlan_insert_inner_tag(struct sk_buff *skb, - __be16 vlan_proto, u16 vlan_tci, - unsigned int mac_len) -{ - struct vlan_ethhdr *veth; - - if (skb_cow_head(skb, VLAN_HLEN) < 0) - return -ENOMEM; - - skb_push(skb, VLAN_HLEN); - - /* Move the mac header sans proto to the beginning of the new header. */ - if (likely(mac_len > ETH_TLEN)) - memmove(skb->data, skb->data + VLAN_HLEN, mac_len - ETH_TLEN); - skb->mac_header -= VLAN_HLEN; - - veth = (struct vlan_ethhdr *)(skb->data + mac_len - ETH_HLEN); - - /* first, the ethernet type */ - if (likely(mac_len >= ETH_TLEN)) { - /* h_vlan_encapsulated_proto should already be populated, and - * skb->data has space for h_vlan_proto - */ - veth->h_vlan_proto = vlan_proto; - } else { - /* h_vlan_encapsulated_proto should not be populated, and - * skb->data has no space for h_vlan_proto - */ - veth->h_vlan_encapsulated_proto = skb->protocol; - } - - /* now, the TCI */ - veth->h_vlan_TCI = htons(vlan_tci); - - return 0; -} - /** * __vlan_insert_tag - regular VLAN tag inserting * @skb: skbuff to tag @@ -376,44 +306,31 @@ static inline int __vlan_insert_inner_tag(struct sk_buff *skb, * @vlan_tci: VLAN TCI to insert * * Inserts the VLAN tag into @skb as part of the payload - * Returns error if skb_cow_head fails. + * Returns error if skb_cow_head failes. * * Does not change skb->protocol so this function can be used during receive. 
*/ static inline int __vlan_insert_tag(struct sk_buff *skb, __be16 vlan_proto, u16 vlan_tci) { - return __vlan_insert_inner_tag(skb, vlan_proto, vlan_tci, ETH_HLEN); -} + struct vlan_ethhdr *veth; -/** - * vlan_insert_inner_tag - inner VLAN tag inserting - * @skb: skbuff to tag - * @vlan_proto: VLAN encapsulation protocol - * @vlan_tci: VLAN TCI to insert - * @mac_len: MAC header length including outer vlan headers - * - * Inserts the VLAN tag into @skb as part of the payload at offset mac_len - * Returns a VLAN tagged skb. If a new skb is created, @skb is freed. - * - * Following the skb_unshare() example, in case of error, the calling function - * doesn't have to worry about freeing the original skb. - * - * Does not change skb->protocol so this function can be used during receive. - */ -static inline struct sk_buff *vlan_insert_inner_tag(struct sk_buff *skb, - __be16 vlan_proto, - u16 vlan_tci, - unsigned int mac_len) -{ - int err; + if (skb_cow_head(skb, VLAN_HLEN) < 0) + return -ENOMEM; - err = __vlan_insert_inner_tag(skb, vlan_proto, vlan_tci, mac_len); - if (err) { - dev_kfree_skb_any(skb); - return NULL; - } - return skb; + veth = (struct vlan_ethhdr *)skb_push(skb, VLAN_HLEN); + + /* Move the mac addresses to the beginning of the new header. 
*/ + memmove(skb->data, skb->data + VLAN_HLEN, 2 * ETH_ALEN); + skb->mac_header -= VLAN_HLEN; + + /* first, the ethernet type */ + veth->h_vlan_proto = vlan_proto; + + /* now, the TCI */ + veth->h_vlan_TCI = htons(vlan_tci); + + return 0; } /** @@ -433,7 +350,14 @@ static inline struct sk_buff *vlan_insert_inner_tag(struct sk_buff *skb, static inline struct sk_buff *vlan_insert_tag(struct sk_buff *skb, __be16 vlan_proto, u16 vlan_tci) { - return vlan_insert_inner_tag(skb, vlan_proto, vlan_tci, ETH_HLEN); + int err; + + err = __vlan_insert_tag(skb, vlan_proto, vlan_tci); + if (err) { + dev_kfree_skb_any(skb); + return NULL; + } + return skb; } /** @@ -458,31 +382,6 @@ static inline struct sk_buff *vlan_insert_tag_set_proto(struct sk_buff *skb, return skb; } -/** - * __vlan_hwaccel_clear_tag - clear hardware accelerated VLAN info - * @skb: skbuff to clear - * - * Clears the VLAN information from @skb - */ -static inline void __vlan_hwaccel_clear_tag(struct sk_buff *skb) -{ - skb->vlan_present = 0; -} - -/** - * __vlan_hwaccel_copy_tag - copy hardware accelerated VLAN info from another skb - * @dst: skbuff to copy to - * @src: skbuff to copy from - * - * Copies VLAN information from @src to @dst (for branchless code) - */ -static inline void __vlan_hwaccel_copy_tag(struct sk_buff *dst, const struct sk_buff *src) -{ - dst->vlan_present = src->vlan_present; - dst->vlan_proto = src->vlan_proto; - dst->vlan_tci = src->vlan_tci; -} - /* * __vlan_hwaccel_push_inside - pushes vlan tag to the payload * @skb: skbuff to tag @@ -497,7 +396,23 @@ static inline struct sk_buff *__vlan_hwaccel_push_inside(struct sk_buff *skb) skb = vlan_insert_tag_set_proto(skb, skb->vlan_proto, skb_vlan_tag_get(skb)); if (likely(skb)) - __vlan_hwaccel_clear_tag(skb); + skb->vlan_tci = 0; + return skb; +} +/* + * vlan_hwaccel_push_inside - pushes vlan tag to the payload + * @skb: skbuff to tag + * + * Checks is tag is present in @skb->vlan_tci and if it is, it pushes the + * VLAN tag from 
@skb->vlan_tci inside to the payload. + * + * Following the skb_unshare() example, in case of error, the calling function + * doesn't have to worry about freeing the original skb. + */ +static inline struct sk_buff *vlan_hwaccel_push_inside(struct sk_buff *skb) +{ + if (skb_vlan_tag_present(skb)) + skb = __vlan_hwaccel_push_inside(skb); return skb; } @@ -513,8 +428,7 @@ static inline void __vlan_hwaccel_put_tag(struct sk_buff *skb, __be16 vlan_proto, u16 vlan_tci) { skb->vlan_proto = vlan_proto; - skb->vlan_tci = vlan_tci; - skb->vlan_present = 1; + skb->vlan_tci = VLAN_TAG_PRESENT | vlan_tci; } /** @@ -554,6 +468,8 @@ static inline int __vlan_hwaccel_get_tag(const struct sk_buff *skb, } } +#define HAVE_VLAN_GET_TAG + /** * vlan_get_tag - get the VLAN ID from the skb * @skb: skbuff to query @@ -579,10 +495,10 @@ static inline int vlan_get_tag(const struct sk_buff *skb, u16 *vlan_tci) * Returns the EtherType of the packet, regardless of whether it is * vlan encapsulated (normal or hardware accelerated) or not. 
*/ -static inline __be16 __vlan_get_protocol(const struct sk_buff *skb, __be16 type, +static inline __be16 __vlan_get_protocol(struct sk_buff *skb, __be16 type, int *depth) { - unsigned int vlan_depth = skb->mac_len, parse_depth = VLAN_MAX_DEPTH; + unsigned int vlan_depth = skb->mac_len; /* if type is 802.1Q/AD then the header should already be * present at mac_len - VLAN_HLEN (if mac_len > 0), or at @@ -597,12 +513,13 @@ static inline __be16 __vlan_get_protocol(const struct sk_buff *skb, __be16 type, vlan_depth = ETH_HLEN; } do { - struct vlan_hdr vhdr, *vh; + struct vlan_hdr *vh; - vh = skb_header_pointer(skb, vlan_depth, sizeof(vhdr), &vhdr); - if (unlikely(!vh || !--parse_depth)) + if (unlikely(!pskb_may_pull(skb, + vlan_depth + VLAN_HLEN))) return 0; + vh = (struct vlan_hdr *)(skb->data + vlan_depth); type = vh->h_vlan_encapsulated_proto; vlan_depth += VLAN_HLEN; } while (eth_type_vlan(type)); @@ -621,25 +538,11 @@ static inline __be16 __vlan_get_protocol(const struct sk_buff *skb, __be16 type, * Returns the EtherType of the packet, regardless of whether it is * vlan encapsulated (normal or hardware accelerated) or not. */ -static inline __be16 vlan_get_protocol(const struct sk_buff *skb) +static inline __be16 vlan_get_protocol(struct sk_buff *skb) { return __vlan_get_protocol(skb, skb->protocol, NULL); } -/* A getter for the SKB protocol field which will handle VLAN tags consistently - * whether VLAN acceleration is enabled or not. - */ -static inline __be16 skb_protocol(const struct sk_buff *skb, bool skip_vlan) -{ - if (!skip_vlan) - /* VLAN acceleration strips the VLAN header from the skb and - * moves it to skb->vlan_proto - */ - return skb_vlan_tag_present(skb) ? 
skb->vlan_proto : skb->protocol; - - return vlan_get_protocol(skb); -} - static inline void vlan_set_encap_proto(struct sk_buff *skb, struct vlan_hdr *vhdr) { @@ -697,7 +600,7 @@ static inline bool skb_vlan_tagged(const struct sk_buff *skb) * Returns true if the skb is tagged with multiple vlan headers, regardless * of whether it is hardware accelerated or not. */ -static inline bool skb_vlan_tagged_multi(struct sk_buff *skb) +static inline bool skb_vlan_tagged_multi(const struct sk_buff *skb) { __be16 protocol = skb->protocol; @@ -707,9 +610,6 @@ static inline bool skb_vlan_tagged_multi(struct sk_buff *skb) if (likely(!eth_type_vlan(protocol))) return false; - if (unlikely(!pskb_may_pull(skb, VLAN_ETH_HLEN))) - return false; - veh = (struct vlan_ethhdr *)skb->data; protocol = veh->h_vlan_encapsulated_proto; } @@ -727,19 +627,17 @@ static inline bool skb_vlan_tagged_multi(struct sk_buff *skb) * * Returns features without unsafe ones if the skb has multiple tags. */ -static inline netdev_features_t vlan_features_check(struct sk_buff *skb, +static inline netdev_features_t vlan_features_check(const struct sk_buff *skb, netdev_features_t features) { - if (skb_vlan_tagged_multi(skb)) { - /* In the case of multi-tagged packets, use a direct mask - * instead of using netdev_interesect_features(), to make - * sure that only devices supporting NETIF_F_HW_CSUM will - * have checksum offloading support. 
- */ - features &= NETIF_F_SG | NETIF_F_HIGHDMA | NETIF_F_HW_CSUM | - NETIF_F_FRAGLIST | NETIF_F_HW_VLAN_CTAG_TX | - NETIF_F_HW_VLAN_STAG_TX; - } + if (skb_vlan_tagged_multi(skb)) + features = netdev_intersect_features(features, + NETIF_F_SG | + NETIF_F_HIGHDMA | + NETIF_F_FRAGLIST | + NETIF_F_HW_CSUM | + NETIF_F_HW_VLAN_CTAG_TX | + NETIF_F_HW_VLAN_STAG_TX); return features; } diff --git a/include/linux/igmp.h b/include/linux/igmp.h index 93c262ecbd..12f6fba6d2 100644 --- a/include/linux/igmp.h +++ b/include/linux/igmp.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * Linux NET3: Internet Group Management Protocol [IGMP] * @@ -6,6 +5,12 @@ * Alan Cox * * Extended to talk the BSD extended IGMP protocol of mrouted 3.6 + * + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. */ #ifndef _LINUX_IGMP_H #define _LINUX_IGMP_H @@ -13,8 +18,6 @@ #include #include #include -#include -#include #include static inline struct igmphdr *igmp_hdr(const struct sk_buff *skb) @@ -38,9 +41,12 @@ struct ip_sf_socklist { unsigned int sl_max; unsigned int sl_count; struct rcu_head rcu; - __be32 sl_addr[]; + __be32 sl_addr[0]; }; +#define IP_SFLSIZE(count) (sizeof(struct ip_sf_socklist) + \ + (count) * sizeof(__be32)) + #define IP_SFBLOCK 10 /* allocate this many at once */ /* ip_mc_socklist is real list now. Speed is not argument; @@ -57,8 +63,8 @@ struct ip_mc_socklist { struct ip_sf_list { struct ip_sf_list *sf_next; - unsigned long sf_count[2]; /* include/exclude counts */ __be32 sf_inaddr; + unsigned long sf_count[2]; /* include/exclude counts */ unsigned char sf_gsresp; /* include in g & s response? */ unsigned char sf_oldin; /* change state */ unsigned char sf_crcount; /* retrans. 
left to send */ @@ -78,7 +84,7 @@ struct ip_mc_list { struct ip_mc_list __rcu *next_hash; struct timer_list timer; int users; - refcount_t refcnt; + atomic_t refcnt; spinlock_t lock; char tm_running; char reporter; @@ -99,19 +105,9 @@ struct ip_mc_list { #define IGMPV3_QQIC(value) IGMPV3_EXP(0x80, 4, 3, value) #define IGMPV3_MRC(value) IGMPV3_EXP(0x80, 4, 3, value) -static inline int ip_mc_may_pull(struct sk_buff *skb, unsigned int len) -{ - if (skb_transport_offset(skb) + ip_transport_len(skb) < len) - return 0; - - return pskb_may_pull(skb, len); -} - extern int ip_check_mc_rcu(struct in_device *dev, __be32 mc_addr, __be32 src_addr, u8 proto); extern int igmp_rcv(struct sk_buff *); extern int ip_mc_join_group(struct sock *sk, struct ip_mreqn *imr); -extern int ip_mc_join_group_ssm(struct sock *sk, struct ip_mreqn *imr, - unsigned int mode); extern int ip_mc_leave_group(struct sock *sk, struct ip_mreqn *imr); extern void ip_mc_drop_socket(struct sock *sk); extern int ip_mc_source(int add, int omode, struct sock *sk, @@ -120,23 +116,16 @@ extern int ip_mc_msfilter(struct sock *sk, struct ip_msfilter *msf,int ifindex); extern int ip_mc_msfget(struct sock *sk, struct ip_msfilter *msf, struct ip_msfilter __user *optval, int __user *optlen); extern int ip_mc_gsfget(struct sock *sk, struct group_filter *gsf, - struct sockaddr_storage __user *p); -extern int ip_mc_sf_allow(struct sock *sk, __be32 local, __be32 rmt, - int dif, int sdif); + struct group_filter __user *optval, int __user *optlen); +extern int ip_mc_sf_allow(struct sock *sk, __be32 local, __be32 rmt, int dif); extern void ip_mc_init_dev(struct in_device *); extern void ip_mc_destroy_dev(struct in_device *); extern void ip_mc_up(struct in_device *); extern void ip_mc_down(struct in_device *); extern void ip_mc_unmap(struct in_device *); extern void ip_mc_remap(struct in_device *); -extern void __ip_mc_dec_group(struct in_device *in_dev, __be32 addr, gfp_t gfp); -static inline void ip_mc_dec_group(struct 
in_device *in_dev, __be32 addr) -{ - return __ip_mc_dec_group(in_dev, addr, GFP_KERNEL); -} -extern void __ip_mc_inc_group(struct in_device *in_dev, __be32 addr, - gfp_t gfp); +extern void ip_mc_dec_group(struct in_device *in_dev, __be32 addr); extern void ip_mc_inc_group(struct in_device *in_dev, __be32 addr); -int ip_mc_check_igmp(struct sk_buff *skb); +int ip_mc_check_igmp(struct sk_buff *skb, struct sk_buff **skb_trimmed); #endif diff --git a/include/linux/ihex.h b/include/linux/ihex.h index b824877e6d..31d8629e75 100644 --- a/include/linux/ihex.h +++ b/include/linux/ihex.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ /* * Compact binary representation of ihex records. Some devices need their * firmware loaded in strange orders rather than a single big blob, but @@ -18,27 +17,15 @@ struct ihex_binrec { __be32 addr; __be16 len; - uint8_t data[]; + uint8_t data[0]; } __attribute__((packed)); -static inline uint16_t ihex_binrec_size(const struct ihex_binrec *p) -{ - return be16_to_cpu(p->len) + sizeof(*p); -} - /* Find the next record, taking into account the 4-byte alignment */ -static inline const struct ihex_binrec * -__ihex_next_binrec(const struct ihex_binrec *rec) -{ - const void *p = rec; - - return p + ALIGN(ihex_binrec_size(rec), 4); -} - static inline const struct ihex_binrec * ihex_next_binrec(const struct ihex_binrec *rec) { - rec = __ihex_next_binrec(rec); + int next = ((be16_to_cpu(rec->len) + 5) & ~3) - 2; + rec = (void *)&rec->data[next]; return be16_to_cpu(rec->len) ? rec : NULL; } @@ -46,15 +33,18 @@ ihex_next_binrec(const struct ihex_binrec *rec) /* Check that ihex_next_binrec() won't take us off the end of the image... 
*/ static inline int ihex_validate_fw(const struct firmware *fw) { - const struct ihex_binrec *end, *rec; + const struct ihex_binrec *rec; + size_t ofs = 0; - rec = (const void *)fw->data; - end = (const void *)&fw->data[fw->size - sizeof(*end)]; + while (ofs <= fw->size - sizeof(*rec)) { + rec = (void *)&fw->data[ofs]; - for (; rec <= end; rec = __ihex_next_binrec(rec)) { /* Zero length marks end of records */ - if (rec == end && !be16_to_cpu(rec->len)) + if (!be16_to_cpu(rec->len)) return 0; + + /* Point to next record... */ + ofs += (sizeof(*rec) + be16_to_cpu(rec->len) + 3) & ~3; } return -EINVAL; } diff --git a/include/linux/iio/accel/kxcjk_1013.h b/include/linux/iio/accel/kxcjk_1013.h index ea0ecb7743..fd1d540ea6 100644 --- a/include/linux/iio/accel/kxcjk_1013.h +++ b/include/linux/iio/accel/kxcjk_1013.h @@ -1,17 +1,22 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * KXCJK-1013 3-axis accelerometer Interface * Copyright (c) 2014, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. */ #ifndef __IIO_KXCJK_1013_H__ #define __IIO_KXCJK_1013_H__ -#include - struct kxcjk_1013_platform_data { bool active_high_intr; - struct iio_mount_matrix orientation; }; #endif diff --git a/include/linux/iio/adc/ad_sigma_delta.h b/include/linux/iio/adc/ad_sigma_delta.h index c525fd5165..e7fdec4db9 100644 --- a/include/linux/iio/adc/ad_sigma_delta.h +++ b/include/linux/iio/adc/ad_sigma_delta.h @@ -1,9 +1,10 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * Support code for Analog Devices Sigma-Delta ADCs * * Copyright 2012 Analog Devices Inc. 
* Author: Lars-Peter Clausen + * + * Licensed under the GPL-2. */ #ifndef __AD_SIGMA_DELTA_H__ #define __AD_SIGMA_DELTA_H__ @@ -26,7 +27,6 @@ struct ad_sd_calib_data { }; struct ad_sigma_delta; -struct device; struct iio_dev; /** @@ -39,9 +39,6 @@ struct iio_dev; * if there is just one read-only sample data shift register. * @addr_shift: Shift of the register address in the communications register. * @read_mask: Mask for the communications register having the read bit set. - * @data_reg: Address of the data register, if 0 the default address of 0x3 will - * be used. - * @irq_flags: flags for the interrupt used by the triggered buffer */ struct ad_sigma_delta_info { int (*set_channel)(struct ad_sigma_delta *, unsigned int channel); @@ -50,8 +47,6 @@ struct ad_sigma_delta_info { bool has_registers; unsigned int addr_shift; unsigned int read_mask; - unsigned int data_reg; - unsigned long irq_flags; }; /** @@ -71,7 +66,6 @@ struct ad_sigma_delta { bool irq_dis; bool bus_locked; - bool keep_cs_asserted; uint8_t comm; @@ -80,12 +74,8 @@ struct ad_sigma_delta { /* * DMA (thus cache coherency maintenance) requires the * transfer buffers to live in their own cache lines. - * 'tx_buf' is up to 32 bits. - * 'rx_buf' is up to 32 bits per sample + 64 bit timestamp, - * rounded to 16 bytes to take into account padding. 
*/ - uint8_t tx_buf[4] ____cacheline_aligned; - uint8_t rx_buf[16] __aligned(8); + uint8_t data[4] ____cacheline_aligned; }; static inline int ad_sigma_delta_set_channel(struct ad_sigma_delta *sd, @@ -121,20 +111,63 @@ int ad_sd_write_reg(struct ad_sigma_delta *sigma_delta, unsigned int reg, int ad_sd_read_reg(struct ad_sigma_delta *sigma_delta, unsigned int reg, unsigned int size, unsigned int *val); -int ad_sd_reset(struct ad_sigma_delta *sigma_delta, - unsigned int reset_length); - int ad_sigma_delta_single_conversion(struct iio_dev *indio_dev, const struct iio_chan_spec *chan, int *val); -int ad_sd_calibrate(struct ad_sigma_delta *sigma_delta, - unsigned int mode, unsigned int channel); int ad_sd_calibrate_all(struct ad_sigma_delta *sigma_delta, const struct ad_sd_calib_data *cd, unsigned int n); int ad_sd_init(struct ad_sigma_delta *sigma_delta, struct iio_dev *indio_dev, struct spi_device *spi, const struct ad_sigma_delta_info *info); -int devm_ad_sd_setup_buffer_and_trigger(struct device *dev, struct iio_dev *indio_dev); +int ad_sd_setup_buffer_and_trigger(struct iio_dev *indio_dev); +void ad_sd_cleanup_buffer_and_trigger(struct iio_dev *indio_dev); int ad_sd_validate_trigger(struct iio_dev *indio_dev, struct iio_trigger *trig); +#define __AD_SD_CHANNEL(_si, _channel1, _channel2, _address, _bits, \ + _storagebits, _shift, _extend_name, _type) \ + { \ + .type = (_type), \ + .differential = (_channel2 == -1 ? 
0 : 1), \ + .indexed = 1, \ + .channel = (_channel1), \ + .channel2 = (_channel2), \ + .address = (_address), \ + .extend_name = (_extend_name), \ + .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | \ + BIT(IIO_CHAN_INFO_OFFSET), \ + .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE), \ + .scan_index = (_si), \ + .scan_type = { \ + .sign = 'u', \ + .realbits = (_bits), \ + .storagebits = (_storagebits), \ + .shift = (_shift), \ + .endianness = IIO_BE, \ + }, \ + } + +#define AD_SD_DIFF_CHANNEL(_si, _channel1, _channel2, _address, _bits, \ + _storagebits, _shift) \ + __AD_SD_CHANNEL(_si, _channel1, _channel2, _address, _bits, \ + _storagebits, _shift, NULL, IIO_VOLTAGE) + +#define AD_SD_SHORTED_CHANNEL(_si, _channel, _address, _bits, \ + _storagebits, _shift) \ + __AD_SD_CHANNEL(_si, _channel, _channel, _address, _bits, \ + _storagebits, _shift, "shorted", IIO_VOLTAGE) + +#define AD_SD_CHANNEL(_si, _channel, _address, _bits, \ + _storagebits, _shift) \ + __AD_SD_CHANNEL(_si, _channel, -1, _address, _bits, \ + _storagebits, _shift, NULL, IIO_VOLTAGE) + +#define AD_SD_TEMP_CHANNEL(_si, _address, _bits, _storagebits, _shift) \ + __AD_SD_CHANNEL(_si, 0, -1, _address, _bits, \ + _storagebits, _shift, NULL, IIO_TEMP) + +#define AD_SD_SUPPLY_CHANNEL(_si, _channel, _address, _bits, _storagebits, \ + _shift) \ + __AD_SD_CHANNEL(_si, _channel, -1, _address, _bits, \ + _storagebits, _shift, "supply", IIO_VOLTAGE) + #endif diff --git a/include/linux/iio/buffer-dma.h b/include/linux/iio/buffer-dma.h index ff15c61bf3..767467d886 100644 --- a/include/linux/iio/buffer-dma.h +++ b/include/linux/iio/buffer-dma.h @@ -1,7 +1,8 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright 2013-2015 Analog Devices Inc. * Author: Lars-Peter Clausen + * + * Licensed under the GPL-2. 
*/ #ifndef __INDUSTRIALIO_DMA_BUFFER_H__ @@ -11,7 +12,7 @@ #include #include #include -#include +#include struct iio_dma_buffer_queue; struct iio_dma_buffer_ops; @@ -140,7 +141,7 @@ int iio_dma_buffer_read(struct iio_buffer *buffer, size_t n, char __user *user_buffer); size_t iio_dma_buffer_data_available(struct iio_buffer *buffer); int iio_dma_buffer_set_bytes_per_datum(struct iio_buffer *buffer, size_t bpd); -int iio_dma_buffer_set_length(struct iio_buffer *buffer, unsigned int length); +int iio_dma_buffer_set_length(struct iio_buffer *buffer, int length); int iio_dma_buffer_request_update(struct iio_buffer *buffer); int iio_dma_buffer_init(struct iio_dma_buffer_queue *queue, diff --git a/include/linux/iio/buffer-dmaengine.h b/include/linux/iio/buffer-dmaengine.h index 5c355be898..5dcddf427b 100644 --- a/include/linux/iio/buffer-dmaengine.h +++ b/include/linux/iio/buffer-dmaengine.h @@ -1,17 +1,18 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * Copyright 2014-2015 Analog Devices Inc. * Author: Lars-Peter Clausen + * + * Licensed under the GPL-2 or later. */ #ifndef __IIO_DMAENGINE_H__ #define __IIO_DMAENGINE_H__ -struct iio_dev; +struct iio_buffer; struct device; -int devm_iio_dmaengine_buffer_setup(struct device *dev, - struct iio_dev *indio_dev, - const char *channel); +struct iio_buffer *iio_dmaengine_buffer_alloc(struct device *dev, + const char *channel); +void iio_dmaengine_buffer_free(struct iio_buffer *buffer); #endif diff --git a/include/linux/iio/buffer.h b/include/linux/iio/buffer.h index b6928ac5c6..70a5164f47 100644 --- a/include/linux/iio/buffer.h +++ b/include/linux/iio/buffer.h @@ -1,19 +1,149 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* The industrial I/O core - generic buffer interfaces. * * Copyright (c) 2008 Jonathan Cameron + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published by + * the Free Software Foundation. 
*/ #ifndef _IIO_BUFFER_GENERIC_H_ #define _IIO_BUFFER_GENERIC_H_ #include #include +#include + +#ifdef CONFIG_IIO_BUFFER struct iio_buffer; -int iio_push_to_buffers(struct iio_dev *indio_dev, const void *data); +/** + * INDIO_BUFFER_FLAG_FIXED_WATERMARK - Watermark level of the buffer can not be + * configured. It has a fixed value which will be buffer specific. + */ +#define INDIO_BUFFER_FLAG_FIXED_WATERMARK BIT(0) /** + * struct iio_buffer_access_funcs - access functions for buffers. + * @store_to: actually store stuff to the buffer + * @read_first_n: try to get a specified number of bytes (must exist) + * @data_available: indicates how much data is available for reading from + * the buffer. + * @request_update: if a parameter change has been marked, update underlying + * storage. + * @set_bytes_per_datum:set number of bytes per datum + * @set_length: set number of datums in buffer + * @enable: called if the buffer is attached to a device and the + * device starts sampling. Calls are balanced with + * @disable. + * @disable: called if the buffer is attached to a device and the + * device stops sampling. Calles are balanced with @enable. + * @release: called when the last reference to the buffer is dropped, + * should free all resources allocated by the buffer. + * @modes: Supported operating modes by this buffer type + * @flags: A bitmask combination of INDIO_BUFFER_FLAG_* + * + * The purpose of this structure is to make the buffer element + * modular as event for a given driver, different usecases may require + * different buffer designs (space efficiency vs speed for example). + * + * It is worth noting that a given buffer implementation may only support a + * small proportion of these functions. The core code 'should' cope fine with + * any of them not existing. 
+ **/ +struct iio_buffer_access_funcs { + int (*store_to)(struct iio_buffer *buffer, const void *data); + int (*read_first_n)(struct iio_buffer *buffer, + size_t n, + char __user *buf); + size_t (*data_available)(struct iio_buffer *buffer); + + int (*request_update)(struct iio_buffer *buffer); + + int (*set_bytes_per_datum)(struct iio_buffer *buffer, size_t bpd); + int (*set_length)(struct iio_buffer *buffer, int length); + + int (*enable)(struct iio_buffer *buffer, struct iio_dev *indio_dev); + int (*disable)(struct iio_buffer *buffer, struct iio_dev *indio_dev); + + void (*release)(struct iio_buffer *buffer); + + unsigned int modes; + unsigned int flags; +}; + +/** + * struct iio_buffer - general buffer structure + * @length: [DEVICE] number of datums in buffer + * @bytes_per_datum: [DEVICE] size of individual datum including timestamp + * @scan_el_attrs: [DRIVER] control of scan elements if that scan mode + * control method is used + * @scan_mask: [INTERN] bitmask used in masking scan mode elements + * @scan_timestamp: [INTERN] does the scan mode include a timestamp + * @access: [DRIVER] buffer access functions associated with the + * implementation. + * @scan_el_dev_attr_list:[INTERN] list of scan element related attributes. + * @buffer_group: [INTERN] attributes of the buffer group + * @scan_el_group: [DRIVER] attribute group for those attributes not + * created from the iio_chan_info array. + * @pollq: [INTERN] wait queue to allow for polling on the buffer. + * @stufftoread: [INTERN] flag to indicate new data. + * @attrs: [INTERN] standard attributes of the buffer + * @demux_list: [INTERN] list of operations required to demux the scan. + * @demux_bounce: [INTERN] buffer for doing gather from incoming scan. + * @buffer_list: [INTERN] entry in the devices list of current buffers. + * @ref: [INTERN] reference count of the buffer. + * @watermark: [INTERN] number of datums to wait for poll/read. 
+ */ +struct iio_buffer { + int length; + int bytes_per_datum; + struct attribute_group *scan_el_attrs; + long *scan_mask; + bool scan_timestamp; + const struct iio_buffer_access_funcs *access; + struct list_head scan_el_dev_attr_list; + struct attribute_group buffer_group; + struct attribute_group scan_el_group; + wait_queue_head_t pollq; + bool stufftoread; + const struct attribute **attrs; + struct list_head demux_list; + void *demux_bounce; + struct list_head buffer_list; + struct kref ref; + unsigned int watermark; +}; + +/** + * iio_update_buffers() - add or remove buffer from active list + * @indio_dev: device to add buffer to + * @insert_buffer: buffer to insert + * @remove_buffer: buffer_to_remove + * + * Note this will tear down the all buffering and build it up again + */ +int iio_update_buffers(struct iio_dev *indio_dev, + struct iio_buffer *insert_buffer, + struct iio_buffer *remove_buffer); + +/** + * iio_buffer_init() - Initialize the buffer structure + * @buffer: buffer to be initialized + **/ +void iio_buffer_init(struct iio_buffer *buffer); + +int iio_scan_mask_query(struct iio_dev *indio_dev, + struct iio_buffer *buffer, int bit); + +/** + * iio_push_to_buffers() - push to a registered buffer. + * @indio_dev: iio_dev structure for device. + * @data: Full scan. + */ +int iio_push_to_buffers(struct iio_dev *indio_dev, const void *data); + +/* * iio_push_to_buffers_with_timestamp() - push data and timestamp to buffers * @indio_dev: iio_dev structure for device. 
* @data: sample data @@ -38,10 +168,34 @@ static inline int iio_push_to_buffers_with_timestamp(struct iio_dev *indio_dev, return iio_push_to_buffers(indio_dev, data); } -bool iio_validate_scan_mask_onehot(struct iio_dev *indio_dev, - const unsigned long *mask); +int iio_update_demux(struct iio_dev *indio_dev); -int iio_device_attach_buffer(struct iio_dev *indio_dev, - struct iio_buffer *buffer); +bool iio_validate_scan_mask_onehot(struct iio_dev *indio_dev, + const unsigned long *mask); + +struct iio_buffer *iio_buffer_get(struct iio_buffer *buffer); +void iio_buffer_put(struct iio_buffer *buffer); + +/** + * iio_device_attach_buffer - Attach a buffer to a IIO device + * @indio_dev: The device the buffer should be attached to + * @buffer: The buffer to attach to the device + * + * This function attaches a buffer to a IIO device. The buffer stays attached to + * the device until the device is freed. The function should only be called at + * most once per device. + */ +static inline void iio_device_attach_buffer(struct iio_dev *indio_dev, + struct iio_buffer *buffer) +{ + indio_dev->buffer = iio_buffer_get(buffer); +} + +#else /* CONFIG_IIO_BUFFER */ + +static inline void iio_buffer_get(struct iio_buffer *buffer) {} +static inline void iio_buffer_put(struct iio_buffer *buffer) {} + +#endif /* CONFIG_IIO_BUFFER */ #endif /* _IIO_BUFFER_GENERIC_H_ */ diff --git a/include/linux/iio/common/ssp_sensors.h b/include/linux/iio/common/ssp_sensors.h index 06c9b4b563..f4d1b0edb4 100644 --- a/include/linux/iio/common/ssp_sensors.h +++ b/include/linux/iio/common/ssp_sensors.h @@ -1,6 +1,16 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * Copyright (C) 2014, Samsung Electronics Co. Ltd. All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. 
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * */ #ifndef _SSP_SENSORS_H_ #define _SSP_SENSORS_H_ diff --git a/include/linux/iio/common/st_sensors.h b/include/linux/iio/common/st_sensors.h index 8bdbaf3f37..497f2b3a5a 100644 --- a/include/linux/iio/common/st_sensors.h +++ b/include/linux/iio/common/st_sensors.h @@ -1,10 +1,11 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * STMicroelectronics sensors library driver * * Copyright 2012-2013 STMicroelectronics Inc. * * Denis Ciocca + * + * Licensed under the GPL-2. */ #ifndef ST_SENSORS_H @@ -13,22 +14,14 @@ #include #include #include -#include #include #include #include -#include #include -#define LSM9DS0_IMU_DEV_NAME "lsm9ds0" - -/* - * Buffer size max case: 2bytes per channel, 3 channels in total + - * 8bytes timestamp channel (s64) - */ -#define ST_SENSORS_MAX_BUFFER_SIZE (ALIGN(2 * 3, sizeof(s64)) + \ - sizeof(s64)) +#define ST_SENSORS_TX_MAX_LENGTH 2 +#define ST_SENSORS_RX_MAX_LENGTH 6 #define ST_SENSORS_ODR_LIST_MAX 10 #define ST_SENSORS_FULLSCALE_AVL_MAX 10 @@ -47,10 +40,10 @@ #define ST_SENSORS_DEFAULT_STAT_ADDR 0x27 #define ST_SENSORS_MAX_NAME 17 -#define ST_SENSORS_MAX_4WAI 8 +#define ST_SENSORS_MAX_4WAI 7 -#define ST_SENSORS_LSM_CHANNELS_EXT(device_type, mask, index, mod, \ - ch2, s, endian, rbits, sbits, addr, ext) \ +#define ST_SENSORS_LSM_CHANNELS(device_type, mask, index, mod, \ + ch2, s, endian, rbits, sbits, addr) \ { \ .type = device_type, \ .modified = mod, \ @@ -66,14 +59,8 @@ .storagebits = sbits, \ .endianness = endian, \ }, \ - .ext_info = ext, \ } -#define ST_SENSORS_LSM_CHANNELS(device_type, mask, index, mod, \ - ch2, s, endian, rbits, sbits, addr) \ - ST_SENSORS_LSM_CHANNELS_EXT(device_type, mask, index, mod, \ - ch2, s, endian, rbits, sbits, addr, NULL) - #define 
ST_SENSORS_DEV_ATTR_SAMP_FREQ_AVAIL() \ IIO_DEV_ATTR_SAMP_FREQ_AVAIL( \ st_sensors_sysfs_sampling_frequency_avail) @@ -118,11 +105,6 @@ struct st_sensor_fullscale { struct st_sensor_fullscale_avl fs_avl[ST_SENSORS_FULLSCALE_AVL_MAX]; }; -struct st_sensor_sim { - u8 addr; - u8 value; -}; - /** * struct st_sensor_bdu - ST sensor device block data update * @addr: address of the register. @@ -143,46 +125,65 @@ struct st_sensor_das { u8 mask; }; -/** - * struct st_sensor_int_drdy - ST sensor device drdy line parameters - * @addr: address of INT drdy register. - * @mask: mask to enable drdy line. - * @addr_od: address to enable/disable Open Drain on the INT line. - * @mask_od: mask to enable/disable Open Drain on the INT line. - */ -struct st_sensor_int_drdy { - u8 addr; - u8 mask; - u8 addr_od; - u8 mask_od; -}; - /** * struct st_sensor_data_ready_irq - ST sensor device data-ready interrupt - * struct int1 - data-ready configuration register for INT1 pin. - * struct int2 - data-ready configuration register for INT2 pin. + * @addr: address of the register. + * @mask_int1: mask to enable/disable IRQ on INT1 pin. + * @mask_int2: mask to enable/disable IRQ on INT2 pin. * @addr_ihl: address to enable/disable active low on the INT lines. * @mask_ihl: mask to enable/disable active low on the INT lines. - * struct stat_drdy - status register of DRDY (data ready) interrupt. + * @addr_od: address to enable/disable Open Drain on the INT lines. + * @mask_od: mask to enable/disable Open Drain on the INT lines. + * @addr_stat_drdy: address to read status of DRDY (data ready) interrupt * struct ig1 - represents the Interrupt Generator 1 of sensors. * @en_addr: address of the enable ig1 register. * @en_mask: mask to write the on/off value for enable. 
*/ struct st_sensor_data_ready_irq { - struct st_sensor_int_drdy int1; - struct st_sensor_int_drdy int2; + u8 addr; + u8 mask_int1; + u8 mask_int2; u8 addr_ihl; u8 mask_ihl; - struct { - u8 addr; - u8 mask; - } stat_drdy; + u8 addr_od; + u8 mask_od; + u8 addr_stat_drdy; struct { u8 en_addr; u8 en_mask; } ig1; }; +/** + * struct st_sensor_transfer_buffer - ST sensor device I/O buffer + * @buf_lock: Mutex to protect rx and tx buffers. + * @tx_buf: Buffer used by SPI transfer function to send data to the sensors. + * This buffer is used to avoid DMA not-aligned issue. + * @rx_buf: Buffer used by SPI transfer to receive data from sensors. + * This buffer is used to avoid DMA not-aligned issue. + */ +struct st_sensor_transfer_buffer { + struct mutex buf_lock; + u8 rx_buf[ST_SENSORS_RX_MAX_LENGTH]; + u8 tx_buf[ST_SENSORS_TX_MAX_LENGTH] ____cacheline_aligned; +}; + +/** + * struct st_sensor_transfer_function - ST sensor device I/O function + * @read_byte: Function used to read one byte. + * @write_byte: Function used to write one byte. + * @read_multiple_byte: Function used to read multiple byte. + */ +struct st_sensor_transfer_function { + int (*read_byte) (struct st_sensor_transfer_buffer *tb, + struct device *dev, u8 reg_addr, u8 *res_byte); + int (*write_byte) (struct st_sensor_transfer_buffer *tb, + struct device *dev, u8 reg_addr, u8 data); + int (*read_multiple_byte) (struct st_sensor_transfer_buffer *tb, + struct device *dev, u8 reg_addr, int len, u8 *data, + bool multiread_bit); +}; + /** * struct st_sensor_settings - ST specific sensor settings * @wai: Contents of WhoAmI register. @@ -196,7 +197,6 @@ struct st_sensor_data_ready_irq { * @bdu: Block data update register. * @das: Data Alignment Selection register. * @drdy_irq: Data ready register of the sensor. - * @sim: SPI serial interface mode register of the sensor. * @multi_read_bit: Use or not particular bit for [I2C/SPI] multi-read. 
* @bootime: samples to discard when sensor passing from power-down to power-up. */ @@ -213,7 +213,6 @@ struct st_sensor_settings { struct st_sensor_bdu bdu; struct st_sensor_das das; struct st_sensor_data_ready_irq drdy_irq; - struct st_sensor_sim sim; bool multi_read_bit; unsigned int bootime; }; @@ -222,47 +221,51 @@ struct st_sensor_settings { * struct st_sensor_data - ST sensor device status * @dev: Pointer to instance of struct device (I2C or SPI). * @trig: The trigger in use by the core driver. - * @mount_matrix: The mounting matrix of the sensor. * @sensor_settings: Pointer to the specific sensor settings in use. * @current_fullscale: Maximum range of measure by the sensor. * @vdd: Pointer to sensor's Vdd power supply * @vdd_io: Pointer to sensor's Vdd-IO power supply - * @regmap: Pointer to specific sensor regmap configuration. * @enabled: Status of the sensor (false->off, true->on). + * @multiread_bit: Use or not particular bit for [I2C/SPI] multiread. + * @buffer_data: Data used by buffer part. * @odr: Output data rate of the sensor [Hz]. * num_data_channels: Number of data channels used in buffer. * @drdy_int_pin: Redirect DRDY on pin 1 (1) or pin 2 (2). * @int_pin_open_drain: Set the interrupt/DRDY to open drain. - * @irq: the IRQ number. + * @get_irq_data_ready: Function to get the IRQ used for data ready signal. + * @tf: Transfer function structure used by I/O operations. + * @tb: Transfer buffers and mutex used by I/O operations. * @edge_irq: the IRQ triggers on edges and need special handling. * @hw_irq_trigger: if we're using the hardware interrupt on the sensor. * @hw_timestamp: Latest timestamp from the interrupt handler, when in use. - * @buffer_data: Data used by buffer part. 
*/ struct st_sensor_data { struct device *dev; struct iio_trigger *trig; - struct iio_mount_matrix mount_matrix; struct st_sensor_settings *sensor_settings; struct st_sensor_fullscale_avl *current_fullscale; struct regulator *vdd; struct regulator *vdd_io; - struct regmap *regmap; bool enabled; + bool multiread_bit; + + char *buffer_data; unsigned int odr; unsigned int num_data_channels; u8 drdy_int_pin; bool int_pin_open_drain; - int irq; + + unsigned int (*get_irq_data_ready) (struct iio_dev *indio_dev); + + const struct st_sensor_transfer_function *tf; + struct st_sensor_transfer_buffer tb; bool edge_irq; bool hw_irq_trigger; s64 hw_timestamp; - - char buffer_data[ST_SENSORS_MAX_BUFFER_SIZE] ____cacheline_aligned; }; #ifdef CONFIG_IIO_BUFFER @@ -313,11 +316,8 @@ int st_sensors_set_fullscale_by_gain(struct iio_dev *indio_dev, int scale); int st_sensors_read_info_raw(struct iio_dev *indio_dev, struct iio_chan_spec const *ch, int *val); -int st_sensors_get_settings_index(const char *name, - const struct st_sensor_settings *list, - const int list_length); - -int st_sensors_verify_id(struct iio_dev *indio_dev); +int st_sensors_check_device_support(struct iio_dev *indio_dev, + int num_sensors_list, const struct st_sensor_settings *sensor_settings); ssize_t st_sensors_sysfs_sampling_frequency_avail(struct device *dev, struct device_attribute *attr, char *buf); @@ -325,26 +325,4 @@ ssize_t st_sensors_sysfs_sampling_frequency_avail(struct device *dev, ssize_t st_sensors_sysfs_scale_avail(struct device *dev, struct device_attribute *attr, char *buf); -void st_sensors_dev_name_probe(struct device *dev, char *name, int len); - -/* Accelerometer */ -const struct st_sensor_settings *st_accel_get_settings(const char *name); -int st_accel_common_probe(struct iio_dev *indio_dev); -void st_accel_common_remove(struct iio_dev *indio_dev); - -/* Gyroscope */ -const struct st_sensor_settings *st_gyro_get_settings(const char *name); -int st_gyro_common_probe(struct iio_dev 
*indio_dev); -void st_gyro_common_remove(struct iio_dev *indio_dev); - -/* Magnetometer */ -const struct st_sensor_settings *st_magn_get_settings(const char *name); -int st_magn_common_probe(struct iio_dev *indio_dev); -void st_magn_common_remove(struct iio_dev *indio_dev); - -/* Pressure */ -const struct st_sensor_settings *st_press_get_settings(const char *name); -int st_press_common_probe(struct iio_dev *indio_dev); -void st_press_common_remove(struct iio_dev *indio_dev); - #endif /* ST_SENSORS_H */ diff --git a/include/linux/iio/common/st_sensors_i2c.h b/include/linux/iio/common/st_sensors_i2c.h index 5f15cf0103..1796af0933 100644 --- a/include/linux/iio/common/st_sensors_i2c.h +++ b/include/linux/iio/common/st_sensors_i2c.h @@ -1,10 +1,11 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * STMicroelectronics sensors i2c library driver * * Copyright 2012-2013 STMicroelectronics Inc. * * Denis Ciocca + * + * Licensed under the GPL-2. */ #ifndef ST_SENSORS_I2C_H @@ -12,8 +13,19 @@ #include #include +#include -int st_sensors_i2c_configure(struct iio_dev *indio_dev, - struct i2c_client *client); +void st_sensors_i2c_configure(struct iio_dev *indio_dev, + struct i2c_client *client, struct st_sensor_data *sdata); + +#ifdef CONFIG_OF +void st_sensors_of_i2c_probe(struct i2c_client *client, + const struct of_device_id *match); +#else +static inline void st_sensors_of_i2c_probe(struct i2c_client *client, + const struct of_device_id *match) +{ +} +#endif #endif /* ST_SENSORS_I2C_H */ diff --git a/include/linux/iio/common/st_sensors_spi.h b/include/linux/iio/common/st_sensors_spi.h index 90b25f087f..d964a3563d 100644 --- a/include/linux/iio/common/st_sensors_spi.h +++ b/include/linux/iio/common/st_sensors_spi.h @@ -1,10 +1,11 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * STMicroelectronics sensors spi library driver * * Copyright 2012-2013 STMicroelectronics Inc. * * Denis Ciocca + * + * Licensed under the GPL-2. 
*/ #ifndef ST_SENSORS_SPI_H @@ -13,7 +14,7 @@ #include #include -int st_sensors_spi_configure(struct iio_dev *indio_dev, - struct spi_device *spi); +void st_sensors_spi_configure(struct iio_dev *indio_dev, + struct spi_device *spi, struct st_sensor_data *sdata); #endif /* ST_SENSORS_SPI_H */ diff --git a/include/linux/iio/configfs.h b/include/linux/iio/configfs.h index 84cab3f47e..93befd67c1 100644 --- a/include/linux/iio/configfs.h +++ b/include/linux/iio/configfs.h @@ -1,8 +1,11 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * Industrial I/O configfs support * * Copyright (c) 2015 Intel Corporation + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published by + * the Free Software Foundation. */ #ifndef __IIO_CONFIGFS #define __IIO_CONFIGFS diff --git a/include/linux/iio/consumer.h b/include/linux/iio/consumer.h index 5fa5957586..9edccfba1f 100644 --- a/include/linux/iio/consumer.h +++ b/include/linux/iio/consumer.h @@ -1,8 +1,11 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * Industrial I/O in kernel consumer interface * * Copyright (c) 2011 Jonathan Cameron + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published by + * the Free Software Foundation. */ #ifndef _IIO_INKERN_CONSUMER_H_ #define _IIO_INKERN_CONSUMER_H_ @@ -13,7 +16,6 @@ struct iio_dev; struct iio_chan_spec; struct device; -struct device_node; /** * struct iio_channel - everything needed for a consumer to use a channel @@ -64,6 +66,15 @@ void iio_channel_release(struct iio_channel *chan); */ struct iio_channel *devm_iio_channel_get(struct device *dev, const char *consumer_channel); +/** + * devm_iio_channel_release() - Resource managed version of + * iio_channel_release(). + * @dev: Pointer to consumer device for which resource + * is allocared. + * @chan: The channel to be released. 
+ */ +void devm_iio_channel_release(struct device *dev, struct iio_channel *chan); + /** * iio_channel_get_all() - get all channels associated with a client * @dev: Pointer to consumer device. @@ -99,39 +110,13 @@ void iio_channel_release_all(struct iio_channel *chan); struct iio_channel *devm_iio_channel_get_all(struct device *dev); /** - * of_iio_channel_get_by_name() - get description of all that is needed to access channel. - * @np: Pointer to consumer device tree node - * @consumer_channel: Unique name to identify the channel on the consumer - * side. This typically describes the channels use within - * the consumer. E.g. 'battery_voltage' + * devm_iio_channel_release_all() - Resource managed version of + * iio_channel_release_all(). + * @dev: Pointer to consumer device for which resource + * is allocared. + * @chan: Array channel to be released. */ -#ifdef CONFIG_OF -struct iio_channel *of_iio_channel_get_by_name(struct device_node *np, const char *name); -#else -static inline struct iio_channel * -of_iio_channel_get_by_name(struct device_node *np, const char *name) -{ - return NULL; -} -#endif - -/** - * devm_of_iio_channel_get_by_name() - Resource managed version of of_iio_channel_get_by_name(). - * @dev: Pointer to consumer device. - * @np: Pointer to consumer device tree node - * @consumer_channel: Unique name to identify the channel on the consumer - * side. This typically describes the channels use within - * the consumer. E.g. 'battery_voltage' - * - * Returns a pointer to negative errno if it is not able to get the iio channel - * otherwise returns valid pointer for iio channel. - * - * The allocated iio channel is automatically released when the device is - * unbound. 
- */ -struct iio_channel *devm_of_iio_channel_get_by_name(struct device *dev, - struct device_node *np, - const char *consumer_channel); +void devm_iio_channel_release_all(struct device *dev, struct iio_channel *chan); struct iio_cb_buffer; /** @@ -148,17 +133,6 @@ struct iio_cb_buffer *iio_channel_get_all_cb(struct device *dev, int (*cb)(const void *data, void *private), void *private); -/** - * iio_channel_cb_set_buffer_watermark() - set the buffer watermark. - * @cb_buffer: The callback buffer from whom we want the channel - * information. - * @watermark: buffer watermark in bytes. - * - * This function allows to configure the buffer watermark. - */ -int iio_channel_cb_set_buffer_watermark(struct iio_cb_buffer *cb_buffer, - size_t watermark); - /** * iio_channel_release_all_cb() - release and unregister the callback. * @cb_buffer: The callback buffer that was allocated. @@ -241,47 +215,6 @@ int iio_read_channel_average_raw(struct iio_channel *chan, int *val); */ int iio_read_channel_processed(struct iio_channel *chan, int *val); -/** - * iio_read_channel_processed_scale() - read and scale a processed value - * @chan: The channel being queried. - * @val: Value read back. - * @scale: Scale factor to apply during the conversion - * - * Returns an error code or 0. - * - * This function will read a processed value from a channel. This will work - * like @iio_read_channel_processed() but also scale with an additional - * scale factor while attempting to minimize any precision loss. - */ -int iio_read_channel_processed_scale(struct iio_channel *chan, int *val, - unsigned int scale); - -/** - * iio_write_channel_attribute() - Write values to the device attribute. - * @chan: The channel being queried. - * @val: Value being written. - * @val2: Value being written.val2 use depends on attribute type. - * @attribute: info attribute to be read. - * - * Returns an error code or 0. 
- */ -int iio_write_channel_attribute(struct iio_channel *chan, int val, - int val2, enum iio_chan_info_enum attribute); - -/** - * iio_read_channel_attribute() - Read values from the device attribute. - * @chan: The channel being queried. - * @val: Value being written. - * @val2: Value being written.Val2 use depends on attribute type. - * @attribute: info attribute to be written. - * - * Returns an error code if failed. Else returns a description of what is in val - * and val2, such as IIO_VAL_INT_PLUS_MICRO telling us we have a value of val - * + val2/1e6 - */ -int iio_read_channel_attribute(struct iio_channel *chan, int *val, - int *val2, enum iio_chan_info_enum attribute); - /** * iio_write_channel_raw() - write to a given channel * @chan: The channel being queried. @@ -292,48 +225,6 @@ int iio_read_channel_attribute(struct iio_channel *chan, int *val, */ int iio_write_channel_raw(struct iio_channel *chan, int val); -/** - * iio_read_max_channel_raw() - read maximum available raw value from a given - * channel, i.e. the maximum possible value. - * @chan: The channel being queried. - * @val: Value read back. - * - * Note raw reads from iio channels are in adc counts and hence - * scale will need to be applied if standard units are required. - */ -int iio_read_max_channel_raw(struct iio_channel *chan, int *val); - -/** - * iio_read_avail_channel_raw() - read available raw values from a given channel - * @chan: The channel being queried. - * @vals: Available values read back. - * @length: Number of entries in vals. - * - * Returns an error code, IIO_AVAIL_RANGE or IIO_AVAIL_LIST. - * - * For ranges, three vals are always returned; min, step and max. - * For lists, all the possible values are enumerated. - * - * Note raw available values from iio channels are in adc counts and - * hence scale will need to be applied if standard units are required. 
- */ -int iio_read_avail_channel_raw(struct iio_channel *chan, - const int **vals, int *length); - -/** - * iio_read_avail_channel_attribute() - read available channel attribute values - * @chan: The channel being queried. - * @vals: Available values read back. - * @type: Type of values read back. - * @length: Number of entries in vals. - * @attribute: info attribute to be read back. - * - * Returns an error code, IIO_AVAIL_RANGE or IIO_AVAIL_LIST. - */ -int iio_read_avail_channel_attribute(struct iio_channel *chan, - const int **vals, int *type, int *length, - enum iio_chan_info_enum attribute); - /** * iio_get_channel_type() - get the type of a channel * @channel: The channel being queried. @@ -344,19 +235,6 @@ int iio_read_avail_channel_attribute(struct iio_channel *chan, int iio_get_channel_type(struct iio_channel *channel, enum iio_chan_type *type); -/** - * iio_read_channel_offset() - read the offset value for a channel - * @chan: The channel being queried. - * @val: First part of value read back. - * @val2: Second part of value read back. - * - * Note returns a description of what is in val and val2, such - * as IIO_VAL_INT_PLUS_MICRO telling us we have a value of val - * + val2/1e6 - */ -int iio_read_channel_offset(struct iio_channel *chan, int *val, - int *val2); - /** * iio_read_channel_scale() - read the scale value for a channel * @chan: The channel being queried. @@ -393,41 +271,4 @@ int iio_read_channel_scale(struct iio_channel *chan, int *val, int iio_convert_raw_to_processed(struct iio_channel *chan, int raw, int *processed, unsigned int scale); -/** - * iio_get_channel_ext_info_count() - get number of ext_info attributes - * connected to the channel. - * @chan: The channel being queried - * - * Returns the number of ext_info attributes - */ -unsigned int iio_get_channel_ext_info_count(struct iio_channel *chan); - -/** - * iio_read_channel_ext_info() - read ext_info attribute from a given channel - * @chan: The channel being queried. 
- * @attr: The ext_info attribute to read. - * @buf: Where to store the attribute value. Assumed to hold - * at least PAGE_SIZE bytes. - * - * Returns the number of bytes written to buf (perhaps w/o zero termination; - * it need not even be a string), or an error code. - */ -ssize_t iio_read_channel_ext_info(struct iio_channel *chan, - const char *attr, char *buf); - -/** - * iio_write_channel_ext_info() - write ext_info attribute from a given channel - * @chan: The channel being queried. - * @attr: The ext_info attribute to read. - * @buf: The new attribute value. Strings needs to be zero- - * terminated, but the terminator should not be included - * in the below len. - * @len: The size of the new attribute value. - * - * Returns the number of accepted bytes, which should be the same as len. - * An error code can also be returned. - */ -ssize_t iio_write_channel_ext_info(struct iio_channel *chan, const char *attr, - const char *buf, size_t len); - #endif diff --git a/include/linux/iio/dac/ad5421.h b/include/linux/iio/dac/ad5421.h index d8ee9a7f8a..8fd8f057a8 100644 --- a/include/linux/iio/dac/ad5421.h +++ b/include/linux/iio/dac/ad5421.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef __IIO_DAC_AD5421_H__ #define __IIO_DAC_AD5421_H__ diff --git a/include/linux/iio/dac/ad5504.h b/include/linux/iio/dac/ad5504.h index 9f23c90486..43895376a9 100644 --- a/include/linux/iio/dac/ad5504.h +++ b/include/linux/iio/dac/ad5504.h @@ -1,8 +1,9 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * AD5504 SPI DAC driver * * Copyright 2011 Analog Devices Inc. + * + * Licensed under the GPL-2. */ #ifndef SPI_AD5504_H_ diff --git a/include/linux/iio/dac/ad5791.h b/include/linux/iio/dac/ad5791.h index 02966553f7..45ee281c66 100644 --- a/include/linux/iio/dac/ad5791.h +++ b/include/linux/iio/dac/ad5791.h @@ -1,8 +1,9 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * AD5791 SPI DAC driver * * Copyright 2011 Analog Devices Inc. + * + * Licensed under the GPL-2. 
*/ #ifndef SPI_AD5791_H_ diff --git a/include/linux/iio/dac/max517.h b/include/linux/iio/dac/max517.h index 4923645a18..7668716cd7 100644 --- a/include/linux/iio/dac/max517.h +++ b/include/linux/iio/dac/max517.h @@ -1,8 +1,9 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * MAX517 DAC driver * * Copyright 2011 Roland Stigge + * + * Licensed under the GPL-2 or later. */ #ifndef IIO_DAC_MAX517_H_ #define IIO_DAC_MAX517_H_ diff --git a/include/linux/iio/dac/mcp4725.h b/include/linux/iio/dac/mcp4725.h index 1f7e53c506..91530e6611 100644 --- a/include/linux/iio/dac/mcp4725.h +++ b/include/linux/iio/dac/mcp4725.h @@ -1,25 +1,16 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * MCP4725 DAC driver * * Copyright (C) 2012 Peter Meerwald + * + * Licensed under the GPL-2 or later. */ #ifndef IIO_DAC_MCP4725_H_ #define IIO_DAC_MCP4725_H_ -/** - * struct mcp4725_platform_data - MCP4725/6 DAC specific data. - * @use_vref: Whether an external reference voltage on Vref pin should be used. - * Additional vref-supply must be specified when used. - * @vref_buffered: Controls buffering of the external reference voltage. - * - * Vref related settings are available only on MCP4756. See - * Documentation/devicetree/bindings/iio/dac/microchip,mcp4725.yaml for more information. - */ struct mcp4725_platform_data { - bool use_vref; - bool vref_buffered; + u16 vref_mv; }; #endif /* IIO_DAC_MCP4725_H_ */ diff --git a/include/linux/iio/driver.h b/include/linux/iio/driver.h index 36de60a5da..7dfb10ee26 100644 --- a/include/linux/iio/driver.h +++ b/include/linux/iio/driver.h @@ -1,14 +1,16 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * Industrial I/O in kernel access map interface. * * Copyright (c) 2011 Jonathan Cameron + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published by + * the Free Software Foundation. 
*/ #ifndef _IIO_INKERN_H_ #define _IIO_INKERN_H_ -struct iio_dev; struct iio_map; /** diff --git a/include/linux/iio/events.h b/include/linux/iio/events.h index a4558c45a5..8ad87d1c53 100644 --- a/include/linux/iio/events.h +++ b/include/linux/iio/events.h @@ -1,7 +1,10 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* The industrial I/O - event passing to userspace * * Copyright (c) 2008-2011 Jonathan Cameron + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published by + * the Free Software Foundation. */ #ifndef _IIO_EVENTS_H_ #define _IIO_EVENTS_H_ diff --git a/include/linux/iio/frequency/ad9523.h b/include/linux/iio/frequency/ad9523.h index ff22a0ac15..12ce3ee427 100644 --- a/include/linux/iio/frequency/ad9523.h +++ b/include/linux/iio/frequency/ad9523.h @@ -1,8 +1,9 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * AD9523 SPI Low Jitter Clock Generator * * Copyright 2012 Analog Devices Inc. + * + * Licensed under the GPL-2. */ #ifndef IIO_FREQUENCY_AD9523_H_ @@ -128,8 +129,8 @@ enum cpole1_capacitor { * @pll2_ndiv_b_cnt: PLL2 Feedback N-divider, B Counter, range 0..63. * @pll2_freq_doubler_en: PLL2 frequency doubler enable. * @pll2_r2_div: PLL2 R2 divider, range 0..31. - * @pll2_vco_div_m1: VCO1 divider, range 3..5. - * @pll2_vco_div_m2: VCO2 divider, range 3..5. + * @pll2_vco_diff_m1: VCO1 divider, range 3..5. + * @pll2_vco_diff_m2: VCO2 divider, range 3..5. * @rpole2: PLL2 loop filter Rpole resistor value. * @rzero: PLL2 loop filter Rzero resistor value. * @cpole1: PLL2 loop filter Cpole capacitor value. 
@@ -175,8 +176,8 @@ struct ad9523_platform_data { unsigned char pll2_ndiv_b_cnt; bool pll2_freq_doubler_en; unsigned char pll2_r2_div; - unsigned char pll2_vco_div_m1; /* 3..5 */ - unsigned char pll2_vco_div_m2; /* 3..5 */ + unsigned char pll2_vco_diff_m1; /* 3..5 */ + unsigned char pll2_vco_diff_m2; /* 3..5 */ /* Loop Filter PLL2 */ enum rpole2_resistor rpole2; diff --git a/include/linux/iio/frequency/adf4350.h b/include/linux/iio/frequency/adf4350.h index de45cf2ee1..ffd8c8f909 100644 --- a/include/linux/iio/frequency/adf4350.h +++ b/include/linux/iio/frequency/adf4350.h @@ -1,8 +1,9 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * ADF4350/ADF4351 SPI PLL driver * * Copyright 2012-2013 Analog Devices Inc. + * + * Licensed under the GPL-2. */ #ifndef IIO_PLL_ADF4350_H_ @@ -103,6 +104,9 @@ * @r2_user_settings: User defined settings for ADF4350/1 REGISTER_2. * @r3_user_settings: User defined settings for ADF4350/1 REGISTER_3. * @r4_user_settings: User defined settings for ADF4350/1 REGISTER_4. + * @gpio_lock_detect: Optional, if set with a valid GPIO number, + * pll lock state is tested upon read. + * If not used - set to -1. 
*/ struct adf4350_platform_data { @@ -118,6 +122,7 @@ struct adf4350_platform_data { unsigned r2_user_settings; unsigned r3_user_settings; unsigned r4_user_settings; + int gpio_lock_detect; }; #endif /* IIO_PLL_ADF4350_H_ */ diff --git a/include/linux/iio/gyro/itg3200.h b/include/linux/iio/gyro/itg3200.h index a602fe7b84..2a820850f2 100644 --- a/include/linux/iio/gyro/itg3200.h +++ b/include/linux/iio/gyro/itg3200.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * itg3200.h -- support InvenSense ITG3200 * Digital 3-Axis Gyroscope driver @@ -6,6 +5,10 @@ * Copyright (c) 2011 Christian Strobel * Copyright (c) 2011 Manuel Stahl * Copyright (c) 2012 Thorsten Nowak + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. */ #ifndef I2C_ITG3200_H_ @@ -101,7 +104,6 @@ struct itg3200 { struct i2c_client *i2c; struct iio_trigger *trig; - struct iio_mount_matrix orientation; }; enum ITG3200_SCAN_INDEX { diff --git a/include/linux/iio/iio.h b/include/linux/iio/iio.h index 324561b7a5..b4a0679e4a 100644 --- a/include/linux/iio/iio.h +++ b/include/linux/iio/iio.h @@ -1,8 +1,11 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* The industrial I/O core * * Copyright (c) 2008 Jonathan Cameron + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published by + * the Free Software Foundation. */ #ifndef _INDUSTRIAL_IO_H_ #define _INDUSTRIAL_IO_H_ @@ -17,6 +20,34 @@ * Currently assumes nano seconds. 
*/ +enum iio_chan_info_enum { + IIO_CHAN_INFO_RAW = 0, + IIO_CHAN_INFO_PROCESSED, + IIO_CHAN_INFO_SCALE, + IIO_CHAN_INFO_OFFSET, + IIO_CHAN_INFO_CALIBSCALE, + IIO_CHAN_INFO_CALIBBIAS, + IIO_CHAN_INFO_PEAK, + IIO_CHAN_INFO_PEAK_SCALE, + IIO_CHAN_INFO_QUADRATURE_CORRECTION_RAW, + IIO_CHAN_INFO_AVERAGE_RAW, + IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY, + IIO_CHAN_INFO_HIGH_PASS_FILTER_3DB_FREQUENCY, + IIO_CHAN_INFO_SAMP_FREQ, + IIO_CHAN_INFO_FREQUENCY, + IIO_CHAN_INFO_PHASE, + IIO_CHAN_INFO_HARDWAREGAIN, + IIO_CHAN_INFO_HYSTERESIS, + IIO_CHAN_INFO_INT_TIME, + IIO_CHAN_INFO_ENABLE, + IIO_CHAN_INFO_CALIBHEIGHT, + IIO_CHAN_INFO_CALIBWEIGHT, + IIO_CHAN_INFO_DEBOUNCE_COUNT, + IIO_CHAN_INFO_DEBOUNCE_TIME, + IIO_CHAN_INFO_CALIBEMISSIVITY, + IIO_CHAN_INFO_OVERSAMPLING_RATIO, +}; + enum iio_shared_by { IIO_SEPARATE, IIO_SHARED_BY_TYPE, @@ -127,7 +158,8 @@ struct iio_mount_matrix { ssize_t iio_show_mount_matrix(struct iio_dev *indio_dev, uintptr_t priv, const struct iio_chan_spec *chan, char *buf); -int iio_read_mount_matrix(struct device *dev, struct iio_mount_matrix *matrix); +int of_iio_read_mount_matrix(const struct device *dev, const char *propname, + struct iio_mount_matrix *matrix); typedef const struct iio_mount_matrix * (iio_get_mount_matrix_t)(const struct iio_dev *indio_dev, @@ -179,36 +211,26 @@ struct iio_event_spec { * @address: Driver specific identifier. * @scan_index: Monotonic index to give ordering in scans when read * from a buffer. - * @scan_type: struct describing the scan type - * @scan_type.sign: 's' or 'u' to specify signed or unsigned - * @scan_type.realbits: Number of valid bits of data - * @scan_type.storagebits: Realbits + padding - * @scan_type.shift: Shift right by this before masking out - * realbits. - * @scan_type.repeat: Number of times real/storage bits repeats. - * When the repeat element is more than 1, then - * the type element in sysfs will show a repeat - * value. Otherwise, the number of repetitions - * is omitted. 
- * @scan_type.endianness: little or big endian + * @scan_type: sign: 's' or 'u' to specify signed or unsigned + * realbits: Number of valid bits of data + * storagebits: Realbits + padding + * shift: Shift right by this before masking out + * realbits. + * repeat: Number of times real/storage bits + * repeats. When the repeat element is + * more than 1, then the type element in + * sysfs will show a repeat value. + * Otherwise, the number of repetitions is + * omitted. + * endianness: little or big endian * @info_mask_separate: What information is to be exported that is specific to * this channel. - * @info_mask_separate_available: What availability information is to be - * exported that is specific to this channel. * @info_mask_shared_by_type: What information is to be exported that is shared * by all channels of the same type. - * @info_mask_shared_by_type_available: What availability information is to be - * exported that is shared by all channels of the same - * type. * @info_mask_shared_by_dir: What information is to be exported that is shared * by all channels of the same direction. - * @info_mask_shared_by_dir_available: What availability information is to be - * exported that is shared by all channels of the same - * direction. * @info_mask_shared_by_all: What information is to be exported that is shared * by all channels. - * @info_mask_shared_by_all_available: What availability information is to be - * exported that is shared by all channels. * @event_spec: Array of events which should be registered for this * channel. * @num_event_specs: Size of the event_spec array. 
@@ -247,13 +269,9 @@ struct iio_chan_spec { enum iio_endian endianness; } scan_type; long info_mask_separate; - long info_mask_separate_available; long info_mask_shared_by_type; - long info_mask_shared_by_type_available; long info_mask_shared_by_dir; - long info_mask_shared_by_dir_available; long info_mask_shared_by_all; - long info_mask_shared_by_all_available; const struct iio_event_spec *event_spec; unsigned int num_event_specs; const struct iio_chan_spec_ext_info *ext_info; @@ -283,23 +301,6 @@ static inline bool iio_channel_has_info(const struct iio_chan_spec *chan, (chan->info_mask_shared_by_all & BIT(type)); } -/** - * iio_channel_has_available() - Checks if a channel has an available attribute - * @chan: The channel to be queried - * @type: Type of the available attribute to be checked - * - * Returns true if the channel supports reporting available values for the - * given attribute type, false otherwise. - */ -static inline bool iio_channel_has_available(const struct iio_chan_spec *chan, - enum iio_chan_info_enum type) -{ - return (chan->info_mask_separate_available & BIT(type)) | - (chan->info_mask_shared_by_type_available & BIT(type)) | - (chan->info_mask_shared_by_dir_available & BIT(type)) | - (chan->info_mask_shared_by_all_available & BIT(type)); -} - #define IIO_CHAN_SOFT_TIMESTAMP(_si) { \ .type = IIO_TIMESTAMP, \ .channel = -1, \ @@ -320,22 +321,19 @@ unsigned int iio_get_time_res(const struct iio_dev *indio_dev); #define INDIO_BUFFER_SOFTWARE 0x04 #define INDIO_BUFFER_HARDWARE 0x08 #define INDIO_EVENT_TRIGGERED 0x10 -#define INDIO_HARDWARE_TRIGGERED 0x20 #define INDIO_ALL_BUFFER_MODES \ (INDIO_BUFFER_TRIGGERED | INDIO_BUFFER_HARDWARE | INDIO_BUFFER_SOFTWARE) -#define INDIO_ALL_TRIGGERED_MODES \ - (INDIO_BUFFER_TRIGGERED \ - | INDIO_EVENT_TRIGGERED \ - | INDIO_HARDWARE_TRIGGERED) - #define INDIO_MAX_RAW_ELEMENTS 4 struct iio_trigger; /* forward declaration */ +struct iio_dev; /** * struct iio_info - constant information about device + * 
@driver_module: module structure used to ensure correct + * ownership of chrdevs etc * @event_attrs: event control attributes * @attrs: general purpose device attributes * @read_raw: function to request a value from the device. @@ -351,18 +349,8 @@ struct iio_trigger; /* forward declaration */ * max_len specifies maximum number of elements * vals pointer can contain. val_len is used to return * length of valid elements in vals. - * @read_avail: function to return the available values from the device. - * mask specifies which value. Note 0 means the available - * values for the channel in question. Return value - * specifies if a IIO_AVAIL_LIST or a IIO_AVAIL_RANGE is - * returned in vals. The type of the vals are returned in - * type and the number of vals is returned in length. For - * ranges, there are always three vals returned; min, step - * and max. For lists, all possible values are enumerated. * @write_raw: function to write a value to the device. * Parameters are the same as for read_raw. - * @read_label: function to request label name for a specified label, - * for better channel identification. * @write_raw_get_fmt: callback function to query the expected * format/precision. If not set by the driver, write_raw * returns IIO_VAL_INT_PLUS_MICRO. @@ -392,7 +380,8 @@ struct iio_trigger; /* forward declaration */ * were flushed and there was an error. 
**/ struct iio_info { - const struct attribute_group *event_attrs; + struct module *driver_module; + struct attribute_group *event_attrs; const struct attribute_group *attrs; int (*read_raw)(struct iio_dev *indio_dev, @@ -408,23 +397,12 @@ struct iio_info { int *val_len, long mask); - int (*read_avail)(struct iio_dev *indio_dev, - struct iio_chan_spec const *chan, - const int **vals, - int *type, - int *length, - long mask); - int (*write_raw)(struct iio_dev *indio_dev, struct iio_chan_spec const *chan, int val, int val2, long mask); - int (*read_label)(struct iio_dev *indio_dev, - struct iio_chan_spec const *chan, - char *label); - int (*write_raw_get_fmt)(struct iio_dev *indio_dev, struct iio_chan_spec const *chan, long mask); @@ -487,38 +465,56 @@ struct iio_buffer_setup_ops { /** * struct iio_dev - industrial I/O device + * @id: [INTERN] used to identify device internally * @modes: [DRIVER] operating modes supported by device * @currentmode: [DRIVER] current operating mode * @dev: [DRIVER] device structure, should be assigned a parent * and owner + * @event_interface: [INTERN] event chrdevs associated with interrupt lines * @buffer: [DRIVER] any buffer present + * @buffer_list: [INTERN] list of all buffers currently attached * @scan_bytes: [INTERN] num bytes captured to be fed to buffer demux - * @mlock: [INTERN] lock used to prevent simultaneous device state + * @mlock: [DRIVER] lock used to prevent simultaneous device state * changes * @available_scan_masks: [DRIVER] optional array of allowed bitmasks * @masklength: [INTERN] the length of the mask established from * channels * @active_scan_mask: [INTERN] union of all scan masks requested by buffers * @scan_timestamp: [INTERN] set if any buffers have requested timestamp + * @scan_index_timestamp:[INTERN] cache of the index to the timestamp * @trig: [INTERN] current device trigger (buffer modes) + * @trig_readonly [INTERN] mark the current trigger immutable * @pollfunc: [DRIVER] function run on trigger being 
received * @pollfunc_event: [DRIVER] function run on events trigger being received * @channels: [DRIVER] channel specification structure table * @num_channels: [DRIVER] number of channels specified in @channels. + * @channel_attr_list: [INTERN] keep track of automatically created channel + * attributes + * @chan_attr_group: [INTERN] group for all attrs in base directory * @name: [DRIVER] name of the device. - * @label: [DRIVER] unique name to identify which device this is * @info: [DRIVER] callbacks and constant info from driver + * @clock_id: [INTERN] timestamping clock posix identifier + * @info_exist_lock: [INTERN] lock to prevent use during removal * @setup_ops: [DRIVER] callbacks to call before and after buffer * enable/disable - * @priv: [DRIVER] reference to driver's private information - * **MUST** be accessed **ONLY** via iio_priv() helper + * @chrdev: [INTERN] associated character device + * @groups: [INTERN] attribute groups + * @groupcounter: [INTERN] index of next attribute group + * @flags: [INTERN] file ops related flags including busy flag. + * @debugfs_dentry: [INTERN] device specific debugfs dentry. + * @cached_reg_addr: [INTERN] cached register address for debugfs reads. 
*/ struct iio_dev { + int id; + int modes; int currentmode; struct device dev; + struct iio_event_interface *event_interface; + struct iio_buffer *buffer; + struct list_head buffer_list; int scan_bytes; struct mutex mlock; @@ -526,50 +522,40 @@ struct iio_dev { unsigned masklength; const unsigned long *active_scan_mask; bool scan_timestamp; + unsigned scan_index_timestamp; struct iio_trigger *trig; + bool trig_readonly; struct iio_poll_func *pollfunc; struct iio_poll_func *pollfunc_event; struct iio_chan_spec const *channels; int num_channels; + struct list_head channel_attr_list; + struct attribute_group chan_attr_group; const char *name; - const char *label; const struct iio_info *info; + clockid_t clock_id; + struct mutex info_exist_lock; const struct iio_buffer_setup_ops *setup_ops; + struct cdev chrdev; +#define IIO_MAX_GROUPS 6 + const struct attribute_group *groups[IIO_MAX_GROUPS + 1]; + int groupcounter; - void *priv; + unsigned long flags; +#if defined(CONFIG_DEBUG_FS) + struct dentry *debugfs_dentry; + unsigned cached_reg_addr; +#endif }; -int iio_device_id(struct iio_dev *indio_dev); - const struct iio_chan_spec *iio_find_channel_from_si(struct iio_dev *indio_dev, int si); -/** - * iio_device_register() - register a device with the IIO subsystem - * @indio_dev: Device structure filled by the device driver - **/ -#define iio_device_register(indio_dev) \ - __iio_device_register((indio_dev), THIS_MODULE) -int __iio_device_register(struct iio_dev *indio_dev, struct module *this_mod); +int iio_device_register(struct iio_dev *indio_dev); void iio_device_unregister(struct iio_dev *indio_dev); -/** - * devm_iio_device_register - Resource-managed iio_device_register() - * @dev: Device to allocate iio_dev for - * @indio_dev: Device structure filled by the device driver - * - * Managed iio_device_register. The IIO device registered with this - * function is automatically unregistered on driver detach. This function - * calls iio_device_register() internally. 
Refer to that function for more - * information. - * - * RETURNS: - * 0 on success, negative error number on failure. - */ -#define devm_iio_device_register(dev, indio_dev) \ - __devm_iio_device_register((dev), (indio_dev), THIS_MODULE) -int __devm_iio_device_register(struct device *dev, struct iio_dev *indio_dev, - struct module *this_mod); +int devm_iio_device_register(struct device *dev, struct iio_dev *indio_dev); +void devm_iio_device_unregister(struct device *dev, struct iio_dev *indio_dev); int iio_push_event(struct iio_dev *indio_dev, u64 ev_code, s64 timestamp); int iio_device_claim_direct_mode(struct iio_dev *indio_dev); void iio_device_release_direct_mode(struct iio_dev *indio_dev); @@ -586,8 +572,14 @@ static inline void iio_device_put(struct iio_dev *indio_dev) put_device(&indio_dev->dev); } -clockid_t iio_device_get_clock(const struct iio_dev *indio_dev); -int iio_device_set_clock(struct iio_dev *indio_dev, clockid_t clock_id); +/** + * iio_device_get_clock() - Retrieve current timestamping clock for the device + * @indio_dev: IIO device structure containing the device + */ +static inline clockid_t iio_device_get_clock(const struct iio_dev *indio_dev) +{ + return indio_dev->clock_id; +} /** * dev_to_iio_dev() - Get IIO device struct from a device struct @@ -611,26 +603,6 @@ static inline struct iio_dev *iio_device_get(struct iio_dev *indio_dev) return indio_dev ? dev_to_iio_dev(get_device(&indio_dev->dev)) : NULL; } -/** - * iio_device_set_parent() - assign parent device to the IIO device object - * @indio_dev: IIO device structure - * @parent: reference to parent device object - * - * This utility must be called between IIO device allocation - * (via devm_iio_device_alloc()) & IIO device registration - * (via iio_device_register() and devm_iio_device_register())). - * By default, the device allocation will also assign a parent device to - * the IIO device object. 
In cases where devm_iio_device_alloc() is used, - * sometimes the parent device must be different than the device used to - * manage the allocation. - * In that case, this helper should be used to change the parent, hence the - * requirement to call this between allocation & registration. - **/ -static inline void iio_device_set_parent(struct iio_dev *indio_dev, - struct device *parent) -{ - indio_dev->dev.parent = parent; -} /** * iio_device_set_drvdata() - Set device driver data @@ -651,26 +623,34 @@ static inline void iio_device_set_drvdata(struct iio_dev *indio_dev, void *data) * * Returns the data previously set with iio_device_set_drvdata() */ -static inline void *iio_device_get_drvdata(const struct iio_dev *indio_dev) +static inline void *iio_device_get_drvdata(struct iio_dev *indio_dev) { return dev_get_drvdata(&indio_dev->dev); } /* Can we make this smaller? */ #define IIO_ALIGN L1_CACHE_BYTES -struct iio_dev *iio_device_alloc(struct device *parent, int sizeof_priv); +struct iio_dev *iio_device_alloc(int sizeof_priv); -/* The information at the returned address is guaranteed to be cacheline aligned */ static inline void *iio_priv(const struct iio_dev *indio_dev) { - return indio_dev->priv; + return (char *)indio_dev + ALIGN(sizeof(struct iio_dev), IIO_ALIGN); +} + +static inline struct iio_dev *iio_priv_to_dev(void *priv) +{ + return (struct iio_dev *)((char *)priv - + ALIGN(sizeof(struct iio_dev), IIO_ALIGN)); } void iio_device_free(struct iio_dev *indio_dev); -struct iio_dev *devm_iio_device_alloc(struct device *parent, int sizeof_priv); -__printf(2, 3) -struct iio_trigger *devm_iio_trigger_alloc(struct device *parent, - const char *fmt, ...); +int devm_iio_device_match(struct device *dev, void *res, void *data); +struct iio_dev *devm_iio_device_alloc(struct device *dev, int sizeof_priv); +void devm_iio_device_free(struct device *dev, struct iio_dev *indio_dev); +struct iio_trigger *devm_iio_trigger_alloc(struct device *dev, + const char *fmt, ...); 
+void devm_iio_trigger_free(struct device *dev, struct iio_trigger *iio_trig); + /** * iio_buffer_enabled() - helper function to test if the buffer is enabled * @indio_dev: IIO device structure for device @@ -687,7 +667,10 @@ static inline bool iio_buffer_enabled(struct iio_dev *indio_dev) * @indio_dev: IIO device structure for device **/ #if defined(CONFIG_DEBUG_FS) -struct dentry *iio_get_debugfs_dentry(struct iio_dev *indio_dev); +static inline struct dentry *iio_get_debugfs_dentry(struct iio_dev *indio_dev) +{ + return indio_dev->debugfs_dentry; +} #else static inline struct dentry *iio_get_debugfs_dentry(struct iio_dev *indio_dev) { diff --git a/include/linux/iio/imu/adis.h b/include/linux/iio/imu/adis.h index cf49997d59..360da7d18a 100644 --- a/include/linux/iio/imu/adis.h +++ b/include/linux/iio/imu/adis.h @@ -1,9 +1,10 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * Common library for ADIS16XXX devices * * Copyright 2012 Analog Devices Inc. * Author: Lars-Peter Clausen + * + * Licensed under the GPL-2 or later. 
*/ #ifndef __IIO_ADIS_H__ @@ -21,58 +22,27 @@ struct adis; -/** - * struct adis_timeouts - ADIS chip variant timeouts - * @reset_ms - Wait time after rst pin goes inactive - * @sw_reset_ms - Wait time after sw reset command - * @self_test_ms - Wait time after self test command - */ -struct adis_timeout { - u16 reset_ms; - u16 sw_reset_ms; - u16 self_test_ms; -}; /** * struct adis_data - ADIS chip variant specific data * @read_delay: SPI delay for read operations in us * @write_delay: SPI delay for write operations in us - * @cs_change_delay: SPI delay between CS changes in us * @glob_cmd_reg: Register address of the GLOB_CMD register * @msc_ctrl_reg: Register address of the MSC_CTRL register * @diag_stat_reg: Register address of the DIAG_STAT register - * @prod_id_reg: Register address of the PROD_ID register - * @prod_id: Product ID code that should be expected when reading @prod_id_reg - * @self_test_mask: Bitmask of supported self-test operations - * @self_test_reg: Register address to request self test command - * @self_test_no_autoclear: True if device's self-test needs clear of ctrl reg * @status_error_msgs: Array of error messgaes - * @status_error_mask: Bitmask of errors supported by the device - * @timeouts: Chip specific delays - * @enable_irq: Hook for ADIS devices that have a special IRQ enable/disable - * @has_paging: True if ADIS device has paged registers - * @burst_reg_cmd: Register command that triggers burst - * @burst_len: Burst size in the SPI RX buffer. If @burst_max_len is defined, - * this should be the minimum size supported by the device. 
- * @burst_max_len: Holds the maximum burst size when the device supports - * more than one burst mode with different sizes - * @burst_max_speed_hz: Maximum spi speed that can be used in burst mode + * @status_error_mask: */ struct adis_data { unsigned int read_delay; unsigned int write_delay; - unsigned int cs_change_delay; unsigned int glob_cmd_reg; unsigned int msc_ctrl_reg; unsigned int diag_stat_reg; - unsigned int prod_id_reg; - - unsigned int prod_id; unsigned int self_test_mask; - unsigned int self_test_reg; bool self_test_no_autoclear; - const struct adis_timeout *timeouts; + unsigned int startup_delay; const char * const *status_error_msgs; unsigned int status_error_mask; @@ -80,52 +50,18 @@ struct adis_data { int (*enable_irq)(struct adis *adis, bool enable); bool has_paging; - - unsigned int burst_reg_cmd; - unsigned int burst_len; - unsigned int burst_max_len; - unsigned int burst_max_speed_hz; }; -/** - * struct adis - ADIS device instance data - * @spi: Reference to SPI device which owns this ADIS IIO device - * @trig: IIO trigger object data - * @data: ADIS chip variant specific data - * @burst: ADIS burst transfer information - * @burst_extra_len: Burst extra length. Should only be used by devices that can - * dynamically change their burst mode length. 
- * @state_lock: Lock used by the device to protect state - * @msg: SPI message object - * @xfer: SPI transfer objects to be used for a @msg - * @current_page: Some ADIS devices have registers, this selects current page - * @irq_flag: IRQ handling flags as passed to request_irq() - * @buffer: Data buffer for information read from the device - * @tx: DMA safe TX buffer for SPI transfers - * @rx: DMA safe RX buffer for SPI transfers - */ struct adis { struct spi_device *spi; struct iio_trigger *trig; const struct adis_data *data; - unsigned int burst_extra_len; - /** - * The state_lock is meant to be used during operations that require - * a sequence of SPI R/W in order to protect the SPI transfer - * information (fields 'xfer', 'msg' & 'current_page') between - * potential concurrent accesses. - * This lock is used by all "adis_{functions}" that have to read/write - * registers. These functions also have unlocked variants - * (see "__adis_{functions}"), which don't hold this lock. - * This allows users of the ADIS library to group SPI R/W into - * the drivers, but they also must manage this lock themselves. 
- */ - struct mutex state_lock; + + struct mutex txrx_lock; struct spi_message msg; struct spi_transfer *xfer; unsigned int current_page; - unsigned long irq_flag; void *buffer; uint8_t tx[10] ____cacheline_aligned; @@ -134,142 +70,13 @@ struct adis { int adis_init(struct adis *adis, struct iio_dev *indio_dev, struct spi_device *spi, const struct adis_data *data); -int __adis_reset(struct adis *adis); +int adis_reset(struct adis *adis); -/** - * adis_reset() - Reset the device - * @adis: The adis device - * - * Returns 0 on success, a negative error code otherwise - */ -static inline int adis_reset(struct adis *adis) -{ - int ret; - - mutex_lock(&adis->state_lock); - ret = __adis_reset(adis); - mutex_unlock(&adis->state_lock); - - return ret; -} - -int __adis_write_reg(struct adis *adis, unsigned int reg, +int adis_write_reg(struct adis *adis, unsigned int reg, unsigned int val, unsigned int size); -int __adis_read_reg(struct adis *adis, unsigned int reg, +int adis_read_reg(struct adis *adis, unsigned int reg, unsigned int *val, unsigned int size); -/** - * __adis_write_reg_8() - Write single byte to a register (unlocked) - * @adis: The adis device - * @reg: The address of the register to be written - * @value: The value to write - */ -static inline int __adis_write_reg_8(struct adis *adis, unsigned int reg, - uint8_t val) -{ - return __adis_write_reg(adis, reg, val, 1); -} - -/** - * __adis_write_reg_16() - Write 2 bytes to a pair of registers (unlocked) - * @adis: The adis device - * @reg: The address of the lower of the two registers - * @value: Value to be written - */ -static inline int __adis_write_reg_16(struct adis *adis, unsigned int reg, - uint16_t val) -{ - return __adis_write_reg(adis, reg, val, 2); -} - -/** - * __adis_write_reg_32() - write 4 bytes to four registers (unlocked) - * @adis: The adis device - * @reg: The address of the lower of the four register - * @value: Value to be written - */ -static inline int __adis_write_reg_32(struct adis *adis, 
unsigned int reg, - uint32_t val) -{ - return __adis_write_reg(adis, reg, val, 4); -} - -/** - * __adis_read_reg_16() - read 2 bytes from a 16-bit register (unlocked) - * @adis: The adis device - * @reg: The address of the lower of the two registers - * @val: The value read back from the device - */ -static inline int __adis_read_reg_16(struct adis *adis, unsigned int reg, - uint16_t *val) -{ - unsigned int tmp; - int ret; - - ret = __adis_read_reg(adis, reg, &tmp, 2); - if (ret == 0) - *val = tmp; - - return ret; -} - -/** - * __adis_read_reg_32() - read 4 bytes from a 32-bit register (unlocked) - * @adis: The adis device - * @reg: The address of the lower of the two registers - * @val: The value read back from the device - */ -static inline int __adis_read_reg_32(struct adis *adis, unsigned int reg, - uint32_t *val) -{ - unsigned int tmp; - int ret; - - ret = __adis_read_reg(adis, reg, &tmp, 4); - if (ret == 0) - *val = tmp; - - return ret; -} - -/** - * adis_write_reg() - write N bytes to register - * @adis: The adis device - * @reg: The address of the lower of the two registers - * @value: The value to write to device (up to 4 bytes) - * @size: The size of the @value (in bytes) - */ -static inline int adis_write_reg(struct adis *adis, unsigned int reg, - unsigned int val, unsigned int size) -{ - int ret; - - mutex_lock(&adis->state_lock); - ret = __adis_write_reg(adis, reg, val, size); - mutex_unlock(&adis->state_lock); - - return ret; -} - -/** - * adis_read_reg() - read N bytes from register - * @adis: The adis device - * @reg: The address of the lower of the two registers - * @val: The value read back from the device - * @size: The size of the @val buffer - */ -static int adis_read_reg(struct adis *adis, unsigned int reg, - unsigned int *val, unsigned int size) -{ - int ret; - - mutex_lock(&adis->state_lock); - ret = __adis_read_reg(adis, reg, val, size); - mutex_unlock(&adis->state_lock); - - return ret; -} - /** * adis_write_reg_8() - Write single byte to 
a register * @adis: The adis device @@ -319,8 +126,7 @@ static inline int adis_read_reg_16(struct adis *adis, unsigned int reg, int ret; ret = adis_read_reg(adis, reg, &tmp, 2); - if (ret == 0) - *val = tmp; + *val = tmp; return ret; } @@ -338,107 +144,15 @@ static inline int adis_read_reg_32(struct adis *adis, unsigned int reg, int ret; ret = adis_read_reg(adis, reg, &tmp, 4); - if (ret == 0) - *val = tmp; + *val = tmp; return ret; } -int __adis_update_bits_base(struct adis *adis, unsigned int reg, const u32 mask, - const u32 val, u8 size); -/** - * adis_update_bits_base() - ADIS Update bits function - Locked version - * @adis: The adis device - * @reg: The address of the lower of the two registers - * @mask: Bitmask to change - * @val: Value to be written - * @size: Size of the register to update - * - * Updates the desired bits of @reg in accordance with @mask and @val. - */ -static inline int adis_update_bits_base(struct adis *adis, unsigned int reg, - const u32 mask, const u32 val, u8 size) -{ - int ret; - - mutex_lock(&adis->state_lock); - ret = __adis_update_bits_base(adis, reg, mask, val, size); - mutex_unlock(&adis->state_lock); - return ret; -} - -/** - * adis_update_bits() - Wrapper macro for adis_update_bits_base - Locked version - * @adis: The adis device - * @reg: The address of the lower of the two registers - * @mask: Bitmask to change - * @val: Value to be written - * - * This macro evaluates the sizeof of @val at compile time and calls - * adis_update_bits_base() accordingly. Be aware that using MACROS/DEFINES for - * @val can lead to undesired behavior if the register to update is 16bit. 
- */ -#define adis_update_bits(adis, reg, mask, val) ({ \ - BUILD_BUG_ON(sizeof(val) == 1 || sizeof(val) == 8); \ - __builtin_choose_expr(sizeof(val) == 4, \ - adis_update_bits_base(adis, reg, mask, val, 4), \ - adis_update_bits_base(adis, reg, mask, val, 2)); \ -}) - -/** - * adis_update_bits() - Wrapper macro for adis_update_bits_base - * @adis: The adis device - * @reg: The address of the lower of the two registers - * @mask: Bitmask to change - * @val: Value to be written - * - * This macro evaluates the sizeof of @val at compile time and calls - * adis_update_bits_base() accordingly. Be aware that using MACROS/DEFINES for - * @val can lead to undesired behavior if the register to update is 16bit. - */ -#define __adis_update_bits(adis, reg, mask, val) ({ \ - BUILD_BUG_ON(sizeof(val) == 1 || sizeof(val) == 8); \ - __builtin_choose_expr(sizeof(val) == 4, \ - __adis_update_bits_base(adis, reg, mask, val, 4), \ - __adis_update_bits_base(adis, reg, mask, val, 2)); \ -}) - int adis_enable_irq(struct adis *adis, bool enable); -int __adis_check_status(struct adis *adis); -int __adis_initial_startup(struct adis *adis); +int adis_check_status(struct adis *adis); -static inline int adis_check_status(struct adis *adis) -{ - int ret; - - mutex_lock(&adis->state_lock); - ret = __adis_check_status(adis); - mutex_unlock(&adis->state_lock); - - return ret; -} - -/* locked version of __adis_initial_startup() */ -static inline int adis_initial_startup(struct adis *adis) -{ - int ret; - - mutex_lock(&adis->state_lock); - ret = __adis_initial_startup(adis); - mutex_unlock(&adis->state_lock); - - return ret; -} - -static inline void adis_dev_lock(struct adis *adis) -{ - mutex_lock(&adis->state_lock); -} - -static inline void adis_dev_unlock(struct adis *adis) -{ - mutex_unlock(&adis->state_lock); -} +int adis_initial_startup(struct adis *adis); int adis_single_conversion(struct iio_dev *indio_dev, const struct iio_chan_spec *chan, unsigned int error_mask, @@ -518,30 +232,40 @@ int 
adis_single_conversion(struct iio_dev *indio_dev, #ifdef CONFIG_IIO_ADIS_LIB_BUFFER -int -devm_adis_setup_buffer_and_trigger(struct adis *adis, struct iio_dev *indio_dev, - irq_handler_t trigger_handler); +int adis_setup_buffer_and_trigger(struct adis *adis, + struct iio_dev *indio_dev, irqreturn_t (*trigger_handler)(int, void *)); +void adis_cleanup_buffer_and_trigger(struct adis *adis, + struct iio_dev *indio_dev); -int devm_adis_probe_trigger(struct adis *adis, struct iio_dev *indio_dev); +int adis_probe_trigger(struct adis *adis, struct iio_dev *indio_dev); +void adis_remove_trigger(struct adis *adis); int adis_update_scan_mode(struct iio_dev *indio_dev, const unsigned long *scan_mask); #else /* CONFIG_IIO_BUFFER */ -static inline int -devm_adis_setup_buffer_and_trigger(struct adis *adis, struct iio_dev *indio_dev, - irq_handler_t trigger_handler) +static inline int adis_setup_buffer_and_trigger(struct adis *adis, + struct iio_dev *indio_dev, irqreturn_t (*trigger_handler)(int, void *)) { return 0; } -static inline int devm_adis_probe_trigger(struct adis *adis, - struct iio_dev *indio_dev) +static inline void adis_cleanup_buffer_and_trigger(struct adis *adis, + struct iio_dev *indio_dev) +{ +} + +static inline int adis_probe_trigger(struct adis *adis, + struct iio_dev *indio_dev) { return 0; } +static inline void adis_remove_trigger(struct adis *adis) +{ +} + #define adis_update_scan_mode NULL #endif /* CONFIG_IIO_BUFFER */ diff --git a/include/linux/iio/kfifo_buf.h b/include/linux/iio/kfifo_buf.h index ccd2ceae7b..1683bc710d 100644 --- a/include/linux/iio/kfifo_buf.h +++ b/include/linux/iio/kfifo_buf.h @@ -1,22 +1,14 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef __LINUX_IIO_KFIFO_BUF_H__ #define __LINUX_IIO_KFIFO_BUF_H__ -struct iio_buffer; -struct iio_buffer_setup_ops; -struct iio_dev; -struct device; +#include +#include +#include struct iio_buffer *iio_kfifo_allocate(void); void iio_kfifo_free(struct iio_buffer *r); -int 
devm_iio_kfifo_buffer_setup_ext(struct device *dev, - struct iio_dev *indio_dev, - int mode_flags, - const struct iio_buffer_setup_ops *setup_ops, - const struct attribute **buffer_attrs); - -#define devm_iio_kfifo_buffer_setup(dev, indio_dev, mode_flags, setup_ops) \ - devm_iio_kfifo_buffer_setup_ext((dev), (indio_dev), (mode_flags), (setup_ops), NULL) +struct iio_buffer *devm_iio_kfifo_allocate(struct device *dev); +void devm_iio_kfifo_free(struct device *dev, struct iio_buffer *r); #endif diff --git a/include/linux/iio/machine.h b/include/linux/iio/machine.h index fe7ccbb811..1601a2a63a 100644 --- a/include/linux/iio/machine.h +++ b/include/linux/iio/machine.h @@ -1,8 +1,11 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * Industrial I/O in kernel access map definitions for board files. * * Copyright (c) 2011 Jonathan Cameron + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published by + * the Free Software Foundation. 
*/ #ifndef __LINUX_IIO_MACHINE_H__ @@ -25,11 +28,4 @@ struct iio_map { void *consumer_data; }; -#define IIO_MAP(_provider_channel, _consumer_dev_name, _consumer_channel) \ -{ \ - .adc_channel_label = _provider_channel, \ - .consumer_dev_name = _consumer_dev_name, \ - .consumer_channel = _consumer_channel, \ -} - #endif diff --git a/include/linux/iio/magnetometer/ak8975.h b/include/linux/iio/magnetometer/ak8975.h new file mode 100644 index 0000000000..c8400959d1 --- /dev/null +++ b/include/linux/iio/magnetometer/ak8975.h @@ -0,0 +1,16 @@ +#ifndef __IIO_MAGNETOMETER_AK8975_H__ +#define __IIO_MAGNETOMETER_AK8975_H__ + +#include + +/** + * struct ak8975_platform_data - AK8975 magnetometer driver platform data + * @eoc_gpio: data ready event gpio + * @orientation: mounting matrix relative to main hardware + */ +struct ak8975_platform_data { + int eoc_gpio; + struct iio_mount_matrix orientation; +}; + +#endif diff --git a/include/linux/iio/sw_device.h b/include/linux/iio/sw_device.h index eff1e6b259..fa79319330 100644 --- a/include/linux/iio/sw_device.h +++ b/include/linux/iio/sw_device.h @@ -1,8 +1,11 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * Industrial I/O software device interface * * Copyright (c) 2016 Intel Corporation + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published by + * the Free Software Foundation. 
*/ #ifndef __IIO_SW_DEVICE @@ -57,7 +60,7 @@ void iio_sw_device_type_configfs_unregister(struct iio_sw_device_type *dt); static inline void iio_swd_group_init_type_name(struct iio_sw_device *d, const char *name, - const struct config_item_type *type) + struct config_item_type *type) { #if IS_ENABLED(CONFIG_CONFIGFS_FS) config_group_init_type_name(&d->group, name, type); diff --git a/include/linux/iio/sw_trigger.h b/include/linux/iio/sw_trigger.h index 47de2443e9..c97eab6755 100644 --- a/include/linux/iio/sw_trigger.h +++ b/include/linux/iio/sw_trigger.h @@ -1,8 +1,11 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * Industrial I/O software trigger interface * * Copyright (c) 2015 Intel Corporation + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published by + * the Free Software Foundation. */ #ifndef __IIO_SW_TRIGGER @@ -57,7 +60,7 @@ void iio_sw_trigger_type_configfs_unregister(struct iio_sw_trigger_type *tt); static inline void iio_swt_group_init_type_name(struct iio_sw_trigger *t, const char *name, - const struct config_item_type *type) + struct config_item_type *type) { #if IS_ENABLED(CONFIG_CONFIGFS_FS) config_group_init_type_name(&t->group, name, type); diff --git a/include/linux/iio/sysfs.h b/include/linux/iio/sysfs.h index e51fba66de..9cd8f74721 100644 --- a/include/linux/iio/sysfs.h +++ b/include/linux/iio/sysfs.h @@ -1,15 +1,17 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* The industrial I/O core * *Copyright (c) 2008 Jonathan Cameron * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published by + * the Free Software Foundation. 
+ * * General attributes */ #ifndef _INDUSTRIAL_IO_SYSFS_H_ #define _INDUSTRIAL_IO_SYSFS_H_ -struct iio_buffer; struct iio_chan_spec; /** @@ -18,14 +20,12 @@ struct iio_chan_spec; * @address: associated register address * @l: list head for maintaining list of dynamically created attrs * @c: specification for the underlying channel - * @buffer: the IIO buffer to which this attribute belongs to (if any) */ struct iio_dev_attr { struct device_attribute dev_attr; u64 address; struct list_head l; struct iio_chan_spec const *c; - struct iio_buffer *buffer; }; #define to_iio_dev_attr(_dev_attr) \ @@ -55,34 +55,10 @@ struct iio_const_attr { { .dev_attr = __ATTR(_name, _mode, _show, _store), \ .address = _addr } -#define IIO_ATTR_RO(_name, _addr) \ - { .dev_attr = __ATTR_RO(_name), \ - .address = _addr } - -#define IIO_ATTR_WO(_name, _addr) \ - { .dev_attr = __ATTR_WO(_name), \ - .address = _addr } - -#define IIO_ATTR_RW(_name, _addr) \ - { .dev_attr = __ATTR_RW(_name), \ - .address = _addr } - #define IIO_DEVICE_ATTR(_name, _mode, _show, _store, _addr) \ struct iio_dev_attr iio_dev_attr_##_name \ = IIO_ATTR(_name, _mode, _show, _store, _addr) -#define IIO_DEVICE_ATTR_RO(_name, _addr) \ - struct iio_dev_attr iio_dev_attr_##_name \ - = IIO_ATTR_RO(_name, _addr) - -#define IIO_DEVICE_ATTR_WO(_name, _addr) \ - struct iio_dev_attr iio_dev_attr_##_name \ - = IIO_ATTR_WO(_name, _addr) - -#define IIO_DEVICE_ATTR_RW(_name, _addr) \ - struct iio_dev_attr iio_dev_attr_##_name \ - = IIO_ATTR_RW(_name, _addr) - #define IIO_DEVICE_ATTR_NAMED(_vname, _name, _mode, _show, _store, _addr) \ struct iio_dev_attr iio_dev_attr_##_vname \ = IIO_ATTR(_name, _mode, _show, _store, _addr) diff --git a/include/linux/iio/trigger.h b/include/linux/iio/trigger.h index 096f68dd2e..4f1154f7a3 100644 --- a/include/linux/iio/trigger.h +++ b/include/linux/iio/trigger.h @@ -1,7 +1,10 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* The industrial I/O core, trigger handling functions * * Copyright (c) 2008 
Jonathan Cameron + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published by + * the Free Software Foundation. */ #include #include @@ -20,8 +23,9 @@ struct iio_trigger; /** * struct iio_trigger_ops - operations structure for an iio_trigger. + * @owner: used to monitor usage count of the trigger. * @set_trigger_state: switch on/off the trigger on demand - * @reenable: function to reenable the trigger when the + * @try_reenable: function to reenable the trigger when the * use count is zero (may be NULL) * @validate_device: function to validate the device when the * current trigger gets changed. @@ -30,8 +34,9 @@ struct iio_trigger; * instances of a given device. **/ struct iio_trigger_ops { + struct module *owner; int (*set_trigger_state)(struct iio_trigger *trig, bool state); - void (*reenable)(struct iio_trigger *trig); + int (*try_reenable)(struct iio_trigger *trig); int (*validate_device)(struct iio_trigger *trig, struct iio_dev *indio_dev); }; @@ -40,13 +45,12 @@ struct iio_trigger_ops { /** * struct iio_trigger - industrial I/O trigger device * @ops: [DRIVER] operations structure - * @owner: [INTERN] owner of this driver module * @id: [INTERN] unique id number * @name: [DRIVER] unique name * @dev: [DRIVER] associated device (if relevant) * @list: [INTERN] used in maintenance of global trigger list * @alloc_list: [DRIVER] used for driver specific trigger list - * @use_count: [INTERN] use count for the trigger. + * @use_count: use count for the trigger * @subirq_chip: [INTERN] associate 'virtual' irq chip. * @subirq_base: [INTERN] base number for irqs provided by trigger. * @subirqs: [INTERN] information about the 'child' irqs. 
@@ -58,7 +62,6 @@ struct iio_trigger_ops { **/ struct iio_trigger { const struct iio_trigger_ops *ops; - struct module *owner; int id; const char *name; struct device dev; @@ -84,20 +87,20 @@ static inline struct iio_trigger *to_iio_trigger(struct device *d) static inline void iio_trigger_put(struct iio_trigger *trig) { - module_put(trig->owner); + module_put(trig->ops->owner); put_device(&trig->dev); } static inline struct iio_trigger *iio_trigger_get(struct iio_trigger *trig) { get_device(&trig->dev); - __module_get(trig->owner); + __module_get(trig->ops->owner); return trig; } /** - * iio_trigger_set_drvdata() - Set trigger driver data + * iio_device_set_drvdata() - Set trigger driver data * @trig: IIO trigger structure * @data: Driver specific data * @@ -124,16 +127,10 @@ static inline void *iio_trigger_get_drvdata(struct iio_trigger *trig) * iio_trigger_register() - register a trigger with the IIO core * @trig_info: trigger to be registered **/ -#define iio_trigger_register(trig_info) \ - __iio_trigger_register((trig_info), THIS_MODULE) -int __iio_trigger_register(struct iio_trigger *trig_info, - struct module *this_mod); +int iio_trigger_register(struct iio_trigger *trig_info); -#define devm_iio_trigger_register(dev, trig_info) \ - __devm_iio_trigger_register((dev), (trig_info), THIS_MODULE) -int __devm_iio_trigger_register(struct device *dev, - struct iio_trigger *trig_info, - struct module *this_mod); +int devm_iio_trigger_register(struct device *dev, + struct iio_trigger *trig_info); /** * iio_trigger_unregister() - unregister a trigger from the core @@ -141,11 +138,14 @@ int __devm_iio_trigger_register(struct device *dev, **/ void iio_trigger_unregister(struct iio_trigger *trig_info); +void devm_iio_trigger_unregister(struct device *dev, + struct iio_trigger *trig_info); + /** * iio_trigger_set_immutable() - set an immutable trigger on destination * - * @indio_dev: IIO device structure containing the device - * @trig: trigger to assign to device + * 
@indio_dev - IIO device structure containing the device + * @trig - trigger to assign to device * **/ int iio_trigger_set_immutable(struct iio_dev *indio_dev, struct iio_trigger *trig); @@ -161,8 +161,7 @@ void iio_trigger_poll_chained(struct iio_trigger *trig); irqreturn_t iio_trigger_generic_data_rdy_poll(int irq, void *private); -__printf(2, 3) -struct iio_trigger *iio_trigger_alloc(struct device *parent, const char *fmt, ...); +__printf(1, 2) struct iio_trigger *iio_trigger_alloc(const char *fmt, ...); void iio_trigger_free(struct iio_trigger *trig); /** @@ -171,8 +170,6 @@ void iio_trigger_free(struct iio_trigger *trig); */ bool iio_trigger_using_own(struct iio_dev *indio_dev); -int iio_trigger_validate_own_device(struct iio_trigger *trig, - struct iio_dev *indio_dev); #else struct iio_trigger; diff --git a/include/linux/iio/trigger_consumer.h b/include/linux/iio/trigger_consumer.h index 2c05dfad88..c4f8c74096 100644 --- a/include/linux/iio/trigger_consumer.h +++ b/include/linux/iio/trigger_consumer.h @@ -1,7 +1,10 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* The industrial I/O core, trigger consumer functions * * Copyright (c) 2008-2011 Jonathan Cameron + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published by + * the Free Software Foundation. 
*/ #ifndef __LINUX_IIO_TRIGGER_CONSUMER_H__ @@ -38,7 +41,7 @@ struct iio_poll_func { }; -__printf(5, 6) struct iio_poll_func +struct iio_poll_func *iio_alloc_pollfunc(irqreturn_t (*h)(int irq, void *p), irqreturn_t (*thread)(int irq, void *p), int type, @@ -50,4 +53,11 @@ irqreturn_t iio_pollfunc_store_time(int irq, void *p); void iio_trigger_notify_done(struct iio_trigger *trig); +/* + * Two functions for common case where all that happens is a pollfunc + * is attached and detached from a trigger + */ +int iio_triggered_buffer_postenable(struct iio_dev *indio_dev); +int iio_triggered_buffer_predisable(struct iio_dev *indio_dev); + #endif diff --git a/include/linux/iio/triggered_buffer.h b/include/linux/iio/triggered_buffer.h index 7f154d1f87..3014561677 100644 --- a/include/linux/iio/triggered_buffer.h +++ b/include/linux/iio/triggered_buffer.h @@ -1,31 +1,23 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_IIO_TRIGGERED_BUFFER_H_ #define _LINUX_IIO_TRIGGERED_BUFFER_H_ #include -struct attribute; struct iio_dev; struct iio_buffer_setup_ops; -int iio_triggered_buffer_setup_ext(struct iio_dev *indio_dev, +int iio_triggered_buffer_setup(struct iio_dev *indio_dev, irqreturn_t (*h)(int irq, void *p), irqreturn_t (*thread)(int irq, void *p), - const struct iio_buffer_setup_ops *setup_ops, - const struct attribute **buffer_attrs); + const struct iio_buffer_setup_ops *setup_ops); void iio_triggered_buffer_cleanup(struct iio_dev *indio_dev); -#define iio_triggered_buffer_setup(indio_dev, h, thread, setup_ops) \ - iio_triggered_buffer_setup_ext((indio_dev), (h), (thread), (setup_ops), NULL) - -int devm_iio_triggered_buffer_setup_ext(struct device *dev, - struct iio_dev *indio_dev, - irqreturn_t (*h)(int irq, void *p), - irqreturn_t (*thread)(int irq, void *p), - const struct iio_buffer_setup_ops *ops, - const struct attribute **buffer_attrs); - -#define devm_iio_triggered_buffer_setup(dev, indio_dev, h, thread, setup_ops) \ - 
devm_iio_triggered_buffer_setup_ext((dev), (indio_dev), (h), (thread), (setup_ops), NULL) +int devm_iio_triggered_buffer_setup(struct device *dev, + struct iio_dev *indio_dev, + irqreturn_t (*h)(int irq, void *p), + irqreturn_t (*thread)(int irq, void *p), + const struct iio_buffer_setup_ops *ops); +void devm_iio_triggered_buffer_cleanup(struct device *dev, + struct iio_dev *indio_dev); #endif diff --git a/include/linux/iio/triggered_event.h b/include/linux/iio/triggered_event.h index 13250fd997..8fe8537085 100644 --- a/include/linux/iio/triggered_event.h +++ b/include/linux/iio/triggered_event.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_IIO_TRIGGERED_EVENT_H_ #define _LINUX_IIO_TRIGGERED_EVENT_H_ diff --git a/include/linux/iio/types.h b/include/linux/iio/types.h index 84b3f8175c..32b5795250 100644 --- a/include/linux/iio/types.h +++ b/include/linux/iio/types.h @@ -1,7 +1,10 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* industrial I/O data types needed both in and out of kernel * * Copyright (c) 2008 Jonathan Cameron + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published by + * the Free Software Foundation. 
*/ #ifndef _IIO_TYPES_H_ @@ -16,7 +19,6 @@ enum iio_event_info { IIO_EV_INFO_PERIOD, IIO_EV_INFO_HIGH_PASS_FILTER_3DB, IIO_EV_INFO_LOW_PASS_FILTER_3DB, - IIO_EV_INFO_TIMEOUT, }; #define IIO_VAL_INT 1 @@ -26,42 +28,5 @@ enum iio_event_info { #define IIO_VAL_INT_MULTIPLE 5 #define IIO_VAL_FRACTIONAL 10 #define IIO_VAL_FRACTIONAL_LOG2 11 -#define IIO_VAL_CHAR 12 - -enum iio_available_type { - IIO_AVAIL_LIST, - IIO_AVAIL_RANGE, -}; - -enum iio_chan_info_enum { - IIO_CHAN_INFO_RAW = 0, - IIO_CHAN_INFO_PROCESSED, - IIO_CHAN_INFO_SCALE, - IIO_CHAN_INFO_OFFSET, - IIO_CHAN_INFO_CALIBSCALE, - IIO_CHAN_INFO_CALIBBIAS, - IIO_CHAN_INFO_PEAK, - IIO_CHAN_INFO_PEAK_SCALE, - IIO_CHAN_INFO_QUADRATURE_CORRECTION_RAW, - IIO_CHAN_INFO_AVERAGE_RAW, - IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY, - IIO_CHAN_INFO_HIGH_PASS_FILTER_3DB_FREQUENCY, - IIO_CHAN_INFO_SAMP_FREQ, - IIO_CHAN_INFO_FREQUENCY, - IIO_CHAN_INFO_PHASE, - IIO_CHAN_INFO_HARDWAREGAIN, - IIO_CHAN_INFO_HYSTERESIS, - IIO_CHAN_INFO_HYSTERESIS_RELATIVE, - IIO_CHAN_INFO_INT_TIME, - IIO_CHAN_INFO_ENABLE, - IIO_CHAN_INFO_CALIBHEIGHT, - IIO_CHAN_INFO_CALIBWEIGHT, - IIO_CHAN_INFO_DEBOUNCE_COUNT, - IIO_CHAN_INFO_DEBOUNCE_TIME, - IIO_CHAN_INFO_CALIBEMISSIVITY, - IIO_CHAN_INFO_OVERSAMPLING_RATIO, - IIO_CHAN_INFO_THERMOCOUPLE_TYPE, - IIO_CHAN_INFO_CALIBAMBIENT, -}; #endif /* _IIO_TYPES_H_ */ diff --git a/include/linux/ima.h b/include/linux/ima.h index b6ab66a546..0eb7c2e7f0 100644 --- a/include/linux/ima.h +++ b/include/linux/ima.h @@ -1,91 +1,39 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright (C) 2008 IBM Corporation * Author: Mimi Zohar + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, version 2 of the License. 
*/ #ifndef _LINUX_IMA_H #define _LINUX_IMA_H -#include #include -#include -#include -#include struct linux_binprm; #ifdef CONFIG_IMA -extern enum hash_algo ima_get_current_hash_algo(void); extern int ima_bprm_check(struct linux_binprm *bprm); -extern int ima_file_check(struct file *file, int mask); -extern void ima_post_create_tmpfile(struct user_namespace *mnt_userns, - struct inode *inode); +extern int ima_file_check(struct file *file, int mask, int opened); extern void ima_file_free(struct file *file); extern int ima_file_mmap(struct file *file, unsigned long prot); -extern int ima_file_mprotect(struct vm_area_struct *vma, unsigned long prot); -extern int ima_load_data(enum kernel_load_data_id id, bool contents); -extern int ima_post_load_data(char *buf, loff_t size, - enum kernel_load_data_id id, char *description); -extern int ima_read_file(struct file *file, enum kernel_read_file_id id, - bool contents); +extern int ima_read_file(struct file *file, enum kernel_read_file_id id); extern int ima_post_read_file(struct file *file, void *buf, loff_t size, enum kernel_read_file_id id); -extern void ima_post_path_mknod(struct user_namespace *mnt_userns, - struct dentry *dentry); -extern int ima_file_hash(struct file *file, char *buf, size_t buf_size); -extern int ima_inode_hash(struct inode *inode, char *buf, size_t buf_size); -extern void ima_kexec_cmdline(int kernel_fd, const void *buf, int size); -extern int ima_measure_critical_data(const char *event_label, - const char *event_name, - const void *buf, size_t buf_len, - bool hash, u8 *digest, size_t digest_len); - -#ifdef CONFIG_IMA_APPRAISE_BOOTPARAM -extern void ima_appraise_parse_cmdline(void); -#else -static inline void ima_appraise_parse_cmdline(void) {} -#endif - -#ifdef CONFIG_IMA_KEXEC -extern void ima_add_kexec_buffer(struct kimage *image); -#endif - -#ifdef CONFIG_IMA_SECURE_AND_OR_TRUSTED_BOOT -extern bool arch_ima_get_secureboot(void); -extern const char * const *arch_get_ima_policy(void); -#else 
-static inline bool arch_ima_get_secureboot(void) -{ - return false; -} - -static inline const char * const *arch_get_ima_policy(void) -{ - return NULL; -} -#endif +extern void ima_post_path_mknod(struct dentry *dentry); #else -static inline enum hash_algo ima_get_current_hash_algo(void) -{ - return HASH_ALGO__LAST; -} - static inline int ima_bprm_check(struct linux_binprm *bprm) { return 0; } -static inline int ima_file_check(struct file *file, int mask) +static inline int ima_file_check(struct file *file, int mask, int opened) { return 0; } -static inline void ima_post_create_tmpfile(struct user_namespace *mnt_userns, - struct inode *inode) -{ -} - static inline void ima_file_free(struct file *file) { return; @@ -96,26 +44,7 @@ static inline int ima_file_mmap(struct file *file, unsigned long prot) return 0; } -static inline int ima_file_mprotect(struct vm_area_struct *vma, - unsigned long prot) -{ - return 0; -} - -static inline int ima_load_data(enum kernel_load_data_id id, bool contents) -{ - return 0; -} - -static inline int ima_post_load_data(char *buf, loff_t size, - enum kernel_load_data_id id, - char *description) -{ - return 0; -} - -static inline int ima_read_file(struct file *file, enum kernel_read_file_id id, - bool contents) +static inline int ima_read_file(struct file *file, enum kernel_read_file_id id) { return 0; } @@ -126,71 +55,20 @@ static inline int ima_post_read_file(struct file *file, void *buf, loff_t size, return 0; } -static inline void ima_post_path_mknod(struct user_namespace *mnt_userns, - struct dentry *dentry) +static inline void ima_post_path_mknod(struct dentry *dentry) { return; } -static inline int ima_file_hash(struct file *file, char *buf, size_t buf_size) -{ - return -EOPNOTSUPP; -} - -static inline int ima_inode_hash(struct inode *inode, char *buf, size_t buf_size) -{ - return -EOPNOTSUPP; -} - -static inline void ima_kexec_cmdline(int kernel_fd, const void *buf, int size) {} - -static inline int 
ima_measure_critical_data(const char *event_label, - const char *event_name, - const void *buf, size_t buf_len, - bool hash, u8 *digest, - size_t digest_len) -{ - return -ENOENT; -} - #endif /* CONFIG_IMA */ -#ifndef CONFIG_IMA_KEXEC -struct kimage; - -static inline void ima_add_kexec_buffer(struct kimage *image) -{} -#endif - -#ifdef CONFIG_IMA_MEASURE_ASYMMETRIC_KEYS -extern void ima_post_key_create_or_update(struct key *keyring, - struct key *key, - const void *payload, size_t plen, - unsigned long flags, bool create); -#else -static inline void ima_post_key_create_or_update(struct key *keyring, - struct key *key, - const void *payload, - size_t plen, - unsigned long flags, - bool create) {} -#endif /* CONFIG_IMA_MEASURE_ASYMMETRIC_KEYS */ - #ifdef CONFIG_IMA_APPRAISE -extern bool is_ima_appraise_enabled(void); -extern void ima_inode_post_setattr(struct user_namespace *mnt_userns, - struct dentry *dentry); +extern void ima_inode_post_setattr(struct dentry *dentry); extern int ima_inode_setxattr(struct dentry *dentry, const char *xattr_name, const void *xattr_value, size_t xattr_value_len); extern int ima_inode_removexattr(struct dentry *dentry, const char *xattr_name); #else -static inline bool is_ima_appraise_enabled(void) -{ - return 0; -} - -static inline void ima_inode_post_setattr(struct user_namespace *mnt_userns, - struct dentry *dentry) +static inline void ima_inode_post_setattr(struct dentry *dentry) { return; } @@ -209,13 +87,4 @@ static inline int ima_inode_removexattr(struct dentry *dentry, return 0; } #endif /* CONFIG_IMA_APPRAISE */ - -#if defined(CONFIG_IMA_APPRAISE) && defined(CONFIG_INTEGRITY_TRUSTED_KEYRING) -extern bool ima_appraise_signature(enum kernel_read_file_id func); -#else -static inline bool ima_appraise_signature(enum kernel_read_file_id func) -{ - return false; -} -#endif /* CONFIG_IMA_APPRAISE && CONFIG_INTEGRITY_TRUSTED_KEYRING */ #endif /* _LINUX_IMA_H */ diff --git a/include/linux/in.h b/include/linux/in.h index 
1873ef6426..31b4937347 100644 --- a/include/linux/in.h +++ b/include/linux/in.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * INET An implementation of the TCP/IP protocol suite for the LINUX * operating system. INET is implemented using the BSD Socket @@ -10,6 +9,11 @@ * * Authors: Original taken from the GNU Project file. * Fred N. van Kempen, + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. */ #ifndef _LINUX_IN_H #define _LINUX_IN_H @@ -56,14 +60,9 @@ static inline bool ipv4_is_lbcast(__be32 addr) return addr == htonl(INADDR_BROADCAST); } -static inline bool ipv4_is_all_snoopers(__be32 addr) -{ - return addr == htonl(INADDR_ALLSNOOPERS_GROUP); -} - static inline bool ipv4_is_zeronet(__be32 addr) { - return (addr == 0); + return (addr & htonl(0xff000000)) == htonl(0x00000000); } /* Special-Use IPv4 Addresses (RFC3330) */ diff --git a/include/linux/in6.h b/include/linux/in6.h index 0777a21cbf..34edf1f6c9 100644 --- a/include/linux/in6.h +++ b/include/linux/in6.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * Types and definitions for AF_INET6 * Linux INET6 implementation @@ -12,6 +11,11 @@ * * Advanced Sockets API for IPv6 * + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. 
*/ #ifndef _LINUX_IN6_H #define _LINUX_IN6_H diff --git a/include/linux/inet.h b/include/linux/inet.h index bd8276e96e..4cca05c967 100644 --- a/include/linux/inet.h +++ b/include/linux/inet.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * Swansea University Computer Society NET3 * @@ -34,13 +33,16 @@ * $Id: udp.h,v 0.8.4.1 1992/11/10 00:17:18 bir7 Exp $ * $Id: we.c,v 0.8.4.10 1993/01/23 18:00:11 bir7 Exp $ * $Id: wereg.h,v 0.8.4.1 1992/11/10 00:17:18 bir7 Exp $ + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. */ #ifndef _LINUX_INET_H #define _LINUX_INET_H #include -#include -#include /* * These mimic similar macros defined in user-space for inet_ntop(3). @@ -52,9 +54,4 @@ extern __be32 in_aton(const char *str); extern int in4_pton(const char *src, int srclen, u8 *dst, int delim, const char **end); extern int in6_pton(const char *src, int srclen, u8 *dst, int delim, const char **end); - -extern int inet_pton_with_scope(struct net *net, unsigned short af, - const char *src, const char *port, struct sockaddr_storage *addr); -extern bool inet_addr_is_any(struct sockaddr *addr); - #endif /* _LINUX_INET_H */ diff --git a/include/linux/inet_diag.h b/include/linux/inet_diag.h index 84abb30a3f..65da430e26 100644 --- a/include/linux/inet_diag.h +++ b/include/linux/inet_diag.h @@ -1,31 +1,30 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _INET_DIAG_H_ #define _INET_DIAG_H_ 1 -#include #include +struct net; +struct sock; struct inet_hashinfo; +struct nlattr; +struct nlmsghdr; +struct sk_buff; +struct netlink_callback; struct inet_diag_handler { void (*dump)(struct sk_buff *skb, struct netlink_callback *cb, - const struct inet_diag_req_v2 *r); + const struct inet_diag_req_v2 *r, + struct nlattr *bc); - int (*dump_one)(struct netlink_callback 
*cb, + int (*dump_one)(struct sk_buff *in_skb, + const struct nlmsghdr *nlh, const struct inet_diag_req_v2 *req); void (*idiag_get_info)(struct sock *sk, struct inet_diag_msg *r, void *info); - int (*idiag_get_aux)(struct sock *sk, - bool net_admin, - struct sk_buff *skb); - - size_t (*idiag_get_aux_size)(struct sock *sk, - bool net_admin); - int (*destroy)(struct sk_buff *in_skb, const struct inet_diag_req_v2 *req); @@ -33,25 +32,18 @@ struct inet_diag_handler { __u16 idiag_info_size; }; -struct bpf_sk_storage_diag; -struct inet_diag_dump_data { - struct nlattr *req_nlas[__INET_DIAG_REQ_MAX]; -#define inet_diag_nla_bc req_nlas[INET_DIAG_REQ_BYTECODE] -#define inet_diag_nla_bpf_stgs req_nlas[INET_DIAG_REQ_SK_BPF_STORAGES] - - struct bpf_sk_storage_diag *bpf_stg_diag; -}; - struct inet_connection_sock; int inet_sk_diag_fill(struct sock *sk, struct inet_connection_sock *icsk, - struct sk_buff *skb, struct netlink_callback *cb, - const struct inet_diag_req_v2 *req, - u16 nlmsg_flags, bool net_admin); + struct sk_buff *skb, const struct inet_diag_req_v2 *req, + struct user_namespace *user_ns, + u32 pid, u32 seq, u16 nlmsg_flags, + const struct nlmsghdr *unlh, bool net_admin); void inet_diag_dump_icsk(struct inet_hashinfo *h, struct sk_buff *skb, struct netlink_callback *cb, - const struct inet_diag_req_v2 *r); + const struct inet_diag_req_v2 *r, + struct nlattr *bc); int inet_diag_dump_one_icsk(struct inet_hashinfo *hashinfo, - struct netlink_callback *cb, + struct sk_buff *in_skb, const struct nlmsghdr *nlh, const struct inet_diag_req_v2 *req); struct sock *inet_diag_find_one_icsk(struct net *net, @@ -62,23 +54,6 @@ int inet_diag_bc_sk(const struct nlattr *_bc, struct sock *sk); void inet_diag_msg_common_fill(struct inet_diag_msg *r, struct sock *sk); -static inline size_t inet_diag_msg_attrs_size(void) -{ - return nla_total_size(1) /* INET_DIAG_SHUTDOWN */ - + nla_total_size(1) /* INET_DIAG_TOS */ -#if IS_ENABLED(CONFIG_IPV6) - + nla_total_size(1) /* INET_DIAG_TCLASS 
*/ - + nla_total_size(1) /* INET_DIAG_SKV6ONLY */ -#endif - + nla_total_size(4) /* INET_DIAG_MARK */ - + nla_total_size(4) /* INET_DIAG_CLASS_ID */ -#ifdef CONFIG_SOCK_CGROUP_DATA - + nla_total_size_64bit(sizeof(u64)) /* INET_DIAG_CGROUP_ID */ -#endif - + nla_total_size(sizeof(struct inet_diag_sockopt)) - /* INET_DIAG_SOCKOPT */ - ; -} int inet_diag_msg_attrs_fill(struct sock *sk, struct sk_buff *skb, struct inet_diag_msg *r, int ext, struct user_namespace *user_ns, bool net_admin); diff --git a/include/linux/inetdevice.h b/include/linux/inetdevice.h index a038feb63f..ee971f335a 100644 --- a/include/linux/inetdevice.h +++ b/include/linux/inetdevice.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_INETDEVICE_H #define _LINUX_INETDEVICE_H @@ -12,7 +11,6 @@ #include #include #include -#include struct ipv4_devconf { void *sysctl; @@ -24,9 +22,9 @@ struct ipv4_devconf { struct in_device { struct net_device *dev; - refcount_t refcnt; + atomic_t refcnt; int dead; - struct in_ifaddr __rcu *ifa_list;/* IP ifaddr chain */ + struct in_ifaddr *ifa_list; /* IP ifaddr chain */ struct ip_mc_list __rcu *mc_list; /* IP multicast filter chain */ struct ip_mc_list __rcu * __rcu *mc_hash; @@ -37,11 +35,9 @@ struct in_device { unsigned long mr_v1_seen; unsigned long mr_v2_seen; unsigned long mr_maxdelay; - unsigned long mr_qi; /* Query Interval */ - unsigned long mr_qri; /* Query Response Interval */ - unsigned char mr_qrv; /* Query Robustness Variable */ + unsigned char mr_qrv; unsigned char mr_gq_running; - u32 mr_ifc_count; + unsigned char mr_ifc_count; struct timer_list mr_gq_timer; /* general query timer */ struct timer_list mr_ifc_timer; /* interface change timer */ @@ -95,7 +91,6 @@ static inline void ipv4_devconf_setall(struct in_device *in_dev) #define IN_DEV_FORWARD(in_dev) IN_DEV_CONF_GET((in_dev), FORWARDING) #define IN_DEV_MFORWARD(in_dev) IN_DEV_ANDCONF((in_dev), MC_FORWARDING) -#define IN_DEV_BFORWARD(in_dev) IN_DEV_ANDCONF((in_dev), 
BC_FORWARDING) #define IN_DEV_RPFILTER(in_dev) IN_DEV_MAXCONF((in_dev), RP_FILTER) #define IN_DEV_SRC_VMARK(in_dev) IN_DEV_ORCONF((in_dev), SRC_VMARK) #define IN_DEV_SOURCE_ROUTE(in_dev) IN_DEV_ANDCONF((in_dev), \ @@ -105,7 +100,7 @@ static inline void ipv4_devconf_setall(struct in_device *in_dev) #define IN_DEV_LOG_MARTIANS(in_dev) IN_DEV_ORCONF((in_dev), LOG_MARTIANS) #define IN_DEV_PROXY_ARP(in_dev) IN_DEV_ORCONF((in_dev), PROXY_ARP) -#define IN_DEV_PROXY_ARP_PVLAN(in_dev) IN_DEV_ORCONF((in_dev), PROXY_ARP_PVLAN) +#define IN_DEV_PROXY_ARP_PVLAN(in_dev) IN_DEV_CONF_GET(in_dev, PROXY_ARP_PVLAN) #define IN_DEV_SHARED_MEDIA(in_dev) IN_DEV_ORCONF((in_dev), SHARED_MEDIA) #define IN_DEV_TX_REDIRECTS(in_dev) IN_DEV_ORCONF((in_dev), SEND_REDIRECTS) #define IN_DEV_SEC_REDIRECTS(in_dev) IN_DEV_ORCONF((in_dev), \ @@ -126,7 +121,7 @@ static inline void ipv4_devconf_setall(struct in_device *in_dev) IN_DEV_ORCONF((in_dev), ACCEPT_REDIRECTS))) #define IN_DEV_IGNORE_ROUTES_WITH_LINKDOWN(in_dev) \ - IN_DEV_ORCONF((in_dev), IGNORE_ROUTES_WITH_LINKDOWN) + IN_DEV_CONF_GET((in_dev), IGNORE_ROUTES_WITH_LINKDOWN) #define IN_DEV_ARPFILTER(in_dev) IN_DEV_ORCONF((in_dev), ARPFILTER) #define IN_DEV_ARP_ACCEPT(in_dev) IN_DEV_ORCONF((in_dev), ARP_ACCEPT) @@ -136,13 +131,12 @@ static inline void ipv4_devconf_setall(struct in_device *in_dev) struct in_ifaddr { struct hlist_node hash; - struct in_ifaddr __rcu *ifa_next; + struct in_ifaddr *ifa_next; struct in_device *ifa_dev; struct rcu_head rcu_head; __be32 ifa_local; __be32 ifa_address; __be32 ifa_mask; - __u32 ifa_rt_priority; __be32 ifa_broadcast; unsigned char ifa_scope; unsigned char ifa_prefixlen; @@ -156,19 +150,11 @@ struct in_ifaddr { unsigned long ifa_tstamp; /* updated timestamp */ }; -struct in_validator_info { - __be32 ivi_addr; - struct in_device *ivi_dev; - struct netlink_ext_ack *extack; -}; - int register_inetaddr_notifier(struct notifier_block *nb); int unregister_inetaddr_notifier(struct notifier_block *nb); -int 
register_inetaddr_validator_notifier(struct notifier_block *nb); -int unregister_inetaddr_validator_notifier(struct notifier_block *nb); -void inet_netconf_notify_devconf(struct net *net, int event, int type, - int ifindex, struct ipv4_devconf *devconf); +void inet_netconf_notify_devconf(struct net *net, int type, int ifindex, + struct ipv4_devconf *devconf); struct net_device *__ip_dev_find(struct net *net, __be32 addr, bool devref); static inline struct net_device *ip_dev_find(struct net *net, __be32 addr) @@ -177,16 +163,7 @@ static inline struct net_device *ip_dev_find(struct net *net, __be32 addr) } int inet_addr_onlink(struct in_device *in_dev, __be32 a, __be32 b); -int devinet_ioctl(struct net *net, unsigned int cmd, struct ifreq *); -#ifdef CONFIG_INET -int inet_gifconf(struct net_device *dev, char __user *buf, int len, int size); -#else -static inline int inet_gifconf(struct net_device *dev, char __user *buf, - int len, int size) -{ - return 0; -} -#endif +int devinet_ioctl(struct net *net, unsigned int cmd, void __user *); void devinet_init(void); struct in_device *inetdev_by_index(struct net *, int); __be32 inet_select_addr(const struct net_device *dev, __be32 dst, int scope); @@ -194,8 +171,7 @@ __be32 inet_confirm_addr(struct net *net, struct in_device *in_dev, __be32 dst, __be32 local, int scope); struct in_ifaddr *inet_ifa_byprefix(struct in_device *in_dev, __be32 prefix, __be32 mask); -struct in_ifaddr *inet_lookup_ifaddr_rcu(struct net *net, __be32 addr); -static inline bool inet_ifa_match(__be32 addr, const struct in_ifaddr *ifa) +static __inline__ bool inet_ifa_match(__be32 addr, struct in_ifaddr *ifa) { return !((addr^ifa->ifa_address)&ifa->ifa_mask); } @@ -215,13 +191,14 @@ static __inline__ bool bad_mask(__be32 mask, __be32 addr) return false; } -#define in_dev_for_each_ifa_rtnl(ifa, in_dev) \ - for (ifa = rtnl_dereference((in_dev)->ifa_list); ifa; \ - ifa = rtnl_dereference(ifa->ifa_next)) +#define for_primary_ifa(in_dev) { struct in_ifaddr 
*ifa; \ + for (ifa = (in_dev)->ifa_list; ifa && !(ifa->ifa_flags&IFA_F_SECONDARY); ifa = ifa->ifa_next) -#define in_dev_for_each_ifa_rcu(ifa, in_dev) \ - for (ifa = rcu_dereference((in_dev)->ifa_list); ifa; \ - ifa = rcu_dereference(ifa->ifa_next)) +#define for_ifa(in_dev) { struct in_ifaddr *ifa; \ + for (ifa = (in_dev)->ifa_list; ifa; ifa = ifa->ifa_next) + + +#define endfor_ifa(in_dev) } static inline struct in_device *__in_dev_get_rcu(const struct net_device *dev) { @@ -235,7 +212,7 @@ static inline struct in_device *in_dev_get(const struct net_device *dev) rcu_read_lock(); in_dev = __in_dev_get_rcu(dev); if (in_dev) - refcount_inc(&in_dev->refcnt); + atomic_inc(&in_dev->refcnt); rcu_read_unlock(); return in_dev; } @@ -245,20 +222,6 @@ static inline struct in_device *__in_dev_get_rtnl(const struct net_device *dev) return rtnl_dereference(dev->ip_ptr); } -/* called with rcu_read_lock or rtnl held */ -static inline bool ip_ignore_linkdown(const struct net_device *dev) -{ - struct in_device *in_dev; - bool rc = false; - - in_dev = rcu_dereference_rtnl(dev->ip_ptr); - if (in_dev && - IN_DEV_IGNORE_ROUTES_WITH_LINKDOWN(in_dev)) - rc = true; - - return rc; -} - static inline struct neigh_parms *__in_dev_arp_parms_get_rcu(const struct net_device *dev) { struct in_device *in_dev = __in_dev_get_rcu(dev); @@ -270,12 +233,12 @@ void in_dev_finish_destroy(struct in_device *idev); static inline void in_dev_put(struct in_device *idev) { - if (refcount_dec_and_test(&idev->refcnt)) + if (atomic_dec_and_test(&idev->refcnt)) in_dev_finish_destroy(idev); } -#define __in_dev_put(idev) refcount_dec(&(idev)->refcnt) -#define in_dev_hold(idev) refcount_inc(&(idev)->refcnt) +#define __in_dev_put(idev) atomic_dec(&(idev)->refcnt) +#define in_dev_hold(idev) atomic_inc(&(idev)->refcnt) #endif /* __KERNEL__ */ diff --git a/include/linux/init.h b/include/linux/init.h index d82b4b2e1d..34661e1796 100644 --- a/include/linux/init.h +++ b/include/linux/init.h @@ -1,17 +1,9 @@ -/* 
SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_INIT_H #define _LINUX_INIT_H #include #include -/* Built-in __init functions needn't be compiled with retpoline */ -#if defined(__noretpoline) && !defined(MODULE) -#define __noinitretpoline __noretpoline -#else -#define __noinitretpoline -#endif - /* These macros are used to mark some functions or * initialized data (doesn't apply to uninitialized data) * as `initialization' functions. The kernel can take this @@ -47,11 +39,11 @@ /* These are for everybody (although not all archs will actually discard it in modules) */ -#define __init __section(".init.text") __cold __latent_entropy __noinitretpoline __nocfi -#define __initdata __section(".init.data") -#define __initconst __section(".init.rodata") -#define __exitdata __section(".exit.data") -#define __exit_call __used __section(".exitcall.exit") +#define __init __section(.init.text) __cold notrace __latent_entropy +#define __initdata __section(.init.data) +#define __initconst __section(.init.rodata) +#define __exitdata __section(.exit.data) +#define __exit_call __used __section(.exitcall.exit) /* * modpost check for section mismatches during the kernel build. @@ -70,9 +62,9 @@ * * The markers follow same syntax rules as __init / __initdata. 
*/ -#define __ref __section(".ref.text") noinline -#define __refdata __section(".ref.data") -#define __refconst __section(".ref.rodata") +#define __ref __section(.ref.text) noinline +#define __refdata __section(.ref.data) +#define __refconst __section(.ref.rodata) #ifdef MODULE #define __exitused @@ -80,16 +72,16 @@ #define __exitused __used #endif -#define __exit __section(".exit.text") __exitused __cold notrace +#define __exit __section(.exit.text) __exitused __cold notrace /* Used for MEMORY_HOTPLUG */ -#define __meminit __section(".meminit.text") __cold notrace \ +#define __meminit __section(.meminit.text) __cold notrace \ __latent_entropy -#define __meminitdata __section(".meminit.data") -#define __meminitconst __section(".meminit.rodata") -#define __memexit __section(".memexit.text") __exitused __cold notrace -#define __memexitdata __section(".memexit.data") -#define __memexitconst __section(".memexit.rodata") +#define __meminitdata __section(.meminit.data) +#define __meminitconst __section(.meminit.rodata) +#define __memexit __section(.memexit.text) __exitused __cold notrace +#define __memexitdata __section(.memexit.data) +#define __memexitconst __section(.memexit.rodata) /* For assembly routines */ #define __HEAD .section ".head.text","ax" @@ -109,6 +101,12 @@ #define __REFDATA .section ".ref.data", "aw" #define __REFCONST .section ".ref.rodata", "a" +#ifdef CONFIG_PAX_KERNEXEC +#define __READ_ONLY .section ".data..read_only","a",%progbits +#else +#define __READ_ONLY .section ".data..mostly","aw",%progbits +#endif + #ifndef __ASSEMBLY__ /* * Used for initialization calls.. 
@@ -116,29 +114,12 @@ typedef int (*initcall_t)(void); typedef void (*exitcall_t)(void); -#ifdef CONFIG_HAVE_ARCH_PREL32_RELOCATIONS -typedef int initcall_entry_t; - -static inline initcall_t initcall_from_entry(initcall_entry_t *entry) -{ - return offset_to_ptr(entry); -} -#else -typedef initcall_t initcall_entry_t; - -static inline initcall_t initcall_from_entry(initcall_entry_t *entry) -{ - return *entry; -} -#endif - -extern initcall_entry_t __con_initcall_start[], __con_initcall_end[]; +extern initcall_t __con_initcall_start[], __con_initcall_end[]; +extern initcall_t __security_initcall_start[], __security_initcall_end[]; /* Used for contructor calls. */ typedef void (*ctor_fn_t)(void); -struct file_system_type; - /* Defined in init/main.c */ extern int do_one_initcall(initcall_t fn); extern char __initdata boot_command_line[]; @@ -148,13 +129,10 @@ extern unsigned int reset_devices; /* used by init/main.c */ void setup_arch(char **); void prepare_namespace(void); -void __init init_rootfs(void); -extern struct file_system_type rootfs_fs_type; +void __init load_default_modules(void); +int __init init_rootfs(void); -#if defined(CONFIG_STRICT_KERNEL_RWX) || defined(CONFIG_STRICT_MODULE_RWX) -extern bool rodata_enabled; -#endif -#ifdef CONFIG_STRICT_KERNEL_RWX +#ifdef CONFIG_DEBUG_RODATA void mark_rodata_ro(void); #endif @@ -184,82 +162,9 @@ extern bool initcall_debug; * as KEEP() in the linker script. */ -/* Format: ____ */ -#define __initcall_id(fn) \ - __PASTE(__KBUILD_MODNAME, \ - __PASTE(__, \ - __PASTE(__COUNTER__, \ - __PASTE(_, \ - __PASTE(__LINE__, \ - __PASTE(_, fn)))))) - -/* Format: ____ */ -#define __initcall_name(prefix, __iid, id) \ - __PASTE(__, \ - __PASTE(prefix, \ - __PASTE(__, \ - __PASTE(__iid, id)))) - -#ifdef CONFIG_LTO_CLANG -/* - * With LTO, the compiler doesn't necessarily obey link order for - * initcalls. 
In order to preserve the correct order, we add each - * variable into its own section and generate a linker script (in - * scripts/link-vmlinux.sh) to specify the order of the sections. - */ -#define __initcall_section(__sec, __iid) \ - #__sec ".init.." #__iid - -/* - * With LTO, the compiler can rename static functions to avoid - * global naming collisions. We use a global stub function for - * initcalls to create a stable symbol name whose address can be - * taken in inline assembly when PREL32 relocations are used. - */ -#define __initcall_stub(fn, __iid, id) \ - __initcall_name(initstub, __iid, id) - -#define __define_initcall_stub(__stub, fn) \ - int __init __cficanonical __stub(void); \ - int __init __cficanonical __stub(void) \ - { \ - return fn(); \ - } \ - __ADDRESSABLE(__stub) -#else -#define __initcall_section(__sec, __iid) \ - #__sec ".init" - -#define __initcall_stub(fn, __iid, id) fn - -#define __define_initcall_stub(__stub, fn) \ - __ADDRESSABLE(fn) -#endif - -#ifdef CONFIG_HAVE_ARCH_PREL32_RELOCATIONS -#define ____define_initcall(fn, __stub, __name, __sec) \ - __define_initcall_stub(__stub, fn) \ - asm(".section \"" __sec "\", \"a\" \n" \ - __stringify(__name) ": \n" \ - ".long " __stringify(__stub) " - . 
\n" \ - ".previous \n"); \ - static_assert(__same_type(initcall_t, &fn)); -#else -#define ____define_initcall(fn, __unused, __name, __sec) \ - static initcall_t __name __used \ - __attribute__((__section__(__sec))) = fn; -#endif - -#define __unique_initcall(fn, id, __sec, __iid) \ - ____define_initcall(fn, \ - __initcall_stub(fn, __iid, id), \ - __initcall_name(initcall, __iid, id), \ - __initcall_section(__sec, __iid)) - -#define ___define_initcall(fn, id, __sec) \ - __unique_initcall(fn, id, __sec, __initcall_id(fn)) - -#define __define_initcall(fn, id) ___define_initcall(fn, id, .initcall##id) +#define __define_initcall(fn, id) \ + static initcall_t __initcall_##fn##id __used \ + __attribute__((__section__(".initcall" #id ".init"))) = fn; /* * Early initcalls run before initializing SMP. @@ -298,7 +203,13 @@ extern bool initcall_debug; #define __exitcall(fn) \ static exitcall_t __exitcall_##fn __exit_call = fn -#define console_initcall(fn) ___define_initcall(fn, con, .con_initcall) +#define console_initcall(fn) \ + static initcall_t __initcall_##fn \ + __used __section(.con_initcall.init) = fn + +#define security_initcall(fn) \ + static initcall_t __initcall_##fn \ + __used __section(.security_initcall.init) = fn struct obs_kernel_param { const char *str; @@ -316,8 +227,8 @@ struct obs_kernel_param { static const char __setup_str_##unique_id[] __initconst \ __aligned(1) = str; \ static struct obs_kernel_param __setup_##unique_id \ - __used __section(".init.setup") \ - __aligned(__alignof__(struct obs_kernel_param)) \ + __used __section(.init.setup) \ + __attribute__((aligned((sizeof(long))))) \ = { __setup_str_##unique_id, fn, early } #define __setup(str, fn) \ @@ -339,14 +250,14 @@ struct obs_kernel_param { var = 1; \ return 0; \ } \ - early_param(str_on, parse_##var##_on); \ + __setup_param(str_on, parse_##var##_on, parse_##var##_on, 1); \ \ static int __init parse_##var##_off(char *arg) \ { \ var = 0; \ return 0; \ } \ - early_param(str_off, 
parse_##var##_off) + __setup_param(str_off, parse_##var##_off, parse_##var##_off, 1) /* Relies on boot_command_line being set */ void __init parse_early_param(void); @@ -360,7 +271,7 @@ void __init parse_early_options(char *cmdline); #endif /* Data marked not to be saved by software suspend */ -#define __nosavedata __section(".data..nosave") +#define __nosavedata __section(.data..nosave) #ifdef MODULE #define __exit_p(x) x diff --git a/include/linux/init_ohci1394_dma.h b/include/linux/init_ohci1394_dma.h index 228afca432..3c03a4bba5 100644 --- a/include/linux/init_ohci1394_dma.h +++ b/include/linux/init_ohci1394_dma.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifdef CONFIG_PROVIDE_OHCI1394_DMA_INIT extern int __initdata init_ohci1394_dma_early; extern void __init init_ohci1394_dma_on_all_controllers(void); diff --git a/include/linux/init_task.h b/include/linux/init_task.h index 40fc5813cf..b16f6af3bb 100644 --- a/include/linux/init_task.h +++ b/include/linux/init_task.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX__INIT_TASK_H #define _LINUX__INIT_TASK_H @@ -13,19 +12,27 @@ #include #include #include -#include -#include #include #include -#include -#include #include +#ifdef CONFIG_SMP +# define INIT_PUSHABLE_TASKS(tsk) \ + .pushable_tasks = PLIST_NODE_INIT(tsk.pushable_tasks, MAX_PRIO), +#else +# define INIT_PUSHABLE_TASKS(tsk) +#endif + extern struct files_struct init_files; extern struct fs_struct init_fs; -extern struct nsproxy init_nsproxy; -extern struct cred init_cred; + +#ifdef CONFIG_CPUSETS +#define INIT_CPUSET_SEQ(tsk) \ + .mems_allowed_seq = SEQCNT_ZERO(tsk.mems_allowed_seq), +#else +#define INIT_CPUSET_SEQ(tsk) +#endif #ifndef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE #define INIT_PREV_CPUTIME(x) .prev_cputime = { \ @@ -35,16 +42,246 @@ extern struct cred init_cred; #define INIT_PREV_CPUTIME(x) #endif +#define INIT_SIGNALS(sig) { \ + .nr_threads = 1, \ + .thread_head = LIST_HEAD_INIT(init_task.thread_node), \ + 
.wait_chldexit = __WAIT_QUEUE_HEAD_INITIALIZER(sig.wait_chldexit),\ + .shared_pending = { \ + .list = LIST_HEAD_INIT(sig.shared_pending.list), \ + .signal = {{0}}}, \ + .posix_timers = LIST_HEAD_INIT(sig.posix_timers), \ + .cpu_timers = INIT_CPU_TIMERS(sig.cpu_timers), \ + .rlim = INIT_RLIMITS, \ + .cputimer = { \ + .cputime_atomic = INIT_CPUTIME_ATOMIC, \ + .running = false, \ + .checking_timer = false, \ + }, \ + INIT_PREV_CPUTIME(sig) \ + .cred_guard_mutex = \ + __MUTEX_INITIALIZER(sig.cred_guard_mutex), \ +} + +extern struct nsproxy init_nsproxy; + +#define INIT_SIGHAND(sighand) { \ + .count = ATOMIC_INIT(1), \ + .action = { { { .sa_handler = SIG_DFL, } }, }, \ + .siglock = __SPIN_LOCK_UNLOCKED(sighand.siglock), \ + .signalfd_wqh = __WAIT_QUEUE_HEAD_INITIALIZER(sighand.signalfd_wqh), \ +} + +extern struct group_info init_groups; + +#define INIT_STRUCT_PID { \ + .count = ATOMIC_INIT(1), \ + .tasks = { \ + { .first = NULL }, \ + { .first = NULL }, \ + { .first = NULL }, \ + }, \ + .level = 0, \ + .numbers = { { \ + .nr = 0, \ + .ns = &init_pid_ns, \ + .pid_chain = { .next = NULL, .pprev = NULL }, \ + }, } \ +} + +#define INIT_PID_LINK(type) \ +{ \ + .node = { \ + .next = NULL, \ + .pprev = NULL, \ + }, \ + .pid = &init_struct_pid, \ +} + +#ifdef CONFIG_AUDITSYSCALL +#define INIT_IDS \ + .loginuid = INVALID_UID, \ + .sessionid = (unsigned int)-1, +#else +#define INIT_IDS +#endif + +#ifdef CONFIG_PREEMPT_RCU +#define INIT_TASK_RCU_TREE_PREEMPT() \ + .rcu_blocked_node = NULL, +#else +#define INIT_TASK_RCU_TREE_PREEMPT(tsk) +#endif +#ifdef CONFIG_PREEMPT_RCU +#define INIT_TASK_RCU_PREEMPT(tsk) \ + .rcu_read_lock_nesting = 0, \ + .rcu_read_unlock_special.s = 0, \ + .rcu_node_entry = LIST_HEAD_INIT(tsk.rcu_node_entry), \ + INIT_TASK_RCU_TREE_PREEMPT() +#else +#define INIT_TASK_RCU_PREEMPT(tsk) +#endif +#ifdef CONFIG_TASKS_RCU +#define INIT_TASK_RCU_TASKS(tsk) \ + .rcu_tasks_holdout = false, \ + .rcu_tasks_holdout_list = \ + LIST_HEAD_INIT(tsk.rcu_tasks_holdout_list), \ 
+ .rcu_tasks_idle_cpu = -1, +#else +#define INIT_TASK_RCU_TASKS(tsk) +#endif + +extern struct cred init_cred; + +extern struct task_group root_task_group; + +#ifdef CONFIG_CGROUP_SCHED +# define INIT_CGROUP_SCHED(tsk) \ + .sched_task_group = &root_task_group, +#else +# define INIT_CGROUP_SCHED(tsk) +#endif + +#ifdef CONFIG_PERF_EVENTS +# define INIT_PERF_EVENTS(tsk) \ + .perf_event_mutex = \ + __MUTEX_INITIALIZER(tsk.perf_event_mutex), \ + .perf_event_list = LIST_HEAD_INIT(tsk.perf_event_list), +#else +# define INIT_PERF_EVENTS(tsk) +#endif + +#ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN +# define INIT_VTIME(tsk) \ + .vtime_seqcount = SEQCNT_ZERO(tsk.vtime_seqcount), \ + .vtime_snap = 0, \ + .vtime_snap_whence = VTIME_SYS, +#else +# define INIT_VTIME(tsk) +#endif + #define INIT_TASK_COMM "swapper" -/* Attach to the init_task data structure for proper alignment */ -#ifdef CONFIG_ARCH_TASK_STRUCT_ON_STACK -#define __init_task_data __section(".data..init_task") +#ifdef CONFIG_RT_MUTEXES +# define INIT_RT_MUTEXES(tsk) \ + .pi_waiters = RB_ROOT, \ + .pi_waiters_leftmost = NULL, #else -#define __init_task_data /**/ +# define INIT_RT_MUTEXES(tsk) #endif -/* Attach to the thread_info data structure for proper alignment */ -#define __init_thread_info __section(".data..init_thread_info") +#ifdef CONFIG_NUMA_BALANCING +# define INIT_NUMA_BALANCING(tsk) \ + .numa_preferred_nid = -1, \ + .numa_group = NULL, \ + .numa_faults = NULL, +#else +# define INIT_NUMA_BALANCING(tsk) +#endif + +#ifdef CONFIG_KASAN +# define INIT_KASAN(tsk) \ + .kasan_depth = 1, +#else +# define INIT_KASAN(tsk) +#endif + +#ifdef CONFIG_THREAD_INFO_IN_TASK +# define INIT_TASK_TI(tsk) \ + .thread_info = INIT_THREAD_INFO(tsk), \ + .stack_refcount = ATOMIC_INIT(1), +#else +# define INIT_TASK_TI(tsk) +#endif + +/* + * INIT_TASK is used to set up the first task table, touch at + * your own risk!. 
Base=0, limit=0x1fffff (=2MB) + */ +#define INIT_TASK(tsk) \ +{ \ + INIT_TASK_TI(tsk) \ + .state = 0, \ + .stack = init_stack, \ + .usage = ATOMIC_INIT(2), \ + .flags = PF_KTHREAD, \ + .prio = MAX_PRIO-20, \ + .static_prio = MAX_PRIO-20, \ + .normal_prio = MAX_PRIO-20, \ + .policy = SCHED_NORMAL, \ + .cpus_allowed = CPU_MASK_ALL, \ + .nr_cpus_allowed= NR_CPUS, \ + .mm = NULL, \ + .active_mm = &init_mm, \ + .restart_block = { \ + .fn = do_no_restart_syscall, \ + }, \ + .se = { \ + .group_node = LIST_HEAD_INIT(tsk.se.group_node), \ + }, \ + .rt = { \ + .run_list = LIST_HEAD_INIT(tsk.rt.run_list), \ + .time_slice = RR_TIMESLICE, \ + }, \ + .tasks = LIST_HEAD_INIT(tsk.tasks), \ + INIT_PUSHABLE_TASKS(tsk) \ + INIT_CGROUP_SCHED(tsk) \ + .ptraced = LIST_HEAD_INIT(tsk.ptraced), \ + .ptrace_entry = LIST_HEAD_INIT(tsk.ptrace_entry), \ + .real_parent = &tsk, \ + .parent = &tsk, \ + .children = LIST_HEAD_INIT(tsk.children), \ + .sibling = LIST_HEAD_INIT(tsk.sibling), \ + .group_leader = &tsk, \ + RCU_POINTER_INITIALIZER(real_cred, &init_cred), \ + RCU_POINTER_INITIALIZER(cred, &init_cred), \ + .comm = INIT_TASK_COMM, \ + .thread = INIT_THREAD, \ + .fs = &init_fs, \ + .files = &init_files, \ + .signal = &init_signals, \ + .sighand = &init_sighand, \ + .nsproxy = &init_nsproxy, \ + .pending = { \ + .list = LIST_HEAD_INIT(tsk.pending.list), \ + .signal = {{0}}}, \ + .alloc_lock = __SPIN_LOCK_UNLOCKED(tsk.alloc_lock), \ + .journal_info = NULL, \ + .cpu_timers = INIT_CPU_TIMERS(tsk.cpu_timers), \ + .pi_lock = __RAW_SPIN_LOCK_UNLOCKED(tsk.pi_lock), \ + .timer_slack_ns = 50000, /* 50 usec default slack */ \ + .pids = { \ + [PIDTYPE_PID] = INIT_PID_LINK(PIDTYPE_PID), \ + [PIDTYPE_PGID] = INIT_PID_LINK(PIDTYPE_PGID), \ + [PIDTYPE_SID] = INIT_PID_LINK(PIDTYPE_SID), \ + }, \ + .thread_group = LIST_HEAD_INIT(tsk.thread_group), \ + .thread_node = LIST_HEAD_INIT(init_signals.thread_head), \ + INIT_IDS \ + INIT_PERF_EVENTS(tsk) \ + INIT_TRACE_IRQFLAGS \ + INIT_LOCKDEP \ + INIT_FTRACE_GRAPH \ 
+ INIT_TRACE_RECURSION \ + INIT_TASK_RCU_PREEMPT(tsk) \ + INIT_TASK_RCU_TASKS(tsk) \ + INIT_CPUSET_SEQ(tsk) \ + INIT_RT_MUTEXES(tsk) \ + INIT_PREV_CPUTIME(tsk) \ + INIT_VTIME(tsk) \ + INIT_NUMA_BALANCING(tsk) \ + INIT_KASAN(tsk) \ +} + + +#define INIT_CPU_TIMERS(cpu_timers) \ +{ \ + LIST_HEAD_INIT(cpu_timers[0]), \ + LIST_HEAD_INIT(cpu_timers[1]), \ + LIST_HEAD_INIT(cpu_timers[2]), \ +} + +/* Attach to the init_task data structure for proper alignment */ +#define __init_task_data __attribute__((__section__(".data..init_task"))) + #endif diff --git a/include/linux/initrd.h b/include/linux/initrd.h index 1bbe9af48d..55289d261b 100644 --- a/include/linux/initrd.h +++ b/include/linux/initrd.h @@ -1,16 +1,15 @@ -/* SPDX-License-Identifier: GPL-2.0 */ - -#ifndef __LINUX_INITRD_H -#define __LINUX_INITRD_H #define INITRD_MINOR 250 /* shouldn't collide with /dev/ram* too soon ... */ +/* 1 = load ramdisk, 0 = don't load */ +extern int rd_doload; + +/* 1 = prompt for ramdisk, 0 = don't prompt */ +extern int rd_prompt; + /* starting block # of image */ extern int rd_image_start; -/* size of a single RAM disk */ -extern unsigned long rd_size; - /* 1 if it is not an error if initrd_start < memory_start */ extern int initrd_below_start_ok; @@ -18,22 +17,4 @@ extern int initrd_below_start_ok; extern unsigned long initrd_start, initrd_end; extern void free_initrd_mem(unsigned long, unsigned long); -#ifdef CONFIG_BLK_DEV_INITRD -extern void __init reserve_initrd_mem(void); -extern void wait_for_initramfs(void); -#else -static inline void __init reserve_initrd_mem(void) {} -static inline void wait_for_initramfs(void) {} -#endif - -extern phys_addr_t phys_initrd_start; -extern unsigned long phys_initrd_size; - extern unsigned int real_root_dev; - -extern char __initramfs_start[]; -extern unsigned long __initramfs_size; - -void console_on_rootfs(void); - -#endif /* __LINUX_INITRD_H */ diff --git a/include/linux/inotify.h b/include/linux/inotify.h index 6a24905f6e..23aede0b58 100644 --- 
a/include/linux/inotify.h +++ b/include/linux/inotify.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ /* * Inode based directory notification for Linux * @@ -18,6 +17,6 @@ extern struct ctl_table inotify_table[]; /* for sysctl */ IN_DELETE_SELF | IN_MOVE_SELF | IN_UNMOUNT | \ IN_Q_OVERFLOW | IN_IGNORED | IN_ONLYDIR | \ IN_DONT_FOLLOW | IN_EXCL_UNLINK | IN_MASK_ADD | \ - IN_MASK_CREATE | IN_ISDIR | IN_ONESHOT) + IN_ISDIR | IN_ONESHOT) #endif /* _LINUX_INOTIFY_H */ diff --git a/include/linux/input-polldev.h b/include/linux/input-polldev.h index 14821fd231..2465182670 100644 --- a/include/linux/input-polldev.h +++ b/include/linux/input-polldev.h @@ -1,9 +1,12 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ #ifndef _INPUT_POLLDEV_H #define _INPUT_POLLDEV_H /* * Copyright (c) 2007 Dmitry Torokhov + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published by + * the Free Software Foundation. */ #include diff --git a/include/linux/input.h b/include/linux/input.h index 0354b298d8..a65e3b24fb 100644 --- a/include/linux/input.h +++ b/include/linux/input.h @@ -1,6 +1,9 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright (c) 1999-2002 Vojtech Pavlik + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published by + * the Free Software Foundation. 
*/ #ifndef _INPUT_H #define _INPUT_H @@ -21,8 +24,6 @@ #include #include -struct input_dev_poller; - /** * struct input_value - input value representation * @type: type of value (EV_KEY, EV_ABS, etc) @@ -35,13 +36,6 @@ struct input_value { __s32 value; }; -enum input_clock_type { - INPUT_CLK_REAL = 0, - INPUT_CLK_MONO, - INPUT_CLK_BOOT, - INPUT_CLK_MAX -}; - /** * struct input_dev - represents an input device * @name: name of the device @@ -73,8 +67,6 @@ enum input_clock_type { * not sleep * @ff: force feedback structure associated with the device if device * supports force feedback effects - * @poller: poller structure associated with the device if device is - * set up to use polling mode * @repeat_key: stores key code of the last key pressed; used to implement * software autorepeat * @timer: timer for software autorepeat @@ -90,11 +82,9 @@ enum input_clock_type { * @open: this method is called when the very first user calls * input_open_device(). The driver must prepare the device * to start generating events (start polling thread, - * request an IRQ, submit URB, etc.). The meaning of open() is - * to start providing events to the input core. + * request an IRQ, submit URB, etc.) * @close: this method is called when the very last user calls - * input_close_device(). The meaning of close() is to stop - * providing events to the input core. + * input_close_device(). * @flush: purges the device. Most commonly used to get rid of force * feedback effects loaded into the device when disconnecting * from it @@ -127,12 +117,6 @@ enum input_clock_type { * @vals: array of values queued in the current frame * @devres_managed: indicates that devices is managed with devres framework * and needs not be explicitly unregistered or freed. - * @timestamp: storage for a timestamp set by input_set_timestamp called - * by a driver - * @inhibited: indicates that the input device is inhibited. If that is - * the case then input core ignores any events generated by the device. 
- * Device's close() is called when it is being inhibited and its open() - * is called when it is being uninhibited. */ struct input_dev { const char *name; @@ -166,8 +150,6 @@ struct input_dev { struct ff_device *ff; - struct input_dev_poller *poller; - unsigned int repeat_key; struct timer_list timer; @@ -205,10 +187,6 @@ struct input_dev { struct input_value *vals; bool devres_managed; - - ktime_t timestamp[INPUT_CLK_MAX]; - - bool inhibited; }; #define to_input_dev(d) container_of(d, struct input_dev, dev) @@ -256,10 +234,6 @@ struct input_dev { #error "SW_MAX and INPUT_DEVICE_ID_SW_MAX do not match" #endif -#if INPUT_PROP_MAX != INPUT_DEVICE_ID_PROP_MAX -#error "INPUT_PROP_MAX and INPUT_DEVICE_ID_PROP_MAX do not match" -#endif - #define INPUT_DEVICE_ID_MATCH_DEVICE \ (INPUT_DEVICE_ID_MATCH_BUS | INPUT_DEVICE_ID_MATCH_VENDOR | INPUT_DEVICE_ID_MATCH_PRODUCT) #define INPUT_DEVICE_ID_MATCH_DEVICE_AND_VERSION \ @@ -386,13 +360,6 @@ void input_unregister_device(struct input_dev *); void input_reset_device(struct input_dev *); -int input_setup_polling(struct input_dev *dev, - void (*poll_fn)(struct input_dev *dev)); -void input_set_poll_interval(struct input_dev *dev, unsigned int interval); -void input_set_min_poll_interval(struct input_dev *dev, unsigned int interval); -void input_set_max_poll_interval(struct input_dev *dev, unsigned int interval); -int input_get_poll_interval(struct input_dev *dev); - int __must_check input_register_handler(struct input_handler *); void input_unregister_handler(struct input_handler *); @@ -414,9 +381,6 @@ void input_close_device(struct input_handle *); int input_flush_device(struct input_handle *handle, struct file *file); -void input_set_timestamp(struct input_dev *dev, ktime_t timestamp); -ktime_t *input_get_timestamp(struct input_dev *dev); - void input_event(struct input_dev *dev, unsigned int type, unsigned int code, int value); void input_inject_event(struct input_handle *handle, unsigned int type, unsigned int code, int 
value); @@ -505,13 +469,8 @@ int input_get_keycode(struct input_dev *dev, struct input_keymap_entry *ke); int input_set_keycode(struct input_dev *dev, const struct input_keymap_entry *ke); -bool input_match_device_id(const struct input_dev *dev, - const struct input_device_id *id); - void input_enable_softrepeat(struct input_dev *dev, int delay, int period); -bool input_device_enabled(struct input_dev *dev); - extern struct class input_class; /** @@ -570,7 +529,6 @@ int input_ff_event(struct input_dev *dev, unsigned int type, unsigned int code, int input_ff_upload(struct input_dev *dev, struct ff_effect *effect, struct file *file); int input_ff_erase(struct input_dev *dev, int effect_id, struct file *file); -int input_ff_flush(struct input_dev *dev, struct file *file); int input_ff_create_memless(struct input_dev *dev, void *data, int (*play_effect)(struct input_dev *, void *, struct ff_effect *)); diff --git a/include/linux/input/ad714x.h b/include/linux/input/ad714x.h index 20aea668b0..d388d857bf 100644 --- a/include/linux/input/ad714x.h +++ b/include/linux/input/ad714x.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * include/linux/input/ad714x.h * @@ -8,6 +7,8 @@ * information. * * Copyright 2009-2011 Analog Devices Inc. + * + * Licensed under the GPL-2 or later. */ #ifndef __LINUX_INPUT_AD714X_H__ diff --git a/include/linux/input/adp5589.h b/include/linux/input/adp5589.h index 0e4742c8c8..1a05eee15e 100644 --- a/include/linux/input/adp5589.h +++ b/include/linux/input/adp5589.h @@ -1,8 +1,9 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * Analog Devices ADP5589/ADP5585 I/O Expander and QWERTY Keypad Controller * * Copyright 2010-2011 Analog Devices Inc. + * + * Licensed under the GPL-2. 
*/ #ifndef _ADP5589_H @@ -175,6 +176,13 @@ struct i2c_client; /* forward declaration */ struct adp5589_gpio_platform_data { int gpio_start; /* GPIO Chip base # */ + int (*setup)(struct i2c_client *client, + int gpio, unsigned ngpio, + void *context); + int (*teardown)(struct i2c_client *client, + int gpio, unsigned ngpio, + void *context); + void *context; }; #endif diff --git a/include/linux/input/adxl34x.h b/include/linux/input/adxl34x.h index 7efc9008f3..010d98175e 100644 --- a/include/linux/input/adxl34x.h +++ b/include/linux/input/adxl34x.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * include/linux/input/adxl34x.h * @@ -7,6 +6,8 @@ * device's "struct device" holds this information. * * Copyright 2009 Analog Devices Inc. + * + * Licensed under the GPL-2 or later. */ #ifndef __LINUX_INPUT_ADXL34X_H__ diff --git a/include/linux/input/as5011.h b/include/linux/input/as5011.h index 5fba52a56c..1affd0ddfa 100644 --- a/include/linux/input/as5011.h +++ b/include/linux/input/as5011.h @@ -1,9 +1,12 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ #ifndef _AS5011_H #define _AS5011_H /* * Copyright (c) 2010, 2011 Fabien Marteau + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published by + * the Free Software Foundation. */ struct as5011_platform_data { diff --git a/include/linux/input/auo-pixcir-ts.h b/include/linux/input/auo-pixcir-ts.h index ed0776997a..5049f21928 100644 --- a/include/linux/input/auo-pixcir-ts.h +++ b/include/linux/input/auo-pixcir-ts.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * Driver for AUO in-cell touchscreens * @@ -8,6 +7,17 @@ * * Copyright (c) 2008 QUALCOMM Incorporated. * Copyright (c) 2008 QUALCOMM USA, INC. 
+ * + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * */ #ifndef __AUO_PIXCIR_TS_H__ diff --git a/include/linux/input/bu21013.h b/include/linux/input/bu21013.h new file mode 100644 index 0000000000..6230d76bde --- /dev/null +++ b/include/linux/input/bu21013.h @@ -0,0 +1,34 @@ +/* + * Copyright (C) ST-Ericsson SA 2010 + * Author: Naveen Kumar G for ST-Ericsson + * License terms:GNU General Public License (GPL) version 2 + */ + +#ifndef _BU21013_H +#define _BU21013_H + +/** + * struct bu21013_platform_device - Handle the platform data + * @touch_x_max: touch x max + * @touch_y_max: touch y max + * @cs_pin: chip select pin + * @touch_pin: touch gpio pin + * @ext_clk: external clock flag + * @x_flip: x flip flag + * @y_flip: y flip flag + * @wakeup: wakeup flag + * + * This is used to handle the platform data + */ +struct bu21013_platform_device { + int touch_x_max; + int touch_y_max; + unsigned int cs_pin; + unsigned int touch_pin; + bool ext_clk; + bool x_flip; + bool y_flip; + bool wakeup; +}; + +#endif diff --git a/include/linux/input/cma3000.h b/include/linux/input/cma3000.h index aaab51fa90..cbbaac27d3 100644 --- a/include/linux/input/cma3000.h +++ b/include/linux/input/cma3000.h @@ -1,9 +1,20 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * VTI CMA3000_Dxx Accelerometer driver * * Copyright (C) 2010 Texas Instruments * Author: Hemanth V + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published by + * the Free Software Foundation. 
+ * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program. If not, see . */ #ifndef _LINUX_CMA3000_H diff --git a/include/linux/input/cy8ctmg110_pdata.h b/include/linux/input/cy8ctmg110_pdata.h index ee1d44545f..09522cb599 100644 --- a/include/linux/input/cy8ctmg110_pdata.h +++ b/include/linux/input/cy8ctmg110_pdata.h @@ -1,10 +1,10 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_CY8CTMG110_PDATA_H #define _LINUX_CY8CTMG110_PDATA_H struct cy8ctmg110_pdata { int reset_pin; /* Reset pin is wired to this GPIO (optional) */ + int irq_pin; /* IRQ pin is wired to this GPIO */ }; #endif diff --git a/include/linux/input/cyttsp.h b/include/linux/input/cyttsp.h index 118b9af6e0..586c8c95dc 100644 --- a/include/linux/input/cyttsp.h +++ b/include/linux/input/cyttsp.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * Header file for: * Cypress TrueTouch(TM) Standard Product (TTSP) touchscreen drivers. @@ -10,7 +9,22 @@ * Copyright (C) 2009, 2010, 2011 Cypress Semiconductor, Inc. * Copyright (C) 2012 Javier Martinez Canillas * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * version 2, and only version 2, as published by the + * Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. + * * Contact Cypress Semiconductor at www.cypress.com (kev@cypress.com) + * */ #ifndef _CYTTSP_H_ #define _CYTTSP_H_ diff --git a/include/linux/input/eeti_ts.h b/include/linux/input/eeti_ts.h new file mode 100644 index 0000000000..16625d799b --- /dev/null +++ b/include/linux/input/eeti_ts.h @@ -0,0 +1,10 @@ +#ifndef LINUX_INPUT_EETI_TS_H +#define LINUX_INPUT_EETI_TS_H + +struct eeti_ts_platform_data { + int irq_gpio; + unsigned int irq_active_high; +}; + +#endif /* LINUX_INPUT_EETI_TS_H */ + diff --git a/include/linux/input/gp2ap002a00f.h b/include/linux/input/gp2ap002a00f.h new file mode 100644 index 0000000000..aad2fd44a6 --- /dev/null +++ b/include/linux/input/gp2ap002a00f.h @@ -0,0 +1,22 @@ +#ifndef _GP2AP002A00F_H_ +#define _GP2AP002A00F_H_ + +#include + +#define GP2A_I2C_NAME "gp2ap002a00f" + +/** + * struct gp2a_platform_data - Sharp gp2ap002a00f proximity platform data + * @vout_gpio: The gpio connected to the object detected pin (VOUT) + * @wakeup: Set to true if the proximity can wake the device from suspend + * @hw_setup: Callback for setting up hardware such as gpios and vregs + * @hw_shutdown: Callback for properly shutting down hardware + */ +struct gp2a_platform_data { + int vout_gpio; + bool wakeup; + int (*hw_setup)(struct i2c_client *client); + int (*hw_shutdown)(struct i2c_client *client); +}; + +#endif diff --git a/include/linux/input/gpio_tilt.h b/include/linux/input/gpio_tilt.h new file mode 100644 index 0000000000..c1cc52d380 --- /dev/null +++ b/include/linux/input/gpio_tilt.h @@ -0,0 +1,73 @@ +#ifndef _INPUT_GPIO_TILT_H +#define _INPUT_GPIO_TILT_H + +/** + * struct gpio_tilt_axis - Axis used by the tilt switch + * @axis: Constant describing the axis, e.g. 
ABS_X + * @min: minimum value for abs_param + * @max: maximum value for abs_param + * @fuzz: fuzz value for abs_param + * @flat: flat value for abs_param + */ +struct gpio_tilt_axis { + int axis; + int min; + int max; + int fuzz; + int flat; +}; + +/** + * struct gpio_tilt_state - state description + * @gpios: bitfield of gpio target-states for the value + * @axes: array containing the axes settings for the gpio state + * The array indizes must correspond to the axes defined + * in platform_data + * + * This structure describes a supported axis settings + * and the necessary gpio-state which represent it. + * + * The n-th bit in the bitfield describes the state of the n-th GPIO + * from the gpios-array defined in gpio_regulator_config below. + */ +struct gpio_tilt_state { + int gpios; + int *axes; +}; + +/** + * struct gpio_tilt_platform_data + * @gpios: Array containing the gpios determining the tilt state + * @nr_gpios: Number of gpios + * @axes: Array of gpio_tilt_axis descriptions + * @nr_axes: Number of axes + * @states: Array of gpio_tilt_state entries describing + * the gpio state for specific tilts + * @nr_states: Number of states available + * @debounce_interval: debounce ticks interval in msecs + * @poll_interval: polling interval in msecs - for polling driver only + * @enable: callback to enable the tilt switch + * @disable: callback to disable the tilt switch + * + * This structure contains gpio-tilt-switch configuration + * information that must be passed by platform code to the + * gpio-tilt input driver. 
+ */ +struct gpio_tilt_platform_data { + struct gpio *gpios; + int nr_gpios; + + struct gpio_tilt_axis *axes; + int nr_axes; + + struct gpio_tilt_state *states; + int nr_states; + + int debounce_interval; + + unsigned int poll_interval; + int (*enable)(struct device *dev); + void (*disable)(struct device *dev); +}; + +#endif diff --git a/include/linux/input/ili210x.h b/include/linux/input/ili210x.h new file mode 100644 index 0000000000..a5471245a1 --- /dev/null +++ b/include/linux/input/ili210x.h @@ -0,0 +1,10 @@ +#ifndef _ILI210X_H +#define _ILI210X_H + +struct ili210x_platform_data { + unsigned long irq_flags; + unsigned int poll_period; + bool (*get_pendown_state)(void); +}; + +#endif diff --git a/include/linux/input/kxtj9.h b/include/linux/input/kxtj9.h index 46e231986f..d415579b56 100644 --- a/include/linux/input/kxtj9.h +++ b/include/linux/input/kxtj9.h @@ -1,7 +1,20 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright (C) 2011 Kionix, Inc. * Written by Chris Hudson + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA + * 02111-1307, USA */ #ifndef __KXTJ9_H__ diff --git a/include/linux/input/lm8333.h b/include/linux/input/lm8333.h index 906da5fc06..79f918c6e8 100644 --- a/include/linux/input/lm8333.h +++ b/include/linux/input/lm8333.h @@ -1,6 +1,6 @@ /* * public include for LM8333 keypad driver - same license as driver - * Copyright (C) 2012 Wolfram Sang, Pengutronix + * Copyright (C) 2012 Wolfram Sang, Pengutronix */ #ifndef _LM8333_H diff --git a/include/linux/input/matrix_keypad.h b/include/linux/input/matrix_keypad.h index 9476768c3b..27e06acc50 100644 --- a/include/linux/input/matrix_keypad.h +++ b/include/linux/input/matrix_keypad.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _MATRIX_KEYPAD_H #define _MATRIX_KEYPAD_H @@ -50,8 +49,6 @@ struct matrix_keymap_data { * @wakeup: controls whether the device should be set up as wakeup * source * @no_autorepeat: disable key autorepeat - * @drive_inactive_cols: drive inactive columns during scan, rather than - * making them inputs. * * This structure represents platform-specific data that use used by * matrix_keypad driver to perform proper initialization. 
@@ -76,7 +73,6 @@ struct matrix_keypad_platform_data { bool active_low; bool wakeup; bool no_autorepeat; - bool drive_inactive_cols; }; int matrix_keypad_build_keymap(const struct matrix_keymap_data *keymap_data, @@ -84,9 +80,24 @@ int matrix_keypad_build_keymap(const struct matrix_keymap_data *keymap_data, unsigned int rows, unsigned int cols, unsigned short *keymap, struct input_dev *input_dev); -int matrix_keypad_parse_properties(struct device *dev, - unsigned int *rows, unsigned int *cols); -#define matrix_keypad_parse_of_params matrix_keypad_parse_properties +#ifdef CONFIG_OF +/** + * matrix_keypad_parse_of_params() - Read parameters from matrix-keypad node + * + * @dev: Device containing of_node + * @rows: Returns number of matrix rows + * @cols: Returns number of matrix columns + * @return 0 if OK, <0 on error + */ +int matrix_keypad_parse_of_params(struct device *dev, + unsigned int *rows, unsigned int *cols); +#else +static inline int matrix_keypad_parse_of_params(struct device *dev, + unsigned int *rows, unsigned int *cols) +{ + return -ENOSYS; +} +#endif /* CONFIG_OF */ #endif /* _MATRIX_KEYPAD_H */ diff --git a/include/linux/input/mt.h b/include/linux/input/mt.h index 3b8580bd33..d7188de4db 100644 --- a/include/linux/input/mt.h +++ b/include/linux/input/mt.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ #ifndef _INPUT_MT_H #define _INPUT_MT_H @@ -6,6 +5,10 @@ * Input Multitouch Library * * Copyright (c) 2010 Henrik Rydberg + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published by + * the Free Software Foundation. 
*/ #include @@ -97,14 +100,9 @@ static inline bool input_is_mt_axis(int axis) return axis == ABS_MT_SLOT || input_is_mt_value(axis); } -bool input_mt_report_slot_state(struct input_dev *dev, +void input_mt_report_slot_state(struct input_dev *dev, unsigned int tool_type, bool active); -static inline void input_mt_report_slot_inactive(struct input_dev *dev) -{ - input_mt_report_slot_state(dev, 0, false); -} - void input_mt_report_finger_count(struct input_dev *dev, int count); void input_mt_report_pointer_emulation(struct input_dev *dev, bool use_count); void input_mt_drop_unused(struct input_dev *dev); diff --git a/include/linux/input/navpoint.h b/include/linux/input/navpoint.h index d464ffb4db..45050eb34d 100644 --- a/include/linux/input/navpoint.h +++ b/include/linux/input/navpoint.h @@ -1,6 +1,9 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright (C) 2012 Paul Parsons + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. */ struct navpoint_platform_data { diff --git a/include/linux/input/samsung-keypad.h b/include/linux/input/samsung-keypad.h index ab6b97114c..f25619bfd8 100644 --- a/include/linux/input/samsung-keypad.h +++ b/include/linux/input/samsung-keypad.h @@ -1,9 +1,13 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * Samsung Keypad platform data definitions * * Copyright (C) 2010 Samsung Electronics Co.Ltd * Author: Joonyoung Shim + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. 
*/ #ifndef __SAMSUNG_KEYPAD_H diff --git a/include/linux/input/sh_keysc.h b/include/linux/input/sh_keysc.h index b3c4f3b667..5d253cd936 100644 --- a/include/linux/input/sh_keysc.h +++ b/include/linux/input/sh_keysc.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef __SH_KEYSC_H__ #define __SH_KEYSC_H__ diff --git a/include/linux/input/sparse-keymap.h b/include/linux/input/sparse-keymap.h index d0dddc14eb..52db62064c 100644 --- a/include/linux/input/sparse-keymap.h +++ b/include/linux/input/sparse-keymap.h @@ -1,9 +1,12 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ #ifndef _SPARSE_KEYMAP_H #define _SPARSE_KEYMAP_H /* * Copyright (c) 2009 Dmitry Torokhov + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published by + * the Free Software Foundation. */ #define KE_END 0 /* Indicates end of keymap */ @@ -20,7 +23,6 @@ * private definitions. * @code: Device-specific data identifying the button/switch * @keycode: KEY_* code assigned to a key/button - * @sw: struct with code/value used by KE_SW and KE_VSW * @sw.code: SW_* code assigned to a switch * @sw.value: Value that should be sent in an input even when KE_SW * switch is toggled. KE_VSW switches ignore this field and @@ -49,6 +51,7 @@ struct key_entry *sparse_keymap_entry_from_keycode(struct input_dev *dev, int sparse_keymap_setup(struct input_dev *dev, const struct key_entry *keymap, int (*setup)(struct input_dev *, struct key_entry *)); +void sparse_keymap_free(struct input_dev *dev); void sparse_keymap_report_entry(struct input_dev *dev, const struct key_entry *ke, unsigned int value, bool autorelease); diff --git a/include/linux/input/tca8418_keypad.h b/include/linux/input/tca8418_keypad.h new file mode 100644 index 0000000000..e71a85dc2c --- /dev/null +++ b/include/linux/input/tca8418_keypad.h @@ -0,0 +1,44 @@ +/* + * TCA8418 keypad platform support + * + * Copyright (C) 2011 Fuel7, Inc. 
All rights reserved. + * + * Author: Kyle Manna + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public + * License v2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public + * License along with this program; if not, write to the + * Free Software Foundation, Inc., 59 Temple Place - Suite 330, + * Boston, MA 021110-1307, USA. + * + * If you can't comply with GPLv2, alternative licensing terms may be + * arranged. Please contact Fuel7, Inc. (http://fuel7.com/) for proprietary + * alternative licensing inquiries. + */ + +#ifndef _TCA8418_KEYPAD_H +#define _TCA8418_KEYPAD_H + +#include +#include + +#define TCA8418_I2C_ADDR 0x34 +#define TCA8418_NAME "tca8418_keypad" + +struct tca8418_keypad_platform_data { + const struct matrix_keymap_data *keymap_data; + unsigned rows; + unsigned cols; + bool rep; + bool irq_is_gpio; +}; + +#endif diff --git a/include/linux/input/touchscreen.h b/include/linux/input/touchscreen.h index fe66e2b58f..09d22ccb9e 100644 --- a/include/linux/input/touchscreen.h +++ b/include/linux/input/touchscreen.h @@ -1,6 +1,9 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright (c) 2014 Sebastian Reichel + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published by + * the Free Software Foundation. 
*/ #ifndef _TOUCHSCREEN_H diff --git a/include/linux/integrity.h b/include/linux/integrity.h index 2ea0f2f65a..c2d6082a1a 100644 --- a/include/linux/integrity.h +++ b/include/linux/integrity.h @@ -1,7 +1,10 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright (C) 2009 IBM Corporation * Author: Mimi Zohar + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, version 2 of the License. */ #ifndef _LINUX_INTEGRITY_H @@ -11,9 +14,7 @@ enum integrity_status { INTEGRITY_PASS = 0, - INTEGRITY_PASS_IMMUTABLE, INTEGRITY_FAIL, - INTEGRITY_FAIL_IMMUTABLE, INTEGRITY_NOLABEL, INTEGRITY_NOXATTRS, INTEGRITY_UNKNOWN, @@ -42,17 +43,4 @@ static inline void integrity_load_keys(void) } #endif /* CONFIG_INTEGRITY */ -#ifdef CONFIG_INTEGRITY_ASYMMETRIC_KEYS - -extern int integrity_kernel_module_request(char *kmod_name); - -#else - -static inline int integrity_kernel_module_request(char *kmod_name) -{ - return 0; -} - -#endif /* CONFIG_INTEGRITY_ASYMMETRIC_KEYS */ - #endif /* _LINUX_INTEGRITY_H */ diff --git a/include/linux/intel-iommu.h b/include/linux/intel-iommu.h index 05a65eb155..23e129ef67 100644 --- a/include/linux/intel-iommu.h +++ b/include/linux/intel-iommu.h @@ -1,10 +1,22 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright © 2006-2015, Intel Corporation. * * Authors: Ashok Raj * Anil S Keshavamurthy * David Woodhouse + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+ * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., 59 Temple + * Place - Suite 330, Boston, MA 02111-1307 USA. */ #ifndef _INTEL_IOMMU_H_ @@ -14,50 +26,16 @@ #include #include #include +#include #include #include -#include -#include -#include -#include -#include - #include #include -/* - * VT-d hardware uses 4KiB page size regardless of host page size. - */ -#define VTD_PAGE_SHIFT (12) -#define VTD_PAGE_SIZE (1UL << VTD_PAGE_SHIFT) -#define VTD_PAGE_MASK (((u64)-1) << VTD_PAGE_SHIFT) -#define VTD_PAGE_ALIGN(addr) (((addr) + VTD_PAGE_SIZE - 1) & VTD_PAGE_MASK) - -#define VTD_STRIDE_SHIFT (9) -#define VTD_STRIDE_MASK (((u64)-1) << VTD_STRIDE_SHIFT) - -#define DMA_PTE_READ BIT_ULL(0) -#define DMA_PTE_WRITE BIT_ULL(1) -#define DMA_PTE_LARGE_PAGE BIT_ULL(7) -#define DMA_PTE_SNP BIT_ULL(11) - -#define DMA_FL_PTE_PRESENT BIT_ULL(0) -#define DMA_FL_PTE_US BIT_ULL(2) -#define DMA_FL_PTE_ACCESS BIT_ULL(5) -#define DMA_FL_PTE_DIRTY BIT_ULL(6) -#define DMA_FL_PTE_XD BIT_ULL(63) - -#define ADDR_WIDTH_5LEVEL (57) -#define ADDR_WIDTH_4LEVEL (48) - -#define CONTEXT_TT_MULTI_LEVEL 0 -#define CONTEXT_TT_DEV_IOTLB 1 -#define CONTEXT_TT_PASS_THROUGH 2 -#define CONTEXT_PASIDE BIT_ULL(3) - /* * Intel IOMMU register specification per version 1.0 public spec. 
*/ + #define DMAR_VER_REG 0x0 /* Arch version supported by this IOMMU */ #define DMAR_CAP_REG 0x8 /* Hardware supported capabilities */ #define DMAR_ECAP_REG 0x10 /* Extended capabilities supported */ @@ -81,7 +59,6 @@ #define DMAR_IQ_SHIFT 4 /* Invalidation queue head/tail shift */ #define DMAR_IQA_REG 0x90 /* Invalidation queue addr register */ #define DMAR_ICS_REG 0x9c /* Invalidation complete status register */ -#define DMAR_IQER_REG 0xb0 /* Invalidation queue error record register */ #define DMAR_IRTA_REG 0xb8 /* Interrupt remapping table addr register */ #define DMAR_PQH_REG 0xc0 /* Page request queue head register */ #define DMAR_PQT_REG 0xc8 /* Page request queue tail register */ @@ -91,53 +68,27 @@ #define DMAR_PEDATA_REG 0xe4 /* Page request event interrupt data register */ #define DMAR_PEADDR_REG 0xe8 /* Page request event interrupt addr register */ #define DMAR_PEUADDR_REG 0xec /* Page request event Upper address register */ -#define DMAR_MTRRCAP_REG 0x100 /* MTRR capability register */ -#define DMAR_MTRRDEF_REG 0x108 /* MTRR default type register */ -#define DMAR_MTRR_FIX64K_00000_REG 0x120 /* MTRR Fixed range registers */ -#define DMAR_MTRR_FIX16K_80000_REG 0x128 -#define DMAR_MTRR_FIX16K_A0000_REG 0x130 -#define DMAR_MTRR_FIX4K_C0000_REG 0x138 -#define DMAR_MTRR_FIX4K_C8000_REG 0x140 -#define DMAR_MTRR_FIX4K_D0000_REG 0x148 -#define DMAR_MTRR_FIX4K_D8000_REG 0x150 -#define DMAR_MTRR_FIX4K_E0000_REG 0x158 -#define DMAR_MTRR_FIX4K_E8000_REG 0x160 -#define DMAR_MTRR_FIX4K_F0000_REG 0x168 -#define DMAR_MTRR_FIX4K_F8000_REG 0x170 -#define DMAR_MTRR_PHYSBASE0_REG 0x180 /* MTRR Variable range registers */ -#define DMAR_MTRR_PHYSMASK0_REG 0x188 -#define DMAR_MTRR_PHYSBASE1_REG 0x190 -#define DMAR_MTRR_PHYSMASK1_REG 0x198 -#define DMAR_MTRR_PHYSBASE2_REG 0x1a0 -#define DMAR_MTRR_PHYSMASK2_REG 0x1a8 -#define DMAR_MTRR_PHYSBASE3_REG 0x1b0 -#define DMAR_MTRR_PHYSMASK3_REG 0x1b8 -#define DMAR_MTRR_PHYSBASE4_REG 0x1c0 -#define DMAR_MTRR_PHYSMASK4_REG 0x1c8 
-#define DMAR_MTRR_PHYSBASE5_REG 0x1d0 -#define DMAR_MTRR_PHYSMASK5_REG 0x1d8 -#define DMAR_MTRR_PHYSBASE6_REG 0x1e0 -#define DMAR_MTRR_PHYSMASK6_REG 0x1e8 -#define DMAR_MTRR_PHYSBASE7_REG 0x1f0 -#define DMAR_MTRR_PHYSMASK7_REG 0x1f8 -#define DMAR_MTRR_PHYSBASE8_REG 0x200 -#define DMAR_MTRR_PHYSMASK8_REG 0x208 -#define DMAR_MTRR_PHYSBASE9_REG 0x210 -#define DMAR_MTRR_PHYSMASK9_REG 0x218 -#define DMAR_VCCAP_REG 0xe30 /* Virtual command capability register */ -#define DMAR_VCMD_REG 0xe00 /* Virtual command register */ -#define DMAR_VCRSP_REG 0xe10 /* Virtual command response register */ - -#define DMAR_IQER_REG_IQEI(reg) FIELD_GET(GENMASK_ULL(3, 0), reg) -#define DMAR_IQER_REG_ITESID(reg) FIELD_GET(GENMASK_ULL(47, 32), reg) -#define DMAR_IQER_REG_ICESID(reg) FIELD_GET(GENMASK_ULL(63, 48), reg) #define OFFSET_STRIDE (9) +#ifdef CONFIG_64BIT #define dmar_readq(a) readq(a) #define dmar_writeq(a,v) writeq(v,a) -#define dmar_readl(a) readl(a) -#define dmar_writel(a, v) writel(v, a) +#else +static inline u64 dmar_readq(void __iomem *addr) +{ + u32 lo, hi; + lo = readl(addr); + hi = readl(addr + 4); + return (((u64) hi) << 32) + lo; +} + +static inline void dmar_writeq(void __iomem *addr, u64 val) +{ + writel((u32)val, addr); + writel((u32)(val >> 32), addr + 4); +} +#endif #define DMAR_VER_MAJOR(v) (((v) & 0xf0) >> 4) #define DMAR_VER_MINOR(v) ((v) & 0x0f) @@ -145,9 +96,7 @@ /* * Decoding Capability Register */ -#define cap_5lp_support(c) (((c) >> 60) & 1) #define cap_pi_support(c) (((c) >> 59) & 1) -#define cap_fl1gp_support(c) (((c) >> 56) & 1) #define cap_read_drain(c) (((c) >> 55) & 1) #define cap_write_drain(c) (((c) >> 54) & 1) #define cap_max_amask_val(c) (((c) >> 48) & 0x3f) @@ -176,40 +125,28 @@ * Extended Capability Register */ -#define ecap_rps(e) (((e) >> 49) & 0x1) -#define ecap_smpwc(e) (((e) >> 48) & 0x1) -#define ecap_flts(e) (((e) >> 47) & 0x1) -#define ecap_slts(e) (((e) >> 46) & 0x1) -#define ecap_slads(e) (((e) >> 45) & 0x1) -#define ecap_vcs(e) (((e) 
>> 44) & 0x1) -#define ecap_smts(e) (((e) >> 43) & 0x1) -#define ecap_dit(e) (((e) >> 41) & 0x1) -#define ecap_pds(e) (((e) >> 42) & 0x1) -#define ecap_pasid(e) (((e) >> 40) & 0x1) -#define ecap_pss(e) (((e) >> 35) & 0x1f) -#define ecap_eafs(e) (((e) >> 34) & 0x1) -#define ecap_nwfs(e) (((e) >> 33) & 0x1) -#define ecap_srs(e) (((e) >> 31) & 0x1) -#define ecap_ers(e) (((e) >> 30) & 0x1) -#define ecap_prs(e) (((e) >> 29) & 0x1) -#define ecap_broken_pasid(e) (((e) >> 28) & 0x1) -#define ecap_dis(e) (((e) >> 27) & 0x1) -#define ecap_nest(e) (((e) >> 26) & 0x1) -#define ecap_mts(e) (((e) >> 25) & 0x1) -#define ecap_ecs(e) (((e) >> 24) & 0x1) +#define ecap_pasid(e) ((e >> 40) & 0x1) +#define ecap_pss(e) ((e >> 35) & 0x1f) +#define ecap_eafs(e) ((e >> 34) & 0x1) +#define ecap_nwfs(e) ((e >> 33) & 0x1) +#define ecap_srs(e) ((e >> 31) & 0x1) +#define ecap_ers(e) ((e >> 30) & 0x1) +#define ecap_prs(e) ((e >> 29) & 0x1) +#define ecap_broken_pasid(e) ((e >> 28) & 0x1) +#define ecap_dis(e) ((e >> 27) & 0x1) +#define ecap_nest(e) ((e >> 26) & 0x1) +#define ecap_mts(e) ((e >> 25) & 0x1) +#define ecap_ecs(e) ((e >> 24) & 0x1) #define ecap_iotlb_offset(e) ((((e) >> 8) & 0x3ff) * 16) #define ecap_max_iotlb_offset(e) (ecap_iotlb_offset(e) + 16) #define ecap_coherent(e) ((e) & 0x1) #define ecap_qis(e) ((e) & 0x2) -#define ecap_pass_through(e) (((e) >> 6) & 0x1) -#define ecap_eim_support(e) (((e) >> 4) & 0x1) -#define ecap_ir_support(e) (((e) >> 3) & 0x1) +#define ecap_pass_through(e) ((e >> 6) & 0x1) +#define ecap_eim_support(e) ((e >> 4) & 0x1) +#define ecap_ir_support(e) ((e >> 3) & 0x1) #define ecap_dev_iotlb_support(e) (((e) >> 2) & 0x1) -#define ecap_max_handle_mask(e) (((e) >> 20) & 0xf) -#define ecap_sc_support(e) (((e) >> 7) & 0x1) /* Snooping Control */ - -/* Virtual command interface capability */ -#define vccap_pasid(v) (((v) & DMA_VCS_PAS)) /* PASID allocation */ +#define ecap_max_handle_mask(e) ((e >> 20) & 0xf) +#define ecap_sc_support(e) ((e >> 7) & 0x1) /* Snooping 
Control */ /* IOTLB_REG */ #define DMA_TLB_FLUSH_GRANU_OFFSET 60 @@ -265,7 +202,6 @@ /* DMA_RTADDR_REG */ #define DMA_RTADDR_RTT (((u64)1) << 11) -#define DMA_RTADDR_SMT (((u64)1) << 10) /* CCMD_REG */ #define DMA_CCMD_ICC (((u64)1) << 63) @@ -284,12 +220,11 @@ #define DMA_FECTL_IM (((u32)1) << 31) /* FSTS_REG */ -#define DMA_FSTS_PFO (1 << 0) /* Primary Fault Overflow */ -#define DMA_FSTS_PPF (1 << 1) /* Primary Pending Fault */ -#define DMA_FSTS_IQE (1 << 4) /* Invalidation Queue Error */ -#define DMA_FSTS_ICE (1 << 5) /* Invalidation Completion Error */ -#define DMA_FSTS_ITE (1 << 6) /* Invalidation Time-out Error */ -#define DMA_FSTS_PRO (1 << 7) /* Page Request Overflow */ +#define DMA_FSTS_PPF ((u32)2) +#define DMA_FSTS_PFO ((u32)1) +#define DMA_FSTS_IQE (1 << 4) +#define DMA_FSTS_ICE (1 << 5) +#define DMA_FSTS_ITE (1 << 6) #define dma_fsts_fault_record_index(s) (((s) >> 8) & 0xff) /* FRCD_REG, 32 bits access */ @@ -297,16 +232,11 @@ #define dma_frcd_type(d) ((d >> 30) & 1) #define dma_frcd_fault_reason(c) (c & 0xff) #define dma_frcd_source_id(c) (c & 0xffff) -#define dma_frcd_pasid_value(c) (((c) >> 8) & 0xfffff) -#define dma_frcd_pasid_present(c) (((c) >> 31) & 1) /* low 64 bit */ #define dma_frcd_page_addr(d) (d & (((u64)-1) << PAGE_SHIFT)) /* PRS_REG */ #define DMA_PRS_PPR ((u32)1) -#define DMA_PRS_PRO ((u32)2) - -#define DMA_VCS_PAS ((u64)1) #define IOMMU_WAIT_OP(iommu, offset, op, cond, sts) \ do { \ @@ -347,8 +277,6 @@ enum { #define QI_IWD_STATUS_DATA(d) (((u64)d) << 32) #define QI_IWD_STATUS_WRITE (((u64)1) << 5) -#define QI_IWD_FENCE (((u64)1) << 6) -#define QI_IWD_PRQ_DRAIN (((u64)1) << 7) #define QI_IOTLB_DID(did) (((u64)did) << 16) #define QI_IOTLB_DR(dr) (((u64)dr) << 7) @@ -356,7 +284,7 @@ enum { #define QI_IOTLB_GRAN(gran) (((u64)gran) >> (DMA_TLB_FLUSH_GRANU_OFFSET-4)) #define QI_IOTLB_ADDR(addr) (((u64)addr) & VTD_PAGE_MASK) #define QI_IOTLB_IH(ih) (((u64)ih) << 6) -#define QI_IOTLB_AM(am) (((u8)am) & 0x3f) +#define QI_IOTLB_AM(am) 
(((u8)am)) #define QI_CC_FM(fm) (((u64)fm) << 48) #define QI_CC_SID(sid) (((u64)sid) << 32) @@ -366,8 +294,6 @@ enum { #define QI_DEV_IOTLB_SID(sid) ((u64)((sid) & 0xffff) << 32) #define QI_DEV_IOTLB_QDEP(qdep) (((qdep) & 0x1f) << 16) #define QI_DEV_IOTLB_ADDR(addr) ((u64)(addr) & VTD_PAGE_MASK) -#define QI_DEV_IOTLB_PFSID(pfsid) (((u64)(pfsid & 0xf) << 12) | \ - ((u64)((pfsid >> 4) & 0xfff) << 52)) #define QI_DEV_IOTLB_SIZE 1 #define QI_DEV_IOTLB_MAX_INVS 32 @@ -375,70 +301,62 @@ enum { #define QI_PC_DID(did) (((u64)did) << 16) #define QI_PC_GRAN(gran) (((u64)gran) << 4) -/* PASID cache invalidation granu */ -#define QI_PC_ALL_PASIDS 0 -#define QI_PC_PASID_SEL 1 -#define QI_PC_GLOBAL 3 +#define QI_PC_ALL_PASIDS (QI_PC_TYPE | QI_PC_GRAN(0)) +#define QI_PC_PASID_SEL (QI_PC_TYPE | QI_PC_GRAN(1)) #define QI_EIOTLB_ADDR(addr) ((u64)(addr) & VTD_PAGE_MASK) +#define QI_EIOTLB_GL(gl) (((u64)gl) << 7) #define QI_EIOTLB_IH(ih) (((u64)ih) << 6) -#define QI_EIOTLB_AM(am) (((u64)am) & 0x3f) +#define QI_EIOTLB_AM(am) (((u64)am)) #define QI_EIOTLB_PASID(pasid) (((u64)pasid) << 32) #define QI_EIOTLB_DID(did) (((u64)did) << 16) #define QI_EIOTLB_GRAN(gran) (((u64)gran) << 4) -/* QI Dev-IOTLB inv granu */ -#define QI_DEV_IOTLB_GRAN_ALL 1 -#define QI_DEV_IOTLB_GRAN_PASID_SEL 0 - #define QI_DEV_EIOTLB_ADDR(a) ((u64)(a) & VTD_PAGE_MASK) #define QI_DEV_EIOTLB_SIZE (((u64)1) << 11) -#define QI_DEV_EIOTLB_PASID(p) ((u64)((p) & 0xfffff) << 32) +#define QI_DEV_EIOTLB_GLOB(g) ((u64)g) +#define QI_DEV_EIOTLB_PASID(p) (((u64)p) << 32) #define QI_DEV_EIOTLB_SID(sid) ((u64)((sid) & 0xffff) << 16) #define QI_DEV_EIOTLB_QDEP(qd) ((u64)((qd) & 0x1f) << 4) -#define QI_DEV_EIOTLB_PFSID(pfsid) (((u64)(pfsid & 0xf) << 12) | \ - ((u64)((pfsid >> 4) & 0xfff) << 52)) #define QI_DEV_EIOTLB_MAX_INVS 32 -/* Page group response descriptor QW0 */ -#define QI_PGRP_PASID_P(p) (((u64)(p)) << 4) -#define QI_PGRP_PDP(p) (((u64)(p)) << 5) -#define QI_PGRP_RESP_CODE(res) (((u64)(res)) << 12) -#define 
QI_PGRP_DID(rid) (((u64)(rid)) << 16) +#define QI_PGRP_IDX(idx) (((u64)(idx)) << 55) +#define QI_PGRP_PRIV(priv) (((u64)(priv)) << 32) +#define QI_PGRP_RESP_CODE(res) ((u64)(res)) #define QI_PGRP_PASID(pasid) (((u64)(pasid)) << 32) +#define QI_PGRP_DID(did) (((u64)(did)) << 16) +#define QI_PGRP_PASID_P(p) (((u64)(p)) << 4) -/* Page group response descriptor QW1 */ -#define QI_PGRP_LPIG(x) (((u64)(x)) << 2) -#define QI_PGRP_IDX(idx) (((u64)(idx)) << 3) - +#define QI_PSTRM_ADDR(addr) (((u64)(addr)) & VTD_PAGE_MASK) +#define QI_PSTRM_DEVFN(devfn) (((u64)(devfn)) << 4) +#define QI_PSTRM_RESP_CODE(res) ((u64)(res)) +#define QI_PSTRM_IDX(idx) (((u64)(idx)) << 55) +#define QI_PSTRM_PRIV(priv) (((u64)(priv)) << 32) +#define QI_PSTRM_BUS(bus) (((u64)(bus)) << 24) +#define QI_PSTRM_PASID(pasid) (((u64)(pasid)) << 4) #define QI_RESP_SUCCESS 0x0 #define QI_RESP_INVALID 0x1 #define QI_RESP_FAILURE 0xf +#define QI_GRAN_ALL_ALL 0 +#define QI_GRAN_NONG_ALL 1 #define QI_GRAN_NONG_PASID 2 #define QI_GRAN_PSI_PASID 3 -#define qi_shift(iommu) (DMAR_IQ_SHIFT + !!ecap_smts((iommu)->ecap)) - struct qi_desc { - u64 qw0; - u64 qw1; - u64 qw2; - u64 qw3; + u64 low, high; }; struct q_inval { raw_spinlock_t q_lock; - void *desc; /* invalidation queue */ + struct qi_desc *desc; /* invalidation queue */ int *desc_status; /* desc status */ int free_head; /* first free entry */ int free_tail; /* last free entry */ int free_cnt; }; -struct dmar_pci_notify_info; - #ifdef CONFIG_IRQ_REMAP /* 1MB - maximum possible interrupt remapping table size */ #define INTR_REMAP_PAGE_ORDER 8 @@ -453,11 +371,6 @@ struct ir_table { struct irte *base; unsigned long *bitmap; }; - -void intel_irq_remap_add_device(struct dmar_pci_notify_info *info); -#else -static inline void -intel_irq_remap_add_device(struct dmar_pci_notify_info *info) { } #endif struct iommu_flush { @@ -477,111 +390,17 @@ enum { #define VTD_FLAG_TRANS_PRE_ENABLED (1 << 0) #define VTD_FLAG_IRQ_REMAP_PRE_ENABLED (1 << 1) -#define VTD_FLAG_SVM_CAPABLE 
(1 << 2) - -extern int intel_iommu_sm; -extern spinlock_t device_domain_lock; - -#define sm_supported(iommu) (intel_iommu_sm && ecap_smts((iommu)->ecap)) -#define pasid_supported(iommu) (sm_supported(iommu) && \ - ecap_pasid((iommu)->ecap)) struct pasid_entry; struct pasid_state_entry; struct page_req_dsc; -/* - * 0: Present - * 1-11: Reserved - * 12-63: Context Ptr (12 - (haw-1)) - * 64-127: Reserved - */ -struct root_entry { - u64 lo; - u64 hi; -}; - -/* - * low 64 bits: - * 0: present - * 1: fault processing disable - * 2-3: translation type - * 12-63: address space root - * high 64 bits: - * 0-2: address width - * 3-6: aval - * 8-23: domain id - */ -struct context_entry { - u64 lo; - u64 hi; -}; - -/* si_domain contains mulitple devices */ -#define DOMAIN_FLAG_STATIC_IDENTITY BIT(0) - -/* - * When VT-d works in the scalable mode, it allows DMA translation to - * happen through either first level or second level page table. This - * bit marks that the DMA translation for the domain goes through the - * first level page table, otherwise, it goes through the second level. - */ -#define DOMAIN_FLAG_USE_FIRST_LEVEL BIT(1) - -/* - * Domain represents a virtual machine which demands iommu nested - * translation mode support. - */ -#define DOMAIN_FLAG_NESTING_MODE BIT(2) - -struct dmar_domain { - int nid; /* node id */ - - unsigned int iommu_refcnt[DMAR_UNITS_SUPPORTED]; - /* Refcount of devices per iommu */ - - - u16 iommu_did[DMAR_UNITS_SUPPORTED]; - /* Domain ids per IOMMU. 
Use u16 since - * domain ids are 16 bit wide according - * to VT-d spec, section 9.3 */ - - u8 has_iotlb_device: 1; - u8 iommu_coherency: 1; /* indicate coherency of iommu access */ - u8 iommu_snooping: 1; /* indicate snooping control feature */ - - struct list_head devices; /* all devices' list */ - struct list_head subdevices; /* all subdevices' list */ - struct iova_domain iovad; /* iova's that belong to this domain */ - - struct dma_pte *pgd; /* virtual address */ - int gaw; /* max guest address width */ - - /* adjusted guest address width, 0 is level 2 30-bit */ - int agaw; - - int flags; /* flags to find out type of domain */ - int iommu_superpage;/* Level of superpages supported: - 0 == 4KiB (no superpages), 1 == 2MiB, - 2 == 1GiB, 3 == 512GiB, 4 == 1TiB */ - u64 max_addr; /* maximum mapped address */ - - u32 default_pasid; /* - * The default pasid used for non-SVM - * traffic on mediated devices. - */ - - struct iommu_domain domain; /* generic domain data structure for - iommu core */ -}; - struct intel_iommu { void __iomem *reg; /* Pointer to hardware regs, virtual addr */ u64 reg_phys; /* physical address of hw register set */ u64 reg_size; /* size of hw register set */ u64 cap; u64 ecap; - u64 vccap; u32 gcmd; /* Holds TE, EAFL. Don't need SRTP, SFL, WBF */ raw_spinlock_t register_lock; /* protect register handling */ int seq_id; /* sequence id of the iommu */ @@ -600,13 +419,18 @@ struct intel_iommu { struct iommu_flush flush; #endif #ifdef CONFIG_INTEL_IOMMU_SVM + /* These are large and need to be contiguous, so we allocate just + * one for now. We'll maybe want to rethink that if we truly give + * devices away to userspace processes (e.g. for DPDK) and don't + * want to trust that userspace will use *only* the PASID it was + * told to. But while it's all driver-arbitrated, we're fine. 
*/ + struct pasid_entry *pasid_table; + struct pasid_state_entry *pasid_state_table; struct page_req_dsc *prq; unsigned char prq_name[16]; /* Name for PRQ interrupt */ - struct completion prq_complete; - struct ioasid_allocator_ops pasid_allocator; /* Custom allocator for PASIDs */ + struct idr pasid_idr; + u32 pasid_max; #endif - struct iopf_queue *iopf_queue; - unsigned char iopfq_name[16]; struct q_inval *qi; /* Queued invalidation info */ u32 *iommu_state; /* Store iommu states between suspend and resume.*/ @@ -615,45 +439,9 @@ struct intel_iommu { struct irq_domain *ir_domain; struct irq_domain *ir_msi_domain; #endif - struct iommu_device iommu; /* IOMMU core code handle */ + struct device *iommu_dev; /* IOMMU-sysfs device */ int node; u32 flags; /* Software defined flags */ - - struct dmar_drhd_unit *drhd; - void *perf_statistic; -}; - -/* Per subdevice private data */ -struct subdev_domain_info { - struct list_head link_phys; /* link to phys device siblings */ - struct list_head link_domain; /* link to domain siblings */ - struct device *pdev; /* physical device derived from */ - struct dmar_domain *domain; /* aux-domain */ - int users; /* user count */ -}; - -/* PCI domain-device relationship */ -struct device_domain_info { - struct list_head link; /* link to domain siblings */ - struct list_head global; /* link to global list */ - struct list_head table; /* link to pasid table */ - struct list_head subdevices; /* subdevices sibling */ - u32 segment; /* PCI segment number */ - u8 bus; /* PCI bus number */ - u8 devfn; /* PCI devfn number */ - u16 pfsid; /* SRIOV physical function source ID */ - u8 pasid_supported:3; - u8 pasid_enabled:1; - u8 pri_supported:1; - u8 pri_enabled:1; - u8 ats_supported:1; - u8 ats_enabled:1; - u8 auxd_enabled:1; /* Multiple domains per device */ - u8 ats_qdep; - struct device *dev; /* it's NULL for PCIe-to-PCI bridge */ - struct intel_iommu *iommu; /* IOMMU used by this device */ - struct dmar_domain *domain; /* pointer to domain 
*/ - struct pasid_table *pasid_table; /* pasid table */ }; static inline void __iommu_flush_cache( @@ -663,56 +451,6 @@ static inline void __iommu_flush_cache( clflush_cache_range(addr, size); } -/* Convert generic struct iommu_domain to private struct dmar_domain */ -static inline struct dmar_domain *to_dmar_domain(struct iommu_domain *dom) -{ - return container_of(dom, struct dmar_domain, domain); -} - -/* - * 0: readable - * 1: writable - * 2-6: reserved - * 7: super page - * 8-10: available - * 11: snoop behavior - * 12-63: Host physical address - */ -struct dma_pte { - u64 val; -}; - -static inline void dma_clear_pte(struct dma_pte *pte) -{ - pte->val = 0; -} - -static inline u64 dma_pte_addr(struct dma_pte *pte) -{ -#ifdef CONFIG_64BIT - return pte->val & VTD_PAGE_MASK & (~DMA_FL_PTE_XD); -#else - /* Must have a full atomic 64-bit read */ - return __cmpxchg64(&pte->val, 0ULL, 0ULL) & - VTD_PAGE_MASK & (~DMA_FL_PTE_XD); -#endif -} - -static inline bool dma_pte_present(struct dma_pte *pte) -{ - return (pte->val & 3) != 0; -} - -static inline bool dma_pte_superpage(struct dma_pte *pte) -{ - return (pte->val & DMA_PTE_LARGE_PAGE); -} - -static inline int first_pte_in_page(struct dma_pte *pte) -{ - return !((unsigned long)pte & ~VTD_PAGE_MASK); -} - extern struct dmar_drhd_unit * dmar_find_matched_drhd_unit(struct pci_dev *dev); extern int dmar_find_matched_atsr_unit(struct pci_dev *dev); @@ -725,61 +463,26 @@ extern void qi_flush_context(struct intel_iommu *iommu, u16 did, u16 sid, u8 fm, u64 type); extern void qi_flush_iotlb(struct intel_iommu *iommu, u16 did, u64 addr, unsigned int size_order, u64 type); -extern void qi_flush_dev_iotlb(struct intel_iommu *iommu, u16 sid, u16 pfsid, - u16 qdep, u64 addr, unsigned mask); +extern void qi_flush_dev_iotlb(struct intel_iommu *iommu, u16 sid, u16 qdep, + u64 addr, unsigned mask); -void qi_flush_piotlb(struct intel_iommu *iommu, u16 did, u32 pasid, u64 addr, - unsigned long npages, bool ih); - -void 
qi_flush_dev_iotlb_pasid(struct intel_iommu *iommu, u16 sid, u16 pfsid, - u32 pasid, u16 qdep, u64 addr, - unsigned int size_order); -void qi_flush_pasid_cache(struct intel_iommu *iommu, u16 did, u64 granu, - u32 pasid); - -int qi_submit_sync(struct intel_iommu *iommu, struct qi_desc *desc, - unsigned int count, unsigned long options); -/* - * Options used in qi_submit_sync: - * QI_OPT_WAIT_DRAIN - Wait for PRQ drain completion, spec 6.5.2.8. - */ -#define QI_OPT_WAIT_DRAIN BIT(0) +extern int qi_submit_sync(struct qi_desc *desc, struct intel_iommu *iommu); extern int dmar_ir_support(void); -void *alloc_pgtable_page(int node); -void free_pgtable_page(void *vaddr); -struct intel_iommu *domain_get_iommu(struct dmar_domain *domain); -int for_each_device_domain(int (*fn)(struct device_domain_info *info, - void *data), void *data); -void iommu_flush_write_buffer(struct intel_iommu *iommu); -int intel_iommu_enable_pasid(struct intel_iommu *iommu, struct device *dev); -struct dmar_domain *find_domain(struct device *dev); -struct device_domain_info *get_domain_info(struct device *dev); -struct intel_iommu *device_to_iommu(struct device *dev, u8 *bus, u8 *devfn); - #ifdef CONFIG_INTEL_IOMMU_SVM -extern void intel_svm_check(struct intel_iommu *iommu); +extern int intel_svm_alloc_pasid_tables(struct intel_iommu *iommu); +extern int intel_svm_free_pasid_tables(struct intel_iommu *iommu); extern int intel_svm_enable_prq(struct intel_iommu *iommu); extern int intel_svm_finish_prq(struct intel_iommu *iommu); -int intel_svm_bind_gpasid(struct iommu_domain *domain, struct device *dev, - struct iommu_gpasid_bind_data *data); -int intel_svm_unbind_gpasid(struct device *dev, u32 pasid); -struct iommu_sva *intel_svm_bind(struct device *dev, struct mm_struct *mm, - void *drvdata); -void intel_svm_unbind(struct iommu_sva *handle); -u32 intel_svm_get_pasid(struct iommu_sva *handle); -int intel_svm_page_response(struct device *dev, struct iommu_fault_event *evt, - struct iommu_page_response 
*msg); + +struct svm_dev_ops; struct intel_svm_dev { struct list_head list; struct rcu_head rcu; struct device *dev; - struct intel_iommu *iommu; - struct iommu_sva sva; - unsigned long prq_seq_number; - u32 pasid; + struct svm_dev_ops *ops; int users; u16 did; u16 dev_iotlb:1; @@ -789,72 +492,16 @@ struct intel_svm_dev { struct intel_svm { struct mmu_notifier notifier; struct mm_struct *mm; - - unsigned int flags; - u32 pasid; - int gpasid; /* In case that guest PASID is different from host PASID */ + struct intel_iommu *iommu; + int flags; + int pasid; struct list_head devs; }; -#else -static inline void intel_svm_check(struct intel_iommu *iommu) {} -#endif -#ifdef CONFIG_INTEL_IOMMU_DEBUGFS -void intel_iommu_debugfs_init(void); -#else -static inline void intel_iommu_debugfs_init(void) {} -#endif /* CONFIG_INTEL_IOMMU_DEBUGFS */ +extern int intel_iommu_enable_pasid(struct intel_iommu *iommu, struct intel_svm_dev *sdev); +extern struct intel_iommu *intel_svm_device_to_iommu(struct device *dev); +#endif extern const struct attribute_group *intel_iommu_groups[]; -bool context_present(struct context_entry *context); -struct context_entry *iommu_context_addr(struct intel_iommu *iommu, u8 bus, - u8 devfn, int alloc); - -#ifdef CONFIG_INTEL_IOMMU -extern int iommu_calculate_agaw(struct intel_iommu *iommu); -extern int iommu_calculate_max_sagaw(struct intel_iommu *iommu); -extern int dmar_disabled; -extern int intel_iommu_enabled; -extern int intel_iommu_gfx_mapped; -#else -static inline int iommu_calculate_agaw(struct intel_iommu *iommu) -{ - return 0; -} -static inline int iommu_calculate_max_sagaw(struct intel_iommu *iommu) -{ - return 0; -} -#define dmar_disabled (1) -#define intel_iommu_enabled (0) -#endif - -static inline const char *decode_prq_descriptor(char *str, size_t size, - u64 dw0, u64 dw1, u64 dw2, u64 dw3) -{ - char *buf = str; - int bytes; - - bytes = snprintf(buf, size, - "rid=0x%llx addr=0x%llx %c%c%c%c%c pasid=0x%llx index=0x%llx", - 
FIELD_GET(GENMASK_ULL(31, 16), dw0), - FIELD_GET(GENMASK_ULL(63, 12), dw1), - dw1 & BIT_ULL(0) ? 'r' : '-', - dw1 & BIT_ULL(1) ? 'w' : '-', - dw0 & BIT_ULL(52) ? 'x' : '-', - dw0 & BIT_ULL(53) ? 'p' : '-', - dw1 & BIT_ULL(2) ? 'l' : '-', - FIELD_GET(GENMASK_ULL(51, 32), dw0), - FIELD_GET(GENMASK_ULL(11, 3), dw1)); - - /* Private Data */ - if (dw0 & BIT_ULL(9)) { - size -= bytes; - buf += bytes; - snprintf(buf, size, " private=0x%llx/0x%llx\n", dw2, dw3); - } - - return str; -} #endif diff --git a/include/linux/intel-svm.h b/include/linux/intel-svm.h index 57cceecbe3..3c25794042 100644 --- a/include/linux/intel-svm.h +++ b/include/linux/intel-svm.h @@ -1,23 +1,44 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright © 2015 Intel Corporation. * * Authors: David Woodhouse + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. */ #ifndef __INTEL_SVM_H__ #define __INTEL_SVM_H__ +struct device; + +struct svm_dev_ops { + void (*fault_cb)(struct device *dev, int pasid, u64 address, + u32 private, int rwxp, int response); +}; + /* Values for rxwp in fault_cb callback */ #define SVM_REQ_READ (1<<3) #define SVM_REQ_WRITE (1<<2) #define SVM_REQ_EXEC (1<<1) #define SVM_REQ_PRIV (1<<0) -/* Page Request Queue depth */ -#define PRQ_ORDER 2 -#define PRQ_RING_MASK ((0x1000 << PRQ_ORDER) - 0x20) -#define PRQ_DEPTH ((0x1000 << PRQ_ORDER) >> 5) + +/* + * The SVM_FLAG_PRIVATE_PASID flag requests a PASID which is *not* the "main" + * PASID for the current process. Even if a PASID already exists, a new one + * will be allocated. 
And the PASID allocated with SVM_FLAG_PRIVATE_PASID + * will not be given to subsequent callers. This facility allows a driver to + * disambiguate between multiple device contexts which access the same MM, + * if there is no other way to do so. It should be used sparingly, if at all. + */ +#define SVM_FLAG_PRIVATE_PASID (1<<0) /* * The SVM_FLAG_SUPERVISOR_MODE flag requests a PASID which can be used only @@ -30,18 +51,71 @@ * It is unlikely that we will ever hook into flush_tlb_kernel_range() to * do such IOTLB flushes automatically. */ -#define SVM_FLAG_SUPERVISOR_MODE BIT(0) -/* - * The SVM_FLAG_GUEST_MODE flag is used when a PASID bind is for guest - * processes. Compared to the host bind, the primary differences are: - * 1. mm life cycle management - * 2. fault reporting +#define SVM_FLAG_SUPERVISOR_MODE (1<<1) + +#ifdef CONFIG_INTEL_IOMMU_SVM + +/** + * intel_svm_bind_mm() - Bind the current process to a PASID + * @dev: Device to be granted acccess + * @pasid: Address for allocated PASID + * @flags: Flags. Later for requesting supervisor mode, etc. + * @ops: Callbacks to device driver + * + * This function attempts to enable PASID support for the given device. + * If the @pasid argument is non-%NULL, a PASID is allocated for access + * to the MM of the current process. + * + * By using a %NULL value for the @pasid argument, this function can + * be used to simply validate that PASID support is available for the + * given device — i.e. that it is behind an IOMMU which has the + * requisite support, and is enabled. + * + * Page faults are handled transparently by the IOMMU code, and there + * should be no need for the device driver to be involved. If a page + * fault cannot be handled (i.e. is an invalid address rather than + * just needs paging in), then the page request will be completed by + * the core IOMMU code with appropriate status, and the device itself + * can then report the resulting fault to its driver via whatever + * mechanism is appropriate. 
+ * + * Multiple calls from the same process may result in the same PASID + * being re-used. A reference count is kept. */ -#define SVM_FLAG_GUEST_MODE BIT(1) -/* - * The SVM_FLAG_GUEST_PASID flag is used when a guest has its own PASID space, - * which requires guest and host PASID translation at both directions. +extern int intel_svm_bind_mm(struct device *dev, int *pasid, int flags, + struct svm_dev_ops *ops); + +/** + * intel_svm_unbind_mm() - Unbind a specified PASID + * @dev: Device for which PASID was allocated + * @pasid: PASID value to be unbound + * + * This function allows a PASID to be retired when the device no + * longer requires access to the address space of a given process. + * + * If the use count for the PASID in question reaches zero, the + * PASID is revoked and may no longer be used by hardware. + * + * Device drivers are required to ensure that no access (including + * page requests) is currently outstanding for the PASID in question, + * before calling this function. 
*/ -#define SVM_FLAG_GUEST_PASID BIT(2) +extern int intel_svm_unbind_mm(struct device *dev, int pasid); + +#else /* CONFIG_INTEL_IOMMU_SVM */ + +static inline int intel_svm_bind_mm(struct device *dev, int *pasid, + int flags, struct svm_dev_ops *ops) +{ + return -ENOSYS; +} + +static inline int intel_svm_unbind_mm(struct device *dev, int pasid) +{ + BUG(); +} +#endif /* CONFIG_INTEL_IOMMU_SVM */ + +#define intel_svm_available(dev) (!intel_svm_bind_mm((dev), NULL, 0, NULL)) #endif /* __INTEL_SVM_H__ */ diff --git a/include/linux/intel_pmic_gpio.h b/include/linux/intel_pmic_gpio.h new file mode 100644 index 0000000000..920109a291 --- /dev/null +++ b/include/linux/intel_pmic_gpio.h @@ -0,0 +1,15 @@ +#ifndef LINUX_INTEL_PMIC_H +#define LINUX_INTEL_PMIC_H + +struct intel_pmic_gpio_platform_data { + /* the first IRQ of the chip */ + unsigned irq_base; + /* number assigned to the first GPIO */ + unsigned gpio_base; + /* sram address for gpiointr register, the langwell chip will map + * the PMIC spi GPIO expander's GPIOINTR register in sram. 
+ */ + unsigned gpiointr; +}; + +#endif diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h index 1f22a30c09..5981526ff2 100644 --- a/include/linux/interrupt.h +++ b/include/linux/interrupt.h @@ -1,10 +1,11 @@ -/* SPDX-License-Identifier: GPL-2.0 */ /* interrupt.h */ #ifndef _LINUX_INTERRUPT_H #define _LINUX_INTERRUPT_H #include +#include #include +#include #include #include #include @@ -13,12 +14,10 @@ #include #include #include -#include #include #include #include -#include /* * These correspond to the IORESOURCE_IRQ_* defines in @@ -46,14 +45,14 @@ * IRQF_PERCPU - Interrupt is per cpu * IRQF_NOBALANCING - Flag to exclude this interrupt from irq balancing * IRQF_IRQPOLL - Interrupt is used for polling (only the interrupt that is - * registered first in a shared interrupt is considered for + * registered first in an shared interrupt is considered for * performance reasons) * IRQF_ONESHOT - Interrupt is not reenabled after the hardirq handler finished. * Used by threaded interrupts which need to keep the * irq line disabled until the threaded handler has been run. * IRQF_NO_SUSPEND - Do not disable this IRQ during suspend. Does not guarantee * that this interrupt will wake the system from a suspended - * state. See Documentation/power/suspend-and-interrupts.rst + * state. See Documentation/power/suspend-and-interrupts.txt * IRQF_FORCE_RESUME - Force enable it on resume even if IRQF_NO_SUSPEND is set * IRQF_NO_THREAD - Interrupt cannot be threaded * IRQF_EARLY_RESUME - Resume IRQ early during syscore instead of at device @@ -62,11 +61,6 @@ * interrupt handler after suspending interrupts. For system * wakeup devices users need to implement wakeup detection in * their interrupt handlers. - * IRQF_NO_AUTOEN - Don't enable IRQ or NMI automatically when users request it. - * Users will enable it explicitly by enable_irq() or enable_nmi() - * later. - * IRQF_NO_DEBUG - Exclude from runnaway detection for IPI and similar handlers, - * depends on IRQF_PERCPU. 
*/ #define IRQF_SHARED 0x00000080 #define IRQF_PROBE_SHARED 0x00000100 @@ -80,8 +74,6 @@ #define IRQF_NO_THREAD 0x00010000 #define IRQF_EARLY_RESUME 0x00020000 #define IRQF_COND_SUSPEND 0x00040000 -#define IRQF_NO_AUTOEN 0x00080000 -#define IRQF_NO_DEBUG 0x00100000 #define IRQF_TIMER (__IRQF_TIMER | IRQF_NO_SUSPEND | IRQF_NO_THREAD) @@ -148,19 +140,6 @@ request_threaded_irq(unsigned int irq, irq_handler_t handler, irq_handler_t thread_fn, unsigned long flags, const char *name, void *dev); -/** - * request_irq - Add a handler for an interrupt line - * @irq: The interrupt line to allocate - * @handler: Function to be called when the IRQ occurs. - * Primary handler for threaded interrupts - * If NULL, the default primary handler is installed - * @flags: Handling flags - * @name: Name of the device generating this interrupt - * @dev: A cookie passed to the handler function - * - * This call allocates an interrupt and establishes a handler; see - * the documentation for request_threaded_irq() for details. 
- */ static inline int __must_check request_irq(unsigned int irq, irq_handler_t handler, unsigned long flags, const char *name, void *dev) @@ -173,32 +152,12 @@ request_any_context_irq(unsigned int irq, irq_handler_t handler, unsigned long flags, const char *name, void *dev_id); extern int __must_check -__request_percpu_irq(unsigned int irq, irq_handler_t handler, - unsigned long flags, const char *devname, - void __percpu *percpu_dev_id); - -extern int __must_check -request_nmi(unsigned int irq, irq_handler_t handler, unsigned long flags, - const char *name, void *dev); - -static inline int __must_check request_percpu_irq(unsigned int irq, irq_handler_t handler, - const char *devname, void __percpu *percpu_dev_id) -{ - return __request_percpu_irq(irq, handler, 0, - devname, percpu_dev_id); -} + const char *devname, void __percpu *percpu_dev_id); -extern int __must_check -request_percpu_nmi(unsigned int irq, irq_handler_t handler, - const char *devname, void __percpu *dev); - -extern const void *free_irq(unsigned int, void *); +extern void free_irq(unsigned int, void *); extern void free_percpu_irq(unsigned int, void __percpu *); -extern const void *free_nmi(unsigned int irq, void *dev_id); -extern void free_percpu_nmi(unsigned int irq, void __percpu *percpu_dev_id); - struct device; extern int __must_check @@ -240,7 +199,6 @@ extern void devm_free_irq(struct device *dev, unsigned int irq, void *dev_id); # define local_irq_enable_in_hardirq() local_irq_enable() #endif -bool irq_has_action(unsigned int irq); extern void disable_irq_nosync(unsigned int irq); extern bool disable_hardirq(unsigned int irq); extern void disable_irq(unsigned int irq); @@ -250,19 +208,9 @@ extern void enable_percpu_irq(unsigned int irq, unsigned int type); extern bool irq_percpu_is_enabled(unsigned int irq); extern void irq_wake_thread(unsigned int irq, void *dev_id); -extern void disable_nmi_nosync(unsigned int irq); -extern void disable_percpu_nmi(unsigned int irq); -extern void 
enable_nmi(unsigned int irq); -extern void enable_percpu_nmi(unsigned int irq, unsigned int type); -extern int prepare_percpu_nmi(unsigned int irq); -extern void teardown_percpu_nmi(unsigned int irq); - -extern int irq_inject_interrupt(unsigned int irq); - /* The following three functions are for the core kernel use only. */ extern void suspend_device_irqs(void); extern void resume_device_irqs(void); -extern void rearm_wake_irq(unsigned int irq); /** * struct irq_affinity_notify - context for notification of IRQ affinity changes @@ -284,63 +232,54 @@ struct irq_affinity_notify { void (*release)(struct kref *ref); }; -#define IRQ_AFFINITY_MAX_SETS 4 - -/** - * struct irq_affinity - Description for automatic irq affinity assignements - * @pre_vectors: Don't apply affinity to @pre_vectors at beginning of - * the MSI(-X) vector space - * @post_vectors: Don't apply affinity to @post_vectors at end of - * the MSI(-X) vector space - * @nr_sets: The number of interrupt sets for which affinity - * spreading is required - * @set_size: Array holding the size of each interrupt set - * @calc_sets: Callback for calculating the number and size - * of interrupt sets - * @priv: Private data for usage by @calc_sets, usually a - * pointer to driver/device specific data. 
- */ -struct irq_affinity { - unsigned int pre_vectors; - unsigned int post_vectors; - unsigned int nr_sets; - unsigned int set_size[IRQ_AFFINITY_MAX_SETS]; - void (*calc_sets)(struct irq_affinity *, unsigned int nvecs); - void *priv; -}; - -/** - * struct irq_affinity_desc - Interrupt affinity descriptor - * @mask: cpumask to hold the affinity assignment - * @is_managed: 1 if the interrupt is managed internally - */ -struct irq_affinity_desc { - struct cpumask mask; - unsigned int is_managed : 1; -}; - #if defined(CONFIG_SMP) extern cpumask_var_t irq_default_affinity; -extern int irq_set_affinity(unsigned int irq, const struct cpumask *cpumask); -extern int irq_force_affinity(unsigned int irq, const struct cpumask *cpumask); +/* Internal implementation. Use the helpers below */ +extern int __irq_set_affinity(unsigned int irq, const struct cpumask *cpumask, + bool force); + +/** + * irq_set_affinity - Set the irq affinity of a given irq + * @irq: Interrupt to set affinity + * @cpumask: cpumask + * + * Fails if cpumask does not contain an online CPU + */ +static inline int +irq_set_affinity(unsigned int irq, const struct cpumask *cpumask) +{ + return __irq_set_affinity(irq, cpumask, false); +} + +/** + * irq_force_affinity - Force the irq affinity of a given irq + * @irq: Interrupt to set affinity + * @cpumask: cpumask + * + * Same as irq_set_affinity, but without checking the mask against + * online cpus. + * + * Solely for low level cpu hotplug code, where we need to make per + * cpu interrupts affine before the cpu becomes online. 
+ */ +static inline int +irq_force_affinity(unsigned int irq, const struct cpumask *cpumask) +{ + return __irq_set_affinity(irq, cpumask, true); +} extern int irq_can_set_affinity(unsigned int irq); extern int irq_select_affinity(unsigned int irq); extern int irq_set_affinity_hint(unsigned int irq, const struct cpumask *m); -extern int irq_update_affinity_desc(unsigned int irq, - struct irq_affinity_desc *affinity); extern int irq_set_affinity_notifier(unsigned int irq, struct irq_affinity_notify *notify); -struct irq_affinity_desc * -irq_create_affinity_masks(unsigned int nvec, struct irq_affinity *affd); - -unsigned int irq_calc_affinity_vectors(unsigned int minvec, unsigned int maxvec, - const struct irq_affinity *affd); +struct cpumask *irq_create_affinity_masks(const struct cpumask *affinity, int nvec); +int irq_calc_affinity_vectors(const struct cpumask *affinity, int maxvec); #else /* CONFIG_SMP */ @@ -367,27 +306,20 @@ static inline int irq_set_affinity_hint(unsigned int irq, return -EINVAL; } -static inline int irq_update_affinity_desc(unsigned int irq, - struct irq_affinity_desc *affinity) -{ - return -EINVAL; -} - static inline int irq_set_affinity_notifier(unsigned int irq, struct irq_affinity_notify *notify) { return 0; } -static inline struct irq_affinity_desc * -irq_create_affinity_masks(unsigned int nvec, struct irq_affinity *affd) +static inline struct cpumask * +irq_create_affinity_masks(const struct cpumask *affinity, int nvec) { return NULL; } -static inline unsigned int -irq_calc_affinity_vectors(unsigned int minvec, unsigned int maxvec, - const struct irq_affinity *affd) +static inline int +irq_calc_affinity_vectors(const struct cpumask *affinity, int maxvec) { return maxvec; } @@ -474,28 +406,16 @@ extern int irq_set_irqchip_state(unsigned int irq, enum irqchip_irq_state which, bool state); #ifdef CONFIG_IRQ_FORCED_THREADING -# ifdef CONFIG_PREEMPT_RT -# define force_irqthreads() (true) -# else -DECLARE_STATIC_KEY_FALSE(force_irqthreads_key); 
-# define force_irqthreads() (static_branch_unlikely(&force_irqthreads_key)) -# endif +extern bool force_irqthreads; #else -#define force_irqthreads() (false) +#define force_irqthreads (0) #endif -#ifndef local_softirq_pending - -#ifndef local_softirq_pending_ref -#define local_softirq_pending_ref irq_stat.__softirq_pending +#ifndef __ARCH_SET_SOFTIRQ_PENDING +#define set_softirq_pending(x) (local_softirq_pending() = (x)) +#define or_softirq_pending(x) (local_softirq_pending() |= (x)) #endif -#define local_softirq_pending() (__this_cpu_read(local_softirq_pending_ref)) -#define set_softirq_pending(x) (__this_cpu_write(local_softirq_pending_ref, (x))) -#define or_softirq_pending(x) (__this_cpu_or(local_softirq_pending_ref, (x))) - -#endif /* local_softirq_pending */ - /* Some architectures might implement lazy enabling/disabling of * interrupts. In some cases, such as stop_machine, we might want * to ensure that after a local_irq_disable(), interrupts have @@ -522,7 +442,8 @@ enum IRQ_POLL_SOFTIRQ, TASKLET_SOFTIRQ, SCHED_SOFTIRQ, - HRTIMER_SOFTIRQ, + HRTIMER_SOFTIRQ, /* Unused, but kept as tools rely on the + numbering. Sigh! */ RCU_SOFTIRQ, /* Preferable RCU should always be the last softirq */ NR_SOFTIRQS @@ -541,13 +462,22 @@ extern const char * const softirq_to_name[NR_SOFTIRQS]; struct softirq_action { - void (*action)(struct softirq_action *); -}; + void (*action)(void); +} __no_const; asmlinkage void do_softirq(void); asmlinkage void __do_softirq(void); -extern void open_softirq(int nr, void (*action)(struct softirq_action *)); +#ifdef __ARCH_HAS_DO_SOFTIRQ +void do_softirq_own_stack(void); +#else +static inline void do_softirq_own_stack(void) +{ + __do_softirq(); +} +#endif + +extern void open_softirq(int nr, void (*action)(void)); extern void softirq_init(void); extern void __raise_softirq_irqoff(unsigned int nr); @@ -563,9 +493,6 @@ static inline struct task_struct *this_cpu_ksoftirqd(void) /* Tasklets --- multithreaded analogue of BHs. 
- This API is deprecated. Please consider using threaded IRQs instead: - https://lore.kernel.org/lkml/20200716081538.2sivhkj4hcyrusem@linutronix.de - Main feature differing them of generic softirqs: tasklet is running only on one CPU simultaneously. @@ -589,42 +516,16 @@ struct tasklet_struct struct tasklet_struct *next; unsigned long state; atomic_t count; - bool use_callback; - union { - void (*func)(unsigned long data); - void (*callback)(struct tasklet_struct *t); - }; + void (*func)(unsigned long); unsigned long data; }; -#define DECLARE_TASKLET(name, _callback) \ -struct tasklet_struct name = { \ - .count = ATOMIC_INIT(0), \ - .callback = _callback, \ - .use_callback = true, \ -} +#define DECLARE_TASKLET(name, func, data) \ +struct tasklet_struct name = { NULL, 0, ATOMIC_INIT(0), func, data } -#define DECLARE_TASKLET_DISABLED(name, _callback) \ -struct tasklet_struct name = { \ - .count = ATOMIC_INIT(1), \ - .callback = _callback, \ - .use_callback = true, \ -} +#define DECLARE_TASKLET_DISABLED(name, func, data) \ +struct tasklet_struct name = { NULL, 0, ATOMIC_INIT(1), func, data } -#define from_tasklet(var, callback_tasklet, tasklet_fieldname) \ - container_of(callback_tasklet, typeof(*var), tasklet_fieldname) - -#define DECLARE_TASKLET_OLD(name, _func) \ -struct tasklet_struct name = { \ - .count = ATOMIC_INIT(0), \ - .func = _func, \ -} - -#define DECLARE_TASKLET_DISABLED_OLD(name, _func) \ -struct tasklet_struct name = { \ - .count = ATOMIC_INIT(1), \ - .func = _func, \ -} enum { @@ -632,21 +533,26 @@ enum TASKLET_STATE_RUN /* Tasklet is running (SMP only) */ }; -#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT) +#ifdef CONFIG_SMP static inline int tasklet_trylock(struct tasklet_struct *t) { return !test_and_set_bit(TASKLET_STATE_RUN, &(t)->state); } -void tasklet_unlock(struct tasklet_struct *t); -void tasklet_unlock_wait(struct tasklet_struct *t); -void tasklet_unlock_spin_wait(struct tasklet_struct *t); +static inline void tasklet_unlock(struct 
tasklet_struct *t) +{ + smp_mb__before_atomic(); + clear_bit(TASKLET_STATE_RUN, &(t)->state); +} +static inline void tasklet_unlock_wait(struct tasklet_struct *t) +{ + while (test_bit(TASKLET_STATE_RUN, &(t)->state)) { barrier(); } +} #else -static inline int tasklet_trylock(struct tasklet_struct *t) { return 1; } -static inline void tasklet_unlock(struct tasklet_struct *t) { } -static inline void tasklet_unlock_wait(struct tasklet_struct *t) { } -static inline void tasklet_unlock_spin_wait(struct tasklet_struct *t) { } +#define tasklet_trylock(t) 1 +#define tasklet_unlock_wait(t) do { } while (0) +#define tasklet_unlock(t) do { } while (0) #endif extern void __tasklet_schedule(struct tasklet_struct *t); @@ -665,23 +571,27 @@ static inline void tasklet_hi_schedule(struct tasklet_struct *t) __tasklet_hi_schedule(t); } +extern void __tasklet_hi_schedule_first(struct tasklet_struct *t); + +/* + * This version avoids touching any other tasklets. Needed for kmemcheck + * in order not to take any page faults while enqueueing this tasklet; + * consider VERY carefully whether you really need this or + * tasklet_hi_schedule()... + */ +static inline void tasklet_hi_schedule_first(struct tasklet_struct *t) +{ + if (!test_and_set_bit(TASKLET_STATE_SCHED, &t->state)) + __tasklet_hi_schedule_first(t); +} + + static inline void tasklet_disable_nosync(struct tasklet_struct *t) { atomic_inc(&t->count); smp_mb__after_atomic(); } -/* - * Do not use in new code. Disabling tasklets from atomic contexts is - * error prone and should be avoided. 
- */ -static inline void tasklet_disable_in_atomic(struct tasklet_struct *t) -{ - tasklet_disable_nosync(t); - tasklet_unlock_spin_wait(t); - smp_mb(); -} - static inline void tasklet_disable(struct tasklet_struct *t) { tasklet_disable_nosync(t); @@ -696,10 +606,34 @@ static inline void tasklet_enable(struct tasklet_struct *t) } extern void tasklet_kill(struct tasklet_struct *t); +extern void tasklet_kill_immediate(struct tasklet_struct *t, unsigned int cpu); extern void tasklet_init(struct tasklet_struct *t, void (*func)(unsigned long), unsigned long data); -extern void tasklet_setup(struct tasklet_struct *t, - void (*callback)(struct tasklet_struct *)); + +struct tasklet_hrtimer { + struct hrtimer timer; + struct tasklet_struct tasklet; + enum hrtimer_restart (*function)(struct hrtimer *); +}; + +extern void +tasklet_hrtimer_init(struct tasklet_hrtimer *ttimer, + enum hrtimer_restart (*function)(struct hrtimer *), + clockid_t which_clock, enum hrtimer_mode mode); + +static inline +void tasklet_hrtimer_start(struct tasklet_hrtimer *ttimer, ktime_t time, + const enum hrtimer_mode mode) +{ + hrtimer_start(&ttimer->timer, time, mode); +} + +static inline +void tasklet_hrtimer_cancel(struct tasklet_hrtimer *ttimer) +{ + hrtimer_cancel(&ttimer->timer); + tasklet_kill(&ttimer->tasklet); +} /* * Autoprobing for irqs: @@ -757,12 +691,6 @@ static inline void init_irq_proc(void) } #endif -#ifdef CONFIG_IRQ_TIMINGS -void irq_timings_enable(void); -void irq_timings_disable(void); -u64 irq_timings_next_event(u64 now); -#endif - struct seq_file; int show_interrupts(struct seq_file *p, void *v); int arch_show_interrupts(struct seq_file *p, int prec); @@ -771,13 +699,24 @@ extern int early_irq_init(void); extern int arch_probe_nr_irqs(void); extern int arch_early_irq_init(void); +#if defined(CONFIG_FUNCTION_GRAPH_TRACER) || defined(CONFIG_KASAN) /* * We want to know which function is an entrypoint of a hardirq or a softirq. 
*/ -#ifndef __irq_entry -# define __irq_entry __section(".irqentry.text") +#define __irq_entry __attribute__((__section__(".irqentry.text"))) +#define __softirq_entry \ + __attribute__((__section__(".softirqentry.text"))) + +/* Limits of hardirq entrypoints */ +extern char __irqentry_text_start[]; +extern char __irqentry_text_end[]; +/* Limits of softirq entrypoints */ +extern char __softirqentry_text_start[]; +extern char __softirqentry_text_end[]; + +#else +#define __irq_entry +#define __softirq_entry #endif -#define __softirq_entry __section(".softirqentry.text") - #endif diff --git a/include/linux/interval_tree.h b/include/linux/interval_tree.h index 288c26f507..724556aa3c 100644 --- a/include/linux/interval_tree.h +++ b/include/linux/interval_tree.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_INTERVAL_TREE_H #define _LINUX_INTERVAL_TREE_H @@ -12,15 +11,13 @@ struct interval_tree_node { }; extern void -interval_tree_insert(struct interval_tree_node *node, - struct rb_root_cached *root); +interval_tree_insert(struct interval_tree_node *node, struct rb_root *root); extern void -interval_tree_remove(struct interval_tree_node *node, - struct rb_root_cached *root); +interval_tree_remove(struct interval_tree_node *node, struct rb_root *root); extern struct interval_tree_node * -interval_tree_iter_first(struct rb_root_cached *root, +interval_tree_iter_first(struct rb_root *root, unsigned long start, unsigned long last); extern struct interval_tree_node * diff --git a/include/linux/interval_tree_generic.h b/include/linux/interval_tree_generic.h index aaa8a0767a..58370e1862 100644 --- a/include/linux/interval_tree_generic.h +++ b/include/linux/interval_tree_generic.h @@ -1,8 +1,20 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ /* Interval Trees (C) 2012 Michel Lespinasse + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software 
Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA include/linux/interval_tree_generic.h */ @@ -21,7 +33,7 @@ * ITSTATIC: 'static' or empty * ITPREFIX: prefix to use for the inline tree definitions * - * Note - before using this, please consider if generic version + * Note - before using this, please consider if non-generic version * (interval_tree.h) would work for you... */ @@ -30,18 +42,34 @@ \ /* Callbacks for augmented rbtree insert and remove */ \ \ -RB_DECLARE_CALLBACKS_MAX(static, ITPREFIX ## _augment, \ - ITSTRUCT, ITRB, ITTYPE, ITSUBTREE, ITLAST) \ +static inline ITTYPE ITPREFIX ## _compute_subtree_last(ITSTRUCT *node) \ +{ \ + ITTYPE max = ITLAST(node), subtree_last; \ + if (node->ITRB.rb_left) { \ + subtree_last = rb_entry(node->ITRB.rb_left, \ + ITSTRUCT, ITRB)->ITSUBTREE; \ + if (max < subtree_last) \ + max = subtree_last; \ + } \ + if (node->ITRB.rb_right) { \ + subtree_last = rb_entry(node->ITRB.rb_right, \ + ITSTRUCT, ITRB)->ITSUBTREE; \ + if (max < subtree_last) \ + max = subtree_last; \ + } \ + return max; \ +} \ + \ +RB_DECLARE_CALLBACKS(static, ITPREFIX ## _augment, ITSTRUCT, ITRB, \ + ITTYPE, ITSUBTREE, ITPREFIX ## _compute_subtree_last) \ \ /* Insert / remove interval nodes from the tree */ \ \ -ITSTATIC void ITPREFIX ## _insert(ITSTRUCT *node, \ - struct rb_root_cached *root) \ +ITSTATIC void ITPREFIX ## _insert(ITSTRUCT *node, struct rb_root *root) \ { \ - struct rb_node **link = &root->rb_root.rb_node, *rb_parent = NULL; \ + struct rb_node **link = &root->rb_node, 
*rb_parent = NULL; \ ITTYPE start = ITSTART(node), last = ITLAST(node); \ ITSTRUCT *parent; \ - bool leftmost = true; \ \ while (*link) { \ rb_parent = *link; \ @@ -50,22 +78,18 @@ ITSTATIC void ITPREFIX ## _insert(ITSTRUCT *node, \ parent->ITSUBTREE = last; \ if (start < ITSTART(parent)) \ link = &parent->ITRB.rb_left; \ - else { \ + else \ link = &parent->ITRB.rb_right; \ - leftmost = false; \ - } \ } \ \ node->ITSUBTREE = last; \ rb_link_node(&node->ITRB, rb_parent, link); \ - rb_insert_augmented_cached(&node->ITRB, root, \ - leftmost, &ITPREFIX ## _augment); \ + rb_insert_augmented(&node->ITRB, root, &ITPREFIX ## _augment); \ } \ \ -ITSTATIC void ITPREFIX ## _remove(ITSTRUCT *node, \ - struct rb_root_cached *root) \ +ITSTATIC void ITPREFIX ## _remove(ITSTRUCT *node, struct rb_root *root) \ { \ - rb_erase_augmented_cached(&node->ITRB, root, &ITPREFIX ## _augment); \ + rb_erase_augmented(&node->ITRB, root, &ITPREFIX ## _augment); \ } \ \ /* \ @@ -116,35 +140,15 @@ ITPREFIX ## _subtree_search(ITSTRUCT *node, ITTYPE start, ITTYPE last) \ } \ \ ITSTATIC ITSTRUCT * \ -ITPREFIX ## _iter_first(struct rb_root_cached *root, \ - ITTYPE start, ITTYPE last) \ +ITPREFIX ## _iter_first(struct rb_root *root, ITTYPE start, ITTYPE last) \ { \ - ITSTRUCT *node, *leftmost; \ + ITSTRUCT *node; \ \ - if (!root->rb_root.rb_node) \ + if (!root->rb_node) \ return NULL; \ - \ - /* \ - * Fastpath range intersection/overlap between A: [a0, a1] and \ - * B: [b0, b1] is given by: \ - * \ - * a0 <= b1 && b0 <= a1 \ - * \ - * ... where A holds the lock range and B holds the smallest \ - * 'start' and largest 'last' in the tree. For the later, we \ - * rely on the root node, which by augmented interval tree \ - * property, holds the largest value in its last-in-subtree. \ - * This allows mitigating some of the tree walk overhead for \ - * for non-intersecting ranges, maintained and consulted in O(1). 
\ - */ \ - node = rb_entry(root->rb_root.rb_node, ITSTRUCT, ITRB); \ + node = rb_entry(root->rb_node, ITSTRUCT, ITRB); \ if (node->ITSUBTREE < start) \ return NULL; \ - \ - leftmost = rb_entry(root->rb_leftmost, ITSTRUCT, ITRB); \ - if (ITSTART(leftmost) > last) \ - return NULL; \ - \ return ITPREFIX ## _subtree_search(node, start, last); \ } \ \ diff --git a/include/linux/io-64-nonatomic-hi-lo.h b/include/linux/io-64-nonatomic-hi-lo.h index f32522bb3a..defcc4644c 100644 --- a/include/linux/io-64-nonatomic-hi-lo.h +++ b/include/linux/io-64-nonatomic-hi-lo.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_IO_64_NONATOMIC_HI_LO_H_ #define _LINUX_IO_64_NONATOMIC_HI_LO_H_ @@ -55,68 +54,4 @@ static inline void hi_lo_writeq_relaxed(__u64 val, volatile void __iomem *addr) #define writeq_relaxed hi_lo_writeq_relaxed #endif -#ifndef ioread64_hi_lo -#define ioread64_hi_lo ioread64_hi_lo -static inline u64 ioread64_hi_lo(const void __iomem *addr) -{ - u32 low, high; - - high = ioread32(addr + sizeof(u32)); - low = ioread32(addr); - - return low + ((u64)high << 32); -} -#endif - -#ifndef iowrite64_hi_lo -#define iowrite64_hi_lo iowrite64_hi_lo -static inline void iowrite64_hi_lo(u64 val, void __iomem *addr) -{ - iowrite32(val >> 32, addr + sizeof(u32)); - iowrite32(val, addr); -} -#endif - -#ifndef ioread64be_hi_lo -#define ioread64be_hi_lo ioread64be_hi_lo -static inline u64 ioread64be_hi_lo(const void __iomem *addr) -{ - u32 low, high; - - high = ioread32be(addr); - low = ioread32be(addr + sizeof(u32)); - - return low + ((u64)high << 32); -} -#endif - -#ifndef iowrite64be_hi_lo -#define iowrite64be_hi_lo iowrite64be_hi_lo -static inline void iowrite64be_hi_lo(u64 val, void __iomem *addr) -{ - iowrite32be(val >> 32, addr); - iowrite32be(val, addr + sizeof(u32)); -} -#endif - -#ifndef ioread64 -#define ioread64_is_nonatomic -#define ioread64 ioread64_hi_lo -#endif - -#ifndef iowrite64 -#define iowrite64_is_nonatomic -#define iowrite64 iowrite64_hi_lo 
-#endif - -#ifndef ioread64be -#define ioread64be_is_nonatomic -#define ioread64be ioread64be_hi_lo -#endif - -#ifndef iowrite64be -#define iowrite64be_is_nonatomic -#define iowrite64be iowrite64be_hi_lo -#endif - #endif /* _LINUX_IO_64_NONATOMIC_HI_LO_H_ */ diff --git a/include/linux/io-64-nonatomic-lo-hi.h b/include/linux/io-64-nonatomic-lo-hi.h index 448a21435d..084461a4e5 100644 --- a/include/linux/io-64-nonatomic-lo-hi.h +++ b/include/linux/io-64-nonatomic-lo-hi.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_IO_64_NONATOMIC_LO_HI_H_ #define _LINUX_IO_64_NONATOMIC_LO_HI_H_ @@ -55,68 +54,4 @@ static inline void lo_hi_writeq_relaxed(__u64 val, volatile void __iomem *addr) #define writeq_relaxed lo_hi_writeq_relaxed #endif -#ifndef ioread64_lo_hi -#define ioread64_lo_hi ioread64_lo_hi -static inline u64 ioread64_lo_hi(const void __iomem *addr) -{ - u32 low, high; - - low = ioread32(addr); - high = ioread32(addr + sizeof(u32)); - - return low + ((u64)high << 32); -} -#endif - -#ifndef iowrite64_lo_hi -#define iowrite64_lo_hi iowrite64_lo_hi -static inline void iowrite64_lo_hi(u64 val, void __iomem *addr) -{ - iowrite32(val, addr); - iowrite32(val >> 32, addr + sizeof(u32)); -} -#endif - -#ifndef ioread64be_lo_hi -#define ioread64be_lo_hi ioread64be_lo_hi -static inline u64 ioread64be_lo_hi(const void __iomem *addr) -{ - u32 low, high; - - low = ioread32be(addr + sizeof(u32)); - high = ioread32be(addr); - - return low + ((u64)high << 32); -} -#endif - -#ifndef iowrite64be_lo_hi -#define iowrite64be_lo_hi iowrite64be_lo_hi -static inline void iowrite64be_lo_hi(u64 val, void __iomem *addr) -{ - iowrite32be(val, addr + sizeof(u32)); - iowrite32be(val >> 32, addr); -} -#endif - -#ifndef ioread64 -#define ioread64_is_nonatomic -#define ioread64 ioread64_lo_hi -#endif - -#ifndef iowrite64 -#define iowrite64_is_nonatomic -#define iowrite64 iowrite64_lo_hi -#endif - -#ifndef ioread64be -#define ioread64be_is_nonatomic -#define ioread64be 
ioread64be_lo_hi -#endif - -#ifndef iowrite64be -#define iowrite64be_is_nonatomic -#define iowrite64be iowrite64be_lo_hi -#endif - #endif /* _LINUX_IO_64_NONATOMIC_LO_HI_H_ */ diff --git a/include/linux/io-mapping.h b/include/linux/io-mapping.h index e9743cfd85..58df02bd93 100644 --- a/include/linux/io-mapping.h +++ b/include/linux/io-mapping.h @@ -1,6 +1,18 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright © 2008 Keith Packard + * + * This file is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License + * as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA. */ #ifndef _LINUX_IO_MAPPING_H @@ -10,14 +22,13 @@ #include #include #include -#include #include /* * The io_mapping mechanism provides an abstraction for mapping * individual pages from an io device to the CPU in an efficient fashion. 
* - * See Documentation/driver-api/io-mapping.rst + * See Documentation/io-mapping.txt */ struct io_mapping { @@ -29,7 +40,6 @@ struct io_mapping { #ifdef CONFIG_HAVE_ATOMIC_IOMAP -#include #include /* * For small address space machines, mapping large objects @@ -66,35 +76,18 @@ io_mapping_map_atomic_wc(struct io_mapping *mapping, unsigned long offset) { resource_size_t phys_addr; + unsigned long pfn; BUG_ON(offset >= mapping->size); phys_addr = mapping->base + offset; - preempt_disable(); - pagefault_disable(); - return __iomap_local_pfn_prot(PHYS_PFN(phys_addr), mapping->prot); + pfn = (unsigned long) (phys_addr >> PAGE_SHIFT); + return iomap_atomic_prot_pfn(pfn, mapping->prot); } static inline void io_mapping_unmap_atomic(void __iomem *vaddr) { - kunmap_local_indexed((void __force *)vaddr); - pagefault_enable(); - preempt_enable(); -} - -static inline void __iomem * -io_mapping_map_local_wc(struct io_mapping *mapping, unsigned long offset) -{ - resource_size_t phys_addr; - - BUG_ON(offset >= mapping->size); - phys_addr = mapping->base + offset; - return __iomap_local_pfn_prot(PHYS_PFN(phys_addr), mapping->prot); -} - -static inline void io_mapping_unmap_local(void __iomem *vaddr) -{ - kunmap_local_indexed((void __force *)vaddr); + iounmap_atomic(vaddr); } static inline void __iomem * @@ -116,9 +109,10 @@ io_mapping_unmap(void __iomem *vaddr) iounmap(vaddr); } -#else /* HAVE_ATOMIC_IOMAP */ +#else #include +#include /* Create the io_mapping object*/ static inline struct io_mapping * @@ -126,12 +120,9 @@ io_mapping_init_wc(struct io_mapping *iomap, resource_size_t base, unsigned long size) { - iomap->iomem = ioremap_wc(base, size); - if (!iomap->iomem) - return NULL; - iomap->base = base; iomap->size = size; + iomap->iomem = ioremap_wc(base, size); #if defined(pgprot_noncached_wc) /* archs can't agree on a name ... 
*/ iomap->prot = pgprot_noncached_wc(PAGE_KERNEL); #elif defined(pgprot_writecombine) @@ -181,18 +172,7 @@ io_mapping_unmap_atomic(void __iomem *vaddr) preempt_enable(); } -static inline void __iomem * -io_mapping_map_local_wc(struct io_mapping *mapping, unsigned long offset) -{ - return io_mapping_map_wc(mapping, offset, PAGE_SIZE); -} - -static inline void io_mapping_unmap_local(void __iomem *vaddr) -{ - io_mapping_unmap(vaddr); -} - -#endif /* !HAVE_ATOMIC_IOMAP */ +#endif /* HAVE_ATOMIC_IOMAP */ static inline struct io_mapping * io_mapping_create_wc(resource_size_t base, @@ -220,6 +200,3 @@ io_mapping_free(struct io_mapping *iomap) } #endif /* _LINUX_IO_MAPPING_H */ - -int io_mapping_map_user(struct io_mapping *iomap, struct vm_area_struct *vma, - unsigned long addr, unsigned long pfn, unsigned long size); diff --git a/include/linux/io.h b/include/linux/io.h index 9595151d80..82ef36eac8 100644 --- a/include/linux/io.h +++ b/include/linux/io.h @@ -1,6 +1,18 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright 2006 PathScale, Inc. All Rights Reserved. + * + * This file is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License + * as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA. 
*/ #ifndef _LINUX_IO_H @@ -31,6 +43,14 @@ static inline int ioremap_page_range(unsigned long addr, unsigned long end, } #endif +#ifdef CONFIG_HAVE_ARCH_HUGE_VMAP +void __init ioremap_huge_init(void); +int arch_ioremap_pud_supported(void); +int arch_ioremap_pmd_supported(void); +#else +static inline void ioremap_huge_init(void) { } +#endif + /* * Managed iomap interface */ @@ -55,12 +75,10 @@ static inline void devm_ioport_unmap(struct device *dev, void __iomem *addr) void __iomem *devm_ioremap(struct device *dev, resource_size_t offset, resource_size_t size); -void __iomem *devm_ioremap_uc(struct device *dev, resource_size_t offset, +void __iomem *devm_ioremap_nocache(struct device *dev, resource_size_t offset, resource_size_t size); void __iomem *devm_ioremap_wc(struct device *dev, resource_size_t offset, resource_size_t size); -void __iomem *devm_ioremap_np(struct device *dev, resource_size_t offset, - resource_size_t size); void devm_iounmap(struct device *dev, void __iomem *addr); int check_signature(const volatile void __iomem *io_addr, const unsigned char *signature, int length); @@ -70,26 +88,7 @@ void *devm_memremap(struct device *dev, resource_size_t offset, size_t size, unsigned long flags); void devm_memunmap(struct device *dev, void *addr); -#ifdef CONFIG_PCI -/* - * The PCI specifications (Rev 3.0, 3.2.5 "Transaction Ordering and - * Posting") mandate non-posted configuration transactions. This default - * implementation attempts to use the ioremap_np() API to provide this - * on arches that support it, and falls back to ioremap() on those that - * don't. Overriding this function is deprecated; arches that properly - * support non-posted accesses should implement ioremap_np() instead, which - * this default implementation can then use to return mappings compliant with - * the PCI specification. 
- */ -#ifndef pci_remap_cfgspace -#define pci_remap_cfgspace pci_remap_cfgspace -static inline void __iomem *pci_remap_cfgspace(phys_addr_t offset, - size_t size) -{ - return ioremap_np(offset, size) ?: ioremap(offset, size); -} -#endif -#endif +void *__devm_memremap_pages(struct device *dev, struct resource *res); /* * Some systems do not have legacy ISA devices. @@ -137,8 +136,6 @@ enum { MEMREMAP_WB = 1 << 0, MEMREMAP_WT = 1 << 1, MEMREMAP_WC = 1 << 2, - MEMREMAP_ENC = 1 << 3, - MEMREMAP_DEC = 1 << 4, }; void *memremap(resource_size_t offset, size_t size, unsigned long flags); diff --git a/include/linux/ioc3.h b/include/linux/ioc3.h new file mode 100644 index 0000000000..38b286e9a4 --- /dev/null +++ b/include/linux/ioc3.h @@ -0,0 +1,93 @@ +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (c) 2005 Stanislaw Skowronek + */ + +#ifndef _LINUX_IOC3_H +#define _LINUX_IOC3_H + +#include + +#define IOC3_MAX_SUBMODULES 32 + +#define IOC3_CLASS_NONE 0 +#define IOC3_CLASS_BASE_IP27 1 +#define IOC3_CLASS_BASE_IP30 2 +#define IOC3_CLASS_MENET_123 3 +#define IOC3_CLASS_MENET_4 4 +#define IOC3_CLASS_CADDUO 5 +#define IOC3_CLASS_SERIAL 6 + +/* One of these per IOC3 */ +struct ioc3_driver_data { + struct list_head list; + int id; /* IOC3 sequence number */ + /* PCI mapping */ + unsigned long pma; /* physical address */ + struct ioc3 __iomem *vma; /* pointer to registers */ + struct pci_dev *pdev; /* PCI device */ + /* IRQ stuff */ + int dual_irq; /* set if separate IRQs are used */ + int irq_io, irq_eth; /* IRQ numbers */ + /* GPIO magic */ + spinlock_t gpio_lock; + unsigned int gpdr_shadow; + /* NIC identifiers */ + char nic_part[32]; + char nic_serial[16]; + char nic_mac[6]; + /* submodule set */ + int class; + void *data[IOC3_MAX_SUBMODULES]; /* for submodule use */ + int active[IOC3_MAX_SUBMODULES]; /* set if probe succeeds */ + 
/* is_ir_lock must be held while + * modifying sio_ie values, so + * we can be sure that sio_ie is + * not changing when we read it + * along with sio_ir. + */ + spinlock_t ir_lock; /* SIO_IE[SC] mod lock */ +}; + +/* One per submodule */ +struct ioc3_submodule { + char *name; /* descriptive submodule name */ + struct module *owner; /* owning kernel module */ + int ethernet; /* set for ethernet drivers */ + int (*probe) (struct ioc3_submodule *, struct ioc3_driver_data *); + int (*remove) (struct ioc3_submodule *, struct ioc3_driver_data *); + int id; /* assigned by IOC3, index for the "data" array */ + /* IRQ stuff */ + unsigned int irq_mask; /* IOC3 IRQ mask, leave clear for Ethernet */ + int reset_mask; /* non-zero if you want the ioc3.c module to reset interrupts */ + int (*intr) (struct ioc3_submodule *, struct ioc3_driver_data *, unsigned int); + /* private submodule data */ + void *data; /* assigned by submodule */ +}; + +/********************************** + * Functions needed by submodules * + **********************************/ + +#define IOC3_W_IES 0 +#define IOC3_W_IEC 1 + +/* registers a submodule for all existing and future IOC3 chips */ +extern int ioc3_register_submodule(struct ioc3_submodule *); +/* unregisters a submodule */ +extern void ioc3_unregister_submodule(struct ioc3_submodule *); +/* enables IRQs indicated by irq_mask for a specified IOC3 chip */ +extern void ioc3_enable(struct ioc3_submodule *, struct ioc3_driver_data *, unsigned int); +/* ackowledges specified IRQs */ +extern void ioc3_ack(struct ioc3_submodule *, struct ioc3_driver_data *, unsigned int); +/* disables IRQs indicated by irq_mask for a specified IOC3 chip */ +extern void ioc3_disable(struct ioc3_submodule *, struct ioc3_driver_data *, unsigned int); +/* atomically sets GPCR bits */ +extern void ioc3_gpcr_set(struct ioc3_driver_data *, unsigned int); +/* general ireg writer */ +extern void ioc3_write_ireg(struct ioc3_driver_data *idd, uint32_t value, int reg); + +#endif 
diff --git a/include/linux/ioc4.h b/include/linux/ioc4.h new file mode 100644 index 0000000000..51e2b9fb63 --- /dev/null +++ b/include/linux/ioc4.h @@ -0,0 +1,184 @@ +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (c) 2005 Silicon Graphics, Inc. All Rights Reserved. + */ + +#ifndef _LINUX_IOC4_H +#define _LINUX_IOC4_H + +#include + +/*************** + * Definitions * + ***************/ + +/* Miscellaneous values inherent to hardware */ + +#define IOC4_EXTINT_COUNT_DIVISOR 520 /* PCI clocks per COUNT tick */ + +/*********************************** + * Structures needed by subdrivers * + ***********************************/ + +/* This structure fully describes the IOC4 miscellaneous registers which + * appear at bar[0]+0x00000 through bar[0]+0x0005c. The corresponding + * PCI resource is managed by the main IOC4 driver because it contains + * registers of interest to many different IOC4 subdrivers. 
+ */ +struct ioc4_misc_regs { + /* Miscellaneous IOC4 registers */ + union ioc4_pci_err_addr_l { + uint32_t raw; + struct { + uint32_t valid:1; /* Address captured */ + uint32_t master_id:4; /* Unit causing error + * 0/1: Serial port 0 TX/RX + * 2/3: Serial port 1 TX/RX + * 4/5: Serial port 2 TX/RX + * 6/7: Serial port 3 TX/RX + * 8: ATA/ATAPI + * 9-15: Undefined + */ + uint32_t mul_err:1; /* Multiple errors occurred */ + uint32_t addr:26; /* Bits 31-6 of error addr */ + } fields; + } pci_err_addr_l; + uint32_t pci_err_addr_h; /* Bits 63-32 of error addr */ + union ioc4_sio_int { + uint32_t raw; + struct { + uint8_t tx_mt:1; /* TX ring buffer empty */ + uint8_t rx_full:1; /* RX ring buffer full */ + uint8_t rx_high:1; /* RX high-water exceeded */ + uint8_t rx_timer:1; /* RX timer has triggered */ + uint8_t delta_dcd:1; /* DELTA_DCD seen */ + uint8_t delta_cts:1; /* DELTA_CTS seen */ + uint8_t intr_pass:1; /* Interrupt pass-through */ + uint8_t tx_explicit:1; /* TX, MCW, or delay complete */ + } fields[4]; + } sio_ir; /* Serial interrupt state */ + union ioc4_other_int { + uint32_t raw; + struct { + uint32_t ata_int:1; /* ATA port passthru */ + uint32_t ata_memerr:1; /* ATA halted by mem error */ + uint32_t memerr:4; /* Serial halted by mem err */ + uint32_t kbd_int:1; /* kbd/mouse intr asserted */ + uint32_t reserved:16; /* zero */ + uint32_t rt_int:1; /* INT_OUT section latch */ + uint32_t gen_int:8; /* Intr. from generic pins */ + } fields; + } other_ir; /* Other interrupt state */ + union ioc4_sio_int sio_ies; /* Serial interrupt enable set */ + union ioc4_other_int other_ies; /* Other interrupt enable set */ + union ioc4_sio_int sio_iec; /* Serial interrupt enable clear */ + union ioc4_other_int other_iec; /* Other interrupt enable clear */ + union ioc4_sio_cr { + uint32_t raw; + struct { + uint32_t cmd_pulse:4; /* Bytebus strobe width */ + uint32_t arb_diag:3; /* PCI bus requester */ + uint32_t sio_diag_idle:1; /* Active ser req? 
*/ + uint32_t ata_diag_idle:1; /* Active ATA req? */ + uint32_t ata_diag_active:1; /* ATA req is winner */ + uint32_t reserved:22; /* zero */ + } fields; + } sio_cr; + uint32_t unused1; + union ioc4_int_out { + uint32_t raw; + struct { + uint32_t count:16; /* Period control */ + uint32_t mode:3; /* Output signal shape */ + uint32_t reserved:11; /* zero */ + uint32_t diag:1; /* Timebase control */ + uint32_t int_out:1; /* Current value */ + } fields; + } int_out; /* External interrupt output control */ + uint32_t unused2; + union ioc4_gpcr { + uint32_t raw; + struct { + uint32_t dir:8; /* Pin direction */ + uint32_t edge:8; /* Edge/level mode */ + uint32_t reserved1:4; /* zero */ + uint32_t int_out_en:1; /* INT_OUT enable */ + uint32_t reserved2:11; /* zero */ + } fields; + } gpcr_s; /* Generic PIO control set */ + union ioc4_gpcr gpcr_c; /* Generic PIO control clear */ + union ioc4_gpdr { + uint32_t raw; + struct { + uint32_t gen_pin:8; /* State of pins */ + uint32_t reserved:24; + } fields; + } gpdr; /* Generic PIO data */ + uint32_t unused3; + union ioc4_gppr { + uint32_t raw; + struct { + uint32_t gen_pin:1; /* Single pin state */ + uint32_t reserved:31; + } fields; + } gppr[8]; /* Generic PIO pins */ +}; + +/* Masks for GPCR DIR pins */ +#define IOC4_GPCR_DIR_0 0x01 /* External interrupt output */ +#define IOC4_GPCR_DIR_1 0x02 /* External interrupt input */ +#define IOC4_GPCR_DIR_2 0x04 +#define IOC4_GPCR_DIR_3 0x08 /* Keyboard/mouse presence */ +#define IOC4_GPCR_DIR_4 0x10 /* Ser. port 0 xcvr select (0=232, 1=422) */ +#define IOC4_GPCR_DIR_5 0x20 /* Ser. port 1 xcvr select (0=232, 1=422) */ +#define IOC4_GPCR_DIR_6 0x40 /* Ser. port 2 xcvr select (0=232, 1=422) */ +#define IOC4_GPCR_DIR_7 0x80 /* Ser. 
port 3 xcvr select (0=232, 1=422) */ + +/* Masks for GPCR EDGE pins */ +#define IOC4_GPCR_EDGE_0 0x01 +#define IOC4_GPCR_EDGE_1 0x02 /* External interrupt input */ +#define IOC4_GPCR_EDGE_2 0x04 +#define IOC4_GPCR_EDGE_3 0x08 +#define IOC4_GPCR_EDGE_4 0x10 +#define IOC4_GPCR_EDGE_5 0x20 +#define IOC4_GPCR_EDGE_6 0x40 +#define IOC4_GPCR_EDGE_7 0x80 + +#define IOC4_VARIANT_IO9 0x0900 +#define IOC4_VARIANT_PCI_RT 0x0901 +#define IOC4_VARIANT_IO10 0x1000 + +/* One of these per IOC4 */ +struct ioc4_driver_data { + struct list_head idd_list; + unsigned long idd_bar0; + struct pci_dev *idd_pdev; + const struct pci_device_id *idd_pci_id; + struct ioc4_misc_regs __iomem *idd_misc_regs; + unsigned long count_period; + void *idd_serial_data; + unsigned int idd_variant; +}; + +/* One per submodule */ +struct ioc4_submodule { + struct list_head is_list; + char *is_name; + struct module *is_owner; + int (*is_probe) (struct ioc4_driver_data *); + int (*is_remove) (struct ioc4_driver_data *); +}; + +#define IOC4_NUM_CARDS 8 /* max cards per partition */ + +/********************************** + * Functions needed by submodules * + **********************************/ + +extern int ioc4_register_submodule(struct ioc4_submodule *); +extern void ioc4_unregister_submodule(struct ioc4_submodule *); + +#endif /* _LINUX_IOC4_H */ diff --git a/include/linux/iocontext.h b/include/linux/iocontext.h index 0a9dc40b7b..df38db2ef4 100644 --- a/include/linux/iocontext.h +++ b/include/linux/iocontext.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef IOCONTEXT_H #define IOCONTEXT_H @@ -8,7 +7,6 @@ enum { ICQ_EXITED = 1 << 2, - ICQ_DESTROYED = 1 << 3, }; /* @@ -106,6 +104,12 @@ struct io_context { unsigned short ioprio; + /* + * For request batching + */ + int nr_batch_requests; /* Number of requests left in the batch */ + unsigned long last_waited; /* Time last woken after wait for request */ + struct radix_tree_root icq_tree; struct io_cq __rcu *icq_hint; struct hlist_head icq_list; 
diff --git a/include/linux/iomap.h b/include/linux/iomap.h index 24f8489583..7892f55a18 100644 --- a/include/linux/iomap.h +++ b/include/linux/iomap.h @@ -1,133 +1,46 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef LINUX_IOMAP_H #define LINUX_IOMAP_H 1 -#include -#include -#include -#include #include -#include -#include -struct address_space; struct fiemap_extent_info; struct inode; -struct iomap_dio; -struct iomap_writepage_ctx; struct iov_iter; struct kiocb; -struct page; struct vm_area_struct; struct vm_fault; /* * Types of block ranges for iomap mappings: */ -#define IOMAP_HOLE 0 /* no blocks allocated, need allocation */ -#define IOMAP_DELALLOC 1 /* delayed allocation blocks */ -#define IOMAP_MAPPED 2 /* blocks allocated at @addr */ -#define IOMAP_UNWRITTEN 3 /* blocks allocated at @addr in unwritten state */ -#define IOMAP_INLINE 4 /* data inline in the inode */ +#define IOMAP_HOLE 0x01 /* no blocks allocated, need allocation */ +#define IOMAP_DELALLOC 0x02 /* delayed allocation blocks */ +#define IOMAP_MAPPED 0x03 /* blocks allocated @blkno */ +#define IOMAP_UNWRITTEN 0x04 /* blocks allocated @blkno in unwritten state */ /* - * Flags reported by the file system from iomap_begin: - * - * IOMAP_F_NEW indicates that the blocks have been newly allocated and need - * zeroing for areas that no data is copied to. - * - * IOMAP_F_DIRTY indicates the inode has uncommitted metadata needed to access - * written data and requires fdatasync to commit them to persistent storage. - * This needs to take into account metadata changes that *may* be made at IO - * completion, such as file size updates from direct IO. - * - * IOMAP_F_SHARED indicates that the blocks are shared, and will need to be - * unshared as part a write. - * - * IOMAP_F_MERGED indicates that the iomap contains the merge of multiple block - * mappings. - * - * IOMAP_F_BUFFER_HEAD indicates that the file system requires the use of - * buffer heads for this mapping. 
+ * Flags for all iomap mappings: */ -#define IOMAP_F_NEW 0x01 -#define IOMAP_F_DIRTY 0x02 -#define IOMAP_F_SHARED 0x04 -#define IOMAP_F_MERGED 0x08 -#define IOMAP_F_BUFFER_HEAD 0x10 -#define IOMAP_F_ZONE_APPEND 0x20 +#define IOMAP_F_NEW 0x01 /* blocks have been newly allocated */ /* - * Flags set by the core iomap code during operations: - * - * IOMAP_F_SIZE_CHANGED indicates to the iomap_end method that the file size - * has changed as the result of this write operation. + * Flags that only need to be reported for IOMAP_REPORT requests: */ -#define IOMAP_F_SIZE_CHANGED 0x100 +#define IOMAP_F_MERGED 0x10 /* contains multiple blocks/extents */ +#define IOMAP_F_SHARED 0x20 /* block shared with another file */ /* - * Flags from 0x1000 up are for file system specific usage: + * Magic value for blkno: */ -#define IOMAP_F_PRIVATE 0x1000 - - -/* - * Magic value for addr: - */ -#define IOMAP_NULL_ADDR -1ULL /* addr is not valid */ - -struct iomap_page_ops; +#define IOMAP_NULL_BLOCK -1LL /* blkno is not valid */ struct iomap { - u64 addr; /* disk offset of mapping, bytes */ + sector_t blkno; /* 1st sector of mapping, 512b units */ loff_t offset; /* file offset of mapping, bytes */ u64 length; /* length of mapping, bytes */ u16 type; /* type of mapping */ u16 flags; /* flags for mapping */ struct block_device *bdev; /* block device for I/O */ - struct dax_device *dax_dev; /* dax_dev for dax operations */ - void *inline_data; - void *private; /* filesystem private */ - const struct iomap_page_ops *page_ops; -}; - -static inline sector_t iomap_sector(const struct iomap *iomap, loff_t pos) -{ - return (iomap->addr + pos - iomap->offset) >> SECTOR_SHIFT; -} - -/* - * Returns the inline data pointer for logical offset @pos. - */ -static inline void *iomap_inline_data(const struct iomap *iomap, loff_t pos) -{ - return iomap->inline_data + pos - iomap->offset; -} - -/* - * Check if the mapping's length is within the valid range for inline data. 
- * This is used to guard against accessing data beyond the page inline_data - * points at. - */ -static inline bool iomap_inline_data_valid(const struct iomap *iomap) -{ - return iomap->length <= PAGE_SIZE - offset_in_page(iomap->inline_data); -} - -/* - * When a filesystem sets page_ops in an iomap mapping it returns, page_prepare - * and page_done will be called for each page written to. This only applies to - * buffered writes as unbuffered writes will not typically have pages - * associated with them. - * - * When page_prepare succeeds, page_done will always be called to do any - * cleanup work necessary. In that page_done call, @page will be NULL if the - * associated page could not be obtained. - */ -struct iomap_page_ops { - int (*page_prepare)(struct inode *inode, loff_t pos, unsigned len); - void (*page_done)(struct inode *inode, loff_t pos, unsigned copied, - struct page *page); }; /* @@ -136,11 +49,6 @@ struct iomap_page_ops { #define IOMAP_WRITE (1 << 0) /* writing, must allocate blocks */ #define IOMAP_ZERO (1 << 1) /* zeroing operation, may skip holes */ #define IOMAP_REPORT (1 << 2) /* report extent status, e.g. FIEMAP */ -#define IOMAP_FAULT (1 << 3) /* mapping for page fault */ -#define IOMAP_DIRECT (1 << 4) /* direct I/O */ -#define IOMAP_NOWAIT (1 << 5) /* do not block */ -#define IOMAP_OVERWRITE_ONLY (1 << 6) /* only pure overwrites allowed */ -#define IOMAP_UNSHARE (1 << 7) /* unshare_file_range */ struct iomap_ops { /* @@ -149,8 +57,7 @@ struct iomap_ops { * The actual length is returned in iomap->length. */ int (*iomap_begin)(struct inode *inode, loff_t pos, loff_t length, - unsigned flags, struct iomap *iomap, - struct iomap *srcmap); + unsigned flags, struct iomap *iomap); /* * Commit and/or unreserve space previous allocated using iomap_begin. 
@@ -162,192 +69,17 @@ struct iomap_ops { ssize_t written, unsigned flags, struct iomap *iomap); }; -/** - * struct iomap_iter - Iterate through a range of a file - * @inode: Set at the start of the iteration and should not change. - * @pos: The current file position we are operating on. It is updated by - * calls to iomap_iter(). Treat as read-only in the body. - * @len: The remaining length of the file segment we're operating on. - * It is updated at the same time as @pos. - * @processed: The number of bytes processed by the body in the most recent - * iteration, or a negative errno. 0 causes the iteration to stop. - * @flags: Zero or more of the iomap_begin flags above. - * @iomap: Map describing the I/O iteration - * @srcmap: Source map for COW operations - */ -struct iomap_iter { - struct inode *inode; - loff_t pos; - u64 len; - s64 processed; - unsigned flags; - struct iomap iomap; - struct iomap srcmap; -}; - -int iomap_iter(struct iomap_iter *iter, const struct iomap_ops *ops); - -/** - * iomap_length - length of the current iomap iteration - * @iter: iteration structure - * - * Returns the length that the operation applies to for the current iteration. - */ -static inline u64 iomap_length(const struct iomap_iter *iter) -{ - u64 end = iter->iomap.offset + iter->iomap.length; - - if (iter->srcmap.type != IOMAP_HOLE) - end = min(end, iter->srcmap.offset + iter->srcmap.length); - return min(iter->len, end - iter->pos); -} - -/** - * iomap_iter_srcmap - return the source map for the current iomap iteration - * @i: iteration structure - * - * Write operations on file systems with reflink support might require a - * source and a destination map. This function retourns the source map - * for a given operation, which may or may no be identical to the destination - * map in &i->iomap. 
- */ -static inline const struct iomap *iomap_iter_srcmap(const struct iomap_iter *i) -{ - if (i->srcmap.type != IOMAP_HOLE) - return &i->srcmap; - return &i->iomap; -} - ssize_t iomap_file_buffered_write(struct kiocb *iocb, struct iov_iter *from, - const struct iomap_ops *ops); -int iomap_readpage(struct page *page, const struct iomap_ops *ops); -void iomap_readahead(struct readahead_control *, const struct iomap_ops *ops); -int iomap_is_partially_uptodate(struct page *page, unsigned long from, - unsigned long count); -int iomap_releasepage(struct page *page, gfp_t gfp_mask); -void iomap_invalidatepage(struct page *page, unsigned int offset, - unsigned int len); -#ifdef CONFIG_MIGRATION -int iomap_migrate_page(struct address_space *mapping, struct page *newpage, - struct page *page, enum migrate_mode mode); -#else -#define iomap_migrate_page NULL -#endif -int iomap_file_unshare(struct inode *inode, loff_t pos, loff_t len, - const struct iomap_ops *ops); + struct iomap_ops *ops); +int iomap_file_dirty(struct inode *inode, loff_t pos, loff_t len, + struct iomap_ops *ops); int iomap_zero_range(struct inode *inode, loff_t pos, loff_t len, - bool *did_zero, const struct iomap_ops *ops); + bool *did_zero, struct iomap_ops *ops); int iomap_truncate_page(struct inode *inode, loff_t pos, bool *did_zero, - const struct iomap_ops *ops); -vm_fault_t iomap_page_mkwrite(struct vm_fault *vmf, - const struct iomap_ops *ops); + struct iomap_ops *ops); +int iomap_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf, + struct iomap_ops *ops); int iomap_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo, - u64 start, u64 len, const struct iomap_ops *ops); -loff_t iomap_seek_hole(struct inode *inode, loff_t offset, - const struct iomap_ops *ops); -loff_t iomap_seek_data(struct inode *inode, loff_t offset, - const struct iomap_ops *ops); -sector_t iomap_bmap(struct address_space *mapping, sector_t bno, - const struct iomap_ops *ops); - -/* - * Structure for 
writeback I/O completions. - */ -struct iomap_ioend { - struct list_head io_list; /* next ioend in chain */ - u16 io_type; - u16 io_flags; /* IOMAP_F_* */ - struct inode *io_inode; /* file being written to */ - size_t io_size; /* size of the extent */ - loff_t io_offset; /* offset in the file */ - struct bio *io_bio; /* bio being built */ - struct bio io_inline_bio; /* MUST BE LAST! */ -}; - -struct iomap_writeback_ops { - /* - * Required, maps the blocks so that writeback can be performed on - * the range starting at offset. - */ - int (*map_blocks)(struct iomap_writepage_ctx *wpc, struct inode *inode, - loff_t offset); - - /* - * Optional, allows the file systems to perform actions just before - * submitting the bio and/or override the bio end_io handler for complex - * operations like copy on write extent manipulation or unwritten extent - * conversions. - */ - int (*prepare_ioend)(struct iomap_ioend *ioend, int status); - - /* - * Optional, allows the file system to discard state on a page where - * we failed to submit any I/O. 
- */ - void (*discard_page)(struct page *page, loff_t fileoff); -}; - -struct iomap_writepage_ctx { - struct iomap iomap; - struct iomap_ioend *ioend; - const struct iomap_writeback_ops *ops; -}; - -void iomap_finish_ioends(struct iomap_ioend *ioend, int error); -void iomap_ioend_try_merge(struct iomap_ioend *ioend, - struct list_head *more_ioends); -void iomap_sort_ioends(struct list_head *ioend_list); -int iomap_writepage(struct page *page, struct writeback_control *wbc, - struct iomap_writepage_ctx *wpc, - const struct iomap_writeback_ops *ops); -int iomap_writepages(struct address_space *mapping, - struct writeback_control *wbc, struct iomap_writepage_ctx *wpc, - const struct iomap_writeback_ops *ops); - -/* - * Flags for direct I/O ->end_io: - */ -#define IOMAP_DIO_UNWRITTEN (1 << 0) /* covers unwritten extent(s) */ -#define IOMAP_DIO_COW (1 << 1) /* covers COW extent(s) */ - -struct iomap_dio_ops { - int (*end_io)(struct kiocb *iocb, ssize_t size, int error, - unsigned flags); - blk_qc_t (*submit_io)(const struct iomap_iter *iter, struct bio *bio, - loff_t file_offset); -}; - -/* - * Wait for the I/O to complete in iomap_dio_rw even if the kiocb is not - * synchronous. - */ -#define IOMAP_DIO_FORCE_WAIT (1 << 0) - -/* - * Do not allocate blocks or zero partial blocks, but instead fall back to - * the caller by returning -EAGAIN. Used to optimize direct I/O writes that - * are not aligned to the file system block size. 
- */ -#define IOMAP_DIO_OVERWRITE_ONLY (1 << 1) - -ssize_t iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter, - const struct iomap_ops *ops, const struct iomap_dio_ops *dops, - unsigned int dio_flags); -struct iomap_dio *__iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter, - const struct iomap_ops *ops, const struct iomap_dio_ops *dops, - unsigned int dio_flags); -ssize_t iomap_dio_complete(struct iomap_dio *dio); -int iomap_dio_iopoll(struct kiocb *kiocb, bool spin); - -#ifdef CONFIG_SWAP -struct file; -struct swap_info_struct; - -int iomap_swapfile_activate(struct swap_info_struct *sis, - struct file *swap_file, sector_t *pagespan, - const struct iomap_ops *ops); -#else -# define iomap_swapfile_activate(sis, swapfile, pagespan, ops) (-EIO) -#endif /* CONFIG_SWAP */ + loff_t start, loff_t len, struct iomap_ops *ops); #endif /* LINUX_IOMAP_H */ diff --git a/include/linux/iommu-common.h b/include/linux/iommu-common.h new file mode 100644 index 0000000000..376a27c9cc --- /dev/null +++ b/include/linux/iommu-common.h @@ -0,0 +1,52 @@ +#ifndef _LINUX_IOMMU_COMMON_H +#define _LINUX_IOMMU_COMMON_H + +#include +#include +#include + +#define IOMMU_POOL_HASHBITS 4 +#define IOMMU_NR_POOLS (1 << IOMMU_POOL_HASHBITS) +#define IOMMU_ERROR_CODE (~(unsigned long) 0) + +struct iommu_pool { + unsigned long start; + unsigned long end; + unsigned long hint; + spinlock_t lock; +}; + +struct iommu_map_table { + unsigned long table_map_base; + unsigned long table_shift; + unsigned long nr_pools; + void (*lazy_flush)(struct iommu_map_table *); + unsigned long poolsize; + struct iommu_pool pools[IOMMU_NR_POOLS]; + u32 flags; +#define IOMMU_HAS_LARGE_POOL 0x00000001 +#define IOMMU_NO_SPAN_BOUND 0x00000002 +#define IOMMU_NEED_FLUSH 0x00000004 + struct iommu_pool large_pool; + unsigned long *map; +}; + +extern void iommu_tbl_pool_init(struct iommu_map_table *iommu, + unsigned long num_entries, + u32 table_shift, + void (*lazy_flush)(struct iommu_map_table *), + bool large_pool, u32 
npools, + bool skip_span_boundary_check); + +extern unsigned long iommu_tbl_range_alloc(struct device *dev, + struct iommu_map_table *iommu, + unsigned long npages, + unsigned long *handle, + unsigned long mask, + unsigned int align_order); + +extern void iommu_tbl_range_free(struct iommu_map_table *iommu, + u64 dma_addr, unsigned long npages, + unsigned long entry); + +#endif diff --git a/include/linux/iommu-helper.h b/include/linux/iommu-helper.h index 74be34f3a2..86bdeffe43 100644 --- a/include/linux/iommu-helper.h +++ b/include/linux/iommu-helper.h @@ -1,11 +1,7 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_IOMMU_HELPER_H #define _LINUX_IOMMU_HELPER_H -#include -#include -#include -#include +#include static inline unsigned long iommu_device_max_index(unsigned long size, unsigned long offset, @@ -17,15 +13,9 @@ static inline unsigned long iommu_device_max_index(unsigned long size, return size; } -static inline int iommu_is_span_boundary(unsigned int index, unsigned int nr, - unsigned long shift, unsigned long boundary_size) -{ - BUG_ON(!is_power_of_2(boundary_size)); - - shift = (shift + index) & (boundary_size - 1); - return shift + nr > boundary_size; -} - +extern int iommu_is_span_boundary(unsigned int index, unsigned int nr, + unsigned long shift, + unsigned long boundary_size); extern unsigned long iommu_area_alloc(unsigned long *map, unsigned long size, unsigned long start, unsigned int nr, unsigned long shift, diff --git a/include/linux/iommu.h b/include/linux/iommu.h index d2f3435e7d..a6796a4fe3 100644 --- a/include/linux/iommu.h +++ b/include/linux/iommu.h @@ -1,36 +1,36 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright (C) 2007-2008 Advanced Micro Devices, Inc. * Author: Joerg Roedel + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation. 
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #ifndef __LINUX_IOMMU_H #define __LINUX_IOMMU_H -#include -#include -#include #include #include #include -#include -#include +#include +#include +#include #define IOMMU_READ (1 << 0) #define IOMMU_WRITE (1 << 1) #define IOMMU_CACHE (1 << 2) /* DMA cache coherency */ #define IOMMU_NOEXEC (1 << 3) #define IOMMU_MMIO (1 << 4) /* e.g. things like MSI doorbells */ -/* - * Where the bus hardware includes a privilege level as part of its access type - * markings, and certain devices are capable of issuing transactions marked as - * either 'supervisor' or 'user', the IOMMU_PRIV flag requests that the other - * given permission flags only apply to accesses at the higher privilege level, - * and that unprivileged transactions should have as little access as possible. - * This would usually imply the same permissions as kernel mappings on the CPU, - * if the IOMMU page table format is equivalent. 
- */ -#define IOMMU_PRIV (1 << 5) struct iommu_ops; struct iommu_group; @@ -38,9 +38,6 @@ struct bus_type; struct device; struct iommu_domain; struct notifier_block; -struct iommu_sva; -struct iommu_fault_event; -struct iommu_dma_cookie; /* iommu fault flags */ #define IOMMU_FAULT_READ 0x0 @@ -48,7 +45,6 @@ struct iommu_dma_cookie; typedef int (*iommu_fault_handler_t)(struct iommu_domain *, struct device *, unsigned long, int, void *); -typedef int (*iommu_dev_fault_handler_t)(struct iommu_fault *, void *); struct iommu_domain_geometry { dma_addr_t aperture_start; /* First address that can be mapped */ @@ -61,7 +57,6 @@ struct iommu_domain_geometry { #define __IOMMU_DOMAIN_DMA_API (1U << 1) /* Domain for use in DMA-API implementation */ #define __IOMMU_DOMAIN_PT (1U << 2) /* Domain is identity mapped */ -#define __IOMMU_DOMAIN_DMA_FQ (1U << 3) /* DMA-API uses flush queue */ /* * This are the possible domain-types @@ -74,17 +69,12 @@ struct iommu_domain_geometry { * IOMMU_DOMAIN_DMA - Internally used for DMA-API implementations. * This flag allows IOMMU drivers to implement * certain optimizations for these domains - * IOMMU_DOMAIN_DMA_FQ - As above, but definitely using batched TLB - * invalidation. 
*/ #define IOMMU_DOMAIN_BLOCKED (0U) #define IOMMU_DOMAIN_IDENTITY (__IOMMU_DOMAIN_PT) #define IOMMU_DOMAIN_UNMANAGED (__IOMMU_DOMAIN_PAGING) #define IOMMU_DOMAIN_DMA (__IOMMU_DOMAIN_PAGING | \ __IOMMU_DOMAIN_DMA_API) -#define IOMMU_DOMAIN_DMA_FQ (__IOMMU_DOMAIN_PAGING | \ - __IOMMU_DOMAIN_DMA_API | \ - __IOMMU_DOMAIN_DMA_FQ) struct iommu_domain { unsigned type; @@ -93,14 +83,9 @@ struct iommu_domain { iommu_fault_handler_t handler; void *handler_token; struct iommu_domain_geometry geometry; - struct iommu_dma_cookie *iova_cookie; + void *iova_cookie; }; -static inline bool iommu_is_dma_domain(struct iommu_domain *domain) -{ - return domain->type & __IOMMU_DOMAIN_DMA_API; -} - enum iommu_cap { IOMMU_CAP_CACHE_COHERENCY, /* IOMMU can enforce cache coherent DMA transactions */ @@ -108,88 +93,46 @@ enum iommu_cap { IOMMU_CAP_NOEXEC, /* IOMMU_NOEXEC flag */ }; -/* These are the possible reserved region types */ -enum iommu_resv_type { - /* Memory regions which must be mapped 1:1 at all times */ - IOMMU_RESV_DIRECT, - /* - * Memory regions which are advertised to be 1:1 but are - * commonly considered relaxable in some conditions, - * for instance in device assignment use case (USB, Graphics) - */ - IOMMU_RESV_DIRECT_RELAXABLE, - /* Arbitrary "never map this or give it to a device" address ranges */ - IOMMU_RESV_RESERVED, - /* Hardware MSI region (untranslated) */ - IOMMU_RESV_MSI, - /* Software-managed MSI translation window */ - IOMMU_RESV_SW_MSI, +/* + * Following constraints are specifc to FSL_PAMUV1: + * -aperture must be power of 2, and naturally aligned + * -number of windows must be power of 2, and address space size + * of each window is determined by aperture size / # of windows + * -the actual size of the mapped region of a window must be power + * of 2 starting with 4KB and physical address must be naturally + * aligned. + * DOMAIN_ATTR_FSL_PAMUV1 corresponds to the above mentioned contraints. 
+ * The caller can invoke iommu_domain_get_attr to check if the underlying + * iommu implementation supports these constraints. + */ + +enum iommu_attr { + DOMAIN_ATTR_GEOMETRY, + DOMAIN_ATTR_PAGING, + DOMAIN_ATTR_WINDOWS, + DOMAIN_ATTR_FSL_PAMU_STASH, + DOMAIN_ATTR_FSL_PAMU_ENABLE, + DOMAIN_ATTR_FSL_PAMUV1, + DOMAIN_ATTR_NESTING, /* two stages of translation */ + DOMAIN_ATTR_MAX, }; /** - * struct iommu_resv_region - descriptor for a reserved memory region + * struct iommu_dm_region - descriptor for a direct mapped memory region * @list: Linked list pointers * @start: System physical start address of the region * @length: Length of the region in bytes * @prot: IOMMU Protection flags (READ/WRITE/...) - * @type: Type of the reserved region */ -struct iommu_resv_region { +struct iommu_dm_region { struct list_head list; phys_addr_t start; size_t length; int prot; - enum iommu_resv_type type; }; -/** - * enum iommu_dev_features - Per device IOMMU features - * @IOMMU_DEV_FEAT_AUX: Auxiliary domain feature - * @IOMMU_DEV_FEAT_SVA: Shared Virtual Addresses - * @IOMMU_DEV_FEAT_IOPF: I/O Page Faults such as PRI or Stall. Generally - * enabling %IOMMU_DEV_FEAT_SVA requires - * %IOMMU_DEV_FEAT_IOPF, but some devices manage I/O Page - * Faults themselves instead of relying on the IOMMU. When - * supported, this feature must be enabled before and - * disabled after %IOMMU_DEV_FEAT_SVA. - * - * Device drivers query whether a feature is supported using - * iommu_dev_has_feature(), and enable it using iommu_dev_enable_feature(). 
- */ -enum iommu_dev_features { - IOMMU_DEV_FEAT_AUX, - IOMMU_DEV_FEAT_SVA, - IOMMU_DEV_FEAT_IOPF, -}; - -#define IOMMU_PASID_INVALID (-1U) - #ifdef CONFIG_IOMMU_API -/** - * struct iommu_iotlb_gather - Range information for a pending IOTLB flush - * - * @start: IOVA representing the start of the range to be flushed - * @end: IOVA representing the end of the range to be flushed (inclusive) - * @pgsize: The interval at which to perform the flush - * @freelist: Removed pages to free after sync - * @queued: Indicates that the flush will be queued - * - * This structure is intended to be updated by multiple calls to the - * ->unmap() function in struct iommu_ops before eventually being passed - * into ->iotlb_sync(). Drivers can add pages to @freelist to be freed after - * ->iotlb_sync() or ->iotlb_flush_all() have cleared all cached references to - * them. @queued is set to indicate when ->iotlb_flush_all() will be called - * later instead of ->iotlb_sync(), so drivers may optimise accordingly. - */ -struct iommu_iotlb_gather { - unsigned long start; - unsigned long end; - size_t pgsize; - struct page *freelist; - bool queued; -}; - /** * struct iommu_ops - iommu ops and capabilities * @capable: check capability @@ -198,46 +141,24 @@ struct iommu_iotlb_gather { * @attach_dev: attach device to an iommu domain * @detach_dev: detach device from an iommu domain * @map: map a physically contiguous memory region to an iommu domain - * @map_pages: map a physically contiguous set of pages of the same size to - * an iommu domain. 
* @unmap: unmap a physically contiguous memory region from an iommu domain - * @unmap_pages: unmap a number of pages of the same size from an iommu domain - * @flush_iotlb_all: Synchronously flush all hardware TLBs for this domain - * @iotlb_sync_map: Sync mappings created recently using @map to the hardware - * @iotlb_sync: Flush all queued ranges from the hardware TLBs and empty flush - * queue + * @map_sg: map a scatter-gather list of physically contiguous memory chunks + * to an iommu domain * @iova_to_phys: translate iova to physical address - * @probe_device: Add device to iommu driver handling - * @release_device: Remove device from iommu driver handling - * @probe_finalize: Do final setup work after the device is added to an IOMMU - * group and attached to the groups domain + * @add_device: add device to iommu grouping + * @remove_device: remove device from iommu grouping * @device_group: find iommu group for a particular device - * @enable_nesting: Enable nesting - * @set_pgtable_quirks: Set io page table quirks (IO_PGTABLE_QUIRK_*) - * @get_resv_regions: Request list of reserved regions for a device - * @put_resv_regions: Free list of reserved regions for a device - * @apply_resv_region: Temporary helper call-back for iova reserved ranges + * @domain_get_attr: Query domain attributes + * @domain_set_attr: Change domain attributes + * @get_dm_regions: Request list of direct mapping requirements for a device + * @put_dm_regions: Free list of direct mapping requirements for a device + * @apply_dm_region: Temporary helper call-back for iova reserved ranges + * @domain_window_enable: Configure and enable a particular window for a domain + * @domain_window_disable: Disable a particular window for a domain + * @domain_set_windows: Set the number of windows for a domain + * @domain_get_windows: Return the number of windows for a domain * @of_xlate: add OF master IDs to iommu grouping - * @is_attach_deferred: Check if domain attach should be deferred from iommu - 
* driver init to device driver init (default no) - * @dev_has/enable/disable_feat: per device entries to check/enable/disable - * iommu specific features. - * @dev_feat_enabled: check enabled feature - * @aux_attach/detach_dev: aux-domain specific attach/detach entries. - * @aux_get_pasid: get the pasid given an aux-domain - * @sva_bind: Bind process address space to device - * @sva_unbind: Unbind process address space from device - * @sva_get_pasid: Get PASID associated to a SVA handle - * @page_response: handle page request response - * @cache_invalidate: invalidate translation caches - * @sva_bind_gpasid: bind guest pasid and mm - * @sva_unbind_gpasid: unbind guest pasid and mm - * @def_domain_type: device default domain type, return value: - * - IOMMU_DOMAIN_IDENTITY: must use an identity domain - * - IOMMU_DOMAIN_DMA: must use a dma domain - * - 0: use the default setting * @pgsize_bitmap: bitmap of all possible supported page sizes - * @owner: Driver module providing these ops */ struct iommu_ops { bool (*capable)(enum iommu_cap); @@ -249,158 +170,39 @@ struct iommu_ops { int (*attach_dev)(struct iommu_domain *domain, struct device *dev); void (*detach_dev)(struct iommu_domain *domain, struct device *dev); int (*map)(struct iommu_domain *domain, unsigned long iova, - phys_addr_t paddr, size_t size, int prot, gfp_t gfp); - int (*map_pages)(struct iommu_domain *domain, unsigned long iova, - phys_addr_t paddr, size_t pgsize, size_t pgcount, - int prot, gfp_t gfp, size_t *mapped); + phys_addr_t paddr, size_t size, int prot); size_t (*unmap)(struct iommu_domain *domain, unsigned long iova, - size_t size, struct iommu_iotlb_gather *iotlb_gather); - size_t (*unmap_pages)(struct iommu_domain *domain, unsigned long iova, - size_t pgsize, size_t pgcount, - struct iommu_iotlb_gather *iotlb_gather); - void (*flush_iotlb_all)(struct iommu_domain *domain); - void (*iotlb_sync_map)(struct iommu_domain *domain, unsigned long iova, - size_t size); - void (*iotlb_sync)(struct 
iommu_domain *domain, - struct iommu_iotlb_gather *iotlb_gather); + size_t size); + size_t (*map_sg)(struct iommu_domain *domain, unsigned long iova, + struct scatterlist *sg, unsigned int nents, int prot); phys_addr_t (*iova_to_phys)(struct iommu_domain *domain, dma_addr_t iova); - struct iommu_device *(*probe_device)(struct device *dev); - void (*release_device)(struct device *dev); - void (*probe_finalize)(struct device *dev); + int (*add_device)(struct device *dev); + void (*remove_device)(struct device *dev); struct iommu_group *(*device_group)(struct device *dev); - int (*enable_nesting)(struct iommu_domain *domain); - int (*set_pgtable_quirks)(struct iommu_domain *domain, - unsigned long quirks); + int (*domain_get_attr)(struct iommu_domain *domain, + enum iommu_attr attr, void *data); + int (*domain_set_attr)(struct iommu_domain *domain, + enum iommu_attr attr, void *data); - /* Request/Free a list of reserved regions for a device */ - void (*get_resv_regions)(struct device *dev, struct list_head *list); - void (*put_resv_regions)(struct device *dev, struct list_head *list); - void (*apply_resv_region)(struct device *dev, - struct iommu_domain *domain, - struct iommu_resv_region *region); + /* Request/Free a list of direct mapping requirements for a device */ + void (*get_dm_regions)(struct device *dev, struct list_head *list); + void (*put_dm_regions)(struct device *dev, struct list_head *list); + void (*apply_dm_region)(struct device *dev, struct iommu_domain *domain, + struct iommu_dm_region *region); + + /* Window handling functions */ + int (*domain_window_enable)(struct iommu_domain *domain, u32 wnd_nr, + phys_addr_t paddr, u64 size, int prot); + void (*domain_window_disable)(struct iommu_domain *domain, u32 wnd_nr); + /* Set the number of windows per domain */ + int (*domain_set_windows)(struct iommu_domain *domain, u32 w_count); + /* Get the number of windows per domain */ + u32 (*domain_get_windows)(struct iommu_domain *domain); int 
(*of_xlate)(struct device *dev, struct of_phandle_args *args); - bool (*is_attach_deferred)(struct iommu_domain *domain, struct device *dev); - - /* Per device IOMMU features */ - bool (*dev_has_feat)(struct device *dev, enum iommu_dev_features f); - bool (*dev_feat_enabled)(struct device *dev, enum iommu_dev_features f); - int (*dev_enable_feat)(struct device *dev, enum iommu_dev_features f); - int (*dev_disable_feat)(struct device *dev, enum iommu_dev_features f); - - /* Aux-domain specific attach/detach entries */ - int (*aux_attach_dev)(struct iommu_domain *domain, struct device *dev); - void (*aux_detach_dev)(struct iommu_domain *domain, struct device *dev); - int (*aux_get_pasid)(struct iommu_domain *domain, struct device *dev); - - struct iommu_sva *(*sva_bind)(struct device *dev, struct mm_struct *mm, - void *drvdata); - void (*sva_unbind)(struct iommu_sva *handle); - u32 (*sva_get_pasid)(struct iommu_sva *handle); - - int (*page_response)(struct device *dev, - struct iommu_fault_event *evt, - struct iommu_page_response *msg); - int (*cache_invalidate)(struct iommu_domain *domain, struct device *dev, - struct iommu_cache_invalidate_info *inv_info); - int (*sva_bind_gpasid)(struct iommu_domain *domain, - struct device *dev, struct iommu_gpasid_bind_data *data); - - int (*sva_unbind_gpasid)(struct device *dev, u32 pasid); - - int (*def_domain_type)(struct device *dev); unsigned long pgsize_bitmap; - struct module *owner; -}; - -/** - * struct iommu_device - IOMMU core representation of one IOMMU hardware - * instance - * @list: Used by the iommu-core to keep a list of registered iommus - * @ops: iommu-ops for talking to this iommu - * @dev: struct device for sysfs handling - */ -struct iommu_device { - struct list_head list; - const struct iommu_ops *ops; - struct fwnode_handle *fwnode; - struct device *dev; -}; - -/** - * struct iommu_fault_event - Generic fault event - * - * Can represent recoverable faults such as a page requests or - * unrecoverable 
faults such as DMA or IRQ remapping faults. - * - * @fault: fault descriptor - * @list: pending fault event list, used for tracking responses - */ -struct iommu_fault_event { - struct iommu_fault fault; - struct list_head list; -}; - -/** - * struct iommu_fault_param - per-device IOMMU fault data - * @handler: Callback function to handle IOMMU faults at device level - * @data: handler private data - * @faults: holds the pending faults which needs response - * @lock: protect pending faults list - */ -struct iommu_fault_param { - iommu_dev_fault_handler_t handler; - void *data; - struct list_head faults; - struct mutex lock; -}; - -/** - * struct dev_iommu - Collection of per-device IOMMU data - * - * @fault_param: IOMMU detected device fault reporting data - * @iopf_param: I/O Page Fault queue and data - * @fwspec: IOMMU fwspec data - * @iommu_dev: IOMMU device this device is linked to - * @priv: IOMMU Driver private data - * - * TODO: migrate other per device data pointers under iommu_dev_data, e.g. - * struct iommu_group *iommu_group; - */ -struct dev_iommu { - struct mutex lock; - struct iommu_fault_param *fault_param; - struct iopf_device_param *iopf_param; - struct iommu_fwspec *fwspec; - struct iommu_device *iommu_dev; - void *priv; -}; - -int iommu_device_register(struct iommu_device *iommu, - const struct iommu_ops *ops, - struct device *hwdev); -void iommu_device_unregister(struct iommu_device *iommu); -int iommu_device_sysfs_add(struct iommu_device *iommu, - struct device *parent, - const struct attribute_group **groups, - const char *fmt, ...) 
__printf(4, 5); -void iommu_device_sysfs_remove(struct iommu_device *iommu); -int iommu_device_link(struct iommu_device *iommu, struct device *link); -void iommu_device_unlink(struct iommu_device *iommu, struct device *link); -int iommu_deferred_attach(struct device *dev, struct iommu_domain *domain); - -static inline struct iommu_device *dev_to_iommu_device(struct device *dev) -{ - return (struct iommu_device *)dev_get_drvdata(dev); -} - -static inline void iommu_iotlb_gather_init(struct iommu_iotlb_gather *gather) -{ - *gather = (struct iommu_iotlb_gather) { - .start = ULONG_MAX, - }; -} +} __do_const; #define IOMMU_GROUP_NOTIFY_ADD_DEVICE 1 /* Device added */ #define IOMMU_GROUP_NOTIFY_DEL_DEVICE 2 /* Pre Device removed */ @@ -410,7 +212,6 @@ static inline void iommu_iotlb_gather_init(struct iommu_iotlb_gather *gather) #define IOMMU_GROUP_NOTIFY_UNBOUND_DRIVER 6 /* Post Driver unbind */ extern int bus_set_iommu(struct bus_type *bus, const struct iommu_ops *ops); -extern int bus_iommu_probe(struct bus_type *bus); extern bool iommu_present(struct bus_type *bus); extern bool iommu_capable(struct bus_type *bus, enum iommu_cap cap); extern struct iommu_domain *iommu_domain_alloc(struct bus_type *bus); @@ -420,48 +221,21 @@ extern int iommu_attach_device(struct iommu_domain *domain, struct device *dev); extern void iommu_detach_device(struct iommu_domain *domain, struct device *dev); -extern int iommu_uapi_cache_invalidate(struct iommu_domain *domain, - struct device *dev, - void __user *uinfo); - -extern int iommu_uapi_sva_bind_gpasid(struct iommu_domain *domain, - struct device *dev, void __user *udata); -extern int iommu_uapi_sva_unbind_gpasid(struct iommu_domain *domain, - struct device *dev, void __user *udata); -extern int iommu_sva_unbind_gpasid(struct iommu_domain *domain, - struct device *dev, ioasid_t pasid); extern struct iommu_domain *iommu_get_domain_for_dev(struct device *dev); -extern struct iommu_domain *iommu_get_dma_domain(struct device *dev); extern 
int iommu_map(struct iommu_domain *domain, unsigned long iova, phys_addr_t paddr, size_t size, int prot); -extern int iommu_map_atomic(struct iommu_domain *domain, unsigned long iova, - phys_addr_t paddr, size_t size, int prot); extern size_t iommu_unmap(struct iommu_domain *domain, unsigned long iova, - size_t size); -extern size_t iommu_unmap_fast(struct iommu_domain *domain, - unsigned long iova, size_t size, - struct iommu_iotlb_gather *iotlb_gather); -extern ssize_t iommu_map_sg(struct iommu_domain *domain, unsigned long iova, - struct scatterlist *sg, unsigned int nents, int prot); -extern ssize_t iommu_map_sg_atomic(struct iommu_domain *domain, - unsigned long iova, struct scatterlist *sg, - unsigned int nents, int prot); + size_t size); +extern size_t default_iommu_map_sg(struct iommu_domain *domain, unsigned long iova, + struct scatterlist *sg,unsigned int nents, + int prot); extern phys_addr_t iommu_iova_to_phys(struct iommu_domain *domain, dma_addr_t iova); extern void iommu_set_fault_handler(struct iommu_domain *domain, iommu_fault_handler_t handler, void *token); -extern void iommu_get_resv_regions(struct device *dev, struct list_head *list); -extern void iommu_put_resv_regions(struct device *dev, struct list_head *list); -extern void generic_iommu_put_resv_regions(struct device *dev, - struct list_head *list); -extern void iommu_set_default_passthrough(bool cmd_line); -extern void iommu_set_default_translated(bool cmd_line); -extern bool iommu_default_passthrough(void); -extern struct iommu_resv_region * -iommu_alloc_resv_region(phys_addr_t start, size_t length, int prot, - enum iommu_resv_type type); -extern int iommu_get_group_resv_regions(struct iommu_group *group, - struct list_head *head); +extern void iommu_get_dm_regions(struct device *dev, struct list_head *list); +extern void iommu_put_dm_regions(struct device *dev, struct list_head *list); +extern int iommu_request_dm_for_dev(struct device *dev); extern int iommu_attach_group(struct 
iommu_domain *domain, struct iommu_group *group); @@ -479,216 +253,110 @@ extern void iommu_group_remove_device(struct device *dev); extern int iommu_group_for_each_dev(struct iommu_group *group, void *data, int (*fn)(struct device *, void *)); extern struct iommu_group *iommu_group_get(struct device *dev); -extern struct iommu_group *iommu_group_ref_get(struct iommu_group *group); extern void iommu_group_put(struct iommu_group *group); extern int iommu_group_register_notifier(struct iommu_group *group, struct notifier_block *nb); extern int iommu_group_unregister_notifier(struct iommu_group *group, struct notifier_block *nb); -extern int iommu_register_device_fault_handler(struct device *dev, - iommu_dev_fault_handler_t handler, - void *data); - -extern int iommu_unregister_device_fault_handler(struct device *dev); - -extern int iommu_report_device_fault(struct device *dev, - struct iommu_fault_event *evt); -extern int iommu_page_response(struct device *dev, - struct iommu_page_response *msg); - extern int iommu_group_id(struct iommu_group *group); +extern struct iommu_group *iommu_group_get_for_dev(struct device *dev); extern struct iommu_domain *iommu_group_default_domain(struct iommu_group *); -int iommu_enable_nesting(struct iommu_domain *domain); -int iommu_set_pgtable_quirks(struct iommu_domain *domain, - unsigned long quirks); - -void iommu_set_dma_strict(void); - -extern int report_iommu_fault(struct iommu_domain *domain, struct device *dev, - unsigned long iova, int flags); - -static inline void iommu_flush_iotlb_all(struct iommu_domain *domain) -{ - if (domain->ops->flush_iotlb_all) - domain->ops->flush_iotlb_all(domain); -} - -static inline void iommu_iotlb_sync(struct iommu_domain *domain, - struct iommu_iotlb_gather *iotlb_gather) -{ - if (domain->ops->iotlb_sync) - domain->ops->iotlb_sync(domain, iotlb_gather); - - iommu_iotlb_gather_init(iotlb_gather); -} +extern int iommu_domain_get_attr(struct iommu_domain *domain, enum iommu_attr, + void *data); 
+extern int iommu_domain_set_attr(struct iommu_domain *domain, enum iommu_attr, + void *data); +struct device *iommu_device_create(struct device *parent, void *drvdata, + const struct attribute_group **groups, + const char *fmt, ...) __printf(4, 5); +void iommu_device_destroy(struct device *dev); +int iommu_device_link(struct device *dev, struct device *link); +void iommu_device_unlink(struct device *dev, struct device *link); +/* Window handling function prototypes */ +extern int iommu_domain_window_enable(struct iommu_domain *domain, u32 wnd_nr, + phys_addr_t offset, u64 size, + int prot); +extern void iommu_domain_window_disable(struct iommu_domain *domain, u32 wnd_nr); /** - * iommu_iotlb_gather_is_disjoint - Checks whether a new range is disjoint + * report_iommu_fault() - report about an IOMMU fault to the IOMMU framework + * @domain: the iommu domain where the fault has happened + * @dev: the device where the fault has happened + * @iova: the faulting address + * @flags: mmu fault flags (e.g. IOMMU_FAULT_READ/IOMMU_FAULT_WRITE/...) * - * @gather: TLB gather data - * @iova: start of page to invalidate - * @size: size of page to invalidate + * This function should be called by the low-level IOMMU implementations + * whenever IOMMU faults happen, to allow high-level users, that are + * interested in such events, to know about them. * - * Helper for IOMMU drivers to check whether a new range and the gathered range - * are disjoint. For many IOMMUs, flushing the IOMMU in this case is better - * than merging the two, which might lead to unnecessary invalidations. + * This event may be useful for several possible use cases: + * - mere logging of the event + * - dynamic TLB/PTE loading + * - if restarting of the faulting device is required + * + * Returns 0 on success and an appropriate error code otherwise (if dynamic + * PTE/TLB loading will one day be supported, implementations will be able + * to tell whether it succeeded or not according to this return value). 
+ * + * Specifically, -ENOSYS is returned if a fault handler isn't installed + * (though fault handlers can also return -ENOSYS, in case they want to + * elicit the default behavior of the IOMMU drivers). */ -static inline -bool iommu_iotlb_gather_is_disjoint(struct iommu_iotlb_gather *gather, - unsigned long iova, size_t size) +static inline int report_iommu_fault(struct iommu_domain *domain, + struct device *dev, unsigned long iova, int flags) { - unsigned long start = iova, end = start + size - 1; + int ret = -ENOSYS; - return gather->end != 0 && - (end + 1 < gather->start || start > gather->end + 1); -} - - -/** - * iommu_iotlb_gather_add_range - Gather for address-based TLB invalidation - * @gather: TLB gather data - * @iova: start of page to invalidate - * @size: size of page to invalidate - * - * Helper for IOMMU drivers to build arbitrarily-sized invalidation commands - * where only the address range matters, and simply minimising intermediate - * syncs is preferred. - */ -static inline void iommu_iotlb_gather_add_range(struct iommu_iotlb_gather *gather, - unsigned long iova, size_t size) -{ - unsigned long end = iova + size - 1; - - if (gather->start > iova) - gather->start = iova; - if (gather->end < end) - gather->end = end; -} - -/** - * iommu_iotlb_gather_add_page - Gather for page-based TLB invalidation - * @domain: IOMMU domain to be invalidated - * @gather: TLB gather data - * @iova: start of page to invalidate - * @size: size of page to invalidate - * - * Helper for IOMMU drivers to build invalidation commands based on individual - * pages, or with page size/table level hints which cannot be gathered if they - * differ. - */ -static inline void iommu_iotlb_gather_add_page(struct iommu_domain *domain, - struct iommu_iotlb_gather *gather, - unsigned long iova, size_t size) -{ /* - * If the new page is disjoint from the current range or is mapped at - * a different granularity, then sync the TLB so that the gather - * structure can be rewritten. 
+ * if upper layers showed interest and installed a fault handler, + * invoke it. */ - if ((gather->pgsize && gather->pgsize != size) || - iommu_iotlb_gather_is_disjoint(gather, iova, size)) - iommu_iotlb_sync(domain, gather); + if (domain->handler) + ret = domain->handler(domain, dev, iova, flags, + domain->handler_token); - gather->pgsize = size; - iommu_iotlb_gather_add_range(gather, iova, size); + trace_io_page_fault(dev, iova, flags); + return ret; } -static inline bool iommu_iotlb_gather_queued(struct iommu_iotlb_gather *gather) +static inline size_t iommu_map_sg(struct iommu_domain *domain, + unsigned long iova, struct scatterlist *sg, + unsigned int nents, int prot) { - return gather && gather->queued; + return domain->ops->map_sg(domain, iova, sg, nents, prot); } /* PCI device grouping function */ extern struct iommu_group *pci_device_group(struct device *dev); /* Generic device grouping function */ extern struct iommu_group *generic_device_group(struct device *dev); -/* FSL-MC device grouping function */ -struct iommu_group *fsl_mc_device_group(struct device *dev); /** * struct iommu_fwspec - per-device IOMMU instance data * @ops: ops for this device's IOMMU * @iommu_fwnode: firmware handle for this device's IOMMU - * @flags: IOMMU_FWSPEC_* flags + * @iommu_priv: IOMMU driver private data for this device * @num_ids: number of associated device IDs * @ids: IDs which this device may present to the IOMMU */ struct iommu_fwspec { const struct iommu_ops *ops; struct fwnode_handle *iommu_fwnode; - u32 flags; + void *iommu_priv; unsigned int num_ids; - u32 ids[]; -}; - -/* ATS is supported */ -#define IOMMU_FWSPEC_PCI_RC_ATS (1 << 0) - -/** - * struct iommu_sva - handle to a device-mm bond - */ -struct iommu_sva { - struct device *dev; + u32 ids[1]; }; int iommu_fwspec_init(struct device *dev, struct fwnode_handle *iommu_fwnode, const struct iommu_ops *ops); void iommu_fwspec_free(struct device *dev); int iommu_fwspec_add_ids(struct device *dev, u32 *ids, int 
num_ids); -const struct iommu_ops *iommu_ops_from_fwnode(struct fwnode_handle *fwnode); - -static inline struct iommu_fwspec *dev_iommu_fwspec_get(struct device *dev) -{ - if (dev->iommu) - return dev->iommu->fwspec; - else - return NULL; -} - -static inline void dev_iommu_fwspec_set(struct device *dev, - struct iommu_fwspec *fwspec) -{ - dev->iommu->fwspec = fwspec; -} - -static inline void *dev_iommu_priv_get(struct device *dev) -{ - if (dev->iommu) - return dev->iommu->priv; - else - return NULL; -} - -static inline void dev_iommu_priv_set(struct device *dev, void *priv) -{ - dev->iommu->priv = priv; -} - -int iommu_probe_device(struct device *dev); -void iommu_release_device(struct device *dev); - -int iommu_dev_enable_feature(struct device *dev, enum iommu_dev_features f); -int iommu_dev_disable_feature(struct device *dev, enum iommu_dev_features f); -bool iommu_dev_feature_enabled(struct device *dev, enum iommu_dev_features f); -int iommu_aux_attach_device(struct iommu_domain *domain, struct device *dev); -void iommu_aux_detach_device(struct iommu_domain *domain, struct device *dev); -int iommu_aux_get_pasid(struct iommu_domain *domain, struct device *dev); - -struct iommu_sva *iommu_sva_bind_device(struct device *dev, - struct mm_struct *mm, - void *drvdata); -void iommu_sva_unbind_device(struct iommu_sva *handle); -u32 iommu_sva_get_pasid(struct iommu_sva *handle); #else /* CONFIG_IOMMU_API */ struct iommu_ops {}; struct iommu_group {}; struct iommu_fwspec {}; -struct iommu_device {}; -struct iommu_fault_param {}; -struct iommu_iotlb_gather {}; static inline bool iommu_present(struct bus_type *bus) { @@ -731,51 +399,33 @@ static inline struct iommu_domain *iommu_get_domain_for_dev(struct device *dev) } static inline int iommu_map(struct iommu_domain *domain, unsigned long iova, - phys_addr_t paddr, size_t size, int prot) + phys_addr_t paddr, int gfp_order, int prot) { return -ENODEV; } -static inline int iommu_map_atomic(struct iommu_domain *domain, - 
unsigned long iova, phys_addr_t paddr, - size_t size, int prot) +static inline int iommu_unmap(struct iommu_domain *domain, unsigned long iova, + int gfp_order) { return -ENODEV; } -static inline size_t iommu_unmap(struct iommu_domain *domain, - unsigned long iova, size_t size) -{ - return 0; -} - -static inline size_t iommu_unmap_fast(struct iommu_domain *domain, - unsigned long iova, int gfp_order, - struct iommu_iotlb_gather *iotlb_gather) -{ - return 0; -} - -static inline ssize_t iommu_map_sg(struct iommu_domain *domain, - unsigned long iova, struct scatterlist *sg, - unsigned int nents, int prot) -{ - return -ENODEV; -} - -static inline ssize_t iommu_map_sg_atomic(struct iommu_domain *domain, +static inline size_t iommu_map_sg(struct iommu_domain *domain, unsigned long iova, struct scatterlist *sg, unsigned int nents, int prot) { return -ENODEV; } -static inline void iommu_flush_iotlb_all(struct iommu_domain *domain) +static inline int iommu_domain_window_enable(struct iommu_domain *domain, + u32 wnd_nr, phys_addr_t paddr, + u64 size, int prot) { + return -ENODEV; } -static inline void iommu_iotlb_sync(struct iommu_domain *domain, - struct iommu_iotlb_gather *iotlb_gather) +static inline void iommu_domain_window_disable(struct iommu_domain *domain, + u32 wnd_nr) { } @@ -789,35 +439,21 @@ static inline void iommu_set_fault_handler(struct iommu_domain *domain, { } -static inline void iommu_get_resv_regions(struct device *dev, +static inline void iommu_get_dm_regions(struct device *dev, struct list_head *list) { } -static inline void iommu_put_resv_regions(struct device *dev, +static inline void iommu_put_dm_regions(struct device *dev, struct list_head *list) { } -static inline int iommu_get_group_resv_regions(struct iommu_group *group, - struct list_head *head) +static inline int iommu_request_dm_for_dev(struct device *dev) { return -ENODEV; } -static inline void iommu_set_default_passthrough(bool cmd_line) -{ -} - -static inline void 
iommu_set_default_translated(bool cmd_line) -{ -} - -static inline bool iommu_default_passthrough(void) -{ - return true; -} - static inline int iommu_attach_group(struct iommu_domain *domain, struct iommu_group *group) { @@ -889,82 +525,32 @@ static inline int iommu_group_unregister_notifier(struct iommu_group *group, return 0; } -static inline -int iommu_register_device_fault_handler(struct device *dev, - iommu_dev_fault_handler_t handler, - void *data) -{ - return -ENODEV; -} - -static inline int iommu_unregister_device_fault_handler(struct device *dev) -{ - return 0; -} - -static inline -int iommu_report_device_fault(struct device *dev, struct iommu_fault_event *evt) -{ - return -ENODEV; -} - -static inline int iommu_page_response(struct device *dev, - struct iommu_page_response *msg) -{ - return -ENODEV; -} - static inline int iommu_group_id(struct iommu_group *group) { return -ENODEV; } -static inline int iommu_set_pgtable_quirks(struct iommu_domain *domain, - unsigned long quirks) +static inline int iommu_domain_get_attr(struct iommu_domain *domain, + enum iommu_attr attr, void *data) { - return 0; + return -EINVAL; } -static inline int iommu_device_register(struct iommu_device *iommu, - const struct iommu_ops *ops, - struct device *hwdev) +static inline int iommu_domain_set_attr(struct iommu_domain *domain, + enum iommu_attr attr, void *data) { - return -ENODEV; + return -EINVAL; } -static inline struct iommu_device *dev_to_iommu_device(struct device *dev) +static inline struct device *iommu_device_create(struct device *parent, + void *drvdata, + const struct attribute_group **groups, + const char *fmt, ...) 
{ - return NULL; + return ERR_PTR(-ENODEV); } -static inline void iommu_iotlb_gather_init(struct iommu_iotlb_gather *gather) -{ -} - -static inline void iommu_iotlb_gather_add_page(struct iommu_domain *domain, - struct iommu_iotlb_gather *gather, - unsigned long iova, size_t size) -{ -} - -static inline bool iommu_iotlb_gather_queued(struct iommu_iotlb_gather *gather) -{ - return false; -} - -static inline void iommu_device_unregister(struct iommu_device *iommu) -{ -} - -static inline int iommu_device_sysfs_add(struct iommu_device *iommu, - struct device *parent, - const struct attribute_group **groups, - const char *fmt, ...) -{ - return -ENODEV; -} - -static inline void iommu_device_sysfs_remove(struct iommu_device *iommu) +static inline void iommu_device_destroy(struct device *dev) { } @@ -994,116 +580,6 @@ static inline int iommu_fwspec_add_ids(struct device *dev, u32 *ids, return -ENODEV; } -static inline -const struct iommu_ops *iommu_ops_from_fwnode(struct fwnode_handle *fwnode) -{ - return NULL; -} - -static inline bool -iommu_dev_feature_enabled(struct device *dev, enum iommu_dev_features feat) -{ - return false; -} - -static inline int -iommu_dev_enable_feature(struct device *dev, enum iommu_dev_features feat) -{ - return -ENODEV; -} - -static inline int -iommu_dev_disable_feature(struct device *dev, enum iommu_dev_features feat) -{ - return -ENODEV; -} - -static inline int -iommu_aux_attach_device(struct iommu_domain *domain, struct device *dev) -{ - return -ENODEV; -} - -static inline void -iommu_aux_detach_device(struct iommu_domain *domain, struct device *dev) -{ -} - -static inline int -iommu_aux_get_pasid(struct iommu_domain *domain, struct device *dev) -{ - return -ENODEV; -} - -static inline struct iommu_sva * -iommu_sva_bind_device(struct device *dev, struct mm_struct *mm, void *drvdata) -{ - return NULL; -} - -static inline void iommu_sva_unbind_device(struct iommu_sva *handle) -{ -} - -static inline u32 iommu_sva_get_pasid(struct iommu_sva 
*handle) -{ - return IOMMU_PASID_INVALID; -} - -static inline int -iommu_uapi_cache_invalidate(struct iommu_domain *domain, - struct device *dev, - struct iommu_cache_invalidate_info *inv_info) -{ - return -ENODEV; -} - -static inline int iommu_uapi_sva_bind_gpasid(struct iommu_domain *domain, - struct device *dev, void __user *udata) -{ - return -ENODEV; -} - -static inline int iommu_uapi_sva_unbind_gpasid(struct iommu_domain *domain, - struct device *dev, void __user *udata) -{ - return -ENODEV; -} - -static inline int iommu_sva_unbind_gpasid(struct iommu_domain *domain, - struct device *dev, - ioasid_t pasid) -{ - return -ENODEV; -} - -static inline struct iommu_fwspec *dev_iommu_fwspec_get(struct device *dev) -{ - return NULL; -} #endif /* CONFIG_IOMMU_API */ -/** - * iommu_map_sgtable - Map the given buffer to the IOMMU domain - * @domain: The IOMMU domain to perform the mapping - * @iova: The start address to map the buffer - * @sgt: The sg_table object describing the buffer - * @prot: IOMMU protection bits - * - * Creates a mapping at @iova for the buffer described by a scatterlist - * stored in the given sg_table object in the provided IOMMU domain. - */ -static inline size_t iommu_map_sgtable(struct iommu_domain *domain, - unsigned long iova, struct sg_table *sgt, int prot) -{ - return iommu_map_sg(domain, iova, sgt->sgl, sgt->orig_nents, prot); -} - -#ifdef CONFIG_IOMMU_DEBUGFS -extern struct dentry *iommu_debugfs_dir; -void iommu_debugfs_setup(void); -#else -static inline void iommu_debugfs_setup(void) {} -#endif - #endif /* __LINUX_IOMMU_H */ diff --git a/include/linux/iopoll.h b/include/linux/iopoll.h index 2c8860e406..1c30014ed1 100644 --- a/include/linux/iopoll.h +++ b/include/linux/iopoll.h @@ -1,6 +1,15 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright (c) 2012-2014 The Linux Foundation. All rights reserved. 
+ * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * */ #ifndef _LINUX_IOPOLL_H @@ -8,97 +17,11 @@ #include #include -#include +#include #include #include #include -/** - * read_poll_timeout - Periodically poll an address until a condition is - * met or a timeout occurs - * @op: accessor function (takes @args as its arguments) - * @val: Variable to read the value into - * @cond: Break condition (usually involving @val) - * @sleep_us: Maximum time to sleep between reads in us (0 - * tight-loops). Should be less than ~20ms since usleep_range - * is used (see Documentation/timers/timers-howto.rst). - * @timeout_us: Timeout in us, 0 means never timeout - * @sleep_before_read: if it is true, sleep @sleep_us before read. - * @args: arguments for @op poll - * - * Returns 0 on success and -ETIMEDOUT upon a timeout. In either - * case, the last read value at @args is stored in @val. Must not - * be called from atomic context if sleep_us or timeout_us are used. - * - * When available, you'll probably want to use one of the specialized - * macros defined below rather than this macro directly. - */ -#define read_poll_timeout(op, val, cond, sleep_us, timeout_us, \ - sleep_before_read, args...) 
\ -({ \ - u64 __timeout_us = (timeout_us); \ - unsigned long __sleep_us = (sleep_us); \ - ktime_t __timeout = ktime_add_us(ktime_get(), __timeout_us); \ - might_sleep_if((__sleep_us) != 0); \ - if (sleep_before_read && __sleep_us) \ - usleep_range((__sleep_us >> 2) + 1, __sleep_us); \ - for (;;) { \ - (val) = op(args); \ - if (cond) \ - break; \ - if (__timeout_us && \ - ktime_compare(ktime_get(), __timeout) > 0) { \ - (val) = op(args); \ - break; \ - } \ - if (__sleep_us) \ - usleep_range((__sleep_us >> 2) + 1, __sleep_us); \ - } \ - (cond) ? 0 : -ETIMEDOUT; \ -}) - -/** - * read_poll_timeout_atomic - Periodically poll an address until a condition is - * met or a timeout occurs - * @op: accessor function (takes @args as its arguments) - * @val: Variable to read the value into - * @cond: Break condition (usually involving @val) - * @delay_us: Time to udelay between reads in us (0 tight-loops). Should - * be less than ~10us since udelay is used (see - * Documentation/timers/timers-howto.rst). - * @timeout_us: Timeout in us, 0 means never timeout - * @delay_before_read: if it is true, delay @delay_us before read. - * @args: arguments for @op poll - * - * Returns 0 on success and -ETIMEDOUT upon a timeout. In either - * case, the last read value at @args is stored in @val. - * - * When available, you'll probably want to use one of the specialized - * macros defined below rather than this macro directly. - */ -#define read_poll_timeout_atomic(op, val, cond, delay_us, timeout_us, \ - delay_before_read, args...) \ -({ \ - u64 __timeout_us = (timeout_us); \ - unsigned long __delay_us = (delay_us); \ - ktime_t __timeout = ktime_add_us(ktime_get(), __timeout_us); \ - if (delay_before_read && __delay_us) \ - udelay(__delay_us); \ - for (;;) { \ - (val) = op(args); \ - if (cond) \ - break; \ - if (__timeout_us && \ - ktime_compare(ktime_get(), __timeout) > 0) { \ - (val) = op(args); \ - break; \ - } \ - if (__delay_us) \ - udelay(__delay_us); \ - } \ - (cond) ? 
0 : -ETIMEDOUT; \ -}) - /** * readx_poll_timeout - Periodically poll an address until a condition is met or a timeout occurs * @op: accessor function (takes @addr as its only argument) @@ -107,7 +30,7 @@ * @cond: Break condition (usually involving @val) * @sleep_us: Maximum time to sleep between reads in us (0 * tight-loops). Should be less than ~20ms since usleep_range - * is used (see Documentation/timers/timers-howto.rst). + * is used (see Documentation/timers/timers-howto.txt). * @timeout_us: Timeout in us, 0 means never timeout * * Returns 0 on success and -ETIMEDOUT upon a timeout. In either @@ -118,7 +41,22 @@ * macros defined below rather than this macro directly. */ #define readx_poll_timeout(op, addr, val, cond, sleep_us, timeout_us) \ - read_poll_timeout(op, val, cond, sleep_us, timeout_us, false, addr) +({ \ + ktime_t timeout = ktime_add_us(ktime_get(), timeout_us); \ + might_sleep_if(sleep_us); \ + for (;;) { \ + (val) = op(addr); \ + if (cond) \ + break; \ + if (timeout_us && ktime_compare(ktime_get(), timeout) > 0) { \ + (val) = op(addr); \ + break; \ + } \ + if (sleep_us) \ + usleep_range((sleep_us >> 2) + 1, sleep_us); \ + } \ + (cond) ? 0 : -ETIMEDOUT; \ +}) /** * readx_poll_timeout_atomic - Periodically poll an address until a condition is met or a timeout occurs @@ -128,7 +66,7 @@ * @cond: Break condition (usually involving @val) * @delay_us: Time to udelay between reads in us (0 tight-loops). Should * be less than ~10us since udelay is used (see - * Documentation/timers/timers-howto.rst). + * Documentation/timers/timers-howto.txt). * @timeout_us: Timeout in us, 0 means never timeout * * Returns 0 on success and -ETIMEDOUT upon a timeout. In either @@ -138,7 +76,22 @@ * macros defined below rather than this macro directly. 
*/ #define readx_poll_timeout_atomic(op, addr, val, cond, delay_us, timeout_us) \ - read_poll_timeout_atomic(op, val, cond, delay_us, timeout_us, false, addr) +({ \ + ktime_t timeout = ktime_add_us(ktime_get(), timeout_us); \ + for (;;) { \ + (val) = op(addr); \ + if (cond) \ + break; \ + if (timeout_us && ktime_compare(ktime_get(), timeout) > 0) { \ + (val) = op(addr); \ + break; \ + } \ + if (delay_us) \ + udelay(delay_us); \ + } \ + (cond) ? 0 : -ETIMEDOUT; \ +}) + #define readb_poll_timeout(addr, val, cond, delay_us, timeout_us) \ readx_poll_timeout(readb, addr, val, cond, delay_us, timeout_us) diff --git a/include/linux/ioport.h b/include/linux/ioport.h index 8359c50f99..1ccafa4dfa 100644 --- a/include/linux/ioport.h +++ b/include/linux/ioport.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ /* * ioport.h Definitions of routines for detecting, reserving and * allocating system resources. @@ -10,9 +9,7 @@ #define _LINUX_IOPORT_H #ifndef __ASSEMBLY__ -#include #include -#include #include /* * Resources are tree-like, allowing @@ -59,10 +56,6 @@ struct resource { #define IORESOURCE_EXT_TYPE_BITS 0x01000000 /* Resource extended types */ #define IORESOURCE_SYSRAM 0x01000000 /* System RAM (modifier) */ -/* IORESOURCE_SYSRAM specific bits. */ -#define IORESOURCE_SYSRAM_DRIVER_MANAGED 0x02000000 /* Always detected via a driver. */ -#define IORESOURCE_SYSRAM_MERGEABLE 0x04000000 /* Resource can be merged. 
*/ - #define IORESOURCE_EXCLUSIVE 0x08000000 /* Userland may not map this resource */ #define IORESOURCE_DISABLED 0x10000000 @@ -108,7 +101,6 @@ struct resource { #define IORESOURCE_MEM_32BIT (3<<3) #define IORESOURCE_MEM_SHADOWABLE (1<<5) /* dup: IORESOURCE_SHADOWABLE */ #define IORESOURCE_MEM_EXPANSIONROM (1<<6) -#define IORESOURCE_MEM_NONPOSTED (1<<7) /* PnP I/O specific bits (IORESOURCE_BITS) */ #define IORESOURCE_IO_16BIT_ADDR (1<<0) @@ -138,17 +130,6 @@ enum { IORES_DESC_ACPI_NV_STORAGE = 3, IORES_DESC_PERSISTENT_MEMORY = 4, IORES_DESC_PERSISTENT_MEMORY_LEGACY = 5, - IORES_DESC_DEVICE_PRIVATE_MEMORY = 6, - IORES_DESC_RESERVED = 7, - IORES_DESC_SOFT_RESERVED = 8, -}; - -/* - * Flags controlling ioremap() behavior. - */ -enum { - IORES_MAP_SYSTEM_RAM = BIT(0), - IORES_MAP_ENCRYPTED = BIT(1), }; /* helpers to define resources */ @@ -209,7 +190,7 @@ struct resource *lookup_resource(struct resource *root, resource_size_t start); int adjust_resource(struct resource *res, resource_size_t start, resource_size_t size); resource_size_t resource_alignment(struct resource *res); -static inline resource_size_t resource_size(const struct resource *res) +static inline resource_size_t __intentional_overflow(-1) resource_size(const struct resource *res) { return res->end - res->start + 1; } @@ -231,31 +212,6 @@ static inline bool resource_contains(struct resource *r1, struct resource *r2) return r1->start <= r2->start && r1->end >= r2->end; } -/* True if any part of r1 overlaps r2 */ -static inline bool resource_overlaps(struct resource *r1, struct resource *r2) -{ - return r1->start <= r2->end && r1->end >= r2->start; -} - -static inline bool -resource_intersection(struct resource *r1, struct resource *r2, struct resource *r) -{ - if (!resource_overlaps(r1, r2)) - return false; - r->start = max(r1->start, r2->start); - r->end = min(r1->end, r2->end); - return true; -} - -static inline bool -resource_union(struct resource *r1, struct resource *r2, struct resource *r) -{ - if 
(!resource_overlaps(r1, r2)) - return false; - r->start = min(r1->start, r2->start); - r->end = max(r1->end, r2->end); - return true; -} /* Convenience shorthand with allocation */ #define request_region(start,n,name) __request_region(&ioport_resource, (start), (n), (name), 0) @@ -278,10 +234,8 @@ extern struct resource * __request_region(struct resource *, extern void __release_region(struct resource *, resource_size_t, resource_size_t); #ifdef CONFIG_MEMORY_HOTREMOVE -extern void release_mem_region_adjustable(resource_size_t, resource_size_t); -#endif -#ifdef CONFIG_MEMORY_HOTPLUG -extern void merge_system_ram_resource(struct resource *res); +extern int release_mem_region_adjustable(struct resource *, resource_size_t, + resource_size_t); #endif /* Wrappers for managed devices */ @@ -308,34 +262,24 @@ extern struct resource * __devm_request_region(struct device *dev, extern void __devm_release_region(struct device *dev, struct resource *parent, resource_size_t start, resource_size_t n); extern int iomem_map_sanity_check(resource_size_t addr, unsigned long size); -extern bool iomem_is_exclusive(u64 addr); +extern int iomem_is_exclusive(u64 addr); extern int walk_system_ram_range(unsigned long start_pfn, unsigned long nr_pages, void *arg, int (*func)(unsigned long, unsigned long, void *)); extern int -walk_mem_res(u64 start, u64 end, void *arg, - int (*func)(struct resource *, void *)); -extern int walk_system_ram_res(u64 start, u64 end, void *arg, - int (*func)(struct resource *, void *)); + int (*func)(u64, u64, void *)); extern int walk_iomem_res_desc(unsigned long desc, unsigned long flags, u64 start, u64 end, - void *arg, int (*func)(struct resource *, void *)); + void *arg, int (*func)(u64, u64, void *)); -struct resource *devm_request_free_mem_region(struct device *dev, - struct resource *base, unsigned long size); -struct resource *request_free_mem_region(struct resource *base, - unsigned long size, const char *name); - -static inline void 
irqresource_disabled(struct resource *res, u32 irq) +/* True if any part of r1 overlaps r2 */ +static inline bool resource_overlaps(struct resource *r1, struct resource *r2) { - res->start = irq; - res->end = irq; - res->flags |= IORESOURCE_IRQ | IORESOURCE_DISABLED | IORESOURCE_UNSET; + return (r1->start <= r2->end && r1->end >= r2->start); } -extern struct address_space *iomem_get_mapping(void); #endif /* __ASSEMBLY__ */ #endif /* _LINUX_IOPORT_H */ diff --git a/include/linux/ioprio.h b/include/linux/ioprio.h index 3f53bc27a1..8c1239020d 100644 --- a/include/linux/ioprio.h +++ b/include/linux/ioprio.h @@ -1,27 +1,49 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef IOPRIO_H #define IOPRIO_H #include -#include #include -#include +/* + * Gives us 8 prio classes with 13-bits of data for each class + */ +#define IOPRIO_CLASS_SHIFT (13) +#define IOPRIO_PRIO_MASK ((1UL << IOPRIO_CLASS_SHIFT) - 1) + +#define IOPRIO_PRIO_CLASS(mask) ((mask) >> IOPRIO_CLASS_SHIFT) +#define IOPRIO_PRIO_DATA(mask) ((mask) & IOPRIO_PRIO_MASK) +#define IOPRIO_PRIO_VALUE(class, data) (((class) << IOPRIO_CLASS_SHIFT) | data) + +#define ioprio_valid(mask) (IOPRIO_PRIO_CLASS((mask)) != IOPRIO_CLASS_NONE) /* - * Default IO priority. + * These are the io priority groups as implemented by CFQ. RT is the realtime + * class, it always gets premium service. BE is the best-effort scheduling + * class, the default for any process. IDLE is the idle scheduling class, it + * is only served when no one else is using the disk. */ -#define IOPRIO_DEFAULT IOPRIO_PRIO_VALUE(IOPRIO_CLASS_BE, IOPRIO_BE_NORM) +enum { + IOPRIO_CLASS_NONE, + IOPRIO_CLASS_RT, + IOPRIO_CLASS_BE, + IOPRIO_CLASS_IDLE, +}; /* - * Check that a priority value has a valid class. 
+ * 8 best effort priority levels are supported */ -static inline bool ioprio_valid(unsigned short ioprio) -{ - unsigned short class = IOPRIO_PRIO_CLASS(ioprio); +#define IOPRIO_BE_NR (8) - return class > IOPRIO_CLASS_NONE && class <= IOPRIO_CLASS_IDLE; -} +enum { + IOPRIO_WHO_PROCESS = 1, + IOPRIO_WHO_PGRP, + IOPRIO_WHO_USER, +}; + +/* + * Fallback BE priority + */ +#define IOPRIO_NORM (4) /* * if process has set io priority explicitly, use that. if not, convert @@ -40,25 +62,12 @@ static inline int task_nice_ioclass(struct task_struct *task) { if (task->policy == SCHED_IDLE) return IOPRIO_CLASS_IDLE; - else if (task_is_realtime(task)) + else if (task->policy == SCHED_FIFO || task->policy == SCHED_RR) return IOPRIO_CLASS_RT; else return IOPRIO_CLASS_BE; } -/* - * If the calling process has set an I/O priority, use that. Otherwise, return - * the default I/O priority. - */ -static inline int get_current_ioprio(void) -{ - struct io_context *ioc = current->io_context; - - if (ioc) - return ioc->ioprio; - return IOPRIO_DEFAULT; -} - /* * For inheritance, return the highest of the two given priorities */ @@ -66,13 +75,4 @@ extern int ioprio_best(unsigned short aprio, unsigned short bprio); extern int set_task_ioprio(struct task_struct *task, int ioprio); -#ifdef CONFIG_BLOCK -extern int ioprio_check_cap(int ioprio); -#else -static inline int ioprio_check_cap(int ioprio) -{ - return -ENOTBLK; -} -#endif /* CONFIG_BLOCK */ - #endif diff --git a/include/linux/iova.h b/include/linux/iova.h index 71d8a2de66..f27bb2c62f 100644 --- a/include/linux/iova.h +++ b/include/linux/iova.h @@ -1,9 +1,11 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright (c) 2006, Intel Corporation. * + * This file is released under the GPLv2. 
+ * * Copyright (C) 2006-2008 Intel Corporation * Author: Anil S Keshavamurthy + * */ #ifndef _IOVA_H_ @@ -12,7 +14,6 @@ #include #include #include -#include #include /* iova structure */ @@ -35,67 +36,15 @@ struct iova_rcache { struct iova_cpu_rcache __percpu *cpu_rcaches; }; -struct iova_domain; - -/* Call-Back from IOVA code into IOMMU drivers */ -typedef void (* iova_flush_cb)(struct iova_domain *domain); - -/* Destructor for per-entry data */ -typedef void (* iova_entry_dtor)(unsigned long data); - -/* Number of entries per Flush Queue */ -#define IOVA_FQ_SIZE 256 - -/* Timeout (in ms) after which entries are flushed from the Flush-Queue */ -#define IOVA_FQ_TIMEOUT 10 - -/* Flush Queue entry for defered flushing */ -struct iova_fq_entry { - unsigned long iova_pfn; - unsigned long pages; - unsigned long data; - u64 counter; /* Flush counter when this entrie was added */ -}; - -/* Per-CPU Flush Queue structure */ -struct iova_fq { - struct iova_fq_entry entries[IOVA_FQ_SIZE]; - unsigned head, tail; - spinlock_t lock; -}; - /* holds all the iova translations for a domain */ struct iova_domain { spinlock_t iova_rbtree_lock; /* Lock to protect update of rbtree */ struct rb_root rbroot; /* iova domain rbtree root */ - struct rb_node *cached_node; /* Save last alloced node */ - struct rb_node *cached32_node; /* Save last 32-bit alloced node */ + struct rb_node *cached32_node; /* Save last alloced node */ unsigned long granule; /* pfn granularity for this domain */ unsigned long start_pfn; /* Lower limit for this domain */ unsigned long dma_32bit_pfn; - unsigned long max32_alloc_size; /* Size of last failed allocation */ - struct iova_fq __percpu *fq; /* Flush Queue */ - - atomic64_t fq_flush_start_cnt; /* Number of TLB flushes that - have been started */ - - atomic64_t fq_flush_finish_cnt; /* Number of TLB flushes that - have been finished */ - - struct iova anchor; /* rbtree lookup anchor */ struct iova_rcache rcaches[IOVA_RANGE_CACHE_MAX_SIZE]; /* IOVA range caches 
*/ - - iova_flush_cb flush_cb; /* Call-Back function to flush IOMMU - TLBs */ - - iova_entry_dtor entry_dtor; /* IOMMU driver specific destructor for - iova entry */ - - struct timer_list fq_timer; /* Timer to regularily empty the - flush-queues */ - atomic_t fq_timer_on; /* 1 when timer is active, 0 - when not */ - struct hlist_node cpuhp_dead; }; static inline unsigned long iova_size(struct iova *iova) @@ -133,10 +82,11 @@ static inline unsigned long iova_pfn(struct iova_domain *iovad, dma_addr_t iova) return iova >> iova_shift(iovad); } -#if IS_ENABLED(CONFIG_IOMMU_IOVA) int iova_cache_get(void); void iova_cache_put(void); +struct iova *alloc_iova_mem(void); +void free_iova_mem(struct iova *iova); void free_iova(struct iova_domain *iovad, unsigned long pfn); void __free_iova(struct iova_domain *iovad, struct iova *iova); struct iova *alloc_iova(struct iova_domain *iovad, unsigned long size, @@ -144,95 +94,17 @@ struct iova *alloc_iova(struct iova_domain *iovad, unsigned long size, bool size_aligned); void free_iova_fast(struct iova_domain *iovad, unsigned long pfn, unsigned long size); -void queue_iova(struct iova_domain *iovad, - unsigned long pfn, unsigned long pages, - unsigned long data); unsigned long alloc_iova_fast(struct iova_domain *iovad, unsigned long size, - unsigned long limit_pfn, bool flush_rcache); + unsigned long limit_pfn); struct iova *reserve_iova(struct iova_domain *iovad, unsigned long pfn_lo, unsigned long pfn_hi); +void copy_reserved_iova(struct iova_domain *from, struct iova_domain *to); void init_iova_domain(struct iova_domain *iovad, unsigned long granule, - unsigned long start_pfn); -int init_iova_flush_queue(struct iova_domain *iovad, - iova_flush_cb flush_cb, iova_entry_dtor entry_dtor); + unsigned long start_pfn, unsigned long pfn_32bit); struct iova *find_iova(struct iova_domain *iovad, unsigned long pfn); void put_iova_domain(struct iova_domain *iovad); -#else -static inline int iova_cache_get(void) -{ - return -ENOTSUPP; -} - 
-static inline void iova_cache_put(void) -{ -} - -static inline void free_iova(struct iova_domain *iovad, unsigned long pfn) -{ -} - -static inline void __free_iova(struct iova_domain *iovad, struct iova *iova) -{ -} - -static inline struct iova *alloc_iova(struct iova_domain *iovad, - unsigned long size, - unsigned long limit_pfn, - bool size_aligned) -{ - return NULL; -} - -static inline void free_iova_fast(struct iova_domain *iovad, - unsigned long pfn, - unsigned long size) -{ -} - -static inline void queue_iova(struct iova_domain *iovad, - unsigned long pfn, unsigned long pages, - unsigned long data) -{ -} - -static inline unsigned long alloc_iova_fast(struct iova_domain *iovad, - unsigned long size, - unsigned long limit_pfn, - bool flush_rcache) -{ - return 0; -} - -static inline struct iova *reserve_iova(struct iova_domain *iovad, - unsigned long pfn_lo, - unsigned long pfn_hi) -{ - return NULL; -} - -static inline void init_iova_domain(struct iova_domain *iovad, - unsigned long granule, - unsigned long start_pfn) -{ -} - -static inline int init_iova_flush_queue(struct iova_domain *iovad, - iova_flush_cb flush_cb, - iova_entry_dtor entry_dtor) -{ - return -ENODEV; -} - -static inline struct iova *find_iova(struct iova_domain *iovad, - unsigned long pfn) -{ - return NULL; -} - -static inline void put_iova_domain(struct iova_domain *iovad) -{ -} - -#endif +struct iova *split_and_remove_iova(struct iova_domain *iovad, + struct iova *iova, unsigned long pfn_lo, unsigned long pfn_hi); +void free_cpu_cached_iovas(unsigned int cpu, struct iova_domain *iovad); #endif diff --git a/include/linux/ip.h b/include/linux/ip.h index 3d9c6750af..492bc65135 100644 --- a/include/linux/ip.h +++ b/include/linux/ip.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * INET An implementation of the TCP/IP protocol suite for the LINUX * operating system. 
INET is implemented using the BSD Socket @@ -9,6 +8,11 @@ * Version: @(#)ip.h 1.0.2 04/28/93 * * Authors: Fred N. van Kempen, + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. */ #ifndef _LINUX_IP_H #define _LINUX_IP_H @@ -30,9 +34,4 @@ static inline struct iphdr *ipip_hdr(const struct sk_buff *skb) { return (struct iphdr *)skb_transport_header(skb); } - -static inline unsigned int ip_transport_len(const struct sk_buff *skb) -{ - return ntohs(ip_hdr(skb)->tot_len) - skb_network_header_len(skb); -} #endif /* _LINUX_IP_H */ diff --git a/include/linux/ipack.h b/include/linux/ipack.h index 2c6936b837..8bddc3fbdd 100644 --- a/include/linux/ipack.h +++ b/include/linux/ipack.h @@ -1,9 +1,12 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * Industry-pack bus. * * Copyright (C) 2011-2012 CERN (www.cern.ch) * Author: Samuel Iglesias Gonsalvez + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the Free + * Software Foundation; version 2 of the License. 
*/ #include diff --git a/include/linux/ipc.h b/include/linux/ipc.h index e1c9eea601..12d5bdf5bd 100644 --- a/include/linux/ipc.h +++ b/include/linux/ipc.h @@ -1,15 +1,15 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_IPC_H #define _LINUX_IPC_H #include #include -#include #include -#include + +#define IPCMNI 32768 /* <= MAX_INT limit for ipc arrays (including sysctl changes) */ /* used by in-kernel data structures */ -struct kern_ipc_perm { +struct kern_ipc_perm +{ spinlock_t lock; bool deleted; int id; @@ -18,14 +18,9 @@ struct kern_ipc_perm { kgid_t gid; kuid_t cuid; kgid_t cgid; - umode_t mode; - unsigned long seq; + umode_t mode; + unsigned long seq __intentional_overflow(-1); void *security; - - struct rhash_head khtnode; - - struct rcu_head rcu; - refcount_t refcount; -} ____cacheline_aligned_in_smp __randomize_layout; +} __randomize_layout; #endif /* _LINUX_IPC_H */ diff --git a/include/linux/ipc_namespace.h b/include/linux/ipc_namespace.h index 05e22770af..65327ee093 100644 --- a/include/linux/ipc_namespace.h +++ b/include/linux/ipc_namespace.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef __IPC_NAMESPACE_H__ #define __IPC_NAMESPACE_H__ @@ -8,8 +7,6 @@ #include #include #include -#include -#include struct user_namespace; @@ -18,15 +15,11 @@ struct ipc_ids { unsigned short seq; struct rw_semaphore rwsem; struct idr ipcs_idr; - int max_idx; - int last_idx; /* For wrap around detection */ -#ifdef CONFIG_CHECKPOINT_RESTORE int next_id; -#endif - struct rhashtable key_ht; }; struct ipc_namespace { + atomic_t count; struct ipc_ids ids[3]; int sem_ctls[4]; @@ -67,8 +60,6 @@ struct ipc_namespace { struct user_namespace *user_ns; struct ucounts *ucounts; - struct llist_node mnt_llist; - struct ns_common ns; } __randomize_layout; @@ -127,7 +118,7 @@ extern struct ipc_namespace *copy_ipcs(unsigned long flags, static inline struct ipc_namespace *get_ipc_ns(struct ipc_namespace *ns) { if (ns) - refcount_inc(&ns->ns.count); + 
atomic_inc(&ns->count); return ns; } diff --git a/include/linux/ipmi-fru.h b/include/linux/ipmi-fru.h new file mode 100644 index 0000000000..4d3a76380e --- /dev/null +++ b/include/linux/ipmi-fru.h @@ -0,0 +1,135 @@ +/* + * Copyright (C) 2012 CERN (www.cern.ch) + * Author: Alessandro Rubini + * + * Released according to the GNU GPL, version 2 or any later version. + * + * This work is part of the White Rabbit project, a research effort led + * by CERN, the European Institute for Nuclear Research. + */ +#ifndef __LINUX_IPMI_FRU_H__ +#define __LINUX_IPMI_FRU_H__ +#ifdef __KERNEL__ +# include +# include +#else +# include +# include +#endif + +/* + * These structures match the unaligned crap we have in FRU1011.pdf + * (http://download.intel.com/design/servers/ipmi/FRU1011.pdf) + */ + +/* chapter 8, page 5 */ +struct fru_common_header { + uint8_t format; /* 0x01 */ + uint8_t internal_use_off; /* multiple of 8 bytes */ + uint8_t chassis_info_off; /* multiple of 8 bytes */ + uint8_t board_area_off; /* multiple of 8 bytes */ + uint8_t product_area_off; /* multiple of 8 bytes */ + uint8_t multirecord_off; /* multiple of 8 bytes */ + uint8_t pad; /* must be 0 */ + uint8_t checksum; /* sum modulo 256 must be 0 */ +}; + +/* chapter 9, page 5 -- internal_use: not used by us */ + +/* chapter 10, page 6 -- chassis info: not used by us */ + +/* chapter 13, page 9 -- used by board_info_area below */ +struct fru_type_length { + uint8_t type_length; + uint8_t data[0]; +}; + +/* chapter 11, page 7 */ +struct fru_board_info_area { + uint8_t format; /* 0x01 */ + uint8_t area_len; /* multiple of 8 bytes */ + uint8_t language; /* I hope it's 0 */ + uint8_t mfg_date[3]; /* LSB, minutes since 1996-01-01 */ + struct fru_type_length tl[0]; /* type-length stuff follows */ + + /* + * the TL there are in order: + * Board Manufacturer + * Board Product Name + * Board Serial Number + * Board Part Number + * FRU File ID (may be null) + * more manufacturer-specific stuff + * 0xc1 as a terminator + * 
0x00 pad to a multiple of 8 bytes - 1 + * checksum (sum of all stuff module 256 must be zero) + */ +}; + +enum fru_type { + FRU_TYPE_BINARY = 0x00, + FRU_TYPE_BCDPLUS = 0x40, + FRU_TYPE_ASCII6 = 0x80, + FRU_TYPE_ASCII = 0xc0, /* not ascii: depends on language */ +}; + +/* + * some helpers + */ +static inline struct fru_board_info_area *fru_get_board_area( + const struct fru_common_header *header) +{ + /* we know for sure that the header is 8 bytes in size */ + return (struct fru_board_info_area *)(header + header->board_area_off); +} + +static inline int fru_type(struct fru_type_length *tl) +{ + return tl->type_length & 0xc0; +} + +static inline int fru_length(struct fru_type_length *tl) +{ + return (tl->type_length & 0x3f) + 1; /* len of whole record */ +} + +/* assume ascii-latin1 encoding */ +static inline int fru_strlen(struct fru_type_length *tl) +{ + return fru_length(tl) - 1; +} + +static inline char *fru_strcpy(char *dest, struct fru_type_length *tl) +{ + int len = fru_strlen(tl); + memcpy(dest, tl->data, len); + dest[len] = '\0'; + return dest; +} + +static inline struct fru_type_length *fru_next_tl(struct fru_type_length *tl) +{ + return tl + fru_length(tl); +} + +static inline int fru_is_eof(struct fru_type_length *tl) +{ + return tl->type_length == 0xc1; +} + +/* + * External functions defined in fru-parse.c. 
+ */ +extern int fru_header_cksum_ok(struct fru_common_header *header); +extern int fru_bia_cksum_ok(struct fru_board_info_area *bia); + +/* All these 4 return allocated strings by calling fru_alloc() */ +extern char *fru_get_board_manufacturer(struct fru_common_header *header); +extern char *fru_get_product_name(struct fru_common_header *header); +extern char *fru_get_serial_number(struct fru_common_header *header); +extern char *fru_get_part_number(struct fru_common_header *header); + +/* This must be defined by the caller of the above functions */ +extern void *fru_alloc(size_t size); + +#endif /* __LINUX_IMPI_FRU_H__ */ diff --git a/include/linux/ipmi.h b/include/linux/ipmi.h index 52850a02a3..78c5d5ae38 100644 --- a/include/linux/ipmi.h +++ b/include/linux/ipmi.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0+ */ /* * ipmi.h * @@ -10,6 +9,26 @@ * * Copyright 2002 MontaVista Software Inc. * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + * + * + * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. + * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, + * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS + * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR + * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE + * USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write to the Free Software Foundation, Inc., + * 675 Mass Ave, Cambridge, MA 02139, USA. */ #ifndef __LINUX_IPMI_H #define __LINUX_IPMI_H @@ -23,11 +42,9 @@ struct module; struct device; -/* - * Opaque type for a IPMI message user. One of these is needed to - * send and receive messages. - */ -struct ipmi_user; +/* Opaque type for a IPMI message user. One of these is needed to + send and receive messages. */ +typedef struct ipmi_user *ipmi_user_t; /* * Stuff coming from the receive interface comes as one of these. @@ -39,36 +56,28 @@ struct ipmi_user; struct ipmi_recv_msg { struct list_head link; - /* - * The type of message as defined in the "Receive Types" - * defines above. - */ + /* The type of message as defined in the "Receive Types" + defines above. */ int recv_type; - struct ipmi_user *user; + ipmi_user_t user; struct ipmi_addr addr; long msgid; struct kernel_ipmi_msg msg; - /* - * The user_msg_data is the data supplied when a message was - * sent, if this is a response to a sent message. If this is - * not a response to a sent message, then user_msg_data will - * be NULL. If the user above is NULL, then this will be the - * intf. - */ + /* The user_msg_data is the data supplied when a message was + sent, if this is a response to a sent message. If this is + not a response to a sent message, then user_msg_data will + be NULL. If the user above is NULL, then this will be the + intf. */ void *user_msg_data; - /* - * Call this when done with the message. It will presumably free - * the message and do any other necessary cleanup. - */ + /* Call this when done with the message. It will presumably free + the message and do any other necessary cleanup. */ void (*done)(struct ipmi_recv_msg *msg); - /* - * Place-holder for the data, don't make any assumptions about - * the size or existence of this, since it may change. 
- */ + /* Place-holder for the data, don't make any assumptions about + the size or existence of this, since it may change. */ unsigned char msg_data[IPMI_MAX_MSG_LENGTH]; }; @@ -76,77 +85,54 @@ struct ipmi_recv_msg { void ipmi_free_recv_msg(struct ipmi_recv_msg *msg); struct ipmi_user_hndl { - /* - * Routine type to call when a message needs to be routed to - * the upper layer. This will be called with some locks held, - * the only IPMI routines that can be called are ipmi_request - * and the alloc/free operations. The handler_data is the - * variable supplied when the receive handler was registered. - */ + /* Routine type to call when a message needs to be routed to + the upper layer. This will be called with some locks held, + the only IPMI routines that can be called are ipmi_request + and the alloc/free operations. The handler_data is the + variable supplied when the receive handler was registered. */ void (*ipmi_recv_hndl)(struct ipmi_recv_msg *msg, void *user_msg_data); - /* - * Called when the interface detects a watchdog pre-timeout. If - * this is NULL, it will be ignored for the user. - */ + /* Called when the interface detects a watchdog pre-timeout. If + this is NULL, it will be ignored for the user. */ void (*ipmi_watchdog_pretimeout)(void *handler_data); - - /* - * If not NULL, called at panic time after the interface has - * been set up to handle run to completion. - */ - void (*ipmi_panic_handler)(void *handler_data); - - /* - * Called when the interface has been removed. After this returns - * the user handle will be invalid. The interface may or may - * not be usable when this is called, but it will return errors - * if it is not usable. - */ - void (*shutdown)(void *handler_data); }; /* Create a new user of the IPMI layer on the given interface number. 
*/ int ipmi_create_user(unsigned int if_num, - const struct ipmi_user_hndl *handler, + struct ipmi_user_hndl *handler, void *handler_data, - struct ipmi_user **user); + ipmi_user_t *user); -/* - * Destroy the given user of the IPMI layer. Note that after this - * function returns, the system is guaranteed to not call any - * callbacks for the user. Thus as long as you destroy all the users - * before you unload a module, you will be safe. And if you destroy - * the users before you destroy the callback structures, it should be - * safe, too. - */ -int ipmi_destroy_user(struct ipmi_user *user); +/* Destroy the given user of the IPMI layer. Note that after this + function returns, the system is guaranteed to not call any + callbacks for the user. Thus as long as you destroy all the users + before you unload a module, you will be safe. And if you destroy + the users before you destroy the callback structures, it should be + safe, too. */ +int ipmi_destroy_user(ipmi_user_t user); /* Get the IPMI version of the BMC we are talking to. */ -int ipmi_get_version(struct ipmi_user *user, - unsigned char *major, - unsigned char *minor); +void ipmi_get_version(ipmi_user_t user, + unsigned char *major, + unsigned char *minor); -/* - * Set and get the slave address and LUN that we will use for our - * source messages. Note that this affects the interface, not just - * this user, so it will affect all users of this interface. This is - * so some initialization code can come in and do the OEM-specific - * things it takes to determine your address (if not the BMC) and set - * it for everyone else. Note that each channel can have its own - * address. - */ -int ipmi_set_my_address(struct ipmi_user *user, +/* Set and get the slave address and LUN that we will use for our + source messages. Note that this affects the interface, not just + this user, so it will affect all users of this interface. 
This is + so some initialization code can come in and do the OEM-specific + things it takes to determine your address (if not the BMC) and set + it for everyone else. Note that each channel can have its own address. */ +int ipmi_set_my_address(ipmi_user_t user, unsigned int channel, unsigned char address); -int ipmi_get_my_address(struct ipmi_user *user, +int ipmi_get_my_address(ipmi_user_t user, unsigned int channel, unsigned char *address); -int ipmi_set_my_LUN(struct ipmi_user *user, +int ipmi_set_my_LUN(ipmi_user_t user, unsigned int channel, unsigned char LUN); -int ipmi_get_my_LUN(struct ipmi_user *user, +int ipmi_get_my_LUN(ipmi_user_t user, unsigned int channel, unsigned char *LUN); @@ -163,7 +149,7 @@ int ipmi_get_my_LUN(struct ipmi_user *user, * it makes no sense to do it here. However, this can be used if you * have unusual requirements. */ -int ipmi_request_settime(struct ipmi_user *user, +int ipmi_request_settime(ipmi_user_t user, struct ipmi_addr *addr, long msgid, struct kernel_ipmi_msg *msg, @@ -181,7 +167,7 @@ int ipmi_request_settime(struct ipmi_user *user, * change as the system changes, so don't use it unless you REALLY * have to. */ -int ipmi_request_supply_msgs(struct ipmi_user *user, +int ipmi_request_supply_msgs(ipmi_user_t user, struct ipmi_addr *addr, long msgid, struct kernel_ipmi_msg *msg, @@ -197,7 +183,7 @@ int ipmi_request_supply_msgs(struct ipmi_user *user, * way. This is useful if you need to spin waiting for something to * happen in the IPMI driver. */ -void ipmi_poll_interface(struct ipmi_user *user); +void ipmi_poll_interface(ipmi_user_t user); /* * When commands come in to the SMS, the user can register to receive @@ -208,11 +194,11 @@ void ipmi_poll_interface(struct ipmi_user *user); * error. Channels are specified as a bitfield, use IPMI_CHAN_ALL to * mean all channels. 
*/ -int ipmi_register_for_cmd(struct ipmi_user *user, +int ipmi_register_for_cmd(ipmi_user_t user, unsigned char netfn, unsigned char cmd, unsigned int chans); -int ipmi_unregister_for_cmd(struct ipmi_user *user, +int ipmi_unregister_for_cmd(ipmi_user_t user, unsigned char netfn, unsigned char cmd, unsigned int chans); @@ -243,8 +229,8 @@ int ipmi_unregister_for_cmd(struct ipmi_user *user, * * See the IPMI_MAINTENANCE_MODE_xxx defines for what the mode means. */ -int ipmi_get_maintenance_mode(struct ipmi_user *user); -int ipmi_set_maintenance_mode(struct ipmi_user *user, int mode); +int ipmi_get_maintenance_mode(ipmi_user_t user); +int ipmi_set_maintenance_mode(ipmi_user_t user, int mode); /* * When the user is created, it will not receive IPMI events by @@ -252,7 +238,7 @@ int ipmi_set_maintenance_mode(struct ipmi_user *user, int mode); * The first user that sets this to TRUE will receive all events that * have been queued while no one was waiting for events. */ -int ipmi_set_gets_events(struct ipmi_user *user, bool val); +int ipmi_set_gets_events(ipmi_user_t user, bool val); /* * Called when a new SMI is registered. This will also be called on @@ -262,18 +248,14 @@ int ipmi_set_gets_events(struct ipmi_user *user, bool val); struct ipmi_smi_watcher { struct list_head link; - /* - * You must set the owner to the current module, if you are in - * a module (generally just set it to "THIS_MODULE"). - */ + /* You must set the owner to the current module, if you are in + a module (generally just set it to "THIS_MODULE"). */ struct module *owner; - /* - * These two are called with read locks held for the interface - * the watcher list. So you can add and remove users from the - * IPMI interface, send messages, etc., but you cannot add - * or remove SMI watchers or SMI interfaces. - */ + /* These two are called with read locks held for the interface + the watcher list. 
So you can add and remove users from the + IPMI interface, send messages, etc., but you cannot add + or remove SMI watchers or SMI interfaces. */ void (*new_smi)(int if_num, struct device *dev); void (*smi_gone)(int if_num); }; @@ -281,10 +263,8 @@ struct ipmi_smi_watcher { int ipmi_smi_watcher_register(struct ipmi_smi_watcher *watcher); int ipmi_smi_watcher_unregister(struct ipmi_smi_watcher *watcher); -/* - * The following are various helper functions for dealing with IPMI - * addresses. - */ +/* The following are various helper functions for dealing with IPMI + addresses. */ /* Return the maximum length of an IPMI address given it's type. */ unsigned int ipmi_addr_length(int addr_type); @@ -297,7 +277,7 @@ int ipmi_validate_addr(struct ipmi_addr *addr, int len); */ enum ipmi_addr_src { SI_INVALID = 0, SI_HOTMOD, SI_HARDCODED, SI_SPMI, SI_ACPI, SI_SMBIOS, - SI_PCI, SI_DEVICETREE, SI_PLATFORM, SI_LAST + SI_PCI, SI_DEVICETREE, SI_LAST }; const char *ipmi_addr_src_to_str(enum ipmi_addr_src src); @@ -330,9 +310,7 @@ struct ipmi_smi_info { union ipmi_smi_info_union addr_info; }; -/* This is to get the private info of struct ipmi_smi */ +/* This is to get the private info of ipmi_smi_t */ extern int ipmi_get_smi_info(int if_num, struct ipmi_smi_info *data); -#define GET_DEVICE_ID_MAX_RETRY 5 - #endif /* __LINUX_IPMI_H */ diff --git a/include/linux/ipmi_smi.h b/include/linux/ipmi_smi.h index deec18b894..f8cea14485 100644 --- a/include/linux/ipmi_smi.h +++ b/include/linux/ipmi_smi.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0+ */ /* * ipmi_smi.h * @@ -10,6 +9,26 @@ * * Copyright 2002 MontaVista Software Inc. * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. 
+ * + * + * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. + * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, + * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS + * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR + * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE + * USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write to the Free Software Foundation, Inc., + * 675 Mass Ave, Cambridge, MA 02139, USA. */ #ifndef __LINUX_IPMI_SMI_H @@ -22,21 +41,11 @@ struct device; -/* - * This files describes the interface for IPMI system management interface - * drivers to bind into the IPMI message handler. - */ +/* This files describes the interface for IPMI system management interface + drivers to bind into the IPMI message handler. */ /* Structure for the low-level drivers. */ -struct ipmi_smi; - -/* - * Flags for set_check_watch() below. Tells if the SMI should be - * waiting for watchdog timeouts, commands and/or messages. - */ -#define IPMI_WATCH_MASK_CHECK_MESSAGES (1 << 0) -#define IPMI_WATCH_MASK_CHECK_WATCHDOG (1 << 1) -#define IPMI_WATCH_MASK_CHECK_COMMANDS (1 << 2) +typedef struct ipmi_smi *ipmi_smi_t; /* * Messages to/from the lower layer. The smi interface will take one @@ -63,30 +72,20 @@ struct ipmi_smi_msg { int rsp_size; unsigned char rsp[IPMI_MAX_MSG_LENGTH]; - /* - * Will be called when the system is done with the message - * (presumably to free it). 
- */ + /* Will be called when the system is done with the message + (presumably to free it). */ void (*done)(struct ipmi_smi_msg *msg); }; struct ipmi_smi_handlers { struct module *owner; - /* - * The low-level interface cannot start sending messages to - * the upper layer until this function is called. This may - * not be NULL, the lower layer must take the interface from - * this call. - */ - int (*start_processing)(void *send_info, - struct ipmi_smi *new_intf); - - /* - * When called, the low-level interface should disable all - * processing, it should be complete shut down when it returns. - */ - void (*shutdown)(void *send_info); + /* The low-level interface cannot start sending messages to + the upper layer until this function is called. This may + not be NULL, the lower layer must take the interface from + this call. */ + int (*start_processing)(void *send_info, + ipmi_smi_t new_intf); /* * Get the detailed private info of the low level interface and store @@ -95,64 +94,56 @@ struct ipmi_smi_handlers { */ int (*get_smi_info)(void *send_info, struct ipmi_smi_info *data); - /* - * Called to enqueue an SMI message to be sent. This - * operation is not allowed to fail. If an error occurs, it - * should report back the error in a received message. It may - * do this in the current call context, since no write locks - * are held when this is run. Message are delivered one at - * a time by the message handler, a new message will not be - * delivered until the previous message is returned. - */ + /* Called to enqueue an SMI message to be sent. This + operation is not allowed to fail. If an error occurs, it + should report back the error in a received message. It may + do this in the current call context, since no write locks + are held when this is run. Message are delivered one at + a time by the message handler, a new message will not be + delivered until the previous message is returned. 
*/ void (*sender)(void *send_info, struct ipmi_smi_msg *msg); - /* - * Called by the upper layer to request that we try to get - * events from the BMC we are attached to. - */ + /* Called by the upper layer to request that we try to get + events from the BMC we are attached to. */ void (*request_events)(void *send_info); - /* - * Called by the upper layer when some user requires that the - * interface watch for received messages and watchdog - * pretimeouts (basically do a "Get Flags", or not. Used by - * the SMI to know if it should watch for these. This may be - * NULL if the SMI does not implement it. watch_mask is from - * IPMI_WATCH_MASK_xxx above. The interface should run slower - * timeouts for just watchdog checking or faster timeouts when - * waiting for the message queue. - */ - void (*set_need_watch)(void *send_info, unsigned int watch_mask); + /* Called by the upper layer when some user requires that the + interface watch for events, received messages, watchdog + pretimeouts, or not. Used by the SMI to know if it should + watch for these. This may be NULL if the SMI does not + implement it. */ + void (*set_need_watch)(void *send_info, bool enable); /* * Called when flushing all pending messages. */ void (*flush_messages)(void *send_info); - /* - * Called when the interface should go into "run to - * completion" mode. If this call sets the value to true, the - * interface should make sure that all messages are flushed - * out and that none are pending, and any new requests are run - * to completion immediately. - */ + /* Called when the interface should go into "run to + completion" mode. If this call sets the value to true, the + interface should make sure that all messages are flushed + out and that none are pending, and any new requests are run + to completion immediately. */ void (*set_run_to_completion)(void *send_info, bool run_to_completion); - /* - * Called to poll for work to do. 
This is so upper layers can - * poll for operations during things like crash dumps. - */ + /* Called to poll for work to do. This is so upper layers can + poll for operations during things like crash dumps. */ void (*poll)(void *send_info); - /* - * Enable/disable firmware maintenance mode. Note that this - * is *not* the modes defined, this is simply an on/off - * setting. The message handler does the mode handling. Note - * that this is called from interrupt context, so it cannot - * block. - */ + /* Enable/disable firmware maintenance mode. Note that this + is *not* the modes defined, this is simply an on/off + setting. The message handler does the mode handling. Note + that this is called from interrupt context, so it cannot + block. */ void (*set_maintenance_mode)(void *send_info, bool enable); + + /* Tell the handler that we are using it/not using it. The + message handler get the modules that this handler belongs + to; this function lets the SMI claim any modules that it + uses. These may be NULL if this is not required. */ + int (*inc_usecount)(void *send_info); + void (*dec_usecount)(void *send_info); }; struct ipmi_device_id { @@ -171,28 +162,27 @@ struct ipmi_device_id { #define ipmi_version_major(v) ((v)->ipmi_version & 0xf) #define ipmi_version_minor(v) ((v)->ipmi_version >> 4) -/* - * Take a pointer to an IPMI response and extract device id information from - * it. @netfn is in the IPMI_NETFN_ format, so may need to be shifted from - * a SI response. - */ -static inline int ipmi_demangle_device_id(uint8_t netfn, uint8_t cmd, - const unsigned char *data, +/* Take a pointer to a raw data buffer and a length and extract device + id information from it. The first byte of data must point to the + netfn << 2, the data should be of the format: + netfn << 2, cmd, completion code, data + as normally comes from a device interface. 
*/ +static inline int ipmi_demangle_device_id(const unsigned char *data, unsigned int data_len, struct ipmi_device_id *id) { - if (data_len < 7) + if (data_len < 9) return -EINVAL; - if (netfn != IPMI_NETFN_APP_RESPONSE || cmd != IPMI_GET_DEVICE_ID_CMD) + if (data[0] != IPMI_NETFN_APP_RESPONSE << 2 || + data[1] != IPMI_GET_DEVICE_ID_CMD) /* Strange, didn't get the response we expected. */ return -EINVAL; - if (data[0] != 0) + if (data[2] != 0) /* That's odd, it shouldn't be able to fail. */ return -EINVAL; - data++; - data_len--; - + data += 3; + data_len -= 3; id->device_id = data[0]; id->device_revision = data[1]; id->firmware_revision_1 = data[2]; @@ -216,28 +206,23 @@ static inline int ipmi_demangle_device_id(uint8_t netfn, uint8_t cmd, return 0; } -/* - * Add a low-level interface to the IPMI driver. Note that if the - * interface doesn't know its slave address, it should pass in zero. - * The low-level interface should not deliver any messages to the - * upper layer until the start_processing() function in the handlers - * is called, and the lower layer must get the interface from that - * call. - */ -int ipmi_add_smi(struct module *owner, - const struct ipmi_smi_handlers *handlers, - void *send_info, - struct device *dev, - unsigned char slave_addr); - -#define ipmi_register_smi(handlers, send_info, dev, slave_addr) \ - ipmi_add_smi(THIS_MODULE, handlers, send_info, dev, slave_addr) +/* Add a low-level interface to the IPMI driver. Note that if the + interface doesn't know its slave address, it should pass in zero. + The low-level interface should not deliver any messages to the + upper layer until the start_processing() function in the handlers + is called, and the lower layer must get the interface from that + call. */ +int ipmi_register_smi(const struct ipmi_smi_handlers *handlers, + void *send_info, + struct ipmi_device_id *device_id, + struct device *dev, + unsigned char slave_addr); /* * Remove a low-level interface from the IPMI driver. 
This will * return an error if the interface is still in use by a user. */ -void ipmi_unregister_smi(struct ipmi_smi *intf); +int ipmi_unregister_smi(ipmi_smi_t intf); /* * The lower layer reports received messages through this interface. @@ -245,11 +230,11 @@ void ipmi_unregister_smi(struct ipmi_smi *intf); * the lower layer gets an error sending a message, it should format * an error response in the message response. */ -void ipmi_smi_msg_received(struct ipmi_smi *intf, +void ipmi_smi_msg_received(ipmi_smi_t intf, struct ipmi_smi_msg *msg); /* The lower layer received a watchdog pre-timeout on interface. */ -void ipmi_smi_watchdog_pretimeout(struct ipmi_smi *intf); +void ipmi_smi_watchdog_pretimeout(ipmi_smi_t intf); struct ipmi_smi_msg *ipmi_alloc_smi_msg(void); static inline void ipmi_free_smi_msg(struct ipmi_smi_msg *msg) @@ -257,4 +242,11 @@ static inline void ipmi_free_smi_msg(struct ipmi_smi_msg *msg) msg->done(msg); } +/* Allow the lower layer to add things to the proc filesystem + directory for this interface. Note that the entry will + automatically be dstroyed when the interface is destroyed. 
*/ +int ipmi_smi_add_proc_entry(ipmi_smi_t smi, char *name, + const struct file_operations *proc_ops, + void *data); + #endif /* __LINUX_IPMI_SMI_H */ diff --git a/include/linux/ipv6.h b/include/linux/ipv6.h index ef4a698657..a0649973ee 100644 --- a/include/linux/ipv6.h +++ b/include/linux/ipv6.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _IPV6_H #define _IPV6_H @@ -31,7 +30,6 @@ struct ipv6_devconf { __s32 max_desync_factor; __s32 max_addresses; __s32 accept_ra_defrtr; - __u32 ra_defrtr_metric; __s32 accept_ra_min_hop_limit; __s32 accept_ra_pinfo; __s32 ignore_routes_with_linkdown; @@ -39,7 +37,6 @@ struct ipv6_devconf { __s32 accept_ra_rtr_pref; __s32 rtr_probe_interval; #ifdef CONFIG_IPV6_ROUTE_INFO - __s32 accept_ra_rt_info_min_plen; __s32 accept_ra_rt_info_max_plen; #endif #endif @@ -67,18 +64,6 @@ struct ipv6_devconf { } stable_secret; __s32 use_oif_addrs_only; __s32 keep_addr_on_down; - __s32 seg6_enabled; -#ifdef CONFIG_IPV6_SEG6_HMAC - __s32 seg6_require_hmac; -#endif - __u32 enhanced_dad; - __u32 addr_gen_mode; - __s32 disable_policy; - __s32 ndisc_tclass; - __s32 rpl_seg_enabled; - __u32 ioam6_id; - __u32 ioam6_id_wide; - __u8 ioam6_enabled; struct ctl_table_header *sysctl_header; }; @@ -88,6 +73,7 @@ struct ipv6_params { __s32 autoconf; }; extern struct ipv6_params ipv6_defaults; +#include #include #include @@ -108,12 +94,6 @@ static inline struct ipv6hdr *ipipv6_hdr(const struct sk_buff *skb) return (struct ipv6hdr *)skb_transport_header(skb); } -static inline unsigned int ipv6_transport_len(const struct sk_buff *skb) -{ - return ntohs(ipv6_hdr(skb)->payload_len) + sizeof(struct ipv6hdr) - - skb_network_header_len(skb); -} - /* This structure contains results of exthdrs parsing as offsets from skb->nh. 
@@ -140,7 +120,6 @@ struct inet6_skb_parm { #define IP6SKB_FRAGMENTED 16 #define IP6SKB_HOPBYHOP 32 #define IP6SKB_L3SLAVE 64 -#define IP6SKB_JUMBOGRAM 128 }; #if defined(CONFIG_NET_L3_MASTER_DEV) @@ -165,19 +144,15 @@ static inline int inet6_iif(const struct sk_buff *skb) return l3_slave ? skb->skb_iif : IP6CB(skb)->iif; } -static inline bool inet6_is_jumbogram(const struct sk_buff *skb) -{ - return !!(IP6CB(skb)->flags & IP6SKB_JUMBOGRAM); -} - /* can not be used in TCP layer after tcp_v6_fill_cb */ -static inline int inet6_sdif(const struct sk_buff *skb) +static inline bool inet6_exact_dif_match(struct net *net, struct sk_buff *skb) { -#if IS_ENABLED(CONFIG_NET_L3_MASTER_DEV) - if (skb && ipv6_l3mdev_skb(IP6CB(skb)->flags)) - return IP6CB(skb)->iif; +#if defined(CONFIG_NET_L3_MASTER_DEV) + if (!net->ipv4.sysctl_tcp_l3mdev_accept && + skb && ipv6_l3mdev_skb(IP6CB(skb)->flags)) + return true; #endif - return 0; + return false; } struct tcp6_request_sock { @@ -215,7 +190,7 @@ struct ipv6_pinfo { /* * Packed in 16bits. - * Omit one shift by putting the signed field at MSB. + * Omit one shift by by putting the signed field at MSB. 
*/ #if defined(__BIG_ENDIAN_BITFIELD) __s16 hop_limit:9; @@ -254,9 +229,8 @@ struct ipv6_pinfo { rxflow:1, rxtclass:1, rxpmtu:1, - rxorigdstaddr:1, - recvfragsize:1; - /* 1 bits hole */ + rxorigdstaddr:1; + /* 2 bits hole */ } bits; __u16 all; } rxopt; @@ -272,11 +246,7 @@ struct ipv6_pinfo { * 100: prefer care-of address */ dontfrag:1, - autoflowlabel:1, - autoflowlabel_set:1, - mc_all:1, - recverr_rfc4884:1, - rtalert_isolate:1; + autoflowlabel:1; __u8 min_hopcount; __u8 tclass; __be32 rcv_flowinfo; @@ -337,6 +307,17 @@ static inline struct raw6_sock *raw6_sk(const struct sock *sk) return (struct raw6_sock *)sk; } +static inline void inet_sk_copy_descendant(struct sock *sk_to, + const struct sock *sk_from) +{ + int ancestor_size = sizeof(struct inet_sock); + + if (sk_from->sk_family == PF_INET6) + ancestor_size += sizeof(struct ipv6_pinfo); + + __inet_sk_copy_descendant(sk_to, sk_from, ancestor_size); +} + #define __ipv6_only_sock(sk) (sk->sk_ipv6only) #define ipv6_only_sock(sk) (__ipv6_only_sock(sk)) #define ipv6_sk_rxinfo(sk) ((sk)->sk_family == PF_INET6 && \ diff --git a/include/linux/ipv6_route.h b/include/linux/ipv6_route.h index 5711e246c3..25b5f1f5e7 100644 --- a/include/linux/ipv6_route.h +++ b/include/linux/ipv6_route.h @@ -1,9 +1,13 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * Linux INET6 implementation * * Authors: * Pedro Roque + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. */ #ifndef _LINUX_IPV6_ROUTE_H #define _LINUX_IPV6_ROUTE_H diff --git a/include/linux/irq.h b/include/linux/irq.h index c8293c8176..650e968cf7 100644 --- a/include/linux/irq.h +++ b/include/linux/irq.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_IRQ_H #define _LINUX_IRQ_H @@ -10,15 +9,19 @@ * Thanks. 
--rmk */ +#include +#include #include #include #include +#include #include #include #include +#include #include +#include #include -#include #include #include @@ -27,7 +30,6 @@ struct seq_file; struct module; struct msi_msg; -struct irq_affinity_desc; enum irqchip_irq_state; /* @@ -71,8 +73,6 @@ enum irqchip_irq_state; * it from the spurious interrupt detection * mechanism and from core side polling. * IRQ_DISABLE_UNLAZY - Disable lazy irq disable - * IRQ_HIDDEN - Don't show up in /proc/interrupts - * IRQ_NO_DEBUG - Exclude from note_interrupt() debugging */ enum { IRQ_TYPE_NONE = 0x00000000, @@ -99,15 +99,13 @@ enum { IRQ_PER_CPU_DEVID = (1 << 17), IRQ_IS_POLLED = (1 << 18), IRQ_DISABLE_UNLAZY = (1 << 19), - IRQ_HIDDEN = (1 << 20), - IRQ_NO_DEBUG = (1 << 21), }; #define IRQF_MODIFY_MASK \ (IRQ_TYPE_SENSE_MASK | IRQ_NOPROBE | IRQ_NOREQUEST | \ IRQ_NOAUTOEN | IRQ_MOVE_PCNTXT | IRQ_LEVEL | IRQ_NO_BALANCING | \ IRQ_PER_CPU | IRQ_NESTED_THREAD | IRQ_NOTHREAD | IRQ_PER_CPU_DEVID | \ - IRQ_IS_POLLED | IRQ_DISABLE_UNLAZY | IRQ_HIDDEN) + IRQ_IS_POLLED | IRQ_DISABLE_UNLAZY) #define IRQ_NO_BALANCING_MASK (IRQ_PER_CPU | IRQ_NO_BALANCING) @@ -118,7 +116,7 @@ enum { * IRQ_SET_MASK_NOCPY - OK, chip did update irq_common_data.affinity * IRQ_SET_MASK_OK_DONE - Same as IRQ_SET_MASK_OK for core. Special code to * support stacked irqchips, which indicates skipping - * all descendant irqchips. + * all descendent irqchips. */ enum { IRQ_SET_MASK_OK = 0, @@ -138,9 +136,6 @@ struct irq_domain; * @affinity: IRQ affinity on SMP. If this is an IPI * related irq, then this is the mask of the * CPUs to which an IPI can be sent. - * @effective_affinity: The effective IRQ affinity on SMP as some irq - * chips do not allow multi CPU destinations. - * A subset of @affinity. * @msi_desc: MSI descriptor * @ipi_offset: Offset of first IPI target cpu in @affinity. Optional. 
*/ @@ -152,9 +147,6 @@ struct irq_common_data { void *handler_data; struct msi_desc *msi_desc; cpumask_var_t affinity; -#ifdef CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK - cpumask_var_t effective_affinity; -#endif #ifdef CONFIG_GENERIC_IRQ_IPI unsigned int ipi_offset; #endif @@ -199,7 +191,7 @@ struct irq_data { * IRQD_LEVEL - Interrupt is level triggered * IRQD_WAKEUP_STATE - Interrupt is configured for wakeup * from suspend - * IRQD_MOVE_PCNTXT - Interrupt can be moved in process + * IRDQ_MOVE_PCNTXT - Interrupt can be moved in process * context * IRQD_IRQ_DISABLED - Disabled state of the interrupt * IRQD_IRQ_MASKED - Masked state of the interrupt @@ -207,20 +199,6 @@ struct irq_data { * IRQD_WAKEUP_ARMED - Wakeup mode armed * IRQD_FORWARDED_TO_VCPU - The interrupt is forwarded to a VCPU * IRQD_AFFINITY_MANAGED - Affinity is auto-managed by the kernel - * IRQD_IRQ_STARTED - Startup state of the interrupt - * IRQD_MANAGED_SHUTDOWN - Interrupt was shutdown due to empty affinity - * mask. Applies only to affinity managed irqs. - * IRQD_SINGLE_TARGET - IRQ allows only a single affinity target - * IRQD_DEFAULT_TRIGGER_SET - Expected trigger already been set - * IRQD_CAN_RESERVE - Can use reservation mode - * IRQD_MSI_NOMASK_QUIRK - Non-maskable MSI quirk for affinity change - * required - * IRQD_HANDLE_ENFORCE_IRQCTX - Enforce that handle_irq_*() is only invoked - * from actual interrupt context. - * IRQD_AFFINITY_ON_ACTIVATE - Affinity is set on activation. Don't call - * irq_chip::irq_set_affinity() when deactivated. - * IRQD_IRQ_ENABLED_ON_SUSPEND - Interrupt is enabled on suspend by irq pm if - * irqchip have flag IRQCHIP_ENABLE_WAKEUP_ON_SUSPEND set. 
*/ enum { IRQD_TRIGGER_MASK = 0xf, @@ -238,15 +216,6 @@ enum { IRQD_WAKEUP_ARMED = (1 << 19), IRQD_FORWARDED_TO_VCPU = (1 << 20), IRQD_AFFINITY_MANAGED = (1 << 21), - IRQD_IRQ_STARTED = (1 << 22), - IRQD_MANAGED_SHUTDOWN = (1 << 23), - IRQD_SINGLE_TARGET = (1 << 24), - IRQD_DEFAULT_TRIGGER_SET = (1 << 25), - IRQD_CAN_RESERVE = (1 << 26), - IRQD_MSI_NOMASK_QUIRK = (1 << 27), - IRQD_HANDLE_ENFORCE_IRQCTX = (1 << 28), - IRQD_AFFINITY_ON_ACTIVATE = (1 << 29), - IRQD_IRQ_ENABLED_ON_SUSPEND = (1 << 30), }; #define __irqd_to_state(d) ACCESS_PRIVATE((d)->common, state_use_accessors) @@ -276,25 +245,18 @@ static inline void irqd_mark_affinity_was_set(struct irq_data *d) __irqd_to_state(d) |= IRQD_AFFINITY_SET; } -static inline bool irqd_trigger_type_was_set(struct irq_data *d) -{ - return __irqd_to_state(d) & IRQD_DEFAULT_TRIGGER_SET; -} - static inline u32 irqd_get_trigger_type(struct irq_data *d) { return __irqd_to_state(d) & IRQD_TRIGGER_MASK; } /* - * Must only be called inside irq_chip.irq_set_type() functions or - * from the DT/ACPI setup code. + * Must only be called inside irq_chip.irq_set_type() functions. */ static inline void irqd_set_trigger_type(struct irq_data *d, u32 type) { __irqd_to_state(d) &= ~IRQD_TRIGGER_MASK; __irqd_to_state(d) |= type & IRQD_TRIGGER_MASK; - __irqd_to_state(d) |= IRQD_DEFAULT_TRIGGER_SET; } static inline bool irqd_is_level_type(struct irq_data *d) @@ -302,35 +264,6 @@ static inline bool irqd_is_level_type(struct irq_data *d) return __irqd_to_state(d) & IRQD_LEVEL; } -/* - * Must only be called of irqchip.irq_set_affinity() or low level - * hierarchy domain allocation functions. 
- */ -static inline void irqd_set_single_target(struct irq_data *d) -{ - __irqd_to_state(d) |= IRQD_SINGLE_TARGET; -} - -static inline bool irqd_is_single_target(struct irq_data *d) -{ - return __irqd_to_state(d) & IRQD_SINGLE_TARGET; -} - -static inline void irqd_set_handle_enforce_irqctx(struct irq_data *d) -{ - __irqd_to_state(d) |= IRQD_HANDLE_ENFORCE_IRQCTX; -} - -static inline bool irqd_is_handle_enforce_irqctx(struct irq_data *d) -{ - return __irqd_to_state(d) & IRQD_HANDLE_ENFORCE_IRQCTX; -} - -static inline bool irqd_is_enabled_on_suspend(struct irq_data *d) -{ - return __irqd_to_state(d) & IRQD_IRQ_ENABLED_ON_SUSPEND; -} - static inline bool irqd_is_wakeup_set(struct irq_data *d) { return __irqd_to_state(d) & IRQD_WAKEUP_STATE; @@ -396,56 +329,6 @@ static inline void irqd_clr_activated(struct irq_data *d) __irqd_to_state(d) &= ~IRQD_ACTIVATED; } -static inline bool irqd_is_started(struct irq_data *d) -{ - return __irqd_to_state(d) & IRQD_IRQ_STARTED; -} - -static inline bool irqd_is_managed_and_shutdown(struct irq_data *d) -{ - return __irqd_to_state(d) & IRQD_MANAGED_SHUTDOWN; -} - -static inline void irqd_set_can_reserve(struct irq_data *d) -{ - __irqd_to_state(d) |= IRQD_CAN_RESERVE; -} - -static inline void irqd_clr_can_reserve(struct irq_data *d) -{ - __irqd_to_state(d) &= ~IRQD_CAN_RESERVE; -} - -static inline bool irqd_can_reserve(struct irq_data *d) -{ - return __irqd_to_state(d) & IRQD_CAN_RESERVE; -} - -static inline void irqd_set_msi_nomask_quirk(struct irq_data *d) -{ - __irqd_to_state(d) |= IRQD_MSI_NOMASK_QUIRK; -} - -static inline void irqd_clr_msi_nomask_quirk(struct irq_data *d) -{ - __irqd_to_state(d) &= ~IRQD_MSI_NOMASK_QUIRK; -} - -static inline bool irqd_msi_nomask_quirk(struct irq_data *d) -{ - return __irqd_to_state(d) & IRQD_MSI_NOMASK_QUIRK; -} - -static inline void irqd_set_affinity_on_activate(struct irq_data *d) -{ - __irqd_to_state(d) |= IRQD_AFFINITY_ON_ACTIVATE; -} - -static inline bool irqd_affinity_on_activate(struct 
irq_data *d) -{ - return __irqd_to_state(d) & IRQD_AFFINITY_ON_ACTIVATE; -} - #undef __irqd_to_state static inline irq_hw_number_t irqd_to_hwirq(struct irq_data *d) @@ -467,12 +350,7 @@ static inline irq_hw_number_t irqd_to_hwirq(struct irq_data *d) * @irq_mask_ack: ack and mask an interrupt source * @irq_unmask: unmask an interrupt source * @irq_eoi: end of interrupt - * @irq_set_affinity: Set the CPU affinity on SMP machines. If the force - * argument is true, it tells the driver to - * unconditionally apply the affinity setting. Sanity - * checks against the supplied affinity mask are not - * required. This is used for CPU hotplug where the - * target CPU is not yet set in the cpu_online_mask. + * @irq_set_affinity: set the CPU affinity on SMP machines * @irq_retrigger: resend an IRQ to the CPU * @irq_set_type: set the flow type (IRQ_TYPE_LEVEL/etc.) of an IRQ * @irq_set_wake: enable/disable power-management wake-on of an IRQ @@ -498,8 +376,6 @@ static inline irq_hw_number_t irqd_to_hwirq(struct irq_data *d) * @irq_set_vcpu_affinity: optional to target a vCPU in a virtual machine * @ipi_send_single: send a single IPI to destination cpus * @ipi_send_mask: send an IPI to destination cpus in cpumask - * @irq_nmi_setup: function called from core code before enabling an NMI - * @irq_nmi_teardown: function called from core code after disabling an NMI * @flags: chip specific flags */ struct irq_chip { @@ -548,41 +424,32 @@ struct irq_chip { void (*ipi_send_single)(struct irq_data *data, unsigned int cpu); void (*ipi_send_mask)(struct irq_data *data, const struct cpumask *dest); - int (*irq_nmi_setup)(struct irq_data *data); - void (*irq_nmi_teardown)(struct irq_data *data); - unsigned long flags; -}; +} __do_const; +#ifndef _LINUX_IRQDOMAIN_H +typedef struct irq_chip __no_const irq_chip_no_const; +#endif /* * irq_chip specific flags * - * IRQCHIP_SET_TYPE_MASKED: Mask before calling chip.irq_set_type() - * IRQCHIP_EOI_IF_HANDLED: Only issue irq_eoi() when irq was 
handled - * IRQCHIP_MASK_ON_SUSPEND: Mask non wake irqs in the suspend path - * IRQCHIP_ONOFFLINE_ENABLED: Only call irq_on/off_line callbacks - * when irq enabled - * IRQCHIP_SKIP_SET_WAKE: Skip chip.irq_set_wake(), for this irq chip - * IRQCHIP_ONESHOT_SAFE: One shot does not require mask/unmask - * IRQCHIP_EOI_THREADED: Chip requires eoi() on unmask in threaded mode - * IRQCHIP_SUPPORTS_LEVEL_MSI: Chip can provide two doorbells for Level MSIs - * IRQCHIP_SUPPORTS_NMI: Chip can deliver NMIs, only for root irqchips - * IRQCHIP_ENABLE_WAKEUP_ON_SUSPEND: Invokes __enable_irq()/__disable_irq() for wake irqs - * in the suspend path if they are in disabled state - * IRQCHIP_AFFINITY_PRE_STARTUP: Default affinity update before startup + * IRQCHIP_SET_TYPE_MASKED: Mask before calling chip.irq_set_type() + * IRQCHIP_EOI_IF_HANDLED: Only issue irq_eoi() when irq was handled + * IRQCHIP_MASK_ON_SUSPEND: Mask non wake irqs in the suspend path + * IRQCHIP_ONOFFLINE_ENABLED: Only call irq_on/off_line callbacks + * when irq enabled + * IRQCHIP_SKIP_SET_WAKE: Skip chip.irq_set_wake(), for this irq chip + * IRQCHIP_ONESHOT_SAFE: One shot does not require mask/unmask + * IRQCHIP_EOI_THREADED: Chip requires eoi() on unmask in threaded mode */ enum { - IRQCHIP_SET_TYPE_MASKED = (1 << 0), - IRQCHIP_EOI_IF_HANDLED = (1 << 1), - IRQCHIP_MASK_ON_SUSPEND = (1 << 2), - IRQCHIP_ONOFFLINE_ENABLED = (1 << 3), - IRQCHIP_SKIP_SET_WAKE = (1 << 4), - IRQCHIP_ONESHOT_SAFE = (1 << 5), - IRQCHIP_EOI_THREADED = (1 << 6), - IRQCHIP_SUPPORTS_LEVEL_MSI = (1 << 7), - IRQCHIP_SUPPORTS_NMI = (1 << 8), - IRQCHIP_ENABLE_WAKEUP_ON_SUSPEND = (1 << 9), - IRQCHIP_AFFINITY_PRE_STARTUP = (1 << 10), + IRQCHIP_SET_TYPE_MASKED = (1 << 0), + IRQCHIP_EOI_IF_HANDLED = (1 << 1), + IRQCHIP_MASK_ON_SUSPEND = (1 << 2), + IRQCHIP_ONOFFLINE_ENABLED = (1 << 3), + IRQCHIP_SKIP_SET_WAKE = (1 << 4), + IRQCHIP_ONESHOT_SAFE = (1 << 5), + IRQCHIP_EOI_THREADED = (1 << 6), }; #include @@ -603,6 +470,8 @@ enum { #define 
IRQ_DEFAULT_INIT_FLAGS ARCH_IRQ_INIT_FLAGS struct irqaction; +extern int setup_irq(unsigned int irq, struct irqaction *new); +extern void remove_irq(unsigned int irq, struct irqaction *act); extern int setup_percpu_irq(unsigned int irq, struct irqaction *new); extern void remove_percpu_irq(unsigned int irq, struct irqaction *act); @@ -612,26 +481,14 @@ extern int irq_set_affinity_locked(struct irq_data *data, const struct cpumask *cpumask, bool force); extern int irq_set_vcpu_affinity(unsigned int irq, void *vcpu_info); -#if defined(CONFIG_SMP) && defined(CONFIG_GENERIC_IRQ_MIGRATION) extern void irq_migrate_all_off_this_cpu(void); -extern int irq_affinity_online_cpu(unsigned int cpu); -#else -# define irq_affinity_online_cpu NULL -#endif #if defined(CONFIG_SMP) && defined(CONFIG_GENERIC_PENDING_IRQ) -void __irq_move_irq(struct irq_data *data); -static inline void irq_move_irq(struct irq_data *data) -{ - if (unlikely(irqd_is_setaffinity_pending(data))) - __irq_move_irq(data); -} +void irq_move_irq(struct irq_data *data); void irq_move_masked_irq(struct irq_data *data); -void irq_force_complete_move(struct irq_desc *desc); #else static inline void irq_move_irq(struct irq_data *data) { } static inline void irq_move_masked_irq(struct irq_data *data) { } -static inline void irq_force_complete_move(struct irq_desc *desc) { } #endif extern int no_irq_affinity; @@ -660,27 +517,15 @@ extern void handle_percpu_devid_irq(struct irq_desc *desc); extern void handle_bad_irq(struct irq_desc *desc); extern void handle_nested_irq(unsigned int irq); -extern void handle_fasteoi_nmi(struct irq_desc *desc); -extern void handle_percpu_devid_fasteoi_nmi(struct irq_desc *desc); - extern int irq_chip_compose_msi_msg(struct irq_data *data, struct msi_msg *msg); extern int irq_chip_pm_get(struct irq_data *data); extern int irq_chip_pm_put(struct irq_data *data); #ifdef CONFIG_IRQ_DOMAIN_HIERARCHY -extern void handle_fasteoi_ack_irq(struct irq_desc *desc); -extern void 
handle_fasteoi_mask_irq(struct irq_desc *desc); -extern int irq_chip_set_parent_state(struct irq_data *data, - enum irqchip_irq_state which, - bool val); -extern int irq_chip_get_parent_state(struct irq_data *data, - enum irqchip_irq_state which, - bool *state); extern void irq_chip_enable_parent(struct irq_data *data); extern void irq_chip_disable_parent(struct irq_data *data); extern void irq_chip_ack_parent(struct irq_data *data); extern int irq_chip_retrigger_hierarchy(struct irq_data *data); extern void irq_chip_mask_parent(struct irq_data *data); -extern void irq_chip_mask_ack_parent(struct irq_data *data); extern void irq_chip_unmask_parent(struct irq_data *data); extern void irq_chip_eoi_parent(struct irq_data *data); extern int irq_chip_set_affinity_parent(struct irq_data *data, @@ -690,8 +535,6 @@ extern int irq_chip_set_wake_parent(struct irq_data *data, unsigned int on); extern int irq_chip_set_vcpu_affinity_parent(struct irq_data *data, void *vcpu_info); extern int irq_chip_set_type_parent(struct irq_data *data, unsigned int type); -extern int irq_chip_request_resources_parent(struct irq_data *data); -extern void irq_chip_release_resources_parent(struct irq_data *data); #endif /* Handling of unhandled and spurious interrupts: */ @@ -887,52 +730,17 @@ static inline struct cpumask *irq_data_get_affinity_mask(struct irq_data *d) return d->common->affinity; } -#ifdef CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK -static inline -struct cpumask *irq_data_get_effective_affinity_mask(struct irq_data *d) -{ - return d->common->effective_affinity; -} -static inline void irq_data_update_effective_affinity(struct irq_data *d, - const struct cpumask *m) -{ - cpumask_copy(d->common->effective_affinity, m); -} -#else -static inline void irq_data_update_effective_affinity(struct irq_data *d, - const struct cpumask *m) -{ -} -static inline -struct cpumask *irq_data_get_effective_affinity_mask(struct irq_data *d) -{ - return d->common->affinity; -} -#endif - -static inline 
struct cpumask *irq_get_effective_affinity_mask(unsigned int irq) -{ - struct irq_data *d = irq_get_irq_data(irq); - - return d ? irq_data_get_effective_affinity_mask(d) : NULL; -} - unsigned int arch_dynirq_lower_bound(unsigned int from); int __irq_alloc_descs(int irq, unsigned int from, unsigned int cnt, int node, - struct module *owner, - const struct irq_affinity_desc *affinity); - -int __devm_irq_alloc_descs(struct device *dev, int irq, unsigned int from, - unsigned int cnt, int node, struct module *owner, - const struct irq_affinity_desc *affinity); + struct module *owner, const struct cpumask *affinity); /* use macros to avoid needing export.h for THIS_MODULE */ #define irq_alloc_descs(irq, from, cnt, node) \ __irq_alloc_descs(irq, from, cnt, node, THIS_MODULE, NULL) #define irq_alloc_desc(node) \ - irq_alloc_descs(-1, 1, 1, node) + irq_alloc_descs(-1, 0, 1, node) #define irq_alloc_desc_at(at, node) \ irq_alloc_descs(at, at, 1, node) @@ -943,27 +751,27 @@ int __devm_irq_alloc_descs(struct device *dev, int irq, unsigned int from, #define irq_alloc_descs_from(from, cnt, node) \ irq_alloc_descs(-1, from, cnt, node) -#define devm_irq_alloc_descs(dev, irq, from, cnt, node) \ - __devm_irq_alloc_descs(dev, irq, from, cnt, node, THIS_MODULE, NULL) - -#define devm_irq_alloc_desc(dev, node) \ - devm_irq_alloc_descs(dev, -1, 1, 1, node) - -#define devm_irq_alloc_desc_at(dev, at, node) \ - devm_irq_alloc_descs(dev, at, at, 1, node) - -#define devm_irq_alloc_desc_from(dev, from, node) \ - devm_irq_alloc_descs(dev, -1, from, 1, node) - -#define devm_irq_alloc_descs_from(dev, from, cnt, node) \ - devm_irq_alloc_descs(dev, -1, from, cnt, node) - void irq_free_descs(unsigned int irq, unsigned int cnt); static inline void irq_free_desc(unsigned int irq) { irq_free_descs(irq, 1); } +#ifdef CONFIG_GENERIC_IRQ_LEGACY_ALLOC_HWIRQ +unsigned int irq_alloc_hwirqs(int cnt, int node); +static inline unsigned int irq_alloc_hwirq(int node) +{ + return irq_alloc_hwirqs(1, node); +} +void 
irq_free_hwirqs(unsigned int from, int cnt); +static inline void irq_free_hwirq(unsigned int irq) +{ + return irq_free_hwirqs(irq, 1); +} +int arch_setup_hwirq(unsigned int irq, int node); +void arch_teardown_hwirq(unsigned int irq); +#endif + #ifdef CONFIG_GENERIC_IRQ_LEGACY void irq_init_desc(unsigned int irq); #endif @@ -1063,7 +871,7 @@ struct irq_chip_generic { unsigned long unused; struct irq_domain *domain; struct list_head list; - struct irq_chip_type chip_types[]; + struct irq_chip_type chip_types[0]; }; /** @@ -1099,7 +907,7 @@ struct irq_domain_chip_generic { unsigned int irq_flags_to_clear; unsigned int irq_flags_to_set; enum irq_gc_flags gc_flags; - struct irq_chip_generic *gc[]; + struct irq_chip_generic *gc[0]; }; /* Generic chip callback functions */ @@ -1110,7 +918,7 @@ void irq_gc_mask_clr_bit(struct irq_data *d); void irq_gc_unmask_enable_reg(struct irq_data *d); void irq_gc_ack_set_bit(struct irq_data *d); void irq_gc_ack_clr_bit(struct irq_data *d); -void irq_gc_mask_disable_and_ack_set(struct irq_data *d); +void irq_gc_mask_disable_reg_and_ack(struct irq_data *d); void irq_gc_eoi(struct irq_data *d); int irq_gc_set_wake(struct irq_data *d, unsigned int on); @@ -1127,14 +935,6 @@ int irq_setup_alt_chip(struct irq_data *d, unsigned int type); void irq_remove_generic_chip(struct irq_chip_generic *gc, u32 msk, unsigned int clr, unsigned int set); -struct irq_chip_generic * -devm_irq_alloc_generic_chip(struct device *dev, const char *name, int num_ct, - unsigned int irq_base, void __iomem *reg_base, - irq_flow_handler_t handler); -int devm_irq_setup_generic_chip(struct device *dev, struct irq_chip_generic *gc, - u32 msk, enum irq_gc_flags flags, - unsigned int clr, unsigned int set); - struct irq_chip_generic *irq_get_domain_generic_chip(struct irq_domain *d, unsigned int hw_irq); int __irq_alloc_domain_generic_chips(struct irq_domain *d, int irqs_per_chip, @@ -1151,19 +951,6 @@ int __irq_alloc_domain_generic_chips(struct irq_domain *d, int 
irqs_per_chip, handler, clr, set, flags); \ }) -static inline void irq_free_generic_chip(struct irq_chip_generic *gc) -{ - kfree(gc); -} - -static inline void irq_destroy_generic_chip(struct irq_chip_generic *gc, - u32 msk, unsigned int clr, - unsigned int set) -{ - irq_remove_generic_chip(gc, msk, clr, set); - irq_free_generic_chip(gc); -} - static inline struct irq_chip_type *irq_data_get_chip_type(struct irq_data *d) { return container_of(d->chip, struct irq_chip_type, chip); @@ -1214,29 +1001,6 @@ static inline u32 irq_reg_readl(struct irq_chip_generic *gc, return readl(gc->reg_base + reg_offset); } -struct irq_matrix; -struct irq_matrix *irq_alloc_matrix(unsigned int matrix_bits, - unsigned int alloc_start, - unsigned int alloc_end); -void irq_matrix_online(struct irq_matrix *m); -void irq_matrix_offline(struct irq_matrix *m); -void irq_matrix_assign_system(struct irq_matrix *m, unsigned int bit, bool replace); -int irq_matrix_reserve_managed(struct irq_matrix *m, const struct cpumask *msk); -void irq_matrix_remove_managed(struct irq_matrix *m, const struct cpumask *msk); -int irq_matrix_alloc_managed(struct irq_matrix *m, const struct cpumask *msk, - unsigned int *mapped_cpu); -void irq_matrix_reserve(struct irq_matrix *m); -void irq_matrix_remove_reserved(struct irq_matrix *m); -int irq_matrix_alloc(struct irq_matrix *m, const struct cpumask *msk, - bool reserved, unsigned int *mapped_cpu); -void irq_matrix_free(struct irq_matrix *m, unsigned int cpu, - unsigned int bit, bool managed); -void irq_matrix_assign(struct irq_matrix *m, unsigned int bit); -unsigned int irq_matrix_available(struct irq_matrix *m, bool cpudown); -unsigned int irq_matrix_allocated(struct irq_matrix *m); -unsigned int irq_matrix_reserved(struct irq_matrix *m); -void irq_matrix_debug_show(struct seq_file *sf, struct irq_matrix *m, int ind); - /* Contrary to Linux irqs, for hardware irqs the irq number 0 is valid */ #define INVALID_HWIRQ (~0UL) irq_hw_number_t ipi_get_hwirq(unsigned int 
irq, unsigned int cpu); @@ -1245,30 +1009,4 @@ int __ipi_send_mask(struct irq_desc *desc, const struct cpumask *dest); int ipi_send_single(unsigned int virq, unsigned int cpu); int ipi_send_mask(unsigned int virq, const struct cpumask *dest); -#ifdef CONFIG_GENERIC_IRQ_MULTI_HANDLER -/* - * Registers a generic IRQ handling function as the top-level IRQ handler in - * the system, which is generally the first C code called from an assembly - * architecture-specific interrupt handler. - * - * Returns 0 on success, or -EBUSY if an IRQ handler has already been - * registered. - */ -int __init set_handle_irq(void (*handle_irq)(struct pt_regs *)); - -/* - * Allows interrupt handlers to find the irqchip that's been registered as the - * top-level IRQ handler. - */ -extern void (*handle_arch_irq)(struct pt_regs *) __ro_after_init; -#else -#ifndef set_handle_irq -#define set_handle_irq(handle_irq) \ - do { \ - (void)handle_irq; \ - WARN_ON(1); \ - } while (0) -#endif -#endif - #endif /* _LINUX_IRQ_H */ diff --git a/include/linux/irq_cpustat.h b/include/linux/irq_cpustat.h index 6e8895cd4d..77e4bac292 100644 --- a/include/linux/irq_cpustat.h +++ b/include/linux/irq_cpustat.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef __irq_cpustat_h #define __irq_cpustat_h @@ -18,11 +17,15 @@ */ #ifndef __ARCH_IRQ_STAT -DECLARE_PER_CPU_ALIGNED(irq_cpustat_t, irq_stat); /* defined in asm/hardirq.h */ -#define __IRQ_STAT(cpu, member) (per_cpu(irq_stat.member, cpu)) +extern irq_cpustat_t irq_stat[]; /* defined in asm/hardirq.h */ +#define __IRQ_STAT(cpu, member) (irq_stat[cpu].member) #endif -/* arch dependent irq_stat fields */ + /* arch independent irq_stat fields */ +#define local_softirq_pending() \ + __IRQ_STAT(smp_processor_id(), __softirq_pending) + + /* arch dependent irq_stat fields */ #define nmi_count(cpu) __IRQ_STAT((cpu), __nmi_count) /* i386 */ #endif /* __irq_cpustat_h */ diff --git a/include/linux/irq_poll.h b/include/linux/irq_poll.h index 
16aaeccb65..3e8c1b8fb9 100644 --- a/include/linux/irq_poll.h +++ b/include/linux/irq_poll.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef IRQ_POLL_H #define IRQ_POLL_H diff --git a/include/linux/irq_work.h b/include/linux/irq_work.h index ec2a47a81e..47b9ebd4a7 100644 --- a/include/linux/irq_work.h +++ b/include/linux/irq_work.h @@ -1,8 +1,7 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_IRQ_WORK_H #define _LINUX_IRQ_WORK_H -#include +#include /* * An entry can be in one of four states: @@ -13,41 +12,31 @@ * busy NULL, 2 -> {free, claimed} : callback in progress, can be claimed */ +#define IRQ_WORK_PENDING 1UL +#define IRQ_WORK_BUSY 2UL +#define IRQ_WORK_FLAGS 3UL +#define IRQ_WORK_LAZY 4UL /* Doesn't want IPI, wait for tick */ + struct irq_work { - struct __call_single_node node; + unsigned long flags; + struct llist_node llnode; void (*func)(struct irq_work *); }; -#define __IRQ_WORK_INIT(_func, _flags) (struct irq_work){ \ - .node = { .u_flags = (_flags), }, \ - .func = (_func), \ -} - -#define IRQ_WORK_INIT(_func) __IRQ_WORK_INIT(_func, 0) -#define IRQ_WORK_INIT_LAZY(_func) __IRQ_WORK_INIT(_func, IRQ_WORK_LAZY) -#define IRQ_WORK_INIT_HARD(_func) __IRQ_WORK_INIT(_func, IRQ_WORK_HARD_IRQ) - -#define DEFINE_IRQ_WORK(name, _f) \ - struct irq_work name = IRQ_WORK_INIT(_f) - static inline void init_irq_work(struct irq_work *work, void (*func)(struct irq_work *)) { - *work = IRQ_WORK_INIT(func); + work->flags = 0; + work->func = func; } -static inline bool irq_work_is_pending(struct irq_work *work) -{ - return atomic_read(&work->node.a_flags) & IRQ_WORK_PENDING; -} - -static inline bool irq_work_is_busy(struct irq_work *work) -{ - return atomic_read(&work->node.a_flags) & IRQ_WORK_BUSY; -} +#define DEFINE_IRQ_WORK(name, _f) struct irq_work name = { .func = (_f), } bool irq_work_queue(struct irq_work *work); + +#ifdef CONFIG_SMP bool irq_work_queue_on(struct irq_work *work, int cpu); +#endif void irq_work_tick(void); void 
irq_work_sync(struct irq_work *work); @@ -57,11 +46,9 @@ void irq_work_sync(struct irq_work *work); void irq_work_run(void); bool irq_work_needs_cpu(void); -void irq_work_single(void *arg); #else static inline bool irq_work_needs_cpu(void) { return false; } static inline void irq_work_run(void) { } -static inline void irq_work_single(void *arg) { } #endif #endif /* _LINUX_IRQ_WORK_H */ diff --git a/include/linux/irqbypass.h b/include/linux/irqbypass.h index 9bdb2a7818..f0f5d26715 100644 --- a/include/linux/irqbypass.h +++ b/include/linux/irqbypass.h @@ -1,9 +1,12 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * IRQ offload/bypass manager * * Copyright (C) 2015 Red Hat, Inc. * Copyright (c) 2015 Linaro Ltd. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. */ #ifndef IRQBYPASS_H #define IRQBYPASS_H diff --git a/include/linux/irqchip.h b/include/linux/irqchip.h index 67351aac65..89c34b2006 100644 --- a/include/linux/irqchip.h +++ b/include/linux/irqchip.h @@ -12,49 +12,25 @@ #define _LINUX_IRQCHIP_H #include -#include #include -#include /* * This macro must be used by the different irqchip drivers to declare * the association between their DT compatible string and their * initialization function. * - * @name: name that must be unique across all IRQCHIP_DECLARE of the + * @name: name that must be unique accross all IRQCHIP_DECLARE of the * same file. 
* @compstr: compatible string of the irqchip driver * @fn: initialization function */ #define IRQCHIP_DECLARE(name, compat, fn) OF_DECLARE_2(irqchip, name, compat, fn) -extern int platform_irqchip_probe(struct platform_device *pdev); - -#define IRQCHIP_PLATFORM_DRIVER_BEGIN(drv_name) \ -static const struct of_device_id drv_name##_irqchip_match_table[] = { - -#define IRQCHIP_MATCH(compat, fn) { .compatible = compat, .data = fn }, - -#define IRQCHIP_PLATFORM_DRIVER_END(drv_name) \ - {}, \ -}; \ -MODULE_DEVICE_TABLE(of, drv_name##_irqchip_match_table); \ -static struct platform_driver drv_name##_driver = { \ - .probe = platform_irqchip_probe, \ - .driver = { \ - .name = #drv_name, \ - .owner = THIS_MODULE, \ - .of_match_table = drv_name##_irqchip_match_table, \ - .suppress_bind_attrs = true, \ - }, \ -}; \ -builtin_platform_driver(drv_name##_driver) - /* * This macro must be used by the different irqchip drivers to declare * the association between their version and their initialization function. * - * @name: name that must be unique across all IRQCHIP_ACPI_DECLARE of the + * @name: name that must be unique accross all IRQCHIP_ACPI_DECLARE of the * same file. * @subtable: Subtable to be identified in MADT * @validate: Function to be called on that subtable to check its validity. 
@@ -63,9 +39,8 @@ builtin_platform_driver(drv_name##_driver) * @fn: initialization function */ #define IRQCHIP_ACPI_DECLARE(name, subtable, validate, data, fn) \ - ACPI_DECLARE_SUBTABLE_PROBE_ENTRY(irqchip, name, \ - ACPI_SIG_MADT, subtable, \ - validate, data, fn) + ACPI_DECLARE_PROBE_ENTRY(irqchip, name, ACPI_SIG_MADT, \ + subtable, validate, data, fn) #ifdef CONFIG_IRQCHIP void irqchip_init(void); diff --git a/include/linux/irqchip/arm-gic-common.h b/include/linux/irqchip/arm-gic-common.h index 1177f3a1ae..c647b0547b 100644 --- a/include/linux/irqchip/arm-gic-common.h +++ b/include/linux/irqchip/arm-gic-common.h @@ -1,23 +1,34 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * include/linux/irqchip/arm-gic-common.h * * Copyright (C) 2016 ARM Limited, All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. */ #ifndef __LINUX_IRQCHIP_ARM_GIC_COMMON_H #define __LINUX_IRQCHIP_ARM_GIC_COMMON_H -#include +#include +#include -#define GICD_INT_DEF_PRI 0xa0 -#define GICD_INT_DEF_PRI_X4 ((GICD_INT_DEF_PRI << 24) |\ - (GICD_INT_DEF_PRI << 16) |\ - (GICD_INT_DEF_PRI << 8) |\ - GICD_INT_DEF_PRI) +enum gic_type { + GIC_V2, + GIC_V3, +}; -struct irq_domain; -struct fwnode_handle; -int gicv2m_init(struct fwnode_handle *parent_handle, - struct irq_domain *parent); +struct gic_kvm_info { + /* GIC type */ + enum gic_type type; + /* Virtual CPU interface */ + struct resource vcpu; + /* Interrupt number */ + unsigned int maint_irq; + /* Virtual control interface */ + struct resource vctrl; +}; + +const struct gic_kvm_info *gic_get_kvm_info(void); #endif /* __LINUX_IRQCHIP_ARM_GIC_COMMON_H */ diff --git a/include/linux/irqchip/arm-gic-v3.h b/include/linux/irqchip/arm-gic-v3.h index 81cbf85f73..b7e34313cd 100644 --- a/include/linux/irqchip/arm-gic-v3.h +++ b/include/linux/irqchip/arm-gic-v3.h @@ -1,7 +1,19 @@ -/* 
SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright (C) 2013, 2014 ARM Limited, All Rights Reserved. * Author: Marc Zyngier + * + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . */ #ifndef __LINUX_IRQCHIP_ARM_GIC_V3_H #define __LINUX_IRQCHIP_ARM_GIC_V3_H @@ -13,12 +25,12 @@ #define GICD_CTLR 0x0000 #define GICD_TYPER 0x0004 #define GICD_IIDR 0x0008 -#define GICD_TYPER2 0x000C #define GICD_STATUSR 0x0010 #define GICD_SETSPI_NSR 0x0040 #define GICD_CLRSPI_NSR 0x0048 #define GICD_SETSPI_SR 0x0050 #define GICD_CLRSPI_SR 0x0058 +#define GICD_SEIR 0x0068 #define GICD_IGROUPR 0x0080 #define GICD_ISENABLER 0x0100 #define GICD_ICENABLER 0x0180 @@ -30,22 +42,10 @@ #define GICD_ICFGR 0x0C00 #define GICD_IGRPMODR 0x0D00 #define GICD_NSACR 0x0E00 -#define GICD_IGROUPRnE 0x1000 -#define GICD_ISENABLERnE 0x1200 -#define GICD_ICENABLERnE 0x1400 -#define GICD_ISPENDRnE 0x1600 -#define GICD_ICPENDRnE 0x1800 -#define GICD_ISACTIVERnE 0x1A00 -#define GICD_ICACTIVERnE 0x1C00 -#define GICD_IPRIORITYRnE 0x2000 -#define GICD_ICFGRnE 0x3000 #define GICD_IROUTER 0x6000 -#define GICD_IROUTERnE 0x8000 #define GICD_IDREGS 0xFFD0 #define GICD_PIDR2 0xFFE8 -#define ESPI_BASE_INTID 4096 - /* * Those registers are actually from GICv2, but the spec demands that they * are implemented as RES0 if ARE is 1 (which we do in KVM's emulated GICv3). 
@@ -56,22 +56,11 @@ #define GICD_SPENDSGIR 0x0F20 #define GICD_CTLR_RWP (1U << 31) -#define GICD_CTLR_nASSGIreq (1U << 8) #define GICD_CTLR_DS (1U << 6) #define GICD_CTLR_ARE_NS (1U << 4) #define GICD_CTLR_ENABLE_G1A (1U << 1) #define GICD_CTLR_ENABLE_G1 (1U << 0) -#define GICD_IIDR_IMPLEMENTER_SHIFT 0 -#define GICD_IIDR_IMPLEMENTER_MASK (0xfff << GICD_IIDR_IMPLEMENTER_SHIFT) -#define GICD_IIDR_REVISION_SHIFT 12 -#define GICD_IIDR_REVISION_MASK (0xf << GICD_IIDR_REVISION_SHIFT) -#define GICD_IIDR_VARIANT_SHIFT 16 -#define GICD_IIDR_VARIANT_MASK (0xf << GICD_IIDR_VARIANT_SHIFT) -#define GICD_IIDR_PRODUCT_ID_SHIFT 24 -#define GICD_IIDR_PRODUCT_ID_MASK (0xff << GICD_IIDR_PRODUCT_ID_SHIFT) - - /* * In systems with a single security state (what we emulate in KVM) * the meaning of the interrupt group enable bits is slightly different @@ -79,20 +68,12 @@ #define GICD_CTLR_ENABLE_SS_G1 (1U << 1) #define GICD_CTLR_ENABLE_SS_G0 (1U << 0) -#define GICD_TYPER_RSS (1U << 26) #define GICD_TYPER_LPIS (1U << 17) #define GICD_TYPER_MBIS (1U << 16) -#define GICD_TYPER_ESPI (1U << 8) #define GICD_TYPER_ID_BITS(typer) ((((typer) >> 19) & 0x1f) + 1) -#define GICD_TYPER_NUM_LPIS(typer) ((((typer) >> 11) & 0x1f) + 1) -#define GICD_TYPER_SPIS(typer) ((((typer) & 0x1f) + 1) * 32) -#define GICD_TYPER_ESPIS(typer) \ - (((typer) & GICD_TYPER_ESPI) ? 
GICD_TYPER_SPIS((typer) >> 27) : 0) - -#define GICD_TYPER2_nASSGIcap (1U << 8) -#define GICD_TYPER2_VIL (1U << 7) -#define GICD_TYPER2_VID GENMASK(4, 0) +#define GICD_TYPER_IRQS(typer) ((((typer) & 0x1f) + 1) * 32) +#define GICD_TYPER_LPIS (1U << 17) #define GICD_IROUTER_SPI_MODE_ONE (0U << 31) #define GICD_IROUTER_SPI_MODE_ANY (1U << 31) @@ -103,11 +84,6 @@ #define GIC_V3_DIST_SIZE 0x10000 -#define GIC_PAGE_SIZE_4K 0ULL -#define GIC_PAGE_SIZE_16K 1ULL -#define GIC_PAGE_SIZE_64K 2ULL -#define GIC_PAGE_SIZE_MASK 3ULL - /* * Re-Distributor registers, offsets from RD_base */ @@ -118,31 +94,21 @@ #define GICR_WAKER 0x0014 #define GICR_SETLPIR 0x0040 #define GICR_CLRLPIR 0x0048 +#define GICR_SEIR GICD_SEIR #define GICR_PROPBASER 0x0070 #define GICR_PENDBASER 0x0078 #define GICR_INVLPIR 0x00A0 #define GICR_INVALLR 0x00B0 #define GICR_SYNCR 0x00C0 +#define GICR_MOVLPIR 0x0100 +#define GICR_MOVALLR 0x0110 #define GICR_IDREGS GICD_IDREGS #define GICR_PIDR2 GICD_PIDR2 #define GICR_CTLR_ENABLE_LPIS (1UL << 0) -#define GICR_CTLR_RWP (1UL << 3) #define GICR_TYPER_CPU_NUMBER(r) (((r) >> 8) & 0xffff) -#define EPPI_BASE_INTID 1056 - -#define GICR_TYPER_NR_PPIS(r) \ - ({ \ - unsigned int __ppinum = ((r) >> 27) & 0x1f; \ - unsigned int __nr_ppis = 16; \ - if (__ppinum == 1 || __ppinum == 2) \ - __nr_ppis += __ppinum * 32; \ - \ - __nr_ppis; \ - }) - #define GICR_WAKER_ProcessorSleep (1U << 1) #define GICR_WAKER_ChildrenAsleep (1U << 2) @@ -167,9 +133,6 @@ #define GIC_BASER_SHAREABILITY(reg, type) \ (GIC_BASER_##type << reg##_SHAREABILITY_SHIFT) -/* encode a size field of width @w containing @n - 1 units */ -#define GIC_ENCODE_SZ(n, w) (((unsigned long)(n) - 1) & GENMASK_ULL(((w) - 1), 0)) - #define GICR_PROPBASER_SHAREABILITY_SHIFT (10) #define GICR_PROPBASER_INNER_CACHEABILITY_SHIFT (7) #define GICR_PROPBASER_OUTER_CACHEABILITY_SHIFT (56) @@ -187,15 +150,13 @@ #define GICR_PROPBASER_nCnB GIC_BASER_CACHEABILITY(GICR_PROPBASER, INNER, nCnB) #define GICR_PROPBASER_nC 
GIC_BASER_CACHEABILITY(GICR_PROPBASER, INNER, nC) #define GICR_PROPBASER_RaWt GIC_BASER_CACHEABILITY(GICR_PROPBASER, INNER, RaWt) -#define GICR_PROPBASER_RaWb GIC_BASER_CACHEABILITY(GICR_PROPBASER, INNER, RaWb) +#define GICR_PROPBASER_RaWb GIC_BASER_CACHEABILITY(GICR_PROPBASER, INNER, RaWt) #define GICR_PROPBASER_WaWt GIC_BASER_CACHEABILITY(GICR_PROPBASER, INNER, WaWt) #define GICR_PROPBASER_WaWb GIC_BASER_CACHEABILITY(GICR_PROPBASER, INNER, WaWb) #define GICR_PROPBASER_RaWaWt GIC_BASER_CACHEABILITY(GICR_PROPBASER, INNER, RaWaWt) #define GICR_PROPBASER_RaWaWb GIC_BASER_CACHEABILITY(GICR_PROPBASER, INNER, RaWaWb) #define GICR_PROPBASER_IDBITS_MASK (0x1f) -#define GICR_PROPBASER_ADDRESS(x) ((x) & GENMASK_ULL(51, 12)) -#define GICR_PENDBASER_ADDRESS(x) ((x) & GENMASK_ULL(51, 16)) #define GICR_PENDBASER_SHAREABILITY_SHIFT (10) #define GICR_PENDBASER_INNER_CACHEABILITY_SHIFT (7) @@ -214,7 +175,7 @@ #define GICR_PENDBASER_nCnB GIC_BASER_CACHEABILITY(GICR_PENDBASER, INNER, nCnB) #define GICR_PENDBASER_nC GIC_BASER_CACHEABILITY(GICR_PENDBASER, INNER, nC) #define GICR_PENDBASER_RaWt GIC_BASER_CACHEABILITY(GICR_PENDBASER, INNER, RaWt) -#define GICR_PENDBASER_RaWb GIC_BASER_CACHEABILITY(GICR_PENDBASER, INNER, RaWb) +#define GICR_PENDBASER_RaWb GIC_BASER_CACHEABILITY(GICR_PENDBASER, INNER, RaWt) #define GICR_PENDBASER_WaWt GIC_BASER_CACHEABILITY(GICR_PENDBASER, INNER, WaWt) #define GICR_PENDBASER_WaWb GIC_BASER_CACHEABILITY(GICR_PENDBASER, INNER, WaWb) #define GICR_PENDBASER_RaWaWt GIC_BASER_CACHEABILITY(GICR_PENDBASER, INNER, RaWaWt) @@ -239,128 +200,19 @@ #define GICR_TYPER_PLPIS (1U << 0) #define GICR_TYPER_VLPIS (1U << 1) -#define GICR_TYPER_DIRTY (1U << 2) -#define GICR_TYPER_DirectLPIS (1U << 3) #define GICR_TYPER_LAST (1U << 4) -#define GICR_TYPER_RVPEID (1U << 7) -#define GICR_TYPER_COMMON_LPI_AFF GENMASK_ULL(25, 24) -#define GICR_TYPER_AFFINITY GENMASK_ULL(63, 32) - -#define GICR_INVLPIR_INTID GENMASK_ULL(31, 0) -#define GICR_INVLPIR_VPEID GENMASK_ULL(47, 32) -#define 
GICR_INVLPIR_V GENMASK_ULL(63, 63) - -#define GICR_INVALLR_VPEID GICR_INVLPIR_VPEID -#define GICR_INVALLR_V GICR_INVLPIR_V #define GIC_V3_REDIST_SIZE 0x20000 #define LPI_PROP_GROUP1 (1 << 1) #define LPI_PROP_ENABLED (1 << 0) -/* - * Re-Distributor registers, offsets from VLPI_base - */ -#define GICR_VPROPBASER 0x0070 - -#define GICR_VPROPBASER_IDBITS_MASK 0x1f - -#define GICR_VPROPBASER_SHAREABILITY_SHIFT (10) -#define GICR_VPROPBASER_INNER_CACHEABILITY_SHIFT (7) -#define GICR_VPROPBASER_OUTER_CACHEABILITY_SHIFT (56) - -#define GICR_VPROPBASER_SHAREABILITY_MASK \ - GIC_BASER_SHAREABILITY(GICR_VPROPBASER, SHAREABILITY_MASK) -#define GICR_VPROPBASER_INNER_CACHEABILITY_MASK \ - GIC_BASER_CACHEABILITY(GICR_VPROPBASER, INNER, MASK) -#define GICR_VPROPBASER_OUTER_CACHEABILITY_MASK \ - GIC_BASER_CACHEABILITY(GICR_VPROPBASER, OUTER, MASK) -#define GICR_VPROPBASER_CACHEABILITY_MASK \ - GICR_VPROPBASER_INNER_CACHEABILITY_MASK - -#define GICR_VPROPBASER_InnerShareable \ - GIC_BASER_SHAREABILITY(GICR_VPROPBASER, InnerShareable) - -#define GICR_VPROPBASER_nCnB GIC_BASER_CACHEABILITY(GICR_VPROPBASER, INNER, nCnB) -#define GICR_VPROPBASER_nC GIC_BASER_CACHEABILITY(GICR_VPROPBASER, INNER, nC) -#define GICR_VPROPBASER_RaWt GIC_BASER_CACHEABILITY(GICR_VPROPBASER, INNER, RaWt) -#define GICR_VPROPBASER_RaWb GIC_BASER_CACHEABILITY(GICR_VPROPBASER, INNER, RaWb) -#define GICR_VPROPBASER_WaWt GIC_BASER_CACHEABILITY(GICR_VPROPBASER, INNER, WaWt) -#define GICR_VPROPBASER_WaWb GIC_BASER_CACHEABILITY(GICR_VPROPBASER, INNER, WaWb) -#define GICR_VPROPBASER_RaWaWt GIC_BASER_CACHEABILITY(GICR_VPROPBASER, INNER, RaWaWt) -#define GICR_VPROPBASER_RaWaWb GIC_BASER_CACHEABILITY(GICR_VPROPBASER, INNER, RaWaWb) - -/* - * GICv4.1 VPROPBASER reinvention. A subtle mix between the old - * VPROPBASER and ITS_BASER. Just not quite any of the two. 
- */ -#define GICR_VPROPBASER_4_1_VALID (1ULL << 63) -#define GICR_VPROPBASER_4_1_ENTRY_SIZE GENMASK_ULL(61, 59) -#define GICR_VPROPBASER_4_1_INDIRECT (1ULL << 55) -#define GICR_VPROPBASER_4_1_PAGE_SIZE GENMASK_ULL(54, 53) -#define GICR_VPROPBASER_4_1_Z (1ULL << 52) -#define GICR_VPROPBASER_4_1_ADDR GENMASK_ULL(51, 12) -#define GICR_VPROPBASER_4_1_SIZE GENMASK_ULL(6, 0) - -#define GICR_VPENDBASER 0x0078 - -#define GICR_VPENDBASER_SHAREABILITY_SHIFT (10) -#define GICR_VPENDBASER_INNER_CACHEABILITY_SHIFT (7) -#define GICR_VPENDBASER_OUTER_CACHEABILITY_SHIFT (56) -#define GICR_VPENDBASER_SHAREABILITY_MASK \ - GIC_BASER_SHAREABILITY(GICR_VPENDBASER, SHAREABILITY_MASK) -#define GICR_VPENDBASER_INNER_CACHEABILITY_MASK \ - GIC_BASER_CACHEABILITY(GICR_VPENDBASER, INNER, MASK) -#define GICR_VPENDBASER_OUTER_CACHEABILITY_MASK \ - GIC_BASER_CACHEABILITY(GICR_VPENDBASER, OUTER, MASK) -#define GICR_VPENDBASER_CACHEABILITY_MASK \ - GICR_VPENDBASER_INNER_CACHEABILITY_MASK - -#define GICR_VPENDBASER_NonShareable \ - GIC_BASER_SHAREABILITY(GICR_VPENDBASER, NonShareable) - -#define GICR_VPENDBASER_InnerShareable \ - GIC_BASER_SHAREABILITY(GICR_VPENDBASER, InnerShareable) - -#define GICR_VPENDBASER_nCnB GIC_BASER_CACHEABILITY(GICR_VPENDBASER, INNER, nCnB) -#define GICR_VPENDBASER_nC GIC_BASER_CACHEABILITY(GICR_VPENDBASER, INNER, nC) -#define GICR_VPENDBASER_RaWt GIC_BASER_CACHEABILITY(GICR_VPENDBASER, INNER, RaWt) -#define GICR_VPENDBASER_RaWb GIC_BASER_CACHEABILITY(GICR_VPENDBASER, INNER, RaWb) -#define GICR_VPENDBASER_WaWt GIC_BASER_CACHEABILITY(GICR_VPENDBASER, INNER, WaWt) -#define GICR_VPENDBASER_WaWb GIC_BASER_CACHEABILITY(GICR_VPENDBASER, INNER, WaWb) -#define GICR_VPENDBASER_RaWaWt GIC_BASER_CACHEABILITY(GICR_VPENDBASER, INNER, RaWaWt) -#define GICR_VPENDBASER_RaWaWb GIC_BASER_CACHEABILITY(GICR_VPENDBASER, INNER, RaWaWb) - -#define GICR_VPENDBASER_Dirty (1ULL << 60) -#define GICR_VPENDBASER_PendingLast (1ULL << 61) -#define GICR_VPENDBASER_IDAI (1ULL << 62) -#define 
GICR_VPENDBASER_Valid (1ULL << 63) - -/* - * GICv4.1 VPENDBASER, used for VPE residency. On top of these fields, - * also use the above Valid, PendingLast and Dirty. - */ -#define GICR_VPENDBASER_4_1_DB (1ULL << 62) -#define GICR_VPENDBASER_4_1_VGRP0EN (1ULL << 59) -#define GICR_VPENDBASER_4_1_VGRP1EN (1ULL << 58) -#define GICR_VPENDBASER_4_1_VPEID GENMASK_ULL(15, 0) - -#define GICR_VSGIR 0x0080 - -#define GICR_VSGIR_VPEID GENMASK(15, 0) - -#define GICR_VSGIPENDR 0x0088 - -#define GICR_VSGIPENDR_BUSY (1U << 31) -#define GICR_VSGIPENDR_PENDING GENMASK(15, 0) - /* * ITS registers, offsets from ITS_base */ #define GITS_CTLR 0x0000 #define GITS_IIDR 0x0004 #define GITS_TYPER 0x0008 -#define GITS_MPIDR 0x0018 #define GITS_CBASER 0x0080 #define GITS_CWRITER 0x0088 #define GITS_CREADR 0x0090 @@ -377,37 +229,17 @@ #define GITS_TRANSLATER 0x10040 -#define GITS_SGIR 0x20020 - -#define GITS_SGIR_VPEID GENMASK_ULL(47, 32) -#define GITS_SGIR_VINTID GENMASK_ULL(3, 0) - #define GITS_CTLR_ENABLE (1U << 0) -#define GITS_CTLR_ImDe (1U << 1) -#define GITS_CTLR_ITS_NUMBER_SHIFT 4 -#define GITS_CTLR_ITS_NUMBER (0xFU << GITS_CTLR_ITS_NUMBER_SHIFT) #define GITS_CTLR_QUIESCENT (1U << 31) #define GITS_TYPER_PLPIS (1UL << 0) -#define GITS_TYPER_VLPIS (1UL << 1) -#define GITS_TYPER_ITT_ENTRY_SIZE_SHIFT 4 -#define GITS_TYPER_ITT_ENTRY_SIZE GENMASK_ULL(7, 4) #define GITS_TYPER_IDBITS_SHIFT 8 #define GITS_TYPER_DEVBITS_SHIFT 13 -#define GITS_TYPER_DEVBITS GENMASK_ULL(17, 13) +#define GITS_TYPER_DEVBITS(r) ((((r) >> GITS_TYPER_DEVBITS_SHIFT) & 0x1f) + 1) #define GITS_TYPER_PTA (1UL << 19) -#define GITS_TYPER_HCC_SHIFT 24 -#define GITS_TYPER_HCC(r) (((r) >> GITS_TYPER_HCC_SHIFT) & 0xff) -#define GITS_TYPER_VMOVP (1ULL << 37) -#define GITS_TYPER_VMAPP (1ULL << 40) -#define GITS_TYPER_SVPET GENMASK_ULL(42, 41) +#define GITS_TYPER_HWCOLLCNT_SHIFT 24 -#define GITS_IIDR_REV_SHIFT 12 -#define GITS_IIDR_REV_MASK (0xf << GITS_IIDR_REV_SHIFT) -#define GITS_IIDR_REV(r) (((r) >> GITS_IIDR_REV_SHIFT) & 0xf) 
-#define GITS_IIDR_PRODUCTID_SHIFT 24 - -#define GITS_CBASER_VALID (1ULL << 63) +#define GITS_CBASER_VALID (1UL << 63) #define GITS_CBASER_SHAREABILITY_SHIFT (10) #define GITS_CBASER_INNER_CACHEABILITY_SHIFT (59) #define GITS_CBASER_OUTER_CACHEABILITY_SHIFT (53) @@ -425,17 +257,15 @@ #define GITS_CBASER_nCnB GIC_BASER_CACHEABILITY(GITS_CBASER, INNER, nCnB) #define GITS_CBASER_nC GIC_BASER_CACHEABILITY(GITS_CBASER, INNER, nC) #define GITS_CBASER_RaWt GIC_BASER_CACHEABILITY(GITS_CBASER, INNER, RaWt) -#define GITS_CBASER_RaWb GIC_BASER_CACHEABILITY(GITS_CBASER, INNER, RaWb) +#define GITS_CBASER_RaWb GIC_BASER_CACHEABILITY(GITS_CBASER, INNER, RaWt) #define GITS_CBASER_WaWt GIC_BASER_CACHEABILITY(GITS_CBASER, INNER, WaWt) #define GITS_CBASER_WaWb GIC_BASER_CACHEABILITY(GITS_CBASER, INNER, WaWb) #define GITS_CBASER_RaWaWt GIC_BASER_CACHEABILITY(GITS_CBASER, INNER, RaWaWt) #define GITS_CBASER_RaWaWb GIC_BASER_CACHEABILITY(GITS_CBASER, INNER, RaWaWb) -#define GITS_CBASER_ADDRESS(cbaser) ((cbaser) & GENMASK_ULL(51, 12)) - #define GITS_BASER_NR_REGS 8 -#define GITS_BASER_VALID (1ULL << 63) +#define GITS_BASER_VALID (1UL << 63) #define GITS_BASER_INDIRECT (1ULL << 62) #define GITS_BASER_INNER_CACHEABILITY_SHIFT (59) @@ -451,7 +281,7 @@ #define GITS_BASER_nCnB GIC_BASER_CACHEABILITY(GITS_BASER, INNER, nCnB) #define GITS_BASER_nC GIC_BASER_CACHEABILITY(GITS_BASER, INNER, nC) #define GITS_BASER_RaWt GIC_BASER_CACHEABILITY(GITS_BASER, INNER, RaWt) -#define GITS_BASER_RaWb GIC_BASER_CACHEABILITY(GITS_BASER, INNER, RaWb) +#define GITS_BASER_RaWb GIC_BASER_CACHEABILITY(GITS_BASER, INNER, RaWt) #define GITS_BASER_WaWt GIC_BASER_CACHEABILITY(GITS_BASER, INNER, WaWt) #define GITS_BASER_WaWb GIC_BASER_CACHEABILITY(GITS_BASER, INNER, WaWb) #define GITS_BASER_RaWaWt GIC_BASER_CACHEABILITY(GITS_BASER, INNER, RaWaWt) @@ -461,21 +291,14 @@ #define GITS_BASER_TYPE(r) (((r) >> GITS_BASER_TYPE_SHIFT) & 7) #define GITS_BASER_ENTRY_SIZE_SHIFT (48) #define GITS_BASER_ENTRY_SIZE(r) ((((r) >> 
GITS_BASER_ENTRY_SIZE_SHIFT) & 0x1f) + 1) -#define GITS_BASER_ENTRY_SIZE_MASK GENMASK_ULL(52, 48) -#define GITS_BASER_PHYS_52_to_48(phys) \ - (((phys) & GENMASK_ULL(47, 16)) | (((phys) >> 48) & 0xf) << 12) -#define GITS_BASER_ADDR_48_to_52(baser) \ - (((baser) & GENMASK_ULL(47, 16)) | (((baser) >> 12) & 0xf) << 48) - #define GITS_BASER_SHAREABILITY_SHIFT (10) #define GITS_BASER_InnerShareable \ GIC_BASER_SHAREABILITY(GITS_BASER, InnerShareable) #define GITS_BASER_PAGE_SIZE_SHIFT (8) -#define __GITS_BASER_PSZ(sz) (GIC_PAGE_SIZE_ ## sz << GITS_BASER_PAGE_SIZE_SHIFT) -#define GITS_BASER_PAGE_SIZE_4K __GITS_BASER_PSZ(4K) -#define GITS_BASER_PAGE_SIZE_16K __GITS_BASER_PSZ(16K) -#define GITS_BASER_PAGE_SIZE_64K __GITS_BASER_PSZ(64K) -#define GITS_BASER_PAGE_SIZE_MASK __GITS_BASER_PSZ(MASK) +#define GITS_BASER_PAGE_SIZE_4K (0UL << GITS_BASER_PAGE_SIZE_SHIFT) +#define GITS_BASER_PAGE_SIZE_16K (1UL << GITS_BASER_PAGE_SIZE_SHIFT) +#define GITS_BASER_PAGE_SIZE_64K (2UL << GITS_BASER_PAGE_SIZE_SHIFT) +#define GITS_BASER_PAGE_SIZE_MASK (3UL << GITS_BASER_PAGE_SIZE_SHIFT) #define GITS_BASER_PAGES_MAX 256 #define GITS_BASER_PAGES_SHIFT (0) #define GITS_BASER_NR_PAGES(r) (((r) & 0xff) + 1) @@ -483,7 +306,7 @@ #define GITS_BASER_TYPE_NONE 0 #define GITS_BASER_TYPE_DEVICE 1 #define GITS_BASER_TYPE_VCPU 2 -#define GITS_BASER_TYPE_RESERVED3 3 +#define GITS_BASER_TYPE_CPU 3 #define GITS_BASER_TYPE_COLLECTION 4 #define GITS_BASER_TYPE_RESERVED5 5 #define GITS_BASER_TYPE_RESERVED6 6 @@ -497,6 +320,8 @@ #define GITS_CMD_MAPD 0x08 #define GITS_CMD_MAPC 0x09 #define GITS_CMD_MAPTI 0x0a +/* older GIC documentation used MAPVI for this command */ +#define GITS_CMD_MAPVI GITS_CMD_MAPTI #define GITS_CMD_MAPI 0x0b #define GITS_CMD_MOVI 0x01 #define GITS_CMD_DISCARD 0x0f @@ -507,20 +332,6 @@ #define GITS_CMD_CLEAR 0x04 #define GITS_CMD_SYNC 0x05 -/* - * GICv4 ITS specific commands - */ -#define GITS_CMD_GICv4(x) ((x) | 0x20) -#define GITS_CMD_VINVALL GITS_CMD_GICv4(GITS_CMD_INVALL) -#define 
GITS_CMD_VMAPP GITS_CMD_GICv4(GITS_CMD_MAPC) -#define GITS_CMD_VMAPTI GITS_CMD_GICv4(GITS_CMD_MAPTI) -#define GITS_CMD_VMOVI GITS_CMD_GICv4(GITS_CMD_MOVI) -#define GITS_CMD_VSYNC GITS_CMD_GICv4(GITS_CMD_SYNC) -/* VMOVP, VSGI and INVDB are the odd ones, as they dont have a physical counterpart */ -#define GITS_CMD_VMOVP GITS_CMD_GICv4(2) -#define GITS_CMD_VSGI GITS_CMD_GICv4(3) -#define GITS_CMD_INVDB GITS_CMD_GICv4(0xe) - /* * ITS error numbers */ @@ -529,11 +340,9 @@ #define E_ITS_INT_UNMAPPED_INTERRUPT 0x010307 #define E_ITS_CLEAR_UNMAPPED_INTERRUPT 0x010507 #define E_ITS_MAPD_DEVICE_OOR 0x010801 -#define E_ITS_MAPD_ITTSIZE_OOR 0x010802 #define E_ITS_MAPC_PROCNUM_OOR 0x010902 #define E_ITS_MAPC_COLLECTION_OOR 0x010903 #define E_ITS_MAPTI_UNMAPPED_DEVICE 0x010a04 -#define E_ITS_MAPTI_ID_OOR 0x010a05 #define E_ITS_MAPTI_PHYSICALID_OOR 0x010a06 #define E_ITS_INV_UNMAPPED_INTERRUPT 0x010c07 #define E_ITS_INVALL_UNMAPPED_COLLECTION 0x010d09 @@ -543,43 +352,45 @@ /* * CPU interface registers */ -#define ICC_CTLR_EL1_EOImode_SHIFT (1) -#define ICC_CTLR_EL1_EOImode_drop_dir (0U << ICC_CTLR_EL1_EOImode_SHIFT) -#define ICC_CTLR_EL1_EOImode_drop (1U << ICC_CTLR_EL1_EOImode_SHIFT) -#define ICC_CTLR_EL1_EOImode_MASK (1 << ICC_CTLR_EL1_EOImode_SHIFT) -#define ICC_CTLR_EL1_CBPR_SHIFT 0 -#define ICC_CTLR_EL1_CBPR_MASK (1 << ICC_CTLR_EL1_CBPR_SHIFT) -#define ICC_CTLR_EL1_PMHE_SHIFT 6 -#define ICC_CTLR_EL1_PMHE_MASK (1 << ICC_CTLR_EL1_PMHE_SHIFT) -#define ICC_CTLR_EL1_PRI_BITS_SHIFT 8 -#define ICC_CTLR_EL1_PRI_BITS_MASK (0x7 << ICC_CTLR_EL1_PRI_BITS_SHIFT) -#define ICC_CTLR_EL1_ID_BITS_SHIFT 11 -#define ICC_CTLR_EL1_ID_BITS_MASK (0x7 << ICC_CTLR_EL1_ID_BITS_SHIFT) -#define ICC_CTLR_EL1_SEIS_SHIFT 14 -#define ICC_CTLR_EL1_SEIS_MASK (0x1 << ICC_CTLR_EL1_SEIS_SHIFT) -#define ICC_CTLR_EL1_A3V_SHIFT 15 -#define ICC_CTLR_EL1_A3V_MASK (0x1 << ICC_CTLR_EL1_A3V_SHIFT) -#define ICC_CTLR_EL1_RSS (0x1 << 18) -#define ICC_CTLR_EL1_ExtRange (0x1 << 19) -#define ICC_PMR_EL1_SHIFT 0 -#define 
ICC_PMR_EL1_MASK (0xff << ICC_PMR_EL1_SHIFT) -#define ICC_BPR0_EL1_SHIFT 0 -#define ICC_BPR0_EL1_MASK (0x7 << ICC_BPR0_EL1_SHIFT) -#define ICC_BPR1_EL1_SHIFT 0 -#define ICC_BPR1_EL1_MASK (0x7 << ICC_BPR1_EL1_SHIFT) -#define ICC_IGRPEN0_EL1_SHIFT 0 -#define ICC_IGRPEN0_EL1_MASK (1 << ICC_IGRPEN0_EL1_SHIFT) -#define ICC_IGRPEN1_EL1_SHIFT 0 -#define ICC_IGRPEN1_EL1_MASK (1 << ICC_IGRPEN1_EL1_SHIFT) -#define ICC_SRE_EL1_DIB (1U << 2) -#define ICC_SRE_EL1_DFB (1U << 1) +#define ICC_CTLR_EL1_EOImode_drop_dir (0U << 1) +#define ICC_CTLR_EL1_EOImode_drop (1U << 1) #define ICC_SRE_EL1_SRE (1U << 0) +/* + * Hypervisor interface registers (SRE only) + */ +#define ICH_LR_VIRTUAL_ID_MASK ((1ULL << 32) - 1) + +#define ICH_LR_EOI (1ULL << 41) +#define ICH_LR_GROUP (1ULL << 60) +#define ICH_LR_HW (1ULL << 61) +#define ICH_LR_STATE (3ULL << 62) +#define ICH_LR_PENDING_BIT (1ULL << 62) +#define ICH_LR_ACTIVE_BIT (1ULL << 63) +#define ICH_LR_PHYS_ID_SHIFT 32 +#define ICH_LR_PHYS_ID_MASK (0x3ffULL << ICH_LR_PHYS_ID_SHIFT) +#define ICH_LR_PRIORITY_SHIFT 48 + /* These are for GICv2 emulation only */ #define GICH_LR_VIRTUALID (0x3ffUL << 0) #define GICH_LR_PHYSID_CPUID_SHIFT (10) #define GICH_LR_PHYSID_CPUID (7UL << GICH_LR_PHYSID_CPUID_SHIFT) +#define ICH_MISR_EOI (1 << 0) +#define ICH_MISR_U (1 << 1) + +#define ICH_HCR_EN (1 << 0) +#define ICH_HCR_UIE (1 << 1) + +#define ICH_VMCR_CTLR_SHIFT 0 +#define ICH_VMCR_CTLR_MASK (0x21f << ICH_VMCR_CTLR_SHIFT) +#define ICH_VMCR_BPR1_SHIFT 18 +#define ICH_VMCR_BPR1_MASK (7 << ICH_VMCR_BPR1_SHIFT) +#define ICH_VMCR_BPR0_SHIFT 21 +#define ICH_VMCR_BPR0_MASK (7 << ICH_VMCR_BPR0_SHIFT) +#define ICH_VMCR_PMR_SHIFT 24 +#define ICH_VMCR_PMR_MASK (0xffUL << ICH_VMCR_PMR_SHIFT) + #define ICC_IAR1_EL1_SPURIOUS 0x3ff #define ICC_SRE_EL2_SRE (1 << 0) @@ -594,8 +405,6 @@ #define ICC_SGI1R_AFFINITY_2_SHIFT 32 #define ICC_SGI1R_AFFINITY_2_MASK (0xffULL << ICC_SGI1R_AFFINITY_2_SHIFT) #define ICC_SGI1R_IRQ_ROUTING_MODE_BIT 40 -#define ICC_SGI1R_RS_SHIFT 44 
-#define ICC_SGI1R_RS_MASK (0xfULL << ICC_SGI1R_RS_SHIFT) #define ICC_SGI1R_AFFINITY_3_SHIFT 48 #define ICC_SGI1R_AFFINITY_3_MASK (0xffULL << ICC_SGI1R_AFFINITY_3_SHIFT) @@ -611,23 +420,13 @@ struct rdists { struct { - raw_spinlock_t rd_lock; void __iomem *rd_base; struct page *pend_page; phys_addr_t phys_base; - bool lpi_enabled; - cpumask_t *vpe_table_mask; - void *vpe_l1_base; } __percpu *rdist; - phys_addr_t prop_table_pa; - void *prop_table_va; + struct page *prop_page; + int id_bits; u64 flags; - u32 gicd_typer; - u32 gicd_typer2; - bool has_vlpis; - bool has_rvpeid; - bool has_direct_lpi; - bool has_vpend_valid_dirty; }; struct irq_domain; @@ -635,7 +434,6 @@ struct fwnode_handle; int its_cpu_init(void); int its_init(struct fwnode_handle *handle, struct rdists *rdists, struct irq_domain *domain); -int mbi_init(struct fwnode_handle *fwnode, struct irq_domain *parent); static inline bool gic_enable_sre(void) { diff --git a/include/linux/irqchip/arm-gic.h b/include/linux/irqchip/arm-gic.h index 5686711b0f..eafc965b3e 100644 --- a/include/linux/irqchip/arm-gic.h +++ b/include/linux/irqchip/arm-gic.h @@ -1,8 +1,11 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * include/linux/irqchip/arm-gic.h * * Copyright (C) 2002 ARM Limited, All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
*/ #ifndef __LINUX_IRQCHIP_ARM_GIC_H #define __LINUX_IRQCHIP_ARM_GIC_H @@ -22,18 +25,7 @@ #define GICC_ENABLE 0x1 #define GICC_INT_PRI_THRESHOLD 0xf0 -#define GIC_CPU_CTRL_EnableGrp0_SHIFT 0 -#define GIC_CPU_CTRL_EnableGrp0 (1 << GIC_CPU_CTRL_EnableGrp0_SHIFT) -#define GIC_CPU_CTRL_EnableGrp1_SHIFT 1 -#define GIC_CPU_CTRL_EnableGrp1 (1 << GIC_CPU_CTRL_EnableGrp1_SHIFT) -#define GIC_CPU_CTRL_AckCtl_SHIFT 2 -#define GIC_CPU_CTRL_AckCtl (1 << GIC_CPU_CTRL_AckCtl_SHIFT) -#define GIC_CPU_CTRL_FIQEn_SHIFT 3 -#define GIC_CPU_CTRL_FIQEn (1 << GIC_CPU_CTRL_FIQEn_SHIFT) -#define GIC_CPU_CTRL_CBPR_SHIFT 4 -#define GIC_CPU_CTRL_CBPR (1 << GIC_CPU_CTRL_CBPR_SHIFT) -#define GIC_CPU_CTRL_EOImodeNS_SHIFT 9 -#define GIC_CPU_CTRL_EOImodeNS (1 << GIC_CPU_CTRL_EOImodeNS_SHIFT) +#define GIC_CPU_CTRL_EOImodeNS (1 << 9) #define GICC_IAR_INT_ID_MASK 0x3ff #define GICC_INT_SPURIOUS 1023 @@ -62,16 +54,11 @@ #define GICD_INT_EN_CLR_X32 0xffffffff #define GICD_INT_EN_SET_SGI 0x0000ffff #define GICD_INT_EN_CLR_PPI 0xffff0000 - -#define GICD_IIDR_IMPLEMENTER_SHIFT 0 -#define GICD_IIDR_IMPLEMENTER_MASK (0xfff << GICD_IIDR_IMPLEMENTER_SHIFT) -#define GICD_IIDR_REVISION_SHIFT 12 -#define GICD_IIDR_REVISION_MASK (0xf << GICD_IIDR_REVISION_SHIFT) -#define GICD_IIDR_VARIANT_SHIFT 16 -#define GICD_IIDR_VARIANT_MASK (0xf << GICD_IIDR_VARIANT_SHIFT) -#define GICD_IIDR_PRODUCT_ID_SHIFT 24 -#define GICD_IIDR_PRODUCT_ID_MASK (0xff << GICD_IIDR_PRODUCT_ID_SHIFT) - +#define GICD_INT_DEF_PRI 0xa0 +#define GICD_INT_DEF_PRI_X4 ((GICD_INT_DEF_PRI << 24) |\ + (GICD_INT_DEF_PRI << 16) |\ + (GICD_INT_DEF_PRI << 8) |\ + GICD_INT_DEF_PRI) #define GICH_HCR 0x0 #define GICH_VTR 0x4 @@ -86,7 +73,6 @@ #define GICH_HCR_EN (1 << 0) #define GICH_HCR_UIE (1 << 1) -#define GICH_HCR_NPIE (1 << 3) #define GICH_LR_VIRTUALID (0x3ff << 0) #define GICH_LR_PHYSID_CPUID_SHIFT (10) @@ -96,22 +82,10 @@ #define GICH_LR_PENDING_BIT (1 << 28) #define GICH_LR_ACTIVE_BIT (1 << 29) #define GICH_LR_EOI (1 << 19) -#define GICH_LR_GROUP1 (1 << 
30) #define GICH_LR_HW (1 << 31) -#define GICH_VMCR_ENABLE_GRP0_SHIFT 0 -#define GICH_VMCR_ENABLE_GRP0_MASK (1 << GICH_VMCR_ENABLE_GRP0_SHIFT) -#define GICH_VMCR_ENABLE_GRP1_SHIFT 1 -#define GICH_VMCR_ENABLE_GRP1_MASK (1 << GICH_VMCR_ENABLE_GRP1_SHIFT) -#define GICH_VMCR_ACK_CTL_SHIFT 2 -#define GICH_VMCR_ACK_CTL_MASK (1 << GICH_VMCR_ACK_CTL_SHIFT) -#define GICH_VMCR_FIQ_EN_SHIFT 3 -#define GICH_VMCR_FIQ_EN_MASK (1 << GICH_VMCR_FIQ_EN_SHIFT) -#define GICH_VMCR_CBPR_SHIFT 4 -#define GICH_VMCR_CBPR_MASK (1 << GICH_VMCR_CBPR_SHIFT) -#define GICH_VMCR_EOI_MODE_SHIFT 9 -#define GICH_VMCR_EOI_MODE_MASK (1 << GICH_VMCR_EOI_MODE_SHIFT) - +#define GICH_VMCR_CTRL_SHIFT 0 +#define GICH_VMCR_CTRL_MASK (0x21f << GICH_VMCR_CTRL_SHIFT) #define GICH_VMCR_PRIMASK_SHIFT 27 #define GICH_VMCR_PRIMASK_MASK (0x1f << GICH_VMCR_PRIMASK_SHIFT) #define GICH_VMCR_BINPOINT_SHIFT 21 @@ -122,9 +96,6 @@ #define GICH_MISR_EOI (1 << 0) #define GICH_MISR_U (1 << 1) -#define GICV_PMR_PRIORITY_SHIFT 3 -#define GICV_PMR_PRIORITY_MASK (0x1f << GICV_PMR_PRIORITY_SHIFT) - #ifndef __ASSEMBLY__ #include @@ -155,7 +126,11 @@ int gic_of_init_child(struct device *dev, struct gic_chip_data **gic, int irq); * Legacy platforms not converted to DT yet must use this to init * their GIC */ -void gic_init(void __iomem *dist , void __iomem *cpu); +void gic_init(unsigned int nr, int start, + void __iomem *dist , void __iomem *cpu); + +int gicv2m_init(struct fwnode_handle *parent_handle, + struct irq_domain *parent); void gic_send_sgi(unsigned int cpu_id, unsigned int irq); int gic_get_cpu_id(unsigned int cpu); diff --git a/include/linux/irqchip/arm-vic.h b/include/linux/irqchip/arm-vic.h index f2b11d1df2..ba46c794b4 100644 --- a/include/linux/irqchip/arm-vic.h +++ b/include/linux/irqchip/arm-vic.h @@ -1,14 +1,38 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * arch/arm/include/asm/hardware/vic.h * * Copyright (c) ARM Limited 2003. All rights reserved. 
+ * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #ifndef __ASM_ARM_HARDWARE_VIC_H #define __ASM_ARM_HARDWARE_VIC_H #include +#define VIC_RAW_STATUS 0x08 +#define VIC_INT_ENABLE 0x10 /* 1 = enable, 0 = disable */ +#define VIC_INT_ENABLE_CLEAR 0x14 + +struct device_node; +struct pt_regs; + +void __vic_init(void __iomem *base, int parent_irq, int irq_start, + u32 vic_sources, u32 resume_sources, struct device_node *node); void vic_init(void __iomem *base, unsigned int irq_start, u32 vic_sources, u32 resume_sources); +int vic_init_cascaded(void __iomem *base, unsigned int parent_irq, + u32 vic_sources, u32 resume_sources); #endif diff --git a/include/linux/irqchip/chained_irq.h b/include/linux/irqchip/chained_irq.h index dd8b3c4766..adf4c30f3a 100644 --- a/include/linux/irqchip/chained_irq.h +++ b/include/linux/irqchip/chained_irq.h @@ -1,8 +1,19 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * Chained IRQ handlers support. * * Copyright (C) 2011 ARM Ltd. + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . */ #ifndef __IRQCHIP_CHAINED_IRQ_H #define __IRQCHIP_CHAINED_IRQ_H diff --git a/include/linux/irqchip/ingenic.h b/include/linux/irqchip/ingenic.h new file mode 100644 index 0000000000..0ee319a402 --- /dev/null +++ b/include/linux/irqchip/ingenic.h @@ -0,0 +1,23 @@ +/* + * Copyright (C) 2010, Lars-Peter Clausen + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write to the Free Software Foundation, Inc., + * 675 Mass Ave, Cambridge, MA 02139, USA. 
+ * + */ + +#ifndef __LINUX_IRQCHIP_INGENIC_H__ +#define __LINUX_IRQCHIP_INGENIC_H__ + +#include + +extern void ingenic_intc_irq_suspend(struct irq_data *data); +extern void ingenic_intc_irq_resume(struct irq_data *data); + +#endif diff --git a/include/linux/irqchip/irq-omap-intc.h b/include/linux/irqchip/irq-omap-intc.h index dca379c0d7..2e3d1afeb6 100644 --- a/include/linux/irqchip/irq-omap-intc.h +++ b/include/linux/irqchip/irq-omap-intc.h @@ -1,15 +1,25 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /** * irq-omap-intc.h - INTC Idle Functions * - * Copyright (C) 2014 Texas Instruments Incorporated - https://www.ti.com + * Copyright (C) 2014 Texas Instruments Incorporated - http://www.ti.com * * Author: Felipe Balbi + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 of + * the License as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. */ #ifndef __INCLUDE_LINUX_IRQCHIP_IRQ_OMAP_INTC_H #define __INCLUDE_LINUX_IRQCHIP_IRQ_OMAP_INTC_H +void omap3_init_irq(void); + int omap_irq_pending(void); void omap_intc_save_context(void); void omap_intc_restore_context(void); diff --git a/include/linux/irqchip/irq-partition-percpu.h b/include/linux/irqchip/irq-partition-percpu.h index 2f6ae75517..87433a5d12 100644 --- a/include/linux/irqchip/irq-partition-percpu.h +++ b/include/linux/irqchip/irq-partition-percpu.h @@ -1,12 +1,20 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright (C) 2016 ARM Limited, All Rights Reserved. 
* Author: Marc Zyngier + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . */ -#ifndef __LINUX_IRQCHIP_IRQ_PARTITION_PERCPU_H -#define __LINUX_IRQCHIP_IRQ_PARTITION_PERCPU_H - #include #include #include @@ -49,5 +57,3 @@ struct irq_domain *partition_get_domain(struct partition_desc *dsc) return NULL; } #endif - -#endif /* __LINUX_IRQCHIP_IRQ_PARTITION_PERCPU_H */ diff --git a/include/linux/irqchip/irq-sa11x0.h b/include/linux/irqchip/irq-sa11x0.h index 68fd2d73b6..15db6829c1 100644 --- a/include/linux/irqchip/irq-sa11x0.h +++ b/include/linux/irqchip/irq-sa11x0.h @@ -1,9 +1,12 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * Generic IRQ handling for the SA11x0. * * Copyright (C) 2015 Dmitry Eremin-Solenikov * Copyright (C) 1999-2001 Nicolas Pitre + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
*/ #ifndef __INCLUDE_LINUX_IRQCHIP_IRQ_SA11x0_H diff --git a/include/linux/irqchip/metag-ext.h b/include/linux/irqchip/metag-ext.h new file mode 100644 index 0000000000..697af0fe7c --- /dev/null +++ b/include/linux/irqchip/metag-ext.h @@ -0,0 +1,33 @@ +/* + * Copyright (C) 2012 Imagination Technologies + */ + +#ifndef _LINUX_IRQCHIP_METAG_EXT_H_ +#define _LINUX_IRQCHIP_METAG_EXT_H_ + +struct irq_data; +struct platform_device; + +/* called from core irq code at init */ +int init_external_IRQ(void); + +/* + * called from SoC init_irq() callback to dynamically indicate the lack of + * HWMASKEXT registers. + */ +void meta_intc_no_mask(void); + +/* + * These allow SoCs to specialise the interrupt controller from their init_irq + * callbacks. + */ + +extern struct irq_chip meta_intc_edge_chip; +extern struct irq_chip meta_intc_level_chip; + +/* this should be called in the mask callback */ +void meta_intc_mask_irq_simple(struct irq_data *data); +/* this should be called in the unmask callback */ +void meta_intc_unmask_irq_simple(struct irq_data *data); + +#endif /* _LINUX_IRQCHIP_METAG_EXT_H_ */ diff --git a/include/linux/irqchip/metag.h b/include/linux/irqchip/metag.h new file mode 100644 index 0000000000..4ebdfb3101 --- /dev/null +++ b/include/linux/irqchip/metag.h @@ -0,0 +1,24 @@ +/* + * Copyright (C) 2011 Imagination Technologies + */ + +#ifndef _LINUX_IRQCHIP_METAG_H_ +#define _LINUX_IRQCHIP_METAG_H_ + +#include + +#ifdef CONFIG_METAG_PERFCOUNTER_IRQS +extern int init_internal_IRQ(void); +extern int internal_irq_map(unsigned int hw); +#else +static inline int init_internal_IRQ(void) +{ + return 0; +} +static inline int internal_irq_map(unsigned int hw) +{ + return -EINVAL; +} +#endif + +#endif /* _LINUX_IRQCHIP_METAG_H_ */ diff --git a/include/linux/irqchip/mips-gic.h b/include/linux/irqchip/mips-gic.h new file mode 100644 index 0000000000..81f930b0bc --- /dev/null +++ b/include/linux/irqchip/mips-gic.h @@ -0,0 +1,298 @@ +/* + * This file is subject to the terms 
and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (C) 2000, 07 MIPS Technologies, Inc. + */ +#ifndef __LINUX_IRQCHIP_MIPS_GIC_H +#define __LINUX_IRQCHIP_MIPS_GIC_H + +#include +#include + +#define GIC_MAX_INTRS 256 + +/* Constants */ +#define GIC_POL_POS 1 +#define GIC_POL_NEG 0 +#define GIC_TRIG_EDGE 1 +#define GIC_TRIG_LEVEL 0 +#define GIC_TRIG_DUAL_ENABLE 1 +#define GIC_TRIG_DUAL_DISABLE 0 + +#define MSK(n) ((1 << (n)) - 1) + +/* Accessors */ +#define GIC_REG(segment, offset) (segment##_##SECTION_OFS + offset##_##OFS) + +/* GIC Address Space */ +#define SHARED_SECTION_OFS 0x0000 +#define SHARED_SECTION_SIZE 0x8000 +#define VPE_LOCAL_SECTION_OFS 0x8000 +#define VPE_LOCAL_SECTION_SIZE 0x4000 +#define VPE_OTHER_SECTION_OFS 0xc000 +#define VPE_OTHER_SECTION_SIZE 0x4000 +#define USM_VISIBLE_SECTION_OFS 0x10000 +#define USM_VISIBLE_SECTION_SIZE 0x10000 + +/* Register Map for Shared Section */ + +#define GIC_SH_CONFIG_OFS 0x0000 + +/* Shared Global Counter */ +#define GIC_SH_COUNTER_31_00_OFS 0x0010 +/* 64-bit counter register for CM3 */ +#define GIC_SH_COUNTER_OFS GIC_SH_COUNTER_31_00_OFS +#define GIC_SH_COUNTER_63_32_OFS 0x0014 +#define GIC_SH_REVISIONID_OFS 0x0020 + +/* Convert an interrupt number to a byte offset/bit for multi-word registers */ +#define GIC_INTR_OFS(intr) ({ \ + unsigned bits = mips_cm_is64 ? 64 : 32; \ + unsigned reg_idx = (intr) / bits; \ + unsigned reg_width = bits / 8; \ + \ + reg_idx * reg_width; \ +}) +#define GIC_INTR_BIT(intr) ((intr) % (mips_cm_is64 ? 
64 : 32)) + +/* Polarity : Reset Value is always 0 */ +#define GIC_SH_SET_POLARITY_OFS 0x0100 + +/* Triggering : Reset Value is always 0 */ +#define GIC_SH_SET_TRIGGER_OFS 0x0180 + +/* Dual edge triggering : Reset Value is always 0 */ +#define GIC_SH_SET_DUAL_OFS 0x0200 + +/* Set/Clear corresponding bit in Edge Detect Register */ +#define GIC_SH_WEDGE_OFS 0x0280 + +/* Mask manipulation */ +#define GIC_SH_RMASK_OFS 0x0300 +#define GIC_SH_SMASK_OFS 0x0380 + +/* Global Interrupt Mask Register (RO) - Bit Set == Interrupt enabled */ +#define GIC_SH_MASK_OFS 0x0400 + +/* Pending Global Interrupts (RO) */ +#define GIC_SH_PEND_OFS 0x0480 + +/* Maps Interrupt X to a Pin */ +#define GIC_SH_INTR_MAP_TO_PIN_BASE_OFS 0x0500 +#define GIC_SH_MAP_TO_PIN(intr) (4 * (intr)) + +/* Maps Interrupt X to a VPE */ +#define GIC_SH_INTR_MAP_TO_VPE_BASE_OFS 0x2000 +#define GIC_SH_MAP_TO_VPE_REG_OFF(intr, vpe) \ + ((32 * (intr)) + (((vpe) / 32) * 4)) +#define GIC_SH_MAP_TO_VPE_REG_BIT(vpe) (1 << ((vpe) % 32)) + +/* Register Map for Local Section */ +#define GIC_VPE_CTL_OFS 0x0000 +#define GIC_VPE_PEND_OFS 0x0004 +#define GIC_VPE_MASK_OFS 0x0008 +#define GIC_VPE_RMASK_OFS 0x000c +#define GIC_VPE_SMASK_OFS 0x0010 +#define GIC_VPE_WD_MAP_OFS 0x0040 +#define GIC_VPE_COMPARE_MAP_OFS 0x0044 +#define GIC_VPE_TIMER_MAP_OFS 0x0048 +#define GIC_VPE_FDC_MAP_OFS 0x004c +#define GIC_VPE_PERFCTR_MAP_OFS 0x0050 +#define GIC_VPE_SWINT0_MAP_OFS 0x0054 +#define GIC_VPE_SWINT1_MAP_OFS 0x0058 +#define GIC_VPE_OTHER_ADDR_OFS 0x0080 +#define GIC_VP_IDENT_OFS 0x0088 +#define GIC_VPE_WD_CONFIG0_OFS 0x0090 +#define GIC_VPE_WD_COUNT0_OFS 0x0094 +#define GIC_VPE_WD_INITIAL0_OFS 0x0098 +#define GIC_VPE_COMPARE_LO_OFS 0x00a0 +/* 64-bit Compare register on CM3 */ +#define GIC_VPE_COMPARE_OFS GIC_VPE_COMPARE_LO_OFS +#define GIC_VPE_COMPARE_HI_OFS 0x00a4 + +#define GIC_VPE_EIC_SHADOW_SET_BASE_OFS 0x0100 +#define GIC_VPE_EIC_SS(intr) (4 * (intr)) + +#define GIC_VPE_EIC_VEC_BASE_OFS 0x0800 +#define GIC_VPE_EIC_VEC(intr) (4 * 
(intr)) + +#define GIC_VPE_TENABLE_NMI_OFS 0x1000 +#define GIC_VPE_TENABLE_YQ_OFS 0x1004 +#define GIC_VPE_TENABLE_INT_31_0_OFS 0x1080 +#define GIC_VPE_TENABLE_INT_63_32_OFS 0x1084 + +/* User Mode Visible Section Register Map */ +#define GIC_UMV_SH_COUNTER_31_00_OFS 0x0000 +#define GIC_UMV_SH_COUNTER_63_32_OFS 0x0004 + +/* Masks */ +#define GIC_SH_CONFIG_COUNTSTOP_SHF 28 +#define GIC_SH_CONFIG_COUNTSTOP_MSK (MSK(1) << GIC_SH_CONFIG_COUNTSTOP_SHF) + +#define GIC_SH_CONFIG_COUNTBITS_SHF 24 +#define GIC_SH_CONFIG_COUNTBITS_MSK (MSK(4) << GIC_SH_CONFIG_COUNTBITS_SHF) + +#define GIC_SH_CONFIG_NUMINTRS_SHF 16 +#define GIC_SH_CONFIG_NUMINTRS_MSK (MSK(8) << GIC_SH_CONFIG_NUMINTRS_SHF) + +#define GIC_SH_CONFIG_NUMVPES_SHF 0 +#define GIC_SH_CONFIG_NUMVPES_MSK (MSK(8) << GIC_SH_CONFIG_NUMVPES_SHF) + +#define GIC_SH_WEDGE_SET(intr) ((intr) | (0x1 << 31)) +#define GIC_SH_WEDGE_CLR(intr) ((intr) & ~(0x1 << 31)) + +#define GIC_MAP_TO_PIN_SHF 31 +#define GIC_MAP_TO_PIN_MSK (MSK(1) << GIC_MAP_TO_PIN_SHF) +#define GIC_MAP_TO_NMI_SHF 30 +#define GIC_MAP_TO_NMI_MSK (MSK(1) << GIC_MAP_TO_NMI_SHF) +#define GIC_MAP_TO_YQ_SHF 29 +#define GIC_MAP_TO_YQ_MSK (MSK(1) << GIC_MAP_TO_YQ_SHF) +#define GIC_MAP_SHF 0 +#define GIC_MAP_MSK (MSK(6) << GIC_MAP_SHF) + +/* GIC_VPE_CTL Masks */ +#define GIC_VPE_CTL_FDC_RTBL_SHF 4 +#define GIC_VPE_CTL_FDC_RTBL_MSK (MSK(1) << GIC_VPE_CTL_FDC_RTBL_SHF) +#define GIC_VPE_CTL_SWINT_RTBL_SHF 3 +#define GIC_VPE_CTL_SWINT_RTBL_MSK (MSK(1) << GIC_VPE_CTL_SWINT_RTBL_SHF) +#define GIC_VPE_CTL_PERFCNT_RTBL_SHF 2 +#define GIC_VPE_CTL_PERFCNT_RTBL_MSK (MSK(1) << GIC_VPE_CTL_PERFCNT_RTBL_SHF) +#define GIC_VPE_CTL_TIMER_RTBL_SHF 1 +#define GIC_VPE_CTL_TIMER_RTBL_MSK (MSK(1) << GIC_VPE_CTL_TIMER_RTBL_SHF) +#define GIC_VPE_CTL_EIC_MODE_SHF 0 +#define GIC_VPE_CTL_EIC_MODE_MSK (MSK(1) << GIC_VPE_CTL_EIC_MODE_SHF) + +/* GIC_VPE_PEND Masks */ +#define GIC_VPE_PEND_WD_SHF 0 +#define GIC_VPE_PEND_WD_MSK (MSK(1) << GIC_VPE_PEND_WD_SHF) +#define GIC_VPE_PEND_CMP_SHF 1 +#define 
GIC_VPE_PEND_CMP_MSK (MSK(1) << GIC_VPE_PEND_CMP_SHF) +#define GIC_VPE_PEND_TIMER_SHF 2 +#define GIC_VPE_PEND_TIMER_MSK (MSK(1) << GIC_VPE_PEND_TIMER_SHF) +#define GIC_VPE_PEND_PERFCOUNT_SHF 3 +#define GIC_VPE_PEND_PERFCOUNT_MSK (MSK(1) << GIC_VPE_PEND_PERFCOUNT_SHF) +#define GIC_VPE_PEND_SWINT0_SHF 4 +#define GIC_VPE_PEND_SWINT0_MSK (MSK(1) << GIC_VPE_PEND_SWINT0_SHF) +#define GIC_VPE_PEND_SWINT1_SHF 5 +#define GIC_VPE_PEND_SWINT1_MSK (MSK(1) << GIC_VPE_PEND_SWINT1_SHF) +#define GIC_VPE_PEND_FDC_SHF 6 +#define GIC_VPE_PEND_FDC_MSK (MSK(1) << GIC_VPE_PEND_FDC_SHF) + +/* GIC_VPE_RMASK Masks */ +#define GIC_VPE_RMASK_WD_SHF 0 +#define GIC_VPE_RMASK_WD_MSK (MSK(1) << GIC_VPE_RMASK_WD_SHF) +#define GIC_VPE_RMASK_CMP_SHF 1 +#define GIC_VPE_RMASK_CMP_MSK (MSK(1) << GIC_VPE_RMASK_CMP_SHF) +#define GIC_VPE_RMASK_TIMER_SHF 2 +#define GIC_VPE_RMASK_TIMER_MSK (MSK(1) << GIC_VPE_RMASK_TIMER_SHF) +#define GIC_VPE_RMASK_PERFCNT_SHF 3 +#define GIC_VPE_RMASK_PERFCNT_MSK (MSK(1) << GIC_VPE_RMASK_PERFCNT_SHF) +#define GIC_VPE_RMASK_SWINT0_SHF 4 +#define GIC_VPE_RMASK_SWINT0_MSK (MSK(1) << GIC_VPE_RMASK_SWINT0_SHF) +#define GIC_VPE_RMASK_SWINT1_SHF 5 +#define GIC_VPE_RMASK_SWINT1_MSK (MSK(1) << GIC_VPE_RMASK_SWINT1_SHF) +#define GIC_VPE_RMASK_FDC_SHF 6 +#define GIC_VPE_RMASK_FDC_MSK (MSK(1) << GIC_VPE_RMASK_FDC_SHF) + +/* GIC_VPE_SMASK Masks */ +#define GIC_VPE_SMASK_WD_SHF 0 +#define GIC_VPE_SMASK_WD_MSK (MSK(1) << GIC_VPE_SMASK_WD_SHF) +#define GIC_VPE_SMASK_CMP_SHF 1 +#define GIC_VPE_SMASK_CMP_MSK (MSK(1) << GIC_VPE_SMASK_CMP_SHF) +#define GIC_VPE_SMASK_TIMER_SHF 2 +#define GIC_VPE_SMASK_TIMER_MSK (MSK(1) << GIC_VPE_SMASK_TIMER_SHF) +#define GIC_VPE_SMASK_PERFCNT_SHF 3 +#define GIC_VPE_SMASK_PERFCNT_MSK (MSK(1) << GIC_VPE_SMASK_PERFCNT_SHF) +#define GIC_VPE_SMASK_SWINT0_SHF 4 +#define GIC_VPE_SMASK_SWINT0_MSK (MSK(1) << GIC_VPE_SMASK_SWINT0_SHF) +#define GIC_VPE_SMASK_SWINT1_SHF 5 +#define GIC_VPE_SMASK_SWINT1_MSK (MSK(1) << GIC_VPE_SMASK_SWINT1_SHF) +#define GIC_VPE_SMASK_FDC_SHF 
6 +#define GIC_VPE_SMASK_FDC_MSK (MSK(1) << GIC_VPE_SMASK_FDC_SHF) + +/* GIC_VP_IDENT fields */ +#define GIC_VP_IDENT_VCNUM_SHF 0 +#define GIC_VP_IDENT_VCNUM_MSK (MSK(6) << GIC_VP_IDENT_VCNUM_SHF) + +/* GIC nomenclature for Core Interrupt Pins. */ +#define GIC_CPU_INT0 0 /* Core Interrupt 2 */ +#define GIC_CPU_INT1 1 /* . */ +#define GIC_CPU_INT2 2 /* . */ +#define GIC_CPU_INT3 3 /* . */ +#define GIC_CPU_INT4 4 /* . */ +#define GIC_CPU_INT5 5 /* Core Interrupt 7 */ + +/* Add 2 to convert GIC CPU pin to core interrupt */ +#define GIC_CPU_PIN_OFFSET 2 + +/* Add 2 to convert non-EIC hardware interrupt to EIC vector number. */ +#define GIC_CPU_TO_VEC_OFFSET 2 + +/* Mapped interrupt to pin X, then GIC will generate the vector (X+1). */ +#define GIC_PIN_TO_VEC_OFFSET 1 + +/* Local GIC interrupts. */ +#define GIC_LOCAL_INT_WD 0 /* GIC watchdog */ +#define GIC_LOCAL_INT_COMPARE 1 /* GIC count and compare timer */ +#define GIC_LOCAL_INT_TIMER 2 /* CPU timer interrupt */ +#define GIC_LOCAL_INT_PERFCTR 3 /* CPU performance counter */ +#define GIC_LOCAL_INT_SWINT0 4 /* CPU software interrupt 0 */ +#define GIC_LOCAL_INT_SWINT1 5 /* CPU software interrupt 1 */ +#define GIC_LOCAL_INT_FDC 6 /* CPU fast debug channel */ +#define GIC_NUM_LOCAL_INTRS 7 + +/* Convert between local/shared IRQ number and GIC HW IRQ number. 
*/ +#define GIC_LOCAL_HWIRQ_BASE 0 +#define GIC_LOCAL_TO_HWIRQ(x) (GIC_LOCAL_HWIRQ_BASE + (x)) +#define GIC_HWIRQ_TO_LOCAL(x) ((x) - GIC_LOCAL_HWIRQ_BASE) +#define GIC_SHARED_HWIRQ_BASE GIC_NUM_LOCAL_INTRS +#define GIC_SHARED_TO_HWIRQ(x) (GIC_SHARED_HWIRQ_BASE + (x)) +#define GIC_HWIRQ_TO_SHARED(x) ((x) - GIC_SHARED_HWIRQ_BASE) + +#ifdef CONFIG_MIPS_GIC + +extern unsigned int gic_present; + +extern void gic_init(unsigned long gic_base_addr, + unsigned long gic_addrspace_size, unsigned int cpu_vec, + unsigned int irqbase); +extern void gic_clocksource_init(unsigned int); +extern cycle_t gic_read_count(void); +extern unsigned int gic_get_count_width(void); +extern cycle_t gic_read_compare(void); +extern void gic_write_compare(cycle_t cnt); +extern void gic_write_cpu_compare(cycle_t cnt, int cpu); +extern void gic_start_count(void); +extern void gic_stop_count(void); +extern int gic_get_c0_compare_int(void); +extern int gic_get_c0_perfcount_int(void); +extern int gic_get_c0_fdc_int(void); +extern int gic_get_usm_range(struct resource *gic_usm_res); + +#else /* CONFIG_MIPS_GIC */ + +#define gic_present 0 + +static inline int gic_get_usm_range(struct resource *gic_usm_res) +{ + /* Shouldn't be called. */ + return -1; +} + +#endif /* CONFIG_MIPS_GIC */ + +/** + * gic_read_local_vp_id() - read the local VPs VCNUM + * + * Read the VCNUM of the local VP from the GIC_VP_IDENT register and + * return it to the caller. This ID should be used to refer to the VP + * via the GICs VP-other region, or when calculating an offset to a + * bit representing the VP in interrupt masks. + * + * Return: The VCNUM value for the local VP. 
+ */ +extern unsigned gic_read_local_vp_id(void); + +#endif /* __LINUX_IRQCHIP_MIPS_GIC_H */ diff --git a/include/linux/irqchip/mmp.h b/include/linux/irqchip/mmp.h index cb8455c87c..124e0b7860 100644 --- a/include/linux/irqchip/mmp.h +++ b/include/linux/irqchip/mmp.h @@ -1,7 +1,6 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef __IRQCHIP_MMP_H #define __IRQCHIP_MMP_H -extern struct irq_chip icu_irq_chip; +extern irq_chip_no_const icu_irq_chip; #endif /* __IRQCHIP_MMP_H */ diff --git a/include/linux/irqchip/mxs.h b/include/linux/irqchip/mxs.h index 4f447e3f0f..9039a538a9 100644 --- a/include/linux/irqchip/mxs.h +++ b/include/linux/irqchip/mxs.h @@ -1,6 +1,9 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright (C) 2013 Freescale Semiconductor, Inc. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. */ #ifndef __LINUX_IRQCHIP_MXS_H diff --git a/include/linux/irqchip/versatile-fpga.h b/include/linux/irqchip/versatile-fpga.h index a978fc8c79..1fac9651d3 100644 --- a/include/linux/irqchip/versatile-fpga.h +++ b/include/linux/irqchip/versatile-fpga.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef PLAT_FPGA_IRQ_H #define PLAT_FPGA_IRQ_H diff --git a/include/linux/irqdesc.h b/include/linux/irqdesc.h index 59aea39785..00414d6676 100644 --- a/include/linux/irqdesc.h +++ b/include/linux/irqdesc.h @@ -1,10 +1,8 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_IRQDESC_H #define _LINUX_IRQDESC_H #include #include -#include /* * Core internal functions to deal with irq descriptors @@ -22,17 +20,17 @@ struct pt_regs; * @irq_common_data: per irq and chip data passed down to chip functions * @kstat_irqs: irq stats per cpu * @handle_irq: highlevel irq-events handler + * @preflow_handler: handler called before the flow handler (currently used by sparc) * @action: the irq action chain - * 
@status_use_accessors: status information + * @status: status information * @core_internal_state__do_not_mess_with_it: core internal status information * @depth: disable-depth, for nested irq_disable() calls * @wake_depth: enable depth, for multiple irq_set_irq_wake() callers - * @tot_count: stats field for non-percpu irqs * @irq_count: stats field to detect stalled irqs * @last_unhandled: aging timer for unhandled count * @irqs_unhandled: stats field for spurious unhandled interrupts * @threads_handled: stats field for deferred spurious detection of threaded handlers - * @threads_handled_last: comparator field for deferred spurious detection of threaded handlers + * @threads_handled_last: comparator field for deferred spurious detection of theraded handlers * @lock: locking for SMP * @affinity_hint: hint to user space for preferred irq affinity * @affinity_notify: context for notification of affinity changes @@ -47,9 +45,7 @@ struct pt_regs; * IRQF_FORCE_RESUME set * @rcu: rcu head for delayed free * @kobj: kobject used to represent this struct in sysfs - * @request_mutex: mutex to protect request/free before locking desc->lock * @dir: /proc/irq/ procfs entry - * @debugfs_file: dentry for the debugfs file * @name: flow handler name for /proc/interrupts output */ struct irq_desc { @@ -57,16 +53,18 @@ struct irq_desc { struct irq_data irq_data; unsigned int __percpu *kstat_irqs; irq_flow_handler_t handle_irq; +#ifdef CONFIG_IRQ_PREFLOW_FASTEOI + irq_preflow_handler_t preflow_handler; +#endif struct irqaction *action; /* IRQ action list */ unsigned int status_use_accessors; unsigned int core_internal_state__do_not_mess_with_it; unsigned int depth; /* nested irq disables */ unsigned int wake_depth; /* nested wake enables */ - unsigned int tot_count; unsigned int irq_count; /* For detecting broken IRQs */ unsigned long last_unhandled; /* Aging timer for unhandled count */ unsigned int irqs_unhandled; - atomic_t threads_handled; + atomic_unchecked_t threads_handled; int 
threads_handled_last; raw_spinlock_t lock; struct cpumask *percpu_enabled; @@ -90,15 +88,10 @@ struct irq_desc { #ifdef CONFIG_PROC_FS struct proc_dir_entry *dir; #endif -#ifdef CONFIG_GENERIC_IRQ_DEBUGFS - struct dentry *debugfs_file; - const char *dev_name; -#endif #ifdef CONFIG_SPARSE_IRQ struct rcu_head rcu; struct kobject kobj; #endif - struct mutex request_mutex; int parent_irq; struct module *owner; const char *name; @@ -113,12 +106,6 @@ static inline void irq_unlock_sparse(void) { } extern struct irq_desc irq_desc[NR_IRQS]; #endif -static inline unsigned int irq_desc_kstat_cpu(struct irq_desc *desc, - unsigned int cpu) -{ - return desc->kstat_irqs ? *per_cpu_ptr(desc->kstat_irqs, cpu) : 0; -} - static inline struct irq_desc *irq_data_to_desc(struct irq_data *data) { return container_of(data->common, struct irq_desc, irq_common_data); @@ -149,6 +136,11 @@ static inline void *irq_desc_get_handler_data(struct irq_desc *desc) return desc->irq_common_data.handler_data; } +static inline struct msi_desc *irq_desc_get_msi_desc(struct irq_desc *desc) +{ + return desc->irq_common_data.msi_desc; +} + /* * Architectures call this to let the generic IRQ layer * handle an interrupt. @@ -158,30 +150,34 @@ static inline void generic_handle_irq_desc(struct irq_desc *desc) desc->handle_irq(desc); } -int handle_irq_desc(struct irq_desc *desc); int generic_handle_irq(unsigned int irq); -#ifdef CONFIG_IRQ_DOMAIN +#ifdef CONFIG_HANDLE_DOMAIN_IRQ /* * Convert a HW interrupt number to a logical one using a IRQ domain, * and handle the result interrupt number. Return -EINVAL if - * conversion failed. + * conversion failed. Providing a NULL domain indicates that the + * conversion has already been done. 
*/ -int generic_handle_domain_irq(struct irq_domain *domain, unsigned int hwirq); +int __handle_domain_irq(struct irq_domain *domain, unsigned int hwirq, + bool lookup, struct pt_regs *regs); -#ifdef CONFIG_HANDLE_DOMAIN_IRQ -int handle_domain_irq(struct irq_domain *domain, - unsigned int hwirq, struct pt_regs *regs); - -int handle_domain_nmi(struct irq_domain *domain, unsigned int hwirq, - struct pt_regs *regs); -#endif +static inline int handle_domain_irq(struct irq_domain *domain, + unsigned int hwirq, struct pt_regs *regs) +{ + return __handle_domain_irq(domain, hwirq, true, regs); +} #endif /* Test to see if a driver has successfully requested an irq */ static inline int irq_desc_has_action(struct irq_desc *desc) { - return desc && desc->action != NULL; + return desc->action != NULL; +} + +static inline int irq_has_action(unsigned int irq) +{ + return irq_desc_has_action(irq_to_desc(irq)); } /** @@ -225,31 +221,40 @@ irq_set_chip_handler_name_locked(struct irq_data *data, struct irq_chip *chip, data->chip = chip; } -bool irq_check_status_bit(unsigned int irq, unsigned int bitmask); - -static inline bool irq_balancing_disabled(unsigned int irq) +static inline int irq_balancing_disabled(unsigned int irq) { - return irq_check_status_bit(irq, IRQ_NO_BALANCING_MASK); + struct irq_desc *desc; + + desc = irq_to_desc(irq); + return desc->status_use_accessors & IRQ_NO_BALANCING_MASK; } -static inline bool irq_is_percpu(unsigned int irq) +static inline int irq_is_percpu(unsigned int irq) { - return irq_check_status_bit(irq, IRQ_PER_CPU); + struct irq_desc *desc; + + desc = irq_to_desc(irq); + return desc->status_use_accessors & IRQ_PER_CPU; } -static inline bool irq_is_percpu_devid(unsigned int irq) -{ - return irq_check_status_bit(irq, IRQ_PER_CPU_DEVID); -} - -void __irq_set_lockdep_class(unsigned int irq, struct lock_class_key *lock_class, - struct lock_class_key *request_class); static inline void -irq_set_lockdep_class(unsigned int irq, struct lock_class_key 
*lock_class, - struct lock_class_key *request_class) +irq_set_lockdep_class(unsigned int irq, struct lock_class_key *class) { - if (IS_ENABLED(CONFIG_LOCKDEP)) - __irq_set_lockdep_class(irq, lock_class, request_class); + struct irq_desc *desc = irq_to_desc(irq); + + if (desc) + lockdep_set_class(&desc->lock, class); } +#ifdef CONFIG_IRQ_PREFLOW_FASTEOI +static inline void +__irq_set_preflow_handler(unsigned int irq, irq_preflow_handler_t handler) +{ + struct irq_desc *desc; + + desc = irq_to_desc(irq); + desc->preflow_handler = handler; +} +#endif + #endif diff --git a/include/linux/irqdomain.h b/include/linux/irqdomain.h index 9ee238ad29..1ef1031584 100644 --- a/include/linux/irqdomain.h +++ b/include/linux/irqdomain.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ /* * irq_domain - IRQ translation domains * @@ -33,18 +32,20 @@ #include #include #include -#include #include struct device_node; -struct fwnode_handle; struct irq_domain; +struct of_device_id; struct irq_chip; +#ifndef _LINUX_IRQ_H +typedef struct irq_chip __no_const irq_chip_no_const; +#endif struct irq_data; -struct irq_desc; struct cpumask; -struct seq_file; -struct irq_affinity_desc; + +/* Number of irqs reserved for a legacy isa controller */ +#define NUM_ISA_INTERRUPTS 16 #define IRQ_DOMAIN_IRQ_SPEC_PARAMS 16 @@ -74,15 +75,11 @@ struct irq_fwspec { enum irq_domain_bus_token { DOMAIN_BUS_ANY = 0, DOMAIN_BUS_WIRED, - DOMAIN_BUS_GENERIC_MSI, DOMAIN_BUS_PCI_MSI, DOMAIN_BUS_PLATFORM_MSI, DOMAIN_BUS_NEXUS, DOMAIN_BUS_IPI, DOMAIN_BUS_FSL_MC_MSI, - DOMAIN_BUS_TI_SCI_INTA_MSI, - DOMAIN_BUS_WAKEUP, - DOMAIN_BUS_VMD_MSI, }; /** @@ -110,21 +107,18 @@ struct irq_domain_ops { int (*xlate)(struct irq_domain *d, struct device_node *node, const u32 *intspec, unsigned int intsize, unsigned long *out_hwirq, unsigned int *out_type); + #ifdef CONFIG_IRQ_DOMAIN_HIERARCHY /* extended V2 interfaces to support hierarchy irq_domains */ int (*alloc)(struct irq_domain *d, unsigned int virq, unsigned int nr_irqs, 
void *arg); void (*free)(struct irq_domain *d, unsigned int virq, unsigned int nr_irqs); - int (*activate)(struct irq_domain *d, struct irq_data *irqd, bool reserve); + void (*activate)(struct irq_domain *d, struct irq_data *irq_data); void (*deactivate)(struct irq_domain *d, struct irq_data *irq_data); int (*translate)(struct irq_domain *d, struct irq_fwspec *fwspec, unsigned long *out_hwirq, unsigned int *out_type); #endif -#ifdef CONFIG_GENERIC_IRQ_DEBUGFS - void (*debug_show)(struct seq_file *m, struct irq_domain *d, - struct irq_data *irqd, int ind); -#endif }; extern struct irq_domain_ops irq_generic_chip_ops; @@ -139,21 +133,21 @@ struct irq_domain_chip_generic; * @host_data: private data pointer for use by owner. Not touched by irq_domain * core code. * @flags: host per irq_domain flags - * @mapcount: The number of mapped interrupts * * Optional elements - * @fwnode: Pointer to firmware node associated with the irq_domain. Pretty easy - * to swap it for the of_node via the irq_domain_get_of_node accessor + * @of_node: Pointer to device tree nodes associated with the irq_domain. Used + * when decoding device tree interrupt specifiers. * @gc: Pointer to a list of generic chips. There is a helper function for * setting up one or more generic chips for interrupt controllers * drivers using the generic chip library which uses this pointer. 
* @parent: Pointer to parent irq_domain to support hierarchy irq_domains * * Revmap data, used internally by irq_domain - * @revmap_size: Size of the linear map table @revmap[] + * @revmap_direct_max_irq: The largest hwirq that can be set for controllers that + * support direct mapping + * @revmap_size: Size of the linear map table @linear_revmap[] * @revmap_tree: Radix map tree for hwirqs that don't fit in the linear map - * @revmap_mutex: Lock for the revmap - * @revmap: Linear table of irq_data pointers + * @linear_revmap: Linear table of hwirq->virq reverse mappings */ struct irq_domain { struct list_head link; @@ -161,7 +155,6 @@ struct irq_domain { const struct irq_domain_ops *ops; void *host_data; unsigned int flags; - unsigned int mapcount; /* Optional data */ struct fwnode_handle *fwnode; @@ -173,10 +166,10 @@ struct irq_domain { /* reverse map data. The linear map gets appended to the irq_domain */ irq_hw_number_t hwirq_max; + unsigned int revmap_direct_max_irq; unsigned int revmap_size; struct radix_tree_root revmap_tree; - struct mutex revmap_mutex; - struct irq_data __rcu *revmap[]; + unsigned int linear_revmap[]; }; /* Irq domain flags */ @@ -184,8 +177,8 @@ enum { /* Irq domain is hierarchical */ IRQ_DOMAIN_FLAG_HIERARCHY = (1 << 0), - /* Irq domain name was allocated in __irq_domain_add() */ - IRQ_DOMAIN_NAME_ALLOCATED = (1 << 1), + /* Core calls alloc/free recursive through the domain hierarchy. */ + IRQ_DOMAIN_FLAG_AUTO_RECURSIVE = (1 << 1), /* Irq domain is an IPI domain with virq per cpu */ IRQ_DOMAIN_FLAG_IPI_PER_CPU = (1 << 2), @@ -193,22 +186,6 @@ enum { /* Irq domain is an IPI domain with single virq */ IRQ_DOMAIN_FLAG_IPI_SINGLE = (1 << 3), - /* Irq domain implements MSIs */ - IRQ_DOMAIN_FLAG_MSI = (1 << 4), - - /* Irq domain implements MSI remapping */ - IRQ_DOMAIN_FLAG_MSI_REMAP = (1 << 5), - - /* - * Quirk to handle MSI implementations which do not provide - * masking. 
Currently known to affect x86, but partially - * handled in core code. - */ - IRQ_DOMAIN_MSI_NOMASK_QUIRK = (1 << 6), - - /* Irq domain doesn't translate anything */ - IRQ_DOMAIN_FLAG_NO_MAP = (1 << 7), - /* * Flags starting from IRQ_DOMAIN_FLAG_NONCORE are reserved * for implementation specific purposes and ignored by the @@ -223,79 +200,40 @@ static inline struct device_node *irq_domain_get_of_node(struct irq_domain *d) } #ifdef CONFIG_IRQ_DOMAIN -struct fwnode_handle *__irq_domain_alloc_fwnode(unsigned int type, int id, - const char *name, phys_addr_t *pa); - -enum { - IRQCHIP_FWNODE_REAL, - IRQCHIP_FWNODE_NAMED, - IRQCHIP_FWNODE_NAMED_ID, -}; - -static inline -struct fwnode_handle *irq_domain_alloc_named_fwnode(const char *name) -{ - return __irq_domain_alloc_fwnode(IRQCHIP_FWNODE_NAMED, 0, name, NULL); -} - -static inline -struct fwnode_handle *irq_domain_alloc_named_id_fwnode(const char *name, int id) -{ - return __irq_domain_alloc_fwnode(IRQCHIP_FWNODE_NAMED_ID, id, name, - NULL); -} - -static inline struct fwnode_handle *irq_domain_alloc_fwnode(phys_addr_t *pa) -{ - return __irq_domain_alloc_fwnode(IRQCHIP_FWNODE_REAL, 0, NULL, pa); -} - +struct fwnode_handle *irq_domain_alloc_fwnode(void *data); void irq_domain_free_fwnode(struct fwnode_handle *fwnode); -struct irq_domain *__irq_domain_add(struct fwnode_handle *fwnode, unsigned int size, +struct irq_domain *__irq_domain_add(struct fwnode_handle *fwnode, int size, irq_hw_number_t hwirq_max, int direct_max, const struct irq_domain_ops *ops, void *host_data); -struct irq_domain *irq_domain_create_simple(struct fwnode_handle *fwnode, - unsigned int size, - unsigned int first_irq, - const struct irq_domain_ops *ops, - void *host_data); +struct irq_domain *irq_domain_add_simple(struct device_node *of_node, + unsigned int size, + unsigned int first_irq, + const struct irq_domain_ops *ops, + void *host_data); struct irq_domain *irq_domain_add_legacy(struct device_node *of_node, unsigned int size, unsigned int 
first_irq, irq_hw_number_t first_hwirq, const struct irq_domain_ops *ops, void *host_data); -struct irq_domain *irq_domain_create_legacy(struct fwnode_handle *fwnode, - unsigned int size, - unsigned int first_irq, - irq_hw_number_t first_hwirq, - const struct irq_domain_ops *ops, - void *host_data); extern struct irq_domain *irq_find_matching_fwspec(struct irq_fwspec *fwspec, enum irq_domain_bus_token bus_token); -extern bool irq_domain_check_msi_remap(void); extern void irq_set_default_host(struct irq_domain *host); -extern struct irq_domain *irq_get_default_host(void); extern int irq_domain_alloc_descs(int virq, unsigned int nr_irqs, irq_hw_number_t hwirq, int node, - const struct irq_affinity_desc *affinity); + const struct cpumask *affinity); static inline struct fwnode_handle *of_node_to_fwnode(struct device_node *node) { return node ? &node->fwnode : NULL; } -extern const struct fwnode_operations irqchip_fwnode_ops; - static inline bool is_fwnode_irqchip(struct fwnode_handle *fwnode) { - return fwnode && fwnode->ops == &irqchip_fwnode_ops; + return fwnode && fwnode->type == FWNODE_IRQCHIP; } -extern void irq_domain_update_bus_token(struct irq_domain *domain, - enum irq_domain_bus_token bus_token); - static inline struct irq_domain *irq_find_matching_fwnode(struct fwnode_handle *fwnode, enum irq_domain_bus_token bus_token) @@ -315,22 +253,7 @@ static inline struct irq_domain *irq_find_matching_host(struct device_node *node static inline struct irq_domain *irq_find_host(struct device_node *node) { - struct irq_domain *d; - - d = irq_find_matching_host(node, DOMAIN_BUS_WIRED); - if (!d) - d = irq_find_matching_host(node, DOMAIN_BUS_ANY); - - return d; -} - -static inline struct irq_domain *irq_domain_add_simple(struct device_node *of_node, - unsigned int size, - unsigned int first_irq, - const struct irq_domain_ops *ops, - void *host_data) -{ - return irq_domain_create_simple(of_node_to_fwnode(of_node), size, first_irq, ops, host_data); + return 
irq_find_matching_host(node, DOMAIN_BUS_ANY); } /** @@ -347,8 +270,6 @@ static inline struct irq_domain *irq_domain_add_linear(struct device_node *of_no { return __irq_domain_add(of_node_to_fwnode(of_node), size, size, 0, ops, host_data); } - -#ifdef CONFIG_IRQ_DOMAIN_NOMAP static inline struct irq_domain *irq_domain_add_nomap(struct device_node *of_node, unsigned int max_irq, const struct irq_domain_ops *ops, @@ -356,10 +277,14 @@ static inline struct irq_domain *irq_domain_add_nomap(struct device_node *of_nod { return __irq_domain_add(of_node_to_fwnode(of_node), 0, max_irq, max_irq, ops, host_data); } - -extern unsigned int irq_create_direct_mapping(struct irq_domain *host); -#endif - +static inline struct irq_domain *irq_domain_add_legacy_isa( + struct device_node *of_node, + const struct irq_domain_ops *ops, + void *host_data) +{ + return irq_domain_add_legacy(of_node, NUM_ISA_INTERRUPTS, 0, 0, ops, + host_data); +} static inline struct irq_domain *irq_domain_add_tree(struct device_node *of_node, const struct irq_domain_ops *ops, void *host_data) @@ -389,49 +314,40 @@ extern int irq_domain_associate(struct irq_domain *domain, unsigned int irq, extern void irq_domain_associate_many(struct irq_domain *domain, unsigned int irq_base, irq_hw_number_t hwirq_base, int count); +extern void irq_domain_disassociate(struct irq_domain *domain, + unsigned int irq); -extern unsigned int irq_create_mapping_affinity(struct irq_domain *host, - irq_hw_number_t hwirq, - const struct irq_affinity_desc *affinity); +extern unsigned int irq_create_mapping(struct irq_domain *host, + irq_hw_number_t hwirq); extern unsigned int irq_create_fwspec_mapping(struct irq_fwspec *fwspec); extern void irq_dispose_mapping(unsigned int virq); -static inline unsigned int irq_create_mapping(struct irq_domain *host, - irq_hw_number_t hwirq) -{ - return irq_create_mapping_affinity(host, hwirq, NULL); -} - -extern struct irq_desc *__irq_resolve_mapping(struct irq_domain *domain, - irq_hw_number_t 
hwirq, - unsigned int *irq); - -static inline struct irq_desc *irq_resolve_mapping(struct irq_domain *domain, - irq_hw_number_t hwirq) -{ - return __irq_resolve_mapping(domain, hwirq, NULL); -} - /** - * irq_find_mapping() - Find a linux irq from a hw irq number. + * irq_linear_revmap() - Find a linux irq from a hw irq number. * @domain: domain owning this hardware interrupt * @hwirq: hardware irq number in that domain space + * + * This is a fast path alternative to irq_find_mapping() that can be + * called directly by irq controller code to save a handful of + * instructions. It is always safe to call, but won't find irqs mapped + * using the radix tree. */ -static inline unsigned int irq_find_mapping(struct irq_domain *domain, - irq_hw_number_t hwirq) -{ - unsigned int irq; - - if (__irq_resolve_mapping(domain, hwirq, &irq)) - return irq; - - return 0; -} - static inline unsigned int irq_linear_revmap(struct irq_domain *domain, irq_hw_number_t hwirq) { - return irq_find_mapping(domain, hwirq); + return hwirq < domain->revmap_size ? 
domain->linear_revmap[hwirq] : 0; +} +extern unsigned int irq_find_mapping(struct irq_domain *host, + irq_hw_number_t hwirq); +extern unsigned int irq_create_direct_mapping(struct irq_domain *host); +extern int irq_create_strict_mappings(struct irq_domain *domain, + unsigned int irq_base, + irq_hw_number_t hwirq_base, int count); + +static inline int irq_create_identity_mapping(struct irq_domain *host, + irq_hw_number_t hwirq) +{ + return irq_create_strict_mappings(host, hwirq, hwirq, 1); } extern const struct irq_domain_ops irq_domain_simple_ops; @@ -447,16 +363,6 @@ int irq_domain_xlate_onetwocell(struct irq_domain *d, struct device_node *ctrlr, const u32 *intspec, unsigned int intsize, irq_hw_number_t *out_hwirq, unsigned int *out_type); -int irq_domain_translate_twocell(struct irq_domain *d, - struct irq_fwspec *fwspec, - unsigned long *out_hwirq, - unsigned int *out_type); - -int irq_domain_translate_onecell(struct irq_domain *d, - struct irq_fwspec *fwspec, - unsigned long *out_hwirq, - unsigned int *out_type); - /* IPI functions */ int irq_reserve_ipi(struct irq_domain *domain, const struct cpumask *dest); int irq_destroy_ipi(unsigned int irq, const struct cpumask *dest); @@ -468,7 +374,6 @@ extern void irq_domain_set_info(struct irq_domain *domain, unsigned int virq, irq_hw_number_t hwirq, struct irq_chip *chip, void *chip_data, irq_flow_handler_t handler, void *handler_data, const char *handler_name); -extern void irq_domain_reset_irq_data(struct irq_data *irq_data); #ifdef CONFIG_IRQ_DOMAIN_HIERARCHY extern struct irq_domain *irq_domain_create_hierarchy(struct irq_domain *parent, unsigned int flags, unsigned int size, @@ -489,10 +394,9 @@ static inline struct irq_domain *irq_domain_add_hierarchy(struct irq_domain *par extern int __irq_domain_alloc_irqs(struct irq_domain *domain, int irq_base, unsigned int nr_irqs, int node, void *arg, - bool realloc, - const struct irq_affinity_desc *affinity); + bool realloc, const struct cpumask *affinity); extern void 
irq_domain_free_irqs(unsigned int virq, unsigned int nr_irqs); -extern int irq_domain_activate_irq(struct irq_data *irq_data, bool early); +extern void irq_domain_activate_irq(struct irq_data *irq_data); extern void irq_domain_deactivate_irq(struct irq_data *irq_data); static inline int irq_domain_alloc_irqs(struct irq_domain *domain, @@ -502,7 +406,7 @@ static inline int irq_domain_alloc_irqs(struct irq_domain *domain, NULL); } -extern int irq_domain_alloc_irqs_hierarchy(struct irq_domain *domain, +extern int irq_domain_alloc_irqs_recursive(struct irq_domain *domain, unsigned int irq_base, unsigned int nr_irqs, void *arg); extern int irq_domain_set_hwirq_and_chip(struct irq_domain *domain, @@ -510,15 +414,13 @@ extern int irq_domain_set_hwirq_and_chip(struct irq_domain *domain, irq_hw_number_t hwirq, struct irq_chip *chip, void *chip_data); +extern void irq_domain_reset_irq_data(struct irq_data *irq_data); extern void irq_domain_free_irqs_common(struct irq_domain *domain, unsigned int virq, unsigned int nr_irqs); extern void irq_domain_free_irqs_top(struct irq_domain *domain, unsigned int virq, unsigned int nr_irqs); -extern int irq_domain_push_irq(struct irq_domain *domain, int virq, void *arg); -extern int irq_domain_pop_irq(struct irq_domain *domain, int virq); - extern int irq_domain_alloc_irqs_parent(struct irq_domain *domain, unsigned int irq_base, unsigned int nr_irqs, void *arg); @@ -527,9 +429,6 @@ extern void irq_domain_free_irqs_parent(struct irq_domain *domain, unsigned int irq_base, unsigned int nr_irqs); -extern int irq_domain_disconnect_hierarchy(struct irq_domain *domain, - unsigned int virq); - static inline bool irq_domain_is_hierarchy(struct irq_domain *domain) { return domain->flags & IRQ_DOMAIN_FLAG_HIERARCHY; @@ -550,20 +449,9 @@ static inline bool irq_domain_is_ipi_single(struct irq_domain *domain) { return domain->flags & IRQ_DOMAIN_FLAG_IPI_SINGLE; } - -static inline bool irq_domain_is_msi(struct irq_domain *domain) -{ - return 
domain->flags & IRQ_DOMAIN_FLAG_MSI; -} - -static inline bool irq_domain_is_msi_remap(struct irq_domain *domain) -{ - return domain->flags & IRQ_DOMAIN_FLAG_MSI_REMAP; -} - -extern bool irq_domain_hierarchical_is_msi_remap(struct irq_domain *domain); - #else /* CONFIG_IRQ_DOMAIN_HIERARCHY */ +static inline void irq_domain_activate_irq(struct irq_data *data) { } +static inline void irq_domain_deactivate_irq(struct irq_data *data) { } static inline int irq_domain_alloc_irqs(struct irq_domain *domain, unsigned int nr_irqs, int node, void *arg) { @@ -592,35 +480,17 @@ static inline bool irq_domain_is_ipi_single(struct irq_domain *domain) { return false; } - -static inline bool irq_domain_is_msi(struct irq_domain *domain) -{ - return false; -} - -static inline bool irq_domain_is_msi_remap(struct irq_domain *domain) -{ - return false; -} - -static inline bool -irq_domain_hierarchical_is_msi_remap(struct irq_domain *domain) -{ - return false; -} #endif /* CONFIG_IRQ_DOMAIN_HIERARCHY */ #else /* CONFIG_IRQ_DOMAIN */ static inline void irq_dispose_mapping(unsigned int virq) { } +static inline void irq_domain_activate_irq(struct irq_data *data) { } +static inline void irq_domain_deactivate_irq(struct irq_data *data) { } static inline struct irq_domain *irq_find_matching_fwnode( struct fwnode_handle *fwnode, enum irq_domain_bus_token bus_token) { return NULL; } -static inline bool irq_domain_check_msi_remap(void) -{ - return false; -} #endif /* !CONFIG_IRQ_DOMAIN */ #endif /* _LINUX_IRQDOMAIN_H */ diff --git a/include/linux/irqflags.h b/include/linux/irqflags.h index 600c10da32..5dd1272d1a 100644 --- a/include/linux/irqflags.h +++ b/include/linux/irqflags.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ /* * include/linux/irqflags.h * @@ -14,130 +13,35 @@ #include #include -#include - -/* Currently lockdep_softirqs_on/off is used only by lockdep */ -#ifdef CONFIG_PROVE_LOCKING - extern void lockdep_softirqs_on(unsigned long ip); - extern void 
lockdep_softirqs_off(unsigned long ip); - extern void lockdep_hardirqs_on_prepare(unsigned long ip); - extern void lockdep_hardirqs_on(unsigned long ip); - extern void lockdep_hardirqs_off(unsigned long ip); -#else - static inline void lockdep_softirqs_on(unsigned long ip) { } - static inline void lockdep_softirqs_off(unsigned long ip) { } - static inline void lockdep_hardirqs_on_prepare(unsigned long ip) { } - static inline void lockdep_hardirqs_on(unsigned long ip) { } - static inline void lockdep_hardirqs_off(unsigned long ip) { } -#endif #ifdef CONFIG_TRACE_IRQFLAGS - -/* Per-task IRQ trace events information. */ -struct irqtrace_events { - unsigned int irq_events; - unsigned long hardirq_enable_ip; - unsigned long hardirq_disable_ip; - unsigned int hardirq_enable_event; - unsigned int hardirq_disable_event; - unsigned long softirq_disable_ip; - unsigned long softirq_enable_ip; - unsigned int softirq_disable_event; - unsigned int softirq_enable_event; -}; - -DECLARE_PER_CPU(int, hardirqs_enabled); -DECLARE_PER_CPU(int, hardirq_context); - -extern void trace_hardirqs_on_prepare(void); -extern void trace_hardirqs_off_finish(void); -extern void trace_hardirqs_on(void); -extern void trace_hardirqs_off(void); - -# define lockdep_hardirq_context() (raw_cpu_read(hardirq_context)) -# define lockdep_softirq_context(p) ((p)->softirq_context) -# define lockdep_hardirqs_enabled() (this_cpu_read(hardirqs_enabled)) -# define lockdep_softirqs_enabled(p) ((p)->softirqs_enabled) -# define lockdep_hardirq_enter() \ -do { \ - if (__this_cpu_inc_return(hardirq_context) == 1)\ - current->hardirq_threaded = 0; \ -} while (0) -# define lockdep_hardirq_threaded() \ -do { \ - current->hardirq_threaded = 1; \ -} while (0) -# define lockdep_hardirq_exit() \ -do { \ - __this_cpu_dec(hardirq_context); \ -} while (0) -# define lockdep_softirq_enter() \ -do { \ - current->softirq_context++; \ -} while (0) -# define lockdep_softirq_exit() \ -do { \ - current->softirq_context--; \ -} while (0) 
- -# define lockdep_hrtimer_enter(__hrtimer) \ -({ \ - bool __expires_hardirq = true; \ - \ - if (!__hrtimer->is_hard) { \ - current->irq_config = 1; \ - __expires_hardirq = false; \ - } \ - __expires_hardirq; \ -}) - -# define lockdep_hrtimer_exit(__expires_hardirq) \ - do { \ - if (!__expires_hardirq) \ - current->irq_config = 0; \ - } while (0) - -# define lockdep_posixtimer_enter() \ - do { \ - current->irq_config = 1; \ - } while (0) - -# define lockdep_posixtimer_exit() \ - do { \ - current->irq_config = 0; \ - } while (0) - -# define lockdep_irq_work_enter(_flags) \ - do { \ - if (!((_flags) & IRQ_WORK_HARD_IRQ)) \ - current->irq_config = 1; \ - } while (0) -# define lockdep_irq_work_exit(_flags) \ - do { \ - if (!((_flags) & IRQ_WORK_HARD_IRQ)) \ - current->irq_config = 0; \ - } while (0) - + extern void trace_softirqs_on(unsigned long ip); + extern void trace_softirqs_off(unsigned long ip); + extern void trace_hardirqs_on(void); + extern void trace_hardirqs_off(void); +# define trace_hardirq_context(p) ((p)->hardirq_context) +# define trace_softirq_context(p) ((p)->softirq_context) +# define trace_hardirqs_enabled(p) ((p)->hardirqs_enabled) +# define trace_softirqs_enabled(p) ((p)->softirqs_enabled) +# define trace_hardirq_enter() do { current->hardirq_context++; } while (0) +# define trace_hardirq_exit() do { current->hardirq_context--; } while (0) +# define lockdep_softirq_enter() do { current->softirq_context++; } while (0) +# define lockdep_softirq_exit() do { current->softirq_context--; } while (0) +# define INIT_TRACE_IRQFLAGS .softirqs_enabled = 1, #else -# define trace_hardirqs_on_prepare() do { } while (0) -# define trace_hardirqs_off_finish() do { } while (0) -# define trace_hardirqs_on() do { } while (0) -# define trace_hardirqs_off() do { } while (0) -# define lockdep_hardirq_context() 0 -# define lockdep_softirq_context(p) 0 -# define lockdep_hardirqs_enabled() 0 -# define lockdep_softirqs_enabled(p) 0 -# define lockdep_hardirq_enter() do { } 
while (0) -# define lockdep_hardirq_threaded() do { } while (0) -# define lockdep_hardirq_exit() do { } while (0) -# define lockdep_softirq_enter() do { } while (0) -# define lockdep_softirq_exit() do { } while (0) -# define lockdep_hrtimer_enter(__hrtimer) false -# define lockdep_hrtimer_exit(__context) do { } while (0) -# define lockdep_posixtimer_enter() do { } while (0) -# define lockdep_posixtimer_exit() do { } while (0) -# define lockdep_irq_work_enter(__work) do { } while (0) -# define lockdep_irq_work_exit(__work) do { } while (0) +# define trace_hardirqs_on() do { } while (0) +# define trace_hardirqs_off() do { } while (0) +# define trace_softirqs_on(ip) do { } while (0) +# define trace_softirqs_off(ip) do { } while (0) +# define trace_hardirq_context(p) 0 +# define trace_softirq_context(p) 0 +# define trace_hardirqs_enabled(p) 0 +# define trace_softirqs_enabled(p) 0 +# define trace_hardirq_enter() do { } while (0) +# define trace_hardirq_exit() do { } while (0) +# define lockdep_softirq_enter() do { } while (0) +# define lockdep_softirq_exit() do { } while (0) +# define INIT_TRACE_IRQFLAGS #endif #if defined(CONFIG_IRQSOFF_TRACER) || \ @@ -149,17 +53,6 @@ do { \ # define start_critical_timings() do { } while (0) #endif -#ifdef CONFIG_DEBUG_IRQFLAGS -extern void warn_bogus_irq_restore(void); -#define raw_check_bogus_irq_restore() \ - do { \ - if (unlikely(!arch_irqs_disabled())) \ - warn_bogus_irq_restore(); \ - } while (0) -#else -#define raw_check_bogus_irq_restore() do { } while (0) -#endif - /* * Wrap the arch provided IRQ routines to provide appropriate checks. */ @@ -173,7 +66,6 @@ extern void warn_bogus_irq_restore(void); #define raw_local_irq_restore(flags) \ do { \ typecheck(unsigned long, flags); \ - raw_check_bogus_irq_restore(); \ arch_local_irq_restore(flags); \ } while (0) #define raw_local_save_flags(flags) \ @@ -194,33 +86,26 @@ extern void warn_bogus_irq_restore(void); * if !TRACE_IRQFLAGS. 
*/ #ifdef CONFIG_TRACE_IRQFLAGS - -#define local_irq_enable() \ - do { \ - trace_hardirqs_on(); \ - raw_local_irq_enable(); \ - } while (0) - -#define local_irq_disable() \ - do { \ - bool was_disabled = raw_irqs_disabled();\ - raw_local_irq_disable(); \ - if (!was_disabled) \ - trace_hardirqs_off(); \ - } while (0) - +#define local_irq_enable() \ + do { trace_hardirqs_on(); raw_local_irq_enable(); } while (0) +#define local_irq_disable() \ + do { raw_local_irq_disable(); trace_hardirqs_off(); } while (0) #define local_irq_save(flags) \ do { \ raw_local_irq_save(flags); \ - if (!raw_irqs_disabled_flags(flags)) \ - trace_hardirqs_off(); \ + trace_hardirqs_off(); \ } while (0) + #define local_irq_restore(flags) \ do { \ - if (!raw_irqs_disabled_flags(flags)) \ + if (raw_irqs_disabled_flags(flags)) { \ + raw_local_irq_restore(flags); \ + trace_hardirqs_off(); \ + } else { \ trace_hardirqs_on(); \ - raw_local_irq_restore(flags); \ + raw_local_irq_restore(flags); \ + } \ } while (0) #define safe_halt() \ @@ -234,7 +119,10 @@ extern void warn_bogus_irq_restore(void); #define local_irq_enable() do { raw_local_irq_enable(); } while (0) #define local_irq_disable() do { raw_local_irq_disable(); } while (0) -#define local_irq_save(flags) do { raw_local_irq_save(flags); } while (0) +#define local_irq_save(flags) \ + do { \ + raw_local_irq_save(flags); \ + } while (0) #define local_irq_restore(flags) do { raw_local_irq_restore(flags); } while (0) #define safe_halt() do { raw_safe_halt(); } while (0) diff --git a/include/linux/irqhandler.h b/include/linux/irqhandler.h index c30f454a95..661bed0ed1 100644 --- a/include/linux/irqhandler.h +++ b/include/linux/irqhandler.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_IRQHANDLER_H #define _LINUX_IRQHANDLER_H @@ -10,5 +9,6 @@ struct irq_desc; struct irq_data; typedef void (*irq_flow_handler_t)(struct irq_desc *desc); +typedef void (*irq_preflow_handler_t)(struct irq_data *data); #endif diff --git 
a/include/linux/irqnr.h b/include/linux/irqnr.h index 3496baa0b0..9669bf9d4f 100644 --- a/include/linux/irqnr.h +++ b/include/linux/irqnr.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_IRQNR_H #define _LINUX_IRQNR_H diff --git a/include/linux/irqreturn.h b/include/linux/irqreturn.h index bd4c066ad3..eb1bdcf95f 100644 --- a/include/linux/irqreturn.h +++ b/include/linux/irqreturn.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_IRQRETURN_H #define _LINUX_IRQRETURN_H diff --git a/include/linux/isa.h b/include/linux/isa.h index e309631909..f2d0258414 100644 --- a/include/linux/isa.h +++ b/include/linux/isa.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ /* * ISA bus. */ @@ -13,7 +12,7 @@ struct isa_driver { int (*match)(struct device *, unsigned int); int (*probe)(struct device *, unsigned int); - void (*remove)(struct device *, unsigned int); + int (*remove)(struct device *, unsigned int); void (*shutdown)(struct device *, unsigned int); int (*suspend)(struct device *, unsigned int, pm_message_t); int (*resume)(struct device *, unsigned int); diff --git a/include/linux/isapnp.h b/include/linux/isapnp.h index dba18c9584..3c77bf9b1e 100644 --- a/include/linux/isapnp.h +++ b/include/linux/isapnp.h @@ -1,7 +1,22 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * ISA Plug & Play support * Copyright (c) by Jaroslav Kysela + * + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. + * */ #ifndef LINUX_ISAPNP_H @@ -75,6 +90,9 @@ static inline int isapnp_proc_done(void) { return 0; } #endif /* compat */ +struct pnp_card *pnp_find_card(unsigned short vendor, + unsigned short device, + struct pnp_card *from); struct pnp_dev *pnp_find_dev(struct pnp_card *card, unsigned short vendor, unsigned short function, @@ -89,6 +107,9 @@ static inline int isapnp_cfg_end(void) { return -ENODEV; } static inline unsigned char isapnp_read_byte(unsigned char idx) { return 0xff; } static inline void isapnp_write_byte(unsigned char idx, unsigned char val) { ; } +static inline struct pnp_card *pnp_find_card(unsigned short vendor, + unsigned short device, + struct pnp_card *from) { return NULL; } static inline struct pnp_dev *pnp_find_dev(struct pnp_card *card, unsigned short vendor, unsigned short function, diff --git a/include/linux/iscsi_boot_sysfs.h b/include/linux/iscsi_boot_sysfs.h index 5f244d3f14..10923d7304 100644 --- a/include/linux/iscsi_boot_sysfs.h +++ b/include/linux/iscsi_boot_sysfs.h @@ -1,9 +1,17 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * Export the iSCSI boot info to userland via sysfs. * * Copyright (C) 2010 Red Hat, Inc. All rights reserved. * Copyright (C) 2010 Mike Christie + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License v2.0 as published by + * the Free Software Foundation + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
*/ #ifndef _ISCSI_BOOT_SYSFS_ #define _ISCSI_BOOT_SYSFS_ diff --git a/include/linux/iscsi_ibft.h b/include/linux/iscsi_ibft.h index 790e7fcfc1..605cc5c333 100644 --- a/include/linux/iscsi_ibft.h +++ b/include/linux/iscsi_ibft.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright 2007 Red Hat, Inc. * by Peter Jones @@ -8,27 +7,40 @@ * by Konrad Rzeszutek * * This code exposes the iSCSI Boot Format Table to userland via sysfs. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License v2.0 as published by + * the Free Software Foundation + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. */ #ifndef ISCSI_IBFT_H #define ISCSI_IBFT_H -#include +#include /* - * Physical location of iSCSI Boot Format Table. - * If the value is 0 there is no iBFT on the machine. + * Logical location of iSCSI Boot Format Table. + * If the value is NULL there is no iBFT on the machine. */ -extern phys_addr_t ibft_phys_addr; +extern struct acpi_table_ibft *ibft_addr; /* * Routine used to find and reserve the iSCSI Boot Format Table. The - * physical address is set in the ibft_phys_addr variable. + * mapped address is set in the ibft_addr variable. 
*/ #ifdef CONFIG_ISCSI_IBFT_FIND -void reserve_ibft_region(void); +unsigned long find_ibft_region(unsigned long *sizep); #else -static inline void reserve_ibft_region(void) {} +static inline unsigned long find_ibft_region(unsigned long *sizep) +{ + *sizep = 0; + return 0; +} #endif #endif /* ISCSI_IBFT_H */ diff --git a/include/linux/isdn.h b/include/linux/isdn.h new file mode 100644 index 0000000000..df97c8444f --- /dev/null +++ b/include/linux/isdn.h @@ -0,0 +1,473 @@ +/* $Id: isdn.h,v 1.125.2.3 2004/02/10 01:07:14 keil Exp $ + * + * Main header for the Linux ISDN subsystem (linklevel). + * + * Copyright 1994,95,96 by Fritz Elfert (fritz@isdn4linux.de) + * Copyright 1995,96 by Thinking Objects Software GmbH Wuerzburg + * Copyright 1995,96 by Michael Hipp (Michael.Hipp@student.uni-tuebingen.de) + * + * This software may be used and distributed according to the terms + * of the GNU General Public License, incorporated herein by reference. + * + */ +#ifndef __ISDN_H__ +#define __ISDN_H__ + + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define ISDN_TTY_MAJOR 43 +#define ISDN_TTYAUX_MAJOR 44 +#define ISDN_MAJOR 45 + +/* The minor-devicenumbers for Channel 0 and 1 are used as arguments for + * physical Channel-Mapping, so they MUST NOT be changed without changing + * the correspondent code in isdn.c + */ + +#define ISDN_MINOR_B 0 +#define ISDN_MINOR_BMAX (ISDN_MAX_CHANNELS-1) +#define ISDN_MINOR_CTRL 64 +#define ISDN_MINOR_CTRLMAX (64 + (ISDN_MAX_CHANNELS-1)) +#define ISDN_MINOR_PPP 128 +#define ISDN_MINOR_PPPMAX (128 + (ISDN_MAX_CHANNELS-1)) +#define ISDN_MINOR_STATUS 255 + +#ifdef CONFIG_ISDN_PPP + +#ifdef CONFIG_ISDN_PPP_VJ +# include +#endif + +#include +#include + +#include +#endif + +#ifdef CONFIG_ISDN_X25 +# include +#endif + +#include + +#define ISDN_DRVIOCTL_MASK 0x7f /* 
Mask for Device-ioctl */ + +/* Until now unused */ +#define ISDN_SERVICE_VOICE 1 +#define ISDN_SERVICE_AB 1<<1 +#define ISDN_SERVICE_X21 1<<2 +#define ISDN_SERVICE_G4 1<<3 +#define ISDN_SERVICE_BTX 1<<4 +#define ISDN_SERVICE_DFUE 1<<5 +#define ISDN_SERVICE_X25 1<<6 +#define ISDN_SERVICE_TTX 1<<7 +#define ISDN_SERVICE_MIXED 1<<8 +#define ISDN_SERVICE_FW 1<<9 +#define ISDN_SERVICE_GTEL 1<<10 +#define ISDN_SERVICE_BTXN 1<<11 +#define ISDN_SERVICE_BTEL 1<<12 + +/* Macros checking plain usage */ +#define USG_NONE(x) ((x & ISDN_USAGE_MASK)==ISDN_USAGE_NONE) +#define USG_RAW(x) ((x & ISDN_USAGE_MASK)==ISDN_USAGE_RAW) +#define USG_MODEM(x) ((x & ISDN_USAGE_MASK)==ISDN_USAGE_MODEM) +#define USG_VOICE(x) ((x & ISDN_USAGE_MASK)==ISDN_USAGE_VOICE) +#define USG_NET(x) ((x & ISDN_USAGE_MASK)==ISDN_USAGE_NET) +#define USG_FAX(x) ((x & ISDN_USAGE_MASK)==ISDN_USAGE_FAX) +#define USG_OUTGOING(x) ((x & ISDN_USAGE_OUTGOING)==ISDN_USAGE_OUTGOING) +#define USG_MODEMORVOICE(x) (((x & ISDN_USAGE_MASK)==ISDN_USAGE_MODEM) || \ + ((x & ISDN_USAGE_MASK)==ISDN_USAGE_VOICE) ) + +/* Timer-delays and scheduling-flags */ +#define ISDN_TIMER_RES 4 /* Main Timer-Resolution */ +#define ISDN_TIMER_02SEC (HZ/ISDN_TIMER_RES/5) /* Slow-Timer1 .2 sec */ +#define ISDN_TIMER_1SEC (HZ/ISDN_TIMER_RES) /* Slow-Timer2 1 sec */ +#define ISDN_TIMER_RINGING 5 /* tty RINGs = ISDN_TIMER_1SEC * this factor */ +#define ISDN_TIMER_KEEPINT 10 /* Cisco-Keepalive = ISDN_TIMER_1SEC * this factor */ +#define ISDN_TIMER_MODEMREAD 1 +#define ISDN_TIMER_MODEMPLUS 2 +#define ISDN_TIMER_MODEMRING 4 +#define ISDN_TIMER_MODEMXMIT 8 +#define ISDN_TIMER_NETDIAL 16 +#define ISDN_TIMER_NETHANGUP 32 +#define ISDN_TIMER_CARRIER 256 /* Wait for Carrier */ +#define ISDN_TIMER_FAST (ISDN_TIMER_MODEMREAD | ISDN_TIMER_MODEMPLUS | \ + ISDN_TIMER_MODEMXMIT) +#define ISDN_TIMER_SLOW (ISDN_TIMER_MODEMRING | ISDN_TIMER_NETHANGUP | \ + ISDN_TIMER_NETDIAL | ISDN_TIMER_CARRIER) + +/* Timeout-Values for isdn_net_dial() */ +#define 
ISDN_TIMER_DTIMEOUT10 (10*HZ/(ISDN_TIMER_02SEC*(ISDN_TIMER_RES+1))) +#define ISDN_TIMER_DTIMEOUT15 (15*HZ/(ISDN_TIMER_02SEC*(ISDN_TIMER_RES+1))) +#define ISDN_TIMER_DTIMEOUT60 (60*HZ/(ISDN_TIMER_02SEC*(ISDN_TIMER_RES+1))) + +/* GLOBAL_FLAGS */ +#define ISDN_GLOBAL_STOPPED 1 + +/*=================== Start of ip-over-ISDN stuff =========================*/ + +/* Feature- and status-flags for a net-interface */ +#define ISDN_NET_CONNECTED 0x01 /* Bound to ISDN-Channel */ +#define ISDN_NET_SECURE 0x02 /* Accept calls from phonelist only */ +#define ISDN_NET_CALLBACK 0x04 /* activate callback */ +#define ISDN_NET_CBHUP 0x08 /* hangup before callback */ +#define ISDN_NET_CBOUT 0x10 /* remote machine does callback */ + +#define ISDN_NET_MAGIC 0x49344C02 /* for paranoia-checking */ + +/* Phone-list-element */ +typedef struct { + void *next; + char num[ISDN_MSNLEN]; +} isdn_net_phone; + +/* + Principles when extending structures for generic encapsulation protocol + ("concap") support: + - Stuff which is hardware specific (here i4l-specific) goes in + the netdev -> local structure (here: isdn_net_local) + - Stuff which is encapsulation protocol specific goes in the structure + which holds the linux device structure (here: isdn_net_device) +*/ + +/* Local interface-data */ +typedef struct isdn_net_local_s { + ulong magic; + struct net_device_stats stats; /* Ethernet Statistics */ + int isdn_device; /* Index to isdn-device */ + int isdn_channel; /* Index to isdn-channel */ + int ppp_slot; /* PPPD device slot number */ + int pre_device; /* Preselected isdn-device */ + int pre_channel; /* Preselected isdn-channel */ + int exclusive; /* If non-zero idx to reserved chan.*/ + int flags; /* Connection-flags */ + int dialretry; /* Counter for Dialout-retries */ + int dialmax; /* Max. 
Number of Dial-retries */ + int cbdelay; /* Delay before Callback starts */ + int dtimer; /* Timeout-counter for dialing */ + char msn[ISDN_MSNLEN]; /* MSNs/EAZs for this interface */ + u_char cbhup; /* Flag: Reject Call before Callback*/ + u_char dialstate; /* State for dialing */ + u_char p_encap; /* Packet encapsulation */ + /* 0 = Ethernet over ISDN */ + /* 1 = RAW-IP */ + /* 2 = IP with type field */ + u_char l2_proto; /* Layer-2-protocol */ + /* See ISDN_PROTO_L2..-constants in */ + /* isdnif.h */ + /* 0 = X75/LAPB with I-Frames */ + /* 1 = X75/LAPB with UI-Frames */ + /* 2 = X75/LAPB with BUI-Frames */ + /* 3 = HDLC */ + u_char l3_proto; /* Layer-3-protocol */ + /* See ISDN_PROTO_L3..-constants in */ + /* isdnif.h */ + /* 0 = Transparent */ + int huptimer; /* Timeout-counter for auto-hangup */ + int charge; /* Counter for charging units */ + ulong chargetime; /* Timer for Charging info */ + int hupflags; /* Flags for charge-unit-hangup: */ + /* bit0: chargeint is invalid */ + /* bit1: Getting charge-interval */ + /* bit2: Do charge-unit-hangup */ + /* bit3: Do hangup even on incoming */ + int outgoing; /* Flag: outgoing call */ + int onhtime; /* Time to keep link up */ + int chargeint; /* Interval between charge-infos */ + int onum; /* Flag: at least 1 outgoing number */ + int cps; /* current speed of this interface */ + int transcount; /* byte-counter for cps-calculation */ + int sqfull; /* Flag: netdev-queue overloaded */ + ulong sqfull_stamp; /* Start-Time of overload */ + ulong slavedelay; /* Dynamic bundling delaytime */ + int triggercps; /* BogoCPS needed for trigger slave */ + isdn_net_phone *phone[2]; /* List of remote-phonenumbers */ + /* phone[0] = Incoming Numbers */ + /* phone[1] = Outgoing Numbers */ + isdn_net_phone *dial; /* Pointer to dialed number */ + struct net_device *master; /* Ptr to Master device for slaves */ + struct net_device *slave; /* Ptr to Slave device for masters */ + struct isdn_net_local_s *next; /* Ptr to next link in 
bundle */ + struct isdn_net_local_s *last; /* Ptr to last link in bundle */ + struct isdn_net_dev_s *netdev; /* Ptr to netdev */ + struct sk_buff_head super_tx_queue; /* List of supervisory frames to */ + /* be transmitted asap */ + atomic_t frame_cnt; /* number of frames currently */ + /* queued in HL driver */ + /* Ptr to orig. hard_header_cache */ + spinlock_t xmit_lock; /* used to protect the xmit path of */ + /* a particular channel (including */ + /* the frame_cnt */ + + int pppbind; /* ippp device for bindings */ + int dialtimeout; /* How long shall we try on dialing? (jiffies) */ + int dialwait; /* How long shall we wait after failed attempt? (jiffies) */ + ulong dialstarted; /* jiffies of first dialing-attempt */ + ulong dialwait_timer; /* jiffies of earliest next dialing-attempt */ + int huptimeout; /* How long will the connection be up? (seconds) */ +#ifdef CONFIG_ISDN_X25 + struct concap_device_ops *dops; /* callbacks used by encapsulator */ +#endif + /* use an own struct for that in later versions */ + ulong cisco_myseq; /* Local keepalive seq. for Cisco */ + ulong cisco_mineseen; /* returned keepalive seq. from remote */ + ulong cisco_yourseq; /* Remote keepalive seq. 
for Cisco */ + int cisco_keepalive_period; /* keepalive period */ + ulong cisco_last_slarp_in; /* jiffie of last keepalive packet we received */ + char cisco_line_state; /* state of line according to keepalive packets */ + char cisco_debserint; /* debugging flag of cisco hdlc with slarp */ + struct timer_list cisco_timer; + struct work_struct tqueue; +} isdn_net_local; + +/* the interface itself */ +typedef struct isdn_net_dev_s { + isdn_net_local *local; + isdn_net_local *queue; /* circular list of all bundled + channels, which are currently + online */ + spinlock_t queue_lock; /* lock to protect queue */ + void *next; /* Pointer to next isdn-interface */ + struct net_device *dev; /* interface to upper levels */ +#ifdef CONFIG_ISDN_PPP + ippp_bundle * pb; /* pointer to the common bundle structure + * with the per-bundle data */ +#endif +#ifdef CONFIG_ISDN_X25 + struct concap_proto *cprot; /* connection oriented encapsulation protocol */ +#endif + +} isdn_net_dev; + +/*===================== End of ip-over-ISDN stuff ===========================*/ + +/*======================= Start of ISDN-tty stuff ===========================*/ + +#define ISDN_ASYNC_MAGIC 0x49344C01 /* for paranoia-checking */ +#define ISDN_SERIAL_XMIT_SIZE 1024 /* Default bufsize for write */ +#define ISDN_SERIAL_XMIT_MAX 4000 /* Maximum bufsize for write */ + +#ifdef CONFIG_ISDN_AUDIO +/* For using sk_buffs with audio we need some private variables + * within each sk_buff. For this purpose, we declare a struct here, + * and put it always at the private skb->cb data array. A few macros help + * accessing the variables. 
+ */ +typedef struct _isdn_audio_data { + unsigned short dle_count; + unsigned char lock; +} isdn_audio_data_t; + +#define ISDN_AUDIO_SKB_DLECOUNT(skb) (((isdn_audio_data_t *)&skb->cb[0])->dle_count) +#define ISDN_AUDIO_SKB_LOCK(skb) (((isdn_audio_data_t *)&skb->cb[0])->lock) +#endif + +/* Private data of AT-command-interpreter */ +typedef struct atemu { + u_char profile[ISDN_MODEM_NUMREG]; /* Modem-Regs. Profile 0 */ + u_char mdmreg[ISDN_MODEM_NUMREG]; /* Modem-Registers */ + char pmsn[ISDN_MSNLEN]; /* EAZ/MSNs Profile 0 */ + char msn[ISDN_MSNLEN]; /* EAZ/MSN */ + char plmsn[ISDN_LMSNLEN]; /* Listening MSNs Profile 0 */ + char lmsn[ISDN_LMSNLEN]; /* Listening MSNs */ + char cpn[ISDN_MSNLEN]; /* CalledPartyNumber on incoming call */ + char connmsg[ISDN_CMSGLEN]; /* CONNECT-Msg from HL-Driver */ +#ifdef CONFIG_ISDN_AUDIO + u_char vpar[10]; /* Voice-parameters */ + int lastDLE; /* Flag for voice-coding: DLE seen */ +#endif + int mdmcmdl; /* Length of Modem-Commandbuffer */ + int pluscount; /* Counter for +++ sequence */ + u_long lastplus; /* Timestamp of last + */ + int carrierwait; /* Seconds of carrier waiting */ + char mdmcmd[255]; /* Modem-Commandbuffer */ + unsigned int charge; /* Charge units of current connection */ +} atemu; + +/* Private data (similar to async_struct in ) */ +typedef struct modem_info { + int magic; + struct tty_port port; + int x_char; /* xon/xoff character */ + int mcr; /* Modem control register */ + int msr; /* Modem status register */ + int lsr; /* Line status register */ + int line; + int online; /* 1 = B-Channel is up, drop data */ + /* 2 = B-Channel is up, deliver d.*/ + int dialing; /* Dial in progress or ATA */ + int closing; + int rcvsched; /* Receive needs schedule */ + int isdn_driver; /* Index to isdn-driver */ + int isdn_channel; /* Index to isdn-channel */ + int drv_index; /* Index to dev->usage */ + int ncarrier; /* Flag: schedule NO CARRIER */ + unsigned char last_cause[8]; /* Last cause message */ + unsigned char 
last_num[ISDN_MSNLEN]; + /* Last phone-number */ + unsigned char last_l2; /* Last layer-2 protocol */ + unsigned char last_si; /* Last service */ + unsigned char last_lhup; /* Last hangup local? */ + unsigned char last_dir; /* Last direction (in or out) */ + struct timer_list nc_timer; /* Timer for delayed NO CARRIER */ + int send_outstanding;/* # of outstanding send-requests */ + int xmit_size; /* max. # of chars in xmit_buf */ + int xmit_count; /* # of chars in xmit_buf */ + struct sk_buff_head xmit_queue; /* transmit queue */ + atomic_t xmit_lock; /* Semaphore for isdn_tty_write */ +#ifdef CONFIG_ISDN_AUDIO + int vonline; /* Voice-channel status */ + /* Bit 0 = recording */ + /* Bit 1 = playback */ + /* Bit 2 = playback, DLE-ETX seen */ + struct sk_buff_head dtmf_queue; /* queue for dtmf results */ + void *adpcms; /* state for adpcm decompression */ + void *adpcmr; /* state for adpcm compression */ + void *dtmf_state; /* state for dtmf decoder */ + void *silence_state; /* state for silence detection */ +#endif +#ifdef CONFIG_ISDN_TTY_FAX + struct T30_s *fax; /* T30 Fax Group 3 data/interface */ + int faxonline; /* Fax-channel status */ +#endif + atemu emu; /* AT-emulator data */ + spinlock_t readlock; +} modem_info; + +#define ISDN_MODEM_WINSIZE 8 + +/* Description of one ISDN-tty */ +typedef struct _isdn_modem { + int refcount; /* Number of opens */ + struct tty_driver *tty_modem; /* tty-device */ + struct tty_struct *modem_table[ISDN_MAX_CHANNELS]; /* ?? 
copied from Orig */ + struct ktermios *modem_termios[ISDN_MAX_CHANNELS]; + struct ktermios *modem_termios_locked[ISDN_MAX_CHANNELS]; + modem_info info[ISDN_MAX_CHANNELS]; /* Private data */ +} isdn_modem_t; + +/*======================= End of ISDN-tty stuff ============================*/ + +/*======================== Start of V.110 stuff ============================*/ +#define V110_BUFSIZE 1024 + +typedef struct { + int nbytes; /* 1 Matrixbyte -> nbytes in stream */ + int nbits; /* Number of used bits in streambyte */ + unsigned char key; /* Bitmask in stream eg. 11 (nbits=2) */ + int decodelen; /* Amount of data in decodebuf */ + int SyncInit; /* Number of sync frames to send */ + unsigned char *OnlineFrame; /* Precalculated V110 idle frame */ + unsigned char *OfflineFrame; /* Precalculated V110 sync Frame */ + int framelen; /* Length of frames */ + int skbuser; /* Number of unacked userdata skbs */ + int skbidle; /* Number of unacked idle/sync skbs */ + int introducer; /* Local vars for decoder */ + int dbit; + unsigned char b; + int skbres; /* space to reserve in outgoing skb */ + int maxsize; /* maxbufsize of lowlevel driver */ + unsigned char *encodebuf; /* temporary buffer for encoding */ + unsigned char decodebuf[V110_BUFSIZE]; /* incomplete V110 matrices */ +} isdn_v110_stream; + +/*========================= End of V.110 stuff =============================*/ + +/*======================= Start of general stuff ===========================*/ + +typedef struct { + char *next; + char *private; +} infostruct; + +#define DRV_FLAG_RUNNING 1 +#define DRV_FLAG_REJBUS 2 +#define DRV_FLAG_LOADED 4 + +/* Description of hardware-level-driver */ +typedef struct _isdn_driver { + ulong online; /* Channel-Online flags */ + ulong flags; /* Misc driver Flags */ + int locks; /* Number of locks for this driver */ + int channels; /* Number of channels */ + wait_queue_head_t st_waitq; /* Wait-Queue for status-read's */ + int maxbufsize; /* Maximum Buffersize supported */ + 
unsigned long pktcount; /* Until now: unused */ + int stavail; /* Chars avail on Status-device */ + isdn_if *interface; /* Interface to driver */ + int *rcverr; /* Error-counters for B-Ch.-receive */ + int *rcvcount; /* Byte-counters for B-Ch.-receive */ +#ifdef CONFIG_ISDN_AUDIO + unsigned long DLEflag; /* Flags: Insert DLE at next read */ +#endif + struct sk_buff_head *rpqueue; /* Pointers to start of Rcv-Queue */ + wait_queue_head_t *rcv_waitq; /* Wait-Queues for B-Channel-Reads */ + wait_queue_head_t *snd_waitq; /* Wait-Queue for B-Channel-Send's */ + char msn2eaz[10][ISDN_MSNLEN]; /* Mapping-Table MSN->EAZ */ +} isdn_driver_t; + +/* Main driver-data */ +typedef struct isdn_devt { + struct module *owner; + spinlock_t lock; + unsigned short flags; /* Bitmapped Flags: */ + int drivers; /* Current number of drivers */ + int channels; /* Current number of channels */ + int net_verbose; /* Verbose-Flag */ + int modempoll; /* Flag: tty-read active */ + spinlock_t timerlock; + int tflags; /* Timer-Flags: */ + /* see ISDN_TIMER_..defines */ + int global_flags; + infostruct *infochain; /* List of open info-devs. */ + wait_queue_head_t info_waitq; /* Wait-Queue for isdninfo */ + struct timer_list timer; /* Misc.-function Timer */ + int chanmap[ISDN_MAX_CHANNELS]; /* Map minor->device-channel */ + int drvmap[ISDN_MAX_CHANNELS]; /* Map minor->driver-index */ + int usage[ISDN_MAX_CHANNELS]; /* Used by tty/ip/voice */ + char num[ISDN_MAX_CHANNELS][ISDN_MSNLEN]; + /* Remote number of active ch.*/ + int m_idx[ISDN_MAX_CHANNELS]; /* Index for mdm.... 
*/ + isdn_driver_t *drv[ISDN_MAX_DRIVERS]; /* Array of drivers */ + isdn_net_dev *netdev; /* Linked list of net-if's */ + char drvid[ISDN_MAX_DRIVERS][20];/* Driver-ID */ + struct task_struct *profd; /* For iprofd */ + isdn_modem_t mdm; /* tty-driver-data */ + isdn_net_dev *rx_netdev[ISDN_MAX_CHANNELS]; /* rx netdev-pointers */ + isdn_net_dev *st_netdev[ISDN_MAX_CHANNELS]; /* stat netdev-pointers */ + ulong ibytes[ISDN_MAX_CHANNELS]; /* Statistics incoming bytes */ + ulong obytes[ISDN_MAX_CHANNELS]; /* Statistics outgoing bytes */ + int v110emu[ISDN_MAX_CHANNELS]; /* V.110 emulator-mode 0=none */ + atomic_t v110use[ISDN_MAX_CHANNELS]; /* Usage-Semaphore for stream */ + isdn_v110_stream *v110[ISDN_MAX_CHANNELS]; /* V.110 private data */ + struct mutex mtx; /* serialize list access*/ + unsigned long global_features; +} isdn_dev; + +extern isdn_dev *dev; + + +#endif /* __ISDN_H__ */ diff --git a/include/linux/isdn/capilli.h b/include/linux/isdn/capilli.h index 12be09b688..11b57c4858 100644 --- a/include/linux/isdn/capilli.h +++ b/include/linux/isdn/capilli.h @@ -50,7 +50,7 @@ struct capi_ctr { u16 (*send_message)(struct capi_ctr *, struct sk_buff *skb); char *(*procinfo)(struct capi_ctr *); - int (*proc_show)(struct seq_file *, void *); + const struct file_operations *proc_fops; /* filled in before calling ready callback */ u8 manu[CAPI_MANUFACTURER_LEN]; /* CAPI_GET_MANUFACTURER */ @@ -69,6 +69,7 @@ struct capi_ctr { unsigned short state; /* controller state */ int blocked; /* output blocked */ int traceflag; /* capi trace */ + wait_queue_head_t state_wait_queue; struct proc_dir_entry *procent; char procfn[128]; @@ -79,6 +80,8 @@ int detach_capi_ctr(struct capi_ctr *); void capi_ctr_ready(struct capi_ctr * card); void capi_ctr_down(struct capi_ctr * card); +void capi_ctr_suspend_output(struct capi_ctr * card); +void capi_ctr_resume_output(struct capi_ctr * card); void capi_ctr_handle_message(struct capi_ctr * card, u16 appl, struct sk_buff *skb); // 
--------------------------------------------------------------------------- @@ -88,8 +91,23 @@ struct capi_driver { char name[32]; /* driver name */ char revision[32]; + int (*add_card)(struct capi_driver *driver, capicardparams *data); + /* management information for kcapi */ struct list_head list; }; +void register_capi_driver(struct capi_driver *driver); +void unregister_capi_driver(struct capi_driver *driver); + +// --------------------------------------------------------------------------- +// library functions for use by hardware controller drivers + +void capilib_new_ncci(struct list_head *head, u16 applid, u32 ncci, u32 winsize); +void capilib_free_ncci(struct list_head *head, u16 applid, u32 ncci); +void capilib_release_appl(struct list_head *head, u16 applid); +void capilib_release(struct list_head *head); +void capilib_data_b3_conf(struct list_head *head, u16 applid, u32 ncci, u16 msgid); +u16 capilib_data_b3_req(struct list_head *head, u16 applid, u32 ncci, u16 msgid); + #endif /* __CAPILLI_H__ */ diff --git a/include/linux/isdn/capiutil.h b/include/linux/isdn/capiutil.h index 953fd500df..44bd6046e6 100644 --- a/include/linux/isdn/capiutil.h +++ b/include/linux/isdn/capiutil.h @@ -57,4 +57,460 @@ static inline void capimsg_setu32(void *m, int off, __u32 val) #define CAPIMSG_SETCONTROL(m, contr) capimsg_setu32(m, 8, contr) #define CAPIMSG_SETDATALEN(m, len) capimsg_setu16(m, 16, len) +/*----- basic-type definitions -----*/ + +typedef __u8 *_cstruct; + +typedef enum { + CAPI_COMPOSE, + CAPI_DEFAULT +} _cmstruct; + +/* + The _cmsg structure contains all possible CAPI 2.0 parameter. + All parameters are stored here first. The function CAPI_CMSG_2_MESSAGE + assembles the parameter and builds CAPI2.0 conform messages. 
+ CAPI_MESSAGE_2_CMSG disassembles CAPI 2.0 messages and stores the + parameter in the _cmsg structure + */ + +typedef struct { + /* Header */ + __u16 ApplId; + __u8 Command; + __u8 Subcommand; + __u16 Messagenumber; + + /* Parameter */ + union { + __u32 adrController; + __u32 adrPLCI; + __u32 adrNCCI; + } adr; + + _cmstruct AdditionalInfo; + _cstruct B1configuration; + __u16 B1protocol; + _cstruct B2configuration; + __u16 B2protocol; + _cstruct B3configuration; + __u16 B3protocol; + _cstruct BC; + _cstruct BChannelinformation; + _cmstruct BProtocol; + _cstruct CalledPartyNumber; + _cstruct CalledPartySubaddress; + _cstruct CallingPartyNumber; + _cstruct CallingPartySubaddress; + __u32 CIPmask; + __u32 CIPmask2; + __u16 CIPValue; + __u32 Class; + _cstruct ConnectedNumber; + _cstruct ConnectedSubaddress; + __u32 Data; + __u16 DataHandle; + __u16 DataLength; + _cstruct FacilityConfirmationParameter; + _cstruct Facilitydataarray; + _cstruct FacilityIndicationParameter; + _cstruct FacilityRequestParameter; + __u16 FacilitySelector; + __u16 Flags; + __u32 Function; + _cstruct HLC; + __u16 Info; + _cstruct InfoElement; + __u32 InfoMask; + __u16 InfoNumber; + _cstruct Keypadfacility; + _cstruct LLC; + _cstruct ManuData; + __u32 ManuID; + _cstruct NCPI; + __u16 Reason; + __u16 Reason_B3; + __u16 Reject; + _cstruct Useruserdata; + + /* intern */ + unsigned l, p; + unsigned char *par; + __u8 *m; + + /* buffer to construct message */ + __u8 buf[180]; + +} _cmsg; + +/* + * capi_cmsg2message() assembles the parameter from _cmsg to a CAPI 2.0 + * conform message + */ +unsigned capi_cmsg2message(_cmsg * cmsg, __u8 * msg); + +/* + * capi_message2cmsg disassembles a CAPI message an writes the parameter + * into _cmsg for easy access + */ +unsigned capi_message2cmsg(_cmsg * cmsg, __u8 * msg); + +/* + * capi_cmsg_header() fills the _cmsg structure with default values, so only + * parameter with non default values must be changed before sending the + * message. 
+ */ +unsigned capi_cmsg_header(_cmsg * cmsg, __u16 _ApplId, + __u8 _Command, __u8 _Subcommand, + __u16 _Messagenumber, __u32 _Controller); + +/*-----------------------------------------------------------------------*/ + +/* + * Debugging / Tracing functions + */ + +char *capi_cmd2str(__u8 cmd, __u8 subcmd); + +typedef struct { + u_char *buf; + u_char *p; + size_t size; + size_t pos; +} _cdebbuf; + +#define CDEBUG_SIZE 1024 +#define CDEBUG_GSIZE 4096 + +void cdebbuf_free(_cdebbuf *cdb); +int cdebug_init(void); +void cdebug_exit(void); + +_cdebbuf *capi_cmsg2str(_cmsg *cmsg); +_cdebbuf *capi_message2str(__u8 *msg); + +/*-----------------------------------------------------------------------*/ + +static inline void capi_cmsg_answer(_cmsg * cmsg) +{ + cmsg->Subcommand |= 0x01; +} + +/*-----------------------------------------------------------------------*/ + +static inline void capi_fill_CONNECT_B3_REQ(_cmsg * cmsg, __u16 ApplId, __u16 Messagenumber, + __u32 adr, + _cstruct NCPI) +{ + capi_cmsg_header(cmsg, ApplId, 0x82, 0x80, Messagenumber, adr); + cmsg->NCPI = NCPI; +} + +static inline void capi_fill_FACILITY_REQ(_cmsg * cmsg, __u16 ApplId, __u16 Messagenumber, + __u32 adr, + __u16 FacilitySelector, + _cstruct FacilityRequestParameter) +{ + capi_cmsg_header(cmsg, ApplId, 0x80, 0x80, Messagenumber, adr); + cmsg->FacilitySelector = FacilitySelector; + cmsg->FacilityRequestParameter = FacilityRequestParameter; +} + +static inline void capi_fill_INFO_REQ(_cmsg * cmsg, __u16 ApplId, __u16 Messagenumber, + __u32 adr, + _cstruct CalledPartyNumber, + _cstruct BChannelinformation, + _cstruct Keypadfacility, + _cstruct Useruserdata, + _cstruct Facilitydataarray) +{ + capi_cmsg_header(cmsg, ApplId, 0x08, 0x80, Messagenumber, adr); + cmsg->CalledPartyNumber = CalledPartyNumber; + cmsg->BChannelinformation = BChannelinformation; + cmsg->Keypadfacility = Keypadfacility; + cmsg->Useruserdata = Useruserdata; + cmsg->Facilitydataarray = Facilitydataarray; +} + +static inline void 
capi_fill_LISTEN_REQ(_cmsg * cmsg, __u16 ApplId, __u16 Messagenumber, + __u32 adr, + __u32 InfoMask, + __u32 CIPmask, + __u32 CIPmask2, + _cstruct CallingPartyNumber, + _cstruct CallingPartySubaddress) +{ + capi_cmsg_header(cmsg, ApplId, 0x05, 0x80, Messagenumber, adr); + cmsg->InfoMask = InfoMask; + cmsg->CIPmask = CIPmask; + cmsg->CIPmask2 = CIPmask2; + cmsg->CallingPartyNumber = CallingPartyNumber; + cmsg->CallingPartySubaddress = CallingPartySubaddress; +} + +static inline void capi_fill_ALERT_REQ(_cmsg * cmsg, __u16 ApplId, __u16 Messagenumber, + __u32 adr, + _cstruct BChannelinformation, + _cstruct Keypadfacility, + _cstruct Useruserdata, + _cstruct Facilitydataarray) +{ + capi_cmsg_header(cmsg, ApplId, 0x01, 0x80, Messagenumber, adr); + cmsg->BChannelinformation = BChannelinformation; + cmsg->Keypadfacility = Keypadfacility; + cmsg->Useruserdata = Useruserdata; + cmsg->Facilitydataarray = Facilitydataarray; +} + +static inline void capi_fill_CONNECT_REQ(_cmsg * cmsg, __u16 ApplId, __u16 Messagenumber, + __u32 adr, + __u16 CIPValue, + _cstruct CalledPartyNumber, + _cstruct CallingPartyNumber, + _cstruct CalledPartySubaddress, + _cstruct CallingPartySubaddress, + __u16 B1protocol, + __u16 B2protocol, + __u16 B3protocol, + _cstruct B1configuration, + _cstruct B2configuration, + _cstruct B3configuration, + _cstruct BC, + _cstruct LLC, + _cstruct HLC, + _cstruct BChannelinformation, + _cstruct Keypadfacility, + _cstruct Useruserdata, + _cstruct Facilitydataarray) +{ + + capi_cmsg_header(cmsg, ApplId, 0x02, 0x80, Messagenumber, adr); + cmsg->CIPValue = CIPValue; + cmsg->CalledPartyNumber = CalledPartyNumber; + cmsg->CallingPartyNumber = CallingPartyNumber; + cmsg->CalledPartySubaddress = CalledPartySubaddress; + cmsg->CallingPartySubaddress = CallingPartySubaddress; + cmsg->B1protocol = B1protocol; + cmsg->B2protocol = B2protocol; + cmsg->B3protocol = B3protocol; + cmsg->B1configuration = B1configuration; + cmsg->B2configuration = B2configuration; + 
cmsg->B3configuration = B3configuration; + cmsg->BC = BC; + cmsg->LLC = LLC; + cmsg->HLC = HLC; + cmsg->BChannelinformation = BChannelinformation; + cmsg->Keypadfacility = Keypadfacility; + cmsg->Useruserdata = Useruserdata; + cmsg->Facilitydataarray = Facilitydataarray; +} + +static inline void capi_fill_DATA_B3_REQ(_cmsg * cmsg, __u16 ApplId, __u16 Messagenumber, + __u32 adr, + __u32 Data, + __u16 DataLength, + __u16 DataHandle, + __u16 Flags) +{ + + capi_cmsg_header(cmsg, ApplId, 0x86, 0x80, Messagenumber, adr); + cmsg->Data = Data; + cmsg->DataLength = DataLength; + cmsg->DataHandle = DataHandle; + cmsg->Flags = Flags; +} + +static inline void capi_fill_DISCONNECT_REQ(_cmsg * cmsg, __u16 ApplId, __u16 Messagenumber, + __u32 adr, + _cstruct BChannelinformation, + _cstruct Keypadfacility, + _cstruct Useruserdata, + _cstruct Facilitydataarray) +{ + + capi_cmsg_header(cmsg, ApplId, 0x04, 0x80, Messagenumber, adr); + cmsg->BChannelinformation = BChannelinformation; + cmsg->Keypadfacility = Keypadfacility; + cmsg->Useruserdata = Useruserdata; + cmsg->Facilitydataarray = Facilitydataarray; +} + +static inline void capi_fill_DISCONNECT_B3_REQ(_cmsg * cmsg, __u16 ApplId, __u16 Messagenumber, + __u32 adr, + _cstruct NCPI) +{ + + capi_cmsg_header(cmsg, ApplId, 0x84, 0x80, Messagenumber, adr); + cmsg->NCPI = NCPI; +} + +static inline void capi_fill_MANUFACTURER_REQ(_cmsg * cmsg, __u16 ApplId, __u16 Messagenumber, + __u32 adr, + __u32 ManuID, + __u32 Class, + __u32 Function, + _cstruct ManuData) +{ + + capi_cmsg_header(cmsg, ApplId, 0xff, 0x80, Messagenumber, adr); + cmsg->ManuID = ManuID; + cmsg->Class = Class; + cmsg->Function = Function; + cmsg->ManuData = ManuData; +} + +static inline void capi_fill_RESET_B3_REQ(_cmsg * cmsg, __u16 ApplId, __u16 Messagenumber, + __u32 adr, + _cstruct NCPI) +{ + + capi_cmsg_header(cmsg, ApplId, 0x87, 0x80, Messagenumber, adr); + cmsg->NCPI = NCPI; +} + +static inline void capi_fill_SELECT_B_PROTOCOL_REQ(_cmsg * cmsg, __u16 ApplId, __u16 
Messagenumber, + __u32 adr, + __u16 B1protocol, + __u16 B2protocol, + __u16 B3protocol, + _cstruct B1configuration, + _cstruct B2configuration, + _cstruct B3configuration) +{ + + capi_cmsg_header(cmsg, ApplId, 0x41, 0x80, Messagenumber, adr); + cmsg->B1protocol = B1protocol; + cmsg->B2protocol = B2protocol; + cmsg->B3protocol = B3protocol; + cmsg->B1configuration = B1configuration; + cmsg->B2configuration = B2configuration; + cmsg->B3configuration = B3configuration; +} + +static inline void capi_fill_CONNECT_RESP(_cmsg * cmsg, __u16 ApplId, __u16 Messagenumber, + __u32 adr, + __u16 Reject, + __u16 B1protocol, + __u16 B2protocol, + __u16 B3protocol, + _cstruct B1configuration, + _cstruct B2configuration, + _cstruct B3configuration, + _cstruct ConnectedNumber, + _cstruct ConnectedSubaddress, + _cstruct LLC, + _cstruct BChannelinformation, + _cstruct Keypadfacility, + _cstruct Useruserdata, + _cstruct Facilitydataarray) +{ + capi_cmsg_header(cmsg, ApplId, 0x02, 0x83, Messagenumber, adr); + cmsg->Reject = Reject; + cmsg->B1protocol = B1protocol; + cmsg->B2protocol = B2protocol; + cmsg->B3protocol = B3protocol; + cmsg->B1configuration = B1configuration; + cmsg->B2configuration = B2configuration; + cmsg->B3configuration = B3configuration; + cmsg->ConnectedNumber = ConnectedNumber; + cmsg->ConnectedSubaddress = ConnectedSubaddress; + cmsg->LLC = LLC; + cmsg->BChannelinformation = BChannelinformation; + cmsg->Keypadfacility = Keypadfacility; + cmsg->Useruserdata = Useruserdata; + cmsg->Facilitydataarray = Facilitydataarray; +} + +static inline void capi_fill_CONNECT_ACTIVE_RESP(_cmsg * cmsg, __u16 ApplId, __u16 Messagenumber, + __u32 adr) +{ + + capi_cmsg_header(cmsg, ApplId, 0x03, 0x83, Messagenumber, adr); +} + +static inline void capi_fill_CONNECT_B3_ACTIVE_RESP(_cmsg * cmsg, __u16 ApplId, __u16 Messagenumber, + __u32 adr) +{ + + capi_cmsg_header(cmsg, ApplId, 0x83, 0x83, Messagenumber, adr); +} + +static inline void capi_fill_CONNECT_B3_RESP(_cmsg * cmsg, __u16 ApplId, 
__u16 Messagenumber, + __u32 adr, + __u16 Reject, + _cstruct NCPI) +{ + capi_cmsg_header(cmsg, ApplId, 0x82, 0x83, Messagenumber, adr); + cmsg->Reject = Reject; + cmsg->NCPI = NCPI; +} + +static inline void capi_fill_CONNECT_B3_T90_ACTIVE_RESP(_cmsg * cmsg, __u16 ApplId, __u16 Messagenumber, + __u32 adr) +{ + + capi_cmsg_header(cmsg, ApplId, 0x88, 0x83, Messagenumber, adr); +} + +static inline void capi_fill_DATA_B3_RESP(_cmsg * cmsg, __u16 ApplId, __u16 Messagenumber, + __u32 adr, + __u16 DataHandle) +{ + + capi_cmsg_header(cmsg, ApplId, 0x86, 0x83, Messagenumber, adr); + cmsg->DataHandle = DataHandle; +} + +static inline void capi_fill_DISCONNECT_B3_RESP(_cmsg * cmsg, __u16 ApplId, __u16 Messagenumber, + __u32 adr) +{ + + capi_cmsg_header(cmsg, ApplId, 0x84, 0x83, Messagenumber, adr); +} + +static inline void capi_fill_DISCONNECT_RESP(_cmsg * cmsg, __u16 ApplId, __u16 Messagenumber, + __u32 adr) +{ + + capi_cmsg_header(cmsg, ApplId, 0x04, 0x83, Messagenumber, adr); +} + +static inline void capi_fill_FACILITY_RESP(_cmsg * cmsg, __u16 ApplId, __u16 Messagenumber, + __u32 adr, + __u16 FacilitySelector) +{ + + capi_cmsg_header(cmsg, ApplId, 0x80, 0x83, Messagenumber, adr); + cmsg->FacilitySelector = FacilitySelector; +} + +static inline void capi_fill_INFO_RESP(_cmsg * cmsg, __u16 ApplId, __u16 Messagenumber, + __u32 adr) +{ + + capi_cmsg_header(cmsg, ApplId, 0x08, 0x83, Messagenumber, adr); +} + +static inline void capi_fill_MANUFACTURER_RESP(_cmsg * cmsg, __u16 ApplId, __u16 Messagenumber, + __u32 adr, + __u32 ManuID, + __u32 Class, + __u32 Function, + _cstruct ManuData) +{ + + capi_cmsg_header(cmsg, ApplId, 0xff, 0x83, Messagenumber, adr); + cmsg->ManuID = ManuID; + cmsg->Class = Class; + cmsg->Function = Function; + cmsg->ManuData = ManuData; +} + +static inline void capi_fill_RESET_B3_RESP(_cmsg * cmsg, __u16 ApplId, __u16 Messagenumber, + __u32 adr) +{ + + capi_cmsg_header(cmsg, ApplId, 0x87, 0x83, Messagenumber, adr); +} + #endif /* __CAPIUTIL_H__ */ diff 
--git a/include/linux/isdn/hdlc.h b/include/linux/isdn/hdlc.h new file mode 100644 index 0000000000..96521370c7 --- /dev/null +++ b/include/linux/isdn/hdlc.h @@ -0,0 +1,82 @@ +/* + * hdlc.h -- General purpose ISDN HDLC decoder. + * + * Implementation of a HDLC decoder/encoder in software. + * Necessary because some ISDN devices don't have HDLC + * controllers. + * + * Copyright (C) + * 2009 Karsten Keil + * 2002 Wolfgang Mües + * 2001 Frode Isaksen + * 2001 Kai Germaschewski + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 
+ */ + +#ifndef __ISDNHDLC_H__ +#define __ISDNHDLC_H__ + +struct isdnhdlc_vars { + int bit_shift; + int hdlc_bits1; + int data_bits; + int ffbit_shift; /* encoding only */ + int state; + int dstpos; + + u16 crc; + + u8 cbin; + u8 shift_reg; + u8 ffvalue; + + /* set if transferring data */ + u32 data_received:1; + /* set if D channel (send idle instead of flags) */ + u32 dchannel:1; + /* set if 56K adaptation */ + u32 do_adapt56:1; + /* set if in closing phase (need to send CRC + flag) */ + u32 do_closing:1; + /* set if data is bitreverse */ + u32 do_bitreverse:1; +}; + +/* Feature Flags */ +#define HDLC_56KBIT 0x01 +#define HDLC_DCHANNEL 0x02 +#define HDLC_BITREVERSE 0x04 + +/* + The return value from isdnhdlc_decode is + the frame length, 0 if no complete frame was decoded, + or a negative error number +*/ +#define HDLC_FRAMING_ERROR 1 +#define HDLC_CRC_ERROR 2 +#define HDLC_LENGTH_ERROR 3 + +extern void isdnhdlc_rcv_init(struct isdnhdlc_vars *hdlc, u32 features); + +extern int isdnhdlc_decode(struct isdnhdlc_vars *hdlc, const u8 *src, + int slen, int *count, u8 *dst, int dsize); + +extern void isdnhdlc_out_init(struct isdnhdlc_vars *hdlc, u32 features); + +extern int isdnhdlc_encode(struct isdnhdlc_vars *hdlc, const u8 *src, + u16 slen, int *count, u8 *dst, int dsize); + +#endif /* __ISDNHDLC_H__ */ diff --git a/include/linux/isdn_divertif.h b/include/linux/isdn_divertif.h new file mode 100644 index 0000000000..19ab361f9f --- /dev/null +++ b/include/linux/isdn_divertif.h @@ -0,0 +1,35 @@ +/* $Id: isdn_divertif.h,v 1.4.6.1 2001/09/23 22:25:05 kai Exp $ + * + * Header for the diversion supplementary interface for i4l. + * + * Author Werner Cornelius (werner@titro.de) + * Copyright by Werner Cornelius (werner@titro.de) + * + * This software may be used and distributed according to the terms + * of the GNU General Public License, incorporated herein by reference. 
+ * + */ +#ifndef _LINUX_ISDN_DIVERTIF_H +#define _LINUX_ISDN_DIVERTIF_H + +#include +#include +#include + +/***************************************************************/ +/* structure exchanging data between isdn hl and divert module */ +/***************************************************************/ +typedef struct + { ulong if_magic; /* magic info and version */ + int cmd; /* command */ + int (*stat_callback)(isdn_ctrl *); /* supplied by divert module when calling */ + int (*ll_cmd)(isdn_ctrl *); /* supplied by hl on return */ + char * (*drv_to_name)(int); /* map a driver id to name, supplied by hl */ + int (*name_to_drv)(char *); /* map a driver id to name, supplied by hl */ + } isdn_divert_if; + +/*********************/ +/* function register */ +/*********************/ +extern int DIVERT_REG_NAME(isdn_divert_if *); +#endif /* _LINUX_ISDN_DIVERTIF_H */ diff --git a/include/linux/isdn_ppp.h b/include/linux/isdn_ppp.h new file mode 100644 index 0000000000..a0070c6dfa --- /dev/null +++ b/include/linux/isdn_ppp.h @@ -0,0 +1,194 @@ +/* Linux ISDN subsystem, sync PPP, interface to ipppd + * + * Copyright 1994-1999 by Fritz Elfert (fritz@isdn4linux.de) + * Copyright 1995,96 Thinking Objects Software GmbH Wuerzburg + * Copyright 1995,96 by Michael Hipp (Michael.Hipp@student.uni-tuebingen.de) + * Copyright 2000-2002 by Kai Germaschewski (kai@germaschewski.name) + * + * This software may be used and distributed according to the terms + * of the GNU General Public License, incorporated herein by reference. + * + */ +#ifndef _LINUX_ISDN_PPP_H +#define _LINUX_ISDN_PPP_H + + + + +#ifdef CONFIG_IPPP_FILTER +#include +#endif +#include + +#define DECOMP_ERR_NOMEM (-10) + +#define MP_END_FRAG 0x40 +#define MP_BEGIN_FRAG 0x80 + +#define MP_MAX_QUEUE_LEN 16 + +/* + * We need a way for the decompressor to influence the generation of CCP + * Reset-Requests in a variety of ways. 
The decompressor is already returning + * a lot of information (generated skb length, error conditions) so we use + * another parameter. This parameter is a pointer to a structure which is + * to be marked valid by the decompressor and only in this case is ever used. + * Furthermore, the only case where this data is used is when the decom- + * pressor returns DECOMP_ERROR. + * + * We use this same struct for the reset entry of the compressor to commu- + * nicate to its caller how to deal with sending of a Reset Ack. In this + * case, expra is not used, but other options still apply (suppressing + * sending with rsend, appending arbitrary data, etc). + */ + +#define IPPP_RESET_MAXDATABYTES 32 + +struct isdn_ppp_resetparams { + unsigned char valid:1; /* rw Is this structure filled at all ? */ + unsigned char rsend:1; /* rw Should we send one at all ? */ + unsigned char idval:1; /* rw Is the id field valid ? */ + unsigned char dtval:1; /* rw Is the data field valid ? */ + unsigned char expra:1; /* rw Is an Ack expected for this Req ? */ + unsigned char id; /* wo Send CCP ResetReq with this id */ + unsigned short maxdlen; /* ro Max bytes to be stored in data field */ + unsigned short dlen; /* rw Bytes stored in data field */ + unsigned char *data; /* wo Data for ResetReq info field */ +}; + +/* + * this is an 'old friend' from ppp-comp.h under a new name + * check the original include for more information + */ +struct isdn_ppp_compressor { + struct isdn_ppp_compressor *next, *prev; + struct module *owner; + int num; /* CCP compression protocol number */ + + void *(*alloc) (struct isdn_ppp_comp_data *); + void (*free) (void *state); + int (*init) (void *state, struct isdn_ppp_comp_data *, + int unit,int debug); + + /* The reset entry needs to get more exact information about the + ResetReq or ResetAck it was called with. The parameters are + obvious. If reset is called without a Req or Ack frame which + could be handed into it, code MUST be set to 0. 
Using rsparm, + the reset entry can control if and how a ResetAck is returned. */ + + void (*reset) (void *state, unsigned char code, unsigned char id, + unsigned char *data, unsigned len, + struct isdn_ppp_resetparams *rsparm); + + int (*compress) (void *state, struct sk_buff *in, + struct sk_buff *skb_out, int proto); + + int (*decompress) (void *state,struct sk_buff *in, + struct sk_buff *skb_out, + struct isdn_ppp_resetparams *rsparm); + + void (*incomp) (void *state, struct sk_buff *in,int proto); + void (*stat) (void *state, struct compstat *stats); +}; + +extern int isdn_ppp_register_compressor(struct isdn_ppp_compressor *); +extern int isdn_ppp_unregister_compressor(struct isdn_ppp_compressor *); +extern int isdn_ppp_dial_slave(char *); +extern int isdn_ppp_hangup_slave(char *); + +typedef struct { + unsigned long seqerrs; + unsigned long frame_drops; + unsigned long overflows; + unsigned long max_queue_len; +} isdn_mppp_stats; + +typedef struct { + int mp_mrru; /* unused */ + struct sk_buff * frags; /* fragments sl list -- use skb->next */ + long frames; /* number of frames in the frame list */ + unsigned int seq; /* last processed packet seq #: any packets + * with smaller seq # will be dropped + * unconditionally */ + spinlock_t lock; + int ref_ct; + /* statistics */ + isdn_mppp_stats stats; +} ippp_bundle; + +#define NUM_RCV_BUFFS 64 + +struct ippp_buf_queue { + struct ippp_buf_queue *next; + struct ippp_buf_queue *last; + char *buf; /* NULL here indicates end of queue */ + int len; +}; + +/* The data structure for one CCP reset transaction */ +enum ippp_ccp_reset_states { + CCPResetIdle, + CCPResetSentReq, + CCPResetRcvdReq, + CCPResetSentAck, + CCPResetRcvdAck +}; + +struct ippp_ccp_reset_state { + enum ippp_ccp_reset_states state; /* State of this transaction */ + struct ippp_struct *is; /* Backlink to device stuff */ + unsigned char id; /* Backlink id index */ + unsigned char ta:1; /* The timer is active (flag) */ + unsigned char expra:1; /* We 
expect a ResetAck at all */ + int dlen; /* Databytes stored in data */ + struct timer_list timer; /* For timeouts/retries */ + /* This is a hack but seems sufficient for the moment. We do not want + to have this be yet another allocation for some bytes, it is more + memory management overhead than the whole mess is worth. */ + unsigned char data[IPPP_RESET_MAXDATABYTES]; +}; + +/* The data structure keeping track of the currently outstanding CCP Reset + transactions. */ +struct ippp_ccp_reset { + struct ippp_ccp_reset_state *rs[256]; /* One per possible id */ + unsigned char lastid; /* Last id allocated by the engine */ +}; + +struct ippp_struct { + struct ippp_struct *next_link; + int state; + spinlock_t buflock; + struct ippp_buf_queue rq[NUM_RCV_BUFFS]; /* packet queue for isdn_ppp_read() */ + struct ippp_buf_queue *first; /* pointer to (current) first packet */ + struct ippp_buf_queue *last; /* pointer to (current) last used packet in queue */ + wait_queue_head_t wq; + struct task_struct *tk; + unsigned int mpppcfg; + unsigned int pppcfg; + unsigned int mru; + unsigned int mpmru; + unsigned int mpmtu; + unsigned int maxcid; + struct isdn_net_local_s *lp; + int unit; + int minor; + unsigned int last_link_seqno; + long mp_seqno; +#ifdef CONFIG_ISDN_PPP_VJ + unsigned char *cbuf; + struct slcompress *slcomp; +#endif +#ifdef CONFIG_IPPP_FILTER + struct bpf_prog *pass_filter; /* filter for packets to pass */ + struct bpf_prog *active_filter; /* filter for pkts to reset idle */ +#endif + unsigned long debug; + struct isdn_ppp_compressor *compressor,*decompressor; + struct isdn_ppp_compressor *link_compressor,*link_decompressor; + void *decomp_stat,*comp_stat,*link_decomp_stat,*link_comp_stat; + struct ippp_ccp_reset *reset; /* Allocated on demand, may never be needed */ + unsigned long compflags; +}; + +#endif /* _LINUX_ISDN_PPP_H */ diff --git a/include/linux/isdnif.h b/include/linux/isdnif.h new file mode 100644 index 0000000000..0fc6ff2762 --- /dev/null +++ 
b/include/linux/isdnif.h @@ -0,0 +1,505 @@ +/* $Id: isdnif.h,v 1.43.2.2 2004/01/12 23:08:35 keil Exp $ + * + * Linux ISDN subsystem + * Definition of the interface between the subsystem and its low-level drivers. + * + * Copyright 1994,95,96 by Fritz Elfert (fritz@isdn4linux.de) + * Copyright 1995,96 Thinking Objects Software GmbH Wuerzburg + * + * This software may be used and distributed according to the terms + * of the GNU General Public License, incorporated herein by reference. + * + */ +#ifndef __ISDNIF_H__ +#define __ISDNIF_H__ + + +#include +#include + +/***************************************************************************/ +/* Extensions made by Werner Cornelius (werner@ikt.de) */ +/* */ +/* The proceed command holds a incoming call in a state to leave processes */ +/* enough time to check whether ist should be accepted. */ +/* The PROT_IO Command extends the interface to make protocol dependent */ +/* features available (call diversion, call waiting...). */ +/* */ +/* The PROT_IO Command is executed with the desired driver id and the arg */ +/* parameter coded as follows: */ +/* The lower 8 bits of arg contain the desired protocol from ISDN_PTYPE */ +/* definitions. The upper 24 bits represent the protocol specific cmd/stat.*/ +/* Any additional data is protocol and command specific. */ +/* This mechanism also applies to the statcallb callback STAT_PROT. */ +/* */ +/* This suggested extension permits an easy expansion of protocol specific */ +/* handling. Extensions may be added at any time without changing the HL */ +/* driver code and not getting conflicts without certifications. */ +/* The well known CAPI 2.0 interface handles such extensions in a similar */ +/* way. Perhaps a protocol specific module may be added and separately */ +/* loaded and linked to the basic isdn module for handling. 
*/ +/***************************************************************************/ + +/*****************/ +/* DSS1 commands */ +/*****************/ +#define DSS1_CMD_INVOKE ((0x00 << 8) | ISDN_PTYPE_EURO) /* invoke a supplementary service */ +#define DSS1_CMD_INVOKE_ABORT ((0x01 << 8) | ISDN_PTYPE_EURO) /* abort a invoke cmd */ + +/*******************************/ +/* DSS1 Status callback values */ +/*******************************/ +#define DSS1_STAT_INVOKE_RES ((0x80 << 8) | ISDN_PTYPE_EURO) /* Result for invocation */ +#define DSS1_STAT_INVOKE_ERR ((0x81 << 8) | ISDN_PTYPE_EURO) /* Error Return for invocation */ +#define DSS1_STAT_INVOKE_BRD ((0x82 << 8) | ISDN_PTYPE_EURO) /* Deliver invoke broadcast info */ + + +/*********************************************************************/ +/* structures for DSS1 commands and callback */ +/* */ +/* An action is invoked by sending a DSS1_CMD_INVOKE. The ll_id, proc*/ +/* timeout, datalen and data fields must be set before calling. */ +/* */ +/* The return value is a positive hl_id value also delivered in the */ +/* hl_id field. A value of zero signals no more left hl_id capacitys.*/ +/* A negative return value signals errors in LL. So if the return */ +/* value is <= 0 no action in LL will be taken -> request ignored */ +/* */ +/* The timeout field must be filled with a positive value specifying */ +/* the amount of time the INVOKED process waits for a reaction from */ +/* the network. */ +/* If a response (either error or result) is received during this */ +/* intervall, a reporting callback is initiated and the process will */ +/* be deleted, the hl identifier will be freed. */ +/* If no response is received during the specified intervall, a error*/ +/* callback is initiated with timeout set to -1 and a datalen set */ +/* to 0. */ +/* If timeout is set to a value <= 0 during INVOCATION the process is*/ +/* immediately deleted after sending the data. No callback occurs ! 
*/ +/* */ +/* A currently waiting process may be aborted with INVOKE_ABORT. No */ +/* callback will occur when a process has been aborted. */ +/* */ +/* Broadcast invoke frames from the network are reported via the */ +/* STAT_INVOKE_BRD callback. The ll_id is set to 0, the other fields */ +/* are supplied by the network and not by the HL. */ +/*********************************************************************/ + +/*****************/ +/* NI1 commands */ +/*****************/ +#define NI1_CMD_INVOKE ((0x00 << 8) | ISDN_PTYPE_NI1) /* invoke a supplementary service */ +#define NI1_CMD_INVOKE_ABORT ((0x01 << 8) | ISDN_PTYPE_NI1) /* abort a invoke cmd */ + +/*******************************/ +/* NI1 Status callback values */ +/*******************************/ +#define NI1_STAT_INVOKE_RES ((0x80 << 8) | ISDN_PTYPE_NI1) /* Result for invocation */ +#define NI1_STAT_INVOKE_ERR ((0x81 << 8) | ISDN_PTYPE_NI1) /* Error Return for invocation */ +#define NI1_STAT_INVOKE_BRD ((0x82 << 8) | ISDN_PTYPE_NI1) /* Deliver invoke broadcast info */ + +typedef struct + { ulong ll_id; /* ID supplied by LL when executing */ + /* a command and returned by HL for */ + /* INVOKE_RES and INVOKE_ERR */ + int hl_id; /* ID supplied by HL when called */ + /* for executing a cmd and delivered */ + /* for results and errors */ + /* must be supplied by LL when aborting*/ + int proc; /* invoke procedure used by CMD_INVOKE */ + /* returned by callback and broadcast */ + int timeout; /* timeout for INVOKE CMD in ms */ + /* -1 in stat callback when timed out */ + /* error value when error callback */ + int datalen; /* length of cmd or stat data */ + u_char *data;/* pointer to data delivered or send */ + } isdn_cmd_stat; + +/* + * Commands from linklevel to lowlevel + * + */ +#define ISDN_CMD_IOCTL 0 /* Perform ioctl */ +#define ISDN_CMD_DIAL 1 /* Dial out */ +#define ISDN_CMD_ACCEPTD 2 /* Accept an incoming call on D-Chan. */ +#define ISDN_CMD_ACCEPTB 3 /* Request B-Channel connect. 
*/ +#define ISDN_CMD_HANGUP 4 /* Hangup */ +#define ISDN_CMD_CLREAZ 5 /* Clear EAZ(s) of channel */ +#define ISDN_CMD_SETEAZ 6 /* Set EAZ(s) of channel */ +#define ISDN_CMD_GETEAZ 7 /* Get EAZ(s) of channel */ +#define ISDN_CMD_SETSIL 8 /* Set Service-Indicator-List of channel */ +#define ISDN_CMD_GETSIL 9 /* Get Service-Indicator-List of channel */ +#define ISDN_CMD_SETL2 10 /* Set B-Chan. Layer2-Parameter */ +#define ISDN_CMD_GETL2 11 /* Get B-Chan. Layer2-Parameter */ +#define ISDN_CMD_SETL3 12 /* Set B-Chan. Layer3-Parameter */ +#define ISDN_CMD_GETL3 13 /* Get B-Chan. Layer3-Parameter */ +// #define ISDN_CMD_LOCK 14 /* Signal usage by upper levels */ +// #define ISDN_CMD_UNLOCK 15 /* Release usage-lock */ +#define ISDN_CMD_SUSPEND 16 /* Suspend connection */ +#define ISDN_CMD_RESUME 17 /* Resume connection */ +#define ISDN_CMD_PROCEED 18 /* Proceed with call establishment */ +#define ISDN_CMD_ALERT 19 /* Alert after Proceeding */ +#define ISDN_CMD_REDIR 20 /* Redir a incoming call */ +#define ISDN_CMD_PROT_IO 21 /* Protocol specific commands */ +#define CAPI_PUT_MESSAGE 22 /* CAPI message send down or up */ +#define ISDN_CMD_FAXCMD 23 /* FAX commands to HL-driver */ +#define ISDN_CMD_AUDIO 24 /* DSP, DTMF, ... settings */ + +/* + * Status-Values delivered from lowlevel to linklevel via + * statcallb(). 
+ * + */ +#define ISDN_STAT_STAVAIL 256 /* Raw status-data available */ +#define ISDN_STAT_ICALL 257 /* Incoming call detected */ +#define ISDN_STAT_RUN 258 /* Signal protocol-code is running */ +#define ISDN_STAT_STOP 259 /* Signal halt of protocol-code */ +#define ISDN_STAT_DCONN 260 /* Signal D-Channel connect */ +#define ISDN_STAT_BCONN 261 /* Signal B-Channel connect */ +#define ISDN_STAT_DHUP 262 /* Signal D-Channel disconnect */ +#define ISDN_STAT_BHUP 263 /* Signal B-Channel disconnect */ +#define ISDN_STAT_CINF 264 /* Charge-Info */ +#define ISDN_STAT_LOAD 265 /* Signal new lowlevel-driver is loaded */ +#define ISDN_STAT_UNLOAD 266 /* Signal unload of lowlevel-driver */ +#define ISDN_STAT_BSENT 267 /* Signal packet sent */ +#define ISDN_STAT_NODCH 268 /* Signal no D-Channel */ +#define ISDN_STAT_ADDCH 269 /* Add more Channels */ +#define ISDN_STAT_CAUSE 270 /* Cause-Message */ +#define ISDN_STAT_ICALLW 271 /* Incoming call without B-chan waiting */ +#define ISDN_STAT_REDIR 272 /* Redir result */ +#define ISDN_STAT_PROT 273 /* protocol IO specific callback */ +#define ISDN_STAT_DISPLAY 274 /* deliver a received display message */ +#define ISDN_STAT_L1ERR 275 /* Signal Layer-1 Error */ +#define ISDN_STAT_FAXIND 276 /* FAX indications from HL-driver */ +#define ISDN_STAT_AUDIO 277 /* DTMF, DSP indications */ +#define ISDN_STAT_DISCH 278 /* Disable/Enable channel usage */ + +/* + * Audio commands + */ +#define ISDN_AUDIO_SETDD 0 /* Set DTMF detection */ +#define ISDN_AUDIO_DTMF 1 /* Rx/Tx DTMF */ + +/* + * Values for errcode field + */ +#define ISDN_STAT_L1ERR_SEND 1 +#define ISDN_STAT_L1ERR_RECV 2 + +/* + * Values for feature-field of interface-struct. 
+ */ +/* Layer 2 */ +#define ISDN_FEATURE_L2_X75I (0x0001 << ISDN_PROTO_L2_X75I) +#define ISDN_FEATURE_L2_X75UI (0x0001 << ISDN_PROTO_L2_X75UI) +#define ISDN_FEATURE_L2_X75BUI (0x0001 << ISDN_PROTO_L2_X75BUI) +#define ISDN_FEATURE_L2_HDLC (0x0001 << ISDN_PROTO_L2_HDLC) +#define ISDN_FEATURE_L2_TRANS (0x0001 << ISDN_PROTO_L2_TRANS) +#define ISDN_FEATURE_L2_X25DTE (0x0001 << ISDN_PROTO_L2_X25DTE) +#define ISDN_FEATURE_L2_X25DCE (0x0001 << ISDN_PROTO_L2_X25DCE) +#define ISDN_FEATURE_L2_V11096 (0x0001 << ISDN_PROTO_L2_V11096) +#define ISDN_FEATURE_L2_V11019 (0x0001 << ISDN_PROTO_L2_V11019) +#define ISDN_FEATURE_L2_V11038 (0x0001 << ISDN_PROTO_L2_V11038) +#define ISDN_FEATURE_L2_MODEM (0x0001 << ISDN_PROTO_L2_MODEM) +#define ISDN_FEATURE_L2_FAX (0x0001 << ISDN_PROTO_L2_FAX) +#define ISDN_FEATURE_L2_HDLC_56K (0x0001 << ISDN_PROTO_L2_HDLC_56K) + +#define ISDN_FEATURE_L2_MASK (0x0FFFF) /* Max. 16 protocols */ +#define ISDN_FEATURE_L2_SHIFT (0) + +/* Layer 3 */ +#define ISDN_FEATURE_L3_TRANS (0x10000 << ISDN_PROTO_L3_TRANS) +#define ISDN_FEATURE_L3_TRANSDSP (0x10000 << ISDN_PROTO_L3_TRANSDSP) +#define ISDN_FEATURE_L3_FCLASS2 (0x10000 << ISDN_PROTO_L3_FCLASS2) +#define ISDN_FEATURE_L3_FCLASS1 (0x10000 << ISDN_PROTO_L3_FCLASS1) + +#define ISDN_FEATURE_L3_MASK (0x0FF0000) /* Max. 8 Protocols */ +#define ISDN_FEATURE_L3_SHIFT (16) + +/* Signaling */ +#define ISDN_FEATURE_P_UNKNOWN (0x1000000 << ISDN_PTYPE_UNKNOWN) +#define ISDN_FEATURE_P_1TR6 (0x1000000 << ISDN_PTYPE_1TR6) +#define ISDN_FEATURE_P_EURO (0x1000000 << ISDN_PTYPE_EURO) +#define ISDN_FEATURE_P_NI1 (0x1000000 << ISDN_PTYPE_NI1) + +#define ISDN_FEATURE_P_MASK (0x0FF000000) /* Max. 
8 Protocols */ +#define ISDN_FEATURE_P_SHIFT (24) + +typedef struct setup_parm { + unsigned char phone[32]; /* Remote Phone-Number */ + unsigned char eazmsn[32]; /* Local EAZ or MSN */ + unsigned char si1; /* Service Indicator 1 */ + unsigned char si2; /* Service Indicator 2 */ + unsigned char plan; /* Numbering plan */ + unsigned char screen; /* Screening info */ +} setup_parm; + + +#ifdef CONFIG_ISDN_TTY_FAX +/* T.30 Fax G3 */ + +#define FAXIDLEN 21 + +typedef struct T30_s { + /* session parameters */ + __u8 resolution; + __u8 rate; + __u8 width; + __u8 length; + __u8 compression; + __u8 ecm; + __u8 binary; + __u8 scantime; + __u8 id[FAXIDLEN]; + /* additional parameters */ + __u8 phase; + __u8 direction; + __u8 code; + __u8 badlin; + __u8 badmul; + __u8 bor; + __u8 fet; + __u8 pollid[FAXIDLEN]; + __u8 cq; + __u8 cr; + __u8 ctcrty; + __u8 minsp; + __u8 phcto; + __u8 rel; + __u8 nbc; + /* remote station parameters */ + __u8 r_resolution; + __u8 r_rate; + __u8 r_width; + __u8 r_length; + __u8 r_compression; + __u8 r_ecm; + __u8 r_binary; + __u8 r_scantime; + __u8 r_id[FAXIDLEN]; + __u8 r_code; +} __packed T30_s; + +#define ISDN_TTY_FAX_CONN_IN 0 +#define ISDN_TTY_FAX_CONN_OUT 1 + +#define ISDN_TTY_FAX_FCON 0 +#define ISDN_TTY_FAX_DIS 1 +#define ISDN_TTY_FAX_FTT 2 +#define ISDN_TTY_FAX_MCF 3 +#define ISDN_TTY_FAX_DCS 4 +#define ISDN_TTY_FAX_TRAIN_OK 5 +#define ISDN_TTY_FAX_EOP 6 +#define ISDN_TTY_FAX_EOM 7 +#define ISDN_TTY_FAX_MPS 8 +#define ISDN_TTY_FAX_DTC 9 +#define ISDN_TTY_FAX_RID 10 +#define ISDN_TTY_FAX_HNG 11 +#define ISDN_TTY_FAX_DT 12 +#define ISDN_TTY_FAX_FCON_I 13 +#define ISDN_TTY_FAX_DR 14 +#define ISDN_TTY_FAX_ET 15 +#define ISDN_TTY_FAX_CFR 16 +#define ISDN_TTY_FAX_PTS 17 +#define ISDN_TTY_FAX_SENT 18 + +#define ISDN_FAX_PHASE_IDLE 0 +#define ISDN_FAX_PHASE_A 1 +#define ISDN_FAX_PHASE_B 2 +#define ISDN_FAX_PHASE_C 3 +#define ISDN_FAX_PHASE_D 4 +#define ISDN_FAX_PHASE_E 5 + +#endif /* TTY_FAX */ + +#define ISDN_FAX_CLASS1_FAE 0 +#define 
ISDN_FAX_CLASS1_FTS 1 +#define ISDN_FAX_CLASS1_FRS 2 +#define ISDN_FAX_CLASS1_FTM 3 +#define ISDN_FAX_CLASS1_FRM 4 +#define ISDN_FAX_CLASS1_FTH 5 +#define ISDN_FAX_CLASS1_FRH 6 +#define ISDN_FAX_CLASS1_CTRL 7 + +#define ISDN_FAX_CLASS1_OK 0 +#define ISDN_FAX_CLASS1_CONNECT 1 +#define ISDN_FAX_CLASS1_NOCARR 2 +#define ISDN_FAX_CLASS1_ERROR 3 +#define ISDN_FAX_CLASS1_FCERROR 4 +#define ISDN_FAX_CLASS1_QUERY 5 + +typedef struct { + __u8 cmd; + __u8 subcmd; + __u8 para[50]; +} aux_s; + +#define AT_COMMAND 0 +#define AT_EQ_VALUE 1 +#define AT_QUERY 2 +#define AT_EQ_QUERY 3 + +/* CAPI structs */ + +/* this is compatible to the old union size */ +#define MAX_CAPI_PARA_LEN 50 + +typedef struct { + /* Header */ + __u16 Length; + __u16 ApplId; + __u8 Command; + __u8 Subcommand; + __u16 Messagenumber; + + /* Parameter */ + union { + __u32 Controller; + __u32 PLCI; + __u32 NCCI; + } adr; + __u8 para[MAX_CAPI_PARA_LEN]; +} capi_msg; + +/* + * Structure for exchanging above infos + * + */ +typedef struct { + int driver; /* Lowlevel-Driver-ID */ + int command; /* Command or Status (see above) */ + ulong arg; /* Additional Data */ + union { + ulong errcode; /* Type of error with STAT_L1ERR */ + int length; /* Amount of bytes sent with STAT_BSENT */ + u_char num[50]; /* Additional Data */ + setup_parm setup;/* For SETUP msg */ + capi_msg cmsg; /* For CAPI like messages */ + char display[85];/* display message data */ + isdn_cmd_stat isdn_io; /* ISDN IO-parameter/result */ + aux_s aux; /* for modem commands/indications */ +#ifdef CONFIG_ISDN_TTY_FAX + T30_s *fax; /* Pointer to ttys fax struct */ +#endif + ulong userdata; /* User Data */ + } parm; +} isdn_ctrl; + +#define dss1_io isdn_io +#define ni1_io isdn_io + +/* + * The interface-struct itself (initialized at load-time of lowlevel-driver) + * + * See Documentation/isdn/INTERFACE for a description, how the communication + * between the ISDN subsystem and its drivers is done. 
+ * + */ +typedef struct { + struct module *owner; + + /* Number of channels supported by this driver + */ + int channels; + + /* + * Maximum Size of transmit/receive-buffer this driver supports. + */ + int maxbufsize; + + /* Feature-Flags for this driver. + * See defines ISDN_FEATURE_... for Values + */ + unsigned long features; + + /* + * Needed for calculating + * dev->hard_header_len = linklayer header + hl_hdrlen; + * Drivers, not supporting sk_buff's should set this to 0. + */ + unsigned short hl_hdrlen; + + /* + * Receive-Callback using sk_buff's + * Parameters: + * int Driver-ID + * int local channel-number (0 ...) + * struct sk_buff *skb received Data + */ + void (*rcvcallb_skb)(int, int, struct sk_buff *); + + /* Status-Callback + * Parameters: + * isdn_ctrl* + * driver = Driver ID. + * command = One of above ISDN_STAT_... constants. + * arg = depending on status-type. + * num = depending on status-type. + */ + int (*statcallb)(isdn_ctrl*); + + /* Send command + * Parameters: + * isdn_ctrl* + * driver = Driver ID. + * command = One of above ISDN_CMD_... constants. + * arg = depending on command. + * num = depending on command. + */ + int (*command)(isdn_ctrl*); + + /* + * Send data using sk_buff's + * Parameters: + * int driverId + * int local channel-number (0...) + * int Flag: Need ACK for this packet. + * struct sk_buff *skb Data to send + */ + int (*writebuf_skb) (int, int, int, struct sk_buff *); + + /* Send raw D-Channel-Commands + * Parameters: + * u_char pointer data + * int length of data + * int driverId + * int local channel-number (0 ...) + */ + int (*writecmd)(const u_char __user *, int, int, int); + + /* Read raw Status replies + * u_char pointer data (volatile) + * int length of buffer + * int driverId + * int local channel-number (0 ...) 
+ */ + int (*readstat)(u_char __user *, int, int, int); + + char id[20]; +} isdn_if; + +/* + * Function which must be called by lowlevel-driver at loadtime with + * the following fields of above struct set: + * + * channels Number of channels that will be supported. + * hl_hdrlen Space to preserve in sk_buff's when sending. Drivers, not + * supporting sk_buff's should set this to 0. + * command Address of Command-Handler. + * features Bitwise coded Features of this driver. (use ISDN_FEATURE_...) + * writebuf_skb Address of Skbuff-Send-Handler. + * writecmd " " D-Channel " which accepts raw D-Ch-Commands. + * readstat " " D-Channel " which delivers raw Status-Data. + * + * The linklevel-driver fills the following fields: + * + * channels Driver-ID assigned to this driver. (Must be used on all + * subsequent callbacks. + * rcvcallb_skb Address of handler for received Skbuff's. + * statcallb " " " for status-changes. + * + */ +extern int register_isdn(isdn_if*); +#include + +#endif /* __ISDNIF_H__ */ diff --git a/include/linux/isicom.h b/include/linux/isicom.h index 7de6822d7b..b92e056506 100644 --- a/include/linux/isicom.h +++ b/include/linux/isicom.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_ISICOM_H #define _LINUX_ISICOM_H diff --git a/include/linux/jbd2.h b/include/linux/jbd2.h index fd933c4528..a66f30d2d8 100644 --- a/include/linux/jbd2.h +++ b/include/linux/jbd2.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * linux/include/linux/jbd2.h * @@ -6,6 +5,10 @@ * * Copyright 1998-2000 Red Hat, Inc --- All Rights Reserved * + * This file is part of the Linux kernel and is made available under + * the terms of the GNU General Public License, version 2, or at your + * option, any later version, incorporated herein by reference. + * * Definitions for transaction data structures for the buffer cache * filesystem journaling support. 
*/ @@ -27,7 +30,6 @@ #include #include #include -#include #include #endif @@ -61,14 +63,13 @@ void __jbd2_debug(int level, const char *file, const char *func, #define jbd_debug(n, fmt, a...) \ __jbd2_debug((n), __FILE__, __func__, __LINE__, (fmt), ##a) #else -#define jbd_debug(n, fmt, a...) no_printk(fmt, ##a) +#define jbd_debug(n, fmt, a...) /**/ #endif extern void *jbd2_alloc(size_t size, gfp_t flags); extern void jbd2_free(void *ptr, size_t size); #define JBD2_MIN_JOURNAL_BLOCKS 1024 -#define JBD2_DEFAULT_FAST_COMMIT_BLOCKS 256 #ifdef __KERNEL__ @@ -264,10 +265,7 @@ typedef struct journal_superblock_s /* 0x0050 */ __u8 s_checksum_type; /* checksum type */ __u8 s_padding2[3]; -/* 0x0054 */ - __be32 s_num_fc_blks; /* Number of fast commit blocks */ -/* 0x0058 */ - __u32 s_padding[41]; + __u32 s_padding[42]; __be32 s_checksum; /* crc32c(superblock) */ /* 0x0100 */ @@ -293,7 +291,6 @@ typedef struct journal_superblock_s #define JBD2_FEATURE_INCOMPAT_ASYNC_COMMIT 0x00000004 #define JBD2_FEATURE_INCOMPAT_CSUM_V2 0x00000008 #define JBD2_FEATURE_INCOMPAT_CSUM_V3 0x00000010 -#define JBD2_FEATURE_INCOMPAT_FAST_COMMIT 0x00000020 /* See "journal feature predicate functions" below */ @@ -304,8 +301,7 @@ typedef struct journal_superblock_s JBD2_FEATURE_INCOMPAT_64BIT | \ JBD2_FEATURE_INCOMPAT_ASYNC_COMMIT | \ JBD2_FEATURE_INCOMPAT_CSUM_V2 | \ - JBD2_FEATURE_INCOMPAT_CSUM_V3 | \ - JBD2_FEATURE_INCOMPAT_FAST_COMMIT) + JBD2_FEATURE_INCOMPAT_CSUM_V3) #ifdef __KERNEL__ @@ -320,6 +316,7 @@ enum jbd_state_bits { BH_Revoked, /* Has been revoked from the log */ BH_RevokeValid, /* Revoked flag is valid */ BH_JBDDirty, /* Is dirty but journaled */ + BH_State, /* Pins most journal_head state */ BH_JournalHead, /* Pins bh->b_private and jh->b_bh */ BH_Shadow, /* IO on shadow buffer is running */ BH_Verified, /* Metadata block has been verified ok */ @@ -348,6 +345,26 @@ static inline struct journal_head *bh2jh(struct buffer_head *bh) return bh->b_private; } +static inline void 
jbd_lock_bh_state(struct buffer_head *bh) +{ + bit_spin_lock(BH_State, &bh->b_state); +} + +static inline int jbd_trylock_bh_state(struct buffer_head *bh) +{ + return bit_spin_trylock(BH_State, &bh->b_state); +} + +static inline int jbd_is_locked_bh_state(struct buffer_head *bh) +{ + return bit_spin_is_locked(BH_State, &bh->b_state); +} + +static inline void jbd_unlock_bh_state(struct buffer_head *bh) +{ + bit_spin_unlock(BH_State, &bh->b_state); +} + static inline void jbd_lock_bh_journal_head(struct buffer_head *bh) { bit_spin_lock(BH_JournalHead, &bh->b_state); @@ -401,83 +418,41 @@ static inline void jbd_unlock_bh_journal_head(struct buffer_head *bh) #define JI_WAIT_DATA (1 << __JI_WAIT_DATA) /** - * struct jbd2_inode - The jbd_inode type is the structure linking inodes in - * ordered mode present in a transaction so that we can sync them during commit. + * struct jbd_inode is the structure linking inodes in ordered mode + * present in a transaction so that we can sync them during commit. */ struct jbd2_inode { - /** - * @i_transaction: - * - * Which transaction does this inode belong to? Either the running - * transaction or the committing one. [j_list_lock] - */ + /* Which transaction does this inode belong to? Either the running + * transaction or the committing one. [j_list_lock] */ transaction_t *i_transaction; - /** - * @i_next_transaction: - * - * Pointer to the running transaction modifying inode's data in case - * there is already a committing transaction touching it. [j_list_lock] - */ + /* Pointer to the running transaction modifying inode's data in case + * there is already a committing transaction touching it. 
[j_list_lock] */ transaction_t *i_next_transaction; - /** - * @i_list: List of inodes in the i_transaction [j_list_lock] - */ + /* List of inodes in the i_transaction [j_list_lock] */ struct list_head i_list; - /** - * @i_vfs_inode: - * - * VFS inode this inode belongs to [constant for lifetime of structure] - */ + /* VFS inode this inode belongs to [constant during the lifetime + * of the structure] */ struct inode *i_vfs_inode; - /** - * @i_flags: Flags of inode [j_list_lock] - */ + /* Flags of inode [j_list_lock] */ unsigned long i_flags; - - /** - * @i_dirty_start: - * - * Offset in bytes where the dirty range for this inode starts. - * [j_list_lock] - */ - loff_t i_dirty_start; - - /** - * @i_dirty_end: - * - * Inclusive offset in bytes where the dirty range for this inode - * ends. [j_list_lock] - */ - loff_t i_dirty_end; }; struct jbd2_revoke_table_s; /** - * struct jbd2_journal_handle - The jbd2_journal_handle type is the concrete - * type associated with handle_t. + * struct handle_s - The handle_s type is the concrete type associated with + * handle_t. * @h_transaction: Which compound transaction is this update a part of? - * @h_journal: Which journal handle belongs to - used iff h_reserved set. - * @h_rsv_handle: Handle reserved for finishing the logical operation. - * @h_total_credits: Number of remaining buffers we are allowed to add to - * journal. These are dirty buffers and revoke descriptor blocks. - * @h_revoke_credits: Number of remaining revoke records available for handle - * @h_ref: Reference count on this handle. - * @h_err: Field for caller's use to track errors through large fs operations. - * @h_sync: Flag for sync-on-close. - * @h_jdata: Flag to force data journaling. - * @h_reserved: Flag for handle for reserved credits. - * @h_aborted: Flag indicating fatal error on handle. - * @h_type: For handle statistics. - * @h_line_no: For handle statistics. - * @h_start_jiffies: Handle Start time. 
- * @h_requested_credits: Holds @h_total_credits after handle is started. - * @h_revoke_credits_requested: Holds @h_revoke_credits after handle is started. - * @saved_alloc_context: Saved context while transaction is open. + * @h_buffer_credits: Number of remaining buffers we are allowed to dirty. + * @h_ref: Reference count on this handle + * @h_err: Field for caller's use to track errors through large fs operations + * @h_sync: flag for sync-on-close + * @h_jdata: flag to force data journaling + * @h_aborted: flag indicating fatal error on handle **/ /* Docbook can't yet cope with the bit fields, but will leave the documentation @@ -487,30 +462,35 @@ struct jbd2_revoke_table_s; struct jbd2_journal_handle { union { + /* Which compound transaction is this update a part of? */ transaction_t *h_transaction; /* Which journal handle belongs to - used iff h_reserved set */ journal_t *h_journal; }; + /* Handle reserved for finishing the logical operation */ handle_t *h_rsv_handle; - int h_total_credits; - int h_revoke_credits; - int h_revoke_credits_requested; + + /* Number of remaining buffers we are allowed to dirty: */ + int h_buffer_credits; + + /* Reference count on this handle */ int h_ref; + + /* Field for caller's use to track errors through large fs */ + /* operations */ int h_err; /* Flags [no locking] */ - unsigned int h_sync: 1; - unsigned int h_jdata: 1; - unsigned int h_reserved: 1; - unsigned int h_aborted: 1; - unsigned int h_type: 8; - unsigned int h_line_no: 16; + unsigned int h_sync: 1; /* sync-on-close */ + unsigned int h_jdata: 1; /* force data journaling */ + unsigned int h_reserved: 1; /* handle with reserved credits */ + unsigned int h_aborted: 1; /* fatal error on handle */ + unsigned int h_type: 8; /* for handle statistics */ + unsigned int h_line_no: 16; /* for handle statistics */ unsigned long h_start_jiffies; unsigned int h_requested_credits; - - unsigned int saved_alloc_context; }; @@ -538,7 +518,6 @@ struct transaction_chp_stats_s { * The 
transaction keeps track of all of the buffers modified by a * running transaction, and all of the buffers committed but not yet * flushed to home for finished transactions. - * (Locking Documentation improved by LockDoc) */ /* @@ -548,9 +527,9 @@ struct transaction_chp_stats_s { * ->jbd_lock_bh_journal_head() (This is "innermost") * * j_state_lock - * ->b_state_lock + * ->jbd_lock_bh_state() * - * b_state_lock + * jbd_lock_bh_state() * ->j_list_lock * * j_state_lock @@ -580,7 +559,6 @@ struct transaction_s enum { T_RUNNING, T_LOCKED, - T_SWITCH, T_FLUSH, T_COMMIT, T_COMMIT_DFLUSH, @@ -594,22 +572,18 @@ struct transaction_s */ unsigned long t_log_start; - /* - * Number of buffers on the t_buffers list [j_list_lock, no locks - * needed for jbd2 thread] - */ + /* Number of buffers on the t_buffers list [j_list_lock] */ int t_nr_buffers; /* * Doubly-linked circular list of all buffers reserved but not yet - * modified by this transaction [j_list_lock, no locks needed fo - * jbd2 thread] + * modified by this transaction [j_list_lock] */ struct journal_head *t_reserved_list; /* * Doubly-linked circular list of all metadata buffers owned by this - * transaction [j_list_lock, no locks needed for jbd2 thread] + * transaction [j_list_lock] */ struct journal_head *t_buffers; @@ -633,18 +607,14 @@ struct transaction_s struct journal_head *t_checkpoint_io_list; /* - * Doubly-linked circular list of metadata buffers being - * shadowed by log IO. The IO buffers on the iobuf list and - * the shadow buffers on this list match each other one for - * one at all times. [j_list_lock, no locks needed for jbd2 - * thread] + * Doubly-linked circular list of metadata buffers being shadowed by log + * IO. The IO buffers on the iobuf list and the shadow buffers on this + * list match each other one for one at all times. 
[j_list_lock] */ struct journal_head *t_shadow_list; /* - * List of inodes associated with the transaction; e.g., ext4 uses - * this to track inodes in data=ordered and data=journal mode that - * need special handling on transaction commit; also used by ocfs2. + * List of inodes whose data we've modified in data=ordered mode. * [j_list_lock] */ struct list_head t_inode_list; @@ -665,40 +635,27 @@ struct transaction_s unsigned long t_start; /* - * When commit was requested [j_state_lock] + * When commit was requested */ unsigned long t_requested; /* - * Checkpointing stats [j_list_lock] + * Checkpointing stats [j_checkpoint_sem] */ struct transaction_chp_stats_s t_chp_stats; /* * Number of outstanding updates running on this transaction - * [none] + * [t_handle_lock] */ atomic_t t_updates; /* - * Number of blocks reserved for this transaction in the journal. - * This is including all credits reserved when starting transaction - * handles as well as all journal descriptor blocks needed for this - * transaction. [none] + * Number of buffers reserved for use by all handles in this transaction + * handle but not yet modified. [t_handle_lock] */ atomic_t t_outstanding_credits; - /* - * Number of revoke records for this transaction added by already - * stopped handles. [none] - */ - atomic_t t_outstanding_revokes; - - /* - * How many handles used this transaction? [none] - */ - atomic_t t_handle_count; - /* * Forward and backward links for the circular list of all transactions * awaiting checkpoint. [j_list_lock] @@ -716,6 +673,11 @@ struct transaction_s */ ktime_t t_start_time; + /* + * How many handles used this transaction? [t_handle_lock] + */ + atomic_unchecked_t t_handle_count; + /* * This transaction is being forced and some process is * waiting for it to finish. 
@@ -762,329 +724,231 @@ jbd2_time_diff(unsigned long start, unsigned long end) #define JBD2_NR_BATCH 64 -enum passtype {PASS_SCAN, PASS_REVOKE, PASS_REPLAY}; - -#define JBD2_FC_REPLAY_STOP 0 -#define JBD2_FC_REPLAY_CONTINUE 1 - /** * struct journal_s - The journal_s type is the concrete type associated with * journal_t. + * @j_flags: General journaling state flags + * @j_errno: Is there an outstanding uncleared error on the journal (from a + * prior abort)? + * @j_sb_buffer: First part of superblock buffer + * @j_superblock: Second part of superblock buffer + * @j_format_version: Version of the superblock format + * @j_state_lock: Protect the various scalars in the journal + * @j_barrier_count: Number of processes waiting to create a barrier lock + * @j_barrier: The barrier lock itself + * @j_running_transaction: The current running transaction.. + * @j_committing_transaction: the transaction we are pushing to disk + * @j_checkpoint_transactions: a linked circular list of all transactions + * waiting for checkpointing + * @j_wait_transaction_locked: Wait queue for waiting for a locked transaction + * to start committing, or for a barrier lock to be released + * @j_wait_done_commit: Wait queue for waiting for commit to complete + * @j_wait_commit: Wait queue to trigger commit + * @j_wait_updates: Wait queue to wait for updates to complete + * @j_wait_reserved: Wait queue to wait for reserved buffer credits to drop + * @j_checkpoint_mutex: Mutex for locking against concurrent checkpoints + * @j_head: Journal head - identifies the first unused block in the journal + * @j_tail: Journal tail - identifies the oldest still-used block in the + * journal. + * @j_free: Journal free - how many free blocks are there in the journal? + * @j_first: The block number of the first usable block + * @j_last: The block number one beyond the last usable block + * @j_dev: Device where we store the journal + * @j_blocksize: blocksize for the location where we store the journal. 
+ * @j_blk_offset: starting block offset for into the device where we store the + * journal + * @j_fs_dev: Device which holds the client fs. For internal journal this will + * be equal to j_dev + * @j_reserved_credits: Number of buffers reserved from the running transaction + * @j_maxlen: Total maximum capacity of the journal region on disk. + * @j_list_lock: Protects the buffer lists and internal buffer state. + * @j_inode: Optional inode where we store the journal. If present, all journal + * block numbers are mapped into this inode via bmap(). + * @j_tail_sequence: Sequence number of the oldest transaction in the log + * @j_transaction_sequence: Sequence number of the next transaction to grant + * @j_commit_sequence: Sequence number of the most recently committed + * transaction + * @j_commit_request: Sequence number of the most recent transaction wanting + * commit + * @j_uuid: Uuid of client object. + * @j_task: Pointer to the current commit thread for this journal + * @j_max_transaction_buffers: Maximum number of metadata buffers to allow in a + * single compound commit transaction + * @j_commit_interval: What is the maximum transaction lifetime before we begin + * a commit? + * @j_commit_timer: The timer used to wakeup the commit thread + * @j_revoke_lock: Protect the revoke table + * @j_revoke: The revoke table - maintains the list of revoked blocks in the + * current transaction. + * @j_revoke_table: alternate revoke tables for j_revoke + * @j_wbuf: array of buffer_heads for jbd2_journal_commit_transaction + * @j_wbufsize: maximum number of buffer_heads allowed in j_wbuf, the + * number that will fit in j_blocksize + * @j_last_sync_writer: most recent pid which did a synchronous write + * @j_history_lock: Protect the transactions statistics history + * @j_proc_entry: procfs entry for the jbd statistics directory + * @j_stats: Overall statistics + * @j_private: An opaque pointer to fs-private information. 
+ * @j_trans_commit_map: Lockdep entity to track transaction commit dependencies */ + struct journal_s { - /** - * @j_flags: General journaling state flags [j_state_lock, - * no lock for quick racy checks] - */ + /* General journaling state flags [j_state_lock] */ unsigned long j_flags; - /** - * @j_atomic_flags: Atomic journaling state flags. - */ - unsigned long j_atomic_flags; - - /** - * @j_errno: - * + /* * Is there an outstanding uncleared error on the journal (from a prior * abort)? [j_state_lock] */ int j_errno; - /** - * @j_abort_mutex: Lock the whole aborting procedure. - */ - struct mutex j_abort_mutex; - - /** - * @j_sb_buffer: The first part of the superblock buffer. - */ + /* The superblock buffer */ struct buffer_head *j_sb_buffer; - - /** - * @j_superblock: The second part of the superblock buffer. - */ journal_superblock_t *j_superblock; - /** - * @j_format_version: Version of the superblock format. - */ + /* Version of the superblock format */ int j_format_version; - /** - * @j_state_lock: Protect the various scalars in the journal. + /* + * Protect the various scalars in the journal */ rwlock_t j_state_lock; - /** - * @j_barrier_count: - * - * Number of processes waiting to create a barrier lock [j_state_lock, - * no lock for quick racy checks] + /* + * Number of processes waiting to create a barrier lock [j_state_lock] */ int j_barrier_count; - /** - * @j_barrier: The barrier lock itself. - */ + /* The barrier lock itself */ struct mutex j_barrier; - /** - * @j_running_transaction: - * + /* * Transactions: The current running transaction... - * [j_state_lock, no lock for quick racy checks] [caller holding - * open handle] + * [j_state_lock] [caller holding open handle] */ transaction_t *j_running_transaction; - /** - * @j_committing_transaction: - * + /* * the transaction we are pushing to disk * [j_state_lock] [caller holding open handle] */ transaction_t *j_committing_transaction; - /** - * @j_checkpoint_transactions: - * + /* * ... 
and a linked circular list of all transactions waiting for * checkpointing. [j_list_lock] */ transaction_t *j_checkpoint_transactions; - /** - * @j_wait_transaction_locked: - * + /* * Wait queue for waiting for a locked transaction to start committing, - * or for a barrier lock to be released. + * or for a barrier lock to be released */ wait_queue_head_t j_wait_transaction_locked; - /** - * @j_wait_done_commit: Wait queue for waiting for commit to complete. - */ + /* Wait queue for waiting for commit to complete */ wait_queue_head_t j_wait_done_commit; - /** - * @j_wait_commit: Wait queue to trigger commit. - */ + /* Wait queue to trigger commit */ wait_queue_head_t j_wait_commit; - /** - * @j_wait_updates: Wait queue to wait for updates to complete. - */ + /* Wait queue to wait for updates to complete */ wait_queue_head_t j_wait_updates; - /** - * @j_wait_reserved: - * - * Wait queue to wait for reserved buffer credits to drop. - */ + /* Wait queue to wait for reserved buffer credits to drop */ wait_queue_head_t j_wait_reserved; - /** - * @j_fc_wait: - * - * Wait queue to wait for completion of async fast commits. - */ - wait_queue_head_t j_fc_wait; - - /** - * @j_checkpoint_mutex: - * - * Semaphore for locking against concurrent checkpoints. - */ + /* Semaphore for locking against concurrent checkpoints */ struct mutex j_checkpoint_mutex; - /** - * @j_chkpt_bhs: - * + /* * List of buffer heads used by the checkpoint routine. This * was moved from jbd2_log_do_checkpoint() to reduce stack * usage. Access to this array is controlled by the - * @j_checkpoint_mutex. [j_checkpoint_mutex] + * j_checkpoint_mutex. [j_checkpoint_mutex] */ struct buffer_head *j_chkpt_bhs[JBD2_NR_BATCH]; - - /** - * @j_shrinker: - * - * Journal head shrinker, reclaim buffer's journal head which - * has been written back. - */ - struct shrinker j_shrinker; - - /** - * @j_checkpoint_jh_count: - * - * Number of journal buffers on the checkpoint list. 
[j_list_lock] - */ - struct percpu_counter j_checkpoint_jh_count; - - /** - * @j_shrink_transaction: - * - * Record next transaction will shrink on the checkpoint list. - * [j_list_lock] - */ - transaction_t *j_shrink_transaction; - - /** - * @j_head: - * + + /* * Journal head: identifies the first unused block in the journal. * [j_state_lock] */ unsigned long j_head; - /** - * @j_tail: - * + /* * Journal tail: identifies the oldest still-used block in the journal. * [j_state_lock] */ unsigned long j_tail; - /** - * @j_free: - * + /* * Journal free: how many free blocks are there in the journal? * [j_state_lock] */ unsigned long j_free; - /** - * @j_first: - * - * The block number of the first usable block in the journal - * [j_state_lock]. + /* + * Journal start and end: the block numbers of the first usable block + * and one beyond the last usable block in the journal. [j_state_lock] */ unsigned long j_first; - - /** - * @j_last: - * - * The block number one beyond the last usable block in the journal - * [j_state_lock]. - */ unsigned long j_last; - /** - * @j_fc_first: - * - * The block number of the first fast commit block in the journal - * [j_state_lock]. - */ - unsigned long j_fc_first; - - /** - * @j_fc_off: - * - * Number of fast commit blocks currently allocated. Accessed only - * during fast commit. Currently only process can do fast commit, so - * this field is not protected by any lock. - */ - unsigned long j_fc_off; - - /** - * @j_fc_last: - * - * The block number one beyond the last fast commit block in the journal - * [j_state_lock]. - */ - unsigned long j_fc_last; - - /** - * @j_dev: Device where we store the journal. + /* + * Device, blocksize and starting block offset for the location where we + * store the journal. */ struct block_device *j_dev; - - /** - * @j_blocksize: Block size for the location where we store the journal. 
- */ int j_blocksize; - - /** - * @j_blk_offset: - * - * Starting block offset into the device where we store the journal. - */ unsigned long long j_blk_offset; - - /** - * @j_devname: Journal device name. - */ char j_devname[BDEVNAME_SIZE+24]; - /** - * @j_fs_dev: - * + /* * Device which holds the client fs. For internal journal this will be * equal to j_dev. */ struct block_device *j_fs_dev; - /** - * @j_total_len: Total maximum capacity of the journal region on disk. - */ - unsigned int j_total_len; + /* Total maximum capacity of the journal region on disk. */ + unsigned int j_maxlen; - /** - * @j_reserved_credits: - * - * Number of buffers reserved from the running transaction. - */ + /* Number of buffers reserved from the running transaction */ atomic_t j_reserved_credits; - /** - * @j_list_lock: Protects the buffer lists and internal buffer state. + /* + * Protects the buffer lists and internal buffer state. */ spinlock_t j_list_lock; - /** - * @j_inode: - * - * Optional inode where we store the journal. If present, all - * journal block numbers are mapped into this inode via bmap(). - */ + /* Optional inode where we store the journal. If present, all */ + /* journal block numbers are mapped into this inode via */ + /* bmap(). */ struct inode *j_inode; - /** - * @j_tail_sequence: - * + /* * Sequence number of the oldest transaction in the log [j_state_lock] */ tid_t j_tail_sequence; - /** - * @j_transaction_sequence: - * + /* * Sequence number of the next transaction to grant [j_state_lock] */ tid_t j_transaction_sequence; - /** - * @j_commit_sequence: - * + /* * Sequence number of the most recently committed transaction - * [j_state_lock, no lock for quick racy checks] + * [j_state_lock]. 
*/ tid_t j_commit_sequence; - /** - * @j_commit_request: - * + /* * Sequence number of the most recent transaction wanting commit - * [j_state_lock, no lock for quick racy checks] + * [j_state_lock] */ tid_t j_commit_request; - /** - * @j_uuid: - * + /* * Journal uuid: identifies the object (filesystem, LVM volume etc) * backed by this journal. This will eventually be replaced by an array * of uuids, allowing us to index multiple devices within a single @@ -1092,193 +956,85 @@ struct journal_s */ __u8 j_uuid[16]; - /** - * @j_task: Pointer to the current commit thread for this journal. - */ + /* Pointer to the current commit thread for this journal */ struct task_struct *j_task; - /** - * @j_max_transaction_buffers: - * + /* * Maximum number of metadata buffers to allow in a single compound - * commit transaction. + * commit transaction */ int j_max_transaction_buffers; - /** - * @j_revoke_records_per_block: - * - * Number of revoke records that fit in one descriptor block. - */ - int j_revoke_records_per_block; - - /** - * @j_commit_interval: - * + /* * What is the maximum transaction lifetime before we begin a commit? */ unsigned long j_commit_interval; - /** - * @j_commit_timer: The timer used to wakeup the commit thread. - */ + /* The timer used to wakeup the commit thread: */ struct timer_list j_commit_timer; - /** - * @j_revoke_lock: Protect the revoke table. + /* + * The revoke table: maintains the list of revoked blocks in the + * current transaction. [j_revoke_lock] */ spinlock_t j_revoke_lock; - - /** - * @j_revoke: - * - * The revoke table - maintains the list of revoked blocks in the - * current transaction. - */ struct jbd2_revoke_table_s *j_revoke; - - /** - * @j_revoke_table: Alternate revoke tables for j_revoke. - */ struct jbd2_revoke_table_s *j_revoke_table[2]; - /** - * @j_wbuf: Array of bhs for jbd2_journal_commit_transaction. 
+ /* + * array of bhs for jbd2_journal_commit_transaction */ struct buffer_head **j_wbuf; - - /** - * @j_fc_wbuf: Array of fast commit bhs for fast commit. Accessed only - * during a fast commit. Currently only process can do fast commit, so - * this field is not protected by any lock. - */ - struct buffer_head **j_fc_wbuf; - - /** - * @j_wbufsize: - * - * Size of @j_wbuf array. - */ int j_wbufsize; - /** - * @j_fc_wbufsize: - * - * Size of @j_fc_wbuf array. - */ - int j_fc_wbufsize; - - /** - * @j_last_sync_writer: - * - * The pid of the last person to run a synchronous operation - * through the journal. + /* + * this is the pid of hte last person to run a synchronous operation + * through the journal */ pid_t j_last_sync_writer; - /** - * @j_average_commit_time: - * - * The average amount of time in nanoseconds it takes to commit a + /* + * the average amount of time in nanoseconds it takes to commit a * transaction to disk. [j_state_lock] */ u64 j_average_commit_time; - /** - * @j_min_batch_time: - * - * Minimum time that we should wait for additional filesystem operations - * to get batched into a synchronous handle in microseconds. + /* + * minimum and maximum times that we should wait for + * additional filesystem operations to get batched into a + * synchronous handle in microseconds */ u32 j_min_batch_time; - - /** - * @j_max_batch_time: - * - * Maximum time that we should wait for additional filesystem operations - * to get batched into a synchronous handle in microseconds. - */ u32 j_max_batch_time; - /** - * @j_commit_callback: - * - * This function is called when a transaction is closed. - */ + /* This function is called when a transaction is closed */ void (*j_commit_callback)(journal_t *, transaction_t *); - /** - * @j_submit_inode_data_buffers: - * - * This function is called for all inodes associated with the - * committing transaction marked with JI_WRITE_DATA flag - * before we start to write out the transaction to the journal. 
- */ - int (*j_submit_inode_data_buffers) - (struct jbd2_inode *); - - /** - * @j_finish_inode_data_buffers: - * - * This function is called for all inodes associated with the - * committing transaction marked with JI_WAIT_DATA flag - * after we have written the transaction to the journal - * but before we write out the commit block. - */ - int (*j_finish_inode_data_buffers) - (struct jbd2_inode *); - /* * Journal statistics */ - - /** - * @j_history_lock: Protect the transactions statistics history. - */ spinlock_t j_history_lock; - - /** - * @j_proc_entry: procfs entry for the jbd statistics directory. - */ struct proc_dir_entry *j_proc_entry; - - /** - * @j_stats: Overall statistics. - */ struct transaction_stats_s j_stats; - /** - * @j_failed_commit: Failed journal commit ID. - */ + /* Failed journal commit ID */ unsigned int j_failed_commit; - /** - * @j_private: - * + /* * An opaque pointer to fs-private information. ext3 puts its - * superblock pointer here. + * superblock pointer here */ void *j_private; - /** - * @j_chksum_driver: - * - * Reference to checksum algorithm driver via cryptoapi. - */ + /* Reference to checksum algorithm driver via cryptoapi */ struct crypto_shash *j_chksum_driver; - /** - * @j_csum_seed: - * - * Precomputed journal UUID checksum for seeding other checksums. - */ + /* Precomputed journal UUID checksum for seeding other checksums */ __u32 j_csum_seed; #ifdef CONFIG_DEBUG_LOCK_ALLOC - /** - * @j_trans_commit_map: - * + /* * Lockdep entity to track transaction commit dependencies. Handles * hold this "lock" for read, when we wait for commit, we acquire the * "lock" for writing. This matches the properties of jbd2 journalling @@ -1288,36 +1044,12 @@ struct journal_s */ struct lockdep_map j_trans_commit_map; #endif - - /** - * @j_fc_cleanup_callback: - * - * Clean-up after fast commit or full commit. JBD2 calls this function - * after every commit operation. 
- */ - void (*j_fc_cleanup_callback)(struct journal_s *journal, int); - - /** - * @j_fc_replay_callback: - * - * File-system specific function that performs replay of a fast - * commit. JBD2 calls this function for each fast commit block found in - * the journal. This function should return JBD2_FC_REPLAY_CONTINUE - * to indicate that the block was processed correctly and more fast - * commit replay should continue. Return value of JBD2_FC_REPLAY_STOP - * indicates the end of replay (no more blocks remaining). A negative - * return value indicates error. - */ - int (*j_fc_replay_callback)(struct journal_s *journal, - struct buffer_head *bh, - enum passtype pass, int off, - tid_t expected_commit_id); }; #define jbd2_might_wait_for_commit(j) \ do { \ rwsem_acquire(&j->j_trans_commit_map, 0, 0, _THIS_IP_); \ - rwsem_release(&j->j_trans_commit_map, _THIS_IP_); \ + rwsem_release(&j->j_trans_commit_map, 1, _THIS_IP_); \ } while (0) /* journal feature predicate functions */ @@ -1382,7 +1114,6 @@ JBD2_FEATURE_INCOMPAT_FUNCS(64bit, 64BIT) JBD2_FEATURE_INCOMPAT_FUNCS(async_commit, ASYNC_COMMIT) JBD2_FEATURE_INCOMPAT_FUNCS(csum2, CSUM_V2) JBD2_FEATURE_INCOMPAT_FUNCS(csum3, CSUM_V3) -JBD2_FEATURE_INCOMPAT_FUNCS(fast_commit, FAST_COMMIT) /* * Journal flag definitions @@ -1396,18 +1127,7 @@ JBD2_FEATURE_INCOMPAT_FUNCS(fast_commit, FAST_COMMIT) #define JBD2_ABORT_ON_SYNCDATA_ERR 0x040 /* Abort the journal on file * data write error in ordered * mode */ -#define JBD2_FAST_COMMIT_ONGOING 0x100 /* Fast commit is ongoing */ -#define JBD2_FULL_COMMIT_ONGOING 0x200 /* Full commit is ongoing */ -#define JBD2_JOURNAL_FLUSH_DISCARD 0x0001 -#define JBD2_JOURNAL_FLUSH_ZEROOUT 0x0002 -#define JBD2_JOURNAL_FLUSH_VALID (JBD2_JOURNAL_FLUSH_DISCARD | \ - JBD2_JOURNAL_FLUSH_ZEROOUT) - -/* - * Journal atomic flag definitions - */ -#define JBD2_CHECKPOINT_IO_ERROR 0x001 /* Detect io error while writing - * buffer back to disk */ +#define JBD2_REC_ERR 0x080 /* The errno in the sb has been recorded 
*/ /* * Function declarations for the journaling transaction and buffer @@ -1416,7 +1136,7 @@ JBD2_FEATURE_INCOMPAT_FUNCS(fast_commit, FAST_COMMIT) /* Filing buffers */ extern void jbd2_journal_unfile_buffer(journal_t *, struct journal_head *); -extern bool __jbd2_journal_refile_buffer(struct journal_head *); +extern void __jbd2_journal_refile_buffer(struct journal_head *); extern void jbd2_journal_refile_buffer(journal_t *, struct journal_head *); extern void __jbd2_journal_file_buffer(struct journal_head *, transaction_t *, int); extern void __journal_free_buffer(struct journal_head *bh); @@ -1445,7 +1165,6 @@ extern void jbd2_journal_commit_transaction(journal_t *); /* Checkpoint list management */ void __jbd2_journal_clean_checkpoint_list(journal_t *journal, bool destroy); -unsigned long jbd2_journal_shrink_checkpoint_list(journal_t *journal, unsigned long *nr_to_scan); int __jbd2_journal_remove_checkpoint(struct journal_head *); void jbd2_journal_destroy_checkpoint(journal_t *journal); void __jbd2_journal_insert_checkpoint(struct journal_head *, transaction_t *); @@ -1491,7 +1210,7 @@ extern void __wait_on_journal (journal_t *); /* Transaction cache support */ extern void jbd2_journal_destroy_transaction_cache(void); -extern int __init jbd2_journal_init_transaction_cache(void); +extern int jbd2_journal_init_transaction_cache(void); extern void jbd2_journal_free_transaction(transaction_t *); /* @@ -1518,16 +1237,14 @@ static inline handle_t *journal_current_handle(void) extern handle_t *jbd2_journal_start(journal_t *, int nblocks); extern handle_t *jbd2__journal_start(journal_t *, int blocks, int rsv_blocks, - int revoke_records, gfp_t gfp_mask, - unsigned int type, unsigned int line_no); + gfp_t gfp_mask, unsigned int type, + unsigned int line_no); extern int jbd2_journal_restart(handle_t *, int nblocks); -extern int jbd2__journal_restart(handle_t *, int nblocks, - int revoke_records, gfp_t gfp_mask); +extern int jbd2__journal_restart(handle_t *, int nblocks, 
gfp_t gfp_mask); extern int jbd2_journal_start_reserved(handle_t *handle, unsigned int type, unsigned int line_no); extern void jbd2_journal_free_reserved(handle_t *handle); -extern int jbd2_journal_extend(handle_t *handle, int nblocks, - int revoke_records); +extern int jbd2_journal_extend (handle_t *, int nblocks); extern int jbd2_journal_get_write_access(handle_t *, struct buffer_head *); extern int jbd2_journal_get_create_access (handle_t *, struct buffer_head *); extern int jbd2_journal_get_undo_access(handle_t *, struct buffer_head *); @@ -1535,11 +1252,12 @@ void jbd2_journal_set_triggers(struct buffer_head *, struct jbd2_buffer_trigger_type *type); extern int jbd2_journal_dirty_metadata (handle_t *, struct buffer_head *); extern int jbd2_journal_forget (handle_t *, struct buffer_head *); +extern void journal_sync_buffer (struct buffer_head *); extern int jbd2_journal_invalidatepage(journal_t *, struct page *, unsigned int, unsigned int); -extern int jbd2_journal_try_to_free_buffers(journal_t *journal, struct page *page); +extern int jbd2_journal_try_to_free_buffers(journal_t *, struct page *, gfp_t); extern int jbd2_journal_stop(handle_t *); -extern int jbd2_journal_flush(journal_t *journal, unsigned int flags); +extern int jbd2_journal_flush (journal_t *); extern void jbd2_journal_lock_updates (journal_t *); extern void jbd2_journal_unlock_updates (journal_t *); @@ -1564,6 +1282,7 @@ extern int jbd2_journal_skip_recovery (journal_t *); extern void jbd2_journal_update_sb_errno(journal_t *); extern int jbd2_journal_update_sb_log_tail (journal_t *, tid_t, unsigned long, int); +extern void __jbd2_journal_abort_hard (journal_t *); extern void jbd2_journal_abort (journal_t *, int); extern int jbd2_journal_errno (journal_t *); extern void jbd2_journal_ack_err (journal_t *); @@ -1571,16 +1290,8 @@ extern int jbd2_journal_clear_err (journal_t *); extern int jbd2_journal_bmap(journal_t *, unsigned long, unsigned long long *); extern int 
jbd2_journal_force_commit(journal_t *); extern int jbd2_journal_force_commit_nested(journal_t *); -extern int jbd2_journal_inode_ranged_write(handle_t *handle, - struct jbd2_inode *inode, loff_t start_byte, - loff_t length); -extern int jbd2_journal_inode_ranged_wait(handle_t *handle, - struct jbd2_inode *inode, loff_t start_byte, - loff_t length); -extern int jbd2_journal_submit_inode_data_buffers( - struct jbd2_inode *jinode); -extern int jbd2_journal_finish_inode_data_buffers( - struct jbd2_inode *jinode); +extern int jbd2_journal_inode_add_write(handle_t *handle, struct jbd2_inode *inode); +extern int jbd2_journal_inode_add_wait(handle_t *handle, struct jbd2_inode *inode); extern int jbd2_journal_begin_ordered_truncate(journal_t *journal, struct jbd2_inode *inode, loff_t new_size); extern void jbd2_journal_init_jbd_inode(struct jbd2_inode *jinode, struct inode *inode); @@ -1627,10 +1338,8 @@ static inline void jbd2_free_inode(struct jbd2_inode *jinode) /* Primary revoke support */ #define JOURNAL_REVOKE_DEFAULT_HASH 256 extern int jbd2_journal_init_revoke(journal_t *, int); -extern void jbd2_journal_destroy_revoke_record_cache(void); -extern void jbd2_journal_destroy_revoke_table_cache(void); -extern int __init jbd2_journal_init_revoke_record_cache(void); -extern int __init jbd2_journal_init_revoke_table_cache(void); +extern void jbd2_journal_destroy_revoke_caches(void); +extern int jbd2_journal_init_revoke_caches(void); extern void jbd2_journal_destroy_revoke(journal_t *); extern int jbd2_journal_revoke (handle_t *, unsigned long long, struct buffer_head *); @@ -1656,7 +1365,6 @@ int jbd2_log_start_commit(journal_t *journal, tid_t tid); int __jbd2_log_start_commit(journal_t *journal, tid_t tid); int jbd2_journal_start_commit(journal_t *journal, tid_t *tid); int jbd2_log_wait_commit(journal_t *journal, tid_t tid); -int jbd2_transaction_committed(journal_t *journal, tid_t tid); int jbd2_complete_transaction(journal_t *journal, tid_t tid); int 
jbd2_log_do_checkpoint(journal_t *journal); int jbd2_trans_will_send_data_barrier(journal_t *journal, tid_t tid); @@ -1665,21 +1373,6 @@ void __jbd2_log_wait_for_space(journal_t *journal); extern void __jbd2_journal_drop_transaction(journal_t *, transaction_t *); extern int jbd2_cleanup_journal_tail(journal_t *); -/* Fast commit related APIs */ -int jbd2_fc_begin_commit(journal_t *journal, tid_t tid); -int jbd2_fc_end_commit(journal_t *journal); -int jbd2_fc_end_commit_fallback(journal_t *journal); -int jbd2_fc_get_buf(journal_t *journal, struct buffer_head **bh_out); -int jbd2_submit_inode_data(struct jbd2_inode *jinode); -int jbd2_wait_inode_data(journal_t *journal, struct jbd2_inode *jinode); -int jbd2_fc_wait_bufs(journal_t *journal, int num_blks); -int jbd2_fc_release_bufs(journal_t *journal); - -static inline int jbd2_journal_get_max_txn_bufs(journal_t *journal) -{ - return (journal->j_total_len - journal->j_fc_wbufsize) / 4; -} - /* * is_journal_abort * @@ -1740,11 +1433,20 @@ static inline int jbd2_journal_has_csum_v2or3(journal_t *journal) return journal->j_chksum_driver != NULL; } -static inline int jbd2_journal_get_num_fc_blks(journal_superblock_t *jsb) -{ - int num_fc_blocks = be32_to_cpu(jsb->s_num_fc_blks); +/* + * We reserve t_outstanding_credits >> JBD2_CONTROL_BLOCKS_SHIFT for + * transaction control blocks. + */ +#define JBD2_CONTROL_BLOCKS_SHIFT 5 - return num_fc_blocks ? num_fc_blocks : JBD2_DEFAULT_FAST_COMMIT_BLOCKS; +/* + * Return the minimum number of blocks which must be free in the journal + * before a new transaction may be started. Must be called under j_state_lock. 
+ */ +static inline int jbd2_space_needed(journal_t *journal) +{ + int nblocks = journal->j_max_transaction_buffers; + return nblocks + (nblocks >> JBD2_CONTROL_BLOCKS_SHIFT); } /* @@ -1753,13 +1455,16 @@ static inline int jbd2_journal_get_num_fc_blks(journal_superblock_t *jsb) static inline unsigned long jbd2_log_space_left(journal_t *journal) { /* Allow for rounding errors */ - long free = journal->j_free - 32; + unsigned long free = journal->j_free - 32; if (journal->j_committing_transaction) { - free -= atomic_read(&journal-> - j_committing_transaction->t_outstanding_credits); + unsigned long committing = atomic_read(&journal-> + j_committing_transaction->t_outstanding_credits); + + /* Transaction + control blocks */ + free -= committing + (committing >> JBD2_CONTROL_BLOCKS_SHIFT); } - return max_t(long, free, 0); + return free; } /* @@ -1792,6 +1497,7 @@ static inline u32 jbd2_chksum(journal_t *journal, u32 crc, JBD_MAX_CHECKSUM_SIZE); desc.shash.tfm = journal->j_chksum_driver; + desc.shash.flags = 0; *(u32 *)desc.ctx = crc; err = crypto_shash_update(&desc.shash, address, length); @@ -1813,20 +1519,6 @@ static inline tid_t jbd2_get_latest_transaction(journal_t *journal) return tid; } -static inline int jbd2_handle_buffer_credits(handle_t *handle) -{ - journal_t *journal; - - if (!handle->h_reserved) - journal = handle->h_transaction->t_journal; - else - journal = handle->h_journal; - - return handle->h_total_credits - - DIV_ROUND_UP(handle->h_revoke_credits_requested, - journal->j_revoke_records_per_block); -} - #ifdef __KERNEL__ #define buffer_trace_init(bh) do {} while (0) diff --git a/include/linux/jhash.h b/include/linux/jhash.h index ab7f8c152b..348c6f47e4 100644 --- a/include/linux/jhash.h +++ b/include/linux/jhash.h @@ -5,7 +5,7 @@ * * Copyright (C) 2006. 
Bob Jenkins (bob_jenkins@burtleburtle.net) * - * https://burtleburtle.net/bob/hash/ + * http://burtleburtle.net/bob/hash/ * * These are the credits from Bob's sources: * @@ -17,7 +17,7 @@ * if SELF_TEST is defined. You can use this free for any purpose. It's in * the public domain. It has no warranty. * - * Copyright (C) 2009-2010 Jozsef Kadlecsik (kadlec@netfilter.org) + * Copyright (C) 2009-2010 Jozsef Kadlecsik (kadlec@blackhole.kfki.hu) * * I've modified Bob's hash to be useful in the Linux kernel, and * any bugs present are my fault. @@ -85,21 +85,21 @@ static inline u32 jhash(const void *key, u32 length, u32 initval) k += 12; } /* Last block: affect all 32 bits of (c) */ + /* All the case statements fall through */ switch (length) { - case 12: c += (u32)k[11]<<24; fallthrough; - case 11: c += (u32)k[10]<<16; fallthrough; - case 10: c += (u32)k[9]<<8; fallthrough; - case 9: c += k[8]; fallthrough; - case 8: b += (u32)k[7]<<24; fallthrough; - case 7: b += (u32)k[6]<<16; fallthrough; - case 6: b += (u32)k[5]<<8; fallthrough; - case 5: b += k[4]; fallthrough; - case 4: a += (u32)k[3]<<24; fallthrough; - case 3: a += (u32)k[2]<<16; fallthrough; - case 2: a += (u32)k[1]<<8; fallthrough; + case 12: c += (u32)k[11]<<24; + case 11: c += (u32)k[10]<<16; + case 10: c += (u32)k[9]<<8; + case 9: c += k[8]; + case 8: b += (u32)k[7]<<24; + case 7: b += (u32)k[6]<<16; + case 6: b += (u32)k[5]<<8; + case 5: b += k[4]; + case 4: a += (u32)k[3]<<24; + case 3: a += (u32)k[2]<<16; + case 2: a += (u32)k[1]<<8; case 1: a += k[0]; __jhash_final(a, b, c); - break; case 0: /* Nothing left to add */ break; } @@ -131,13 +131,12 @@ static inline u32 jhash2(const u32 *k, u32 length, u32 initval) k += 3; } - /* Handle the last 3 u32's */ + /* Handle the last 3 u32's: all the case statements fall through */ switch (length) { - case 3: c += k[2]; fallthrough; - case 2: b += k[1]; fallthrough; + case 3: c += k[2]; + case 2: b += k[1]; case 1: a += k[0]; __jhash_final(a, b, c); - break; case 
0: /* Nothing left to add */ break; } diff --git a/include/linux/jiffies.h b/include/linux/jiffies.h index 5e13f801c9..4a7c48c330 100644 --- a/include/linux/jiffies.h +++ b/include/linux/jiffies.h @@ -1,15 +1,11 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_JIFFIES_H #define _LINUX_JIFFIES_H -#include -#include #include -#include +#include #include #include #include -#include #include /* for HZ */ #include @@ -61,23 +57,25 @@ extern int register_refined_jiffies(long clock_tick_rate); -/* TICK_USEC is the time between ticks in usec assuming SHIFTED_HZ */ -#define TICK_USEC ((USEC_PER_SEC + HZ/2) / HZ) +/* TICK_NSEC is the time between ticks in nsec assuming SHIFTED_HZ */ +#define TICK_NSEC ((NSEC_PER_SEC+HZ/2)/HZ) -/* USER_TICK_USEC is the time between ticks in usec assuming fake USER_HZ */ -#define USER_TICK_USEC ((1000000UL + USER_HZ/2) / USER_HZ) +/* TICK_USEC is the time between ticks in usec assuming fake USER_HZ */ +#define TICK_USEC ((1000000UL + USER_HZ/2) / USER_HZ) -#ifndef __jiffy_arch_data -#define __jiffy_arch_data -#endif +/* some arch's have a small-data section that can be accessed register-relative + * but that can only take up to, say, 4-byte variables. jiffies being part of + * an 8-byte variable may not be correctly accessed unless we force the issue + */ +#define __jiffy_data __attribute__((section(".data"))) /* * The 64-bit value is not atomic - you MUST NOT read it * without sampling the sequence number in jiffies_lock. * get_jiffies_64() will do this for you as appropriate. 
*/ -extern u64 __cacheline_aligned_in_smp jiffies_64; -extern unsigned long volatile __cacheline_aligned_in_smp __jiffy_arch_data jiffies; +extern u64 __jiffy_data jiffies_64; +extern unsigned long volatile __jiffy_data jiffies; #if (BITS_PER_LONG < 64) u64 get_jiffies_64(void); @@ -290,22 +288,19 @@ extern unsigned long preset_lpj; extern unsigned int jiffies_to_msecs(const unsigned long j); extern unsigned int jiffies_to_usecs(const unsigned long j); -static inline u64 jiffies_to_nsecs(const unsigned long j) +static inline u64 __intentional_overflow(-1) jiffies_to_nsecs(const unsigned long j) { return (u64)jiffies_to_usecs(j) * NSEC_PER_USEC; } -extern u64 jiffies64_to_nsecs(u64 j); -extern u64 jiffies64_to_msecs(u64 j); - -extern unsigned long __msecs_to_jiffies(const unsigned int m); +extern unsigned long __msecs_to_jiffies(const unsigned int m) __intentional_overflow(-1); #if HZ <= MSEC_PER_SEC && !(MSEC_PER_SEC % HZ) /* * HZ is equal to or smaller than 1000, and 1000 is a nice round * multiple of HZ, divide with the factor between them, but round * upwards: */ -static inline unsigned long _msecs_to_jiffies(const unsigned int m) +static inline unsigned long __intentional_overflow(-1) _msecs_to_jiffies(const unsigned int m) { return (m + (MSEC_PER_SEC / HZ) - 1) / (MSEC_PER_SEC / HZ); } @@ -316,7 +311,7 @@ static inline unsigned long _msecs_to_jiffies(const unsigned int m) * * But first make sure the multiplication result cannot overflow: */ -static inline unsigned long _msecs_to_jiffies(const unsigned int m) +static inline unsigned long __intentional_overflow(-1) _msecs_to_jiffies(const unsigned int m) { if (m > jiffies_to_msecs(MAX_JIFFY_OFFSET)) return MAX_JIFFY_OFFSET; @@ -327,7 +322,7 @@ static inline unsigned long _msecs_to_jiffies(const unsigned int m) * Generic case - multiply, round and divide. 
But first check that if * we are doing a net multiplication, that we wouldn't overflow: */ -static inline unsigned long _msecs_to_jiffies(const unsigned int m) +static inline unsigned long __intentional_overflow(-1) _msecs_to_jiffies(const unsigned int m) { if (HZ > MSEC_PER_SEC && m > jiffies_to_msecs(MAX_JIFFY_OFFSET)) return MAX_JIFFY_OFFSET; @@ -371,14 +366,14 @@ static __always_inline unsigned long msecs_to_jiffies(const unsigned int m) } } -extern unsigned long __usecs_to_jiffies(const unsigned int u); +extern unsigned long __usecs_to_jiffies(const unsigned int u) __intentional_overflow(-1); #if !(USEC_PER_SEC % HZ) -static inline unsigned long _usecs_to_jiffies(const unsigned int u) +static inline unsigned long __intentional_overflow(-1) _usecs_to_jiffies(const unsigned int u) { return (u + (USEC_PER_SEC / HZ) - 1) / (USEC_PER_SEC / HZ); } #else -static inline unsigned long _usecs_to_jiffies(const unsigned int u) +static inline unsigned long __intentional_overflow(-1) _usecs_to_jiffies(const unsigned int u) { return (USEC_TO_HZ_MUL32 * u + USEC_TO_HZ_ADJ32) >> USEC_TO_HZ_SHR32; @@ -421,17 +416,32 @@ static __always_inline unsigned long usecs_to_jiffies(const unsigned int u) extern unsigned long timespec64_to_jiffies(const struct timespec64 *value); extern void jiffies_to_timespec64(const unsigned long jiffies, struct timespec64 *value); +static inline unsigned long timespec_to_jiffies(const struct timespec *value) +{ + struct timespec64 ts = timespec_to_timespec64(*value); + + return timespec64_to_jiffies(&ts); +} + +static inline void jiffies_to_timespec(const unsigned long jiffies, + struct timespec *value) +{ + struct timespec64 ts; + + jiffies_to_timespec64(jiffies, &ts); + *value = timespec64_to_timespec(ts); +} + +extern unsigned long timeval_to_jiffies(const struct timeval *value); +extern void jiffies_to_timeval(const unsigned long jiffies, + struct timeval *value); + extern clock_t jiffies_to_clock_t(unsigned long x); static inline clock_t 
jiffies_delta_to_clock_t(long delta) { return jiffies_to_clock_t(max(0L, delta)); } -static inline unsigned int jiffies_delta_to_msecs(long delta) -{ - return jiffies_to_msecs(max(0L, delta)); -} - extern unsigned long clock_t_to_jiffies(unsigned long x); extern u64 jiffies_64_to_clock_t(u64 x); extern u64 nsec_to_clock_t(u64 x); diff --git a/include/linux/journal-head.h b/include/linux/journal-head.h index 75bc561090..98cd41bb39 100644 --- a/include/linux/journal-head.h +++ b/include/linux/journal-head.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ /* * include/linux/journal-head.h * @@ -11,8 +10,6 @@ #ifndef JOURNAL_HEAD_H_INCLUDED #define JOURNAL_HEAD_H_INCLUDED -#include - typedef unsigned int tid_t; /* Unique transaction ID */ typedef struct transaction_s transaction_t; /* Compound transaction type */ @@ -25,11 +22,6 @@ struct journal_head { */ struct buffer_head *b_bh; - /* - * Protect the buffer head state - */ - spinlock_t b_state_lock; - /* * Reference count - see description in journal.c * [jbd_lock_bh_journal_head()] @@ -37,7 +29,7 @@ struct journal_head { int b_jcount; /* - * Journalling list for this buffer [b_state_lock] + * Journalling list for this buffer [jbd_lock_bh_state()] * NOTE: We *cannot* combine this with b_modified into a bitfield * as gcc would then (which the C standard allows but which is * very unuseful) make 64-bit accesses to the bitfield and clobber @@ -48,20 +40,20 @@ struct journal_head { /* * This flag signals the buffer has been modified by * the currently running transaction - * [b_state_lock] + * [jbd_lock_bh_state()] */ unsigned b_modified; /* * Copy of the buffer data frozen for writing to the log. - * [b_state_lock] + * [jbd_lock_bh_state()] */ char *b_frozen_data; /* * Pointer to a saved copy of the buffer containing no uncommitted * deallocation references, so that allocations can avoid overwriting - * uncommitted deletes. [b_state_lock] + * uncommitted deletes. 
[jbd_lock_bh_state()] */ char *b_committed_data; @@ -70,7 +62,7 @@ struct journal_head { * metadata: either the running transaction or the committing * transaction (if there is one). Only applies to buffers on a * transaction's data or metadata journaling list. - * [j_list_lock] [b_state_lock] + * [j_list_lock] [jbd_lock_bh_state()] * Either of these locks is enough for reading, both are needed for * changes. */ @@ -80,13 +72,13 @@ struct journal_head { * Pointer to the running compound transaction which is currently * modifying the buffer's metadata, if there was already a transaction * committing it when the new transaction touched it. - * [t_list_lock] [b_state_lock] + * [t_list_lock] [jbd_lock_bh_state()] */ transaction_t *b_next_transaction; /* * Doubly-linked list of buffers on a transaction's data, metadata or - * forget queue. [t_list_lock] [b_state_lock] + * forget queue. [t_list_lock] [jbd_lock_bh_state()] */ struct journal_head *b_tnext, *b_tprev; diff --git a/include/linux/joystick.h b/include/linux/joystick.h index 41b833b012..cbf2aa9e93 100644 --- a/include/linux/joystick.h +++ b/include/linux/joystick.h @@ -1,10 +1,26 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * Copyright (C) 1996-2000 Vojtech Pavlik * * Sponsored by SuSE */ /* + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + * + * Should you need to contact me, the author, you can do so either by + * e-mail - mail your message to , or by paper mail: + * Vojtech Pavlik, Ucitelska 1576, Prague 8, 182 00 Czech Republic */ #ifndef _LINUX_JOYSTICK_H #define _LINUX_JOYSTICK_H diff --git a/include/linux/jump_label.h b/include/linux/jump_label.h index 48b9b2a827..a0547c5718 100644 --- a/include/linux/jump_label.h +++ b/include/linux/jump_label.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_JUMP_LABEL_H #define _LINUX_JUMP_LABEL_H @@ -68,9 +67,13 @@ * Lacking toolchain and or architecture support, static keys fall back to a * simple conditional branch. * - * Additional babbling in: Documentation/staging/static-keys.rst + * Additional babbling in: Documentation/static-keys.txt */ +#if defined(CC_HAVE_ASM_GOTO) && defined(CONFIG_JUMP_LABEL) +# define HAVE_JUMP_LABEL +#endif + #ifndef __ASSEMBLY__ #include @@ -78,117 +81,30 @@ extern bool static_key_initialized; -#define STATIC_KEY_CHECK_USE(key) WARN(!static_key_initialized, \ - "%s(): static key '%pS' used before call to jump_label_init()", \ - __func__, (key)) +#define STATIC_KEY_CHECK_USE() WARN(!static_key_initialized, \ + "%s used before call to jump_label_init", \ + __func__) -#ifdef CONFIG_JUMP_LABEL +#ifdef HAVE_JUMP_LABEL struct static_key { atomic_t enabled; -/* - * Note: - * To make anonymous unions work with old compilers, the static - * initialization of them requires brackets. This creates a dependency - * on the order of the struct with the initializers. If any fields - * are added, STATIC_KEY_INIT_TRUE and STATIC_KEY_INIT_FALSE may need - * to be modified. 
- * - * bit 0 => 1 if key is initially true - * 0 if initially false - * bit 1 => 1 if points to struct static_key_mod - * 0 if points to struct jump_entry - */ - union { - unsigned long type; - struct jump_entry *entries; - struct static_key_mod *next; - }; +/* Set lsb bit to 1 if branch is default true, 0 ot */ + struct jump_entry *entries; +#ifdef CONFIG_MODULES + struct static_key_mod *next; +#endif }; #else struct static_key { atomic_t enabled; }; -#endif /* CONFIG_JUMP_LABEL */ +#endif /* HAVE_JUMP_LABEL */ #endif /* __ASSEMBLY__ */ -#ifdef CONFIG_JUMP_LABEL +#ifdef HAVE_JUMP_LABEL #include - -#ifndef __ASSEMBLY__ -#ifdef CONFIG_HAVE_ARCH_JUMP_LABEL_RELATIVE - -struct jump_entry { - s32 code; - s32 target; - long key; // key may be far away from the core kernel under KASLR -}; - -static inline unsigned long jump_entry_code(const struct jump_entry *entry) -{ - return (unsigned long)&entry->code + entry->code; -} - -static inline unsigned long jump_entry_target(const struct jump_entry *entry) -{ - return (unsigned long)&entry->target + entry->target; -} - -static inline struct static_key *jump_entry_key(const struct jump_entry *entry) -{ - long offset = entry->key & ~3L; - - return (struct static_key *)((unsigned long)&entry->key + offset); -} - -#else - -static inline unsigned long jump_entry_code(const struct jump_entry *entry) -{ - return entry->code; -} - -static inline unsigned long jump_entry_target(const struct jump_entry *entry) -{ - return entry->target; -} - -static inline struct static_key *jump_entry_key(const struct jump_entry *entry) -{ - return (struct static_key *)((unsigned long)entry->key & ~3UL); -} - -#endif - -static inline bool jump_entry_is_branch(const struct jump_entry *entry) -{ - return (unsigned long)entry->key & 1UL; -} - -static inline bool jump_entry_is_init(const struct jump_entry *entry) -{ - return (unsigned long)entry->key & 2UL; -} - -static inline void jump_entry_set_init(struct jump_entry *entry, bool set) -{ - if (set) - 
entry->key |= 2; - else - entry->key &= ~2; -} - -static inline int jump_entry_size(struct jump_entry *entry) -{ -#ifdef JUMP_LABEL_NOP_SIZE - return JUMP_LABEL_NOP_SIZE; -#else - return arch_jump_entry_size(entry); -#endif -} - -#endif #endif #ifndef __ASSEMBLY__ @@ -200,12 +116,11 @@ enum jump_label_type { struct module; -#ifdef CONFIG_JUMP_LABEL +#ifdef HAVE_JUMP_LABEL -#define JUMP_TYPE_FALSE 0UL -#define JUMP_TYPE_TRUE 1UL -#define JUMP_TYPE_LINKED 2UL -#define JUMP_TYPE_MASK 3UL +#define JUMP_TYPE_FALSE 0UL +#define JUMP_TYPE_TRUE 1UL +#define JUMP_TYPE_MASK 1UL static __always_inline bool static_key_false(struct static_key *key) { @@ -227,20 +142,13 @@ extern void arch_jump_label_transform(struct jump_entry *entry, enum jump_label_type type); extern void arch_jump_label_transform_static(struct jump_entry *entry, enum jump_label_type type); -extern bool arch_jump_label_transform_queue(struct jump_entry *entry, - enum jump_label_type type); -extern void arch_jump_label_transform_apply(void); extern int jump_label_text_reserved(void *start, void *end); extern void static_key_slow_inc(struct static_key *key); extern void static_key_slow_dec(struct static_key *key); -extern void static_key_slow_inc_cpuslocked(struct static_key *key); -extern void static_key_slow_dec_cpuslocked(struct static_key *key); extern void jump_label_apply_nops(struct module *mod); extern int static_key_count(struct static_key *key); extern void static_key_enable(struct static_key *key); extern void static_key_disable(struct static_key *key); -extern void static_key_enable_cpuslocked(struct static_key *key); -extern void static_key_disable_cpuslocked(struct static_key *key); /* * We should be using ATOMIC_INIT() for initializing .enabled, but @@ -251,12 +159,12 @@ extern void static_key_disable_cpuslocked(struct static_key *key); */ #define STATIC_KEY_INIT_TRUE \ { .enabled = { 1 }, \ - { .entries = (void *)JUMP_TYPE_TRUE } } + .entries = (void *)JUMP_TYPE_TRUE } #define 
STATIC_KEY_INIT_FALSE \ { .enabled = { 0 }, \ - { .entries = (void *)JUMP_TYPE_FALSE } } + .entries = (void *)JUMP_TYPE_FALSE } -#else /* !CONFIG_JUMP_LABEL */ +#else /* !HAVE_JUMP_LABEL */ #include #include @@ -273,33 +181,30 @@ static __always_inline void jump_label_init(void) static __always_inline bool static_key_false(struct static_key *key) { - if (unlikely_notrace(static_key_count(key) > 0)) + if (unlikely(static_key_count(key) > 0)) return true; return false; } static __always_inline bool static_key_true(struct static_key *key) { - if (likely_notrace(static_key_count(key) > 0)) + if (likely(static_key_count(key) > 0)) return true; return false; } static inline void static_key_slow_inc(struct static_key *key) { - STATIC_KEY_CHECK_USE(key); + STATIC_KEY_CHECK_USE(); atomic_inc(&key->enabled); } static inline void static_key_slow_dec(struct static_key *key) { - STATIC_KEY_CHECK_USE(key); + STATIC_KEY_CHECK_USE(); atomic_dec(&key->enabled); } -#define static_key_slow_inc_cpuslocked(key) static_key_slow_inc(key) -#define static_key_slow_dec_cpuslocked(key) static_key_slow_dec(key) - static inline int jump_label_text_reserved(void *start, void *end) { return 0; @@ -315,33 +220,28 @@ static inline int jump_label_apply_nops(struct module *mod) static inline void static_key_enable(struct static_key *key) { - STATIC_KEY_CHECK_USE(key); + int count = static_key_count(key); - if (atomic_read(&key->enabled) != 0) { - WARN_ON_ONCE(atomic_read(&key->enabled) != 1); - return; - } - atomic_set(&key->enabled, 1); + WARN_ON_ONCE(count < 0 || count > 1); + + if (!count) + static_key_slow_inc(key); } static inline void static_key_disable(struct static_key *key) { - STATIC_KEY_CHECK_USE(key); + int count = static_key_count(key); - if (atomic_read(&key->enabled) != 1) { - WARN_ON_ONCE(atomic_read(&key->enabled) != 0); - return; - } - atomic_set(&key->enabled, 0); + WARN_ON_ONCE(count < 0 || count > 1); + + if (count) + static_key_slow_dec(key); } -#define 
static_key_enable_cpuslocked(k) static_key_enable((k)) -#define static_key_disable_cpuslocked(k) static_key_disable((k)) - #define STATIC_KEY_INIT_TRUE { .enabled = ATOMIC_INIT(1) } #define STATIC_KEY_INIT_FALSE { .enabled = ATOMIC_INIT(0) } -#endif /* CONFIG_JUMP_LABEL */ +#endif /* HAVE_JUMP_LABEL */ #define STATIC_KEY_INIT STATIC_KEY_INIT_FALSE #define jump_label_enabled static_key_enabled @@ -369,18 +269,12 @@ struct static_key_false { #define DEFINE_STATIC_KEY_TRUE(name) \ struct static_key_true name = STATIC_KEY_TRUE_INIT -#define DEFINE_STATIC_KEY_TRUE_RO(name) \ - struct static_key_true name __ro_after_init = STATIC_KEY_TRUE_INIT - #define DECLARE_STATIC_KEY_TRUE(name) \ extern struct static_key_true name #define DEFINE_STATIC_KEY_FALSE(name) \ struct static_key_false name = STATIC_KEY_FALSE_INIT -#define DEFINE_STATIC_KEY_FALSE_RO(name) \ - struct static_key_false name __ro_after_init = STATIC_KEY_FALSE_INIT - #define DECLARE_STATIC_KEY_FALSE(name) \ extern struct static_key_false name @@ -394,21 +288,6 @@ struct static_key_false { [0 ... 
(count) - 1] = STATIC_KEY_FALSE_INIT, \ } -#define _DEFINE_STATIC_KEY_1(name) DEFINE_STATIC_KEY_TRUE(name) -#define _DEFINE_STATIC_KEY_0(name) DEFINE_STATIC_KEY_FALSE(name) -#define DEFINE_STATIC_KEY_MAYBE(cfg, name) \ - __PASTE(_DEFINE_STATIC_KEY_, IS_ENABLED(cfg))(name) - -#define _DEFINE_STATIC_KEY_RO_1(name) DEFINE_STATIC_KEY_TRUE_RO(name) -#define _DEFINE_STATIC_KEY_RO_0(name) DEFINE_STATIC_KEY_FALSE_RO(name) -#define DEFINE_STATIC_KEY_MAYBE_RO(cfg, name) \ - __PASTE(_DEFINE_STATIC_KEY_RO_, IS_ENABLED(cfg))(name) - -#define _DECLARE_STATIC_KEY_1(name) DECLARE_STATIC_KEY_TRUE(name) -#define _DECLARE_STATIC_KEY_0(name) DECLARE_STATIC_KEY_FALSE(name) -#define DECLARE_STATIC_KEY_MAYBE(cfg, name) \ - __PASTE(_DECLARE_STATIC_KEY_, IS_ENABLED(cfg))(name) - extern bool ____wrong_branch_error(void); #define static_key_enabled(x) \ @@ -420,7 +299,7 @@ extern bool ____wrong_branch_error(void); static_key_count((struct static_key *)x) > 0; \ }) -#ifdef CONFIG_JUMP_LABEL +#ifdef HAVE_JUMP_LABEL /* * Combine the right initial value (type) with the right branch order @@ -487,7 +366,7 @@ extern bool ____wrong_branch_error(void); branch = !arch_static_branch_jump(&(x)->key, true); \ else \ branch = ____wrong_branch_error(); \ - likely_notrace(branch); \ + branch; \ }) #define static_branch_unlikely(x) \ @@ -499,19 +378,15 @@ extern bool ____wrong_branch_error(void); branch = arch_static_branch(&(x)->key, false); \ else \ branch = ____wrong_branch_error(); \ - unlikely_notrace(branch); \ + branch; \ }) -#else /* !CONFIG_JUMP_LABEL */ +#else /* !HAVE_JUMP_LABEL */ -#define static_branch_likely(x) likely_notrace(static_key_enabled(&(x)->key)) -#define static_branch_unlikely(x) unlikely_notrace(static_key_enabled(&(x)->key)) +#define static_branch_likely(x) likely(static_key_enabled(&(x)->key)) +#define static_branch_unlikely(x) unlikely(static_key_enabled(&(x)->key)) -#endif /* CONFIG_JUMP_LABEL */ - -#define static_branch_maybe(config, x) \ - (IS_ENABLED(config) ? 
static_branch_likely(x) \ - : static_branch_unlikely(x)) +#endif /* HAVE_JUMP_LABEL */ /* * Advanced usage; refcount, branch is enabled when: count != 0 @@ -519,18 +394,14 @@ extern bool ____wrong_branch_error(void); #define static_branch_inc(x) static_key_slow_inc(&(x)->key) #define static_branch_dec(x) static_key_slow_dec(&(x)->key) -#define static_branch_inc_cpuslocked(x) static_key_slow_inc_cpuslocked(&(x)->key) -#define static_branch_dec_cpuslocked(x) static_key_slow_dec_cpuslocked(&(x)->key) /* * Normal usage; boolean enable/disable. */ -#define static_branch_enable(x) static_key_enable(&(x)->key) -#define static_branch_disable(x) static_key_disable(&(x)->key) -#define static_branch_enable_cpuslocked(x) static_key_enable_cpuslocked(&(x)->key) -#define static_branch_disable_cpuslocked(x) static_key_disable_cpuslocked(&(x)->key) - -#endif /* __ASSEMBLY__ */ +#define static_branch_enable(x) static_key_enable(&(x)->key) +#define static_branch_disable(x) static_key_disable(&(x)->key) #endif /* _LINUX_JUMP_LABEL_H */ + +#endif /* __ASSEMBLY__ */ diff --git a/include/linux/jump_label_ratelimit.h b/include/linux/jump_label_ratelimit.h index 8c3ee291b2..23da3af459 100644 --- a/include/linux/jump_label_ratelimit.h +++ b/include/linux/jump_label_ratelimit.h @@ -1,99 +1,41 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_JUMP_LABEL_RATELIMIT_H #define _LINUX_JUMP_LABEL_RATELIMIT_H #include #include -#if defined(CONFIG_JUMP_LABEL) +#if defined(CC_HAVE_ASM_GOTO) && defined(CONFIG_JUMP_LABEL) struct static_key_deferred { struct static_key key; unsigned long timeout; struct delayed_work work; }; +#endif -struct static_key_true_deferred { - struct static_key_true key; - unsigned long timeout; - struct delayed_work work; -}; - -struct static_key_false_deferred { - struct static_key_false key; - unsigned long timeout; - struct delayed_work work; -}; - -#define static_key_slow_dec_deferred(x) \ - __static_key_slow_dec_deferred(&(x)->key, &(x)->work, (x)->timeout) 
-#define static_branch_slow_dec_deferred(x) \ - __static_key_slow_dec_deferred(&(x)->key.key, &(x)->work, (x)->timeout) - -#define static_key_deferred_flush(x) \ - __static_key_deferred_flush((x), &(x)->work) - -extern void -__static_key_slow_dec_deferred(struct static_key *key, - struct delayed_work *work, - unsigned long timeout); -extern void __static_key_deferred_flush(void *key, struct delayed_work *work); +#ifdef HAVE_JUMP_LABEL +extern void static_key_slow_dec_deferred(struct static_key_deferred *key); +extern void static_key_deferred_flush(struct static_key_deferred *key); extern void jump_label_rate_limit(struct static_key_deferred *key, unsigned long rl); -extern void jump_label_update_timeout(struct work_struct *work); - -#define DEFINE_STATIC_KEY_DEFERRED_TRUE(name, rl) \ - struct static_key_true_deferred name = { \ - .key = { STATIC_KEY_INIT_TRUE }, \ - .timeout = (rl), \ - .work = __DELAYED_WORK_INITIALIZER((name).work, \ - jump_label_update_timeout, \ - 0), \ - } - -#define DEFINE_STATIC_KEY_DEFERRED_FALSE(name, rl) \ - struct static_key_false_deferred name = { \ - .key = { STATIC_KEY_INIT_FALSE }, \ - .timeout = (rl), \ - .work = __DELAYED_WORK_INITIALIZER((name).work, \ - jump_label_update_timeout, \ - 0), \ - } - -#else /* !CONFIG_JUMP_LABEL */ +#else /* !HAVE_JUMP_LABEL */ struct static_key_deferred { struct static_key key; }; -struct static_key_true_deferred { - struct static_key_true key; -}; -struct static_key_false_deferred { - struct static_key_false key; -}; -#define DEFINE_STATIC_KEY_DEFERRED_TRUE(name, rl) \ - struct static_key_true_deferred name = { STATIC_KEY_TRUE_INIT } -#define DEFINE_STATIC_KEY_DEFERRED_FALSE(name, rl) \ - struct static_key_false_deferred name = { STATIC_KEY_FALSE_INIT } - -#define static_branch_slow_dec_deferred(x) static_branch_dec(&(x)->key) - static inline void static_key_slow_dec_deferred(struct static_key_deferred *key) { - STATIC_KEY_CHECK_USE(key); + STATIC_KEY_CHECK_USE(); static_key_slow_dec(&key->key); } 
-static inline void static_key_deferred_flush(void *key) +static inline void static_key_deferred_flush(struct static_key_deferred *key) { - STATIC_KEY_CHECK_USE(key); + STATIC_KEY_CHECK_USE(); } static inline void jump_label_rate_limit(struct static_key_deferred *key, unsigned long rl) { - STATIC_KEY_CHECK_USE(key); + STATIC_KEY_CHECK_USE(); } -#endif /* CONFIG_JUMP_LABEL */ - -#define static_branch_deferred_inc(x) static_branch_inc(&(x)->key) - +#endif /* HAVE_JUMP_LABEL */ #endif /* _LINUX_JUMP_LABEL_RATELIMIT_H */ diff --git a/include/linux/jz4740-adc.h b/include/linux/jz4740-adc.h index 19d995c8bf..8184578fbf 100644 --- a/include/linux/jz4740-adc.h +++ b/include/linux/jz4740-adc.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef __LINUX_JZ4740_ADC #define __LINUX_JZ4740_ADC diff --git a/include/linux/jz4780-nemc.h b/include/linux/jz4780-nemc.h index bd7fad9102..e7f1cc7a22 100644 --- a/include/linux/jz4780-nemc.h +++ b/include/linux/jz4780-nemc.h @@ -1,9 +1,13 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * JZ4780 NAND/external memory controller (NEMC) * * Copyright (c) 2015 Imagination Technologies * Author: Alex Smith + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. 
*/ #ifndef __LINUX_JZ4780_NEMC_H__ diff --git a/include/linux/kallsyms.h b/include/linux/kallsyms.h index a1d6fc82d7..d2c774623a 100644 --- a/include/linux/kallsyms.h +++ b/include/linux/kallsyms.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ /* Rewritten and vastly simplified by Rusty Russell for in-kernel * module loader: * Copyright 2002 Rusty Russell IBM Corporation @@ -7,81 +6,25 @@ #define _LINUX_KALLSYMS_H #include -#include #include #include -#include -#include - -#include #define KSYM_NAME_LEN 128 -#define KSYM_SYMBOL_LEN (sizeof("%s+%#lx/%#lx [%s %s]") + \ - (KSYM_NAME_LEN - 1) + \ - 2*(BITS_PER_LONG*3/10) + (MODULE_NAME_LEN - 1) + \ - (BUILD_ID_SIZE_MAX * 2) + 1) +#define KSYM_SYMBOL_LEN (sizeof("%s+%#lx/%#lx [%s]") + (KSYM_NAME_LEN - 1) + \ + 2*(BITS_PER_LONG*3/10) + (MODULE_NAME_LEN - 1) + 1) -struct cred; struct module; -static inline int is_kernel_inittext(unsigned long addr) -{ - if (addr >= (unsigned long)_sinittext - && addr <= (unsigned long)_einittext) - return 1; - return 0; -} - -static inline int is_kernel_text(unsigned long addr) -{ - if ((addr >= (unsigned long)_stext && addr <= (unsigned long)_etext) || - arch_is_kernel_text(addr)) - return 1; - return in_gate_area_no_mm(addr); -} - -static inline int is_kernel(unsigned long addr) -{ - if (addr >= (unsigned long)_stext && addr <= (unsigned long)_end) - return 1; - return in_gate_area_no_mm(addr); -} - -static inline int is_ksym_addr(unsigned long addr) -{ - if (IS_ENABLED(CONFIG_KALLSYMS_ALL)) - return is_kernel(addr); - - return is_kernel_text(addr) || is_kernel_inittext(addr); -} - -static inline void *dereference_symbol_descriptor(void *ptr) -{ -#ifdef HAVE_DEREFERENCE_FUNCTION_DESCRIPTOR - struct module *mod; - - ptr = dereference_kernel_function_descriptor(ptr); - if (is_ksym_addr((unsigned long)ptr)) - return ptr; - - preempt_disable(); - mod = __module_address((unsigned long)ptr); - preempt_enable(); - - if (mod) - ptr = dereference_module_function_descriptor(mod, ptr); 
-#endif - return ptr; -} +#if !defined(__INCLUDED_BY_HIDESYM) || !defined(CONFIG_KALLSYMS) +#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM) +/* Lookup the address for a symbol. Returns 0 if not found. */ +unsigned long kallsyms_lookup_name(const char *name); +/* Call a function on each kallsyms symbol in the core kernel */ int kallsyms_on_each_symbol(int (*fn)(void *, const char *, struct module *, unsigned long), void *data); -#ifdef CONFIG_KALLSYMS -/* Lookup the address for a symbol. Returns 0 if not found. */ -unsigned long kallsyms_lookup_name(const char *name); - extern int kallsyms_lookup_size_offset(unsigned long addr, unsigned long *symbolsize, unsigned long *offset); @@ -94,17 +37,15 @@ const char *kallsyms_lookup(unsigned long addr, /* Look up a kernel symbol and return it in a text buffer. */ extern int sprint_symbol(char *buffer, unsigned long address); -extern int sprint_symbol_build_id(char *buffer, unsigned long address); extern int sprint_symbol_no_offset(char *buffer, unsigned long address); extern int sprint_backtrace(char *buffer, unsigned long address); -extern int sprint_backtrace_build_id(char *buffer, unsigned long address); + +/* Look up a kernel symbol and print it to the kernel messages. */ +extern __printf(1, 3) void __print_symbol(const char *fmt, unsigned long address, ...); int lookup_symbol_name(unsigned long addr, char *symname); int lookup_symbol_attrs(unsigned long addr, unsigned long *size, unsigned long *offset, char *modname, char *name); -/* How and when do we show kallsyms values? 
*/ -extern bool kallsyms_show_value(const struct cred *cred); - #else /* !CONFIG_KALLSYMS */ static inline unsigned long kallsyms_lookup_name(const char *name) @@ -112,6 +53,14 @@ static inline unsigned long kallsyms_lookup_name(const char *name) return 0; } +static inline int kallsyms_on_each_symbol(int (*fn)(void *, const char *, + struct module *, + unsigned long), + void *data) +{ + return 0; +} + static inline int kallsyms_lookup_size_offset(unsigned long addr, unsigned long *symbolsize, unsigned long *offset) @@ -133,12 +82,6 @@ static inline int sprint_symbol(char *buffer, unsigned long addr) return 0; } -static inline int sprint_symbol_build_id(char *buffer, unsigned long address) -{ - *buffer = '\0'; - return 0; -} - static inline int sprint_symbol_no_offset(char *buffer, unsigned long addr) { *buffer = '\0'; @@ -151,12 +94,6 @@ static inline int sprint_backtrace(char *buffer, unsigned long addr) return 0; } -static inline int sprint_backtrace_build_id(char *buffer, unsigned long addr) -{ - *buffer = '\0'; - return 0; -} - static inline int lookup_symbol_name(unsigned long addr, char *symname) { return -ERANGE; @@ -167,16 +104,31 @@ static inline int lookup_symbol_attrs(unsigned long addr, unsigned long *size, u return -ERANGE; } -static inline bool kallsyms_show_value(const struct cred *cred) -{ - return false; -} - +/* Stupid that this does nothing, but I didn't create this mess. */ +#define __print_symbol(fmt, addr, args...) 
#endif /*CONFIG_KALLSYMS*/ +#else /* when included by kallsyms.c, vsnprintf.c, kprobes.c, or + arch/x86/kernel/dumpstack.c, with HIDESYM enabled */ +extern unsigned long kallsyms_lookup_name(const char *name); +extern __printf(1, 3) void __print_symbol(const char *fmt, unsigned long address, ...); +extern int sprint_backtrace(char *buffer, unsigned long address); +extern int sprint_symbol(char *buffer, unsigned long address); +extern int sprint_symbol_no_offset(char *buffer, unsigned long address); +const char *kallsyms_lookup(unsigned long addr, + unsigned long *symbolsize, + unsigned long *offset, + char **modname, char *namebuf); +extern int kallsyms_lookup_size_offset(unsigned long addr, + unsigned long *symbolsize, + unsigned long *offset); +#endif -static inline void print_ip_sym(const char *loglvl, unsigned long ip) +#define print_symbol(fmt, addr) \ + __print_symbol(fmt, addr, "") + +static inline void print_ip_sym(unsigned long ip) { - printk("%s[<%px>] %pS\n", loglvl, (void *) ip, (void *) ip); + printk("[<%p>] %pS\n", (void *) ip, (void *) ip); } #endif /*_LINUX_KALLSYMS_H*/ diff --git a/include/linux/kasan-checks.h b/include/linux/kasan-checks.h index 3d6d22a25b..b7f8aced78 100644 --- a/include/linux/kasan-checks.h +++ b/include/linux/kasan-checks.h @@ -1,50 +1,12 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_KASAN_CHECKS_H #define _LINUX_KASAN_CHECKS_H -#include - -/* - * The annotations present in this file are only relevant for the software - * KASAN modes that rely on compiler instrumentation, and will be optimized - * away for the hardware tag-based KASAN mode. Use kasan_check_byte() instead. - */ - -/* - * __kasan_check_*: Always available when KASAN is enabled. This may be used - * even in compilation units that selectively disable KASAN, but must use KASAN - * to validate access to an address. Never use these in header files! 
- */ -#if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS) -bool __kasan_check_read(const volatile void *p, unsigned int size); -bool __kasan_check_write(const volatile void *p, unsigned int size); +#ifdef CONFIG_KASAN +void kasan_check_read(const void *p, unsigned int size); +void kasan_check_write(const void *p, unsigned int size); #else -static inline bool __kasan_check_read(const volatile void *p, unsigned int size) -{ - return true; -} -static inline bool __kasan_check_write(const volatile void *p, unsigned int size) -{ - return true; -} -#endif - -/* - * kasan_check_*: Only available when the particular compilation unit has KASAN - * instrumentation enabled. May be used in header files. - */ -#ifdef __SANITIZE_ADDRESS__ -#define kasan_check_read __kasan_check_read -#define kasan_check_write __kasan_check_write -#else -static inline bool kasan_check_read(const volatile void *p, unsigned int size) -{ - return true; -} -static inline bool kasan_check_write(const volatile void *p, unsigned int size) -{ - return true; -} +static inline void kasan_check_read(const void *p, unsigned int size) { } +static inline void kasan_check_write(const void *p, unsigned int size) { } #endif #endif diff --git a/include/linux/kasan.h b/include/linux/kasan.h index dd874a1ee8..820c0ad54a 100644 --- a/include/linux/kasan.h +++ b/include/linux/kasan.h @@ -1,53 +1,26 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_KASAN_H #define _LINUX_KASAN_H -#include -#include -#include +#include #include struct kmem_cache; struct page; struct vm_struct; -struct task_struct; #ifdef CONFIG_KASAN -#include +#define KASAN_SHADOW_SCALE_SHIFT 3 + #include +#include -/* kasan_data struct is used in KUnit tests for KASAN expected failures */ -struct kunit_kasan_expectation { - bool report_found; -}; +extern unsigned char kasan_zero_page[PAGE_SIZE]; +extern pte_t kasan_zero_pte[PTRS_PER_PTE]; +extern pmd_t kasan_zero_pmd[PTRS_PER_PMD]; +extern pud_t kasan_zero_pud[PTRS_PER_PUD]; 
-#endif - -#if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS) - -#include - -/* Software KASAN implementations use shadow memory. */ - -#ifdef CONFIG_KASAN_SW_TAGS -/* This matches KASAN_TAG_INVALID. */ -#define KASAN_SHADOW_INIT 0xFE -#else -#define KASAN_SHADOW_INIT 0 -#endif - -#ifndef PTE_HWTABLE_PTRS -#define PTE_HWTABLE_PTRS 0 -#endif - -extern unsigned char kasan_early_shadow_page[PAGE_SIZE]; -extern pte_t kasan_early_shadow_pte[MAX_PTRS_PER_PTE + PTE_HWTABLE_PTRS]; -extern pmd_t kasan_early_shadow_pmd[MAX_PTRS_PER_PMD]; -extern pud_t kasan_early_shadow_pud[MAX_PTRS_PER_PUD]; -extern p4d_t kasan_early_shadow_p4d[MAX_PTRS_PER_P4D]; - -int kasan_populate_early_shadow(const void *shadow_start, +void kasan_populate_zero_shadow(const void *shadow_start, const void *shadow_end); static inline void *kasan_mem_to_shadow(const void *addr) @@ -56,425 +29,106 @@ static inline void *kasan_mem_to_shadow(const void *addr) + KASAN_SHADOW_OFFSET; } -int kasan_add_zero_shadow(void *start, unsigned long size); -void kasan_remove_zero_shadow(void *start, unsigned long size); - /* Enable reporting bugs after kasan_disable_current() */ -extern void kasan_enable_current(void); +static inline void kasan_enable_current(void) +{ + current->kasan_depth++; +} /* Disable reporting bugs for current task */ -extern void kasan_disable_current(void); - -#else /* CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS */ - -static inline int kasan_add_zero_shadow(void *start, unsigned long size) +static inline void kasan_disable_current(void) { - return 0; -} -static inline void kasan_remove_zero_shadow(void *start, - unsigned long size) -{} - -static inline void kasan_enable_current(void) {} -static inline void kasan_disable_current(void) {} - -#endif /* CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS */ - -#ifdef CONFIG_KASAN_HW_TAGS - -DECLARE_STATIC_KEY_FALSE(kasan_flag_enabled); - -static __always_inline bool kasan_enabled(void) -{ - return static_branch_likely(&kasan_flag_enabled); 
+ current->kasan_depth--; } -static inline bool kasan_has_integrated_init(void) -{ - return kasan_enabled(); -} +void kasan_unpoison_shadow(const void *address, size_t size); -void kasan_alloc_pages(struct page *page, unsigned int order, gfp_t flags); +void kasan_unpoison_task_stack(struct task_struct *task); +void kasan_unpoison_stack_above_sp_to(const void *watermark); + +void kasan_alloc_pages(struct page *page, unsigned int order); void kasan_free_pages(struct page *page, unsigned int order); -#else /* CONFIG_KASAN_HW_TAGS */ +void kasan_cache_create(struct kmem_cache *cache, size_t *size, + unsigned long *flags); +void kasan_cache_shrink(struct kmem_cache *cache); +void kasan_cache_destroy(struct kmem_cache *cache); -static inline bool kasan_enabled(void) -{ - return IS_ENABLED(CONFIG_KASAN); -} +void kasan_poison_slab(struct page *page); +void kasan_unpoison_object_data(struct kmem_cache *cache, void *object); +void kasan_poison_object_data(struct kmem_cache *cache, void *object); +void kasan_init_slab_obj(struct kmem_cache *cache, const void *object); -static inline bool kasan_has_integrated_init(void) -{ - return false; -} +void kasan_kmalloc_large(const void *ptr, size_t size, gfp_t flags); +void kasan_kfree_large(const void *ptr); +void kasan_poison_kfree(void *ptr); +void kasan_kmalloc(struct kmem_cache *s, const void *object, size_t size, + gfp_t flags); +void kasan_krealloc(const void *object, size_t new_size, gfp_t flags); -static __always_inline void kasan_alloc_pages(struct page *page, - unsigned int order, gfp_t flags) -{ - /* Only available for integrated init. */ - BUILD_BUG(); -} - -static __always_inline void kasan_free_pages(struct page *page, - unsigned int order) -{ - /* Only available for integrated init. 
*/ - BUILD_BUG(); -} - -#endif /* CONFIG_KASAN_HW_TAGS */ - -#ifdef CONFIG_KASAN +void kasan_slab_alloc(struct kmem_cache *s, void *object, gfp_t flags); +bool kasan_slab_free(struct kmem_cache *s, void *object); struct kasan_cache { int alloc_meta_offset; int free_meta_offset; - bool is_kmalloc; }; -slab_flags_t __kasan_never_merge(void); -static __always_inline slab_flags_t kasan_never_merge(void) -{ - if (kasan_enabled()) - return __kasan_never_merge(); - return 0; -} +int kasan_module_alloc(void *addr, size_t size); +void kasan_free_shadow(const struct vm_struct *vm); -void __kasan_unpoison_range(const void *addr, size_t size); -static __always_inline void kasan_unpoison_range(const void *addr, size_t size) -{ - if (kasan_enabled()) - __kasan_unpoison_range(addr, size); -} - -void __kasan_poison_pages(struct page *page, unsigned int order, bool init); -static __always_inline void kasan_poison_pages(struct page *page, - unsigned int order, bool init) -{ - if (kasan_enabled()) - __kasan_poison_pages(page, order, init); -} - -void __kasan_unpoison_pages(struct page *page, unsigned int order, bool init); -static __always_inline void kasan_unpoison_pages(struct page *page, - unsigned int order, bool init) -{ - if (kasan_enabled()) - __kasan_unpoison_pages(page, order, init); -} - -void __kasan_cache_create(struct kmem_cache *cache, unsigned int *size, - slab_flags_t *flags); -static __always_inline void kasan_cache_create(struct kmem_cache *cache, - unsigned int *size, slab_flags_t *flags) -{ - if (kasan_enabled()) - __kasan_cache_create(cache, size, flags); -} - -void __kasan_cache_create_kmalloc(struct kmem_cache *cache); -static __always_inline void kasan_cache_create_kmalloc(struct kmem_cache *cache) -{ - if (kasan_enabled()) - __kasan_cache_create_kmalloc(cache); -} - -size_t __kasan_metadata_size(struct kmem_cache *cache); -static __always_inline size_t kasan_metadata_size(struct kmem_cache *cache) -{ - if (kasan_enabled()) - return 
__kasan_metadata_size(cache); - return 0; -} - -void __kasan_poison_slab(struct page *page); -static __always_inline void kasan_poison_slab(struct page *page) -{ - if (kasan_enabled()) - __kasan_poison_slab(page); -} - -void __kasan_unpoison_object_data(struct kmem_cache *cache, void *object); -static __always_inline void kasan_unpoison_object_data(struct kmem_cache *cache, - void *object) -{ - if (kasan_enabled()) - __kasan_unpoison_object_data(cache, object); -} - -void __kasan_poison_object_data(struct kmem_cache *cache, void *object); -static __always_inline void kasan_poison_object_data(struct kmem_cache *cache, - void *object) -{ - if (kasan_enabled()) - __kasan_poison_object_data(cache, object); -} - -void * __must_check __kasan_init_slab_obj(struct kmem_cache *cache, - const void *object); -static __always_inline void * __must_check kasan_init_slab_obj( - struct kmem_cache *cache, const void *object) -{ - if (kasan_enabled()) - return __kasan_init_slab_obj(cache, object); - return (void *)object; -} - -bool __kasan_slab_free(struct kmem_cache *s, void *object, - unsigned long ip, bool init); -static __always_inline bool kasan_slab_free(struct kmem_cache *s, - void *object, bool init) -{ - if (kasan_enabled()) - return __kasan_slab_free(s, object, _RET_IP_, init); - return false; -} - -void __kasan_kfree_large(void *ptr, unsigned long ip); -static __always_inline void kasan_kfree_large(void *ptr) -{ - if (kasan_enabled()) - __kasan_kfree_large(ptr, _RET_IP_); -} - -void __kasan_slab_free_mempool(void *ptr, unsigned long ip); -static __always_inline void kasan_slab_free_mempool(void *ptr) -{ - if (kasan_enabled()) - __kasan_slab_free_mempool(ptr, _RET_IP_); -} - -void * __must_check __kasan_slab_alloc(struct kmem_cache *s, - void *object, gfp_t flags, bool init); -static __always_inline void * __must_check kasan_slab_alloc( - struct kmem_cache *s, void *object, gfp_t flags, bool init) -{ - if (kasan_enabled()) - return __kasan_slab_alloc(s, object, flags, 
init); - return object; -} - -void * __must_check __kasan_kmalloc(struct kmem_cache *s, const void *object, - size_t size, gfp_t flags); -static __always_inline void * __must_check kasan_kmalloc(struct kmem_cache *s, - const void *object, size_t size, gfp_t flags) -{ - if (kasan_enabled()) - return __kasan_kmalloc(s, object, size, flags); - return (void *)object; -} - -void * __must_check __kasan_kmalloc_large(const void *ptr, - size_t size, gfp_t flags); -static __always_inline void * __must_check kasan_kmalloc_large(const void *ptr, - size_t size, gfp_t flags) -{ - if (kasan_enabled()) - return __kasan_kmalloc_large(ptr, size, flags); - return (void *)ptr; -} - -void * __must_check __kasan_krealloc(const void *object, - size_t new_size, gfp_t flags); -static __always_inline void * __must_check kasan_krealloc(const void *object, - size_t new_size, gfp_t flags) -{ - if (kasan_enabled()) - return __kasan_krealloc(object, new_size, flags); - return (void *)object; -} - -/* - * Unlike kasan_check_read/write(), kasan_check_byte() is performed even for - * the hardware tag-based mode that doesn't rely on compiler instrumentation. 
- */ -bool __kasan_check_byte(const void *addr, unsigned long ip); -static __always_inline bool kasan_check_byte(const void *addr) -{ - if (kasan_enabled()) - return __kasan_check_byte(addr, _RET_IP_); - return true; -} - - -bool kasan_save_enable_multi_shot(void); -void kasan_restore_multi_shot(bool enabled); +size_t ksize(const void *); +static inline void kasan_unpoison_slab(const void *ptr) { ksize(ptr); } +size_t kasan_metadata_size(struct kmem_cache *cache); #else /* CONFIG_KASAN */ -static inline slab_flags_t kasan_never_merge(void) -{ - return 0; -} -static inline void kasan_unpoison_range(const void *address, size_t size) {} -static inline void kasan_poison_pages(struct page *page, unsigned int order, - bool init) {} -static inline void kasan_unpoison_pages(struct page *page, unsigned int order, - bool init) {} +static inline void kasan_unpoison_shadow(const void *address, size_t size) {} + +static inline void kasan_unpoison_task_stack(struct task_struct *task) {} +static inline void kasan_unpoison_stack_above_sp_to(const void *watermark) {} + +static inline void kasan_enable_current(void) {} +static inline void kasan_disable_current(void) {} + +static inline void kasan_alloc_pages(struct page *page, unsigned int order) {} +static inline void kasan_free_pages(struct page *page, unsigned int order) {} + static inline void kasan_cache_create(struct kmem_cache *cache, - unsigned int *size, - slab_flags_t *flags) {} -static inline void kasan_cache_create_kmalloc(struct kmem_cache *cache) {} -static inline size_t kasan_metadata_size(struct kmem_cache *cache) { return 0; } + size_t *size, + unsigned long *flags) {} +static inline void kasan_cache_shrink(struct kmem_cache *cache) {} +static inline void kasan_cache_destroy(struct kmem_cache *cache) {} + static inline void kasan_poison_slab(struct page *page) {} static inline void kasan_unpoison_object_data(struct kmem_cache *cache, void *object) {} static inline void kasan_poison_object_data(struct kmem_cache 
*cache, void *object) {} -static inline void *kasan_init_slab_obj(struct kmem_cache *cache, - const void *object) -{ - return (void *)object; -} -static inline bool kasan_slab_free(struct kmem_cache *s, void *object, bool init) +static inline void kasan_init_slab_obj(struct kmem_cache *cache, + const void *object) {} + +static inline void kasan_kmalloc_large(void *ptr, size_t size, gfp_t flags) {} +static inline void kasan_kfree_large(const void *ptr) {} +static inline void kasan_poison_kfree(void *ptr) {} +static inline void kasan_kmalloc(struct kmem_cache *s, const void *object, + size_t size, gfp_t flags) {} +static inline void kasan_krealloc(const void *object, size_t new_size, + gfp_t flags) {} + +static inline void kasan_slab_alloc(struct kmem_cache *s, void *object, + gfp_t flags) {} +static inline bool kasan_slab_free(struct kmem_cache *s, void *object) { return false; } -static inline void kasan_kfree_large(void *ptr) {} -static inline void kasan_slab_free_mempool(void *ptr) {} -static inline void *kasan_slab_alloc(struct kmem_cache *s, void *object, - gfp_t flags, bool init) -{ - return object; -} -static inline void *kasan_kmalloc(struct kmem_cache *s, const void *object, - size_t size, gfp_t flags) -{ - return (void *)object; -} -static inline void *kasan_kmalloc_large(const void *ptr, size_t size, gfp_t flags) -{ - return (void *)ptr; -} -static inline void *kasan_krealloc(const void *object, size_t new_size, - gfp_t flags) -{ - return (void *)object; -} -static inline bool kasan_check_byte(const void *address) -{ - return true; -} - -#endif /* CONFIG_KASAN */ - -#if defined(CONFIG_KASAN) && defined(CONFIG_KASAN_STACK) -void kasan_unpoison_task_stack(struct task_struct *task); -#else -static inline void kasan_unpoison_task_stack(struct task_struct *task) {} -#endif - -#ifdef CONFIG_KASAN_GENERIC - -void kasan_cache_shrink(struct kmem_cache *cache); -void kasan_cache_shutdown(struct kmem_cache *cache); -void kasan_record_aux_stack(void *ptr); - -#else 
/* CONFIG_KASAN_GENERIC */ - -static inline void kasan_cache_shrink(struct kmem_cache *cache) {} -static inline void kasan_cache_shutdown(struct kmem_cache *cache) {} -static inline void kasan_record_aux_stack(void *ptr) {} - -#endif /* CONFIG_KASAN_GENERIC */ - -#if defined(CONFIG_KASAN_SW_TAGS) || defined(CONFIG_KASAN_HW_TAGS) - -static inline void *kasan_reset_tag(const void *addr) -{ - return (void *)arch_kasan_reset_tag(addr); -} - -/** - * kasan_report - print a report about a bad memory access detected by KASAN - * @addr: address of the bad access - * @size: size of the bad access - * @is_write: whether the bad access is a write or a read - * @ip: instruction pointer for the accessibility check or the bad access itself - */ -bool kasan_report(unsigned long addr, size_t size, - bool is_write, unsigned long ip); - -#else /* CONFIG_KASAN_SW_TAGS || CONFIG_KASAN_HW_TAGS */ - -static inline void *kasan_reset_tag(const void *addr) -{ - return (void *)addr; -} - -#endif /* CONFIG_KASAN_SW_TAGS || CONFIG_KASAN_HW_TAGS*/ - -#ifdef CONFIG_KASAN_HW_TAGS - -void kasan_report_async(void); - -#endif /* CONFIG_KASAN_HW_TAGS */ - -#ifdef CONFIG_KASAN_SW_TAGS -void __init kasan_init_sw_tags(void); -#else -static inline void kasan_init_sw_tags(void) { } -#endif - -#ifdef CONFIG_KASAN_HW_TAGS -void kasan_init_hw_tags_cpu(void); -void __init kasan_init_hw_tags(void); -#else -static inline void kasan_init_hw_tags_cpu(void) { } -static inline void kasan_init_hw_tags(void) { } -#endif - -#ifdef CONFIG_KASAN_VMALLOC - -int kasan_populate_vmalloc(unsigned long addr, unsigned long size); -void kasan_poison_vmalloc(const void *start, unsigned long size); -void kasan_unpoison_vmalloc(const void *start, unsigned long size); -void kasan_release_vmalloc(unsigned long start, unsigned long end, - unsigned long free_region_start, - unsigned long free_region_end); - -#else /* CONFIG_KASAN_VMALLOC */ - -static inline int kasan_populate_vmalloc(unsigned long start, - unsigned long size) -{ - 
return 0; -} - -static inline void kasan_poison_vmalloc(const void *start, unsigned long size) -{ } -static inline void kasan_unpoison_vmalloc(const void *start, unsigned long size) -{ } -static inline void kasan_release_vmalloc(unsigned long start, - unsigned long end, - unsigned long free_region_start, - unsigned long free_region_end) {} - -#endif /* CONFIG_KASAN_VMALLOC */ - -#if (defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)) && \ - !defined(CONFIG_KASAN_VMALLOC) - -/* - * These functions provide a special case to support backing module - * allocations with real shadow memory. With KASAN vmalloc, the special - * case is unnecessary, as the work is handled in the generic case. - */ -int kasan_module_alloc(void *addr, size_t size); -void kasan_free_shadow(const struct vm_struct *vm); - -#else /* (CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS) && !CONFIG_KASAN_VMALLOC */ static inline int kasan_module_alloc(void *addr, size_t size) { return 0; } static inline void kasan_free_shadow(const struct vm_struct *vm) {} -#endif /* (CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS) && !CONFIG_KASAN_VMALLOC */ +static inline void kasan_unpoison_slab(const void *ptr) { } +static inline size_t kasan_metadata_size(struct kmem_cache *cache) { return 0; } -#ifdef CONFIG_KASAN_INLINE -void kasan_non_canonical_hook(unsigned long addr); -#else /* CONFIG_KASAN_INLINE */ -static inline void kasan_non_canonical_hook(unsigned long addr) { } -#endif /* CONFIG_KASAN_INLINE */ +#endif /* CONFIG_KASAN */ #endif /* LINUX_KASAN_H */ diff --git a/include/linux/kbd_diacr.h b/include/linux/kbd_diacr.h index 738c7340c1..7274ec68c2 100644 --- a/include/linux/kbd_diacr.h +++ b/include/linux/kbd_diacr.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _DIACR_H #define _DIACR_H #include diff --git a/include/linux/kbd_kern.h b/include/linux/kbd_kern.h index c40811d797..cbfb171bbc 100644 --- a/include/linux/kbd_kern.h +++ b/include/linux/kbd_kern.h @@ -1,4 +1,3 @@ -/* 
SPDX-License-Identifier: GPL-2.0 */ #ifndef _KBD_KERN_H #define _KBD_KERN_H @@ -6,7 +5,12 @@ #include #include +extern struct tasklet_struct keyboard_tasklet; + extern char *func_table[MAX_NR_FUNC]; +extern char func_buf[]; +extern char *funcbufptr; +extern int funcbufsize, funcbufleft; /* * kbd->xxx contains the VC-local things (flag settings etc..) @@ -69,6 +73,12 @@ extern void (*kbd_ledfunc)(unsigned int led); extern int set_console(int nr); extern void schedule_console_callback(void); +/* FIXME: review locking for vt.c callers */ +static inline void set_leds(void) +{ + tasklet_schedule(&keyboard_tasklet); +} + static inline int vc_kbd_mode(struct kbd_struct * kbd, int flag) { return ((kbd->modeflags >> flag) & 1); @@ -127,7 +137,7 @@ static inline void chg_vc_kbd_led(struct kbd_struct * kbd, int flag) struct console; -void vt_set_leds_compute_shiftstate(void); +void compute_shiftstate(void); /* defkeymap.c */ diff --git a/include/linux/kbuild.h b/include/linux/kbuild.h index e7be517aaa..22a72198c1 100644 --- a/include/linux/kbuild.h +++ b/include/linux/kbuild.h @@ -1,16 +1,15 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef __LINUX_KBUILD_H #define __LINUX_KBUILD_H #define DEFINE(sym, val) \ - asm volatile("\n.ascii \"->" #sym " %0 " #val "\"" : : "i" (val)) + asm volatile("\n->" #sym " %0 " #val : : "i" (val)) -#define BLANK() asm volatile("\n.ascii \"->\"" : : ) +#define BLANK() asm volatile("\n->" : : ) #define OFFSET(sym, str, mem) \ DEFINE(sym, offsetof(struct str, mem)) #define COMMENT(x) \ - asm volatile("\n.ascii \"->#" x "\"") + asm volatile("\n->#" x) #endif diff --git a/include/linux/kconfig.h b/include/linux/kconfig.h index 20d1079e92..8f2e059e4d 100644 --- a/include/linux/kconfig.h +++ b/include/linux/kconfig.h @@ -1,21 +1,14 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef __LINUX_KCONFIG_H #define __LINUX_KCONFIG_H #include -#ifdef CONFIG_CPU_BIG_ENDIAN -#define __BIG_ENDIAN 4321 -#else -#define __LITTLE_ENDIAN 1234 -#endif - #define 
__ARG_PLACEHOLDER_1 0, #define __take_second_arg(__ignored, val, ...) val /* * The use of "&&" / "||" is limited in certain expressions. - * The following enable to calculate "and" / "or" with macro expansion only. + * The followings enable to calculate "and" / "or" with macro expansion only. */ #define __and(x, y) ___and(x, y) #define ___and(x, y) ____and(__ARG_PLACEHOLDER_##x, y) @@ -51,8 +44,7 @@ /* * IS_MODULE(CONFIG_FOO) evaluates to 1 if CONFIG_FOO is set to 'm', 0 - * otherwise. CONFIG_FOO=m results in "#define CONFIG_FOO_MODULE 1" in - * autoconf.h. + * otherwise. */ #define IS_MODULE(option) __is_defined(option##_MODULE) @@ -67,8 +59,7 @@ /* * IS_ENABLED(CONFIG_FOO) evaluates to 1 if CONFIG_FOO is set to 'y' or 'm', - * 0 otherwise. Note that CONFIG_FOO=y results in "#define CONFIG_FOO 1" in - * autoconf.h, while CONFIG_FOO=m results in "#define CONFIG_FOO_MODULE 1". + * 0 otherwise. */ #define IS_ENABLED(option) __or(IS_BUILTIN(option), IS_MODULE(option)) diff --git a/include/linux/kcore.h b/include/linux/kcore.h index 86c0f1d189..d927622866 100644 --- a/include/linux/kcore.h +++ b/include/linux/kcore.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ /* * /proc/kcore definitions */ @@ -10,7 +9,7 @@ enum kcore_type { KCORE_VMALLOC, KCORE_RAM, KCORE_VMEMMAP, - KCORE_USER, + KCORE_OTHER, }; struct kcore_list { @@ -27,16 +26,8 @@ struct vmcore { loff_t offset; }; -struct vmcoredd_node { - struct list_head list; /* List of dumps */ - void *buf; /* Buffer containing device's dump */ - unsigned int size; /* Size of the buffer */ -}; - #ifdef CONFIG_PROC_KCORE -void __init kclist_add(struct kcore_list *, void *, size_t, int type); - -extern int __init register_mem_pfn_is_ram(int (*fn)(unsigned long pfn)); +extern void kclist_add(struct kcore_list *, void *, size_t, int type); #else static inline void kclist_add(struct kcore_list *new, void *addr, size_t size, int type) diff --git a/include/linux/kcov.h b/include/linux/kcov.h index 55dc338f6b..2883ac98c2 
100644 --- a/include/linux/kcov.h +++ b/include/linux/kcov.h @@ -1,93 +1,29 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_KCOV_H #define _LINUX_KCOV_H -#include #include struct task_struct; #ifdef CONFIG_KCOV +void kcov_task_init(struct task_struct *t); +void kcov_task_exit(struct task_struct *t); + enum kcov_mode { /* Coverage collection is not enabled yet. */ KCOV_MODE_DISABLED = 0, - /* KCOV was initialized, but tracing mode hasn't been chosen yet. */ - KCOV_MODE_INIT = 1, /* * Tracing coverage collection mode. * Covered PCs are collected in a per-task buffer. */ - KCOV_MODE_TRACE_PC = 2, - /* Collecting comparison operands mode. */ - KCOV_MODE_TRACE_CMP = 3, + KCOV_MODE_TRACE = 1, }; -#define KCOV_IN_CTXSW (1 << 30) - -void kcov_task_init(struct task_struct *t); -void kcov_task_exit(struct task_struct *t); - -#define kcov_prepare_switch(t) \ -do { \ - (t)->kcov_mode |= KCOV_IN_CTXSW; \ -} while (0) - -#define kcov_finish_switch(t) \ -do { \ - (t)->kcov_mode &= ~KCOV_IN_CTXSW; \ -} while (0) - -/* See Documentation/dev-tools/kcov.rst for usage details. */ -void kcov_remote_start(u64 handle); -void kcov_remote_stop(void); -u64 kcov_common_handle(void); - -static inline void kcov_remote_start_common(u64 id) -{ - kcov_remote_start(kcov_remote_handle(KCOV_SUBSYSTEM_COMMON, id)); -} - -static inline void kcov_remote_start_usb(u64 id) -{ - kcov_remote_start(kcov_remote_handle(KCOV_SUBSYSTEM_USB, id)); -} - -/* - * The softirq flavor of kcov_remote_*() functions is introduced as a temporary - * work around for kcov's lack of nested remote coverage sections support in - * task context. 
Adding suport for nested sections is tracked in: - * https://bugzilla.kernel.org/show_bug.cgi?id=210337 - */ - -static inline void kcov_remote_start_usb_softirq(u64 id) -{ - if (in_serving_softirq()) - kcov_remote_start_usb(id); -} - -static inline void kcov_remote_stop_softirq(void) -{ - if (in_serving_softirq()) - kcov_remote_stop(); -} - #else static inline void kcov_task_init(struct task_struct *t) {} static inline void kcov_task_exit(struct task_struct *t) {} -static inline void kcov_prepare_switch(struct task_struct *t) {} -static inline void kcov_finish_switch(struct task_struct *t) {} -static inline void kcov_remote_start(u64 handle) {} -static inline void kcov_remote_stop(void) {} -static inline u64 kcov_common_handle(void) -{ - return 0; -} -static inline void kcov_remote_start_common(u64 id) {} -static inline void kcov_remote_start_usb(u64 id) {} -static inline void kcov_remote_start_usb_softirq(u64 id) {} -static inline void kcov_remote_stop_softirq(void) {} #endif /* CONFIG_KCOV */ #endif /* _LINUX_KCOV_H */ diff --git a/include/linux/kd.h b/include/linux/kd.h index b130a18f86..25bd17fad2 100644 --- a/include/linux/kd.h +++ b/include/linux/kd.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_KD_H #define _LINUX_KD_H diff --git a/include/linux/kdb.h b/include/linux/kdb.h index ea0f5e580f..410decacff 100644 --- a/include/linux/kdb.h +++ b/include/linux/kdb.h @@ -13,8 +13,6 @@ * Copyright (C) 2009 Jason Wessel */ -#include - /* Shifted versions of the command enable bits are be used if the command * has no arguments (see kdb_check_flags). 
This allows commands, such as * go, to have different permissions depending upon whether it is called @@ -66,17 +64,6 @@ typedef enum { typedef int (*kdb_func_t)(int, const char **); -/* The KDB shell command table */ -typedef struct _kdbtab { - char *name; /* Command name */ - kdb_func_t func; /* Function to execute command */ - char *usage; /* Usage String for this command */ - char *help; /* Help message for this command */ - short minlen; /* Minimum legal # cmd chars required */ - kdb_cmdflags_t flags; /* Command behaviour flags */ - struct list_head list_node; /* Command list */ -} kdbtab_t; - #ifdef CONFIG_KGDB_KDB #include #include @@ -90,6 +77,7 @@ extern int kdb_poll_idx; * number whenever the kernel debugger is entered. */ extern int kdb_initial_cpu; +extern atomic_t kdb_event; /* Types and messages used for dynamically added kdb shell commands */ @@ -138,7 +126,7 @@ extern const char *kdb_diemsg; #define KDB_FLAG_NO_I8042 (1 << 7) /* No i8042 chip is available, do * not use keyboard */ -extern unsigned int kdb_flags; /* Global flags, see kdb_state for per cpu state */ +extern int kdb_flags; /* Global flags, see kdb_state for per cpu state */ extern void kdb_save_flags(void); extern void kdb_restore_flags(void); @@ -174,7 +162,6 @@ enum kdb_msgsrc { }; extern int kdb_trap_printk; -extern int kdb_printf_cpu; extern __printf(2, 0) int vkdb_printf(enum kdb_msgsrc src, const char *fmt, va_list args); extern __printf(1, 2) int kdb_printf(const char *, ...); @@ -196,6 +183,8 @@ int kdb_process_cpu(const struct task_struct *p) return cpu; } +/* kdb access to register set for stack dumping */ +extern struct pt_regs *kdb_current_regs; #ifdef CONFIG_KALLSYMS extern const char *kdb_walk_kallsyms(loff_t *pos); #else /* ! CONFIG_KALLSYMS */ @@ -206,13 +195,19 @@ static inline const char *kdb_walk_kallsyms(loff_t *pos) #endif /* ! 
CONFIG_KALLSYMS */ /* Dynamic kdb shell command registration */ -extern int kdb_register(kdbtab_t *cmd); -extern void kdb_unregister(kdbtab_t *cmd); +extern int kdb_register(char *, kdb_func_t, char *, char *, short); +extern int kdb_register_flags(char *, kdb_func_t, char *, char *, + short, kdb_cmdflags_t); +extern int kdb_unregister(char *); #else /* ! CONFIG_KGDB_KDB */ static inline __printf(1, 2) int kdb_printf(const char *fmt, ...) { return 0; } static inline void kdb_init(int level) {} -static inline int kdb_register(kdbtab_t *cmd) { return 0; } -static inline void kdb_unregister(kdbtab_t *cmd) {} +static inline int kdb_register(char *cmd, kdb_func_t func, char *usage, + char *help, short minlen) { return 0; } +static inline int kdb_register_flags(char *cmd, kdb_func_t func, char *usage, + char *help, short minlen, + kdb_cmdflags_t flags) { return 0; } +static inline int kdb_unregister(char *cmd) { return 0; } #endif /* CONFIG_KGDB_KDB */ enum { KDB_NOT_INITIALIZED, diff --git a/include/linux/kdebug.h b/include/linux/kdebug.h index fd311565fa..ed815090b3 100644 --- a/include/linux/kdebug.h +++ b/include/linux/kdebug.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_KDEBUG_H #define _LINUX_KDEBUG_H diff --git a/include/linux/kdev_t.h b/include/linux/kdev_t.h index 4856706fbf..8e9e288b08 100644 --- a/include/linux/kdev_t.h +++ b/include/linux/kdev_t.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_KDEV_T_H #define _LINUX_KDEV_T_H @@ -21,61 +20,61 @@ }) /* acceptable for old filesystems */ -static __always_inline bool old_valid_dev(dev_t dev) +static inline bool old_valid_dev(dev_t dev) { return MAJOR(dev) < 256 && MINOR(dev) < 256; } -static __always_inline u16 old_encode_dev(dev_t dev) +static inline u16 old_encode_dev(dev_t dev) { return (MAJOR(dev) << 8) | MINOR(dev); } -static __always_inline dev_t old_decode_dev(u16 val) +static inline dev_t old_decode_dev(u16 val) { return MKDEV((val >> 8) & 255, val & 
255); } -static __always_inline u32 new_encode_dev(dev_t dev) +static inline u32 new_encode_dev(dev_t dev) { unsigned major = MAJOR(dev); unsigned minor = MINOR(dev); return (minor & 0xff) | (major << 8) | ((minor & ~0xff) << 12); } -static __always_inline dev_t new_decode_dev(u32 dev) +static inline dev_t new_decode_dev(u32 dev) { unsigned major = (dev & 0xfff00) >> 8; unsigned minor = (dev & 0xff) | ((dev >> 12) & 0xfff00); return MKDEV(major, minor); } -static __always_inline u64 huge_encode_dev(dev_t dev) +static inline u64 huge_encode_dev(dev_t dev) { return new_encode_dev(dev); } -static __always_inline dev_t huge_decode_dev(u64 dev) +static inline dev_t huge_decode_dev(u64 dev) { return new_decode_dev(dev); } -static __always_inline int sysv_valid_dev(dev_t dev) +static inline int sysv_valid_dev(dev_t dev) { return MAJOR(dev) < (1<<14) && MINOR(dev) < (1<<18); } -static __always_inline u32 sysv_encode_dev(dev_t dev) +static inline u32 sysv_encode_dev(dev_t dev) { return MINOR(dev) | (MAJOR(dev) << 18); } -static __always_inline unsigned sysv_major(u32 dev) +static inline unsigned sysv_major(u32 dev) { return (dev >> 18) & 0x3fff; } -static __always_inline unsigned sysv_minor(u32 dev) +static inline unsigned sysv_minor(u32 dev) { return dev & 0x3ffff; } diff --git a/include/linux/kern_levels.h b/include/linux/kern_levels.h index bf2389c26a..f282d4e872 100644 --- a/include/linux/kern_levels.h +++ b/include/linux/kern_levels.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef __KERN_LEVELS_H__ #define __KERN_LEVELS_H__ @@ -14,7 +13,7 @@ #define KERN_INFO KERN_SOH "6" /* informational */ #define KERN_DEBUG KERN_SOH "7" /* debug-level messages */ -#define KERN_DEFAULT "" /* the default kernel loglevel */ +#define KERN_DEFAULT KERN_SOH "d" /* the default kernel loglevel */ /* * Annotation for a "continued" line of log printout (only done after a diff --git a/include/linux/kernel-page-flags.h b/include/linux/kernel-page-flags.h index 
eee1877a35..f65ce09784 100644 --- a/include/linux/kernel-page-flags.h +++ b/include/linux/kernel-page-flags.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef LINUX_KERNEL_PAGE_FLAGS_H #define LINUX_KERNEL_PAGE_FLAGS_H @@ -17,6 +16,5 @@ #define KPF_ARCH 38 #define KPF_UNCACHED 39 #define KPF_SOFTDIRTY 40 -#define KPF_ARCH_2 41 #endif /* LINUX_KERNEL_PAGE_FLAGS_H */ diff --git a/include/linux/kernel.h b/include/linux/kernel.h index 2776423a58..bc6ed52a39 100644 --- a/include/linux/kernel.h +++ b/include/linux/kernel.h @@ -1,62 +1,157 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_KERNEL_H #define _LINUX_KERNEL_H -#include -#include -#include + +#include #include #include #include #include #include -#include #include -#include -#include #include -#include #include -#include -#include #include - #include +#define USHRT_MAX ((u16)(~0U)) +#define SHRT_MAX ((s16)(USHRT_MAX>>1)) +#define SHRT_MIN ((s16)(-SHRT_MAX - 1)) +#define INT_MAX ((int)(~0U>>1)) +#define INT_MIN (-INT_MAX - 1) +#define UINT_MAX (~0U) +#define LONG_MAX ((long)(~0UL>>1)) +#define LONG_MIN (-LONG_MAX - 1) +#define ULONG_MAX (~0UL) +#define LLONG_MAX ((long long)(~0ULL>>1)) +#define LLONG_MIN (-LLONG_MAX - 1) +#define ULLONG_MAX (~0ULL) +#define SIZE_MAX (~(size_t)0) + +#define U8_MAX ((u8)~0U) +#define S8_MAX ((s8)(U8_MAX>>1)) +#define S8_MIN ((s8)(-S8_MAX - 1)) +#define U16_MAX ((u16)~0U) +#define S16_MAX ((s16)(U16_MAX>>1)) +#define S16_MIN ((s16)(-S16_MAX - 1)) +#define U32_MAX ((u32)~0U) +#define S32_MAX ((s32)(U32_MAX>>1)) +#define S32_MIN ((s32)(-S32_MAX - 1)) +#define U64_MAX ((u64)~0ULL) +#define S64_MAX ((s64)(U64_MAX>>1)) +#define S64_MIN ((s64)(-S64_MAX - 1)) + #define STACK_MAGIC 0xdeadbeef -/** - * REPEAT_BYTE - repeat the value @x multiple times as an unsigned long value - * @x: value to repeat - * - * NOTE: @x is not checked for > 0xff; larger values produce odd results. 
- */ #define REPEAT_BYTE(x) ((~0ul / 0xff) * (x)) -/* generic data direction definitions */ -#define READ 0 -#define WRITE 1 +#define ALIGN(x, a) __ALIGN_KERNEL((x), (a)) +#define __ALIGN_MASK(x, mask) __ALIGN_KERNEL_MASK((x), (mask)) +#define PTR_ALIGN(p, a) ((typeof(p))ALIGN((unsigned long)(p), (a))) +#define IS_ALIGNED(x, a) (((x) & ((typeof(x))(a) - 1)) == 0) -/** - * ARRAY_SIZE - get the number of elements in array @arr - * @arr: array to be sized - */ #define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]) + __must_be_array(arr)) -#define PTR_IF(cond, ptr) ((cond) ? (ptr) : NULL) - #define u64_to_user_ptr(x) ( \ { \ - typecheck(u64, (x)); \ - (void __user *)(uintptr_t)(x); \ + typecheck(u64, x); \ + (void __user *)(uintptr_t)x; \ } \ ) -#define typeof_member(T, m) typeof(((T*)0)->m) +/* + * This looks more complex than it should be. But we need to + * get the type for the ~ right in round_down (it needs to be + * as wide as the result!), and we want to evaluate the macro + * arguments just once each. + */ +#define __round_mask(x, y) ((__typeof__(x))((y)-1)) +#define round_up(x, y) ((((x)-1) | __round_mask(x, y))+1) +#define round_down(x, y) ((x) & ~__round_mask(x, y)) + +#define FIELD_SIZEOF(t, f) (sizeof(((t*)0)->f)) +#define DIV_ROUND_UP __KERNEL_DIV_ROUND_UP +#define DIV_ROUND_UP_ULL(ll,d) \ + ({ unsigned long long _tmp = (ll)+(d)-1; do_div(_tmp, d); _tmp; }) + +#if BITS_PER_LONG == 32 +# define DIV_ROUND_UP_SECTOR_T(ll,d) DIV_ROUND_UP_ULL(ll, d) +#else +# define DIV_ROUND_UP_SECTOR_T(ll,d) DIV_ROUND_UP(ll,d) +#endif + +/* The `const' in roundup() prevents gcc-3.3 from calling __divdi3 */ +#define roundup(x, y) ( \ +{ \ + const typeof(y) __y = y; \ + (((x) + (__y - 1)) / __y) * __y; \ +} \ +) +#define rounddown(x, y) ( \ +{ \ + typeof(x) __x = (x); \ + __x - (__x % (y)); \ +} \ +) + +/* + * Divide positive or negative dividend by positive divisor and round + * to closest integer. 
Result is undefined for negative divisors and + * for negative dividends if the divisor variable type is unsigned. + */ +#define DIV_ROUND_CLOSEST(x, divisor)( \ +{ \ + typeof(x) __x = x; \ + typeof(divisor) __d = divisor; \ + (((typeof(x))-1) > 0 || \ + ((typeof(divisor))-1) > 0 || (__x) > 0) ? \ + (((__x) + ((__d) / 2)) / (__d)) : \ + (((__x) - ((__d) / 2)) / (__d)); \ +} \ +) +/* + * Same as above but for u64 dividends. divisor must be a 32-bit + * number. + */ +#define DIV_ROUND_CLOSEST_ULL(x, divisor)( \ +{ \ + typeof(divisor) __d = divisor; \ + unsigned long long _tmp = (x) + (__d) / 2; \ + do_div(_tmp, __d); \ + _tmp; \ +} \ +) + +/* + * Multiplies an integer by a fraction, while avoiding unnecessary + * overflow or loss of precision. + */ +#define mult_frac(x, numer, denom)( \ +{ \ + typeof(x) quot = (x) / (denom); \ + typeof(x) rem = (x) % (denom); \ + (quot * (numer)) + ((rem * (numer)) / (denom)); \ +} \ +) + #define _RET_IP_ (unsigned long)__builtin_return_address(0) #define _THIS_IP_ ({ __label__ __here; __here: (unsigned long)&&__here; }) +#ifdef CONFIG_LBDAF +# include +# define sector_div(a, b) do_div(a, b) +#else +# define sector_div(n, b)( \ +{ \ + int _res; \ + _res = (n) % (b); \ + (n) /= (b); \ + _res; \ +} \ +) +#endif + /** * upper_32_bits - return bits 32-63 of a number * @n: the number we're accessing @@ -71,58 +166,27 @@ * lower_32_bits - return bits 0-31 of a number * @n: the number we're accessing */ -#define lower_32_bits(n) ((u32)((n) & 0xffffffff)) - -/** - * upper_16_bits - return bits 16-31 of a number - * @n: the number we're accessing - */ -#define upper_16_bits(n) ((u16)((n) >> 16)) - -/** - * lower_16_bits - return bits 0-15 of a number - * @n: the number we're accessing - */ -#define lower_16_bits(n) ((u16)((n) & 0xffff)) +#define lower_32_bits(n) ((u32)(n)) struct completion; +struct pt_regs; struct user; #ifdef CONFIG_PREEMPT_VOLUNTARY - -extern int __cond_resched(void); -# define might_resched() __cond_resched() - -#elif 
defined(CONFIG_PREEMPT_DYNAMIC) - -extern int __cond_resched(void); - -DECLARE_STATIC_CALL(might_resched, __cond_resched); - -static __always_inline void might_resched(void) -{ - static_call_mod(might_resched)(); -} - +extern int _cond_resched(void); +# define might_resched() _cond_resched() #else - # define might_resched() do { } while (0) - -#endif /* CONFIG_PREEMPT_* */ +#endif #ifdef CONFIG_DEBUG_ATOMIC_SLEEP -extern void ___might_sleep(const char *file, int line, int preempt_offset); -extern void __might_sleep(const char *file, int line, int preempt_offset); -extern void __cant_sleep(const char *file, int line, int preempt_offset); -extern void __cant_migrate(const char *file, int line); - + void ___might_sleep(const char *file, int line, int preempt_offset); + void __might_sleep(const char *file, int line, int preempt_offset); /** * might_sleep - annotation for functions that can sleep * * this macro will print a stack trace if it is executed in an atomic - * context (spinlock, irq-handler, ...). Additional sections where blocking is - * not allowed can be annotated with non_block_start() and non_block_end() - * pairs. + * context (spinlock, irq-handler, ...). 
* * This is a useful debugging help to be able to catch problems early and not * be bitten later when the calling function happens to sleep when it is not @@ -130,58 +194,60 @@ extern void __cant_migrate(const char *file, int line); */ # define might_sleep() \ do { __might_sleep(__FILE__, __LINE__, 0); might_resched(); } while (0) -/** - * cant_sleep - annotation for functions that cannot sleep - * - * this macro will print a stack trace if it is executed with preemption enabled - */ -# define cant_sleep() \ - do { __cant_sleep(__FILE__, __LINE__, 0); } while (0) # define sched_annotate_sleep() (current->task_state_change = 0) - -/** - * cant_migrate - annotation for functions that cannot migrate - * - * Will print a stack trace if executed in code which is migratable - */ -# define cant_migrate() \ - do { \ - if (IS_ENABLED(CONFIG_SMP)) \ - __cant_migrate(__FILE__, __LINE__); \ - } while (0) - -/** - * non_block_start - annotate the start of section where sleeping is prohibited - * - * This is on behalf of the oom reaper, specifically when it is calling the mmu - * notifiers. The problem is that if the notifier were to block on, for example, - * mutex_lock() and if the process which holds that mutex were to perform a - * sleeping memory allocation, the oom reaper is now blocked on completion of - * that memory allocation. Other blocking calls like wait_event() pose similar - * issues. - */ -# define non_block_start() (current->non_block_count++) -/** - * non_block_end - annotate the end of section where sleeping is prohibited - * - * Closes a section opened by non_block_start(). 
- */ -# define non_block_end() WARN_ON(current->non_block_count-- == 0) #else static inline void ___might_sleep(const char *file, int line, int preempt_offset) { } static inline void __might_sleep(const char *file, int line, int preempt_offset) { } # define might_sleep() do { might_resched(); } while (0) -# define cant_sleep() do { } while (0) -# define cant_migrate() do { } while (0) # define sched_annotate_sleep() do { } while (0) -# define non_block_start() do { } while (0) -# define non_block_end() do { } while (0) #endif #define might_sleep_if(cond) do { if (cond) might_sleep(); } while (0) +/** + * abs - return absolute value of an argument + * @x: the value. If it is unsigned type, it is converted to signed type first. + * char is treated as if it was signed (regardless of whether it really is) + * but the macro's return type is preserved as char. + * + * Return: an absolute value of x. + */ +#define abs(x) __abs_choose_expr(x, long long, \ + __abs_choose_expr(x, long, \ + __abs_choose_expr(x, int, \ + __abs_choose_expr(x, short, \ + __abs_choose_expr(x, char, \ + __builtin_choose_expr( \ + __builtin_types_compatible_p(typeof(x), char), \ + (char)({ signed char __x = (x); __x<0?-__x:__x; }), \ + ((void)0))))))) + +#define __abs_choose_expr(x, type, other) __builtin_choose_expr( \ + __builtin_types_compatible_p(typeof(x), signed type) || \ + __builtin_types_compatible_p(typeof(x), unsigned type), \ + ({ signed type __x = (x); __x < 0 ? -__x : __x; }), other) + +/** + * reciprocal_scale - "scale" a value into range [0, ep_ro) + * @val: value + * @ep_ro: right open interval endpoint + * + * Perform a "reciprocal multiplication" in order to "scale" a value into + * range [0, ep_ro), where the upper interval endpoint is right-open. + * This is useful, e.g. for accessing a index of an array containing + * ep_ro elements, for example. Think of it as sort of modulus, only that + * the result isn't that of modulo. 
;) Note that if initial input is a + * small value, then result will return 0. + * + * Return: a result based on val in interval [0, ep_ro). + */ +static inline u32 reciprocal_scale(u32 val, u32 ep_ro) +{ + return (u32)(((u64) val * ep_ro) >> 32); +} + #if defined(CONFIG_MMU) && \ (defined(CONFIG_PROVE_LOCKING) || defined(CONFIG_DEBUG_ATOMIC_SLEEP)) #define might_fault() __might_fault(__FILE__, __LINE__) @@ -190,11 +256,152 @@ void __might_fault(const char *file, int line); static inline void might_fault(void) { } #endif +extern struct atomic_notifier_head panic_notifier_list; +extern long (*panic_blink)(int state); +__printf(1, 2) +void panic(const char *fmt, ...) __noreturn __cold; +void nmi_panic(struct pt_regs *regs, const char *msg); +extern void oops_enter(void); +extern void oops_exit(void); +void print_oops_end_marker(void); +extern int oops_may_print(void); void do_exit(long error_code) __noreturn; void complete_and_exit(struct completion *, long) __noreturn; -extern int num_to_str(char *buf, int size, - unsigned long long num, unsigned int width); +/* Internal, do not use. */ +int __must_check _kstrtoul(const char *s, unsigned int base, unsigned long *res); +int __must_check _kstrtol(const char *s, unsigned int base, long *res); + +int __must_check kstrtoull(const char *s, unsigned int base, unsigned long long *res); +int __must_check kstrtoll(const char *s, unsigned int base, long long *res); + +/** + * kstrtoul - convert a string to an unsigned long + * @s: The start of the string. The string must be null-terminated, and may also + * include a single newline before its terminating null. The first character + * may also be a plus sign, but not a minus sign. + * @base: The number base to use. The maximum supported base is 16. 
If base is + * given as 0, then the base of the string is automatically detected with the + * conventional semantics - If it begins with 0x the number will be parsed as a + * hexadecimal (case insensitive), if it otherwise begins with 0, it will be + * parsed as an octal number. Otherwise it will be parsed as a decimal. + * @res: Where to write the result of the conversion on success. + * + * Returns 0 on success, -ERANGE on overflow and -EINVAL on parsing error. + * Used as a replacement for the obsolete simple_strtoull. Return code must + * be checked. +*/ +static inline int __must_check kstrtoul(const char *s, unsigned int base, unsigned long *res) +{ + /* + * We want to shortcut function call, but + * __builtin_types_compatible_p(unsigned long, unsigned long long) = 0. + */ + if (sizeof(unsigned long) == sizeof(unsigned long long) && + __alignof__(unsigned long) == __alignof__(unsigned long long)) + return kstrtoull(s, base, (unsigned long long *)res); + else + return _kstrtoul(s, base, res); +} + +/** + * kstrtol - convert a string to a long + * @s: The start of the string. The string must be null-terminated, and may also + * include a single newline before its terminating null. The first character + * may also be a plus sign or a minus sign. + * @base: The number base to use. The maximum supported base is 16. If base is + * given as 0, then the base of the string is automatically detected with the + * conventional semantics - If it begins with 0x the number will be parsed as a + * hexadecimal (case insensitive), if it otherwise begins with 0, it will be + * parsed as an octal number. Otherwise it will be parsed as a decimal. + * @res: Where to write the result of the conversion on success. + * + * Returns 0 on success, -ERANGE on overflow and -EINVAL on parsing error. + * Used as a replacement for the obsolete simple_strtoull. Return code must + * be checked. 
+ */ +static inline int __must_check kstrtol(const char *s, unsigned int base, long *res) +{ + /* + * We want to shortcut function call, but + * __builtin_types_compatible_p(long, long long) = 0. + */ + if (sizeof(long) == sizeof(long long) && + __alignof__(long) == __alignof__(long long)) + return kstrtoll(s, base, (long long *)res); + else + return _kstrtol(s, base, res); +} + +int __must_check kstrtouint(const char *s, unsigned int base, unsigned int *res); +int __must_check kstrtoint(const char *s, unsigned int base, int *res); + +static inline int __must_check kstrtou64(const char *s, unsigned int base, u64 *res) +{ + return kstrtoull(s, base, res); +} + +static inline int __must_check kstrtos64(const char *s, unsigned int base, s64 *res) +{ + return kstrtoll(s, base, res); +} + +static inline int __must_check kstrtou32(const char *s, unsigned int base, u32 *res) +{ + return kstrtouint(s, base, res); +} + +static inline int __must_check kstrtos32(const char *s, unsigned int base, s32 *res) +{ + return kstrtoint(s, base, res); +} + +int __must_check kstrtou16(const char *s, unsigned int base, u16 *res); +int __must_check kstrtos16(const char *s, unsigned int base, s16 *res); +int __must_check kstrtou8(const char *s, unsigned int base, u8 *res); +int __must_check kstrtos8(const char *s, unsigned int base, s8 *res); +int __must_check kstrtobool(const char *s, bool *res); + +int __must_check kstrtoull_from_user(const char __user *s, size_t count, unsigned int base, unsigned long long *res); +int __must_check kstrtoll_from_user(const char __user *s, size_t count, unsigned int base, long long *res); +int __must_check kstrtoul_from_user(const char __user *s, size_t count, unsigned int base, unsigned long *res); +int __must_check kstrtol_from_user(const char __user *s, size_t count, unsigned int base, long *res); +int __must_check kstrtouint_from_user(const char __user *s, size_t count, unsigned int base, unsigned int *res); +int __must_check kstrtoint_from_user(const 
char __user *s, size_t count, unsigned int base, int *res); +int __must_check kstrtou16_from_user(const char __user *s, size_t count, unsigned int base, u16 *res); +int __must_check kstrtos16_from_user(const char __user *s, size_t count, unsigned int base, s16 *res); +int __must_check kstrtou8_from_user(const char __user *s, size_t count, unsigned int base, u8 *res); +int __must_check kstrtos8_from_user(const char __user *s, size_t count, unsigned int base, s8 *res); +int __must_check kstrtobool_from_user(const char __user *s, size_t count, bool *res); + +static inline int __must_check kstrtou64_from_user(const char __user *s, size_t count, unsigned int base, u64 *res) +{ + return kstrtoull_from_user(s, count, base, res); +} + +static inline int __must_check kstrtos64_from_user(const char __user *s, size_t count, unsigned int base, s64 *res) +{ + return kstrtoll_from_user(s, count, base, res); +} + +static inline int __must_check kstrtou32_from_user(const char __user *s, size_t count, unsigned int base, u32 *res) +{ + return kstrtouint_from_user(s, count, base, res); +} + +static inline int __must_check kstrtos32_from_user(const char __user *s, size_t count, unsigned int base, s32 *res) +{ + return kstrtoint_from_user(s, count, base, res); +} + +/* Obsolete, do not use. 
Use kstrto instead */ + +extern unsigned long simple_strtoul(const char *,char **,unsigned int); +extern long simple_strtol(const char *,char **,unsigned int); +extern unsigned long long simple_strtoull(const char *,char **,unsigned int); +extern long long simple_strtoll(const char *,char **,unsigned int); + +extern int num_to_str(char *buf, int size, unsigned long long num); /* lib/printf utilities */ @@ -220,41 +427,86 @@ int sscanf(const char *, const char *, ...); extern __scanf(2, 0) int vsscanf(const char *, const char *, va_list); -extern int no_hash_pointers_enable(char *str); - extern int get_option(char **str, int *pint); extern char *get_options(const char *str, int nints, int *ints); extern unsigned long long memparse(const char *ptr, char **retptr); extern bool parse_option_str(const char *str, const char *option); -extern char *next_arg(char *args, char **param, char **val); extern int core_kernel_text(unsigned long addr); -extern int init_kernel_text(unsigned long addr); extern int core_kernel_data(unsigned long addr); extern int __kernel_text_address(unsigned long addr); extern int kernel_text_address(unsigned long addr); extern int func_ptr_is_kernel_text(void *ptr); -extern void bust_spinlocks(int yes); +unsigned long int_sqrt(unsigned long); +extern void bust_spinlocks(int yes); +extern int oops_in_progress; /* If set, an oops, panic(), BUG() or die() is in progress */ +extern int panic_timeout; +extern int panic_on_oops; +extern int panic_on_unrecovered_nmi; +extern int panic_on_io_nmi; +extern int panic_on_warn; +extern int sysctl_panic_on_rcu_stall; +extern int sysctl_panic_on_stackoverflow; + +extern bool crash_kexec_post_notifiers; + +/* + * panic_cpu is used for synchronizing panic() and crash_kexec() execution. It + * holds a CPU number which is executing panic() currently. A value of + * PANIC_CPU_INVALID means no CPU has entered panic() or crash_kexec(). 
+ */ +extern atomic_t panic_cpu; +#define PANIC_CPU_INVALID -1 + +/* + * Only to be used by arch init code. If the user over-wrote the default + * CONFIG_PANIC_TIMEOUT, honor it. + */ +static inline void set_arch_panic_timeout(int timeout, int arch_default_timeout) +{ + if (panic_timeout == arch_default_timeout) + panic_timeout = timeout; +} +extern const char *print_tainted(void); +enum lockdep_ok { + LOCKDEP_STILL_OK, + LOCKDEP_NOW_UNRELIABLE +}; +extern void add_taint(unsigned flag, enum lockdep_ok); +extern int test_taint(unsigned flag); +extern unsigned long get_taint(void); extern int root_mountflags; extern bool early_boot_irqs_disabled; -/* - * Values used for system_state. Ordering of the states must not be changed - * as code checks for <, <=, >, >= STATE. - */ +/* Values used for system_state */ extern enum system_states { SYSTEM_BOOTING, - SYSTEM_SCHEDULING, SYSTEM_RUNNING, SYSTEM_HALT, SYSTEM_POWER_OFF, SYSTEM_RESTART, - SYSTEM_SUSPEND, } system_state; +#define TAINT_PROPRIETARY_MODULE 0 +#define TAINT_FORCED_MODULE 1 +#define TAINT_CPU_OUT_OF_SPEC 2 +#define TAINT_FORCED_RMMOD 3 +#define TAINT_MACHINE_CHECK 4 +#define TAINT_BAD_PAGE 5 +#define TAINT_USER 6 +#define TAINT_DIE 7 +#define TAINT_OVERRIDDEN_ACPI_TABLE 8 +#define TAINT_WARN 9 +#define TAINT_CRAP 10 +#define TAINT_FIRMWARE_WORKAROUND 11 +#define TAINT_OOT_MODULE 12 +#define TAINT_UNSIGNED_MODULE 13 +#define TAINT_SOFTLOCKUP 14 +#define TAINT_LIVEPATCH 15 + extern const char hex_asc[]; #define hex_asc_lo(x) hex_asc[((x) & 0x0f)] #define hex_asc_hi(x) hex_asc[((x) & 0xf0) >> 4] @@ -333,8 +585,8 @@ do { \ * trace_printk - printf formatting in the ftrace buffer * @fmt: the printf format for printing * - * Note: __trace_printk is an internal function for trace_printk() and - * the @ip is passed in via the trace_printk() macro. + * Note: __trace_printk is an internal function for trace_printk and + * the @ip is passed in via the trace_printk macro. 
* * This function allows a kernel developer to debug fast path sections * that printk is not appropriate for. By scattering in various @@ -344,9 +596,9 @@ do { \ * This is intended as a debugging tool for the developer only. * Please refrain from leaving trace_printks scattered around in * your code. (Extra memory is used for special buffers that are - * allocated when trace_printk() is used.) + * allocated when trace_printk() is used) * - * A little optimization trick is done here. If there's only one + * A little optization trick is done here. If there's only one * argument, there's no need to scan the string for printf formats. * The trace_puts() will suffice. But how can we take advantage of * using trace_puts() when trace_printk() has only one argument? @@ -371,7 +623,7 @@ do { \ #define do_trace_printk(fmt, args...) \ do { \ static const char *trace_printk_fmt __used \ - __section("__trace_printk_fmt") = \ + __attribute__((section("__trace_printk_fmt"))) = \ __builtin_constant_p(fmt) ? fmt : NULL; \ \ __trace_printk_check_format(fmt, ##args); \ @@ -396,7 +648,7 @@ int __trace_printk(unsigned long ip, const char *fmt, ...); * the @ip is passed in via the trace_puts macro. * * This is similar to trace_printk() but is made for those really fast - * paths that a developer wants the least amount of "Heisenbug" effects, + * paths that a developer wants the least amount of "Heisenbug" affects, * where the processing of the print format is still too much. * * This function allows a kernel developer to debug fast path sections @@ -407,7 +659,7 @@ int __trace_printk(unsigned long ip, const char *fmt, ...); * This is intended as a debugging tool for the developer only. * Please refrain from leaving trace_puts scattered around in * your code. (Extra memory is used for special buffers that are - * allocated when trace_puts() is used.) + * allocated when trace_puts() is used) * * Returns: 0 if nothing was written, positive # if string was. 
* (1 when __trace_bputs is used, strlen(str) when __trace_puts is used) @@ -415,7 +667,7 @@ int __trace_printk(unsigned long ip, const char *fmt, ...); #define trace_puts(str) ({ \ static const char *trace_printk_fmt __used \ - __section("__trace_printk_fmt") = \ + __attribute__((section("__trace_printk_fmt"))) = \ __builtin_constant_p(str) ? str : NULL; \ \ if (__builtin_constant_p(str)) \ @@ -437,7 +689,7 @@ extern void trace_dump_stack(int skip); do { \ if (__builtin_constant_p(fmt)) { \ static const char *trace_printk_fmt __used \ - __section("__trace_printk_fmt") = \ + __attribute__((section("__trace_printk_fmt"))) = \ __builtin_constant_p(fmt) ? fmt : NULL; \ \ __ftrace_vbprintk(_THIS_IP_, trace_printk_fmt, vargs); \ @@ -476,12 +728,102 @@ ftrace_vprintk(const char *fmt, va_list ap) static inline void ftrace_dump(enum ftrace_dump_mode oops_dump_mode) { } #endif /* CONFIG_TRACING */ -/* This counts to 12. Any more, it will return 13th argument. */ -#define __COUNT_ARGS(_0, _1, _2, _3, _4, _5, _6, _7, _8, _9, _10, _11, _12, _n, X...) _n -#define COUNT_ARGS(X...) __COUNT_ARGS(, ##X, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0) +/* + * min()/max()/clamp() macros that also do + * strict type-checking.. See the + * "unnecessary" pointer comparison. + */ +#define __min(t1, t2, min1, min2, x, y) ({ \ + t1 min1 = (x); \ + t2 min2 = (y); \ + (void) (&min1 == &min2); \ + min1 < min2 ? min1 : min2; }) +#define min(x, y) \ + __min(typeof(x), typeof(y), \ + __UNIQUE_ID(min1_), __UNIQUE_ID(min2_), \ + x, y) -#define __CONCAT(a, b) a ## b -#define CONCATENATE(a, b) __CONCAT(a, b) +#define __max(t1, t2, max1, max2, x, y) ({ \ + t1 max1 = (x); \ + t2 max2 = (y); \ + (void) (&max1 == &max2); \ + max1 > max2 ? 
max1 : max2; }) +#define max(x, y) \ + __max(typeof(x), typeof(y), \ + __UNIQUE_ID(max1_), __UNIQUE_ID(max2_), \ + x, y) + +#define min3(x, y, z) min((typeof(x))min(x, y), z) +#define max3(x, y, z) max((typeof(x))max(x, y), z) + +/** + * min_not_zero - return the minimum that is _not_ zero, unless both are zero + * @x: value1 + * @y: value2 + */ +#define min_not_zero(x, y) ({ \ + typeof(x) __x = (x); \ + typeof(y) __y = (y); \ + __x == 0 ? __y : ((__y == 0) ? __x : min(__x, __y)); }) + +/** + * clamp - return a value clamped to a given range with strict typechecking + * @val: current value + * @lo: lowest allowable value + * @hi: highest allowable value + * + * This macro does strict typechecking of lo/hi to make sure they are of the + * same type as val. See the unnecessary pointer comparisons. + */ +#define clamp(val, lo, hi) min((typeof(val))max(val, lo), hi) + +/* + * ..and if you can't take the strict + * types, you can specify one yourself. + * + * Or not use min/max/clamp at all, of course. + */ +#define min_t(type, x, y) \ + __min(type, type, \ + __UNIQUE_ID(min1_), __UNIQUE_ID(min2_), \ + x, y) + +#define max_t(type, x, y) \ + __max(type, type, \ + __UNIQUE_ID(min1_), __UNIQUE_ID(min2_), \ + x, y) + +/** + * clamp_t - return a value clamped to a given range using a given type + * @type: the type of variable to use + * @val: current value + * @lo: minimum allowable value + * @hi: maximum allowable value + * + * This macro does no typechecking and uses temporary variables of type + * 'type' to make all the comparisons. + */ +#define clamp_t(type, val, lo, hi) min_t(type, max_t(type, val, lo), hi) + +/** + * clamp_val - return a value clamped to a given range using val's type + * @val: current value + * @lo: minimum allowable value + * @hi: maximum allowable value + * + * This macro does no typechecking and uses temporary variables of whatever + * type the input argument 'val' is. 
This is useful when val is an unsigned + * type and min and max are literals that will otherwise be assigned a signed + * integer type. + */ +#define clamp_val(val, lo, hi) clamp_t(typeof(val), val, lo, hi) + + +/* + * swap - swap value of @a and @b + */ +#define swap(a, b) \ + do { typeof(a) __tmp = (a); (a) = (b); (b) = __tmp; } while (0) /** * container_of - cast a member of a structure out to the containing structure @@ -490,28 +832,9 @@ static inline void ftrace_dump(enum ftrace_dump_mode oops_dump_mode) { } * @member: the name of the member within the struct. * */ -#define container_of(ptr, type, member) ({ \ - void *__mptr = (void *)(ptr); \ - BUILD_BUG_ON_MSG(!__same_type(*(ptr), ((type *)0)->member) && \ - !__same_type(*(ptr), void), \ - "pointer type mismatch in container_of()"); \ - ((type *)(__mptr - offsetof(type, member))); }) - -/** - * container_of_safe - cast a member of a structure out to the containing structure - * @ptr: the pointer to the member. - * @type: the type of the container struct this is embedded in. - * @member: the name of the member within the struct. - * - * If IS_ERR_OR_NULL(ptr), ptr is returned unchanged. - */ -#define container_of_safe(ptr, type, member) ({ \ - void *__mptr = (void *)(ptr); \ - BUILD_BUG_ON_MSG(!__same_type(*(ptr), ((type *)0)->member) && \ - !__same_type(*(ptr), void), \ - "pointer type mismatch in container_of()"); \ - IS_ERR_OR_NULL(__mptr) ? 
ERR_CAST(__mptr) : \ - ((type *)(__mptr - offsetof(type, member))); }) +#define container_of(ptr, type, member) ({ \ + const typeof( ((type *)0)->member ) *__mptr = (ptr); \ + (type *)( (char *)__mptr - offsetof(type,member) );}) /* Rebuild everything on CONFIG_FTRACE_MCOUNT_RECORD */ #ifdef CONFIG_FTRACE_MCOUNT_RECORD diff --git a/include/linux/kernel_stat.h b/include/linux/kernel_stat.h index 44ae1a7eb9..44fda64ad4 100644 --- a/include/linux/kernel_stat.h +++ b/include/linux/kernel_stat.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_KERNEL_STAT_H #define _LINUX_KERNEL_STAT_H @@ -10,6 +9,7 @@ #include #include #include +#include /* * 'kernel_stat.h' contains the definitions needed for doing @@ -67,6 +67,7 @@ static inline unsigned int kstat_softirqs_cpu(unsigned int irq, int cpu) /* * Number of interrupts per specific IRQ source, since bootup */ +extern unsigned int kstat_irqs(unsigned int irq); extern unsigned int kstat_irqs_usr(unsigned int irq); /* @@ -77,36 +78,15 @@ static inline unsigned int kstat_cpu_irqs_sum(unsigned int cpu) return kstat_cpu(cpu).irqs_sum; } -#ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN -extern u64 kcpustat_field(struct kernel_cpustat *kcpustat, - enum cpu_usage_stat usage, int cpu); -extern void kcpustat_cpu_fetch(struct kernel_cpustat *dst, int cpu); -#else -static inline u64 kcpustat_field(struct kernel_cpustat *kcpustat, - enum cpu_usage_stat usage, int cpu) -{ - return kcpustat->cpustat[usage]; -} - -static inline void kcpustat_cpu_fetch(struct kernel_cpustat *dst, int cpu) -{ - *dst = kcpustat_cpu(cpu); -} - -#endif - -extern void account_user_time(struct task_struct *, u64); -extern void account_guest_time(struct task_struct *, u64); -extern void account_system_time(struct task_struct *, int, u64); -extern void account_system_index_time(struct task_struct *, u64, - enum cpu_usage_stat); -extern void account_steal_time(u64); -extern void account_idle_time(u64); +extern void account_user_time(struct task_struct *, 
cputime_t, cputime_t); +extern void account_system_time(struct task_struct *, int, cputime_t, cputime_t); +extern void account_steal_time(cputime_t); +extern void account_idle_time(cputime_t); #ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE static inline void account_process_tick(struct task_struct *tsk, int user) { - vtime_flush(tsk); + vtime_account_user(tsk); } #else extern void account_process_tick(struct task_struct *, int user); diff --git a/include/linux/kernelcapi.h b/include/linux/kernelcapi.h index 94ba42bf9d..e985ba679c 100644 --- a/include/linux/kernelcapi.h +++ b/include/linux/kernelcapi.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ /* * $Id: kernelcapi.h,v 1.8.6.2 2001/02/07 11:31:31 kai Exp $ * @@ -10,12 +9,46 @@ #ifndef __KERNELCAPI_H__ #define __KERNELCAPI_H__ + #include #include #include #include #include +struct capi20_appl { + u16 applid; + capi_register_params rparam; + void (*recv_message)(struct capi20_appl *ap, struct sk_buff *skb); + void *private; + + /* internal to kernelcapi.o */ + unsigned long nrecvctlpkt; + unsigned long nrecvdatapkt; + unsigned long nsentctlpkt; + unsigned long nsentdatapkt; + struct mutex recv_mtx; + struct sk_buff_head recv_queue; + struct work_struct recv_work; + int release_in_progress; +}; + +u16 capi20_isinstalled(void); +u16 capi20_register(struct capi20_appl *ap); +u16 capi20_release(struct capi20_appl *ap); +u16 capi20_put_message(struct capi20_appl *ap, struct sk_buff *skb); +u16 capi20_get_manufacturer(u32 contr, u8 buf[CAPI_MANUFACTURER_LEN]); +u16 capi20_get_version(u32 contr, struct capi_version *verp); +u16 capi20_get_serial(u32 contr, u8 serial[CAPI_SERIAL_LEN]); +u16 capi20_get_profile(u32 contr, struct capi_profile *profp); +int capi20_manufacturer(unsigned long cmd, void __user *data); + +#define CAPICTR_UP 0 +#define CAPICTR_DOWN 1 + +int register_capictr_notifier(struct notifier_block *nb); +int unregister_capictr_notifier(struct notifier_block *nb); + #define CAPI_NOERROR 0x0000 #define 
CAPI_TOOMANYAPPLS 0x1001 @@ -42,4 +75,45 @@ #define CAPI_MSGCTRLERNOTSUPPORTEXTEQUIP 0x110a #define CAPI_MSGCTRLERONLYSUPPORTEXTEQUIP 0x110b +typedef enum { + CapiMessageNotSupportedInCurrentState = 0x2001, + CapiIllContrPlciNcci = 0x2002, + CapiNoPlciAvailable = 0x2003, + CapiNoNcciAvailable = 0x2004, + CapiNoListenResourcesAvailable = 0x2005, + CapiNoFaxResourcesAvailable = 0x2006, + CapiIllMessageParmCoding = 0x2007, +} RESOURCE_CODING_PROBLEM; + +typedef enum { + CapiB1ProtocolNotSupported = 0x3001, + CapiB2ProtocolNotSupported = 0x3002, + CapiB3ProtocolNotSupported = 0x3003, + CapiB1ProtocolParameterNotSupported = 0x3004, + CapiB2ProtocolParameterNotSupported = 0x3005, + CapiB3ProtocolParameterNotSupported = 0x3006, + CapiBProtocolCombinationNotSupported = 0x3007, + CapiNcpiNotSupported = 0x3008, + CapiCipValueUnknown = 0x3009, + CapiFlagsNotSupported = 0x300a, + CapiFacilityNotSupported = 0x300b, + CapiDataLengthNotSupportedByCurrentProtocol = 0x300c, + CapiResetProcedureNotSupportedByCurrentProtocol = 0x300d, + CapiTeiAssignmentFailed = 0x300e, +} REQUESTED_SERVICES_PROBLEM; + +typedef enum { + CapiSuccess = 0x0000, + CapiSupplementaryServiceNotSupported = 0x300e, + CapiRequestNotAllowedInThisState = 0x3010, +} SUPPLEMENTARY_SERVICE_INFO; + +typedef enum { + CapiProtocolErrorLayer1 = 0x3301, + CapiProtocolErrorLayer2 = 0x3302, + CapiProtocolErrorLayer3 = 0x3303, + CapiTimeOut = 0x3303, // SuppServiceReason + CapiCallGivenToOtherApplication = 0x3304, +} CAPI_REASON; + #endif /* __KERNELCAPI_H__ */ diff --git a/include/linux/kernfs.h b/include/linux/kernfs.h index 1093abf7c2..7056238fd9 100644 --- a/include/linux/kernfs.h +++ b/include/linux/kernfs.h @@ -1,6 +1,7 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * kernfs.h - pseudo filesystem decoupled from vfs locking + * + * This file is released under the GPLv2. 
*/ #ifndef __LINUX_KERNFS_H @@ -14,7 +15,6 @@ #include #include #include -#include #include struct file; @@ -24,10 +24,7 @@ struct seq_file; struct vm_area_struct; struct super_block; struct file_system_type; -struct poll_table_struct; -struct fs_context; -struct kernfs_fs_context; struct kernfs_open_node; struct kernfs_iattrs; @@ -37,10 +34,8 @@ enum kernfs_node_type { KERNFS_LINK = 0x0004, }; -#define KERNFS_TYPE_MASK 0x000f -#define KERNFS_FLAG_MASK ~KERNFS_TYPE_MASK -#define KERNFS_MAX_USER_XATTRS 128 -#define KERNFS_USER_XATTR_SIZE_LIMIT (128 << 10) +#define KERNFS_TYPE_MASK 0x000f +#define KERNFS_FLAG_MASK ~KERNFS_TYPE_MASK enum kernfs_node_flag { KERNFS_ACTIVATED = 0x0010, @@ -51,7 +46,6 @@ enum kernfs_node_flag { KERNFS_SUICIDAL = 0x0400, KERNFS_SUICIDED = 0x0800, KERNFS_EMPTY_DIR = 0x1000, - KERNFS_HAS_RELEASE = 0x2000, }; /* @flags for kernfs_create_root() */ @@ -65,7 +59,7 @@ enum kernfs_root_flag { KERNFS_ROOT_CREATE_DEACTIVATED = 0x0001, /* - * For regular files, if the opener has CAP_DAC_OVERRIDE, open(2) + * For regular flies, if the opener has CAP_DAC_OVERRIDE, open(2) * succeeds regardless of the RW permissions. sysfs had an extra * layer of enforcement where open(2) fails with -EACCES regardless * of CAP_DAC_OVERRIDE if the permission doesn't have the @@ -74,17 +68,6 @@ enum kernfs_root_flag { * following flag enables that behavior. */ KERNFS_ROOT_EXTRA_OPEN_PERM_CHECK = 0x0002, - - /* - * The filesystem supports exportfs operation, so userspace can use - * fhandle to access nodes of the fs. - */ - KERNFS_ROOT_SUPPORT_EXPORTOP = 0x0004, - - /* - * Support user xattrs to be written to nodes rooted at this root. - */ - KERNFS_ROOT_SUPPORT_USER_XATTR = 0x0008, }; /* type-specific structures for kernfs_node union members */ @@ -98,11 +81,6 @@ struct kernfs_elem_dir { * better directly in kernfs_node but is here to save space. 
*/ struct kernfs_root *root; - /* - * Monotonic revision counter, used to identify if a directory - * node has changed during negative dentry revalidation. - */ - unsigned long rev; }; struct kernfs_elem_symlink { @@ -121,7 +99,7 @@ struct kernfs_elem_attr { * kernfs node is represented by single kernfs_node. Most fields are * private to kernfs and shouldn't be accessed directly by kernfs users. * - * As long as count reference is held, the kernfs_node itself is + * As long as s_count reference is held, the kernfs_node itself is * accessible. Dereferencing elem or any other outer entity requires * active reference. */ @@ -152,14 +130,9 @@ struct kernfs_node { void *priv; - /* - * 64bit unique ID. On 64bit ino setups, id is the ino. On 32bit, - * the low 32bits are ino and upper generation. - */ - u64 id; - unsigned short flags; umode_t mode; + unsigned int ino; struct kernfs_iattrs *iattr; }; @@ -171,6 +144,7 @@ struct kernfs_node { * kernfs_node parameter. */ struct kernfs_syscall_ops { + int (*remount_fs)(struct kernfs_root *root, int *flags, char *data); int (*show_options)(struct seq_file *sf, struct kernfs_root *root); int (*mkdir)(struct kernfs_node *parent, const char *name, @@ -188,12 +162,10 @@ struct kernfs_root { unsigned int flags; /* KERNFS_ROOT_* flags */ /* private fields, do not use outside kernfs proper */ - struct idr ino_idr; - u32 last_id_lowbits; - u32 id_highbits; + struct ida ino_ida; struct kernfs_syscall_ops *syscall_ops; - /* list of kernfs_super_info of this root, protected by kernfs_rwsem */ + /* list of kernfs_super_info of this root, protected by kernfs_mutex */ struct list_head supers; wait_queue_head_t deactivate_waitq; @@ -203,7 +175,6 @@ struct kernfs_open_file { /* published fields */ struct kernfs_node *kn; struct file *file; - struct seq_file *seq_file; void *priv; /* private fields, do not use outside kernfs proper */ @@ -214,19 +185,11 @@ struct kernfs_open_file { char *prealloc_buf; size_t atomic_write_len; - bool mmapped:1; 
- bool released:1; + bool mmapped; const struct vm_operations_struct *vm_ops; }; struct kernfs_ops { - /* - * Optional open/release methods. Both are called with - * @of->seq_file populated. - */ - int (*open)(struct kernfs_open_file *of); - void (*release)(struct kernfs_open_file *of); - /* * Read is handled by either seq_file or raw_read(). * @@ -265,9 +228,6 @@ struct kernfs_ops { ssize_t (*write)(struct kernfs_open_file *of, char *buf, size_t bytes, loff_t off); - __poll_t (*poll)(struct kernfs_open_file *of, - struct poll_table_struct *pt); - int (*mmap)(struct kernfs_open_file *of, struct vm_area_struct *vma); #ifdef CONFIG_DEBUG_LOCK_ALLOC @@ -275,18 +235,6 @@ struct kernfs_ops { #endif }; -/* - * The kernfs superblock creation/mount parameter context. - */ -struct kernfs_fs_context { - struct kernfs_root *root; /* Root of the hierarchy being mounted */ - void *ns_tag; /* Namespace tag of the mount (or NULL) */ - unsigned long magic; /* File system specific magic number */ - - /* The following are set/used by kernfs_mount() */ - bool new_sb_created; /* Set to T if we allocated a new sb */ -}; - #ifdef CONFIG_KERNFS static inline enum kernfs_node_type kernfs_type(struct kernfs_node *kn) @@ -294,34 +242,6 @@ static inline enum kernfs_node_type kernfs_type(struct kernfs_node *kn) return kn->flags & KERNFS_TYPE_MASK; } -static inline ino_t kernfs_id_ino(u64 id) -{ - /* id is ino if ino_t is 64bit; otherwise, low 32bits */ - if (sizeof(ino_t) >= sizeof(u64)) - return id; - else - return (u32)id; -} - -static inline u32 kernfs_id_gen(u64 id) -{ - /* gen is fixed at 1 if ino_t is 64bit; otherwise, high 32bits */ - if (sizeof(ino_t) >= sizeof(u64)) - return 1; - else - return id >> 32; -} - -static inline ino_t kernfs_ino(struct kernfs_node *kn) -{ - return kernfs_id_ino(kn->id); -} - -static inline ino_t kernfs_gen(struct kernfs_node *kn) -{ - return kernfs_id_gen(kn->id); -} - /** * kernfs_enable_ns - enable namespace under a directory * @kn: directory of 
interest, should be empty @@ -373,14 +293,12 @@ void kernfs_destroy_root(struct kernfs_root *root); struct kernfs_node *kernfs_create_dir_ns(struct kernfs_node *parent, const char *name, umode_t mode, - kuid_t uid, kgid_t gid, void *priv, const void *ns); struct kernfs_node *kernfs_create_empty_dir(struct kernfs_node *parent, const char *name); struct kernfs_node *__kernfs_create_file(struct kernfs_node *parent, - const char *name, umode_t mode, - kuid_t uid, kgid_t gid, - loff_t size, + const char *name, + umode_t mode, loff_t size, const struct kernfs_ops *ops, void *priv, const void *ns, struct lock_class_key *key); @@ -397,24 +315,17 @@ int kernfs_remove_by_name_ns(struct kernfs_node *parent, const char *name, int kernfs_rename_ns(struct kernfs_node *kn, struct kernfs_node *new_parent, const char *new_name, const void *new_ns); int kernfs_setattr(struct kernfs_node *kn, const struct iattr *iattr); -__poll_t kernfs_generic_poll(struct kernfs_open_file *of, - struct poll_table_struct *pt); void kernfs_notify(struct kernfs_node *kn); -int kernfs_xattr_get(struct kernfs_node *kn, const char *name, - void *value, size_t size); -int kernfs_xattr_set(struct kernfs_node *kn, const char *name, - const void *value, size_t size, int flags); - const void *kernfs_super_ns(struct super_block *sb); -int kernfs_get_tree(struct fs_context *fc); -void kernfs_free_fs_context(struct fs_context *fc); +struct dentry *kernfs_mount_ns(struct file_system_type *fs_type, int flags, + struct kernfs_root *root, unsigned long magic, + bool *new_sb_created, const void *ns); void kernfs_kill_sb(struct super_block *sb); +struct super_block *kernfs_pin_sb(struct kernfs_root *root, const void *ns); void kernfs_init(void); -struct kernfs_node *kernfs_find_and_get_node_by_id(struct kernfs_root *root, - u64 id); #else /* CONFIG_KERNFS */ static inline enum kernfs_node_type kernfs_type(struct kernfs_node *kn) @@ -470,14 +381,12 @@ static inline void kernfs_destroy_root(struct kernfs_root *root) { } 
static inline struct kernfs_node * kernfs_create_dir_ns(struct kernfs_node *parent, const char *name, - umode_t mode, kuid_t uid, kgid_t gid, - void *priv, const void *ns) + umode_t mode, void *priv, const void *ns) { return ERR_PTR(-ENOSYS); } static inline struct kernfs_node * __kernfs_create_file(struct kernfs_node *parent, const char *name, - umode_t mode, kuid_t uid, kgid_t gid, - loff_t size, const struct kernfs_ops *ops, + umode_t mode, loff_t size, const struct kernfs_ops *ops, void *priv, const void *ns, struct lock_class_key *key) { return ERR_PTR(-ENOSYS); } @@ -508,21 +417,14 @@ static inline int kernfs_setattr(struct kernfs_node *kn, static inline void kernfs_notify(struct kernfs_node *kn) { } -static inline int kernfs_xattr_get(struct kernfs_node *kn, const char *name, - void *value, size_t size) -{ return -ENOSYS; } - -static inline int kernfs_xattr_set(struct kernfs_node *kn, const char *name, - const void *value, size_t size, int flags) -{ return -ENOSYS; } - static inline const void *kernfs_super_ns(struct super_block *sb) { return NULL; } -static inline int kernfs_get_tree(struct fs_context *fc) -{ return -ENOSYS; } - -static inline void kernfs_free_fs_context(struct fs_context *fc) { } +static inline struct dentry * +kernfs_mount_ns(struct file_system_type *fs_type, int flags, + struct kernfs_root *root, unsigned long magic, + bool *new_sb_created, const void *ns) +{ return ERR_PTR(-ENOSYS); } static inline void kernfs_kill_sb(struct super_block *sb) { } @@ -536,11 +438,10 @@ static inline void kernfs_init(void) { } * @buf: buffer to copy @kn's name into * @buflen: size of @buf * - * If @kn is NULL result will be "(null)". - * - * Returns the length of the full path. If the full length is equal to or - * greater than @buflen, @buf contains the truncated path with the trailing - * '\0'. On error, -errno is returned. + * Builds and returns the full path of @kn in @buf of @buflen bytes. 
The + * path is built from the end of @buf so the returned pointer usually + * doesn't match @buf. If @buf isn't long enough, @buf is nul terminated + * and %NULL is returned. */ static inline int kernfs_path(struct kernfs_node *kn, char *buf, size_t buflen) { @@ -563,15 +464,12 @@ static inline struct kernfs_node * kernfs_create_dir(struct kernfs_node *parent, const char *name, umode_t mode, void *priv) { - return kernfs_create_dir_ns(parent, name, mode, - GLOBAL_ROOT_UID, GLOBAL_ROOT_GID, - priv, NULL); + return kernfs_create_dir_ns(parent, name, mode, priv, NULL); } static inline struct kernfs_node * kernfs_create_file_ns(struct kernfs_node *parent, const char *name, - umode_t mode, kuid_t uid, kgid_t gid, - loff_t size, const struct kernfs_ops *ops, + umode_t mode, loff_t size, const struct kernfs_ops *ops, void *priv, const void *ns) { struct lock_class_key *key = NULL; @@ -579,17 +477,15 @@ kernfs_create_file_ns(struct kernfs_node *parent, const char *name, #ifdef CONFIG_DEBUG_LOCK_ALLOC key = (struct lock_class_key *)&ops->lockdep_key; #endif - return __kernfs_create_file(parent, name, mode, uid, gid, - size, ops, priv, ns, key); + return __kernfs_create_file(parent, name, mode, size, ops, priv, ns, + key); } static inline struct kernfs_node * kernfs_create_file(struct kernfs_node *parent, const char *name, umode_t mode, loff_t size, const struct kernfs_ops *ops, void *priv) { - return kernfs_create_file_ns(parent, name, mode, - GLOBAL_ROOT_UID, GLOBAL_ROOT_GID, - size, ops, priv, NULL); + return kernfs_create_file_ns(parent, name, mode, size, ops, priv, NULL); } static inline int kernfs_remove_by_name(struct kernfs_node *parent, @@ -605,4 +501,13 @@ static inline int kernfs_rename(struct kernfs_node *kn, return kernfs_rename_ns(kn, new_parent, new_name, NULL); } +static inline struct dentry * +kernfs_mount(struct file_system_type *fs_type, int flags, + struct kernfs_root *root, unsigned long magic, + bool *new_sb_created) +{ + return 
kernfs_mount_ns(fs_type, flags, root, + magic, new_sb_created, NULL); +} + #endif /* __LINUX_KERNFS_H */ diff --git a/include/linux/kexec.h b/include/linux/kexec.h index 0c994ae377..406c33dcae 100644 --- a/include/linux/kexec.h +++ b/include/linux/kexec.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef LINUX_KEXEC_H #define LINUX_KEXEC_H @@ -15,15 +14,17 @@ #if !defined(__ASSEMBLY__) -#include #include #include #ifdef CONFIG_KEXEC_CORE #include +#include #include #include +#include +#include #include #include @@ -61,7 +62,20 @@ #define KEXEC_CRASH_MEM_ALIGN PAGE_SIZE #endif -#define KEXEC_CORE_NOTE_NAME CRASH_CORE_NOTE_NAME +#define KEXEC_NOTE_HEAD_BYTES ALIGN(sizeof(struct elf_note), 4) +#define KEXEC_CORE_NOTE_NAME "CORE" +#define KEXEC_CORE_NOTE_NAME_BYTES ALIGN(sizeof(KEXEC_CORE_NOTE_NAME), 4) +#define KEXEC_CORE_NOTE_DESC_BYTES ALIGN(sizeof(struct elf_prstatus), 4) +/* + * The per-cpu notes area is a list of notes terminated by a "NULL" + * note header. For kdump, the code in vmcore.c runs in the context + * of the second kernel to combine them into one note. + */ +#ifndef KEXEC_NOTE_BYTES +#define KEXEC_NOTE_BYTES ( (KEXEC_NOTE_HEAD_BYTES * 2) + \ + KEXEC_CORE_NOTE_NAME_BYTES + \ + KEXEC_CORE_NOTE_DESC_BYTES ) +#endif /* * This structure is used to hold the arguments that are used when loading @@ -99,24 +113,20 @@ struct compat_kexec_segment { #ifdef CONFIG_KEXEC_FILE struct purgatory_info { - /* - * Pointer to elf header at the beginning of kexec_purgatory. - * Note: kexec_purgatory is read only - */ - const Elf_Ehdr *ehdr; - /* - * Temporary, modifiable buffer for sechdrs used for relocation. - * This memory can be freed post image load. - */ + /* Pointer to elf header of read only purgatory */ + Elf_Ehdr *ehdr; + + /* Pointer to purgatory sechdrs which are modifiable */ Elf_Shdr *sechdrs; /* - * Temporary, modifiable buffer for stripped purgatory used for - * relocation. This memory can be freed post image load. 
+ * Temporary buffer location where purgatory is loaded and relocated + * This memory can be freed post image load */ void *purgatory_buf; -}; -struct kimage; + /* Address where purgatory is finally loaded and is executed from */ + unsigned long purgatory_load_addr; +}; typedef int (kexec_probe_t)(const char *kernel_buf, unsigned long kernel_size); typedef void *(kexec_load_t)(struct kimage *image, char *kernel_buf, @@ -125,7 +135,7 @@ typedef void *(kexec_load_t)(struct kimage *image, char *kernel_buf, unsigned long cmdline_len); typedef int (kexec_cleanup_t)(void *loader_data); -#ifdef CONFIG_KEXEC_SIG +#ifdef CONFIG_KEXEC_VERIFY_SIG typedef int (kexec_verify_sig_t)(const char *kernel_buf, unsigned long kernel_len); #endif @@ -134,120 +144,12 @@ struct kexec_file_ops { kexec_probe_t *probe; kexec_load_t *load; kexec_cleanup_t *cleanup; -#ifdef CONFIG_KEXEC_SIG +#ifdef CONFIG_KEXEC_VERIFY_SIG kexec_verify_sig_t *verify_sig; #endif }; - -extern const struct kexec_file_ops * const kexec_file_loaders[]; - -int kexec_image_probe_default(struct kimage *image, void *buf, - unsigned long buf_len); -int kexec_image_post_load_cleanup_default(struct kimage *image); - -/* - * If kexec_buf.mem is set to this value, kexec_locate_mem_hole() - * will try to allocate free memory. Arch may overwrite it. - */ -#ifndef KEXEC_BUF_MEM_UNKNOWN -#define KEXEC_BUF_MEM_UNKNOWN 0 #endif -/** - * struct kexec_buf - parameters for finding a place for a buffer in memory - * @image: kexec image in which memory to search. - * @buffer: Contents which will be copied to the allocated memory. - * @bufsz: Size of @buffer. - * @mem: On return will have address of the buffer in memory. - * @memsz: Size for the buffer in memory. - * @buf_align: Minimum alignment needed. - * @buf_min: The buffer can't be placed below this address. - * @buf_max: The buffer can't be placed above this address. - * @top_down: Allocate from top of memory. 
- */ -struct kexec_buf { - struct kimage *image; - void *buffer; - unsigned long bufsz; - unsigned long mem; - unsigned long memsz; - unsigned long buf_align; - unsigned long buf_min; - unsigned long buf_max; - bool top_down; -}; - -int kexec_load_purgatory(struct kimage *image, struct kexec_buf *kbuf); -int kexec_purgatory_get_set_symbol(struct kimage *image, const char *name, - void *buf, unsigned int size, - bool get_value); -void *kexec_purgatory_get_symbol_addr(struct kimage *image, const char *name); - -/* Architectures may override the below functions */ -int arch_kexec_kernel_image_probe(struct kimage *image, void *buf, - unsigned long buf_len); -void *arch_kexec_kernel_image_load(struct kimage *image); -int arch_kexec_apply_relocations_add(struct purgatory_info *pi, - Elf_Shdr *section, - const Elf_Shdr *relsec, - const Elf_Shdr *symtab); -int arch_kexec_apply_relocations(struct purgatory_info *pi, - Elf_Shdr *section, - const Elf_Shdr *relsec, - const Elf_Shdr *symtab); -int arch_kimage_file_post_load_cleanup(struct kimage *image); -#ifdef CONFIG_KEXEC_SIG -int arch_kexec_kernel_verify_sig(struct kimage *image, void *buf, - unsigned long buf_len); -#endif -int arch_kexec_locate_mem_hole(struct kexec_buf *kbuf); - -extern int kexec_add_buffer(struct kexec_buf *kbuf); -int kexec_locate_mem_hole(struct kexec_buf *kbuf); - -/* Alignment required for elf header segment */ -#define ELF_CORE_HEADER_ALIGN 4096 - -struct crash_mem_range { - u64 start, end; -}; - -struct crash_mem { - unsigned int max_nr_ranges; - unsigned int nr_ranges; - struct crash_mem_range ranges[]; -}; - -extern int crash_exclude_mem_range(struct crash_mem *mem, - unsigned long long mstart, - unsigned long long mend); -extern int crash_prepare_elf64_headers(struct crash_mem *mem, int kernel_map, - void **addr, unsigned long *sz); -#endif /* CONFIG_KEXEC_FILE */ - -#ifdef CONFIG_KEXEC_ELF -struct kexec_elf_info { - /* - * Where the ELF binary contents are kept. 
- * Memory managed by the user of the struct. - */ - const char *buffer; - - const struct elfhdr *ehdr; - const struct elf_phdr *proghdrs; -}; - -int kexec_build_elf_info(const char *buf, size_t len, struct elfhdr *ehdr, - struct kexec_elf_info *elf_info); - -int kexec_elf_load(struct kimage *image, struct elfhdr *ehdr, - struct kexec_elf_info *elf_info, - struct kexec_buf *kbuf, - unsigned long *lowest_load_addr); - -void kexec_free_elf_info(struct kexec_elf_info *elf_info); -int kexec_elf_probe(const char *buf, unsigned long len); -#endif struct kimage { kimage_entry_t head; kimage_entry_t *entry; @@ -256,7 +158,6 @@ struct kimage { unsigned long start; struct page *control_code_page; struct page *swap_page; - void *vmcoreinfo_data_copy; /* locates in the crash memory */ unsigned long nr_segments; struct kexec_segment segment[KEXEC_SEGMENT_MAX]; @@ -292,7 +193,7 @@ struct kimage { unsigned long cmdline_buf_len; /* File operations provided by image loader */ - const struct kexec_file_ops *fops; + struct kexec_file_ops *fops; /* Image loader handling the kernel can store a pointer here */ void *image_loader_data; @@ -300,36 +201,70 @@ struct kimage { /* Information for loading purgatory */ struct purgatory_info purgatory_info; #endif - -#ifdef CONFIG_IMA_KEXEC - /* Virtual address of IMA measurement buffer for kexec syscall */ - void *ima_buffer; - - phys_addr_t ima_buffer_addr; - size_t ima_buffer_size; -#endif - - /* Core ELF header buffer */ - void *elf_headers; - unsigned long elf_headers_sz; - unsigned long elf_load_addr; }; /* kexec interface functions */ extern void machine_kexec(struct kimage *image); extern int machine_kexec_prepare(struct kimage *image); extern void machine_kexec_cleanup(struct kimage *image); +extern asmlinkage long sys_kexec_load(unsigned long entry, + unsigned long nr_segments, + struct kexec_segment __user *segments, + unsigned long flags); extern int kernel_kexec(void); +extern int kexec_add_buffer(struct kimage *image, char *buffer, 
+ unsigned long bufsz, unsigned long memsz, + unsigned long buf_align, unsigned long buf_min, + unsigned long buf_max, bool top_down, + unsigned long *load_addr); extern struct page *kimage_alloc_control_pages(struct kimage *image, unsigned int order); -int machine_kexec_post_load(struct kimage *image); - +extern int kexec_load_purgatory(struct kimage *image, unsigned long min, + unsigned long max, int top_down, + unsigned long *load_addr); +extern int kexec_purgatory_get_set_symbol(struct kimage *image, + const char *name, void *buf, + unsigned int size, bool get_value); +extern void *kexec_purgatory_get_symbol_addr(struct kimage *image, + const char *name); extern void __crash_kexec(struct pt_regs *); extern void crash_kexec(struct pt_regs *); int kexec_should_crash(struct task_struct *); int kexec_crash_loaded(void); void crash_save_cpu(struct pt_regs *regs, int cpu); -extern int kimage_crash_copy_vmcoreinfo(struct kimage *image); +void crash_save_vmcoreinfo(void); +void arch_crash_save_vmcoreinfo(void); +__printf(1, 2) +void vmcoreinfo_append_str(const char *fmt, ...); +phys_addr_t paddr_vmcoreinfo_note(void); + +#define VMCOREINFO_OSRELEASE(value) \ + vmcoreinfo_append_str("OSRELEASE=%s\n", value) +#define VMCOREINFO_PAGESIZE(value) \ + vmcoreinfo_append_str("PAGESIZE=%ld\n", value) +#define VMCOREINFO_SYMBOL(name) \ + vmcoreinfo_append_str("SYMBOL(%s)=%lx\n", #name, (unsigned long)&name) +#define VMCOREINFO_SIZE(name) \ + vmcoreinfo_append_str("SIZE(%s)=%lu\n", #name, \ + (unsigned long)sizeof(name)) +#define VMCOREINFO_STRUCT_SIZE(name) \ + vmcoreinfo_append_str("SIZE(%s)=%lu\n", #name, \ + (unsigned long)sizeof(struct name)) +#define VMCOREINFO_OFFSET(name, field) \ + vmcoreinfo_append_str("OFFSET(%s.%s)=%lu\n", #name, #field, \ + (unsigned long)offsetof(struct name, field)) +#define VMCOREINFO_LENGTH(name, value) \ + vmcoreinfo_append_str("LENGTH(%s)=%lu\n", #name, (unsigned long)value) +#define VMCOREINFO_NUMBER(name) \ + 
vmcoreinfo_append_str("NUMBER(%s)=%ld\n", #name, (long)name) +#define VMCOREINFO_CONFIG(name) \ + vmcoreinfo_append_str("CONFIG_%s=y\n", #name) +#define VMCOREINFO_PAGE_OFFSET(value) \ + vmcoreinfo_append_str("PAGE_OFFSET=%lx\n", (unsigned long)value) +#define VMCOREINFO_VMALLOC_START(value) \ + vmcoreinfo_append_str("VMALLOC_START=%lx\n", (unsigned long)value) +#define VMCOREINFO_VMEMMAP_START(value) \ + vmcoreinfo_append_str("VMEMMAP_START=%lx\n", (unsigned long)value) extern struct kimage *kexec_image; extern struct kimage *kexec_crash_image; @@ -350,19 +285,45 @@ extern int kexec_load_disabled; #define KEXEC_FILE_FLAGS (KEXEC_FILE_UNLOAD | KEXEC_FILE_ON_CRASH | \ KEXEC_FILE_NO_INITRAMFS) +#define VMCOREINFO_BYTES (4096) +#define VMCOREINFO_NOTE_NAME "VMCOREINFO" +#define VMCOREINFO_NOTE_NAME_BYTES ALIGN(sizeof(VMCOREINFO_NOTE_NAME), 4) +#define VMCOREINFO_NOTE_SIZE (KEXEC_NOTE_HEAD_BYTES*2 + VMCOREINFO_BYTES \ + + VMCOREINFO_NOTE_NAME_BYTES) + /* Location of a reserved region to hold the crash kernel. 
*/ extern struct resource crashk_res; extern struct resource crashk_low_res; +typedef u32 note_buf_t[KEXEC_NOTE_BYTES/4]; extern note_buf_t __percpu *crash_notes; +extern u32 vmcoreinfo_note[VMCOREINFO_NOTE_SIZE/4]; +extern size_t vmcoreinfo_size; +extern size_t vmcoreinfo_max_size; /* flag to track if kexec reboot is in progress */ extern bool kexec_in_progress; +int __init parse_crashkernel(char *cmdline, unsigned long long system_ram, + unsigned long long *crash_size, unsigned long long *crash_base); +int parse_crashkernel_high(char *cmdline, unsigned long long system_ram, + unsigned long long *crash_size, unsigned long long *crash_base); +int parse_crashkernel_low(char *cmdline, unsigned long long system_ram, + unsigned long long *crash_size, unsigned long long *crash_base); int crash_shrink_memory(unsigned long new_size); size_t crash_get_memory_size(void); void crash_free_reserved_phys_range(unsigned long begin, unsigned long end); +int __weak arch_kexec_kernel_image_probe(struct kimage *image, void *buf, + unsigned long buf_len); +void * __weak arch_kexec_kernel_image_load(struct kimage *image); +int __weak arch_kimage_file_post_load_cleanup(struct kimage *image); +int __weak arch_kexec_kernel_verify_sig(struct kimage *image, void *buf, + unsigned long buf_len); +int __weak arch_kexec_apply_relocations_add(const Elf_Ehdr *ehdr, + Elf_Shdr *sechdrs, unsigned int relsec); +int __weak arch_kexec_apply_relocations(const Elf_Ehdr *ehdr, Elf_Shdr *sechdrs, + unsigned int relsec); void arch_kexec_protect_crashkres(void); void arch_kexec_unprotect_crashkres(void); @@ -404,14 +365,6 @@ static inline void *boot_phys_to_virt(unsigned long entry) return phys_to_virt(boot_phys_to_phys(entry)); } -#ifndef arch_kexec_post_alloc_pages -static inline int arch_kexec_post_alloc_pages(void *vaddr, unsigned int pages, gfp_t gfp) { return 0; } -#endif - -#ifndef arch_kexec_pre_free_pages -static inline void arch_kexec_pre_free_pages(void *vaddr, unsigned int pages) { } -#endif - 
#else /* !CONFIG_KEXEC_CORE */ struct pt_regs; struct task_struct; diff --git a/include/linux/key-type.h b/include/linux/key-type.h index 7d985a1dfe..d1d24c37f4 100644 --- a/include/linux/key-type.h +++ b/include/linux/key-type.h @@ -1,8 +1,12 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ /* Definitions for key type implementations * * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved. * Written by David Howells (dhowells@redhat.com) + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public Licence + * as published by the Free Software Foundation; either version + * 2 of the Licence, or (at your option) any later version. */ #ifndef _LINUX_KEY_TYPE_H @@ -13,8 +17,14 @@ #ifdef CONFIG_KEYS -struct kernel_pkey_query; -struct kernel_pkey_params; +/* + * key under-construction record + * - passed to the request_key actor if supplied + */ +struct key_construction { + struct key *key; /* key being constructed */ + struct key *authkey;/* authorisation for key being constructed */ +}; /* * Pre-parsed payload, used by key add, update and instantiate. @@ -29,16 +39,16 @@ struct kernel_pkey_params; * clear the contents. */ struct key_preparsed_payload { - const char *orig_description; /* Actual or proposed description (maybe NULL) */ char *description; /* Proposed key description (or NULL) */ union key_payload payload; /* Proposed payload */ const void *data; /* Raw data */ size_t datalen; /* Raw datalen */ size_t quotalen; /* Quota length for proposed payload */ - time64_t expiry; /* Expiry time of key */ + time_t expiry; /* Expiry time of key */ } __randomize_layout; -typedef int (*request_key_actor_t)(struct key *auth_key, void *aux); +typedef int (*request_key_actor_t)(struct key_construction *key, + const char *op, void *aux); /* * Preparsed matching criterion. 
@@ -71,9 +81,6 @@ struct key_type { */ size_t def_datalen; - unsigned int flags; -#define KEY_TYPE_NET_DOMAIN 0x00000001 /* Keys of this type have a net namespace domain */ - /* vet a description */ int (*vet_description)(const char *description); @@ -128,7 +135,7 @@ struct key_type { * much is copied into the buffer * - shouldn't do the copy if the buffer is NULL */ - long (*read)(const struct key *key, char *buffer, size_t buflen); + long (*read)(const struct key *key, char __user *buffer, size_t buflen); /* handle request_key() for this type instead of invoking * /sbin/request-key (optional) @@ -140,26 +147,10 @@ struct key_type { */ request_key_actor_t request_key; - /* Look up a keyring access restriction (optional) - * - * - NULL is a valid return value (meaning the requested restriction - * is known but will never block addition of a key) - * - should return -EINVAL if the restriction is unknown - */ - struct key_restriction *(*lookup_restriction)(const char *params); - - /* Asymmetric key accessor functions. 
*/ - int (*asym_query)(const struct kernel_pkey_params *params, - struct kernel_pkey_query *info); - int (*asym_eds_op)(struct kernel_pkey_params *params, - const void *in, void *out); - int (*asym_verify_signature)(struct kernel_pkey_params *params, - const void *in, const void *in2); - /* internal fields */ struct list_head link; /* link in types list */ struct lock_class_key lock_class; /* key->sem lock class */ -} __randomize_layout; +} __do_const __randomize_layout; extern struct key_type key_type_keyring; @@ -171,20 +162,20 @@ extern int key_instantiate_and_link(struct key *key, const void *data, size_t datalen, struct key *keyring, - struct key *authkey); + struct key *instkey); extern int key_reject_and_link(struct key *key, unsigned timeout, unsigned error, struct key *keyring, - struct key *authkey); -extern void complete_request_key(struct key *authkey, int error); + struct key *instkey); +extern void complete_request_key(struct key_construction *cons, int error); static inline int key_negate_and_link(struct key *key, unsigned timeout, struct key *keyring, - struct key *authkey) + struct key *instkey) { - return key_reject_and_link(key, timeout, ENOKEY, keyring, authkey); + return key_reject_and_link(key, timeout, ENOKEY, keyring, instkey); } extern int generic_key_instantiate(struct key *key, struct key_preparsed_payload *prep); diff --git a/include/linux/key.h b/include/linux/key.h index 7febc48813..722914798f 100644 --- a/include/linux/key.h +++ b/include/linux/key.h @@ -1,10 +1,15 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ /* Authentication token and access key management * * Copyright (C) 2004, 2007 Red Hat, Inc. All Rights Reserved. * Written by David Howells (dhowells@redhat.com) * - * See Documentation/security/keys/core.rst for information on keys/keyrings. 
+ * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. + * + * + * See Documentation/security/keys.txt for information on keys/keyrings. */ #ifndef _LINUX_KEY_H @@ -18,8 +23,6 @@ #include #include #include -#include -#include #ifdef __KERNEL__ #include @@ -31,7 +34,6 @@ typedef int32_t key_serial_t; typedef uint32_t key_perm_t; struct key; -struct net; #ifdef CONFIG_KEYS @@ -71,23 +73,6 @@ struct net; #define KEY_PERM_UNDEF 0xffffffff -/* - * The permissions required on a key that we're looking up. - */ -enum key_need_perm { - KEY_NEED_UNSPECIFIED, /* Needed permission unspecified */ - KEY_NEED_VIEW, /* Require permission to view attributes */ - KEY_NEED_READ, /* Require permission to read content */ - KEY_NEED_WRITE, /* Require permission to update / modify */ - KEY_NEED_SEARCH, /* Require permission to search (keyring) or find (key) */ - KEY_NEED_LINK, /* Require permission to link */ - KEY_NEED_SETATTR, /* Require permission to change attributes */ - KEY_NEED_UNLINK, /* Require permission to unlink key */ - KEY_SYSADMIN_OVERRIDE, /* Special: override by CAP_SYS_ADMIN */ - KEY_AUTHTOKEN_OVERRIDE, /* Special: override by possession of auth token */ - KEY_DEFER_PERM_CHECK, /* Special: permission check is deferred */ -}; - struct seq_file; struct user_struct; struct signal_struct; @@ -95,34 +80,13 @@ struct cred; struct key_type; struct key_owner; -struct key_tag; struct keyring_list; struct keyring_name; -struct key_tag { - struct rcu_head rcu; - refcount_t usage; - bool removed; /* T when subject removed */ -}; - struct keyring_index_key { - /* [!] If this structure is altered, the union in struct key must change too! 
*/ - unsigned long hash; /* Hash value */ - union { - struct { -#ifdef __LITTLE_ENDIAN /* Put desc_len at the LSB of x */ - u16 desc_len; - char desc[sizeof(long) - 2]; /* First few chars of description */ -#else - char desc[sizeof(long) - 2]; /* First few chars of description */ - u16 desc_len; -#endif - }; - unsigned long x; - }; struct key_type *type; - struct key_tag *domain_tag; /* Domain of operation */ const char *description; + size_t desc_len; }; union key_payload { @@ -162,22 +126,6 @@ static inline bool is_key_possessed(const key_ref_t key_ref) return (unsigned long) key_ref & 1UL; } -typedef int (*key_restrict_link_func_t)(struct key *dest_keyring, - const struct key_type *type, - const union key_payload *payload, - struct key *restriction_key); - -struct key_restriction { - key_restrict_link_func_t check; - struct key *key; - struct key_type *keytype; -}; - -enum key_state { - KEY_IS_UNINSTANTIATED, - KEY_IS_POSITIVE, /* Positively instantiated */ -}; - /*****************************************************************************/ /* * authentication token / access credential / keyring @@ -187,23 +135,20 @@ enum key_state { * - Kerberos TGTs and tickets */ struct key { - refcount_t usage; /* number of references */ + atomic_t usage; /* number of references */ key_serial_t serial; /* key serial number */ union { struct list_head graveyard_link; struct rb_node serial_node; }; -#ifdef CONFIG_KEY_NOTIFICATIONS - struct watch_list *watchers; /* Entities watching this key for changes */ -#endif struct rw_semaphore sem; /* change vs change sem */ struct key_user *user; /* owner of this key */ void *security; /* security data for this key */ union { - time64_t expiry; /* time at which key expires (or 0) */ - time64_t revoked_at; /* time at which key was revoked */ + time_t expiry; /* time at which key expires (or 0) */ + time_t revoked_at; /* time at which key was revoked */ }; - time64_t last_used_at; /* last time used for LRU keyring discard */ + time_t 
last_used_at; /* last time used for LRU keyring discard */ kuid_t uid; kgid_t gid; key_perm_t perm; /* access permissions */ @@ -212,24 +157,25 @@ struct key { * - may not match RCU dereferenced payload * - payload should contain own length */ - short state; /* Key state (+) or rejection error (-) */ #ifdef KEY_DEBUGGING unsigned magic; #define KEY_DEBUG_MAGIC 0x18273645u +#define KEY_DEBUG_MAGIC_X 0xf8e9dacbu #endif unsigned long flags; /* status flags (change with bitops) */ -#define KEY_FLAG_DEAD 0 /* set if key type has been deleted */ -#define KEY_FLAG_REVOKED 1 /* set if key had been revoked */ -#define KEY_FLAG_IN_QUOTA 2 /* set if key consumes quota */ -#define KEY_FLAG_USER_CONSTRUCT 3 /* set if key is being constructed in userspace */ -#define KEY_FLAG_ROOT_CAN_CLEAR 4 /* set if key can be cleared by root without permission */ -#define KEY_FLAG_INVALIDATED 5 /* set if key has been invalidated */ -#define KEY_FLAG_BUILTIN 6 /* set if key is built in to the kernel */ -#define KEY_FLAG_ROOT_CAN_INVAL 7 /* set if key can be invalidated by root without permission */ -#define KEY_FLAG_KEEP 8 /* set if key should not be removed */ -#define KEY_FLAG_UID_KEYRING 9 /* set if key is a user or user session keyring */ +#define KEY_FLAG_INSTANTIATED 0 /* set if key has been instantiated */ +#define KEY_FLAG_DEAD 1 /* set if key type has been deleted */ +#define KEY_FLAG_REVOKED 2 /* set if key had been revoked */ +#define KEY_FLAG_IN_QUOTA 3 /* set if key consumes quota */ +#define KEY_FLAG_USER_CONSTRUCT 4 /* set if key is being constructed in userspace */ +#define KEY_FLAG_NEGATIVE 5 /* set if key is negative */ +#define KEY_FLAG_ROOT_CAN_CLEAR 6 /* set if key can be cleared by root without permission */ +#define KEY_FLAG_INVALIDATED 7 /* set if key has been invalidated */ +#define KEY_FLAG_BUILTIN 8 /* set if key is built in to the kernel */ +#define KEY_FLAG_ROOT_CAN_INVAL 9 /* set if key can be invalidated by root without permission */ +#define KEY_FLAG_KEEP 10 /* 
set if key should not be removed */ /* the key type and key description string * - the desc is used to match a key against search criteria @@ -239,10 +185,7 @@ struct key { union { struct keyring_index_key index_key; struct { - unsigned long hash; - unsigned long len_desc; struct key_type *type; /* type of key */ - struct key_tag *domain_tag; /* Domain of operation */ char *description; }; }; @@ -258,20 +201,22 @@ struct key { struct list_head name_link; struct assoc_array keys; }; + int reject_error; }; /* This is set on a keyring to restrict the addition of a link to a key - * to it. If this structure isn't provided then it is assumed that the + * to it. If this method isn't provided then it is assumed that the * keyring is open to any addition. It is ignored for non-keyring - * keys. Only set this value using keyring_restrict(), keyring_alloc(), - * or key_alloc(). + * keys. * * This is intended for use with rings of trusted keys whereby addition * to the keyring needs to be controlled. KEY_ALLOC_BYPASS_RESTRICTION * overrides this, allowing the kernel to add extra keys without * restriction. 
*/ - struct key_restriction *restrict_link; + int (*restrict_link)(struct key *keyring, + const struct key_type *type, + const union key_payload *payload); }; extern struct key *key_alloc(struct key_type *type, @@ -280,7 +225,9 @@ extern struct key *key_alloc(struct key_type *type, const struct cred *cred, key_perm_t perm, unsigned long flags, - struct key_restriction *restrict_link); + int (*restrict_link)(struct key *, + const struct key_type *, + const union key_payload *)); #define KEY_ALLOC_IN_QUOTA 0x0000 /* add to quota, reject if would overrun */ @@ -288,18 +235,14 @@ extern struct key *key_alloc(struct key_type *type, #define KEY_ALLOC_NOT_IN_QUOTA 0x0002 /* not in quota */ #define KEY_ALLOC_BUILT_IN 0x0004 /* Key is built into kernel */ #define KEY_ALLOC_BYPASS_RESTRICTION 0x0008 /* Override the check on restricted keyrings */ -#define KEY_ALLOC_UID_KEYRING 0x0010 /* allocating a user or user session keyring */ -#define KEY_ALLOC_SET_KEEP 0x0020 /* Set the KEEP flag on the key/keyring */ extern void key_revoke(struct key *key); extern void key_invalidate(struct key *key); extern void key_put(struct key *key); -extern bool key_put_tag(struct key_tag *tag); -extern void key_remove_domain(struct key_tag *domain_tag); static inline struct key *__key_get(struct key *key) { - refcount_inc(&key->usage); + atomic_inc(&key->usage); return key; } @@ -313,68 +256,26 @@ static inline void key_ref_put(key_ref_t key_ref) key_put(key_ref_to_ptr(key_ref)); } -extern struct key *request_key_tag(struct key_type *type, - const char *description, - struct key_tag *domain_tag, - const char *callout_info); - -extern struct key *request_key_rcu(struct key_type *type, - const char *description, - struct key_tag *domain_tag); +extern struct key *request_key(struct key_type *type, + const char *description, + const char *callout_info); extern struct key *request_key_with_auxdata(struct key_type *type, const char *description, - struct key_tag *domain_tag, const void *callout_info, 
size_t callout_len, void *aux); -/** - * request_key - Request a key and wait for construction - * @type: Type of key. - * @description: The searchable description of the key. - * @callout_info: The data to pass to the instantiation upcall (or NULL). - * - * As for request_key_tag(), but with the default global domain tag. - */ -static inline struct key *request_key(struct key_type *type, - const char *description, - const char *callout_info) -{ - return request_key_tag(type, description, NULL, callout_info); -} +extern struct key *request_key_async(struct key_type *type, + const char *description, + const void *callout_info, + size_t callout_len); -#ifdef CONFIG_NET -/** - * request_key_net - Request a key for a net namespace and wait for construction - * @type: Type of key. - * @description: The searchable description of the key. - * @net: The network namespace that is the key's domain of operation. - * @callout_info: The data to pass to the instantiation upcall (or NULL). - * - * As for request_key() except that it does not add the returned key to a - * keyring if found, new keys are always allocated in the user's quota, the - * callout_info must be a NUL-terminated string and no auxiliary data can be - * passed. Only keys that operate the specified network namespace are used. - * - * Furthermore, it then works as wait_for_key_construction() to wait for the - * completion of keys undergoing construction with a non-interruptible wait. - */ -#define request_key_net(type, description, net, callout_info) \ - request_key_tag(type, description, net->key_domain, callout_info) - -/** - * request_key_net_rcu - Request a key for a net namespace under RCU conditions - * @type: Type of key. - * @description: The searchable description of the key. - * @net: The network namespace that is the key's domain of operation. - * - * As for request_key_rcu() except that only keys that operate the specified - * network namespace are used. 
- */ -#define request_key_net_rcu(type, description, net) \ - request_key_rcu(type, description, net->key_domain) -#endif /* CONFIG_NET */ +extern struct key *request_key_async_with_auxdata(struct key_type *type, + const char *description, + const void *callout_info, + size_t callout_len, + void *aux); extern int wait_for_key_construction(struct key *key, bool intr); @@ -395,11 +296,6 @@ extern int key_update(key_ref_t key, extern int key_link(struct key *keyring, struct key *key); -extern int key_move(struct key *key, - struct key *from_keyring, - struct key *to_keyring, - unsigned int flags); - extern int key_unlink(struct key *keyring, struct key *key); @@ -407,27 +303,24 @@ extern struct key *keyring_alloc(const char *description, kuid_t uid, kgid_t gid const struct cred *cred, key_perm_t perm, unsigned long flags, - struct key_restriction *restrict_link, + int (*restrict_link)(struct key *, + const struct key_type *, + const union key_payload *), struct key *dest); extern int restrict_link_reject(struct key *keyring, const struct key_type *type, - const union key_payload *payload, - struct key *restriction_key); + const union key_payload *payload); extern int keyring_clear(struct key *keyring); extern key_ref_t keyring_search(key_ref_t keyring, struct key_type *type, - const char *description, - bool recurse); + const char *description); extern int keyring_add_key(struct key *keyring, struct key *key); -extern int keyring_restrict(key_ref_t keyring, const char *type, - const char *restriction); - extern struct key *key_lookup(key_serial_t id); static inline key_serial_t key_serial(const struct key *key) @@ -437,37 +330,31 @@ static inline key_serial_t key_serial(const struct key *key) extern void key_set_timeout(struct key *, unsigned); -extern key_ref_t lookup_user_key(key_serial_t id, unsigned long flags, - enum key_need_perm need_perm); -extern void key_free_user_ns(struct user_namespace *); - -static inline short key_read_state(const struct key *key) -{ - 
/* Barrier versus mark_key_instantiated(). */ - return smp_load_acquire(&key->state); -} +/* + * The permissions required on a key that we're looking up. + */ +#define KEY_NEED_VIEW 0x01 /* Require permission to view attributes */ +#define KEY_NEED_READ 0x02 /* Require permission to read content */ +#define KEY_NEED_WRITE 0x04 /* Require permission to update / modify */ +#define KEY_NEED_SEARCH 0x08 /* Require permission to search (keyring) or find (key) */ +#define KEY_NEED_LINK 0x10 /* Require permission to link */ +#define KEY_NEED_SETATTR 0x20 /* Require permission to change attributes */ +#define KEY_NEED_ALL 0x3f /* All the above permissions */ /** - * key_is_positive - Determine if a key has been positively instantiated + * key_is_instantiated - Determine if a key has been positively instantiated * @key: The key to check. * * Return true if the specified key has been positively instantiated, false * otherwise. */ -static inline bool key_is_positive(const struct key *key) +static inline bool key_is_instantiated(const struct key *key) { - return key_read_state(key) == KEY_IS_POSITIVE; + return test_bit(KEY_FLAG_INSTANTIATED, &key->flags) && + !test_bit(KEY_FLAG_NEGATIVE, &key->flags); } -static inline bool key_is_negative(const struct key *key) -{ - return key_read_state(key) < 0; -} - -#define dereference_key_rcu(KEY) \ - (rcu_dereference((KEY)->payload.rcu_data0)) - -#define dereference_key_locked(KEY) \ +#define rcu_dereference_key(KEY) \ (rcu_dereference_protected((KEY)->payload.rcu_data0, \ rwsem_is_locked(&((struct key *)(KEY))->sem))) @@ -483,8 +370,8 @@ extern struct ctl_table key_sysctls[]; * the userspace interface */ extern int install_thread_keyring_to_cred(struct cred *cred); -extern void key_fsuid_changed(struct cred *new_cred); -extern void key_fsgid_changed(struct cred *new_cred); +extern void key_fsuid_changed(struct task_struct *tsk); +extern void key_fsgid_changed(struct task_struct *tsk); extern void key_init(void); #else /* CONFIG_KEYS */ 
@@ -499,11 +386,9 @@ extern void key_init(void); #define make_key_ref(k, p) NULL #define key_ref_to_ptr(k) NULL #define is_key_possessed(k) 0 -#define key_fsuid_changed(c) do { } while(0) -#define key_fsgid_changed(c) do { } while(0) +#define key_fsuid_changed(t) do { } while(0) +#define key_fsgid_changed(t) do { } while(0) #define key_init() do { } while(0) -#define key_free_user_ns(ns) do { } while(0) -#define key_remove_domain(d) do { } while(0) #endif /* CONFIG_KEYS */ #endif /* __KERNEL__ */ diff --git a/include/linux/keyboard.h b/include/linux/keyboard.h index 73d11e4090..131ed51465 100644 --- a/include/linux/keyboard.h +++ b/include/linux/keyboard.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef __LINUX_KEYBOARD_H #define __LINUX_KEYBOARD_H diff --git a/include/linux/kfifo.h b/include/linux/kfifo.h index 86249476b5..41eb6fdf87 100644 --- a/include/linux/kfifo.h +++ b/include/linux/kfifo.h @@ -1,8 +1,22 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * A generic kernel FIFO implementation * * Copyright (C) 2013 Stefani Seibold + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. + * */ #ifndef _LINUX_KFIFO_H @@ -27,11 +41,11 @@ */ /* - * Note about locking: There is no locking required until only one reader - * and one writer is using the fifo and no kfifo_reset() will be called. 
- * kfifo_reset_out() can be safely used, until it will be only called + * Note about locking : There is no locking required until only * one reader + * and one writer is using the fifo and no kfifo_reset() will be * called + * kfifo_reset_out() can be safely used, until it will be only called * in the reader thread. - * For multiple writer and one reader there is only a need to lock the writer. + * For multiple writer and one reader there is only a need to lock the writer. * And vice versa for only one writer and multiple reader there is only a need * to lock the reader. */ @@ -99,8 +113,7 @@ struct kfifo_rec_ptr_2 __STRUCT_KFIFO_PTR(unsigned char, 2, void); * array is a part of the structure and the fifo type where the array is * outside of the fifo structure. */ -#define __is_kfifo_ptr(fifo) \ - (sizeof(*fifo) == sizeof(STRUCT_KFIFO_PTR(typeof(*(fifo)->type)))) +#define __is_kfifo_ptr(fifo) (sizeof(*fifo) == sizeof(struct __kfifo)) /** * DECLARE_KFIFO_PTR - macro to declare a fifo pointer object @@ -246,37 +259,6 @@ __kfifo_int_must_check_helper(int val) __tmpq->kfifo.in == __tmpq->kfifo.out; \ }) -/** - * kfifo_is_empty_spinlocked - returns true if the fifo is empty using - * a spinlock for locking - * @fifo: address of the fifo to be used - * @lock: spinlock to be used for locking - */ -#define kfifo_is_empty_spinlocked(fifo, lock) \ -({ \ - unsigned long __flags; \ - bool __ret; \ - spin_lock_irqsave(lock, __flags); \ - __ret = kfifo_is_empty(fifo); \ - spin_unlock_irqrestore(lock, __flags); \ - __ret; \ -}) - -/** - * kfifo_is_empty_spinlocked_noirqsave - returns true if the fifo is empty - * using a spinlock for locking, doesn't disable interrupts - * @fifo: address of the fifo to be used - * @lock: spinlock to be used for locking - */ -#define kfifo_is_empty_spinlocked_noirqsave(fifo, lock) \ -({ \ - bool __ret; \ - spin_lock(lock); \ - __ret = kfifo_is_empty(fifo); \ - spin_unlock(lock); \ - __ret; \ -}) - /** * kfifo_is_full - returns true if the fifo is 
full * @fifo: address of the fifo to be used @@ -343,7 +325,7 @@ __kfifo_uint_must_check_helper( \ * * This macro dynamically allocates a new fifo buffer. * - * The number of elements will be rounded-up to a power of 2. + * The numer of elements will be rounded-up to a power of 2. * The fifo will be release with kfifo_free(). * Return 0 if no error, otherwise an error code. */ @@ -376,9 +358,9 @@ __kfifo_int_must_check_helper( \ * @buffer: the preallocated buffer to be used * @size: the size of the internal buffer, this have to be a power of 2 * - * This macro initializes a fifo using a preallocated buffer. + * This macro initialize a fifo using a preallocated buffer. * - * The number of elements will be rounded-up to a power of 2. + * The numer of elements will be rounded-up to a power of 2. * Return 0 if no error, otherwise an error code. */ #define kfifo_init(fifo, buffer, size) \ @@ -548,26 +530,6 @@ __kfifo_uint_must_check_helper( \ __ret; \ }) -/** - * kfifo_in_spinlocked_noirqsave - put data into fifo using a spinlock for - * locking, don't disable interrupts - * @fifo: address of the fifo to be used - * @buf: the data to be added - * @n: number of elements to be added - * @lock: pointer to the spinlock to use for locking - * - * This is a variant of kfifo_in_spinlocked() but uses spin_lock/unlock() - * for locking and doesn't disable interrupts. 
- */ -#define kfifo_in_spinlocked_noirqsave(fifo, buf, n, lock) \ -({ \ - unsigned int __ret; \ - spin_lock(lock); \ - __ret = kfifo_in(fifo, buf, n); \ - spin_unlock(lock); \ - __ret; \ -}) - /* alias for kfifo_in_spinlocked, will be removed in a future release */ #define kfifo_in_locked(fifo, buf, n, lock) \ kfifo_in_spinlocked(fifo, buf, n, lock) @@ -620,28 +582,6 @@ __kfifo_uint_must_check_helper( \ }) \ ) -/** - * kfifo_out_spinlocked_noirqsave - get data from the fifo using a spinlock - * for locking, don't disable interrupts - * @fifo: address of the fifo to be used - * @buf: pointer to the storage buffer - * @n: max. number of elements to get - * @lock: pointer to the spinlock to use for locking - * - * This is a variant of kfifo_out_spinlocked() which uses spin_lock/unlock() - * for locking and doesn't disable interrupts. - */ -#define kfifo_out_spinlocked_noirqsave(fifo, buf, n, lock) \ -__kfifo_uint_must_check_helper( \ -({ \ - unsigned int __ret; \ - spin_lock(lock); \ - __ret = kfifo_out(fifo, buf, n); \ - spin_unlock(lock); \ - __ret; \ -}) \ -) - /* alias for kfifo_out_spinlocked, will be removed in a future release */ #define kfifo_out_locked(fifo, buf, n, lock) \ kfifo_out_spinlocked(fifo, buf, n, lock) diff --git a/include/linux/kgdb.h b/include/linux/kgdb.h index 258cdde8d3..19f605fd21 100644 --- a/include/linux/kgdb.h +++ b/include/linux/kgdb.h @@ -16,7 +16,6 @@ #include #include #include -#include #ifdef CONFIG_HAVE_ARCH_KGDB #include #endif @@ -53,7 +52,7 @@ extern int kgdb_connected; extern int kgdb_io_module_registered; extern atomic_t kgdb_setting_breakpoint; -extern atomic_t kgdb_cpu_doing_single_step; +extern atomic_unchecked_t kgdb_cpu_doing_single_step; extern struct task_struct *kgdb_usethread; extern struct task_struct *kgdb_contthread; @@ -105,9 +104,9 @@ extern int dbg_set_reg(int regno, void *mem, struct pt_regs *regs); */ /** - * kgdb_arch_init - Perform any architecture specific initialization. 
+ * kgdb_arch_init - Perform any architecture specific initalization. * - * This function will handle the initialization of any architecture + * This function will handle the initalization of any architecture * specific callbacks. */ extern int kgdb_arch_init(void); @@ -177,40 +176,23 @@ kgdb_arch_handle_exception(int vector, int signo, int err_code, char *remcom_out_buffer, struct pt_regs *regs); -/** - * kgdb_arch_handle_qxfer_pkt - Handle architecture specific GDB XML - * packets. - * @remcom_in_buffer: The buffer of the packet we have read. - * @remcom_out_buffer: The buffer of %BUFMAX bytes to write a packet into. - */ - -extern void -kgdb_arch_handle_qxfer_pkt(char *remcom_in_buffer, - char *remcom_out_buffer); - -/** - * kgdb_call_nmi_hook - Call kgdb_nmicallback() on the current CPU - * @ignored: This parameter is only here to match the prototype. - * - * If you're using the default implementation of kgdb_roundup_cpus() - * this function will be called per CPU. If you don't implement - * kgdb_call_nmi_hook() a default will be used. - */ - -extern void kgdb_call_nmi_hook(void *ignored); - /** * kgdb_roundup_cpus - Get other CPUs into a holding pattern + * @flags: Current IRQ state * * On SMP systems, we need to get the attention of the other CPUs * and get them into a known state. This should do what is needed * to get the other CPUs to call kgdb_wait(). Note that on some arches, - * the NMI approach is not used for rounding up all the CPUs. Normally - * those architectures can just not implement this and get the default. + * the NMI approach is not used for rounding up all the CPUs. For example, + * in case of MIPS, smp_call_function() is used to roundup CPUs. In + * this case, we have to make sure that interrupts are enabled before + * calling smp_call_function(). The argument to this function is + * the flags that will be used when restoring the interrupts. There is + * local_irq_save() call before kgdb_roundup_cpus(). 
* * On non-SMP systems, this is not called. */ -extern void kgdb_roundup_cpus(void); +extern void kgdb_roundup_cpus(unsigned long flags); /** * kgdb_arch_set_pc - Generic call back to the program counter @@ -229,9 +211,9 @@ extern int kgdb_arch_set_breakpoint(struct kgdb_bkpt *bpt); extern int kgdb_arch_remove_breakpoint(struct kgdb_bkpt *bpt); /** - * kgdb_arch_late - Perform any architecture specific initialization. + * kgdb_arch_late - Perform any architecture specific initalization. * - * This function will handle the late initialization of any + * This function will handle the late initalization of any * architecture specific callbacks. This is an optional function for * handling things like late initialization of hw breakpoints. The * default implementation does nothing. @@ -272,7 +254,7 @@ struct kgdb_arch { void (*correct_hw_break)(void); void (*enable_nmi)(bool on); -}; +} __do_const; /** * struct kgdb_io - Describe the interface for an I/O driver to talk with KGDB. @@ -281,14 +263,12 @@ struct kgdb_arch { * @write_char: Pointer to a function that will write one char. * @flush: Pointer to a function that will flush any pending writes. * @init: Pointer to a function that will initialize the device. - * @deinit: Pointer to a function that will deinit the device. Implies that - * this I/O driver is temporary and expects to be replaced. Called when - * an I/O driver is replaced or explicitly unregistered. * @pre_exception: Pointer to a function that will do any prep work for * the I/O driver. * @post_exception: Pointer to a function that will do any cleanup work * for the I/O driver. - * @cons: valid if the I/O device is a console; else NULL. 
+ * @is_console: 1 if the end device is a console 0 if the I/O device is + * not a console */ struct kgdb_io { const char *name; @@ -296,13 +276,12 @@ struct kgdb_io { void (*write_char) (u8); void (*flush) (void); int (*init) (void); - void (*deinit) (void); void (*pre_exception) (void); void (*post_exception) (void); - struct console *cons; -}; + int is_console; +} __do_const; -extern const struct kgdb_arch arch_kgdb_ops; +extern struct kgdb_arch arch_kgdb_ops; extern unsigned long kgdb_arch_pc(int exception, struct pt_regs *regs); @@ -313,7 +292,7 @@ extern bool kgdb_nmi_poll_knock(void); #else static inline int kgdb_register_nmi_console(void) { return 0; } static inline int kgdb_unregister_nmi_console(void) { return 0; } -static inline bool kgdb_nmi_poll_knock(void) { return true; } +static inline bool kgdb_nmi_poll_knock(void) { return 1; } #endif extern int kgdb_register_io_module(struct kgdb_io *local_kgdb_io_ops); @@ -325,7 +304,7 @@ extern char *kgdb_mem2hex(char *mem, char *buf, int count); extern int kgdb_hex2mem(char *buf, char *mem, int count); extern int kgdb_isremovedbreak(unsigned long addr); -extern int kgdb_has_hit_break(unsigned long addr); +extern void kgdb_schedule_breakpoint(void); extern int kgdb_handle_exception(int ex_vector, int signo, int err_code, @@ -335,35 +314,14 @@ extern int kgdb_nmicallin(int cpu, int trapnr, void *regs, int err_code, atomic_t *snd_rdy); extern void gdbstub_exit(int status); -/* - * kgdb and kprobes both use the same (kprobe) blocklist (which makes sense - * given they are both typically hooked up to the same trap meaning on most - * architectures one cannot be used to debug the other) - * - * However on architectures where kprobes is not (yet) implemented we permit - * breakpoints everywhere rather than blocking everything by default. 
- */ -static inline bool kgdb_within_blocklist(unsigned long addr) -{ -#ifdef CONFIG_KGDB_HONOUR_BLOCKLIST - return within_kprobe_blacklist(addr); -#else - return false; -#endif -} - extern int kgdb_single_step; extern atomic_t kgdb_active; #define in_dbg_master() \ - (irqs_disabled() && (smp_processor_id() == atomic_read(&kgdb_active))) + (raw_smp_processor_id() == atomic_read(&kgdb_active)) extern bool dbg_is_early; extern void __init dbg_late_init(void); -extern void kgdb_panic(const char *msg); -extern void kgdb_free_init_mem(void); #else /* ! CONFIG_KGDB */ #define in_dbg_master() (0) #define dbg_late_init() -static inline void kgdb_panic(const char *msg) {} -static inline void kgdb_free_init_mem(void) { } #endif /* ! CONFIG_KGDB */ #endif /* _KGDB_H_ */ diff --git a/include/linux/khugepaged.h b/include/linux/khugepaged.h index 2fcc01891b..1e032a1ddb 100644 --- a/include/linux/khugepaged.h +++ b/include/linux/khugepaged.h @@ -1,10 +1,7 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_KHUGEPAGED_H #define _LINUX_KHUGEPAGED_H -#include /* MMF_VM_HUGEPAGE */ -#include - +#include /* MMF_VM_HUGEPAGE */ #ifdef CONFIG_TRANSPARENT_HUGEPAGE extern struct attribute_group khugepaged_attr_group; @@ -16,15 +13,6 @@ extern int __khugepaged_enter(struct mm_struct *mm); extern void __khugepaged_exit(struct mm_struct *mm); extern int khugepaged_enter_vma_merge(struct vm_area_struct *vma, unsigned long vm_flags); -extern void khugepaged_min_free_kbytes_update(void); -#ifdef CONFIG_SHMEM -extern void collapse_pte_mapped_thp(struct mm_struct *mm, unsigned long addr); -#else -static inline void collapse_pte_mapped_thp(struct mm_struct *mm, - unsigned long addr) -{ -} -#endif #define khugepaged_enabled() \ (transparent_hugepage_flags & \ @@ -58,10 +46,8 @@ static inline int khugepaged_enter(struct vm_area_struct *vma, { if (!test_bit(MMF_VM_HUGEPAGE, &vma->vm_mm->flags)) if ((khugepaged_always() || - (shmem_file(vma->vm_file) && shmem_huge_enabled(vma)) || 
(khugepaged_req_madv() && (vm_flags & VM_HUGEPAGE))) && - !(vm_flags & VM_NOHUGEPAGE) && - !test_bit(MMF_DISABLE_THP, &vma->vm_mm->flags)) + !(vm_flags & VM_NOHUGEPAGE)) if (__khugepaged_enter(vma->vm_mm)) return -ENOMEM; return 0; @@ -84,14 +70,6 @@ static inline int khugepaged_enter_vma_merge(struct vm_area_struct *vma, { return 0; } -static inline void collapse_pte_mapped_thp(struct mm_struct *mm, - unsigned long addr) -{ -} - -static inline void khugepaged_min_free_kbytes_update(void) -{ -} #endif /* CONFIG_TRANSPARENT_HUGEPAGE */ #endif /* _LINUX_KHUGEPAGED_H */ diff --git a/include/linux/klist.h b/include/linux/klist.h index b0f238f20d..953f283f84 100644 --- a/include/linux/klist.h +++ b/include/linux/klist.h @@ -1,10 +1,12 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * klist.h - Some generic list helpers, extending struct list_head a bit. * * Implementations are found in lib/klist.c * + * * Copyright (C) 2005 Patrick Mochel + * + * This file is rleased under the GPL v2. */ #ifndef _LINUX_KLIST_H diff --git a/include/linux/kmemcheck.h b/include/linux/kmemcheck.h new file mode 100644 index 0000000000..39f8453239 --- /dev/null +++ b/include/linux/kmemcheck.h @@ -0,0 +1,171 @@ +#ifndef LINUX_KMEMCHECK_H +#define LINUX_KMEMCHECK_H + +#include +#include + +#ifdef CONFIG_KMEMCHECK +extern int kmemcheck_enabled; + +/* The slab-related functions. 
*/ +void kmemcheck_alloc_shadow(struct page *page, int order, gfp_t flags, int node); +void kmemcheck_free_shadow(struct page *page, int order); +void kmemcheck_slab_alloc(struct kmem_cache *s, gfp_t gfpflags, void *object, + size_t size); +void kmemcheck_slab_free(struct kmem_cache *s, void *object, size_t size); + +void kmemcheck_pagealloc_alloc(struct page *p, unsigned int order, + gfp_t gfpflags); + +void kmemcheck_show_pages(struct page *p, unsigned int n); +void kmemcheck_hide_pages(struct page *p, unsigned int n); + +bool kmemcheck_page_is_tracked(struct page *p); + +void kmemcheck_mark_unallocated(void *address, unsigned int n); +void kmemcheck_mark_uninitialized(void *address, unsigned int n); +void kmemcheck_mark_initialized(void *address, unsigned int n); +void kmemcheck_mark_freed(void *address, unsigned int n); + +void kmemcheck_mark_unallocated_pages(struct page *p, unsigned int n); +void kmemcheck_mark_uninitialized_pages(struct page *p, unsigned int n); +void kmemcheck_mark_initialized_pages(struct page *p, unsigned int n); + +int kmemcheck_show_addr(unsigned long address); +int kmemcheck_hide_addr(unsigned long address); + +bool kmemcheck_is_obj_initialized(unsigned long addr, size_t size); + +/* + * Bitfield annotations + * + * How to use: If you have a struct using bitfields, for example + * + * struct a { + * int x:8, y:8; + * }; + * + * then this should be rewritten as + * + * struct a { + * kmemcheck_bitfield_begin(flags); + * int x:8, y:8; + * kmemcheck_bitfield_end(flags); + * }; + * + * Now the "flags_begin" and "flags_end" members may be used to refer to the + * beginning and end, respectively, of the bitfield (and things like + * &x.flags_begin is allowed). 
As soon as the struct is allocated, the bit- + * fields should be annotated: + * + * struct a *a = kmalloc(sizeof(struct a), GFP_KERNEL); + * kmemcheck_annotate_bitfield(a, flags); + */ +#define kmemcheck_bitfield_begin(name) \ + int name##_begin[0]; + +#define kmemcheck_bitfield_end(name) \ + int name##_end[0]; + +#define kmemcheck_annotate_bitfield(ptr, name) \ + do { \ + int _n; \ + \ + if (!ptr) \ + break; \ + \ + _n = (long) &((ptr)->name##_end) \ + - (long) &((ptr)->name##_begin); \ + BUILD_BUG_ON(_n < 0); \ + \ + kmemcheck_mark_initialized(&((ptr)->name##_begin), _n); \ + } while (0) + +#define kmemcheck_annotate_variable(var) \ + do { \ + kmemcheck_mark_initialized(&(var), sizeof(var)); \ + } while (0) \ + +#else +#define kmemcheck_enabled 0 + +static inline void +kmemcheck_alloc_shadow(struct page *page, int order, gfp_t flags, int node) +{ +} + +static inline void +kmemcheck_free_shadow(struct page *page, int order) +{ +} + +static inline void +kmemcheck_slab_alloc(struct kmem_cache *s, gfp_t gfpflags, void *object, + size_t size) +{ +} + +static inline void kmemcheck_slab_free(struct kmem_cache *s, void *object, + size_t size) +{ +} + +static inline void kmemcheck_pagealloc_alloc(struct page *p, + unsigned int order, gfp_t gfpflags) +{ +} + +static inline bool kmemcheck_page_is_tracked(struct page *p) +{ + return false; +} + +static inline void kmemcheck_mark_unallocated(void *address, unsigned int n) +{ +} + +static inline void kmemcheck_mark_uninitialized(void *address, unsigned int n) +{ +} + +static inline void kmemcheck_mark_initialized(void *address, unsigned int n) +{ +} + +static inline void kmemcheck_mark_freed(void *address, unsigned int n) +{ +} + +static inline void kmemcheck_mark_unallocated_pages(struct page *p, + unsigned int n) +{ +} + +static inline void kmemcheck_mark_uninitialized_pages(struct page *p, + unsigned int n) +{ +} + +static inline void kmemcheck_mark_initialized_pages(struct page *p, + unsigned int n) +{ +} + +static inline 
bool kmemcheck_is_obj_initialized(unsigned long addr, size_t size) +{ + return true; +} + +#define kmemcheck_bitfield_begin(name) +#define kmemcheck_bitfield_end(name) +#define kmemcheck_annotate_bitfield(ptr, name) \ + do { \ + } while (0) + +#define kmemcheck_annotate_variable(var) \ + do { \ + } while (0) + +#endif /* CONFIG_KMEMCHECK */ + +#endif /* LINUX_KMEMCHECK_H */ diff --git a/include/linux/kmemleak.h b/include/linux/kmemleak.h index 34684b2026..d61f48c85d 100644 --- a/include/linux/kmemleak.h +++ b/include/linux/kmemleak.h @@ -1,26 +1,35 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * include/linux/kmemleak.h * * Copyright (C) 2008 ARM Limited * Written by Catalin Marinas + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #ifndef __KMEMLEAK_H #define __KMEMLEAK_H #include -#include #ifdef CONFIG_DEBUG_KMEMLEAK extern void kmemleak_init(void) __init; extern void kmemleak_alloc(const void *ptr, size_t size, int min_count, - gfp_t gfp) __ref; + gfp_t gfp) __ref __size_overflow(2); extern void kmemleak_alloc_percpu(const void __percpu *ptr, size_t size, gfp_t gfp) __ref; -extern void kmemleak_vmalloc(const struct vm_struct *area, size_t size, - gfp_t gfp) __ref; extern void kmemleak_free(const void *ptr) __ref; extern void kmemleak_free_part(const void *ptr, size_t size) __ref; extern void kmemleak_free_percpu(const void __percpu *ptr) __ref; @@ -36,14 +45,14 @@ extern void kmemleak_not_leak_phys(phys_addr_t phys) __ref; extern void kmemleak_ignore_phys(phys_addr_t phys) __ref; static inline void kmemleak_alloc_recursive(const void *ptr, size_t size, - int min_count, slab_flags_t flags, + int min_count, unsigned long flags, gfp_t gfp) { if (!(flags & SLAB_NOLEAKTRACE)) kmemleak_alloc(ptr, size, min_count, gfp); } -static inline void kmemleak_free_recursive(const void *ptr, slab_flags_t flags) +static inline void kmemleak_free_recursive(const void *ptr, unsigned long flags) { if (!(flags & SLAB_NOLEAKTRACE)) kmemleak_free(ptr); @@ -59,12 +68,12 @@ static inline void kmemleak_erase(void **ptr) static inline void kmemleak_init(void) { } -static inline void kmemleak_alloc(const void *ptr, size_t size, int min_count, +static inline void __size_overflow(2) kmemleak_alloc(const void *ptr, size_t size, int min_count, gfp_t gfp) { } static inline void kmemleak_alloc_recursive(const void *ptr, size_t size, - int min_count, slab_flags_t flags, + int min_count, unsigned long flags, gfp_t gfp) { } @@ -72,17 +81,13 @@ static inline void kmemleak_alloc_percpu(const void __percpu *ptr, 
size_t size, gfp_t gfp) { } -static inline void kmemleak_vmalloc(const struct vm_struct *area, size_t size, - gfp_t gfp) -{ -} static inline void kmemleak_free(const void *ptr) { } static inline void kmemleak_free_part(const void *ptr, size_t size) { } -static inline void kmemleak_free_recursive(const void *ptr, slab_flags_t flags) +static inline void kmemleak_free_recursive(const void *ptr, unsigned long flags) { } static inline void kmemleak_free_percpu(const void __percpu *ptr) diff --git a/include/linux/kmod.h b/include/linux/kmod.h index 68f69362d4..e4f5edba09 100644 --- a/include/linux/kmod.h +++ b/include/linux/kmod.h @@ -1,12 +1,24 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ #ifndef __LINUX_KMOD_H__ #define __LINUX_KMOD_H__ /* * include/linux/kmod.h + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ -#include #include #include #include @@ -22,6 +34,8 @@ extern char modprobe_path[]; /* for sysctl */ * usually useless though. */ extern __printf(2, 3) int __request_module(bool wait, const char *name, ...); +extern __printf(3, 4) +int ___request_module(bool wait, char *param_name, const char *name, ...); #define request_module(mod...) __request_module(true, mod) #define request_module_nowait(mod...) __request_module(false, mod) #define try_then_request_module(x, mod...) 
\ @@ -32,4 +46,65 @@ static inline int request_module_nowait(const char *name, ...) { return -ENOSYS; #define try_then_request_module(x, mod...) (x) #endif + +struct cred; +struct file; + +#define UMH_NO_WAIT 0 /* don't wait at all */ +#define UMH_WAIT_EXEC 1 /* wait for the exec, but not the process */ +#define UMH_WAIT_PROC 2 /* wait for the process to complete */ +#define UMH_KILLABLE 4 /* wait for EXEC/PROC killable */ + +struct subprocess_info { + struct work_struct work; + struct completion *complete; + char *path; +#ifdef CONFIG_GRKERNSEC + char *origpath; +#endif + char **argv; + char **envp; + int wait; + int retval; + int (*init)(struct subprocess_info *info, struct cred *new); + void (*cleanup)(struct subprocess_info *info); + void *data; +} __randomize_layout; + +extern int +call_usermodehelper(char *path, char **argv, char **envp, int wait); + +extern struct subprocess_info * +call_usermodehelper_setup(char *path, char **argv, char **envp, gfp_t gfp_mask, + int (*init)(struct subprocess_info *info, struct cred *new), + void (*cleanup)(struct subprocess_info *), void *data); + +extern int +call_usermodehelper_exec(struct subprocess_info *info, int wait); + +extern struct ctl_table usermodehelper_table[]; + +enum umh_disable_depth { + UMH_ENABLED = 0, + UMH_FREEZING, + UMH_DISABLED, +}; + +extern int __usermodehelper_disable(enum umh_disable_depth depth); +extern void __usermodehelper_set_disable_depth(enum umh_disable_depth depth); + +static inline int usermodehelper_disable(void) +{ + return __usermodehelper_disable(UMH_DISABLED); +} + +static inline void usermodehelper_enable(void) +{ + __usermodehelper_set_disable_depth(UMH_ENABLED); +} + +extern int usermodehelper_read_trylock(void); +extern long usermodehelper_read_lock_wait(long timeout); +extern void usermodehelper_read_unlock(void); + #endif /* __LINUX_KMOD_H__ */ diff --git a/include/linux/kmsg_dump.h b/include/linux/kmsg_dump.h index 906521c232..2e7a1e032c 100644 --- 
a/include/linux/kmsg_dump.h +++ b/include/linux/kmsg_dump.h @@ -25,18 +25,9 @@ enum kmsg_dump_reason { KMSG_DUMP_PANIC, KMSG_DUMP_OOPS, KMSG_DUMP_EMERG, - KMSG_DUMP_SHUTDOWN, - KMSG_DUMP_MAX -}; - -/** - * struct kmsg_dump_iter - iterator for retrieving kernel messages - * @cur_seq: Points to the oldest message to dump - * @next_seq: Points after the newest message to dump - */ -struct kmsg_dump_iter { - u64 cur_seq; - u64 next_seq; + KMSG_DUMP_RESTART, + KMSG_DUMP_HALT, + KMSG_DUMP_POWEROFF, }; /** @@ -51,43 +42,64 @@ struct kmsg_dumper { struct list_head list; void (*dump)(struct kmsg_dumper *dumper, enum kmsg_dump_reason reason); enum kmsg_dump_reason max_reason; + bool active; bool registered; + + /* private state of the kmsg iterator */ + u32 cur_idx; + u32 next_idx; + u64 cur_seq; + u64 next_seq; }; #ifdef CONFIG_PRINTK void kmsg_dump(enum kmsg_dump_reason reason); -bool kmsg_dump_get_line(struct kmsg_dump_iter *iter, bool syslog, +bool kmsg_dump_get_line_nolock(struct kmsg_dumper *dumper, bool syslog, + char *line, size_t size, size_t *len); + +bool kmsg_dump_get_line(struct kmsg_dumper *dumper, bool syslog, char *line, size_t size, size_t *len); -bool kmsg_dump_get_buffer(struct kmsg_dump_iter *iter, bool syslog, - char *buf, size_t size, size_t *len_out); +bool kmsg_dump_get_buffer(struct kmsg_dumper *dumper, bool syslog, + char *buf, size_t size, size_t *len); -void kmsg_dump_rewind(struct kmsg_dump_iter *iter); +void kmsg_dump_rewind_nolock(struct kmsg_dumper *dumper); + +void kmsg_dump_rewind(struct kmsg_dumper *dumper); int kmsg_dump_register(struct kmsg_dumper *dumper); int kmsg_dump_unregister(struct kmsg_dumper *dumper); - -const char *kmsg_dump_reason_str(enum kmsg_dump_reason reason); #else static inline void kmsg_dump(enum kmsg_dump_reason reason) { } -static inline bool kmsg_dump_get_line(struct kmsg_dump_iter *iter, bool syslog, +static inline bool kmsg_dump_get_line_nolock(struct kmsg_dumper *dumper, + bool syslog, const char *line, + size_t 
size, size_t *len) +{ + return false; +} + +static inline bool kmsg_dump_get_line(struct kmsg_dumper *dumper, bool syslog, const char *line, size_t size, size_t *len) { return false; } -static inline bool kmsg_dump_get_buffer(struct kmsg_dump_iter *iter, bool syslog, +static inline bool kmsg_dump_get_buffer(struct kmsg_dumper *dumper, bool syslog, char *buf, size_t size, size_t *len) { return false; } -static inline void kmsg_dump_rewind(struct kmsg_dump_iter *iter) +static inline void kmsg_dump_rewind_nolock(struct kmsg_dumper *dumper) +{ +} + +static inline void kmsg_dump_rewind(struct kmsg_dumper *dumper) { } @@ -100,11 +112,6 @@ static inline int kmsg_dump_unregister(struct kmsg_dumper *dumper) { return -EINVAL; } - -static inline const char *kmsg_dump_reason_str(enum kmsg_dump_reason reason) -{ - return "Disabled"; -} #endif #endif /* _LINUX_KMSG_DUMP_H */ diff --git a/include/linux/kobj_map.h b/include/linux/kobj_map.h index c9919f8b22..18ca75ffcc 100644 --- a/include/linux/kobj_map.h +++ b/include/linux/kobj_map.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ /* * kobj_map.h */ diff --git a/include/linux/kobject.h b/include/linux/kobject.h index ea30529fba..9d45d56c54 100644 --- a/include/linux/kobject.h +++ b/include/linux/kobject.h @@ -1,4 +1,3 @@ -// SPDX-License-Identifier: GPL-2.0 /* * kobject.h - generic kernel object infrastructure. * @@ -7,7 +6,9 @@ * Copyright (c) 2006-2008 Greg Kroah-Hartman * Copyright (c) 2006-2008 Novell Inc. * - * Please read Documentation/core-api/kobject.rst before using the kobject + * This file is released under the GPLv2. + * + * Please read Documentation/kobject.txt before using the kobject * interface, ESPECIALLY the parts about reference counts and object * destructors. 
*/ @@ -26,10 +27,9 @@ #include #include #include -#include #define UEVENT_HELPER_PATH_LEN 256 -#define UEVENT_NUM_ENVP 64 /* number of env pointers */ +#define UEVENT_NUM_ENVP 32 /* number of env pointers */ #define UEVENT_BUFFER_SIZE 2048 /* buffer for the variables */ #ifdef CONFIG_UEVENT_HELPER @@ -57,8 +57,7 @@ enum kobject_action { KOBJ_MOVE, KOBJ_ONLINE, KOBJ_OFFLINE, - KOBJ_BIND, - KOBJ_UNBIND, + KOBJ_MAX }; struct kobject { @@ -109,41 +108,18 @@ extern int __must_check kobject_rename(struct kobject *, const char *new_name); extern int __must_check kobject_move(struct kobject *, struct kobject *); extern struct kobject *kobject_get(struct kobject *kobj); -extern struct kobject * __must_check kobject_get_unless_zero( - struct kobject *kobj); extern void kobject_put(struct kobject *kobj); extern const void *kobject_namespace(struct kobject *kobj); -extern void kobject_get_ownership(struct kobject *kobj, - kuid_t *uid, kgid_t *gid); extern char *kobject_get_path(struct kobject *kobj, gfp_t flag); -/** - * kobject_has_children - Returns whether a kobject has children. - * @kobj: the object to test - * - * This will return whether a kobject has other kobjects as children. - * - * It does NOT account for the presence of attribute files, only sub - * directories. It also assumes there is no concurrent addition or - * removal of such children, and thus relies on external locking. 
- */ -static inline bool kobject_has_children(struct kobject *kobj) -{ - WARN_ON_ONCE(kref_read(&kobj->kref) == 0); - - return kobj->sd && kobj->sd->dir.subdirs; -} - struct kobj_type { void (*release)(struct kobject *kobj); const struct sysfs_ops *sysfs_ops; - struct attribute **default_attrs; /* use default_groups instead */ - const struct attribute_group **default_groups; + struct attribute **default_attrs; const struct kobj_ns_type_operations *(*child_ns_type)(struct kobject *kobj); const void *(*namespace)(struct kobject *kobj); - void (*get_ownership)(struct kobject *kobj, kuid_t *uid, kgid_t *gid); -}; +} __do_const; struct kobj_uevent_env { char *argv[3]; @@ -167,6 +143,14 @@ struct kobj_attribute { ssize_t (*store)(struct kobject *kobj, struct kobj_attribute *attr, const char *buf, size_t count); }; +typedef struct kobj_attribute __no_const kobj_attribute_no_const; + +#define KOBJECT_ATTR(_name, _mode, _show, _store) \ + struct kobj_attribute dev_attr_##_name = __ATTR(_name, _mode, _show, _store) +#define KOBJECT_ATTR_RW(_name) \ + struct kobj_attribute dev_attr_##_name = __ATTR_RW(_name) +#define KOBJECT_ATTR_RO(_name) \ + struct kobj_attribute dev_attr_##_name = __ATTR_RO(_name) extern const struct sysfs_ops kobj_sysfs_ops; @@ -239,9 +223,11 @@ extern struct kobject *firmware_kobj; int kobject_uevent(struct kobject *kobj, enum kobject_action action); int kobject_uevent_env(struct kobject *kobj, enum kobject_action action, char *envp[]); -int kobject_synth_uevent(struct kobject *kobj, const char *buf, size_t count); __printf(2, 3) int add_uevent_var(struct kobj_uevent_env *env, const char *format, ...); +int kobject_action_type(const char *buf, size_t count, + enum kobject_action *type); + #endif /* _KOBJECT_H_ */ diff --git a/include/linux/kobject_ns.h b/include/linux/kobject_ns.h index 2b5b64256c..fb52e27d59 100644 --- a/include/linux/kobject_ns.h +++ b/include/linux/kobject_ns.h @@ -1,4 +1,3 @@ -// SPDX-License-Identifier: GPL-2.0 /* Kernel object name 
space definitions * * Copyright (c) 2002-2003 Patrick Mochel @@ -8,7 +7,9 @@ * * Split from kobject.h by David Howells (dhowells@redhat.com) * - * Please read Documentation/core-api/kobject.rst before using the kobject + * This file is released under the GPLv2. + * + * Please read Documentation/kobject.txt before using the kobject * interface, ESPECIALLY the parts about reference counts and object * destructors. */ @@ -43,7 +44,7 @@ struct kobj_ns_type_operations { const void *(*netlink_ns)(struct sock *sk); const void *(*initial_ns)(void); void (*drop_ns)(void *); -}; +} __do_const; int kobj_ns_type_register(const struct kobj_ns_type_operations *ops); int kobj_ns_type_registered(enum kobj_ns_type type); diff --git a/include/linux/kprobes.h b/include/linux/kprobes.h index e4f3bfe087..8f68490842 100644 --- a/include/linux/kprobes.h +++ b/include/linux/kprobes.h @@ -1,10 +1,23 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ #ifndef _LINUX_KPROBES_H #define _LINUX_KPROBES_H /* * Kernel Probes (KProbes) * include/linux/kprobes.h * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. + * * Copyright (C) IBM Corporation, 2002, 2004 * * 2002-Oct Created by Vamsi Krishna S Kernel @@ -16,7 +29,7 @@ * and Prasanna S Panchamukhi * added function-return probes. 
*/ -#include +#include /* for __kprobes */ #include #include #include @@ -27,11 +40,9 @@ #include #include #include -#include -#include -#include #ifdef CONFIG_KPROBES +#include /* kprobe_status settings */ #define KPROBE_HIT_ACTIVE 0x00000001 @@ -40,7 +51,6 @@ #define KPROBE_HIT_SSDONE 0x00000008 #else /* CONFIG_KPROBES */ -#include typedef int kprobe_opcode_t; struct arch_specific_insn { int dummy; @@ -52,8 +62,11 @@ struct pt_regs; struct kretprobe; struct kretprobe_instance; typedef int (*kprobe_pre_handler_t) (struct kprobe *, struct pt_regs *); +typedef int (*kprobe_break_handler_t) (struct kprobe *, struct pt_regs *); typedef void (*kprobe_post_handler_t) (struct kprobe *, struct pt_regs *, unsigned long flags); +typedef int (*kprobe_fault_handler_t) (struct kprobe *, struct pt_regs *, + int trapnr); typedef int (*kretprobe_handler_t) (struct kretprobe_instance *, struct pt_regs *); @@ -81,6 +94,18 @@ struct kprobe { /* Called after addr is executed, unless... */ kprobe_post_handler_t post_handler; + /* + * ... called if executing addr causes a fault (eg. page fault). + * Return 1 if it handled fault, otherwise kernel will see it. + */ + kprobe_fault_handler_t fault_handler; + + /* + * ... called if breakpoint trap occurs in probe handler. + * Return 1 if it handled break, otherwise kernel will see it. + */ + kprobe_break_handler_t break_handler; + /* Saved opcode (which has been replaced with breakpoint) */ kprobe_opcode_t opcode; @@ -128,6 +153,24 @@ static inline int kprobe_ftrace(struct kprobe *p) return p->flags & KPROBE_FLAG_FTRACE; } +/* + * Special probe type that uses setjmp-longjmp type tricks to resume + * execution at a specified entry with a matching prototype corresponding + * to the probed function - a trick to enable arguments to become + * accessible seamlessly by probe handling logic. 
+ * Note: + * Because of the way compilers allocate stack space for local variables + * etc upfront, regardless of sub-scopes within a function, this mirroring + * principle currently works only for probes placed on function entry points. + */ +struct jprobe { + struct kprobe kp; + void *entry; /* probe handling code to jump to */ +}; + +/* For backward compatibility with old code using JPROBE_ENTRY() */ +#define JPROBE_ENTRY(handler) (handler) + /* * Function-return probe - * Note: @@ -138,11 +181,6 @@ static inline int kprobe_ftrace(struct kprobe *p) * ignored, due to maxactive being too low. * */ -struct kretprobe_holder { - struct kretprobe *rp; - refcount_t ref; -}; - struct kretprobe { struct kprobe kp; kretprobe_handler_t handler; @@ -150,20 +188,16 @@ struct kretprobe { int maxactive; int nmissed; size_t data_size; - struct freelist_head freelist; - struct kretprobe_holder *rph; + struct hlist_head free_instances; + raw_spinlock_t lock; }; struct kretprobe_instance { - union { - struct freelist_node freelist; - struct rcu_head rcu; - }; - struct llist_node llist; - struct kretprobe_holder *rph; + struct hlist_node hlist; + struct kretprobe *rp; kprobe_opcode_t *ret_addr; - void *fp; - char data[]; + struct task_struct *task; + char data[0]; }; struct kretprobe_blackpoint { @@ -189,45 +223,10 @@ static inline int kprobes_built_in(void) return 1; } -extern void kprobe_busy_begin(void); -extern void kprobe_busy_end(void); - #ifdef CONFIG_KRETPROBES extern void arch_prepare_kretprobe(struct kretprobe_instance *ri, struct pt_regs *regs); extern int arch_trampoline_kprobe(struct kprobe *p); - -/* If the trampoline handler called from a kprobe, use this version */ -unsigned long __kretprobe_trampoline_handler(struct pt_regs *regs, - void *trampoline_address, - void *frame_pointer); - -static nokprobe_inline -unsigned long kretprobe_trampoline_handler(struct pt_regs *regs, - void *trampoline_address, - void *frame_pointer) -{ - unsigned long ret; - /* - * Set a 
dummy kprobe for avoiding kretprobe recursion. - * Since kretprobe never runs in kprobe handler, no kprobe must - * be running at this point. - */ - kprobe_busy_begin(); - ret = __kretprobe_trampoline_handler(regs, trampoline_address, frame_pointer); - kprobe_busy_end(); - - return ret; -} - -static nokprobe_inline struct kretprobe *get_kretprobe(struct kretprobe_instance *ri) -{ - RCU_LOCKDEP_WARN(!rcu_read_lock_any_held(), - "Kretprobe is accessed from instance under preemptive context"); - - return READ_ONCE(ri->rph->rp); -} - #else /* CONFIG_KRETPROBES */ static inline void arch_prepare_kretprobe(struct kretprobe *rp, struct pt_regs *regs) @@ -241,6 +240,16 @@ static inline int arch_trampoline_kprobe(struct kprobe *p) extern struct kretprobe_blackpoint kretprobe_blacklist[]; +static inline void kretprobe_assert(struct kretprobe_instance *ri, + unsigned long orig_ret_address, unsigned long trampoline_address) +{ + if (!orig_ret_address || (orig_ret_address == trampoline_address)) { + printk("kretprobe BUG!: Processing kretprobe %p @ %p\n", + ri->rp, ri->rp->kp.addr); + BUG(); + } +} + #ifdef CONFIG_KPROBES_SANITY_TEST extern int init_test_probes(void); #else @@ -254,33 +263,24 @@ extern int arch_prepare_kprobe(struct kprobe *p); extern void arch_arm_kprobe(struct kprobe *p); extern void arch_disarm_kprobe(struct kprobe *p); extern int arch_init_kprobes(void); +extern void show_registers(struct pt_regs *regs); extern void kprobes_inc_nmissed_count(struct kprobe *p); extern bool arch_within_kprobe_blacklist(unsigned long addr); -extern int arch_populate_kprobe_blacklist(void); -extern bool arch_kprobe_on_func_entry(unsigned long offset); -extern int kprobe_on_func_entry(kprobe_opcode_t *addr, const char *sym, unsigned long offset); extern bool within_kprobe_blacklist(unsigned long addr); -extern int kprobe_add_ksym_blacklist(unsigned long entry); -extern int kprobe_add_area_blacklist(unsigned long start, unsigned long end); struct kprobe_insn_cache { struct mutex 
mutex; void *(*alloc)(void); /* allocate insn page */ void (*free)(void *); /* free insn page */ - const char *sym; /* symbol for insn pages */ struct list_head pages; /* list of kprobe_insn_page */ size_t insn_size; /* size of instruction slot */ int nr_garbage; }; -#ifdef __ARCH_WANT_KPROBES_INSN_SLOT extern kprobe_opcode_t *__get_insn_slot(struct kprobe_insn_cache *c); extern void __free_insn_slot(struct kprobe_insn_cache *c, kprobe_opcode_t *slot, int dirty); -/* sleep-less address checking routine */ -extern bool __is_insn_slot_addr(struct kprobe_insn_cache *c, - unsigned long addr); #define DEFINE_INSN_CACHE_OPS(__name) \ extern struct kprobe_insn_cache kprobe_##__name##_slots; \ @@ -294,22 +294,6 @@ static inline void free_##__name##_slot(kprobe_opcode_t *slot, int dirty)\ { \ __free_insn_slot(&kprobe_##__name##_slots, slot, dirty); \ } \ - \ -static inline bool is_kprobe_##__name##_slot(unsigned long addr) \ -{ \ - return __is_insn_slot_addr(&kprobe_##__name##_slots, addr); \ -} -#define KPROBE_INSN_PAGE_SYM "kprobe_insn_page" -#define KPROBE_OPTINSN_PAGE_SYM "kprobe_optinsn_page" -int kprobe_cache_get_kallsym(struct kprobe_insn_cache *c, unsigned int *symnum, - unsigned long *value, char *type, char *sym); -#else /* __ARCH_WANT_KPROBES_INSN_SLOT */ -#define DEFINE_INSN_CACHE_OPS(__name) \ -static inline bool is_kprobe_##__name##_slot(unsigned long addr) \ -{ \ - return 0; \ -} -#endif DEFINE_INSN_CACHE_OPS(insn); @@ -343,16 +327,14 @@ DEFINE_INSN_CACHE_OPS(optinsn); #ifdef CONFIG_SYSCTL extern int sysctl_kprobes_optimization; extern int proc_kprobes_optimization_handler(struct ctl_table *table, - int write, void *buffer, + int write, void __user *buffer, size_t *length, loff_t *ppos); #endif -extern void wait_for_kprobe_optimizer(void); -#else -static inline void wait_for_kprobe_optimizer(void) { } + #endif /* CONFIG_OPTPROBES */ #ifdef CONFIG_KPROBES_ON_FTRACE extern void kprobe_ftrace_handler(unsigned long ip, unsigned long parent_ip, - struct ftrace_ops 
*ops, struct ftrace_regs *fregs); + struct ftrace_ops *ops, struct pt_regs *regs); extern int arch_prepare_kprobe_ftrace(struct kprobe *p); #endif @@ -360,6 +342,10 @@ int arch_check_ftrace_location(struct kprobe *p); /* Get the kprobe at this addr (if any) - called with preemption disabled */ struct kprobe *get_kprobe(void *addr); +void kretprobe_hash_lock(struct task_struct *tsk, + struct hlist_head **head, unsigned long *flags); +void kretprobe_hash_unlock(struct task_struct *tsk, unsigned long *flags); +struct hlist_head * kretprobe_inst_table_head(struct task_struct *tsk); /* kprobe_running() will just return the current_kprobe on this CPU */ static inline struct kprobe *kprobe_running(void) @@ -377,11 +363,17 @@ static inline struct kprobe_ctlblk *get_kprobe_ctlblk(void) return this_cpu_ptr(&kprobe_ctlblk); } -kprobe_opcode_t *kprobe_lookup_name(const char *name, unsigned int offset); int register_kprobe(struct kprobe *p); void unregister_kprobe(struct kprobe *p); int register_kprobes(struct kprobe **kps, int num); void unregister_kprobes(struct kprobe **kps, int num); +int setjmp_pre_handler(struct kprobe *, struct pt_regs *); +int longjmp_break_handler(struct kprobe *, struct pt_regs *); +int register_jprobe(struct jprobe *p); +void unregister_jprobe(struct jprobe *p); +int register_jprobes(struct jprobe **jps, int num); +void unregister_jprobes(struct jprobe **jps, int num); +void jprobe_return(void); unsigned long arch_deref_entry_point(void *); int register_kretprobe(struct kretprobe *rp); @@ -390,24 +382,13 @@ int register_kretprobes(struct kretprobe **rps, int num); void unregister_kretprobes(struct kretprobe **rps, int num); void kprobe_flush_task(struct task_struct *tk); - -void kprobe_free_init_mem(void); +void recycle_rp_inst(struct kretprobe_instance *ri, struct hlist_head *head); int disable_kprobe(struct kprobe *kp); int enable_kprobe(struct kprobe *kp); void dump_kprobe(struct kprobe *kp); -void *alloc_insn_page(void); - -void 
*alloc_optinsn_page(void); -void free_optinsn_page(void *page); - -int kprobe_get_kallsym(unsigned int symnum, unsigned long *value, char *type, - char *sym); - -int arch_kprobe_get_kallsym(unsigned int *symnum, unsigned long *value, - char *type, char *sym); #else /* !CONFIG_KPROBES: */ static inline int kprobes_built_in(void) @@ -440,6 +421,23 @@ static inline void unregister_kprobe(struct kprobe *p) static inline void unregister_kprobes(struct kprobe **kps, int num) { } +static inline int register_jprobe(struct jprobe *p) +{ + return -ENOSYS; +} +static inline int register_jprobes(struct jprobe **jps, int num) +{ + return -ENOSYS; +} +static inline void unregister_jprobe(struct jprobe *p) +{ +} +static inline void unregister_jprobes(struct jprobe **jps, int num) +{ +} +static inline void jprobe_return(void) +{ +} static inline int register_kretprobe(struct kretprobe *rp) { return -ENOSYS; @@ -457,9 +455,6 @@ static inline void unregister_kretprobes(struct kretprobe **rps, int num) static inline void kprobe_flush_task(struct task_struct *tk) { } -static inline void kprobe_free_init_mem(void) -{ -} static inline int disable_kprobe(struct kprobe *kp) { return -ENOSYS; @@ -468,16 +463,6 @@ static inline int enable_kprobe(struct kprobe *kp) { return -ENOSYS; } - -static inline bool within_kprobe_blacklist(unsigned long addr) -{ - return true; -} -static inline int kprobe_get_kallsym(unsigned int symnum, unsigned long *value, - char *type, char *sym) -{ - return -ERANGE; -} #endif /* CONFIG_KPROBES */ static inline int disable_kretprobe(struct kretprobe *rp) { @@ -487,37 +472,27 @@ static inline int enable_kretprobe(struct kretprobe *rp) { return enable_kprobe(&rp->kp); } +static inline int disable_jprobe(struct jprobe *jp) +{ + return disable_kprobe(&jp->kp); +} +static inline int enable_jprobe(struct jprobe *jp) +{ + return enable_kprobe(&jp->kp); +} -#ifndef CONFIG_KPROBES -static inline bool is_kprobe_insn_slot(unsigned long addr) -{ - return false; -} +#ifdef 
CONFIG_KPROBES +/* + * Blacklist ganerating macro. Specify functions which is not probed + * by using this macro. + */ +#define __NOKPROBE_SYMBOL(fname) \ +static unsigned long __used \ + __attribute__((section("_kprobe_blacklist"))) \ + _kbl_addr_##fname = (unsigned long)fname; +#define NOKPROBE_SYMBOL(fname) __NOKPROBE_SYMBOL(fname) +#else +#define NOKPROBE_SYMBOL(fname) #endif -#ifndef CONFIG_OPTPROBES -static inline bool is_kprobe_optinsn_slot(unsigned long addr) -{ - return false; -} -#endif - -/* Returns true if kprobes handled the fault */ -static nokprobe_inline bool kprobe_page_fault(struct pt_regs *regs, - unsigned int trap) -{ - if (!kprobes_built_in()) - return false; - if (user_mode(regs)) - return false; - /* - * To be potentially processing a kprobe fault and to be allowed - * to call kprobe_running(), we have to be non-preemptible. - */ - if (preemptible()) - return false; - if (!kprobe_running()) - return false; - return kprobe_fault_handler(regs, trap); -} #endif /* _LINUX_KPROBES_H */ diff --git a/include/linux/kref.h b/include/linux/kref.h index d32e21a253..531fd0a598 100644 --- a/include/linux/kref.h +++ b/include/linux/kref.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * kref.h - library routines for handling generic reference counted objects * @@ -8,32 +7,30 @@ * based on kobject.h which was: * Copyright (C) 2002-2003 Patrick Mochel * Copyright (C) 2002-2003 Open Source Development Labs + * + * This file is released under the GPLv2. + * */ #ifndef _KREF_H_ #define _KREF_H_ -#include -#include +#include +#include +#include +#include struct kref { - refcount_t refcount; + atomic_t refcount; }; -#define KREF_INIT(n) { .refcount = REFCOUNT_INIT(n), } - /** * kref_init - initialize object. * @kref: object in question. 
*/ static inline void kref_init(struct kref *kref) { - refcount_set(&kref->refcount, 1); -} - -static inline unsigned int kref_read(const struct kref *kref) -{ - return refcount_read(&kref->refcount); + atomic_set(&kref->refcount, 1); } /** @@ -42,7 +39,41 @@ static inline unsigned int kref_read(const struct kref *kref) */ static inline void kref_get(struct kref *kref) { - refcount_inc(&kref->refcount); + /* If refcount was 0 before incrementing then we have a race + * condition when this kref is freeing by some other thread right now. + * In this case one should use kref_get_unless_zero() + */ + WARN_ON_ONCE(atomic_inc_return(&kref->refcount) < 2); +} + +/** + * kref_sub - subtract a number of refcounts for object. + * @kref: object. + * @count: Number of recounts to subtract. + * @release: pointer to the function that will clean up the object when the + * last reference to the object is released. + * This pointer is required, and it is not acceptable to pass kfree + * in as this function. If the caller does pass kfree to this + * function, you will be publicly mocked mercilessly by the kref + * maintainer, and anyone else who happens to notice it. You have + * been warned. + * + * Subtract @count from the refcount, and if 0, call release(). + * Return 1 if the object was removed, otherwise return 0. Beware, if this + * function returns 0, you still can not count on the kref from remaining in + * memory. Only use the return value if you want to see if the kref is now + * gone, not present. + */ +static inline int kref_sub(struct kref *kref, unsigned int count, + void (*release)(struct kref *kref)) +{ + BUG_ON(release == NULL); + + if (atomic_sub_and_test((int) count, &kref->refcount)) { + release(kref); + return 1; + } + return 0; } /** @@ -51,7 +82,10 @@ static inline void kref_get(struct kref *kref) * @release: pointer to the function that will clean up the object when the * last reference to the object is released. 
* This pointer is required, and it is not acceptable to pass kfree - * in as this function. + * in as this function. If the caller does pass kfree to this + * function, you will be publicly mocked mercilessly by the kref + * maintainer, and anyone else who happens to notice it. You have + * been warned. * * Decrement the refcount, and if 0, call release(). * Return 1 if the object was removed, otherwise return 0. Beware, if this @@ -61,29 +95,20 @@ static inline void kref_get(struct kref *kref) */ static inline int kref_put(struct kref *kref, void (*release)(struct kref *kref)) { - if (refcount_dec_and_test(&kref->refcount)) { - release(kref); - return 1; - } - return 0; + return kref_sub(kref, 1, release); } static inline int kref_put_mutex(struct kref *kref, void (*release)(struct kref *kref), struct mutex *lock) { - if (refcount_dec_and_mutex_lock(&kref->refcount, lock)) { - release(kref); - return 1; - } - return 0; -} - -static inline int kref_put_lock(struct kref *kref, - void (*release)(struct kref *kref), - spinlock_t *lock) -{ - if (refcount_dec_and_lock(&kref->refcount, lock)) { + WARN_ON(release == NULL); + if (unlikely(!atomic_add_unless(&kref->refcount, -1, 1))) { + mutex_lock(lock); + if (unlikely(!atomic_dec_and_test(&kref->refcount))) { + mutex_unlock(lock); + return 0; + } release(kref); return 1; } @@ -108,6 +133,6 @@ static inline int kref_put_lock(struct kref *kref, */ static inline int __must_check kref_get_unless_zero(struct kref *kref) { - return refcount_inc_not_zero(&kref->refcount); + return atomic_add_unless(&kref->refcount, 1, 0); } #endif /* _KREF_H_ */ diff --git a/include/linux/ks0108.h b/include/linux/ks0108.h index 1a37a664f9..cb311798e0 100644 --- a/include/linux/ks0108.h +++ b/include/linux/ks0108.h @@ -1,11 +1,25 @@ -/* SPDX-License-Identifier: GPL-2.0 */ /* * Filename: ks0108.h * Version: 0.1.0 * Description: ks0108 LCD Controller driver header + * License: GPLv2 * - * Author: Copyright (C) Miguel Ojeda + * Author: Copyright (C) 
Miguel Ojeda Sandonis * Date: 2006-10-31 + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + * */ #ifndef _KS0108_H_ diff --git a/include/linux/ks8842.h b/include/linux/ks8842.h index 96ffdf3cbe..14ba445229 100644 --- a/include/linux/ks8842.h +++ b/include/linux/ks8842.h @@ -1,7 +1,19 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * ks8842.h KS8842 platform data struct definition * Copyright (c) 2010 Intel Corporation + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 
*/ #ifndef _LINUX_KS8842_H diff --git a/include/linux/ks8851_mll.h b/include/linux/ks8851_mll.h index 57c0a39ed7..e9ccfb59ed 100644 --- a/include/linux/ks8851_mll.h +++ b/include/linux/ks8851_mll.h @@ -1,7 +1,19 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * ks8861_mll platform data struct definition * Copyright (c) 2012 BTicino S.p.A. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ #ifndef _LINUX_KS8851_MLL_H diff --git a/include/linux/ksm.h b/include/linux/ksm.h index 161e8164ab..481c8c4627 100644 --- a/include/linux/ksm.h +++ b/include/linux/ksm.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef __LINUX_KSM_H #define __LINUX_KSM_H /* @@ -13,7 +12,6 @@ #include #include #include -#include struct stable_node; struct mem_cgroup; @@ -37,6 +35,17 @@ static inline void ksm_exit(struct mm_struct *mm) __ksm_exit(mm); } +static inline struct stable_node *page_stable_node(struct page *page) +{ + return PageKsm(page) ? 
page_rmapping(page) : NULL; +} + +static inline void set_page_stable_node(struct page *page, + struct stable_node *stable_node) +{ + page->mapping = (void *)((unsigned long)stable_node | PAGE_MAPPING_KSM); +} + /* * When do_swap_page() first faults in from swap what used to be a KSM page, * no problem, it will be assigned to this vma's anon_vma; but thereafter, @@ -51,7 +60,7 @@ static inline void ksm_exit(struct mm_struct *mm) struct page *ksm_might_need_to_copy(struct page *page, struct vm_area_struct *vma, unsigned long address); -void rmap_walk_ksm(struct page *page, struct rmap_walk_control *rwc); +int rmap_walk_ksm(struct page *page, struct rmap_walk_control *rwc); void ksm_migrate_page(struct page *newpage, struct page *oldpage); #else /* !CONFIG_KSM */ @@ -78,9 +87,16 @@ static inline struct page *ksm_might_need_to_copy(struct page *page, return page; } -static inline void rmap_walk_ksm(struct page *page, +static inline int page_referenced_ksm(struct page *page, + struct mem_cgroup *memcg, unsigned long *vm_flags) +{ + return 0; +} + +static inline int rmap_walk_ksm(struct page *page, struct rmap_walk_control *rwc) { + return 0; } static inline void ksm_migrate_page(struct page *newpage, struct page *oldpage) diff --git a/include/linux/kthread.h b/include/linux/kthread.h index 346b0f2691..a6e82a69c3 100644 --- a/include/linux/kthread.h +++ b/include/linux/kthread.h @@ -1,12 +1,9 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_KTHREAD_H #define _LINUX_KTHREAD_H /* Simple interface for creating and stopping kernel threads without mess. */ #include #include -struct mm_struct; - __printf(4, 5) struct task_struct *kthread_create_on_node(int (*threadfn)(void *data), void *data, @@ -18,7 +15,7 @@ struct task_struct *kthread_create_on_node(int (*threadfn)(void *data), * @threadfn: the function to run in the thread * @data: data pointer for @threadfn() * @namefmt: printf-style format string for the thread name - * @arg: arguments for @namefmt. 
+ * @...: arguments for @namefmt. * * This macro will create a kthread on the current node, leaving it in * the stopped state. This is just a helper for kthread_create_on_node(); @@ -33,11 +30,6 @@ struct task_struct *kthread_create_on_cpu(int (*threadfn)(void *data), unsigned int cpu, const char *namefmt); -void set_kthread_struct(struct task_struct *p); - -void kthread_set_per_cpu(struct task_struct *k, int cpu); -bool kthread_is_per_cpu(struct task_struct *k); - /** * kthread_run - create and wake a thread. * @threadfn: the function to run until signal_pending(current). @@ -56,15 +48,12 @@ bool kthread_is_per_cpu(struct task_struct *k); __k; \ }) -void free_kthread_struct(struct task_struct *k); void kthread_bind(struct task_struct *k, unsigned int cpu); void kthread_bind_mask(struct task_struct *k, const struct cpumask *mask); int kthread_stop(struct task_struct *k); bool kthread_should_stop(void); bool kthread_should_park(void); -bool __kthread_should_park(struct task_struct *k); bool kthread_freezable_should_stop(bool *was_frozen); -void *kthread_func(struct task_struct *k); void *kthread_data(struct task_struct *k); void *kthread_probe_data(struct task_struct *k); int kthread_park(struct task_struct *k); @@ -85,7 +74,7 @@ extern int tsk_fork_get_node(struct task_struct *tsk); */ struct kthread_work; typedef void (*kthread_work_func_t)(struct kthread_work *work); -void kthread_delayed_work_timer_fn(struct timer_list *t); +void kthread_delayed_work_timer_fn(unsigned long __data); enum { KTW_FREEZABLE = 1 << 0, /* freeze during suspend */ @@ -93,7 +82,7 @@ enum { struct kthread_worker { unsigned int flags; - raw_spinlock_t lock; + spinlock_t lock; struct list_head work_list; struct list_head delayed_work_list; struct task_struct *task; @@ -114,7 +103,7 @@ struct kthread_delayed_work { }; #define KTHREAD_WORKER_INIT(worker) { \ - .lock = __RAW_SPIN_LOCK_UNLOCKED((worker).lock), \ + .lock = __SPIN_LOCK_UNLOCKED((worker).lock), \ .work_list = 
LIST_HEAD_INIT((worker).work_list), \ .delayed_work_list = LIST_HEAD_INIT((worker).delayed_work_list),\ } @@ -126,7 +115,8 @@ struct kthread_delayed_work { #define KTHREAD_DELAYED_WORK_INIT(dwork, fn) { \ .work = KTHREAD_WORK_INIT((dwork).work, (fn)), \ - .timer = __TIMER_INITIALIZER(kthread_delayed_work_timer_fn,\ + .timer = __TIMER_INITIALIZER(kthread_delayed_work_timer_fn, \ + 0, (unsigned long)&(dwork), \ TIMER_IRQSAFE), \ } @@ -172,9 +162,10 @@ extern void __kthread_init_worker(struct kthread_worker *worker, #define kthread_init_delayed_work(dwork, fn) \ do { \ kthread_init_work(&(dwork)->work, (fn)); \ - timer_setup(&(dwork)->timer, \ - kthread_delayed_work_timer_fn, \ - TIMER_IRQSAFE); \ + __setup_timer(&(dwork)->timer, \ + kthread_delayed_work_timer_fn, \ + (unsigned long)(dwork), \ + TIMER_IRQSAFE); \ } while (0) int kthread_worker_fn(void *worker_ptr); @@ -183,7 +174,7 @@ __printf(2, 3) struct kthread_worker * kthread_create_worker(unsigned int flags, const char namefmt[], ...); -__printf(3, 4) struct kthread_worker * +struct kthread_worker * kthread_create_worker_on_cpu(int cpu, unsigned int flags, const char namefmt[], ...); @@ -206,19 +197,4 @@ bool kthread_cancel_delayed_work_sync(struct kthread_delayed_work *work); void kthread_destroy_worker(struct kthread_worker *worker); -void kthread_use_mm(struct mm_struct *mm); -void kthread_unuse_mm(struct mm_struct *mm); - -struct cgroup_subsys_state; - -#ifdef CONFIG_BLK_CGROUP -void kthread_associate_blkcg(struct cgroup_subsys_state *css); -struct cgroup_subsys_state *kthread_blkcg(void); -#else -static inline void kthread_associate_blkcg(struct cgroup_subsys_state *css) { } -static inline struct cgroup_subsys_state *kthread_blkcg(void) -{ - return NULL; -} -#endif #endif /* _LINUX_KTHREAD_H */ diff --git a/include/linux/ktime.h b/include/linux/ktime.h index 73f20deb49..0fb7ffb177 100644 --- a/include/linux/ktime.h +++ b/include/linux/ktime.h @@ -23,10 +23,22 @@ #include #include -#include -/* Nanosecond 
scalar representation for kernel time values */ -typedef s64 ktime_t; +/* + * ktime_t: + * + * A single 64-bit variable is used to store the hrtimers + * internal representation of time values in scalar nanoseconds. The + * design plays out best on 64-bit CPUs, where most conversions are + * NOPs and most arithmetic ktime_t operations are plain arithmetic + * operations. + * + */ +union ktime { + s64 tv64; +}; + +typedef union ktime ktime_t; /* Kill this */ /** * ktime_set - Set a ktime_t variable from a seconds/nanoseconds value @@ -38,34 +50,45 @@ typedef s64 ktime_t; static inline ktime_t ktime_set(const s64 secs, const unsigned long nsecs) { if (unlikely(secs >= KTIME_SEC_MAX)) - return KTIME_MAX; + return (ktime_t){ .tv64 = KTIME_MAX }; - return secs * NSEC_PER_SEC + (s64)nsecs; + return (ktime_t) { .tv64 = secs * NSEC_PER_SEC + (s64)nsecs }; } /* Subtract two ktime_t variables. rem = lhs -rhs: */ -#define ktime_sub(lhs, rhs) ((lhs) - (rhs)) +#define ktime_sub(lhs, rhs) \ + ({ (ktime_t){ .tv64 = (lhs).tv64 - (rhs).tv64 }; }) /* Add two ktime_t variables. res = lhs + rhs: */ -#define ktime_add(lhs, rhs) ((lhs) + (rhs)) +#define ktime_add(lhs, rhs) \ + ({ (ktime_t){ .tv64 = (lhs).tv64 + (rhs).tv64 }; }) /* * Same as ktime_add(), but avoids undefined behaviour on overflow; however, * this means that you must check the result for overflow yourself. */ -#define ktime_add_unsafe(lhs, rhs) ((u64) (lhs) + (rhs)) +#define ktime_add_unsafe(lhs, rhs) \ + ({ (ktime_t){ .tv64 = (u64) (lhs).tv64 + (rhs).tv64 }; }) /* * Add a ktime_t variable and a scalar nanosecond value. 
* res = kt + nsval: */ -#define ktime_add_ns(kt, nsval) ((kt) + (nsval)) +#define ktime_add_ns(kt, nsval) \ + ({ (ktime_t){ .tv64 = (kt).tv64 + (nsval) }; }) /* * Subtract a scalar nanosecod from a ktime_t variable * res = kt - nsval: */ -#define ktime_sub_ns(kt, nsval) ((kt) - (nsval)) +#define ktime_sub_ns(kt, nsval) \ + ({ (ktime_t){ .tv64 = (kt).tv64 - (nsval) }; }) + +/* convert a timespec to ktime_t format: */ +static inline ktime_t timespec_to_ktime(struct timespec ts) +{ + return ktime_set(ts.tv_sec, ts.tv_nsec); +} /* convert a timespec64 to ktime_t format: */ static inline ktime_t timespec64_to_ktime(struct timespec64 ts) @@ -73,13 +96,37 @@ static inline ktime_t timespec64_to_ktime(struct timespec64 ts) return ktime_set(ts.tv_sec, ts.tv_nsec); } -/* Map the ktime_t to timespec conversion to ns_to_timespec function */ -#define ktime_to_timespec64(kt) ns_to_timespec64((kt)) - -/* Convert ktime_t to nanoseconds */ -static inline s64 ktime_to_ns(const ktime_t kt) +/* convert a timeval to ktime_t format: */ +static inline ktime_t timeval_to_ktime(struct timeval tv) { - return kt; + return ktime_set(tv.tv_sec, tv.tv_usec * NSEC_PER_USEC); +} + +/* Map the ktime_t to timespec conversion to ns_to_timespec function */ +#define ktime_to_timespec(kt) ns_to_timespec((kt).tv64) + +/* Map the ktime_t to timespec conversion to ns_to_timespec function */ +#define ktime_to_timespec64(kt) ns_to_timespec64((kt).tv64) + +/* Map the ktime_t to timeval conversion to ns_to_timeval function */ +#define ktime_to_timeval(kt) ns_to_timeval((kt).tv64) + +/* Convert ktime_t to nanoseconds - NOP in the scalar storage format: */ +#define ktime_to_ns(kt) ((kt).tv64) + + +/** + * ktime_equal - Compares two ktime_t variables to see if they are equal + * @cmp1: comparable1 + * @cmp2: comparable2 + * + * Compare two ktime_t variables. + * + * Return: 1 if equal. 
+ */ +static inline int ktime_equal(const ktime_t cmp1, const ktime_t cmp2) +{ + return cmp1.tv64 == cmp2.tv64; } /** @@ -94,9 +141,9 @@ static inline s64 ktime_to_ns(const ktime_t kt) */ static inline int ktime_compare(const ktime_t cmp1, const ktime_t cmp2) { - if (cmp1 < cmp2) + if (cmp1.tv64 < cmp2.tv64) return -1; - if (cmp1 > cmp2) + if (cmp1.tv64 > cmp2.tv64) return 1; return 0; } @@ -135,7 +182,7 @@ static inline s64 ktime_divns(const ktime_t kt, s64 div) */ BUG_ON(div < 0); if (__builtin_constant_p(div) && !(div >> 32)) { - s64 ns = kt; + s64 ns = kt.tv64; u64 tmp = ns < 0 ? -ns : ns; do_div(tmp, div); @@ -152,7 +199,7 @@ static inline s64 ktime_divns(const ktime_t kt, s64 div) * so catch them on 64bit as well. */ WARN_ON(div < 0); - return kt / div; + return kt.tv64 / div; } #endif @@ -198,6 +245,25 @@ static inline ktime_t ktime_sub_ms(const ktime_t kt, const u64 msec) extern ktime_t ktime_add_safe(const ktime_t lhs, const ktime_t rhs); +/** + * ktime_to_timespec_cond - convert a ktime_t variable to timespec + * format only if the variable contains data + * @kt: the ktime_t variable to convert + * @ts: the timespec variable to store the result in + * + * Return: %true if there was a successful conversion, %false if kt was 0. + */ +static inline __must_check bool ktime_to_timespec_cond(const ktime_t kt, + struct timespec *ts) +{ + if (kt.tv64) { + *ts = ktime_to_timespec(kt); + return true; + } else { + return false; + } +} + /** * ktime_to_timespec64_cond - convert a ktime_t variable to timespec64 * format only if the variable contains data @@ -209,7 +275,7 @@ extern ktime_t ktime_add_safe(const ktime_t lhs, const ktime_t rhs); static inline __must_check bool ktime_to_timespec64_cond(const ktime_t kt, struct timespec64 *ts) { - if (kt) { + if (kt.tv64) { *ts = ktime_to_timespec64(kt); return true; } else { @@ -217,16 +283,27 @@ static inline __must_check bool ktime_to_timespec64_cond(const ktime_t kt, } } -#include +/* + * The resolution of the clocks. 
The resolution value is returned in + * the clock_getres() system call to give application programmers an + * idea of the (in)accuracy of timers. Timer values are rounded up to + * this resolution values. + */ +#define LOW_RES_NSEC TICK_NSEC +#define KTIME_LOW_RES (ktime_t){ .tv64 = LOW_RES_NSEC } static inline ktime_t ns_to_ktime(u64 ns) { - return ns; + static const ktime_t ktime_zero = { .tv64 = 0 }; + + return ktime_add_ns(ktime_zero, ns); } static inline ktime_t ms_to_ktime(u64 ms) { - return ms * NSEC_PER_MSEC; + static const ktime_t ktime_zero = { .tv64 = 0 }; + + return ktime_add_ms(ktime_zero, ms); } # include diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h index 0f18df7fe8..8c58db2c09 100644 --- a/include/linux/kvm_host.h +++ b/include/linux/kvm_host.h @@ -1,7 +1,10 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ #ifndef __KVM_HOST_H #define __KVM_HOST_H +/* + * This work is licensed under the terms of the GNU GPL, version 2. See + * the COPYING file in the top-level directory. + */ #include #include @@ -10,25 +13,19 @@ #include #include #include -#include #include -#include #include #include #include #include #include -#include #include #include #include #include #include #include -#include -#include -#include -#include +#include #include #include @@ -37,7 +34,6 @@ #include #include -#include #ifndef KVM_MAX_VCPU_ID #define KVM_MAX_VCPU_ID KVM_MAX_VCPUS @@ -49,27 +45,7 @@ * include/linux/kvm_h. */ #define KVM_MEMSLOT_INVALID (1UL << 16) - -/* - * Bit 63 of the memslot generation number is an "update in-progress flag", - * e.g. is temporarily set for the duration of install_new_memslots(). - * This flag effectively creates a unique generation number that is used to - * mark cached memslot data, e.g. MMIO accesses, as potentially being stale, - * i.e. may (or may not) have come from the previous memslots generation. - * - * This is necessary because the actual memslots update is not atomic with - * respect to the generation number update. 
Updating the generation number - * first would allow a vCPU to cache a spte from the old memslots using the - * new generation number, and updating the generation number after switching - * to the new memslots would allow cache hits using the old generation number - * to reference the defunct memslots. - * - * This mechanism is used to prevent getting hits in KVM's caches while a - * memslot update is in-progress, and to prevent cache hits *after* updating - * the actual generation number against accesses that were inserted into the - * cache *before* the memslots were updated. - */ -#define KVM_MEMSLOT_GEN_UPDATE_IN_PROGRESS BIT_ULL(63) +#define KVM_MEMSLOT_INCOHERENT (1UL << 17) /* Two fragments for cross MMIO pages. */ #define KVM_MAX_MMIO_FRAGMENTS 2 @@ -139,39 +115,21 @@ static inline bool is_error_page(struct page *page) return IS_ERR(page); } -#define KVM_REQUEST_MASK GENMASK(7,0) -#define KVM_REQUEST_NO_WAKEUP BIT(8) -#define KVM_REQUEST_WAIT BIT(9) /* * Architecture-independent vcpu->requests bit members * Bits 4-7 are reserved for more arch-independent bits. 
*/ -#define KVM_REQ_TLB_FLUSH (0 | KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP) -#define KVM_REQ_MMU_RELOAD (1 | KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP) -#define KVM_REQ_UNBLOCK 2 -#define KVM_REQ_UNHALT 3 -#define KVM_REQ_VM_BUGGED (4 | KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP) -#define KVM_REQUEST_ARCH_BASE 8 - -#define KVM_ARCH_REQ_FLAGS(nr, flags) ({ \ - BUILD_BUG_ON((unsigned)(nr) >= (sizeof_field(struct kvm_vcpu, requests) * 8) - KVM_REQUEST_ARCH_BASE); \ - (unsigned)(((nr) + KVM_REQUEST_ARCH_BASE) | (flags)); \ -}) -#define KVM_ARCH_REQ(nr) KVM_ARCH_REQ_FLAGS(nr, 0) - -bool kvm_make_vcpus_request_mask(struct kvm *kvm, unsigned int req, - struct kvm_vcpu *except, - unsigned long *vcpu_bitmap, cpumask_var_t tmp); -bool kvm_make_all_cpus_request(struct kvm *kvm, unsigned int req); -bool kvm_make_all_cpus_request_except(struct kvm *kvm, unsigned int req, - struct kvm_vcpu *except); -bool kvm_make_cpus_request_mask(struct kvm *kvm, unsigned int req, - unsigned long *vcpu_bitmap); +#define KVM_REQ_TLB_FLUSH 0 +#define KVM_REQ_MMU_RELOAD 1 +#define KVM_REQ_PENDING_TIMER 2 +#define KVM_REQ_UNHALT 3 #define KVM_USERSPACE_IRQ_SOURCE_ID 0 #define KVM_IRQFD_RESAMPLE_IRQ_SOURCE_ID 1 -extern struct mutex kvm_lock; +extern struct kmem_cache *kvm_vcpu_cache; + +extern spinlock_t kvm_lock; extern struct list_head vm_list; struct kvm_io_range { @@ -204,8 +162,8 @@ int kvm_io_bus_read(struct kvm_vcpu *vcpu, enum kvm_bus bus_idx, gpa_t addr, int len, void *val); int kvm_io_bus_register_dev(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr, int len, struct kvm_io_device *dev); -int kvm_io_bus_unregister_dev(struct kvm *kvm, enum kvm_bus bus_idx, - struct kvm_io_device *dev); +void kvm_io_bus_unregister_dev(struct kvm *kvm, enum kvm_bus bus_idx, + struct kvm_io_device *dev); struct kvm_io_device *kvm_io_bus_get_dev(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr); @@ -216,34 +174,19 @@ struct kvm_async_pf { struct list_head queue; struct kvm_vcpu *vcpu; struct mm_struct *mm; - 
gpa_t cr2_or_gpa; + gva_t gva; unsigned long addr; struct kvm_arch_async_pf arch; bool wakeup_all; - bool notpresent_injected; }; void kvm_clear_async_pf_completion_queue(struct kvm_vcpu *vcpu); void kvm_check_async_pf_completion(struct kvm_vcpu *vcpu); -bool kvm_setup_async_pf(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa, - unsigned long hva, struct kvm_arch_async_pf *arch); +int kvm_setup_async_pf(struct kvm_vcpu *vcpu, gva_t gva, unsigned long hva, + struct kvm_arch_async_pf *arch); int kvm_async_pf_wakeup_all(struct kvm_vcpu *vcpu); #endif -#ifdef KVM_ARCH_WANT_MMU_NOTIFIER -struct kvm_gfn_range { - struct kvm_memory_slot *slot; - gfn_t start; - gfn_t end; - pte_t pte; - bool may_block; -}; -bool kvm_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range); -bool kvm_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range); -bool kvm_test_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range); -bool kvm_set_spte_gfn(struct kvm *kvm, struct kvm_gfn_range *range); -#endif - enum { OUTSIDE_GUEST_MODE, IN_GUEST_MODE, @@ -251,37 +194,6 @@ enum { READING_SHADOW_PAGE_TABLES, }; -#define KVM_UNMAPPED_PAGE ((void *) 0x500 + POISON_POINTER_DELTA) - -struct kvm_host_map { - /* - * Only valid if the 'pfn' is managed by the host kernel (i.e. There is - * a 'struct page' for it. When using mem= kernel parameter some memory - * can be used as guest memory but they are not managed by host - * kernel). - * If 'pfn' is not managed by the host kernel, this field is - * initialized to KVM_UNMAPPED_PAGE. - */ - struct page *page; - void *hva; - kvm_pfn_t pfn; - kvm_pfn_t gfn; -}; - -/* - * Used to check if the mapping is valid or not. Never use 'kvm_host_map' - * directly to check for that. 
- */ -static inline bool kvm_vcpu_mapped(struct kvm_host_map *map) -{ - return !!map->hva; -} - -static inline bool kvm_vcpu_can_poll(ktime_t cur, ktime_t stop) -{ - return single_task_running() && !need_resched() && ktime_before(cur, stop); -} - /* * Sometimes a large or cross-page mmio needs to be broken up into separate * exits for userspace servicing. @@ -298,11 +210,10 @@ struct kvm_vcpu { struct preempt_notifier preempt_notifier; #endif int cpu; - int vcpu_id; /* id given by userspace at creation */ - int vcpu_idx; /* index in kvm->vcpus array */ + int vcpu_id; int srcu_idx; int mode; - u64 requests; + unsigned long requests; unsigned long guest_debug; int pre_pcpu; @@ -311,10 +222,14 @@ struct kvm_vcpu { struct mutex mutex; struct kvm_run *run; - struct rcuwait wait; - struct pid __rcu *pid; + int fpu_active; + int guest_fpu_loaded, guest_xcr0_loaded; + unsigned char fpu_counter; + struct swait_queue_head wq; + struct pid *pid; int sigset_active; sigset_t sigset; + struct kvm_vcpu_stat stat; unsigned int halt_poll_ns; bool valid_wakeup; @@ -349,73 +264,12 @@ struct kvm_vcpu { } spin_loop; #endif bool preempted; - bool ready; struct kvm_vcpu_arch arch; - struct kvm_vcpu_stat stat; - char stats_id[KVM_STATS_NAME_SIZE]; - struct kvm_dirty_ring dirty_ring; - - /* - * The index of the most recently used memslot by this vCPU. It's ok - * if this becomes stale due to memslot changes since we always check - * it is a valid slot. - */ - int last_used_slot; + struct dentry *debugfs_dentry; }; -/* must be called with irqs disabled */ -static __always_inline void guest_enter_irqoff(void) -{ - /* - * This is running in ioctl context so its safe to assume that it's the - * stime pending cputime to flush. - */ - instrumentation_begin(); - vtime_account_guest_enter(); - instrumentation_end(); - - /* - * KVM does not hold any references to rcu protected data when it - * switches CPU into a guest mode. 
In fact switching to a guest mode - * is very similar to exiting to userspace from rcu point of view. In - * addition CPU may stay in a guest mode for quite a long time (up to - * one time slice). Lets treat guest mode as quiescent state, just like - * we do with user-mode execution. - */ - if (!context_tracking_guest_enter()) { - instrumentation_begin(); - rcu_virt_note_context_switch(smp_processor_id()); - instrumentation_end(); - } -} - -static __always_inline void guest_exit_irqoff(void) -{ - context_tracking_guest_exit(); - - instrumentation_begin(); - /* Flush the guest cputime we spent on the guest */ - vtime_account_guest_exit(); - instrumentation_end(); -} - -static inline void guest_exit(void) -{ - unsigned long flags; - - local_irq_save(flags); - guest_exit_irqoff(); - local_irq_restore(flags); -} - static inline int kvm_vcpu_exiting_guest_mode(struct kvm_vcpu *vcpu) { - /* - * The memory barrier ensures a previous write to vcpu->requests cannot - * be reordered with the read of vcpu->mode. It pairs with the general - * memory barrier following the write of vcpu->mode in VCPU RUN. 
- */ - smp_mb__before_atomic(); return cmpxchg(&vcpu->mode, IN_GUEST_MODE, EXITING_GUEST_MODE); } @@ -433,30 +287,13 @@ struct kvm_memory_slot { unsigned long userspace_addr; u32 flags; short id; - u16 as_id; }; -static inline bool kvm_slot_dirty_track_enabled(struct kvm_memory_slot *slot) -{ - return slot->flags & KVM_MEM_LOG_DIRTY_PAGES; -} - static inline unsigned long kvm_dirty_bitmap_bytes(struct kvm_memory_slot *memslot) { return ALIGN(memslot->npages, BITS_PER_LONG) / 8; } -static inline unsigned long *kvm_second_dirty_bitmap(struct kvm_memory_slot *memslot) -{ - unsigned long len = kvm_dirty_bitmap_bytes(memslot); - - return memslot->dirty_bitmap + len / sizeof(*memslot->dirty_bitmap); -} - -#ifndef KVM_DIRTY_LOG_MANUAL_CAPS -#define KVM_DIRTY_LOG_MANUAL_CAPS KVM_DIRTY_LOG_MANUAL_PROTECT_ENABLE -#endif - struct kvm_s390_adapter_int { u64 ind_addr; u64 summary_addr; @@ -502,7 +339,7 @@ struct kvm_irq_routing_table { * Array indexed by gsi. Each entry contains list of irq chips * the gsi is connected to. */ - struct hlist_head map[]; + struct hlist_head map[0]; }; #endif @@ -510,8 +347,9 @@ struct kvm_irq_routing_table { #define KVM_PRIVATE_MEM_SLOTS 0 #endif -#define KVM_MEM_SLOTS_NUM SHRT_MAX -#define KVM_USER_MEM_SLOTS (KVM_MEM_SLOTS_NUM - KVM_PRIVATE_MEM_SLOTS) +#ifndef KVM_MEM_SLOTS_NUM +#define KVM_MEM_SLOTS_NUM (KVM_USER_MEM_SLOTS + KVM_PRIVATE_MEM_SLOTS) +#endif #ifndef __KVM_VCPU_MULTIPLE_ADDRESS_SPACE static inline int kvm_arch_vcpu_memslots_id(struct kvm_vcpu *vcpu) @@ -527,39 +365,22 @@ static inline int kvm_arch_vcpu_memslots_id(struct kvm_vcpu *vcpu) */ struct kvm_memslots { u64 generation; + struct kvm_memory_slot memslots[KVM_MEM_SLOTS_NUM]; /* The mapping table from slot id to the index in memslots[]. 
*/ short id_to_index[KVM_MEM_SLOTS_NUM]; - atomic_t last_used_slot; + atomic_t lru_slot; int used_slots; - struct kvm_memory_slot memslots[]; }; struct kvm { -#ifdef KVM_HAVE_MMU_RWLOCK - rwlock_t mmu_lock; -#else spinlock_t mmu_lock; -#endif /* KVM_HAVE_MMU_RWLOCK */ - struct mutex slots_lock; - - /* - * Protects the arch-specific fields of struct kvm_memory_slots in - * use by the VM. To be used under the slots_lock (above) or in a - * kvm->srcu critical section where acquiring the slots_lock would - * lead to deadlock with the synchronize_srcu in - * install_new_memslots. - */ - struct mutex slots_arch_lock; struct mm_struct *mm; /* userspace tied to this vm */ - struct kvm_memslots __rcu *memslots[KVM_ADDRESS_SPACE_NUM]; + struct kvm_memslots *memslots[KVM_ADDRESS_SPACE_NUM]; + struct srcu_struct srcu; + struct srcu_struct irq_srcu; struct kvm_vcpu *vcpus[KVM_MAX_VCPUS]; - /* Used to wait for completion of MMU notifiers. */ - spinlock_t mn_invalidate_lock; - unsigned long mn_active_invalidate_count; - struct rcuwait mn_memslots_update_rcuwait; - /* * created_vcpus is protected by kvm->lock, and is incremented * at the beginning of KVM_CREATE_VCPU. 
online_vcpus is only @@ -571,7 +392,7 @@ struct kvm { int last_boosted_vcpu; struct list_head vm_list; struct mutex lock; - struct kvm_io_bus __rcu *buses[KVM_NR_BUSES]; + struct kvm_io_bus *buses[KVM_NR_BUSES]; #ifdef CONFIG_HAVE_KVM_EVENTFD struct { spinlock_t lock; @@ -583,8 +404,8 @@ struct kvm { #endif struct kvm_vm_stat stat; struct kvm_arch arch; - refcount_t users_count; -#ifdef CONFIG_KVM_MMIO + atomic_t users_count; +#ifdef KVM_COALESCED_MMIO_PAGE_OFFSET struct kvm_coalesced_mmio_ring *coalesced_mmio_ring; spinlock_t ring_lock; struct list_head coalesced_zones; @@ -605,24 +426,11 @@ struct kvm { struct mmu_notifier mmu_notifier; unsigned long mmu_notifier_seq; long mmu_notifier_count; - unsigned long mmu_notifier_range_start; - unsigned long mmu_notifier_range_end; #endif + long tlbs_dirty; struct list_head devices; - u64 manual_dirty_log_protect; struct dentry *debugfs_dentry; struct kvm_stat_data **debugfs_stat_data; - struct srcu_struct srcu; - struct srcu_struct irq_srcu; - pid_t userspace_pid; - unsigned int max_halt_poll_ns; - u32 dirty_ring_size; - bool vm_bugged; - -#ifdef CONFIG_HAVE_KVM_PM_NOTIFIER - struct notifier_block pm_notifier; -#endif - char stats_id[KVM_STATS_NAME_SIZE]; }; #define kvm_err(fmt, ...) \ @@ -631,9 +439,6 @@ struct kvm { pr_info("kvm [%i]: " fmt, task_pid_nr(current), ## __VA_ARGS__) #define kvm_debug(fmt, ...) \ pr_debug("kvm [%i]: " fmt, task_pid_nr(current), ## __VA_ARGS__) -#define kvm_debug_ratelimited(fmt, ...) \ - pr_debug_ratelimited("kvm [%i]: " fmt, task_pid_nr(current), \ - ## __VA_ARGS__) #define kvm_pr_unimpl(fmt, ...) \ pr_err_ratelimited("kvm [%i]: " fmt, \ task_tgid_nr(current), ## __VA_ARGS__) @@ -645,54 +450,15 @@ struct kvm { #define vcpu_debug(vcpu, fmt, ...) \ kvm_debug("vcpu%i " fmt, (vcpu)->vcpu_id, ## __VA_ARGS__) -#define vcpu_debug_ratelimited(vcpu, fmt, ...) \ - kvm_debug_ratelimited("vcpu%i " fmt, (vcpu)->vcpu_id, \ - ## __VA_ARGS__) #define vcpu_err(vcpu, fmt, ...) 
\ kvm_err("vcpu%i " fmt, (vcpu)->vcpu_id, ## __VA_ARGS__) -static inline void kvm_vm_bugged(struct kvm *kvm) -{ - kvm->vm_bugged = true; - kvm_make_all_cpus_request(kvm, KVM_REQ_VM_BUGGED); -} - -#define KVM_BUG(cond, kvm, fmt...) \ -({ \ - int __ret = (cond); \ - \ - if (WARN_ONCE(__ret && !(kvm)->vm_bugged, fmt)) \ - kvm_vm_bugged(kvm); \ - unlikely(__ret); \ -}) - -#define KVM_BUG_ON(cond, kvm) \ -({ \ - int __ret = (cond); \ - \ - if (WARN_ON_ONCE(__ret && !(kvm)->vm_bugged)) \ - kvm_vm_bugged(kvm); \ - unlikely(__ret); \ -}) - -static inline bool kvm_dirty_log_manual_protect_and_init_set(struct kvm *kvm) -{ - return !!(kvm->manual_dirty_log_protect & KVM_DIRTY_LOG_INITIALLY_SET); -} - -static inline struct kvm_io_bus *kvm_get_bus(struct kvm *kvm, enum kvm_bus idx) -{ - return srcu_dereference_check(kvm->buses[idx], &kvm->srcu, - lockdep_is_held(&kvm->slots_lock) || - !refcount_read(&kvm->users_count)); -} - static inline struct kvm_vcpu *kvm_get_vcpu(struct kvm *kvm, int i) { - int num_vcpus = atomic_read(&kvm->online_vcpus); - i = array_index_nospec(i, num_vcpus); - - /* Pairs with smp_wmb() in kvm_vm_ioctl_create_vcpu. */ + /* Pairs with smp_wmb() in kvm_vm_ioctl_create_vcpu, in case + * the caller has read kvm->online_vcpus before (as is the case + * for kvm_for_each_vcpu, for example). 
+ */ smp_rmb(); return kvm->vcpus[i]; } @@ -720,22 +486,22 @@ static inline struct kvm_vcpu *kvm_get_vcpu_by_id(struct kvm *kvm, int id) return NULL; } -#define kvm_for_each_memslot(memslot, slots) \ - for (memslot = &slots->memslots[0]; \ - memslot < slots->memslots + slots->used_slots; memslot++) \ - if (WARN_ON_ONCE(!memslot->npages)) { \ - } else +#define kvm_for_each_memslot(memslot, slots) \ + for (memslot = &slots->memslots[0]; \ + memslot < slots->memslots + KVM_MEM_SLOTS_NUM && memslot->npages;\ + memslot++) -void kvm_vcpu_destroy(struct kvm_vcpu *vcpu); +int kvm_vcpu_init(struct kvm_vcpu *vcpu, struct kvm *kvm, unsigned id); +void kvm_vcpu_uninit(struct kvm_vcpu *vcpu); -void vcpu_load(struct kvm_vcpu *vcpu); +int __must_check vcpu_load(struct kvm_vcpu *vcpu); void vcpu_put(struct kvm_vcpu *vcpu); #ifdef __KVM_HAVE_IOAPIC -void kvm_arch_post_irq_ack_notifier_list_update(struct kvm *kvm); +void kvm_vcpu_request_scan_ioapic(struct kvm *kvm); void kvm_arch_post_irq_routing_update(struct kvm *kvm); #else -static inline void kvm_arch_post_irq_ack_notifier_list_update(struct kvm *kvm) +static inline void kvm_vcpu_request_scan_ioapic(struct kvm *kvm) { } static inline void kvm_arch_post_irq_routing_update(struct kvm *kvm) @@ -761,17 +527,13 @@ int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align, void kvm_exit(void); void kvm_get_kvm(struct kvm *kvm); -bool kvm_get_kvm_safe(struct kvm *kvm); void kvm_put_kvm(struct kvm *kvm); -bool file_is_kvm(struct file *file); -void kvm_put_kvm_no_destroy(struct kvm *kvm); static inline struct kvm_memslots *__kvm_memslots(struct kvm *kvm, int as_id) { - as_id = array_index_nospec(as_id, KVM_ADDRESS_SPACE_NUM); - return srcu_dereference_check(kvm->memslots[as_id], &kvm->srcu, - lockdep_is_held(&kvm->slots_lock) || - !refcount_read(&kvm->users_count)); + return rcu_dereference_check(kvm->memslots[as_id], + srcu_read_lock_held(&kvm->srcu) + || lockdep_is_held(&kvm->slots_lock)); } static inline struct kvm_memslots 
*kvm_memslots(struct kvm *kvm) @@ -786,15 +548,12 @@ static inline struct kvm_memslots *kvm_vcpu_memslots(struct kvm_vcpu *vcpu) return __kvm_memslots(vcpu->kvm, as_id); } -static inline -struct kvm_memory_slot *id_to_memslot(struct kvm_memslots *slots, int id) +static inline struct kvm_memory_slot * +id_to_memslot(struct kvm_memslots *slots, int id) { int index = slots->id_to_index[id]; struct kvm_memory_slot *slot; - if (index < 0) - return NULL; - slot = &slots->memslots[index]; WARN_ON(slot->id != id); @@ -823,17 +582,22 @@ int kvm_set_memory_region(struct kvm *kvm, const struct kvm_userspace_memory_region *mem); int __kvm_set_memory_region(struct kvm *kvm, const struct kvm_userspace_memory_region *mem); -void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *slot); -void kvm_arch_memslots_updated(struct kvm *kvm, u64 gen); +void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *free, + struct kvm_memory_slot *dont); +int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot, + unsigned long npages); +void kvm_arch_memslots_updated(struct kvm *kvm, struct kvm_memslots *slots); int kvm_arch_prepare_memory_region(struct kvm *kvm, struct kvm_memory_slot *memslot, const struct kvm_userspace_memory_region *mem, enum kvm_mr_change change); void kvm_arch_commit_memory_region(struct kvm *kvm, const struct kvm_userspace_memory_region *mem, - struct kvm_memory_slot *old, + const struct kvm_memory_slot *old, const struct kvm_memory_slot *new, enum kvm_mr_change change); +bool kvm_largepages_enabled(void); +void kvm_disable_largepages(void); /* flush all memory translations */ void kvm_arch_flush_shadow_all(struct kvm *kvm); /* flush memory translations pointing to 'slot' */ @@ -853,6 +617,7 @@ void kvm_release_page_clean(struct page *page); void kvm_release_page_dirty(struct page *page); void kvm_set_page_accessed(struct page *page); +kvm_pfn_t gfn_to_pfn_atomic(struct kvm *kvm, gfn_t gfn); kvm_pfn_t gfn_to_pfn(struct kvm *kvm, 
gfn_t gfn); kvm_pfn_t gfn_to_pfn_prot(struct kvm *kvm, gfn_t gfn, bool write_fault, bool *writable); @@ -860,95 +625,40 @@ kvm_pfn_t gfn_to_pfn_memslot(struct kvm_memory_slot *slot, gfn_t gfn); kvm_pfn_t gfn_to_pfn_memslot_atomic(struct kvm_memory_slot *slot, gfn_t gfn); kvm_pfn_t __gfn_to_pfn_memslot(struct kvm_memory_slot *slot, gfn_t gfn, bool atomic, bool *async, bool write_fault, - bool *writable, hva_t *hva); + bool *writable); void kvm_release_pfn_clean(kvm_pfn_t pfn); -void kvm_release_pfn_dirty(kvm_pfn_t pfn); void kvm_set_pfn_dirty(kvm_pfn_t pfn); void kvm_set_pfn_accessed(kvm_pfn_t pfn); +void kvm_get_pfn(kvm_pfn_t pfn); -void kvm_release_pfn(kvm_pfn_t pfn, bool dirty, struct gfn_to_pfn_cache *cache); int kvm_read_guest_page(struct kvm *kvm, gfn_t gfn, void *data, int offset, int len); +int kvm_read_guest_atomic(struct kvm *kvm, gpa_t gpa, void *data, + unsigned long len); int kvm_read_guest(struct kvm *kvm, gpa_t gpa, void *data, unsigned long len); int kvm_read_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc, void *data, unsigned long len); -int kvm_read_guest_offset_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc, - void *data, unsigned int offset, - unsigned long len); int kvm_write_guest_page(struct kvm *kvm, gfn_t gfn, const void *data, int offset, int len); int kvm_write_guest(struct kvm *kvm, gpa_t gpa, const void *data, unsigned long len); int kvm_write_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc, void *data, unsigned long len); -int kvm_write_guest_offset_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc, - void *data, unsigned int offset, - unsigned long len); int kvm_gfn_to_hva_cache_init(struct kvm *kvm, struct gfn_to_hva_cache *ghc, gpa_t gpa, unsigned long len); - -#define __kvm_get_guest(kvm, gfn, offset, v) \ -({ \ - unsigned long __addr = gfn_to_hva(kvm, gfn); \ - typeof(v) __user *__uaddr = (typeof(__uaddr))(__addr + offset); \ - int __ret = -EFAULT; \ - \ - if (!kvm_is_error_hva(__addr)) \ - __ret = 
get_user(v, __uaddr); \ - __ret; \ -}) - -#define kvm_get_guest(kvm, gpa, v) \ -({ \ - gpa_t __gpa = gpa; \ - struct kvm *__kvm = kvm; \ - \ - __kvm_get_guest(__kvm, __gpa >> PAGE_SHIFT, \ - offset_in_page(__gpa), v); \ -}) - -#define __kvm_put_guest(kvm, gfn, offset, v) \ -({ \ - unsigned long __addr = gfn_to_hva(kvm, gfn); \ - typeof(v) __user *__uaddr = (typeof(__uaddr))(__addr + offset); \ - int __ret = -EFAULT; \ - \ - if (!kvm_is_error_hva(__addr)) \ - __ret = put_user(v, __uaddr); \ - if (!__ret) \ - mark_page_dirty(kvm, gfn); \ - __ret; \ -}) - -#define kvm_put_guest(kvm, gpa, v) \ -({ \ - gpa_t __gpa = gpa; \ - struct kvm *__kvm = kvm; \ - \ - __kvm_put_guest(__kvm, __gpa >> PAGE_SHIFT, \ - offset_in_page(__gpa), v); \ -}) - +int kvm_clear_guest_page(struct kvm *kvm, gfn_t gfn, int offset, int len); int kvm_clear_guest(struct kvm *kvm, gpa_t gpa, unsigned long len); struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn); bool kvm_is_visible_gfn(struct kvm *kvm, gfn_t gfn); -bool kvm_vcpu_is_visible_gfn(struct kvm_vcpu *vcpu, gfn_t gfn); -unsigned long kvm_host_page_size(struct kvm_vcpu *vcpu, gfn_t gfn); -void mark_page_dirty_in_slot(struct kvm *kvm, struct kvm_memory_slot *memslot, gfn_t gfn); +unsigned long kvm_host_page_size(struct kvm *kvm, gfn_t gfn); void mark_page_dirty(struct kvm *kvm, gfn_t gfn); struct kvm_memslots *kvm_vcpu_memslots(struct kvm_vcpu *vcpu); struct kvm_memory_slot *kvm_vcpu_gfn_to_memslot(struct kvm_vcpu *vcpu, gfn_t gfn); kvm_pfn_t kvm_vcpu_gfn_to_pfn_atomic(struct kvm_vcpu *vcpu, gfn_t gfn); kvm_pfn_t kvm_vcpu_gfn_to_pfn(struct kvm_vcpu *vcpu, gfn_t gfn); -int kvm_vcpu_map(struct kvm_vcpu *vcpu, gpa_t gpa, struct kvm_host_map *map); -int kvm_map_gfn(struct kvm_vcpu *vcpu, gfn_t gfn, struct kvm_host_map *map, - struct gfn_to_pfn_cache *cache, bool atomic); struct page *kvm_vcpu_gfn_to_page(struct kvm_vcpu *vcpu, gfn_t gfn); -void kvm_vcpu_unmap(struct kvm_vcpu *vcpu, struct kvm_host_map *map, bool dirty); -int 
kvm_unmap_gfn(struct kvm_vcpu *vcpu, struct kvm_host_map *map, - struct gfn_to_pfn_cache *cache, bool dirty, bool atomic); unsigned long kvm_vcpu_gfn_to_hva(struct kvm_vcpu *vcpu, gfn_t gfn); unsigned long kvm_vcpu_gfn_to_hva_prot(struct kvm_vcpu *vcpu, gfn_t gfn, bool *writable); int kvm_vcpu_read_guest_page(struct kvm_vcpu *vcpu, gfn_t gfn, void *data, int offset, @@ -963,59 +673,44 @@ int kvm_vcpu_write_guest(struct kvm_vcpu *vcpu, gpa_t gpa, const void *data, unsigned long len); void kvm_vcpu_mark_page_dirty(struct kvm_vcpu *vcpu, gfn_t gfn); -void kvm_sigset_activate(struct kvm_vcpu *vcpu); -void kvm_sigset_deactivate(struct kvm_vcpu *vcpu); - void kvm_vcpu_block(struct kvm_vcpu *vcpu); void kvm_arch_vcpu_blocking(struct kvm_vcpu *vcpu); void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu); -bool kvm_vcpu_wake_up(struct kvm_vcpu *vcpu); +void kvm_vcpu_wake_up(struct kvm_vcpu *vcpu); void kvm_vcpu_kick(struct kvm_vcpu *vcpu); int kvm_vcpu_yield_to(struct kvm_vcpu *target); -void kvm_vcpu_on_spin(struct kvm_vcpu *vcpu, bool usermode_vcpu_not_eligible); +void kvm_vcpu_on_spin(struct kvm_vcpu *vcpu); +void kvm_load_guest_fpu(struct kvm_vcpu *vcpu); +void kvm_put_guest_fpu(struct kvm_vcpu *vcpu); void kvm_flush_remote_tlbs(struct kvm *kvm); void kvm_reload_remote_mmus(struct kvm *kvm); - -#ifdef KVM_ARCH_NR_OBJS_PER_MEMORY_CACHE -int kvm_mmu_topup_memory_cache(struct kvm_mmu_memory_cache *mc, int min); -int kvm_mmu_memory_cache_nr_free_objects(struct kvm_mmu_memory_cache *mc); -void kvm_mmu_free_memory_cache(struct kvm_mmu_memory_cache *mc); -void *kvm_mmu_memory_cache_alloc(struct kvm_mmu_memory_cache *mc); -#endif - -void kvm_inc_notifier_count(struct kvm *kvm, unsigned long start, - unsigned long end); -void kvm_dec_notifier_count(struct kvm *kvm, unsigned long start, - unsigned long end); +bool kvm_make_all_cpus_request(struct kvm *kvm, unsigned int req); long kvm_arch_dev_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg); long 
kvm_arch_vcpu_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg); -vm_fault_t kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf); +int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf); int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext); +int kvm_get_dirty_log(struct kvm *kvm, + struct kvm_dirty_log *log, int *is_dirty); + +int kvm_get_dirty_log_protect(struct kvm *kvm, + struct kvm_dirty_log *log, bool *is_dirty); + void kvm_arch_mmu_enable_log_dirty_pt_masked(struct kvm *kvm, struct kvm_memory_slot *slot, gfn_t gfn_offset, unsigned long mask); -void kvm_arch_sync_dirty_log(struct kvm *kvm, struct kvm_memory_slot *memslot); -#ifdef CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT -void kvm_arch_flush_remote_tlbs_memslot(struct kvm *kvm, - const struct kvm_memory_slot *memslot); -#else /* !CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT */ -int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log); -int kvm_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log, - int *is_dirty, struct kvm_memory_slot **memslot); -#endif +int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, + struct kvm_dirty_log *log); int kvm_vm_ioctl_irq_line(struct kvm *kvm, struct kvm_irq_level *irq_level, bool line_status); -int kvm_vm_ioctl_enable_cap(struct kvm *kvm, - struct kvm_enable_cap *cap); long kvm_arch_vm_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg); @@ -1037,47 +732,38 @@ int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu, struct kvm_mp_state *mp_state); int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu, struct kvm_guest_debug *dbg); -int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu); +int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run); int kvm_arch_init(void *opaque); void kvm_arch_exit(void); +int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu); +void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu); + void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu); +void 
kvm_arch_vcpu_free(struct kvm_vcpu *vcpu); void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu); void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu); -int kvm_arch_vcpu_precreate(struct kvm *kvm, unsigned int id); -int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu); +struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id); +int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu); void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu); void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu); -#ifdef CONFIG_HAVE_KVM_PM_NOTIFIER -int kvm_arch_pm_notifier(struct kvm *kvm, unsigned long state); -#endif - -#ifdef __KVM_HAVE_ARCH_VCPU_DEBUGFS -void kvm_arch_create_vcpu_debugfs(struct kvm_vcpu *vcpu, struct dentry *debugfs_dentry); -#endif +bool kvm_arch_has_vcpu_debugfs(void); +int kvm_arch_create_vcpu_debugfs(struct kvm_vcpu *vcpu); int kvm_arch_hardware_enable(void); void kvm_arch_hardware_disable(void); -int kvm_arch_hardware_setup(void *opaque); +int kvm_arch_hardware_setup(void); void kvm_arch_hardware_unsetup(void); -int kvm_arch_check_processor_compat(void *opaque); +void kvm_arch_check_processor_compat(void *rtn); int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu); -bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu); int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu); -bool kvm_arch_dy_runnable(struct kvm_vcpu *vcpu); -bool kvm_arch_dy_has_pending_interrupt(struct kvm_vcpu *vcpu); -int kvm_arch_post_init_vm(struct kvm *kvm); -void kvm_arch_pre_destroy_vm(struct kvm *kvm); -int kvm_arch_create_vm_debugfs(struct kvm *kvm); + +void *kvm_kvzalloc(unsigned long size); #ifndef __KVM_HAVE_ARCH_VM_ALLOC -/* - * All architectures that want to use vzalloc currently also - * need their own kvm_arch_alloc_vm implementation. 
- */ static inline struct kvm *kvm_arch_alloc_vm(void) { return kzalloc(sizeof(struct kvm), GFP_KERNEL); @@ -1089,13 +775,6 @@ static inline void kvm_arch_free_vm(struct kvm *kvm) } #endif -#ifndef __KVM_HAVE_ARCH_FLUSH_REMOTE_TLB -static inline int kvm_arch_flush_remote_tlb(struct kvm *kvm) -{ - return -ENOTSUPP; -} -#endif - #ifdef __KVM_HAVE_ARCH_NONCOHERENT_DMA void kvm_arch_register_noncoherent_dma(struct kvm *kvm); void kvm_arch_unregister_noncoherent_dma(struct kvm *kvm); @@ -1133,12 +812,12 @@ static inline bool kvm_arch_has_assigned_device(struct kvm *kvm) } #endif -static inline struct rcuwait *kvm_arch_vcpu_get_wait(struct kvm_vcpu *vcpu) +static inline struct swait_queue_head *kvm_arch_vcpu_wq(struct kvm_vcpu *vcpu) { #ifdef __KVM_HAVE_ARCH_WQP - return vcpu->arch.waitp; + return vcpu->arch.wqp; #else - return &vcpu->wait; + return &vcpu->wq; #endif } @@ -1161,10 +840,9 @@ void kvm_arch_destroy_vm(struct kvm *kvm); void kvm_arch_sync_events(struct kvm *kvm); int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu); +void kvm_vcpu_kick(struct kvm_vcpu *vcpu); bool kvm_is_reserved_pfn(kvm_pfn_t pfn); -bool kvm_is_zone_device_pfn(kvm_pfn_t pfn); -bool kvm_is_transparent_hugepage(kvm_pfn_t pfn); struct kvm_irq_ack_notifier { struct hlist_node link; @@ -1192,52 +870,42 @@ void kvm_unregister_irq_ack_notifier(struct kvm *kvm, struct kvm_irq_ack_notifier *kian); int kvm_request_irq_source_id(struct kvm *kvm); void kvm_free_irq_source_id(struct kvm *kvm, int irq_source_id); -bool kvm_arch_irqfd_allowed(struct kvm *kvm, struct kvm_irqfd *args); -/* - * Returns a pointer to the memslot at slot_index if it contains gfn. - * Otherwise returns NULL. 
- */ -static inline struct kvm_memory_slot * -try_get_memslot(struct kvm_memslots *slots, int slot_index, gfn_t gfn) +#ifdef CONFIG_KVM_DEVICE_ASSIGNMENT +int kvm_iommu_map_pages(struct kvm *kvm, struct kvm_memory_slot *slot); +void kvm_iommu_unmap_pages(struct kvm *kvm, struct kvm_memory_slot *slot); +#else +static inline int kvm_iommu_map_pages(struct kvm *kvm, + struct kvm_memory_slot *slot) { - struct kvm_memory_slot *slot; - - if (slot_index < 0 || slot_index >= slots->used_slots) - return NULL; - - /* - * slot_index can come from vcpu->last_used_slot which is not kept - * in sync with userspace-controllable memslot deletion. So use nospec - * to prevent the CPU from speculating past the end of memslots[]. - */ - slot_index = array_index_nospec(slot_index, slots->used_slots); - slot = &slots->memslots[slot_index]; - - if (gfn >= slot->base_gfn && gfn < slot->base_gfn + slot->npages) - return slot; - else - return NULL; + return 0; } +static inline void kvm_iommu_unmap_pages(struct kvm *kvm, + struct kvm_memory_slot *slot) +{ +} +#endif + /* - * Returns a pointer to the memslot that contains gfn and records the index of - * the slot in index. Otherwise returns NULL. - * - * IMPORTANT: Slots are sorted from highest GFN to lowest GFN! + * search_memslots() and __gfn_to_memslot() are here because they are + * used in non-modular code in arch/powerpc/kvm/book3s_hv_rm_mmu.c. + * gfn_to_memslot() itself isn't here as an inline because that would + * bloat other code too much. 
*/ static inline struct kvm_memory_slot * -search_memslots(struct kvm_memslots *slots, gfn_t gfn, int *index) +search_memslots(struct kvm_memslots *slots, gfn_t gfn) { int start = 0, end = slots->used_slots; + int slot = atomic_read(&slots->lru_slot); struct kvm_memory_slot *memslots = slots->memslots; - struct kvm_memory_slot *slot; - if (unlikely(!slots->used_slots)) - return NULL; + if (gfn >= memslots[slot].base_gfn && + gfn < memslots[slot].base_gfn + memslots[slot].npages) + return &memslots[slot]; while (start < end) { - int slot = start + (end - start) / 2; + slot = start + (end - start) / 2; if (gfn >= memslots[slot].base_gfn) end = slot; @@ -1245,51 +913,25 @@ search_memslots(struct kvm_memslots *slots, gfn_t gfn, int *index) start = slot + 1; } - slot = try_get_memslot(slots, start, gfn); - if (slot) { - *index = start; - return slot; + if (gfn >= memslots[start].base_gfn && + gfn < memslots[start].base_gfn + memslots[start].npages) { + atomic_set(&slots->lru_slot, start); + return &memslots[start]; } return NULL; } -/* - * __gfn_to_memslot() and its descendants are here because it is called from - * non-modular code in arch/powerpc/kvm/book3s_64_vio{,_hv}.c. gfn_to_memslot() - * itself isn't here as an inline because that would bloat other code too much. - */ static inline struct kvm_memory_slot * __gfn_to_memslot(struct kvm_memslots *slots, gfn_t gfn) { - struct kvm_memory_slot *slot; - int slot_index = atomic_read(&slots->last_used_slot); - - slot = try_get_memslot(slots, slot_index, gfn); - if (slot) - return slot; - - slot = search_memslots(slots, gfn, &slot_index); - if (slot) { - atomic_set(&slots->last_used_slot, slot_index); - return slot; - } - - return NULL; + return search_memslots(slots, gfn); } static inline unsigned long -__gfn_to_hva_memslot(const struct kvm_memory_slot *slot, gfn_t gfn) +__gfn_to_hva_memslot(struct kvm_memory_slot *slot, gfn_t gfn) { - /* - * The index was checked originally in search_memslots. 
To avoid - * that a malicious guest builds a Spectre gadget out of e.g. page - * table walks, do not let the processor speculate loads outside - * the guest's registered memslots. - */ - unsigned long offset = gfn - slot->base_gfn; - offset = array_index_nospec(offset, slot->npages); - return slot->userspace_addr + offset * PAGE_SIZE; + return slot->userspace_addr + (gfn - slot->base_gfn) * PAGE_SIZE; } static inline int memslot_id(struct kvm *kvm, gfn_t gfn) @@ -1320,12 +962,6 @@ static inline hpa_t pfn_to_hpa(kvm_pfn_t pfn) return (hpa_t)pfn << PAGE_SHIFT; } -static inline struct page *kvm_vcpu_gpa_to_page(struct kvm_vcpu *vcpu, - gpa_t gpa) -{ - return kvm_vcpu_gfn_to_page(vcpu, gpa_to_gfn(gpa)); -} - static inline bool kvm_is_error_gpa(struct kvm *kvm, gpa_t gpa) { unsigned long hva = gfn_to_hva(kvm, gpa_to_gfn(gpa)); @@ -1339,174 +975,18 @@ enum kvm_stat_kind { }; struct kvm_stat_data { + int offset; struct kvm *kvm; - const struct _kvm_stats_desc *desc; +}; + +struct kvm_stats_debugfs_item { + const char *name; + int offset; enum kvm_stat_kind kind; }; - -struct _kvm_stats_desc { - struct kvm_stats_desc desc; - char name[KVM_STATS_NAME_SIZE]; -}; - -#define STATS_DESC_COMMON(type, unit, base, exp, sz, bsz) \ - .flags = type | unit | base | \ - BUILD_BUG_ON_ZERO(type & ~KVM_STATS_TYPE_MASK) | \ - BUILD_BUG_ON_ZERO(unit & ~KVM_STATS_UNIT_MASK) | \ - BUILD_BUG_ON_ZERO(base & ~KVM_STATS_BASE_MASK), \ - .exponent = exp, \ - .size = sz, \ - .bucket_size = bsz - -#define VM_GENERIC_STATS_DESC(stat, type, unit, base, exp, sz, bsz) \ - { \ - { \ - STATS_DESC_COMMON(type, unit, base, exp, sz, bsz), \ - .offset = offsetof(struct kvm_vm_stat, generic.stat) \ - }, \ - .name = #stat, \ - } -#define VCPU_GENERIC_STATS_DESC(stat, type, unit, base, exp, sz, bsz) \ - { \ - { \ - STATS_DESC_COMMON(type, unit, base, exp, sz, bsz), \ - .offset = offsetof(struct kvm_vcpu_stat, generic.stat) \ - }, \ - .name = #stat, \ - } -#define VM_STATS_DESC(stat, type, unit, base, exp, sz, 
bsz) \ - { \ - { \ - STATS_DESC_COMMON(type, unit, base, exp, sz, bsz), \ - .offset = offsetof(struct kvm_vm_stat, stat) \ - }, \ - .name = #stat, \ - } -#define VCPU_STATS_DESC(stat, type, unit, base, exp, sz, bsz) \ - { \ - { \ - STATS_DESC_COMMON(type, unit, base, exp, sz, bsz), \ - .offset = offsetof(struct kvm_vcpu_stat, stat) \ - }, \ - .name = #stat, \ - } -/* SCOPE: VM, VM_GENERIC, VCPU, VCPU_GENERIC */ -#define STATS_DESC(SCOPE, stat, type, unit, base, exp, sz, bsz) \ - SCOPE##_STATS_DESC(stat, type, unit, base, exp, sz, bsz) - -#define STATS_DESC_CUMULATIVE(SCOPE, name, unit, base, exponent) \ - STATS_DESC(SCOPE, name, KVM_STATS_TYPE_CUMULATIVE, \ - unit, base, exponent, 1, 0) -#define STATS_DESC_INSTANT(SCOPE, name, unit, base, exponent) \ - STATS_DESC(SCOPE, name, KVM_STATS_TYPE_INSTANT, \ - unit, base, exponent, 1, 0) -#define STATS_DESC_PEAK(SCOPE, name, unit, base, exponent) \ - STATS_DESC(SCOPE, name, KVM_STATS_TYPE_PEAK, \ - unit, base, exponent, 1, 0) -#define STATS_DESC_LINEAR_HIST(SCOPE, name, unit, base, exponent, sz, bsz) \ - STATS_DESC(SCOPE, name, KVM_STATS_TYPE_LINEAR_HIST, \ - unit, base, exponent, sz, bsz) -#define STATS_DESC_LOG_HIST(SCOPE, name, unit, base, exponent, sz) \ - STATS_DESC(SCOPE, name, KVM_STATS_TYPE_LOG_HIST, \ - unit, base, exponent, sz, 0) - -/* Cumulative counter, read/write */ -#define STATS_DESC_COUNTER(SCOPE, name) \ - STATS_DESC_CUMULATIVE(SCOPE, name, KVM_STATS_UNIT_NONE, \ - KVM_STATS_BASE_POW10, 0) -/* Instantaneous counter, read only */ -#define STATS_DESC_ICOUNTER(SCOPE, name) \ - STATS_DESC_INSTANT(SCOPE, name, KVM_STATS_UNIT_NONE, \ - KVM_STATS_BASE_POW10, 0) -/* Peak counter, read/write */ -#define STATS_DESC_PCOUNTER(SCOPE, name) \ - STATS_DESC_PEAK(SCOPE, name, KVM_STATS_UNIT_NONE, \ - KVM_STATS_BASE_POW10, 0) - -/* Cumulative time in nanosecond */ -#define STATS_DESC_TIME_NSEC(SCOPE, name) \ - STATS_DESC_CUMULATIVE(SCOPE, name, KVM_STATS_UNIT_SECONDS, \ - KVM_STATS_BASE_POW10, -9) -/* Linear histogram for 
time in nanosecond */ -#define STATS_DESC_LINHIST_TIME_NSEC(SCOPE, name, sz, bsz) \ - STATS_DESC_LINEAR_HIST(SCOPE, name, KVM_STATS_UNIT_SECONDS, \ - KVM_STATS_BASE_POW10, -9, sz, bsz) -/* Logarithmic histogram for time in nanosecond */ -#define STATS_DESC_LOGHIST_TIME_NSEC(SCOPE, name, sz) \ - STATS_DESC_LOG_HIST(SCOPE, name, KVM_STATS_UNIT_SECONDS, \ - KVM_STATS_BASE_POW10, -9, sz) - -#define KVM_GENERIC_VM_STATS() \ - STATS_DESC_COUNTER(VM_GENERIC, remote_tlb_flush), \ - STATS_DESC_COUNTER(VM_GENERIC, remote_tlb_flush_requests) - -#define KVM_GENERIC_VCPU_STATS() \ - STATS_DESC_COUNTER(VCPU_GENERIC, halt_successful_poll), \ - STATS_DESC_COUNTER(VCPU_GENERIC, halt_attempted_poll), \ - STATS_DESC_COUNTER(VCPU_GENERIC, halt_poll_invalid), \ - STATS_DESC_COUNTER(VCPU_GENERIC, halt_wakeup), \ - STATS_DESC_TIME_NSEC(VCPU_GENERIC, halt_poll_success_ns), \ - STATS_DESC_TIME_NSEC(VCPU_GENERIC, halt_poll_fail_ns), \ - STATS_DESC_TIME_NSEC(VCPU_GENERIC, halt_wait_ns), \ - STATS_DESC_LOGHIST_TIME_NSEC(VCPU_GENERIC, halt_poll_success_hist, \ - HALT_POLL_HIST_COUNT), \ - STATS_DESC_LOGHIST_TIME_NSEC(VCPU_GENERIC, halt_poll_fail_hist, \ - HALT_POLL_HIST_COUNT), \ - STATS_DESC_LOGHIST_TIME_NSEC(VCPU_GENERIC, halt_wait_hist, \ - HALT_POLL_HIST_COUNT) - +extern struct kvm_stats_debugfs_item debugfs_entries[]; extern struct dentry *kvm_debugfs_dir; -ssize_t kvm_stats_read(char *id, const struct kvm_stats_header *header, - const struct _kvm_stats_desc *desc, - void *stats, size_t size_stats, - char __user *user_buffer, size_t size, loff_t *offset); - -/** - * kvm_stats_linear_hist_update() - Update bucket value for linear histogram - * statistics data. 
- * - * @data: start address of the stats data - * @size: the number of bucket of the stats data - * @value: the new value used to update the linear histogram's bucket - * @bucket_size: the size (width) of a bucket - */ -static inline void kvm_stats_linear_hist_update(u64 *data, size_t size, - u64 value, size_t bucket_size) -{ - size_t index = div64_u64(value, bucket_size); - - index = min(index, size - 1); - ++data[index]; -} - -/** - * kvm_stats_log_hist_update() - Update bucket value for logarithmic histogram - * statistics data. - * - * @data: start address of the stats data - * @size: the number of bucket of the stats data - * @value: the new value used to update the logarithmic histogram's bucket - */ -static inline void kvm_stats_log_hist_update(u64 *data, size_t size, u64 value) -{ - size_t index = fls64(value); - - index = min(index, size - 1); - ++data[index]; -} - -#define KVM_STATS_LINEAR_HIST_UPDATE(array, value, bsize) \ - kvm_stats_linear_hist_update(array, ARRAY_SIZE(array), value, bsize) -#define KVM_STATS_LOG_HIST_UPDATE(array, value) \ - kvm_stats_log_hist_update(array, ARRAY_SIZE(array), value) - - -extern const struct kvm_stats_header kvm_vm_stats_header; -extern const struct _kvm_stats_desc kvm_vm_stats_desc[]; -extern const struct kvm_stats_header kvm_vcpu_stats_header; -extern const struct _kvm_stats_desc kvm_vcpu_stats_desc[]; - #if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER) static inline int mmu_notifier_retry(struct kvm *kvm, unsigned long mmu_seq) { @@ -1527,33 +1007,18 @@ static inline int mmu_notifier_retry(struct kvm *kvm, unsigned long mmu_seq) return 1; return 0; } - -static inline int mmu_notifier_retry_hva(struct kvm *kvm, - unsigned long mmu_seq, - unsigned long hva) -{ - lockdep_assert_held(&kvm->mmu_lock); - /* - * If mmu_notifier_count is non-zero, then the range maintained by - * kvm_mmu_notifier_invalidate_range_start contains all addresses that - * might be being invalidated. 
Note that it may include some false - * positives, due to shortcuts when handing concurrent invalidations. - */ - if (unlikely(kvm->mmu_notifier_count) && - hva >= kvm->mmu_notifier_range_start && - hva < kvm->mmu_notifier_range_end) - return 1; - if (kvm->mmu_notifier_seq != mmu_seq) - return 1; - return 0; -} #endif #ifdef CONFIG_HAVE_KVM_IRQ_ROUTING -#define KVM_MAX_IRQ_ROUTES 4096 /* might need extension/rework in the future */ +#ifdef CONFIG_S390 +#define KVM_MAX_IRQ_ROUTES 4096 //FIXME: we can have more than that... +#elif defined(CONFIG_ARM64) +#define KVM_MAX_IRQ_ROUTES 4096 +#else +#define KVM_MAX_IRQ_ROUTES 1024 +#endif -bool kvm_arch_can_set_irq_routing(struct kvm *kvm); int kvm_set_irq_routing(struct kvm *kvm, const struct kvm_irq_routing_entry *entries, unsigned nr, @@ -1605,6 +1070,7 @@ static inline void kvm_irq_routing_update(struct kvm *kvm) { } #endif +void kvm_arch_irq_routing_update(struct kvm *kvm); static inline int kvm_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args) { @@ -1613,8 +1079,6 @@ static inline int kvm_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args) #endif /* CONFIG_HAVE_KVM_EVENTFD */ -void kvm_arch_irq_routing_update(struct kvm *kvm); - static inline void kvm_make_request(int req, struct kvm_vcpu *vcpu) { /* @@ -1622,28 +1086,13 @@ static inline void kvm_make_request(int req, struct kvm_vcpu *vcpu) * caller. Paired with the smp_mb__after_atomic in kvm_check_request. 
*/ smp_wmb(); - set_bit(req & KVM_REQUEST_MASK, (void *)&vcpu->requests); -} - -static inline bool kvm_request_pending(struct kvm_vcpu *vcpu) -{ - return READ_ONCE(vcpu->requests); -} - -static inline bool kvm_test_request(int req, struct kvm_vcpu *vcpu) -{ - return test_bit(req & KVM_REQUEST_MASK, (void *)&vcpu->requests); -} - -static inline void kvm_clear_request(int req, struct kvm_vcpu *vcpu) -{ - clear_bit(req & KVM_REQUEST_MASK, (void *)&vcpu->requests); + set_bit(req, &vcpu->requests); } static inline bool kvm_check_request(int req, struct kvm_vcpu *vcpu) { - if (kvm_test_request(req, vcpu)) { - kvm_clear_request(req, vcpu); + if (test_bit(req, &vcpu->requests)) { + clear_bit(req, &vcpu->requests); /* * Ensure the rest of the request is visible to kvm_check_request's @@ -1658,13 +1107,8 @@ static inline bool kvm_check_request(int req, struct kvm_vcpu *vcpu) extern bool kvm_rebooting; -extern unsigned int halt_poll_ns; -extern unsigned int halt_poll_ns_grow; -extern unsigned int halt_poll_ns_grow_start; -extern unsigned int halt_poll_ns_shrink; - struct kvm_device { - const struct kvm_device_ops *ops; + struct kvm_device_ops *ops; struct kvm *kvm; void *private; struct list_head vm_node; @@ -1697,30 +1141,21 @@ struct kvm_device_ops { */ void (*destroy)(struct kvm_device *dev); - /* - * Release is an alternative method to free the device. It is - * called when the device file descriptor is closed. Once - * release is called, the destroy method will not be called - * anymore as the device is removed from the device list of - * the VM. kvm->lock is held. 
- */ - void (*release)(struct kvm_device *dev); - int (*set_attr)(struct kvm_device *dev, struct kvm_device_attr *attr); int (*get_attr)(struct kvm_device *dev, struct kvm_device_attr *attr); int (*has_attr)(struct kvm_device *dev, struct kvm_device_attr *attr); long (*ioctl)(struct kvm_device *dev, unsigned int ioctl, unsigned long arg); - int (*mmap)(struct kvm_device *dev, struct vm_area_struct *vma); }; void kvm_device_get(struct kvm_device *dev); void kvm_device_put(struct kvm_device *dev); struct kvm_device *kvm_device_from_filp(struct file *filp); -int kvm_register_device_ops(const struct kvm_device_ops *ops, u32 type); +int kvm_register_device_ops(struct kvm_device_ops *ops, u32 type); void kvm_unregister_device_ops(u32 type); extern struct kvm_device_ops kvm_mpic_ops; +extern struct kvm_device_ops kvm_xics_ops; extern struct kvm_device_ops kvm_arm_vgic_v2_ops; extern struct kvm_device_ops kvm_arm_vgic_v3_ops; @@ -1746,15 +1181,6 @@ static inline void kvm_vcpu_set_dy_eligible(struct kvm_vcpu *vcpu, bool val) } #endif /* CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT */ -static inline bool kvm_is_visible_memslot(struct kvm_memory_slot *memslot) -{ - return (memslot && memslot->id < KVM_USER_MEM_SLOTS && - !(memslot->flags & KVM_MEMSLOT_INVALID)); -} - -struct kvm_vcpu *kvm_get_running_vcpu(void); -struct kvm_vcpu * __percpu *kvm_get_running_vcpus(void); - #ifdef CONFIG_HAVE_KVM_IRQ_BYPASS bool kvm_arch_has_irq_bypass(void); int kvm_arch_irq_bypass_add_producer(struct irq_bypass_consumer *, @@ -1781,62 +1207,4 @@ static inline bool vcpu_valid_wakeup(struct kvm_vcpu *vcpu) } #endif /* CONFIG_HAVE_KVM_INVALID_WAKEUPS */ -#ifdef CONFIG_HAVE_KVM_NO_POLL -/* Callback that tells if we must not poll */ -bool kvm_arch_no_poll(struct kvm_vcpu *vcpu); -#else -static inline bool kvm_arch_no_poll(struct kvm_vcpu *vcpu) -{ - return false; -} -#endif /* CONFIG_HAVE_KVM_NO_POLL */ - -#ifdef CONFIG_HAVE_KVM_VCPU_ASYNC_IOCTL -long kvm_arch_vcpu_async_ioctl(struct file *filp, - unsigned 
int ioctl, unsigned long arg); -#else -static inline long kvm_arch_vcpu_async_ioctl(struct file *filp, - unsigned int ioctl, - unsigned long arg) -{ - return -ENOIOCTLCMD; -} -#endif /* CONFIG_HAVE_KVM_VCPU_ASYNC_IOCTL */ - -void kvm_arch_mmu_notifier_invalidate_range(struct kvm *kvm, - unsigned long start, unsigned long end); - -#ifdef CONFIG_HAVE_KVM_VCPU_RUN_PID_CHANGE -int kvm_arch_vcpu_run_pid_change(struct kvm_vcpu *vcpu); -#else -static inline int kvm_arch_vcpu_run_pid_change(struct kvm_vcpu *vcpu) -{ - return 0; -} -#endif /* CONFIG_HAVE_KVM_VCPU_RUN_PID_CHANGE */ - -typedef int (*kvm_vm_thread_fn_t)(struct kvm *kvm, uintptr_t data); - -int kvm_vm_create_worker_thread(struct kvm *kvm, kvm_vm_thread_fn_t thread_fn, - uintptr_t data, const char *name, - struct task_struct **thread_ptr); - -#ifdef CONFIG_KVM_XFER_TO_GUEST_WORK -static inline void kvm_handle_signal_exit(struct kvm_vcpu *vcpu) -{ - vcpu->run->exit_reason = KVM_EXIT_INTR; - vcpu->stat.signal_exits++; -} -#endif /* CONFIG_KVM_XFER_TO_GUEST_WORK */ - -/* - * This defines how many reserved entries we want to keep before we - * kick the vcpu to the userspace to avoid dirty ring full. This - * value can be tuned to higher if e.g. PML is enabled on the host. - */ -#define KVM_DIRTY_RING_RSVD_ENTRIES 64 - -/* Max number of entries allowed for each kvm dirty ring */ -#define KVM_DIRTY_RING_MAX_ENTRIES 65536 - #endif diff --git a/include/linux/kvm_irqfd.h b/include/linux/kvm_irqfd.h index dac047abdb..0c1de05098 100644 --- a/include/linux/kvm_irqfd.h +++ b/include/linux/kvm_irqfd.h @@ -1,5 +1,12 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License. 
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. * * irqfd: Allows an fd to be used to inject an interrupt to the guest * Credit goes to Avi Kivity for the original idea. @@ -39,10 +46,10 @@ struct kvm_kernel_irqfd_resampler { struct kvm_kernel_irqfd { /* Used for MSI fast-path */ struct kvm *kvm; - wait_queue_entry_t wait; + wait_queue_t wait; /* Update side is protected by irqfds.lock */ struct kvm_kernel_irq_routing_entry irq_entry; - seqcount_spinlock_t irq_entry_sc; + seqcount_t irq_entry_sc; /* Used for level IRQ fast-path */ int gsi; struct work_struct inject; diff --git a/include/linux/kvm_para.h b/include/linux/kvm_para.h index f23b90b028..35e568f04b 100644 --- a/include/linux/kvm_para.h +++ b/include/linux/kvm_para.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef __LINUX_KVM_PARA_H #define __LINUX_KVM_PARA_H @@ -9,9 +8,4 @@ static inline bool kvm_para_has_feature(unsigned int feature) { return !!(kvm_arch_para_features() & (1UL << feature)); } - -static inline bool kvm_para_has_hint(unsigned int feature) -{ - return !!(kvm_arch_para_hints() & (1UL << feature)); -} #endif /* __LINUX_KVM_PARA_H */ diff --git a/include/linux/kvm_types.h b/include/linux/kvm_types.h index 2237abb93c..8bf259dae9 100644 --- a/include/linux/kvm_types.h +++ b/include/linux/kvm_types.h @@ -1,4 +1,18 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. + * + */ #ifndef __KVM_TYPES_H__ #define __KVM_TYPES_H__ @@ -18,9 +32,7 @@ struct kvm_memslots; enum kvm_mr_change; -#include - -#include +#include /* * Address types: @@ -37,8 +49,6 @@ typedef unsigned long gva_t; typedef u64 gpa_t; typedef u64 gfn_t; -#define GPA_INVALID (~(gpa_t)0) - typedef unsigned long hva_t; typedef u64 hpa_t; typedef u64 hfn_t; @@ -53,49 +63,4 @@ struct gfn_to_hva_cache { struct kvm_memory_slot *memslot; }; -struct gfn_to_pfn_cache { - u64 generation; - gfn_t gfn; - kvm_pfn_t pfn; - bool dirty; -}; - -#ifdef KVM_ARCH_NR_OBJS_PER_MEMORY_CACHE -/* - * Memory caches are used to preallocate memory ahead of various MMU flows, - * e.g. page fault handlers. Gracefully handling allocation failures deep in - * MMU flows is problematic, as is triggering reclaim, I/O, etc... while - * holding MMU locks. Note, these caches act more like prefetch buffers than - * classical caches, i.e. objects are not returned to the cache on being freed. 
- */ -struct kvm_mmu_memory_cache { - int nobjs; - gfp_t gfp_zero; - struct kmem_cache *kmem_cache; - void *objects[KVM_ARCH_NR_OBJS_PER_MEMORY_CACHE]; -}; -#endif - -#define HALT_POLL_HIST_COUNT 32 - -struct kvm_vm_stat_generic { - u64 remote_tlb_flush; - u64 remote_tlb_flush_requests; -}; - -struct kvm_vcpu_stat_generic { - u64 halt_successful_poll; - u64 halt_attempted_poll; - u64 halt_poll_invalid; - u64 halt_wakeup; - u64 halt_poll_success_ns; - u64 halt_poll_fail_ns; - u64 halt_wait_ns; - u64 halt_poll_success_hist[HALT_POLL_HIST_COUNT]; - u64 halt_poll_fail_hist[HALT_POLL_HIST_COUNT]; - u64 halt_wait_hist[HALT_POLL_HIST_COUNT]; -}; - -#define KVM_STATS_NAME_SIZE 48 - #endif /* __KVM_TYPES_H__ */ diff --git a/include/linux/l2tp.h b/include/linux/l2tp.h index 0402eda1a9..bffdb962f1 100644 --- a/include/linux/l2tp.h +++ b/include/linux/l2tp.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ /* * L2TP-over-IP socket for L2TPv3. * diff --git a/include/linux/lapb.h b/include/linux/lapb.h index eb56472f23..873c1eb635 100644 --- a/include/linux/lapb.h +++ b/include/linux/lapb.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ /* * These are the public elements of the Linux LAPB module. 
*/ diff --git a/include/linux/latencytop.h b/include/linux/latencytop.h index abe3d95f79..59ccab297a 100644 --- a/include/linux/latencytop.h +++ b/include/linux/latencytop.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ /* * latencytop.h: Infrastructure for displaying latency * @@ -36,10 +35,10 @@ account_scheduler_latency(struct task_struct *task, int usecs, int inter) __account_scheduler_latency(task, usecs, inter); } -void clear_tsk_latency_tracing(struct task_struct *p); +void clear_all_latency_tracing(struct task_struct *p); -int sysctl_latencytop(struct ctl_table *table, int write, void *buffer, - size_t *lenp, loff_t *ppos); +extern int sysctl_latencytop(struct ctl_table *table, int write, + void __user *buffer, size_t *lenp, loff_t *ppos); #else @@ -48,7 +47,7 @@ account_scheduler_latency(struct task_struct *task, int usecs, int inter) { } -static inline void clear_tsk_latency_tracing(struct task_struct *p) +static inline void clear_all_latency_tracing(struct task_struct *p) { } diff --git a/include/linux/lcd.h b/include/linux/lcd.h index 238fb1dfed..504f6246f3 100644 --- a/include/linux/lcd.h +++ b/include/linux/lcd.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ /* * LCD Lowlevel Control Abstraction * @@ -41,6 +40,16 @@ struct lcd_ops { /* Get the LCD panel power status (0: full on, 1..3: controller power on, flat panel power off, 4: full off), see FB_BLANK_XXX */ int (*get_power)(struct lcd_device *); + /* + * Enable or disable power to the LCD(0: on; 4: off, see FB_BLANK_XXX) + * and this callback would be called proir to fb driver's callback. + * + * P.S. note that if early_set_power is not NULL then early fb notifier + * would be registered. + */ + int (*early_set_power)(struct lcd_device *, int power); + /* revert the effects of the early blank event. 
*/ + int (*r_early_set_power)(struct lcd_device *, int power); /* Enable or disable power to the LCD (0: on; 4: off, see FB_BLANK_XXX) */ int (*set_power)(struct lcd_device *, int power); /* Get the current contrast setting (0-max_contrast) */ diff --git a/include/linux/lcm.h b/include/linux/lcm.h index 0db3efd56e..1ce79a7f1d 100644 --- a/include/linux/lcm.h +++ b/include/linux/lcm.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LCM_H #define _LCM_H diff --git a/include/linux/led-class-flash.h b/include/linux/led-class-flash.h index 612b4cab38..e97966d1fb 100644 --- a/include/linux/led-class-flash.h +++ b/include/linux/led-class-flash.h @@ -1,9 +1,13 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * LED Flash class interface * * Copyright (C) 2015 Samsung Electronics Co., Ltd. * Author: Jacek Anaszewski + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ * */ #ifndef __LINUX_FLASH_LEDS_H_INCLUDED #define __LINUX_FLASH_LEDS_H_INCLUDED @@ -85,19 +89,16 @@ static inline struct led_classdev_flash *lcdev_to_flcdev( return container_of(lcdev, struct led_classdev_flash, led_cdev); } -#if IS_ENABLED(CONFIG_LEDS_CLASS_FLASH) /** - * led_classdev_flash_register_ext - register a new object of LED class with - * init data and with support for flash LEDs - * @parent: LED flash controller device this flash LED is driven by + * led_classdev_flash_register - register a new object of led_classdev class + * with support for flash LEDs + * @parent: the flash LED to register * @fled_cdev: the led_classdev_flash structure for this device - * @init_data: the LED class flash device initialization data * * Returns: 0 on success or negative error value on failure */ -int led_classdev_flash_register_ext(struct device *parent, - struct led_classdev_flash *fled_cdev, - struct led_init_data *init_data); +extern int led_classdev_flash_register(struct device *parent, + struct led_classdev_flash *fled_cdev); /** * led_classdev_flash_unregister - unregisters an object of led_classdev class @@ -106,50 +107,7 @@ int led_classdev_flash_register_ext(struct device *parent, * * Unregister a previously registered via led_classdev_flash_register object */ -void led_classdev_flash_unregister(struct led_classdev_flash *fled_cdev); - -int devm_led_classdev_flash_register_ext(struct device *parent, - struct led_classdev_flash *fled_cdev, - struct led_init_data *init_data); - - -void devm_led_classdev_flash_unregister(struct device *parent, - struct led_classdev_flash *fled_cdev); - -#else - -static inline int led_classdev_flash_register_ext(struct device *parent, - struct led_classdev_flash *fled_cdev, - struct led_init_data *init_data) -{ - return 0; -} - -static inline void led_classdev_flash_unregister(struct led_classdev_flash *fled_cdev) {}; -static inline int devm_led_classdev_flash_register_ext(struct device *parent, - struct led_classdev_flash 
*fled_cdev, - struct led_init_data *init_data) -{ - return 0; -} - -static inline void devm_led_classdev_flash_unregister(struct device *parent, - struct led_classdev_flash *fled_cdev) -{}; - -#endif /* IS_ENABLED(CONFIG_LEDS_CLASS_FLASH) */ - -static inline int led_classdev_flash_register(struct device *parent, - struct led_classdev_flash *fled_cdev) -{ - return led_classdev_flash_register_ext(parent, fled_cdev, NULL); -} - -static inline int devm_led_classdev_flash_register(struct device *parent, - struct led_classdev_flash *fled_cdev) -{ - return devm_led_classdev_flash_register_ext(parent, fled_cdev, NULL); -} +extern void led_classdev_flash_unregister(struct led_classdev_flash *fled_cdev); /** * led_set_flash_strobe - setup flash strobe @@ -163,8 +121,6 @@ static inline int devm_led_classdev_flash_register(struct device *parent, static inline int led_set_flash_strobe(struct led_classdev_flash *fled_cdev, bool state) { - if (!fled_cdev) - return -EINVAL; return fled_cdev->ops->strobe_set(fled_cdev, state); } @@ -180,8 +136,6 @@ static inline int led_set_flash_strobe(struct led_classdev_flash *fled_cdev, static inline int led_get_flash_strobe(struct led_classdev_flash *fled_cdev, bool *state) { - if (!fled_cdev) - return -EINVAL; if (fled_cdev->ops->strobe_get) return fled_cdev->ops->strobe_get(fled_cdev, state); @@ -197,8 +151,8 @@ static inline int led_get_flash_strobe(struct led_classdev_flash *fled_cdev, * * Returns: 0 on success or negative error value on failure */ -int led_set_flash_brightness(struct led_classdev_flash *fled_cdev, - u32 brightness); +extern int led_set_flash_brightness(struct led_classdev_flash *fled_cdev, + u32 brightness); /** * led_update_flash_brightness - update flash LED brightness @@ -209,7 +163,7 @@ int led_set_flash_brightness(struct led_classdev_flash *fled_cdev, * * Returns: 0 on success or negative error value on failure */ -int led_update_flash_brightness(struct led_classdev_flash *fled_cdev); +extern int 
led_update_flash_brightness(struct led_classdev_flash *fled_cdev); /** * led_set_flash_timeout - set flash LED timeout @@ -220,7 +174,8 @@ int led_update_flash_brightness(struct led_classdev_flash *fled_cdev); * * Returns: 0 on success or negative error value on failure */ -int led_set_flash_timeout(struct led_classdev_flash *fled_cdev, u32 timeout); +extern int led_set_flash_timeout(struct led_classdev_flash *fled_cdev, + u32 timeout); /** * led_get_flash_fault - get the flash LED fault @@ -231,6 +186,7 @@ int led_set_flash_timeout(struct led_classdev_flash *fled_cdev, u32 timeout); * * Returns: 0 on success or negative error value on failure */ -int led_get_flash_fault(struct led_classdev_flash *fled_cdev, u32 *fault); +extern int led_get_flash_fault(struct led_classdev_flash *fled_cdev, + u32 *fault); #endif /* __LINUX_FLASH_LEDS_H_INCLUDED */ diff --git a/include/linux/led-lm3530.h b/include/linux/led-lm3530.h index 811f7ce4e2..4b133479d6 100644 --- a/include/linux/led-lm3530.h +++ b/include/linux/led-lm3530.h @@ -1,8 +1,9 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright (C) 2011 ST-Ericsson SA. * Copyright (C) 2009 Motorola, Inc. * + * License Terms: GNU General Public License v2 + * * Simple driver for National Semiconductor LM35330 Backlight driver chip * * Author: Shreshtha Kumar SAHU diff --git a/include/linux/leds-bd2802.h b/include/linux/leds-bd2802.h index ec577f5f87..42f854a1a1 100644 --- a/include/linux/leds-bd2802.h +++ b/include/linux/leds-bd2802.h @@ -1,16 +1,21 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * leds-bd2802.h - RGB LED Driver * * Copyright (C) 2009 Samsung Electronics * Kim Kyuwon * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ * * Datasheet: http://www.rohm.com/products/databook/driver/pdf/bd2802gu-e.pdf + * */ #ifndef _LEDS_BD2802_H_ #define _LEDS_BD2802_H_ struct bd2802_led_platform_data{ + int reset_gpio; u8 rgb_time; }; diff --git a/include/linux/leds-lp3944.h b/include/linux/leds-lp3944.h index f681fefff2..2618aa9063 100644 --- a/include/linux/leds-lp3944.h +++ b/include/linux/leds-lp3944.h @@ -1,8 +1,12 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * leds-lp3944.h - platform data structure for lp3944 led controller * * Copyright (C) 2009 Antonio Ospite + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * */ #ifndef __LINUX_LEDS_LP3944_H diff --git a/include/linux/leds-lp3952.h b/include/linux/leds-lp3952.h index 937ae5f2ea..49b37ed8d4 100644 --- a/include/linux/leds-lp3952.h +++ b/include/linux/leds-lp3952.h @@ -1,9 +1,13 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * LED driver for TI lp3952 controller * * Copyright (C) 2016, DAQRI, LLC. * Author: Tony Makkiel + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * */ #ifndef LEDS_LP3952_H_ diff --git a/include/linux/leds-pca9532.h b/include/linux/leds-pca9532.h index f4796d3339..d215b45611 100644 --- a/include/linux/leds-pca9532.h +++ b/include/linux/leds-pca9532.h @@ -1,10 +1,14 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * pca9532.h - platform data structure for pca9532 led controller * * Copyright (C) 2008 Riku Voipio * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; version 2 of the License. 
+ * * Datasheet: http://www.nxp.com/acrobat/datasheets/PCA9532_3.pdf + * */ #ifndef __LINUX_PCA9532_H @@ -18,8 +22,7 @@ enum pca9532_state { PCA9532_OFF = 0x0, PCA9532_ON = 0x1, PCA9532_PWM0 = 0x2, - PCA9532_PWM1 = 0x3, - PCA9532_KEEP = 0xff, + PCA9532_PWM1 = 0x3 }; struct pca9532_led { @@ -41,3 +44,4 @@ struct pca9532_platform_data { }; #endif /* __LINUX_PCA9532_H */ + diff --git a/include/linux/leds-regulator.h b/include/linux/leds-regulator.h index 899f816073..e2337a8c90 100644 --- a/include/linux/leds-regulator.h +++ b/include/linux/leds-regulator.h @@ -1,8 +1,12 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * leds-regulator.h - platform data structure for regulator driven LEDs. * * Copyright (C) 2009 Antonio Ospite + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * */ #ifndef __LINUX_LEDS_REGULATOR_H diff --git a/include/linux/leds-tca6507.h b/include/linux/leds-tca6507.h new file mode 100644 index 0000000000..dcabf4fa2a --- /dev/null +++ b/include/linux/leds-tca6507.h @@ -0,0 +1,34 @@ +/* + * TCA6507 LED chip driver. + * + * Copyright (C) 2011 Neil Brown + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA + * 02110-1301 USA + */ + +#ifndef __LINUX_TCA6507_H +#define __LINUX_TCA6507_H +#include + +struct tca6507_platform_data { + struct led_platform_data leds; +#ifdef CONFIG_GPIOLIB + int gpio_base; + void (*setup)(unsigned gpio_base, unsigned ngpio); +#endif +}; + +#define TCA6507_MAKE_GPIO 1 +#endif /* __LINUX_TCA6507_H*/ diff --git a/include/linux/leds.h b/include/linux/leds.h index cb9a65b4b0..ddfcb2df36 100644 --- a/include/linux/leds.h +++ b/include/linux/leds.h @@ -1,16 +1,18 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * Driver model for leds and led triggers * * Copyright (C) 2005 John Lenz * Copyright (C) 2005 Richard Purdie + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * */ #ifndef __LINUX_LEDS_H_INCLUDED #define __LINUX_LEDS_H_INCLUDED -#include #include -#include #include #include #include @@ -19,85 +21,37 @@ #include struct device; -struct led_pattern; -struct device_node; /* * LED Core */ -/* This is obsolete/useless. We now support variable maximum brightness. */ enum led_brightness { LED_OFF = 0, - LED_ON = 1, LED_HALF = 127, LED_FULL = 255, }; -enum led_default_state { - LEDS_DEFSTATE_OFF = 0, - LEDS_DEFSTATE_ON = 1, - LEDS_DEFSTATE_KEEP = 2, -}; - -struct led_init_data { - /* device fwnode handle */ - struct fwnode_handle *fwnode; - /* - * default tuple, for backward compatibility - * with in-driver hard-coded LED names used as a fallback when - * DT "label" property is absent; it should be set to NULL - * in new LED class drivers. 
- */ - const char *default_label; - /* - * string to be used for devicename section of LED class device - * either for label based LED name composition path or for fwnode - * based when devname_mandatory is true - */ - const char *devicename; - /* - * indicates if LED name should always comprise devicename section; - * only LEDs exposed by drivers of hot-pluggable devices should - * set it to true - */ - bool devname_mandatory; -}; - -struct led_hw_trigger_type { - int dummy; -}; - struct led_classdev { const char *name; - unsigned int brightness; - unsigned int max_brightness; + enum led_brightness brightness; + enum led_brightness max_brightness; int flags; /* Lower 16 bits reflect status */ -#define LED_SUSPENDED BIT(0) -#define LED_UNREGISTERING BIT(1) +#define LED_SUSPENDED (1 << 0) +#define LED_UNREGISTERING (1 << 1) /* Upper 16 bits reflect control information */ -#define LED_CORE_SUSPENDRESUME BIT(16) -#define LED_SYSFS_DISABLE BIT(17) -#define LED_DEV_CAP_FLASH BIT(18) -#define LED_HW_PLUGGABLE BIT(19) -#define LED_PANIC_INDICATOR BIT(20) -#define LED_BRIGHT_HW_CHANGED BIT(21) -#define LED_RETAIN_AT_SHUTDOWN BIT(22) -#define LED_INIT_DEFAULT_TRIGGER BIT(23) - /* Additions for Raspberry Pi PWR LED */ -#define SET_GPIO_INPUT BIT(30) -#define SET_GPIO_OUTPUT BIT(31) - - /* set_brightness_work / blink_timer flags, atomic, private. 
*/ - unsigned long work_flags; - -#define LED_BLINK_SW 0 -#define LED_BLINK_ONESHOT 1 -#define LED_BLINK_ONESHOT_STOP 2 -#define LED_BLINK_INVERT 3 -#define LED_BLINK_BRIGHTNESS_CHANGE 4 -#define LED_BLINK_DISABLE 5 +#define LED_CORE_SUSPENDRESUME (1 << 16) +#define LED_BLINK_SW (1 << 17) +#define LED_BLINK_ONESHOT (1 << 18) +#define LED_BLINK_ONESHOT_STOP (1 << 19) +#define LED_BLINK_INVERT (1 << 20) +#define LED_BLINK_BRIGHTNESS_CHANGE (1 << 21) +#define LED_BLINK_DISABLE (1 << 22) +#define LED_SYSFS_DISABLE (1 << 23) +#define LED_DEV_CAP_FLASH (1 << 24) +#define LED_HW_PLUGGABLE (1 << 25) +#define LED_PANIC_INDICATOR (1 << 26) /* Set LED brightness level * Must not sleep. Use brightness_set_blocking for drivers @@ -126,10 +80,6 @@ struct led_classdev { unsigned long *delay_on, unsigned long *delay_off); - int (*pattern_set)(struct led_classdev *led_cdev, - struct led_pattern *pattern, u32 len, int repeat); - int (*pattern_clear)(struct led_classdev *led_cdev); - struct device *dev; const struct attribute_group **groups; @@ -139,7 +89,6 @@ struct led_classdev { unsigned long blink_delay_on, blink_delay_off; struct timer_list blink_timer; int blink_brightness; - int new_blink_brightness; void (*flash_resume)(struct led_classdev *led_cdev); struct work_struct set_brightness_work; @@ -154,70 +103,21 @@ struct led_classdev { void *trigger_data; /* true if activated - deactivate routine uses it to do cleanup */ bool activated; - - /* LEDs that have private triggers have this set */ - struct led_hw_trigger_type *trigger_type; -#endif - -#ifdef CONFIG_LEDS_BRIGHTNESS_HW_CHANGED - int brightness_hw_changed; - struct kernfs_node *brightness_hw_changed_kn; #endif /* Ensures consistent access to the LED Flash Class device */ struct mutex led_access; }; -/** - * led_classdev_register_ext - register a new object of LED class with - * init data - * @parent: LED controller device this LED is driven by - * @led_cdev: the led_classdev structure for this device - * @init_data: the 
LED class device initialization data - * - * Register a new object of LED class, with name derived from init_data. - * - * Returns: 0 on success or negative error value on failure - */ -int led_classdev_register_ext(struct device *parent, - struct led_classdev *led_cdev, - struct led_init_data *init_data); - -/** - * led_classdev_register - register a new object of LED class - * @parent: LED controller device this LED is driven by - * @led_cdev: the led_classdev structure for this device - * - * Register a new object of LED class, with name derived from the name property - * of passed led_cdev argument. - * - * Returns: 0 on success or negative error value on failure - */ -static inline int led_classdev_register(struct device *parent, - struct led_classdev *led_cdev) -{ - return led_classdev_register_ext(parent, led_cdev, NULL); -} - -int devm_led_classdev_register_ext(struct device *parent, - struct led_classdev *led_cdev, - struct led_init_data *init_data); - -static inline int devm_led_classdev_register(struct device *parent, - struct led_classdev *led_cdev) -{ - return devm_led_classdev_register_ext(parent, led_cdev, NULL); -} -void led_classdev_unregister(struct led_classdev *led_cdev); -void devm_led_classdev_unregister(struct device *parent, - struct led_classdev *led_cdev); -void led_classdev_suspend(struct led_classdev *led_cdev); -void led_classdev_resume(struct led_classdev *led_cdev); - -extern struct led_classdev *of_led_get(struct device_node *np, int index); -extern void led_put(struct led_classdev *led_cdev); -struct led_classdev *__must_check devm_of_led_get(struct device *dev, - int index); +extern int led_classdev_register(struct device *parent, + struct led_classdev *led_cdev); +extern int devm_led_classdev_register(struct device *parent, + struct led_classdev *led_cdev); +extern void led_classdev_unregister(struct led_classdev *led_cdev); +extern void devm_led_classdev_unregister(struct device *parent, + struct led_classdev *led_cdev); +extern 
void led_classdev_suspend(struct led_classdev *led_cdev); +extern void led_classdev_resume(struct led_classdev *led_cdev); /** * led_blink_set - set blinking with software fallback @@ -234,8 +134,9 @@ struct led_classdev *__must_check devm_of_led_get(struct device *dev, * led_cdev->brightness_set() will not stop the blinking, * use led_classdev_brightness_set() instead. */ -void led_blink_set(struct led_classdev *led_cdev, unsigned long *delay_on, - unsigned long *delay_off); +extern void led_blink_set(struct led_classdev *led_cdev, + unsigned long *delay_on, + unsigned long *delay_off); /** * led_blink_set_oneshot - do a oneshot software blink * @led_cdev: the LED to start blinking @@ -250,9 +151,10 @@ void led_blink_set(struct led_classdev *led_cdev, unsigned long *delay_on, * If invert is set, led blinks for delay_off first, then for * delay_on and leave the led on after the on-off cycle. */ -void led_blink_set_oneshot(struct led_classdev *led_cdev, - unsigned long *delay_on, unsigned long *delay_off, - int invert); +extern void led_blink_set_oneshot(struct led_classdev *led_cdev, + unsigned long *delay_on, + unsigned long *delay_off, + int invert); /** * led_set_brightness - set LED brightness * @led_cdev: the LED to set @@ -262,12 +164,13 @@ void led_blink_set_oneshot(struct led_classdev *led_cdev, * software blink timer that implements blinking when the * hardware doesn't. This function is guaranteed not to sleep. */ -void led_set_brightness(struct led_classdev *led_cdev, unsigned int brightness); +extern void led_set_brightness(struct led_classdev *led_cdev, + enum led_brightness brightness); /** * led_set_brightness_sync - set LED brightness synchronously * @led_cdev: the LED to set - * @value: the brightness to set it to + * @brightness: the brightness to set it to * * Set an LED's brightness immediately. 
This function will block * the caller for the time required for accessing device registers, @@ -275,7 +178,8 @@ void led_set_brightness(struct led_classdev *led_cdev, unsigned int brightness); * * Returns: 0 on success or negative error value on failure */ -int led_set_brightness_sync(struct led_classdev *led_cdev, unsigned int value); +extern int led_set_brightness_sync(struct led_classdev *led_cdev, + enum led_brightness value); /** * led_update_brightness - update LED brightness @@ -286,19 +190,7 @@ int led_set_brightness_sync(struct led_classdev *led_cdev, unsigned int value); * * Returns: 0 on success or negative error value on failure */ -int led_update_brightness(struct led_classdev *led_cdev); - -/** - * led_get_default_pattern - return default pattern - * - * @led_cdev: the LED to get default pattern for - * @size: pointer for storing the number of elements in returned array, - * modified only if return != NULL - * - * Return: Allocated array of integers with default pattern from device tree - * or NULL. Caller is responsible for kfree(). - */ -u32 *led_get_default_pattern(struct led_classdev *led_cdev, unsigned int *size); +extern int led_update_brightness(struct led_classdev *led_cdev); /** * led_sysfs_disable - disable LED sysfs interface @@ -306,7 +198,7 @@ u32 *led_get_default_pattern(struct led_classdev *led_cdev, unsigned int *size); * * Disable the led_cdev's sysfs interface. */ -void led_sysfs_disable(struct led_classdev *led_cdev); +extern void led_sysfs_disable(struct led_classdev *led_cdev); /** * led_sysfs_enable - enable LED sysfs interface @@ -314,22 +206,7 @@ void led_sysfs_disable(struct led_classdev *led_cdev); * * Enable the led_cdev's sysfs interface. 
*/ -void led_sysfs_enable(struct led_classdev *led_cdev); - -/** - * led_compose_name - compose LED class device name - * @dev: LED controller device object - * @init_data: the LED class device initialization data - * @led_classdev_name: composed LED class device name - * - * Create LED class device name basing on the provided init_data argument. - * The name can have or . - * form, depending on the init_data configuration. - * - * Returns: 0 on success or negative error value on failure - */ -int led_compose_name(struct device *dev, struct led_init_data *init_data, - char *led_classdev_name); +extern void led_sysfs_enable(struct led_classdev *led_cdev); /** * led_sysfs_is_disabled - check if LED sysfs interface is disabled @@ -356,56 +233,44 @@ static inline bool led_sysfs_is_disabled(struct led_classdev *led_cdev) struct led_trigger { /* Trigger Properties */ const char *name; - int (*activate)(struct led_classdev *led_cdev); + void (*activate)(struct led_classdev *led_cdev); void (*deactivate)(struct led_classdev *led_cdev); - /* LED-private triggers have this set */ - struct led_hw_trigger_type *trigger_type; - /* LEDs under control by this trigger (for simple triggers) */ rwlock_t leddev_list_lock; struct list_head led_cdevs; /* Link to next registered trigger */ struct list_head next_trig; - - const struct attribute_group **groups; }; -/* - * Currently the attributes in struct led_trigger::groups are added directly to - * the LED device. As this might change in the future, the following - * macros abstract getting the LED device and its trigger_data from the dev - * parameter passed to the attribute accessor functions. 
- */ -#define led_trigger_get_led(dev) ((struct led_classdev *)dev_get_drvdata((dev))) -#define led_trigger_get_drvdata(dev) (led_get_trigger_data(led_trigger_get_led(dev))) +ssize_t led_trigger_store(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count); +ssize_t led_trigger_show(struct device *dev, struct device_attribute *attr, + char *buf); /* Registration functions for complex triggers */ -int led_trigger_register(struct led_trigger *trigger); -void led_trigger_unregister(struct led_trigger *trigger); -int devm_led_trigger_register(struct device *dev, +extern int led_trigger_register(struct led_trigger *trigger); +extern void led_trigger_unregister(struct led_trigger *trigger); +extern int devm_led_trigger_register(struct device *dev, struct led_trigger *trigger); -void led_trigger_register_simple(const char *name, +extern void led_trigger_register_simple(const char *name, struct led_trigger **trigger); -void led_trigger_unregister_simple(struct led_trigger *trigger); -void led_trigger_event(struct led_trigger *trigger, enum led_brightness event); -void led_trigger_blink(struct led_trigger *trigger, unsigned long *delay_on, - unsigned long *delay_off); -void led_trigger_blink_oneshot(struct led_trigger *trigger, - unsigned long *delay_on, - unsigned long *delay_off, - int invert); -void led_trigger_set_default(struct led_classdev *led_cdev); -int led_trigger_set(struct led_classdev *led_cdev, struct led_trigger *trigger); -void led_trigger_remove(struct led_classdev *led_cdev); - -static inline void led_set_trigger_data(struct led_classdev *led_cdev, - void *trigger_data) -{ - led_cdev->trigger_data = trigger_data; -} +extern void led_trigger_unregister_simple(struct led_trigger *trigger); +extern void led_trigger_event(struct led_trigger *trigger, + enum led_brightness event); +extern void led_trigger_blink(struct led_trigger *trigger, + unsigned long *delay_on, + unsigned long *delay_off); +extern void 
led_trigger_blink_oneshot(struct led_trigger *trigger, + unsigned long *delay_on, + unsigned long *delay_off, + int invert); +extern void led_trigger_set_default(struct led_classdev *led_cdev); +extern void led_trigger_set(struct led_classdev *led_cdev, + struct led_trigger *trigger); +extern void led_trigger_remove(struct led_classdev *led_cdev); static inline void *led_get_trigger_data(struct led_classdev *led_cdev) { @@ -427,11 +292,8 @@ static inline void *led_get_trigger_data(struct led_classdev *led_cdev) * This is meant to be used on triggers with statically * allocated name. */ -void led_trigger_rename_static(const char *name, struct led_trigger *trig); - -#define module_led_trigger(__led_trigger) \ - module_driver(__led_trigger, led_trigger_register, \ - led_trigger_unregister) +extern void led_trigger_rename_static(const char *name, + struct led_trigger *trig); #else @@ -452,14 +314,9 @@ static inline void led_trigger_blink_oneshot(struct led_trigger *trigger, unsigned long *delay_off, int invert) {} static inline void led_trigger_set_default(struct led_classdev *led_cdev) {} -static inline int led_trigger_set(struct led_classdev *led_cdev, - struct led_trigger *trigger) -{ - return 0; -} - +static inline void led_trigger_set(struct led_classdev *led_cdev, + struct led_trigger *trigger) {} static inline void led_trigger_remove(struct led_classdev *led_cdev) {} -static inline void led_set_trigger_data(struct led_classdev *led_cdev) {} static inline void *led_get_trigger_data(struct led_classdev *led_cdev) { return NULL; @@ -469,20 +326,20 @@ static inline void *led_get_trigger_data(struct led_classdev *led_cdev) /* Trigger specific functions */ #ifdef CONFIG_LEDS_TRIGGER_DISK -void ledtrig_disk_activity(bool write); +extern void ledtrig_disk_activity(void); #else -static inline void ledtrig_disk_activity(bool write) {} +static inline void ledtrig_disk_activity(void) {} #endif #ifdef CONFIG_LEDS_TRIGGER_MTD -void ledtrig_mtd_activity(void); +extern void 
ledtrig_mtd_activity(void); #else static inline void ledtrig_mtd_activity(void) {} #endif #if defined(CONFIG_LEDS_TRIGGER_CAMERA) || defined(CONFIG_LEDS_TRIGGER_CAMERA_MODULE) -void ledtrig_flash_ctrl(bool on); -void ledtrig_torch_ctrl(bool on); +extern void ledtrig_flash_ctrl(bool on); +extern void ledtrig_torch_ctrl(bool on); #else static inline void ledtrig_flash_ctrl(bool on) {} static inline void ledtrig_torch_ctrl(bool on) {} @@ -502,15 +359,6 @@ struct led_platform_data { struct led_info *leds; }; -struct led_properties { - u32 color; - bool color_present; - const char *function; - u32 func_enum; - bool func_enum_present; - const char *label; -}; - struct gpio_desc; typedef int (*gpio_blink_set_t)(struct gpio_desc *desc, int state, unsigned long *delay_on, @@ -525,13 +373,12 @@ struct gpio_led { unsigned retain_state_suspended : 1; unsigned panic_indicator : 1; unsigned default_state : 2; - unsigned retain_state_shutdown : 1; /* default_state should be one of LEDS_GPIO_DEFSTATE_(ON|OFF|KEEP) */ struct gpio_desc *gpiod; }; -#define LEDS_GPIO_DEFSTATE_OFF LEDS_DEFSTATE_OFF -#define LEDS_GPIO_DEFSTATE_ON LEDS_DEFSTATE_ON -#define LEDS_GPIO_DEFSTATE_KEEP LEDS_DEFSTATE_KEEP +#define LEDS_GPIO_DEFSTATE_OFF 0 +#define LEDS_GPIO_DEFSTATE_ON 1 +#define LEDS_GPIO_DEFSTATE_KEEP 2 struct gpio_led_platform_data { int num_leds; @@ -562,7 +409,7 @@ enum cpu_led_event { CPU_LED_HALTED, /* Machine shutdown */ }; #ifdef CONFIG_LEDS_TRIGGER_CPU -void ledtrig_cpu(enum cpu_led_event evt); +extern void ledtrig_cpu(enum cpu_led_event evt); #else static inline void ledtrig_cpu(enum cpu_led_event evt) { @@ -570,42 +417,4 @@ static inline void ledtrig_cpu(enum cpu_led_event evt) } #endif -#ifdef CONFIG_LEDS_BRIGHTNESS_HW_CHANGED -void led_classdev_notify_brightness_hw_changed( - struct led_classdev *led_cdev, unsigned int brightness); -#else -static inline void led_classdev_notify_brightness_hw_changed( - struct led_classdev *led_cdev, enum led_brightness brightness) { } -#endif - 
-/** - * struct led_pattern - pattern interval settings - * @delta_t: pattern interval delay, in milliseconds - * @brightness: pattern interval brightness - */ -struct led_pattern { - u32 delta_t; - int brightness; -}; - -enum led_audio { - LED_AUDIO_MUTE, /* master mute LED */ - LED_AUDIO_MICMUTE, /* mic mute LED */ - NUM_AUDIO_LEDS -}; - -#if IS_ENABLED(CONFIG_LEDS_TRIGGER_AUDIO) -enum led_brightness ledtrig_audio_get(enum led_audio type); -void ledtrig_audio_set(enum led_audio type, enum led_brightness state); -#else -static inline enum led_brightness ledtrig_audio_get(enum led_audio type) -{ - return LED_OFF; -} -static inline void ledtrig_audio_set(enum led_audio type, - enum led_brightness state) -{ -} -#endif - #endif /* __LINUX_LEDS_H_INCLUDED */ diff --git a/include/linux/leds_pwm.h b/include/linux/leds_pwm.h new file mode 100644 index 0000000000..a65e9646e4 --- /dev/null +++ b/include/linux/leds_pwm.h @@ -0,0 +1,21 @@ +/* + * PWM LED driver data - see drivers/leds/leds-pwm.c + */ +#ifndef __LINUX_LEDS_PWM_H +#define __LINUX_LEDS_PWM_H + +struct led_pwm { + const char *name; + const char *default_trigger; + unsigned pwm_id __deprecated; + u8 active_low; + unsigned max_brightness; + unsigned pwm_period_ns; +}; + +struct led_pwm_platform_data { + int num_leds; + struct led_pwm *leds; +}; + +#endif diff --git a/include/linux/lguest.h b/include/linux/lguest.h new file mode 100644 index 0000000000..6db19f35f7 --- /dev/null +++ b/include/linux/lguest.h @@ -0,0 +1,73 @@ +/* + * Things the lguest guest needs to know. Note: like all lguest interfaces, + * this is subject to wild and random change between versions. + */ +#ifndef _LINUX_LGUEST_H +#define _LINUX_LGUEST_H + +#ifndef __ASSEMBLY__ +#include +#include +#include + +#define LG_CLOCK_MIN_DELTA 100UL +#define LG_CLOCK_MAX_DELTA ULONG_MAX + +/*G:031 + * The second method of communicating with the Host is to via "struct + * lguest_data". 
Once the Guest's initialization hypercall tells the Host where + * this is, the Guest and Host both publish information in it. +:*/ +struct lguest_data { + /* + * 512 == enabled (same as eflags in normal hardware). The Guest + * changes interrupts so often that a hypercall is too slow. + */ + unsigned int irq_enabled; + /* Fine-grained interrupt disabling by the Guest */ + DECLARE_BITMAP(blocked_interrupts, LGUEST_IRQS); + + /* + * The Host writes the virtual address of the last page fault here, + * which saves the Guest a hypercall. CR2 is the native register where + * this address would normally be found. + */ + unsigned long cr2; + + /* Wallclock time set by the Host. */ + struct timespec time; + + /* + * Interrupt pending set by the Host. The Guest should do a hypercall + * if it re-enables interrupts and sees this set (to X86_EFLAGS_IF). + */ + int irq_pending; + + /* + * Async hypercall ring. Instead of directly making hypercalls, we can + * place them in here for processing the next time the Host wants. + * This batching can be quite efficient. + */ + + /* 0xFF == done (set by Host), 0 == pending (set by Guest). */ + u8 hcall_status[LHCALL_RING_SIZE]; + /* The actual registers for the hypercalls. */ + struct hcall_args hcalls[LHCALL_RING_SIZE]; + +/* Fields initialized by the Host at boot: */ + /* Memory not to try to access */ + unsigned long reserve_mem; + /* KHz for the TSC clock. */ + u32 tsc_khz; + +/* Fields initialized by the Guest at boot: */ + /* Instruction to suppress interrupts even if enabled */ + unsigned long noirq_iret; + /* Address above which page tables are all identical. */ + unsigned long kernel_address; + /* The vector to try to use for system calls (0x40 or 0x80). 
*/ + unsigned int syscall_vec; +}; +extern struct lguest_data lguest_data; +#endif /* __ASSEMBLY__ */ +#endif /* _LINUX_LGUEST_H */ diff --git a/include/linux/lguest_launcher.h b/include/linux/lguest_launcher.h new file mode 100644 index 0000000000..acd5b12565 --- /dev/null +++ b/include/linux/lguest_launcher.h @@ -0,0 +1,44 @@ +#ifndef _LINUX_LGUEST_LAUNCHER +#define _LINUX_LGUEST_LAUNCHER +/* Everything the "lguest" userspace program needs to know. */ +#include + +/*D:010 + * Drivers + * + * The Guest needs devices to do anything useful. Since we don't let it touch + * real devices (think of the damage it could do!) we provide virtual devices. + * We emulate a PCI bus with virtio devices on it; we used to have our own + * lguest bus which was far simpler, but this tests the virtio 1.0 standard. + * + * Virtio devices are also used by kvm, so we can simply reuse their optimized + * device drivers. And one day when everyone uses virtio, my plan will be + * complete. Bwahahahah! + */ + +/* Write command first word is a request. */ +enum lguest_req +{ + LHREQ_INITIALIZE, /* + base, pfnlimit, start */ + LHREQ_GETDMA, /* No longer used */ + LHREQ_IRQ, /* + irq */ + LHREQ_BREAK, /* No longer used */ + LHREQ_EVENTFD, /* No longer used. */ + LHREQ_GETREG, /* + offset within struct pt_regs (then read value). */ + LHREQ_SETREG, /* + offset within struct pt_regs, value. */ + LHREQ_TRAP, /* + trap number to deliver to guest. */ +}; + +/* + * This is what read() of the lguest fd populates. trap == + * LGUEST_TRAP_ENTRY for an LHCALL_NOTIFY (addr is the + * argument), 14 for a page fault in the MMIO region (addr is + * the trap address, insn is the instruction), or 13 for a GPF + * (insn is the instruction). 
+ */ +struct lguest_pending { + __u8 trap; + __u8 insn[7]; + __u32 addr; +}; +#endif /* _LINUX_LGUEST_LAUNCHER */ diff --git a/include/linux/libata.h b/include/linux/libata.h index c0c64f03e1..8d208ebb25 100644 --- a/include/linux/libata.h +++ b/include/linux/libata.h @@ -1,10 +1,26 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * Copyright 2003-2005 Red Hat, Inc. All rights reserved. * Copyright 2003-2005 Jeff Garzik * + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2, or (at your option) + * any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; see the file COPYING. If not, write to + * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. + * + * * libata documentation is available via 'make {ps|pdf}docs', - * as Documentation/driver-api/libata.rst + * as Documentation/DocBook/libata.* + * */ #ifndef __LINUX_LIBATA_H__ @@ -22,7 +38,6 @@ #include #include #include -#include /* * Define if arch has non-standard setup. This is a _PCI_ standard @@ -58,6 +73,8 @@ #define VPRINTK(fmt, args...) #endif /* ATA_DEBUG */ +#define BPRINTK(fmt, args...) 
if (ap->flags & ATA_FLAG_DEBUGMSG) printk(KERN_ERR "%s: " fmt, __func__, ## args) + #define ata_print_version_once(dev, version) \ ({ \ static bool __print_once; \ @@ -108,8 +125,9 @@ enum { LIBATA_MAX_PRD = ATA_MAX_PRD / 2, LIBATA_DUMB_MAX_PRD = ATA_MAX_PRD / 4, /* Worst case */ ATA_DEF_QUEUE = 1, + /* tag ATA_MAX_QUEUE - 1 is reserved for internal commands */ ATA_MAX_QUEUE = 32, - ATA_TAG_INTERNAL = ATA_MAX_QUEUE, + ATA_TAG_INTERNAL = ATA_MAX_QUEUE - 1, ATA_SHORT_PAUSE = 16, ATAPI_MAX_DRAIN = 16 << 10, @@ -118,6 +136,7 @@ enum { ATA_SHT_EMULATED = 1, ATA_SHT_THIS_ID = -1, + ATA_SHT_USE_CLUSTERING = 1, /* struct ata_taskfile flags */ ATA_TFLAG_LBA48 = (1 << 0), /* enable 48-bit LBA and "HOB" */ @@ -137,7 +156,6 @@ enum { ATA_DFLAG_ACPI_PENDING = (1 << 5), /* ACPI resume action pending */ ATA_DFLAG_ACPI_FAILED = (1 << 6), /* ACPI on devcfg has failed */ ATA_DFLAG_AN = (1 << 7), /* AN configured */ - ATA_DFLAG_TRUSTED = (1 << 8), /* device supports trusted send/recv */ ATA_DFLAG_DMADIR = (1 << 10), /* device requires DMADIR */ ATA_DFLAG_CFG_MASK = (1 << 12) - 1, @@ -148,8 +166,6 @@ enum { ATA_DFLAG_NO_UNLOAD = (1 << 17), /* device doesn't support unload */ ATA_DFLAG_UNLOCK_HPA = (1 << 18), /* unlock HPA */ ATA_DFLAG_NCQ_SEND_RECV = (1 << 19), /* device supports NCQ SEND and RECV */ - ATA_DFLAG_NCQ_PRIO = (1 << 20), /* device supports NCQ priority */ - ATA_DFLAG_NCQ_PRIO_ENABLE = (1 << 21), /* Priority cmds sent to dev */ ATA_DFLAG_INIT_MASK = (1 << 24) - 1, ATA_DFLAG_DETACH = (1 << 24), @@ -161,10 +177,6 @@ enum { ATA_DFLAG_D_SENSE = (1 << 29), /* Descriptor sense requested */ ATA_DFLAG_ZAC = (1 << 30), /* ZAC device */ - ATA_DFLAG_FEATURES_MASK = ATA_DFLAG_TRUSTED | ATA_DFLAG_DA | \ - ATA_DFLAG_DEVSLP | ATA_DFLAG_NCQ_SEND_RECV | \ - ATA_DFLAG_NCQ_PRIO, - ATA_DEV_UNKNOWN = 0, /* unknown device */ ATA_DEV_ATA = 1, /* ATA device */ ATA_DEV_ATA_UNSUP = 2, /* ATA device (unsupported) */ @@ -179,7 +191,6 @@ enum { ATA_DEV_NONE = 11, /* no device */ /* struct ata_link 
flags */ - /* NOTE: struct ata_force_param currently stores lflags in u16 */ ATA_LFLAG_NO_HRST = (1 << 1), /* avoid hardreset */ ATA_LFLAG_NO_SRST = (1 << 2), /* avoid softreset */ ATA_LFLAG_ASSUME_ATA = (1 << 3), /* assume ATA class */ @@ -197,7 +208,6 @@ enum { ATA_FLAG_SLAVE_POSS = (1 << 0), /* host supports slave dev */ /* (doesn't imply presence) */ ATA_FLAG_SATA = (1 << 1), - ATA_FLAG_NO_LPM = (1 << 2), /* host not happy with LPM */ ATA_FLAG_NO_LOG_PAGE = (1 << 5), /* do not issue log page read */ ATA_FLAG_NO_ATAPI = (1 << 6), /* No ATAPI support */ ATA_FLAG_PIO_DMA = (1 << 7), /* PIO cmds via DMA */ @@ -332,9 +342,7 @@ enum { ATA_SHIFT_PIO = 0, ATA_SHIFT_MWDMA = ATA_SHIFT_PIO + ATA_NR_PIO_MODES, ATA_SHIFT_UDMA = ATA_SHIFT_MWDMA + ATA_NR_MWDMA_MODES, - ATA_SHIFT_PRIO = 6, - ATA_PRIO_HIGH = 2, /* size of buffer to pad xfers ending on unaligned boundaries */ ATA_DMA_PAD_SZ = 4, @@ -422,11 +430,9 @@ enum { ATA_HORKAGE_NOLPM = (1 << 20), /* don't use LPM */ ATA_HORKAGE_WD_BROKEN_LPM = (1 << 21), /* some WDs have broken LPM */ ATA_HORKAGE_ZERO_AFTER_TRIM = (1 << 22),/* guarantees zero after trim */ - ATA_HORKAGE_NO_DMA_LOG = (1 << 23), /* don't use DMA for log read */ + ATA_HORKAGE_NO_NCQ_LOG = (1 << 23), /* don't use NCQ for log read */ ATA_HORKAGE_NOTRIM = (1 << 24), /* don't use TRIM */ ATA_HORKAGE_MAX_SEC_1024 = (1 << 25), /* Limit max sects to 1024 */ - ATA_HORKAGE_MAX_TRIM_128M = (1 << 26), /* Limit max trim size to 128M */ - ATA_HORKAGE_NO_NCQ_ON_ATI = (1 << 27), /* Disable NCQ on ATI chipset */ /* DMA mask for user DMA control: User visible values; DO NOT renumber */ @@ -490,7 +496,6 @@ enum hsm_task_states { }; enum ata_completion_errors { - AC_ERR_OK = 0, /* no error */ AC_ERR_DEV = (1 << 0), /* device reported error */ AC_ERR_HSM = (1 << 1), /* host state machine violation */ AC_ERR_TIMEOUT = (1 << 2), /* timeout */ @@ -512,9 +517,7 @@ enum ata_lpm_policy { ATA_LPM_UNKNOWN, ATA_LPM_MAX_POWER, ATA_LPM_MED_POWER, - ATA_LPM_MED_POWER_WITH_DIPM, /* Med 
power + DIPM as win IRST does */ - ATA_LPM_MIN_POWER_WITH_PARTIAL, /* Min Power + partial and slumber */ - ATA_LPM_MIN_POWER, /* Min power + no partial (slumber only) */ + ATA_LPM_MIN_POWER, }; enum ata_lpm_hints { @@ -537,15 +540,11 @@ typedef int (*ata_reset_fn_t)(struct ata_link *link, unsigned int *classes, unsigned long deadline); typedef void (*ata_postreset_fn_t)(struct ata_link *link, unsigned int *classes); -extern struct device_attribute dev_attr_unload_heads; -#ifdef CONFIG_SATA_HOST extern struct device_attribute dev_attr_link_power_management_policy; -extern struct device_attribute dev_attr_ncq_prio_supported; -extern struct device_attribute dev_attr_ncq_prio_enable; +extern struct device_attribute dev_attr_unload_heads; extern struct device_attribute dev_attr_em_message_type; extern struct device_attribute dev_attr_em_message; extern struct device_attribute dev_attr_sw_activity; -#endif enum sw_activity { OFF, @@ -611,13 +610,12 @@ struct ata_host { void *private_data; struct ata_port_operations *ops; unsigned long flags; - struct kref kref; struct mutex eh_mutex; struct task_struct *eh_owner; struct ata_port *simplex_claimed; /* channel owning the DMA */ - struct ata_port *ports[]; + struct ata_port *ports[0]; }; struct ata_queued_cmd { @@ -631,8 +629,7 @@ struct ata_queued_cmd { u8 cdb[ATAPI_CDB_LEN]; unsigned long flags; /* ATA_QCFLAG_xxx */ - unsigned int tag; /* libata core tag */ - unsigned int hw_tag; /* driver tag */ + unsigned int tag; unsigned int n_elem; unsigned int orig_n_elem; @@ -844,9 +841,9 @@ struct ata_port { unsigned int udma_mask; unsigned int cbl; /* cable type; ATA_CBL_xxx */ - struct ata_queued_cmd qcmd[ATA_MAX_QUEUE + 1]; + struct ata_queued_cmd qcmd[ATA_MAX_QUEUE]; unsigned long sas_tag_allocated; /* for sas tag allocation only */ - u64 qc_active; + unsigned int qc_active; int nr_active_links; /* #links with active qcs */ unsigned int sas_last_tag; /* track next tag hw expects */ @@ -880,8 +877,6 @@ struct ata_port { struct 
timer_list fastdrain_timer; unsigned long fastdrain_cnt; - async_cookie_t cookie; - int em_message_type; void *private_data; @@ -903,9 +898,9 @@ struct ata_port_operations { /* * Command execution */ - int (*qc_defer)(struct ata_queued_cmd *qc); - int (*check_atapi_dma)(struct ata_queued_cmd *qc); - enum ata_completion_errors (*qc_prep)(struct ata_queued_cmd *qc); + int (*qc_defer)(struct ata_queued_cmd *qc); + int (*check_atapi_dma)(struct ata_queued_cmd *qc); + void (*qc_prep)(struct ata_queued_cmd *qc); unsigned int (*qc_issue)(struct ata_queued_cmd *qc); bool (*qc_fill_rtf)(struct ata_queued_cmd *qc); @@ -968,7 +963,7 @@ struct ata_port_operations { void (*sff_tf_read)(struct ata_port *ap, struct ata_taskfile *tf); void (*sff_exec_command)(struct ata_port *ap, const struct ata_taskfile *tf); - unsigned int (*sff_data_xfer)(struct ata_queued_cmd *qc, + unsigned int (*sff_data_xfer)(struct ata_device *dev, unsigned char *buf, unsigned int buflen, int rw); void (*sff_irq_on)(struct ata_port *); bool (*sff_irq_check)(struct ata_port *); @@ -1003,7 +998,7 @@ struct ata_port_operations { * fields must be pointers. 
*/ const struct ata_port_operations *inherits; -}; +} __do_const; struct ata_port_info { unsigned long flags; @@ -1031,6 +1026,10 @@ struct ata_timing { /* * Core layer - drivers/ata/libata-core.c */ +extern const unsigned long sata_deb_timing_normal[]; +extern const unsigned long sata_deb_timing_hotplug[]; +extern const unsigned long sata_deb_timing_long[]; + extern struct ata_port_operations ata_dummy_port_ops; extern const struct ata_port_info ata_dummy_port_info; @@ -1068,14 +1067,33 @@ static inline int is_multi_taskfile(struct ata_taskfile *tf) (tf->command == ATA_CMD_WRITE_MULTI_FUA_EXT); } +static inline const unsigned long * +sata_ehc_deb_timing(struct ata_eh_context *ehc) +{ + if (ehc->i.flags & ATA_EHI_HOTPLUGGED) + return sata_deb_timing_hotplug; + else + return sata_deb_timing_normal; +} + static inline int ata_port_is_dummy(struct ata_port *ap) { return ap->ops == &ata_dummy_port_ops; } +extern int sata_set_spd(struct ata_link *link); extern int ata_std_prereset(struct ata_link *link, unsigned long deadline); extern int ata_wait_after_reset(struct ata_link *link, unsigned long deadline, int (*check_ready)(struct ata_link *link)); +extern int sata_link_debounce(struct ata_link *link, + const unsigned long *params, unsigned long deadline); +extern int sata_link_resume(struct ata_link *link, const unsigned long *params, + unsigned long deadline); +extern int sata_link_scr_lpm(struct ata_link *link, enum ata_lpm_policy policy, + bool spm_wakeup); +extern int sata_link_hardreset(struct ata_link *link, + const unsigned long *timing, unsigned long deadline, + bool *online, int (*check_ready)(struct ata_link *)); extern int sata_std_hardreset(struct ata_link *link, unsigned int *class, unsigned long deadline); extern void ata_std_postreset(struct ata_link *link, unsigned int *classes); @@ -1083,8 +1101,7 @@ extern void ata_std_postreset(struct ata_link *link, unsigned int *classes); extern struct ata_host *ata_host_alloc(struct device *dev, int max_ports); 
extern struct ata_host *ata_host_alloc_pinfo(struct device *dev, const struct ata_port_info * const * ppi, int n_ports); -extern void ata_host_get(struct ata_host *host); -extern void ata_host_put(struct ata_host *host); +extern int ata_slave_link_init(struct ata_port *ap); extern int ata_host_start(struct ata_host *host); extern int ata_host_register(struct ata_host *host, struct scsi_host_template *sht); @@ -1094,21 +1111,24 @@ extern int ata_host_activate(struct ata_host *host, int irq, extern void ata_host_detach(struct ata_host *host); extern void ata_host_init(struct ata_host *, struct device *, struct ata_port_operations *); extern int ata_scsi_detect(struct scsi_host_template *sht); -extern int ata_scsi_ioctl(struct scsi_device *dev, unsigned int cmd, - void __user *arg); -#ifdef CONFIG_COMPAT -#define ATA_SCSI_COMPAT_IOCTL .compat_ioctl = ata_scsi_ioctl, -#else -#define ATA_SCSI_COMPAT_IOCTL /* empty */ -#endif +extern int ata_scsi_ioctl(struct scsi_device *dev, int cmd, void __user *arg); extern int ata_scsi_queuecmd(struct Scsi_Host *h, struct scsi_cmnd *cmd); -#if IS_REACHABLE(CONFIG_ATA) -bool ata_scsi_dma_need_drain(struct request *rq); -#else -#define ata_scsi_dma_need_drain NULL -#endif extern int ata_sas_scsi_ioctl(struct ata_port *ap, struct scsi_device *dev, - unsigned int cmd, void __user *arg); + int cmd, void __user *arg); +extern void ata_sas_port_destroy(struct ata_port *); +extern struct ata_port *ata_sas_port_alloc(struct ata_host *, + struct ata_port_info *, struct Scsi_Host *); +extern void ata_sas_async_probe(struct ata_port *ap); +extern int ata_sas_sync_probe(struct ata_port *ap); +extern int ata_sas_port_init(struct ata_port *); +extern int ata_sas_port_start(struct ata_port *ap); +extern void ata_sas_port_stop(struct ata_port *ap); +extern int ata_sas_slave_configure(struct scsi_device *, struct ata_port *); +extern int ata_sas_queuecmd(struct scsi_cmnd *cmd, struct ata_port *ap); +extern int sata_scr_valid(struct ata_link *link); 
+extern int sata_scr_read(struct ata_link *link, int reg, u32 *val); +extern int sata_scr_write(struct ata_link *link, int reg, u32 val); +extern int sata_scr_write_flush(struct ata_link *link, int reg, u32 val); extern bool ata_link_online(struct ata_link *link); extern bool ata_link_offline(struct ata_link *link); #ifdef CONFIG_PM @@ -1129,6 +1149,9 @@ extern void ata_msleep(struct ata_port *ap, unsigned int msecs); extern u32 ata_wait_register(struct ata_port *ap, void __iomem *reg, u32 mask, u32 val, unsigned long interval, unsigned long timeout); extern int atapi_cmd_type(u8 opcode); +extern void ata_tf_to_fis(const struct ata_taskfile *tf, + u8 pmp, int is_cmd, u8 *fis); +extern void ata_tf_from_fis(const u8 *fis, struct ata_taskfile *tf); extern unsigned long ata_pack_xfermask(unsigned long pio_mask, unsigned long mwdma_mask, unsigned long udma_mask); extern void ata_unpack_xfermask(unsigned long xfer_mask, @@ -1140,7 +1163,7 @@ extern int ata_xfer_mode2shift(unsigned long xfer_mode); extern const char *ata_mode_string(unsigned long xfer_mask); extern unsigned long ata_id_xfermask(const u16 *id); extern int ata_std_qc_defer(struct ata_queued_cmd *qc); -extern enum ata_completion_errors ata_noop_qc_prep(struct ata_queued_cmd *qc); +extern void ata_noop_qc_prep(struct ata_queued_cmd *qc); extern void ata_sg_init(struct ata_queued_cmd *qc, struct scatterlist *sg, unsigned int n_elem); extern unsigned int ata_dev_classify(const struct ata_taskfile *tf); @@ -1152,7 +1175,7 @@ extern void ata_id_c_string(const u16 *id, unsigned char *s, extern unsigned int ata_do_dev_read_id(struct ata_device *dev, struct ata_taskfile *tf, u16 *id); extern void ata_qc_complete(struct ata_queued_cmd *qc); -extern u64 ata_qc_get_active(struct ata_port *ap); +extern int ata_qc_complete_multiple(struct ata_port *ap, u32 qc_active); extern void ata_scsi_simulate(struct ata_device *dev, struct scsi_cmnd *cmd); extern int ata_std_bios_param(struct scsi_device *sdev, struct block_device 
*bdev, @@ -1168,96 +1191,7 @@ extern struct ata_device *ata_dev_pair(struct ata_device *adev); extern int ata_do_set_mode(struct ata_link *link, struct ata_device **r_failed_dev); extern void ata_scsi_port_error_handler(struct Scsi_Host *host, struct ata_port *ap); extern void ata_scsi_cmd_error_handler(struct Scsi_Host *host, struct ata_port *ap, struct list_head *eh_q); - -/* - * SATA specific code - drivers/ata/libata-sata.c - */ -#ifdef CONFIG_SATA_HOST -extern const unsigned long sata_deb_timing_normal[]; -extern const unsigned long sata_deb_timing_hotplug[]; -extern const unsigned long sata_deb_timing_long[]; - -static inline const unsigned long * -sata_ehc_deb_timing(struct ata_eh_context *ehc) -{ - if (ehc->i.flags & ATA_EHI_HOTPLUGGED) - return sata_deb_timing_hotplug; - else - return sata_deb_timing_normal; -} - -extern int sata_scr_valid(struct ata_link *link); -extern int sata_scr_read(struct ata_link *link, int reg, u32 *val); -extern int sata_scr_write(struct ata_link *link, int reg, u32 val); -extern int sata_scr_write_flush(struct ata_link *link, int reg, u32 val); -extern int sata_set_spd(struct ata_link *link); -extern int sata_link_hardreset(struct ata_link *link, - const unsigned long *timing, unsigned long deadline, - bool *online, int (*check_ready)(struct ata_link *)); -extern int sata_link_resume(struct ata_link *link, const unsigned long *params, - unsigned long deadline); -extern void ata_eh_analyze_ncq_error(struct ata_link *link); -#else -static inline const unsigned long * -sata_ehc_deb_timing(struct ata_eh_context *ehc) -{ - return NULL; -} -static inline int sata_scr_valid(struct ata_link *link) { return 0; } -static inline int sata_scr_read(struct ata_link *link, int reg, u32 *val) -{ - return -EOPNOTSUPP; -} -static inline int sata_scr_write(struct ata_link *link, int reg, u32 val) -{ - return -EOPNOTSUPP; -} -static inline int sata_scr_write_flush(struct ata_link *link, int reg, u32 val) -{ - return -EOPNOTSUPP; -} -static inline 
int sata_set_spd(struct ata_link *link) { return -EOPNOTSUPP; } -static inline int sata_link_hardreset(struct ata_link *link, - const unsigned long *timing, - unsigned long deadline, - bool *online, - int (*check_ready)(struct ata_link *)) -{ - if (online) - *online = false; - return -EOPNOTSUPP; -} -static inline int sata_link_resume(struct ata_link *link, - const unsigned long *params, - unsigned long deadline) -{ - return -EOPNOTSUPP; -} -static inline void ata_eh_analyze_ncq_error(struct ata_link *link) { } -#endif -extern int sata_link_debounce(struct ata_link *link, - const unsigned long *params, unsigned long deadline); -extern int sata_link_scr_lpm(struct ata_link *link, enum ata_lpm_policy policy, - bool spm_wakeup); -extern int ata_slave_link_init(struct ata_port *ap); -extern void ata_sas_port_destroy(struct ata_port *); -extern struct ata_port *ata_sas_port_alloc(struct ata_host *, - struct ata_port_info *, struct Scsi_Host *); -extern void ata_sas_async_probe(struct ata_port *ap); -extern int ata_sas_sync_probe(struct ata_port *ap); -extern int ata_sas_port_init(struct ata_port *); -extern int ata_sas_port_start(struct ata_port *ap); -extern int ata_sas_tport_add(struct device *parent, struct ata_port *ap); -extern void ata_sas_tport_delete(struct ata_port *ap); -extern void ata_sas_port_stop(struct ata_port *ap); -extern int ata_sas_slave_configure(struct scsi_device *, struct ata_port *); -extern int ata_sas_queuecmd(struct scsi_cmnd *cmd, struct ata_port *ap); -extern void ata_tf_to_fis(const struct ata_taskfile *tf, - u8 pmp, int is_cmd, u8 *fis); -extern void ata_tf_from_fis(const u8 *fis, struct ata_taskfile *tf); -extern int ata_qc_complete_multiple(struct ata_port *ap, u64 qc_active); extern bool sata_lpm_ignore_phy_events(struct ata_link *link); -extern int sata_async_notification(struct ata_port *ap); extern int ata_cable_40wire(struct ata_port *ap); extern int ata_cable_80wire(struct ata_port *ap); @@ -1267,6 +1201,12 @@ extern int 
ata_cable_unknown(struct ata_port *ap); /* Timing helpers */ extern unsigned int ata_pio_need_iordy(const struct ata_device *); +extern const struct ata_timing *ata_timing_find_mode(u8 xfer_mode); +extern int ata_timing_compute(struct ata_device *, unsigned short, + struct ata_timing *, int, int); +extern void ata_timing_merge(const struct ata_timing *, + const struct ata_timing *, struct ata_timing *, + unsigned int); extern u8 ata_timing_cycle2mode(unsigned int xfer_shift, int cycle); /* PCI */ @@ -1281,7 +1221,6 @@ struct pci_bits { }; extern int pci_test_config_bits(struct pci_dev *pdev, const struct pci_bits *bits); -extern void ata_pci_shutdown_one(struct pci_dev *pdev); extern void ata_pci_remove_one(struct pci_dev *pdev); #ifdef CONFIG_PM @@ -1350,12 +1289,14 @@ extern void ata_port_wait_eh(struct ata_port *ap); extern int ata_link_abort(struct ata_link *link); extern int ata_port_abort(struct ata_port *ap); extern int ata_port_freeze(struct ata_port *ap); +extern int sata_async_notification(struct ata_port *ap); extern void ata_eh_freeze_port(struct ata_port *ap); extern void ata_eh_thaw_port(struct ata_port *ap); extern void ata_eh_qc_complete(struct ata_queued_cmd *qc); extern void ata_eh_qc_retry(struct ata_queued_cmd *qc); +extern void ata_eh_analyze_ncq_error(struct ata_link *link); extern void ata_do_eh(struct ata_port *ap, ata_prereset_fn_t prereset, ata_reset_fn_t softreset, ata_reset_fn_t hardreset, @@ -1396,38 +1337,26 @@ extern struct device_attribute *ata_common_sdev_attrs[]; * edge driver's module reference, otherwise the driver can be unloaded * even if the scsi_device is being accessed. 
*/ -#define __ATA_BASE_SHT(drv_name) \ +#define ATA_BASE_SHT(drv_name) \ .module = THIS_MODULE, \ .name = drv_name, \ .ioctl = ata_scsi_ioctl, \ - ATA_SCSI_COMPAT_IOCTL \ .queuecommand = ata_scsi_queuecmd, \ - .dma_need_drain = ata_scsi_dma_need_drain, \ - .this_id = ATA_SHT_THIS_ID, \ - .emulated = ATA_SHT_EMULATED, \ - .proc_name = drv_name, \ - .slave_destroy = ata_scsi_slave_destroy, \ - .bios_param = ata_std_bios_param, \ - .unlock_native_capacity = ata_scsi_unlock_native_capacity - -#define ATA_SUBBASE_SHT(drv_name) \ - __ATA_BASE_SHT(drv_name), \ .can_queue = ATA_DEF_QUEUE, \ .tag_alloc_policy = BLK_TAG_ALLOC_RR, \ - .slave_configure = ata_scsi_slave_config - -#define ATA_BASE_SHT(drv_name) \ - ATA_SUBBASE_SHT(drv_name), \ + .this_id = ATA_SHT_THIS_ID, \ + .emulated = ATA_SHT_EMULATED, \ + .use_clustering = ATA_SHT_USE_CLUSTERING, \ + .proc_name = drv_name, \ + .slave_configure = ata_scsi_slave_config, \ + .slave_destroy = ata_scsi_slave_destroy, \ + .bios_param = ata_std_bios_param, \ + .unlock_native_capacity = ata_scsi_unlock_native_capacity, \ .sdev_attrs = ata_common_sdev_attrs -#ifdef CONFIG_SATA_HOST -extern struct device_attribute *ata_ncq_sdev_attrs[]; - #define ATA_NCQ_SHT(drv_name) \ - ATA_SUBBASE_SHT(drv_name), \ - .sdev_attrs = ata_ncq_sdev_attrs, \ + ATA_BASE_SHT(drv_name), \ .change_queue_depth = ata_scsi_change_queue_depth -#endif /* * PMP helpers @@ -1460,7 +1389,7 @@ static inline bool sata_pmp_attached(struct ata_port *ap) static inline bool ata_is_host_link(const struct ata_link *link) { - return true; + return 1; } #endif /* CONFIG_SATA_PMP */ @@ -1546,39 +1475,16 @@ extern void ata_port_pbar_desc(struct ata_port *ap, int bar, ssize_t offset, const char *name); #endif -static inline bool ata_tag_internal(unsigned int tag) +static inline unsigned int ata_tag_valid(unsigned int tag) +{ + return (tag < ATA_MAX_QUEUE) ? 
1 : 0; +} + +static inline unsigned int ata_tag_internal(unsigned int tag) { return tag == ATA_TAG_INTERNAL; } -static inline bool ata_tag_valid(unsigned int tag) -{ - return tag < ATA_MAX_QUEUE || ata_tag_internal(tag); -} - -#define __ata_qc_for_each(ap, qc, tag, max_tag, fn) \ - for ((tag) = 0; (tag) < (max_tag) && \ - ({ qc = fn((ap), (tag)); 1; }); (tag)++) \ - -/* - * Internal use only, iterate commands ignoring error handling and - * status of 'qc'. - */ -#define ata_qc_for_each_raw(ap, qc, tag) \ - __ata_qc_for_each(ap, qc, tag, ATA_MAX_QUEUE, __ata_qc_from_tag) - -/* - * Iterate all potential commands that can be queued - */ -#define ata_qc_for_each(ap, qc, tag) \ - __ata_qc_for_each(ap, qc, tag, ATA_MAX_QUEUE, ata_qc_from_tag) - -/* - * Like ata_qc_for_each, but with the internal tag included - */ -#define ata_qc_for_each_with_internal(ap, qc, tag) \ - __ata_qc_for_each(ap, qc, tag, ATA_MAX_QUEUE + 1, ata_qc_from_tag) - /* * device helpers */ @@ -1700,8 +1606,6 @@ extern struct ata_device *ata_dev_next(struct ata_device *dev, */ static inline int ata_ncq_enabled(struct ata_device *dev) { - if (!IS_ENABLED(CONFIG_SATA_HOST)) - return 0; return (dev->flags & (ATA_DFLAG_PIO | ATA_DFLAG_NCQ_OFF | ATA_DFLAG_NCQ)) == ATA_DFLAG_NCQ; } @@ -1741,7 +1645,7 @@ static inline void ata_qc_set_polling(struct ata_queued_cmd *qc) static inline struct ata_queued_cmd *__ata_qc_from_tag(struct ata_port *ap, unsigned int tag) { - if (ata_tag_valid(tag)) + if (likely(ata_tag_valid(tag))) return &ap->qcmd[tag]; return NULL; } @@ -1870,16 +1774,6 @@ static inline int ata_dma_enabled(struct ata_device *adev) return (adev->dma_mode == 0xFF ? 
0 : 1); } -/************************************************************************** - * PATA timings - drivers/ata/libata-pata-timings.c - */ -extern const struct ata_timing *ata_timing_find_mode(u8 xfer_mode); -extern int ata_timing_compute(struct ata_device *, unsigned short, - struct ata_timing *, int, int); -extern void ata_timing_merge(const struct ata_timing *, - const struct ata_timing *, struct ata_timing *, - unsigned int); - /************************************************************************** * PMP - drivers/ata/libata-pmp.c */ @@ -1924,9 +1818,11 @@ extern void ata_sff_tf_load(struct ata_port *ap, const struct ata_taskfile *tf); extern void ata_sff_tf_read(struct ata_port *ap, struct ata_taskfile *tf); extern void ata_sff_exec_command(struct ata_port *ap, const struct ata_taskfile *tf); -extern unsigned int ata_sff_data_xfer(struct ata_queued_cmd *qc, +extern unsigned int ata_sff_data_xfer(struct ata_device *dev, unsigned char *buf, unsigned int buflen, int rw); -extern unsigned int ata_sff_data_xfer32(struct ata_queued_cmd *qc, +extern unsigned int ata_sff_data_xfer32(struct ata_device *dev, + unsigned char *buf, unsigned int buflen, int rw); +extern unsigned int ata_sff_data_xfer_noirq(struct ata_device *dev, unsigned char *buf, unsigned int buflen, int rw); extern void ata_sff_irq_on(struct ata_port *ap); extern void ata_sff_irq_clear(struct ata_port *ap); @@ -1979,9 +1875,9 @@ extern const struct ata_port_operations ata_bmdma_port_ops; .sg_tablesize = LIBATA_MAX_PRD, \ .dma_boundary = ATA_DMA_BOUNDARY -extern enum ata_completion_errors ata_bmdma_qc_prep(struct ata_queued_cmd *qc); +extern void ata_bmdma_qc_prep(struct ata_queued_cmd *qc); extern unsigned int ata_bmdma_qc_issue(struct ata_queued_cmd *qc); -extern enum ata_completion_errors ata_bmdma_dumb_qc_prep(struct ata_queued_cmd *qc); +extern void ata_bmdma_dumb_qc_prep(struct ata_queued_cmd *qc); extern unsigned int ata_bmdma_port_intr(struct ata_port *ap, struct ata_queued_cmd *qc); 
extern irqreturn_t ata_bmdma_interrupt(int irq, void *dev_instance); diff --git a/include/linux/libfdt.h b/include/linux/libfdt.h index 90ed4ebfa6..4c0306c69b 100644 --- a/include/linux/libfdt.h +++ b/include/linux/libfdt.h @@ -1,8 +1,8 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _INCLUDE_LIBFDT_H_ #define _INCLUDE_LIBFDT_H_ #include +#include "../../scripts/dtc/libfdt/fdt.h" #include "../../scripts/dtc/libfdt/libfdt.h" #endif /* _INCLUDE_LIBFDT_H_ */ diff --git a/include/linux/libfdt_env.h b/include/linux/libfdt_env.h index cea8574a29..2a663c6bb4 100644 --- a/include/linux/libfdt_env.h +++ b/include/linux/libfdt_env.h @@ -1,15 +1,10 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -#ifndef LIBFDT_ENV_H -#define LIBFDT_ENV_H +#ifndef _LIBFDT_ENV_H +#define _LIBFDT_ENV_H -#include /* For INT_MAX */ #include #include -#define INT32_MAX S32_MAX -#define UINT32_MAX U32_MAX - typedef __be16 fdt16_t; typedef __be32 fdt32_t; typedef __be64 fdt64_t; @@ -19,4 +14,4 @@ typedef __be64 fdt64_t; #define fdt64_to_cpu(x) be64_to_cpu(x) #define cpu_to_fdt64(x) cpu_to_be64(x) -#endif /* LIBFDT_ENV_H */ +#endif /* _LIBFDT_ENV_H */ diff --git a/include/linux/libnvdimm.h b/include/linux/libnvdimm.h index 7074aa9af5..77e7af3254 100644 --- a/include/linux/libnvdimm.h +++ b/include/linux/libnvdimm.h @@ -1,44 +1,28 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * libnvdimm - Non-volatile-memory Devices Subsystem * * Copyright(c) 2013-2015 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. 
*/ #ifndef __LIBNVDIMM_H__ #define __LIBNVDIMM_H__ #include #include #include -#include -#include -#include - -struct badrange_entry { - u64 start; - u64 length; - struct list_head list; -}; - -struct badrange { - struct list_head list; - spinlock_t lock; -}; enum { /* when a dimm supports both PMEM and BLK access a label is required */ - NDD_ALIASING = 0, + NDD_ALIASING = 1 << 0, /* unarmed memory devices may not persist writes */ - NDD_UNARMED = 1, - /* locked memory devices should not be accessed */ - NDD_LOCKED = 2, - /* memory under security wipes should not be accessed */ - NDD_SECURITY_OVERWRITE = 3, - /* tracking whether or not there is a pending device reference */ - NDD_WORK_PENDING = 4, - /* ignore / filter NSLABEL_FLAG_LOCAL for this DIMM, i.e. no aliasing */ - NDD_NOBLK = 5, - /* dimm supports namespace labels */ - NDD_LABELING = 6, + NDD_UNARMED = 1 << 1, /* need to set a limit somewhere, but yes, this is likely overkill */ ND_IOCTL_MAX_BUFLEN = SZ_4M, @@ -48,45 +32,33 @@ enum { /* region flag indicating to direct-map persistent memory by default */ ND_REGION_PAGEMAP = 0, - /* - * Platform ensures entire CPU store data path is flushed to pmem on - * system power loss. - */ - ND_REGION_PERSIST_CACHE = 1, - /* - * Platform provides mechanisms to automatically flush outstanding - * write data from memory controler to pmem on system power loss. 
- * (ADR) - */ - ND_REGION_PERSIST_MEMCTRL = 2, - - /* Platform provides asynchronous flush mechanism */ - ND_REGION_ASYNC = 3, /* mark newly adjusted resources as requiring a label update */ DPA_RESOURCE_ADJUSTED = 1 << 0, }; +extern struct attribute_group nvdimm_bus_attribute_group; +extern struct attribute_group nvdimm_attribute_group; +extern struct attribute_group nd_device_attribute_group; +extern struct attribute_group nd_numa_attribute_group; +extern struct attribute_group nd_region_attribute_group; +extern struct attribute_group nd_mapping_attribute_group; + struct nvdimm; struct nvdimm_bus_descriptor; typedef int (*ndctl_fn)(struct nvdimm_bus_descriptor *nd_desc, struct nvdimm *nvdimm, unsigned int cmd, void *buf, unsigned int buf_len, int *cmd_rc); -struct device_node; struct nvdimm_bus_descriptor { const struct attribute_group **attr_groups; unsigned long cmd_mask; - unsigned long dimm_family_mask; - unsigned long bus_family_mask; struct module *module; char *provider_name; - struct device_node *of_node; ndctl_fn ndctl; int (*flush_probe)(struct nvdimm_bus_descriptor *nd_desc); int (*clear_to_send)(struct nvdimm_bus_descriptor *nd_desc, - struct nvdimm *nvdimm, unsigned int cmd, void *data); - const struct nvdimm_bus_fw_ops *fw_ops; + struct nvdimm *nvdimm, unsigned int cmd); }; struct nd_cmd_desc { @@ -97,24 +69,17 @@ struct nd_cmd_desc { }; struct nd_interleave_set { - /* v1.1 definition of the interleave-set-cookie algorithm */ - u64 cookie1; - /* v1.2 definition of the interleave-set-cookie algorithm */ - u64 cookie2; + u64 cookie; /* compatibility with initial buggy Linux implementation */ u64 altcookie; - - guid_t type_guid; }; struct nd_mapping_desc { struct nvdimm *nvdimm; u64 start; u64 size; - int position; }; -struct nd_region; struct nd_region_desc { struct resource *res; struct nd_mapping_desc *mapping; @@ -124,10 +89,7 @@ struct nd_region_desc { void *provider_data; int num_lanes; int numa_node; - int target_node; unsigned long flags; - 
struct device_node *of_node; - int (*flush)(struct nd_region *nd_region, struct bio *bio); }; struct device; @@ -141,6 +103,7 @@ static inline void __iomem *devm_nvdimm_ioremap(struct device *dev, struct nvdimm_bus; struct module; +struct device; struct nd_blk_region; struct nd_blk_region_desc { int (*enable)(struct nvdimm_bus *nvdimm_bus, struct device *dev); @@ -156,107 +119,15 @@ static inline struct nd_blk_region_desc *to_blk_region_desc( } -/* - * Note that separate bits for locked + unlocked are defined so that - * 'flags == 0' corresponds to an error / not-supported state. - */ -enum nvdimm_security_bits { - NVDIMM_SECURITY_DISABLED, - NVDIMM_SECURITY_UNLOCKED, - NVDIMM_SECURITY_LOCKED, - NVDIMM_SECURITY_FROZEN, - NVDIMM_SECURITY_OVERWRITE, -}; - -#define NVDIMM_PASSPHRASE_LEN 32 -#define NVDIMM_KEY_DESC_LEN 22 - -struct nvdimm_key_data { - u8 data[NVDIMM_PASSPHRASE_LEN]; -}; - -enum nvdimm_passphrase_type { - NVDIMM_USER, - NVDIMM_MASTER, -}; - -struct nvdimm_security_ops { - unsigned long (*get_flags)(struct nvdimm *nvdimm, - enum nvdimm_passphrase_type pass_type); - int (*freeze)(struct nvdimm *nvdimm); - int (*change_key)(struct nvdimm *nvdimm, - const struct nvdimm_key_data *old_data, - const struct nvdimm_key_data *new_data, - enum nvdimm_passphrase_type pass_type); - int (*unlock)(struct nvdimm *nvdimm, - const struct nvdimm_key_data *key_data); - int (*disable)(struct nvdimm *nvdimm, - const struct nvdimm_key_data *key_data); - int (*erase)(struct nvdimm *nvdimm, - const struct nvdimm_key_data *key_data, - enum nvdimm_passphrase_type pass_type); - int (*overwrite)(struct nvdimm *nvdimm, - const struct nvdimm_key_data *key_data); - int (*query_overwrite)(struct nvdimm *nvdimm); -}; - -enum nvdimm_fwa_state { - NVDIMM_FWA_INVALID, - NVDIMM_FWA_IDLE, - NVDIMM_FWA_ARMED, - NVDIMM_FWA_BUSY, - NVDIMM_FWA_ARM_OVERFLOW, -}; - -enum nvdimm_fwa_trigger { - NVDIMM_FWA_ARM, - NVDIMM_FWA_DISARM, -}; - -enum nvdimm_fwa_capability { - NVDIMM_FWA_CAP_INVALID, - 
NVDIMM_FWA_CAP_NONE, - NVDIMM_FWA_CAP_QUIESCE, - NVDIMM_FWA_CAP_LIVE, -}; - -enum nvdimm_fwa_result { - NVDIMM_FWA_RESULT_INVALID, - NVDIMM_FWA_RESULT_NONE, - NVDIMM_FWA_RESULT_SUCCESS, - NVDIMM_FWA_RESULT_NOTSTAGED, - NVDIMM_FWA_RESULT_NEEDRESET, - NVDIMM_FWA_RESULT_FAIL, -}; - -struct nvdimm_bus_fw_ops { - enum nvdimm_fwa_state (*activate_state) - (struct nvdimm_bus_descriptor *nd_desc); - enum nvdimm_fwa_capability (*capability) - (struct nvdimm_bus_descriptor *nd_desc); - int (*activate)(struct nvdimm_bus_descriptor *nd_desc); -}; - -struct nvdimm_fw_ops { - enum nvdimm_fwa_state (*activate_state)(struct nvdimm *nvdimm); - enum nvdimm_fwa_result (*activate_result)(struct nvdimm *nvdimm); - int (*arm)(struct nvdimm *nvdimm, enum nvdimm_fwa_trigger arg); -}; - -void badrange_init(struct badrange *badrange); -int badrange_add(struct badrange *badrange, u64 addr, u64 length); -void badrange_forget(struct badrange *badrange, phys_addr_t start, - unsigned int len); -int nvdimm_bus_add_badrange(struct nvdimm_bus *nvdimm_bus, u64 addr, - u64 length); +int nvdimm_bus_add_poison(struct nvdimm_bus *nvdimm_bus, u64 addr, u64 length); +void nvdimm_clear_from_poison_list(struct nvdimm_bus *nvdimm_bus, + phys_addr_t start, unsigned int len); struct nvdimm_bus *nvdimm_bus_register(struct device *parent, struct nvdimm_bus_descriptor *nfit_desc); void nvdimm_bus_unregister(struct nvdimm_bus *nvdimm_bus); struct nvdimm_bus *to_nvdimm_bus(struct device *dev); -struct nvdimm_bus *nvdimm_to_bus(struct nvdimm *nvdimm); struct nvdimm *to_nvdimm(struct device *dev); struct nd_region *to_nd_region(struct device *dev); -struct device *nd_region_dev(struct nd_region *nd_region); struct nd_blk_region *to_nd_blk_region(struct device *dev); struct nvdimm_bus_descriptor *to_nd_desc(struct nvdimm_bus *nvdimm_bus); struct device *to_nvdimm_bus_dev(struct nvdimm_bus *nvdimm_bus); @@ -264,22 +135,10 @@ const char *nvdimm_name(struct nvdimm *nvdimm); struct kobject *nvdimm_kobj(struct nvdimm 
*nvdimm); unsigned long nvdimm_cmd_mask(struct nvdimm *nvdimm); void *nvdimm_provider_data(struct nvdimm *nvdimm); -struct nvdimm *__nvdimm_create(struct nvdimm_bus *nvdimm_bus, - void *provider_data, const struct attribute_group **groups, - unsigned long flags, unsigned long cmd_mask, int num_flush, - struct resource *flush_wpq, const char *dimm_id, - const struct nvdimm_security_ops *sec_ops, - const struct nvdimm_fw_ops *fw_ops); -static inline struct nvdimm *nvdimm_create(struct nvdimm_bus *nvdimm_bus, - void *provider_data, const struct attribute_group **groups, - unsigned long flags, unsigned long cmd_mask, int num_flush, - struct resource *flush_wpq) -{ - return __nvdimm_create(nvdimm_bus, provider_data, groups, flags, - cmd_mask, num_flush, flush_wpq, NULL, NULL, NULL); -} -void nvdimm_delete(struct nvdimm *nvdimm); - +struct nvdimm *nvdimm_create(struct nvdimm_bus *nvdimm_bus, void *provider_data, + const struct attribute_group **groups, unsigned long flags, + unsigned long cmd_mask, int num_flush, + struct resource *flush_wpq); const struct nd_cmd_desc *nd_cmd_dimm_desc(int cmd); const struct nd_cmd_desc *nd_cmd_bus_desc(int cmd); u32 nd_cmd_in_size(struct nvdimm *nvdimm, int cmd, @@ -298,38 +157,9 @@ void *nd_region_provider_data(struct nd_region *nd_region); void *nd_blk_region_provider_data(struct nd_blk_region *ndbr); void nd_blk_region_set_provider_data(struct nd_blk_region *ndbr, void *data); struct nvdimm *nd_blk_region_to_dimm(struct nd_blk_region *ndbr); -unsigned long nd_blk_memremap_flags(struct nd_blk_region *ndbr); unsigned int nd_region_acquire_lane(struct nd_region *nd_region); void nd_region_release_lane(struct nd_region *nd_region, unsigned int lane); u64 nd_fletcher64(void *addr, size_t len, bool le); -int nvdimm_flush(struct nd_region *nd_region, struct bio *bio); -int generic_nvdimm_flush(struct nd_region *nd_region); +void nvdimm_flush(struct nd_region *nd_region); int nvdimm_has_flush(struct nd_region *nd_region); -int 
nvdimm_has_cache(struct nd_region *nd_region); -int nvdimm_in_overwrite(struct nvdimm *nvdimm); -bool is_nvdimm_sync(struct nd_region *nd_region); - -static inline int nvdimm_ctl(struct nvdimm *nvdimm, unsigned int cmd, void *buf, - unsigned int buf_len, int *cmd_rc) -{ - struct nvdimm_bus *nvdimm_bus = nvdimm_to_bus(nvdimm); - struct nvdimm_bus_descriptor *nd_desc = to_nd_desc(nvdimm_bus); - - return nd_desc->ndctl(nd_desc, nvdimm, cmd, buf, buf_len, cmd_rc); -} - -#ifdef CONFIG_ARCH_HAS_PMEM_API -#define ARCH_MEMREMAP_PMEM MEMREMAP_WB -void arch_wb_cache_pmem(void *addr, size_t size); -void arch_invalidate_pmem(void *addr, size_t size); -#else -#define ARCH_MEMREMAP_PMEM MEMREMAP_WT -static inline void arch_wb_cache_pmem(void *addr, size_t size) -{ -} -static inline void arch_invalidate_pmem(void *addr, size_t size) -{ -} -#endif - #endif /* __LIBNVDIMM_H__ */ diff --git a/include/linux/libps2.h b/include/linux/libps2.h index 53f7e4d0f4..4ad06e824f 100644 --- a/include/linux/libps2.h +++ b/include/linux/libps2.h @@ -1,19 +1,16 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ #ifndef _LIBPS2_H #define _LIBPS2_H /* * Copyright (C) 1999-2002 Vojtech Pavlik * Copyright (C) 2004 Dmitry Torokhov + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published by + * the Free Software Foundation. 
*/ -#include -#include -#include -#include -#define PS2_CMD_SETSCALE11 0x00e6 -#define PS2_CMD_SETRES 0x10e8 #define PS2_CMD_GETID 0x02f2 #define PS2_CMD_RESET_BAT 0x02ff @@ -23,12 +20,11 @@ #define PS2_RET_NAK 0xfe #define PS2_RET_ERR 0xfc -#define PS2_FLAG_ACK BIT(0) /* Waiting for ACK/NAK */ -#define PS2_FLAG_CMD BIT(1) /* Waiting for a command to finish */ -#define PS2_FLAG_CMD1 BIT(2) /* Waiting for the first byte of command response */ -#define PS2_FLAG_WAITID BIT(3) /* Command executing is GET ID */ -#define PS2_FLAG_NAK BIT(4) /* Last transmission was NAKed */ -#define PS2_FLAG_ACK_CMD BIT(5) /* Waiting to ACK the command (first) byte */ +#define PS2_FLAG_ACK 1 /* Waiting for ACK/NAK */ +#define PS2_FLAG_CMD 2 /* Waiting for command to finish */ +#define PS2_FLAG_CMD1 4 /* Waiting for the first byte of command response */ +#define PS2_FLAG_WAITID 8 /* Command executing is GET ID */ +#define PS2_FLAG_NAK 16 /* Last transmission was NAKed */ struct ps2dev { struct serio *serio; @@ -40,22 +36,21 @@ struct ps2dev { wait_queue_head_t wait; unsigned long flags; - u8 cmdbuf[8]; - u8 cmdcnt; - u8 nak; + unsigned char cmdbuf[8]; + unsigned char cmdcnt; + unsigned char nak; }; void ps2_init(struct ps2dev *ps2dev, struct serio *serio); -int ps2_sendbyte(struct ps2dev *ps2dev, u8 byte, unsigned int timeout); -void ps2_drain(struct ps2dev *ps2dev, size_t maxbytes, unsigned int timeout); +int ps2_sendbyte(struct ps2dev *ps2dev, unsigned char byte, int timeout); +void ps2_drain(struct ps2dev *ps2dev, int maxbytes, int timeout); void ps2_begin_command(struct ps2dev *ps2dev); void ps2_end_command(struct ps2dev *ps2dev); -int __ps2_command(struct ps2dev *ps2dev, u8 *param, unsigned int command); -int ps2_command(struct ps2dev *ps2dev, u8 *param, unsigned int command); -int ps2_sliced_command(struct ps2dev *ps2dev, u8 command); -bool ps2_handle_ack(struct ps2dev *ps2dev, u8 data); -bool ps2_handle_response(struct ps2dev *ps2dev, u8 data); +int __ps2_command(struct ps2dev 
*ps2dev, unsigned char *param, int command); +int ps2_command(struct ps2dev *ps2dev, unsigned char *param, int command); +int ps2_handle_ack(struct ps2dev *ps2dev, unsigned char data); +int ps2_handle_response(struct ps2dev *ps2dev, unsigned char data); void ps2_cmd_aborted(struct ps2dev *ps2dev); -bool ps2_is_keyboard_id(u8 id); +int ps2_is_keyboard_id(char id); #endif /* _LIBPS2_H */ diff --git a/include/linux/license.h b/include/linux/license.h index 7cce390f12..decdbf43cb 100644 --- a/include/linux/license.h +++ b/include/linux/license.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ #ifndef __LICENSE_H #define __LICENSE_H diff --git a/include/linux/lightnvm.h b/include/linux/lightnvm.h index 1db223710b..d190786e4a 100644 --- a/include/linux/lightnvm.h +++ b/include/linux/lightnvm.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef NVM_H #define NVM_H @@ -16,58 +15,26 @@ enum { NVM_IOTYPE_GC = 1, }; -/* common format */ -#define NVM_GEN_CH_BITS (8) -#define NVM_GEN_LUN_BITS (8) -#define NVM_GEN_BLK_BITS (16) -#define NVM_GEN_RESERVED (32) - -/* 1.2 format */ -#define NVM_12_PG_BITS (16) -#define NVM_12_PL_BITS (4) -#define NVM_12_SEC_BITS (4) -#define NVM_12_RESERVED (8) - -/* 2.0 format */ -#define NVM_20_SEC_BITS (24) -#define NVM_20_RESERVED (8) - -enum { - NVM_OCSSD_SPEC_12 = 12, - NVM_OCSSD_SPEC_20 = 20, -}; +#define NVM_BLK_BITS (16) +#define NVM_PG_BITS (16) +#define NVM_SEC_BITS (8) +#define NVM_PL_BITS (8) +#define NVM_LUN_BITS (8) +#define NVM_CH_BITS (7) struct ppa_addr { /* Generic structure for all addresses */ union { - /* generic device format */ struct { - u64 ch : NVM_GEN_CH_BITS; - u64 lun : NVM_GEN_LUN_BITS; - u64 blk : NVM_GEN_BLK_BITS; - u64 reserved : NVM_GEN_RESERVED; - } a; - - /* 1.2 device format */ - struct { - u64 ch : NVM_GEN_CH_BITS; - u64 lun : NVM_GEN_LUN_BITS; - u64 blk : NVM_GEN_BLK_BITS; - u64 pg : NVM_12_PG_BITS; - u64 pl : NVM_12_PL_BITS; - u64 sec : NVM_12_SEC_BITS; - u64 reserved : 
NVM_12_RESERVED; + u64 blk : NVM_BLK_BITS; + u64 pg : NVM_PG_BITS; + u64 sec : NVM_SEC_BITS; + u64 pl : NVM_PL_BITS; + u64 lun : NVM_LUN_BITS; + u64 ch : NVM_CH_BITS; + u64 reserved : 1; } g; - /* 2.0 device format */ - struct { - u64 grp : NVM_GEN_CH_BITS; - u64 pu : NVM_GEN_LUN_BITS; - u64 chk : NVM_GEN_BLK_BITS; - u64 sec : NVM_20_SEC_BITS; - u64 reserved : NVM_20_RESERVED; - } m; - struct { u64 line : 63; u64 is_cached : 1; @@ -80,16 +47,16 @@ struct ppa_addr { struct nvm_rq; struct nvm_id; struct nvm_dev; -struct nvm_tgt_dev; -struct nvm_chk_meta; -typedef int (nvm_id_fn)(struct nvm_dev *); +typedef int (nvm_l2p_update_fn)(u64, u32, __le64 *, void *); +typedef int (nvm_id_fn)(struct nvm_dev *, struct nvm_id *); +typedef int (nvm_get_l2p_tbl_fn)(struct nvm_dev *, u64, u32, + nvm_l2p_update_fn *, void *); typedef int (nvm_op_bb_tbl_fn)(struct nvm_dev *, struct ppa_addr, u8 *); typedef int (nvm_op_set_bb_fn)(struct nvm_dev *, struct ppa_addr *, int, int); -typedef int (nvm_get_chk_meta_fn)(struct nvm_dev *, sector_t, int, - struct nvm_chk_meta *); -typedef int (nvm_submit_io_fn)(struct nvm_dev *, struct nvm_rq *, void *); -typedef void *(nvm_create_dma_pool_fn)(struct nvm_dev *, char *, int); +typedef int (nvm_submit_io_fn)(struct nvm_dev *, struct nvm_rq *); +typedef int (nvm_erase_blk_fn)(struct nvm_dev *, struct nvm_rq *); +typedef void *(nvm_create_dma_pool_fn)(struct nvm_dev *, char *); typedef void (nvm_destroy_dma_pool_fn)(void *); typedef void *(nvm_dev_dma_alloc_fn)(struct nvm_dev *, void *, gfp_t, dma_addr_t *); @@ -97,19 +64,23 @@ typedef void (nvm_dev_dma_free_fn)(void *, void*, dma_addr_t); struct nvm_dev_ops { nvm_id_fn *identity; + nvm_get_l2p_tbl_fn *get_l2p_tbl; nvm_op_bb_tbl_fn *get_bb_tbl; nvm_op_set_bb_fn *set_bb_tbl; - nvm_get_chk_meta_fn *get_chk_meta; - nvm_submit_io_fn *submit_io; + nvm_erase_blk_fn *erase_block; nvm_create_dma_pool_fn *create_dma_pool; nvm_destroy_dma_pool_fn *destroy_dma_pool; nvm_dev_dma_alloc_fn *dev_dma_alloc; 
nvm_dev_dma_free_fn *dev_dma_free; + + unsigned int max_phys_sect; }; + + #ifdef CONFIG_NVM #include @@ -136,11 +107,10 @@ enum { NVM_RSP_NOT_CHANGEABLE = 0x1, NVM_RSP_ERR_FAILWRITE = 0x40ff, NVM_RSP_ERR_EMPTYPAGE = 0x42ff, - NVM_RSP_ERR_FAILECC = 0x4281, - NVM_RSP_ERR_FAILCRC = 0x4004, - NVM_RSP_WARN_HIGHECC = 0x4700, /* Device opcodes */ + NVM_OP_HBREAD = 0x02, + NVM_OP_HBWRITE = 0x81, NVM_OP_PWRITE = 0x91, NVM_OP_PREAD = 0x92, NVM_OP_ERASE = 0x90, @@ -153,7 +123,7 @@ enum { /* NAND Access Modes */ NVM_IO_SUSPEND = 0x80, NVM_IO_SLC_MODE = 0x100, - NVM_IO_SCRAMBLE_ENABLE = 0x200, + NVM_IO_SCRAMBLE_DISABLE = 0x200, /* Block Types */ NVM_BLK_T_FREE = 0x0, @@ -187,100 +157,78 @@ struct nvm_id_lp_tbl { struct nvm_id_lp_mlc mlc; }; -struct nvm_addrf_12 { - u8 ch_len; - u8 lun_len; - u8 blk_len; - u8 pg_len; - u8 pln_len; - u8 sec_len; +struct nvm_id_group { + u8 mtype; + u8 fmtype; + u8 num_ch; + u8 num_lun; + u8 num_pln; + u16 num_blk; + u16 num_pg; + u16 fpg_sz; + u16 csecs; + u16 sos; + u32 trdt; + u32 trdm; + u32 tprt; + u32 tprm; + u32 tbet; + u32 tbem; + u32 mpos; + u32 mccap; + u16 cpar; + struct nvm_id_lp_tbl lptbl; +}; + +struct nvm_addr_format { u8 ch_offset; + u8 ch_len; u8 lun_offset; - u8 blk_offset; - u8 pg_offset; + u8 lun_len; u8 pln_offset; - u8 sec_offset; - - u64 ch_mask; - u64 lun_mask; - u64 blk_mask; - u64 pg_mask; - u64 pln_mask; - u64 sec_mask; + u8 pln_len; + u8 blk_offset; + u8 blk_len; + u8 pg_offset; + u8 pg_len; + u8 sect_offset; + u8 sect_len; }; -struct nvm_addrf { - u8 ch_len; - u8 lun_len; - u8 chk_len; - u8 sec_len; - u8 rsv_len[2]; - - u8 ch_offset; - u8 lun_offset; - u8 chk_offset; - u8 sec_offset; - u8 rsv_off[2]; - - u64 ch_mask; - u64 lun_mask; - u64 chk_mask; - u64 sec_mask; - u64 rsv_mask[2]; -}; - -enum { - /* Chunk states */ - NVM_CHK_ST_FREE = 1 << 0, - NVM_CHK_ST_CLOSED = 1 << 1, - NVM_CHK_ST_OPEN = 1 << 2, - NVM_CHK_ST_OFFLINE = 1 << 3, - - /* Chunk types */ - NVM_CHK_TP_W_SEQ = 1 << 0, - NVM_CHK_TP_W_RAN = 1 << 1, - 
NVM_CHK_TP_SZ_SPEC = 1 << 4, -}; - -/* - * Note: The structure size is linked to nvme_nvm_chk_meta such that the same - * buffer can be used when converting from little endian to cpu addressing. - */ -struct nvm_chk_meta { - u8 state; - u8 type; - u8 wi; - u8 rsvd[5]; - u64 slba; - u64 cnlb; - u64 wp; -}; +struct nvm_id { + u8 ver_id; + u8 vmnt; + u8 cgrps; + u32 cap; + u32 dom; + struct nvm_addr_format ppaf; + struct nvm_id_group groups[4]; +} __packed; struct nvm_target { struct list_head list; - struct nvm_tgt_dev *dev; + struct nvm_dev *dev; struct nvm_tgt_type *type; struct gendisk *disk; }; -#define ADDR_EMPTY (~0ULL) +struct nvm_tgt_instance { + struct nvm_tgt_type *tt; +}; -#define NVM_TARGET_DEFAULT_OP (101) -#define NVM_TARGET_MIN_OP (3) -#define NVM_TARGET_MAX_OP (80) +#define ADDR_EMPTY (~0ULL) #define NVM_VERSION_MAJOR 1 #define NVM_VERSION_MINOR 0 #define NVM_VERSION_PATCH 0 -#define NVM_MAX_VLBA (64) /* max logical blocks in a vector command */ - struct nvm_rq; typedef void (nvm_end_io_fn)(struct nvm_rq *); struct nvm_rq { - struct nvm_tgt_dev *dev; + struct nvm_tgt_instance *ins; + struct nvm_dev *dev; struct bio *bio; @@ -294,6 +242,7 @@ struct nvm_rq { void *meta_list; dma_addr_t dma_meta_list; + struct completion *wait; nvm_end_io_fn *end_io; uint8_t opcode; @@ -302,10 +251,6 @@ struct nvm_rq { u64 ppa_status; /* ppa media status */ int error; - - int is_seq; /* Sequential hint flag. 1.2 only */ - - void *private; }; static inline struct nvm_rq *nvm_rq_from_pdu(void *pdu) @@ -318,10 +263,19 @@ static inline void *nvm_rq_to_pdu(struct nvm_rq *rqdata) return rqdata + 1; } -static inline struct ppa_addr *nvm_rq_to_ppa_list(struct nvm_rq *rqd) -{ - return (rqd->nr_ppas > 1) ? 
rqd->ppa_list : &rqd->ppa_addr; -} +struct nvm_block; + +struct nvm_lun { + int id; + + int lun_id; + int chnl_id; + + spinlock_t lock; + + unsigned int nr_free_blocks; /* Number of unused blocks */ + struct nvm_block *blocks; +}; enum { NVM_BLK_ST_FREE = 0x1, /* Free block */ @@ -329,85 +283,22 @@ enum { NVM_BLK_ST_BAD = 0x8, /* Bad block */ }; -/* Instance geometry */ -struct nvm_geo { - /* device reported version */ - u8 major_ver_id; - u8 minor_ver_id; +struct nvm_block { + struct list_head list; + struct nvm_lun *lun; + unsigned long id; - /* kernel short version */ - u8 version; - - /* instance specific geometry */ - int num_ch; - int num_lun; /* per channel */ - - /* calculated values */ - int all_luns; /* across channels */ - int all_chunks; /* across channels */ - - int op; /* over-provision in instance */ - - sector_t total_secs; /* across channels */ - - /* chunk geometry */ - u32 num_chk; /* chunks per lun */ - u32 clba; /* sectors per chunk */ - u16 csecs; /* sector size */ - u16 sos; /* out-of-band area size */ - bool ext; /* metadata in extended data buffer */ - u32 mdts; /* Max data transfer size*/ - - /* device write constrains */ - u32 ws_min; /* minimum write size */ - u32 ws_opt; /* optimal write size */ - u32 mw_cunits; /* distance required for successful read */ - u32 maxoc; /* maximum open chunks */ - u32 maxocpu; /* maximum open chunks per parallel unit */ - - /* device capabilities */ - u32 mccap; - - /* device timings */ - u32 trdt; /* Avg. Tread (ns) */ - u32 trdm; /* Max Tread (ns) */ - u32 tprt; /* Avg. Tprog (ns) */ - u32 tprm; /* Max Tprog (ns) */ - u32 tbet; /* Avg. 
Terase (ns) */ - u32 tbem; /* Max Terase (ns) */ - - /* generic address format */ - struct nvm_addrf addrf; - - /* 1.2 compatibility */ - u8 vmnt; - u32 cap; - u32 dom; - - u8 mtype; - u8 fmtype; - - u16 cpar; - u32 mpos; - - u8 num_pln; - u8 pln_mode; - u16 num_pg; - u16 fpg_sz; + void *priv; + int state; }; -/* sub-device structure */ -struct nvm_tgt_dev { - /* Device information */ - struct nvm_geo geo; - - /* Base ppas for target LUNs */ - struct ppa_addr *luns; - - struct request_queue *q; - - struct nvm_dev *parent; - void *map; +/* system block cpu representation */ +struct nvm_sb_info { + unsigned long seqnr; + unsigned long erase_cnt; + unsigned int version; + char mmtype[NVM_MMTYPE_LEN]; + struct ppa_addr fs_ppa; }; struct nvm_dev { @@ -415,273 +306,274 @@ struct nvm_dev { struct list_head devices; + /* Media manager */ + struct nvmm_type *mt; + void *mp; + + /* System blocks */ + struct nvm_sb_info sb; + /* Device information */ - struct nvm_geo geo; + int nr_chnls; + int nr_planes; + int luns_per_chnl; + int sec_per_pg; /* only sectors for a single page */ + int pgs_per_blk; + int blks_per_lun; + int fpg_size; + int pfpg_size; /* size of buffer if all pages are to be read */ + int sec_size; + int oob_size; + int mccap; + struct nvm_addr_format ppaf; + + /* Calculated/Cached values. These do not reflect the actual usable + * blocks at run-time. 
+ */ + int max_rq_size; + int plane_mode; /* drive device in single, double or quad mode */ + + int sec_per_pl; /* all sectors across planes */ + int sec_per_blk; + int sec_per_lun; + + /* lower page table */ + int lps_per_blk; + int *lptbl; + + unsigned long total_blocks; + unsigned long total_secs; + int nr_luns; unsigned long *lun_map; void *dma_pool; + struct nvm_id identity; + /* Backend device */ struct request_queue *q; + struct device dev; + struct device *parent_dev; char name[DISK_NAME_LEN]; void *private_data; - struct kref ref; - void *rmap; - struct mutex mlock; spinlock_t lock; - - /* target management */ - struct list_head area_list; - struct list_head targets; }; static inline struct ppa_addr generic_to_dev_addr(struct nvm_dev *dev, - struct ppa_addr r) + struct ppa_addr r) { - struct nvm_geo *geo = &dev->geo; struct ppa_addr l; - if (geo->version == NVM_OCSSD_SPEC_12) { - struct nvm_addrf_12 *ppaf = (struct nvm_addrf_12 *)&geo->addrf; - - l.ppa = ((u64)r.g.ch) << ppaf->ch_offset; - l.ppa |= ((u64)r.g.lun) << ppaf->lun_offset; - l.ppa |= ((u64)r.g.blk) << ppaf->blk_offset; - l.ppa |= ((u64)r.g.pg) << ppaf->pg_offset; - l.ppa |= ((u64)r.g.pl) << ppaf->pln_offset; - l.ppa |= ((u64)r.g.sec) << ppaf->sec_offset; - } else { - struct nvm_addrf *lbaf = &geo->addrf; - - l.ppa = ((u64)r.m.grp) << lbaf->ch_offset; - l.ppa |= ((u64)r.m.pu) << lbaf->lun_offset; - l.ppa |= ((u64)r.m.chk) << lbaf->chk_offset; - l.ppa |= ((u64)r.m.sec) << lbaf->sec_offset; - } + l.ppa = ((u64)r.g.blk) << dev->ppaf.blk_offset; + l.ppa |= ((u64)r.g.pg) << dev->ppaf.pg_offset; + l.ppa |= ((u64)r.g.sec) << dev->ppaf.sect_offset; + l.ppa |= ((u64)r.g.pl) << dev->ppaf.pln_offset; + l.ppa |= ((u64)r.g.lun) << dev->ppaf.lun_offset; + l.ppa |= ((u64)r.g.ch) << dev->ppaf.ch_offset; return l; } static inline struct ppa_addr dev_to_generic_addr(struct nvm_dev *dev, - struct ppa_addr r) + struct ppa_addr r) { - struct nvm_geo *geo = &dev->geo; struct ppa_addr l; l.ppa = 0; - - if (geo->version 
== NVM_OCSSD_SPEC_12) { - struct nvm_addrf_12 *ppaf = (struct nvm_addrf_12 *)&geo->addrf; - - l.g.ch = (r.ppa & ppaf->ch_mask) >> ppaf->ch_offset; - l.g.lun = (r.ppa & ppaf->lun_mask) >> ppaf->lun_offset; - l.g.blk = (r.ppa & ppaf->blk_mask) >> ppaf->blk_offset; - l.g.pg = (r.ppa & ppaf->pg_mask) >> ppaf->pg_offset; - l.g.pl = (r.ppa & ppaf->pln_mask) >> ppaf->pln_offset; - l.g.sec = (r.ppa & ppaf->sec_mask) >> ppaf->sec_offset; - } else { - struct nvm_addrf *lbaf = &geo->addrf; - - l.m.grp = (r.ppa & lbaf->ch_mask) >> lbaf->ch_offset; - l.m.pu = (r.ppa & lbaf->lun_mask) >> lbaf->lun_offset; - l.m.chk = (r.ppa & lbaf->chk_mask) >> lbaf->chk_offset; - l.m.sec = (r.ppa & lbaf->sec_mask) >> lbaf->sec_offset; - } + /* + * (r.ppa << X offset) & X len bitmask. X eq. blk, pg, etc. + */ + l.g.blk = (r.ppa >> dev->ppaf.blk_offset) & + (((1 << dev->ppaf.blk_len) - 1)); + l.g.pg |= (r.ppa >> dev->ppaf.pg_offset) & + (((1 << dev->ppaf.pg_len) - 1)); + l.g.sec |= (r.ppa >> dev->ppaf.sect_offset) & + (((1 << dev->ppaf.sect_len) - 1)); + l.g.pl |= (r.ppa >> dev->ppaf.pln_offset) & + (((1 << dev->ppaf.pln_len) - 1)); + l.g.lun |= (r.ppa >> dev->ppaf.lun_offset) & + (((1 << dev->ppaf.lun_len) - 1)); + l.g.ch |= (r.ppa >> dev->ppaf.ch_offset) & + (((1 << dev->ppaf.ch_len) - 1)); return l; } -static inline u64 dev_to_chunk_addr(struct nvm_dev *dev, void *addrf, - struct ppa_addr p) +static inline int ppa_empty(struct ppa_addr ppa_addr) { - struct nvm_geo *geo = &dev->geo; - u64 caddr; - - if (geo->version == NVM_OCSSD_SPEC_12) { - struct nvm_addrf_12 *ppaf = (struct nvm_addrf_12 *)addrf; - - caddr = (u64)p.g.pg << ppaf->pg_offset; - caddr |= (u64)p.g.pl << ppaf->pln_offset; - caddr |= (u64)p.g.sec << ppaf->sec_offset; - } else { - caddr = p.m.sec; - } - - return caddr; + return (ppa_addr.ppa == ADDR_EMPTY); } -static inline struct ppa_addr nvm_ppa32_to_ppa64(struct nvm_dev *dev, - void *addrf, u32 ppa32) +static inline void ppa_set_empty(struct ppa_addr *ppa_addr) { - struct ppa_addr 
ppa64; - - ppa64.ppa = 0; - - if (ppa32 == -1) { - ppa64.ppa = ADDR_EMPTY; - } else if (ppa32 & (1U << 31)) { - ppa64.c.line = ppa32 & ((~0U) >> 1); - ppa64.c.is_cached = 1; - } else { - struct nvm_geo *geo = &dev->geo; - - if (geo->version == NVM_OCSSD_SPEC_12) { - struct nvm_addrf_12 *ppaf = addrf; - - ppa64.g.ch = (ppa32 & ppaf->ch_mask) >> - ppaf->ch_offset; - ppa64.g.lun = (ppa32 & ppaf->lun_mask) >> - ppaf->lun_offset; - ppa64.g.blk = (ppa32 & ppaf->blk_mask) >> - ppaf->blk_offset; - ppa64.g.pg = (ppa32 & ppaf->pg_mask) >> - ppaf->pg_offset; - ppa64.g.pl = (ppa32 & ppaf->pln_mask) >> - ppaf->pln_offset; - ppa64.g.sec = (ppa32 & ppaf->sec_mask) >> - ppaf->sec_offset; - } else { - struct nvm_addrf *lbaf = addrf; - - ppa64.m.grp = (ppa32 & lbaf->ch_mask) >> - lbaf->ch_offset; - ppa64.m.pu = (ppa32 & lbaf->lun_mask) >> - lbaf->lun_offset; - ppa64.m.chk = (ppa32 & lbaf->chk_mask) >> - lbaf->chk_offset; - ppa64.m.sec = (ppa32 & lbaf->sec_mask) >> - lbaf->sec_offset; - } - } - - return ppa64; + ppa_addr->ppa = ADDR_EMPTY; } -static inline u32 nvm_ppa64_to_ppa32(struct nvm_dev *dev, - void *addrf, struct ppa_addr ppa64) +static inline struct ppa_addr block_to_ppa(struct nvm_dev *dev, + struct nvm_block *blk) { - u32 ppa32 = 0; + struct ppa_addr ppa; + struct nvm_lun *lun = blk->lun; - if (ppa64.ppa == ADDR_EMPTY) { - ppa32 = ~0U; - } else if (ppa64.c.is_cached) { - ppa32 |= ppa64.c.line; - ppa32 |= 1U << 31; - } else { - struct nvm_geo *geo = &dev->geo; + ppa.ppa = 0; + ppa.g.blk = blk->id % dev->blks_per_lun; + ppa.g.lun = lun->lun_id; + ppa.g.ch = lun->chnl_id; - if (geo->version == NVM_OCSSD_SPEC_12) { - struct nvm_addrf_12 *ppaf = addrf; - - ppa32 |= ppa64.g.ch << ppaf->ch_offset; - ppa32 |= ppa64.g.lun << ppaf->lun_offset; - ppa32 |= ppa64.g.blk << ppaf->blk_offset; - ppa32 |= ppa64.g.pg << ppaf->pg_offset; - ppa32 |= ppa64.g.pl << ppaf->pln_offset; - ppa32 |= ppa64.g.sec << ppaf->sec_offset; - } else { - struct nvm_addrf *lbaf = addrf; - - ppa32 |= ppa64.m.grp 
<< lbaf->ch_offset; - ppa32 |= ppa64.m.pu << lbaf->lun_offset; - ppa32 |= ppa64.m.chk << lbaf->chk_offset; - ppa32 |= ppa64.m.sec << lbaf->sec_offset; - } - } - - return ppa32; + return ppa; } -static inline int nvm_next_ppa_in_chk(struct nvm_tgt_dev *dev, - struct ppa_addr *ppa) +static inline int ppa_to_slc(struct nvm_dev *dev, int slc_pg) { - struct nvm_geo *geo = &dev->geo; - int last = 0; - - if (geo->version == NVM_OCSSD_SPEC_12) { - int sec = ppa->g.sec; - - sec++; - if (sec == geo->ws_min) { - int pg = ppa->g.pg; - - sec = 0; - pg++; - if (pg == geo->num_pg) { - int pl = ppa->g.pl; - - pg = 0; - pl++; - if (pl == geo->num_pln) - last = 1; - - ppa->g.pl = pl; - } - ppa->g.pg = pg; - } - ppa->g.sec = sec; - } else { - ppa->m.sec++; - if (ppa->m.sec == geo->clba) - last = 1; - } - - return last; + return dev->lptbl[slc_pg]; } +typedef blk_qc_t (nvm_tgt_make_rq_fn)(struct request_queue *, struct bio *); typedef sector_t (nvm_tgt_capacity_fn)(void *); -typedef void *(nvm_tgt_init_fn)(struct nvm_tgt_dev *, struct gendisk *, - int flags); -typedef void (nvm_tgt_exit_fn)(void *, bool); -typedef int (nvm_tgt_sysfs_init_fn)(struct gendisk *); -typedef void (nvm_tgt_sysfs_exit_fn)(struct gendisk *); - -enum { - NVM_TGT_F_DEV_L2P = 0, - NVM_TGT_F_HOST_L2P = 1 << 0, -}; +typedef void *(nvm_tgt_init_fn)(struct nvm_dev *, struct gendisk *, int, int); +typedef void (nvm_tgt_exit_fn)(void *); struct nvm_tgt_type { const char *name; unsigned int version[3]; - int flags; /* target entry points */ - const struct block_device_operations *bops; + nvm_tgt_make_rq_fn *make_rq; nvm_tgt_capacity_fn *capacity; + nvm_end_io_fn *end_io; /* module-specific init/teardown */ nvm_tgt_init_fn *init; nvm_tgt_exit_fn *exit; - /* sysfs */ - nvm_tgt_sysfs_init_fn *sysfs_init; - nvm_tgt_sysfs_exit_fn *sysfs_exit; - /* For internal use */ struct list_head list; - struct module *owner; }; +extern struct nvm_tgt_type *nvm_find_target_type(const char *, int); + extern int 
nvm_register_tgt_type(struct nvm_tgt_type *); extern void nvm_unregister_tgt_type(struct nvm_tgt_type *); extern void *nvm_dev_dma_alloc(struct nvm_dev *, gfp_t, dma_addr_t *); extern void nvm_dev_dma_free(struct nvm_dev *, void *, dma_addr_t); +typedef int (nvmm_register_fn)(struct nvm_dev *); +typedef void (nvmm_unregister_fn)(struct nvm_dev *); + +typedef int (nvmm_create_tgt_fn)(struct nvm_dev *, struct nvm_ioctl_create *); +typedef int (nvmm_remove_tgt_fn)(struct nvm_dev *, struct nvm_ioctl_remove *); +typedef struct nvm_block *(nvmm_get_blk_fn)(struct nvm_dev *, + struct nvm_lun *, unsigned long); +typedef void (nvmm_put_blk_fn)(struct nvm_dev *, struct nvm_block *); +typedef int (nvmm_open_blk_fn)(struct nvm_dev *, struct nvm_block *); +typedef int (nvmm_close_blk_fn)(struct nvm_dev *, struct nvm_block *); +typedef void (nvmm_flush_blk_fn)(struct nvm_dev *, struct nvm_block *); +typedef int (nvmm_submit_io_fn)(struct nvm_dev *, struct nvm_rq *); +typedef int (nvmm_erase_blk_fn)(struct nvm_dev *, struct nvm_block *, + unsigned long); +typedef void (nvmm_mark_blk_fn)(struct nvm_dev *, struct ppa_addr, int); +typedef struct nvm_lun *(nvmm_get_lun_fn)(struct nvm_dev *, int); +typedef int (nvmm_reserve_lun)(struct nvm_dev *, int); +typedef void (nvmm_release_lun)(struct nvm_dev *, int); +typedef void (nvmm_lun_info_print_fn)(struct nvm_dev *); + +typedef int (nvmm_get_area_fn)(struct nvm_dev *, sector_t *, sector_t); +typedef void (nvmm_put_area_fn)(struct nvm_dev *, sector_t); + +struct nvmm_type { + const char *name; + unsigned int version[3]; + + nvmm_register_fn *register_mgr; + nvmm_unregister_fn *unregister_mgr; + + nvmm_create_tgt_fn *create_tgt; + nvmm_remove_tgt_fn *remove_tgt; + + /* Block administration callbacks */ + nvmm_get_blk_fn *get_blk; + nvmm_put_blk_fn *put_blk; + nvmm_open_blk_fn *open_blk; + nvmm_close_blk_fn *close_blk; + nvmm_flush_blk_fn *flush_blk; + + nvmm_submit_io_fn *submit_io; + nvmm_erase_blk_fn *erase_blk; + + /* Bad block mgmt */ 
+ nvmm_mark_blk_fn *mark_blk; + + /* Configuration management */ + nvmm_get_lun_fn *get_lun; + nvmm_reserve_lun *reserve_lun; + nvmm_release_lun *release_lun; + + /* Statistics */ + nvmm_lun_info_print_fn *lun_info_print; + + nvmm_get_area_fn *get_area; + nvmm_put_area_fn *put_area; + + struct list_head list; +}; + +extern int nvm_register_mgr(struct nvmm_type *); +extern void nvm_unregister_mgr(struct nvmm_type *); + +extern struct nvm_block *nvm_get_blk(struct nvm_dev *, struct nvm_lun *, + unsigned long); +extern void nvm_put_blk(struct nvm_dev *, struct nvm_block *); + extern struct nvm_dev *nvm_alloc_dev(int); extern int nvm_register(struct nvm_dev *); extern void nvm_unregister(struct nvm_dev *); -extern int nvm_get_chunk_meta(struct nvm_tgt_dev *, struct ppa_addr, - int, struct nvm_chk_meta *); -extern int nvm_set_chunk_meta(struct nvm_tgt_dev *, struct ppa_addr *, - int, int); -extern int nvm_submit_io(struct nvm_tgt_dev *, struct nvm_rq *, void *); -extern int nvm_submit_io_sync(struct nvm_tgt_dev *, struct nvm_rq *, void *); -extern void nvm_end_io(struct nvm_rq *); +void nvm_mark_blk(struct nvm_dev *dev, struct ppa_addr ppa, int type); + +extern int nvm_submit_io(struct nvm_dev *, struct nvm_rq *); +extern void nvm_generic_to_addr_mode(struct nvm_dev *, struct nvm_rq *); +extern void nvm_addr_to_generic_mode(struct nvm_dev *, struct nvm_rq *); +extern int nvm_set_rqd_ppalist(struct nvm_dev *, struct nvm_rq *, + const struct ppa_addr *, int, int); +extern void nvm_free_rqd_ppalist(struct nvm_dev *, struct nvm_rq *); +extern int nvm_erase_ppa(struct nvm_dev *, struct ppa_addr *, int); +extern int nvm_erase_blk(struct nvm_dev *, struct nvm_block *); +extern void nvm_end_io(struct nvm_rq *, int); +extern int nvm_submit_ppa(struct nvm_dev *, struct ppa_addr *, int, int, int, + void *, int); +extern int nvm_submit_ppa_list(struct nvm_dev *, struct ppa_addr *, int, int, + int, void *, int); +extern int nvm_bb_tbl_fold(struct nvm_dev *, u8 *, int); +extern int 
nvm_get_bb_tbl(struct nvm_dev *, struct ppa_addr, u8 *); + +/* sysblk.c */ +#define NVM_SYSBLK_MAGIC 0x4E564D53 /* "NVMS" */ + +/* system block on disk representation */ +struct nvm_system_block { + __be32 magic; /* magic signature */ + __be32 seqnr; /* sequence number */ + __be32 erase_cnt; /* erase count */ + __be16 version; /* version number */ + u8 mmtype[NVM_MMTYPE_LEN]; /* media manager name */ + __be64 fs_ppa; /* PPA for media manager + * superblock */ +}; + +extern int nvm_get_sysblock(struct nvm_dev *, struct nvm_sb_info *); +extern int nvm_update_sysblock(struct nvm_dev *, struct nvm_sb_info *); +extern int nvm_init_sysblock(struct nvm_dev *, struct nvm_sb_info *); + +extern int nvm_dev_factory(struct nvm_dev *, int flags); + +#define nvm_for_each_lun_ppa(dev, ppa, chid, lunid) \ + for ((chid) = 0, (ppa).ppa = 0; (chid) < (dev)->nr_chnls; \ + (chid)++, (ppa).g.ch = (chid)) \ + for ((lunid) = 0; (lunid) < (dev)->luns_per_chnl; \ + (lunid)++, (ppa).g.lun = (lunid)) #else /* CONFIG_NVM */ struct nvm_dev_ops; diff --git a/include/linux/linkage.h b/include/linux/linkage.h index dbf8506dec..4313dcc382 100644 --- a/include/linux/linkage.h +++ b/include/linux/linkage.h @@ -1,11 +1,11 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_LINKAGE_H #define _LINUX_LINKAGE_H -#include +#include #include #include #include +#include /* Some toolchains use other characters (e.g. 
'`') to mark new line in macro */ #ifndef ASM_NL @@ -23,21 +23,31 @@ #endif #ifndef cond_syscall +# ifdef CONFIG_PAX_RAP +# define rap_cond_syscall(x) \ + ".weak " VMLINUX_SYMBOL_STR(rap_##x) "\n\t" \ + ".set " VMLINUX_SYMBOL_STR(rap_##x) "," \ + VMLINUX_SYMBOL_STR(rap_sys_ni_syscall) "\n\t" +# else +# define rap_cond_syscall(x) +# endif #define cond_syscall(x) asm( \ - ".weak " __stringify(x) "\n\t" \ - ".set " __stringify(x) "," \ - __stringify(sys_ni_syscall)) + rap_cond_syscall(x) \ + ".weak " VMLINUX_SYMBOL_STR(x) "\n\t" \ + ".set " VMLINUX_SYMBOL_STR(x) "," \ + VMLINUX_SYMBOL_STR(sys_ni_syscall)) #endif #ifndef SYSCALL_ALIAS #define SYSCALL_ALIAS(alias, name) asm( \ - ".globl " __stringify(alias) "\n\t" \ - ".set " __stringify(alias) "," \ - __stringify(name)) + ".globl " VMLINUX_SYMBOL_STR(alias) "\n\t" \ + ".set " VMLINUX_SYMBOL_STR(alias) "," \ + VMLINUX_SYMBOL_STR(name)) #endif -#define __page_aligned_data __section(".data..page_aligned") __aligned(PAGE_SIZE) -#define __page_aligned_bss __section(".bss..page_aligned") __aligned(PAGE_SIZE) +#define __page_aligned_data __section(.data..page_aligned) __aligned(PAGE_SIZE) +#define __page_aligned_rodata __read_only __aligned(PAGE_SIZE) +#define __page_aligned_bss __section(.bss..page_aligned) __aligned(PAGE_SIZE) /* * For assembly routines. 
@@ -73,63 +83,55 @@ #define __ALIGN_STR ".align 4,0x90" #endif +#ifdef CONFIG_PAX_RAP +# if BITS_PER_LONG == 64 +# define __ASM_RAP_HASH(hash) .quad 0, hash +# define __ASM_RAP_RET_HASH(hash) .quad hash +# elif BITS_PER_LONG == 32 +# define __ASM_RAP_HASH(hash) .long 0, hash +# define __ASM_RAP_RET_HASH(hash) .long hash +# else +# error incompatible BITS_PER_LONG +# endif +#endif + #ifdef __ASSEMBLY__ -/* SYM_T_FUNC -- type used by assembler to mark functions */ -#ifndef SYM_T_FUNC -#define SYM_T_FUNC STT_FUNC -#endif - -/* SYM_T_OBJECT -- type used by assembler to mark data */ -#ifndef SYM_T_OBJECT -#define SYM_T_OBJECT STT_OBJECT -#endif - -/* SYM_T_NONE -- type used by assembler to mark entries of unknown type */ -#ifndef SYM_T_NONE -#define SYM_T_NONE STT_NOTYPE -#endif - -/* SYM_A_* -- align the symbol? */ -#define SYM_A_ALIGN ALIGN -#define SYM_A_NONE /* nothing */ - -/* SYM_L_* -- linkage of symbols */ -#define SYM_L_GLOBAL(name) .globl name -#define SYM_L_WEAK(name) .weak name -#define SYM_L_LOCAL(name) /* nothing */ - #ifndef LINKER_SCRIPT #define ALIGN __ALIGN #define ALIGN_STR __ALIGN_STR -/* === DEPRECATED annotations === */ - -#ifndef CONFIG_ARCH_USE_SYM_ANNOTATIONS -#ifndef GLOBAL -/* deprecated, use SYM_DATA*, SYM_ENTRY, or similar */ -#define GLOBAL(name) \ - .globl name ASM_NL \ - name: -#endif - #ifndef ENTRY -/* deprecated, use SYM_FUNC_START */ -#define ENTRY(name) \ - SYM_FUNC_START(name) +#define __ENTRY(name, rap_hash) \ + .globl name ASM_NL \ + ALIGN ASM_NL \ + rap_hash \ + name: + +#define ENTRY(name) __ENTRY(name,) + #endif -#endif /* CONFIG_ARCH_USE_SYM_ANNOTATIONS */ + #endif /* LINKER_SCRIPT */ -#ifndef CONFIG_ARCH_USE_SYM_ANNOTATIONS #ifndef WEAK -/* deprecated, use SYM_FUNC_START_WEAK* */ -#define WEAK(name) \ - SYM_FUNC_START_WEAK(name) +#define __WEAK(name, rap_hash) \ + .weak name ASM_NL \ + rap_hash \ + name: + +#define WEAK(name) __WEAK(name, ) +#endif + +#ifdef CONFIG_PAX_RAP +# define RAP_ENTRY(name) __ENTRY(name, 
__ASM_RAP_HASH(__rap_hash_call_##name) ASM_NL) +# define RAP_WEAK(name) __WEAK(name, __ASM_RAP_HASH(__rap_hash_call_##name) ASM_NL) +#else +# define RAP_ENTRY(name) ENTRY(name) +# define RAP_WEAK(name) WEAK(name) #endif #ifndef END -/* deprecated, use SYM_FUNC_END, SYM_DATA_END, or SYM_END */ #define END(name) \ .size name, .-name #endif @@ -139,220 +141,11 @@ * static analysis tools such as stack depth analyzer. */ #ifndef ENDPROC -/* deprecated, use SYM_FUNC_END */ #define ENDPROC(name) \ - SYM_FUNC_END(name) -#endif -#endif /* CONFIG_ARCH_USE_SYM_ANNOTATIONS */ - -/* === generic annotations === */ - -/* SYM_ENTRY -- use only if you have to for non-paired symbols */ -#ifndef SYM_ENTRY -#define SYM_ENTRY(name, linkage, align...) \ - linkage(name) ASM_NL \ - align ASM_NL \ - name: + .type name, @function ASM_NL \ + END(name) #endif -/* SYM_START -- use only if you have to */ -#ifndef SYM_START -#define SYM_START(name, linkage, align...) \ - SYM_ENTRY(name, linkage, align) #endif -/* SYM_END -- use only if you have to */ -#ifndef SYM_END -#define SYM_END(name, sym_type) \ - .type name sym_type ASM_NL \ - .size name, .-name #endif - -/* === code annotations === */ - -/* - * FUNC -- C-like functions (proper stack frame etc.) - * CODE -- non-C code (e.g. irq handlers with different, special stack etc.) - * - * Objtool validates stack for FUNC, but not for CODE. - * Objtool generates debug info for both FUNC & CODE, but needs special - * annotations for each CODE's start (to describe the actual stack frame). - * - * Objtool requires that all code must be contained in an ELF symbol. Symbol - * names that have a .L prefix do not emit symbol table entries. .L - * prefixed symbols can be used within a code region, but should be avoided for - * denoting a range of code via ``SYM_*_START/END`` annotations. 
- * - * ALIAS -- does not generate debug info -- the aliased function will - */ - -/* SYM_INNER_LABEL_ALIGN -- only for labels in the middle of code */ -#ifndef SYM_INNER_LABEL_ALIGN -#define SYM_INNER_LABEL_ALIGN(name, linkage) \ - .type name SYM_T_NONE ASM_NL \ - SYM_ENTRY(name, linkage, SYM_A_ALIGN) -#endif - -/* SYM_INNER_LABEL -- only for labels in the middle of code */ -#ifndef SYM_INNER_LABEL -#define SYM_INNER_LABEL(name, linkage) \ - .type name SYM_T_NONE ASM_NL \ - SYM_ENTRY(name, linkage, SYM_A_NONE) -#endif - -/* - * SYM_FUNC_START_LOCAL_ALIAS -- use where there are two local names for one - * function - */ -#ifndef SYM_FUNC_START_LOCAL_ALIAS -#define SYM_FUNC_START_LOCAL_ALIAS(name) \ - SYM_START(name, SYM_L_LOCAL, SYM_A_ALIGN) -#endif - -/* - * SYM_FUNC_START_ALIAS -- use where there are two global names for one - * function - */ -#ifndef SYM_FUNC_START_ALIAS -#define SYM_FUNC_START_ALIAS(name) \ - SYM_START(name, SYM_L_GLOBAL, SYM_A_ALIGN) -#endif - -/* SYM_FUNC_START -- use for global functions */ -#ifndef SYM_FUNC_START -/* - * The same as SYM_FUNC_START_ALIAS, but we will need to distinguish these two - * later. 
- */ -#define SYM_FUNC_START(name) \ - SYM_START(name, SYM_L_GLOBAL, SYM_A_ALIGN) -#endif - -/* SYM_FUNC_START_NOALIGN -- use for global functions, w/o alignment */ -#ifndef SYM_FUNC_START_NOALIGN -#define SYM_FUNC_START_NOALIGN(name) \ - SYM_START(name, SYM_L_GLOBAL, SYM_A_NONE) -#endif - -/* SYM_FUNC_START_LOCAL -- use for local functions */ -#ifndef SYM_FUNC_START_LOCAL -/* the same as SYM_FUNC_START_LOCAL_ALIAS, see comment near SYM_FUNC_START */ -#define SYM_FUNC_START_LOCAL(name) \ - SYM_START(name, SYM_L_LOCAL, SYM_A_ALIGN) -#endif - -/* SYM_FUNC_START_LOCAL_NOALIGN -- use for local functions, w/o alignment */ -#ifndef SYM_FUNC_START_LOCAL_NOALIGN -#define SYM_FUNC_START_LOCAL_NOALIGN(name) \ - SYM_START(name, SYM_L_LOCAL, SYM_A_NONE) -#endif - -/* SYM_FUNC_START_WEAK -- use for weak functions */ -#ifndef SYM_FUNC_START_WEAK -#define SYM_FUNC_START_WEAK(name) \ - SYM_START(name, SYM_L_WEAK, SYM_A_ALIGN) -#endif - -/* SYM_FUNC_START_WEAK_NOALIGN -- use for weak functions, w/o alignment */ -#ifndef SYM_FUNC_START_WEAK_NOALIGN -#define SYM_FUNC_START_WEAK_NOALIGN(name) \ - SYM_START(name, SYM_L_WEAK, SYM_A_NONE) -#endif - -/* SYM_FUNC_END_ALIAS -- the end of LOCAL_ALIASed or ALIASed function */ -#ifndef SYM_FUNC_END_ALIAS -#define SYM_FUNC_END_ALIAS(name) \ - SYM_END(name, SYM_T_FUNC) -#endif - -/* - * SYM_FUNC_END -- the end of SYM_FUNC_START_LOCAL, SYM_FUNC_START, - * SYM_FUNC_START_WEAK, ... 
- */ -#ifndef SYM_FUNC_END -/* the same as SYM_FUNC_END_ALIAS, see comment near SYM_FUNC_START */ -#define SYM_FUNC_END(name) \ - SYM_END(name, SYM_T_FUNC) -#endif - -/* SYM_CODE_START -- use for non-C (special) functions */ -#ifndef SYM_CODE_START -#define SYM_CODE_START(name) \ - SYM_START(name, SYM_L_GLOBAL, SYM_A_ALIGN) -#endif - -/* SYM_CODE_START_NOALIGN -- use for non-C (special) functions, w/o alignment */ -#ifndef SYM_CODE_START_NOALIGN -#define SYM_CODE_START_NOALIGN(name) \ - SYM_START(name, SYM_L_GLOBAL, SYM_A_NONE) -#endif - -/* SYM_CODE_START_LOCAL -- use for local non-C (special) functions */ -#ifndef SYM_CODE_START_LOCAL -#define SYM_CODE_START_LOCAL(name) \ - SYM_START(name, SYM_L_LOCAL, SYM_A_ALIGN) -#endif - -/* - * SYM_CODE_START_LOCAL_NOALIGN -- use for local non-C (special) functions, - * w/o alignment - */ -#ifndef SYM_CODE_START_LOCAL_NOALIGN -#define SYM_CODE_START_LOCAL_NOALIGN(name) \ - SYM_START(name, SYM_L_LOCAL, SYM_A_NONE) -#endif - -/* SYM_CODE_END -- the end of SYM_CODE_START_LOCAL, SYM_CODE_START, ... 
*/ -#ifndef SYM_CODE_END -#define SYM_CODE_END(name) \ - SYM_END(name, SYM_T_NONE) -#endif - -/* === data annotations === */ - -/* SYM_DATA_START -- global data symbol */ -#ifndef SYM_DATA_START -#define SYM_DATA_START(name) \ - SYM_START(name, SYM_L_GLOBAL, SYM_A_NONE) -#endif - -/* SYM_DATA_START -- local data symbol */ -#ifndef SYM_DATA_START_LOCAL -#define SYM_DATA_START_LOCAL(name) \ - SYM_START(name, SYM_L_LOCAL, SYM_A_NONE) -#endif - -/* SYM_DATA_END -- the end of SYM_DATA_START symbol */ -#ifndef SYM_DATA_END -#define SYM_DATA_END(name) \ - SYM_END(name, SYM_T_OBJECT) -#endif - -/* SYM_DATA_END_LABEL -- the labeled end of SYM_DATA_START symbol */ -#ifndef SYM_DATA_END_LABEL -#define SYM_DATA_END_LABEL(name, linkage, label) \ - linkage(label) ASM_NL \ - .type label SYM_T_OBJECT ASM_NL \ - label: \ - SYM_END(name, SYM_T_OBJECT) -#endif - -/* SYM_DATA -- start+end wrapper around simple global data */ -#ifndef SYM_DATA -#define SYM_DATA(name, data...) \ - SYM_DATA_START(name) ASM_NL \ - data ASM_NL \ - SYM_DATA_END(name) -#endif - -/* SYM_DATA_LOCAL -- start+end wrapper around simple local data */ -#ifndef SYM_DATA_LOCAL -#define SYM_DATA_LOCAL(name, data...) 
\ - SYM_DATA_START_LOCAL(name) ASM_NL \ - data ASM_NL \ - SYM_DATA_END(name) -#endif - -#endif /* __ASSEMBLY__ */ - -#endif /* _LINUX_LINKAGE_H */ diff --git a/include/linux/linux_logo.h b/include/linux/linux_logo.h index d4d5b93efe..ca5bd91d12 100644 --- a/include/linux/linux_logo.h +++ b/include/linux/linux_logo.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_LINUX_LOGO_H #define _LINUX_LINUX_LOGO_H @@ -36,6 +35,8 @@ struct linux_logo { extern const struct linux_logo logo_linux_mono; extern const struct linux_logo logo_linux_vga16; extern const struct linux_logo logo_linux_clut224; +extern const struct linux_logo logo_blackfin_vga16; +extern const struct linux_logo logo_blackfin_clut224; extern const struct linux_logo logo_dec_clut224; extern const struct linux_logo logo_mac_clut224; extern const struct linux_logo logo_parisc_clut224; @@ -44,6 +45,7 @@ extern const struct linux_logo logo_sun_clut224; extern const struct linux_logo logo_superh_mono; extern const struct linux_logo logo_superh_vga16; extern const struct linux_logo logo_superh_clut224; +extern const struct linux_logo logo_m32r_clut224; extern const struct linux_logo logo_spe_clut224; extern const struct linux_logo *fb_find_logo(int depth); diff --git a/include/linux/lis3lv02d.h b/include/linux/lis3lv02d.h index b72b8cdba7..f1664c636a 100644 --- a/include/linux/lis3lv02d.h +++ b/include/linux/lis3lv02d.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef __LIS3LV02D_H_ #define __LIS3LV02D_H_ diff --git a/include/linux/list.h b/include/linux/list.h index f2af4b4aa4..d20a3b26ff 100644 --- a/include/linux/list.h +++ b/include/linux/list.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_LIST_H #define _LINUX_LIST_H @@ -9,7 +8,7 @@ #include /* - * Circular doubly linked list implementation. + * Simple doubly linked list implementation. 
* * Some of the internal functions ("__xxx") are useful when * manipulating whole lists rather than single entries, as @@ -23,55 +22,33 @@ #define LIST_HEAD(name) \ struct list_head name = LIST_HEAD_INIT(name) -/** - * INIT_LIST_HEAD - Initialize a list_head structure - * @list: list_head structure to be initialized. - * - * Initializes the list_head to point to itself. If it is a list header, - * the result is an empty list. - */ static inline void INIT_LIST_HEAD(struct list_head *list) { WRITE_ONCE(list->next, list); list->prev = list; } -#ifdef CONFIG_DEBUG_LIST -extern bool __list_add_valid(struct list_head *new, - struct list_head *prev, - struct list_head *next); -extern bool __list_del_entry_valid(struct list_head *entry); -#else -static inline bool __list_add_valid(struct list_head *new, - struct list_head *prev, - struct list_head *next) -{ - return true; -} -static inline bool __list_del_entry_valid(struct list_head *entry) -{ - return true; -} -#endif - /* * Insert a new entry between two known consecutive entries. * * This is only for internal list manipulation where we know * the prev/next entries already! */ +#ifndef CONFIG_DEBUG_LIST static inline void __list_add(struct list_head *new, struct list_head *prev, struct list_head *next) { - if (!__list_add_valid(new, prev, next)) - return; - next->prev = new; new->next = next; new->prev = prev; WRITE_ONCE(prev->next, new); } +#else +extern void __list_add(struct list_head *new, + struct list_head *prev, + struct list_head *next); +#endif /** * list_add - add a new entry @@ -113,40 +90,41 @@ static inline void __list_del(struct list_head * prev, struct list_head * next) WRITE_ONCE(prev->next, next); } -/* - * Delete a list entry and clear the 'prev' pointer. - * - * This is a special-purpose list clearing method used in the networking code - * for lists allocated as per-cpu, where we don't want to incur the extra - * WRITE_ONCE() overhead of a regular list_del_init(). 
The code that uses this - * needs to check the node 'prev' pointer instead of calling list_empty(). - */ -static inline void __list_del_clearprev(struct list_head *entry) -{ - __list_del(entry->prev, entry->next); - entry->prev = NULL; -} - -static inline void __list_del_entry(struct list_head *entry) -{ - if (!__list_del_entry_valid(entry)) - return; - - __list_del(entry->prev, entry->next); -} - /** * list_del - deletes entry from list. * @entry: the element to delete from the list. * Note: list_empty() on entry does not return true after this, the entry is * in an undefined state. */ +#ifndef CONFIG_DEBUG_LIST +static inline void __list_del_entry(struct list_head *entry) +{ + __list_del(entry->prev, entry->next); +} + static inline void list_del(struct list_head *entry) { - __list_del_entry(entry); + __list_del(entry->prev, entry->next); entry->next = LIST_POISON1; entry->prev = LIST_POISON2; } +#else +extern void __list_del_entry(struct list_head *entry); +extern void list_del(struct list_head *entry); +#endif + +extern void __pax_list_add(struct list_head *new, + struct list_head *prev, + struct list_head *next); +static inline void pax_list_add(struct list_head *new, struct list_head *head) +{ + __pax_list_add(new, head, head->next); +} +static inline void pax_list_add_tail(struct list_head *new, struct list_head *head) +{ + __pax_list_add(new, head->prev, head); +} +extern void pax_list_del(struct list_head *entry); /** * list_replace - replace old entry by new one @@ -164,37 +142,13 @@ static inline void list_replace(struct list_head *old, new->prev->next = new; } -/** - * list_replace_init - replace old entry by new one and initialize the old one - * @old : the element to be replaced - * @new : the new element to insert - * - * If @old was empty, it will be overwritten. 
- */ static inline void list_replace_init(struct list_head *old, - struct list_head *new) + struct list_head *new) { list_replace(old, new); INIT_LIST_HEAD(old); } -/** - * list_swap - replace entry1 with entry2 and re-add entry1 at entry2's position - * @entry1: the location to place entry2 - * @entry2: the location to place entry1 - */ -static inline void list_swap(struct list_head *entry1, - struct list_head *entry2) -{ - struct list_head *pos = entry2->prev; - - list_del(entry2); - list_replace(entry1, entry2); - if (pos == entry1) - pos = entry2; - list_add(entry1, pos); -} - /** * list_del_init - deletes entry from list and reinitialize it. * @entry: the element to delete from the list. @@ -205,6 +159,8 @@ static inline void list_del_init(struct list_head *entry) INIT_LIST_HEAD(entry); } +extern void pax_list_del_init(struct list_head *entry); + /** * list_move - delete from one list and add as another's head * @list: the entry to move @@ -228,40 +184,6 @@ static inline void list_move_tail(struct list_head *list, list_add_tail(list, head); } -/** - * list_bulk_move_tail - move a subsection of a list to its tail - * @head: the head that will follow our entry - * @first: first entry to move - * @last: last entry to move, can be the same as first - * - * Move all entries between @first and including @last before @head. - * All three entries must belong to the same linked list. 
- */ -static inline void list_bulk_move_tail(struct list_head *head, - struct list_head *first, - struct list_head *last) -{ - first->prev->next = last->next; - last->next->prev = first->prev; - - head->prev->next = first; - first->prev = head->prev; - - last->next = head; - head->prev = last; -} - -/** - * list_is_first -- tests whether @list is the first entry in list @head - * @list: the entry to test - * @head: the head of the list - */ -static inline int list_is_first(const struct list_head *list, - const struct list_head *head) -{ - return list->prev == head; -} - /** * list_is_last - tests whether @list is the last entry in list @head * @list: the entry to test @@ -282,24 +204,6 @@ static inline int list_empty(const struct list_head *head) return READ_ONCE(head->next) == head; } -/** - * list_del_init_careful - deletes entry from list and reinitialize it. - * @entry: the element to delete from the list. - * - * This is the same as list_del_init(), except designed to be used - * together with list_empty_careful() in a way to guarantee ordering - * of other memory operations. - * - * Any memory operations done before a list_del_init_careful() are - * guaranteed to be visible after a list_empty_careful() test. - */ -static inline void list_del_init_careful(struct list_head *entry) -{ - __list_del_entry(entry); - entry->prev = entry; - smp_store_release(&entry->next, entry); -} - /** * list_empty_careful - tests whether a list is empty and not being modified * @head: the list to test @@ -315,7 +219,7 @@ static inline void list_del_init_careful(struct list_head *entry) */ static inline int list_empty_careful(const struct list_head *head) { - struct list_head *next = smp_load_acquire(&head->next); + struct list_head *next = head->next; return (next == head) && (next == head->prev); } @@ -333,24 +237,6 @@ static inline void list_rotate_left(struct list_head *head) } } -/** - * list_rotate_to_front() - Rotate list to specific item. 
- * @list: The desired new front of the list. - * @head: The head of the list. - * - * Rotates list so that @list becomes the new front of the list. - */ -static inline void list_rotate_to_front(struct list_head *list, - struct list_head *head) -{ - /* - * Deletes the list head from the list denoted by @head and - * places it as the tail of @list, this effectively rotates the - * list so that @list is at the front. - */ - list_move_tail(head, list); -} - /** * list_is_singular - tests whether a list has just one entry. * @head: the list to test. @@ -400,36 +286,6 @@ static inline void list_cut_position(struct list_head *list, __list_cut_position(list, head, entry); } -/** - * list_cut_before - cut a list into two, before given entry - * @list: a new list to add all removed entries - * @head: a list with entries - * @entry: an entry within head, could be the head itself - * - * This helper moves the initial part of @head, up to but - * excluding @entry, from @head to @list. You should pass - * in @entry an element you know is on @head. @list should - * be an empty list or a list you do not care about losing - * its data. - * If @entry == @head, all entries on @head are moved to - * @list. - */ -static inline void list_cut_before(struct list_head *list, - struct list_head *head, - struct list_head *entry) -{ - if (head->next == entry) { - INIT_LIST_HEAD(list); - return; - } - list->next = head->next; - list->next->prev = list; - list->prev = entry->prev; - list->prev->next = list; - head->next = entry; - entry->prev = head; -} - static inline void __list_splice(const struct list_head *list, struct list_head *prev, struct list_head *next) @@ -570,16 +426,6 @@ static inline void list_splice_tail_init(struct list_head *list, #define list_for_each(pos, head) \ for (pos = (head)->next; pos != (head); pos = pos->next) -/** - * list_for_each_continue - continue iteration over a list - * @pos: the &struct list_head to use as a loop cursor. - * @head: the head for your list. 
- * - * Continue to iterate over a list, continuing after the current position. - */ -#define list_for_each_continue(pos, head) \ - for (pos = pos->next; pos != (head); pos = pos->next) - /** * list_for_each_prev - iterate over a list backwards * @pos: the &struct list_head to use as a loop cursor. @@ -609,15 +455,6 @@ static inline void list_splice_tail_init(struct list_head *list, pos != (head); \ pos = n, n = pos->prev) -/** - * list_entry_is_head - test if the entry points to the head of the list - * @pos: the type * to cursor - * @head: the head for your list. - * @member: the name of the list_head within the struct. - */ -#define list_entry_is_head(pos, head, member) \ - (&pos->member == (head)) - /** * list_for_each_entry - iterate over list of given type * @pos: the type * to use as a loop cursor. @@ -626,7 +463,7 @@ static inline void list_splice_tail_init(struct list_head *list, */ #define list_for_each_entry(pos, head, member) \ for (pos = list_first_entry(head, typeof(*pos), member); \ - !list_entry_is_head(pos, head, member); \ + &pos->member != (head); \ pos = list_next_entry(pos, member)) /** @@ -637,7 +474,7 @@ static inline void list_splice_tail_init(struct list_head *list, */ #define list_for_each_entry_reverse(pos, head, member) \ for (pos = list_last_entry(head, typeof(*pos), member); \ - !list_entry_is_head(pos, head, member); \ + &pos->member != (head); \ pos = list_prev_entry(pos, member)) /** @@ -662,7 +499,7 @@ static inline void list_splice_tail_init(struct list_head *list, */ #define list_for_each_entry_continue(pos, head, member) \ for (pos = list_next_entry(pos, member); \ - !list_entry_is_head(pos, head, member); \ + &pos->member != (head); \ pos = list_next_entry(pos, member)) /** @@ -676,7 +513,7 @@ static inline void list_splice_tail_init(struct list_head *list, */ #define list_for_each_entry_continue_reverse(pos, head, member) \ for (pos = list_prev_entry(pos, member); \ - !list_entry_is_head(pos, head, member); \ + &pos->member != 
(head); \ pos = list_prev_entry(pos, member)) /** @@ -688,22 +525,9 @@ static inline void list_splice_tail_init(struct list_head *list, * Iterate over list of given type, continuing from current position. */ #define list_for_each_entry_from(pos, head, member) \ - for (; !list_entry_is_head(pos, head, member); \ + for (; &pos->member != (head); \ pos = list_next_entry(pos, member)) -/** - * list_for_each_entry_from_reverse - iterate backwards over list of given type - * from the current point - * @pos: the type * to use as a loop cursor. - * @head: the head for your list. - * @member: the name of the list_head within the struct. - * - * Iterate backwards over list of given type, continuing from current position. - */ -#define list_for_each_entry_from_reverse(pos, head, member) \ - for (; !list_entry_is_head(pos, head, member); \ - pos = list_prev_entry(pos, member)) - /** * list_for_each_entry_safe - iterate over list of given type safe against removal of list entry * @pos: the type * to use as a loop cursor. 
@@ -714,7 +538,7 @@ static inline void list_splice_tail_init(struct list_head *list, #define list_for_each_entry_safe(pos, n, head, member) \ for (pos = list_first_entry(head, typeof(*pos), member), \ n = list_next_entry(pos, member); \ - !list_entry_is_head(pos, head, member); \ + &pos->member != (head); \ pos = n, n = list_next_entry(n, member)) /** @@ -730,7 +554,7 @@ static inline void list_splice_tail_init(struct list_head *list, #define list_for_each_entry_safe_continue(pos, n, head, member) \ for (pos = list_next_entry(pos, member), \ n = list_next_entry(pos, member); \ - !list_entry_is_head(pos, head, member); \ + &pos->member != (head); \ pos = n, n = list_next_entry(n, member)) /** @@ -745,7 +569,7 @@ static inline void list_splice_tail_init(struct list_head *list, */ #define list_for_each_entry_safe_from(pos, n, head, member) \ for (n = list_next_entry(pos, member); \ - !list_entry_is_head(pos, head, member); \ + &pos->member != (head); \ pos = n, n = list_next_entry(n, member)) /** @@ -761,7 +585,7 @@ static inline void list_splice_tail_init(struct list_head *list, #define list_for_each_entry_safe_reverse(pos, n, head, member) \ for (pos = list_last_entry(head, typeof(*pos), member), \ n = list_prev_entry(pos, member); \ - !list_entry_is_head(pos, head, member); \ + &pos->member != (head); \ pos = n, n = list_prev_entry(n, member)) /** @@ -795,36 +619,11 @@ static inline void INIT_HLIST_NODE(struct hlist_node *h) h->pprev = NULL; } -/** - * hlist_unhashed - Has node been removed from list and reinitialized? - * @h: Node to be checked - * - * Not that not all removal functions will leave a node in unhashed - * state. For example, hlist_nulls_del_init_rcu() does leave the - * node in unhashed state, but hlist_nulls_del() does not. 
- */ static inline int hlist_unhashed(const struct hlist_node *h) { return !h->pprev; } -/** - * hlist_unhashed_lockless - Version of hlist_unhashed for lockless use - * @h: Node to be checked - * - * This variant of hlist_unhashed() must be used in lockless contexts - * to avoid potential load-tearing. The READ_ONCE() is paired with the - * various WRITE_ONCE() in hlist helpers that are defined below. - */ -static inline int hlist_unhashed_lockless(const struct hlist_node *h) -{ - return !READ_ONCE(h->pprev); -} - -/** - * hlist_empty - Is the specified hlist_head structure an empty hlist? - * @h: Structure to check. - */ static inline int hlist_empty(const struct hlist_head *h) { return !READ_ONCE(h->first); @@ -837,16 +636,9 @@ static inline void __hlist_del(struct hlist_node *n) WRITE_ONCE(*pprev, next); if (next) - WRITE_ONCE(next->pprev, pprev); + next->pprev = pprev; } -/** - * hlist_del - Delete the specified hlist_node from its list - * @n: Node to delete. - * - * Note that this function leaves the node in hashed state. Use - * hlist_del_init() or similar instead to unhash @n. - */ static inline void hlist_del(struct hlist_node *n) { __hlist_del(n); @@ -854,12 +646,6 @@ static inline void hlist_del(struct hlist_node *n) n->pprev = LIST_POISON2; } -/** - * hlist_del_init - Delete the specified hlist_node from its list and initialize - * @n: Node to delete. - * - * Note that this function leaves the node in unhashed state. - */ static inline void hlist_del_init(struct hlist_node *n) { if (!hlist_unhashed(n)) { @@ -868,83 +654,51 @@ static inline void hlist_del_init(struct hlist_node *n) } } -/** - * hlist_add_head - add a new entry at the beginning of the hlist - * @n: new entry to be added - * @h: hlist head to add it after - * - * Insert a new entry after the specified head. - * This is good for implementing stacks. 
- */ static inline void hlist_add_head(struct hlist_node *n, struct hlist_head *h) { struct hlist_node *first = h->first; - WRITE_ONCE(n->next, first); + n->next = first; if (first) - WRITE_ONCE(first->pprev, &n->next); + first->pprev = &n->next; WRITE_ONCE(h->first, n); - WRITE_ONCE(n->pprev, &h->first); + n->pprev = &h->first; } -/** - * hlist_add_before - add a new entry before the one specified - * @n: new entry to be added - * @next: hlist node to add it before, which must be non-NULL - */ +/* next must be != NULL */ static inline void hlist_add_before(struct hlist_node *n, - struct hlist_node *next) + struct hlist_node *next) { - WRITE_ONCE(n->pprev, next->pprev); - WRITE_ONCE(n->next, next); - WRITE_ONCE(next->pprev, &n->next); + n->pprev = next->pprev; + n->next = next; + next->pprev = &n->next; WRITE_ONCE(*(n->pprev), n); } -/** - * hlist_add_behind - add a new entry after the one specified - * @n: new entry to be added - * @prev: hlist node to add it after, which must be non-NULL - */ static inline void hlist_add_behind(struct hlist_node *n, struct hlist_node *prev) { - WRITE_ONCE(n->next, prev->next); + n->next = prev->next; WRITE_ONCE(prev->next, n); - WRITE_ONCE(n->pprev, &prev->next); + n->pprev = &prev->next; if (n->next) - WRITE_ONCE(n->next->pprev, &n->next); + n->next->pprev = &n->next; } -/** - * hlist_add_fake - create a fake hlist consisting of a single headless node - * @n: Node to make a fake list out of - * - * This makes @n appear to be its own predecessor on a headless hlist. - * The point of this is to allow things like hlist_del() to work correctly - * in cases where there is no list. - */ +/* after that we'll appear to be on some hlist and hlist_del will work */ static inline void hlist_add_fake(struct hlist_node *n) { n->pprev = &n->next; } -/** - * hlist_fake: Is this node a fake hlist? - * @h: Node to check for being a self-referential fake hlist. 
- */ static inline bool hlist_fake(struct hlist_node *h) { return h->pprev == &h->next; } -/** - * hlist_is_singular_node - is node the only element of the specified hlist? - * @n: Node to check for singularity. - * @h: Header for potentially singular list. - * +/* * Check whether the node is the only node of the head without - * accessing head, thus avoiding unnecessary cache misses. + * accessing head: */ static inline bool hlist_is_singular_node(struct hlist_node *n, struct hlist_head *h) @@ -952,11 +706,7 @@ hlist_is_singular_node(struct hlist_node *n, struct hlist_head *h) return !n->next && n->pprev == &h->first; } -/** - * hlist_move_list - Move an hlist - * @old: hlist_head for old list. - * @new: hlist_head for new list. - * +/* * Move a list from one list head to another. Fixup the pprev * reference of the first entry if it exists. */ @@ -1016,7 +766,7 @@ static inline void hlist_move_list(struct hlist_head *old, /** * hlist_for_each_entry_safe - iterate over list of given type safe against removal of list entry * @pos: the type * to use as a loop cursor. - * @n: a &struct hlist_node to use as temporary storage + * @n: another &struct hlist_node to use as temporary storage * @head: the head for your list. * @member: the name of the hlist_node within the struct. 
*/ diff --git a/include/linux/list_bl.h b/include/linux/list_bl.h index ae1b541446..cb483305e1 100644 --- a/include/linux/list_bl.h +++ b/include/linux/list_bl.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_LIST_BL_H #define _LINUX_LIST_BL_H @@ -86,32 +85,6 @@ static inline void hlist_bl_add_head(struct hlist_bl_node *n, hlist_bl_set_first(h, n); } -static inline void hlist_bl_add_before(struct hlist_bl_node *n, - struct hlist_bl_node *next) -{ - struct hlist_bl_node **pprev = next->pprev; - - n->pprev = pprev; - n->next = next; - next->pprev = &n->next; - - /* pprev may be `first`, so be careful not to lose the lock bit */ - WRITE_ONCE(*pprev, - (struct hlist_bl_node *) - ((uintptr_t)n | ((uintptr_t)*pprev & LIST_BL_LOCKMASK))); -} - -static inline void hlist_bl_add_behind(struct hlist_bl_node *n, - struct hlist_bl_node *prev) -{ - n->next = prev->next; - n->pprev = &prev->next; - prev->next = n; - - if (n->next) - n->next->pprev = &n->next; -} - static inline void __hlist_bl_del(struct hlist_bl_node *n) { struct hlist_bl_node *next = n->next; diff --git a/include/linux/list_lru.h b/include/linux/list_lru.h index 1b5fceb565..cb0ba9f2a9 100644 --- a/include/linux/list_lru.h +++ b/include/linux/list_lru.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ /* * Copyright (c) 2013 Red Hat, Inc. and Parallels Inc. All rights reserved. 
* Authors: David Chinner and Glauber Costa @@ -32,9 +31,8 @@ struct list_lru_one { }; struct list_lru_memcg { - struct rcu_head rcu; /* array of per cgroup lists, indexed by memcg_cache_id */ - struct list_lru_one *lru[]; + struct list_lru_one *lru[0]; }; struct list_lru_node { @@ -42,35 +40,29 @@ struct list_lru_node { spinlock_t lock; /* global list, used for the root cgroup in cgroup aware lrus */ struct list_lru_one lru; -#ifdef CONFIG_MEMCG_KMEM +#if defined(CONFIG_MEMCG) && !defined(CONFIG_SLOB) /* for cgroup aware lrus points to per cgroup lists, otherwise NULL */ - struct list_lru_memcg __rcu *memcg_lrus; + struct list_lru_memcg *memcg_lrus; #endif - long nr_items; } ____cacheline_aligned_in_smp; struct list_lru { struct list_lru_node *node; -#ifdef CONFIG_MEMCG_KMEM +#if defined(CONFIG_MEMCG) && !defined(CONFIG_SLOB) struct list_head list; - int shrinker_id; - bool memcg_aware; #endif }; void list_lru_destroy(struct list_lru *lru); int __list_lru_init(struct list_lru *lru, bool memcg_aware, - struct lock_class_key *key, struct shrinker *shrinker); + struct lock_class_key *key); -#define list_lru_init(lru) \ - __list_lru_init((lru), false, NULL, NULL) -#define list_lru_init_key(lru, key) \ - __list_lru_init((lru), false, (key), NULL) -#define list_lru_init_memcg(lru, shrinker) \ - __list_lru_init((lru), true, NULL, shrinker) +#define list_lru_init(lru) __list_lru_init((lru), false, NULL) +#define list_lru_init_key(lru, key) __list_lru_init((lru), false, (key)) +#define list_lru_init_memcg(lru) __list_lru_init((lru), true, NULL) int memcg_update_all_list_lrus(int num_memcgs); -void memcg_drain_all_list_lrus(int src_idx, struct mem_cgroup *dst_memcg); +void memcg_drain_all_list_lrus(int src_idx, int dst_idx); /** * list_lru_add: add an element to the lru list's tail @@ -146,7 +138,7 @@ typedef enum lru_status (*list_lru_walk_cb)(struct list_head *item, * @lru: the lru pointer. * @nid: the node id to scan from. * @memcg: the cgroup to scan from. 
- * @isolate: callback function that is responsible for deciding what to do with + * @isolate: callback function that is resposible for deciding what to do with * the item currently being scanned * @cb_arg: opaque type that will be passed to @isolate * @nr_to_walk: how many items to scan. @@ -167,23 +159,6 @@ unsigned long list_lru_walk_one(struct list_lru *lru, int nid, struct mem_cgroup *memcg, list_lru_walk_cb isolate, void *cb_arg, unsigned long *nr_to_walk); -/** - * list_lru_walk_one_irq: walk a list_lru, isolating and disposing freeable items. - * @lru: the lru pointer. - * @nid: the node id to scan from. - * @memcg: the cgroup to scan from. - * @isolate: callback function that is responsible for deciding what to do with - * the item currently being scanned - * @cb_arg: opaque type that will be passed to @isolate - * @nr_to_walk: how many items to scan. - * - * Same as @list_lru_walk_one except that the spinlock is acquired with - * spin_lock_irq(). - */ -unsigned long list_lru_walk_one_irq(struct list_lru *lru, - int nid, struct mem_cgroup *memcg, - list_lru_walk_cb isolate, void *cb_arg, - unsigned long *nr_to_walk); unsigned long list_lru_walk_node(struct list_lru *lru, int nid, list_lru_walk_cb isolate, void *cb_arg, unsigned long *nr_to_walk); @@ -196,14 +171,6 @@ list_lru_shrink_walk(struct list_lru *lru, struct shrink_control *sc, &sc->nr_to_scan); } -static inline unsigned long -list_lru_shrink_walk_irq(struct list_lru *lru, struct shrink_control *sc, - list_lru_walk_cb isolate, void *cb_arg) -{ - return list_lru_walk_one_irq(lru, sc->nid, sc->memcg, isolate, cb_arg, - &sc->nr_to_scan); -} - static inline unsigned long list_lru_walk(struct list_lru *lru, list_lru_walk_cb isolate, void *cb_arg, unsigned long nr_to_walk) diff --git a/include/linux/list_nulls.h b/include/linux/list_nulls.h index fa6e8471bd..b01fe10090 100644 --- a/include/linux/list_nulls.h +++ b/include/linux/list_nulls.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef 
_LINUX_LIST_NULLS_H #define _LINUX_LIST_NULLS_H @@ -30,11 +29,6 @@ struct hlist_nulls_node { ((ptr)->first = (struct hlist_nulls_node *) NULLS_MARKER(nulls)) #define hlist_nulls_entry(ptr, type, member) container_of(ptr,type,member) - -#define hlist_nulls_entry_safe(ptr, type, member) \ - ({ typeof(ptr) ____ptr = (ptr); \ - !is_a_nulls(____ptr) ? hlist_nulls_entry(____ptr, type, member) : NULL; \ - }) /** * ptr_is_a_nulls - Test if a ptr is a nulls * @ptr: ptr to be tested @@ -56,33 +50,11 @@ static inline unsigned long get_nulls_value(const struct hlist_nulls_node *ptr) return ((unsigned long)ptr) >> 1; } -/** - * hlist_nulls_unhashed - Has node been removed and reinitialized? - * @h: Node to be checked - * - * Not that not all removal functions will leave a node in unhashed state. - * For example, hlist_del_init_rcu() leaves the node in unhashed state, - * but hlist_nulls_del() does not. - */ static inline int hlist_nulls_unhashed(const struct hlist_nulls_node *h) { return !h->pprev; } -/** - * hlist_nulls_unhashed_lockless - Has node been removed and reinitialized? - * @h: Node to be checked - * - * Not that not all removal functions will leave a node in unhashed state. - * For example, hlist_del_init_rcu() leaves the node in unhashed state, - * but hlist_nulls_del() does not. Unlike hlist_nulls_unhashed(), this - * function may be used locklessly. 
- */ -static inline int hlist_nulls_unhashed_lockless(const struct hlist_nulls_node *h) -{ - return !READ_ONCE(h->pprev); -} - static inline int hlist_nulls_empty(const struct hlist_nulls_head *h) { return is_a_nulls(READ_ONCE(h->first)); @@ -94,10 +66,10 @@ static inline void hlist_nulls_add_head(struct hlist_nulls_node *n, struct hlist_nulls_node *first = h->first; n->next = first; - WRITE_ONCE(n->pprev, &h->first); + n->pprev = &h->first; h->first = n; if (!is_a_nulls(first)) - WRITE_ONCE(first->pprev, &n->next); + first->pprev = &n->next; } static inline void __hlist_nulls_del(struct hlist_nulls_node *n) @@ -107,13 +79,13 @@ static inline void __hlist_nulls_del(struct hlist_nulls_node *n) WRITE_ONCE(*pprev, next); if (!is_a_nulls(next)) - WRITE_ONCE(next->pprev, pprev); + next->pprev = pprev; } static inline void hlist_nulls_del(struct hlist_nulls_node *n) { __hlist_nulls_del(n); - WRITE_ONCE(n->pprev, LIST_POISON2); + n->pprev = LIST_POISON2; } /** diff --git a/include/linux/list_sort.h b/include/linux/list_sort.h index 453105f74e..1a2df2efb7 100644 --- a/include/linux/list_sort.h +++ b/include/linux/list_sort.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_LIST_SORT_H #define _LINUX_LIST_SORT_H @@ -6,9 +5,7 @@ struct list_head; -typedef int __attribute__((nonnull(2,3))) (*list_cmp_func_t)(void *, - const struct list_head *, const struct list_head *); - -__attribute__((nonnull(2,3))) -void list_sort(void *priv, struct list_head *head, list_cmp_func_t cmp); +void list_sort(void *priv, struct list_head *head, + int (*cmp)(void *priv, struct list_head *a, + struct list_head *b)); #endif diff --git a/include/linux/livepatch.h b/include/linux/livepatch.h index 2614247a97..9072f04db6 100644 --- a/include/linux/livepatch.h +++ b/include/linux/livepatch.h @@ -1,9 +1,21 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * livepatch.h - Kernel Live Patching Core * * Copyright (C) 2014 Seth Jennings * Copyright (C) 2014 SUSE + * + * This 
program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, see . */ #ifndef _LINUX_LIVEPATCH_H_ @@ -11,17 +23,15 @@ #include #include -#include -#include #if IS_ENABLED(CONFIG_LIVEPATCH) #include -/* task patch states */ -#define KLP_UNDEFINED -1 -#define KLP_UNPATCHED 0 -#define KLP_PATCHED 1 +enum klp_state { + KLP_DISABLED, + KLP_ENABLED +}; /** * struct klp_func - function structure for live patching @@ -29,30 +39,10 @@ * @new_func: pointer to the patched function code * @old_sympos: a hint indicating which symbol position the old function * can be found (optional) - * @old_func: pointer to the function being patched + * @old_addr: the address of the function being patched * @kobj: kobject for sysfs resources - * @node: list node for klp_object func_list + * @state: tracks function-level patch application state * @stack_node: list node for klp_ops func_stack list - * @old_size: size of the old function - * @new_size: size of the new function - * @nop: temporary patch to use the original code again; dyn. allocated - * @patched: the func has been added to the klp_ops list - * @transition: the func is currently being applied or reverted - * - * The patched and transition variables define the func's patching state. 
When - * patching, a func is always in one of the following states: - * - * patched=0 transition=0: unpatched - * patched=0 transition=1: unpatched, temporary starting state - * patched=1 transition=1: patched, may be visible to some tasks - * patched=1 transition=0: patched, visible to all tasks - * - * And when unpatching, it goes in the reverse order: - * - * patched=1 transition=0: patched, visible to all tasks - * patched=1 transition=1: patched, may be visible to some tasks - * patched=0 transition=1: unpatched, temporary ending state - * patched=0 transition=0: unpatched */ struct klp_func { /* external */ @@ -68,190 +58,75 @@ struct klp_func { unsigned long old_sympos; /* internal */ - void *old_func; + unsigned long old_addr; struct kobject kobj; - struct list_head node; + enum klp_state state; struct list_head stack_node; - unsigned long old_size, new_size; - bool nop; - bool patched; - bool transition; -}; - -struct klp_object; - -/** - * struct klp_callbacks - pre/post live-(un)patch callback structure - * @pre_patch: executed before code patching - * @post_patch: executed after code patching - * @pre_unpatch: executed before code unpatching - * @post_unpatch: executed after code unpatching - * @post_unpatch_enabled: flag indicating if post-unpatch callback - * should run - * - * All callbacks are optional. Only the pre-patch callback, if provided, - * will be unconditionally executed. If the parent klp_object fails to - * patch for any reason, including a non-zero error status returned from - * the pre-patch callback, no further callbacks will be executed. 
- */ -struct klp_callbacks { - int (*pre_patch)(struct klp_object *obj); - void (*post_patch)(struct klp_object *obj); - void (*pre_unpatch)(struct klp_object *obj); - void (*post_unpatch)(struct klp_object *obj); - bool post_unpatch_enabled; }; /** * struct klp_object - kernel object structure for live patching * @name: module name (or NULL for vmlinux) * @funcs: function entries for functions to be patched in the object - * @callbacks: functions to be executed pre/post (un)patching * @kobj: kobject for sysfs resources - * @func_list: dynamic list of the function entries - * @node: list node for klp_patch obj_list * @mod: kernel module associated with the patched object - * (NULL for vmlinux) - * @dynamic: temporary object for nop functions; dynamically allocated - * @patched: the object's funcs have been added to the klp_ops list + * (NULL for vmlinux) + * @state: tracks object-level patch application state */ struct klp_object { /* external */ const char *name; struct klp_func *funcs; - struct klp_callbacks callbacks; /* internal */ struct kobject kobj; - struct list_head func_list; - struct list_head node; struct module *mod; - bool dynamic; - bool patched; -}; - -/** - * struct klp_state - state of the system modified by the livepatch - * @id: system state identifier (non-zero) - * @version: version of the change - * @data: custom data - */ -struct klp_state { - unsigned long id; - unsigned int version; - void *data; + enum klp_state state; }; /** * struct klp_patch - patch structure for live patching * @mod: reference to the live patch module * @objs: object entries for kernel objects to be patched - * @states: system states that can get modified - * @replace: replace all actively used patches - * @list: list node for global list of actively used patches + * @list: list node for global list of registered patches * @kobj: kobject for sysfs resources - * @obj_list: dynamic list of the object entries - * @enabled: the patch is enabled (but operation may be 
incomplete) - * @forced: was involved in a forced transition - * @free_work: patch cleanup from workqueue-context - * @finish: for waiting till it is safe to remove the patch module + * @state: tracks patch-level application state */ struct klp_patch { /* external */ struct module *mod; struct klp_object *objs; - struct klp_state *states; - bool replace; /* internal */ struct list_head list; struct kobject kobj; - struct list_head obj_list; - bool enabled; - bool forced; - struct work_struct free_work; - struct completion finish; + enum klp_state state; }; -#define klp_for_each_object_static(patch, obj) \ +#define klp_for_each_object(patch, obj) \ for (obj = patch->objs; obj->funcs || obj->name; obj++) -#define klp_for_each_object_safe(patch, obj, tmp_obj) \ - list_for_each_entry_safe(obj, tmp_obj, &patch->obj_list, node) - -#define klp_for_each_object(patch, obj) \ - list_for_each_entry(obj, &patch->obj_list, node) - -#define klp_for_each_func_static(obj, func) \ +#define klp_for_each_func(obj, func) \ for (func = obj->funcs; \ func->old_name || func->new_func || func->old_sympos; \ func++) -#define klp_for_each_func_safe(obj, func, tmp_func) \ - list_for_each_entry_safe(func, tmp_func, &obj->func_list, node) - -#define klp_for_each_func(obj, func) \ - list_for_each_entry(func, &obj->func_list, node) - +int klp_register_patch(struct klp_patch *); +int klp_unregister_patch(struct klp_patch *); int klp_enable_patch(struct klp_patch *); +int klp_disable_patch(struct klp_patch *); + +void arch_klp_init_object_loaded(struct klp_patch *patch, + struct klp_object *obj); /* Called from the module loader during module coming/going states */ int klp_module_coming(struct module *mod); void klp_module_going(struct module *mod); -void klp_copy_process(struct task_struct *child); -void klp_update_patch_state(struct task_struct *task); - -static inline bool klp_patch_pending(struct task_struct *task) -{ - return test_tsk_thread_flag(task, TIF_PATCH_PENDING); -} - -static inline 
bool klp_have_reliable_stack(void) -{ - return IS_ENABLED(CONFIG_STACKTRACE) && - IS_ENABLED(CONFIG_HAVE_RELIABLE_STACKTRACE); -} - -typedef int (*klp_shadow_ctor_t)(void *obj, - void *shadow_data, - void *ctor_data); -typedef void (*klp_shadow_dtor_t)(void *obj, void *shadow_data); - -void *klp_shadow_get(void *obj, unsigned long id); -void *klp_shadow_alloc(void *obj, unsigned long id, - size_t size, gfp_t gfp_flags, - klp_shadow_ctor_t ctor, void *ctor_data); -void *klp_shadow_get_or_alloc(void *obj, unsigned long id, - size_t size, gfp_t gfp_flags, - klp_shadow_ctor_t ctor, void *ctor_data); -void klp_shadow_free(void *obj, unsigned long id, klp_shadow_dtor_t dtor); -void klp_shadow_free_all(unsigned long id, klp_shadow_dtor_t dtor); - -struct klp_state *klp_get_state(struct klp_patch *patch, unsigned long id); -struct klp_state *klp_get_prev_state(unsigned long id); - -int klp_apply_section_relocs(struct module *pmod, Elf_Shdr *sechdrs, - const char *shstrtab, const char *strtab, - unsigned int symindex, unsigned int secindex, - const char *objname); - #else /* !CONFIG_LIVEPATCH */ static inline int klp_module_coming(struct module *mod) { return 0; } -static inline void klp_module_going(struct module *mod) {} -static inline bool klp_patch_pending(struct task_struct *task) { return false; } -static inline void klp_update_patch_state(struct task_struct *task) {} -static inline void klp_copy_process(struct task_struct *child) {} - -static inline -int klp_apply_section_relocs(struct module *pmod, Elf_Shdr *sechdrs, - const char *shstrtab, const char *strtab, - unsigned int symindex, unsigned int secindex, - const char *objname) -{ - return 0; -} +static inline void klp_module_going(struct module *mod) { } #endif /* CONFIG_LIVEPATCH */ diff --git a/include/linux/llist.h b/include/linux/llist.h index 24f207b019..d77d4a87ba 100644 --- a/include/linux/llist.h +++ b/include/linux/llist.h @@ -1,36 +1,30 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ #ifndef LLIST_H 
#define LLIST_H /* * Lock-less NULL terminated single linked list * - * Cases where locking is not needed: - * If there are multiple producers and multiple consumers, llist_add can be - * used in producers and llist_del_all can be used in consumers simultaneously - * without locking. Also a single consumer can use llist_del_first while - * multiple producers simultaneously use llist_add, without any locking. + * If there are multiple producers and multiple consumers, llist_add + * can be used in producers and llist_del_all can be used in + * consumers. They can work simultaneously without lock. But + * llist_del_first can not be used here. Because llist_del_first + * depends on list->first->next does not changed if list->first is not + * changed during its operation, but llist_del_first, llist_add, + * llist_add (or llist_del_all, llist_add, llist_add) sequence in + * another consumer may violate that. * - * Cases where locking is needed: - * If we have multiple consumers with llist_del_first used in one consumer, and - * llist_del_first or llist_del_all used in other consumers, then a lock is - * needed. This is because llist_del_first depends on list->first->next not - * changing, but without lock protection, there's no way to be sure about that - * if a preemption happens in the middle of the delete operation and on being - * preempted back, the list->first is the same as before causing the cmpxchg in - * llist_del_first to succeed. For example, while a llist_del_first operation - * is in progress in one consumer, then a llist_del_first, llist_add, - * llist_add (or llist_del_all, llist_add, llist_add) sequence in another - * consumer may cause violations. + * If there are multiple producers and one consumer, llist_add can be + * used in producers and llist_del_all or llist_del_first can be used + * in the consumer. 
* - * This can be summarized as follows: + * This can be summarized as follow: * * | add | del_first | del_all * add | - | - | - * del_first | | L | L * del_all | | | - * - * Where, a particular row's operation can happen concurrently with a column's - * operation, with "-" being no lock needed, while "L" being lock is needed. + * Where "-" stands for no lock is needed, while "L" stands for lock + * is needed. * * The list entries deleted via llist_del_all can be traversed with * traversing function such as llist_for_each etc. But the list @@ -46,6 +40,19 @@ * * Copyright 2010,2011 Intel Corp. * Author: Huang Ying + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License version + * 2 as published by the Free Software Foundation; + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include @@ -80,23 +87,6 @@ static inline void init_llist_head(struct llist_head *list) #define llist_entry(ptr, type, member) \ container_of(ptr, type, member) -/** - * member_address_is_nonnull - check whether the member address is not NULL - * @ptr: the object pointer (struct type * that contains the llist_node) - * @member: the name of the llist_node within the struct. - * - * This macro is conceptually the same as - * &ptr->member != NULL - * but it works around the fact that compilers can decide that taking a member - * address is never a NULL pointer. - * - * Real objects that start at a high address and have a member at NULL are - * unlikely to exist, but such pointers may be returned e.g. 
by the - * container_of() macro. - */ -#define member_address_is_nonnull(ptr, member) \ - ((uintptr_t)(ptr) + offsetof(typeof(*(ptr)), member) != 0) - /** * llist_for_each - iterate over some deleted entries of a lock-less list * @pos: the &struct llist_node to use as a loop cursor @@ -114,25 +104,6 @@ static inline void init_llist_head(struct llist_head *list) #define llist_for_each(pos, node) \ for ((pos) = (node); pos; (pos) = (pos)->next) -/** - * llist_for_each_safe - iterate over some deleted entries of a lock-less list - * safe against removal of list entry - * @pos: the &struct llist_node to use as a loop cursor - * @n: another &struct llist_node to use as temporary storage - * @node: the first entry of deleted list entries - * - * In general, some entries of the lock-less list can be traversed - * safely only after being deleted from list, so start with an entry - * instead of list head. - * - * If being used on entries deleted from lock-less list directly, the - * traverse order is from the newest to the oldest added entry. If - * you want to traverse from the oldest to the newest, you must - * reverse the order by yourself before traversing. - */ -#define llist_for_each_safe(pos, n, node) \ - for ((pos) = (node); (pos) && ((n) = (pos)->next, true); (pos) = (n)) - /** * llist_for_each_entry - iterate over some deleted entries of lock-less list of given type * @pos: the type * to use as a loop cursor. 
@@ -150,7 +121,7 @@ static inline void init_llist_head(struct llist_head *list) */ #define llist_for_each_entry(pos, node, member) \ for ((pos) = llist_entry((node), typeof(*(pos)), member); \ - member_address_is_nonnull(pos, member); \ + &(pos)->member != NULL; \ (pos) = llist_entry((pos)->member.next, typeof(*(pos)), member)) /** @@ -172,7 +143,7 @@ static inline void init_llist_head(struct llist_head *list) */ #define llist_for_each_entry_safe(pos, n, node, member) \ for (pos = llist_entry((node), typeof(*pos), member); \ - member_address_is_nonnull(pos, member) && \ + &pos->member != NULL && \ (n = llist_entry(pos->member.next, typeof(*n), member), true); \ pos = n) @@ -186,7 +157,7 @@ static inline void init_llist_head(struct llist_head *list) */ static inline bool llist_empty(const struct llist_head *head) { - return READ_ONCE(head->first) == NULL; + return ACCESS_ONCE(head->first) == NULL; } static inline struct llist_node *llist_next(struct llist_node *node) @@ -198,15 +169,9 @@ extern bool llist_add_batch(struct llist_node *new_first, struct llist_node *new_last, struct llist_head *head); -static inline bool __llist_add_batch(struct llist_node *new_first, - struct llist_node *new_last, - struct llist_head *head) -{ - new_last->next = head->first; - head->first = new_first; - return new_last->next == NULL; -} - +extern bool pax_llist_add_batch(struct llist_node *new_first, + struct llist_node *new_last, + struct llist_head *head); /** * llist_add - add a new entry * @new: new entry to be added @@ -219,9 +184,9 @@ static inline bool llist_add(struct llist_node *new, struct llist_head *head) return llist_add_batch(new, new, head); } -static inline bool __llist_add(struct llist_node *new, struct llist_head *head) +static inline bool pax_llist_add(struct llist_node *new, struct llist_head *head) { - return __llist_add_batch(new, new, head); + return pax_llist_add_batch(new, new, head); } /** @@ -237,14 +202,6 @@ static inline struct llist_node 
*llist_del_all(struct llist_head *head) return xchg(&head->first, NULL); } -static inline struct llist_node *__llist_del_all(struct llist_head *head) -{ - struct llist_node *first = head->first; - - head->first = NULL; - return first; -} - extern struct llist_node *llist_del_first(struct llist_head *head); struct llist_node *llist_reverse_order(struct llist_node *head); diff --git a/include/linux/lockd/bind.h b/include/linux/lockd/bind.h index 3bc9f7410e..140edab644 100644 --- a/include/linux/lockd/bind.h +++ b/include/linux/lockd/bind.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ /* * linux/include/linux/lockd/bind.h * @@ -19,7 +18,6 @@ /* Dummy declarations */ struct svc_rqst; -struct rpc_task; /* * This is the set of functions for lockd->nfsd communication @@ -27,8 +25,7 @@ struct rpc_task; struct nlmsvc_binding { __be32 (*fopen)(struct svc_rqst *, struct nfs_fh *, - struct file **, - int mode); + struct file **); void (*fclose)(struct file *); }; @@ -46,8 +43,6 @@ struct nlmclnt_initdata { u32 nfs_version; int noresvport; struct net *net; - const struct nlmclnt_operations *nlmclnt_ops; - const struct cred *cred; }; /* @@ -57,27 +52,9 @@ struct nlmclnt_initdata { extern struct nlm_host *nlmclnt_init(const struct nlmclnt_initdata *nlm_init); extern void nlmclnt_done(struct nlm_host *host); -/* - * NLM client operations provide a means to modify RPC processing of NLM - * requests. Callbacks receive a pointer to data passed into the call to - * nlmclnt_proc(). - */ -struct nlmclnt_operations { - /* Called on successful allocation of nlm_rqst, use for allocation or - * reference counting. */ - void (*nlmclnt_alloc_call)(void *); - - /* Called in rpc_task_prepare for unlock. A return value of true - * indicates the callback has put the task to sleep on a waitqueue - * and NLM should not call rpc_call_start(). 
*/ - bool (*nlmclnt_unlock_prepare)(struct rpc_task*, void *); - - /* Called when the nlm_rqst is freed, callbacks should clean up here */ - void (*nlmclnt_release_call)(void *); -}; - -extern int nlmclnt_proc(struct nlm_host *host, int cmd, struct file_lock *fl, void *data); -extern int lockd_up(struct net *net, const struct cred *cred); +extern int nlmclnt_proc(struct nlm_host *host, int cmd, + struct file_lock *fl); +extern int lockd_up(struct net *net); extern void lockd_down(struct net *net); #endif /* LINUX_LOCKD_BIND_H */ diff --git a/include/linux/lockd/debug.h b/include/linux/lockd/debug.h index eede2ab524..0ca8109934 100644 --- a/include/linux/lockd/debug.h +++ b/include/linux/lockd/debug.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ /* * linux/include/linux/lockd/debug.h * @@ -10,6 +9,8 @@ #ifndef LINUX_LOCKD_DEBUG_H #define LINUX_LOCKD_DEBUG_H +#ifdef __KERNEL__ + #include /* @@ -23,6 +24,8 @@ # define ifdebug(flag) if (0) #endif +#endif /* __KERNEL__ */ + /* * Debug flags */ diff --git a/include/linux/lockd/lockd.h b/include/linux/lockd/lockd.h index c4ae6506b8..b37dee3aca 100644 --- a/include/linux/lockd/lockd.h +++ b/include/linux/lockd/lockd.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ /* * linux/include/linux/lockd/lockd.h * @@ -10,14 +9,13 @@ #ifndef LINUX_LOCKD_LOCKD_H #define LINUX_LOCKD_LOCKD_H -/* XXX: a lot of this should really be under fs/lockd. 
*/ +#ifdef __KERNEL__ #include #include #include #include #include -#include #include #include #include @@ -59,7 +57,7 @@ struct nlm_host { u32 h_state; /* pseudo-state counter */ u32 h_nsmstate; /* true remote NSM state */ u32 h_pidcount; /* Pseudopids */ - refcount_t h_count; /* reference count */ + atomic_t h_count; /* reference count */ struct mutex h_mutex; /* mutex for pmap binding */ unsigned long h_nextrebind; /* next portmap call */ unsigned long h_expires; /* eligible for GC */ @@ -70,9 +68,7 @@ struct nlm_host { struct nsm_handle *h_nsmhandle; /* NSM status handle */ char *h_addrbuf; /* address eyecatcher */ struct net *net; /* host net */ - const struct cred *h_cred; char nodename[UNX_MAXNODENAME + 1]; - const struct nlmclnt_operations *h_nlmclnt_ops; /* Callback ops for NLM users */ }; /* @@ -85,7 +81,7 @@ struct nlm_host { struct nsm_handle { struct list_head sm_link; - refcount_t sm_count; + atomic_t sm_count; char *sm_mon_name; char *sm_name; struct sockaddr_storage sm_addr; @@ -124,7 +120,7 @@ static inline struct sockaddr *nlm_srcaddr(const struct nlm_host *host) */ struct nlm_lockowner { struct list_head list; - refcount_t count; + atomic_t count; struct nlm_host *host; fl_owner_t owner; @@ -138,7 +134,7 @@ struct nlm_wait; */ #define NLMCLNT_OHSIZE ((__NEW_UTS_LEN) + 10u) struct nlm_rqst { - refcount_t a_count; + atomic_t a_count; unsigned int a_flags; /* initial RPC task flags */ struct nlm_host * a_host; /* host handle */ struct nlm_args a_args; /* arguments */ @@ -146,7 +142,6 @@ struct nlm_rqst { struct nlm_block * a_block; unsigned int a_retries; /* Retry count */ u8 a_owner[NLMCLNT_OHSIZE]; - void * a_callback_data; /* sent to nlmclnt_operations callbacks */ }; /* @@ -156,8 +151,7 @@ struct nlm_rqst { struct nlm_file { struct hlist_node f_list; /* linked list */ struct nfs_fh f_handle; /* NFS file handle */ - struct file * f_file[2]; /* VFS file pointers, - indexed by O_ flags */ + struct file * f_file; /* VFS file pointer */ struct 
nlm_share * f_shares; /* DOS shares */ struct list_head f_blocks; /* blocked locks */ unsigned int f_locks; /* guesstimate # of locks */ @@ -196,9 +190,9 @@ struct nlm_block { * Global variables */ extern const struct rpc_program nlm_program; -extern const struct svc_procedure nlmsvc_procedures[]; +extern struct svc_procedure nlmsvc_procedures[]; #ifdef CONFIG_LOCKD_V4 -extern const struct svc_procedure nlmsvc_procedures4[]; +extern struct svc_procedure nlmsvc_procedures4[]; #endif extern int nlmsvc_grace_period; extern unsigned long nlmsvc_timeout; @@ -231,8 +225,7 @@ struct nlm_host *nlmclnt_lookup_host(const struct sockaddr *sap, const u32 version, const char *hostname, int noresvport, - struct net *net, - const struct cred *cred); + struct net *net); void nlmclnt_release_host(struct nlm_host *); struct nlm_host *nlmsvc_lookup_host(const struct svc_rqst *rqstp, const char *hostname, @@ -270,7 +263,6 @@ typedef int (*nlm_host_match_fn_t)(void *cur, struct nlm_host *ref); /* * Server-side lock handling */ -int lock_to_openmode(struct file_lock *); __be32 nlmsvc_lock(struct svc_rqst *, struct nlm_file *, struct nlm_host *, struct nlm_lock *, int, struct nlm_cookie *, int); @@ -284,15 +276,13 @@ void nlmsvc_traverse_blocks(struct nlm_host *, struct nlm_file *, nlm_host_match_fn_t match); void nlmsvc_grant_reply(struct nlm_cookie *, __be32); void nlmsvc_release_call(struct nlm_rqst *); -void nlmsvc_locks_init_private(struct file_lock *, struct nlm_host *, pid_t); /* * File handling for the server personality */ __be32 nlm_lookup_file(struct svc_rqst *, struct nlm_file **, - struct nlm_lock *); + struct nfs_fh *); void nlm_release_file(struct nlm_file *); -void nlmsvc_release_lockowner(struct nlm_lock *); void nlmsvc_mark_resources(struct net *); void nlmsvc_free_host_resources(struct nlm_host *); void nlmsvc_invalidate_all(void); @@ -305,8 +295,7 @@ int nlmsvc_unlock_all_by_ip(struct sockaddr *server_addr); static inline struct inode *nlmsvc_file_inode(struct 
nlm_file *file) { - return locks_inode(file->f_file[O_RDONLY] ? - file->f_file[O_RDONLY] : file->f_file[O_WRONLY]); + return file_inode(file->f_file); } static inline int __nlm_privileged_request4(const struct sockaddr *sap) @@ -366,7 +355,7 @@ static inline int nlm_privileged_requester(const struct svc_rqst *rqstp) static inline int nlm_compare_locks(const struct file_lock *fl1, const struct file_lock *fl2) { - return locks_inode(fl1->fl_file) == locks_inode(fl2->fl_file) + return file_inode(fl1->fl_file) == file_inode(fl2->fl_file) && fl1->fl_pid == fl2->fl_pid && fl1->fl_owner == fl2->fl_owner && fl1->fl_start == fl2->fl_start @@ -376,4 +365,6 @@ static inline int nlm_compare_locks(const struct file_lock *fl1, extern const struct lock_manager_operations nlmsvc_lock_operations; +#endif /* __KERNEL__ */ + #endif /* LINUX_LOCKD_LOCKD_H */ diff --git a/include/linux/lockd/nlm.h b/include/linux/lockd/nlm.h index 6e343ef760..d9d46e4425 100644 --- a/include/linux/lockd/nlm.h +++ b/include/linux/lockd/nlm.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ /* * linux/include/linux/lockd/nlm.h * diff --git a/include/linux/lockd/share.h b/include/linux/lockd/share.h index 1f18a9faf6..630c5bf69b 100644 --- a/include/linux/lockd/share.h +++ b/include/linux/lockd/share.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ /* * linux/include/linux/lockd/share.h * diff --git a/include/linux/lockd/xdr.h b/include/linux/lockd/xdr.h index a98309c012..8b5d98f26c 100644 --- a/include/linux/lockd/xdr.h +++ b/include/linux/lockd/xdr.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ /* * linux/include/linux/lockd/xdr.h * @@ -96,18 +95,24 @@ struct nlm_reboot { */ #define NLMSVC_XDRSIZE sizeof(struct nlm_args) -int nlmsvc_decode_testargs(struct svc_rqst *, __be32 *); -int nlmsvc_encode_testres(struct svc_rqst *, __be32 *); -int nlmsvc_decode_lockargs(struct svc_rqst *, __be32 *); -int nlmsvc_decode_cancargs(struct svc_rqst *, __be32 *); -int 
nlmsvc_decode_unlockargs(struct svc_rqst *, __be32 *); -int nlmsvc_encode_res(struct svc_rqst *, __be32 *); -int nlmsvc_decode_res(struct svc_rqst *, __be32 *); -int nlmsvc_encode_void(struct svc_rqst *, __be32 *); -int nlmsvc_decode_void(struct svc_rqst *, __be32 *); -int nlmsvc_decode_shareargs(struct svc_rqst *, __be32 *); -int nlmsvc_encode_shareres(struct svc_rqst *, __be32 *); -int nlmsvc_decode_notify(struct svc_rqst *, __be32 *); -int nlmsvc_decode_reboot(struct svc_rqst *, __be32 *); +int nlmsvc_decode_testargs(void *, __be32 *, void *); +int nlmsvc_encode_testres(void *, __be32 *, void *); +int nlmsvc_decode_lockargs(void *, __be32 *, void *); +int nlmsvc_decode_cancargs(void *, __be32 *, void *); +int nlmsvc_decode_unlockargs(void *, __be32 *, void *); +int nlmsvc_encode_res(void *, __be32 *, void *); +int nlmsvc_decode_res(void *, __be32 *, void *); +int nlmsvc_encode_void(void *, __be32 *p, void *); +int nlmsvc_decode_void(void *, __be32 *, void *); +int nlmsvc_decode_shareargs(void *, __be32 *, void *); +int nlmsvc_encode_shareres(void *, __be32 *, void *); +int nlmsvc_decode_notify(void *, __be32 *, void *); +int nlmsvc_decode_reboot(void *, __be32 *, void *); +/* +int nlmclt_encode_testargs(void *, u32 *, void *); +int nlmclt_encode_lockargs(void *, u32 *, void *); +int nlmclt_encode_cancargs(void *, u32 *, void *); +int nlmclt_encode_unlockargs(void *, u32 *, void *); + */ #endif /* LOCKD_XDR_H */ diff --git a/include/linux/lockd/xdr4.h b/include/linux/lockd/xdr4.h index 5ae766f26e..759ca71d6d 100644 --- a/include/linux/lockd/xdr4.h +++ b/include/linux/lockd/xdr4.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ /* * linux/include/linux/lockd/xdr4.h * @@ -24,20 +23,25 @@ -int nlm4svc_decode_testargs(struct svc_rqst *, __be32 *); -int nlm4svc_encode_testres(struct svc_rqst *, __be32 *); -int nlm4svc_decode_lockargs(struct svc_rqst *, __be32 *); -int nlm4svc_decode_cancargs(struct svc_rqst *, __be32 *); -int nlm4svc_decode_unlockargs(struct 
svc_rqst *, __be32 *); -int nlm4svc_encode_res(struct svc_rqst *, __be32 *); -int nlm4svc_decode_res(struct svc_rqst *, __be32 *); -int nlm4svc_encode_void(struct svc_rqst *, __be32 *); -int nlm4svc_decode_void(struct svc_rqst *, __be32 *); -int nlm4svc_decode_shareargs(struct svc_rqst *, __be32 *); -int nlm4svc_encode_shareres(struct svc_rqst *, __be32 *); -int nlm4svc_decode_notify(struct svc_rqst *, __be32 *); -int nlm4svc_decode_reboot(struct svc_rqst *, __be32 *); - +int nlm4svc_decode_testargs(void *, __be32 *, void *); +int nlm4svc_encode_testres(void *, __be32 *, void *); +int nlm4svc_decode_lockargs(void *, __be32 *, void *); +int nlm4svc_decode_cancargs(void *, __be32 *, void *); +int nlm4svc_decode_unlockargs(void *, __be32 *, void *); +int nlm4svc_encode_res(void *, __be32 *, void *); +int nlm4svc_decode_res(void *, __be32 *, void *); +int nlm4svc_encode_void(void *, __be32 *, void *); +int nlm4svc_decode_void(void *, __be32 *, void *); +int nlm4svc_decode_shareargs(void *, __be32 *, void *); +int nlm4svc_encode_shareres(void *, __be32 *, void *); +int nlm4svc_decode_notify(void *, __be32 *, void *); +int nlm4svc_decode_reboot(void *, __be32 *, void *); +/* +int nlmclt_encode_testargs(void *, u32 *, void *); +int nlmclt_encode_lockargs(void *, u32 *, void *); +int nlmclt_encode_cancargs(void *, u32 *, void *); +int nlmclt_encode_unlockargs(void *, u32 *, void *); + */ extern const struct rpc_version nlm_version4; #endif /* LOCKD_XDR4_H */ diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h index 9fe165beb0..c1458fede1 100644 --- a/include/linux/lockdep.h +++ b/include/linux/lockdep.h @@ -1,25 +1,23 @@ -/* SPDX-License-Identifier: GPL-2.0 */ /* * Runtime locking correctness validator * * Copyright (C) 2006,2007 Red Hat, Inc., Ingo Molnar * Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra * - * see Documentation/locking/lockdep-design.rst for more details. + * see Documentation/locking/lockdep-design.txt for more details. 
*/ #ifndef __LINUX_LOCKDEP_H #define __LINUX_LOCKDEP_H -#include -#include -#include - struct task_struct; +struct lockdep_map; /* for sysctl */ extern int prove_locking; extern int lock_stat; +#define MAX_LOCKDEP_SUBCLASSES 8UL + #ifdef CONFIG_LOCKDEP #include @@ -27,6 +25,138 @@ extern int lock_stat; #include #include +/* + * We'd rather not expose kernel/lockdep_states.h this wide, but we do need + * the total number of states... :-( + */ +#define XXX_LOCK_USAGE_STATES (1+3*4) + +/* + * NR_LOCKDEP_CACHING_CLASSES ... Number of classes + * cached in the instance of lockdep_map + * + * Currently main class (subclass == 0) and signle depth subclass + * are cached in lockdep_map. This optimization is mainly targeting + * on rq->lock. double_rq_lock() acquires this highly competitive with + * single depth. + */ +#define NR_LOCKDEP_CACHING_CLASSES 2 + +/* + * Lock-classes are keyed via unique addresses, by embedding the + * lockclass-key into the kernel (or module) .data section. (For + * static locks we use the lock address itself as the key.) + */ +struct lockdep_subclass_key { + char __one_byte; +} __attribute__ ((__packed__)); + +struct lock_class_key { + struct lockdep_subclass_key subkeys[MAX_LOCKDEP_SUBCLASSES]; +}; + +extern struct lock_class_key __lockdep_no_validate__; + +#define LOCKSTAT_POINTS 4 + +/* + * The lock-class itself: + */ +struct lock_class { + /* + * class-hash: + */ + struct hlist_node hash_entry; + + /* + * global list of all lock-classes: + */ + struct list_head lock_entry; + + struct lockdep_subclass_key *key; + unsigned int subclass; + unsigned int dep_gen_id; + + /* + * IRQ/softirq usage tracking bits: + */ + unsigned long usage_mask; + struct stack_trace usage_traces[XXX_LOCK_USAGE_STATES]; + + /* + * These fields represent a directed graph of lock dependencies, + * to every node we attach a list of "forward" and a list of + * "backward" graph nodes. 
+ */ + struct list_head locks_after, locks_before; + + /* + * Generation counter, when doing certain classes of graph walking, + * to ensure that we check one node only once: + */ + unsigned int version; + + /* + * Statistics counter: + */ + unsigned long ops; + + const char *name; + int name_version; + +#ifdef CONFIG_LOCK_STAT + unsigned long contention_point[LOCKSTAT_POINTS]; + unsigned long contending_point[LOCKSTAT_POINTS]; +#endif +}; + +#ifdef CONFIG_LOCK_STAT +struct lock_time { + s64 min; + s64 max; + s64 total; + unsigned long nr; +}; + +enum bounce_type { + bounce_acquired_write, + bounce_acquired_read, + bounce_contended_write, + bounce_contended_read, + nr_bounce_types, + + bounce_acquired = bounce_acquired_write, + bounce_contended = bounce_contended_write, +}; + +struct lock_class_stats { + unsigned long contention_point[LOCKSTAT_POINTS]; + unsigned long contending_point[LOCKSTAT_POINTS]; + struct lock_time read_waittime; + struct lock_time write_waittime; + struct lock_time read_holdtime; + struct lock_time write_holdtime; + unsigned long bounces[nr_bounce_types]; +}; + +struct lock_class_stats lock_stats(struct lock_class *class); +void clear_lock_stats(struct lock_class *class); +#endif + +/* + * Map the lock object (the lock instance) to the lock-class object. 
+ * This is embedded into specific lock instances: + */ +struct lockdep_map { + struct lock_class_key *key; + struct lock_class *class_cache[NR_LOCKDEP_CACHING_CLASSES]; + const char *name; +#ifdef CONFIG_LOCK_STAT + int cpu; + unsigned long ip; +#endif +}; + static inline void lockdep_copy_map(struct lockdep_map *to, struct lockdep_map *from) { @@ -52,13 +182,8 @@ static inline void lockdep_copy_map(struct lockdep_map *to, struct lock_list { struct list_head entry; struct lock_class *class; - struct lock_class *links_to; - const struct lock_trace *trace; - u16 distance; - /* bitmap of different dependencies from head to this */ - u8 dep; - /* used by BFS to record whether "prev -> this" only has -(*R)-> */ - u8 only_xr; + struct stack_trace trace; + int distance; /* * The parent field is used to implement breadth-first search, and the @@ -67,17 +192,11 @@ struct lock_list { struct lock_list *parent; }; -/** - * struct lock_chain - lock dependency chain record - * - * @irq_context: the same as irq_context in held_lock below - * @depth: the number of held locks in this chain - * @base: the index in chain_hlocks for this chain - * @entry: the collided lock chains in lock_chain hash list - * @chain_key: the hash key of this lock_chain +/* + * We record lock dependency chains, so that we can cache them: */ struct lock_chain { - /* see BUILD_BUG_ON()s in add_chain_cache() */ + /* see BUILD_BUG_ON()s in lookup_chain_cache() */ unsigned int irq_context : 2, depth : 6, base : 24; @@ -87,8 +206,12 @@ struct lock_chain { }; #define MAX_LOCKDEP_KEYS_BITS 13 -#define MAX_LOCKDEP_KEYS (1UL << MAX_LOCKDEP_KEYS_BITS) -#define INITIAL_CHAIN_KEY -1 +/* + * Subtract one because we offset hlock->class_idx by 1 in order + * to make 0 mean no class. This avoids overflowing the class_idx + * bitfield and hitting the BUG in hlock_class(). 
+ */ +#define MAX_LOCKDEP_KEYS ((1UL << MAX_LOCKDEP_KEYS_BITS) - 1) struct held_lock { /* @@ -113,11 +236,6 @@ struct held_lock { u64 waittime_stamp; u64 holdtime_stamp; #endif - /* - * class_idx is zero-indexed; it points to the element in - * lock_classes this held lock instance belongs to. class_idx is in - * the range from 0 to (MAX_LOCKDEP_KEYS-1) inclusive. - */ unsigned int class_idx:MAX_LOCKDEP_KEYS_BITS; /* * The lock-stack is unified in that the lock chains of interrupt @@ -145,39 +263,14 @@ struct held_lock { /* * Initialization, self-test and debugging-output methods: */ -extern void lockdep_init(void); +extern void lockdep_info(void); extern void lockdep_reset(void); extern void lockdep_reset_lock(struct lockdep_map *lock); extern void lockdep_free_key_range(void *start, unsigned long size); extern asmlinkage void lockdep_sys_exit(void); -extern void lockdep_set_selftest_task(struct task_struct *task); -extern void lockdep_init_task(struct task_struct *task); - -/* - * Split the recursion counter in two to readily detect 'off' vs recursion. - */ -#define LOCKDEP_RECURSION_BITS 16 -#define LOCKDEP_OFF (1U << LOCKDEP_RECURSION_BITS) -#define LOCKDEP_RECURSION_MASK (LOCKDEP_OFF - 1) - -/* - * lockdep_{off,on}() are macros to avoid tracing and kprobes; not inlines due - * to header dependencies. 
- */ - -#define lockdep_off() \ -do { \ - current->lockdep_recursion += LOCKDEP_OFF; \ -} while (0) - -#define lockdep_on() \ -do { \ - current->lockdep_recursion -= LOCKDEP_OFF; \ -} while (0) - -extern void lockdep_register_key(struct lock_class_key *key); -extern void lockdep_unregister_key(struct lock_class_key *key); +extern void lockdep_off(void); +extern void lockdep_on(void); /* * These methods are used by specific locking variants (spinlocks, @@ -185,28 +278,15 @@ extern void lockdep_unregister_key(struct lock_class_key *key); * to lockdep: */ -extern void lockdep_init_map_type(struct lockdep_map *lock, const char *name, - struct lock_class_key *key, int subclass, u8 inner, u8 outer, u8 lock_type); +extern void lockdep_init_map(struct lockdep_map *lock, const char *name, + struct lock_class_key *key, int subclass); -static inline void -lockdep_init_map_waits(struct lockdep_map *lock, const char *name, - struct lock_class_key *key, int subclass, u8 inner, u8 outer) -{ - lockdep_init_map_type(lock, name, key, subclass, inner, LD_WAIT_INV, LD_LOCK_NORMAL); -} - -static inline void -lockdep_init_map_wait(struct lockdep_map *lock, const char *name, - struct lock_class_key *key, int subclass, u8 inner) -{ - lockdep_init_map_waits(lock, name, key, subclass, inner, LD_WAIT_INV); -} - -static inline void lockdep_init_map(struct lockdep_map *lock, const char *name, - struct lock_class_key *key, int subclass) -{ - lockdep_init_map_wait(lock, name, key, subclass, LD_WAIT_INV); -} +/* + * To initialize a lockdep_map statically use this macro. + * Note that _name must not be NULL. 
+ */ +#define STATIC_LOCKDEP_MAP_INIT(_name, _key) \ + { .name = (_name), .key = (void *)(_key), } /* * Reinitialize a lock key - for cases where there is special locking or @@ -214,29 +294,18 @@ static inline void lockdep_init_map(struct lockdep_map *lock, const char *name, * of dependencies wrong: they are either too broad (they need a class-split) * or they are too narrow (they suffer from a false class-split): */ -#define lockdep_set_class(lock, key) \ - lockdep_init_map_waits(&(lock)->dep_map, #key, key, 0, \ - (lock)->dep_map.wait_type_inner, \ - (lock)->dep_map.wait_type_outer) - -#define lockdep_set_class_and_name(lock, key, name) \ - lockdep_init_map_waits(&(lock)->dep_map, name, key, 0, \ - (lock)->dep_map.wait_type_inner, \ - (lock)->dep_map.wait_type_outer) - -#define lockdep_set_class_and_subclass(lock, key, sub) \ - lockdep_init_map_waits(&(lock)->dep_map, #key, key, sub,\ - (lock)->dep_map.wait_type_inner, \ - (lock)->dep_map.wait_type_outer) - -#define lockdep_set_subclass(lock, sub) \ - lockdep_init_map_waits(&(lock)->dep_map, #lock, (lock)->dep_map.key, sub,\ - (lock)->dep_map.wait_type_inner, \ - (lock)->dep_map.wait_type_outer) +#define lockdep_set_class(lock, key) \ + lockdep_init_map(&(lock)->dep_map, #key, key, 0) +#define lockdep_set_class_and_name(lock, key, name) \ + lockdep_init_map(&(lock)->dep_map, name, key, 0) +#define lockdep_set_class_and_subclass(lock, key, sub) \ + lockdep_init_map(&(lock)->dep_map, #key, key, sub) +#define lockdep_set_subclass(lock, sub) \ + lockdep_init_map(&(lock)->dep_map, #lock, \ + (lock)->dep_map.key, sub) #define lockdep_set_novalidate_class(lock) \ lockdep_set_class_and_name(lock, &__lockdep_no_validate__, #lock) - /* * Compare locking classes */ @@ -266,25 +335,12 @@ extern void lock_acquire(struct lockdep_map *lock, unsigned int subclass, int trylock, int read, int check, struct lockdep_map *nest_lock, unsigned long ip); -extern void lock_release(struct lockdep_map *lock, unsigned long ip); +extern void 
lock_release(struct lockdep_map *lock, int nested, + unsigned long ip); -/* lock_is_held_type() returns */ -#define LOCK_STATE_UNKNOWN -1 -#define LOCK_STATE_NOT_HELD 0 -#define LOCK_STATE_HELD 1 +#define lockdep_is_held(lock) lock_is_held(&(lock)->dep_map) -/* - * Same "read" as for lock_acquire(), except -1 means any. - */ -extern int lock_is_held_type(const struct lockdep_map *lock, int read); - -static inline int lock_is_held(const struct lockdep_map *lock) -{ - return lock_is_held_type(lock, -1); -} - -#define lockdep_is_held(lock) lock_is_held(&(lock)->dep_map) -#define lockdep_is_held_type(lock, r) lock_is_held_type(&(lock)->dep_map, (r)) +extern int lock_is_held(struct lockdep_map *lock); extern void lock_set_class(struct lockdep_map *lock, const char *name, struct lock_class_key *key, unsigned int subclass, @@ -296,7 +352,11 @@ static inline void lock_set_subclass(struct lockdep_map *lock, lock_set_class(lock, lock->name, lock->key, subclass, ip); } -extern void lock_downgrade(struct lockdep_map *lock, unsigned long ip); +extern void lockdep_set_current_reclaim_state(gfp_t gfp_mask); +extern void lockdep_clear_current_reclaim_state(void); +extern void lockdep_trace_alloc(gfp_t mask); + +struct pin_cookie { unsigned int val; }; #define NIL_COOKIE (struct pin_cookie){ .val = 0U, } @@ -304,31 +364,17 @@ extern struct pin_cookie lock_pin_lock(struct lockdep_map *lock); extern void lock_repin_lock(struct lockdep_map *lock, struct pin_cookie); extern void lock_unpin_lock(struct lockdep_map *lock, struct pin_cookie); +# define INIT_LOCKDEP .lockdep_recursion = 0, .lockdep_reclaim_gfp = 0, + #define lockdep_depth(tsk) (debug_locks ? 
(tsk)->lockdep_depth : 0) -#define lockdep_assert(cond) \ - do { WARN_ON(debug_locks && !(cond)); } while (0) +#define lockdep_assert_held(l) do { \ + WARN_ON(debug_locks && !lockdep_is_held(l)); \ + } while (0) -#define lockdep_assert_once(cond) \ - do { WARN_ON_ONCE(debug_locks && !(cond)); } while (0) - -#define lockdep_assert_held(l) \ - lockdep_assert(lockdep_is_held(l) != LOCK_STATE_NOT_HELD) - -#define lockdep_assert_not_held(l) \ - lockdep_assert(lockdep_is_held(l) != LOCK_STATE_HELD) - -#define lockdep_assert_held_write(l) \ - lockdep_assert(lockdep_is_held_type(l, 0)) - -#define lockdep_assert_held_read(l) \ - lockdep_assert(lockdep_is_held_type(l, 1)) - -#define lockdep_assert_held_once(l) \ - lockdep_assert_once(lockdep_is_held(l) != LOCK_STATE_NOT_HELD) - -#define lockdep_assert_none_held_once() \ - lockdep_assert_once(!current->lockdep_depth) +#define lockdep_assert_held_once(l) do { \ + WARN_ON_ONCE(debug_locks && !lockdep_is_held(l)); \ + } while (0) #define lockdep_recursing(tsk) ((tsk)->lockdep_recursion) @@ -338,10 +384,6 @@ extern void lock_unpin_lock(struct lockdep_map *lock, struct pin_cookie); #else /* !CONFIG_LOCKDEP */ -static inline void lockdep_init_task(struct task_struct *task) -{ -} - static inline void lockdep_off(void) { } @@ -350,22 +392,14 @@ static inline void lockdep_on(void) { } -static inline void lockdep_set_selftest_task(struct task_struct *task) -{ -} - # define lock_acquire(l, s, t, r, c, n, i) do { } while (0) -# define lock_release(l, i) do { } while (0) -# define lock_downgrade(l, i) do { } while (0) +# define lock_release(l, n, i) do { } while (0) # define lock_set_class(l, n, k, s, i) do { } while (0) # define lock_set_subclass(l, s, i) do { } while (0) -# define lockdep_init() do { } while (0) -# define lockdep_init_map_type(lock, name, key, sub, inner, outer, type) \ - do { (void)(name); (void)(key); } while (0) -# define lockdep_init_map_waits(lock, name, key, sub, inner, outer) \ - do { (void)(name); (void)(key); } 
while (0) -# define lockdep_init_map_wait(lock, name, key, sub, inner) \ - do { (void)(name); (void)(key); } while (0) +# define lockdep_set_current_reclaim_state(g) do { } while (0) +# define lockdep_clear_current_reclaim_state() do { } while (0) +# define lockdep_trace_alloc(g) do { } while (0) +# define lockdep_info() do { } while (0) # define lockdep_init_map(lock, name, key, sub) \ do { (void)(name); (void)(key); } while (0) # define lockdep_set_class(lock, key) do { (void)(key); } while (0) @@ -383,65 +417,32 @@ static inline void lockdep_set_selftest_task(struct task_struct *task) * #ifdef the call himself. */ +# define INIT_LOCKDEP # define lockdep_reset() do { debug_locks = 1; } while (0) # define lockdep_free_key_range(start, size) do { } while (0) # define lockdep_sys_exit() do { } while (0) - -static inline void lockdep_register_key(struct lock_class_key *key) -{ -} - -static inline void lockdep_unregister_key(struct lock_class_key *key) -{ -} +/* + * The class key takes no space if lockdep is disabled: + */ +struct lock_class_key { }; #define lockdep_depth(tsk) (0) -/* - * Dummy forward declarations, allow users to write less ifdef-y code - * and depend on dead code elimination. 
- */ -extern int lock_is_held(const void *); -extern int lockdep_is_held(const void *); -#define lockdep_is_held_type(l, r) (1) - -#define lockdep_assert(c) do { } while (0) -#define lockdep_assert_once(c) do { } while (0) - #define lockdep_assert_held(l) do { (void)(l); } while (0) -#define lockdep_assert_not_held(l) do { (void)(l); } while (0) -#define lockdep_assert_held_write(l) do { (void)(l); } while (0) -#define lockdep_assert_held_read(l) do { (void)(l); } while (0) #define lockdep_assert_held_once(l) do { (void)(l); } while (0) -#define lockdep_assert_none_held_once() do { } while (0) #define lockdep_recursing(tsk) (0) +struct pin_cookie { }; + #define NIL_COOKIE (struct pin_cookie){ } -#define lockdep_pin_lock(l) ({ struct pin_cookie cookie = { }; cookie; }) +#define lockdep_pin_lock(l) ({ struct pin_cookie cookie; cookie; }) #define lockdep_repin_lock(l, c) do { (void)(l); (void)(c); } while (0) #define lockdep_unpin_lock(l, c) do { (void)(l); (void)(c); } while (0) #endif /* !LOCKDEP */ -enum xhlock_context_t { - XHLOCK_HARD, - XHLOCK_SOFT, - XHLOCK_CTX_NR, -}; - -#define lockdep_init_map_crosslock(m, n, k, s) do {} while (0) -/* - * To initialize a lockdep_map statically use this macro. - * Note that _name must not be NULL. 
- */ -#define STATIC_LOCKDEP_MAP_INIT(_name, _key) \ - { .name = (_name), .key = (void *)(_key), } - -static inline void lockdep_invariant_state(bool force) {} -static inline void lockdep_free_task(struct task_struct *task) {} - #ifdef CONFIG_LOCK_STAT extern void lock_contended(struct lockdep_map *lock, unsigned long ip); @@ -498,7 +499,7 @@ do { \ #endif /* CONFIG_LOCKDEP */ -#ifdef CONFIG_PROVE_LOCKING +#ifdef CONFIG_TRACE_IRQFLAGS extern void print_irqtrace_events(struct task_struct *curr); #else static inline void print_irqtrace_events(struct task_struct *curr) @@ -506,20 +507,6 @@ static inline void print_irqtrace_events(struct task_struct *curr) } #endif -/* Variable used to make lockdep treat read_lock() as recursive in selftests */ -#ifdef CONFIG_DEBUG_LOCKING_API_SELFTESTS -extern unsigned int force_read_lock_recursive; -#else /* CONFIG_DEBUG_LOCKING_API_SELFTESTS */ -#define force_read_lock_recursive 0 -#endif /* CONFIG_DEBUG_LOCKING_API_SELFTESTS */ - -#ifdef CONFIG_LOCKDEP -extern bool read_lock_is_recursive(void); -#else /* CONFIG_LOCKDEP */ -/* If !LOCKDEP, the value is meaningless */ -#define read_lock_is_recursive() 0 -#endif - /* * For trivial one-depth nesting of a lock-class, the following * global define can be used. 
(Subsystems with multiple levels @@ -538,132 +525,46 @@ extern bool read_lock_is_recursive(void); #define spin_acquire(l, s, t, i) lock_acquire_exclusive(l, s, t, NULL, i) #define spin_acquire_nest(l, s, t, n, i) lock_acquire_exclusive(l, s, t, n, i) -#define spin_release(l, i) lock_release(l, i) +#define spin_release(l, n, i) lock_release(l, n, i) #define rwlock_acquire(l, s, t, i) lock_acquire_exclusive(l, s, t, NULL, i) -#define rwlock_acquire_read(l, s, t, i) \ -do { \ - if (read_lock_is_recursive()) \ - lock_acquire_shared_recursive(l, s, t, NULL, i); \ - else \ - lock_acquire_shared(l, s, t, NULL, i); \ -} while (0) - -#define rwlock_release(l, i) lock_release(l, i) +#define rwlock_acquire_read(l, s, t, i) lock_acquire_shared_recursive(l, s, t, NULL, i) +#define rwlock_release(l, n, i) lock_release(l, n, i) #define seqcount_acquire(l, s, t, i) lock_acquire_exclusive(l, s, t, NULL, i) #define seqcount_acquire_read(l, s, t, i) lock_acquire_shared_recursive(l, s, t, NULL, i) -#define seqcount_release(l, i) lock_release(l, i) +#define seqcount_release(l, n, i) lock_release(l, n, i) #define mutex_acquire(l, s, t, i) lock_acquire_exclusive(l, s, t, NULL, i) #define mutex_acquire_nest(l, s, t, n, i) lock_acquire_exclusive(l, s, t, n, i) -#define mutex_release(l, i) lock_release(l, i) +#define mutex_release(l, n, i) lock_release(l, n, i) #define rwsem_acquire(l, s, t, i) lock_acquire_exclusive(l, s, t, NULL, i) #define rwsem_acquire_nest(l, s, t, n, i) lock_acquire_exclusive(l, s, t, n, i) #define rwsem_acquire_read(l, s, t, i) lock_acquire_shared(l, s, t, NULL, i) -#define rwsem_release(l, i) lock_release(l, i) +#define rwsem_release(l, n, i) lock_release(l, n, i) #define lock_map_acquire(l) lock_acquire_exclusive(l, 0, 0, NULL, _THIS_IP_) #define lock_map_acquire_read(l) lock_acquire_shared_recursive(l, 0, 0, NULL, _THIS_IP_) #define lock_map_acquire_tryread(l) lock_acquire_shared_recursive(l, 0, 1, NULL, _THIS_IP_) -#define lock_map_release(l) lock_release(l, 
_THIS_IP_) +#define lock_map_release(l) lock_release(l, 1, _THIS_IP_) #ifdef CONFIG_PROVE_LOCKING -# define might_lock(lock) \ +# define might_lock(lock) \ do { \ typecheck(struct lockdep_map *, &(lock)->dep_map); \ lock_acquire(&(lock)->dep_map, 0, 0, 0, 1, NULL, _THIS_IP_); \ - lock_release(&(lock)->dep_map, _THIS_IP_); \ + lock_release(&(lock)->dep_map, 0, _THIS_IP_); \ } while (0) -# define might_lock_read(lock) \ +# define might_lock_read(lock) \ do { \ typecheck(struct lockdep_map *, &(lock)->dep_map); \ lock_acquire(&(lock)->dep_map, 0, 0, 1, 1, NULL, _THIS_IP_); \ - lock_release(&(lock)->dep_map, _THIS_IP_); \ + lock_release(&(lock)->dep_map, 0, _THIS_IP_); \ } while (0) -# define might_lock_nested(lock, subclass) \ -do { \ - typecheck(struct lockdep_map *, &(lock)->dep_map); \ - lock_acquire(&(lock)->dep_map, subclass, 0, 1, 1, NULL, \ - _THIS_IP_); \ - lock_release(&(lock)->dep_map, _THIS_IP_); \ -} while (0) - -DECLARE_PER_CPU(int, hardirqs_enabled); -DECLARE_PER_CPU(int, hardirq_context); -DECLARE_PER_CPU(unsigned int, lockdep_recursion); - -#define __lockdep_enabled (debug_locks && !this_cpu_read(lockdep_recursion)) - -#define lockdep_assert_irqs_enabled() \ -do { \ - WARN_ON_ONCE(__lockdep_enabled && !this_cpu_read(hardirqs_enabled)); \ -} while (0) - -#define lockdep_assert_irqs_disabled() \ -do { \ - WARN_ON_ONCE(__lockdep_enabled && this_cpu_read(hardirqs_enabled)); \ -} while (0) - -#define lockdep_assert_in_irq() \ -do { \ - WARN_ON_ONCE(__lockdep_enabled && !this_cpu_read(hardirq_context)); \ -} while (0) - -#define lockdep_assert_preemption_enabled() \ -do { \ - WARN_ON_ONCE(IS_ENABLED(CONFIG_PREEMPT_COUNT) && \ - __lockdep_enabled && \ - (preempt_count() != 0 || \ - !this_cpu_read(hardirqs_enabled))); \ -} while (0) - -#define lockdep_assert_preemption_disabled() \ -do { \ - WARN_ON_ONCE(IS_ENABLED(CONFIG_PREEMPT_COUNT) && \ - __lockdep_enabled && \ - (preempt_count() == 0 && \ - this_cpu_read(hardirqs_enabled))); \ -} while (0) - -/* - * 
Acceptable for protecting per-CPU resources accessed from BH. - * Much like in_softirq() - semantics are ambiguous, use carefully. - */ -#define lockdep_assert_in_softirq() \ -do { \ - WARN_ON_ONCE(__lockdep_enabled && \ - (!in_softirq() || in_irq() || in_nmi())); \ -} while (0) - #else # define might_lock(lock) do { } while (0) # define might_lock_read(lock) do { } while (0) -# define might_lock_nested(lock, subclass) do { } while (0) - -# define lockdep_assert_irqs_enabled() do { } while (0) -# define lockdep_assert_irqs_disabled() do { } while (0) -# define lockdep_assert_in_irq() do { } while (0) - -# define lockdep_assert_preemption_enabled() do { } while (0) -# define lockdep_assert_preemption_disabled() do { } while (0) -# define lockdep_assert_in_softirq() do { } while (0) -#endif - -#ifdef CONFIG_PROVE_RAW_LOCK_NESTING - -# define lockdep_assert_RT_in_threaded_ctx() do { \ - WARN_ONCE(debug_locks && !current->lockdep_recursion && \ - lockdep_hardirq_context() && \ - !(current->hardirq_threaded || current->irq_config), \ - "Not in threaded context on PREEMPT_RT as expected\n"); \ -} while (0) - -#else - -# define lockdep_assert_RT_in_threaded_ctx() do { } while (0) - #endif #ifdef CONFIG_LOCKDEP diff --git a/include/linux/lockref.h b/include/linux/lockref.h index 99f17cc8e1..d37b3dec49 100644 --- a/include/linux/lockref.h +++ b/include/linux/lockref.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef __LINUX_LOCKREF_H #define __LINUX_LOCKREF_H @@ -29,7 +28,7 @@ struct lockref { #endif struct { spinlock_t lock; - int count; + atomic_t count; }; }; }; @@ -37,7 +36,6 @@ struct lockref { extern void lockref_get(struct lockref *); extern int lockref_put_return(struct lockref *); extern int lockref_get_not_zero(struct lockref *); -extern int lockref_put_not_zero(struct lockref *); extern int lockref_get_or_lock(struct lockref *); extern int lockref_put_or_lock(struct lockref *); @@ -45,9 +43,29 @@ extern void lockref_mark_dead(struct lockref *); 
extern int lockref_get_not_dead(struct lockref *); /* Must be called under spinlock for reliable results */ -static inline bool __lockref_is_dead(const struct lockref *l) +static inline int __lockref_is_dead(const struct lockref *lockref) { - return ((int)l->count < 0); + return atomic_read(&lockref->count) < 0; +} + +static inline int __lockref_read(const struct lockref *lockref) +{ + return atomic_read(&lockref->count); +} + +static inline void __lockref_set(struct lockref *lockref, int count) +{ + atomic_set(&lockref->count, count); +} + +static inline void __lockref_inc(struct lockref *lockref) +{ + atomic_inc(&lockref->count); +} + +static inline void __lockref_dec(struct lockref *lockref) +{ + atomic_dec(&lockref->count); } #endif /* __LINUX_LOCKREF_H */ diff --git a/include/linux/log2.h b/include/linux/log2.h index df0b155c21..f38fae23bd 100644 --- a/include/linux/log2.h +++ b/include/linux/log2.h @@ -1,8 +1,12 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ /* Integer base 2 logarithm calculation * * Copyright (C) 2006 Red Hat, Inc. All Rights Reserved. * Written by David Howells (dhowells@redhat.com) + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. */ #ifndef _LINUX_LOG2_H @@ -33,23 +37,19 @@ int __ilog2_u64(u64 n) } #endif -/** - * is_power_of_2() - check if a value is a power of two - * @n: the value to check - * - * Determine whether some value is a power of two, where zero is +/* + * Determine whether some value is a power of two, where zero is * *not* considered a power of two. - * Return: true if @n is a power of 2, otherwise false. 
*/ + static inline __attribute__((const)) bool is_power_of_2(unsigned long n) { return (n != 0 && ((n & (n - 1)) == 0)); } -/** - * __roundup_pow_of_two() - round up to nearest power of two - * @n: value to round up +/* + * round up to nearest power of two */ static inline __attribute__((const)) unsigned long __roundup_pow_of_two(unsigned long n) @@ -57,9 +57,8 @@ unsigned long __roundup_pow_of_two(unsigned long n) return 1UL << fls_long(n - 1); } -/** - * __rounddown_pow_of_two() - round down to nearest power of two - * @n: value to round down +/* + * round down to nearest power of two */ static inline __attribute__((const)) unsigned long __rounddown_pow_of_two(unsigned long n) @@ -68,13 +67,16 @@ unsigned long __rounddown_pow_of_two(unsigned long n) } /** - * const_ilog2 - log base 2 of 32-bit or a 64-bit constant unsigned value - * @n: parameter + * ilog2 - log of base 2 of 32-bit or a 64-bit unsigned value + * @n - parameter * - * Use this where sparse expects a true constant expression, e.g. for array - * indices. + * constant-capable log of base 2 calculation + * - this can be used to initialise global variables from constant data, hence + * the massive ternary operator construction + * + * selects the appropriately-sized optimised version depending on sizeof(n) */ -#define const_ilog2(n) \ +#define ilog2(n) \ ( \ __builtin_constant_p(n) ? ( \ (n) < 2 ? 0 : \ @@ -140,32 +142,15 @@ unsigned long __rounddown_pow_of_two(unsigned long n) (n) & (1ULL << 4) ? 4 : \ (n) & (1ULL << 3) ? 3 : \ (n) & (1ULL << 2) ? 2 : \ - 1) : \ - -1) - -/** - * ilog2 - log base 2 of 32-bit or a 64-bit unsigned value - * @n: parameter - * - * constant-capable log of base 2 calculation - * - this can be used to initialise global variables from constant data, hence - * the massive ternary operator construction - * - * selects the appropriately-sized optimised version depending on sizeof(n) - */ -#define ilog2(n) \ -( \ - __builtin_constant_p(n) ? \ - ((n) < 2 ? 
0 : \ - 63 - __builtin_clzll(n)) : \ - (sizeof(n) <= 4) ? \ - __ilog2_u32(n) : \ - __ilog2_u64(n) \ + 1 ) : \ + (sizeof(n) <= 4) ? \ + __ilog2_u32(n) : \ + __ilog2_u64(n) \ ) /** * roundup_pow_of_two - round the given value up to nearest power of two - * @n: parameter + * @n - parameter * * round the given value up to the nearest power of two * - the result is undefined when n == 0 @@ -174,7 +159,7 @@ unsigned long __rounddown_pow_of_two(unsigned long n) #define roundup_pow_of_two(n) \ ( \ __builtin_constant_p(n) ? ( \ - ((n) == 1) ? 1 : \ + (n == 1) ? 1 : \ (1UL << (ilog2((n) - 1) + 1)) \ ) : \ __roundup_pow_of_two(n) \ @@ -182,7 +167,7 @@ unsigned long __rounddown_pow_of_two(unsigned long n) /** * rounddown_pow_of_two - round the given value down to nearest power of two - * @n: parameter + * @n - parameter * * round the given value down to the nearest power of two * - the result is undefined when n == 0 @@ -195,12 +180,6 @@ unsigned long __rounddown_pow_of_two(unsigned long n) __rounddown_pow_of_two(n) \ ) -static inline __attribute_const__ -int __order_base_2(unsigned long n) -{ - return n > 1 ? ilog2(n - 1) + 1 : 0; -} - /** * order_base_2 - calculate the (rounded up) base 2 order of the argument * @n: parameter @@ -214,45 +193,7 @@ int __order_base_2(unsigned long n) * ob2(5) = 3 * ... and so on. */ -#define order_base_2(n) \ -( \ - __builtin_constant_p(n) ? ( \ - ((n) == 0 || (n) == 1) ? 0 : \ - ilog2((n) - 1) + 1) : \ - __order_base_2(n) \ -) -static inline __attribute__((const)) -int __bits_per(unsigned long n) -{ - if (n < 2) - return 1; - if (is_power_of_2(n)) - return order_base_2(n) + 1; - return order_base_2(n); -} +#define order_base_2(n) ilog2(roundup_pow_of_two(n)) -/** - * bits_per - calculate the number of bits required for the argument - * @n: parameter - * - * This is constant-capable and can be used for compile time - * initializations, e.g bitfields. 
- * - * The first few values calculated by this routine: - * bf(0) = 1 - * bf(1) = 1 - * bf(2) = 2 - * bf(3) = 2 - * bf(4) = 3 - * ... and so on. - */ -#define bits_per(n) \ -( \ - __builtin_constant_p(n) ? ( \ - ((n) == 0 || (n) == 1) \ - ? 1 : ilog2(n) + 1 \ - ) : \ - __bits_per(n) \ -) #endif /* _LINUX_LOG2_H */ diff --git a/include/linux/lp.h b/include/linux/lp.h index be8a07eb20..0dd276af9e 100644 --- a/include/linux/lp.h +++ b/include/linux/lp.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ /* * usr/include/linux/lp.h c.1991-1992 James Wiegand * many modifications copyright (C) 1992 Michael K. Johnson diff --git a/include/linux/lru_cache.h b/include/linux/lru_cache.h index 07add7882a..04fc6e6c7f 100644 --- a/include/linux/lru_cache.h +++ b/include/linux/lru_cache.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ /* lru_cache.c @@ -8,6 +7,19 @@ Copyright (C) 2003-2008, Philipp Reisner . Copyright (C) 2003-2008, Lars Ellenberg . + drbd is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2, or (at your option) + any later version. + + drbd is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with drbd; see the file COPYING. If not, write to + the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. */ @@ -32,7 +44,7 @@ This header file (and its .c file; kernel-doc of functions see there) Because of this later property, it is called "lru_cache". As it actually Tracks Objects in an Active SeT, we could also call it toast (incidentally that is what may happen to the data on the - backend storage upon next resync, if we don't get it right). 
+ backend storage upon next resync, if we don't get it right). What for? @@ -152,7 +164,7 @@ struct lc_element { * for paranoia, and for "lc_element_to_index" */ unsigned lc_index; /* if we want to track a larger set of objects, - * it needs to become an architecture independent u64 */ + * it needs to become arch independent u64 */ unsigned lc_number; /* special label when on free list */ #define LC_FREE (~0U) @@ -263,7 +275,7 @@ extern void lc_seq_dump_details(struct seq_file *seq, struct lru_cache *lc, char * * Allows (expects) the set to be "dirty". Note that the reference counts and * order on the active and lru lists may still change. Used to serialize - * changing transactions. Returns true if we acquired the lock. + * changing transactions. Returns true if we acquired the lock. static inline int lc_try_lock_for_transaction(struct lru_cache *lc) { @@ -275,7 +287,7 @@ static inline int lc_try_lock_for_transaction(struct lru_cache *lc) * @lc: the lru cache to operate on * * Note that the reference counts and order on the active and lru lists may - * still change. Only works on a "clean" set. Returns true if we acquired the + * still change. Only works on a "clean" set. Returns true if we acquired the * lock, which means there are no pending changes, and any further attempt to * change the set will not succeed until the next lc_unlock(). 
*/ diff --git a/include/linux/lsm_audit.h b/include/linux/lsm_audit.h index 17d02eda95..e58e577117 100644 --- a/include/linux/lsm_audit.h +++ b/include/linux/lsm_audit.h @@ -1,11 +1,10 @@ -/* SPDX-License-Identifier: GPL-2.0 */ /* * Common LSM logging functions * Heavily borrowed from selinux/avc.h * * Author : Etienne BASSET * - * All credits to : Stephen Smalley, + * All credits to : Stephen Smalley, * All BUGS to : Etienne BASSET */ #ifndef _LSM_COMMON_LOGGING_ @@ -22,11 +21,10 @@ #include #include #include -#include struct lsm_network_audit { int netif; - const struct sock *sk; + struct sock *sk; u16 family; __be16 dport; __be16 sport; @@ -47,16 +45,6 @@ struct lsm_ioctlop_audit { u16 cmd; }; -struct lsm_ibpkey_audit { - u64 subnet_prefix; - u16 pkey; -}; - -struct lsm_ibendport_audit { - const char *dev_name; - u8 port; -}; - /* Auxiliary data to use in generating the audit record. */ struct common_audit_data { char type; @@ -72,10 +60,6 @@ struct common_audit_data { #define LSM_AUDIT_DATA_DENTRY 10 #define LSM_AUDIT_DATA_IOCTL_OP 11 #define LSM_AUDIT_DATA_FILE 12 -#define LSM_AUDIT_DATA_IBPKEY 13 -#define LSM_AUDIT_DATA_IBENDPORT 14 -#define LSM_AUDIT_DATA_LOCKDOWN 15 -#define LSM_AUDIT_DATA_NOTIFICATION 16 union { struct path path; struct dentry *dentry; @@ -93,9 +77,6 @@ struct common_audit_data { char *kmod_name; struct lsm_ioctlop_audit *op; struct file *file; - struct lsm_ibpkey_audit *ibpkey; - struct lsm_ibendport_audit *ibendport; - int reason; } u; /* this union contains LSM specific data */ union { diff --git a/include/linux/lsm_hooks.h b/include/linux/lsm_hooks.h index 5c4c5c0602..9127895302 100644 --- a/include/linux/lsm_hooks.h +++ b/include/linux/lsm_hooks.h @@ -8,7 +8,6 @@ * Copyright (C) 2001 Silicon Graphics, Inc. (Trust Technology Group) * Copyright (C) 2015 Intel Corporation. 
* Copyright (C) 2015 Casey Schaufler - * Copyright (C) 2016 Mellanox Techonologies * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by @@ -30,52 +29,38 @@ #include /** - * union security_list_options - Linux Security Module hook function list - * * Security hooks for program execution operations. * - * @bprm_creds_for_exec: - * If the setup in prepare_exec_creds did not setup @bprm->cred->security - * properly for executing @bprm->file, update the LSM's portion of - * @bprm->cred->security to be what commit_creds needs to install for the - * new program. This hook may also optionally check permissions - * (e.g. for transitions between security domains). - * The hook must set @bprm->secureexec to 1 if AT_SECURE should be set to - * request libc enable secure mode. - * @bprm contains the linux_binprm structure. - * Return 0 if the hook is successful and permission is granted. - * @bprm_creds_from_file: - * If @file is setpcap, suid, sgid or otherwise marked to change - * privilege upon exec, update @bprm->cred to reflect that change. - * This is called after finding the binary that will be executed. - * without an interpreter. This ensures that the credentials will not - * be derived from a script that the binary will need to reopen, which - * when reopend may end up being a completely different file. This - * hook may also optionally check permissions (e.g. for transitions - * between security domains). - * The hook must set @bprm->secureexec to 1 if AT_SECURE should be set to - * request libc enable secure mode. - * The hook must add to @bprm->per_clear any personality flags that - * should be cleared from current->personality. + * @bprm_set_creds: + * Save security information in the bprm->security field, typically based + * on information about the bprm->file, for later use by the apply_creds + * hook. This hook may also optionally check permissions (e.g. 
for + * transitions between security domains). + * This hook may be called multiple times during a single execve, e.g. for + * interpreters. The hook can tell whether it has already been called by + * checking to see if @bprm->security is non-NULL. If so, then the hook + * may decide either to retain the security information saved earlier or + * to replace it. * @bprm contains the linux_binprm structure. * Return 0 if the hook is successful and permission is granted. * @bprm_check_security: * This hook mediates the point when a search for a binary handler will - * begin. It allows a check against the @bprm->cred->security value - * which was set in the preceding creds_for_exec call. The argv list and - * envp list are reliably available in @bprm. This hook may be called - * multiple times during a single execve. + * begin. It allows a check the @bprm->security value which is set in the + * preceding set_creds call. The primary difference from set_creds is + * that the argv list and envp list are reliably available in @bprm. This + * hook may be called multiple times during a single execve; and in each + * pass set_creds is called first. * @bprm contains the linux_binprm structure. * Return 0 if the hook is successful and permission is granted. * @bprm_committing_creds: * Prepare to install the new security attributes of a process being * transformed by an execve operation, based on the old credentials * pointed to by @current->cred and the information set in @bprm->cred by - * the bprm_creds_for_exec hook. @bprm points to the linux_binprm - * structure. This hook is a good place to perform state changes on the - * process such as closing open file descriptors to which access will no - * longer be granted when the attributes are changed. This is called - * immediately before commit_creds(). + * the bprm_set_creds hook. @bprm points to the linux_binprm structure. 
+ * This hook is a good place to perform state changes on the process such + * as closing open file descriptors to which access will no longer be + * granted when the attributes are changed. This is called immediately + * before commit_creds(). * @bprm_committed_creds: * Tidy up after the installation of the new security attributes of a * process being transformed by an execve operation. The new credentials @@ -83,22 +68,12 @@ * linux_binprm structure. This hook is a good place to perform state * changes on the process such as clearing out non-inheritable signal * state. This is called immediately after commit_creds(). - * - * Security hooks for mount using fs_context. - * [See also Documentation/filesystems/mount_api.rst] - * - * @fs_context_dup: - * Allocate and attach a security structure to sc->security. This pointer - * is initialised to NULL by the caller. - * @fc indicates the new filesystem context. - * @src_fc indicates the original filesystem context. - * @fs_context_parse_param: - * Userspace provided a parameter to configure a superblock. The LSM may - * reject it with an error and may use it for itself, in which case it - * should return 0; otherwise it should return -ENOPARAM to pass it on to - * the filesystem. - * @fc indicates the filesystem context. - * @param The parameter + * @bprm_secureexec: + * Return a boolean value (0 or 1) indicating whether a "secure exec" + * is required. The flag is passed in the auxiliary table + * on the initial stack to the ELF interpreter to indicate whether libc + * should enable secure mode. + * @bprm contains the linux_binprm structure. * * Security hooks for filesystem operations. * @@ -108,16 +83,9 @@ * allocated. * @sb contains the super_block structure to be modified. * Return 0 if operation was successful. - * @sb_delete: - * Release objects tied to a superblock (e.g. inodes). - * @sb contains the super_block structure being released. * @sb_free_security: * Deallocate and clear the sb->s_security field. 
* @sb contains the super_block structure to be modified. - * @sb_free_mnt_opts: - * Free memory associated with @mnt_ops. - * @sb_eat_lsm_opts: - * Eat (scan @orig options) and save them in @mnt_opts. * @sb_statfs: * Check permission before obtaining filesystem statistics for the @mnt * mountpoint. @@ -142,25 +110,16 @@ * options cleanly (a filesystem may modify the data e.g. with strsep()). * This also allows the original mount data to be stripped of security- * specific options to avoid having to make filesystems aware of them. + * @type the type of filesystem being mounted. * @orig the original mount data copied from userspace. * @copy copied data which will be passed to the security module. * Returns 0 if the copy was successful. - * @sb_mnt_opts_compat: - * Determine if the new mount options in @mnt_opts are allowed given - * the existing mounted filesystem at @sb. - * @sb superblock being compared - * @mnt_opts new mount options - * Return 0 if options are compatible. * @sb_remount: * Extracts security system specific mount options and verifies no changes * are being made to those options. * @sb superblock being remounted * @data contains the filesystem-specific data. * Return 0 if permission is granted. - * @sb_kern_mount: - * Mount this @sb if allowed by permissions. - * @sb_show_options: - * Show (print on @m) mount options for this @sb. * @sb_umount: * Check permission before the @mnt file system is unmounted. * @mnt contains the mounted file system. @@ -180,16 +139,10 @@ * Copy all security options from a given superblock to another * @oldsb old superblock which contain information to clone * @newsb new superblock which needs filled in - * @sb_add_mnt_opt: - * Add one mount @option to @mnt_opts. * @sb_parse_opts_str: * Parse a string of security data filling in the opts structure * @options string containing all mount options known by the LSM * @opts binary data structure usable by the LSM - * @move_mount: - * Check permission before a mount is moved. 
- * @from_path indicates the mount that is going to be moved. - * @to_path indicates the mountpoint that will be mounted upon. * @dentry_init_security: * Compute a context for a dentry as the inode is not yet available * since NFSv4 has no label backed by an EA anyway. @@ -240,17 +193,8 @@ * @value will be set to the allocated attribute value. * @len will be set to the length of the value. * Returns 0 if @name and @value have been successfully set, - * -EOPNOTSUPP if no security attribute is needed, or - * -ENOMEM on memory allocation failure. - * @inode_init_security_anon: - * Set up the incore security field for the new anonymous inode - * and return whether the inode creation is permitted by the security - * module or not. - * @inode contains the inode structure - * @name name of the anonymous inode class - * @context_inode optional related inode - * Returns 0 on success, -EACCES if the security module denies the - * creation of this inode, or another -errno upon other errors. + * -EOPNOTSUPP if no security attribute is needed, or + * -ENOMEM on memory allocation failure. * @inode_create: * Check permission to create a regular file. * @dir contains inode structure of the parent of the new file. @@ -359,11 +303,10 @@ * @new_dentry contains the dentry structure of the new link. * Return 0 if permission is granted. * @path_chmod: - * Check for permission to change a mode of the file @path. The new - * mode is specified in @mode. - * @path contains the path structure of the file to change the mode. - * @mode contains the new DAC's permission, which is a bitmask of - * constants from + * Check for permission to change DAC's permission of a file or directory. + * @dentry contains the dentry structure. + * @mnt contains the vfsmnt structure. + * @mode contains DAC's mode. * Return 0 if permission is granted. * @path_chown: * Check for permission to change owner/group of a file or directory. @@ -375,9 +318,6 @@ * Check for permission to change root directory. 
* @path contains the path structure. * Return 0 if permission is granted. - * @path_notify: - * Check permissions before setting a watch on events as defined by @mask, - * on an object at @path, whose type is defined by @obj_type. * @inode_readlink: * Check the permission to read the symbolic link. * @dentry contains the dentry structure for the file link. @@ -412,7 +352,8 @@ * Return 0 if permission is granted. * @inode_getattr: * Check permission before obtaining file attributes. - * @path contains the path structure for the file. + * @mnt is the vfsmount where the dentry was looked up + * @dentry contains the dentry structure for the file. * Return 0 if permission is granted. * @inode_setxattr: * Check permission before setting the extended attributes @@ -462,7 +403,6 @@ * @inode_killpriv: * The setuid bit is being removed. Remove similar security labels. * Called with the dentry->d_inode->i_mutex held. - * @mnt_userns: user namespace of the mount * @dentry is the dentry being changed. * Return 0 on success. If error is returned, then the operation * causing setuid bit removal is failed. @@ -488,21 +428,6 @@ * security module does not know about attribute or a negative error code * to abort the copy up. Note that the caller is responsible for reading * and writing the xattrs as this hook is merely a filter. - * @d_instantiate: - * Fill in @inode security information for a @dentry if allowed. - * @getprocattr: - * Read attribute @name for process @p and store it into @value if allowed. - * @setprocattr: - * Write (set) attribute @name to @value, size @size if allowed. - * - * Security hooks for kernfs node operations - * - * @kernfs_init_security: - * Initialize the security context of a newly created kernfs node based - * on its own and its parent's attributes. - * - * @kn_dir the parent kernfs node - * @kn the new child kernfs node * * Security hooks for file operations * @@ -561,7 +486,7 @@ * Return 0 if permission is granted. 
* @file_lock: * Check permission before performing file locking operations. - * Note the hook mediates both flock and fcntl style locks. + * Note: this hook mediates both flock and fcntl style locks. * @file contains the file structure. * @cmd contains the posix-translated lock operation to perform * (e.g. F_RDLCK, F_WRLCK). @@ -586,7 +511,8 @@ * process @tsk. Note that this hook is sometimes called from interrupt. * Note that the fown_struct, @fown, is never outside the context of a * struct file, so the file structure (and associated security information) - * can always be obtained: container_of(fown, struct file, f_owner) + * can always be obtained: + * container_of(fown, struct file, f_owner) * @tsk contains the structure of task receiving signal. * @fown contains the file owner information. * @sig is the signal that will be sent. When 0, kernel sends SIGIO. @@ -596,20 +522,20 @@ * to receive an open file descriptor via socket IPC. * @file contains the file structure being received. * Return 0 if permission is granted. - * @file_open: + * @file_open * Save open-time permission checking state for later use upon * file_permission, and recheck access if anything has changed * since inode_permission. * * Security hooks for task operations. * - * @task_alloc: - * @task task being allocated. + * @task_create: + * Check permission before creating a child process. See the clone(2) + * manual page for definitions of the @clone_flags. * @clone_flags contains the flags indicating what should be shared. - * Handle allocation of task-related resources. - * Returns a zero on success, negative values on failure. + * Return 0 if permission is granted. * @task_free: - * @task task about to be freed. + * @task task being freed * Handle release of task-related resources. (Note that this can be called * from interrupt context.) * @cred_alloc_blank: @@ -629,10 +555,6 @@ * @new points to the new credentials. * @old points to the original credentials. 
* Transfer data from original creds to new creds - * @cred_getsecid: - * Retrieve the security identifier of the cred structure @c - * @c contains the credentials, secid will be placed into @secid. - * In case of failure, @secid will be set to zero. * @kernel_act_as: * Set the credentials for a kernel service to act as (subjective context). * @new points to the credentials to be modified. @@ -651,26 +573,11 @@ * userspace to load a kernel module with the given name. * @kmod_name name of the module requested by the kernel * Return 0 if successful. - * @kernel_load_data: - * Load data provided by userspace. - * @id kernel load data identifier - * @contents if a subsequent @kernel_post_load_data will be called. - * Return 0 if permission is granted. - * @kernel_post_load_data: - * Load data provided by a non-file source (usually userspace buffer). - * @buf pointer to buffer containing the data contents. - * @size length of the data contents. - * @id kernel load data identifier - * @description a text description of what was loaded, @id-specific - * Return 0 if permission is granted. - * This must be paired with a prior @kernel_load_data call that had - * @contents set to true. * @kernel_read_file: * Read a file specified by userspace. * @file contains the file structure pointing to the file being read * by the kernel. * @id kernel read file identifier - * @contents if a subsequent @kernel_post_read_file will be called. * Return 0 if permission is granted. * @kernel_post_read_file: * Read a file specified by userspace. @@ -679,8 +586,6 @@ * @buf pointer to buffer containing the file contents. * @size length of the file contents. * @id kernel read file identifier - * This must be paired with a prior @kernel_read_file call that had - * @contents set to true. * Return 0 if permission is granted. 
* @task_fix_setuid: * Update the module's state after setting one or more of the user @@ -691,15 +596,6 @@ * @old is the set of credentials that are being replaces * @flags contains one of the LSM_SETID_* values. * Return 0 on success. - * @task_fix_setgid: - * Update the module's state after setting one or more of the group - * identity attributes of the current process. The @flags parameter - * indicates which of the set*gid system calls invoked this hook. - * @new is the set of credentials that will be installed. Modifications - * should be made to this rather than to @current->cred. - * @old is the set of credentials that are being replaced. - * @flags contains one of the LSM_SETID_* values. - * Return 0 on success. * @task_setpgid: * Check permission before setting the process group identifier of the * process @p to @pgid. @@ -716,15 +612,9 @@ * @p. * @p contains the task_struct for the process. * Return 0 if permission is granted. - * @task_getsecid_subj: - * Retrieve the subjective security identifier of the task_struct in @p - * and return it in @secid. Special care must be taken to ensure that @p - * is the either the "current" task, or the caller has exclusive access - * to @p. - * In case of failure, @secid will be set to zero. - * @task_getsecid_obj: - * Retrieve the objective security identifier of the task_struct in @p - * and return it in @secid. + * @task_getsecid: + * Retrieve the security identifier of the process @p. + * @p contains the task_struct for the process and place is into @secid. * In case of failure, @secid will be set to zero. * * @task_setnice: @@ -732,48 +622,41 @@ * @p contains the task_struct of process. * @nice contains the new nice value. * Return 0 if permission is granted. - * @task_setioprio: + * @task_setioprio * Check permission before setting the ioprio value of @p to @ioprio. * @p contains the task_struct of process. * @ioprio contains the new ioprio value * Return 0 if permission is granted. 
- * @task_getioprio: + * @task_getioprio * Check permission before getting the ioprio value of @p. * @p contains the task_struct of process. * Return 0 if permission is granted. - * @task_prlimit: - * Check permission before getting and/or setting the resource limits of - * another task. - * @cred points to the cred structure for the current task. - * @tcred points to the cred structure for the target task. - * @flags contains the LSM_PRLIMIT_* flag bits indicating whether the - * resource limits are being read, modified, or both. - * Return 0 if permission is granted. * @task_setrlimit: - * Check permission before setting the resource limits of process @p - * for @resource to @new_rlim. The old resource limit values can - * be examined by dereferencing (p->signal->rlim + resource). - * @p points to the task_struct for the target task's group leader. + * Check permission before setting the resource limits of the current + * process for @resource to @new_rlim. The old resource limit values can + * be examined by dereferencing (current->signal->rlim + resource). * @resource contains the resource whose limit is being set. * @new_rlim contains the new limits for @resource. * Return 0 if permission is granted. * @task_setscheduler: * Check permission before setting scheduling policy and/or parameters of - * process @p. + * process @p based on @policy and @lp. * @p contains the task_struct for process. + * @policy contains the scheduling policy. + * @lp contains the scheduling parameters. * Return 0 if permission is granted. * @task_getscheduler: * Check permission before obtaining scheduling information for process * @p. * @p contains the task_struct for process. * Return 0 if permission is granted. - * @task_movememory: + * @task_movememory * Check permission before moving memory owned by process @p. * @p contains the task_struct for process. * Return 0 if permission is granted. * @task_kill: * Check permission before sending signal @sig to @p. 
@info can be NULL, - * the constant 1, or a pointer to a kernel_siginfo structure. If @info is 1 or + * the constant 1, or a pointer to a siginfo structure. If @info is 1 or * SI_FROMKERNEL(info) is true, then the signal should be viewed as coming * from the kernel and should typically be permitted. * SIGIO signals are handled separately by the send_sigiotask hook in @@ -781,8 +664,12 @@ * @p contains the task_struct for process. * @info contains the signal information. * @sig contains the signal value. - * @cred contains the cred of the process where the signal originated, or - * NULL if the current task is the originator. + * @secid contains the sid of the process where the signal originated + * Return 0 if permission is granted. + * @task_wait: + * Check permission before allowing a process to reap a child process @p + * and collect its status information. + * @p contains the task_struct for process. * Return 0 if permission is granted. * @task_prctl: * Check permission before performing a process control operation on the @@ -853,20 +740,15 @@ * structure. Note that the security field was not added directly to the * socket structure, but rather, the socket security information is stored * in the associated inode. Typically, the inode alloc_security hook will - * allocate and attach security information to - * SOCK_INODE(sock)->i_security. This hook may be used to update the - * SOCK_INODE(sock)->i_security field with additional information that - * wasn't available when the inode was allocated. + * allocate and and attach security information to + * sock->inode->i_security. This hook may be used to update the + * sock->inode->i_security field with additional information that wasn't + * available when the inode was allocated. * @sock contains the newly created socket structure. * @family contains the requested protocol family. * @type contains the requested communications type. * @protocol contains the requested protocol. * @kern set to 1 if a kernel socket. 
- * @socket_socketpair: - * Check permissions before creating a fresh pair of sockets. - * @socka contains the first socket structure. - * @sockb contains the second socket structure. - * Return 0 if permission is granted and the connection was established. * @socket_bind: * Check permission before socket protocol layer bind operation is * performed and the socket @sock is bound to the address specified in the @@ -961,13 +843,13 @@ * @socket_getpeersec_dgram: * This hook allows the security module to provide peer socket security * state for udp sockets on a per-packet basis to userspace via - * getsockopt SO_GETPEERSEC. The application must first have indicated - * the IP_PASSSEC option via getsockopt. It can then retrieve the + * getsockopt SO_GETPEERSEC. The application must first have indicated + * the IP_PASSSEC option via getsockopt. It can then retrieve the * security state returned by this hook for a packet via the SCM_SECURITY * ancillary message type. - * @sock contains the peer socket. May be NULL. - * @skb is the sk_buff for the packet being queried. May be NULL. - * @secid pointer to store the secid of the packet. + * @skb is the skbuff for the packet being queried + * @secdata is a pointer to a buffer in which to copy the security data + * @seclen is the maximum length for @secdata * Return 0 on success, error on failure. * @sk_alloc_security: * Allocate and attach a security structure to the sk->sk_security field, @@ -991,9 +873,9 @@ * @secmark_relabel_packet: * check if the process should be allowed to relabel packets to * the given secid - * @secmark_refcount_inc: + * @security_secmark_refcount_inc * tells the LSM to increment the number of secmark labeling rules loaded - * @secmark_refcount_dec: + * @security_secmark_refcount_dec * tells the LSM to decrement the number of secmark labeling rules loaded * @req_classify_flow: * Sets the flow's sid to the openreq sid. @@ -1021,53 +903,6 @@ * associated with the TUN device's security structure. 
* @security pointer to the TUN devices's security structure. * - * Security hooks for SCTP - * - * @sctp_assoc_request: - * Passes the @ep and @chunk->skb of the association INIT packet to - * the security module. - * @ep pointer to sctp endpoint structure. - * @skb pointer to skbuff of association packet. - * Return 0 on success, error on failure. - * @sctp_bind_connect: - * Validiate permissions required for each address associated with sock - * @sk. Depending on @optname, the addresses will be treated as either - * for a connect or bind service. The @addrlen is calculated on each - * ipv4 and ipv6 address using sizeof(struct sockaddr_in) or - * sizeof(struct sockaddr_in6). - * @sk pointer to sock structure. - * @optname name of the option to validate. - * @address list containing one or more ipv4/ipv6 addresses. - * @addrlen total length of address(s). - * Return 0 on success, error on failure. - * @sctp_sk_clone: - * Called whenever a new socket is created by accept(2) (i.e. a TCP - * style socket) or when a socket is 'peeled off' e.g userspace - * calls sctp_peeloff(3). - * @ep pointer to current sctp endpoint structure. - * @sk pointer to current sock structure. - * @sk pointer to new sock structure. - * - * Security hooks for Infiniband - * - * @ib_pkey_access: - * Check permission to access a pkey when modifing a QP. - * @subnet_prefix the subnet prefix of the port being used. - * @pkey the pkey to be accessed. - * @sec pointer to a security structure. - * @ib_endport_manage_subnet: - * Check permissions to send and receive SMPs on a end port. - * @dev_name the IB device name (i.e. mlx4_0). - * @port_num the port number. - * @sec pointer to a security structure. - * @ib_alloc_security: - * Allocate a security structure for Infiniband objects. - * @sec pointer to a security structure pointer. - * Returns 0 on success, non-zero on failure - * @ib_free_security: - * Deallocate an Infiniband security structure. 
- * @sec contains the security structure to be freed. - * * Security hooks for XFRM operations. * * @xfrm_policy_alloc_security: @@ -1130,7 +965,7 @@ * @xfrm_state_pol_flow_match: * @x contains the state to match. * @xp contains the policy to check for a match. - * @flic contains the flowi_common struct to check for a match. + * @fl contains the flow to check for a match. * Return 1 if there is a match. * @xfrm_decode_session: * @skb points to skb to decode. @@ -1184,7 +1019,6 @@ * In case of failure, @secid will be set to zero. * * Security hooks for individual messages held in System V IPC message queues - * * @msg_msg_alloc_security: * Allocate and attach a security structure to the msg->security field. * The security field is initialized to NULL when the structure is first @@ -1199,41 +1033,41 @@ * * @msg_queue_alloc_security: * Allocate and attach a security structure to the - * @perm->security field. The security field is initialized to + * msq->q_perm.security field. The security field is initialized to * NULL when the structure is first created. - * @perm contains the IPC permissions of the message queue. + * @msq contains the message queue structure to be modified. * Return 0 if operation was successful and permission is granted. * @msg_queue_free_security: - * Deallocate security field @perm->security for the message queue. - * @perm contains the IPC permissions of the message queue. + * Deallocate security structure for this message queue. + * @msq contains the message queue structure to be modified. * @msg_queue_associate: * Check permission when a message queue is requested through the - * msgget system call. This hook is only called when returning the + * msgget system call. This hook is only called when returning the * message queue identifier for an existing message queue, not when a * new message queue is created. - * @perm contains the IPC permissions of the message queue. + * @msq contains the message queue to act upon. 
* @msqflg contains the operation control flags. * Return 0 if permission is granted. * @msg_queue_msgctl: * Check permission when a message control operation specified by @cmd - * is to be performed on the message queue with permissions @perm. - * The @perm may be NULL, e.g. for IPC_INFO or MSG_INFO. - * @perm contains the IPC permissions of the msg queue. May be NULL. + * is to be performed on the message queue @msq. + * The @msq may be NULL, e.g. for IPC_INFO or MSG_INFO. + * @msq contains the message queue to act upon. May be NULL. * @cmd contains the operation to be performed. * Return 0 if permission is granted. * @msg_queue_msgsnd: * Check permission before a message, @msg, is enqueued on the message - * queue with permissions @perm. - * @perm contains the IPC permissions of the message queue. + * queue, @msq. + * @msq contains the message queue to send message to. * @msg contains the message to be enqueued. * @msqflg contains operational flags. * Return 0 if permission is granted. * @msg_queue_msgrcv: * Check permission before a message, @msg, is removed from the message - * queue. The @target task structure contains a pointer to the + * queue, @msq. The @target task structure contains a pointer to the * process that will be receiving the message (not equal to the current * process when inline receives are being performed). - * @perm contains the IPC permissions of the message queue. + * @msq contains the message queue to retrieve message from. * @msg contains the message destination. * @target contains the task structure for recipient process. * @type contains the type of message requested. @@ -1243,34 +1077,34 @@ * Security hooks for System V Shared Memory Segments * * @shm_alloc_security: - * Allocate and attach a security structure to the @perm->security - * field. The security field is initialized to NULL when the structure is + * Allocate and attach a security structure to the shp->shm_perm.security + * field. 
The security field is initialized to NULL when the structure is * first created. - * @perm contains the IPC permissions of the shared memory structure. + * @shp contains the shared memory structure to be modified. * Return 0 if operation was successful and permission is granted. * @shm_free_security: - * Deallocate the security structure @perm->security for the memory segment. - * @perm contains the IPC permissions of the shared memory structure. + * Deallocate the security struct for this memory segment. + * @shp contains the shared memory structure to be modified. * @shm_associate: * Check permission when a shared memory region is requested through the - * shmget system call. This hook is only called when returning the shared + * shmget system call. This hook is only called when returning the shared * memory region identifier for an existing region, not when a new shared * memory region is created. - * @perm contains the IPC permissions of the shared memory structure. + * @shp contains the shared memory structure to be modified. * @shmflg contains the operation control flags. * Return 0 if permission is granted. * @shm_shmctl: * Check permission when a shared memory control operation specified by - * @cmd is to be performed on the shared memory region with permissions @perm. - * The @perm may be NULL, e.g. for IPC_INFO or SHM_INFO. - * @perm contains the IPC permissions of the shared memory structure. + * @cmd is to be performed on the shared memory region @shp. + * The @shp may be NULL, e.g. for IPC_INFO or SHM_INFO. + * @shp contains shared memory structure to be modified. * @cmd contains the operation to be performed. * Return 0 if permission is granted. * @shm_shmat: * Check permissions prior to allowing the shmat system call to attach the - * shared memory segment with permissions @perm to the data segment of the - * calling process. The attaching address is specified by @shmaddr. - * @perm contains the IPC permissions of the shared memory structure. 
+ * shared memory segment @shp to the data segment of the calling process. + * The attaching address is specified by @shmaddr. + * @shp contains the shared memory structure to be modified. * @shmaddr contains the address to attach memory region to. * @shmflg contains the operational flags. * Return 0 if permission is granted. @@ -1278,53 +1112,53 @@ * Security hooks for System V Semaphores * * @sem_alloc_security: - * Allocate and attach a security structure to the @perm->security - * field. The security field is initialized to NULL when the structure is + * Allocate and attach a security structure to the sma->sem_perm.security + * field. The security field is initialized to NULL when the structure is * first created. - * @perm contains the IPC permissions of the semaphore. + * @sma contains the semaphore structure * Return 0 if operation was successful and permission is granted. * @sem_free_security: - * Deallocate security structure @perm->security for the semaphore. - * @perm contains the IPC permissions of the semaphore. + * deallocate security struct for this semaphore + * @sma contains the semaphore structure. * @sem_associate: * Check permission when a semaphore is requested through the semget - * system call. This hook is only called when returning the semaphore + * system call. This hook is only called when returning the semaphore * identifier for an existing semaphore, not when a new one must be * created. - * @perm contains the IPC permissions of the semaphore. + * @sma contains the semaphore structure. * @semflg contains the operation control flags. * Return 0 if permission is granted. * @sem_semctl: * Check permission when a semaphore operation specified by @cmd is to be - * performed on the semaphore. The @perm may be NULL, e.g. for + * performed on the semaphore @sma. The @sma may be NULL, e.g. for * IPC_INFO or SEM_INFO. - * @perm contains the IPC permissions of the semaphore. May be NULL. + * @sma contains the semaphore structure. May be NULL. 
* @cmd contains the operation to be performed. * Return 0 if permission is granted. - * @sem_semop: + * @sem_semop * Check permissions before performing operations on members of the - * semaphore set. If the @alter flag is nonzero, the semaphore set + * semaphore set @sma. If the @alter flag is nonzero, the semaphore set * may be modified. - * @perm contains the IPC permissions of the semaphore. + * @sma contains the semaphore structure. * @sops contains the operations to perform. * @nsops contains the number of operations to perform. * @alter contains the flag indicating whether changes are to be made. * Return 0 if permission is granted. * - * @binder_set_context_mgr: + * @binder_set_context_mgr * Check whether @mgr is allowed to be the binder context manager. * @mgr contains the task_struct for the task being registered. * Return 0 if permission is granted. - * @binder_transaction: + * @binder_transaction * Check whether @from is allowed to invoke a binder transaction call * to @to. * @from contains the task_struct for the sending task. * @to contains the task_struct for the receiving task. - * @binder_transfer_binder: + * @binder_transfer_binder * Check whether @from is allowed to transfer a binder reference to @to. * @from contains the task_struct for the sending task. * @to contains the task_struct for the receiving task. - * @binder_transfer_file: + * @binder_transfer_file * Check whether @from is allowed to transfer @file to @to. * @from contains the task_struct for the sending task. * @file contains the struct file being transferred. @@ -1372,22 +1206,19 @@ * @cred contains the credentials to use. * @ns contains the user namespace we want the capability in * @cap contains the capability . - * @opts contains options for the capable check + * @audit: Whether to write an audit message or not * Return 0 if the capability is granted for @tsk. - * @quotactl: - * Check whether the quotactl syscall is allowed for this @sb. 
- * @quota_on: - * Check whether QUOTAON is allowed for this @dentry. * @syslog: * Check permission before accessing the kernel message ring or changing * logging to the console. * See the syslog(2) manual page for an explanation of the @type values. - * @type contains the SYSLOG_ACTION_* constant from + * @type contains the type of action. + * @from_file indicates the context of action (if it came from /proc). * Return 0 if permission is granted. * @settime: * Check permission to change the system time. - * struct timespec64 is defined in and timezone - * is defined in + * struct timespec64 is defined in include/linux/time64.h and timezone + * is defined in include/linux/time.h * @ts contains new time * @tz contains new timezone * Return 0 if permission is granted. @@ -1429,7 +1260,7 @@ * @audit_rule_init: * Allocate and initialize an LSM audit rule structure. * @field contains the required Audit action. - * Fields flags are defined in + * Fields flags are defined in include/linux/audit.h * @op contains the operator the rule uses. * @rulestr contains the context where the rule will be applied to. * @lsmrule contains a pointer to receive the result. @@ -1437,9 +1268,9 @@ * -EINVAL in case of an invalid rule. * * @audit_rule_known: - * Specifies whether given @krule contains any fields related to + * Specifies whether given @rule contains any fields related to * current LSM. - * @krule contains the audit rule of interest. + * @rule contains the audit rule of interest. * Return 1 in case of relation found, 0 otherwise. * * @audit_rule_match: @@ -1448,13 +1279,14 @@ * @secid contains the security id in question. * @field contains the field which relates to current LSM. * @op contains the operator that will be used for matching. - * @lrule points to the audit rule that will be checked against. + * @rule points to the audit rule that will be checked against. + * @actx points to the audit context associated with the check. 
* Return 1 if secid matches the rule, 0 if it does not, -ERRNO on failure. * * @audit_rule_free: * Deallocate the LSM audit rule structure previously allocated by * audit_rule_init. - * @lsmrule contains the allocated rule + * @rule contains the allocated rule * * @inode_invalidate_secctx: * Notify the security module that it must revalidate the security context @@ -1467,7 +1299,9 @@ * this hook to initialize the security context in its incore inode to the * value provided by the server for the file when the server returned the * file's attributes to the client. + * * Must be called with inode->i_mutex locked. + * * @inode we wish to set the security context of. * @ctx contains the string which we wish to set in the inode. * @ctxlen contains the length of @ctx. @@ -1480,7 +1314,9 @@ * this hook to change the security context in its incore inode and on the * backing filesystem to a value provided by the client on a SETATTR * operation. + * * Must be called with inode->i_mutex locked. + * * @dentry contains the inode we wish to set the security context of. * @ctx contains the string which we wish to set in the inode. * @ctxlen contains the length of @ctx. @@ -1488,86 +1324,548 @@ * @inode_getsecctx: * On success, returns 0 and fills out @ctx and @ctxlen with the security * context for the given @inode. + * * @inode we wish to get the security context of. * @ctx is a pointer in which to place the allocated security context. * @ctxlen points to the place to put the length of @ctx. - * - * Security hooks for the general notification queue: - * - * @post_notification: - * Check to see if a watch notification can be posted to a particular - * queue. - * @w_cred: The credentials of the whoever set the watch. - * @cred: The event-triggerer's credentials - * @n: The notification being posted - * - * @watch_key: - * Check to see if a process is allowed to watch for event notifications - * from a key or keyring. - * @key: The key to watch. 
- * - * Security hooks for using the eBPF maps and programs functionalities through - * eBPF syscalls. - * - * @bpf: - * Do a initial check for all bpf syscalls after the attribute is copied - * into the kernel. The actual security module can implement their own - * rules to check the specific cmd they need. - * - * @bpf_map: - * Do a check when the kernel generate and return a file descriptor for - * eBPF maps. - * - * @map: bpf map that we want to access - * @mask: the access flags - * - * @bpf_prog: - * Do a check when the kernel generate and return a file descriptor for - * eBPF programs. - * - * @prog: bpf prog that userspace want to use. - * - * @bpf_map_alloc_security: - * Initialize the security field inside bpf map. - * - * @bpf_map_free_security: - * Clean up the security information stored inside bpf map. - * - * @bpf_prog_alloc_security: - * Initialize the security field inside bpf program. - * - * @bpf_prog_free_security: - * Clean up the security information stored inside bpf prog. - * - * @locked_down: - * Determine whether a kernel feature that potentially enables arbitrary - * code execution in kernel space should be permitted. - * - * @what: kernel feature being accessed - * - * Security hooks for perf events - * - * @perf_event_open: - * Check whether the @type of perf_event_open syscall is allowed. - * @perf_event_alloc: - * Allocate and save perf_event security info. - * @perf_event_free: - * Release (free) perf_event security info. - * @perf_event_read: - * Read perf_event security info if allowed. - * @perf_event_write: - * Write perf_event security info if allowed. + * This is the main security structure. */ + union security_list_options { - #define LSM_HOOK(RET, DEFAULT, NAME, ...) 
RET (*NAME)(__VA_ARGS__); - #include "lsm_hook_defs.h" - #undef LSM_HOOK + int (*binder_set_context_mgr)(struct task_struct *mgr); + int (*binder_transaction)(struct task_struct *from, + struct task_struct *to); + int (*binder_transfer_binder)(struct task_struct *from, + struct task_struct *to); + int (*binder_transfer_file)(struct task_struct *from, + struct task_struct *to, + struct file *file); + + int (*ptrace_access_check)(struct task_struct *child, + unsigned int mode); + int (*ptrace_traceme)(struct task_struct *parent); + int (*capget)(struct task_struct *target, kernel_cap_t *effective, + kernel_cap_t *inheritable, kernel_cap_t *permitted); + int (*capset)(struct cred *new, const struct cred *old, + const kernel_cap_t *effective, + const kernel_cap_t *inheritable, + const kernel_cap_t *permitted); + int (*capable)(const struct cred *cred, struct user_namespace *ns, + int cap, int audit); + int (*quotactl)(int cmds, int type, int id, struct super_block *sb); + int (*quota_on)(struct dentry *dentry); + int (*syslog)(int type); + int (*settime)(const struct timespec64 *ts, const struct timezone *tz); + int (*vm_enough_memory)(struct mm_struct *mm, long pages); + + int (*bprm_set_creds)(struct linux_binprm *bprm); + int (*bprm_check_security)(struct linux_binprm *bprm); + int (*bprm_secureexec)(struct linux_binprm *bprm); + void (*bprm_committing_creds)(struct linux_binprm *bprm); + void (*bprm_committed_creds)(struct linux_binprm *bprm); + + int (*sb_alloc_security)(struct super_block *sb); + void (*sb_free_security)(struct super_block *sb); + int (*sb_copy_data)(char *orig, char *copy); + int (*sb_remount)(struct super_block *sb, void *data); + int (*sb_kern_mount)(struct super_block *sb, int flags, void *data); + int (*sb_show_options)(struct seq_file *m, struct super_block *sb); + int (*sb_statfs)(struct dentry *dentry); + int (*sb_mount)(const char *dev_name, const struct path *path, + const char *type, unsigned long flags, void *data); + int 
(*sb_umount)(struct vfsmount *mnt, int flags); + int (*sb_pivotroot)(const struct path *old_path, const struct path *new_path); + int (*sb_set_mnt_opts)(struct super_block *sb, + struct security_mnt_opts *opts, + unsigned long kern_flags, + unsigned long *set_kern_flags); + int (*sb_clone_mnt_opts)(const struct super_block *oldsb, + struct super_block *newsb); + int (*sb_parse_opts_str)(char *options, struct security_mnt_opts *opts); + int (*dentry_init_security)(struct dentry *dentry, int mode, + const struct qstr *name, void **ctx, + u32 *ctxlen); + int (*dentry_create_files_as)(struct dentry *dentry, int mode, + struct qstr *name, + const struct cred *old, + struct cred *new); + + +#ifdef CONFIG_SECURITY_PATH + int (*path_unlink)(const struct path *dir, struct dentry *dentry); + int (*path_mkdir)(const struct path *dir, struct dentry *dentry, + umode_t mode); + int (*path_rmdir)(const struct path *dir, struct dentry *dentry); + int (*path_mknod)(const struct path *dir, struct dentry *dentry, + umode_t mode, unsigned int dev); + int (*path_truncate)(const struct path *path); + int (*path_symlink)(const struct path *dir, struct dentry *dentry, + const char *old_name); + int (*path_link)(struct dentry *old_dentry, const struct path *new_dir, + struct dentry *new_dentry); + int (*path_rename)(const struct path *old_dir, struct dentry *old_dentry, + const struct path *new_dir, + struct dentry *new_dentry); + int (*path_chmod)(const struct path *path, umode_t mode); + int (*path_chown)(const struct path *path, kuid_t uid, kgid_t gid); + int (*path_chroot)(const struct path *path); +#endif + + int (*inode_alloc_security)(struct inode *inode); + void (*inode_free_security)(struct inode *inode); + int (*inode_init_security)(struct inode *inode, struct inode *dir, + const struct qstr *qstr, + const char **name, void **value, + size_t *len); + int (*inode_create)(struct inode *dir, struct dentry *dentry, + umode_t mode); + int (*inode_link)(struct dentry *old_dentry, 
struct inode *dir, + struct dentry *new_dentry); + int (*inode_unlink)(struct inode *dir, struct dentry *dentry); + int (*inode_symlink)(struct inode *dir, struct dentry *dentry, + const char *old_name); + int (*inode_mkdir)(struct inode *dir, struct dentry *dentry, + umode_t mode); + int (*inode_rmdir)(struct inode *dir, struct dentry *dentry); + int (*inode_mknod)(struct inode *dir, struct dentry *dentry, + umode_t mode, dev_t dev); + int (*inode_rename)(struct inode *old_dir, struct dentry *old_dentry, + struct inode *new_dir, + struct dentry *new_dentry); + int (*inode_readlink)(struct dentry *dentry); + int (*inode_follow_link)(struct dentry *dentry, struct inode *inode, + bool rcu); + int (*inode_permission)(struct inode *inode, int mask); + int (*inode_setattr)(struct dentry *dentry, struct iattr *attr); + int (*inode_getattr)(const struct path *path); + int (*inode_setxattr)(struct dentry *dentry, const char *name, + const void *value, size_t size, int flags); + void (*inode_post_setxattr)(struct dentry *dentry, const char *name, + const void *value, size_t size, + int flags); + int (*inode_getxattr)(struct dentry *dentry, const char *name); + int (*inode_listxattr)(struct dentry *dentry); + int (*inode_removexattr)(struct dentry *dentry, const char *name); + int (*inode_need_killpriv)(struct dentry *dentry); + int (*inode_killpriv)(struct dentry *dentry); + int (*inode_getsecurity)(struct inode *inode, const char *name, + void **buffer, bool alloc); + int (*inode_setsecurity)(struct inode *inode, const char *name, + const void *value, size_t size, + int flags); + int (*inode_listsecurity)(struct inode *inode, char *buffer, + size_t buffer_size); + void (*inode_getsecid)(struct inode *inode, u32 *secid); + int (*inode_copy_up)(struct dentry *src, struct cred **new); + int (*inode_copy_up_xattr)(const char *name); + + int (*file_permission)(struct file *file, int mask); + int (*file_alloc_security)(struct file *file); + void (*file_free_security)(struct file 
*file); + int (*file_ioctl)(struct file *file, unsigned int cmd, + unsigned long arg); + int (*mmap_addr)(unsigned long addr); + int (*mmap_file)(struct file *file, unsigned long reqprot, + unsigned long prot, unsigned long flags); + int (*file_mprotect)(struct vm_area_struct *vma, unsigned long reqprot, + unsigned long prot); + int (*file_lock)(struct file *file, unsigned int cmd); + int (*file_fcntl)(struct file *file, unsigned int cmd, + unsigned long arg); + void (*file_set_fowner)(struct file *file); + int (*file_send_sigiotask)(struct task_struct *tsk, + struct fown_struct *fown, int sig); + int (*file_receive)(struct file *file); + int (*file_open)(struct file *file, const struct cred *cred); + + int (*task_create)(unsigned long clone_flags); + void (*task_free)(struct task_struct *task); + int (*cred_alloc_blank)(struct cred *cred, gfp_t gfp); + void (*cred_free)(struct cred *cred); + int (*cred_prepare)(struct cred *new, const struct cred *old, + gfp_t gfp); + void (*cred_transfer)(struct cred *new, const struct cred *old); + int (*kernel_act_as)(struct cred *new, u32 secid); + int (*kernel_create_files_as)(struct cred *new, struct inode *inode); + int (*kernel_module_request)(char *kmod_name); + int (*kernel_read_file)(struct file *file, enum kernel_read_file_id id); + int (*kernel_post_read_file)(struct file *file, char *buf, loff_t size, + enum kernel_read_file_id id); + int (*task_fix_setuid)(struct cred *new, const struct cred *old, + int flags); + int (*task_setpgid)(struct task_struct *p, pid_t pgid); + int (*task_getpgid)(struct task_struct *p); + int (*task_getsid)(struct task_struct *p); + void (*task_getsecid)(struct task_struct *p, u32 *secid); + int (*task_setnice)(struct task_struct *p, int nice); + int (*task_setioprio)(struct task_struct *p, int ioprio); + int (*task_getioprio)(struct task_struct *p); + int (*task_setrlimit)(struct task_struct *p, unsigned int resource, + struct rlimit *new_rlim); + int (*task_setscheduler)(struct 
task_struct *p); + int (*task_getscheduler)(struct task_struct *p); + int (*task_movememory)(struct task_struct *p); + int (*task_kill)(struct task_struct *p, struct siginfo *info, + int sig, u32 secid); + int (*task_wait)(struct task_struct *p); + int (*task_prctl)(int option, unsigned long arg2, unsigned long arg3, + unsigned long arg4, unsigned long arg5); + void (*task_to_inode)(struct task_struct *p, struct inode *inode); + + int (*ipc_permission)(struct kern_ipc_perm *ipcp, short flag); + void (*ipc_getsecid)(struct kern_ipc_perm *ipcp, u32 *secid); + + int (*msg_msg_alloc_security)(struct msg_msg *msg); + void (*msg_msg_free_security)(struct msg_msg *msg); + + int (*msg_queue_alloc_security)(struct msg_queue *msq); + void (*msg_queue_free_security)(struct msg_queue *msq); + int (*msg_queue_associate)(struct msg_queue *msq, int msqflg); + int (*msg_queue_msgctl)(struct msg_queue *msq, int cmd); + int (*msg_queue_msgsnd)(struct msg_queue *msq, struct msg_msg *msg, + int msqflg); + int (*msg_queue_msgrcv)(struct msg_queue *msq, struct msg_msg *msg, + struct task_struct *target, long type, + int mode); + + int (*shm_alloc_security)(struct shmid_kernel *shp); + void (*shm_free_security)(struct shmid_kernel *shp); + int (*shm_associate)(struct shmid_kernel *shp, int shmflg); + int (*shm_shmctl)(struct shmid_kernel *shp, int cmd); + int (*shm_shmat)(struct shmid_kernel *shp, char __user *shmaddr, + int shmflg); + + int (*sem_alloc_security)(struct sem_array *sma); + void (*sem_free_security)(struct sem_array *sma); + int (*sem_associate)(struct sem_array *sma, int semflg); + int (*sem_semctl)(struct sem_array *sma, int cmd); + int (*sem_semop)(struct sem_array *sma, struct sembuf *sops, + unsigned nsops, int alter); + + int (*netlink_send)(struct sock *sk, struct sk_buff *skb); + + void (*d_instantiate)(struct dentry *dentry, struct inode *inode); + + int (*getprocattr)(struct task_struct *p, char *name, char **value); + int (*setprocattr)(struct task_struct *p, 
char *name, void *value, + size_t size); + int (*ismaclabel)(const char *name); + int (*secid_to_secctx)(u32 secid, char **secdata, u32 *seclen); + int (*secctx_to_secid)(const char *secdata, u32 seclen, u32 *secid); + void (*release_secctx)(char *secdata, u32 seclen); + + void (*inode_invalidate_secctx)(struct inode *inode); + int (*inode_notifysecctx)(struct inode *inode, void *ctx, u32 ctxlen); + int (*inode_setsecctx)(struct dentry *dentry, void *ctx, u32 ctxlen); + int (*inode_getsecctx)(struct inode *inode, void **ctx, u32 *ctxlen); + +#ifdef CONFIG_SECURITY_NETWORK + int (*unix_stream_connect)(struct sock *sock, struct sock *other, + struct sock *newsk); + int (*unix_may_send)(struct socket *sock, struct socket *other); + + int (*socket_create)(int family, int type, int protocol, int kern); + int (*socket_post_create)(struct socket *sock, int family, int type, + int protocol, int kern); + int (*socket_bind)(struct socket *sock, struct sockaddr *address, + int addrlen); + int (*socket_connect)(struct socket *sock, struct sockaddr *address, + int addrlen); + int (*socket_listen)(struct socket *sock, int backlog); + int (*socket_accept)(struct socket *sock, struct socket *newsock); + int (*socket_sendmsg)(struct socket *sock, struct msghdr *msg, + int size); + int (*socket_recvmsg)(struct socket *sock, struct msghdr *msg, + int size, int flags); + int (*socket_getsockname)(struct socket *sock); + int (*socket_getpeername)(struct socket *sock); + int (*socket_getsockopt)(struct socket *sock, int level, int optname); + int (*socket_setsockopt)(struct socket *sock, int level, int optname); + int (*socket_shutdown)(struct socket *sock, int how); + int (*socket_sock_rcv_skb)(struct sock *sk, struct sk_buff *skb); + int (*socket_getpeersec_stream)(struct socket *sock, + char __user *optval, + int __user *optlen, unsigned len); + int (*socket_getpeersec_dgram)(struct socket *sock, + struct sk_buff *skb, u32 *secid); + int (*sk_alloc_security)(struct sock *sk, int 
family, gfp_t priority); + void (*sk_free_security)(struct sock *sk); + void (*sk_clone_security)(const struct sock *sk, struct sock *newsk); + void (*sk_getsecid)(struct sock *sk, u32 *secid); + void (*sock_graft)(struct sock *sk, struct socket *parent); + int (*inet_conn_request)(struct sock *sk, struct sk_buff *skb, + struct request_sock *req); + void (*inet_csk_clone)(struct sock *newsk, + const struct request_sock *req); + void (*inet_conn_established)(struct sock *sk, struct sk_buff *skb); + int (*secmark_relabel_packet)(u32 secid); + void (*secmark_refcount_inc)(void); + void (*secmark_refcount_dec)(void); + void (*req_classify_flow)(const struct request_sock *req, + struct flowi *fl); + int (*tun_dev_alloc_security)(void **security); + void (*tun_dev_free_security)(void *security); + int (*tun_dev_create)(void); + int (*tun_dev_attach_queue)(void *security); + int (*tun_dev_attach)(struct sock *sk, void *security); + int (*tun_dev_open)(void *security); +#endif /* CONFIG_SECURITY_NETWORK */ + +#ifdef CONFIG_SECURITY_NETWORK_XFRM + int (*xfrm_policy_alloc_security)(struct xfrm_sec_ctx **ctxp, + struct xfrm_user_sec_ctx *sec_ctx, + gfp_t gfp); + int (*xfrm_policy_clone_security)(struct xfrm_sec_ctx *old_ctx, + struct xfrm_sec_ctx **new_ctx); + void (*xfrm_policy_free_security)(struct xfrm_sec_ctx *ctx); + int (*xfrm_policy_delete_security)(struct xfrm_sec_ctx *ctx); + int (*xfrm_state_alloc)(struct xfrm_state *x, + struct xfrm_user_sec_ctx *sec_ctx); + int (*xfrm_state_alloc_acquire)(struct xfrm_state *x, + struct xfrm_sec_ctx *polsec, + u32 secid); + void (*xfrm_state_free_security)(struct xfrm_state *x); + int (*xfrm_state_delete_security)(struct xfrm_state *x); + int (*xfrm_policy_lookup)(struct xfrm_sec_ctx *ctx, u32 fl_secid, + u8 dir); + int (*xfrm_state_pol_flow_match)(struct xfrm_state *x, + struct xfrm_policy *xp, + const struct flowi *fl); + int (*xfrm_decode_session)(struct sk_buff *skb, u32 *secid, int ckall); +#endif /* 
CONFIG_SECURITY_NETWORK_XFRM */ + + /* key management security hooks */ +#ifdef CONFIG_KEYS + int (*key_alloc)(struct key *key, const struct cred *cred, + unsigned long flags); + void (*key_free)(struct key *key); + int (*key_permission)(key_ref_t key_ref, const struct cred *cred, + unsigned perm); + int (*key_getsecurity)(struct key *key, char **_buffer); +#endif /* CONFIG_KEYS */ + +#ifdef CONFIG_AUDIT + int (*audit_rule_init)(u32 field, u32 op, char *rulestr, + void **lsmrule); + int (*audit_rule_known)(struct audit_krule *krule); + int (*audit_rule_match)(u32 secid, u32 field, u32 op, void *lsmrule, + struct audit_context *actx); + void (*audit_rule_free)(void *lsmrule); +#endif /* CONFIG_AUDIT */ }; struct security_hook_heads { - #define LSM_HOOK(RET, DEFAULT, NAME, ...) struct hlist_head NAME; - #include "lsm_hook_defs.h" - #undef LSM_HOOK + struct list_head binder_set_context_mgr; + struct list_head binder_transaction; + struct list_head binder_transfer_binder; + struct list_head binder_transfer_file; + struct list_head ptrace_access_check; + struct list_head ptrace_traceme; + struct list_head capget; + struct list_head capset; + struct list_head capable; + struct list_head quotactl; + struct list_head quota_on; + struct list_head syslog; + struct list_head settime; + struct list_head vm_enough_memory; + struct list_head bprm_set_creds; + struct list_head bprm_check_security; + struct list_head bprm_secureexec; + struct list_head bprm_committing_creds; + struct list_head bprm_committed_creds; + struct list_head sb_alloc_security; + struct list_head sb_free_security; + struct list_head sb_copy_data; + struct list_head sb_remount; + struct list_head sb_kern_mount; + struct list_head sb_show_options; + struct list_head sb_statfs; + struct list_head sb_mount; + struct list_head sb_umount; + struct list_head sb_pivotroot; + struct list_head sb_set_mnt_opts; + struct list_head sb_clone_mnt_opts; + struct list_head sb_parse_opts_str; + struct list_head 
dentry_init_security; + struct list_head dentry_create_files_as; +#ifdef CONFIG_SECURITY_PATH + struct list_head path_unlink; + struct list_head path_mkdir; + struct list_head path_rmdir; + struct list_head path_mknod; + struct list_head path_truncate; + struct list_head path_symlink; + struct list_head path_link; + struct list_head path_rename; + struct list_head path_chmod; + struct list_head path_chown; + struct list_head path_chroot; +#endif + struct list_head inode_alloc_security; + struct list_head inode_free_security; + struct list_head inode_init_security; + struct list_head inode_create; + struct list_head inode_link; + struct list_head inode_unlink; + struct list_head inode_symlink; + struct list_head inode_mkdir; + struct list_head inode_rmdir; + struct list_head inode_mknod; + struct list_head inode_rename; + struct list_head inode_readlink; + struct list_head inode_follow_link; + struct list_head inode_permission; + struct list_head inode_setattr; + struct list_head inode_getattr; + struct list_head inode_setxattr; + struct list_head inode_post_setxattr; + struct list_head inode_getxattr; + struct list_head inode_listxattr; + struct list_head inode_removexattr; + struct list_head inode_need_killpriv; + struct list_head inode_killpriv; + struct list_head inode_getsecurity; + struct list_head inode_setsecurity; + struct list_head inode_listsecurity; + struct list_head inode_getsecid; + struct list_head inode_copy_up; + struct list_head inode_copy_up_xattr; + struct list_head file_permission; + struct list_head file_alloc_security; + struct list_head file_free_security; + struct list_head file_ioctl; + struct list_head mmap_addr; + struct list_head mmap_file; + struct list_head file_mprotect; + struct list_head file_lock; + struct list_head file_fcntl; + struct list_head file_set_fowner; + struct list_head file_send_sigiotask; + struct list_head file_receive; + struct list_head file_open; + struct list_head task_create; + struct list_head task_free; + 
struct list_head cred_alloc_blank; + struct list_head cred_free; + struct list_head cred_prepare; + struct list_head cred_transfer; + struct list_head kernel_act_as; + struct list_head kernel_create_files_as; + struct list_head kernel_read_file; + struct list_head kernel_post_read_file; + struct list_head kernel_module_request; + struct list_head task_fix_setuid; + struct list_head task_setpgid; + struct list_head task_getpgid; + struct list_head task_getsid; + struct list_head task_getsecid; + struct list_head task_setnice; + struct list_head task_setioprio; + struct list_head task_getioprio; + struct list_head task_setrlimit; + struct list_head task_setscheduler; + struct list_head task_getscheduler; + struct list_head task_movememory; + struct list_head task_kill; + struct list_head task_wait; + struct list_head task_prctl; + struct list_head task_to_inode; + struct list_head ipc_permission; + struct list_head ipc_getsecid; + struct list_head msg_msg_alloc_security; + struct list_head msg_msg_free_security; + struct list_head msg_queue_alloc_security; + struct list_head msg_queue_free_security; + struct list_head msg_queue_associate; + struct list_head msg_queue_msgctl; + struct list_head msg_queue_msgsnd; + struct list_head msg_queue_msgrcv; + struct list_head shm_alloc_security; + struct list_head shm_free_security; + struct list_head shm_associate; + struct list_head shm_shmctl; + struct list_head shm_shmat; + struct list_head sem_alloc_security; + struct list_head sem_free_security; + struct list_head sem_associate; + struct list_head sem_semctl; + struct list_head sem_semop; + struct list_head netlink_send; + struct list_head d_instantiate; + struct list_head getprocattr; + struct list_head setprocattr; + struct list_head ismaclabel; + struct list_head secid_to_secctx; + struct list_head secctx_to_secid; + struct list_head release_secctx; + struct list_head inode_invalidate_secctx; + struct list_head inode_notifysecctx; + struct list_head inode_setsecctx; + 
struct list_head inode_getsecctx; +#ifdef CONFIG_SECURITY_NETWORK + struct list_head unix_stream_connect; + struct list_head unix_may_send; + struct list_head socket_create; + struct list_head socket_post_create; + struct list_head socket_bind; + struct list_head socket_connect; + struct list_head socket_listen; + struct list_head socket_accept; + struct list_head socket_sendmsg; + struct list_head socket_recvmsg; + struct list_head socket_getsockname; + struct list_head socket_getpeername; + struct list_head socket_getsockopt; + struct list_head socket_setsockopt; + struct list_head socket_shutdown; + struct list_head socket_sock_rcv_skb; + struct list_head socket_getpeersec_stream; + struct list_head socket_getpeersec_dgram; + struct list_head sk_alloc_security; + struct list_head sk_free_security; + struct list_head sk_clone_security; + struct list_head sk_getsecid; + struct list_head sock_graft; + struct list_head inet_conn_request; + struct list_head inet_csk_clone; + struct list_head inet_conn_established; + struct list_head secmark_relabel_packet; + struct list_head secmark_refcount_inc; + struct list_head secmark_refcount_dec; + struct list_head req_classify_flow; + struct list_head tun_dev_alloc_security; + struct list_head tun_dev_free_security; + struct list_head tun_dev_create; + struct list_head tun_dev_attach_queue; + struct list_head tun_dev_attach; + struct list_head tun_dev_open; +#endif /* CONFIG_SECURITY_NETWORK */ +#ifdef CONFIG_SECURITY_NETWORK_XFRM + struct list_head xfrm_policy_alloc_security; + struct list_head xfrm_policy_clone_security; + struct list_head xfrm_policy_free_security; + struct list_head xfrm_policy_delete_security; + struct list_head xfrm_state_alloc; + struct list_head xfrm_state_alloc_acquire; + struct list_head xfrm_state_free_security; + struct list_head xfrm_state_delete_security; + struct list_head xfrm_policy_lookup; + struct list_head xfrm_state_pol_flow_match; + struct list_head xfrm_decode_session; +#endif /* 
CONFIG_SECURITY_NETWORK_XFRM */ +#ifdef CONFIG_KEYS + struct list_head key_alloc; + struct list_head key_free; + struct list_head key_permission; + struct list_head key_getsecurity; +#endif /* CONFIG_KEYS */ +#ifdef CONFIG_AUDIT + struct list_head audit_rule_init; + struct list_head audit_rule_known; + struct list_head audit_rule_match; + struct list_head audit_rule_free; +#endif /* CONFIG_AUDIT */ } __randomize_layout; /* @@ -1575,31 +1873,11 @@ struct security_hook_heads { * For use with generic list macros for common operations. */ struct security_hook_list { - struct hlist_node list; - struct hlist_head *head; + struct list_head list; + struct list_head *head; union security_list_options hook; - char *lsm; } __randomize_layout; -/* - * Security blob size or offset data. - */ -struct lsm_blob_sizes { - int lbs_cred; - int lbs_file; - int lbs_inode; - int lbs_superblock; - int lbs_ipc; - int lbs_msg_msg; - int lbs_task; -}; - -/* - * LSM_RET_VOID is used as the default value in LSM_HOOK definitions for void - * LSM hooks (in include/linux/lsm_hook_defs.h). - */ -#define LSM_RET_VOID ((void) 0) - /* * Initializing a security_hook_list structure takes * up a lot of space in a source file. This macro takes @@ -1610,40 +1888,15 @@ struct lsm_blob_sizes { { .head = &security_hook_heads.HEAD, .hook = { .HEAD = HOOK } } extern struct security_hook_heads security_hook_heads; -extern char *lsm_names; -extern void security_add_hooks(struct security_hook_list *hooks, int count, - char *lsm); +static inline void security_add_hooks(struct security_hook_list *hooks, + int count) +{ + int i; -#define LSM_FLAG_LEGACY_MAJOR BIT(0) -#define LSM_FLAG_EXCLUSIVE BIT(1) - -enum lsm_order { - LSM_ORDER_FIRST = -1, /* This is only for capabilities. */ - LSM_ORDER_MUTABLE = 0, -}; - -struct lsm_info { - const char *name; /* Required. 
*/ - enum lsm_order order; /* Optional: default is LSM_ORDER_MUTABLE */ - unsigned long flags; /* Optional: flags describing LSM */ - int *enabled; /* Optional: controlled by CONFIG_LSM */ - int (*init)(void); /* Required. */ - struct lsm_blob_sizes *blobs; /* Optional: for blob sharing. */ -}; - -extern struct lsm_info __start_lsm_info[], __end_lsm_info[]; -extern struct lsm_info __start_early_lsm_info[], __end_early_lsm_info[]; - -#define DEFINE_LSM(lsm) \ - static struct lsm_info __lsm_##lsm \ - __used __section(".lsm_info.init") \ - __aligned(sizeof(unsigned long)) - -#define DEFINE_EARLY_LSM(lsm) \ - static struct lsm_info __early_lsm_##lsm \ - __used __section(".early_lsm_info.init") \ - __aligned(sizeof(unsigned long)) + for (i = 0; i < count; i++) + list_add_tail_rcu(&hooks[i].list, hooks[i].head); +} #ifdef CONFIG_SECURITY_SELINUX_DISABLE /* @@ -1664,17 +1917,21 @@ static inline void security_delete_hooks(struct security_hook_list *hooks, int i; for (i = 0; i < count; i++) - hlist_del_rcu(&hooks[i].list); + list_del_rcu(&hooks[i].list); } #endif /* CONFIG_SECURITY_SELINUX_DISABLE */ -/* Currently required to handle SELinux runtime hook disable. */ -#ifdef CONFIG_SECURITY_WRITABLE_HOOKS -#define __lsm_ro_after_init +extern int __init security_module_enable(const char *module); +extern void __init capability_add_hooks(void); +#ifdef CONFIG_SECURITY_YAMA +extern void __init yama_add_hooks(void); #else -#define __lsm_ro_after_init __ro_after_init -#endif /* CONFIG_SECURITY_WRITABLE_HOOKS */ - -extern int lsm_inode_alloc(struct inode *inode); +static inline void __init yama_add_hooks(void) { } +#endif +#ifdef CONFIG_SECURITY_LOADPIN +void __init loadpin_add_hooks(void); +#else +static inline void loadpin_add_hooks(void) { }; +#endif #endif /* ! 
__LINUX_LSM_HOOKS_H */ diff --git a/include/linux/lz4.h b/include/linux/lz4.h index b16e15b958..6b784c59f3 100644 --- a/include/linux/lz4.h +++ b/include/linux/lz4.h @@ -1,648 +1,87 @@ -/* LZ4 Kernel Interface +#ifndef __LZ4_H__ +#define __LZ4_H__ +/* + * LZ4 Kernel Interface * * Copyright (C) 2013, LG Electronics, Kyungsik Lee - * Copyright (C) 2016, Sven Schmidt <4sschmid@informatik.uni-hamburg.de> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. - * - * This file is based on the original header file - * for LZ4 - Fast LZ compression algorithm. - * - * LZ4 - Fast LZ compression algorithm - * Copyright (C) 2011-2016, Yann Collet. - * BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are - * met: - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following disclaimer - * in the documentation and/or other materials provided with the - * distribution. - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * You can contact the author at : - * - LZ4 homepage : http://www.lz4.org - * - LZ4 source repository : https://github.com/lz4/lz4 */ - -#ifndef __LZ4_H__ -#define __LZ4_H__ - -#include -#include /* memset, memcpy */ - -/*-************************************************************************ - * CONSTANTS - **************************************************************************/ -/* - * LZ4_MEMORY_USAGE : - * Memory usage formula : N->2^N Bytes - * (examples : 10 -> 1KB; 12 -> 4KB ; 16 -> 64KB; 20 -> 1MB; etc.) - * Increasing memory usage improves compression ratio - * Reduced memory usage can improve speed, due to cache effect - * Default value is 14, for 16KB, which nicely fits into Intel x86 L1 cache - */ -#define LZ4_MEMORY_USAGE 14 - -#define LZ4_MAX_INPUT_SIZE 0x7E000000 /* 2 113 929 216 bytes */ -#define LZ4_COMPRESSBOUND(isize) (\ - (unsigned int)(isize) > (unsigned int)LZ4_MAX_INPUT_SIZE \ - ? 0 \ - : (isize) + ((isize)/255) + 16) - -#define LZ4_ACCELERATION_DEFAULT 1 -#define LZ4_HASHLOG (LZ4_MEMORY_USAGE-2) -#define LZ4_HASHTABLESIZE (1 << LZ4_MEMORY_USAGE) -#define LZ4_HASH_SIZE_U32 (1 << LZ4_HASHLOG) - -#define LZ4HC_MIN_CLEVEL 3 -#define LZ4HC_DEFAULT_CLEVEL 9 -#define LZ4HC_MAX_CLEVEL 16 - -#define LZ4HC_DICTIONARY_LOGSIZE 16 -#define LZ4HC_MAXD (1<= LZ4_compressBound(inputSize). - * It also runs faster, so it's a recommended setting. 
- * If the function cannot compress 'source' into a more limited 'dest' budget, - * compression stops *immediately*, and the function result is zero. - * As a consequence, 'dest' content is not valid. - * - * Return: Number of bytes written into buffer 'dest' - * (necessarily <= maxOutputSize) or 0 if compression fails +/* + * lz4_compress() + * src : source address of the original data + * src_len : size of the original data + * dst : output buffer address of the compressed data + * This requires 'dst' of size LZ4_COMPRESSBOUND. + * dst_len : is the output size, which is returned after compress done + * workmem : address of the working memory. + * This requires 'workmem' of size LZ4_MEM_COMPRESS. + * return : Success if return 0 + * Error if return (< 0) + * note : Destination buffer and workmem must be already allocated with + * the defined size. */ -int LZ4_compress_default(const char *source, char *dest, int inputSize, - int maxOutputSize, void *wrkmem); +int lz4_compress(const unsigned char *src, size_t src_len, + unsigned char *dst, size_t *dst_len, void *wrkmem); -/** - * LZ4_compress_fast() - As LZ4_compress_default providing an acceleration param - * @source: source address of the original data - * @dest: output buffer address of the compressed data - * @inputSize: size of the input data. Max supported value is LZ4_MAX_INPUT_SIZE - * @maxOutputSize: full or partial size of buffer 'dest' - * which must be already allocated - * @acceleration: acceleration factor - * @wrkmem: address of the working memory. - * This requires 'workmem' of LZ4_MEM_COMPRESS. - * - * Same as LZ4_compress_default(), but allows to select an "acceleration" - * factor. The larger the acceleration value, the faster the algorithm, - * but also the lesser the compression. It's a trade-off. It can be fine tuned, - * with each successive value providing roughly +~3% to speed. 
- * An acceleration value of "1" is the same as regular LZ4_compress_default() - * Values <= 0 will be replaced by LZ4_ACCELERATION_DEFAULT, which is 1. - * - * Return: Number of bytes written into buffer 'dest' - * (necessarily <= maxOutputSize) or 0 if compression fails + /* + * lz4hc_compress() + * src : source address of the original data + * src_len : size of the original data + * dst : output buffer address of the compressed data + * This requires 'dst' of size LZ4_COMPRESSBOUND. + * dst_len : is the output size, which is returned after compress done + * workmem : address of the working memory. + * This requires 'workmem' of size LZ4HC_MEM_COMPRESS. + * return : Success if return 0 + * Error if return (< 0) + * note : Destination buffer and workmem must be already allocated with + * the defined size. + */ +int lz4hc_compress(const unsigned char *src, size_t src_len, + unsigned char *dst, size_t *dst_len, void *wrkmem); + +/* + * lz4_decompress() + * src : source address of the compressed data + * src_len : is the input size, which is returned after decompress done + * dest : output buffer address of the decompressed data + * actual_dest_len: is the size of uncompressed data, supposing it's known + * return : Success if return 0 + * Error if return (< 0) + * note : Destination buffer must be already allocated. + * slightly faster than lz4_decompress_unknownoutputsize() */ -int LZ4_compress_fast(const char *source, char *dest, int inputSize, - int maxOutputSize, int acceleration, void *wrkmem); +int lz4_decompress(const unsigned char *src, size_t *src_len, + unsigned char *dest, size_t actual_dest_len); -/** - * LZ4_compress_destSize() - Compress as much data as possible - * from source to dest - * @source: source address of the original data - * @dest: output buffer address of the compressed data - * @sourceSizePtr: will be modified to indicate how many bytes where read - * from 'source' to fill 'dest'. New value is necessarily <= old value.
- * @targetDestSize: Size of buffer 'dest' which must be already allocated - * @wrkmem: address of the working memory. - * This requires 'workmem' of LZ4_MEM_COMPRESS. - * - * Reverse the logic, by compressing as much data as possible - * from 'source' buffer into already allocated buffer 'dest' - * of size 'targetDestSize'. - * This function either compresses the entire 'source' content into 'dest' - * if it's large enough, or fill 'dest' buffer completely with as much data as - * possible from 'source'. - * - * Return: Number of bytes written into 'dest' (necessarily <= targetDestSize) - * or 0 if compression fails +/* + * lz4_decompress_unknownoutputsize() + * src : source address of the compressed data + * src_len : is the input size, therefore the compressed size + * dest : output buffer address of the decompressed data + * dest_len: is the max size of the destination buffer, which is + * returned with actual size of decompressed data after + * decompress done + * return : Success if return 0 + * Error if return (< 0) + * note : Destination buffer must be already allocated. */ -int LZ4_compress_destSize(const char *source, char *dest, int *sourceSizePtr, - int targetDestSize, void *wrkmem); - -/*-************************************************************************ - * Decompression Functions - **************************************************************************/ - -/** - * LZ4_decompress_fast() - Decompresses data from 'source' into 'dest' - * @source: source address of the compressed data - * @dest: output buffer address of the uncompressed data - * which must be already allocated with 'originalSize' bytes - * @originalSize: is the original and therefore uncompressed size - * - * Decompresses data from 'source' into 'dest'. - * This function fully respect memory boundaries for properly formed - * compressed data. - * It is a bit faster than LZ4_decompress_safe(). 
- * However, it does not provide any protection against intentionally - * modified data stream (malicious input). - * Use this function in trusted environment only - * (data to decode comes from a trusted source). - * - * Return: number of bytes read from the source buffer - * or a negative result if decompression fails. - */ -int LZ4_decompress_fast(const char *source, char *dest, int originalSize); - -/** - * LZ4_decompress_safe() - Decompression protected against buffer overflow - * @source: source address of the compressed data - * @dest: output buffer address of the uncompressed data - * which must be already allocated - * @compressedSize: is the precise full size of the compressed block - * @maxDecompressedSize: is the size of 'dest' buffer - * - * Decompresses data from 'source' into 'dest'. - * If the source stream is detected malformed, the function will - * stop decoding and return a negative result. - * This function is protected against buffer overflow exploits, - * including malicious data packets. It never writes outside output buffer, - * nor reads outside input buffer. - * - * Return: number of bytes decompressed into destination buffer - * (necessarily <= maxDecompressedSize) - * or a negative result in case of error - */ -int LZ4_decompress_safe(const char *source, char *dest, int compressedSize, - int maxDecompressedSize); - -/** - * LZ4_decompress_safe_partial() - Decompress a block of size 'compressedSize' - * at position 'source' into buffer 'dest' - * @source: source address of the compressed data - * @dest: output buffer address of the decompressed data which must be - * already allocated - * @compressedSize: is the precise full size of the compressed block. 
- * @targetOutputSize: the decompression operation will try - * to stop as soon as 'targetOutputSize' has been reached - * @maxDecompressedSize: is the size of destination buffer - * - * This function decompresses a compressed block of size 'compressedSize' - * at position 'source' into destination buffer 'dest' - * of size 'maxDecompressedSize'. - * The function tries to stop decompressing operation as soon as - * 'targetOutputSize' has been reached, reducing decompression time. - * This function never writes outside of output buffer, - * and never reads outside of input buffer. - * It is therefore protected against malicious data packets. - * - * Return: the number of bytes decoded in the destination buffer - * (necessarily <= maxDecompressedSize) - * or a negative result in case of error - * - */ -int LZ4_decompress_safe_partial(const char *source, char *dest, - int compressedSize, int targetOutputSize, int maxDecompressedSize); - -/*-************************************************************************ - * LZ4 HC Compression - **************************************************************************/ - -/** - * LZ4_compress_HC() - Compress data from `src` into `dst`, using HC algorithm - * @src: source address of the original data - * @dst: output buffer address of the compressed data - * @srcSize: size of the input data. Max supported value is LZ4_MAX_INPUT_SIZE - * @dstCapacity: full or partial size of buffer 'dst', - * which must be already allocated - * @compressionLevel: Recommended values are between 4 and 9, although any - * value between 1 and LZ4HC_MAX_CLEVEL will work. - * Values >LZ4HC_MAX_CLEVEL behave the same as 16. - * @wrkmem: address of the working memory. - * This requires 'wrkmem' of size LZ4HC_MEM_COMPRESS. - * - * Compress data from 'src' into 'dst', using the more powerful - * but slower "HC" algorithm. 
Compression is guaranteed to succeed if - * `dstCapacity >= LZ4_compressBound(srcSize) - * - * Return : the number of bytes written into 'dst' or 0 if compression fails. - */ -int LZ4_compress_HC(const char *src, char *dst, int srcSize, int dstCapacity, - int compressionLevel, void *wrkmem); - -/** - * LZ4_resetStreamHC() - Init an allocated 'LZ4_streamHC_t' structure - * @streamHCPtr: pointer to the 'LZ4_streamHC_t' structure - * @compressionLevel: Recommended values are between 4 and 9, although any - * value between 1 and LZ4HC_MAX_CLEVEL will work. - * Values >LZ4HC_MAX_CLEVEL behave the same as 16. - * - * An LZ4_streamHC_t structure can be allocated once - * and re-used multiple times. - * Use this function to init an allocated `LZ4_streamHC_t` structure - * and start a new compression. - */ -void LZ4_resetStreamHC(LZ4_streamHC_t *streamHCPtr, int compressionLevel); - -/** - * LZ4_loadDictHC() - Load a static dictionary into LZ4_streamHC - * @streamHCPtr: pointer to the LZ4HC_stream_t - * @dictionary: dictionary to load - * @dictSize: size of dictionary - * - * Use this function to load a static dictionary into LZ4HC_stream. - * Any previous data will be forgotten, only 'dictionary' - * will remain in memory. - * Loading a size of 0 is allowed. - * - * Return : dictionary size, in bytes (necessarily <= 64 KB) - */ -int LZ4_loadDictHC(LZ4_streamHC_t *streamHCPtr, const char *dictionary, - int dictSize); - -/** - * LZ4_compress_HC_continue() - Compress 'src' using data from previously - * compressed blocks as a dictionary using the HC algorithm - * @streamHCPtr: Pointer to the previous 'LZ4_streamHC_t' structure - * @src: source address of the original data - * @dst: output buffer address of the compressed data, - * which must be already allocated - * @srcSize: size of the input data. 
Max supported value is LZ4_MAX_INPUT_SIZE - * @maxDstSize: full or partial size of buffer 'dest' - * which must be already allocated - * - * These functions compress data in successive blocks of any size, using - * previous blocks as dictionary. One key assumption is that previous - * blocks (up to 64 KB) remain read-accessible while - * compressing next blocks. There is an exception for ring buffers, - * which can be smaller than 64 KB. - * Ring buffers scenario is automatically detected and handled by - * LZ4_compress_HC_continue(). - * Before starting compression, state must be properly initialized, - * using LZ4_resetStreamHC(). - * A first "fictional block" can then be designated as - * initial dictionary, using LZ4_loadDictHC() (Optional). - * Then, use LZ4_compress_HC_continue() - * to compress each successive block. Previous memory blocks - * (including initial dictionary when present) must remain accessible - * and unmodified during compression. - * 'dst' buffer should be sized to handle worst case scenarios, using - * LZ4_compressBound(), to ensure operation success. - * If, for any reason, previous data blocks can't be preserved unmodified - * in memory during next compression block, - * you must save it to a safer memory space, using LZ4_saveDictHC(). - * Return value of LZ4_saveDictHC() is the size of dictionary - * effectively saved into 'safeBuffer'. - * - * Return: Number of bytes written into buffer 'dst' or 0 if compression fails - */ -int LZ4_compress_HC_continue(LZ4_streamHC_t *streamHCPtr, const char *src, - char *dst, int srcSize, int maxDstSize); - -/** - * LZ4_saveDictHC() - Save static dictionary from LZ4HC_stream - * @streamHCPtr: pointer to the 'LZ4HC_stream_t' structure - * @safeBuffer: buffer to save dictionary to, must be already allocated - * @maxDictSize: size of 'safeBuffer' - * - * If previously compressed data block is not guaranteed - * to remain available at its memory location, - * save it into a safer place (char *safeBuffer). 
- * Note : you don't need to call LZ4_loadDictHC() afterwards, - * dictionary is immediately usable, you can therefore call - * LZ4_compress_HC_continue(). - * - * Return : saved dictionary size in bytes (necessarily <= maxDictSize), - * or 0 if error. - */ -int LZ4_saveDictHC(LZ4_streamHC_t *streamHCPtr, char *safeBuffer, - int maxDictSize); - -/*-********************************************* - * Streaming Compression Functions - ***********************************************/ - -/** - * LZ4_resetStream() - Init an allocated 'LZ4_stream_t' structure - * @LZ4_stream: pointer to the 'LZ4_stream_t' structure - * - * An LZ4_stream_t structure can be allocated once - * and re-used multiple times. - * Use this function to init an allocated `LZ4_stream_t` structure - * and start a new compression. - */ -void LZ4_resetStream(LZ4_stream_t *LZ4_stream); - -/** - * LZ4_loadDict() - Load a static dictionary into LZ4_stream - * @streamPtr: pointer to the LZ4_stream_t - * @dictionary: dictionary to load - * @dictSize: size of dictionary - * - * Use this function to load a static dictionary into LZ4_stream. - * Any previous data will be forgotten, only 'dictionary' - * will remain in memory. - * Loading a size of 0 is allowed. - * - * Return : dictionary size, in bytes (necessarily <= 64 KB) - */ -int LZ4_loadDict(LZ4_stream_t *streamPtr, const char *dictionary, - int dictSize); - -/** - * LZ4_saveDict() - Save static dictionary from LZ4_stream - * @streamPtr: pointer to the 'LZ4_stream_t' structure - * @safeBuffer: buffer to save dictionary to, must be already allocated - * @dictSize: size of 'safeBuffer' - * - * If previously compressed data block is not guaranteed - * to remain available at its memory location, - * save it into a safer place (char *safeBuffer). - * Note : you don't need to call LZ4_loadDict() afterwards, - * dictionary is immediately usable, you can therefore call - * LZ4_compress_fast_continue(). 
- * - * Return : saved dictionary size in bytes (necessarily <= dictSize), - * or 0 if error. - */ -int LZ4_saveDict(LZ4_stream_t *streamPtr, char *safeBuffer, int dictSize); - -/** - * LZ4_compress_fast_continue() - Compress 'src' using data from previously - * compressed blocks as a dictionary - * @streamPtr: Pointer to the previous 'LZ4_stream_t' structure - * @src: source address of the original data - * @dst: output buffer address of the compressed data, - * which must be already allocated - * @srcSize: size of the input data. Max supported value is LZ4_MAX_INPUT_SIZE - * @maxDstSize: full or partial size of buffer 'dest' - * which must be already allocated - * @acceleration: acceleration factor - * - * Compress buffer content 'src', using data from previously compressed blocks - * as dictionary to improve compression ratio. - * Important : Previous data blocks are assumed to still - * be present and unmodified ! - * If maxDstSize >= LZ4_compressBound(srcSize), - * compression is guaranteed to succeed, and runs faster. - * - * Return: Number of bytes written into buffer 'dst' or 0 if compression fails - */ -int LZ4_compress_fast_continue(LZ4_stream_t *streamPtr, const char *src, - char *dst, int srcSize, int maxDstSize, int acceleration); - -/** - * LZ4_setStreamDecode() - Instruct where to find dictionary - * @LZ4_streamDecode: the 'LZ4_streamDecode_t' structure - * @dictionary: dictionary to use - * @dictSize: size of dictionary - * - * Use this function to instruct where to find the dictionary. - * Setting a size of 0 is allowed (same effect as reset). 
- * - * Return: 1 if OK, 0 if error - */ -int LZ4_setStreamDecode(LZ4_streamDecode_t *LZ4_streamDecode, - const char *dictionary, int dictSize); - -/** - * LZ4_decompress_safe_continue() - Decompress blocks in streaming mode - * @LZ4_streamDecode: the 'LZ4_streamDecode_t' structure - * @source: source address of the compressed data - * @dest: output buffer address of the uncompressed data - * which must be already allocated - * @compressedSize: is the precise full size of the compressed block - * @maxDecompressedSize: is the size of 'dest' buffer - * - * This decoding function allows decompression of multiple blocks - * in "streaming" mode. - * Previously decoded blocks *must* remain available at the memory position - * where they were decoded (up to 64 KB) - * In the case of a ring buffers, decoding buffer must be either : - * - Exactly same size as encoding buffer, with same update rule - * (block boundaries at same positions) In which case, - * the decoding & encoding ring buffer can have any size, - * including very small ones ( < 64 KB). - * - Larger than encoding buffer, by a minimum of maxBlockSize more bytes. - * maxBlockSize is implementation dependent. - * It's the maximum size you intend to compress into a single block. - * In which case, encoding and decoding buffers do not need - * to be synchronized, and encoding ring buffer can have any size, - * including small ones ( < 64 KB). - * - _At least_ 64 KB + 8 bytes + maxBlockSize. - * In which case, encoding and decoding buffers do not need to be - * synchronized, and encoding ring buffer can have any size, - * including larger than decoding buffer. 
W - * Whenever these conditions are not possible, save the last 64KB of decoded - * data into a safe buffer, and indicate where it is saved - * using LZ4_setStreamDecode() - * - * Return: number of bytes decompressed into destination buffer - * (necessarily <= maxDecompressedSize) - * or a negative result in case of error - */ -int LZ4_decompress_safe_continue(LZ4_streamDecode_t *LZ4_streamDecode, - const char *source, char *dest, int compressedSize, - int maxDecompressedSize); - -/** - * LZ4_decompress_fast_continue() - Decompress blocks in streaming mode - * @LZ4_streamDecode: the 'LZ4_streamDecode_t' structure - * @source: source address of the compressed data - * @dest: output buffer address of the uncompressed data - * which must be already allocated with 'originalSize' bytes - * @originalSize: is the original and therefore uncompressed size - * - * This decoding function allows decompression of multiple blocks - * in "streaming" mode. - * Previously decoded blocks *must* remain available at the memory position - * where they were decoded (up to 64 KB) - * In the case of a ring buffers, decoding buffer must be either : - * - Exactly same size as encoding buffer, with same update rule - * (block boundaries at same positions) In which case, - * the decoding & encoding ring buffer can have any size, - * including very small ones ( < 64 KB). - * - Larger than encoding buffer, by a minimum of maxBlockSize more bytes. - * maxBlockSize is implementation dependent. - * It's the maximum size you intend to compress into a single block. - * In which case, encoding and decoding buffers do not need - * to be synchronized, and encoding ring buffer can have any size, - * including small ones ( < 64 KB). - * - _At least_ 64 KB + 8 bytes + maxBlockSize. - * In which case, encoding and decoding buffers do not need to be - * synchronized, and encoding ring buffer can have any size, - * including larger than decoding buffer. 
W - * Whenever these conditions are not possible, save the last 64KB of decoded - * data into a safe buffer, and indicate where it is saved - * using LZ4_setStreamDecode() - * - * Return: number of bytes decompressed into destination buffer - * (necessarily <= maxDecompressedSize) - * or a negative result in case of error - */ -int LZ4_decompress_fast_continue(LZ4_streamDecode_t *LZ4_streamDecode, - const char *source, char *dest, int originalSize); - -/** - * LZ4_decompress_safe_usingDict() - Same as LZ4_setStreamDecode() - * followed by LZ4_decompress_safe_continue() - * @source: source address of the compressed data - * @dest: output buffer address of the uncompressed data - * which must be already allocated - * @compressedSize: is the precise full size of the compressed block - * @maxDecompressedSize: is the size of 'dest' buffer - * @dictStart: pointer to the start of the dictionary in memory - * @dictSize: size of dictionary - * - * This decoding function works the same as - * a combination of LZ4_setStreamDecode() followed by - * LZ4_decompress_safe_continue() - * It is stand-alone, and doesn't need an LZ4_streamDecode_t structure. 
- * - * Return: number of bytes decompressed into destination buffer - * (necessarily <= maxDecompressedSize) - * or a negative result in case of error - */ -int LZ4_decompress_safe_usingDict(const char *source, char *dest, - int compressedSize, int maxDecompressedSize, const char *dictStart, - int dictSize); - -/** - * LZ4_decompress_fast_usingDict() - Same as LZ4_setStreamDecode() - * followed by LZ4_decompress_fast_continue() - * @source: source address of the compressed data - * @dest: output buffer address of the uncompressed data - * which must be already allocated with 'originalSize' bytes - * @originalSize: is the original and therefore uncompressed size - * @dictStart: pointer to the start of the dictionary in memory - * @dictSize: size of dictionary - * - * This decoding function works the same as - * a combination of LZ4_setStreamDecode() followed by - * LZ4_decompress_fast_continue() - * It is stand-alone, and doesn't need an LZ4_streamDecode_t structure. - * - * Return: number of bytes decompressed into destination buffer - * (necessarily <= maxDecompressedSize) - * or a negative result in case of error - */ -int LZ4_decompress_fast_usingDict(const char *source, char *dest, - int originalSize, const char *dictStart, int dictSize); - +int lz4_decompress_unknownoutputsize(const unsigned char *src, size_t src_len, + unsigned char *dest, size_t *dest_len); #endif diff --git a/include/linux/lzo.h b/include/linux/lzo.h index e95c7d1092..a0848d9377 100644 --- a/include/linux/lzo.h +++ b/include/linux/lzo.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef __LZO_H__ #define __LZO_H__ /* @@ -18,16 +17,12 @@ #define LZO1X_1_MEM_COMPRESS (8192 * sizeof(unsigned short)) #define LZO1X_MEM_COMPRESS LZO1X_1_MEM_COMPRESS -#define lzo1x_worst_compress(x) ((x) + ((x) / 16) + 64 + 3 + 2) +#define lzo1x_worst_compress(x) ((x) + ((x) / 16) + 64 + 3) /* This requires 'wrkmem' of size LZO1X_1_MEM_COMPRESS */ int lzo1x_1_compress(const unsigned char *src, size_t 
src_len, unsigned char *dst, size_t *dst_len, void *wrkmem); -/* This requires 'wrkmem' of size LZO1X_1_MEM_COMPRESS */ -int lzorle1x_1_compress(const unsigned char *src, size_t src_len, - unsigned char *dst, size_t *dst_len, void *wrkmem); - /* safe decompression with overrun testing */ int lzo1x_decompress_safe(const unsigned char *src, size_t src_len, unsigned char *dst, size_t *dst_len); diff --git a/include/linux/mISDNdsp.h b/include/linux/mISDNdsp.h index 00758f45fd..41d1eeb9b3 100644 --- a/include/linux/mISDNdsp.h +++ b/include/linux/mISDNdsp.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef __mISDNdsp_H__ #define __mISDNdsp_H__ diff --git a/include/linux/mISDNhw.h b/include/linux/mISDNhw.h index ef4f8eb02e..9d96d5d4df 100644 --- a/include/linux/mISDNhw.h +++ b/include/linux/mISDNhw.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * * Author Karsten Keil @@ -6,6 +5,16 @@ * Basic declarations for the mISDN HW channels * * Copyright 2008 by Karsten Keil + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * */ #ifndef MISDNHW_H diff --git a/include/linux/mISDNif.h b/include/linux/mISDNif.h index 7dd1f01ec4..ac02c54520 100644 --- a/include/linux/mISDNif.h +++ b/include/linux/mISDNif.h @@ -18,6 +18,7 @@ #ifndef mISDNIF_H #define mISDNIF_H +#include #include #include #include @@ -553,7 +554,7 @@ _alloc_mISDN_skb(u_int prim, u_int id, u_int len, void *dp, gfp_t gfp_mask) if (!skb) return NULL; if (len) - skb_put_data(skb, dp, len); + memcpy(skb_put(skb, len), dp, len); hh = mISDN_HEAD_P(skb); hh->prim = prim; hh->id = id; diff --git a/include/linux/mailbox/brcm-message.h b/include/linux/mailbox/brcm-message.h index 18da821154..6b55c938b4 100644 --- a/include/linux/mailbox/brcm-message.h +++ b/include/linux/mailbox/brcm-message.h @@ -1,7 +1,10 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright (C) 2016 Broadcom * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * * Common header for Broadcom mailbox messages which is shared across * Broadcom SoCs and Broadcom mailbox client drivers. 
*/ @@ -13,7 +16,6 @@ enum brcm_message_type { BRCM_MESSAGE_UNKNOWN = 0, - BRCM_MESSAGE_BATCH, BRCM_MESSAGE_SPU, BRCM_MESSAGE_SBA, BRCM_MESSAGE_MAX, @@ -21,28 +23,23 @@ enum brcm_message_type { struct brcm_sba_command { u64 cmd; - u64 *cmd_dma; - dma_addr_t cmd_dma_addr; #define BRCM_SBA_CMD_TYPE_A BIT(0) #define BRCM_SBA_CMD_TYPE_B BIT(1) #define BRCM_SBA_CMD_TYPE_C BIT(2) #define BRCM_SBA_CMD_HAS_RESP BIT(3) #define BRCM_SBA_CMD_HAS_OUTPUT BIT(4) u64 flags; + dma_addr_t input; + size_t input_len; dma_addr_t resp; size_t resp_len; - dma_addr_t data; - size_t data_len; + dma_addr_t output; + size_t output_len; }; struct brcm_message { enum brcm_message_type type; union { - struct { - struct brcm_message *msgs; - unsigned int msgs_queued; - unsigned int msgs_count; - } batch; struct { struct scatterlist *src; struct scatterlist *dst; diff --git a/include/linux/mailbox_client.h b/include/linux/mailbox_client.h index 65229a4559..4434871095 100644 --- a/include/linux/mailbox_client.h +++ b/include/linux/mailbox_client.h @@ -1,7 +1,10 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright (C) 2013-2014 Linaro Ltd. * Author: Jassi Brar + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
*/ #ifndef __MAILBOX_CLIENT_H @@ -41,7 +44,6 @@ struct mbox_chan *mbox_request_channel_byname(struct mbox_client *cl, const char *name); struct mbox_chan *mbox_request_channel(struct mbox_client *cl, int index); int mbox_send_message(struct mbox_chan *chan, void *mssg); -int mbox_flush(struct mbox_chan *chan, unsigned long timeout); void mbox_client_txdone(struct mbox_chan *chan, int r); /* atomic */ bool mbox_client_peek_data(struct mbox_chan *chan); /* atomic */ void mbox_free_channel(struct mbox_chan *chan); /* may sleep */ diff --git a/include/linux/mailbox_controller.h b/include/linux/mailbox_controller.h index 36d6ce6735..74deadb42d 100644 --- a/include/linux/mailbox_controller.h +++ b/include/linux/mailbox_controller.h @@ -1,4 +1,8 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ #ifndef __MAILBOX_CONTROLLER_H #define __MAILBOX_CONTROLLER_H @@ -20,9 +24,6 @@ struct mbox_chan; * transmission of data is reported by the controller via * mbox_chan_txdone (if it has some TX ACK irq). It must not * sleep. - * @flush: Called when a client requests transmissions to be blocking but - * the context doesn't allow sleeping. Typically the controller - * will implement a busy loop waiting for the data to flush out. * @startup: Called when a client requests the chan. The controller * could ask clients for additional parameters of communication * to be provided via client's chan_data. 
This call may @@ -45,7 +46,6 @@ struct mbox_chan; */ struct mbox_chan_ops { int (*send_data)(struct mbox_chan *chan, void *data); - int (*flush)(struct mbox_chan *chan, unsigned long timeout); int (*startup)(struct mbox_chan *chan); void (*shutdown)(struct mbox_chan *chan); bool (*last_tx_done)(struct mbox_chan *chan); @@ -131,9 +131,4 @@ void mbox_controller_unregister(struct mbox_controller *mbox); /* can sleep */ void mbox_chan_received_data(struct mbox_chan *chan, void *data); /* atomic */ void mbox_chan_txdone(struct mbox_chan *chan, int r); /* atomic */ -int devm_mbox_controller_register(struct device *dev, - struct mbox_controller *mbox); -void devm_mbox_controller_unregister(struct device *dev, - struct mbox_controller *mbox); - #endif /* __MAILBOX_CONTROLLER_H */ diff --git a/include/linux/maple.h b/include/linux/maple.h index 9b140272ee..c37288b23e 100644 --- a/include/linux/maple.h +++ b/include/linux/maple.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef __LINUX_MAPLE_H #define __LINUX_MAPLE_H diff --git a/include/linux/marvell_phy.h b/include/linux/marvell_phy.h index 0f06c2287b..a57f0dfb6d 100644 --- a/include/linux/marvell_phy.h +++ b/include/linux/marvell_phy.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _MARVELL_PHY_H #define _MARVELL_PHY_H @@ -15,33 +14,13 @@ #define MARVELL_PHY_ID_88E1149R 0x01410e50 #define MARVELL_PHY_ID_88E1240 0x01410e30 #define MARVELL_PHY_ID_88E1318S 0x01410e90 -#define MARVELL_PHY_ID_88E1340S 0x01410dc0 #define MARVELL_PHY_ID_88E1116R 0x01410e40 #define MARVELL_PHY_ID_88E1510 0x01410dd0 #define MARVELL_PHY_ID_88E1540 0x01410eb0 -#define MARVELL_PHY_ID_88E1545 0x01410ea0 -#define MARVELL_PHY_ID_88E1548P 0x01410ec0 #define MARVELL_PHY_ID_88E3016 0x01410e60 -#define MARVELL_PHY_ID_88X3310 0x002b09a0 -#define MARVELL_PHY_ID_88E2110 0x002b09b0 -#define MARVELL_PHY_ID_88X2222 0x01410f10 - -/* Marvel 88E1111 in Finisar SFP module with modified PHY ID */ -#define MARVELL_PHY_ID_88E1111_FINISAR 
0x01ff0cc0 - -/* These Ethernet switch families contain embedded PHYs, but they do - * not have a model ID. So the switch driver traps reads to the ID2 - * register and returns the switch family ID - */ -#define MARVELL_PHY_ID_88E6341_FAMILY 0x01410f41 -#define MARVELL_PHY_ID_88E6390_FAMILY 0x01410f90 -#define MARVELL_PHY_ID_88E6393_FAMILY 0x002b0b9b - -#define MARVELL_PHY_FAMILY_ID(id) ((id) >> 4) /* struct phy_device dev_flags definitions */ #define MARVELL_PHY_M1145_FLAGS_RESISTANCE 0x00000001 #define MARVELL_PHY_M1118_DNS323_LEDS 0x00000002 -#define MARVELL_PHY_LED0_LINK_LED1_ACTIVE 0x00000004 #endif /* _MARVELL_PHY_H */ diff --git a/include/linux/math64.h b/include/linux/math64.h index 2928f03d6d..8e8a37d102 100644 --- a/include/linux/math64.h +++ b/include/linux/math64.h @@ -1,10 +1,7 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_MATH64_H #define _LINUX_MATH64_H #include -#include -#include #include #if BITS_PER_LONG == 64 @@ -14,28 +11,18 @@ /** * div_u64_rem - unsigned 64bit divide with 32bit divisor with remainder - * @dividend: unsigned 64bit dividend - * @divisor: unsigned 32bit divisor - * @remainder: pointer to unsigned 32bit remainder - * - * Return: sets ``*remainder``, then returns dividend / divisor * * This is commonly provided by 32bit archs to provide an optimized 64bit * divide. 
*/ -static inline u64 div_u64_rem(u64 dividend, u32 divisor, u32 *remainder) +static inline u64 __intentional_overflow(-1) div_u64_rem(u64 dividend, u32 divisor, u32 *remainder) { *remainder = dividend % divisor; return dividend / divisor; } -/* +/** * div_s64_rem - signed 64bit divide with 32bit divisor with remainder - * @dividend: signed 64bit dividend - * @divisor: signed 32bit divisor - * @remainder: pointer to signed 32bit remainder - * - * Return: sets ``*remainder``, then returns dividend / divisor */ static inline s64 div_s64_rem(s64 dividend, s32 divisor, s32 *remainder) { @@ -43,13 +30,8 @@ static inline s64 div_s64_rem(s64 dividend, s32 divisor, s32 *remainder) return dividend / divisor; } -/* +/** * div64_u64_rem - unsigned 64bit divide with 64bit divisor and remainder - * @dividend: unsigned 64bit dividend - * @divisor: unsigned 64bit divisor - * @remainder: pointer to unsigned 64bit remainder - * - * Return: sets ``*remainder``, then returns dividend / divisor */ static inline u64 div64_u64_rem(u64 dividend, u64 divisor, u64 *remainder) { @@ -57,24 +39,16 @@ static inline u64 div64_u64_rem(u64 dividend, u64 divisor, u64 *remainder) return dividend / divisor; } -/* +/** * div64_u64 - unsigned 64bit divide with 64bit divisor - * @dividend: unsigned 64bit dividend - * @divisor: unsigned 64bit divisor - * - * Return: dividend / divisor */ -static inline u64 div64_u64(u64 dividend, u64 divisor) +static inline u64 __intentional_overflow(-1) div64_u64(u64 dividend, u64 divisor) { return dividend / divisor; } -/* +/** * div64_s64 - signed 64bit divide with 64bit divisor - * @dividend: signed 64bit dividend - * @divisor: signed 64bit divisor - * - * Return: dividend / divisor */ static inline s64 div64_s64(s64 dividend, s64 divisor) { @@ -87,7 +61,7 @@ static inline s64 div64_s64(s64 dividend, s64 divisor) #define div64_ul(x, y) div_u64((x), (y)) #ifndef div_u64_rem -static inline u64 div_u64_rem(u64 dividend, u32 divisor, u32 *remainder) +static inline u64 
__intentional_overflow(-1) div_u64_rem(u64 dividend, u32 divisor, u32 *remainder) { *remainder = do_div(dividend, divisor); return dividend; @@ -103,7 +77,7 @@ extern u64 div64_u64_rem(u64 dividend, u64 divisor, u64 *remainder); #endif #ifndef div64_u64 -extern u64 div64_u64(u64 dividend, u64 divisor); +extern u64 __intentional_overflow(-1) div64_u64(u64 dividend, u64 divisor); #endif #ifndef div64_s64 @@ -114,15 +88,13 @@ extern s64 div64_s64(s64 dividend, s64 divisor); /** * div_u64 - unsigned 64bit divide with 32bit divisor - * @dividend: unsigned 64bit dividend - * @divisor: unsigned 32bit divisor * * This is the most common 64bit divide and should be used if possible, * as many 32bit archs can optimize this variant better than a full 64bit * divide. */ #ifndef div_u64 -static inline u64 div_u64(u64 dividend, u32 divisor) +static inline u64 __intentional_overflow(-1) div_u64(u64 dividend, u32 divisor) { u32 remainder; return div_u64_rem(dividend, divisor, &remainder); @@ -131,8 +103,6 @@ static inline u64 div_u64(u64 dividend, u32 divisor) /** * div_s64 - signed 64bit divide with 32bit divisor - * @dividend: signed 64bit dividend - * @divisor: signed 32bit divisor */ #ifndef div_s64 static inline s64 div_s64(s64 dividend, s32 divisor) @@ -144,15 +114,24 @@ static inline s64 div_s64(s64 dividend, s32 divisor) u32 iter_div_u64_rem(u64 dividend, u32 divisor, u64 *remainder); -#ifndef mul_u32_u32 -/* - * Many a GCC version messes this up and generates a 64x64 mult :-( - */ -static inline u64 mul_u32_u32(u32 a, u32 b) +static __always_inline u32 +__iter_div_u64_rem(u64 dividend, u32 divisor, u64 *remainder) { - return (u64)a * b; + u32 ret = 0; + + while (dividend >= divisor) { + /* The following asm() prevents the compiler from + optimising this loop into a modulo operation. 
*/ + asm("" : "+rm"(dividend)); + + dividend -= divisor; + ret++; + } + + *remainder = dividend; + + return ret; } -#endif #if defined(CONFIG_ARCH_SUPPORTS_INT128) && defined(__SIZEOF_INT128__) @@ -181,9 +160,9 @@ static inline u64 mul_u64_u32_shr(u64 a, u32 mul, unsigned int shift) al = a; ah = a >> 32; - ret = mul_u32_u32(al, mul) >> shift; + ret = ((u64)al * mul) >> shift; if (ah) - ret += mul_u32_u32(ah, mul) << (32 - shift); + ret += ((u64)ah * mul) << (32 - shift); return ret; } @@ -207,10 +186,10 @@ static inline u64 mul_u64_u64_shr(u64 a, u64 b, unsigned int shift) a0.ll = a; b0.ll = b; - rl.ll = mul_u32_u32(a0.l.low, b0.l.low); - rm.ll = mul_u32_u32(a0.l.low, b0.l.high); - rn.ll = mul_u32_u32(a0.l.high, b0.l.low); - rh.ll = mul_u32_u32(a0.l.high, b0.l.high); + rl.ll = (u64)a0.l.low * b0.l.low; + rm.ll = (u64)a0.l.low * b0.l.high; + rn.ll = (u64)a0.l.high * b0.l.low; + rh.ll = (u64)a0.l.high * b0.l.high; /* * Each of these lines computes a 64-bit intermediate result into "c", @@ -235,24 +214,6 @@ static inline u64 mul_u64_u64_shr(u64 a, u64 b, unsigned int shift) #endif -#ifndef mul_s64_u64_shr -static inline u64 mul_s64_u64_shr(s64 a, u64 b, unsigned int shift) -{ - u64 ret; - - /* - * Extract the sign before the multiplication and put it back - * afterwards if needed. - */ - ret = mul_u64_u64_shr(abs(a), b, shift); - - if (a < 0) - ret = -((s64) ret); - - return ret; -} -#endif /* mul_s64_u64_shr */ - #ifndef mul_u64_u32_div static inline u64 mul_u64_u32_div(u64 a, u32 mul, u32 divisor) { @@ -268,8 +229,8 @@ static inline u64 mul_u64_u32_div(u64 a, u32 mul, u32 divisor) } u, rl, rh; u.ll = a; - rl.ll = mul_u32_u32(u.l.low, mul); - rh.ll = mul_u32_u32(u.l.high, mul) + rl.l.high; + rl.ll = (u64)u.l.low * mul; + rh.ll = (u64)u.l.high * mul + rl.l.high; /* Bits 32-63 of the result will be in rh.l.low. 
*/ rl.l.high = do_div(rh.ll, divisor); @@ -282,41 +243,4 @@ static inline u64 mul_u64_u32_div(u64 a, u32 mul, u32 divisor) } #endif /* mul_u64_u32_div */ -u64 mul_u64_u64_div_u64(u64 a, u64 mul, u64 div); - -#define DIV64_U64_ROUND_UP(ll, d) \ - ({ u64 _tmp = (d); div64_u64((ll) + _tmp - 1, _tmp); }) - -/** - * DIV64_U64_ROUND_CLOSEST - unsigned 64bit divide with 64bit divisor rounded to nearest integer - * @dividend: unsigned 64bit dividend - * @divisor: unsigned 64bit divisor - * - * Divide unsigned 64bit dividend by unsigned 64bit divisor - * and round to closest integer. - * - * Return: dividend / divisor rounded to nearest integer - */ -#define DIV64_U64_ROUND_CLOSEST(dividend, divisor) \ - ({ u64 _tmp = (divisor); div64_u64((dividend) + _tmp / 2, _tmp); }) - -/* - * DIV_S64_ROUND_CLOSEST - signed 64bit divide with 32bit divisor rounded to nearest integer - * @dividend: signed 64bit dividend - * @divisor: signed 32bit divisor - * - * Divide signed 64bit dividend by signed 32bit divisor - * and round to closest integer. - * - * Return: dividend / divisor rounded to nearest integer - */ -#define DIV_S64_ROUND_CLOSEST(dividend, divisor)( \ -{ \ - s64 __x = (dividend); \ - s32 __d = (divisor); \ - ((__x > 0) == (__d > 0)) ? \ - div_s64((__x + (__d / 2)), __d) : \ - div_s64((__x - (__d / 2)), __d); \ -} \ -) #endif /* _LINUX_MATH64_H */ diff --git a/include/linux/max17040_battery.h b/include/linux/max17040_battery.h index 593602fc93..ad97b06cf9 100644 --- a/include/linux/max17040_battery.h +++ b/include/linux/max17040_battery.h @@ -1,7 +1,10 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright (C) 2009 Samsung Electronics * Minkyu Kang + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
*/ #ifndef __MAX17040_BATTERY_H_ diff --git a/include/linux/mbcache.h b/include/linux/mbcache.h index 20f1e3ff60..86c9a8b480 100644 --- a/include/linux/mbcache.h +++ b/include/linux/mbcache.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_MBCACHE_H #define _LINUX_MBCACHE_H @@ -20,15 +19,15 @@ struct mb_cache_entry { u32 e_key; u32 e_referenced:1; u32 e_reusable:1; - /* User provided value - stable during lifetime of the entry */ - u64 e_value; + /* Block number of hashed block - stable during lifetime of the entry */ + sector_t e_block; }; struct mb_cache *mb_cache_create(int bucket_bits); void mb_cache_destroy(struct mb_cache *cache); int mb_cache_entry_create(struct mb_cache *cache, gfp_t mask, u32 key, - u64 value, bool reusable); + sector_t block, bool reusable); void __mb_cache_entry_free(struct mb_cache_entry *entry); static inline int mb_cache_entry_put(struct mb_cache *cache, struct mb_cache_entry *entry) @@ -39,9 +38,10 @@ static inline int mb_cache_entry_put(struct mb_cache *cache, return 1; } -void mb_cache_entry_delete(struct mb_cache *cache, u32 key, u64 value); +void mb_cache_entry_delete_block(struct mb_cache *cache, u32 key, + sector_t block); struct mb_cache_entry *mb_cache_entry_get(struct mb_cache *cache, u32 key, - u64 value); + sector_t block); struct mb_cache_entry *mb_cache_entry_find_first(struct mb_cache *cache, u32 key); struct mb_cache_entry *mb_cache_entry_find_next(struct mb_cache *cache, diff --git a/include/linux/mbus.h b/include/linux/mbus.h index 4773145246..2931aa43da 100644 --- a/include/linux/mbus.h +++ b/include/linux/mbus.h @@ -31,8 +31,8 @@ struct mbus_dram_target_info struct mbus_dram_window { u8 cs_index; u8 mbus_attr; - u64 base; - u64 size; + u32 base; + u32 size; } cs[4]; }; @@ -82,7 +82,6 @@ static inline int mvebu_mbus_get_io_win_info(phys_addr_t phyaddr, u32 *size, } #endif -#ifdef CONFIG_MVEBU_MBUS int mvebu_mbus_save_cpu_target(u32 __iomem *store_addr); void 
mvebu_mbus_get_pcie_mem_aperture(struct resource *res); void mvebu_mbus_get_pcie_io_aperture(struct resource *res); @@ -98,12 +97,5 @@ int mvebu_mbus_init(const char *soc, phys_addr_t mbus_phys_base, size_t mbus_size, phys_addr_t sdram_phys_base, size_t sdram_size); int mvebu_mbus_dt_init(bool is_coherent); -#else -static inline int mvebu_mbus_get_dram_win_info(phys_addr_t phyaddr, u8 *target, - u8 *attr) -{ - return -EINVAL; -} -#endif /* CONFIG_MVEBU_MBUS */ #endif /* __LINUX_MBUS_H */ diff --git a/include/linux/mc146818rtc.h b/include/linux/mc146818rtc.h index 0661af17a7..a585b4b5fa 100644 --- a/include/linux/mc146818rtc.h +++ b/include/linux/mc146818rtc.h @@ -16,7 +16,6 @@ #include /* register access macros */ #include #include -#include #ifdef __KERNEL__ #include /* spinlock_t */ diff --git a/include/linux/mc6821.h b/include/linux/mc6821.h index 8dffab19b4..28e301e295 100644 --- a/include/linux/mc6821.h +++ b/include/linux/mc6821.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _MC6821_H_ #define _MC6821_H_ diff --git a/include/linux/mcb.h b/include/linux/mcb.h index f6efb16f9d..4097ac9ea1 100644 --- a/include/linux/mcb.h +++ b/include/linux/mcb.h @@ -1,9 +1,12 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * MEN Chameleon Bus. * * Copyright (C) 2014 MEN Mikroelektronik GmbH (www.men.de) * Author: Johannes Thumshirn + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the Free + * Software Foundation; version 2 of the License. 
*/ #ifndef _LINUX_MCB_H #define _LINUX_MCB_H @@ -120,7 +123,7 @@ extern int __must_check __mcb_register_driver(struct mcb_driver *drv, __mcb_register_driver(driver, THIS_MODULE, KBUILD_MODNAME) extern void mcb_unregister_driver(struct mcb_driver *driver); #define module_mcb_driver(__mcb_driver) \ - module_driver(__mcb_driver, mcb_register_driver, mcb_unregister_driver) + module_driver(__mcb_driver, mcb_register_driver, mcb_unregister_driver); extern void mcb_bus_add_devices(const struct mcb_bus *bus); extern int mcb_device_register(struct mcb_bus *bus, struct mcb_device *dev); extern struct mcb_bus *mcb_alloc_bus(struct device *carrier); @@ -133,7 +136,5 @@ extern struct resource *mcb_request_mem(struct mcb_device *dev, const char *name); extern void mcb_release_mem(struct resource *mem); extern int mcb_get_irq(struct mcb_device *dev); -extern struct resource *mcb_get_resource(struct mcb_device *dev, - unsigned int type); #endif /* _LINUX_MCB_H */ diff --git a/include/linux/mdio-bitbang.h b/include/linux/mdio-bitbang.h index 373630fe5c..76f52bbbb2 100644 --- a/include/linux/mdio-bitbang.h +++ b/include/linux/mdio-bitbang.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef __LINUX_MDIO_BITBANG_H #define __LINUX_MDIO_BITBANG_H @@ -33,14 +32,10 @@ struct mdiobb_ops { struct mdiobb_ctrl { const struct mdiobb_ops *ops; - unsigned int override_op_c22; - u8 op_c22_read; - u8 op_c22_write; + /* reset callback */ + int (*reset)(struct mii_bus *bus); }; -int mdiobb_read(struct mii_bus *bus, int phy, int reg); -int mdiobb_write(struct mii_bus *bus, int phy, int reg, u16 val); - /* The returned bus is not yet registered with the phy layer. 
*/ struct mii_bus *alloc_mdio_bitbang(struct mdiobb_ctrl *ctrl); diff --git a/include/linux/mdio-mux.h b/include/linux/mdio-mux.h index a5d58f2219..61f5b21b31 100644 --- a/include/linux/mdio-mux.h +++ b/include/linux/mdio-mux.h @@ -12,16 +12,7 @@ #include #include -/* mdio_mux_init() - Initialize a MDIO mux - * @dev The device owning the MDIO mux - * @mux_node The device node of the MDIO mux - * @switch_fn The function called for switching target MDIO child - * mux_handle A pointer to a (void *) used internaly by mdio-mux - * @data Private data used by switch_fn() - * @mux_bus An optional parent bus (Other case are to use parent_bus property) - */ int mdio_mux_init(struct device *dev, - struct device_node *mux_node, int (*switch_fn) (int cur, int desired, void *data), void **mux_handle, void *data, diff --git a/include/linux/mdio.h b/include/linux/mdio.h index 5e6dc38f41..bf9d1d7506 100644 --- a/include/linux/mdio.h +++ b/include/linux/mdio.h @@ -1,24 +1,17 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * linux/mdio.h: definitions for MDIO (clause 45) transceivers * Copyright 2006-2009 Solarflare Communications Inc. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation, incorporated herein by reference. */ #ifndef __LINUX_MDIO_H__ #define __LINUX_MDIO_H__ #include -#include -/* Or MII_ADDR_C45 into regnum for read/write on mii_bus to enable the 21 bit - * IEEE 802.3ae clause 45 addressing mode used by 10GIGE phy chips. - */ -#define MII_ADDR_C45 (1<<30) -#define MII_DEVADDR_C45_SHIFT 16 -#define MII_REGADDR_C45_MASK GENMASK(15, 0) - -struct gpio_desc; struct mii_bus; -struct reset_control; /* Multiple levels of nesting are possible. 
However typically this is * limited to nested DSA like layer, a MUX layer, and the normal @@ -34,8 +27,8 @@ enum mdio_mutex_lock_class { struct mdio_device { struct device dev; + const struct dev_pm_ops *pm_ops; struct mii_bus *bus; - char modalias[MDIO_NAME_SIZE]; int (*bus_match)(struct device *dev, struct device_driver *drv); void (*device_free)(struct mdio_device *mdiodev); @@ -44,16 +37,8 @@ struct mdio_device { /* Bus address of the MDIO device (0-31) */ int addr; int flags; - struct gpio_desc *reset_gpio; - struct reset_control *reset_ctrl; - unsigned int reset_assert_delay; - unsigned int reset_deassert_delay; }; - -static inline struct mdio_device *to_mdio_device(const struct device *dev) -{ - return container_of(dev, struct mdio_device, dev); -} +#define to_mdio_device(d) container_of(d, struct mdio_device, dev) /* struct mdio_driver_common: Common to all MDIO drivers */ struct mdio_driver_common { @@ -61,12 +46,8 @@ struct mdio_driver_common { int flags; }; #define MDIO_DEVICE_FLAG_PHY 1 - -static inline struct mdio_driver_common * -to_mdio_common_driver(const struct device_driver *driver) -{ - return container_of(driver, struct mdio_driver_common, driver); -} +#define to_mdio_common_driver(d) \ + container_of(d, struct mdio_driver_common, driver) /* struct mdio_driver: Generic MDIO driver */ struct mdio_driver { @@ -80,37 +61,16 @@ struct mdio_driver { /* Clears up any memory if needed */ void (*remove)(struct mdio_device *mdiodev); - - /* Quiesces the device on system shutdown, turns off interrupts etc */ - void (*shutdown)(struct mdio_device *mdiodev); }; - -static inline struct mdio_driver * -to_mdio_driver(const struct device_driver *driver) -{ - return container_of(to_mdio_common_driver(driver), struct mdio_driver, - mdiodrv); -} - -/* device driver data */ -static inline void mdiodev_set_drvdata(struct mdio_device *mdio, void *data) -{ - dev_set_drvdata(&mdio->dev, data); -} - -static inline void *mdiodev_get_drvdata(struct mdio_device *mdio) -{ - 
return dev_get_drvdata(&mdio->dev); -} +#define to_mdio_driver(d) \ + container_of(to_mdio_common_driver(d), struct mdio_driver, mdiodrv) void mdio_device_free(struct mdio_device *mdiodev); struct mdio_device *mdio_device_create(struct mii_bus *bus, int addr); int mdio_device_register(struct mdio_device *mdiodev); void mdio_device_remove(struct mdio_device *mdiodev); -void mdio_device_reset(struct mdio_device *mdiodev, int value); int mdio_driver_register(struct mdio_driver *drv); void mdio_driver_unregister(struct mdio_driver *drv); -int mdio_device_bus_match(struct device *dev, struct device_driver *drv); static inline bool mdio_phy_id_is_c45(int phy_id) { @@ -170,10 +130,6 @@ extern int mdio45_nway_restart(const struct mdio_if_info *mdio); extern void mdio45_ethtool_gset_npage(const struct mdio_if_info *mdio, struct ethtool_cmd *ecmd, u32 npage_adv, u32 npage_lpa); -extern void -mdio45_ethtool_ksettings_get_npage(const struct mdio_if_info *mdio, - struct ethtool_link_ksettings *cmd, - u32 npage_adv, u32 npage_lpa); /** * mdio45_ethtool_gset - get settings for ETHTOOL_GSET @@ -191,23 +147,6 @@ static inline void mdio45_ethtool_gset(const struct mdio_if_info *mdio, mdio45_ethtool_gset_npage(mdio, ecmd, 0, 0); } -/** - * mdio45_ethtool_ksettings_get - get settings for ETHTOOL_GLINKSETTINGS - * @mdio: MDIO interface - * @cmd: Ethtool request structure - * - * Since the CSRs for auto-negotiation using next pages are not fully - * standardised, this function does not attempt to decode them. Use - * mdio45_ethtool_ksettings_get_npage() to specify advertisement bits - * from next pages. 
- */ -static inline void -mdio45_ethtool_ksettings_get(const struct mdio_if_info *mdio, - struct ethtool_link_ksettings *cmd) -{ - mdio45_ethtool_ksettings_get_npage(mdio, cmd, 0, 0); -} - extern int mdio_mii_ioctl(const struct mdio_if_info *mdio, struct mii_ioctl_data *mii_data, int cmd); @@ -294,91 +233,10 @@ static inline u16 ethtool_adv_to_mmd_eee_adv_t(u32 adv) return reg; } -/** - * linkmode_adv_to_mii_10gbt_adv_t - * @advertising: the linkmode advertisement settings - * - * A small helper function that translates linkmode advertisement - * settings to phy autonegotiation advertisements for the C45 - * 10GBASE-T AN CONTROL (7.32) register. - */ -static inline u32 linkmode_adv_to_mii_10gbt_adv_t(unsigned long *advertising) -{ - u32 result = 0; - - if (linkmode_test_bit(ETHTOOL_LINK_MODE_2500baseT_Full_BIT, - advertising)) - result |= MDIO_AN_10GBT_CTRL_ADV2_5G; - if (linkmode_test_bit(ETHTOOL_LINK_MODE_5000baseT_Full_BIT, - advertising)) - result |= MDIO_AN_10GBT_CTRL_ADV5G; - if (linkmode_test_bit(ETHTOOL_LINK_MODE_10000baseT_Full_BIT, - advertising)) - result |= MDIO_AN_10GBT_CTRL_ADV10G; - - return result; -} - -/** - * mii_10gbt_stat_mod_linkmode_lpa_t - * @advertising: target the linkmode advertisement settings - * @lpa: value of the C45 10GBASE-T AN STATUS register - * - * A small helper function that translates C45 10GBASE-T AN STATUS register bits - * to linkmode advertisement settings. Other bits in advertising aren't changed. 
- */ -static inline void mii_10gbt_stat_mod_linkmode_lpa_t(unsigned long *advertising, - u32 lpa) -{ - linkmode_mod_bit(ETHTOOL_LINK_MODE_2500baseT_Full_BIT, - advertising, lpa & MDIO_AN_10GBT_STAT_LP2_5G); - linkmode_mod_bit(ETHTOOL_LINK_MODE_5000baseT_Full_BIT, - advertising, lpa & MDIO_AN_10GBT_STAT_LP5G); - linkmode_mod_bit(ETHTOOL_LINK_MODE_10000baseT_Full_BIT, - advertising, lpa & MDIO_AN_10GBT_STAT_LP10G); -} - -int __mdiobus_read(struct mii_bus *bus, int addr, u32 regnum); -int __mdiobus_write(struct mii_bus *bus, int addr, u32 regnum, u16 val); -int __mdiobus_modify_changed(struct mii_bus *bus, int addr, u32 regnum, - u16 mask, u16 set); - int mdiobus_read(struct mii_bus *bus, int addr, u32 regnum); int mdiobus_read_nested(struct mii_bus *bus, int addr, u32 regnum); int mdiobus_write(struct mii_bus *bus, int addr, u32 regnum, u16 val); int mdiobus_write_nested(struct mii_bus *bus, int addr, u32 regnum, u16 val); -int mdiobus_modify(struct mii_bus *bus, int addr, u32 regnum, u16 mask, - u16 set); - -static inline u32 mdiobus_c45_addr(int devad, u16 regnum) -{ - return MII_ADDR_C45 | devad << MII_DEVADDR_C45_SHIFT | regnum; -} - -static inline int __mdiobus_c45_read(struct mii_bus *bus, int prtad, int devad, - u16 regnum) -{ - return __mdiobus_read(bus, prtad, mdiobus_c45_addr(devad, regnum)); -} - -static inline int __mdiobus_c45_write(struct mii_bus *bus, int prtad, int devad, - u16 regnum, u16 val) -{ - return __mdiobus_write(bus, prtad, mdiobus_c45_addr(devad, regnum), - val); -} - -static inline int mdiobus_c45_read(struct mii_bus *bus, int prtad, int devad, - u16 regnum) -{ - return mdiobus_read(bus, prtad, mdiobus_c45_addr(devad, regnum)); -} - -static inline int mdiobus_c45_write(struct mii_bus *bus, int prtad, int devad, - u16 regnum, u16 val) -{ - return mdiobus_write(bus, prtad, mdiobus_c45_addr(devad, regnum), val); -} int mdiobus_register_device(struct mdio_device *mdiodev); int mdiobus_unregister_device(struct mdio_device *mdiodev); @@ -386,8 
+244,7 @@ bool mdiobus_is_registered_device(struct mii_bus *bus, int addr); struct phy_device *mdiobus_get_phy(struct mii_bus *bus, int addr); /** - * mdio_module_driver() - Helper macro for registering mdio drivers - * @_mdio_driver: driver to register + * module_mdio_driver() - Helper macro for registering mdio drivers * * Helper macro for MDIO drivers which do not do anything special in module * init/exit. Each module may only use this macro once, and calling it diff --git a/include/linux/mei_cl_bus.h b/include/linux/mei_cl_bus.h index c6786c12b2..e746919530 100644 --- a/include/linux/mei_cl_bus.h +++ b/include/linux/mei_cl_bus.h @@ -1,7 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* - * Copyright (c) 2013-2016, Intel Corporation. All rights reserved. - */ #ifndef _LINUX_MEI_CL_BUS_H #define _LINUX_MEI_CL_BUS_H @@ -12,7 +8,8 @@ struct mei_cl_device; struct mei_device; -typedef void (*mei_cldev_cb_t)(struct mei_cl_device *cldev); +typedef void (*mei_cldev_event_cb_t)(struct mei_cl_device *cldev, + u32 events, void *context); /** * struct mei_cl_device - MEI device handle @@ -27,12 +24,12 @@ typedef void (*mei_cldev_cb_t)(struct mei_cl_device *cldev); * @me_cl: me client * @cl: mei client * @name: device name - * @rx_work: async work to execute Rx event callback - * @rx_cb: Drivers register this callback to get asynchronous ME - * Rx buffer pending notifications. - * @notif_work: async work to execute FW notif event callback - * @notif_cb: Drivers register this callback to get asynchronous ME - * FW notification pending notifications. + * @event_work: async work to execute event callback + * @event_cb: Drivers register this callback to get asynchronous ME + * events (e.g. Rx buffer pending) notifications. + * @event_context: event callback run context + * @events_mask: Events bit mask requested by driver. + * @events: Events bitmask sent to the driver. 
* * @do_match: wheather device can be matched with a driver * @is_added: device is already scanned @@ -47,10 +44,11 @@ struct mei_cl_device { struct mei_cl *cl; char name[MEI_CL_NAME_SIZE]; - struct work_struct rx_work; - mei_cldev_cb_t rx_cb; - struct work_struct notif_work; - mei_cldev_cb_t notif_cb; + struct work_struct event_work; + mei_cldev_event_cb_t event_cb; + void *event_context; + unsigned long events_mask; + unsigned long events; unsigned int do_match:1; unsigned int is_added:1; @@ -58,8 +56,6 @@ struct mei_cl_device { void *priv_data; }; -#define to_mei_cl_device(d) container_of(d, struct mei_cl_device, dev) - struct mei_cl_driver { struct device_driver driver; const char *name; @@ -68,7 +64,7 @@ struct mei_cl_driver { int (*probe)(struct mei_cl_device *cldev, const struct mei_cl_device_id *id); - void (*remove)(struct mei_cl_device *cldev); + int (*remove)(struct mei_cl_device *cldev); }; int __mei_cldev_driver_register(struct mei_cl_driver *cldrv, @@ -78,34 +74,16 @@ int __mei_cldev_driver_register(struct mei_cl_driver *cldrv, void mei_cldev_driver_unregister(struct mei_cl_driver *cldrv); -/** - * module_mei_cl_driver - Helper macro for registering mei cl driver - * - * @__mei_cldrv: mei_cl_driver structure - * - * Helper macro for mei cl drivers which do not do anything special in module - * init/exit, for eliminating a boilerplate code. 
- */ -#define module_mei_cl_driver(__mei_cldrv) \ - module_driver(__mei_cldrv, \ - mei_cldev_driver_register,\ - mei_cldev_driver_unregister) +ssize_t mei_cldev_send(struct mei_cl_device *cldev, u8 *buf, size_t length); +ssize_t mei_cldev_recv(struct mei_cl_device *cldev, u8 *buf, size_t length); -ssize_t mei_cldev_send(struct mei_cl_device *cldev, const u8 *buf, - size_t length); -ssize_t mei_cldev_recv(struct mei_cl_device *cldev, u8 *buf, size_t length); -ssize_t mei_cldev_recv_nonblock(struct mei_cl_device *cldev, u8 *buf, - size_t length); -ssize_t mei_cldev_send_vtag(struct mei_cl_device *cldev, const u8 *buf, - size_t length, u8 vtag); -ssize_t mei_cldev_recv_vtag(struct mei_cl_device *cldev, u8 *buf, size_t length, - u8 *vtag); -ssize_t mei_cldev_recv_nonblock_vtag(struct mei_cl_device *cldev, u8 *buf, - size_t length, u8 *vtag); +int mei_cldev_register_event_cb(struct mei_cl_device *cldev, + unsigned long event_mask, + mei_cldev_event_cb_t read_cb, void *context); -int mei_cldev_register_rx_cb(struct mei_cl_device *cldev, mei_cldev_cb_t rx_cb); -int mei_cldev_register_notif_cb(struct mei_cl_device *cldev, - mei_cldev_cb_t notif_cb); +#define MEI_CL_EVENT_RX 0 +#define MEI_CL_EVENT_TX 1 +#define MEI_CL_EVENT_NOTIF 2 const uuid_le *mei_cldev_uuid(const struct mei_cl_device *cldev); u8 mei_cldev_ver(const struct mei_cl_device *cldev); @@ -115,6 +93,6 @@ void mei_cldev_set_drvdata(struct mei_cl_device *cldev, void *data); int mei_cldev_enable(struct mei_cl_device *cldev); int mei_cldev_disable(struct mei_cl_device *cldev); -bool mei_cldev_enabled(const struct mei_cl_device *cldev); +bool mei_cldev_enabled(struct mei_cl_device *cldev); #endif /* _LINUX_MEI_CL_BUS_H */ diff --git a/include/linux/memblock.h b/include/linux/memblock.h index 34de69b3b8..5b759c9acf 100644 --- a/include/linux/memblock.h +++ b/include/linux/memblock.h @@ -1,113 +1,90 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ #ifndef _LINUX_MEMBLOCK_H #define _LINUX_MEMBLOCK_H #ifdef 
__KERNEL__ +#ifdef CONFIG_HAVE_MEMBLOCK /* * Logical memory blocks. * * Copyright (C) 2001 Peter Bergner, IBM Corp. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. */ #include #include -#include -extern unsigned long max_low_pfn; -extern unsigned long min_low_pfn; +#define INIT_MEMBLOCK_REGIONS 128 +#define INIT_PHYSMEM_REGIONS 4 -/* - * highest page - */ -extern unsigned long max_pfn; -/* - * highest possible page - */ -extern unsigned long long max_possible_pfn; - -/** - * enum memblock_flags - definition of memory region attributes - * @MEMBLOCK_NONE: no special request - * @MEMBLOCK_HOTPLUG: hotpluggable region - * @MEMBLOCK_MIRROR: mirrored region - * @MEMBLOCK_NOMAP: don't add to kernel direct mapping and treat as - * reserved in the memory map; refer to memblock_mark_nomap() description - * for further details - */ -enum memblock_flags { +/* Definition of memblock flags. 
*/ +enum { MEMBLOCK_NONE = 0x0, /* No special request */ MEMBLOCK_HOTPLUG = 0x1, /* hotpluggable region */ MEMBLOCK_MIRROR = 0x2, /* mirrored region */ MEMBLOCK_NOMAP = 0x4, /* don't add to kernel direct mapping */ }; -/** - * struct memblock_region - represents a memory region - * @base: base address of the region - * @size: size of the region - * @flags: memory region attributes - * @nid: NUMA node id - */ struct memblock_region { phys_addr_t base; phys_addr_t size; - enum memblock_flags flags; -#ifdef CONFIG_NUMA + unsigned long flags; +#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP int nid; #endif }; -/** - * struct memblock_type - collection of memory regions of certain type - * @cnt: number of regions - * @max: size of the allocated array - * @total_size: size of all regions - * @regions: array of regions - * @name: the memory type symbolic name - */ struct memblock_type { - unsigned long cnt; - unsigned long max; - phys_addr_t total_size; + unsigned long cnt; /* number of regions */ + unsigned long max; /* size of the allocated array */ + phys_addr_t total_size; /* size of all regions */ struct memblock_region *regions; - char *name; }; -/** - * struct memblock - memblock allocator metadata - * @bottom_up: is bottom up direction? - * @current_limit: physical address of the current allocation limit - * @memory: usable memory regions - * @reserved: reserved memory regions - */ struct memblock { bool bottom_up; /* is bottom up direction? 
*/ phys_addr_t current_limit; struct memblock_type memory; struct memblock_type reserved; +#ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP + struct memblock_type physmem; +#endif }; extern struct memblock memblock; +extern int memblock_debug; +#ifdef CONFIG_MOVABLE_NODE +/* If movable_node boot option specified */ +extern bool movable_node_enabled; +#endif /* CONFIG_MOVABLE_NODE */ -#ifndef CONFIG_ARCH_KEEP_MEMBLOCK +#ifdef CONFIG_ARCH_DISCARD_MEMBLOCK #define __init_memblock __meminit #define __initdata_memblock __meminitdata -void memblock_discard(void); #else #define __init_memblock #define __initdata_memblock -static inline void memblock_discard(void) {} #endif +#define memblock_dbg(fmt, ...) \ + if (memblock_debug) printk(KERN_INFO pr_fmt(fmt), ##__VA_ARGS__) + +phys_addr_t memblock_find_in_range_node(phys_addr_t size, phys_addr_t align, + phys_addr_t start, phys_addr_t end, + int nid, ulong flags); +phys_addr_t memblock_find_in_range(phys_addr_t start, phys_addr_t end, + phys_addr_t size, phys_addr_t align); +phys_addr_t get_allocated_memblock_reserved_regions_info(phys_addr_t *addr); +phys_addr_t get_allocated_memblock_memory_regions_info(phys_addr_t *addr); void memblock_allow_resize(void); int memblock_add_node(phys_addr_t base, phys_addr_t size, int nid); int memblock_add(phys_addr_t base, phys_addr_t size); int memblock_remove(phys_addr_t base, phys_addr_t size); int memblock_free(phys_addr_t base, phys_addr_t size); int memblock_reserve(phys_addr_t base, phys_addr_t size); -#ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP -int memblock_physmem_add(phys_addr_t base, phys_addr_t size); -#endif void memblock_trim_memory(phys_addr_t align); bool memblock_overlaps_region(struct memblock_type *type, phys_addr_t base, phys_addr_t size); @@ -115,52 +92,28 @@ int memblock_mark_hotplug(phys_addr_t base, phys_addr_t size); int memblock_clear_hotplug(phys_addr_t base, phys_addr_t size); int memblock_mark_mirror(phys_addr_t base, phys_addr_t size); int memblock_mark_nomap(phys_addr_t 
base, phys_addr_t size); -int memblock_clear_nomap(phys_addr_t base, phys_addr_t size); - -void memblock_free_all(void); -void memblock_free_ptr(void *ptr, size_t size); -void reset_node_managed_pages(pg_data_t *pgdat); -void reset_all_zones_managed_pages(void); +ulong choose_memblock_flags(void); /* Low level functions */ -void __next_mem_range(u64 *idx, int nid, enum memblock_flags flags, +int memblock_add_range(struct memblock_type *type, + phys_addr_t base, phys_addr_t size, + int nid, unsigned long flags); + +void __next_mem_range(u64 *idx, int nid, ulong flags, struct memblock_type *type_a, struct memblock_type *type_b, phys_addr_t *out_start, phys_addr_t *out_end, int *out_nid); -void __next_mem_range_rev(u64 *idx, int nid, enum memblock_flags flags, +void __next_mem_range_rev(u64 *idx, int nid, ulong flags, struct memblock_type *type_a, struct memblock_type *type_b, phys_addr_t *out_start, phys_addr_t *out_end, int *out_nid); -void __memblock_free_late(phys_addr_t base, phys_addr_t size); - -#ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP -static inline void __next_physmem_range(u64 *idx, struct memblock_type *type, - phys_addr_t *out_start, - phys_addr_t *out_end) -{ - extern struct memblock_type physmem; - - __next_mem_range(idx, NUMA_NO_NODE, MEMBLOCK_NONE, &physmem, type, - out_start, out_end, NULL); -} +void __next_reserved_mem_region(u64 *idx, phys_addr_t *out_start, + phys_addr_t *out_end); /** - * for_each_physmem_range - iterate through physmem areas not included in type. 
- * @i: u64 used as loop variable - * @type: ptr to memblock_type which excludes from the iteration, can be %NULL - * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL - * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL - */ -#define for_each_physmem_range(i, type, p_start, p_end) \ - for (i = 0, __next_physmem_range(&i, type, p_start, p_end); \ - i != (u64)ULLONG_MAX; \ - __next_physmem_range(&i, type, p_start, p_end)) -#endif /* CONFIG_HAVE_MEMBLOCK_PHYS_MAP */ - -/** - * __for_each_mem_range - iterate through memblock areas from type_a and not + * for_each_mem_range - iterate through memblock areas from type_a and not * included in type_b. Or just type_a if type_b is NULL. * @i: u64 used as loop variable * @type_a: ptr to memblock_type to iterate @@ -171,7 +124,7 @@ static inline void __next_physmem_range(u64 *idx, struct memblock_type *type, * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL * @p_nid: ptr to int for nid of the range, can be %NULL */ -#define __for_each_mem_range(i, type_a, type_b, nid, flags, \ +#define for_each_mem_range(i, type_a, type_b, nid, flags, \ p_start, p_end, p_nid) \ for (i = 0, __next_mem_range(&i, nid, flags, type_a, type_b, \ p_start, p_end, p_nid); \ @@ -180,7 +133,7 @@ static inline void __next_physmem_range(u64 *idx, struct memblock_type *type, p_start, p_end, p_nid)) /** - * __for_each_mem_range_rev - reverse iterate through memblock areas from + * for_each_mem_range_rev - reverse iterate through memblock areas from * type_a and not included in type_b. Or just type_a if type_b is NULL. 
* @i: u64 used as loop variable * @type_a: ptr to memblock_type to iterate @@ -191,38 +144,17 @@ static inline void __next_physmem_range(u64 *idx, struct memblock_type *type, * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL * @p_nid: ptr to int for nid of the range, can be %NULL */ -#define __for_each_mem_range_rev(i, type_a, type_b, nid, flags, \ - p_start, p_end, p_nid) \ +#define for_each_mem_range_rev(i, type_a, type_b, nid, flags, \ + p_start, p_end, p_nid) \ for (i = (u64)ULLONG_MAX, \ - __next_mem_range_rev(&i, nid, flags, type_a, type_b, \ + __next_mem_range_rev(&i, nid, flags, type_a, type_b,\ p_start, p_end, p_nid); \ i != (u64)ULLONG_MAX; \ __next_mem_range_rev(&i, nid, flags, type_a, type_b, \ p_start, p_end, p_nid)) /** - * for_each_mem_range - iterate through memory areas. - * @i: u64 used as loop variable - * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL - * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL - */ -#define for_each_mem_range(i, p_start, p_end) \ - __for_each_mem_range(i, &memblock.memory, NULL, NUMA_NO_NODE, \ - MEMBLOCK_HOTPLUG, p_start, p_end, NULL) - -/** - * for_each_mem_range_rev - reverse iterate through memblock areas from - * type_a and not included in type_b. Or just type_a if type_b is NULL. 
- * @i: u64 used as loop variable - * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL - * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL - */ -#define for_each_mem_range_rev(i, p_start, p_end) \ - __for_each_mem_range_rev(i, &memblock.memory, NULL, NUMA_NO_NODE, \ - MEMBLOCK_HOTPLUG, p_start, p_end, NULL) - -/** - * for_each_reserved_mem_range - iterate over all reserved memblock areas + * for_each_reserved_mem_region - iterate over all reserved memblock areas * @i: u64 used as loop variable * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL @@ -230,15 +162,32 @@ static inline void __next_physmem_range(u64 *idx, struct memblock_type *type, * Walks over reserved areas of memblock. Available as soon as memblock * is initialized. */ -#define for_each_reserved_mem_range(i, p_start, p_end) \ - __for_each_mem_range(i, &memblock.reserved, NULL, NUMA_NO_NODE, \ - MEMBLOCK_NONE, p_start, p_end, NULL) +#define for_each_reserved_mem_region(i, p_start, p_end) \ + for (i = 0UL, __next_reserved_mem_region(&i, p_start, p_end); \ + i != (u64)ULLONG_MAX; \ + __next_reserved_mem_region(&i, p_start, p_end)) +#ifdef CONFIG_MOVABLE_NODE static inline bool memblock_is_hotpluggable(struct memblock_region *m) { return m->flags & MEMBLOCK_HOTPLUG; } +static inline bool __init_memblock movable_node_is_enabled(void) +{ + return movable_node_enabled; +} +#else +static inline bool memblock_is_hotpluggable(struct memblock_region *m) +{ + return false; +} +static inline bool movable_node_is_enabled(void) +{ + return false; +} +#endif + static inline bool memblock_is_mirror(struct memblock_region *m) { return m->flags & MEMBLOCK_MIRROR; @@ -249,6 +198,7 @@ static inline bool memblock_is_nomap(struct memblock_region *m) return m->flags & MEMBLOCK_NOMAP; } +#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP int memblock_search_pfn_nid(unsigned long pfn, unsigned long 
*start_pfn, unsigned long *end_pfn); void __next_mem_pfn_range(int *idx, int nid, unsigned long *out_start_pfn, @@ -267,50 +217,7 @@ void __next_mem_pfn_range(int *idx, int nid, unsigned long *out_start_pfn, #define for_each_mem_pfn_range(i, nid, p_start, p_end, p_nid) \ for (i = -1, __next_mem_pfn_range(&i, nid, p_start, p_end, p_nid); \ i >= 0; __next_mem_pfn_range(&i, nid, p_start, p_end, p_nid)) - -#ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT -void __next_mem_pfn_range_in_zone(u64 *idx, struct zone *zone, - unsigned long *out_spfn, - unsigned long *out_epfn); -/** - * for_each_free_mem_pfn_range_in_zone - iterate through zone specific free - * memblock areas - * @i: u64 used as loop variable - * @zone: zone in which all of the memory blocks reside - * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL - * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL - * - * Walks over free (memory && !reserved) areas of memblock in a specific - * zone. Available once memblock and an empty zone is initialized. The main - * assumption is that the zone start, end, and pgdat have been associated. - * This way we can use the zone to determine NUMA node, and if a given part - * of the memblock is valid for the zone. - */ -#define for_each_free_mem_pfn_range_in_zone(i, zone, p_start, p_end) \ - for (i = 0, \ - __next_mem_pfn_range_in_zone(&i, zone, p_start, p_end); \ - i != U64_MAX; \ - __next_mem_pfn_range_in_zone(&i, zone, p_start, p_end)) - -/** - * for_each_free_mem_pfn_range_in_zone_from - iterate through zone specific - * free memblock areas from a given point - * @i: u64 used as loop variable - * @zone: zone in which all of the memory blocks reside - * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL - * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL - * - * Walks over free (memory && !reserved) areas of memblock in a specific - * zone, continuing from current position. 
Available as soon as memblock is - * initialized. - */ -#define for_each_free_mem_pfn_range_in_zone_from(i, zone, p_start, p_end) \ - for (; i != U64_MAX; \ - __next_mem_pfn_range_in_zone(&i, zone, p_start, p_end)) - -int __init deferred_page_init_max_threads(const struct cpumask *node_cpumask); - -#endif /* CONFIG_DEFERRED_STRUCT_PAGE_INIT */ +#endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */ /** * for_each_free_mem_range - iterate through free memblock areas @@ -325,8 +232,8 @@ int __init deferred_page_init_max_threads(const struct cpumask *node_cpumask); * soon as memblock is initialized. */ #define for_each_free_mem_range(i, nid, flags, p_start, p_end, p_nid) \ - __for_each_mem_range(i, &memblock.memory, &memblock.reserved, \ - nid, flags, p_start, p_end, p_nid) + for_each_mem_range(i, &memblock.memory, &memblock.reserved, \ + nid, flags, p_start, p_end, p_nid) /** * for_each_free_mem_range_reverse - rev-iterate through free memblock areas @@ -342,13 +249,25 @@ int __init deferred_page_init_max_threads(const struct cpumask *node_cpumask); */ #define for_each_free_mem_range_reverse(i, nid, flags, p_start, p_end, \ p_nid) \ - __for_each_mem_range_rev(i, &memblock.memory, &memblock.reserved, \ - nid, flags, p_start, p_end, p_nid) + for_each_mem_range_rev(i, &memblock.memory, &memblock.reserved, \ + nid, flags, p_start, p_end, p_nid) +static inline void memblock_set_region_flags(struct memblock_region *r, + unsigned long flags) +{ + r->flags |= flags; +} + +static inline void memblock_clear_region_flags(struct memblock_region *r, + unsigned long flags) +{ + r->flags &= ~flags; +} + +#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP int memblock_set_node(phys_addr_t base, phys_addr_t size, struct memblock_type *type, int nid); -#ifdef CONFIG_NUMA static inline void memblock_set_region_node(struct memblock_region *r, int nid) { r->nid = nid; @@ -367,101 +286,18 @@ static inline int memblock_get_region_node(const struct memblock_region *r) { return 0; } -#endif /* CONFIG_NUMA */ +#endif 
/* CONFIG_HAVE_MEMBLOCK_NODE_MAP */ -/* Flags for memblock allocation APIs */ -#define MEMBLOCK_ALLOC_ANYWHERE (~(phys_addr_t)0) -#define MEMBLOCK_ALLOC_ACCESSIBLE 0 -#define MEMBLOCK_ALLOC_KASAN 1 +phys_addr_t memblock_alloc_nid(phys_addr_t size, phys_addr_t align, int nid); +phys_addr_t memblock_alloc_try_nid(phys_addr_t size, phys_addr_t align, int nid); -/* We are using top down, so it is safe to use 0 here */ -#define MEMBLOCK_LOW_LIMIT 0 - -#ifndef ARCH_LOW_ADDRESS_LIMIT -#define ARCH_LOW_ADDRESS_LIMIT 0xffffffffUL -#endif - -phys_addr_t memblock_phys_alloc_range(phys_addr_t size, phys_addr_t align, - phys_addr_t start, phys_addr_t end); -phys_addr_t memblock_alloc_range_nid(phys_addr_t size, - phys_addr_t align, phys_addr_t start, - phys_addr_t end, int nid, bool exact_nid); -phys_addr_t memblock_phys_alloc_try_nid(phys_addr_t size, phys_addr_t align, int nid); - -static inline phys_addr_t memblock_phys_alloc(phys_addr_t size, - phys_addr_t align) -{ - return memblock_phys_alloc_range(size, align, 0, - MEMBLOCK_ALLOC_ACCESSIBLE); -} - -void *memblock_alloc_exact_nid_raw(phys_addr_t size, phys_addr_t align, - phys_addr_t min_addr, phys_addr_t max_addr, - int nid); -void *memblock_alloc_try_nid_raw(phys_addr_t size, phys_addr_t align, - phys_addr_t min_addr, phys_addr_t max_addr, - int nid); -void *memblock_alloc_try_nid(phys_addr_t size, phys_addr_t align, - phys_addr_t min_addr, phys_addr_t max_addr, - int nid); - -static __always_inline void *memblock_alloc(phys_addr_t size, phys_addr_t align) -{ - return memblock_alloc_try_nid(size, align, MEMBLOCK_LOW_LIMIT, - MEMBLOCK_ALLOC_ACCESSIBLE, NUMA_NO_NODE); -} - -static inline void *memblock_alloc_raw(phys_addr_t size, - phys_addr_t align) -{ - return memblock_alloc_try_nid_raw(size, align, MEMBLOCK_LOW_LIMIT, - MEMBLOCK_ALLOC_ACCESSIBLE, - NUMA_NO_NODE); -} - -static inline void *memblock_alloc_from(phys_addr_t size, - phys_addr_t align, - phys_addr_t min_addr) -{ - return memblock_alloc_try_nid(size, align, 
min_addr, - MEMBLOCK_ALLOC_ACCESSIBLE, NUMA_NO_NODE); -} - -static inline void *memblock_alloc_low(phys_addr_t size, - phys_addr_t align) -{ - return memblock_alloc_try_nid(size, align, MEMBLOCK_LOW_LIMIT, - ARCH_LOW_ADDRESS_LIMIT, NUMA_NO_NODE); -} - -static inline void *memblock_alloc_node(phys_addr_t size, - phys_addr_t align, int nid) -{ - return memblock_alloc_try_nid(size, align, MEMBLOCK_LOW_LIMIT, - MEMBLOCK_ALLOC_ACCESSIBLE, nid); -} - -static inline void memblock_free_early(phys_addr_t base, - phys_addr_t size) -{ - memblock_free(base, size); -} - -static inline void memblock_free_early_nid(phys_addr_t base, - phys_addr_t size, int nid) -{ - memblock_free(base, size); -} - -static inline void memblock_free_late(phys_addr_t base, phys_addr_t size) -{ - __memblock_free_late(base, size); -} +phys_addr_t memblock_alloc(phys_addr_t size, phys_addr_t align); +#ifdef CONFIG_MOVABLE_NODE /* * Set the allocation direction to bottom-up or top-down. */ -static inline __init_memblock void memblock_set_bottom_up(bool enable) +static inline void __init memblock_set_bottom_up(bool enable) { memblock.bottom_up = enable; } @@ -471,25 +307,46 @@ static inline __init_memblock void memblock_set_bottom_up(bool enable) * if this is true, that said, memblock will allocate memory * in bottom-up direction. 
*/ -static inline __init_memblock bool memblock_bottom_up(void) +static inline bool memblock_bottom_up(void) { return memblock.bottom_up; } +#else +static inline void __init memblock_set_bottom_up(bool enable) {} +static inline bool memblock_bottom_up(void) { return false; } +#endif +/* Flags for memblock_alloc_base() amd __memblock_alloc_base() */ +#define MEMBLOCK_ALLOC_ANYWHERE (~(phys_addr_t)0) +#define MEMBLOCK_ALLOC_ACCESSIBLE 0 + +phys_addr_t __init memblock_alloc_range(phys_addr_t size, phys_addr_t align, + phys_addr_t start, phys_addr_t end, + ulong flags); +phys_addr_t memblock_alloc_base(phys_addr_t size, phys_addr_t align, + phys_addr_t max_addr); +phys_addr_t __memblock_alloc_base(phys_addr_t size, phys_addr_t align, + phys_addr_t max_addr); phys_addr_t memblock_phys_mem_size(void); phys_addr_t memblock_reserved_size(void); +phys_addr_t memblock_mem_size(unsigned long limit_pfn); phys_addr_t memblock_start_of_DRAM(void); phys_addr_t memblock_end_of_DRAM(void); void memblock_enforce_memory_limit(phys_addr_t memory_limit); -void memblock_cap_memory_range(phys_addr_t base, phys_addr_t size); void memblock_mem_limit_remove_map(phys_addr_t limit); bool memblock_is_memory(phys_addr_t addr); -bool memblock_is_map_memory(phys_addr_t addr); -bool memblock_is_region_memory(phys_addr_t base, phys_addr_t size); +int memblock_is_map_memory(phys_addr_t addr); +int memblock_is_region_memory(phys_addr_t base, phys_addr_t size); bool memblock_is_reserved(phys_addr_t addr); bool memblock_is_region_reserved(phys_addr_t base, phys_addr_t size); -void memblock_dump_all(void); +extern void __memblock_dump_all(void); + +static inline void memblock_dump_all(void) +{ + if (memblock_debug) + __memblock_dump_all(); +} /** * memblock_set_current_limit - Set the current allocation limit to allow @@ -511,10 +368,8 @@ phys_addr_t memblock_get_current_limit(void); */ /** - * memblock_region_memory_base_pfn - get the lowest pfn of the memory region + * memblock_region_memory_base_pfn 
- Return the lowest pfn intersecting with the memory region * @reg: memblock_region structure - * - * Return: the lowest pfn intersecting with the memory region */ static inline unsigned long memblock_region_memory_base_pfn(const struct memblock_region *reg) { @@ -522,10 +377,8 @@ static inline unsigned long memblock_region_memory_base_pfn(const struct membloc } /** - * memblock_region_memory_end_pfn - get the end pfn of the memory region + * memblock_region_memory_end_pfn - Return the end_pfn this region * @reg: memblock_region structure - * - * Return: the end_pfn of the reserved region */ static inline unsigned long memblock_region_memory_end_pfn(const struct memblock_region *reg) { @@ -533,10 +386,8 @@ static inline unsigned long memblock_region_memory_end_pfn(const struct memblock } /** - * memblock_region_reserved_base_pfn - get the lowest pfn of the reserved region + * memblock_region_reserved_base_pfn - Return the lowest pfn intersecting with the reserved region * @reg: memblock_region structure - * - * Return: the lowest pfn intersecting with the reserved region */ static inline unsigned long memblock_region_reserved_base_pfn(const struct memblock_region *reg) { @@ -544,58 +395,23 @@ static inline unsigned long memblock_region_reserved_base_pfn(const struct membl } /** - * memblock_region_reserved_end_pfn - get the end pfn of the reserved region + * memblock_region_reserved_end_pfn - Return the end_pfn this region * @reg: memblock_region structure - * - * Return: the end_pfn of the reserved region */ static inline unsigned long memblock_region_reserved_end_pfn(const struct memblock_region *reg) { return PFN_UP(reg->base + reg->size); } -/** - * for_each_mem_region - itereate over memory regions - * @region: loop variable - */ -#define for_each_mem_region(region) \ - for (region = memblock.memory.regions; \ - region < (memblock.memory.regions + memblock.memory.cnt); \ +#define for_each_memblock(memblock_type, region) \ + for (region = 
memblock.memblock_type.regions; \ + region < (memblock.memblock_type.regions + memblock.memblock_type.cnt); \ region++) -/** - * for_each_reserved_mem_region - itereate over reserved memory regions - * @region: loop variable - */ -#define for_each_reserved_mem_region(region) \ - for (region = memblock.reserved.regions; \ - region < (memblock.reserved.regions + memblock.reserved.cnt); \ - region++) - -extern void *alloc_large_system_hash(const char *tablename, - unsigned long bucketsize, - unsigned long numentries, - int scale, - int flags, - unsigned int *_hash_shift, - unsigned int *_hash_mask, - unsigned long low_limit, - unsigned long high_limit); - -#define HASH_EARLY 0x00000001 /* Allocating during early boot? */ -#define HASH_SMALL 0x00000002 /* sub-page allocation allowed, min - * shift passed via *_hash_shift */ -#define HASH_ZERO 0x00000004 /* Zero allocated hash table */ - -/* Only NUMA needs hash distribution. 64bit NUMA architectures have - * sufficient vmalloc space. - */ -#ifdef CONFIG_NUMA -#define HASHDIST_DEFAULT IS_ENABLED(CONFIG_64BIT) -extern int hashdist; /* Distribute hashes across NUMA nodes? 
*/ -#else -#define hashdist (0) -#endif +#define for_each_memblock_type(memblock_type, rgn) \ + for (idx = 0, rgn = &memblock_type->regions[0]; \ + idx < memblock_type->cnt; \ + idx++, rgn = &memblock_type->regions[idx]) #ifdef CONFIG_MEMTEST extern void early_memtest(phys_addr_t start, phys_addr_t end); @@ -605,6 +421,14 @@ static inline void early_memtest(phys_addr_t start, phys_addr_t end) } #endif +#else +static inline phys_addr_t memblock_alloc(phys_addr_t size, phys_addr_t align) +{ + return 0; +} + +#endif /* CONFIG_HAVE_MEMBLOCK */ + #endif /* __KERNEL__ */ #endif /* _LINUX_MEMBLOCK_H */ diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h index 3096c9a0ee..a87beac415 100644 --- a/include/linux/memcontrol.h +++ b/include/linux/memcontrol.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ /* memcontrol.h - Memory Controller * * Copyright IBM Corporation, 2007 @@ -6,6 +5,16 @@ * * Copyright 2007 OpenVZ SWsoft Inc * Author: Pavel Emelianov + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. */ #ifndef _LINUX_MEMCONTROL_H @@ -17,42 +26,72 @@ #include #include #include -#include -#include +#include #include #include struct mem_cgroup; -struct obj_cgroup; struct page; struct mm_struct; struct kmem_cache; -/* Cgroup-specific page state, on top of universal node page state */ -enum memcg_stat_item { - MEMCG_SWAP = NR_VM_NODE_STAT_ITEMS, +/* + * The corresponding mem_cgroup_stat_names is defined in mm/memcontrol.c, + * These two lists should keep in accord with each other. 
+ */ +enum mem_cgroup_stat_index { + /* + * For MEM_CONTAINER_TYPE_ALL, usage = pagecache + rss. + */ + MEM_CGROUP_STAT_CACHE, /* # of pages charged as cache */ + MEM_CGROUP_STAT_RSS, /* # of pages charged as anon rss */ + MEM_CGROUP_STAT_RSS_HUGE, /* # of pages charged as anon huge */ + MEM_CGROUP_STAT_FILE_MAPPED, /* # of pages charged as file rss */ + MEM_CGROUP_STAT_DIRTY, /* # of dirty pages in page cache */ + MEM_CGROUP_STAT_WRITEBACK, /* # of pages under writeback */ + MEM_CGROUP_STAT_SWAP, /* # of pages, swapped out */ + MEM_CGROUP_STAT_NSTATS, + /* default hierarchy stats */ + MEMCG_KERNEL_STACK_KB = MEM_CGROUP_STAT_NSTATS, + MEMCG_SLAB_RECLAIMABLE, + MEMCG_SLAB_UNRECLAIMABLE, MEMCG_SOCK, - MEMCG_PERCPU_B, MEMCG_NR_STAT, }; -enum memcg_memory_event { - MEMCG_LOW, - MEMCG_HIGH, - MEMCG_MAX, - MEMCG_OOM, - MEMCG_OOM_KILL, - MEMCG_SWAP_HIGH, - MEMCG_SWAP_MAX, - MEMCG_SWAP_FAIL, - MEMCG_NR_MEMORY_EVENTS, -}; - struct mem_cgroup_reclaim_cookie { pg_data_t *pgdat; + int priority; unsigned int generation; }; +enum mem_cgroup_events_index { + MEM_CGROUP_EVENTS_PGPGIN, /* # of pages paged in */ + MEM_CGROUP_EVENTS_PGPGOUT, /* # of pages paged out */ + MEM_CGROUP_EVENTS_PGFAULT, /* # of page-faults */ + MEM_CGROUP_EVENTS_PGMAJFAULT, /* # of major page-faults */ + MEM_CGROUP_EVENTS_NSTATS, + /* default hierarchy events */ + MEMCG_LOW = MEM_CGROUP_EVENTS_NSTATS, + MEMCG_HIGH, + MEMCG_MAX, + MEMCG_OOM, + MEMCG_NR_EVENTS, +}; + +/* + * Per memcg event counter is incremented at every pagein/pageout. With THP, + * it will be incremated by the number of pages. This counter is used for + * for trigger some periodic events. This is straightforward and better + * than using jiffies etc. to handle periodic memcg event. 
+ */ +enum mem_cgroup_events_target { + MEM_CGROUP_TARGET_THRESH, + MEM_CGROUP_TARGET_SOFTLIMIT, + MEM_CGROUP_TARGET_NUMAINFO, + MEM_CGROUP_NTARGETS, +}; + #ifdef CONFIG_MEMCG #define MEM_CGROUP_ID_SHIFT 16 @@ -60,43 +99,14 @@ struct mem_cgroup_reclaim_cookie { struct mem_cgroup_id { int id; - refcount_t ref; + atomic_t ref; }; -/* - * Per memcg event counter is incremented at every pagein/pageout. With THP, - * it will be incremented by the number of pages. This counter is used - * to trigger some periodic events. This is straightforward and better - * than using jiffies etc. to handle periodic memcg event. - */ -enum mem_cgroup_events_target { - MEM_CGROUP_TARGET_THRESH, - MEM_CGROUP_TARGET_SOFTLIMIT, - MEM_CGROUP_NTARGETS, -}; - -struct memcg_vmstats_percpu { - /* Local (CPU and cgroup) page state & events */ - long state[MEMCG_NR_STAT]; - unsigned long events[NR_VM_EVENT_ITEMS]; - - /* Delta calculation for lockless upward propagation */ - long state_prev[MEMCG_NR_STAT]; - unsigned long events_prev[NR_VM_EVENT_ITEMS]; - - /* Cgroup1: threshold notifications & softlimit tree updates */ - unsigned long nr_page_events; - unsigned long targets[MEM_CGROUP_NTARGETS]; -}; - -struct memcg_vmstats { - /* Aggregated (CPU and subtree) page state & events */ - long state[MEMCG_NR_STAT]; - unsigned long events[NR_VM_EVENT_ITEMS]; - - /* Pending child counts during tree propagation */ - long state_pending[MEMCG_NR_STAT]; - unsigned long events_pending[NR_VM_EVENT_ITEMS]; +struct mem_cgroup_stat_cpu { + long count[MEMCG_NR_STAT]; + unsigned long events[MEMCG_NR_EVENTS]; + unsigned long nr_page_events; + unsigned long targets[MEM_CGROUP_NTARGETS]; }; struct mem_cgroup_reclaim_iter { @@ -106,45 +116,13 @@ struct mem_cgroup_reclaim_iter { }; /* - * Bitmap and deferred work of shrinker::id corresponding to memcg-aware - * shrinkers, which have elements charged to this memcg. 
- */ -struct shrinker_info { - struct rcu_head rcu; - atomic_long_t *nr_deferred; - unsigned long *map; -}; - -struct lruvec_stats_percpu { - /* Local (CPU and cgroup) state */ - long state[NR_VM_NODE_STAT_ITEMS]; - - /* Delta calculation for lockless upward propagation */ - long state_prev[NR_VM_NODE_STAT_ITEMS]; -}; - -struct lruvec_stats { - /* Aggregated (CPU and subtree) state */ - long state[NR_VM_NODE_STAT_ITEMS]; - - /* Pending child counts during tree propagation */ - long state_pending[NR_VM_NODE_STAT_ITEMS]; -}; - -/* - * per-node information in memory controller. + * per-zone information in memory controller. */ struct mem_cgroup_per_node { struct lruvec lruvec; - - struct lruvec_stats_percpu __percpu *lruvec_stats_percpu; - struct lruvec_stats lruvec_stats; - unsigned long lru_zone_size[MAX_NR_ZONES][NR_LRU_LISTS]; - struct mem_cgroup_reclaim_iter iter; - - struct shrinker_info __rcu *shrinker_info; + struct mem_cgroup_reclaim_iter iter[DEF_PRIORITY + 1]; struct rb_node tree_node; /* RB tree node */ unsigned long usage_in_excess;/* Set to the value by which */ @@ -166,7 +144,7 @@ struct mem_cgroup_threshold_ary { /* Size of entries[] */ unsigned int size; /* Array of thresholds */ - struct mem_cgroup_threshold entries[]; + struct mem_cgroup_threshold entries[0]; }; struct mem_cgroup_thresholds { @@ -186,48 +164,6 @@ enum memcg_kmem_state { KMEM_ONLINE, }; -#if defined(CONFIG_SMP) -struct memcg_padding { - char x[0]; -} ____cacheline_internodealigned_in_smp; -#define MEMCG_PADDING(name) struct memcg_padding name -#else -#define MEMCG_PADDING(name) -#endif - -/* - * Remember four most recent foreign writebacks with dirty pages in this - * cgroup. Inode sharing is expected to be uncommon and, even if we miss - * one in a given round, we're likely to catch it later if it keeps - * foreign-dirtying, so a fairly low count should be enough. - * - * See mem_cgroup_track_foreign_dirty_slowpath() for details. 
- */ -#define MEMCG_CGWB_FRN_CNT 4 - -struct memcg_cgwb_frn { - u64 bdi_id; /* bdi->id of the foreign inode */ - int memcg_id; /* memcg->css.id of foreign inode */ - u64 at; /* jiffies_64 at the time of dirtying */ - struct wb_completion done; /* tracks in-flight foreign writebacks */ -}; - -/* - * Bucket for arbitrarily byte-sized objects charged to a memory - * cgroup. The bucket can be reparented in one piece when the cgroup - * is destroyed, without having to round up the individual references - * of all live memory objects in the wild. - */ -struct obj_cgroup { - struct percpu_ref refcnt; - struct mem_cgroup *memcg; - atomic_t nr_charged_bytes; - union { - struct list_head list; - struct rcu_head rcu; - }; -}; - /* * The memory controller data structure. The memory controller controls both * page cache and RSS per cgroup. We would eventually like to provide @@ -241,16 +177,17 @@ struct mem_cgroup { struct mem_cgroup_id id; /* Accounted resources */ - struct page_counter memory; /* Both v1 & v2 */ - - union { - struct page_counter swap; /* v2 only */ - struct page_counter memsw; /* v1 only */ - }; + struct page_counter memory; + struct page_counter swap; /* Legacy consumer-oriented counters */ - struct page_counter kmem; /* v1 only */ - struct page_counter tcpmem; /* v1 only */ + struct page_counter memsw; + struct page_counter kmem; + struct page_counter tcpmem; + + /* Normal memory consumption range */ + unsigned long low; + unsigned long high; /* Range enforcement for interrupt charges */ struct work_struct high_work; @@ -261,9 +198,9 @@ struct mem_cgroup { struct vmpressure vmpressure; /* - * Should the OOM killer kill all belonging tasks, had it kill one? + * Should the accounting and control be hierarchical, per subtree? 
*/ - bool oom_group; + bool use_hierarchy; /* protected by memcg_oom_lock */ bool oom_lock; @@ -273,12 +210,8 @@ struct mem_cgroup { /* OOM-Killer disable */ int oom_kill_disable; - /* memory.events and memory.events.local */ + /* handle for "memory.events" */ struct cgroup_file events_file; - struct cgroup_file events_local_file; - - /* handle for "memory.swap.events" */ - struct cgroup_file swap_events_file; /* protect arrays of thresholds */ struct mutex thresholds_lock; @@ -297,18 +230,18 @@ struct mem_cgroup { * mem_cgroup ? And what type of charges should we move ? */ unsigned long move_charge_at_immigrate; + /* + * set > 0 if pages under this cgroup are moving to other cgroup. + */ + atomic_t moving_account; /* taken only while moving_account > 0 */ spinlock_t move_lock; + struct task_struct *move_lock_task; unsigned long move_lock_flags; - - MEMCG_PADDING(_pad1_); - - /* memory.stat */ - struct memcg_vmstats vmstats; - - /* memory.events */ - atomic_long_t memory_events[MEMCG_NR_MEMORY_EVENTS]; - atomic_long_t memory_events_local[MEMCG_NR_MEMORY_EVENTS]; + /* + * percpu counter. + */ + struct mem_cgroup_stat_cpu __percpu *stat; unsigned long socket_pressure; @@ -316,430 +249,94 @@ struct mem_cgroup { bool tcpmem_active; int tcpmem_pressure; -#ifdef CONFIG_MEMCG_KMEM +#ifndef CONFIG_SLOB + /* Index in the kmem_cache->memcg_params.memcg_caches array */ int kmemcg_id; enum memcg_kmem_state kmem_state; - struct obj_cgroup __rcu *objcg; - struct list_head objcg_list; /* list of inherited objcgs */ #endif - MEMCG_PADDING(_pad2_); - - /* - * set > 0 if pages under this cgroup are moving to other cgroup. 
- */ - atomic_t moving_account; - struct task_struct *move_lock_task; - - struct memcg_vmstats_percpu __percpu *vmstats_percpu; + int last_scanned_node; +#if MAX_NUMNODES > 1 + nodemask_t scan_nodes; + atomic64_t numainfo_events; + atomic_t numainfo_updating; +#endif #ifdef CONFIG_CGROUP_WRITEBACK struct list_head cgwb_list; struct wb_domain cgwb_domain; - struct memcg_cgwb_frn cgwb_frn[MEMCG_CGWB_FRN_CNT]; #endif /* List of events which userspace want to receive */ struct list_head event_list; spinlock_t event_list_lock; -#ifdef CONFIG_TRANSPARENT_HUGEPAGE - struct deferred_split deferred_split_queue; -#endif - - struct mem_cgroup_per_node *nodeinfo[]; + struct mem_cgroup_per_node *nodeinfo[0]; + /* WARNING: nodeinfo must be the last member here */ }; -/* - * size of first charge trial. "32" comes from vmscan.c's magic value. - * TODO: maybe necessary to use big numbers in big irons. - */ -#define MEMCG_CHARGE_BATCH 32U - extern struct mem_cgroup *root_mem_cgroup; -enum page_memcg_data_flags { - /* page->memcg_data is a pointer to an objcgs vector */ - MEMCG_DATA_OBJCGS = (1UL << 0), - /* page has been accounted as a non-slab kernel page */ - MEMCG_DATA_KMEM = (1UL << 1), - /* the next bit after the last actual flag */ - __NR_MEMCG_DATA_FLAGS = (1UL << 2), -}; - -#define MEMCG_DATA_FLAGS_MASK (__NR_MEMCG_DATA_FLAGS - 1) - -static inline bool PageMemcgKmem(struct page *page); - -/* - * After the initialization objcg->memcg is always pointing at - * a valid memcg, but can be atomically swapped to the parent memcg. - * - * The caller must ensure that the returned memcg won't be released: - * e.g. acquire the rcu_read_lock or css_set_lock. - */ -static inline struct mem_cgroup *obj_cgroup_memcg(struct obj_cgroup *objcg) -{ - return READ_ONCE(objcg->memcg); -} - -/* - * __page_memcg - get the memory cgroup associated with a non-kmem page - * @page: a pointer to the page struct - * - * Returns a pointer to the memory cgroup associated with the page, - * or NULL. 
This function assumes that the page is known to have a - * proper memory cgroup pointer. It's not safe to call this function - * against some type of pages, e.g. slab pages or ex-slab pages or - * kmem pages. - */ -static inline struct mem_cgroup *__page_memcg(struct page *page) -{ - unsigned long memcg_data = page->memcg_data; - - VM_BUG_ON_PAGE(PageSlab(page), page); - VM_BUG_ON_PAGE(memcg_data & MEMCG_DATA_OBJCGS, page); - VM_BUG_ON_PAGE(memcg_data & MEMCG_DATA_KMEM, page); - - return (struct mem_cgroup *)(memcg_data & ~MEMCG_DATA_FLAGS_MASK); -} - -/* - * __page_objcg - get the object cgroup associated with a kmem page - * @page: a pointer to the page struct - * - * Returns a pointer to the object cgroup associated with the page, - * or NULL. This function assumes that the page is known to have a - * proper object cgroup pointer. It's not safe to call this function - * against some type of pages, e.g. slab pages or ex-slab pages or - * LRU pages. - */ -static inline struct obj_cgroup *__page_objcg(struct page *page) -{ - unsigned long memcg_data = page->memcg_data; - - VM_BUG_ON_PAGE(PageSlab(page), page); - VM_BUG_ON_PAGE(memcg_data & MEMCG_DATA_OBJCGS, page); - VM_BUG_ON_PAGE(!(memcg_data & MEMCG_DATA_KMEM), page); - - return (struct obj_cgroup *)(memcg_data & ~MEMCG_DATA_FLAGS_MASK); -} - -/* - * page_memcg - get the memory cgroup associated with a page - * @page: a pointer to the page struct - * - * Returns a pointer to the memory cgroup associated with the page, - * or NULL. This function assumes that the page is known to have a - * proper memory cgroup pointer. It's not safe to call this function - * against some type of pages, e.g. slab pages or ex-slab pages. 
- * - * For a non-kmem page any of the following ensures page and memcg binding - * stability: - * - * - the page lock - * - LRU isolation - * - lock_page_memcg() - * - exclusive reference - * - * For a kmem page a caller should hold an rcu read lock to protect memcg - * associated with a kmem page from being released. - */ -static inline struct mem_cgroup *page_memcg(struct page *page) -{ - if (PageMemcgKmem(page)) - return obj_cgroup_memcg(__page_objcg(page)); - else - return __page_memcg(page); -} - -/* - * page_memcg_rcu - locklessly get the memory cgroup associated with a page - * @page: a pointer to the page struct - * - * Returns a pointer to the memory cgroup associated with the page, - * or NULL. This function assumes that the page is known to have a - * proper memory cgroup pointer. It's not safe to call this function - * against some type of pages, e.g. slab pages or ex-slab pages. - */ -static inline struct mem_cgroup *page_memcg_rcu(struct page *page) -{ - unsigned long memcg_data = READ_ONCE(page->memcg_data); - - VM_BUG_ON_PAGE(PageSlab(page), page); - WARN_ON_ONCE(!rcu_read_lock_held()); - - if (memcg_data & MEMCG_DATA_KMEM) { - struct obj_cgroup *objcg; - - objcg = (void *)(memcg_data & ~MEMCG_DATA_FLAGS_MASK); - return obj_cgroup_memcg(objcg); - } - - return (struct mem_cgroup *)(memcg_data & ~MEMCG_DATA_FLAGS_MASK); -} - -/* - * page_memcg_check - get the memory cgroup associated with a page - * @page: a pointer to the page struct - * - * Returns a pointer to the memory cgroup associated with the page, - * or NULL. This function unlike page_memcg() can take any page - * as an argument. It has to be used in cases when it's not known if a page - * has an associated memory cgroup pointer or an object cgroups vector or - * an object cgroup. 
- * - * For a non-kmem page any of the following ensures page and memcg binding - * stability: - * - * - the page lock - * - LRU isolation - * - lock_page_memcg() - * - exclusive reference - * - * For a kmem page a caller should hold an rcu read lock to protect memcg - * associated with a kmem page from being released. - */ -static inline struct mem_cgroup *page_memcg_check(struct page *page) -{ - /* - * Because page->memcg_data might be changed asynchronously - * for slab pages, READ_ONCE() should be used here. - */ - unsigned long memcg_data = READ_ONCE(page->memcg_data); - - if (memcg_data & MEMCG_DATA_OBJCGS) - return NULL; - - if (memcg_data & MEMCG_DATA_KMEM) { - struct obj_cgroup *objcg; - - objcg = (void *)(memcg_data & ~MEMCG_DATA_FLAGS_MASK); - return obj_cgroup_memcg(objcg); - } - - return (struct mem_cgroup *)(memcg_data & ~MEMCG_DATA_FLAGS_MASK); -} - -#ifdef CONFIG_MEMCG_KMEM -/* - * PageMemcgKmem - check if the page has MemcgKmem flag set - * @page: a pointer to the page struct - * - * Checks if the page has MemcgKmem flag set. The caller must ensure that - * the page has an associated memory cgroup. It's not safe to call this function - * against some types of pages, e.g. slab pages. - */ -static inline bool PageMemcgKmem(struct page *page) -{ - VM_BUG_ON_PAGE(page->memcg_data & MEMCG_DATA_OBJCGS, page); - return page->memcg_data & MEMCG_DATA_KMEM; -} - -/* - * page_objcgs - get the object cgroups vector associated with a page - * @page: a pointer to the page struct - * - * Returns a pointer to the object cgroups vector associated with the page, - * or NULL. This function assumes that the page is known to have an - * associated object cgroups vector. It's not safe to call this function - * against pages, which might have an associated memory cgroup: e.g. - * kernel stack pages. 
- */ -static inline struct obj_cgroup **page_objcgs(struct page *page) -{ - unsigned long memcg_data = READ_ONCE(page->memcg_data); - - VM_BUG_ON_PAGE(memcg_data && !(memcg_data & MEMCG_DATA_OBJCGS), page); - VM_BUG_ON_PAGE(memcg_data & MEMCG_DATA_KMEM, page); - - return (struct obj_cgroup **)(memcg_data & ~MEMCG_DATA_FLAGS_MASK); -} - -/* - * page_objcgs_check - get the object cgroups vector associated with a page - * @page: a pointer to the page struct - * - * Returns a pointer to the object cgroups vector associated with the page, - * or NULL. This function is safe to use if the page can be directly associated - * with a memory cgroup. - */ -static inline struct obj_cgroup **page_objcgs_check(struct page *page) -{ - unsigned long memcg_data = READ_ONCE(page->memcg_data); - - if (!memcg_data || !(memcg_data & MEMCG_DATA_OBJCGS)) - return NULL; - - VM_BUG_ON_PAGE(memcg_data & MEMCG_DATA_KMEM, page); - - return (struct obj_cgroup **)(memcg_data & ~MEMCG_DATA_FLAGS_MASK); -} - -#else -static inline bool PageMemcgKmem(struct page *page) -{ - return false; -} - -static inline struct obj_cgroup **page_objcgs(struct page *page) -{ - return NULL; -} - -static inline struct obj_cgroup **page_objcgs_check(struct page *page) -{ - return NULL; -} -#endif - -static inline bool mem_cgroup_is_root(struct mem_cgroup *memcg) -{ - return (memcg == root_mem_cgroup); -} - static inline bool mem_cgroup_disabled(void) { return !cgroup_subsys_enabled(memory_cgrp_subsys); } -static inline void mem_cgroup_protection(struct mem_cgroup *root, - struct mem_cgroup *memcg, - unsigned long *min, - unsigned long *low) +/** + * mem_cgroup_events - count memory events against a cgroup + * @memcg: the memory cgroup + * @idx: the event index + * @nr: the number of events to account for + */ +static inline void mem_cgroup_events(struct mem_cgroup *memcg, + enum mem_cgroup_events_index idx, + unsigned int nr) { - *min = *low = 0; - - if (mem_cgroup_disabled()) - return; - - /* - * There is no reclaim 
protection applied to a targeted reclaim. - * We are special casing this specific case here because - * mem_cgroup_protected calculation is not robust enough to keep - * the protection invariant for calculated effective values for - * parallel reclaimers with different reclaim target. This is - * especially a problem for tail memcgs (as they have pages on LRU) - * which would want to have effective values 0 for targeted reclaim - * but a different value for external reclaim. - * - * Example - * Let's have global and A's reclaim in parallel: - * | - * A (low=2G, usage = 3G, max = 3G, children_low_usage = 1.5G) - * |\ - * | C (low = 1G, usage = 2.5G) - * B (low = 1G, usage = 0.5G) - * - * For the global reclaim - * A.elow = A.low - * B.elow = min(B.usage, B.low) because children_low_usage <= A.elow - * C.elow = min(C.usage, C.low) - * - * With the effective values resetting we have A reclaim - * A.elow = 0 - * B.elow = B.low - * C.elow = C.low - * - * If the global reclaim races with A's reclaim then - * B.elow = C.elow = 0 because children_low_usage > A.elow) - * is possible and reclaiming B would be violating the protection. - * - */ - if (root == memcg) - return; - - *min = READ_ONCE(memcg->memory.emin); - *low = READ_ONCE(memcg->memory.elow); + this_cpu_add(memcg->stat->events[idx], nr); + cgroup_file_notify(&memcg->events_file); } -void mem_cgroup_calculate_protection(struct mem_cgroup *root, - struct mem_cgroup *memcg); +bool mem_cgroup_low(struct mem_cgroup *root, struct mem_cgroup *memcg); -static inline bool mem_cgroup_supports_protection(struct mem_cgroup *memcg) -{ - /* - * The root memcg doesn't account charges, and doesn't support - * protection. 
- */ - return !mem_cgroup_disabled() && !mem_cgroup_is_root(memcg); - -} - -static inline bool mem_cgroup_below_low(struct mem_cgroup *memcg) -{ - if (!mem_cgroup_supports_protection(memcg)) - return false; - - return READ_ONCE(memcg->memory.elow) >= - page_counter_read(&memcg->memory); -} - -static inline bool mem_cgroup_below_min(struct mem_cgroup *memcg) -{ - if (!mem_cgroup_supports_protection(memcg)) - return false; - - return READ_ONCE(memcg->memory.emin) >= - page_counter_read(&memcg->memory); -} - -int __mem_cgroup_charge(struct page *page, struct mm_struct *mm, - gfp_t gfp_mask); -static inline int mem_cgroup_charge(struct page *page, struct mm_struct *mm, - gfp_t gfp_mask) -{ - if (mem_cgroup_disabled()) - return 0; - return __mem_cgroup_charge(page, mm, gfp_mask); -} - -int mem_cgroup_swapin_charge_page(struct page *page, struct mm_struct *mm, - gfp_t gfp, swp_entry_t entry); -void mem_cgroup_swapin_uncharge_swap(swp_entry_t entry); - -void __mem_cgroup_uncharge(struct page *page); -static inline void mem_cgroup_uncharge(struct page *page) -{ - if (mem_cgroup_disabled()) - return; - __mem_cgroup_uncharge(page); -} - -void __mem_cgroup_uncharge_list(struct list_head *page_list); -static inline void mem_cgroup_uncharge_list(struct list_head *page_list) -{ - if (mem_cgroup_disabled()) - return; - __mem_cgroup_uncharge_list(page_list); -} +int mem_cgroup_try_charge(struct page *page, struct mm_struct *mm, + gfp_t gfp_mask, struct mem_cgroup **memcgp, + bool compound); +void mem_cgroup_commit_charge(struct page *page, struct mem_cgroup *memcg, + bool lrucare, bool compound); +void mem_cgroup_cancel_charge(struct page *page, struct mem_cgroup *memcg, + bool compound); +void mem_cgroup_uncharge(struct page *page); +void mem_cgroup_uncharge_list(struct list_head *page_list); void mem_cgroup_migrate(struct page *oldpage, struct page *newpage); +static struct mem_cgroup_per_node * +mem_cgroup_nodeinfo(struct mem_cgroup *memcg, int nid) +{ + return 
memcg->nodeinfo[nid]; +} + /** - * mem_cgroup_lruvec - get the lru list vector for a memcg & node + * mem_cgroup_lruvec - get the lru list vector for a node or a memcg zone + * @node: node of the wanted lruvec * @memcg: memcg of the wanted lruvec - * @pgdat: pglist_data * - * Returns the lru list vector holding pages for a given @memcg & - * @pgdat combination. This can be the node lruvec, if the memory - * controller is disabled. + * Returns the lru list vector holding pages for a given @node or a given + * @memcg and @zone. This can be the node lruvec, if the memory controller + * is disabled. */ -static inline struct lruvec *mem_cgroup_lruvec(struct mem_cgroup *memcg, - struct pglist_data *pgdat) +static inline struct lruvec *mem_cgroup_lruvec(struct pglist_data *pgdat, + struct mem_cgroup *memcg) { struct mem_cgroup_per_node *mz; struct lruvec *lruvec; if (mem_cgroup_disabled()) { - lruvec = &pgdat->__lruvec; + lruvec = node_lruvec(pgdat); goto out; } - if (!memcg) - memcg = root_mem_cgroup; - - mz = memcg->nodeinfo[pgdat->node_id]; + mz = mem_cgroup_nodeinfo(memcg, pgdat->node_id); lruvec = &mz->lruvec; out: /* @@ -752,70 +349,16 @@ static inline struct lruvec *mem_cgroup_lruvec(struct mem_cgroup *memcg, return lruvec; } -/** - * mem_cgroup_page_lruvec - return lruvec for isolating/putting an LRU page - * @page: the page - * - * This function relies on page->mem_cgroup being stable. 
- */ -static inline struct lruvec *mem_cgroup_page_lruvec(struct page *page) -{ - pg_data_t *pgdat = page_pgdat(page); - struct mem_cgroup *memcg = page_memcg(page); - - VM_WARN_ON_ONCE_PAGE(!memcg && !mem_cgroup_disabled(), page); - return mem_cgroup_lruvec(memcg, pgdat); -} +struct lruvec *mem_cgroup_page_lruvec(struct page *, struct pglist_data *); +bool task_in_mem_cgroup(struct task_struct *task, struct mem_cgroup *memcg); struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p); -struct mem_cgroup *get_mem_cgroup_from_mm(struct mm_struct *mm); - -struct lruvec *lock_page_lruvec(struct page *page); -struct lruvec *lock_page_lruvec_irq(struct page *page); -struct lruvec *lock_page_lruvec_irqsave(struct page *page, - unsigned long *flags); - -#ifdef CONFIG_DEBUG_VM -void lruvec_memcg_debug(struct lruvec *lruvec, struct page *page); -#else -static inline void lruvec_memcg_debug(struct lruvec *lruvec, struct page *page) -{ -} -#endif - static inline struct mem_cgroup *mem_cgroup_from_css(struct cgroup_subsys_state *css){ return css ? 
container_of(css, struct mem_cgroup, css) : NULL; } -static inline bool obj_cgroup_tryget(struct obj_cgroup *objcg) -{ - return percpu_ref_tryget(&objcg->refcnt); -} - -static inline void obj_cgroup_get(struct obj_cgroup *objcg) -{ - percpu_ref_get(&objcg->refcnt); -} - -static inline void obj_cgroup_get_many(struct obj_cgroup *objcg, - unsigned long nr) -{ - percpu_ref_get_many(&objcg->refcnt, nr); -} - -static inline void obj_cgroup_put(struct obj_cgroup *objcg) -{ - percpu_ref_put(&objcg->refcnt); -} - -static inline void mem_cgroup_put(struct mem_cgroup *memcg) -{ - if (memcg) - css_put(&memcg->css); -} - #define mem_cgroup_from_counter(counter, member) \ container_of(counter, struct mem_cgroup, member) @@ -835,22 +378,6 @@ static inline unsigned short mem_cgroup_id(struct mem_cgroup *memcg) } struct mem_cgroup *mem_cgroup_from_id(unsigned short id); -static inline struct mem_cgroup *mem_cgroup_from_seq(struct seq_file *m) -{ - return mem_cgroup_from_css(seq_css(m)); -} - -static inline struct mem_cgroup *lruvec_memcg(struct lruvec *lruvec) -{ - struct mem_cgroup_per_node *mz; - - if (mem_cgroup_disabled()) - return NULL; - - mz = container_of(lruvec, struct mem_cgroup_per_node, lruvec); - return mz->memcg; -} - /** * parent_mem_cgroup - find the accounting parent of a memcg * @memcg: memcg whose parent to find @@ -870,6 +397,8 @@ static inline bool mem_cgroup_is_descendant(struct mem_cgroup *memcg, { if (root == memcg) return true; + if (!root->use_hierarchy) + return false; return cgroup_is_descendant(memcg->css.cgroup, root->css.cgroup); } @@ -897,9 +426,30 @@ static inline bool mem_cgroup_online(struct mem_cgroup *memcg) return !!(memcg->css.flags & CSS_ONLINE); } +/* + * For memory reclaim. 
+ */ +int mem_cgroup_select_victim_node(struct mem_cgroup *memcg); + void mem_cgroup_update_lru_size(struct lruvec *lruvec, enum lru_list lru, int zid, int nr_pages); +unsigned long mem_cgroup_node_nr_lru_pages(struct mem_cgroup *memcg, + int nid, unsigned int lru_mask); + +static inline +unsigned long mem_cgroup_get_lru_size(struct lruvec *lruvec, enum lru_list lru) +{ + struct mem_cgroup_per_node *mz; + unsigned long nr_pages = 0; + int zid; + + mz = container_of(lruvec, struct mem_cgroup_per_node, lruvec); + for (zid = 0; zid < MAX_NR_ZONES; zid++) + nr_pages += mz->lru_zone_size[zid][lru]; + return nr_pages; +} + static inline unsigned long mem_cgroup_get_zone_lru_size(struct lruvec *lruvec, enum lru_list lru, int zone_idx) @@ -907,30 +457,26 @@ unsigned long mem_cgroup_get_zone_lru_size(struct lruvec *lruvec, struct mem_cgroup_per_node *mz; mz = container_of(lruvec, struct mem_cgroup_per_node, lruvec); - return READ_ONCE(mz->lru_zone_size[zone_idx][lru]); + return mz->lru_zone_size[zone_idx][lru]; } void mem_cgroup_handle_over_high(void); -unsigned long mem_cgroup_get_max(struct mem_cgroup *memcg); +unsigned long mem_cgroup_get_limit(struct mem_cgroup *memcg); -unsigned long mem_cgroup_size(struct mem_cgroup *memcg); - -void mem_cgroup_print_oom_context(struct mem_cgroup *memcg, +void mem_cgroup_print_oom_info(struct mem_cgroup *memcg, struct task_struct *p); -void mem_cgroup_print_oom_meminfo(struct mem_cgroup *memcg); - -static inline void mem_cgroup_enter_user_fault(void) +static inline void mem_cgroup_oom_enable(void) { - WARN_ON(current->in_user_fault); - current->in_user_fault = 1; + WARN_ON(current->memcg_may_oom); + current->memcg_may_oom = 1; } -static inline void mem_cgroup_exit_user_fault(void) +static inline void mem_cgroup_oom_disable(void) { - WARN_ON(!current->in_user_fault); - current->in_user_fault = 0; + WARN_ON(!current->memcg_may_oom); + current->memcg_may_oom = 0; } static inline bool task_in_memcg_oom(struct task_struct *p) @@ -939,259 
+485,126 @@ static inline bool task_in_memcg_oom(struct task_struct *p) } bool mem_cgroup_oom_synchronize(bool wait); -struct mem_cgroup *mem_cgroup_get_oom_group(struct task_struct *victim, - struct mem_cgroup *oom_domain); -void mem_cgroup_print_oom_group(struct mem_cgroup *memcg); #ifdef CONFIG_MEMCG_SWAP -extern bool cgroup_memory_noswap; +extern int do_swap_account; #endif void lock_page_memcg(struct page *page); void unlock_page_memcg(struct page *page); -void __mod_memcg_state(struct mem_cgroup *memcg, int idx, int val); - -/* idx can be of type enum memcg_stat_item or node_stat_item */ -static inline void mod_memcg_state(struct mem_cgroup *memcg, - int idx, int val) +/** + * mem_cgroup_update_page_stat - update page state statistics + * @page: the page + * @idx: page state item to account + * @val: number of pages (positive or negative) + * + * The @page must be locked or the caller must use lock_page_memcg() + * to prevent double accounting when the page is concurrently being + * moved to another memcg: + * + * lock_page(page) or lock_page_memcg(page) + * if (TestClearPageState(page)) + * mem_cgroup_update_page_stat(page, state, -1); + * unlock_page(page) or unlock_page_memcg(page) + */ +static inline void mem_cgroup_update_page_stat(struct page *page, + enum mem_cgroup_stat_index idx, int val) { - unsigned long flags; + VM_BUG_ON(!(rcu_read_lock_held() || PageLocked(page))); - local_irq_save(flags); - __mod_memcg_state(memcg, idx, val); - local_irq_restore(flags); + if (page->mem_cgroup) + this_cpu_add(page->mem_cgroup->stat->count[idx], val); } -static inline unsigned long memcg_page_state(struct mem_cgroup *memcg, int idx) +static inline void mem_cgroup_inc_page_stat(struct page *page, + enum mem_cgroup_stat_index idx) { - return READ_ONCE(memcg->vmstats.state[idx]); + mem_cgroup_update_page_stat(page, idx, 1); } -static inline unsigned long lruvec_page_state(struct lruvec *lruvec, - enum node_stat_item idx) +static inline void 
mem_cgroup_dec_page_stat(struct page *page, + enum mem_cgroup_stat_index idx) { - struct mem_cgroup_per_node *pn; - - if (mem_cgroup_disabled()) - return node_page_state(lruvec_pgdat(lruvec), idx); - - pn = container_of(lruvec, struct mem_cgroup_per_node, lruvec); - return READ_ONCE(pn->lruvec_stats.state[idx]); + mem_cgroup_update_page_stat(page, idx, -1); } -static inline unsigned long lruvec_page_state_local(struct lruvec *lruvec, - enum node_stat_item idx) -{ - struct mem_cgroup_per_node *pn; - long x = 0; - int cpu; - - if (mem_cgroup_disabled()) - return node_page_state(lruvec_pgdat(lruvec), idx); - - pn = container_of(lruvec, struct mem_cgroup_per_node, lruvec); - for_each_possible_cpu(cpu) - x += per_cpu(pn->lruvec_stats_percpu->state[idx], cpu); -#ifdef CONFIG_SMP - if (x < 0) - x = 0; -#endif - return x; -} - -void mem_cgroup_flush_stats(void); - -void __mod_memcg_lruvec_state(struct lruvec *lruvec, enum node_stat_item idx, - int val); -void __mod_lruvec_kmem_state(void *p, enum node_stat_item idx, int val); - -static inline void mod_lruvec_kmem_state(void *p, enum node_stat_item idx, - int val) -{ - unsigned long flags; - - local_irq_save(flags); - __mod_lruvec_kmem_state(p, idx, val); - local_irq_restore(flags); -} - -static inline void mod_memcg_lruvec_state(struct lruvec *lruvec, - enum node_stat_item idx, int val) -{ - unsigned long flags; - - local_irq_save(flags); - __mod_memcg_lruvec_state(lruvec, idx, val); - local_irq_restore(flags); -} - -void __count_memcg_events(struct mem_cgroup *memcg, enum vm_event_item idx, - unsigned long count); - -static inline void count_memcg_events(struct mem_cgroup *memcg, - enum vm_event_item idx, - unsigned long count) -{ - unsigned long flags; - - local_irq_save(flags); - __count_memcg_events(memcg, idx, count); - local_irq_restore(flags); -} - -static inline void count_memcg_page_event(struct page *page, - enum vm_event_item idx) -{ - struct mem_cgroup *memcg = page_memcg(page); - - if (memcg) - 
count_memcg_events(memcg, idx, 1); -} - -static inline void count_memcg_event_mm(struct mm_struct *mm, - enum vm_event_item idx) -{ - struct mem_cgroup *memcg; - - if (mem_cgroup_disabled()) - return; - - rcu_read_lock(); - memcg = mem_cgroup_from_task(rcu_dereference(mm->owner)); - if (likely(memcg)) - count_memcg_events(memcg, idx, 1); - rcu_read_unlock(); -} - -static inline void memcg_memory_event(struct mem_cgroup *memcg, - enum memcg_memory_event event) -{ - bool swap_event = event == MEMCG_SWAP_HIGH || event == MEMCG_SWAP_MAX || - event == MEMCG_SWAP_FAIL; - - atomic_long_inc(&memcg->memory_events_local[event]); - if (!swap_event) - cgroup_file_notify(&memcg->events_local_file); - - do { - atomic_long_inc(&memcg->memory_events[event]); - if (swap_event) - cgroup_file_notify(&memcg->swap_events_file); - else - cgroup_file_notify(&memcg->events_file); - - if (!cgroup_subsys_on_dfl(memory_cgrp_subsys)) - break; - if (cgrp_dfl_root.flags & CGRP_ROOT_MEMORY_LOCAL_EVENTS) - break; - } while ((memcg = parent_mem_cgroup(memcg)) && - !mem_cgroup_is_root(memcg)); -} - -static inline void memcg_memory_event_mm(struct mm_struct *mm, - enum memcg_memory_event event) -{ - struct mem_cgroup *memcg; - - if (mem_cgroup_disabled()) - return; - - rcu_read_lock(); - memcg = mem_cgroup_from_task(rcu_dereference(mm->owner)); - if (likely(memcg)) - memcg_memory_event(memcg, event); - rcu_read_unlock(); -} - -void split_page_memcg(struct page *head, unsigned int nr); - unsigned long mem_cgroup_soft_limit_reclaim(pg_data_t *pgdat, int order, gfp_t gfp_mask, unsigned long *total_scanned); +static inline void mem_cgroup_count_vm_event(struct mm_struct *mm, + enum vm_event_item idx) +{ + struct mem_cgroup *memcg; + + if (mem_cgroup_disabled()) + return; + + rcu_read_lock(); + memcg = mem_cgroup_from_task(rcu_dereference(mm->owner)); + if (unlikely(!memcg)) + goto out; + + switch (idx) { + case PGFAULT: + this_cpu_inc(memcg->stat->events[MEM_CGROUP_EVENTS_PGFAULT]); + break; + case 
PGMAJFAULT: + this_cpu_inc(memcg->stat->events[MEM_CGROUP_EVENTS_PGMAJFAULT]); + break; + default: + BUG(); + } +out: + rcu_read_unlock(); +} +#ifdef CONFIG_TRANSPARENT_HUGEPAGE +void mem_cgroup_split_huge_fixup(struct page *head); +#endif + #else /* CONFIG_MEMCG */ #define MEM_CGROUP_ID_SHIFT 0 #define MEM_CGROUP_ID_MAX 0 -static inline struct mem_cgroup *page_memcg(struct page *page) -{ - return NULL; -} - -static inline struct mem_cgroup *page_memcg_rcu(struct page *page) -{ - WARN_ON_ONCE(!rcu_read_lock_held()); - return NULL; -} - -static inline struct mem_cgroup *page_memcg_check(struct page *page) -{ - return NULL; -} - -static inline bool PageMemcgKmem(struct page *page) -{ - return false; -} - -static inline bool mem_cgroup_is_root(struct mem_cgroup *memcg) -{ - return true; -} +struct mem_cgroup; static inline bool mem_cgroup_disabled(void) { return true; } -static inline void memcg_memory_event(struct mem_cgroup *memcg, - enum memcg_memory_event event) +static inline void mem_cgroup_events(struct mem_cgroup *memcg, + enum mem_cgroup_events_index idx, + unsigned int nr) { } -static inline void memcg_memory_event_mm(struct mm_struct *mm, - enum memcg_memory_event event) -{ -} - -static inline void mem_cgroup_protection(struct mem_cgroup *root, - struct mem_cgroup *memcg, - unsigned long *min, - unsigned long *low) -{ - *min = *low = 0; -} - -static inline void mem_cgroup_calculate_protection(struct mem_cgroup *root, - struct mem_cgroup *memcg) -{ -} - -static inline bool mem_cgroup_below_low(struct mem_cgroup *memcg) +static inline bool mem_cgroup_low(struct mem_cgroup *root, + struct mem_cgroup *memcg) { return false; } -static inline bool mem_cgroup_below_min(struct mem_cgroup *memcg) -{ - return false; -} - -static inline int mem_cgroup_charge(struct page *page, struct mm_struct *mm, - gfp_t gfp_mask) +static inline int mem_cgroup_try_charge(struct page *page, struct mm_struct *mm, + gfp_t gfp_mask, + struct mem_cgroup **memcgp, + bool compound) { + 
*memcgp = NULL; return 0; } -static inline int mem_cgroup_swapin_charge_page(struct page *page, - struct mm_struct *mm, gfp_t gfp, swp_entry_t entry) +static inline void mem_cgroup_commit_charge(struct page *page, + struct mem_cgroup *memcg, + bool lrucare, bool compound) { - return 0; } -static inline void mem_cgroup_swapin_uncharge_swap(swp_entry_t entry) +static inline void mem_cgroup_cancel_charge(struct page *page, + struct mem_cgroup *memcg, + bool compound) { } @@ -1207,26 +620,16 @@ static inline void mem_cgroup_migrate(struct page *old, struct page *new) { } -static inline struct lruvec *mem_cgroup_lruvec(struct mem_cgroup *memcg, - struct pglist_data *pgdat) +static inline struct lruvec *mem_cgroup_lruvec(struct pglist_data *pgdat, + struct mem_cgroup *memcg) { - return &pgdat->__lruvec; + return node_lruvec(pgdat); } -static inline struct lruvec *mem_cgroup_page_lruvec(struct page *page) +static inline struct lruvec *mem_cgroup_page_lruvec(struct page *page, + struct pglist_data *pgdat) { - pg_data_t *pgdat = page_pgdat(page); - - return &pgdat->__lruvec; -} - -static inline void lruvec_memcg_debug(struct lruvec *lruvec, struct page *page) -{ -} - -static inline struct mem_cgroup *parent_mem_cgroup(struct mem_cgroup *memcg) -{ - return NULL; + return &pgdat->lruvec; } static inline bool mm_match_cgroup(struct mm_struct *mm, @@ -1235,44 +638,10 @@ static inline bool mm_match_cgroup(struct mm_struct *mm, return true; } -static inline struct mem_cgroup *get_mem_cgroup_from_mm(struct mm_struct *mm) +static inline bool task_in_mem_cgroup(struct task_struct *task, + const struct mem_cgroup *memcg) { - return NULL; -} - -static inline -struct mem_cgroup *mem_cgroup_from_css(struct cgroup_subsys_state *css) -{ - return NULL; -} - -static inline void mem_cgroup_put(struct mem_cgroup *memcg) -{ -} - -static inline struct lruvec *lock_page_lruvec(struct page *page) -{ - struct pglist_data *pgdat = page_pgdat(page); - - spin_lock(&pgdat->__lruvec.lru_lock); - return 
&pgdat->__lruvec; -} - -static inline struct lruvec *lock_page_lruvec_irq(struct page *page) -{ - struct pglist_data *pgdat = page_pgdat(page); - - spin_lock_irq(&pgdat->__lruvec.lru_lock); - return &pgdat->__lruvec; -} - -static inline struct lruvec *lock_page_lruvec_irqsave(struct page *page, - unsigned long *flagsp) -{ - struct pglist_data *pgdat = page_pgdat(page); - - spin_lock_irqsave(&pgdat->__lruvec.lru_lock, *flagsp); - return &pgdat->__lruvec; + return true; } static inline struct mem_cgroup * @@ -1306,21 +675,16 @@ static inline struct mem_cgroup *mem_cgroup_from_id(unsigned short id) return NULL; } -static inline struct mem_cgroup *mem_cgroup_from_seq(struct seq_file *m) -{ - return NULL; -} - -static inline struct mem_cgroup *lruvec_memcg(struct lruvec *lruvec) -{ - return NULL; -} - static inline bool mem_cgroup_online(struct mem_cgroup *memcg) { return true; } +static inline unsigned long +mem_cgroup_get_lru_size(struct lruvec *lruvec, enum lru_list lru) +{ + return 0; +} static inline unsigned long mem_cgroup_get_zone_lru_size(struct lruvec *lruvec, enum lru_list lru, int zone_idx) @@ -1328,23 +692,20 @@ unsigned long mem_cgroup_get_zone_lru_size(struct lruvec *lruvec, return 0; } -static inline unsigned long mem_cgroup_get_max(struct mem_cgroup *memcg) +static inline unsigned long +mem_cgroup_node_nr_lru_pages(struct mem_cgroup *memcg, + int nid, unsigned int lru_mask) { return 0; } -static inline unsigned long mem_cgroup_size(struct mem_cgroup *memcg) +static inline unsigned long mem_cgroup_get_limit(struct mem_cgroup *memcg) { return 0; } static inline void -mem_cgroup_print_oom_context(struct mem_cgroup *memcg, struct task_struct *p) -{ -} - -static inline void -mem_cgroup_print_oom_meminfo(struct mem_cgroup *memcg) +mem_cgroup_print_oom_info(struct mem_cgroup *memcg, struct task_struct *p) { } @@ -1360,11 +721,11 @@ static inline void mem_cgroup_handle_over_high(void) { } -static inline void mem_cgroup_enter_user_fault(void) +static inline void 
mem_cgroup_oom_enable(void) { } -static inline void mem_cgroup_exit_user_fault(void) +static inline void mem_cgroup_oom_disable(void) { } @@ -1378,93 +739,19 @@ static inline bool mem_cgroup_oom_synchronize(bool wait) return false; } -static inline struct mem_cgroup *mem_cgroup_get_oom_group( - struct task_struct *victim, struct mem_cgroup *oom_domain) -{ - return NULL; -} - -static inline void mem_cgroup_print_oom_group(struct mem_cgroup *memcg) +static inline void mem_cgroup_update_page_stat(struct page *page, + enum mem_cgroup_stat_index idx, + int nr) { } -static inline void __mod_memcg_state(struct mem_cgroup *memcg, - int idx, - int nr) +static inline void mem_cgroup_inc_page_stat(struct page *page, + enum mem_cgroup_stat_index idx) { } -static inline void mod_memcg_state(struct mem_cgroup *memcg, - int idx, - int nr) -{ -} - -static inline unsigned long memcg_page_state(struct mem_cgroup *memcg, int idx) -{ - return 0; -} - -static inline unsigned long lruvec_page_state(struct lruvec *lruvec, - enum node_stat_item idx) -{ - return node_page_state(lruvec_pgdat(lruvec), idx); -} - -static inline unsigned long lruvec_page_state_local(struct lruvec *lruvec, - enum node_stat_item idx) -{ - return node_page_state(lruvec_pgdat(lruvec), idx); -} - -static inline void mem_cgroup_flush_stats(void) -{ -} - -static inline void __mod_memcg_lruvec_state(struct lruvec *lruvec, - enum node_stat_item idx, int val) -{ -} - -static inline void __mod_lruvec_kmem_state(void *p, enum node_stat_item idx, - int val) -{ - struct page *page = virt_to_head_page(p); - - __mod_node_page_state(page_pgdat(page), idx, val); -} - -static inline void mod_lruvec_kmem_state(void *p, enum node_stat_item idx, - int val) -{ - struct page *page = virt_to_head_page(p); - - mod_node_page_state(page_pgdat(page), idx, val); -} - -static inline void count_memcg_events(struct mem_cgroup *memcg, - enum vm_event_item idx, - unsigned long count) -{ -} - -static inline void __count_memcg_events(struct 
mem_cgroup *memcg, - enum vm_event_item idx, - unsigned long count) -{ -} - -static inline void count_memcg_page_event(struct page *page, - int idx) -{ -} - -static inline -void count_memcg_event_mm(struct mm_struct *mm, enum vm_event_item idx) -{ -} - -static inline void split_page_memcg(struct page *head, unsigned int nr) +static inline void mem_cgroup_dec_page_stat(struct page *page, + enum mem_cgroup_stat_index idx) { } @@ -1475,104 +762,25 @@ unsigned long mem_cgroup_soft_limit_reclaim(pg_data_t *pgdat, int order, { return 0; } + +static inline void mem_cgroup_split_huge_fixup(struct page *head) +{ +} + +static inline +void mem_cgroup_count_vm_event(struct mm_struct *mm, enum vm_event_item idx) +{ +} #endif /* CONFIG_MEMCG */ -static inline void __inc_lruvec_kmem_state(void *p, enum node_stat_item idx) -{ - __mod_lruvec_kmem_state(p, idx, 1); -} - -static inline void __dec_lruvec_kmem_state(void *p, enum node_stat_item idx) -{ - __mod_lruvec_kmem_state(p, idx, -1); -} - -static inline struct lruvec *parent_lruvec(struct lruvec *lruvec) -{ - struct mem_cgroup *memcg; - - memcg = lruvec_memcg(lruvec); - if (!memcg) - return NULL; - memcg = parent_mem_cgroup(memcg); - if (!memcg) - return NULL; - return mem_cgroup_lruvec(memcg, lruvec_pgdat(lruvec)); -} - -static inline void unlock_page_lruvec(struct lruvec *lruvec) -{ - spin_unlock(&lruvec->lru_lock); -} - -static inline void unlock_page_lruvec_irq(struct lruvec *lruvec) -{ - spin_unlock_irq(&lruvec->lru_lock); -} - -static inline void unlock_page_lruvec_irqrestore(struct lruvec *lruvec, - unsigned long flags) -{ - spin_unlock_irqrestore(&lruvec->lru_lock, flags); -} - -/* Test requires a stable page->memcg binding, see page_memcg() */ -static inline bool page_matches_lruvec(struct page *page, struct lruvec *lruvec) -{ - return lruvec_pgdat(lruvec) == page_pgdat(page) && - lruvec_memcg(lruvec) == page_memcg(page); -} - -/* Don't lock again iff page's lruvec locked */ -static inline struct lruvec 
*relock_page_lruvec_irq(struct page *page, - struct lruvec *locked_lruvec) -{ - if (locked_lruvec) { - if (page_matches_lruvec(page, locked_lruvec)) - return locked_lruvec; - - unlock_page_lruvec_irq(locked_lruvec); - } - - return lock_page_lruvec_irq(page); -} - -/* Don't lock again iff page's lruvec locked */ -static inline struct lruvec *relock_page_lruvec_irqsave(struct page *page, - struct lruvec *locked_lruvec, unsigned long *flags) -{ - if (locked_lruvec) { - if (page_matches_lruvec(page, locked_lruvec)) - return locked_lruvec; - - unlock_page_lruvec_irqrestore(locked_lruvec, *flags); - } - - return lock_page_lruvec_irqsave(page, flags); -} - #ifdef CONFIG_CGROUP_WRITEBACK +struct list_head *mem_cgroup_cgwb_list(struct mem_cgroup *memcg); struct wb_domain *mem_cgroup_wb_domain(struct bdi_writeback *wb); void mem_cgroup_wb_stats(struct bdi_writeback *wb, unsigned long *pfilepages, unsigned long *pheadroom, unsigned long *pdirty, unsigned long *pwriteback); -void mem_cgroup_track_foreign_dirty_slowpath(struct page *page, - struct bdi_writeback *wb); - -static inline void mem_cgroup_track_foreign_dirty(struct page *page, - struct bdi_writeback *wb) -{ - if (mem_cgroup_disabled()) - return; - - if (unlikely(&page_memcg(page)->css != wb->memcg_css)) - mem_cgroup_track_foreign_dirty_slowpath(page, wb); -} - -void mem_cgroup_flush_foreign(struct bdi_writeback *wb); - #else /* CONFIG_CGROUP_WRITEBACK */ static inline struct wb_domain *mem_cgroup_wb_domain(struct bdi_writeback *wb) @@ -1588,20 +796,10 @@ static inline void mem_cgroup_wb_stats(struct bdi_writeback *wb, { } -static inline void mem_cgroup_track_foreign_dirty(struct page *page, - struct bdi_writeback *wb) -{ -} - -static inline void mem_cgroup_flush_foreign(struct bdi_writeback *wb) -{ -} - #endif /* CONFIG_CGROUP_WRITEBACK */ struct sock; -bool mem_cgroup_charge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages, - gfp_t gfp_mask); +bool mem_cgroup_charge_skmem(struct mem_cgroup *memcg, unsigned int 
nr_pages); void mem_cgroup_uncharge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages); #ifdef CONFIG_MEMCG extern struct static_key_false memcg_sockets_enabled_key; @@ -1618,11 +816,6 @@ static inline bool mem_cgroup_under_socket_pressure(struct mem_cgroup *memcg) } while ((memcg = parent_mem_cgroup(memcg))); return false; } - -int alloc_shrinker_info(struct mem_cgroup *memcg); -void free_shrinker_info(struct mem_cgroup *memcg); -void set_shrinker_bit(struct mem_cgroup *memcg, int nid, int shrinker_id); -void reparent_shrinker_deferred(struct mem_cgroup *memcg); #else #define mem_cgroup_sockets_enabled 0 static inline void mem_cgroup_sk_alloc(struct sock *sk) { }; @@ -1631,23 +824,16 @@ static inline bool mem_cgroup_under_socket_pressure(struct mem_cgroup *memcg) { return false; } - -static inline void set_shrinker_bit(struct mem_cgroup *memcg, - int nid, int shrinker_id) -{ -} #endif -#ifdef CONFIG_MEMCG_KMEM -bool mem_cgroup_kmem_disabled(void); -int __memcg_kmem_charge_page(struct page *page, gfp_t gfp, int order); -void __memcg_kmem_uncharge_page(struct page *page, int order); - -struct obj_cgroup *get_obj_cgroup_from_current(void); - -int obj_cgroup_charge(struct obj_cgroup *objcg, gfp_t gfp, size_t size); -void obj_cgroup_uncharge(struct obj_cgroup *objcg, size_t size); +struct kmem_cache *memcg_kmem_get_cache(struct kmem_cache *cachep); +void memcg_kmem_put_cache(struct kmem_cache *cachep); +int memcg_kmem_charge_memcg(struct page *page, gfp_t gfp, int order, + struct mem_cgroup *memcg); +int memcg_kmem_charge(struct page *page, gfp_t gfp, int order); +void memcg_kmem_uncharge(struct page *page, int order); +#if defined(CONFIG_MEMCG) && !defined(CONFIG_SLOB) extern struct static_key_false memcg_kmem_enabled_key; extern int memcg_nr_cache_ids; @@ -1664,60 +850,33 @@ void memcg_put_cache_ids(void); static inline bool memcg_kmem_enabled(void) { - return static_branch_likely(&memcg_kmem_enabled_key); -} - -static inline int memcg_kmem_charge_page(struct 
page *page, gfp_t gfp, - int order) -{ - if (memcg_kmem_enabled()) - return __memcg_kmem_charge_page(page, gfp, order); - return 0; -} - -static inline void memcg_kmem_uncharge_page(struct page *page, int order) -{ - if (memcg_kmem_enabled()) - __memcg_kmem_uncharge_page(page, order); + return static_branch_unlikely(&memcg_kmem_enabled_key); } /* - * A helper for accessing memcg's kmem_id, used for getting - * corresponding LRU lists. + * helper for accessing a memcg's index. It will be used as an index in the + * child cache array in kmem_cache, and also to derive its name. This function + * will return -1 when this is not a kmem-limited memcg. */ static inline int memcg_cache_id(struct mem_cgroup *memcg) { return memcg ? memcg->kmemcg_id : -1; } -struct mem_cgroup *mem_cgroup_from_obj(void *p); +/** + * memcg_kmem_update_page_stat - update kmem page state statistics + * @page: the page + * @idx: page state item to account + * @val: number of pages (positive or negative) + */ +static inline void memcg_kmem_update_page_stat(struct page *page, + enum mem_cgroup_stat_index idx, int val) +{ + if (memcg_kmem_enabled() && page->mem_cgroup) + this_cpu_add(page->mem_cgroup->stat->count[idx], val); +} #else -static inline bool mem_cgroup_kmem_disabled(void) -{ - return true; -} - -static inline int memcg_kmem_charge_page(struct page *page, gfp_t gfp, - int order) -{ - return 0; -} - -static inline void memcg_kmem_uncharge_page(struct page *page, int order) -{ -} - -static inline int __memcg_kmem_charge_page(struct page *page, gfp_t gfp, - int order) -{ - return 0; -} - -static inline void __memcg_kmem_uncharge_page(struct page *page, int order) -{ -} - #define for_each_memcg_cache_index(_idx) \ for (; NULL; ) @@ -1739,11 +898,10 @@ static inline void memcg_put_cache_ids(void) { } -static inline struct mem_cgroup *mem_cgroup_from_obj(void *p) +static inline void memcg_kmem_update_page_stat(struct page *page, + enum mem_cgroup_stat_index idx, int val) { - return NULL; } - 
-#endif /* CONFIG_MEMCG_KMEM */ +#endif /* CONFIG_MEMCG && !CONFIG_SLOB */ #endif /* _LINUX_MEMCONTROL_H */ diff --git a/include/linux/memory.h b/include/linux/memory.h index 182c606adb..97172279cf 100644 --- a/include/linux/memory.h +++ b/include/linux/memory.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ /* * include/linux/memory.h - generic memory definition * @@ -23,66 +22,20 @@ #define MIN_MEMORY_BLOCK_SIZE (1UL << SECTION_SIZE_BITS) -/** - * struct memory_group - a logical group of memory blocks - * @nid: The node id for all memory blocks inside the memory group. - * @blocks: List of all memory blocks belonging to this memory group. - * @present_kernel_pages: Present (online) memory outside ZONE_MOVABLE of this - * memory group. - * @present_movable_pages: Present (online) memory in ZONE_MOVABLE of this - * memory group. - * @is_dynamic: The memory group type: static vs. dynamic - * @s.max_pages: Valid with &memory_group.is_dynamic == false. The maximum - * number of pages we'll have in this static memory group. - * @d.unit_pages: Valid with &memory_group.is_dynamic == true. Unit in pages - * in which memory is added/removed in this dynamic memory group. - * This granularity defines the alignment of a unit in physical - * address space; it has to be at least as big as a single - * memory block. - * - * A memory group logically groups memory blocks; each memory block - * belongs to at most one memory group. A memory group corresponds to - * a memory device, such as a DIMM or a NUMA node, which spans multiple - * memory blocks and might even span multiple non-contiguous physical memory - * ranges. - * - * Modification of members after registration is serialized by memory - * hot(un)plug code. 
- */ -struct memory_group { - int nid; - struct list_head memory_blocks; - unsigned long present_kernel_pages; - unsigned long present_movable_pages; - bool is_dynamic; - union { - struct { - unsigned long max_pages; - } s; - struct { - unsigned long unit_pages; - } d; - }; -}; - struct memory_block { unsigned long start_section_nr; + unsigned long end_section_nr; unsigned long state; /* serialized by the dev->lock */ + int section_count; /* serialized by mem_sysfs_mutex */ int online_type; /* for passing data to online routine */ - int nid; /* NID for this memory block */ + int phys_device; /* to which fru does this belong? */ + void *hw; /* optional pointer to fw/hw data */ + int (*phys_callback)(struct memory_block *); struct device dev; - /* - * Number of vmemmap pages. These pages - * lay at the beginning of the memory block. - */ - unsigned long nr_vmemmap_pages; - struct memory_group *group; /* group (if any) for this block */ - struct list_head group_next; /* next block inside memory group */ }; int arch_get_memory_phys_device(unsigned long start_pfn); unsigned long memory_block_size_bytes(void); -int set_memory_block_size_order(unsigned int order); /* These states are exposed to userspace as text strings in sysfs */ #define MEM_ONLINE (1<<0) /* exposed to userspace */ @@ -100,6 +53,19 @@ struct memory_notify { int status_change_nid; }; +/* + * During pageblock isolation, count the number of pages within the + * range [start_pfn, start_pfn + nr_pages) which are owned by code + * in the notifier chain. 
+ */ +#define MEM_ISOLATE_COUNT (1<<0) + +struct memory_isolate_notify { + unsigned long start_pfn; /* Start of range to check */ + unsigned int nr_pages; /* # pages in range to check */ + unsigned int pages_found; /* # pages owned found by callbacks */ +}; + struct notifier_block; struct mem_section; @@ -111,9 +77,9 @@ struct mem_section; #define IPC_CALLBACK_PRI 10 #ifndef CONFIG_MEMORY_HOTPLUG_SPARSE -static inline void memory_dev_init(void) +static inline int memory_dev_init(void) { - return; + return 0; } static inline int register_memory_notifier(struct notifier_block *nb) { @@ -126,44 +92,48 @@ static inline int memory_notify(unsigned long val, void *v) { return 0; } +static inline int register_memory_isolate_notifier(struct notifier_block *nb) +{ + return 0; +} +static inline void unregister_memory_isolate_notifier(struct notifier_block *nb) +{ +} +static inline int memory_isolate_notify(unsigned long val, void *v) +{ + return 0; +} #else extern int register_memory_notifier(struct notifier_block *nb); extern void unregister_memory_notifier(struct notifier_block *nb); -int create_memory_block_devices(unsigned long start, unsigned long size, - unsigned long vmemmap_pages, - struct memory_group *group); -void remove_memory_block_devices(unsigned long start, unsigned long size); -extern void memory_dev_init(void); +extern int register_memory_isolate_notifier(struct notifier_block *nb); +extern void unregister_memory_isolate_notifier(struct notifier_block *nb); +extern int register_new_memory(int, struct mem_section *); +extern int memory_block_change_state(struct memory_block *mem, + unsigned long to_state, + unsigned long from_state_req); +#ifdef CONFIG_MEMORY_HOTREMOVE +extern int unregister_memory_section(struct mem_section *); +#endif +extern int memory_dev_init(void); extern int memory_notify(unsigned long val, void *v); -extern struct memory_block *find_memory_block(unsigned long section_nr); -typedef int (*walk_memory_blocks_func_t)(struct memory_block 
*, void *); -extern int walk_memory_blocks(unsigned long start, unsigned long size, - void *arg, walk_memory_blocks_func_t func); -extern int for_each_memory_block(void *arg, walk_memory_blocks_func_t func); +extern int memory_isolate_notify(unsigned long val, void *v); +extern struct memory_block *find_memory_block_hinted(struct mem_section *, + struct memory_block *); +extern struct memory_block *find_memory_block(struct mem_section *); #define CONFIG_MEM_BLOCK_SIZE (PAGES_PER_SECTION<lru.next. These have to be in + * some random range in unsigned long space for debugging purposes. + */ +enum { + MEMORY_HOTPLUG_MIN_BOOTMEM_TYPE = 12, + SECTION_INFO = MEMORY_HOTPLUG_MIN_BOOTMEM_TYPE, + MIX_SECTION_INFO, + NODE_INFO, + MEMORY_HOTPLUG_MAX_BOOTMEM_TYPE = NODE_INFO, +}; /* Types for control the zone type of onlined and offlined memory */ enum { - /* Offline the memory. */ - MMOP_OFFLINE = 0, - /* Online the memory. Zone depends, see default_zone_for_pfn(). */ - MMOP_ONLINE, - /* Online the memory to ZONE_NORMAL. */ + MMOP_OFFLINE = -1, + MMOP_ONLINE_KEEP, MMOP_ONLINE_KERNEL, - /* Online the memory to ZONE_MOVABLE. */ MMOP_ONLINE_MOVABLE, }; -/* Flags for add_memory() and friends to specify memory hotplug details. */ -typedef int __bitwise mhp_t; - -/* No special request */ -#define MHP_NONE ((__force mhp_t)0) /* - * Allow merging of the added System RAM resource with adjacent, - * mergeable resources. After a successful call to add_memory_resource() - * with this flag set, the resource pointer must no longer be used as it - * might be stale, or the resource might have changed. + * pgdat resizing functions */ -#define MHP_MERGE_RESOURCE ((__force mhp_t)BIT(0)) - -/* - * We want memmap (struct page array) to be self contained. - * To do so, we will use the beginning of the hot-added range to build - * the page tables for the memmap array that describes the entire range. - * Only selected architectures support it with SPARSE_VMEMMAP. 
- */ -#define MHP_MEMMAP_ON_MEMORY ((__force mhp_t)BIT(1)) -/* - * The nid field specifies a memory group id (mgid) instead. The memory group - * implies the node id (nid). - */ -#define MHP_NID_IS_MGID ((__force mhp_t)BIT(2)) - -/* - * Extended parameters for memory hotplug: - * altmap: alternative allocator for memmap array (optional) - * pgprot: page protection flags to apply to newly created page tables - * (required) - */ -struct mhp_params { - struct vmem_altmap *altmap; - pgprot_t pgprot; -}; - -bool mhp_range_allowed(u64 start, u64 size, bool need_mapping); -struct range mhp_get_pluggable_range(bool need_mapping); - +static inline +void pgdat_resize_lock(struct pglist_data *pgdat, unsigned long *flags) +{ + spin_lock_irqsave(&pgdat->node_size_lock, *flags); +} +static inline +void pgdat_resize_unlock(struct pglist_data *pgdat, unsigned long *flags) +{ + spin_unlock_irqrestore(&pgdat->node_size_lock, *flags); +} +static inline +void pgdat_resize_init(struct pglist_data *pgdat) +{ + spin_lock_init(&pgdat->node_size_lock); +} /* * Zone resizing functions * @@ -101,61 +83,44 @@ static inline void zone_seqlock_init(struct zone *zone) extern int zone_grow_free_lists(struct zone *zone, unsigned long new_nr_pages); extern int zone_grow_waitqueues(struct zone *zone, unsigned long nr_pages); extern int add_one_highpage(struct page *page, int pfn, int bad_ppro); -extern void adjust_present_page_count(struct page *page, - struct memory_group *group, - long nr_pages); /* VM interface that may be used by firmware interface */ -extern int mhp_init_memmap_on_memory(unsigned long pfn, unsigned long nr_pages, - struct zone *zone); -extern void mhp_deinit_memmap_on_memory(unsigned long pfn, unsigned long nr_pages); -extern int online_pages(unsigned long pfn, unsigned long nr_pages, - struct zone *zone, struct memory_group *group); -extern struct zone *test_pages_in_a_zone(unsigned long start_pfn, - unsigned long end_pfn); -extern void __offline_isolated_pages(unsigned long 
start_pfn, - unsigned long end_pfn); +extern int online_pages(unsigned long, unsigned long, int); +extern int test_pages_in_a_zone(unsigned long start_pfn, unsigned long end_pfn, + unsigned long *valid_start, unsigned long *valid_end); +extern void __offline_isolated_pages(unsigned long, unsigned long); -typedef void (*online_page_callback_t)(struct page *page, unsigned int order); +typedef void (*online_page_callback_t)(struct page *page); -extern void generic_online_page(struct page *page, unsigned int order); extern int set_online_page_callback(online_page_callback_t callback); extern int restore_online_page_callback(online_page_callback_t callback); +extern void __online_page_set_limits(struct page *page); +extern void __online_page_increment_counters(struct page *page); +extern void __online_page_free(struct page *page); + extern int try_online_node(int nid); -extern int arch_add_memory(int nid, u64 start, u64 size, - struct mhp_params *params); -extern u64 max_mem_size; +extern bool memhp_auto_online; -extern int mhp_online_type_from_str(const char *str); +#ifdef CONFIG_MEMORY_HOTREMOVE +extern bool is_pageblock_removable_nolock(struct page *page); +extern int arch_remove_memory(u64 start, u64 size); +extern int __remove_pages(struct zone *zone, unsigned long start_pfn, + unsigned long nr_pages); +#endif /* CONFIG_MEMORY_HOTREMOVE */ -/* Default online_type (MMOP_*) when new memory blocks are added. 
*/ -extern int mhp_default_online_type; -/* If movable_node boot option specified */ -extern bool movable_node_enabled; -static inline bool movable_node_is_enabled(void) +/* reasonably generic interface to expand the physical pages in a zone */ +extern int __add_pages(int nid, struct zone *zone, unsigned long start_pfn, + unsigned long nr_pages); + +#ifdef CONFIG_NUMA +extern int memory_add_physaddr_to_nid(u64 start); +#else +static inline int memory_add_physaddr_to_nid(u64 start) { - return movable_node_enabled; + return 0; } - -extern void arch_remove_memory(u64 start, u64 size, struct vmem_altmap *altmap); -extern void __remove_pages(unsigned long start_pfn, unsigned long nr_pages, - struct vmem_altmap *altmap); - -/* reasonably generic interface to expand the physical pages */ -extern int __add_pages(int nid, unsigned long start_pfn, unsigned long nr_pages, - struct mhp_params *params); - -#ifndef CONFIG_ARCH_HAS_ADD_PAGES -static inline int add_pages(int nid, unsigned long start_pfn, - unsigned long nr_pages, struct mhp_params *params) -{ - return __add_pages(nid, start_pfn, nr_pages, params); -} -#else /* ARCH_HAS_ADD_PAGES */ -int add_pages(int nid, unsigned long start_pfn, unsigned long nr_pages, - struct mhp_params *params); -#endif /* ARCH_HAS_ADD_PAGES */ +#endif #ifdef CONFIG_HAVE_ARCH_NODEDATA_EXTENSION /* @@ -217,20 +182,33 @@ static inline void arch_refresh_nodedata(int nid, pg_data_t *pgdat) #endif /* CONFIG_NUMA */ #endif /* CONFIG_HAVE_ARCH_NODEDATA_EXTENSION */ +#ifdef CONFIG_HAVE_BOOTMEM_INFO_NODE +extern void __init register_page_bootmem_info_node(struct pglist_data *pgdat); +#else +static inline void register_page_bootmem_info_node(struct pglist_data *pgdat) +{ +} +#endif +extern void put_page_bootmem(struct page *page); +extern void get_page_bootmem(unsigned long ingo, struct page *page, + unsigned long type); + void get_online_mems(void); void put_online_mems(void); void mem_hotplug_begin(void); void mem_hotplug_done(void); +extern void 
set_zone_contiguous(struct zone *zone); +extern void clear_zone_contiguous(struct zone *zone); + #else /* ! CONFIG_MEMORY_HOTPLUG */ -#define pfn_to_online_page(pfn) \ -({ \ - struct page *___page = NULL; \ - if (pfn_valid(pfn)) \ - ___page = pfn_to_page(pfn); \ - ___page; \ - }) +/* + * Stub functions for when hotplug is off + */ +static inline void pgdat_resize_lock(struct pglist_data *p, unsigned long *f) {} +static inline void pgdat_resize_unlock(struct pglist_data *p, unsigned long *f) {} +static inline void pgdat_resize_init(struct pglist_data *pgdat) {} static inline unsigned zone_span_seqbegin(struct zone *zone) { @@ -244,6 +222,17 @@ static inline void zone_span_writelock(struct zone *zone) {} static inline void zone_span_writeunlock(struct zone *zone) {} static inline void zone_seqlock_init(struct zone *zone) {} +static inline int mhp_notimplemented(const char *func) +{ + printk(KERN_WARNING "%s() called, with CONFIG_MEMORY_HOTPLUG disabled\n", func); + dump_stack(); + return -ENOSYS; +} + +static inline void register_page_bootmem_info_node(struct pglist_data *pgdat) +{ +} + static inline int try_online_node(int nid) { return 0; @@ -255,106 +244,48 @@ static inline void put_online_mems(void) {} static inline void mem_hotplug_begin(void) {} static inline void mem_hotplug_done(void) {} -static inline bool movable_node_is_enabled(void) -{ - return false; -} #endif /* ! CONFIG_MEMORY_HOTPLUG */ -/* - * Keep this declaration outside CONFIG_MEMORY_HOTPLUG as some - * platforms might override and use arch_get_mappable_range() - * for internal non memory hotplug purposes. 
- */ -struct range arch_get_mappable_range(void); - -#if defined(CONFIG_MEMORY_HOTPLUG) || defined(CONFIG_DEFERRED_STRUCT_PAGE_INIT) -/* - * pgdat resizing functions - */ -static inline -void pgdat_resize_lock(struct pglist_data *pgdat, unsigned long *flags) -{ - spin_lock_irqsave(&pgdat->node_size_lock, *flags); -} -static inline -void pgdat_resize_unlock(struct pglist_data *pgdat, unsigned long *flags) -{ - spin_unlock_irqrestore(&pgdat->node_size_lock, *flags); -} -static inline -void pgdat_resize_init(struct pglist_data *pgdat) -{ - spin_lock_init(&pgdat->node_size_lock); -} -#else /* !(CONFIG_MEMORY_HOTPLUG || CONFIG_DEFERRED_STRUCT_PAGE_INIT) */ -/* - * Stub functions for when hotplug is off - */ -static inline void pgdat_resize_lock(struct pglist_data *p, unsigned long *f) {} -static inline void pgdat_resize_unlock(struct pglist_data *p, unsigned long *f) {} -static inline void pgdat_resize_init(struct pglist_data *pgdat) {} -#endif /* !(CONFIG_MEMORY_HOTPLUG || CONFIG_DEFERRED_STRUCT_PAGE_INIT) */ - #ifdef CONFIG_MEMORY_HOTREMOVE +extern bool is_mem_section_removable(unsigned long pfn, unsigned long nr_pages); extern void try_offline_node(int nid); -extern int offline_pages(unsigned long start_pfn, unsigned long nr_pages, - struct memory_group *group); -extern int remove_memory(u64 start, u64 size); -extern void __remove_memory(u64 start, u64 size); -extern int offline_and_remove_memory(u64 start, u64 size); +extern int offline_pages(unsigned long start_pfn, unsigned long nr_pages); +extern void remove_memory(int nid, u64 start, u64 size); #else +static inline bool is_mem_section_removable(unsigned long pfn, + unsigned long nr_pages) +{ + return false; +} + static inline void try_offline_node(int nid) {} -static inline int offline_pages(unsigned long start_pfn, unsigned long nr_pages, - struct memory_group *group) +static inline int offline_pages(unsigned long start_pfn, unsigned long nr_pages) { return -EINVAL; } -static inline int remove_memory(u64 start, 
u64 size) -{ - return -EBUSY; -} - -static inline void __remove_memory(u64 start, u64 size) {} +static inline void remove_memory(int nid, u64 start, u64 size) {} #endif /* CONFIG_MEMORY_HOTREMOVE */ -extern void set_zone_contiguous(struct zone *zone); -extern void clear_zone_contiguous(struct zone *zone); - -#ifdef CONFIG_MEMORY_HOTPLUG -extern void __ref free_area_init_core_hotplug(int nid); -extern int __add_memory(int nid, u64 start, u64 size, mhp_t mhp_flags); -extern int add_memory(int nid, u64 start, u64 size, mhp_t mhp_flags); -extern int add_memory_resource(int nid, struct resource *resource, - mhp_t mhp_flags); -extern int add_memory_driver_managed(int nid, u64 start, u64 size, - const char *resource_name, - mhp_t mhp_flags); -extern void move_pfn_range_to_zone(struct zone *zone, unsigned long start_pfn, - unsigned long nr_pages, - struct vmem_altmap *altmap, int migratetype); -extern void remove_pfn_range_from_zone(struct zone *zone, - unsigned long start_pfn, - unsigned long nr_pages); +extern int walk_memory_range(unsigned long start_pfn, unsigned long end_pfn, + void *arg, int (*func)(struct memory_block *, void *)); +extern int add_memory(int nid, u64 start, u64 size); +extern int add_memory_resource(int nid, struct resource *resource, bool online); +extern int zone_for_memory(int nid, u64 start, u64 size, int zone_default, + bool for_device); +extern int arch_add_memory(int nid, u64 start, u64 size, bool for_device); +extern int offline_pages(unsigned long start_pfn, unsigned long nr_pages); extern bool is_memblock_offlined(struct memory_block *mem); -extern int sparse_add_section(int nid, unsigned long pfn, - unsigned long nr_pages, struct vmem_altmap *altmap); -extern void sparse_remove_section(struct mem_section *ms, - unsigned long pfn, unsigned long nr_pages, - unsigned long map_offset, struct vmem_altmap *altmap); +extern void remove_memory(int nid, u64 start, u64 size); +extern int sparse_add_one_section(struct zone *zone, unsigned long 
start_pfn); +extern void sparse_remove_one_section(struct zone *zone, struct mem_section *ms, + unsigned long map_offset); extern struct page *sparse_decode_mem_map(unsigned long coded_mem_map, unsigned long pnum); -extern struct zone *zone_for_pfn_range(int online_type, int nid, - struct memory_group *group, unsigned long start_pfn, - unsigned long nr_pages); -extern int arch_create_linear_mapping(int nid, u64 start, u64 size, - struct mhp_params *params); -void arch_remove_linear_mapping(u64 start, u64 size); -extern bool mhp_supports_memmap_on_memory(unsigned long size); -#endif /* CONFIG_MEMORY_HOTPLUG */ +extern bool zone_can_shift(unsigned long pfn, unsigned long nr_pages, + enum zone_type target, int *zone_shift); #endif /* __LINUX_MEMORY_HOTPLUG_H */ diff --git a/include/linux/mempolicy.h b/include/linux/mempolicy.h index 4091692bed..629113f062 100644 --- a/include/linux/mempolicy.h +++ b/include/linux/mempolicy.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ /* * NUMA memory policies for Linux. * Copyright 2003,2004 Andi Kleen SuSE Labs @@ -6,9 +5,8 @@ #ifndef _LINUX_MEMPOLICY_H #define _LINUX_MEMPOLICY_H 1 -#include + #include -#include #include #include #include @@ -28,10 +26,10 @@ struct mm_struct; * the process policy is used. Interrupts ignore the memory policy * of the current process. * - * Locking policy for interleave: + * Locking policy for interlave: * In process context there is no locking because only the process accesses * its own state. All vma manipulation is somewhat protected by a down_read on - * mmap_lock. + * mmap_sem. * * Freeing policy: * Mempolicy objects are reference counted. 
A mempolicy will be freed when @@ -46,8 +44,11 @@ struct mempolicy { atomic_t refcnt; unsigned short mode; /* See MPOL_* above */ unsigned short flags; /* See set_mempolicy() MPOL_F_* above */ - nodemask_t nodes; /* interleave/bind/perfer */ - + union { + short preferred_node; /* preferred */ + nodemask_t nodes; /* interleave/bind */ + /* undefined for default */ + } v; union { nodemask_t cpuset_mems_allowed; /* relative to these nodes */ nodemask_t user_nodemask; /* nodemask passed by user */ @@ -90,6 +91,10 @@ static inline struct mempolicy *mpol_dup(struct mempolicy *pol) } #define vma_policy(vma) ((vma)->vm_policy) +static inline void set_vma_policy(struct vm_area_struct *vma, struct mempolicy *pol) +{ + vma->vm_policy = pol; +} static inline void mpol_get(struct mempolicy *pol) { @@ -140,24 +145,16 @@ bool vma_policy_mof(struct vm_area_struct *vma); extern void numa_default_policy(void); extern void numa_policy_init(void); -extern void mpol_rebind_task(struct task_struct *tsk, const nodemask_t *new); +extern void mpol_rebind_task(struct task_struct *tsk, const nodemask_t *new, + enum mpol_rebind_step step); extern void mpol_rebind_mm(struct mm_struct *mm, nodemask_t *new); -extern int huge_node(struct vm_area_struct *vma, +extern struct zonelist *huge_zonelist(struct vm_area_struct *vma, unsigned long addr, gfp_t gfp_flags, struct mempolicy **mpol, nodemask_t **nodemask); extern bool init_nodemask_of_mempolicy(nodemask_t *mask); -extern bool mempolicy_in_oom_domain(struct task_struct *tsk, +extern bool mempolicy_nodemask_intersects(struct task_struct *tsk, const nodemask_t *mask); -extern nodemask_t *policy_nodemask(gfp_t gfp, struct mempolicy *policy); - -static inline nodemask_t *policy_nodemask_current(gfp_t gfp) -{ - struct mempolicy *mpol = get_task_policy(current); - - return policy_nodemask(gfp, mpol); -} - extern unsigned int mempolicy_slab_node(void); extern enum zone_type policy_zone; @@ -179,19 +176,31 @@ extern int mpol_parse_str(char *str, struct 
mempolicy **mpol); extern void mpol_to_str(char *buffer, int maxlen, struct mempolicy *pol); /* Check if a vma is migratable */ -extern bool vma_migratable(struct vm_area_struct *vma); +static inline bool vma_migratable(struct vm_area_struct *vma) +{ + if (vma->vm_flags & (VM_IO | VM_PFNMAP)) + return false; + +#ifndef CONFIG_ARCH_ENABLE_HUGEPAGE_MIGRATION + if (vma->vm_flags & VM_HUGETLB) + return false; +#endif + + /* + * Migration allocates pages in the highest zone. If we cannot + * do so then migration (at least from node to node) is not + * possible. + */ + if (vma->vm_file && + gfp_zone(mapping_gfp_mask(vma->vm_file->f_mapping)) + < policy_zone) + return false; + return true; +} extern int mpol_misplaced(struct page *, struct vm_area_struct *, unsigned long); extern void mpol_put_task_policy(struct task_struct *); -extern bool numa_demotion_enabled; - -static inline bool mpol_is_preferred_many(struct mempolicy *pol) -{ - return (pol->mode == MPOL_PREFERRED_MANY); -} - - #else struct mempolicy {}; @@ -231,6 +240,9 @@ mpol_shared_policy_lookup(struct shared_policy *sp, unsigned long idx) } #define vma_policy(vma) NULL +static inline void set_vma_policy(struct vm_area_struct *vma, struct mempolicy *pol) +{ +} static inline int vma_dup_policy(struct vm_area_struct *src, struct vm_area_struct *dst) @@ -247,7 +259,8 @@ static inline void numa_default_policy(void) } static inline void mpol_rebind_task(struct task_struct *tsk, - const nodemask_t *new) + const nodemask_t *new, + enum mpol_rebind_step step) { } @@ -255,13 +268,13 @@ static inline void mpol_rebind_mm(struct mm_struct *mm, nodemask_t *new) { } -static inline int huge_node(struct vm_area_struct *vma, +static inline struct zonelist *huge_zonelist(struct vm_area_struct *vma, unsigned long addr, gfp_t gfp_flags, struct mempolicy **mpol, nodemask_t **nodemask) { *mpol = NULL; *nodemask = NULL; - return 0; + return node_zonelist(0, gfp_flags); } static inline bool init_nodemask_of_mempolicy(nodemask_t *m) @@ 
-295,18 +308,5 @@ static inline int mpol_misplaced(struct page *page, struct vm_area_struct *vma, static inline void mpol_put_task_policy(struct task_struct *task) { } - -static inline nodemask_t *policy_nodemask_current(gfp_t gfp) -{ - return NULL; -} - -#define numa_demotion_enabled false - -static inline bool mpol_is_preferred_many(struct mempolicy *pol) -{ - return false; -} - #endif /* CONFIG_NUMA */ #endif diff --git a/include/linux/mempool.h b/include/linux/mempool.h index 0c964ac107..b1086c9365 100644 --- a/include/linux/mempool.h +++ b/include/linux/mempool.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ /* * memory buffer pool support */ @@ -25,18 +24,6 @@ typedef struct mempool_s { wait_queue_head_t wait; } mempool_t; -static inline bool mempool_initialized(mempool_t *pool) -{ - return pool->elements != NULL; -} - -void mempool_exit(mempool_t *pool); -int mempool_init_node(mempool_t *pool, int min_nr, mempool_alloc_t *alloc_fn, - mempool_free_t *free_fn, void *pool_data, - gfp_t gfp_mask, int node_id); -int mempool_init(mempool_t *pool, int min_nr, mempool_alloc_t *alloc_fn, - mempool_free_t *free_fn, void *pool_data); - extern mempool_t *mempool_create(int min_nr, mempool_alloc_t *alloc_fn, mempool_free_t *free_fn, void *pool_data); extern mempool_t *mempool_create_node(int min_nr, mempool_alloc_t *alloc_fn, @@ -55,14 +42,6 @@ extern void mempool_free(void *element, mempool_t *pool); */ void *mempool_alloc_slab(gfp_t gfp_mask, void *pool_data); void mempool_free_slab(void *element, void *pool_data); - -static inline int -mempool_init_slab_pool(mempool_t *pool, int min_nr, struct kmem_cache *kc) -{ - return mempool_init(pool, min_nr, mempool_alloc_slab, - mempool_free_slab, (void *) kc); -} - static inline mempool_t * mempool_create_slab_pool(int min_nr, struct kmem_cache *kc) { @@ -76,13 +55,6 @@ mempool_create_slab_pool(int min_nr, struct kmem_cache *kc) */ void *mempool_kmalloc(gfp_t gfp_mask, void *pool_data); void mempool_kfree(void 
*element, void *pool_data); - -static inline int mempool_init_kmalloc_pool(mempool_t *pool, int min_nr, size_t size) -{ - return mempool_init(pool, min_nr, mempool_kmalloc, - mempool_kfree, (void *) size); -} - static inline mempool_t *mempool_create_kmalloc_pool(int min_nr, size_t size) { return mempool_create(min_nr, mempool_kmalloc, mempool_kfree, @@ -95,13 +67,6 @@ static inline mempool_t *mempool_create_kmalloc_pool(int min_nr, size_t size) */ void *mempool_alloc_pages(gfp_t gfp_mask, void *pool_data); void mempool_free_pages(void *element, void *pool_data); - -static inline int mempool_init_page_pool(mempool_t *pool, int min_nr, int order) -{ - return mempool_init(pool, min_nr, mempool_alloc_pages, - mempool_free_pages, (void *)(long)order); -} - static inline mempool_t *mempool_create_page_pool(int min_nr, int order) { return mempool_create(min_nr, mempool_alloc_pages, mempool_free_pages, diff --git a/include/linux/memremap.h b/include/linux/memremap.h index c0e9d35889..93416196ba 100644 --- a/include/linux/memremap.h +++ b/include/linux/memremap.h @@ -1,7 +1,6 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_MEMREMAP_H_ #define _LINUX_MEMREMAP_H_ -#include +#include #include #include @@ -17,134 +16,47 @@ struct device; * @alloc: track pages consumed, private to vmemmap_populate() */ struct vmem_altmap { - unsigned long base_pfn; - const unsigned long end_pfn; + const unsigned long base_pfn; const unsigned long reserve; unsigned long free; unsigned long align; unsigned long alloc; }; -/* - * Specialize ZONE_DEVICE memory into multiple types each has a different - * usage. - * - * MEMORY_DEVICE_PRIVATE: - * Device memory that is not directly addressable by the CPU: CPU can neither - * read nor write private memory. In this case, we do still have struct pages - * backing the device memory. 
Doing so simplifies the implementation, but it is - * important to remember that there are certain points at which the struct page - * must be treated as an opaque object, rather than a "normal" struct page. - * - * A more complete discussion of unaddressable memory may be found in - * include/linux/hmm.h and Documentation/vm/hmm.rst. - * - * MEMORY_DEVICE_FS_DAX: - * Host memory that has similar access semantics as System RAM i.e. DMA - * coherent and supports page pinning. In support of coordinating page - * pinning vs other operations MEMORY_DEVICE_FS_DAX arranges for a - * wakeup event whenever a page is unpinned and becomes idle. This - * wakeup is used to coordinate physical address space management (ex: - * fs truncate/hole punch) vs pinned pages (ex: device dma). - * - * MEMORY_DEVICE_GENERIC: - * Host memory that has similar access semantics as System RAM i.e. DMA - * coherent and supports page pinning. This is for example used by DAX devices - * that expose memory using a character device. - * - * MEMORY_DEVICE_PCI_P2PDMA: - * Device memory residing in a PCI BAR intended for use with Peer-to-Peer - * transactions. - */ -enum memory_type { - /* 0 is reserved to catch uninitialized type fields */ - MEMORY_DEVICE_PRIVATE = 1, - MEMORY_DEVICE_FS_DAX, - MEMORY_DEVICE_GENERIC, - MEMORY_DEVICE_PCI_P2PDMA, -}; +unsigned long vmem_altmap_offset(struct vmem_altmap *altmap); +void vmem_altmap_free(struct vmem_altmap *altmap, unsigned long nr_pfns); -struct dev_pagemap_ops { - /* - * Called once the page refcount reaches 1. (ZONE_DEVICE pages never - * reach 0 refcount unless there is a refcount bug. This allows the - * device driver to implement its own memory management.) - */ - void (*page_free)(struct page *page); - - /* - * Transition the refcount in struct dev_pagemap to the dead state. - */ - void (*kill)(struct dev_pagemap *pgmap); - - /* - * Wait for refcount in struct dev_pagemap to be idle and reap it. 
- */ - void (*cleanup)(struct dev_pagemap *pgmap); - - /* - * Used for private (un-addressable) device memory only. Must migrate - * the page back to a CPU accessible page. - */ - vm_fault_t (*migrate_to_ram)(struct vm_fault *vmf); -}; - -#define PGMAP_ALTMAP_VALID (1 << 0) +#ifdef CONFIG_ZONE_DEVICE +struct vmem_altmap *to_vmem_altmap(unsigned long memmap_start); +#else +static inline struct vmem_altmap *to_vmem_altmap(unsigned long memmap_start) +{ + return NULL; +} +#endif /** * struct dev_pagemap - metadata for ZONE_DEVICE mappings * @altmap: pre-allocated/reserved memory for vmemmap allocations + * @res: physical address range covered by @ref * @ref: reference count that pins the devm_memremap_pages() mapping - * @internal_ref: internal reference if @ref is not provided by the caller - * @done: completion for @internal_ref - * @type: memory type: see MEMORY_* in memory_hotplug.h - * @flags: PGMAP_* flags to specify defailed behavior - * @ops: method table - * @owner: an opaque pointer identifying the entity that manages this - * instance. Used by various helpers to make sure that no - * foreign ZONE_DEVICE memory is accessed. 
- * @nr_range: number of ranges to be mapped - * @range: range to be mapped when nr_range == 1 - * @ranges: array of ranges to be mapped when nr_range > 1 + * @dev: host device of the mapping for debug */ struct dev_pagemap { - struct vmem_altmap altmap; + struct vmem_altmap *altmap; + const struct resource *res; struct percpu_ref *ref; - struct percpu_ref internal_ref; - struct completion done; - enum memory_type type; - unsigned int flags; - const struct dev_pagemap_ops *ops; - void *owner; - int nr_range; - union { - struct range range; - struct range ranges[0]; - }; + struct device *dev; }; -static inline struct vmem_altmap *pgmap_altmap(struct dev_pagemap *pgmap) -{ - if (pgmap->flags & PGMAP_ALTMAP_VALID) - return &pgmap->altmap; - return NULL; -} - #ifdef CONFIG_ZONE_DEVICE -void *memremap_pages(struct dev_pagemap *pgmap, int nid); -void memunmap_pages(struct dev_pagemap *pgmap); -void *devm_memremap_pages(struct device *dev, struct dev_pagemap *pgmap); -void devm_memunmap_pages(struct device *dev, struct dev_pagemap *pgmap); -struct dev_pagemap *get_dev_pagemap(unsigned long pfn, - struct dev_pagemap *pgmap); -bool pgmap_pfn_valid(struct dev_pagemap *pgmap, unsigned long pfn); - -unsigned long vmem_altmap_offset(struct vmem_altmap *altmap); -void vmem_altmap_free(struct vmem_altmap *altmap, unsigned long nr_pfns); -unsigned long memremap_compat_align(void); +void *devm_memremap_pages(struct device *dev, struct resource *res, + struct percpu_ref *ref, struct vmem_altmap *altmap); +struct dev_pagemap *find_dev_pagemap(resource_size_t phys); #else static inline void *devm_memremap_pages(struct device *dev, - struct dev_pagemap *pgmap) + struct resource *res, struct percpu_ref *ref, + struct vmem_altmap *altmap) { /* * Fail attempts to call devm_memremap_pages() without @@ -155,43 +67,48 @@ static inline void *devm_memremap_pages(struct device *dev, return ERR_PTR(-ENXIO); } -static inline void devm_memunmap_pages(struct device *dev, - struct dev_pagemap 
*pgmap) -{ -} - -static inline struct dev_pagemap *get_dev_pagemap(unsigned long pfn, - struct dev_pagemap *pgmap) +static inline struct dev_pagemap *find_dev_pagemap(resource_size_t phys) { return NULL; } +#endif -static inline bool pgmap_pfn_valid(struct dev_pagemap *pgmap, unsigned long pfn) +/** + * get_dev_pagemap() - take a new live reference on the dev_pagemap for @pfn + * @pfn: page frame number to lookup page_map + * @pgmap: optional known pgmap that already has a reference + * + * @pgmap allows the overhead of a lookup to be bypassed when @pfn lands in the + * same mapping. + */ +static inline struct dev_pagemap *get_dev_pagemap(unsigned long pfn, + struct dev_pagemap *pgmap) { - return false; -} + const struct resource *res = pgmap ? pgmap->res : NULL; + resource_size_t phys = PFN_PHYS(pfn); -static inline unsigned long vmem_altmap_offset(struct vmem_altmap *altmap) -{ - return 0; -} + /* + * In the cached case we're already holding a live reference so + * we can simply do a blind increment + */ + if (res && phys >= res->start && phys <= res->end) { + percpu_ref_get(pgmap->ref); + return pgmap; + } -static inline void vmem_altmap_free(struct vmem_altmap *altmap, - unsigned long nr_pfns) -{ -} + /* fall back to slow path lookup */ + rcu_read_lock(); + pgmap = find_dev_pagemap(phys); + if (pgmap && !percpu_ref_tryget_live(pgmap->ref)) + pgmap = NULL; + rcu_read_unlock(); -/* when memremap_pages() is disabled all archs can remap a single page */ -static inline unsigned long memremap_compat_align(void) -{ - return PAGE_SIZE; + return pgmap; } -#endif /* CONFIG_ZONE_DEVICE */ static inline void put_dev_pagemap(struct dev_pagemap *pgmap) { if (pgmap) percpu_ref_put(pgmap->ref); } - #endif /* _LINUX_MEMREMAP_H_ */ diff --git a/include/linux/memstick.h b/include/linux/memstick.h index ebf73d4ee9..690c35a9d4 100644 --- a/include/linux/memstick.h +++ b/include/linux/memstick.h @@ -1,8 +1,12 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * Sony MemoryStick 
support * * Copyright (C) 2007 Alex Dubov + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * */ #ifndef _MEMSTICK_H @@ -281,7 +285,6 @@ struct memstick_host { struct memstick_dev *card; unsigned int retries; - bool removing; /* Notify the host that some requests are pending. */ void (*request)(struct memstick_host *host); @@ -289,7 +292,7 @@ struct memstick_host { int (*set_param)(struct memstick_host *host, enum memstick_param param, int value); - unsigned long private[] ____cacheline_aligned; + unsigned long private[0] ____cacheline_aligned; }; struct memstick_driver { diff --git a/include/linux/mfd/88pm80x.h b/include/linux/mfd/88pm80x.h index def5df6e74..c118a7ec94 100644 --- a/include/linux/mfd/88pm80x.h +++ b/include/linux/mfd/88pm80x.h @@ -1,9 +1,12 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * Marvell 88PM80x Interface * * Copyright (C) 2012 Marvell International Ltd. * Qiao Zhou + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. */ #ifndef __LINUX_MFD_88PM80X_H diff --git a/include/linux/mfd/88pm860x.h b/include/linux/mfd/88pm860x.h index 473545a2c4..cd97530205 100644 --- a/include/linux/mfd/88pm860x.h +++ b/include/linux/mfd/88pm860x.h @@ -1,9 +1,12 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * Marvell 88PM860x Interface * * Copyright (C) 2009 Marvell International Ltd. * Haojian Zhuang + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
*/ #ifndef __LINUX_MFD_88PM860X_H diff --git a/include/linux/mfd/aat2870.h b/include/linux/mfd/aat2870.h index 2445842d48..f7316c29bd 100644 --- a/include/linux/mfd/aat2870.h +++ b/include/linux/mfd/aat2870.h @@ -1,9 +1,22 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * linux/include/linux/mfd/aat2870.h * * Copyright (c) 2011, NVIDIA Corporation. * Author: Jin Park + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA + * 02110-1301 USA */ #ifndef __LINUX_MFD_AAT2870_H @@ -136,6 +149,7 @@ struct aat2870_data { /* for debugfs */ struct dentry *dentry_root; + struct dentry *dentry_reg; }; struct aat2870_subdev_info { diff --git a/include/linux/mfd/ab3100.h b/include/linux/mfd/ab3100.h index a881d84951..afd3080bde 100644 --- a/include/linux/mfd/ab3100.h +++ b/include/linux/mfd/ab3100.h @@ -1,8 +1,9 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright (C) 2007-2009 ST-Ericsson AB + * License terms: GNU General Public License (GPL) version 2 * AB3100 core access functions * Author: Linus Walleij + * */ #include diff --git a/include/linux/mfd/abx500.h b/include/linux/mfd/abx500.h index 7f07cfe447..552cc1d61c 100644 --- a/include/linux/mfd/abx500.h +++ b/include/linux/mfd/abx500.h @@ -1,6 +1,6 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright (C) 2007-2009 ST-Ericsson AB + * License terms: GNU General Public License (GPL) version 2 * * ABX500 core access 
functions. * The abx500 interface is used for the Analog Baseband chips. @@ -28,6 +28,283 @@ struct abx500_init_settings { u8 setting; }; +/* Battery driver related data */ +/* + * ADC for the battery thermistor. + * When using the ABx500_ADC_THERM_BATCTRL the battery ID resistor is combined + * with a NTC resistor to both identify the battery and to measure its + * temperature. Different phone manufactures uses different techniques to both + * identify the battery and to read its temperature. + */ +enum abx500_adc_therm { + ABx500_ADC_THERM_BATCTRL, + ABx500_ADC_THERM_BATTEMP, +}; + +/** + * struct abx500_res_to_temp - defines one point in a temp to res curve. To + * be used in battery packs that combines the identification resistor with a + * NTC resistor. + * @temp: battery pack temperature in Celcius + * @resist: NTC resistor net total resistance + */ +struct abx500_res_to_temp { + int temp; + int resist; +}; + +/** + * struct abx500_v_to_cap - Table for translating voltage to capacity + * @voltage: Voltage in mV + * @capacity: Capacity in percent + */ +struct abx500_v_to_cap { + int voltage; + int capacity; +}; + +/* Forward declaration */ +struct abx500_fg; + +/** + * struct abx500_fg_parameters - Fuel gauge algorithm parameters, in seconds + * if not specified + * @recovery_sleep_timer: Time between measurements while recovering + * @recovery_total_time: Total recovery time + * @init_timer: Measurement interval during startup + * @init_discard_time: Time we discard voltage measurement at startup + * @init_total_time: Total init time during startup + * @high_curr_time: Time current has to be high to go to recovery + * @accu_charging: FG accumulation time while charging + * @accu_high_curr: FG accumulation time in high current mode + * @high_curr_threshold: High current threshold, in mA + * @lowbat_threshold: Low battery threshold, in mV + * @overbat_threshold: Over battery threshold, in mV + * @battok_falling_th_sel0 Threshold in mV for battOk signal sel0 + * 
Resolution in 50 mV step. + * @battok_raising_th_sel1 Threshold in mV for battOk signal sel1 + * Resolution in 50 mV step. + * @user_cap_limit Capacity reported from user must be within this + * limit to be considered as sane, in percentage + * points. + * @maint_thres This is the threshold where we stop reporting + * battery full while in maintenance, in per cent + * @pcut_enable: Enable power cut feature in ab8505 + * @pcut_max_time: Max time threshold + * @pcut_flag_time: Flagtime threshold + * @pcut_max_restart: Max number of restarts + * @pcut_debounce_time: Sets battery debounce time + */ +struct abx500_fg_parameters { + int recovery_sleep_timer; + int recovery_total_time; + int init_timer; + int init_discard_time; + int init_total_time; + int high_curr_time; + int accu_charging; + int accu_high_curr; + int high_curr_threshold; + int lowbat_threshold; + int overbat_threshold; + int battok_falling_th_sel0; + int battok_raising_th_sel1; + int user_cap_limit; + int maint_thres; + bool pcut_enable; + u8 pcut_max_time; + u8 pcut_flag_time; + u8 pcut_max_restart; + u8 pcut_debounce_time; +}; + +/** + * struct abx500_charger_maximization - struct used by the board config. 
+ * @use_maxi: Enable maximization for this battery type + * @maxi_chg_curr: Maximum charger current allowed + * @maxi_wait_cycles: cycles to wait before setting charger current + * @charger_curr_step delta between two charger current settings (mA) + */ +struct abx500_maxim_parameters { + bool ena_maxi; + int chg_curr; + int wait_cycles; + int charger_curr_step; +}; + +/** + * struct abx500_battery_type - different batteries supported + * @name: battery technology + * @resis_high: battery upper resistance limit + * @resis_low: battery lower resistance limit + * @charge_full_design: Maximum battery capacity in mAh + * @nominal_voltage: Nominal voltage of the battery in mV + * @termination_vol: max voltage upto which battery can be charged + * @termination_curr battery charging termination current in mA + * @recharge_cap battery capacity limit that will trigger a new + * full charging cycle in the case where maintenan- + * -ce charging has been disabled + * @normal_cur_lvl: charger current in normal state in mA + * @normal_vol_lvl: charger voltage in normal state in mV + * @maint_a_cur_lvl: charger current in maintenance A state in mA + * @maint_a_vol_lvl: charger voltage in maintenance A state in mV + * @maint_a_chg_timer_h: charge time in maintenance A state + * @maint_b_cur_lvl: charger current in maintenance B state in mA + * @maint_b_vol_lvl: charger voltage in maintenance B state in mV + * @maint_b_chg_timer_h: charge time in maintenance B state + * @low_high_cur_lvl: charger current in temp low/high state in mA + * @low_high_vol_lvl: charger voltage in temp low/high state in mV' + * @battery_resistance: battery inner resistance in mOhm. 
+ * @n_r_t_tbl_elements: number of elements in r_to_t_tbl + * @r_to_t_tbl: table containing resistance to temp points + * @n_v_cap_tbl_elements: number of elements in v_to_cap_tbl + * @v_to_cap_tbl: Voltage to capacity (in %) table + * @n_batres_tbl_elements number of elements in the batres_tbl + * @batres_tbl battery internal resistance vs temperature table + */ +struct abx500_battery_type { + int name; + int resis_high; + int resis_low; + int charge_full_design; + int nominal_voltage; + int termination_vol; + int termination_curr; + int recharge_cap; + int normal_cur_lvl; + int normal_vol_lvl; + int maint_a_cur_lvl; + int maint_a_vol_lvl; + int maint_a_chg_timer_h; + int maint_b_cur_lvl; + int maint_b_vol_lvl; + int maint_b_chg_timer_h; + int low_high_cur_lvl; + int low_high_vol_lvl; + int battery_resistance; + int n_temp_tbl_elements; + const struct abx500_res_to_temp *r_to_t_tbl; + int n_v_cap_tbl_elements; + const struct abx500_v_to_cap *v_to_cap_tbl; + int n_batres_tbl_elements; + const struct batres_vs_temp *batres_tbl; +}; + +/** + * struct abx500_bm_capacity_levels - abx500 capacity level data + * @critical: critical capacity level in percent + * @low: low capacity level in percent + * @normal: normal capacity level in percent + * @high: high capacity level in percent + * @full: full capacity level in percent + */ +struct abx500_bm_capacity_levels { + int critical; + int low; + int normal; + int high; + int full; +}; + +/** + * struct abx500_bm_charger_parameters - Charger specific parameters + * @usb_volt_max: maximum allowed USB charger voltage in mV + * @usb_curr_max: maximum allowed USB charger current in mA + * @ac_volt_max: maximum allowed AC charger voltage in mV + * @ac_curr_max: maximum allowed AC charger current in mA + */ +struct abx500_bm_charger_parameters { + int usb_volt_max; + int usb_curr_max; + int ac_volt_max; + int ac_curr_max; +}; + +/** + * struct abx500_bm_data - abx500 battery management data + * @temp_under under this temp, 
charging is stopped + * @temp_low between this temp and temp_under charging is reduced + * @temp_high between this temp and temp_over charging is reduced + * @temp_over over this temp, charging is stopped + * @temp_now present battery temperature + * @temp_interval_chg temperature measurement interval in s when charging + * @temp_interval_nochg temperature measurement interval in s when not charging + * @main_safety_tmr_h safety timer for main charger + * @usb_safety_tmr_h safety timer for usb charger + * @bkup_bat_v voltage which we charge the backup battery with + * @bkup_bat_i current which we charge the backup battery with + * @no_maintenance indicates that maintenance charging is disabled + * @capacity_scaling indicates whether capacity scaling is to be used + * @abx500_adc_therm placement of thermistor, batctrl or battemp adc + * @chg_unknown_bat flag to enable charging of unknown batteries + * @enable_overshoot flag to enable VBAT overshoot control + * @auto_trig flag to enable auto adc trigger + * @fg_res resistance of FG resistor in 0.1mOhm + * @n_btypes number of elements in array bat_type + * @batt_id index of the identified battery in array bat_type + * @interval_charging charge alg cycle period time when charging (sec) + * @interval_not_charging charge alg cycle period time when not charging (sec) + * @temp_hysteresis temperature hysteresis + * @gnd_lift_resistance Battery ground to phone ground resistance (mOhm) + * @n_chg_out_curr number of elements in array chg_output_curr + * @n_chg_in_curr number of elements in array chg_input_curr + * @chg_output_curr charger output current level map + * @chg_input_curr charger input current level map + * @maxi maximization parameters + * @cap_levels capacity in percent for the different capacity levels + * @bat_type table of supported battery types + * @chg_params charger parameters + * @fg_params fuel gauge parameters + */ +struct abx500_bm_data { + int temp_under; + int temp_low; + int temp_high; + int 
temp_over; + int temp_now; + int temp_interval_chg; + int temp_interval_nochg; + int main_safety_tmr_h; + int usb_safety_tmr_h; + int bkup_bat_v; + int bkup_bat_i; + bool autopower_cfg; + bool ac_enabled; + bool usb_enabled; + bool usb_power_path; + bool no_maintenance; + bool capacity_scaling; + bool chg_unknown_bat; + bool enable_overshoot; + bool auto_trig; + enum abx500_adc_therm adc_therm; + int fg_res; + int n_btypes; + int batt_id; + int interval_charging; + int interval_not_charging; + int temp_hysteresis; + int gnd_lift_resistance; + int n_chg_out_curr; + int n_chg_in_curr; + int *chg_output_curr; + int *chg_input_curr; + const struct abx500_maxim_parameters *maxi; + const struct abx500_bm_capacity_levels *cap_levels; + struct abx500_battery_type *bat_type; + const struct abx500_bm_charger_parameters *chg_params; + const struct abx500_fg_parameters *fg_params; +}; + +enum { + NTC_EXTERNAL = 0, + NTC_INTERNAL, +}; + +int ab8500_bm_of_probe(struct device *dev, + struct device_node *np, + struct abx500_bm_data *bm); + int abx500_set_register_interruptible(struct device *dev, u8 bank, u8 reg, u8 value); int abx500_get_register_interruptible(struct device *dev, u8 bank, u8 reg, diff --git a/include/linux/mfd/abx500/ab8500-bm.h b/include/linux/mfd/abx500/ab8500-bm.h index 903e94c189..12a5b39692 100644 --- a/include/linux/mfd/abx500/ab8500-bm.h +++ b/include/linux/mfd/abx500/ab8500-bm.h @@ -1,8 +1,8 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright ST-Ericsson 2012. * * Author: Arun Murthy + * Licensed under GPLv2. */ #ifndef _AB8500_BM_H @@ -248,6 +248,8 @@ enum bup_vch_sel { #define BAT_CTRL_20U_ENA 0x02 #define BAT_CTRL_18U_ENA 0x01 #define BAT_CTRL_16U_ENA 0x02 +#define BAT_CTRL_60U_ENA 0x01 +#define BAT_CTRL_120U_ENA 0x02 #define BAT_CTRL_CMP_ENA 0x04 #define FORCE_BAT_CTRL_CMP_HIGH 0x08 #define BAT_CTRL_PULL_UP_ENA 0x10 @@ -277,7 +279,7 @@ enum bup_vch_sel { * struct res_to_temp - defines one point in a temp to res curve. 
To * be used in battery packs that combines the identification resistor with a * NTC resistor. - * @temp: battery pack temperature in Celsius + * @temp: battery pack temperature in Celsius * @resist: NTC resistor net total resistance */ struct res_to_temp { @@ -288,7 +290,7 @@ struct res_to_temp { /** * struct batres_vs_temp - defines one point in a temp vs battery internal * resistance curve. - * @temp: battery pack temperature in Celsius + * @temp: battery pack temperature in Celsius * @resist: battery internal reistance in mOhm */ struct batres_vs_temp { diff --git a/include/linux/mfd/abx500/ab8500-codec.h b/include/linux/mfd/abx500/ab8500-codec.h index c19f505122..d7079413de 100644 --- a/include/linux/mfd/abx500/ab8500-codec.h +++ b/include/linux/mfd/abx500/ab8500-codec.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright (C) ST-Ericsson SA 2012 * @@ -6,6 +5,10 @@ * for ST-Ericsson. * * License terms: + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation. */ #ifndef AB8500_CORE_CODEC_H diff --git a/include/linux/mfd/abx500/ab8500-gpadc.h b/include/linux/mfd/abx500/ab8500-gpadc.h new file mode 100644 index 0000000000..49ded00104 --- /dev/null +++ b/include/linux/mfd/abx500/ab8500-gpadc.h @@ -0,0 +1,75 @@ +/* + * Copyright (C) 2010 ST-Ericsson SA + * Licensed under GPLv2. 
+ * + * Author: Arun R Murthy + * Author: Daniel Willerud + * Author: M'boumba Cedric Madianga + */ + +#ifndef _AB8500_GPADC_H +#define _AB8500_GPADC_H + +/* GPADC source: From datasheet(ADCSwSel[4:0] in GPADCCtrl2 + * and ADCHwSel[4:0] in GPADCCtrl3 ) */ +#define BAT_CTRL 0x01 +#define BTEMP_BALL 0x02 +#define MAIN_CHARGER_V 0x03 +#define ACC_DETECT1 0x04 +#define ACC_DETECT2 0x05 +#define ADC_AUX1 0x06 +#define ADC_AUX2 0x07 +#define MAIN_BAT_V 0x08 +#define VBUS_V 0x09 +#define MAIN_CHARGER_C 0x0A +#define USB_CHARGER_C 0x0B +#define BK_BAT_V 0x0C +#define DIE_TEMP 0x0D +#define USB_ID 0x0E +#define XTAL_TEMP 0x12 +#define VBAT_TRUE_MEAS 0x13 +#define BAT_CTRL_AND_IBAT 0x1C +#define VBAT_MEAS_AND_IBAT 0x1D +#define VBAT_TRUE_MEAS_AND_IBAT 0x1E +#define BAT_TEMP_AND_IBAT 0x1F + +/* Virtual channel used only for ibat conversion to ampere + * Battery current conversion (ibat) cannot be requested as a single conversion + * but it is always in combination with other input requests + */ +#define IBAT_VIRTUAL_CHANNEL 0xFF + +#define SAMPLE_1 1 +#define SAMPLE_4 4 +#define SAMPLE_8 8 +#define SAMPLE_16 16 +#define RISING_EDGE 0 +#define FALLING_EDGE 1 + +/* Arbitrary ADC conversion type constants */ +#define ADC_SW 0 +#define ADC_HW 1 + +struct ab8500_gpadc; + +struct ab8500_gpadc *ab8500_gpadc_get(char *name); +int ab8500_gpadc_sw_hw_convert(struct ab8500_gpadc *gpadc, u8 channel, + u8 avg_sample, u8 trig_edge, u8 trig_timer, u8 conv_type); +static inline int ab8500_gpadc_convert(struct ab8500_gpadc *gpadc, u8 channel) +{ + return ab8500_gpadc_sw_hw_convert(gpadc, channel, + SAMPLE_16, 0, 0, ADC_SW); +} + +int ab8500_gpadc_read_raw(struct ab8500_gpadc *gpadc, u8 channel, + u8 avg_sample, u8 trig_edge, u8 trig_timer, u8 conv_type); +int ab8500_gpadc_double_read_raw(struct ab8500_gpadc *gpadc, u8 channel, + u8 avg_sample, u8 trig_edge, u8 trig_timer, u8 conv_type, + int *ibat); +int ab8500_gpadc_ad_to_voltage(struct ab8500_gpadc *gpadc, + u8 channel, int ad_value); +void 
ab8540_gpadc_get_otp(struct ab8500_gpadc *gpadc, + u16 *vmain_l, u16 *vmain_h, u16 *btemp_l, u16 *btemp_h, + u16 *vbat_l, u16 *vbat_h, u16 *ibat_l, u16 *ibat_h); + +#endif /* _AB8500_GPADC_H */ diff --git a/include/linux/mfd/abx500/ab8500-sysctrl.h b/include/linux/mfd/abx500/ab8500-sysctrl.h index 825f6059d4..01024d1aed 100644 --- a/include/linux/mfd/abx500/ab8500-sysctrl.h +++ b/include/linux/mfd/abx500/ab8500-sysctrl.h @@ -1,7 +1,7 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright (C) ST-Ericsson SA 2010 * Author: Mattias Nilsson for ST Ericsson. + * License terms: GNU General Public License (GPL) version 2 */ #ifndef __AB8500_SYSCTRL_H #define __AB8500_SYSCTRL_H diff --git a/include/linux/mfd/abx500/ab8500.h b/include/linux/mfd/abx500/ab8500.h index 302a330c5c..d33c245e75 100644 --- a/include/linux/mfd/abx500/ab8500.h +++ b/include/linux/mfd/abx500/ab8500.h @@ -1,7 +1,7 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright (C) ST-Ericsson SA 2010 * + * License Terms: GNU General Public License v2 * Author: Srinidhi Kasagar */ #ifndef MFD_AB8500_H @@ -368,6 +368,7 @@ struct ab8500 { int it_latchhier_num; }; +struct ab8500_regulator_platform_data; struct ab8500_codec_platform_data; struct ab8500_sysctrl_platform_data; @@ -375,9 +376,11 @@ struct ab8500_sysctrl_platform_data; * struct ab8500_platform_data - AB8500 platform data * @irq_base: start of AB8500 IRQs, AB8500_NR_IRQS will be used * @init: board-specific initialization after detection of ab8500 + * @regulator: machine-specific constraints for regulators */ struct ab8500_platform_data { void (*init) (struct ab8500 *); + struct ab8500_regulator_platform_data *regulator; struct ab8500_codec_platform_data *codec; struct ab8500_sysctrl_platform_data *sysctrl; }; diff --git a/include/linux/mfd/abx500/ux500_chargalg.h b/include/linux/mfd/abx500/ux500_chargalg.h index bc3819dc33..67703f23e7 100644 --- a/include/linux/mfd/abx500/ux500_chargalg.h +++ b/include/linux/mfd/abx500/ux500_chargalg.h 
@@ -1,7 +1,7 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright (C) ST-Ericsson SA 2012 * Author: Johan Gardsmark for ST-Ericsson. + * License terms: GNU General Public License (GPL), version 2 */ #ifndef _UX500_CHARGALG_H @@ -15,7 +15,7 @@ * - POWER_SUPPLY_TYPE_USB, * because only them store as drv_data pointer to struct ux500_charger. */ -#define psy_to_ux500_charger(x) power_supply_get_drvdata(x) +#define psy_to_ux500_charger(x) power_supply_get_drvdata(x) /* Forward declaration */ struct ux500_charger; @@ -25,6 +25,8 @@ struct ux500_charger_ops { int (*check_enable) (struct ux500_charger *, int, int); int (*kick_wd) (struct ux500_charger *); int (*update_curr) (struct ux500_charger *, int); + int (*pp_enable) (struct ux500_charger *, bool); + int (*pre_chg_enable) (struct ux500_charger *, bool); }; /** @@ -35,6 +37,7 @@ struct ux500_charger_ops { * @max_out_curr maximum output charger current in mA * @enabled indicates if this charger is used or not * @external external charger unit (pm2xxx) + * @power_path USB power path support */ struct ux500_charger { struct power_supply *psy; @@ -44,6 +47,7 @@ struct ux500_charger { int wdt_refresh; bool enabled; bool external; + bool power_path; }; extern struct blocking_notifier_head charger_notifier_list; diff --git a/include/linux/mfd/ac100.h b/include/linux/mfd/ac100.h index 88005c3a1b..3c148f196b 100644 --- a/include/linux/mfd/ac100.h +++ b/include/linux/mfd/ac100.h @@ -1,10 +1,13 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * Functions and registers to access AC100 codec / RTC combo IC. * * Copyright (C) 2016 Chen-Yu Tsai * * Chen-Yu Tsai + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
*/ #ifndef __LINUX_MFD_AC100_H diff --git a/include/linux/mfd/adp5520.h b/include/linux/mfd/adp5520.h index 9a14f80ec4..ac37558a46 100644 --- a/include/linux/mfd/adp5520.h +++ b/include/linux/mfd/adp5520.h @@ -1,9 +1,10 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * Definitions and platform data for Analog Devices * ADP5520/ADP5501 MFD PMICs (Backlight, LED, GPIO and Keys) * * Copyright 2009 Analog Devices Inc. + * + * Licensed under the GPL-2 or later. */ diff --git a/include/linux/mfd/altera-a10sr.h b/include/linux/mfd/altera-a10sr.h index d616da4b3c..45a5e6e7db 100644 --- a/include/linux/mfd/altera-a10sr.h +++ b/include/linux/mfd/altera-a10sr.h @@ -1,7 +1,18 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright Intel Corporation (C) 2014-2016. All Rights Reserved * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program. If not, see . 
+ * * Declarations for Altera Arria10 MAX5 System Resource Chip * * Adapted from DA9052 diff --git a/include/linux/mfd/arizona/core.h b/include/linux/mfd/arizona/core.h index 6d6f96b2b2..b31b3be7f8 100644 --- a/include/linux/mfd/arizona/core.h +++ b/include/linux/mfd/arizona/core.h @@ -1,10 +1,13 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * Arizona MFD internals * * Copyright 2012 Wolfson Microelectronics plc * * Author: Mark Brown + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. */ #ifndef _WM_ARIZONA_CORE_H diff --git a/include/linux/mfd/arizona/pdata.h b/include/linux/mfd/arizona/pdata.h index 2d13bbea4f..64faeeff69 100644 --- a/include/linux/mfd/arizona/pdata.h +++ b/include/linux/mfd/arizona/pdata.h @@ -1,16 +1,17 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * Platform data for Arizona devices * * Copyright 2012 Wolfson Microelectronics. PLC. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
*/ #ifndef _ARIZONA_PDATA_H #define _ARIZONA_PDATA_H #include -#include -#include #define ARIZONA_GPN_DIR_MASK 0x8000 /* GPN_DIR */ #define ARIZONA_GPN_DIR_SHIFT 15 /* GPN_DIR */ @@ -53,7 +54,6 @@ #define ARIZONA_MAX_PDM_SPK 2 struct regulator_init_data; -struct gpio_desc; struct arizona_micbias { int mV; /** Regulated voltage */ @@ -75,13 +75,14 @@ struct arizona_micd_range { }; struct arizona_pdata { - struct gpio_desc *reset; /** GPIO controlling /RESET, if any */ + int reset; /** GPIO controlling /RESET, if any */ + int ldoena; /** GPIO controlling LODENA, if any */ /** Regulator configuration for MICVDD */ - struct arizona_micsupp_pdata micvdd; + struct regulator_init_data *micvdd; /** Regulator configuration for LDO1 */ - struct arizona_ldo1_pdata ldo1; + struct regulator_init_data *ldo1; /** If a direct 32kHz clock is provided on an MCLK specify it here */ int clk32k_src; @@ -172,9 +173,6 @@ struct arizona_pdata { /** Mode for outputs */ int out_mono[ARIZONA_MAX_OUTPUT]; - /** Limit output volumes */ - unsigned int out_vol_limit[2 * ARIZONA_MAX_OUTPUT]; - /** PDM speaker mute setting */ unsigned int spk_mute[ARIZONA_MAX_PDM_SPK]; diff --git a/include/linux/mfd/arizona/registers.h b/include/linux/mfd/arizona/registers.h index 49e24d1de8..0d06c5d0af 100644 --- a/include/linux/mfd/arizona/registers.h +++ b/include/linux/mfd/arizona/registers.h @@ -1,10 +1,13 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * ARIZONA register definitions * * Copyright 2012 Wolfson Microelectronics plc * * Author: Mark Brown + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
*/ #ifndef _ARIZONA_REGISTERS_H @@ -1186,6 +1189,13 @@ #define ARIZONA_DSP4_SCRATCH_1 0x1441 #define ARIZONA_DSP4_SCRATCH_2 0x1442 #define ARIZONA_DSP4_SCRATCH_3 0x1443 +#define ARIZONA_FRF_COEFF_1 0x1700 +#define ARIZONA_FRF_COEFF_2 0x1701 +#define ARIZONA_FRF_COEFF_3 0x1702 +#define ARIZONA_FRF_COEFF_4 0x1703 +#define ARIZONA_V2_DAC_COMP_1 0x1704 +#define ARIZONA_V2_DAC_COMP_2 0x1705 + /* * Field Definitions. diff --git a/include/linux/mfd/as3711.h b/include/linux/mfd/as3711.h index 4be16b4d2c..34cc85864b 100644 --- a/include/linux/mfd/as3711.h +++ b/include/linux/mfd/as3711.h @@ -1,9 +1,12 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * AS3711 PMIC MFC driver header * * Copyright (C) 2012 Renesas Electronics Corporation * Author: Guennadi Liakhovetski, + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the version 2 of the GNU General Public License as + * published by the Free Software Foundation */ #ifndef MFD_AS3711_H @@ -105,9 +108,9 @@ struct as3711_regulator_pdata { }; struct as3711_bl_pdata { - bool su1_fb; + const char *su1_fb; int su1_max_uA; - bool su2_fb; + const char *su2_fb; int su2_max_uA; enum as3711_su2_feedback su2_feedback; enum as3711_su2_fbprot su2_fbprot; diff --git a/include/linux/mfd/as3722.h b/include/linux/mfd/as3722.h index 5162dfc7c2..51e6f94145 100644 --- a/include/linux/mfd/as3722.h +++ b/include/linux/mfd/as3722.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * as3722 definitions * @@ -7,6 +6,21 @@ * * Author: Florian Lobmaier * Author: Laxman Dewangan + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. 
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + * */ #ifndef __LINUX_MFD_AS3722_H__ @@ -282,8 +296,6 @@ #define AS3722_ADC1_CONV_NOTREADY BIT(7) #define AS3722_ADC1_SOURCE_SELECT_MASK 0x1F -#define AS3722_CTRL_SEQU1_AC_OK_PWR_ON BIT(0) - /* GPIO modes */ #define AS3722_GPIO_MODE_MASK 0x07 #define AS3722_GPIO_MODE_INPUT 0x00 @@ -379,7 +391,6 @@ struct as3722 { unsigned long irq_flags; bool en_intern_int_pullup; bool en_intern_i2c_pullup; - bool en_ac_ok_pwr_on; struct regmap_irq_chip_data *irq_data; }; diff --git a/include/linux/mfd/asic3.h b/include/linux/mfd/asic3.h index 61e686dbaa..e1148d037e 100644 --- a/include/linux/mfd/asic3.h +++ b/include/linux/mfd/asic3.h @@ -1,9 +1,12 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * include/linux/mfd/asic3.h * * Compaq ASIC3 headers. * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * * Copyright 2001 Compaq Computer Corporation. * Copyright 2007-2008 OpenedHand Ltd. 
*/ diff --git a/include/linux/mfd/atmel-hlcdc.h b/include/linux/mfd/atmel-hlcdc.h index a186119a49..1279ab1644 100644 --- a/include/linux/mfd/atmel-hlcdc.h +++ b/include/linux/mfd/atmel-hlcdc.h @@ -1,9 +1,20 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright (C) 2014 Free Electrons * Copyright (C) 2014 Atmel * * Author: Boris BREZILLON + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published by + * the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program. If not, see . */ #ifndef __LINUX_MFD_HLCDC_H diff --git a/include/linux/mfd/axp20x.h b/include/linux/mfd/axp20x.h index 9ab0e2fca7..fec597fb34 100644 --- a/include/linux/mfd/axp20x.h +++ b/include/linux/mfd/axp20x.h @@ -1,8 +1,11 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * Functions and registers to access AXP20X power management chip. * * Copyright (C) 2013, Carlo Caione + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
*/ #ifndef __LINUX_MFD_AXP20X_H @@ -10,17 +13,15 @@ #include -enum axp20x_variants { +enum { AXP152_ID = 0, AXP202_ID, AXP209_ID, AXP221_ID, AXP223_ID, AXP288_ID, - AXP803_ID, AXP806_ID, AXP809_ID, - AXP813_ID, NR_AXP20X_VARIANTS, }; @@ -32,7 +33,7 @@ enum axp20x_variants { #define AXP152_ALDO_OP_MODE 0x13 #define AXP152_LDO0_CTRL 0x15 #define AXP152_DCDC2_V_OUT 0x23 -#define AXP152_DCDC2_V_RAMP 0x25 +#define AXP152_DCDC2_V_SCAL 0x25 #define AXP152_DCDC1_V_OUT 0x26 #define AXP152_DCDC3_V_OUT 0x27 #define AXP152_ALDO12_V_OUT 0x28 @@ -50,7 +51,7 @@ enum axp20x_variants { #define AXP20X_USB_OTG_STATUS 0x02 #define AXP20X_PWR_OUT_CTRL 0x12 #define AXP20X_DCDC2_V_OUT 0x23 -#define AXP20X_DCDC2_LDO3_V_RAMP 0x25 +#define AXP20X_DCDC2_LDO3_V_SCAL 0x25 #define AXP20X_DCDC3_V_OUT 0x27 #define AXP20X_LDO24_V_OUT 0x28 #define AXP20X_LDO3_V_OUT 0x29 @@ -114,22 +115,6 @@ enum axp20x_variants { #define AXP806_CLDO2_V_CTRL 0x25 #define AXP806_CLDO3_V_CTRL 0x26 #define AXP806_VREF_TEMP_WARN_L 0xf3 -#define AXP806_BUS_ADDR_EXT 0xfe -#define AXP806_REG_ADDR_EXT 0xff - -#define AXP803_POLYPHASE_CTRL 0x14 -#define AXP803_FLDO1_V_OUT 0x1c -#define AXP803_FLDO2_V_OUT 0x1d -#define AXP803_DCDC1_V_OUT 0x20 -#define AXP803_DCDC2_V_OUT 0x21 -#define AXP803_DCDC3_V_OUT 0x22 -#define AXP803_DCDC4_V_OUT 0x23 -#define AXP803_DCDC5_V_OUT 0x24 -#define AXP803_DCDC6_V_OUT 0x25 -#define AXP803_DCDC_FREQ_CTRL 0x3b - -/* Other DCDC regulator control registers are the same as AXP803 */ -#define AXP813_DCDC7_V_OUT 0x26 /* Interrupt */ #define AXP152_IRQ1_EN 0x40 @@ -241,30 +226,13 @@ enum axp20x_variants { #define AXP20X_OCV_MAX 0xf /* AXP22X specific registers */ -#define AXP22X_PMIC_TEMP_H 0x56 -#define AXP22X_PMIC_TEMP_L 0x57 -#define AXP22X_TS_ADC_H 0x58 -#define AXP22X_TS_ADC_L 0x59 #define AXP22X_BATLOW_THRES1 0xe6 -/* AXP288/AXP803 specific registers */ -#define AXP288_POWER_REASON 0x02 -#define AXP288_BC_GLOBAL 0x2c -#define AXP288_BC_VBUS_CNTL 0x2d -#define AXP288_BC_USB_STAT 0x2e -#define 
AXP288_BC_DET_STAT 0x2f +/* AXP288 specific registers */ #define AXP288_PMIC_ADC_H 0x56 #define AXP288_PMIC_ADC_L 0x57 -#define AXP288_TS_ADC_H 0x58 -#define AXP288_TS_ADC_L 0x59 -#define AXP288_GP_ADC_H 0x5a -#define AXP288_GP_ADC_L 0x5b #define AXP288_ADC_TS_PIN_CTRL 0x84 -#define AXP288_RT_BATT_V_H 0xa0 -#define AXP288_RT_BATT_V_L 0xa1 - -#define AXP813_ACIN_PATH_CTRL 0x3a -#define AXP813_ADC_RATE 0x85 +#define AXP288_PMIC_ADC_EN 0x84 /* Fuel Gauge */ #define AXP288_FG_RDC1_REG 0xba @@ -365,60 +333,6 @@ enum { AXP809_REG_ID_MAX, }; -enum { - AXP803_DCDC1 = 0, - AXP803_DCDC2, - AXP803_DCDC3, - AXP803_DCDC4, - AXP803_DCDC5, - AXP803_DCDC6, - AXP803_DC1SW, - AXP803_ALDO1, - AXP803_ALDO2, - AXP803_ALDO3, - AXP803_DLDO1, - AXP803_DLDO2, - AXP803_DLDO3, - AXP803_DLDO4, - AXP803_ELDO1, - AXP803_ELDO2, - AXP803_ELDO3, - AXP803_FLDO1, - AXP803_FLDO2, - AXP803_RTC_LDO, - AXP803_LDO_IO0, - AXP803_LDO_IO1, - AXP803_REG_ID_MAX, -}; - -enum { - AXP813_DCDC1 = 0, - AXP813_DCDC2, - AXP813_DCDC3, - AXP813_DCDC4, - AXP813_DCDC5, - AXP813_DCDC6, - AXP813_DCDC7, - AXP813_ALDO1, - AXP813_ALDO2, - AXP813_ALDO3, - AXP813_DLDO1, - AXP813_DLDO2, - AXP813_DLDO3, - AXP813_DLDO4, - AXP813_ELDO1, - AXP813_ELDO2, - AXP813_ELDO3, - AXP813_FLDO1, - AXP813_FLDO2, - AXP813_FLDO3, - AXP813_RTC_LDO, - AXP813_LDO_IO0, - AXP813_LDO_IO1, - AXP813_SW, - AXP813_REG_ID_MAX, -}; - /* IRQs */ enum { AXP152_IRQ_LDO0IN_CONNECT = 1, @@ -545,43 +459,6 @@ enum axp288_irqs { AXP288_IRQ_BC_USB_CHNG, }; -enum axp803_irqs { - AXP803_IRQ_ACIN_OVER_V = 1, - AXP803_IRQ_ACIN_PLUGIN, - AXP803_IRQ_ACIN_REMOVAL, - AXP803_IRQ_VBUS_OVER_V, - AXP803_IRQ_VBUS_PLUGIN, - AXP803_IRQ_VBUS_REMOVAL, - AXP803_IRQ_BATT_PLUGIN, - AXP803_IRQ_BATT_REMOVAL, - AXP803_IRQ_BATT_ENT_ACT_MODE, - AXP803_IRQ_BATT_EXIT_ACT_MODE, - AXP803_IRQ_CHARG, - AXP803_IRQ_CHARG_DONE, - AXP803_IRQ_BATT_CHG_TEMP_HIGH, - AXP803_IRQ_BATT_CHG_TEMP_HIGH_END, - AXP803_IRQ_BATT_CHG_TEMP_LOW, - AXP803_IRQ_BATT_CHG_TEMP_LOW_END, - AXP803_IRQ_BATT_ACT_TEMP_HIGH, - 
AXP803_IRQ_BATT_ACT_TEMP_HIGH_END, - AXP803_IRQ_BATT_ACT_TEMP_LOW, - AXP803_IRQ_BATT_ACT_TEMP_LOW_END, - AXP803_IRQ_DIE_TEMP_HIGH, - AXP803_IRQ_GPADC, - AXP803_IRQ_LOW_PWR_LVL1, - AXP803_IRQ_LOW_PWR_LVL2, - AXP803_IRQ_TIMER, - AXP803_IRQ_PEK_RIS_EDGE, - AXP803_IRQ_PEK_FAL_EDGE, - AXP803_IRQ_PEK_SHORT, - AXP803_IRQ_PEK_LONG, - AXP803_IRQ_PEK_OVER_OFF, - AXP803_IRQ_GPIO1_INPUT, - AXP803_IRQ_GPIO0_INPUT, - AXP803_IRQ_BC_USB_CHNG, - AXP803_IRQ_MV_CHNG, -}; - enum axp806_irqs { AXP806_IRQ_DIE_TEMP_HIGH_LV1, AXP806_IRQ_DIE_TEMP_HIGH_LV2, @@ -590,11 +467,11 @@ enum axp806_irqs { AXP806_IRQ_DCDCC_V_LOW, AXP806_IRQ_DCDCD_V_LOW, AXP806_IRQ_DCDCE_V_LOW, - AXP806_IRQ_POK_LONG, - AXP806_IRQ_POK_SHORT, + AXP806_IRQ_PWROK_LONG, + AXP806_IRQ_PWROK_SHORT, AXP806_IRQ_WAKEUP, - AXP806_IRQ_POK_FALL, - AXP806_IRQ_POK_RISE, + AXP806_IRQ_PWROK_FALL, + AXP806_IRQ_PWROK_RISE, }; enum axp809_irqs { @@ -632,19 +509,57 @@ enum axp809_irqs { AXP809_IRQ_GPIO0_INPUT, }; +#define AXP288_TS_ADC_H 0x58 +#define AXP288_TS_ADC_L 0x59 +#define AXP288_GP_ADC_H 0x5a +#define AXP288_GP_ADC_L 0x5b + struct axp20x_dev { struct device *dev; int irq; - unsigned long irq_flags; struct regmap *regmap; struct regmap_irq_chip_data *regmap_irqc; long variant; int nr_cells; - const struct mfd_cell *cells; + struct mfd_cell *cells; const struct regmap_config *regmap_cfg; const struct regmap_irq_chip *regmap_irq_chip; }; +#define BATTID_LEN 64 +#define OCV_CURVE_SIZE 32 +#define MAX_THERM_CURVE_SIZE 25 +#define PD_DEF_MIN_TEMP 0 +#define PD_DEF_MAX_TEMP 55 + +struct axp20x_fg_pdata { + char battid[BATTID_LEN + 1]; + int design_cap; + int min_volt; + int max_volt; + int max_temp; + int min_temp; + int cap1; + int cap0; + int rdc1; + int rdc0; + int ocv_curve[OCV_CURVE_SIZE]; + int tcsz; + int thermistor_curve[MAX_THERM_CURVE_SIZE][2]; +}; + +struct axp20x_chrg_pdata { + int max_cc; + int max_cv; + int def_cc; + int def_cv; +}; + +struct axp288_extcon_pdata { + /* GPIO pin control to switch D+/D- lines b/w PMIC and 
SOC */ + struct gpio_desc *gpio_mux_cntl; +}; + /* generic helper function for reading 9-16 bit wide regs */ static inline int axp20x_read_variable_width(struct regmap *regmap, unsigned int reg, unsigned int width) @@ -690,12 +605,12 @@ int axp20x_match_device(struct axp20x_dev *axp20x); int axp20x_device_probe(struct axp20x_dev *axp20x); /** - * axp20x_device_remove(): Remove a axp20x device + * axp20x_device_remove(): Remove a axp20x device * * @axp20x: axp20x device to remove * * This tells the axp20x core to remove the associated mfd devices */ -void axp20x_device_remove(struct axp20x_dev *axp20x); +int axp20x_device_remove(struct axp20x_dev *axp20x); #endif /* __LINUX_MFD_AXP20X_H */ diff --git a/include/linux/mfd/bcm590xx.h b/include/linux/mfd/bcm590xx.h index 6b8791da61..267aedee1c 100644 --- a/include/linux/mfd/bcm590xx.h +++ b/include/linux/mfd/bcm590xx.h @@ -1,9 +1,14 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * Broadcom BCM590xx PMU * * Copyright 2014 Linaro Limited * Author: Matt Porter + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + * */ #ifndef __LINUX_MFD_BCM590XX_H diff --git a/include/linux/mfd/core.h b/include/linux/mfd/core.h index 0bc7cba798..99c0395fe1 100644 --- a/include/linux/mfd/core.h +++ b/include/linux/mfd/core.h @@ -1,10 +1,14 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * drivers/mfd/mfd-core.h * * core MFD support * Copyright (c) 2006 Ian Molton * Copyright (c) 2007 Dmitry Baryshkov + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ * */ #ifndef MFD_CORE_H @@ -12,45 +16,8 @@ #include -#define MFD_RES_SIZE(arr) (sizeof(arr) / sizeof(struct resource)) - -#define MFD_CELL_ALL(_name, _res, _pdata, _pdsize, _id, _compat, _of_reg, _use_of_reg, _match) \ - { \ - .name = (_name), \ - .resources = (_res), \ - .num_resources = MFD_RES_SIZE((_res)), \ - .platform_data = (_pdata), \ - .pdata_size = (_pdsize), \ - .of_compatible = (_compat), \ - .of_reg = (_of_reg), \ - .use_of_reg = (_use_of_reg), \ - .acpi_match = (_match), \ - .id = (_id), \ - } - -#define MFD_CELL_OF_REG(_name, _res, _pdata, _pdsize, _id, _compat, _of_reg) \ - MFD_CELL_ALL(_name, _res, _pdata, _pdsize, _id, _compat, _of_reg, true, NULL) - -#define MFD_CELL_OF(_name, _res, _pdata, _pdsize, _id, _compat) \ - MFD_CELL_ALL(_name, _res, _pdata, _pdsize, _id, _compat, 0, false, NULL) - -#define MFD_CELL_ACPI(_name, _res, _pdata, _pdsize, _id, _match) \ - MFD_CELL_ALL(_name, _res, _pdata, _pdsize, _id, NULL, 0, false, _match) - -#define MFD_CELL_BASIC(_name, _res, _pdata, _pdsize, _id) \ - MFD_CELL_ALL(_name, _res, _pdata, _pdsize, _id, NULL, 0, false, NULL) - -#define MFD_CELL_RES(_name, _res) \ - MFD_CELL_ALL(_name, _res, NULL, 0, 0, NULL, 0, false, NULL) - -#define MFD_CELL_NAME(_name) \ - MFD_CELL_ALL(_name, NULL, NULL, 0, 0, NULL, 0, false, NULL) - -#define MFD_DEP_LEVEL_NORMAL 0 -#define MFD_DEP_LEVEL_HIGH 1 - struct irq_domain; -struct software_node; +struct property_entry; /* Matches ACPI PNP id, either _HID or _CID, or ACPI _ADR */ struct mfd_cell_acpi_match { @@ -66,8 +33,9 @@ struct mfd_cell_acpi_match { struct mfd_cell { const char *name; int id; - int level; + /* refcounting for multiple drivers to use a single cell */ + atomic_t *usage_count; int (*enable)(struct platform_device *dev); int (*disable)(struct platform_device *dev); @@ -78,25 +46,15 @@ struct mfd_cell { void *platform_data; size_t pdata_size; - /* Software node for the device. 
*/ - const struct software_node *swnode; + /* device properties passed to the sub devices drivers */ + struct property_entry *properties; /* * Device Tree compatible string - * See: Documentation/devicetree/usage-model.rst Chapter 2.2 for details + * See: Documentation/devicetree/usage-model.txt Chapter 2.2 for details */ const char *of_compatible; - /* - * Address as defined in Device Tree. Used to compement 'of_compatible' - * (above) when matching OF nodes with devices that have identical - * compatible strings - */ - const u64 of_reg; - - /* Set to 'true' to use 'of_reg' (above) - allows for of_reg=0 */ - bool use_of_reg; - /* Matches ACPI */ const struct mfd_cell_acpi_match *acpi_match; @@ -132,6 +90,24 @@ struct mfd_cell { extern int mfd_cell_enable(struct platform_device *pdev); extern int mfd_cell_disable(struct platform_device *pdev); +/* + * "Clone" multiple platform devices for a single cell. This is to be used + * for devices that have multiple users of a cell. For example, if an mfd + * driver wants the cell "foo" to be used by a GPIO driver, an MTD driver, + * and a platform driver, the following bit of code would be use after first + * calling mfd_add_devices(): + * + * const char *fclones[] = { "foo-gpio", "foo-mtd" }; + * err = mfd_clone_cells("foo", fclones, ARRAY_SIZE(fclones)); + * + * Each driver (MTD, GPIO, and platform driver) would then register + * platform_drivers for "foo-mtd", "foo-gpio", and "foo", respectively. + * The cell's .enable/.disable hooks should be used to deal with hardware + * resource contention. + */ +extern int mfd_clone_cell(const char *cell, const char **clones, + size_t n_clones); + /* * Given a platform device that's been created by mfd_add_devices(), fetch * the mfd_cell that created it. 
@@ -154,7 +130,6 @@ static inline int mfd_add_hotplug_devices(struct device *parent, } extern void mfd_remove_devices(struct device *parent); -extern void mfd_remove_devices_late(struct device *parent); extern int devm_mfd_add_devices(struct device *dev, int id, const struct mfd_cell *cells, int n_devs, diff --git a/include/linux/mfd/cros_ec.h b/include/linux/mfd/cros_ec.h new file mode 100644 index 0000000000..76f7ef4d3a --- /dev/null +++ b/include/linux/mfd/cros_ec.h @@ -0,0 +1,295 @@ +/* + * ChromeOS EC multi-function device + * + * Copyright (C) 2012 Google, Inc + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#ifndef __LINUX_MFD_CROS_EC_H +#define __LINUX_MFD_CROS_EC_H + +#include +#include +#include +#include +#include + +#define CROS_EC_DEV_NAME "cros_ec" +#define CROS_EC_DEV_PD_NAME "cros_pd" + +/* + * The EC is unresponsive for a time after a reboot command. Add a + * simple delay to make sure that the bus stays locked. + */ +#define EC_REBOOT_DELAY_MS 50 + +/* + * Max bus-specific overhead incurred by request/responses. + * I2C requires 1 additional byte for requests. + * I2C requires 2 additional bytes for responses. + * */ +#define EC_PROTO_VERSION_UNKNOWN 0 +#define EC_MAX_REQUEST_OVERHEAD 1 +#define EC_MAX_RESPONSE_OVERHEAD 2 + +/* + * Command interface between EC and AP, for LPC, I2C and SPI interfaces. 
+ */ +enum { + EC_MSG_TX_HEADER_BYTES = 3, + EC_MSG_TX_TRAILER_BYTES = 1, + EC_MSG_TX_PROTO_BYTES = EC_MSG_TX_HEADER_BYTES + + EC_MSG_TX_TRAILER_BYTES, + EC_MSG_RX_PROTO_BYTES = 3, + + /* Max length of messages for proto 2*/ + EC_PROTO2_MSG_BYTES = EC_PROTO2_MAX_PARAM_SIZE + + EC_MSG_TX_PROTO_BYTES, + + EC_MAX_MSG_BYTES = 64 * 1024, +}; + +/* + * @version: Command version number (often 0) + * @command: Command to send (EC_CMD_...) + * @outsize: Outgoing length in bytes + * @insize: Max number of bytes to accept from EC + * @result: EC's response to the command (separate from communication failure) + * @data: Where to put the incoming data from EC and outgoing data to EC + */ +struct cros_ec_command { + uint32_t version; + uint32_t command; + uint32_t outsize; + uint32_t insize; + uint32_t result; + uint8_t data[0]; +}; + +/** + * struct cros_ec_device - Information about a ChromeOS EC device + * + * @phys_name: name of physical comms layer (e.g. 'i2c-4') + * @dev: Device pointer for physical comms device + * @was_wake_device: true if this device was set to wake the system from + * sleep at the last suspend + * @cmd_readmem: direct read of the EC memory-mapped region, if supported + * @offset is within EC_LPC_ADDR_MEMMAP region. + * @bytes: number of bytes to read. zero means "read a string" (including + * the trailing '\0'). At most only EC_MEMMAP_SIZE bytes can be read. + * Caller must ensure that the buffer is large enough for the result when + * reading a string. + * + * @priv: Private data + * @irq: Interrupt to use + * @id: Device id + * @din: input buffer (for data from EC) + * @dout: output buffer (for data to EC) + * \note + * These two buffers will always be dword-aligned and include enough + * space for up to 7 word-alignment bytes also, so we can ensure that + * the body of the message is always dword-aligned (64-bit). + * We use this alignment to keep ARM and x86 happy. 
Probably word + * alignment would be OK, there might be a small performance advantage + * to using dword. + * @din_size: size of din buffer to allocate (zero to use static din) + * @dout_size: size of dout buffer to allocate (zero to use static dout) + * @wake_enabled: true if this device can wake the system from sleep + * @cmd_xfer: send command to EC and get response + * Returns the number of bytes received if the communication succeeded, but + * that doesn't mean the EC was happy with the command. The caller + * should check msg.result for the EC's result code. + * @pkt_xfer: send packet to EC and get response + * @lock: one transaction at a time + * @mkbp_event_supported: true if this EC supports the MKBP event protocol. + * @event_notifier: interrupt event notifier for transport devices. + * @event_data: raw payload transferred with the MKBP event. + * @event_size: size in bytes of the event data. + */ +struct cros_ec_device { + + /* These are used by other drivers that want to talk to the EC */ + const char *phys_name; + struct device *dev; + bool was_wake_device; + struct class *cros_class; + int (*cmd_readmem)(struct cros_ec_device *ec, unsigned int offset, + unsigned int bytes, void *dest); + + /* These are used to implement the platform-specific interface */ + u16 max_request; + u16 max_response; + u16 max_passthru; + u16 proto_version; + void *priv; + int irq; + u8 *din; + u8 *dout; + int din_size; + int dout_size; + bool wake_enabled; + int (*cmd_xfer)(struct cros_ec_device *ec, + struct cros_ec_command *msg); + int (*pkt_xfer)(struct cros_ec_device *ec, + struct cros_ec_command *msg); + struct mutex lock; + bool mkbp_event_supported; + struct blocking_notifier_head event_notifier; + + struct ec_response_get_next_event event_data; + int event_size; +}; + +/* struct cros_ec_platform - ChromeOS EC platform information + * + * @ec_name: name of EC device (e.g. 'cros-ec', 'cros-pd', ...) + * used in /dev/ and sysfs. 
+ * @cmd_offset: offset to apply for each command. Set when + * registering a device behind another one. + */ +struct cros_ec_platform { + const char *ec_name; + u16 cmd_offset; +}; + +/* + * struct cros_ec_dev - ChromeOS EC device entry point + * + * @class_dev: Device structure used in sysfs + * @cdev: Character device structure in /dev + * @ec_dev: cros_ec_device structure to talk to the physical device + * @dev: pointer to the platform device + * @cmd_offset: offset to apply for each command. + */ +struct cros_ec_dev { + struct device class_dev; + struct cdev cdev; + struct cros_ec_device *ec_dev; + struct device *dev; + u16 cmd_offset; +}; + +/** + * cros_ec_suspend - Handle a suspend operation for the ChromeOS EC device + * + * This can be called by drivers to handle a suspend event. + * + * @ec_dev: Device to suspend + * @return 0 if ok, -ve on error + */ +int cros_ec_suspend(struct cros_ec_device *ec_dev); + +/** + * cros_ec_resume - Handle a resume operation for the ChromeOS EC device + * + * This can be called by drivers to handle a resume event. + * + * @ec_dev: Device to resume + * @return 0 if ok, -ve on error + */ +int cros_ec_resume(struct cros_ec_device *ec_dev); + +/** + * cros_ec_prepare_tx - Prepare an outgoing message in the output buffer + * + * This is intended to be used by all ChromeOS EC drivers, but at present + * only SPI uses it. Once LPC uses the same protocol it can start using it. + * I2C could use it now, with a refactor of the existing code. + * + * @ec_dev: Device to register + * @msg: Message to write + */ +int cros_ec_prepare_tx(struct cros_ec_device *ec_dev, + struct cros_ec_command *msg); + +/** + * cros_ec_check_result - Check ec_msg->result + * + * This is used by ChromeOS EC drivers to check the ec_msg->result for + * errors and to warn about them. 
+ * + * @ec_dev: EC device + * @msg: Message to check + */ +int cros_ec_check_result(struct cros_ec_device *ec_dev, + struct cros_ec_command *msg); + +/** + * cros_ec_cmd_xfer - Send a command to the ChromeOS EC + * + * Call this to send a command to the ChromeOS EC. This should be used + * instead of calling the EC's cmd_xfer() callback directly. + * + * @ec_dev: EC device + * @msg: Message to write + */ +int cros_ec_cmd_xfer(struct cros_ec_device *ec_dev, + struct cros_ec_command *msg); + +/** + * cros_ec_cmd_xfer_status - Send a command to the ChromeOS EC + * + * This function is identical to cros_ec_cmd_xfer, except it returns success + * status only if both the command was transmitted successfully and the EC + * replied with success status. It's not necessary to check msg->result when + * using this function. + * + * @ec_dev: EC device + * @msg: Message to write + * @return: Num. of bytes transferred on success, <0 on failure + */ +int cros_ec_cmd_xfer_status(struct cros_ec_device *ec_dev, + struct cros_ec_command *msg); + +/** + * cros_ec_remove - Remove a ChromeOS EC + * + * Call this to deregister a ChromeOS EC, then clean up any private data. + * + * @ec_dev: Device to register + * @return 0 if ok, -ve on error + */ +int cros_ec_remove(struct cros_ec_device *ec_dev); + +/** + * cros_ec_register - Register a new ChromeOS EC, using the provided info + * + * Before calling this, allocate a pointer to a new device and then fill + * in all the fields up to the --private-- marker. 
+ * + * @ec_dev: Device to register + * @return 0 if ok, -ve on error + */ +int cros_ec_register(struct cros_ec_device *ec_dev); + +/** + * cros_ec_query_all - Query the protocol version supported by the ChromeOS EC + * + * @ec_dev: Device to register + * @return 0 if ok, -ve on error + */ +int cros_ec_query_all(struct cros_ec_device *ec_dev); + +/** + * cros_ec_get_next_event - Fetch next event from the ChromeOS EC + * + * @ec_dev: Device to fetch event from + * + * Returns: 0 on success, Linux error number on failure + */ +int cros_ec_get_next_event(struct cros_ec_device *ec_dev); + +/* sysfs stuff */ +extern struct attribute_group cros_ec_attr_group; +extern struct attribute_group cros_ec_lightbar_attr_group; +extern struct attribute_group cros_ec_vbc_attr_group; + +#endif /* __LINUX_MFD_CROS_EC_H */ diff --git a/include/linux/mfd/cros_ec_commands.h b/include/linux/mfd/cros_ec_commands.h new file mode 100644 index 0000000000..76728ff37d --- /dev/null +++ b/include/linux/mfd/cros_ec_commands.h @@ -0,0 +1,2648 @@ +/* + * Host communication command constants for ChromeOS EC + * + * Copyright (C) 2012 Google, Inc + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * The ChromeOS EC multi function device is used to mux all the requests + * to the EC device for its multiple features: keyboard controller, + * battery charging and regulator control, firmware update. + * + * NOTE: This file is copied verbatim from the ChromeOS EC Open Source + * project in an attempt to make future updates easy to make. 
+ */ + +#ifndef __CROS_EC_COMMANDS_H +#define __CROS_EC_COMMANDS_H + +/* + * Current version of this protocol + * + * TODO(crosbug.com/p/11223): This is effectively useless; protocol is + * determined in other ways. Remove this once the kernel code no longer + * depends on it. + */ +#define EC_PROTO_VERSION 0x00000002 + +/* Command version mask */ +#define EC_VER_MASK(version) (1UL << (version)) + +/* I/O addresses for ACPI commands */ +#define EC_LPC_ADDR_ACPI_DATA 0x62 +#define EC_LPC_ADDR_ACPI_CMD 0x66 + +/* I/O addresses for host command */ +#define EC_LPC_ADDR_HOST_DATA 0x200 +#define EC_LPC_ADDR_HOST_CMD 0x204 + +/* I/O addresses for host command args and params */ +/* Protocol version 2 */ +#define EC_LPC_ADDR_HOST_ARGS 0x800 /* And 0x801, 0x802, 0x803 */ +#define EC_LPC_ADDR_HOST_PARAM 0x804 /* For version 2 params; size is + * EC_PROTO2_MAX_PARAM_SIZE */ +/* Protocol version 3 */ +#define EC_LPC_ADDR_HOST_PACKET 0x800 /* Offset of version 3 packet */ +#define EC_LPC_HOST_PACKET_SIZE 0x100 /* Max size of version 3 packet */ + +/* The actual block is 0x800-0x8ff, but some BIOSes think it's 0x880-0x8ff + * and they tell the kernel that so we have to think of it as two parts. 
*/ +#define EC_HOST_CMD_REGION0 0x800 +#define EC_HOST_CMD_REGION1 0x880 +#define EC_HOST_CMD_REGION_SIZE 0x80 + +/* EC command register bit functions */ +#define EC_LPC_CMDR_DATA (1 << 0) /* Data ready for host to read */ +#define EC_LPC_CMDR_PENDING (1 << 1) /* Write pending to EC */ +#define EC_LPC_CMDR_BUSY (1 << 2) /* EC is busy processing a command */ +#define EC_LPC_CMDR_CMD (1 << 3) /* Last host write was a command */ +#define EC_LPC_CMDR_ACPI_BRST (1 << 4) /* Burst mode (not used) */ +#define EC_LPC_CMDR_SCI (1 << 5) /* SCI event is pending */ +#define EC_LPC_CMDR_SMI (1 << 6) /* SMI event is pending */ + +#define EC_LPC_ADDR_MEMMAP 0x900 +#define EC_MEMMAP_SIZE 255 /* ACPI IO buffer max is 255 bytes */ +#define EC_MEMMAP_TEXT_MAX 8 /* Size of a string in the memory map */ + +/* The offset address of each type of data in mapped memory. */ +#define EC_MEMMAP_TEMP_SENSOR 0x00 /* Temp sensors 0x00 - 0x0f */ +#define EC_MEMMAP_FAN 0x10 /* Fan speeds 0x10 - 0x17 */ +#define EC_MEMMAP_TEMP_SENSOR_B 0x18 /* More temp sensors 0x18 - 0x1f */ +#define EC_MEMMAP_ID 0x20 /* 0x20 == 'E', 0x21 == 'C' */ +#define EC_MEMMAP_ID_VERSION 0x22 /* Version of data in 0x20 - 0x2f */ +#define EC_MEMMAP_THERMAL_VERSION 0x23 /* Version of data in 0x00 - 0x1f */ +#define EC_MEMMAP_BATTERY_VERSION 0x24 /* Version of data in 0x40 - 0x7f */ +#define EC_MEMMAP_SWITCHES_VERSION 0x25 /* Version of data in 0x30 - 0x33 */ +#define EC_MEMMAP_EVENTS_VERSION 0x26 /* Version of data in 0x34 - 0x3f */ +#define EC_MEMMAP_HOST_CMD_FLAGS 0x27 /* Host cmd interface flags (8 bits) */ +/* Unused 0x28 - 0x2f */ +#define EC_MEMMAP_SWITCHES 0x30 /* 8 bits */ +/* Unused 0x31 - 0x33 */ +#define EC_MEMMAP_HOST_EVENTS 0x34 /* 32 bits */ +/* Reserve 0x38 - 0x3f for additional host event-related stuff */ +/* Battery values are all 32 bits */ +#define EC_MEMMAP_BATT_VOLT 0x40 /* Battery Present Voltage */ +#define EC_MEMMAP_BATT_RATE 0x44 /* Battery Present Rate */ +#define EC_MEMMAP_BATT_CAP 0x48 /* Battery 
Remaining Capacity */ +#define EC_MEMMAP_BATT_FLAG 0x4c /* Battery State, defined below */ +#define EC_MEMMAP_BATT_DCAP 0x50 /* Battery Design Capacity */ +#define EC_MEMMAP_BATT_DVLT 0x54 /* Battery Design Voltage */ +#define EC_MEMMAP_BATT_LFCC 0x58 /* Battery Last Full Charge Capacity */ +#define EC_MEMMAP_BATT_CCNT 0x5c /* Battery Cycle Count */ +/* Strings are all 8 bytes (EC_MEMMAP_TEXT_MAX) */ +#define EC_MEMMAP_BATT_MFGR 0x60 /* Battery Manufacturer String */ +#define EC_MEMMAP_BATT_MODEL 0x68 /* Battery Model Number String */ +#define EC_MEMMAP_BATT_SERIAL 0x70 /* Battery Serial Number String */ +#define EC_MEMMAP_BATT_TYPE 0x78 /* Battery Type String */ +#define EC_MEMMAP_ALS 0x80 /* ALS readings in lux (2 X 16 bits) */ +/* Unused 0x84 - 0x8f */ +#define EC_MEMMAP_ACC_STATUS 0x90 /* Accelerometer status (8 bits )*/ +/* Unused 0x91 */ +#define EC_MEMMAP_ACC_DATA 0x92 /* Accelerometer data 0x92 - 0x9f */ +#define EC_MEMMAP_GYRO_DATA 0xa0 /* Gyroscope data 0xa0 - 0xa5 */ +/* Unused 0xa6 - 0xfe (remember, 0xff is NOT part of the memmap region) */ + + +/* Define the format of the accelerometer mapped memory status byte. */ +#define EC_MEMMAP_ACC_STATUS_SAMPLE_ID_MASK 0x0f +#define EC_MEMMAP_ACC_STATUS_BUSY_BIT (1 << 4) +#define EC_MEMMAP_ACC_STATUS_PRESENCE_BIT (1 << 7) + +/* Number of temp sensors at EC_MEMMAP_TEMP_SENSOR */ +#define EC_TEMP_SENSOR_ENTRIES 16 +/* + * Number of temp sensors at EC_MEMMAP_TEMP_SENSOR_B. + * + * Valid only if EC_MEMMAP_THERMAL_VERSION returns >= 2. + */ +#define EC_TEMP_SENSOR_B_ENTRIES 8 + +/* Special values for mapped temperature sensors */ +#define EC_TEMP_SENSOR_NOT_PRESENT 0xff +#define EC_TEMP_SENSOR_ERROR 0xfe +#define EC_TEMP_SENSOR_NOT_POWERED 0xfd +#define EC_TEMP_SENSOR_NOT_CALIBRATED 0xfc +/* + * The offset of temperature value stored in mapped memory. This allows + * reporting a temperature range of 200K to 454K = -73C to 181C. 
+ */ +#define EC_TEMP_SENSOR_OFFSET 200 + +/* + * Number of ALS readings at EC_MEMMAP_ALS + */ +#define EC_ALS_ENTRIES 2 + +/* + * The default value a temperature sensor will return when it is present but + * has not been read this boot. This is a reasonable number to avoid + * triggering alarms on the host. + */ +#define EC_TEMP_SENSOR_DEFAULT (296 - EC_TEMP_SENSOR_OFFSET) + +#define EC_FAN_SPEED_ENTRIES 4 /* Number of fans at EC_MEMMAP_FAN */ +#define EC_FAN_SPEED_NOT_PRESENT 0xffff /* Entry not present */ +#define EC_FAN_SPEED_STALLED 0xfffe /* Fan stalled */ + +/* Battery bit flags at EC_MEMMAP_BATT_FLAG. */ +#define EC_BATT_FLAG_AC_PRESENT 0x01 +#define EC_BATT_FLAG_BATT_PRESENT 0x02 +#define EC_BATT_FLAG_DISCHARGING 0x04 +#define EC_BATT_FLAG_CHARGING 0x08 +#define EC_BATT_FLAG_LEVEL_CRITICAL 0x10 + +/* Switch flags at EC_MEMMAP_SWITCHES */ +#define EC_SWITCH_LID_OPEN 0x01 +#define EC_SWITCH_POWER_BUTTON_PRESSED 0x02 +#define EC_SWITCH_WRITE_PROTECT_DISABLED 0x04 +/* Was recovery requested via keyboard; now unused. */ +#define EC_SWITCH_IGNORE1 0x08 +/* Recovery requested via dedicated signal (from servo board) */ +#define EC_SWITCH_DEDICATED_RECOVERY 0x10 +/* Was fake developer mode switch; now unused. Remove in next refactor. */ +#define EC_SWITCH_IGNORE0 0x20 + +/* Host command interface flags */ +/* Host command interface supports LPC args (LPC interface only) */ +#define EC_HOST_CMD_FLAG_LPC_ARGS_SUPPORTED 0x01 +/* Host command interface supports version 3 protocol */ +#define EC_HOST_CMD_FLAG_VERSION_3 0x02 + +/* Wireless switch flags */ +#define EC_WIRELESS_SWITCH_ALL ~0x00 /* All flags */ +#define EC_WIRELESS_SWITCH_WLAN 0x01 /* WLAN radio */ +#define EC_WIRELESS_SWITCH_BLUETOOTH 0x02 /* Bluetooth radio */ +#define EC_WIRELESS_SWITCH_WWAN 0x04 /* WWAN power */ +#define EC_WIRELESS_SWITCH_WLAN_POWER 0x08 /* WLAN power */ + +/* + * This header file is used in coreboot both in C and ACPI code. 
The ACPI code + * is pre-processed to handle constants but the ASL compiler is unable to + * handle actual C code so keep it separate. + */ +#ifndef __ACPI__ + +/* + * Define __packed if someone hasn't beat us to it. Linux kernel style + * checking prefers __packed over __attribute__((packed)). + */ +#ifndef __packed +#define __packed __attribute__((packed)) +#endif + +/* LPC command status byte masks */ +/* EC has written a byte in the data register and host hasn't read it yet */ +#define EC_LPC_STATUS_TO_HOST 0x01 +/* Host has written a command/data byte and the EC hasn't read it yet */ +#define EC_LPC_STATUS_FROM_HOST 0x02 +/* EC is processing a command */ +#define EC_LPC_STATUS_PROCESSING 0x04 +/* Last write to EC was a command, not data */ +#define EC_LPC_STATUS_LAST_CMD 0x08 +/* EC is in burst mode. Unsupported by Chrome EC, so this bit is never set */ +#define EC_LPC_STATUS_BURST_MODE 0x10 +/* SCI event is pending (requesting SCI query) */ +#define EC_LPC_STATUS_SCI_PENDING 0x20 +/* SMI event is pending (requesting SMI query) */ +#define EC_LPC_STATUS_SMI_PENDING 0x40 +/* (reserved) */ +#define EC_LPC_STATUS_RESERVED 0x80 + +/* + * EC is busy. This covers both the EC processing a command, and the host has + * written a new command but the EC hasn't picked it up yet. 
+ */ +#define EC_LPC_STATUS_BUSY_MASK \ + (EC_LPC_STATUS_FROM_HOST | EC_LPC_STATUS_PROCESSING) + +/* Host command response codes */ +enum ec_status { + EC_RES_SUCCESS = 0, + EC_RES_INVALID_COMMAND = 1, + EC_RES_ERROR = 2, + EC_RES_INVALID_PARAM = 3, + EC_RES_ACCESS_DENIED = 4, + EC_RES_INVALID_RESPONSE = 5, + EC_RES_INVALID_VERSION = 6, + EC_RES_INVALID_CHECKSUM = 7, + EC_RES_IN_PROGRESS = 8, /* Accepted, command in progress */ + EC_RES_UNAVAILABLE = 9, /* No response available */ + EC_RES_TIMEOUT = 10, /* We got a timeout */ + EC_RES_OVERFLOW = 11, /* Table / data overflow */ + EC_RES_INVALID_HEADER = 12, /* Header contains invalid data */ + EC_RES_REQUEST_TRUNCATED = 13, /* Didn't get the entire request */ + EC_RES_RESPONSE_TOO_BIG = 14 /* Response was too big to handle */ +}; + +/* + * Host event codes. Note these are 1-based, not 0-based, because ACPI query + * EC command uses code 0 to mean "no event pending". We explicitly specify + * each value in the enum listing so they won't change if we delete/insert an + * item or rearrange the list (it needs to be stable across platforms, not + * just within a single compiled instance). + */ +enum host_event_code { + EC_HOST_EVENT_LID_CLOSED = 1, + EC_HOST_EVENT_LID_OPEN = 2, + EC_HOST_EVENT_POWER_BUTTON = 3, + EC_HOST_EVENT_AC_CONNECTED = 4, + EC_HOST_EVENT_AC_DISCONNECTED = 5, + EC_HOST_EVENT_BATTERY_LOW = 6, + EC_HOST_EVENT_BATTERY_CRITICAL = 7, + EC_HOST_EVENT_BATTERY = 8, + EC_HOST_EVENT_THERMAL_THRESHOLD = 9, + EC_HOST_EVENT_THERMAL_OVERLOAD = 10, + EC_HOST_EVENT_THERMAL = 11, + EC_HOST_EVENT_USB_CHARGER = 12, + EC_HOST_EVENT_KEY_PRESSED = 13, + /* + * EC has finished initializing the host interface. The host can check + * for this event following sending a EC_CMD_REBOOT_EC command to + * determine when the EC is ready to accept subsequent commands. 
+ */ + EC_HOST_EVENT_INTERFACE_READY = 14, + /* Keyboard recovery combo has been pressed */ + EC_HOST_EVENT_KEYBOARD_RECOVERY = 15, + + /* Shutdown due to thermal overload */ + EC_HOST_EVENT_THERMAL_SHUTDOWN = 16, + /* Shutdown due to battery level too low */ + EC_HOST_EVENT_BATTERY_SHUTDOWN = 17, + + /* Suggest that the AP throttle itself */ + EC_HOST_EVENT_THROTTLE_START = 18, + /* Suggest that the AP resume normal speed */ + EC_HOST_EVENT_THROTTLE_STOP = 19, + + /* Hang detect logic detected a hang and host event timeout expired */ + EC_HOST_EVENT_HANG_DETECT = 20, + /* Hang detect logic detected a hang and warm rebooted the AP */ + EC_HOST_EVENT_HANG_REBOOT = 21, + + /* + * The high bit of the event mask is not used as a host event code. If + * it reads back as set, then the entire event mask should be + * considered invalid by the host. This can happen when reading the + * raw event status via EC_MEMMAP_HOST_EVENTS but the LPC interface is + * not initialized on the EC, or improperly configured on the host. + */ + EC_HOST_EVENT_INVALID = 32 +}; +/* Host event mask */ +#define EC_HOST_EVENT_MASK(event_code) (1UL << ((event_code) - 1)) + +/* Arguments at EC_LPC_ADDR_HOST_ARGS */ +struct ec_lpc_host_args { + uint8_t flags; + uint8_t command_version; + uint8_t data_size; + /* + * Checksum; sum of command + flags + command_version + data_size + + * all params/response data bytes. + */ + uint8_t checksum; +} __packed; + +/* Flags for ec_lpc_host_args.flags */ +/* + * Args are from host. Data area at EC_LPC_ADDR_HOST_PARAM contains command + * params. + * + * If EC gets a command and this flag is not set, this is an old-style command. + * Command version is 0 and params from host are at EC_LPC_ADDR_OLD_PARAM with + * unknown length. EC must respond with an old-style response (that is, + * withouth setting EC_HOST_ARGS_FLAG_TO_HOST). + */ +#define EC_HOST_ARGS_FLAG_FROM_HOST 0x01 +/* + * Args are from EC. Data area at EC_LPC_ADDR_HOST_PARAM contains response. 
+ * + * If EC responds to a command and this flag is not set, this is an old-style + * response. Command version is 0 and response data from EC is at + * EC_LPC_ADDR_OLD_PARAM with unknown length. + */ +#define EC_HOST_ARGS_FLAG_TO_HOST 0x02 + +/*****************************************************************************/ +/* + * Byte codes returned by EC over SPI interface. + * + * These can be used by the AP to debug the EC interface, and to determine + * when the EC is not in a state where it will ever get around to responding + * to the AP. + * + * Example of sequence of bytes read from EC for a current good transfer: + * 1. - - AP asserts chip select (CS#) + * 2. EC_SPI_OLD_READY - AP sends first byte(s) of request + * 3. - - EC starts handling CS# interrupt + * 4. EC_SPI_RECEIVING - AP sends remaining byte(s) of request + * 5. EC_SPI_PROCESSING - EC starts processing request; AP is clocking in + * bytes looking for EC_SPI_FRAME_START + * 6. - - EC finishes processing and sets up response + * 7. EC_SPI_FRAME_START - AP reads frame byte + * 8. (response packet) - AP reads response packet + * 9. EC_SPI_PAST_END - Any additional bytes read by AP + * 10 - - AP deasserts chip select + * 11 - - EC processes CS# interrupt and sets up DMA for + * next request + * + * If the AP is waiting for EC_SPI_FRAME_START and sees any value other than + * the following byte values: + * EC_SPI_OLD_READY + * EC_SPI_RX_READY + * EC_SPI_RECEIVING + * EC_SPI_PROCESSING + * + * Then the EC found an error in the request, or was not ready for the request + * and lost data. The AP should give up waiting for EC_SPI_FRAME_START, + * because the EC is unable to tell when the AP is done sending its request. + */ + +/* + * Framing byte which precedes a response packet from the EC. After sending a + * request, the AP will clock in bytes until it sees the framing byte, then + * clock in the response packet. 
+ */ +#define EC_SPI_FRAME_START 0xec + +/* + * Padding bytes which are clocked out after the end of a response packet. + */ +#define EC_SPI_PAST_END 0xed + +/* + * EC is ready to receive, and has ignored the byte sent by the AP. EC expects + * that the AP will send a valid packet header (starting with + * EC_COMMAND_PROTOCOL_3) in the next 32 bytes. + */ +#define EC_SPI_RX_READY 0xf8 + +/* + * EC has started receiving the request from the AP, but hasn't started + * processing it yet. + */ +#define EC_SPI_RECEIVING 0xf9 + +/* EC has received the entire request from the AP and is processing it. */ +#define EC_SPI_PROCESSING 0xfa + +/* + * EC received bad data from the AP, such as a packet header with an invalid + * length. EC will ignore all data until chip select deasserts. + */ +#define EC_SPI_RX_BAD_DATA 0xfb + +/* + * EC received data from the AP before it was ready. That is, the AP asserted + * chip select and started clocking data before the EC was ready to receive it. + * EC will ignore all data until chip select deasserts. + */ +#define EC_SPI_NOT_READY 0xfc + +/* + * EC was ready to receive a request from the AP. EC has treated the byte sent + * by the AP as part of a request packet, or (for old-style ECs) is processing + * a fully received packet but is not ready to respond yet. 
+ */ +#define EC_SPI_OLD_READY 0xfd + +/*****************************************************************************/ + +/* + * Protocol version 2 for I2C and SPI send a request this way: + * + * 0 EC_CMD_VERSION0 + (command version) + * 1 Command number + * 2 Length of params = N + * 3..N+2 Params, if any + * N+3 8-bit checksum of bytes 0..N+2 + * + * The corresponding response is: + * + * 0 Result code (EC_RES_*) + * 1 Length of params = M + * 2..M+1 Params, if any + * M+2 8-bit checksum of bytes 0..M+1 + */ +#define EC_PROTO2_REQUEST_HEADER_BYTES 3 +#define EC_PROTO2_REQUEST_TRAILER_BYTES 1 +#define EC_PROTO2_REQUEST_OVERHEAD (EC_PROTO2_REQUEST_HEADER_BYTES + \ + EC_PROTO2_REQUEST_TRAILER_BYTES) + +#define EC_PROTO2_RESPONSE_HEADER_BYTES 2 +#define EC_PROTO2_RESPONSE_TRAILER_BYTES 1 +#define EC_PROTO2_RESPONSE_OVERHEAD (EC_PROTO2_RESPONSE_HEADER_BYTES + \ + EC_PROTO2_RESPONSE_TRAILER_BYTES) + +/* Parameter length was limited by the LPC interface */ +#define EC_PROTO2_MAX_PARAM_SIZE 0xfc + +/* Maximum request and response packet sizes for protocol version 2 */ +#define EC_PROTO2_MAX_REQUEST_SIZE (EC_PROTO2_REQUEST_OVERHEAD + \ + EC_PROTO2_MAX_PARAM_SIZE) +#define EC_PROTO2_MAX_RESPONSE_SIZE (EC_PROTO2_RESPONSE_OVERHEAD + \ + EC_PROTO2_MAX_PARAM_SIZE) + +/*****************************************************************************/ + +/* + * Value written to legacy command port / prefix byte to indicate protocol + * 3+ structs are being used. Usage is bus-dependent. + */ +#define EC_COMMAND_PROTOCOL_3 0xda + +#define EC_HOST_REQUEST_VERSION 3 + +/* Version 3 request from host */ +struct ec_host_request { + /* Struct version (=3) + * + * EC will return EC_RES_INVALID_HEADER if it receives a header with a + * version it doesn't know how to parse. + */ + uint8_t struct_version; + + /* + * Checksum of request and data; sum of all bytes including checksum + * should total to 0. 
+ */ + uint8_t checksum; + + /* Command code */ + uint16_t command; + + /* Command version */ + uint8_t command_version; + + /* Unused byte in current protocol version; set to 0 */ + uint8_t reserved; + + /* Length of data which follows this header */ + uint16_t data_len; +} __packed; + +#define EC_HOST_RESPONSE_VERSION 3 + +/* Version 3 response from EC */ +struct ec_host_response { + /* Struct version (=3) */ + uint8_t struct_version; + + /* + * Checksum of response and data; sum of all bytes including checksum + * should total to 0. + */ + uint8_t checksum; + + /* Result code (EC_RES_*) */ + uint16_t result; + + /* Length of data which follows this header */ + uint16_t data_len; + + /* Unused bytes in current protocol version; set to 0 */ + uint16_t reserved; +} __packed; + +/*****************************************************************************/ +/* + * Notes on commands: + * + * Each command is an 16-bit command value. Commands which take params or + * return response data specify structs for that data. If no struct is + * specified, the command does not input or output data, respectively. + * Parameter/response length is implicit in the structs. Some underlying + * communication protocols (I2C, SPI) may add length or checksum headers, but + * those are implementation-dependent and not defined here. + */ + +/*****************************************************************************/ +/* General / test commands */ + +/* + * Get protocol version, used to deal with non-backward compatible protocol + * changes. + */ +#define EC_CMD_PROTO_VERSION 0x00 + +struct ec_response_proto_version { + uint32_t version; +} __packed; + +/* + * Hello. This is a simple command to test the EC is responsive to + * commands. 
+ */ +#define EC_CMD_HELLO 0x01 + +struct ec_params_hello { + uint32_t in_data; /* Pass anything here */ +} __packed; + +struct ec_response_hello { + uint32_t out_data; /* Output will be in_data + 0x01020304 */ +} __packed; + +/* Get version number */ +#define EC_CMD_GET_VERSION 0x02 + +enum ec_current_image { + EC_IMAGE_UNKNOWN = 0, + EC_IMAGE_RO, + EC_IMAGE_RW +}; + +struct ec_response_get_version { + /* Null-terminated version strings for RO, RW */ + char version_string_ro[32]; + char version_string_rw[32]; + char reserved[32]; /* Was previously RW-B string */ + uint32_t current_image; /* One of ec_current_image */ +} __packed; + +/* Read test */ +#define EC_CMD_READ_TEST 0x03 + +struct ec_params_read_test { + uint32_t offset; /* Starting value for read buffer */ + uint32_t size; /* Size to read in bytes */ +} __packed; + +struct ec_response_read_test { + uint32_t data[32]; +} __packed; + +/* + * Get build information + * + * Response is null-terminated string. + */ +#define EC_CMD_GET_BUILD_INFO 0x04 + +/* Get chip info */ +#define EC_CMD_GET_CHIP_INFO 0x05 + +struct ec_response_get_chip_info { + /* Null-terminated strings */ + char vendor[32]; + char name[32]; + char revision[32]; /* Mask version */ +} __packed; + +/* Get board HW version */ +#define EC_CMD_GET_BOARD_VERSION 0x06 + +struct ec_response_board_version { + uint16_t board_version; /* A monotonously incrementing number. */ +} __packed; + +/* + * Read memory-mapped data. + * + * This is an alternate interface to memory-mapped data for bus protocols + * which don't support direct-mapped memory - I2C, SPI, etc. + * + * Response is params.size bytes of data. 
+ */ +#define EC_CMD_READ_MEMMAP 0x07 + +struct ec_params_read_memmap { + uint8_t offset; /* Offset in memmap (EC_MEMMAP_*) */ + uint8_t size; /* Size to read in bytes */ +} __packed; + +/* Read versions supported for a command */ +#define EC_CMD_GET_CMD_VERSIONS 0x08 + +struct ec_params_get_cmd_versions { + uint8_t cmd; /* Command to check */ +} __packed; + +struct ec_response_get_cmd_versions { + /* + * Mask of supported versions; use EC_VER_MASK() to compare with a + * desired version. + */ + uint32_t version_mask; +} __packed; + +/* + * Check EC communcations status (busy). This is needed on i2c/spi but not + * on lpc since it has its own out-of-band busy indicator. + * + * lpc must read the status from the command register. Attempting this on + * lpc will overwrite the args/parameter space and corrupt its data. + */ +#define EC_CMD_GET_COMMS_STATUS 0x09 + +/* Avoid using ec_status which is for return values */ +enum ec_comms_status { + EC_COMMS_STATUS_PROCESSING = 1 << 0, /* Processing cmd */ +}; + +struct ec_response_get_comms_status { + uint32_t flags; /* Mask of enum ec_comms_status */ +} __packed; + +/* Fake a variety of responses, purely for testing purposes. */ +#define EC_CMD_TEST_PROTOCOL 0x0a + +/* Tell the EC what to send back to us. */ +struct ec_params_test_protocol { + uint32_t ec_result; + uint32_t ret_len; + uint8_t buf[32]; +} __packed; + +/* Here it comes... 
*/ +struct ec_response_test_protocol { + uint8_t buf[32]; +} __packed; + +/* Get protocol information */ +#define EC_CMD_GET_PROTOCOL_INFO 0x0b + +/* Flags for ec_response_get_protocol_info.flags */ +/* EC_RES_IN_PROGRESS may be returned if a command is slow */ +#define EC_PROTOCOL_INFO_IN_PROGRESS_SUPPORTED (1 << 0) + +struct ec_response_get_protocol_info { + /* Fields which exist if at least protocol version 3 supported */ + + /* Bitmask of protocol versions supported (1 << n means version n)*/ + uint32_t protocol_versions; + + /* Maximum request packet size, in bytes */ + uint16_t max_request_packet_size; + + /* Maximum response packet size, in bytes */ + uint16_t max_response_packet_size; + + /* Flags; see EC_PROTOCOL_INFO_* */ + uint32_t flags; +} __packed; + + +/*****************************************************************************/ +/* Get/Set miscellaneous values */ + +/* The upper byte of .flags tells what to do (nothing means "get") */ +#define EC_GSV_SET 0x80000000 + +/* The lower three bytes of .flags identifies the parameter, if that has + meaning for an individual command. */ +#define EC_GSV_PARAM_MASK 0x00ffffff + +struct ec_params_get_set_value { + uint32_t flags; + uint32_t value; +} __packed; + +struct ec_response_get_set_value { + uint32_t flags; + uint32_t value; +} __packed; + +/* More than one command can use these structs to get/set parameters. */ +#define EC_CMD_GSV_PAUSE_IN_S5 0x0c + + +/*****************************************************************************/ +/* Flash commands */ + +/* Get flash info */ +#define EC_CMD_FLASH_INFO 0x10 + +/* Version 0 returns these fields */ +struct ec_response_flash_info { + /* Usable flash size, in bytes */ + uint32_t flash_size; + /* + * Write block size. Write offset and size must be a multiple + * of this. + */ + uint32_t write_block_size; + /* + * Erase block size. Erase offset and size must be a multiple + * of this. + */ + uint32_t erase_block_size; + /* + * Protection block size. 
Protection offset and size must be a + * multiple of this. + */ + uint32_t protect_block_size; +} __packed; + +/* Flags for version 1+ flash info command */ +/* EC flash erases bits to 0 instead of 1 */ +#define EC_FLASH_INFO_ERASE_TO_0 (1 << 0) + +/* + * Version 1 returns the same initial fields as version 0, with additional + * fields following. + * + * gcc anonymous structs don't seem to get along with the __packed directive; + * if they did we'd define the version 0 struct as a sub-struct of this one. + */ +struct ec_response_flash_info_1 { + /* Version 0 fields; see above for description */ + uint32_t flash_size; + uint32_t write_block_size; + uint32_t erase_block_size; + uint32_t protect_block_size; + + /* Version 1 adds these fields: */ + /* + * Ideal write size in bytes. Writes will be fastest if size is + * exactly this and offset is a multiple of this. For example, an EC + * may have a write buffer which can do half-page operations if data is + * aligned, and a slower word-at-a-time write mode. + */ + uint32_t write_ideal_size; + + /* Flags; see EC_FLASH_INFO_* */ + uint32_t flags; +} __packed; + +/* + * Read flash + * + * Response is params.size bytes of data. + */ +#define EC_CMD_FLASH_READ 0x11 + +struct ec_params_flash_read { + uint32_t offset; /* Byte offset to read */ + uint32_t size; /* Size to read in bytes */ +} __packed; + +/* Write flash */ +#define EC_CMD_FLASH_WRITE 0x12 +#define EC_VER_FLASH_WRITE 1 + +/* Version 0 of the flash command supported only 64 bytes of data */ +#define EC_FLASH_WRITE_VER0_SIZE 64 + +struct ec_params_flash_write { + uint32_t offset; /* Byte offset to write */ + uint32_t size; /* Size to write in bytes */ + /* Followed by data to write */ +} __packed; + +/* Erase flash */ +#define EC_CMD_FLASH_ERASE 0x13 + +struct ec_params_flash_erase { + uint32_t offset; /* Byte offset to erase */ + uint32_t size; /* Size to erase in bytes */ +} __packed; + +/* + * Get/set flash protection. 
+ * + * If mask!=0, sets/clear the requested bits of flags. Depending on the + * firmware write protect GPIO, not all flags will take effect immediately; + * some flags require a subsequent hard reset to take effect. Check the + * returned flags bits to see what actually happened. + * + * If mask=0, simply returns the current flags state. + */ +#define EC_CMD_FLASH_PROTECT 0x15 +#define EC_VER_FLASH_PROTECT 1 /* Command version 1 */ + +/* Flags for flash protection */ +/* RO flash code protected when the EC boots */ +#define EC_FLASH_PROTECT_RO_AT_BOOT (1 << 0) +/* + * RO flash code protected now. If this bit is set, at-boot status cannot + * be changed. + */ +#define EC_FLASH_PROTECT_RO_NOW (1 << 1) +/* Entire flash code protected now, until reboot. */ +#define EC_FLASH_PROTECT_ALL_NOW (1 << 2) +/* Flash write protect GPIO is asserted now */ +#define EC_FLASH_PROTECT_GPIO_ASSERTED (1 << 3) +/* Error - at least one bank of flash is stuck locked, and cannot be unlocked */ +#define EC_FLASH_PROTECT_ERROR_STUCK (1 << 4) +/* + * Error - flash protection is in inconsistent state. At least one bank of + * flash which should be protected is not protected. Usually fixed by + * re-requesting the desired flags, or by a hard reset if that fails. + */ +#define EC_FLASH_PROTECT_ERROR_INCONSISTENT (1 << 5) +/* Entire flash code protected when the EC boots */ +#define EC_FLASH_PROTECT_ALL_AT_BOOT (1 << 6) + +struct ec_params_flash_protect { + uint32_t mask; /* Bits in flags to apply */ + uint32_t flags; /* New flags to apply */ +} __packed; + +struct ec_response_flash_protect { + /* Current value of flash protect flags */ + uint32_t flags; + /* + * Flags which are valid on this platform. This allows the caller + * to distinguish between flags which aren't set vs. flags which can't + * be set on this platform. 
+ */ + uint32_t valid_flags; + /* Flags which can be changed given the current protection state */ + uint32_t writable_flags; +} __packed; + +/* + * Note: commands 0x14 - 0x19 version 0 were old commands to get/set flash + * write protect. These commands may be reused with version > 0. + */ + +/* Get the region offset/size */ +#define EC_CMD_FLASH_REGION_INFO 0x16 +#define EC_VER_FLASH_REGION_INFO 1 + +enum ec_flash_region { + /* Region which holds read-only EC image */ + EC_FLASH_REGION_RO = 0, + /* Region which holds rewritable EC image */ + EC_FLASH_REGION_RW, + /* + * Region which should be write-protected in the factory (a superset of + * EC_FLASH_REGION_RO) + */ + EC_FLASH_REGION_WP_RO, + /* Number of regions */ + EC_FLASH_REGION_COUNT, +}; + +struct ec_params_flash_region_info { + uint32_t region; /* enum ec_flash_region */ +} __packed; + +struct ec_response_flash_region_info { + uint32_t offset; + uint32_t size; +} __packed; + +/* Read/write VbNvContext */ +#define EC_CMD_VBNV_CONTEXT 0x17 +#define EC_VER_VBNV_CONTEXT 1 +#define EC_VBNV_BLOCK_SIZE 16 + +enum ec_vbnvcontext_op { + EC_VBNV_CONTEXT_OP_READ, + EC_VBNV_CONTEXT_OP_WRITE, +}; + +struct ec_params_vbnvcontext { + uint32_t op; + uint8_t block[EC_VBNV_BLOCK_SIZE]; +} __packed; + +struct ec_response_vbnvcontext { + uint8_t block[EC_VBNV_BLOCK_SIZE]; +} __packed; + +/*****************************************************************************/ +/* PWM commands */ + +/* Get fan target RPM */ +#define EC_CMD_PWM_GET_FAN_TARGET_RPM 0x20 + +struct ec_response_pwm_get_fan_rpm { + uint32_t rpm; +} __packed; + +/* Set target fan RPM */ +#define EC_CMD_PWM_SET_FAN_TARGET_RPM 0x21 + +struct ec_params_pwm_set_fan_target_rpm { + uint32_t rpm; +} __packed; + +/* Get keyboard backlight */ +#define EC_CMD_PWM_GET_KEYBOARD_BACKLIGHT 0x22 + +struct ec_response_pwm_get_keyboard_backlight { + uint8_t percent; + uint8_t enabled; +} __packed; + +/* Set keyboard backlight */ +#define EC_CMD_PWM_SET_KEYBOARD_BACKLIGHT 0x23 
+ +struct ec_params_pwm_set_keyboard_backlight { + uint8_t percent; +} __packed; + +/* Set target fan PWM duty cycle */ +#define EC_CMD_PWM_SET_FAN_DUTY 0x24 + +struct ec_params_pwm_set_fan_duty { + uint32_t percent; +} __packed; + +#define EC_CMD_PWM_SET_DUTY 0x25 +/* 16 bit duty cycle, 0xffff = 100% */ +#define EC_PWM_MAX_DUTY 0xffff + +enum ec_pwm_type { + /* All types, indexed by board-specific enum pwm_channel */ + EC_PWM_TYPE_GENERIC = 0, + /* Keyboard backlight */ + EC_PWM_TYPE_KB_LIGHT, + /* Display backlight */ + EC_PWM_TYPE_DISPLAY_LIGHT, + EC_PWM_TYPE_COUNT, +}; + +struct ec_params_pwm_set_duty { + uint16_t duty; /* Duty cycle, EC_PWM_MAX_DUTY = 100% */ + uint8_t pwm_type; /* ec_pwm_type */ + uint8_t index; /* Type-specific index, or 0 if unique */ +} __packed; + +#define EC_CMD_PWM_GET_DUTY 0x26 + +struct ec_params_pwm_get_duty { + uint8_t pwm_type; /* ec_pwm_type */ + uint8_t index; /* Type-specific index, or 0 if unique */ +} __packed; + +struct ec_response_pwm_get_duty { + uint16_t duty; /* Duty cycle, EC_PWM_MAX_DUTY = 100% */ +} __packed; + +/*****************************************************************************/ +/* + * Lightbar commands. This looks worse than it is. Since we only use one HOST + * command to say "talk to the lightbar", we put the "and tell it to do X" part + * into a subcommand. We'll make separate structs for subcommands with + * different input args, so that we know how much to expect. + */ +#define EC_CMD_LIGHTBAR_CMD 0x28 + +struct rgb_s { + uint8_t r, g, b; +}; + +#define LB_BATTERY_LEVELS 4 +/* List of tweakable parameters. NOTE: It's __packed so it can be sent in a + * host command, but the alignment is the same regardless. Keep it that way. 
+ */ +struct lightbar_params_v0 { + /* Timing */ + int32_t google_ramp_up; + int32_t google_ramp_down; + int32_t s3s0_ramp_up; + int32_t s0_tick_delay[2]; /* AC=0/1 */ + int32_t s0a_tick_delay[2]; /* AC=0/1 */ + int32_t s0s3_ramp_down; + int32_t s3_sleep_for; + int32_t s3_ramp_up; + int32_t s3_ramp_down; + + /* Oscillation */ + uint8_t new_s0; + uint8_t osc_min[2]; /* AC=0/1 */ + uint8_t osc_max[2]; /* AC=0/1 */ + uint8_t w_ofs[2]; /* AC=0/1 */ + + /* Brightness limits based on the backlight and AC. */ + uint8_t bright_bl_off_fixed[2]; /* AC=0/1 */ + uint8_t bright_bl_on_min[2]; /* AC=0/1 */ + uint8_t bright_bl_on_max[2]; /* AC=0/1 */ + + /* Battery level thresholds */ + uint8_t battery_threshold[LB_BATTERY_LEVELS - 1]; + + /* Map [AC][battery_level] to color index */ + uint8_t s0_idx[2][LB_BATTERY_LEVELS]; /* AP is running */ + uint8_t s3_idx[2][LB_BATTERY_LEVELS]; /* AP is sleeping */ + + /* Color palette */ + struct rgb_s color[8]; /* 0-3 are Google colors */ +} __packed; + +struct lightbar_params_v1 { + /* Timing */ + int32_t google_ramp_up; + int32_t google_ramp_down; + int32_t s3s0_ramp_up; + int32_t s0_tick_delay[2]; /* AC=0/1 */ + int32_t s0a_tick_delay[2]; /* AC=0/1 */ + int32_t s0s3_ramp_down; + int32_t s3_sleep_for; + int32_t s3_ramp_up; + int32_t s3_ramp_down; + int32_t tap_tick_delay; + int32_t tap_display_time; + + /* Tap-for-battery params */ + uint8_t tap_pct_red; + uint8_t tap_pct_green; + uint8_t tap_seg_min_on; + uint8_t tap_seg_max_on; + uint8_t tap_seg_osc; + uint8_t tap_idx[3]; + + /* Oscillation */ + uint8_t osc_min[2]; /* AC=0/1 */ + uint8_t osc_max[2]; /* AC=0/1 */ + uint8_t w_ofs[2]; /* AC=0/1 */ + + /* Brightness limits based on the backlight and AC. 
*/ + uint8_t bright_bl_off_fixed[2]; /* AC=0/1 */ + uint8_t bright_bl_on_min[2]; /* AC=0/1 */ + uint8_t bright_bl_on_max[2]; /* AC=0/1 */ + + /* Battery level thresholds */ + uint8_t battery_threshold[LB_BATTERY_LEVELS - 1]; + + /* Map [AC][battery_level] to color index */ + uint8_t s0_idx[2][LB_BATTERY_LEVELS]; /* AP is running */ + uint8_t s3_idx[2][LB_BATTERY_LEVELS]; /* AP is sleeping */ + + /* Color palette */ + struct rgb_s color[8]; /* 0-3 are Google colors */ +} __packed; + +struct ec_params_lightbar { + uint8_t cmd; /* Command (see enum lightbar_command) */ + union { + struct { + /* no args */ + } dump, off, on, init, get_seq, get_params_v0, get_params_v1, + version, get_brightness, get_demo; + + struct { + uint8_t num; + } set_brightness, seq, demo; + + struct { + uint8_t ctrl, reg, value; + } reg; + + struct { + uint8_t led, red, green, blue; + } set_rgb; + + struct { + uint8_t led; + } get_rgb; + + struct lightbar_params_v0 set_params_v0; + struct lightbar_params_v1 set_params_v1; + }; +} __packed; + +struct ec_response_lightbar { + union { + struct { + struct { + uint8_t reg; + uint8_t ic0; + uint8_t ic1; + } vals[23]; + } dump; + + struct { + uint8_t num; + } get_seq, get_brightness, get_demo; + + struct lightbar_params_v0 get_params_v0; + struct lightbar_params_v1 get_params_v1; + + struct { + uint32_t num; + uint32_t flags; + } version; + + struct { + uint8_t red, green, blue; + } get_rgb; + + struct { + /* no return params */ + } off, on, init, set_brightness, seq, reg, set_rgb, + demo, set_params_v0, set_params_v1; + }; +} __packed; + +/* Lightbar commands */ +enum lightbar_command { + LIGHTBAR_CMD_DUMP = 0, + LIGHTBAR_CMD_OFF = 1, + LIGHTBAR_CMD_ON = 2, + LIGHTBAR_CMD_INIT = 3, + LIGHTBAR_CMD_SET_BRIGHTNESS = 4, + LIGHTBAR_CMD_SEQ = 5, + LIGHTBAR_CMD_REG = 6, + LIGHTBAR_CMD_SET_RGB = 7, + LIGHTBAR_CMD_GET_SEQ = 8, + LIGHTBAR_CMD_DEMO = 9, + LIGHTBAR_CMD_GET_PARAMS_V0 = 10, + LIGHTBAR_CMD_SET_PARAMS_V0 = 11, + LIGHTBAR_CMD_VERSION = 12, + 
LIGHTBAR_CMD_GET_BRIGHTNESS = 13, + LIGHTBAR_CMD_GET_RGB = 14, + LIGHTBAR_CMD_GET_DEMO = 15, + LIGHTBAR_CMD_GET_PARAMS_V1 = 16, + LIGHTBAR_CMD_SET_PARAMS_V1 = 17, + LIGHTBAR_NUM_CMDS +}; + +/*****************************************************************************/ +/* LED control commands */ + +#define EC_CMD_LED_CONTROL 0x29 + +enum ec_led_id { + /* LED to indicate battery state of charge */ + EC_LED_ID_BATTERY_LED = 0, + /* + * LED to indicate system power state (on or in suspend). + * May be on power button or on C-panel. + */ + EC_LED_ID_POWER_LED, + /* LED on power adapter or its plug */ + EC_LED_ID_ADAPTER_LED, + + EC_LED_ID_COUNT +}; + +/* LED control flags */ +#define EC_LED_FLAGS_QUERY (1 << 0) /* Query LED capability only */ +#define EC_LED_FLAGS_AUTO (1 << 1) /* Switch LED back to automatic control */ + +enum ec_led_colors { + EC_LED_COLOR_RED = 0, + EC_LED_COLOR_GREEN, + EC_LED_COLOR_BLUE, + EC_LED_COLOR_YELLOW, + EC_LED_COLOR_WHITE, + + EC_LED_COLOR_COUNT +}; + +struct ec_params_led_control { + uint8_t led_id; /* Which LED to control */ + uint8_t flags; /* Control flags */ + + uint8_t brightness[EC_LED_COLOR_COUNT]; +} __packed; + +struct ec_response_led_control { + /* + * Available brightness value range. + * + * Range 0 means color channel not present. + * Range 1 means on/off control. + * Other values means the LED is control by PWM. + */ + uint8_t brightness_range[EC_LED_COLOR_COUNT]; +} __packed; + +/*****************************************************************************/ +/* Verified boot commands */ + +/* + * Note: command code 0x29 version 0 was VBOOT_CMD in Link EVT; it may be + * reused for other purposes with version > 0. 
+ */ + +/* Verified boot hash command */ +#define EC_CMD_VBOOT_HASH 0x2A + +struct ec_params_vboot_hash { + uint8_t cmd; /* enum ec_vboot_hash_cmd */ + uint8_t hash_type; /* enum ec_vboot_hash_type */ + uint8_t nonce_size; /* Nonce size; may be 0 */ + uint8_t reserved0; /* Reserved; set 0 */ + uint32_t offset; /* Offset in flash to hash */ + uint32_t size; /* Number of bytes to hash */ + uint8_t nonce_data[64]; /* Nonce data; ignored if nonce_size=0 */ +} __packed; + +struct ec_response_vboot_hash { + uint8_t status; /* enum ec_vboot_hash_status */ + uint8_t hash_type; /* enum ec_vboot_hash_type */ + uint8_t digest_size; /* Size of hash digest in bytes */ + uint8_t reserved0; /* Ignore; will be 0 */ + uint32_t offset; /* Offset in flash which was hashed */ + uint32_t size; /* Number of bytes hashed */ + uint8_t hash_digest[64]; /* Hash digest data */ +} __packed; + +enum ec_vboot_hash_cmd { + EC_VBOOT_HASH_GET = 0, /* Get current hash status */ + EC_VBOOT_HASH_ABORT = 1, /* Abort calculating current hash */ + EC_VBOOT_HASH_START = 2, /* Start computing a new hash */ + EC_VBOOT_HASH_RECALC = 3, /* Synchronously compute a new hash */ +}; + +enum ec_vboot_hash_type { + EC_VBOOT_HASH_TYPE_SHA256 = 0, /* SHA-256 */ +}; + +enum ec_vboot_hash_status { + EC_VBOOT_HASH_STATUS_NONE = 0, /* No hash (not started, or aborted) */ + EC_VBOOT_HASH_STATUS_DONE = 1, /* Finished computing a hash */ + EC_VBOOT_HASH_STATUS_BUSY = 2, /* Busy computing a hash */ +}; + +/* + * Special values for offset for EC_VBOOT_HASH_START and EC_VBOOT_HASH_RECALC. + * If one of these is specified, the EC will automatically update offset and + * size to the correct values for the specified image (RO or RW). + */ +#define EC_VBOOT_HASH_OFFSET_RO 0xfffffffe +#define EC_VBOOT_HASH_OFFSET_RW 0xfffffffd + +/*****************************************************************************/ +/* + * Motion sense commands. 
We'll make separate structs for sub-commands with + * different input args, so that we know how much to expect. + */ +#define EC_CMD_MOTION_SENSE_CMD 0x2B + +/* Motion sense commands */ +enum motionsense_command { + /* + * Dump command returns all motion sensor data including motion sense + * module flags and individual sensor flags. + */ + MOTIONSENSE_CMD_DUMP = 0, + + /* + * Info command returns data describing the details of a given sensor, + * including enum motionsensor_type, enum motionsensor_location, and + * enum motionsensor_chip. + */ + MOTIONSENSE_CMD_INFO = 1, + + /* + * EC Rate command is a setter/getter command for the EC sampling rate + * of all motion sensors in milliseconds. + */ + MOTIONSENSE_CMD_EC_RATE = 2, + + /* + * Sensor ODR command is a setter/getter command for the output data + * rate of a specific motion sensor in millihertz. + */ + MOTIONSENSE_CMD_SENSOR_ODR = 3, + + /* + * Sensor range command is a setter/getter command for the range of + * a specified motion sensor in +/-G's or +/- deg/s. + */ + MOTIONSENSE_CMD_SENSOR_RANGE = 4, + + /* + * Setter/getter command for the keyboard wake angle. When the lid + * angle is greater than this value, keyboard wake is disabled in S3, + * and when the lid angle goes less than this value, keyboard wake is + * enabled. Note, the lid angle measurement is an approximate, + * un-calibrated value, hence the wake angle isn't exact. + */ + MOTIONSENSE_CMD_KB_WAKE_ANGLE = 5, + + /* Number of motionsense sub-commands. */ + MOTIONSENSE_NUM_CMDS +}; + +enum motionsensor_id { + EC_MOTION_SENSOR_ACCEL_BASE = 0, + EC_MOTION_SENSOR_ACCEL_LID = 1, + EC_MOTION_SENSOR_GYRO = 2, + + /* + * Note, if more sensors are added and this count changes, the padding + * in ec_response_motion_sense dump command must be modified. + */ + EC_MOTION_SENSOR_COUNT = 3 +}; + +/* List of motion sensor types. 
*/ +enum motionsensor_type { + MOTIONSENSE_TYPE_ACCEL = 0, + MOTIONSENSE_TYPE_GYRO = 1, +}; + +/* List of motion sensor locations. */ +enum motionsensor_location { + MOTIONSENSE_LOC_BASE = 0, + MOTIONSENSE_LOC_LID = 1, +}; + +/* List of motion sensor chips. */ +enum motionsensor_chip { + MOTIONSENSE_CHIP_KXCJ9 = 0, +}; + +/* Module flag masks used for the dump sub-command. */ +#define MOTIONSENSE_MODULE_FLAG_ACTIVE (1<<0) + +/* Sensor flag masks used for the dump sub-command. */ +#define MOTIONSENSE_SENSOR_FLAG_PRESENT (1<<0) + +/* + * Send this value for the data element to only perform a read. If you + * send any other value, the EC will interpret it as data to set and will + * return the actual value set. + */ +#define EC_MOTION_SENSE_NO_VALUE -1 + +struct ec_params_motion_sense { + uint8_t cmd; + union { + /* Used for MOTIONSENSE_CMD_DUMP. */ + struct { + /* no args */ + } dump; + + /* + * Used for MOTIONSENSE_CMD_EC_RATE and + * MOTIONSENSE_CMD_KB_WAKE_ANGLE. + */ + struct { + /* Data to set or EC_MOTION_SENSE_NO_VALUE to read. */ + int16_t data; + } ec_rate, kb_wake_angle; + + /* Used for MOTIONSENSE_CMD_INFO. */ + struct { + /* Should be element of enum motionsensor_id. */ + uint8_t sensor_num; + } info; + + /* + * Used for MOTIONSENSE_CMD_SENSOR_ODR and + * MOTIONSENSE_CMD_SENSOR_RANGE. + */ + struct { + /* Should be element of enum motionsensor_id. */ + uint8_t sensor_num; + + /* Rounding flag, true for round-up, false for down. */ + uint8_t roundup; + + uint16_t reserved; + + /* Data to set or EC_MOTION_SENSE_NO_VALUE to read. */ + int32_t data; + } sensor_odr, sensor_range; + }; +} __packed; + +struct ec_response_motion_sense { + union { + /* Used for MOTIONSENSE_CMD_DUMP. */ + struct { + /* Flags representing the motion sensor module. */ + uint8_t module_flags; + + /* Flags for each sensor in enum motionsensor_id. */ + uint8_t sensor_flags[EC_MOTION_SENSOR_COUNT]; + + /* Array of all sensor data. Each sensor is 3-axis. 
*/ + int16_t data[3*EC_MOTION_SENSOR_COUNT]; + } dump; + + /* Used for MOTIONSENSE_CMD_INFO. */ + struct { + /* Should be element of enum motionsensor_type. */ + uint8_t type; + + /* Should be element of enum motionsensor_location. */ + uint8_t location; + + /* Should be element of enum motionsensor_chip. */ + uint8_t chip; + } info; + + /* + * Used for MOTIONSENSE_CMD_EC_RATE, MOTIONSENSE_CMD_SENSOR_ODR, + * MOTIONSENSE_CMD_SENSOR_RANGE, and + * MOTIONSENSE_CMD_KB_WAKE_ANGLE. + */ + struct { + /* Current value of the parameter queried. */ + int32_t ret; + } ec_rate, sensor_odr, sensor_range, kb_wake_angle; + }; +} __packed; + +/*****************************************************************************/ +/* USB charging control commands */ + +/* Set USB port charging mode */ +#define EC_CMD_USB_CHARGE_SET_MODE 0x30 + +struct ec_params_usb_charge_set_mode { + uint8_t usb_port_id; + uint8_t mode; +} __packed; + +/*****************************************************************************/ +/* Persistent storage for host */ + +/* Maximum bytes that can be read/written in a single command */ +#define EC_PSTORE_SIZE_MAX 64 + +/* Get persistent storage info */ +#define EC_CMD_PSTORE_INFO 0x40 + +struct ec_response_pstore_info { + /* Persistent storage size, in bytes */ + uint32_t pstore_size; + /* Access size; read/write offset and size must be a multiple of this */ + uint32_t access_size; +} __packed; + +/* + * Read persistent storage + * + * Response is params.size bytes of data. 
+ */ +#define EC_CMD_PSTORE_READ 0x41 + +struct ec_params_pstore_read { + uint32_t offset; /* Byte offset to read */ + uint32_t size; /* Size to read in bytes */ +} __packed; + +/* Write persistent storage */ +#define EC_CMD_PSTORE_WRITE 0x42 + +struct ec_params_pstore_write { + uint32_t offset; /* Byte offset to write */ + uint32_t size; /* Size to write in bytes */ + uint8_t data[EC_PSTORE_SIZE_MAX]; +} __packed; + +/*****************************************************************************/ +/* Real-time clock */ + +/* RTC params and response structures */ +struct ec_params_rtc { + uint32_t time; +} __packed; + +struct ec_response_rtc { + uint32_t time; +} __packed; + +/* These use ec_response_rtc */ +#define EC_CMD_RTC_GET_VALUE 0x44 +#define EC_CMD_RTC_GET_ALARM 0x45 + +/* These all use ec_params_rtc */ +#define EC_CMD_RTC_SET_VALUE 0x46 +#define EC_CMD_RTC_SET_ALARM 0x47 + +/*****************************************************************************/ +/* Port80 log access */ + +/* Maximum entries that can be read/written in a single command */ +#define EC_PORT80_SIZE_MAX 32 + +/* Get last port80 code from previous boot */ +#define EC_CMD_PORT80_LAST_BOOT 0x48 +#define EC_CMD_PORT80_READ 0x48 + +enum ec_port80_subcmd { + EC_PORT80_GET_INFO = 0, + EC_PORT80_READ_BUFFER, +}; + +struct ec_params_port80_read { + uint16_t subcmd; + union { + struct { + uint32_t offset; + uint32_t num_entries; + } read_buffer; + }; +} __packed; + +struct ec_response_port80_read { + union { + struct { + uint32_t writes; + uint32_t history_size; + uint32_t last_boot; + } get_info; + struct { + uint16_t codes[EC_PORT80_SIZE_MAX]; + } data; + }; +} __packed; + +struct ec_response_port80_last_boot { + uint16_t code; +} __packed; + +/*****************************************************************************/ +/* Thermal engine commands. Note that there are two implementations. We'll + * reuse the command number, but the data and behavior is incompatible. 
+ * Version 0 is what originally shipped on Link. + * Version 1 separates the CPU thermal limits from the fan control. + */ + +#define EC_CMD_THERMAL_SET_THRESHOLD 0x50 +#define EC_CMD_THERMAL_GET_THRESHOLD 0x51 + +/* The version 0 structs are opaque. You have to know what they are for + * the get/set commands to make any sense. + */ + +/* Version 0 - set */ +struct ec_params_thermal_set_threshold { + uint8_t sensor_type; + uint8_t threshold_id; + uint16_t value; +} __packed; + +/* Version 0 - get */ +struct ec_params_thermal_get_threshold { + uint8_t sensor_type; + uint8_t threshold_id; +} __packed; + +struct ec_response_thermal_get_threshold { + uint16_t value; +} __packed; + + +/* The version 1 structs are visible. */ +enum ec_temp_thresholds { + EC_TEMP_THRESH_WARN = 0, + EC_TEMP_THRESH_HIGH, + EC_TEMP_THRESH_HALT, + + EC_TEMP_THRESH_COUNT +}; + +/* Thermal configuration for one temperature sensor. Temps are in degrees K. + * Zero values will be silently ignored by the thermal task. + */ +struct ec_thermal_config { + uint32_t temp_host[EC_TEMP_THRESH_COUNT]; /* levels of hotness */ + uint32_t temp_fan_off; /* no active cooling needed */ + uint32_t temp_fan_max; /* max active cooling needed */ +} __packed; + +/* Version 1 - get config for one sensor. */ +struct ec_params_thermal_get_threshold_v1 { + uint32_t sensor_num; +} __packed; +/* This returns a struct ec_thermal_config */ + +/* Version 1 - set config for one sensor. + * Use read-modify-write for best results! 
*/ +struct ec_params_thermal_set_threshold_v1 { + uint32_t sensor_num; + struct ec_thermal_config cfg; +} __packed; +/* This returns no data */ + +/****************************************************************************/ + +/* Toggle automatic fan control */ +#define EC_CMD_THERMAL_AUTO_FAN_CTRL 0x52 + +/* Get TMP006 calibration data */ +#define EC_CMD_TMP006_GET_CALIBRATION 0x53 + +struct ec_params_tmp006_get_calibration { + uint8_t index; +} __packed; + +struct ec_response_tmp006_get_calibration { + float s0; + float b0; + float b1; + float b2; +} __packed; + +/* Set TMP006 calibration data */ +#define EC_CMD_TMP006_SET_CALIBRATION 0x54 + +struct ec_params_tmp006_set_calibration { + uint8_t index; + uint8_t reserved[3]; /* Reserved; set 0 */ + float s0; + float b0; + float b1; + float b2; +} __packed; + +/* Read raw TMP006 data */ +#define EC_CMD_TMP006_GET_RAW 0x55 + +struct ec_params_tmp006_get_raw { + uint8_t index; +} __packed; + +struct ec_response_tmp006_get_raw { + int32_t t; /* In 1/100 K */ + int32_t v; /* In nV */ +}; + +/*****************************************************************************/ +/* MKBP - Matrix KeyBoard Protocol */ + +/* + * Read key state + * + * Returns raw data for keyboard cols; see ec_response_mkbp_info.cols for + * expected response size. 
+ */ +#define EC_CMD_MKBP_STATE 0x60 + +/* Provide information about the matrix : number of rows and columns */ +#define EC_CMD_MKBP_INFO 0x61 + +struct ec_response_mkbp_info { + uint32_t rows; + uint32_t cols; + uint8_t switches; +} __packed; + +/* Simulate key press */ +#define EC_CMD_MKBP_SIMULATE_KEY 0x62 + +struct ec_params_mkbp_simulate_key { + uint8_t col; + uint8_t row; + uint8_t pressed; +} __packed; + +/* Configure keyboard scanning */ +#define EC_CMD_MKBP_SET_CONFIG 0x64 +#define EC_CMD_MKBP_GET_CONFIG 0x65 + +/* flags */ +enum mkbp_config_flags { + EC_MKBP_FLAGS_ENABLE = 1, /* Enable keyboard scanning */ +}; + +enum mkbp_config_valid { + EC_MKBP_VALID_SCAN_PERIOD = 1 << 0, + EC_MKBP_VALID_POLL_TIMEOUT = 1 << 1, + EC_MKBP_VALID_MIN_POST_SCAN_DELAY = 1 << 3, + EC_MKBP_VALID_OUTPUT_SETTLE = 1 << 4, + EC_MKBP_VALID_DEBOUNCE_DOWN = 1 << 5, + EC_MKBP_VALID_DEBOUNCE_UP = 1 << 6, + EC_MKBP_VALID_FIFO_MAX_DEPTH = 1 << 7, +}; + +/* Configuration for our key scanning algorithm */ +struct ec_mkbp_config { + uint32_t valid_mask; /* valid fields */ + uint8_t flags; /* some flags (enum mkbp_config_flags) */ + uint8_t valid_flags; /* which flags are valid */ + uint16_t scan_period_us; /* period between start of scans */ + /* revert to interrupt mode after no activity for this long */ + uint32_t poll_timeout_us; + /* + * minimum post-scan relax time. Once we finish a scan we check + * the time until we are due to start the next one. If this time is + * shorter this field, we use this instead. 
+ */ + uint16_t min_post_scan_delay_us; + /* delay between setting up output and waiting for it to settle */ + uint16_t output_settle_us; + uint16_t debounce_down_us; /* time for debounce on key down */ + uint16_t debounce_up_us; /* time for debounce on key up */ + /* maximum depth to allow for fifo (0 = no keyscan output) */ + uint8_t fifo_max_depth; +} __packed; + +struct ec_params_mkbp_set_config { + struct ec_mkbp_config config; +} __packed; + +struct ec_response_mkbp_get_config { + struct ec_mkbp_config config; +} __packed; + +/* Run the key scan emulation */ +#define EC_CMD_KEYSCAN_SEQ_CTRL 0x66 + +enum ec_keyscan_seq_cmd { + EC_KEYSCAN_SEQ_STATUS = 0, /* Get status information */ + EC_KEYSCAN_SEQ_CLEAR = 1, /* Clear sequence */ + EC_KEYSCAN_SEQ_ADD = 2, /* Add item to sequence */ + EC_KEYSCAN_SEQ_START = 3, /* Start running sequence */ + EC_KEYSCAN_SEQ_COLLECT = 4, /* Collect sequence summary data */ +}; + +enum ec_collect_flags { + /* + * Indicates this scan was processed by the EC. Due to timing, some + * scans may be skipped. + */ + EC_KEYSCAN_SEQ_FLAG_DONE = 1 << 0, +}; + +struct ec_collect_item { + uint8_t flags; /* some flags (enum ec_collect_flags) */ +}; + +struct ec_params_keyscan_seq_ctrl { + uint8_t cmd; /* Command to send (enum ec_keyscan_seq_cmd) */ + union { + struct { + uint8_t active; /* still active */ + uint8_t num_items; /* number of items */ + /* Current item being presented */ + uint8_t cur_item; + } status; + struct { + /* + * Absolute time for this scan, measured from the + * start of the sequence. 
+ */ + uint32_t time_us; + uint8_t scan[0]; /* keyscan data */ + } add; + struct { + uint8_t start_item; /* First item to return */ + uint8_t num_items; /* Number of items to return */ + } collect; + }; +} __packed; + +struct ec_result_keyscan_seq_ctrl { + union { + struct { + uint8_t num_items; /* Number of items */ + /* Data for each item */ + struct ec_collect_item item[0]; + } collect; + }; +} __packed; + +/* + * Command for retrieving the next pending MKBP event from the EC device + * + * The device replies with UNAVAILABLE if there aren't any pending events. + */ +#define EC_CMD_GET_NEXT_EVENT 0x67 + +enum ec_mkbp_event { + /* Keyboard matrix changed. The event data is the new matrix state. */ + EC_MKBP_EVENT_KEY_MATRIX = 0, + + /* New host event. The event data is 4 bytes of host event flags. */ + EC_MKBP_EVENT_HOST_EVENT = 1, + + /* New Sensor FIFO data. The event data is fifo_info structure. */ + EC_MKBP_EVENT_SENSOR_FIFO = 2, + + /* Number of MKBP events */ + EC_MKBP_EVENT_COUNT, +}; + +union ec_response_get_next_data { + uint8_t key_matrix[13]; + + /* Unaligned */ + uint32_t host_event; +} __packed; + +struct ec_response_get_next_event { + uint8_t event_type; + /* Followed by event data if any */ + union ec_response_get_next_data data; +} __packed; + +/*****************************************************************************/ +/* Temperature sensor commands */ + +/* Read temperature sensor info */ +#define EC_CMD_TEMP_SENSOR_GET_INFO 0x70 + +struct ec_params_temp_sensor_get_info { + uint8_t id; +} __packed; + +struct ec_response_temp_sensor_get_info { + char sensor_name[32]; + uint8_t sensor_type; +} __packed; + +/*****************************************************************************/ + +/* + * Note: host commands 0x80 - 0x87 are reserved to avoid conflict with ACPI + * commands accidentally sent to the wrong interface. See the ACPI section + * below. 
+ */ + +/*****************************************************************************/ +/* Host event commands */ + +/* + * Host event mask params and response structures, shared by all of the host + * event commands below. + */ +struct ec_params_host_event_mask { + uint32_t mask; +} __packed; + +struct ec_response_host_event_mask { + uint32_t mask; +} __packed; + +/* These all use ec_response_host_event_mask */ +#define EC_CMD_HOST_EVENT_GET_B 0x87 +#define EC_CMD_HOST_EVENT_GET_SMI_MASK 0x88 +#define EC_CMD_HOST_EVENT_GET_SCI_MASK 0x89 +#define EC_CMD_HOST_EVENT_GET_WAKE_MASK 0x8d + +/* These all use ec_params_host_event_mask */ +#define EC_CMD_HOST_EVENT_SET_SMI_MASK 0x8a +#define EC_CMD_HOST_EVENT_SET_SCI_MASK 0x8b +#define EC_CMD_HOST_EVENT_CLEAR 0x8c +#define EC_CMD_HOST_EVENT_SET_WAKE_MASK 0x8e +#define EC_CMD_HOST_EVENT_CLEAR_B 0x8f + +/*****************************************************************************/ +/* Switch commands */ + +/* Enable/disable LCD backlight */ +#define EC_CMD_SWITCH_ENABLE_BKLIGHT 0x90 + +struct ec_params_switch_enable_backlight { + uint8_t enabled; +} __packed; + +/* Enable/disable WLAN/Bluetooth */ +#define EC_CMD_SWITCH_ENABLE_WIRELESS 0x91 +#define EC_VER_SWITCH_ENABLE_WIRELESS 1 + +/* Version 0 params; no response */ +struct ec_params_switch_enable_wireless_v0 { + uint8_t enabled; +} __packed; + +/* Version 1 params */ +struct ec_params_switch_enable_wireless_v1 { + /* Flags to enable now */ + uint8_t now_flags; + + /* Which flags to copy from now_flags */ + uint8_t now_mask; + + /* + * Flags to leave enabled in S3, if they're on at the S0->S3 + * transition. (Other flags will be disabled by the S0->S3 + * transition.) 
+ */ + uint8_t suspend_flags; + + /* Which flags to copy from suspend_flags */ + uint8_t suspend_mask; +} __packed; + +/* Version 1 response */ +struct ec_response_switch_enable_wireless_v1 { + /* Flags to enable now */ + uint8_t now_flags; + + /* Flags to leave enabled in S3 */ + uint8_t suspend_flags; +} __packed; + +/*****************************************************************************/ +/* GPIO commands. Only available on EC if write protect has been disabled. */ + +/* Set GPIO output value */ +#define EC_CMD_GPIO_SET 0x92 + +struct ec_params_gpio_set { + char name[32]; + uint8_t val; +} __packed; + +/* Get GPIO value */ +#define EC_CMD_GPIO_GET 0x93 + +/* Version 0 of input params and response */ +struct ec_params_gpio_get { + char name[32]; +} __packed; +struct ec_response_gpio_get { + uint8_t val; +} __packed; + +/* Version 1 of input params and response */ +struct ec_params_gpio_get_v1 { + uint8_t subcmd; + union { + struct { + char name[32]; + } get_value_by_name; + struct { + uint8_t index; + } get_info; + }; +} __packed; + +struct ec_response_gpio_get_v1 { + union { + struct { + uint8_t val; + } get_value_by_name, get_count; + struct { + uint8_t val; + char name[32]; + uint32_t flags; + } get_info; + }; +} __packed; + +enum gpio_get_subcmd { + EC_GPIO_GET_BY_NAME = 0, + EC_GPIO_GET_COUNT = 1, + EC_GPIO_GET_INFO = 2, +}; + +/*****************************************************************************/ +/* I2C commands. Only available when flash write protect is unlocked. */ + +/* + * TODO(crosbug.com/p/23570): These commands are deprecated, and will be + * removed soon. Use EC_CMD_I2C_XFER instead. + */ + +/* Read I2C bus */ +#define EC_CMD_I2C_READ 0x94 + +struct ec_params_i2c_read { + uint16_t addr; /* 8-bit address (7-bit shifted << 1) */ + uint8_t read_size; /* Either 8 or 16. 
*/ + uint8_t port; + uint8_t offset; +} __packed; +struct ec_response_i2c_read { + uint16_t data; +} __packed; + +/* Write I2C bus */ +#define EC_CMD_I2C_WRITE 0x95 + +struct ec_params_i2c_write { + uint16_t data; + uint16_t addr; /* 8-bit address (7-bit shifted << 1) */ + uint8_t write_size; /* Either 8 or 16. */ + uint8_t port; + uint8_t offset; +} __packed; + +/*****************************************************************************/ +/* Charge state commands. Only available when flash write protect unlocked. */ + +/* Force charge state machine to stop charging the battery or force it to + * discharge the battery. + */ +#define EC_CMD_CHARGE_CONTROL 0x96 +#define EC_VER_CHARGE_CONTROL 1 + +enum ec_charge_control_mode { + CHARGE_CONTROL_NORMAL = 0, + CHARGE_CONTROL_IDLE, + CHARGE_CONTROL_DISCHARGE, +}; + +struct ec_params_charge_control { + uint32_t mode; /* enum charge_control_mode */ +} __packed; + +/*****************************************************************************/ +/* Console commands. Only available when flash write protect is unlocked. */ + +/* Snapshot console output buffer for use by EC_CMD_CONSOLE_READ. */ +#define EC_CMD_CONSOLE_SNAPSHOT 0x97 + +/* + * Read next chunk of data from saved snapshot. + * + * Response is null-terminated string. Empty string, if there is no more + * remaining output. + */ +#define EC_CMD_CONSOLE_READ 0x98 + +/*****************************************************************************/ + +/* + * Cut off battery power immediately or after the host has shut down. + * + * return EC_RES_INVALID_COMMAND if unsupported by a board/battery. + * EC_RES_SUCCESS if the command was successful. + * EC_RES_ERROR if the cut off command failed. + */ + +#define EC_CMD_BATTERY_CUT_OFF 0x99 + +#define EC_BATTERY_CUTOFF_FLAG_AT_SHUTDOWN (1 << 0) + +struct ec_params_battery_cutoff { + uint8_t flags; +} __packed; + +/*****************************************************************************/ +/* USB port mux control. 
*/ + +/* + * Switch USB mux or return to automatic switching. + */ +#define EC_CMD_USB_MUX 0x9a + +struct ec_params_usb_mux { + uint8_t mux; +} __packed; + +/*****************************************************************************/ +/* LDOs / FETs control. */ + +enum ec_ldo_state { + EC_LDO_STATE_OFF = 0, /* the LDO / FET is shut down */ + EC_LDO_STATE_ON = 1, /* the LDO / FET is ON / providing power */ +}; + +/* + * Switch on/off a LDO. + */ +#define EC_CMD_LDO_SET 0x9b + +struct ec_params_ldo_set { + uint8_t index; + uint8_t state; +} __packed; + +/* + * Get LDO state. + */ +#define EC_CMD_LDO_GET 0x9c + +struct ec_params_ldo_get { + uint8_t index; +} __packed; + +struct ec_response_ldo_get { + uint8_t state; +} __packed; + +/*****************************************************************************/ +/* Power info. */ + +/* + * Get power info. + */ +#define EC_CMD_POWER_INFO 0x9d + +struct ec_response_power_info { + uint32_t usb_dev_type; + uint16_t voltage_ac; + uint16_t voltage_system; + uint16_t current_system; + uint16_t usb_current_limit; +} __packed; + +/*****************************************************************************/ +/* I2C passthru command */ + +#define EC_CMD_I2C_PASSTHRU 0x9e + +/* Read data; if not present, message is a write */ +#define EC_I2C_FLAG_READ (1 << 15) + +/* Mask for address */ +#define EC_I2C_ADDR_MASK 0x3ff + +#define EC_I2C_STATUS_NAK (1 << 0) /* Transfer was not acknowledged */ +#define EC_I2C_STATUS_TIMEOUT (1 << 1) /* Timeout during transfer */ + +/* Any error */ +#define EC_I2C_STATUS_ERROR (EC_I2C_STATUS_NAK | EC_I2C_STATUS_TIMEOUT) + +struct ec_params_i2c_passthru_msg { + uint16_t addr_flags; /* I2C slave address (7 or 10 bits) and flags */ + uint16_t len; /* Number of bytes to read or write */ +} __packed; + +struct ec_params_i2c_passthru { + uint8_t port; /* I2C port number */ + uint8_t num_msgs; /* Number of messages */ + struct ec_params_i2c_passthru_msg msg[]; + /* Data to write for all messages is 
concatenated here */ +} __packed; + +struct ec_response_i2c_passthru { + uint8_t i2c_status; /* Status flags (EC_I2C_STATUS_...) */ + uint8_t num_msgs; /* Number of messages processed */ + uint8_t data[]; /* Data read by messages concatenated here */ +} __packed; + +/*****************************************************************************/ +/* Power button hang detect */ + +#define EC_CMD_HANG_DETECT 0x9f + +/* Reasons to start hang detection timer */ +/* Power button pressed */ +#define EC_HANG_START_ON_POWER_PRESS (1 << 0) + +/* Lid closed */ +#define EC_HANG_START_ON_LID_CLOSE (1 << 1) + + /* Lid opened */ +#define EC_HANG_START_ON_LID_OPEN (1 << 2) + +/* Start of AP S3->S0 transition (booting or resuming from suspend) */ +#define EC_HANG_START_ON_RESUME (1 << 3) + +/* Reasons to cancel hang detection */ + +/* Power button released */ +#define EC_HANG_STOP_ON_POWER_RELEASE (1 << 8) + +/* Any host command from AP received */ +#define EC_HANG_STOP_ON_HOST_COMMAND (1 << 9) + +/* Stop on end of AP S0->S3 transition (suspending or shutting down) */ +#define EC_HANG_STOP_ON_SUSPEND (1 << 10) + +/* + * If this flag is set, all the other fields are ignored, and the hang detect + * timer is started. This provides the AP a way to start the hang timer + * without reconfiguring any of the other hang detect settings. Note that + * you must previously have configured the timeouts. + */ +#define EC_HANG_START_NOW (1 << 30) + +/* + * If this flag is set, all the other fields are ignored (including + * EC_HANG_START_NOW). This provides the AP a way to stop the hang timer + * without reconfiguring any of the other hang detect settings. 
+ */ +#define EC_HANG_STOP_NOW (1 << 31) + +struct ec_params_hang_detect { + /* Flags; see EC_HANG_* */ + uint32_t flags; + + /* Timeout in msec before generating host event, if enabled */ + uint16_t host_event_timeout_msec; + + /* Timeout in msec before generating warm reboot, if enabled */ + uint16_t warm_reboot_timeout_msec; +} __packed; + +/*****************************************************************************/ +/* Commands for battery charging */ + +/* + * This is the single catch-all host command to exchange data regarding the + * charge state machine (v2 and up). + */ +#define EC_CMD_CHARGE_STATE 0xa0 + +/* Subcommands for this host command */ +enum charge_state_command { + CHARGE_STATE_CMD_GET_STATE, + CHARGE_STATE_CMD_GET_PARAM, + CHARGE_STATE_CMD_SET_PARAM, + CHARGE_STATE_NUM_CMDS +}; + +/* + * Known param numbers are defined here. Ranges are reserved for board-specific + * params, which are handled by the particular implementations. + */ +enum charge_state_params { + CS_PARAM_CHG_VOLTAGE, /* charger voltage limit */ + CS_PARAM_CHG_CURRENT, /* charger current limit */ + CS_PARAM_CHG_INPUT_CURRENT, /* charger input current limit */ + CS_PARAM_CHG_STATUS, /* charger-specific status */ + CS_PARAM_CHG_OPTION, /* charger-specific options */ + /* How many so far? */ + CS_NUM_BASE_PARAMS, + + /* Range for CONFIG_CHARGER_PROFILE_OVERRIDE params */ + CS_PARAM_CUSTOM_PROFILE_MIN = 0x10000, + CS_PARAM_CUSTOM_PROFILE_MAX = 0x1ffff, + + /* Other custom param ranges go here... 
*/ +}; + +struct ec_params_charge_state { + uint8_t cmd; /* enum charge_state_command */ + union { + struct { + /* no args */ + } get_state; + + struct { + uint32_t param; /* enum charge_state_param */ + } get_param; + + struct { + uint32_t param; /* param to set */ + uint32_t value; /* value to set */ + } set_param; + }; +} __packed; + +struct ec_response_charge_state { + union { + struct { + int ac; + int chg_voltage; + int chg_current; + int chg_input_current; + int batt_state_of_charge; + } get_state; + + struct { + uint32_t value; + } get_param; + struct { + /* no return values */ + } set_param; + }; +} __packed; + + +/* + * Set maximum battery charging current. + */ +#define EC_CMD_CHARGE_CURRENT_LIMIT 0xa1 + +struct ec_params_current_limit { + uint32_t limit; /* in mA */ +} __packed; + +/* + * Set maximum external power current. + */ +#define EC_CMD_EXT_POWER_CURRENT_LIMIT 0xa2 + +struct ec_params_ext_power_current_limit { + uint32_t limit; /* in mA */ +} __packed; + +/*****************************************************************************/ +/* Smart battery pass-through */ + +/* Get / Set 16-bit smart battery registers */ +#define EC_CMD_SB_READ_WORD 0xb0 +#define EC_CMD_SB_WRITE_WORD 0xb1 + +/* Get / Set string smart battery parameters + * formatted as SMBUS "block". + */ +#define EC_CMD_SB_READ_BLOCK 0xb2 +#define EC_CMD_SB_WRITE_BLOCK 0xb3 + +struct ec_params_sb_rd { + uint8_t reg; +} __packed; + +struct ec_response_sb_rd_word { + uint16_t value; +} __packed; + +struct ec_params_sb_wr_word { + uint8_t reg; + uint16_t value; +} __packed; + +struct ec_response_sb_rd_block { + uint8_t data[32]; +} __packed; + +struct ec_params_sb_wr_block { + uint8_t reg; + uint16_t data[32]; +} __packed; + +/*****************************************************************************/ +/* Battery vendor parameters + * + * Get or set vendor-specific parameters in the battery. Implementations may + * differ between boards or batteries. 
On a set operation, the response + * contains the actual value set, which may be rounded or clipped from the + * requested value. + */ + +#define EC_CMD_BATTERY_VENDOR_PARAM 0xb4 + +enum ec_battery_vendor_param_mode { + BATTERY_VENDOR_PARAM_MODE_GET = 0, + BATTERY_VENDOR_PARAM_MODE_SET, +}; + +struct ec_params_battery_vendor_param { + uint32_t param; + uint32_t value; + uint8_t mode; +} __packed; + +struct ec_response_battery_vendor_param { + uint32_t value; +} __packed; + +/*****************************************************************************/ +/* System commands */ + +/* + * TODO(crosbug.com/p/23747): This is a confusing name, since it doesn't + * necessarily reboot the EC. Rename to "image" or something similar? + */ +#define EC_CMD_REBOOT_EC 0xd2 + +/* Command */ +enum ec_reboot_cmd { + EC_REBOOT_CANCEL = 0, /* Cancel a pending reboot */ + EC_REBOOT_JUMP_RO = 1, /* Jump to RO without rebooting */ + EC_REBOOT_JUMP_RW = 2, /* Jump to RW without rebooting */ + /* (command 3 was jump to RW-B) */ + EC_REBOOT_COLD = 4, /* Cold-reboot */ + EC_REBOOT_DISABLE_JUMP = 5, /* Disable jump until next reboot */ + EC_REBOOT_HIBERNATE = 6 /* Hibernate EC */ +}; + +/* Flags for ec_params_reboot_ec.reboot_flags */ +#define EC_REBOOT_FLAG_RESERVED0 (1 << 0) /* Was recovery request */ +#define EC_REBOOT_FLAG_ON_AP_SHUTDOWN (1 << 1) /* Reboot after AP shutdown */ + +struct ec_params_reboot_ec { + uint8_t cmd; /* enum ec_reboot_cmd */ + uint8_t flags; /* See EC_REBOOT_FLAG_* */ +} __packed; + +/* + * Get information on last EC panic. + * + * Returns variable-length platform-dependent panic information. See panic.h + * for details. + */ +#define EC_CMD_GET_PANIC_INFO 0xd3 + +/*****************************************************************************/ +/* + * ACPI commands + * + * These are valid ONLY on the ACPI command/data port. + */ + +/* + * ACPI Read Embedded Controller + * + * This reads from ACPI memory space on the EC (EC_ACPI_MEM_*). 
+ * + * Use the following sequence: + * + * - Write EC_CMD_ACPI_READ to EC_LPC_ADDR_ACPI_CMD + * - Wait for EC_LPC_CMDR_PENDING bit to clear + * - Write address to EC_LPC_ADDR_ACPI_DATA + * - Wait for EC_LPC_CMDR_DATA bit to set + * - Read value from EC_LPC_ADDR_ACPI_DATA + */ +#define EC_CMD_ACPI_READ 0x80 + +/* + * ACPI Write Embedded Controller + * + * This reads from ACPI memory space on the EC (EC_ACPI_MEM_*). + * + * Use the following sequence: + * + * - Write EC_CMD_ACPI_WRITE to EC_LPC_ADDR_ACPI_CMD + * - Wait for EC_LPC_CMDR_PENDING bit to clear + * - Write address to EC_LPC_ADDR_ACPI_DATA + * - Wait for EC_LPC_CMDR_PENDING bit to clear + * - Write value to EC_LPC_ADDR_ACPI_DATA + */ +#define EC_CMD_ACPI_WRITE 0x81 + +/* + * ACPI Query Embedded Controller + * + * This clears the lowest-order bit in the currently pending host events, and + * sets the result code to the 1-based index of the bit (event 0x00000001 = 1, + * event 0x80000000 = 32), or 0 if no event was pending. + */ +#define EC_CMD_ACPI_QUERY_EVENT 0x84 + +/* Valid addresses in ACPI memory space, for read/write commands */ + +/* Memory space version; set to EC_ACPI_MEM_VERSION_CURRENT */ +#define EC_ACPI_MEM_VERSION 0x00 +/* + * Test location; writing value here updates test compliment byte to (0xff - + * value). + */ +#define EC_ACPI_MEM_TEST 0x01 +/* Test compliment; writes here are ignored. */ +#define EC_ACPI_MEM_TEST_COMPLIMENT 0x02 + +/* Keyboard backlight brightness percent (0 - 100) */ +#define EC_ACPI_MEM_KEYBOARD_BACKLIGHT 0x03 +/* DPTF Target Fan Duty (0-100, 0xff for auto/none) */ +#define EC_ACPI_MEM_FAN_DUTY 0x04 + +/* + * DPTF temp thresholds. Any of the EC's temp sensors can have up to two + * independent thresholds attached to them. The current value of the ID + * register determines which sensor is affected by the THRESHOLD and COMMIT + * registers. The THRESHOLD register uses the same EC_TEMP_SENSOR_OFFSET scheme + * as the memory-mapped sensors. 
The COMMIT register applies those settings. + * + * The spec does not mandate any way to read back the threshold settings + * themselves, but when a threshold is crossed the AP needs a way to determine + * which sensor(s) are responsible. Each reading of the ID register clears and + * returns one sensor ID that has crossed one of its threshold (in either + * direction) since the last read. A value of 0xFF means "no new thresholds + * have tripped". Setting or enabling the thresholds for a sensor will clear + * the unread event count for that sensor. + */ +#define EC_ACPI_MEM_TEMP_ID 0x05 +#define EC_ACPI_MEM_TEMP_THRESHOLD 0x06 +#define EC_ACPI_MEM_TEMP_COMMIT 0x07 +/* + * Here are the bits for the COMMIT register: + * bit 0 selects the threshold index for the chosen sensor (0/1) + * bit 1 enables/disables the selected threshold (0 = off, 1 = on) + * Each write to the commit register affects one threshold. + */ +#define EC_ACPI_MEM_TEMP_COMMIT_SELECT_MASK (1 << 0) +#define EC_ACPI_MEM_TEMP_COMMIT_ENABLE_MASK (1 << 1) +/* + * Example: + * + * Set the thresholds for sensor 2 to 50 C and 60 C: + * write 2 to [0x05] -- select temp sensor 2 + * write 0x7b to [0x06] -- C_TO_K(50) - EC_TEMP_SENSOR_OFFSET + * write 0x2 to [0x07] -- enable threshold 0 with this value + * write 0x85 to [0x06] -- C_TO_K(60) - EC_TEMP_SENSOR_OFFSET + * write 0x3 to [0x07] -- enable threshold 1 with this value + * + * Disable the 60 C threshold, leaving the 50 C threshold unchanged: + * write 2 to [0x05] -- select temp sensor 2 + * write 0x1 to [0x07] -- disable threshold 1 + */ + +/* DPTF battery charging current limit */ +#define EC_ACPI_MEM_CHARGING_LIMIT 0x08 + +/* Charging limit is specified in 64 mA steps */ +#define EC_ACPI_MEM_CHARGING_LIMIT_STEP_MA 64 +/* Value to disable DPTF battery charging limit */ +#define EC_ACPI_MEM_CHARGING_LIMIT_DISABLED 0xff + +/* Current version of ACPI memory address space */ +#define EC_ACPI_MEM_VERSION_CURRENT 1 + + 
+/*****************************************************************************/ +/* + * Special commands + * + * These do not follow the normal rules for commands. See each command for + * details. + */ + +/* + * Reboot NOW + * + * This command will work even when the EC LPC interface is busy, because the + * reboot command is processed at interrupt level. Note that when the EC + * reboots, the host will reboot too, so there is no response to this command. + * + * Use EC_CMD_REBOOT_EC to reboot the EC more politely. + */ +#define EC_CMD_REBOOT 0xd1 /* Think "die" */ + +/* + * Resend last response (not supported on LPC). + * + * Returns EC_RES_UNAVAILABLE if there is no response available - for example, + * there was no previous command, or the previous command's response was too + * big to save. + */ +#define EC_CMD_RESEND_RESPONSE 0xdb + +/* + * This header byte on a command indicate version 0. Any header byte less + * than this means that we are talking to an old EC which doesn't support + * versioning. In that case, we assume version 0. + * + * Header bytes greater than this indicate a later version. For example, + * EC_CMD_VERSION0 + 1 means we are using version 1. + * + * The old EC interface must not use commands 0xdc or higher. + */ +#define EC_CMD_VERSION0 0xdc + +#endif /* !__ACPI__ */ + +/*****************************************************************************/ +/* + * PD commands + * + * These commands are for PD MCU communication. 
+ */ + +/* EC to PD MCU exchange status command */ +#define EC_CMD_PD_EXCHANGE_STATUS 0x100 + +/* Status of EC being sent to PD */ +struct ec_params_pd_status { + int8_t batt_soc; /* battery state of charge */ +} __packed; + +/* Status of PD being sent back to EC */ +struct ec_response_pd_status { + int8_t status; /* PD MCU status */ + uint32_t curr_lim_ma; /* input current limit */ +} __packed; + +/* Set USB type-C port role and muxes */ +#define EC_CMD_USB_PD_CONTROL 0x101 + +enum usb_pd_control_role { + USB_PD_CTRL_ROLE_NO_CHANGE = 0, + USB_PD_CTRL_ROLE_TOGGLE_ON = 1, /* == AUTO */ + USB_PD_CTRL_ROLE_TOGGLE_OFF = 2, + USB_PD_CTRL_ROLE_FORCE_SINK = 3, + USB_PD_CTRL_ROLE_FORCE_SOURCE = 4, +}; + +enum usb_pd_control_mux { + USB_PD_CTRL_MUX_NO_CHANGE = 0, + USB_PD_CTRL_MUX_NONE = 1, + USB_PD_CTRL_MUX_USB = 2, + USB_PD_CTRL_MUX_DP = 3, + USB_PD_CTRL_MUX_DOCK = 4, + USB_PD_CTRL_MUX_AUTO = 5, +}; + +struct ec_params_usb_pd_control { + uint8_t port; + uint8_t role; + uint8_t mux; +} __packed; + +/*****************************************************************************/ +/* + * Passthru commands + * + * Some platforms have sub-processors chained to each other. For example. + * + * AP <--> EC <--> PD MCU + * + * The top 2 bits of the command number are used to indicate which device the + * command is intended for. Device 0 is always the device receiving the + * command; other device mapping is board-specific. + * + * When a device receives a command to be passed to a sub-processor, it passes + * it on with the device number set back to 0. This allows the sub-processor + * to remain blissfully unaware of whether the command originated on the next + * device up the chain, or was passed through from the AP. 
+ * + * In the above example, if the AP wants to send command 0x0002 to the PD MCU, + * AP sends command 0x4002 to the EC + * EC sends command 0x0002 to the PD MCU + * EC forwards PD MCU response back to the AP + */ + +/* Offset and max command number for sub-device n */ +#define EC_CMD_PASSTHRU_OFFSET(n) (0x4000 * (n)) +#define EC_CMD_PASSTHRU_MAX(n) (EC_CMD_PASSTHRU_OFFSET(n) + 0x3fff) + +/*****************************************************************************/ +/* + * Deprecated constants. These constants have been renamed for clarity. The + * meaning and size has not changed. Programs that use the old names should + * switch to the new names soon, as the old names may not be carried forward + * forever. + */ +#define EC_HOST_PARAM_SIZE EC_PROTO2_MAX_PARAM_SIZE +#define EC_LPC_ADDR_OLD_PARAM EC_HOST_CMD_REGION1 +#define EC_OLD_PARAM_SIZE EC_HOST_CMD_REGION_SIZE + +#endif /* __CROS_EC_COMMANDS_H */ diff --git a/include/linux/mfd/da8xx-cfgchip.h b/include/linux/mfd/da8xx-cfgchip.h index 93bbfc2c1d..304985e288 100644 --- a/include/linux/mfd/da8xx-cfgchip.h +++ b/include/linux/mfd/da8xx-cfgchip.h @@ -1,8 +1,17 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * TI DaVinci DA8xx CHIPCFGx registers for syscon consumers. * * Copyright (C) 2016 David Lechner + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
*/ #ifndef __LINUX_MFD_DA8XX_CFGCHIP_H diff --git a/include/linux/mfd/da903x.h b/include/linux/mfd/da903x.h index d1c57b8dbb..0aa3a1a49e 100644 --- a/include/linux/mfd/da903x.h +++ b/include/linux/mfd/da903x.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef __LINUX_PMIC_DA903X_H #define __LINUX_PMIC_DA903X_H diff --git a/include/linux/mfd/da9052/da9052.h b/include/linux/mfd/da9052/da9052.h index 76feb3a706..ce9230af09 100644 --- a/include/linux/mfd/da9052/da9052.h +++ b/include/linux/mfd/da9052/da9052.h @@ -1,10 +1,24 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * da9052 declarations for DA9052 PMICs. * * Copyright(c) 2011 Dialog Semiconductor Ltd. * * Author: David Dajun Chen + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 
+ * */ #ifndef __MFD_DA9052_DA9052_H @@ -31,12 +45,6 @@ #define DA9052_ADC_TJUNC 8 #define DA9052_ADC_VBBAT 9 -/* TSI channel has its own 4 channel mux */ -#define DA9052_ADC_TSI_XP 70 -#define DA9052_ADC_TSI_XN 71 -#define DA9052_ADC_TSI_YP 72 -#define DA9052_ADC_TSI_YN 73 - #define DA9052_IRQ_DCIN 0 #define DA9052_IRQ_VBUS 1 #define DA9052_IRQ_DCINREM 2 diff --git a/include/linux/mfd/da9052/pdata.h b/include/linux/mfd/da9052/pdata.h index 60fcab3254..62c5c3c299 100644 --- a/include/linux/mfd/da9052/pdata.h +++ b/include/linux/mfd/da9052/pdata.h @@ -1,10 +1,24 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * Platform data declarations for DA9052 PMICs. * * Copyright(c) 2011 Dialog Semiconductor Ltd. * * Author: David Dajun Chen + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. + * */ #ifndef __MFD_DA9052_PDATA_H__ diff --git a/include/linux/mfd/da9052/reg.h b/include/linux/mfd/da9052/reg.h index 752b20b16d..5010f97872 100644 --- a/include/linux/mfd/da9052/reg.h +++ b/include/linux/mfd/da9052/reg.h @@ -1,10 +1,24 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * Register declarations for DA9052 PMICs. * * Copyright(c) 2011 Dialog Semiconductor Ltd. 
* * Author: David Dajun Chen + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. + * */ #ifndef __LINUX_MFD_DA9052_REG_H @@ -676,10 +690,7 @@ /* TSI CONTROL REGISTER B BITS */ #define DA9052_TSICONTB_ADCREF 0X80 #define DA9052_TSICONTB_TSIMAN 0X40 -#define DA9052_TSICONTB_TSIMUX_XP 0X00 -#define DA9052_TSICONTB_TSIMUX_YP 0X10 -#define DA9052_TSICONTB_TSIMUX_XN 0X20 -#define DA9052_TSICONTB_TSIMUX_YN 0X30 +#define DA9052_TSICONTB_TSIMUX 0X30 #define DA9052_TSICONTB_TSISEL3 0X08 #define DA9052_TSICONTB_TSISEL2 0X04 #define DA9052_TSICONTB_TSISEL1 0X02 @@ -694,14 +705,8 @@ /* TSI CO-ORDINATE LSB RESULT REGISTER BITS */ #define DA9052_TSILSB_PENDOWN 0X40 #define DA9052_TSILSB_TSIZL 0X30 -#define DA9052_TSILSB_TSIZL_SHIFT 4 -#define DA9052_TSILSB_TSIZL_BITS 2 #define DA9052_TSILSB_TSIYL 0X0C -#define DA9052_TSILSB_TSIYL_SHIFT 2 -#define DA9052_TSILSB_TSIYL_BITS 2 #define DA9052_TSILSB_TSIXL 0X03 -#define DA9052_TSILSB_TSIXL_SHIFT 0 -#define DA9052_TSILSB_TSIXL_BITS 2 /* TSI Z MEASUREMENT MSB RESULT REGISTER BIT */ #define DA9052_TSIZMSB_TSIZM 0XFF diff --git a/include/linux/mfd/da9055/core.h b/include/linux/mfd/da9055/core.h index a96eba52c4..5dc743fd63 100644 --- a/include/linux/mfd/da9055/core.h +++ b/include/linux/mfd/da9055/core.h @@ -1,10 +1,24 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * da9055 
declarations for DA9055 PMICs. * * Copyright(c) 2012 Dialog Semiconductor Ltd. * * Author: David Dajun Chen + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. + * */ #ifndef __DA9055_CORE_H diff --git a/include/linux/mfd/da9055/pdata.h b/include/linux/mfd/da9055/pdata.h index d3f126990a..04e092be4b 100644 --- a/include/linux/mfd/da9055/pdata.h +++ b/include/linux/mfd/da9055/pdata.h @@ -1,5 +1,10 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ /* Copyright (C) 2012 Dialog Semiconductor Ltd. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * */ #ifndef __DA9055_PDATA_H #define __DA9055_PDATA_H @@ -7,7 +12,6 @@ #define DA9055_MAX_REGULATORS 8 struct da9055; -struct gpio_desc; enum gpio_select { NO_GPIO = 0, @@ -35,7 +39,7 @@ struct da9055_pdata { int *gpio_rsel; /* * Regulator mode control bits value (GPI offset) that - * controls the regulator state, 0 if not available. + * that controls the regulator state, 0 if not available. */ enum gpio_select *reg_ren; /* @@ -43,7 +47,7 @@ struct da9055_pdata { * controls the regulator set A/B, 0 if not available. 
*/ enum gpio_select *reg_rsel; - /* GPIO descriptors to enable regulator, NULL if not available */ - struct gpio_desc **ena_gpiods; + /* GPIOs to enable regulator, 0 if not available */ + int *ena_gpio; }; #endif /* __DA9055_PDATA_H */ diff --git a/include/linux/mfd/da9055/reg.h b/include/linux/mfd/da9055/reg.h index 54a717b6c3..2b592e072d 100644 --- a/include/linux/mfd/da9055/reg.h +++ b/include/linux/mfd/da9055/reg.h @@ -1,10 +1,24 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * DA9055 declarations for DA9055 PMICs. * * Copyright(c) 2012 Dialog Semiconductor Ltd. * * Author: David Dajun Chen + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. + * */ #ifndef __DA9055_REG_H diff --git a/include/linux/mfd/da9062/core.h b/include/linux/mfd/da9062/core.h index ea0c670992..376ba84366 100644 --- a/include/linux/mfd/da9062/core.h +++ b/include/linux/mfd/da9062/core.h @@ -1,6 +1,15 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ /* - * Copyright (C) 2015-2017 Dialog Semiconductor + * Copyright (C) 2015 Dialog Semiconductor Ltd. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. 
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. */ #ifndef __MFD_DA9062_CORE_H__ @@ -9,31 +18,7 @@ #include #include -enum da9062_compatible_types { - COMPAT_TYPE_DA9061 = 1, - COMPAT_TYPE_DA9062, -}; - -enum da9061_irqs { - /* IRQ A */ - DA9061_IRQ_ONKEY, - DA9061_IRQ_WDG_WARN, - DA9061_IRQ_SEQ_RDY, - /* IRQ B*/ - DA9061_IRQ_TEMP, - DA9061_IRQ_LDO_LIM, - DA9061_IRQ_DVC_RDY, - DA9061_IRQ_VDD_WARN, - /* IRQ C */ - DA9061_IRQ_GPI0, - DA9061_IRQ_GPI1, - DA9061_IRQ_GPI2, - DA9061_IRQ_GPI3, - DA9061_IRQ_GPI4, - - DA9061_NUM_IRQ, -}; - +/* Interrupts */ enum da9062_irqs { /* IRQ A */ DA9062_IRQ_ONKEY, @@ -60,7 +45,6 @@ struct da9062 { struct device *dev; struct regmap *regmap; struct regmap_irq_chip_data *regmap_irq; - enum da9062_compatible_types chip_type; }; #endif /* __MFD_DA9062_CORE_H__ */ diff --git a/include/linux/mfd/da9062/registers.h b/include/linux/mfd/da9062/registers.h index 2906bf6160..97790d1b02 100644 --- a/include/linux/mfd/da9062/registers.h +++ b/include/linux/mfd/da9062/registers.h @@ -1,6 +1,16 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ /* - * Copyright (C) 2015-2017 Dialog Semiconductor + * registers.h - REGISTERS H for DA9062 + * Copyright (C) 2015 Dialog Semiconductor Ltd. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
*/ #ifndef __DA9062_H__ @@ -8,8 +18,6 @@ #define DA9062_PMIC_DEVICE_ID 0x62 #define DA9062_PMIC_VARIANT_MRC_AA 0x01 -#define DA9062_PMIC_VARIANT_VRC_DA9061 0x01 -#define DA9062_PMIC_VARIANT_VRC_DA9062 0x02 #define DA9062_I2C_PAGE_SEL_SHIFT 1 @@ -797,9 +805,6 @@ #define DA9062AA_BUCK3_SL_A_SHIFT 7 #define DA9062AA_BUCK3_SL_A_MASK BIT(7) -/* DA9062AA_VLDO[1-4]_A common */ -#define DA9062AA_VLDO_A_MIN_SEL 2 - /* DA9062AA_VLDO1_A = 0x0A9 */ #define DA9062AA_VLDO1_A_SHIFT 0 #define DA9062AA_VLDO1_A_MASK 0x3f diff --git a/include/linux/mfd/da9063/core.h b/include/linux/mfd/da9063/core.h index fa7a43f02f..f3ae65db4c 100644 --- a/include/linux/mfd/da9063/core.h +++ b/include/linux/mfd/da9063/core.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0+ */ /* * Definitions for DA9063 MFD driver * @@ -6,6 +5,12 @@ * * Author: Michal Hajduk, Dialog Semiconductor * Author: Krystian Garbaciak, Dialog Semiconductor + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. 
+ * */ #ifndef __MFD_DA9063_CORE_H__ @@ -24,18 +29,14 @@ #define DA9063_DRVNAME_RTC "da9063-rtc" #define DA9063_DRVNAME_VIBRATION "da9063-vibration" -#define PMIC_CHIP_ID_DA9063 0x61 - -enum da9063_type { - PMIC_TYPE_DA9063 = 0, - PMIC_TYPE_DA9063L, +enum da9063_models { + PMIC_DA9063 = 0x61, }; enum da9063_variant_codes { PMIC_DA9063_AD = 0x3, PMIC_DA9063_BB = 0x5, PMIC_DA9063_CA = 0x6, - PMIC_DA9063_DA = 0x7, }; /* Interrupts */ @@ -71,10 +72,13 @@ enum da9063_irqs { DA9063_IRQ_GPI15, }; +#define DA9063_IRQ_BASE_OFFSET 0 +#define DA9063_NUM_IRQ (DA9063_IRQ_GPI15 + 1 - DA9063_IRQ_BASE_OFFSET) + struct da9063 { /* Device */ struct device *dev; - enum da9063_type type; + unsigned short model; unsigned char variant_code; unsigned int flags; @@ -90,4 +94,7 @@ struct da9063 { int da9063_device_init(struct da9063 *da9063, unsigned int irq); int da9063_irq_init(struct da9063 *da9063); +void da9063_device_exit(struct da9063 *da9063); +void da9063_irq_exit(struct da9063 *da9063); + #endif /* __MFD_DA9063_CORE_H__ */ diff --git a/include/linux/mfd/da9063/pdata.h b/include/linux/mfd/da9063/pdata.h new file mode 100644 index 0000000000..8a125701ef --- /dev/null +++ b/include/linux/mfd/da9063/pdata.h @@ -0,0 +1,112 @@ +/* + * Platform configuration options for DA9063 + * + * Copyright 2012 Dialog Semiconductor Ltd. + * + * Author: Michal Hajduk, Dialog Semiconductor + * Author: Krystian Garbaciak, Dialog Semiconductor + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. 
+ * + */ + +#ifndef __MFD_DA9063_PDATA_H__ +#define __MFD_DA9063_PDATA_H__ + +#include + +/* + * Regulator configuration + */ +/* DA9063 regulator IDs */ +enum { + /* BUCKs */ + DA9063_ID_BCORE1, + DA9063_ID_BCORE2, + DA9063_ID_BPRO, + DA9063_ID_BMEM, + DA9063_ID_BIO, + DA9063_ID_BPERI, + + /* BCORE1 and BCORE2 in merged mode */ + DA9063_ID_BCORES_MERGED, + /* BMEM and BIO in merged mode */ + DA9063_ID_BMEM_BIO_MERGED, + /* When two BUCKs are merged, they cannot be reused separately */ + + /* LDOs */ + DA9063_ID_LDO1, + DA9063_ID_LDO2, + DA9063_ID_LDO3, + DA9063_ID_LDO4, + DA9063_ID_LDO5, + DA9063_ID_LDO6, + DA9063_ID_LDO7, + DA9063_ID_LDO8, + DA9063_ID_LDO9, + DA9063_ID_LDO10, + DA9063_ID_LDO11, +}; + +/* Regulators platform data */ +struct da9063_regulator_data { + int id; + struct regulator_init_data *initdata; +}; + +struct da9063_regulators_pdata { + unsigned n_regulators; + struct da9063_regulator_data *regulator_data; +}; + + +/* + * RGB LED configuration + */ +/* LED IDs for flags in struct led_info. */ +enum { + DA9063_GPIO11_LED, + DA9063_GPIO14_LED, + DA9063_GPIO15_LED, + + DA9063_LED_NUM +}; +#define DA9063_LED_ID_MASK 0x3 + +/* LED polarity for flags in struct led_info. */ +#define DA9063_LED_HIGH_LEVEL_ACTIVE 0x0 +#define DA9063_LED_LOW_LEVEL_ACTIVE 0x4 + + +/* + * General PMIC configuration + */ +/* HWMON ADC channels configuration */ +#define DA9063_FLG_FORCE_IN0_MANUAL_MODE 0x0010 +#define DA9063_FLG_FORCE_IN0_AUTO_MODE 0x0020 +#define DA9063_FLG_FORCE_IN1_MANUAL_MODE 0x0040 +#define DA9063_FLG_FORCE_IN1_AUTO_MODE 0x0080 +#define DA9063_FLG_FORCE_IN2_MANUAL_MODE 0x0100 +#define DA9063_FLG_FORCE_IN2_AUTO_MODE 0x0200 +#define DA9063_FLG_FORCE_IN3_MANUAL_MODE 0x0400 +#define DA9063_FLG_FORCE_IN3_AUTO_MODE 0x0800 + +/* Disable register caching. 
*/ +#define DA9063_FLG_NO_CACHE 0x0008 + +struct da9063; + +/* DA9063 platform data */ +struct da9063_pdata { + int (*init)(struct da9063 *da9063); + int irq_base; + bool key_power; + unsigned flags; + struct da9063_regulators_pdata *regulators_pdata; + struct led_platform_data *leds_pdata; +}; + +#endif /* __MFD_DA9063_PDATA_H__ */ diff --git a/include/linux/mfd/da9063/registers.h b/include/linux/mfd/da9063/registers.h index 6e0f66a2e7..5d42859cb4 100644 --- a/include/linux/mfd/da9063/registers.h +++ b/include/linux/mfd/da9063/registers.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0+ */ /* * Registers definition for DA9063 modules * @@ -6,6 +5,12 @@ * * Author: Michal Hajduk, Dialog Semiconductor * Author: Krystian Garbaciak, Dialog Semiconductor + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. 
+ * */ #ifndef _DA9063_REG_H @@ -210,9 +215,9 @@ /* DA9063 Configuration registers */ /* OTP */ -#define DA9063_REG_OTP_CONT 0x101 -#define DA9063_REG_OTP_ADDR 0x102 -#define DA9063_REG_OTP_DATA 0x103 +#define DA9063_REG_OPT_COUNT 0x101 +#define DA9063_REG_OPT_ADDR 0x102 +#define DA9063_REG_OPT_DATA 0x103 /* Customer Trim and Configuration */ #define DA9063_REG_T_OFFSET 0x104 @@ -292,10 +297,8 @@ #define DA9063_BB_REG_GP_ID_19 0x134 /* Chip ID and variant */ -#define DA9063_REG_DEVICE_ID 0x181 -#define DA9063_REG_VARIANT_ID 0x182 -#define DA9063_REG_CUSTOMER_ID 0x183 -#define DA9063_REG_CONFIG_ID 0x184 +#define DA9063_REG_CHIP_ID 0x181 +#define DA9063_REG_CHIP_VARIANT 0x182 /* * PMIC registers bits @@ -931,6 +934,9 @@ #define DA9063_RTC_CLOCK 0x40 #define DA9063_OUT_32K_EN 0x80 +/* DA9063_REG_CHIP_VARIANT */ +#define DA9063_CHIP_VARIANT_SHIFT 4 + /* DA9063_REG_BUCK_ILIM_A (addr=0x9A) */ #define DA9063_BIO_ILIM_MASK 0x0F #define DA9063_BMEM_ILIM_MASK 0xF0 @@ -1037,9 +1043,6 @@ #define DA9063_NONKEY_PIN_AUTODOWN 0x02 #define DA9063_NONKEY_PIN_AUTOFLPRT 0x03 -/* DA9063_REG_CONFIG_J (addr=0x10F) */ -#define DA9063_TWOWIRE_TO 0x40 - /* DA9063_REG_MON_REG_5 (addr=0x116) */ #define DA9063_MON_A8_IDX_MASK 0x07 #define DA9063_MON_A8_IDX_NONE 0x00 @@ -1067,10 +1070,4 @@ #define DA9063_MON_A10_IDX_LDO9 0x04 #define DA9063_MON_A10_IDX_LDO10 0x05 -/* DA9063_REG_VARIANT_ID (addr=0x182) */ -#define DA9063_VARIANT_ID_VRC_SHIFT 0 -#define DA9063_VARIANT_ID_VRC_MASK 0x0F -#define DA9063_VARIANT_ID_MRC_SHIFT 4 -#define DA9063_VARIANT_ID_MRC_MASK 0xF0 - #endif /* _DA9063_REG_H */ diff --git a/include/linux/mfd/da9150/core.h b/include/linux/mfd/da9150/core.h index d116d5f3ef..1bf50caeb9 100644 --- a/include/linux/mfd/da9150/core.h +++ b/include/linux/mfd/da9150/core.h @@ -1,10 +1,14 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * DA9150 MFD Driver - Core Data * * Copyright (c) 2014 Dialog Semiconductor * * Author: Adam Thomson + * + * This program is free software; you can 
redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. */ #ifndef __DA9150_CORE_H diff --git a/include/linux/mfd/da9150/registers.h b/include/linux/mfd/da9150/registers.h index 1fd8f59688..27ca6ee4d8 100644 --- a/include/linux/mfd/da9150/registers.h +++ b/include/linux/mfd/da9150/registers.h @@ -1,10 +1,14 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * DA9150 MFD Driver - Registers * * Copyright (c) 2014 Dialog Semiconductor * * Author: Adam Thomson + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. */ #ifndef __DA9150_REGISTERS_H diff --git a/include/linux/mfd/davinci_voicecodec.h b/include/linux/mfd/davinci_voicecodec.h index 556375b913..8e1cdbef3d 100644 --- a/include/linux/mfd/davinci_voicecodec.h +++ b/include/linux/mfd/davinci_voicecodec.h @@ -1,10 +1,23 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * DaVinci Voice Codec Core Interface for TI platforms * * Copyright (C) 2010 Texas Instruments, Inc * * Author: Miguel Aguilar + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #ifndef __LINUX_MFD_DAVINCI_VOICECODEC_H_ @@ -15,6 +28,8 @@ #include #include +#include + struct regmap; /* @@ -84,6 +99,8 @@ struct davinci_vcif { dma_addr_t dma_rx_addr; }; +struct davinci_vc; + struct davinci_vc { /* Device data */ struct device *dev; diff --git a/include/linux/mfd/db8500-prcmu.h b/include/linux/mfd/db8500-prcmu.h index a62de3d155..7ba67b55b3 100644 --- a/include/linux/mfd/db8500-prcmu.h +++ b/include/linux/mfd/db8500-prcmu.h @@ -1,8 +1,8 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright (C) STMicroelectronics 2009 * Copyright (C) ST-Ericsson SA 2010 * + * License Terms: GNU General Public License v2 * Author: Kumar Sanghvi * * PRCMU f/w APIs @@ -489,7 +489,7 @@ struct prcmu_auto_pm_config { #ifdef CONFIG_MFD_DB8500_PRCMU -void db8500_prcmu_early_init(void); +void db8500_prcmu_early_init(u32 phy_base, u32 size); int prcmu_set_rc_a2p(enum romcode_write); enum romcode_read prcmu_get_rc_p2a(void); enum ap_pwrst prcmu_get_xp70_current_state(void); @@ -525,6 +525,9 @@ u8 db8500_prcmu_get_power_state_result(void); void db8500_prcmu_enable_wakeups(u32 wakeups); int db8500_prcmu_set_epod(u16 epod_id, u8 epod_state); int db8500_prcmu_request_clock(u8 clock, bool enable); +int db8500_prcmu_set_display_clocks(void); +int db8500_prcmu_disable_dsipll(void); +int db8500_prcmu_enable_dsipll(void); void db8500_prcmu_config_abb_event_readout(u32 abb_events); void db8500_prcmu_get_abb_event_buffer(void __iomem **buf); int db8500_prcmu_config_esram0_deep_sleep(u8 state); @@ -543,7 +546,7 @@ void db8500_prcmu_write_masked(unsigned int reg, u32 mask, u32 value); #else /* !CONFIG_MFD_DB8500_PRCMU */ -static inline void db8500_prcmu_early_init(void) {} +static inline void db8500_prcmu_early_init(u32 phy_base, u32 size) {} static inline int 
prcmu_set_rc_a2p(enum romcode_write code) { @@ -679,6 +682,21 @@ static inline int db8500_prcmu_request_clock(u8 clock, bool enable) return 0; } +static inline int db8500_prcmu_set_display_clocks(void) +{ + return 0; +} + +static inline int db8500_prcmu_disable_dsipll(void) +{ + return 0; +} + +static inline int db8500_prcmu_enable_dsipll(void) +{ + return 0; +} + static inline int db8500_prcmu_config_esram0_deep_sleep(u8 state) { return 0; @@ -720,7 +738,7 @@ static inline int db8500_prcmu_load_a9wdog(u8 id, u32 val) static inline bool db8500_prcmu_is_ac_wake_requested(void) { - return false; + return 0; } static inline int db8500_prcmu_set_arm_opp(u8 opp) diff --git a/include/linux/mfd/dbx500-prcmu.h b/include/linux/mfd/dbx500-prcmu.h index cbf9d76194..2e2c6a63a0 100644 --- a/include/linux/mfd/dbx500-prcmu.h +++ b/include/linux/mfd/dbx500-prcmu.h @@ -1,7 +1,8 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright (C) ST Ericsson SA 2011 * + * License Terms: GNU General Public License v2 + * * STE Ux500 PRCMU API */ #ifndef __MACH_PRCMU_H @@ -186,12 +187,10 @@ enum ddr_pwrst { #define PRCMU_FW_PROJECT_U8500_C3 8 #define PRCMU_FW_PROJECT_U8500_C4 9 #define PRCMU_FW_PROJECT_U9500_MBL 10 -#define PRCMU_FW_PROJECT_U8500_SSG1 11 /* Samsung specific */ +#define PRCMU_FW_PROJECT_U8500_MBL 11 /* Customer specific */ #define PRCMU_FW_PROJECT_U8500_MBL2 12 /* Customer specific */ #define PRCMU_FW_PROJECT_U8520 13 #define PRCMU_FW_PROJECT_U8420 14 -#define PRCMU_FW_PROJECT_U8500_SSG2 15 /* Samsung specific */ -#define PRCMU_FW_PROJECT_U8420_SYSCLK 17 #define PRCMU_FW_PROJECT_A9420 20 /* [32..63] 9540 and derivatives */ #define PRCMU_FW_PROJECT_U9540 32 @@ -213,9 +212,9 @@ struct prcmu_fw_version { #if defined(CONFIG_UX500_SOC_DB8500) -static inline void prcmu_early_init(void) +static inline void prcmu_early_init(u32 phy_base, u32 size) { - return db8500_prcmu_early_init(); + return db8500_prcmu_early_init(phy_base, size); } static inline int prcmu_set_power_state(u8 
state, bool keep_ulp_clk, @@ -322,6 +321,21 @@ static inline bool prcmu_is_ac_wake_requested(void) return db8500_prcmu_is_ac_wake_requested(); } +static inline int prcmu_set_display_clocks(void) +{ + return db8500_prcmu_set_display_clocks(); +} + +static inline int prcmu_disable_dsipll(void) +{ + return db8500_prcmu_disable_dsipll(); +} + +static inline int prcmu_enable_dsipll(void) +{ + return db8500_prcmu_enable_dsipll(); +} + static inline int prcmu_config_esram0_deep_sleep(u8 state) { return db8500_prcmu_config_esram0_deep_sleep(state); @@ -388,7 +402,7 @@ static inline int prcmu_config_a9wdog(u8 num, bool sleep_auto_off) } #else -static inline void prcmu_early_init(void) {} +static inline void prcmu_early_init(u32 phy_base, u32 size) {} static inline int prcmu_set_power_state(u8 state, bool keep_ulp_clk, bool keep_ap_pll) @@ -497,6 +511,21 @@ static inline bool prcmu_is_ac_wake_requested(void) return false; } +static inline int prcmu_set_display_clocks(void) +{ + return 0; +} + +static inline int prcmu_disable_dsipll(void) +{ + return 0; +} + +static inline int prcmu_enable_dsipll(void) +{ + return 0; +} + static inline int prcmu_config_esram0_deep_sleep(u8 state) { return 0; diff --git a/include/linux/mfd/dln2.h b/include/linux/mfd/dln2.h index 4cade9aa8e..004b24576d 100644 --- a/include/linux/mfd/dln2.h +++ b/include/linux/mfd/dln2.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef __LINUX_USB_DLN2_H #define __LINUX_USB_DLN2_H diff --git a/include/linux/mfd/ds1wm.h b/include/linux/mfd/ds1wm.h index 43dfca1c97..38a372a0e2 100644 --- a/include/linux/mfd/ds1wm.h +++ b/include/linux/mfd/ds1wm.h @@ -1,29 +1,13 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* MFD cell driver data for the DS1WM driver - * - * to be defined in the MFD device that is - * using this driver for one of his sub devices - */ +/* MFD cell driver data for the DS1WM driver */ struct ds1wm_driver_data { int active_high; int clock_rate; - /* in milliseconds, the amount of time to 
- * sleep following a reset pulse. Zero - * should work if your bus devices recover - * time respects the 1-wire spec since the - * ds1wm implements the precise timings of - * a reset pulse/presence detect sequence. - */ + /* in milliseconds, the amount of time to */ + /* sleep following a reset pulse. Zero */ + /* should work if your bus devices recover*/ + /* time respects the 1-wire spec since the*/ + /* ds1wm implements the precise timings of*/ + /* a reset pulse/presence detect sequence.*/ unsigned int reset_recover_delay; - - /* Say 1 here for big endian Hardware - * (only relevant with bus-shift > 0 - */ - bool is_hw_big_endian; - - /* left shift of register number to get register address offsett. - * Only 0,1,2 allowed for 8,16 or 32 bit bus width respectively - */ - unsigned int bus_shift; }; diff --git a/include/linux/mfd/ezx-pcap.h b/include/linux/mfd/ezx-pcap.h index ffde195e12..32a1b5cfeb 100644 --- a/include/linux/mfd/ezx-pcap.h +++ b/include/linux/mfd/ezx-pcap.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ /* * Copyright 2009 Daniel Ribeiro * diff --git a/include/linux/mfd/hi6421-pmic.h b/include/linux/mfd/hi6421-pmic.h index 2cadf8897c..587273e35a 100644 --- a/include/linux/mfd/hi6421-pmic.h +++ b/include/linux/mfd/hi6421-pmic.h @@ -1,13 +1,16 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * Header file for device driver Hi6421 PMIC * * Copyright (c) <2011-2014> HiSilicon Technologies Co., Ltd. * http://www.hisilicon.com * Copyright (c) <2013-2014> Linaro Ltd. - * https://www.linaro.org + * http://www.linaro.org * * Author: Guodong Xu + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
*/ #ifndef __HI6421_PMIC_H @@ -35,9 +38,4 @@ struct hi6421_pmic { struct regmap *regmap; }; -enum hi6421_type { - HI6421 = 0, - HI6421_V530, -}; - #endif /* __HI6421_PMIC_H */ diff --git a/include/linux/mfd/hi655x-pmic.h b/include/linux/mfd/hi655x-pmic.h index af5d97239c..62f03c2b1b 100644 --- a/include/linux/mfd/hi655x-pmic.h +++ b/include/linux/mfd/hi655x-pmic.h @@ -1,12 +1,15 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * Device driver for regulators in hi655x IC * - * Copyright (c) 2016 HiSilicon Ltd. + * Copyright (c) 2016 Hisilicon. * * Authors: * Chen Feng * Fei Wang + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. */ #ifndef __HI655X_PMIC_H diff --git a/include/linux/mfd/imx25-tsadc.h b/include/linux/mfd/imx25-tsadc.h index 21f8adfefd..7fe4b8c3ba 100644 --- a/include/linux/mfd/imx25-tsadc.h +++ b/include/linux/mfd/imx25-tsadc.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_INCLUDE_MFD_IMX25_TSADC_H_ #define _LINUX_INCLUDE_MFD_IMX25_TSADC_H_ diff --git a/include/linux/mfd/intel_bxtwc.h b/include/linux/mfd/intel_bxtwc.h new file mode 100644 index 0000000000..1a0ee9d6ef --- /dev/null +++ b/include/linux/mfd/intel_bxtwc.h @@ -0,0 +1,69 @@ +/* + * intel_bxtwc.h - Header file for Intel Broxton Whiskey Cove PMIC + * + * Copyright (C) 2015 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+ */ + +#include + +#ifndef __INTEL_BXTWC_H__ +#define __INTEL_BXTWC_H__ + +/* BXT WC devices */ +#define BXTWC_DEVICE1_ADDR 0x4E +#define BXTWC_DEVICE2_ADDR 0x4F +#define BXTWC_DEVICE3_ADDR 0x5E + +/* device1 Registers */ +#define BXTWC_CHIPID 0x4E00 +#define BXTWC_CHIPVER 0x4E01 + +#define BXTWC_SCHGRIRQ0_ADDR 0x5E1A +#define BXTWC_CHGRCTRL0_ADDR 0x5E16 +#define BXTWC_CHGRCTRL1_ADDR 0x5E17 +#define BXTWC_CHGRCTRL2_ADDR 0x5E18 +#define BXTWC_CHGRSTATUS_ADDR 0x5E19 +#define BXTWC_THRMBATZONE_ADDR 0x4F22 + +#define BXTWC_USBPATH_ADDR 0x5E19 +#define BXTWC_USBPHYCTRL_ADDR 0x5E07 +#define BXTWC_USBIDCTRL_ADDR 0x5E05 +#define BXTWC_USBIDEN_MASK 0x01 +#define BXTWC_USBIDSTAT_ADDR 0x00FF +#define BXTWC_USBSRCDETSTATUS_ADDR 0x5E29 + +#define BXTWC_DBGUSBBC1_ADDR 0x5FE0 +#define BXTWC_DBGUSBBC2_ADDR 0x5FE1 +#define BXTWC_DBGUSBBCSTAT_ADDR 0x5FE2 + +#define BXTWC_WAKESRC_ADDR 0x4E22 +#define BXTWC_WAKESRC2_ADDR 0x4EE5 +#define BXTWC_CHRTTADDR_ADDR 0x5E22 +#define BXTWC_CHRTTDATA_ADDR 0x5E23 + +#define BXTWC_STHRMIRQ0_ADDR 0x4F19 +#define WC_MTHRMIRQ1_ADDR 0x4E12 +#define WC_STHRMIRQ1_ADDR 0x4F1A +#define WC_STHRMIRQ2_ADDR 0x4F1B + +#define BXTWC_THRMZN0H_ADDR 0x4F44 +#define BXTWC_THRMZN0L_ADDR 0x4F45 +#define BXTWC_THRMZN1H_ADDR 0x4F46 +#define BXTWC_THRMZN1L_ADDR 0x4F47 +#define BXTWC_THRMZN2H_ADDR 0x4F48 +#define BXTWC_THRMZN2L_ADDR 0x4F49 +#define BXTWC_THRMZN3H_ADDR 0x4F4A +#define BXTWC_THRMZN3L_ADDR 0x4F4B +#define BXTWC_THRMZN4H_ADDR 0x4F4C +#define BXTWC_THRMZN4L_ADDR 0x4F4D + +#endif diff --git a/include/linux/mfd/intel_msic.h b/include/linux/mfd/intel_msic.h index 317e8608cf..439a7a617b 100644 --- a/include/linux/mfd/intel_msic.h +++ b/include/linux/mfd/intel_msic.h @@ -1,9 +1,12 @@ -/* SPDX-License-Identifier: GPL-2.0 */ /* - * Core interface for Intel MSIC + * include/linux/mfd/intel_msic.h - Core interface for Intel MSIC * * Copyright (C) 2011, Intel Corporation * Author: Mika Westerberg + * + * This program is free software; you can redistribute it and/or 
modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. */ #ifndef __LINUX_MFD_INTEL_MSIC_H__ diff --git a/include/linux/mfd/intel_soc_pmic.h b/include/linux/mfd/intel_soc_pmic.h index 6a88e34cb9..cf619dbeac 100644 --- a/include/linux/mfd/intel_soc_pmic.h +++ b/include/linux/mfd/intel_soc_pmic.h @@ -1,9 +1,17 @@ -/* SPDX-License-Identifier: GPL-2.0 */ /* - * Intel SoC PMIC Driver + * intel_soc_pmic.h - Intel SoC PMIC Driver * * Copyright (C) 2012-2014 Intel Corporation. All rights reserved. * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License version + * 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * * Author: Yang, Bin * Author: Zhu, Lejun */ @@ -13,35 +21,12 @@ #include -/** - * struct intel_soc_pmic - Intel SoC PMIC data - * @irq: Master interrupt number of the parent PMIC device - * @regmap: Pointer to the parent PMIC device regmap structure - * @irq_chip_data: IRQ chip data for the PMIC itself - * @irq_chip_data_pwrbtn: Chained IRQ chip data for the Power Button - * @irq_chip_data_tmu: Chained IRQ chip data for the Time Management Unit - * @irq_chip_data_bcu: Chained IRQ chip data for the Burst Control Unit - * @irq_chip_data_adc: Chained IRQ chip data for the General Purpose ADC - * @irq_chip_data_chgr: Chained IRQ chip data for the External Charger - * @irq_chip_data_crit: Chained IRQ chip data for the Critical Event Handler - * @dev: Pointer to the parent PMIC device - * @scu: Pointer to the SCU IPC device data structure - */ struct intel_soc_pmic { int irq; struct regmap *regmap; struct regmap_irq_chip_data *irq_chip_data; - struct regmap_irq_chip_data *irq_chip_data_pwrbtn; - struct regmap_irq_chip_data *irq_chip_data_tmu; - struct regmap_irq_chip_data *irq_chip_data_bcu; - struct regmap_irq_chip_data *irq_chip_data_adc; - struct regmap_irq_chip_data *irq_chip_data_chgr; - struct regmap_irq_chip_data *irq_chip_data_crit; + struct regmap_irq_chip_data *irq_chip_data_level2; struct device *dev; - struct intel_scu_ipc_dev *scu; }; -int intel_soc_pmic_exec_mipi_pmic_seq_element(u16 i2c_address, u32 reg_address, - u32 value, u32 mask); - #endif /* __INTEL_SOC_PMIC_H__ */ diff --git a/include/linux/mfd/ipaq-micro.h b/include/linux/mfd/ipaq-micro.h index ee48a4321c..5c4d29f667 100644 --- a/include/linux/mfd/ipaq-micro.h +++ b/include/linux/mfd/ipaq-micro.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ /* * Header file for the compaq Micro MFD */ diff --git a/include/linux/mfd/janz.h b/include/linux/mfd/janz.h index 90dea65fd7..e9994c4698 100644 --- a/include/linux/mfd/janz.h +++ b/include/linux/mfd/janz.h @@ -1,8 +1,12 @@ -/* 
SPDX-License-Identifier: GPL-2.0-or-later */ /* * Common Definitions for Janz MODULbus devices * * Copyright (c) 2010 Ira W. Snyder + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. */ #ifndef JANZ_H diff --git a/include/linux/mfd/kempld.h b/include/linux/mfd/kempld.h index 643c096b93..26e0b469e5 100644 --- a/include/linux/mfd/kempld.h +++ b/include/linux/mfd/kempld.h @@ -1,9 +1,12 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * Kontron PLD driver definitions * * Copyright (c) 2010-2012 Kontron Europe GmbH * Author: Michael Brunner + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License 2 as published + * by the Free Software Foundation. */ #ifndef _LINUX_MFD_KEMPLD_H_ diff --git a/include/linux/mfd/lm3533.h b/include/linux/mfd/lm3533.h index 77092f6363..594bc591f2 100644 --- a/include/linux/mfd/lm3533.h +++ b/include/linux/mfd/lm3533.h @@ -1,10 +1,14 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * lm3533.h -- LM3533 interface * * Copyright (C) 2011-2012 Texas Instruments * * Author: Johan Hovold + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. 
*/ #ifndef __LINUX_MFD_LM3533_H diff --git a/include/linux/mfd/lp3943.h b/include/linux/mfd/lp3943.h index 020a339f96..3490db7829 100644 --- a/include/linux/mfd/lp3943.h +++ b/include/linux/mfd/lp3943.h @@ -1,10 +1,14 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * TI/National Semiconductor LP3943 Device * * Copyright 2013 Texas Instruments * * Author: Milo Kim + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * */ #ifndef __MFD_LP3943_H__ diff --git a/include/linux/mfd/lp873x.h b/include/linux/mfd/lp873x.h index 5546688c7d..edbec8350a 100644 --- a/include/linux/mfd/lp873x.h +++ b/include/linux/mfd/lp873x.h @@ -1,7 +1,7 @@ /* * Functions to access LP873X power management chip. * - * Copyright (C) 2016 Texas Instruments Incorporated - https://www.ti.com/ + * Copyright (C) 2016 Texas Instruments Incorporated - http://www.ti.com/ * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License as diff --git a/include/linux/mfd/lp8788-isink.h b/include/linux/mfd/lp8788-isink.h index 464dc4c937..f38262d21f 100644 --- a/include/linux/mfd/lp8788-isink.h +++ b/include/linux/mfd/lp8788-isink.h @@ -1,10 +1,14 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * TI LP8788 MFD - common definitions for current sinks * * Copyright 2012 Texas Instruments * * Author: Milo(Woogyom) Kim + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ * */ #ifndef __ISINK_LP8788_H__ diff --git a/include/linux/mfd/lp8788.h b/include/linux/mfd/lp8788.h index 3d5c480d58..786bf6679a 100644 --- a/include/linux/mfd/lp8788.h +++ b/include/linux/mfd/lp8788.h @@ -1,10 +1,14 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * TI LP8788 MFD Device * * Copyright 2012 Texas Instruments * * Author: Milo(Woogyom) Kim + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * */ #ifndef __MFD_LP8788_H__ @@ -177,6 +181,20 @@ struct lp8788_buck2_dvs { enum lp8788_dvs_sel vsel; }; +/* + * struct lp8788_ldo_enable_pin + * + * Basically, all LDOs are enabled through the I2C commands. + * But ALDO 1 ~ 5, 7, DLDO 7, 9, 11 can be enabled by external gpio pins. + * + * @gpio : gpio number which is used for enabling ldos + * @init_state : initial gpio state (ex. GPIOF_OUT_INIT_LOW) + */ +struct lp8788_ldo_enable_pin { + int gpio; + int init_state; +}; + /* * struct lp8788_chg_param * @addr : charging control register address (range : 0x11 ~ 0x1C) @@ -270,6 +288,7 @@ struct lp8788_vib_platform_data { * @aldo_data : regulator initial data for analog ldo * @buck1_dvs : gpio configurations for buck1 dvs * @buck2_dvs : gpio configurations for buck2 dvs + * @ldo_pin : gpio configurations for enabling LDOs * @chg_pdata : platform data for charger driver * @alarm_sel : rtc alarm selection (1 or 2) * @bl_pdata : configurable data for backlight driver @@ -287,6 +306,7 @@ struct lp8788_platform_data { struct regulator_init_data *aldo_data[LP8788_NUM_ALDOS]; struct lp8788_buck1_dvs *buck1_dvs; struct lp8788_buck2_dvs *buck2_dvs; + struct lp8788_ldo_enable_pin *ldo_pin[EN_LDOS_MAX]; /* charger */ struct lp8788_charger_platform_data *chg_pdata; diff --git a/include/linux/mfd/lpc_ich.h b/include/linux/mfd/lpc_ich.h index 39967a5eca..2b300b44f9 100644 --- a/include/linux/mfd/lpc_ich.h +++ 
b/include/linux/mfd/lpc_ich.h @@ -1,15 +1,25 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * linux/drivers/mfd/lpc_ich.h * * Copyright (c) 2012 Extreme Engineering Solution, Inc. * Author: Aaron Sierra + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License 2 as published + * by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; see the file COPYING. If not, write to + * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. */ #ifndef LPC_ICH_H #define LPC_ICH_H -#include - /* GPIO resources */ #define ICH_RES_GPIO 0 #define ICH_RES_GPE0 1 @@ -30,7 +40,6 @@ struct lpc_ich_info { char name[32]; unsigned int iTCO_version; unsigned int gpio_version; - enum intel_spi_type spi_type; u8 use_gpio; }; diff --git a/include/linux/mfd/max14577-private.h b/include/linux/mfd/max14577-private.h index a21374f8ad..df75234f97 100644 --- a/include/linux/mfd/max14577-private.h +++ b/include/linux/mfd/max14577-private.h @@ -1,10 +1,19 @@ -/* SPDX-License-Identifier: GPL-2.0+ */ /* * max14577-private.h - Common API for the Maxim 14577/77836 internal sub chip * * Copyright (C) 2014 Samsung Electrnoics * Chanwoo Choi * Krzysztof Kozlowski + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. 
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. */ #ifndef __MAX14577_PRIVATE_H__ diff --git a/include/linux/mfd/max14577.h b/include/linux/mfd/max14577.h index 8b3ef891ba..d81b52bb8b 100644 --- a/include/linux/mfd/max14577.h +++ b/include/linux/mfd/max14577.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0+ */ /* * max14577.h - Driver for the Maxim 14577/77836 * @@ -6,6 +5,16 @@ * Chanwoo Choi * Krzysztof Kozlowski * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * * This driver is based on max8997.h * * MAX14577 has MUIC, Charger devices. diff --git a/include/linux/mfd/max77620.h b/include/linux/mfd/max77620.h index f552ef5b11..3ca0af07fc 100644 --- a/include/linux/mfd/max77620.h +++ b/include/linux/mfd/max77620.h @@ -1,8 +1,11 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * Defining registers address and its bit definitions of MAX77620 and MAX20024 * * Copyright (C) 2016 NVIDIA CORPORATION. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. 
*/ #ifndef _MFD_MAX77620_H_ @@ -133,8 +136,8 @@ #define MAX77620_FPS_PERIOD_MIN_US 40 #define MAX20024_FPS_PERIOD_MIN_US 20 -#define MAX20024_FPS_PERIOD_MAX_US 2560 -#define MAX77620_FPS_PERIOD_MAX_US 5120 +#define MAX77620_FPS_PERIOD_MAX_US 2560 +#define MAX20024_FPS_PERIOD_MAX_US 5120 #define MAX77620_REG_FPS_GPIO1 0x54 #define MAX77620_REG_FPS_GPIO2 0x55 @@ -177,7 +180,6 @@ #define MAX77620_SD_CFG1_FPWM_SD_MASK BIT(2) #define MAX77620_SD_CFG1_FPWM_SD_SKIP 0 #define MAX77620_SD_CFG1_FPWM_SD_FPWM BIT(2) -#define MAX20024_SD_CFG1_MPOK_MASK BIT(1) #define MAX77620_SD_CFG1_FSRADE_SD_MASK BIT(0) #define MAX77620_SD_CFG1_FSRADE_SD_DISABLE 0 #define MAX77620_SD_CFG1_FSRADE_SD_ENABLE BIT(0) @@ -185,7 +187,6 @@ /* LDO_CNFG2 */ #define MAX77620_LDO_POWER_MODE_MASK 0xC0 #define MAX77620_LDO_POWER_MODE_SHIFT 6 -#define MAX20024_LDO_CFG2_MPOK_MASK BIT(2) #define MAX77620_LDO_CFG2_ADE_MASK BIT(1) #define MAX77620_LDO_CFG2_ADE_DISABLE 0 #define MAX77620_LDO_CFG2_ADE_ENABLE BIT(1) @@ -321,7 +322,6 @@ enum max77620_fps_src { enum max77620_chip_id { MAX77620, MAX20024, - MAX77663, }; struct max77620_chip { @@ -329,6 +329,7 @@ struct max77620_chip { struct regmap *rmap; int chip_irq; + int irq_base; /* chip id */ enum max77620_chip_id chip_id; diff --git a/include/linux/mfd/max77686-private.h b/include/linux/mfd/max77686-private.h index 833e578e05..643dae777b 100644 --- a/include/linux/mfd/max77686-private.h +++ b/include/linux/mfd/max77686-private.h @@ -1,9 +1,22 @@ -/* SPDX-License-Identifier: GPL-2.0+ */ /* * max77686-private.h - Voltage regulator driver for the Maxim 77686/802 * * Copyright (C) 2012 Samsung Electrnoics * Chiwoong Byun + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. 
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #ifndef __LINUX_MFD_MAX77686_PRIV_H diff --git a/include/linux/mfd/max77686.h b/include/linux/mfd/max77686.h index d0fb510875..d4b72d5191 100644 --- a/include/linux/mfd/max77686.h +++ b/include/linux/mfd/max77686.h @@ -1,10 +1,23 @@ -/* SPDX-License-Identifier: GPL-2.0+ */ /* * max77686.h - Driver for the Maxim 77686/802 * * Copyright (C) 2012 Samsung Electrnoics * Chiwoong Byun * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + * * This driver is based on max8997.h * * MAX77686 has PMIC, RTC devices. 
diff --git a/include/linux/mfd/max77693-common.h b/include/linux/mfd/max77693-common.h index a5bce099f1..095b121aa7 100644 --- a/include/linux/mfd/max77693-common.h +++ b/include/linux/mfd/max77693-common.h @@ -1,8 +1,12 @@ -/* SPDX-License-Identifier: GPL-2.0+ */ /* * Common data shared between Maxim 77693 and 77843 drivers * * Copyright (C) 2015 Samsung Electronics + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. */ #ifndef __LINUX_MFD_MAX77693_COMMON_H diff --git a/include/linux/mfd/max77693-private.h b/include/linux/mfd/max77693-private.h index 311f7d3d23..3c7a63b98a 100644 --- a/include/linux/mfd/max77693-private.h +++ b/include/linux/mfd/max77693-private.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0+ */ /* * max77693-private.h - Voltage regulator driver for the Maxim 77693 * @@ -6,6 +5,20 @@ * SangYoung Son * * This program is not provided / owned by Maxim Integrated Products. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #ifndef __LINUX_MFD_MAX77693_PRIV_H @@ -131,7 +144,7 @@ enum max77693_pmic_reg { #define FLASH_INT_FLED1_SHORT BIT(3) #define FLASH_INT_OVER_CURRENT BIT(4) -/* Fast charge timer in hours */ +/* Fast charge timer in in hours */ #define DEFAULT_FAST_CHARGE_TIMER 4 /* microamps */ #define DEFAULT_TOP_OFF_THRESHOLD_CURRENT 150000 diff --git a/include/linux/mfd/max77693.h b/include/linux/mfd/max77693.h index c67c16ba86..d450f68730 100644 --- a/include/linux/mfd/max77693.h +++ b/include/linux/mfd/max77693.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0+ */ /* * max77693.h - Driver for the Maxim 77693 * @@ -7,6 +6,20 @@ * * This program is not provided / owned by Maxim Integrated Products. * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + * * This driver is based on max8997.h * * MAX77693 has PMIC, Charger, Flash LED, Haptic, MUIC devices. 
diff --git a/include/linux/mfd/max77843-private.h b/include/linux/mfd/max77843-private.h index 0bc7454c4d..c19303b0cc 100644 --- a/include/linux/mfd/max77843-private.h +++ b/include/linux/mfd/max77843-private.h @@ -1,10 +1,14 @@ -/* SPDX-License-Identifier: GPL-2.0+ */ /* * Common variables for the Maxim MAX77843 driver * * Copyright (C) 2015 Samsung Electronics * Author: Jaewon Kim * Author: Beomho Seo + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. */ #ifndef __MAX77843_PRIVATE_H_ @@ -241,13 +245,10 @@ enum max77843_irq_muic { #define MAX77843_CHG_OVER_CURRENT_BAT (0x06 << 4) /* MAX77843 CHG_CNFG_00 register */ -#define MAX77843_CHG_MODE_MASK 0x0f #define MAX77843_CHG_DISABLE 0x00 #define MAX77843_CHG_ENABLE 0x05 #define MAX77843_CHG_MASK 0x01 -#define MAX77843_CHG_OTG_MASK 0x02 #define MAX77843_CHG_BUCK_MASK 0x04 -#define MAX77843_CHG_BOOST_MASK 0x08 /* MAX77843 CHG_CNFG_01 register */ #define MAX77843_CHG_RESTART_THRESHOLD_100 0x00 @@ -346,7 +347,6 @@ enum max77843_irq_muic { /* MAX77843 CONTROL register */ #define MAX77843_MUIC_CONTROL1_COMP1SW_SHIFT 0 #define MAX77843_MUIC_CONTROL1_COMP2SW_SHIFT 3 -#define MAX77843_MUIC_CONTROL1_NOBCCOMP_SHIFT 6 #define MAX77843_MUIC_CONTROL1_IDBEN_SHIFT 7 #define MAX77843_MUIC_CONTROL2_LOWPWR_SHIFT 0 #define MAX77843_MUIC_CONTROL2_ADCEN_SHIFT 1 @@ -363,7 +363,6 @@ enum max77843_irq_muic { #define MAX77843_MUIC_CONTROL1_COMP1SW_MASK (0x7 << MAX77843_MUIC_CONTROL1_COMP1SW_SHIFT) #define MAX77843_MUIC_CONTROL1_COMP2SW_MASK (0x7 << MAX77843_MUIC_CONTROL1_COMP2SW_SHIFT) #define MAX77843_MUIC_CONTROL1_IDBEN_MASK BIT(MAX77843_MUIC_CONTROL1_IDBEN_SHIFT) -#define MAX77843_MUIC_CONTROL1_NOBCCOMP_MASK BIT(MAX77843_MUIC_CONTROL1_NOBCCOMP_SHIFT) #define MAX77843_MUIC_CONTROL2_LOWPWR_MASK BIT(MAX77843_MUIC_CONTROL2_LOWPWR_SHIFT) 
#define MAX77843_MUIC_CONTROL2_ADCEN_MASK BIT(MAX77843_MUIC_CONTROL2_ADCEN_SHIFT) #define MAX77843_MUIC_CONTROL2_CPEN_MASK BIT(MAX77843_MUIC_CONTROL2_CPEN_SHIFT) diff --git a/include/linux/mfd/max8907.h b/include/linux/mfd/max8907.h index 4be3c2370e..b06f7a6a1e 100644 --- a/include/linux/mfd/max8907.h +++ b/include/linux/mfd/max8907.h @@ -1,9 +1,12 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * Functions to access MAX8907 power management chip. * * Copyright (C) 2010 Gyungoh Yoo * Copyright (C) 2012, NVIDIA CORPORATION. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. */ #ifndef __LINUX_MFD_MAX8907_H diff --git a/include/linux/mfd/max8925.h b/include/linux/mfd/max8925.h index 07f9af579f..ce8502e9e7 100644 --- a/include/linux/mfd/max8925.h +++ b/include/linux/mfd/max8925.h @@ -1,9 +1,12 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * Maxim8925 Interface * * Copyright (C) 2009 Marvell International Ltd. * Haojian Zhuang + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. */ #ifndef __LINUX_MFD_MAX8925_H diff --git a/include/linux/mfd/max8997-private.h b/include/linux/mfd/max8997-private.h index a10cd69452..78c76cd4d3 100644 --- a/include/linux/mfd/max8997-private.h +++ b/include/linux/mfd/max8997-private.h @@ -1,9 +1,22 @@ -/* SPDX-License-Identifier: GPL-2.0+ */ /* * max8997-private.h - Voltage regulator driver for the Maxim 8997 * * Copyright (C) 2010 Samsung Electrnoics * MyungJoo Ham + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. 
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #ifndef __LINUX_MFD_MAX8997_PRIV_H diff --git a/include/linux/mfd/max8997.h b/include/linux/mfd/max8997.h index e955e2f0a2..cf815577bd 100644 --- a/include/linux/mfd/max8997.h +++ b/include/linux/mfd/max8997.h @@ -1,10 +1,23 @@ -/* SPDX-License-Identifier: GPL-2.0+ */ /* * max8997.h - Driver for the Maxim 8997/8966 * * Copyright (C) 2009-2010 Samsung Electrnoics * MyungJoo Ham * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + * * This driver is based on max8998.h * * MAX8997 has PMIC, MUIC, HAPTIC, RTC, FLASH, and Fuel Gauge devices. 
@@ -165,6 +178,7 @@ struct max8997_led_platform_data { struct max8997_platform_data { /* IRQ */ int ono; + int wakeup; /* ---- PMIC ---- */ struct max8997_regulator_data *regulators; diff --git a/include/linux/mfd/max8998-private.h b/include/linux/mfd/max8998-private.h index 6deb5f5776..d68ada502f 100644 --- a/include/linux/mfd/max8998-private.h +++ b/include/linux/mfd/max8998-private.h @@ -1,10 +1,23 @@ -/* SPDX-License-Identifier: GPL-2.0+ */ /* * max8998-private.h - Voltage regulator driver for the Maxim 8998 * * Copyright (C) 2009-2010 Samsung Electrnoics * Kyungmin Park * Marek Szyprowski + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #ifndef __LINUX_MFD_MAX8998_PRIV_H diff --git a/include/linux/mfd/max8998.h b/include/linux/mfd/max8998.h index 79c020bd0c..e3956a654c 100644 --- a/include/linux/mfd/max8998.h +++ b/include/linux/mfd/max8998.h @@ -1,10 +1,23 @@ -/* SPDX-License-Identifier: GPL-2.0+ */ /* * max8998.h - Voltage regulator driver for the Maxim 8998 * * Copyright (C) 2009-2010 Samsung Electrnoics * Kyungmin Park * Marek Szyprowski + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #ifndef __LINUX_MFD_MAX8998_H @@ -39,7 +52,6 @@ enum { MAX8998_ENVICHG, MAX8998_ESAFEOUT1, MAX8998_ESAFEOUT2, - MAX8998_CHARGER, }; /** diff --git a/include/linux/mfd/mc13783.h b/include/linux/mfd/mc13783.h index c25b167674..4ff6137d8d 100644 --- a/include/linux/mfd/mc13783.h +++ b/include/linux/mfd/mc13783.h @@ -1,8 +1,11 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright 2010 Yong Shen * Copyright 2009-2010 Pengutronix * Uwe Kleine-Koenig + * + * This program is free software; you can redistribute it and/or modify it under + * the terms of the GNU General Public License version 2 as published by the + * Free Software Foundation. */ #ifndef __LINUX_MFD_MC13783_H #define __LINUX_MFD_MC13783_H diff --git a/include/linux/mfd/mc13892.h b/include/linux/mfd/mc13892.h index 880cd949d1..a00f2bec17 100644 --- a/include/linux/mfd/mc13892.h +++ b/include/linux/mfd/mc13892.h @@ -1,6 +1,9 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright 2010 Yong Shen + * + * This program is free software; you can redistribute it and/or modify it under + * the terms of the GNU General Public License version 2 as published by the + * Free Software Foundation. */ #ifndef __LINUX_MFD_MC13892_H diff --git a/include/linux/mfd/mc13xxx.h b/include/linux/mfd/mc13xxx.h index f372926d58..638222e43e 100644 --- a/include/linux/mfd/mc13xxx.h +++ b/include/linux/mfd/mc13xxx.h @@ -1,7 +1,10 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright 2009-2010 Pengutronix * Uwe Kleine-Koenig + * + * This program is free software; you can redistribute it and/or modify it under + * the terms of the GNU General Public License version 2 as published by the + * Free Software Foundation. 
*/ #ifndef __LINUX_MFD_MC13XXX_H #define __LINUX_MFD_MC13XXX_H @@ -240,13 +243,10 @@ struct mc13xxx_platform_data { #define MC13XXX_ADC0_LICELLCON (1 << 0) #define MC13XXX_ADC0_CHRGICON (1 << 1) #define MC13XXX_ADC0_BATICON (1 << 2) -#define MC13XXX_ADC0_ADIN7SEL_DIE (1 << 4) -#define MC13XXX_ADC0_ADIN7SEL_UID (2 << 4) #define MC13XXX_ADC0_ADREFEN (1 << 10) #define MC13XXX_ADC0_TSMOD0 (1 << 12) #define MC13XXX_ADC0_TSMOD1 (1 << 13) #define MC13XXX_ADC0_TSMOD2 (1 << 14) -#define MC13XXX_ADC0_CHRGRAWDIV (1 << 15) #define MC13XXX_ADC0_ADINC1 (1 << 16) #define MC13XXX_ADC0_ADINC2 (1 << 17) diff --git a/include/linux/mfd/mcp.h b/include/linux/mfd/mcp.h index fd5cafc77e..f682953043 100644 --- a/include/linux/mfd/mcp.h +++ b/include/linux/mfd/mcp.h @@ -1,8 +1,11 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * linux/drivers/mfd/mcp.h * * Copyright (C) 2001 Russell King, All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License. */ #ifndef MCP_H #define MCP_H diff --git a/include/linux/mfd/menelaus.h b/include/linux/mfd/menelaus.h index ce489aba88..9e85ac06da 100644 --- a/include/linux/mfd/menelaus.h +++ b/include/linux/mfd/menelaus.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ /* * Functions to access Menelaus power management chip */ diff --git a/include/linux/mfd/mt6323/core.h b/include/linux/mfd/mt6323/core.h index 2becc34431..06d0ec3b1f 100644 --- a/include/linux/mfd/mt6323/core.h +++ b/include/linux/mfd/mt6323/core.h @@ -1,6 +1,9 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright (c) 2016 Chen Zhong + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
*/ #ifndef __MFD_MT6323_CORE_H__ diff --git a/include/linux/mfd/mt6323/registers.h b/include/linux/mfd/mt6323/registers.h index 4455e57544..160f3c0e25 100644 --- a/include/linux/mfd/mt6323/registers.h +++ b/include/linux/mfd/mt6323/registers.h @@ -1,6 +1,9 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright (c) 2016 Chen Zhong + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. */ #ifndef __MFD_MT6323_REGISTERS_H__ diff --git a/include/linux/mfd/mt6397/core.h b/include/linux/mfd/mt6397/core.h index 56f210eebc..d678f526e4 100644 --- a/include/linux/mfd/mt6397/core.h +++ b/include/linux/mfd/mt6397/core.h @@ -1,23 +1,20 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright (c) 2014 MediaTek Inc. * Author: Flora Fu, MediaTek + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
*/ #ifndef __MFD_MT6397_CORE_H__ #define __MFD_MT6397_CORE_H__ -#include -#include - -enum chip_id { - MT6323_CHIP_ID = 0x23, - MT6358_CHIP_ID = 0x58, - MT6359_CHIP_ID = 0x59, - MT6391_CHIP_ID = 0x91, - MT6397_CHIP_ID = 0x97, -}; - enum mt6397_irq_numbers { MT6397_IRQ_SPKL_AB = 0, MT6397_IRQ_SPKR_AB, @@ -57,7 +54,6 @@ enum mt6397_irq_numbers { struct mt6397_chip { struct device *dev; struct regmap *regmap; - struct notifier_block pm_nb; int irq; struct irq_domain *irq_domain; struct mutex irqlock; @@ -66,11 +62,6 @@ struct mt6397_chip { u16 irq_masks_cache[2]; u16 int_con[2]; u16 int_status[2]; - u16 chip_id; - void *irq_data; }; -int mt6358_irq_init(struct mt6397_chip *chip); -int mt6397_irq_init(struct mt6397_chip *chip); - #endif /* __MFD_MT6397_CORE_H__ */ diff --git a/include/linux/mfd/mt6397/registers.h b/include/linux/mfd/mt6397/registers.h index 34d140627a..f23a0a60a8 100644 --- a/include/linux/mfd/mt6397/registers.h +++ b/include/linux/mfd/mt6397/registers.h @@ -1,7 +1,15 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright (c) 2014 MediaTek Inc. * Author: Flora Fu, MediaTek + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
*/ #ifndef __MFD_MT6397_REGISTERS_H__ diff --git a/include/linux/mfd/palmas.h b/include/linux/mfd/palmas.h index 1e61c7e9f5..5c9a1d44c1 100644 --- a/include/linux/mfd/palmas.h +++ b/include/linux/mfd/palmas.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * TI Palmas * @@ -6,6 +5,12 @@ * * Author: Graeme Gregory * Author: Ian Lartey + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + * */ #ifndef __LINUX_MFD_PALMAS_H @@ -15,7 +20,7 @@ #include #include #include -#include +#include #include #include @@ -245,7 +250,6 @@ enum tps65917_regulators { TPS65917_REG_SMPS3, TPS65917_REG_SMPS4, TPS65917_REG_SMPS5, - TPS65917_REG_SMPS12, /* LDO regulators */ TPS65917_REG_LDO1, TPS65917_REG_LDO2, @@ -313,7 +317,6 @@ enum tps65917_external_requestor_id { TPS65917_EXTERNAL_REQSTR_ID_SMPS3, TPS65917_EXTERNAL_REQSTR_ID_SMPS4, TPS65917_EXTERNAL_REQSTR_ID_SMPS5, - TPS65917_EXTERNAL_REQSTR_ID_SMPS12, TPS65917_EXTERNAL_REQSTR_ID_LDO1, TPS65917_EXTERNAL_REQSTR_ID_LDO2, TPS65917_EXTERNAL_REQSTR_ID_LDO3, @@ -548,6 +551,7 @@ struct palmas_pmic { struct palmas *palmas; struct device *dev; struct regulator_desc desc[PALMAS_NUM_REGS]; + struct regulator_dev *rdev[PALMAS_NUM_REGS]; struct mutex mutex; int smps123; @@ -3727,9 +3731,6 @@ enum usb_irq_events { #define TPS65917_REGEN3_CTRL_MODE_ACTIVE 0x01 #define TPS65917_REGEN3_CTRL_MODE_ACTIVE_SHIFT 0x00 -/* POWERHOLD Mask field for PRIMARY_SECONDARY_PAD2 register */ -#define TPS65917_PRIMARY_SECONDARY_PAD2_GPIO_5_MASK 0xC - /* Registers for function RESOURCE */ #define TPS65917_REGEN1_CTRL 0x2 #define TPS65917_PLLEN_CTRL 0x3 diff --git a/include/linux/mfd/pcf50633/adc.h b/include/linux/mfd/pcf50633/adc.h index 6a81896d48..b35e62801f 100644 --- a/include/linux/mfd/pcf50633/adc.h +++ b/include/linux/mfd/pcf50633/adc.h 
@@ -1,9 +1,13 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * adc.h -- Driver for NXP PCF50633 ADC * * (C) 2006-2008 by Openmoko, Inc. * All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. */ #ifndef __LINUX_MFD_PCF50633_ADC_H diff --git a/include/linux/mfd/pcf50633/backlight.h b/include/linux/mfd/pcf50633/backlight.h index fd4a4f8d6c..83747e217b 100644 --- a/include/linux/mfd/pcf50633/backlight.h +++ b/include/linux/mfd/pcf50633/backlight.h @@ -1,7 +1,16 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * Copyright (C) 2009-2010, Lars-Peter Clausen * PCF50633 backlight device driver + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write to the Free Software Foundation, Inc., + * 675 Mass Ave, Cambridge, MA 02139, USA. + * */ #ifndef __LINUX_MFD_PCF50633_BACKLIGHT diff --git a/include/linux/mfd/pcf50633/core.h b/include/linux/mfd/pcf50633/core.h index 3f752dc62a..a80840752b 100644 --- a/include/linux/mfd/pcf50633/core.h +++ b/include/linux/mfd/pcf50633/core.h @@ -1,9 +1,13 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * core.h -- Core driver for NXP PCF50633 * * (C) 2006-2008 by Openmoko, Inc. * All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. 
*/ #ifndef __LINUX_MFD_PCF50633_CORE_H diff --git a/include/linux/mfd/pcf50633/gpio.h b/include/linux/mfd/pcf50633/gpio.h index f589e35795..a42b845efc 100644 --- a/include/linux/mfd/pcf50633/gpio.h +++ b/include/linux/mfd/pcf50633/gpio.h @@ -1,9 +1,13 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * gpio.h -- GPIO driver for NXP PCF50633 * * (C) 2006-2008 by Openmoko, Inc. * All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. */ #ifndef __LINUX_MFD_PCF50633_GPIO_H diff --git a/include/linux/mfd/pcf50633/mbc.h b/include/linux/mfd/pcf50633/mbc.h index fa5cb9256d..df4f5fa88d 100644 --- a/include/linux/mfd/pcf50633/mbc.h +++ b/include/linux/mfd/pcf50633/mbc.h @@ -1,9 +1,13 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * mbc.h -- Driver for NXP PCF50633 Main Battery Charger * * (C) 2006-2008 by Openmoko, Inc. * All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. 
*/ #ifndef __LINUX_MFD_PCF50633_MBC_H diff --git a/include/linux/mfd/pcf50633/pmic.h b/include/linux/mfd/pcf50633/pmic.h index eac0c3d8e9..2d3dbe53b2 100644 --- a/include/linux/mfd/pcf50633/pmic.h +++ b/include/linux/mfd/pcf50633/pmic.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef __LINUX_MFD_PCF50633_PMIC_H #define __LINUX_MFD_PCF50633_PMIC_H diff --git a/include/linux/mfd/qcom_rpm.h b/include/linux/mfd/qcom_rpm.h index 4b6b644f11..742ebf1b76 100644 --- a/include/linux/mfd/qcom_rpm.h +++ b/include/linux/mfd/qcom_rpm.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef __QCOM_RPM_H__ #define __QCOM_RPM_H__ diff --git a/include/linux/mfd/rc5t583.h b/include/linux/mfd/rc5t583.h index 4f220146cc..8d0a392e0a 100644 --- a/include/linux/mfd/rc5t583.h +++ b/include/linux/mfd/rc5t583.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * Core driver interface to access RICOH_RC5T583 power management chip. * @@ -7,6 +6,19 @@ * * Based on code * Copyright (C) 2011 RICOH COMPANY,LTD + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . 
+ * */ #ifndef __LINUX_MFD_RC5T583_H diff --git a/include/linux/mfd/rdc321x.h b/include/linux/mfd/rdc321x.h index 697933b222..442743a8f9 100644 --- a/include/linux/mfd/rdc321x.h +++ b/include/linux/mfd/rdc321x.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef __RDC321X_MFD_H #define __RDC321X_MFD_H diff --git a/include/linux/mfd/rk808.h b/include/linux/mfd/rk808.h index a96e6d43ca..6d435a3c06 100644 --- a/include/linux/mfd/rk808.h +++ b/include/linux/mfd/rk808.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * Register definitions for Rockchip's RK808/RK818 PMIC * @@ -10,6 +9,15 @@ * Copyright (C) 2016 PHYTEC Messtechnik GmbH * * Author: Wadim Egorov + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
*/ #ifndef __LINUX_REGULATOR_RK808_H @@ -198,97 +206,6 @@ enum rk818_reg { #define RK818_USB_ILMIN_2000MA 0x7 #define RK818_USB_CHG_SD_VSEL_MASK 0x70 -/* RK805 */ -enum rk805_reg { - RK805_ID_DCDC1, - RK805_ID_DCDC2, - RK805_ID_DCDC3, - RK805_ID_DCDC4, - RK805_ID_LDO1, - RK805_ID_LDO2, - RK805_ID_LDO3, -}; - -/* CONFIG REGISTER */ -#define RK805_VB_MON_REG 0x21 -#define RK805_THERMAL_REG 0x22 - -/* POWER CHANNELS ENABLE REGISTER */ -#define RK805_DCDC_EN_REG 0x23 -#define RK805_SLP_DCDC_EN_REG 0x25 -#define RK805_SLP_LDO_EN_REG 0x26 -#define RK805_LDO_EN_REG 0x27 - -/* BUCK AND LDO CONFIG REGISTER */ -#define RK805_BUCK_LDO_SLP_LP_EN_REG 0x2A -#define RK805_BUCK1_CONFIG_REG 0x2E -#define RK805_BUCK1_ON_VSEL_REG 0x2F -#define RK805_BUCK1_SLP_VSEL_REG 0x30 -#define RK805_BUCK2_CONFIG_REG 0x32 -#define RK805_BUCK2_ON_VSEL_REG 0x33 -#define RK805_BUCK2_SLP_VSEL_REG 0x34 -#define RK805_BUCK3_CONFIG_REG 0x36 -#define RK805_BUCK4_CONFIG_REG 0x37 -#define RK805_BUCK4_ON_VSEL_REG 0x38 -#define RK805_BUCK4_SLP_VSEL_REG 0x39 -#define RK805_LDO1_ON_VSEL_REG 0x3B -#define RK805_LDO1_SLP_VSEL_REG 0x3C -#define RK805_LDO2_ON_VSEL_REG 0x3D -#define RK805_LDO2_SLP_VSEL_REG 0x3E -#define RK805_LDO3_ON_VSEL_REG 0x3F -#define RK805_LDO3_SLP_VSEL_REG 0x40 - -/* INTERRUPT REGISTER */ -#define RK805_PWRON_LP_INT_TIME_REG 0x47 -#define RK805_PWRON_DB_REG 0x48 -#define RK805_DEV_CTRL_REG 0x4B -#define RK805_INT_STS_REG 0x4C -#define RK805_INT_STS_MSK_REG 0x4D -#define RK805_GPIO_IO_POL_REG 0x50 -#define RK805_OUT_REG 0x52 -#define RK805_ON_SOURCE_REG 0xAE -#define RK805_OFF_SOURCE_REG 0xAF - -#define RK805_NUM_REGULATORS 7 - -#define RK805_PWRON_FALL_RISE_INT_EN 0x0 -#define RK805_PWRON_FALL_RISE_INT_MSK 0x81 - -/* RK805 IRQ Definitions */ -#define RK805_IRQ_PWRON_RISE 0 -#define RK805_IRQ_VB_LOW 1 -#define RK805_IRQ_PWRON 2 -#define RK805_IRQ_PWRON_LP 3 -#define RK805_IRQ_HOTDIE 4 -#define RK805_IRQ_RTC_ALARM 5 -#define RK805_IRQ_RTC_PERIOD 6 -#define RK805_IRQ_PWRON_FALL 7 - -#define 
RK805_IRQ_PWRON_RISE_MSK BIT(0) -#define RK805_IRQ_VB_LOW_MSK BIT(1) -#define RK805_IRQ_PWRON_MSK BIT(2) -#define RK805_IRQ_PWRON_LP_MSK BIT(3) -#define RK805_IRQ_HOTDIE_MSK BIT(4) -#define RK805_IRQ_RTC_ALARM_MSK BIT(5) -#define RK805_IRQ_RTC_PERIOD_MSK BIT(6) -#define RK805_IRQ_PWRON_FALL_MSK BIT(7) - -#define RK805_PWR_RISE_INT_STATUS BIT(0) -#define RK805_VB_LOW_INT_STATUS BIT(1) -#define RK805_PWRON_INT_STATUS BIT(2) -#define RK805_PWRON_LP_INT_STATUS BIT(3) -#define RK805_HOTDIE_INT_STATUS BIT(4) -#define RK805_ALARM_INT_STATUS BIT(5) -#define RK805_PERIOD_INT_STATUS BIT(6) -#define RK805_PWR_FALL_INT_STATUS BIT(7) - -#define RK805_BUCK1_2_ILMAX_MASK (3 << 6) -#define RK805_BUCK3_4_ILMAX_MASK (3 << 3) -#define RK805_RTC_PERIOD_INT_MASK (1 << 6) -#define RK805_RTC_ALARM_INT_MASK (1 << 5) -#define RK805_INT_ALARM_EN (1 << 3) -#define RK805_INT_TIMER_EN (1 << 2) - /* RK808 IRQ Definitions */ #define RK808_IRQ_VOUT_LO 0 #define RK808_IRQ_VB_LO 1 @@ -373,8 +290,6 @@ enum rk805_reg { #define SWITCH2_EN BIT(6) #define SWITCH1_EN BIT(5) #define DEV_OFF_RST BIT(3) -#define DEV_OFF BIT(0) -#define RTC_STOP BIT(0) #define VB_LO_ACT BIT(4) #define VB_LO_SEL_3500MV (7 << 0) @@ -382,267 +297,6 @@ enum rk805_reg { #define VOUT_LO_INT BIT(0) #define CLK32KOUT2_EN BIT(0) -#define TEMP115C 0x0c -#define TEMP_HOTDIE_MSK 0x0c -#define SLP_SD_MSK (0x3 << 2) -#define SHUTDOWN_FUN (0x2 << 2) -#define SLEEP_FUN (0x1 << 2) -#define RK8XX_ID_MSK 0xfff0 -#define PWM_MODE_MSK BIT(7) -#define FPWM_MODE BIT(7) -#define AUTO_PWM_MODE 0 - -enum rk817_reg_id { - RK817_ID_DCDC1 = 0, - RK817_ID_DCDC2, - RK817_ID_DCDC3, - RK817_ID_DCDC4, - RK817_ID_LDO1, - RK817_ID_LDO2, - RK817_ID_LDO3, - RK817_ID_LDO4, - RK817_ID_LDO5, - RK817_ID_LDO6, - RK817_ID_LDO7, - RK817_ID_LDO8, - RK817_ID_LDO9, - RK817_ID_BOOST, - RK817_ID_BOOST_OTG_SW, - RK817_NUM_REGULATORS -}; - -enum rk809_reg_id { - RK809_ID_DCDC5 = RK817_ID_BOOST, - RK809_ID_SW1, - RK809_ID_SW2, - RK809_NUM_REGULATORS -}; - -#define 
RK817_SECONDS_REG 0x00 -#define RK817_MINUTES_REG 0x01 -#define RK817_HOURS_REG 0x02 -#define RK817_DAYS_REG 0x03 -#define RK817_MONTHS_REG 0x04 -#define RK817_YEARS_REG 0x05 -#define RK817_WEEKS_REG 0x06 -#define RK817_ALARM_SECONDS_REG 0x07 -#define RK817_ALARM_MINUTES_REG 0x08 -#define RK817_ALARM_HOURS_REG 0x09 -#define RK817_ALARM_DAYS_REG 0x0a -#define RK817_ALARM_MONTHS_REG 0x0b -#define RK817_ALARM_YEARS_REG 0x0c -#define RK817_RTC_CTRL_REG 0xd -#define RK817_RTC_STATUS_REG 0xe -#define RK817_RTC_INT_REG 0xf -#define RK817_RTC_COMP_LSB_REG 0x10 -#define RK817_RTC_COMP_MSB_REG 0x11 - -/* RK817 Codec Registers */ -#define RK817_CODEC_DTOP_VUCTL 0x12 -#define RK817_CODEC_DTOP_VUCTIME 0x13 -#define RK817_CODEC_DTOP_LPT_SRST 0x14 -#define RK817_CODEC_DTOP_DIGEN_CLKE 0x15 -#define RK817_CODEC_AREF_RTCFG0 0x16 -#define RK817_CODEC_AREF_RTCFG1 0x17 -#define RK817_CODEC_AADC_CFG0 0x18 -#define RK817_CODEC_AADC_CFG1 0x19 -#define RK817_CODEC_DADC_VOLL 0x1a -#define RK817_CODEC_DADC_VOLR 0x1b -#define RK817_CODEC_DADC_SR_ACL0 0x1e -#define RK817_CODEC_DADC_ALC1 0x1f -#define RK817_CODEC_DADC_ALC2 0x20 -#define RK817_CODEC_DADC_NG 0x21 -#define RK817_CODEC_DADC_HPF 0x22 -#define RK817_CODEC_DADC_RVOLL 0x23 -#define RK817_CODEC_DADC_RVOLR 0x24 -#define RK817_CODEC_AMIC_CFG0 0x27 -#define RK817_CODEC_AMIC_CFG1 0x28 -#define RK817_CODEC_DMIC_PGA_GAIN 0x29 -#define RK817_CODEC_DMIC_LMT1 0x2a -#define RK817_CODEC_DMIC_LMT2 0x2b -#define RK817_CODEC_DMIC_NG1 0x2c -#define RK817_CODEC_DMIC_NG2 0x2d -#define RK817_CODEC_ADAC_CFG0 0x2e -#define RK817_CODEC_ADAC_CFG1 0x2f -#define RK817_CODEC_DDAC_POPD_DACST 0x30 -#define RK817_CODEC_DDAC_VOLL 0x31 -#define RK817_CODEC_DDAC_VOLR 0x32 -#define RK817_CODEC_DDAC_SR_LMT0 0x35 -#define RK817_CODEC_DDAC_LMT1 0x36 -#define RK817_CODEC_DDAC_LMT2 0x37 -#define RK817_CODEC_DDAC_MUTE_MIXCTL 0x38 -#define RK817_CODEC_DDAC_RVOLL 0x39 -#define RK817_CODEC_DDAC_RVOLR 0x3a -#define RK817_CODEC_AHP_ANTI0 0x3b -#define RK817_CODEC_AHP_ANTI1 0x3c 
-#define RK817_CODEC_AHP_CFG0 0x3d -#define RK817_CODEC_AHP_CFG1 0x3e -#define RK817_CODEC_AHP_CP 0x3f -#define RK817_CODEC_ACLASSD_CFG1 0x40 -#define RK817_CODEC_ACLASSD_CFG2 0x41 -#define RK817_CODEC_APLL_CFG0 0x42 -#define RK817_CODEC_APLL_CFG1 0x43 -#define RK817_CODEC_APLL_CFG2 0x44 -#define RK817_CODEC_APLL_CFG3 0x45 -#define RK817_CODEC_APLL_CFG4 0x46 -#define RK817_CODEC_APLL_CFG5 0x47 -#define RK817_CODEC_DI2S_CKM 0x48 -#define RK817_CODEC_DI2S_RSD 0x49 -#define RK817_CODEC_DI2S_RXCR1 0x4a -#define RK817_CODEC_DI2S_RXCR2 0x4b -#define RK817_CODEC_DI2S_RXCMD_TSD 0x4c -#define RK817_CODEC_DI2S_TXCR1 0x4d -#define RK817_CODEC_DI2S_TXCR2 0x4e -#define RK817_CODEC_DI2S_TXCR3_TXCMD 0x4f - -/* RK817_CODEC_DI2S_CKM */ -#define RK817_I2S_MODE_MASK (0x1 << 0) -#define RK817_I2S_MODE_MST (0x1 << 0) -#define RK817_I2S_MODE_SLV (0x0 << 0) - -/* RK817_CODEC_DDAC_MUTE_MIXCTL */ -#define DACMT_MASK (0x1 << 0) -#define DACMT_ENABLE (0x1 << 0) -#define DACMT_DISABLE (0x0 << 0) - -/* RK817_CODEC_DI2S_RXCR2 */ -#define VDW_RX_24BITS (0x17) -#define VDW_RX_16BITS (0x0f) - -/* RK817_CODEC_DI2S_TXCR2 */ -#define VDW_TX_24BITS (0x17) -#define VDW_TX_16BITS (0x0f) - -/* RK817_CODEC_AMIC_CFG0 */ -#define MIC_DIFF_MASK (0x1 << 7) -#define MIC_DIFF_DIS (0x0 << 7) -#define MIC_DIFF_EN (0x1 << 7) - -#define RK817_POWER_EN_REG(i) (0xb1 + (i)) -#define RK817_POWER_SLP_EN_REG(i) (0xb5 + (i)) - -#define RK817_POWER_CONFIG (0xb9) - -#define RK817_BUCK_CONFIG_REG(i) (0xba + (i) * 3) - -#define RK817_BUCK1_ON_VSEL_REG 0xBB -#define RK817_BUCK1_SLP_VSEL_REG 0xBC - -#define RK817_BUCK2_CONFIG_REG 0xBD -#define RK817_BUCK2_ON_VSEL_REG 0xBE -#define RK817_BUCK2_SLP_VSEL_REG 0xBF - -#define RK817_BUCK3_CONFIG_REG 0xC0 -#define RK817_BUCK3_ON_VSEL_REG 0xC1 -#define RK817_BUCK3_SLP_VSEL_REG 0xC2 - -#define RK817_BUCK4_CONFIG_REG 0xC3 -#define RK817_BUCK4_ON_VSEL_REG 0xC4 -#define RK817_BUCK4_SLP_VSEL_REG 0xC5 - -#define RK817_LDO_ON_VSEL_REG(idx) (0xcc + (idx) * 2) -#define RK817_BOOST_OTG_CFG 
(0xde) - -#define RK817_ID_MSB 0xed -#define RK817_ID_LSB 0xee - -#define RK817_SYS_STS 0xf0 -#define RK817_SYS_CFG(i) (0xf1 + (i)) - -#define RK817_ON_SOURCE_REG 0xf5 -#define RK817_OFF_SOURCE_REG 0xf6 - -/* INTERRUPT REGISTER */ -#define RK817_INT_STS_REG0 0xf8 -#define RK817_INT_STS_MSK_REG0 0xf9 -#define RK817_INT_STS_REG1 0xfa -#define RK817_INT_STS_MSK_REG1 0xfb -#define RK817_INT_STS_REG2 0xfc -#define RK817_INT_STS_MSK_REG2 0xfd -#define RK817_GPIO_INT_CFG 0xfe - -/* IRQ Definitions */ -#define RK817_IRQ_PWRON_FALL 0 -#define RK817_IRQ_PWRON_RISE 1 -#define RK817_IRQ_PWRON 2 -#define RK817_IRQ_PWMON_LP 3 -#define RK817_IRQ_HOTDIE 4 -#define RK817_IRQ_RTC_ALARM 5 -#define RK817_IRQ_RTC_PERIOD 6 -#define RK817_IRQ_VB_LO 7 -#define RK817_IRQ_PLUG_IN 8 -#define RK817_IRQ_PLUG_OUT 9 -#define RK817_IRQ_CHRG_TERM 10 -#define RK817_IRQ_CHRG_TIME 11 -#define RK817_IRQ_CHRG_TS 12 -#define RK817_IRQ_USB_OV 13 -#define RK817_IRQ_CHRG_IN_CLMP 14 -#define RK817_IRQ_BAT_DIS_ILIM 15 -#define RK817_IRQ_GATE_GPIO 16 -#define RK817_IRQ_TS_GPIO 17 -#define RK817_IRQ_CODEC_PD 18 -#define RK817_IRQ_CODEC_PO 19 -#define RK817_IRQ_CLASSD_MUTE_DONE 20 -#define RK817_IRQ_CLASSD_OCP 21 -#define RK817_IRQ_BAT_OVP 22 -#define RK817_IRQ_CHRG_BAT_HI 23 -#define RK817_IRQ_END (RK817_IRQ_CHRG_BAT_HI + 1) - -/* - * rtc_ctrl 0xd - * same as 808, except bit4 - */ -#define RK817_RTC_CTRL_RSV4 BIT(4) - -/* power config 0xb9 */ -#define RK817_BUCK3_FB_RES_MSK BIT(6) -#define RK817_BUCK3_FB_RES_INTER BIT(6) -#define RK817_BUCK3_FB_RES_EXT 0 - -/* buck config 0xba */ -#define RK817_RAMP_RATE_OFFSET 6 -#define RK817_RAMP_RATE_MASK (0x3 << RK817_RAMP_RATE_OFFSET) -#define RK817_RAMP_RATE_3MV_PER_US (0x0 << RK817_RAMP_RATE_OFFSET) -#define RK817_RAMP_RATE_6_3MV_PER_US (0x1 << RK817_RAMP_RATE_OFFSET) -#define RK817_RAMP_RATE_12_5MV_PER_US (0x2 << RK817_RAMP_RATE_OFFSET) -#define RK817_RAMP_RATE_25MV_PER_US (0x3 << RK817_RAMP_RATE_OFFSET) - -/* sys_cfg1 0xf2 */ -#define RK817_HOTDIE_TEMP_MSK (0x3 << 4) 
-#define RK817_HOTDIE_85 (0x0 << 4) -#define RK817_HOTDIE_95 (0x1 << 4) -#define RK817_HOTDIE_105 (0x2 << 4) -#define RK817_HOTDIE_115 (0x3 << 4) - -#define RK817_TSD_TEMP_MSK BIT(6) -#define RK817_TSD_140 0 -#define RK817_TSD_160 BIT(6) - -#define RK817_CLK32KOUT2_EN BIT(7) - -/* sys_cfg3 0xf4 */ -#define RK817_SLPPIN_FUNC_MSK (0x3 << 3) -#define SLPPIN_NULL_FUN (0x0 << 3) -#define SLPPIN_SLP_FUN (0x1 << 3) -#define SLPPIN_DN_FUN (0x2 << 3) -#define SLPPIN_RST_FUN (0x3 << 3) - -#define RK817_RST_FUNC_MSK (0x3 << 6) -#define RK817_RST_FUNC_SFT (6) -#define RK817_RST_FUNC_CNT (3) -#define RK817_RST_FUNC_DEV (0) /* reset the dev */ -#define RK817_RST_FUNC_REG (0x1 << 6) /* reset the reg only */ - -#define RK817_SLPPOL_MSK BIT(5) -#define RK817_SLPPOL_H BIT(5) -#define RK817_SLPPOL_L (0) - -/* gpio&int 0xfe */ -#define RK817_INT_POL_MSK BIT(1) -#define RK817_INT_POL_H BIT(1) -#define RK817_INT_POL_L 0 -#define RK809_BUCK5_CONFIG(i) (RK817_BOOST_OTG_CFG + (i) * 1) - enum { BUCK_ILMIN_50MA, BUCK_ILMIN_100MA, @@ -666,32 +320,8 @@ enum { }; enum { - RK805_BUCK1_2_ILMAX_2500MA, - RK805_BUCK1_2_ILMAX_3000MA, - RK805_BUCK1_2_ILMAX_3500MA, - RK805_BUCK1_2_ILMAX_4000MA, -}; - -enum { - RK805_BUCK3_ILMAX_1500MA, - RK805_BUCK3_ILMAX_2000MA, - RK805_BUCK3_ILMAX_2500MA, - RK805_BUCK3_ILMAX_3000MA, -}; - -enum { - RK805_BUCK4_ILMAX_2000MA, - RK805_BUCK4_ILMAX_2500MA, - RK805_BUCK4_ILMAX_3000MA, - RK805_BUCK4_ILMAX_3500MA, -}; - -enum { - RK805_ID = 0x8050, RK808_ID = 0x0000, - RK809_ID = 0x8090, - RK817_ID = 0x8170, - RK818_ID = 0x8180, + RK818_ID = 0x8181, }; struct rk808 { diff --git a/include/linux/mfd/rn5t618.h b/include/linux/mfd/rn5t618.h index 8aa0bda1af..cadc654390 100644 --- a/include/linux/mfd/rn5t618.h +++ b/include/linux/mfd/rn5t618.h @@ -1,8 +1,14 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * MFD core driver for Ricoh RN5T618 PMIC * * Copyright (C) 2014 Beniamino Galvani + * + * This program is free software; you can redistribute it and/or + * modify it under 
the terms of the GNU General Public License + * version 2 as published by the Free Software Foundation. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . */ #ifndef __LINUX_MFD_RN5T618_H @@ -52,13 +58,10 @@ #define RN5T618_DC3CTL2 0x31 #define RN5T618_DC4CTL 0x32 #define RN5T618_DC4CTL2 0x33 -#define RN5T618_DC5CTL 0x34 -#define RN5T618_DC5CTL2 0x35 #define RN5T618_DC1DAC 0x36 #define RN5T618_DC2DAC 0x37 #define RN5T618_DC3DAC 0x38 #define RN5T618_DC4DAC 0x39 -#define RN5T618_DC5DAC 0x3a #define RN5T618_DC1DAC_SLP 0x3b #define RN5T618_DC2DAC_SLP 0x3c #define RN5T618_DC3DAC_SLP 0x3d @@ -74,11 +77,6 @@ #define RN5T618_LDO3DAC 0x4e #define RN5T618_LDO4DAC 0x4f #define RN5T618_LDO5DAC 0x50 -#define RN5T618_LDO6DAC 0x51 -#define RN5T618_LDO7DAC 0x52 -#define RN5T618_LDO8DAC 0x53 -#define RN5T618_LDO9DAC 0x54 -#define RN5T618_LDO10DAC 0x55 #define RN5T618_LDORTCDAC 0x56 #define RN5T618_LDORTC2DAC 0x57 #define RN5T618_LDO1DAC_SLP 0x58 @@ -139,17 +137,6 @@ #define RN5T618_INTPOL 0x9c #define RN5T618_INTEN 0x9d #define RN5T618_INTMON 0x9e - -#define RN5T618_RTC_SECONDS 0xA0 -#define RN5T618_RTC_MDAY 0xA4 -#define RN5T618_RTC_MONTH 0xA5 -#define RN5T618_RTC_YEAR 0xA6 -#define RN5T618_RTC_ADJUST 0xA7 -#define RN5T618_RTC_ALARM_Y_SEC 0xA8 -#define RN5T618_RTC_DAL_MONTH 0xAC -#define RN5T618_RTC_CTRL1 0xAE -#define RN5T618_RTC_CTRL2 0xAF - #define RN5T618_PREVINDAC 0xb0 #define RN5T618_BATDAC 0xb1 #define RN5T618_CHGCTL1 0xb3 @@ -188,7 +175,6 @@ #define RN5T618_CHGOSCSCORESET3 0xd7 #define RN5T618_CHGOSCFREQSET1 0xd8 #define RN5T618_CHGOSCFREQSET2 0xd9 -#define RN5T618_GCHGDET 0xda #define RN5T618_CONTROL 0xe0 #define RN5T618_SOC 0xe1 #define RN5T618_RE_CAP_H 0xe2 @@ -232,17 +218,11 @@ enum { RN5T618_DCDC2, RN5T618_DCDC3, RN5T618_DCDC4, - RN5T618_DCDC5, RN5T618_LDO1, RN5T618_LDO2, RN5T618_LDO3, RN5T618_LDO4, RN5T618_LDO5, - RN5T618_LDO6, - RN5T618_LDO7, - RN5T618_LDO8, - RN5T618_LDO9, - RN5T618_LDO10, 
RN5T618_LDORTC1, RN5T618_LDORTC2, RN5T618_REG_NUM, @@ -251,27 +231,11 @@ enum { enum { RN5T567 = 0, RN5T618, - RC5T619, -}; - -/* RN5T618 IRQ definitions */ -enum { - RN5T618_IRQ_SYS = 0, - RN5T618_IRQ_DCDC, - RN5T618_IRQ_RTC, - RN5T618_IRQ_ADC, - RN5T618_IRQ_GPIO, - RN5T618_IRQ_CHG, - RN5T618_NR_IRQS, }; struct rn5t618 { struct regmap *regmap; - struct device *dev; long variant; - - int irq; - struct regmap_irq_chip_data *irq_data; }; #endif /* __LINUX_MFD_RN5T618_H */ diff --git a/include/linux/mfd/rt5033-private.h b/include/linux/mfd/rt5033-private.h index 2d1895c3ef..1b63fc2f42 100644 --- a/include/linux/mfd/rt5033-private.h +++ b/include/linux/mfd/rt5033-private.h @@ -1,9 +1,12 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * MFD core driver for Richtek RT5033 * * Copyright (C) 2014 Samsung Electronics, Co., Ltd. * Author: Beomho Seo + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published bythe Free Software Foundation. 
*/ #ifndef __RT5033_PRIVATE_H__ @@ -91,14 +94,14 @@ enum rt5033_reg { #define RT5033_RT_HZ_MASK 0x01 /* RT5033 control register */ -#define RT5033_CTRL_FCCM_BUCK_MASK BIT(0) -#define RT5033_CTRL_BUCKOMS_MASK BIT(1) -#define RT5033_CTRL_LDOOMS_MASK BIT(2) -#define RT5033_CTRL_SLDOOMS_MASK BIT(3) -#define RT5033_CTRL_EN_BUCK_MASK BIT(4) -#define RT5033_CTRL_EN_LDO_MASK BIT(5) -#define RT5033_CTRL_EN_SAFE_LDO_MASK BIT(6) -#define RT5033_CTRL_LDO_SLEEP_MASK BIT(7) +#define RT5033_CTRL_FCCM_BUCK_MASK 0x00 +#define RT5033_CTRL_BUCKOMS_MASK 0x01 +#define RT5033_CTRL_LDOOMS_MASK 0x02 +#define RT5033_CTRL_SLDOOMS_MASK 0x03 +#define RT5033_CTRL_EN_BUCK_MASK 0x04 +#define RT5033_CTRL_EN_LDO_MASK 0x05 +#define RT5033_CTRL_EN_SAFE_LDO_MASK 0x06 +#define RT5033_CTRL_LDO_SLEEP_MASK 0x07 /* RT5033 BUCK control register */ #define RT5033_BUCK_CTRL_MASK 0x1f @@ -247,11 +250,11 @@ enum rt5033_fuel_reg { #define RT5033_FUEL_BAT_PRESENT 0x02 /* RT5033 PMIC interrupts */ -#define RT5033_PMIC_IRQ_BUCKOCP BIT(2) -#define RT5033_PMIC_IRQ_BUCKLV BIT(3) -#define RT5033_PMIC_IRQ_SAFELDOLV BIT(4) -#define RT5033_PMIC_IRQ_LDOLV BIT(5) -#define RT5033_PMIC_IRQ_OT BIT(6) -#define RT5033_PMIC_IRQ_VDDA_UV BIT(7) +#define RT5033_PMIC_IRQ_BUCKOCP 2 +#define RT5033_PMIC_IRQ_BUCKLV 3 +#define RT5033_PMIC_IRQ_SAFELDOLV 4 +#define RT5033_PMIC_IRQ_LDOLV 5 +#define RT5033_PMIC_IRQ_OT 6 +#define RT5033_PMIC_IRQ_VDDA_UV 7 #endif /* __RT5033_PRIVATE_H__ */ diff --git a/include/linux/mfd/rt5033.h b/include/linux/mfd/rt5033.h index 3c23b6220c..6cff5cf458 100644 --- a/include/linux/mfd/rt5033.h +++ b/include/linux/mfd/rt5033.h @@ -1,9 +1,12 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * MFD core driver for the RT5033 * * Copyright (C) 2014 Samsung Electronics * Author: Beomho Seo + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published bythe Free Software Foundation. 
*/ #ifndef __RT5033_H__ diff --git a/include/linux/mfd/rtsx_common.h b/include/linux/mfd/rtsx_common.h new file mode 100644 index 0000000000..443176ee1a --- /dev/null +++ b/include/linux/mfd/rtsx_common.h @@ -0,0 +1,50 @@ +/* Driver for Realtek driver-based card reader + * + * Copyright(c) 2009-2013 Realtek Semiconductor Corp. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2, or (at your option) any + * later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, see . + * + * Author: + * Wei WANG + */ + +#ifndef __RTSX_COMMON_H +#define __RTSX_COMMON_H + +#define DRV_NAME_RTSX_PCI "rtsx_pci" +#define DRV_NAME_RTSX_PCI_SDMMC "rtsx_pci_sdmmc" +#define DRV_NAME_RTSX_PCI_MS "rtsx_pci_ms" + +#define RTSX_REG_PAIR(addr, val) (((u32)(addr) << 16) | (u8)(val)) + +#define RTSX_SSC_DEPTH_4M 0x01 +#define RTSX_SSC_DEPTH_2M 0x02 +#define RTSX_SSC_DEPTH_1M 0x03 +#define RTSX_SSC_DEPTH_500K 0x04 +#define RTSX_SSC_DEPTH_250K 0x05 + +#define RTSX_SD_CARD 0 +#define RTSX_MS_CARD 1 + +#define CLK_TO_DIV_N 0 +#define DIV_N_TO_CLK 1 + +struct platform_device; + +struct rtsx_slot { + struct platform_device *p_dev; + void (*card_event)(struct platform_device *p_dev); +}; + +#endif diff --git a/include/linux/mfd/rtsx_pci.h b/include/linux/mfd/rtsx_pci.h new file mode 100644 index 0000000000..7eb7cbac0a --- /dev/null +++ b/include/linux/mfd/rtsx_pci.h @@ -0,0 +1,1051 @@ +/* Driver for Realtek PCI-Express card reader + * + * Copyright(c) 2009-2013 Realtek Semiconductor Corp. All rights reserved. 
+ * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2, or (at your option) any + * later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, see . + * + * Author: + * Wei WANG + */ + +#ifndef __RTSX_PCI_H +#define __RTSX_PCI_H + +#include +#include +#include + +#define MAX_RW_REG_CNT 1024 + +#define RTSX_HCBAR 0x00 +#define RTSX_HCBCTLR 0x04 +#define STOP_CMD (0x01 << 28) +#define READ_REG_CMD 0 +#define WRITE_REG_CMD 1 +#define CHECK_REG_CMD 2 + +#define RTSX_HDBAR 0x08 +#define SG_INT 0x04 +#define SG_END 0x02 +#define SG_VALID 0x01 +#define SG_NO_OP 0x00 +#define SG_TRANS_DATA (0x02 << 4) +#define SG_LINK_DESC (0x03 << 4) +#define RTSX_HDBCTLR 0x0C +#define SDMA_MODE 0x00 +#define ADMA_MODE (0x02 << 26) +#define STOP_DMA (0x01 << 28) +#define TRIG_DMA (0x01 << 31) + +#define RTSX_HAIMR 0x10 +#define HAIMR_TRANS_START (0x01 << 31) +#define HAIMR_READ 0x00 +#define HAIMR_WRITE (0x01 << 30) +#define HAIMR_READ_START (HAIMR_TRANS_START | HAIMR_READ) +#define HAIMR_WRITE_START (HAIMR_TRANS_START | HAIMR_WRITE) +#define HAIMR_TRANS_END (HAIMR_TRANS_START) + +#define RTSX_BIPR 0x14 +#define CMD_DONE_INT (1 << 31) +#define DATA_DONE_INT (1 << 30) +#define TRANS_OK_INT (1 << 29) +#define TRANS_FAIL_INT (1 << 28) +#define XD_INT (1 << 27) +#define MS_INT (1 << 26) +#define SD_INT (1 << 25) +#define GPIO0_INT (1 << 24) +#define OC_INT (1 << 23) +#define SD_WRITE_PROTECT (1 << 19) +#define XD_EXIST (1 << 18) +#define MS_EXIST (1 << 17) +#define SD_EXIST (1 << 16) +#define DELINK_INT GPIO0_INT +#define MS_OC_INT (1 
<< 23) +#define SD_OC_INT (1 << 22) + +#define CARD_INT (XD_INT | MS_INT | SD_INT) +#define NEED_COMPLETE_INT (DATA_DONE_INT | TRANS_OK_INT | TRANS_FAIL_INT) +#define RTSX_INT (CMD_DONE_INT | NEED_COMPLETE_INT | \ + CARD_INT | GPIO0_INT | OC_INT) +#define CARD_EXIST (XD_EXIST | MS_EXIST | SD_EXIST) + +#define RTSX_BIER 0x18 +#define CMD_DONE_INT_EN (1 << 31) +#define DATA_DONE_INT_EN (1 << 30) +#define TRANS_OK_INT_EN (1 << 29) +#define TRANS_FAIL_INT_EN (1 << 28) +#define XD_INT_EN (1 << 27) +#define MS_INT_EN (1 << 26) +#define SD_INT_EN (1 << 25) +#define GPIO0_INT_EN (1 << 24) +#define OC_INT_EN (1 << 23) +#define DELINK_INT_EN GPIO0_INT_EN +#define MS_OC_INT_EN (1 << 23) +#define SD_OC_INT_EN (1 << 22) + + +/* + * macros for easy use + */ +#define rtsx_pci_writel(pcr, reg, value) \ + iowrite32(value, (pcr)->remap_addr + reg) +#define rtsx_pci_readl(pcr, reg) \ + ioread32((pcr)->remap_addr + reg) +#define rtsx_pci_writew(pcr, reg, value) \ + iowrite16(value, (pcr)->remap_addr + reg) +#define rtsx_pci_readw(pcr, reg) \ + ioread16((pcr)->remap_addr + reg) +#define rtsx_pci_writeb(pcr, reg, value) \ + iowrite8(value, (pcr)->remap_addr + reg) +#define rtsx_pci_readb(pcr, reg) \ + ioread8((pcr)->remap_addr + reg) + +#define rtsx_pci_read_config_byte(pcr, where, val) \ + pci_read_config_byte((pcr)->pci, where, val) + +#define rtsx_pci_write_config_byte(pcr, where, val) \ + pci_write_config_byte((pcr)->pci, where, val) + +#define rtsx_pci_read_config_dword(pcr, where, val) \ + pci_read_config_dword((pcr)->pci, where, val) + +#define rtsx_pci_write_config_dword(pcr, where, val) \ + pci_write_config_dword((pcr)->pci, where, val) + +#define STATE_TRANS_NONE 0 +#define STATE_TRANS_CMD 1 +#define STATE_TRANS_BUF 2 +#define STATE_TRANS_SG 3 + +#define TRANS_NOT_READY 0 +#define TRANS_RESULT_OK 1 +#define TRANS_RESULT_FAIL 2 +#define TRANS_NO_DEVICE 3 + +#define RTSX_RESV_BUF_LEN 4096 +#define HOST_CMDS_BUF_LEN 1024 +#define HOST_SG_TBL_BUF_LEN (RTSX_RESV_BUF_LEN - 
HOST_CMDS_BUF_LEN) +#define HOST_SG_TBL_ITEMS (HOST_SG_TBL_BUF_LEN / 8) +#define MAX_SG_ITEM_LEN 0x80000 +#define HOST_TO_DEVICE 0 +#define DEVICE_TO_HOST 1 + +#define OUTPUT_3V3 0 +#define OUTPUT_1V8 1 + +#define RTSX_PHASE_MAX 32 +#define RX_TUNING_CNT 3 + +#define MS_CFG 0xFD40 +#define SAMPLE_TIME_RISING 0x00 +#define SAMPLE_TIME_FALLING 0x80 +#define PUSH_TIME_DEFAULT 0x00 +#define PUSH_TIME_ODD 0x40 +#define NO_EXTEND_TOGGLE 0x00 +#define EXTEND_TOGGLE_CHK 0x20 +#define MS_BUS_WIDTH_1 0x00 +#define MS_BUS_WIDTH_4 0x10 +#define MS_BUS_WIDTH_8 0x18 +#define MS_2K_SECTOR_MODE 0x04 +#define MS_512_SECTOR_MODE 0x00 +#define MS_TOGGLE_TIMEOUT_EN 0x00 +#define MS_TOGGLE_TIMEOUT_DISEN 0x01 +#define MS_NO_CHECK_INT 0x02 +#define MS_TPC 0xFD41 +#define MS_TRANS_CFG 0xFD42 +#define WAIT_INT 0x80 +#define NO_WAIT_INT 0x00 +#define NO_AUTO_READ_INT_REG 0x00 +#define AUTO_READ_INT_REG 0x40 +#define MS_CRC16_ERR 0x20 +#define MS_RDY_TIMEOUT 0x10 +#define MS_INT_CMDNK 0x08 +#define MS_INT_BREQ 0x04 +#define MS_INT_ERR 0x02 +#define MS_INT_CED 0x01 +#define MS_TRANSFER 0xFD43 +#define MS_TRANSFER_START 0x80 +#define MS_TRANSFER_END 0x40 +#define MS_TRANSFER_ERR 0x20 +#define MS_BS_STATE 0x10 +#define MS_TM_READ_BYTES 0x00 +#define MS_TM_NORMAL_READ 0x01 +#define MS_TM_WRITE_BYTES 0x04 +#define MS_TM_NORMAL_WRITE 0x05 +#define MS_TM_AUTO_READ 0x08 +#define MS_TM_AUTO_WRITE 0x0C +#define MS_INT_REG 0xFD44 +#define MS_BYTE_CNT 0xFD45 +#define MS_SECTOR_CNT_L 0xFD46 +#define MS_SECTOR_CNT_H 0xFD47 +#define MS_DBUS_H 0xFD48 + +#define SD_CFG1 0xFDA0 +#define SD_CLK_DIVIDE_0 0x00 +#define SD_CLK_DIVIDE_256 0xC0 +#define SD_CLK_DIVIDE_128 0x80 +#define SD_BUS_WIDTH_1BIT 0x00 +#define SD_BUS_WIDTH_4BIT 0x01 +#define SD_BUS_WIDTH_8BIT 0x02 +#define SD_ASYNC_FIFO_NOT_RST 0x10 +#define SD_20_MODE 0x00 +#define SD_DDR_MODE 0x04 +#define SD_30_MODE 0x08 +#define SD_CLK_DIVIDE_MASK 0xC0 +#define SD_CFG2 0xFDA1 +#define SD_CALCULATE_CRC7 0x00 +#define SD_NO_CALCULATE_CRC7 0x80 +#define 
SD_CHECK_CRC16 0x00 +#define SD_NO_CHECK_CRC16 0x40 +#define SD_NO_CHECK_WAIT_CRC_TO 0x20 +#define SD_WAIT_BUSY_END 0x08 +#define SD_NO_WAIT_BUSY_END 0x00 +#define SD_CHECK_CRC7 0x00 +#define SD_NO_CHECK_CRC7 0x04 +#define SD_RSP_LEN_0 0x00 +#define SD_RSP_LEN_6 0x01 +#define SD_RSP_LEN_17 0x02 +#define SD_RSP_TYPE_R0 0x04 +#define SD_RSP_TYPE_R1 0x01 +#define SD_RSP_TYPE_R1b 0x09 +#define SD_RSP_TYPE_R2 0x02 +#define SD_RSP_TYPE_R3 0x05 +#define SD_RSP_TYPE_R4 0x05 +#define SD_RSP_TYPE_R5 0x01 +#define SD_RSP_TYPE_R6 0x01 +#define SD_RSP_TYPE_R7 0x01 +#define SD_CFG3 0xFDA2 +#define SD_RSP_80CLK_TIMEOUT_EN 0x01 + +#define SD_STAT1 0xFDA3 +#define SD_CRC7_ERR 0x80 +#define SD_CRC16_ERR 0x40 +#define SD_CRC_WRITE_ERR 0x20 +#define SD_CRC_WRITE_ERR_MASK 0x1C +#define GET_CRC_TIME_OUT 0x02 +#define SD_TUNING_COMPARE_ERR 0x01 +#define SD_STAT2 0xFDA4 +#define SD_RSP_80CLK_TIMEOUT 0x01 + +#define SD_BUS_STAT 0xFDA5 +#define SD_CLK_TOGGLE_EN 0x80 +#define SD_CLK_FORCE_STOP 0x40 +#define SD_DAT3_STATUS 0x10 +#define SD_DAT2_STATUS 0x08 +#define SD_DAT1_STATUS 0x04 +#define SD_DAT0_STATUS 0x02 +#define SD_CMD_STATUS 0x01 +#define SD_PAD_CTL 0xFDA6 +#define SD_IO_USING_1V8 0x80 +#define SD_IO_USING_3V3 0x7F +#define TYPE_A_DRIVING 0x00 +#define TYPE_B_DRIVING 0x01 +#define TYPE_C_DRIVING 0x02 +#define TYPE_D_DRIVING 0x03 +#define SD_SAMPLE_POINT_CTL 0xFDA7 +#define DDR_FIX_RX_DAT 0x00 +#define DDR_VAR_RX_DAT 0x80 +#define DDR_FIX_RX_DAT_EDGE 0x00 +#define DDR_FIX_RX_DAT_14_DELAY 0x40 +#define DDR_FIX_RX_CMD 0x00 +#define DDR_VAR_RX_CMD 0x20 +#define DDR_FIX_RX_CMD_POS_EDGE 0x00 +#define DDR_FIX_RX_CMD_14_DELAY 0x10 +#define SD20_RX_POS_EDGE 0x00 +#define SD20_RX_14_DELAY 0x08 +#define SD20_RX_SEL_MASK 0x08 +#define SD_PUSH_POINT_CTL 0xFDA8 +#define DDR_FIX_TX_CMD_DAT 0x00 +#define DDR_VAR_TX_CMD_DAT 0x80 +#define DDR_FIX_TX_DAT_14_TSU 0x00 +#define DDR_FIX_TX_DAT_12_TSU 0x40 +#define DDR_FIX_TX_CMD_NEG_EDGE 0x00 +#define DDR_FIX_TX_CMD_14_AHEAD 0x20 +#define 
SD20_TX_NEG_EDGE 0x00 +#define SD20_TX_14_AHEAD 0x10 +#define SD20_TX_SEL_MASK 0x10 +#define DDR_VAR_SDCLK_POL_SWAP 0x01 +#define SD_CMD0 0xFDA9 +#define SD_CMD_START 0x40 +#define SD_CMD1 0xFDAA +#define SD_CMD2 0xFDAB +#define SD_CMD3 0xFDAC +#define SD_CMD4 0xFDAD +#define SD_CMD5 0xFDAE +#define SD_BYTE_CNT_L 0xFDAF +#define SD_BYTE_CNT_H 0xFDB0 +#define SD_BLOCK_CNT_L 0xFDB1 +#define SD_BLOCK_CNT_H 0xFDB2 +#define SD_TRANSFER 0xFDB3 +#define SD_TRANSFER_START 0x80 +#define SD_TRANSFER_END 0x40 +#define SD_STAT_IDLE 0x20 +#define SD_TRANSFER_ERR 0x10 +#define SD_TM_NORMAL_WRITE 0x00 +#define SD_TM_AUTO_WRITE_3 0x01 +#define SD_TM_AUTO_WRITE_4 0x02 +#define SD_TM_AUTO_READ_3 0x05 +#define SD_TM_AUTO_READ_4 0x06 +#define SD_TM_CMD_RSP 0x08 +#define SD_TM_AUTO_WRITE_1 0x09 +#define SD_TM_AUTO_WRITE_2 0x0A +#define SD_TM_NORMAL_READ 0x0C +#define SD_TM_AUTO_READ_1 0x0D +#define SD_TM_AUTO_READ_2 0x0E +#define SD_TM_AUTO_TUNING 0x0F +#define SD_CMD_STATE 0xFDB5 +#define SD_CMD_IDLE 0x80 + +#define SD_DATA_STATE 0xFDB6 +#define SD_DATA_IDLE 0x80 + +#define SRCTL 0xFC13 + +#define DCM_DRP_CTL 0xFC23 +#define DCM_RESET 0x08 +#define DCM_LOCKED 0x04 +#define DCM_208M 0x00 +#define DCM_TX 0x01 +#define DCM_RX 0x02 +#define DCM_DRP_TRIG 0xFC24 +#define DRP_START 0x80 +#define DRP_DONE 0x40 +#define DCM_DRP_CFG 0xFC25 +#define DRP_WRITE 0x80 +#define DRP_READ 0x00 +#define DCM_WRITE_ADDRESS_50 0x50 +#define DCM_WRITE_ADDRESS_51 0x51 +#define DCM_READ_ADDRESS_00 0x00 +#define DCM_READ_ADDRESS_51 0x51 +#define DCM_DRP_WR_DATA_L 0xFC26 +#define DCM_DRP_WR_DATA_H 0xFC27 +#define DCM_DRP_RD_DATA_L 0xFC28 +#define DCM_DRP_RD_DATA_H 0xFC29 +#define SD_VPCLK0_CTL 0xFC2A +#define SD_VPCLK1_CTL 0xFC2B +#define SD_DCMPS0_CTL 0xFC2C +#define SD_DCMPS1_CTL 0xFC2D +#define SD_VPTX_CTL SD_VPCLK0_CTL +#define SD_VPRX_CTL SD_VPCLK1_CTL +#define PHASE_CHANGE 0x80 +#define PHASE_NOT_RESET 0x40 +#define SD_DCMPS_TX_CTL SD_DCMPS0_CTL +#define SD_DCMPS_RX_CTL SD_DCMPS1_CTL +#define DCMPS_CHANGE 
0x80 +#define DCMPS_CHANGE_DONE 0x40 +#define DCMPS_ERROR 0x20 +#define DCMPS_CURRENT_PHASE 0x1F +#define CARD_CLK_SOURCE 0xFC2E +#define CRC_FIX_CLK (0x00 << 0) +#define CRC_VAR_CLK0 (0x01 << 0) +#define CRC_VAR_CLK1 (0x02 << 0) +#define SD30_FIX_CLK (0x00 << 2) +#define SD30_VAR_CLK0 (0x01 << 2) +#define SD30_VAR_CLK1 (0x02 << 2) +#define SAMPLE_FIX_CLK (0x00 << 4) +#define SAMPLE_VAR_CLK0 (0x01 << 4) +#define SAMPLE_VAR_CLK1 (0x02 << 4) +#define CARD_PWR_CTL 0xFD50 +#define PMOS_STRG_MASK 0x10 +#define PMOS_STRG_800mA 0x10 +#define PMOS_STRG_400mA 0x00 +#define SD_POWER_OFF 0x03 +#define SD_PARTIAL_POWER_ON 0x01 +#define SD_POWER_ON 0x00 +#define SD_POWER_MASK 0x03 +#define MS_POWER_OFF 0x0C +#define MS_PARTIAL_POWER_ON 0x04 +#define MS_POWER_ON 0x00 +#define MS_POWER_MASK 0x0C +#define BPP_POWER_OFF 0x0F +#define BPP_POWER_5_PERCENT_ON 0x0E +#define BPP_POWER_10_PERCENT_ON 0x0C +#define BPP_POWER_15_PERCENT_ON 0x08 +#define BPP_POWER_ON 0x00 +#define BPP_POWER_MASK 0x0F +#define SD_VCC_PARTIAL_POWER_ON 0x02 +#define SD_VCC_POWER_ON 0x00 +#define CARD_CLK_SWITCH 0xFD51 +#define RTL8411B_PACKAGE_MODE 0xFD51 +#define CARD_SHARE_MODE 0xFD52 +#define CARD_SHARE_MASK 0x0F +#define CARD_SHARE_MULTI_LUN 0x00 +#define CARD_SHARE_NORMAL 0x00 +#define CARD_SHARE_48_SD 0x04 +#define CARD_SHARE_48_MS 0x08 +#define CARD_SHARE_BAROSSA_SD 0x01 +#define CARD_SHARE_BAROSSA_MS 0x02 +#define CARD_DRIVE_SEL 0xFD53 +#define MS_DRIVE_8mA (0x01 << 6) +#define MMC_DRIVE_8mA (0x01 << 4) +#define XD_DRIVE_8mA (0x01 << 2) +#define GPIO_DRIVE_8mA 0x01 +#define RTS5209_CARD_DRIVE_DEFAULT (MS_DRIVE_8mA | MMC_DRIVE_8mA |\ + XD_DRIVE_8mA | GPIO_DRIVE_8mA) +#define RTL8411_CARD_DRIVE_DEFAULT (MS_DRIVE_8mA | MMC_DRIVE_8mA |\ + XD_DRIVE_8mA) +#define RTSX_CARD_DRIVE_DEFAULT (MS_DRIVE_8mA | GPIO_DRIVE_8mA) + +#define CARD_STOP 0xFD54 +#define SPI_STOP 0x01 +#define XD_STOP 0x02 +#define SD_STOP 0x04 +#define MS_STOP 0x08 +#define SPI_CLR_ERR 0x10 +#define XD_CLR_ERR 0x20 +#define SD_CLR_ERR 0x40 
+#define MS_CLR_ERR 0x80 +#define CARD_OE 0xFD55 +#define SD_OUTPUT_EN 0x04 +#define MS_OUTPUT_EN 0x08 +#define CARD_AUTO_BLINK 0xFD56 +#define CARD_GPIO_DIR 0xFD57 +#define CARD_GPIO 0xFD58 +#define CARD_DATA_SOURCE 0xFD5B +#define PINGPONG_BUFFER 0x01 +#define RING_BUFFER 0x00 +#define SD30_CLK_DRIVE_SEL 0xFD5A +#define DRIVER_TYPE_A 0x05 +#define DRIVER_TYPE_B 0x03 +#define DRIVER_TYPE_C 0x02 +#define DRIVER_TYPE_D 0x01 +#define CARD_SELECT 0xFD5C +#define SD_MOD_SEL 2 +#define MS_MOD_SEL 3 +#define SD30_DRIVE_SEL 0xFD5E +#define CFG_DRIVER_TYPE_A 0x02 +#define CFG_DRIVER_TYPE_B 0x03 +#define CFG_DRIVER_TYPE_C 0x01 +#define CFG_DRIVER_TYPE_D 0x00 +#define SD30_CMD_DRIVE_SEL 0xFD5E +#define SD30_DAT_DRIVE_SEL 0xFD5F +#define CARD_CLK_EN 0xFD69 +#define SD_CLK_EN 0x04 +#define MS_CLK_EN 0x08 +#define SDIO_CTRL 0xFD6B +#define CD_PAD_CTL 0xFD73 +#define CD_DISABLE_MASK 0x07 +#define MS_CD_DISABLE 0x04 +#define SD_CD_DISABLE 0x02 +#define XD_CD_DISABLE 0x01 +#define CD_DISABLE 0x07 +#define CD_ENABLE 0x00 +#define MS_CD_EN_ONLY 0x03 +#define SD_CD_EN_ONLY 0x05 +#define XD_CD_EN_ONLY 0x06 +#define FORCE_CD_LOW_MASK 0x38 +#define FORCE_CD_XD_LOW 0x08 +#define FORCE_CD_SD_LOW 0x10 +#define FORCE_CD_MS_LOW 0x20 +#define CD_AUTO_DISABLE 0x40 +#define FPDCTL 0xFC00 +#define SSC_POWER_DOWN 0x01 +#define SD_OC_POWER_DOWN 0x02 +#define ALL_POWER_DOWN 0x07 +#define OC_POWER_DOWN 0x06 +#define PDINFO 0xFC01 + +#define CLK_CTL 0xFC02 +#define CHANGE_CLK 0x01 +#define CLK_LOW_FREQ 0x01 + +#define CLK_DIV 0xFC03 +#define CLK_DIV_1 0x01 +#define CLK_DIV_2 0x02 +#define CLK_DIV_4 0x03 +#define CLK_DIV_8 0x04 +#define CLK_SEL 0xFC04 + +#define SSC_DIV_N_0 0xFC0F +#define SSC_DIV_N_1 0xFC10 +#define SSC_CTL1 0xFC11 +#define SSC_RSTB 0x80 +#define SSC_8X_EN 0x40 +#define SSC_FIX_FRAC 0x20 +#define SSC_SEL_1M 0x00 +#define SSC_SEL_2M 0x08 +#define SSC_SEL_4M 0x10 +#define SSC_SEL_8M 0x18 +#define SSC_CTL2 0xFC12 +#define SSC_DEPTH_MASK 0x07 +#define SSC_DEPTH_DISALBE 0x00 +#define 
SSC_DEPTH_4M 0x01 +#define SSC_DEPTH_2M 0x02 +#define SSC_DEPTH_1M 0x03 +#define SSC_DEPTH_500K 0x04 +#define SSC_DEPTH_250K 0x05 +#define RCCTL 0xFC14 + +#define FPGA_PULL_CTL 0xFC1D +#define OLT_LED_CTL 0xFC1E +#define GPIO_CTL 0xFC1F + +#define LDO_CTL 0xFC1E +#define BPP_ASIC_1V7 0x00 +#define BPP_ASIC_1V8 0x01 +#define BPP_ASIC_1V9 0x02 +#define BPP_ASIC_2V0 0x03 +#define BPP_ASIC_2V7 0x04 +#define BPP_ASIC_2V8 0x05 +#define BPP_ASIC_3V2 0x06 +#define BPP_ASIC_3V3 0x07 +#define BPP_REG_TUNED18 0x07 +#define BPP_TUNED18_SHIFT_8402 5 +#define BPP_TUNED18_SHIFT_8411 4 +#define BPP_PAD_MASK 0x04 +#define BPP_PAD_3V3 0x04 +#define BPP_PAD_1V8 0x00 +#define BPP_LDO_POWB 0x03 +#define BPP_LDO_ON 0x00 +#define BPP_LDO_SUSPEND 0x02 +#define BPP_LDO_OFF 0x03 +#define SYS_VER 0xFC32 + +#define CARD_PULL_CTL1 0xFD60 +#define CARD_PULL_CTL2 0xFD61 +#define CARD_PULL_CTL3 0xFD62 +#define CARD_PULL_CTL4 0xFD63 +#define CARD_PULL_CTL5 0xFD64 +#define CARD_PULL_CTL6 0xFD65 + +/* PCI Express Related Registers */ +#define IRQEN0 0xFE20 +#define IRQSTAT0 0xFE21 +#define DMA_DONE_INT 0x80 +#define SUSPEND_INT 0x40 +#define LINK_RDY_INT 0x20 +#define LINK_DOWN_INT 0x10 +#define IRQEN1 0xFE22 +#define IRQSTAT1 0xFE23 +#define TLPRIEN 0xFE24 +#define TLPRISTAT 0xFE25 +#define TLPTIEN 0xFE26 +#define TLPTISTAT 0xFE27 +#define DMATC0 0xFE28 +#define DMATC1 0xFE29 +#define DMATC2 0xFE2A +#define DMATC3 0xFE2B +#define DMACTL 0xFE2C +#define DMA_RST 0x80 +#define DMA_BUSY 0x04 +#define DMA_DIR_TO_CARD 0x00 +#define DMA_DIR_FROM_CARD 0x02 +#define DMA_EN 0x01 +#define DMA_128 (0 << 4) +#define DMA_256 (1 << 4) +#define DMA_512 (2 << 4) +#define DMA_1024 (3 << 4) +#define DMA_PACK_SIZE_MASK 0x30 +#define BCTL 0xFE2D +#define RBBC0 0xFE2E +#define RBBC1 0xFE2F +#define RBDAT 0xFE30 +#define RBCTL 0xFE34 +#define CFGADDR0 0xFE35 +#define CFGADDR1 0xFE36 +#define CFGDATA0 0xFE37 +#define CFGDATA1 0xFE38 +#define CFGDATA2 0xFE39 +#define CFGDATA3 0xFE3A +#define CFGRWCTL 0xFE3B +#define 
PHYRWCTL 0xFE3C +#define PHYDATA0 0xFE3D +#define PHYDATA1 0xFE3E +#define PHYADDR 0xFE3F +#define MSGRXDATA0 0xFE40 +#define MSGRXDATA1 0xFE41 +#define MSGRXDATA2 0xFE42 +#define MSGRXDATA3 0xFE43 +#define MSGTXDATA0 0xFE44 +#define MSGTXDATA1 0xFE45 +#define MSGTXDATA2 0xFE46 +#define MSGTXDATA3 0xFE47 +#define MSGTXCTL 0xFE48 +#define LTR_CTL 0xFE4A +#define OBFF_CFG 0xFE4C + +#define CDRESUMECTL 0xFE52 +#define WAKE_SEL_CTL 0xFE54 +#define PCLK_CTL 0xFE55 +#define PCLK_MODE_SEL 0x20 +#define PME_FORCE_CTL 0xFE56 + +#define ASPM_FORCE_CTL 0xFE57 +#define FORCE_ASPM_CTL0 0x10 +#define FORCE_ASPM_VAL_MASK 0x03 +#define FORCE_ASPM_L1_EN 0x02 +#define FORCE_ASPM_L0_EN 0x01 +#define FORCE_ASPM_NO_ASPM 0x00 +#define PM_CLK_FORCE_CTL 0xFE58 +#define FUNC_FORCE_CTL 0xFE59 +#define FUNC_FORCE_UPME_XMT_DBG 0x02 +#define PERST_GLITCH_WIDTH 0xFE5C +#define CHANGE_LINK_STATE 0xFE5B +#define RESET_LOAD_REG 0xFE5E +#define EFUSE_CONTENT 0xFE5F +#define HOST_SLEEP_STATE 0xFE60 +#define HOST_ENTER_S1 1 +#define HOST_ENTER_S3 2 + +#define SDIO_CFG 0xFE70 +#define PM_EVENT_DEBUG 0xFE71 +#define PME_DEBUG_0 0x08 +#define NFTS_TX_CTRL 0xFE72 + +#define PWR_GATE_CTRL 0xFE75 +#define PWR_GATE_EN 0x01 +#define LDO3318_PWR_MASK 0x06 +#define LDO_ON 0x00 +#define LDO_SUSPEND 0x04 +#define LDO_OFF 0x06 +#define PWD_SUSPEND_EN 0xFE76 +#define LDO_PWR_SEL 0xFE78 + +#define L1SUB_CONFIG1 0xFE8D +#define L1SUB_CONFIG2 0xFE8E +#define L1SUB_AUTO_CFG 0x02 +#define L1SUB_CONFIG3 0xFE8F + +#define DUMMY_REG_RESET_0 0xFE90 + +#define AUTOLOAD_CFG_BASE 0xFF00 +#define PETXCFG 0xFF03 + +#define PM_CTRL1 0xFF44 +#define CD_RESUME_EN_MASK 0xF0 + +#define PM_CTRL2 0xFF45 +#define PM_CTRL3 0xFF46 +#define SDIO_SEND_PME_EN 0x80 +#define FORCE_RC_MODE_ON 0x40 +#define FORCE_RX50_LINK_ON 0x20 +#define D3_DELINK_MODE_EN 0x10 +#define USE_PESRTB_CTL_DELINK 0x08 +#define DELAY_PIN_WAKE 0x04 +#define RESET_PIN_WAKE 0x02 +#define PM_WAKE_EN 0x01 +#define PM_CTRL4 0xFF47 + +/* Memory mapping */ +#define 
SRAM_BASE 0xE600 +#define RBUF_BASE 0xF400 +#define PPBUF_BASE1 0xF800 +#define PPBUF_BASE2 0xFA00 +#define IMAGE_FLAG_ADDR0 0xCE80 +#define IMAGE_FLAG_ADDR1 0xCE81 + +#define RREF_CFG 0xFF6C +#define RREF_VBGSEL_MASK 0x38 +#define RREF_VBGSEL_1V25 0x28 + +#define OOBS_CONFIG 0xFF6E +#define OOBS_AUTOK_DIS 0x80 +#define OOBS_VAL_MASK 0x1F + +#define LDO_DV18_CFG 0xFF70 +#define LDO_DV18_SR_MASK 0xC0 +#define LDO_DV18_SR_DF 0x40 + +#define LDO_CONFIG2 0xFF71 +#define LDO_D3318_MASK 0x07 +#define LDO_D3318_33V 0x07 +#define LDO_D3318_18V 0x02 + +#define LDO_VCC_CFG0 0xFF72 +#define LDO_VCC_LMTVTH_MASK 0x30 +#define LDO_VCC_LMTVTH_2A 0x10 + +#define LDO_VCC_CFG1 0xFF73 +#define LDO_VCC_REF_TUNE_MASK 0x30 +#define LDO_VCC_REF_1V2 0x20 +#define LDO_VCC_TUNE_MASK 0x07 +#define LDO_VCC_1V8 0x04 +#define LDO_VCC_3V3 0x07 +#define LDO_VCC_LMT_EN 0x08 + +#define LDO_VIO_CFG 0xFF75 +#define LDO_VIO_SR_MASK 0xC0 +#define LDO_VIO_SR_DF 0x40 +#define LDO_VIO_REF_TUNE_MASK 0x30 +#define LDO_VIO_REF_1V2 0x20 +#define LDO_VIO_TUNE_MASK 0x07 +#define LDO_VIO_1V7 0x03 +#define LDO_VIO_1V8 0x04 +#define LDO_VIO_3V3 0x07 + +#define LDO_DV12S_CFG 0xFF76 +#define LDO_REF12_TUNE_MASK 0x18 +#define LDO_REF12_TUNE_DF 0x10 +#define LDO_D12_TUNE_MASK 0x07 +#define LDO_D12_TUNE_DF 0x04 + +#define LDO_AV12S_CFG 0xFF77 +#define LDO_AV12S_TUNE_MASK 0x07 +#define LDO_AV12S_TUNE_DF 0x04 + +#define SD40_LDO_CTL1 0xFE7D +#define SD40_VIO_TUNE_MASK 0x70 +#define SD40_VIO_TUNE_1V7 0x30 +#define SD_VIO_LDO_1V8 0x40 +#define SD_VIO_LDO_3V3 0x70 + +/* Phy register */ +#define PHY_PCR 0x00 +#define PHY_PCR_FORCE_CODE 0xB000 +#define PHY_PCR_OOBS_CALI_50 0x0800 +#define PHY_PCR_OOBS_VCM_08 0x0200 +#define PHY_PCR_OOBS_SEN_90 0x0040 +#define PHY_PCR_RSSI_EN 0x0002 +#define PHY_PCR_RX10K 0x0001 + +#define PHY_RCR0 0x01 +#define PHY_RCR1 0x02 +#define PHY_RCR1_ADP_TIME_4 0x0400 +#define PHY_RCR1_VCO_COARSE 0x001F +#define PHY_RCR1_INIT_27S 0x0A1F +#define PHY_SSCCR2 0x02 +#define PHY_SSCCR2_PLL_NCODE 0x0A00 
+#define PHY_SSCCR2_TIME0 0x001C +#define PHY_SSCCR2_TIME2_WIDTH 0x0003 + +#define PHY_RCR2 0x03 +#define PHY_RCR2_EMPHASE_EN 0x8000 +#define PHY_RCR2_NADJR 0x4000 +#define PHY_RCR2_CDR_SR_2 0x0100 +#define PHY_RCR2_FREQSEL_12 0x0040 +#define PHY_RCR2_CDR_SC_12P 0x0010 +#define PHY_RCR2_CALIB_LATE 0x0002 +#define PHY_RCR2_INIT_27S 0xC152 +#define PHY_SSCCR3 0x03 +#define PHY_SSCCR3_STEP_IN 0x2740 +#define PHY_SSCCR3_CHECK_DELAY 0x0008 +#define _PHY_ANA03 0x03 +#define _PHY_ANA03_TIMER_MAX 0x2700 +#define _PHY_ANA03_OOBS_DEB_EN 0x0040 +#define _PHY_CMU_DEBUG_EN 0x0008 + +#define PHY_RTCR 0x04 +#define PHY_RDR 0x05 +#define PHY_RDR_RXDSEL_1_9 0x4000 +#define PHY_SSC_AUTO_PWD 0x0600 +#define PHY_TCR0 0x06 +#define PHY_TCR1 0x07 +#define PHY_TUNE 0x08 +#define PHY_TUNE_TUNEREF_1_0 0x4000 +#define PHY_TUNE_VBGSEL_1252 0x0C00 +#define PHY_TUNE_SDBUS_33 0x0200 +#define PHY_TUNE_TUNED18 0x01C0 +#define PHY_TUNE_TUNED12 0X0020 +#define PHY_TUNE_TUNEA12 0x0004 +#define PHY_TUNE_VOLTAGE_MASK 0xFC3F +#define PHY_TUNE_VOLTAGE_3V3 0x03C0 +#define PHY_TUNE_D18_1V8 0x0100 +#define PHY_TUNE_D18_1V7 0x0080 +#define PHY_ANA08 0x08 +#define PHY_ANA08_RX_EQ_DCGAIN 0x5000 +#define PHY_ANA08_SEL_RX_EN 0x0400 +#define PHY_ANA08_RX_EQ_VAL 0x03C0 +#define PHY_ANA08_SCP 0x0020 +#define PHY_ANA08_SEL_IPI 0x0004 + +#define PHY_IMR 0x09 +#define PHY_BPCR 0x0A +#define PHY_BPCR_IBRXSEL 0x0400 +#define PHY_BPCR_IBTXSEL 0x0100 +#define PHY_BPCR_IB_FILTER 0x0080 +#define PHY_BPCR_CMIRROR_EN 0x0040 + +#define PHY_BIST 0x0B +#define PHY_RAW_L 0x0C +#define PHY_RAW_H 0x0D +#define PHY_RAW_DATA 0x0E +#define PHY_HOST_CLK_CTRL 0x0F +#define PHY_DMR 0x10 +#define PHY_BACR 0x11 +#define PHY_BACR_BASIC_MASK 0xFFF3 +#define PHY_IER 0x12 +#define PHY_BCSR 0x13 +#define PHY_BPR 0x14 +#define PHY_BPNR2 0x15 +#define PHY_BPNR 0x16 +#define PHY_BRNR2 0x17 +#define PHY_BENR 0x18 +#define PHY_REV 0x19 +#define PHY_REV_RESV 0xE000 +#define PHY_REV_RXIDLE_LATCHED 0x1000 +#define PHY_REV_P1_EN 0x0800 +#define 
PHY_REV_RXIDLE_EN 0x0400 +#define PHY_REV_CLKREQ_TX_EN 0x0200 +#define PHY_REV_CLKREQ_RX_EN 0x0100 +#define PHY_REV_CLKREQ_DT_1_0 0x0040 +#define PHY_REV_STOP_CLKRD 0x0020 +#define PHY_REV_RX_PWST 0x0008 +#define PHY_REV_STOP_CLKWR 0x0004 +#define _PHY_REV0 0x19 +#define _PHY_REV0_FILTER_OUT 0x3800 +#define _PHY_REV0_CDR_BYPASS_PFD 0x0100 +#define _PHY_REV0_CDR_RX_IDLE_BYPASS 0x0002 + +#define PHY_FLD0 0x1A +#define PHY_ANA1A 0x1A +#define PHY_ANA1A_TXR_LOOPBACK 0x2000 +#define PHY_ANA1A_RXT_BIST 0x0500 +#define PHY_ANA1A_TXR_BIST 0x0040 +#define PHY_ANA1A_REV 0x0006 +#define PHY_FLD0_INIT_27S 0x2546 +#define PHY_FLD1 0x1B +#define PHY_FLD2 0x1C +#define PHY_FLD3 0x1D +#define PHY_FLD3_TIMER_4 0x0800 +#define PHY_FLD3_TIMER_6 0x0020 +#define PHY_FLD3_RXDELINK 0x0004 +#define PHY_FLD3_INIT_27S 0x0004 +#define PHY_ANA1D 0x1D +#define PHY_ANA1D_DEBUG_ADDR 0x0004 +#define _PHY_FLD0 0x1D +#define _PHY_FLD0_CLK_REQ_20C 0x8000 +#define _PHY_FLD0_RX_IDLE_EN 0x1000 +#define _PHY_FLD0_BIT_ERR_RSTN 0x0800 +#define _PHY_FLD0_BER_COUNT 0x01E0 +#define _PHY_FLD0_BER_TIMER 0x001E +#define _PHY_FLD0_CHECK_EN 0x0001 + +#define PHY_FLD4 0x1E +#define PHY_FLD4_FLDEN_SEL 0x4000 +#define PHY_FLD4_REQ_REF 0x2000 +#define PHY_FLD4_RXAMP_OFF 0x1000 +#define PHY_FLD4_REQ_ADDA 0x0800 +#define PHY_FLD4_BER_COUNT 0x00E0 +#define PHY_FLD4_BER_TIMER 0x000A +#define PHY_FLD4_BER_CHK_EN 0x0001 +#define PHY_FLD4_INIT_27S 0x5C7F +#define PHY_DIG1E 0x1E +#define PHY_DIG1E_REV 0x4000 +#define PHY_DIG1E_D0_X_D1 0x1000 +#define PHY_DIG1E_RX_ON_HOST 0x0800 +#define PHY_DIG1E_RCLK_REF_HOST 0x0400 +#define PHY_DIG1E_RCLK_TX_EN_KEEP 0x0040 +#define PHY_DIG1E_RCLK_TX_TERM_KEEP 0x0020 +#define PHY_DIG1E_RCLK_RX_EIDLE_ON 0x0010 +#define PHY_DIG1E_TX_TERM_KEEP 0x0008 +#define PHY_DIG1E_RX_TERM_KEEP 0x0004 +#define PHY_DIG1E_TX_EN_KEEP 0x0002 +#define PHY_DIG1E_RX_EN_KEEP 0x0001 +#define PHY_DUM_REG 0x1F + +#define PCR_SETTING_REG1 0x724 +#define PCR_SETTING_REG2 0x814 +#define PCR_SETTING_REG3 0x747 + +#define 
rtsx_pci_init_cmd(pcr) ((pcr)->ci = 0) + +struct rtsx_pcr; + +struct pcr_handle { + struct rtsx_pcr *pcr; +}; + +struct pcr_ops { + int (*write_phy)(struct rtsx_pcr *pcr, u8 addr, u16 val); + int (*read_phy)(struct rtsx_pcr *pcr, u8 addr, u16 *val); + int (*extra_init_hw)(struct rtsx_pcr *pcr); + int (*optimize_phy)(struct rtsx_pcr *pcr); + int (*turn_on_led)(struct rtsx_pcr *pcr); + int (*turn_off_led)(struct rtsx_pcr *pcr); + int (*enable_auto_blink)(struct rtsx_pcr *pcr); + int (*disable_auto_blink)(struct rtsx_pcr *pcr); + int (*card_power_on)(struct rtsx_pcr *pcr, int card); + int (*card_power_off)(struct rtsx_pcr *pcr, int card); + int (*switch_output_voltage)(struct rtsx_pcr *pcr, + u8 voltage); + unsigned int (*cd_deglitch)(struct rtsx_pcr *pcr); + int (*conv_clk_and_div_n)(int clk, int dir); + void (*fetch_vendor_settings)(struct rtsx_pcr *pcr); + void (*force_power_down)(struct rtsx_pcr *pcr, u8 pm_state); +}; + +enum PDEV_STAT {PDEV_STAT_IDLE, PDEV_STAT_RUN}; + +struct rtsx_pcr { + struct pci_dev *pci; + unsigned int id; + int pcie_cap; + + /* pci resources */ + unsigned long addr; + void __iomem *remap_addr; + int irq; + + /* host reserved buffer */ + void *rtsx_resv_buf; + dma_addr_t rtsx_resv_buf_addr; + + void *host_cmds_ptr; + dma_addr_t host_cmds_addr; + int ci; + + void *host_sg_tbl_ptr; + dma_addr_t host_sg_tbl_addr; + int sgi; + + u32 bier; + char trans_result; + + unsigned int card_inserted; + unsigned int card_removed; + unsigned int card_exist; + + struct delayed_work carddet_work; + struct delayed_work idle_work; + + spinlock_t lock; + struct mutex pcr_mutex; + struct completion *done; + struct completion *finish_me; + + unsigned int cur_clock; + bool remove_pci; + bool msi_en; + +#define EXTRA_CAPS_SD_SDR50 (1 << 0) +#define EXTRA_CAPS_SD_SDR104 (1 << 1) +#define EXTRA_CAPS_SD_DDR50 (1 << 2) +#define EXTRA_CAPS_MMC_HSDDR (1 << 3) +#define EXTRA_CAPS_MMC_HS200 (1 << 4) +#define EXTRA_CAPS_MMC_8BIT (1 << 5) + u32 extra_caps; + +#define 
IC_VER_A 0 +#define IC_VER_B 1 +#define IC_VER_C 2 +#define IC_VER_D 3 + u8 ic_version; + + u8 sd30_drive_sel_1v8; + u8 sd30_drive_sel_3v3; + u8 card_drive_sel; +#define ASPM_L1_EN 0x02 + u8 aspm_en; + +#define PCR_MS_PMOS (1 << 0) +#define PCR_REVERSE_SOCKET (1 << 1) + u32 flags; + + u32 tx_initial_phase; + u32 rx_initial_phase; + + const u32 *sd_pull_ctl_enable_tbl; + const u32 *sd_pull_ctl_disable_tbl; + const u32 *ms_pull_ctl_enable_tbl; + const u32 *ms_pull_ctl_disable_tbl; + + const struct pcr_ops *ops; + enum PDEV_STAT state; + + u16 reg_pm_ctrl3; + + int num_slots; + struct rtsx_slot *slots; +}; + +#define CHK_PCI_PID(pcr, pid) ((pcr)->pci->device == (pid)) +#define PCI_VID(pcr) ((pcr)->pci->vendor) +#define PCI_PID(pcr) ((pcr)->pci->device) +#define is_version(pcr, pid, ver) \ + (CHK_PCI_PID(pcr, pid) && (pcr)->ic_version == (ver)) +#define pcr_dbg(pcr, fmt, arg...) \ + dev_dbg(&(pcr)->pci->dev, fmt, ##arg) + +#define SDR104_PHASE(val) ((val) & 0xFF) +#define SDR50_PHASE(val) (((val) >> 8) & 0xFF) +#define DDR50_PHASE(val) (((val) >> 16) & 0xFF) +#define SDR104_TX_PHASE(pcr) SDR104_PHASE((pcr)->tx_initial_phase) +#define SDR50_TX_PHASE(pcr) SDR50_PHASE((pcr)->tx_initial_phase) +#define DDR50_TX_PHASE(pcr) DDR50_PHASE((pcr)->tx_initial_phase) +#define SDR104_RX_PHASE(pcr) SDR104_PHASE((pcr)->rx_initial_phase) +#define SDR50_RX_PHASE(pcr) SDR50_PHASE((pcr)->rx_initial_phase) +#define DDR50_RX_PHASE(pcr) DDR50_PHASE((pcr)->rx_initial_phase) +#define SET_CLOCK_PHASE(sdr104, sdr50, ddr50) \ + (((ddr50) << 16) | ((sdr50) << 8) | (sdr104)) + +void rtsx_pci_start_run(struct rtsx_pcr *pcr); +int rtsx_pci_write_register(struct rtsx_pcr *pcr, u16 addr, u8 mask, u8 data); +int rtsx_pci_read_register(struct rtsx_pcr *pcr, u16 addr, u8 *data); +int rtsx_pci_write_phy_register(struct rtsx_pcr *pcr, u8 addr, u16 val); +int rtsx_pci_read_phy_register(struct rtsx_pcr *pcr, u8 addr, u16 *val); +void rtsx_pci_stop_cmd(struct rtsx_pcr *pcr); +void rtsx_pci_add_cmd(struct 
rtsx_pcr *pcr, + u8 cmd_type, u16 reg_addr, u8 mask, u8 data); +void rtsx_pci_send_cmd_no_wait(struct rtsx_pcr *pcr); +int rtsx_pci_send_cmd(struct rtsx_pcr *pcr, int timeout); +int rtsx_pci_transfer_data(struct rtsx_pcr *pcr, struct scatterlist *sglist, + int num_sg, bool read, int timeout); +int rtsx_pci_dma_map_sg(struct rtsx_pcr *pcr, struct scatterlist *sglist, + int num_sg, bool read); +void rtsx_pci_dma_unmap_sg(struct rtsx_pcr *pcr, struct scatterlist *sglist, + int num_sg, bool read); +int rtsx_pci_dma_transfer(struct rtsx_pcr *pcr, struct scatterlist *sglist, + int count, bool read, int timeout); +int rtsx_pci_read_ppbuf(struct rtsx_pcr *pcr, u8 *buf, int buf_len); +int rtsx_pci_write_ppbuf(struct rtsx_pcr *pcr, u8 *buf, int buf_len); +int rtsx_pci_card_pull_ctl_enable(struct rtsx_pcr *pcr, int card); +int rtsx_pci_card_pull_ctl_disable(struct rtsx_pcr *pcr, int card); +int rtsx_pci_switch_clock(struct rtsx_pcr *pcr, unsigned int card_clock, + u8 ssc_depth, bool initial_mode, bool double_clk, bool vpclk); +int rtsx_pci_card_power_on(struct rtsx_pcr *pcr, int card); +int rtsx_pci_card_power_off(struct rtsx_pcr *pcr, int card); +int rtsx_pci_card_exclusive_check(struct rtsx_pcr *pcr, int card); +int rtsx_pci_switch_output_voltage(struct rtsx_pcr *pcr, u8 voltage); +unsigned int rtsx_pci_card_exist(struct rtsx_pcr *pcr); +void rtsx_pci_complete_unfinished_transfer(struct rtsx_pcr *pcr); + +static inline u8 *rtsx_pci_get_cmd_data(struct rtsx_pcr *pcr) +{ + return (u8 *)(pcr->host_cmds_ptr); +} + +static inline int rtsx_pci_update_cfg_byte(struct rtsx_pcr *pcr, int addr, + u8 mask, u8 append) +{ + int err; + u8 val; + + err = pci_read_config_byte(pcr->pci, addr, &val); + if (err < 0) + return err; + return pci_write_config_byte(pcr->pci, addr, (val & mask) | append); +} + +static inline void rtsx_pci_write_be32(struct rtsx_pcr *pcr, u16 reg, u32 val) +{ + rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, reg, 0xFF, val >> 24); + rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, reg + 
1, 0xFF, val >> 16); + rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, reg + 2, 0xFF, val >> 8); + rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, reg + 3, 0xFF, val); +} + +static inline int rtsx_pci_update_phy(struct rtsx_pcr *pcr, u8 addr, + u16 mask, u16 append) +{ + int err; + u16 val; + + err = rtsx_pci_read_phy_register(pcr, addr, &val); + if (err < 0) + return err; + + return rtsx_pci_write_phy_register(pcr, addr, (val & mask) | append); +} + +#endif diff --git a/include/linux/mfd/rtsx_usb.h b/include/linux/mfd/rtsx_usb.h new file mode 100644 index 0000000000..c446e4fd6b --- /dev/null +++ b/include/linux/mfd/rtsx_usb.h @@ -0,0 +1,628 @@ +/* Driver for Realtek RTS5139 USB card reader + * + * Copyright(c) 2009-2013 Realtek Semiconductor Corp. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, see . 
+ * + * Author: + * Roger Tseng + */ + +#ifndef __RTSX_USB_H +#define __RTSX_USB_H + +#include + +/* related module names */ +#define RTSX_USB_SD_CARD 0 +#define RTSX_USB_MS_CARD 1 + +/* endpoint numbers */ +#define EP_BULK_OUT 1 +#define EP_BULK_IN 2 +#define EP_INTR_IN 3 + +/* USB vendor requests */ +#define RTSX_USB_REQ_REG_OP 0x00 +#define RTSX_USB_REQ_POLL 0x02 + +/* miscellaneous parameters */ +#define MIN_DIV_N 60 +#define MAX_DIV_N 120 + +#define MAX_PHASE 15 +#define RX_TUNING_CNT 3 + +#define QFN24 0 +#define LQFP48 1 +#define CHECK_PKG(ucr, pkg) ((ucr)->package == (pkg)) + +/* data structures */ +struct rtsx_ucr { + u16 vendor_id; + u16 product_id; + + int package; + u8 ic_version; + bool is_rts5179; + + unsigned int cur_clk; + + u8 *cmd_buf; + unsigned int cmd_idx; + u8 *rsp_buf; + + struct usb_device *pusb_dev; + struct usb_interface *pusb_intf; + struct usb_sg_request current_sg; + unsigned char *iobuf; + dma_addr_t iobuf_dma; + + struct timer_list sg_timer; + struct mutex dev_mutex; +}; + +/* buffer size */ +#define IOBUF_SIZE 1024 + +/* prototypes of exported functions */ +extern int rtsx_usb_get_card_status(struct rtsx_ucr *ucr, u16 *status); + +extern int rtsx_usb_read_register(struct rtsx_ucr *ucr, u16 addr, u8 *data); +extern int rtsx_usb_write_register(struct rtsx_ucr *ucr, u16 addr, u8 mask, + u8 data); + +extern int rtsx_usb_ep0_write_register(struct rtsx_ucr *ucr, u16 addr, u8 mask, + u8 data); +extern int rtsx_usb_ep0_read_register(struct rtsx_ucr *ucr, u16 addr, + u8 *data); + +extern void rtsx_usb_add_cmd(struct rtsx_ucr *ucr, u8 cmd_type, + u16 reg_addr, u8 mask, u8 data); +extern int rtsx_usb_send_cmd(struct rtsx_ucr *ucr, u8 flag, int timeout); +extern int rtsx_usb_get_rsp(struct rtsx_ucr *ucr, int rsp_len, int timeout); +extern int rtsx_usb_transfer_data(struct rtsx_ucr *ucr, unsigned int pipe, + void *buf, unsigned int len, int use_sg, + unsigned int *act_len, int timeout); + +extern int rtsx_usb_read_ppbuf(struct rtsx_ucr *ucr, u8 
*buf, int buf_len); +extern int rtsx_usb_write_ppbuf(struct rtsx_ucr *ucr, u8 *buf, int buf_len); +extern int rtsx_usb_switch_clock(struct rtsx_ucr *ucr, unsigned int card_clock, + u8 ssc_depth, bool initial_mode, bool double_clk, bool vpclk); +extern int rtsx_usb_card_exclusive_check(struct rtsx_ucr *ucr, int card); + +/* card status */ +#define SD_CD 0x01 +#define MS_CD 0x02 +#define XD_CD 0x04 +#define CD_MASK (SD_CD | MS_CD | XD_CD) +#define SD_WP 0x08 + +/* reader command field offset & parameters */ +#define READ_REG_CMD 0 +#define WRITE_REG_CMD 1 +#define CHECK_REG_CMD 2 + +#define PACKET_TYPE 4 +#define CNT_H 5 +#define CNT_L 6 +#define STAGE_FLAG 7 +#define CMD_OFFSET 8 +#define SEQ_WRITE_DATA_OFFSET 12 + +#define BATCH_CMD 0 +#define SEQ_READ 1 +#define SEQ_WRITE 2 + +#define STAGE_R 0x01 +#define STAGE_DI 0x02 +#define STAGE_DO 0x04 +#define STAGE_MS_STATUS 0x08 +#define STAGE_XD_STATUS 0x10 +#define MODE_C 0x00 +#define MODE_CR (STAGE_R) +#define MODE_CDIR (STAGE_R | STAGE_DI) +#define MODE_CDOR (STAGE_R | STAGE_DO) + +#define EP0_OP_SHIFT 14 +#define EP0_READ_REG_CMD 2 +#define EP0_WRITE_REG_CMD 3 + +#define rtsx_usb_cmd_hdr_tag(ucr) \ + do { \ + ucr->cmd_buf[0] = 'R'; \ + ucr->cmd_buf[1] = 'T'; \ + ucr->cmd_buf[2] = 'C'; \ + ucr->cmd_buf[3] = 'R'; \ + } while (0) + +static inline void rtsx_usb_init_cmd(struct rtsx_ucr *ucr) +{ + rtsx_usb_cmd_hdr_tag(ucr); + ucr->cmd_idx = 0; + ucr->cmd_buf[PACKET_TYPE] = BATCH_CMD; +} + +/* internal register address */ +#define FPDCTL 0xFC00 +#define SSC_DIV_N_0 0xFC07 +#define SSC_CTL1 0xFC09 +#define SSC_CTL2 0xFC0A +#define CFG_MODE 0xFC0E +#define CFG_MODE_1 0xFC0F +#define RCCTL 0xFC14 +#define SOF_WDOG 0xFC28 +#define SYS_DUMMY0 0xFC30 + +#define MS_BLKEND 0xFD30 +#define MS_READ_START 0xFD31 +#define MS_READ_COUNT 0xFD32 +#define MS_WRITE_START 0xFD33 +#define MS_WRITE_COUNT 0xFD34 +#define MS_COMMAND 0xFD35 +#define MS_OLD_BLOCK_0 0xFD36 +#define MS_OLD_BLOCK_1 0xFD37 +#define MS_NEW_BLOCK_0 0xFD38 +#define 
MS_NEW_BLOCK_1 0xFD39 +#define MS_LOG_BLOCK_0 0xFD3A +#define MS_LOG_BLOCK_1 0xFD3B +#define MS_BUS_WIDTH 0xFD3C +#define MS_PAGE_START 0xFD3D +#define MS_PAGE_LENGTH 0xFD3E +#define MS_CFG 0xFD40 +#define MS_TPC 0xFD41 +#define MS_TRANS_CFG 0xFD42 +#define MS_TRANSFER 0xFD43 +#define MS_INT_REG 0xFD44 +#define MS_BYTE_CNT 0xFD45 +#define MS_SECTOR_CNT_L 0xFD46 +#define MS_SECTOR_CNT_H 0xFD47 +#define MS_DBUS_H 0xFD48 + +#define CARD_DMA1_CTL 0xFD5C +#define CARD_PULL_CTL1 0xFD60 +#define CARD_PULL_CTL2 0xFD61 +#define CARD_PULL_CTL3 0xFD62 +#define CARD_PULL_CTL4 0xFD63 +#define CARD_PULL_CTL5 0xFD64 +#define CARD_PULL_CTL6 0xFD65 +#define CARD_EXIST 0xFD6F +#define CARD_INT_PEND 0xFD71 + +#define LDO_POWER_CFG 0xFD7B + +#define SD_CFG1 0xFDA0 +#define SD_CFG2 0xFDA1 +#define SD_CFG3 0xFDA2 +#define SD_STAT1 0xFDA3 +#define SD_STAT2 0xFDA4 +#define SD_BUS_STAT 0xFDA5 +#define SD_PAD_CTL 0xFDA6 +#define SD_SAMPLE_POINT_CTL 0xFDA7 +#define SD_PUSH_POINT_CTL 0xFDA8 +#define SD_CMD0 0xFDA9 +#define SD_CMD1 0xFDAA +#define SD_CMD2 0xFDAB +#define SD_CMD3 0xFDAC +#define SD_CMD4 0xFDAD +#define SD_CMD5 0xFDAE +#define SD_BYTE_CNT_L 0xFDAF +#define SD_BYTE_CNT_H 0xFDB0 +#define SD_BLOCK_CNT_L 0xFDB1 +#define SD_BLOCK_CNT_H 0xFDB2 +#define SD_TRANSFER 0xFDB3 +#define SD_CMD_STATE 0xFDB5 +#define SD_DATA_STATE 0xFDB6 +#define SD_VPCLK0_CTL 0xFC2A +#define SD_VPCLK1_CTL 0xFC2B +#define SD_DCMPS0_CTL 0xFC2C +#define SD_DCMPS1_CTL 0xFC2D + +#define CARD_DMA1_CTL 0xFD5C + +#define HW_VERSION 0xFC01 + +#define SSC_CLK_FPGA_SEL 0xFC02 +#define CLK_DIV 0xFC03 +#define SFSM_ED 0xFC04 + +#define CD_DEGLITCH_WIDTH 0xFC20 +#define CD_DEGLITCH_EN 0xFC21 +#define AUTO_DELINK_EN 0xFC23 + +#define FPGA_PULL_CTL 0xFC1D +#define CARD_CLK_SOURCE 0xFC2E + +#define CARD_SHARE_MODE 0xFD51 +#define CARD_DRIVE_SEL 0xFD52 +#define CARD_STOP 0xFD53 +#define CARD_OE 0xFD54 +#define CARD_AUTO_BLINK 0xFD55 +#define CARD_GPIO 0xFD56 +#define SD30_DRIVE_SEL 0xFD57 + +#define CARD_DATA_SOURCE 0xFD5D 
+#define CARD_SELECT 0xFD5E + +#define CARD_CLK_EN 0xFD79 +#define CARD_PWR_CTL 0xFD7A + +#define OCPCTL 0xFD80 +#define OCPPARA1 0xFD81 +#define OCPPARA2 0xFD82 +#define OCPSTAT 0xFD83 + +#define HS_USB_STAT 0xFE01 +#define HS_VCONTROL 0xFE26 +#define HS_VSTAIN 0xFE27 +#define HS_VLOADM 0xFE28 +#define HS_VSTAOUT 0xFE29 + +#define MC_IRQ 0xFF00 +#define MC_IRQEN 0xFF01 +#define MC_FIFO_CTL 0xFF02 +#define MC_FIFO_BC0 0xFF03 +#define MC_FIFO_BC1 0xFF04 +#define MC_FIFO_STAT 0xFF05 +#define MC_FIFO_MODE 0xFF06 +#define MC_FIFO_RD_PTR0 0xFF07 +#define MC_FIFO_RD_PTR1 0xFF08 +#define MC_DMA_CTL 0xFF10 +#define MC_DMA_TC0 0xFF11 +#define MC_DMA_TC1 0xFF12 +#define MC_DMA_TC2 0xFF13 +#define MC_DMA_TC3 0xFF14 +#define MC_DMA_RST 0xFF15 + +#define RBUF_SIZE_MASK 0xFBFF +#define RBUF_BASE 0xF000 +#define PPBUF_BASE1 0xF800 +#define PPBUF_BASE2 0xFA00 + +/* internal register value macros */ +#define POWER_OFF 0x03 +#define PARTIAL_POWER_ON 0x02 +#define POWER_ON 0x00 +#define POWER_MASK 0x03 +#define LDO3318_PWR_MASK 0x0C +#define LDO_ON 0x00 +#define LDO_SUSPEND 0x08 +#define LDO_OFF 0x0C +#define DV3318_AUTO_PWR_OFF 0x10 +#define FORCE_LDO_POWERB 0x60 + +/* LDO_POWER_CFG */ +#define TUNE_SD18_MASK 0x1C +#define TUNE_SD18_1V7 0x00 +#define TUNE_SD18_1V8 (0x01 << 2) +#define TUNE_SD18_1V9 (0x02 << 2) +#define TUNE_SD18_2V0 (0x03 << 2) +#define TUNE_SD18_2V7 (0x04 << 2) +#define TUNE_SD18_2V8 (0x05 << 2) +#define TUNE_SD18_2V9 (0x06 << 2) +#define TUNE_SD18_3V3 (0x07 << 2) + +/* CLK_DIV */ +#define CLK_CHANGE 0x80 +#define CLK_DIV_1 0x00 +#define CLK_DIV_2 0x01 +#define CLK_DIV_4 0x02 +#define CLK_DIV_8 0x03 + +#define SSC_POWER_MASK 0x01 +#define SSC_POWER_DOWN 0x01 +#define SSC_POWER_ON 0x00 + +#define FPGA_VER 0x80 +#define HW_VER_MASK 0x0F + +#define EXTEND_DMA1_ASYNC_SIGNAL 0x02 + +/* CFG_MODE*/ +#define XTAL_FREE 0x80 +#define CLK_MODE_MASK 0x03 +#define CLK_MODE_12M_XTAL 0x00 +#define CLK_MODE_NON_XTAL 0x01 +#define CLK_MODE_24M_OSC 0x02 +#define CLK_MODE_48M_OSC 
0x03 + +/* CFG_MODE_1*/ +#define RTS5179 0x02 + +#define NYET_EN 0x01 +#define NYET_MSAK 0x01 + +#define SD30_DRIVE_MASK 0x07 +#define SD20_DRIVE_MASK 0x03 + +#define DISABLE_SD_CD 0x08 +#define DISABLE_MS_CD 0x10 +#define DISABLE_XD_CD 0x20 +#define SD_CD_DEGLITCH_EN 0x01 +#define MS_CD_DEGLITCH_EN 0x02 +#define XD_CD_DEGLITCH_EN 0x04 + +#define CARD_SHARE_LQFP48 0x04 +#define CARD_SHARE_QFN24 0x00 +#define CARD_SHARE_LQFP_SEL 0x04 +#define CARD_SHARE_XD 0x00 +#define CARD_SHARE_SD 0x01 +#define CARD_SHARE_MS 0x02 +#define CARD_SHARE_MASK 0x03 + + +/* SD30_DRIVE_SEL */ +#define DRIVER_TYPE_A 0x05 +#define DRIVER_TYPE_B 0x03 +#define DRIVER_TYPE_C 0x02 +#define DRIVER_TYPE_D 0x01 + +/* SD_BUS_STAT */ +#define SD_CLK_TOGGLE_EN 0x80 +#define SD_CLK_FORCE_STOP 0x40 +#define SD_DAT3_STATUS 0x10 +#define SD_DAT2_STATUS 0x08 +#define SD_DAT1_STATUS 0x04 +#define SD_DAT0_STATUS 0x02 +#define SD_CMD_STATUS 0x01 + +/* SD_PAD_CTL */ +#define SD_IO_USING_1V8 0x80 +#define SD_IO_USING_3V3 0x7F +#define TYPE_A_DRIVING 0x00 +#define TYPE_B_DRIVING 0x01 +#define TYPE_C_DRIVING 0x02 +#define TYPE_D_DRIVING 0x03 + +/* CARD_CLK_EN */ +#define SD_CLK_EN 0x04 +#define MS_CLK_EN 0x08 + +/* CARD_SELECT */ +#define SD_MOD_SEL 2 +#define MS_MOD_SEL 3 + +/* CARD_SHARE_MODE */ +#define CARD_SHARE_LQFP48 0x04 +#define CARD_SHARE_QFN24 0x00 +#define CARD_SHARE_LQFP_SEL 0x04 +#define CARD_SHARE_XD 0x00 +#define CARD_SHARE_SD 0x01 +#define CARD_SHARE_MS 0x02 +#define CARD_SHARE_MASK 0x03 + +/* SSC_CTL1 */ +#define SSC_RSTB 0x80 +#define SSC_8X_EN 0x40 +#define SSC_FIX_FRAC 0x20 +#define SSC_SEL_1M 0x00 +#define SSC_SEL_2M 0x08 +#define SSC_SEL_4M 0x10 +#define SSC_SEL_8M 0x18 + +/* SSC_CTL2 */ +#define SSC_DEPTH_MASK 0x03 +#define SSC_DEPTH_DISALBE 0x00 +#define SSC_DEPTH_2M 0x01 +#define SSC_DEPTH_1M 0x02 +#define SSC_DEPTH_512K 0x03 + +/* SD_VPCLK0_CTL */ +#define PHASE_CHANGE 0x80 +#define PHASE_NOT_RESET 0x40 + +/* SD_TRANSFER */ +#define SD_TRANSFER_START 0x80 +#define SD_TRANSFER_END 0x40 
+#define SD_STAT_IDLE 0x20 +#define SD_TRANSFER_ERR 0x10 +#define SD_TM_NORMAL_WRITE 0x00 +#define SD_TM_AUTO_WRITE_3 0x01 +#define SD_TM_AUTO_WRITE_4 0x02 +#define SD_TM_AUTO_READ_3 0x05 +#define SD_TM_AUTO_READ_4 0x06 +#define SD_TM_CMD_RSP 0x08 +#define SD_TM_AUTO_WRITE_1 0x09 +#define SD_TM_AUTO_WRITE_2 0x0A +#define SD_TM_NORMAL_READ 0x0C +#define SD_TM_AUTO_READ_1 0x0D +#define SD_TM_AUTO_READ_2 0x0E +#define SD_TM_AUTO_TUNING 0x0F + +/* SD_CFG1 */ +#define SD_CLK_DIVIDE_0 0x00 +#define SD_CLK_DIVIDE_256 0xC0 +#define SD_CLK_DIVIDE_128 0x80 +#define SD_CLK_DIVIDE_MASK 0xC0 +#define SD_BUS_WIDTH_1BIT 0x00 +#define SD_BUS_WIDTH_4BIT 0x01 +#define SD_BUS_WIDTH_8BIT 0x02 +#define SD_ASYNC_FIFO_RST 0x10 +#define SD_20_MODE 0x00 +#define SD_DDR_MODE 0x04 +#define SD_30_MODE 0x08 + +/* SD_CFG2 */ +#define SD_CALCULATE_CRC7 0x00 +#define SD_NO_CALCULATE_CRC7 0x80 +#define SD_CHECK_CRC16 0x00 +#define SD_NO_CHECK_CRC16 0x40 +#define SD_WAIT_CRC_TO_EN 0x20 +#define SD_WAIT_BUSY_END 0x08 +#define SD_NO_WAIT_BUSY_END 0x00 +#define SD_CHECK_CRC7 0x00 +#define SD_NO_CHECK_CRC7 0x04 +#define SD_RSP_LEN_0 0x00 +#define SD_RSP_LEN_6 0x01 +#define SD_RSP_LEN_17 0x02 +#define SD_RSP_TYPE_R0 0x04 +#define SD_RSP_TYPE_R1 0x01 +#define SD_RSP_TYPE_R1b 0x09 +#define SD_RSP_TYPE_R2 0x02 +#define SD_RSP_TYPE_R3 0x05 +#define SD_RSP_TYPE_R4 0x05 +#define SD_RSP_TYPE_R5 0x01 +#define SD_RSP_TYPE_R6 0x01 +#define SD_RSP_TYPE_R7 0x01 + +/* SD_STAT1 */ +#define SD_CRC7_ERR 0x80 +#define SD_CRC16_ERR 0x40 +#define SD_CRC_WRITE_ERR 0x20 +#define SD_CRC_WRITE_ERR_MASK 0x1C +#define GET_CRC_TIME_OUT 0x02 +#define SD_TUNING_COMPARE_ERR 0x01 + +/* SD_DATA_STATE */ +#define SD_DATA_IDLE 0x80 + +/* CARD_DATA_SOURCE */ +#define PINGPONG_BUFFER 0x01 +#define RING_BUFFER 0x00 + +/* CARD_OE */ +#define SD_OUTPUT_EN 0x04 +#define MS_OUTPUT_EN 0x08 + +/* CARD_STOP */ +#define SD_STOP 0x04 +#define MS_STOP 0x08 +#define SD_CLR_ERR 0x40 +#define MS_CLR_ERR 0x80 + +/* CARD_CLK_SOURCE */ +#define 
CRC_FIX_CLK (0x00 << 0) +#define CRC_VAR_CLK0 (0x01 << 0) +#define CRC_VAR_CLK1 (0x02 << 0) +#define SD30_FIX_CLK (0x00 << 2) +#define SD30_VAR_CLK0 (0x01 << 2) +#define SD30_VAR_CLK1 (0x02 << 2) +#define SAMPLE_FIX_CLK (0x00 << 4) +#define SAMPLE_VAR_CLK0 (0x01 << 4) +#define SAMPLE_VAR_CLK1 (0x02 << 4) + +/* SD_SAMPLE_POINT_CTL */ +#define DDR_FIX_RX_DAT 0x00 +#define DDR_VAR_RX_DAT 0x80 +#define DDR_FIX_RX_DAT_EDGE 0x00 +#define DDR_FIX_RX_DAT_14_DELAY 0x40 +#define DDR_FIX_RX_CMD 0x00 +#define DDR_VAR_RX_CMD 0x20 +#define DDR_FIX_RX_CMD_POS_EDGE 0x00 +#define DDR_FIX_RX_CMD_14_DELAY 0x10 +#define SD20_RX_POS_EDGE 0x00 +#define SD20_RX_14_DELAY 0x08 +#define SD20_RX_SEL_MASK 0x08 + +/* SD_PUSH_POINT_CTL */ +#define DDR_FIX_TX_CMD_DAT 0x00 +#define DDR_VAR_TX_CMD_DAT 0x80 +#define DDR_FIX_TX_DAT_14_TSU 0x00 +#define DDR_FIX_TX_DAT_12_TSU 0x40 +#define DDR_FIX_TX_CMD_NEG_EDGE 0x00 +#define DDR_FIX_TX_CMD_14_AHEAD 0x20 +#define SD20_TX_NEG_EDGE 0x00 +#define SD20_TX_14_AHEAD 0x10 +#define SD20_TX_SEL_MASK 0x10 +#define DDR_VAR_SDCLK_POL_SWAP 0x01 + +/* MS_CFG */ +#define SAMPLE_TIME_RISING 0x00 +#define SAMPLE_TIME_FALLING 0x80 +#define PUSH_TIME_DEFAULT 0x00 +#define PUSH_TIME_ODD 0x40 +#define NO_EXTEND_TOGGLE 0x00 +#define EXTEND_TOGGLE_CHK 0x20 +#define MS_BUS_WIDTH_1 0x00 +#define MS_BUS_WIDTH_4 0x10 +#define MS_BUS_WIDTH_8 0x18 +#define MS_2K_SECTOR_MODE 0x04 +#define MS_512_SECTOR_MODE 0x00 +#define MS_TOGGLE_TIMEOUT_EN 0x00 +#define MS_TOGGLE_TIMEOUT_DISEN 0x01 +#define MS_NO_CHECK_INT 0x02 + +/* MS_TRANS_CFG */ +#define WAIT_INT 0x80 +#define NO_WAIT_INT 0x00 +#define NO_AUTO_READ_INT_REG 0x00 +#define AUTO_READ_INT_REG 0x40 +#define MS_CRC16_ERR 0x20 +#define MS_RDY_TIMEOUT 0x10 +#define MS_INT_CMDNK 0x08 +#define MS_INT_BREQ 0x04 +#define MS_INT_ERR 0x02 +#define MS_INT_CED 0x01 + +/* MS_TRANSFER */ +#define MS_TRANSFER_START 0x80 +#define MS_TRANSFER_END 0x40 +#define MS_TRANSFER_ERR 0x20 +#define MS_BS_STATE 0x10 +#define MS_TM_READ_BYTES 0x00 +#define 
MS_TM_NORMAL_READ 0x01 +#define MS_TM_WRITE_BYTES 0x04 +#define MS_TM_NORMAL_WRITE 0x05 +#define MS_TM_AUTO_READ 0x08 +#define MS_TM_AUTO_WRITE 0x0C +#define MS_TM_SET_CMD 0x06 +#define MS_TM_COPY_PAGE 0x07 +#define MS_TM_MULTI_READ 0x02 +#define MS_TM_MULTI_WRITE 0x03 + +/* MC_FIFO_CTL */ +#define FIFO_FLUSH 0x01 + +/* MC_DMA_RST */ +#define DMA_RESET 0x01 + +/* MC_DMA_CTL */ +#define DMA_TC_EQ_0 0x80 +#define DMA_DIR_TO_CARD 0x00 +#define DMA_DIR_FROM_CARD 0x02 +#define DMA_EN 0x01 +#define DMA_128 (0 << 2) +#define DMA_256 (1 << 2) +#define DMA_512 (2 << 2) +#define DMA_1024 (3 << 2) +#define DMA_PACK_SIZE_MASK 0x0C + +/* CARD_INT_PEND */ +#define XD_INT 0x10 +#define MS_INT 0x08 +#define SD_INT 0x04 + +/* LED operations*/ +static inline int rtsx_usb_turn_on_led(struct rtsx_ucr *ucr) +{ + return rtsx_usb_ep0_write_register(ucr, CARD_GPIO, 0x03, 0x02); +} + +static inline int rtsx_usb_turn_off_led(struct rtsx_ucr *ucr) +{ + return rtsx_usb_ep0_write_register(ucr, CARD_GPIO, 0x03, 0x03); +} + +/* HW error clearing */ +static inline void rtsx_usb_clear_fsm_err(struct rtsx_ucr *ucr) +{ + rtsx_usb_ep0_write_register(ucr, SFSM_ED, 0xf8, 0xf8); +} + +static inline void rtsx_usb_clear_dma_err(struct rtsx_ucr *ucr) +{ + rtsx_usb_ep0_write_register(ucr, MC_FIFO_CTL, + FIFO_FLUSH, FIFO_FLUSH); + rtsx_usb_ep0_write_register(ucr, MC_DMA_RST, DMA_RESET, DMA_RESET); +} +#endif /* __RTS51139_H */ diff --git a/include/linux/mfd/samsung/core.h b/include/linux/mfd/samsung/core.h index f92fe09047..5a23dd4df4 100644 --- a/include/linux/mfd/samsung/core.h +++ b/include/linux/mfd/samsung/core.h @@ -1,7 +1,14 @@ -/* SPDX-License-Identifier: GPL-2.0+ */ /* - * Copyright (c) 2011 Samsung Electronics Co., Ltd + * core.h + * + * copyright (c) 2011 Samsung Electronics Co., Ltd * http://www.samsung.com + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either 
version 2 of the License, or (at your + * option) any later version. + * */ #ifndef __LINUX_MFD_SEC_CORE_H @@ -20,7 +27,6 @@ #define MIN_850_MV 850000 #define MIN_800_MV 800000 #define MIN_750_MV 750000 -#define MIN_650_MV 650000 #define MIN_600_MV 600000 #define MIN_500_MV 500000 @@ -33,8 +39,6 @@ #define STEP_12_5_MV 12500 #define STEP_6_25_MV 6250 -struct gpio_desc; - enum sec_device_type { S5M8751X, S5M8763X, @@ -67,8 +71,11 @@ struct sec_pmic_dev { struct i2c_client *i2c; unsigned long device_type; + int irq_base; int irq; struct regmap_irq_chip_data *irq_data; + + bool wakeup; }; int sec_irq_init(struct sec_pmic_dev *sec_pmic); @@ -78,8 +85,15 @@ int sec_irq_resume(struct sec_pmic_dev *sec_pmic); struct sec_platform_data { struct sec_regulator_data *regulators; struct sec_opmode_data *opmode; + int device_type; int num_regulators; + int irq_base; + int (*cfg_pmic_irq)(void); + + bool wakeup; + bool buck_voltage_lock; + int buck_gpios[3]; int buck_ds[3]; unsigned int buck2_voltage[8]; @@ -89,12 +103,35 @@ struct sec_platform_data { unsigned int buck4_voltage[8]; bool buck4_gpiodvs; + int buck_set1; + int buck_set2; + int buck_set3; + int buck2_enable; + int buck3_enable; + int buck4_enable; int buck_default_idx; + int buck2_default_idx; + int buck3_default_idx; + int buck4_default_idx; + int buck_ramp_delay; + int buck2_ramp_delay; + int buck34_ramp_delay; + int buck5_ramp_delay; + int buck16_ramp_delay; + int buck7810_ramp_delay; + int buck9_ramp_delay; + int buck24_ramp_delay; + int buck3_ramp_delay; + int buck7_ramp_delay; + int buck8910_ramp_delay; + + bool buck1_ramp_enable; bool buck2_ramp_enable; bool buck3_ramp_enable; bool buck4_ramp_enable; + bool buck6_ramp_enable; int buck2_init; int buck3_init; @@ -114,7 +151,7 @@ struct sec_regulator_data { int id; struct regulator_init_data *initdata; struct device_node *reg_node; - struct gpio_desc *ext_control_gpiod; + int ext_control_gpio; }; /* diff --git a/include/linux/mfd/samsung/irq.h 
b/include/linux/mfd/samsung/irq.h index 6cfe4201a1..667aa40486 100644 --- a/include/linux/mfd/samsung/irq.h +++ b/include/linux/mfd/samsung/irq.h @@ -1,7 +1,13 @@ -/* SPDX-License-Identifier: GPL-2.0+ */ -/* +/* irq.h + * * Copyright (c) 2012 Samsung Electronics Co., Ltd * http://www.samsung.com + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + * */ #ifndef __LINUX_MFD_SEC_IRQ_H diff --git a/include/linux/mfd/samsung/rtc.h b/include/linux/mfd/samsung/rtc.h index 0204decfc9..48c3c5be7e 100644 --- a/include/linux/mfd/samsung/rtc.h +++ b/include/linux/mfd/samsung/rtc.h @@ -1,7 +1,18 @@ -/* SPDX-License-Identifier: GPL-2.0+ */ -/* +/* rtc.h + * * Copyright (c) 2011-2014 Samsung Electronics Co., Ltd * http://www.samsung.com + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * */ #ifndef __LINUX_MFD_SEC_RTC_H @@ -130,4 +141,15 @@ enum s2mps_rtc_reg { #define WTSR_ENABLE_SHIFT 6 #define WTSR_ENABLE_MASK (1 << WTSR_ENABLE_SHIFT) +enum { + RTC_SEC = 0, + RTC_MIN, + RTC_HOUR, + RTC_WEEKDAY, + RTC_DATE, + RTC_MONTH, + RTC_YEAR1, + RTC_YEAR2, +}; + #endif /* __LINUX_MFD_SEC_RTC_H */ diff --git a/include/linux/mfd/samsung/s2mpa01.h b/include/linux/mfd/samsung/s2mpa01.h index 0762e9de6f..2766108bca 100644 --- a/include/linux/mfd/samsung/s2mpa01.h +++ b/include/linux/mfd/samsung/s2mpa01.h @@ -1,7 +1,12 @@ -/* SPDX-License-Identifier: GPL-2.0+ */ /* * Copyright (c) 2013 Samsung Electronics Co., Ltd * http://www.samsung.com + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + * */ #ifndef __LINUX_MFD_S2MPA01_H diff --git a/include/linux/mfd/samsung/s2mps11.h b/include/linux/mfd/samsung/s2mps11.h index 4805c90609..2c14eeca46 100644 --- a/include/linux/mfd/samsung/s2mps11.h +++ b/include/linux/mfd/samsung/s2mps11.h @@ -1,7 +1,14 @@ -/* SPDX-License-Identifier: GPL-2.0+ */ /* + * s2mps11.h + * * Copyright (c) 2012 Samsung Electronics Co., Ltd * http://www.samsung.com + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. 
+ * */ #ifndef __LINUX_MFD_S2MPS11_H @@ -170,9 +177,7 @@ enum s2mps11_regulators { #define S2MPS11_ENABLE_MASK (0x03 << S2MPS11_ENABLE_SHIFT) #define S2MPS11_ENABLE_SHIFT 0x06 #define S2MPS11_LDO_N_VOLTAGES (S2MPS11_LDO_VSEL_MASK + 1) -#define S2MPS11_BUCK12346_N_VOLTAGES 153 -#define S2MPS11_BUCK5_N_VOLTAGES 216 -#define S2MPS11_BUCK7810_N_VOLTAGES 225 +#define S2MPS11_BUCK_N_VOLTAGES (S2MPS11_BUCK_VSEL_MASK + 1) #define S2MPS11_BUCK9_N_VOLTAGES (S2MPS11_BUCK9_VSEL_MASK + 1) #define S2MPS11_RAMP_DELAY 25000 /* uV/us */ @@ -190,9 +195,4 @@ enum s2mps11_regulators { #define S2MPS11_BUCK6_RAMP_EN_SHIFT 0 #define S2MPS11_PMIC_EN_SHIFT 6 -/* - * Bits for "enable suspend" (On/Off controlled by PWREN) - * are the same as in S2MPS14: S2MPS14_ENABLE_SUSPEND - */ - #endif /* __LINUX_MFD_S2MPS11_H */ diff --git a/include/linux/mfd/samsung/s2mps13.h b/include/linux/mfd/samsung/s2mps13.h index b96d8a11dc..239e977ba4 100644 --- a/include/linux/mfd/samsung/s2mps13.h +++ b/include/linux/mfd/samsung/s2mps13.h @@ -1,7 +1,19 @@ -/* SPDX-License-Identifier: GPL-2.0+ */ /* + * s2mps13.h + * * Copyright (c) 2014 Samsung Electronics Co., Ltd * http://www.samsung.com + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * */ #ifndef __LINUX_MFD_S2MPS13_H diff --git a/include/linux/mfd/samsung/s2mps14.h b/include/linux/mfd/samsung/s2mps14.h index f4afa0cfc2..c92f4782af 100644 --- a/include/linux/mfd/samsung/s2mps14.h +++ b/include/linux/mfd/samsung/s2mps14.h @@ -1,7 +1,19 @@ -/* SPDX-License-Identifier: GPL-2.0+ */ /* + * s2mps14.h + * * Copyright (c) 2014 Samsung Electronics Co., Ltd * http://www.samsung.com + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * */ #ifndef __LINUX_MFD_S2MPS14_H diff --git a/include/linux/mfd/samsung/s2mps15.h b/include/linux/mfd/samsung/s2mps15.h index eac6bf74b7..36d35287c3 100644 --- a/include/linux/mfd/samsung/s2mps15.h +++ b/include/linux/mfd/samsung/s2mps15.h @@ -1,7 +1,16 @@ -/* SPDX-License-Identifier: GPL-2.0+ */ /* * Copyright (c) 2015 Samsung Electronics Co., Ltd * http://www.samsung.com + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
*/ #ifndef __LINUX_MFD_S2MPS15_H diff --git a/include/linux/mfd/samsung/s2mpu02.h b/include/linux/mfd/samsung/s2mpu02.h index 76cd5380cf..47ae9bc583 100644 --- a/include/linux/mfd/samsung/s2mpu02.h +++ b/include/linux/mfd/samsung/s2mpu02.h @@ -1,7 +1,19 @@ -/* SPDX-License-Identifier: GPL-2.0+ */ /* + * s2mpu02.h + * * Copyright (c) 2014 Samsung Electronics Co., Ltd * http://www.samsung.com + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * */ #ifndef __LINUX_MFD_S2MPU02_H diff --git a/include/linux/mfd/samsung/s5m8763.h b/include/linux/mfd/samsung/s5m8763.h index c534f086ca..e025418e55 100644 --- a/include/linux/mfd/samsung/s5m8763.h +++ b/include/linux/mfd/samsung/s5m8763.h @@ -1,7 +1,13 @@ -/* SPDX-License-Identifier: GPL-2.0+ */ -/* +/* s5m8763.h + * * Copyright (c) 2011 Samsung Electronics Co., Ltd * http://www.samsung.com + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. 
+ * */ #ifndef __LINUX_MFD_S5M8763_H diff --git a/include/linux/mfd/samsung/s5m8767.h b/include/linux/mfd/samsung/s5m8767.h index 704f8d80e9..243b58fec3 100644 --- a/include/linux/mfd/samsung/s5m8767.h +++ b/include/linux/mfd/samsung/s5m8767.h @@ -1,7 +1,13 @@ -/* SPDX-License-Identifier: GPL-2.0+ */ -/* +/* s5m8767.h + * * Copyright (c) 2011 Samsung Electronics Co., Ltd * http://www.samsung.com + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + * */ #ifndef __LINUX_MFD_S5M8767_H diff --git a/include/linux/mfd/si476x-core.h b/include/linux/mfd/si476x-core.h index dd95c37ca1..674b45d5a7 100644 --- a/include/linux/mfd/si476x-core.h +++ b/include/linux/mfd/si476x-core.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * include/media/si476x-core.h -- Common definitions for si476x core * device @@ -7,6 +6,16 @@ * Copyright (C) 2013 Andrey Smirnov * * Author: Andrey Smirnov + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; version 2 of the License. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * */ #ifndef SI476X_CORE_H @@ -57,7 +66,7 @@ enum si476x_mfd_cells { * @SI476X_POWER_DOWN: In this state all regulators are turned off * and the reset line is pulled low. The device is completely * inactive. 
- * @SI476X_POWER_UP_FULL: In this state all the power regulators are + * @SI476X_POWER_UP_FULL: In this state all the power regualtors are * turned on, reset line pulled high, IRQ line is enabled(polling is * active for polling use scenario) and device is turned on with * POWER_UP command. The device is ready to be used. diff --git a/include/linux/mfd/si476x-platform.h b/include/linux/mfd/si476x-platform.h index 18363b773d..88bb93b7a9 100644 --- a/include/linux/mfd/si476x-platform.h +++ b/include/linux/mfd/si476x-platform.h @@ -1,10 +1,19 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * include/media/si476x-platform.h -- Platform data specific definitions * * Copyright (C) 2013 Andrey Smirnov * * Author: Andrey Smirnov + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; version 2 of the License. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * */ #ifndef __SI476X_PLATFORM_H__ diff --git a/include/linux/mfd/si476x-reports.h b/include/linux/mfd/si476x-reports.h index 93b3418469..e0b9455a79 100644 --- a/include/linux/mfd/si476x-reports.h +++ b/include/linux/mfd/si476x-reports.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * include/media/si476x-platform.h -- Definitions of the data formats * returned by debugfs hooks @@ -6,6 +5,16 @@ * Copyright (C) 2013 Andrey Smirnov * * Author: Andrey Smirnov + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; version 2 of the License. 
+ * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * */ #ifndef __SI476X_REPORTS_H__ diff --git a/include/linux/mfd/sky81452.h b/include/linux/mfd/sky81452.h index b08570ff34..b0925fa3e9 100644 --- a/include/linux/mfd/sky81452.h +++ b/include/linux/mfd/sky81452.h @@ -1,17 +1,30 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * sky81452.h SKY81452 MFD driver * * Copyright 2014 Skyworks Solutions Inc. * Author : Gyungoh Yoo + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, see . */ #ifndef _SKY81452_H #define _SKY81452_H +#include #include struct sky81452_platform_data { + struct sky81452_bl_platform_data *bl_pdata; struct regulator_init_data *regulator_init_data; }; diff --git a/include/linux/mfd/smsc.h b/include/linux/mfd/smsc.h new file mode 100644 index 0000000000..9747b29f35 --- /dev/null +++ b/include/linux/mfd/smsc.h @@ -0,0 +1,109 @@ +/* + * SMSC ECE1099 + * + * Copyright 2012 Texas Instruments Inc. + * + * Author: Sourav Poddar + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. 
+ * + */ + +#ifndef __LINUX_MFD_SMSC_H +#define __LINUX_MFD_SMSC_H + +#include + +#define SMSC_ID_ECE1099 1 +#define SMSC_NUM_CLIENTS 2 + +#define SMSC_BASE_ADDR 0x38 +#define OMAP_GPIO_SMSC_IRQ 151 + +#define SMSC_MAXGPIO 32 +#define SMSC_BANK(offs) ((offs) >> 3) +#define SMSC_BIT(offs) (1u << ((offs) & 0x7)) + +struct smsc { + struct device *dev; + struct i2c_client *i2c_clients[SMSC_NUM_CLIENTS]; + struct regmap *regmap; + int clk; + /* Stored chip id */ + int id; +}; + +struct smsc_gpio; +struct smsc_keypad; + +static inline int smsc_read(struct device *child, unsigned int reg, + unsigned int *dest) +{ + struct smsc *smsc = dev_get_drvdata(child->parent); + + return regmap_read(smsc->regmap, reg, dest); +} + +static inline int smsc_write(struct device *child, unsigned int reg, + unsigned int value) +{ + struct smsc *smsc = dev_get_drvdata(child->parent); + + return regmap_write(smsc->regmap, reg, value); +} + +/* Registers for SMSC */ +#define SMSC_RESET 0xF5 +#define SMSC_GRP_INT 0xF9 +#define SMSC_CLK_CTRL 0xFA +#define SMSC_WKUP_CTRL 0xFB +#define SMSC_DEV_ID 0xFC +#define SMSC_DEV_REV 0xFD +#define SMSC_VEN_ID_L 0xFE +#define SMSC_VEN_ID_H 0xFF + +/* CLK VALUE */ +#define SMSC_CLK_VALUE 0x13 + +/* Registers for function GPIO INPUT */ +#define SMSC_GPIO_DATA_IN_START 0x00 + +/* Registers for function GPIO OUPUT */ +#define SMSC_GPIO_DATA_OUT_START 0x05 + +/* Definitions for SMSC GPIO CONFIGURATION REGISTER*/ +#define SMSC_GPIO_INPUT_LOW 0x01 +#define SMSC_GPIO_INPUT_RISING 0x09 +#define SMSC_GPIO_INPUT_FALLING 0x11 +#define SMSC_GPIO_INPUT_BOTH_EDGE 0x19 +#define SMSC_GPIO_OUTPUT_PP 0x21 +#define SMSC_GPIO_OUTPUT_OP 0x31 + +#define GRP_INT_STAT 0xf9 +#define SMSC_GPI_INT 0x0f +#define SMSC_CFG_START 0x0A + +/* Registers for SMSC GPIO INTERRUPT STATUS REGISTER*/ +#define SMSC_GPIO_INT_STAT_START 0x32 + +/* Registers for SMSC GPIO INTERRUPT MASK REGISTER*/ +#define SMSC_GPIO_INT_MASK_START 0x37 + +/* Registers for SMSC function KEYPAD*/ +#define SMSC_KP_OUT 
0x40 +#define SMSC_KP_IN 0x41 +#define SMSC_KP_INT_STAT 0x42 +#define SMSC_KP_INT_MASK 0x43 + +/* Definitions for keypad */ +#define SMSC_KP_KSO 0x70 +#define SMSC_KP_KSI 0x51 +#define SMSC_KSO_ALL_LOW 0x20 +#define SMSC_KP_SET_LOW_PWR 0x0B +#define SMSC_KP_SET_HIGH 0xFF +#define SMSC_KSO_EVAL 0x00 + +#endif /* __LINUX_MFD_SMSC_H */ diff --git a/include/linux/mfd/sta2x11-mfd.h b/include/linux/mfd/sta2x11-mfd.h index 2001ca5c44..9a855ac11c 100644 --- a/include/linux/mfd/sta2x11-mfd.h +++ b/include/linux/mfd/sta2x11-mfd.h @@ -1,8 +1,20 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright (c) 2009-2011 Wind River Systems, Inc. * Copyright (c) 2011 ST Microelectronics (Alessandro Rubini) * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * See the GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + * * The STMicroelectronics ConneXt (STA2X11) chip has several unrelated * functions in one PCI endpoint functions. 
This driver simply * registers the platform devices in this iomemregion and exports a few diff --git a/include/linux/mfd/stmpe.h b/include/linux/mfd/stmpe.h index 87e29d561e..4a827af17e 100644 --- a/include/linux/mfd/stmpe.h +++ b/include/linux/mfd/stmpe.h @@ -1,7 +1,7 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright (C) ST-Ericsson SA 2010 * + * License Terms: GNU General Public License, version 2 * Author: Rabin Vincent for ST-Ericsson */ @@ -10,20 +10,6 @@ #include -#define STMPE_SAMPLE_TIME(x) ((x & 0xf) << 4) -#define STMPE_MOD_12B(x) ((x & 0x1) << 3) -#define STMPE_REF_SEL(x) ((x & 0x1) << 1) -#define STMPE_ADC_FREQ(x) (x & 0x3) -#define STMPE_AVE_CTRL(x) ((x & 0x3) << 6) -#define STMPE_DET_DELAY(x) ((x & 0x7) << 3) -#define STMPE_SETTLING(x) (x & 0x7) -#define STMPE_FRACTION_Z(x) (x & 0x7) -#define STMPE_I_DRIVE(x) (x & 0x1) -#define STMPE_OP_MODE(x) ((x & 0x7) << 1) - -#define STMPE811_REG_ADC_CTRL1 0x20 -#define STMPE811_REG_ADC_CTRL2 0x21 - struct device; struct regulator; @@ -137,12 +123,6 @@ struct stmpe { u8 ier[2]; u8 oldier[2]; struct stmpe_platform_data *pdata; - - /* For devices that use an ADC */ - u8 sample_time; - u8 mod_12b; - u8 ref_sel; - u8 adc_freq; }; extern int stmpe_reg_write(struct stmpe *stmpe, u8 reg, u8 data); @@ -156,7 +136,6 @@ extern int stmpe_set_altfunc(struct stmpe *stmpe, u32 pins, enum stmpe_block block); extern int stmpe_enable(struct stmpe *stmpe, unsigned int blocks); extern int stmpe_disable(struct stmpe *stmpe, unsigned int blocks); -extern int stmpe811_adc_common_init(struct stmpe *stmpe); #define STMPE_GPIO_NOREQ_811_TOUCH (0xf0) diff --git a/include/linux/mfd/stw481x.h b/include/linux/mfd/stw481x.h index 5312804666..833074b766 100644 --- a/include/linux/mfd/stw481x.h +++ b/include/linux/mfd/stw481x.h @@ -1,9 +1,10 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright (C) 2011 ST-Ericsson SA * Written on behalf of Linaro for ST-Ericsson * * Author: Linus Walleij + * + * License terms: GNU General 
Public License (GPL) version 2 */ #ifndef MFD_STW481X_H #define MFD_STW481X_H diff --git a/include/linux/mfd/syscon.h b/include/linux/mfd/syscon.h index fecc2fa2a3..40a76b97b7 100644 --- a/include/linux/mfd/syscon.h +++ b/include/linux/mfd/syscon.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * System Control Driver * @@ -6,6 +5,11 @@ * Copyright (C) 2012 Linaro Ltd. * * Author: Dong Aisheng + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. */ #ifndef __LINUX_MFD_SYSCON_H__ @@ -17,26 +21,13 @@ struct device_node; #ifdef CONFIG_MFD_SYSCON -extern struct regmap *device_node_to_regmap(struct device_node *np); extern struct regmap *syscon_node_to_regmap(struct device_node *np); extern struct regmap *syscon_regmap_lookup_by_compatible(const char *s); +extern struct regmap *syscon_regmap_lookup_by_pdevname(const char *s); extern struct regmap *syscon_regmap_lookup_by_phandle( struct device_node *np, const char *property); -extern struct regmap *syscon_regmap_lookup_by_phandle_args( - struct device_node *np, - const char *property, - int arg_count, - unsigned int *out_args); -extern struct regmap *syscon_regmap_lookup_by_phandle_optional( - struct device_node *np, - const char *property); #else -static inline struct regmap *device_node_to_regmap(struct device_node *np) -{ - return ERR_PTR(-ENOTSUPP); -} - static inline struct regmap *syscon_node_to_regmap(struct device_node *np) { return ERR_PTR(-ENOTSUPP); @@ -47,29 +38,17 @@ static inline struct regmap *syscon_regmap_lookup_by_compatible(const char *s) return ERR_PTR(-ENOTSUPP); } +static inline struct regmap *syscon_regmap_lookup_by_pdevname(const char *s) +{ + return ERR_PTR(-ENOTSUPP); +} + static inline struct regmap *syscon_regmap_lookup_by_phandle( struct device_node *np, const char 
*property) { return ERR_PTR(-ENOTSUPP); } - -static inline struct regmap *syscon_regmap_lookup_by_phandle_args( - struct device_node *np, - const char *property, - int arg_count, - unsigned int *out_args) -{ - return ERR_PTR(-ENOTSUPP); -} - -static inline struct regmap *syscon_regmap_lookup_by_phandle_optional( - struct device_node *np, - const char *property) -{ - return NULL; -} - #endif #endif /* __LINUX_MFD_SYSCON_H__ */ diff --git a/include/linux/mfd/syscon/atmel-matrix.h b/include/linux/mfd/syscon/atmel-matrix.h index 20c2566521..8293c3e2a8 100644 --- a/include/linux/mfd/syscon/atmel-matrix.h +++ b/include/linux/mfd/syscon/atmel-matrix.h @@ -1,8 +1,12 @@ -/* SPDX-License-Identifier: GPL-2.0+ */ /* * Copyright (C) 2014 Atmel Corporation. * * Memory Controllers (MATRIX, EBI) - System peripherals registers. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. */ #ifndef _LINUX_MFD_SYSCON_ATMEL_MATRIX_H @@ -106,6 +110,7 @@ #define AT91_MATRIX_DDR_IOSR BIT(18) #define AT91_MATRIX_NFD0_SELECT BIT(24) #define AT91_MATRIX_DDR_MP_EN BIT(25) +#define AT91_MATRIX_EBI_NUM_CS 8 #define AT91_MATRIX_USBPUCR_PUON BIT(30) diff --git a/include/linux/mfd/syscon/atmel-mc.h b/include/linux/mfd/syscon/atmel-mc.h index 99c56205c4..afd9b8f1e3 100644 --- a/include/linux/mfd/syscon/atmel-mc.h +++ b/include/linux/mfd/syscon/atmel-mc.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0+ */ /* * Copyright (C) 2005 Ivan Kokshaysky * Copyright (C) SAN People @@ -6,6 +5,11 @@ * Memory Controllers (MC, EBI, SMC, SDRAMC, BFC) - System peripherals * registers. * Based on AT91RM9200 datasheet revision E. 
+ * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. */ #ifndef _LINUX_MFD_SYSCON_ATMEL_MC_H_ diff --git a/include/linux/mfd/syscon/atmel-smc.h b/include/linux/mfd/syscon/atmel-smc.h index e9e24f4c45..be6ebe64ee 100644 --- a/include/linux/mfd/syscon/atmel-smc.h +++ b/include/linux/mfd/syscon/atmel-smc.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ /* * Atmel SMC (Static Memory Controller) register offsets and bit definitions. * @@ -6,114 +5,169 @@ * Copyright (C) 2014 Free Electrons * * Author: Boris Brezillon + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. */ #ifndef _LINUX_MFD_SYSCON_ATMEL_SMC_H_ #define _LINUX_MFD_SYSCON_ATMEL_SMC_H_ #include -#include #include -#define ATMEL_SMC_SETUP(cs) (((cs) * 0x10)) -#define ATMEL_HSMC_SETUP(layout, cs) \ - ((layout)->timing_regs_offset + ((cs) * 0x14)) -#define ATMEL_SMC_PULSE(cs) (((cs) * 0x10) + 0x4) -#define ATMEL_HSMC_PULSE(layout, cs) \ - ((layout)->timing_regs_offset + ((cs) * 0x14) + 0x4) -#define ATMEL_SMC_CYCLE(cs) (((cs) * 0x10) + 0x8) -#define ATMEL_HSMC_CYCLE(layout, cs) \ - ((layout)->timing_regs_offset + ((cs) * 0x14) + 0x8) -#define ATMEL_SMC_NWE_SHIFT 0 -#define ATMEL_SMC_NCS_WR_SHIFT 8 -#define ATMEL_SMC_NRD_SHIFT 16 -#define ATMEL_SMC_NCS_RD_SHIFT 24 +#define AT91SAM9_SMC_GENERIC 0x00 +#define AT91SAM9_SMC_GENERIC_BLK_SZ 0x10 -#define ATMEL_SMC_MODE(cs) (((cs) * 0x10) + 0xc) -#define ATMEL_HSMC_MODE(layout, cs) \ - ((layout)->timing_regs_offset + ((cs) * 0x14) + 0x10) -#define ATMEL_SMC_MODE_READMODE_MASK BIT(0) -#define ATMEL_SMC_MODE_READMODE_NCS (0 << 0) -#define ATMEL_SMC_MODE_READMODE_NRD (1 << 0) -#define ATMEL_SMC_MODE_WRITEMODE_MASK BIT(1) 
-#define ATMEL_SMC_MODE_WRITEMODE_NCS (0 << 1) -#define ATMEL_SMC_MODE_WRITEMODE_NWE (1 << 1) -#define ATMEL_SMC_MODE_EXNWMODE_MASK GENMASK(5, 4) -#define ATMEL_SMC_MODE_EXNWMODE_DISABLE (0 << 4) -#define ATMEL_SMC_MODE_EXNWMODE_FROZEN (2 << 4) -#define ATMEL_SMC_MODE_EXNWMODE_READY (3 << 4) -#define ATMEL_SMC_MODE_BAT_MASK BIT(8) -#define ATMEL_SMC_MODE_BAT_SELECT (0 << 8) -#define ATMEL_SMC_MODE_BAT_WRITE (1 << 8) -#define ATMEL_SMC_MODE_DBW_MASK GENMASK(13, 12) -#define ATMEL_SMC_MODE_DBW_8 (0 << 12) -#define ATMEL_SMC_MODE_DBW_16 (1 << 12) -#define ATMEL_SMC_MODE_DBW_32 (2 << 12) -#define ATMEL_SMC_MODE_TDF_MASK GENMASK(19, 16) -#define ATMEL_SMC_MODE_TDF(x) (((x) - 1) << 16) -#define ATMEL_SMC_MODE_TDF_MAX 16 -#define ATMEL_SMC_MODE_TDF_MIN 1 -#define ATMEL_SMC_MODE_TDFMODE_OPTIMIZED BIT(20) -#define ATMEL_SMC_MODE_PMEN BIT(24) -#define ATMEL_SMC_MODE_PS_MASK GENMASK(29, 28) -#define ATMEL_SMC_MODE_PS_4 (0 << 28) -#define ATMEL_SMC_MODE_PS_8 (1 << 28) -#define ATMEL_SMC_MODE_PS_16 (2 << 28) -#define ATMEL_SMC_MODE_PS_32 (3 << 28) +#define SAMA5_SMC_GENERIC 0x600 +#define SAMA5_SMC_GENERIC_BLK_SZ 0x14 -#define ATMEL_HSMC_TIMINGS(layout, cs) \ - ((layout)->timing_regs_offset + ((cs) * 0x14) + 0xc) -#define ATMEL_HSMC_TIMINGS_OCMS BIT(12) -#define ATMEL_HSMC_TIMINGS_RBNSEL(x) ((x) << 28) -#define ATMEL_HSMC_TIMINGS_NFSEL BIT(31) -#define ATMEL_HSMC_TIMINGS_TCLR_SHIFT 0 -#define ATMEL_HSMC_TIMINGS_TADL_SHIFT 4 -#define ATMEL_HSMC_TIMINGS_TAR_SHIFT 8 -#define ATMEL_HSMC_TIMINGS_TRR_SHIFT 16 -#define ATMEL_HSMC_TIMINGS_TWB_SHIFT 24 +#define AT91SAM9_SMC_SETUP(o) ((o) + 0x00) +#define AT91SAM9_SMC_NWESETUP(x) (x) +#define AT91SAM9_SMC_NCS_WRSETUP(x) ((x) << 8) +#define AT91SAM9_SMC_NRDSETUP(x) ((x) << 16) +#define AT91SAM9_SMC_NCS_NRDSETUP(x) ((x) << 24) -struct atmel_hsmc_reg_layout { - unsigned int timing_regs_offset; -}; +#define AT91SAM9_SMC_PULSE(o) ((o) + 0x04) +#define AT91SAM9_SMC_NWEPULSE(x) (x) +#define AT91SAM9_SMC_NCS_WRPULSE(x) ((x) << 8) +#define 
AT91SAM9_SMC_NRDPULSE(x) ((x) << 16) +#define AT91SAM9_SMC_NCS_NRDPULSE(x) ((x) << 24) -/** - * struct atmel_smc_cs_conf - SMC CS config as described in the datasheet. - * @setup: NCS/NWE/NRD setup timings (not applicable to at91rm9200) - * @pulse: NCS/NWE/NRD pulse timings (not applicable to at91rm9200) - * @cycle: NWE/NRD cycle timings (not applicable to at91rm9200) - * @timings: advanced NAND related timings (only applicable to HSMC) - * @mode: all kind of config parameters (see the fields definition above). - * The mode fields are different on at91rm9200 +#define AT91SAM9_SMC_CYCLE(o) ((o) + 0x08) +#define AT91SAM9_SMC_NWECYCLE(x) (x) +#define AT91SAM9_SMC_NRDCYCLE(x) ((x) << 16) + +#define AT91SAM9_SMC_MODE(o) ((o) + 0x0c) +#define SAMA5_SMC_MODE(o) ((o) + 0x10) +#define AT91_SMC_READMODE BIT(0) +#define AT91_SMC_READMODE_NCS (0 << 0) +#define AT91_SMC_READMODE_NRD (1 << 0) +#define AT91_SMC_WRITEMODE BIT(1) +#define AT91_SMC_WRITEMODE_NCS (0 << 1) +#define AT91_SMC_WRITEMODE_NWE (1 << 1) +#define AT91_SMC_EXNWMODE GENMASK(5, 4) +#define AT91_SMC_EXNWMODE_DISABLE (0 << 4) +#define AT91_SMC_EXNWMODE_FROZEN (2 << 4) +#define AT91_SMC_EXNWMODE_READY (3 << 4) +#define AT91_SMC_BAT BIT(8) +#define AT91_SMC_BAT_SELECT (0 << 8) +#define AT91_SMC_BAT_WRITE (1 << 8) +#define AT91_SMC_DBW GENMASK(13, 12) +#define AT91_SMC_DBW_8 (0 << 12) +#define AT91_SMC_DBW_16 (1 << 12) +#define AT91_SMC_DBW_32 (2 << 12) +#define AT91_SMC_TDF GENMASK(19, 16) +#define AT91_SMC_TDF_(x) ((((x) - 1) << 16) & AT91_SMC_TDF) +#define AT91_SMC_TDF_MAX 16 +#define AT91_SMC_TDFMODE_OPTIMIZED BIT(20) +#define AT91_SMC_PMEN BIT(24) +#define AT91_SMC_PS GENMASK(29, 28) +#define AT91_SMC_PS_4 (0 << 28) +#define AT91_SMC_PS_8 (1 << 28) +#define AT91_SMC_PS_16 (2 << 28) +#define AT91_SMC_PS_32 (3 << 28) + + +/* + * This function converts a setup timing expressed in nanoseconds into an + * encoded value that can be written in the SMC_SETUP register. 
+ * + * The following formula is described in atmel datasheets (section + * "SMC Setup Register"): + * + * setup length = (128* SETUP[5] + SETUP[4:0]) + * + * where setup length is the timing expressed in cycles. */ -struct atmel_smc_cs_conf { - u32 setup; - u32 pulse; - u32 cycle; - u32 timings; - u32 mode; -}; +static inline u32 at91sam9_smc_setup_ns_to_cycles(unsigned int clk_rate, + u32 timing_ns) +{ + u32 clk_period = DIV_ROUND_UP(NSEC_PER_SEC, clk_rate); + u32 coded_cycles = 0; + u32 cycles; -void atmel_smc_cs_conf_init(struct atmel_smc_cs_conf *conf); -int atmel_smc_cs_conf_set_timing(struct atmel_smc_cs_conf *conf, - unsigned int shift, - unsigned int ncycles); -int atmel_smc_cs_conf_set_setup(struct atmel_smc_cs_conf *conf, - unsigned int shift, unsigned int ncycles); -int atmel_smc_cs_conf_set_pulse(struct atmel_smc_cs_conf *conf, - unsigned int shift, unsigned int ncycles); -int atmel_smc_cs_conf_set_cycle(struct atmel_smc_cs_conf *conf, - unsigned int shift, unsigned int ncycles); -void atmel_smc_cs_conf_apply(struct regmap *regmap, int cs, - const struct atmel_smc_cs_conf *conf); -void atmel_hsmc_cs_conf_apply(struct regmap *regmap, - const struct atmel_hsmc_reg_layout *reglayout, - int cs, const struct atmel_smc_cs_conf *conf); -void atmel_smc_cs_conf_get(struct regmap *regmap, int cs, - struct atmel_smc_cs_conf *conf); -void atmel_hsmc_cs_conf_get(struct regmap *regmap, - const struct atmel_hsmc_reg_layout *reglayout, - int cs, struct atmel_smc_cs_conf *conf); -const struct atmel_hsmc_reg_layout * -atmel_hsmc_get_reg_layout(struct device_node *np); + cycles = DIV_ROUND_UP(timing_ns, clk_period); + if (cycles / 32) { + coded_cycles |= 1 << 5; + if (cycles < 128) + cycles = 0; + } + + coded_cycles |= cycles % 32; + + return coded_cycles; +} + +/* + * This function converts a pulse timing expressed in nanoseconds into an + * encoded value that can be written in the SMC_PULSE register. 
+ * + * The following formula is described in atmel datasheets (section + * "SMC Pulse Register"): + * + * pulse length = (256* PULSE[6] + PULSE[5:0]) + * + * where pulse length is the timing expressed in cycles. + */ +static inline u32 at91sam9_smc_pulse_ns_to_cycles(unsigned int clk_rate, + u32 timing_ns) +{ + u32 clk_period = DIV_ROUND_UP(NSEC_PER_SEC, clk_rate); + u32 coded_cycles = 0; + u32 cycles; + + cycles = DIV_ROUND_UP(timing_ns, clk_period); + if (cycles / 64) { + coded_cycles |= 1 << 6; + if (cycles < 256) + cycles = 0; + } + + coded_cycles |= cycles % 64; + + return coded_cycles; +} + +/* + * This function converts a cycle timing expressed in nanoseconds into an + * encoded value that can be written in the SMC_CYCLE register. + * + * The following formula is described in atmel datasheets (section + * "SMC Cycle Register"): + * + * cycle length = (CYCLE[8:7]*256 + CYCLE[6:0]) + * + * where cycle length is the timing expressed in cycles. + */ +static inline u32 at91sam9_smc_cycle_ns_to_cycles(unsigned int clk_rate, + u32 timing_ns) +{ + u32 clk_period = DIV_ROUND_UP(NSEC_PER_SEC, clk_rate); + u32 coded_cycles = 0; + u32 cycles; + + cycles = DIV_ROUND_UP(timing_ns, clk_period); + if (cycles / 128) { + coded_cycles = cycles / 256; + cycles %= 256; + if (cycles >= 128) { + coded_cycles++; + cycles = 0; + } + + if (coded_cycles > 0x3) { + coded_cycles = 0x3; + cycles = 0x7f; + } + + coded_cycles <<= 7; + } + + coded_cycles |= cycles % 128; + + return coded_cycles; +} #endif /* _LINUX_MFD_SYSCON_ATMEL_SMC_H_ */ diff --git a/include/linux/mfd/syscon/atmel-st.h b/include/linux/mfd/syscon/atmel-st.h index 5b6013d0c4..8acf1ec1fa 100644 --- a/include/linux/mfd/syscon/atmel-st.h +++ b/include/linux/mfd/syscon/atmel-st.h @@ -1,10 +1,14 @@ -/* SPDX-License-Identifier: GPL-2.0+ */ /* * Copyright (C) 2005 Ivan Kokshaysky * Copyright (C) SAN People * * System Timer (ST) - System peripherals registers. * Based on AT91RM9200 datasheet revision E. 
+ * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. */ #ifndef _LINUX_MFD_SYSCON_ATMEL_ST_H diff --git a/include/linux/mfd/syscon/clps711x.h b/include/linux/mfd/syscon/clps711x.h index 4c12850dec..26355abae5 100644 --- a/include/linux/mfd/syscon/clps711x.h +++ b/include/linux/mfd/syscon/clps711x.h @@ -1,8 +1,12 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * CLPS711X system register bits definitions * * Copyright (C) 2013 Alexander Shiyan + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. */ #ifndef _LINUX_MFD_SYSCON_CLPS711X_H_ diff --git a/include/linux/mfd/syscon/exynos4-pmu.h b/include/linux/mfd/syscon/exynos4-pmu.h new file mode 100644 index 0000000000..278b1b1549 --- /dev/null +++ b/include/linux/mfd/syscon/exynos4-pmu.h @@ -0,0 +1,21 @@ +/* + * Copyright (C) 2015 Samsung Electronics Co., Ltd. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */ + +#ifndef _LINUX_MFD_SYSCON_PMU_EXYNOS4_H_ +#define _LINUX_MFD_SYSCON_PMU_EXYNOS4_H_ + +/* Exynos4 PMU register definitions */ + +/* MIPI_PHYn_CONTROL register offset: n = 0..1 */ +#define EXYNOS4_MIPI_PHY_CONTROL(n) (0x710 + (n) * 4) +#define EXYNOS4_MIPI_PHY_ENABLE (1 << 0) +#define EXYNOS4_MIPI_PHY_SRESETN (1 << 1) +#define EXYNOS4_MIPI_PHY_MRESETN (1 << 2) +#define EXYNOS4_MIPI_PHY_RESET_MASK (3 << 1) + +#endif /* _LINUX_MFD_SYSCON_PMU_EXYNOS4_H_ */ diff --git a/include/linux/mfd/syscon/exynos5-pmu.h b/include/linux/mfd/syscon/exynos5-pmu.h new file mode 100644 index 0000000000..c28ff21ca4 --- /dev/null +++ b/include/linux/mfd/syscon/exynos5-pmu.h @@ -0,0 +1,52 @@ +/* + * Exynos5 SoC series Power Management Unit (PMU) register offsets + * and bit definitions. + * + * Copyright (C) 2014 Samsung Electronics Co., Ltd. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */ + +#ifndef _LINUX_MFD_SYSCON_PMU_EXYNOS5_H_ +#define _LINUX_MFD_SYSCON_PMU_EXYNOS5_H_ + +/* Exynos5 PMU register definitions */ +#define EXYNOS5_HDMI_PHY_CONTROL (0x700) +#define EXYNOS5_USBDRD_PHY_CONTROL (0x704) + +/* Exynos5250 specific register definitions */ +#define EXYNOS5_USBHOST_PHY_CONTROL (0x708) +#define EXYNOS5_EFNAND_PHY_CONTROL (0x70c) +#define EXYNOS5_MIPI_PHY0_CONTROL (0x710) +#define EXYNOS5_MIPI_PHY1_CONTROL (0x714) +#define EXYNOS5_ADC_PHY_CONTROL (0x718) +#define EXYNOS5_MTCADC_PHY_CONTROL (0x71c) +#define EXYNOS5_DPTX_PHY_CONTROL (0x720) +#define EXYNOS5_SATA_PHY_CONTROL (0x724) + +/* Exynos5420 specific register definitions */ +#define EXYNOS5420_USBDRD1_PHY_CONTROL (0x708) +#define EXYNOS5420_USBHOST_PHY_CONTROL (0x70c) +#define EXYNOS5420_MIPI_PHY0_CONTROL (0x714) +#define EXYNOS5420_MIPI_PHY1_CONTROL (0x718) +#define EXYNOS5420_MIPI_PHY2_CONTROL (0x71c) +#define EXYNOS5420_ADC_PHY_CONTROL (0x720) +#define EXYNOS5420_MTCADC_PHY_CONTROL (0x724) +#define EXYNOS5420_DPTX_PHY_CONTROL (0x728) + +/* Exynos5433 specific register definitions */ +#define EXYNOS5433_USBHOST30_PHY_CONTROL (0x728) +#define EXYNOS5433_MIPI_PHY0_CONTROL (0x710) +#define EXYNOS5433_MIPI_PHY1_CONTROL (0x714) +#define EXYNOS5433_MIPI_PHY2_CONTROL (0x718) + +#define EXYNOS5_PHY_ENABLE BIT(0) +#define EXYNOS5_MIPI_PHY_S_RESETN BIT(1) +#define EXYNOS5_MIPI_PHY_M_RESETN BIT(2) + +#define EXYNOS5433_PAD_RETENTION_AUD_OPTION (0x3028) +#define EXYNOS5433_PAD_INITIATE_WAKEUP_FROM_LOWPWR BIT(28) + +#endif /* _LINUX_MFD_SYSCON_PMU_EXYNOS5_H_ */ diff --git a/include/linux/mfd/syscon/imx6q-iomuxc-gpr.h b/include/linux/mfd/syscon/imx6q-iomuxc-gpr.h index d4b5e527a7..c8e0164c54 100644 --- a/include/linux/mfd/syscon/imx6q-iomuxc-gpr.h +++ b/include/linux/mfd/syscon/imx6q-iomuxc-gpr.h @@ -1,6 +1,9 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright (C) 2012 Freescale Semiconductor, Inc. 
+ * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. */ #ifndef __LINUX_IMX6Q_IOMUXC_GPR_H @@ -240,8 +243,6 @@ #define IMX6Q_GPR4_IPU_RD_CACHE_CTL BIT(0) #define IMX6Q_GPR5_L2_CLK_STOP BIT(8) -#define IMX6Q_GPR5_SATA_SW_PD BIT(10) -#define IMX6Q_GPR5_SATA_SW_RST BIT(11) #define IMX6Q_GPR6_IPU1_ID00_WR_QOS_MASK (0xf << 0) #define IMX6Q_GPR6_IPU1_ID01_WR_QOS_MASK (0xf << 4) @@ -407,15 +408,6 @@ #define IMX6SX_GPR1_FEC_CLOCK_PAD_DIR_MASK (0x3 << 17) #define IMX6SX_GPR1_FEC_CLOCK_MUX_SEL_EXT (0x3 << 13) -#define IMX6SX_GPR2_MQS_OVERSAMPLE_MASK (0x1 << 26) -#define IMX6SX_GPR2_MQS_OVERSAMPLE_SHIFT (26) -#define IMX6SX_GPR2_MQS_EN_MASK (0x1 << 25) -#define IMX6SX_GPR2_MQS_EN_SHIFT (25) -#define IMX6SX_GPR2_MQS_SW_RST_MASK (0x1 << 24) -#define IMX6SX_GPR2_MQS_SW_RST_SHIFT (24) -#define IMX6SX_GPR2_MQS_CLK_DIV_MASK (0xFF << 16) -#define IMX6SX_GPR2_MQS_CLK_DIV_SHIFT (16) - #define IMX6SX_GPR4_FEC_ENET1_STOP_REQ (0x1 << 3) #define IMX6SX_GPR4_FEC_ENET2_STOP_REQ (0x1 << 4) @@ -446,7 +438,6 @@ #define IMX6SX_GPR5_DISP_MUX_DCIC1_MASK (0x1 << 1) #define IMX6SX_GPR12_PCIE_TEST_POWERDOWN BIT(30) -#define IMX6SX_GPR12_PCIE_PM_TURN_OFF BIT(16) #define IMX6SX_GPR12_PCIE_RX_EQ_MASK (0x7 << 0) #define IMX6SX_GPR12_PCIE_RX_EQ_2 (0x2 << 0) @@ -464,7 +455,4 @@ #define MCLK_DIR(x) (x == 1 ? IMX6UL_GPR1_SAI1_MCLK_DIR : x == 2 ? 
\ IMX6UL_GPR1_SAI2_MCLK_DIR : IMX6UL_GPR1_SAI3_MCLK_DIR) -/* For imx6sll iomux gpr register field define */ -#define IMX6SLL_GPR5_AFCG_X_BYPASS_MASK (0x1f << 11) - #endif /* __LINUX_IMX6Q_IOMUXC_GPR_H */ diff --git a/include/linux/mfd/syscon/imx7-iomuxc-gpr.h b/include/linux/mfd/syscon/imx7-iomuxc-gpr.h index 3d46907bab..4585d6105d 100644 --- a/include/linux/mfd/syscon/imx7-iomuxc-gpr.h +++ b/include/linux/mfd/syscon/imx7-iomuxc-gpr.h @@ -1,6 +1,9 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright (C) 2015 Freescale Semiconductor, Inc. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. */ #ifndef __LINUX_IMX7_IOMUXC_GPR_H @@ -41,8 +44,4 @@ #define IMX7D_GPR5_CSI_MUX_CONTROL_MIPI (0x1 << 4) -#define IMX7D_GPR12_PCIE_PHY_REFCLK_SEL BIT(5) - -#define IMX7D_GPR22_PCIE_PHY_PLL_LOCKED BIT(31) - #endif /* __LINUX_IMX7_IOMUXC_GPR_H */ diff --git a/include/linux/mfd/t7l66xb.h b/include/linux/mfd/t7l66xb.h index 69632c1b07..b4629818ae 100644 --- a/include/linux/mfd/t7l66xb.h +++ b/include/linux/mfd/t7l66xb.h @@ -1,8 +1,12 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * This file contains the definitions for the T7L66XB * * (C) Copyright 2005 Ian Molton + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ * */ #ifndef MFD_T7L66XB_H #define MFD_T7L66XB_H diff --git a/include/linux/mfd/tc3589x.h b/include/linux/mfd/tc3589x.h index b84955410e..468c31a27f 100644 --- a/include/linux/mfd/tc3589x.h +++ b/include/linux/mfd/tc3589x.h @@ -1,6 +1,7 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright (C) ST-Ericsson SA 2010 + * + * License Terms: GNU General Public License, version 2 */ #ifndef __LINUX_MFD_TC3589x_H @@ -19,9 +20,6 @@ enum tx3589x_block { #define TC3589x_RSTCTRL_KBDRST (1 << 1) #define TC3589x_RSTCTRL_GPIRST (1 << 0) -#define TC3589x_DKBDMSK_ELINT (1 << 1) -#define TC3589x_DKBDMSK_EINT (1 << 0) - /* Keyboard Configuration Registers */ #define TC3589x_KBDSETTLE_REG 0x01 #define TC3589x_KBDBOUNCE 0x02 @@ -104,9 +102,6 @@ enum tx3589x_block { #define TC3589x_GPIOODM2 0xE4 #define TC3589x_GPIOODE2 0xE5 -#define TC3589x_DIRECT0 0xEC -#define TC3589x_DKBDMSK 0xF3 - #define TC3589x_INT_GPIIRQ 0 #define TC3589x_INT_TI0IRQ 1 #define TC3589x_INT_TI1IRQ 2 diff --git a/include/linux/mfd/tc6393xb.h b/include/linux/mfd/tc6393xb.h index fcc8e74f0e..626e448205 100644 --- a/include/linux/mfd/tc6393xb.h +++ b/include/linux/mfd/tc6393xb.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * Toshiba TC6393XB SoC support * @@ -9,6 +8,10 @@ * * Based on code written by Sharp/Lineo for 2.4 kernels * Based on locomo.c + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
*/ #ifndef MFD_TC6393XB_H diff --git a/include/linux/mfd/ti_am335x_tscadc.h b/include/linux/mfd/ti_am335x_tscadc.h index ffc091b776..7f55b8b410 100644 --- a/include/linux/mfd/ti_am335x_tscadc.h +++ b/include/linux/mfd/ti_am335x_tscadc.h @@ -4,7 +4,7 @@ /* * TI Touch Screen / ADC MFD driver * - * Copyright (C) 2012 Texas Instruments Incorporated - https://www.ti.com/ + * Copyright (C) 2012 Texas Instruments Incorporated - http://www.ti.com/ * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License as @@ -23,8 +23,6 @@ #define REG_IRQENABLE 0x02C #define REG_IRQCLR 0x030 #define REG_IRQWAKEUP 0x034 -#define REG_DMAENABLE_SET 0x038 -#define REG_DMAENABLE_CLEAR 0x03c #define REG_CTRL 0x040 #define REG_ADCFSM 0x044 #define REG_CLKDIV 0x04C @@ -38,7 +36,6 @@ #define REG_FIFO0THR 0xE8 #define REG_FIFO1CNT 0xF0 #define REG_FIFO1THR 0xF4 -#define REG_DMA1REQ 0xF8 #define REG_FIFO0 0x100 #define REG_FIFO1 0x200 @@ -78,8 +75,6 @@ #define STEPCONFIG_YNN BIT(8) #define STEPCONFIG_XNP BIT(9) #define STEPCONFIG_YPN BIT(10) -#define STEPCONFIG_RFP(val) ((val) << 12) -#define STEPCONFIG_RFP_VREFP (0x3 << 12) #define STEPCONFIG_INM_MASK (0xF << 15) #define STEPCONFIG_INM(val) ((val) << 15) #define STEPCONFIG_INM_ADCREFM STEPCONFIG_INM(8) @@ -88,8 +83,6 @@ #define STEPCONFIG_INP_AN4 STEPCONFIG_INP(4) #define STEPCONFIG_INP_ADCREFM STEPCONFIG_INP(8) #define STEPCONFIG_FIFO1 BIT(26) -#define STEPCONFIG_RFM(val) ((val) << 23) -#define STEPCONFIG_RFM_VREFN (0x3 << 23) /* Delay register */ #define STEPDELAY_OPEN_MASK (0x3FFFF << 0) @@ -133,10 +126,6 @@ #define FIFOREAD_DATA_MASK (0xfff << 0) #define FIFOREAD_CHNLID_MASK (0xf << 16) -/* DMA ENABLE/CLEAR Register */ -#define DMA_FIFO0 BIT(0) -#define DMA_FIFO1 BIT(1) - /* Sequencer Status */ #define SEQ_STATUS BIT(5) #define CHARGE_STEP 0x11 @@ -166,7 +155,6 @@ struct ti_tscadc_dev { struct device *dev; struct regmap *regmap; void __iomem *tscadc_base; - phys_addr_t 
tscadc_phys_base; int irq; int used_cells; /* 1-2 */ int tsc_wires; diff --git a/include/linux/mfd/tmio.h b/include/linux/mfd/tmio.h index 27264fe4b3..7a26286db8 100644 --- a/include/linux/mfd/tmio.h +++ b/include/linux/mfd/tmio.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef MFD_TMIO_H #define MFD_TMIO_H @@ -14,17 +13,37 @@ #define tmio_ioread16(addr) readw(addr) #define tmio_ioread16_rep(r, b, l) readsw(r, b, l) #define tmio_ioread32(addr) \ - (((u32)readw((addr))) | (((u32)readw((addr) + 2)) << 16)) + (((u32) readw((addr))) | (((u32) readw((addr) + 2)) << 16)) #define tmio_iowrite8(val, addr) writeb((val), (addr)) #define tmio_iowrite16(val, addr) writew((val), (addr)) #define tmio_iowrite16_rep(r, b, l) writesw(r, b, l) #define tmio_iowrite32(val, addr) \ do { \ - writew((val), (addr)); \ - writew((val) >> 16, (addr) + 2); \ + writew((val), (addr)); \ + writew((val) >> 16, (addr) + 2); \ } while (0) +#define CNF_CMD 0x04 +#define CNF_CTL_BASE 0x10 +#define CNF_INT_PIN 0x3d +#define CNF_STOP_CLK_CTL 0x40 +#define CNF_GCLK_CTL 0x41 +#define CNF_SD_CLK_MODE 0x42 +#define CNF_PIN_STATUS 0x44 +#define CNF_PWR_CTL_1 0x48 +#define CNF_PWR_CTL_2 0x49 +#define CNF_PWR_CTL_3 0x4a +#define CNF_CARD_DETECT_MODE 0x4c +#define CNF_SD_SLOT 0x50 +#define CNF_EXT_GCLK_CTL_1 0xf0 +#define CNF_EXT_GCLK_CTL_2 0xf1 +#define CNF_EXT_GCLK_CTL_3 0xf9 +#define CNF_SD_LED_EN_1 0xfa +#define CNF_SD_LED_EN_2 0xfe + +#define SDCREN 0x2 /* Enable access to MMC CTL regs. (flag in COMMAND_REG)*/ + #define sd_config_write8(base, shift, reg, val) \ tmio_iowrite8((val), (base) + ((reg) << (shift))) #define sd_config_write16(base, shift, reg, val) \ @@ -36,53 +55,54 @@ } while (0) /* tmio MMC platform flags */ +#define TMIO_MMC_WRPROTECT_DISABLE (1 << 0) /* * Some controllers can support a 2-byte block size when the bus width * is configured in 4-bit mode. 
*/ -#define TMIO_MMC_BLKSZ_2BYTES BIT(1) +#define TMIO_MMC_BLKSZ_2BYTES (1 << 1) /* * Some controllers can support SDIO IRQ signalling. */ -#define TMIO_MMC_SDIO_IRQ BIT(2) +#define TMIO_MMC_SDIO_IRQ (1 << 2) -/* Some features are only available or tested on R-Car Gen2 or later */ -#define TMIO_MMC_MIN_RCAR2 BIT(3) +/* Some features are only available or tested on RCar Gen2 or later */ +#define TMIO_MMC_MIN_RCAR2 (1 << 3) /* * Some controllers require waiting for the SD bus to become * idle before writing to some registers. */ -#define TMIO_MMC_HAS_IDLE_WAIT BIT(4) +#define TMIO_MMC_HAS_IDLE_WAIT (1 << 4) +/* + * A GPIO is used for card hotplug detection. We need an extra flag for this, + * because 0 is a valid GPIO number too, and requiring users to specify + * cd_gpio < 0 to disable GPIO hotplug would break backwards compatibility. + */ +#define TMIO_MMC_USE_GPIO_CD (1 << 5) /* - * Use the busy timeout feature. Probably all TMIO versions support it. Yet, - * we don't have documentation for old variants, so we enable only known good - * variants with this flag. Can be removed once all variants are known good. + * Some controllers doesn't have over 0x100 register. 
+ * it is used to checking accessibility of + * CTL_SD_CARD_CLK_CTL / CTL_CLK_AND_WAIT_CTL */ -#define TMIO_MMC_USE_BUSY_TIMEOUT BIT(5) +#define TMIO_MMC_HAVE_HIGH_REG (1 << 6) /* * Some controllers have CMD12 automatically * issue/non-issue register */ -#define TMIO_MMC_HAVE_CMD12_CTRL BIT(7) - -/* Controller has some SDIO status bits which must be 1 */ -#define TMIO_MMC_SDIO_STATUS_SETBITS BIT(8) +#define TMIO_MMC_HAVE_CMD12_CTRL (1 << 7) /* - * Some controllers have a 32-bit wide data port register + * Some controllers needs to set 1 on SDIO status reserved bits */ -#define TMIO_MMC_32BIT_DATA_PORT BIT(9) +#define TMIO_MMC_SDIO_STATUS_QUIRK (1 << 8) /* * Some controllers allows to set SDx actual clock */ -#define TMIO_MMC_CLK_ACTUAL BIT(10) - -/* Some controllers have a CBSY bit */ -#define TMIO_MMC_HAVE_CBSY BIT(11) +#define TMIO_MMC_CLK_ACTUAL (1 << 10) int tmio_core_mmc_enable(void __iomem *cnf, int shift, unsigned long base); int tmio_core_mmc_resume(void __iomem *cnf, int shift, unsigned long base); @@ -102,10 +122,9 @@ struct tmio_mmc_data { unsigned long capabilities2; unsigned long flags; u32 ocr_mask; /* available voltages */ + unsigned int cd_gpio; int alignment_shift; dma_addr_t dma_rx_offset; - unsigned int max_blk_count; - unsigned short max_segs; void (*set_pwr)(struct platform_device *host, int state); void (*set_clk_div)(struct platform_device *host, int state); }; @@ -117,7 +136,6 @@ struct tmio_nand_data { struct nand_bbt_descr *badblock_pattern; struct mtd_partition *partition; unsigned int num_partitions; - const char *const *part_parsers; }; #define FBIO_TMIO_ACC_WRITE 0x7C639300 @@ -125,9 +143,9 @@ struct tmio_nand_data { struct tmio_fb_data { int (*lcd_set_power)(struct platform_device *fb_dev, - bool on); + bool on); int (*lcd_mode)(struct platform_device *fb_dev, - const struct fb_videomode *mode); + const struct fb_videomode *mode); int num_modes; struct fb_videomode *modes; @@ -136,4 +154,5 @@ struct tmio_fb_data { int width; }; + 
#endif diff --git a/include/linux/mfd/tps6105x.h b/include/linux/mfd/tps6105x.h index b1313411ef..8bc5118080 100644 --- a/include/linux/mfd/tps6105x.h +++ b/include/linux/mfd/tps6105x.h @@ -1,9 +1,10 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright (C) 2011 ST-Ericsson SA * Written on behalf of Linaro for ST-Ericsson * * Author: Linus Walleij + * + * License terms: GNU General Public License (GPL) version 2 */ #ifndef MFD_TPS6105X_H #define MFD_TPS6105X_H diff --git a/include/linux/mfd/tps65086.h b/include/linux/mfd/tps65086.h index e0a417e537..a228ae4c88 100644 --- a/include/linux/mfd/tps65086.h +++ b/include/linux/mfd/tps65086.h @@ -1,5 +1,5 @@ /* - * Copyright (C) 2015 Texas Instruments Incorporated - https://www.ti.com/ + * Copyright (C) 2015 Texas Instruments Incorporated - http://www.ti.com/ * Andrew F. Davis * * This program is free software; you can redistribute it and/or diff --git a/include/linux/mfd/tps65090.h b/include/linux/mfd/tps65090.h index 44ebcc4d8f..67d144b3b8 100644 --- a/include/linux/mfd/tps65090.h +++ b/include/linux/mfd/tps65090.h @@ -1,8 +1,22 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * Core driver interface for TI TPS65090 PMIC family * * Copyright (C) 2012 NVIDIA Corporation + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. 
+ * */ #ifndef __LINUX_MFD_TPS65090_H @@ -69,8 +83,6 @@ enum { #define TPS65090_MAX_REG TPS65090_REG_AD_OUT2 #define TPS65090_NUM_REGS (TPS65090_MAX_REG + 1) -struct gpio_desc; - struct tps65090 { struct device *dev; struct regmap *rmap; @@ -83,8 +95,8 @@ struct tps65090 { * @reg_init_data: The regulator init data. * @enable_ext_control: Enable extrenal control or not. Only available for * DCDC1, DCDC2 and DCDC3. - * @gpiod: Gpio descriptor if external control is enabled and controlled through - * gpio + * @gpio: Gpio number if external control is enabled and controlled through + * gpio. * @overcurrent_wait_valid: True if the overcurrent_wait should be applied. * @overcurrent_wait: Value to set as the overcurrent wait time. This is the * actual bitfield value, not a time in ms (valid value are 0 - 3). @@ -92,7 +104,7 @@ struct tps65090 { struct tps65090_regulator_plat_data { struct regulator_init_data *reg_init_data; bool enable_ext_control; - struct gpio_desc *gpiod; + int gpio; bool overcurrent_wait_valid; int overcurrent_wait; }; diff --git a/include/linux/mfd/tps65217.h b/include/linux/mfd/tps65217.h index db7091824e..4ccda89696 100644 --- a/include/linux/mfd/tps65217.h +++ b/include/linux/mfd/tps65217.h @@ -3,7 +3,7 @@ * * Functions to access TPS65217 power management chip. 
* - * Copyright (C) 2011 Texas Instruments Incorporated - https://www.ti.com/ + * Copyright (C) 2011 Texas Instruments Incorporated - http://www.ti.com/ * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License as @@ -73,15 +73,13 @@ #define TPS65217_PPATH_AC_CURRENT_MASK 0x0C #define TPS65217_PPATH_USB_CURRENT_MASK 0x03 +#define TPS65217_INT_RESERVEDM BIT(7) #define TPS65217_INT_PBM BIT(6) #define TPS65217_INT_ACM BIT(5) #define TPS65217_INT_USBM BIT(4) #define TPS65217_INT_PBI BIT(2) #define TPS65217_INT_ACI BIT(1) #define TPS65217_INT_USBI BIT(0) -#define TPS65217_INT_SHIFT 4 -#define TPS65217_INT_MASK (TPS65217_INT_PBM | TPS65217_INT_ACM | \ - TPS65217_INT_USBM) #define TPS65217_CHGCONFIG0_TREG BIT(7) #define TPS65217_CHGCONFIG0_DPPM BIT(6) @@ -236,11 +234,12 @@ struct tps65217_bl_pdata { int dft_brightness; }; -/* Interrupt numbers */ -#define TPS65217_IRQ_USB 0 -#define TPS65217_IRQ_AC 1 -#define TPS65217_IRQ_PB 2 -#define TPS65217_NUM_IRQ 3 +enum tps65217_irq_type { + TPS65217_IRQ_PB, + TPS65217_IRQ_AC, + TPS65217_IRQ_USB, + TPS65217_NUM_IRQ +}; /** * struct tps65217_board - packages regulator init data @@ -263,6 +262,7 @@ struct tps65217_board { struct tps65217 { struct device *dev; struct tps65217_board *pdata; + unsigned long id; struct regulator_desc desc[TPS65217_NUM_REGULATOR]; struct regmap *regmap; u8 *strobes; @@ -277,6 +277,11 @@ static inline struct tps65217 *dev_to_tps65217(struct device *dev) return dev_get_drvdata(dev); } +static inline unsigned long tps65217_chip_id(struct tps65217 *tps65217) +{ + return tps65217->id; +} + int tps65217_reg_read(struct tps65217 *tps, unsigned int reg, unsigned int *val); int tps65217_reg_write(struct tps65217 *tps, unsigned int reg, diff --git a/include/linux/mfd/tps65218.h b/include/linux/mfd/tps65218.h index f4ca367e34..d1db9527fa 100644 --- a/include/linux/mfd/tps65218.h +++ b/include/linux/mfd/tps65218.h @@ -3,7 +3,7 @@ * * Functions to 
access TPS65219 power management chip. * - * Copyright (C) 2014 Texas Instruments Incorporated - https://www.ti.com/ + * Copyright (C) 2014 Texas Instruments Incorporated - http://www.ti.com/ * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version 2 as @@ -137,10 +137,6 @@ #define TPS65218_CONFIG1_PGDLY_MASK 0x18 #define TPS65218_CONFIG1_STRICT BIT(2) #define TPS65218_CONFIG1_UVLO_MASK 0x3 -#define TPS65218_CONFIG1_UVLO_2750000 0x0 -#define TPS65218_CONFIG1_UVLO_2950000 0x1 -#define TPS65218_CONFIG1_UVLO_3250000 0x2 -#define TPS65218_CONFIG1_UVLO_3350000 0x3 #define TPS65218_CONFIG2_DC12_RST BIT(7) #define TPS65218_CONFIG2_UVLOHYS BIT(6) @@ -209,11 +205,10 @@ enum tps65218_regulator_id { TPS65218_DCDC_4, TPS65218_DCDC_5, TPS65218_DCDC_6, + /* LS's */ + TPS65218_LS_3, /* LDOs */ TPS65218_LDO_1, - /* LS's */ - TPS65218_LS_2, - TPS65218_LS_3, }; #define TPS65218_MAX_REG_ID TPS65218_LDO_1 @@ -223,7 +218,7 @@ enum tps65218_regulator_id { /* Number of LDO voltage regulators available */ #define TPS65218_NUM_LDO 1 /* Number of total LS current regulators available */ -#define TPS65218_NUM_LS 2 +#define TPS65218_NUM_LS 1 /* Number of total regulators available */ #define TPS65218_NUM_REGULATOR (TPS65218_NUM_DCDC + TPS65218_NUM_LDO \ + TPS65218_NUM_LS) @@ -250,6 +245,24 @@ enum tps65218_irqs { TPS65218_INVALID4_IRQ, }; +/** + * struct tps_info - packages regulator constraints + * @id: Id of the regulator + * @name: Voltage regulator name + * @min_uV: minimum micro volts + * @max_uV: minimum micro volts + * @strobe: sequencing strobe value for the regulator + * + * This data is used to check the regualtor voltage limits while setting. 
+ */ +struct tps_info { + int id; + const char *name; + int min_uV; + int max_uV; + int strobe; +}; + /** * struct tps65218 - tps65218 sub-driver chip access routines * @@ -267,10 +280,12 @@ struct tps65218 { u32 irq_mask; struct regmap_irq_chip_data *irq_data; struct regulator_desc desc[TPS65218_NUM_REGULATOR]; + struct tps_info *info[TPS65218_NUM_REGULATOR]; struct regmap *regmap; - u8 *strobes; }; +int tps65218_reg_read(struct tps65218 *tps, unsigned int reg, + unsigned int *val); int tps65218_reg_write(struct tps65218 *tps, unsigned int reg, unsigned int val, unsigned int level); int tps65218_set_bits(struct tps65218 *tps, unsigned int reg, diff --git a/include/linux/mfd/tps6586x.h b/include/linux/mfd/tps6586x.h index b19c2801a3..96187ed9f9 100644 --- a/include/linux/mfd/tps6586x.h +++ b/include/linux/mfd/tps6586x.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef __LINUX_MFD_TPS6586X_H #define __LINUX_MFD_TPS6586X_H @@ -18,7 +17,6 @@ #define TPS658621A 0x15 #define TPS658621CD 0x2c #define TPS658623 0x1b -#define TPS658624 0x0a #define TPS658640 0x01 #define TPS658640v2 0x02 #define TPS658643 0x03 diff --git a/include/linux/mfd/tps65910.h b/include/linux/mfd/tps65910.h index 701925db75..6483a6fdce 100644 --- a/include/linux/mfd/tps65910.h +++ b/include/linux/mfd/tps65910.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * tps65910.h -- TI TPS6591x * @@ -7,6 +6,12 @@ * Author: Graeme Gregory * Author: Jorge Eduardo Candelaria * Author: Arnaud Deconinck + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. 
+ * */ #ifndef __LINUX_MFD_TPS65910_H @@ -129,7 +134,6 @@ /* RTC_CTRL_REG bitfields */ #define TPS65910_RTC_CTRL_STOP_RTC 0x01 /*0=stop, 1=run */ -#define TPS65910_RTC_CTRL_AUTO_COMP 0x04 #define TPS65910_RTC_CTRL_GET_TIME 0x40 /* RTC_STATUS_REG bitfields */ @@ -874,7 +878,7 @@ struct tps65910_board { bool en_ck32k_xtal; bool en_dev_slp; bool pm_off; - struct tps65910_sleep_keepon_data slp_keepon; + struct tps65910_sleep_keepon_data *slp_keepon; bool en_gpio_sleep[TPS6591X_MAX_NUM_GPIO]; unsigned long regulator_ext_sleep_control[TPS65910_NUM_REGS]; struct regulator_init_data *tps65910_pmic_init_data[TPS65910_NUM_REGS]; @@ -890,6 +894,11 @@ struct tps65910 { struct regmap *regmap; unsigned long id; + /* Client devices */ + struct tps65910_pmic *pmic; + struct tps65910_rtc *rtc; + struct tps65910_power *power; + /* Device node parsed board data */ struct tps65910_board *of_plat_data; @@ -908,4 +917,39 @@ static inline int tps65910_chip_id(struct tps65910 *tps65910) return tps65910->id; } +static inline int tps65910_reg_read(struct tps65910 *tps65910, u8 reg, + unsigned int *val) +{ + return regmap_read(tps65910->regmap, reg, val); +} + +static inline int tps65910_reg_write(struct tps65910 *tps65910, u8 reg, + unsigned int val) +{ + return regmap_write(tps65910->regmap, reg, val); +} + +static inline int tps65910_reg_set_bits(struct tps65910 *tps65910, u8 reg, + u8 mask) +{ + return regmap_update_bits(tps65910->regmap, reg, mask, mask); +} + +static inline int tps65910_reg_clear_bits(struct tps65910 *tps65910, u8 reg, + u8 mask) +{ + return regmap_update_bits(tps65910->regmap, reg, mask, 0); +} + +static inline int tps65910_reg_update_bits(struct tps65910 *tps65910, u8 reg, + u8 mask, u8 val) +{ + return regmap_update_bits(tps65910->regmap, reg, mask, val); +} + +static inline int tps65910_irq_get_virq(struct tps65910 *tps65910, int irq) +{ + return regmap_irq_get_virq(tps65910->irq_data, irq); +} + #endif /* __LINUX_MFD_TPS65910_H */ diff --git 
a/include/linux/mfd/tps65912.h b/include/linux/mfd/tps65912.h index 7943e413de..1a60370155 100644 --- a/include/linux/mfd/tps65912.h +++ b/include/linux/mfd/tps65912.h @@ -1,5 +1,5 @@ /* - * Copyright (C) 2015 Texas Instruments Incorporated - https://www.ti.com/ + * Copyright (C) 2015 Texas Instruments Incorporated - http://www.ti.com/ * Andrew F. Davis * * This program is free software; you can redistribute it and/or @@ -319,7 +319,21 @@ struct tps65912 { struct regmap_irq_chip_data *irq_data; }; -extern const struct regmap_config tps65912_regmap_config; +static const struct regmap_range tps65912_yes_ranges[] = { + regmap_reg_range(TPS65912_INT_STS, TPS65912_GPIO5), +}; + +static const struct regmap_access_table tps65912_volatile_table = { + .yes_ranges = tps65912_yes_ranges, + .n_yes_ranges = ARRAY_SIZE(tps65912_yes_ranges), +}; + +static const struct regmap_config tps65912_regmap_config = { + .reg_bits = 8, + .val_bits = 8, + .cache_type = REGCACHE_RBTREE, + .volatile_table = &tps65912_volatile_table, +}; int tps65912_device_init(struct tps65912 *tps); int tps65912_device_exit(struct tps65912 *tps); diff --git a/include/linux/mfd/twl4030-audio.h b/include/linux/mfd/twl4030-audio.h index 1c28605dfd..3d22b72df0 100644 --- a/include/linux/mfd/twl4030-audio.h +++ b/include/linux/mfd/twl4030-audio.h @@ -1,10 +1,24 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * MFD driver for twl4030 audio submodule * * Author: Peter Ujfalusi * * Copyright: (C) 2009 Nokia Corporation + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA + * 02110-1301 USA + * */ #ifndef __TWL4030_CODEC_H__ diff --git a/include/linux/mfd/twl6040.h b/include/linux/mfd/twl6040.h index 1fc7450bd8..a2e88761c0 100644 --- a/include/linux/mfd/twl6040.h +++ b/include/linux/mfd/twl6040.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * MFD driver for twl6040 * @@ -6,6 +5,21 @@ * Misael Lopez Cruz * * Copyright: (C) 2011 Texas Instruments, Inc. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA + * 02110-1301 USA + * */ #ifndef __TWL6040_CODEC_H__ diff --git a/include/linux/mfd/ucb1x00.h b/include/linux/mfd/ucb1x00.h index 43bcf35afe..88f90cbf8e 100644 --- a/include/linux/mfd/ucb1x00.h +++ b/include/linux/mfd/ucb1x00.h @@ -1,8 +1,11 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * linux/include/mfd/ucb1x00.h * * Copyright (C) 2001 Russell King, All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License. 
*/ #ifndef UCB1200_H #define UCB1200_H diff --git a/include/linux/mfd/viperboard.h b/include/linux/mfd/viperboard.h index 0557667fe5..193452848c 100644 --- a/include/linux/mfd/viperboard.h +++ b/include/linux/mfd/viperboard.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * include/linux/mfd/viperboard.h * @@ -7,6 +6,12 @@ * (C) 2012 by Lemonage GmbH * Author: Lars Poeschel * All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + * */ #ifndef __MFD_VIPERBOARD_H__ diff --git a/include/linux/mfd/wl1273-core.h b/include/linux/mfd/wl1273-core.h index c28cf76d5c..db2f3f454a 100644 --- a/include/linux/mfd/wl1273-core.h +++ b/include/linux/mfd/wl1273-core.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * include/linux/mfd/wl1273-core.h * @@ -6,6 +5,20 @@ * * Copyright (C) 2010 Nokia Corporation * Author: Matti J. Aaltonen + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA + * 02110-1301 USA */ #ifndef WL1273_CORE_H diff --git a/include/linux/mfd/wm831x/auxadc.h b/include/linux/mfd/wm831x/auxadc.h index 02ddb4fe16..867aa23f93 100644 --- a/include/linux/mfd/wm831x/auxadc.h +++ b/include/linux/mfd/wm831x/auxadc.h @@ -1,10 +1,15 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * include/linux/mfd/wm831x/auxadc.h -- Auxiliary ADC interface for WM831x * * Copyright 2009 Wolfson Microelectronics PLC. * * Author: Mark Brown + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + * */ #ifndef __MFD_WM831X_AUXADC_H__ diff --git a/include/linux/mfd/wm831x/core.h b/include/linux/mfd/wm831x/core.h index 511bcad876..76c2264843 100644 --- a/include/linux/mfd/wm831x/core.h +++ b/include/linux/mfd/wm831x/core.h @@ -1,10 +1,15 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * include/linux/mfd/wm831x/core.h -- Core interface for WM831x * * Copyright 2009 Wolfson Microelectronics PLC. * * Author: Mark Brown + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + * */ #ifndef __MFD_WM831X_CORE_H__ @@ -16,8 +21,6 @@ #include #include #include -#include -#include /* * Register values. 
@@ -364,9 +367,6 @@ struct wm831x { struct regmap *regmap; - struct wm831x_pdata pdata; - enum wm831x_parent type; - int irq; /* Our chip IRQ */ struct mutex irq_lock; struct irq_domain *irq_domain; @@ -412,7 +412,8 @@ int wm831x_set_bits(struct wm831x *wm831x, unsigned short reg, int wm831x_bulk_read(struct wm831x *wm831x, unsigned short reg, int count, u16 *buf); -int wm831x_device_init(struct wm831x *wm831x, int irq); +int wm831x_device_init(struct wm831x *wm831x, unsigned long id, int irq); +void wm831x_device_exit(struct wm831x *wm831x); int wm831x_device_suspend(struct wm831x *wm831x); void wm831x_device_shutdown(struct wm831x *wm831x); int wm831x_irq_init(struct wm831x *wm831x, int irq); @@ -426,6 +427,4 @@ static inline int wm831x_irq(struct wm831x *wm831x, int irq) extern struct regmap_config wm831x_regmap_config; -extern const struct of_device_id wm831x_of_match[]; - #endif diff --git a/include/linux/mfd/wm831x/gpio.h b/include/linux/mfd/wm831x/gpio.h index 70587a4ec6..9b163c5886 100644 --- a/include/linux/mfd/wm831x/gpio.h +++ b/include/linux/mfd/wm831x/gpio.h @@ -1,10 +1,15 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * include/linux/mfd/wm831x/gpio.h -- GPIO for WM831x * * Copyright 2009 Wolfson Microelectronics PLC. * * Author: Mark Brown + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + * */ #ifndef __MFD_WM831X_GPIO_H__ diff --git a/include/linux/mfd/wm831x/irq.h b/include/linux/mfd/wm831x/irq.h index ab2d1524e7..3a8c97656f 100644 --- a/include/linux/mfd/wm831x/irq.h +++ b/include/linux/mfd/wm831x/irq.h @@ -1,10 +1,15 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * include/linux/mfd/wm831x/irq.h -- Interrupt controller for WM831x * * Copyright 2009 Wolfson Microelectronics PLC. 
* * Author: Mark Brown + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + * */ #ifndef __MFD_WM831X_IRQ_H__ diff --git a/include/linux/mfd/wm831x/otp.h b/include/linux/mfd/wm831x/otp.h index bc244456ad..ce1f81a39b 100644 --- a/include/linux/mfd/wm831x/otp.h +++ b/include/linux/mfd/wm831x/otp.h @@ -1,10 +1,15 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * include/linux/mfd/wm831x/otp.h -- OTP interface for WM831x * * Copyright 2009 Wolfson Microelectronics PLC. * * Author: Mark Brown + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + * */ #ifndef __MFD_WM831X_OTP_H__ diff --git a/include/linux/mfd/wm831x/pdata.h b/include/linux/mfd/wm831x/pdata.h index 75aa94dadf..dcc9631b30 100644 --- a/include/linux/mfd/wm831x/pdata.h +++ b/include/linux/mfd/wm831x/pdata.h @@ -1,10 +1,15 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * include/linux/mfd/wm831x/pdata.h -- Platform data for WM831x * * Copyright 2009 Wolfson Microelectronics PLC. * * Author: Mark Brown + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + * */ #ifndef __MFD_WM831X_PDATA_H__ @@ -47,6 +52,7 @@ struct wm831x_battery_pdata { * I2C or SPI buses. 
*/ struct wm831x_buckv_pdata { + int dvs_gpio; /** CPU GPIO to use for DVS switching */ int dvs_control_src; /** Hardware DVS source to use (1 or 2) */ int dvs_init_state; /** DVS state to expect on startup */ int dvs_state_gpio; /** CPU GPIO to use for monitoring status */ @@ -89,6 +95,7 @@ enum wm831x_watchdog_action { struct wm831x_watchdog_pdata { enum wm831x_watchdog_action primary, secondary; + int update_gpio; unsigned int software:1; }; diff --git a/include/linux/mfd/wm831x/pmu.h b/include/linux/mfd/wm831x/pmu.h index 77187fcaf2..b18cbb027b 100644 --- a/include/linux/mfd/wm831x/pmu.h +++ b/include/linux/mfd/wm831x/pmu.h @@ -1,10 +1,15 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * include/linux/mfd/wm831x/pmu.h -- PMU for WM831x * * Copyright 2009 Wolfson Microelectronics PLC. * * Author: Mark Brown + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + * */ #ifndef __MFD_WM831X_PMU_H__ diff --git a/include/linux/mfd/wm831x/regulator.h b/include/linux/mfd/wm831x/regulator.h index 233b301795..955d30fc6a 100644 --- a/include/linux/mfd/wm831x/regulator.h +++ b/include/linux/mfd/wm831x/regulator.h @@ -1,10 +1,15 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * linux/mfd/wm831x/regulator.h -- Regulator definitons for wm831x * * Copyright 2009 Wolfson Microelectronics PLC. * * Author: Mark Brown + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. 
+ * */ #ifndef __MFD_WM831X_REGULATOR_H__ @@ -1208,6 +1213,6 @@ #define WM831X_LDO1_OK_WIDTH 1 /* LDO1_OK */ #define WM831X_ISINK_MAX_ISEL 55 -extern const unsigned int wm831x_isinkv_values[WM831X_ISINK_MAX_ISEL + 1]; +extern int wm831x_isinkv_values[WM831X_ISINK_MAX_ISEL + 1]; #endif diff --git a/include/linux/mfd/wm831x/status.h b/include/linux/mfd/wm831x/status.h index 0d263577d2..6bc090d0e3 100644 --- a/include/linux/mfd/wm831x/status.h +++ b/include/linux/mfd/wm831x/status.h @@ -1,10 +1,15 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * include/linux/mfd/wm831x/status.h -- Status LEDs for WM831x * * Copyright 2009 Wolfson Microelectronics PLC. * * Author: Mark Brown + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + * */ #ifndef __MFD_WM831X_STATUS_H__ diff --git a/include/linux/mfd/wm831x/watchdog.h b/include/linux/mfd/wm831x/watchdog.h index c997c79294..97a99b5295 100644 --- a/include/linux/mfd/wm831x/watchdog.h +++ b/include/linux/mfd/wm831x/watchdog.h @@ -1,10 +1,15 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * include/linux/mfd/wm831x/watchdog.h -- Watchdog for WM831x * * Copyright 2009 Wolfson Microelectronics PLC. * * Author: Mark Brown + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. 
+ * */ #ifndef __MFD_WM831X_WATCHDOG_H__ diff --git a/include/linux/mfd/wm8350/audio.h b/include/linux/mfd/wm8350/audio.h index ec01ec84d4..bd581c6fa0 100644 --- a/include/linux/mfd/wm8350/audio.h +++ b/include/linux/mfd/wm8350/audio.h @@ -1,8 +1,13 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * audio.h -- Audio Driver for Wolfson WM8350 PMIC * * Copyright 2007, 2008 Wolfson Microelectronics PLC + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + * */ #ifndef __LINUX_MFD_WM8350_AUDIO_H_ @@ -612,8 +617,11 @@ struct wm8350_audio_platform_data { u32 codec_current_charge:2; /* codec current @ vmid charge */ }; +struct snd_soc_codec; + struct wm8350_codec { struct platform_device *pdev; + struct snd_soc_codec *codec; struct wm8350_audio_platform_data *platform_data; }; diff --git a/include/linux/mfd/wm8350/comparator.h b/include/linux/mfd/wm8350/comparator.h index 250d892395..54bc5d0fd5 100644 --- a/include/linux/mfd/wm8350/comparator.h +++ b/include/linux/mfd/wm8350/comparator.h @@ -1,8 +1,12 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * comparator.h -- Comparator Aux ADC for Wolfson WM8350 PMIC * * Copyright 2007 Wolfson Microelectronics PLC + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. 
*/ #ifndef __LINUX_MFD_WM8350_COMPARATOR_H_ diff --git a/include/linux/mfd/wm8350/core.h b/include/linux/mfd/wm8350/core.h index a3241e4d75..509481d9cf 100644 --- a/include/linux/mfd/wm8350/core.h +++ b/include/linux/mfd/wm8350/core.h @@ -1,8 +1,13 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * core.h -- Core Driver for Wolfson WM8350 PMIC * * Copyright 2007 Wolfson Microelectronics PLC + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + * */ #ifndef __LINUX_MFD_WM8350_CORE_H_ @@ -638,6 +643,7 @@ struct wm8350_platform_data { */ int wm8350_device_init(struct wm8350 *wm8350, int irq, struct wm8350_platform_data *pdata); +void wm8350_device_exit(struct wm8350 *wm8350); /* * WM8350 device IO diff --git a/include/linux/mfd/wm8350/gpio.h b/include/linux/mfd/wm8350/gpio.h index e831b30dde..d657bcd6d9 100644 --- a/include/linux/mfd/wm8350/gpio.h +++ b/include/linux/mfd/wm8350/gpio.h @@ -1,8 +1,13 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * gpio.h -- GPIO Driver for Wolfson WM8350 PMIC * * Copyright 2007 Wolfson Microelectronics PLC + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. 
+ * */ #ifndef __LINUX_MFD_WM8350_GPIO_H_ diff --git a/include/linux/mfd/wm8350/pmic.h b/include/linux/mfd/wm8350/pmic.h index 04b09a2ddb..7a09e7f1f9 100644 --- a/include/linux/mfd/wm8350/pmic.h +++ b/include/linux/mfd/wm8350/pmic.h @@ -1,8 +1,13 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * pmic.h -- Power Management Driver for Wolfson WM8350 PMIC * * Copyright 2007 Wolfson Microelectronics PLC + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + * */ #ifndef __LINUX_MFD_WM8350_PMIC_H diff --git a/include/linux/mfd/wm8350/rtc.h b/include/linux/mfd/wm8350/rtc.h index b2f58359b2..ebd72ffc62 100644 --- a/include/linux/mfd/wm8350/rtc.h +++ b/include/linux/mfd/wm8350/rtc.h @@ -1,8 +1,12 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * rtc.h -- RTC driver for Wolfson WM8350 PMIC * * Copyright 2007 Wolfson Microelectronics PLC + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. */ #ifndef __LINUX_MFD_WM8350_RTC_H diff --git a/include/linux/mfd/wm8350/supply.h b/include/linux/mfd/wm8350/supply.h index d7a91e2617..8dc93673e3 100644 --- a/include/linux/mfd/wm8350/supply.h +++ b/include/linux/mfd/wm8350/supply.h @@ -1,8 +1,13 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * supply.h -- Power Supply Driver for Wolfson WM8350 PMIC * * Copyright 2007 Wolfson Microelectronics PLC + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. 
+ * */ #ifndef __LINUX_MFD_WM8350_SUPPLY_H_ diff --git a/include/linux/mfd/wm8350/wdt.h b/include/linux/mfd/wm8350/wdt.h index 97454aa8c4..f6135b5e5e 100644 --- a/include/linux/mfd/wm8350/wdt.h +++ b/include/linux/mfd/wm8350/wdt.h @@ -1,8 +1,12 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * wdt.h -- Watchdog Driver for Wolfson WM8350 PMIC * * Copyright 2007, 2008 Wolfson Microelectronics PLC + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. */ #ifndef __LINUX_MFD_WM8350_WDT_H_ diff --git a/include/linux/mfd/wm8400-audio.h b/include/linux/mfd/wm8400-audio.h index d47bdcc7a7..e06ed3eb1d 100644 --- a/include/linux/mfd/wm8400-audio.h +++ b/include/linux/mfd/wm8400-audio.h @@ -1,8 +1,21 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * wm8400 private definitions for audio * * Copyright 2008 Wolfson Microelectronics plc + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 
*/ #ifndef __LINUX_MFD_WM8400_AUDIO_H diff --git a/include/linux/mfd/wm8400-private.h b/include/linux/mfd/wm8400-private.h index bc8c2ca6dc..4ee908f5b8 100644 --- a/include/linux/mfd/wm8400-private.h +++ b/include/linux/mfd/wm8400-private.h @@ -1,8 +1,21 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * wm8400 private definitions. * * Copyright 2008 Wolfson Microelectronics plc + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 
*/ #ifndef __LINUX_MFD_WM8400_PRIV_H @@ -910,4 +923,12 @@ struct wm8400 { #define WM8400_LINE_CMP_VTHD_SHIFT 0 /* LINE_CMP_VTHD - [3:0] */ #define WM8400_LINE_CMP_VTHD_WIDTH 4 /* LINE_CMP_VTHD - [3:0] */ +int wm8400_block_read(struct wm8400 *wm8400, u8 reg, int count, u16 *data); + +static inline int wm8400_set_bits(struct wm8400 *wm8400, u8 reg, + u16 mask, u16 val) +{ + return regmap_update_bits(wm8400->regmap, reg, mask, val); +} + #endif diff --git a/include/linux/mfd/wm8400.h b/include/linux/mfd/wm8400.h index a812d89e7c..b46b566ac1 100644 --- a/include/linux/mfd/wm8400.h +++ b/include/linux/mfd/wm8400.h @@ -1,8 +1,21 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * wm8400 client interface * * Copyright 2008 Wolfson Microelectronics plc + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ #ifndef __LINUX_MFD_WM8400_H diff --git a/include/linux/mfd/wm8994/core.h b/include/linux/mfd/wm8994/core.h index e8b093522f..eefafa62d3 100644 --- a/include/linux/mfd/wm8994/core.h +++ b/include/linux/mfd/wm8994/core.h @@ -1,10 +1,15 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * include/linux/mfd/wm8994/core.h -- Core interface for WM8994 * * Copyright 2009 Wolfson Microelectronics PLC. 
* * Author: Mark Brown + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + * */ #ifndef __MFD_WM8994_CORE_H__ diff --git a/include/linux/mfd/wm8994/gpio.h b/include/linux/mfd/wm8994/gpio.h index 723fa33177..0c79b5ff4b 100644 --- a/include/linux/mfd/wm8994/gpio.h +++ b/include/linux/mfd/wm8994/gpio.h @@ -1,10 +1,15 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * include/linux/mfd/wm8994/gpio.h - GPIO configuration for WM8994 * * Copyright 2009 Wolfson Microelectronics PLC. * * Author: Mark Brown + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + * */ #ifndef __MFD_WM8994_GPIO_H__ diff --git a/include/linux/mfd/wm8994/pdata.h b/include/linux/mfd/wm8994/pdata.h index 6e2962ef5b..90c60524a4 100644 --- a/include/linux/mfd/wm8994/pdata.h +++ b/include/linux/mfd/wm8994/pdata.h @@ -1,10 +1,15 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * include/linux/mfd/wm8994/pdata.h -- Platform data for WM8994 * * Copyright 2009 Wolfson Microelectronics PLC. * * Author: Mark Brown + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. 
+ * */ #ifndef __MFD_WM8994_PDATA_H__ @@ -15,6 +20,9 @@ #define WM8994_NUM_AIF 3 struct wm8994_ldo_pdata { + /** GPIOs to enable regulator, 0 or less if not available */ + int enable; + const struct regulator_init_data *init_data; }; @@ -33,7 +41,7 @@ struct wm8994_ldo_pdata { * DRC configurations are specified with a label and a set of register * values to write (the enable bits will be ignored). At runtime an * enumerated control will be presented for each DRC block allowing - * the user to choose the configuration to use. + * the user to choose the configration to use. * * Configurations may be generated by hand or by using the DRC control * panel provided by the WISCE - see http://www.wolfsonmicro.com/wisce/ @@ -214,12 +222,6 @@ struct wm8994_pdata { */ bool spkmode_pu; - /* - * CS/ADDR must be pulled internally by the device on this - * system. - */ - bool csnaddr_pd; - /** * Maximum number of channels clocks will be generated for, * useful for systems where and I2S bus with multiple data diff --git a/include/linux/mfd/wm8994/registers.h b/include/linux/mfd/wm8994/registers.h index 8782a207fa..db8cef3d53 100644 --- a/include/linux/mfd/wm8994/registers.h +++ b/include/linux/mfd/wm8994/registers.h @@ -1,10 +1,15 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * include/linux/mfd/wm8994/registers.h -- Register definitions for WM8994 * * Copyright 2009 Wolfson Microelectronics PLC. * * Author: Mark Brown + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. 
+ * */ #ifndef __MFD_WM8994_REGISTERS_H__ diff --git a/include/linux/mg_disk.h b/include/linux/mg_disk.h new file mode 100644 index 0000000000..e11f4d9f1c --- /dev/null +++ b/include/linux/mg_disk.h @@ -0,0 +1,45 @@ +/* + * include/linux/mg_disk.c + * + * Private data for mflash platform driver + * + * (c) 2008 mGine Co.,LTD + * (c) 2008 unsik Kim + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#ifndef __MG_DISK_H__ +#define __MG_DISK_H__ + +/* name for platform device */ +#define MG_DEV_NAME "mg_disk" + +/* names of GPIO resource */ +#define MG_RST_PIN "mg_rst" +/* except MG_BOOT_DEV, reset-out pin should be assigned */ +#define MG_RSTOUT_PIN "mg_rstout" + +/* device attribution */ +/* use mflash as boot device */ +#define MG_BOOT_DEV (1 << 0) +/* use mflash as storage device */ +#define MG_STORAGE_DEV (1 << 1) +/* same as MG_STORAGE_DEV, but bootloader already done reset sequence */ +#define MG_STORAGE_DEV_SKIP_RST (1 << 2) + +/* private driver data */ +struct mg_drv_data { + /* disk resource */ + u32 use_polling; + + /* device attribution */ + u32 dev_attr; + + /* internally used */ + void *host; +}; + +#endif diff --git a/include/linux/mic_bus.h b/include/linux/mic_bus.h new file mode 100644 index 0000000000..27d7c95fd0 --- /dev/null +++ b/include/linux/mic_bus.h @@ -0,0 +1,111 @@ +/* + * Intel MIC Platform Software Stack (MPSS) + * + * Copyright(c) 2014 Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License, version 2, as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU + * General Public License for more details. + * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". + * + * Intel MIC Bus driver. + * + * This implementation is very similar to the the virtio bus driver + * implementation @ include/linux/virtio.h. + */ +#ifndef _MIC_BUS_H_ +#define _MIC_BUS_H_ +/* + * Everything a mbus driver needs to work with any particular mbus + * implementation. + */ +#include +#include + +struct mbus_device_id { + __u32 device; + __u32 vendor; +}; + +#define MBUS_DEV_DMA_HOST 2 +#define MBUS_DEV_DMA_MIC 3 +#define MBUS_DEV_ANY_ID 0xffffffff + +/** + * mbus_device - representation of a device using mbus + * @mmio_va: virtual address of mmio space + * @hw_ops: the hardware ops supported by this device. + * @id: the device type identification (used to match it with a driver). + * @dev: underlying device. + * be used to communicate with. + * @index: unique position on the mbus bus + */ +struct mbus_device { + void __iomem *mmio_va; + struct mbus_hw_ops *hw_ops; + struct mbus_device_id id; + struct device dev; + int index; +}; + +/** + * mbus_driver - operations for a mbus I/O driver + * @driver: underlying device driver (populate name and owner). + * @id_table: the ids serviced by this driver. + * @probe: the function to call when a device is found. Returns 0 or -errno. + * @remove: the function to call when a device is removed. + */ +struct mbus_driver { + struct device_driver driver; + const struct mbus_device_id *id_table; + int (*probe)(struct mbus_device *dev); + void (*scan)(struct mbus_device *dev); + void (*remove)(struct mbus_device *dev); +}; + +/** + * struct mic_irq - opaque pointer used as cookie + */ +struct mic_irq; + +/** + * mbus_hw_ops - Hardware operations for accessing a MIC device on the MIC bus. 
+ */ +struct mbus_hw_ops { + struct mic_irq* (*request_threaded_irq)(struct mbus_device *mbdev, + irq_handler_t handler, + irq_handler_t thread_fn, + const char *name, void *data, + int intr_src); + void (*free_irq)(struct mbus_device *mbdev, + struct mic_irq *cookie, void *data); + void (*ack_interrupt)(struct mbus_device *mbdev, int num); +}; + +struct mbus_device * +mbus_register_device(struct device *pdev, int id, struct dma_map_ops *dma_ops, + struct mbus_hw_ops *hw_ops, int index, + void __iomem *mmio_va); +void mbus_unregister_device(struct mbus_device *mbdev); + +int mbus_register_driver(struct mbus_driver *drv); +void mbus_unregister_driver(struct mbus_driver *drv); + +static inline struct mbus_device *dev_to_mbus(struct device *_dev) +{ + return container_of(_dev, struct mbus_device, dev); +} + +static inline struct mbus_driver *drv_to_mbus(struct device_driver *drv) +{ + return container_of(drv, struct mbus_driver, driver); +} + +#endif /* _MIC_BUS_H */ diff --git a/include/linux/micrel_phy.h b/include/linux/micrel_phy.h index 3d43c60b49..257173e009 100644 --- a/include/linux/micrel_phy.h +++ b/include/linux/micrel_phy.h @@ -1,8 +1,13 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * include/linux/micrel_phy.h * * Micrel PHY IDs + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. 
+ * */ #ifndef _MICREL_PHY_H @@ -26,39 +31,17 @@ #define PHY_ID_KSZ8081 0x00221560 #define PHY_ID_KSZ8061 0x00221570 #define PHY_ID_KSZ9031 0x00221620 -#define PHY_ID_KSZ9131 0x00221640 -#define PHY_ID_LAN8814 0x00221660 #define PHY_ID_KSZ886X 0x00221430 #define PHY_ID_KSZ8863 0x00221435 -#define PHY_ID_KSZ87XX 0x00221550 - -#define PHY_ID_KSZ9477 0x00221631 - /* struct phy_device dev_flags definitions */ #define MICREL_PHY_50MHZ_CLK 0x00000001 #define MICREL_PHY_FXEN 0x00000002 -#define MICREL_KSZ8_P1_ERRATA 0x00000003 #define MICREL_KSZ9021_EXTREG_CTRL 0xB #define MICREL_KSZ9021_EXTREG_DATA_WRITE 0xC #define MICREL_KSZ9021_RGMII_CLK_CTRL_PAD_SCEW 0x104 #define MICREL_KSZ9021_RGMII_RX_DATA_PAD_SCEW 0x105 -/* Device specific MII_BMCR (Reg 0) bits */ -/* 1 = HP Auto MDI/MDI-X mode, 0 = Microchip Auto MDI/MDI-X mode */ -#define KSZ886X_BMCR_HP_MDIX BIT(5) -/* 1 = Force MDI (transmit on RXP/RXM pins), 0 = Normal operation - * (transmit on TXP/TXM pins) - */ -#define KSZ886X_BMCR_FORCE_MDI BIT(4) -/* 1 = Disable auto MDI-X */ -#define KSZ886X_BMCR_DISABLE_AUTO_MDIX BIT(3) -#define KSZ886X_BMCR_DISABLE_FAR_END_FAULT BIT(2) -#define KSZ886X_BMCR_DISABLE_TRANSMIT BIT(1) -#define KSZ886X_BMCR_DISABLE_LED BIT(0) - -#define KSZ886X_CTRL_MDIX_STAT BIT(4) - #endif /* _MICREL_PHY_H */ diff --git a/include/linux/microchipphy.h b/include/linux/microchipphy.h index 626c450d71..eb492d47f7 100644 --- a/include/linux/microchipphy.h +++ b/include/linux/microchipphy.h @@ -1,6 +1,18 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * Copyright (C) 2015 Microchip Technology + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. 
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, see . */ #ifndef _MICROCHIPPHY_H @@ -58,23 +70,4 @@ #define LAN88XX_MMD3_CHIP_ID (32877) #define LAN88XX_MMD3_CHIP_REV (32878) -/* Registers specific to the LAN7800/LAN7850 embedded phy */ -#define LAN78XX_PHY_LED_MODE_SELECT (0x1D) - -#define LAN78XX_PHY_CTRL3 (0x14) -#define LAN78XX_PHY_CTRL3_AUTO_DOWNSHIFT (0x0010) -#define LAN78XX_PHY_CTRL3_DOWNSHIFT_CTRL_MASK (0x000c) -#define LAN78XX_PHY_CTRL3_DOWNSHIFT_CTRL_2 (0x0000) -#define LAN78XX_PHY_CTRL3_DOWNSHIFT_CTRL_3 (0x0004) -#define LAN78XX_PHY_CTRL3_DOWNSHIFT_CTRL_4 (0x0008) -#define LAN78XX_PHY_CTRL3_DOWNSHIFT_CTRL_5 (0x000c) - -/* DSP registers */ -#define PHY_ARDENNES_MMD_DEV_3_PHY_CFG (0x806A) -#define PHY_ARDENNES_MMD_DEV_3_PHY_CFG_ZD_DLY_EN_ (0x2000) -#define LAN88XX_EXT_PAGE_ACCESS_TR (0x52B5) -#define LAN88XX_EXT_PAGE_TR_CR 16 -#define LAN88XX_EXT_PAGE_TR_LOW_DATA 17 -#define LAN88XX_EXT_PAGE_TR_HIGH_DATA 18 - #endif /* _MICROCHIPPHY_H */ diff --git a/include/linux/migrate.h b/include/linux/migrate.h index c8077e9366..ae8d475a93 100644 --- a/include/linux/migrate.h +++ b/include/linux/migrate.h @@ -1,17 +1,14 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_MIGRATE_H #define _LINUX_MIGRATE_H #include #include #include -#include -typedef struct page *new_page_t(struct page *page, unsigned long private); +typedef struct page *new_page_t(struct page *page, unsigned long private, + int **reason); typedef void free_page_t(struct page *page, unsigned long private); -struct migration_target_control; - /* * Return values from addresss_space_operations.migratepage(): * - negative errno on page migration failure; @@ -19,11 +16,6 @@ struct 
migration_target_control; */ #define MIGRATEPAGE_SUCCESS 0 -/* - * Keep sync with: - * - macro MIGRATE_REASON in include/trace/events/migrate.h - * - migrate_reason_names[MR_TYPES] in mm/debug.c - */ enum migrate_reason { MR_COMPACTION, MR_MEMORY_FAILURE, @@ -31,48 +23,42 @@ enum migrate_reason { MR_SYSCALL, /* also applies to cpusets */ MR_MEMPOLICY_MBIND, MR_NUMA_MISPLACED, - MR_CONTIG_RANGE, - MR_LONGTERM_PIN, - MR_DEMOTION, + MR_CMA, MR_TYPES }; -extern const char *migrate_reason_names[MR_TYPES]; +/* In mm/debug.c; also keep sync with include/trace/events/migrate.h */ +extern char *migrate_reason_names[MR_TYPES]; #ifdef CONFIG_MIGRATION extern void putback_movable_pages(struct list_head *l); -extern int migrate_page(struct address_space *mapping, - struct page *newpage, struct page *page, - enum migrate_mode mode); +extern int migrate_page(struct address_space *, + struct page *, struct page *, enum migrate_mode); extern int migrate_pages(struct list_head *l, new_page_t new, free_page_t free, - unsigned long private, enum migrate_mode mode, int reason, - unsigned int *ret_succeeded); -extern struct page *alloc_migration_target(struct page *page, unsigned long private); -extern int isolate_movable_page(struct page *page, isolate_mode_t mode); + unsigned long private, enum migrate_mode mode, int reason); +extern bool isolate_movable_page(struct page *page, isolate_mode_t mode); +extern void putback_movable_page(struct page *page); -extern void migrate_page_states(struct page *newpage, struct page *page); +extern int migrate_prep(void); +extern int migrate_prep_local(void); extern void migrate_page_copy(struct page *newpage, struct page *page); extern int migrate_huge_page_move_mapping(struct address_space *mapping, struct page *newpage, struct page *page); extern int migrate_page_move_mapping(struct address_space *mapping, - struct page *newpage, struct page *page, int extra_count); + struct page *newpage, struct page *page, + struct buffer_head *head, enum 
migrate_mode mode, + int extra_count); #else static inline void putback_movable_pages(struct list_head *l) {} static inline int migrate_pages(struct list_head *l, new_page_t new, free_page_t free, unsigned long private, enum migrate_mode mode, - int reason, unsigned int *ret_succeeded) + int reason) { return -ENOSYS; } -static inline struct page *alloc_migration_target(struct page *page, - unsigned long private) - { return NULL; } -static inline int isolate_movable_page(struct page *page, isolate_mode_t mode) - { return -EBUSY; } -static inline void migrate_page_states(struct page *newpage, struct page *page) -{ -} +static inline int migrate_prep(void) { return -ENOSYS; } +static inline int migrate_prep_local(void) { return -ENOSYS; } static inline void migrate_page_copy(struct page *newpage, struct page *page) {} @@ -82,6 +68,7 @@ static inline int migrate_huge_page_move_mapping(struct address_space *mapping, { return -ENOSYS; } + #endif /* CONFIG_MIGRATION */ #ifdef CONFIG_COMPACTION @@ -89,7 +76,7 @@ extern int PageMovable(struct page *page); extern void __SetPageMovable(struct page *page, struct address_space *mapping); extern void __ClearPageMovable(struct page *page); #else -static inline int PageMovable(struct page *page) { return 0; } +static inline int PageMovable(struct page *page) { return 0; }; static inline void __SetPageMovable(struct page *page, struct address_space *mapping) { @@ -100,9 +87,14 @@ static inline void __ClearPageMovable(struct page *page) #endif #ifdef CONFIG_NUMA_BALANCING +extern bool pmd_trans_migrating(pmd_t pmd); extern int migrate_misplaced_page(struct page *page, struct vm_area_struct *vma, int node); #else +static inline bool pmd_trans_migrating(pmd_t pmd) +{ + return false; +} static inline int migrate_misplaced_page(struct page *page, struct vm_area_struct *vma, int node) { @@ -110,77 +102,21 @@ static inline int migrate_misplaced_page(struct page *page, } #endif /* CONFIG_NUMA_BALANCING */ -#ifdef CONFIG_MIGRATION - -/* - * 
Watch out for PAE architecture, which has an unsigned long, and might not - * have enough bits to store all physical address and flags. So far we have - * enough room for all our flags. - */ -#define MIGRATE_PFN_VALID (1UL << 0) -#define MIGRATE_PFN_MIGRATE (1UL << 1) -#define MIGRATE_PFN_LOCKED (1UL << 2) -#define MIGRATE_PFN_WRITE (1UL << 3) -#define MIGRATE_PFN_SHIFT 6 - -static inline struct page *migrate_pfn_to_page(unsigned long mpfn) +#if defined(CONFIG_NUMA_BALANCING) && defined(CONFIG_TRANSPARENT_HUGEPAGE) +extern int migrate_misplaced_transhuge_page(struct mm_struct *mm, + struct vm_area_struct *vma, + pmd_t *pmd, pmd_t entry, + unsigned long address, + struct page *page, int node); +#else +static inline int migrate_misplaced_transhuge_page(struct mm_struct *mm, + struct vm_area_struct *vma, + pmd_t *pmd, pmd_t entry, + unsigned long address, + struct page *page, int node) { - if (!(mpfn & MIGRATE_PFN_VALID)) - return NULL; - return pfn_to_page(mpfn >> MIGRATE_PFN_SHIFT); + return -EAGAIN; } - -static inline unsigned long migrate_pfn(unsigned long pfn) -{ - return (pfn << MIGRATE_PFN_SHIFT) | MIGRATE_PFN_VALID; -} - -enum migrate_vma_direction { - MIGRATE_VMA_SELECT_SYSTEM = 1 << 0, - MIGRATE_VMA_SELECT_DEVICE_PRIVATE = 1 << 1, -}; - -struct migrate_vma { - struct vm_area_struct *vma; - /* - * Both src and dst array must be big enough for - * (end - start) >> PAGE_SHIFT entries. - * - * The src array must not be modified by the caller after - * migrate_vma_setup(), and must not change the dst array after - * migrate_vma_pages() returns. - */ - unsigned long *dst; - unsigned long *src; - unsigned long cpages; - unsigned long npages; - unsigned long start; - unsigned long end; - - /* - * Set to the owner value also stored in page->pgmap->owner for - * migrating out of device private memory. The flags also need to - * be set to MIGRATE_VMA_SELECT_DEVICE_PRIVATE. 
- * The caller should always set this field when using mmu notifier - * callbacks to avoid device MMU invalidations for device private - * pages that are not being migrated. - */ - void *pgmap_owner; - unsigned long flags; -}; - -int migrate_vma_setup(struct migrate_vma *args); -void migrate_vma_pages(struct migrate_vma *migrate); -void migrate_vma_finalize(struct migrate_vma *migrate); -int next_demotion_node(int node); - -#else /* CONFIG_MIGRATION disabled: */ - -static inline int next_demotion_node(int node) -{ - return NUMA_NO_NODE; -} - -#endif /* CONFIG_MIGRATION */ +#endif /* CONFIG_NUMA_BALANCING && CONFIG_TRANSPARENT_HUGEPAGE*/ #endif /* _LINUX_MIGRATE_H */ diff --git a/include/linux/migrate_mode.h b/include/linux/migrate_mode.h index 883c992490..ebf3d89a39 100644 --- a/include/linux/migrate_mode.h +++ b/include/linux/migrate_mode.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef MIGRATE_MODE_H_INCLUDED #define MIGRATE_MODE_H_INCLUDED /* @@ -7,16 +6,11 @@ * on most operations but not ->writepage as the potential stall time * is too significant * MIGRATE_SYNC will block when migrating pages - * MIGRATE_SYNC_NO_COPY will block when migrating pages but will not copy pages - * with the CPU. Instead, page copy happens outside the migratepage() - * callback and is likely using a DMA engine. See migrate_vma() and HMM - * (mm/hmm.c) for users of this mode. */ enum migrate_mode { MIGRATE_ASYNC, MIGRATE_SYNC_LIGHT, MIGRATE_SYNC, - MIGRATE_SYNC_NO_COPY, }; #endif /* MIGRATE_MODE_H_INCLUDED */ diff --git a/include/linux/mii.h b/include/linux/mii.h index 12ea29e042..47492c9631 100644 --- a/include/linux/mii.h +++ b/include/linux/mii.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ /* * linux/mii.h: definitions for MII-compatible transceivers * Originally drivers/net/sunhme.h. 
@@ -10,7 +9,6 @@ #include -#include #include struct ethtool_cmd; @@ -32,12 +30,8 @@ struct mii_if_info { extern int mii_link_ok (struct mii_if_info *mii); extern int mii_nway_restart (struct mii_if_info *mii); -extern void mii_ethtool_gset(struct mii_if_info *mii, struct ethtool_cmd *ecmd); -extern void mii_ethtool_get_link_ksettings( - struct mii_if_info *mii, struct ethtool_link_ksettings *cmd); +extern int mii_ethtool_gset(struct mii_if_info *mii, struct ethtool_cmd *ecmd); extern int mii_ethtool_sset(struct mii_if_info *mii, struct ethtool_cmd *ecmd); -extern int mii_ethtool_set_link_ksettings( - struct mii_if_info *mii, const struct ethtool_link_ksettings *cmd); extern int mii_check_gmii_support(struct mii_if_info *mii); extern void mii_check_link (struct mii_if_info *mii); extern unsigned int mii_check_media (struct mii_if_info *mii, @@ -132,34 +126,6 @@ static inline u32 ethtool_adv_to_mii_adv_t(u32 ethadv) return result; } -/** - * linkmode_adv_to_mii_adv_t - * @advertising: the linkmode advertisement settings - * - * A small helper function that translates linkmode advertisement - * settings to phy autonegotiation advertisements for the - * MII_ADVERTISE register. 
- */ -static inline u32 linkmode_adv_to_mii_adv_t(unsigned long *advertising) -{ - u32 result = 0; - - if (linkmode_test_bit(ETHTOOL_LINK_MODE_10baseT_Half_BIT, advertising)) - result |= ADVERTISE_10HALF; - if (linkmode_test_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT, advertising)) - result |= ADVERTISE_10FULL; - if (linkmode_test_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT, advertising)) - result |= ADVERTISE_100HALF; - if (linkmode_test_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT, advertising)) - result |= ADVERTISE_100FULL; - if (linkmode_test_bit(ETHTOOL_LINK_MODE_Pause_BIT, advertising)) - result |= ADVERTISE_PAUSE_CAP; - if (linkmode_test_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, advertising)) - result |= ADVERTISE_PAUSE_ASYM; - - return result; -} - /** * mii_adv_to_ethtool_adv_t * @adv: value of the MII_ADVERTISE register @@ -207,28 +173,6 @@ static inline u32 ethtool_adv_to_mii_ctrl1000_t(u32 ethadv) return result; } -/** - * linkmode_adv_to_mii_ctrl1000_t - * @advertising: the linkmode advertisement settings - * - * A small helper function that translates linkmode advertisement - * settings to phy autonegotiation advertisements for the - * MII_CTRL1000 register when in 1000T mode. - */ -static inline u32 linkmode_adv_to_mii_ctrl1000_t(unsigned long *advertising) -{ - u32 result = 0; - - if (linkmode_test_bit(ETHTOOL_LINK_MODE_1000baseT_Half_BIT, - advertising)) - result |= ADVERTISE_1000HALF; - if (linkmode_test_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT, - advertising)) - result |= ADVERTISE_1000FULL; - - return result; -} - /** * mii_ctrl1000_to_ethtool_adv_t * @adv: value of the MII_CTRL1000 register @@ -287,25 +231,6 @@ static inline u32 mii_stat1000_to_ethtool_lpa_t(u32 lpa) return result; } -/** - * mii_stat1000_mod_linkmode_lpa_t - * @advertising: target the linkmode advertisement settings - * @adv: value of the MII_STAT1000 register - * - * A small helper function that translates MII_STAT1000 bits, when in - * 1000Base-T mode, to linkmode advertisement settings. 
Other bits in - * advertising are not changes. - */ -static inline void mii_stat1000_mod_linkmode_lpa_t(unsigned long *advertising, - u32 lpa) -{ - linkmode_mod_bit(ETHTOOL_LINK_MODE_1000baseT_Half_BIT, - advertising, lpa & LPA_1000HALF); - - linkmode_mod_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT, - advertising, lpa & LPA_1000FULL); -} - /** * ethtool_adv_to_mii_adv_x * @ethadv: the ethtool advertisement settings @@ -355,205 +280,21 @@ static inline u32 mii_adv_to_ethtool_adv_x(u32 adv) } /** - * mii_lpa_mod_linkmode_adv_sgmii - * @lp_advertising: pointer to destination link mode. - * @lpa: value of the MII_LPA register - * - * A small helper function that translates MII_LPA bits to - * linkmode advertisement settings for SGMII. - * Leaves other bits unchanged. - */ -static inline void -mii_lpa_mod_linkmode_lpa_sgmii(unsigned long *lp_advertising, u32 lpa) -{ - u32 speed_duplex = lpa & LPA_SGMII_DPX_SPD_MASK; - - linkmode_mod_bit(ETHTOOL_LINK_MODE_1000baseT_Half_BIT, lp_advertising, - speed_duplex == LPA_SGMII_1000HALF); - - linkmode_mod_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT, lp_advertising, - speed_duplex == LPA_SGMII_1000FULL); - - linkmode_mod_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT, lp_advertising, - speed_duplex == LPA_SGMII_100HALF); - - linkmode_mod_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT, lp_advertising, - speed_duplex == LPA_SGMII_100FULL); - - linkmode_mod_bit(ETHTOOL_LINK_MODE_10baseT_Half_BIT, lp_advertising, - speed_duplex == LPA_SGMII_10HALF); - - linkmode_mod_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT, lp_advertising, - speed_duplex == LPA_SGMII_10FULL); -} - -/** - * mii_lpa_to_linkmode_adv_sgmii - * @advertising: pointer to destination link mode. - * @lpa: value of the MII_LPA register - * - * A small helper function that translates MII_ADVERTISE bits - * to linkmode advertisement settings when in SGMII mode. - * Clears the old value of advertising. 
- */ -static inline void mii_lpa_to_linkmode_lpa_sgmii(unsigned long *lp_advertising, - u32 lpa) -{ - linkmode_zero(lp_advertising); - - mii_lpa_mod_linkmode_lpa_sgmii(lp_advertising, lpa); -} - -/** - * mii_adv_mod_linkmode_adv_t - * @advertising:pointer to destination link mode. - * @adv: value of the MII_ADVERTISE register - * - * A small helper function that translates MII_ADVERTISE bits to - * linkmode advertisement settings. Leaves other bits unchanged. - */ -static inline void mii_adv_mod_linkmode_adv_t(unsigned long *advertising, - u32 adv) -{ - linkmode_mod_bit(ETHTOOL_LINK_MODE_10baseT_Half_BIT, - advertising, adv & ADVERTISE_10HALF); - - linkmode_mod_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT, - advertising, adv & ADVERTISE_10FULL); - - linkmode_mod_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT, - advertising, adv & ADVERTISE_100HALF); - - linkmode_mod_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT, - advertising, adv & ADVERTISE_100FULL); - - linkmode_mod_bit(ETHTOOL_LINK_MODE_Pause_BIT, advertising, - adv & ADVERTISE_PAUSE_CAP); - - linkmode_mod_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, - advertising, adv & ADVERTISE_PAUSE_ASYM); -} - -/** - * mii_adv_to_linkmode_adv_t - * @advertising:pointer to destination link mode. - * @adv: value of the MII_ADVERTISE register - * - * A small helper function that translates MII_ADVERTISE bits - * to linkmode advertisement settings. Clears the old value - * of advertising. - */ -static inline void mii_adv_to_linkmode_adv_t(unsigned long *advertising, - u32 adv) -{ - linkmode_zero(advertising); - - mii_adv_mod_linkmode_adv_t(advertising, adv); -} - -/** - * mii_lpa_to_linkmode_lpa_t + * mii_lpa_to_ethtool_lpa_x * @adv: value of the MII_LPA register * - * A small helper function that translates MII_LPA bits, when in - * 1000Base-T mode, to linkmode LP advertisement settings. 
Clears the - * old value of advertising + * A small helper function that translates MII_LPA + * bits, when in 1000Base-X mode, to ethtool + * LP advertisement settings. */ -static inline void mii_lpa_to_linkmode_lpa_t(unsigned long *lp_advertising, - u32 lpa) +static inline u32 mii_lpa_to_ethtool_lpa_x(u32 lpa) { - mii_adv_to_linkmode_adv_t(lp_advertising, lpa); + u32 result = 0; if (lpa & LPA_LPACK) - linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, - lp_advertising); + result |= ADVERTISED_Autoneg; -} - -/** - * mii_lpa_mod_linkmode_lpa_t - * @adv: value of the MII_LPA register - * - * A small helper function that translates MII_LPA bits, when in - * 1000Base-T mode, to linkmode LP advertisement settings. Leaves - * other bits unchanged. - */ -static inline void mii_lpa_mod_linkmode_lpa_t(unsigned long *lp_advertising, - u32 lpa) -{ - mii_adv_mod_linkmode_adv_t(lp_advertising, lpa); - - linkmode_mod_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, - lp_advertising, lpa & LPA_LPACK); -} - -static inline void mii_ctrl1000_mod_linkmode_adv_t(unsigned long *advertising, - u32 ctrl1000) -{ - linkmode_mod_bit(ETHTOOL_LINK_MODE_1000baseT_Half_BIT, advertising, - ctrl1000 & ADVERTISE_1000HALF); - linkmode_mod_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT, advertising, - ctrl1000 & ADVERTISE_1000FULL); -} - -/** - * linkmode_adv_to_lcl_adv_t - * @advertising:pointer to linkmode advertising - * - * A small helper function that translates linkmode advertising to LVL - * pause capabilities. 
- */ -static inline u32 linkmode_adv_to_lcl_adv_t(unsigned long *advertising) -{ - u32 lcl_adv = 0; - - if (linkmode_test_bit(ETHTOOL_LINK_MODE_Pause_BIT, - advertising)) - lcl_adv |= ADVERTISE_PAUSE_CAP; - if (linkmode_test_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, - advertising)) - lcl_adv |= ADVERTISE_PAUSE_ASYM; - - return lcl_adv; -} - -/** - * mii_lpa_mod_linkmode_x - decode the link partner's config_reg to linkmodes - * @linkmodes: link modes array - * @lpa: config_reg word from link partner - * @fd_bit: link mode for 1000XFULL bit - */ -static inline void mii_lpa_mod_linkmode_x(unsigned long *linkmodes, u16 lpa, - int fd_bit) -{ - linkmode_mod_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, linkmodes, - lpa & LPA_LPACK); - linkmode_mod_bit(ETHTOOL_LINK_MODE_Pause_BIT, linkmodes, - lpa & LPA_1000XPAUSE); - linkmode_mod_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, linkmodes, - lpa & LPA_1000XPAUSE_ASYM); - linkmode_mod_bit(fd_bit, linkmodes, - lpa & LPA_1000XFULL); -} - -/** - * linkmode_adv_to_mii_adv_x - encode a linkmode to config_reg - * @linkmodes: linkmodes - * @fd_bit: full duplex bit - */ -static inline u16 linkmode_adv_to_mii_adv_x(const unsigned long *linkmodes, - int fd_bit) -{ - u16 adv = 0; - - if (linkmode_test_bit(fd_bit, linkmodes)) - adv |= ADVERTISE_1000XFULL; - if (linkmode_test_bit(ETHTOOL_LINK_MODE_Pause_BIT, linkmodes)) - adv |= ADVERTISE_1000XPAUSE; - if (linkmode_test_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, linkmodes)) - adv |= ADVERTISE_1000XPSE_ASYM; - - return adv; + return result | mii_adv_to_ethtool_adv_x(lpa); } /** diff --git a/include/linux/miscdevice.h b/include/linux/miscdevice.h index 0676f18093..722698a43d 100644 --- a/include/linux/miscdevice.h +++ b/include/linux/miscdevice.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_MISCDEVICE_H #define _LINUX_MISCDEVICE_H #include @@ -7,9 +6,9 @@ #include /* - * These allocations are managed by device@lanana.org. 
If you need - * an entry that is not assigned here, it can be moved and - * reassigned or dynamically set if a fixed value is not justified. + * These allocations are managed by device@lanana.org. If you use an + * entry that is not in assigned your entry may well be moved and + * reassigned, or set dynamic if a fixed value is not justified. */ #define PSMOUSE_MINOR 1 @@ -23,33 +22,17 @@ /*#define ADB_MOUSE_MINOR 10 FIXME OBSOLETE */ #define WATCHDOG_MINOR 130 /* Watchdog timer */ #define TEMP_MINOR 131 /* Temperature Sensor */ -#define APM_MINOR_DEV 134 #define RTC_MINOR 135 -/*#define EFI_RTC_MINOR 136 was EFI Time services */ +#define EFI_RTC_MINOR 136 /* EFI Time services */ #define VHCI_MINOR 137 #define SUN_OPENPROM_MINOR 139 #define DMAPI_MINOR 140 /* unused */ #define NVRAM_MINOR 144 -#define SBUS_FLASH_MINOR 152 #define SGI_MMTIMER 153 -#define PMU_MINOR 154 #define STORE_QUEUE_MINOR 155 /* unused */ -#define LCD_MINOR 156 -#define AC_MINOR 157 -#define BUTTON_MINOR 158 /* Major 10, Minor 158, /dev/nwbutton */ -#define NWFLASH_MINOR 160 /* MAJOR is 10 - miscdevice */ -#define ENVCTRL_MINOR 162 #define I2O_MINOR 166 -#define UCTRL_MINOR 174 -#define AGPGART_MINOR 175 -#define TOSH_MINOR_DEV 181 -#define HWRNG_MINOR 183 #define MICROCODE_MINOR 184 -#define KEYPAD_MINOR 185 -#define IRNET_MINOR 187 -#define D7S_MINOR 193 #define VFIO_MINOR 196 -#define PXA3XX_GCU_MINOR 197 #define TUN_MINOR 200 #define CUSE_MINOR 203 #define MWAVE_MINOR 219 /* ACP/Mwave Modem */ @@ -60,7 +43,6 @@ #define MISC_MCELOG_MINOR 227 #define HPET_MINOR 228 #define FUSE_MINOR 229 -#define SNAPSHOT_MINOR 231 #define KVM_MINOR 232 #define BTRFS_MINOR 234 #define AUTOFS_MINOR 235 @@ -69,8 +51,6 @@ #define VHOST_NET_MINOR 238 #define UHID_MINOR 239 #define USERIO_MINOR 240 -#define VHOST_VSOCK_MINOR 241 -#define RFKILL_MINOR 242 #define MISC_DYNAMIC_MINOR 255 struct device; @@ -91,16 +71,9 @@ struct miscdevice { extern int misc_register(struct miscdevice *misc); extern void 
misc_deregister(struct miscdevice *misc); -/* - * Helper macro for drivers that don't do anything special in the initcall. - * This helps to eliminate boilerplate code. - */ -#define builtin_misc_device(__misc_device) \ - builtin_driver(__misc_device, misc_register) - /* * Helper macro for drivers that don't do anything special in module init / exit - * call. This helps to eliminate boilerplate code. + * call. This helps in eleminating of boilerplate code. */ #define module_misc_device(__misc_device) \ module_driver(__misc_device, misc_register, misc_deregister) diff --git a/include/linux/mlx4/cmd.h b/include/linux/mlx4/cmd.h index 7b74afcbba..1f3568694a 100644 --- a/include/linux/mlx4/cmd.h +++ b/include/linux/mlx4/cmd.h @@ -308,7 +308,7 @@ int mlx4_get_counter_stats(struct mlx4_dev *dev, int counter_index, int mlx4_get_vf_stats(struct mlx4_dev *dev, int port, int vf_idx, struct ifla_vf_stats *vf_stats); u32 mlx4_comm_get_version(void); -int mlx4_set_vf_mac(struct mlx4_dev *dev, int port, int vf, u8 *mac); +int mlx4_set_vf_mac(struct mlx4_dev *dev, int port, int vf, u64 mac); int mlx4_set_vf_vlan(struct mlx4_dev *dev, int port, int vf, u16 vlan, u8 qos, __be16 proto); int mlx4_set_vf_rate(struct mlx4_dev *dev, int port, int vf, int min_tx_rate, diff --git a/include/linux/mlx4/cq.h b/include/linux/mlx4/cq.h index 653d2a0aa4..09cebe5284 100644 --- a/include/linux/mlx4/cq.h +++ b/include/linux/mlx4/cq.h @@ -130,20 +130,12 @@ enum { MLX4_CQE_STATUS_IPOK = 1 << 12, }; -/* L4_CSUM is logically part of status, but has to checked against badfcs_enc */ -enum { - MLX4_CQE_STATUS_L4_CSUM = 1 << 2, -}; - enum { MLX4_CQE_LLC = 1, MLX4_CQE_SNAP = 1 << 1, MLX4_CQE_BAD_FCS = 1 << 4, }; -#define MLX4_MAX_CQ_PERIOD (BIT(16) - 1) -#define MLX4_MAX_CQ_COUNT (BIT(16) - 1) - static inline void mlx4_cq_arm(struct mlx4_cq *cq, u32 cmd, void __iomem *uar_page, spinlock_t *doorbell_lock) diff --git a/include/linux/mlx4/device.h b/include/linux/mlx4/device.h index 30bb59fe97..c9f379689d 
100644 --- a/include/linux/mlx4/device.h +++ b/include/linux/mlx4/device.h @@ -40,13 +40,14 @@ #include #include -#include +#include #include #define DEFAULT_UAR_PAGE_SHIFT 12 -#define MAX_MSIX 128 +#define MAX_MSIX_P_PORT 17 +#define MAX_MSIX 64 #define MIN_MSIX_P_PORT 5 #define MLX4_IS_LEGACY_EQ_MODE(dev_cap) ((dev_cap).num_comp_vectors < \ (dev_cap).num_ports * MIN_MSIX_P_PORT) @@ -107,7 +108,7 @@ enum { MLX4_MFUNC_EQE_MASK = (MLX4_MFUNC_MAX_EQES - 1) }; -/* Driver supports 3 different device methods to manage traffic steering: +/* Driver supports 3 diffrent device methods to manage traffic steering: * -device managed - High level API for ib and eth flow steering. FW is * managing flow steering tables. * - B0 steering mode - Common low level API for ib and (if supported) eth. @@ -223,9 +224,6 @@ enum { MLX4_DEV_CAP_FLAG2_DIAG_PER_PORT = 1ULL << 35, MLX4_DEV_CAP_FLAG2_SVLAN_BY_QP = 1ULL << 36, MLX4_DEV_CAP_FLAG2_SL_TO_VL_CHANGE_EVENT = 1ULL << 37, - MLX4_DEV_CAP_FLAG2_USER_MAC_EN = 1ULL << 38, - MLX4_DEV_CAP_FLAG2_DRIVER_VERSION_TO_FW = 1ULL << 39, - MLX4_DEV_CAP_FLAG2_SW_CQ_INIT = 1ULL << 40, }; enum { @@ -257,6 +255,10 @@ enum { MLX4_DEV_CAP_EQE_STRIDE_ENABLED = 1LL << 3 }; +enum { + MLX4_USER_DEV_CAP_LARGE_CQE = 1L << 0 +}; + enum { MLX4_FUNC_CAP_64B_EQE_CQE = 1L << 0, MLX4_FUNC_CAP_EQE_CQE_STRIDE = 1L << 1, @@ -426,12 +428,6 @@ enum mlx4_steer_type { MLX4_NUM_STEERS }; -enum mlx4_resource_usage { - MLX4_RES_USAGE_NONE, - MLX4_RES_USAGE_DRIVER, - MLX4_RES_USAGE_USER_VERBS, -}; - enum { MLX4_NUM_FEXCH = 64 * 1024, }; @@ -480,7 +476,6 @@ enum { enum { MLX4_INTERFACE_STATE_UP = 1 << 0, MLX4_INTERFACE_STATE_DELETION = 1 << 1, - MLX4_INTERFACE_STATE_NOWAIT = 1 << 2, }; #define MSTR_SM_CHANGE_MASK (MLX4_EQ_PORT_INFO_MSTR_SM_SL_CHANGE_MASK | \ @@ -522,14 +517,6 @@ struct mlx4_phys_caps { u32 base_tunnel_sqpn; }; -struct mlx4_spec_qps { - u32 qp0_qkey; - u32 qp0_proxy; - u32 qp0_tunnel; - u32 qp1_proxy; - u32 qp1_tunnel; -}; - struct mlx4_caps { u64 fw_ver; u32 
function; @@ -559,7 +546,11 @@ struct mlx4_caps { int max_qp_init_rdma; int max_qp_dest_rdma; int max_tc_eth; - struct mlx4_spec_qps *spec_qps; + u32 *qp0_qkey; + u32 *qp0_proxy; + u32 *qp1_proxy; + u32 *qp0_tunnel; + u32 *qp1_tunnel; int num_srqs; int max_srq_wqes; int max_srq_sge; @@ -572,6 +563,7 @@ struct mlx4_caps { int reserved_eqs; int num_comp_vectors; int num_mpts; + int max_fmr_maps; int num_mtts; int fmr_reserved_mtts; int reserved_mtts; @@ -627,10 +619,7 @@ struct mlx4_caps { u32 dmfs_high_rate_qpn_base; u32 dmfs_high_rate_qpn_range; u32 vf_caps; - bool wol_port[MLX4_MAX_PORTS + 1]; struct mlx4_rate_limit_caps rl_caps; - u32 health_buffer_addrs; - bool map_clock_to_user; }; struct mlx4_buf_list { @@ -706,6 +695,17 @@ struct mlx4_mw { int enabled; }; +struct mlx4_fmr { + struct mlx4_mr mr; + struct mlx4_mpt_entry *mpt; + __be64 *mtts; + dma_addr_t dma_handle; + int max_pages; + int max_maps; + int maps; + u8 page_shift; +}; + struct mlx4_uar { unsigned long pfn; int index; @@ -738,7 +738,7 @@ struct mlx4_cq { int cqn; unsigned vector; - refcount_t refcount; + atomic_t refcount; struct completion free; struct { struct list_head list; @@ -747,7 +747,6 @@ struct mlx4_cq { } tasklet_ctx; int reset_notify_added; struct list_head reset_notify; - u8 usage; }; struct mlx4_qp { @@ -755,9 +754,8 @@ struct mlx4_qp { int qpn; - refcount_t refcount; + atomic_t refcount; struct completion free; - u8 usage; }; struct mlx4_srq { @@ -768,7 +766,7 @@ struct mlx4_srq { int max_gs; int wqe_shift; - refcount_t refcount; + atomic_t refcount; struct completion free; }; @@ -841,12 +839,6 @@ struct mlx4_vf_dev { u8 n_ports; }; -struct mlx4_fw_crdump { - bool snapshot_enable; - struct devlink_region *region_crspace; - struct devlink_region *region_fw_health; -}; - enum mlx4_pci_status { MLX4_PCI_STATUS_DISABLED, MLX4_PCI_STATUS_ENABLED, @@ -867,7 +859,6 @@ struct mlx4_dev_persistent { u8 interface_state; struct mutex pci_status_mutex; /* sync pci state */ enum mlx4_pci_status 
pci_status; - struct mlx4_fw_crdump crdump; }; struct mlx4_dev { @@ -1019,7 +1010,8 @@ struct mlx4_mad_ifc { #define mlx4_foreach_ib_transport_port(port, dev) \ for ((port) = 1; (port) <= (dev)->caps.num_ports; (port)++) \ if (((dev)->caps.port_mask[port] == MLX4_PORT_TYPE_IB) || \ - ((dev)->caps.port_mask[port] == MLX4_PORT_TYPE_ETH)) + ((dev)->caps.flags & MLX4_DEV_CAP_FLAG_IBOE) || \ + ((dev)->caps.flags2 & MLX4_DEV_CAP_FLAG2_ROCE_V1_V2)) #define MLX4_INVALID_SLAVE_ID 0xFF #define MLX4_SINK_COUNTER_INDEX(dev) (dev->caps.max_counters - 1) @@ -1076,7 +1068,7 @@ static inline int mlx4_is_eth(struct mlx4_dev *dev, int port) } int mlx4_buf_alloc(struct mlx4_dev *dev, int size, int max_direct, - struct mlx4_buf *buf); + struct mlx4_buf *buf, gfp_t gfp); void mlx4_buf_free(struct mlx4_dev *dev, int size, struct mlx4_buf *buf); static inline void *mlx4_buf_offset(struct mlx4_buf *buf, int offset) { @@ -1113,9 +1105,10 @@ int mlx4_mw_enable(struct mlx4_dev *dev, struct mlx4_mw *mw); int mlx4_write_mtt(struct mlx4_dev *dev, struct mlx4_mtt *mtt, int start_index, int npages, u64 *page_list); int mlx4_buf_write_mtt(struct mlx4_dev *dev, struct mlx4_mtt *mtt, - struct mlx4_buf *buf); + struct mlx4_buf *buf, gfp_t gfp); -int mlx4_db_alloc(struct mlx4_dev *dev, struct mlx4_db *db, int order); +int mlx4_db_alloc(struct mlx4_dev *dev, struct mlx4_db *db, int order, + gfp_t gfp); void mlx4_db_free(struct mlx4_dev *dev, struct mlx4_db *db); int mlx4_alloc_hwq_res(struct mlx4_dev *dev, struct mlx4_hwq_resources *wqres, @@ -1125,14 +1118,14 @@ void mlx4_free_hwq_res(struct mlx4_dev *mdev, struct mlx4_hwq_resources *wqres, int mlx4_cq_alloc(struct mlx4_dev *dev, int nent, struct mlx4_mtt *mtt, struct mlx4_uar *uar, u64 db_rec, struct mlx4_cq *cq, - unsigned int vector, int collapsed, int timestamp_en, - void *buf_addr, bool user_cq); + unsigned vector, int collapsed, int timestamp_en); void mlx4_cq_free(struct mlx4_dev *dev, struct mlx4_cq *cq); int mlx4_qp_reserve_range(struct 
mlx4_dev *dev, int cnt, int align, - int *base, u8 flags, u8 usage); + int *base, u8 flags); void mlx4_qp_release_range(struct mlx4_dev *dev, int base_qpn, int cnt); -int mlx4_qp_alloc(struct mlx4_dev *dev, int qpn, struct mlx4_qp *qp); +int mlx4_qp_alloc(struct mlx4_dev *dev, int qpn, struct mlx4_qp *qp, + gfp_t gfp); void mlx4_qp_free(struct mlx4_dev *dev, struct mlx4_qp *qp); int mlx4_srq_alloc(struct mlx4_dev *dev, u32 pdn, u32 cqn, u16 xrcdn, @@ -1381,8 +1374,6 @@ int mlx4_get_base_qpn(struct mlx4_dev *dev, u8 port); int __mlx4_replace_mac(struct mlx4_dev *dev, u8 port, int qpn, u64 new_mac); int mlx4_SET_PORT_general(struct mlx4_dev *dev, u8 port, int mtu, u8 pptx, u8 pfctx, u8 pprx, u8 pfcrx); -int mlx4_SET_PORT_user_mac(struct mlx4_dev *dev, u8 port, u8 *user_mac); -int mlx4_SET_PORT_user_mtu(struct mlx4_dev *dev, u8 port, u16 user_mtu); int mlx4_SET_PORT_qpn_calc(struct mlx4_dev *dev, u8 port, u32 base_qpn, u8 promisc); int mlx4_SET_PORT_BEACON(struct mlx4_dev *dev, u8 port, u16 time); @@ -1393,13 +1384,19 @@ int set_phv_bit(struct mlx4_dev *dev, u8 port, int new_val); int get_phv_bit(struct mlx4_dev *dev, u8 port, int *phv); int mlx4_get_is_vlan_offload_disabled(struct mlx4_dev *dev, u8 port, bool *vlan_offload_disabled); -void mlx4_handle_eth_header_mcast_prio(struct mlx4_net_trans_rule_hw_ctrl *ctrl, - struct _rule_hw *eth_header); int mlx4_find_cached_mac(struct mlx4_dev *dev, u8 port, u64 mac, int *idx); int mlx4_find_cached_vlan(struct mlx4_dev *dev, u8 port, u16 vid, int *idx); int mlx4_register_vlan(struct mlx4_dev *dev, u8 port, u16 vlan, int *index); void mlx4_unregister_vlan(struct mlx4_dev *dev, u8 port, u16 vlan); +int mlx4_map_phys_fmr(struct mlx4_dev *dev, struct mlx4_fmr *fmr, u64 *page_list, + int npages, u64 iova, u32 *lkey, u32 *rkey); +int mlx4_fmr_alloc(struct mlx4_dev *dev, u32 pd, u32 access, int max_pages, + int max_maps, u8 page_shift, struct mlx4_fmr *fmr); +int mlx4_fmr_enable(struct mlx4_dev *dev, struct mlx4_fmr *fmr); +void 
mlx4_fmr_unmap(struct mlx4_dev *dev, struct mlx4_fmr *fmr, + u32 *lkey, u32 *rkey); +int mlx4_fmr_free(struct mlx4_dev *dev, struct mlx4_fmr *fmr); int mlx4_SYNC_TPT(struct mlx4_dev *dev); int mlx4_test_interrupt(struct mlx4_dev *dev, int vector); int mlx4_test_async(struct mlx4_dev *dev); @@ -1419,7 +1416,7 @@ int mlx4_get_phys_port_id(struct mlx4_dev *dev); int mlx4_wol_read(struct mlx4_dev *dev, u64 *config, int port); int mlx4_wol_write(struct mlx4_dev *dev, u64 config, int port); -int mlx4_counter_alloc(struct mlx4_dev *dev, u32 *idx, u8 usage); +int mlx4_counter_alloc(struct mlx4_dev *dev, u32 *idx); void mlx4_counter_free(struct mlx4_dev *dev, u32 idx); int mlx4_get_default_counter_index(struct mlx4_dev *dev, int port); @@ -1463,7 +1460,7 @@ int mlx4_get_roce_gid_from_slave(struct mlx4_dev *dev, int port, int slave_id, int mlx4_FLOW_STEERING_IB_UC_QP_RANGE(struct mlx4_dev *dev, u32 min_range_qpn, u32 max_range_qpn); -u64 mlx4_read_clock(struct mlx4_dev *dev); +cycle_t mlx4_read_clock(struct mlx4_dev *dev); struct mlx4_active_ports { DECLARE_BITMAP(ports, MLX4_MAX_PORTS); @@ -1502,8 +1499,6 @@ int mlx4_vf_smi_enabled(struct mlx4_dev *dev, int slave, int port); int mlx4_vf_get_enable_smi_admin(struct mlx4_dev *dev, int slave, int port); int mlx4_vf_set_enable_smi_admin(struct mlx4_dev *dev, int slave, int port, int enable); - -struct mlx4_mpt_entry; int mlx4_mr_hw_get_mpt(struct mlx4_dev *dev, struct mlx4_mr *mmr, struct mlx4_mpt_entry ***mpt_entry); int mlx4_mr_hw_write_mpt(struct mlx4_dev *dev, struct mlx4_mr *mmr, @@ -1542,13 +1537,8 @@ enum mlx4_ptys_proto { MLX4_PTYS_EN = 1<<2, }; -enum mlx4_ptys_flags { - MLX4_PTYS_AN_DISABLE_CAP = 1 << 5, - MLX4_PTYS_AN_DISABLE_ADMIN = 1 << 6, -}; - struct mlx4_ptys_reg { - u8 flags; + u8 resrvd1; u8 local_port; u8 resrvd2; u8 proto_mask; diff --git a/include/linux/mlx4/driver.h b/include/linux/mlx4/driver.h index a858bcb622..bd0e7075ea 100644 --- a/include/linux/mlx4/driver.h +++ b/include/linux/mlx4/driver.h @@ 
-104,14 +104,4 @@ static inline u64 mlx4_mac_to_u64(u8 *addr) return mac; } -static inline void mlx4_u64_to_mac(u8 *addr, u64 mac) -{ - int i; - - for (i = ETH_ALEN; i > 0; i--) { - addr[i - 1] = mac & 0xFF; - mac >>= 8; - } -} - #endif /* MLX4_DRIVER_H */ diff --git a/include/linux/mlx4/qp.h b/include/linux/mlx4/qp.h index 9db93e4874..b4ee8f62ce 100644 --- a/include/linux/mlx4/qp.h +++ b/include/linux/mlx4/qp.h @@ -362,7 +362,7 @@ struct mlx4_wqe_datagram_seg { struct mlx4_wqe_lso_seg { __be32 mss_hdr_size; - __be32 header[]; + __be32 header[0]; }; enum mlx4_wqe_bind_seg_flags2 { @@ -470,7 +470,6 @@ struct mlx4_update_qp_params { u16 rate_val; }; -struct mlx4_qp *mlx4_qp_lookup(struct mlx4_dev *dev, u32 qpn); int mlx4_update_qp(struct mlx4_dev *dev, u32 qpn, enum mlx4_update_qp_attr attr, struct mlx4_update_qp_params *params); diff --git a/include/linux/mlx5/cmd.h b/include/linux/mlx5/cmd.h new file mode 100644 index 0000000000..68cd08f02c --- /dev/null +++ b/include/linux/mlx5/cmd.h @@ -0,0 +1,51 @@ +/* + * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#ifndef MLX5_CMD_H +#define MLX5_CMD_H + +#include + +struct manage_pages_layout { + u64 ptr; + u32 reserved; + u16 num_entries; + u16 func_id; +}; + + +struct mlx5_cmd_alloc_uar_imm_out { + u32 rsvd[3]; + u32 uarn; +}; + +#endif /* MLX5_CMD_H */ diff --git a/include/linux/mlx5/cq.h b/include/linux/mlx5/cq.h index 7bfb673634..7c3c0d3aca 100644 --- a/include/linux/mlx5/cq.h +++ b/include/linux/mlx5/cq.h @@ -33,34 +33,33 @@ #ifndef MLX5_CORE_CQ_H #define MLX5_CORE_CQ_H +#include #include -#include + struct mlx5_core_cq { u32 cqn; int cqe_sz; __be32 *set_ci_db; __be32 *arm_db; - struct mlx5_uars_page *uar; - refcount_t refcount; + atomic_t refcount; struct completion free; unsigned vector; unsigned int irqn; - void (*comp)(struct mlx5_core_cq *cq, struct mlx5_eqe *eqe); + void (*comp) (struct mlx5_core_cq *); void (*event) (struct mlx5_core_cq *, enum mlx5_event); + struct mlx5_uar *uar; u32 cons_index; unsigned arm_sn; struct mlx5_rsc_debug *dbg; int pid; struct { struct list_head list; - void (*comp)(struct mlx5_core_cq *cq, struct mlx5_eqe *eqe); + void (*comp)(struct mlx5_core_cq *); void *priv; } tasklet_ctx; int reset_notify_added; struct list_head reset_notify; - struct mlx5_eq_comp *eq; - u16 uid; }; @@ -124,18 +123,13 @@ struct mlx5_cq_modify_params { }; enum { - CQE_STRIDE_64 = 0, - CQE_STRIDE_128 = 1, - CQE_STRIDE_128_PAD = 2, + CQE_SIZE_64 = 0, + CQE_SIZE_128 = 1, }; -#define MLX5_MAX_CQ_PERIOD (BIT(__mlx5_bit_sz(cqc, cq_period)) - 1) -#define MLX5_MAX_CQ_COUNT 
(BIT(__mlx5_bit_sz(cqc, cq_max_count)) - 1) - -static inline int cqe_sz_to_mlx_sz(u8 size, int padding_128_en) +static inline int cqe_sz_to_mlx_sz(u8 size) { - return padding_128_en ? CQE_STRIDE_128_PAD : - size == 64 ? CQE_STRIDE_64 : CQE_STRIDE_128; + return size == 64 ? CQE_SIZE_64 : CQE_SIZE_128; } static inline void mlx5_cq_set_ci(struct mlx5_core_cq *cq) @@ -150,6 +144,7 @@ enum { static inline void mlx5_cq_arm(struct mlx5_core_cq *cq, u32 cmd, void __iomem *uar_page, + spinlock_t *doorbell_lock, u32 cons_index) { __be32 doorbell[2]; @@ -169,36 +164,21 @@ static inline void mlx5_cq_arm(struct mlx5_core_cq *cq, u32 cmd, doorbell[0] = cpu_to_be32(sn << 28 | cmd | ci); doorbell[1] = cpu_to_be32(cq->cqn); - mlx5_write64(doorbell, uar_page + MLX5_CQ_DOORBELL); -} - -static inline void mlx5_cq_hold(struct mlx5_core_cq *cq) -{ - refcount_inc(&cq->refcount); -} - -static inline void mlx5_cq_put(struct mlx5_core_cq *cq) -{ - if (refcount_dec_and_test(&cq->refcount)) - complete(&cq->free); + mlx5_write64(doorbell, uar_page + MLX5_CQ_DOORBELL, doorbell_lock); } +int mlx5_init_cq_table(struct mlx5_core_dev *dev); +void mlx5_cleanup_cq_table(struct mlx5_core_dev *dev); int mlx5_core_create_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq, - u32 *in, int inlen, u32 *out, int outlen); + u32 *in, int inlen); int mlx5_core_destroy_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq); int mlx5_core_query_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq, - u32 *out); + u32 *out, int outlen); int mlx5_core_modify_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq, u32 *in, int inlen); int mlx5_core_modify_cq_moderation(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq, u16 cq_period, u16 cq_max_count); -static inline void mlx5_dump_err_cqe(struct mlx5_core_dev *dev, - struct mlx5_err_cqe *err_cqe) -{ - print_hex_dump(KERN_WARNING, "", DUMP_PREFIX_OFFSET, 16, 1, err_cqe, - sizeof(*err_cqe), false); -} int mlx5_debug_cq_add(struct mlx5_core_dev *dev, struct 
mlx5_core_cq *cq); void mlx5_debug_cq_remove(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq); diff --git a/include/linux/mlx5/device.h b/include/linux/mlx5/device.h index 66eaf0aa7f..58276144ba 100644 --- a/include/linux/mlx5/device.h +++ b/include/linux/mlx5/device.h @@ -48,16 +48,12 @@ /* helper macros */ #define __mlx5_nullp(typ) ((struct mlx5_ifc_##typ##_bits *)0) #define __mlx5_bit_sz(typ, fld) sizeof(__mlx5_nullp(typ)->fld) -#define __mlx5_bit_off(typ, fld) (offsetof(struct mlx5_ifc_##typ##_bits, fld)) -#define __mlx5_16_off(typ, fld) (__mlx5_bit_off(typ, fld) / 16) +#define __mlx5_bit_off(typ, fld) ((unsigned)(unsigned long)(&(__mlx5_nullp(typ)->fld))) #define __mlx5_dw_off(typ, fld) (__mlx5_bit_off(typ, fld) / 32) #define __mlx5_64_off(typ, fld) (__mlx5_bit_off(typ, fld) / 64) -#define __mlx5_16_bit_off(typ, fld) (16 - __mlx5_bit_sz(typ, fld) - (__mlx5_bit_off(typ, fld) & 0xf)) #define __mlx5_dw_bit_off(typ, fld) (32 - __mlx5_bit_sz(typ, fld) - (__mlx5_bit_off(typ, fld) & 0x1f)) #define __mlx5_mask(typ, fld) ((u32)((1ull << __mlx5_bit_sz(typ, fld)) - 1)) #define __mlx5_dw_mask(typ, fld) (__mlx5_mask(typ, fld) << __mlx5_dw_bit_off(typ, fld)) -#define __mlx5_mask16(typ, fld) ((u16)((1ull << __mlx5_bit_sz(typ, fld)) - 1)) -#define __mlx5_16_mask(typ, fld) (__mlx5_mask16(typ, fld) << __mlx5_16_bit_off(typ, fld)) #define __mlx5_st_sz_bits(typ) sizeof(struct mlx5_ifc_##typ##_bits) #define MLX5_FLD_SZ_BYTES(typ, fld) (__mlx5_bit_sz(typ, fld) / 8) @@ -67,23 +63,17 @@ #define MLX5_UN_SZ_BYTES(typ) (sizeof(union mlx5_ifc_##typ##_bits) / 8) #define MLX5_UN_SZ_DW(typ) (sizeof(union mlx5_ifc_##typ##_bits) / 32) #define MLX5_BYTE_OFF(typ, fld) (__mlx5_bit_off(typ, fld) / 8) -#define MLX5_ADDR_OF(typ, p, fld) ((void *)((uint8_t *)(p) + MLX5_BYTE_OFF(typ, fld))) +#define MLX5_ADDR_OF(typ, p, fld) ((char *)(p) + MLX5_BYTE_OFF(typ, fld)) /* insert a value to a struct */ #define MLX5_SET(typ, p, fld, v) do { \ - u32 _v = v; \ BUILD_BUG_ON(__mlx5_st_sz_bits(typ) % 32); \ 
*((__be32 *)(p) + __mlx5_dw_off(typ, fld)) = \ cpu_to_be32((be32_to_cpu(*((__be32 *)(p) + __mlx5_dw_off(typ, fld))) & \ - (~__mlx5_dw_mask(typ, fld))) | (((_v) & __mlx5_mask(typ, fld)) \ + (~__mlx5_dw_mask(typ, fld))) | (((v) & __mlx5_mask(typ, fld)) \ << __mlx5_dw_bit_off(typ, fld))); \ } while (0) -#define MLX5_ARRAY_SET(typ, p, fld, idx, v) do { \ - BUILD_BUG_ON(__mlx5_bit_off(typ, fld) % 32); \ - MLX5_SET(typ, p, fld[idx], v); \ -} while (0) - #define MLX5_SET_TO_ONES(typ, p, fld) do { \ BUILD_BUG_ON(__mlx5_st_sz_bits(typ) % 32); \ *((__be32 *)(p) + __mlx5_dw_off(typ, fld)) = \ @@ -125,19 +115,6 @@ __mlx5_mask(typ, fld)) ___t; \ }) -#define MLX5_GET16(typ, p, fld) ((be16_to_cpu(*((__be16 *)(p) +\ -__mlx5_16_off(typ, fld))) >> __mlx5_16_bit_off(typ, fld)) & \ -__mlx5_mask16(typ, fld)) - -#define MLX5_SET16(typ, p, fld, v) do { \ - u16 _v = v; \ - BUILD_BUG_ON(__mlx5_st_sz_bits(typ) % 16); \ - *((__be16 *)(p) + __mlx5_16_off(typ, fld)) = \ - cpu_to_be16((be16_to_cpu(*((__be16 *)(p) + __mlx5_16_off(typ, fld))) & \ - (~__mlx5_16_mask(typ, fld))) | (((_v) & __mlx5_mask16(typ, fld)) \ - << __mlx5_16_bit_off(typ, fld))); \ -} while (0) - /* Big endian getters */ #define MLX5_GET64_BE(typ, p, fld) (*((__be64 *)(p) +\ __mlx5_64_off(typ, fld))) @@ -212,13 +189,6 @@ enum { MLX5_PFAULT_SUBTYPE_RDMA = 1, }; -enum wqe_page_fault_type { - MLX5_WQE_PF_TYPE_RMP = 0, - MLX5_WQE_PF_TYPE_REQ_SEND_OR_WRITE = 1, - MLX5_WQE_PF_TYPE_RESP = 2, - MLX5_WQE_PF_TYPE_REQ_READ_OR_ATOMIC = 3, -}; - enum { MLX5_PERM_LOCAL_READ = 1 << 2, MLX5_PERM_LOCAL_WRITE = 1 << 3, @@ -242,22 +212,10 @@ enum { }; enum { - MLX5_ADAPTER_PAGE_SHIFT = 12, - MLX5_ADAPTER_PAGE_SIZE = 1 << MLX5_ADAPTER_PAGE_SHIFT, -}; - -enum { - MLX5_BFREGS_PER_UAR = 4, - MLX5_MAX_UARS = 1 << 8, - MLX5_NON_FP_BFREGS_PER_UAR = 2, - MLX5_FP_BFREGS_PER_UAR = MLX5_BFREGS_PER_UAR - - MLX5_NON_FP_BFREGS_PER_UAR, - MLX5_MAX_BFREGS = MLX5_MAX_UARS * - MLX5_NON_FP_BFREGS_PER_UAR, - MLX5_UARS_IN_PAGE = PAGE_SIZE / MLX5_ADAPTER_PAGE_SIZE, - 
MLX5_NON_FP_BFREGS_IN_PAGE = MLX5_NON_FP_BFREGS_PER_UAR * MLX5_UARS_IN_PAGE, - MLX5_MIN_DYN_BFREGS = 512, - MLX5_MAX_DYN_BFREGS = 1024, + MLX5_BF_REGS_PER_PAGE = 4, + MLX5_MAX_UAR_PAGES = 1 << 8, + MLX5_NON_FP_BF_REGS_PER_PAGE = 2, + MLX5_MAX_UUARS = MLX5_MAX_UAR_PAGES * MLX5_NON_FP_BF_REGS_PER_PAGE, }; enum { @@ -276,9 +234,7 @@ enum { MLX5_MKEY_MASK_RW = 1ull << 20, MLX5_MKEY_MASK_A = 1ull << 21, MLX5_MKEY_MASK_SMALL_FENCE = 1ull << 23, - MLX5_MKEY_MASK_RELAXED_ORDERING_WRITE = 1ull << 25, - MLX5_MKEY_MASK_FREE = 1ull << 29, - MLX5_MKEY_MASK_RELAXED_ORDERING_READ = 1ull << 47, + MLX5_MKEY_MASK_FREE = 1ull << 29, }; enum { @@ -300,18 +256,11 @@ enum { MLX5_EVENT_QUEUE_TYPE_QP = 0, MLX5_EVENT_QUEUE_TYPE_RQ = 1, MLX5_EVENT_QUEUE_TYPE_SQ = 2, - MLX5_EVENT_QUEUE_TYPE_DCT = 6, }; -/* mlx5 components can subscribe to any one of these events via - * mlx5_eq_notifier_register API. - */ enum mlx5_event { - /* Special value to subscribe to any event */ - MLX5_EVENT_TYPE_NOTIFY_ANY = 0x0, - /* HW events enum start: comp events are not subscribable */ MLX5_EVENT_TYPE_COMP = 0x0, - /* HW Async events enum start: subscribable events */ + MLX5_EVENT_TYPE_PATH_MIG = 0x01, MLX5_EVENT_TYPE_COMM_EST = 0x02, MLX5_EVENT_TYPE_SQ_DRAINED = 0x03, @@ -328,13 +277,7 @@ enum mlx5_event { MLX5_EVENT_TYPE_INTERNAL_ERROR = 0x08, MLX5_EVENT_TYPE_PORT_CHANGE = 0x09, MLX5_EVENT_TYPE_GPIO_EVENT = 0x15, - MLX5_EVENT_TYPE_PORT_MODULE_EVENT = 0x16, - MLX5_EVENT_TYPE_TEMP_WARN_EVENT = 0x17, - MLX5_EVENT_TYPE_XRQ_ERROR = 0x18, MLX5_EVENT_TYPE_REMOTE_CONFIG = 0x19, - MLX5_EVENT_TYPE_GENERAL_EVENT = 0x22, - MLX5_EVENT_TYPE_MONITOR_COUNTER = 0x24, - MLX5_EVENT_TYPE_PPS_EVENT = 0x25, MLX5_EVENT_TYPE_DB_BF_CONGESTION = 0x1a, MLX5_EVENT_TYPE_STALL_EVENT = 0x1b, @@ -344,35 +287,6 @@ enum mlx5_event { MLX5_EVENT_TYPE_PAGE_FAULT = 0xc, MLX5_EVENT_TYPE_NIC_VPORT_CHANGE = 0xd, - - MLX5_EVENT_TYPE_ESW_FUNCTIONS_CHANGED = 0xe, - MLX5_EVENT_TYPE_VHCA_STATE_CHANGE = 0xf, - - MLX5_EVENT_TYPE_DCT_DRAINED = 0x1c, - 
MLX5_EVENT_TYPE_DCT_KEY_VIOLATION = 0x1d, - - MLX5_EVENT_TYPE_FPGA_ERROR = 0x20, - MLX5_EVENT_TYPE_FPGA_QP_ERROR = 0x21, - - MLX5_EVENT_TYPE_DEVICE_TRACER = 0x26, - - MLX5_EVENT_TYPE_MAX = 0x100, -}; - -enum mlx5_driver_event { - MLX5_DRIVER_EVENT_TYPE_TRAP = 0, -}; - -enum { - MLX5_TRACER_SUBTYPE_OWNERSHIP_CHANGE = 0x0, - MLX5_TRACER_SUBTYPE_TRACES_AVAILABLE = 0x1, -}; - -enum { - MLX5_GENERAL_SUBTYPE_DELAY_DROP_TIMEOUT = 0x1, - MLX5_GENERAL_SUBTYPE_PCI_POWER_CHANGE_EVENT = 0x5, - MLX5_GENERAL_SUBTYPE_FW_LIVE_PATCH_EVENT = 0x7, - MLX5_GENERAL_SUBTYPE_PCI_SYNC_FOR_FW_UPDATE_EVENT = 0x8, }; enum { @@ -435,7 +349,6 @@ enum { MLX5_OPCODE_ATOMIC_MASKED_FA = 0x15, MLX5_OPCODE_BIND_MW = 0x18, MLX5_OPCODE_CONFIG_CMD = 0x1f, - MLX5_OPCODE_ENHANCED_MPSW = 0x29, MLX5_RECV_OPCODE_RDMA_WRITE_IMM = 0x00, MLX5_RECV_OPCODE_SEND = 0x01, @@ -448,7 +361,6 @@ enum { MLX5_OPCODE_SET_PSV = 0x20, MLX5_OPCODE_GET_PSV = 0x21, MLX5_OPCODE_CHECK_PSV = 0x22, - MLX5_OPCODE_DUMP = 0x23, MLX5_OPCODE_RGET_PSV = 0x26, MLX5_OPCODE_RCHECK_PSV = 0x27, @@ -456,25 +368,6 @@ enum { }; -enum { - MLX5_OPC_MOD_TLS_TIS_STATIC_PARAMS = 0x1, - MLX5_OPC_MOD_TLS_TIR_STATIC_PARAMS = 0x2, -}; - -enum { - MLX5_OPC_MOD_TLS_TIS_PROGRESS_PARAMS = 0x1, - MLX5_OPC_MOD_TLS_TIR_PROGRESS_PARAMS = 0x2, -}; - -struct mlx5_wqe_tls_static_params_seg { - u8 ctx[MLX5_ST_SZ_BYTES(tls_static_params)]; -}; - -struct mlx5_wqe_tls_progress_params_seg { - __be32 tis_tir_num; - u8 ctx[MLX5_ST_SZ_BYTES(tls_progress_params)]; -}; - enum { MLX5_SET_PORT_RESET_QKEY = 0, MLX5_SET_PORT_GUID0 = 16, @@ -494,6 +387,11 @@ enum { MLX5_MAX_PAGE_SHIFT = 31 }; +enum { + MLX5_ADAPTER_PAGE_SHIFT = 12, + MLX5_ADAPTER_PAGE_SIZE = 1 << MLX5_ADAPTER_PAGE_SHIFT, +}; + enum { MLX5_CAP_OFF_CMDIF_CSUM = 46, }; @@ -541,10 +439,6 @@ struct mlx5_cmd_layout { u8 status_own; }; -enum mlx5_fatal_assert_bit_offsets { - MLX5_RFR_OFFSET = 31, -}; - struct health_buffer { __be32 assert_var[5]; __be32 rsvd0[3]; @@ -553,20 +447,12 @@ struct health_buffer { __be32 
rsvd1[2]; __be32 fw_ver; __be32 hw_id; - __be32 rfr; + __be32 rsvd2; u8 irisc_index; u8 synd; __be16 ext_synd; }; -enum mlx5_initializing_bit_offsets { - MLX5_FW_RESET_SUPPORTED_OFFSET = 30, -}; - -enum mlx5_cmd_addr_l_sz_offset { - MLX5_NIC_IFC_OFFSET = 8, -}; - struct mlx5_init_seg { __be32 fw_rev; __be32 cmdif_rev_fw_sub; @@ -582,10 +468,7 @@ struct mlx5_init_seg { __be32 internal_timer_l; __be32 rsvd3[2]; __be32 health_counter; - __be32 rsvd4[11]; - __be32 real_time_h; - __be32 real_time_l; - __be32 rsvd5[1006]; + __be32 rsvd4[1019]; __be64 ieee1588_clk; __be32 ieee1588_clk_type; __be32 clr_intx; @@ -609,12 +492,6 @@ struct mlx5_eqe_cq_err { u8 syndrome; }; -struct mlx5_eqe_xrq_err { - __be32 reserved1[5]; - __be32 type_xrqn; - __be32 reserved2; -}; - struct mlx5_eqe_port_state { u8 reserved0[8]; u8 port; @@ -642,7 +519,7 @@ struct mlx5_eqe_cmd { }; struct mlx5_eqe_page_req { - __be16 ec_function; + u8 rsvd0[2]; __be16 func_id; __be32 num_pages; __be32 rsvd1[5]; @@ -656,9 +533,7 @@ struct mlx5_eqe_page_fault { __be16 wqe_index; u16 reserved2; __be16 packet_length; - __be32 token; - u8 reserved4[8]; - __be32 pftype_wq; + u8 reserved3[12]; } __packed wqe; struct { __be32 r_key; @@ -666,9 +541,9 @@ struct mlx5_eqe_page_fault { __be16 packet_length; __be32 rdma_op_len; __be64 rdma_va; - __be32 pftype_token; } __packed rdma; } __packed; + __be32 flags_qpn; } __packed; struct mlx5_eqe_vport_change { @@ -677,59 +552,6 @@ struct mlx5_eqe_vport_change { __be32 rsvd1[6]; } __packed; -struct mlx5_eqe_port_module { - u8 reserved_at_0[1]; - u8 module; - u8 reserved_at_2[1]; - u8 module_status; - u8 reserved_at_4[2]; - u8 error_type; -} __packed; - -struct mlx5_eqe_pps { - u8 rsvd0[3]; - u8 pin; - u8 rsvd1[4]; - union { - struct { - __be32 time_sec; - __be32 time_nsec; - }; - struct { - __be64 time_stamp; - }; - }; - u8 rsvd2[12]; -} __packed; - -struct mlx5_eqe_dct { - __be32 reserved[6]; - __be32 dctn; -}; - -struct mlx5_eqe_temp_warning { - __be64 sensor_warning_msb; - 
__be64 sensor_warning_lsb; -} __packed; - -#define SYNC_RST_STATE_MASK 0xf - -enum sync_rst_state_type { - MLX5_SYNC_RST_STATE_RESET_REQUEST = 0x0, - MLX5_SYNC_RST_STATE_RESET_NOW = 0x1, - MLX5_SYNC_RST_STATE_RESET_ABORT = 0x2, -}; - -struct mlx5_eqe_sync_fw_update { - u8 reserved_at_0[3]; - u8 sync_rst_state; -}; - -struct mlx5_eqe_vhca_state { - __be16 ec_function; - __be16 function_id; -} __packed; - union ev_data { __be32 raw[7]; struct mlx5_eqe_cmd cmd; @@ -743,13 +565,6 @@ union ev_data { struct mlx5_eqe_page_req req_pages; struct mlx5_eqe_page_fault page_fault; struct mlx5_eqe_vport_change vport_change; - struct mlx5_eqe_port_module port_module; - struct mlx5_eqe_pps pps; - struct mlx5_eqe_dct dct; - struct mlx5_eqe_temp_warning temp_warning; - struct mlx5_eqe_xrq_err xrq_err; - struct mlx5_eqe_sync_fw_update sync_fw_update; - struct mlx5_eqe_vhca_state vhca_state; } __packed; struct mlx5_eqe { @@ -792,7 +607,7 @@ struct mlx5_err_cqe { }; struct mlx5_cqe64 { - u8 tls_outer_l3_tunneled; + u8 outer_l3_tunneled; u8 rsvd0; __be16 wqe_id; u8 lro_tcppsh_abort_dupack; @@ -810,12 +625,7 @@ struct mlx5_cqe64 { u8 l4_l3_hdr_type; __be16 vlan_info; __be32 srqn; /* [31:24]: lro_num_seg, [23:0]: srqn */ - union { - __be32 immediate; - __be32 inval_rkey; - __be32 pkey; - __be32 ft_metadata; - }; + __be32 imm_inval_pkey; u8 rsvd40[4]; __be32 byte_cnt; __be32 timestamp_h; @@ -831,7 +641,7 @@ struct mlx5_mini_cqe8 { __be32 rx_hash_result; struct { __be16 checksum; - __be16 stridx; + __be16 rsvd; }; struct { __be16 wqe_counter; @@ -851,22 +661,16 @@ enum { enum { MLX5_CQE_FORMAT_CSUM = 0x1, - MLX5_CQE_FORMAT_CSUM_STRIDX = 0x3, }; #define MLX5_MINI_CQE_ARRAY_SIZE 8 -static inline u8 mlx5_get_cqe_format(struct mlx5_cqe64 *cqe) +static inline int mlx5_get_cqe_format(struct mlx5_cqe64 *cqe) { return (cqe->op_own >> 2) & 0x3; } -static inline u8 get_cqe_opcode(struct mlx5_cqe64 *cqe) -{ - return cqe->op_own >> 4; -} - -static inline u8 get_cqe_lro_tcppsh(struct mlx5_cqe64 *cqe) 
+static inline int get_cqe_lro_tcppsh(struct mlx5_cqe64 *cqe) { return (cqe->lro_tcppsh_abort_dupack >> 6) & 1; } @@ -881,19 +685,14 @@ static inline u8 get_cqe_l3_hdr_type(struct mlx5_cqe64 *cqe) return (cqe->l4_l3_hdr_type >> 2) & 0x3; } -static inline bool cqe_is_tunneled(struct mlx5_cqe64 *cqe) +static inline u8 cqe_is_tunneled(struct mlx5_cqe64 *cqe) { - return cqe->tls_outer_l3_tunneled & 0x1; + return cqe->outer_l3_tunneled & 0x1; } -static inline u8 get_cqe_tls_offload(struct mlx5_cqe64 *cqe) +static inline int cqe_has_vlan(struct mlx5_cqe64 *cqe) { - return (cqe->tls_outer_l3_tunneled >> 3) & 0x3; -} - -static inline bool cqe_has_vlan(struct mlx5_cqe64 *cqe) -{ - return cqe->l4_l3_hdr_type & 0x1; + return !!(cqe->l4_l3_hdr_type & 0x1); } static inline u64 get_cqe_ts(struct mlx5_cqe64 *cqe) @@ -906,17 +705,6 @@ static inline u64 get_cqe_ts(struct mlx5_cqe64 *cqe) return (u64)lo | ((u64)hi << 32); } -static inline u16 get_cqe_flow_tag(struct mlx5_cqe64 *cqe) -{ - return be32_to_cpu(cqe->sop_drop_qpn) & 0xFFF; -} - -#define MLX5_MPWQE_LOG_NUM_STRIDES_EXT_BASE 3 -#define MLX5_MPWQE_LOG_NUM_STRIDES_BASE 9 -#define MLX5_MPWQE_LOG_NUM_STRIDES_MAX 16 -#define MLX5_MPWQE_LOG_STRIDE_SZ_BASE 6 -#define MLX5_MPWQE_LOG_STRIDE_SZ_MAX 13 - struct mpwrq_cqe_bc { __be16 filler_consumed_strides; __be16 byte_cnt; @@ -962,14 +750,8 @@ enum { }; enum { - CQE_RSS_HTYPE_IP = 0x3 << 2, - /* cqe->rss_hash_type[3:2] - IP destination selected for hash - * (00 = none, 01 = IPv4, 10 = IPv6, 11 = Reserved) - */ - CQE_RSS_HTYPE_L4 = 0x3 << 6, - /* cqe->rss_hash_type[7:6] - L4 destination selected for hash - * (00 = none, 01 = TCP. 
10 = UDP, 11 = IPSEC.SPI - */ + CQE_RSS_HTYPE_IP = 0x3 << 6, + CQE_RSS_HTYPE_L4 = 0x3 << 2, }; enum { @@ -984,13 +766,6 @@ enum { CQE_L4_OK = 1 << 2, }; -enum { - CQE_TLS_OFFLOAD_NOT_DECRYPTED = 0x0, - CQE_TLS_OFFLOAD_DECRYPTED = 0x1, - CQE_TLS_OFFLOAD_RESYNC = 0x2, - CQE_TLS_OFFLOAD_ERROR = 0x3, -}; - struct mlx5_sig_err_cqe { u8 rsvd0[16]; __be32 expected_trans_sig; @@ -1033,12 +808,13 @@ enum { MLX5_MKEY_REMOTE_INVAL = 1 << 24, MLX5_MKEY_FLAG_SYNC_UMR = 1 << 29, MLX5_MKEY_BSF_EN = 1 << 30, + MLX5_MKEY_LEN64 = 1 << 31, }; struct mlx5_mkey_seg { /* This is a two bit field occupying bits 31-30. * bit 31 is always 0, - * bit 30 is zero for regular MRs and 1 (e.g free) for UMRs that do not have translation + * bit 30 is zero for regular MRs and 1 (e.g free) for UMRs that do not have translation */ u8 status; u8 pcie_control; @@ -1069,9 +845,9 @@ enum { }; enum { - MLX5_VPORT_ADMIN_STATE_DOWN = 0x0, - MLX5_VPORT_ADMIN_STATE_UP = 0x1, - MLX5_VPORT_ADMIN_STATE_AUTO = 0x2, + MLX5_ESW_VPORT_ADMIN_STATE_DOWN = 0x0, + MLX5_ESW_VPORT_ADMIN_STATE_UP = 0x1, + MLX5_ESW_VPORT_ADMIN_STATE_AUTO = 0x2, }; enum { @@ -1096,9 +872,7 @@ enum { MLX5_MATCH_OUTER_HEADERS = 1 << 0, MLX5_MATCH_MISC_PARAMETERS = 1 << 1, MLX5_MATCH_INNER_HEADERS = 1 << 2, - MLX5_MATCH_MISC_PARAMETERS_2 = 1 << 3, - MLX5_MATCH_MISC_PARAMETERS_3 = 1 << 4, - MLX5_MATCH_MISC_PARAMETERS_4 = 1 << 5, + }; enum { @@ -1134,21 +908,6 @@ enum mlx5_wol_mode { MLX5_WOL_PHY_ACTIVITY = 1 << 7, }; -enum mlx5_mpls_supported_fields { - MLX5_FIELD_SUPPORT_MPLS_LABEL = 1 << 0, - MLX5_FIELD_SUPPORT_MPLS_EXP = 1 << 1, - MLX5_FIELD_SUPPORT_MPLS_S_BOS = 1 << 2, - MLX5_FIELD_SUPPORT_MPLS_TTL = 1 << 3 -}; - -enum mlx5_flex_parser_protos { - MLX5_FLEX_PROTO_GENEVE = 1 << 3, - MLX5_FLEX_PROTO_CW_MPLS_GRE = 1 << 4, - MLX5_FLEX_PROTO_CW_MPLS_UDP = 1 << 5, - MLX5_FLEX_PROTO_ICMP = 1 << 8, - MLX5_FLEX_PROTO_ICMPV6 = 1 << 9, -}; - /* MLX5 DEV CAPs */ /* TODO: EAT.ME */ @@ -1157,9 +916,6 @@ enum mlx5_cap_mode { HCA_CAP_OPMOD_GET_CUR = 1, }; -/* 
Any new cap addition must update mlx5_hca_caps_alloc() to allocate - * capability memory. - */ enum mlx5_cap_type { MLX5_CAP_GENERAL = 0, MLX5_CAP_ETHERNET_OFFLOADS, @@ -1167,104 +923,49 @@ enum mlx5_cap_type { MLX5_CAP_ATOMIC, MLX5_CAP_ROCE, MLX5_CAP_IPOIB_OFFLOADS, - MLX5_CAP_IPOIB_ENHANCED_OFFLOADS, + MLX5_CAP_EOIB_OFFLOADS, MLX5_CAP_FLOW_TABLE, MLX5_CAP_ESWITCH_FLOW_TABLE, MLX5_CAP_ESWITCH, MLX5_CAP_RESERVED, MLX5_CAP_VECTOR_CALC, MLX5_CAP_QOS, - MLX5_CAP_DEBUG, - MLX5_CAP_RESERVED_14, - MLX5_CAP_DEV_MEM, - MLX5_CAP_RESERVED_16, - MLX5_CAP_TLS, - MLX5_CAP_VDPA_EMULATION = 0x13, - MLX5_CAP_DEV_EVENT = 0x14, - MLX5_CAP_IPSEC, - MLX5_CAP_GENERAL_2 = 0x20, /* NUM OF CAP Types */ MLX5_CAP_NUM }; -enum mlx5_pcam_reg_groups { - MLX5_PCAM_REGS_5000_TO_507F = 0x0, -}; - -enum mlx5_pcam_feature_groups { - MLX5_PCAM_FEATURE_ENHANCED_FEATURES = 0x0, -}; - -enum mlx5_mcam_reg_groups { - MLX5_MCAM_REGS_FIRST_128 = 0x0, - MLX5_MCAM_REGS_0x9080_0x90FF = 0x1, - MLX5_MCAM_REGS_0x9100_0x917F = 0x2, - MLX5_MCAM_REGS_NUM = 0x3, -}; - -enum mlx5_mcam_feature_groups { - MLX5_MCAM_FEATURE_ENHANCED_FEATURES = 0x0, -}; - -enum mlx5_qcam_reg_groups { - MLX5_QCAM_REGS_FIRST_128 = 0x0, -}; - -enum mlx5_qcam_feature_groups { - MLX5_QCAM_FEATURE_ENHANCED_FEATURES = 0x0, -}; - /* GET Dev Caps macros */ #define MLX5_CAP_GEN(mdev, cap) \ - MLX5_GET(cmd_hca_cap, mdev->caps.hca[MLX5_CAP_GENERAL]->cur, cap) - -#define MLX5_CAP_GEN_64(mdev, cap) \ - MLX5_GET64(cmd_hca_cap, mdev->caps.hca[MLX5_CAP_GENERAL]->cur, cap) + MLX5_GET(cmd_hca_cap, mdev->hca_caps_cur[MLX5_CAP_GENERAL], cap) #define MLX5_CAP_GEN_MAX(mdev, cap) \ - MLX5_GET(cmd_hca_cap, mdev->caps.hca[MLX5_CAP_GENERAL]->max, cap) - -#define MLX5_CAP_GEN_2(mdev, cap) \ - MLX5_GET(cmd_hca_cap_2, mdev->caps.hca[MLX5_CAP_GENERAL_2]->cur, cap) - -#define MLX5_CAP_GEN_2_64(mdev, cap) \ - MLX5_GET64(cmd_hca_cap_2, mdev->caps.hca[MLX5_CAP_GENERAL_2]->cur, cap) - -#define MLX5_CAP_GEN_2_MAX(mdev, cap) \ - MLX5_GET(cmd_hca_cap_2, 
mdev->caps.hca[MLX5_CAP_GENERAL_2]->max, cap) + MLX5_GET(cmd_hca_cap, mdev->hca_caps_max[MLX5_CAP_GENERAL], cap) #define MLX5_CAP_ETH(mdev, cap) \ MLX5_GET(per_protocol_networking_offload_caps,\ - mdev->caps.hca[MLX5_CAP_ETHERNET_OFFLOADS]->cur, cap) + mdev->hca_caps_cur[MLX5_CAP_ETHERNET_OFFLOADS], cap) #define MLX5_CAP_ETH_MAX(mdev, cap) \ MLX5_GET(per_protocol_networking_offload_caps,\ - mdev->caps.hca[MLX5_CAP_ETHERNET_OFFLOADS]->max, cap) - -#define MLX5_CAP_IPOIB_ENHANCED(mdev, cap) \ - MLX5_GET(per_protocol_networking_offload_caps,\ - mdev->caps.hca[MLX5_CAP_IPOIB_ENHANCED_OFFLOADS]->cur, cap) + mdev->hca_caps_max[MLX5_CAP_ETHERNET_OFFLOADS], cap) #define MLX5_CAP_ROCE(mdev, cap) \ - MLX5_GET(roce_cap, mdev->caps.hca[MLX5_CAP_ROCE]->cur, cap) + MLX5_GET(roce_cap, mdev->hca_caps_cur[MLX5_CAP_ROCE], cap) #define MLX5_CAP_ROCE_MAX(mdev, cap) \ - MLX5_GET(roce_cap, mdev->caps.hca[MLX5_CAP_ROCE]->max, cap) + MLX5_GET(roce_cap, mdev->hca_caps_max[MLX5_CAP_ROCE], cap) #define MLX5_CAP_ATOMIC(mdev, cap) \ - MLX5_GET(atomic_caps, mdev->caps.hca[MLX5_CAP_ATOMIC]->cur, cap) + MLX5_GET(atomic_caps, mdev->hca_caps_cur[MLX5_CAP_ATOMIC], cap) #define MLX5_CAP_ATOMIC_MAX(mdev, cap) \ - MLX5_GET(atomic_caps, mdev->caps.hca[MLX5_CAP_ATOMIC]->max, cap) + MLX5_GET(atomic_caps, mdev->hca_caps_max[MLX5_CAP_ATOMIC], cap) #define MLX5_CAP_FLOWTABLE(mdev, cap) \ - MLX5_GET(flow_table_nic_cap, mdev->caps.hca[MLX5_CAP_FLOW_TABLE]->cur, cap) - -#define MLX5_CAP64_FLOWTABLE(mdev, cap) \ - MLX5_GET64(flow_table_nic_cap, (mdev)->caps.hca[MLX5_CAP_FLOW_TABLE]->cur, cap) + MLX5_GET(flow_table_nic_cap, mdev->hca_caps_cur[MLX5_CAP_FLOW_TABLE], cap) #define MLX5_CAP_FLOWTABLE_MAX(mdev, cap) \ - MLX5_GET(flow_table_nic_cap, mdev->caps.hca[MLX5_CAP_FLOW_TABLE]->max, cap) + MLX5_GET(flow_table_nic_cap, mdev->hca_caps_max[MLX5_CAP_FLOW_TABLE], cap) #define MLX5_CAP_FLOWTABLE_NIC_RX(mdev, cap) \ MLX5_CAP_FLOWTABLE(mdev, flow_table_properties_nic_receive.cap) @@ -1272,12 +973,6 @@ enum 
mlx5_qcam_feature_groups { #define MLX5_CAP_FLOWTABLE_NIC_RX_MAX(mdev, cap) \ MLX5_CAP_FLOWTABLE_MAX(mdev, flow_table_properties_nic_receive.cap) -#define MLX5_CAP_FLOWTABLE_NIC_TX(mdev, cap) \ - MLX5_CAP_FLOWTABLE(mdev, flow_table_properties_nic_transmit.cap) - -#define MLX5_CAP_FLOWTABLE_NIC_TX_MAX(mdev, cap) \ - MLX5_CAP_FLOWTABLE_MAX(mdev, flow_table_properties_nic_transmit.cap) - #define MLX5_CAP_FLOWTABLE_SNIFFER_RX(mdev, cap) \ MLX5_CAP_FLOWTABLE(mdev, flow_table_properties_nic_receive_sniffer.cap) @@ -1290,25 +985,13 @@ enum mlx5_qcam_feature_groups { #define MLX5_CAP_FLOWTABLE_SNIFFER_TX_MAX(mdev, cap) \ MLX5_CAP_FLOWTABLE_MAX(mdev, flow_table_properties_nic_transmit_sniffer.cap) -#define MLX5_CAP_FLOWTABLE_RDMA_RX(mdev, cap) \ - MLX5_CAP_FLOWTABLE(mdev, flow_table_properties_nic_receive_rdma.cap) - -#define MLX5_CAP_FLOWTABLE_RDMA_RX_MAX(mdev, cap) \ - MLX5_CAP_FLOWTABLE_MAX(mdev, flow_table_properties_nic_receive_rdma.cap) - -#define MLX5_CAP_FLOWTABLE_RDMA_TX(mdev, cap) \ - MLX5_CAP_FLOWTABLE(mdev, flow_table_properties_nic_transmit_rdma.cap) - -#define MLX5_CAP_FLOWTABLE_RDMA_TX_MAX(mdev, cap) \ - MLX5_CAP_FLOWTABLE_MAX(mdev, flow_table_properties_nic_transmit_rdma.cap) - #define MLX5_CAP_ESW_FLOWTABLE(mdev, cap) \ MLX5_GET(flow_table_eswitch_cap, \ - mdev->caps.hca[MLX5_CAP_ESWITCH_FLOW_TABLE]->cur, cap) + mdev->hca_caps_cur[MLX5_CAP_ESWITCH_FLOW_TABLE], cap) #define MLX5_CAP_ESW_FLOWTABLE_MAX(mdev, cap) \ MLX5_GET(flow_table_eswitch_cap, \ - mdev->caps.hca[MLX5_CAP_ESWITCH_FLOW_TABLE]->max, cap) + mdev->hca_caps_max[MLX5_CAP_ESWITCH_FLOW_TABLE], cap) #define MLX5_CAP_ESW_FLOWTABLE_FDB(mdev, cap) \ MLX5_CAP_ESW_FLOWTABLE(mdev, flow_table_properties_nic_esw_fdb.cap) @@ -1330,87 +1013,21 @@ enum mlx5_qcam_feature_groups { #define MLX5_CAP_ESW(mdev, cap) \ MLX5_GET(e_switch_cap, \ - mdev->caps.hca[MLX5_CAP_ESWITCH]->cur, cap) - -#define MLX5_CAP64_ESW_FLOWTABLE(mdev, cap) \ - MLX5_GET64(flow_table_eswitch_cap, \ - 
(mdev)->caps.hca[MLX5_CAP_ESWITCH_FLOW_TABLE]->cur, cap) + mdev->hca_caps_cur[MLX5_CAP_ESWITCH], cap) #define MLX5_CAP_ESW_MAX(mdev, cap) \ MLX5_GET(e_switch_cap, \ - mdev->caps.hca[MLX5_CAP_ESWITCH]->max, cap) + mdev->hca_caps_max[MLX5_CAP_ESWITCH], cap) #define MLX5_CAP_ODP(mdev, cap)\ - MLX5_GET(odp_cap, mdev->caps.hca[MLX5_CAP_ODP]->cur, cap) - -#define MLX5_CAP_ODP_MAX(mdev, cap)\ - MLX5_GET(odp_cap, mdev->caps.hca[MLX5_CAP_ODP]->max, cap) + MLX5_GET(odp_cap, mdev->hca_caps_cur[MLX5_CAP_ODP], cap) #define MLX5_CAP_VECTOR_CALC(mdev, cap) \ MLX5_GET(vector_calc_cap, \ - mdev->caps.hca[MLX5_CAP_VECTOR_CALC]->cur, cap) + mdev->hca_caps_cur[MLX5_CAP_VECTOR_CALC], cap) #define MLX5_CAP_QOS(mdev, cap)\ - MLX5_GET(qos_cap, mdev->caps.hca[MLX5_CAP_QOS]->cur, cap) - -#define MLX5_CAP_DEBUG(mdev, cap)\ - MLX5_GET(debug_cap, mdev->caps.hca[MLX5_CAP_DEBUG]->cur, cap) - -#define MLX5_CAP_PCAM_FEATURE(mdev, fld) \ - MLX5_GET(pcam_reg, (mdev)->caps.pcam, feature_cap_mask.enhanced_features.fld) - -#define MLX5_CAP_PCAM_REG(mdev, reg) \ - MLX5_GET(pcam_reg, (mdev)->caps.pcam, port_access_reg_cap_mask.regs_5000_to_507f.reg) - -#define MLX5_CAP_MCAM_REG(mdev, reg) \ - MLX5_GET(mcam_reg, (mdev)->caps.mcam[MLX5_MCAM_REGS_FIRST_128], \ - mng_access_reg_cap_mask.access_regs.reg) - -#define MLX5_CAP_MCAM_REG1(mdev, reg) \ - MLX5_GET(mcam_reg, (mdev)->caps.mcam[MLX5_MCAM_REGS_0x9080_0x90FF], \ - mng_access_reg_cap_mask.access_regs1.reg) - -#define MLX5_CAP_MCAM_REG2(mdev, reg) \ - MLX5_GET(mcam_reg, (mdev)->caps.mcam[MLX5_MCAM_REGS_0x9100_0x917F], \ - mng_access_reg_cap_mask.access_regs2.reg) - -#define MLX5_CAP_MCAM_FEATURE(mdev, fld) \ - MLX5_GET(mcam_reg, (mdev)->caps.mcam, mng_feature_cap_mask.enhanced_features.fld) - -#define MLX5_CAP_QCAM_REG(mdev, fld) \ - MLX5_GET(qcam_reg, (mdev)->caps.qcam, qos_access_reg_cap_mask.reg_cap.fld) - -#define MLX5_CAP_QCAM_FEATURE(mdev, fld) \ - MLX5_GET(qcam_reg, (mdev)->caps.qcam, qos_feature_cap_mask.feature_cap.fld) - -#define 
MLX5_CAP_FPGA(mdev, cap) \ - MLX5_GET(fpga_cap, (mdev)->caps.fpga, cap) - -#define MLX5_CAP64_FPGA(mdev, cap) \ - MLX5_GET64(fpga_cap, (mdev)->caps.fpga, cap) - -#define MLX5_CAP_DEV_MEM(mdev, cap)\ - MLX5_GET(device_mem_cap, mdev->caps.hca[MLX5_CAP_DEV_MEM]->cur, cap) - -#define MLX5_CAP64_DEV_MEM(mdev, cap)\ - MLX5_GET64(device_mem_cap, mdev->caps.hca[MLX5_CAP_DEV_MEM]->cur, cap) - -#define MLX5_CAP_TLS(mdev, cap) \ - MLX5_GET(tls_cap, (mdev)->caps.hca[MLX5_CAP_TLS]->cur, cap) - -#define MLX5_CAP_DEV_EVENT(mdev, cap)\ - MLX5_ADDR_OF(device_event_cap, (mdev)->caps.hca[MLX5_CAP_DEV_EVENT]->cur, cap) - -#define MLX5_CAP_DEV_VDPA_EMULATION(mdev, cap)\ - MLX5_GET(virtio_emulation_cap, \ - (mdev)->caps.hca[MLX5_CAP_VDPA_EMULATION]->cur, cap) - -#define MLX5_CAP64_DEV_VDPA_EMULATION(mdev, cap)\ - MLX5_GET64(virtio_emulation_cap, \ - (mdev)->caps.hca[MLX5_CAP_VDPA_EMULATION]->cur, cap) - -#define MLX5_CAP_IPSEC(mdev, cap)\ - MLX5_GET(ipsec_cap, (mdev)->caps.hca[MLX5_CAP_IPSEC]->cur, cap) + MLX5_GET(qos_cap, mdev->hca_caps_cur[MLX5_CAP_QOS], cap) enum { MLX5_CMD_STAT_OK = 0x0, @@ -1440,15 +1057,9 @@ enum { MLX5_PER_PRIORITY_COUNTERS_GROUP = 0x10, MLX5_PER_TRAFFIC_CLASS_COUNTERS_GROUP = 0x11, MLX5_PHYSICAL_LAYER_COUNTERS_GROUP = 0x12, - MLX5_PER_TRAFFIC_CLASS_CONGESTION_GROUP = 0x13, - MLX5_PHYSICAL_LAYER_STATISTICAL_GROUP = 0x16, MLX5_INFINIBAND_PORT_COUNTERS_GROUP = 0x20, }; -enum { - MLX5_PCIE_PERFORMANCE_COUNTERS_GROUP = 0x0, -}; - static inline u16 mlx5_to_sw_pkey_sz(int pkey_sz) { if (pkey_sz > MLX5_MAX_LOG_PKEY_TABLE) @@ -1456,8 +1067,8 @@ static inline u16 mlx5_to_sw_pkey_sz(int pkey_sz) return MLX5_MIN_PKEY_TABLE_SIZE << pkey_sz; } -#define MLX5_BY_PASS_NUM_REGULAR_PRIOS 16 -#define MLX5_BY_PASS_NUM_DONT_TRAP_PRIOS 16 +#define MLX5_BY_PASS_NUM_REGULAR_PRIOS 8 +#define MLX5_BY_PASS_NUM_DONT_TRAP_PRIOS 8 #define MLX5_BY_PASS_NUM_MULTICAST_PRIOS 1 #define MLX5_BY_PASS_NUM_PRIOS (MLX5_BY_PASS_NUM_REGULAR_PRIOS +\ MLX5_BY_PASS_NUM_DONT_TRAP_PRIOS +\ diff --git 
a/include/linux/mlx5/doorbell.h b/include/linux/mlx5/doorbell.h index 5c267707e1..afc78a3f44 100644 --- a/include/linux/mlx5/doorbell.h +++ b/include/linux/mlx5/doorbell.h @@ -36,25 +36,44 @@ #define MLX5_BF_OFFSET 0x800 #define MLX5_CQ_DOORBELL 0x20 +#if BITS_PER_LONG == 64 /* Assume that we can just write a 64-bit doorbell atomically. s390 * actually doesn't have writeq() but S/390 systems don't even have * PCI so we won't worry about it. - * - * Note that the write is not atomic on 32-bit systems! In contrast to 64-bit - * ones, it requires proper locking. mlx5_write64 doesn't do any locking, so use - * it at your own discretion, protected by some kind of lock on 32 bits. - * - * TODO: use write{q,l}_relaxed() */ -static inline void mlx5_write64(__be32 val[2], void __iomem *dest) +#define MLX5_DECLARE_DOORBELL_LOCK(name) +#define MLX5_INIT_DOORBELL_LOCK(ptr) do { } while (0) +#define MLX5_GET_DOORBELL_LOCK(ptr) (NULL) + +static inline void mlx5_write64(__be32 val[2], void __iomem *dest, + spinlock_t *doorbell_lock) { -#if BITS_PER_LONG == 64 __raw_writeq(*(u64 *)val, dest); -#else - __raw_writel((__force u32) val[0], dest); - __raw_writel((__force u32) val[1], dest + 4); -#endif } +#else + +/* Just fall back to a spinlock to protect the doorbell if + * BITS_PER_LONG is 32 -- there's no portable way to do atomic 64-bit + * MMIO writes. 
+ */ + +#define MLX5_DECLARE_DOORBELL_LOCK(name) spinlock_t name; +#define MLX5_INIT_DOORBELL_LOCK(ptr) spin_lock_init(ptr) +#define MLX5_GET_DOORBELL_LOCK(ptr) (ptr) + +static inline void mlx5_write64(__be32 val[2], void __iomem *dest, + spinlock_t *doorbell_lock) +{ + unsigned long flags; + + spin_lock_irqsave(doorbell_lock, flags); + __raw_writel((__force u32) val[0], dest); + __raw_writel((__force u32) val[1], dest + 4); + spin_unlock_irqrestore(doorbell_lock, flags); +} + +#endif + #endif /* MLX5_DOORBELL_H */ diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h index f17d2101af..ecc451d89c 100644 --- a/include/linux/mlx5/driver.h +++ b/include/linux/mlx5/driver.h @@ -36,31 +36,21 @@ #include #include #include -#include #include #include #include #include -#include +#include #include -#include #include -#include -#include -#include -#include #include #include -#include -#include -#include -#include - -#define MLX5_ADEV_NAME "mlx5_core" +#include enum { MLX5_BOARD_ID_LEN = 64, + MLX5_MAX_NAME_LEN = 16, }; enum { @@ -90,87 +80,52 @@ enum { }; enum { - MLX5_ATOMIC_MODE_OFFSET = 16, - MLX5_ATOMIC_MODE_IB_COMP = 1, - MLX5_ATOMIC_MODE_CX = 2, - MLX5_ATOMIC_MODE_8B = 3, - MLX5_ATOMIC_MODE_16B = 4, - MLX5_ATOMIC_MODE_32B = 5, - MLX5_ATOMIC_MODE_64B = 6, - MLX5_ATOMIC_MODE_128B = 7, - MLX5_ATOMIC_MODE_256B = 8, + MLX5_EQ_VEC_PAGES = 0, + MLX5_EQ_VEC_CMD = 1, + MLX5_EQ_VEC_ASYNC = 2, + MLX5_EQ_VEC_COMP_BASE, +}; + +enum { + MLX5_MAX_IRQ_NAME = 32 +}; + +enum { + MLX5_ATOMIC_MODE_IB_COMP = 1 << 16, + MLX5_ATOMIC_MODE_CX = 2 << 16, + MLX5_ATOMIC_MODE_8B = 3 << 16, + MLX5_ATOMIC_MODE_16B = 4 << 16, + MLX5_ATOMIC_MODE_32B = 5 << 16, + MLX5_ATOMIC_MODE_64B = 6 << 16, + MLX5_ATOMIC_MODE_128B = 7 << 16, + MLX5_ATOMIC_MODE_256B = 8 << 16, }; enum { - MLX5_REG_QPTS = 0x4002, MLX5_REG_QETCR = 0x4005, MLX5_REG_QTCT = 0x400a, - MLX5_REG_QPDPM = 0x4013, - MLX5_REG_QCAM = 0x4019, - MLX5_REG_DCBX_PARAM = 0x4020, - MLX5_REG_DCBX_APP = 0x4021, - MLX5_REG_FPGA_CAP = 
0x4022, - MLX5_REG_FPGA_CTRL = 0x4023, - MLX5_REG_FPGA_ACCESS_REG = 0x4024, - MLX5_REG_CORE_DUMP = 0x402e, MLX5_REG_PCAP = 0x5001, MLX5_REG_PMTU = 0x5003, MLX5_REG_PTYS = 0x5004, MLX5_REG_PAOS = 0x5006, MLX5_REG_PFCC = 0x5007, MLX5_REG_PPCNT = 0x5008, - MLX5_REG_PPTB = 0x500b, - MLX5_REG_PBMC = 0x500c, MLX5_REG_PMAOS = 0x5012, MLX5_REG_PUDE = 0x5009, MLX5_REG_PMPE = 0x5010, MLX5_REG_PELC = 0x500e, MLX5_REG_PVLC = 0x500f, MLX5_REG_PCMR = 0x5041, - MLX5_REG_PDDR = 0x5031, MLX5_REG_PMLP = 0x5002, - MLX5_REG_PPLM = 0x5023, - MLX5_REG_PCAM = 0x507f, MLX5_REG_NODE_DESC = 0x6001, MLX5_REG_HOST_ENDIANNESS = 0x7004, MLX5_REG_MCIA = 0x9014, - MLX5_REG_MFRL = 0x9028, MLX5_REG_MLCR = 0x902b, - MLX5_REG_MTRC_CAP = 0x9040, - MLX5_REG_MTRC_CONF = 0x9041, - MLX5_REG_MTRC_STDB = 0x9042, - MLX5_REG_MTRC_CTRL = 0x9043, - MLX5_REG_MPEIN = 0x9050, - MLX5_REG_MPCNT = 0x9051, - MLX5_REG_MTPPS = 0x9053, - MLX5_REG_MTPPSE = 0x9054, - MLX5_REG_MTUTC = 0x9055, - MLX5_REG_MPEGC = 0x9056, - MLX5_REG_MCQS = 0x9060, - MLX5_REG_MCQI = 0x9061, - MLX5_REG_MCC = 0x9062, - MLX5_REG_MCDA = 0x9063, - MLX5_REG_MCAM = 0x907f, - MLX5_REG_MIRC = 0x9162, - MLX5_REG_SBCAM = 0xB01F, - MLX5_REG_RESOURCE_DUMP = 0xC000, -}; - -enum mlx5_qpts_trust_state { - MLX5_QPTS_TRUST_PCP = 1, - MLX5_QPTS_TRUST_DSCP = 2, -}; - -enum mlx5_dcbx_oper_mode { - MLX5E_DCBX_PARAM_VER_OPER_HOST = 0x0, - MLX5E_DCBX_PARAM_VER_OPER_AUTO = 0x3, }; enum { MLX5_ATOMIC_OPS_CMP_SWAP = 1 << 0, MLX5_ATOMIC_OPS_FETCH_ADD = 1 << 1, - MLX5_ATOMIC_OPS_EXTENDED_CMP_SWAP = 1 << 2, - MLX5_ATOMIC_OPS_EXTENDED_FETCH_ADD = 1 << 3, }; enum mlx5_page_fault_resume_flags { @@ -186,20 +141,8 @@ enum dbg_rsc_type { MLX5_DBG_RSC_CQ, }; -enum port_state_policy { - MLX5_POLICY_DOWN = 0, - MLX5_POLICY_UP = 1, - MLX5_POLICY_FOLLOW = 2, - MLX5_POLICY_INVALID = 0xffffffff -}; - -enum mlx5_coredev_type { - MLX5_COREDEV_PF, - MLX5_COREDEV_VF, - MLX5_COREDEV_SF, -}; - struct mlx5_field_desc { + struct dentry *dent; int i; }; @@ -208,12 +151,18 @@ struct 
mlx5_rsc_debug { void *object; enum dbg_rsc_type type; struct dentry *root; - struct mlx5_field_desc fields[]; + struct mlx5_field_desc fields[0]; }; enum mlx5_dev_event { - MLX5_DEV_EVENT_SYS_ERROR = 128, /* 0 - 127 are FW events */ - MLX5_DEV_EVENT_PORT_AFFINITY = 129, + MLX5_DEV_EVENT_SYS_ERROR, + MLX5_DEV_EVENT_PORT_UP, + MLX5_DEV_EVENT_PORT_DOWN, + MLX5_DEV_EVENT_PORT_INITIALIZED, + MLX5_DEV_EVENT_LID_CHANGE, + MLX5_DEV_EVENT_PKEY_CHANGE, + MLX5_DEV_EVENT_GUID_CHANGE, + MLX5_DEV_EVENT_CLIENT_REREG, }; enum mlx5_port_status { @@ -221,10 +170,36 @@ enum mlx5_port_status { MLX5_PORT_DOWN = 2, }; -enum mlx5_cmdif_state { - MLX5_CMDIF_STATE_UNINITIALIZED, - MLX5_CMDIF_STATE_UP, - MLX5_CMDIF_STATE_DOWN, +struct mlx5_uuar_info { + struct mlx5_uar *uars; + int num_uars; + int num_low_latency_uuars; + unsigned long *bitmap; + unsigned int *count; + struct mlx5_bf *bfs; + + /* + * protect uuar allocation data structs + */ + struct mutex lock; + u32 ver; +}; + +struct mlx5_bf { + void __iomem *reg; + void __iomem *regreg; + int buf_size; + struct mlx5_uar *uar; + unsigned long offset; + int need_lock; + /* protect blue flame buffer selection when needed + */ + spinlock_t lock; + + /* serialize 64 bit writes when done as two 32 bit accesses + */ + spinlock_t lock32; + int uuarn; }; struct mlx5_cmd_first { @@ -233,7 +208,7 @@ struct mlx5_cmd_first { struct mlx5_cmd_msg { struct list_head list; - struct cmd_msg_cache *parent; + struct cache_ent *cache; u32 len; struct mlx5_cmd_first first; struct mlx5_cmd_mailbox *next; @@ -241,6 +216,11 @@ struct mlx5_cmd_msg { struct mlx5_cmd_debug { struct dentry *dbg_root; + struct dentry *dbg_in; + struct dentry *dbg_out; + struct dentry *dbg_outlen; + struct dentry *dbg_status; + struct dentry *dbg_run; void *in_msg; void *out_msg; u8 status; @@ -248,31 +228,30 @@ struct mlx5_cmd_debug { u16 outlen; }; -struct cmd_msg_cache { +struct cache_ent { /* protect block chain allocations */ spinlock_t lock; struct list_head head; - unsigned 
int max_inbox_size; - unsigned int num_ent; }; -enum { - MLX5_NUM_COMMAND_CACHES = 5, +struct cmd_msg_cache { + struct cache_ent large; + struct cache_ent med; + }; struct mlx5_cmd_stats { u64 sum; u64 n; struct dentry *root; + struct dentry *avg; + struct dentry *count; /* protect command average calculations */ spinlock_t lock; }; struct mlx5_cmd { - struct mlx5_nb nb; - - enum mlx5_cmdif_state state; void *cmd_alloc_buf; dma_addr_t alloc_dma; int alloc_size; @@ -299,13 +278,18 @@ struct mlx5_cmd { struct semaphore sem; struct semaphore pages_sem; int mode; - u16 allowed_opcode; struct mlx5_cmd_work_ent *ent_arr[MLX5_MAX_COMMANDS]; - struct dma_pool *pool; + struct pci_pool *pool; struct mlx5_cmd_debug dbg; - struct cmd_msg_cache cache[MLX5_NUM_COMMAND_CACHES]; + struct cmd_msg_cache cache; int checksum_disabled; - struct mlx5_cmd_stats *stats; + struct mlx5_cmd_stats stats[MLX5_CMD_OP_MAX]; +}; + +struct mlx5_port_caps { + int gid_table_len; + int pkey_table_len; + u8 ext_port_cap; }; struct mlx5_cmd_mailbox { @@ -319,21 +303,35 @@ struct mlx5_buf_list { dma_addr_t map; }; -struct mlx5_frag_buf { - struct mlx5_buf_list *frags; +struct mlx5_buf { + struct mlx5_buf_list direct; int npages; int size; u8 page_shift; }; -struct mlx5_frag_buf_ctrl { - struct mlx5_buf_list *frags; - u32 sz_m1; - u16 frag_sz_m1; - u16 strides_offset; - u8 log_sz; - u8 log_stride; - u8 log_frag_strides; +struct mlx5_eq_tasklet { + struct list_head list; + struct list_head process_list; + struct tasklet_struct task; + /* lock on completion tasklet list */ + spinlock_t lock; +}; + +struct mlx5_eq { + struct mlx5_core_dev *dev; + __be32 __iomem *doorbell; + u32 cons_index; + struct mlx5_buf buf; + int size; + unsigned int irqn; + u8 eqn; + int nent; + u64 mask; + struct list_head list; + int index; + struct mlx5_rsc_debug *dbg; + struct mlx5_eq_tasklet tasklet_ctx; }; struct mlx5_core_psv { @@ -357,170 +355,138 @@ struct mlx5_core_sig_ctx { u32 sigerr_count; }; -enum { - MLX5_MKEY_MR = 1, - 
MLX5_MKEY_MW, - MLX5_MKEY_INDIRECT_DEVX, -}; - struct mlx5_core_mkey { u64 iova; u64 size; u32 key; u32 pd; - u32 type; - struct wait_queue_head wait; - refcount_t usecount; }; -#define MLX5_24BIT_MASK ((1 << 24) - 1) - enum mlx5_res_type { MLX5_RES_QP = MLX5_EVENT_QUEUE_TYPE_QP, MLX5_RES_RQ = MLX5_EVENT_QUEUE_TYPE_RQ, MLX5_RES_SQ = MLX5_EVENT_QUEUE_TYPE_SQ, MLX5_RES_SRQ = 3, MLX5_RES_XSRQ = 4, - MLX5_RES_XRQ = 5, - MLX5_RES_DCT = MLX5_EVENT_QUEUE_TYPE_DCT, }; struct mlx5_core_rsc_common { enum mlx5_res_type res; - refcount_t refcount; + atomic_t refcount; struct completion free; }; -struct mlx5_uars_page { - void __iomem *map; - bool wc; +struct mlx5_core_srq { + struct mlx5_core_rsc_common common; /* must be first */ + u32 srqn; + int max; + int max_gs; + int max_avail_gather; + int wqe_shift; + void (*event) (struct mlx5_core_srq *, enum mlx5_event); + + atomic_t refcount; + struct completion free; +}; + +struct mlx5_eq_table { + void __iomem *update_ci; + void __iomem *update_arm_ci; + struct list_head comp_eqs_list; + struct mlx5_eq pages_eq; + struct mlx5_eq async_eq; + struct mlx5_eq cmd_eq; + int num_comp_vectors; + /* protect EQs list + */ + spinlock_t lock; +}; + +struct mlx5_uar { u32 index; - struct list_head list; - unsigned int bfregs; - unsigned long *reg_bitmap; /* for non fast path bf regs */ - unsigned long *fp_bitmap; - unsigned int reg_avail; - unsigned int fp_avail; - struct kref ref_count; - struct mlx5_core_dev *mdev; -}; - -struct mlx5_bfreg_head { - /* protect blue flame registers allocations */ - struct mutex lock; - struct list_head list; -}; - -struct mlx5_bfreg_data { - struct mlx5_bfreg_head reg_head; - struct mlx5_bfreg_head wc_head; -}; - -struct mlx5_sq_bfreg { + struct list_head bf_list; + unsigned free_bf_bmap; + void __iomem *bf_map; void __iomem *map; - struct mlx5_uars_page *up; - bool wc; - u32 index; - unsigned int offset; }; + struct mlx5_core_health { struct health_buffer __iomem *health; __be32 __iomem *health_counter; 
struct timer_list timer; u32 prev; int miss_counter; - u8 synd; - u32 fatal_error; - u32 crdump_size; + bool sick; /* wq spinlock to synchronize draining */ spinlock_t wq_lock; struct workqueue_struct *wq; unsigned long flags; - struct work_struct fatal_report_work; - struct work_struct report_work; - struct devlink_health_reporter *fw_reporter; - struct devlink_health_reporter *fw_fatal_reporter; + struct work_struct work; + struct delayed_work recover_work; }; -struct mlx5_qp_table { - struct notifier_block nb; - +struct mlx5_cq_table { /* protect radix tree */ spinlock_t lock; struct radix_tree_root tree; }; +struct mlx5_qp_table { + /* protect radix tree + */ + spinlock_t lock; + struct radix_tree_root tree; +}; + +struct mlx5_srq_table { + /* protect radix tree + */ + spinlock_t lock; + struct radix_tree_root tree; +}; + +struct mlx5_mkey_table { + /* protect radix tree + */ + rwlock_t lock; + struct radix_tree_root tree; +}; + struct mlx5_vf_context { int enabled; - u64 port_guid; - u64 node_guid; - /* Valid bits are used to validate administrative guid only. 
- * Enabled after ndo_set_vf_guid - */ - u8 port_guid_valid:1; - u8 node_guid_valid:1; - enum port_state_policy policy; }; struct mlx5_core_sriov { struct mlx5_vf_context *vfs_ctx; int num_vfs; - u16 max_vfs; + int enabled_vfs; }; -struct mlx5_fc_pool { - struct mlx5_core_dev *dev; - struct mutex pool_lock; /* protects pool lists */ - struct list_head fully_used; - struct list_head partially_used; - struct list_head unused; - int available_fcs; - int used_fcs; - int threshold; +struct mlx5_irq_info { + cpumask_var_t mask; + char name[MLX5_MAX_IRQ_NAME]; }; struct mlx5_fc_stats { - spinlock_t counters_idr_lock; /* protects counters_idr */ - struct idr counters_idr; - struct list_head counters; - struct llist_head addlist; - struct llist_head dellist; + struct rb_root counters; + struct list_head addlist; + /* protect addlist add/splice operations */ + spinlock_t addlist_lock; struct workqueue_struct *wq; struct delayed_work work; unsigned long next_query; - unsigned long sampling_interval; /* jiffies */ - u32 *bulk_query_out; - struct mlx5_fc_pool fc_pool; }; -struct mlx5_events; -struct mlx5_mpfs; struct mlx5_eswitch; struct mlx5_lag; -struct mlx5_devcom; -struct mlx5_fw_reset; -struct mlx5_eq_table; -struct mlx5_irq_table; -struct mlx5_vhca_state_notifier; -struct mlx5_sf_dev_table; -struct mlx5_sf_hw_table; -struct mlx5_sf_table; - -struct mlx5_rate_limit { - u32 rate; - u32 max_burst_sz; - u16 typical_pkt_sz; -}; struct mlx5_rl_entry { - u8 rl_raw[MLX5_ST_SZ_BYTES(set_pp_rate_limit_context)]; - u64 refcount; - u16 index; - u16 uid; - u8 dedicated : 1; + u32 rate; + u16 index; + u16 refcount; }; struct mlx5_rl_table { @@ -530,58 +496,46 @@ struct mlx5_rl_table { u32 max_rate; u32 min_rate; struct mlx5_rl_entry *rl_entry; - u64 refcount; }; -struct mlx5_core_roce { - struct mlx5_flow_table *ft; - struct mlx5_flow_group *fg; - struct mlx5_flow_handle *allow_rule; -}; - -enum { - MLX5_PRIV_FLAGS_DISABLE_IB_ADEV = 1 << 0, - MLX5_PRIV_FLAGS_DISABLE_ALL_ADEV = 1 << 1, 
- /* Set during device detach to block any further devices - * creation/deletion on drivers rescan. Unset during device attach. - */ - MLX5_PRIV_FLAGS_DETACH = 1 << 2, -}; - -struct mlx5_adev { - struct auxiliary_device adev; - struct mlx5_core_dev *mdev; - int idx; -}; - -struct mlx5_ft_pool; struct mlx5_priv { - /* IRQ table valid only for real pci devices PF or VF */ - struct mlx5_irq_table *irq_table; - struct mlx5_eq_table *eq_table; + char name[MLX5_MAX_NAME_LEN]; + struct mlx5_eq_table eq_table; + struct msix_entry *msix_arr; + struct mlx5_irq_info *irq_info; + struct mlx5_uuar_info uuari; + MLX5_DECLARE_DOORBELL_LOCK(cq_uar_lock); /* pages stuff */ - struct mlx5_nb pg_nb; struct workqueue_struct *pg_wq; - struct xarray page_root_xa; + struct rb_root page_root; int fw_pages; atomic_t reg_pages; struct list_head free_list; int vfs_pages; - int host_pf_pages; struct mlx5_core_health health; - struct list_head traps; + + struct mlx5_srq_table srq_table; /* start: qp staff */ + struct mlx5_qp_table qp_table; struct dentry *qp_debugfs; struct dentry *eq_debugfs; struct dentry *cq_debugfs; struct dentry *cmdif_debugfs; /* end: qp staff */ + /* start: cq staff */ + struct mlx5_cq_table cq_table; + /* end: cq staff */ + + /* start: mkey staff */ + struct mlx5_mkey_table mkey_table; + /* end: mkey staff */ + /* start: alloc staff */ - /* protect buffer allocation according to numa node */ + /* protect buffer alocation according to numa node */ struct mutex alloc_mutex; int numa_node; @@ -590,45 +544,32 @@ struct mlx5_priv { /* end: alloc staff */ struct dentry *dbg_root; + /* protect mkey key part */ + spinlock_t mkey_lock; + u8 mkey_key; + + struct list_head dev_list; struct list_head ctx_list; spinlock_t ctx_lock; - struct mlx5_adev **adev; - int adev_idx; - struct mlx5_events *events; struct mlx5_flow_steering *steering; - struct mlx5_mpfs *mpfs; struct mlx5_eswitch *eswitch; struct mlx5_core_sriov sriov; struct mlx5_lag *lag; - u32 flags; - struct mlx5_devcom 
*devcom; - struct mlx5_fw_reset *fw_reset; - struct mlx5_core_roce roce; + unsigned long pci_dev_data; struct mlx5_fc_stats fc_stats; struct mlx5_rl_table rl_table; - struct mlx5_ft_pool *ft_pool; - - struct mlx5_bfreg_data bfregs; - struct mlx5_uars_page *uar; -#ifdef CONFIG_MLX5_SF - struct mlx5_vhca_state_notifier *vhca_state_notifier; - struct mlx5_sf_dev_table *sf_dev_table; - struct mlx5_core_dev *parent_mdev; -#endif -#ifdef CONFIG_MLX5_SF_MANAGER - struct mlx5_sf_hw_table *sf_hw_table; - struct mlx5_sf_table *sf_table; -#endif }; enum mlx5_device_state { - MLX5_DEVICE_STATE_UP = 1, + MLX5_DEVICE_STATE_UP, MLX5_DEVICE_STATE_INTERNAL_ERROR, }; enum mlx5_interface_state { - MLX5_INTERFACE_STATE_UP = BIT(0), + MLX5_INTERFACE_STATE_DOWN = BIT(0), + MLX5_INTERFACE_STATE_UP = BIT(1), + MLX5_INTERFACE_STATE_SHUTDOWN = BIT(2), }; enum mlx5_pci_status { @@ -636,107 +577,19 @@ enum mlx5_pci_status { MLX5_PCI_STATUS_ENABLED, }; -enum mlx5_pagefault_type_flags { - MLX5_PFAULT_REQUESTOR = 1 << 0, - MLX5_PFAULT_WRITE = 1 << 1, - MLX5_PFAULT_RDMA = 1 << 2, -}; - struct mlx5_td { - /* protects tirs list changes while tirs refresh */ - struct mutex list_lock; struct list_head tirs_list; u32 tdn; }; struct mlx5e_resources { - struct mlx5e_hw_objs { - u32 pdn; - struct mlx5_td td; - struct mlx5_core_mkey mkey; - struct mlx5_sq_bfreg bfreg; - } hw_objs; - struct devlink_port dl_port; - struct net_device *uplink_netdev; -}; - -enum mlx5_sw_icm_type { - MLX5_SW_ICM_TYPE_STEERING, - MLX5_SW_ICM_TYPE_HEADER_MODIFY, -}; - -#define MLX5_MAX_RESERVED_GIDS 8 - -struct mlx5_rsvd_gids { - unsigned int start; - unsigned int count; - struct ida ida; -}; - -#define MAX_PIN_NUM 8 -struct mlx5_pps { - u8 pin_caps[MAX_PIN_NUM]; - struct work_struct out_work; - u64 start[MAX_PIN_NUM]; - u8 enabled; -}; - -struct mlx5_timer { - struct cyclecounter cycles; - struct timecounter tc; - u32 nominal_c_mult; - unsigned long overflow_period; - struct delayed_work overflow_work; -}; - -struct mlx5_clock 
{ - struct mlx5_nb pps_nb; - seqlock_t lock; - struct hwtstamp_config hwtstamp_config; - struct ptp_clock *ptp; - struct ptp_clock_info ptp_info; - struct mlx5_pps pps_info; - struct mlx5_timer timer; -}; - -struct mlx5_dm; -struct mlx5_fw_tracer; -struct mlx5_vxlan; -struct mlx5_geneve; -struct mlx5_hv_vhca; - -#define MLX5_LOG_SW_ICM_BLOCK_SIZE(dev) (MLX5_CAP_DEV_MEM(dev, log_sw_icm_alloc_granularity)) -#define MLX5_SW_ICM_BLOCK_SIZE(dev) (1 << MLX5_LOG_SW_ICM_BLOCK_SIZE(dev)) - -enum { - MLX5_PROF_MASK_QP_SIZE = (u64)1 << 0, - MLX5_PROF_MASK_MR_CACHE = (u64)1 << 1, -}; - -enum { - MR_CACHE_LAST_STD_ENTRY = 20, - MLX5_IMR_MTT_CACHE_ENTRY, - MLX5_IMR_KSM_CACHE_ENTRY, - MAX_MR_CACHE_ENTRIES -}; - -struct mlx5_profile { - u64 mask; - u8 log_max_qp; - struct { - int size; - int limit; - } mr_cache[MAX_MR_CACHE_ENTRIES]; -}; - -struct mlx5_hca_cap { - u32 cur[MLX5_UN_SZ_DW(hca_cap_union)]; - u32 max[MLX5_UN_SZ_DW(hca_cap_union)]; + struct mlx5_uar cq_uar; + u32 pdn; + struct mlx5_td td; + struct mlx5_core_mkey mkey; }; struct mlx5_core_dev { - struct device *device; - enum mlx5_coredev_type coredev_type; struct pci_dev *pdev; /* sync pci state */ struct mutex pci_status_mutex; @@ -744,45 +597,26 @@ struct mlx5_core_dev { u8 rev_id; char board_id[MLX5_BOARD_ID_LEN]; struct mlx5_cmd cmd; - struct { - struct mlx5_hca_cap *hca[MLX5_CAP_NUM]; - u32 pcam[MLX5_ST_SZ_DW(pcam_reg)]; - u32 mcam[MLX5_MCAM_REGS_NUM][MLX5_ST_SZ_DW(mcam_reg)]; - u32 fpga[MLX5_ST_SZ_DW(fpga_cap)]; - u32 qcam[MLX5_ST_SZ_DW(qcam_reg)]; - u8 embedded_cpu; - } caps; - u64 sys_image_guid; + struct mlx5_port_caps port_caps[MLX5_MAX_PORTS]; + u32 hca_caps_cur[MLX5_CAP_NUM][MLX5_UN_SZ_DW(hca_cap_union)]; + u32 hca_caps_max[MLX5_CAP_NUM][MLX5_UN_SZ_DW(hca_cap_union)]; phys_addr_t iseg_base; struct mlx5_init_seg __iomem *iseg; - phys_addr_t bar_addr; enum mlx5_device_state state; /* sync interface state */ struct mutex intf_state_mutex; unsigned long intf_state; + void (*event) (struct mlx5_core_dev *dev, + 
enum mlx5_dev_event event, + unsigned long param); struct mlx5_priv priv; - struct mlx5_profile profile; + struct mlx5_profile *profile; + atomic_t num_qps; u32 issi; struct mlx5e_resources mlx5e_res; - struct mlx5_dm *dm; - struct mlx5_vxlan *vxlan; - struct mlx5_geneve *geneve; - struct { - struct mlx5_rsvd_gids reserved_gids; - u32 roce_en; - } roce; -#ifdef CONFIG_MLX5_FPGA - struct mlx5_fpga_device *fpga; +#ifdef CONFIG_RFS_ACCEL + struct cpu_rmap *rmap; #endif -#ifdef CONFIG_MLX5_ACCEL - const struct mlx5_accel_ipsec_ops *ipsec_ops; -#endif - struct mlx5_clock clock; - struct mlx5_ib_clock_info *clock_info; - struct mlx5_fw_tracer *tracer; - struct mlx5_rsc_dump *rsc_dump; - u32 vsc_addr; - struct mlx5_hv_vhca *hv_vhca; }; struct mlx5_db { @@ -806,12 +640,7 @@ enum { typedef void (*mlx5_cmd_cbk_t)(int status, void *context); -enum { - MLX5_CMD_ENT_STATE_PENDING_COMP, -}; - struct mlx5_cmd_work_ent { - unsigned long state; struct mlx5_cmd_msg *in; struct mlx5_cmd_msg *out; void *uout; @@ -820,7 +649,6 @@ struct mlx5_cmd_work_ent { struct delayed_work cb_timeout_work; void *context; int idx; - struct completion handling; struct completion done; struct mlx5_cmd *cmd; struct work_struct work; @@ -832,9 +660,6 @@ struct mlx5_cmd_work_ent { u64 ts1; u64 ts2; u16 op; - bool polling; - /* Track the max comp handlers */ - refcount_t refcnt; }; struct mlx5_pas { @@ -842,6 +667,13 @@ struct mlx5_pas { u8 log_sz; }; +enum port_state_policy { + MLX5_POLICY_DOWN = 0, + MLX5_POLICY_UP = 1, + MLX5_POLICY_FOLLOW = 2, + MLX5_POLICY_INVALID = 0xffffffff +}; + enum phy_port_state { MLX5_AAA_111 }; @@ -860,8 +692,8 @@ struct mlx5_hca_vport_context { u64 node_guid; u32 cap_mask1; u32 cap_mask1_perm; - u16 cap_mask2; - u16 cap_mask2_perm; + u32 cap_mask2; + u32 cap_mask2_perm; u16 lid; u8 init_type_reply; /* bitmask: see ib spec 14.2.5.6 InitTypeReply */ u8 lmc; @@ -873,11 +705,13 @@ struct mlx5_hca_vport_context { bool grh_required; }; -static inline void *mlx5_buf_offset(struct 
mlx5_frag_buf *buf, int offset) +static inline void *mlx5_buf_offset(struct mlx5_buf *buf, int offset) { - return buf->frags->buf + offset; + return buf->direct.buf + offset; } +extern struct workqueue_struct *mlx5_core_wq; + #define STRUCT_FIELD(header, field) \ .struct_offset_bytes = offsetof(struct ib_unpacked_ ## header, field), \ .struct_size_bytes = sizeof((struct ib_unpacked_ ## header *)0)->field @@ -904,126 +738,73 @@ static inline u16 fw_rev_sub(struct mlx5_core_dev *dev) return ioread32be(&dev->iseg->cmdif_rev_fw_sub) & 0xffff; } +static inline u16 cmdif_rev(struct mlx5_core_dev *dev) +{ + return ioread32be(&dev->iseg->cmdif_rev_fw_sub) >> 16; +} + +static inline void *mlx5_vzalloc(unsigned long size) +{ + void *rtn; + + rtn = kzalloc(size, GFP_KERNEL | __GFP_NOWARN); + if (!rtn) + rtn = vzalloc(size); + return rtn; +} + static inline u32 mlx5_base_mkey(const u32 key) { return key & 0xffffff00u; } -static inline u32 wq_get_byte_sz(u8 log_sz, u8 log_stride) -{ - return ((u32)1 << log_sz) << log_stride; -} - -static inline void mlx5_init_fbc_offset(struct mlx5_buf_list *frags, - u8 log_stride, u8 log_sz, - u16 strides_offset, - struct mlx5_frag_buf_ctrl *fbc) -{ - fbc->frags = frags; - fbc->log_stride = log_stride; - fbc->log_sz = log_sz; - fbc->sz_m1 = (1 << fbc->log_sz) - 1; - fbc->log_frag_strides = PAGE_SHIFT - fbc->log_stride; - fbc->frag_sz_m1 = (1 << fbc->log_frag_strides) - 1; - fbc->strides_offset = strides_offset; -} - -static inline void mlx5_init_fbc(struct mlx5_buf_list *frags, - u8 log_stride, u8 log_sz, - struct mlx5_frag_buf_ctrl *fbc) -{ - mlx5_init_fbc_offset(frags, log_stride, log_sz, 0, fbc); -} - -static inline void *mlx5_frag_buf_get_wqe(struct mlx5_frag_buf_ctrl *fbc, - u32 ix) -{ - unsigned int frag; - - ix += fbc->strides_offset; - frag = ix >> fbc->log_frag_strides; - - return fbc->frags[frag].buf + ((fbc->frag_sz_m1 & ix) << fbc->log_stride); -} - -static inline u32 -mlx5_frag_buf_get_idx_last_contig_stride(struct 
mlx5_frag_buf_ctrl *fbc, u32 ix) -{ - u32 last_frag_stride_idx = (ix + fbc->strides_offset) | fbc->frag_sz_m1; - - return min_t(u32, last_frag_stride_idx - fbc->strides_offset, fbc->sz_m1); -} - -enum { - CMD_ALLOWED_OPCODE_ALL, -}; - +int mlx5_cmd_init(struct mlx5_core_dev *dev); +void mlx5_cmd_cleanup(struct mlx5_core_dev *dev); void mlx5_cmd_use_events(struct mlx5_core_dev *dev); void mlx5_cmd_use_polling(struct mlx5_core_dev *dev); -void mlx5_cmd_allowed_opcode(struct mlx5_core_dev *dev, u16 opcode); - -struct mlx5_async_ctx { - struct mlx5_core_dev *dev; - atomic_t num_inflight; - struct wait_queue_head wait; -}; - -struct mlx5_async_work; - -typedef void (*mlx5_async_cbk_t)(int status, struct mlx5_async_work *context); - -struct mlx5_async_work { - struct mlx5_async_ctx *ctx; - mlx5_async_cbk_t user_callback; -}; - -void mlx5_cmd_init_async_ctx(struct mlx5_core_dev *dev, - struct mlx5_async_ctx *ctx); -void mlx5_cmd_cleanup_async_ctx(struct mlx5_async_ctx *ctx); -int mlx5_cmd_exec_cb(struct mlx5_async_ctx *ctx, void *in, int in_size, - void *out, int out_size, mlx5_async_cbk_t callback, - struct mlx5_async_work *work); int mlx5_cmd_exec(struct mlx5_core_dev *dev, void *in, int in_size, void *out, int out_size); - -#define mlx5_cmd_exec_inout(dev, ifc_cmd, in, out) \ - ({ \ - mlx5_cmd_exec(dev, in, MLX5_ST_SZ_BYTES(ifc_cmd##_in), out, \ - MLX5_ST_SZ_BYTES(ifc_cmd##_out)); \ - }) - -#define mlx5_cmd_exec_in(dev, ifc_cmd, in) \ - ({ \ - u32 _out[MLX5_ST_SZ_DW(ifc_cmd##_out)] = {}; \ - mlx5_cmd_exec_inout(dev, ifc_cmd, in, _out); \ - }) - -int mlx5_cmd_exec_polling(struct mlx5_core_dev *dev, void *in, int in_size, - void *out, int out_size); +int mlx5_cmd_exec_cb(struct mlx5_core_dev *dev, void *in, int in_size, + void *out, int out_size, mlx5_cmd_cbk_t callback, + void *context); void mlx5_cmd_mbox_status(void *out, u8 *status, u32 *syndrome); -bool mlx5_cmd_is_down(struct mlx5_core_dev *dev); int mlx5_core_get_caps(struct mlx5_core_dev *dev, enum mlx5_cap_type 
cap_type); int mlx5_cmd_alloc_uar(struct mlx5_core_dev *dev, u32 *uarn); int mlx5_cmd_free_uar(struct mlx5_core_dev *dev, u32 uarn); -void mlx5_health_flush(struct mlx5_core_dev *dev); +int mlx5_alloc_uuars(struct mlx5_core_dev *dev, struct mlx5_uuar_info *uuari); +int mlx5_free_uuars(struct mlx5_core_dev *dev, struct mlx5_uuar_info *uuari); +int mlx5_alloc_map_uar(struct mlx5_core_dev *mdev, struct mlx5_uar *uar, + bool map_wc); +void mlx5_unmap_free_uar(struct mlx5_core_dev *mdev, struct mlx5_uar *uar); void mlx5_health_cleanup(struct mlx5_core_dev *dev); int mlx5_health_init(struct mlx5_core_dev *dev); void mlx5_start_health_poll(struct mlx5_core_dev *dev); -void mlx5_stop_health_poll(struct mlx5_core_dev *dev, bool disable_health); +void mlx5_stop_health_poll(struct mlx5_core_dev *dev); void mlx5_drain_health_wq(struct mlx5_core_dev *dev); -void mlx5_trigger_health_work(struct mlx5_core_dev *dev); -int mlx5_buf_alloc(struct mlx5_core_dev *dev, - int size, struct mlx5_frag_buf *buf); -void mlx5_buf_free(struct mlx5_core_dev *dev, struct mlx5_frag_buf *buf); -int mlx5_frag_buf_alloc_node(struct mlx5_core_dev *dev, int size, - struct mlx5_frag_buf *buf, int node); -void mlx5_frag_buf_free(struct mlx5_core_dev *dev, struct mlx5_frag_buf *buf); +int mlx5_buf_alloc_node(struct mlx5_core_dev *dev, int size, + struct mlx5_buf *buf, int node); +int mlx5_buf_alloc(struct mlx5_core_dev *dev, int size, struct mlx5_buf *buf); +void mlx5_buf_free(struct mlx5_core_dev *dev, struct mlx5_buf *buf); struct mlx5_cmd_mailbox *mlx5_alloc_cmd_mailbox_chain(struct mlx5_core_dev *dev, gfp_t flags, int npages); void mlx5_free_cmd_mailbox_chain(struct mlx5_core_dev *dev, struct mlx5_cmd_mailbox *head); +int mlx5_core_create_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq, + struct mlx5_srq_attr *in); +int mlx5_core_destroy_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq); +int mlx5_core_query_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq, + struct 
mlx5_srq_attr *out); +int mlx5_core_arm_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq, + u16 lwm, int is_srq); +void mlx5_init_mkey_table(struct mlx5_core_dev *dev); +void mlx5_cleanup_mkey_table(struct mlx5_core_dev *dev); +int mlx5_core_create_mkey_cb(struct mlx5_core_dev *dev, + struct mlx5_core_mkey *mkey, + u32 *in, int inlen, + u32 *out, int outlen, + mlx5_cmd_cbk_t callback, void *context); int mlx5_core_create_mkey(struct mlx5_core_dev *dev, struct mlx5_core_mkey *mkey, u32 *in, int inlen); @@ -1031,39 +812,65 @@ int mlx5_core_destroy_mkey(struct mlx5_core_dev *dev, struct mlx5_core_mkey *mkey); int mlx5_core_query_mkey(struct mlx5_core_dev *dev, struct mlx5_core_mkey *mkey, u32 *out, int outlen); +int mlx5_core_dump_fill_mkey(struct mlx5_core_dev *dev, struct mlx5_core_mkey *_mkey, + u32 *mkey); int mlx5_core_alloc_pd(struct mlx5_core_dev *dev, u32 *pdn); int mlx5_core_dealloc_pd(struct mlx5_core_dev *dev, u32 pdn); -int mlx5_pagealloc_init(struct mlx5_core_dev *dev); +int mlx5_core_mad_ifc(struct mlx5_core_dev *dev, const void *inb, void *outb, + u16 opmod, u8 port); +void mlx5_pagealloc_init(struct mlx5_core_dev *dev); void mlx5_pagealloc_cleanup(struct mlx5_core_dev *dev); -void mlx5_pagealloc_start(struct mlx5_core_dev *dev); +int mlx5_pagealloc_start(struct mlx5_core_dev *dev); void mlx5_pagealloc_stop(struct mlx5_core_dev *dev); void mlx5_core_req_pages_handler(struct mlx5_core_dev *dev, u16 func_id, - s32 npages, bool ec_function); + s32 npages); int mlx5_satisfy_startup_pages(struct mlx5_core_dev *dev, int boot); int mlx5_reclaim_startup_pages(struct mlx5_core_dev *dev); void mlx5_register_debugfs(void); void mlx5_unregister_debugfs(void); - -void mlx5_fill_page_array(struct mlx5_frag_buf *buf, __be64 *pas); -void mlx5_fill_page_frag_array_perm(struct mlx5_frag_buf *buf, __be64 *pas, u8 perm); -void mlx5_fill_page_frag_array(struct mlx5_frag_buf *frag_buf, __be64 *pas); -int mlx5_vector2eqn(struct mlx5_core_dev *dev, int vector, int 
*eqn); +int mlx5_eq_init(struct mlx5_core_dev *dev); +void mlx5_eq_cleanup(struct mlx5_core_dev *dev); +void mlx5_fill_page_array(struct mlx5_buf *buf, __be64 *pas); +void mlx5_cq_completion(struct mlx5_core_dev *dev, u32 cqn); +void mlx5_rsc_event(struct mlx5_core_dev *dev, u32 rsn, int event_type); +#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING +void mlx5_eq_pagefault(struct mlx5_core_dev *dev, struct mlx5_eqe *eqe); +#endif +void mlx5_srq_event(struct mlx5_core_dev *dev, u32 srqn, int event_type); +struct mlx5_core_srq *mlx5_core_get_srq(struct mlx5_core_dev *dev, u32 srqn); +void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, u64 vec); +void mlx5_cq_event(struct mlx5_core_dev *dev, u32 cqn, int event_type); +int mlx5_create_map_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq, u8 vecidx, + int nent, u64 mask, const char *name, struct mlx5_uar *uar); +int mlx5_destroy_unmap_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq); +int mlx5_start_eqs(struct mlx5_core_dev *dev); +int mlx5_stop_eqs(struct mlx5_core_dev *dev); +int mlx5_vector2eqn(struct mlx5_core_dev *dev, int vector, int *eqn, + unsigned int *irqn); int mlx5_core_attach_mcg(struct mlx5_core_dev *dev, union ib_gid *mgid, u32 qpn); int mlx5_core_detach_mcg(struct mlx5_core_dev *dev, union ib_gid *mgid, u32 qpn); -void mlx5_qp_debugfs_init(struct mlx5_core_dev *dev); +int mlx5_qp_debugfs_init(struct mlx5_core_dev *dev); void mlx5_qp_debugfs_cleanup(struct mlx5_core_dev *dev); int mlx5_core_access_reg(struct mlx5_core_dev *dev, void *data_in, int size_in, void *data_out, int size_out, u16 reg_num, int arg, int write); +int mlx5_debug_eq_add(struct mlx5_core_dev *dev, struct mlx5_eq *eq); +void mlx5_debug_eq_remove(struct mlx5_core_dev *dev, struct mlx5_eq *eq); +int mlx5_core_eq_query(struct mlx5_core_dev *dev, struct mlx5_eq *eq, + u32 *out, int outlen); +int mlx5_eq_debugfs_init(struct mlx5_core_dev *dev); +void mlx5_eq_debugfs_cleanup(struct mlx5_core_dev *dev); +int mlx5_cq_debugfs_init(struct 
mlx5_core_dev *dev); +void mlx5_cq_debugfs_cleanup(struct mlx5_core_dev *dev); int mlx5_db_alloc(struct mlx5_core_dev *dev, struct mlx5_db *db); int mlx5_db_alloc_node(struct mlx5_core_dev *dev, struct mlx5_db *db, int node); void mlx5_db_free(struct mlx5_core_dev *dev, struct mlx5_db *db); const char *mlx5_command_str(int command); -void mlx5_cmdif_debugfs_init(struct mlx5_core_dev *dev); +int mlx5_cmdif_debugfs_init(struct mlx5_core_dev *dev); void mlx5_cmdif_debugfs_cleanup(struct mlx5_core_dev *dev); int mlx5_core_create_psv(struct mlx5_core_dev *dev, u32 pdn, int npsvs, u32 *sig_index); @@ -1076,26 +883,14 @@ int mlx5_core_query_ib_ppcnt(struct mlx5_core_dev *dev, int mlx5_init_rl_table(struct mlx5_core_dev *dev); void mlx5_cleanup_rl_table(struct mlx5_core_dev *dev); -int mlx5_rl_add_rate(struct mlx5_core_dev *dev, u16 *index, - struct mlx5_rate_limit *rl); -void mlx5_rl_remove_rate(struct mlx5_core_dev *dev, struct mlx5_rate_limit *rl); +int mlx5_rl_add_rate(struct mlx5_core_dev *dev, u32 rate, u16 *index); +void mlx5_rl_remove_rate(struct mlx5_core_dev *dev, u32 rate); bool mlx5_rl_is_in_range(struct mlx5_core_dev *dev, u32 rate); -int mlx5_rl_add_rate_raw(struct mlx5_core_dev *dev, void *rl_in, u16 uid, - bool dedicated_entry, u16 *index); -void mlx5_rl_remove_rate_raw(struct mlx5_core_dev *dev, u16 index); -bool mlx5_rl_are_equal(struct mlx5_rate_limit *rl_0, - struct mlx5_rate_limit *rl_1); -int mlx5_alloc_bfreg(struct mlx5_core_dev *mdev, struct mlx5_sq_bfreg *bfreg, - bool map_wc, bool fast_path); -void mlx5_free_bfreg(struct mlx5_core_dev *mdev, struct mlx5_sq_bfreg *bfreg); -unsigned int mlx5_comp_vectors_count(struct mlx5_core_dev *dev); -struct cpumask * -mlx5_comp_irq_get_affinity_mask(struct mlx5_core_dev *dev, int vector); -unsigned int mlx5_core_reserved_gids_count(struct mlx5_core_dev *dev); -int mlx5_core_roce_gid_set(struct mlx5_core_dev *dev, unsigned int index, - u8 roce_version, u8 roce_l3_type, const u8 *gid, - const u8 *mac, bool vlan, 
u16 vlan_id, u8 port_num); +static inline int fw_initializing(struct mlx5_core_dev *dev) +{ + return ioread32be(&dev->iseg->initializing) >> 31; +} static inline u32 mlx5_mkey_to_idx(u32 mkey) { @@ -1112,94 +907,58 @@ static inline u8 mlx5_mkey_variant(u32 mkey) return mkey & 0xff; } -/* Async-atomic event notifier used by mlx5 core to forward FW - * evetns received from event queue to mlx5 consumers. - * Optimise event queue dipatching. - */ -int mlx5_notifier_register(struct mlx5_core_dev *dev, struct notifier_block *nb); -int mlx5_notifier_unregister(struct mlx5_core_dev *dev, struct notifier_block *nb); +enum { + MLX5_PROF_MASK_QP_SIZE = (u64)1 << 0, + MLX5_PROF_MASK_MR_CACHE = (u64)1 << 1, +}; -/* Async-atomic event notifier used for forwarding - * evetns from the event queue into the to mlx5 events dispatcher, - * eswitch, clock and others. - */ -int mlx5_eq_notifier_register(struct mlx5_core_dev *dev, struct mlx5_nb *nb); -int mlx5_eq_notifier_unregister(struct mlx5_core_dev *dev, struct mlx5_nb *nb); +enum { + MAX_MR_CACHE_ENTRIES = 16, +}; -/* Blocking event notifier used to forward SW events, used for slow path */ -int mlx5_blocking_notifier_register(struct mlx5_core_dev *dev, struct notifier_block *nb); -int mlx5_blocking_notifier_unregister(struct mlx5_core_dev *dev, struct notifier_block *nb); -int mlx5_blocking_notifier_call_chain(struct mlx5_core_dev *dev, unsigned int event, - void *data); +enum { + MLX5_INTERFACE_PROTOCOL_IB = 0, + MLX5_INTERFACE_PROTOCOL_ETH = 1, +}; +struct mlx5_interface { + void * (*add)(struct mlx5_core_dev *dev); + void (*remove)(struct mlx5_core_dev *dev, void *context); + int (*attach)(struct mlx5_core_dev *dev, void *context); + void (*detach)(struct mlx5_core_dev *dev, void *context); + void (*event)(struct mlx5_core_dev *dev, void *context, + enum mlx5_dev_event event, unsigned long param); + void * (*get_dev)(void *context); + int protocol; + struct list_head list; +}; + +void *mlx5_get_protocol_dev(struct mlx5_core_dev 
*mdev, int protocol); +int mlx5_register_interface(struct mlx5_interface *intf); +void mlx5_unregister_interface(struct mlx5_interface *intf); int mlx5_core_query_vendor_id(struct mlx5_core_dev *mdev, u32 *vendor_id); int mlx5_cmd_create_vport_lag(struct mlx5_core_dev *dev); int mlx5_cmd_destroy_vport_lag(struct mlx5_core_dev *dev); -bool mlx5_lag_is_roce(struct mlx5_core_dev *dev); -bool mlx5_lag_is_sriov(struct mlx5_core_dev *dev); bool mlx5_lag_is_active(struct mlx5_core_dev *dev); -bool mlx5_lag_is_master(struct mlx5_core_dev *dev); -bool mlx5_lag_is_shared_fdb(struct mlx5_core_dev *dev); struct net_device *mlx5_lag_get_roce_netdev(struct mlx5_core_dev *dev); -u8 mlx5_lag_get_slave_port(struct mlx5_core_dev *dev, - struct net_device *slave); -int mlx5_lag_query_cong_counters(struct mlx5_core_dev *dev, - u64 *values, - int num_counters, - size_t *offsets); -struct mlx5_core_dev *mlx5_lag_get_peer_mdev(struct mlx5_core_dev *dev); -struct mlx5_uars_page *mlx5_get_uars_page(struct mlx5_core_dev *mdev); -void mlx5_put_uars_page(struct mlx5_core_dev *mdev, struct mlx5_uars_page *up); -int mlx5_dm_sw_icm_alloc(struct mlx5_core_dev *dev, enum mlx5_sw_icm_type type, - u64 length, u32 log_alignment, u16 uid, - phys_addr_t *addr, u32 *obj_id); -int mlx5_dm_sw_icm_dealloc(struct mlx5_core_dev *dev, enum mlx5_sw_icm_type type, - u64 length, u16 uid, phys_addr_t addr, u32 obj_id); -#ifdef CONFIG_MLX5_CORE_IPOIB -struct net_device *mlx5_rdma_netdev_alloc(struct mlx5_core_dev *mdev, - struct ib_device *ibdev, - const char *name, - void (*setup)(struct net_device *)); -#endif /* CONFIG_MLX5_CORE_IPOIB */ -int mlx5_rdma_rn_get_params(struct mlx5_core_dev *mdev, - struct ib_device *device, - struct rdma_netdev_alloc_params *params); +struct mlx5_profile { + u64 mask; + u8 log_max_qp; + struct { + int size; + int limit; + } mr_cache[MAX_MR_CACHE_ENTRIES]; +}; enum { MLX5_PCI_DEV_IS_VF = 1 << 0, }; -static inline bool mlx5_core_is_pf(const struct mlx5_core_dev *dev) +static inline 
int mlx5_core_is_pf(struct mlx5_core_dev *dev) { - return dev->coredev_type == MLX5_COREDEV_PF; -} - -static inline bool mlx5_core_is_vf(const struct mlx5_core_dev *dev) -{ - return dev->coredev_type == MLX5_COREDEV_VF; -} - -static inline bool mlx5_core_is_ecpf(const struct mlx5_core_dev *dev) -{ - return dev->caps.embedded_cpu; -} - -static inline bool -mlx5_core_is_ecpf_esw_manager(const struct mlx5_core_dev *dev) -{ - return dev->caps.embedded_cpu && MLX5_CAP_GEN(dev, eswitch_manager); -} - -static inline bool mlx5_ecpf_vport_exists(const struct mlx5_core_dev *dev) -{ - return mlx5_core_is_pf(dev) && MLX5_CAP_ESW(dev, ecpf_vport_exists); -} - -static inline u16 mlx5_core_max_vfs(const struct mlx5_core_dev *dev) -{ - return dev->priv.sriov.max_vfs; + return !(dev->priv.pci_dev_data & MLX5_PCI_DEV_IS_VF); } static inline int mlx5_get_gid_table_len(u16 param) @@ -1217,44 +976,8 @@ static inline bool mlx5_rl_is_supported(struct mlx5_core_dev *dev) return !!(dev->priv.rl_table.max_size); } -static inline int mlx5_core_is_mp_slave(struct mlx5_core_dev *dev) -{ - return MLX5_CAP_GEN(dev, affiliate_nic_vport_criteria) && - MLX5_CAP_GEN(dev, num_vhca_ports) <= 1; -} - -static inline int mlx5_core_is_mp_master(struct mlx5_core_dev *dev) -{ - return MLX5_CAP_GEN(dev, num_vhca_ports) > 1; -} - -static inline int mlx5_core_mp_enabled(struct mlx5_core_dev *dev) -{ - return mlx5_core_is_mp_slave(dev) || - mlx5_core_is_mp_master(dev); -} - -static inline int mlx5_core_native_port_num(struct mlx5_core_dev *dev) -{ - if (!mlx5_core_mp_enabled(dev)) - return 1; - - return MLX5_CAP_GEN(dev, native_port_num); -} - enum { MLX5_TRIGGERED_CMD_COMP = (u64)1 << 32, }; -static inline bool mlx5_is_roce_init_enabled(struct mlx5_core_dev *dev) -{ - struct devlink *devlink = priv_to_devlink(dev); - union devlink_param_value val; - - devlink_param_driverinit_value_get(devlink, - DEVLINK_PARAM_GENERIC_ID_ENABLE_ROCE, - &val); - return val.vbool; -} - #endif /* MLX5_DRIVER_H */ diff --git 
a/include/linux/mlx5/fs.h b/include/linux/mlx5/fs.h index 0106c67e8c..93ebc5e213 100644 --- a/include/linux/mlx5/fs.h +++ b/include/linux/mlx5/fs.h @@ -38,21 +38,8 @@ #define MLX5_FS_DEFAULT_FLOW_TAG 0x0 -#define MLX5_SET_CFG(p, f, v) MLX5_SET(create_flow_group_in, p, f, v) - enum { MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_PRIO = 1 << 16, - MLX5_FLOW_CONTEXT_ACTION_ENCRYPT = 1 << 17, - MLX5_FLOW_CONTEXT_ACTION_DECRYPT = 1 << 18, - MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_NS = 1 << 19, -}; - -enum { - MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT = BIT(0), - MLX5_FLOW_TABLE_TUNNEL_EN_DECAP = BIT(1), - MLX5_FLOW_TABLE_TERMINATION = BIT(2), - MLX5_FLOW_TABLE_UNMANAGED = BIT(3), - MLX5_FLOW_TABLE_OTHER_VPORT = BIT(4), }; #define LEFTOVERS_RULE_NUM 2 @@ -78,108 +65,50 @@ enum mlx5_flow_namespace_type { MLX5_FLOW_NAMESPACE_ESW_INGRESS, MLX5_FLOW_NAMESPACE_SNIFFER_RX, MLX5_FLOW_NAMESPACE_SNIFFER_TX, - MLX5_FLOW_NAMESPACE_EGRESS, - MLX5_FLOW_NAMESPACE_EGRESS_KERNEL, - MLX5_FLOW_NAMESPACE_RDMA_RX, - MLX5_FLOW_NAMESPACE_RDMA_RX_KERNEL, - MLX5_FLOW_NAMESPACE_RDMA_TX, }; -enum { - FDB_BYPASS_PATH, - FDB_TC_OFFLOAD, - FDB_FT_OFFLOAD, - FDB_TC_MISS, - FDB_BR_OFFLOAD, - FDB_SLOW_PATH, - FDB_PER_VPORT, -}; - -struct mlx5_pkt_reformat; -struct mlx5_modify_hdr; struct mlx5_flow_table; struct mlx5_flow_group; +struct mlx5_flow_rule; struct mlx5_flow_namespace; -struct mlx5_flow_handle; - -enum { - FLOW_CONTEXT_HAS_TAG = BIT(0), -}; - -struct mlx5_flow_context { - u32 flags; - u32 flow_tag; - u32 flow_source; -}; struct mlx5_flow_spec { u8 match_criteria_enable; u32 match_criteria[MLX5_ST_SZ_DW(fte_match_param)]; u32 match_value[MLX5_ST_SZ_DW(fte_match_param)]; - struct mlx5_flow_context flow_context; -}; - -enum { - MLX5_FLOW_DEST_VPORT_VHCA_ID = BIT(0), - MLX5_FLOW_DEST_VPORT_REFORMAT_ID = BIT(1), }; struct mlx5_flow_destination { enum mlx5_flow_destination_type type; union { u32 tir_num; - u32 ft_num; struct mlx5_flow_table *ft; - u32 counter_id; - struct { - u16 num; - u16 vhca_id; - struct 
mlx5_pkt_reformat *pkt_reformat; - u8 flags; - } vport; - u32 sampler_id; + u32 vport_num; + struct mlx5_fc *counter; }; }; -struct mod_hdr_tbl { - struct mutex lock; /* protects hlist */ - DECLARE_HASHTABLE(hlist, 8); -}; - -struct mlx5_flow_namespace * -mlx5_get_fdb_sub_ns(struct mlx5_core_dev *dev, int n); struct mlx5_flow_namespace * mlx5_get_flow_namespace(struct mlx5_core_dev *dev, enum mlx5_flow_namespace_type type); -struct mlx5_flow_namespace * -mlx5_get_flow_vport_acl_namespace(struct mlx5_core_dev *dev, - enum mlx5_flow_namespace_type type, - int vport); - -struct mlx5_flow_table_attr { - int prio; - int max_fte; - u32 level; - u32 flags; - struct mlx5_flow_table *next_ft; - - struct { - int max_num_groups; - int num_reserved_entries; - } autogroup; -}; - -struct mlx5_flow_table * -mlx5_create_flow_table(struct mlx5_flow_namespace *ns, - struct mlx5_flow_table_attr *ft_attr); struct mlx5_flow_table * mlx5_create_auto_grouped_flow_table(struct mlx5_flow_namespace *ns, - struct mlx5_flow_table_attr *ft_attr); + int prio, + int num_flow_table_entries, + int max_num_groups, + u32 level); struct mlx5_flow_table * +mlx5_create_flow_table(struct mlx5_flow_namespace *ns, + int prio, + int num_flow_table_entries, + u32 level); +struct mlx5_flow_table * mlx5_create_vport_flow_table(struct mlx5_flow_namespace *ns, - struct mlx5_flow_table_attr *ft_attr, u16 vport); + int prio, + int num_flow_table_entries, + u32 level, u16 vport); struct mlx5_flow_table *mlx5_create_lag_demux_flow_table( struct mlx5_flow_namespace *ns, int prio, u32 level); @@ -195,81 +124,24 @@ struct mlx5_flow_group * mlx5_create_flow_group(struct mlx5_flow_table *ft, u32 *in); void mlx5_destroy_flow_group(struct mlx5_flow_group *fg); -struct mlx5_fs_vlan { - u16 ethtype; - u16 vid; - u8 prio; -}; - -#define MLX5_FS_VLAN_DEPTH 2 - -enum { - FLOW_ACT_NO_APPEND = BIT(0), - FLOW_ACT_IGNORE_FLOW_LEVEL = BIT(1), -}; - -struct mlx5_flow_act { - u32 action; - struct mlx5_modify_hdr *modify_hdr; - struct 
mlx5_pkt_reformat *pkt_reformat; - union { - u32 ipsec_obj_id; - uintptr_t esp_id; - }; - u32 flags; - struct mlx5_fs_vlan vlan[MLX5_FS_VLAN_DEPTH]; - struct ib_counters *counters; -}; - -#define MLX5_DECLARE_FLOW_ACT(name) \ - struct mlx5_flow_act name = { .action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,\ - .flags = 0, } - /* Single destination per rule. * Group ID is implied by the match criteria. */ -struct mlx5_flow_handle * -mlx5_add_flow_rules(struct mlx5_flow_table *ft, - const struct mlx5_flow_spec *spec, - struct mlx5_flow_act *flow_act, - struct mlx5_flow_destination *dest, - int num_dest); -void mlx5_del_flow_rules(struct mlx5_flow_handle *fr); +struct mlx5_flow_rule * +mlx5_add_flow_rule(struct mlx5_flow_table *ft, + struct mlx5_flow_spec *spec, + u32 action, + u32 flow_tag, + struct mlx5_flow_destination *dest); +void mlx5_del_flow_rule(struct mlx5_flow_rule *fr); -int mlx5_modify_rule_destination(struct mlx5_flow_handle *handler, - struct mlx5_flow_destination *new_dest, - struct mlx5_flow_destination *old_dest); +int mlx5_modify_rule_destination(struct mlx5_flow_rule *rule, + struct mlx5_flow_destination *dest); +struct mlx5_fc *mlx5_flow_rule_counter(struct mlx5_flow_rule *rule); struct mlx5_fc *mlx5_fc_create(struct mlx5_core_dev *dev, bool aging); void mlx5_fc_destroy(struct mlx5_core_dev *dev, struct mlx5_fc *counter); -u64 mlx5_fc_query_lastuse(struct mlx5_fc *counter); void mlx5_fc_query_cached(struct mlx5_fc *counter, u64 *bytes, u64 *packets, u64 *lastuse); -int mlx5_fc_query(struct mlx5_core_dev *dev, struct mlx5_fc *counter, - u64 *packets, u64 *bytes); -u32 mlx5_fc_id(struct mlx5_fc *counter); - -int mlx5_fs_add_rx_underlay_qpn(struct mlx5_core_dev *dev, u32 underlay_qpn); -int mlx5_fs_remove_rx_underlay_qpn(struct mlx5_core_dev *dev, u32 underlay_qpn); - -struct mlx5_modify_hdr *mlx5_modify_header_alloc(struct mlx5_core_dev *dev, - u8 ns_type, u8 num_actions, - void *modify_actions); -void mlx5_modify_header_dealloc(struct mlx5_core_dev *dev, 
- struct mlx5_modify_hdr *modify_hdr); - -struct mlx5_pkt_reformat_params { - int type; - u8 param_0; - u8 param_1; - size_t size; - void *data; -}; - -struct mlx5_pkt_reformat *mlx5_packet_reformat_alloc(struct mlx5_core_dev *dev, - struct mlx5_pkt_reformat_params *params, - enum mlx5_flow_namespace_type ns_type); -void mlx5_packet_reformat_dealloc(struct mlx5_core_dev *dev, - struct mlx5_pkt_reformat *reformat); #endif diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h index 993204a6c1..6045d4d580 100644 --- a/include/linux/mlx5/mlx5_ifc.h +++ b/include/linux/mlx5/mlx5_ifc.h @@ -32,8 +32,6 @@ #ifndef MLX5_IFC_H #define MLX5_IFC_H -#include "mlx5_ifc_fpga.h" - enum { MLX5_EVENT_TYPE_CODING_COMPLETION_EVENTS = 0x0, MLX5_EVENT_TYPE_CODING_PATH_MIGRATED_SUCCEEDED = 0x1, @@ -58,9 +56,7 @@ enum { MLX5_EVENT_TYPE_CODING_STALL_VL_EVENT = 0x1b, MLX5_EVENT_TYPE_CODING_DROPPED_PACKET_LOGGED_EVENT = 0x1f, MLX5_EVENT_TYPE_CODING_COMMAND_INTERFACE_COMPLETION = 0xa, - MLX5_EVENT_TYPE_CODING_PAGE_REQUEST = 0xb, - MLX5_EVENT_TYPE_CODING_FPGA_ERROR = 0x20, - MLX5_EVENT_TYPE_CODING_FPGA_QP_ERROR = 0x21 + MLX5_EVENT_TYPE_CODING_PAGE_REQUEST = 0xb }; enum { @@ -72,42 +68,7 @@ enum { enum { MLX5_SET_HCA_CAP_OP_MOD_GENERAL_DEVICE = 0x0, - MLX5_SET_HCA_CAP_OP_MOD_ODP = 0x2, MLX5_SET_HCA_CAP_OP_MOD_ATOMIC = 0x3, - MLX5_SET_HCA_CAP_OP_MOD_ROCE = 0x4, -}; - -enum { - MLX5_SHARED_RESOURCE_UID = 0xffff, -}; - -enum { - MLX5_OBJ_TYPE_SW_ICM = 0x0008, -}; - -enum { - MLX5_GENERAL_OBJ_TYPES_CAP_SW_ICM = (1ULL << MLX5_OBJ_TYPE_SW_ICM), - MLX5_GENERAL_OBJ_TYPES_CAP_GENEVE_TLV_OPT = (1ULL << 11), - MLX5_GENERAL_OBJ_TYPES_CAP_VIRTIO_NET_Q = (1ULL << 13), -}; - -enum { - MLX5_OBJ_TYPE_GENEVE_TLV_OPT = 0x000b, - MLX5_OBJ_TYPE_VIRTIO_NET_Q = 0x000d, - MLX5_OBJ_TYPE_MKEY = 0xff01, - MLX5_OBJ_TYPE_QP = 0xff02, - MLX5_OBJ_TYPE_PSV = 0xff03, - MLX5_OBJ_TYPE_RMP = 0xff04, - MLX5_OBJ_TYPE_XRC_SRQ = 0xff05, - MLX5_OBJ_TYPE_RQ = 0xff06, - MLX5_OBJ_TYPE_SQ = 0xff07, - MLX5_OBJ_TYPE_TIR 
= 0xff08, - MLX5_OBJ_TYPE_TIS = 0xff09, - MLX5_OBJ_TYPE_DCT = 0xff0a, - MLX5_OBJ_TYPE_XRQ = 0xff0b, - MLX5_OBJ_TYPE_RQT = 0xff0e, - MLX5_OBJ_TYPE_FLOW_COUNTER = 0xff0f, - MLX5_OBJ_TYPE_CQ = 0xff10, }; enum { @@ -122,18 +83,11 @@ enum { MLX5_CMD_OP_SET_HCA_CAP = 0x109, MLX5_CMD_OP_QUERY_ISSI = 0x10a, MLX5_CMD_OP_SET_ISSI = 0x10b, - MLX5_CMD_OP_SET_DRIVER_VERSION = 0x10d, - MLX5_CMD_OP_QUERY_SF_PARTITION = 0x111, - MLX5_CMD_OP_ALLOC_SF = 0x113, - MLX5_CMD_OP_DEALLOC_SF = 0x114, MLX5_CMD_OP_CREATE_MKEY = 0x200, MLX5_CMD_OP_QUERY_MKEY = 0x201, MLX5_CMD_OP_DESTROY_MKEY = 0x202, MLX5_CMD_OP_QUERY_SPECIAL_CONTEXTS = 0x203, MLX5_CMD_OP_PAGE_FAULT_RESUME = 0x204, - MLX5_CMD_OP_ALLOC_MEMIC = 0x205, - MLX5_CMD_OP_DEALLOC_MEMIC = 0x206, - MLX5_CMD_OP_MODIFY_MEMIC = 0x207, MLX5_CMD_OP_CREATE_EQ = 0x301, MLX5_CMD_OP_DESTROY_EQ = 0x302, MLX5_CMD_OP_QUERY_EQ = 0x303, @@ -173,12 +127,6 @@ enum { MLX5_CMD_OP_DESTROY_XRQ = 0x718, MLX5_CMD_OP_QUERY_XRQ = 0x719, MLX5_CMD_OP_ARM_XRQ = 0x71a, - MLX5_CMD_OP_QUERY_XRQ_DC_PARAMS_ENTRY = 0x725, - MLX5_CMD_OP_SET_XRQ_DC_PARAMS_ENTRY = 0x726, - MLX5_CMD_OP_QUERY_XRQ_ERROR_PARAMS = 0x727, - MLX5_CMD_OP_RELEASE_XRQ_ERROR = 0x729, - MLX5_CMD_OP_MODIFY_XRQ = 0x72a, - MLX5_CMD_OP_QUERY_ESW_FUNCTIONS = 0x740, MLX5_CMD_OP_QUERY_VPORT_STATE = 0x750, MLX5_CMD_OP_MODIFY_VPORT_STATE = 0x751, MLX5_CMD_OP_QUERY_ESW_VPORT_CONTEXT = 0x752, @@ -191,21 +139,12 @@ enum { MLX5_CMD_OP_MODIFY_HCA_VPORT_CONTEXT = 0x763, MLX5_CMD_OP_QUERY_HCA_VPORT_GID = 0x764, MLX5_CMD_OP_QUERY_HCA_VPORT_PKEY = 0x765, - MLX5_CMD_OP_QUERY_VNIC_ENV = 0x76f, MLX5_CMD_OP_QUERY_VPORT_COUNTER = 0x770, MLX5_CMD_OP_ALLOC_Q_COUNTER = 0x771, MLX5_CMD_OP_DEALLOC_Q_COUNTER = 0x772, MLX5_CMD_OP_QUERY_Q_COUNTER = 0x773, - MLX5_CMD_OP_SET_MONITOR_COUNTER = 0x774, - MLX5_CMD_OP_ARM_MONITOR_COUNTER = 0x775, - MLX5_CMD_OP_SET_PP_RATE_LIMIT = 0x780, + MLX5_CMD_OP_SET_RATE_LIMIT = 0x780, MLX5_CMD_OP_QUERY_RATE_LIMIT = 0x781, - MLX5_CMD_OP_CREATE_SCHEDULING_ELEMENT = 0x782, - 
MLX5_CMD_OP_DESTROY_SCHEDULING_ELEMENT = 0x783, - MLX5_CMD_OP_QUERY_SCHEDULING_ELEMENT = 0x784, - MLX5_CMD_OP_MODIFY_SCHEDULING_ELEMENT = 0x785, - MLX5_CMD_OP_CREATE_QOS_PARA_VPORT = 0x786, - MLX5_CMD_OP_DESTROY_QOS_PARA_VPORT = 0x787, MLX5_CMD_OP_ALLOC_PD = 0x800, MLX5_CMD_OP_DEALLOC_PD = 0x801, MLX5_CMD_OP_ALLOC_UAR = 0x802, @@ -251,7 +190,6 @@ enum { MLX5_CMD_OP_QUERY_SQ = 0x907, MLX5_CMD_OP_CREATE_RQ = 0x908, MLX5_CMD_OP_MODIFY_RQ = 0x909, - MLX5_CMD_OP_SET_DELAY_DROP_PARAMS = 0x910, MLX5_CMD_OP_DESTROY_RQ = 0x90a, MLX5_CMD_OP_QUERY_RQ = 0x90b, MLX5_CMD_OP_CREATE_RMP = 0x90c, @@ -280,46 +218,20 @@ enum { MLX5_CMD_OP_DEALLOC_FLOW_COUNTER = 0x93a, MLX5_CMD_OP_QUERY_FLOW_COUNTER = 0x93b, MLX5_CMD_OP_MODIFY_FLOW_TABLE = 0x93c, - MLX5_CMD_OP_ALLOC_PACKET_REFORMAT_CONTEXT = 0x93d, - MLX5_CMD_OP_DEALLOC_PACKET_REFORMAT_CONTEXT = 0x93e, - MLX5_CMD_OP_QUERY_PACKET_REFORMAT_CONTEXT = 0x93f, - MLX5_CMD_OP_ALLOC_MODIFY_HEADER_CONTEXT = 0x940, - MLX5_CMD_OP_DEALLOC_MODIFY_HEADER_CONTEXT = 0x941, - MLX5_CMD_OP_QUERY_MODIFY_HEADER_CONTEXT = 0x942, - MLX5_CMD_OP_FPGA_CREATE_QP = 0x960, - MLX5_CMD_OP_FPGA_MODIFY_QP = 0x961, - MLX5_CMD_OP_FPGA_QUERY_QP = 0x962, - MLX5_CMD_OP_FPGA_DESTROY_QP = 0x963, - MLX5_CMD_OP_FPGA_QUERY_QP_COUNTERS = 0x964, - MLX5_CMD_OP_CREATE_GENERAL_OBJECT = 0xa00, - MLX5_CMD_OP_MODIFY_GENERAL_OBJECT = 0xa01, - MLX5_CMD_OP_QUERY_GENERAL_OBJECT = 0xa02, - MLX5_CMD_OP_DESTROY_GENERAL_OBJECT = 0xa03, - MLX5_CMD_OP_CREATE_UCTX = 0xa04, - MLX5_CMD_OP_DESTROY_UCTX = 0xa06, - MLX5_CMD_OP_CREATE_UMEM = 0xa08, - MLX5_CMD_OP_DESTROY_UMEM = 0xa0a, - MLX5_CMD_OP_SYNC_STEERING = 0xb00, - MLX5_CMD_OP_QUERY_VHCA_STATE = 0xb0d, - MLX5_CMD_OP_MODIFY_VHCA_STATE = 0xb0e, + MLX5_CMD_OP_ALLOC_ENCAP_HEADER = 0x93d, + MLX5_CMD_OP_DEALLOC_ENCAP_HEADER = 0x93e, MLX5_CMD_OP_MAX }; -/* Valid range for general commands that don't work over an object */ -enum { - MLX5_CMD_OP_GENERAL_START = 0xb00, - MLX5_CMD_OP_GENERAL_END = 0xd00, -}; - struct 
mlx5_ifc_flow_table_fields_supported_bits { u8 outer_dmac[0x1]; u8 outer_smac[0x1]; u8 outer_ether_type[0x1]; - u8 outer_ip_version[0x1]; + u8 reserved_at_3[0x1]; u8 outer_first_prio[0x1]; u8 outer_first_cfi[0x1]; u8 outer_first_vid[0x1]; - u8 outer_ipv4_ttl[0x1]; + u8 reserved_at_7[0x1]; u8 outer_second_prio[0x1]; u8 outer_second_cfi[0x1]; u8 outer_second_vid[0x1]; @@ -338,17 +250,13 @@ struct mlx5_ifc_flow_table_fields_supported_bits { u8 outer_gre_protocol[0x1]; u8 outer_gre_key[0x1]; u8 outer_vxlan_vni[0x1]; - u8 outer_geneve_vni[0x1]; - u8 outer_geneve_oam[0x1]; - u8 outer_geneve_protocol_type[0x1]; - u8 outer_geneve_opt_len[0x1]; - u8 reserved_at_1e[0x1]; + u8 reserved_at_1a[0x5]; u8 source_eswitch_port[0x1]; u8 inner_dmac[0x1]; u8 inner_smac[0x1]; u8 inner_ether_type[0x1]; - u8 inner_ip_version[0x1]; + u8 reserved_at_23[0x1]; u8 inner_first_prio[0x1]; u8 inner_first_cfi[0x1]; u8 inner_first_vid[0x1]; @@ -370,27 +278,7 @@ struct mlx5_ifc_flow_table_fields_supported_bits { u8 inner_tcp_flags[0x1]; u8 reserved_at_37[0x9]; - u8 geneve_tlv_option_0_data[0x1]; - u8 reserved_at_41[0x4]; - u8 outer_first_mpls_over_udp[0x4]; - u8 outer_first_mpls_over_gre[0x4]; - u8 inner_first_mpls[0x4]; - u8 outer_first_mpls[0x4]; - u8 reserved_at_55[0x2]; - u8 outer_esp_spi[0x1]; - u8 reserved_at_58[0x2]; - u8 bth_dst_qp[0x1]; - u8 reserved_at_5b[0x5]; - - u8 reserved_at_60[0x18]; - u8 metadata_reg_c_7[0x1]; - u8 metadata_reg_c_6[0x1]; - u8 metadata_reg_c_5[0x1]; - u8 metadata_reg_c_4[0x1]; - u8 metadata_reg_c_3[0x1]; - u8 metadata_reg_c_2[0x1]; - u8 metadata_reg_c_1[0x1]; - u8 metadata_reg_c_0[0x1]; + u8 reserved_at_40[0x40]; }; struct mlx5_ifc_flow_table_prop_layout_bits { @@ -401,48 +289,21 @@ struct mlx5_ifc_flow_table_prop_layout_bits { u8 modify_root[0x1]; u8 identified_miss_table_mode[0x1]; u8 flow_table_modify[0x1]; - u8 reformat[0x1]; + u8 encap[0x1]; u8 decap[0x1]; - u8 reserved_at_9[0x1]; - u8 pop_vlan[0x1]; - u8 push_vlan[0x1]; - u8 reserved_at_c[0x1]; - u8 
pop_vlan_2[0x1]; - u8 push_vlan_2[0x1]; - u8 reformat_and_vlan_action[0x1]; - u8 reserved_at_10[0x1]; - u8 sw_owner[0x1]; - u8 reformat_l3_tunnel_to_l2[0x1]; - u8 reformat_l2_to_l3_tunnel[0x1]; - u8 reformat_and_modify_action[0x1]; - u8 ignore_flow_level[0x1]; - u8 reserved_at_16[0x1]; - u8 table_miss_action_domain[0x1]; - u8 termination_table[0x1]; - u8 reformat_and_fwd_to_table[0x1]; - u8 reserved_at_1a[0x2]; - u8 ipsec_encrypt[0x1]; - u8 ipsec_decrypt[0x1]; - u8 sw_owner_v2[0x1]; - u8 reserved_at_1f[0x1]; + u8 reserved_at_9[0x17]; - u8 termination_table_raw_traffic[0x1]; - u8 reserved_at_21[0x1]; + u8 reserved_at_20[0x2]; u8 log_max_ft_size[0x6]; - u8 log_max_modify_header_context[0x8]; - u8 max_modify_header_actions[0x8]; + u8 reserved_at_28[0x10]; u8 max_ft_level[0x8]; u8 reserved_at_40[0x20]; - u8 reserved_at_60[0x2]; - u8 reformat_insert[0x1]; - u8 reformat_remove[0x1]; - u8 reserver_at_64[0x14]; + u8 reserved_at_60[0x18]; u8 log_max_ft_num[0x8]; - u8 reserved_at_80[0x10]; - u8 log_max_flow_counter[0x8]; + u8 reserved_at_80[0x18]; u8 log_max_destination[0x8]; u8 reserved_at_a0[0x18]; @@ -460,11 +321,27 @@ struct mlx5_ifc_odp_per_transport_service_cap_bits { u8 receive[0x1]; u8 write[0x1]; u8 read[0x1]; - u8 atomic[0x1]; + u8 reserved_at_4[0x1]; u8 srq_receive[0x1]; u8 reserved_at_6[0x1a]; }; +struct mlx5_ifc_ipv4_layout_bits { + u8 reserved_at_0[0x60]; + + u8 ipv4[0x20]; +}; + +struct mlx5_ifc_ipv6_layout_bits { + u8 ipv6[16][0x8]; +}; + +union mlx5_ifc_ipv6_layout_ipv4_layout_auto_bits { + struct mlx5_ifc_ipv6_layout_bits ipv6_layout; + struct mlx5_ifc_ipv4_layout_bits ipv4_layout; + u8 reserved_at_0[0x80]; +}; + struct mlx5_ifc_fte_match_set_lyr_2_4_bits { u8 smac_47_16[0x20]; @@ -481,17 +358,16 @@ struct mlx5_ifc_fte_match_set_lyr_2_4_bits { u8 ip_protocol[0x8]; u8 ip_dscp[0x6]; u8 ip_ecn[0x2]; - u8 cvlan_tag[0x1]; - u8 svlan_tag[0x1]; + u8 vlan_tag[0x1]; + u8 reserved_at_91[0x1]; u8 frag[0x1]; - u8 ip_version[0x4]; + u8 reserved_at_93[0x4]; u8 
tcp_flags[0x9]; u8 tcp_sport[0x10]; u8 tcp_dport[0x10]; - u8 reserved_at_c0[0x18]; - u8 ttl_hoplimit[0x8]; + u8 reserved_at_c0[0x20]; u8 udp_sport[0x10]; u8 udp_dport[0x10]; @@ -501,25 +377,11 @@ struct mlx5_ifc_fte_match_set_lyr_2_4_bits { union mlx5_ifc_ipv6_layout_ipv4_layout_auto_bits dst_ipv4_dst_ipv6; }; -struct mlx5_ifc_nvgre_key_bits { - u8 hi[0x18]; - u8 lo[0x8]; -}; - -union mlx5_ifc_gre_key_bits { - struct mlx5_ifc_nvgre_key_bits nvgre; - u8 key[0x20]; -}; - struct mlx5_ifc_fte_match_set_misc_bits { - u8 gre_c_present[0x1]; - u8 reserved_at_1[0x1]; - u8 gre_k_present[0x1]; - u8 gre_s_present[0x1]; - u8 source_vhca_port[0x4]; + u8 reserved_at_0[0x8]; u8 source_sqn[0x18]; - u8 source_eswitch_owner_vhca_id[0x10]; + u8 reserved_at_20[0x10]; u8 source_port[0x10]; u8 outer_second_prio[0x3]; @@ -529,21 +391,18 @@ struct mlx5_ifc_fte_match_set_misc_bits { u8 inner_second_cfi[0x1]; u8 inner_second_vid[0xc]; - u8 outer_second_cvlan_tag[0x1]; - u8 inner_second_cvlan_tag[0x1]; - u8 outer_second_svlan_tag[0x1]; - u8 inner_second_svlan_tag[0x1]; - u8 reserved_at_64[0xc]; + u8 outer_second_vlan_tag[0x1]; + u8 inner_second_vlan_tag[0x1]; + u8 reserved_at_62[0xe]; u8 gre_protocol[0x10]; - union mlx5_ifc_gre_key_bits gre_key; + u8 gre_key_h[0x18]; + u8 gre_key_l[0x8]; u8 vxlan_vni[0x18]; u8 reserved_at_b8[0x8]; - u8 geneve_vni[0x18]; - u8 reserved_at_d8[0x7]; - u8 geneve_oam[0x1]; + u8 reserved_at_c0[0x20]; u8 reserved_at_e0[0xc]; u8 outer_ipv6_flow_label[0x14]; @@ -551,114 +410,7 @@ struct mlx5_ifc_fte_match_set_misc_bits { u8 reserved_at_100[0xc]; u8 inner_ipv6_flow_label[0x14]; - u8 reserved_at_120[0xa]; - u8 geneve_opt_len[0x6]; - u8 geneve_protocol_type[0x10]; - - u8 reserved_at_140[0x8]; - u8 bth_dst_qp[0x18]; - u8 reserved_at_160[0x20]; - u8 outer_esp_spi[0x20]; - u8 reserved_at_1a0[0x60]; -}; - -struct mlx5_ifc_fte_match_mpls_bits { - u8 mpls_label[0x14]; - u8 mpls_exp[0x3]; - u8 mpls_s_bos[0x1]; - u8 mpls_ttl[0x8]; -}; - -struct mlx5_ifc_fte_match_set_misc2_bits 
{ - struct mlx5_ifc_fte_match_mpls_bits outer_first_mpls; - - struct mlx5_ifc_fte_match_mpls_bits inner_first_mpls; - - struct mlx5_ifc_fte_match_mpls_bits outer_first_mpls_over_gre; - - struct mlx5_ifc_fte_match_mpls_bits outer_first_mpls_over_udp; - - u8 metadata_reg_c_7[0x20]; - - u8 metadata_reg_c_6[0x20]; - - u8 metadata_reg_c_5[0x20]; - - u8 metadata_reg_c_4[0x20]; - - u8 metadata_reg_c_3[0x20]; - - u8 metadata_reg_c_2[0x20]; - - u8 metadata_reg_c_1[0x20]; - - u8 metadata_reg_c_0[0x20]; - - u8 metadata_reg_a[0x20]; - - u8 reserved_at_1a0[0x60]; -}; - -struct mlx5_ifc_fte_match_set_misc3_bits { - u8 inner_tcp_seq_num[0x20]; - - u8 outer_tcp_seq_num[0x20]; - - u8 inner_tcp_ack_num[0x20]; - - u8 outer_tcp_ack_num[0x20]; - - u8 reserved_at_80[0x8]; - u8 outer_vxlan_gpe_vni[0x18]; - - u8 outer_vxlan_gpe_next_protocol[0x8]; - u8 outer_vxlan_gpe_flags[0x8]; - u8 reserved_at_b0[0x10]; - - u8 icmp_header_data[0x20]; - - u8 icmpv6_header_data[0x20]; - - u8 icmp_type[0x8]; - u8 icmp_code[0x8]; - u8 icmpv6_type[0x8]; - u8 icmpv6_code[0x8]; - - u8 geneve_tlv_option_0_data[0x20]; - - u8 gtpu_teid[0x20]; - - u8 gtpu_msg_type[0x8]; - u8 gtpu_msg_flags[0x8]; - u8 reserved_at_170[0x10]; - - u8 gtpu_dw_2[0x20]; - - u8 gtpu_first_ext_dw_0[0x20]; - - u8 gtpu_dw_0[0x20]; - - u8 reserved_at_1e0[0x20]; -}; - -struct mlx5_ifc_fte_match_set_misc4_bits { - u8 prog_sample_field_value_0[0x20]; - - u8 prog_sample_field_id_0[0x20]; - - u8 prog_sample_field_value_1[0x20]; - - u8 prog_sample_field_id_1[0x20]; - - u8 prog_sample_field_value_2[0x20]; - - u8 prog_sample_field_id_2[0x20]; - - u8 prog_sample_field_value_3[0x20]; - - u8 prog_sample_field_id_3[0x20]; - - u8 reserved_at_100[0x100]; + u8 reserved_at_120[0xe0]; }; struct mlx5_ifc_cmd_pas_bits { @@ -723,7 +475,7 @@ struct mlx5_ifc_ads_bits { u8 dei_cfi[0x1]; u8 eth_prio[0x3]; u8 sl[0x4]; - u8 vhca_port_num[0x8]; + u8 port[0x8]; u8 rmac_47_32[0x10]; u8 rmac_31_0[0x20]; @@ -733,64 +485,25 @@ struct mlx5_ifc_flow_table_nic_cap_bits { u8 
nic_rx_multi_path_tirs[0x1]; u8 nic_rx_multi_path_tirs_fts[0x1]; u8 allow_sniffer_and_nic_rx_shared_tir[0x1]; - u8 reserved_at_3[0x4]; - u8 sw_owner_reformat_supported[0x1]; - u8 reserved_at_8[0x18]; - - u8 encap_general_header[0x1]; - u8 reserved_at_21[0xa]; - u8 log_max_packet_reformat_context[0x5]; - u8 reserved_at_30[0x6]; - u8 max_encap_header_size[0xa]; - u8 reserved_at_40[0x1c0]; + u8 reserved_at_3[0x1fd]; struct mlx5_ifc_flow_table_prop_layout_bits flow_table_properties_nic_receive; - struct mlx5_ifc_flow_table_prop_layout_bits flow_table_properties_nic_receive_rdma; + u8 reserved_at_400[0x200]; struct mlx5_ifc_flow_table_prop_layout_bits flow_table_properties_nic_receive_sniffer; struct mlx5_ifc_flow_table_prop_layout_bits flow_table_properties_nic_transmit; - struct mlx5_ifc_flow_table_prop_layout_bits flow_table_properties_nic_transmit_rdma; + u8 reserved_at_a00[0x200]; struct mlx5_ifc_flow_table_prop_layout_bits flow_table_properties_nic_transmit_sniffer; - u8 reserved_at_e00[0x1200]; - - u8 sw_steering_nic_rx_action_drop_icm_address[0x40]; - - u8 sw_steering_nic_tx_action_drop_icm_address[0x40]; - - u8 sw_steering_nic_tx_action_allow_icm_address[0x40]; - - u8 reserved_at_20c0[0x5f40]; -}; - -enum { - MLX5_FDB_TO_VPORT_REG_C_0 = 0x01, - MLX5_FDB_TO_VPORT_REG_C_1 = 0x02, - MLX5_FDB_TO_VPORT_REG_C_2 = 0x04, - MLX5_FDB_TO_VPORT_REG_C_3 = 0x08, - MLX5_FDB_TO_VPORT_REG_C_4 = 0x10, - MLX5_FDB_TO_VPORT_REG_C_5 = 0x20, - MLX5_FDB_TO_VPORT_REG_C_6 = 0x40, - MLX5_FDB_TO_VPORT_REG_C_7 = 0x80, + u8 reserved_at_e00[0x7200]; }; struct mlx5_ifc_flow_table_eswitch_cap_bits { - u8 fdb_to_vport_reg_c_id[0x8]; - u8 reserved_at_8[0xd]; - u8 fdb_modify_header_fwd_to_table[0x1]; - u8 reserved_at_16[0x1]; - u8 flow_source[0x1]; - u8 reserved_at_18[0x2]; - u8 multi_fdb_encap[0x1]; - u8 egress_acl_forward_to_vport[0x1]; - u8 fdb_multi_path_to_table[0x1]; - u8 reserved_at_1d[0x3]; - - u8 reserved_at_20[0x1e0]; + u8 reserved_at_0[0x200]; struct 
mlx5_ifc_flow_table_prop_layout_bits flow_table_properties_nic_esw_fdb; @@ -798,22 +511,7 @@ struct mlx5_ifc_flow_table_eswitch_cap_bits { struct mlx5_ifc_flow_table_prop_layout_bits flow_table_properties_esw_acl_egress; - u8 reserved_at_800[0x1000]; - - u8 sw_steering_fdb_action_drop_icm_address_rx[0x40]; - - u8 sw_steering_fdb_action_drop_icm_address_tx[0x40]; - - u8 sw_steering_uplink_icm_address_rx[0x40]; - - u8 sw_steering_uplink_icm_address_tx[0x40]; - - u8 reserved_at_1900[0x6700]; -}; - -enum { - MLX5_COUNTER_SOURCE_ESWITCH = 0x0, - MLX5_COUNTER_FLOW_ESWITCH = 0x1, + u8 reserved_at_800[0x7800]; }; struct mlx5_ifc_e_switch_cap_bits { @@ -822,86 +520,30 @@ struct mlx5_ifc_e_switch_cap_bits { u8 vport_svlan_insert[0x1]; u8 vport_cvlan_insert_if_not_exist[0x1]; u8 vport_cvlan_insert_overwrite[0x1]; - u8 reserved_at_5[0x2]; - u8 esw_shared_ingress_acl[0x1]; - u8 esw_uplink_ingress_acl[0x1]; - u8 root_ft_on_other_esw[0x1]; - u8 reserved_at_a[0xf]; - u8 esw_functions_changed[0x1]; - u8 reserved_at_1a[0x1]; - u8 ecpf_vport_exists[0x1]; - u8 counter_eswitch_affinity[0x1]; - u8 merged_eswitch[0x1]; + u8 reserved_at_5[0x19]; u8 nic_vport_node_guid_modify[0x1]; u8 nic_vport_port_guid_modify[0x1]; u8 vxlan_encap_decap[0x1]; u8 nvgre_encap_decap[0x1]; - u8 reserved_at_22[0x1]; - u8 log_max_fdb_encap_uplink[0x5]; - u8 reserved_at_21[0x3]; - u8 log_max_packet_reformat_context[0x5]; + u8 reserved_at_22[0x9]; + u8 log_max_encap_headers[0x5]; u8 reserved_2b[0x6]; u8 max_encap_header_size[0xa]; - u8 reserved_at_40[0xb]; - u8 log_max_esw_sf[0x5]; - u8 esw_sf_base_id[0x10]; - - u8 reserved_at_60[0x7a0]; + u8 reserved_40[0x7c0]; }; struct mlx5_ifc_qos_cap_bits { u8 packet_pacing[0x1]; - u8 esw_scheduling[0x1]; - u8 esw_bw_share[0x1]; - u8 esw_rate_limit[0x1]; - u8 reserved_at_4[0x1]; - u8 packet_pacing_burst_bound[0x1]; - u8 packet_pacing_typical_size[0x1]; - u8 reserved_at_7[0x1]; - u8 nic_sq_scheduling[0x1]; - u8 nic_bw_share[0x1]; - u8 nic_rate_limit[0x1]; - u8 
packet_pacing_uid[0x1]; - u8 log_esw_max_sched_depth[0x4]; - u8 reserved_at_10[0x10]; - - u8 reserved_at_20[0xb]; - u8 log_max_qos_nic_queue_group[0x5]; - u8 reserved_at_30[0x10]; - + u8 reserved_0[0x1f]; + u8 reserved_1[0x20]; u8 packet_pacing_max_rate[0x20]; - u8 packet_pacing_min_rate[0x20]; - - u8 reserved_at_80[0x10]; + u8 reserved_2[0x10]; u8 packet_pacing_rate_table_size[0x10]; - - u8 esw_element_type[0x10]; - u8 esw_tsar_type[0x10]; - - u8 reserved_at_c0[0x10]; - u8 max_qos_para_vport[0x10]; - - u8 max_tsar_bw_share[0x20]; - - u8 reserved_at_100[0x700]; -}; - -struct mlx5_ifc_debug_cap_bits { - u8 core_dump_general[0x1]; - u8 core_dump_qp[0x1]; - u8 reserved_at_2[0x7]; - u8 resource_dump[0x1]; - u8 reserved_at_a[0x16]; - - u8 reserved_at_20[0x2]; - u8 stall_detect[0x1]; - u8 reserved_at_23[0x1d]; - - u8 reserved_at_40[0x7c0]; + u8 reserved_3[0x760]; }; struct mlx5_ifc_per_protocol_networking_offload_caps_bits { @@ -910,42 +552,22 @@ struct mlx5_ifc_per_protocol_networking_offload_caps_bits { u8 lro_cap[0x1]; u8 lro_psh_flag[0x1]; u8 lro_time_stamp[0x1]; - u8 reserved_at_5[0x2]; - u8 wqe_vlan_insert[0x1]; + u8 reserved_at_5[0x3]; u8 self_lb_en_modifiable[0x1]; u8 reserved_at_9[0x2]; u8 max_lso_cap[0x5]; - u8 multi_pkt_send_wqe[0x2]; + u8 reserved_at_10[0x2]; u8 wqe_inline_mode[0x2]; u8 rss_ind_tbl_cap[0x4]; u8 reg_umr_sq[0x1]; u8 scatter_fcs[0x1]; - u8 enhanced_multi_pkt_send_wqe[0x1]; + u8 reserved_at_1a[0x1]; u8 tunnel_lso_const_out_ip_id[0x1]; - u8 tunnel_lro_gre[0x1]; - u8 tunnel_lro_vxlan[0x1]; - u8 tunnel_stateless_gre[0x1]; + u8 reserved_at_1c[0x2]; + u8 tunnel_statless_gre[0x1]; u8 tunnel_stateless_vxlan[0x1]; - u8 swp[0x1]; - u8 swp_csum[0x1]; - u8 swp_lso[0x1]; - u8 cqe_checksum_full[0x1]; - u8 tunnel_stateless_geneve_tx[0x1]; - u8 tunnel_stateless_mpls_over_udp[0x1]; - u8 tunnel_stateless_mpls_over_gre[0x1]; - u8 tunnel_stateless_vxlan_gpe[0x1]; - u8 tunnel_stateless_ipv4_over_vxlan[0x1]; - u8 tunnel_stateless_ip_over_ip[0x1]; - u8 
insert_trailer[0x1]; - u8 reserved_at_2b[0x1]; - u8 tunnel_stateless_ip_over_ip_rx[0x1]; - u8 tunnel_stateless_ip_over_ip_tx[0x1]; - u8 reserved_at_2e[0x2]; - u8 max_vxlan_udp_ports[0x8]; - u8 reserved_at_38[0x6]; - u8 max_geneve_opt_len[0x1]; - u8 tunnel_stateless_geneve_rx[0x1]; + u8 reserved_at_20[0x20]; u8 reserved_at_40[0x10]; u8 lro_min_mss_size[0x10]; @@ -957,20 +579,9 @@ struct mlx5_ifc_per_protocol_networking_offload_caps_bits { u8 reserved_at_200[0x600]; }; -enum { - MLX5_TIMESTAMP_FORMAT_CAP_FREE_RUNNING = 0x0, - MLX5_TIMESTAMP_FORMAT_CAP_REAL_TIME = 0x1, - MLX5_TIMESTAMP_FORMAT_CAP_FREE_RUNNING_AND_REAL_TIME = 0x2, -}; - struct mlx5_ifc_roce_cap_bits { u8 roce_apm[0x1]; - u8 reserved_at_1[0x3]; - u8 sw_r_roce_src_udp_port[0x1]; - u8 fl_rc_qp_when_roce_disabled[0x1]; - u8 fl_rc_qp_when_roce_enabled[0x1]; - u8 reserved_at_7[0x17]; - u8 qp_ts_format[0x2]; + u8 reserved_at_1[0x1f]; u8 reserved_at_20[0x60]; @@ -991,101 +602,6 @@ struct mlx5_ifc_roce_cap_bits { u8 reserved_at_100[0x700]; }; -struct mlx5_ifc_sync_steering_in_bits { - u8 opcode[0x10]; - u8 uid[0x10]; - - u8 reserved_at_20[0x10]; - u8 op_mod[0x10]; - - u8 reserved_at_40[0xc0]; -}; - -struct mlx5_ifc_sync_steering_out_bits { - u8 status[0x8]; - u8 reserved_at_8[0x18]; - - u8 syndrome[0x20]; - - u8 reserved_at_40[0x40]; -}; - -struct mlx5_ifc_device_mem_cap_bits { - u8 memic[0x1]; - u8 reserved_at_1[0x1f]; - - u8 reserved_at_20[0xb]; - u8 log_min_memic_alloc_size[0x5]; - u8 reserved_at_30[0x8]; - u8 log_max_memic_addr_alignment[0x8]; - - u8 memic_bar_start_addr[0x40]; - - u8 memic_bar_size[0x20]; - - u8 max_memic_size[0x20]; - - u8 steering_sw_icm_start_address[0x40]; - - u8 reserved_at_100[0x8]; - u8 log_header_modify_sw_icm_size[0x8]; - u8 reserved_at_110[0x2]; - u8 log_sw_icm_alloc_granularity[0x6]; - u8 log_steering_sw_icm_size[0x8]; - - u8 reserved_at_120[0x20]; - - u8 header_modify_sw_icm_start_address[0x40]; - - u8 reserved_at_180[0x80]; - - u8 memic_operations[0x20]; - - u8 
reserved_at_220[0x5e0]; -}; - -struct mlx5_ifc_device_event_cap_bits { - u8 user_affiliated_events[4][0x40]; - - u8 user_unaffiliated_events[4][0x40]; -}; - -struct mlx5_ifc_virtio_emulation_cap_bits { - u8 desc_tunnel_offload_type[0x1]; - u8 eth_frame_offload_type[0x1]; - u8 virtio_version_1_0[0x1]; - u8 device_features_bits_mask[0xd]; - u8 event_mode[0x8]; - u8 virtio_queue_type[0x8]; - - u8 max_tunnel_desc[0x10]; - u8 reserved_at_30[0x3]; - u8 log_doorbell_stride[0x5]; - u8 reserved_at_38[0x3]; - u8 log_doorbell_bar_size[0x5]; - - u8 doorbell_bar_offset[0x40]; - - u8 max_emulated_devices[0x8]; - u8 max_num_virtio_queues[0x18]; - - u8 reserved_at_a0[0x60]; - - u8 umem_1_buffer_param_a[0x20]; - - u8 umem_1_buffer_param_b[0x20]; - - u8 umem_2_buffer_param_a[0x20]; - - u8 umem_2_buffer_param_b[0x20]; - - u8 umem_3_buffer_param_a[0x20]; - - u8 umem_3_buffer_param_b[0x20]; - - u8 reserved_at_1c0[0x640]; -}; - enum { MLX5_ATOMIC_CAPS_ATOMIC_SIZE_QP_1_BYTE = 0x0, MLX5_ATOMIC_CAPS_ATOMIC_SIZE_QP_2_BYTES = 0x2, @@ -1113,9 +629,9 @@ enum { struct mlx5_ifc_atomic_caps_bits { u8 reserved_at_0[0x40]; - u8 atomic_req_8B_endianness_mode[0x2]; + u8 atomic_req_8B_endianess_mode[0x2]; u8 reserved_at_42[0x4]; - u8 supported_atomic_req_8B_endianness_mode_1[0x1]; + u8 supported_atomic_req_8B_endianess_mode_1[0x1]; u8 reserved_at_47[0x19]; @@ -1147,11 +663,7 @@ struct mlx5_ifc_odp_cap_bits { struct mlx5_ifc_odp_per_transport_service_cap_bits ud_odp_caps; - struct mlx5_ifc_odp_per_transport_service_cap_bits xrc_odp_caps; - - struct mlx5_ifc_odp_per_transport_service_cap_bits dc_odp_caps; - - u8 reserved_at_120[0x6E0]; + u8 reserved_at_e0[0x720]; }; struct mlx5_ifc_calc_op { @@ -1178,41 +690,13 @@ struct mlx5_ifc_vector_calc_cap_bits { struct mlx5_ifc_calc_op calc2; struct mlx5_ifc_calc_op calc3; - u8 reserved_at_c0[0x720]; -}; - -struct mlx5_ifc_tls_cap_bits { - u8 tls_1_2_aes_gcm_128[0x1]; - u8 tls_1_3_aes_gcm_128[0x1]; - u8 tls_1_2_aes_gcm_256[0x1]; - u8 tls_1_3_aes_gcm_256[0x1]; - 
u8 reserved_at_4[0x1c]; - - u8 reserved_at_20[0x7e0]; -}; - -struct mlx5_ifc_ipsec_cap_bits { - u8 ipsec_full_offload[0x1]; - u8 ipsec_crypto_offload[0x1]; - u8 ipsec_esn[0x1]; - u8 ipsec_crypto_esp_aes_gcm_256_encrypt[0x1]; - u8 ipsec_crypto_esp_aes_gcm_128_encrypt[0x1]; - u8 ipsec_crypto_esp_aes_gcm_256_decrypt[0x1]; - u8 ipsec_crypto_esp_aes_gcm_128_decrypt[0x1]; - u8 reserved_at_7[0x4]; - u8 log_max_ipsec_offload[0x5]; - u8 reserved_at_10[0x10]; - - u8 min_log_ipsec_full_replay_window[0x8]; - u8 max_log_ipsec_full_replay_window[0x8]; - u8 reserved_at_30[0x7d0]; + u8 reserved_at_e0[0x720]; }; enum { MLX5_WQ_TYPE_LINKED_LIST = 0x0, MLX5_WQ_TYPE_CYCLIC = 0x1, MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ = 0x2, - MLX5_WQ_TYPE_CYCLIC_STRIDING_RQ = 0x3, }; enum { @@ -1253,118 +737,35 @@ enum { MLX5_CAP_PORT_TYPE_ETH = 0x1, }; -enum { - MLX5_CAP_UMR_FENCE_STRONG = 0x0, - MLX5_CAP_UMR_FENCE_SMALL = 0x1, - MLX5_CAP_UMR_FENCE_NONE = 0x2, -}; - -enum { - MLX5_FLEX_PARSER_GENEVE_ENABLED = 1 << 3, - MLX5_FLEX_PARSER_MPLS_OVER_GRE_ENABLED = 1 << 4, - mlx5_FLEX_PARSER_MPLS_OVER_UDP_ENABLED = 1 << 5, - MLX5_FLEX_PARSER_VXLAN_GPE_ENABLED = 1 << 7, - MLX5_FLEX_PARSER_ICMP_V4_ENABLED = 1 << 8, - MLX5_FLEX_PARSER_ICMP_V6_ENABLED = 1 << 9, - MLX5_FLEX_PARSER_GENEVE_TLV_OPTION_0_ENABLED = 1 << 10, - MLX5_FLEX_PARSER_GTPU_ENABLED = 1 << 11, - MLX5_FLEX_PARSER_GTPU_DW_2_ENABLED = 1 << 16, - MLX5_FLEX_PARSER_GTPU_FIRST_EXT_DW_0_ENABLED = 1 << 17, - MLX5_FLEX_PARSER_GTPU_DW_0_ENABLED = 1 << 18, - MLX5_FLEX_PARSER_GTPU_TEID_ENABLED = 1 << 19, -}; - -enum { - MLX5_UCTX_CAP_RAW_TX = 1UL << 0, - MLX5_UCTX_CAP_INTERNAL_DEV_RES = 1UL << 1, -}; - -#define MLX5_FC_BULK_SIZE_FACTOR 128 - -enum mlx5_fc_bulk_alloc_bitmask { - MLX5_FC_BULK_128 = (1 << 0), - MLX5_FC_BULK_256 = (1 << 1), - MLX5_FC_BULK_512 = (1 << 2), - MLX5_FC_BULK_1024 = (1 << 3), - MLX5_FC_BULK_2048 = (1 << 4), - MLX5_FC_BULK_4096 = (1 << 5), - MLX5_FC_BULK_8192 = (1 << 6), - MLX5_FC_BULK_16384 = (1 << 7), -}; - -#define 
MLX5_FC_BULK_NUM_FCS(fc_enum) (MLX5_FC_BULK_SIZE_FACTOR * (fc_enum)) - -#define MLX5_FT_MAX_MULTIPATH_LEVEL 63 - -enum { - MLX5_STEERING_FORMAT_CONNECTX_5 = 0, - MLX5_STEERING_FORMAT_CONNECTX_6DX = 1, -}; - struct mlx5_ifc_cmd_hca_cap_bits { - u8 reserved_at_0[0x1f]; - u8 vhca_resource_manager[0x1]; - - u8 hca_cap_2[0x1]; - u8 reserved_at_21[0x2]; - u8 event_on_vhca_state_teardown_request[0x1]; - u8 event_on_vhca_state_in_use[0x1]; - u8 event_on_vhca_state_active[0x1]; - u8 event_on_vhca_state_allocated[0x1]; - u8 event_on_vhca_state_invalid[0x1]; - u8 reserved_at_28[0x8]; - u8 vhca_id[0x10]; - - u8 reserved_at_40[0x40]; + u8 reserved_at_0[0x80]; u8 log_max_srq_sz[0x8]; u8 log_max_qp_sz[0x8]; - u8 event_cap[0x1]; - u8 reserved_at_91[0x2]; - u8 isolate_vl_tc_new[0x1]; - u8 reserved_at_94[0x4]; - u8 prio_tag_required[0x1]; - u8 reserved_at_99[0x2]; + u8 reserved_at_90[0xb]; u8 log_max_qp[0x5]; - u8 reserved_at_a0[0x3]; - u8 ece_support[0x1]; - u8 reserved_at_a4[0x5]; - u8 reg_c_preserve[0x1]; - u8 reserved_at_aa[0x1]; + u8 reserved_at_a0[0xb]; u8 log_max_srq[0x5]; - u8 reserved_at_b0[0x1]; - u8 uplink_follow[0x1]; - u8 ts_cqe_to_dest_cqn[0x1]; - u8 reserved_at_b3[0xd]; + u8 reserved_at_b0[0x10]; - u8 max_sgl_for_optimized_performance[0x8]; + u8 reserved_at_c0[0x8]; u8 log_max_cq_sz[0x8]; - u8 relaxed_ordering_write_umr[0x1]; - u8 relaxed_ordering_read_umr[0x1]; - u8 reserved_at_d2[0x7]; - u8 virtio_net_device_emualtion_manager[0x1]; - u8 virtio_blk_device_emualtion_manager[0x1]; + u8 reserved_at_d0[0xb]; u8 log_max_cq[0x5]; u8 log_max_eq_sz[0x8]; - u8 relaxed_ordering_write[0x1]; - u8 relaxed_ordering_read[0x1]; + u8 reserved_at_e8[0x2]; u8 log_max_mkey[0x6]; - u8 reserved_at_f0[0x8]; - u8 dump_fill_mkey[0x1]; - u8 reserved_at_f9[0x2]; - u8 fast_teardown[0x1]; + u8 reserved_at_f0[0xc]; u8 log_max_eq[0x4]; u8 max_indirection[0x8]; - u8 fixed_buffer_size[0x1]; + u8 reserved_at_108[0x1]; u8 log_max_mrw_sz[0x7]; - u8 force_teardown[0x1]; - u8 reserved_at_111[0x1]; + u8 
reserved_at_110[0x2]; u8 log_max_bsf_list_size[0x6]; - u8 umr_extended_translation_offset[0x1]; - u8 null_mkey[0x1]; + u8 reserved_at_118[0x2]; u8 log_max_klm_list_size[0x6]; u8 reserved_at_120[0xa]; @@ -1372,34 +773,23 @@ struct mlx5_ifc_cmd_hca_cap_bits { u8 reserved_at_130[0xa]; u8 log_max_ra_res_dc[0x6]; - u8 reserved_at_140[0x6]; - u8 release_all_pages[0x1]; - u8 reserved_at_147[0x2]; - u8 roce_accl[0x1]; + u8 reserved_at_140[0xa]; u8 log_max_ra_req_qp[0x6]; u8 reserved_at_150[0xa]; u8 log_max_ra_res_qp[0x6]; - u8 end_pad[0x1]; + u8 pad_cap[0x1]; u8 cc_query_allowed[0x1]; u8 cc_modify_allowed[0x1]; - u8 start_pad[0x1]; - u8 cache_line_128byte[0x1]; - u8 reserved_at_165[0x4]; - u8 rts2rts_qp_counters_set_id[0x1]; - u8 reserved_at_16a[0x2]; - u8 vnic_env_int_rq_oob[0x1]; - u8 sbcam_reg[0x1]; - u8 reserved_at_16e[0x1]; - u8 qcam_reg[0x1]; + u8 reserved_at_163[0xd]; u8 gid_table_size[0x10]; u8 out_of_seq_cnt[0x1]; u8 vport_counters[0x1]; u8 retransmission_q_counters[0x1]; - u8 debug[0x1]; + u8 reserved_at_183[0x1]; u8 modify_rq_counter_set_id[0x1]; - u8 rq_delay_drop[0x1]; + u8 reserved_at_185[0x1]; u8 max_qp_cnt[0xa]; u8 pkey_table_size[0x10]; @@ -1407,34 +797,28 @@ struct mlx5_ifc_cmd_hca_cap_bits { u8 vhca_group_manager[0x1]; u8 ib_virt[0x1]; u8 eth_virt[0x1]; - u8 vnic_env_queue_counters[0x1]; + u8 reserved_at_1a4[0x1]; u8 ets[0x1]; u8 nic_flow_table[0x1]; - u8 eswitch_manager[0x1]; - u8 device_memory[0x1]; - u8 mcam_reg[0x1]; - u8 pcam_reg[0x1]; + u8 eswitch_flow_table[0x1]; + u8 early_vf_enable[0x1]; + u8 reserved_at_1a9[0x2]; u8 local_ca_ack_delay[0x5]; - u8 port_module_event[0x1]; - u8 enhanced_error_q_counters[0x1]; + u8 reserved_at_1af[0x2]; u8 ports_check[0x1]; - u8 reserved_at_1b3[0x1]; + u8 reserved_at_1b2[0x1]; u8 disable_link_up[0x1]; u8 beacon_led[0x1]; u8 port_type[0x2]; u8 num_ports[0x8]; - u8 reserved_at_1c0[0x1]; - u8 pps[0x1]; - u8 pps_modify[0x1]; + u8 reserved_at_1c0[0x3]; u8 log_max_msg[0x5]; u8 reserved_at_1c8[0x4]; u8 max_tc[0x4]; - u8 
temp_warn_event[0x1]; + u8 reserved_at_1d0[0x1]; u8 dcbx[0x1]; - u8 general_notification_event[0x1]; - u8 reserved_at_1d3[0x2]; - u8 fpga[0x1]; + u8 reserved_at_1d2[0x4]; u8 rol_s[0x1]; u8 rol_g[0x1]; u8 reserved_at_1d8[0x1]; @@ -1447,26 +831,14 @@ struct mlx5_ifc_cmd_hca_cap_bits { u8 wol_p[0x1]; u8 stat_rate_support[0x10]; - u8 reserved_at_1f0[0x1]; - u8 pci_sync_for_fw_update_event[0x1]; - u8 reserved_at_1f2[0x6]; - u8 init2_lag_tx_port_affinity[0x1]; - u8 reserved_at_1fa[0x3]; + u8 reserved_at_1f0[0xc]; u8 cqe_version[0x4]; u8 compact_address_vector[0x1]; u8 striding_rq[0x1]; - u8 reserved_at_202[0x1]; - u8 ipoib_enhanced_offloads[0x1]; + u8 reserved_at_201[0x2]; u8 ipoib_basic_offloads[0x1]; - u8 reserved_at_205[0x1]; - u8 repeated_block_disabled[0x1]; - u8 umr_modify_entity_size_disabled[0x1]; - u8 umr_modify_atomic_disabled[0x1]; - u8 umr_indirect_mkey_disabled[0x1]; - u8 umr_fence[0x2]; - u8 dc_req_scat_data_cqe[0x1]; - u8 reserved_at_20d[0x2]; + u8 reserved_at_205[0xa]; u8 drain_sigerr[0x1]; u8 cmdif_checksum[0x2]; u8 sigerr_cqe[0x1]; @@ -1500,8 +872,7 @@ struct mlx5_ifc_cmd_hca_cap_bits { u8 vector_calc[0x1]; u8 umr_ptr_rlky[0x1]; u8 imaicl[0x1]; - u8 qp_packet_based[0x1]; - u8 reserved_at_233[0x3]; + u8 reserved_at_232[0x4]; u8 qkv[0x1]; u8 pkv[0x1]; u8 set_deth_sqpn[0x1]; @@ -1511,28 +882,18 @@ struct mlx5_ifc_cmd_hca_cap_bits { u8 uc[0x1]; u8 rc[0x1]; - u8 uar_4k[0x1]; - u8 reserved_at_241[0x9]; + u8 reserved_at_240[0xa]; u8 uar_sz[0x6]; - u8 reserved_at_248[0x2]; - u8 umem_uid_0[0x1]; - u8 reserved_at_250[0x5]; + u8 reserved_at_250[0x8]; u8 log_pg_sz[0x8]; u8 bf[0x1]; - u8 driver_version[0x1]; + u8 reserved_at_261[0x1]; u8 pad_tx_eth_packet[0x1]; - u8 reserved_at_263[0x3]; - u8 mkey_by_name[0x1]; - u8 reserved_at_267[0x4]; - + u8 reserved_at_263[0x8]; u8 log_bf_reg_size[0x5]; - u8 reserved_at_270[0x6]; - u8 lag_dct[0x2]; - u8 lag_tx_port_affinity[0x1]; - u8 lag_native_fdb_selection[0x1]; - u8 reserved_at_27a[0x1]; + u8 reserved_at_270[0xb]; u8 
lag_master[0x1]; u8 num_lag_ports[0x4]; @@ -1542,14 +903,13 @@ struct mlx5_ifc_cmd_hca_cap_bits { u8 reserved_at_2a0[0x10]; u8 max_wqe_sz_rq[0x10]; - u8 max_flow_counter_31_16[0x10]; + u8 reserved_at_2c0[0x10]; u8 max_wqe_sz_sq_dc[0x10]; u8 reserved_at_2e0[0x7]; u8 max_qp_mcg[0x19]; - u8 reserved_at_300[0x10]; - u8 flow_counter_bulk_alloc[0x8]; + u8 reserved_at_300[0x18]; u8 log_max_mcg[0x8]; u8 reserved_at_320[0x3]; @@ -1559,12 +919,9 @@ struct mlx5_ifc_cmd_hca_cap_bits { u8 reserved_at_330[0xb]; u8 log_max_xrcd[0x5]; - u8 nic_receive_steering_discard[0x1]; - u8 receive_discard_vport_down[0x1]; - u8 transmit_discard_vport_down[0x1]; - u8 reserved_at_343[0x5]; + u8 reserved_at_340[0x8]; u8 log_max_flow_counter_bulk[0x8]; - u8 max_flow_counter_15_0[0x10]; + u8 max_flow_counter[0x10]; u8 reserved_at_360[0x3]; @@ -1586,8 +943,7 @@ struct mlx5_ifc_cmd_hca_cap_bits { u8 reserved_at_398[0x3]; u8 log_max_tis_per_sq[0x5]; - u8 ext_stride_num_range[0x1]; - u8 reserved_at_3a1[0x2]; + u8 reserved_at_3a0[0x3]; u8 log_max_stride_sz_rq[0x5]; u8 reserved_at_3a8[0x3]; u8 log_min_stride_sz_rq[0x5]; @@ -1596,45 +952,20 @@ struct mlx5_ifc_cmd_hca_cap_bits { u8 reserved_at_3b8[0x3]; u8 log_min_stride_sz_sq[0x5]; - u8 hairpin[0x1]; - u8 reserved_at_3c1[0x2]; - u8 log_max_hairpin_queues[0x5]; - u8 reserved_at_3c8[0x3]; - u8 log_max_hairpin_wq_data_sz[0x5]; - u8 reserved_at_3d0[0x3]; - u8 log_max_hairpin_num_packets[0x5]; - u8 reserved_at_3d8[0x3]; + u8 reserved_at_3c0[0x1b]; u8 log_max_wq_sz[0x5]; u8 nic_vport_change_event[0x1]; - u8 disable_local_lb_uc[0x1]; - u8 disable_local_lb_mc[0x1]; - u8 log_min_hairpin_wq_data_sz[0x5]; - u8 reserved_at_3e8[0x2]; - u8 vhca_state[0x1]; + u8 reserved_at_3e1[0xa]; u8 log_max_vlan_list[0x5]; u8 reserved_at_3f0[0x3]; u8 log_max_current_mc_list[0x5]; u8 reserved_at_3f8[0x3]; u8 log_max_current_uc_list[0x5]; - u8 general_obj_types[0x40]; + u8 reserved_at_400[0x80]; - u8 sq_ts_format[0x2]; - u8 rq_ts_format[0x2]; - u8 steering_format_version[0x4]; - u8 
create_qp_start_hint[0x18]; - - u8 reserved_at_460[0x3]; - u8 log_max_uctx[0x5]; - u8 reserved_at_468[0x2]; - u8 ipsec_offload[0x1]; - u8 log_max_umem[0x5]; - u8 max_num_eqs[0x10]; - - u8 reserved_at_480[0x1]; - u8 tls_tx[0x1]; - u8 tls_rx[0x1]; + u8 reserved_at_480[0x3]; u8 log_max_l2_table[0x5]; u8 reserved_at_488[0x8]; u8 log_uar_page_sz[0x10]; @@ -1643,152 +974,52 @@ struct mlx5_ifc_cmd_hca_cap_bits { u8 device_frequency_mhz[0x20]; u8 device_frequency_khz[0x20]; - u8 reserved_at_500[0x20]; - u8 num_of_uars_per_page[0x20]; + u8 reserved_at_500[0x80]; - u8 flex_parser_protocols[0x20]; - - u8 max_geneve_tlv_options[0x8]; - u8 reserved_at_568[0x3]; - u8 max_geneve_tlv_option_data_len[0x5]; - u8 reserved_at_570[0x10]; - - u8 reserved_at_580[0xb]; - u8 log_max_dci_stream_channels[0x5]; - u8 reserved_at_590[0x3]; - u8 log_max_dci_errored_streams[0x5]; - u8 reserved_at_598[0x8]; - - u8 reserved_at_5a0[0x13]; - u8 log_max_dek[0x5]; - u8 reserved_at_5b8[0x4]; - u8 mini_cqe_resp_stride_index[0x1]; - u8 cqe_128_always[0x1]; - u8 cqe_compression_128[0x1]; + u8 reserved_at_580[0x3f]; u8 cqe_compression[0x1]; u8 cqe_compression_timeout[0x10]; u8 cqe_compression_max_num[0x10]; - u8 reserved_at_5e0[0x8]; - u8 flex_parser_id_gtpu_dw_0[0x4]; - u8 reserved_at_5ec[0x4]; + u8 reserved_at_5e0[0x10]; u8 tag_matching[0x1]; u8 rndv_offload_rc[0x1]; u8 rndv_offload_dc[0x1]; u8 log_tag_matching_list_sz[0x5]; - u8 reserved_at_5f8[0x3]; + u8 reserved_at_5e8[0x3]; u8 log_max_xrq[0x5]; - u8 affiliate_nic_vport_criteria[0x8]; - u8 native_port_num[0x8]; - u8 num_vhca_ports[0x8]; - u8 flex_parser_id_gtpu_teid[0x4]; - u8 reserved_at_61c[0x2]; - u8 sw_owner_id[0x1]; - u8 reserved_at_61f[0x1]; - - u8 max_num_of_monitor_counters[0x10]; - u8 num_ppcnt_monitor_counters[0x10]; - - u8 max_num_sf[0x10]; - u8 num_q_monitor_counters[0x10]; - - u8 reserved_at_660[0x20]; - - u8 sf[0x1]; - u8 sf_set_partition[0x1]; - u8 reserved_at_682[0x1]; - u8 log_max_sf[0x5]; - u8 apu[0x1]; - u8 reserved_at_689[0x7]; - u8 
log_min_sf_size[0x8]; - u8 max_num_sf_partitions[0x8]; - - u8 uctx_cap[0x20]; - - u8 reserved_at_6c0[0x4]; - u8 flex_parser_id_geneve_tlv_option_0[0x4]; - u8 flex_parser_id_icmp_dw1[0x4]; - u8 flex_parser_id_icmp_dw0[0x4]; - u8 flex_parser_id_icmpv6_dw1[0x4]; - u8 flex_parser_id_icmpv6_dw0[0x4]; - u8 flex_parser_id_outer_first_mpls_over_gre[0x4]; - u8 flex_parser_id_outer_first_mpls_over_udp_label[0x4]; - - u8 reserved_at_6e0[0x10]; - u8 sf_base_id[0x10]; - - u8 flex_parser_id_gtpu_dw_2[0x4]; - u8 flex_parser_id_gtpu_first_ext_dw_0[0x4]; - u8 num_total_dynamic_vf_msix[0x18]; - u8 reserved_at_720[0x14]; - u8 dynamic_msix_table_size[0xc]; - u8 reserved_at_740[0xc]; - u8 min_dynamic_vf_msix_table_size[0x4]; - u8 reserved_at_750[0x4]; - u8 max_dynamic_vf_msix_table_size[0xc]; - - u8 reserved_at_760[0x20]; - u8 vhca_tunnel_commands[0x40]; - u8 reserved_at_7c0[0x40]; -}; - -struct mlx5_ifc_cmd_hca_cap_2_bits { - u8 reserved_at_0[0xa0]; - - u8 max_reformat_insert_size[0x8]; - u8 max_reformat_insert_offset[0x8]; - u8 max_reformat_remove_size[0x8]; - u8 max_reformat_remove_offset[0x8]; - - u8 reserved_at_c0[0x740]; + u8 reserved_at_5f0[0x200]; }; enum mlx5_flow_destination_type { MLX5_FLOW_DESTINATION_TYPE_VPORT = 0x0, MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE = 0x1, MLX5_FLOW_DESTINATION_TYPE_TIR = 0x2, - MLX5_FLOW_DESTINATION_TYPE_FLOW_SAMPLER = 0x6, - MLX5_FLOW_DESTINATION_TYPE_PORT = 0x99, MLX5_FLOW_DESTINATION_TYPE_COUNTER = 0x100, - MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE_NUM = 0x101, -}; - -enum mlx5_flow_table_miss_action { - MLX5_FLOW_TABLE_MISS_ACTION_DEF, - MLX5_FLOW_TABLE_MISS_ACTION_FWD, - MLX5_FLOW_TABLE_MISS_ACTION_SWITCH_DOMAIN, }; struct mlx5_ifc_dest_format_struct_bits { u8 destination_type[0x8]; u8 destination_id[0x18]; - u8 destination_eswitch_owner_vhca_id_valid[0x1]; - u8 packet_reformat[0x1]; - u8 reserved_at_22[0xe]; - u8 destination_eswitch_owner_vhca_id[0x10]; + u8 reserved_at_20[0x20]; }; struct mlx5_ifc_flow_counter_list_bits { - u8 
flow_counter_id[0x20]; + u8 clear[0x1]; + u8 num_of_counters[0xf]; + u8 flow_counter_id[0x10]; u8 reserved_at_20[0x20]; }; -struct mlx5_ifc_extended_dest_format_bits { - struct mlx5_ifc_dest_format_struct_bits destination_entry; - - u8 packet_reformat_id[0x20]; - - u8 reserved_at_60[0x20]; -}; - union mlx5_ifc_dest_format_struct_flow_counter_list_auto_bits { - struct mlx5_ifc_extended_dest_format_bits extended_dest_format; + struct mlx5_ifc_dest_format_struct_bits dest_format_struct; struct mlx5_ifc_flow_counter_list_bits flow_counter_list; + u8 reserved_at_0[0x40]; }; struct mlx5_ifc_fte_match_param_bits { @@ -1798,13 +1029,7 @@ struct mlx5_ifc_fte_match_param_bits { struct mlx5_ifc_fte_match_set_lyr_2_4_bits inner_headers; - struct mlx5_ifc_fte_match_set_misc2_bits misc_parameters_2; - - struct mlx5_ifc_fte_match_set_misc3_bits misc_parameters_3; - - struct mlx5_ifc_fte_match_set_misc4_bits misc_parameters_4; - - u8 reserved_at_c00[0x400]; + u8 reserved_at_600[0xa00]; }; enum { @@ -1863,22 +1088,15 @@ struct mlx5_ifc_wq_bits { u8 reserved_at_118[0x3]; u8 log_wq_sz[0x5]; - u8 dbr_umem_valid[0x1]; - u8 wq_umem_valid[0x1]; - u8 reserved_at_122[0x1]; - u8 log_hairpin_num_packets[0x5]; - u8 reserved_at_128[0x3]; - u8 log_hairpin_data_sz[0x5]; - - u8 reserved_at_130[0x4]; - u8 log_wqe_num_of_strides[0x4]; + u8 reserved_at_120[0x15]; + u8 log_wqe_num_of_strides[0x3]; u8 two_byte_shift_en[0x1]; u8 reserved_at_139[0x4]; u8 log_wqe_stride_size[0x3]; u8 reserved_at_140[0x4c0]; - struct mlx5_ifc_cmd_pas_bits pas[]; + struct mlx5_ifc_cmd_pas_bits pas[0]; }; struct mlx5_ifc_rq_num_bits { @@ -1907,8 +1125,7 @@ struct mlx5_ifc_cong_control_r_roce_ecn_np_bits { u8 reserved_at_c0[0x12]; u8 cnp_dscp[0x6]; - u8 reserved_at_d8[0x4]; - u8 cnp_prio_mode[0x1]; + u8 reserved_at_d8[0x5]; u8 cnp_802p_prio[0x3]; u8 reserved_at_e0[0x720]; @@ -1996,132 +1213,6 @@ struct mlx5_ifc_resize_field_select_bits { u8 resize_field_select[0x20]; }; -struct mlx5_ifc_resource_dump_bits { - u8 
more_dump[0x1]; - u8 inline_dump[0x1]; - u8 reserved_at_2[0xa]; - u8 seq_num[0x4]; - u8 segment_type[0x10]; - - u8 reserved_at_20[0x10]; - u8 vhca_id[0x10]; - - u8 index1[0x20]; - - u8 index2[0x20]; - - u8 num_of_obj1[0x10]; - u8 num_of_obj2[0x10]; - - u8 reserved_at_a0[0x20]; - - u8 device_opaque[0x40]; - - u8 mkey[0x20]; - - u8 size[0x20]; - - u8 address[0x40]; - - u8 inline_data[52][0x20]; -}; - -struct mlx5_ifc_resource_dump_menu_record_bits { - u8 reserved_at_0[0x4]; - u8 num_of_obj2_supports_active[0x1]; - u8 num_of_obj2_supports_all[0x1]; - u8 must_have_num_of_obj2[0x1]; - u8 support_num_of_obj2[0x1]; - u8 num_of_obj1_supports_active[0x1]; - u8 num_of_obj1_supports_all[0x1]; - u8 must_have_num_of_obj1[0x1]; - u8 support_num_of_obj1[0x1]; - u8 must_have_index2[0x1]; - u8 support_index2[0x1]; - u8 must_have_index1[0x1]; - u8 support_index1[0x1]; - u8 segment_type[0x10]; - - u8 segment_name[4][0x20]; - - u8 index1_name[4][0x20]; - - u8 index2_name[4][0x20]; -}; - -struct mlx5_ifc_resource_dump_segment_header_bits { - u8 length_dw[0x10]; - u8 segment_type[0x10]; -}; - -struct mlx5_ifc_resource_dump_command_segment_bits { - struct mlx5_ifc_resource_dump_segment_header_bits segment_header; - - u8 segment_called[0x10]; - u8 vhca_id[0x10]; - - u8 index1[0x20]; - - u8 index2[0x20]; - - u8 num_of_obj1[0x10]; - u8 num_of_obj2[0x10]; -}; - -struct mlx5_ifc_resource_dump_error_segment_bits { - struct mlx5_ifc_resource_dump_segment_header_bits segment_header; - - u8 reserved_at_20[0x10]; - u8 syndrome_id[0x10]; - - u8 reserved_at_40[0x40]; - - u8 error[8][0x20]; -}; - -struct mlx5_ifc_resource_dump_info_segment_bits { - struct mlx5_ifc_resource_dump_segment_header_bits segment_header; - - u8 reserved_at_20[0x18]; - u8 dump_version[0x8]; - - u8 hw_version[0x20]; - - u8 fw_version[0x20]; -}; - -struct mlx5_ifc_resource_dump_menu_segment_bits { - struct mlx5_ifc_resource_dump_segment_header_bits segment_header; - - u8 reserved_at_20[0x10]; - u8 num_of_records[0x10]; - - 
struct mlx5_ifc_resource_dump_menu_record_bits record[]; -}; - -struct mlx5_ifc_resource_dump_resource_segment_bits { - struct mlx5_ifc_resource_dump_segment_header_bits segment_header; - - u8 reserved_at_20[0x20]; - - u8 index1[0x20]; - - u8 index2[0x20]; - - u8 payload[][0x20]; -}; - -struct mlx5_ifc_resource_dump_terminate_segment_bits { - struct mlx5_ifc_resource_dump_segment_header_bits segment_header; -}; - -struct mlx5_ifc_menu_resource_dump_response_bits { - struct mlx5_ifc_resource_dump_info_segment_bits info; - struct mlx5_ifc_resource_dump_command_segment_bits cmd; - struct mlx5_ifc_resource_dump_menu_segment_bits menu; - struct mlx5_ifc_resource_dump_terminate_segment_bits terminate; -}; - enum { MLX5_MODIFY_FIELD_SELECT_MODIFY_FIELD_SELECT_CQ_PERIOD = 0x1, MLX5_MODIFY_FIELD_SELECT_MODIFY_FIELD_SELECT_CQ_MAX_COUNT = 0x2, @@ -2262,42 +1353,6 @@ struct mlx5_ifc_phys_layer_cntrs_bits { u8 reserved_at_640[0x180]; }; -struct mlx5_ifc_phys_layer_statistical_cntrs_bits { - u8 time_since_last_clear_high[0x20]; - - u8 time_since_last_clear_low[0x20]; - - u8 phy_received_bits_high[0x20]; - - u8 phy_received_bits_low[0x20]; - - u8 phy_symbol_errors_high[0x20]; - - u8 phy_symbol_errors_low[0x20]; - - u8 phy_corrected_bits_high[0x20]; - - u8 phy_corrected_bits_low[0x20]; - - u8 phy_corrected_bits_lane0_high[0x20]; - - u8 phy_corrected_bits_lane0_low[0x20]; - - u8 phy_corrected_bits_lane1_high[0x20]; - - u8 phy_corrected_bits_lane1_low[0x20]; - - u8 phy_corrected_bits_lane2_high[0x20]; - - u8 phy_corrected_bits_lane2_low[0x20]; - - u8 phy_corrected_bits_lane3_high[0x20]; - - u8 phy_corrected_bits_lane3_low[0x20]; - - u8 reserved_at_200[0x5c0]; -}; - struct mlx5_ifc_ib_port_cntrs_grp_data_layout_bits { u8 symbol_error_counter[0x10]; @@ -2325,33 +1380,15 @@ struct mlx5_ifc_ib_port_cntrs_grp_data_layout_bits { u8 vl_15_dropped[0x10]; - u8 reserved_at_a0[0x80]; - - u8 port_xmit_wait[0x20]; + u8 reserved_at_a0[0xa0]; }; -struct 
mlx5_ifc_eth_per_tc_prio_grp_data_layout_bits { +struct mlx5_ifc_eth_per_traffic_grp_data_layout_bits { u8 transmit_queue_high[0x20]; u8 transmit_queue_low[0x20]; - u8 no_buffer_discard_uc_high[0x20]; - - u8 no_buffer_discard_uc_low[0x20]; - - u8 reserved_at_80[0x740]; -}; - -struct mlx5_ifc_eth_per_tc_congest_prio_grp_data_layout_bits { - u8 wred_discard_high[0x20]; - - u8 wred_discard_low[0x20]; - - u8 ecn_marked_tc_high[0x20]; - - u8 ecn_marked_tc_low[0x20]; - - u8 reserved_at_80[0x740]; + u8 reserved_at_40[0x780]; }; struct mlx5_ifc_eth_per_prio_grp_data_layout_bits { @@ -2395,19 +1432,7 @@ struct mlx5_ifc_eth_per_prio_grp_data_layout_bits { u8 rx_pause_transition_low[0x20]; - u8 rx_discards_high[0x20]; - - u8 rx_discards_low[0x20]; - - u8 device_stall_minor_watermark_cnt_high[0x20]; - - u8 device_stall_minor_watermark_cnt_low[0x20]; - - u8 device_stall_critical_watermark_cnt_high[0x20]; - - u8 device_stall_critical_watermark_cnt_low[0x20]; - - u8 reserved_at_480[0x340]; + u8 reserved_at_3c0[0x400]; }; struct mlx5_ifc_eth_extended_cntrs_grp_data_layout_bits { @@ -2415,21 +1440,7 @@ struct mlx5_ifc_eth_extended_cntrs_grp_data_layout_bits { u8 port_transmit_wait_low[0x20]; - u8 reserved_at_40[0x100]; - - u8 rx_buffer_almost_full_high[0x20]; - - u8 rx_buffer_almost_full_low[0x20]; - - u8 rx_buffer_full_high[0x20]; - - u8 rx_buffer_full_low[0x20]; - - u8 rx_icrc_encapsulated_high[0x20]; - - u8 rx_icrc_encapsulated_low[0x20]; - - u8 reserved_at_200[0x5c0]; + u8 reserved_at_40[0x780]; }; struct mlx5_ifc_eth_3635_cntrs_grp_data_layout_bits { @@ -2724,42 +1735,6 @@ struct mlx5_ifc_eth_802_3_cntrs_grp_data_layout_bits { u8 reserved_at_4c0[0x300]; }; -struct mlx5_ifc_pcie_perf_cntrs_grp_data_layout_bits { - u8 life_time_counter_high[0x20]; - - u8 life_time_counter_low[0x20]; - - u8 rx_errors[0x20]; - - u8 tx_errors[0x20]; - - u8 l0_to_recovery_eieos[0x20]; - - u8 l0_to_recovery_ts[0x20]; - - u8 l0_to_recovery_framing[0x20]; - - u8 l0_to_recovery_retrain[0x20]; - - u8 
crc_error_dllp[0x20]; - - u8 crc_error_tlp[0x20]; - - u8 tx_overflow_buffer_pkt_high[0x20]; - - u8 tx_overflow_buffer_pkt_low[0x20]; - - u8 outbound_stalled_reads[0x20]; - - u8 outbound_stalled_writes[0x20]; - - u8 outbound_stalled_reads_events[0x20]; - - u8 outbound_stalled_writes_events[0x20]; - - u8 reserved_at_200[0x5c0]; -}; - struct mlx5_ifc_cmd_inter_comp_event_bits { u8 command_completion_vector[0x20]; @@ -2915,10 +1890,6 @@ enum { MLX5_QPC_PM_STATE_MIGRATED = 0x3, }; -enum { - MLX5_QPC_OFFLOAD_TYPE_RNDV = 0x1, -}; - enum { MLX5_QPC_END_PADDING_MODE_SCATTER_AS_IS = 0x0, MLX5_QPC_END_PADDING_MODE_PAD_TO_CACHE_LINE_ALIGNMENT = 0x1, @@ -2956,22 +1927,13 @@ enum { MLX5_QPC_CS_RES_UP_TO_64B = 0x2, }; -enum { - MLX5_TIMESTAMP_FORMAT_FREE_RUNNING = 0x0, - MLX5_TIMESTAMP_FORMAT_DEFAULT = 0x1, - MLX5_TIMESTAMP_FORMAT_REAL_TIME = 0x2, -}; - struct mlx5_ifc_qpc_bits { u8 state[0x4]; u8 lag_tx_port_affinity[0x4]; u8 st[0x8]; - u8 reserved_at_10[0x2]; - u8 isolate_vl_tc[0x1]; + u8 reserved_at_10[0x3]; u8 pm_state[0x2]; - u8 reserved_at_15[0x1]; - u8 req_e2e_credit_mode[0x2]; - u8 offload_type[0x4]; + u8 reserved_at_15[0x7]; u8 end_padding_mode[0x2]; u8 reserved_at_1e[0x2]; @@ -2991,9 +1953,7 @@ struct mlx5_ifc_qpc_bits { u8 log_rq_stride[0x3]; u8 no_sq[0x1]; u8 log_sq_size[0x4]; - u8 reserved_at_55[0x3]; - u8 ts_format[0x2]; - u8 reserved_at_5a[0x1]; + u8 reserved_at_55[0x6]; u8 rlky[0x1]; u8 ulp_stateless_offload_mode[0x4]; @@ -3028,12 +1988,10 @@ struct mlx5_ifc_qpc_bits { u8 reserved_at_3c0[0x8]; u8 next_send_psn[0x18]; - u8 reserved_at_3e0[0x3]; - u8 log_num_dci_stream_channels[0x5]; + u8 reserved_at_3e0[0x8]; u8 cqn_snd[0x18]; - u8 reserved_at_400[0x3]; - u8 log_num_dci_errored_streams[0x5]; + u8 reserved_at_400[0x8]; u8 deth_sqpn[0x18]; u8 reserved_at_420[0x20]; @@ -3095,10 +2053,7 @@ struct mlx5_ifc_qpc_bits { u8 dc_access_key[0x40]; - u8 reserved_at_680[0x3]; - u8 dbr_umem_valid[0x1]; - - u8 reserved_at_684[0xbc]; + u8 reserved_at_680[0xc0]; }; struct 
mlx5_ifc_roce_addr_layout_bits { @@ -3120,7 +2075,6 @@ struct mlx5_ifc_roce_addr_layout_bits { union mlx5_ifc_hca_cap_union_bits { struct mlx5_ifc_cmd_hca_cap_bits cmd_hca_cap; - struct mlx5_ifc_cmd_hca_cap_2_bits cmd_hca_cap_2; struct mlx5_ifc_odp_cap_bits odp_cap; struct mlx5_ifc_atomic_caps_bits atomic_caps; struct mlx5_ifc_roce_cap_bits roce_cap; @@ -3130,11 +2084,6 @@ union mlx5_ifc_hca_cap_union_bits { struct mlx5_ifc_e_switch_cap_bits e_switch_cap; struct mlx5_ifc_vector_calc_cap_bits vector_calc_cap; struct mlx5_ifc_qos_cap_bits qos_cap; - struct mlx5_ifc_debug_cap_bits debug_cap; - struct mlx5_ifc_fpga_cap_bits fpga_cap; - struct mlx5_ifc_tls_cap_bits tls_cap; - struct mlx5_ifc_device_mem_cap_bits device_mem_cap; - struct mlx5_ifc_virtio_emulation_cap_bits virtio_emulation_cap; u8 reserved_at_0[0x8000]; }; @@ -3143,32 +2092,12 @@ enum { MLX5_FLOW_CONTEXT_ACTION_DROP = 0x2, MLX5_FLOW_CONTEXT_ACTION_FWD_DEST = 0x4, MLX5_FLOW_CONTEXT_ACTION_COUNT = 0x8, - MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT = 0x10, + MLX5_FLOW_CONTEXT_ACTION_ENCAP = 0x10, MLX5_FLOW_CONTEXT_ACTION_DECAP = 0x20, - MLX5_FLOW_CONTEXT_ACTION_MOD_HDR = 0x40, - MLX5_FLOW_CONTEXT_ACTION_VLAN_POP = 0x80, - MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH = 0x100, - MLX5_FLOW_CONTEXT_ACTION_VLAN_POP_2 = 0x400, - MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH_2 = 0x800, - MLX5_FLOW_CONTEXT_ACTION_IPSEC_DECRYPT = 0x1000, - MLX5_FLOW_CONTEXT_ACTION_IPSEC_ENCRYPT = 0x2000, -}; - -enum { - MLX5_FLOW_CONTEXT_FLOW_SOURCE_ANY_VPORT = 0x0, - MLX5_FLOW_CONTEXT_FLOW_SOURCE_UPLINK = 0x1, - MLX5_FLOW_CONTEXT_FLOW_SOURCE_LOCAL_VPORT = 0x2, -}; - -struct mlx5_ifc_vlan_bits { - u8 ethtype[0x10]; - u8 prio[0x3]; - u8 cfi[0x1]; - u8 vid[0xc]; }; struct mlx5_ifc_flow_context_bits { - struct mlx5_ifc_vlan_bits push_vlan; + u8 reserved_at_0[0x20]; u8 group_id[0x20]; @@ -3178,29 +2107,21 @@ struct mlx5_ifc_flow_context_bits { u8 reserved_at_60[0x10]; u8 action[0x10]; - u8 extended_destination[0x1]; - u8 reserved_at_81[0x1]; - u8 
flow_source[0x2]; - u8 reserved_at_84[0x4]; + u8 reserved_at_80[0x8]; u8 destination_list_size[0x18]; u8 reserved_at_a0[0x8]; u8 flow_counter_list_size[0x18]; - u8 packet_reformat_id[0x20]; + u8 encap_id[0x20]; - u8 modify_header_id[0x20]; - - struct mlx5_ifc_vlan_bits push_vlan_2; - - u8 ipsec_obj_id[0x20]; - u8 reserved_at_140[0xc0]; + u8 reserved_at_e0[0x120]; struct mlx5_ifc_fte_match_param_bits match_value; u8 reserved_at_1200[0x600]; - union mlx5_ifc_dest_format_struct_flow_counter_list_auto_bits destination[]; + union mlx5_ifc_dest_format_struct_flow_counter_list_auto_bits destination[0]; }; enum { @@ -3222,8 +2143,7 @@ struct mlx5_ifc_xrc_srqc_bits { u8 xrcd[0x18]; u8 page_offset[0x6]; - u8 reserved_at_46[0x1]; - u8 dbr_umem_valid[0x1]; + u8 reserved_at_46[0x2]; u8 cqn[0x18]; u8 reserved_at_60[0x20]; @@ -3251,28 +2171,6 @@ struct mlx5_ifc_xrc_srqc_bits { u8 reserved_at_180[0x80]; }; -struct mlx5_ifc_vnic_diagnostic_statistics_bits { - u8 counter_error_queues[0x20]; - - u8 total_error_queues[0x20]; - - u8 send_queue_priority_update_flow[0x20]; - - u8 reserved_at_60[0x20]; - - u8 nic_receive_steering_discard[0x40]; - - u8 receive_discard_vport_down[0x40]; - - u8 transmit_discard_vport_down[0x40]; - - u8 reserved_at_140[0xa0]; - - u8 internal_rq_out_of_buffer[0x20]; - - u8 reserved_at_200[0xe00]; -}; - struct mlx5_ifc_traffic_counter_bits { u8 packets[0x40]; @@ -3281,8 +2179,7 @@ struct mlx5_ifc_traffic_counter_bits { struct mlx5_ifc_tisc_bits { u8 strict_lag_tx_port_affinity[0x1]; - u8 tls_en[0x1]; - u8 reserved_at_2[0x2]; + u8 reserved_at_1[0x3]; u8 lag_tx_port_affinity[0x04]; u8 reserved_at_8[0x4]; @@ -3294,13 +2191,7 @@ struct mlx5_ifc_tisc_bits { u8 reserved_at_120[0x8]; u8 transport_domain[0x18]; - u8 reserved_at_140[0x8]; - u8 underlay_qpn[0x18]; - - u8 reserved_at_160[0x8]; - u8 pd[0x18]; - - u8 reserved_at_180[0x380]; + u8 reserved_at_140[0x3c0]; }; enum { @@ -3320,16 +2211,15 @@ enum { }; enum { - MLX5_TIRC_SELF_LB_BLOCK_BLOCK_UNICAST = 0x1, - 
MLX5_TIRC_SELF_LB_BLOCK_BLOCK_MULTICAST = 0x2, + MLX5_TIRC_SELF_LB_BLOCK_BLOCK_UNICAST_ = 0x1, + MLX5_TIRC_SELF_LB_BLOCK_BLOCK_MULTICAST_ = 0x2, }; struct mlx5_ifc_tirc_bits { u8 reserved_at_0[0x20]; u8 disp_type[0x4]; - u8 tls_en[0x1]; - u8 reserved_at_25[0x1b]; + u8 reserved_at_24[0x1c]; u8 reserved_at_40[0x40]; @@ -3417,15 +2307,11 @@ struct mlx5_ifc_sqc_bits { u8 cd_master[0x1]; u8 fre[0x1]; u8 flush_in_error_en[0x1]; - u8 allow_multi_pkt_send_wqe[0x1]; + u8 reserved_at_4[0x1]; u8 min_wqe_inline_mode[0x3]; u8 state[0x4]; u8 reg_umr[0x1]; - u8 allow_swp[0x1]; - u8 hairpin[0x1]; - u8 reserved_at_f[0xb]; - u8 ts_format[0x2]; - u8 reserved_at_1c[0x4]; + u8 reserved_at_d[0x13]; u8 reserved_at_20[0x8]; u8 user_index[0x18]; @@ -3433,21 +2319,11 @@ struct mlx5_ifc_sqc_bits { u8 reserved_at_40[0x8]; u8 cqn[0x18]; - u8 reserved_at_60[0x8]; - u8 hairpin_peer_rq[0x18]; + u8 reserved_at_60[0x90]; - u8 reserved_at_80[0x10]; - u8 hairpin_peer_vhca[0x10]; - - u8 reserved_at_a0[0x20]; - - u8 reserved_at_c0[0x8]; - u8 ts_cqe_to_dest_cqn[0x18]; - - u8 reserved_at_e0[0x10]; u8 packet_pacing_rate_limit_index[0x10]; u8 tis_lst_sz[0x10]; - u8 qos_queue_group_id[0x10]; + u8 reserved_at_110[0x10]; u8 reserved_at_120[0x40]; @@ -3457,53 +2333,18 @@ struct mlx5_ifc_sqc_bits { struct mlx5_ifc_wq_bits wq; }; -enum { - SCHEDULING_CONTEXT_ELEMENT_TYPE_TSAR = 0x0, - SCHEDULING_CONTEXT_ELEMENT_TYPE_VPORT = 0x1, - SCHEDULING_CONTEXT_ELEMENT_TYPE_VPORT_TC = 0x2, - SCHEDULING_CONTEXT_ELEMENT_TYPE_PARA_VPORT_TC = 0x3, - SCHEDULING_CONTEXT_ELEMENT_TYPE_QUEUE_GROUP = 0x4, -}; - -enum { - ELEMENT_TYPE_CAP_MASK_TASR = 1 << 0, - ELEMENT_TYPE_CAP_MASK_VPORT = 1 << 1, - ELEMENT_TYPE_CAP_MASK_VPORT_TC = 1 << 2, - ELEMENT_TYPE_CAP_MASK_PARA_VPORT_TC = 1 << 3, -}; - -struct mlx5_ifc_scheduling_context_bits { - u8 element_type[0x8]; - u8 reserved_at_8[0x18]; - - u8 element_attributes[0x20]; - - u8 parent_element_id[0x20]; - - u8 reserved_at_60[0x40]; - - u8 bw_share[0x20]; - - u8 max_average_bw[0x20]; - - u8 
reserved_at_e0[0x120]; -}; - struct mlx5_ifc_rqtc_bits { - u8 reserved_at_0[0xa0]; + u8 reserved_at_0[0xa0]; - u8 reserved_at_a0[0x5]; - u8 list_q_type[0x3]; - u8 reserved_at_a8[0x8]; - u8 rqt_max_size[0x10]; + u8 reserved_at_a0[0x10]; + u8 rqt_max_size[0x10]; - u8 rq_vhca_id_format[0x1]; - u8 reserved_at_c1[0xf]; - u8 rqt_actual_size[0x10]; + u8 reserved_at_c0[0x10]; + u8 rqt_actual_size[0x10]; - u8 reserved_at_e0[0x6a0]; + u8 reserved_at_e0[0x6a0]; - struct mlx5_ifc_rq_num_bits rq_num[]; + struct mlx5_ifc_rq_num_bits rq_num[0]; }; enum { @@ -3519,17 +2360,14 @@ enum { struct mlx5_ifc_rqc_bits { u8 rlky[0x1]; - u8 delay_drop_en[0x1]; + u8 reserved_at_1[0x1]; u8 scatter_fcs[0x1]; u8 vsd[0x1]; u8 mem_rq_type[0x4]; u8 state[0x4]; u8 reserved_at_c[0x1]; u8 flush_in_error_en[0x1]; - u8 hairpin[0x1]; - u8 reserved_at_f[0xb]; - u8 ts_format[0x2]; - u8 reserved_at_1c[0x4]; + u8 reserved_at_e[0x12]; u8 reserved_at_20[0x8]; u8 user_index[0x18]; @@ -3543,13 +2381,7 @@ struct mlx5_ifc_rqc_bits { u8 reserved_at_80[0x8]; u8 rmpn[0x18]; - u8 reserved_at_a0[0x8]; - u8 hairpin_peer_sq[0x18]; - - u8 reserved_at_c0[0x10]; - u8 hairpin_peer_vhca[0x10]; - - u8 reserved_at_e0[0xa0]; + u8 reserved_at_a0[0xe0]; struct mlx5_ifc_wq_bits wq; }; @@ -3575,9 +2407,7 @@ struct mlx5_ifc_rmpc_bits { struct mlx5_ifc_nic_vport_context_bits { u8 reserved_at_0[0x5]; u8 min_wqe_inline_mode[0x3]; - u8 reserved_at_8[0x15]; - u8 disable_mc_local_lb[0x1]; - u8 disable_uc_local_lb[0x1]; + u8 reserved_at_8[0x17]; u8 roce_en[0x1]; u8 arm_change_event[0x1]; @@ -3588,12 +2418,7 @@ struct mlx5_ifc_nic_vport_context_bits { u8 event_on_mc_address_change[0x1]; u8 event_on_uc_address_change[0x1]; - u8 reserved_at_40[0xc]; - - u8 affiliation_criteria[0x4]; - u8 affiliated_vhca_id[0x10]; - - u8 reserved_at_60[0xd0]; + u8 reserved_at_40[0xf0]; u8 mtu[0x10]; @@ -3617,26 +2442,19 @@ struct mlx5_ifc_nic_vport_context_bits { u8 reserved_at_7e0[0x20]; - u8 current_uc_mac_address[][0x40]; + u8 
current_uc_mac_address[0][0x40]; }; enum { MLX5_MKC_ACCESS_MODE_PA = 0x0, MLX5_MKC_ACCESS_MODE_MTT = 0x1, MLX5_MKC_ACCESS_MODE_KLMS = 0x2, - MLX5_MKC_ACCESS_MODE_KSM = 0x3, - MLX5_MKC_ACCESS_MODE_SW_ICM = 0x4, - MLX5_MKC_ACCESS_MODE_MEMIC = 0x5, }; struct mlx5_ifc_mkc_bits { u8 reserved_at_0[0x1]; u8 free[0x1]; - u8 reserved_at_2[0x1]; - u8 access_mode_4_2[0x3]; - u8 reserved_at_6[0x7]; - u8 relaxed_ordering_write[0x1]; - u8 reserved_at_e[0x1]; + u8 reserved_at_2[0xd]; u8 small_fence_on_rdma_read_response[0x1]; u8 umr_en[0x1]; u8 a[0x1]; @@ -3644,7 +2462,7 @@ struct mlx5_ifc_mkc_bits { u8 rr[0x1]; u8 lw[0x1]; u8 lr[0x1]; - u8 access_mode_1_0[0x2]; + u8 access_mode[0x2]; u8 reserved_at_18[0x8]; u8 qpn[0x18]; @@ -3671,9 +2489,7 @@ struct mlx5_ifc_mkc_bits { u8 translations_octword_size[0x20]; - u8 reserved_at_1c0[0x19]; - u8 relaxed_ordering_read[0x1]; - u8 reserved_at_1d9[0x1]; + u8 reserved_at_1c0[0x1b]; u8 log_page_size[0x5]; u8 reserved_at_1e0[0x20]; @@ -3738,14 +2554,12 @@ struct mlx5_ifc_hca_vport_context_bits { }; struct mlx5_ifc_esw_vport_context_bits { - u8 fdb_to_vport_reg_c[0x1]; - u8 reserved_at_1[0x2]; + u8 reserved_at_0[0x3]; u8 vport_svlan_strip[0x1]; u8 vport_cvlan_strip[0x1]; u8 vport_svlan_insert[0x1]; u8 vport_cvlan_insert[0x2]; - u8 fdb_to_vport_reg_c_id[0x8]; - u8 reserved_at_10[0x10]; + u8 reserved_at_8[0x18]; u8 reserved_at_20[0x20]; @@ -3756,11 +2570,7 @@ struct mlx5_ifc_esw_vport_context_bits { u8 cvlan_pcp[0x3]; u8 cvlan_id[0xc]; - u8 reserved_at_60[0x720]; - - u8 sw_steering_vport_icm_address_rx[0x40]; - - u8 sw_steering_vport_icm_address_tx[0x40]; + u8 reserved_at_60[0x7a0]; }; enum { @@ -3794,8 +2604,8 @@ struct mlx5_ifc_eqc_bits { u8 reserved_at_80[0x20]; - u8 reserved_at_a0[0x14]; - u8 intr[0xc]; + u8 reserved_at_a0[0x18]; + u8 intr[0x8]; u8 reserved_at_c0[0x3]; u8 log_page_size[0x5]; @@ -3890,8 +2700,7 @@ struct mlx5_ifc_dctc_bits { u8 ecn[0x2]; u8 dscp[0x6]; - u8 reserved_at_1c0[0x20]; - u8 ece[0x20]; + u8 reserved_at_1c0[0x40]; }; 
enum { @@ -3919,9 +2728,7 @@ enum { struct mlx5_ifc_cqc_bits { u8 status[0x4]; - u8 reserved_at_4[0x2]; - u8 dbr_umem_valid[0x1]; - u8 apu_cq[0x1]; + u8 reserved_at_4[0x4]; u8 cqe_sz[0x3]; u8 cc[0x1]; u8 reserved_at_c[0x1]; @@ -3947,7 +2754,8 @@ struct mlx5_ifc_cqc_bits { u8 cq_period[0xc]; u8 cq_max_count[0x10]; - u8 c_eqn_or_apu_element[0x20]; + u8 reserved_at_a0[0x18]; + u8 c_eqn[0x8]; u8 reserved_at_c0[0x3]; u8 log_page_size[0x5]; @@ -4036,7 +2844,7 @@ struct mlx5_ifc_xrqc_bits { struct mlx5_ifc_tag_matching_topology_context_bits tag_matching_topology_context; - u8 reserved_at_180[0x280]; + u8 reserved_at_180[0x200]; struct mlx5_ifc_wq_bits wq; }; @@ -4061,16 +2869,9 @@ union mlx5_ifc_eth_cntrs_grp_data_layout_auto_bits { struct mlx5_ifc_eth_3635_cntrs_grp_data_layout_bits eth_3635_cntrs_grp_data_layout; struct mlx5_ifc_eth_extended_cntrs_grp_data_layout_bits eth_extended_cntrs_grp_data_layout; struct mlx5_ifc_eth_per_prio_grp_data_layout_bits eth_per_prio_grp_data_layout; - struct mlx5_ifc_eth_per_tc_prio_grp_data_layout_bits eth_per_tc_prio_grp_data_layout; - struct mlx5_ifc_eth_per_tc_congest_prio_grp_data_layout_bits eth_per_tc_congest_prio_grp_data_layout; + struct mlx5_ifc_eth_per_traffic_grp_data_layout_bits eth_per_traffic_grp_data_layout; struct mlx5_ifc_ib_port_cntrs_grp_data_layout_bits ib_port_cntrs_grp_data_layout; struct mlx5_ifc_phys_layer_cntrs_bits phys_layer_cntrs; - struct mlx5_ifc_phys_layer_statistical_cntrs_bits phys_layer_statistical_cntrs; - u8 reserved_at_0[0x7c0]; -}; - -union mlx5_ifc_pcie_cntrs_grp_data_layout_auto_bits { - struct mlx5_ifc_pcie_perf_cntrs_grp_data_layout_bits pcie_perf_cntrs_grp_data_layout; u8 reserved_at_0[0x7c0]; }; @@ -4119,49 +2920,18 @@ struct mlx5_ifc_register_loopback_control_bits { u8 reserved_at_20[0x60]; }; -struct mlx5_ifc_vport_tc_element_bits { - u8 traffic_class[0x4]; - u8 reserved_at_4[0xc]; - u8 vport_number[0x10]; -}; - -struct mlx5_ifc_vport_element_bits { - u8 reserved_at_0[0x10]; - u8 
vport_number[0x10]; -}; - -enum { - TSAR_ELEMENT_TSAR_TYPE_DWRR = 0x0, - TSAR_ELEMENT_TSAR_TYPE_ROUND_ROBIN = 0x1, - TSAR_ELEMENT_TSAR_TYPE_ETS = 0x2, -}; - -struct mlx5_ifc_tsar_element_bits { - u8 reserved_at_0[0x8]; - u8 tsar_type[0x8]; - u8 reserved_at_10[0x10]; -}; - -enum { - MLX5_TEARDOWN_HCA_OUT_FORCE_STATE_SUCCESS = 0x0, - MLX5_TEARDOWN_HCA_OUT_FORCE_STATE_FAIL = 0x1, -}; - struct mlx5_ifc_teardown_hca_out_bits { u8 status[0x8]; u8 reserved_at_8[0x18]; u8 syndrome[0x20]; - u8 reserved_at_40[0x3f]; - - u8 state[0x1]; + u8 reserved_at_40[0x40]; }; enum { MLX5_TEARDOWN_HCA_IN_PROFILE_GRACEFUL_CLOSE = 0x0, - MLX5_TEARDOWN_HCA_IN_PROFILE_FORCE_CLOSE = 0x1, - MLX5_TEARDOWN_HCA_IN_PROFILE_PREPARE_FAST_TEARDOWN = 0x2, + MLX5_TEARDOWN_HCA_IN_PROFILE_PANIC_CLOSE = 0x1, }; struct mlx5_ifc_teardown_hca_in_bits { @@ -4188,7 +2958,7 @@ struct mlx5_ifc_sqerr2rts_qp_out_bits { struct mlx5_ifc_sqerr2rts_qp_in_bits { u8 opcode[0x10]; - u8 uid[0x10]; + u8 reserved_at_10[0x10]; u8 reserved_at_20[0x10]; u8 op_mod[0x10]; @@ -4218,7 +2988,7 @@ struct mlx5_ifc_sqd2rts_qp_out_bits { struct mlx5_ifc_sqd2rts_qp_in_bits { u8 opcode[0x10]; - u8 uid[0x10]; + u8 reserved_at_10[0x10]; u8 reserved_at_20[0x10]; u8 op_mod[0x10]; @@ -4254,8 +3024,7 @@ struct mlx5_ifc_set_roce_address_in_bits { u8 op_mod[0x10]; u8 roce_address_index[0x10]; - u8 reserved_at_50[0xc]; - u8 vhca_port_num[0x4]; + u8 reserved_at_50[0x10]; u8 reserved_at_60[0x20]; @@ -4360,11 +3129,7 @@ struct mlx5_ifc_set_hca_cap_in_bits { u8 reserved_at_20[0x10]; u8 op_mod[0x10]; - u8 other_function[0x1]; - u8 reserved_at_41[0xf]; - u8 function_id[0x10]; - - u8 reserved_at_60[0x20]; + u8 reserved_at_40[0x40]; union mlx5_ifc_hca_cap_union_bits capability; }; @@ -4373,8 +3138,7 @@ enum { MLX5_SET_FTE_MODIFY_ENABLE_MASK_ACTION = 0x0, MLX5_SET_FTE_MODIFY_ENABLE_MASK_FLOW_TAG = 0x1, MLX5_SET_FTE_MODIFY_ENABLE_MASK_DESTINATION_LIST = 0x2, - MLX5_SET_FTE_MODIFY_ENABLE_MASK_FLOW_COUNTERS = 0x3, - 
MLX5_SET_FTE_MODIFY_ENABLE_MASK_IPSEC_OBJ_ID = 0x4 + MLX5_SET_FTE_MODIFY_ENABLE_MASK_FLOW_COUNTERS = 0x3 }; struct mlx5_ifc_set_fte_out_bits { @@ -4405,8 +3169,7 @@ struct mlx5_ifc_set_fte_in_bits { u8 reserved_at_a0[0x8]; u8 table_id[0x18]; - u8 ignore_flow_level[0x1]; - u8 reserved_at_c1[0x17]; + u8 reserved_at_c0[0x18]; u8 modify_enable_mask[0x8]; u8 reserved_at_e0[0x20]; @@ -4424,13 +3187,12 @@ struct mlx5_ifc_rts2rts_qp_out_bits { u8 syndrome[0x20]; - u8 reserved_at_40[0x20]; - u8 ece[0x20]; + u8 reserved_at_40[0x40]; }; struct mlx5_ifc_rts2rts_qp_in_bits { u8 opcode[0x10]; - u8 uid[0x10]; + u8 reserved_at_10[0x10]; u8 reserved_at_20[0x10]; u8 op_mod[0x10]; @@ -4442,7 +3204,7 @@ struct mlx5_ifc_rts2rts_qp_in_bits { u8 opt_param_mask[0x20]; - u8 ece[0x20]; + u8 reserved_at_a0[0x20]; struct mlx5_ifc_qpc_bits qpc; @@ -4455,13 +3217,12 @@ struct mlx5_ifc_rtr2rts_qp_out_bits { u8 syndrome[0x20]; - u8 reserved_at_40[0x20]; - u8 ece[0x20]; + u8 reserved_at_40[0x40]; }; struct mlx5_ifc_rtr2rts_qp_in_bits { u8 opcode[0x10]; - u8 uid[0x10]; + u8 reserved_at_10[0x10]; u8 reserved_at_20[0x10]; u8 op_mod[0x10]; @@ -4473,7 +3234,7 @@ struct mlx5_ifc_rtr2rts_qp_in_bits { u8 opt_param_mask[0x20]; - u8 ece[0x20]; + u8 reserved_at_a0[0x20]; struct mlx5_ifc_qpc_bits qpc; @@ -4486,13 +3247,12 @@ struct mlx5_ifc_rst2init_qp_out_bits { u8 syndrome[0x20]; - u8 reserved_at_40[0x20]; - u8 ece[0x20]; + u8 reserved_at_40[0x40]; }; struct mlx5_ifc_rst2init_qp_in_bits { u8 opcode[0x10]; - u8 uid[0x10]; + u8 reserved_at_10[0x10]; u8 reserved_at_20[0x10]; u8 op_mod[0x10]; @@ -4504,7 +3264,7 @@ struct mlx5_ifc_rst2init_qp_in_bits { u8 opt_param_mask[0x20]; - u8 ece[0x20]; + u8 reserved_at_a0[0x20]; struct mlx5_ifc_qpc_bits qpc; @@ -4547,7 +3307,7 @@ struct mlx5_ifc_query_xrc_srq_out_bits { u8 reserved_at_280[0x600]; - u8 pas[][0x40]; + u8 pas[0][0x40]; }; struct mlx5_ifc_query_xrc_srq_in_bits { @@ -4582,86 +3342,8 @@ struct mlx5_ifc_query_vport_state_out_bits { }; enum { - 
MLX5_VPORT_STATE_OP_MOD_VNIC_VPORT = 0x0, - MLX5_VPORT_STATE_OP_MOD_ESW_VPORT = 0x1, - MLX5_VPORT_STATE_OP_MOD_UPLINK = 0x2, -}; - -struct mlx5_ifc_arm_monitor_counter_in_bits { - u8 opcode[0x10]; - u8 uid[0x10]; - - u8 reserved_at_20[0x10]; - u8 op_mod[0x10]; - - u8 reserved_at_40[0x20]; - - u8 reserved_at_60[0x20]; -}; - -struct mlx5_ifc_arm_monitor_counter_out_bits { - u8 status[0x8]; - u8 reserved_at_8[0x18]; - - u8 syndrome[0x20]; - - u8 reserved_at_40[0x40]; -}; - -enum { - MLX5_QUERY_MONITOR_CNT_TYPE_PPCNT = 0x0, - MLX5_QUERY_MONITOR_CNT_TYPE_Q_COUNTER = 0x1, -}; - -enum mlx5_monitor_counter_ppcnt { - MLX5_QUERY_MONITOR_PPCNT_IN_RANGE_LENGTH_ERRORS = 0x0, - MLX5_QUERY_MONITOR_PPCNT_OUT_OF_RANGE_LENGTH_FIELD = 0x1, - MLX5_QUERY_MONITOR_PPCNT_FRAME_TOO_LONG_ERRORS = 0x2, - MLX5_QUERY_MONITOR_PPCNT_FRAME_CHECK_SEQUENCE_ERRORS = 0x3, - MLX5_QUERY_MONITOR_PPCNT_ALIGNMENT_ERRORS = 0x4, - MLX5_QUERY_MONITOR_PPCNT_IF_OUT_DISCARDS = 0x5, -}; - -enum { - MLX5_QUERY_MONITOR_Q_COUNTER_RX_OUT_OF_BUFFER = 0x4, -}; - -struct mlx5_ifc_monitor_counter_output_bits { - u8 reserved_at_0[0x4]; - u8 type[0x4]; - u8 reserved_at_8[0x8]; - u8 counter[0x10]; - - u8 counter_group_id[0x20]; -}; - -#define MLX5_CMD_SET_MONITOR_NUM_PPCNT_COUNTER_SET1 (6) -#define MLX5_CMD_SET_MONITOR_NUM_Q_COUNTERS_SET1 (1) -#define MLX5_CMD_SET_MONITOR_NUM_COUNTER (MLX5_CMD_SET_MONITOR_NUM_PPCNT_COUNTER_SET1 +\ - MLX5_CMD_SET_MONITOR_NUM_Q_COUNTERS_SET1) - -struct mlx5_ifc_set_monitor_counter_in_bits { - u8 opcode[0x10]; - u8 uid[0x10]; - - u8 reserved_at_20[0x10]; - u8 op_mod[0x10]; - - u8 reserved_at_40[0x10]; - u8 num_of_counters[0x10]; - - u8 reserved_at_60[0x20]; - - struct mlx5_ifc_monitor_counter_output_bits monitor_counter[MLX5_CMD_SET_MONITOR_NUM_COUNTER]; -}; - -struct mlx5_ifc_set_monitor_counter_out_bits { - u8 status[0x8]; - u8 reserved_at_8[0x18]; - - u8 syndrome[0x20]; - - u8 reserved_at_40[0x40]; + MLX5_QUERY_VPORT_STATE_IN_OP_MOD_VNIC_VPORT = 0x0, + 
MLX5_QUERY_VPORT_STATE_IN_OP_MOD_ESW_VPORT = 0x1, }; struct mlx5_ifc_query_vport_state_in_bits { @@ -4678,35 +3360,6 @@ struct mlx5_ifc_query_vport_state_in_bits { u8 reserved_at_60[0x20]; }; -struct mlx5_ifc_query_vnic_env_out_bits { - u8 status[0x8]; - u8 reserved_at_8[0x18]; - - u8 syndrome[0x20]; - - u8 reserved_at_40[0x40]; - - struct mlx5_ifc_vnic_diagnostic_statistics_bits vport_env; -}; - -enum { - MLX5_QUERY_VNIC_ENV_IN_OP_MOD_VPORT_DIAG_STATISTICS = 0x0, -}; - -struct mlx5_ifc_query_vnic_env_in_bits { - u8 opcode[0x10]; - u8 reserved_at_10[0x10]; - - u8 reserved_at_20[0x10]; - u8 op_mod[0x10]; - - u8 other_vport[0x1]; - u8 reserved_at_41[0xf]; - u8 vport_number[0x10]; - - u8 reserved_at_60[0x20]; -}; - struct mlx5_ifc_query_vport_counter_out_bits { u8 status[0x8]; u8 reserved_at_8[0x18]; @@ -4826,7 +3479,7 @@ struct mlx5_ifc_query_srq_out_bits { u8 reserved_at_280[0x600]; - u8 pas[][0x40]; + u8 pas[0][0x40]; }; struct mlx5_ifc_query_srq_in_bits { @@ -4875,10 +3528,6 @@ struct mlx5_ifc_query_special_contexts_out_bits { u8 dump_fill_mkey[0x20]; u8 resd_lkey[0x20]; - - u8 null_mkey[0x20]; - - u8 reserved_at_a0[0x60]; }; struct mlx5_ifc_query_special_contexts_in_bits { @@ -4891,40 +3540,6 @@ struct mlx5_ifc_query_special_contexts_in_bits { u8 reserved_at_40[0x40]; }; -struct mlx5_ifc_query_scheduling_element_out_bits { - u8 opcode[0x10]; - u8 reserved_at_10[0x10]; - - u8 reserved_at_20[0x10]; - u8 op_mod[0x10]; - - u8 reserved_at_40[0xc0]; - - struct mlx5_ifc_scheduling_context_bits scheduling_context; - - u8 reserved_at_300[0x100]; -}; - -enum { - SCHEDULING_HIERARCHY_E_SWITCH = 0x2, - SCHEDULING_HIERARCHY_NIC = 0x3, -}; - -struct mlx5_ifc_query_scheduling_element_in_bits { - u8 opcode[0x10]; - u8 reserved_at_10[0x10]; - - u8 reserved_at_20[0x10]; - u8 op_mod[0x10]; - - u8 scheduling_hierarchy[0x8]; - u8 reserved_at_48[0x18]; - - u8 scheduling_element_id[0x20]; - - u8 reserved_at_80[0x180]; -}; - struct mlx5_ifc_query_rqt_out_bits { u8 status[0x8]; u8 
reserved_at_8[0x18]; @@ -4992,8 +3607,7 @@ struct mlx5_ifc_query_roce_address_in_bits { u8 op_mod[0x10]; u8 roce_address_index[0x10]; - u8 reserved_at_50[0xc]; - u8 vhca_port_num[0x4]; + u8 reserved_at_50[0x10]; u8 reserved_at_60[0x20]; }; @@ -5028,8 +3642,7 @@ struct mlx5_ifc_query_qp_out_bits { u8 syndrome[0x20]; - u8 reserved_at_40[0x20]; - u8 ece[0x20]; + u8 reserved_at_40[0x40]; u8 opt_param_mask[0x20]; @@ -5039,7 +3652,7 @@ struct mlx5_ifc_query_qp_out_bits { u8 reserved_at_800[0x80]; - u8 pas[][0x40]; + u8 pas[0][0x40]; }; struct mlx5_ifc_query_qp_in_bits { @@ -5105,59 +3718,7 @@ struct mlx5_ifc_query_q_counter_out_bits { u8 local_ack_timeout_err[0x20]; - u8 reserved_at_320[0xa0]; - - u8 resp_local_length_error[0x20]; - - u8 req_local_length_error[0x20]; - - u8 resp_local_qp_error[0x20]; - - u8 local_operation_error[0x20]; - - u8 resp_local_protection[0x20]; - - u8 req_local_protection[0x20]; - - u8 resp_cqe_error[0x20]; - - u8 req_cqe_error[0x20]; - - u8 req_mw_binding[0x20]; - - u8 req_bad_response[0x20]; - - u8 req_remote_invalid_request[0x20]; - - u8 resp_remote_invalid_request[0x20]; - - u8 req_remote_access_errors[0x20]; - - u8 resp_remote_access_errors[0x20]; - - u8 req_remote_operation_errors[0x20]; - - u8 req_transport_retries_exceeded[0x20]; - - u8 cq_overflow[0x20]; - - u8 resp_cqe_flush_error[0x20]; - - u8 req_cqe_flush_error[0x20]; - - u8 reserved_at_620[0x20]; - - u8 roce_adp_retrans[0x20]; - - u8 roce_adp_retrans_to[0x20]; - - u8 roce_slow_restart[0x20]; - - u8 roce_slow_restart_cnps[0x20]; - - u8 roce_slow_restart_trans[0x20]; - - u8 reserved_at_6e0[0x120]; + u8 reserved_at_320[0x4e0]; }; struct mlx5_ifc_query_q_counter_in_bits { @@ -5182,8 +3743,7 @@ struct mlx5_ifc_query_pages_out_bits { u8 syndrome[0x20]; - u8 embedded_cpu_function[0x1]; - u8 reserved_at_41[0xf]; + u8 reserved_at_40[0x10]; u8 function_id[0x10]; u8 num_pages[0x20]; @@ -5202,8 +3762,7 @@ struct mlx5_ifc_query_pages_in_bits { u8 reserved_at_20[0x10]; u8 op_mod[0x10]; - u8 
embedded_cpu_function[0x1]; - u8 reserved_at_41[0xf]; + u8 reserved_at_40[0x10]; u8 function_id[0x10]; u8 reserved_at_60[0x20]; @@ -5345,25 +3904,6 @@ struct mlx5_ifc_query_issi_in_bits { u8 reserved_at_40[0x40]; }; -struct mlx5_ifc_set_driver_version_out_bits { - u8 status[0x8]; - u8 reserved_0[0x18]; - - u8 syndrome[0x20]; - u8 reserved_1[0x40]; -}; - -struct mlx5_ifc_set_driver_version_in_bits { - u8 opcode[0x10]; - u8 reserved_0[0x10]; - - u8 reserved_1[0x10]; - u8 op_mod[0x10]; - - u8 reserved_2[0x40]; - u8 driver_version[64][0x8]; -}; - struct mlx5_ifc_query_hca_vport_pkey_out_bits { u8 status[0x8]; u8 reserved_at_8[0x18]; @@ -5372,7 +3912,7 @@ struct mlx5_ifc_query_hca_vport_pkey_out_bits { u8 reserved_at_40[0x40]; - struct mlx5_ifc_pkey_bits pkey[]; + struct mlx5_ifc_pkey_bits pkey[0]; }; struct mlx5_ifc_query_hca_vport_pkey_in_bits { @@ -5408,7 +3948,7 @@ struct mlx5_ifc_query_hca_vport_gid_out_bits { u8 gids_num[0x10]; u8 reserved_at_70[0x10]; - struct mlx5_ifc_array128_auto_bits gid[]; + struct mlx5_ifc_array128_auto_bits gid[0]; }; struct mlx5_ifc_query_hca_vport_gid_in_bits { @@ -5471,87 +4011,7 @@ struct mlx5_ifc_query_hca_cap_in_bits { u8 reserved_at_20[0x10]; u8 op_mod[0x10]; - u8 other_function[0x1]; - u8 reserved_at_41[0xf]; - u8 function_id[0x10]; - - u8 reserved_at_60[0x20]; -}; - -struct mlx5_ifc_other_hca_cap_bits { - u8 roce[0x1]; - u8 reserved_at_1[0x27f]; -}; - -struct mlx5_ifc_query_other_hca_cap_out_bits { - u8 status[0x8]; - u8 reserved_at_8[0x18]; - - u8 syndrome[0x20]; - u8 reserved_at_40[0x40]; - - struct mlx5_ifc_other_hca_cap_bits other_capability; -}; - -struct mlx5_ifc_query_other_hca_cap_in_bits { - u8 opcode[0x10]; - u8 reserved_at_10[0x10]; - - u8 reserved_at_20[0x10]; - u8 op_mod[0x10]; - - u8 reserved_at_40[0x10]; - u8 function_id[0x10]; - - u8 reserved_at_60[0x20]; -}; - -struct mlx5_ifc_modify_other_hca_cap_out_bits { - u8 status[0x8]; - u8 reserved_at_8[0x18]; - - u8 syndrome[0x20]; - - u8 reserved_at_40[0x40]; -}; - 
-struct mlx5_ifc_modify_other_hca_cap_in_bits { - u8 opcode[0x10]; - u8 reserved_at_10[0x10]; - - u8 reserved_at_20[0x10]; - u8 op_mod[0x10]; - - u8 reserved_at_40[0x10]; - u8 function_id[0x10]; - u8 field_select[0x20]; - - struct mlx5_ifc_other_hca_cap_bits other_capability; -}; - -struct mlx5_ifc_flow_table_context_bits { - u8 reformat_en[0x1]; - u8 decap_en[0x1]; - u8 sw_owner[0x1]; - u8 termination_table[0x1]; - u8 table_miss_action[0x4]; - u8 level[0x8]; - u8 reserved_at_10[0x8]; - u8 log_size[0x8]; - - u8 reserved_at_20[0x8]; - u8 table_miss_id[0x18]; - - u8 reserved_at_40[0x8]; - u8 lag_master_next_table_id[0x18]; - - u8 reserved_at_60[0x60]; - - u8 sw_owner_icm_root_1[0x40]; - - u8 sw_owner_icm_root_0[0x40]; - }; struct mlx5_ifc_query_flow_table_out_bits { @@ -5562,7 +4022,12 @@ struct mlx5_ifc_query_flow_table_out_bits { u8 reserved_at_40[0x80]; - struct mlx5_ifc_flow_table_context_bits flow_table_context; + u8 reserved_at_c0[0x8]; + u8 level[0x8]; + u8 reserved_at_d0[0x8]; + u8 log_size[0x8]; + + u8 reserved_at_e0[0x120]; }; struct mlx5_ifc_query_flow_table_in_bits { @@ -5620,9 +4085,6 @@ enum { MLX5_QUERY_FLOW_GROUP_OUT_MATCH_CRITERIA_ENABLE_OUTER_HEADERS = 0x0, MLX5_QUERY_FLOW_GROUP_OUT_MATCH_CRITERIA_ENABLE_MISC_PARAMETERS = 0x1, MLX5_QUERY_FLOW_GROUP_OUT_MATCH_CRITERIA_ENABLE_INNER_HEADERS = 0x2, - MLX5_QUERY_FLOW_GROUP_IN_MATCH_CRITERIA_ENABLE_MISC_PARAMETERS_2 = 0x3, - MLX5_QUERY_FLOW_GROUP_IN_MATCH_CRITERIA_ENABLE_MISC_PARAMETERS_3 = 0x4, - MLX5_QUERY_FLOW_GROUP_IN_MATCH_CRITERIA_ENABLE_MISC_PARAMETERS_4 = 0x5, }; struct mlx5_ifc_query_flow_group_out_bits { @@ -5677,7 +4139,7 @@ struct mlx5_ifc_query_flow_counter_out_bits { u8 reserved_at_40[0x40]; - struct mlx5_ifc_traffic_counter_bits flow_statistics[]; + struct mlx5_ifc_traffic_counter_bits flow_statistics[0]; }; struct mlx5_ifc_query_flow_counter_in_bits { @@ -5693,7 +4155,8 @@ struct mlx5_ifc_query_flow_counter_in_bits { u8 reserved_at_c1[0xf]; u8 num_of_counters[0x10]; - u8 
flow_counter_id[0x20]; + u8 reserved_at_e0[0x10]; + u8 flow_counter_id[0x10]; }; struct mlx5_ifc_query_esw_vport_context_out_bits { @@ -5731,8 +4194,7 @@ struct mlx5_ifc_modify_esw_vport_context_out_bits { }; struct mlx5_ifc_esw_vport_context_fields_select_bits { - u8 reserved_at_0[0x1b]; - u8 fdb_to_vport_reg_c_id[0x1]; + u8 reserved_at_0[0x1c]; u8 vport_cvlan_insert[0x1]; u8 vport_svlan_insert[0x1]; u8 vport_cvlan_strip[0x1]; @@ -5771,7 +4233,7 @@ struct mlx5_ifc_query_eq_out_bits { u8 reserved_at_300[0x580]; - u8 pas[][0x40]; + u8 pas[0][0x40]; }; struct mlx5_ifc_query_eq_in_bits { @@ -5787,21 +4249,19 @@ struct mlx5_ifc_query_eq_in_bits { u8 reserved_at_60[0x20]; }; -struct mlx5_ifc_packet_reformat_context_in_bits { - u8 reformat_type[0x8]; - u8 reserved_at_8[0x4]; - u8 reformat_param_0[0x4]; - u8 reserved_at_10[0x6]; - u8 reformat_data_size[0xa]; +struct mlx5_ifc_encap_header_in_bits { + u8 reserved_at_0[0x5]; + u8 header_type[0x3]; + u8 reserved_at_8[0xe]; + u8 encap_header_size[0xa]; - u8 reformat_param_1[0x8]; - u8 reserved_at_28[0x8]; - u8 reformat_data[2][0x8]; + u8 reserved_at_20[0x10]; + u8 encap_header[2][0x8]; - u8 more_reformat_data[][0x8]; + u8 more_encap_header[0][0x8]; }; -struct mlx5_ifc_query_packet_reformat_context_out_bits { +struct mlx5_ifc_query_encap_header_out_bits { u8 status[0x8]; u8 reserved_at_8[0x18]; @@ -5809,49 +4269,33 @@ struct mlx5_ifc_query_packet_reformat_context_out_bits { u8 reserved_at_40[0xa0]; - struct mlx5_ifc_packet_reformat_context_in_bits packet_reformat_context[]; + struct mlx5_ifc_encap_header_in_bits encap_header[0]; }; -struct mlx5_ifc_query_packet_reformat_context_in_bits { +struct mlx5_ifc_query_encap_header_in_bits { u8 opcode[0x10]; u8 reserved_at_10[0x10]; u8 reserved_at_20[0x10]; u8 op_mod[0x10]; - u8 packet_reformat_id[0x20]; + u8 encap_id[0x20]; u8 reserved_at_60[0xa0]; }; -struct mlx5_ifc_alloc_packet_reformat_context_out_bits { +struct mlx5_ifc_alloc_encap_header_out_bits { u8 status[0x8]; u8 
reserved_at_8[0x18]; u8 syndrome[0x20]; - u8 packet_reformat_id[0x20]; + u8 encap_id[0x20]; u8 reserved_at_60[0x20]; }; -enum { - MLX5_REFORMAT_CONTEXT_ANCHOR_MAC_START = 0x1, - MLX5_REFORMAT_CONTEXT_ANCHOR_IP_START = 0x7, - MLX5_REFORMAT_CONTEXT_ANCHOR_TCP_UDP_START = 0x9, -}; - -enum mlx5_reformat_ctx_type { - MLX5_REFORMAT_TYPE_L2_TO_VXLAN = 0x0, - MLX5_REFORMAT_TYPE_L2_TO_NVGRE = 0x1, - MLX5_REFORMAT_TYPE_L2_TO_L2_TUNNEL = 0x2, - MLX5_REFORMAT_TYPE_L3_TUNNEL_TO_L2 = 0x3, - MLX5_REFORMAT_TYPE_L2_TO_L3_TUNNEL = 0x4, - MLX5_REFORMAT_TYPE_INSERT_HDR = 0xf, - MLX5_REFORMAT_TYPE_REMOVE_HDR = 0x10, -}; - -struct mlx5_ifc_alloc_packet_reformat_context_in_bits { +struct mlx5_ifc_alloc_encap_header_in_bits { u8 opcode[0x10]; u8 reserved_at_10[0x10]; @@ -5860,10 +4304,10 @@ struct mlx5_ifc_alloc_packet_reformat_context_in_bits { u8 reserved_at_40[0xa0]; - struct mlx5_ifc_packet_reformat_context_in_bits packet_reformat_context; + struct mlx5_ifc_encap_header_in_bits encap_header; }; -struct mlx5_ifc_dealloc_packet_reformat_context_out_bits { +struct mlx5_ifc_dealloc_encap_header_out_bits { u8 status[0x8]; u8 reserved_at_8[0x18]; @@ -5872,167 +4316,18 @@ struct mlx5_ifc_dealloc_packet_reformat_context_out_bits { u8 reserved_at_40[0x40]; }; -struct mlx5_ifc_dealloc_packet_reformat_context_in_bits { +struct mlx5_ifc_dealloc_encap_header_in_bits { u8 opcode[0x10]; u8 reserved_at_10[0x10]; u8 reserved_20[0x10]; u8 op_mod[0x10]; - u8 packet_reformat_id[0x20]; + u8 encap_id[0x20]; u8 reserved_60[0x20]; }; -struct mlx5_ifc_set_action_in_bits { - u8 action_type[0x4]; - u8 field[0xc]; - u8 reserved_at_10[0x3]; - u8 offset[0x5]; - u8 reserved_at_18[0x3]; - u8 length[0x5]; - - u8 data[0x20]; -}; - -struct mlx5_ifc_add_action_in_bits { - u8 action_type[0x4]; - u8 field[0xc]; - u8 reserved_at_10[0x10]; - - u8 data[0x20]; -}; - -struct mlx5_ifc_copy_action_in_bits { - u8 action_type[0x4]; - u8 src_field[0xc]; - u8 reserved_at_10[0x3]; - u8 src_offset[0x5]; - u8 reserved_at_18[0x3]; - u8 
length[0x5]; - - u8 reserved_at_20[0x4]; - u8 dst_field[0xc]; - u8 reserved_at_30[0x3]; - u8 dst_offset[0x5]; - u8 reserved_at_38[0x8]; -}; - -union mlx5_ifc_set_add_copy_action_in_auto_bits { - struct mlx5_ifc_set_action_in_bits set_action_in; - struct mlx5_ifc_add_action_in_bits add_action_in; - struct mlx5_ifc_copy_action_in_bits copy_action_in; - u8 reserved_at_0[0x40]; -}; - -enum { - MLX5_ACTION_TYPE_SET = 0x1, - MLX5_ACTION_TYPE_ADD = 0x2, - MLX5_ACTION_TYPE_COPY = 0x3, -}; - -enum { - MLX5_ACTION_IN_FIELD_OUT_SMAC_47_16 = 0x1, - MLX5_ACTION_IN_FIELD_OUT_SMAC_15_0 = 0x2, - MLX5_ACTION_IN_FIELD_OUT_ETHERTYPE = 0x3, - MLX5_ACTION_IN_FIELD_OUT_DMAC_47_16 = 0x4, - MLX5_ACTION_IN_FIELD_OUT_DMAC_15_0 = 0x5, - MLX5_ACTION_IN_FIELD_OUT_IP_DSCP = 0x6, - MLX5_ACTION_IN_FIELD_OUT_TCP_FLAGS = 0x7, - MLX5_ACTION_IN_FIELD_OUT_TCP_SPORT = 0x8, - MLX5_ACTION_IN_FIELD_OUT_TCP_DPORT = 0x9, - MLX5_ACTION_IN_FIELD_OUT_IP_TTL = 0xa, - MLX5_ACTION_IN_FIELD_OUT_UDP_SPORT = 0xb, - MLX5_ACTION_IN_FIELD_OUT_UDP_DPORT = 0xc, - MLX5_ACTION_IN_FIELD_OUT_SIPV6_127_96 = 0xd, - MLX5_ACTION_IN_FIELD_OUT_SIPV6_95_64 = 0xe, - MLX5_ACTION_IN_FIELD_OUT_SIPV6_63_32 = 0xf, - MLX5_ACTION_IN_FIELD_OUT_SIPV6_31_0 = 0x10, - MLX5_ACTION_IN_FIELD_OUT_DIPV6_127_96 = 0x11, - MLX5_ACTION_IN_FIELD_OUT_DIPV6_95_64 = 0x12, - MLX5_ACTION_IN_FIELD_OUT_DIPV6_63_32 = 0x13, - MLX5_ACTION_IN_FIELD_OUT_DIPV6_31_0 = 0x14, - MLX5_ACTION_IN_FIELD_OUT_SIPV4 = 0x15, - MLX5_ACTION_IN_FIELD_OUT_DIPV4 = 0x16, - MLX5_ACTION_IN_FIELD_OUT_FIRST_VID = 0x17, - MLX5_ACTION_IN_FIELD_OUT_IPV6_HOPLIMIT = 0x47, - MLX5_ACTION_IN_FIELD_METADATA_REG_A = 0x49, - MLX5_ACTION_IN_FIELD_METADATA_REG_B = 0x50, - MLX5_ACTION_IN_FIELD_METADATA_REG_C_0 = 0x51, - MLX5_ACTION_IN_FIELD_METADATA_REG_C_1 = 0x52, - MLX5_ACTION_IN_FIELD_METADATA_REG_C_2 = 0x53, - MLX5_ACTION_IN_FIELD_METADATA_REG_C_3 = 0x54, - MLX5_ACTION_IN_FIELD_METADATA_REG_C_4 = 0x55, - MLX5_ACTION_IN_FIELD_METADATA_REG_C_5 = 0x56, - MLX5_ACTION_IN_FIELD_METADATA_REG_C_6 = 0x57, - 
MLX5_ACTION_IN_FIELD_METADATA_REG_C_7 = 0x58, - MLX5_ACTION_IN_FIELD_OUT_TCP_SEQ_NUM = 0x59, - MLX5_ACTION_IN_FIELD_OUT_TCP_ACK_NUM = 0x5B, - MLX5_ACTION_IN_FIELD_IPSEC_SYNDROME = 0x5D, - MLX5_ACTION_IN_FIELD_OUT_EMD_47_32 = 0x6F, - MLX5_ACTION_IN_FIELD_OUT_EMD_31_0 = 0x70, -}; - -struct mlx5_ifc_alloc_modify_header_context_out_bits { - u8 status[0x8]; - u8 reserved_at_8[0x18]; - - u8 syndrome[0x20]; - - u8 modify_header_id[0x20]; - - u8 reserved_at_60[0x20]; -}; - -struct mlx5_ifc_alloc_modify_header_context_in_bits { - u8 opcode[0x10]; - u8 reserved_at_10[0x10]; - - u8 reserved_at_20[0x10]; - u8 op_mod[0x10]; - - u8 reserved_at_40[0x20]; - - u8 table_type[0x8]; - u8 reserved_at_68[0x10]; - u8 num_of_actions[0x8]; - - union mlx5_ifc_set_add_copy_action_in_auto_bits actions[]; -}; - -struct mlx5_ifc_dealloc_modify_header_context_out_bits { - u8 status[0x8]; - u8 reserved_at_8[0x18]; - - u8 syndrome[0x20]; - - u8 reserved_at_40[0x40]; -}; - -struct mlx5_ifc_dealloc_modify_header_context_in_bits { - u8 opcode[0x10]; - u8 reserved_at_10[0x10]; - - u8 reserved_at_20[0x10]; - u8 op_mod[0x10]; - - u8 modify_header_id[0x20]; - - u8 reserved_at_60[0x20]; -}; - -struct mlx5_ifc_query_modify_header_context_in_bits { - u8 opcode[0x10]; - u8 uid[0x10]; - - u8 reserved_at_20[0x10]; - u8 op_mod[0x10]; - - u8 modify_header_id[0x20]; - - u8 reserved_at_60[0xa0]; -}; - struct mlx5_ifc_query_dct_out_bits { u8 status[0x8]; u8 reserved_at_8[0x18]; @@ -6071,7 +4366,7 @@ struct mlx5_ifc_query_cq_out_bits { u8 reserved_at_280[0x600]; - u8 pas[][0x40]; + u8 pas[0][0x40]; }; struct mlx5_ifc_query_cq_in_bits { @@ -6122,17 +4417,17 @@ struct mlx5_ifc_query_cong_statistics_out_bits { u8 reserved_at_40[0x40]; - u8 rp_cur_flows[0x20]; + u8 cur_flows[0x20]; u8 sum_flows[0x20]; - u8 rp_cnp_ignored_high[0x20]; + u8 cnp_ignored_high[0x20]; - u8 rp_cnp_ignored_low[0x20]; + u8 cnp_ignored_low[0x20]; - u8 rp_cnp_handled_high[0x20]; + u8 cnp_handled_high[0x20]; - u8 rp_cnp_handled_low[0x20]; + u8 
cnp_handled_low[0x20]; u8 reserved_at_140[0x100]; @@ -6142,13 +4437,13 @@ struct mlx5_ifc_query_cong_statistics_out_bits { u8 accumulators_period[0x20]; - u8 np_ecn_marked_roce_packets_high[0x20]; + u8 ecn_marked_roce_packets_high[0x20]; - u8 np_ecn_marked_roce_packets_low[0x20]; + u8 ecn_marked_roce_packets_low[0x20]; - u8 np_cnp_sent_high[0x20]; + u8 cnps_sent_high[0x20]; - u8 np_cnp_sent_low[0x20]; + u8 cnps_sent_low[0x20]; u8 reserved_at_320[0x560]; }; @@ -6222,7 +4517,7 @@ struct mlx5_ifc_qp_2rst_out_bits { struct mlx5_ifc_qp_2rst_in_bits { u8 opcode[0x10]; - u8 uid[0x10]; + u8 reserved_at_10[0x10]; u8 reserved_at_20[0x10]; u8 op_mod[0x10]; @@ -6244,7 +4539,7 @@ struct mlx5_ifc_qp_2err_out_bits { struct mlx5_ifc_qp_2err_in_bits { u8 opcode[0x10]; - u8 uid[0x10]; + u8 reserved_at_10[0x10]; u8 reserved_at_20[0x10]; u8 op_mod[0x10]; @@ -6273,11 +4568,12 @@ struct mlx5_ifc_page_fault_resume_in_bits { u8 error[0x1]; u8 reserved_at_41[0x4]; - u8 page_fault_type[0x3]; - u8 wq_number[0x18]; + u8 rdma[0x1]; + u8 read_write[0x1]; + u8 req_res[0x1]; + u8 qpn[0x18]; - u8 reserved_at_60[0x8]; - u8 token[0x18]; + u8 reserved_at_60[0x20]; }; struct mlx5_ifc_nop_out_bits { @@ -6344,7 +4640,7 @@ struct mlx5_ifc_modify_tis_bitmask_bits { struct mlx5_ifc_modify_tis_in_bits { u8 opcode[0x10]; - u8 uid[0x10]; + u8 reserved_at_10[0x10]; u8 reserved_at_20[0x10]; u8 op_mod[0x10]; @@ -6383,7 +4679,7 @@ struct mlx5_ifc_modify_tir_out_bits { struct mlx5_ifc_modify_tir_in_bits { u8 opcode[0x10]; - u8 uid[0x10]; + u8 reserved_at_10[0x10]; u8 reserved_at_20[0x10]; u8 op_mod[0x10]; @@ -6411,7 +4707,7 @@ struct mlx5_ifc_modify_sq_out_bits { struct mlx5_ifc_modify_sq_in_bits { u8 opcode[0x10]; - u8 uid[0x10]; + u8 reserved_at_10[0x10]; u8 reserved_at_20[0x10]; u8 op_mod[0x10]; @@ -6429,43 +4725,6 @@ struct mlx5_ifc_modify_sq_in_bits { struct mlx5_ifc_sqc_bits ctx; }; -struct mlx5_ifc_modify_scheduling_element_out_bits { - u8 status[0x8]; - u8 reserved_at_8[0x18]; - - u8 syndrome[0x20]; - - u8 
reserved_at_40[0x1c0]; -}; - -enum { - MODIFY_SCHEDULING_ELEMENT_IN_MODIFY_BITMASK_BW_SHARE = 0x1, - MODIFY_SCHEDULING_ELEMENT_IN_MODIFY_BITMASK_MAX_AVERAGE_BW = 0x2, -}; - -struct mlx5_ifc_modify_scheduling_element_in_bits { - u8 opcode[0x10]; - u8 reserved_at_10[0x10]; - - u8 reserved_at_20[0x10]; - u8 op_mod[0x10]; - - u8 scheduling_hierarchy[0x8]; - u8 reserved_at_48[0x18]; - - u8 scheduling_element_id[0x20]; - - u8 reserved_at_80[0x20]; - - u8 modify_bitmask[0x20]; - - u8 reserved_at_c0[0x40]; - - struct mlx5_ifc_scheduling_context_bits scheduling_context; - - u8 reserved_at_300[0x100]; -}; - struct mlx5_ifc_modify_rqt_out_bits { u8 status[0x8]; u8 reserved_at_8[0x18]; @@ -6484,7 +4743,7 @@ struct mlx5_ifc_rqt_bitmask_bits { struct mlx5_ifc_modify_rqt_in_bits { u8 opcode[0x10]; - u8 uid[0x10]; + u8 reserved_at_10[0x10]; u8 reserved_at_20[0x10]; u8 op_mod[0x10]; @@ -6512,13 +4771,12 @@ struct mlx5_ifc_modify_rq_out_bits { enum { MLX5_MODIFY_RQ_IN_MODIFY_BITMASK_VSD = 1ULL << 1, - MLX5_MODIFY_RQ_IN_MODIFY_BITMASK_SCATTER_FCS = 1ULL << 2, - MLX5_MODIFY_RQ_IN_MODIFY_BITMASK_RQ_COUNTER_SET_ID = 1ULL << 3, + MLX5_MODIFY_RQ_IN_MODIFY_BITMASK_MODIFY_RQ_COUNTER_SET_ID = 1ULL << 3, }; struct mlx5_ifc_modify_rq_in_bits { u8 opcode[0x10]; - u8 uid[0x10]; + u8 reserved_at_10[0x10]; u8 reserved_at_20[0x10]; u8 op_mod[0x10]; @@ -6554,7 +4812,7 @@ struct mlx5_ifc_rmp_bitmask_bits { struct mlx5_ifc_modify_rmp_in_bits { u8 opcode[0x10]; - u8 uid[0x10]; + u8 reserved_at_10[0x10]; u8 reserved_at_20[0x10]; u8 op_mod[0x10]; @@ -6582,11 +4840,7 @@ struct mlx5_ifc_modify_nic_vport_context_out_bits { }; struct mlx5_ifc_modify_nic_vport_field_select_bits { - u8 reserved_at_0[0x12]; - u8 affiliation[0x1]; - u8 reserved_at_13[0x1]; - u8 disable_uc_local_lb[0x1]; - u8 disable_mc_local_lb[0x1]; + u8 reserved_at_0[0x16]; u8 node_guid[0x1]; u8 port_guid[0x1]; u8 min_inline[0x1]; @@ -6659,7 +4913,7 @@ enum { struct mlx5_ifc_modify_cq_in_bits { u8 opcode[0x10]; - u8 uid[0x10]; + u8 
reserved_at_10[0x10]; u8 reserved_at_20[0x10]; u8 op_mod[0x10]; @@ -6671,14 +4925,9 @@ struct mlx5_ifc_modify_cq_in_bits { struct mlx5_ifc_cqc_bits cq_context; - u8 reserved_at_280[0x60]; + u8 reserved_at_280[0x600]; - u8 cq_umem_valid[0x1]; - u8 reserved_at_2e1[0x1f]; - - u8 reserved_at_300[0x580]; - - u8 pas[][0x40]; + u8 pas[0][0x40]; }; struct mlx5_ifc_modify_cong_status_out_bits { @@ -6742,7 +4991,7 @@ struct mlx5_ifc_manage_pages_out_bits { u8 reserved_at_60[0x20]; - u8 pas[][0x40]; + u8 pas[0][0x40]; }; enum { @@ -6758,13 +5007,12 @@ struct mlx5_ifc_manage_pages_in_bits { u8 reserved_at_20[0x10]; u8 op_mod[0x10]; - u8 embedded_cpu_function[0x1]; - u8 reserved_at_41[0xf]; + u8 reserved_at_40[0x10]; u8 function_id[0x10]; u8 input_num_entries[0x20]; - u8 pas[][0x40]; + u8 pas[0][0x40]; }; struct mlx5_ifc_mad_ifc_out_bits { @@ -6811,7 +5059,6 @@ struct mlx5_ifc_init_hca_in_bits { u8 op_mod[0x10]; u8 reserved_at_40[0x40]; - u8 sw_owner_id[4][0x20]; }; struct mlx5_ifc_init2rtr_qp_out_bits { @@ -6820,13 +5067,12 @@ struct mlx5_ifc_init2rtr_qp_out_bits { u8 syndrome[0x20]; - u8 reserved_at_40[0x20]; - u8 ece[0x20]; + u8 reserved_at_40[0x40]; }; struct mlx5_ifc_init2rtr_qp_in_bits { u8 opcode[0x10]; - u8 uid[0x10]; + u8 reserved_at_10[0x10]; u8 reserved_at_20[0x10]; u8 op_mod[0x10]; @@ -6838,7 +5084,7 @@ struct mlx5_ifc_init2rtr_qp_in_bits { u8 opt_param_mask[0x20]; - u8 ece[0x20]; + u8 reserved_at_a0[0x20]; struct mlx5_ifc_qpc_bits qpc; @@ -6851,13 +5097,12 @@ struct mlx5_ifc_init2init_qp_out_bits { u8 syndrome[0x20]; - u8 reserved_at_40[0x20]; - u8 ece[0x20]; + u8 reserved_at_40[0x40]; }; struct mlx5_ifc_init2init_qp_in_bits { u8 opcode[0x10]; - u8 uid[0x10]; + u8 reserved_at_10[0x10]; u8 reserved_at_20[0x10]; u8 op_mod[0x10]; @@ -6869,7 +5114,7 @@ struct mlx5_ifc_init2init_qp_in_bits { u8 opt_param_mask[0x20]; - u8 ece[0x20]; + u8 reserved_at_a0[0x20]; struct mlx5_ifc_qpc_bits qpc; @@ -6939,8 +5184,7 @@ struct mlx5_ifc_enable_hca_in_bits { u8 reserved_at_20[0x10]; 
u8 op_mod[0x10]; - u8 embedded_cpu_function[0x1]; - u8 reserved_at_41[0xf]; + u8 reserved_at_40[0x10]; u8 function_id[0x10]; u8 reserved_at_60[0x20]; @@ -6957,7 +5201,7 @@ struct mlx5_ifc_drain_dct_out_bits { struct mlx5_ifc_drain_dct_in_bits { u8 opcode[0x10]; - u8 uid[0x10]; + u8 reserved_at_10[0x10]; u8 reserved_at_20[0x10]; u8 op_mod[0x10]; @@ -6984,8 +5228,7 @@ struct mlx5_ifc_disable_hca_in_bits { u8 reserved_at_20[0x10]; u8 op_mod[0x10]; - u8 embedded_cpu_function[0x1]; - u8 reserved_at_41[0xf]; + u8 reserved_at_40[0x10]; u8 function_id[0x10]; u8 reserved_at_60[0x20]; @@ -7002,7 +5245,7 @@ struct mlx5_ifc_detach_from_mcg_out_bits { struct mlx5_ifc_detach_from_mcg_in_bits { u8 opcode[0x10]; - u8 uid[0x10]; + u8 reserved_at_10[0x10]; u8 reserved_at_20[0x10]; u8 op_mod[0x10]; @@ -7026,7 +5269,7 @@ struct mlx5_ifc_destroy_xrq_out_bits { struct mlx5_ifc_destroy_xrq_in_bits { u8 opcode[0x10]; - u8 uid[0x10]; + u8 reserved_at_10[0x10]; u8 reserved_at_20[0x10]; u8 op_mod[0x10]; @@ -7048,7 +5291,7 @@ struct mlx5_ifc_destroy_xrc_srq_out_bits { struct mlx5_ifc_destroy_xrc_srq_in_bits { u8 opcode[0x10]; - u8 uid[0x10]; + u8 reserved_at_10[0x10]; u8 reserved_at_20[0x10]; u8 op_mod[0x10]; @@ -7070,7 +5313,7 @@ struct mlx5_ifc_destroy_tis_out_bits { struct mlx5_ifc_destroy_tis_in_bits { u8 opcode[0x10]; - u8 uid[0x10]; + u8 reserved_at_10[0x10]; u8 reserved_at_20[0x10]; u8 op_mod[0x10]; @@ -7092,7 +5335,7 @@ struct mlx5_ifc_destroy_tir_out_bits { struct mlx5_ifc_destroy_tir_in_bits { u8 opcode[0x10]; - u8 uid[0x10]; + u8 reserved_at_10[0x10]; u8 reserved_at_20[0x10]; u8 op_mod[0x10]; @@ -7114,7 +5357,7 @@ struct mlx5_ifc_destroy_srq_out_bits { struct mlx5_ifc_destroy_srq_in_bits { u8 opcode[0x10]; - u8 uid[0x10]; + u8 reserved_at_10[0x10]; u8 reserved_at_20[0x10]; u8 op_mod[0x10]; @@ -7136,7 +5379,7 @@ struct mlx5_ifc_destroy_sq_out_bits { struct mlx5_ifc_destroy_sq_in_bits { u8 opcode[0x10]; - u8 uid[0x10]; + u8 reserved_at_10[0x10]; u8 reserved_at_20[0x10]; u8 
op_mod[0x10]; @@ -7147,30 +5390,6 @@ struct mlx5_ifc_destroy_sq_in_bits { u8 reserved_at_60[0x20]; }; -struct mlx5_ifc_destroy_scheduling_element_out_bits { - u8 status[0x8]; - u8 reserved_at_8[0x18]; - - u8 syndrome[0x20]; - - u8 reserved_at_40[0x1c0]; -}; - -struct mlx5_ifc_destroy_scheduling_element_in_bits { - u8 opcode[0x10]; - u8 reserved_at_10[0x10]; - - u8 reserved_at_20[0x10]; - u8 op_mod[0x10]; - - u8 scheduling_hierarchy[0x8]; - u8 reserved_at_48[0x18]; - - u8 scheduling_element_id[0x20]; - - u8 reserved_at_80[0x180]; -}; - struct mlx5_ifc_destroy_rqt_out_bits { u8 status[0x8]; u8 reserved_at_8[0x18]; @@ -7182,7 +5401,7 @@ struct mlx5_ifc_destroy_rqt_out_bits { struct mlx5_ifc_destroy_rqt_in_bits { u8 opcode[0x10]; - u8 uid[0x10]; + u8 reserved_at_10[0x10]; u8 reserved_at_20[0x10]; u8 op_mod[0x10]; @@ -7204,7 +5423,7 @@ struct mlx5_ifc_destroy_rq_out_bits { struct mlx5_ifc_destroy_rq_in_bits { u8 opcode[0x10]; - u8 uid[0x10]; + u8 reserved_at_10[0x10]; u8 reserved_at_20[0x10]; u8 op_mod[0x10]; @@ -7215,28 +5434,6 @@ struct mlx5_ifc_destroy_rq_in_bits { u8 reserved_at_60[0x20]; }; -struct mlx5_ifc_set_delay_drop_params_in_bits { - u8 opcode[0x10]; - u8 reserved_at_10[0x10]; - - u8 reserved_at_20[0x10]; - u8 op_mod[0x10]; - - u8 reserved_at_40[0x20]; - - u8 reserved_at_60[0x10]; - u8 delay_drop_timeout[0x10]; -}; - -struct mlx5_ifc_set_delay_drop_params_out_bits { - u8 status[0x8]; - u8 reserved_at_8[0x18]; - - u8 syndrome[0x20]; - - u8 reserved_at_40[0x40]; -}; - struct mlx5_ifc_destroy_rmp_out_bits { u8 status[0x8]; u8 reserved_at_8[0x18]; @@ -7248,7 +5445,7 @@ struct mlx5_ifc_destroy_rmp_out_bits { struct mlx5_ifc_destroy_rmp_in_bits { u8 opcode[0x10]; - u8 uid[0x10]; + u8 reserved_at_10[0x10]; u8 reserved_at_20[0x10]; u8 op_mod[0x10]; @@ -7270,7 +5467,7 @@ struct mlx5_ifc_destroy_qp_out_bits { struct mlx5_ifc_destroy_qp_in_bits { u8 opcode[0x10]; - u8 uid[0x10]; + u8 reserved_at_10[0x10]; u8 reserved_at_20[0x10]; u8 op_mod[0x10]; @@ -7314,7 +5511,7 @@ 
struct mlx5_ifc_destroy_mkey_out_bits { struct mlx5_ifc_destroy_mkey_in_bits { u8 opcode[0x10]; - u8 uid[0x10]; + u8 reserved_at_10[0x10]; u8 reserved_at_20[0x10]; u8 op_mod[0x10]; @@ -7422,7 +5619,7 @@ struct mlx5_ifc_destroy_dct_out_bits { struct mlx5_ifc_destroy_dct_in_bits { u8 opcode[0x10]; - u8 uid[0x10]; + u8 reserved_at_10[0x10]; u8 reserved_at_20[0x10]; u8 op_mod[0x10]; @@ -7444,7 +5641,7 @@ struct mlx5_ifc_destroy_cq_out_bits { struct mlx5_ifc_destroy_cq_in_bits { u8 opcode[0x10]; - u8 uid[0x10]; + u8 reserved_at_10[0x10]; u8 reserved_at_20[0x10]; u8 op_mod[0x10]; @@ -7547,7 +5744,7 @@ struct mlx5_ifc_dealloc_xrcd_out_bits { struct mlx5_ifc_dealloc_xrcd_in_bits { u8 opcode[0x10]; - u8 uid[0x10]; + u8 reserved_at_10[0x10]; u8 reserved_at_20[0x10]; u8 op_mod[0x10]; @@ -7591,7 +5788,7 @@ struct mlx5_ifc_dealloc_transport_domain_out_bits { struct mlx5_ifc_dealloc_transport_domain_in_bits { u8 opcode[0x10]; - u8 uid[0x10]; + u8 reserved_at_10[0x10]; u8 reserved_at_20[0x10]; u8 op_mod[0x10]; @@ -7635,7 +5832,7 @@ struct mlx5_ifc_dealloc_pd_out_bits { struct mlx5_ifc_dealloc_pd_in_bits { u8 opcode[0x10]; - u8 uid[0x10]; + u8 reserved_at_10[0x10]; u8 reserved_at_20[0x10]; u8 op_mod[0x10]; @@ -7662,7 +5859,8 @@ struct mlx5_ifc_dealloc_flow_counter_in_bits { u8 reserved_at_20[0x10]; u8 op_mod[0x10]; - u8 flow_counter_id[0x20]; + u8 reserved_at_40[0x10]; + u8 flow_counter_id[0x10]; u8 reserved_at_60[0x20]; }; @@ -7681,7 +5879,7 @@ struct mlx5_ifc_create_xrq_out_bits { struct mlx5_ifc_create_xrq_in_bits { u8 opcode[0x10]; - u8 uid[0x10]; + u8 reserved_at_10[0x10]; u8 reserved_at_20[0x10]; u8 op_mod[0x10]; @@ -7705,7 +5903,7 @@ struct mlx5_ifc_create_xrc_srq_out_bits { struct mlx5_ifc_create_xrc_srq_in_bits { u8 opcode[0x10]; - u8 uid[0x10]; + u8 reserved_at_10[0x10]; u8 reserved_at_20[0x10]; u8 op_mod[0x10]; @@ -7714,14 +5912,9 @@ struct mlx5_ifc_create_xrc_srq_in_bits { struct mlx5_ifc_xrc_srqc_bits xrc_srq_context_entry; - u8 reserved_at_280[0x60]; + u8 
reserved_at_280[0x600]; - u8 xrc_srq_umem_valid[0x1]; - u8 reserved_at_2e1[0x1f]; - - u8 reserved_at_300[0x580]; - - u8 pas[][0x40]; + u8 pas[0][0x40]; }; struct mlx5_ifc_create_tis_out_bits { @@ -7738,7 +5931,7 @@ struct mlx5_ifc_create_tis_out_bits { struct mlx5_ifc_create_tis_in_bits { u8 opcode[0x10]; - u8 uid[0x10]; + u8 reserved_at_10[0x10]; u8 reserved_at_20[0x10]; u8 op_mod[0x10]; @@ -7750,19 +5943,19 @@ struct mlx5_ifc_create_tis_in_bits { struct mlx5_ifc_create_tir_out_bits { u8 status[0x8]; - u8 icm_address_63_40[0x18]; + u8 reserved_at_8[0x18]; u8 syndrome[0x20]; - u8 icm_address_39_32[0x8]; + u8 reserved_at_40[0x8]; u8 tirn[0x18]; - u8 icm_address_31_0[0x20]; + u8 reserved_at_60[0x20]; }; struct mlx5_ifc_create_tir_in_bits { u8 opcode[0x10]; - u8 uid[0x10]; + u8 reserved_at_10[0x10]; u8 reserved_at_20[0x10]; u8 op_mod[0x10]; @@ -7786,7 +5979,7 @@ struct mlx5_ifc_create_srq_out_bits { struct mlx5_ifc_create_srq_in_bits { u8 opcode[0x10]; - u8 uid[0x10]; + u8 reserved_at_10[0x10]; u8 reserved_at_20[0x10]; u8 op_mod[0x10]; @@ -7797,7 +5990,7 @@ struct mlx5_ifc_create_srq_in_bits { u8 reserved_at_280[0x600]; - u8 pas[][0x40]; + u8 pas[0][0x40]; }; struct mlx5_ifc_create_sq_out_bits { @@ -7814,7 +6007,7 @@ struct mlx5_ifc_create_sq_out_bits { struct mlx5_ifc_create_sq_in_bits { u8 opcode[0x10]; - u8 uid[0x10]; + u8 reserved_at_10[0x10]; u8 reserved_at_20[0x10]; u8 op_mod[0x10]; @@ -7824,36 +6017,6 @@ struct mlx5_ifc_create_sq_in_bits { struct mlx5_ifc_sqc_bits ctx; }; -struct mlx5_ifc_create_scheduling_element_out_bits { - u8 status[0x8]; - u8 reserved_at_8[0x18]; - - u8 syndrome[0x20]; - - u8 reserved_at_40[0x40]; - - u8 scheduling_element_id[0x20]; - - u8 reserved_at_a0[0x160]; -}; - -struct mlx5_ifc_create_scheduling_element_in_bits { - u8 opcode[0x10]; - u8 reserved_at_10[0x10]; - - u8 reserved_at_20[0x10]; - u8 op_mod[0x10]; - - u8 scheduling_hierarchy[0x8]; - u8 reserved_at_48[0x18]; - - u8 reserved_at_60[0xa0]; - - struct 
mlx5_ifc_scheduling_context_bits scheduling_context; - - u8 reserved_at_300[0x100]; -}; - struct mlx5_ifc_create_rqt_out_bits { u8 status[0x8]; u8 reserved_at_8[0x18]; @@ -7868,7 +6031,7 @@ struct mlx5_ifc_create_rqt_out_bits { struct mlx5_ifc_create_rqt_in_bits { u8 opcode[0x10]; - u8 uid[0x10]; + u8 reserved_at_10[0x10]; u8 reserved_at_20[0x10]; u8 op_mod[0x10]; @@ -7892,7 +6055,7 @@ struct mlx5_ifc_create_rq_out_bits { struct mlx5_ifc_create_rq_in_bits { u8 opcode[0x10]; - u8 uid[0x10]; + u8 reserved_at_10[0x10]; u8 reserved_at_20[0x10]; u8 op_mod[0x10]; @@ -7916,7 +6079,7 @@ struct mlx5_ifc_create_rmp_out_bits { struct mlx5_ifc_create_rmp_in_bits { u8 opcode[0x10]; - u8 uid[0x10]; + u8 reserved_at_10[0x10]; u8 reserved_at_20[0x10]; u8 op_mod[0x10]; @@ -7935,32 +6098,27 @@ struct mlx5_ifc_create_qp_out_bits { u8 reserved_at_40[0x8]; u8 qpn[0x18]; - u8 ece[0x20]; + u8 reserved_at_60[0x20]; }; struct mlx5_ifc_create_qp_in_bits { u8 opcode[0x10]; - u8 uid[0x10]; + u8 reserved_at_10[0x10]; u8 reserved_at_20[0x10]; u8 op_mod[0x10]; - u8 reserved_at_40[0x8]; - u8 input_qpn[0x18]; + u8 reserved_at_40[0x40]; - u8 reserved_at_60[0x20]; u8 opt_param_mask[0x20]; - u8 ece[0x20]; + u8 reserved_at_a0[0x20]; struct mlx5_ifc_qpc_bits qpc; - u8 reserved_at_800[0x60]; + u8 reserved_at_800[0x80]; - u8 wq_umem_valid[0x1]; - u8 reserved_at_861[0x1f]; - - u8 pas[][0x40]; + u8 pas[0][0x40]; }; struct mlx5_ifc_create_psv_out_bits { @@ -8012,7 +6170,7 @@ struct mlx5_ifc_create_mkey_out_bits { struct mlx5_ifc_create_mkey_in_bits { u8 opcode[0x10]; - u8 uid[0x10]; + u8 reserved_at_10[0x10]; u8 reserved_at_20[0x10]; u8 op_mod[0x10]; @@ -8020,8 +6178,7 @@ struct mlx5_ifc_create_mkey_in_bits { u8 reserved_at_40[0x20]; u8 pg_access[0x1]; - u8 mkey_umem_valid[0x1]; - u8 reserved_at_62[0x1e]; + u8 reserved_at_61[0x1f]; struct mlx5_ifc_mkc_bits memory_key_mkey_entry; @@ -8031,29 +6188,19 @@ struct mlx5_ifc_create_mkey_in_bits { u8 reserved_at_320[0x560]; - u8 klm_pas_mtt[][0x20]; -}; - -enum { - 
MLX5_FLOW_TABLE_TYPE_NIC_RX = 0x0, - MLX5_FLOW_TABLE_TYPE_NIC_TX = 0x1, - MLX5_FLOW_TABLE_TYPE_ESW_EGRESS_ACL = 0x2, - MLX5_FLOW_TABLE_TYPE_ESW_INGRESS_ACL = 0x3, - MLX5_FLOW_TABLE_TYPE_FDB = 0X4, - MLX5_FLOW_TABLE_TYPE_SNIFFER_RX = 0X5, - MLX5_FLOW_TABLE_TYPE_SNIFFER_TX = 0X6, + u8 klm_pas_mtt[0][0x20]; }; struct mlx5_ifc_create_flow_table_out_bits { u8 status[0x8]; - u8 icm_address_63_40[0x18]; + u8 reserved_at_8[0x18]; u8 syndrome[0x20]; - u8 icm_address_39_32[0x8]; + u8 reserved_at_40[0x8]; u8 table_id[0x18]; - u8 icm_address_31_0[0x20]; + u8 reserved_at_60[0x20]; }; struct mlx5_ifc_create_flow_table_in_bits { @@ -8074,7 +6221,21 @@ struct mlx5_ifc_create_flow_table_in_bits { u8 reserved_at_a0[0x20]; - struct mlx5_ifc_flow_table_context_bits flow_table_context; + u8 encap_en[0x1]; + u8 decap_en[0x1]; + u8 reserved_at_c2[0x2]; + u8 table_miss_mode[0x4]; + u8 level[0x8]; + u8 reserved_at_d0[0x8]; + u8 log_size[0x8]; + + u8 reserved_at_e0[0x8]; + u8 table_miss_id[0x18]; + + u8 reserved_at_100[0x8]; + u8 lag_master_next_table_id[0x18]; + + u8 reserved_at_120[0x80]; }; struct mlx5_ifc_create_flow_group_out_bits { @@ -8090,10 +6251,9 @@ struct mlx5_ifc_create_flow_group_out_bits { }; enum { - MLX5_CREATE_FLOW_GROUP_IN_MATCH_CRITERIA_ENABLE_OUTER_HEADERS = 0x0, - MLX5_CREATE_FLOW_GROUP_IN_MATCH_CRITERIA_ENABLE_MISC_PARAMETERS = 0x1, - MLX5_CREATE_FLOW_GROUP_IN_MATCH_CRITERIA_ENABLE_INNER_HEADERS = 0x2, - MLX5_CREATE_FLOW_GROUP_IN_MATCH_CRITERIA_ENABLE_MISC_PARAMETERS_2 = 0x3, + MLX5_CREATE_FLOW_GROUP_IN_MATCH_CRITERIA_ENABLE_OUTER_HEADERS = 0x0, + MLX5_CREATE_FLOW_GROUP_IN_MATCH_CRITERIA_ENABLE_MISC_PARAMETERS = 0x1, + MLX5_CREATE_FLOW_GROUP_IN_MATCH_CRITERIA_ENABLE_INNER_HEADERS = 0x2, }; struct mlx5_ifc_create_flow_group_in_bits { @@ -8115,9 +6275,7 @@ struct mlx5_ifc_create_flow_group_in_bits { u8 reserved_at_a0[0x8]; u8 table_id[0x18]; - u8 source_eswitch_owner_vhca_id_valid[0x1]; - - u8 reserved_at_c1[0x1f]; + u8 reserved_at_c0[0x20]; u8 start_flow_index[0x20]; 
@@ -8149,7 +6307,7 @@ struct mlx5_ifc_create_eq_out_bits { struct mlx5_ifc_create_eq_in_bits { u8 opcode[0x10]; - u8 uid[0x10]; + u8 reserved_at_10[0x10]; u8 reserved_at_20[0x10]; u8 op_mod[0x10]; @@ -8160,11 +6318,11 @@ struct mlx5_ifc_create_eq_in_bits { u8 reserved_at_280[0x40]; - u8 event_bitmask[4][0x40]; + u8 event_bitmask[0x40]; - u8 reserved_at_3c0[0x4c0]; + u8 reserved_at_300[0x580]; - u8 pas[][0x40]; + u8 pas[0][0x40]; }; struct mlx5_ifc_create_dct_out_bits { @@ -8176,12 +6334,12 @@ struct mlx5_ifc_create_dct_out_bits { u8 reserved_at_40[0x8]; u8 dctn[0x18]; - u8 ece[0x20]; + u8 reserved_at_60[0x20]; }; struct mlx5_ifc_create_dct_in_bits { u8 opcode[0x10]; - u8 uid[0x10]; + u8 reserved_at_10[0x10]; u8 reserved_at_20[0x10]; u8 op_mod[0x10]; @@ -8207,7 +6365,7 @@ struct mlx5_ifc_create_cq_out_bits { struct mlx5_ifc_create_cq_in_bits { u8 opcode[0x10]; - u8 uid[0x10]; + u8 reserved_at_10[0x10]; u8 reserved_at_20[0x10]; u8 op_mod[0x10]; @@ -8216,12 +6374,9 @@ struct mlx5_ifc_create_cq_in_bits { struct mlx5_ifc_cqc_bits cq_context; - u8 reserved_at_280[0x60]; + u8 reserved_at_280[0x600]; - u8 cq_umem_valid[0x1]; - u8 reserved_at_2e1[0x59f]; - - u8 pas[][0x40]; + u8 pas[0][0x40]; }; struct mlx5_ifc_config_int_moderation_out_bits { @@ -8267,7 +6422,7 @@ struct mlx5_ifc_attach_to_mcg_out_bits { struct mlx5_ifc_attach_to_mcg_in_bits { u8 opcode[0x10]; - u8 uid[0x10]; + u8 reserved_at_10[0x10]; u8 reserved_at_20[0x10]; u8 op_mod[0x10]; @@ -8318,7 +6473,7 @@ enum { struct mlx5_ifc_arm_xrc_srq_in_bits { u8 opcode[0x10]; - u8 uid[0x10]; + u8 reserved_at_10[0x10]; u8 reserved_at_20[0x10]; u8 op_mod[0x10]; @@ -8346,7 +6501,7 @@ enum { struct mlx5_ifc_arm_rq_in_bits { u8 opcode[0x10]; - u8 uid[0x10]; + u8 reserved_at_10[0x10]; u8 reserved_at_20[0x10]; u8 op_mod[0x10]; @@ -8394,7 +6549,7 @@ struct mlx5_ifc_alloc_xrcd_out_bits { struct mlx5_ifc_alloc_xrcd_in_bits { u8 opcode[0x10]; - u8 uid[0x10]; + u8 reserved_at_10[0x10]; u8 reserved_at_20[0x10]; u8 op_mod[0x10]; @@ 
-8438,7 +6593,7 @@ struct mlx5_ifc_alloc_transport_domain_out_bits { struct mlx5_ifc_alloc_transport_domain_in_bits { u8 opcode[0x10]; - u8 uid[0x10]; + u8 reserved_at_10[0x10]; u8 reserved_at_20[0x10]; u8 op_mod[0x10]; @@ -8460,7 +6615,7 @@ struct mlx5_ifc_alloc_q_counter_out_bits { struct mlx5_ifc_alloc_q_counter_in_bits { u8 opcode[0x10]; - u8 uid[0x10]; + u8 reserved_at_10[0x10]; u8 reserved_at_20[0x10]; u8 op_mod[0x10]; @@ -8482,7 +6637,7 @@ struct mlx5_ifc_alloc_pd_out_bits { struct mlx5_ifc_alloc_pd_in_bits { u8 opcode[0x10]; - u8 uid[0x10]; + u8 reserved_at_10[0x10]; u8 reserved_at_20[0x10]; u8 op_mod[0x10]; @@ -8496,7 +6651,8 @@ struct mlx5_ifc_alloc_flow_counter_out_bits { u8 syndrome[0x20]; - u8 flow_counter_id[0x20]; + u8 reserved_at_40[0x10]; + u8 flow_counter_id[0x10]; u8 reserved_at_60[0x20]; }; @@ -8508,8 +6664,7 @@ struct mlx5_ifc_alloc_flow_counter_in_bits { u8 reserved_at_20[0x10]; u8 op_mod[0x10]; - u8 reserved_at_40[0x38]; - u8 flow_counter_bulk[0x8]; + u8 reserved_at_40[0x40]; }; struct mlx5_ifc_add_vxlan_udp_dport_out_bits { @@ -8534,7 +6689,7 @@ struct mlx5_ifc_add_vxlan_udp_dport_in_bits { u8 vxlan_udp_port[0x10]; }; -struct mlx5_ifc_set_pp_rate_limit_out_bits { +struct mlx5_ifc_set_rate_limit_out_bits { u8 status[0x8]; u8 reserved_at_8[0x18]; @@ -8543,20 +6698,9 @@ struct mlx5_ifc_set_pp_rate_limit_out_bits { u8 reserved_at_40[0x40]; }; -struct mlx5_ifc_set_pp_rate_limit_context_bits { - u8 rate_limit[0x20]; - - u8 burst_upper_bound[0x20]; - - u8 reserved_at_40[0x10]; - u8 typical_packet_size[0x10]; - - u8 reserved_at_60[0x120]; -}; - -struct mlx5_ifc_set_pp_rate_limit_in_bits { +struct mlx5_ifc_set_rate_limit_in_bits { u8 opcode[0x10]; - u8 uid[0x10]; + u8 reserved_at_10[0x10]; u8 reserved_at_20[0x10]; u8 op_mod[0x10]; @@ -8566,7 +6710,7 @@ struct mlx5_ifc_set_pp_rate_limit_in_bits { u8 reserved_at_60[0x20]; - struct mlx5_ifc_set_pp_rate_limit_context_bits ctx; + u8 rate_limit[0x20]; }; struct mlx5_ifc_access_register_out_bits { @@ 
-8577,7 +6721,7 @@ struct mlx5_ifc_access_register_out_bits { u8 reserved_at_40[0x40]; - u8 register_data[][0x20]; + u8 register_data[0][0x20]; }; enum { @@ -8597,7 +6741,7 @@ struct mlx5_ifc_access_register_in_bits { u8 argument[0x20]; - u8 register_data[][0x20]; + u8 register_data[0][0x20]; }; struct mlx5_ifc_sltp_reg_bits { @@ -8702,32 +6846,28 @@ struct mlx5_ifc_ptys_reg_bits { u8 proto_mask[0x3]; u8 an_status[0x4]; - u8 reserved_at_24[0xc]; - u8 data_rate_oper[0x10]; - - u8 ext_eth_proto_capability[0x20]; + u8 reserved_at_24[0x3c]; u8 eth_proto_capability[0x20]; u8 ib_link_width_capability[0x10]; u8 ib_proto_capability[0x10]; - u8 ext_eth_proto_admin[0x20]; + u8 reserved_at_a0[0x20]; u8 eth_proto_admin[0x20]; u8 ib_link_width_admin[0x10]; u8 ib_proto_admin[0x10]; - u8 ext_eth_proto_oper[0x20]; + u8 reserved_at_100[0x20]; u8 eth_proto_oper[0x20]; u8 ib_link_width_oper[0x10]; u8 ib_proto_oper[0x10]; - u8 reserved_at_160[0x1c]; - u8 connector_type[0x4]; + u8 reserved_at_160[0x20]; u8 eth_proto_lp_advertise[0x20]; @@ -8858,48 +6998,20 @@ struct mlx5_ifc_pplr_reg_bits { struct mlx5_ifc_pplm_reg_bits { u8 reserved_at_0[0x8]; - u8 local_port[0x8]; - u8 reserved_at_10[0x10]; + u8 local_port[0x8]; + u8 reserved_at_10[0x10]; - u8 reserved_at_20[0x20]; + u8 reserved_at_20[0x20]; - u8 port_profile_mode[0x8]; - u8 static_port_profile[0x8]; - u8 active_port_profile[0x8]; - u8 reserved_at_58[0x8]; + u8 port_profile_mode[0x8]; + u8 static_port_profile[0x8]; + u8 active_port_profile[0x8]; + u8 reserved_at_58[0x8]; - u8 retransmission_active[0x8]; - u8 fec_mode_active[0x18]; + u8 retransmission_active[0x8]; + u8 fec_mode_active[0x18]; - u8 rs_fec_correction_bypass_cap[0x4]; - u8 reserved_at_84[0x8]; - u8 fec_override_cap_56g[0x4]; - u8 fec_override_cap_100g[0x4]; - u8 fec_override_cap_50g[0x4]; - u8 fec_override_cap_25g[0x4]; - u8 fec_override_cap_10g_40g[0x4]; - - u8 rs_fec_correction_bypass_admin[0x4]; - u8 reserved_at_a4[0x8]; - u8 fec_override_admin_56g[0x4]; - u8 
fec_override_admin_100g[0x4]; - u8 fec_override_admin_50g[0x4]; - u8 fec_override_admin_25g[0x4]; - u8 fec_override_admin_10g_40g[0x4]; - - u8 fec_override_cap_400g_8x[0x10]; - u8 fec_override_cap_200g_4x[0x10]; - - u8 fec_override_cap_100g_2x[0x10]; - u8 fec_override_cap_50g_1x[0x10]; - - u8 fec_override_admin_400g_8x[0x10]; - u8 fec_override_admin_200g_4x[0x10]; - - u8 fec_override_admin_100g_2x[0x10]; - u8 fec_override_admin_50g_1x[0x10]; - - u8 reserved_at_140[0x140]; + u8 reserved_at_80[0x20]; }; struct mlx5_ifc_ppcnt_reg_bits { @@ -8916,64 +7028,6 @@ struct mlx5_ifc_ppcnt_reg_bits { union mlx5_ifc_eth_cntrs_grp_data_layout_auto_bits counter_set; }; -struct mlx5_ifc_mpein_reg_bits { - u8 reserved_at_0[0x2]; - u8 depth[0x6]; - u8 pcie_index[0x8]; - u8 node[0x8]; - u8 reserved_at_18[0x8]; - - u8 capability_mask[0x20]; - - u8 reserved_at_40[0x8]; - u8 link_width_enabled[0x8]; - u8 link_speed_enabled[0x10]; - - u8 lane0_physical_position[0x8]; - u8 link_width_active[0x8]; - u8 link_speed_active[0x10]; - - u8 num_of_pfs[0x10]; - u8 num_of_vfs[0x10]; - - u8 bdf0[0x10]; - u8 reserved_at_b0[0x10]; - - u8 max_read_request_size[0x4]; - u8 max_payload_size[0x4]; - u8 reserved_at_c8[0x5]; - u8 pwr_status[0x3]; - u8 port_type[0x4]; - u8 reserved_at_d4[0xb]; - u8 lane_reversal[0x1]; - - u8 reserved_at_e0[0x14]; - u8 pci_power[0xc]; - - u8 reserved_at_100[0x20]; - - u8 device_status[0x10]; - u8 port_state[0x8]; - u8 reserved_at_138[0x8]; - - u8 reserved_at_140[0x10]; - u8 receiver_detect_result[0x10]; - - u8 reserved_at_160[0x20]; -}; - -struct mlx5_ifc_mpcnt_reg_bits { - u8 reserved_at_0[0x8]; - u8 pcie_index[0x8]; - u8 reserved_at_10[0xa]; - u8 grp[0x6]; - - u8 clr[0x1]; - u8 reserved_at_21[0x1f]; - - union mlx5_ifc_pcie_cntrs_grp_data_layout_auto_bits counter_set; -}; - struct mlx5_ifc_ppad_reg_bits { u8 reserved_at_0[0x3]; u8 single_mac[0x1]; @@ -9143,11 +7197,7 @@ struct mlx5_ifc_pifr_reg_bits { struct mlx5_ifc_pfcc_reg_bits { u8 reserved_at_0[0x8]; u8 local_port[0x8]; 
- u8 reserved_at_10[0xb]; - u8 ppan_mask_n[0x1]; - u8 minor_stall_mask[0x1]; - u8 critical_stall_mask[0x1]; - u8 reserved_at_1e[0x2]; + u8 reserved_at_10[0x10]; u8 ppan[0x4]; u8 reserved_at_24[0x4]; @@ -9157,22 +7207,17 @@ struct mlx5_ifc_pfcc_reg_bits { u8 pptx[0x1]; u8 aptx[0x1]; - u8 pptx_mask_n[0x1]; - u8 reserved_at_43[0x5]; + u8 reserved_at_42[0x6]; u8 pfctx[0x8]; u8 reserved_at_50[0x10]; u8 pprx[0x1]; u8 aprx[0x1]; - u8 pprx_mask_n[0x1]; - u8 reserved_at_63[0x5]; + u8 reserved_at_62[0x6]; u8 pfcrx[0x8]; u8 reserved_at_70[0x10]; - u8 device_stall_minor_watermark[0x10]; - u8 device_stall_critical_watermark[0x10]; - - u8 reserved_at_a0[0x60]; + u8 reserved_at_80[0x80]; }; struct mlx5_ifc_pelc_reg_bits { @@ -9212,228 +7257,6 @@ struct mlx5_ifc_peir_reg_bits { u8 error_type[0x8]; }; -struct mlx5_ifc_mpegc_reg_bits { - u8 reserved_at_0[0x30]; - u8 field_select[0x10]; - - u8 tx_overflow_sense[0x1]; - u8 mark_cqe[0x1]; - u8 mark_cnp[0x1]; - u8 reserved_at_43[0x1b]; - u8 tx_lossy_overflow_oper[0x2]; - - u8 reserved_at_60[0x100]; -}; - -enum { - MLX5_MTUTC_OPERATION_SET_TIME_IMMEDIATE = 0x1, - MLX5_MTUTC_OPERATION_ADJUST_TIME = 0x2, - MLX5_MTUTC_OPERATION_ADJUST_FREQ_UTC = 0x3, -}; - -struct mlx5_ifc_mtutc_reg_bits { - u8 reserved_at_0[0x1c]; - u8 operation[0x4]; - - u8 freq_adjustment[0x20]; - - u8 reserved_at_40[0x40]; - - u8 utc_sec[0x20]; - - u8 reserved_at_a0[0x2]; - u8 utc_nsec[0x1e]; - - u8 time_adjustment[0x20]; -}; - -struct mlx5_ifc_pcam_enhanced_features_bits { - u8 reserved_at_0[0x68]; - u8 fec_50G_per_lane_in_pplm[0x1]; - u8 reserved_at_69[0x4]; - u8 rx_icrc_encapsulated_counter[0x1]; - u8 reserved_at_6e[0x4]; - u8 ptys_extended_ethernet[0x1]; - u8 reserved_at_73[0x3]; - u8 pfcc_mask[0x1]; - u8 reserved_at_77[0x3]; - u8 per_lane_error_counters[0x1]; - u8 rx_buffer_fullness_counters[0x1]; - u8 ptys_connector_type[0x1]; - u8 reserved_at_7d[0x1]; - u8 ppcnt_discard_group[0x1]; - u8 ppcnt_statistical_group[0x1]; -}; - -struct 
mlx5_ifc_pcam_regs_5000_to_507f_bits { - u8 port_access_reg_cap_mask_127_to_96[0x20]; - u8 port_access_reg_cap_mask_95_to_64[0x20]; - - u8 port_access_reg_cap_mask_63_to_36[0x1c]; - u8 pplm[0x1]; - u8 port_access_reg_cap_mask_34_to_32[0x3]; - - u8 port_access_reg_cap_mask_31_to_13[0x13]; - u8 pbmc[0x1]; - u8 pptb[0x1]; - u8 port_access_reg_cap_mask_10_to_09[0x2]; - u8 ppcnt[0x1]; - u8 port_access_reg_cap_mask_07_to_00[0x8]; -}; - -struct mlx5_ifc_pcam_reg_bits { - u8 reserved_at_0[0x8]; - u8 feature_group[0x8]; - u8 reserved_at_10[0x8]; - u8 access_reg_group[0x8]; - - u8 reserved_at_20[0x20]; - - union { - struct mlx5_ifc_pcam_regs_5000_to_507f_bits regs_5000_to_507f; - u8 reserved_at_0[0x80]; - } port_access_reg_cap_mask; - - u8 reserved_at_c0[0x80]; - - union { - struct mlx5_ifc_pcam_enhanced_features_bits enhanced_features; - u8 reserved_at_0[0x80]; - } feature_cap_mask; - - u8 reserved_at_1c0[0xc0]; -}; - -struct mlx5_ifc_mcam_enhanced_features_bits { - u8 reserved_at_0[0x6b]; - u8 ptpcyc2realtime_modify[0x1]; - u8 reserved_at_6c[0x2]; - u8 pci_status_and_power[0x1]; - u8 reserved_at_6f[0x5]; - u8 mark_tx_action_cnp[0x1]; - u8 mark_tx_action_cqe[0x1]; - u8 dynamic_tx_overflow[0x1]; - u8 reserved_at_77[0x4]; - u8 pcie_outbound_stalled[0x1]; - u8 tx_overflow_buffer_pkt[0x1]; - u8 mtpps_enh_out_per_adj[0x1]; - u8 mtpps_fs[0x1]; - u8 pcie_performance_group[0x1]; -}; - -struct mlx5_ifc_mcam_access_reg_bits { - u8 reserved_at_0[0x1c]; - u8 mcda[0x1]; - u8 mcc[0x1]; - u8 mcqi[0x1]; - u8 mcqs[0x1]; - - u8 regs_95_to_87[0x9]; - u8 mpegc[0x1]; - u8 mtutc[0x1]; - u8 regs_84_to_68[0x11]; - u8 tracer_registers[0x4]; - - u8 regs_63_to_32[0x20]; - u8 regs_31_to_0[0x20]; -}; - -struct mlx5_ifc_mcam_access_reg_bits1 { - u8 regs_127_to_96[0x20]; - - u8 regs_95_to_64[0x20]; - - u8 regs_63_to_32[0x20]; - - u8 regs_31_to_0[0x20]; -}; - -struct mlx5_ifc_mcam_access_reg_bits2 { - u8 regs_127_to_99[0x1d]; - u8 mirc[0x1]; - u8 regs_97_to_96[0x2]; - - u8 regs_95_to_64[0x20]; - - u8 
regs_63_to_32[0x20]; - - u8 regs_31_to_0[0x20]; -}; - -struct mlx5_ifc_mcam_reg_bits { - u8 reserved_at_0[0x8]; - u8 feature_group[0x8]; - u8 reserved_at_10[0x8]; - u8 access_reg_group[0x8]; - - u8 reserved_at_20[0x20]; - - union { - struct mlx5_ifc_mcam_access_reg_bits access_regs; - struct mlx5_ifc_mcam_access_reg_bits1 access_regs1; - struct mlx5_ifc_mcam_access_reg_bits2 access_regs2; - u8 reserved_at_0[0x80]; - } mng_access_reg_cap_mask; - - u8 reserved_at_c0[0x80]; - - union { - struct mlx5_ifc_mcam_enhanced_features_bits enhanced_features; - u8 reserved_at_0[0x80]; - } mng_feature_cap_mask; - - u8 reserved_at_1c0[0x80]; -}; - -struct mlx5_ifc_qcam_access_reg_cap_mask { - u8 qcam_access_reg_cap_mask_127_to_20[0x6C]; - u8 qpdpm[0x1]; - u8 qcam_access_reg_cap_mask_18_to_4[0x0F]; - u8 qdpm[0x1]; - u8 qpts[0x1]; - u8 qcap[0x1]; - u8 qcam_access_reg_cap_mask_0[0x1]; -}; - -struct mlx5_ifc_qcam_qos_feature_cap_mask { - u8 qcam_qos_feature_cap_mask_127_to_1[0x7F]; - u8 qpts_trust_both[0x1]; -}; - -struct mlx5_ifc_qcam_reg_bits { - u8 reserved_at_0[0x8]; - u8 feature_group[0x8]; - u8 reserved_at_10[0x8]; - u8 access_reg_group[0x8]; - u8 reserved_at_20[0x20]; - - union { - struct mlx5_ifc_qcam_access_reg_cap_mask reg_cap; - u8 reserved_at_0[0x80]; - } qos_access_reg_cap_mask; - - u8 reserved_at_c0[0x80]; - - union { - struct mlx5_ifc_qcam_qos_feature_cap_mask feature_cap; - u8 reserved_at_0[0x80]; - } qos_feature_cap_mask; - - u8 reserved_at_1c0[0x80]; -}; - -struct mlx5_ifc_core_dump_reg_bits { - u8 reserved_at_0[0x18]; - u8 core_dump_type[0x8]; - - u8 reserved_at_20[0x30]; - u8 vhca_id[0x10]; - - u8 reserved_at_60[0x8]; - u8 qpn[0x18]; - u8 reserved_at_80[0x180]; -}; - struct mlx5_ifc_pcap_reg_bits { u8 reserved_at_0[0x8]; u8 local_port[0x8]; @@ -9474,23 +7297,9 @@ struct mlx5_ifc_pamp_reg_bits { struct mlx5_ifc_pcmr_reg_bits { u8 reserved_at_0[0x8]; u8 local_port[0x8]; - u8 reserved_at_10[0x10]; - - u8 entropy_force_cap[0x1]; - u8 entropy_calc_cap[0x1]; - u8 
entropy_gre_calc_cap[0x1]; - u8 reserved_at_23[0xf]; - u8 rx_ts_over_crc_cap[0x1]; - u8 reserved_at_33[0xb]; + u8 reserved_at_10[0x2e]; u8 fcs_cap[0x1]; - u8 reserved_at_3f[0x1]; - - u8 entropy_force[0x1]; - u8 entropy_calc[0x1]; - u8 entropy_gre_calc[0x1]; - u8 reserved_at_43[0xf]; - u8 rx_ts_over_crc[0x1]; - u8 reserved_at_53[0xb]; + u8 reserved_at_3f[0x1f]; u8 fcs_chk[0x1]; u8 reserved_at_5f[0x1]; }; @@ -9647,7 +7456,7 @@ struct mlx5_ifc_cmd_in_bits { u8 reserved_at_20[0x10]; u8 op_mod[0x10]; - u8 command[][0x20]; + u8 command[0][0x20]; }; struct mlx5_ifc_cmd_if_box_bits { @@ -9775,8 +7584,7 @@ struct mlx5_ifc_initial_seg_bits { u8 initializing[0x1]; u8 reserved_at_fe1[0x4]; u8 nic_interface_supported[0x3]; - u8 embedded_cpu[0x1]; - u8 reserved_at_fe9[0x17]; + u8 reserved_at_fe8[0x18]; struct mlx5_ifc_health_buffer_bits health_buffer; @@ -9793,273 +7601,6 @@ struct mlx5_ifc_initial_seg_bits { u8 reserved_at_80a0[0x17fc0]; }; -struct mlx5_ifc_mtpps_reg_bits { - u8 reserved_at_0[0xc]; - u8 cap_number_of_pps_pins[0x4]; - u8 reserved_at_10[0x4]; - u8 cap_max_num_of_pps_in_pins[0x4]; - u8 reserved_at_18[0x4]; - u8 cap_max_num_of_pps_out_pins[0x4]; - - u8 reserved_at_20[0x24]; - u8 cap_pin_3_mode[0x4]; - u8 reserved_at_48[0x4]; - u8 cap_pin_2_mode[0x4]; - u8 reserved_at_50[0x4]; - u8 cap_pin_1_mode[0x4]; - u8 reserved_at_58[0x4]; - u8 cap_pin_0_mode[0x4]; - - u8 reserved_at_60[0x4]; - u8 cap_pin_7_mode[0x4]; - u8 reserved_at_68[0x4]; - u8 cap_pin_6_mode[0x4]; - u8 reserved_at_70[0x4]; - u8 cap_pin_5_mode[0x4]; - u8 reserved_at_78[0x4]; - u8 cap_pin_4_mode[0x4]; - - u8 field_select[0x20]; - u8 reserved_at_a0[0x60]; - - u8 enable[0x1]; - u8 reserved_at_101[0xb]; - u8 pattern[0x4]; - u8 reserved_at_110[0x4]; - u8 pin_mode[0x4]; - u8 pin[0x8]; - - u8 reserved_at_120[0x20]; - - u8 time_stamp[0x40]; - - u8 out_pulse_duration[0x10]; - u8 out_periodic_adjustment[0x10]; - u8 enhanced_out_periodic_adjustment[0x20]; - - u8 reserved_at_1c0[0x20]; -}; - -struct 
mlx5_ifc_mtppse_reg_bits { - u8 reserved_at_0[0x18]; - u8 pin[0x8]; - u8 event_arm[0x1]; - u8 reserved_at_21[0x1b]; - u8 event_generation_mode[0x4]; - u8 reserved_at_40[0x40]; -}; - -struct mlx5_ifc_mcqs_reg_bits { - u8 last_index_flag[0x1]; - u8 reserved_at_1[0x7]; - u8 fw_device[0x8]; - u8 component_index[0x10]; - - u8 reserved_at_20[0x10]; - u8 identifier[0x10]; - - u8 reserved_at_40[0x17]; - u8 component_status[0x5]; - u8 component_update_state[0x4]; - - u8 last_update_state_changer_type[0x4]; - u8 last_update_state_changer_host_id[0x4]; - u8 reserved_at_68[0x18]; -}; - -struct mlx5_ifc_mcqi_cap_bits { - u8 supported_info_bitmask[0x20]; - - u8 component_size[0x20]; - - u8 max_component_size[0x20]; - - u8 log_mcda_word_size[0x4]; - u8 reserved_at_64[0xc]; - u8 mcda_max_write_size[0x10]; - - u8 rd_en[0x1]; - u8 reserved_at_81[0x1]; - u8 match_chip_id[0x1]; - u8 match_psid[0x1]; - u8 check_user_timestamp[0x1]; - u8 match_base_guid_mac[0x1]; - u8 reserved_at_86[0x1a]; -}; - -struct mlx5_ifc_mcqi_version_bits { - u8 reserved_at_0[0x2]; - u8 build_time_valid[0x1]; - u8 user_defined_time_valid[0x1]; - u8 reserved_at_4[0x14]; - u8 version_string_length[0x8]; - - u8 version[0x20]; - - u8 build_time[0x40]; - - u8 user_defined_time[0x40]; - - u8 build_tool_version[0x20]; - - u8 reserved_at_e0[0x20]; - - u8 version_string[92][0x8]; -}; - -struct mlx5_ifc_mcqi_activation_method_bits { - u8 pending_server_ac_power_cycle[0x1]; - u8 pending_server_dc_power_cycle[0x1]; - u8 pending_server_reboot[0x1]; - u8 pending_fw_reset[0x1]; - u8 auto_activate[0x1]; - u8 all_hosts_sync[0x1]; - u8 device_hw_reset[0x1]; - u8 reserved_at_7[0x19]; -}; - -union mlx5_ifc_mcqi_reg_data_bits { - struct mlx5_ifc_mcqi_cap_bits mcqi_caps; - struct mlx5_ifc_mcqi_version_bits mcqi_version; - struct mlx5_ifc_mcqi_activation_method_bits mcqi_activation_mathod; -}; - -struct mlx5_ifc_mcqi_reg_bits { - u8 read_pending_component[0x1]; - u8 reserved_at_1[0xf]; - u8 component_index[0x10]; - - u8 
reserved_at_20[0x20]; - - u8 reserved_at_40[0x1b]; - u8 info_type[0x5]; - - u8 info_size[0x20]; - - u8 offset[0x20]; - - u8 reserved_at_a0[0x10]; - u8 data_size[0x10]; - - union mlx5_ifc_mcqi_reg_data_bits data[]; -}; - -struct mlx5_ifc_mcc_reg_bits { - u8 reserved_at_0[0x4]; - u8 time_elapsed_since_last_cmd[0xc]; - u8 reserved_at_10[0x8]; - u8 instruction[0x8]; - - u8 reserved_at_20[0x10]; - u8 component_index[0x10]; - - u8 reserved_at_40[0x8]; - u8 update_handle[0x18]; - - u8 handle_owner_type[0x4]; - u8 handle_owner_host_id[0x4]; - u8 reserved_at_68[0x1]; - u8 control_progress[0x7]; - u8 error_code[0x8]; - u8 reserved_at_78[0x4]; - u8 control_state[0x4]; - - u8 component_size[0x20]; - - u8 reserved_at_a0[0x60]; -}; - -struct mlx5_ifc_mcda_reg_bits { - u8 reserved_at_0[0x8]; - u8 update_handle[0x18]; - - u8 offset[0x20]; - - u8 reserved_at_40[0x10]; - u8 size[0x10]; - - u8 reserved_at_60[0x20]; - - u8 data[][0x20]; -}; - -enum { - MLX5_MFRL_REG_RESET_TYPE_FULL_CHIP = BIT(0), - MLX5_MFRL_REG_RESET_TYPE_NET_PORT_ALIVE = BIT(1), -}; - -enum { - MLX5_MFRL_REG_RESET_LEVEL0 = BIT(0), - MLX5_MFRL_REG_RESET_LEVEL3 = BIT(3), - MLX5_MFRL_REG_RESET_LEVEL6 = BIT(6), -}; - -struct mlx5_ifc_mfrl_reg_bits { - u8 reserved_at_0[0x20]; - - u8 reserved_at_20[0x2]; - u8 pci_sync_for_fw_update_start[0x1]; - u8 pci_sync_for_fw_update_resp[0x2]; - u8 rst_type_sel[0x3]; - u8 reserved_at_28[0x8]; - u8 reset_type[0x8]; - u8 reset_level[0x8]; -}; - -struct mlx5_ifc_mirc_reg_bits { - u8 reserved_at_0[0x18]; - u8 status_code[0x8]; - - u8 reserved_at_20[0x20]; -}; - -struct mlx5_ifc_pddr_monitor_opcode_bits { - u8 reserved_at_0[0x10]; - u8 monitor_opcode[0x10]; -}; - -union mlx5_ifc_pddr_troubleshooting_page_status_opcode_auto_bits { - struct mlx5_ifc_pddr_monitor_opcode_bits pddr_monitor_opcode; - u8 reserved_at_0[0x20]; -}; - -enum { - /* Monitor opcodes */ - MLX5_PDDR_REG_TRBLSH_GROUP_OPCODE_MONITOR = 0x0, -}; - -struct mlx5_ifc_pddr_troubleshooting_page_bits { - u8 reserved_at_0[0x10]; - 
u8 group_opcode[0x10]; - - union mlx5_ifc_pddr_troubleshooting_page_status_opcode_auto_bits status_opcode; - - u8 reserved_at_40[0x20]; - - u8 status_message[59][0x20]; -}; - -union mlx5_ifc_pddr_reg_page_data_auto_bits { - struct mlx5_ifc_pddr_troubleshooting_page_bits pddr_troubleshooting_page; - u8 reserved_at_0[0x7c0]; -}; - -enum { - MLX5_PDDR_REG_PAGE_SELECT_TROUBLESHOOTING_INFO_PAGE = 0x1, -}; - -struct mlx5_ifc_pddr_reg_bits { - u8 reserved_at_0[0x8]; - u8 local_port[0x8]; - u8 pnat[0x2]; - u8 reserved_at_12[0xe]; - - u8 reserved_at_20[0x18]; - u8 page_select[0x8]; - - union mlx5_ifc_pddr_reg_page_data_auto_bits page_data; -}; - union mlx5_ifc_ports_control_registers_document_bits { struct mlx5_ifc_bufferx_reg_bits bufferx_reg; struct mlx5_ifc_eth_2819_cntrs_grp_data_layout_bits eth_2819_cntrs_grp_data_layout; @@ -10068,15 +7609,11 @@ union mlx5_ifc_ports_control_registers_document_bits { struct mlx5_ifc_eth_802_3_cntrs_grp_data_layout_bits eth_802_3_cntrs_grp_data_layout; struct mlx5_ifc_eth_extended_cntrs_grp_data_layout_bits eth_extended_cntrs_grp_data_layout; struct mlx5_ifc_eth_per_prio_grp_data_layout_bits eth_per_prio_grp_data_layout; - struct mlx5_ifc_eth_per_tc_prio_grp_data_layout_bits eth_per_tc_prio_grp_data_layout; - struct mlx5_ifc_eth_per_tc_congest_prio_grp_data_layout_bits eth_per_tc_congest_prio_grp_data_layout; + struct mlx5_ifc_eth_per_traffic_grp_data_layout_bits eth_per_traffic_grp_data_layout; struct mlx5_ifc_lane_2_module_mapping_bits lane_2_module_mapping; struct mlx5_ifc_pamp_reg_bits pamp_reg; struct mlx5_ifc_paos_reg_bits paos_reg; struct mlx5_ifc_pcap_reg_bits pcap_reg; - struct mlx5_ifc_pddr_monitor_opcode_bits pddr_monitor_opcode; - struct mlx5_ifc_pddr_reg_bits pddr_reg; - struct mlx5_ifc_pddr_troubleshooting_page_bits pddr_troubleshooting_page; struct mlx5_ifc_peir_reg_bits peir_reg; struct mlx5_ifc_pelc_reg_bits pelc_reg; struct mlx5_ifc_pfcc_reg_bits pfcc_reg; @@ -10096,8 +7633,6 @@ union 
mlx5_ifc_ports_control_registers_document_bits { struct mlx5_ifc_pmtu_reg_bits pmtu_reg; struct mlx5_ifc_ppad_reg_bits ppad_reg; struct mlx5_ifc_ppcnt_reg_bits ppcnt_reg; - struct mlx5_ifc_mpein_reg_bits mpein_reg; - struct mlx5_ifc_mpcnt_reg_bits mpcnt_reg; struct mlx5_ifc_pplm_reg_bits pplm_reg; struct mlx5_ifc_pplr_reg_bits pplr_reg; struct mlx5_ifc_ppsc_reg_bits ppsc_reg; @@ -10110,17 +7645,6 @@ union mlx5_ifc_ports_control_registers_document_bits { struct mlx5_ifc_pvlc_reg_bits pvlc_reg; struct mlx5_ifc_slrg_reg_bits slrg_reg; struct mlx5_ifc_sltp_reg_bits sltp_reg; - struct mlx5_ifc_mtpps_reg_bits mtpps_reg; - struct mlx5_ifc_mtppse_reg_bits mtppse_reg; - struct mlx5_ifc_fpga_access_reg_bits fpga_access_reg; - struct mlx5_ifc_fpga_ctrl_bits fpga_ctrl_bits; - struct mlx5_ifc_fpga_cap_bits fpga_cap_bits; - struct mlx5_ifc_mcqi_reg_bits mcqi_reg; - struct mlx5_ifc_mcc_reg_bits mcc_reg; - struct mlx5_ifc_mcda_reg_bits mcda_reg; - struct mlx5_ifc_mirc_reg_bits mirc_reg; - struct mlx5_ifc_mfrl_reg_bits mfrl_reg; - struct mlx5_ifc_mtutc_reg_bits mtutc_reg; u8 reserved_at_0[0x60e0]; }; @@ -10157,19 +7681,12 @@ struct mlx5_ifc_set_flow_table_root_in_bits { u8 reserved_at_60[0x20]; u8 table_type[0x8]; - u8 reserved_at_88[0x7]; - u8 table_of_other_vport[0x1]; - u8 table_vport_number[0x10]; + u8 reserved_at_88[0x18]; u8 reserved_at_a0[0x8]; u8 table_id[0x18]; - u8 reserved_at_c0[0x8]; - u8 underlay_qpn[0x18]; - u8 table_eswitch_owner_vhca_id_valid[0x1]; - u8 reserved_at_e1[0xf]; - u8 table_eswitch_owner_vhca_id[0x10]; - u8 reserved_at_100[0x100]; + u8 reserved_at_c0[0x140]; }; enum { @@ -10206,7 +7723,17 @@ struct mlx5_ifc_modify_flow_table_in_bits { u8 reserved_at_a0[0x8]; u8 table_id[0x18]; - struct mlx5_ifc_flow_table_context_bits flow_table_context; + u8 reserved_at_c0[0x4]; + u8 table_miss_mode[0x4]; + u8 reserved_at_c8[0x18]; + + u8 reserved_at_e0[0x8]; + u8 table_miss_id[0x18]; + + u8 reserved_at_100[0x8]; + u8 lag_master_next_table_id[0x18]; + + u8 
reserved_at_120[0x80]; }; struct mlx5_ifc_ets_tcn_config_reg_bits { @@ -10244,89 +7771,6 @@ struct mlx5_ifc_qetc_reg_bits { struct mlx5_ifc_ets_global_config_reg_bits global_configuration; }; -struct mlx5_ifc_qpdpm_dscp_reg_bits { - u8 e[0x1]; - u8 reserved_at_01[0x0b]; - u8 prio[0x04]; -}; - -struct mlx5_ifc_qpdpm_reg_bits { - u8 reserved_at_0[0x8]; - u8 local_port[0x8]; - u8 reserved_at_10[0x10]; - struct mlx5_ifc_qpdpm_dscp_reg_bits dscp[64]; -}; - -struct mlx5_ifc_qpts_reg_bits { - u8 reserved_at_0[0x8]; - u8 local_port[0x8]; - u8 reserved_at_10[0x2d]; - u8 trust_state[0x3]; -}; - -struct mlx5_ifc_pptb_reg_bits { - u8 reserved_at_0[0x2]; - u8 mm[0x2]; - u8 reserved_at_4[0x4]; - u8 local_port[0x8]; - u8 reserved_at_10[0x6]; - u8 cm[0x1]; - u8 um[0x1]; - u8 pm[0x8]; - - u8 prio_x_buff[0x20]; - - u8 pm_msb[0x8]; - u8 reserved_at_48[0x10]; - u8 ctrl_buff[0x4]; - u8 untagged_buff[0x4]; -}; - -struct mlx5_ifc_sbcam_reg_bits { - u8 reserved_at_0[0x8]; - u8 feature_group[0x8]; - u8 reserved_at_10[0x8]; - u8 access_reg_group[0x8]; - - u8 reserved_at_20[0x20]; - - u8 sb_access_reg_cap_mask[4][0x20]; - - u8 reserved_at_c0[0x80]; - - u8 sb_feature_cap_mask[4][0x20]; - - u8 reserved_at_1c0[0x40]; - - u8 cap_total_buffer_size[0x20]; - - u8 cap_cell_size[0x10]; - u8 cap_max_pg_buffers[0x8]; - u8 cap_num_pool_supported[0x8]; - - u8 reserved_at_240[0x8]; - u8 cap_sbsr_stat_size[0x8]; - u8 cap_max_tclass_data[0x8]; - u8 cap_max_cpu_ingress_tclass_sb[0x8]; -}; - -struct mlx5_ifc_pbmc_reg_bits { - u8 reserved_at_0[0x8]; - u8 local_port[0x8]; - u8 reserved_at_10[0x10]; - - u8 xoff_timer_value[0x10]; - u8 xoff_refresh[0x10]; - - u8 reserved_at_40[0x9]; - u8 fullness_threshold[0x7]; - u8 port_buffer_size[0x10]; - - struct mlx5_ifc_bufferx_reg_bits buffer[10]; - - u8 reserved_at_2e0[0x80]; -}; - struct mlx5_ifc_qtct_reg_bits { u8 reserved_at_0[0x8]; u8 port_number[0x8]; @@ -10371,7 +7815,7 @@ struct mlx5_ifc_dcbx_param_bits { u8 dcbx_cee_cap[0x1]; u8 dcbx_ieee_cap[0x1]; u8 
dcbx_standby_cap[0x1]; - u8 reserved_at_3[0x5]; + u8 reserved_at_0[0x5]; u8 port_number[0x8]; u8 reserved_at_10[0xa]; u8 max_application_table_size[6]; @@ -10399,8 +7843,7 @@ struct mlx5_ifc_dcbx_param_bits { }; struct mlx5_ifc_lagc_bits { - u8 fdb_selection_mode[0x1]; - u8 reserved_at_1[0x1c]; + u8 reserved_at_0[0x1d]; u8 lag_state[0x3]; u8 reserved_at_20[0x14]; @@ -10456,6 +7899,8 @@ struct mlx5_ifc_query_lag_out_bits { u8 syndrome[0x20]; + u8 reserved_at_40[0x40]; + struct mlx5_ifc_lagc_bits ctx; }; @@ -10526,609 +7971,4 @@ struct mlx5_ifc_destroy_vport_lag_in_bits { u8 reserved_at_40[0x40]; }; -enum { - MLX5_MODIFY_MEMIC_OP_MOD_ALLOC, - MLX5_MODIFY_MEMIC_OP_MOD_DEALLOC, -}; - -struct mlx5_ifc_modify_memic_in_bits { - u8 opcode[0x10]; - u8 uid[0x10]; - - u8 reserved_at_20[0x10]; - u8 op_mod[0x10]; - - u8 reserved_at_40[0x20]; - - u8 reserved_at_60[0x18]; - u8 memic_operation_type[0x8]; - - u8 memic_start_addr[0x40]; - - u8 reserved_at_c0[0x140]; -}; - -struct mlx5_ifc_modify_memic_out_bits { - u8 status[0x8]; - u8 reserved_at_8[0x18]; - - u8 syndrome[0x20]; - - u8 reserved_at_40[0x40]; - - u8 memic_operation_addr[0x40]; - - u8 reserved_at_c0[0x140]; -}; - -struct mlx5_ifc_alloc_memic_in_bits { - u8 opcode[0x10]; - u8 reserved_at_10[0x10]; - - u8 reserved_at_20[0x10]; - u8 op_mod[0x10]; - - u8 reserved_at_30[0x20]; - - u8 reserved_at_40[0x18]; - u8 log_memic_addr_alignment[0x8]; - - u8 range_start_addr[0x40]; - - u8 range_size[0x20]; - - u8 memic_size[0x20]; -}; - -struct mlx5_ifc_alloc_memic_out_bits { - u8 status[0x8]; - u8 reserved_at_8[0x18]; - - u8 syndrome[0x20]; - - u8 memic_start_addr[0x40]; -}; - -struct mlx5_ifc_dealloc_memic_in_bits { - u8 opcode[0x10]; - u8 reserved_at_10[0x10]; - - u8 reserved_at_20[0x10]; - u8 op_mod[0x10]; - - u8 reserved_at_40[0x40]; - - u8 memic_start_addr[0x40]; - - u8 memic_size[0x20]; - - u8 reserved_at_e0[0x20]; -}; - -struct mlx5_ifc_dealloc_memic_out_bits { - u8 status[0x8]; - u8 reserved_at_8[0x18]; - - u8 syndrome[0x20]; 
- - u8 reserved_at_40[0x40]; -}; - -struct mlx5_ifc_general_obj_in_cmd_hdr_bits { - u8 opcode[0x10]; - u8 uid[0x10]; - - u8 vhca_tunnel_id[0x10]; - u8 obj_type[0x10]; - - u8 obj_id[0x20]; - - u8 reserved_at_60[0x20]; -}; - -struct mlx5_ifc_general_obj_out_cmd_hdr_bits { - u8 status[0x8]; - u8 reserved_at_8[0x18]; - - u8 syndrome[0x20]; - - u8 obj_id[0x20]; - - u8 reserved_at_60[0x20]; -}; - -struct mlx5_ifc_umem_bits { - u8 reserved_at_0[0x80]; - - u8 reserved_at_80[0x1b]; - u8 log_page_size[0x5]; - - u8 page_offset[0x20]; - - u8 num_of_mtt[0x40]; - - struct mlx5_ifc_mtt_bits mtt[]; -}; - -struct mlx5_ifc_uctx_bits { - u8 cap[0x20]; - - u8 reserved_at_20[0x160]; -}; - -struct mlx5_ifc_sw_icm_bits { - u8 modify_field_select[0x40]; - - u8 reserved_at_40[0x18]; - u8 log_sw_icm_size[0x8]; - - u8 reserved_at_60[0x20]; - - u8 sw_icm_start_addr[0x40]; - - u8 reserved_at_c0[0x140]; -}; - -struct mlx5_ifc_geneve_tlv_option_bits { - u8 modify_field_select[0x40]; - - u8 reserved_at_40[0x18]; - u8 geneve_option_fte_index[0x8]; - - u8 option_class[0x10]; - u8 option_type[0x8]; - u8 reserved_at_78[0x3]; - u8 option_data_length[0x5]; - - u8 reserved_at_80[0x180]; -}; - -struct mlx5_ifc_create_umem_in_bits { - u8 opcode[0x10]; - u8 uid[0x10]; - - u8 reserved_at_20[0x10]; - u8 op_mod[0x10]; - - u8 reserved_at_40[0x40]; - - struct mlx5_ifc_umem_bits umem; -}; - -struct mlx5_ifc_create_umem_out_bits { - u8 status[0x8]; - u8 reserved_at_8[0x18]; - - u8 syndrome[0x20]; - - u8 reserved_at_40[0x8]; - u8 umem_id[0x18]; - - u8 reserved_at_60[0x20]; -}; - -struct mlx5_ifc_destroy_umem_in_bits { - u8 opcode[0x10]; - u8 uid[0x10]; - - u8 reserved_at_20[0x10]; - u8 op_mod[0x10]; - - u8 reserved_at_40[0x8]; - u8 umem_id[0x18]; - - u8 reserved_at_60[0x20]; -}; - -struct mlx5_ifc_destroy_umem_out_bits { - u8 status[0x8]; - u8 reserved_at_8[0x18]; - - u8 syndrome[0x20]; - - u8 reserved_at_40[0x40]; -}; - -struct mlx5_ifc_create_uctx_in_bits { - u8 opcode[0x10]; - u8 reserved_at_10[0x10]; - - u8 
reserved_at_20[0x10]; - u8 op_mod[0x10]; - - u8 reserved_at_40[0x40]; - - struct mlx5_ifc_uctx_bits uctx; -}; - -struct mlx5_ifc_create_uctx_out_bits { - u8 status[0x8]; - u8 reserved_at_8[0x18]; - - u8 syndrome[0x20]; - - u8 reserved_at_40[0x10]; - u8 uid[0x10]; - - u8 reserved_at_60[0x20]; -}; - -struct mlx5_ifc_destroy_uctx_in_bits { - u8 opcode[0x10]; - u8 reserved_at_10[0x10]; - - u8 reserved_at_20[0x10]; - u8 op_mod[0x10]; - - u8 reserved_at_40[0x10]; - u8 uid[0x10]; - - u8 reserved_at_60[0x20]; -}; - -struct mlx5_ifc_destroy_uctx_out_bits { - u8 status[0x8]; - u8 reserved_at_8[0x18]; - - u8 syndrome[0x20]; - - u8 reserved_at_40[0x40]; -}; - -struct mlx5_ifc_create_sw_icm_in_bits { - struct mlx5_ifc_general_obj_in_cmd_hdr_bits hdr; - struct mlx5_ifc_sw_icm_bits sw_icm; -}; - -struct mlx5_ifc_create_geneve_tlv_option_in_bits { - struct mlx5_ifc_general_obj_in_cmd_hdr_bits hdr; - struct mlx5_ifc_geneve_tlv_option_bits geneve_tlv_opt; -}; - -struct mlx5_ifc_mtrc_string_db_param_bits { - u8 string_db_base_address[0x20]; - - u8 reserved_at_20[0x8]; - u8 string_db_size[0x18]; -}; - -struct mlx5_ifc_mtrc_cap_bits { - u8 trace_owner[0x1]; - u8 trace_to_memory[0x1]; - u8 reserved_at_2[0x4]; - u8 trc_ver[0x2]; - u8 reserved_at_8[0x14]; - u8 num_string_db[0x4]; - - u8 first_string_trace[0x8]; - u8 num_string_trace[0x8]; - u8 reserved_at_30[0x28]; - - u8 log_max_trace_buffer_size[0x8]; - - u8 reserved_at_60[0x20]; - - struct mlx5_ifc_mtrc_string_db_param_bits string_db_param[8]; - - u8 reserved_at_280[0x180]; -}; - -struct mlx5_ifc_mtrc_conf_bits { - u8 reserved_at_0[0x1c]; - u8 trace_mode[0x4]; - u8 reserved_at_20[0x18]; - u8 log_trace_buffer_size[0x8]; - u8 trace_mkey[0x20]; - u8 reserved_at_60[0x3a0]; -}; - -struct mlx5_ifc_mtrc_stdb_bits { - u8 string_db_index[0x4]; - u8 reserved_at_4[0x4]; - u8 read_size[0x18]; - u8 start_offset[0x20]; - u8 string_db_data[]; -}; - -struct mlx5_ifc_mtrc_ctrl_bits { - u8 trace_status[0x2]; - u8 reserved_at_2[0x2]; - u8 arm_event[0x1]; 
- u8 reserved_at_5[0xb]; - u8 modify_field_select[0x10]; - u8 reserved_at_20[0x2b]; - u8 current_timestamp52_32[0x15]; - u8 current_timestamp31_0[0x20]; - u8 reserved_at_80[0x180]; -}; - -struct mlx5_ifc_host_params_context_bits { - u8 host_number[0x8]; - u8 reserved_at_8[0x7]; - u8 host_pf_disabled[0x1]; - u8 host_num_of_vfs[0x10]; - - u8 host_total_vfs[0x10]; - u8 host_pci_bus[0x10]; - - u8 reserved_at_40[0x10]; - u8 host_pci_device[0x10]; - - u8 reserved_at_60[0x10]; - u8 host_pci_function[0x10]; - - u8 reserved_at_80[0x180]; -}; - -struct mlx5_ifc_query_esw_functions_in_bits { - u8 opcode[0x10]; - u8 reserved_at_10[0x10]; - - u8 reserved_at_20[0x10]; - u8 op_mod[0x10]; - - u8 reserved_at_40[0x40]; -}; - -struct mlx5_ifc_query_esw_functions_out_bits { - u8 status[0x8]; - u8 reserved_at_8[0x18]; - - u8 syndrome[0x20]; - - u8 reserved_at_40[0x40]; - - struct mlx5_ifc_host_params_context_bits host_params_context; - - u8 reserved_at_280[0x180]; - u8 host_sf_enable[][0x40]; -}; - -struct mlx5_ifc_sf_partition_bits { - u8 reserved_at_0[0x10]; - u8 log_num_sf[0x8]; - u8 log_sf_bar_size[0x8]; -}; - -struct mlx5_ifc_query_sf_partitions_out_bits { - u8 status[0x8]; - u8 reserved_at_8[0x18]; - - u8 syndrome[0x20]; - - u8 reserved_at_40[0x18]; - u8 num_sf_partitions[0x8]; - - u8 reserved_at_60[0x20]; - - struct mlx5_ifc_sf_partition_bits sf_partition[]; -}; - -struct mlx5_ifc_query_sf_partitions_in_bits { - u8 opcode[0x10]; - u8 reserved_at_10[0x10]; - - u8 reserved_at_20[0x10]; - u8 op_mod[0x10]; - - u8 reserved_at_40[0x40]; -}; - -struct mlx5_ifc_dealloc_sf_out_bits { - u8 status[0x8]; - u8 reserved_at_8[0x18]; - - u8 syndrome[0x20]; - - u8 reserved_at_40[0x40]; -}; - -struct mlx5_ifc_dealloc_sf_in_bits { - u8 opcode[0x10]; - u8 reserved_at_10[0x10]; - - u8 reserved_at_20[0x10]; - u8 op_mod[0x10]; - - u8 reserved_at_40[0x10]; - u8 function_id[0x10]; - - u8 reserved_at_60[0x20]; -}; - -struct mlx5_ifc_alloc_sf_out_bits { - u8 status[0x8]; - u8 reserved_at_8[0x18]; - - u8 
syndrome[0x20]; - - u8 reserved_at_40[0x40]; -}; - -struct mlx5_ifc_alloc_sf_in_bits { - u8 opcode[0x10]; - u8 reserved_at_10[0x10]; - - u8 reserved_at_20[0x10]; - u8 op_mod[0x10]; - - u8 reserved_at_40[0x10]; - u8 function_id[0x10]; - - u8 reserved_at_60[0x20]; -}; - -struct mlx5_ifc_affiliated_event_header_bits { - u8 reserved_at_0[0x10]; - u8 obj_type[0x10]; - - u8 obj_id[0x20]; -}; - -enum { - MLX5_HCA_CAP_GENERAL_OBJECT_TYPES_ENCRYPTION_KEY = BIT_ULL(0xc), - MLX5_HCA_CAP_GENERAL_OBJECT_TYPES_IPSEC = BIT_ULL(0x13), - MLX5_HCA_CAP_GENERAL_OBJECT_TYPES_SAMPLER = BIT_ULL(0x20), -}; - -enum { - MLX5_GENERAL_OBJECT_TYPES_ENCRYPTION_KEY = 0xc, - MLX5_GENERAL_OBJECT_TYPES_IPSEC = 0x13, - MLX5_GENERAL_OBJECT_TYPES_SAMPLER = 0x20, -}; - -enum { - MLX5_IPSEC_OBJECT_ICV_LEN_16B, - MLX5_IPSEC_OBJECT_ICV_LEN_12B, - MLX5_IPSEC_OBJECT_ICV_LEN_8B, -}; - -struct mlx5_ifc_ipsec_obj_bits { - u8 modify_field_select[0x40]; - u8 full_offload[0x1]; - u8 reserved_at_41[0x1]; - u8 esn_en[0x1]; - u8 esn_overlap[0x1]; - u8 reserved_at_44[0x2]; - u8 icv_length[0x2]; - u8 reserved_at_48[0x4]; - u8 aso_return_reg[0x4]; - u8 reserved_at_50[0x10]; - - u8 esn_msb[0x20]; - - u8 reserved_at_80[0x8]; - u8 dekn[0x18]; - - u8 salt[0x20]; - - u8 implicit_iv[0x40]; - - u8 reserved_at_100[0x700]; -}; - -struct mlx5_ifc_create_ipsec_obj_in_bits { - struct mlx5_ifc_general_obj_in_cmd_hdr_bits general_obj_in_cmd_hdr; - struct mlx5_ifc_ipsec_obj_bits ipsec_object; -}; - -enum { - MLX5_MODIFY_IPSEC_BITMASK_ESN_OVERLAP = BIT(0), - MLX5_MODIFY_IPSEC_BITMASK_ESN_MSB = BIT(1), -}; - -struct mlx5_ifc_query_ipsec_obj_out_bits { - struct mlx5_ifc_general_obj_out_cmd_hdr_bits general_obj_out_cmd_hdr; - struct mlx5_ifc_ipsec_obj_bits ipsec_object; -}; - -struct mlx5_ifc_modify_ipsec_obj_in_bits { - struct mlx5_ifc_general_obj_in_cmd_hdr_bits general_obj_in_cmd_hdr; - struct mlx5_ifc_ipsec_obj_bits ipsec_object; -}; - -struct mlx5_ifc_encryption_key_obj_bits { - u8 modify_field_select[0x40]; - - u8 
reserved_at_40[0x14]; - u8 key_size[0x4]; - u8 reserved_at_58[0x4]; - u8 key_type[0x4]; - - u8 reserved_at_60[0x8]; - u8 pd[0x18]; - - u8 reserved_at_80[0x180]; - u8 key[8][0x20]; - - u8 reserved_at_300[0x500]; -}; - -struct mlx5_ifc_create_encryption_key_in_bits { - struct mlx5_ifc_general_obj_in_cmd_hdr_bits general_obj_in_cmd_hdr; - struct mlx5_ifc_encryption_key_obj_bits encryption_key_object; -}; - -struct mlx5_ifc_sampler_obj_bits { - u8 modify_field_select[0x40]; - - u8 table_type[0x8]; - u8 level[0x8]; - u8 reserved_at_50[0xf]; - u8 ignore_flow_level[0x1]; - - u8 sample_ratio[0x20]; - - u8 reserved_at_80[0x8]; - u8 sample_table_id[0x18]; - - u8 reserved_at_a0[0x8]; - u8 default_table_id[0x18]; - - u8 sw_steering_icm_address_rx[0x40]; - u8 sw_steering_icm_address_tx[0x40]; - - u8 reserved_at_140[0xa0]; -}; - -struct mlx5_ifc_create_sampler_obj_in_bits { - struct mlx5_ifc_general_obj_in_cmd_hdr_bits general_obj_in_cmd_hdr; - struct mlx5_ifc_sampler_obj_bits sampler_object; -}; - -struct mlx5_ifc_query_sampler_obj_out_bits { - struct mlx5_ifc_general_obj_out_cmd_hdr_bits general_obj_out_cmd_hdr; - struct mlx5_ifc_sampler_obj_bits sampler_object; -}; - -enum { - MLX5_GENERAL_OBJECT_TYPE_ENCRYPTION_KEY_KEY_SIZE_128 = 0x0, - MLX5_GENERAL_OBJECT_TYPE_ENCRYPTION_KEY_KEY_SIZE_256 = 0x1, -}; - -enum { - MLX5_GENERAL_OBJECT_TYPE_ENCRYPTION_KEY_TYPE_TLS = 0x1, - MLX5_GENERAL_OBJECT_TYPE_ENCRYPTION_KEY_TYPE_IPSEC = 0x2, -}; - -struct mlx5_ifc_tls_static_params_bits { - u8 const_2[0x2]; - u8 tls_version[0x4]; - u8 const_1[0x2]; - u8 reserved_at_8[0x14]; - u8 encryption_standard[0x4]; - - u8 reserved_at_20[0x20]; - - u8 initial_record_number[0x40]; - - u8 resync_tcp_sn[0x20]; - - u8 gcm_iv[0x20]; - - u8 implicit_iv[0x40]; - - u8 reserved_at_100[0x8]; - u8 dek_index[0x18]; - - u8 reserved_at_120[0xe0]; -}; - -struct mlx5_ifc_tls_progress_params_bits { - u8 next_record_tcp_sn[0x20]; - - u8 hw_resync_tcp_sn[0x20]; - - u8 record_tracker_state[0x2]; - u8 auth_state[0x2]; - u8 
reserved_at_44[0x4]; - u8 hw_offset_record_number[0x18]; -}; - -enum { - MLX5_MTT_PERM_READ = 1 << 0, - MLX5_MTT_PERM_WRITE = 1 << 1, - MLX5_MTT_PERM_RW = MLX5_MTT_PERM_READ | MLX5_MTT_PERM_WRITE, -}; - #endif /* MLX5_IFC_H */ diff --git a/include/linux/mlx5/port.h b/include/linux/mlx5/port.h index 77ea4f9c52..b3065acd20 100644 --- a/include/linux/mlx5/port.h +++ b/include/linux/mlx5/port.h @@ -45,7 +45,6 @@ enum mlx5_module_id { MLX5_MODULE_ID_QSFP = 0xC, MLX5_MODULE_ID_QSFP_PLUS = 0xD, MLX5_MODULE_ID_QSFP28 = 0x11, - MLX5_MODULE_ID_DSFP = 0x1B, }; enum mlx5_an_status { @@ -61,16 +60,6 @@ enum mlx5_an_status { #define MLX5_I2C_ADDR_LOW 0x50 #define MLX5_I2C_ADDR_HIGH 0x51 #define MLX5_EEPROM_PAGE_LENGTH 256 -#define MLX5_EEPROM_HIGH_PAGE_LENGTH 128 - -struct mlx5_module_eeprom_query_params { - u16 size; - u16 offset; - u16 i2c_address; - u32 page; - u32 bank; - u32 module_number; -}; enum mlx5e_link_mode { MLX5E_1000BASE_CX_SGMII = 0, @@ -103,63 +92,32 @@ enum mlx5e_link_mode { MLX5E_LINK_MODES_NUMBER, }; -enum mlx5e_ext_link_mode { - MLX5E_SGMII_100M = 0, - MLX5E_1000BASE_X_SGMII = 1, - MLX5E_5GBASE_R = 3, - MLX5E_10GBASE_XFI_XAUI_1 = 4, - MLX5E_40GBASE_XLAUI_4_XLPPI_4 = 5, - MLX5E_25GAUI_1_25GBASE_CR_KR = 6, - MLX5E_50GAUI_2_LAUI_2_50GBASE_CR2_KR2 = 7, - MLX5E_50GAUI_1_LAUI_1_50GBASE_CR_KR = 8, - MLX5E_CAUI_4_100GBASE_CR4_KR4 = 9, - MLX5E_100GAUI_2_100GBASE_CR2_KR2 = 10, - MLX5E_100GAUI_1_100GBASE_CR_KR = 11, - MLX5E_200GAUI_4_200GBASE_CR4_KR4 = 12, - MLX5E_200GAUI_2_200GBASE_CR2_KR2 = 13, - MLX5E_400GAUI_8 = 15, - MLX5E_400GAUI_4_400GBASE_CR4_KR4 = 16, - MLX5E_EXT_LINK_MODES_NUMBER, -}; - -enum mlx5e_connector_type { - MLX5E_PORT_UNKNOWN = 0, - MLX5E_PORT_NONE = 1, - MLX5E_PORT_TP = 2, - MLX5E_PORT_AUI = 3, - MLX5E_PORT_BNC = 4, - MLX5E_PORT_MII = 5, - MLX5E_PORT_FIBRE = 6, - MLX5E_PORT_DA = 7, - MLX5E_PORT_OTHER = 8, - MLX5E_CONNECTOR_TYPE_NUMBER, -}; - -enum mlx5_ptys_width { - MLX5_PTYS_WIDTH_1X = 1 << 0, - MLX5_PTYS_WIDTH_2X = 1 << 1, - MLX5_PTYS_WIDTH_4X = 
1 << 2, - MLX5_PTYS_WIDTH_8X = 1 << 3, - MLX5_PTYS_WIDTH_12X = 1 << 4, -}; - #define MLX5E_PROT_MASK(link_mode) (1 << link_mode) -#define MLX5_GET_ETH_PROTO(reg, out, ext, field) \ - (ext ? MLX5_GET(reg, out, ext_##field) : \ - MLX5_GET(reg, out, field)) int mlx5_set_port_caps(struct mlx5_core_dev *dev, u8 port_num, u32 caps); int mlx5_query_port_ptys(struct mlx5_core_dev *dev, u32 *ptys, int ptys_size, int proto_mask, u8 local_port); - -int mlx5_query_ib_port_oper(struct mlx5_core_dev *dev, u16 *link_width_oper, - u16 *proto_oper, u8 local_port); +int mlx5_query_port_proto_cap(struct mlx5_core_dev *dev, + u32 *proto_cap, int proto_mask); +int mlx5_query_port_proto_admin(struct mlx5_core_dev *dev, + u32 *proto_admin, int proto_mask); +int mlx5_query_port_link_width_oper(struct mlx5_core_dev *dev, + u8 *link_width_oper, u8 local_port); +int mlx5_query_port_ib_proto_oper(struct mlx5_core_dev *dev, + u8 *proto_oper, u8 local_port); +int mlx5_query_port_eth_proto_oper(struct mlx5_core_dev *dev, + u32 *proto_oper, u8 local_port); +int mlx5_set_port_ptys(struct mlx5_core_dev *dev, bool an_disable, + u32 proto_admin, int proto_mask); void mlx5_toggle_port_link(struct mlx5_core_dev *dev); int mlx5_set_port_admin_status(struct mlx5_core_dev *dev, enum mlx5_port_status status); int mlx5_query_port_admin_status(struct mlx5_core_dev *dev, enum mlx5_port_status *status); int mlx5_set_port_beacon(struct mlx5_core_dev *dev, u16 beacon_duration); +void mlx5_query_port_autoneg(struct mlx5_core_dev *dev, int proto_mask, + u8 *an_status, + u8 *an_disable_cap, u8 *an_disable_admin); int mlx5_set_port_mtu(struct mlx5_core_dev *dev, u16 mtu, u8 port); void mlx5_query_port_max_mtu(struct mlx5_core_dev *dev, u16 *max_mtu, u8 port); @@ -177,23 +135,11 @@ int mlx5_set_port_pfc(struct mlx5_core_dev *dev, u8 pfc_en_tx, u8 pfc_en_rx); int mlx5_query_port_pfc(struct mlx5_core_dev *dev, u8 *pfc_en_tx, u8 *pfc_en_rx); -int mlx5_set_port_stall_watermark(struct mlx5_core_dev *dev, - u16 
stall_critical_watermark, - u16 stall_minor_watermark); -int mlx5_query_port_stall_watermark(struct mlx5_core_dev *dev, - u16 *stall_critical_watermark, u16 *stall_minor_watermark); - int mlx5_max_tc(struct mlx5_core_dev *mdev); int mlx5_set_port_prio_tc(struct mlx5_core_dev *mdev, u8 *prio_tc); -int mlx5_query_port_prio_tc(struct mlx5_core_dev *mdev, - u8 prio, u8 *tc); int mlx5_set_port_tc_group(struct mlx5_core_dev *mdev, u8 *tc_group); -int mlx5_query_port_tc_group(struct mlx5_core_dev *mdev, - u8 tc, u8 *tc_group); int mlx5_set_port_tc_bw_alloc(struct mlx5_core_dev *mdev, u8 *tc_bw); -int mlx5_query_port_tc_bw_alloc(struct mlx5_core_dev *mdev, - u8 tc, u8 *bw_pct); int mlx5_modify_port_ets_rate_limit(struct mlx5_core_dev *mdev, u8 *max_bw_value, u8 *max_bw_unit); @@ -203,21 +149,10 @@ int mlx5_query_port_ets_rate_limit(struct mlx5_core_dev *mdev, int mlx5_set_port_wol(struct mlx5_core_dev *mdev, u8 wol_mode); int mlx5_query_port_wol(struct mlx5_core_dev *mdev, u8 *wol_mode); -int mlx5_query_ports_check(struct mlx5_core_dev *mdev, u32 *out, int outlen); -int mlx5_set_ports_check(struct mlx5_core_dev *mdev, u32 *in, int inlen); int mlx5_set_port_fcs(struct mlx5_core_dev *mdev, u8 enable); void mlx5_query_port_fcs(struct mlx5_core_dev *mdev, bool *supported, bool *enabled); int mlx5_query_module_eeprom(struct mlx5_core_dev *dev, u16 offset, u16 size, u8 *data); -int mlx5_query_module_eeprom_by_page(struct mlx5_core_dev *dev, - struct mlx5_module_eeprom_query_params *params, u8 *data); -int mlx5_query_port_dcbx_param(struct mlx5_core_dev *mdev, u32 *out); -int mlx5_set_port_dcbx_param(struct mlx5_core_dev *mdev, u32 *in); - -int mlx5_set_trust_state(struct mlx5_core_dev *mdev, u8 trust_state); -int mlx5_query_trust_state(struct mlx5_core_dev *mdev, u8 *trust_state); -int mlx5_set_dscp2prio(struct mlx5_core_dev *mdev, u8 dscp, u8 prio); -int mlx5_query_dscp2prio(struct mlx5_core_dev *mdev, u8 *dscp2prio); #endif /* __MLX5_PORT_H__ */ diff --git 
a/include/linux/mlx5/qp.h b/include/linux/mlx5/qp.h index 61e48d459b..0aacb2a748 100644 --- a/include/linux/mlx5/qp.h +++ b/include/linux/mlx5/qp.h @@ -37,8 +37,7 @@ #include #define MLX5_INVALID_LKEY 0x100 -/* UMR (3 WQE_BB's) + SIG (3 WQE_BB's) + PSV (mem) + PSV (wire) */ -#define MLX5_SIG_WQE_SIZE (MLX5_SEND_WQE_BB * 8) +#define MLX5_SIG_WQE_SIZE (MLX5_SEND_WQE_BB * 5) #define MLX5_DIF_SIZE 8 #define MLX5_STRIDE_BLOCK_OP 0x400 #define MLX5_CPY_GRD_MASK 0xc0 @@ -51,6 +50,9 @@ #define MLX5_BSF_APPTAG_ESCAPE 0x1 #define MLX5_BSF_APPREF_ESCAPE 0x2 +#define MLX5_QPN_BITS 24 +#define MLX5_QPN_MASK ((1 << MLX5_QPN_BITS) - 1) + enum mlx5_qp_optpar { MLX5_QP_OPTPAR_ALT_ADDR_PATH = 1 << 0, MLX5_QP_OPTPAR_RRE = 1 << 1, @@ -66,13 +68,11 @@ enum mlx5_qp_optpar { MLX5_QP_OPTPAR_RETRY_COUNT = 1 << 12, MLX5_QP_OPTPAR_RNR_RETRY = 1 << 13, MLX5_QP_OPTPAR_ACK_TIMEOUT = 1 << 14, - MLX5_QP_OPTPAR_LAG_TX_AFF = 1 << 15, MLX5_QP_OPTPAR_PRI_PORT = 1 << 16, MLX5_QP_OPTPAR_SRQN = 1 << 18, MLX5_QP_OPTPAR_CQN_RCV = 1 << 19, MLX5_QP_OPTPAR_DC_HS = 1 << 20, MLX5_QP_OPTPAR_DC_KEY = 1 << 21, - MLX5_QP_OPTPAR_COUNTER_SET_ID = 1 << 25, }; enum mlx5_qp_state { @@ -205,12 +205,7 @@ struct mlx5_wqe_ctrl_seg { u8 signature; u8 rsvd[2]; u8 fm_ce_se; - union { - __be32 general_id; - __be32 imm; - __be32 umr_mkey; - __be32 tis_tir_num; - }; + __be32 imm; }; #define MLX5_WQE_CTRL_DS_MASK 0x3f @@ -228,47 +223,14 @@ enum { MLX5_ETH_WQE_L4_CSUM = 1 << 7, }; -enum { - MLX5_ETH_WQE_SVLAN = 1 << 0, - MLX5_ETH_WQE_TRAILER_HDR_OUTER_IP_ASSOC = 1 << 26, - MLX5_ETH_WQE_TRAILER_HDR_OUTER_L4_ASSOC = 1 << 27, - MLX5_ETH_WQE_TRAILER_HDR_INNER_IP_ASSOC = 3 << 26, - MLX5_ETH_WQE_TRAILER_HDR_INNER_L4_ASSOC = 1 << 28, - MLX5_ETH_WQE_INSERT_TRAILER = 1 << 30, - MLX5_ETH_WQE_INSERT_VLAN = 1 << 15, -}; - -enum { - MLX5_ETH_WQE_SWP_INNER_L3_IPV6 = 1 << 0, - MLX5_ETH_WQE_SWP_INNER_L4_UDP = 1 << 1, - MLX5_ETH_WQE_SWP_OUTER_L3_IPV6 = 1 << 4, - MLX5_ETH_WQE_SWP_OUTER_L4_UDP = 1 << 5, -}; - -enum { - MLX5_ETH_WQE_FT_META_IPSEC = 
BIT(0), -}; - struct mlx5_wqe_eth_seg { - u8 swp_outer_l4_offset; - u8 swp_outer_l3_offset; - u8 swp_inner_l4_offset; - u8 swp_inner_l3_offset; + u8 rsvd0[4]; u8 cs_flags; - u8 swp_flags; + u8 rsvd1; __be16 mss; - __be32 flow_table_metadata; - union { - struct { - __be16 sz; - u8 start[2]; - } inline_hdr; - struct { - __be16 type; - __be16 vlan_tci; - } insert; - __be32 trailer; - }; + __be32 rsvd2; + __be16 inline_hdr_sz; + u8 inline_hdr_start[2]; }; struct mlx5_wqe_xrc_seg { @@ -283,23 +245,6 @@ struct mlx5_wqe_masked_atomic_seg { __be64 compare_mask; }; -struct mlx5_base_av { - union { - struct { - __be32 qkey; - __be32 reserved; - } qkey; - __be64 dc_key; - } key; - __be32 dqp_dct; - u8 stat_rate_sl; - u8 fl_mlid; - union { - __be16 rlid; - __be16 udp_sport; - }; -}; - struct mlx5_av { union { struct { @@ -323,17 +268,6 @@ struct mlx5_av { u8 rgid[16]; }; -struct mlx5_ib_ah { - struct ib_ah ibah; - struct mlx5_av av; - u8 xmit_port; -}; - -static inline struct mlx5_ib_ah *to_mah(struct ib_ah *ibah) -{ - return container_of(ibah, struct mlx5_ib_ah, ibah); -} - struct mlx5_wqe_datagram_seg { struct mlx5_av av; }; @@ -358,14 +292,10 @@ struct mlx5_wqe_data_seg { struct mlx5_wqe_umr_ctrl_seg { u8 flags; u8 rsvd0[3]; - __be16 xlt_octowords; - union { - __be16 xlt_offset; - __be16 bsf_octowords; - }; + __be16 klm_octowords; + __be16 bsf_octowords; __be64 mkey_mask; - __be32 xlt_offset_47_16; - u8 rsvd1[28]; + u8 rsvd1[32]; }; struct mlx5_seg_set_psv { @@ -414,7 +344,6 @@ struct mlx5_wqe_signature_seg { struct mlx5_wqe_inline_seg { __be32 byte_count; - __be32 data[]; }; enum mlx5_sig_type { @@ -460,10 +389,6 @@ struct mlx5_bsf { struct mlx5_bsf_inl m_inl; }; -struct mlx5_mtt { - __be64 ptag; -}; - struct mlx5_klm { __be32 bcount; __be32 key; @@ -485,22 +410,163 @@ struct mlx5_stride_block_ctrl_seg { __be16 num_entries; }; +enum mlx5_pagefault_flags { + MLX5_PFAULT_REQUESTOR = 1 << 0, + MLX5_PFAULT_WRITE = 1 << 1, + MLX5_PFAULT_RDMA = 1 << 2, +}; + +/* Contains the 
details of a pagefault. */ +struct mlx5_pagefault { + u32 bytes_committed; + u8 event_subtype; + enum mlx5_pagefault_flags flags; + union { + /* Initiator or send message responder pagefault details. */ + struct { + /* Received packet size, only valid for responders. */ + u32 packet_size; + /* + * WQE index. Refers to either the send queue or + * receive queue, according to event_subtype. + */ + u16 wqe_index; + } wqe; + /* RDMA responder pagefault details */ + struct { + u32 r_key; + /* + * Received packet size, minimal size page fault + * resolution required for forward progress. + */ + u32 packet_size; + u32 rdma_op_len; + u64 rdma_va; + } rdma; + }; +}; + struct mlx5_core_qp { struct mlx5_core_rsc_common common; /* must be first */ void (*event) (struct mlx5_core_qp *, int); + void (*pfault_handler)(struct mlx5_core_qp *, struct mlx5_pagefault *); int qpn; struct mlx5_rsc_debug *dbg; int pid; - u16 uid; }; -struct mlx5_core_dct { - struct mlx5_core_qp mqp; - struct completion drained; +struct mlx5_qp_path { + u8 fl_free_ar; + u8 rsvd3; + __be16 pkey_index; + u8 rsvd0; + u8 grh_mlid; + __be16 rlid; + u8 ackto_lt; + u8 mgid_index; + u8 static_rate; + u8 hop_limit; + __be32 tclass_flowlabel; + union { + u8 rgid[16]; + u8 rip[16]; + }; + u8 f_dscp_ecn_prio; + u8 ecn_dscp; + __be16 udp_sport; + u8 dci_cfi_prio_sl; + u8 port; + u8 rmac[6]; }; +/* FIXME: use mlx5_ifc.h qpc */ +struct mlx5_qp_context { + __be32 flags; + __be32 flags_pd; + u8 mtu_msgmax; + u8 rq_size_stride; + __be16 sq_crq_size; + __be32 qp_counter_set_usr_page; + __be32 wire_qpn; + __be32 log_pg_sz_remote_qpn; + struct mlx5_qp_path pri_path; + struct mlx5_qp_path alt_path; + __be32 params1; + u8 reserved2[4]; + __be32 next_send_psn; + __be32 cqn_send; + __be32 deth_sqpn; + u8 reserved3[4]; + __be32 last_acked_psn; + __be32 ssn; + __be32 params2; + __be32 rnr_nextrecvpsn; + __be32 xrcd; + __be32 cqn_recv; + __be64 db_rec_addr; + __be32 qkey; + __be32 rq_type_srqn; + __be32 rmsn; + __be16 
hw_sq_wqe_counter; + __be16 sw_sq_wqe_counter; + __be16 hw_rcyclic_byte_counter; + __be16 hw_rq_counter; + __be16 sw_rcyclic_byte_counter; + __be16 sw_rq_counter; + u8 rsvd0[5]; + u8 cgs; + u8 cs_req; + u8 cs_res; + __be64 dc_access_key; + u8 rsvd1[24]; +}; + +static inline struct mlx5_core_qp *__mlx5_qp_lookup(struct mlx5_core_dev *dev, u32 qpn) +{ + return radix_tree_lookup(&dev->priv.qp_table.tree, qpn); +} + +static inline struct mlx5_core_mkey *__mlx5_mr_lookup(struct mlx5_core_dev *dev, u32 key) +{ + return radix_tree_lookup(&dev->priv.mkey_table.tree, key); +} + +int mlx5_core_create_qp(struct mlx5_core_dev *dev, + struct mlx5_core_qp *qp, + u32 *in, + int inlen); +int mlx5_core_qp_modify(struct mlx5_core_dev *dev, u16 opcode, + u32 opt_param_mask, void *qpc, + struct mlx5_core_qp *qp); +int mlx5_core_destroy_qp(struct mlx5_core_dev *dev, + struct mlx5_core_qp *qp); +int mlx5_core_qp_query(struct mlx5_core_dev *dev, struct mlx5_core_qp *qp, + u32 *out, int outlen); + +int mlx5_core_xrcd_alloc(struct mlx5_core_dev *dev, u32 *xrcdn); +int mlx5_core_xrcd_dealloc(struct mlx5_core_dev *dev, u32 xrcdn); +void mlx5_init_qp_table(struct mlx5_core_dev *dev); +void mlx5_cleanup_qp_table(struct mlx5_core_dev *dev); int mlx5_debug_qp_add(struct mlx5_core_dev *dev, struct mlx5_core_qp *qp); void mlx5_debug_qp_remove(struct mlx5_core_dev *dev, struct mlx5_core_qp *qp); +#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING +int mlx5_core_page_fault_resume(struct mlx5_core_dev *dev, u32 qpn, + u8 context, int error); +#endif +int mlx5_core_create_rq_tracked(struct mlx5_core_dev *dev, u32 *in, int inlen, + struct mlx5_core_qp *rq); +void mlx5_core_destroy_rq_tracked(struct mlx5_core_dev *dev, + struct mlx5_core_qp *rq); +int mlx5_core_create_sq_tracked(struct mlx5_core_dev *dev, u32 *in, int inlen, + struct mlx5_core_qp *sq); +void mlx5_core_destroy_sq_tracked(struct mlx5_core_dev *dev, + struct mlx5_core_qp *sq); +int mlx5_core_alloc_q_counter(struct mlx5_core_dev *dev, u16 
*counter_id); +int mlx5_core_dealloc_q_counter(struct mlx5_core_dev *dev, u16 counter_id); +int mlx5_core_query_q_counter(struct mlx5_core_dev *dev, u16 counter_id, + int reset, void *out, int out_size); +int mlx5_core_query_out_of_buffer(struct mlx5_core_dev *dev, u16 counter_id, + u32 *out_of_buffer); static inline const char *mlx5_qp_type_str(int type) { @@ -547,11 +613,4 @@ static inline const char *mlx5_qp_state_str(int state) } } -static inline int mlx5_get_qp_default_ts(struct mlx5_core_dev *dev) -{ - return !MLX5_CAP_ROCE(dev, qp_ts_format) ? - MLX5_TIMESTAMP_FORMAT_FREE_RUNNING : - MLX5_TIMESTAMP_FORMAT_DEFAULT; -} - #endif /* MLX5_QP_H */ diff --git a/include/linux/mlx5/srq.h b/include/linux/mlx5/srq.h new file mode 100644 index 0000000000..33c97dc900 --- /dev/null +++ b/include/linux/mlx5/srq.h @@ -0,0 +1,66 @@ +/* + * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#ifndef MLX5_SRQ_H +#define MLX5_SRQ_H + +#include + +enum { + MLX5_SRQ_FLAG_ERR = (1 << 0), + MLX5_SRQ_FLAG_WQ_SIG = (1 << 1), +}; + +struct mlx5_srq_attr { + u32 type; + u32 flags; + u32 log_size; + u32 wqe_shift; + u32 log_page_size; + u32 wqe_cnt; + u32 srqn; + u32 xrcd; + u32 page_offset; + u32 cqn; + u32 pd; + u32 lwm; + u32 user_index; + u64 db_record; + u64 *pas; +}; + +struct mlx5_core_dev; + +void mlx5_init_srq_table(struct mlx5_core_dev *dev); +void mlx5_cleanup_srq_table(struct mlx5_core_dev *dev); + +#endif /* MLX5_SRQ_H */ diff --git a/include/linux/mlx5/transobj.h b/include/linux/mlx5/transobj.h index 60ffeb6b67..88441f5ece 100644 --- a/include/linux/mlx5/transobj.h +++ b/include/linux/mlx5/transobj.h @@ -39,51 +39,40 @@ int mlx5_core_alloc_transport_domain(struct mlx5_core_dev *dev, u32 *tdn); void mlx5_core_dealloc_transport_domain(struct mlx5_core_dev *dev, u32 tdn); int mlx5_core_create_rq(struct mlx5_core_dev *dev, u32 *in, int inlen, u32 *rqn); -int mlx5_core_modify_rq(struct mlx5_core_dev *dev, u32 rqn, u32 *in); +int mlx5_core_modify_rq(struct mlx5_core_dev *dev, u32 rqn, u32 *in, int inlen); void mlx5_core_destroy_rq(struct mlx5_core_dev *dev, u32 rqn); int mlx5_core_query_rq(struct mlx5_core_dev *dev, u32 rqn, u32 *out); int mlx5_core_create_sq(struct mlx5_core_dev *dev, u32 *in, int inlen, u32 *sqn); -int mlx5_core_modify_sq(struct mlx5_core_dev *dev, u32 sqn, u32 *in); +int mlx5_core_modify_sq(struct mlx5_core_dev *dev, u32 sqn, u32 *in, int inlen); void mlx5_core_destroy_sq(struct mlx5_core_dev *dev, u32 sqn); int mlx5_core_query_sq(struct mlx5_core_dev *dev, u32 sqn, u32 *out); -int mlx5_core_query_sq_state(struct mlx5_core_dev *dev, u32 sqn, u8 *state); -int 
mlx5_core_create_tir(struct mlx5_core_dev *dev, u32 *in, u32 *tirn); -int mlx5_core_modify_tir(struct mlx5_core_dev *dev, u32 tirn, u32 *in); +int mlx5_core_create_tir(struct mlx5_core_dev *dev, u32 *in, int inlen, + u32 *tirn); +int mlx5_core_modify_tir(struct mlx5_core_dev *dev, u32 tirn, u32 *in, + int inlen); void mlx5_core_destroy_tir(struct mlx5_core_dev *dev, u32 tirn); -int mlx5_core_create_tis(struct mlx5_core_dev *dev, u32 *in, u32 *tisn); -int mlx5_core_modify_tis(struct mlx5_core_dev *dev, u32 tisn, u32 *in); +int mlx5_core_create_tis(struct mlx5_core_dev *dev, u32 *in, int inlen, + u32 *tisn); +int mlx5_core_modify_tis(struct mlx5_core_dev *dev, u32 tisn, u32 *in, + int inlen); void mlx5_core_destroy_tis(struct mlx5_core_dev *dev, u32 tisn); +int mlx5_core_create_rmp(struct mlx5_core_dev *dev, u32 *in, int inlen, + u32 *rmpn); +int mlx5_core_modify_rmp(struct mlx5_core_dev *dev, u32 *in, int inlen); +int mlx5_core_destroy_rmp(struct mlx5_core_dev *dev, u32 rmpn); +int mlx5_core_query_rmp(struct mlx5_core_dev *dev, u32 rmpn, u32 *out); +int mlx5_core_arm_rmp(struct mlx5_core_dev *dev, u32 rmpn, u16 lwm); +int mlx5_core_create_xsrq(struct mlx5_core_dev *dev, u32 *in, int inlen, + u32 *rmpn); +int mlx5_core_destroy_xsrq(struct mlx5_core_dev *dev, u32 rmpn); +int mlx5_core_query_xsrq(struct mlx5_core_dev *dev, u32 rmpn, u32 *out); +int mlx5_core_arm_xsrq(struct mlx5_core_dev *dev, u32 rmpn, u16 lwm); + int mlx5_core_create_rqt(struct mlx5_core_dev *dev, u32 *in, int inlen, u32 *rqtn); int mlx5_core_modify_rqt(struct mlx5_core_dev *dev, u32 rqtn, u32 *in, int inlen); void mlx5_core_destroy_rqt(struct mlx5_core_dev *dev, u32 rqtn); -struct mlx5_hairpin_params { - u8 log_data_size; - u8 log_num_packets; - u16 q_counter; - int num_channels; -}; - -struct mlx5_hairpin { - struct mlx5_core_dev *func_mdev; - struct mlx5_core_dev *peer_mdev; - - int num_channels; - - u32 *rqn; - u32 *sqn; - - bool peer_gone; -}; - -struct mlx5_hairpin * 
-mlx5_core_hairpin_create(struct mlx5_core_dev *func_mdev, - struct mlx5_core_dev *peer_mdev, - struct mlx5_hairpin_params *params); - -void mlx5_core_hairpin_destroy(struct mlx5_hairpin *pair); -void mlx5_core_hairpin_clear_dead_peer(struct mlx5_hairpin *hp); #endif /* __TRANSOBJ_H__ */ diff --git a/include/linux/mlx5/vport.h b/include/linux/mlx5/vport.h index aad53cb72f..451b0bde90 100644 --- a/include/linux/mlx5/vport.h +++ b/include/linux/mlx5/vport.h @@ -36,45 +36,26 @@ #include #include -#define MLX5_VPORT_MANAGER(mdev) \ - (MLX5_CAP_GEN(mdev, vport_group_manager) && \ - (MLX5_CAP_GEN(mdev, port_type) == MLX5_CAP_PORT_TYPE_ETH) && \ - mlx5_core_is_pf(mdev)) - -enum { - MLX5_CAP_INLINE_MODE_L2, - MLX5_CAP_INLINE_MODE_VPORT_CONTEXT, - MLX5_CAP_INLINE_MODE_NOT_REQUIRED, -}; - -/* Vport number for each function must keep unchanged */ -enum { - MLX5_VPORT_PF = 0x0, - MLX5_VPORT_FIRST_VF = 0x1, - MLX5_VPORT_ECPF = 0xfffe, - MLX5_VPORT_UPLINK = 0xffff -}; - u8 mlx5_query_vport_state(struct mlx5_core_dev *mdev, u8 opmod, u16 vport); +u8 mlx5_query_vport_admin_state(struct mlx5_core_dev *mdev, u8 opmod, + u16 vport); int mlx5_modify_vport_admin_state(struct mlx5_core_dev *mdev, u8 opmod, - u16 vport, u8 other_vport, u8 state); + u16 vport, u8 state); int mlx5_query_nic_vport_mac_address(struct mlx5_core_dev *mdev, - u16 vport, bool other, u8 *addr); -int mlx5_query_mac_address(struct mlx5_core_dev *mdev, u8 *addr); -int mlx5_query_nic_vport_min_inline(struct mlx5_core_dev *mdev, - u16 vport, u8 *min_inline); -void mlx5_query_min_inline(struct mlx5_core_dev *mdev, u8 *min_inline); + u16 vport, u8 *addr); +void mlx5_query_nic_vport_min_inline(struct mlx5_core_dev *mdev, + u8 *min_inline); int mlx5_modify_nic_vport_min_inline(struct mlx5_core_dev *mdev, u16 vport, u8 min_inline); int mlx5_modify_nic_vport_mac_address(struct mlx5_core_dev *dev, - u16 vport, const u8 *addr); + u16 vport, u8 *addr); int mlx5_query_nic_vport_mtu(struct mlx5_core_dev *mdev, u16 *mtu); int 
mlx5_modify_nic_vport_mtu(struct mlx5_core_dev *mdev, u16 mtu); int mlx5_query_nic_vport_system_image_guid(struct mlx5_core_dev *mdev, u64 *system_image_guid); int mlx5_query_nic_vport_node_guid(struct mlx5_core_dev *mdev, u64 *node_guid); int mlx5_modify_nic_vport_node_guid(struct mlx5_core_dev *mdev, - u16 vport, u64 node_guid); + u32 vport, u64 node_guid); int mlx5_query_nic_vport_qkey_viol_cntr(struct mlx5_core_dev *mdev, u16 *qkey_viol_cntr); int mlx5_query_hca_vport_gid(struct mlx5_core_dev *dev, u8 other_vport, @@ -92,7 +73,7 @@ int mlx5_query_hca_vport_system_image_guid(struct mlx5_core_dev *dev, int mlx5_query_hca_vport_node_guid(struct mlx5_core_dev *dev, u64 *node_guid); int mlx5_query_nic_vport_mac_list(struct mlx5_core_dev *dev, - u16 vport, + u32 vport, enum mlx5_list_type list_type, u8 addr_list[][ETH_ALEN], int *list_size); @@ -101,7 +82,7 @@ int mlx5_modify_nic_vport_mac_list(struct mlx5_core_dev *dev, u8 addr_list[][ETH_ALEN], int list_size); int mlx5_query_nic_vport_promisc(struct mlx5_core_dev *mdev, - u16 vport, + u32 vport, int *promisc_uc, int *promisc_mc, int *promisc_all); @@ -109,27 +90,22 @@ int mlx5_modify_nic_vport_promisc(struct mlx5_core_dev *mdev, int promisc_uc, int promisc_mc, int promisc_all); +int mlx5_query_nic_vport_vlans(struct mlx5_core_dev *dev, + u32 vport, + u16 vlans[], + int *size); int mlx5_modify_nic_vport_vlans(struct mlx5_core_dev *dev, u16 vlans[], int list_size); int mlx5_nic_vport_enable_roce(struct mlx5_core_dev *mdev); int mlx5_nic_vport_disable_roce(struct mlx5_core_dev *mdev); -int mlx5_query_vport_down_stats(struct mlx5_core_dev *mdev, u16 vport, - u8 other_vport, u64 *rx_discard_vport_down, - u64 *tx_discard_vport_down); int mlx5_core_query_vport_counter(struct mlx5_core_dev *dev, u8 other_vport, - int vf, u8 port_num, void *out); + int vf, u8 port_num, void *out, + size_t out_sz); int mlx5_core_modify_hca_vport_context(struct mlx5_core_dev *dev, u8 other_vport, u8 port_num, int vf, struct 
mlx5_hca_vport_context *req); -int mlx5_nic_vport_update_local_lb(struct mlx5_core_dev *mdev, bool enable); -int mlx5_nic_vport_query_local_lb(struct mlx5_core_dev *mdev, bool *status); -int mlx5_nic_vport_affiliate_multiport(struct mlx5_core_dev *master_mdev, - struct mlx5_core_dev *port_mdev); -int mlx5_nic_vport_unaffiliate_multiport(struct mlx5_core_dev *port_mdev); - -u64 mlx5_query_nic_system_image_guid(struct mlx5_core_dev *mdev); #endif /* __MLX5_VPORT_H__ */ diff --git a/include/linux/mm-arch-hooks.h b/include/linux/mm-arch-hooks.h index 9c4bedc955..4efc3f56e6 100644 --- a/include/linux/mm-arch-hooks.h +++ b/include/linux/mm-arch-hooks.h @@ -1,9 +1,12 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * Generic mm no-op hooks. * * Copyright (C) 2015, IBM Corporation * Author: Laurent Dufour + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
*/ #ifndef _LINUX_MM_ARCH_HOOKS_H #define _LINUX_MM_ARCH_HOOKS_H diff --git a/include/linux/mm.h b/include/linux/mm.h index 73a52aba44..8f2773e6ba 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_MM_H #define _LINUX_MM_H @@ -15,7 +14,6 @@ #include #include #include -#include #include #include #include @@ -24,14 +22,7 @@ #include #include #include -#include #include -#include -#include -#include -#include -#include -#include struct mempolicy; struct anon_vma; @@ -40,13 +31,8 @@ struct file_ra_state; struct user_struct; struct writeback_control; struct bdi_writeback; -struct pt_regs; -extern int sysctl_page_lock_unfairness; - -void init_mm_internals(void); - -#ifndef CONFIG_NUMA /* Don't use mapnrs, do it properly */ +#ifndef CONFIG_NEED_MULTIPLE_NODES /* Don't use mapnrs, do it properly */ extern unsigned long max_mapnr; static inline void set_max_mapnr(unsigned long limit) @@ -57,27 +43,7 @@ static inline void set_max_mapnr(unsigned long limit) static inline void set_max_mapnr(unsigned long limit) { } #endif -extern atomic_long_t _totalram_pages; -static inline unsigned long totalram_pages(void) -{ - return (unsigned long)atomic_long_read(&_totalram_pages); -} - -static inline void totalram_pages_inc(void) -{ - atomic_long_inc(&_totalram_pages); -} - -static inline void totalram_pages_dec(void) -{ - atomic_long_dec(&_totalram_pages); -} - -static inline void totalram_pages_add(long count) -{ - atomic_long_add(count, &_totalram_pages); -} - +extern unsigned long totalram_pages; extern void * high_memory; extern int page_cluster; @@ -99,19 +65,9 @@ extern int mmap_rnd_compat_bits __read_mostly; #endif #include +#include #include -/* - * Architectures that support memory tagging (assigning tags to memory regions, - * embedding these tags into addresses that point to these memory regions, and - * checking that the memory and the pointer tags match on memory accesses) - * redefine this macro 
to strip tags from pointers. - * It's defined as noop for architectures that don't support memory tagging. - */ -#ifndef untagged_addr -#define untagged_addr(addr) (addr) -#endif - #ifndef __pa_symbol #define __pa_symbol(x) __pa(RELOC_HIDE((unsigned long)(x), 0)) #endif @@ -120,10 +76,6 @@ extern int mmap_rnd_compat_bits __read_mostly; #define page_to_virt(x) __va(PFN_PHYS(page_to_pfn(x))) #endif -#ifndef lm_alias -#define lm_alias(x) __va(__pa_symbol(x)) -#endif - /* * To prevent common memory management code establishing * a zero page mapping on a read fault. @@ -135,53 +87,6 @@ extern int mmap_rnd_compat_bits __read_mostly; #define mm_forbids_zeropage(X) (0) #endif -/* - * On some architectures it is expensive to call memset() for small sizes. - * If an architecture decides to implement their own version of - * mm_zero_struct_page they should wrap the defines below in a #ifndef and - * define their own version of this macro in - */ -#if BITS_PER_LONG == 64 -/* This function must be updated when the size of struct page grows above 80 - * or reduces below 56. The idea that compiler optimizes out switch() - * statement, and only leaves move/store instructions. Also the compiler can - * combine write statements if they are both assignments and can be reordered, - * this can result in several of the writes here being dropped. 
- */ -#define mm_zero_struct_page(pp) __mm_zero_struct_page(pp) -static inline void __mm_zero_struct_page(struct page *page) -{ - unsigned long *_pp = (void *)page; - - /* Check that struct page is either 56, 64, 72, or 80 bytes */ - BUILD_BUG_ON(sizeof(struct page) & 7); - BUILD_BUG_ON(sizeof(struct page) < 56); - BUILD_BUG_ON(sizeof(struct page) > 80); - - switch (sizeof(struct page)) { - case 80: - _pp[9] = 0; - fallthrough; - case 72: - _pp[8] = 0; - fallthrough; - case 64: - _pp[7] = 0; - fallthrough; - case 56: - _pp[6] = 0; - _pp[5] = 0; - _pp[4] = 0; - _pp[3] = 0; - _pp[2] = 0; - _pp[1] = 0; - _pp[0] = 0; - } -} -#else -#define mm_zero_struct_page(pp) ((void)memset((pp), 0, sizeof(struct page))) -#endif - /* * Default maximum number of active map areas, this limits the number of vmas * per mm struct. Users can overwrite this number by sysctl but there is a @@ -202,6 +107,7 @@ static inline void __mm_zero_struct_page(struct page *page) #define DEFAULT_MAX_MAP_COUNT (USHRT_MAX - MAPCOUNT_ELF_CORE_MARGIN) extern int sysctl_max_map_count; +extern unsigned long sysctl_heap_stack_gap; extern unsigned long sysctl_user_reserve_kbytes; extern unsigned long sysctl_admin_reserve_kbytes; @@ -210,25 +116,12 @@ extern int sysctl_overcommit_memory; extern int sysctl_overcommit_ratio; extern unsigned long sysctl_overcommit_kbytes; -int overcommit_ratio_handler(struct ctl_table *, int, void *, size_t *, - loff_t *); -int overcommit_kbytes_handler(struct ctl_table *, int, void *, size_t *, - loff_t *); -int overcommit_policy_handler(struct ctl_table *, int, void *, size_t *, - loff_t *); -/* - * Any attempt to mark this function as static leads to build failure - * when CONFIG_DEBUG_INFO_BTF is enabled because __add_to_page_cache_locked() - * is referred to by BPF code. This must be visible for error injection. 
- */ -int __add_to_page_cache_locked(struct page *page, struct address_space *mapping, - pgoff_t index, gfp_t gfp, void **shadowp); +extern int overcommit_ratio_handler(struct ctl_table *, int, void __user *, + size_t *, loff_t *); +extern int overcommit_kbytes_handler(struct ctl_table *, int, void __user *, + size_t *, loff_t *); -#if defined(CONFIG_SPARSEMEM) && !defined(CONFIG_SPARSEMEM_VMEMMAP) #define nth_page(page,n) pfn_to_page(page_to_pfn((page)) + (n)) -#else -#define nth_page(page,n) ((page) + (n)) -#endif /* to align the pointer to the (next) page boundary */ #define PAGE_ALIGN(addr) ALIGN(addr, PAGE_SIZE) @@ -236,11 +129,6 @@ int __add_to_page_cache_locked(struct page *page, struct address_space *mapping, /* test whether an address (unsigned long or pointer) is aligned to PAGE_SIZE */ #define PAGE_ALIGNED(addr) IS_ALIGNED((unsigned long)(addr), PAGE_SIZE) -#define lru_to_page(head) (list_entry((head)->prev, struct page, lru)) - -void setup_initial_init_mm(void *start_code, void *end_code, - void *end_data, void *brk); - /* * Linux kernel virtual memory manager primitives. * The idea being to have a "virtual" mm in the same way @@ -250,9 +138,7 @@ void setup_initial_init_mm(void *start_code, void *end_code, * mmap() functions). */ -struct vm_area_struct *vm_area_alloc(struct mm_struct *); -struct vm_area_struct *vm_area_dup(struct vm_area_struct *); -void vm_area_free(struct vm_area_struct *); +extern struct kmem_cache *vm_area_cachep; #ifndef CONFIG_MMU extern struct rb_root nommu_region_tree; @@ -281,6 +167,7 @@ extern unsigned int kobjsize(const void *objp); #define VM_GROWSDOWN 0x00000100 /* general info on the segment */ #define VM_UFFD_MISSING 0x00000200 /* missing pages tracking */ #define VM_PFNMAP 0x00000400 /* Page-ranges managed without "struct page", just pure PFN */ +#define VM_DENYWRITE 0x00000800 /* ETXTBSY on write attempts.. 
*/ #define VM_UFFD_WP 0x00001000 /* wrprotect pages tracking */ #define VM_LOCKED 0x00002000 @@ -296,9 +183,13 @@ extern unsigned int kobjsize(const void *objp); #define VM_ACCOUNT 0x00100000 /* Is a VM accounted object */ #define VM_NORESERVE 0x00200000 /* should the VM suppress accounting */ #define VM_HUGETLB 0x00400000 /* Huge TLB Page VM */ -#define VM_SYNC 0x00800000 /* Synchronous page faults */ + +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32) +#define VM_PAGEEXEC 0x00800000 /* vma->vm_page_prot needs special handling */ +#endif + #define VM_ARCH_1 0x01000000 /* Architecture-specific flag */ -#define VM_WIPEONFORK 0x02000000 /* Wipe VMA contents in child. */ +#define VM_ARCH_2 0x02000000 #define VM_DONTDUMP 0x04000000 /* Do not include in the core dump */ #ifdef CONFIG_MEM_SOFT_DIRTY @@ -317,81 +208,45 @@ extern unsigned int kobjsize(const void *objp); #define VM_HIGH_ARCH_BIT_1 33 /* bit only usable on 64-bit architectures */ #define VM_HIGH_ARCH_BIT_2 34 /* bit only usable on 64-bit architectures */ #define VM_HIGH_ARCH_BIT_3 35 /* bit only usable on 64-bit architectures */ -#define VM_HIGH_ARCH_BIT_4 36 /* bit only usable on 64-bit architectures */ #define VM_HIGH_ARCH_0 BIT(VM_HIGH_ARCH_BIT_0) #define VM_HIGH_ARCH_1 BIT(VM_HIGH_ARCH_BIT_1) #define VM_HIGH_ARCH_2 BIT(VM_HIGH_ARCH_BIT_2) #define VM_HIGH_ARCH_3 BIT(VM_HIGH_ARCH_BIT_3) -#define VM_HIGH_ARCH_4 BIT(VM_HIGH_ARCH_BIT_4) #endif /* CONFIG_ARCH_USES_HIGH_VMA_FLAGS */ -#ifdef CONFIG_ARCH_HAS_PKEYS -# define VM_PKEY_SHIFT VM_HIGH_ARCH_BIT_0 -# define VM_PKEY_BIT0 VM_HIGH_ARCH_0 /* A protection key is a 4-bit value */ -# define VM_PKEY_BIT1 VM_HIGH_ARCH_1 /* on x86 and 5-bit value on ppc64 */ -# define VM_PKEY_BIT2 VM_HIGH_ARCH_2 -# define VM_PKEY_BIT3 VM_HIGH_ARCH_3 -#ifdef CONFIG_PPC -# define VM_PKEY_BIT4 VM_HIGH_ARCH_4 -#else -# define VM_PKEY_BIT4 0 -#endif -#endif /* CONFIG_ARCH_HAS_PKEYS */ - #if defined(CONFIG_X86) # define VM_PAT VM_ARCH_1 /* PAT reserves whole VMA at once (x86) 
*/ +#if defined (CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS) +# define VM_PKEY_SHIFT VM_HIGH_ARCH_BIT_0 +# define VM_PKEY_BIT0 VM_HIGH_ARCH_0 /* A protection key is a 4-bit value */ +# define VM_PKEY_BIT1 VM_HIGH_ARCH_1 +# define VM_PKEY_BIT2 VM_HIGH_ARCH_2 +# define VM_PKEY_BIT3 VM_HIGH_ARCH_3 +#endif #elif defined(CONFIG_PPC) # define VM_SAO VM_ARCH_1 /* Strong Access Ordering (powerpc) */ #elif defined(CONFIG_PARISC) # define VM_GROWSUP VM_ARCH_1 +#elif defined(CONFIG_METAG) +# define VM_GROWSUP VM_ARCH_1 #elif defined(CONFIG_IA64) # define VM_GROWSUP VM_ARCH_1 -#elif defined(CONFIG_SPARC64) -# define VM_SPARC_ADI VM_ARCH_1 /* Uses ADI tag for access control */ -# define VM_ARCH_CLEAR VM_SPARC_ADI -#elif defined(CONFIG_ARM64) -# define VM_ARM64_BTI VM_ARCH_1 /* BTI guarded page, a.k.a. GP bit */ -# define VM_ARCH_CLEAR VM_ARM64_BTI #elif !defined(CONFIG_MMU) # define VM_MAPPED_COPY VM_ARCH_1 /* T if mapped copy of data (nommu mmap) */ #endif -#if defined(CONFIG_ARM64_MTE) -# define VM_MTE VM_HIGH_ARCH_0 /* Use Tagged memory for access control */ -# define VM_MTE_ALLOWED VM_HIGH_ARCH_1 /* Tagged memory permitted */ -#else -# define VM_MTE VM_NONE -# define VM_MTE_ALLOWED VM_NONE +#if defined(CONFIG_X86) +/* MPX specific bounds table or bounds directory */ +# define VM_MPX VM_ARCH_2 #endif #ifndef VM_GROWSUP # define VM_GROWSUP VM_NONE #endif -#ifdef CONFIG_HAVE_ARCH_USERFAULTFD_MINOR -# define VM_UFFD_MINOR_BIT 37 -# define VM_UFFD_MINOR BIT(VM_UFFD_MINOR_BIT) /* UFFD minor faults */ -#else /* !CONFIG_HAVE_ARCH_USERFAULTFD_MINOR */ -# define VM_UFFD_MINOR VM_NONE -#endif /* CONFIG_HAVE_ARCH_USERFAULTFD_MINOR */ - /* Bits set in the VMA until the stack is in its final location */ #define VM_STACK_INCOMPLETE_SETUP (VM_RAND_READ | VM_SEQ_READ) -#define TASK_EXEC ((current->personality & READ_IMPLIES_EXEC) ? 
VM_EXEC : 0) - -/* Common data flag combinations */ -#define VM_DATA_FLAGS_TSK_EXEC (VM_READ | VM_WRITE | TASK_EXEC | \ - VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC) -#define VM_DATA_FLAGS_NON_EXEC (VM_READ | VM_WRITE | VM_MAYREAD | \ - VM_MAYWRITE | VM_MAYEXEC) -#define VM_DATA_FLAGS_EXEC (VM_READ | VM_WRITE | VM_EXEC | \ - VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC) - -#ifndef VM_DATA_DEFAULT_FLAGS /* arch can override this */ -#define VM_DATA_DEFAULT_FLAGS VM_DATA_FLAGS_EXEC -#endif - #ifndef VM_STACK_DEFAULT_FLAGS /* arch can override this */ #define VM_STACK_DEFAULT_FLAGS VM_DATA_DEFAULT_FLAGS #endif @@ -404,122 +259,36 @@ extern unsigned int kobjsize(const void *objp); #define VM_STACK_FLAGS (VM_STACK | VM_STACK_DEFAULT_FLAGS | VM_ACCOUNT) -/* VMA basic access permission flags */ -#define VM_ACCESS_FLAGS (VM_READ | VM_WRITE | VM_EXEC) - - /* * Special vmas that are non-mergable, non-mlock()able. + * Note: mm/huge_memory.c VM_NO_THP depends on this definition. */ #define VM_SPECIAL (VM_IO | VM_DONTEXPAND | VM_PFNMAP | VM_MIXEDMAP) -/* This mask prevents VMA from being scanned with khugepaged */ -#define VM_NO_KHUGEPAGED (VM_SPECIAL | VM_HUGETLB) - /* This mask defines which mm->def_flags a process can inherit its parent */ #define VM_INIT_DEF_MASK VM_NOHUGEPAGE /* This mask is used to clear all the VMA flags used by mlock */ #define VM_LOCKED_CLEAR_MASK (~(VM_LOCKED | VM_LOCKONFAULT)) -/* Arch-specific flags to clear when updating VM flags on protection change */ -#ifndef VM_ARCH_CLEAR -# define VM_ARCH_CLEAR VM_NONE -#endif -#define VM_FLAGS_CLEAR (ARCH_VM_PKEY_FLAGS | VM_ARCH_CLEAR) - /* * mapping from the currently active vm_flags protection bits (the * low four bits) to a page protection mask.. */ extern pgprot_t protection_map[16]; -/** - * enum fault_flag - Fault flag definitions. - * @FAULT_FLAG_WRITE: Fault was a write fault. - * @FAULT_FLAG_MKWRITE: Fault was mkwrite of existing PTE. - * @FAULT_FLAG_ALLOW_RETRY: Allow to retry the fault if blocked. 
- * @FAULT_FLAG_RETRY_NOWAIT: Don't drop mmap_lock and wait when retrying. - * @FAULT_FLAG_KILLABLE: The fault task is in SIGKILL killable region. - * @FAULT_FLAG_TRIED: The fault has been tried once. - * @FAULT_FLAG_USER: The fault originated in userspace. - * @FAULT_FLAG_REMOTE: The fault is not for current task/mm. - * @FAULT_FLAG_INSTRUCTION: The fault was during an instruction fetch. - * @FAULT_FLAG_INTERRUPTIBLE: The fault can be interrupted by non-fatal signals. - * - * About @FAULT_FLAG_ALLOW_RETRY and @FAULT_FLAG_TRIED: we can specify - * whether we would allow page faults to retry by specifying these two - * fault flags correctly. Currently there can be three legal combinations: - * - * (a) ALLOW_RETRY and !TRIED: this means the page fault allows retry, and - * this is the first try - * - * (b) ALLOW_RETRY and TRIED: this means the page fault allows retry, and - * we've already tried at least once - * - * (c) !ALLOW_RETRY and !TRIED: this means the page fault does not allow retry - * - * The unlisted combination (!ALLOW_RETRY && TRIED) is illegal and should never - * be used. Note that page faults can be allowed to retry for multiple times, - * in which case we'll have an initial fault with flags (a) then later on - * continuous faults with flags (b). We should always try to detect pending - * signals before a retry to make sure the continuous page faults can still be - * interrupted if necessary. 
- */ -enum fault_flag { - FAULT_FLAG_WRITE = 1 << 0, - FAULT_FLAG_MKWRITE = 1 << 1, - FAULT_FLAG_ALLOW_RETRY = 1 << 2, - FAULT_FLAG_RETRY_NOWAIT = 1 << 3, - FAULT_FLAG_KILLABLE = 1 << 4, - FAULT_FLAG_TRIED = 1 << 5, - FAULT_FLAG_USER = 1 << 6, - FAULT_FLAG_REMOTE = 1 << 7, - FAULT_FLAG_INSTRUCTION = 1 << 8, - FAULT_FLAG_INTERRUPTIBLE = 1 << 9, -}; +#define FAULT_FLAG_WRITE 0x01 /* Fault was a write access */ +#define FAULT_FLAG_MKWRITE 0x02 /* Fault was mkwrite of existing pte */ +#define FAULT_FLAG_ALLOW_RETRY 0x04 /* Retry fault if blocking */ +#define FAULT_FLAG_RETRY_NOWAIT 0x08 /* Don't drop mmap_sem and wait when retrying */ +#define FAULT_FLAG_KILLABLE 0x10 /* The fault task is in SIGKILL killable region */ +#define FAULT_FLAG_TRIED 0x20 /* Second try */ +#define FAULT_FLAG_USER 0x40 /* The fault originated in userspace */ +#define FAULT_FLAG_REMOTE 0x80 /* faulting for non current tsk/mm */ +#define FAULT_FLAG_INSTRUCTION 0x100 /* The fault was during an instruction fetch */ /* - * The default fault flags that should be used by most of the - * arch-specific page fault handlers. - */ -#define FAULT_FLAG_DEFAULT (FAULT_FLAG_ALLOW_RETRY | \ - FAULT_FLAG_KILLABLE | \ - FAULT_FLAG_INTERRUPTIBLE) - -/** - * fault_flag_allow_retry_first - check ALLOW_RETRY the first time - * @flags: Fault flags. - * - * This is mostly used for places where we want to try to avoid taking - * the mmap_lock for too long a time when waiting for another condition - * to change, in which case we can try to be polite to release the - * mmap_lock in the first round to avoid potential starvation of other - * processes that would also want the mmap_lock. - * - * Return: true if the page fault allows retry and this is the first - * attempt of the fault handling; false otherwise. 
- */ -static inline bool fault_flag_allow_retry_first(enum fault_flag flags) -{ - return (flags & FAULT_FLAG_ALLOW_RETRY) && - (!(flags & FAULT_FLAG_TRIED)); -} - -#define FAULT_FLAG_TRACE \ - { FAULT_FLAG_WRITE, "WRITE" }, \ - { FAULT_FLAG_MKWRITE, "MKWRITE" }, \ - { FAULT_FLAG_ALLOW_RETRY, "ALLOW_RETRY" }, \ - { FAULT_FLAG_RETRY_NOWAIT, "RETRY_NOWAIT" }, \ - { FAULT_FLAG_KILLABLE, "KILLABLE" }, \ - { FAULT_FLAG_TRIED, "TRIED" }, \ - { FAULT_FLAG_USER, "USER" }, \ - { FAULT_FLAG_REMOTE, "REMOTE" }, \ - { FAULT_FLAG_INSTRUCTION, "INSTRUCTION" }, \ - { FAULT_FLAG_INTERRUPTIBLE, "INTERRUPTIBLE" } - -/* - * vm_fault is filled by the pagefault handler and passed to the vma's + * vm_fault is filled by the the pagefault handler and passed to the vma's * ->fault function. The vma's ->fault is responsible for returning a bitmask * of VM_FAULT_xxx flags that give details about how the fault was handled. * @@ -529,33 +298,36 @@ static inline bool fault_flag_allow_retry_first(enum fault_flag flags) * pgoff should be used in favour of virtual_address, if possible. */ struct vm_fault { - const struct { - struct vm_area_struct *vma; /* Target VMA */ - gfp_t gfp_mask; /* gfp mask to be used for allocations */ - pgoff_t pgoff; /* Logical page offset based on vma */ - unsigned long address; /* Faulting virtual address */ - }; - enum fault_flag flags; /* FAULT_FLAG_xxx flags - * XXX: should really be 'const' */ - pmd_t *pmd; /* Pointer to pmd entry matching - * the 'address' */ - pud_t *pud; /* Pointer to pud entry matching - * the 'address' - */ - union { - pte_t orig_pte; /* Value of PTE at the time of fault */ - pmd_t orig_pmd; /* Value of PMD at the time of fault, - * used by PMD fault only. 
- */ - }; + unsigned int flags; /* FAULT_FLAG_xxx flags */ + gfp_t gfp_mask; /* gfp mask to be used for allocations */ + pgoff_t pgoff; /* Logical page offset based on vma */ + void __user *virtual_address; /* Faulting virtual address */ - struct page *cow_page; /* Page handler may use for COW fault */ + struct page *cow_page; /* Handler may choose to COW */ struct page *page; /* ->fault handlers should return a * page here, unless VM_FAULT_NOPAGE * is set (which is also implied by * VM_FAULT_ERROR). */ - /* These three entries are valid only while holding ptl lock */ + void *entry; /* ->fault handler can alternatively + * return locked DAX entry. In that + * case handler should return + * VM_FAULT_DAX_LOCKED and fill in + * entry here. + */ +}; + +/* + * Page fault context: passes though page fault handler instead of endless list + * of function arguments. + */ +struct fault_env { + struct vm_area_struct *vma; /* Target VMA */ + unsigned long address; /* Faulting virtual address */ + unsigned int flags; /* FAULT_FLAG_xxx flags */ + pmd_t *pmd; /* Pointer to pmd entry matching + * the 'address' + */ pte_t *pte; /* Pointer to pte entry matching * the 'address'. NULL if the page * table hasn't been allocated. @@ -565,59 +337,41 @@ struct vm_fault { * is not NULL, otherwise pmd. */ pgtable_t prealloc_pte; /* Pre-allocated pte page table. - * vm_ops->map_pages() sets up a page - * table from atomic context. + * vm_ops->map_pages() calls + * alloc_set_pte() from atomic context. * do_fault_around() pre-allocates * page table to avoid allocation from * atomic context. */ }; -/* page entry size for vm->huge_fault() */ -enum page_entry_size { - PE_SIZE_PTE = 0, - PE_SIZE_PMD, - PE_SIZE_PUD, -}; - /* * These are the virtual MM functions - opening of an area, closing and * unmapping it (needed to keep files on disk up-to-date etc), pointer - * to the functions called when a no-page or a wp-page exception occurs. 
+ * to the functions called when a no-page or a wp-page exception occurs. */ struct vm_operations_struct { void (*open)(struct vm_area_struct * area); void (*close)(struct vm_area_struct * area); - /* Called any time before splitting to check if it's allowed */ - int (*may_split)(struct vm_area_struct *area, unsigned long addr); - int (*mremap)(struct vm_area_struct *area); - /* - * Called by mprotect() to make driver-specific permission - * checks before mprotect() is finalised. The VMA must not - * be modified. Returns 0 if eprotect() can proceed. - */ - int (*mprotect)(struct vm_area_struct *vma, unsigned long start, - unsigned long end, unsigned long newflags); - vm_fault_t (*fault)(struct vm_fault *vmf); - vm_fault_t (*huge_fault)(struct vm_fault *vmf, - enum page_entry_size pe_size); - vm_fault_t (*map_pages)(struct vm_fault *vmf, + int (*mremap)(struct vm_area_struct * area); + int (*fault)(struct vm_area_struct *vma, struct vm_fault *vmf); + int (*pmd_fault)(struct vm_area_struct *, unsigned long address, + pmd_t *, unsigned int flags); + void (*map_pages)(struct fault_env *fe, pgoff_t start_pgoff, pgoff_t end_pgoff); - unsigned long (*pagesize)(struct vm_area_struct * area); /* notification that a previously read-only page is about to become * writable, if an error is returned it will cause a SIGBUS */ - vm_fault_t (*page_mkwrite)(struct vm_fault *vmf); + int (*page_mkwrite)(struct vm_area_struct *vma, struct vm_fault *vmf); /* same as page_mkwrite when using VM_PFNMAP|VM_MIXEDMAP */ - vm_fault_t (*pfn_mkwrite)(struct vm_fault *vmf); + int (*pfn_mkwrite)(struct vm_area_struct *vma, struct vm_fault *vmf); /* called by access_process_vm when get_user_pages() fails, typically - * for use by special VMAs. See also generic_access_phys() for a generic - * implementation useful for any iomem mapping. 
+ * for use by special VMAs that can switch between memory and hardware */ - int (*access)(struct vm_area_struct *vma, unsigned long addr, - void *buf, int len, int write); + ssize_t (*access)(struct vm_area_struct *vma, unsigned long addr, + void *buf, size_t len, int write); /* Called by the /proc/PID/maps code to ask the vma whether it * has a special name. Returning non-NULL will also cause this @@ -639,7 +393,7 @@ struct vm_operations_struct { * (vma,addr) marked as MPOL_SHARED. The shared policy infrastructure * in mm/mempolicy.c will do this automatically. * get_policy() must NOT add a ref if the policy at (vma,addr) is not - * marked as MPOL_SHARED. vma policies are protected by the mmap_lock. + * marked as MPOL_SHARED. vma policies are protected by the mmap_sem. * If no [shared/vma] mempolicy exists at the addr, get_policy() op * must return NULL--i.e., do not "fallback" to task or system default * policy. @@ -655,75 +409,26 @@ struct vm_operations_struct { struct page *(*find_special_page)(struct vm_area_struct *vma, unsigned long addr); }; - -static inline void vma_init(struct vm_area_struct *vma, struct mm_struct *mm) -{ - static const struct vm_operations_struct dummy_vm_ops = {}; - - memset(vma, 0, sizeof(*vma)); - vma->vm_mm = mm; - vma->vm_ops = &dummy_vm_ops; - INIT_LIST_HEAD(&vma->anon_vma_chain); -} - -static inline void vma_set_anonymous(struct vm_area_struct *vma) -{ - vma->vm_ops = NULL; -} - -static inline bool vma_is_anonymous(struct vm_area_struct *vma) -{ - return !vma->vm_ops; -} - -static inline bool vma_is_temporary_stack(struct vm_area_struct *vma) -{ - int maybe_stack = vma->vm_flags & (VM_GROWSDOWN | VM_GROWSUP); - - if (!maybe_stack) - return false; - - if ((vma->vm_flags & VM_STACK_INCOMPLETE_SETUP) == - VM_STACK_INCOMPLETE_SETUP) - return true; - - return false; -} - -static inline bool vma_is_foreign(struct vm_area_struct *vma) -{ - if (!current->mm) - return true; - - if (current->mm != vma->vm_mm) - return true; - - return 
false; -} - -static inline bool vma_is_accessible(struct vm_area_struct *vma) -{ - return vma->vm_flags & VM_ACCESS_FLAGS; -} - -#ifdef CONFIG_SHMEM -/* - * The vma_is_shmem is not inline because it is used only by slow - * paths in userfault. - */ -bool vma_is_shmem(struct vm_area_struct *vma); -#else -static inline bool vma_is_shmem(struct vm_area_struct *vma) { return false; } -#endif - -int vma_is_stack_for_current(struct vm_area_struct *vma); - -/* flush_tlb_range() takes a vma, not a mm, and can care about flags */ -#define TLB_FLUSH_VMA(mm,flags) { .vm_mm = (mm), .vm_flags = (flags) } +typedef struct vm_operations_struct __no_const vm_operations_struct_no_const; struct mmu_gather; struct inode; +#define page_private(page) ((page)->private) +#define set_page_private(page, v) ((page)->private = (v)) + +#if !defined(__HAVE_ARCH_PTE_DEVMAP) || !defined(CONFIG_TRANSPARENT_HUGEPAGE) +static inline int pmd_devmap(pmd_t pmd) +{ + return 0; +} +#endif + +/* + * FIXME: take this include out, include page-flags.h in + * files which need it (119 of them) + */ +#include #include /* @@ -780,74 +485,37 @@ unsigned long vmalloc_to_pfn(const void *addr); * On nommu, vmalloc/vfree wrap through kmalloc/kfree directly, so there * is no special casing required. 
*/ - -#ifndef is_ioremap_addr -#define is_ioremap_addr(x) is_vmalloc_addr(x) -#endif - -#ifdef CONFIG_MMU -extern bool is_vmalloc_addr(const void *x); -extern int is_vmalloc_or_module_addr(const void *x); -#else static inline bool is_vmalloc_addr(const void *x) { +#ifdef CONFIG_MMU + unsigned long addr = (unsigned long)x; + + return addr >= VMALLOC_START && addr < VMALLOC_END; +#else return false; +#endif } +#ifdef CONFIG_MMU +extern int is_vmalloc_or_module_addr(const void *x); +#else static inline int is_vmalloc_or_module_addr(const void *x) { return 0; } #endif -extern void *kvmalloc_node(size_t size, gfp_t flags, int node); -static inline void *kvmalloc(size_t size, gfp_t flags) -{ - return kvmalloc_node(size, flags, NUMA_NO_NODE); -} -static inline void *kvzalloc_node(size_t size, gfp_t flags, int node) -{ - return kvmalloc_node(size, flags | __GFP_ZERO, node); -} -static inline void *kvzalloc(size_t size, gfp_t flags) -{ - return kvmalloc(size, flags | __GFP_ZERO); -} - -static inline void *kvmalloc_array(size_t n, size_t size, gfp_t flags) -{ - size_t bytes; - - if (unlikely(check_mul_overflow(n, size, &bytes))) - return NULL; - - return kvmalloc(bytes, flags); -} - -static inline void *kvcalloc(size_t n, size_t size, gfp_t flags) -{ - return kvmalloc_array(n, size, flags | __GFP_ZERO); -} - -extern void *kvrealloc(const void *p, size_t oldsize, size_t newsize, - gfp_t flags); extern void kvfree(const void *addr); -extern void kvfree_sensitive(const void *addr, size_t len); -static inline int head_compound_mapcount(struct page *head) +static inline atomic_t *compound_mapcount_ptr(struct page *page) { - return atomic_read(compound_mapcount_ptr(head)) + 1; + return &page[1].compound_mapcount; } -/* - * Mapcount of compound page as a whole, does not include mapped sub-pages. - * - * Must be called only for compound pages or any their tail sub-pages. 
- */ static inline int compound_mapcount(struct page *page) { VM_BUG_ON_PAGE(!PageCompound(page), page); page = compound_head(page); - return head_compound_mapcount(page); + return atomic_read(compound_mapcount_ptr(page)) + 1; } /* @@ -862,16 +530,10 @@ static inline void page_mapcount_reset(struct page *page) int __page_mapcount(struct page *page); -/* - * Mapcount of 0-order page; when compound sub-page, includes - * compound_mapcount(). - * - * Result is undefined for pages which cannot be mapped into userspace. - * For example SLAB or special types of pages. See function page_has_type(). - * They use this place in struct page differently. - */ static inline int page_mapcount(struct page *page) { + VM_BUG_ON_PAGE(PageSlab(page), page); + if (unlikely(PageCompound(page))) return __page_mapcount(page); return atomic_read(&page->_mapcount) + 1; @@ -907,7 +569,6 @@ void __put_page(struct page *page); void put_pages_list(struct list_head *pages); void split_page(struct page *page, unsigned int order); -void copy_huge_page(struct page *dst, struct page *src); /* * Compound pages have a destructor function. 
Provide a @@ -928,7 +589,7 @@ enum compound_dtor_id { #endif NR_COMPOUND_DTORS, }; -extern compound_page_dtor * const compound_page_dtors[NR_COMPOUND_DTORS]; +extern compound_page_dtor * const compound_page_dtors[]; static inline void set_compound_page_dtor(struct page *page, enum compound_dtor_id compound_dtor) @@ -937,10 +598,10 @@ static inline void set_compound_page_dtor(struct page *page, page[1].compound_dtor = compound_dtor; } -static inline void destroy_compound_page(struct page *page) +static inline compound_page_dtor *get_compound_page_dtor(struct page *page) { VM_BUG_ON_PAGE(page[1].compound_dtor >= NR_COMPOUND_DTORS, page); - compound_page_dtors[page[1].compound_dtor](page); + return compound_page_dtors[page[1].compound_dtor]; } static inline unsigned int compound_order(struct page *page) @@ -950,53 +611,9 @@ static inline unsigned int compound_order(struct page *page) return page[1].compound_order; } -static inline bool hpage_pincount_available(struct page *page) -{ - /* - * Can the page->hpage_pinned_refcount field be used? That field is in - * the 3rd page of the compound page, so the smallest (2-page) compound - * pages cannot support it. - */ - page = compound_head(page); - return PageCompound(page) && compound_order(page) > 1; -} - -static inline int head_compound_pincount(struct page *head) -{ - return atomic_read(compound_pincount_ptr(head)); -} - -static inline int compound_pincount(struct page *page) -{ - VM_BUG_ON_PAGE(!hpage_pincount_available(page), page); - page = compound_head(page); - return head_compound_pincount(page); -} - static inline void set_compound_order(struct page *page, unsigned int order) { page[1].compound_order = order; - page[1].compound_nr = 1U << order; -} - -/* Returns the number of pages in this potentially compound page. 
*/ -static inline unsigned long compound_nr(struct page *page) -{ - if (!PageHead(page)) - return 1; - return page[1].compound_nr; -} - -/* Returns the number of bytes in this potentially compound page. */ -static inline unsigned long page_size(struct page *page) -{ - return PAGE_SIZE << compound_order(page); -} - -/* Returns the number of bits needed for the number of bytes in a page */ -static inline unsigned int page_shift(struct page *page) -{ - return PAGE_SHIFT + compound_order(page); } void free_compound_page(struct page *page); @@ -1015,11 +632,8 @@ static inline pte_t maybe_mkwrite(pte_t pte, struct vm_area_struct *vma) return pte; } -vm_fault_t do_set_pmd(struct vm_fault *vmf, struct page *page); -void do_set_pte(struct vm_fault *vmf, struct page *page, unsigned long addr); - -vm_fault_t finish_fault(struct vm_fault *vmf); -vm_fault_t finish_mkwrite_fault(struct vm_fault *vmf); +int alloc_set_pte(struct fault_env *fe, struct mem_cgroup *memcg, + struct page *page); #endif /* @@ -1069,7 +683,7 @@ vm_fault_t finish_mkwrite_fault(struct vm_fault *vmf); * refcount. The each user mapping also has a reference to the page. * * The pagecache pages are stored in a per-mapping radix tree, which is - * rooted at mapping->i_pages, and indexed by offset. + * rooted at mapping->page_tree, and indexed by offset. * Where 2.4 and early 2.6 kernels kept dirty/clean pages in per-address_space * lists, we instead now tag pages as dirty/writeback in the radix tree. * @@ -1092,7 +706,6 @@ vm_fault_t finish_mkwrite_fault(struct vm_fault *vmf); #define NODES_PGOFF (SECTIONS_PGOFF - NODES_WIDTH) #define ZONES_PGOFF (NODES_PGOFF - ZONES_WIDTH) #define LAST_CPUPID_PGOFF (ZONES_PGOFF - LAST_CPUPID_WIDTH) -#define KASAN_TAG_PGOFF (LAST_CPUPID_PGOFF - KASAN_TAG_WIDTH) /* * Define the bit shifts to access each section. 
For non-existent @@ -1103,7 +716,6 @@ vm_fault_t finish_mkwrite_fault(struct vm_fault *vmf); #define NODES_PGSHIFT (NODES_PGOFF * (NODES_WIDTH != 0)) #define ZONES_PGSHIFT (ZONES_PGOFF * (ZONES_WIDTH != 0)) #define LAST_CPUPID_PGSHIFT (LAST_CPUPID_PGOFF * (LAST_CPUPID_WIDTH != 0)) -#define KASAN_TAG_PGSHIFT (KASAN_TAG_PGOFF * (KASAN_TAG_WIDTH != 0)) /* NODE:ZONE or SECTION:ZONE is used to ID a zone for the buddy allocator */ #ifdef NODE_NOT_IN_PAGE_FLAGS @@ -1118,91 +730,41 @@ vm_fault_t finish_mkwrite_fault(struct vm_fault *vmf); #define ZONEID_PGSHIFT (ZONEID_PGOFF * (ZONEID_SHIFT != 0)) +#if SECTIONS_WIDTH+NODES_WIDTH+ZONES_WIDTH > BITS_PER_LONG - NR_PAGEFLAGS +#error SECTIONS_WIDTH+NODES_WIDTH+ZONES_WIDTH > BITS_PER_LONG - NR_PAGEFLAGS +#endif + #define ZONES_MASK ((1UL << ZONES_WIDTH) - 1) #define NODES_MASK ((1UL << NODES_WIDTH) - 1) #define SECTIONS_MASK ((1UL << SECTIONS_WIDTH) - 1) #define LAST_CPUPID_MASK ((1UL << LAST_CPUPID_SHIFT) - 1) -#define KASAN_TAG_MASK ((1UL << KASAN_TAG_WIDTH) - 1) #define ZONEID_MASK ((1UL << ZONEID_SHIFT) - 1) static inline enum zone_type page_zonenum(const struct page *page) { - ASSERT_EXCLUSIVE_BITS(page->flags, ZONES_MASK << ZONES_PGSHIFT); return (page->flags >> ZONES_PGSHIFT) & ZONES_MASK; } #ifdef CONFIG_ZONE_DEVICE +void get_zone_device_page(struct page *page); +void put_zone_device_page(struct page *page); static inline bool is_zone_device_page(const struct page *page) { return page_zonenum(page) == ZONE_DEVICE; } -extern void memmap_init_zone_device(struct zone *, unsigned long, - unsigned long, struct dev_pagemap *); #else +static inline void get_zone_device_page(struct page *page) +{ +} +static inline void put_zone_device_page(struct page *page) +{ +} static inline bool is_zone_device_page(const struct page *page) { return false; } #endif -static inline bool is_zone_movable_page(const struct page *page) -{ - return page_zonenum(page) == ZONE_MOVABLE; -} - -#ifdef CONFIG_DEV_PAGEMAP_OPS -void 
free_devmap_managed_page(struct page *page); -DECLARE_STATIC_KEY_FALSE(devmap_managed_key); - -static inline bool page_is_devmap_managed(struct page *page) -{ - if (!static_branch_unlikely(&devmap_managed_key)) - return false; - if (!is_zone_device_page(page)) - return false; - switch (page->pgmap->type) { - case MEMORY_DEVICE_PRIVATE: - case MEMORY_DEVICE_FS_DAX: - return true; - default: - break; - } - return false; -} - -void put_devmap_managed_page(struct page *page); - -#else /* CONFIG_DEV_PAGEMAP_OPS */ -static inline bool page_is_devmap_managed(struct page *page) -{ - return false; -} - -static inline void put_devmap_managed_page(struct page *page) -{ -} -#endif /* CONFIG_DEV_PAGEMAP_OPS */ - -static inline bool is_device_private_page(const struct page *page) -{ - return IS_ENABLED(CONFIG_DEV_PAGEMAP_OPS) && - IS_ENABLED(CONFIG_DEVICE_PRIVATE) && - is_zone_device_page(page) && - page->pgmap->type == MEMORY_DEVICE_PRIVATE; -} - -static inline bool is_pci_p2pdma_page(const struct page *page) -{ - return IS_ENABLED(CONFIG_DEV_PAGEMAP_OPS) && - IS_ENABLED(CONFIG_PCI_P2PDMA) && - is_zone_device_page(page) && - page->pgmap->type == MEMORY_DEVICE_PCI_P2PDMA; -} - -/* 127: arbitrary random number, small enough to assemble well */ -#define page_ref_zero_or_close_to_overflow(page) \ - ((unsigned int) page_ref_count(page) + 127u <= 127u) - static inline void get_page(struct page *page) { page = compound_head(page); @@ -1210,145 +772,22 @@ static inline void get_page(struct page *page) * Getting a normal page or the head of a compound page * requires to already have an elevated page->_refcount. 
*/ - VM_BUG_ON_PAGE(page_ref_zero_or_close_to_overflow(page), page); + VM_BUG_ON_PAGE(page_ref_count(page) <= 0, page); page_ref_inc(page); -} -bool __must_check try_grab_page(struct page *page, unsigned int flags); -struct page *try_grab_compound_head(struct page *page, int refs, - unsigned int flags); - - -static inline __must_check bool try_get_page(struct page *page) -{ - page = compound_head(page); - if (WARN_ON_ONCE(page_ref_count(page) <= 0)) - return false; - page_ref_inc(page); - return true; + if (unlikely(is_zone_device_page(page))) + get_zone_device_page(page); } static inline void put_page(struct page *page) { page = compound_head(page); - /* - * For devmap managed pages we need to catch refcount transition from - * 2 to 1, when refcount reach one it means the page is free and we - * need to inform the device driver through callback. See - * include/linux/memremap.h and HMM for details. - */ - if (page_is_devmap_managed(page)) { - put_devmap_managed_page(page); - return; - } - if (put_page_testzero(page)) __put_page(page); -} -/* - * GUP_PIN_COUNTING_BIAS, and the associated functions that use it, overload - * the page's refcount so that two separate items are tracked: the original page - * reference count, and also a new count of how many pin_user_pages() calls were - * made against the page. ("gup-pinned" is another term for the latter). - * - * With this scheme, pin_user_pages() becomes special: such pages are marked as - * distinct from normal pages. As such, the unpin_user_page() call (and its - * variants) must be used in order to release gup-pinned pages. - * - * Choice of value: - * - * By making GUP_PIN_COUNTING_BIAS a power of two, debugging of page reference - * counts with respect to pin_user_pages() and unpin_user_page() becomes - * simpler, due to the fact that adding an even power of two to the page - * refcount has the effect of using only the upper N bits, for the code that - * counts up using the bias value. 
This means that the lower bits are left for - * the exclusive use of the original code that increments and decrements by one - * (or at least, by much smaller values than the bias value). - * - * Of course, once the lower bits overflow into the upper bits (and this is - * OK, because subtraction recovers the original values), then visual inspection - * no longer suffices to directly view the separate counts. However, for normal - * applications that don't have huge page reference counts, this won't be an - * issue. - * - * Locking: the lockless algorithm described in page_cache_get_speculative() - * and page_cache_gup_pin_speculative() provides safe operation for - * get_user_pages and page_mkclean and other calls that race to set up page - * table entries. - */ -#define GUP_PIN_COUNTING_BIAS (1U << 10) - -void unpin_user_page(struct page *page); -void unpin_user_pages_dirty_lock(struct page **pages, unsigned long npages, - bool make_dirty); -void unpin_user_page_range_dirty_lock(struct page *page, unsigned long npages, - bool make_dirty); -void unpin_user_pages(struct page **pages, unsigned long npages); - -/** - * page_maybe_dma_pinned - Report if a page is pinned for DMA. - * @page: The page. - * - * This function checks if a page has been pinned via a call to - * a function in the pin_user_pages() family. - * - * For non-huge pages, the return value is partially fuzzy: false is not fuzzy, - * because it means "definitely not pinned for DMA", but true means "probably - * pinned for DMA, but possibly a false positive due to having at least - * GUP_PIN_COUNTING_BIAS worth of normal page references". - * - * False positives are OK, because: a) it's unlikely for a page to get that many - * refcounts, and b) all the callers of this routine are expected to be able to - * deal gracefully with a false positive. - * - * For huge pages, the result will be exactly correct. 
That's because we have - * more tracking data available: the 3rd struct page in the compound page is - * used to track the pincount (instead using of the GUP_PIN_COUNTING_BIAS - * scheme). - * - * For more information, please see Documentation/core-api/pin_user_pages.rst. - * - * Return: True, if it is likely that the page has been "dma-pinned". - * False, if the page is definitely not dma-pinned. - */ -static inline bool page_maybe_dma_pinned(struct page *page) -{ - if (hpage_pincount_available(page)) - return compound_pincount(page) > 0; - - /* - * page_ref_count() is signed. If that refcount overflows, then - * page_ref_count() returns a negative value, and callers will avoid - * further incrementing the refcount. - * - * Here, for that overflow case, use the signed bit to count a little - * bit higher via unsigned math, and thus still get an accurate result. - */ - return ((unsigned int)page_ref_count(compound_head(page))) >= - GUP_PIN_COUNTING_BIAS; -} - -static inline bool is_cow_mapping(vm_flags_t flags) -{ - return (flags & (VM_SHARED | VM_MAYWRITE)) == VM_MAYWRITE; -} - -/* - * This should most likely only be called during fork() to see whether we - * should break the cow immediately for a page on the src mm. 
- */ -static inline bool page_needs_cow_for_dma(struct vm_area_struct *vma, - struct page *page) -{ - if (!is_cow_mapping(vma->vm_flags)) - return false; - - if (!test_bit(MMF_HAS_PINNED, &vma->vm_mm->flags)) - return false; - - return page_maybe_dma_pinned(page); + if (unlikely(is_zone_device_page(page))) + put_zone_device_page(page); } #if defined(CONFIG_SPARSEMEM) && !defined(CONFIG_SPARSEMEM_VMEMMAP) @@ -1368,14 +807,21 @@ static inline int page_zone_id(struct page *page) return (page->flags >> ZONEID_PGSHIFT) & ZONEID_MASK; } +static inline int zone_to_nid(struct zone *zone) +{ +#ifdef CONFIG_NUMA + return zone->node; +#else + return 0; +#endif +} + #ifdef NODE_NOT_IN_PAGE_FLAGS extern int page_to_nid(const struct page *page); #else static inline int page_to_nid(const struct page *page) { - struct page *p = (struct page *)page; - - return (PF_POISONED_CHECK(p)->flags >> NODES_PGSHIFT) & NODES_MASK; + return (page->flags >> NODES_PGSHIFT) & NODES_MASK; } #endif @@ -1476,7 +922,7 @@ static inline int cpu_pid_to_cpupid(int nid, int pid) static inline bool cpupid_pid_unset(int cpupid) { - return true; + return 1; } static inline void page_cpupid_reset_last(struct page *page) @@ -1489,53 +935,6 @@ static inline bool cpupid_match_pid(struct task_struct *task, int cpupid) } #endif /* CONFIG_NUMA_BALANCING */ -#if defined(CONFIG_KASAN_SW_TAGS) || defined(CONFIG_KASAN_HW_TAGS) - -/* - * KASAN per-page tags are stored xor'ed with 0xff. This allows to avoid - * setting tags for all pages to native kernel tag value 0xff, as the default - * value 0x00 maps to 0xff. 
- */ - -static inline u8 page_kasan_tag(const struct page *page) -{ - u8 tag = 0xff; - - if (kasan_enabled()) { - tag = (page->flags >> KASAN_TAG_PGSHIFT) & KASAN_TAG_MASK; - tag ^= 0xff; - } - - return tag; -} - -static inline void page_kasan_tag_set(struct page *page, u8 tag) -{ - if (kasan_enabled()) { - tag ^= 0xff; - page->flags &= ~(KASAN_TAG_MASK << KASAN_TAG_PGSHIFT); - page->flags |= (tag & KASAN_TAG_MASK) << KASAN_TAG_PGSHIFT; - } -} - -static inline void page_kasan_tag_reset(struct page *page) -{ - if (kasan_enabled()) - page_kasan_tag_set(page, 0xff); -} - -#else /* CONFIG_KASAN_SW_TAGS || CONFIG_KASAN_HW_TAGS */ - -static inline u8 page_kasan_tag(const struct page *page) -{ - return 0xff; -} - -static inline void page_kasan_tag_set(struct page *page, u8 tag) { } -static inline void page_kasan_tag_reset(struct page *page) { } - -#endif /* CONFIG_KASAN_SW_TAGS || CONFIG_KASAN_HW_TAGS */ - static inline struct zone *page_zone(const struct page *page) { return &NODE_DATA(page_to_nid(page))->node_zones[page_zonenum(page)]; @@ -1559,20 +958,6 @@ static inline unsigned long page_to_section(const struct page *page) } #endif -/* MIGRATE_CMA and ZONE_MOVABLE do not allow pin pages */ -#ifdef CONFIG_MIGRATION -static inline bool is_pinnable_page(struct page *page) -{ - return !(is_zone_movable_page(page) || is_migrate_cma_page(page)) || - is_zero_pfn(page_to_pfn(page)); -} -#else -static inline bool is_pinnable_page(struct page *page) -{ - return true; -} -#endif - static inline void set_page_zone(struct page *page, enum zone_type zone) { page->flags &= ~(ZONES_MASK << ZONES_PGSHIFT); @@ -1595,6 +980,28 @@ static inline void set_page_links(struct page *page, enum zone_type zone, #endif } +#ifdef CONFIG_MEMCG +static inline struct mem_cgroup *page_memcg(struct page *page) +{ + return page->mem_cgroup; +} +static inline struct mem_cgroup *page_memcg_rcu(struct page *page) +{ + WARN_ON_ONCE(!rcu_read_lock_held()); + return READ_ONCE(page->mem_cgroup); +} +#else 
+static inline struct mem_cgroup *page_memcg(struct page *page) +{ + return NULL; +} +static inline struct mem_cgroup *page_memcg_rcu(struct page *page) +{ + WARN_ON_ONCE(!rcu_read_lock_held()); + return NULL; +} +#endif + /* * Some inline functions in vmstat.h depend on page_zone() */ @@ -1669,14 +1076,13 @@ struct address_space *page_mapping(struct page *page); * ALLOC_NO_WATERMARKS and the low watermark was not * met implying that the system is under some pressure. */ -static inline bool page_is_pfmemalloc(const struct page *page) +static inline bool page_is_pfmemalloc(struct page *page) { /* - * lru.next has bit 1 set if the page is allocated from the - * pfmemalloc reserves. Callers may simply overwrite it if - * they do not need to preserve that information. + * Page index cannot be this large so this must be + * a pfmemalloc page. */ - return (uintptr_t)page->lru.next & BIT(1); + return page->index == -1UL; } /* @@ -1685,21 +1091,50 @@ static inline bool page_is_pfmemalloc(const struct page *page) */ static inline void set_page_pfmemalloc(struct page *page) { - page->lru.next = (void *)BIT(1); + page->index = -1UL; } static inline void clear_page_pfmemalloc(struct page *page) { - page->lru.next = NULL; + page->index = 0; } +/* + * Different kinds of faults, as returned by handle_mm_fault(). + * Used to decide whether a process gets delivered SIGBUS or + * just gets major/minor fault counters bumped up. + */ + +#define VM_FAULT_OOM 0x0001 +#define VM_FAULT_SIGBUS 0x0002 +#define VM_FAULT_MAJOR 0x0004 +#define VM_FAULT_WRITE 0x0008 /* Special case for get_user_pages */ +#define VM_FAULT_HWPOISON 0x0010 /* Hit poisoned small page */ +#define VM_FAULT_HWPOISON_LARGE 0x0020 /* Hit poisoned large page. 
Index encoded in upper bits */ +#define VM_FAULT_SIGSEGV 0x0040 + +#define VM_FAULT_NOPAGE 0x0100 /* ->fault installed the pte, not return page */ +#define VM_FAULT_LOCKED 0x0200 /* ->fault locked the returned page */ +#define VM_FAULT_RETRY 0x0400 /* ->fault blocked, must retry */ +#define VM_FAULT_FALLBACK 0x0800 /* huge page fault failed, fall back to small */ +#define VM_FAULT_DAX_LOCKED 0x1000 /* ->fault has locked DAX entry */ + +#define VM_FAULT_HWPOISON_LARGE_MASK 0xf000 /* encodes hpage index for large hwpoison */ + +#define VM_FAULT_ERROR (VM_FAULT_OOM | VM_FAULT_SIGBUS | VM_FAULT_SIGSEGV | \ + VM_FAULT_HWPOISON | VM_FAULT_HWPOISON_LARGE | \ + VM_FAULT_FALLBACK) + +/* Encode hstate index for a hwpoisoned large page */ +#define VM_FAULT_SET_HINDEX(x) ((x) << 12) +#define VM_FAULT_GET_HINDEX(x) (((x) >> 12) & 0xf) + /* * Can be called by the pagefault handler when it gets a VM_FAULT_OOM. */ extern void pagefault_out_of_memory(void); #define offset_in_page(p) ((unsigned long)(p) & ~PAGE_MASK) -#define offset_in_thp(page, p) ((unsigned long)(p) & (thp_size(page) - 1)) /* * Flags passed to show_mem() and show_free_areas() to suppress output in @@ -1707,15 +1142,22 @@ extern void pagefault_out_of_memory(void); */ #define SHOW_MEM_FILTER_NODES (0x0001u) /* disallowed nodes */ -extern void show_free_areas(unsigned int flags, nodemask_t *nodemask); +extern void show_free_areas(unsigned int flags); +extern bool skip_free_areas_node(unsigned int flags, int nid); -#ifdef CONFIG_MMU -extern bool can_do_mlock(void); +int shmem_zero_setup(struct vm_area_struct *); +#ifdef CONFIG_SHMEM +bool shmem_mapping(struct address_space *mapping); #else -static inline bool can_do_mlock(void) { return false; } +static inline bool shmem_mapping(struct address_space *mapping) +{ + return false; +} #endif -extern int user_shm_lock(size_t, struct ucounts *); -extern void user_shm_unlock(size_t, struct ucounts *); + +extern bool can_do_mlock(void); +extern int user_shm_lock(size_t, 
struct user_struct *); +extern void user_shm_unlock(size_t, struct user_struct *); /* * Parameter block passed down to zap_pte_range in exceptional cases. @@ -1724,38 +1166,80 @@ struct zap_details { struct address_space *check_mapping; /* Check page->mapping if set */ pgoff_t first_index; /* Lowest page->index to unmap */ pgoff_t last_index; /* Highest page->index to unmap */ - struct page *single_page; /* Locked page to be unmapped */ + bool ignore_dirty; /* Ignore dirty pages */ + bool check_swap_entries; /* Check also swap entries */ }; struct page *vm_normal_page(struct vm_area_struct *vma, unsigned long addr, - pte_t pte); + pte_t pte); struct page *vm_normal_page_pmd(struct vm_area_struct *vma, unsigned long addr, pmd_t pmd); -void zap_vma_ptes(struct vm_area_struct *vma, unsigned long address, - unsigned long size); +int zap_vma_ptes(struct vm_area_struct *vma, unsigned long address, + unsigned long size); void zap_page_range(struct vm_area_struct *vma, unsigned long address, - unsigned long size); + unsigned long size, struct zap_details *); void unmap_vmas(struct mmu_gather *tlb, struct vm_area_struct *start_vma, unsigned long start, unsigned long end); -struct mmu_notifier_range; +/** + * mm_walk - callbacks for walk_page_range + * @pmd_entry: if set, called for each non-empty PMD (3rd-level) entry + * this handler is required to be able to handle + * pmd_trans_huge() pmds. They may simply choose to + * split_huge_page() instead of handling it explicitly. + * @pte_entry: if set, called for each non-empty PTE (4th-level) entry + * @pte_hole: if set, called for each hole at all levels + * @hugetlb_entry: if set, called for each hugetlb entry + * @test_walk: caller specific callback function to determine whether + * we walk over the current vma or not. Returning 0 + * value means "do page table walk over the current vma," + * and a negative one means "abort current page table walk + * right now." 1 means "skip the current vma." 
+ * @mm: mm_struct representing the target process of page table walk + * @vma: vma currently walked (NULL if walking outside vmas) + * @private: private data for callbacks' usage + * + * (see the comment on walk_page_range() for more details) + */ +struct mm_walk { + int (*pmd_entry)(pmd_t *pmd, unsigned long addr, + unsigned long next, struct mm_walk *walk); + int (*pte_entry)(pte_t *pte, unsigned long addr, + unsigned long next, struct mm_walk *walk); + int (*pte_hole)(unsigned long addr, unsigned long next, + struct mm_walk *walk); + int (*hugetlb_entry)(pte_t *pte, unsigned long hmask, + unsigned long addr, unsigned long next, + struct mm_walk *walk); + int (*test_walk)(unsigned long addr, unsigned long next, + struct mm_walk *walk); + struct mm_struct *mm; + struct vm_area_struct *vma; + void *private; +}; +int walk_page_range(unsigned long addr, unsigned long end, + struct mm_walk *walk); +int walk_page_vma(struct vm_area_struct *vma, struct mm_walk *walk); void free_pgd_range(struct mmu_gather *tlb, unsigned long addr, unsigned long end, unsigned long floor, unsigned long ceiling); -int -copy_page_range(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma); -int follow_invalidate_pte(struct mm_struct *mm, unsigned long address, - struct mmu_notifier_range *range, pte_t **ptepp, - pmd_t **pmdpp, spinlock_t **ptlp); -int follow_pte(struct mm_struct *mm, unsigned long address, - pte_t **ptepp, spinlock_t **ptlp); +int copy_page_range(struct mm_struct *dst, struct mm_struct *src, + struct vm_area_struct *vma); +void unmap_mapping_range(struct address_space *mapping, + loff_t const holebegin, loff_t const holelen, int even_cows); int follow_pfn(struct vm_area_struct *vma, unsigned long address, unsigned long *pfn); int follow_phys(struct vm_area_struct *vma, unsigned long address, unsigned int flags, unsigned long *prot, resource_size_t *phys); -int generic_access_phys(struct vm_area_struct *vma, unsigned long addr, - void *buf, int len, int write); 
+ssize_t generic_access_phys(struct vm_area_struct *vma, unsigned long addr, + void *buf, size_t len, int write); + +static inline void unmap_shared_mapping_range(struct address_space *mapping, + loff_t const holebegin, loff_t const holelen) +{ + unmap_mapping_range(mapping, holebegin, holelen, 0); +} extern void truncate_pagecache(struct inode *inode, loff_t new); extern void truncate_setsize(struct inode *inode, loff_t newsize); @@ -1766,135 +1250,133 @@ int generic_error_remove_page(struct address_space *mapping, struct page *page); int invalidate_inode_page(struct page *page); #ifdef CONFIG_MMU -extern vm_fault_t handle_mm_fault(struct vm_area_struct *vma, - unsigned long address, unsigned int flags, - struct pt_regs *regs); -extern int fixup_user_fault(struct mm_struct *mm, +extern int handle_mm_fault(struct vm_area_struct *vma, unsigned long address, + unsigned int flags); +extern int fixup_user_fault(struct task_struct *tsk, struct mm_struct *mm, unsigned long address, unsigned int fault_flags, bool *unlocked); -void unmap_mapping_page(struct page *page); -void unmap_mapping_pages(struct address_space *mapping, - pgoff_t start, pgoff_t nr, bool even_cows); -void unmap_mapping_range(struct address_space *mapping, - loff_t const holebegin, loff_t const holelen, int even_cows); #else -static inline vm_fault_t handle_mm_fault(struct vm_area_struct *vma, - unsigned long address, unsigned int flags, - struct pt_regs *regs) +static inline int handle_mm_fault(struct vm_area_struct *vma, + unsigned long address, unsigned int flags) { /* should never happen if there's no MMU */ BUG(); return VM_FAULT_SIGBUS; } -static inline int fixup_user_fault(struct mm_struct *mm, unsigned long address, +static inline int fixup_user_fault(struct task_struct *tsk, + struct mm_struct *mm, unsigned long address, unsigned int fault_flags, bool *unlocked) { /* should never happen if there's no MMU */ BUG(); return -EFAULT; } -static inline void unmap_mapping_page(struct page *page) { } 
-static inline void unmap_mapping_pages(struct address_space *mapping, - pgoff_t start, pgoff_t nr, bool even_cows) { } -static inline void unmap_mapping_range(struct address_space *mapping, - loff_t const holebegin, loff_t const holelen, int even_cows) { } #endif -static inline void unmap_shared_mapping_range(struct address_space *mapping, - loff_t const holebegin, loff_t const holelen) -{ - unmap_mapping_range(mapping, holebegin, holelen, 0); -} +extern ssize_t access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, size_t len, + unsigned int gup_flags); +extern ssize_t access_remote_vm(struct mm_struct *mm, unsigned long addr, + void *buf, size_t len, unsigned int gup_flags); +extern ssize_t __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm, + unsigned long addr, void *buf, size_t len, unsigned int gup_flags); -extern int access_process_vm(struct task_struct *tsk, unsigned long addr, - void *buf, int len, unsigned int gup_flags); -extern int access_remote_vm(struct mm_struct *mm, unsigned long addr, - void *buf, int len, unsigned int gup_flags); -extern int __access_remote_vm(struct mm_struct *mm, unsigned long addr, - void *buf, int len, unsigned int gup_flags); - -long get_user_pages_remote(struct mm_struct *mm, +long get_user_pages_remote(struct task_struct *tsk, struct mm_struct *mm, unsigned long start, unsigned long nr_pages, unsigned int gup_flags, struct page **pages, - struct vm_area_struct **vmas, int *locked); -long pin_user_pages_remote(struct mm_struct *mm, - unsigned long start, unsigned long nr_pages, - unsigned int gup_flags, struct page **pages, - struct vm_area_struct **vmas, int *locked); + struct vm_area_struct **vmas); long get_user_pages(unsigned long start, unsigned long nr_pages, unsigned int gup_flags, struct page **pages, struct vm_area_struct **vmas); -long pin_user_pages(unsigned long start, unsigned long nr_pages, - unsigned int gup_flags, struct page **pages, - struct vm_area_struct **vmas); long 
get_user_pages_locked(unsigned long start, unsigned long nr_pages, unsigned int gup_flags, struct page **pages, int *locked); -long pin_user_pages_locked(unsigned long start, unsigned long nr_pages, - unsigned int gup_flags, struct page **pages, int *locked); +long __get_user_pages_unlocked(struct task_struct *tsk, struct mm_struct *mm, + unsigned long start, unsigned long nr_pages, + struct page **pages, unsigned int gup_flags); long get_user_pages_unlocked(unsigned long start, unsigned long nr_pages, struct page **pages, unsigned int gup_flags); -long pin_user_pages_unlocked(unsigned long start, unsigned long nr_pages, - struct page **pages, unsigned int gup_flags); +int get_user_pages_fast(unsigned long start, int nr_pages, int write, + struct page **pages); -int get_user_pages_fast(unsigned long start, int nr_pages, - unsigned int gup_flags, struct page **pages); -int pin_user_pages_fast(unsigned long start, int nr_pages, - unsigned int gup_flags, struct page **pages); +/* Container for pinned pfns / pages */ +struct frame_vector { + unsigned int nr_allocated; /* Number of frames we have space for */ + unsigned int nr_frames; /* Number of frames stored in ptrs array */ + bool got_ref; /* Did we pin pages by getting page ref? */ + bool is_pfns; /* Does array contain pages or pfns? */ + void *ptrs[0]; /* Array of pinned pfns / pages. 
Use + * pfns_vector_pages() or pfns_vector_pfns() + * for access */ +}; -int account_locked_vm(struct mm_struct *mm, unsigned long pages, bool inc); -int __account_locked_vm(struct mm_struct *mm, unsigned long pages, bool inc, - struct task_struct *task, bool bypass_rlim); +struct frame_vector *frame_vector_create(unsigned int nr_frames); +void frame_vector_destroy(struct frame_vector *vec); +int get_vaddr_frames(unsigned long start, unsigned int nr_pfns, + unsigned int gup_flags, struct frame_vector *vec); +void put_vaddr_frames(struct frame_vector *vec); +int frame_vector_to_pages(struct frame_vector *vec); +void frame_vector_to_pfns(struct frame_vector *vec); + +static inline unsigned int frame_vector_count(struct frame_vector *vec) +{ + return vec->nr_frames; +} + +static inline struct page **frame_vector_pages(struct frame_vector *vec) +{ + if (vec->is_pfns) { + int err = frame_vector_to_pages(vec); + + if (err) + return ERR_PTR(err); + } + return (struct page **)(vec->ptrs); +} + +static inline unsigned long *frame_vector_pfns(struct frame_vector *vec) +{ + if (!vec->is_pfns) + frame_vector_to_pfns(vec); + return (unsigned long *)(vec->ptrs); +} struct kvec; int get_kernel_pages(const struct kvec *iov, int nr_pages, int write, struct page **pages); +int get_kernel_page(unsigned long start, int write, struct page **pages); struct page *get_dump_page(unsigned long addr); extern int try_to_release_page(struct page * page, gfp_t gfp_mask); extern void do_invalidatepage(struct page *page, unsigned int offset, unsigned int length); +int __set_page_dirty_nobuffers(struct page *page); +int __set_page_dirty_no_writeback(struct page *page); int redirty_page_for_writepage(struct writeback_control *wbc, struct page *page); +void account_page_dirtied(struct page *page, struct address_space *mapping); void account_page_cleaned(struct page *page, struct address_space *mapping, struct bdi_writeback *wb); int set_page_dirty(struct page *page); int set_page_dirty_lock(struct 
page *page); -void __cancel_dirty_page(struct page *page); -static inline void cancel_dirty_page(struct page *page) -{ - /* Avoid atomic ops, locking, etc. when not actually needed. */ - if (PageDirty(page)) - __cancel_dirty_page(page); -} +void cancel_dirty_page(struct page *page); int clear_page_dirty_for_io(struct page *page); int get_cmdline(struct task_struct *task, char *buffer, int buflen); +static inline bool vma_is_anonymous(struct vm_area_struct *vma) +{ + return !vma->vm_ops; +} + +int vma_is_stack_for_current(struct vm_area_struct *vma); + extern unsigned long move_page_tables(struct vm_area_struct *vma, unsigned long old_addr, struct vm_area_struct *new_vma, unsigned long new_addr, unsigned long len, bool need_rmap_locks); - -/* - * Flags used by change_protection(). For now we make it a bitmap so - * that we can pass in multiple flags just like parameters. However - * for now all the callers are only use one of the flags at the same - * time. - */ -/* Whether we should allow dirty bit accounting */ -#define MM_CP_DIRTY_ACCT (1UL << 0) -/* Whether this protection change is for NUMA hints */ -#define MM_CP_PROT_NUMA (1UL << 1) -/* Whether this change is for write protecting */ -#define MM_CP_UFFD_WP (1UL << 2) /* do wp */ -#define MM_CP_UFFD_WP_RESOLVE (1UL << 3) /* Resolve wp */ -#define MM_CP_UFFD_WP_ALL (MM_CP_UFFD_WP | \ - MM_CP_UFFD_WP_RESOLVE) - extern unsigned long change_protection(struct vm_area_struct *vma, unsigned long start, unsigned long end, pgprot_t newprot, - unsigned long cp_flags); + int dirty_accountable, int prot_numa); extern int mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev, unsigned long start, unsigned long end, unsigned long newflags); @@ -1902,16 +1384,8 @@ extern int mprotect_fixup(struct vm_area_struct *vma, /* * doesn't attempt to fault and will return short. 
*/ -int get_user_pages_fast_only(unsigned long start, int nr_pages, - unsigned int gup_flags, struct page **pages); -int pin_user_pages_fast_only(unsigned long start, int nr_pages, - unsigned int gup_flags, struct page **pages); - -static inline bool get_user_page_fast_only(unsigned long addr, - unsigned int gup_flags, struct page **pagep) -{ - return get_user_pages_fast_only(addr, 1, gup_flags, pagep) == 1; -} +int __get_user_pages_fast(unsigned long start, int nr_pages, int write, + struct page **pages); /* * per-process(per-mm_struct) statistics. */ @@ -1930,27 +1404,19 @@ static inline unsigned long get_mm_counter(struct mm_struct *mm, int member) return (unsigned long)val; } -void mm_trace_rss_stat(struct mm_struct *mm, int member, long count); - static inline void add_mm_counter(struct mm_struct *mm, int member, long value) { - long count = atomic_long_add_return(value, &mm->rss_stat.count[member]); - - mm_trace_rss_stat(mm, member, count); + atomic_long_add(value, &mm->rss_stat.count[member]); } static inline void inc_mm_counter(struct mm_struct *mm, int member) { - long count = atomic_long_inc_return(&mm->rss_stat.count[member]); - - mm_trace_rss_stat(mm, member, count); + atomic_long_inc(&mm->rss_stat.count[member]); } static inline void dec_mm_counter(struct mm_struct *mm, int member) { - long count = atomic_long_dec_return(&mm->rss_stat.count[member]); - - mm_trace_rss_stat(mm, member, count); + atomic_long_dec(&mm->rss_stat.count[member]); } /* Optimized variant when page is already known not to be PageAnon */ @@ -2021,19 +1487,7 @@ static inline void sync_mm_rss(struct mm_struct *mm) } #endif -#ifndef CONFIG_ARCH_HAS_PTE_SPECIAL -static inline int pte_special(pte_t pte) -{ - return 0; -} - -static inline pte_t pte_mkspecial(pte_t pte) -{ - return pte; -} -#endif - -#ifndef CONFIG_ARCH_HAS_PTE_DEVMAP +#ifndef __HAVE_ARCH_PTE_DEVMAP static inline int pte_devmap(pte_t pte) { return 0; @@ -2052,41 +1506,21 @@ static inline pte_t *get_locked_pte(struct 
mm_struct *mm, unsigned long addr, return ptep; } -#ifdef __PAGETABLE_P4D_FOLDED -static inline int __p4d_alloc(struct mm_struct *mm, pgd_t *pgd, +#ifdef __PAGETABLE_PUD_FOLDED +static inline int __pud_alloc(struct mm_struct *mm, pgd_t *pgd, + unsigned long address) +{ + return 0; +} + +static inline int __pud_alloc_kernel(struct mm_struct *mm, pgd_t *pgd, unsigned long address) { return 0; } #else -int __p4d_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address); -#endif - -#if defined(__PAGETABLE_PUD_FOLDED) || !defined(CONFIG_MMU) -static inline int __pud_alloc(struct mm_struct *mm, p4d_t *p4d, - unsigned long address) -{ - return 0; -} -static inline void mm_inc_nr_puds(struct mm_struct *mm) {} -static inline void mm_dec_nr_puds(struct mm_struct *mm) {} - -#else -int __pud_alloc(struct mm_struct *mm, p4d_t *p4d, unsigned long address); - -static inline void mm_inc_nr_puds(struct mm_struct *mm) -{ - if (mm_pud_folded(mm)) - return; - atomic_long_add(PTRS_PER_PUD * sizeof(pud_t), &mm->pgtables_bytes); -} - -static inline void mm_dec_nr_puds(struct mm_struct *mm) -{ - if (mm_pud_folded(mm)) - return; - atomic_long_sub(PTRS_PER_PUD * sizeof(pud_t), &mm->pgtables_bytes); -} +int __pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address); +int __pud_alloc_kernel(struct mm_struct *mm, pgd_t *pgd, unsigned long address); #endif #if defined(__PAGETABLE_PMD_FOLDED) || !defined(CONFIG_MMU) @@ -2096,76 +1530,65 @@ static inline int __pmd_alloc(struct mm_struct *mm, pud_t *pud, return 0; } +static inline int __pmd_alloc_kernel(struct mm_struct *mm, pud_t *pud, + unsigned long address) +{ + return 0; +} + +static inline void mm_nr_pmds_init(struct mm_struct *mm) {} + +static inline unsigned long mm_nr_pmds(struct mm_struct *mm) +{ + return 0; +} + static inline void mm_inc_nr_pmds(struct mm_struct *mm) {} static inline void mm_dec_nr_pmds(struct mm_struct *mm) {} #else int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address); +int 
__pmd_alloc_kernel(struct mm_struct *mm, pud_t *pud, unsigned long address); + +static inline void mm_nr_pmds_init(struct mm_struct *mm) +{ + atomic_long_set(&mm->nr_pmds, 0); +} + +static inline unsigned long mm_nr_pmds(struct mm_struct *mm) +{ + return atomic_long_read(&mm->nr_pmds); +} static inline void mm_inc_nr_pmds(struct mm_struct *mm) { - if (mm_pmd_folded(mm)) - return; - atomic_long_add(PTRS_PER_PMD * sizeof(pmd_t), &mm->pgtables_bytes); + atomic_long_inc(&mm->nr_pmds); } static inline void mm_dec_nr_pmds(struct mm_struct *mm) { - if (mm_pmd_folded(mm)) - return; - atomic_long_sub(PTRS_PER_PMD * sizeof(pmd_t), &mm->pgtables_bytes); + atomic_long_dec(&mm->nr_pmds); } #endif -#ifdef CONFIG_MMU -static inline void mm_pgtables_bytes_init(struct mm_struct *mm) +int __pte_alloc(struct mm_struct *mm, pmd_t *pmd, unsigned long address); +int __pte_alloc_kernel(pmd_t *pmd, unsigned long address); + +/* + * The following ifdef needed to get the 4level-fixup.h header to work. + * Remove it when 4level-fixup.h has been removed. + */ +#if defined(CONFIG_MMU) && !defined(__ARCH_HAS_4LEVEL_HACK) +static inline pud_t *pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address) { - atomic_long_set(&mm->pgtables_bytes, 0); + return (unlikely(pgd_none(*pgd)) && __pud_alloc(mm, pgd, address))? 
+ NULL: pud_offset(pgd, address); } -static inline unsigned long mm_pgtables_bytes(const struct mm_struct *mm) +static inline pud_t *pud_alloc_kernel(struct mm_struct *mm, pgd_t *pgd, unsigned long address) { - return atomic_long_read(&mm->pgtables_bytes); -} - -static inline void mm_inc_nr_ptes(struct mm_struct *mm) -{ - atomic_long_add(PTRS_PER_PTE * sizeof(pte_t), &mm->pgtables_bytes); -} - -static inline void mm_dec_nr_ptes(struct mm_struct *mm) -{ - atomic_long_sub(PTRS_PER_PTE * sizeof(pte_t), &mm->pgtables_bytes); -} -#else - -static inline void mm_pgtables_bytes_init(struct mm_struct *mm) {} -static inline unsigned long mm_pgtables_bytes(const struct mm_struct *mm) -{ - return 0; -} - -static inline void mm_inc_nr_ptes(struct mm_struct *mm) {} -static inline void mm_dec_nr_ptes(struct mm_struct *mm) {} -#endif - -int __pte_alloc(struct mm_struct *mm, pmd_t *pmd); -int __pte_alloc_kernel(pmd_t *pmd); - -#if defined(CONFIG_MMU) - -static inline p4d_t *p4d_alloc(struct mm_struct *mm, pgd_t *pgd, - unsigned long address) -{ - return (unlikely(pgd_none(*pgd)) && __p4d_alloc(mm, pgd, address)) ? - NULL : p4d_offset(pgd, address); -} - -static inline pud_t *pud_alloc(struct mm_struct *mm, p4d_t *p4d, - unsigned long address) -{ - return (unlikely(p4d_none(*p4d)) && __pud_alloc(mm, p4d, address)) ? - NULL : pud_offset(p4d, address); + return (unlikely(pgd_none(*pgd)) && __pud_alloc_kernel(mm, pgd, address))? + NULL: pud_offset(pgd, address); } static inline pmd_t *pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address) @@ -2173,7 +1596,13 @@ static inline pmd_t *pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long a return (unlikely(pud_none(*pud)) && __pmd_alloc(mm, pud, address))? NULL: pmd_offset(pud, address); } -#endif /* CONFIG_MMU */ + +static inline pmd_t *pmd_alloc_kernel(struct mm_struct *mm, pud_t *pud, unsigned long address) +{ + return (unlikely(pud_none(*pud)) && __pmd_alloc_kernel(mm, pud, address))? 
+ NULL: pmd_offset(pud, address); +} +#endif /* CONFIG_MMU && !__ARCH_HAS_4LEVEL_HACK */ #if USE_SPLIT_PTE_PTLOCKS #if ALLOC_SPLIT_PTLOCKS @@ -2226,6 +1655,13 @@ static inline bool ptlock_init(struct page *page) return true; } +/* Reset page->mapping so free_pages_check won't complain. */ +static inline void pte_lock_deinit(struct page *page) +{ + page->mapping = NULL; + ptlock_free(page); +} + #else /* !USE_SPLIT_PTE_PTLOCKS */ /* * We use mm->page_table_lock to guard all pagetable pages of the mm. @@ -2236,7 +1672,7 @@ static inline spinlock_t *pte_lockptr(struct mm_struct *mm, pmd_t *pmd) } static inline void ptlock_cache_init(void) {} static inline bool ptlock_init(struct page *page) { return true; } -static inline void ptlock_free(struct page *page) {} +static inline void pte_lock_deinit(struct page *page) {} #endif /* USE_SPLIT_PTE_PTLOCKS */ static inline void pgtable_init(void) @@ -2245,20 +1681,18 @@ static inline void pgtable_init(void) pgtable_cache_init(); } -static inline bool pgtable_pte_page_ctor(struct page *page) +static inline bool pgtable_page_ctor(struct page *page) { if (!ptlock_init(page)) return false; - __SetPageTable(page); - inc_lruvec_page_state(page, NR_PAGETABLE); + inc_zone_page_state(page, NR_PAGETABLE); return true; } -static inline void pgtable_pte_page_dtor(struct page *page) +static inline void pgtable_page_dtor(struct page *page) { - ptlock_free(page); - __ClearPageTable(page); - dec_lruvec_page_state(page, NR_PAGETABLE); + pte_lock_deinit(page); + dec_zone_page_state(page, NR_PAGETABLE); } #define pte_offset_map_lock(mm, pmd, address, ptlp) \ @@ -2275,17 +1709,18 @@ static inline void pgtable_pte_page_dtor(struct page *page) pte_unmap(pte); \ } while (0) -#define pte_alloc(mm, pmd) (unlikely(pmd_none(*(pmd))) && __pte_alloc(mm, pmd)) +#define pte_alloc(mm, pmd, address) \ + (unlikely(pmd_none(*(pmd))) && __pte_alloc(mm, pmd, address)) #define pte_alloc_map(mm, pmd, address) \ - (pte_alloc(mm, pmd) ? 
NULL : pte_offset_map(pmd, address)) + (pte_alloc(mm, pmd, address) ? NULL : pte_offset_map(pmd, address)) #define pte_alloc_map_lock(mm, pmd, address, ptlp) \ - (pte_alloc(mm, pmd) ? \ + (pte_alloc(mm, pmd, address) ? \ NULL : pte_offset_map_lock(mm, pmd, address, ptlp)) #define pte_alloc_kernel(pmd, address) \ - ((unlikely(pmd_none(*(pmd))) && __pte_alloc_kernel(pmd))? \ + ((unlikely(pmd_none(*(pmd))) && __pte_alloc_kernel(pmd, address))? \ NULL: pte_offset_kernel(pmd, address)) #if USE_SPLIT_PMD_PTLOCKS @@ -2301,7 +1736,7 @@ static inline spinlock_t *pmd_lockptr(struct mm_struct *mm, pmd_t *pmd) return ptlock_ptr(pmd_to_page(pmd)); } -static inline bool pmd_ptlock_init(struct page *page) +static inline bool pgtable_pmd_page_ctor(struct page *page) { #ifdef CONFIG_TRANSPARENT_HUGEPAGE page->pmd_huge_pte = NULL; @@ -2309,7 +1744,7 @@ static inline bool pmd_ptlock_init(struct page *page) return ptlock_init(page); } -static inline void pmd_ptlock_free(struct page *page) +static inline void pgtable_pmd_page_dtor(struct page *page) { #ifdef CONFIG_TRANSPARENT_HUGEPAGE VM_BUG_ON_PAGE(page->pmd_huge_pte, page); @@ -2326,8 +1761,8 @@ static inline spinlock_t *pmd_lockptr(struct mm_struct *mm, pmd_t *pmd) return &mm->page_table_lock; } -static inline bool pmd_ptlock_init(struct page *page) { return true; } -static inline void pmd_ptlock_free(struct page *page) {} +static inline bool pgtable_pmd_page_ctor(struct page *page) { return true; } +static inline void pgtable_pmd_page_dtor(struct page *page) {} #define pmd_huge_pte(mm, pmd) ((mm)->pmd_huge_pte) @@ -2340,43 +1775,9 @@ static inline spinlock_t *pmd_lock(struct mm_struct *mm, pmd_t *pmd) return ptl; } -static inline bool pgtable_pmd_page_ctor(struct page *page) -{ - if (!pmd_ptlock_init(page)) - return false; - __SetPageTable(page); - inc_lruvec_page_state(page, NR_PAGETABLE); - return true; -} - -static inline void pgtable_pmd_page_dtor(struct page *page) -{ - pmd_ptlock_free(page); - __ClearPageTable(page); - 
dec_lruvec_page_state(page, NR_PAGETABLE); -} - -/* - * No scalability reason to split PUD locks yet, but follow the same pattern - * as the PMD locks to make it easier if we decide to. The VM should not be - * considered ready to switch to split PUD locks yet; there may be places - * which need to be converted from page_table_lock. - */ -static inline spinlock_t *pud_lockptr(struct mm_struct *mm, pud_t *pud) -{ - return &mm->page_table_lock; -} - -static inline spinlock_t *pud_lock(struct mm_struct *mm, pud_t *pud) -{ - spinlock_t *ptl = pud_lockptr(mm, pud); - - spin_lock(ptl); - return ptl; -} - -extern void __init pagecache_init(void); -extern void __init free_area_init_memoryless_node(int nid); +extern void free_area_init(unsigned long * zones_size); +extern void free_area_init_node(int nid, unsigned long * zones_size, + unsigned long zone_start_pfn, unsigned long *zholes_size); extern void free_initmem(void); /* @@ -2386,22 +1787,34 @@ extern void free_initmem(void); * Return pages freed into the buddy system. */ extern unsigned long free_reserved_area(void *start, void *end, - int poison, const char *s); + int poison, char *s); + +#ifdef CONFIG_HIGHMEM +/* + * Free a highmem page into the buddy system, adjusting totalhigh_pages + * and totalram_pages. + */ +extern void free_highmem_page(struct page *page); +#endif extern void adjust_managed_page_count(struct page *page, long count); -extern void mem_init_print_info(void); +extern void mem_init_print_info(const char *str); extern void reserve_bootmem_region(phys_addr_t start, phys_addr_t end); /* Free the reserved page into the buddy system, so it gets managed. 
*/ -static inline void free_reserved_page(struct page *page) +static inline void __free_reserved_page(struct page *page) { ClearPageReserved(page); init_page_count(page); __free_page(page); +} + +static inline void free_reserved_page(struct page *page) +{ + __free_reserved_page(page); adjust_managed_page_count(page, 1); } -#define free_highmem_page(page) free_reserved_page(page) static inline void mark_page_reserved(struct page *page) { @@ -2420,7 +1833,7 @@ static inline unsigned long free_initmem_default(int poison) extern char __init_begin[], __init_end[]; return free_reserved_area(&__init_begin, &__init_end, - poison, "unused kernel image (initmem)"); + poison, "unused kernel"); } static inline unsigned long get_num_physpages(void) @@ -2434,23 +1847,34 @@ static inline unsigned long get_num_physpages(void) return phys_pages; } +#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP /* - * Using memblock node mappings, an architecture may initialise its - * zones, allocate the backing mem_map and account for memory holes in an - * architecture independent manner. + * With CONFIG_HAVE_MEMBLOCK_NODE_MAP set, an architecture may initialise its + * zones, allocate the backing mem_map and account for memory holes in a more + * architecture independent manner. This is a substitute for creating the + * zone_sizes[] and zholes_size[] arrays and passing them to + * free_area_init_node() * * An architecture is expected to register range of page frames backed by * physical memory with memblock_add[_node]() before calling - * free_area_init() passing in the PFN each zone ends at. At a basic + * free_area_init_nodes() passing in the PFN each zone ends at. 
At a basic * usage, an architecture is expected to do something like * * unsigned long max_zone_pfns[MAX_NR_ZONES] = {max_dma, max_normal_pfn, * max_highmem_pfn}; * for_each_valid_physical_page_range() * memblock_add_node(base, size, nid) - * free_area_init(max_zone_pfns); + * free_area_init_nodes(max_zone_pfns); + * + * free_bootmem_with_active_regions() calls free_bootmem_node() for each + * registered physical page range. Similarly + * sparse_memory_present_with_active_regions() calls memory_present() for + * each range when SPARSEMEM is enabled. + * + * See mm/page_alloc.c for more information on each function exposed by + * CONFIG_HAVE_MEMBLOCK_NODE_MAP. */ -void free_area_init(unsigned long *max_zone_pfn); +extern void free_area_init_nodes(unsigned long *max_zone_pfn); unsigned long node_map_pfn_alignment(void); unsigned long __absent_pages_in_range(int nid, unsigned long start_pfn, unsigned long end_pfn); @@ -2459,26 +1883,35 @@ extern unsigned long absent_pages_in_range(unsigned long start_pfn, extern void get_pfn_range_for_nid(unsigned int nid, unsigned long *start_pfn, unsigned long *end_pfn); extern unsigned long find_min_pfn_with_active_regions(void); +extern void free_bootmem_with_active_regions(int nid, + unsigned long max_low_pfn); +extern void sparse_memory_present_with_active_regions(int nid); -#ifndef CONFIG_NUMA -static inline int early_pfn_to_nid(unsigned long pfn) +#endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */ + +#if !defined(CONFIG_HAVE_MEMBLOCK_NODE_MAP) && \ + !defined(CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID) +static inline int __early_pfn_to_nid(unsigned long pfn, + struct mminit_pfnnid_cache *state) { return 0; } #else /* please see mm/page_alloc.c */ extern int __meminit early_pfn_to_nid(unsigned long pfn); +/* there is a per-arch backend function. 
*/ +extern int __meminit __early_pfn_to_nid(unsigned long pfn, + struct mminit_pfnnid_cache *state); #endif extern void set_dma_reserve(unsigned long new_dma_reserve); -extern void memmap_init_range(unsigned long, int, unsigned long, - unsigned long, unsigned long, enum meminit_context, - struct vmem_altmap *, int migratetype); +extern void memmap_init_zone(unsigned long, int, unsigned long, + unsigned long, enum memmap_context); extern void setup_per_zone_wmarks(void); extern int __meminit init_per_zone_wmark_min(void); extern void mem_init(void); extern void __init mmap_init(void); -extern void show_mem(unsigned int flags, nodemask_t *nodemask); +extern void show_mem(unsigned int flags); extern long si_mem_available(void); extern void si_meminfo(struct sysinfo * val); extern void si_meminfo_node(struct sysinfo *val, int nid); @@ -2486,16 +1919,17 @@ extern void si_meminfo_node(struct sysinfo *val, int nid); extern unsigned long arch_reserved_kernel_pages(void); #endif -extern __printf(3, 4) -void warn_alloc(gfp_t gfp_mask, nodemask_t *nodemask, const char *fmt, ...); +extern __printf(2, 3) +void warn_alloc(gfp_t gfp_mask, const char *fmt, ...); extern void setup_per_cpu_pageset(void); +extern void zone_pcp_update(struct zone *zone); +extern void zone_pcp_reset(struct zone *zone); + /* page_alloc.c */ extern int min_free_kbytes; -extern int watermark_boost_factor; extern int watermark_scale_factor; -extern bool arch_has_descending_max_zone_pfns(void); /* nommu.c */ extern atomic_long_t mmap_pages_allocated; @@ -2503,13 +1937,13 @@ extern int nommu_shrink_inode_mappings(struct inode *, size_t, size_t); /* interval_tree.c */ void vma_interval_tree_insert(struct vm_area_struct *node, - struct rb_root_cached *root); + struct rb_root *root); void vma_interval_tree_insert_after(struct vm_area_struct *node, struct vm_area_struct *prev, - struct rb_root_cached *root); + struct rb_root *root); void vma_interval_tree_remove(struct vm_area_struct *node, - struct 
rb_root_cached *root); -struct vm_area_struct *vma_interval_tree_iter_first(struct rb_root_cached *root, + struct rb_root *root); +struct vm_area_struct *vma_interval_tree_iter_first(struct rb_root *root, unsigned long start, unsigned long last); struct vm_area_struct *vma_interval_tree_iter_next(struct vm_area_struct *node, unsigned long start, unsigned long last); @@ -2519,12 +1953,11 @@ struct vm_area_struct *vma_interval_tree_iter_next(struct vm_area_struct *node, vma; vma = vma_interval_tree_iter_next(vma, start, last)) void anon_vma_interval_tree_insert(struct anon_vma_chain *node, - struct rb_root_cached *root); + struct rb_root *root); void anon_vma_interval_tree_remove(struct anon_vma_chain *node, - struct rb_root_cached *root); -struct anon_vma_chain * -anon_vma_interval_tree_iter_first(struct rb_root_cached *root, - unsigned long start, unsigned long last); + struct rb_root *root); +struct anon_vma_chain *anon_vma_interval_tree_iter_first( + struct rb_root *root, unsigned long start, unsigned long last); struct anon_vma_chain *anon_vma_interval_tree_iter_next( struct anon_vma_chain *node, unsigned long start, unsigned long last); #ifdef CONFIG_DEBUG_VM_RB @@ -2550,10 +1983,8 @@ extern struct vm_area_struct *vma_merge(struct mm_struct *, unsigned long vm_flags, struct anon_vma *, struct file *, pgoff_t, struct mempolicy *, struct vm_userfaultfd_ctx); extern struct anon_vma *find_mergeable_anon_vma(struct vm_area_struct *); -extern int __split_vma(struct mm_struct *, struct vm_area_struct *, - unsigned long addr, int new_below); -extern int split_vma(struct mm_struct *, struct vm_area_struct *, - unsigned long addr, int new_below); +extern int split_vma(struct mm_struct *, + struct vm_area_struct *, unsigned long addr, int new_below); extern int insert_vm_struct(struct mm_struct *, struct vm_area_struct *); extern void __vma_link_rb(struct mm_struct *, struct vm_area_struct *, struct rb_node **, struct rb_node *); @@ -2563,12 +1994,23 @@ extern struct 
vm_area_struct *copy_vma(struct vm_area_struct **, bool *need_rmap_locks); extern void exit_mmap(struct mm_struct *); +#if defined(CONFIG_GRKERNSEC) && (defined(CONFIG_GRKERNSEC_RESLOG) || !defined(CONFIG_GRKERNSEC_NO_RBAC)) +extern void gr_learn_resource(const struct task_struct *task, const int res, + const unsigned long wanted, const int gt); +#else +static inline void gr_learn_resource(const struct task_struct *task, const int res, + const unsigned long wanted, const int gt) +{ +} +#endif + static inline int check_data_rlimit(unsigned long rlim, unsigned long new, unsigned long start, unsigned long end_data, unsigned long start_data) { + gr_learn_resource(current, RLIMIT_DATA, (new - start) + (end_data - start_data), 1); if (rlim < RLIM_INFINITY) { if (((new - start) + (end_data - start_data)) > rlim) return -ENOSPC; @@ -2580,8 +2022,7 @@ static inline int check_data_rlimit(unsigned long rlim, extern int mm_take_all_locks(struct mm_struct *mm); extern void mm_drop_all_locks(struct mm_struct *mm); -extern int set_mm_exe_file(struct mm_struct *mm, struct file *new_exe_file); -extern int replace_mm_exe_file(struct mm_struct *mm, struct file *new_exe_file); +extern void set_mm_exe_file(struct mm_struct *mm, struct file *new_exe_file); extern struct file *get_mm_exe_file(struct mm_struct *mm); extern struct file *get_task_exe_file(struct task_struct *task); @@ -2599,21 +2040,23 @@ extern int install_special_mapping(struct mm_struct *mm, unsigned long addr, unsigned long len, unsigned long flags, struct page **pages); -unsigned long randomize_stack_top(unsigned long stack_top); - extern unsigned long get_unmapped_area(struct file *, unsigned long, unsigned long, unsigned long, unsigned long); extern unsigned long mmap_region(struct file *file, unsigned long addr, - unsigned long len, vm_flags_t vm_flags, unsigned long pgoff, - struct list_head *uf); + unsigned long len, vm_flags_t vm_flags, unsigned long pgoff); extern unsigned long do_mmap(struct file *file, 
unsigned long addr, unsigned long len, unsigned long prot, unsigned long flags, - unsigned long pgoff, unsigned long *populate, struct list_head *uf); -extern int __do_munmap(struct mm_struct *, unsigned long, size_t, - struct list_head *uf, bool downgrade); -extern int do_munmap(struct mm_struct *, unsigned long, size_t, - struct list_head *uf); -extern int do_madvise(struct mm_struct *mm, unsigned long start, size_t len_in, int behavior); + vm_flags_t vm_flags, unsigned long pgoff, unsigned long *populate); +extern int do_munmap(struct mm_struct *, unsigned long, size_t); +extern int __do_munmap(struct mm_struct *, unsigned long, size_t); + +static inline unsigned long +do_mmap_pgoff(struct file *file, unsigned long addr, + unsigned long len, unsigned long prot, unsigned long flags, + unsigned long pgoff, unsigned long *populate) +{ + return do_mmap(file, addr, len, prot, flags, 0, pgoff, populate); +} #ifdef CONFIG_MMU extern int __mm_populate(unsigned long addr, unsigned long len, @@ -2629,7 +2072,6 @@ static inline void mm_populate(unsigned long addr, unsigned long len) {} /* These take the mm semaphore themselves */ extern int __must_check vm_brk(unsigned long, unsigned long); -extern int __must_check vm_brk_flags(unsigned long, unsigned long, unsigned long); extern int vm_munmap(unsigned long, size_t); extern unsigned long __must_check vm_mmap(struct file *, unsigned long, unsigned long, unsigned long, @@ -2643,9 +2085,29 @@ struct vm_unmapped_area_info { unsigned long high_limit; unsigned long align_mask; unsigned long align_offset; + unsigned long threadstack_offset; }; -extern unsigned long vm_unmapped_area(struct vm_unmapped_area_info *info); +extern unsigned long unmapped_area(const struct vm_unmapped_area_info *info); +extern unsigned long unmapped_area_topdown(const struct vm_unmapped_area_info *info); + +/* + * Search for an unmapped address range. 
+ * + * We are looking for a range that: + * - does not intersect with any VMA; + * - is contained within the [low_limit, high_limit) interval; + * - is at least the desired size. + * - satisfies (begin_addr & align_mask) == (align_offset & align_mask) + */ +static inline unsigned long +vm_unmapped_area(const struct vm_unmapped_area_info *info) +{ + if (info->flags & VM_UNMAPPED_AREA_TOPDOWN) + return unmapped_area_topdown(info); + else + return unmapped_area(info); +} /* truncate.c */ extern void truncate_inode_pages(struct address_space *, loff_t); @@ -2654,20 +2116,39 @@ extern void truncate_inode_pages_range(struct address_space *, extern void truncate_inode_pages_final(struct address_space *); /* generic vm_area_ops exported for stackable file systems */ -extern vm_fault_t filemap_fault(struct vm_fault *vmf); -extern vm_fault_t filemap_map_pages(struct vm_fault *vmf, +extern int filemap_fault(struct vm_area_struct *, struct vm_fault *); +extern void filemap_map_pages(struct fault_env *fe, pgoff_t start_pgoff, pgoff_t end_pgoff); -extern vm_fault_t filemap_page_mkwrite(struct vm_fault *vmf); +extern int filemap_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf); /* mm/page-writeback.c */ -int __must_check write_one_page(struct page *page); +int write_one_page(struct page *page, int wait); void task_dirty_inc(struct task_struct *tsk); -extern unsigned long stack_guard_gap; +/* readahead.c */ +#define VM_MAX_READAHEAD 128 /* kbytes */ +#define VM_MIN_READAHEAD 16 /* kbytes (includes current page) */ + +int force_page_cache_readahead(struct address_space *mapping, struct file *filp, + pgoff_t offset, unsigned long nr_to_read); + +void page_cache_sync_readahead(struct address_space *mapping, + struct file_ra_state *ra, + struct file *filp, + pgoff_t offset, + unsigned long size); + +void page_cache_async_readahead(struct address_space *mapping, + struct file_ra_state *ra, + struct file *filp, + struct page *pg, + pgoff_t offset, + unsigned long size); + 
/* Generic expand stack which grows the stack according to GROWS{UP,DOWN} */ extern int expand_stack(struct vm_area_struct *vma, unsigned long address); -/* CONFIG_STACK_GROWSUP still needs to grow downwards at some places */ +/* CONFIG_STACK_GROWSUP still needs to to grow downwards at some places */ extern int expand_downwards(struct vm_area_struct *vma, unsigned long address); #if VM_GROWSUP @@ -2681,69 +2162,20 @@ extern struct vm_area_struct * find_vma(struct mm_struct * mm, unsigned long add extern struct vm_area_struct * find_vma_prev(struct mm_struct * mm, unsigned long addr, struct vm_area_struct **pprev); -/** - * find_vma_intersection() - Look up the first VMA which intersects the interval - * @mm: The process address space. - * @start_addr: The inclusive start user address. - * @end_addr: The exclusive end user address. - * - * Returns: The first VMA within the provided range, %NULL otherwise. Assumes - * start_addr < end_addr. - */ -static inline -struct vm_area_struct *find_vma_intersection(struct mm_struct *mm, - unsigned long start_addr, - unsigned long end_addr) +extern struct vm_area_struct *pax_find_mirror_vma(struct vm_area_struct *vma); +extern __must_check long pax_mirror_vma(struct vm_area_struct *vma_m, struct vm_area_struct *vma); + +/* Look up the first VMA which intersects the interval start_addr..end_addr-1, + NULL if none. Assume start_addr < end_addr. */ +static inline struct vm_area_struct * find_vma_intersection(struct mm_struct * mm, unsigned long start_addr, unsigned long end_addr) { - struct vm_area_struct *vma = find_vma(mm, start_addr); + struct vm_area_struct * vma = find_vma(mm,start_addr); if (vma && end_addr <= vma->vm_start) vma = NULL; return vma; } -/** - * vma_lookup() - Find a VMA at a specific address - * @mm: The process address space. - * @addr: The user address. - * - * Return: The vm_area_struct at the given address, %NULL otherwise. 
- */ -static inline -struct vm_area_struct *vma_lookup(struct mm_struct *mm, unsigned long addr) -{ - struct vm_area_struct *vma = find_vma(mm, addr); - - if (vma && addr < vma->vm_start) - vma = NULL; - - return vma; -} - -static inline unsigned long vm_start_gap(struct vm_area_struct *vma) -{ - unsigned long vm_start = vma->vm_start; - - if (vma->vm_flags & VM_GROWSDOWN) { - vm_start -= stack_guard_gap; - if (vm_start > vma->vm_start) - vm_start = 0; - } - return vm_start; -} - -static inline unsigned long vm_end_gap(struct vm_area_struct *vma) -{ - unsigned long vm_end = vma->vm_end; - - if (vma->vm_flags & VM_GROWSUP) { - vm_end += stack_guard_gap; - if (vm_end < vma->vm_end) - vm_end = -PAGE_SIZE; - } - return vm_end; -} - static inline unsigned long vma_pages(struct vm_area_struct *vma) { return (vma->vm_end - vma->vm_start) >> PAGE_SHIFT; @@ -2761,17 +2193,11 @@ static inline struct vm_area_struct *find_exact_vma(struct mm_struct *mm, return vma; } -static inline bool range_in_vma(struct vm_area_struct *vma, - unsigned long start, unsigned long end) -{ - return (vma && vma->vm_start <= start && end <= vma->vm_end); -} - #ifdef CONFIG_MMU -pgprot_t vm_get_page_prot(unsigned long vm_flags); +pgprot_t vm_get_page_prot(vm_flags_t vm_flags); void vma_set_page_prot(struct vm_area_struct *vma); #else -static inline pgprot_t vm_get_page_prot(unsigned long vm_flags) +static inline pgprot_t vm_get_page_prot(vm_flags_t vm_flags) { return __pgprot(0); } @@ -2781,8 +2207,6 @@ static inline void vma_set_page_prot(struct vm_area_struct *vma) } #endif -void vma_set_file(struct vm_area_struct *vma, struct file *file); - #ifdef CONFIG_NUMA_BALANCING unsigned long change_prot_numa(struct vm_area_struct *vma, unsigned long start, unsigned long end); @@ -2791,59 +2215,27 @@ unsigned long change_prot_numa(struct vm_area_struct *vma, struct vm_area_struct *find_extend_vma(struct mm_struct *, unsigned long addr); int remap_pfn_range(struct vm_area_struct *, unsigned long addr, 
unsigned long pfn, unsigned long size, pgprot_t); -int remap_pfn_range_notrack(struct vm_area_struct *vma, unsigned long addr, - unsigned long pfn, unsigned long size, pgprot_t prot); int vm_insert_page(struct vm_area_struct *, unsigned long addr, struct page *); -int vm_insert_pages(struct vm_area_struct *vma, unsigned long addr, - struct page **pages, unsigned long *num); -int vm_map_pages(struct vm_area_struct *vma, struct page **pages, - unsigned long num); -int vm_map_pages_zero(struct vm_area_struct *vma, struct page **pages, - unsigned long num); -vm_fault_t vmf_insert_pfn(struct vm_area_struct *vma, unsigned long addr, +int vm_insert_pfn(struct vm_area_struct *vma, unsigned long addr, unsigned long pfn); -vm_fault_t vmf_insert_pfn_prot(struct vm_area_struct *vma, unsigned long addr, +int vm_insert_pfn_prot(struct vm_area_struct *vma, unsigned long addr, unsigned long pfn, pgprot_t pgprot); -vm_fault_t vmf_insert_mixed(struct vm_area_struct *vma, unsigned long addr, +int vm_insert_mixed(struct vm_area_struct *vma, unsigned long addr, pfn_t pfn); -vm_fault_t vmf_insert_mixed_prot(struct vm_area_struct *vma, unsigned long addr, - pfn_t pfn, pgprot_t pgprot); -vm_fault_t vmf_insert_mixed_mkwrite(struct vm_area_struct *vma, - unsigned long addr, pfn_t pfn); int vm_iomap_memory(struct vm_area_struct *vma, phys_addr_t start, unsigned long len); -static inline vm_fault_t vmf_insert_page(struct vm_area_struct *vma, - unsigned long addr, struct page *page) + +struct page *follow_page_mask(struct vm_area_struct *vma, + unsigned long address, unsigned int foll_flags, + unsigned int *page_mask); + +static inline struct page *follow_page(struct vm_area_struct *vma, + unsigned long address, unsigned int foll_flags) { - int err = vm_insert_page(vma, addr, page); - - if (err == -ENOMEM) - return VM_FAULT_OOM; - if (err < 0 && err != -EBUSY) - return VM_FAULT_SIGBUS; - - return VM_FAULT_NOPAGE; + unsigned int unused_page_mask; + return follow_page_mask(vma, address, 
foll_flags, &unused_page_mask); } -#ifndef io_remap_pfn_range -static inline int io_remap_pfn_range(struct vm_area_struct *vma, - unsigned long addr, unsigned long pfn, - unsigned long size, pgprot_t prot) -{ - return remap_pfn_range(vma, addr, pfn, size, pgprot_decrypted(prot)); -} -#endif - -static inline vm_fault_t vmf_error(int err) -{ - if (err == -ENOMEM) - return VM_FAULT_OOM; - return VM_FAULT_SIGBUS; -} - -struct page *follow_page(struct vm_area_struct *vma, unsigned long address, - unsigned int foll_flags); - #define FOLL_WRITE 0x01 /* check pte is writable */ #define FOLL_TOUCH 0x02 /* mark page accessed */ #define FOLL_GET 0x04 /* do get_page on page */ @@ -2852,6 +2244,7 @@ struct page *follow_page(struct vm_area_struct *vma, unsigned long address, #define FOLL_NOWAIT 0x20 /* if a disk transfer is needed, start the IO * and return without waiting upon it */ #define FOLL_POPULATE 0x40 /* fault in page */ +#define FOLL_SPLIT 0x80 /* don't return transhuge pages, split them */ #define FOLL_HWPOISON 0x100 /* check page is hwpoisoned */ #define FOLL_NUMA 0x200 /* force NUMA hinting page fault */ #define FOLL_MIGRATION 0x400 /* wait for page to replace migration entry */ @@ -2859,180 +2252,54 @@ struct page *follow_page(struct vm_area_struct *vma, unsigned long address, #define FOLL_MLOCK 0x1000 /* lock present pages */ #define FOLL_REMOTE 0x2000 /* we are working on non-current tsk/mm */ #define FOLL_COW 0x4000 /* internal GUP flag */ -#define FOLL_ANON 0x8000 /* don't do file mappings */ -#define FOLL_LONGTERM 0x10000 /* mapping lifetime is indefinite: see below */ -#define FOLL_SPLIT_PMD 0x20000 /* split huge pmd before returning */ -#define FOLL_PIN 0x40000 /* pages must be released via unpin_user_page */ -#define FOLL_FAST_ONLY 0x80000 /* gup_fast: prevent fall-back to slow gup */ -/* - * FOLL_PIN and FOLL_LONGTERM may be used in various combinations with each - * other. 
Here is what they mean, and how to use them: - * - * FOLL_LONGTERM indicates that the page will be held for an indefinite time - * period _often_ under userspace control. This is in contrast to - * iov_iter_get_pages(), whose usages are transient. - * - * FIXME: For pages which are part of a filesystem, mappings are subject to the - * lifetime enforced by the filesystem and we need guarantees that longterm - * users like RDMA and V4L2 only establish mappings which coordinate usage with - * the filesystem. Ideas for this coordination include revoking the longterm - * pin, delaying writeback, bounce buffer page writeback, etc. As FS DAX was - * added after the problem with filesystems was found FS DAX VMAs are - * specifically failed. Filesystem pages are still subject to bugs and use of - * FOLL_LONGTERM should be avoided on those pages. - * - * FIXME: Also NOTE that FOLL_LONGTERM is not supported in every GUP call. - * Currently only get_user_pages() and get_user_pages_fast() support this flag - * and calls to get_user_pages_[un]locked are specifically not allowed. This - * is due to an incompatibility with the FS DAX check and - * FAULT_FLAG_ALLOW_RETRY. - * - * In the CMA case: long term pins in a CMA region would unnecessarily fragment - * that region. And so, CMA attempts to migrate the page before pinning, when - * FOLL_LONGTERM is specified. - * - * FOLL_PIN indicates that a special kind of tracking (not just page->_refcount, - * but an additional pin counting system) will be invoked. This is intended for - * anything that gets a page reference and then touches page data (for example, - * Direct IO). This lets the filesystem know that some non-file-system entity is - * potentially changing the pages' data. In contrast to FOLL_GET (whose pages - * are released via put_page()), FOLL_PIN pages must be released, ultimately, by - * a call to unpin_user_page(). - * - * FOLL_PIN is similar to FOLL_GET: both of these pin pages. 
They use different - * and separate refcounting mechanisms, however, and that means that each has - * its own acquire and release mechanisms: - * - * FOLL_GET: get_user_pages*() to acquire, and put_page() to release. - * - * FOLL_PIN: pin_user_pages*() to acquire, and unpin_user_pages to release. - * - * FOLL_PIN and FOLL_GET are mutually exclusive for a given function call. - * (The underlying pages may experience both FOLL_GET-based and FOLL_PIN-based - * calls applied to them, and that's perfectly OK. This is a constraint on the - * callers, not on the pages.) - * - * FOLL_PIN should be set internally by the pin_user_pages*() APIs, never - * directly by the caller. That's in order to help avoid mismatches when - * releasing pages: get_user_pages*() pages must be released via put_page(), - * while pin_user_pages*() pages must be released via unpin_user_page(). - * - * Please see Documentation/core-api/pin_user_pages.rst for more information. - */ - -static inline int vm_fault_to_errno(vm_fault_t vm_fault, int foll_flags) -{ - if (vm_fault & VM_FAULT_OOM) - return -ENOMEM; - if (vm_fault & (VM_FAULT_HWPOISON | VM_FAULT_HWPOISON_LARGE)) - return (foll_flags & FOLL_HWPOISON) ? 
-EHWPOISON : -EFAULT; - if (vm_fault & (VM_FAULT_SIGBUS | VM_FAULT_SIGSEGV)) - return -EFAULT; - return 0; -} - -typedef int (*pte_fn_t)(pte_t *pte, unsigned long addr, void *data); +typedef int (*pte_fn_t)(pte_t *pte, pgtable_t token, unsigned long addr, + void *data); extern int apply_to_page_range(struct mm_struct *mm, unsigned long address, unsigned long size, pte_fn_t fn, void *data); -extern int apply_to_existing_page_range(struct mm_struct *mm, - unsigned long address, unsigned long size, - pte_fn_t fn, void *data); -extern void init_mem_debugging_and_hardening(void); + #ifdef CONFIG_PAGE_POISONING -extern void __kernel_poison_pages(struct page *page, int numpages); -extern void __kernel_unpoison_pages(struct page *page, int numpages); -extern bool _page_poisoning_enabled_early; -DECLARE_STATIC_KEY_FALSE(_page_poisoning_enabled); -static inline bool page_poisoning_enabled(void) -{ - return _page_poisoning_enabled_early; -} -/* - * For use in fast paths after init_mem_debugging() has run, or when a - * false negative result is not harmful when called too early. 
- */ -static inline bool page_poisoning_enabled_static(void) -{ - return static_branch_unlikely(&_page_poisoning_enabled); -} -static inline void kernel_poison_pages(struct page *page, int numpages) -{ - if (page_poisoning_enabled_static()) - __kernel_poison_pages(page, numpages); -} -static inline void kernel_unpoison_pages(struct page *page, int numpages) -{ - if (page_poisoning_enabled_static()) - __kernel_unpoison_pages(page, numpages); -} +extern bool page_poisoning_enabled(void); +extern void kernel_poison_pages(struct page *page, int numpages, int enable); +extern bool page_is_poisoned(struct page *page); #else static inline bool page_poisoning_enabled(void) { return false; } -static inline bool page_poisoning_enabled_static(void) { return false; } -static inline void __kernel_poison_pages(struct page *page, int nunmpages) { } -static inline void kernel_poison_pages(struct page *page, int numpages) { } -static inline void kernel_unpoison_pages(struct page *page, int numpages) { } +static inline void kernel_poison_pages(struct page *page, int numpages, + int enable) { } +static inline bool page_is_poisoned(struct page *page) { return false; } #endif -DECLARE_STATIC_KEY_MAYBE(CONFIG_INIT_ON_ALLOC_DEFAULT_ON, init_on_alloc); -static inline bool want_init_on_alloc(gfp_t flags) -{ - if (static_branch_maybe(CONFIG_INIT_ON_ALLOC_DEFAULT_ON, - &init_on_alloc)) - return true; - return flags & __GFP_ZERO; -} - -DECLARE_STATIC_KEY_MAYBE(CONFIG_INIT_ON_FREE_DEFAULT_ON, init_on_free); -static inline bool want_init_on_free(void) -{ - return static_branch_maybe(CONFIG_INIT_ON_FREE_DEFAULT_ON, - &init_on_free); -} - -extern bool _debug_pagealloc_enabled_early; -DECLARE_STATIC_KEY_FALSE(_debug_pagealloc_enabled); +#ifdef CONFIG_DEBUG_PAGEALLOC +extern bool _debug_pagealloc_enabled; +extern void __kernel_map_pages(struct page *page, int numpages, int enable); static inline bool debug_pagealloc_enabled(void) { - return IS_ENABLED(CONFIG_DEBUG_PAGEALLOC) && - 
_debug_pagealloc_enabled_early; + return _debug_pagealloc_enabled; } -/* - * For use in fast paths after init_debug_pagealloc() has run, or when a - * false negative result is not harmful when called too early. - */ -static inline bool debug_pagealloc_enabled_static(void) +static inline void +kernel_map_pages(struct page *page, int numpages, int enable) { - if (!IS_ENABLED(CONFIG_DEBUG_PAGEALLOC)) - return false; + if (!debug_pagealloc_enabled()) + return; - return static_branch_unlikely(&_debug_pagealloc_enabled); -} - -#ifdef CONFIG_DEBUG_PAGEALLOC -/* - * To support DEBUG_PAGEALLOC architecture must ensure that - * __kernel_map_pages() never fails - */ -extern void __kernel_map_pages(struct page *page, int numpages, int enable); - -static inline void debug_pagealloc_map_pages(struct page *page, int numpages) -{ - if (debug_pagealloc_enabled_static()) - __kernel_map_pages(page, numpages, 1); -} - -static inline void debug_pagealloc_unmap_pages(struct page *page, int numpages) -{ - if (debug_pagealloc_enabled_static()) - __kernel_map_pages(page, numpages, 0); + __kernel_map_pages(page, numpages, enable); } +#ifdef CONFIG_HIBERNATION +extern bool kernel_page_present(struct page *page); +#endif /* CONFIG_HIBERNATION */ #else /* CONFIG_DEBUG_PAGEALLOC */ -static inline void debug_pagealloc_map_pages(struct page *page, int numpages) {} -static inline void debug_pagealloc_unmap_pages(struct page *page, int numpages) {} +static inline void +kernel_map_pages(struct page *page, int numpages, int enable) {} +#ifdef CONFIG_HIBERNATION +static inline bool kernel_page_present(struct page *page) { return true; } +#endif /* CONFIG_HIBERNATION */ +static inline bool debug_pagealloc_enabled(void) +{ + return false; +} #endif /* CONFIG_DEBUG_PAGEALLOC */ #ifdef __HAVE_ARCH_GATE_AREA @@ -3055,8 +2322,8 @@ extern bool process_shares_mm(struct task_struct *p, struct mm_struct *mm); #ifdef CONFIG_SYSCTL extern int sysctl_drop_caches; -int drop_caches_sysctl_handler(struct ctl_table *, 
int, void *, size_t *, - loff_t *); +int drop_caches_sysctl_handler(struct ctl_table *, int, + void __user *, size_t *, loff_t *); #endif void drop_slab(void); @@ -3069,44 +2336,38 @@ extern int randomize_va_space; #endif const char * arch_vma_name(struct vm_area_struct *vma); -#ifdef CONFIG_MMU void print_vma_addr(char *prefix, unsigned long rip); -#else -static inline void print_vma_addr(char *prefix, unsigned long rip) -{ -} -#endif -int vmemmap_remap_free(unsigned long start, unsigned long end, - unsigned long reuse); -int vmemmap_remap_alloc(unsigned long start, unsigned long end, - unsigned long reuse, gfp_t gfp_mask); +void sparse_mem_maps_populate_node(struct page **map_map, + unsigned long pnum_begin, + unsigned long pnum_end, + unsigned long map_count, + int nodeid); -void *sparse_buffer_alloc(unsigned long size); -struct page * __populate_section_memmap(unsigned long pfn, - unsigned long nr_pages, int nid, struct vmem_altmap *altmap); +struct page *sparse_mem_map_populate(unsigned long pnum, int nid); pgd_t *vmemmap_pgd_populate(unsigned long addr, int node); -p4d_t *vmemmap_p4d_populate(pgd_t *pgd, unsigned long addr, int node); -pud_t *vmemmap_pud_populate(p4d_t *p4d, unsigned long addr, int node); +pud_t *vmemmap_pud_populate(pgd_t *pgd, unsigned long addr, int node); pmd_t *vmemmap_pmd_populate(pud_t *pud, unsigned long addr, int node); -pte_t *vmemmap_pte_populate(pmd_t *pmd, unsigned long addr, int node, - struct vmem_altmap *altmap); +pte_t *vmemmap_pte_populate(pmd_t *pmd, unsigned long addr, int node); void *vmemmap_alloc_block(unsigned long size, int node); struct vmem_altmap; -void *vmemmap_alloc_block_buf(unsigned long size, int node, - struct vmem_altmap *altmap); +void *__vmemmap_alloc_block_buf(unsigned long size, int node, + struct vmem_altmap *altmap); +static inline void *vmemmap_alloc_block_buf(unsigned long size, int node) +{ + return __vmemmap_alloc_block_buf(size, node, NULL); +} + void vmemmap_verify(pte_t *, int, unsigned long, 
unsigned long); int vmemmap_populate_basepages(unsigned long start, unsigned long end, - int node, struct vmem_altmap *altmap); -int vmemmap_populate(unsigned long start, unsigned long end, int node, - struct vmem_altmap *altmap); + int node); +int vmemmap_populate(unsigned long start, unsigned long end, int node); void vmemmap_populate_print_last(void); #ifdef CONFIG_MEMORY_HOTPLUG -void vmemmap_free(unsigned long start, unsigned long end, - struct vmem_altmap *altmap); +void vmemmap_free(unsigned long start, unsigned long end); #endif void register_page_bootmem_memmap(unsigned long section_nr, struct page *map, - unsigned long nr_pages); + unsigned long size); enum mf_flags { MF_COUNT_INCREASED = 1 << 0, @@ -3114,15 +2375,16 @@ enum mf_flags { MF_MUST_KILL = 1 << 2, MF_SOFT_OFFLINE = 1 << 3, }; -extern int memory_failure(unsigned long pfn, int flags); -extern void memory_failure_queue(unsigned long pfn, int flags); -extern void memory_failure_queue_kick(int cpu); +extern int memory_failure(unsigned long pfn, int trapno, int flags); +extern void memory_failure_queue(unsigned long pfn, int trapno, int flags); extern int unpoison_memory(unsigned long pfn); +extern int get_hwpoison_page(struct page *page); +#define put_hwpoison_page(page) put_page(page) extern int sysctl_memory_failure_early_kill; extern int sysctl_memory_failure_recovery; -extern void shake_page(struct page *p); -extern atomic_long_t num_poisoned_pages __read_mostly; -extern int soft_offline_page(unsigned long pfn, int flags); +extern void shake_page(struct page *p, int access); +extern atomic_long_unchecked_t num_poisoned_pages; +extern int soft_offline_page(struct page *page, int flags); /* @@ -3143,7 +2405,6 @@ enum mf_action_page_type { MF_MSG_POISONED_HUGE, MF_MSG_HUGE, MF_MSG_FREE_HUGE, - MF_MSG_NON_PMD_HUGE, MF_MSG_UNMAP_FAILED, MF_MSG_DIRTY_SWAPCACHE, MF_MSG_CLEAN_SWAPCACHE, @@ -3156,45 +2417,24 @@ enum mf_action_page_type { MF_MSG_TRUNCATED_LRU, MF_MSG_BUDDY, MF_MSG_BUDDY_2ND, - MF_MSG_DAX, 
- MF_MSG_UNSPLIT_THP, MF_MSG_UNKNOWN, }; #if defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined(CONFIG_HUGETLBFS) extern void clear_huge_page(struct page *page, - unsigned long addr_hint, + unsigned long addr, unsigned int pages_per_huge_page); extern void copy_user_huge_page(struct page *dst, struct page *src, - unsigned long addr_hint, - struct vm_area_struct *vma, + unsigned long addr, struct vm_area_struct *vma, unsigned int pages_per_huge_page); -extern long copy_huge_page_from_user(struct page *dst_page, - const void __user *usr_src, - unsigned int pages_per_huge_page, - bool allow_pagefault); - -/** - * vma_is_special_huge - Are transhuge page-table entries considered special? - * @vma: Pointer to the struct vm_area_struct to consider - * - * Whether transhuge page-table entries are considered "special" following - * the definition in vm_normal_page(). - * - * Return: true if transhuge page-table entries should be considered special, - * false otherwise. - */ -static inline bool vma_is_special_huge(const struct vm_area_struct *vma) -{ - return vma_is_dax(vma) || (vma->vm_file && - (vma->vm_flags & (VM_PFNMAP | VM_MIXEDMAP))); -} - #endif /* CONFIG_TRANSPARENT_HUGEPAGE || CONFIG_HUGETLBFS */ +extern struct page_ext_operations debug_guardpage_ops; +extern struct page_ext_operations page_poisoning_ops; + #ifdef CONFIG_DEBUG_PAGEALLOC extern unsigned int _debug_guardpage_minorder; -DECLARE_STATIC_KEY_FALSE(_debug_guardpage_enabled); +extern bool _debug_guardpage_enabled; static inline unsigned int debug_guardpage_minorder(void) { @@ -3203,15 +2443,21 @@ static inline unsigned int debug_guardpage_minorder(void) static inline bool debug_guardpage_enabled(void) { - return static_branch_unlikely(&_debug_guardpage_enabled); + return _debug_guardpage_enabled; } static inline bool page_is_guard(struct page *page) { + struct page_ext *page_ext; + if (!debug_guardpage_enabled()) return false; - return PageGuard(page); + page_ext = lookup_page_ext(page); + if 
(unlikely(!page_ext)) + return false; + + return test_bit(PAGE_EXT_DEBUG_GUARD, &page_ext->flags); } #else static inline unsigned int debug_guardpage_minorder(void) { return 0; } @@ -3225,64 +2471,11 @@ void __init setup_nr_node_ids(void); static inline void setup_nr_node_ids(void) {} #endif -extern int memcmp_pages(struct page *page1, struct page *page2); - -static inline int pages_identical(struct page *page1, struct page *page2) -{ - return !memcmp_pages(page1, page2); -} - -#ifdef CONFIG_MAPPING_DIRTY_HELPERS -unsigned long clean_record_shared_mapping_range(struct address_space *mapping, - pgoff_t first_index, pgoff_t nr, - pgoff_t bitmap_pgoff, - unsigned long *bitmap, - pgoff_t *start, - pgoff_t *end); - -unsigned long wp_shared_mapping_range(struct address_space *mapping, - pgoff_t first_index, pgoff_t nr); -#endif - -extern int sysctl_nr_trim_pages; - -#ifdef CONFIG_PRINTK -void mem_dump_obj(void *object); +#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT +extern void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot); #else -static inline void mem_dump_obj(void *object) {} +static inline void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot) {} #endif -/** - * seal_check_future_write - Check for F_SEAL_FUTURE_WRITE flag and handle it - * @seals: the seals to check - * @vma: the vma to operate on - * - * Check whether F_SEAL_FUTURE_WRITE is set; if so, do proper check/handling on - * the vma flags. Return 0 if check pass, or <0 for errors. - */ -static inline int seal_check_future_write(int seals, struct vm_area_struct *vma) -{ - if (seals & F_SEAL_FUTURE_WRITE) { - /* - * New PROT_WRITE and MAP_SHARED mmaps are not allowed when - * "future write" seal active. 
- */ - if ((vma->vm_flags & VM_SHARED) && (vma->vm_flags & VM_WRITE)) - return -EPERM; - - /* - * Since an F_SEAL_FUTURE_WRITE sealed memfd can be mapped as - * MAP_SHARED and read-only, take care to not allow mprotect to - * revert protections on such mappings. Do this only for shared - * mappings. For private mappings, don't need to mask - * VM_MAYWRITE as we still want them to be COW-writable. - */ - if (vma->vm_flags & VM_SHARED) - vma->vm_flags &= ~(VM_MAYWRITE); - } - - return 0; -} - #endif /* __KERNEL__ */ #endif /* _LINUX_MM_H */ diff --git a/include/linux/mm_inline.h b/include/linux/mm_inline.h index 355ea1ee32..41d376e711 100644 --- a/include/linux/mm_inline.h +++ b/include/linux/mm_inline.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef LINUX_MM_INLINE_H #define LINUX_MM_INLINE_H @@ -6,54 +5,95 @@ #include /** - * page_is_file_lru - should the page be on a file LRU or anon LRU? + * page_is_file_cache - should the page be on a file LRU or anon LRU? * @page: the page to test * - * Returns 1 if @page is a regular filesystem backed page cache page or a lazily - * freed anonymous page (e.g. via MADV_FREE). Returns 0 if @page is a normal - * anonymous page, a tmpfs page or otherwise ram or swap backed page. Used by - * functions that manipulate the LRU lists, to sort a page onto the right LRU - * list. + * Returns 1 if @page is page cache page backed by a regular filesystem, + * or 0 if @page is anonymous, tmpfs or otherwise ram or swap backed. + * Used by functions that manipulate the LRU lists, to sort a page + * onto the right LRU list. * * We would like to get this info without a page flag, but the state * needs to survive until the page is last deleted from the LRU, which * could be as far down as __page_cache_release. 
*/ -static inline int page_is_file_lru(struct page *page) +static inline int page_is_file_cache(struct page *page) { return !PageSwapBacked(page); } +static __always_inline void __update_lru_size(struct lruvec *lruvec, + enum lru_list lru, enum zone_type zid, + int nr_pages) +{ + struct pglist_data *pgdat = lruvec_pgdat(lruvec); + + __mod_node_page_state(pgdat, NR_LRU_BASE + lru, nr_pages); + __mod_zone_page_state(&pgdat->node_zones[zid], + NR_ZONE_LRU_BASE + lru, nr_pages); +} + static __always_inline void update_lru_size(struct lruvec *lruvec, enum lru_list lru, enum zone_type zid, int nr_pages) { - struct pglist_data *pgdat = lruvec_pgdat(lruvec); - - __mod_lruvec_state(lruvec, NR_LRU_BASE + lru, nr_pages); - __mod_zone_page_state(&pgdat->node_zones[zid], - NR_ZONE_LRU_BASE + lru, nr_pages); + __update_lru_size(lruvec, lru, zid, nr_pages); #ifdef CONFIG_MEMCG mem_cgroup_update_lru_size(lruvec, lru, zid, nr_pages); #endif } -/** - * __clear_page_lru_flags - clear page lru flags before releasing a page - * @page: the page that was on lru and now has a zero reference - */ -static __always_inline void __clear_page_lru_flags(struct page *page) +static __always_inline void add_page_to_lru_list(struct page *page, + struct lruvec *lruvec, enum lru_list lru) { - VM_BUG_ON_PAGE(!PageLRU(page), page); + update_lru_size(lruvec, lru, page_zonenum(page), hpage_nr_pages(page)); + list_add(&page->lru, &lruvec->lists[lru]); +} - __ClearPageLRU(page); +static __always_inline void del_page_from_lru_list(struct page *page, + struct lruvec *lruvec, enum lru_list lru) +{ + list_del(&page->lru); + update_lru_size(lruvec, lru, page_zonenum(page), -hpage_nr_pages(page)); +} - /* this shouldn't happen, so leave the flags to bad_page() */ - if (PageActive(page) && PageUnevictable(page)) - return; +/** + * page_lru_base_type - which LRU list type should a page be on? + * @page: the page to test + * + * Used for LRU list index arithmetic. 
+ * + * Returns the base LRU type - file or anon - @page should be on. + */ +static inline enum lru_list page_lru_base_type(struct page *page) +{ + if (page_is_file_cache(page)) + return LRU_INACTIVE_FILE; + return LRU_INACTIVE_ANON; +} - __ClearPageActive(page); - __ClearPageUnevictable(page); +/** + * page_off_lru - which LRU list was page on? clearing its lru flags. + * @page: the page to test + * + * Returns the LRU list a page was on, as an index into the array of LRU + * lists; and clears its Unevictable or Active flags, ready for freeing. + */ +static __always_inline enum lru_list page_off_lru(struct page *page) +{ + enum lru_list lru; + + if (PageUnevictable(page)) { + __ClearPageUnevictable(page); + lru = LRU_UNEVICTABLE; + } else { + lru = page_lru_base_type(page); + if (PageActive(page)) { + __ClearPageActive(page); + lru += LRU_ACTIVE; + } + } + return lru; } /** @@ -67,41 +107,16 @@ static __always_inline enum lru_list page_lru(struct page *page) { enum lru_list lru; - VM_BUG_ON_PAGE(PageActive(page) && PageUnevictable(page), page); - if (PageUnevictable(page)) - return LRU_UNEVICTABLE; - - lru = page_is_file_lru(page) ? 
LRU_INACTIVE_FILE : LRU_INACTIVE_ANON; - if (PageActive(page)) - lru += LRU_ACTIVE; - + lru = LRU_UNEVICTABLE; + else { + lru = page_lru_base_type(page); + if (PageActive(page)) + lru += LRU_ACTIVE; + } return lru; } -static __always_inline void add_page_to_lru_list(struct page *page, - struct lruvec *lruvec) -{ - enum lru_list lru = page_lru(page); +#define lru_to_page(head) (list_entry((head)->prev, struct page, lru)) - update_lru_size(lruvec, lru, page_zonenum(page), thp_nr_pages(page)); - list_add(&page->lru, &lruvec->lists[lru]); -} - -static __always_inline void add_page_to_lru_list_tail(struct page *page, - struct lruvec *lruvec) -{ - enum lru_list lru = page_lru(page); - - update_lru_size(lruvec, lru, page_zonenum(page), thp_nr_pages(page)); - list_add_tail(&page->lru, &lruvec->lists[lru]); -} - -static __always_inline void del_page_from_lru_list(struct page *page, - struct lruvec *lruvec) -{ - list_del(&page->lru); - update_lru_size(lruvec, page_lru(page), page_zonenum(page), - -thp_nr_pages(page)); -} #endif diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h index 7f8ee09c71..ceb5da6c2a 100644 --- a/include/linux/mm_types.h +++ b/include/linux/mm_types.h @@ -1,10 +1,9 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_MM_TYPES_H #define _LINUX_MM_TYPES_H -#include - #include +#include +#include #include #include #include @@ -14,8 +13,7 @@ #include #include #include -#include - +#include #include #ifndef AT_VECTOR_SIZE_ARCH @@ -23,11 +21,14 @@ #endif #define AT_VECTOR_SIZE (2*(AT_VECTOR_SIZE_ARCH + AT_VECTOR_SIZE_BASE + 1)) -#define INIT_PASID 0 - struct address_space; struct mem_cgroup; +#define USE_SPLIT_PTE_PTLOCKS (NR_CPUS >= CONFIG_SPLIT_PTLOCK_CPUS) +#define USE_SPLIT_PMD_PTLOCKS (USE_SPLIT_PTE_PTLOCKS && \ + IS_ENABLED(CONFIG_ARCH_ENABLE_SPLIT_PMD_PTLOCK)) +#define ALLOC_SPLIT_PTLOCKS (SPINLOCK_SIZE > BITS_PER_LONG/8) + /* * Each physical page in the system has a struct page associated with * it to keep track of whatever it is 
we are using the page for at the @@ -35,188 +36,163 @@ struct mem_cgroup; * a page, though if it is a pagecache page, rmap structures can tell us * who is mapping it. * - * If you allocate the page using alloc_pages(), you can use some of the - * space in struct page for your own purposes. The five words in the main - * union are available, except for bit 0 of the first word which must be - * kept clear. Many users use this word to store a pointer to an object - * which is guaranteed to be aligned. If you use the same storage as - * page->mapping, you must restore it to NULL before freeing the page. - * - * If your page will not be mapped to userspace, you can also use the four - * bytes in the mapcount union, but you must call page_mapcount_reset() - * before freeing it. - * - * If you want to use the refcount field, it must be used in such a way - * that other CPUs temporarily incrementing and then decrementing the - * refcount does not cause problems. On receiving the page from - * alloc_pages(), the refcount will be positive. - * - * If you allocate pages of order > 0, you can use some of the fields - * in each subpage, but you may need to restore some of their values - * afterwards. - * - * SLUB uses cmpxchg_double() to atomically update its freelist and - * counters. That requires that freelist & counters be adjacent and - * double-word aligned. We align all struct pages to double-word - * boundaries, and ensure that 'freelist' is aligned within the - * struct. + * The objects in struct page are organized in double word blocks in + * order to allows us to use atomic double word operations on portions + * of struct page. That is currently only used by slub but the arrangement + * allows the use of atomic double word operations on the flags/mapping + * and lru list pointers also. 
*/ -#ifdef CONFIG_HAVE_ALIGNED_STRUCT_PAGE -#define _struct_page_alignment __aligned(2 * sizeof(unsigned long)) -#else -#define _struct_page_alignment -#endif - struct page { + /* First double word block */ unsigned long flags; /* Atomic flags, some possibly * updated asynchronously */ - /* - * Five words (20/40 bytes) are available in this union. - * WARNING: bit 0 of the first word is used for PageTail(). That - * means the other users of this union MUST NOT use the bit to - * avoid collision and false-positive PageTail(). - */ union { - struct { /* Page cache and anonymous pages */ - /** - * @lru: Pageout list, eg. active_list protected by - * lruvec->lru_lock. Sometimes used as a generic list - * by the page owner. - */ - struct list_head lru; - /* See page-flags.h for PAGE_MAPPING_FLAGS */ - struct address_space *mapping; - pgoff_t index; /* Our offset within mapping. */ - /** - * @private: Mapping-private opaque data. - * Usually used for buffer_heads if PagePrivate. - * Used for swp_entry_t if PageSwapCache. - * Indicates order in the buddy system if PageBuddy. - */ - unsigned long private; - }; - struct { /* page_pool used by netstack */ - /** - * @pp_magic: magic value to avoid recycling non - * page_pool allocated pages. - */ - unsigned long pp_magic; - struct page_pool *pp; - unsigned long _pp_mapping_pad; - unsigned long dma_addr; - union { - /** - * dma_addr_upper: might require a 64-bit - * value on 32-bit architectures. - */ - unsigned long dma_addr_upper; - /** - * For frag page support, not supported in - * 32-bit architectures with 64-bit DMA. - */ - atomic_long_t pp_frag_count; - }; - }; - struct { /* slab, slob and slub */ - union { - struct list_head slab_list; - struct { /* Partial pages */ - struct page *next; -#ifdef CONFIG_64BIT - int pages; /* Nr of pages left */ - int pobjects; /* Approximate count */ + struct address_space *mapping; /* If low bit clear, points to + * inode address_space, or NULL. 
+ * If page mapped as anonymous + * memory, low bit is set, and + * it points to anon_vma object: + * see PAGE_MAPPING_ANON below. + */ + void *s_mem; /* slab first object */ + atomic_t compound_mapcount; /* first tail page */ + /* page_deferred_list().next -- second tail page */ + }; + + /* Second double word */ + union { + pgoff_t index; /* Our offset within mapping. */ + void *freelist; /* sl[aou]b first free object */ + /* page_deferred_list().prev -- second tail page */ + }; + + union { +#if defined(CONFIG_HAVE_CMPXCHG_DOUBLE) && \ + defined(CONFIG_HAVE_ALIGNED_STRUCT_PAGE) + /* Used for cmpxchg_double in slub */ + unsigned long counters; #else - short int pages; - short int pobjects; + /* + * Keep _refcount separate from slub cmpxchg_double data. + * As the rest of the double word is protected by slab_lock + * but _refcount is not. + */ + unsigned counters; #endif - }; - }; - struct kmem_cache *slab_cache; /* not slob */ - /* Double-word boundary */ - void *freelist; /* first free object */ + struct { + union { - void *s_mem; /* slab: first object */ - unsigned long counters; /* SLUB */ + /* + * Count of ptes mapped in mms, to show when + * page is mapped & limit reverse map searches. + * + * Extra information about page type may be + * stored here for pages that are never mapped, + * in which case the value MUST BE <= -2. + * See page-flags.h for more details. + */ + atomic_t _mapcount; + + unsigned int active; /* SLAB */ struct { /* SLUB */ unsigned inuse:16; unsigned objects:15; unsigned frozen:1; }; + int units; /* SLOB */ }; + /* + * Usage count, *USE WRAPPER FUNCTION* when manual + * accounting. 
See page_ref.h + */ + atomic_t _refcount; }; - struct { /* Tail pages of compound page */ - unsigned long compound_head; /* Bit zero is set */ + }; - /* First tail page only */ - unsigned char compound_dtor; - unsigned char compound_order; - atomic_t compound_mapcount; - unsigned int compound_nr; /* 1 << compound_order */ - }; - struct { /* Second tail page of compound page */ - unsigned long _compound_pad_1; /* compound_head */ - atomic_t hpage_pinned_refcount; - /* For both global and memcg */ - struct list_head deferred_list; - }; - struct { /* Page table pages */ - unsigned long _pt_pad_1; /* compound_head */ - pgtable_t pmd_huge_pte; /* protected by page->ptl */ - unsigned long _pt_pad_2; /* mapping */ - union { - struct mm_struct *pt_mm; /* x86 pgds only */ - atomic_t pt_frag_refcount; /* powerpc */ - }; -#if ALLOC_SPLIT_PTLOCKS - spinlock_t *ptl; + /* + * Third double word block + * + * WARNING: bit 0 of the first word encode PageTail(). That means + * the rest users of the storage space MUST NOT use the bit to + * avoid collision and false-positive PageTail(). + */ + union { + struct list_head lru; /* Pageout list, eg. active_list + * protected by zone_lru_lock ! + * Can be used as a generic list + * by the page owner. + */ + struct dev_pagemap *pgmap; /* ZONE_DEVICE pages are never on an + * lru or handled by a slab + * allocator, this points to the + * hosting device page map. + */ + struct { /* slub per cpu partial pages */ + struct page *next; /* Next partial slab */ +#ifdef CONFIG_64BIT + int pages; /* Nr of partial slabs left */ + int pobjects; /* Approximate # of objects */ #else - spinlock_t ptl; + short int pages; + short int pobjects; #endif }; - struct { /* ZONE_DEVICE pages */ - /** @pgmap: Points to the hosting device page map. 
*/ - struct dev_pagemap *pgmap; - void *zone_device_data; + + struct rcu_head rcu_head; /* Used by SLAB + * when destroying via RCU + */ + /* Tail pages of compound page */ + struct { + unsigned long compound_head; /* If bit zero is set */ + + /* First tail page only */ +#ifdef CONFIG_64BIT /* - * ZONE_DEVICE private pages are counted as being - * mapped so the next 3 words hold the mapping, index, - * and private fields from the source anonymous or - * page cache page while the page is migrated to device - * private memory. - * ZONE_DEVICE MEMORY_DEVICE_FS_DAX pages also - * use the mapping, index, and private fields when - * pmem backed DAX files are mapped. + * On 64 bit system we have enough space in struct page + * to encode compound_dtor and compound_order with + * unsigned int. It can help compiler generate better or + * smaller code on some archtectures. */ + unsigned int compound_dtor; + unsigned int compound_order; +#else + unsigned short int compound_dtor; + unsigned short int compound_order; +#endif }; - /** @rcu_head: You can use this to free a page by RCU. */ - struct rcu_head rcu_head; +#if defined(CONFIG_TRANSPARENT_HUGEPAGE) && USE_SPLIT_PMD_PTLOCKS + struct { + unsigned long __pad; /* do not overlay pmd_huge_pte + * with compound_head to avoid + * possible bit 0 collision. + */ + pgtable_t pmd_huge_pte; /* protected by page->ptl */ + }; +#endif }; - union { /* This union is 4 bytes in size. */ - /* - * If the page can be mapped to userspace, encodes the number - * of times this page is referenced by a page table. - */ - atomic_t _mapcount; - - /* - * If the page is neither PageSlab nor mappable to userspace, - * the value stored here may help determine what this page - * is used for. See page-flags.h for a list of page types - * which are currently stored here. 
- */ - unsigned int page_type; - - unsigned int active; /* SLAB */ - int units; /* SLOB */ + /* Remainder is not double word aligned */ + union { + unsigned long private; /* Mapping-private opaque data: + * usually used for buffer_heads + * if PagePrivate set; used for + * swp_entry_t if PageSwapCache; + * indicates order in the buddy + * system if PG_buddy is set. + */ +#if USE_SPLIT_PTE_PTLOCKS +#if ALLOC_SPLIT_PTLOCKS + spinlock_t *ptl; +#else + spinlock_t ptl; +#endif +#endif + struct kmem_cache *slab_cache; /* SL[AU]B: Pointer to slab */ }; - /* Usage count. *DO NOT USE DIRECTLY*. See page_ref.h */ - atomic_t _refcount; - #ifdef CONFIG_MEMCG - unsigned long memcg_data; + struct mem_cgroup *mem_cgroup; #endif /* @@ -234,36 +210,41 @@ struct page { not kmapped, ie. highmem) */ #endif /* WANT_PAGE_VIRTUAL */ +#ifdef CONFIG_KMEMCHECK + /* + * kmemcheck wants to track the status of each byte in a page; this + * is a pointer to such a status block. NULL if not tracked. + */ + void *shadow; +#endif + #ifdef LAST_CPUPID_NOT_IN_PAGE_FLAGS int _last_cpupid; #endif -} _struct_page_alignment; - -static inline atomic_t *compound_mapcount_ptr(struct page *page) -{ - return &page[1].compound_mapcount; } - -static inline atomic_t *compound_pincount_ptr(struct page *page) -{ - return &page[2].hpage_pinned_refcount; -} - /* - * Used for sizing the vmemmap region on some architectures + * The struct page can be forced to be double word aligned so that atomic ops + * on double words work. The SLUB allocator can make use of such a feature. 
*/ -#define STRUCT_PAGE_MAX_SHIFT (order_base_2(sizeof(struct page))) +#ifdef CONFIG_HAVE_ALIGNED_STRUCT_PAGE + __aligned(2 * sizeof(unsigned long)) +#endif +; + +struct page_frag { + struct page *page; +#if (BITS_PER_LONG > 32) || (PAGE_SIZE >= 65536) + __u32 offset; + __u32 size; +#else + __u16 offset; + __u16 size; +#endif +}; #define PAGE_FRAG_CACHE_MAX_SIZE __ALIGN_MASK(32768, ~PAGE_MASK) #define PAGE_FRAG_CACHE_MAX_ORDER get_order(PAGE_FRAG_CACHE_MAX_SIZE) -#define page_private(page) ((page)->private) - -static inline void set_page_private(struct page *page, unsigned long private) -{ - page->private = private; -} - struct page_frag_cache { void * va; #if (PAGE_SIZE < PAGE_FRAG_CACHE_MAX_SIZE) @@ -311,8 +292,8 @@ struct vm_userfaultfd_ctx {}; #endif /* CONFIG_USERFAULTFD */ /* - * This struct describes a virtual memory area. There is one of these - * per VM-area/task. A VM area is any part of the process virtual memory + * This struct defines a memory VMM memory area. There is one of these + * per VM-area/task. A VM area is any part of the process virtual memory * space that has a special rule for the page-fault handlers (ie a shared * library, the executable area etc). */ @@ -339,12 +320,7 @@ struct vm_area_struct { /* Second cache line starts here. */ struct mm_struct *vm_mm; /* The address space we belong to. */ - - /* - * Access permissions of this VMA. - * See vmf_insert_mixed_prot() for discussion. - */ - pgprot_t vm_page_prot; + pgprot_t vm_page_prot; /* Access permissions of this VMA. */ unsigned long vm_flags; /* Flags, see mm.h. */ /* @@ -362,7 +338,7 @@ struct vm_area_struct { * can only be in the i_mmap tree. An anonymous MAP_PRIVATE, stack * or brk vma (with NULL file) can only be in an anon_vma list. 
*/ - struct list_head anon_vma_chain; /* Serialized by mmap_lock & + struct list_head anon_vma_chain; /* Serialized by mmap_sem & * page_table_lock */ struct anon_vma *anon_vma; /* Serialized by page_table_lock */ @@ -375,9 +351,6 @@ struct vm_area_struct { struct file * vm_file; /* File we map to (can be NULL). */ void * vm_private_data; /* was vm_pte (shared mem) */ -#ifdef CONFIG_SWAP - atomic_long_t swap_readahead_info; -#endif #ifndef CONFIG_MMU struct vm_region *vm_region; /* NOMMU mapping region */ #endif @@ -385,6 +358,8 @@ struct vm_area_struct { struct mempolicy *vm_policy; /* NUMA policy for the VMA */ #endif struct vm_userfaultfd_ctx vm_userfaultfd_ctx; + + struct vm_area_struct *vm_mirror;/* PaX: mirror vma or NULL */ } __randomize_layout; struct core_thread { @@ -398,377 +373,229 @@ struct core_state { struct completion startup; }; +enum { + MM_FILEPAGES, /* Resident file mapping pages */ + MM_ANONPAGES, /* Resident anonymous pages */ + MM_SWAPENTS, /* Anonymous swap entries */ + MM_SHMEMPAGES, /* Resident shared memory pages */ + NR_MM_COUNTERS +}; + +#if USE_SPLIT_PTE_PTLOCKS && defined(CONFIG_MMU) +#define SPLIT_RSS_COUNTING +/* per-thread cached information, */ +struct task_rss_stat { + int events; /* for synchronization threshold */ + int count[NR_MM_COUNTERS]; +}; +#endif /* USE_SPLIT_PTE_PTLOCKS */ + +struct mm_rss_stat { + atomic_long_t count[NR_MM_COUNTERS]; +}; + struct kioctx_table; struct mm_struct { - struct { - struct vm_area_struct *mmap; /* list of VMAs */ - struct rb_root mm_rb; - u64 vmacache_seqnum; /* per-thread vmacache */ + struct vm_area_struct *mmap; /* list of VMAs */ + struct rb_root mm_rb; + u32 vmacache_seqnum; /* per-thread vmacache */ #ifdef CONFIG_MMU - unsigned long (*get_unmapped_area) (struct file *filp, + unsigned long (*get_unmapped_area) (struct file *filp, unsigned long addr, unsigned long len, unsigned long pgoff, unsigned long flags); #endif - unsigned long mmap_base; /* base of mmap area */ - unsigned long 
mmap_legacy_base; /* base of mmap area in bottom-up allocations */ -#ifdef CONFIG_HAVE_ARCH_COMPAT_MMAP_BASES - /* Base addresses for compatible mmap() */ - unsigned long mmap_compat_base; - unsigned long mmap_compat_legacy_base; + unsigned long mmap_base; /* base of mmap area */ + unsigned long mmap_legacy_base; /* base of mmap area in bottom-up allocations */ + unsigned long task_size; /* size of task vm space */ + unsigned long highest_vm_end; /* highest vma end address */ + pgd_t * pgd; + atomic_t mm_users; /* How many users with user space? */ + atomic_t mm_count; /* How many references to "struct mm_struct" (users count as 1) */ + atomic_long_t nr_ptes; /* PTE page table pages */ +#if CONFIG_PGTABLE_LEVELS > 2 + atomic_long_t nr_pmds; /* PMD page table pages */ #endif - unsigned long task_size; /* size of task vm space */ - unsigned long highest_vm_end; /* highest vma end address */ - pgd_t * pgd; + int map_count; /* number of VMAs */ -#ifdef CONFIG_MEMBARRIER - /** - * @membarrier_state: Flags controlling membarrier behavior. - * - * This field is close to @pgd to hopefully fit in the same - * cache-line, which needs to be touched by switch_mm(). - */ - atomic_t membarrier_state; -#endif + spinlock_t page_table_lock; /* Protects page tables and some counters */ + struct rw_semaphore mmap_sem; - /** - * @mm_users: The number of users including userspace. - * - * Use mmget()/mmget_not_zero()/mmput() to modify. When this - * drops to 0 (i.e. when the task exits and there are no other - * temporary reference holders), we also release a reference on - * @mm_count (which may then free the &struct mm_struct if - * @mm_count also drops to 0). - */ - atomic_t mm_users; - - /** - * @mm_count: The number of references to &struct mm_struct - * (@mm_users count as 1). - * - * Use mmgrab()/mmdrop() to modify. When this drops to 0, the - * &struct mm_struct is freed. 
- */ - atomic_t mm_count; - -#ifdef CONFIG_MMU - atomic_long_t pgtables_bytes; /* PTE page table pages */ -#endif - int map_count; /* number of VMAs */ - - spinlock_t page_table_lock; /* Protects page tables and some - * counters - */ - /* - * With some kernel config, the current mmap_lock's offset - * inside 'mm_struct' is at 0x120, which is very optimal, as - * its two hot fields 'count' and 'owner' sit in 2 different - * cachelines, and when mmap_lock is highly contended, both - * of the 2 fields will be accessed frequently, current layout - * will help to reduce cache bouncing. - * - * So please be careful with adding new fields before - * mmap_lock, which can easily push the 2 fields into one - * cacheline. - */ - struct rw_semaphore mmap_lock; - - struct list_head mmlist; /* List of maybe swapped mm's. These - * are globally strung together off - * init_mm.mmlist, and are protected - * by mmlist_lock - */ + struct list_head mmlist; /* List of maybe swapped mm's. These are globally strung + * together off init_mm.mmlist, and are protected + * by mmlist_lock + */ - unsigned long hiwater_rss; /* High-watermark of RSS usage */ - unsigned long hiwater_vm; /* High-water virtual memory usage */ + unsigned long hiwater_rss; /* High-watermark of RSS usage */ + unsigned long hiwater_vm; /* High-water virtual memory usage */ - unsigned long total_vm; /* Total pages mapped */ - unsigned long locked_vm; /* Pages that have PG_mlocked set */ - atomic64_t pinned_vm; /* Refcount permanently increased */ - unsigned long data_vm; /* VM_WRITE & ~VM_SHARED & ~VM_STACK */ - unsigned long exec_vm; /* VM_EXEC & ~VM_WRITE & ~VM_STACK */ - unsigned long stack_vm; /* VM_STACK */ - unsigned long def_flags; + unsigned long total_vm; /* Total pages mapped */ + unsigned long locked_vm; /* Pages that have PG_mlocked set */ + unsigned long pinned_vm; /* Refcount permanently increased */ + unsigned long data_vm; /* VM_WRITE & ~VM_SHARED & ~VM_STACK */ + unsigned long exec_vm; /* VM_EXEC & 
~VM_WRITE & ~VM_STACK */ + unsigned long stack_vm; /* VM_STACK */ + unsigned long def_flags; + unsigned long start_code, end_code, start_data, end_data; + unsigned long start_brk, brk, start_stack; + unsigned long arg_start, arg_end, env_start, env_end; - /** - * @write_protect_seq: Locked when any thread is write - * protecting pages mapped by this mm to enforce a later COW, - * for instance during page table copying for fork(). - */ - seqcount_t write_protect_seq; - - spinlock_t arg_lock; /* protect the below fields */ - - unsigned long start_code, end_code, start_data, end_data; - unsigned long start_brk, brk, start_stack; - unsigned long arg_start, arg_end, env_start, env_end; - - unsigned long saved_auxv[AT_VECTOR_SIZE]; /* for /proc/PID/auxv */ - - /* - * Special counters, in some configurations protected by the - * page_table_lock, in other configurations by being atomic. - */ - struct mm_rss_stat rss_stat; - - struct linux_binfmt *binfmt; - - /* Architecture-specific MM context */ - mm_context_t context; - - unsigned long flags; /* Must use atomic bitops to access */ - - struct core_state *core_state; /* coredumping support */ - -#ifdef CONFIG_AIO - spinlock_t ioctx_lock; - struct kioctx_table __rcu *ioctx_table; -#endif -#ifdef CONFIG_MEMCG - /* - * "owner" points to a task that is regarded as the canonical - * user/owner of this mm. 
All of the following must be true in - * order for it to be changed: - * - * current == mm->owner - * current->mm != mm - * new_owner->mm == mm - * new_owner->alloc_lock is held - */ - struct task_struct __rcu *owner; -#endif - struct user_namespace *user_ns; - - /* store ref to file /proc//exe symlink points to */ - struct file __rcu *exe_file; -#ifdef CONFIG_MMU_NOTIFIER - struct mmu_notifier_subscriptions *notifier_subscriptions; -#endif -#if defined(CONFIG_TRANSPARENT_HUGEPAGE) && !USE_SPLIT_PMD_PTLOCKS - pgtable_t pmd_huge_pte; /* protected by page_table_lock */ -#endif -#ifdef CONFIG_NUMA_BALANCING - /* - * numa_next_scan is the next time that the PTEs will be marked - * pte_numa. NUMA hinting faults will gather statistics and - * migrate pages to new nodes if necessary. - */ - unsigned long numa_next_scan; - - /* Restart point for scanning and setting pte_numa */ - unsigned long numa_scan_offset; - - /* numa_scan_seq prevents two threads setting pte_numa */ - int numa_scan_seq; -#endif - /* - * An operation with batched TLB flushing is going on. Anything - * that can move process memory needs to flush the TLB when - * moving a PROT_NONE or PROT_NUMA mapped page. - */ - atomic_t tlb_flush_pending; -#ifdef CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH - /* See flush_tlb_batched_pending() */ - bool tlb_flush_batched; -#endif - struct uprobes_state uprobes_state; -#ifdef CONFIG_HUGETLB_PAGE - atomic_long_t hugetlb_usage; -#endif - struct work_struct async_put_work; - -#ifdef CONFIG_IOMMU_SUPPORT - u32 pasid; -#endif - } __randomize_layout; + unsigned long saved_auxv[AT_VECTOR_SIZE]; /* for /proc/PID/auxv */ /* - * The mm_cpumask needs to be at the end of mm_struct, because it - * is dynamically sized based on nr_cpu_ids. + * Special counters, in some configurations protected by the + * page_table_lock, in other configurations by being atomic. 
*/ - unsigned long cpu_bitmap[]; -}; + struct mm_rss_stat rss_stat; -extern struct mm_struct init_mm; + struct linux_binfmt *binfmt; + + cpumask_var_t cpu_vm_mask_var; + + /* Architecture-specific MM context */ + mm_context_t context; + + unsigned long flags; /* Must use atomic bitops to access the bits */ + + struct core_state *core_state; /* coredumping support */ +#ifdef CONFIG_AIO + spinlock_t ioctx_lock; + struct kioctx_table __rcu *ioctx_table; +#endif +#ifdef CONFIG_MEMCG + /* + * "owner" points to a task that is regarded as the canonical + * user/owner of this mm. All of the following must be true in + * order for it to be changed: + * + * current == mm->owner + * current->mm != mm + * new_owner->mm == mm + * new_owner->alloc_lock is held + */ + struct task_struct __rcu *owner; +#endif + struct user_namespace *user_ns; + + /* store ref to file /proc//exe symlink points to */ + struct file __rcu *exe_file; +#ifdef CONFIG_MMU_NOTIFIER + struct mmu_notifier_mm *mmu_notifier_mm; +#endif +#if defined(CONFIG_TRANSPARENT_HUGEPAGE) && !USE_SPLIT_PMD_PTLOCKS + pgtable_t pmd_huge_pte; /* protected by page_table_lock */ +#endif +#ifdef CONFIG_CPUMASK_OFFSTACK + struct cpumask cpumask_allocation; +#endif +#ifdef CONFIG_NUMA_BALANCING + /* + * numa_next_scan is the next time that the PTEs will be marked + * pte_numa. NUMA hinting faults will gather statistics and migrate + * pages to new nodes if necessary. + */ + unsigned long numa_next_scan; + + /* Restart point for scanning and setting pte_numa */ + unsigned long numa_scan_offset; + + /* numa_scan_seq prevents two threads setting pte_numa */ + int numa_scan_seq; +#endif +#if defined(CONFIG_NUMA_BALANCING) || defined(CONFIG_COMPACTION) + /* + * An operation with batched TLB flushing is going on. Anything that + * can move process memory needs to flush the TLB when moving a + * PROT_NONE or PROT_NUMA mapped page. 
+ */ + bool tlb_flush_pending; +#endif + struct uprobes_state uprobes_state; +#ifdef CONFIG_X86_INTEL_MPX + /* address of the bounds directory */ + void __user *bd_addr; +#endif +#ifdef CONFIG_HUGETLB_PAGE + atomic_long_t hugetlb_usage; +#endif + struct work_struct async_put_work; + +#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR) + unsigned long pax_flags; +#endif + +#ifdef CONFIG_PAX_DLRESOLVE + unsigned long call_dl_resolve; +#endif + +#if defined(CONFIG_PPC32) && defined(CONFIG_PAX_EMUSIGRT) + unsigned long call_syscall; +#endif + +#ifdef CONFIG_PAX_ASLR + unsigned long delta_mmap; /* randomized offset */ + unsigned long delta_stack; /* randomized offset */ +#endif + +} __randomize_layout; -/* Pointer magic because the dynamic array size confuses some compilers. */ static inline void mm_init_cpumask(struct mm_struct *mm) { - unsigned long cpu_bitmap = (unsigned long)mm; - - cpu_bitmap += offsetof(struct mm_struct, cpu_bitmap); - cpumask_clear((struct cpumask *)cpu_bitmap); +#ifdef CONFIG_CPUMASK_OFFSTACK + mm->cpu_vm_mask_var = &mm->cpumask_allocation; +#endif + cpumask_clear(mm->cpu_vm_mask_var); } /* Future-safe accessor for struct mm_struct's cpu_vm_mask. */ static inline cpumask_t *mm_cpumask(struct mm_struct *mm) { - return (struct cpumask *)&mm->cpu_bitmap; -} - -struct mmu_gather; -extern void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm); -extern void tlb_gather_mmu_fullmm(struct mmu_gather *tlb, struct mm_struct *mm); -extern void tlb_finish_mmu(struct mmu_gather *tlb); - -static inline void init_tlb_flush_pending(struct mm_struct *mm) -{ - atomic_set(&mm->tlb_flush_pending, 0); -} - -static inline void inc_tlb_flush_pending(struct mm_struct *mm) -{ - atomic_inc(&mm->tlb_flush_pending); - /* - * The only time this value is relevant is when there are indeed pages - * to flush. And we'll only flush pages after changing them, which - * requires the PTL. 
- * - * So the ordering here is: - * - * atomic_inc(&mm->tlb_flush_pending); - * spin_lock(&ptl); - * ... - * set_pte_at(); - * spin_unlock(&ptl); - * - * spin_lock(&ptl) - * mm_tlb_flush_pending(); - * .... - * spin_unlock(&ptl); - * - * flush_tlb_range(); - * atomic_dec(&mm->tlb_flush_pending); - * - * Where the increment if constrained by the PTL unlock, it thus - * ensures that the increment is visible if the PTE modification is - * visible. After all, if there is no PTE modification, nobody cares - * about TLB flushes either. - * - * This very much relies on users (mm_tlb_flush_pending() and - * mm_tlb_flush_nested()) only caring about _specific_ PTEs (and - * therefore specific PTLs), because with SPLIT_PTE_PTLOCKS and RCpc - * locks (PPC) the unlock of one doesn't order against the lock of - * another PTL. - * - * The decrement is ordered by the flush_tlb_range(), such that - * mm_tlb_flush_pending() will not return false unless all flushes have - * completed. - */ -} - -static inline void dec_tlb_flush_pending(struct mm_struct *mm) -{ - /* - * See inc_tlb_flush_pending(). - * - * This cannot be smp_mb__before_atomic() because smp_mb() simply does - * not order against TLB invalidate completion, which is what we need. - * - * Therefore we must rely on tlb_flush_*() to guarantee order. - */ - atomic_dec(&mm->tlb_flush_pending); + return mm->cpu_vm_mask_var; } +#if defined(CONFIG_NUMA_BALANCING) || defined(CONFIG_COMPACTION) +/* + * Memory barriers to keep this state in sync are graciously provided by + * the page table locks, outside of which no page table modifications happen. + * The barriers below prevent the compiler from re-ordering the instructions + * around the memory barriers that are already present in the code. 
+ */ static inline bool mm_tlb_flush_pending(struct mm_struct *mm) { - /* - * Must be called after having acquired the PTL; orders against that - * PTLs release and therefore ensures that if we observe the modified - * PTE we must also observe the increment from inc_tlb_flush_pending(). - * - * That is, it only guarantees to return true if there is a flush - * pending for _this_ PTL. - */ - return atomic_read(&mm->tlb_flush_pending); + barrier(); + return mm->tlb_flush_pending; } - -static inline bool mm_tlb_flush_nested(struct mm_struct *mm) +static inline void set_tlb_flush_pending(struct mm_struct *mm) { + mm->tlb_flush_pending = true; + /* - * Similar to mm_tlb_flush_pending(), we must have acquired the PTL - * for which there is a TLB flush pending in order to guarantee - * we've seen both that PTE modification and the increment. - * - * (no requirement on actually still holding the PTL, that is irrelevant) + * Guarantee that the tlb_flush_pending store does not leak into the + * critical section updating the page tables */ - return atomic_read(&mm->tlb_flush_pending) > 1; + smp_mb__before_spinlock(); } +/* Clearing is done after a TLB flush, which also provides a barrier. */ +static inline void clear_tlb_flush_pending(struct mm_struct *mm) +{ + barrier(); + mm->tlb_flush_pending = false; +} +#else +static inline bool mm_tlb_flush_pending(struct mm_struct *mm) +{ + return false; +} +static inline void set_tlb_flush_pending(struct mm_struct *mm) +{ +} +static inline void clear_tlb_flush_pending(struct mm_struct *mm) +{ +} +#endif struct vm_fault; -/** - * typedef vm_fault_t - Return type for page fault handlers. - * - * Page fault handlers return a bitmask of %VM_FAULT values. - */ -typedef __bitwise unsigned int vm_fault_t; - -/** - * enum vm_fault_reason - Page fault handlers return a bitmask of - * these values to tell the core VM what happened when handling the - * fault. 
Used to decide whether a process gets delivered SIGBUS or - * just gets major/minor fault counters bumped up. - * - * @VM_FAULT_OOM: Out Of Memory - * @VM_FAULT_SIGBUS: Bad access - * @VM_FAULT_MAJOR: Page read from storage - * @VM_FAULT_WRITE: Special case for get_user_pages - * @VM_FAULT_HWPOISON: Hit poisoned small page - * @VM_FAULT_HWPOISON_LARGE: Hit poisoned large page. Index encoded - * in upper bits - * @VM_FAULT_SIGSEGV: segmentation fault - * @VM_FAULT_NOPAGE: ->fault installed the pte, not return page - * @VM_FAULT_LOCKED: ->fault locked the returned page - * @VM_FAULT_RETRY: ->fault blocked, must retry - * @VM_FAULT_FALLBACK: huge page fault failed, fall back to small - * @VM_FAULT_DONE_COW: ->fault has fully handled COW - * @VM_FAULT_NEEDDSYNC: ->fault did not modify page tables and needs - * fsync() to complete (for synchronous page faults - * in DAX) - * @VM_FAULT_HINDEX_MASK: mask HINDEX value - * - */ -enum vm_fault_reason { - VM_FAULT_OOM = (__force vm_fault_t)0x000001, - VM_FAULT_SIGBUS = (__force vm_fault_t)0x000002, - VM_FAULT_MAJOR = (__force vm_fault_t)0x000004, - VM_FAULT_WRITE = (__force vm_fault_t)0x000008, - VM_FAULT_HWPOISON = (__force vm_fault_t)0x000010, - VM_FAULT_HWPOISON_LARGE = (__force vm_fault_t)0x000020, - VM_FAULT_SIGSEGV = (__force vm_fault_t)0x000040, - VM_FAULT_NOPAGE = (__force vm_fault_t)0x000100, - VM_FAULT_LOCKED = (__force vm_fault_t)0x000200, - VM_FAULT_RETRY = (__force vm_fault_t)0x000400, - VM_FAULT_FALLBACK = (__force vm_fault_t)0x000800, - VM_FAULT_DONE_COW = (__force vm_fault_t)0x001000, - VM_FAULT_NEEDDSYNC = (__force vm_fault_t)0x002000, - VM_FAULT_HINDEX_MASK = (__force vm_fault_t)0x0f0000, -}; - -/* Encode hstate index for a hwpoisoned large page */ -#define VM_FAULT_SET_HINDEX(x) ((__force vm_fault_t)((x) << 16)) -#define VM_FAULT_GET_HINDEX(x) (((__force unsigned int)(x) >> 16) & 0xf) - -#define VM_FAULT_ERROR (VM_FAULT_OOM | VM_FAULT_SIGBUS | \ - VM_FAULT_SIGSEGV | VM_FAULT_HWPOISON | \ - 
VM_FAULT_HWPOISON_LARGE | VM_FAULT_FALLBACK) - -#define VM_FAULT_RESULT_TRACE \ - { VM_FAULT_OOM, "OOM" }, \ - { VM_FAULT_SIGBUS, "SIGBUS" }, \ - { VM_FAULT_MAJOR, "MAJOR" }, \ - { VM_FAULT_WRITE, "WRITE" }, \ - { VM_FAULT_HWPOISON, "HWPOISON" }, \ - { VM_FAULT_HWPOISON_LARGE, "HWPOISON_LARGE" }, \ - { VM_FAULT_SIGSEGV, "SIGSEGV" }, \ - { VM_FAULT_NOPAGE, "NOPAGE" }, \ - { VM_FAULT_LOCKED, "LOCKED" }, \ - { VM_FAULT_RETRY, "RETRY" }, \ - { VM_FAULT_FALLBACK, "FALLBACK" }, \ - { VM_FAULT_DONE_COW, "DONE_COW" }, \ - { VM_FAULT_NEEDDSYNC, "NEEDDSYNC" } - struct vm_special_mapping { const char *name; /* The name, e.g. "[vdso]". */ @@ -784,9 +611,9 @@ struct vm_special_mapping { * If non-NULL, then this is called to resolve page faults * on the special mapping. If used, .pages is not checked. */ - vm_fault_t (*fault)(const struct vm_special_mapping *sm, - struct vm_area_struct *vma, - struct vm_fault *vmf); + int (*fault)(const struct vm_special_mapping *sm, + struct vm_area_struct *vma, + struct vm_fault *vmf); int (*mremap)(const struct vm_special_mapping *sm, struct vm_area_struct *new_vma); diff --git a/include/linux/mman.h b/include/linux/mman.h index b66e91b817..634c4c51fe 100644 --- a/include/linux/mman.h +++ b/include/linux/mman.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_MMAN_H #define _LINUX_MMAN_H @@ -8,51 +7,6 @@ #include #include -/* - * Arrange for legacy / undefined architecture specific flags to be - * ignored by mmap handling code. - */ -#ifndef MAP_32BIT -#define MAP_32BIT 0 -#endif -#ifndef MAP_HUGE_2MB -#define MAP_HUGE_2MB 0 -#endif -#ifndef MAP_HUGE_1GB -#define MAP_HUGE_1GB 0 -#endif -#ifndef MAP_UNINITIALIZED -#define MAP_UNINITIALIZED 0 -#endif -#ifndef MAP_SYNC -#define MAP_SYNC 0 -#endif - -/* - * The historical set of flags that all mmap implementations implicitly - * support when a ->mmap_validate() op is not provided in file_operations. 
- * - * MAP_EXECUTABLE and MAP_DENYWRITE are completely ignored throughout the - * kernel. - */ -#define LEGACY_MAP_MASK (MAP_SHARED \ - | MAP_PRIVATE \ - | MAP_FIXED \ - | MAP_ANONYMOUS \ - | MAP_DENYWRITE \ - | MAP_EXECUTABLE \ - | MAP_UNINITIALIZED \ - | MAP_GROWSDOWN \ - | MAP_LOCKED \ - | MAP_NORESERVE \ - | MAP_POPULATE \ - | MAP_NONBLOCK \ - | MAP_STACK \ - | MAP_HUGETLB \ - | MAP_32BIT \ - | MAP_HUGE_2MB \ - | MAP_HUGE_1GB) - extern int sysctl_overcommit_memory; extern int sysctl_overcommit_ratio; extern unsigned long sysctl_overcommit_kbytes; @@ -60,19 +14,15 @@ extern struct percpu_counter vm_committed_as; #ifdef CONFIG_SMP extern s32 vm_committed_as_batch; -extern void mm_compute_batch(int overcommit_policy); #else #define vm_committed_as_batch 0 -static inline void mm_compute_batch(int overcommit_policy) -{ -} #endif unsigned long vm_memory_committed(void); static inline void vm_acct_memory(long pages) { - percpu_counter_add_batch(&vm_committed_as, pages, vm_committed_as_batch); + __percpu_counter_add(&vm_committed_as, pages, vm_committed_as_batch); } static inline void vm_unacct_memory(long pages) @@ -81,18 +31,13 @@ static inline void vm_unacct_memory(long pages) } /* - * Allow architectures to handle additional protection and flag bits. The - * overriding macros must be defined in the arch-specific asm/mman.h file. 
+ * Allow architectures to handle additional protection bits */ #ifndef arch_calc_vm_prot_bits #define arch_calc_vm_prot_bits(prot, pkey) 0 #endif -#ifndef arch_calc_vm_flag_bits -#define arch_calc_vm_flag_bits(flags) 0 -#endif - #ifndef arch_vm_get_page_prot #define arch_vm_get_page_prot(vm_flags) __pgprot(0) #endif @@ -104,26 +49,13 @@ static inline void vm_unacct_memory(long pages) * * Returns true if the prot flags are valid */ -static inline bool arch_validate_prot(unsigned long prot, unsigned long addr) +static inline bool arch_validate_prot(unsigned long prot) { return (prot & ~(PROT_READ | PROT_WRITE | PROT_EXEC | PROT_SEM)) == 0; } #define arch_validate_prot arch_validate_prot #endif -#ifndef arch_validate_flags -/* - * This is called from mmap() and mprotect() with the updated vma->vm_flags. - * - * Returns true if the VM_* flags are valid. - */ -static inline bool arch_validate_flags(unsigned long flags) -{ - return true; -} -#define arch_validate_flags arch_validate_flags -#endif - /* * Optimisation macro. It is equivalent to: * (x & bit1) ? bit2 : 0 @@ -131,9 +63,8 @@ static inline bool arch_validate_flags(unsigned long flags) * ("bit1" and "bit2" must be single bits) */ #define _calc_vm_trans(x, bit1, bit2) \ - ((!(bit1) || !(bit2)) ? 0 : \ ((bit1) <= (bit2) ? ((x) & (bit1)) * ((bit2) / (bit1)) \ - : ((x) & (bit1)) / ((bit1) / (bit2)))) + : ((x) & (bit1)) / ((bit1) / (bit2))) /* * Combine the mmap "prot" argument into "vm_flags" used internally. 
@@ -154,9 +85,8 @@ static inline unsigned long calc_vm_flag_bits(unsigned long flags) { return _calc_vm_trans(flags, MAP_GROWSDOWN, VM_GROWSDOWN ) | - _calc_vm_trans(flags, MAP_LOCKED, VM_LOCKED ) | - _calc_vm_trans(flags, MAP_SYNC, VM_SYNC ) | - arch_calc_vm_flag_bits(flags); + _calc_vm_trans(flags, MAP_DENYWRITE, VM_DENYWRITE ) | + _calc_vm_trans(flags, MAP_LOCKED, VM_LOCKED ); } unsigned long vm_commit_limit(void); diff --git a/include/linux/mmc/boot.h b/include/linux/mmc/boot.h new file mode 100644 index 0000000000..23acc3baa0 --- /dev/null +++ b/include/linux/mmc/boot.h @@ -0,0 +1,7 @@ +#ifndef LINUX_MMC_BOOT_H +#define LINUX_MMC_BOOT_H + +enum { MMC_PROGRESS_ENTER, MMC_PROGRESS_INIT, + MMC_PROGRESS_LOAD, MMC_PROGRESS_DONE }; + +#endif /* LINUX_MMC_BOOT_H */ diff --git a/include/linux/mmc/card.h b/include/linux/mmc/card.h index fca1b21026..73fad83acb 100644 --- a/include/linux/mmc/card.h +++ b/include/linux/mmc/card.h @@ -1,13 +1,17 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * linux/include/linux/mmc/card.h * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * * Card driver specific definitions. 
*/ #ifndef LINUX_MMC_CARD_H #define LINUX_MMC_CARD_H #include +#include #include struct mmc_cid { @@ -26,8 +30,8 @@ struct mmc_csd { unsigned char structure; unsigned char mmca_vsn; unsigned short cmdclass; - unsigned short taac_clks; - unsigned int taac_ns; + unsigned short tacc_clks; + unsigned int tacc_ns; unsigned int c_size; unsigned int r2w_factor; unsigned int max_dtr; @@ -48,7 +52,6 @@ struct mmc_ext_csd { u8 sec_feature_support; u8 rel_sectors; u8 rel_param; - bool enhanced_rpmb_supported; u8 part_config; u8 cache_ctrl; u8 rst_n_function; @@ -81,15 +84,11 @@ struct mmc_ext_csd { unsigned int hpi_cmd; /* cmd used as HPI */ bool bkops; /* background support bit */ bool man_bkops_en; /* manual bkops enable bit */ - bool auto_bkops_en; /* auto bkops enable bit */ unsigned int data_sector_size; /* 512 bytes or 4KB */ unsigned int data_tag_unit_size; /* DATA TAG UNIT size */ unsigned int boot_ro_lock; /* ro lock support */ bool boot_ro_lockable; bool ffu_capable; /* Firmware upgrade support */ - bool cmdq_en; /* Command Queue enabled */ - bool cmdq_support; /* Command Queue supported */ - unsigned int cmdq_depth; /* Command Queue depth */ #define MMC_FIRMWARE_LEN 8 u8 fwrev[MMC_FIRMWARE_LEN]; /* FW version */ u8 raw_exception_status; /* 54 */ @@ -109,7 +108,6 @@ struct mmc_ext_csd { u8 raw_hc_erase_gap_size; /* 221 */ u8 raw_erase_timeout_mult; /* 223 */ u8 raw_hc_erase_grp_size; /* 224 */ - u8 raw_boot_mult; /* 226 */ u8 raw_sec_trim_mult; /* 229 */ u8 raw_sec_erase_mult; /* 230 */ u8 raw_sec_feature_support;/* 231 */ @@ -121,9 +119,6 @@ struct mmc_ext_csd { u8 raw_pwr_cl_ddr_200_360; /* 253 */ u8 raw_bkops_status; /* 246 */ u8 raw_sectors[4]; /* 212 - 4 bytes */ - u8 pre_eol_info; /* 267 */ - u8 device_life_time_est_typ_a; /* 268 */ - u8 device_life_time_est_typ_b; /* 269 */ unsigned int feature_support; #define MMC_DISCARD_FEATURE BIT(0) /* CMD38 feature */ @@ -132,16 +127,12 @@ struct mmc_ext_csd { struct sd_scr { unsigned char sda_vsn; unsigned char 
sda_spec3; - unsigned char sda_spec4; - unsigned char sda_specx; unsigned char bus_widths; #define SD_SCR_BUS_WIDTH_1 (1<<0) #define SD_SCR_BUS_WIDTH_4 (1<<2) unsigned char cmds; #define SD_SCR_CMD20_SUPPORT (1<<0) #define SD_SCR_CMD23_SUPPORT (1<<1) -#define SD_SCR_CMD48_SUPPORT (1<<2) -#define SD_SCR_CMD58_SUPPORT (1<<3) }; struct sd_ssr { @@ -159,7 +150,6 @@ struct sd_switch_caps { #define UHS_DDR50_MAX_DTR 50000000 #define UHS_SDR25_MAX_DTR UHS_DDR50_MAX_DTR #define UHS_SDR12_MAX_DTR 25000000 -#define DEFAULT_SPEED_MAX_DTR UHS_SDR12_MAX_DTR unsigned int sd3_bus_mode; #define UHS_SDR12_BUS_SPEED 0 #define HIGH_SPEED_BUS_SPEED 1 @@ -192,25 +182,6 @@ struct sd_switch_caps { #define SD_MAX_CURRENT_800 (1 << SD_SET_CURRENT_LIMIT_800) }; -struct sd_ext_reg { - u8 fno; - u8 page; - u16 offset; - u8 rev; - u8 feature_enabled; - u8 feature_support; -/* Power Management Function. */ -#define SD_EXT_POWER_OFF_NOTIFY (1<<0) -#define SD_EXT_POWER_SUSTENANCE (1<<1) -#define SD_EXT_POWER_DOWN_MODE (1<<2) -/* Performance Enhancement Function. */ -#define SD_EXT_PERF_FX_EVENT (1<<0) -#define SD_EXT_PERF_CARD_MAINT (1<<1) -#define SD_EXT_PERF_HOST_MAINT (1<<2) -#define SD_EXT_PERF_CACHE (1<<3) -#define SD_EXT_PERF_CMD_QUEUE (1<<4) -}; - struct sdio_cccr { unsigned int sdio_vsn; unsigned int sd_vsn; @@ -230,12 +201,24 @@ struct sdio_cis { }; struct mmc_host; +struct mmc_ios; struct sdio_func; struct sdio_func_tuple; -struct mmc_queue_req; #define SDIO_MAX_FUNCS 7 +enum mmc_blk_status { + MMC_BLK_SUCCESS = 0, + MMC_BLK_PARTIAL, + MMC_BLK_CMD_ERR, + MMC_BLK_RETRY, + MMC_BLK_ABORT, + MMC_BLK_DATA_ERR, + MMC_BLK_ECC_ERR, + MMC_BLK_NOMEDIUM, + MMC_BLK_NEW_REQUEST, +}; + /* The number of MMC physical partitions. These consist of: * boot partitions (2), general purpose partitions (4) and * RPMB partition (1) in MMC v4.4. 
@@ -249,7 +232,7 @@ struct mmc_queue_req; * MMC Physical partitions */ struct mmc_part { - u64 size; /* partition size (in bytes) */ + unsigned int size; /* partition size (in bytes) */ unsigned int part_cfg; /* partition type */ char name[MAX_MMC_PART_NAME_LEN]; bool force_ro; /* to make boot parts RO by default */ @@ -274,8 +257,14 @@ struct mmc_card { #define MMC_TYPE_SDIO 2 /* SDIO card */ #define MMC_TYPE_SD_COMBO 3 /* SD combo (IO+mem) card */ unsigned int state; /* (our) card state */ +#define MMC_STATE_PRESENT (1<<0) /* present in sysfs */ +#define MMC_STATE_READONLY (1<<1) /* card is read-only */ +#define MMC_STATE_BLOCKADDR (1<<2) /* card uses block-addressing */ +#define MMC_CARD_SDXC (1<<3) /* card is SDXC */ +#define MMC_CARD_REMOVED (1<<4) /* card has been removed */ +#define MMC_STATE_DOING_BKOPS (1<<5) /* card is doing BKOPS */ +#define MMC_STATE_SUSPENDED (1<<6) /* card is suspended */ unsigned int quirks; /* card quirks */ - unsigned int quirk_max_rate; /* max rate set by quirks */ #define MMC_QUIRK_LENIENT_FN0 (1<<0) /* allow SDIO FN0 writes outside of the VS CCCR range */ #define MMC_QUIRK_BLKSZ_FOR_BYTE_MODE (1<<1) /* use func->cur_blksize */ /* for byte mode */ @@ -293,15 +282,11 @@ struct mmc_card { #define MMC_QUIRK_TRIM_BROKEN (1<<12) /* Skip trim */ #define MMC_QUIRK_BROKEN_HPI (1<<13) /* Disable broken HPI support */ -#define MMC_QUIRK_ERASE_BROKEN (1<<31) /* Skip erase */ - - bool reenable_cmdq; /* Re-enable Command Queue */ unsigned int erase_size; /* erase size in sectors */ unsigned int erase_shift; /* if erase unit is power 2 */ unsigned int pref_erase; /* in sectors */ unsigned int eg_boundary; /* don't cross erase-group boundaries */ - unsigned int erase_arg; /* erase / trim / discard */ u8 erased_byte; /* value of erased bytes */ u32 raw_cid[4]; /* raw card CID */ @@ -314,17 +299,12 @@ struct mmc_card { struct sd_scr scr; /* extra SD information */ struct sd_ssr ssr; /* yet more SD information */ struct sd_switch_caps sw_caps; /* 
switch (CMD6) caps */ - struct sd_ext_reg ext_power; /* SD extension reg for PM */ - struct sd_ext_reg ext_perf; /* SD extension reg for PERF */ unsigned int sdio_funcs; /* number of SDIO functions */ - atomic_t sdio_funcs_probed; /* number of probed SDIO funcs */ struct sdio_cccr cccr; /* common card info */ struct sdio_cis cis; /* common tuple info */ struct sdio_func *sdio_func[SDIO_MAX_FUNCS]; /* SDIO functions (devices) */ struct sdio_func *sdio_single_irq; /* SDIO function when only one IRQ active */ - u8 major_rev; /* major revision number */ - u8 minor_rev; /* minor revision number */ unsigned num_info; /* number of info strings */ const char **info; /* info strings */ struct sdio_func_tuple *tuples; /* unknown common tuples */ @@ -336,19 +316,247 @@ struct mmc_card { struct dentry *debugfs_root; struct mmc_part part[MMC_NUM_PHY_PARTITION]; /* physical partitions */ unsigned int nr_parts; - - struct workqueue_struct *complete_wq; /* Private workqueue */ }; +/* + * This function fill contents in mmc_part. + */ +static inline void mmc_part_add(struct mmc_card *card, unsigned int size, + unsigned int part_cfg, char *name, int idx, bool ro, + int area_type) +{ + card->part[card->nr_parts].size = size; + card->part[card->nr_parts].part_cfg = part_cfg; + sprintf(card->part[card->nr_parts].name, name, idx); + card->part[card->nr_parts].force_ro = ro; + card->part[card->nr_parts].area_type = area_type; + card->nr_parts++; +} + static inline bool mmc_large_sector(struct mmc_card *card) { return card->ext_csd.data_sector_size == 4096; } -bool mmc_card_is_blockaddr(struct mmc_card *card); +/* + * The world is not perfect and supplies us with broken mmc/sdio devices. + * For at least some of these bugs we need a work-around. + */ + +struct mmc_fixup { + /* CID-specific fields. */ + const char *name; + + /* Valid revision range */ + u64 rev_start, rev_end; + + unsigned int manfid; + unsigned short oemid; + + /* SDIO-specfic fields. 
You can use SDIO_ANY_ID here of course */ + u16 cis_vendor, cis_device; + + /* for MMC cards */ + unsigned int ext_csd_rev; + + void (*vendor_fixup)(struct mmc_card *card, int data); + int data; +}; + +#define CID_MANFID_ANY (-1u) +#define CID_OEMID_ANY ((unsigned short) -1) +#define CID_NAME_ANY (NULL) + +#define EXT_CSD_REV_ANY (-1u) + +#define CID_MANFID_SANDISK 0x2 +#define CID_MANFID_TOSHIBA 0x11 +#define CID_MANFID_MICRON 0x13 +#define CID_MANFID_SAMSUNG 0x15 +#define CID_MANFID_KINGSTON 0x70 +#define CID_MANFID_HYNIX 0x90 + +#define END_FIXUP { NULL } + +#define _FIXUP_EXT(_name, _manfid, _oemid, _rev_start, _rev_end, \ + _cis_vendor, _cis_device, \ + _fixup, _data, _ext_csd_rev) \ + { \ + .name = (_name), \ + .manfid = (_manfid), \ + .oemid = (_oemid), \ + .rev_start = (_rev_start), \ + .rev_end = (_rev_end), \ + .cis_vendor = (_cis_vendor), \ + .cis_device = (_cis_device), \ + .vendor_fixup = (_fixup), \ + .data = (_data), \ + .ext_csd_rev = (_ext_csd_rev), \ + } + +#define MMC_FIXUP_REV(_name, _manfid, _oemid, _rev_start, _rev_end, \ + _fixup, _data, _ext_csd_rev) \ + _FIXUP_EXT(_name, _manfid, \ + _oemid, _rev_start, _rev_end, \ + SDIO_ANY_ID, SDIO_ANY_ID, \ + _fixup, _data, _ext_csd_rev) \ + +#define MMC_FIXUP(_name, _manfid, _oemid, _fixup, _data) \ + MMC_FIXUP_REV(_name, _manfid, _oemid, 0, -1ull, _fixup, _data, \ + EXT_CSD_REV_ANY) + +#define MMC_FIXUP_EXT_CSD_REV(_name, _manfid, _oemid, _fixup, _data, \ + _ext_csd_rev) \ + MMC_FIXUP_REV(_name, _manfid, _oemid, 0, -1ull, _fixup, _data, \ + _ext_csd_rev) + +#define SDIO_FIXUP(_vendor, _device, _fixup, _data) \ + _FIXUP_EXT(CID_NAME_ANY, CID_MANFID_ANY, \ + CID_OEMID_ANY, 0, -1ull, \ + _vendor, _device, \ + _fixup, _data, EXT_CSD_REV_ANY) \ + +#define cid_rev(hwrev, fwrev, year, month) \ + (((u64) hwrev) << 40 | \ + ((u64) fwrev) << 32 | \ + ((u64) year) << 16 | \ + ((u64) month)) + +#define cid_rev_card(card) \ + cid_rev(card->cid.hwrev, \ + card->cid.fwrev, \ + card->cid.year, \ + card->cid.month) + 
+/* + * Unconditionally quirk add/remove. + */ + +static inline void __maybe_unused add_quirk(struct mmc_card *card, int data) +{ + card->quirks |= data; +} + +static inline void __maybe_unused remove_quirk(struct mmc_card *card, int data) +{ + card->quirks &= ~data; +} #define mmc_card_mmc(c) ((c)->type == MMC_TYPE_MMC) #define mmc_card_sd(c) ((c)->type == MMC_TYPE_SD) #define mmc_card_sdio(c) ((c)->type == MMC_TYPE_SDIO) +#define mmc_card_present(c) ((c)->state & MMC_STATE_PRESENT) +#define mmc_card_readonly(c) ((c)->state & MMC_STATE_READONLY) +#define mmc_card_blockaddr(c) ((c)->state & MMC_STATE_BLOCKADDR) +#define mmc_card_ext_capacity(c) ((c)->state & MMC_CARD_SDXC) +#define mmc_card_removed(c) ((c) && ((c)->state & MMC_CARD_REMOVED)) +#define mmc_card_doing_bkops(c) ((c)->state & MMC_STATE_DOING_BKOPS) +#define mmc_card_suspended(c) ((c)->state & MMC_STATE_SUSPENDED) + +#define mmc_card_set_present(c) ((c)->state |= MMC_STATE_PRESENT) +#define mmc_card_set_readonly(c) ((c)->state |= MMC_STATE_READONLY) +#define mmc_card_set_blockaddr(c) ((c)->state |= MMC_STATE_BLOCKADDR) +#define mmc_card_set_ext_capacity(c) ((c)->state |= MMC_CARD_SDXC) +#define mmc_card_set_removed(c) ((c)->state |= MMC_CARD_REMOVED) +#define mmc_card_set_doing_bkops(c) ((c)->state |= MMC_STATE_DOING_BKOPS) +#define mmc_card_clr_doing_bkops(c) ((c)->state &= ~MMC_STATE_DOING_BKOPS) +#define mmc_card_set_suspended(c) ((c)->state |= MMC_STATE_SUSPENDED) +#define mmc_card_clr_suspended(c) ((c)->state &= ~MMC_STATE_SUSPENDED) + +/* + * Quirk add/remove for MMC products. + */ + +static inline void __maybe_unused add_quirk_mmc(struct mmc_card *card, int data) +{ + if (mmc_card_mmc(card)) + card->quirks |= data; +} + +static inline void __maybe_unused remove_quirk_mmc(struct mmc_card *card, + int data) +{ + if (mmc_card_mmc(card)) + card->quirks &= ~data; +} + +/* + * Quirk add/remove for SD products. 
+ */ + +static inline void __maybe_unused add_quirk_sd(struct mmc_card *card, int data) +{ + if (mmc_card_sd(card)) + card->quirks |= data; +} + +static inline void __maybe_unused remove_quirk_sd(struct mmc_card *card, + int data) +{ + if (mmc_card_sd(card)) + card->quirks &= ~data; +} + +static inline int mmc_card_lenient_fn0(const struct mmc_card *c) +{ + return c->quirks & MMC_QUIRK_LENIENT_FN0; +} + +static inline int mmc_blksz_for_byte_mode(const struct mmc_card *c) +{ + return c->quirks & MMC_QUIRK_BLKSZ_FOR_BYTE_MODE; +} + +static inline int mmc_card_disable_cd(const struct mmc_card *c) +{ + return c->quirks & MMC_QUIRK_DISABLE_CD; +} + +static inline int mmc_card_nonstd_func_interface(const struct mmc_card *c) +{ + return c->quirks & MMC_QUIRK_NONSTD_FUNC_IF; +} + +static inline int mmc_card_broken_byte_mode_512(const struct mmc_card *c) +{ + return c->quirks & MMC_QUIRK_BROKEN_BYTE_MODE_512; +} + +static inline int mmc_card_long_read_time(const struct mmc_card *c) +{ + return c->quirks & MMC_QUIRK_LONG_READ_TIME; +} + +static inline int mmc_card_broken_irq_polling(const struct mmc_card *c) +{ + return c->quirks & MMC_QUIRK_BROKEN_IRQ_POLLING; +} + +static inline int mmc_card_broken_hpi(const struct mmc_card *c) +{ + return c->quirks & MMC_QUIRK_BROKEN_HPI; +} + +#define mmc_card_name(c) ((c)->cid.prod_name) +#define mmc_card_id(c) (dev_name(&(c)->dev)) + +#define mmc_dev_to_card(d) container_of(d, struct mmc_card, dev) + +/* + * MMC device driver (e.g., Flash card, I/O card...) 
+ */ +struct mmc_driver { + struct device_driver drv; + int (*probe)(struct mmc_card *); + void (*remove)(struct mmc_card *); + void (*shutdown)(struct mmc_card *); +}; + +extern int mmc_register_driver(struct mmc_driver *); +extern void mmc_unregister_driver(struct mmc_driver *); + +extern void mmc_fixup_device(struct mmc_card *card, + const struct mmc_fixup *table); + #endif /* LINUX_MMC_CARD_H */ diff --git a/include/linux/mmc/core.h b/include/linux/mmc/core.h index 71101d1ec8..2b953eb8ce 100644 --- a/include/linux/mmc/core.h +++ b/include/linux/mmc/core.h @@ -1,28 +1,20 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * linux/include/linux/mmc/core.h + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. */ #ifndef LINUX_MMC_CORE_H #define LINUX_MMC_CORE_H +#include #include -#include +struct request; struct mmc_data; struct mmc_request; -enum mmc_blk_status { - MMC_BLK_SUCCESS = 0, - MMC_BLK_PARTIAL, - MMC_BLK_CMD_ERR, - MMC_BLK_RETRY, - MMC_BLK_ABORT, - MMC_BLK_DATA_ERR, - MMC_BLK_ECC_ERR, - MMC_BLK_NOMEDIUM, - MMC_BLK_NEW_REQUEST, -}; - struct mmc_command { u32 opcode; u32 arg; @@ -107,6 +99,9 @@ struct mmc_command { */ unsigned int busy_timeout; /* busy detect timeout in ms */ + /* Set this flag only for blocking sanitize request */ + bool sanitize_busy; + struct mmc_data *data; /* data segment associated with cmd */ struct mmc_request *mrq; /* associated request */ }; @@ -116,18 +111,11 @@ struct mmc_data { unsigned int timeout_clks; /* data timeout (in clocks) */ unsigned int blksz; /* data block size */ unsigned int blocks; /* number of blocks */ - unsigned int blk_addr; /* block address */ int error; /* data error */ unsigned int flags; -#define MMC_DATA_WRITE BIT(8) -#define MMC_DATA_READ BIT(9) -/* Extra flags used by CQE */ -#define MMC_DATA_QBR BIT(10) /* CQE queue barrier*/ -#define MMC_DATA_PRIO BIT(11) /* 
CQE high priority */ -#define MMC_DATA_REL_WR BIT(12) /* Reliable write */ -#define MMC_DATA_DAT_TAG BIT(13) /* Tag request */ -#define MMC_DATA_FORCED_PRG BIT(14) /* Forced programming */ +#define MMC_DATA_WRITE (1 << 8) +#define MMC_DATA_READ (1 << 9) unsigned int bytes_xfered; @@ -150,33 +138,86 @@ struct mmc_request { struct completion completion; struct completion cmd_completion; void (*done)(struct mmc_request *);/* completion function */ - /* - * Notify uppers layers (e.g. mmc block driver) that recovery is needed - * due to an error associated with the mmc_request. Currently used only - * by CQE. - */ - void (*recovery_notifier)(struct mmc_request *); struct mmc_host *host; /* Allow other commands during this ongoing data transfer or busy wait */ bool cap_cmd_during_tfr; - - int tag; - -#ifdef CONFIG_MMC_CRYPTO - const struct bio_crypt_ctx *crypto_ctx; - int crypto_key_slot; -#endif }; struct mmc_card; +struct mmc_async_req; -void mmc_wait_for_req(struct mmc_host *host, struct mmc_request *mrq); -int mmc_wait_for_cmd(struct mmc_host *host, struct mmc_command *cmd, - int retries); +extern int mmc_stop_bkops(struct mmc_card *); +extern int mmc_read_bkops_status(struct mmc_card *); +extern struct mmc_async_req *mmc_start_req(struct mmc_host *, + struct mmc_async_req *, int *); +extern int mmc_interrupt_hpi(struct mmc_card *); +extern void mmc_wait_for_req(struct mmc_host *, struct mmc_request *); +extern void mmc_wait_for_req_done(struct mmc_host *host, + struct mmc_request *mrq); +extern bool mmc_is_req_done(struct mmc_host *host, struct mmc_request *mrq); +extern int mmc_wait_for_cmd(struct mmc_host *, struct mmc_command *, int); +extern int mmc_app_cmd(struct mmc_host *, struct mmc_card *); +extern int mmc_wait_for_app_cmd(struct mmc_host *, struct mmc_card *, + struct mmc_command *, int); +extern void mmc_start_bkops(struct mmc_card *card, bool from_exception); +extern int mmc_switch(struct mmc_card *, u8, u8, u8, unsigned int); +extern int 
mmc_send_tuning(struct mmc_host *host, u32 opcode, int *cmd_error); +extern int mmc_get_ext_csd(struct mmc_card *card, u8 **new_ext_csd); -int mmc_hw_reset(struct mmc_host *host); -int mmc_sw_reset(struct mmc_host *host); -void mmc_set_data_timeout(struct mmc_data *data, const struct mmc_card *card); +#define MMC_ERASE_ARG 0x00000000 +#define MMC_SECURE_ERASE_ARG 0x80000000 +#define MMC_TRIM_ARG 0x00000001 +#define MMC_DISCARD_ARG 0x00000003 +#define MMC_SECURE_TRIM1_ARG 0x80000001 +#define MMC_SECURE_TRIM2_ARG 0x80008000 + +#define MMC_SECURE_ARGS 0x80000000 +#define MMC_TRIM_ARGS 0x00008001 + +extern int mmc_erase(struct mmc_card *card, unsigned int from, unsigned int nr, + unsigned int arg); +extern int mmc_can_erase(struct mmc_card *card); +extern int mmc_can_trim(struct mmc_card *card); +extern int mmc_can_discard(struct mmc_card *card); +extern int mmc_can_sanitize(struct mmc_card *card); +extern int mmc_can_secure_erase_trim(struct mmc_card *card); +extern int mmc_erase_group_aligned(struct mmc_card *card, unsigned int from, + unsigned int nr); +extern unsigned int mmc_calc_max_discard(struct mmc_card *card); + +extern int mmc_set_blocklen(struct mmc_card *card, unsigned int blocklen); +extern int mmc_set_blockcount(struct mmc_card *card, unsigned int blockcount, + bool is_rel_write); +extern int mmc_hw_reset(struct mmc_host *host); +extern int mmc_can_reset(struct mmc_card *card); + +extern void mmc_set_data_timeout(struct mmc_data *, const struct mmc_card *); +extern unsigned int mmc_align_data_size(struct mmc_card *, unsigned int); + +extern int __mmc_claim_host(struct mmc_host *host, atomic_t *abort); +extern void mmc_release_host(struct mmc_host *host); + +extern void mmc_get_card(struct mmc_card *card); +extern void mmc_put_card(struct mmc_card *card); + +extern int mmc_flush_cache(struct mmc_card *); + +extern int mmc_detect_card_removed(struct mmc_host *host); + +/** + * mmc_claim_host - exclusively claim a host + * @host: mmc host to claim + * + * 
Claim a host for a set of operations. + */ +static inline void mmc_claim_host(struct mmc_host *host) +{ + __mmc_claim_host(host, NULL); +} + +struct device_node; +extern u32 mmc_vddrange_to_ocrmask(int vdd_min, int vdd_max); +extern int mmc_of_parse_voltage(struct device_node *np, u32 *mask); #endif /* LINUX_MMC_CORE_H */ diff --git a/include/linux/mmc/dw_mmc.h b/include/linux/mmc/dw_mmc.h new file mode 100644 index 0000000000..f5af2bd35e --- /dev/null +++ b/include/linux/mmc/dw_mmc.h @@ -0,0 +1,268 @@ +/* + * Synopsys DesignWare Multimedia Card Interface driver + * (Based on NXP driver for lpc 31xx) + * + * Copyright (C) 2009 NXP Semiconductors + * Copyright (C) 2009, 2010 Imagination Technologies Ltd. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + */ + +#ifndef LINUX_MMC_DW_MMC_H +#define LINUX_MMC_DW_MMC_H + +#include +#include +#include +#include + +#define MAX_MCI_SLOTS 2 + +enum dw_mci_state { + STATE_IDLE = 0, + STATE_SENDING_CMD, + STATE_SENDING_DATA, + STATE_DATA_BUSY, + STATE_SENDING_STOP, + STATE_DATA_ERROR, + STATE_SENDING_CMD11, + STATE_WAITING_CMD11_DONE, +}; + +enum { + EVENT_CMD_COMPLETE = 0, + EVENT_XFER_COMPLETE, + EVENT_DATA_COMPLETE, + EVENT_DATA_ERROR, +}; + +struct mmc_data; + +enum { + TRANS_MODE_PIO = 0, + TRANS_MODE_IDMAC, + TRANS_MODE_EDMAC +}; + +struct dw_mci_dma_slave { + struct dma_chan *ch; + enum dma_transfer_direction direction; +}; + +/** + * struct dw_mci - MMC controller state shared between all slots + * @lock: Spinlock protecting the queue and associated data. + * @irq_lock: Spinlock protecting the INTMASK setting. + * @regs: Pointer to MMIO registers. + * @fifo_reg: Pointer to MMIO registers for data FIFO + * @sg: Scatterlist entry currently being processed by PIO code, if any. 
+ * @sg_miter: PIO mapping scatterlist iterator. + * @cur_slot: The slot which is currently using the controller. + * @mrq: The request currently being processed on @cur_slot, + * or NULL if the controller is idle. + * @cmd: The command currently being sent to the card, or NULL. + * @data: The data currently being transferred, or NULL if no data + * transfer is in progress. + * @stop_abort: The command currently prepared for stoping transfer. + * @prev_blksz: The former transfer blksz record. + * @timing: Record of current ios timing. + * @use_dma: Whether DMA channel is initialized or not. + * @using_dma: Whether DMA is in use for the current transfer. + * @dma_64bit_address: Whether DMA supports 64-bit address mode or not. + * @sg_dma: Bus address of DMA buffer. + * @sg_cpu: Virtual address of DMA buffer. + * @dma_ops: Pointer to platform-specific DMA callbacks. + * @cmd_status: Snapshot of SR taken upon completion of the current + * @ring_size: Buffer size for idma descriptors. + * command. Only valid when EVENT_CMD_COMPLETE is pending. + * @dms: structure of slave-dma private data. + * @phy_regs: physical address of controller's register map + * @data_status: Snapshot of SR taken upon completion of the current + * data transfer. Only valid when EVENT_DATA_COMPLETE or + * EVENT_DATA_ERROR is pending. + * @stop_cmdr: Value to be loaded into CMDR when the stop command is + * to be sent. + * @dir_status: Direction of current transfer. + * @tasklet: Tasklet running the request state machine. + * @pending_events: Bitmask of events flagged by the interrupt handler + * to be processed by the tasklet. + * @completed_events: Bitmask of events which the state machine has + * processed. + * @state: Tasklet state. + * @queue: List of slots waiting for access to the controller. + * @bus_hz: The rate of @mck in Hz. This forms the basis for MMC bus + * rate and timeout calculations. + * @current_speed: Configured rate of the controller. 
+ * @num_slots: Number of slots available. + * @fifoth_val: The value of FIFOTH register. + * @verid: Denote Version ID. + * @dev: Device associated with the MMC controller. + * @pdata: Platform data associated with the MMC controller. + * @drv_data: Driver specific data for identified variant of the controller + * @priv: Implementation defined private data. + * @biu_clk: Pointer to bus interface unit clock instance. + * @ciu_clk: Pointer to card interface unit clock instance. + * @slot: Slots sharing this MMC controller. + * @fifo_depth: depth of FIFO. + * @data_shift: log2 of FIFO item size. + * @part_buf_start: Start index in part_buf. + * @part_buf_count: Bytes of partial data in part_buf. + * @part_buf: Simple buffer for partial fifo reads/writes. + * @push_data: Pointer to FIFO push function. + * @pull_data: Pointer to FIFO pull function. + * @vqmmc_enabled: Status of vqmmc, should be true or false. + * @irq_flags: The flags to be passed to request_irq. + * @irq: The irq value to be passed to request_irq. + * @sdio_id0: Number of slot0 in the SDIO interrupt registers. + * @cmd11_timer: Timer for SD3.0 voltage switch over scheme. + * @dto_timer: Timer for broken data transfer over scheme. + * + * Locking + * ======= + * + * @lock is a softirq-safe spinlock protecting @queue as well as + * @cur_slot, @mrq and @state. These must always be updated + * at the same time while holding @lock. + * + * @irq_lock is an irq-safe spinlock protecting the INTMASK register + * to allow the interrupt handler to modify it directly. Held for only long + * enough to read-modify-write INTMASK and no other locks are grabbed when + * holding this one. + * + * The @mrq field of struct dw_mci_slot is also protected by @lock, + * and must always be written at the same time as the slot is added to + * @queue. + * + * @pending_events and @completed_events are accessed using atomic bit + * operations, so they don't need any locking. 
+ * + * None of the fields touched by the interrupt handler need any + * locking. However, ordering is important: Before EVENT_DATA_ERROR or + * EVENT_DATA_COMPLETE is set in @pending_events, all data-related + * interrupts must be disabled and @data_status updated with a + * snapshot of SR. Similarly, before EVENT_CMD_COMPLETE is set, the + * CMDRDY interrupt must be disabled and @cmd_status updated with a + * snapshot of SR, and before EVENT_XFER_COMPLETE can be set, the + * bytes_xfered field of @data must be written. This is ensured by + * using barriers. + */ +struct dw_mci { + spinlock_t lock; + spinlock_t irq_lock; + void __iomem *regs; + void __iomem *fifo_reg; + + struct scatterlist *sg; + struct sg_mapping_iter sg_miter; + + struct dw_mci_slot *cur_slot; + struct mmc_request *mrq; + struct mmc_command *cmd; + struct mmc_data *data; + struct mmc_command stop_abort; + unsigned int prev_blksz; + unsigned char timing; + + /* DMA interface members*/ + int use_dma; + int using_dma; + int dma_64bit_address; + + dma_addr_t sg_dma; + void *sg_cpu; + const struct dw_mci_dma_ops *dma_ops; + /* For idmac */ + unsigned int ring_size; + + /* For edmac */ + struct dw_mci_dma_slave *dms; + /* Registers's physical base address */ + resource_size_t phy_regs; + + u32 cmd_status; + u32 data_status; + u32 stop_cmdr; + u32 dir_status; + struct tasklet_struct tasklet; + unsigned long pending_events; + unsigned long completed_events; + enum dw_mci_state state; + struct list_head queue; + + u32 bus_hz; + u32 current_speed; + u32 num_slots; + u32 fifoth_val; + u16 verid; + struct device *dev; + struct dw_mci_board *pdata; + const struct dw_mci_drv_data *drv_data; + void *priv; + struct clk *biu_clk; + struct clk *ciu_clk; + struct dw_mci_slot *slot[MAX_MCI_SLOTS]; + + /* FIFO push and pull */ + int fifo_depth; + int data_shift; + u8 part_buf_start; + u8 part_buf_count; + union { + u16 part_buf16; + u32 part_buf32; + u64 part_buf; + }; + void (*push_data)(struct dw_mci *host, void 
*buf, int cnt); + void (*pull_data)(struct dw_mci *host, void *buf, int cnt); + + bool vqmmc_enabled; + unsigned long irq_flags; /* IRQ flags */ + int irq; + + int sdio_id0; + + struct timer_list cmd11_timer; + struct timer_list dto_timer; +}; + +/* DMA ops for Internal/External DMAC interface */ +struct dw_mci_dma_ops { + /* DMA Ops */ + int (*init)(struct dw_mci *host); + int (*start)(struct dw_mci *host, unsigned int sg_len); + void (*complete)(void *host); + void (*stop)(struct dw_mci *host); + void (*cleanup)(struct dw_mci *host); + void (*exit)(struct dw_mci *host); +}; + +struct dma_pdata; + +/* Board platform data */ +struct dw_mci_board { + u32 num_slots; + + unsigned int bus_hz; /* Clock speed at the cclk_in pad */ + + u32 caps; /* Capabilities */ + u32 caps2; /* More capabilities */ + u32 pm_caps; /* PM capabilities */ + /* + * Override fifo depth. If 0, autodetect it from the FIFOTH register, + * but note that this may not be reliable after a bootloader has used + * it. + */ + unsigned int fifo_depth; + + /* delay in mS before detecting cards after interrupt */ + u32 detect_delay_ms; + + struct reset_control *rstc; + struct dw_mci_dma_ops *dma_ops; + struct dma_pdata *data; +}; + +#endif /* LINUX_MMC_DW_MMC_H */ diff --git a/include/linux/mmc/host.h b/include/linux/mmc/host.h index 0c0c9a0fdf..0b2439441c 100644 --- a/include/linux/mmc/host.h +++ b/include/linux/mmc/host.h @@ -1,26 +1,30 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * linux/include/linux/mmc/host.h * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * * Host driver specific definitions. 
*/ #ifndef LINUX_MMC_HOST_H #define LINUX_MMC_HOST_H +#include +#include +#include #include #include #include #include #include +#include #include -#include -#include struct mmc_ios { unsigned int clock; /* clock rate */ unsigned short vdd; - unsigned int power_delay_ms; /* waiting for stable power */ /* vdd stores the bit number of the selected voltage range from below. */ @@ -61,8 +65,6 @@ struct mmc_ios { #define MMC_TIMING_MMC_DDR52 8 #define MMC_TIMING_MMC_HS200 9 #define MMC_TIMING_MMC_HS400 10 -#define MMC_TIMING_SD_EXP 11 -#define MMC_TIMING_SD_EXP_1_2V 12 unsigned char signal_voltage; /* signalling voltage (1.8V or 3.3V) */ @@ -80,19 +82,6 @@ struct mmc_ios { bool enhanced_strobe; /* hs400es selection */ }; -struct mmc_clk_phase { - bool valid; - u16 in_deg; - u16 out_deg; -}; - -#define MMC_NUM_CLK_PHASES (MMC_TIMING_MMC_HS400 + 1) -struct mmc_clk_phase_map { - struct mmc_clk_phase phase[MMC_NUM_CLK_PHASES]; -}; - -struct mmc_host; - struct mmc_host_ops { /* * It is optional for the host to implement pre_req and post_req in @@ -104,11 +93,9 @@ struct mmc_host_ops { */ void (*post_req)(struct mmc_host *host, struct mmc_request *req, int err); - void (*pre_req)(struct mmc_host *host, struct mmc_request *req); + void (*pre_req)(struct mmc_host *host, struct mmc_request *req, + bool is_first_req); void (*request)(struct mmc_host *host, struct mmc_request *req); - /* Submit one request to host in atomic context. */ - int (*request_atomic)(struct mmc_host *host, - struct mmc_request *req); /* * Avoid calling the next three functions too often or in a "fast @@ -145,15 +132,13 @@ struct mmc_host_ops { int (*get_cd)(struct mmc_host *host); void (*enable_sdio_irq)(struct mmc_host *host, int enable); - /* Mandatory callback when using MMC_CAP2_SDIO_IRQ_NOTHREAD. 
*/ - void (*ack_sdio_irq)(struct mmc_host *host); /* optional callback for HC quirks */ void (*init_card)(struct mmc_host *host, struct mmc_card *card); int (*start_signal_voltage_switch)(struct mmc_host *host, struct mmc_ios *ios); - /* Check if the card is pulling dat[0] low */ + /* Check if the card is pulling dat[0:3] low */ int (*card_busy)(struct mmc_host *host); /* The tuning command opcode value is different for SD and eMMC cards */ @@ -161,23 +146,12 @@ struct mmc_host_ops { /* Prepare HS400 target operating frequency depending host driver */ int (*prepare_hs400_tuning)(struct mmc_host *host, struct mmc_ios *ios); - - /* Prepare switch to DDR during the HS400 init sequence */ - int (*hs400_prepare_ddr)(struct mmc_host *host); - - /* Prepare for switching from HS400 to HS200 */ - void (*hs400_downgrade)(struct mmc_host *host); - - /* Complete selection of HS400 */ - void (*hs400_complete)(struct mmc_host *host); - /* Prepare enhanced strobe depending host driver */ void (*hs400_enhanced_strobe)(struct mmc_host *host, struct mmc_ios *ios); int (*select_drive_strength)(struct mmc_card *card, unsigned int max_dtr, int host_drv, int card_drv, int *drv_type); - /* Reset the eMMC card via RST_n */ void (*hw_reset)(struct mmc_host *host); void (*card_event)(struct mmc_host *host); @@ -187,54 +161,10 @@ struct mmc_host_ops { */ int (*multi_io_quirk)(struct mmc_card *card, unsigned int direction, int blk_size); - - /* Initialize an SD express card, mandatory for MMC_CAP2_SD_EXP. */ - int (*init_sd_express)(struct mmc_host *host, struct mmc_ios *ios); }; -struct mmc_cqe_ops { - /* Allocate resources, and make the CQE operational */ - int (*cqe_enable)(struct mmc_host *host, struct mmc_card *card); - /* Free resources, and make the CQE non-operational */ - void (*cqe_disable)(struct mmc_host *host); - /* - * Issue a read, write or DCMD request to the CQE. Also deal with the - * effect of ->cqe_off(). 
- */ - int (*cqe_request)(struct mmc_host *host, struct mmc_request *mrq); - /* Free resources (e.g. DMA mapping) associated with the request */ - void (*cqe_post_req)(struct mmc_host *host, struct mmc_request *mrq); - /* - * Prepare the CQE and host controller to accept non-CQ commands. There - * is no corresponding ->cqe_on(), instead ->cqe_request() is required - * to deal with that. - */ - void (*cqe_off)(struct mmc_host *host); - /* - * Wait for all CQE tasks to complete. Return an error if recovery - * becomes necessary. - */ - int (*cqe_wait_for_idle)(struct mmc_host *host); - /* - * Notify CQE that a request has timed out. Return false if the request - * completed or true if a timeout happened in which case indicate if - * recovery is needed. - */ - bool (*cqe_timeout)(struct mmc_host *host, struct mmc_request *mrq, - bool *recovery_needed); - /* - * Stop all CQE activity and prepare the CQE and host controller to - * accept recovery commands. - */ - void (*cqe_recovery_start)(struct mmc_host *host); - /* - * Clear the queue and call mmc_cqe_request_done() on all requests. - * Requests that errored will have the error set on the mmc_request - * (data->error or cmd->error for DCMD). Requests that did not error - * will have zero data bytes transferred. - */ - void (*cqe_recovery_finish)(struct mmc_host *host); -}; +struct mmc_card; +struct device; struct mmc_async_req { /* active mmc request */ @@ -243,7 +173,7 @@ struct mmc_async_req { * Check error status of completed mmc request. * Returns 0 if success otherwise non zero. 
*/ - enum mmc_blk_status (*err_check)(struct mmc_card *, struct mmc_async_req *); + int (*err_check) (struct mmc_card *, struct mmc_async_req *); }; /** @@ -259,7 +189,6 @@ struct mmc_async_req { */ struct mmc_slot { int cd_irq; - bool cd_wake_enabled; void *handler_priv; }; @@ -269,12 +198,14 @@ struct mmc_slot { * @is_new_req wake up reason was new request * @is_waiting_last_req mmc context waiting for single running request * @wait wait queue + * @lock lock to protect data fields */ struct mmc_context_info { bool is_done_rcv; bool is_new_req; bool is_waiting_last_req; wait_queue_head_t wait; + spinlock_t lock; }; struct regulator; @@ -285,10 +216,6 @@ struct mmc_supply { struct regulator *vqmmc; /* Optional Vccq supply */ }; -struct mmc_ctx { - struct task_struct *task; -}; - struct mmc_host { struct device *parent; struct device class_dev; @@ -302,7 +229,9 @@ struct mmc_host { u32 ocr_avail_sdio; /* SDIO-specific OCR */ u32 ocr_avail_sd; /* SD-specific OCR */ u32 ocr_avail_mmc; /* MMC-specific OCR */ - struct wakeup_source *ws; /* Enable consume of uevents */ +#ifdef CONFIG_PM_SLEEP + struct notifier_block pm_notify; +#endif u32 max_current_330; u32 max_current_300; u32 max_current_180; @@ -337,51 +266,45 @@ struct mmc_host { #define MMC_CAP_AGGRESSIVE_PM (1 << 7) /* Suspend (e)MMC/SD at idle */ #define MMC_CAP_NONREMOVABLE (1 << 8) /* Nonremovable e.g. 
eMMC */ #define MMC_CAP_WAIT_WHILE_BUSY (1 << 9) /* Waits while card is busy */ -#define MMC_CAP_3_3V_DDR (1 << 11) /* Host supports eMMC DDR 3.3V */ -#define MMC_CAP_1_8V_DDR (1 << 12) /* Host supports eMMC DDR 1.8V */ -#define MMC_CAP_1_2V_DDR (1 << 13) /* Host supports eMMC DDR 1.2V */ -#define MMC_CAP_DDR (MMC_CAP_3_3V_DDR | MMC_CAP_1_8V_DDR | \ - MMC_CAP_1_2V_DDR) -#define MMC_CAP_POWER_OFF_CARD (1 << 14) /* Can power off after boot */ -#define MMC_CAP_BUS_WIDTH_TEST (1 << 15) /* CMD14/CMD19 bus width ok */ -#define MMC_CAP_UHS_SDR12 (1 << 16) /* Host supports UHS SDR12 mode */ -#define MMC_CAP_UHS_SDR25 (1 << 17) /* Host supports UHS SDR25 mode */ -#define MMC_CAP_UHS_SDR50 (1 << 18) /* Host supports UHS SDR50 mode */ -#define MMC_CAP_UHS_SDR104 (1 << 19) /* Host supports UHS SDR104 mode */ -#define MMC_CAP_UHS_DDR50 (1 << 20) /* Host supports UHS DDR50 mode */ -#define MMC_CAP_UHS (MMC_CAP_UHS_SDR12 | MMC_CAP_UHS_SDR25 | \ - MMC_CAP_UHS_SDR50 | MMC_CAP_UHS_SDR104 | \ - MMC_CAP_UHS_DDR50) -#define MMC_CAP_SYNC_RUNTIME_PM (1 << 21) /* Synced runtime PM suspends. */ -#define MMC_CAP_NEED_RSP_BUSY (1 << 22) /* Commands with R1B can't use R1. 
*/ +#define MMC_CAP_ERASE (1 << 10) /* Allow erase/trim commands */ +#define MMC_CAP_1_8V_DDR (1 << 11) /* can support */ + /* DDR mode at 1.8V */ +#define MMC_CAP_1_2V_DDR (1 << 12) /* can support */ + /* DDR mode at 1.2V */ +#define MMC_CAP_POWER_OFF_CARD (1 << 13) /* Can power off after boot */ +#define MMC_CAP_BUS_WIDTH_TEST (1 << 14) /* CMD14/CMD19 bus width ok */ +#define MMC_CAP_UHS_SDR12 (1 << 15) /* Host supports UHS SDR12 mode */ +#define MMC_CAP_UHS_SDR25 (1 << 16) /* Host supports UHS SDR25 mode */ +#define MMC_CAP_UHS_SDR50 (1 << 17) /* Host supports UHS SDR50 mode */ +#define MMC_CAP_UHS_SDR104 (1 << 18) /* Host supports UHS SDR104 mode */ +#define MMC_CAP_UHS_DDR50 (1 << 19) /* Host supports UHS DDR50 mode */ #define MMC_CAP_DRIVER_TYPE_A (1 << 23) /* Host supports Driver Type A */ #define MMC_CAP_DRIVER_TYPE_C (1 << 24) /* Host supports Driver Type C */ #define MMC_CAP_DRIVER_TYPE_D (1 << 25) /* Host supports Driver Type D */ -#define MMC_CAP_DONE_COMPLETE (1 << 27) /* RW reqs can be completed within mmc_request_done() */ -#define MMC_CAP_CD_WAKE (1 << 28) /* Enable card detect wake */ #define MMC_CAP_CMD_DURING_TFR (1 << 29) /* Commands during data transfer */ #define MMC_CAP_CMD23 (1 << 30) /* CMD23 supported. 
*/ -#define MMC_CAP_HW_RESET (1 << 31) /* Reset the eMMC card via RST_n */ +#define MMC_CAP_HW_RESET (1 << 31) /* Hardware reset */ u32 caps2; /* More host capabilities */ #define MMC_CAP2_BOOTPART_NOACC (1 << 0) /* Boot partition no access */ #define MMC_CAP2_FULL_PWR_CYCLE (1 << 2) /* Can do full power cycle */ -#define MMC_CAP2_FULL_PWR_CYCLE_IN_SUSPEND (1 << 3) /* Can do full power cycle in suspend */ #define MMC_CAP2_HS200_1_8V_SDR (1 << 5) /* can support */ #define MMC_CAP2_HS200_1_2V_SDR (1 << 6) /* can support */ #define MMC_CAP2_HS200 (MMC_CAP2_HS200_1_8V_SDR | \ MMC_CAP2_HS200_1_2V_SDR) -#define MMC_CAP2_SD_EXP (1 << 7) /* SD express via PCIe */ -#define MMC_CAP2_SD_EXP_1_2V (1 << 8) /* SD express 1.2V */ +#define MMC_CAP2_HC_ERASE_SZ (1 << 9) /* High-capacity erase size */ #define MMC_CAP2_CD_ACTIVE_HIGH (1 << 10) /* Card-detect signal active high */ #define MMC_CAP2_RO_ACTIVE_HIGH (1 << 11) /* Write-protect signal active high */ +#define MMC_CAP2_PACKED_RD (1 << 12) /* Allow packed read */ +#define MMC_CAP2_PACKED_WR (1 << 13) /* Allow packed write */ +#define MMC_CAP2_PACKED_CMD (MMC_CAP2_PACKED_RD | \ + MMC_CAP2_PACKED_WR) #define MMC_CAP2_NO_PRESCAN_POWERUP (1 << 14) /* Don't power up before scan */ #define MMC_CAP2_HS400_1_8V (1 << 15) /* Can support HS400 1.8V */ #define MMC_CAP2_HS400_1_2V (1 << 16) /* Can support HS400 1.2V */ #define MMC_CAP2_HS400 (MMC_CAP2_HS400_1_8V | \ MMC_CAP2_HS400_1_2V) -#define MMC_CAP2_HSX00_1_8V (MMC_CAP2_HS200_1_8V_SDR | MMC_CAP2_HS400_1_8V) #define MMC_CAP2_HSX00_1_2V (MMC_CAP2_HS200_1_2V_SDR | MMC_CAP2_HS400_1_2V) #define MMC_CAP2_SDIO_IRQ_NOTHREAD (1 << 17) #define MMC_CAP2_NO_WRITE_PROTECT (1 << 18) /* No physical write protect pin, assume that card is always read-write */ @@ -389,18 +312,6 @@ struct mmc_host { #define MMC_CAP2_HS400_ES (1 << 20) /* Host supports enhanced strobe */ #define MMC_CAP2_NO_SD (1 << 21) /* Do not send SD commands during initialization */ #define MMC_CAP2_NO_MMC (1 << 22) /* Do not send 
(e)MMC commands during initialization */ -#define MMC_CAP2_CQE (1 << 23) /* Has eMMC command queue engine */ -#define MMC_CAP2_CQE_DCMD (1 << 24) /* CQE can issue a direct command */ -#define MMC_CAP2_AVOID_3_3V (1 << 25) /* Host must negotiate down from 3.3V */ -#define MMC_CAP2_MERGE_CAPABLE (1 << 26) /* Host can merge a segment over the segment size */ -#ifdef CONFIG_MMC_CRYPTO -#define MMC_CAP2_CRYPTO (1 << 27) /* Host supports inline encryption */ -#else -#define MMC_CAP2_CRYPTO 0 -#endif -#define MMC_CAP2_ALT_GPT_TEGRA (1 << 28) /* Host with eMMC that has GPT entry at a non-standard location */ - - int fixed_drv_type; /* fixed driver type for non-removable media */ mmc_pm_flag_t pm_caps; /* supported pm features */ @@ -421,13 +332,14 @@ struct mmc_host { /* group bitfields together to minimize padding */ unsigned int use_spi_crc:1; unsigned int claimed:1; /* host exclusively claimed */ - unsigned int doing_init_tune:1; /* initial tuning in progress */ + unsigned int bus_dead:1; /* bus has been released */ +#ifdef CONFIG_MMC_DEBUG + unsigned int removed:1; /* host is being removed */ +#endif unsigned int can_retune:1; /* re-tuning can be used */ unsigned int doing_retune:1; /* re-tuning in progress */ unsigned int retune_now:1; /* do re-tuning at next req */ unsigned int retune_paused:1; /* re-tuning is temporarily disabled */ - unsigned int retune_crc_disable:1; /* don't trigger retune upon crc */ - unsigned int can_dma_map_merge:1; /* merging can be used */ int rescan_disable; /* disable card detection */ int rescan_entered; /* used with nonremovable devices */ @@ -442,19 +354,18 @@ struct mmc_host { struct mmc_card *card; /* device attached to this host */ wait_queue_head_t wq; - struct mmc_ctx *claimer; /* context that has host claimed */ + struct task_struct *claimer; /* task that has host claimed */ int claim_cnt; /* "claim" nesting count */ - struct mmc_ctx default_ctx; /* default context */ struct delayed_work detect; int detect_change; /* card detect 
flag */ struct mmc_slot slot; const struct mmc_bus_ops *bus_ops; /* current bus driver */ + unsigned int bus_refs; /* reference counter */ unsigned int sdio_irqs; struct task_struct *sdio_irq_thread; - struct delayed_work sdio_irq_work; bool sdio_irq_pending; atomic_t sdio_irq_thread_abort; @@ -469,6 +380,9 @@ struct mmc_host { struct dentry *debugfs_root; + struct mmc_async_req *areq; /* active async req */ + struct mmc_context_info context_info; /* async synchronization info */ + /* Ongoing data transfer that allows commands during transfer */ struct mmc_request *ongoing_mrq; @@ -483,66 +397,33 @@ struct mmc_host { int dsr_req; /* DSR value is valid */ u32 dsr; /* optional driver stage (DSR) value */ - /* Command Queue Engine (CQE) support */ - const struct mmc_cqe_ops *cqe_ops; - void *cqe_private; - int cqe_qdepth; - bool cqe_enabled; - bool cqe_on; - - /* Inline encryption support */ -#ifdef CONFIG_MMC_CRYPTO - struct blk_keyslot_manager ksm; -#endif - - /* Host Software Queue support */ - bool hsq_enabled; - - unsigned long private[] ____cacheline_aligned; + unsigned long private[0] ____cacheline_aligned; }; -struct device_node; - struct mmc_host *mmc_alloc_host(int extra, struct device *); int mmc_add_host(struct mmc_host *); void mmc_remove_host(struct mmc_host *); void mmc_free_host(struct mmc_host *); -void mmc_of_parse_clk_phase(struct mmc_host *host, - struct mmc_clk_phase_map *map); int mmc_of_parse(struct mmc_host *host); -int mmc_of_parse_voltage(struct mmc_host *host, u32 *mask); static inline void *mmc_priv(struct mmc_host *host) { return (void *)host->private; } -static inline struct mmc_host *mmc_from_priv(void *priv) -{ - return container_of(priv, struct mmc_host, private); -} - #define mmc_host_is_spi(host) ((host)->caps & MMC_CAP_SPI) #define mmc_dev(x) ((x)->parent) #define mmc_classdev(x) (&(x)->class_dev) #define mmc_hostname(x) (dev_name(&(x)->class_dev)) +int mmc_power_save_host(struct mmc_host *host); +int mmc_power_restore_host(struct 
mmc_host *host); + void mmc_detect_change(struct mmc_host *, unsigned long delay); void mmc_request_done(struct mmc_host *, struct mmc_request *); void mmc_command_done(struct mmc_host *host, struct mmc_request *mrq); -void mmc_cqe_request_done(struct mmc_host *host, struct mmc_request *mrq); - -/* - * May be called from host driver's system/runtime suspend/resume callbacks, - * to know if SDIO IRQs has been claimed. - */ -static inline bool sdio_irq_claimed(struct mmc_host *host) -{ - return host->sdio_irqs > 0; -} - static inline void mmc_signal_sdio_irq(struct mmc_host *host) { host->ops->enable_sdio_irq(host, 0); @@ -551,14 +432,20 @@ static inline void mmc_signal_sdio_irq(struct mmc_host *host) wake_up_process(host->sdio_irq_thread); } -void sdio_signal_irq(struct mmc_host *host); +void sdio_run_irqs(struct mmc_host *host); #ifdef CONFIG_REGULATOR +int mmc_regulator_get_ocrmask(struct regulator *supply); int mmc_regulator_set_ocr(struct mmc_host *mmc, struct regulator *supply, unsigned short vdd_bit); int mmc_regulator_set_vqmmc(struct mmc_host *mmc, struct mmc_ios *ios); #else +static inline int mmc_regulator_get_ocrmask(struct regulator *supply) +{ + return 0; +} + static inline int mmc_regulator_set_ocr(struct mmc_host *mmc, struct regulator *supply, unsigned short vdd_bit) @@ -590,20 +477,61 @@ static inline int mmc_card_wake_sdio_irq(struct mmc_host *host) return host->pm_flags & MMC_PM_WAKE_SDIO_IRQ; } -/* TODO: Move to private header */ +static inline int mmc_host_cmd23(struct mmc_host *host) +{ + return host->caps & MMC_CAP_CMD23; +} + +static inline int mmc_boot_partition_access(struct mmc_host *host) +{ + return !(host->caps2 & MMC_CAP2_BOOTPART_NOACC); +} + +static inline int mmc_host_uhs(struct mmc_host *host) +{ + return host->caps & + (MMC_CAP_UHS_SDR12 | MMC_CAP_UHS_SDR25 | + MMC_CAP_UHS_SDR50 | MMC_CAP_UHS_SDR104 | + MMC_CAP_UHS_DDR50); +} + +static inline int mmc_host_packed_wr(struct mmc_host *host) +{ + return host->caps2 & 
MMC_CAP2_PACKED_WR; +} + static inline int mmc_card_hs(struct mmc_card *card) { return card->host->ios.timing == MMC_TIMING_SD_HS || card->host->ios.timing == MMC_TIMING_MMC_HS; } -/* TODO: Move to private header */ static inline int mmc_card_uhs(struct mmc_card *card) { return card->host->ios.timing >= MMC_TIMING_UHS_SDR12 && card->host->ios.timing <= MMC_TIMING_UHS_DDR50; } +static inline bool mmc_card_hs200(struct mmc_card *card) +{ + return card->host->ios.timing == MMC_TIMING_MMC_HS200; +} + +static inline bool mmc_card_ddr52(struct mmc_card *card) +{ + return card->host->ios.timing == MMC_TIMING_MMC_DDR52; +} + +static inline bool mmc_card_hs400(struct mmc_card *card) +{ + return card->host->ios.timing == MMC_TIMING_MMC_HS400; +} + +static inline bool mmc_card_hs400es(struct mmc_card *card) +{ + return card->host->ios.enhanced_strobe; +} + void mmc_retune_timer_stop(struct mmc_host *host); static inline void mmc_retune_needed(struct mmc_host *host) @@ -612,27 +540,13 @@ static inline void mmc_retune_needed(struct mmc_host *host) host->need_retune = 1; } -static inline bool mmc_can_retune(struct mmc_host *host) +static inline void mmc_retune_recheck(struct mmc_host *host) { - return host->can_retune == 1; + if (host->hold_retune <= 1) + host->retune_now = 1; } -static inline bool mmc_doing_retune(struct mmc_host *host) -{ - return host->doing_retune == 1; -} - -static inline bool mmc_doing_tune(struct mmc_host *host) -{ - return host->doing_retune == 1 || host->doing_init_tune == 1; -} - -static inline enum dma_data_direction mmc_get_dma_dir(struct mmc_data *data) -{ - return data->flags & MMC_DATA_WRITE ? 
DMA_TO_DEVICE : DMA_FROM_DEVICE; -} - -int mmc_send_tuning(struct mmc_host *host, u32 opcode, int *cmd_error); -int mmc_send_abort_tuning(struct mmc_host *host, u32 opcode); +void mmc_retune_pause(struct mmc_host *host); +void mmc_retune_unpause(struct mmc_host *host); #endif /* LINUX_MMC_HOST_H */ diff --git a/include/linux/mmc/mmc.h b/include/linux/mmc/mmc.h index d9a65c6a88..c376209c70 100644 --- a/include/linux/mmc/mmc.h +++ b/include/linux/mmc/mmc.h @@ -24,8 +24,6 @@ #ifndef LINUX_MMC_MMC_H #define LINUX_MMC_MMC_H -#include - /* Standard MMC commands (4.1) type argument response */ /* class 1 */ #define MMC_GO_IDLE_STATE 0 /* bc */ @@ -86,13 +84,6 @@ #define MMC_APP_CMD 55 /* ac [31:16] RCA R1 */ #define MMC_GEN_CMD 56 /* adtc [0] RD/WR R1 */ - /* class 11 */ -#define MMC_QUE_TASK_PARAMS 44 /* ac [20:16] task id R1 */ -#define MMC_QUE_TASK_ADDR 45 /* ac [31:0] data addr R1 */ -#define MMC_EXECUTE_READ_TASK 46 /* adtc [20:16] task id R1 */ -#define MMC_EXECUTE_WRITE_TASK 47 /* adtc [20:16] task id R1 */ -#define MMC_CMDQ_TASK_MGMT 48 /* ac [20:16] task id R1b */ - static inline bool mmc_op_multi(u32 opcode) { return opcode == MMC_WRITE_MULTIPLE_BLOCK || @@ -144,7 +135,7 @@ static inline bool mmc_op_multi(u32 opcode) #define R1_WP_ERASE_SKIP (1 << 15) /* sx, c */ #define R1_CARD_ECC_DISABLED (1 << 14) /* sx, a */ #define R1_ERASE_RESET (1 << 13) /* sr, c */ -#define R1_STATUS(x) (x & 0xFFF9A000) +#define R1_STATUS(x) (x & 0xFFFFE000) #define R1_CURRENT_STATE(x) ((x & 0x00001E00) >> 9) /* sx, b (4 bits) */ #define R1_READY_FOR_DATA (1 << 8) /* sx, a */ #define R1_SWITCH_ERROR (1 << 7) /* sx, c */ @@ -161,16 +152,6 @@ static inline bool mmc_op_multi(u32 opcode) #define R1_STATE_PRG 7 #define R1_STATE_DIS 8 -static inline bool mmc_ready_for_data(u32 status) -{ - /* - * Some cards mishandle the status bits, so make sure to check both the - * busy indication and the card state. 
- */ - return status & R1_READY_FOR_DATA && - R1_CURRENT_STATE(status) == R1_STATE_TRAN; -} - /* * MMC/SD in SPI mode reports R1 status always, and R2 for SEND_STATUS * R1 is the low order byte; R2 is the next highest byte, when present. @@ -194,6 +175,50 @@ static inline bool mmc_ready_for_data(u32 status) #define R2_SPI_OUT_OF_RANGE (1 << 15) /* or CSD overwrite */ #define R2_SPI_CSD_OVERWRITE R2_SPI_OUT_OF_RANGE +/* These are unpacked versions of the actual responses */ + +struct _mmc_csd { + u8 csd_structure; + u8 spec_vers; + u8 taac; + u8 nsac; + u8 tran_speed; + u16 ccc; + u8 read_bl_len; + u8 read_bl_partial; + u8 write_blk_misalign; + u8 read_blk_misalign; + u8 dsr_imp; + u16 c_size; + u8 vdd_r_curr_min; + u8 vdd_r_curr_max; + u8 vdd_w_curr_min; + u8 vdd_w_curr_max; + u8 c_size_mult; + union { + struct { /* MMC system specification version 3.1 */ + u8 erase_grp_size; + u8 erase_grp_mult; + } v31; + struct { /* MMC system specification version 2.2 */ + u8 sector_size; + u8 erase_grp_size; + } v22; + } erase; + u8 wp_grp_size; + u8 wp_grp_enable; + u8 default_ecc; + u8 r2w_factor; + u8 write_bl_len; + u8 write_bl_partial; + u8 file_format_grp; + u8 copy; + u8 perm_write_protect; + u8 tmp_write_protect; + u8 file_format; + u8 ecc; +}; + /* * OCR bits are mostly in host.h */ @@ -247,7 +272,6 @@ static inline bool mmc_ready_for_data(u32 status) * EXT_CSD fields */ -#define EXT_CSD_CMDQ_MODE_EN 15 /* R/W */ #define EXT_CSD_FLUSH_CACHE 32 /* W */ #define EXT_CSD_CACHE_CTRL 33 /* R/W */ #define EXT_CSD_POWER_OFF_NOTIFICATION 34 /* R/W */ @@ -307,11 +331,6 @@ static inline bool mmc_ready_for_data(u32 status) #define EXT_CSD_CACHE_SIZE 249 /* RO, 4 bytes */ #define EXT_CSD_PWR_CL_DDR_200_360 253 /* RO */ #define EXT_CSD_FIRMWARE_VERSION 254 /* RO, 8 bytes */ -#define EXT_CSD_PRE_EOL_INFO 267 /* RO */ -#define EXT_CSD_DEVICE_LIFE_TIME_EST_TYP_A 268 /* RO */ -#define EXT_CSD_DEVICE_LIFE_TIME_EST_TYP_B 269 /* RO */ -#define EXT_CSD_CMDQ_DEPTH 307 /* RO */ -#define 
EXT_CSD_CMDQ_SUPPORT 308 /* RO */ #define EXT_CSD_SUPPORTED_MODE 493 /* RO */ #define EXT_CSD_TAG_UNIT_SIZE 498 /* RO */ #define EXT_CSD_DATA_TAG_SUPPORT 499 /* RO */ @@ -325,7 +344,6 @@ static inline bool mmc_ready_for_data(u32 status) */ #define EXT_CSD_WR_REL_PARAM_EN (1<<2) -#define EXT_CSD_WR_REL_PARAM_EN_RPMB_REL_WR (1<<4) #define EXT_CSD_BOOT_WP_B_PWR_WP_DIS (0x40) #define EXT_CSD_BOOT_WP_B_PERM_WP_DIS (0x10) @@ -418,35 +436,16 @@ static inline bool mmc_ready_for_data(u32 status) * BKOPS modes */ #define EXT_CSD_MANUAL_BKOPS_MASK 0x01 -#define EXT_CSD_AUTO_BKOPS_MASK 0x02 - -/* - * Command Queue - */ -#define EXT_CSD_CMDQ_MODE_ENABLED BIT(0) -#define EXT_CSD_CMDQ_DEPTH_MASK GENMASK(4, 0) -#define EXT_CSD_CMDQ_SUPPORTED BIT(0) /* * MMC_SWITCH access modes */ + #define MMC_SWITCH_MODE_CMD_SET 0x00 /* Change the command set */ #define MMC_SWITCH_MODE_SET_BITS 0x01 /* Set bits which are 1 in value */ #define MMC_SWITCH_MODE_CLEAR_BITS 0x02 /* Clear bits which are 1 in value */ #define MMC_SWITCH_MODE_WRITE_BYTE 0x03 /* Set target to value */ -/* - * Erase/trim/discard - */ -#define MMC_ERASE_ARG 0x00000000 -#define MMC_SECURE_ERASE_ARG 0x80000000 -#define MMC_TRIM_ARG 0x00000001 -#define MMC_DISCARD_ARG 0x00000003 -#define MMC_SECURE_TRIM1_ARG 0x80000001 -#define MMC_SECURE_TRIM2_ARG 0x80008000 -#define MMC_SECURE_ARGS 0x80000000 -#define MMC_TRIM_ARGS 0x00008001 - #define mmc_driver_type_mask(n) (1 << (n)) #endif /* LINUX_MMC_MMC_H */ diff --git a/include/linux/mmc/pm.h b/include/linux/mmc/pm.h index 3549f80457..4a139204c2 100644 --- a/include/linux/mmc/pm.h +++ b/include/linux/mmc/pm.h @@ -1,9 +1,12 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * linux/include/linux/mmc/pm.h * * Author: Nicolas Pitre * Copyright: (C) 2009 Marvell Technology Group Ltd. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
*/ #ifndef LINUX_MMC_PM_H diff --git a/include/linux/mmc/sd.h b/include/linux/mmc/sd.h index 6727576a87..1ebcf9ba12 100644 --- a/include/linux/mmc/sd.h +++ b/include/linux/mmc/sd.h @@ -1,8 +1,12 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * include/linux/mmc/sd.h * * Copyright (C) 2005-2007 Pierre Ossman, All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or (at + * your option) any later version. */ #ifndef LINUX_MMC_SD_H @@ -29,10 +33,6 @@ #define SD_APP_OP_COND 41 /* bcr [31:0] OCR R3 */ #define SD_APP_SEND_SCR 51 /* adtc R1 */ - /* class 11 */ -#define SD_READ_EXTR_SINGLE 48 /* adtc [31:0] R1 */ -#define SD_WRITE_EXTR_SINGLE 49 /* adtc [31:0] R1 */ - /* OCR bit definitions */ #define SD_OCR_S18R (1 << 24) /* 1.8V switching request */ #define SD_ROCR_S18A SD_OCR_S18R /* 1.8V switching accepted by card */ @@ -91,10 +91,4 @@ #define SD_SWITCH_ACCESS_DEF 0 #define SD_SWITCH_ACCESS_HS 1 -/* - * Erase/discard - */ -#define SD_ERASE_ARG 0x00000000 -#define SD_DISCARD_ARG 0x00000001 - #endif /* LINUX_MMC_SD_H */ diff --git a/include/linux/mmc/sdhci-pci-data.h b/include/linux/mmc/sdhci-pci-data.h index 1d42872d22..fda15b6d41 100644 --- a/include/linux/mmc/sdhci-pci-data.h +++ b/include/linux/mmc/sdhci-pci-data.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef LINUX_MMC_SDHCI_PCI_DATA_H #define LINUX_MMC_SDHCI_PCI_DATA_H @@ -15,4 +14,7 @@ struct sdhci_pci_data { extern struct sdhci_pci_data *(*sdhci_pci_get_data)(struct pci_dev *pdev, int slotno); + +extern int sdhci_pci_spt_drive_strength; + #endif diff --git a/include/linux/mmc/sdio.h b/include/linux/mmc/sdio.h index e28769991e..17446d3c36 100644 --- a/include/linux/mmc/sdio.h +++ b/include/linux/mmc/sdio.h @@ -1,8 +1,12 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * include/linux/mmc/sdio.h * * 
Copyright 2006-2007 Pierre Ossman + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or (at + * your option) any later version. */ #ifndef LINUX_MMC_SDIO_H diff --git a/include/linux/mmc/sdio_func.h b/include/linux/mmc/sdio_func.h index 478855b8e4..aab032a6ae 100644 --- a/include/linux/mmc/sdio_func.h +++ b/include/linux/mmc/sdio_func.h @@ -1,8 +1,12 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * include/linux/mmc/sdio_func.h * * Copyright 2007-2008 Pierre Ossman + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or (at + * your option) any later version. */ #ifndef LINUX_MMC_SDIO_FUNC_H @@ -25,7 +29,7 @@ struct sdio_func_tuple { struct sdio_func_tuple *next; unsigned char code; unsigned char size; - unsigned char data[]; + unsigned char data[0]; }; /* @@ -49,10 +53,8 @@ struct sdio_func { unsigned int state; /* function state */ #define SDIO_STATE_PRESENT (1<<0) /* present in sysfs */ - u8 *tmpbuf; /* DMA:able scratch buffer */ + u8 tmpbuf[4]; /* DMA:able scratch buffer */ - u8 major_rev; /* major revision number */ - u8 minor_rev; /* minor revision number */ unsigned num_info; /* number of info strings */ const char **info; /* info strings */ @@ -109,18 +111,6 @@ struct sdio_driver { extern int sdio_register_driver(struct sdio_driver *); extern void sdio_unregister_driver(struct sdio_driver *); -/** - * module_sdio_driver() - Helper macro for registering a SDIO driver - * @__sdio_driver: sdio_driver struct - * - * Helper macro for SDIO drivers which do not do anything special in module - * init/exit. This eliminates a lot of boilerplate. 
Each module may only - * use this macro once, and calling it replaces module_init() and module_exit() - */ -#define module_sdio_driver(__sdio_driver) \ - module_driver(__sdio_driver, sdio_register_driver, \ - sdio_unregister_driver) - /* * SDIO I/O operations */ @@ -169,10 +159,4 @@ extern void sdio_f0_writeb(struct sdio_func *func, unsigned char b, extern mmc_pm_flag_t sdio_get_host_pm_caps(struct sdio_func *func); extern int sdio_set_host_pm_flags(struct sdio_func *func, mmc_pm_flag_t flags); -extern void sdio_retune_crc_disable(struct sdio_func *func); -extern void sdio_retune_crc_enable(struct sdio_func *func); - -extern void sdio_retune_hold_now(struct sdio_func *func); -extern void sdio_retune_release(struct sdio_func *func); - #endif /* LINUX_MMC_SDIO_FUNC_H */ diff --git a/include/linux/mmc/sdio_ids.h b/include/linux/mmc/sdio_ids.h index a85c9f0bd4..d43ef96bf0 100644 --- a/include/linux/mmc/sdio_ids.h +++ b/include/linux/mmc/sdio_ids.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ /* * SDIO Classes, Interface Types, Manufacturer IDs, etc. */ @@ -24,9 +23,21 @@ /* * Vendors and devices. Sort key: vendor first, device next. 
*/ - -#define SDIO_VENDOR_ID_STE 0x0020 -#define SDIO_DEVICE_ID_STE_CW1200 0x2280 +#define SDIO_VENDOR_ID_BROADCOM 0x02d0 +#define SDIO_DEVICE_ID_BROADCOM_43143 0xa887 +#define SDIO_DEVICE_ID_BROADCOM_43241 0x4324 +#define SDIO_DEVICE_ID_BROADCOM_4329 0x4329 +#define SDIO_DEVICE_ID_BROADCOM_4330 0x4330 +#define SDIO_DEVICE_ID_BROADCOM_4334 0x4334 +#define SDIO_DEVICE_ID_BROADCOM_43340 0xa94c +#define SDIO_DEVICE_ID_BROADCOM_43341 0xa94d +#define SDIO_DEVICE_ID_BROADCOM_4335_4339 0x4335 +#define SDIO_DEVICE_ID_BROADCOM_4339 0x4339 +#define SDIO_DEVICE_ID_BROADCOM_43362 0xa962 +#define SDIO_DEVICE_ID_BROADCOM_43430 0xa9a6 +#define SDIO_DEVICE_ID_BROADCOM_4345 0x4345 +#define SDIO_DEVICE_ID_BROADCOM_4354 0x4354 +#define SDIO_DEVICE_ID_BROADCOM_4356 0x4356 #define SDIO_VENDOR_ID_INTEL 0x0089 #define SDIO_DEVICE_ID_INTEL_IWMC3200WIMAX 0x1402 @@ -36,97 +47,17 @@ #define SDIO_DEVICE_ID_INTEL_IWMC3200BT 0x1406 #define SDIO_DEVICE_ID_INTEL_IWMC3200WIMAX_2G5 0x1407 -#define SDIO_VENDOR_ID_CGUYS 0x0092 -#define SDIO_DEVICE_ID_CGUYS_EW_CG1102GC 0x0004 - -#define SDIO_VENDOR_ID_TI 0x0097 -#define SDIO_DEVICE_ID_TI_WL1271 0x4076 - -#define SDIO_VENDOR_ID_ATHEROS 0x0271 -#define SDIO_DEVICE_ID_ATHEROS_AR6003_00 0x0300 -#define SDIO_DEVICE_ID_ATHEROS_AR6003_01 0x0301 -#define SDIO_DEVICE_ID_ATHEROS_AR6004_00 0x0400 -#define SDIO_DEVICE_ID_ATHEROS_AR6004_01 0x0401 -#define SDIO_DEVICE_ID_ATHEROS_AR6004_02 0x0402 -#define SDIO_DEVICE_ID_ATHEROS_AR6004_18 0x0418 -#define SDIO_DEVICE_ID_ATHEROS_AR6004_19 0x0419 -#define SDIO_DEVICE_ID_ATHEROS_AR6005 0x050A -#define SDIO_DEVICE_ID_ATHEROS_QCA9377 0x0701 - -#define SDIO_VENDOR_ID_BROADCOM 0x02d0 -#define SDIO_DEVICE_ID_BROADCOM_NINTENDO_WII 0x044b -#define SDIO_DEVICE_ID_BROADCOM_43241 0x4324 -#define SDIO_DEVICE_ID_BROADCOM_4329 0x4329 -#define SDIO_DEVICE_ID_BROADCOM_4330 0x4330 -#define SDIO_DEVICE_ID_BROADCOM_4334 0x4334 -#define SDIO_DEVICE_ID_BROADCOM_4335_4339 0x4335 -#define SDIO_DEVICE_ID_BROADCOM_4339 0x4339 -#define 
SDIO_DEVICE_ID_BROADCOM_4345 0x4345 -#define SDIO_DEVICE_ID_BROADCOM_4354 0x4354 -#define SDIO_DEVICE_ID_BROADCOM_CYPRESS_89359 0x4355 -#define SDIO_DEVICE_ID_BROADCOM_4356 0x4356 -#define SDIO_DEVICE_ID_BROADCOM_4359 0x4359 -#define SDIO_DEVICE_ID_BROADCOM_CYPRESS_4373 0x4373 -#define SDIO_DEVICE_ID_BROADCOM_CYPRESS_43012 0xa804 -#define SDIO_DEVICE_ID_BROADCOM_43143 0xa887 -#define SDIO_DEVICE_ID_BROADCOM_43340 0xa94c -#define SDIO_DEVICE_ID_BROADCOM_43341 0xa94d -#define SDIO_DEVICE_ID_BROADCOM_43362 0xa962 -#define SDIO_DEVICE_ID_BROADCOM_43364 0xa9a4 -#define SDIO_DEVICE_ID_BROADCOM_43430 0xa9a6 -#define SDIO_DEVICE_ID_BROADCOM_43455 0xa9bf -#define SDIO_DEVICE_ID_BROADCOM_CYPRESS_43752 0xaae8 - #define SDIO_VENDOR_ID_MARVELL 0x02df #define SDIO_DEVICE_ID_MARVELL_LIBERTAS 0x9103 -#define SDIO_DEVICE_ID_MARVELL_8688_WLAN 0x9104 -#define SDIO_DEVICE_ID_MARVELL_8688_BT 0x9105 -#define SDIO_DEVICE_ID_MARVELL_8786_WLAN 0x9116 -#define SDIO_DEVICE_ID_MARVELL_8787_WLAN 0x9119 -#define SDIO_DEVICE_ID_MARVELL_8787_BT 0x911a -#define SDIO_DEVICE_ID_MARVELL_8787_BT_AMP 0x911b -#define SDIO_DEVICE_ID_MARVELL_8797_F0 0x9128 -#define SDIO_DEVICE_ID_MARVELL_8797_WLAN 0x9129 -#define SDIO_DEVICE_ID_MARVELL_8797_BT 0x912a -#define SDIO_DEVICE_ID_MARVELL_8897_WLAN 0x912d -#define SDIO_DEVICE_ID_MARVELL_8897_BT 0x912e -#define SDIO_DEVICE_ID_MARVELL_8887_F0 0x9134 -#define SDIO_DEVICE_ID_MARVELL_8887_WLAN 0x9135 -#define SDIO_DEVICE_ID_MARVELL_8887_BT 0x9136 -#define SDIO_DEVICE_ID_MARVELL_8801_WLAN 0x9139 -#define SDIO_DEVICE_ID_MARVELL_8997_F0 0x9140 -#define SDIO_DEVICE_ID_MARVELL_8997_WLAN 0x9141 -#define SDIO_DEVICE_ID_MARVELL_8997_BT 0x9142 -#define SDIO_DEVICE_ID_MARVELL_8977_WLAN 0x9145 -#define SDIO_DEVICE_ID_MARVELL_8977_BT 0x9146 -#define SDIO_DEVICE_ID_MARVELL_8987_WLAN 0x9149 -#define SDIO_DEVICE_ID_MARVELL_8987_BT 0x914a - -#define SDIO_VENDOR_ID_MEDIATEK 0x037a -#define SDIO_DEVICE_ID_MEDIATEK_MT7663 0x7663 -#define SDIO_DEVICE_ID_MEDIATEK_MT7668 0x7668 - -#define 
SDIO_VENDOR_ID_MICROCHIP_WILC 0x0296 -#define SDIO_DEVICE_ID_MICROCHIP_WILC1000 0x5347 +#define SDIO_DEVICE_ID_MARVELL_8688WLAN 0x9104 +#define SDIO_DEVICE_ID_MARVELL_8688BT 0x9105 #define SDIO_VENDOR_ID_SIANO 0x039a #define SDIO_DEVICE_ID_SIANO_NOVA_B0 0x0201 #define SDIO_DEVICE_ID_SIANO_NICE 0x0202 #define SDIO_DEVICE_ID_SIANO_VEGA_A0 0x0300 #define SDIO_DEVICE_ID_SIANO_VENICE 0x0301 -#define SDIO_DEVICE_ID_SIANO_MING 0x0302 -#define SDIO_DEVICE_ID_SIANO_PELE 0x0500 -#define SDIO_DEVICE_ID_SIANO_RIO 0x0600 -#define SDIO_DEVICE_ID_SIANO_DENVER_2160 0x0700 -#define SDIO_DEVICE_ID_SIANO_DENVER_1530 0x0800 #define SDIO_DEVICE_ID_SIANO_NOVA_A0 0x1100 #define SDIO_DEVICE_ID_SIANO_STELLAR 0x5347 -#define SDIO_VENDOR_ID_RSI 0x041b -#define SDIO_DEVICE_ID_RSI_9113 0x9330 -#define SDIO_DEVICE_ID_RSI_9116 0x9116 - -#define SDIO_VENDOR_ID_TI_WL1251 0x104c -#define SDIO_DEVICE_ID_TI_WL1251 0x9066 - #endif /* LINUX_MMC_SDIO_IDS_H */ diff --git a/include/linux/mmc/sh_mmcif.h b/include/linux/mmc/sh_mmcif.h index e25533b95d..ccd8fb2cad 100644 --- a/include/linux/mmc/sh_mmcif.h +++ b/include/linux/mmc/sh_mmcif.h @@ -1,10 +1,14 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * include/linux/mmc/sh_mmcif.h * * platform data for eMMC driver * * Copyright (C) 2010 Renesas Solutions Corp. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License. 
+ * */ #ifndef LINUX_MMC_SH_MMCIF_H @@ -28,8 +32,13 @@ */ struct sh_mmcif_plat_data { + int (*get_cd)(struct platform_device *pdef); unsigned int slave_id_tx; /* embedded slave_id_[tr]x */ unsigned int slave_id_rx; + bool use_cd_gpio : 1; + bool ccs_unsupported : 1; + bool clk_ctrl2_present : 1; + unsigned int cd_gpio; u8 sup_pclk; /* 1 :SH7757, 0: SH7724/SH7372 */ unsigned long caps; u32 ocr; diff --git a/include/linux/mmc/slot-gpio.h b/include/linux/mmc/slot-gpio.h index 4ae2f2908f..3945a8c9d3 100644 --- a/include/linux/mmc/slot-gpio.h +++ b/include/linux/mmc/slot-gpio.h @@ -1,30 +1,33 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * Generic GPIO card-detect helper header * * Copyright (C) 2011, Guennadi Liakhovetski + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. */ #ifndef MMC_SLOT_GPIO_H #define MMC_SLOT_GPIO_H -#include -#include - struct mmc_host; int mmc_gpio_get_ro(struct mmc_host *host); +int mmc_gpio_request_ro(struct mmc_host *host, unsigned int gpio); + int mmc_gpio_get_cd(struct mmc_host *host); +int mmc_gpio_request_cd(struct mmc_host *host, unsigned int gpio, + unsigned int debounce); + int mmc_gpiod_request_cd(struct mmc_host *host, const char *con_id, unsigned int idx, bool override_active_level, - unsigned int debounce); + unsigned int debounce, bool *gpio_invert); int mmc_gpiod_request_ro(struct mmc_host *host, const char *con_id, - unsigned int idx, unsigned int debounce); + unsigned int idx, bool override_active_level, + unsigned int debounce, bool *gpio_invert); void mmc_gpio_set_cd_isr(struct mmc_host *host, irqreturn_t (*isr)(int irq, void *dev_id)); -int mmc_gpio_set_cd_wake(struct mmc_host *host, bool on); void mmc_gpiod_request_cd_irq(struct mmc_host *host); -bool mmc_can_gpio_cd(struct mmc_host *host); -bool mmc_can_gpio_ro(struct mmc_host *host); #endif diff --git 
a/include/linux/mmdebug.h b/include/linux/mmdebug.h index 1935d4c72d..451a811f48 100644 --- a/include/linux/mmdebug.h +++ b/include/linux/mmdebug.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef LINUX_MM_DEBUG_H #define LINUX_MM_DEBUG_H 1 @@ -9,7 +8,8 @@ struct page; struct vm_area_struct; struct mm_struct; -void dump_page(struct page *page, const char *reason); +extern void dump_page(struct page *page, const char *reason); +extern void __dump_page(struct page *page, const char *reason); void dump_vma(const struct vm_area_struct *vma); void dump_mm(const struct mm_struct *mm); @@ -36,22 +36,10 @@ void dump_mm(const struct mm_struct *mm); BUG(); \ } \ } while (0) -#define VM_WARN_ON_ONCE_PAGE(cond, page) ({ \ - static bool __section(".data.once") __warned; \ - int __ret_warn_once = !!(cond); \ - \ - if (unlikely(__ret_warn_once && !__warned)) { \ - dump_page(page, "VM_WARN_ON_ONCE_PAGE(" __stringify(cond)")");\ - __warned = true; \ - WARN_ON(1); \ - } \ - unlikely(__ret_warn_once); \ -}) - -#define VM_WARN_ON(cond) (void)WARN_ON(cond) -#define VM_WARN_ON_ONCE(cond) (void)WARN_ON_ONCE(cond) -#define VM_WARN_ONCE(cond, format...) (void)WARN_ONCE(cond, format) -#define VM_WARN(cond, format...) (void)WARN(cond, format) +#define VM_WARN_ON(cond) WARN_ON(cond) +#define VM_WARN_ON_ONCE(cond) WARN_ON_ONCE(cond) +#define VM_WARN_ONCE(cond, format...) WARN_ONCE(cond, format) +#define VM_WARN(cond, format...) WARN(cond, format) #else #define VM_BUG_ON(cond) BUILD_BUG_ON_INVALID(cond) #define VM_BUG_ON_PAGE(cond, page) VM_BUG_ON(cond) @@ -59,7 +47,6 @@ void dump_mm(const struct mm_struct *mm); #define VM_BUG_ON_MM(cond, mm) VM_BUG_ON(cond) #define VM_WARN_ON(cond) BUILD_BUG_ON_INVALID(cond) #define VM_WARN_ON_ONCE(cond) BUILD_BUG_ON_INVALID(cond) -#define VM_WARN_ON_ONCE_PAGE(cond, page) BUILD_BUG_ON_INVALID(cond) #define VM_WARN_ONCE(cond, format...) BUILD_BUG_ON_INVALID(cond) #define VM_WARN(cond, format...) 
BUILD_BUG_ON_INVALID(cond) #endif diff --git a/include/linux/mmiotrace.h b/include/linux/mmiotrace.h index 8823684989..85cd5ce089 100644 --- a/include/linux/mmiotrace.h +++ b/include/linux/mmiotrace.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_MMIOTRACE_H #define _LINUX_MMIOTRACE_H @@ -47,7 +46,7 @@ extern int kmmio_handler(struct pt_regs *regs, unsigned long addr); /* Called from ioremap.c */ extern void mmiotrace_ioremap(resource_size_t offset, unsigned long size, void __iomem *addr); -extern void mmiotrace_iounmap(volatile void __iomem *addr); +extern void mmiotrace_iounmap(const volatile void __iomem *addr); /* For anyone to insert markers. Remember trailing newline. */ extern __printf(1, 2) int mmiotrace_printk(const char *fmt, ...); @@ -67,7 +66,7 @@ static inline void mmiotrace_ioremap(resource_size_t offset, { } -static inline void mmiotrace_iounmap(volatile void __iomem *addr) +static inline void mmiotrace_iounmap(const volatile void __iomem *addr) { } diff --git a/include/linux/mmu_context.h b/include/linux/mmu_context.h index b9b970f7ab..a444178450 100644 --- a/include/linux/mmu_context.h +++ b/include/linux/mmu_context.h @@ -1,31 +1,16 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_MMU_CONTEXT_H #define _LINUX_MMU_CONTEXT_H #include -#include + +struct mm_struct; + +void use_mm(struct mm_struct *mm); +void unuse_mm(struct mm_struct *mm); /* Architectures that care about IRQ state in switch_mm can override this. */ #ifndef switch_mm_irqs_off # define switch_mm_irqs_off switch_mm #endif -#ifndef leave_mm -static inline void leave_mm(int cpu) { } -#endif - -/* - * CPUs that are capable of running user task @p. Must contain at least one - * active CPU. It is assumed that the kernel can run on all CPUs, so calling - * this for a kernel thread is pointless. - * - * By default, we assume a sane, homogeneous system. 
- */ -#ifndef task_cpu_possible_mask -# define task_cpu_possible_mask(p) cpu_possible_mask -# define task_cpu_possible(cpu, p) true -#else -# define task_cpu_possible(cpu, p) cpumask_test_cpu((cpu), task_cpu_possible_mask(p)) -#endif - #endif diff --git a/include/linux/mmu_notifier.h b/include/linux/mmu_notifier.h index 45fc2c81e3..a1a210d599 100644 --- a/include/linux/mmu_notifier.h +++ b/include/linux/mmu_notifier.h @@ -1,66 +1,29 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_MMU_NOTIFIER_H #define _LINUX_MMU_NOTIFIER_H #include #include #include -#include #include -#include -struct mmu_notifier_subscriptions; struct mmu_notifier; -struct mmu_notifier_range; -struct mmu_interval_notifier; +struct mmu_notifier_ops; -/** - * enum mmu_notifier_event - reason for the mmu notifier callback - * @MMU_NOTIFY_UNMAP: either munmap() that unmap the range or a mremap() that - * move the range - * - * @MMU_NOTIFY_CLEAR: clear page table entry (many reasons for this like - * madvise() or replacing a page by another one, ...). - * - * @MMU_NOTIFY_PROTECTION_VMA: update is due to protection change for the range - * ie using the vma access permission (vm_page_prot) to update the whole range - * is enough no need to inspect changes to the CPU page table (mprotect() - * syscall) - * - * @MMU_NOTIFY_PROTECTION_PAGE: update is due to change in read/write flag for - * pages in the range so to mirror those changes the user must inspect the CPU - * page table (from the end callback). - * - * @MMU_NOTIFY_SOFT_DIRTY: soft dirty accounting (still same page and same - * access flags). User should soft dirty the page in the end callback to make - * sure that anyone relying on soft dirtiness catch pages that might be written - * through non CPU mappings. - * - * @MMU_NOTIFY_RELEASE: used during mmu_interval_notifier invalidate to signal - * that the mm refcount is zero and the range is no longer accessible. 
- * - * @MMU_NOTIFY_MIGRATE: used during migrate_vma_collect() invalidate to signal - * a device driver to possibly ignore the invalidation if the - * owner field matches the driver's device private pgmap owner. - * - * @MMU_NOTIFY_EXCLUSIVE: to signal a device driver that the device will no - * longer have exclusive access to the page. When sent during creation of an - * exclusive range the owner will be initialised to the value provided by the - * caller of make_device_exclusive_range(), otherwise the owner will be NULL. +#ifdef CONFIG_MMU_NOTIFIER + +/* + * The mmu notifier_mm structure is allocated and installed in + * mm->mmu_notifier_mm inside the mm_take_all_locks() protected + * critical section and it's released only when mm_count reaches zero + * in mmdrop(). */ -enum mmu_notifier_event { - MMU_NOTIFY_UNMAP = 0, - MMU_NOTIFY_CLEAR, - MMU_NOTIFY_PROTECTION_VMA, - MMU_NOTIFY_PROTECTION_PAGE, - MMU_NOTIFY_SOFT_DIRTY, - MMU_NOTIFY_RELEASE, - MMU_NOTIFY_MIGRATE, - MMU_NOTIFY_EXCLUSIVE, +struct mmu_notifier_mm { + /* all mmu notifiers registerd in this mm are queued in this list */ + struct hlist_head list; + /* to serialize the list modifications and hlist_unhashed */ + spinlock_t lock; }; -#define MMU_NOTIFIER_RANGE_BLOCKABLE (1 << 0) - struct mmu_notifier_ops { /* * Called either by mmu_notifier_unregister or when the mm is @@ -85,7 +48,7 @@ struct mmu_notifier_ops { * through the gart alias address, so leading to memory * corruption. */ - void (*release)(struct mmu_notifier *subscription, + void (*release)(struct mmu_notifier *mn, struct mm_struct *mm); /* @@ -97,7 +60,7 @@ struct mmu_notifier_ops { * Start-end is necessary in case the secondary MMU is mapping the page * at a smaller granularity than the primary MMU. 
*/ - int (*clear_flush_young)(struct mmu_notifier *subscription, + int (*clear_flush_young)(struct mmu_notifier *mn, struct mm_struct *mm, unsigned long start, unsigned long end); @@ -107,7 +70,7 @@ struct mmu_notifier_ops { * latter, it is supposed to test-and-clear the young/accessed bitflag * in the secondary pte, but it may omit flushing the secondary tlb. */ - int (*clear_young)(struct mmu_notifier *subscription, + int (*clear_young)(struct mmu_notifier *mn, struct mm_struct *mm, unsigned long start, unsigned long end); @@ -118,7 +81,7 @@ struct mmu_notifier_ops { * frequently used without actually clearing the flag or tearing * down the secondary mapping on the page. */ - int (*test_young)(struct mmu_notifier *subscription, + int (*test_young)(struct mmu_notifier *mn, struct mm_struct *mm, unsigned long address); @@ -126,14 +89,25 @@ struct mmu_notifier_ops { * change_pte is called in cases that pte mapping to page is changed: * for example, when ksm remaps pte to point to a new shared page. */ - void (*change_pte)(struct mmu_notifier *subscription, + void (*change_pte)(struct mmu_notifier *mn, struct mm_struct *mm, unsigned long address, pte_t pte); + /* + * Before this is invoked any secondary MMU is still ok to + * read/write to the page previously pointed to by the Linux + * pte because the page hasn't been freed yet and it won't be + * freed until this returns. If required set_page_dirty has to + * be called internally to this method. + */ + void (*invalidate_page)(struct mmu_notifier *mn, + struct mm_struct *mm, + unsigned long address); + /* * invalidate_range_start() and invalidate_range_end() must be - * paired and are called only when the mmap_lock and/or the + * paired and are called only when the mmap_sem and/or the * locks protecting the reverse maps are held. 
If the subsystem * can't guarantee that no additional references are taken to * the pages in the range, it has to implement the @@ -167,24 +141,19 @@ struct mmu_notifier_ops { * decrease the refcount. If the refcount is decreased on * invalidate_range_start() then the VM can free pages as page * table entries are removed. If the refcount is only - * dropped on invalidate_range_end() then the driver itself + * droppped on invalidate_range_end() then the driver itself * will drop the last refcount but it must take care to flush * any secondary tlb before doing the final free on the * page. Pages will no longer be referenced by the linux * address space but may still be referenced by sptes until * the last refcount is dropped. - * - * If blockable argument is set to false then the callback cannot - * sleep and has to return with -EAGAIN if sleeping would be required. - * 0 should be returned otherwise. Please note that notifiers that can - * fail invalidate_range_start are not allowed to implement - * invalidate_range_end, as there is no mechanism for informing the - * notifier that its start failed. */ - int (*invalidate_range_start)(struct mmu_notifier *subscription, - const struct mmu_notifier_range *range); - void (*invalidate_range_end)(struct mmu_notifier *subscription, - const struct mmu_notifier_range *range); + void (*invalidate_range_start)(struct mmu_notifier *mn, + struct mm_struct *mm, + unsigned long start, unsigned long end); + void (*invalidate_range_end)(struct mmu_notifier *mn, + struct mm_struct *mm, + unsigned long start, unsigned long end); /* * invalidate_range() is either called between @@ -196,194 +165,50 @@ struct mmu_notifier_ops { * If invalidate_range() is used to manage a non-CPU TLB with * shared page-tables, it not necessary to implement the * invalidate_range_start()/end() notifiers, as - * invalidate_range() already catches the points in time when an - * external TLB range needs to be flushed. 
For more in depth - * discussion on this see Documentation/vm/mmu_notifier.rst + * invalidate_range() alread catches the points in time when an + * external TLB range needs to be flushed. + * + * The invalidate_range() function is called under the ptl + * spin-lock and not allowed to sleep. * * Note that this function might be called with just a sub-range * of what was passed to invalidate_range_start()/end(), if * called between those functions. */ - void (*invalidate_range)(struct mmu_notifier *subscription, - struct mm_struct *mm, - unsigned long start, - unsigned long end); - - /* - * These callbacks are used with the get/put interface to manage the - * lifetime of the mmu_notifier memory. alloc_notifier() returns a new - * notifier for use with the mm. - * - * free_notifier() is only called after the mmu_notifier has been - * fully put, calls to any ops callback are prevented and no ops - * callbacks are currently running. It is called from a SRCU callback - * and cannot sleep. - */ - struct mmu_notifier *(*alloc_notifier)(struct mm_struct *mm); - void (*free_notifier)(struct mmu_notifier *subscription); + void (*invalidate_range)(struct mmu_notifier *mn, struct mm_struct *mm, + unsigned long start, unsigned long end); }; /* - * The notifier chains are protected by mmap_lock and/or the reverse map + * The notifier chains are protected by mmap_sem and/or the reverse map * semaphores. Notifier chains are only changed when all reverse maps and - * the mmap_lock locks are taken. + * the mmap_sem locks are taken. * * Therefore notifier chains can only be traversed when either * - * 1. mmap_lock is held. + * 1. mmap_sem is held. * 2. One of the reverse map locks is held (i_mmap_rwsem or anon_vma->rwsem). * 3. 
No other concurrent thread can access the list (release) */ struct mmu_notifier { struct hlist_node hlist; const struct mmu_notifier_ops *ops; - struct mm_struct *mm; - struct rcu_head rcu; - unsigned int users; -}; - -/** - * struct mmu_interval_notifier_ops - * @invalidate: Upon return the caller must stop using any SPTEs within this - * range. This function can sleep. Return false only if sleeping - * was required but mmu_notifier_range_blockable(range) is false. - */ -struct mmu_interval_notifier_ops { - bool (*invalidate)(struct mmu_interval_notifier *interval_sub, - const struct mmu_notifier_range *range, - unsigned long cur_seq); -}; - -struct mmu_interval_notifier { - struct interval_tree_node interval_tree; - const struct mmu_interval_notifier_ops *ops; - struct mm_struct *mm; - struct hlist_node deferred_item; - unsigned long invalidate_seq; -}; - -#ifdef CONFIG_MMU_NOTIFIER - -#ifdef CONFIG_LOCKDEP -extern struct lockdep_map __mmu_notifier_invalidate_range_start_map; -#endif - -struct mmu_notifier_range { - struct vm_area_struct *vma; - struct mm_struct *mm; - unsigned long start; - unsigned long end; - unsigned flags; - enum mmu_notifier_event event; - void *owner; }; static inline int mm_has_notifiers(struct mm_struct *mm) { - return unlikely(mm->notifier_subscriptions); + return unlikely(mm->mmu_notifier_mm); } -struct mmu_notifier *mmu_notifier_get_locked(const struct mmu_notifier_ops *ops, - struct mm_struct *mm); -static inline struct mmu_notifier * -mmu_notifier_get(const struct mmu_notifier_ops *ops, struct mm_struct *mm) -{ - struct mmu_notifier *ret; - - mmap_write_lock(mm); - ret = mmu_notifier_get_locked(ops, mm); - mmap_write_unlock(mm); - return ret; -} -void mmu_notifier_put(struct mmu_notifier *subscription); -void mmu_notifier_synchronize(void); - -extern int mmu_notifier_register(struct mmu_notifier *subscription, +extern int mmu_notifier_register(struct mmu_notifier *mn, struct mm_struct *mm); -extern int __mmu_notifier_register(struct 
mmu_notifier *subscription, +extern int __mmu_notifier_register(struct mmu_notifier *mn, struct mm_struct *mm); -extern void mmu_notifier_unregister(struct mmu_notifier *subscription, +extern void mmu_notifier_unregister(struct mmu_notifier *mn, struct mm_struct *mm); - -unsigned long -mmu_interval_read_begin(struct mmu_interval_notifier *interval_sub); -int mmu_interval_notifier_insert(struct mmu_interval_notifier *interval_sub, - struct mm_struct *mm, unsigned long start, - unsigned long length, - const struct mmu_interval_notifier_ops *ops); -int mmu_interval_notifier_insert_locked( - struct mmu_interval_notifier *interval_sub, struct mm_struct *mm, - unsigned long start, unsigned long length, - const struct mmu_interval_notifier_ops *ops); -void mmu_interval_notifier_remove(struct mmu_interval_notifier *interval_sub); - -/** - * mmu_interval_set_seq - Save the invalidation sequence - * @interval_sub - The subscription passed to invalidate - * @cur_seq - The cur_seq passed to the invalidate() callback - * - * This must be called unconditionally from the invalidate callback of a - * struct mmu_interval_notifier_ops under the same lock that is used to call - * mmu_interval_read_retry(). It updates the sequence number for later use by - * mmu_interval_read_retry(). The provided cur_seq will always be odd. - * - * If the caller does not call mmu_interval_read_begin() or - * mmu_interval_read_retry() then this call is not required. - */ -static inline void -mmu_interval_set_seq(struct mmu_interval_notifier *interval_sub, - unsigned long cur_seq) -{ - WRITE_ONCE(interval_sub->invalidate_seq, cur_seq); -} - -/** - * mmu_interval_read_retry - End a read side critical section against a VA range - * interval_sub: The subscription - * seq: The return of the paired mmu_interval_read_begin() - * - * This MUST be called under a user provided lock that is also held - * unconditionally by op->invalidate() when it calls mmu_interval_set_seq(). 
- * - * Each call should be paired with a single mmu_interval_read_begin() and - * should be used to conclude the read side. - * - * Returns true if an invalidation collided with this critical section, and - * the caller should retry. - */ -static inline bool -mmu_interval_read_retry(struct mmu_interval_notifier *interval_sub, - unsigned long seq) -{ - return interval_sub->invalidate_seq != seq; -} - -/** - * mmu_interval_check_retry - Test if a collision has occurred - * interval_sub: The subscription - * seq: The return of the matching mmu_interval_read_begin() - * - * This can be used in the critical section between mmu_interval_read_begin() - * and mmu_interval_read_retry(). A return of true indicates an invalidation - * has collided with this critical region and a future - * mmu_interval_read_retry() will return true. - * - * False is not reliable and only suggests a collision may not have - * occurred. It can be called many times and does not have to hold the user - * provided lock. - * - * This call can be used as part of loops and other expensive operations to - * expedite a retry. 
- */ -static inline bool -mmu_interval_check_retry(struct mmu_interval_notifier *interval_sub, - unsigned long seq) -{ - /* Pairs with the WRITE_ONCE in mmu_interval_set_seq() */ - return READ_ONCE(interval_sub->invalidate_seq) != seq; -} - -extern void __mmu_notifier_subscriptions_destroy(struct mm_struct *mm); +extern void mmu_notifier_unregister_no_release(struct mmu_notifier *mn, + struct mm_struct *mm); +extern void __mmu_notifier_mm_destroy(struct mm_struct *mm); extern void __mmu_notifier_release(struct mm_struct *mm); extern int __mmu_notifier_clear_flush_young(struct mm_struct *mm, unsigned long start, @@ -395,19 +220,14 @@ extern int __mmu_notifier_test_young(struct mm_struct *mm, unsigned long address); extern void __mmu_notifier_change_pte(struct mm_struct *mm, unsigned long address, pte_t pte); -extern int __mmu_notifier_invalidate_range_start(struct mmu_notifier_range *r); -extern void __mmu_notifier_invalidate_range_end(struct mmu_notifier_range *r, - bool only_end); +extern void __mmu_notifier_invalidate_page(struct mm_struct *mm, + unsigned long address); +extern void __mmu_notifier_invalidate_range_start(struct mm_struct *mm, + unsigned long start, unsigned long end); +extern void __mmu_notifier_invalidate_range_end(struct mm_struct *mm, + unsigned long start, unsigned long end); extern void __mmu_notifier_invalidate_range(struct mm_struct *mm, unsigned long start, unsigned long end); -extern bool -mmu_notifier_range_update_to_read_only(const struct mmu_notifier_range *range); - -static inline bool -mmu_notifier_range_blockable(const struct mmu_notifier_range *range) -{ - return (range->flags & MMU_NOTIFIER_RANGE_BLOCKABLE); -} static inline void mmu_notifier_release(struct mm_struct *mm) { @@ -448,48 +268,25 @@ static inline void mmu_notifier_change_pte(struct mm_struct *mm, __mmu_notifier_change_pte(mm, address, pte); } -static inline void -mmu_notifier_invalidate_range_start(struct mmu_notifier_range *range) +static inline void 
mmu_notifier_invalidate_page(struct mm_struct *mm, + unsigned long address) { - might_sleep(); - - lock_map_acquire(&__mmu_notifier_invalidate_range_start_map); - if (mm_has_notifiers(range->mm)) { - range->flags |= MMU_NOTIFIER_RANGE_BLOCKABLE; - __mmu_notifier_invalidate_range_start(range); - } - lock_map_release(&__mmu_notifier_invalidate_range_start_map); + if (mm_has_notifiers(mm)) + __mmu_notifier_invalidate_page(mm, address); } -static inline int -mmu_notifier_invalidate_range_start_nonblock(struct mmu_notifier_range *range) +static inline void mmu_notifier_invalidate_range_start(struct mm_struct *mm, + unsigned long start, unsigned long end) { - int ret = 0; - - lock_map_acquire(&__mmu_notifier_invalidate_range_start_map); - if (mm_has_notifiers(range->mm)) { - range->flags &= ~MMU_NOTIFIER_RANGE_BLOCKABLE; - ret = __mmu_notifier_invalidate_range_start(range); - } - lock_map_release(&__mmu_notifier_invalidate_range_start_map); - return ret; + if (mm_has_notifiers(mm)) + __mmu_notifier_invalidate_range_start(mm, start, end); } -static inline void -mmu_notifier_invalidate_range_end(struct mmu_notifier_range *range) +static inline void mmu_notifier_invalidate_range_end(struct mm_struct *mm, + unsigned long start, unsigned long end) { - if (mmu_notifier_range_blockable(range)) - might_sleep(); - - if (mm_has_notifiers(range->mm)) - __mmu_notifier_invalidate_range_end(range, false); -} - -static inline void -mmu_notifier_invalidate_range_only_end(struct mmu_notifier_range *range) -{ - if (mm_has_notifiers(range->mm)) - __mmu_notifier_invalidate_range_end(range, true); + if (mm_has_notifiers(mm)) + __mmu_notifier_invalidate_range_end(mm, start, end); } static inline void mmu_notifier_invalidate_range(struct mm_struct *mm, @@ -499,42 +296,15 @@ static inline void mmu_notifier_invalidate_range(struct mm_struct *mm, __mmu_notifier_invalidate_range(mm, start, end); } -static inline void mmu_notifier_subscriptions_init(struct mm_struct *mm) +static inline void 
mmu_notifier_mm_init(struct mm_struct *mm) { - mm->notifier_subscriptions = NULL; + mm->mmu_notifier_mm = NULL; } -static inline void mmu_notifier_subscriptions_destroy(struct mm_struct *mm) +static inline void mmu_notifier_mm_destroy(struct mm_struct *mm) { if (mm_has_notifiers(mm)) - __mmu_notifier_subscriptions_destroy(mm); -} - - -static inline void mmu_notifier_range_init(struct mmu_notifier_range *range, - enum mmu_notifier_event event, - unsigned flags, - struct vm_area_struct *vma, - struct mm_struct *mm, - unsigned long start, - unsigned long end) -{ - range->vma = vma; - range->event = event; - range->mm = mm; - range->start = start; - range->end = end; - range->flags = flags; -} - -static inline void mmu_notifier_range_init_owner( - struct mmu_notifier_range *range, - enum mmu_notifier_event event, unsigned int flags, - struct vm_area_struct *vma, struct mm_struct *mm, - unsigned long start, unsigned long end, void *owner) -{ - mmu_notifier_range_init(range, event, flags, vma, mm, start, end); - range->owner = owner; + __mmu_notifier_mm_destroy(mm); } #define ptep_clear_flush_young_notify(__vma, __address, __ptep) \ @@ -611,17 +381,16 @@ static inline void mmu_notifier_range_init_owner( ___pmd; \ }) -#define pudp_huge_clear_flush_notify(__vma, __haddr, __pud) \ +#define pmdp_huge_get_and_clear_notify(__mm, __haddr, __pmd) \ ({ \ - unsigned long ___haddr = __haddr & HPAGE_PUD_MASK; \ - struct mm_struct *___mm = (__vma)->vm_mm; \ - pud_t ___pud; \ + unsigned long ___haddr = __haddr & HPAGE_PMD_MASK; \ + pmd_t ___pmd; \ \ - ___pud = pudp_huge_clear_flush(__vma, __haddr, __pud); \ - mmu_notifier_invalidate_range(___mm, ___haddr, \ - ___haddr + HPAGE_PUD_SIZE); \ + ___pmd = pmdp_huge_get_and_clear(__mm, __haddr, __pmd); \ + mmu_notifier_invalidate_range(__mm, ___haddr, \ + ___haddr + HPAGE_PMD_SIZE); \ \ - ___pud; \ + ___pmd; \ }) /* @@ -644,38 +413,12 @@ static inline void mmu_notifier_range_init_owner( set_pte_at(___mm, ___address, __ptep, ___pte); \ }) 
+extern void mmu_notifier_call_srcu(struct rcu_head *rcu, + void (*func)(struct rcu_head *rcu)); +extern void mmu_notifier_synchronize(void); + #else /* CONFIG_MMU_NOTIFIER */ -struct mmu_notifier_range { - unsigned long start; - unsigned long end; -}; - -static inline void _mmu_notifier_range_init(struct mmu_notifier_range *range, - unsigned long start, - unsigned long end) -{ - range->start = start; - range->end = end; -} - -#define mmu_notifier_range_init(range,event,flags,vma,mm,start,end) \ - _mmu_notifier_range_init(range, start, end) -#define mmu_notifier_range_init_owner(range, event, flags, vma, mm, start, \ - end, owner) \ - _mmu_notifier_range_init(range, start, end) - -static inline bool -mmu_notifier_range_blockable(const struct mmu_notifier_range *range) -{ - return true; -} - -static inline int mm_has_notifiers(struct mm_struct *mm) -{ - return 0; -} - static inline void mmu_notifier_release(struct mm_struct *mm) { } @@ -698,24 +441,18 @@ static inline void mmu_notifier_change_pte(struct mm_struct *mm, { } -static inline void -mmu_notifier_invalidate_range_start(struct mmu_notifier_range *range) +static inline void mmu_notifier_invalidate_page(struct mm_struct *mm, + unsigned long address) { } -static inline int -mmu_notifier_invalidate_range_start_nonblock(struct mmu_notifier_range *range) -{ - return 0; -} - -static inline -void mmu_notifier_invalidate_range_end(struct mmu_notifier_range *range) +static inline void mmu_notifier_invalidate_range_start(struct mm_struct *mm, + unsigned long start, unsigned long end) { } -static inline void -mmu_notifier_invalidate_range_only_end(struct mmu_notifier_range *range) +static inline void mmu_notifier_invalidate_range_end(struct mm_struct *mm, + unsigned long start, unsigned long end) { } @@ -724,29 +461,23 @@ static inline void mmu_notifier_invalidate_range(struct mm_struct *mm, { } -static inline void mmu_notifier_subscriptions_init(struct mm_struct *mm) +static inline void mmu_notifier_mm_init(struct 
mm_struct *mm) { } -static inline void mmu_notifier_subscriptions_destroy(struct mm_struct *mm) +static inline void mmu_notifier_mm_destroy(struct mm_struct *mm) { } -#define mmu_notifier_range_update_to_read_only(r) false - #define ptep_clear_flush_young_notify ptep_clear_flush_young #define pmdp_clear_flush_young_notify pmdp_clear_flush_young #define ptep_clear_young_notify ptep_test_and_clear_young #define pmdp_clear_young_notify pmdp_test_and_clear_young #define ptep_clear_flush_notify ptep_clear_flush #define pmdp_huge_clear_flush_notify pmdp_huge_clear_flush -#define pudp_huge_clear_flush_notify pudp_huge_clear_flush +#define pmdp_huge_get_and_clear_notify pmdp_huge_get_and_clear #define set_pte_at_notify set_pte_at -static inline void mmu_notifier_synchronize(void) -{ -} - #endif /* CONFIG_MMU_NOTIFIER */ #endif /* _LINUX_MMU_NOTIFIER_H */ diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h index 6a1d79d846..bd5c3aefdf 100644 --- a/include/linux/mmzone.h +++ b/include/linux/mmzone.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_MMZONE_H #define _LINUX_MMZONE_H @@ -18,9 +17,6 @@ #include #include #include -#include -#include -#include #include /* Free memory management - zoned buddy allocator. */ @@ -39,7 +35,7 @@ */ #define PAGE_ALLOC_COSTLY_ORDER 3 -enum migratetype { +enum { MIGRATE_UNMOVABLE, MIGRATE_MOVABLE, MIGRATE_RECLAIMABLE, @@ -56,7 +52,7 @@ enum migratetype { * pageblocks to MIGRATE_CMA which can be done by * __free_pageblock_cma() function. What is important though * is that a range of pageblocks must be aligned to - * MAX_ORDER_NR_PAGES should biggest page be bigger than + * MAX_ORDER_NR_PAGES should biggest page be bigger then * a single pageblock. 
*/ MIGRATE_CMA, @@ -68,7 +64,7 @@ enum migratetype { }; /* In mm/page_alloc.c; keep in sync also with show_migration_types() there */ -extern const char * const migratetype_names[MIGRATE_TYPES]; +extern char * const migratetype_names[MIGRATE_TYPES]; #ifdef CONFIG_CMA # define is_migrate_cma(migratetype) unlikely((migratetype) == MIGRATE_CMA) @@ -78,43 +74,29 @@ extern const char * const migratetype_names[MIGRATE_TYPES]; # define is_migrate_cma_page(_page) false #endif -static inline bool is_migrate_movable(int mt) -{ - return is_migrate_cma(mt) || mt == MIGRATE_MOVABLE; -} - #define for_each_migratetype_order(order, type) \ for (order = 0; order < MAX_ORDER; order++) \ for (type = 0; type < MIGRATE_TYPES; type++) extern int page_group_by_mobility_disabled; -#define MIGRATETYPE_MASK ((1UL << PB_migratetype_bits) - 1) +#define NR_MIGRATETYPE_BITS (PB_migrate_end - PB_migrate + 1) +#define MIGRATETYPE_MASK ((1UL << NR_MIGRATETYPE_BITS) - 1) #define get_pageblock_migratetype(page) \ - get_pfnblock_flags_mask(page, page_to_pfn(page), MIGRATETYPE_MASK) + get_pfnblock_flags_mask(page, page_to_pfn(page), \ + PB_migrate_end, MIGRATETYPE_MASK) struct free_area { struct list_head free_list[MIGRATE_TYPES]; unsigned long nr_free; }; -static inline struct page *get_page_from_free_area(struct free_area *area, - int migratetype) -{ - return list_first_entry_or_null(&area->free_list[migratetype], - struct page, lru); -} - -static inline bool free_area_empty(struct free_area *area, int migratetype) -{ - return list_empty(&area->free_list[migratetype]); -} - struct pglist_data; /* - * Add a wild amount of padding here to ensure data fall into separate + * zone->lock and the zone lru_lock are two of the hottest locks in the kernel. + * So add a wild amount of padding here to ensure that they fall into separate * cachelines. There are very few zone structures in the machine, so space * consumption is not a concern here. 
*/ @@ -127,20 +109,6 @@ struct zone_padding { #define ZONE_PADDING(name) #endif -#ifdef CONFIG_NUMA -enum numa_stat_item { - NUMA_HIT, /* allocated in intended node */ - NUMA_MISS, /* allocated in non intended node */ - NUMA_FOREIGN, /* was intended here, hit elsewhere */ - NUMA_INTERLEAVE_HIT, /* interleaver preferred this zone */ - NUMA_LOCAL, /* allocation from local node */ - NUMA_OTHER, /* allocation from other node */ - NR_VM_NUMA_EVENT_ITEMS -}; -#else -#define NR_VM_NUMA_EVENT_ITEMS 0 -#endif - enum zone_stat_item { /* First 128 byte cacheline (assuming 64 bit words) */ NR_FREE_PAGES, @@ -152,10 +120,22 @@ enum zone_stat_item { NR_ZONE_UNEVICTABLE, NR_ZONE_WRITE_PENDING, /* Count of dirty, writeback and unstable pages */ NR_MLOCK, /* mlock()ed pages found and moved off LRU */ + NR_SLAB_RECLAIMABLE, + NR_SLAB_UNRECLAIMABLE, + NR_PAGETABLE, /* used for pagetables */ + NR_KERNEL_STACK_KB, /* measured in KiB */ /* Second 128 byte cacheline */ NR_BOUNCE, #if IS_ENABLED(CONFIG_ZSMALLOC) NR_ZSPAGES, /* allocated in zsmalloc */ +#endif +#ifdef CONFIG_NUMA + NUMA_HIT, /* allocated in intended node */ + NUMA_MISS, /* allocated in non intended node */ + NUMA_FOREIGN, /* was intended here, hit elsewhere */ + NUMA_INTERLEAVE_HIT, /* interleaver preferred this zone */ + NUMA_LOCAL, /* allocation from local node */ + NUMA_OTHER, /* allocation from other node */ #endif NR_FREE_CMA_PAGES, NR_VM_ZONE_STAT_ITEMS }; @@ -167,20 +147,11 @@ enum node_stat_item { NR_INACTIVE_FILE, /* " " " " " */ NR_ACTIVE_FILE, /* " " " " " */ NR_UNEVICTABLE, /* " " " " " */ - NR_SLAB_RECLAIMABLE_B, - NR_SLAB_UNRECLAIMABLE_B, NR_ISOLATED_ANON, /* Temporary isolated pages from anon lru */ NR_ISOLATED_FILE, /* Temporary isolated pages from file lru */ - WORKINGSET_NODES, - WORKINGSET_REFAULT_BASE, - WORKINGSET_REFAULT_ANON = WORKINGSET_REFAULT_BASE, - WORKINGSET_REFAULT_FILE, - WORKINGSET_ACTIVATE_BASE, - WORKINGSET_ACTIVATE_ANON = WORKINGSET_ACTIVATE_BASE, - WORKINGSET_ACTIVATE_FILE, - 
WORKINGSET_RESTORE_BASE, - WORKINGSET_RESTORE_ANON = WORKINGSET_RESTORE_BASE, - WORKINGSET_RESTORE_FILE, + NR_PAGES_SCANNED, /* pages scanned since last reclaim */ + WORKINGSET_REFAULT, + WORKINGSET_ACTIVATE, WORKINGSET_NODERECLAIM, NR_ANON_MAPPED, /* Mapped anonymous pages */ NR_FILE_MAPPED, /* pagecache pages mapped into pagetables. @@ -192,64 +163,15 @@ enum node_stat_item { NR_SHMEM, /* shmem pages (included tmpfs/GEM pages) */ NR_SHMEM_THPS, NR_SHMEM_PMDMAPPED, - NR_FILE_THPS, - NR_FILE_PMDMAPPED, NR_ANON_THPS, + NR_UNSTABLE_NFS, /* NFS unstable pages */ NR_VMSCAN_WRITE, NR_VMSCAN_IMMEDIATE, /* Prioritise for reclaim when writeback ends */ NR_DIRTIED, /* page dirtyings since bootup */ NR_WRITTEN, /* page writings since bootup */ - NR_KERNEL_MISC_RECLAIMABLE, /* reclaimable non-slab kernel pages */ - NR_FOLL_PIN_ACQUIRED, /* via: pin_user_page(), gup flag: FOLL_PIN */ - NR_FOLL_PIN_RELEASED, /* pages returned via unpin_user_page() */ - NR_KERNEL_STACK_KB, /* measured in KiB */ -#if IS_ENABLED(CONFIG_SHADOW_CALL_STACK) - NR_KERNEL_SCS_KB, /* measured in KiB */ -#endif - NR_PAGETABLE, /* used for pagetables */ -#ifdef CONFIG_SWAP - NR_SWAPCACHE, -#endif NR_VM_NODE_STAT_ITEMS }; -/* - * Returns true if the item should be printed in THPs (/proc/vmstat - * currently prints number of anon, file and shmem THPs. But the item - * is charged in pages). - */ -static __always_inline bool vmstat_item_print_in_thp(enum node_stat_item item) -{ - if (!IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE)) - return false; - - return item == NR_ANON_THPS || - item == NR_FILE_THPS || - item == NR_SHMEM_THPS || - item == NR_SHMEM_PMDMAPPED || - item == NR_FILE_PMDMAPPED; -} - -/* - * Returns true if the value is measured in bytes (most vmstat values are - * measured in pages). This defines the API part, the internal representation - * might be different. - */ -static __always_inline bool vmstat_item_in_bytes(int idx) -{ - /* - * Global and per-node slab counters track slab pages. 
- * It's expected that changes are multiples of PAGE_SIZE. - * Internally values are stored in pages. - * - * Per-memcg and per-lruvec counters track memory, consumed - * by individual slab objects. These counters are actually - * byte-precise. - */ - return (idx == NR_SLAB_RECLAIMABLE_B || - idx == NR_SLAB_UNRECLAIMABLE_B); -} - /* * We do arithmetic on the LRU lists in various places in the code, * so it is important to keep the active lists LRU_ACTIVE higher in @@ -276,47 +198,47 @@ enum lru_list { #define for_each_evictable_lru(lru) for (lru = 0; lru <= LRU_ACTIVE_FILE; lru++) -static inline bool is_file_lru(enum lru_list lru) +static inline int is_file_lru(enum lru_list lru) { return (lru == LRU_INACTIVE_FILE || lru == LRU_ACTIVE_FILE); } -static inline bool is_active_lru(enum lru_list lru) +static inline int is_active_lru(enum lru_list lru) { return (lru == LRU_ACTIVE_ANON || lru == LRU_ACTIVE_FILE); } -#define ANON_AND_FILE 2 - -enum lruvec_flags { - LRUVEC_CONGESTED, /* lruvec has many dirty pages - * backed by a congested BDI - */ +struct zone_reclaim_stat { + /* + * The pageout code in vmscan.c keeps track of how many of the + * mem/swap backed and file backed pages are referenced. + * The higher the rotated/scanned ratio, the more valuable + * that cache is. + * + * The anon LRU stats live in [0], file LRU stats in [1] + */ + unsigned long recent_rotated[2]; + unsigned long recent_scanned[2]; }; struct lruvec { struct list_head lists[NR_LRU_LISTS]; - /* per lruvec lru_lock for memcg */ - spinlock_t lru_lock; - /* - * These track the cost of reclaiming one LRU - file or anon - - * over the other. As the observed cost of reclaiming one LRU - * increases, the reclaim scan balance tips toward the other. 
- */ - unsigned long anon_cost; - unsigned long file_cost; - /* Non-resident age, driven by LRU movement */ - atomic_long_t nonresident_age; - /* Refaults at the time of last reclaim cycle */ - unsigned long refaults[ANON_AND_FILE]; - /* Various lruvec state flags (enum lruvec_flags) */ - unsigned long flags; + struct zone_reclaim_stat reclaim_stat; + /* Evictions & activations on the inactive file list */ + atomic_long_t inactive_age; #ifdef CONFIG_MEMCG struct pglist_data *pgdat; #endif }; -/* Isolate unmapped pages */ +/* Mask used at gathering information at once (see memcontrol.c) */ +#define LRU_ALL_FILE (BIT(LRU_INACTIVE_FILE) | BIT(LRU_ACTIVE_FILE)) +#define LRU_ALL_ANON (BIT(LRU_INACTIVE_ANON) | BIT(LRU_ACTIVE_ANON)) +#define LRU_ALL ((1 << NR_LRU_LISTS) - 1) + +/* Isolate clean file */ +#define ISOLATE_CLEAN ((__force isolate_mode_t)0x1) +/* Isolate unmapped file */ #define ISOLATE_UNMAPPED ((__force isolate_mode_t)0x2) /* Isolate for asynchronous migration */ #define ISOLATE_ASYNC_MIGRATE ((__force isolate_mode_t)0x4) @@ -324,7 +246,7 @@ struct lruvec { #define ISOLATE_UNEVICTABLE ((__force isolate_mode_t)0x8) /* LRU Isolation modes. */ -typedef unsigned __bitwise isolate_mode_t; +typedef unsigned __bitwise__ isolate_mode_t; enum zone_watermarks { WMARK_MIN, @@ -333,55 +255,27 @@ enum zone_watermarks { NR_WMARK }; -/* - * One per migratetype for each PAGE_ALLOC_COSTLY_ORDER plus one additional - * for pageblock size for THP if configured. - */ -#ifdef CONFIG_TRANSPARENT_HUGEPAGE -#define NR_PCP_THP 1 -#else -#define NR_PCP_THP 0 -#endif -#define NR_PCP_LISTS (MIGRATE_PCPTYPES * (PAGE_ALLOC_COSTLY_ORDER + 1 + NR_PCP_THP)) +#define min_wmark_pages(z) (z->watermark[WMARK_MIN]) +#define low_wmark_pages(z) (z->watermark[WMARK_LOW]) +#define high_wmark_pages(z) (z->watermark[WMARK_HIGH]) -/* - * Shift to encode migratetype and order in the same integer, with order - * in the least significant bits. 
- */ -#define NR_PCP_ORDER_WIDTH 8 -#define NR_PCP_ORDER_MASK ((1<_watermark[WMARK_MIN] + z->watermark_boost) -#define low_wmark_pages(z) (z->_watermark[WMARK_LOW] + z->watermark_boost) -#define high_wmark_pages(z) (z->_watermark[WMARK_HIGH] + z->watermark_boost) -#define wmark_pages(z, i) (z->_watermark[i] + z->watermark_boost) - -/* Fields and list protected by pagesets local_lock in page_alloc.c */ struct per_cpu_pages { int count; /* number of pages in the list */ int high; /* high watermark, emptying needed */ int batch; /* chunk size for buddy add/remove */ - short free_factor; /* batch scaling factor during free */ -#ifdef CONFIG_NUMA - short expire; /* When 0, remote pagesets are drained */ -#endif /* Lists of pages, one per migrate type stored on the pcp-lists */ - struct list_head lists[NR_PCP_LISTS]; + struct list_head lists[MIGRATE_PCPTYPES]; }; -struct per_cpu_zonestat { -#ifdef CONFIG_SMP - s8 vm_stat_diff[NR_VM_ZONE_STAT_ITEMS]; - s8 stat_threshold; -#endif +struct per_cpu_pageset { + struct per_cpu_pages pcp; #ifdef CONFIG_NUMA - /* - * Low priority inaccurate counters that are only folded - * on demand. Use a large type to avoid the overhead of - * folding during refresh_cpu_vm_stats. - */ - unsigned long vm_numa_event[NR_VM_NUMA_EVENT_ITEMS]; + s8 expire; +#endif +#ifdef CONFIG_SMP + s8 stat_threshold; + s8 vm_stat_diff[NR_VM_ZONE_STAT_ITEMS]; #endif }; @@ -393,20 +287,33 @@ struct per_cpu_nodestat { #endif /* !__GENERATING_BOUNDS.H */ enum zone_type { - /* - * ZONE_DMA and ZONE_DMA32 are used when there are peripherals not able - * to DMA to all of the addressable memory (ZONE_NORMAL). - * On architectures where this area covers the whole 32 bit address - * space ZONE_DMA32 is used. ZONE_DMA is left for the ones with smaller - * DMA addressing constraints. This distinction is important as a 32bit - * DMA mask is assumed when ZONE_DMA32 is defined. 
Some 64-bit - * platforms may need both zones as they support peripherals with - * different DMA addressing limitations. - */ #ifdef CONFIG_ZONE_DMA + /* + * ZONE_DMA is used when there are devices that are not able + * to do DMA to all of addressable memory (ZONE_NORMAL). Then we + * carve out the portion of memory that is needed for these devices. + * The range is arch specific. + * + * Some examples + * + * Architecture Limit + * --------------------------- + * parisc, ia64, sparc <4G + * s390 <2G + * arm Various + * alpha Unlimited or 0-16MB. + * + * i386, x86_64 and multiple other arches + * <16M. + */ ZONE_DMA, #endif #ifdef CONFIG_ZONE_DMA32 + /* + * x86_64 needs two ZONE_DMAs because it supports devices that are + * only able to do DMA to the lower 16M but also 32 bit devices that + * can only do DMA areas below 4G. + */ ZONE_DMA32, #endif /* @@ -426,55 +333,6 @@ enum zone_type { */ ZONE_HIGHMEM, #endif - /* - * ZONE_MOVABLE is similar to ZONE_NORMAL, except that it contains - * movable pages with few exceptional cases described below. Main use - * cases for ZONE_MOVABLE are to make memory offlining/unplug more - * likely to succeed, and to locally limit unmovable allocations - e.g., - * to increase the number of THP/huge pages. Notable special cases are: - * - * 1. Pinned pages: (long-term) pinning of movable pages might - * essentially turn such pages unmovable. Therefore, we do not allow - * pinning long-term pages in ZONE_MOVABLE. When pages are pinned and - * faulted, they come from the right zone right away. However, it is - * still possible that address space already has pages in - * ZONE_MOVABLE at the time when pages are pinned (i.e. user has - * touches that memory before pinning). In such case we migrate them - * to a different zone. When migration fails - pinning fails. - * 2. memblock allocations: kernelcore/movablecore setups might create - * situations where ZONE_MOVABLE contains unmovable allocations - * after boot. 
Memory offlining and allocations fail early. - * 3. Memory holes: kernelcore/movablecore setups might create very rare - * situations where ZONE_MOVABLE contains memory holes after boot, - * for example, if we have sections that are only partially - * populated. Memory offlining and allocations fail early. - * 4. PG_hwpoison pages: while poisoned pages can be skipped during - * memory offlining, such pages cannot be allocated. - * 5. Unmovable PG_offline pages: in paravirtualized environments, - * hotplugged memory blocks might only partially be managed by the - * buddy (e.g., via XEN-balloon, Hyper-V balloon, virtio-mem). The - * parts not manged by the buddy are unmovable PG_offline pages. In - * some cases (virtio-mem), such pages can be skipped during - * memory offlining, however, cannot be moved/allocated. These - * techniques might use alloc_contig_range() to hide previously - * exposed pages from the buddy again (e.g., to implement some sort - * of memory unplug in virtio-mem). - * 6. ZERO_PAGE(0), kernelcore/movablecore setups might create - * situations where ZERO_PAGE(0) which is allocated differently - * on different platforms may end up in a movable zone. ZERO_PAGE(0) - * cannot be migrated. - * 7. Memory-hotplug: when using memmap_on_memory and onlining the - * memory to the MOVABLE zone, the vmemmap pages are also placed in - * such zone. Such pages cannot be really moved around as they are - * self-stored in the range, but they are treated as movable when - * the range they describe is about to be offlined. - * - * In general, no unmovable allocations that degrade memory offlining - * should end up in ZONE_MOVABLE. Allocators (like alloc_contig_range()) - * have to expect that migrating pages in ZONE_MOVABLE can fail (even - * if has_unmovable_pages() states that there are no unmovable pages, - * there can be false negatives). 
- */ ZONE_MOVABLE, #ifdef CONFIG_ZONE_DEVICE ZONE_DEVICE, @@ -485,14 +343,11 @@ enum zone_type { #ifndef __GENERATING_BOUNDS_H -#define ASYNC_AND_SYNC 2 - struct zone { /* Read-mostly fields */ /* zone watermarks, access with *_wmark_pages(zone) macros */ - unsigned long _watermark[NR_WMARK]; - unsigned long watermark_boost; + unsigned long watermark[NR_WMARK]; unsigned long nr_reserved_highatomic; @@ -511,14 +366,7 @@ struct zone { int node; #endif struct pglist_data *zone_pgdat; - struct per_cpu_pages __percpu *per_cpu_pageset; - struct per_cpu_zonestat __percpu *per_cpu_zonestats; - /* - * the high and batch values are copied to individual pagesets for - * faster access - */ - int pageset_high; - int pageset_batch; + struct per_cpu_pageset __percpu *pageset; #ifndef CONFIG_SPARSEMEM /* @@ -540,18 +388,11 @@ struct zone { * is calculated as: * present_pages = spanned_pages - absent_pages(pages in holes); * - * present_early_pages is present pages existing within the zone - * located on memory available since early boot, excluding hotplugged - * memory. - * * managed_pages is present pages managed by the buddy system, which * is calculated as (reserved_pages includes pages allocated by the * bootmem allocator): * managed_pages = present_pages - reserved_pages; * - * cma pages is present pages that are assigned for CMA use - * (MIGRATE_CMA). - * * So present_pages may be used by memory hotplug or memory power * management logic to figure out unmanaged pages by checking * (present_pages - managed_pages). And managed_pages should be used @@ -572,16 +413,16 @@ struct zone { * Write access to present_pages at runtime should be protected by * mem_hotplug_begin/end(). Any reader who can't tolerant drift of * present_pages should get_online_mems() to get a stable value. + * + * Read access to managed_pages should be safe because it's unsigned + * long. Write access to zone->managed_pages and totalram_pages are + * protected by managed_page_count_lock at runtime. 
Idealy only + * adjust_managed_page_count() should be used instead of directly + * touching zone->managed_pages and totalram_pages. */ - atomic_long_t managed_pages; + unsigned long managed_pages; unsigned long spanned_pages; unsigned long present_pages; -#if defined(CONFIG_MEMORY_HOTPLUG) - unsigned long present_early_pages; -#endif -#ifdef CONFIG_CMA - unsigned long cma_pages; -#endif const char *name; @@ -626,10 +467,8 @@ struct zone { #if defined CONFIG_COMPACTION || defined CONFIG_CMA /* pfn where compaction free scanner should start */ unsigned long compact_cached_free_pfn; - /* pfn where compaction migration scanner should start */ - unsigned long compact_cached_migrate_pfn[ASYNC_AND_SYNC]; - unsigned long compact_init_migrate_pfn; - unsigned long compact_init_free_pfn; + /* pfn where async and sync compaction migration scanner should start */ + unsigned long compact_cached_migrate_pfn[2]; #endif #ifdef CONFIG_COMPACTION @@ -637,7 +476,6 @@ struct zone { * On compaction failure, 1<managed_pages); -} - -static inline unsigned long zone_cma_pages(struct zone *zone) -{ -#ifdef CONFIG_CMA - return zone->cma_pages; -#else - return 0; -#endif -} - static inline unsigned long zone_end_pfn(const struct zone *zone) { return zone->zone_start_pfn + zone->spanned_pages; @@ -709,22 +528,6 @@ static inline bool zone_is_empty(struct zone *zone) return zone->spanned_pages == 0; } -/* - * Return true if [start_pfn, start_pfn + nr_pages) range has a non-empty - * intersection with the given zone - */ -static inline bool zone_intersects(struct zone *zone, - unsigned long start_pfn, unsigned long nr_pages) -{ - if (zone_is_empty(zone)) - return false; - if (start_pfn >= zone_end_pfn(zone) || - start_pfn + nr_pages <= zone->zone_start_pfn) - return false; - - return true; -} - /* * The "priority" of VM scanning is how much of the queues we will scan in one * go. 
A value of 12 for DEF_PRIORITY implies that we will scan 1/4096th of the @@ -774,61 +577,44 @@ struct zonelist { struct zoneref _zonerefs[MAX_ZONES_PER_ZONELIST + 1]; }; -/* - * The array of struct pages for flatmem. - * It must be declared for SPARSEMEM as well because there are configurations - * that rely on that. - */ +#ifndef CONFIG_DISCONTIGMEM +/* The array of struct pages - for discontigmem use pgdat->lmem_map */ extern struct page *mem_map; - -#ifdef CONFIG_TRANSPARENT_HUGEPAGE -struct deferred_split { - spinlock_t split_queue_lock; - struct list_head split_queue; - unsigned long split_queue_len; -}; #endif /* + * The pg_data_t structure is used in machines with CONFIG_DISCONTIGMEM + * (mostly NUMA machines?) to denote a higher-level memory zone than the + * zone denotes. + * * On NUMA machines, each NUMA node would have a pg_data_t to describe - * it's memory layout. On UMA machines there is a single pglist_data which - * describes the whole memory. + * it's memory layout. * * Memory statistics and page replacement data structures are maintained on a * per-zone basis. */ +struct bootmem_data; typedef struct pglist_data { - /* - * node_zones contains just the zones for THIS node. Not all of the - * zones may be populated, but it is the full list. It is referenced by - * this node's node_zonelists as well as other node's node_zonelists. - */ struct zone node_zones[MAX_NR_ZONES]; - - /* - * node_zonelists contains references to all zones in all nodes. - * Generally the first zones will be references to this node's - * node_zones. 
- */ struct zonelist node_zonelists[MAX_ZONELISTS]; - - int nr_zones; /* number of populated zones in this node */ -#ifdef CONFIG_FLATMEM /* means !SPARSEMEM */ + int nr_zones; +#ifdef CONFIG_FLAT_NODE_MEM_MAP /* means !SPARSEMEM */ struct page *node_mem_map; #ifdef CONFIG_PAGE_EXTENSION struct page_ext *node_page_ext; #endif #endif -#if defined(CONFIG_MEMORY_HOTPLUG) || defined(CONFIG_DEFERRED_STRUCT_PAGE_INIT) +#ifndef CONFIG_NO_BOOTMEM + struct bootmem_data *bdata; +#endif +#ifdef CONFIG_MEMORY_HOTPLUG /* - * Must be held any time you expect node_start_pfn, - * node_present_pages, node_spanned_pages or nr_zones to stay constant. - * Also synchronizes pgdat->first_deferred_pfn during deferred page - * init. + * Must be held any time you expect node_start_pfn, node_present_pages + * or node_spanned_pages stay constant. Holding this will also + * guarantee that any pfn_valid() stays that way. * * pgdat_resize_lock() and pgdat_resize_unlock() are provided to - * manipulate node_size_lock without checking for CONFIG_MEMORY_HOTPLUG - * or CONFIG_DEFERRED_STRUCT_PAGE_INIT. + * manipulate node_size_lock without checking for CONFIG_MEMORY_HOTPLUG. 
* * Nests above zone->lock and zone->span_seqlock */ @@ -844,16 +630,23 @@ typedef struct pglist_data { struct task_struct *kswapd; /* Protected by mem_hotplug_begin/end() */ int kswapd_order; - enum zone_type kswapd_highest_zoneidx; - - int kswapd_failures; /* Number of 'reclaimed == 0' runs */ + enum zone_type kswapd_classzone_idx; #ifdef CONFIG_COMPACTION int kcompactd_max_order; - enum zone_type kcompactd_highest_zoneidx; + enum zone_type kcompactd_classzone_idx; wait_queue_head_t kcompactd_wait; struct task_struct *kcompactd; - bool proactive_compact_trigger; +#endif +#ifdef CONFIG_NUMA_BALANCING + /* Lock serializing the migrate rate limiting window */ + spinlock_t numabalancing_migrate_lock; + + /* Rate limiting time interval */ + unsigned long numabalancing_migrate_next_window; + + /* Number of pages migrated during the rate limiting time interval */ + unsigned long numabalancing_migrate_nr_pages; #endif /* * This is a per-node reserve of pages that are not available @@ -863,7 +656,7 @@ typedef struct pglist_data { #ifdef CONFIG_NUMA /* - * node reclaim becomes active if more unmapped pages exist. + * zone reclaim becomes active if more unmapped pages exist. */ unsigned long min_unmapped_pages; unsigned long min_slab_pages; @@ -871,6 +664,7 @@ typedef struct pglist_data { /* Write-intensive fields used by page reclaim */ ZONE_PADDING(_pad1_) + spinlock_t lru_lock; #ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT /* @@ -881,17 +675,19 @@ typedef struct pglist_data { #endif /* CONFIG_DEFERRED_STRUCT_PAGE_INIT */ #ifdef CONFIG_TRANSPARENT_HUGEPAGE - struct deferred_split deferred_split_queue; + spinlock_t split_queue_lock; + struct list_head split_queue; + unsigned long split_queue_len; #endif /* Fields commonly accessed by the page reclaim scanner */ + struct lruvec lruvec; /* - * NOTE: THIS IS UNUSED IF MEMCG IS ENABLED. - * - * Use mem_cgroup_lruvec() to look up lruvecs. + * The target ratio of ACTIVE_ANON to INACTIVE_ANON pages on + * this node's LRU. 
Maintained by the pageout code. */ - struct lruvec __lruvec; + unsigned int inactive_ratio; unsigned long flags; @@ -899,12 +695,12 @@ typedef struct pglist_data { /* Per-node vmstats */ struct per_cpu_nodestat __percpu *per_cpu_nodestats; - atomic_long_t vm_stat[NR_VM_NODE_STAT_ITEMS]; + atomic_long_unchecked_t vm_stat[NR_VM_NODE_STAT_ITEMS]; } pg_data_t; #define node_present_pages(nid) (NODE_DATA(nid)->node_present_pages) #define node_spanned_pages(nid) (NODE_DATA(nid)->node_spanned_pages) -#ifdef CONFIG_FLATMEM +#ifdef CONFIG_FLAT_NODE_MEM_MAP #define pgdat_page_nr(pgdat, pagenr) ((pgdat)->node_mem_map + (pagenr)) #else #define pgdat_page_nr(pgdat, pagenr) pfn_to_page((pgdat)->node_start_pfn + (pagenr)) @@ -913,6 +709,15 @@ typedef struct pglist_data { #define node_start_pfn(nid) (NODE_DATA(nid)->node_start_pfn) #define node_end_pfn(nid) pgdat_end_pfn(NODE_DATA(nid)) +static inline spinlock_t *zone_lru_lock(struct zone *zone) +{ + return &zone->zone_pgdat->lru_lock; +} + +static inline struct lruvec *node_lruvec(struct pglist_data *pgdat) +{ + return &pgdat->lruvec; +} static inline unsigned long pgdat_end_pfn(pg_data_t *pgdat) { @@ -924,29 +729,43 @@ static inline bool pgdat_is_empty(pg_data_t *pgdat) return !pgdat->node_start_pfn && !pgdat->node_spanned_pages; } +static inline int zone_id(const struct zone *zone) +{ + struct pglist_data *pgdat = zone->zone_pgdat; + + return zone - pgdat->node_zones; +} + +#ifdef CONFIG_ZONE_DEVICE +static inline bool is_dev_zone(const struct zone *zone) +{ + return zone_id(zone) == ZONE_DEVICE; +} +#else +static inline bool is_dev_zone(const struct zone *zone) +{ + return false; +} +#endif + #include -void build_all_zonelists(pg_data_t *pgdat); -void wakeup_kswapd(struct zone *zone, gfp_t gfp_mask, int order, - enum zone_type highest_zoneidx); +extern struct mutex zonelists_mutex; +void build_all_zonelists(pg_data_t *pgdat, struct zone *zone); +void wakeup_kswapd(struct zone *zone, int order, enum zone_type classzone_idx); 
bool __zone_watermark_ok(struct zone *z, unsigned int order, unsigned long mark, - int highest_zoneidx, unsigned int alloc_flags, + int classzone_idx, unsigned int alloc_flags, long free_pages); bool zone_watermark_ok(struct zone *z, unsigned int order, - unsigned long mark, int highest_zoneidx, + unsigned long mark, int classzone_idx, unsigned int alloc_flags); bool zone_watermark_ok_safe(struct zone *z, unsigned int order, - unsigned long mark, int highest_zoneidx); -/* - * Memory initialization context, use to differentiate memory added by - * the platform statically or via memory hotplug interface. - */ -enum meminit_context { - MEMINIT_EARLY, - MEMINIT_HOTPLUG, + unsigned long mark, int classzone_idx); +enum memmap_context { + MEMMAP_EARLY, + MEMMAP_HOTPLUG, }; - -extern void init_currently_empty_zone(struct zone *zone, unsigned long start_pfn, +extern int init_currently_empty_zone(struct zone *zone, unsigned long start_pfn, unsigned long size); extern void lruvec_init(struct lruvec *lruvec); @@ -956,33 +775,33 @@ static inline struct pglist_data *lruvec_pgdat(struct lruvec *lruvec) #ifdef CONFIG_MEMCG return lruvec->pgdat; #else - return container_of(lruvec, struct pglist_data, __lruvec); + return container_of(lruvec, struct pglist_data, lruvec); #endif } +extern unsigned long lruvec_lru_size(struct lruvec *lruvec, enum lru_list lru, int zone_idx); + +#ifdef CONFIG_HAVE_MEMORY_PRESENT +void memory_present(int nid, unsigned long start, unsigned long end); +#else +static inline void memory_present(int nid, unsigned long start, unsigned long end) {} +#endif + #ifdef CONFIG_HAVE_MEMORYLESS_NODES int local_memory_node(int node_id); #else static inline int local_memory_node(int node_id) { return node_id; }; #endif +#ifdef CONFIG_NEED_NODE_MEMMAP_SIZE +unsigned long __init node_memmap_size_bytes(int, unsigned long, unsigned long); +#endif + /* * zone_idx() returns 0 for the ZONE_DMA zone, 1 for the ZONE_NORMAL zone, etc. 
*/ #define zone_idx(zone) ((zone) - (zone)->zone_pgdat->node_zones) -#ifdef CONFIG_ZONE_DEVICE -static inline bool zone_is_zone_device(struct zone *zone) -{ - return zone_idx(zone) == ZONE_DEVICE; -} -#else -static inline bool zone_is_zone_device(struct zone *zone) -{ - return false; -} -#endif - /* * Returns true if a zone has pages managed by the buddy allocator. * All the reclaim decisions have to use this function rather than @@ -991,7 +810,7 @@ static inline bool zone_is_zone_device(struct zone *zone) */ static inline bool managed_zone(struct zone *zone) { - return zone_managed_pages(zone); + return zone->managed_pages; } /* Returns true if a zone has memory */ @@ -1000,43 +819,34 @@ static inline bool populated_zone(struct zone *zone) return zone->present_pages; } -#ifdef CONFIG_NUMA -static inline int zone_to_nid(struct zone *zone) -{ - return zone->node; -} - -static inline void zone_set_nid(struct zone *zone, int nid) -{ - zone->node = nid; -} -#else -static inline int zone_to_nid(struct zone *zone) -{ - return 0; -} - -static inline void zone_set_nid(struct zone *zone, int nid) {} -#endif - extern int movable_zone; +#ifdef CONFIG_HIGHMEM +static inline int zone_movable_is_highmem(void) +{ +#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP + return movable_zone == ZONE_HIGHMEM; +#else + return (ZONE_MOVABLE - 1) == ZONE_HIGHMEM; +#endif +} +#endif + static inline int is_highmem_idx(enum zone_type idx) { #ifdef CONFIG_HIGHMEM return (idx == ZONE_HIGHMEM || - (idx == ZONE_MOVABLE && movable_zone == ZONE_HIGHMEM)); + (idx == ZONE_MOVABLE && zone_movable_is_highmem())); #else return 0; #endif } /** - * is_highmem - helper function to quickly check if a struct zone is a + * is_highmem - helper function to quickly check if a struct zone is a * highmem zone or not. This is an attempt to keep references * to ZONE_{DMA/NORMAL/HIGHMEM/etc} in general code to a minimum. 
- * @zone: pointer to struct zone variable - * Return: 1 for a highmem zone, 0 otherwise + * @zone - pointer to struct zone variable */ static inline int is_highmem(struct zone *zone) { @@ -1049,40 +859,36 @@ static inline int is_highmem(struct zone *zone) /* These two functions are used to setup the per zone pages min values */ struct ctl_table; - -int min_free_kbytes_sysctl_handler(struct ctl_table *, int, void *, size_t *, - loff_t *); -int watermark_scale_factor_sysctl_handler(struct ctl_table *, int, void *, - size_t *, loff_t *); -extern int sysctl_lowmem_reserve_ratio[MAX_NR_ZONES]; -int lowmem_reserve_ratio_sysctl_handler(struct ctl_table *, int, void *, - size_t *, loff_t *); -int percpu_pagelist_high_fraction_sysctl_handler(struct ctl_table *, int, - void *, size_t *, loff_t *); +int min_free_kbytes_sysctl_handler(struct ctl_table *, int, + void __user *, size_t *, loff_t *); +int watermark_scale_factor_sysctl_handler(struct ctl_table *, int, + void __user *, size_t *, loff_t *); +extern int sysctl_lowmem_reserve_ratio[MAX_NR_ZONES-1]; +int lowmem_reserve_ratio_sysctl_handler(struct ctl_table *, int, + void __user *, size_t *, loff_t *); +int percpu_pagelist_fraction_sysctl_handler(struct ctl_table *, int, + void __user *, size_t *, loff_t *); int sysctl_min_unmapped_ratio_sysctl_handler(struct ctl_table *, int, - void *, size_t *, loff_t *); + void __user *, size_t *, loff_t *); int sysctl_min_slab_ratio_sysctl_handler(struct ctl_table *, int, - void *, size_t *, loff_t *); -int numa_zonelist_order_handler(struct ctl_table *, int, - void *, size_t *, loff_t *); -extern int percpu_pagelist_high_fraction; -extern char numa_zonelist_order[]; -#define NUMA_ZONELIST_ORDER_LEN 16 + void __user *, size_t *, loff_t *); -#ifndef CONFIG_NUMA +extern int numa_zonelist_order_handler(struct ctl_table *, int, + void __user *, size_t *, loff_t *); +extern char numa_zonelist_order[]; +#define NUMA_ZONELIST_ORDER_LEN 16 /* string buffer size */ + +#ifndef 
CONFIG_NEED_MULTIPLE_NODES extern struct pglist_data contig_page_data; -static inline struct pglist_data *NODE_DATA(int nid) -{ - return &contig_page_data; -} +#define NODE_DATA(nid) (&contig_page_data) #define NODE_MEM_MAP(nid) mem_map -#else /* CONFIG_NUMA */ +#else /* CONFIG_NEED_MULTIPLE_NODES */ #include -#endif /* !CONFIG_NUMA */ +#endif /* !CONFIG_NEED_MULTIPLE_NODES */ extern struct pglist_data *first_online_pgdat(void); extern struct pglist_data *next_online_pgdat(struct pglist_data *pgdat); @@ -1090,7 +896,7 @@ extern struct zone *next_zone(struct zone *zone); /** * for_each_online_pgdat - helper macro to iterate over all online nodes - * @pgdat: pointer to a pg_data_t variable + * @pgdat - pointer to a pg_data_t variable */ #define for_each_online_pgdat(pgdat) \ for (pgdat = first_online_pgdat(); \ @@ -1098,7 +904,7 @@ extern struct zone *next_zone(struct zone *zone); pgdat = next_online_pgdat(pgdat)) /** * for_each_zone - helper macro to iterate over all memory zones - * @zone: pointer to struct zone variable + * @zone - pointer to struct zone variable * * The user only needs to declare the zone variable, for_each_zone * fills it in. 
@@ -1128,7 +934,12 @@ static inline int zonelist_zone_idx(struct zoneref *zoneref) static inline int zonelist_node_idx(struct zoneref *zoneref) { - return zone_to_nid(zoneref->zone); +#ifdef CONFIG_NUMA + /* zone_to_nid not available in this context */ + return zoneref->zone->node; +#else + return 0; +#endif /* CONFIG_NUMA */ } struct zoneref *__next_zones_zonelist(struct zoneref *z, @@ -1137,18 +948,15 @@ struct zoneref *__next_zones_zonelist(struct zoneref *z, /** * next_zones_zonelist - Returns the next zone at or below highest_zoneidx within the allowed nodemask using a cursor within a zonelist as a starting point - * @z: The cursor used as a starting point for the search - * @highest_zoneidx: The zone index of the highest zone to return - * @nodes: An optional nodemask to filter the zonelist with + * @z - The cursor used as a starting point for the search + * @highest_zoneidx - The zone index of the highest zone to return + * @nodes - An optional nodemask to filter the zonelist with * * This function returns the next zone at or below a given zone index that is * within the allowed nodemask using a cursor as the starting point for the * search. The zoneref returned is a cursor that represents the current zone * being examined. It should be advanced by one before calling * next_zones_zonelist again. 
- * - * Return: the next zone at or below highest_zoneidx within the allowed - * nodemask using a cursor within a zonelist as a starting point */ static __always_inline struct zoneref *next_zones_zonelist(struct zoneref *z, enum zone_type highest_zoneidx, @@ -1161,9 +969,10 @@ static __always_inline struct zoneref *next_zones_zonelist(struct zoneref *z, /** * first_zones_zonelist - Returns the first zone at or below highest_zoneidx within the allowed nodemask in a zonelist - * @zonelist: The zonelist to search for a suitable zone - * @highest_zoneidx: The zone index of the highest zone to return - * @nodes: An optional nodemask to filter the zonelist with + * @zonelist - The zonelist to search for a suitable zone + * @highest_zoneidx - The zone index of the highest zone to return + * @nodes - An optional nodemask to filter the zonelist with + * @return - Zoneref pointer for the first suitable zone found (see below) * * This function returns the first zone at or below a given zone index that is * within the allowed nodemask. The zoneref returned is a cursor that can be @@ -1173,8 +982,6 @@ static __always_inline struct zoneref *next_zones_zonelist(struct zoneref *z, * When no eligible zone is found, zoneref->zone is NULL (zoneref itself is * never NULL). This may happen either genuinely, or due to concurrent nodemask * update due to cpuset modification. 
- * - * Return: Zoneref pointer for the first suitable zone found */ static inline struct zoneref *first_zones_zonelist(struct zonelist *zonelist, enum zone_type highest_zoneidx, @@ -1186,11 +993,11 @@ static inline struct zoneref *first_zones_zonelist(struct zonelist *zonelist, /** * for_each_zone_zonelist_nodemask - helper macro to iterate over valid zones in a zonelist at or below a given zone index and within a nodemask - * @zone: The current zone in the iterator - * @z: The current pointer within zonelist->_zonerefs being iterated - * @zlist: The zonelist being iterated - * @highidx: The zone index of the highest zone to return - * @nodemask: Nodemask allowed by the allocator + * @zone - The current zone in the iterator + * @z - The current pointer within zonelist->zones being iterated + * @zlist - The zonelist being iterated + * @highidx - The zone index of the highest zone to return + * @nodemask - Nodemask allowed by the allocator * * This iterator iterates though all zones at or below a given zone index and * within a given nodemask @@ -1201,7 +1008,7 @@ static inline struct zoneref *first_zones_zonelist(struct zonelist *zonelist, z = next_zones_zonelist(++z, highidx, nodemask), \ zone = zonelist_zone(z)) -#define for_next_zone_zonelist_nodemask(zone, z, highidx, nodemask) \ +#define for_next_zone_zonelist_nodemask(zone, z, zlist, highidx, nodemask) \ for (zone = z->zone; \ zone; \ z = next_zones_zonelist(++z, highidx, nodemask), \ @@ -1210,10 +1017,10 @@ static inline struct zoneref *first_zones_zonelist(struct zonelist *zonelist, /** * for_each_zone_zonelist - helper macro to iterate over valid zones in a zonelist at or below a given zone index - * @zone: The current zone in the iterator - * @z: The current pointer within zonelist->zones being iterated - * @zlist: The zonelist being iterated - * @highidx: The zone index of the highest zone to return + * @zone - The current zone in the iterator + * @z - The current pointer within zonelist->zones being 
iterated + * @zlist - The zonelist being iterated + * @highidx - The zone index of the highest zone to return * * This iterator iterates though all zones at or below a given zone index. */ @@ -1224,6 +1031,14 @@ static inline struct zoneref *first_zones_zonelist(struct zonelist *zonelist, #include #endif +#if !defined(CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID) && \ + !defined(CONFIG_HAVE_MEMBLOCK_NODE_MAP) +static inline unsigned long early_pfn_to_nid(unsigned long pfn) +{ + return 0; +} +#endif + #ifdef CONFIG_FLATMEM #define pfn_to_nid(pfn) (0) #endif @@ -1231,6 +1046,8 @@ static inline struct zoneref *first_zones_zonelist(struct zonelist *zonelist, #ifdef CONFIG_SPARSEMEM /* + * SECTION_SHIFT #bits space required to store a section # + * * PA_SECTION_SHIFT physical address to/from section number * PFN_SECTION_SHIFT pfn to/from section number */ @@ -1249,44 +1066,12 @@ static inline struct zoneref *first_zones_zonelist(struct zonelist *zonelist, #error Allocator MAX_ORDER exceeds SECTION_SIZE #endif -static inline unsigned long pfn_to_section_nr(unsigned long pfn) -{ - return pfn >> PFN_SECTION_SHIFT; -} -static inline unsigned long section_nr_to_pfn(unsigned long sec) -{ - return sec << PFN_SECTION_SHIFT; -} +#define pfn_to_section_nr(pfn) ((pfn) >> PFN_SECTION_SHIFT) +#define section_nr_to_pfn(sec) ((sec) << PFN_SECTION_SHIFT) #define SECTION_ALIGN_UP(pfn) (((pfn) + PAGES_PER_SECTION - 1) & PAGE_SECTION_MASK) #define SECTION_ALIGN_DOWN(pfn) ((pfn) & PAGE_SECTION_MASK) -#define SUBSECTION_SHIFT 21 -#define SUBSECTION_SIZE (1UL << SUBSECTION_SHIFT) - -#define PFN_SUBSECTION_SHIFT (SUBSECTION_SHIFT - PAGE_SHIFT) -#define PAGES_PER_SUBSECTION (1UL << PFN_SUBSECTION_SHIFT) -#define PAGE_SUBSECTION_MASK (~(PAGES_PER_SUBSECTION-1)) - -#if SUBSECTION_SHIFT > SECTION_SIZE_BITS -#error Subsection size exceeds section size -#else -#define SUBSECTIONS_PER_SECTION (1UL << (SECTION_SIZE_BITS - SUBSECTION_SHIFT)) -#endif - -#define SUBSECTION_ALIGN_UP(pfn) ALIGN((pfn), 
PAGES_PER_SUBSECTION) -#define SUBSECTION_ALIGN_DOWN(pfn) ((pfn) & PAGE_SUBSECTION_MASK) - -struct mem_section_usage { -#ifdef CONFIG_SPARSEMEM_VMEMMAP - DECLARE_BITMAP(subsection_map, SUBSECTIONS_PER_SECTION); -#endif - /* See declaration of similar field in struct zone */ - unsigned long pageblock_flags[0]; -}; - -void subsection_map_init(unsigned long pfn, unsigned long nr_pages); - struct page; struct page_ext; struct mem_section { @@ -1304,7 +1089,8 @@ struct mem_section { */ unsigned long section_mem_map; - struct mem_section_usage *usage; + /* See declaration of similar field in struct zone */ + unsigned long *pageblock_flags; #ifdef CONFIG_PAGE_EXTENSION /* * If SPARSEMEM, pgdat doesn't have page_ext pointer. We use @@ -1330,49 +1116,30 @@ struct mem_section { #define SECTION_ROOT_MASK (SECTIONS_PER_ROOT - 1) #ifdef CONFIG_SPARSEMEM_EXTREME -extern struct mem_section **mem_section; +extern struct mem_section *mem_section[NR_SECTION_ROOTS]; #else extern struct mem_section mem_section[NR_SECTION_ROOTS][SECTIONS_PER_ROOT]; #endif -static inline unsigned long *section_to_usemap(struct mem_section *ms) -{ - return ms->usage->pageblock_flags; -} - static inline struct mem_section *__nr_to_section(unsigned long nr) { -#ifdef CONFIG_SPARSEMEM_EXTREME - if (!mem_section) - return NULL; -#endif if (!mem_section[SECTION_NR_TO_ROOT(nr)]) return NULL; return &mem_section[SECTION_NR_TO_ROOT(nr)][nr & SECTION_ROOT_MASK]; } -extern size_t mem_section_usage_size(void); +extern int __section_nr(struct mem_section* ms); +extern unsigned long usemap_size(void); /* * We use the lower bits of the mem_map pointer to store - * a little bit of information. The pointer is calculated - * as mem_map - section_nr_to_pfn(pnum). The result is - * aligned to the minimum alignment of the two values: - * 1. All mem_map arrays are page-aligned. - * 2. section_nr_to_pfn() always clears PFN_SECTION_SHIFT - * lowest bits. 
PFN_SECTION_SHIFT is arch-specific - * (equal SECTION_SIZE_BITS - PAGE_SHIFT), and the - * worst combination is powerpc with 256k pages, - * which results in PFN_SECTION_SHIFT equal 6. - * To sum it up, at least 6 bits are available. + * a little bit of information. There should be at least + * 3 bits here due to 32-bit alignment. */ -#define SECTION_MARKED_PRESENT (1UL<<0) -#define SECTION_HAS_MEM_MAP (1UL<<1) -#define SECTION_IS_ONLINE (1UL<<2) -#define SECTION_IS_EARLY (1UL<<3) -#define SECTION_TAINT_ZONE_DEVICE (1UL<<4) -#define SECTION_MAP_LAST_BIT (1UL<<5) -#define SECTION_MAP_MASK (~(SECTION_MAP_LAST_BIT-1)) -#define SECTION_NID_SHIFT 6 +#define SECTION_MARKED_PRESENT (1UL<<0) +#define SECTION_HAS_MEM_MAP (1UL<<1) +#define SECTION_MAP_LAST_BIT (1UL<<2) +#define SECTION_MAP_MASK (~(SECTION_MAP_LAST_BIT-1)) +#define SECTION_NID_SHIFT 2 static inline struct page *__section_mem_map_addr(struct mem_section *section) { @@ -1396,119 +1163,32 @@ static inline int valid_section(struct mem_section *section) return (section && (section->section_mem_map & SECTION_HAS_MEM_MAP)); } -static inline int early_section(struct mem_section *section) -{ - return (section && (section->section_mem_map & SECTION_IS_EARLY)); -} - static inline int valid_section_nr(unsigned long nr) { return valid_section(__nr_to_section(nr)); } -static inline int online_section(struct mem_section *section) -{ - return (section && (section->section_mem_map & SECTION_IS_ONLINE)); -} - -static inline int online_device_section(struct mem_section *section) -{ - unsigned long flags = SECTION_IS_ONLINE | SECTION_TAINT_ZONE_DEVICE; - - return section && ((section->section_mem_map & flags) == flags); -} - -static inline int online_section_nr(unsigned long nr) -{ - return online_section(__nr_to_section(nr)); -} - -#ifdef CONFIG_MEMORY_HOTPLUG -void online_mem_sections(unsigned long start_pfn, unsigned long end_pfn); -void offline_mem_sections(unsigned long start_pfn, unsigned long end_pfn); -#endif - static 
inline struct mem_section *__pfn_to_section(unsigned long pfn) { return __nr_to_section(pfn_to_section_nr(pfn)); } -extern unsigned long __highest_present_section_nr; - -static inline int subsection_map_index(unsigned long pfn) -{ - return (pfn & ~(PAGE_SECTION_MASK)) / PAGES_PER_SUBSECTION; -} - -#ifdef CONFIG_SPARSEMEM_VMEMMAP -static inline int pfn_section_valid(struct mem_section *ms, unsigned long pfn) -{ - int idx = subsection_map_index(pfn); - - return test_bit(idx, ms->usage->subsection_map); -} -#else -static inline int pfn_section_valid(struct mem_section *ms, unsigned long pfn) -{ - return 1; -} -#endif - #ifndef CONFIG_HAVE_ARCH_PFN_VALID -/** - * pfn_valid - check if there is a valid memory map entry for a PFN - * @pfn: the page frame number to check - * - * Check if there is a valid memory map entry aka struct page for the @pfn. - * Note, that availability of the memory map entry does not imply that - * there is actual usable memory at that @pfn. The struct page may - * represent a hole or an unusable page frame. - * - * Return: 1 for PFNs that have memory map entries and 0 otherwise - */ static inline int pfn_valid(unsigned long pfn) { - struct mem_section *ms; - - /* - * Ensure the upper PAGE_SHIFT bits are clear in the - * pfn. Else it might lead to false positives when - * some of the upper bits are set, but the lower bits - * match a valid pfn. - */ - if (PHYS_PFN(PFN_PHYS(pfn)) != pfn) - return 0; - if (pfn_to_section_nr(pfn) >= NR_MEM_SECTIONS) return 0; - ms = __nr_to_section(pfn_to_section_nr(pfn)); - if (!valid_section(ms)) - return 0; - /* - * Traditionally early sections always returned pfn_valid() for - * the entire section-sized span. 
- */ - return early_section(ms) || pfn_section_valid(ms, pfn); + return valid_section(__nr_to_section(pfn_to_section_nr(pfn))); } #endif -static inline int pfn_in_present_section(unsigned long pfn) +static inline int pfn_present(unsigned long pfn) { if (pfn_to_section_nr(pfn) >= NR_MEM_SECTIONS) return 0; return present_section(__nr_to_section(pfn_to_section_nr(pfn))); } -static inline unsigned long next_present_section_nr(unsigned long section_nr) -{ - while (++section_nr <= __highest_present_section_nr) { - if (present_section_nr(section_nr)) - return section_nr; - } - - return -1; -} - /* * These are _only_ used during initialisation, therefore they * can use __initdata ... They could have names to indicate @@ -1524,14 +1204,69 @@ static inline unsigned long next_present_section_nr(unsigned long section_nr) #define pfn_to_nid(pfn) (0) #endif +#define early_pfn_valid(pfn) pfn_valid(pfn) void sparse_init(void); #else #define sparse_init() do {} while (0) #define sparse_index_init(_sec, _nid) do {} while (0) -#define pfn_in_present_section pfn_valid -#define subsection_map_init(_pfn, _nr_pages) do {} while (0) #endif /* CONFIG_SPARSEMEM */ +/* + * During memory init memblocks map pfns to nids. The search is expensive and + * this caches recent lookups. The implementation of __early_pfn_to_nid + * may treat start/end as pfns or sections. + */ +struct mminit_pfnnid_cache { + unsigned long last_start; + unsigned long last_end; + int last_nid; +}; + +#ifndef early_pfn_valid +#define early_pfn_valid(pfn) (1) +#endif + +void memory_present(int nid, unsigned long start, unsigned long end); +unsigned long __init node_memmap_size_bytes(int, unsigned long, unsigned long); + +/* + * If it is possible to have holes within a MAX_ORDER_NR_PAGES, then we + * need to check pfn validility within that MAX_ORDER_NR_PAGES block. + * pfn_valid_within() should be used in this case; we optimise this away + * when we have no holes within a MAX_ORDER_NR_PAGES block. 
+ */ +#ifdef CONFIG_HOLES_IN_ZONE +#define pfn_valid_within(pfn) pfn_valid(pfn) +#else +#define pfn_valid_within(pfn) (1) +#endif + +#ifdef CONFIG_ARCH_HAS_HOLES_MEMORYMODEL +/* + * pfn_valid() is meant to be able to tell if a given PFN has valid memmap + * associated with it or not. In FLATMEM, it is expected that holes always + * have valid memmap as long as there is valid PFNs either side of the hole. + * In SPARSEMEM, it is assumed that a valid section has a memmap for the + * entire section. + * + * However, an ARM, and maybe other embedded architectures in the future + * free memmap backing holes to save memory on the assumption the memmap is + * never used. The page_zone linkages are then broken even though pfn_valid() + * returns true. A walker of the full memmap must then do this additional + * check to ensure the memmap they are looking at is sane by making sure + * the zone and PFN linkages are still valid. This is expensive, but walkers + * of the full memmap are extremely rare. 
+ */ +bool memmap_valid_within(unsigned long pfn, + struct page *page, struct zone *zone); +#else +static inline bool memmap_valid_within(unsigned long pfn, + struct page *page, struct zone *zone) +{ + return true; +} +#endif /* CONFIG_ARCH_HAS_HOLES_MEMORYMODEL */ + #endif /* !__GENERATING_BOUNDS.H */ #endif /* !__ASSEMBLY__ */ #endif /* _LINUX_MMZONE_H */ diff --git a/include/linux/mnt_namespace.h b/include/linux/mnt_namespace.h index 8f882f5881..12b2ab5103 100644 --- a/include/linux/mnt_namespace.h +++ b/include/linux/mnt_namespace.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _NAMESPACE_H_ #define _NAMESPACE_H_ #ifdef __KERNEL__ @@ -6,12 +5,10 @@ struct mnt_namespace; struct fs_struct; struct user_namespace; -struct ns_common; extern struct mnt_namespace *copy_mnt_ns(unsigned long, struct mnt_namespace *, struct user_namespace *, struct fs_struct *); extern void put_mnt_ns(struct mnt_namespace *ns); -extern struct ns_common *from_mnt_ns(struct mnt_namespace *); extern const struct file_operations proc_mounts_operations; extern const struct file_operations proc_mountinfo_operations; diff --git a/include/linux/mod_devicetable.h b/include/linux/mod_devicetable.h index ae2e75d15b..c29bce453d 100644 --- a/include/linux/mod_devicetable.h +++ b/include/linux/mod_devicetable.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ /* * Device tables which are exported to userspace via * scripts/mod/file2alias.c. You must keep that file in sync with this @@ -16,36 +15,11 @@ typedef unsigned long kernel_ulong_t; #define PCI_ANY_ID (~0) -enum { - PCI_ID_F_VFIO_DRIVER_OVERRIDE = 1, -}; - -/** - * struct pci_device_id - PCI device ID structure - * @vendor: Vendor ID to match (or PCI_ANY_ID) - * @device: Device ID to match (or PCI_ANY_ID) - * @subvendor: Subsystem vendor ID to match (or PCI_ANY_ID) - * @subdevice: Subsystem device ID to match (or PCI_ANY_ID) - * @class: Device class, subclass, and "interface" to match. 
- * See Appendix D of the PCI Local Bus Spec or - * include/linux/pci_ids.h for a full list of classes. - * Most drivers do not need to specify class/class_mask - * as vendor/device is normally sufficient. - * @class_mask: Limit which sub-fields of the class field are compared. - * See drivers/scsi/sym53c8xx_2/ for example of usage. - * @driver_data: Data private to the driver. - * Most drivers don't need to use driver_data field. - * Best practice is to use driver_data as an index - * into a static list of equivalent device types, - * instead of using it as a pointer. - * @override_only: Match only when dev->driver_override is this driver. - */ struct pci_device_id { __u32 vendor, device; /* Vendor and device ID or PCI_ANY_ID*/ __u32 subvendor, subdevice; /* Subsystem ID's or PCI_ANY_ID */ __u32 class, class_mask; /* (class,subclass,prog-if) triplet */ kernel_ulong_t driver_data; /* Data private to the driver */ - __u32 override_only; }; @@ -165,7 +139,7 @@ struct usb_device_id { #define USB_DEVICE_ID_MATCH_INT_PROTOCOL 0x0200 #define USB_DEVICE_ID_MATCH_INT_NUMBER 0x0400 -#define HID_ANY_ID (~0) +#define HID_ANY_ID (~0U) #define HID_BUS_ANY 0xffff #define HID_GROUP_ANY 0x0000 @@ -201,8 +175,7 @@ struct ap_device_id { kernel_ulong_t driver_info; }; -#define AP_DEVICE_ID_MATCH_CARD_TYPE 0x01 -#define AP_DEVICE_ID_MATCH_QUEUE_TYPE 0x02 +#define AP_DEVICE_ID_MATCH_DEVICE_TYPE 0x01 /* s390 css bus devices (subchannels) */ struct css_device_id { @@ -254,14 +227,6 @@ struct hda_device_id { unsigned long driver_data; }; -struct sdw_device_id { - __u16 mfg_id; - __u16 part_id; - __u8 sdw_version; - __u8 class_id; - kernel_ulong_t driver_data; -}; - /* * Struct used for matching a device */ @@ -284,17 +249,17 @@ struct pcmcia_device_id { __u16 match_flags; __u16 manf_id; - __u16 card_id; + __u16 card_id; - __u8 func_id; + __u8 func_id; /* for real multi-function devices */ - __u8 function; + __u8 function; /* for pseudo multi-function devices */ - __u8 device_no; + __u8 
device_no; - __u32 prod_id_hash[4]; + __u32 prod_id_hash[4]; /* not matched against in kernelspace */ const char * prod_id[4]; @@ -326,8 +291,7 @@ struct pcmcia_device_id { #define INPUT_DEVICE_ID_LED_MAX 0x0f #define INPUT_DEVICE_ID_SND_MAX 0x07 #define INPUT_DEVICE_ID_FF_MAX 0x7f -#define INPUT_DEVICE_ID_SW_MAX 0x10 -#define INPUT_DEVICE_ID_PROP_MAX 0x1f +#define INPUT_DEVICE_ID_SW_MAX 0x0f #define INPUT_DEVICE_ID_MATCH_BUS 1 #define INPUT_DEVICE_ID_MATCH_VENDOR 2 @@ -343,7 +307,6 @@ struct pcmcia_device_id { #define INPUT_DEVICE_ID_MATCH_SNDBIT 0x0400 #define INPUT_DEVICE_ID_MATCH_FFBIT 0x0800 #define INPUT_DEVICE_ID_MATCH_SWBIT 0x1000 -#define INPUT_DEVICE_ID_MATCH_PROPBIT 0x2000 struct input_device_id { @@ -363,7 +326,6 @@ struct input_device_id { kernel_ulong_t sndbit[INPUT_DEVICE_ID_SND_MAX / BITS_PER_LONG + 1]; kernel_ulong_t ffbit[INPUT_DEVICE_ID_FF_MAX / BITS_PER_LONG + 1]; kernel_ulong_t swbit[INPUT_DEVICE_ID_SW_MAX / BITS_PER_LONG + 1]; - kernel_ulong_t propbit[INPUT_DEVICE_ID_PROP_MAX / BITS_PER_LONG + 1]; kernel_ulong_t driver_info; }; @@ -442,7 +404,7 @@ struct virtio_device_id { * For Hyper-V devices we use the device guid as the id. 
*/ struct hv_vmbus_device_id { - guid_t guid; + uuid_le guid; kernel_ulong_t driver_data; /* Data private to the driver */ }; @@ -453,7 +415,6 @@ struct hv_vmbus_device_id { struct rpmsg_device_id { char name[RPMSG_NAME_SIZE]; - kernel_ulong_t driver_data; }; /* i2c */ @@ -466,33 +427,6 @@ struct i2c_device_id { kernel_ulong_t driver_data; /* Data private to the driver */ }; -/* pci_epf */ - -#define PCI_EPF_NAME_SIZE 20 -#define PCI_EPF_MODULE_PREFIX "pci_epf:" - -struct pci_epf_device_id { - char name[PCI_EPF_NAME_SIZE]; - kernel_ulong_t driver_data; -}; - -/* i3c */ - -#define I3C_MATCH_DCR 0x1 -#define I3C_MATCH_MANUF 0x2 -#define I3C_MATCH_PART 0x4 -#define I3C_MATCH_EXTRA_INFO 0x8 - -struct i3c_device_id { - __u8 match_flags; - __u8 dcr; - __u16 manuf_id; - __u16 part_id; - __u16 extra_info; - - const void *data; -}; - /* spi */ #define SPI_NAME_SIZE 32 @@ -503,30 +437,6 @@ struct spi_device_id { kernel_ulong_t driver_data; /* Data private to the driver */ }; -/* SLIMbus */ - -#define SLIMBUS_NAME_SIZE 32 -#define SLIMBUS_MODULE_PREFIX "slim:" - -struct slim_device_id { - __u16 manf_id, prod_code; - __u16 dev_index, instance; - - /* Data private to the driver */ - kernel_ulong_t driver_data; -}; - -#define APR_NAME_SIZE 32 -#define APR_MODULE_PREFIX "apr:" - -struct apr_device_id { - char name[APR_NAME_SIZE]; - __u32 domain_id; - __u32 svc_id; - __u32 svc_version; - kernel_ulong_t driver_data; /* Data private to the driver */ -}; - #define SPMI_NAME_SIZE 32 #define SPMI_MODULE_PREFIX "spmi:" @@ -541,15 +451,11 @@ enum dmi_field { DMI_BIOS_VENDOR, DMI_BIOS_VERSION, DMI_BIOS_DATE, - DMI_BIOS_RELEASE, - DMI_EC_FIRMWARE_RELEASE, DMI_SYS_VENDOR, DMI_PRODUCT_NAME, DMI_PRODUCT_VERSION, DMI_PRODUCT_SERIAL, DMI_PRODUCT_UUID, - DMI_PRODUCT_SKU, - DMI_PRODUCT_FAMILY, DMI_BOARD_VENDOR, DMI_BOARD_NAME, DMI_BOARD_VERSION, @@ -561,7 +467,6 @@ enum dmi_field { DMI_CHASSIS_SERIAL, DMI_CHASSIS_ASSET_TAG, DMI_STRING_MAX, - DMI_OEM_STRING, /* special case - will not be in 
dmi_ident */ }; struct dmi_strmatch { @@ -575,7 +480,7 @@ struct dmi_system_id { const char *ident; struct dmi_strmatch matches[4]; void *driver_data; -}; +} __do_const; /* * struct dmi_device_id appears during expansion of * "MODULE_DEVICE_TABLE(dmi, x)". Compiler doesn't look inside it @@ -595,12 +500,11 @@ struct platform_device_id { kernel_ulong_t driver_data; }; -#define MDIO_NAME_SIZE 32 #define MDIO_MODULE_PREFIX "mdio:" -#define MDIO_ID_FMT "%u%u%u%u%u%u%u%u%u%u%u%u%u%u%u%u%u%u%u%u%u%u%u%u%u%u%u%u%u%u%u%u" +#define MDIO_ID_FMT "%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d" #define MDIO_ID_ARGS(_id) \ - ((_id)>>31) & 1, ((_id)>>30) & 1, ((_id)>>29) & 1, ((_id)>>28) & 1, \ + (_id)>>31, ((_id)>>30) & 1, ((_id)>>29) & 1, ((_id)>>28) & 1, \ ((_id)>>27) & 1, ((_id)>>26) & 1, ((_id)>>25) & 1, ((_id)>>24) & 1, \ ((_id)>>23) & 1, ((_id)>>22) & 1, ((_id)>>21) & 1, ((_id)>>20) & 1, \ ((_id)>>19) & 1, ((_id)>>18) & 1, ((_id)>>17) & 1, ((_id)>>16) & 1, \ @@ -612,7 +516,7 @@ struct platform_device_id { /** * struct mdio_device_id - identifies PHY devices on an MDIO/MII bus * @phy_id: The result of - * (mdio_read(&MII_PHYSID1) << 16 | mdio_read(&MII_PHYSID2)) & @phy_id_mask + * (mdio_read(&MII_PHYSID1) << 16 | mdio_read(&PHYSID2)) & @phy_id_mask * for this PHY type * @phy_id_mask: Defines the significant bits of @phy_id. A value of 0 * is used to terminate an array of struct mdio_device_id. 
@@ -674,16 +578,16 @@ struct x86_cpu_id { __u16 vendor; __u16 family; __u16 model; - __u16 steppings; __u16 feature; /* bit index */ kernel_ulong_t driver_data; }; -/* Wild cards for x86_cpu_id::vendor, family, model and feature */ +#define X86_FEATURE_MATCH(x) \ + { X86_VENDOR_ANY, X86_FAMILY_ANY, X86_MODEL_ANY, x } + #define X86_VENDOR_ANY 0xffff #define X86_FAMILY_ANY 0 #define X86_MODEL_ANY 0 -#define X86_STEPPING_ANY 0 #define X86_FEATURE_ANY 0 /* Same as FPU, you can't test for that */ /* @@ -757,6 +661,8 @@ struct ulpi_device_id { * struct fsl_mc_device_id - MC object device identifier * @vendor: vendor ID * @obj_type: MC object type + * @ver_major: MC object version major number + * @ver_minor: MC object version minor number * * Type of entries in the "device Id" table for MC object devices supported by * a MC object device driver. The last entry of the table has vendor set to 0x0 @@ -766,133 +672,5 @@ struct fsl_mc_device_id { const char obj_type[16]; }; -/** - * struct tb_service_id - Thunderbolt service identifiers - * @match_flags: Flags used to match the structure - * @protocol_key: Protocol key the service supports - * @protocol_id: Protocol id the service supports - * @protocol_version: Version of the protocol - * @protocol_revision: Revision of the protocol software - * @driver_data: Driver specific data - * - * Thunderbolt XDomain services are exposed as devices where each device - * carries the protocol information the service supports. Thunderbolt - * XDomain service drivers match against that information. 
- */ -struct tb_service_id { - __u32 match_flags; - char protocol_key[8 + 1]; - __u32 protocol_id; - __u32 protocol_version; - __u32 protocol_revision; - kernel_ulong_t driver_data; -}; - -#define TBSVC_MATCH_PROTOCOL_KEY 0x0001 -#define TBSVC_MATCH_PROTOCOL_ID 0x0002 -#define TBSVC_MATCH_PROTOCOL_VERSION 0x0004 -#define TBSVC_MATCH_PROTOCOL_REVISION 0x0008 - -/* USB Type-C Alternate Modes */ - -#define TYPEC_ANY_MODE 0x7 - -/** - * struct typec_device_id - USB Type-C alternate mode identifiers - * @svid: Standard or Vendor ID - * @mode: Mode index - * @driver_data: Driver specific data - */ -struct typec_device_id { - __u16 svid; - __u8 mode; - kernel_ulong_t driver_data; -}; - -/** - * struct tee_client_device_id - tee based device identifier - * @uuid: For TEE based client devices we use the device uuid as - * the identifier. - */ -struct tee_client_device_id { - uuid_t uuid; -}; - -/* WMI */ - -#define WMI_MODULE_PREFIX "wmi:" - -/** - * struct wmi_device_id - WMI device identifier - * @guid_string: 36 char string of the form fa50ff2b-f2e8-45de-83fa-65417f2f49ba - * @context: pointer to driver specific data - */ -struct wmi_device_id { - const char guid_string[UUID_STRING_LEN+1]; - const void *context; -}; - -#define MHI_DEVICE_MODALIAS_FMT "mhi:%s" -#define MHI_NAME_SIZE 32 - -/** - * struct mhi_device_id - MHI device identification - * @chan: MHI channel name - * @driver_data: driver data; - */ -struct mhi_device_id { - const char chan[MHI_NAME_SIZE]; - kernel_ulong_t driver_data; -}; - -#define AUXILIARY_NAME_SIZE 32 -#define AUXILIARY_MODULE_PREFIX "auxiliary:" - -struct auxiliary_device_id { - char name[AUXILIARY_NAME_SIZE]; - kernel_ulong_t driver_data; -}; - -/* Surface System Aggregator Module */ - -#define SSAM_MATCH_TARGET 0x1 -#define SSAM_MATCH_INSTANCE 0x2 -#define SSAM_MATCH_FUNCTION 0x4 - -struct ssam_device_id { - __u8 match_flags; - - __u8 domain; - __u8 category; - __u8 target; - __u8 instance; - __u8 function; - - kernel_ulong_t driver_data; 
-}; - -/* - * DFL (Device Feature List) - * - * DFL defines a linked list of feature headers within the device MMIO space to - * provide an extensible way of adding features. Software can walk through these - * predefined data structures to enumerate features. It is now used in the FPGA. - * See Documentation/fpga/dfl.rst for more information. - * - * The dfl bus type is introduced to match the individual feature devices (dfl - * devices) for specific dfl drivers. - */ - -/** - * struct dfl_device_id - dfl device identifier - * @type: DFL FIU type of the device. See enum dfl_id_type. - * @feature_id: feature identifier local to its DFL FIU type. - * @driver_data: driver specific data. - */ -struct dfl_device_id { - __u16 type; - __u16 feature_id; - kernel_ulong_t driver_data; -}; #endif /* LINUX_MOD_DEVICETABLE_H */ diff --git a/include/linux/module.h b/include/linux/module.h index c9f1200b23..18808a587e 100644 --- a/include/linux/module.h +++ b/include/linux/module.h @@ -1,17 +1,13 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ +#ifndef _LINUX_MODULE_H +#define _LINUX_MODULE_H /* * Dynamic loading of modules into the kernel. * * Rewritten by Richard Henderson Dec 1996 * Rewritten again by Rusty Russell, 2002 */ - -#ifndef _LINUX_MODULE_H -#define _LINUX_MODULE_H - #include #include -#include #include #include #include @@ -22,15 +18,19 @@ #include #include #include +#include /* only as arch move module.h -> extable.h */ #include -#include -#include -#include -#include -#include +#include #include #include +#include + +/* In stripped ARM and x86-64 modules, ~ is surprisingly rare. 
*/ +#define MODULE_SIG_STRING "~Module signature appended~\n" + +/* Not Yet Implemented */ +#define MODULE_SUPPORTED_DEVICE(name) #define MODULE_NAME_LEN MAX_PARAM_PREFIX_LEN @@ -60,12 +60,13 @@ struct module_attribute { int (*test)(struct module *); void (*free)(struct module *); }; +typedef struct module_attribute __no_const module_attribute_no_const; struct module_version_attribute { struct module_attribute mattr; const char *module_name; const char *version; -}; +} __do_const __attribute__ ((__aligned__(sizeof(void *)))); extern ssize_t __modver_version_show(struct module_attribute *, struct module_kobject *, char *); @@ -125,22 +126,19 @@ extern void cleanup_module(void); #define late_initcall_sync(fn) module_init(fn) #define console_initcall(fn) module_init(fn) +#define security_initcall(fn) module_init(fn) /* Each module must use one module_init(). */ #define module_init(initfn) \ - static inline initcall_t __maybe_unused __inittest(void) \ + static inline initcall_t __inittest(void) \ { return initfn; } \ - int init_module(void) __copy(initfn) \ - __attribute__((alias(#initfn))); \ - __CFI_ADDRESSABLE(init_module, __initdata); + int init_module(void) __attribute__((alias(#initfn))); /* This is only required if you want to be unloadable. 
*/ #define module_exit(exitfn) \ - static inline exitcall_t __maybe_unused __exittest(void) \ + static inline exitcall_t __exittest(void) \ { return exitfn; } \ - void cleanup_module(void) __copy(exitfn) \ - __attribute__((alias(#exitfn))); \ - __CFI_ADDRESSABLE(cleanup_module, __exitdata); + void cleanup_module(void) __attribute__((alias(#exitfn))); #endif @@ -173,21 +171,11 @@ extern void cleanup_module(void); */ #define MODULE_SOFTDEP(_softdep) MODULE_INFO(softdep, _softdep) -/* - * MODULE_FILE is used for generating modules.builtin - * So, make it no-op when this is being built as a module - */ -#ifdef MODULE -#define MODULE_FILE -#else -#define MODULE_FILE MODULE_INFO(file, KBUILD_MODFILE); -#endif - /* * The following license idents are currently accepted as indicating free * software modules * - * "GPL" [GNU Public License v2] + * "GPL" [GNU Public License v2 or later] * "GPL v2" [GNU Public License v2] * "GPL and additional rights" [GNU Public License v2 rights and more] * "Dual BSD/GPL" [GNU Public License v2 @@ -201,22 +189,6 @@ extern void cleanup_module(void); * * "Proprietary" [Non free products] * - * Both "GPL v2" and "GPL" (the latter also in dual licensed strings) are - * merely stating that the module is licensed under the GPL v2, but are not - * telling whether "GPL v2 only" or "GPL v2 or later". The reason why there - * are two variants is a historic and failed attempt to convey more - * information in the MODULE_LICENSE string. For module loading the - * "only/or later" distinction is completely irrelevant and does neither - * replace the proper license identifiers in the corresponding source file - * nor amends them in any way. The sole purpose is to make the - * 'Proprietary' flagging work and to refuse to bind symbols which are - * exported with EXPORT_SYMBOL_GPL when a non free module is loaded. - * - * In the same way "BSD" is not a clear license information. 
It merely - * states, that the module is licensed under one of the compatible BSD - * license variants. The detailed and correct license information is again - * to be found in the corresponding source files. - * * There are dual licensed components, but when running with Linux it is the * GPL that is relevant so this is a non issue. Similarly LGPL linked with GPL * is a GPL combined work. @@ -227,7 +199,7 @@ extern void cleanup_module(void); * 2. So the community can ignore bug reports including proprietary modules * 3. So vendors can do likewise based on their own policies */ -#define MODULE_LICENSE(_license) MODULE_FILE MODULE_INFO(license, _license) +#define MODULE_LICENSE(_license) MODULE_INFO(license, _license) /* * Author(s), use "Name " or just "Name", for multiple @@ -241,7 +213,7 @@ extern void cleanup_module(void); #ifdef MODULE /* Creates an alias so file2alias.c can find device table. */ #define MODULE_DEVICE_TABLE(type, name) \ -extern typeof(name) __mod_##type##__##name##_device_table \ +extern const typeof(name) __mod_##type##__##name##_device_table \ __attribute__ ((unused, alias(__stringify(name)))) #else /* !MODULE */ #define MODULE_DEVICE_TABLE(type, name) @@ -268,21 +240,20 @@ extern typeof(name) __mod_##type##__##name##_device_table \ #define MODULE_VERSION(_version) MODULE_INFO(version, _version) #else #define MODULE_VERSION(_version) \ - MODULE_INFO(version, _version); \ - static struct module_version_attribute __modver_attr \ - __used __section("__modver") \ - __aligned(__alignof__(struct module_version_attribute)) \ - = { \ - .mattr = { \ - .attr = { \ - .name = "version", \ - .mode = S_IRUGO, \ - }, \ - .show = __modver_version_show, \ + static struct module_version_attribute ___modver_attr = { \ + .mattr = { \ + .attr = { \ + .name = "version", \ + .mode = S_IRUGO, \ }, \ - .module_name = KBUILD_MODNAME, \ - .version = _version, \ - } + .show = __modver_version_show, \ + }, \ + .module_name = KBUILD_MODNAME, \ + .version = _version, \ + 
}; \ + static const struct module_version_attribute \ + __used __attribute__ ((__section__ ("__modver"))) \ + * __moduleparam_const __modver_attr = &___modver_attr #endif /* Optional firmware file (or files) needed by the module @@ -290,8 +261,6 @@ extern typeof(name) __mod_##type##__##name##_device_table \ * files require multiple MODULE_FIRMWARE() specifiers */ #define MODULE_FIRMWARE(_firmware) MODULE_INFO(firmware, _firmware) -#define MODULE_IMPORT_NS(ns) MODULE_INFO(import_ns, #ns) - struct notifier_block; #ifdef CONFIG_MODULES @@ -300,7 +269,7 @@ extern int modules_disabled; /* for sysctl */ /* Get/put a kernel symbol (calls must be symmetric) */ void *__symbol_get(const char *symbol); void *__symbol_get_gpl(const char *symbol); -#define symbol_get(x) ((typeof(&x))(__symbol_get(__stringify(x)))) +#define symbol_get(x) ((typeof(&x))(__symbol_get(VMLINUX_SYMBOL_STR(x)))) /* modules using other modules: kdb wants to see this. */ struct module_use { @@ -316,25 +285,26 @@ enum module_state { MODULE_STATE_UNFORMED, /* Still setting it up. */ }; +struct module; + struct mod_tree_node { struct module *mod; struct latch_tree_node node; }; struct module_layout { - /* The actual code + data. */ - void *base; - /* Total size. */ - unsigned int size; - /* The size of the executable code. */ - unsigned int text_size; - /* Size of RO section of the module (text+rodata) */ - unsigned int ro_size; - /* Size of RO after init section */ - unsigned int ro_after_init_size; + /* The actual code. */ + void *base_rx; + /* The actual data. */ + void *base_rw; + /* Code size. */ + unsigned int size_rx; + /* Data size. 
*/ + unsigned int size_rw; #ifdef CONFIG_MODULES_TREE_LOOKUP - struct mod_tree_node mtn; + struct mod_tree_node mtn_rx; + struct mod_tree_node mtn_rw; #endif }; @@ -349,7 +319,6 @@ struct mod_kallsyms { Elf_Sym *symtab; unsigned int num_symtab; char *strtab; - char *typetab; }; #ifdef CONFIG_LIVEPATCH @@ -370,27 +339,18 @@ struct module { /* Unique handle for this module */ char name[MODULE_NAME_LEN]; -#ifdef CONFIG_STACKTRACE_BUILD_ID - /* Module build ID */ - unsigned char build_id[BUILD_ID_SIZE_MAX]; -#endif - /* Sysfs stuff. */ struct module_kobject mkobj; - struct module_attribute *modinfo_attrs; + module_attribute_no_const *modinfo_attrs; const char *version; const char *srcversion; struct kobject *holders_dir; /* Exported symbols */ const struct kernel_symbol *syms; - const s32 *crcs; + const unsigned long *crcs; unsigned int num_syms; -#ifdef CONFIG_CFI_CLANG - cfi_check_fn cfi_check; -#endif - /* Kernel parameters. */ #ifdef CONFIG_SYSFS struct mutex param_lock; @@ -401,8 +361,19 @@ struct module { /* GPL-only exported symbols. */ unsigned int num_gpl_syms; const struct kernel_symbol *gpl_syms; - const s32 *gpl_crcs; - bool using_gplonly_symbols; + const unsigned long *gpl_crcs; + +#ifdef CONFIG_UNUSED_SYMBOLS + /* unused exported symbols. */ + const struct kernel_symbol *unused_syms; + const unsigned long *unused_crcs; + unsigned int num_unused_syms; + + /* GPL-only, unused exported symbols. */ + unsigned int num_unused_gpl_syms; + const struct kernel_symbol *unused_gpl_syms; + const unsigned long *unused_gpl_crcs; +#endif #ifdef CONFIG_MODULE_SIG /* Signature was verified. */ @@ -411,6 +382,11 @@ struct module { bool async_probe_requested; + /* symbols that will be GPL-only in the near future. 
*/ + const struct kernel_symbol *gpl_future_syms; + const unsigned long *gpl_future_crcs; + unsigned int num_gpl_future_syms; + /* Exception table */ unsigned int num_exentries; struct exception_table_entry *extable; @@ -425,7 +401,7 @@ struct module { /* Arch-specific module values */ struct mod_arch_specific arch; - unsigned long taints; /* same bits as kernel:taint_flags */ + unsigned int taints; /* same bits as kernel:tainted */ #ifdef CONFIG_GENERIC_BUG /* Support for BUG */ @@ -436,9 +412,9 @@ struct module { #ifdef CONFIG_KALLSYMS /* Protected by RCU and/or module_mutex: use rcu_dereference() */ - struct mod_kallsyms __rcu *kallsyms; + struct mod_kallsyms *kallsyms; struct mod_kallsyms core_kallsyms; - + /* Section attributes */ struct module_sect_attrs *sect_attrs; @@ -455,26 +431,12 @@ struct module { void __percpu *percpu; unsigned int percpu_size; #endif - void *noinstr_text_start; - unsigned int noinstr_text_size; #ifdef CONFIG_TRACEPOINTS unsigned int num_tracepoints; - tracepoint_ptr_t *tracepoints_ptrs; + struct tracepoint * const *tracepoints_ptrs; #endif -#ifdef CONFIG_TREE_SRCU - unsigned int num_srcu_structs; - struct srcu_struct **srcu_struct_ptrs; -#endif -#ifdef CONFIG_BPF_EVENTS - unsigned int num_bpf_raw_events; - struct bpf_raw_event_map *bpf_raw_events; -#endif -#ifdef CONFIG_DEBUG_INFO_BTF_MODULES - unsigned int btf_data_size; - void *btf_data; -#endif -#ifdef CONFIG_JUMP_LABEL +#ifdef HAVE_JUMP_LABEL struct jump_entry *jump_entries; unsigned int num_jump_entries; #endif @@ -485,23 +447,17 @@ struct module { #ifdef CONFIG_EVENT_TRACING struct trace_event_call **trace_events; unsigned int num_trace_events; - struct trace_eval_map **trace_evals; - unsigned int num_trace_evals; + struct trace_enum_map **trace_enums; + unsigned int num_trace_enums; + struct file_operations trace_id; + struct file_operations trace_enable; + struct file_operations trace_format; + struct file_operations trace_filter; #endif #ifdef CONFIG_FTRACE_MCOUNT_RECORD 
unsigned int num_ftrace_callsites; unsigned long *ftrace_callsites; #endif -#ifdef CONFIG_KPROBES - void *kprobes_text_start; - unsigned int kprobes_text_size; - unsigned long *kprobe_blacklist; - unsigned int num_kprobe_blacklist; -#endif -#ifdef CONFIG_HAVE_STATIC_CALL_INLINE - int num_static_call_sites; - struct static_call_site *static_call_sites; -#endif #ifdef CONFIG_LIVEPATCH bool klp; /* Is this a livepatch module? */ @@ -511,11 +467,6 @@ struct module { struct klp_modinfo *klp_info; #endif -#ifdef CONFIG_PRINTK_INDEX - unsigned int printk_index_size; - struct pi_entry **printk_index_start; -#endif - #ifdef CONFIG_MODULE_UNLOAD /* What modules depend on me? */ struct list_head source_list; @@ -533,27 +484,18 @@ struct module { ctor_fn_t *ctors; unsigned int num_ctors; #endif - -#ifdef CONFIG_FUNCTION_ERROR_INJECTION - struct error_injection_entry *ei_funcs; - unsigned int num_ei_funcs; -#endif } ____cacheline_aligned __randomize_layout; + #ifndef MODULE_ARCH_INIT #define MODULE_ARCH_INIT {} #endif -#ifndef HAVE_ARCH_KALLSYMS_SYMBOL_VALUE -static inline unsigned long kallsyms_symbol_value(const Elf_Sym *sym) -{ - return sym->st_value; -} -#endif +extern struct mutex module_mutex; /* FIXME: It'd be nice to isolate modules during init, too, so they aren't used before they (may) fail. 
But presently too much code (IDE & SCSI) require entry into the module during init.*/ -static inline bool module_is_live(struct module *mod) +static inline int module_is_live(struct module *mod) { return mod->state != MODULE_STATE_GOING; } @@ -561,22 +503,41 @@ static inline bool module_is_live(struct module *mod) struct module *__module_text_address(unsigned long addr); struct module *__module_address(unsigned long addr); bool is_module_address(unsigned long addr); -bool __is_module_percpu_address(unsigned long addr, unsigned long *can_addr); bool is_module_percpu_address(unsigned long addr); bool is_module_text_address(unsigned long addr); +static inline int within_module_range(unsigned long addr, void *start, unsigned long size) +{ + +#ifdef CONFIG_PAX_KERNEXEC + if (ktla_ktva(addr) >= (unsigned long)start && + ktla_ktva(addr) < (unsigned long)start + size) + return 1; +#endif + + return ((void *)addr >= start && (void *)addr < start + size); +} + +static inline int within_module_rx(unsigned long addr, const struct module_layout *layout) +{ + return within_module_range(addr, layout->base_rx, layout->size_rx); +} + +static inline int within_module_rw(unsigned long addr, const struct module_layout *layout) +{ + return within_module_range(addr, layout->base_rw, layout->size_rw); +} + static inline bool within_module_core(unsigned long addr, const struct module *mod) { - return (unsigned long)mod->core_layout.base <= addr && - addr < (unsigned long)mod->core_layout.base + mod->core_layout.size; + return within_module_rx(addr, &mod->core_layout) || within_module_rw(addr, &mod->core_layout); } static inline bool within_module_init(unsigned long addr, const struct module *mod) { - return (unsigned long)mod->init_layout.base <= addr && - addr < (unsigned long)mod->init_layout.base + mod->init_layout.size; + return within_module_rx(addr, &mod->init_layout) || within_module_rw(addr, &mod->init_layout); } static inline bool within_module(unsigned long addr, const struct 
module *mod) @@ -584,9 +545,40 @@ static inline bool within_module(unsigned long addr, const struct module *mod) return within_module_init(addr, mod) || within_module_core(addr, mod); } -/* Search for module by name: must be in a RCU-sched critical section. */ +/* Search for module by name: must hold module_mutex. */ struct module *find_module(const char *name); +struct symsearch { + const struct kernel_symbol *start, *stop; + const unsigned long *crcs; + enum { + NOT_GPL_ONLY, + GPL_ONLY, + WILL_BE_GPL_ONLY, + } licence; + bool unused; +}; + +/* + * Search for an exported symbol by name. + * + * Must be called with module_mutex held or preemption disabled. + */ +const struct kernel_symbol *find_symbol(const char *name, + struct module **owner, + const unsigned long **crc, + bool gplok, + bool warn); + +/* + * Walk the exported symbol table + * + * Must be called with module_mutex held or preemption disabled. + */ +bool each_symbol_section(bool (*fn)(const struct symsearch *arr, + struct module *owner, + void *data), void *data); + /* Returns 0 and fills in value, defined and namebuf, or -ERANGE if symnum out of range. */ int module_get_kallsym(unsigned int symnum, unsigned long *value, char *type, @@ -595,6 +587,10 @@ int module_get_kallsym(unsigned int symnum, unsigned long *value, char *type, /* Look for this name: can be of form module:name. 
*/ unsigned long module_kallsyms_lookup_name(const char *name); +int module_kallsyms_on_each_symbol(int (*fn)(void *, const char *, + struct module *, unsigned long), + void *data); + extern void __noreturn __module_put_and_exit(struct module *mod, long code); #define module_put_and_exit(code) __module_put_and_exit(THIS_MODULE, code) @@ -602,7 +598,7 @@ extern void __noreturn __module_put_and_exit(struct module *mod, #ifdef CONFIG_MODULE_UNLOAD int module_refcount(struct module *mod); void __symbol_put(const char *symbol); -#define symbol_put(x) __symbol_put(__stringify(x)) +#define symbol_put(x) __symbol_put(VMLINUX_SYMBOL_STR(x)) void symbol_put_addr(void *addr); /* Sometimes we know we already have a refcount, and it's easier not @@ -616,7 +612,7 @@ extern bool try_module_get(struct module *module); extern void module_put(struct module *module); #else /*!CONFIG_MODULE_UNLOAD*/ -static inline bool try_module_get(struct module *module) +static inline int try_module_get(struct module *module) { return !module || module_is_live(module); } @@ -630,6 +626,7 @@ static inline void __module_get(struct module *module) #define symbol_put_addr(p) do { } while (0) #endif /* CONFIG_MODULE_UNLOAD */ +int ref_module(struct module *a, struct module *b); /* This is a #define so the string doesn't get put in every .o file */ #define module_name(mod) \ @@ -638,16 +635,13 @@ static inline void __module_get(struct module *module) __mod ? __mod->name : "kernel"; \ }) -/* Dereference module function descriptor */ -void *dereference_module_function_descriptor(struct module *mod, void *ptr); - /* For kallsyms to ask for address resolution. namebuf should be at * least KSYM_NAME_LEN long: a pointer to namebuf is returned if * found, otherwise NULL. 
*/ const char *module_address_lookup(unsigned long addr, unsigned long *symbolsize, unsigned long *offset, - char **modname, const unsigned char **modbuildid, + char **modname, char *namebuf); int lookup_module_symbol_name(unsigned long addr, char *symname); int lookup_module_symbol_attrs(unsigned long addr, unsigned long *size, unsigned long *offset, char *modname, char *name); @@ -674,9 +668,6 @@ static inline bool is_livepatch_module(struct module *mod) } #endif /* CONFIG_LIVEPATCH */ -bool is_module_sig_enforced(void); -void set_module_sig_enforced(void); - #else /* !CONFIG_MODULES... */ static inline struct module *__module_address(unsigned long addr) @@ -699,35 +690,13 @@ static inline bool is_module_percpu_address(unsigned long addr) return false; } -static inline bool __is_module_percpu_address(unsigned long addr, unsigned long *can_addr) -{ - return false; -} - static inline bool is_module_text_address(unsigned long addr) { return false; } -static inline bool within_module_core(unsigned long addr, - const struct module *mod) -{ - return false; -} - -static inline bool within_module_init(unsigned long addr, - const struct module *mod) -{ - return false; -} - -static inline bool within_module(unsigned long addr, const struct module *mod) -{ - return false; -} - /* Get/put a kernel symbol (calls should be symmetric) */ -#define symbol_get(x) ({ extern typeof(x) x __attribute__((weak,visibility("hidden"))); &(x); }) +#define symbol_get(x) ({ extern typeof(x) x __attribute__((weak)); &(x); }) #define symbol_put(x) do { } while (0) #define symbol_put_addr(x) do { } while (0) @@ -735,9 +704,9 @@ static inline void __module_get(struct module *module) { } -static inline bool try_module_get(struct module *module) +static inline int try_module_get(struct module *module) { - return true; + return 1; } static inline void module_put(struct module *module) @@ -751,7 +720,6 @@ static inline const char *module_address_lookup(unsigned long addr, unsigned long *symbolsize, 
unsigned long *offset, char **modname, - const unsigned char **modbuildid, char *namebuf) { return NULL; @@ -779,6 +747,14 @@ static inline unsigned long module_kallsyms_lookup_name(const char *name) return 0; } +static inline int module_kallsyms_on_each_symbol(int (*fn)(void *, const char *, + struct module *, + unsigned long), + void *data) +{ + return 0; +} + static inline int register_module_notifier(struct notifier_block *nb) { /* no events will happen anyway, so this can always succeed */ @@ -801,22 +777,6 @@ static inline bool module_requested_async_probing(struct module *module) return false; } -static inline bool is_module_sig_enforced(void) -{ - return false; -} - -static inline void set_module_sig_enforced(void) -{ -} - -/* Dereference module function descriptor */ -static inline -void *dereference_module_function_descriptor(struct module *mod, void *ptr) -{ - return ptr; -} - #endif /* CONFIG_MODULES */ #ifdef CONFIG_SYSFS @@ -831,6 +791,18 @@ extern int module_sysfs_initialized; #define __MODULE_STRING(x) __stringify(x) +#ifdef CONFIG_DEBUG_SET_MODULE_RONX +extern void set_all_modules_text_rw(void); +extern void set_all_modules_text_ro(void); +extern void module_enable_ro(const struct module *mod, bool after_init); +extern void module_disable_ro(const struct module *mod); +#else +static inline void set_all_modules_text_rw(void) { } +static inline void set_all_modules_text_ro(void) { } +static inline void module_enable_ro(const struct module *mod, bool after_init) { } +static inline void module_disable_ro(const struct module *mod) { } +#endif + #ifdef CONFIG_GENERIC_BUG void module_bug_finalize(const Elf_Ehdr *, const Elf_Shdr *, struct module *); @@ -846,15 +818,6 @@ static inline void module_bug_finalize(const Elf_Ehdr *hdr, static inline void module_bug_cleanup(struct module *mod) {} #endif /* CONFIG_GENERIC_BUG */ -#ifdef CONFIG_RETPOLINE -extern bool retpoline_module_ok(bool has_retpoline); -#else -static inline bool retpoline_module_ok(bool 
has_retpoline) -{ - return true; -} -#endif - #ifdef CONFIG_MODULE_SIG static inline bool module_sig_ok(struct module *module) { @@ -867,8 +830,4 @@ static inline bool module_sig_ok(struct module *module) } #endif /* CONFIG_MODULE_SIG */ -int module_kallsyms_on_each_symbol(int (*fn)(void *, const char *, - struct module *, unsigned long), - void *data); - #endif /* _LINUX_MODULE_H */ diff --git a/include/linux/moduleloader.h b/include/linux/moduleloader.h index 9e09d11ffe..3169ac7c61 100644 --- a/include/linux/moduleloader.h +++ b/include/linux/moduleloader.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_MODULELOADER_H #define _LINUX_MODULELOADER_H /* The stuff needed for archs to support modules. */ @@ -26,18 +25,20 @@ unsigned int arch_mod_section_prepend(struct module *mod, unsigned int section); sections. Returns NULL on failure. */ void *module_alloc(unsigned long size); +#ifdef CONFIG_PAX_KERNEXEC +void *module_alloc_exec(unsigned long size); +#else +#define module_alloc_exec(x) module_alloc(x) +#endif + /* Free memory returned from module_alloc. */ void module_memfree(void *module_region); -/* Determines if the section name is an init section (that is only used during - * module loading). - */ -bool module_init_section(const char *name); - -/* Determines if the section name is an exit section (that is only used during - * module unloading) - */ -bool module_exit_section(const char *name); +#ifdef CONFIG_PAX_KERNEXEC +void module_memfree_exec(void *module_region); +#else +#define module_memfree_exec(x) module_memfree((x)) +#endif /* * Apply the given relocation to the (simplified) ELF. 
Return -error @@ -56,8 +57,10 @@ static inline int apply_relocate(Elf_Shdr *sechdrs, unsigned int relsec, struct module *me) { +#ifdef CONFIG_MODULES printk(KERN_ERR "module %s: REL relocation unsupported\n", module_name(me)); +#endif return -ENOEXEC; } #endif @@ -79,8 +82,10 @@ static inline int apply_relocate_add(Elf_Shdr *sechdrs, unsigned int relsec, struct module *me) { +#ifdef CONFIG_MODULES printk(KERN_ERR "module %s: REL relocation unsupported\n", module_name(me)); +#endif return -ENOEXEC; } #endif @@ -96,8 +101,7 @@ void module_arch_cleanup(struct module *mod); /* Any cleanup before freeing mod->module_init */ void module_arch_freeing_init(struct module *mod); -#if (defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)) && \ - !defined(CONFIG_KASAN_VMALLOC) +#ifdef CONFIG_KASAN #include #define MODULE_ALIGN (PAGE_SIZE << KASAN_SHADOW_SCALE_SHIFT) #else diff --git a/include/linux/moduleparam.h b/include/linux/moduleparam.h index 962cd41a2c..f10563b7ed 100644 --- a/include/linux/moduleparam.h +++ b/include/linux/moduleparam.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_MODULE_PARAMS_H #define _LINUX_MODULE_PARAMS_H /* (C) Copyright 2001, 2002 Rusty Russell IBM Corporation */ @@ -10,23 +9,25 @@ module name. */ #ifdef MODULE #define MODULE_PARAM_PREFIX /* empty */ -#define __MODULE_INFO_PREFIX /* empty */ #else #define MODULE_PARAM_PREFIX KBUILD_MODNAME "." -/* We cannot use MODULE_PARAM_PREFIX because some modules override it. */ -#define __MODULE_INFO_PREFIX KBUILD_MODNAME "." #endif /* Chosen so that structs with an unsigned long line up. 
*/ #define MAX_PARAM_PREFIX_LEN (64 - sizeof(unsigned long)) +#ifdef MODULE #define __MODULE_INFO(tag, name, info) \ - static const char __UNIQUE_ID(name)[] \ - __used __section(".modinfo") __aligned(1) \ - = __MODULE_INFO_PREFIX __stringify(tag) "=" info - +static const char __UNIQUE_ID(name)[] \ + __used __attribute__((section(".modinfo"), unused, aligned(1))) \ + = __stringify(tag) "=" info +#else /* !MODULE */ +/* This struct is here for syntactic coherency, it is not used */ +#define __MODULE_INFO(tag, name, info) \ + struct __UNIQUE_ID(name) {} +#endif #define __MODULE_PARM_TYPE(name, _type) \ - __MODULE_INFO(parmtype, name##type, #name ":" _type) + __MODULE_INFO(parmtype, name##type, #name ":" _type) /* One for each parameter, describing how to use it. Some files do multiple of these per line, so can't just use MODULE_INFO. */ @@ -53,17 +54,15 @@ struct kernel_param_ops { int (*get)(char *buffer, const struct kernel_param *kp); /* Optional function to free kp->arg when module unloaded. */ void (*free)(void *arg); -}; +} __do_const; /* * Flags available for kernel_param * * UNSAFE - the parameter is dangerous and setting it will taint the kernel - * HWPARAM - Hardware param not permitted in lockdown mode */ enum { - KERNEL_PARAM_FL_UNSAFE = (1 << 0), - KERNEL_PARAM_FL_HWPARAM = (1 << 1), + KERNEL_PARAM_FL_UNSAFE = (1 << 0) }; struct kernel_param { @@ -100,15 +99,15 @@ struct kparam_array /** * module_param - typesafe helper for a module/cmdline parameter - * @name: the variable to alter, and exposed parameter name. + * @value: the variable to alter, and exposed parameter name. * @type: the type of the parameter * @perm: visibility in sysfs. * - * @name becomes the module parameter, or (prefixed by KBUILD_MODNAME and a + * @value becomes the module parameter, or (prefixed by KBUILD_MODNAME and a * ".") the kernel commandline parameter. Note that - is changed to _, so * the user can use "foo-bar=1" even for variable "foo_bar". 
* - * @perm is 0 if the variable is not to appear in sysfs, or 0444 + * @perm is 0 if the the variable is not to appear in sysfs, or 0444 * for world-readable, 0644 for root-writable, etc. Note that if it * is writable, you may need to use kernel_param_lock() around * accesses (esp. charp, which can be kfreed when it changes). @@ -118,7 +117,7 @@ struct kparam_array * you can create your own by defining those variables. * * Standard types are: - * byte, hexint, short, ushort, int, uint, long, ulong + * byte, short, ushort, int, uint, long, ulong * charp: a character pointer * bool: a bool, values 0/1, y/n, Y/N. * invbool: the above, only sense-reversed (N = true). @@ -128,9 +127,6 @@ struct kparam_array /** * module_param_unsafe - same as module_param but taints kernel - * @name: the variable to alter, and exposed parameter name. - * @type: the type of the parameter - * @perm: visibility in sysfs. */ #define module_param_unsafe(name, type, perm) \ module_param_named_unsafe(name, name, type, perm) @@ -153,10 +149,6 @@ struct kparam_array /** * module_param_named_unsafe - same as module_param_named but taints kernel - * @name: a valid C identifier which is the parameter name. - * @value: the actual lvalue to alter. - * @type: the type of the parameter - * @perm: visibility in sysfs. */ #define module_param_named_unsafe(name, value, type, perm) \ param_check_##type(name, &(value)); \ @@ -167,7 +159,6 @@ struct kparam_array * module_param_cb - general callback for a module/cmdline parameter * @name: a valid C identifier which is the parameter name. * @ops: the set & get operations for this parameter. - * @arg: args for @ops * @perm: visibility in sysfs. * * The ops can have NULL set or get functions. 
@@ -179,96 +170,36 @@ struct kparam_array __module_param_call(MODULE_PARAM_PREFIX, name, ops, arg, perm, -1, \ KERNEL_PARAM_FL_UNSAFE) -#define __level_param_cb(name, ops, arg, perm, level) \ - __module_param_call(MODULE_PARAM_PREFIX, name, ops, arg, perm, level, 0) /** - * core_param_cb - general callback for a module/cmdline parameter - * to be evaluated before core initcall level + * _param_cb - general callback for a module/cmdline parameter + * to be evaluated before certain initcall level * @name: a valid C identifier which is the parameter name. * @ops: the set & get operations for this parameter. - * @arg: args for @ops * @perm: visibility in sysfs. * * The ops can have NULL set or get functions. */ +#define __level_param_cb(name, ops, arg, perm, level) \ + __module_param_call(MODULE_PARAM_PREFIX, name, ops, arg, perm, level, 0) + #define core_param_cb(name, ops, arg, perm) \ __level_param_cb(name, ops, arg, perm, 1) -/** - * postcore_param_cb - general callback for a module/cmdline parameter - * to be evaluated before postcore initcall level - * @name: a valid C identifier which is the parameter name. - * @ops: the set & get operations for this parameter. - * @arg: args for @ops - * @perm: visibility in sysfs. - * - * The ops can have NULL set or get functions. - */ #define postcore_param_cb(name, ops, arg, perm) \ __level_param_cb(name, ops, arg, perm, 2) -/** - * arch_param_cb - general callback for a module/cmdline parameter - * to be evaluated before arch initcall level - * @name: a valid C identifier which is the parameter name. - * @ops: the set & get operations for this parameter. - * @arg: args for @ops - * @perm: visibility in sysfs. - * - * The ops can have NULL set or get functions. 
- */ #define arch_param_cb(name, ops, arg, perm) \ __level_param_cb(name, ops, arg, perm, 3) -/** - * subsys_param_cb - general callback for a module/cmdline parameter - * to be evaluated before subsys initcall level - * @name: a valid C identifier which is the parameter name. - * @ops: the set & get operations for this parameter. - * @arg: args for @ops - * @perm: visibility in sysfs. - * - * The ops can have NULL set or get functions. - */ #define subsys_param_cb(name, ops, arg, perm) \ __level_param_cb(name, ops, arg, perm, 4) -/** - * fs_param_cb - general callback for a module/cmdline parameter - * to be evaluated before fs initcall level - * @name: a valid C identifier which is the parameter name. - * @ops: the set & get operations for this parameter. - * @arg: args for @ops - * @perm: visibility in sysfs. - * - * The ops can have NULL set or get functions. - */ #define fs_param_cb(name, ops, arg, perm) \ __level_param_cb(name, ops, arg, perm, 5) -/** - * device_param_cb - general callback for a module/cmdline parameter - * to be evaluated before device initcall level - * @name: a valid C identifier which is the parameter name. - * @ops: the set & get operations for this parameter. - * @arg: args for @ops - * @perm: visibility in sysfs. - * - * The ops can have NULL set or get functions. - */ #define device_param_cb(name, ops, arg, perm) \ __level_param_cb(name, ops, arg, perm, 6) -/** - * late_param_cb - general callback for a module/cmdline parameter - * to be evaluated before late initcall level - * @name: a valid C identifier which is the parameter name. - * @ops: the set & get operations for this parameter. - * @arg: args for @ops - * @perm: visibility in sysfs. - * - * The ops can have NULL set or get functions. - */ #define late_param_cb(name, ops, arg, perm) \ __level_param_cb(name, ops, arg, perm, 7) @@ -288,17 +219,25 @@ struct kparam_array /* Default value instead of permissions? 
*/ \ static const char __param_str_##name[] = prefix #name; \ static struct kernel_param __moduleparam_const __param_##name \ - __used __section("__param") \ - __aligned(__alignof__(struct kernel_param)) \ + __used \ + __attribute__ ((unused,__section__ ("__param"),aligned(sizeof(void *)))) \ = { __param_str_##name, THIS_MODULE, ops, \ VERIFY_OCTAL_PERMISSIONS(perm), level, flags, { arg } } /* Obsolete - use module_param_cb() */ -#define module_param_call(name, _set, _get, arg, perm) \ +#define module_param_call(name, set, get, arg, perm) \ static const struct kernel_param_ops __param_ops_##name = \ - { .flags = 0, .set = _set, .get = _get }; \ + { .flags = 0, set, get }; \ __module_param_call(MODULE_PARAM_PREFIX, \ - name, &__param_ops_##name, arg, perm, -1, 0) + name, &__param_ops_##name, arg, \ + (perm) + sizeof(__check_old_set_param(set))*0, -1, 0) + +/* We don't get oldget: it's often a new-style param_get_uint, etc. */ +static inline int +__check_old_set_param(int (*oldset)(const char *, const struct kernel_param *)) +{ + return 0; +} #ifdef CONFIG_SYSFS extern void kernel_param_lock(struct module *mod); @@ -331,10 +270,6 @@ static inline void kernel_param_unlock(struct module *mod) /** * core_param_unsafe - same as core_param but taints kernel - * @name: the name of the cmdline and sysfs parameter (often the same as var) - * @var: the variable - * @type: the type of the parameter - * @perm: visibility in sysfs */ #define core_param_unsafe(name, var, type, perm) \ param_check_##type(name, &(var)); \ @@ -354,7 +289,7 @@ static inline void kernel_param_unlock(struct module *mod) * @len is usually just sizeof(string). 
*/ #define module_param_string(name, string, len, perm) \ - static const struct kparam_string __param_string_##name \ + static const struct kparam_string __param_string_##name __used \ = { len, string }; \ __module_param_call(MODULE_PARAM_PREFIX, name, \ ¶m_ops_string, \ @@ -431,8 +366,6 @@ extern int param_get_int(char *buffer, const struct kernel_param *kp); extern const struct kernel_param_ops param_ops_uint; extern int param_set_uint(const char *val, const struct kernel_param *kp); extern int param_get_uint(char *buffer, const struct kernel_param *kp); -int param_set_uint_minmax(const char *val, const struct kernel_param *kp, - unsigned int min, unsigned int max); #define param_check_uint(name, p) __param_check(name, p, unsigned int) extern const struct kernel_param_ops param_ops_long; @@ -450,11 +383,6 @@ extern int param_set_ullong(const char *val, const struct kernel_param *kp); extern int param_get_ullong(char *buffer, const struct kernel_param *kp); #define param_check_ullong(name, p) __param_check(name, p, unsigned long long) -extern const struct kernel_param_ops param_ops_hexint; -extern int param_set_hexint(const char *val, const struct kernel_param *kp); -extern int param_get_hexint(char *buffer, const struct kernel_param *kp); -#define param_check_hexint(name, p) param_check_uint(name, p) - extern const struct kernel_param_ops param_ops_charp; extern int param_set_charp(const char *val, const struct kernel_param *kp); extern int param_get_charp(char *buffer, const struct kernel_param *kp); @@ -513,7 +441,7 @@ extern int param_set_bint(const char *val, const struct kernel_param *kp); */ #define module_param_array_named(name, array, type, nump, perm) \ param_check_##type(name, &(array)[0]); \ - static const struct kparam_array __param_arr_##name \ + static const struct kparam_array __param_arr_##name __used \ = { .max = ARRAY_SIZE(array), .num = nump, \ .ops = ¶m_ops_##type, \ .elemsize = sizeof(array[0]), .elem = array }; \ @@ -523,67 +451,6 @@ extern 
int param_set_bint(const char *val, const struct kernel_param *kp); perm, -1, 0); \ __MODULE_PARM_TYPE(name, "array of " #type) -enum hwparam_type { - hwparam_ioport, /* Module parameter configures an I/O port */ - hwparam_iomem, /* Module parameter configures an I/O mem address */ - hwparam_ioport_or_iomem, /* Module parameter could be either, depending on other option */ - hwparam_irq, /* Module parameter configures an IRQ */ - hwparam_dma, /* Module parameter configures a DMA channel */ - hwparam_dma_addr, /* Module parameter configures a DMA buffer address */ - hwparam_other, /* Module parameter configures some other value */ -}; - -/** - * module_param_hw_named - A parameter representing a hw parameters - * @name: a valid C identifier which is the parameter name. - * @value: the actual lvalue to alter. - * @type: the type of the parameter - * @hwtype: what the value represents (enum hwparam_type) - * @perm: visibility in sysfs. - * - * Usually it's a good idea to have variable names and user-exposed names the - * same, but that's harder if the variable must be non-static or is inside a - * structure. This allows exposure under a different name. - */ -#define module_param_hw_named(name, value, type, hwtype, perm) \ - param_check_##type(name, &(value)); \ - __module_param_call(MODULE_PARAM_PREFIX, name, \ - ¶m_ops_##type, &value, \ - perm, -1, \ - KERNEL_PARAM_FL_HWPARAM | (hwparam_##hwtype & 0)); \ - __MODULE_PARM_TYPE(name, #type) - -#define module_param_hw(name, type, hwtype, perm) \ - module_param_hw_named(name, name, type, hwtype, perm) - -/** - * module_param_hw_array - A parameter representing an array of hw parameters - * @name: the name of the array variable - * @type: the type, as per module_param() - * @hwtype: what the value represents (enum hwparam_type) - * @nump: optional pointer filled in with the number written - * @perm: visibility in sysfs - * - * Input and output are as comma-separated values. Commas inside values - * don't work properly (eg. 
an array of charp). - * - * ARRAY_SIZE(@name) is used to determine the number of elements in the - * array, so the definition must be visible. - */ -#define module_param_hw_array(name, type, hwtype, nump, perm) \ - param_check_##type(name, &(name)[0]); \ - static const struct kparam_array __param_arr_##name \ - = { .max = ARRAY_SIZE(name), .num = nump, \ - .ops = ¶m_ops_##type, \ - .elemsize = sizeof(name[0]), .elem = name }; \ - __module_param_call(MODULE_PARAM_PREFIX, name, \ - ¶m_array_ops, \ - .arr = &__param_arr_##name, \ - perm, -1, \ - KERNEL_PARAM_FL_HWPARAM | (hwparam_##hwtype & 0)); \ - __MODULE_PARM_TYPE(name, "array of " #type) - - extern const struct kernel_param_ops param_array_ops; extern const struct kernel_param_ops param_ops_string; diff --git a/include/linux/mount.h b/include/linux/mount.h index 5d92a7e1a7..377fcf14bd 100644 --- a/include/linux/mount.h +++ b/include/linux/mount.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ /* * * Definitions for mount interface. This describes the in the kernel build @@ -21,7 +20,6 @@ struct super_block; struct vfsmount; struct dentry; struct mnt_namespace; -struct fs_context; #define MNT_NOSUID 0x01 #define MNT_NODEV 0x02 @@ -30,7 +28,6 @@ struct fs_context; #define MNT_NODIRATIME 0x10 #define MNT_RELATIME 0x20 #define MNT_READONLY 0x40 /* does the user want this to be r/o? 
*/ -#define MNT_NOSYMFOLLOW 0x80 #define MNT_SHRINKABLE 0x100 #define MNT_WRITE_HOLD 0x200 @@ -47,12 +44,11 @@ struct fs_context; #define MNT_SHARED_MASK (MNT_UNBINDABLE) #define MNT_USER_SETTABLE_MASK (MNT_NOSUID | MNT_NODEV | MNT_NOEXEC \ | MNT_NOATIME | MNT_NODIRATIME | MNT_RELATIME \ - | MNT_READONLY | MNT_NOSYMFOLLOW) + | MNT_READONLY) #define MNT_ATIME_MASK (MNT_NOATIME | MNT_NODIRATIME | MNT_RELATIME ) #define MNT_INTERNAL_FLAGS (MNT_SHARED | MNT_WRITE_HOLD | MNT_INTERNAL | \ - MNT_DOOMED | MNT_SYNC_UMOUNT | MNT_MARKED | \ - MNT_CURSOR) + MNT_DOOMED | MNT_SYNC_UMOUNT | MNT_MARKED) #define MNT_INTERNAL 0x4000 @@ -66,42 +62,31 @@ struct fs_context; #define MNT_SYNC_UMOUNT 0x2000000 #define MNT_MARKED 0x4000000 #define MNT_UMOUNT 0x8000000 -#define MNT_CURSOR 0x10000000 struct vfsmount { struct dentry *mnt_root; /* root of the mounted tree */ struct super_block *mnt_sb; /* pointer to superblock */ int mnt_flags; - struct user_namespace *mnt_userns; } __randomize_layout; -static inline struct user_namespace *mnt_user_ns(const struct vfsmount *mnt) -{ - /* Pairs with smp_store_release() in do_idmap_mount(). 
*/ - return smp_load_acquire(&mnt->mnt_userns); -} - struct file; /* forward dec */ struct path; extern int mnt_want_write(struct vfsmount *mnt); extern int mnt_want_write_file(struct file *file); +extern int mnt_clone_write(struct vfsmount *mnt); extern void mnt_drop_write(struct vfsmount *mnt); extern void mnt_drop_write_file(struct file *file); extern void mntput(struct vfsmount *mnt); extern struct vfsmount *mntget(struct vfsmount *mnt); -extern struct vfsmount *mnt_clone_internal(const struct path *path); -extern bool __mnt_is_readonly(struct vfsmount *mnt); +extern struct vfsmount *mnt_clone_internal(struct path *path); +extern int __mnt_is_readonly(struct vfsmount *mnt); extern bool mnt_may_suid(struct vfsmount *mnt); struct path; -extern struct vfsmount *clone_private_mount(const struct path *path); -extern int __mnt_want_write(struct vfsmount *); -extern void __mnt_drop_write(struct vfsmount *); +extern struct vfsmount *clone_private_mount(struct path *path); struct file_system_type; -extern struct vfsmount *fc_mount(struct fs_context *fc); -extern struct vfsmount *vfs_create_mount(struct fs_context *fc); extern struct vfsmount *vfs_kern_mount(struct file_system_type *type, int flags, const char *name, void *data); @@ -116,8 +101,4 @@ extern dev_t name_to_dev_t(const char *name); extern unsigned int sysctl_mount_max; -extern bool path_is_mountpoint(const struct path *path); - -extern void kern_unmount_array(struct vfsmount *mnt[], unsigned int num); - #endif /* _LINUX_MOUNT_H */ diff --git a/include/linux/mpage.h b/include/linux/mpage.h index f4f5e90a68..068a0c9946 100644 --- a/include/linux/mpage.h +++ b/include/linux/mpage.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ /* * include/linux/mpage.h * @@ -13,9 +12,9 @@ #ifdef CONFIG_BLOCK struct writeback_control; -struct readahead_control; -void mpage_readahead(struct readahead_control *, get_block_t get_block); +int mpage_readpages(struct address_space *mapping, struct list_head *pages, + 
unsigned nr_pages, get_block_t get_block); int mpage_readpage(struct page *page, get_block_t get_block); int mpage_writepages(struct address_space *mapping, struct writeback_control *wbc, get_block_t get_block); diff --git a/include/linux/mpi.h b/include/linux/mpi.h index eb0d1c1db2..1cc5ffb769 100644 --- a/include/linux/mpi.h +++ b/include/linux/mpi.h @@ -1,10 +1,23 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ /* mpi.h - Multi Precision Integers * Copyright (C) 1994, 1996, 1998, 1999, * 2000, 2001 Free Software Foundation, Inc. * * This file is part of GNUPG. * + * GNUPG is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * GNUPG is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA + * * Note: This code is heavily based on the GNU MP Library. 
* Actually it's the same code with only minor changes in the * way the data is stored; this is to support the abstraction @@ -40,234 +53,100 @@ struct gcry_mpi { typedef struct gcry_mpi *MPI; #define mpi_get_nlimbs(a) ((a)->nlimbs) -#define mpi_has_sign(a) ((a)->sign) +#define mpi_is_neg(a) ((a)->sign) /*-- mpiutil.c --*/ MPI mpi_alloc(unsigned nlimbs); -void mpi_clear(MPI a); +MPI mpi_alloc_secure(unsigned nlimbs); +MPI mpi_alloc_like(MPI a); void mpi_free(MPI a); int mpi_resize(MPI a, unsigned nlimbs); - -static inline MPI mpi_new(unsigned int nbits) -{ - return mpi_alloc((nbits + BITS_PER_MPI_LIMB - 1) / BITS_PER_MPI_LIMB); -} - -MPI mpi_copy(MPI a); -MPI mpi_alloc_like(MPI a); -void mpi_snatch(MPI w, MPI u); -MPI mpi_set(MPI w, MPI u); -MPI mpi_set_ui(MPI w, unsigned long u); +int mpi_copy(MPI *copy, const MPI a); +void mpi_clear(MPI a); +int mpi_set(MPI w, MPI u); +int mpi_set_ui(MPI w, ulong u); MPI mpi_alloc_set_ui(unsigned long u); -void mpi_swap_cond(MPI a, MPI b, unsigned long swap); - -/* Constants used to return constant MPIs. See mpi_init if you - * want to add more constants. - */ -#define MPI_NUMBER_OF_CONSTANTS 6 -enum gcry_mpi_constants { - MPI_C_ZERO, - MPI_C_ONE, - MPI_C_TWO, - MPI_C_THREE, - MPI_C_FOUR, - MPI_C_EIGHT -}; - -MPI mpi_const(enum gcry_mpi_constants no); +void mpi_m_check(MPI a); +void mpi_swap(MPI a, MPI b); /*-- mpicoder.c --*/ - -/* Different formats of external big integer representation. */ -enum gcry_mpi_format { - GCRYMPI_FMT_NONE = 0, - GCRYMPI_FMT_STD = 1, /* Twos complement stored without length. */ - GCRYMPI_FMT_PGP = 2, /* As used by OpenPGP (unsigned only). */ - GCRYMPI_FMT_SSH = 3, /* As used by SSH (like STD but with length). */ - GCRYMPI_FMT_HEX = 4, /* Hex format. */ - GCRYMPI_FMT_USG = 5, /* Like STD but unsigned. */ - GCRYMPI_FMT_OPAQUE = 8 /* Opaque format (some functions only). 
*/ -}; - +MPI do_encode_md(const void *sha_buffer, unsigned nbits); MPI mpi_read_raw_data(const void *xbuffer, size_t nbytes); MPI mpi_read_from_buffer(const void *buffer, unsigned *ret_nread); -int mpi_fromstr(MPI val, const char *str); -MPI mpi_scanval(const char *string); MPI mpi_read_raw_from_sgl(struct scatterlist *sgl, unsigned int len); +int mpi_fromstr(MPI val, const char *str); +u32 mpi_get_keyid(MPI a, u32 *keyid); void *mpi_get_buffer(MPI a, unsigned *nbytes, int *sign); int mpi_read_buffer(MPI a, uint8_t *buf, unsigned buf_len, unsigned *nbytes, int *sign); +void *mpi_get_secure_buffer(MPI a, unsigned *nbytes, int *sign); int mpi_write_to_sgl(MPI a, struct scatterlist *sg, unsigned nbytes, int *sign); -int mpi_print(enum gcry_mpi_format format, unsigned char *buffer, - size_t buflen, size_t *nwritten, MPI a); -/*-- mpi-mod.c --*/ -void mpi_mod(MPI rem, MPI dividend, MPI divisor); +#define log_mpidump g10_log_mpidump -/* Context used with Barrett reduction. */ -struct barrett_ctx_s; -typedef struct barrett_ctx_s *mpi_barrett_t; +/*-- mpi-add.c --*/ +int mpi_add_ui(MPI w, MPI u, ulong v); +int mpi_add(MPI w, MPI u, MPI v); +int mpi_addm(MPI w, MPI u, MPI v, MPI m); +int mpi_sub_ui(MPI w, MPI u, ulong v); +int mpi_sub(MPI w, MPI u, MPI v); +int mpi_subm(MPI w, MPI u, MPI v, MPI m); -mpi_barrett_t mpi_barrett_init(MPI m, int copy); -void mpi_barrett_free(mpi_barrett_t ctx); -void mpi_mod_barrett(MPI r, MPI x, mpi_barrett_t ctx); -void mpi_mul_barrett(MPI w, MPI u, MPI v, mpi_barrett_t ctx); +/*-- mpi-mul.c --*/ +int mpi_mul_ui(MPI w, MPI u, ulong v); +int mpi_mul_2exp(MPI w, MPI u, ulong cnt); +int mpi_mul(MPI w, MPI u, MPI v); +int mpi_mulm(MPI w, MPI u, MPI v, MPI m); + +/*-- mpi-div.c --*/ +ulong mpi_fdiv_r_ui(MPI rem, MPI dividend, ulong divisor); +int mpi_fdiv_r(MPI rem, MPI dividend, MPI divisor); +int mpi_fdiv_q(MPI quot, MPI dividend, MPI divisor); +int mpi_fdiv_qr(MPI quot, MPI rem, MPI dividend, MPI divisor); +int mpi_tdiv_r(MPI rem, MPI num, MPI 
den); +int mpi_tdiv_qr(MPI quot, MPI rem, MPI num, MPI den); +int mpi_tdiv_q_2exp(MPI w, MPI u, unsigned count); +int mpi_divisible_ui(const MPI dividend, ulong divisor); + +/*-- mpi-gcd.c --*/ +int mpi_gcd(MPI g, const MPI a, const MPI b); /*-- mpi-pow.c --*/ +int mpi_pow(MPI w, MPI u, MPI v); int mpi_powm(MPI res, MPI base, MPI exp, MPI mod); +/*-- mpi-mpow.c --*/ +int mpi_mulpowm(MPI res, MPI *basearray, MPI *exparray, MPI mod); + /*-- mpi-cmp.c --*/ int mpi_cmp_ui(MPI u, ulong v); int mpi_cmp(MPI u, MPI v); -int mpi_cmpabs(MPI u, MPI v); -/*-- mpi-sub-ui.c --*/ -int mpi_sub_ui(MPI w, MPI u, unsigned long vval); +/*-- mpi-scan.c --*/ +int mpi_getbyte(MPI a, unsigned idx); +void mpi_putbyte(MPI a, unsigned idx, int value); +unsigned mpi_trailing_zeros(MPI a); /*-- mpi-bit.c --*/ void mpi_normalize(MPI a); unsigned mpi_get_nbits(MPI a); -int mpi_test_bit(MPI a, unsigned int n); -void mpi_set_bit(MPI a, unsigned int n); -void mpi_set_highbit(MPI a, unsigned int n); -void mpi_clear_highbit(MPI a, unsigned int n); -void mpi_clear_bit(MPI a, unsigned int n); -void mpi_rshift_limbs(MPI a, unsigned int count); -void mpi_rshift(MPI x, MPI a, unsigned int n); -void mpi_lshift_limbs(MPI a, unsigned int count); -void mpi_lshift(MPI x, MPI a, unsigned int n); - -/*-- mpi-add.c --*/ -void mpi_add_ui(MPI w, MPI u, unsigned long v); -void mpi_add(MPI w, MPI u, MPI v); -void mpi_sub(MPI w, MPI u, MPI v); -void mpi_addm(MPI w, MPI u, MPI v, MPI m); -void mpi_subm(MPI w, MPI u, MPI v, MPI m); - -/*-- mpi-mul.c --*/ -void mpi_mul(MPI w, MPI u, MPI v); -void mpi_mulm(MPI w, MPI u, MPI v, MPI m); - -/*-- mpi-div.c --*/ -void mpi_tdiv_r(MPI rem, MPI num, MPI den); -void mpi_fdiv_r(MPI rem, MPI dividend, MPI divisor); -void mpi_fdiv_q(MPI quot, MPI dividend, MPI divisor); +int mpi_test_bit(MPI a, unsigned n); +int mpi_set_bit(MPI a, unsigned n); +int mpi_set_highbit(MPI a, unsigned n); +void mpi_clear_highbit(MPI a, unsigned n); +void mpi_clear_bit(MPI a, unsigned n); +int 
mpi_rshift(MPI x, MPI a, unsigned n); /*-- mpi-inv.c --*/ -int mpi_invm(MPI x, MPI a, MPI n); - -/*-- ec.c --*/ - -/* Object to represent a point in projective coordinates */ -struct gcry_mpi_point { - MPI x; - MPI y; - MPI z; -}; - -typedef struct gcry_mpi_point *MPI_POINT; - -/* Models describing an elliptic curve */ -enum gcry_mpi_ec_models { - /* The Short Weierstrass equation is - * y^2 = x^3 + ax + b - */ - MPI_EC_WEIERSTRASS = 0, - /* The Montgomery equation is - * by^2 = x^3 + ax^2 + x - */ - MPI_EC_MONTGOMERY, - /* The Twisted Edwards equation is - * ax^2 + y^2 = 1 + bx^2y^2 - * Note that we use 'b' instead of the commonly used 'd'. - */ - MPI_EC_EDWARDS -}; - -/* Dialects used with elliptic curves */ -enum ecc_dialects { - ECC_DIALECT_STANDARD = 0, - ECC_DIALECT_ED25519, - ECC_DIALECT_SAFECURVE -}; - -/* This context is used with all our EC functions. */ -struct mpi_ec_ctx { - enum gcry_mpi_ec_models model; /* The model describing this curve. */ - enum ecc_dialects dialect; /* The ECC dialect used with the curve. */ - int flags; /* Public key flags (not always used). */ - unsigned int nbits; /* Number of bits. */ - - /* Domain parameters. Note that they may not all be set and if set - * the MPIs may be flagged as constant. - */ - MPI p; /* Prime specifying the field GF(p). */ - MPI a; /* First coefficient of the Weierstrass equation. */ - MPI b; /* Second coefficient of the Weierstrass equation. */ - MPI_POINT G; /* Base point (generator). */ - MPI n; /* Order of G. */ - unsigned int h; /* Cofactor. */ - - /* The actual key. May not be set. */ - MPI_POINT Q; /* Public key. */ - MPI d; /* Private key. */ - - const char *name; /* Name of the curve. */ - - /* This structure is private to mpi/ec.c! */ - struct { - struct { - unsigned int a_is_pminus3:1; - unsigned int two_inv_p:1; - } valid; /* Flags to help setting the helper vars below. */ - - int a_is_pminus3; /* True if A = P - 3. 
*/ - - MPI two_inv_p; - - mpi_barrett_t p_barrett; - - /* Scratch variables. */ - MPI scratch[11]; - - /* Helper for fast reduction. */ - /* int nist_nbits; /\* If this is a NIST curve, the # of bits. *\/ */ - /* MPI s[10]; */ - /* MPI c; */ - } t; - - /* Curve specific computation routines for the field. */ - void (*addm)(MPI w, MPI u, MPI v, struct mpi_ec_ctx *ctx); - void (*subm)(MPI w, MPI u, MPI v, struct mpi_ec_ctx *ec); - void (*mulm)(MPI w, MPI u, MPI v, struct mpi_ec_ctx *ctx); - void (*pow2)(MPI w, const MPI b, struct mpi_ec_ctx *ctx); - void (*mul2)(MPI w, MPI u, struct mpi_ec_ctx *ctx); -}; - -void mpi_ec_init(struct mpi_ec_ctx *ctx, enum gcry_mpi_ec_models model, - enum ecc_dialects dialect, - int flags, MPI p, MPI a, MPI b); -void mpi_ec_deinit(struct mpi_ec_ctx *ctx); -MPI_POINT mpi_point_new(unsigned int nbits); -void mpi_point_release(MPI_POINT p); -void mpi_point_init(MPI_POINT p); -void mpi_point_free_parts(MPI_POINT p); -int mpi_ec_get_affine(MPI x, MPI y, MPI_POINT point, struct mpi_ec_ctx *ctx); -void mpi_ec_add_points(MPI_POINT result, - MPI_POINT p1, MPI_POINT p2, - struct mpi_ec_ctx *ctx); -void mpi_ec_mul_point(MPI_POINT result, - MPI scalar, MPI_POINT point, - struct mpi_ec_ctx *ctx); -int mpi_ec_curve_point(MPI_POINT point, struct mpi_ec_ctx *ctx); +int mpi_invm(MPI x, MPI u, MPI v); /* inline functions */ /** * mpi_get_size() - returns max size required to store the number * - * @a: A multi precision integer for which we want to allocate a buffer + * @a: A multi precision integer for which we want to allocate a bufer * * Return: size required to store the number */ diff --git a/include/linux/mpls.h b/include/linux/mpls.h index ae1a188c01..9999145bc1 100644 --- a/include/linux/mpls.h +++ b/include/linux/mpls.h @@ -1,12 +1,6 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_MPLS_H #define _LINUX_MPLS_H #include -#define MPLS_TTL_MASK (MPLS_LS_TTL_MASK >> MPLS_LS_TTL_SHIFT) -#define MPLS_BOS_MASK (MPLS_LS_S_MASK >> MPLS_LS_S_SHIFT) 
-#define MPLS_TC_MASK (MPLS_LS_TC_MASK >> MPLS_LS_TC_SHIFT) -#define MPLS_LABEL_MASK (MPLS_LS_LABEL_MASK >> MPLS_LS_LABEL_SHIFT) - #endif /* _LINUX_MPLS_H */ diff --git a/include/linux/mpls_iptunnel.h b/include/linux/mpls_iptunnel.h index 140c56954f..ef29eb2d6d 100644 --- a/include/linux/mpls_iptunnel.h +++ b/include/linux/mpls_iptunnel.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_MPLS_IPTUNNEL_H #define _LINUX_MPLS_IPTUNNEL_H diff --git a/include/linux/mroute.h b/include/linux/mroute.h index 6cbbfe9434..e5fb81376e 100644 --- a/include/linux/mroute.h +++ b/include/linux/mroute.h @@ -1,14 +1,10 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef __LINUX_MROUTE_H #define __LINUX_MROUTE_H #include #include -#include -#include +#include #include -#include -#include #ifdef CONFIG_IP_MROUTE static inline int ip_mroute_opt(int opt) @@ -16,15 +12,14 @@ static inline int ip_mroute_opt(int opt) return opt >= MRT_BASE && opt <= MRT_MAX; } -int ip_mroute_setsockopt(struct sock *, int, sockptr_t, unsigned int); +int ip_mroute_setsockopt(struct sock *, int, char __user *, unsigned int); int ip_mroute_getsockopt(struct sock *, int, char __user *, int __user *); int ipmr_ioctl(struct sock *sk, int cmd, void __user *arg); int ipmr_compat_ioctl(struct sock *sk, unsigned int cmd, void __user *arg); int ip_mr_init(void); -bool ipmr_rule_default(const struct fib_rule *rule); #else static inline int ip_mroute_setsockopt(struct sock *sock, int optname, - sockptr_t optval, unsigned int optlen) + char __user *optval, unsigned int optlen) { return -ENOPROTOOPT; } @@ -49,40 +44,81 @@ static inline int ip_mroute_opt(int opt) { return 0; } - -static inline bool ipmr_rule_default(const struct fib_rule *rule) -{ - return true; -} #endif +struct vif_device { + struct net_device *dev; /* Device we are using */ + unsigned long bytes_in,bytes_out; + unsigned long pkt_in,pkt_out; /* Statistics */ + unsigned long rate_limit; /* Traffic shaping (NI) */ + unsigned char 
threshold; /* TTL threshold */ + unsigned short flags; /* Control flags */ + __be32 local,remote; /* Addresses(remote for tunnels)*/ + int link; /* Physical interface index */ +}; + #define VIFF_STATIC 0x8000 -struct mfc_cache_cmp_arg { - __be32 mfc_mcastgrp; - __be32 mfc_origin; +#define VIF_EXISTS(_mrt, _idx) ((_mrt)->vif_table[_idx].dev != NULL) +#define MFC_LINES 64 + +struct mr_table { + struct list_head list; + possible_net_t net; + u32 id; + struct sock __rcu *mroute_sk; + struct timer_list ipmr_expire_timer; + struct list_head mfc_unres_queue; + struct list_head mfc_cache_array[MFC_LINES]; + struct vif_device vif_table[MAXVIFS]; + int maxvif; + atomic_t cache_resolve_queue_len; + bool mroute_do_assert; + bool mroute_do_pim; + int mroute_reg_vif_num; }; -/** - * struct mfc_cache - multicast routing entries - * @_c: Common multicast routing information; has to be first [for casting] - * @mfc_mcastgrp: destination multicast group address - * @mfc_origin: source address - * @cmparg: used for rhashtable comparisons +/* mfc_flags: + * MFC_STATIC - the entry was added statically (not by a routing daemon) */ +enum { + MFC_STATIC = BIT(0), +}; + struct mfc_cache { - struct mr_mfc _c; + struct list_head list; + __be32 mfc_mcastgrp; /* Group the entry belongs to */ + __be32 mfc_origin; /* Source of packet */ + vifi_t mfc_parent; /* Source interface */ + int mfc_flags; /* Flags on line */ + union { struct { - __be32 mfc_mcastgrp; - __be32 mfc_origin; - }; - struct mfc_cache_cmp_arg cmparg; - }; + unsigned long expires; + struct sk_buff_head unresolved; /* Unresolved buffers */ + } unres; + struct { + unsigned long last_assert; + int minvif; + int maxvif; + unsigned long bytes; + unsigned long pkt; + unsigned long wrong_if; + unsigned long lastuse; + unsigned char ttls[MAXVIFS]; /* TTL thresholds */ + } res; + } mfc_un; + struct rcu_head rcu; }; +#ifdef __BIG_ENDIAN +#define MFC_HASH(a,b) (((((__force u32)(__be32)a)>>24)^(((__force u32)(__be32)b)>>26))&(MFC_LINES-1)) 
+#else +#define MFC_HASH(a,b) ((((__force u32)(__be32)a)^(((__force u32)(__be32)b)>>2))&(MFC_LINES-1)) +#endif + struct rtmsg; int ipmr_get_route(struct net *net, struct sk_buff *skb, __be32 saddr, __be32 daddr, - struct rtmsg *rtm, u32 portid); + struct rtmsg *rtm, int nowait, u32 portid); #endif diff --git a/include/linux/mroute6.h b/include/linux/mroute6.h index bc351a85ce..19a1c0c299 100644 --- a/include/linux/mroute6.h +++ b/include/linux/mroute6.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef __LINUX_MROUTE6_H #define __LINUX_MROUTE6_H @@ -7,9 +6,6 @@ #include /* for struct sk_buff_head */ #include #include -#include -#include -#include #ifdef CONFIG_IPV6_MROUTE static inline int ip6_mroute_opt(int opt) @@ -26,7 +22,7 @@ static inline int ip6_mroute_opt(int opt) struct sock; #ifdef CONFIG_IPV6_MROUTE -extern int ip6_mroute_setsockopt(struct sock *, int, sockptr_t, unsigned int); +extern int ip6_mroute_setsockopt(struct sock *, int, char __user *, unsigned int); extern int ip6_mroute_getsockopt(struct sock *, int, char __user *, int __user *); extern int ip6_mr_input(struct sk_buff *skb); extern int ip6mr_ioctl(struct sock *sk, int cmd, void __user *arg); @@ -34,8 +30,9 @@ extern int ip6mr_compat_ioctl(struct sock *sk, unsigned int cmd, void __user *ar extern int ip6_mr_init(void); extern void ip6_mr_cleanup(void); #else -static inline int ip6_mroute_setsockopt(struct sock *sock, int optname, - sockptr_t optval, unsigned int optlen) +static inline +int ip6_mroute_setsockopt(struct sock *sock, + int optname, char __user *optval, unsigned int optlen) { return -ENOPROTOOPT; } @@ -64,46 +61,70 @@ static inline void ip6_mr_cleanup(void) } #endif -#ifdef CONFIG_IPV6_MROUTE_MULTIPLE_TABLES -bool ip6mr_rule_default(const struct fib_rule *rule); -#else -static inline bool ip6mr_rule_default(const struct fib_rule *rule) -{ - return true; -} -#endif +struct mif_device { + struct net_device *dev; /* Device we are using */ + unsigned long 
bytes_in,bytes_out; + unsigned long pkt_in,pkt_out; /* Statistics */ + unsigned long rate_limit; /* Traffic shaping (NI) */ + unsigned char threshold; /* TTL threshold */ + unsigned short flags; /* Control flags */ + int link; /* Physical interface index */ +}; #define VIFF_STATIC 0x8000 -struct mfc6_cache_cmp_arg { - struct in6_addr mf6c_mcastgrp; - struct in6_addr mf6c_origin; -}; - struct mfc6_cache { - struct mr_mfc _c; + struct list_head list; + struct in6_addr mf6c_mcastgrp; /* Group the entry belongs to */ + struct in6_addr mf6c_origin; /* Source of packet */ + mifi_t mf6c_parent; /* Source interface */ + int mfc_flags; /* Flags on line */ + union { struct { - struct in6_addr mf6c_mcastgrp; - struct in6_addr mf6c_origin; - }; - struct mfc6_cache_cmp_arg cmparg; - }; + unsigned long expires; + struct sk_buff_head unresolved; /* Unresolved buffers */ + } unres; + struct { + unsigned long last_assert; + int minvif; + int maxvif; + unsigned long bytes; + unsigned long pkt; + unsigned long wrong_if; + unsigned long lastuse; + unsigned char ttls[MAXMIFS]; /* TTL thresholds */ + } res; + } mfc_un; }; +#define MFC_STATIC 1 +#define MFC_NOTIFY 2 + +#define MFC6_LINES 64 + +#define MFC6_HASH(a, g) (((__force u32)(a)->s6_addr32[0] ^ \ + (__force u32)(a)->s6_addr32[1] ^ \ + (__force u32)(a)->s6_addr32[2] ^ \ + (__force u32)(a)->s6_addr32[3] ^ \ + (__force u32)(g)->s6_addr32[0] ^ \ + (__force u32)(g)->s6_addr32[1] ^ \ + (__force u32)(g)->s6_addr32[2] ^ \ + (__force u32)(g)->s6_addr32[3]) % MFC6_LINES) + #define MFC_ASSERT_THRESH (3*HZ) /* Maximal freq. 
of asserts */ struct rtmsg; extern int ip6mr_get_route(struct net *net, struct sk_buff *skb, - struct rtmsg *rtm, u32 portid); + struct rtmsg *rtm, int nowait, u32 portid); #ifdef CONFIG_IPV6_MROUTE -bool mroute6_is_socket(struct net *net, struct sk_buff *skb); +extern struct sock *mroute6_socket(struct net *net, struct sk_buff *skb); extern int ip6mr_sk_done(struct sock *sk); #else -static inline bool mroute6_is_socket(struct net *net, struct sk_buff *skb) +static inline struct sock *mroute6_socket(struct net *net, struct sk_buff *skb) { - return false; + return NULL; } static inline int ip6mr_sk_done(struct sock *sk) { diff --git a/include/linux/msdos_fs.h b/include/linux/msdos_fs.h index b7a5d4c72f..e1b163f912 100644 --- a/include/linux/msdos_fs.h +++ b/include/linux/msdos_fs.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_MSDOS_FS_H #define _LINUX_MSDOS_FS_H diff --git a/include/linux/msg.h b/include/linux/msg.h index 9a972a296b..a001305f5a 100644 --- a/include/linux/msg.h +++ b/include/linux/msg.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_MSG_H #define _LINUX_MSG_H @@ -15,4 +14,29 @@ struct msg_msg { /* the actual message follows immediately */ }; +/* one msq_queue structure for each present queue on the system */ +struct msg_queue { + struct kern_ipc_perm q_perm; + time_t q_stime; /* last msgsnd time */ + time_t q_rtime; /* last msgrcv time */ + time_t q_ctime; /* last change time */ + unsigned long q_cbytes; /* current number of bytes on queue */ + unsigned long q_qnum; /* number of messages in queue */ + unsigned long q_qbytes; /* max number of bytes on queue */ + pid_t q_lspid; /* pid of last msgsnd */ + pid_t q_lrpid; /* last receive pid */ + + struct list_head q_messages; + struct list_head q_receivers; + struct list_head q_senders; +} __randomize_layout; + +/* Helper routines for sys_msgsnd and sys_msgrcv */ +extern long do_msgsnd(int msqid, long mtype, void __user *mtext, + size_t msgsz, int 
msgflg); +extern long do_msgrcv(int msqid, void __user *buf, size_t bufsz, long msgtyp, + int msgflg, + long (*msg_fill)(void __user *, struct msg_msg *, + size_t)); + #endif /* _LINUX_MSG_H */ diff --git a/include/linux/msi.h b/include/linux/msi.h index 49cf6eb222..0db320b7bb 100644 --- a/include/linux/msi.h +++ b/include/linux/msi.h @@ -1,53 +1,13 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef LINUX_MSI_H #define LINUX_MSI_H #include #include -#include -/* Dummy shadow structures if an architecture does not define them */ -#ifndef arch_msi_msg_addr_lo -typedef struct arch_msi_msg_addr_lo { - u32 address_lo; -} __attribute__ ((packed)) arch_msi_msg_addr_lo_t; -#endif - -#ifndef arch_msi_msg_addr_hi -typedef struct arch_msi_msg_addr_hi { - u32 address_hi; -} __attribute__ ((packed)) arch_msi_msg_addr_hi_t; -#endif - -#ifndef arch_msi_msg_data -typedef struct arch_msi_msg_data { - u32 data; -} __attribute__ ((packed)) arch_msi_msg_data_t; -#endif - -/** - * msi_msg - Representation of a MSI message - * @address_lo: Low 32 bits of msi message address - * @arch_addrlo: Architecture specific shadow of @address_lo - * @address_hi: High 32 bits of msi message address - * (only used when device supports it) - * @arch_addrhi: Architecture specific shadow of @address_hi - * @data: MSI message data (usually 16 bits) - * @arch_data: Architecture specific shadow of @data - */ struct msi_msg { - union { - u32 address_lo; - arch_msi_msg_addr_lo_t arch_addr_lo; - }; - union { - u32 address_hi; - arch_msi_msg_addr_hi_t arch_addr_hi; - }; - union { - u32 data; - arch_msi_msg_data_t arch_data; - }; + u32 address_lo; /* low 32 bits of msi message address */ + u32 address_hi; /* high 32 bits of msi message address */ + u32 data; /* 16 bits of msi message data */ }; extern int pci_msi_ignore_mask; @@ -57,13 +17,7 @@ struct msi_desc; struct pci_dev; struct platform_msi_priv_data; void __get_cached_msi_msg(struct msi_desc *entry, struct msi_msg *msg); -#ifdef CONFIG_GENERIC_MSI_IRQ 
void get_cached_msi_msg(unsigned int irq, struct msi_msg *msg); -#else -static inline void get_cached_msi_msg(unsigned int irq, struct msi_msg *msg) -{ -} -#endif typedef void (*irq_write_msi_msg_t)(struct msi_desc *desc, struct msi_msg *msg); @@ -86,14 +40,6 @@ struct fsl_mc_msi_desc { u16 msi_index; }; -/** - * ti_sci_inta_msi_desc - TISCI based INTA specific msi descriptor data - * @dev_index: TISCI device index - */ -struct ti_sci_inta_msi_desc { - u16 dev_index; -}; - /** * struct msi_desc - Descriptor structure for MSI based interrupts * @list: List head for management @@ -103,12 +49,7 @@ struct ti_sci_inta_msi_desc { * @msg: The last set MSI message cached for reuse * @affinity: Optional pointer to a cpu affinity mask for this descriptor * - * @write_msi_msg: Callback that may be called when the MSI message - * address or data changes - * @write_msi_msg_data: Data parameter for the callback. - * - * @msi_mask: [PCI MSI] MSI cached mask bits - * @msix_ctrl: [PCI MSI-X] MSI-X cached per vector control bits + * @masked: [PCI MSI/X] Mask bits * @is_msix: [PCI MSI/X] True if MSI-X * @multiple: [PCI MSI/X] log2 num of messages allocated * @multi_cap: [PCI MSI/X] log2 num of messages supported @@ -119,8 +60,6 @@ struct ti_sci_inta_msi_desc { * @mask_pos: [PCI MSI] Mask register position * @mask_base: [PCI MSI-X] Mask register base address * @platform: [platform] Platform device specific msi descriptor data - * @fsl_mc: [fsl-mc] FSL MC device specific msi descriptor data - * @inta: [INTA] TISCI based INTA specific msi descriptor data */ struct msi_desc { /* Shared device/bus type independent data */ @@ -129,29 +68,19 @@ struct msi_desc { unsigned int nvec_used; struct device *dev; struct msi_msg msg; - struct irq_affinity_desc *affinity; -#ifdef CONFIG_IRQ_MSI_IOMMU - const void *iommu_cookie; -#endif - - void (*write_msi_msg)(struct msi_desc *entry, void *data); - void *write_msi_msg_data; + struct cpumask *affinity; union { /* PCI MSI/X specific data */ struct { - 
union { - u32 msi_mask; - u32 msix_ctrl; - }; + u32 masked; struct { - u8 is_msix : 1; - u8 multiple : 3; - u8 multi_cap : 3; - u8 maskbit : 1; - u8 is_64 : 1; - u8 is_virtual : 1; - u16 entry_nr; + __u8 is_msix : 1; + __u8 multiple : 3; + __u8 multi_cap : 3; + __u8 maskbit : 1; + __u8 is_64 : 1; + __u16 entry_nr; unsigned default_irq; } msi_attrib; union { @@ -169,7 +98,6 @@ struct msi_desc { */ struct platform_msi_desc platform; struct fsl_mc_msi_desc fsl_mc; - struct ti_sci_inta_msi_desc inta; }; }; @@ -180,37 +108,6 @@ struct msi_desc { list_first_entry(dev_to_msi_list((dev)), struct msi_desc, list) #define for_each_msi_entry(desc, dev) \ list_for_each_entry((desc), dev_to_msi_list((dev)), list) -#define for_each_msi_entry_safe(desc, tmp, dev) \ - list_for_each_entry_safe((desc), (tmp), dev_to_msi_list((dev)), list) -#define for_each_msi_vector(desc, __irq, dev) \ - for_each_msi_entry((desc), (dev)) \ - if ((desc)->irq) \ - for (__irq = (desc)->irq; \ - __irq < ((desc)->irq + (desc)->nvec_used); \ - __irq++) - -#ifdef CONFIG_IRQ_MSI_IOMMU -static inline const void *msi_desc_get_iommu_cookie(struct msi_desc *desc) -{ - return desc->iommu_cookie; -} - -static inline void msi_desc_set_iommu_cookie(struct msi_desc *desc, - const void *iommu_cookie) -{ - desc->iommu_cookie = iommu_cookie; -} -#else -static inline const void *msi_desc_get_iommu_cookie(struct msi_desc *desc) -{ - return NULL; -} - -static inline void msi_desc_set_iommu_cookie(struct msi_desc *desc, - const void *iommu_cookie) -{ -} -#endif #ifdef CONFIG_PCI_MSI #define first_pci_msi_entry(pdev) first_msi_entry(&(pdev)->dev) @@ -219,66 +116,74 @@ static inline void msi_desc_set_iommu_cookie(struct msi_desc *desc, struct pci_dev *msi_desc_to_pci_dev(struct msi_desc *desc); void *msi_desc_to_pci_sysdata(struct msi_desc *desc); -void pci_write_msi_msg(unsigned int irq, struct msi_msg *msg); #else /* CONFIG_PCI_MSI */ static inline void *msi_desc_to_pci_sysdata(struct msi_desc *desc) { return NULL; } 
-static inline void pci_write_msi_msg(unsigned int irq, struct msi_msg *msg) -{ -} #endif /* CONFIG_PCI_MSI */ struct msi_desc *alloc_msi_entry(struct device *dev, int nvec, - const struct irq_affinity_desc *affinity); + const struct cpumask *affinity); void free_msi_entry(struct msi_desc *entry); void __pci_read_msi_msg(struct msi_desc *entry, struct msi_msg *msg); void __pci_write_msi_msg(struct msi_desc *entry, struct msi_msg *msg); +void pci_write_msi_msg(unsigned int irq, struct msi_msg *msg); +u32 __pci_msix_desc_mask_irq(struct msi_desc *desc, u32 flag); +u32 __pci_msi_desc_mask_irq(struct msi_desc *desc, u32 mask, u32 flag); void pci_msi_mask_irq(struct irq_data *data); void pci_msi_unmask_irq(struct irq_data *data); -const struct attribute_group **msi_populate_sysfs(struct device *dev); -void msi_destroy_sysfs(struct device *dev, - const struct attribute_group **msi_irq_groups); +/* Conversion helpers. Should be removed after merging */ +static inline void __write_msi_msg(struct msi_desc *entry, struct msi_msg *msg) +{ + __pci_write_msi_msg(entry, msg); +} +static inline void write_msi_msg(int irq, struct msi_msg *msg) +{ + pci_write_msi_msg(irq, msg); +} +static inline void mask_msi_irq(struct irq_data *data) +{ + pci_msi_mask_irq(data); +} +static inline void unmask_msi_irq(struct irq_data *data) +{ + pci_msi_unmask_irq(data); +} /* - * The arch hooks to setup up msi irqs. Default functions are implemented - * as weak symbols so that they /can/ be overriden by architecture specific - * code if needed. These hooks can only be enabled by the architecture. - * - * If CONFIG_PCI_MSI_ARCH_FALLBACKS is not selected they are replaced by - * stubs with warnings. + * The arch hooks to setup up msi irqs. Those functions are + * implemented as weak symbols so that they /can/ be overriden by + * architecture specific code if needed. 
*/ -#ifdef CONFIG_PCI_MSI_ARCH_FALLBACKS int arch_setup_msi_irq(struct pci_dev *dev, struct msi_desc *desc); void arch_teardown_msi_irq(unsigned int irq); int arch_setup_msi_irqs(struct pci_dev *dev, int nvec, int type); void arch_teardown_msi_irqs(struct pci_dev *dev); -#else -static inline int arch_setup_msi_irqs(struct pci_dev *dev, int nvec, int type) -{ - WARN_ON_ONCE(1); - return -ENODEV; -} - -static inline void arch_teardown_msi_irqs(struct pci_dev *dev) -{ - WARN_ON_ONCE(1); -} -#endif - -/* - * The restore hooks are still available as they are useful even - * for fully irq domain based setups. Courtesy to XEN/X86. - */ void arch_restore_msi_irqs(struct pci_dev *dev); + +void default_teardown_msi_irqs(struct pci_dev *dev); void default_restore_msi_irqs(struct pci_dev *dev); +struct msi_controller { + struct module *owner; + struct device *dev; + struct device_node *of_node; + struct list_head list; + + int (*setup_irq)(struct msi_controller *chip, struct pci_dev *dev, + struct msi_desc *desc); + int (*setup_irqs)(struct msi_controller *chip, struct pci_dev *dev, + int nvec, int type); + void (*teardown_irq)(struct msi_controller *chip, unsigned int irq); +}; + #ifdef CONFIG_GENERIC_MSI_IRQ_DOMAIN #include +#include struct irq_domain; struct irq_domain_ops; @@ -297,10 +202,6 @@ struct msi_domain_info; * @msi_finish: Optional callback to finalize the allocation * @set_desc: Set the msi descriptor for an interrupt * @handle_error: Optional error handler if the allocation fails - * @domain_alloc_irqs: Optional function to override the default allocation - * function. - * @domain_free_irqs: Optional function to override the default free - * function. 
* * @get_hwirq, @msi_init and @msi_free are callbacks used by * msi_create_irq_domain() and related interfaces @@ -308,22 +209,6 @@ struct msi_domain_info; * @msi_check, @msi_prepare, @msi_finish, @set_desc and @handle_error * are callbacks used by msi_domain_alloc_irqs() and related * interfaces which are based on msi_desc. - * - * @domain_alloc_irqs, @domain_free_irqs can be used to override the - * default allocation/free functions (__msi_domain_alloc/free_irqs). This - * is initially for a wrapper around XENs seperate MSI universe which can't - * be wrapped into the regular irq domains concepts by mere mortals. This - * allows to universally use msi_domain_alloc/free_irqs without having to - * special case XEN all over the place. - * - * Contrary to other operations @domain_alloc_irqs and @domain_free_irqs - * are set to the default implementation if NULL and even when - * MSI_FLAG_USE_DEF_DOM_OPS is not set to avoid breaking existing users and - * because these callbacks are obviously mandatory. - * - * This is NOT meant to be abused, but it can be useful to build wrappers - * for specialized MSI irq domains which need extra work before and after - * calling __msi_domain_alloc_irqs()/__msi_domain_free_irqs(). */ struct msi_domain_ops { irq_hw_number_t (*get_hwirq)(struct msi_domain_info *info, @@ -346,10 +231,6 @@ struct msi_domain_ops { struct msi_desc *desc); int (*handle_error)(struct irq_domain *domain, struct msi_desc *desc, int error); - int (*domain_alloc_irqs)(struct irq_domain *domain, - struct device *dev, int nvec); - void (*domain_free_irqs)(struct irq_domain *domain, - struct device *dev); }; /** @@ -392,13 +273,6 @@ enum { MSI_FLAG_PCI_MSIX = (1 << 3), /* Needs early activate, required for PCI */ MSI_FLAG_ACTIVATE_EARLY = (1 << 4), - /* - * Must reactivate when irq is started even when - * MSI_FLAG_ACTIVATE_EARLY has been set. 
- */ - MSI_FLAG_MUST_REACTIVATE = (1 << 5), - /* Is level-triggered capable, using two messages */ - MSI_FLAG_LEVEL_CAPABLE = (1 << 6), }; int msi_domain_set_affinity(struct irq_data *data, const struct cpumask *mask, @@ -407,11 +281,8 @@ int msi_domain_set_affinity(struct irq_data *data, const struct cpumask *mask, struct irq_domain *msi_create_irq_domain(struct fwnode_handle *fwnode, struct msi_domain_info *info, struct irq_domain *parent); -int __msi_domain_alloc_irqs(struct irq_domain *domain, struct device *dev, - int nvec); int msi_domain_alloc_irqs(struct irq_domain *domain, struct device *dev, int nvec); -void __msi_domain_free_irqs(struct irq_domain *domain, struct device *dev); void msi_domain_free_irqs(struct irq_domain *domain, struct device *dev); struct msi_domain_info *msi_get_domain_info(struct irq_domain *domain); @@ -428,18 +299,11 @@ int msi_domain_prepare_irqs(struct irq_domain *domain, struct device *dev, int msi_domain_populate_irqs(struct irq_domain *domain, struct device *dev, int virq, int nvec, msi_alloc_info_t *args); struct irq_domain * -__platform_msi_create_device_domain(struct device *dev, - unsigned int nvec, - bool is_tree, - irq_write_msi_msg_t write_msi_msg, - const struct irq_domain_ops *ops, - void *host_data); - -#define platform_msi_create_device_domain(dev, nvec, write, ops, data) \ - __platform_msi_create_device_domain(dev, nvec, false, write, ops, data) -#define platform_msi_create_device_tree_domain(dev, nvec, write, ops, data) \ - __platform_msi_create_device_domain(dev, nvec, true, write, ops, data) - +platform_msi_create_device_domain(struct device *dev, + unsigned int nvec, + irq_write_msi_msg_t write_msi_msg, + const struct irq_domain_ops *ops, + void *host_data); int platform_msi_domain_alloc(struct irq_domain *domain, unsigned int virq, unsigned int nr_irqs); void platform_msi_domain_free(struct irq_domain *domain, unsigned int virq, @@ -452,11 +316,18 @@ void pci_msi_domain_write_msg(struct irq_data *irq_data, 
struct msi_msg *msg); struct irq_domain *pci_msi_create_irq_domain(struct fwnode_handle *fwnode, struct msi_domain_info *info, struct irq_domain *parent); +int pci_msi_domain_alloc_irqs(struct irq_domain *domain, struct pci_dev *dev, + int nvec, int type); +void pci_msi_domain_free_irqs(struct irq_domain *domain, struct pci_dev *dev); +struct irq_domain *pci_msi_create_default_irq_domain(struct fwnode_handle *fwnode, + struct msi_domain_info *info, struct irq_domain *parent); + +irq_hw_number_t pci_msi_domain_calc_hwirq(struct pci_dev *dev, + struct msi_desc *desc); int pci_msi_domain_check_cap(struct irq_domain *domain, struct msi_domain_info *info, struct device *dev); u32 pci_msi_domain_get_msi_rid(struct irq_domain *domain, struct pci_dev *pdev); struct irq_domain *pci_msi_get_device_domain(struct pci_dev *pdev); -bool pci_dev_has_special_msi_domain(struct pci_dev *pdev); #else static inline struct irq_domain *pci_msi_get_device_domain(struct pci_dev *pdev) { diff --git a/include/linux/mtd/bbm.h b/include/linux/mtd/bbm.h index d890805f54..3bf8f954b6 100644 --- a/include/linux/mtd/bbm.h +++ b/include/linux/mtd/bbm.h @@ -1,5 +1,6 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ /* + * linux/include/linux/mtd/bbm.h + * * NAND family Bad Block Management (BBM) header file * - Bad Block Table (BBT) implementation * @@ -8,6 +9,21 @@ * * Copyright © 2000-2005 * Thomas Gleixner + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + * */ #ifndef __LINUX_MTD_BBM_H #define __LINUX_MTD_BBM_H @@ -79,7 +95,10 @@ struct nand_bbt_descr { #define NAND_BBT_WRITE 0x00002000 /* Read and write back block contents when writing bbt */ #define NAND_BBT_SAVECONTENT 0x00004000 - +/* Search good / bad pattern on the first and the second page */ +#define NAND_BBT_SCAN2NDPAGE 0x00008000 +/* Search good / bad pattern on the last page of the eraseblock */ +#define NAND_BBT_SCANLASTPAGE 0x00010000 /* * Use a flash based bad block table. By default, OOB identifier is saved in * OOB area. This option is passed to the default bad block table function. @@ -98,7 +117,7 @@ struct nand_bbt_descr { /* * Flag set by nand_create_default_bbt_descr(), marking that the nand_bbt_descr - * was allocated dynamicaly and must be freed in nand_cleanup(). Has no meaning + * was allocated dynamicaly and must be freed in nand_release(). Has no meaning * in nand_chip.bbt_options. 
*/ #define NAND_BBT_DYNAMICSTRUCT 0x80000000 @@ -106,6 +125,13 @@ struct nand_bbt_descr { /* The maximum number of blocks to scan for a bbt */ #define NAND_BBT_SCAN_MAXBLOCKS 4 +/* + * Constants for oob configuration + */ +#define NAND_SMALL_BADBLOCK_POS 5 +#define NAND_LARGE_BADBLOCK_POS 0 +#define ONENAND_BADBLOCK_POS 0 + /* * Bad block scanning errors */ @@ -116,6 +142,7 @@ struct nand_bbt_descr { /** * struct bbm_info - [GENERIC] Bad Block Table data structure * @bbt_erase_shift: [INTERN] number of address bits in a bbt entry + * @badblockpos: [INTERN] position of the bad block marker in the oob area * @options: options for this descriptor * @bbt: [INTERN] bad block table pointer * @isbad_bbt: function to determine if a block is bad @@ -125,6 +152,7 @@ struct nand_bbt_descr { */ struct bbm_info { int bbt_erase_shift; + int badblockpos; int options; uint8_t *bbt; diff --git a/include/linux/mtd/blktrans.h b/include/linux/mtd/blktrans.h index 15cc9b95e3..e93837f647 100644 --- a/include/linux/mtd/blktrans.h +++ b/include/linux/mtd/blktrans.h @@ -1,6 +1,20 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * Copyright © 2003-2010 David Woodhouse + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + * */ #ifndef __MTD_TRANS_H__ @@ -9,6 +23,7 @@ #include #include #include +#include struct hd_geometry; struct mtd_info; @@ -29,9 +44,9 @@ struct mtd_blktrans_dev { struct kref ref; struct gendisk *disk; struct attribute_group *disk_attributes; + struct workqueue_struct *wq; + struct work_struct work; struct request_queue *rq; - struct list_head rq_list; - struct blk_mq_tag_set *tag_set; spinlock_t queue_lock; void *priv; fmode_t file_mode; @@ -77,16 +92,5 @@ extern int add_mtd_blktrans_dev(struct mtd_blktrans_dev *dev); extern int del_mtd_blktrans_dev(struct mtd_blktrans_dev *dev); extern int mtd_blktrans_cease_background(struct mtd_blktrans_dev *dev); -/** - * module_mtd_blktrans() - Helper macro for registering a mtd blktrans driver - * @__mtd_blktrans: mtd_blktrans_ops struct - * - * Helper macro for mtd blktrans drivers which do not do anything special in - * module init/exit. This eliminates a lot of boilerplate. Each module may only - * use this macro once, and calling it replaces module_init() and module_exit() - */ -#define module_mtd_blktrans(__mtd_blktrans) \ - module_driver(__mtd_blktrans, register_mtd_blktrans, \ - deregister_mtd_blktrans) #endif /* __MTD_TRANS_H__ */ diff --git a/include/linux/mtd/cfi.h b/include/linux/mtd/cfi.h index fd1ecb8211..9b57a9b1b0 100644 --- a/include/linux/mtd/cfi.h +++ b/include/linux/mtd/cfi.h @@ -1,6 +1,20 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * Copyright © 2000-2010 David Woodhouse et al. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. 
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + * */ #ifndef __MTD_CFI_H__ @@ -138,7 +152,7 @@ struct cfi_ident { uint16_t InterfaceDesc; uint16_t MaxBufWriteSize; uint8_t NumEraseRegions; - uint32_t EraseRegionInfo[]; /* Not host ordered */ + uint32_t EraseRegionInfo[0]; /* Not host ordered */ } __packed; /* Extended Query Structure for both PRI and ALT */ @@ -165,7 +179,7 @@ struct cfi_pri_intelext { uint16_t ProtRegAddr; uint8_t FactProtRegSize; uint8_t UserProtRegSize; - uint8_t extra[]; + uint8_t extra[0]; } __packed; struct cfi_intelext_otpinfo { @@ -219,13 +233,6 @@ struct cfi_pri_amdstd { uint8_t VppMin; uint8_t VppMax; uint8_t TopBottom; - /* Below field are added from version 1.5 */ - uint8_t ProgramSuspend; - uint8_t UnlockBypass; - uint8_t SecureSiliconSector; - uint8_t SoftwareFeatures; -#define CFI_POLL_STATUS_REG BIT(0) -#define CFI_POLL_DQ BIT(1) } __packed; /* Vendor-Specific PRI for Atmel chips (command set 0x0002) */ @@ -286,7 +293,7 @@ struct cfi_private { map_word sector_erase_cmd; unsigned long chipshift; /* Because they're of the same type */ const char *im_name; /* inter_module name for cmdset_setup */ - struct flchip chips[]; /* per-chip data structure for each chip */ + struct flchip chips[0]; /* per-chip data structure for each chip */ }; uint32_t cfi_build_cmd_addr(uint32_t cmd_ofs, @@ -370,7 +377,6 @@ struct cfi_fixup { #define CFI_MFR_SHARP 0x00B0 #define CFI_MFR_SST 0x00BF #define CFI_MFR_ST 0x0020 /* STMicroelectronics */ -#define CFI_MFR_MICRON 0x002C /* Micron */ #define CFI_MFR_TOSHIBA 0x0098 #define CFI_MFR_WINBOND 
0x00DA diff --git a/include/linux/mtd/cfi_endian.h b/include/linux/mtd/cfi_endian.h index 5275118aa4..b97a625071 100644 --- a/include/linux/mtd/cfi_endian.h +++ b/include/linux/mtd/cfi_endian.h @@ -1,6 +1,20 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * Copyright © 2001-2010 David Woodhouse + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + * */ #include diff --git a/include/linux/mtd/concat.h b/include/linux/mtd/concat.h index d6f653e074..ccdbe93a90 100644 --- a/include/linux/mtd/concat.h +++ b/include/linux/mtd/concat.h @@ -1,8 +1,22 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * MTD device concatenation layer definitions * * Copyright © 2002 Robert Kaiser + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + * */ #ifndef MTD_CONCAT_H diff --git a/include/linux/mtd/doc2000.h b/include/linux/mtd/doc2000.h index 1b7b0ee070..407d1e556c 100644 --- a/include/linux/mtd/doc2000.h +++ b/include/linux/mtd/doc2000.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * Linux driver for Disk-On-Chip devices * @@ -6,6 +5,21 @@ * Copyright © 1999-2010 David Woodhouse * Copyright © 2002-2003 Greg Ungerer * Copyright © 2002-2003 SnapGear Inc + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + * */ #ifndef __MTD_DOC2000_H__ diff --git a/include/linux/mtd/flashchip.h b/include/linux/mtd/flashchip.h index c04f690871..b63fa457fe 100644 --- a/include/linux/mtd/flashchip.h +++ b/include/linux/mtd/flashchip.h @@ -1,7 +1,21 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * Copyright © 2000 Red Hat UK Limited * Copyright © 2000-2010 David Woodhouse + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + * */ #ifndef __MTD_FLASHCHIP_H__ @@ -40,7 +54,7 @@ typedef enum { FL_READING, FL_CACHEDPRG, /* These 4 come from onenand_state_t, which has been unified here */ - FL_RESETTING, + FL_RESETING, FL_OTPING, FL_PREPARING_ERASE, FL_VERIFYING_ERASE, @@ -71,7 +85,6 @@ struct flchip { unsigned int write_suspended:1; unsigned int erase_suspended:1; unsigned long in_progress_block_addr; - unsigned long in_progress_block_mask; struct mutex mutex; wait_queue_head_t wq; /* Wait on here when we're waiting for the chip diff --git a/include/linux/mtd/fsmc.h b/include/linux/mtd/fsmc.h new file mode 100644 index 0000000000..ad3c348807 --- /dev/null +++ b/include/linux/mtd/fsmc.h @@ -0,0 +1,156 @@ +/* + * incude/mtd/fsmc.h + * + * ST Microelectronics + * Flexible Static Memory Controller (FSMC) + * platform data interface and header file + * + * Copyright © 2010 ST Microelectronics + * Vipin Kumar + * + * This file is licensed under the terms of the GNU General Public + * License version 2. This program is licensed "as is" without any + * warranty of any kind, whether express or implied. 
+ */ + +#ifndef __MTD_FSMC_H +#define __MTD_FSMC_H + +#include +#include +#include +#include +#include +#include + +#define FSMC_NAND_BW8 1 +#define FSMC_NAND_BW16 2 + +#define FSMC_MAX_NOR_BANKS 4 +#define FSMC_MAX_NAND_BANKS 4 + +#define FSMC_FLASH_WIDTH8 1 +#define FSMC_FLASH_WIDTH16 2 + +/* fsmc controller registers for NOR flash */ +#define CTRL 0x0 + /* ctrl register definitions */ + #define BANK_ENABLE (1 << 0) + #define MUXED (1 << 1) + #define NOR_DEV (2 << 2) + #define WIDTH_8 (0 << 4) + #define WIDTH_16 (1 << 4) + #define RSTPWRDWN (1 << 6) + #define WPROT (1 << 7) + #define WRT_ENABLE (1 << 12) + #define WAIT_ENB (1 << 13) + +#define CTRL_TIM 0x4 + /* ctrl_tim register definitions */ + +#define FSMC_NOR_BANK_SZ 0x8 +#define FSMC_NOR_REG_SIZE 0x40 + +#define FSMC_NOR_REG(base, bank, reg) (base + \ + FSMC_NOR_BANK_SZ * (bank) + \ + reg) + +/* fsmc controller registers for NAND flash */ +#define PC 0x00 + /* pc register definitions */ + #define FSMC_RESET (1 << 0) + #define FSMC_WAITON (1 << 1) + #define FSMC_ENABLE (1 << 2) + #define FSMC_DEVTYPE_NAND (1 << 3) + #define FSMC_DEVWID_8 (0 << 4) + #define FSMC_DEVWID_16 (1 << 4) + #define FSMC_ECCEN (1 << 6) + #define FSMC_ECCPLEN_512 (0 << 7) + #define FSMC_ECCPLEN_256 (1 << 7) + #define FSMC_TCLR_1 (1) + #define FSMC_TCLR_SHIFT (9) + #define FSMC_TCLR_MASK (0xF) + #define FSMC_TAR_1 (1) + #define FSMC_TAR_SHIFT (13) + #define FSMC_TAR_MASK (0xF) +#define STS 0x04 + /* sts register definitions */ + #define FSMC_CODE_RDY (1 << 15) +#define COMM 0x08 + /* comm register definitions */ + #define FSMC_TSET_0 0 + #define FSMC_TSET_SHIFT 0 + #define FSMC_TSET_MASK 0xFF + #define FSMC_TWAIT_6 6 + #define FSMC_TWAIT_SHIFT 8 + #define FSMC_TWAIT_MASK 0xFF + #define FSMC_THOLD_4 4 + #define FSMC_THOLD_SHIFT 16 + #define FSMC_THOLD_MASK 0xFF + #define FSMC_THIZ_1 1 + #define FSMC_THIZ_SHIFT 24 + #define FSMC_THIZ_MASK 0xFF +#define ATTRIB 0x0C +#define IOATA 0x10 +#define ECC1 0x14 +#define ECC2 0x18 +#define ECC3 0x1C 
+#define FSMC_NAND_BANK_SZ 0x20 + +#define FSMC_NAND_REG(base, bank, reg) (base + FSMC_NOR_REG_SIZE + \ + (FSMC_NAND_BANK_SZ * (bank)) + \ + reg) + +#define FSMC_BUSY_WAIT_TIMEOUT (1 * HZ) + +struct fsmc_nand_timings { + uint8_t tclr; + uint8_t tar; + uint8_t thiz; + uint8_t thold; + uint8_t twait; + uint8_t tset; +}; + +enum access_mode { + USE_DMA_ACCESS = 1, + USE_WORD_ACCESS, +}; + +/** + * fsmc_nand_platform_data - platform specific NAND controller config + * @nand_timings: timing setup for the physical NAND interface + * @partitions: partition table for the platform, use a default fallback + * if this is NULL + * @nr_partitions: the number of partitions in the previous entry + * @options: different options for the driver + * @width: bus width + * @bank: default bank + * @select_bank: callback to select a certain bank, this is + * platform-specific. If the controller only supports one bank + * this may be set to NULL + */ +struct fsmc_nand_platform_data { + struct fsmc_nand_timings *nand_timings; + struct mtd_partition *partitions; + unsigned int nr_partitions; + unsigned int options; + unsigned int width; + unsigned int bank; + + enum access_mode mode; + + void (*select_bank)(uint32_t bank, uint32_t busw); + + /* priv structures for dma accesses */ + void *read_dma_priv; + void *write_dma_priv; +}; + +extern int __init fsmc_nor_init(struct platform_device *pdev, + unsigned long base, uint32_t bank, uint32_t width); +extern void __init fsmc_init_board_info(struct platform_device *pdev, + struct mtd_partition *partitions, unsigned int nr_partitions, + unsigned int width); + +#endif /* __MTD_FSMC_H */ diff --git a/include/linux/mtd/gen_probe.h b/include/linux/mtd/gen_probe.h index 6bd0b30d59..2c456054fd 100644 --- a/include/linux/mtd/gen_probe.h +++ b/include/linux/mtd/gen_probe.h @@ -1,7 +1,21 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * Copyright © 2001 Red Hat UK Limited * Copyright © 2001-2010 David Woodhouse + * + * This program is free 
software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + * */ #ifndef __LINUX_MTD_GEN_PROBE_H__ diff --git a/include/linux/mtd/inftl.h b/include/linux/mtd/inftl.h index fdfff87066..8255118be0 100644 --- a/include/linux/mtd/inftl.h +++ b/include/linux/mtd/inftl.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ /* * inftl.h -- defines to support the Inverse NAND Flash Translation Layer * diff --git a/include/linux/mtd/lpc32xx_mlc.h b/include/linux/mtd/lpc32xx_mlc.h index d168c628c0..d91b1e3563 100644 --- a/include/linux/mtd/lpc32xx_mlc.h +++ b/include/linux/mtd/lpc32xx_mlc.h @@ -1,8 +1,11 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * Platform data for LPC32xx SoC MLC NAND controller * * Copyright © 2012 Roland Stigge + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
*/ #ifndef __LINUX_MTD_LPC32XX_MLC_H diff --git a/include/linux/mtd/lpc32xx_slc.h b/include/linux/mtd/lpc32xx_slc.h index cf54a9f804..1169548a15 100644 --- a/include/linux/mtd/lpc32xx_slc.h +++ b/include/linux/mtd/lpc32xx_slc.h @@ -1,8 +1,11 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * Platform data for LPC32xx SoC SLC NAND controller * * Copyright © 2012 Roland Stigge + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. */ #ifndef __LINUX_MTD_LPC32XX_SLC_H diff --git a/include/linux/mtd/map.h b/include/linux/mtd/map.h index b4fa92a6e4..3aa56e3104 100644 --- a/include/linux/mtd/map.h +++ b/include/linux/mtd/map.h @@ -1,6 +1,20 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * Copyright © 2000-2010 David Woodhouse et al. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + * */ /* Overhauled routines for dealing with different mmap regions of flash */ @@ -256,67 +270,75 @@ void map_destroy(struct mtd_info *mtd); #define INVALIDATE_CACHED_RANGE(map, from, size) \ do { if (map->inval_cache) map->inval_cache(map, from, size); } while (0) -#define map_word_equal(map, val1, val2) \ -({ \ - int i, ret = 1; \ - for (i = 0; i < map_words(map); i++) \ - if ((val1).x[i] != (val2).x[i]) { \ - ret = 0; \ - break; \ - } \ - ret; \ -}) -#define map_word_and(map, val1, val2) \ -({ \ - map_word r; \ - int i; \ - for (i = 0; i < map_words(map); i++) \ - r.x[i] = (val1).x[i] & (val2).x[i]; \ - r; \ -}) +static inline int map_word_equal(struct map_info *map, map_word val1, map_word val2) +{ + int i; -#define map_word_clr(map, val1, val2) \ -({ \ - map_word r; \ - int i; \ - for (i = 0; i < map_words(map); i++) \ - r.x[i] = (val1).x[i] & ~(val2).x[i]; \ - r; \ -}) + for (i = 0; i < map_words(map); i++) { + if (val1.x[i] != val2.x[i]) + return 0; + } -#define map_word_or(map, val1, val2) \ -({ \ - map_word r; \ - int i; \ - for (i = 0; i < map_words(map); i++) \ - r.x[i] = (val1).x[i] | (val2).x[i]; \ - r; \ -}) + return 1; +} -#define map_word_andequal(map, val1, val2, val3) \ -({ \ - int i, ret = 1; \ - for (i = 0; i < map_words(map); i++) { \ - if (((val1).x[i] & (val2).x[i]) != (val3).x[i]) { \ - ret = 0; \ - break; \ - } \ - } \ - ret; \ -}) +static inline map_word map_word_and(struct map_info *map, map_word val1, map_word val2) +{ + map_word r; + int i; -#define map_word_bitsset(map, val1, val2) \ -({ \ - int i, ret = 0; \ - for (i = 0; i < map_words(map); i++) { \ - if ((val1).x[i] & (val2).x[i]) { \ - ret = 1; \ - break; \ - } \ - } \ - ret; \ -}) + for (i = 0; i < map_words(map); i++) + r.x[i] = val1.x[i] & val2.x[i]; + + return r; 
+} + +static inline map_word map_word_clr(struct map_info *map, map_word val1, map_word val2) +{ + map_word r; + int i; + + for (i = 0; i < map_words(map); i++) + r.x[i] = val1.x[i] & ~val2.x[i]; + + return r; +} + +static inline map_word map_word_or(struct map_info *map, map_word val1, map_word val2) +{ + map_word r; + int i; + + for (i = 0; i < map_words(map); i++) + r.x[i] = val1.x[i] | val2.x[i]; + + return r; +} + +static inline int map_word_andequal(struct map_info *map, map_word val1, map_word val2, map_word val3) +{ + int i; + + for (i = 0; i < map_words(map); i++) { + if ((val1.x[i] & val2.x[i]) != val3.x[i]) + return 0; + } + + return 1; +} + +static inline int map_word_bitsset(struct map_info *map, map_word val1, map_word val2) +{ + int i; + + for (i = 0; i < map_words(map); i++) { + if (val1.x[i] & val2.x[i]) + return 1; + } + + return 0; +} static inline map_word map_word_load(struct map_info *map, const void *ptr) { diff --git a/include/linux/mtd/mtd.h b/include/linux/mtd/mtd.h index 88227044fc..13f8052b9f 100644 --- a/include/linux/mtd/mtd.h +++ b/include/linux/mtd/mtd.h @@ -1,6 +1,20 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * Copyright © 1999-2010 David Woodhouse et al. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + * */ #ifndef __MTD_MTD_H__ @@ -8,19 +22,20 @@ #include #include -#include #include #include -#include -#include #include #include -#define MTD_FAIL_ADDR_UNKNOWN -1LL +#define MTD_ERASE_PENDING 0x01 +#define MTD_ERASING 0x02 +#define MTD_ERASE_SUSPEND 0x04 +#define MTD_ERASE_DONE 0x08 +#define MTD_ERASE_FAILED 0x10 -struct mtd_info; +#define MTD_FAIL_ADDR_UNKNOWN -1LL /* * If the erase fails, fail_addr might indicate exactly which block failed. If @@ -28,9 +43,18 @@ struct mtd_info; * or was not specific to any particular block. */ struct erase_info { + struct mtd_info *mtd; uint64_t addr; uint64_t len; uint64_t fail_addr; + u_long time; + u_long retries; + unsigned dev; + unsigned cell; + void (*callback) (struct erase_info *self); + u_long priv; + u_char state; + struct erase_info *next; }; struct mtd_erase_region_info { @@ -55,11 +79,9 @@ struct mtd_erase_region_info { * @datbuf: data buffer - if NULL only oob data are read/written * @oobbuf: oob data buffer * - * Note, some MTD drivers do not allow you to write more than one OOB area at - * one go. If you try to do that on such an MTD device, -EINVAL will be - * returned. If you want to make your implementation portable on all kind of MTD - * devices you should split the write request into several sub-requests when the - * request crosses a page boundary. + * Note, it is allowed to read more than one OOB area at one go, but not write. + * The interface assumes that the OOB write requests program only one page's + * OOB area. */ struct mtd_oob_ops { unsigned int mode; @@ -183,56 +205,6 @@ struct mtd_pairing_scheme { struct module; /* only needed for owner field in mtd_info */ -/** - * struct mtd_debug_info - debugging information for an MTD device. 
- * - * @dfs_dir: direntry object of the MTD device debugfs directory - */ -struct mtd_debug_info { - struct dentry *dfs_dir; - - const char *partname; - const char *partid; -}; - -/** - * struct mtd_part - MTD partition specific fields - * - * @node: list node used to add an MTD partition to the parent partition list - * @offset: offset of the partition relatively to the parent offset - * @size: partition size. Should be equal to mtd->size unless - * MTD_SLC_ON_MLC_EMULATION is set - * @flags: original flags (before the mtdpart logic decided to tweak them based - * on flash constraints, like eraseblock/pagesize alignment) - * - * This struct is embedded in mtd_info and contains partition-specific - * properties/fields. - */ -struct mtd_part { - struct list_head node; - u64 offset; - u64 size; - u32 flags; -}; - -/** - * struct mtd_master - MTD master specific fields - * - * @partitions_lock: lock protecting accesses to the partition list. Protects - * not only the master partition list, but also all - * sub-partitions. - * @suspended: et to 1 when the device is suspended, 0 otherwise - * - * This struct is embedded in mtd_info and contains master-specific - * properties/fields. The master is the root MTD device from the MTD partition - * point of view. - */ -struct mtd_master { - struct mutex partitions_lock; - struct mutex chrdev_lock; - unsigned int suspended : 1; -}; - struct mtd_info { u_char type; uint32_t flags; @@ -285,7 +257,7 @@ struct mtd_info { */ unsigned int bitflip_threshold; - /* Kernel-only stuff starts here. */ + // Kernel-only stuff starts here. 
const char *name; int index; @@ -315,6 +287,10 @@ struct mtd_info { int (*_point) (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, void **virt, resource_size_t *phys); int (*_unpoint) (struct mtd_info *mtd, loff_t from, size_t len); + unsigned long (*_get_unmapped_area) (struct mtd_info *mtd, + unsigned long len, + unsigned long offset, + unsigned long flags); int (*_read) (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf); int (*_write) (struct mtd_info *mtd, loff_t to, size_t len, @@ -334,12 +310,9 @@ struct mtd_info { int (*_read_user_prot_reg) (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf); int (*_write_user_prot_reg) (struct mtd_info *mtd, loff_t to, - size_t len, size_t *retlen, - const u_char *buf); + size_t len, size_t *retlen, u_char *buf); int (*_lock_user_prot_reg) (struct mtd_info *mtd, loff_t from, size_t len); - int (*_erase_user_prot_reg) (struct mtd_info *mtd, loff_t from, - size_t len); int (*_writev) (struct mtd_info *mtd, const struct kvec *vecs, unsigned long count, loff_t to, size_t *retlen); void (*_sync) (struct mtd_info *mtd); @@ -349,7 +322,6 @@ struct mtd_info { int (*_block_isreserved) (struct mtd_info *mtd, loff_t ofs); int (*_block_isbad) (struct mtd_info *mtd, loff_t ofs); int (*_block_markbad) (struct mtd_info *mtd, loff_t ofs); - int (*_max_bad_blocks) (struct mtd_info *mtd, loff_t ofs, size_t len); int (*_suspend) (struct mtd_info *mtd); void (*_resume) (struct mtd_info *mtd); void (*_reboot) (struct mtd_info *mtd); @@ -360,11 +332,10 @@ struct mtd_info { int (*_get_device) (struct mtd_info *mtd); void (*_put_device) (struct mtd_info *mtd); - /* - * flag indicates a panic write, low level drivers can take appropriate - * action if required to ensure writes go through + /* Backing device capabilities for this device + * - provides mmap capabilities */ - bool oops_panic_write; + struct backing_dev_info *backing_dev_info; struct notifier_block reboot_notifier; /* 
default mode before reboot */ @@ -378,56 +349,8 @@ struct mtd_info { struct module *owner; struct device dev; int usecount; - struct mtd_debug_info dbg; - struct nvmem_device *nvmem; - struct nvmem_device *otp_user_nvmem; - struct nvmem_device *otp_factory_nvmem; - - /* - * Parent device from the MTD partition point of view. - * - * MTD masters do not have any parent, MTD partitions do. The parent - * MTD device can itself be a partition. - */ - struct mtd_info *parent; - - /* List of partitions attached to this MTD device */ - struct list_head partitions; - - union { - struct mtd_part part; - struct mtd_master master; - }; }; -static inline struct mtd_info *mtd_get_master(struct mtd_info *mtd) -{ - while (mtd->parent) - mtd = mtd->parent; - - return mtd; -} - -static inline u64 mtd_get_master_ofs(struct mtd_info *mtd, u64 ofs) -{ - while (mtd->parent) { - ofs += mtd->part.offset; - mtd = mtd->parent; - } - - return ofs; -} - -static inline bool mtd_is_partition(const struct mtd_info *mtd) -{ - return mtd->parent; -} - -static inline bool mtd_has_partitions(const struct mtd_info *mtd) -{ - return !list_empty(&mtd->partitions); -} - int mtd_ooblayout_ecc(struct mtd_info *mtd, int section, struct mtd_oob_region *oobecc); int mtd_ooblayout_find_eccregion(struct mtd_info *mtd, int eccbyte, @@ -462,35 +385,18 @@ static inline void mtd_set_of_node(struct mtd_info *mtd, struct device_node *np) { mtd->dev.of_node = np; - if (!mtd->name) - of_property_read_string(np, "label", &mtd->name); } static inline struct device_node *mtd_get_of_node(struct mtd_info *mtd) { - return dev_of_node(&mtd->dev); + return mtd->dev.of_node; } -static inline u32 mtd_oobavail(struct mtd_info *mtd, struct mtd_oob_ops *ops) +static inline int mtd_oobavail(struct mtd_info *mtd, struct mtd_oob_ops *ops) { return ops->mode == MTD_OPS_AUTO_OOB ? 
mtd->oobavail : mtd->oobsize; } -static inline int mtd_max_bad_blocks(struct mtd_info *mtd, - loff_t ofs, size_t len) -{ - struct mtd_info *master = mtd_get_master(mtd); - - if (!master->_max_bad_blocks) - return -ENOTSUPP; - - if (mtd->size < (len + ofs) || ofs < 0) - return -EINVAL; - - return master->_max_bad_blocks(master, mtd_get_master_ofs(mtd, ofs), - len); -} - int mtd_wunit_to_pairing_info(struct mtd_info *mtd, int wunit, struct mtd_pairing_info *info); int mtd_pairing_info_to_wunit(struct mtd_info *mtd, @@ -521,19 +427,16 @@ int mtd_get_user_prot_info(struct mtd_info *mtd, size_t len, size_t *retlen, int mtd_read_user_prot_reg(struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf); int mtd_write_user_prot_reg(struct mtd_info *mtd, loff_t to, size_t len, - size_t *retlen, const u_char *buf); + size_t *retlen, u_char *buf); int mtd_lock_user_prot_reg(struct mtd_info *mtd, loff_t from, size_t len); -int mtd_erase_user_prot_reg(struct mtd_info *mtd, loff_t from, size_t len); int mtd_writev(struct mtd_info *mtd, const struct kvec *vecs, unsigned long count, loff_t to, size_t *retlen); static inline void mtd_sync(struct mtd_info *mtd) { - struct mtd_info *master = mtd_get_master(mtd); - - if (master->_sync) - master->_sync(master); + if (mtd->_sync) + mtd->_sync(mtd); } int mtd_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len); @@ -545,31 +448,13 @@ int mtd_block_markbad(struct mtd_info *mtd, loff_t ofs); static inline int mtd_suspend(struct mtd_info *mtd) { - struct mtd_info *master = mtd_get_master(mtd); - int ret; - - if (master->master.suspended) - return 0; - - ret = master->_suspend ? master->_suspend(master) : 0; - if (ret) - return ret; - - master->master.suspended = 1; - return 0; + return mtd->_suspend ? 
mtd->_suspend(mtd) : 0; } static inline void mtd_resume(struct mtd_info *mtd) { - struct mtd_info *master = mtd_get_master(mtd); - - if (!master->master.suspended) - return; - - if (master->_resume) - master->_resume(master); - - master->master.suspended = 0; + if (mtd->_resume) + mtd->_resume(mtd); } static inline uint32_t mtd_div_by_eb(uint64_t sz, struct mtd_info *mtd) @@ -587,34 +472,6 @@ static inline uint32_t mtd_mod_by_eb(uint64_t sz, struct mtd_info *mtd) return do_div(sz, mtd->erasesize); } -/** - * mtd_align_erase_req - Adjust an erase request to align things on eraseblock - * boundaries. - * @mtd: the MTD device this erase request applies on - * @req: the erase request to adjust - * - * This function will adjust @req->addr and @req->len to align them on - * @mtd->erasesize. Of course we expect @mtd->erasesize to be != 0. - */ -static inline void mtd_align_erase_req(struct mtd_info *mtd, - struct erase_info *req) -{ - u32 mod; - - if (WARN_ON(!mtd->erasesize)) - return; - - mod = mtd_mod_by_eb(req->addr, mtd); - if (mod) { - req->addr -= mod; - req->len += mod; - } - - mod = mtd_mod_by_eb(req->addr + req->len, mtd); - if (mod) - req->len += mtd->erasesize - mod; -} - static inline uint32_t mtd_div_by_ws(uint64_t sz, struct mtd_info *mtd) { if (mtd->writesize_shift) @@ -632,9 +489,7 @@ static inline uint32_t mtd_mod_by_ws(uint64_t sz, struct mtd_info *mtd) static inline int mtd_wunit_per_eb(struct mtd_info *mtd) { - struct mtd_info *master = mtd_get_master(mtd); - - return master->erasesize / mtd->writesize; + return mtd->erasesize / mtd->writesize; } static inline int mtd_offset_to_wunit(struct mtd_info *mtd, loff_t offs) @@ -651,9 +506,7 @@ static inline loff_t mtd_wunit_to_offset(struct mtd_info *mtd, loff_t base, static inline int mtd_has_oob(const struct mtd_info *mtd) { - struct mtd_info *master = mtd_get_master((struct mtd_info *)mtd); - - return master->_read_oob && master->_write_oob; + return mtd->_read_oob && mtd->_write_oob; } static inline int 
mtd_type_is_nand(const struct mtd_info *mtd) @@ -663,9 +516,7 @@ static inline int mtd_type_is_nand(const struct mtd_info *mtd) static inline int mtd_can_have_bb(const struct mtd_info *mtd) { - struct mtd_info *master = mtd_get_master((struct mtd_info *)mtd); - - return !!master->_block_isbad; + return !!mtd->_block_isbad; } /* Kernel-side ioctl definitions */ @@ -699,6 +550,8 @@ extern void register_mtd_user (struct mtd_notifier *new); extern int unregister_mtd_user (struct mtd_notifier *old); void *mtd_kmalloc_up_to(const struct mtd_info *mtd, size_t *size); +void mtd_erase_callback(struct erase_info *instr); + static inline int mtd_is_bitflip(int err) { return err == -EUCLEAN; } diff --git a/include/linux/mtd/mtdram.h b/include/linux/mtd/mtdram.h index ee8f95643f..628a6a21dd 100644 --- a/include/linux/mtd/mtdram.h +++ b/include/linux/mtd/mtdram.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef __MTD_MTDRAM_H__ #define __MTD_MTDRAM_H__ diff --git a/include/linux/mtd/nand-gpio.h b/include/linux/mtd/nand-gpio.h index 7ab51bc4a3..51534e50f7 100644 --- a/include/linux/mtd/nand-gpio.h +++ b/include/linux/mtd/nand-gpio.h @@ -1,10 +1,14 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef __LINUX_MTD_NAND_GPIO_H #define __LINUX_MTD_NAND_GPIO_H -#include +#include struct gpio_nand_platdata { + int gpio_nce; + int gpio_nwp; + int gpio_cle; + int gpio_ale; + int gpio_rdy; void (*adjust_parts)(struct gpio_nand_platdata *, size_t); struct mtd_partition *parts; unsigned int num_parts; diff --git a/include/linux/mtd/nand.h b/include/linux/mtd/nand.h index 32fc7edf65..d8905a229f 100644 --- a/include/linux/mtd/nand.h +++ b/include/linux/mtd/nand.h @@ -1,1017 +1,1192 @@ -/* SPDX-License-Identifier: GPL-2.0 */ /* - * Copyright 2017 - Free Electrons + * linux/include/linux/mtd/nand.h * - * Authors: - * Boris Brezillon - * Peter Pan + * Copyright © 2000-2010 David Woodhouse + * Steven J. 
Hill + * Thomas Gleixner + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * Info: + * Contains standard defines and IDs for NAND flash devices + * + * Changelog: + * See git changelog. */ - #ifndef __LINUX_MTD_NAND_H #define __LINUX_MTD_NAND_H +#include +#include #include +#include +#include -struct nand_device; +struct mtd_info; +struct nand_flash_dev; +struct device_node; -/** - * struct nand_memory_organization - Memory organization structure - * @bits_per_cell: number of bits per NAND cell - * @pagesize: page size - * @oobsize: OOB area size - * @pages_per_eraseblock: number of pages per eraseblock - * @eraseblocks_per_lun: number of eraseblocks per LUN (Logical Unit Number) - * @max_bad_eraseblocks_per_lun: maximum number of eraseblocks per LUN - * @planes_per_lun: number of planes per LUN - * @luns_per_target: number of LUN per target (target is a synonym for die) - * @ntargets: total number of targets exposed by the NAND device +/* Scan and identify a NAND device */ +int nand_scan(struct mtd_info *mtd, int max_chips); +/* + * Separate phases of nand_scan(), allowing board driver to intervene + * and override command or ECC setup according to flash type. 
*/ -struct nand_memory_organization { - unsigned int bits_per_cell; - unsigned int pagesize; - unsigned int oobsize; - unsigned int pages_per_eraseblock; - unsigned int eraseblocks_per_lun; - unsigned int max_bad_eraseblocks_per_lun; - unsigned int planes_per_lun; - unsigned int luns_per_target; - unsigned int ntargets; -}; +int nand_scan_ident(struct mtd_info *mtd, int max_chips, + struct nand_flash_dev *table); +int nand_scan_tail(struct mtd_info *mtd); -#define NAND_MEMORG(bpc, ps, os, ppe, epl, mbb, ppl, lpt, nt) \ - { \ - .bits_per_cell = (bpc), \ - .pagesize = (ps), \ - .oobsize = (os), \ - .pages_per_eraseblock = (ppe), \ - .eraseblocks_per_lun = (epl), \ - .max_bad_eraseblocks_per_lun = (mbb), \ - .planes_per_lun = (ppl), \ - .luns_per_target = (lpt), \ - .ntargets = (nt), \ - } +/* Unregister the MTD device and free resources held by the NAND device */ +void nand_release(struct mtd_info *mtd); -/** - * struct nand_row_converter - Information needed to convert an absolute offset - * into a row address - * @lun_addr_shift: position of the LUN identifier in the row address - * @eraseblock_addr_shift: position of the eraseblock identifier in the row - * address - */ -struct nand_row_converter { - unsigned int lun_addr_shift; - unsigned int eraseblock_addr_shift; -}; +/* Internal helper for board drivers which need to override command function */ +void nand_wait_ready(struct mtd_info *mtd); -/** - * struct nand_pos - NAND position object - * @target: the NAND target/die - * @lun: the LUN identifier - * @plane: the plane within the LUN - * @eraseblock: the eraseblock within the LUN - * @page: the page within the LUN +/* locks all blocks present in the device */ +int nand_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len); + +/* unlocks specified locked blocks */ +int nand_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len); + +/* The maximum number of NAND chips in an array */ +#define NAND_MAX_CHIPS 8 + +/* + * Constants for hardware specific CLE/ALE/NCE 
function * - * These information are usually used by specific sub-layers to select the - * appropriate target/die and generate a row address to pass to the device. + * These are bits which can be or'ed to set/clear multiple + * bits in one go. */ -struct nand_pos { - unsigned int target; - unsigned int lun; - unsigned int plane; - unsigned int eraseblock; - unsigned int page; -}; +/* Select the chip by setting nCE to low */ +#define NAND_NCE 0x01 +/* Select the command latch by setting CLE to high */ +#define NAND_CLE 0x02 +/* Select the address latch by setting ALE to high */ +#define NAND_ALE 0x04 -/** - * enum nand_page_io_req_type - Direction of an I/O request - * @NAND_PAGE_READ: from the chip, to the controller - * @NAND_PAGE_WRITE: from the controller, to the chip +#define NAND_CTRL_CLE (NAND_NCE | NAND_CLE) +#define NAND_CTRL_ALE (NAND_NCE | NAND_ALE) +#define NAND_CTRL_CHANGE 0x80 + +/* + * Standard NAND flash commands */ -enum nand_page_io_req_type { - NAND_PAGE_READ = 0, - NAND_PAGE_WRITE, -}; +#define NAND_CMD_READ0 0 +#define NAND_CMD_READ1 1 +#define NAND_CMD_RNDOUT 5 +#define NAND_CMD_PAGEPROG 0x10 +#define NAND_CMD_READOOB 0x50 +#define NAND_CMD_ERASE1 0x60 +#define NAND_CMD_STATUS 0x70 +#define NAND_CMD_SEQIN 0x80 +#define NAND_CMD_RNDIN 0x85 +#define NAND_CMD_READID 0x90 +#define NAND_CMD_ERASE2 0xd0 +#define NAND_CMD_PARAM 0xec +#define NAND_CMD_GET_FEATURES 0xee +#define NAND_CMD_SET_FEATURES 0xef +#define NAND_CMD_RESET 0xff -/** - * struct nand_page_io_req - NAND I/O request object - * @type: the type of page I/O: read or write - * @pos: the position this I/O request is targeting - * @dataoffs: the offset within the page - * @datalen: number of data bytes to read from/write to this page - * @databuf: buffer to store data in or get data from - * @ooboffs: the OOB offset within the page - * @ooblen: the number of OOB bytes to read from/write to this page - * @oobbuf: buffer to store OOB data in or get OOB data from - * @mode: one of the 
%MTD_OPS_XXX mode - * - * This object is used to pass per-page I/O requests to NAND sub-layers. This - * way all useful information are already formatted in a useful way and - * specific NAND layers can focus on translating these information into - * specific commands/operations. +#define NAND_CMD_LOCK 0x2a +#define NAND_CMD_UNLOCK1 0x23 +#define NAND_CMD_UNLOCK2 0x24 + +/* Extended commands for large page devices */ +#define NAND_CMD_READSTART 0x30 +#define NAND_CMD_RNDOUTSTART 0xE0 +#define NAND_CMD_CACHEDPROG 0x15 + +#define NAND_CMD_NONE -1 + +/* Status bits */ +#define NAND_STATUS_FAIL 0x01 +#define NAND_STATUS_FAIL_N1 0x02 +#define NAND_STATUS_TRUE_READY 0x20 +#define NAND_STATUS_READY 0x40 +#define NAND_STATUS_WP 0x80 + +/* + * Constants for ECC_MODES */ -struct nand_page_io_req { - enum nand_page_io_req_type type; - struct nand_pos pos; - unsigned int dataoffs; - unsigned int datalen; - union { - const void *out; - void *in; - } databuf; - unsigned int ooboffs; - unsigned int ooblen; - union { - const void *out; - void *in; - } oobbuf; - int mode; -}; +typedef enum { + NAND_ECC_NONE, + NAND_ECC_SOFT, + NAND_ECC_HW, + NAND_ECC_HW_SYNDROME, + NAND_ECC_HW_OOB_FIRST, +} nand_ecc_modes_t; -const struct mtd_ooblayout_ops *nand_get_small_page_ooblayout(void); -const struct mtd_ooblayout_ops *nand_get_large_page_ooblayout(void); -const struct mtd_ooblayout_ops *nand_get_large_page_hamming_ooblayout(void); - -/** - * enum nand_ecc_engine_type - NAND ECC engine type - * @NAND_ECC_ENGINE_TYPE_INVALID: Invalid value - * @NAND_ECC_ENGINE_TYPE_NONE: No ECC correction - * @NAND_ECC_ENGINE_TYPE_SOFT: Software ECC correction - * @NAND_ECC_ENGINE_TYPE_ON_HOST: On host hardware ECC correction - * @NAND_ECC_ENGINE_TYPE_ON_DIE: On chip hardware ECC correction - */ -enum nand_ecc_engine_type { - NAND_ECC_ENGINE_TYPE_INVALID, - NAND_ECC_ENGINE_TYPE_NONE, - NAND_ECC_ENGINE_TYPE_SOFT, - NAND_ECC_ENGINE_TYPE_ON_HOST, - NAND_ECC_ENGINE_TYPE_ON_DIE, -}; - -/** - * enum 
nand_ecc_placement - NAND ECC bytes placement - * @NAND_ECC_PLACEMENT_UNKNOWN: The actual position of the ECC bytes is unknown - * @NAND_ECC_PLACEMENT_OOB: The ECC bytes are located in the OOB area - * @NAND_ECC_PLACEMENT_INTERLEAVED: Syndrome layout, there are ECC bytes - * interleaved with regular data in the main - * area - */ -enum nand_ecc_placement { - NAND_ECC_PLACEMENT_UNKNOWN, - NAND_ECC_PLACEMENT_OOB, - NAND_ECC_PLACEMENT_INTERLEAVED, -}; - -/** - * enum nand_ecc_algo - NAND ECC algorithm - * @NAND_ECC_ALGO_UNKNOWN: Unknown algorithm - * @NAND_ECC_ALGO_HAMMING: Hamming algorithm - * @NAND_ECC_ALGO_BCH: Bose-Chaudhuri-Hocquenghem algorithm - * @NAND_ECC_ALGO_RS: Reed-Solomon algorithm - */ enum nand_ecc_algo { - NAND_ECC_ALGO_UNKNOWN, - NAND_ECC_ALGO_HAMMING, - NAND_ECC_ALGO_BCH, - NAND_ECC_ALGO_RS, + NAND_ECC_UNKNOWN, + NAND_ECC_HAMMING, + NAND_ECC_BCH, }; -/** - * struct nand_ecc_props - NAND ECC properties - * @engine_type: ECC engine type - * @placement: OOB placement (if relevant) - * @algo: ECC algorithm (if relevant) - * @strength: ECC strength - * @step_size: Number of bytes per step - * @flags: Misc properties +/* + * Constants for Hardware ECC */ -struct nand_ecc_props { - enum nand_ecc_engine_type engine_type; - enum nand_ecc_placement placement; +/* Reset Hardware ECC for read */ +#define NAND_ECC_READ 0 +/* Reset Hardware ECC for write */ +#define NAND_ECC_WRITE 1 +/* Enable Hardware ECC before syndrome is read back from flash */ +#define NAND_ECC_READSYN 2 + +/* + * Enable generic NAND 'page erased' check. This check is only done when + * ecc.correct() returns -EBADMSG. + * Set this flag if your implementation does not fix bitflips in erased + * pages and you want to rely on the default implementation. 
+ */ +#define NAND_ECC_GENERIC_ERASED_CHECK BIT(0) +#define NAND_ECC_MAXIMIZE BIT(1) + +/* Bit mask for flags passed to do_nand_read_ecc */ +#define NAND_GET_DEVICE 0x80 + + +/* + * Option constants for bizarre disfunctionality and real + * features. + */ +/* Buswidth is 16 bit */ +#define NAND_BUSWIDTH_16 0x00000002 +/* Chip has cache program function */ +#define NAND_CACHEPRG 0x00000008 +/* + * Chip requires ready check on read (for auto-incremented sequential read). + * True only for small page devices; large page devices do not support + * autoincrement. + */ +#define NAND_NEED_READRDY 0x00000100 + +/* Chip does not allow subpage writes */ +#define NAND_NO_SUBPAGE_WRITE 0x00000200 + +/* Device is one of 'new' xD cards that expose fake nand command set */ +#define NAND_BROKEN_XD 0x00000400 + +/* Device behaves just like nand, but is readonly */ +#define NAND_ROM 0x00000800 + +/* Device supports subpage reads */ +#define NAND_SUBPAGE_READ 0x00001000 + +/* + * Some MLC NANDs need data scrambling to limit bitflips caused by repeated + * patterns. + */ +#define NAND_NEED_SCRAMBLING 0x00002000 + +/* Options valid for Samsung large page devices */ +#define NAND_SAMSUNG_LP_OPTIONS NAND_CACHEPRG + +/* Macros to identify the above */ +#define NAND_HAS_CACHEPROG(chip) ((chip->options & NAND_CACHEPRG)) +#define NAND_HAS_SUBPAGE_READ(chip) ((chip->options & NAND_SUBPAGE_READ)) + +/* Non chip related options */ +/* This option skips the bbt scan during initialization. */ +#define NAND_SKIP_BBTSCAN 0x00010000 +/* + * This option is defined if the board driver allocates its own buffers + * (e.g. because it needs them DMA-coherent). + */ +#define NAND_OWN_BUFFERS 0x00020000 +/* Chip may not exist, so silence any errors in scan */ +#define NAND_SCAN_SILENT_NODEV 0x00040000 +/* + * Autodetect nand buswidth with readid/onfi. 
+ * This suppose the driver will configure the hardware in 8 bits mode + * when calling nand_scan_ident, and update its configuration + * before calling nand_scan_tail. + */ +#define NAND_BUSWIDTH_AUTO 0x00080000 +/* + * This option could be defined by controller drivers to protect against + * kmap'ed, vmalloc'ed highmem buffers being passed from upper layers + */ +#define NAND_USE_BOUNCE_BUFFER 0x00100000 + +/* Options set by nand scan */ +/* Nand scan has allocated controller struct */ +#define NAND_CONTROLLER_ALLOC 0x80000000 + +/* Cell info constants */ +#define NAND_CI_CHIPNR_MSK 0x03 +#define NAND_CI_CELLTYPE_MSK 0x0C +#define NAND_CI_CELLTYPE_SHIFT 2 + +/* Keep gcc happy */ +struct nand_chip; + +/* ONFI features */ +#define ONFI_FEATURE_16_BIT_BUS (1 << 0) +#define ONFI_FEATURE_EXT_PARAM_PAGE (1 << 7) + +/* ONFI timing mode, used in both asynchronous and synchronous mode */ +#define ONFI_TIMING_MODE_0 (1 << 0) +#define ONFI_TIMING_MODE_1 (1 << 1) +#define ONFI_TIMING_MODE_2 (1 << 2) +#define ONFI_TIMING_MODE_3 (1 << 3) +#define ONFI_TIMING_MODE_4 (1 << 4) +#define ONFI_TIMING_MODE_5 (1 << 5) +#define ONFI_TIMING_MODE_UNKNOWN (1 << 6) + +/* ONFI feature address */ +#define ONFI_FEATURE_ADDR_TIMING_MODE 0x1 + +/* Vendor-specific feature address (Micron) */ +#define ONFI_FEATURE_ADDR_READ_RETRY 0x89 + +/* ONFI subfeature parameters length */ +#define ONFI_SUBFEATURE_PARAM_LEN 4 + +/* ONFI optional commands SET/GET FEATURES supported? 
*/ +#define ONFI_OPT_CMD_SET_GET_FEATURES (1 << 2) + +struct nand_onfi_params { + /* rev info and features block */ + /* 'O' 'N' 'F' 'I' */ + u8 sig[4]; + __le16 revision; + __le16 features; + __le16 opt_cmd; + u8 reserved0[2]; + __le16 ext_param_page_length; /* since ONFI 2.1 */ + u8 num_of_param_pages; /* since ONFI 2.1 */ + u8 reserved1[17]; + + /* manufacturer information block */ + char manufacturer[12]; + char model[20]; + u8 jedec_id; + __le16 date_code; + u8 reserved2[13]; + + /* memory organization block */ + __le32 byte_per_page; + __le16 spare_bytes_per_page; + __le32 data_bytes_per_ppage; + __le16 spare_bytes_per_ppage; + __le32 pages_per_block; + __le32 blocks_per_lun; + u8 lun_count; + u8 addr_cycles; + u8 bits_per_cell; + __le16 bb_per_lun; + __le16 block_endurance; + u8 guaranteed_good_blocks; + __le16 guaranteed_block_endurance; + u8 programs_per_page; + u8 ppage_attr; + u8 ecc_bits; + u8 interleaved_bits; + u8 interleaved_ops; + u8 reserved3[13]; + + /* electrical parameter block */ + u8 io_pin_capacitance_max; + __le16 async_timing_mode; + __le16 program_cache_timing_mode; + __le16 t_prog; + __le16 t_bers; + __le16 t_r; + __le16 t_ccs; + __le16 src_sync_timing_mode; + u8 src_ssync_features; + __le16 clk_pin_capacitance_typ; + __le16 io_pin_capacitance_typ; + __le16 input_pin_capacitance_typ; + u8 input_pin_capacitance_max; + u8 driver_strength_support; + __le16 t_int_r; + __le16 t_adl; + u8 reserved4[8]; + + /* vendor */ + __le16 vendor_revision; + u8 vendor[88]; + + __le16 crc; +} __packed; + +#define ONFI_CRC_BASE 0x4F4E + +/* Extended ECC information Block Definition (since ONFI 2.1) */ +struct onfi_ext_ecc_info { + u8 ecc_bits; + u8 codeword_size; + __le16 bb_per_lun; + __le16 block_endurance; + u8 reserved[2]; +} __packed; + +#define ONFI_SECTION_TYPE_0 0 /* Unused section. */ +#define ONFI_SECTION_TYPE_1 1 /* for additional sections. */ +#define ONFI_SECTION_TYPE_2 2 /* for ECC information. 
*/ +struct onfi_ext_section { + u8 type; + u8 length; +} __packed; + +#define ONFI_EXT_SECTION_MAX 8 + +/* Extended Parameter Page Definition (since ONFI 2.1) */ +struct onfi_ext_param_page { + __le16 crc; + u8 sig[4]; /* 'E' 'P' 'P' 'S' */ + u8 reserved0[10]; + struct onfi_ext_section sections[ONFI_EXT_SECTION_MAX]; + + /* + * The actual size of the Extended Parameter Page is in + * @ext_param_page_length of nand_onfi_params{}. + * The following are the variable length sections. + * So we do not add any fields below. Please see the ONFI spec. + */ +} __packed; + +struct nand_onfi_vendor_micron { + u8 two_plane_read; + u8 read_cache; + u8 read_unique_id; + u8 dq_imped; + u8 dq_imped_num_settings; + u8 dq_imped_feat_addr; + u8 rb_pulldown_strength; + u8 rb_pulldown_strength_feat_addr; + u8 rb_pulldown_strength_num_settings; + u8 otp_mode; + u8 otp_page_start; + u8 otp_data_prot_addr; + u8 otp_num_pages; + u8 otp_feat_addr; + u8 read_retry_options; + u8 reserved[72]; + u8 param_revision; +} __packed; + +struct jedec_ecc_info { + u8 ecc_bits; + u8 codeword_size; + __le16 bb_per_lun; + __le16 block_endurance; + u8 reserved[2]; +} __packed; + +/* JEDEC features */ +#define JEDEC_FEATURE_16_BIT_BUS (1 << 0) + +struct nand_jedec_params { + /* rev info and features block */ + /* 'J' 'E' 'S' 'D' */ + u8 sig[4]; + __le16 revision; + __le16 features; + u8 opt_cmd[3]; + __le16 sec_cmd; + u8 num_of_param_pages; + u8 reserved0[18]; + + /* manufacturer information block */ + char manufacturer[12]; + char model[20]; + u8 jedec_id[6]; + u8 reserved1[10]; + + /* memory organization block */ + __le32 byte_per_page; + __le16 spare_bytes_per_page; + u8 reserved2[6]; + __le32 pages_per_block; + __le32 blocks_per_lun; + u8 lun_count; + u8 addr_cycles; + u8 bits_per_cell; + u8 programs_per_page; + u8 multi_plane_addr; + u8 multi_plane_op_attr; + u8 reserved3[38]; + + /* electrical parameter block */ + __le16 async_sdr_speed_grade; + __le16 toggle_ddr_speed_grade; + __le16 
sync_ddr_speed_grade; + u8 async_sdr_features; + u8 toggle_ddr_features; + u8 sync_ddr_features; + __le16 t_prog; + __le16 t_bers; + __le16 t_r; + __le16 t_r_multi_plane; + __le16 t_ccs; + __le16 io_pin_capacitance_typ; + __le16 input_pin_capacitance_typ; + __le16 clk_pin_capacitance_typ; + u8 driver_strength_support; + __le16 t_adl; + u8 reserved4[36]; + + /* ECC and endurance block */ + u8 guaranteed_good_blocks; + __le16 guaranteed_block_endurance; + struct jedec_ecc_info ecc_info[4]; + u8 reserved5[29]; + + /* reserved */ + u8 reserved6[148]; + + /* vendor */ + __le16 vendor_rev_num; + u8 reserved7[88]; + + /* CRC for Parameter Page */ + __le16 crc; +} __packed; + +/** + * struct nand_hw_control - Control structure for hardware controller (e.g ECC generator) shared among independent devices + * @lock: protection lock + * @active: the mtd device which holds the controller currently + * @wq: wait queue to sleep on if a NAND operation is in + * progress used instead of the per chip wait queue + * when a hw controller is available. + */ +struct nand_hw_control { + spinlock_t lock; + struct nand_chip *active; + wait_queue_head_t wq; +}; + +static inline void nand_hw_control_init(struct nand_hw_control *nfc) +{ + nfc->active = NULL; + spin_lock_init(&nfc->lock); + init_waitqueue_head(&nfc->wq); +} + +/** + * struct nand_ecc_ctrl - Control structure for ECC + * @mode: ECC mode + * @algo: ECC algorithm + * @steps: number of ECC steps per page + * @size: data bytes per ECC step + * @bytes: ECC bytes per step + * @strength: max number of correctible bits per ECC step + * @total: total number of ECC bytes per page + * @prepad: padding information for syndrome based ECC generators + * @postpad: padding information for syndrome based ECC generators + * @options: ECC specific options (see NAND_ECC_XXX flags defined above) + * @priv: pointer to private ECC control data + * @hwctl: function to control hardware ECC generator. 
Must only + * be provided if an hardware ECC is available + * @calculate: function for ECC calculation or readback from ECC hardware + * @correct: function for ECC correction, matching to ECC generator (sw/hw). + * Should return a positive number representing the number of + * corrected bitflips, -EBADMSG if the number of bitflips exceed + * ECC strength, or any other error code if the error is not + * directly related to correction. + * If -EBADMSG is returned the input buffers should be left + * untouched. + * @read_page_raw: function to read a raw page without ECC. This function + * should hide the specific layout used by the ECC + * controller and always return contiguous in-band and + * out-of-band data even if they're not stored + * contiguously on the NAND chip (e.g. + * NAND_ECC_HW_SYNDROME interleaves in-band and + * out-of-band data). + * @write_page_raw: function to write a raw page without ECC. This function + * should hide the specific layout used by the ECC + * controller and consider the passed data as contiguous + * in-band and out-of-band data. ECC controller is + * responsible for doing the appropriate transformations + * to adapt to its specific layout (e.g. + * NAND_ECC_HW_SYNDROME interleaves in-band and + * out-of-band data). + * @read_page: function to read a page according to the ECC generator + * requirements; returns maximum number of bitflips corrected in + * any single ECC step, 0 if bitflips uncorrectable, -EIO hw error + * @read_subpage: function to read parts of the page covered by ECC; + * returns same as read_page() + * @write_subpage: function to write parts of the page covered by ECC. + * @write_page: function to write a page according to the ECC generator + * requirements. 
+ * @write_oob_raw: function to write chip OOB data without ECC + * @read_oob_raw: function to read chip OOB data without ECC + * @read_oob: function to read chip OOB data + * @write_oob: function to write chip OOB data + */ +struct nand_ecc_ctrl { + nand_ecc_modes_t mode; enum nand_ecc_algo algo; - unsigned int strength; - unsigned int step_size; - unsigned int flags; -}; - -#define NAND_ECCREQ(str, stp) { .strength = (str), .step_size = (stp) } - -/* NAND ECC misc flags */ -#define NAND_ECC_MAXIMIZE_STRENGTH BIT(0) - -/** - * struct nand_bbt - bad block table object - * @cache: in memory BBT cache - */ -struct nand_bbt { - unsigned long *cache; + int steps; + int size; + int bytes; + int total; + int strength; + int prepad; + int postpad; + unsigned int options; + void *priv; + void (*hwctl)(struct mtd_info *mtd, int mode); + int (*calculate)(struct mtd_info *mtd, const uint8_t *dat, + uint8_t *ecc_code); + int (*correct)(struct mtd_info *mtd, uint8_t *dat, uint8_t *read_ecc, + uint8_t *calc_ecc); + int (*read_page_raw)(struct mtd_info *mtd, struct nand_chip *chip, + uint8_t *buf, int oob_required, int page); + int (*write_page_raw)(struct mtd_info *mtd, struct nand_chip *chip, + const uint8_t *buf, int oob_required, int page); + int (*read_page)(struct mtd_info *mtd, struct nand_chip *chip, + uint8_t *buf, int oob_required, int page); + int (*read_subpage)(struct mtd_info *mtd, struct nand_chip *chip, + uint32_t offs, uint32_t len, uint8_t *buf, int page); + int (*write_subpage)(struct mtd_info *mtd, struct nand_chip *chip, + uint32_t offset, uint32_t data_len, + const uint8_t *data_buf, int oob_required, int page); + int (*write_page)(struct mtd_info *mtd, struct nand_chip *chip, + const uint8_t *buf, int oob_required, int page); + int (*write_oob_raw)(struct mtd_info *mtd, struct nand_chip *chip, + int page); + int (*read_oob_raw)(struct mtd_info *mtd, struct nand_chip *chip, + int page); + int (*read_oob)(struct mtd_info *mtd, struct nand_chip *chip, int 
page); + int (*write_oob)(struct mtd_info *mtd, struct nand_chip *chip, + int page); }; /** - * struct nand_ops - NAND operations - * @erase: erase a specific block. No need to check if the block is bad before - * erasing, this has been taken care of by the generic NAND layer - * @markbad: mark a specific block bad. No need to check if the block is - * already marked bad, this has been taken care of by the generic - * NAND layer. This method should just write the BBM (Bad Block - * Marker) so that future call to struct_nand_ops->isbad() return - * true - * @isbad: check whether a block is bad or not. This method should just read - * the BBM and return whether the block is bad or not based on what it - * reads + * struct nand_buffers - buffer structure for read/write + * @ecccalc: buffer pointer for calculated ECC, size is oobsize. + * @ecccode: buffer pointer for ECC read from flash, size is oobsize. + * @databuf: buffer pointer for data, size is (page size + oobsize). * - * These are all low level operations that should be implemented by specialized - * NAND layers (SPI NAND, raw NAND, ...). + * Do not change the order of buffers. databuf and oobrbuf must be in + * consecutive order. */ -struct nand_ops { - int (*erase)(struct nand_device *nand, const struct nand_pos *pos); - int (*markbad)(struct nand_device *nand, const struct nand_pos *pos); - bool (*isbad)(struct nand_device *nand, const struct nand_pos *pos); +struct nand_buffers { + uint8_t *ecccalc; + uint8_t *ecccode; + uint8_t *databuf; }; /** - * struct nand_ecc_context - Context for the ECC engine - * @conf: basic ECC engine parameters - * @nsteps: number of ECC steps - * @total: total number of bytes used for storing ECC codes, this is used by - * generic OOB layouts - * @priv: ECC engine driver private data + * struct nand_sdr_timings - SDR NAND chip timings + * + * This struct defines the timing requirements of a SDR NAND chip. 
+ * These information can be found in every NAND datasheets and the timings + * meaning are described in the ONFI specifications: + * www.onfi.org/~/media/ONFI/specs/onfi_3_1_spec.pdf (chapter 4.15 Timing + * Parameters) + * + * All these timings are expressed in picoseconds. + * + * @tALH_min: ALE hold time + * @tADL_min: ALE to data loading time + * @tALS_min: ALE setup time + * @tAR_min: ALE to RE# delay + * @tCEA_max: CE# access time + * @tCEH_min: + * @tCH_min: CE# hold time + * @tCHZ_max: CE# high to output hi-Z + * @tCLH_min: CLE hold time + * @tCLR_min: CLE to RE# delay + * @tCLS_min: CLE setup time + * @tCOH_min: CE# high to output hold + * @tCS_min: CE# setup time + * @tDH_min: Data hold time + * @tDS_min: Data setup time + * @tFEAT_max: Busy time for Set Features and Get Features + * @tIR_min: Output hi-Z to RE# low + * @tITC_max: Interface and Timing Mode Change time + * @tRC_min: RE# cycle time + * @tREA_max: RE# access time + * @tREH_min: RE# high hold time + * @tRHOH_min: RE# high to output hold + * @tRHW_min: RE# high to WE# low + * @tRHZ_max: RE# high to output hi-Z + * @tRLOH_min: RE# low to output hold + * @tRP_min: RE# pulse width + * @tRR_min: Ready to RE# low (data only) + * @tRST_max: Device reset time, measured from the falling edge of R/B# to the + * rising edge of R/B#. 
+ * @tWB_max: WE# high to SR[6] low + * @tWC_min: WE# cycle time + * @tWH_min: WE# high hold time + * @tWHR_min: WE# high to RE# low + * @tWP_min: WE# pulse width + * @tWW_min: WP# transition to WE# low */ -struct nand_ecc_context { - struct nand_ecc_props conf; - unsigned int nsteps; - unsigned int total; +struct nand_sdr_timings { + u32 tALH_min; + u32 tADL_min; + u32 tALS_min; + u32 tAR_min; + u32 tCEA_max; + u32 tCEH_min; + u32 tCH_min; + u32 tCHZ_max; + u32 tCLH_min; + u32 tCLR_min; + u32 tCLS_min; + u32 tCOH_min; + u32 tCS_min; + u32 tDH_min; + u32 tDS_min; + u32 tFEAT_max; + u32 tIR_min; + u32 tITC_max; + u32 tRC_min; + u32 tREA_max; + u32 tREH_min; + u32 tRHOH_min; + u32 tRHW_min; + u32 tRHZ_max; + u32 tRLOH_min; + u32 tRP_min; + u32 tRR_min; + u64 tRST_max; + u32 tWB_max; + u32 tWC_min; + u32 tWH_min; + u32 tWHR_min; + u32 tWP_min; + u32 tWW_min; +}; + +/** + * enum nand_data_interface_type - NAND interface timing type + * @NAND_SDR_IFACE: Single Data Rate interface + */ +enum nand_data_interface_type { + NAND_SDR_IFACE, +}; + +/** + * struct nand_data_interface - NAND interface timing + * @type: type of the timing + * @timings: The timing, type according to @type + */ +struct nand_data_interface { + enum nand_data_interface_type type; + union { + struct nand_sdr_timings sdr; + } timings; +}; + +/** + * nand_get_sdr_timings - get SDR timing from data interface + * @conf: The data interface + */ +static inline const struct nand_sdr_timings * +nand_get_sdr_timings(const struct nand_data_interface *conf) +{ + if (conf->type != NAND_SDR_IFACE) + return ERR_PTR(-EINVAL); + + return &conf->timings.sdr; +} + +/** + * struct nand_chip - NAND Private Flash Chip Data + * @mtd: MTD device registered to the MTD framework + * @IO_ADDR_R: [BOARDSPECIFIC] address to read the 8 I/O lines of the + * flash device + * @IO_ADDR_W: [BOARDSPECIFIC] address to write the 8 I/O lines of the + * flash device. 
+ * @read_byte: [REPLACEABLE] read one byte from the chip + * @read_word: [REPLACEABLE] read one word from the chip + * @write_byte: [REPLACEABLE] write a single byte to the chip on the + * low 8 I/O lines + * @write_buf: [REPLACEABLE] write data from the buffer to the chip + * @read_buf: [REPLACEABLE] read data from the chip into the buffer + * @select_chip: [REPLACEABLE] select chip nr + * @block_bad: [REPLACEABLE] check if a block is bad, using OOB markers + * @block_markbad: [REPLACEABLE] mark a block bad + * @cmd_ctrl: [BOARDSPECIFIC] hardwarespecific function for controlling + * ALE/CLE/nCE. Also used to write command and address + * @dev_ready: [BOARDSPECIFIC] hardwarespecific function for accessing + * device ready/busy line. If set to NULL no access to + * ready/busy is available and the ready/busy information + * is read from the chip status register. + * @cmdfunc: [REPLACEABLE] hardwarespecific function for writing + * commands to the chip. + * @waitfunc: [REPLACEABLE] hardwarespecific function for wait on + * ready. + * @setup_read_retry: [FLASHSPECIFIC] flash (vendor) specific function for + * setting the read-retry mode. Mostly needed for MLC NAND. + * @ecc: [BOARDSPECIFIC] ECC control structure + * @buffers: buffer structure for read/write + * @hwcontrol: platform-specific hardware control structure + * @erase: [REPLACEABLE] erase function + * @scan_bbt: [REPLACEABLE] function to scan bad block table + * @chip_delay: [BOARDSPECIFIC] chip dependent delay for transferring + * data from array to read regs (tR). + * @state: [INTERN] the current state of the NAND device + * @oob_poi: "poison value buffer," used for laying out OOB data + * before writing + * @page_shift: [INTERN] number of address bits in a page (column + * address bits). 
+ * @phys_erase_shift: [INTERN] number of address bits in a physical eraseblock + * @bbt_erase_shift: [INTERN] number of address bits in a bbt entry + * @chip_shift: [INTERN] number of address bits in one chip + * @options: [BOARDSPECIFIC] various chip options. They can partly + * be set to inform nand_scan about special functionality. + * See the defines for further explanation. + * @bbt_options: [INTERN] bad block specific options. All options used + * here must come from bbm.h. By default, these options + * will be copied to the appropriate nand_bbt_descr's. + * @badblockpos: [INTERN] position of the bad block marker in the oob + * area. + * @badblockbits: [INTERN] minimum number of set bits in a good block's + * bad block marker position; i.e., BBM == 11110111b is + * not bad when badblockbits == 7 + * @bits_per_cell: [INTERN] number of bits per cell. i.e., 1 means SLC. + * @ecc_strength_ds: [INTERN] ECC correctability from the datasheet. + * Minimum amount of bit errors per @ecc_step_ds guaranteed + * to be correctable. If unknown, set to zero. + * @ecc_step_ds: [INTERN] ECC step required by the @ecc_strength_ds, + * also from the datasheet. It is the recommended ECC step + * size, if known; if unknown, set to zero. + * @onfi_timing_mode_default: [INTERN] default ONFI timing mode. This field is + * set to the actually used ONFI mode if the chip is + * ONFI compliant or deduced from the datasheet if + * the NAND chip is not ONFI compliant. + * @numchips: [INTERN] number of physical chips + * @chipsize: [INTERN] the size of one chip for multichip arrays + * @pagemask: [INTERN] page number mask = number of (pages / chip) - 1 + * @pagebuf: [INTERN] holds the pagenumber which is currently in + * data_buf. + * @pagebuf_bitflips: [INTERN] holds the bitflip count for the page which is + * currently in data_buf. + * @subpagesize: [INTERN] holds the subpagesize + * @onfi_version: [INTERN] holds the chip ONFI version (BCD encoded), + * non 0 if ONFI supported. 
+ * @jedec_version: [INTERN] holds the chip JEDEC version (BCD encoded), + * non 0 if JEDEC supported. + * @onfi_params: [INTERN] holds the ONFI page parameter when ONFI is + * supported, 0 otherwise. + * @jedec_params: [INTERN] holds the JEDEC parameter page when JEDEC is + * supported, 0 otherwise. + * @read_retries: [INTERN] the number of read retry modes supported + * @onfi_set_features: [REPLACEABLE] set the features for ONFI nand + * @onfi_get_features: [REPLACEABLE] get the features for ONFI nand + * @setup_data_interface: [OPTIONAL] setup the data interface and timing + * @bbt: [INTERN] bad block table pointer + * @bbt_td: [REPLACEABLE] bad block table descriptor for flash + * lookup. + * @bbt_md: [REPLACEABLE] bad block table mirror descriptor + * @badblock_pattern: [REPLACEABLE] bad block scan pattern used for initial + * bad block scan. + * @controller: [REPLACEABLE] a pointer to a hardware controller + * structure which is shared among multiple independent + * devices. + * @priv: [OPTIONAL] pointer to private chip data + * @errstat: [OPTIONAL] hardware specific function to perform + * additional error status checks (determine if errors are + * correctable). 
+ * @write_page: [REPLACEABLE] High-level page write function + */ + +struct nand_chip { + struct mtd_info mtd; + void __iomem *IO_ADDR_R; + void __iomem *IO_ADDR_W; + + uint8_t (*read_byte)(struct mtd_info *mtd); + u16 (*read_word)(struct mtd_info *mtd); + void (*write_byte)(struct mtd_info *mtd, uint8_t byte); + void (*write_buf)(struct mtd_info *mtd, const uint8_t *buf, int len); + void (*read_buf)(struct mtd_info *mtd, uint8_t *buf, int len); + void (*select_chip)(struct mtd_info *mtd, int chip); + int (*block_bad)(struct mtd_info *mtd, loff_t ofs); + int (*block_markbad)(struct mtd_info *mtd, loff_t ofs); + void (*cmd_ctrl)(struct mtd_info *mtd, int dat, unsigned int ctrl); + int (*dev_ready)(struct mtd_info *mtd); + void (*cmdfunc)(struct mtd_info *mtd, unsigned command, int column, + int page_addr); + int(*waitfunc)(struct mtd_info *mtd, struct nand_chip *this); + int (*erase)(struct mtd_info *mtd, int page); + int (*scan_bbt)(struct mtd_info *mtd); + int (*errstat)(struct mtd_info *mtd, struct nand_chip *this, int state, + int status, int page); + int (*write_page)(struct mtd_info *mtd, struct nand_chip *chip, + uint32_t offset, int data_len, const uint8_t *buf, + int oob_required, int page, int cached, int raw); + int (*onfi_set_features)(struct mtd_info *mtd, struct nand_chip *chip, + int feature_addr, uint8_t *subfeature_para); + int (*onfi_get_features)(struct mtd_info *mtd, struct nand_chip *chip, + int feature_addr, uint8_t *subfeature_para); + int (*setup_read_retry)(struct mtd_info *mtd, int retry_mode); + int (*setup_data_interface)(struct mtd_info *mtd, + const struct nand_data_interface *conf, + bool check_only); + + + int chip_delay; + unsigned int options; + unsigned int bbt_options; + + int page_shift; + int phys_erase_shift; + int bbt_erase_shift; + int chip_shift; + int numchips; + uint64_t chipsize; + int pagemask; + int pagebuf; + unsigned int pagebuf_bitflips; + int subpagesize; + uint8_t bits_per_cell; + uint16_t ecc_strength_ds; + 
uint16_t ecc_step_ds; + int onfi_timing_mode_default; + int badblockpos; + int badblockbits; + + int onfi_version; + int jedec_version; + union { + struct nand_onfi_params onfi_params; + struct nand_jedec_params jedec_params; + }; + + struct nand_data_interface *data_interface; + + int read_retries; + + flstate_t state; + + uint8_t *oob_poi; + struct nand_hw_control *controller; + + struct nand_ecc_ctrl ecc; + struct nand_buffers *buffers; + struct nand_hw_control hwcontrol; + + uint8_t *bbt; + struct nand_bbt_descr *bbt_td; + struct nand_bbt_descr *bbt_md; + + struct nand_bbt_descr *badblock_pattern; + + void *priv; +}; + +extern const struct mtd_ooblayout_ops nand_ooblayout_sp_ops; +extern const struct mtd_ooblayout_ops nand_ooblayout_lp_ops; + +static inline void nand_set_flash_node(struct nand_chip *chip, + struct device_node *np) +{ + mtd_set_of_node(&chip->mtd, np); +} + +static inline struct device_node *nand_get_flash_node(struct nand_chip *chip) +{ + return mtd_get_of_node(&chip->mtd); +} + +static inline struct nand_chip *mtd_to_nand(struct mtd_info *mtd) +{ + return container_of(mtd, struct nand_chip, mtd); +} + +static inline struct mtd_info *nand_to_mtd(struct nand_chip *chip) +{ + return &chip->mtd; +} + +static inline void *nand_get_controller_data(struct nand_chip *chip) +{ + return chip->priv; +} + +static inline void nand_set_controller_data(struct nand_chip *chip, void *priv) +{ + chip->priv = priv; +} + +/* + * NAND Flash Manufacturer ID Codes + */ +#define NAND_MFR_TOSHIBA 0x98 +#define NAND_MFR_ESMT 0xc8 +#define NAND_MFR_SAMSUNG 0xec +#define NAND_MFR_FUJITSU 0x04 +#define NAND_MFR_NATIONAL 0x8f +#define NAND_MFR_RENESAS 0x07 +#define NAND_MFR_STMICRO 0x20 +#define NAND_MFR_HYNIX 0xad +#define NAND_MFR_MICRON 0x2c +#define NAND_MFR_AMD 0x01 +#define NAND_MFR_MACRONIX 0xc2 +#define NAND_MFR_EON 0x92 +#define NAND_MFR_SANDISK 0x45 +#define NAND_MFR_INTEL 0x89 +#define NAND_MFR_ATO 0x9b + +/* The maximum expected count of bytes in the NAND ID 
sequence */ +#define NAND_MAX_ID_LEN 8 + +/* + * A helper for defining older NAND chips where the second ID byte fully + * defined the chip, including the geometry (chip size, eraseblock size, page + * size). All these chips have 512 bytes NAND page size. + */ +#define LEGACY_ID_NAND(nm, devid, chipsz, erasesz, opts) \ + { .name = (nm), {{ .dev_id = (devid) }}, .pagesize = 512, \ + .chipsize = (chipsz), .erasesize = (erasesz), .options = (opts) } + +/* + * A helper for defining newer chips which report their page size and + * eraseblock size via the extended ID bytes. + * + * The real difference between LEGACY_ID_NAND and EXTENDED_ID_NAND is that with + * EXTENDED_ID_NAND, manufacturers overloaded the same device ID so that the + * device ID now only represented a particular total chip size (and voltage, + * buswidth), and the page size, eraseblock size, and OOB size could vary while + * using the same device ID. + */ +#define EXTENDED_ID_NAND(nm, devid, chipsz, opts) \ + { .name = (nm), {{ .dev_id = (devid) }}, .chipsize = (chipsz), \ + .options = (opts) } + +#define NAND_ECC_INFO(_strength, _step) \ + { .strength_ds = (_strength), .step_ds = (_step) } +#define NAND_ECC_STRENGTH(type) ((type)->ecc.strength_ds) +#define NAND_ECC_STEP(type) ((type)->ecc.step_ds) + +/** + * struct nand_flash_dev - NAND Flash Device ID Structure + * @name: a human-readable name of the NAND chip + * @dev_id: the device ID (the second byte of the full chip ID array) + * @mfr_id: manufecturer ID part of the full chip ID array (refers the same + * memory address as @id[0]) + * @dev_id: device ID part of the full chip ID array (refers the same memory + * address as @id[1]) + * @id: full device ID array + * @pagesize: size of the NAND page in bytes; if 0, then the real page size (as + * well as the eraseblock size) is determined from the extended NAND + * chip ID array) + * @chipsize: total chip size in MiB + * @erasesize: eraseblock size in bytes (determined from the extended ID if 0) + * 
@options: stores various chip bit options + * @id_len: The valid length of the @id. + * @oobsize: OOB size + * @ecc: ECC correctability and step information from the datasheet. + * @ecc.strength_ds: The ECC correctability from the datasheet, same as the + * @ecc_strength_ds in nand_chip{}. + * @ecc.step_ds: The ECC step required by the @ecc.strength_ds, same as the + * @ecc_step_ds in nand_chip{}, also from the datasheet. + * For example, the "4bit ECC for each 512Byte" can be set with + * NAND_ECC_INFO(4, 512). + * @onfi_timing_mode_default: the default ONFI timing mode entered after a NAND + * reset. Should be deduced from timings described + * in the datasheet. + * + */ +struct nand_flash_dev { + char *name; + union { + struct { + uint8_t mfr_id; + uint8_t dev_id; + }; + uint8_t id[NAND_MAX_ID_LEN]; + }; + unsigned int pagesize; + unsigned int chipsize; + unsigned int erasesize; + unsigned int options; + uint16_t id_len; + uint16_t oobsize; + struct { + uint16_t strength_ds; + uint16_t step_ds; + } ecc; + int onfi_timing_mode_default; +}; + +/** + * struct nand_manufacturers - NAND Flash Manufacturer ID Structure + * @name: Manufacturer name + * @id: manufacturer ID code of device. +*/ +struct nand_manufacturers { + int id; + char *name; +}; + +extern struct nand_flash_dev nand_flash_ids[]; +extern struct nand_manufacturers nand_manuf_ids[]; + +int nand_default_bbt(struct mtd_info *mtd); +int nand_markbad_bbt(struct mtd_info *mtd, loff_t offs); +int nand_isreserved_bbt(struct mtd_info *mtd, loff_t offs); +int nand_isbad_bbt(struct mtd_info *mtd, loff_t offs, int allowbbt); +int nand_erase_nand(struct mtd_info *mtd, struct erase_info *instr, + int allowbbt); +int nand_do_read(struct mtd_info *mtd, loff_t from, size_t len, + size_t *retlen, uint8_t *buf); + +/** + * struct platform_nand_chip - chip level device structure + * @nr_chips: max. 
number of chips to scan for + * @chip_offset: chip number offset + * @nr_partitions: number of partitions pointed to by partitions (or zero) + * @partitions: mtd partition list + * @chip_delay: R/B delay value in us + * @options: Option flags, e.g. 16bit buswidth + * @bbt_options: BBT option flags, e.g. NAND_BBT_USE_FLASH + * @part_probe_types: NULL-terminated array of probe types + */ +struct platform_nand_chip { + int nr_chips; + int chip_offset; + int nr_partitions; + struct mtd_partition *partitions; + int chip_delay; + unsigned int options; + unsigned int bbt_options; + const char **part_probe_types; +}; + +/* Keep gcc happy */ +struct platform_device; + +/** + * struct platform_nand_ctrl - controller level device structure + * @probe: platform specific function to probe/setup hardware + * @remove: platform specific function to remove/teardown hardware + * @hwcontrol: platform specific hardware control structure + * @dev_ready: platform specific function to read ready/busy pin + * @select_chip: platform specific chip select function + * @cmd_ctrl: platform specific function for controlling + * ALE/CLE/nCE. 
Also used to write command and address + * @write_buf: platform specific function for write buffer + * @read_buf: platform specific function for read buffer + * @read_byte: platform specific function to read one byte from chip + * @priv: private data to transport driver specific settings + * + * All fields are optional and depend on the hardware driver requirements + */ +struct platform_nand_ctrl { + int (*probe)(struct platform_device *pdev); + void (*remove)(struct platform_device *pdev); + void (*hwcontrol)(struct mtd_info *mtd, int cmd); + int (*dev_ready)(struct mtd_info *mtd); + void (*select_chip)(struct mtd_info *mtd, int chip); + void (*cmd_ctrl)(struct mtd_info *mtd, int dat, unsigned int ctrl); + void (*write_buf)(struct mtd_info *mtd, const uint8_t *buf, int len); + void (*read_buf)(struct mtd_info *mtd, uint8_t *buf, int len); + unsigned char (*read_byte)(struct mtd_info *mtd); void *priv; }; /** - * struct nand_ecc_engine_ops - ECC engine operations - * @init_ctx: given a desired user configuration for the pointed NAND device, - * requests the ECC engine driver to setup a configuration with - * values it supports. - * @cleanup_ctx: clean the context initialized by @init_ctx. - * @prepare_io_req: is called before reading/writing a page to prepare the I/O - * request to be performed with ECC correction. - * @finish_io_req: is called after reading/writing a page to terminate the I/O - * request and ensure proper ECC correction. 
+ * struct platform_nand_data - container structure for platform-specific data + * @chip: chip level chip structure + * @ctrl: controller level device structure */ -struct nand_ecc_engine_ops { - int (*init_ctx)(struct nand_device *nand); - void (*cleanup_ctx)(struct nand_device *nand); - int (*prepare_io_req)(struct nand_device *nand, - struct nand_page_io_req *req); - int (*finish_io_req)(struct nand_device *nand, - struct nand_page_io_req *req); +struct platform_nand_data { + struct platform_nand_chip chip; + struct platform_nand_ctrl ctrl; }; -/** - * struct nand_ecc_engine - ECC engine abstraction for NAND devices - * @ops: ECC engine operations - */ -struct nand_ecc_engine { - struct nand_ecc_engine_ops *ops; -}; - -void of_get_nand_ecc_user_config(struct nand_device *nand); -int nand_ecc_init_ctx(struct nand_device *nand); -void nand_ecc_cleanup_ctx(struct nand_device *nand); -int nand_ecc_prepare_io_req(struct nand_device *nand, - struct nand_page_io_req *req); -int nand_ecc_finish_io_req(struct nand_device *nand, - struct nand_page_io_req *req); -bool nand_ecc_is_strong_enough(struct nand_device *nand); -struct nand_ecc_engine *nand_ecc_get_sw_engine(struct nand_device *nand); -struct nand_ecc_engine *nand_ecc_get_on_die_hw_engine(struct nand_device *nand); - -#if IS_ENABLED(CONFIG_MTD_NAND_ECC_SW_HAMMING) -struct nand_ecc_engine *nand_ecc_sw_hamming_get_engine(void); -#else -static inline struct nand_ecc_engine *nand_ecc_sw_hamming_get_engine(void) +/* return the supported features. 
*/ +static inline int onfi_feature(struct nand_chip *chip) { - return NULL; -} -#endif /* CONFIG_MTD_NAND_ECC_SW_HAMMING */ - -#if IS_ENABLED(CONFIG_MTD_NAND_ECC_SW_BCH) -struct nand_ecc_engine *nand_ecc_sw_bch_get_engine(void); -#else -static inline struct nand_ecc_engine *nand_ecc_sw_bch_get_engine(void) -{ - return NULL; -} -#endif /* CONFIG_MTD_NAND_ECC_SW_BCH */ - -/** - * struct nand_ecc_req_tweak_ctx - Help for automatically tweaking requests - * @orig_req: Pointer to the original IO request - * @nand: Related NAND device, to have access to its memory organization - * @page_buffer_size: Real size of the page buffer to use (can be set by the - * user before the tweaking mechanism initialization) - * @oob_buffer_size: Real size of the OOB buffer to use (can be set by the - * user before the tweaking mechanism initialization) - * @spare_databuf: Data bounce buffer - * @spare_oobbuf: OOB bounce buffer - * @bounce_data: Flag indicating a data bounce buffer is used - * @bounce_oob: Flag indicating an OOB bounce buffer is used - */ -struct nand_ecc_req_tweak_ctx { - struct nand_page_io_req orig_req; - struct nand_device *nand; - unsigned int page_buffer_size; - unsigned int oob_buffer_size; - void *spare_databuf; - void *spare_oobbuf; - bool bounce_data; - bool bounce_oob; -}; - -int nand_ecc_init_req_tweaking(struct nand_ecc_req_tweak_ctx *ctx, - struct nand_device *nand); -void nand_ecc_cleanup_req_tweaking(struct nand_ecc_req_tweak_ctx *ctx); -void nand_ecc_tweak_req(struct nand_ecc_req_tweak_ctx *ctx, - struct nand_page_io_req *req); -void nand_ecc_restore_req(struct nand_ecc_req_tweak_ctx *ctx, - struct nand_page_io_req *req); - -/** - * struct nand_ecc - Information relative to the ECC - * @defaults: Default values, depend on the underlying subsystem - * @requirements: ECC requirements from the NAND chip perspective - * @user_conf: User desires in terms of ECC parameters - * @ctx: ECC context for the ECC engine, derived from the device @requirements - * the 
@user_conf and the @defaults - * @ondie_engine: On-die ECC engine reference, if any - * @engine: ECC engine actually bound - */ -struct nand_ecc { - struct nand_ecc_props defaults; - struct nand_ecc_props requirements; - struct nand_ecc_props user_conf; - struct nand_ecc_context ctx; - struct nand_ecc_engine *ondie_engine; - struct nand_ecc_engine *engine; -}; - -/** - * struct nand_device - NAND device - * @mtd: MTD instance attached to the NAND device - * @memorg: memory layout - * @ecc: NAND ECC object attached to the NAND device - * @rowconv: position to row address converter - * @bbt: bad block table info - * @ops: NAND operations attached to the NAND device - * - * Generic NAND object. Specialized NAND layers (raw NAND, SPI NAND, OneNAND) - * should declare their own NAND object embedding a nand_device struct (that's - * how inheritance is done). - * struct_nand_device->memorg and struct_nand_device->ecc.requirements should - * be filled at device detection time to reflect the NAND device - * capabilities/requirements. Once this is done nanddev_init() can be called. - * It will take care of converting NAND information into MTD ones, which means - * the specialized NAND layers should never manually tweak - * struct_nand_device->mtd except for the ->_read/write() hooks. - */ -struct nand_device { - struct mtd_info mtd; - struct nand_memory_organization memorg; - struct nand_ecc ecc; - struct nand_row_converter rowconv; - struct nand_bbt bbt; - const struct nand_ops *ops; -}; - -/** - * struct nand_io_iter - NAND I/O iterator - * @req: current I/O request - * @oobbytes_per_page: maximum number of OOB bytes per page - * @dataleft: remaining number of data bytes to read/write - * @oobleft: remaining number of OOB bytes to read/write - * - * Can be used by specialized NAND layers to iterate over all pages covered - * by an MTD I/O request, which should greatly simplifies the boiler-plate - * code needed to read/write data from/to a NAND device. 
- */ -struct nand_io_iter { - struct nand_page_io_req req; - unsigned int oobbytes_per_page; - unsigned int dataleft; - unsigned int oobleft; -}; - -/** - * mtd_to_nanddev() - Get the NAND device attached to the MTD instance - * @mtd: MTD instance - * - * Return: the NAND device embedding @mtd. - */ -static inline struct nand_device *mtd_to_nanddev(struct mtd_info *mtd) -{ - return container_of(mtd, struct nand_device, mtd); + return chip->onfi_version ? le16_to_cpu(chip->onfi_params.features) : 0; } -/** - * nanddev_to_mtd() - Get the MTD device attached to a NAND device - * @nand: NAND device - * - * Return: the MTD device embedded in @nand. - */ -static inline struct mtd_info *nanddev_to_mtd(struct nand_device *nand) +/* return the supported asynchronous timing mode. */ +static inline int onfi_get_async_timing_mode(struct nand_chip *chip) { - return &nand->mtd; + if (!chip->onfi_version) + return ONFI_TIMING_MODE_UNKNOWN; + return le16_to_cpu(chip->onfi_params.async_timing_mode); } +/* return the supported synchronous timing mode. */ +static inline int onfi_get_sync_timing_mode(struct nand_chip *chip) +{ + if (!chip->onfi_version) + return ONFI_TIMING_MODE_UNKNOWN; + return le16_to_cpu(chip->onfi_params.src_sync_timing_mode); +} + +int onfi_init_data_interface(struct nand_chip *chip, + struct nand_data_interface *iface, + enum nand_data_interface_type type, + int timing_mode); + /* - * nanddev_bits_per_cell() - Get the number of bits per cell - * @nand: NAND device - * - * Return: the number of bits per cell. + * Check if it is a SLC nand. + * The !nand_is_slc() can be used to check the MLC/TLC nand chips. + * We do not distinguish the MLC and TLC now. 
*/ -static inline unsigned int nanddev_bits_per_cell(const struct nand_device *nand) +static inline bool nand_is_slc(struct nand_chip *chip) { - return nand->memorg.bits_per_cell; + return chip->bits_per_cell == 1; } /** - * nanddev_page_size() - Get NAND page size - * @nand: NAND device - * - * Return: the page size. + * Check if the opcode's address should be sent only on the lower 8 bits + * @command: opcode to check */ -static inline size_t nanddev_page_size(const struct nand_device *nand) +static inline int nand_opcode_8bits(unsigned int command) { - return nand->memorg.pagesize; -} - -/** - * nanddev_per_page_oobsize() - Get NAND OOB size - * @nand: NAND device - * - * Return: the OOB size. - */ -static inline unsigned int -nanddev_per_page_oobsize(const struct nand_device *nand) -{ - return nand->memorg.oobsize; -} - -/** - * nanddev_pages_per_eraseblock() - Get the number of pages per eraseblock - * @nand: NAND device - * - * Return: the number of pages per eraseblock. - */ -static inline unsigned int -nanddev_pages_per_eraseblock(const struct nand_device *nand) -{ - return nand->memorg.pages_per_eraseblock; -} - -/** - * nanddev_pages_per_target() - Get the number of pages per target - * @nand: NAND device - * - * Return: the number of pages per target. - */ -static inline unsigned int -nanddev_pages_per_target(const struct nand_device *nand) -{ - return nand->memorg.pages_per_eraseblock * - nand->memorg.eraseblocks_per_lun * - nand->memorg.luns_per_target; -} - -/** - * nanddev_per_page_oobsize() - Get NAND erase block size - * @nand: NAND device - * - * Return: the eraseblock size. - */ -static inline size_t nanddev_eraseblock_size(const struct nand_device *nand) -{ - return nand->memorg.pagesize * nand->memorg.pages_per_eraseblock; -} - -/** - * nanddev_eraseblocks_per_lun() - Get the number of eraseblocks per LUN - * @nand: NAND device - * - * Return: the number of eraseblocks per LUN. 
- */ -static inline unsigned int -nanddev_eraseblocks_per_lun(const struct nand_device *nand) -{ - return nand->memorg.eraseblocks_per_lun; -} - -/** - * nanddev_eraseblocks_per_target() - Get the number of eraseblocks per target - * @nand: NAND device - * - * Return: the number of eraseblocks per target. - */ -static inline unsigned int -nanddev_eraseblocks_per_target(const struct nand_device *nand) -{ - return nand->memorg.eraseblocks_per_lun * nand->memorg.luns_per_target; -} - -/** - * nanddev_target_size() - Get the total size provided by a single target/die - * @nand: NAND device - * - * Return: the total size exposed by a single target/die in bytes. - */ -static inline u64 nanddev_target_size(const struct nand_device *nand) -{ - return (u64)nand->memorg.luns_per_target * - nand->memorg.eraseblocks_per_lun * - nand->memorg.pages_per_eraseblock * - nand->memorg.pagesize; -} - -/** - * nanddev_ntarget() - Get the total of targets - * @nand: NAND device - * - * Return: the number of targets/dies exposed by @nand. - */ -static inline unsigned int nanddev_ntargets(const struct nand_device *nand) -{ - return nand->memorg.ntargets; -} - -/** - * nanddev_neraseblocks() - Get the total number of eraseblocks - * @nand: NAND device - * - * Return: the total number of eraseblocks exposed by @nand. - */ -static inline unsigned int nanddev_neraseblocks(const struct nand_device *nand) -{ - return nand->memorg.ntargets * nand->memorg.luns_per_target * - nand->memorg.eraseblocks_per_lun; -} - -/** - * nanddev_size() - Get NAND size - * @nand: NAND device - * - * Return: the total size (in bytes) exposed by @nand. - */ -static inline u64 nanddev_size(const struct nand_device *nand) -{ - return nanddev_target_size(nand) * nanddev_ntargets(nand); -} - -/** - * nanddev_get_memorg() - Extract memory organization info from a NAND device - * @nand: NAND device - * - * This can be used by the upper layer to fill the memorg info before calling - * nanddev_init(). 
- * - * Return: the memorg object embedded in the NAND device. - */ -static inline struct nand_memory_organization * -nanddev_get_memorg(struct nand_device *nand) -{ - return &nand->memorg; -} - -/** - * nanddev_get_ecc_conf() - Extract the ECC configuration from a NAND device - * @nand: NAND device - */ -static inline const struct nand_ecc_props * -nanddev_get_ecc_conf(struct nand_device *nand) -{ - return &nand->ecc.ctx.conf; -} - -/** - * nanddev_get_ecc_nsteps() - Extract the number of ECC steps - * @nand: NAND device - */ -static inline unsigned int -nanddev_get_ecc_nsteps(struct nand_device *nand) -{ - return nand->ecc.ctx.nsteps; -} - -/** - * nanddev_get_ecc_bytes_per_step() - Extract the number of ECC bytes per step - * @nand: NAND device - */ -static inline unsigned int -nanddev_get_ecc_bytes_per_step(struct nand_device *nand) -{ - return nand->ecc.ctx.total / nand->ecc.ctx.nsteps; -} - -/** - * nanddev_get_ecc_requirements() - Extract the ECC requirements from a NAND - * device - * @nand: NAND device - */ -static inline const struct nand_ecc_props * -nanddev_get_ecc_requirements(struct nand_device *nand) -{ - return &nand->ecc.requirements; -} - -/** - * nanddev_set_ecc_requirements() - Assign the ECC requirements of a NAND - * device - * @nand: NAND device - * @reqs: Requirements - */ -static inline void -nanddev_set_ecc_requirements(struct nand_device *nand, - const struct nand_ecc_props *reqs) -{ - nand->ecc.requirements = *reqs; -} - -int nanddev_init(struct nand_device *nand, const struct nand_ops *ops, - struct module *owner); -void nanddev_cleanup(struct nand_device *nand); - -/** - * nanddev_register() - Register a NAND device - * @nand: NAND device - * - * Register a NAND device. - * This function is just a wrapper around mtd_device_register() - * registering the MTD device embedded in @nand. - * - * Return: 0 in case of success, a negative error code otherwise. 
- */ -static inline int nanddev_register(struct nand_device *nand) -{ - return mtd_device_register(&nand->mtd, NULL, 0); -} - -/** - * nanddev_unregister() - Unregister a NAND device - * @nand: NAND device - * - * Unregister a NAND device. - * This function is just a wrapper around mtd_device_unregister() - * unregistering the MTD device embedded in @nand. - * - * Return: 0 in case of success, a negative error code otherwise. - */ -static inline int nanddev_unregister(struct nand_device *nand) -{ - return mtd_device_unregister(&nand->mtd); -} - -/** - * nanddev_set_of_node() - Attach a DT node to a NAND device - * @nand: NAND device - * @np: DT node - * - * Attach a DT node to a NAND device. - */ -static inline void nanddev_set_of_node(struct nand_device *nand, - struct device_node *np) -{ - mtd_set_of_node(&nand->mtd, np); -} - -/** - * nanddev_get_of_node() - Retrieve the DT node attached to a NAND device - * @nand: NAND device - * - * Return: the DT node attached to @nand. - */ -static inline struct device_node *nanddev_get_of_node(struct nand_device *nand) -{ - return mtd_get_of_node(&nand->mtd); -} - -/** - * nanddev_offs_to_pos() - Convert an absolute NAND offset into a NAND position - * @nand: NAND device - * @offs: absolute NAND offset (usually passed by the MTD layer) - * @pos: a NAND position object to fill in - * - * Converts @offs into a nand_pos representation. - * - * Return: the offset within the NAND page pointed by @pos. 
- */ -static inline unsigned int nanddev_offs_to_pos(struct nand_device *nand, - loff_t offs, - struct nand_pos *pos) -{ - unsigned int pageoffs; - u64 tmp = offs; - - pageoffs = do_div(tmp, nand->memorg.pagesize); - pos->page = do_div(tmp, nand->memorg.pages_per_eraseblock); - pos->eraseblock = do_div(tmp, nand->memorg.eraseblocks_per_lun); - pos->plane = pos->eraseblock % nand->memorg.planes_per_lun; - pos->lun = do_div(tmp, nand->memorg.luns_per_target); - pos->target = tmp; - - return pageoffs; -} - -/** - * nanddev_pos_cmp() - Compare two NAND positions - * @a: First NAND position - * @b: Second NAND position - * - * Compares two NAND positions. - * - * Return: -1 if @a < @b, 0 if @a == @b and 1 if @a > @b. - */ -static inline int nanddev_pos_cmp(const struct nand_pos *a, - const struct nand_pos *b) -{ - if (a->target != b->target) - return a->target < b->target ? -1 : 1; - - if (a->lun != b->lun) - return a->lun < b->lun ? -1 : 1; - - if (a->eraseblock != b->eraseblock) - return a->eraseblock < b->eraseblock ? -1 : 1; - - if (a->page != b->page) - return a->page < b->page ? -1 : 1; - + switch (command) { + case NAND_CMD_READID: + case NAND_CMD_PARAM: + case NAND_CMD_GET_FEATURES: + case NAND_CMD_SET_FEATURES: + return 1; + default: + break; + } return 0; } -/** - * nanddev_pos_to_offs() - Convert a NAND position into an absolute offset - * @nand: NAND device - * @pos: the NAND position to convert - * - * Converts @pos NAND position into an absolute offset. - * - * Return: the absolute offset. Note that @pos points to the beginning of a - * page, if one wants to point to a specific offset within this page - * the returned offset has to be adjusted manually. - */ -static inline loff_t nanddev_pos_to_offs(struct nand_device *nand, - const struct nand_pos *pos) +/* return the supported JEDEC features. 
*/ +static inline int jedec_feature(struct nand_chip *chip) { - unsigned int npages; - - npages = pos->page + - ((pos->eraseblock + - (pos->lun + - (pos->target * nand->memorg.luns_per_target)) * - nand->memorg.eraseblocks_per_lun) * - nand->memorg.pages_per_eraseblock); - - return (loff_t)npages * nand->memorg.pagesize; + return chip->jedec_version ? le16_to_cpu(chip->jedec_params.features) + : 0; } -/** - * nanddev_pos_to_row() - Extract a row address from a NAND position - * @nand: NAND device - * @pos: the position to convert - * - * Converts a NAND position into a row address that can then be passed to the - * device. - * - * Return: the row address extracted from @pos. - */ -static inline unsigned int nanddev_pos_to_row(struct nand_device *nand, - const struct nand_pos *pos) -{ - return (pos->lun << nand->rowconv.lun_addr_shift) | - (pos->eraseblock << nand->rowconv.eraseblock_addr_shift) | - pos->page; -} +/* get timing characteristics from ONFI timing mode. */ +const struct nand_sdr_timings *onfi_async_timing_mode_to_sdr_timings(int mode); +/* get data interface from ONFI timing mode 0, used after reset. */ +const struct nand_data_interface *nand_get_default_data_interface(void); -/** - * nanddev_pos_next_target() - Move a position to the next target/die - * @nand: NAND device - * @pos: the position to update - * - * Updates @pos to point to the start of the next target/die. Useful when you - * want to iterate over all targets/dies of a NAND device. - */ -static inline void nanddev_pos_next_target(struct nand_device *nand, - struct nand_pos *pos) -{ - pos->page = 0; - pos->plane = 0; - pos->eraseblock = 0; - pos->lun = 0; - pos->target++; -} +int nand_check_erased_ecc_chunk(void *data, int datalen, + void *ecc, int ecclen, + void *extraoob, int extraooblen, + int threshold); -/** - * nanddev_pos_next_lun() - Move a position to the next LUN - * @nand: NAND device - * @pos: the position to update - * - * Updates @pos to point to the start of the next LUN. 
Useful when you want to - * iterate over all LUNs of a NAND device. - */ -static inline void nanddev_pos_next_lun(struct nand_device *nand, - struct nand_pos *pos) -{ - if (pos->lun >= nand->memorg.luns_per_target - 1) - return nanddev_pos_next_target(nand, pos); +/* Default write_oob implementation */ +int nand_write_oob_std(struct mtd_info *mtd, struct nand_chip *chip, int page); - pos->lun++; - pos->page = 0; - pos->plane = 0; - pos->eraseblock = 0; -} +/* Default write_oob syndrome implementation */ +int nand_write_oob_syndrome(struct mtd_info *mtd, struct nand_chip *chip, + int page); -/** - * nanddev_pos_next_eraseblock() - Move a position to the next eraseblock - * @nand: NAND device - * @pos: the position to update - * - * Updates @pos to point to the start of the next eraseblock. Useful when you - * want to iterate over all eraseblocks of a NAND device. - */ -static inline void nanddev_pos_next_eraseblock(struct nand_device *nand, - struct nand_pos *pos) -{ - if (pos->eraseblock >= nand->memorg.eraseblocks_per_lun - 1) - return nanddev_pos_next_lun(nand, pos); +/* Default read_oob implementation */ +int nand_read_oob_std(struct mtd_info *mtd, struct nand_chip *chip, int page); - pos->eraseblock++; - pos->page = 0; - pos->plane = pos->eraseblock % nand->memorg.planes_per_lun; -} +/* Default read_oob syndrome implementation */ +int nand_read_oob_syndrome(struct mtd_info *mtd, struct nand_chip *chip, + int page); -/** - * nanddev_pos_next_page() - Move a position to the next page - * @nand: NAND device - * @pos: the position to update - * - * Updates @pos to point to the start of the next page. Useful when you want to - * iterate over all pages of a NAND device. 
- */ -static inline void nanddev_pos_next_page(struct nand_device *nand, - struct nand_pos *pos) -{ - if (pos->page >= nand->memorg.pages_per_eraseblock - 1) - return nanddev_pos_next_eraseblock(nand, pos); +/* Reset and initialize a NAND device */ +int nand_reset(struct nand_chip *chip, int chipnr); - pos->page++; -} - -/** - * nand_io_iter_init - Initialize a NAND I/O iterator - * @nand: NAND device - * @offs: absolute offset - * @req: MTD request - * @iter: NAND I/O iterator - * - * Initializes a NAND iterator based on the information passed by the MTD - * layer. - */ -static inline void nanddev_io_iter_init(struct nand_device *nand, - enum nand_page_io_req_type reqtype, - loff_t offs, struct mtd_oob_ops *req, - struct nand_io_iter *iter) -{ - struct mtd_info *mtd = nanddev_to_mtd(nand); - - iter->req.type = reqtype; - iter->req.mode = req->mode; - iter->req.dataoffs = nanddev_offs_to_pos(nand, offs, &iter->req.pos); - iter->req.ooboffs = req->ooboffs; - iter->oobbytes_per_page = mtd_oobavail(mtd, req); - iter->dataleft = req->len; - iter->oobleft = req->ooblen; - iter->req.databuf.in = req->datbuf; - iter->req.datalen = min_t(unsigned int, - nand->memorg.pagesize - iter->req.dataoffs, - iter->dataleft); - iter->req.oobbuf.in = req->oobbuf; - iter->req.ooblen = min_t(unsigned int, - iter->oobbytes_per_page - iter->req.ooboffs, - iter->oobleft); -} - -/** - * nand_io_iter_next_page - Move to the next page - * @nand: NAND device - * @iter: NAND I/O iterator - * - * Updates the @iter to point to the next page. 
- */ -static inline void nanddev_io_iter_next_page(struct nand_device *nand, - struct nand_io_iter *iter) -{ - nanddev_pos_next_page(nand, &iter->req.pos); - iter->dataleft -= iter->req.datalen; - iter->req.databuf.in += iter->req.datalen; - iter->oobleft -= iter->req.ooblen; - iter->req.oobbuf.in += iter->req.ooblen; - iter->req.dataoffs = 0; - iter->req.ooboffs = 0; - iter->req.datalen = min_t(unsigned int, nand->memorg.pagesize, - iter->dataleft); - iter->req.ooblen = min_t(unsigned int, iter->oobbytes_per_page, - iter->oobleft); -} - -/** - * nand_io_iter_end - Should end iteration or not - * @nand: NAND device - * @iter: NAND I/O iterator - * - * Check whether @iter has reached the end of the NAND portion it was asked to - * iterate on or not. - * - * Return: true if @iter has reached the end of the iteration request, false - * otherwise. - */ -static inline bool nanddev_io_iter_end(struct nand_device *nand, - const struct nand_io_iter *iter) -{ - if (iter->dataleft || iter->oobleft) - return false; - - return true; -} - -/** - * nand_io_for_each_page - Iterate over all NAND pages contained in an MTD I/O - * request - * @nand: NAND device - * @start: start address to read/write from - * @req: MTD I/O request - * @iter: NAND I/O iterator - * - * Should be used for iterate over pages that are contained in an MTD request. 
- */ -#define nanddev_io_for_each_page(nand, type, start, req, iter) \ - for (nanddev_io_iter_init(nand, type, start, req, iter); \ - !nanddev_io_iter_end(nand, iter); \ - nanddev_io_iter_next_page(nand, iter)) - -bool nanddev_isbad(struct nand_device *nand, const struct nand_pos *pos); -bool nanddev_isreserved(struct nand_device *nand, const struct nand_pos *pos); -int nanddev_erase(struct nand_device *nand, const struct nand_pos *pos); -int nanddev_markbad(struct nand_device *nand, const struct nand_pos *pos); - -/* ECC related functions */ -int nanddev_ecc_engine_init(struct nand_device *nand); -void nanddev_ecc_engine_cleanup(struct nand_device *nand); - -/* BBT related functions */ -enum nand_bbt_block_status { - NAND_BBT_BLOCK_STATUS_UNKNOWN, - NAND_BBT_BLOCK_GOOD, - NAND_BBT_BLOCK_WORN, - NAND_BBT_BLOCK_RESERVED, - NAND_BBT_BLOCK_FACTORY_BAD, - NAND_BBT_BLOCK_NUM_STATUS, -}; - -int nanddev_bbt_init(struct nand_device *nand); -void nanddev_bbt_cleanup(struct nand_device *nand); -int nanddev_bbt_update(struct nand_device *nand); -int nanddev_bbt_get_block_status(const struct nand_device *nand, - unsigned int entry); -int nanddev_bbt_set_block_status(struct nand_device *nand, unsigned int entry, - enum nand_bbt_block_status status); -int nanddev_bbt_markbad(struct nand_device *nand, unsigned int block); - -/** - * nanddev_bbt_pos_to_entry() - Convert a NAND position into a BBT entry - * @nand: NAND device - * @pos: the NAND position we want to get BBT entry for - * - * Return the BBT entry used to store information about the eraseblock pointed - * by @pos. - * - * Return: the BBT entry storing information about eraseblock pointed by @pos. 
- */ -static inline unsigned int nanddev_bbt_pos_to_entry(struct nand_device *nand, - const struct nand_pos *pos) -{ - return pos->eraseblock + - ((pos->lun + (pos->target * nand->memorg.luns_per_target)) * - nand->memorg.eraseblocks_per_lun); -} - -/** - * nanddev_bbt_is_initialized() - Check if the BBT has been initialized - * @nand: NAND device - * - * Return: true if the BBT has been initialized, false otherwise. - */ -static inline bool nanddev_bbt_is_initialized(struct nand_device *nand) -{ - return !!nand->bbt.cache; -} - -/* MTD -> NAND helper functions. */ -int nanddev_mtd_erase(struct mtd_info *mtd, struct erase_info *einfo); -int nanddev_mtd_max_bad_blocks(struct mtd_info *mtd, loff_t offs, size_t len); +/* Free resources held by the NAND device */ +void nand_cleanup(struct nand_chip *chip); #endif /* __LINUX_MTD_NAND_H */ diff --git a/include/linux/mtd/nand_bch.h b/include/linux/mtd/nand_bch.h index d5956cc48b..98f20ef05d 100644 --- a/include/linux/mtd/nand_bch.h +++ b/include/linux/mtd/nand_bch.h @@ -1,7 +1,10 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright © 2011 Ivan Djelic * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * * This file is the header for the NAND BCH ECC implementation. 
*/ @@ -9,24 +12,23 @@ #define __MTD_NAND_BCH_H__ struct mtd_info; -struct nand_chip; struct nand_bch_control; -#if IS_ENABLED(CONFIG_MTD_NAND_ECC_SW_BCH) +#if defined(CONFIG_MTD_NAND_ECC_BCH) static inline int mtd_nand_has_bch(void) { return 1; } /* * Calculate BCH ecc code */ -int nand_bch_calculate_ecc(struct nand_chip *chip, const u_char *dat, +int nand_bch_calculate_ecc(struct mtd_info *mtd, const u_char *dat, u_char *ecc_code); /* * Detect and correct bit errors */ -int nand_bch_correct_data(struct nand_chip *chip, u_char *dat, - u_char *read_ecc, u_char *calc_ecc); +int nand_bch_correct_data(struct mtd_info *mtd, u_char *dat, u_char *read_ecc, + u_char *calc_ecc); /* * Initialize BCH encoder/decoder */ @@ -36,19 +38,19 @@ struct nand_bch_control *nand_bch_init(struct mtd_info *mtd); */ void nand_bch_free(struct nand_bch_control *nbc); -#else /* !CONFIG_MTD_NAND_ECC_SW_BCH */ +#else /* !CONFIG_MTD_NAND_ECC_BCH */ static inline int mtd_nand_has_bch(void) { return 0; } static inline int -nand_bch_calculate_ecc(struct nand_chip *chip, const u_char *dat, +nand_bch_calculate_ecc(struct mtd_info *mtd, const u_char *dat, u_char *ecc_code) { return -1; } static inline int -nand_bch_correct_data(struct nand_chip *chip, unsigned char *buf, +nand_bch_correct_data(struct mtd_info *mtd, unsigned char *buf, unsigned char *read_ecc, unsigned char *calc_ecc) { return -ENOTSUPP; @@ -61,6 +63,6 @@ static inline struct nand_bch_control *nand_bch_init(struct mtd_info *mtd) static inline void nand_bch_free(struct nand_bch_control *nbc) {} -#endif /* CONFIG_MTD_NAND_ECC_SW_BCH */ +#endif /* CONFIG_MTD_NAND_ECC_BCH */ #endif /* __MTD_NAND_BCH_H__ */ diff --git a/include/linux/mtd/nand_ecc.h b/include/linux/mtd/nand_ecc.h index d423916b94..4d8406c816 100644 --- a/include/linux/mtd/nand_ecc.h +++ b/include/linux/mtd/nand_ecc.h @@ -1,39 +1,42 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* + * drivers/mtd/nand_ecc.h + * * Copyright (C) 2000-2010 Steven J. 
Hill * David Woodhouse * Thomas Gleixner * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * * This file is the header for the ECC algorithm. */ #ifndef __MTD_NAND_ECC_H__ #define __MTD_NAND_ECC_H__ -struct nand_chip; +struct mtd_info; /* * Calculate 3 byte ECC code for eccsize byte block */ void __nand_calculate_ecc(const u_char *dat, unsigned int eccsize, - u_char *ecc_code, bool sm_order); + u_char *ecc_code); /* * Calculate 3 byte ECC code for 256/512 byte block */ -int nand_calculate_ecc(struct nand_chip *chip, const u_char *dat, - u_char *ecc_code); +int nand_calculate_ecc(struct mtd_info *mtd, const u_char *dat, u_char *ecc_code); /* * Detect and correct a 1 bit error for eccsize byte block */ int __nand_correct_data(u_char *dat, u_char *read_ecc, u_char *calc_ecc, - unsigned int eccsize, bool sm_order); + unsigned int eccsize); /* * Detect and correct a 1 bit error for 256/512 byte block */ -int nand_correct_data(struct nand_chip *chip, u_char *dat, u_char *read_ecc, - u_char *calc_ecc); +int nand_correct_data(struct mtd_info *mtd, u_char *dat, u_char *read_ecc, u_char *calc_ecc); #endif /* __MTD_NAND_ECC_H__ */ diff --git a/include/linux/mtd/ndfc.h b/include/linux/mtd/ndfc.h index 98f075b869..d0558a9826 100644 --- a/include/linux/mtd/ndfc.h +++ b/include/linux/mtd/ndfc.h @@ -1,9 +1,15 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* + * linux/include/linux/mtd/ndfc.h + * * Copyright (c) 2006 Thomas Gleixner * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ * * Info: * Contains defines, datastructures for ndfc nand controller + * */ #ifndef __LINUX_MTD_NDFC_H #define __LINUX_MTD_NDFC_H diff --git a/include/linux/mtd/nftl.h b/include/linux/mtd/nftl.h index 4423d3b385..044daa02b8 100644 --- a/include/linux/mtd/nftl.h +++ b/include/linux/mtd/nftl.h @@ -1,6 +1,20 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * Copyright © 1999-2010 David Woodhouse + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + * */ #ifndef __MTD_NFTL_H__ diff --git a/include/linux/mtd/onenand.h b/include/linux/mtd/onenand.h index 1e517961d0..0aaa98b219 100644 --- a/include/linux/mtd/onenand.h +++ b/include/linux/mtd/onenand.h @@ -1,9 +1,12 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * linux/include/linux/mtd/onenand.h * * Copyright © 2005-2009 Samsung Electronics * Kyungmin Park + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
*/ #ifndef __LINUX_MTD_ONENAND_H @@ -91,7 +94,6 @@ struct onenand_chip { unsigned int technology; unsigned int density_mask; unsigned int options; - unsigned int badblockpos; unsigned int erase_shift; unsigned int page_shift; @@ -186,8 +188,6 @@ struct onenand_chip { /* Check byte access in OneNAND */ #define ONENAND_CHECK_BYTE_ACCESS(addr) (addr & 0x1) -#define ONENAND_BADBLOCK_POS 0 - /* * Options bits */ diff --git a/include/linux/mtd/onenand_regs.h b/include/linux/mtd/onenand_regs.h index 5f728407a5..d60130f88e 100644 --- a/include/linux/mtd/onenand_regs.h +++ b/include/linux/mtd/onenand_regs.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * linux/include/linux/mtd/onenand_regs.h * @@ -6,6 +5,10 @@ * * Copyright (C) 2005-2007 Samsung Electronics * Kyungmin Park + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. */ #ifndef __ONENAND_REG_H @@ -77,7 +80,6 @@ #define ONENAND_DEVICE_DENSITY_1Gb (0x003) #define ONENAND_DEVICE_DENSITY_2Gb (0x004) #define ONENAND_DEVICE_DENSITY_4Gb (0x005) -#define ONENAND_DEVICE_DENSITY_8Gb (0x006) /* * Version ID Register F002h (R) diff --git a/include/linux/mtd/partitions.h b/include/linux/mtd/partitions.h index b74a539ec5..70736e1e6c 100644 --- a/include/linux/mtd/partitions.h +++ b/include/linux/mtd/partitions.h @@ -20,12 +20,6 @@ * * For each partition, these fields are available: * name: string that will be used to label the partition's MTD device. - * types: some partitions can be containers using specific format to describe - * embedded subpartitions / volumes. E.g. many home routers use "firmware" - * partition that contains at least kernel and rootfs. In such case an - * extra parser is needed that will detect these dynamic partitions and - * report them to the MTD subsystem. 
If set this property stores an array - * of parser names to use when looking for subpartitions. * size: the partition size; if defined as MTDPART_SIZ_FULL, the partition * will extend to the end of the master MTD device. * offset: absolute starting position within the master MTD device; if @@ -37,7 +31,6 @@ * master MTD flag set for the corresponding MTD partition. * For example, to force a read-only partition, simply adding * MTD_WRITEABLE to the mask_flags will do the trick. - * add_flags: contains flags to add to the parent flags * * Note: writeable partitions require their size and offset be * erasesize aligned (e.g. use MTDPART_OFS_NEXTBLK). @@ -45,12 +38,9 @@ struct mtd_partition { const char *name; /* identifier string */ - const char *const *types; /* names of parsers to use if any */ uint64_t size; /* partition size */ uint64_t offset; /* offset within the master MTD space */ uint32_t mask_flags; /* master MTD flags to mask out for this partition */ - uint32_t add_flags; /* flags to add to the partition */ - struct device_node *of_node; }; #define MTDPART_OFS_RETAIN (-3) @@ -79,7 +69,6 @@ struct mtd_part_parser { struct list_head list; struct module *owner; const char *name; - const struct of_device_id *of_match_table; int (*parse_fn)(struct mtd_info *, const struct mtd_partition **, struct mtd_part_parser_data *); void (*cleanup)(const struct mtd_partition *pparts, int nr_parts); @@ -107,6 +96,7 @@ extern void deregister_mtd_parser(struct mtd_part_parser *parser); module_driver(__mtd_part_parser, register_mtd_parser, \ deregister_mtd_parser) +int mtd_is_partition(const struct mtd_info *mtd); int mtd_add_partition(struct mtd_info *master, const char *name, long long offset, long long length); int mtd_del_partition(struct mtd_info *master, int partno); diff --git a/include/linux/mtd/pfow.h b/include/linux/mtd/pfow.h index 146413d4bd..42ff7ff09b 100644 --- a/include/linux/mtd/pfow.h +++ b/include/linux/mtd/pfow.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: 
GPL-2.0 */ /* Primary function overlay window definitions * and service functions used by LPDDR chips */ @@ -19,7 +18,7 @@ /* Identification info for LPDDR chip */ #define PFOW_MANUFACTURER_ID 0x0020 #define PFOW_DEVICE_ID 0x0022 -/* Address in PFOW where prog buffer can be found */ +/* Address in PFOW where prog buffer can can be found */ #define PFOW_PROGRAM_BUFFER_OFFSET 0x0040 /* Size of program buffer in words */ #define PFOW_PROGRAM_BUFFER_SIZE 0x0042 @@ -121,4 +120,37 @@ static inline void send_pfow_command(struct map_info *map, map_write(map, CMD(LPDDR_START_EXECUTION), map->pfow_base + PFOW_COMMAND_EXECUTE); } + +static inline void print_drs_error(unsigned dsr) +{ + int prog_status = (dsr & DSR_RPS) >> 8; + + if (!(dsr & DSR_AVAILABLE)) + printk(KERN_NOTICE"DSR.15: (0) Device not Available\n"); + if (prog_status & 0x03) + printk(KERN_NOTICE"DSR.9,8: (11) Attempt to program invalid " + "half with 41h command\n"); + else if (prog_status & 0x02) + printk(KERN_NOTICE"DSR.9,8: (10) Object Mode Program attempt " + "in region with Control Mode data\n"); + else if (prog_status & 0x01) + printk(KERN_NOTICE"DSR.9,8: (01) Program attempt in region " + "with Object Mode data\n"); + if (!(dsr & DSR_READY_STATUS)) + printk(KERN_NOTICE"DSR.7: (0) Device is Busy\n"); + if (dsr & DSR_ESS) + printk(KERN_NOTICE"DSR.6: (1) Erase Suspended\n"); + if (dsr & DSR_ERASE_STATUS) + printk(KERN_NOTICE"DSR.5: (1) Erase/Blank check error\n"); + if (dsr & DSR_PROGRAM_STATUS) + printk(KERN_NOTICE"DSR.4: (1) Program Error\n"); + if (dsr & DSR_VPPS) + printk(KERN_NOTICE"DSR.3: (1) Vpp low detect, operation " + "aborted\n"); + if (dsr & DSR_PSS) + printk(KERN_NOTICE"DSR.2: (1) Program suspended\n"); + if (dsr & DSR_DPS) + printk(KERN_NOTICE"DSR.1: (1) Aborted Erase/Program attempt " + "on locked block\n"); +} #endif /* __LINUX_MTD_PFOW_H */ diff --git a/include/linux/mtd/physmap.h b/include/linux/mtd/physmap.h index bfaa9cc1dc..aa6a2633c2 100644 --- a/include/linux/mtd/physmap.h +++ 
b/include/linux/mtd/physmap.h @@ -1,10 +1,15 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * For boards with physically mapped flash and using * drivers/mtd/maps/physmap.c mapping driver. * * Copyright (C) 2003 MontaVista Software Inc. * Author: Jun Sun, jsun@mvista.com or jsun@junsun.net + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + * */ #ifndef __LINUX_MTD_PHYSMAP__ diff --git a/include/linux/mtd/pismo.h b/include/linux/mtd/pismo.h index 085b639c95..8dfb7e1421 100644 --- a/include/linux/mtd/pismo.h +++ b/include/linux/mtd/pismo.h @@ -1,6 +1,9 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * PISMO memory driver - http://www.pismoworld.org/ + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License. */ #ifndef __LINUX_MTD_PISMO_H #define __LINUX_MTD_PISMO_H diff --git a/include/linux/mtd/plat-ram.h b/include/linux/mtd/plat-ram.h index 09441856d2..44212d65aa 100644 --- a/include/linux/mtd/plat-ram.h +++ b/include/linux/mtd/plat-ram.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* linux/include/linux/mtd/plat-ram.h * * (c) 2004 Simtec Electronics @@ -6,6 +5,11 @@ * Ben Dooks * * Generic platform device based RAM map + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ * */ #ifndef __LINUX_MTD_PLATRAM_H diff --git a/include/linux/mtd/qinfo.h b/include/linux/mtd/qinfo.h index 2e3f43788d..7b3d487d8b 100644 --- a/include/linux/mtd/qinfo.h +++ b/include/linux/mtd/qinfo.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef __LINUX_MTD_QINFO_H #define __LINUX_MTD_QINFO_H @@ -15,7 +14,7 @@ * @DevId - Chip Device ID * @qinfo - pointer to qinfo records describing the chip * @numchips - number of chips including virual RWW partitions - * @chipshift - Chip/partition size 2^chipshift + * @chipshift - Chip/partiton size 2^chipshift * @chips - per-chip data structure */ struct lpddr_private { @@ -24,7 +23,7 @@ struct lpddr_private { struct qinfo_chip *qinfo; int numchips; unsigned long chipshift; - struct flchip chips[]; + struct flchip chips[0]; }; /* qinfo_query_info structure contains request information for diff --git a/include/linux/mtd/sh_flctl.h b/include/linux/mtd/sh_flctl.h index 78fc2d4218..2251add65f 100644 --- a/include/linux/mtd/sh_flctl.h +++ b/include/linux/mtd/sh_flctl.h @@ -1,8 +1,20 @@ -/* SPDX-License-Identifier: GPL-2.0 - * +/* * SuperH FLCTL nand controller * * Copyright © 2008 Renesas Solutions Corp. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; version 2 of the License. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */ #ifndef __SH_FLCTL_H__ @@ -10,7 +22,7 @@ #include #include -#include +#include #include #include diff --git a/include/linux/mtd/sharpsl.h b/include/linux/mtd/sharpsl.h index 231bd1c3f4..65e91d0fa9 100644 --- a/include/linux/mtd/sharpsl.h +++ b/include/linux/mtd/sharpsl.h @@ -1,14 +1,15 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * SharpSL NAND support * * Copyright (C) 2008 Dmitry Baryshkov + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. */ -#ifndef _MTD_SHARPSL_H -#define _MTD_SHARPSL_H - -#include +#include +#include #include struct sharpsl_nand_platform_data { @@ -16,7 +17,4 @@ struct sharpsl_nand_platform_data { const struct mtd_ooblayout_ops *ecc_layout; struct mtd_partition *partitions; unsigned int nr_partitions; - const char *const *part_parsers; }; - -#endif /* _MTD_SHARPSL_H */ diff --git a/include/linux/mtd/spi-nor.h b/include/linux/mtd/spi-nor.h index f67457748e..c425c7b4c2 100644 --- a/include/linux/mtd/spi-nor.h +++ b/include/linux/mtd/spi-nor.h @@ -1,6 +1,10 @@ -/* SPDX-License-Identifier: GPL-2.0+ */ /* * Copyright (C) 2014 Freescale Semiconductor, Inc. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. */ #ifndef __LINUX_MTD_SPI_NOR_H @@ -9,7 +13,21 @@ #include #include #include -#include + +/* + * Manufacturer IDs + * + * The first byte returned from the flash after sending opcode SPINOR_OP_RDID. + * Sometimes these are the same as CFI IDs, but sometimes they aren't. 
+ */ +#define SNOR_MFR_ATMEL CFI_MFR_ATMEL +#define SNOR_MFR_GIGADEVICE 0xc8 +#define SNOR_MFR_INTEL CFI_MFR_INTEL +#define SNOR_MFR_MICRON CFI_MFR_ST /* ST Micro <--> Micron */ +#define SNOR_MFR_MACRONIX CFI_MFR_MACRONIX +#define SNOR_MFR_SPANSION CFI_MFR_AMD +#define SNOR_MFR_SST CFI_MFR_SST +#define SNOR_MFR_WINBOND 0xef /* Also used by some Spansion */ /* * Note on opcode nomenclature: some opcodes have a format like @@ -20,98 +38,47 @@ */ /* Flash opcodes. */ -#define SPINOR_OP_WRDI 0x04 /* Write disable */ #define SPINOR_OP_WREN 0x06 /* Write enable */ #define SPINOR_OP_RDSR 0x05 /* Read status register */ #define SPINOR_OP_WRSR 0x01 /* Write status register 1 byte */ -#define SPINOR_OP_RDSR2 0x3f /* Read status register 2 */ -#define SPINOR_OP_WRSR2 0x3e /* Write status register 2 */ #define SPINOR_OP_READ 0x03 /* Read data bytes (low frequency) */ #define SPINOR_OP_READ_FAST 0x0b /* Read data bytes (high frequency) */ -#define SPINOR_OP_READ_1_1_2 0x3b /* Read data bytes (Dual Output SPI) */ -#define SPINOR_OP_READ_1_2_2 0xbb /* Read data bytes (Dual I/O SPI) */ -#define SPINOR_OP_READ_1_1_4 0x6b /* Read data bytes (Quad Output SPI) */ -#define SPINOR_OP_READ_1_4_4 0xeb /* Read data bytes (Quad I/O SPI) */ -#define SPINOR_OP_READ_1_1_8 0x8b /* Read data bytes (Octal Output SPI) */ -#define SPINOR_OP_READ_1_8_8 0xcb /* Read data bytes (Octal I/O SPI) */ +#define SPINOR_OP_READ_1_1_2 0x3b /* Read data bytes (Dual SPI) */ +#define SPINOR_OP_READ_1_1_4 0x6b /* Read data bytes (Quad SPI) */ #define SPINOR_OP_PP 0x02 /* Page program (up to 256 bytes) */ -#define SPINOR_OP_PP_1_1_4 0x32 /* Quad page program */ -#define SPINOR_OP_PP_1_4_4 0x38 /* Quad page program */ -#define SPINOR_OP_PP_1_1_8 0x82 /* Octal page program */ -#define SPINOR_OP_PP_1_8_8 0xc2 /* Octal page program */ #define SPINOR_OP_BE_4K 0x20 /* Erase 4KiB block */ #define SPINOR_OP_BE_4K_PMC 0xd7 /* Erase 4KiB block on PMC chips */ #define SPINOR_OP_BE_32K 0x52 /* Erase 32KiB block */ #define 
SPINOR_OP_CHIP_ERASE 0xc7 /* Erase whole flash chip */ #define SPINOR_OP_SE 0xd8 /* Sector erase (usually 64KiB) */ #define SPINOR_OP_RDID 0x9f /* Read JEDEC ID */ -#define SPINOR_OP_RDSFDP 0x5a /* Read SFDP */ #define SPINOR_OP_RDCR 0x35 /* Read configuration register */ #define SPINOR_OP_RDFSR 0x70 /* Read flag status register */ -#define SPINOR_OP_CLFSR 0x50 /* Clear flag status register */ -#define SPINOR_OP_RDEAR 0xc8 /* Read Extended Address Register */ -#define SPINOR_OP_WREAR 0xc5 /* Write Extended Address Register */ -#define SPINOR_OP_SRSTEN 0x66 /* Software Reset Enable */ -#define SPINOR_OP_SRST 0x99 /* Software Reset */ -#define SPINOR_OP_GBULK 0x98 /* Global Block Unlock */ /* 4-byte address opcodes - used on Spansion and some Macronix flashes. */ -#define SPINOR_OP_READ_4B 0x13 /* Read data bytes (low frequency) */ -#define SPINOR_OP_READ_FAST_4B 0x0c /* Read data bytes (high frequency) */ -#define SPINOR_OP_READ_1_1_2_4B 0x3c /* Read data bytes (Dual Output SPI) */ -#define SPINOR_OP_READ_1_2_2_4B 0xbc /* Read data bytes (Dual I/O SPI) */ -#define SPINOR_OP_READ_1_1_4_4B 0x6c /* Read data bytes (Quad Output SPI) */ -#define SPINOR_OP_READ_1_4_4_4B 0xec /* Read data bytes (Quad I/O SPI) */ -#define SPINOR_OP_READ_1_1_8_4B 0x7c /* Read data bytes (Octal Output SPI) */ -#define SPINOR_OP_READ_1_8_8_4B 0xcc /* Read data bytes (Octal I/O SPI) */ +#define SPINOR_OP_READ4 0x13 /* Read data bytes (low frequency) */ +#define SPINOR_OP_READ4_FAST 0x0c /* Read data bytes (high frequency) */ +#define SPINOR_OP_READ4_1_1_2 0x3c /* Read data bytes (Dual SPI) */ +#define SPINOR_OP_READ4_1_1_4 0x6c /* Read data bytes (Quad SPI) */ #define SPINOR_OP_PP_4B 0x12 /* Page program (up to 256 bytes) */ -#define SPINOR_OP_PP_1_1_4_4B 0x34 /* Quad page program */ -#define SPINOR_OP_PP_1_4_4_4B 0x3e /* Quad page program */ -#define SPINOR_OP_PP_1_1_8_4B 0x84 /* Octal page program */ -#define SPINOR_OP_PP_1_8_8_4B 0x8e /* Octal page program */ -#define SPINOR_OP_BE_4K_4B 0x21 
/* Erase 4KiB block */ -#define SPINOR_OP_BE_32K_4B 0x5c /* Erase 32KiB block */ #define SPINOR_OP_SE_4B 0xdc /* Sector erase (usually 64KiB) */ -/* Double Transfer Rate opcodes - defined in JEDEC JESD216B. */ -#define SPINOR_OP_READ_1_1_1_DTR 0x0d -#define SPINOR_OP_READ_1_2_2_DTR 0xbd -#define SPINOR_OP_READ_1_4_4_DTR 0xed - -#define SPINOR_OP_READ_1_1_1_DTR_4B 0x0e -#define SPINOR_OP_READ_1_2_2_DTR_4B 0xbe -#define SPINOR_OP_READ_1_4_4_DTR_4B 0xee - /* Used for SST flashes only. */ #define SPINOR_OP_BP 0x02 /* Byte program */ +#define SPINOR_OP_WRDI 0x04 /* Write disable */ #define SPINOR_OP_AAI_WP 0xad /* Auto address increment word program */ -/* Used for S3AN flashes only */ -#define SPINOR_OP_XSE 0x50 /* Sector erase */ -#define SPINOR_OP_XPP 0x82 /* Page program */ -#define SPINOR_OP_XRDSR 0xd7 /* Read status register */ - -#define XSR_PAGESIZE BIT(0) /* Page size in Po2 or Linear */ -#define XSR_RDY BIT(7) /* Ready */ - - /* Used for Macronix and Winbond flashes. */ #define SPINOR_OP_EN4B 0xb7 /* Enter 4-byte mode */ #define SPINOR_OP_EX4B 0xe9 /* Exit 4-byte mode */ /* Used for Spansion flashes only. */ #define SPINOR_OP_BRWR 0x17 /* Bank register write */ -#define SPINOR_OP_CLSR 0x30 /* Clear status register 1 */ /* Used for Micron flashes only. */ #define SPINOR_OP_RD_EVCR 0x65 /* Read EVCR register */ #define SPINOR_OP_WD_EVCR 0x61 /* Write EVCR register */ -/* Used for GigaDevices and Winbond flashes. */ -#define SPINOR_OP_ESECR 0x44 /* Erase Security registers */ -#define SPINOR_OP_PSECR 0x42 /* Program Security registers */ -#define SPINOR_OP_RSECR 0x48 /* Read Security registers */ - /* Status Register bits. 
*/ #define SR_WIP BIT(0) /* Write in progress */ #define SR_WEL BIT(1) /* Write enable latch */ @@ -119,310 +86,102 @@ #define SR_BP0 BIT(2) /* Block protect 0 */ #define SR_BP1 BIT(3) /* Block protect 1 */ #define SR_BP2 BIT(4) /* Block protect 2 */ -#define SR_BP3 BIT(5) /* Block protect 3 */ -#define SR_TB_BIT5 BIT(5) /* Top/Bottom protect */ -#define SR_BP3_BIT6 BIT(6) /* Block protect 3 */ -#define SR_TB_BIT6 BIT(6) /* Top/Bottom protect */ +#define SR_TB BIT(5) /* Top/Bottom protect */ #define SR_SRWD BIT(7) /* SR write protect */ -/* Spansion/Cypress specific status bits */ -#define SR_E_ERR BIT(5) -#define SR_P_ERR BIT(6) -#define SR1_QUAD_EN_BIT6 BIT(6) - -#define SR_BP_SHIFT 2 +#define SR_QUAD_EN_MX BIT(6) /* Macronix Quad I/O */ /* Enhanced Volatile Configuration Register bits */ #define EVCR_QUAD_EN_MICRON BIT(7) /* Micron Quad I/O */ /* Flag Status Register bits */ -#define FSR_READY BIT(7) /* Device status, 0 = Busy, 1 = Ready */ -#define FSR_E_ERR BIT(5) /* Erase operation status */ -#define FSR_P_ERR BIT(4) /* Program operation status */ -#define FSR_PT_ERR BIT(1) /* Protection error bit */ +#define FSR_READY BIT(7) -/* Status Register 2 bits. */ -#define SR2_QUAD_EN_BIT1 BIT(1) -#define SR2_LB1 BIT(3) /* Security Register Lock Bit 1 */ -#define SR2_LB2 BIT(4) /* Security Register Lock Bit 2 */ -#define SR2_LB3 BIT(5) /* Security Register Lock Bit 3 */ -#define SR2_QUAD_EN_BIT7 BIT(7) +/* Configuration Register bits. 
*/ +#define CR_QUAD_EN_SPAN BIT(1) /* Spansion Quad I/O */ -/* Supported SPI protocols */ -#define SNOR_PROTO_INST_MASK GENMASK(23, 16) -#define SNOR_PROTO_INST_SHIFT 16 -#define SNOR_PROTO_INST(_nbits) \ - ((((unsigned long)(_nbits)) << SNOR_PROTO_INST_SHIFT) & \ - SNOR_PROTO_INST_MASK) - -#define SNOR_PROTO_ADDR_MASK GENMASK(15, 8) -#define SNOR_PROTO_ADDR_SHIFT 8 -#define SNOR_PROTO_ADDR(_nbits) \ - ((((unsigned long)(_nbits)) << SNOR_PROTO_ADDR_SHIFT) & \ - SNOR_PROTO_ADDR_MASK) - -#define SNOR_PROTO_DATA_MASK GENMASK(7, 0) -#define SNOR_PROTO_DATA_SHIFT 0 -#define SNOR_PROTO_DATA(_nbits) \ - ((((unsigned long)(_nbits)) << SNOR_PROTO_DATA_SHIFT) & \ - SNOR_PROTO_DATA_MASK) - -#define SNOR_PROTO_IS_DTR BIT(24) /* Double Transfer Rate */ - -#define SNOR_PROTO_STR(_inst_nbits, _addr_nbits, _data_nbits) \ - (SNOR_PROTO_INST(_inst_nbits) | \ - SNOR_PROTO_ADDR(_addr_nbits) | \ - SNOR_PROTO_DATA(_data_nbits)) -#define SNOR_PROTO_DTR(_inst_nbits, _addr_nbits, _data_nbits) \ - (SNOR_PROTO_IS_DTR | \ - SNOR_PROTO_STR(_inst_nbits, _addr_nbits, _data_nbits)) - -enum spi_nor_protocol { - SNOR_PROTO_1_1_1 = SNOR_PROTO_STR(1, 1, 1), - SNOR_PROTO_1_1_2 = SNOR_PROTO_STR(1, 1, 2), - SNOR_PROTO_1_1_4 = SNOR_PROTO_STR(1, 1, 4), - SNOR_PROTO_1_1_8 = SNOR_PROTO_STR(1, 1, 8), - SNOR_PROTO_1_2_2 = SNOR_PROTO_STR(1, 2, 2), - SNOR_PROTO_1_4_4 = SNOR_PROTO_STR(1, 4, 4), - SNOR_PROTO_1_8_8 = SNOR_PROTO_STR(1, 8, 8), - SNOR_PROTO_2_2_2 = SNOR_PROTO_STR(2, 2, 2), - SNOR_PROTO_4_4_4 = SNOR_PROTO_STR(4, 4, 4), - SNOR_PROTO_8_8_8 = SNOR_PROTO_STR(8, 8, 8), - - SNOR_PROTO_1_1_1_DTR = SNOR_PROTO_DTR(1, 1, 1), - SNOR_PROTO_1_2_2_DTR = SNOR_PROTO_DTR(1, 2, 2), - SNOR_PROTO_1_4_4_DTR = SNOR_PROTO_DTR(1, 4, 4), - SNOR_PROTO_1_8_8_DTR = SNOR_PROTO_DTR(1, 8, 8), - SNOR_PROTO_8_8_8_DTR = SNOR_PROTO_DTR(8, 8, 8), +enum read_mode { + SPI_NOR_NORMAL = 0, + SPI_NOR_FAST, + SPI_NOR_DUAL, + SPI_NOR_QUAD, }; -static inline bool spi_nor_protocol_is_dtr(enum spi_nor_protocol proto) -{ - return !!(proto & 
SNOR_PROTO_IS_DTR); -} - -static inline u8 spi_nor_get_protocol_inst_nbits(enum spi_nor_protocol proto) -{ - return ((unsigned long)(proto & SNOR_PROTO_INST_MASK)) >> - SNOR_PROTO_INST_SHIFT; -} - -static inline u8 spi_nor_get_protocol_addr_nbits(enum spi_nor_protocol proto) -{ - return ((unsigned long)(proto & SNOR_PROTO_ADDR_MASK)) >> - SNOR_PROTO_ADDR_SHIFT; -} - -static inline u8 spi_nor_get_protocol_data_nbits(enum spi_nor_protocol proto) -{ - return ((unsigned long)(proto & SNOR_PROTO_DATA_MASK)) >> - SNOR_PROTO_DATA_SHIFT; -} - -static inline u8 spi_nor_get_protocol_width(enum spi_nor_protocol proto) -{ - return spi_nor_get_protocol_data_nbits(proto); -} - -/** - * struct spi_nor_hwcaps - Structure for describing the hardware capabilies - * supported by the SPI controller (bus master). - * @mask: the bitmask listing all the supported hw capabilies - */ -struct spi_nor_hwcaps { - u32 mask; +#define SPI_NOR_MAX_CMD_SIZE 8 +enum spi_nor_ops { + SPI_NOR_OPS_READ = 0, + SPI_NOR_OPS_WRITE, + SPI_NOR_OPS_ERASE, + SPI_NOR_OPS_LOCK, + SPI_NOR_OPS_UNLOCK, }; -/* - *(Fast) Read capabilities. - * MUST be ordered by priority: the higher bit position, the higher priority. - * As a matter of performances, it is relevant to use Octal SPI protocols first, - * then Quad SPI protocols before Dual SPI protocols, Fast Read and lastly - * (Slow) Read. 
- */ -#define SNOR_HWCAPS_READ_MASK GENMASK(15, 0) -#define SNOR_HWCAPS_READ BIT(0) -#define SNOR_HWCAPS_READ_FAST BIT(1) -#define SNOR_HWCAPS_READ_1_1_1_DTR BIT(2) - -#define SNOR_HWCAPS_READ_DUAL GENMASK(6, 3) -#define SNOR_HWCAPS_READ_1_1_2 BIT(3) -#define SNOR_HWCAPS_READ_1_2_2 BIT(4) -#define SNOR_HWCAPS_READ_2_2_2 BIT(5) -#define SNOR_HWCAPS_READ_1_2_2_DTR BIT(6) - -#define SNOR_HWCAPS_READ_QUAD GENMASK(10, 7) -#define SNOR_HWCAPS_READ_1_1_4 BIT(7) -#define SNOR_HWCAPS_READ_1_4_4 BIT(8) -#define SNOR_HWCAPS_READ_4_4_4 BIT(9) -#define SNOR_HWCAPS_READ_1_4_4_DTR BIT(10) - -#define SNOR_HWCAPS_READ_OCTAL GENMASK(15, 11) -#define SNOR_HWCAPS_READ_1_1_8 BIT(11) -#define SNOR_HWCAPS_READ_1_8_8 BIT(12) -#define SNOR_HWCAPS_READ_8_8_8 BIT(13) -#define SNOR_HWCAPS_READ_1_8_8_DTR BIT(14) -#define SNOR_HWCAPS_READ_8_8_8_DTR BIT(15) - -/* - * Page Program capabilities. - * MUST be ordered by priority: the higher bit position, the higher priority. - * Like (Fast) Read capabilities, Octal/Quad SPI protocols are preferred to the - * legacy SPI 1-1-1 protocol. - * Note that Dual Page Programs are not supported because there is no existing - * JEDEC/SFDP standard to define them. Also at this moment no SPI flash memory - * implements such commands. 
- */ -#define SNOR_HWCAPS_PP_MASK GENMASK(23, 16) -#define SNOR_HWCAPS_PP BIT(16) - -#define SNOR_HWCAPS_PP_QUAD GENMASK(19, 17) -#define SNOR_HWCAPS_PP_1_1_4 BIT(17) -#define SNOR_HWCAPS_PP_1_4_4 BIT(18) -#define SNOR_HWCAPS_PP_4_4_4 BIT(19) - -#define SNOR_HWCAPS_PP_OCTAL GENMASK(23, 20) -#define SNOR_HWCAPS_PP_1_1_8 BIT(20) -#define SNOR_HWCAPS_PP_1_8_8 BIT(21) -#define SNOR_HWCAPS_PP_8_8_8 BIT(22) -#define SNOR_HWCAPS_PP_8_8_8_DTR BIT(23) - -#define SNOR_HWCAPS_X_X_X (SNOR_HWCAPS_READ_2_2_2 | \ - SNOR_HWCAPS_READ_4_4_4 | \ - SNOR_HWCAPS_READ_8_8_8 | \ - SNOR_HWCAPS_PP_4_4_4 | \ - SNOR_HWCAPS_PP_8_8_8) - -#define SNOR_HWCAPS_X_X_X_DTR (SNOR_HWCAPS_READ_8_8_8_DTR | \ - SNOR_HWCAPS_PP_8_8_8_DTR) - -#define SNOR_HWCAPS_DTR (SNOR_HWCAPS_READ_1_1_1_DTR | \ - SNOR_HWCAPS_READ_1_2_2_DTR | \ - SNOR_HWCAPS_READ_1_4_4_DTR | \ - SNOR_HWCAPS_READ_1_8_8_DTR | \ - SNOR_HWCAPS_READ_8_8_8_DTR) - -#define SNOR_HWCAPS_ALL (SNOR_HWCAPS_READ_MASK | \ - SNOR_HWCAPS_PP_MASK) - -/* Forward declaration that is used in 'struct spi_nor_controller_ops' */ -struct spi_nor; - -/** - * struct spi_nor_controller_ops - SPI NOR controller driver specific - * operations. - * @prepare: [OPTIONAL] do some preparations for the - * read/write/erase/lock/unlock operations. - * @unprepare: [OPTIONAL] do some post work after the - * read/write/erase/lock/unlock operations. - * @read_reg: read out the register. - * @write_reg: write data to the register. - * @read: read data from the SPI NOR. - * @write: write data to the SPI NOR. - * @erase: erase a sector of the SPI NOR at the offset @offs; if - * not provided by the driver, SPI NOR will send the erase - * opcode via write_reg(). 
- */ -struct spi_nor_controller_ops { - int (*prepare)(struct spi_nor *nor); - void (*unprepare)(struct spi_nor *nor); - int (*read_reg)(struct spi_nor *nor, u8 opcode, u8 *buf, size_t len); - int (*write_reg)(struct spi_nor *nor, u8 opcode, const u8 *buf, - size_t len); - - ssize_t (*read)(struct spi_nor *nor, loff_t from, size_t len, u8 *buf); - ssize_t (*write)(struct spi_nor *nor, loff_t to, size_t len, - const u8 *buf); - int (*erase)(struct spi_nor *nor, loff_t offs); +enum spi_nor_option_flags { + SNOR_F_USE_FSR = BIT(0), + SNOR_F_HAS_SR_TB = BIT(1), }; /** - * enum spi_nor_cmd_ext - describes the command opcode extension in DTR mode - * @SPI_NOR_EXT_NONE: no extension. This is the default, and is used in Legacy - * SPI mode - * @SPI_NOR_EXT_REPEAT: the extension is same as the opcode - * @SPI_NOR_EXT_INVERT: the extension is the bitwise inverse of the opcode - * @SPI_NOR_EXT_HEX: the extension is any hex value. The command and opcode - * combine to form a 16-bit opcode. - */ -enum spi_nor_cmd_ext { - SPI_NOR_EXT_NONE = 0, - SPI_NOR_EXT_REPEAT, - SPI_NOR_EXT_INVERT, - SPI_NOR_EXT_HEX, -}; - -/* - * Forward declarations that are used internally by the core and manufacturer - * drivers. - */ -struct flash_info; -struct spi_nor_manufacturer; -struct spi_nor_flash_parameter; - -/** - * struct spi_nor - Structure for defining the SPI NOR layer - * @mtd: an mtd_info structure + * struct spi_nor - Structure for defining a the SPI NOR layer + * @mtd: point to a mtd_info structure * @lock: the lock for the read/write/erase/lock/unlock operations - * @dev: pointer to an SPI device or an SPI NOR controller device - * @spimem: pointer to the SPI memory device - * @bouncebuf: bounce buffer used when the buffer passed by the MTD - * layer is not DMA-able - * @bouncebuf_size: size of the bounce buffer - * @info: SPI NOR part JEDEC MFR ID and other info - * @manufacturer: SPI NOR manufacturer + * @dev: point to a spi device, or a spi nor controller device. 
* @page_size: the page size of the SPI NOR * @addr_width: number of address bytes * @erase_opcode: the opcode for erasing a sector * @read_opcode: the read opcode * @read_dummy: the dummy needed by the read operation * @program_opcode: the program opcode + * @flash_read: the mode of the read * @sst_write_second: used by the SST write operation - * @flags: flag options for the current SPI NOR (SNOR_F_*) - * @cmd_ext_type: the command opcode extension type for DTR mode. - * @read_proto: the SPI protocol for read operations - * @write_proto: the SPI protocol for write operations - * @reg_proto: the SPI protocol for read_reg/write_reg/erase operations - * @sfdp: the SFDP data of the flash - * @controller_ops: SPI NOR controller driver specific operations. - * @params: [FLASH-SPECIFIC] SPI NOR flash parameters and settings. - * The structure includes legacy flash parameters and - * settings that can be overwritten by the spi_nor_fixups - * hooks, or dynamically when parsing the SFDP tables. - * @dirmap: pointers to struct spi_mem_dirmap_desc for reads/writes. 
- * @priv: pointer to the private data + * @flags: flag options for the current SPI-NOR (SNOR_F_*) + * @cmd_buf: used by the write_reg + * @prepare: [OPTIONAL] do some preparations for the + * read/write/erase/lock/unlock operations + * @unprepare: [OPTIONAL] do some post work after the + * read/write/erase/lock/unlock operations + * @read_reg: [DRIVER-SPECIFIC] read out the register + * @write_reg: [DRIVER-SPECIFIC] write data to the register + * @read: [DRIVER-SPECIFIC] read data from the SPI NOR + * @write: [DRIVER-SPECIFIC] write data to the SPI NOR + * @erase: [DRIVER-SPECIFIC] erase a sector of the SPI NOR + * at the offset @offs; if not provided by the driver, + * spi-nor will send the erase opcode via write_reg() + * @flash_lock: [FLASH-SPECIFIC] lock a region of the SPI NOR + * @flash_unlock: [FLASH-SPECIFIC] unlock a region of the SPI NOR + * @flash_is_locked: [FLASH-SPECIFIC] check if a region of the SPI NOR is + * completely locked + * @priv: the private data */ struct spi_nor { struct mtd_info mtd; struct mutex lock; struct device *dev; - struct spi_mem *spimem; - u8 *bouncebuf; - size_t bouncebuf_size; - const struct flash_info *info; - const struct spi_nor_manufacturer *manufacturer; u32 page_size; u8 addr_width; u8 erase_opcode; u8 read_opcode; u8 read_dummy; u8 program_opcode; - enum spi_nor_protocol read_proto; - enum spi_nor_protocol write_proto; - enum spi_nor_protocol reg_proto; + enum read_mode flash_read; bool sst_write_second; u32 flags; - enum spi_nor_cmd_ext cmd_ext_type; - struct sfdp *sfdp; + u8 cmd_buf[SPI_NOR_MAX_CMD_SIZE]; - const struct spi_nor_controller_ops *controller_ops; + int (*prepare)(struct spi_nor *nor, enum spi_nor_ops ops); + void (*unprepare)(struct spi_nor *nor, enum spi_nor_ops ops); + int (*read_reg)(struct spi_nor *nor, u8 opcode, u8 *buf, int len); + int (*write_reg)(struct spi_nor *nor, u8 opcode, u8 *buf, int len); - struct spi_nor_flash_parameter *params; + ssize_t (*read)(struct spi_nor *nor, loff_t from, + 
size_t len, u_char *read_buf); + ssize_t (*write)(struct spi_nor *nor, loff_t to, + size_t len, const u_char *write_buf); + int (*erase)(struct spi_nor *nor, loff_t offs); - struct { - struct spi_mem_dirmap_desc *rdesc; - struct spi_mem_dirmap_desc *wdesc; - } dirmap; + int (*flash_lock)(struct spi_nor *nor, loff_t ofs, uint64_t len); + int (*flash_unlock)(struct spi_nor *nor, loff_t ofs, uint64_t len); + int (*flash_is_locked)(struct spi_nor *nor, loff_t ofs, uint64_t len); void *priv; }; @@ -442,9 +201,9 @@ static inline struct device_node *spi_nor_get_flash_node(struct spi_nor *nor) * spi_nor_scan() - scan the SPI NOR * @nor: the spi_nor structure * @name: the chip type name - * @hwcaps: the hardware capabilities supported by the controller driver + * @mode: the read mode supported by the driver * - * The drivers can use this function to scan the SPI NOR. + * The drivers can use this fuction to scan the SPI NOR. * In the scanning, it will try to get all the necessary information to * fill the mtd_info{} and the spi_nor{}. * @@ -452,13 +211,6 @@ static inline struct device_node *spi_nor_get_flash_node(struct spi_nor *nor) * * Return: 0 for success, others for failure. */ -int spi_nor_scan(struct spi_nor *nor, const char *name, - const struct spi_nor_hwcaps *hwcaps); - -/** - * spi_nor_restore_addr_mode() - restore the status of SPI NOR - * @nor: the spi_nor structure - */ -void spi_nor_restore(struct spi_nor *nor); +int spi_nor_scan(struct spi_nor *nor, const char *name, enum read_mode mode); #endif diff --git a/include/linux/mtd/super.h b/include/linux/mtd/super.h index 3608a6c36f..f456230f93 100644 --- a/include/linux/mtd/super.h +++ b/include/linux/mtd/super.h @@ -1,8 +1,12 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ /* MTD-based superblock handling * * Copyright © 2006 Red Hat, Inc. All Rights Reserved. 
* Written by David Howells (dhowells@redhat.com) + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. */ #ifndef __MTD_SUPER_H__ @@ -14,9 +18,9 @@ #include #include -extern int get_tree_mtd(struct fs_context *fc, - int (*fill_super)(struct super_block *sb, - struct fs_context *fc)); +extern struct dentry *mount_mtd(struct file_system_type *fs_type, int flags, + const char *dev_name, void *data, + int (*fill_super)(struct super_block *, void *, int)); extern void kill_mtd_super(struct super_block *sb); diff --git a/include/linux/mtd/ubi.h b/include/linux/mtd/ubi.h index 7d48ea368c..1e271cb559 100644 --- a/include/linux/mtd/ubi.h +++ b/include/linux/mtd/ubi.h @@ -1,7 +1,20 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * Copyright (c) International Business Machines Corp., 2006 * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See + * the GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + * * Author: Artem Bityutskiy (Битюцкий Ðртём) */ diff --git a/include/linux/mtd/xip.h b/include/linux/mtd/xip.h index 3cac936058..abed4dec5c 100644 --- a/include/linux/mtd/xip.h +++ b/include/linux/mtd/xip.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * MTD primitives for XIP support * @@ -8,6 +7,10 @@ * * This XIP support for MTD has been loosely inspired * by an earlier patch authored by David Woodhouse. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. */ #ifndef __LINUX_MTD_XIP_H__ @@ -27,9 +30,7 @@ * obviously not be running from flash. The __xipram is therefore marking * those functions so they get relocated to ram. */ -#ifdef CONFIG_XIP_KERNEL -#define __xipram noinline __section(".xiptext") -#endif +#define __xipram noinline __attribute__ ((__section__ (".data"))) /* * Each architecture has to provide the following macros. 
They must access @@ -89,10 +90,10 @@ #define xip_cpu_idle() do { } while (0) #endif +#else + +#define __xipram + #endif /* CONFIG_MTD_XIP */ -#ifndef __xipram -#define __xipram -#endif - #endif /* __LINUX_MTD_XIP_H__ */ diff --git a/include/linux/mutex-debug.h b/include/linux/mutex-debug.h new file mode 100644 index 0000000000..4ac8b1977b --- /dev/null +++ b/include/linux/mutex-debug.h @@ -0,0 +1,24 @@ +#ifndef __LINUX_MUTEX_DEBUG_H +#define __LINUX_MUTEX_DEBUG_H + +#include +#include +#include + +/* + * Mutexes - debugging helpers: + */ + +#define __DEBUG_MUTEX_INITIALIZER(lockname) \ + , .magic = &lockname + +#define mutex_init(mutex) \ +do { \ + static struct lock_class_key __key; \ + \ + __mutex_init((mutex), #mutex, &__key); \ +} while (0) + +extern void mutex_destroy(struct mutex *lock); + +#endif diff --git a/include/linux/mutex.h b/include/linux/mutex.h index 8f226d460f..2cb7531e7d 100644 --- a/include/linux/mutex.h +++ b/include/linux/mutex.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ /* * Mutexes: blocking mutual exclusion locks * @@ -14,23 +13,11 @@ #include #include #include +#include #include #include #include #include -#include - -#ifdef CONFIG_DEBUG_LOCK_ALLOC -# define __DEP_MAP_MUTEX_INITIALIZER(lockname) \ - , .dep_map = { \ - .name = #lockname, \ - .wait_type_inner = LD_WAIT_SLEEP, \ - } -#else -# define __DEP_MAP_MUTEX_INITIALIZER(lockname) -#endif - -#ifndef CONFIG_PREEMPT_RT /* * Simple, straightforward mutexes with strict semantics: @@ -61,12 +48,16 @@ * locks and tasks (and only those tasks) */ struct mutex { - atomic_long_t owner; - raw_spinlock_t wait_lock; + /* 1: unlocked, 0: locked, negative: locked, possible waiters */ + atomic_t count; + spinlock_t wait_lock; + struct list_head wait_list; +#if defined(CONFIG_DEBUG_MUTEXES) || defined(CONFIG_MUTEX_SPIN_ON_OWNER) + struct task_struct *owner; +#endif #ifdef CONFIG_MUTEX_SPIN_ON_OWNER struct optimistic_spin_queue osq; /* Spinner MCS lock */ #endif - struct list_head 
wait_list; #ifdef CONFIG_DEBUG_MUTEXES void *magic; #endif @@ -75,21 +66,22 @@ struct mutex { #endif }; +/* + * This is the control structure for tasks blocked on mutex, + * which resides on the blocked task's kernel stack: + */ +struct mutex_waiter { + struct list_head list; + struct task_struct *task; #ifdef CONFIG_DEBUG_MUTEXES - -#define __DEBUG_MUTEX_INITIALIZER(lockname) \ - , .magic = &lockname - -extern void mutex_destroy(struct mutex *lock); - -#else - -# define __DEBUG_MUTEX_INITIALIZER(lockname) - -static inline void mutex_destroy(struct mutex *lock) {} - + void *magic; #endif +}; +#ifdef CONFIG_DEBUG_MUTEXES +# include +#else +# define __DEBUG_MUTEX_INITIALIZER(lockname) /** * mutex_init - initialize the mutex * @mutex: the mutex to be initialized @@ -98,16 +90,25 @@ static inline void mutex_destroy(struct mutex *lock) {} * * It is not allowed to initialize an already locked mutex. */ -#define mutex_init(mutex) \ -do { \ - static struct lock_class_key __key; \ - \ - __mutex_init((mutex), #mutex, &__key); \ +# define mutex_init(mutex) \ +do { \ + static struct lock_class_key __key; \ + \ + __mutex_init((mutex), #mutex, &__key); \ } while (0) +static inline void mutex_destroy(struct mutex *lock) {} +#endif + +#ifdef CONFIG_DEBUG_LOCK_ALLOC +# define __DEP_MAP_MUTEX_INITIALIZER(lockname) \ + , .dep_map = { .name = #lockname } +#else +# define __DEP_MAP_MUTEX_INITIALIZER(lockname) +#endif #define __MUTEX_INITIALIZER(lockname) \ - { .owner = ATOMIC_LONG_INIT(0) \ - , .wait_lock = __RAW_SPIN_LOCK_UNLOCKED(lockname.wait_lock) \ + { .count = ATOMIC_INIT(1) \ + , .wait_lock = __SPIN_LOCK_UNLOCKED(lockname.wait_lock) \ , .wait_list = LIST_HEAD_INIT(lockname.wait_list) \ __DEBUG_MUTEX_INITIALIZER(lockname) \ __DEP_MAP_MUTEX_INITIALIZER(lockname) } @@ -122,57 +123,16 @@ extern void __mutex_init(struct mutex *lock, const char *name, * mutex_is_locked - is the mutex locked * @lock: the mutex to be queried * - * Returns true if the mutex is locked, false if unlocked. 
+ * Returns 1 if the mutex is locked, 0 if unlocked. */ -extern bool mutex_is_locked(struct mutex *lock); - -#else /* !CONFIG_PREEMPT_RT */ -/* - * Preempt-RT variant based on rtmutexes. - */ -#include - -struct mutex { - struct rt_mutex_base rtmutex; -#ifdef CONFIG_DEBUG_LOCK_ALLOC - struct lockdep_map dep_map; -#endif -}; - -#define __MUTEX_INITIALIZER(mutexname) \ -{ \ - .rtmutex = __RT_MUTEX_BASE_INITIALIZER(mutexname.rtmutex) \ - __DEP_MAP_MUTEX_INITIALIZER(mutexname) \ +static inline int mutex_is_locked(struct mutex *lock) +{ + return atomic_read(&lock->count) != 1; } -#define DEFINE_MUTEX(mutexname) \ - struct mutex mutexname = __MUTEX_INITIALIZER(mutexname) - -extern void __mutex_rt_init(struct mutex *lock, const char *name, - struct lock_class_key *key); -extern int mutex_trylock(struct mutex *lock); - -static inline void mutex_destroy(struct mutex *lock) { } - -#define mutex_is_locked(l) rt_mutex_base_is_locked(&(l)->rtmutex) - -#define __mutex_init(mutex, name, key) \ -do { \ - rt_mutex_base_init(&(mutex)->rtmutex); \ - __mutex_rt_init((mutex), name, key); \ -} while (0) - -#define mutex_init(mutex) \ -do { \ - static struct lock_class_key __key; \ - \ - __mutex_init((mutex), #mutex, &__key); \ -} while (0) -#endif /* CONFIG_PREEMPT_RT */ - /* * See kernel/locking/mutex.c for detailed documentation of these APIs. - * Also see Documentation/locking/mutex-design.rst. + * Also see Documentation/locking/mutex-design.txt. 
*/ #ifdef CONFIG_DEBUG_LOCK_ALLOC extern void mutex_lock_nested(struct mutex *lock, unsigned int subclass); @@ -182,12 +142,10 @@ extern int __must_check mutex_lock_interruptible_nested(struct mutex *lock, unsigned int subclass); extern int __must_check mutex_lock_killable_nested(struct mutex *lock, unsigned int subclass); -extern void mutex_lock_io_nested(struct mutex *lock, unsigned int subclass); #define mutex_lock(lock) mutex_lock_nested(lock, 0) #define mutex_lock_interruptible(lock) mutex_lock_interruptible_nested(lock, 0) #define mutex_lock_killable(lock) mutex_lock_killable_nested(lock, 0) -#define mutex_lock_io(lock) mutex_lock_io_nested(lock, 0) #define mutex_lock_nest_lock(lock, nest_lock) \ do { \ @@ -199,13 +157,11 @@ do { \ extern void mutex_lock(struct mutex *lock); extern int __must_check mutex_lock_interruptible(struct mutex *lock); extern int __must_check mutex_lock_killable(struct mutex *lock); -extern void mutex_lock_io(struct mutex *lock); # define mutex_lock_nested(lock, subclass) mutex_lock(lock) # define mutex_lock_interruptible_nested(lock, subclass) mutex_lock_interruptible(lock) # define mutex_lock_killable_nested(lock, subclass) mutex_lock_killable(lock) # define mutex_lock_nest_lock(lock, nest_lock) mutex_lock(lock) -# define mutex_lock_io_nested(lock, subclass) mutex_lock_io(lock) #endif /* diff --git a/include/linux/mv643xx.h b/include/linux/mv643xx.h index 000b126acf..69327b7b4c 100644 --- a/include/linux/mv643xx.h +++ b/include/linux/mv643xx.h @@ -1,10 +1,14 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * mv643xx.h - MV-643XX Internal registers definition file. * * Copyright 2002 Momentum Computer, Inc. * Author: Matthew Dharm * Copyright 2002 GALILEO TECHNOLOGY, LTD. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. 
*/ #ifndef __ASM_MV643XX_H #define __ASM_MV643XX_H @@ -918,4 +922,58 @@ extern void mv64340_irq_init(unsigned int base); +/* MPSC Platform Device, Driver Data (Shared register regions) */ +#define MPSC_SHARED_NAME "mpsc_shared" + +#define MPSC_ROUTING_BASE_ORDER 0 +#define MPSC_SDMA_INTR_BASE_ORDER 1 + +#define MPSC_ROUTING_REG_BLOCK_SIZE 0x000c +#define MPSC_SDMA_INTR_REG_BLOCK_SIZE 0x0084 + +struct mpsc_shared_pdata { + u32 mrr_val; + u32 rcrr_val; + u32 tcrr_val; + u32 intr_cause_val; + u32 intr_mask_val; +}; + +/* MPSC Platform Device, Driver Data */ +#define MPSC_CTLR_NAME "mpsc" + +#define MPSC_BASE_ORDER 0 +#define MPSC_SDMA_BASE_ORDER 1 +#define MPSC_BRG_BASE_ORDER 2 + +#define MPSC_REG_BLOCK_SIZE 0x0038 +#define MPSC_SDMA_REG_BLOCK_SIZE 0x0c18 +#define MPSC_BRG_REG_BLOCK_SIZE 0x0008 + +struct mpsc_pdata { + u8 mirror_regs; + u8 cache_mgmt; + u8 max_idle; + int default_baud; + int default_bits; + int default_parity; + int default_flow; + u32 chr_1_val; + u32 chr_2_val; + u32 chr_10_val; + u32 mpcr_val; + u32 bcr_val; + u8 brg_can_tune; + u8 brg_clk_src; + u32 brg_clk_freq; +}; + +/* Watchdog Platform Device, Driver Data */ +#define MV64x60_WDT_NAME "mv64x60_wdt" + +struct mv64x60_wdt_pdata { + int timeout; /* watchdog expiry in seconds, default 10 */ + int bus_clk; /* bus clock in MHz, default 133 */ +}; + #endif /* __ASM_MV643XX_H */ diff --git a/include/linux/mv643xx_eth.h b/include/linux/mv643xx_eth.h index 3682ae75c7..61a0da38d0 100644 --- a/include/linux/mv643xx_eth.h +++ b/include/linux/mv643xx_eth.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ /* * MV-643XX ethernet platform device data definition file. 
*/ diff --git a/include/linux/mv643xx_i2c.h b/include/linux/mv643xx_i2c.h index b2844e1cac..5db5152e9d 100644 --- a/include/linux/mv643xx_i2c.h +++ b/include/linux/mv643xx_i2c.h @@ -1,5 +1,8 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ /* + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. */ #ifndef _MV64XXX_I2C_H_ diff --git a/include/linux/mxm-wmi.h b/include/linux/mxm-wmi.h index 28b5b4c2a7..617a295052 100644 --- a/include/linux/mxm-wmi.h +++ b/include/linux/mxm-wmi.h @@ -1,8 +1,21 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * MXM WMI driver * * Copyright(C) 2010 Red Hat. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #ifndef MXM_WMI_H diff --git a/include/linux/namei.h b/include/linux/namei.h index e89329bb31..f29abda31e 100644 --- a/include/linux/namei.h +++ b/include/linux/namei.h @@ -1,8 +1,6 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_NAMEI_H #define _LINUX_NAMEI_H -#include #include #include #include @@ -15,37 +13,37 @@ enum { MAX_NESTED_LINKS = 8 }; /* * Type of the last component on LOOKUP_PARENT */ -enum {LAST_NORM, LAST_ROOT, LAST_DOT, LAST_DOTDOT}; +enum {LAST_NORM, LAST_ROOT, LAST_DOT, LAST_DOTDOT, LAST_BIND}; -/* pathwalk mode */ -#define LOOKUP_FOLLOW 0x0001 /* follow links at the end */ -#define LOOKUP_DIRECTORY 0x0002 /* require a directory */ -#define LOOKUP_AUTOMOUNT 0x0004 /* force terminal automount */ -#define LOOKUP_EMPTY 0x4000 /* accept empty path [user_... only] */ -#define LOOKUP_DOWN 0x8000 /* follow mounts in the starting point */ -#define LOOKUP_MOUNTPOINT 0x0080 /* follow mounts in the end */ +/* + * The bitmask for a lookup event: + * - follow links at the end + * - require a directory + * - ending slashes ok even for nonexistent files + * - internal "there are more path components" flag + * - dentry cache is untrusted; force a real lookup + * - suppress terminal automount + */ +#define LOOKUP_FOLLOW 0x0001 +#define LOOKUP_DIRECTORY 0x0002 +#define LOOKUP_AUTOMOUNT 0x0004 -#define LOOKUP_REVAL 0x0020 /* tell ->d_revalidate() to trust no cache */ -#define LOOKUP_RCU 0x0040 /* RCU pathwalk mode; semi-internal */ - -/* These tell filesystem methods that we are dealing with the final component... */ -#define LOOKUP_OPEN 0x0100 /* ... in open */ -#define LOOKUP_CREATE 0x0200 /* ... in object creation */ -#define LOOKUP_EXCL 0x0400 /* ... in exclusive creation */ -#define LOOKUP_RENAME_TARGET 0x0800 /* ... 
in destination of rename() */ - -/* internal use only */ #define LOOKUP_PARENT 0x0010 +#define LOOKUP_REVAL 0x0020 +#define LOOKUP_RCU 0x0040 +#define LOOKUP_NO_REVAL 0x0080 -/* Scoping flags for lookup. */ -#define LOOKUP_NO_SYMLINKS 0x010000 /* No symlink crossing. */ -#define LOOKUP_NO_MAGICLINKS 0x020000 /* No nd_jump_link() crossing. */ -#define LOOKUP_NO_XDEV 0x040000 /* No mountpoint crossing. */ -#define LOOKUP_BENEATH 0x080000 /* No escaping from starting point. */ -#define LOOKUP_IN_ROOT 0x100000 /* Treat dirfd as fs root. */ -#define LOOKUP_CACHED 0x200000 /* Only do cached lookup */ -/* LOOKUP_* flags which do scope-related checks based on the dirfd. */ -#define LOOKUP_IS_SCOPED (LOOKUP_BENEATH | LOOKUP_IN_ROOT) +/* + * Intent data + */ +#define LOOKUP_OPEN 0x0100 +#define LOOKUP_CREATE 0x0200 +#define LOOKUP_EXCL 0x0400 +#define LOOKUP_RENAME_TARGET 0x0800 + +#define LOOKUP_JUMPED 0x1000 +#define LOOKUP_ROOT 0x2000 +#define LOOKUP_EMPTY 0x4000 extern int path_pts(struct path *path); @@ -57,18 +55,32 @@ static inline int user_path_at(int dfd, const char __user *name, unsigned flags, return user_path_at_empty(dfd, name, flags, path, NULL); } +static inline int user_path(const char __user *name, struct path *path) +{ + return user_path_at_empty(AT_FDCWD, name, LOOKUP_FOLLOW, path, NULL); +} + +static inline int user_lpath(const char __user *name, struct path *path) +{ + return user_path_at_empty(AT_FDCWD, name, 0, path, NULL); +} + +static inline int user_path_dir(const char __user *name, struct path *path) +{ + return user_path_at_empty(AT_FDCWD, name, + LOOKUP_FOLLOW | LOOKUP_DIRECTORY, path, NULL); +} + extern int kern_path(const char *, unsigned, struct path *); extern struct dentry *kern_path_create(int, const char *, struct path *, unsigned int); extern struct dentry *user_path_create(int, const char __user *, struct path *, unsigned int); extern void done_path_create(struct path *, struct dentry *); extern struct dentry *kern_path_locked(const char 
*, struct path *); +extern int kern_path_mountpoint(int, const char *, struct path *, unsigned int); -extern struct dentry *try_lookup_one_len(const char *, struct dentry *, int); extern struct dentry *lookup_one_len(const char *, struct dentry *, int); extern struct dentry *lookup_one_len_unlocked(const char *, struct dentry *, int); -extern struct dentry *lookup_positive_unlocked(const char *, struct dentry *, int); -struct dentry *lookup_one(struct user_namespace *, const char *, struct dentry *, int); extern int follow_down_one(struct path *); extern int follow_down(struct path *); @@ -77,7 +89,7 @@ extern int follow_up(struct path *); extern struct dentry *lock_rename(struct dentry *, struct dentry *); extern void unlock_rename(struct dentry *, struct dentry *); -extern int __must_check nd_jump_link(struct path *path); +extern void nd_jump_link(struct path *path); static inline void nd_terminate_link(void *name, size_t len, size_t maxlen) { diff --git a/include/linux/nd.h b/include/linux/nd.h index ee9ad76afb..fa66aeed44 100644 --- a/include/linux/nd.h +++ b/include/linux/nd.h @@ -1,6 +1,14 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright(c) 2013-2015 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. 
*/ #ifndef __LINUX_ND_H__ #define __LINUX_ND_H__ @@ -11,23 +19,13 @@ enum nvdimm_event { NVDIMM_REVALIDATE_POISON, - NVDIMM_REVALIDATE_REGION, -}; - -enum nvdimm_claim_class { - NVDIMM_CCLASS_NONE, - NVDIMM_CCLASS_BTT, - NVDIMM_CCLASS_BTT2, - NVDIMM_CCLASS_PFN, - NVDIMM_CCLASS_DAX, - NVDIMM_CCLASS_UNKNOWN, }; struct nd_device_driver { struct device_driver drv; unsigned long type; int (*probe)(struct device *dev); - void (*remove)(struct device *dev); + int (*remove)(struct device *dev); void (*shutdown)(struct device *dev); void (*notify)(struct device *dev, enum nvdimm_event event); }; @@ -43,16 +41,14 @@ static inline struct nd_device_driver *to_nd_device_driver( * @force_raw: ignore other personalities for the namespace (e.g. btt) * @dev: device model node * @claim: when set a another personality has taken ownership of the namespace - * @claim_class: restrict claim type to a given class * @rw_bytes: access the raw namespace capacity with byte-aligned transfers */ struct nd_namespace_common { int force_raw; struct device dev; struct device *claim; - enum nvdimm_claim_class claim_class; int (*rw_bytes)(struct nd_namespace_common *, resource_size_t offset, - void *buf, size_t size, int rw, unsigned long flags); + void *buf, size_t size, int rw); }; static inline struct nd_namespace_common *to_ndns(struct device *dev) @@ -79,14 +75,12 @@ struct nd_namespace_io { /** * struct nd_namespace_pmem - namespace device for dimm-backed interleaved memory * @nsio: device and system physical address range to drive - * @lbasize: logical sector size for the namespace in block-device-mode * @alt_name: namespace name supplied in the dimm label * @uuid: namespace name supplied in the dimm label * @id: ida allocated id */ struct nd_namespace_pmem { struct nd_namespace_io nsio; - unsigned long lbasize; char *alt_name; u8 *uuid; int id; @@ -140,15 +134,14 @@ static inline struct nd_namespace_blk *to_nd_namespace_blk(const struct device * * @buf is up-to-date upon return from this 
routine. */ static inline int nvdimm_read_bytes(struct nd_namespace_common *ndns, - resource_size_t offset, void *buf, size_t size, - unsigned long flags) + resource_size_t offset, void *buf, size_t size) { - return ndns->rw_bytes(ndns, offset, buf, size, READ, flags); + return ndns->rw_bytes(ndns, offset, buf, size, READ); } /** * nvdimm_write_bytes() - synchronously write bytes to an nvdimm namespace - * @ndns: device to write + * @ndns: device to read * @offset: namespace-relative starting offset * @buf: buffer to drain * @size: transfer length @@ -159,10 +152,9 @@ static inline int nvdimm_read_bytes(struct nd_namespace_common *ndns, * to media is handled internal to the @ndns driver, if at all. */ static inline int nvdimm_write_bytes(struct nd_namespace_common *ndns, - resource_size_t offset, void *buf, size_t size, - unsigned long flags) + resource_size_t offset, void *buf, size_t size) { - return ndns->rw_bytes(ndns, offset, buf, size, WRITE, flags); + return ndns->rw_bytes(ndns, offset, buf, size, WRITE); } #define MODULE_ALIAS_ND_DEVICE(type) \ @@ -173,12 +165,6 @@ struct nd_region; void nvdimm_region_notify(struct nd_region *nd_region, enum nvdimm_event event); int __must_check __nd_driver_register(struct nd_device_driver *nd_drv, struct module *module, const char *mod_name); -static inline void nd_driver_unregister(struct nd_device_driver *drv) -{ - driver_unregister(&drv->drv); -} #define nd_driver_register(driver) \ __nd_driver_register(driver, THIS_MODULE, KBUILD_MODNAME) -#define module_nd_driver(driver) \ - module_driver(driver, nd_driver_register, nd_driver_unregister) #endif /* __LINUX_ND_H__ */ diff --git a/include/linux/net.h b/include/linux/net.h index ba736b457a..8c20e41b9f 100644 --- a/include/linux/net.h +++ b/include/linux/net.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * NET An implementation of the SOCKET network access protocol. 
* This is the master header file for the Linux NET layer, @@ -10,6 +9,11 @@ * Authors: Orest Zborowski, * Ross Biro * Fred N. van Kempen, + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. */ #ifndef _LINUX_NET_H #define _LINUX_NET_H @@ -18,11 +22,10 @@ #include #include #include /* For O_CLOEXEC and O_NONBLOCK */ +#include #include #include #include -#include -#include #include @@ -34,7 +37,7 @@ struct net; /* Historically, SOCKWQ_ASYNC_NOSPACE & SOCKWQ_ASYNC_WAITDATA were located * in sock->flags, but moved into sk->sk_wq->flags to be RCU protected. - * Eventually all flags will be in sk->sk_wq->flags. + * Eventually all flags will be in sk->sk_wq_flags. */ #define SOCKWQ_ASYNC_NOSPACE 0 #define SOCKWQ_ASYNC_WAITDATA 1 @@ -81,12 +84,6 @@ enum sock_type { #endif /* ARCH_HAS_SOCKET_TYPES */ -/** - * enum sock_shutdown_cmd - Shutdown types - * @SHUT_RD: shutdown receptions - * @SHUT_WR: shutdown transmissions - * @SHUT_RDWR: shutdown receptions/transmissions - */ enum sock_shutdown_cmd { SHUT_RD, SHUT_WR, @@ -114,15 +111,17 @@ struct socket_wq { struct socket { socket_state state; + kmemcheck_bitfield_begin(type); short type; + kmemcheck_bitfield_end(type); unsigned long flags; + struct socket_wq __rcu *wq; + struct file *file; struct sock *sk; const struct proto_ops *ops; - - struct socket_wq wq; }; struct vm_area_struct; @@ -147,11 +146,11 @@ struct proto_ops { int (*socketpair)(struct socket *sock1, struct socket *sock2); int (*accept) (struct socket *sock, - struct socket *newsock, int flags, bool kern); + struct socket *newsock, int flags); int (*getname) (struct socket *sock, struct sockaddr *addr, - int peer); - __poll_t (*poll) (struct file *file, struct socket *sock, + int *sockaddr_len, int peer); + unsigned int (*poll) (struct file *file, struct 
socket *sock, struct poll_table_struct *wait); int (*ioctl) (struct socket *sock, unsigned int cmd, unsigned long arg); @@ -159,16 +158,18 @@ struct proto_ops { int (*compat_ioctl) (struct socket *sock, unsigned int cmd, unsigned long arg); #endif - int (*gettstamp) (struct socket *sock, void __user *userstamp, - bool timeval, bool time32); int (*listen) (struct socket *sock, int len); int (*shutdown) (struct socket *sock, int flags); int (*setsockopt)(struct socket *sock, int level, - int optname, sockptr_t optval, - unsigned int optlen); + int optname, char __user *optval, unsigned int optlen); int (*getsockopt)(struct socket *sock, int level, int optname, char __user *optval, int __user *optlen); - void (*show_fdinfo)(struct seq_file *m, struct socket *sock); +#ifdef CONFIG_COMPAT + int (*compat_setsockopt)(struct socket *sock, int level, + int optname, char __user *optval, unsigned int optlen); + int (*compat_getsockopt)(struct socket *sock, int level, + int optname, char __user *optval, int __user *optlen); +#endif int (*sendmsg) (struct socket *sock, struct msghdr *m, size_t total_len); /* Notes for implementing recvmsg: @@ -189,17 +190,8 @@ struct proto_ops { struct pipe_inode_info *pipe, size_t len, unsigned int flags); int (*set_peek_off)(struct sock *sk, int val); int (*peek_len)(struct socket *sock); - - /* The following functions are called internally by kernel with - * sock lock already held. 
- */ int (*read_sock)(struct sock *sk, read_descriptor_t *desc, sk_read_actor_t recv_actor); - int (*sendpage_locked)(struct sock *sk, struct page *page, - int offset, size_t size, int flags); - int (*sendmsg_locked)(struct sock *sk, struct msghdr *msg, - size_t size); - int (*set_rcvlowat)(struct sock *sk, int val); }; #define DECLARE_SOCKADDR(type, dst, src) \ @@ -210,7 +202,7 @@ struct net_proto_family { int (*create)(struct net *net, struct socket *sock, int protocol, int kern); struct module *owner; -}; +} __do_const; struct iovec; struct kvec; @@ -225,7 +217,6 @@ enum { int sock_wake_async(struct socket_wq *sk_wq, int how, int band); int sock_register(const struct net_proto_family *fam); void sock_unregister(int family); -bool sock_is_registered(int family); int __sock_create(struct net *net, int family, int type, int proto, struct socket **res, int kern); int sock_create(int family, int type, int proto, struct socket **res); @@ -237,7 +228,7 @@ int sock_sendmsg(struct socket *sock, struct msghdr *msg); int sock_recvmsg(struct socket *sock, struct msghdr *msg, int flags); struct file *sock_alloc_file(struct socket *sock, int flags, const char *dname); struct socket *sockfd_lookup(int fd, int *err); -struct socket *sock_from_file(struct file *file); +struct socket *sock_from_file(struct file *file, int *err); #define sockfd_put(sock) fput(sock->file) int net_ratelimit(void); @@ -261,12 +252,11 @@ do { \ net_ratelimited_function(pr_warn, fmt, ##__VA_ARGS__) #define net_info_ratelimited(fmt, ...) \ net_ratelimited_function(pr_info, fmt, ##__VA_ARGS__) -#if defined(CONFIG_DYNAMIC_DEBUG) || \ - (defined(CONFIG_DYNAMIC_DEBUG_CORE) && defined(DYNAMIC_DEBUG_MODULE)) +#if defined(CONFIG_DYNAMIC_DEBUG) #define net_dbg_ratelimited(fmt, ...) 
\ do { \ DEFINE_DYNAMIC_DEBUG_METADATA(descriptor, fmt); \ - if (DYNAMIC_DEBUG_BRANCH(descriptor) && \ + if (unlikely(descriptor.flags & _DPRINTK_FLAGS_PRINT) && \ net_ratelimit()) \ __dynamic_pr_debug(&descriptor, pr_fmt(fmt), \ ##__VA_ARGS__); \ @@ -284,28 +274,9 @@ do { \ #define net_get_random_once(buf, nbytes) \ get_random_once((buf), (nbytes)) -#define net_get_random_once_wait(buf, nbytes) \ - get_random_once_wait((buf), (nbytes)) - -/* - * E.g. XFS meta- & log-data is in slab pages, or bcache meta - * data pages, or other high order pages allocated by - * __get_free_pages() without __GFP_COMP, which have a page_count - * of 0 and/or have PageSlab() set. We cannot use send_page for - * those, as that does get_page(); put_page(); and would cause - * either a VM_BUG directly, or __page_cache_release a page that - * would actually still be referenced by someone, leading to some - * obscure delayed Oops somewhere else. - */ -static inline bool sendpage_ok(struct page *page) -{ - return !PageSlab(page) && page_count(page) >= 1; -} int kernel_sendmsg(struct socket *sock, struct msghdr *msg, struct kvec *vec, size_t num, size_t len); -int kernel_sendmsg_locked(struct sock *sk, struct msghdr *msg, - struct kvec *vec, size_t num, size_t len); int kernel_recvmsg(struct socket *sock, struct msghdr *msg, struct kvec *vec, size_t num, size_t len, int flags); @@ -314,17 +285,19 @@ int kernel_listen(struct socket *sock, int backlog); int kernel_accept(struct socket *sock, struct socket **newsock, int flags); int kernel_connect(struct socket *sock, struct sockaddr *addr, int addrlen, int flags); -int kernel_getsockname(struct socket *sock, struct sockaddr *addr); -int kernel_getpeername(struct socket *sock, struct sockaddr *addr); +int kernel_getsockname(struct socket *sock, struct sockaddr *addr, + int *addrlen); +int kernel_getpeername(struct socket *sock, struct sockaddr *addr, + int *addrlen); +int kernel_getsockopt(struct socket *sock, int level, int optname, char 
*optval, + int *optlen); +int kernel_setsockopt(struct socket *sock, int level, int optname, char *optval, + unsigned int optlen); int kernel_sendpage(struct socket *sock, struct page *page, int offset, size_t size, int flags); -int kernel_sendpage_locked(struct sock *sk, struct page *page, int offset, - size_t size, int flags); +int kernel_sock_ioctl(struct socket *sock, int cmd, unsigned long arg); int kernel_sock_shutdown(struct socket *sock, enum sock_shutdown_cmd how); -/* Routine returns the IP overhead imposed by a (caller-protected) socket. */ -u32 kernel_sock_ip_overhead(struct sock *sk); - #define MODULE_ALIAS_NETPROTO(proto) \ MODULE_ALIAS("net-pf-" __stringify(proto)) diff --git a/include/linux/netdev_features.h b/include/linux/netdev_features.h index 2c6b9e4162..9c6c8ef2e9 100644 --- a/include/linux/netdev_features.h +++ b/include/linux/netdev_features.h @@ -1,13 +1,16 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * Network device features. + * + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. */ #ifndef _LINUX_NETDEV_FEATURES_H #define _LINUX_NETDEV_FEATURES_H #include -#include -#include typedef u64 netdev_features_t; @@ -33,6 +36,7 @@ enum { /**/NETIF_F_GSO_SHIFT, /* keep the order of SKB_GSO_* bits */ NETIF_F_TSO_BIT /* ... TCPv4 segmentation */ = NETIF_F_GSO_SHIFT, + NETIF_F_UFO_BIT, /* ... UDPv4 fragmentation */ NETIF_F_GSO_ROBUST_BIT, /* ... ->SKB_GSO_DODGY */ NETIF_F_TSO_ECN_BIT, /* ... TCP ECN support */ NETIF_F_TSO_MANGLEID_BIT, /* ... IPV4 ID mangling allowed */ @@ -50,12 +54,8 @@ enum { */ NETIF_F_GSO_TUNNEL_REMCSUM_BIT, /* ... TUNNEL with TSO & REMCSUM */ NETIF_F_GSO_SCTP_BIT, /* ... SCTP fragmentation */ - NETIF_F_GSO_ESP_BIT, /* ... ESP with TSO */ - NETIF_F_GSO_UDP_BIT, /* ... 
UFO, deprecated except tuntap */ - NETIF_F_GSO_UDP_L4_BIT, /* ... UDP payload GSO (not UFO) */ - NETIF_F_GSO_FRAGLIST_BIT, /* ... Fraglist GSO */ /**/NETIF_F_GSO_LAST = /* last bit, see GSO_MASK */ - NETIF_F_GSO_FRAGLIST_BIT, + NETIF_F_GSO_SCTP_BIT, NETIF_F_FCOE_CRC_BIT, /* FCoE CRC32 */ NETIF_F_SCTP_CRC_BIT, /* SCTP checksum offload */ @@ -71,31 +71,15 @@ enum { NETIF_F_HW_VLAN_STAG_RX_BIT, /* Receive VLAN STAG HW acceleration */ NETIF_F_HW_VLAN_STAG_FILTER_BIT,/* Receive filtering on VLAN STAGs */ NETIF_F_HW_L2FW_DOFFLOAD_BIT, /* Allow L2 Forwarding in Hardware */ + NETIF_F_BUSY_POLL_BIT, /* Busy poll */ NETIF_F_HW_TC_BIT, /* Offload TC infrastructure */ - NETIF_F_HW_ESP_BIT, /* Hardware ESP transformation offload */ - NETIF_F_HW_ESP_TX_CSUM_BIT, /* ESP with TX checksum offload */ - NETIF_F_RX_UDP_TUNNEL_PORT_BIT, /* Offload of RX port for UDP tunnels */ - NETIF_F_HW_TLS_TX_BIT, /* Hardware TLS TX offload */ - NETIF_F_HW_TLS_RX_BIT, /* Hardware TLS RX offload */ - - NETIF_F_GRO_HW_BIT, /* Hardware Generic receive offload */ - NETIF_F_HW_TLS_RECORD_BIT, /* Offload TLS record */ - NETIF_F_GRO_FRAGLIST_BIT, /* Fraglist GRO */ - - NETIF_F_HW_MACSEC_BIT, /* Offload MACsec operations */ - NETIF_F_GRO_UDP_FWD_BIT, /* Allow UDP GRO for forwarding */ - - NETIF_F_HW_HSR_TAG_INS_BIT, /* Offload HSR tag insertion */ - NETIF_F_HW_HSR_TAG_RM_BIT, /* Offload HSR tag removal */ - NETIF_F_HW_HSR_FWD_BIT, /* Offload HSR forwarding */ - NETIF_F_HW_HSR_DUP_BIT, /* Offload HSR duplication */ /* * Add your fresh new feature above and remember to update - * netdev_features_strings[] in net/ethtool/common.c and maybe + * netdev_features_strings[] in net/core/ethtool.c and maybe * some feature mask #defines below. Please also describe it - * in Documentation/networking/netdev-features.rst. + * in Documentation/networking/netdev-features.txt. 
*/ /**/NETDEV_FEATURE_COUNT @@ -110,7 +94,6 @@ enum { #define NETIF_F_FRAGLIST __NETIF_F(FRAGLIST) #define NETIF_F_FSO __NETIF_F(FSO) #define NETIF_F_GRO __NETIF_F(GRO) -#define NETIF_F_GRO_HW __NETIF_F(GRO_HW) #define NETIF_F_GSO __NETIF_F(GSO) #define NETIF_F_GSO_ROBUST __NETIF_F(GSO_ROBUST) #define NETIF_F_HIGHDMA __NETIF_F(HIGHDMA) @@ -133,6 +116,7 @@ enum { #define NETIF_F_TSO6 __NETIF_F(TSO6) #define NETIF_F_TSO_ECN __NETIF_F(TSO_ECN) #define NETIF_F_TSO __NETIF_F(TSO) +#define NETIF_F_UFO __NETIF_F(UFO) #define NETIF_F_VLAN_CHALLENGED __NETIF_F(VLAN_CHALLENGED) #define NETIF_F_RXFCS __NETIF_F(RXFCS) #define NETIF_F_RXALL __NETIF_F(RXALL) @@ -146,49 +130,15 @@ enum { #define NETIF_F_GSO_PARTIAL __NETIF_F(GSO_PARTIAL) #define NETIF_F_GSO_TUNNEL_REMCSUM __NETIF_F(GSO_TUNNEL_REMCSUM) #define NETIF_F_GSO_SCTP __NETIF_F(GSO_SCTP) -#define NETIF_F_GSO_ESP __NETIF_F(GSO_ESP) -#define NETIF_F_GSO_UDP __NETIF_F(GSO_UDP) #define NETIF_F_HW_VLAN_STAG_FILTER __NETIF_F(HW_VLAN_STAG_FILTER) #define NETIF_F_HW_VLAN_STAG_RX __NETIF_F(HW_VLAN_STAG_RX) #define NETIF_F_HW_VLAN_STAG_TX __NETIF_F(HW_VLAN_STAG_TX) #define NETIF_F_HW_L2FW_DOFFLOAD __NETIF_F(HW_L2FW_DOFFLOAD) +#define NETIF_F_BUSY_POLL __NETIF_F(BUSY_POLL) #define NETIF_F_HW_TC __NETIF_F(HW_TC) -#define NETIF_F_HW_ESP __NETIF_F(HW_ESP) -#define NETIF_F_HW_ESP_TX_CSUM __NETIF_F(HW_ESP_TX_CSUM) -#define NETIF_F_RX_UDP_TUNNEL_PORT __NETIF_F(RX_UDP_TUNNEL_PORT) -#define NETIF_F_HW_TLS_RECORD __NETIF_F(HW_TLS_RECORD) -#define NETIF_F_GSO_UDP_L4 __NETIF_F(GSO_UDP_L4) -#define NETIF_F_HW_TLS_TX __NETIF_F(HW_TLS_TX) -#define NETIF_F_HW_TLS_RX __NETIF_F(HW_TLS_RX) -#define NETIF_F_GRO_FRAGLIST __NETIF_F(GRO_FRAGLIST) -#define NETIF_F_GSO_FRAGLIST __NETIF_F(GSO_FRAGLIST) -#define NETIF_F_HW_MACSEC __NETIF_F(HW_MACSEC) -#define NETIF_F_GRO_UDP_FWD __NETIF_F(GRO_UDP_FWD) -#define NETIF_F_HW_HSR_TAG_INS __NETIF_F(HW_HSR_TAG_INS) -#define NETIF_F_HW_HSR_TAG_RM __NETIF_F(HW_HSR_TAG_RM) -#define NETIF_F_HW_HSR_FWD 
__NETIF_F(HW_HSR_FWD) -#define NETIF_F_HW_HSR_DUP __NETIF_F(HW_HSR_DUP) -/* Finds the next feature with the highest number of the range of start till 0. - */ -static inline int find_next_netdev_feature(u64 feature, unsigned long start) -{ - /* like BITMAP_LAST_WORD_MASK() for u64 - * this sets the most significant 64 - start to 0. - */ - feature &= ~0ULL >> (-start & ((sizeof(feature) * 8) - 1)); - - return fls64(feature) - 1; -} - -/* This goes for the MSB to the LSB through the set feature bits, - * mask_addr should be a u64 and bit an int - */ -#define for_each_netdev_feature(mask_addr, bit) \ - for ((bit) = find_next_netdev_feature((mask_addr), \ - NETDEV_FEATURE_COUNT); \ - (bit) >= 0; \ - (bit) = find_next_netdev_feature((mask_addr), (bit) - 1)) +#define for_each_netdev_feature(mask_addr, bit) \ + for_each_set_bit(bit, (unsigned long *)mask_addr, NETDEV_FEATURE_COUNT) /* Features valid for ethtool to change */ /* = all defined minus driver/device-class-related */ @@ -204,7 +154,7 @@ static inline int find_next_netdev_feature(u64 feature, unsigned long start) #define NETIF_F_GSO_MASK (__NETIF_F_BIT(NETIF_F_GSO_LAST + 1) - \ __NETIF_F_BIT(NETIF_F_GSO_SHIFT)) -/* List of IP checksum features. Note that NETIF_F_HW_CSUM should not be +/* List of IP checksum features. Note that NETIF_F_ HW_CSUM should not be * set in features when NETIF_F_IP_CSUM or NETIF_F_IPV6_CSUM are set-- * this would be contradictory */ @@ -218,8 +168,8 @@ static inline int find_next_netdev_feature(u64 feature, unsigned long start) NETIF_F_FSO) /* List of features with software fallbacks. 
*/ -#define NETIF_F_GSO_SOFTWARE (NETIF_F_ALL_TSO | NETIF_F_GSO_SCTP | \ - NETIF_F_GSO_UDP_L4 | NETIF_F_GSO_FRAGLIST) +#define NETIF_F_GSO_SOFTWARE (NETIF_F_ALL_TSO | NETIF_F_UFO | \ + NETIF_F_GSO_SCTP) /* * If one device supports one of these features, then enable them @@ -244,9 +194,6 @@ static inline int find_next_netdev_feature(u64 feature, unsigned long start) /* changeable features with no special hardware requirements */ #define NETIF_F_SOFT_FEATURES (NETIF_F_GSO | NETIF_F_GRO) -/* Changeable features with no special hardware requirements that defaults to off. */ -#define NETIF_F_SOFT_FEATURES_OFF (NETIF_F_GRO_FRAGLIST | NETIF_F_GRO_UDP_FWD) - #define NETIF_F_VLAN_FEATURES (NETIF_F_HW_VLAN_CTAG_FILTER | \ NETIF_F_HW_VLAN_CTAG_RX | \ NETIF_F_HW_VLAN_CTAG_TX | \ diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index d79163208d..1969a6493d 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * INET An implementation of the TCP/IP protocol suite for the LINUX * operating system. INET is implemented using the BSD Socket @@ -16,6 +15,11 @@ * Bjorn Ekwall. * Pekka Riikonen * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. 
+ * * Moved to /usr/include/linux for NET3 */ #ifndef _LINUX_NETDEVICE_H @@ -31,15 +35,17 @@ #include #include +#include #include #include +#include #include +#include #ifdef CONFIG_DCB #include #endif #include -#include #include #include @@ -47,18 +53,10 @@ #include #include #include -#include struct netpoll_info; struct device; -struct ethtool_ops; struct phy_device; -struct dsa_port; -struct ip_tunnel_parm; -struct macsec_context; -struct macsec_ops; - -struct sfp_bus; /* 802.11 specific */ struct wireless_dev; /* 802.15.4 specific */ @@ -66,12 +64,8 @@ struct wpan_dev; struct mpls_dev; /* UDP Tunnel offloads */ struct udp_tunnel_info; -struct udp_tunnel_nic_info; -struct udp_tunnel_nic; struct bpf_prog; -struct xdp_buff; -void synchronize_net(void); void netdev_set_default_ethtool_ops(struct net_device *dev, const struct ethtool_ops *ops); @@ -79,8 +73,6 @@ void netdev_set_default_ethtool_ops(struct net_device *dev, #define NET_RX_SUCCESS 0 /* keep 'em coming, baby */ #define NET_RX_DROP 1 /* packet dropped */ -#define MAX_NEST_DEV 8 - /* * Transmit return codes: transmit return codes originate from three different * namespaces: @@ -199,8 +191,7 @@ struct net_device_stats { #ifdef CONFIG_RPS #include -extern struct static_key_false rps_needed; -extern struct static_key_false rfs_needed; +extern struct static_key rps_needed; #endif struct neighbour; @@ -209,13 +200,13 @@ struct sk_buff; struct netdev_hw_addr { struct list_head list; - struct rb_node node; unsigned char addr[MAX_ADDR_LEN]; unsigned char type; #define NETDEV_HW_ADDR_T_LAN 1 #define NETDEV_HW_ADDR_T_SAN 2 -#define NETDEV_HW_ADDR_T_UNICAST 3 -#define NETDEV_HW_ADDR_T_MULTICAST 4 +#define NETDEV_HW_ADDR_T_SLAVE 3 +#define NETDEV_HW_ADDR_T_UNICAST 4 +#define NETDEV_HW_ADDR_T_MULTICAST 5 bool global_use; int sync_cnt; int refcount; @@ -226,9 +217,6 @@ struct netdev_hw_addr { struct netdev_hw_addr_list { struct list_head list; int count; - - /* Auxiliary tree for faster lookup on addition and deletion 
*/ - struct rb_root tree; }; #define netdev_hw_addr_list_count(l) ((l)->count) @@ -247,7 +235,8 @@ struct netdev_hw_addr_list { netdev_hw_addr_list_for_each(ha, &(dev)->mc) struct hh_cache { - unsigned int hh_len; + u16 hh_len; + u16 __pad; seqlock_t hh_lock; /* cached hardware header; allow for machine alignment needs. */ @@ -282,7 +271,6 @@ struct header_ops { const struct net_device *dev, const unsigned char *haddr); bool (*validate)(const char *ll_header, unsigned int len); - __be16 (*parse_protocol)(const struct sk_buff *skb); }; /* These flag bits are private to the generic network queueing @@ -296,20 +284,20 @@ enum netdev_state_t { __LINK_STATE_NOCARRIER, __LINK_STATE_LINKWATCH_PENDING, __LINK_STATE_DORMANT, - __LINK_STATE_TESTING, }; -struct gro_list { - struct list_head list; - int count; -}; - /* - * size of gro hash buckets, must less than bit number of - * napi_struct::gro_bitmask + * This structure holds boot-time configured netdevice settings. They + * are then used in the device probing. 
*/ -#define GRO_HASH_BUCKETS 8 +struct netdev_boot_setup { + char name[IFNAMSIZ]; + struct ifmap map; +}; +#define NETDEV_BOOT_SETUP_MAX 8 + +int __init netdev_boot_setup(char *str); /* * Structure for NAPI scheduling similar to tasklet but with weighting @@ -325,48 +313,27 @@ struct napi_struct { unsigned long state; int weight; - int defer_hard_irqs_count; - unsigned long gro_bitmask; + unsigned int gro_count; int (*poll)(struct napi_struct *, int); #ifdef CONFIG_NETPOLL + spinlock_t poll_lock; int poll_owner; #endif struct net_device *dev; - struct gro_list gro_hash[GRO_HASH_BUCKETS]; + struct sk_buff *gro_list; struct sk_buff *skb; - struct list_head rx_list; /* Pending GRO_NORMAL skbs */ - int rx_count; /* length of rx_list */ struct hrtimer timer; struct list_head dev_list; struct hlist_node napi_hash_node; unsigned int napi_id; - struct task_struct *thread; }; enum { - NAPI_STATE_SCHED, /* Poll is scheduled */ - NAPI_STATE_MISSED, /* reschedule a napi */ - NAPI_STATE_DISABLE, /* Disable pending */ - NAPI_STATE_NPSVC, /* Netpoll - don't dequeue from poll_list */ - NAPI_STATE_LISTED, /* NAPI added to system lists */ - NAPI_STATE_NO_BUSY_POLL, /* Do not add in napi_hash, no busy polling */ - NAPI_STATE_IN_BUSY_POLL, /* sk_busy_loop() owns this NAPI */ - NAPI_STATE_PREFER_BUSY_POLL, /* prefer busy-polling over softirq processing*/ - NAPI_STATE_THREADED, /* The poll is performed inside its own thread*/ - NAPI_STATE_SCHED_THREADED, /* Napi is currently scheduled in threaded mode */ -}; - -enum { - NAPIF_STATE_SCHED = BIT(NAPI_STATE_SCHED), - NAPIF_STATE_MISSED = BIT(NAPI_STATE_MISSED), - NAPIF_STATE_DISABLE = BIT(NAPI_STATE_DISABLE), - NAPIF_STATE_NPSVC = BIT(NAPI_STATE_NPSVC), - NAPIF_STATE_LISTED = BIT(NAPI_STATE_LISTED), - NAPIF_STATE_NO_BUSY_POLL = BIT(NAPI_STATE_NO_BUSY_POLL), - NAPIF_STATE_IN_BUSY_POLL = BIT(NAPI_STATE_IN_BUSY_POLL), - NAPIF_STATE_PREFER_BUSY_POLL = BIT(NAPI_STATE_PREFER_BUSY_POLL), - NAPIF_STATE_THREADED = BIT(NAPI_STATE_THREADED), - 
NAPIF_STATE_SCHED_THREADED = BIT(NAPI_STATE_SCHED_THREADED), + NAPI_STATE_SCHED, /* Poll is scheduled */ + NAPI_STATE_DISABLE, /* Disable pending */ + NAPI_STATE_NPSVC, /* Netpoll - don't dequeue from poll_list */ + NAPI_STATE_HASHED, /* In NAPI hash (busy polling possible) */ + NAPI_STATE_NO_BUSY_POLL,/* Do not add in napi_hash, no busy polling */ }; enum gro_result { @@ -374,7 +341,7 @@ enum gro_result { GRO_MERGED_FREE, GRO_HELD, GRO_NORMAL, - GRO_CONSUMED, + GRO_DROP, }; typedef enum gro_result gro_result_t; @@ -436,13 +403,21 @@ static inline bool napi_disable_pending(struct napi_struct *n) return test_bit(NAPI_STATE_DISABLE, &n->state); } -static inline bool napi_prefer_busy_poll(struct napi_struct *n) +/** + * napi_schedule_prep - check if NAPI can be scheduled + * @n: NAPI context + * + * Test if NAPI routine is already running, and if not mark + * it as running. This is used as a condition variable to + * insure only one NAPI poll instance runs. We also make + * sure there is no pending NAPI disable. + */ +static inline bool napi_schedule_prep(struct napi_struct *n) { - return test_bit(NAPI_STATE_PREFER_BUSY_POLL, &n->state); + return !napi_disable_pending(n) && + !test_and_set_bit(NAPI_STATE_SCHED, &n->state); } -bool napi_schedule_prep(struct napi_struct *n); - /** * napi_schedule - schedule NAPI poll * @n: NAPI context @@ -478,21 +453,44 @@ static inline bool napi_reschedule(struct napi_struct *napi) return false; } -bool napi_complete_done(struct napi_struct *n, int work_done); +void __napi_complete(struct napi_struct *n); +void napi_complete_done(struct napi_struct *n, int work_done); /** * napi_complete - NAPI processing complete * @n: NAPI context * * Mark NAPI processing as complete. * Consider using napi_complete_done() instead. - * Return false if device should avoid rearming interrupts. 
*/ -static inline bool napi_complete(struct napi_struct *n) +static inline void napi_complete(struct napi_struct *n) { return napi_complete_done(n, 0); } -int dev_set_threaded(struct net_device *dev, bool threaded); +/** + * napi_hash_add - add a NAPI to global hashtable + * @napi: NAPI context + * + * Generate a new napi_id and store a @napi under it in napi_hash. + * Used for busy polling (CONFIG_NET_RX_BUSY_POLL). + * Note: This is normally automatically done from netif_napi_add(), + * so might disappear in a future Linux version. + */ +void napi_hash_add(struct napi_struct *napi); + +/** + * napi_hash_del - remove a NAPI from global table + * @napi: NAPI context + * + * Warning: caller must observe RCU grace period + * before freeing memory containing @napi, if + * this function returns true. + * Note: core networking stack automatically calls it + * from netif_napi_del(). + * Drivers might want to call this helper to combine all + * the needed RCU grace periods into a single one. + */ +bool napi_hash_del(struct napi_struct *napi); /** * napi_disable - prevent NAPI from scheduling @@ -503,7 +501,20 @@ int dev_set_threaded(struct net_device *dev, bool threaded); */ void napi_disable(struct napi_struct *n); -void napi_enable(struct napi_struct *n); +/** + * napi_enable - enable NAPI scheduling + * @n: NAPI context + * + * Resume NAPI from being scheduled on this context. + * Must be paired with napi_disable. 
+ */ +static inline void napi_enable(struct napi_struct *n) +{ + BUG_ON(!test_bit(NAPI_STATE_SCHED, &n->state)); + smp_mb__before_atomic(); + clear_bit(NAPI_STATE_SCHED, &n->state); + clear_bit(NAPI_STATE_NPSVC, &n->state); +} /** * napi_synchronize - wait until NAPI is not running @@ -522,32 +533,6 @@ static inline void napi_synchronize(const struct napi_struct *n) barrier(); } -/** - * napi_if_scheduled_mark_missed - if napi is running, set the - * NAPIF_STATE_MISSED - * @n: NAPI context - * - * If napi is running, set the NAPIF_STATE_MISSED, and return true if - * NAPI is scheduled. - **/ -static inline bool napi_if_scheduled_mark_missed(struct napi_struct *n) -{ - unsigned long val, new; - - do { - val = READ_ONCE(n->state); - if (val & NAPIF_STATE_DISABLE) - return true; - - if (!(val & NAPIF_STATE_SCHED)) - return false; - - new = val | NAPIF_STATE_MISSED; - } while (cmpxchg(&n->state, val, new) != val); - - return true; -} - enum netdev_queue_state_t { __QUEUE_STATE_DRV_XOFF, __QUEUE_STATE_STACK_XOFF, @@ -593,12 +578,6 @@ struct netdev_queue { * (/sys/class/net/DEV/Q/trans_timeout) */ unsigned long trans_timeout; - - /* Subordinate device that the queue has been assigned to */ - struct net_device *sb_dev; -#ifdef CONFIG_XDP_SOCKETS - struct xsk_buff_pool *pool; -#endif /* * write-mostly part */ @@ -616,21 +595,6 @@ struct netdev_queue { #endif } ____cacheline_aligned_in_smp; -extern int sysctl_fb_tunnels_only_for_init_net; -extern int sysctl_devconf_inherit_init_net; - -/* - * sysctl_fb_tunnels_only_for_init_net == 0 : For all netns - * == 1 : For initns only - * == 2 : For none. 
- */ -static inline bool net_has_fallback_tunnels(const struct net *net) -{ - return !IS_ENABLED(CONFIG_SYSCTL) || - !sysctl_fb_tunnels_only_for_init_net || - (net == &init_net && sysctl_fb_tunnels_only_for_init_net == 1); -} - static inline int netdev_queue_numa_node_read(const struct netdev_queue *q) { #if defined(CONFIG_XPS) && defined(CONFIG_NUMA) @@ -655,7 +619,7 @@ static inline void netdev_queue_numa_node_write(struct netdev_queue *q, int node struct rps_map { unsigned int len; struct rcu_head rcu; - u16 cpus[]; + u16 cpus[0]; }; #define RPS_MAP_SIZE(_num) (sizeof(struct rps_map) + ((_num) * sizeof(u16))) @@ -677,7 +641,7 @@ struct rps_dev_flow { struct rps_dev_flow_table { unsigned int mask; struct rcu_head rcu; - struct rps_dev_flow flows[]; + struct rps_dev_flow flows[0]; }; #define RPS_DEV_FLOW_TABLE_SIZE(_num) (sizeof(struct rps_dev_flow_table) + \ ((_num) * sizeof(struct rps_dev_flow))) @@ -695,7 +659,7 @@ struct rps_dev_flow_table { struct rps_sock_flow_table { u32 mask; - u32 ents[] ____cacheline_aligned_in_smp; + u32 ents[0] ____cacheline_aligned_in_smp; }; #define RPS_SOCK_FLOW_TABLE_SIZE(_num) (offsetof(struct rps_sock_flow_table, ents[_num])) @@ -727,16 +691,12 @@ bool rps_may_expire_flow(struct net_device *dev, u16 rxq_index, u32 flow_id, /* This structure contains an instance of an RX queue. 
*/ struct netdev_rx_queue { - struct xdp_rxq_info xdp_rxq; #ifdef CONFIG_RPS struct rps_map __rcu *rps_map; struct rps_dev_flow_table __rcu *rps_flow_table; #endif struct kobject kobj; struct net_device *dev; -#ifdef CONFIG_XDP_SOCKETS - struct xsk_buff_pool *pool; -#endif } ____cacheline_aligned_in_smp; /* @@ -744,16 +704,10 @@ struct netdev_rx_queue { */ struct rx_queue_attribute { struct attribute attr; - ssize_t (*show)(struct netdev_rx_queue *queue, char *buf); + ssize_t (*show)(struct netdev_rx_queue *queue, + struct rx_queue_attribute *attr, char *buf); ssize_t (*store)(struct netdev_rx_queue *queue, - const char *buf, size_t len); -}; - -/* XPS map type and offset of the xps map within net_device->xps_maps[]. */ -enum xps_map_type { - XPS_CPUS = 0, - XPS_RXQS, - XPS_MAPS_MAX, + struct rx_queue_attribute *attr, const char *buf, size_t len); }; #ifdef CONFIG_XPS @@ -765,7 +719,7 @@ struct xps_map { unsigned int len; unsigned int alloc_len; struct rcu_head rcu; - u16 queues[]; + u16 queues[0]; }; #define XPS_MAP_SIZE(_num) (sizeof(struct xps_map) + ((_num) * sizeof(u16))) #define XPS_MIN_MAP_ALLOC ((L1_CACHE_ALIGN(offsetof(struct xps_map, queues[1])) \ @@ -773,28 +727,13 @@ struct xps_map { /* * This structure holds all XPS maps for device. Maps are indexed by CPU. - * - * We keep track of the number of cpus/rxqs used when the struct is allocated, - * in nr_ids. This will help not accessing out-of-bound memory. - * - * We keep track of the number of traffic classes used when the struct is - * allocated, in num_tc. This will be used to navigate the maps, to ensure we're - * not crossing its upper bound, as the original dev->num_tc can be updated in - * the meantime. 
*/ struct xps_dev_maps { struct rcu_head rcu; - unsigned int nr_ids; - s16 num_tc; - struct xps_map __rcu *attr_map[]; /* Either CPUs map or RXQs map */ + struct xps_map __rcu *cpu_map[0]; }; - -#define XPS_CPU_DEV_MAPS_SIZE(_tcs) (sizeof(struct xps_dev_maps) + \ - (nr_cpu_ids * (_tcs) * sizeof(struct xps_map *))) - -#define XPS_RXQ_DEV_MAPS_SIZE(_tcs, _rxqs) (sizeof(struct xps_dev_maps) +\ - (_rxqs * (_tcs) * sizeof(struct xps_map *))) - +#define XPS_DEV_MAPS_SIZE (sizeof(struct xps_dev_maps) + \ + (nr_cpu_ids * sizeof(struct xps_map *))) #endif /* CONFIG_XPS */ #define TC_MAX_QUEUE 16 @@ -840,88 +779,36 @@ static inline bool netdev_phys_item_id_same(struct netdev_phys_item_id *a, } typedef u16 (*select_queue_fallback_t)(struct net_device *dev, - struct sk_buff *skb, - struct net_device *sb_dev); + struct sk_buff *skb); -enum net_device_path_type { - DEV_PATH_ETHERNET = 0, - DEV_PATH_VLAN, - DEV_PATH_BRIDGE, - DEV_PATH_PPPOE, - DEV_PATH_DSA, +/* These structures hold the attributes of qdisc and classifiers + * that are being passed to the netdevice through the setup_tc op. 
+ */ +enum { + TC_SETUP_MQPRIO, + TC_SETUP_CLSU32, + TC_SETUP_CLSFLOWER, + TC_SETUP_MATCHALL, + TC_SETUP_CLSBPF, }; -struct net_device_path { - enum net_device_path_type type; - const struct net_device *dev; +struct tc_cls_u32_offload; + +struct tc_to_netdev { + unsigned int type; union { - struct { - u16 id; - __be16 proto; - u8 h_dest[ETH_ALEN]; - } encap; - struct { - enum { - DEV_PATH_BR_VLAN_KEEP, - DEV_PATH_BR_VLAN_TAG, - DEV_PATH_BR_VLAN_UNTAG, - DEV_PATH_BR_VLAN_UNTAG_HW, - } vlan_mode; - u16 vlan_id; - __be16 vlan_proto; - } bridge; - struct { - int port; - u16 proto; - } dsa; + u8 tc; + struct tc_cls_u32_offload *cls_u32; + struct tc_cls_flower_offload *cls_flower; + struct tc_cls_matchall_offload *cls_mall; + struct tc_cls_bpf_offload *cls_bpf; }; }; -#define NET_DEVICE_PATH_STACK_MAX 5 -#define NET_DEVICE_PATH_VLAN_MAX 2 - -struct net_device_path_stack { - int num_paths; - struct net_device_path path[NET_DEVICE_PATH_STACK_MAX]; -}; - -struct net_device_path_ctx { - const struct net_device *dev; - const u8 *daddr; - - int num_vlans; - struct { - u16 id; - __be16 proto; - } vlan[NET_DEVICE_PATH_VLAN_MAX]; -}; - -enum tc_setup_type { - TC_SETUP_QDISC_MQPRIO, - TC_SETUP_CLSU32, - TC_SETUP_CLSFLOWER, - TC_SETUP_CLSMATCHALL, - TC_SETUP_CLSBPF, - TC_SETUP_BLOCK, - TC_SETUP_QDISC_CBS, - TC_SETUP_QDISC_RED, - TC_SETUP_QDISC_PRIO, - TC_SETUP_QDISC_MQ, - TC_SETUP_QDISC_ETF, - TC_SETUP_ROOT_QDISC, - TC_SETUP_QDISC_GRED, - TC_SETUP_QDISC_TAPRIO, - TC_SETUP_FT, - TC_SETUP_QDISC_ETS, - TC_SETUP_QDISC_TBF, - TC_SETUP_QDISC_FIFO, - TC_SETUP_QDISC_HTB, -}; - -/* These structures hold the attributes of bpf state that are being passed - * to the netdevice through the bpf op. +/* These structures hold the attributes of xdp state that are being passed + * to the netdevice through the xdp op. */ -enum bpf_netdev_command { +enum xdp_netdev_command { /* Set or clear a bpf program used in the earliest stages of packet * rx. The prog will have been loaded as BPF_PROG_TYPE_XDP. 
The callee * is responsible for calling bpf_prog_put on any old progs that are @@ -930,90 +817,22 @@ enum bpf_netdev_command { * when it is no longer used. */ XDP_SETUP_PROG, - XDP_SETUP_PROG_HW, - /* BPF program for offload callbacks, invoked at program load time. */ - BPF_OFFLOAD_MAP_ALLOC, - BPF_OFFLOAD_MAP_FREE, - XDP_SETUP_XSK_POOL, + /* Check if a bpf program is set on the device. The callee should + * return true if a program is currently attached and running. + */ + XDP_QUERY_PROG, }; -struct bpf_prog_offload_ops; -struct netlink_ext_ack; -struct xdp_umem; -struct xdp_dev_bulk_queue; -struct bpf_xdp_link; - -enum bpf_xdp_mode { - XDP_MODE_SKB = 0, - XDP_MODE_DRV = 1, - XDP_MODE_HW = 2, - __MAX_XDP_MODE -}; - -struct bpf_xdp_entity { - struct bpf_prog *prog; - struct bpf_xdp_link *link; -}; - -struct netdev_bpf { - enum bpf_netdev_command command; +struct netdev_xdp { + enum xdp_netdev_command command; union { /* XDP_SETUP_PROG */ - struct { - u32 flags; - struct bpf_prog *prog; - struct netlink_ext_ack *extack; - }; - /* BPF_OFFLOAD_MAP_ALLOC, BPF_OFFLOAD_MAP_FREE */ - struct { - struct bpf_offloaded_map *offmap; - }; - /* XDP_SETUP_XSK_POOL */ - struct { - struct xsk_buff_pool *pool; - u16 queue_id; - } xsk; + struct bpf_prog *prog; + /* XDP_QUERY_PROG */ + bool prog_attached; }; }; -/* Flags for ndo_xsk_wakeup. 
*/ -#define XDP_WAKEUP_RX (1 << 0) -#define XDP_WAKEUP_TX (1 << 1) - -#ifdef CONFIG_XFRM_OFFLOAD -struct xfrmdev_ops { - int (*xdo_dev_state_add) (struct xfrm_state *x); - void (*xdo_dev_state_delete) (struct xfrm_state *x); - void (*xdo_dev_state_free) (struct xfrm_state *x); - bool (*xdo_dev_offload_ok) (struct sk_buff *skb, - struct xfrm_state *x); - void (*xdo_dev_state_advance_esn) (struct xfrm_state *x); -}; -#endif - -struct dev_ifalias { - struct rcu_head rcuhead; - char ifalias[]; -}; - -struct devlink; -struct tlsdev_ops; - -struct netdev_name_node { - struct hlist_node hlist; - struct list_head list; - struct net_device *dev; - const char *name; -}; - -int netdev_name_node_alt_create(struct net_device *dev, const char *name); -int netdev_name_node_alt_destroy(struct net_device *dev, const char *name); - -struct netdev_net_notifier { - struct list_head list; - struct notifier_block *nb; -}; - /* * This structure defines the management hooks for network devices. * The following hooks can be defined; unless noted otherwise, they are @@ -1046,18 +865,14 @@ struct netdev_net_notifier { * of useless work if you return NETDEV_TX_BUSY. * Required; cannot be NULL. * - * netdev_features_t (*ndo_features_check)(struct sk_buff *skb, - * struct net_device *dev - * netdev_features_t features); - * Called by core transmit path to determine if device is capable of - * performing offload operations on a given packet. This is to give - * the device an opportunity to implement any restrictions that cannot - * be otherwise expressed by feature flags. The check is called with - * the set of features that the stack has calculated and it returns - * those the driver believes to be appropriate. + * netdev_features_t (*ndo_fix_features)(struct net_device *dev, + * netdev_features_t features); + * Adjusts the requested feature flags according to device-specific + * constraints, and returns the resulting flags. Must not modify + * the device state. 
* * u16 (*ndo_select_queue)(struct net_device *dev, struct sk_buff *skb, - * struct net_device *sb_dev); + * void *accel_priv, select_queue_fallback_t fallback); * Called to decide which queue to use when device supports multiple * transmit queues. * @@ -1079,18 +894,9 @@ struct netdev_net_notifier { * Test if Media Access Control address is valid for the device. * * int (*ndo_do_ioctl)(struct net_device *dev, struct ifreq *ifr, int cmd); - * Old-style ioctl entry point. This is used internally by the - * appletalk and ieee802154 subsystems but is no longer called by - * the device ioctl handler. - * - * int (*ndo_siocbond)(struct net_device *dev, struct ifreq *ifr, int cmd); - * Used by the bonding driver for its device specific ioctls: - * SIOCBONDENSLAVE, SIOCBONDRELEASE, SIOCBONDSETHWADDR, SIOCBONDCHANGEACTIVE, - * SIOCBONDSLAVEINFOQUERY, and SIOCBONDINFOQUERY - * - * * int (*ndo_eth_ioctl)(struct net_device *dev, struct ifreq *ifr, int cmd); - * Called for ethernet specific ioctls: SIOCGMIIPHY, SIOCGMIIREG, - * SIOCSMIIREG, SIOCSHWTSTAMP and SIOCGHWTSTAMP. + * Called when a user requests an ioctl which can't be handled by + * the generic interface code. If not defined ioctls return + * not supported error code. * * int (*ndo_set_config)(struct net_device *dev, struct ifmap *map); * Used to set network devices bus interface parameters. This interface @@ -1099,14 +905,15 @@ struct netdev_net_notifier { * * int (*ndo_change_mtu)(struct net_device *dev, int new_mtu); * Called when a user wants to change the Maximum Transfer Unit - * of a device. + * of a device. If not defined, any request to change MTU will + * will return an error. * - * void (*ndo_tx_timeout)(struct net_device *dev, unsigned int txqueue); + * void (*ndo_tx_timeout)(struct net_device *dev); * Callback used when the transmitter has not made any progress * for dev->watchdog ticks. 
* - * void (*ndo_get_stats64)(struct net_device *dev, - * struct rtnl_link_stats64 *storage); + * struct rtnl_link_stats64* (*ndo_get_stats64)(struct net_device *dev, + * struct rtnl_link_stats64 *storage); * struct net_device_stats* (*ndo_get_stats)(struct net_device *dev); * Called when a user wants to get the network device usage * statistics. Drivers must do one of the following: @@ -1119,7 +926,7 @@ struct netdev_net_notifier { * 3. Update dev->stats asynchronously and atomically, and define * neither operation. * - * bool (*ndo_has_offload_stats)(const struct net_device *dev, int attr_id) + * bool (*ndo_has_offload_stats)(int attr_id) * Return true if this device supports offload stats of this attr_id. * * int (*ndo_get_offload_stats)(int attr_id, const struct net_device *dev, @@ -1156,12 +963,11 @@ struct netdev_net_notifier { * with PF and querying it may introduce a theoretical security risk. * int (*ndo_set_vf_rss_query_en)(struct net_device *dev, int vf, bool setting); * int (*ndo_get_vf_port)(struct net_device *dev, int vf, struct sk_buff *skb); - * int (*ndo_setup_tc)(struct net_device *dev, enum tc_setup_type type, - * void *type_data); - * Called to setup any 'tc' scheduler, classifier or action on @dev. - * This is always called from the stack with the rtnl lock held and netif - * tx queues stopped. This allows the netdevice to perform queue - * management safely. + * int (*ndo_setup_tc)(struct net_device *dev, u8 tc) + * Called to setup 'tc' number of traffic classes in the net device. This + * is always called from the stack with the rtnl lock held and netif tx + * queues stopped. This allows the netdevice to perform queue management + * safely. * * Fiber Channel over Ethernet (FCoE) offload functions. * int (*ndo_fcoe_enable)(struct net_device *dev); @@ -1220,19 +1026,7 @@ struct netdev_net_notifier { * int (*ndo_del_slave)(struct net_device *dev, struct net_device *slave_dev); * Called to release previously enslaved netdev. 
* - * struct net_device *(*ndo_get_xmit_slave)(struct net_device *dev, - * struct sk_buff *skb, - * bool all_slaves); - * Get the xmit slave of master device. If all_slaves is true, function - * assume all the slaves can transmit. - * * Feature/offload setting functions. - * netdev_features_t (*ndo_fix_features)(struct net_device *dev, - * netdev_features_t features); - * Adjusts the requested feature flags according to device-specific - * constraints, and returns the resulting flags. Must not modify - * the device state. - * * int (*ndo_set_features)(struct net_device *dev, netdev_features_t features); * Called to update device configuration to new features. Passed * feature set might be less than what was returned by ndo_fix_features()). @@ -1240,8 +1034,7 @@ struct netdev_net_notifier { * * int (*ndo_fdb_add)(struct ndmsg *ndm, struct nlattr *tb[], * struct net_device *dev, - * const unsigned char *addr, u16 vid, u16 flags, - * struct netlink_ext_ack *extack); + * const unsigned char *addr, u16 vid, u16 flags) * Adds an FDB entry to dev for addr. * int (*ndo_fdb_del)(struct ndmsg *ndm, struct nlattr *tb[], * struct net_device *dev, @@ -1254,7 +1047,7 @@ struct netdev_net_notifier { * entries to skb and update idx with the number of entries. * * int (*ndo_bridge_setlink)(struct net_device *dev, struct nlmsghdr *nlh, - * u16 flags, struct netlink_ext_ack *extack) + * u16 flags) * int (*ndo_bridge_getlink)(struct sk_buff *skb, u32 pid, u32 seq, * struct net_device *dev, u32 filter_mask, * int nlflags) @@ -1275,9 +1068,18 @@ struct netdev_net_notifier { * not implement this, it is assumed that the hw is not able to have * multiple net devices on single physical port. * - * int (*ndo_get_port_parent_id)(struct net_device *dev, - * struct netdev_phys_item_id *ppid) - * Called to get the parent ID of the physical port of this device. 
+ * void (*ndo_udp_tunnel_add)(struct net_device *dev, + * struct udp_tunnel_info *ti); + * Called by UDP tunnel to notify a driver about the UDP port and socket + * address family that a UDP tunnel is listnening to. It is called only + * when a new port starts listening. The operation is protected by the + * RTNL. + * + * void (*ndo_udp_tunnel_del)(struct net_device *dev, + * struct udp_tunnel_info *ti); + * Called by UDP tunnel to notify the driver about a UDP port and socket + * address family that the UDP tunnel is not listening to anymore. The + * operation is protected by the RTNL. * * void* (*ndo_dfwd_add_station)(struct net_device *pdev, * struct net_device *dev) @@ -1291,6 +1093,21 @@ struct netdev_net_notifier { * by 'ndo_dfwd_add_station'. 'pdev' is the net device backing * the station and priv is the structure returned by the add * operation. + * netdev_tx_t (*ndo_dfwd_start_xmit)(struct sk_buff *skb, + * struct net_device *dev, + * void *priv); + * Callback to use for xmit over the accelerated station. This + * is used in place of ndo_start_xmit on accelerated net + * devices. + * netdev_features_t (*ndo_features_check)(struct sk_buff *skb, + * struct net_device *dev + * netdev_features_t features); + * Called by core transmit path to determine if device is capable of + * performing offload operations on a given packet. This is to give + * the device an opportunity to implement any restrictions that cannot + * be otherwise expressed by feature flags. The check is called with + * the set of features that the stack has calculated and it returns + * those the driver believes to be appropriate. * int (*ndo_set_tx_maxrate)(struct net_device *dev, * int queue_index, u32 maxrate); * Called when a user wants to set a max-rate limitation of specific @@ -1312,38 +1129,10 @@ struct netdev_net_notifier { * appropriate rx headroom value allows avoiding skb head copy on * forward. Setting a negative value resets the rx headroom to the * default value. 
- * int (*ndo_bpf)(struct net_device *dev, struct netdev_bpf *bpf); + * int (*ndo_xdp)(struct net_device *dev, struct netdev_xdp *xdp); * This function is used to set or query state related to XDP on the - * netdevice and manage BPF offload. See definition of - * enum bpf_netdev_command for details. - * int (*ndo_xdp_xmit)(struct net_device *dev, int n, struct xdp_frame **xdp, - * u32 flags); - * This function is used to submit @n XDP packets for transmit on a - * netdevice. Returns number of frames successfully transmitted, frames - * that got dropped are freed/returned via xdp_return_frame(). - * Returns negative number, means general error invoking ndo, meaning - * no frames were xmit'ed and core-caller will free all frames. - * struct net_device *(*ndo_xdp_get_xmit_slave)(struct net_device *dev, - * struct xdp_buff *xdp); - * Get the xmit slave of master device based on the xdp_buff. - * int (*ndo_xsk_wakeup)(struct net_device *dev, u32 queue_id, u32 flags); - * This function is used to wake up the softirq, ksoftirqd or kthread - * responsible for sending and/or receiving packets on a specific - * queue id bound to an AF_XDP socket. The flags field specifies if - * only RX, only Tx, or both should be woken up using the flags - * XDP_WAKEUP_RX and XDP_WAKEUP_TX. - * struct devlink_port *(*ndo_get_devlink_port)(struct net_device *dev); - * Get devlink port instance associated with a given netdev. - * Called with a reference on the netdevice and devlink locks only, - * rtnl_lock is not held. - * int (*ndo_tunnel_ctl)(struct net_device *dev, struct ip_tunnel_parm *p, - * int cmd); - * Add, change, delete or get information on an IPv4 tunnel. - * struct net_device *(*ndo_get_peer_dev)(struct net_device *dev); - * If a device is paired with a peer device, return the peer instance. - * The caller must be under RCU read context. 
- * int (*ndo_fill_forward_path)(struct net_device_path_ctx *ctx, struct net_device_path *path); - * Get the forwarding path to reach the real device from the HW destination address + * netdevice. See definition of enum xdp_netdev_command for details. + * */ struct net_device_ops { int (*ndo_init)(struct net_device *dev); @@ -1357,7 +1146,8 @@ struct net_device_ops { netdev_features_t features); u16 (*ndo_select_queue)(struct net_device *dev, struct sk_buff *skb, - struct net_device *sb_dev); + void *accel_priv, + select_queue_fallback_t fallback); void (*ndo_change_rx_flags)(struct net_device *dev, int flags); void (*ndo_set_rx_mode)(struct net_device *dev); @@ -1366,27 +1156,17 @@ struct net_device_ops { int (*ndo_validate_addr)(struct net_device *dev); int (*ndo_do_ioctl)(struct net_device *dev, struct ifreq *ifr, int cmd); - int (*ndo_eth_ioctl)(struct net_device *dev, - struct ifreq *ifr, int cmd); - int (*ndo_siocbond)(struct net_device *dev, - struct ifreq *ifr, int cmd); - int (*ndo_siocwandev)(struct net_device *dev, - struct if_settings *ifs); - int (*ndo_siocdevprivate)(struct net_device *dev, - struct ifreq *ifr, - void __user *data, int cmd); int (*ndo_set_config)(struct net_device *dev, struct ifmap *map); int (*ndo_change_mtu)(struct net_device *dev, int new_mtu); int (*ndo_neigh_setup)(struct net_device *dev, struct neigh_parms *); - void (*ndo_tx_timeout) (struct net_device *dev, - unsigned int txqueue); + void (*ndo_tx_timeout) (struct net_device *dev); - void (*ndo_get_stats64)(struct net_device *dev, - struct rtnl_link_stats64 *storage); - bool (*ndo_has_offload_stats)(const struct net_device *dev, int attr_id); + struct rtnl_link_stats64* (*ndo_get_stats64)(struct net_device *dev, + struct rtnl_link_stats64 *storage); + bool (*ndo_has_offload_stats)(int attr_id); int (*ndo_get_offload_stats)(int attr_id, const struct net_device *dev, void *attr_data); @@ -1401,6 +1181,9 @@ struct net_device_ops { int (*ndo_netpoll_setup)(struct net_device *dev, 
struct netpoll_info *info); void (*ndo_netpoll_cleanup)(struct net_device *dev); +#endif +#ifdef CONFIG_NET_RX_BUSY_POLL + int (*ndo_busy_poll)(struct napi_struct *dev); #endif int (*ndo_set_vf_mac)(struct net_device *dev, int queue, u8 *mac); @@ -1428,10 +1211,6 @@ struct net_device_ops { struct nlattr *port[]); int (*ndo_get_vf_port)(struct net_device *dev, int vf, struct sk_buff *skb); - int (*ndo_get_vf_guid)(struct net_device *dev, - int vf, - struct ifla_vf_guid *node_guid, - struct ifla_vf_guid *port_guid); int (*ndo_set_vf_guid)(struct net_device *dev, int vf, u64 guid, int guid_type); @@ -1439,8 +1218,9 @@ struct net_device_ops { struct net_device *dev, int vf, bool setting); int (*ndo_setup_tc)(struct net_device *dev, - enum tc_setup_type type, - void *type_data); + u32 handle, + __be16 protocol, + struct tc_to_netdev *tc); #if IS_ENABLED(CONFIG_FCOE) int (*ndo_fcoe_enable)(struct net_device *dev); int (*ndo_fcoe_disable)(struct net_device *dev); @@ -1472,15 +1252,9 @@ struct net_device_ops { u32 flow_id); #endif int (*ndo_add_slave)(struct net_device *dev, - struct net_device *slave_dev, - struct netlink_ext_ack *extack); + struct net_device *slave_dev); int (*ndo_del_slave)(struct net_device *dev, struct net_device *slave_dev); - struct net_device* (*ndo_get_xmit_slave)(struct net_device *dev, - struct sk_buff *skb, - bool all_slaves); - struct net_device* (*ndo_sk_get_lower_dev)(struct net_device *dev, - struct sock *sk); netdev_features_t (*ndo_fix_features)(struct net_device *dev, netdev_features_t features); int (*ndo_set_features)(struct net_device *dev, @@ -1495,8 +1269,7 @@ struct net_device_ops { struct net_device *dev, const unsigned char *addr, u16 vid, - u16 flags, - struct netlink_ext_ack *extack); + u16 flags); int (*ndo_fdb_del)(struct ndmsg *ndm, struct nlattr *tb[], struct net_device *dev, @@ -1507,16 +1280,10 @@ struct net_device_ops { struct net_device *dev, struct net_device *filter_dev, int *idx); - int (*ndo_fdb_get)(struct sk_buff 
*skb, - struct nlattr *tb[], - struct net_device *dev, - const unsigned char *addr, - u16 vid, u32 portid, u32 seq, - struct netlink_ext_ack *extack); + int (*ndo_bridge_setlink)(struct net_device *dev, struct nlmsghdr *nlh, - u16 flags, - struct netlink_ext_ack *extack); + u16 flags); int (*ndo_bridge_getlink)(struct sk_buff *skb, u32 pid, u32 seq, struct net_device *dev, @@ -1529,15 +1296,21 @@ struct net_device_ops { bool new_carrier); int (*ndo_get_phys_port_id)(struct net_device *dev, struct netdev_phys_item_id *ppid); - int (*ndo_get_port_parent_id)(struct net_device *dev, - struct netdev_phys_item_id *ppid); int (*ndo_get_phys_port_name)(struct net_device *dev, char *name, size_t len); + void (*ndo_udp_tunnel_add)(struct net_device *dev, + struct udp_tunnel_info *ti); + void (*ndo_udp_tunnel_del)(struct net_device *dev, + struct udp_tunnel_info *ti); void* (*ndo_dfwd_add_station)(struct net_device *pdev, struct net_device *dev); void (*ndo_dfwd_del_station)(struct net_device *pdev, void *priv); + netdev_tx_t (*ndo_dfwd_start_xmit) (struct sk_buff *skb, + struct net_device *dev, + void *priv); + int (*ndo_get_lock_subclass)(struct net_device *dev); int (*ndo_set_tx_maxrate)(struct net_device *dev, int queue_index, u32 maxrate); @@ -1548,25 +1321,13 @@ struct net_device_ops { struct sk_buff *skb); void (*ndo_set_rx_headroom)(struct net_device *dev, int needed_headroom); - int (*ndo_bpf)(struct net_device *dev, - struct netdev_bpf *bpf); - int (*ndo_xdp_xmit)(struct net_device *dev, int n, - struct xdp_frame **xdp, - u32 flags); - struct net_device * (*ndo_xdp_get_xmit_slave)(struct net_device *dev, - struct xdp_buff *xdp); - int (*ndo_xsk_wakeup)(struct net_device *dev, - u32 queue_id, u32 flags); - struct devlink_port * (*ndo_get_devlink_port)(struct net_device *dev); - int (*ndo_tunnel_ctl)(struct net_device *dev, - struct ip_tunnel_parm *p, int cmd); - struct net_device * (*ndo_get_peer_dev)(struct net_device *dev); - int (*ndo_fill_forward_path)(struct 
net_device_path_ctx *ctx, - struct net_device_path *path); + int (*ndo_xdp)(struct net_device *dev, + struct netdev_xdp *xdp); }; +typedef struct net_device_ops __no_const net_device_ops_no_const; /** - * enum netdev_priv_flags - &struct net_device priv_flags + * enum net_device_priv_flags - &struct net_device priv_flags * * These are the &struct net_device, they are only set internally * by drivers and used in the kernel. These flags are invisible to @@ -1596,6 +1357,8 @@ struct net_device_ops { * @IFF_MACVLAN: Macvlan device * @IFF_XMIT_DST_RELEASE_PERM: IFF_XMIT_DST_RELEASE not taking into account * underlying stacked devices + * @IFF_IPVLAN_MASTER: IPvlan master device + * @IFF_IPVLAN_SLAVE: IPvlan slave device * @IFF_L3MDEV_MASTER: device is an L3 master device * @IFF_NO_QUEUE: device can run without qdisc attached * @IFF_OPENVSWITCH: device is a Open vSwitch master @@ -1605,13 +1368,6 @@ struct net_device_ops { * @IFF_PHONY_HEADROOM: the headroom value is controlled by an external * entity (i.e. 
the master device for bridged veth) * @IFF_MACSEC: device is a MACsec device - * @IFF_NO_RX_HANDLER: device doesn't support the rx_handler hook - * @IFF_FAILOVER: device is a failover master device - * @IFF_FAILOVER_SLAVE: device is lower dev of a failover master device - * @IFF_L3MDEV_RX_HANDLER: only invoke the rx handler of L3 master device - * @IFF_LIVE_RENAME_OK: rename is allowed while device is up and running - * @IFF_TX_SKB_NO_LINEAR: device/driver is capable of xmitting frames with - * skb_headlen(skb) == 0 (data starts from frag0) */ enum netdev_priv_flags { IFF_802_1Q_VLAN = 1<<0, @@ -1632,20 +1388,16 @@ enum netdev_priv_flags { IFF_LIVE_ADDR_CHANGE = 1<<15, IFF_MACVLAN = 1<<16, IFF_XMIT_DST_RELEASE_PERM = 1<<17, - IFF_L3MDEV_MASTER = 1<<18, - IFF_NO_QUEUE = 1<<19, - IFF_OPENVSWITCH = 1<<20, - IFF_L3MDEV_SLAVE = 1<<21, - IFF_TEAM = 1<<22, - IFF_RXFH_CONFIGURED = 1<<23, - IFF_PHONY_HEADROOM = 1<<24, - IFF_MACSEC = 1<<25, - IFF_NO_RX_HANDLER = 1<<26, - IFF_FAILOVER = 1<<27, - IFF_FAILOVER_SLAVE = 1<<28, - IFF_L3MDEV_RX_HANDLER = 1<<29, - IFF_LIVE_RENAME_OK = 1<<30, - IFF_TX_SKB_NO_LINEAR = 1<<31, + IFF_IPVLAN_MASTER = 1<<18, + IFF_IPVLAN_SLAVE = 1<<19, + IFF_L3MDEV_MASTER = 1<<20, + IFF_NO_QUEUE = 1<<21, + IFF_OPENVSWITCH = 1<<22, + IFF_L3MDEV_SLAVE = 1<<23, + IFF_TEAM = 1<<24, + IFF_RXFH_CONFIGURED = 1<<25, + IFF_PHONY_HEADROOM = 1<<26, + IFF_MACSEC = 1<<27, }; #define IFF_802_1Q_VLAN IFF_802_1Q_VLAN @@ -1666,45 +1418,35 @@ enum netdev_priv_flags { #define IFF_LIVE_ADDR_CHANGE IFF_LIVE_ADDR_CHANGE #define IFF_MACVLAN IFF_MACVLAN #define IFF_XMIT_DST_RELEASE_PERM IFF_XMIT_DST_RELEASE_PERM +#define IFF_IPVLAN_MASTER IFF_IPVLAN_MASTER +#define IFF_IPVLAN_SLAVE IFF_IPVLAN_SLAVE #define IFF_L3MDEV_MASTER IFF_L3MDEV_MASTER #define IFF_NO_QUEUE IFF_NO_QUEUE #define IFF_OPENVSWITCH IFF_OPENVSWITCH #define IFF_L3MDEV_SLAVE IFF_L3MDEV_SLAVE #define IFF_TEAM IFF_TEAM #define IFF_RXFH_CONFIGURED IFF_RXFH_CONFIGURED -#define IFF_PHONY_HEADROOM IFF_PHONY_HEADROOM 
#define IFF_MACSEC IFF_MACSEC -#define IFF_NO_RX_HANDLER IFF_NO_RX_HANDLER -#define IFF_FAILOVER IFF_FAILOVER -#define IFF_FAILOVER_SLAVE IFF_FAILOVER_SLAVE -#define IFF_L3MDEV_RX_HANDLER IFF_L3MDEV_RX_HANDLER -#define IFF_LIVE_RENAME_OK IFF_LIVE_RENAME_OK -#define IFF_TX_SKB_NO_LINEAR IFF_TX_SKB_NO_LINEAR - -/* Specifies the type of the struct net_device::ml_priv pointer */ -enum netdev_ml_priv_type { - ML_PRIV_NONE, - ML_PRIV_CAN, -}; /** * struct net_device - The DEVICE structure. - * - * Actually, this whole structure is a big mistake. It mixes I/O - * data with strictly "high-level" data, and it has to know about - * almost every data structure used in the INET module. + * Actually, this whole structure is a big mistake. It mixes I/O + * data with strictly "high-level" data, and it has to know about + * almost every data structure used in the INET module. * * @name: This is the first field of the "visible" part of this structure * (i.e. as seen by users in the "Space.c" file). It is the name - * of the interface. + * of the interface. * - * @name_node: Name hashlist node + * @name_hlist: Device name hash chain, please keep it close to name[] * @ifalias: SNMP alias * @mem_end: Shared memory end * @mem_start: Shared memory start * @base_addr: Device I/O address * @irq: Device IRQ number * + * @carrier_changes: Stats to monitor carrier on<->off transitions + * * @state: Generic network queuing layer state, see netdev_state_t * @dev_list: The global list of network devices * @napi_list: List entry used for polling NAPI devices @@ -1715,6 +1457,7 @@ enum netdev_ml_priv_type { * @ptype_specific: Device-specific, protocol-specific packet handlers * * @adj_list: Directly linked devices, like slaves for bonding + * @all_adj_list: All linked devices, *including* neighbours * @features: Currently active device features * @hw_features: User-changeable features * @@ -1727,7 +1470,6 @@ enum netdev_ml_priv_type { * and drivers will need to set them appropriately. 
* * @mpls_features: Mask of features inheritable by MPLS - * @gso_partial_features: value(s) from NETIF_F_GSO\* * * @ifindex: interface index * @group: The group the device belongs to @@ -1741,8 +1483,6 @@ enum netdev_ml_priv_type { * do not use this in drivers * @rx_nohandler: nohandler dropped packets by core network on * inactive devices, do not use this in drivers - * @carrier_up_count: Number of times the carrier has been up - * @carrier_down_count: Number of times the carrier has been down * * @wireless_handlers: List of functions to handle Wireless Extensions, * instead of ioctl, @@ -1752,11 +1492,8 @@ enum netdev_ml_priv_type { * @netdev_ops: Includes several pointers to callbacks, * if one wants to override the ndo_*() functions * @ethtool_ops: Management operations - * @l3mdev_ops: Layer 3 master device operations * @ndisc_ops: Includes callbacks for different IPv6 neighbour * discovery handling. Necessary for e.g. 6LoWPAN. - * @xfrmdev_ops: Transformation offload operations - * @tlsdev_ops: Transport Layer Security offload operations * @header_ops: Includes callbacks for creating,parsing,caching,etc * of Layer 2 headers. * @@ -1770,8 +1507,6 @@ enum netdev_ml_priv_type { * @if_port: Selectable AUI, TP, ... * @dma: DMA channel * @mtu: Interface MTU value - * @min_mtu: Interface Minimum MTU value - * @max_mtu: Interface Maximum MTU value * @type: Interface hardware type * @hard_header_len: Maximum hardware header length. * @min_header_len: Minimum hardware header length @@ -1787,15 +1522,12 @@ enum netdev_ml_priv_type { * @perm_addr: Permanent hw address * @addr_assign_type: Hw address assignment type * @addr_len: Hardware address length - * @upper_level: Maximum depth level of upper devices. - * @lower_level: Maximum depth level of lower devices. 
* @neigh_priv_len: Used in neigh_alloc() * @dev_id: Used to differentiate devices that share * the same link layer address * @dev_port: Used to differentiate devices that share * the same function * @addr_list_lock: XXX: need comments on this one - * @name_assign_type: network interface name assignment type * @uc_promisc: Counter that indicates promiscuous mode * has been enabled due to the need to listen to * additional unicast addresses in a device that @@ -1818,11 +1550,8 @@ enum netdev_ml_priv_type { * @ip6_ptr: IPv6 specific data * @ax25_ptr: AX.25 specific data * @ieee80211_ptr: IEEE 802.11 specific data, assign before registering - * @ieee802154_ptr: IEEE 802.15.4 low-rate Wireless Personal Area Network - * device struct - * @mpls_ptr: mpls_dev struct pointer - * @mctp_ptr: MCTP specific data * + * @last_rx: Time of last Rx * @dev_addr: Hw address (before bcast, * because most packets are unicast) * @@ -1830,17 +1559,10 @@ enum netdev_ml_priv_type { * @num_rx_queues: Number of RX queues * allocated at register_netdev() time * @real_num_rx_queues: Number of RX queues currently active in device - * @xdp_prog: XDP sockets filter program pointer - * @gro_flush_timeout: timeout for GRO layer in NAPI - * @napi_defer_hard_irqs: If not zero, provides a counter that would - * allow to avoid NIC hard IRQ, on busy queues. 
* * @rx_handler: handler for received packets * @rx_handler_data: XXX: need comments on this one - * @miniq_ingress: ingress/clsact qdisc specific data for - * ingress processing * @ingress_queue: XXX: need comments on this one - * @nf_hooks_ingress: netfilter hooks executed for ingress packets * @broadcast: hw bcast address * * @rx_cpu_rmap: CPU reverse-mapping for RX completion interrupts, @@ -1855,20 +1577,14 @@ enum netdev_ml_priv_type { * @qdisc: Root qdisc from userspace point of view * @tx_queue_len: Max frames per queue allowed * @tx_global_lock: XXX: need comments on this one - * @xdp_bulkq: XDP device bulk queue - * @xps_maps: all CPUs/RXQs maps for XPS device * * @xps_maps: XXX: need comments on this one - * @miniq_egress: clsact qdisc specific data for - * egress processing - * @qdisc_hash: qdisc hash table + * * @watchdog_timeo: Represents the timeout that is used by * the watchdog (see dev_watchdog()) * @watchdog_timer: List of timers * - * @proto_down_reason: reason a netdev interface is held down * @pcpu_refcnt: Number of references to this device - * @dev_refcnt: Number of references to this device * @todo_list: Delayed register/unregister * @link_watch_list: XXX: need comments on this one * @@ -1877,13 +1593,12 @@ enum netdev_ml_priv_type { * @rtnl_link_state: This enum represents the phases of creating * a new link * - * @needs_free_netdev: Should unregister perform free_netdev? 
- * @priv_destructor: Called from unregister + * @destructor: Called from unregister, + * can be used to call free_netdev * @npinfo: XXX: need comments on this one * @nd_net: Network namespace this network device is inside * * @ml_priv: Mid-layer private - * @ml_priv_type: Mid-layer private type * @lstats: Loopback statistics * @tstats: Tunnel statistics * @dstats: Dummy statistics @@ -1913,7 +1628,6 @@ enum netdev_ml_priv_type { * @priomap: XXX: need comments on this one * @phydev: Physical device may attach itself * for hardware timestamping - * @sfp_bus: attached &struct sfp_bus structure. * * @qdisc_tx_busylock: lockdep class annotating Qdisc->busylock spinlock * @qdisc_running_key: lockdep class annotating Qdisc->running seqcount @@ -1922,34 +1636,14 @@ enum netdev_ml_priv_type { * switch driver and used to set the phys state of the * switch port. * - * @wol_enabled: Wake-on-LAN is enabled - * - * @threaded: napi threaded mode is enabled - * - * @net_notifier_list: List of per-net netdev notifier block - * that follow this device when it is moved - * to another network namespace. - * - * @macsec_ops: MACsec offloading ops - * - * @udp_tunnel_nic_info: static structure describing the UDP tunnel - * offload capabilities of the device - * @udp_tunnel_nic: UDP tunnel offload state - * @xdp_state: stores info on attached XDP BPF programs - * - * @nested_level: Used as as a parameter of spin_lock_nested() of - * dev->addr_list_lock. - * @unlink_list: As netif_addr_lock() can be called recursively, - * keep a list of interfaces to be deleted. - * * FIXME: cleanup struct net_device such that network protocol info * moves out. 
*/ struct net_device { char name[IFNAMSIZ]; - struct netdev_name_node *name_node; - struct dev_ifalias __rcu *ifalias; + struct hlist_node name_hlist; + char *ifalias; /* * I/O specific fields * FIXME: Merge these and struct ifmap into one @@ -1957,6 +1651,9 @@ struct net_device { unsigned long mem_end; unsigned long mem_start; unsigned long base_addr; + int irq; + + atomic_unchecked_t carrier_changes; /* * Some hardware also needs these fields (state,dev_list, @@ -1978,22 +1675,10 @@ struct net_device { struct list_head lower; } adj_list; - /* Read-mostly cache-line for fast-path access */ - unsigned int flags; - unsigned int priv_flags; - const struct net_device_ops *netdev_ops; - int ifindex; - unsigned short gflags; - unsigned short hard_header_len; - - /* Note : dev->mtu is often read without holding a lock. - * Writers usually hold RTNL. - * It is recommended to use READ_ONCE() to annotate the reads, - * and to use WRITE_ONCE() to annotate the writes. - */ - unsigned int mtu; - unsigned short needed_headroom; - unsigned short needed_tailroom; + struct { + struct list_head upper; + struct list_head lower; + } all_adj_list; netdev_features_t features; netdev_features_t hw_features; @@ -2003,29 +1688,24 @@ struct net_device { netdev_features_t mpls_features; netdev_features_t gso_partial_features; - unsigned int min_mtu; - unsigned int max_mtu; - unsigned short type; - unsigned char min_header_len; - unsigned char name_assign_type; - + int ifindex; int group; - struct net_device_stats stats; /* not used by modern drivers */ + struct net_device_stats stats; - atomic_long_t rx_dropped; - atomic_long_t tx_dropped; - atomic_long_t rx_nohandler; - - /* Stats to monitor link on/off, flapping */ - atomic_t carrier_up_count; - atomic_t carrier_down_count; + atomic_long_unchecked_t rx_dropped; + atomic_long_unchecked_t tx_dropped; + atomic_long_unchecked_t rx_nohandler; #ifdef CONFIG_WIRELESS_EXT const struct iw_handler_def *wireless_handlers; struct iw_public_data 
*wireless_data; #endif + const struct net_device_ops *netdev_ops; const struct ethtool_ops *ethtool_ops; +#ifdef CONFIG_NET_SWITCHDEV + const struct switchdev_ops *switchdev_ops; +#endif #ifdef CONFIG_NET_L3_MASTER_DEV const struct l3mdev_ops *l3mdev_ops; #endif @@ -2033,53 +1713,47 @@ struct net_device { const struct ndisc_ops *ndisc_ops; #endif -#ifdef CONFIG_XFRM_OFFLOAD - const struct xfrmdev_ops *xfrmdev_ops; -#endif - -#if IS_ENABLED(CONFIG_TLS_DEVICE) - const struct tlsdev_ops *tlsdev_ops; -#endif - const struct header_ops *header_ops; + unsigned int flags; + unsigned int priv_flags; + + unsigned short gflags; + unsigned short padded; + unsigned char operstate; unsigned char link_mode; unsigned char if_port; unsigned char dma; + unsigned int mtu; + unsigned short type; + unsigned short hard_header_len; + unsigned short min_header_len; + + unsigned short needed_headroom; + unsigned short needed_tailroom; + /* Interface address info. */ unsigned char perm_addr[MAX_ADDR_LEN]; unsigned char addr_assign_type; unsigned char addr_len; - unsigned char upper_level; - unsigned char lower_level; - unsigned short neigh_priv_len; unsigned short dev_id; unsigned short dev_port; - unsigned short padded; - spinlock_t addr_list_lock; - int irq; - + unsigned char name_assign_type; + bool uc_promisc; struct netdev_hw_addr_list uc; struct netdev_hw_addr_list mc; struct netdev_hw_addr_list dev_addrs; #ifdef CONFIG_SYSFS struct kset *queues_kset; -#endif -#ifdef CONFIG_LOCKDEP - struct list_head unlink_list; #endif unsigned int promiscuity; unsigned int allmulti; - bool uc_promisc; -#ifdef CONFIG_LOCKDEP - unsigned char nested_level; -#endif /* Protocol-specific pointers */ @@ -2088,53 +1762,47 @@ struct net_device { struct vlan_info __rcu *vlan_info; #endif #if IS_ENABLED(CONFIG_NET_DSA) - struct dsa_port *dsa_ptr; + struct dsa_switch_tree *dsa_ptr; #endif #if IS_ENABLED(CONFIG_TIPC) struct tipc_bearer __rcu *tipc_ptr; #endif -#if IS_ENABLED(CONFIG_IRDA) || 
IS_ENABLED(CONFIG_ATALK) void *atalk_ptr; -#endif struct in_device __rcu *ip_ptr; -#if IS_ENABLED(CONFIG_DECNET) struct dn_dev __rcu *dn_ptr; -#endif struct inet6_dev __rcu *ip6_ptr; -#if IS_ENABLED(CONFIG_AX25) void *ax25_ptr; -#endif struct wireless_dev *ieee80211_ptr; struct wpan_dev *ieee802154_ptr; #if IS_ENABLED(CONFIG_MPLS_ROUTING) struct mpls_dev __rcu *mpls_ptr; #endif -#if IS_ENABLED(CONFIG_MCTP) - struct mctp_dev __rcu *mctp_ptr; -#endif /* * Cache lines mostly used on receive path (including eth_type_trans()) */ + unsigned long last_rx; + /* Interface address info used in eth_type_trans() */ unsigned char *dev_addr; +#ifdef CONFIG_SYSFS struct netdev_rx_queue *_rx; + unsigned int num_rx_queues; unsigned int real_num_rx_queues; +#endif - struct bpf_prog __rcu *xdp_prog; unsigned long gro_flush_timeout; - int napi_defer_hard_irqs; rx_handler_func_t __rcu *rx_handler; void __rcu *rx_handler_data; #ifdef CONFIG_NET_CLS_ACT - struct mini_Qdisc __rcu *miniq_ingress; + struct tcf_proto __rcu *ingress_cl_list; #endif struct netdev_queue __rcu *ingress_queue; #ifdef CONFIG_NETFILTER_INGRESS - struct nf_hook_entries __rcu *nf_hooks_ingress; + struct nf_hook_entry __rcu *nf_hooks_ingress; #endif unsigned char broadcast[MAX_ADDR_LEN]; @@ -2150,34 +1818,25 @@ struct net_device { unsigned int num_tx_queues; unsigned int real_num_tx_queues; struct Qdisc *qdisc; - unsigned int tx_queue_len; - spinlock_t tx_global_lock; - - struct xdp_dev_bulk_queue __percpu *xdp_bulkq; - -#ifdef CONFIG_XPS - struct xps_dev_maps __rcu *xps_maps[XPS_MAPS_MAX]; -#endif -#ifdef CONFIG_NET_CLS_ACT - struct mini_Qdisc __rcu *miniq_egress; -#endif - #ifdef CONFIG_NET_SCHED DECLARE_HASHTABLE (qdisc_hash, 4); #endif - /* These may be needed for future network-power-down code. 
*/ - struct timer_list watchdog_timer; + unsigned long tx_queue_len; + spinlock_t tx_global_lock; int watchdog_timeo; - u32 proto_down_reason; - - struct list_head todo_list; - -#ifdef CONFIG_PCPU_DEV_REFCNT - int __percpu *pcpu_refcnt; -#else - refcount_t dev_refcnt; +#ifdef CONFIG_XPS + struct xps_dev_maps __rcu *xps_maps; #endif +#ifdef CONFIG_NET_CLS_ACT + struct tcf_proto __rcu *egress_cl_list; +#endif + + /* These may be needed for future network-power-down code. */ + struct timer_list watchdog_timer; + + int __percpu *pcpu_refcnt; + struct list_head todo_list; struct list_head link_watch_list; @@ -2196,8 +1855,7 @@ struct net_device { RTNL_LINK_INITIALIZING, } rtnl_link_state:16; - bool needs_free_netdev; - void (*priv_destructor)(struct net_device *dev); + void (*destructor)(struct net_device *dev); #ifdef CONFIG_NETPOLL struct netpoll_info __rcu *npinfo; @@ -2206,21 +1864,16 @@ struct net_device { possible_net_t nd_net; /* mid-layer private */ - void *ml_priv; - enum netdev_ml_priv_type ml_priv_type; - union { + void *ml_priv; struct pcpu_lstats __percpu *lstats; struct pcpu_sw_netstats __percpu *tstats; struct pcpu_dstats __percpu *dstats; + struct pcpu_vstats __percpu *vstats; }; -#if IS_ENABLED(CONFIG_GARP) struct garp_port __rcu *garp_port; -#endif -#if IS_ENABLED(CONFIG_MRP) struct mrp_port __rcu *mrp_port; -#endif struct device dev; const struct attribute_group *sysfs_groups[4]; @@ -2237,7 +1890,7 @@ struct net_device { #ifdef CONFIG_DCB const struct dcbnl_rtnl_ops *dcbnl_ops; #endif - s16 num_tc; + u8 num_tc; struct netdev_tc_txq tc_to_txq[TC_MAX_QUEUE]; u8 prio_tc_map[TC_BITMASK + 1]; @@ -2248,34 +1901,12 @@ struct net_device { struct netprio_map __rcu *priomap; #endif struct phy_device *phydev; - struct sfp_bus *sfp_bus; struct lock_class_key *qdisc_tx_busylock; struct lock_class_key *qdisc_running_key; bool proto_down; - unsigned wol_enabled:1; - unsigned threaded:1; - - struct list_head net_notifier_list; - -#if IS_ENABLED(CONFIG_MACSEC) - /* 
MACsec management functions */ - const struct macsec_ops *macsec_ops; -#endif - const struct udp_tunnel_nic_info *udp_tunnel_nic_info; - struct udp_tunnel_nic *udp_tunnel_nic; - - /* protected by rtnl_lock */ - struct bpf_xdp_entity xdp_state[__MAX_XDP_MODE]; }; #define to_net_dev(d) container_of(d, struct net_device, dev) -static inline bool netif_elide_gro(const struct net_device *dev) -{ - if (!(dev->features & NETIF_F_GRO) || dev->xdp_prog) - return true; - return false; -} - #define NETDEV_ALIGN 32 static inline @@ -2294,10 +1925,34 @@ int netdev_set_prio_tc_map(struct net_device *dev, u8 prio, u8 tc) return 0; } -int netdev_txq_to_tc(struct net_device *dev, unsigned int txq); -void netdev_reset_tc(struct net_device *dev); -int netdev_set_tc_queue(struct net_device *dev, u8 tc, u16 count, u16 offset); -int netdev_set_num_tc(struct net_device *dev, u8 num_tc); +static inline +void netdev_reset_tc(struct net_device *dev) +{ + dev->num_tc = 0; + memset(dev->tc_to_txq, 0, sizeof(dev->tc_to_txq)); + memset(dev->prio_tc_map, 0, sizeof(dev->prio_tc_map)); +} + +static inline +int netdev_set_tc_queue(struct net_device *dev, u8 tc, u16 count, u16 offset) +{ + if (tc >= dev->num_tc) + return -EINVAL; + + dev->tc_to_txq[tc].count = count; + dev->tc_to_txq[tc].offset = offset; + return 0; +} + +static inline +int netdev_set_num_tc(struct net_device *dev, u8 num_tc) +{ + if (num_tc > TC_MAX_QUEUE) + return -EINVAL; + + dev->num_tc = num_tc; + return 0; +} static inline int netdev_get_num_tc(struct net_device *dev) @@ -2305,33 +1960,6 @@ int netdev_get_num_tc(struct net_device *dev) return dev->num_tc; } -static inline void net_prefetch(void *p) -{ - prefetch(p); -#if L1_CACHE_BYTES < 128 - prefetch((u8 *)p + L1_CACHE_BYTES); -#endif -} - -static inline void net_prefetchw(void *p) -{ - prefetchw(p); -#if L1_CACHE_BYTES < 128 - prefetchw((u8 *)p + L1_CACHE_BYTES); -#endif -} - -void netdev_unbind_sb_channel(struct net_device *dev, - struct net_device *sb_dev); -int 
netdev_bind_sb_channel_queue(struct net_device *dev, - struct net_device *sb_dev, - u8 tc, u16 count, u16 offset); -int netdev_set_sb_channel(struct net_device *dev, u16 channel); -static inline int netdev_get_sb_channel(struct net_device *dev) -{ - return max_t(int, -dev->num_tc, 0); -} - static inline struct netdev_queue *netdev_get_tx_queue(const struct net_device *dev, unsigned int index) @@ -2368,17 +1996,15 @@ static inline void netdev_for_each_tx_queue(struct net_device *dev, (dev)->qdisc_tx_busylock = &qdisc_tx_busylock_key; \ (dev)->qdisc_running_key = &qdisc_running_key; \ lockdep_set_class(&(dev)->addr_list_lock, \ - &dev_addr_list_lock_key); \ + &dev_addr_list_lock_key); \ for (i = 0; i < (dev)->num_tx_queues; i++) \ lockdep_set_class(&(dev)->_tx[i]._xmit_lock, \ &qdisc_xmit_lock_key); \ } -u16 netdev_pick_tx(struct net_device *dev, struct sk_buff *skb, - struct net_device *sb_dev); -struct netdev_queue *netdev_core_pick_tx(struct net_device *dev, - struct sk_buff *skb, - struct net_device *sb_dev); +struct netdev_queue *netdev_pick_tx(struct net_device *dev, + struct sk_buff *skb, + void *accel_priv); /* returns the headroom that the master device needs to take in account * when forwarding to this dev @@ -2400,29 +2026,6 @@ static inline void netdev_reset_rx_headroom(struct net_device *dev) netdev_set_rx_headroom(dev, -1); } -static inline void *netdev_get_ml_priv(struct net_device *dev, - enum netdev_ml_priv_type type) -{ - if (dev->ml_priv_type != type) - return NULL; - - return dev->ml_priv; -} - -static inline void netdev_set_ml_priv(struct net_device *dev, - void *ml_priv, - enum netdev_ml_priv_type type) -{ - WARN(dev->ml_priv_type && dev->ml_priv_type != type, - "Overwriting already set ml_priv_type (%u) with different ml_priv_type (%u)!\n", - dev->ml_priv_type, type); - WARN(!dev->ml_priv_type && dev->ml_priv, - "Overwriting already set ml_priv and ml_priv_type is ML_PRIV_NONE!\n"); - - dev->ml_priv = ml_priv; - dev->ml_priv_type = type; -} - 
/* * Net namespace inlines */ @@ -2438,6 +2041,15 @@ void dev_net_set(struct net_device *dev, struct net *net) write_pnet(&dev->nd_net, net); } +static inline bool netdev_uses_dsa(struct net_device *dev) +{ +#if IS_ENABLED(CONFIG_NET_DSA) + if (dev->dsa_ptr != NULL) + return dsa_uses_tagged_protocol(dev->dsa_ptr); +#endif + return false; +} + /** * netdev_priv - access network device private data * @dev: network device @@ -2498,27 +2110,13 @@ static inline void netif_tx_napi_add(struct net_device *dev, netif_napi_add(dev, napi, poll, weight); } -/** - * __netif_napi_del - remove a NAPI context - * @napi: NAPI context - * - * Warning: caller must observe RCU grace period before freeing memory - * containing @napi. Drivers might want to call this helper to combine - * all the needed RCU grace periods into a single one. - */ -void __netif_napi_del(struct napi_struct *napi); - /** * netif_napi_del - remove a NAPI context * @napi: NAPI context * * netif_napi_del() removes a NAPI context from the network device NAPI list */ -static inline void netif_napi_del(struct napi_struct *napi) -{ - __netif_napi_del(napi); - synchronize_net(); -} +void netif_napi_del(struct napi_struct *napi); struct napi_gro_cb { /* Virtual address of skb_shinfo(skb)->frags[0].page + offset. */ @@ -2577,8 +2175,7 @@ struct napi_gro_cb { /* Number of gro_receive callbacks this packet already went through */ u8 recursion_counter:4; - /* GRO is done by frag_list pointer chaining. 
*/ - u8 is_flist:1; + /* 1 bit hole */ /* used to support CHECKSUM_COMPLETE for tunneling protocols */ __wsum csum; @@ -2595,10 +2192,10 @@ static inline int gro_recursion_inc_test(struct sk_buff *skb) return ++NAPI_GRO_CB(skb)->recursion_counter == GRO_RECURSION_LIMIT; } -typedef struct sk_buff *(*gro_receive_t)(struct list_head *, struct sk_buff *); -static inline struct sk_buff *call_gro_receive(gro_receive_t cb, - struct list_head *head, - struct sk_buff *skb) +typedef struct sk_buff **(*gro_receive_t)(struct sk_buff **, struct sk_buff *); +static inline struct sk_buff **call_gro_receive(gro_receive_t cb, + struct sk_buff **head, + struct sk_buff *skb) { if (unlikely(gro_recursion_inc_test(skb))) { NAPI_GRO_CB(skb)->flush |= 1; @@ -2608,12 +2205,12 @@ static inline struct sk_buff *call_gro_receive(gro_receive_t cb, return cb(head, skb); } -typedef struct sk_buff *(*gro_receive_sk_t)(struct sock *, struct list_head *, - struct sk_buff *); -static inline struct sk_buff *call_gro_receive_sk(gro_receive_sk_t cb, - struct sock *sk, - struct list_head *head, - struct sk_buff *skb) +typedef struct sk_buff **(*gro_receive_sk_t)(struct sock *, struct sk_buff **, + struct sk_buff *); +static inline struct sk_buff **call_gro_receive_sk(gro_receive_sk_t cb, + struct sock *sk, + struct sk_buff **head, + struct sk_buff *skb) { if (unlikely(gro_recursion_inc_test(skb))) { NAPI_GRO_CB(skb)->flush |= 1; @@ -2625,15 +2222,11 @@ static inline struct sk_buff *call_gro_receive_sk(gro_receive_sk_t cb, struct packet_type { __be16 type; /* This is really htons(ether_type). 
*/ - bool ignore_outgoing; struct net_device *dev; /* NULL is wildcarded here */ int (*func) (struct sk_buff *, struct net_device *, struct packet_type *, struct net_device *); - void (*list_func) (struct list_head *, - struct packet_type *, - struct net_device *); bool (*id_match)(struct packet_type *ptype, struct sock *sk); void *af_packet_priv; @@ -2643,8 +2236,8 @@ struct packet_type { struct offload_callbacks { struct sk_buff *(*gso_segment)(struct sk_buff *skb, netdev_features_t features); - struct sk_buff *(*gro_receive)(struct list_head *head, - struct sk_buff *skb); + struct sk_buff **(*gro_receive)(struct sk_buff **head, + struct sk_buff *skb); int (*gro_complete)(struct sk_buff *skb, int nhoff); }; @@ -2662,47 +2255,7 @@ struct pcpu_sw_netstats { u64 tx_packets; u64 tx_bytes; struct u64_stats_sync syncp; -} __aligned(4 * sizeof(u64)); - -struct pcpu_lstats { - u64_stats_t packets; - u64_stats_t bytes; - struct u64_stats_sync syncp; -} __aligned(2 * sizeof(u64)); - -void dev_lstats_read(struct net_device *dev, u64 *packets, u64 *bytes); - -static inline void dev_sw_netstats_rx_add(struct net_device *dev, unsigned int len) -{ - struct pcpu_sw_netstats *tstats = this_cpu_ptr(dev->tstats); - - u64_stats_update_begin(&tstats->syncp); - tstats->rx_bytes += len; - tstats->rx_packets++; - u64_stats_update_end(&tstats->syncp); -} - -static inline void dev_sw_netstats_tx_add(struct net_device *dev, - unsigned int packets, - unsigned int len) -{ - struct pcpu_sw_netstats *tstats = this_cpu_ptr(dev->tstats); - - u64_stats_update_begin(&tstats->syncp); - tstats->tx_bytes += len; - tstats->tx_packets += packets; - u64_stats_update_end(&tstats->syncp); -} - -static inline void dev_lstats_add(struct net_device *dev, unsigned int len) -{ - struct pcpu_lstats *lstats = this_cpu_ptr(dev->lstats); - - u64_stats_update_begin(&lstats->syncp); - u64_stats_add(&lstats->bytes, len); - u64_stats_inc(&lstats->packets); - u64_stats_update_end(&lstats->syncp); -} +}; #define 
__netdev_alloc_pcpu_stats(type, gfp) \ ({ \ @@ -2721,20 +2274,6 @@ static inline void dev_lstats_add(struct net_device *dev, unsigned int len) #define netdev_alloc_pcpu_stats(type) \ __netdev_alloc_pcpu_stats(type, GFP_KERNEL) -#define devm_netdev_alloc_pcpu_stats(dev, type) \ -({ \ - typeof(type) __percpu *pcpu_stats = devm_alloc_percpu(dev, type);\ - if (pcpu_stats) { \ - int __cpu; \ - for_each_possible_cpu(__cpu) { \ - typeof(type) *stat; \ - stat = per_cpu_ptr(pcpu_stats, __cpu); \ - u64_stats_init(&stat->syncp); \ - } \ - } \ - pcpu_stats; \ -}) - enum netdev_lag_tx_type { NETDEV_LAG_TX_TYPE_UNKNOWN, NETDEV_LAG_TX_TYPE_RANDOM, @@ -2744,20 +2283,8 @@ enum netdev_lag_tx_type { NETDEV_LAG_TX_TYPE_HASH, }; -enum netdev_lag_hash { - NETDEV_LAG_HASH_NONE, - NETDEV_LAG_HASH_L2, - NETDEV_LAG_HASH_L34, - NETDEV_LAG_HASH_L23, - NETDEV_LAG_HASH_E23, - NETDEV_LAG_HASH_E34, - NETDEV_LAG_HASH_VLAN_SRCMAC, - NETDEV_LAG_HASH_UNKNOWN, -}; - struct netdev_lag_upper_info { enum netdev_lag_tx_type tx_type; - enum netdev_lag_hash hash_type; }; struct netdev_lag_lower_state_info { @@ -2767,73 +2294,48 @@ struct netdev_lag_lower_state_info { #include -/* netdevice notifier chain. Please remember to update netdev_cmd_to_name() - * and the rtnetlink notification exclusion list in rtnetlink_event() when - * adding new types. +/* netdevice notifier chain. Please remember to update the rtnetlink + * notification exclusion list in rtnetlink_event() when adding new + * types. 
*/ -enum netdev_cmd { - NETDEV_UP = 1, /* For now you can't veto a device up/down */ - NETDEV_DOWN, - NETDEV_REBOOT, /* Tell a protocol stack a network interface +#define NETDEV_UP 0x0001 /* For now you can't veto a device up/down */ +#define NETDEV_DOWN 0x0002 +#define NETDEV_REBOOT 0x0003 /* Tell a protocol stack a network interface detected a hardware crash and restarted - we can use this eg to kick tcp sessions once done */ - NETDEV_CHANGE, /* Notify device state change */ - NETDEV_REGISTER, - NETDEV_UNREGISTER, - NETDEV_CHANGEMTU, /* notify after mtu change happened */ - NETDEV_CHANGEADDR, /* notify after the address change */ - NETDEV_PRE_CHANGEADDR, /* notify before the address change */ - NETDEV_GOING_DOWN, - NETDEV_CHANGENAME, - NETDEV_FEAT_CHANGE, - NETDEV_BONDING_FAILOVER, - NETDEV_PRE_UP, - NETDEV_PRE_TYPE_CHANGE, - NETDEV_POST_TYPE_CHANGE, - NETDEV_POST_INIT, - NETDEV_RELEASE, - NETDEV_NOTIFY_PEERS, - NETDEV_JOIN, - NETDEV_CHANGEUPPER, - NETDEV_RESEND_IGMP, - NETDEV_PRECHANGEMTU, /* notify before mtu change happened */ - NETDEV_CHANGEINFODATA, - NETDEV_BONDING_INFO, - NETDEV_PRECHANGEUPPER, - NETDEV_CHANGELOWERSTATE, - NETDEV_UDP_TUNNEL_PUSH_INFO, - NETDEV_UDP_TUNNEL_DROP_INFO, - NETDEV_CHANGE_TX_QUEUE_LEN, - NETDEV_CVLAN_FILTER_PUSH_INFO, - NETDEV_CVLAN_FILTER_DROP_INFO, - NETDEV_SVLAN_FILTER_PUSH_INFO, - NETDEV_SVLAN_FILTER_DROP_INFO, -}; -const char *netdev_cmd_to_name(enum netdev_cmd cmd); +#define NETDEV_CHANGE 0x0004 /* Notify device state change */ +#define NETDEV_REGISTER 0x0005 +#define NETDEV_UNREGISTER 0x0006 +#define NETDEV_CHANGEMTU 0x0007 /* notify after mtu change happened */ +#define NETDEV_CHANGEADDR 0x0008 +#define NETDEV_GOING_DOWN 0x0009 +#define NETDEV_CHANGENAME 0x000A +#define NETDEV_FEAT_CHANGE 0x000B +#define NETDEV_BONDING_FAILOVER 0x000C +#define NETDEV_PRE_UP 0x000D +#define NETDEV_PRE_TYPE_CHANGE 0x000E +#define NETDEV_POST_TYPE_CHANGE 0x000F +#define NETDEV_POST_INIT 0x0010 +#define NETDEV_UNREGISTER_FINAL 0x0011 +#define 
NETDEV_RELEASE 0x0012 +#define NETDEV_NOTIFY_PEERS 0x0013 +#define NETDEV_JOIN 0x0014 +#define NETDEV_CHANGEUPPER 0x0015 +#define NETDEV_RESEND_IGMP 0x0016 +#define NETDEV_PRECHANGEMTU 0x0017 /* notify before mtu change happened */ +#define NETDEV_CHANGEINFODATA 0x0018 +#define NETDEV_BONDING_INFO 0x0019 +#define NETDEV_PRECHANGEUPPER 0x001A +#define NETDEV_CHANGELOWERSTATE 0x001B +#define NETDEV_UDP_TUNNEL_PUSH_INFO 0x001C +#define NETDEV_CHANGE_TX_QUEUE_LEN 0x001E int register_netdevice_notifier(struct notifier_block *nb); int unregister_netdevice_notifier(struct notifier_block *nb); -int register_netdevice_notifier_net(struct net *net, struct notifier_block *nb); -int unregister_netdevice_notifier_net(struct net *net, - struct notifier_block *nb); -int register_netdevice_notifier_dev_net(struct net_device *dev, - struct notifier_block *nb, - struct netdev_net_notifier *nn); -int unregister_netdevice_notifier_dev_net(struct net_device *dev, - struct notifier_block *nb, - struct netdev_net_notifier *nn); struct netdev_notifier_info { - struct net_device *dev; - struct netlink_ext_ack *extack; -}; - -struct netdev_notifier_info_ext { - struct netdev_notifier_info info; /* must be first */ - union { - u32 mtu; - } ext; + struct net_device *dev; }; struct netdev_notifier_change_info { @@ -2854,16 +2356,10 @@ struct netdev_notifier_changelowerstate_info { void *lower_state_info; /* is lower dev state */ }; -struct netdev_notifier_pre_changeaddr_info { - struct netdev_notifier_info info; /* must be first */ - const unsigned char *dev_addr; -}; - static inline void netdev_notifier_info_init(struct netdev_notifier_info *info, struct net_device *dev) { info->dev = dev; - info->extack = NULL; } static inline struct net_device * @@ -2872,12 +2368,6 @@ netdev_notifier_info_to_dev(const struct netdev_notifier_info *info) return info->dev; } -static inline struct netlink_ext_ack * -netdev_notifier_info_to_extack(const struct netdev_notifier_info *info) -{ - return 
info->extack; -} - int call_netdevice_notifiers(unsigned long val, struct net_device *dev); @@ -2893,9 +2383,6 @@ extern rwlock_t dev_base_lock; /* Device list lock */ list_for_each_entry_safe(d, n, &(net)->dev_base_head, dev_list) #define for_each_netdev_continue(net, d) \ list_for_each_entry_continue(d, &(net)->dev_base_head, dev_list) -#define for_each_netdev_continue_reverse(net, d) \ - list_for_each_entry_continue_reverse(d, &(net)->dev_base_head, \ - dev_list) #define for_each_netdev_continue_rcu(net, d) \ list_for_each_entry_continue_rcu(d, &(net)->dev_base_head, dev_list) #define for_each_netdev_in_bond_rcu(bond, slave) \ @@ -2937,9 +2424,11 @@ static inline struct net_device *first_net_device_rcu(struct net *net) } int netdev_boot_setup_check(struct net_device *dev); +unsigned long netdev_boot_base(const char *prefix, int unit); struct net_device *dev_getbyhwaddr_rcu(struct net *net, unsigned short type, const char *hwaddr); struct net_device *dev_getfirstbyhwtype(struct net *net, unsigned short type); +struct net_device *__dev_getfirstbyhwtype(struct net *net, unsigned short type); void dev_add_pack(struct packet_type *pt); void dev_remove_pack(struct packet_type *pt); void __dev_remove_pack(struct packet_type *pt); @@ -2948,38 +2437,19 @@ void dev_remove_offload(struct packet_offload *po); int dev_get_iflink(const struct net_device *dev); int dev_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb); -int dev_fill_forward_path(const struct net_device *dev, const u8 *daddr, - struct net_device_path_stack *stack); struct net_device *__dev_get_by_flags(struct net *net, unsigned short flags, unsigned short mask); struct net_device *dev_get_by_name(struct net *net, const char *name); struct net_device *dev_get_by_name_rcu(struct net *net, const char *name); struct net_device *__dev_get_by_name(struct net *net, const char *name); int dev_alloc_name(struct net_device *dev, const char *name); -int dev_open(struct net_device *dev, struct netlink_ext_ack 
*extack); -void dev_close(struct net_device *dev); -void dev_close_many(struct list_head *head, bool unlink); +int dev_open(struct net_device *dev); +int dev_close(struct net_device *dev); +int dev_close_many(struct list_head *head, bool unlink); void dev_disable_lro(struct net_device *dev); int dev_loopback_xmit(struct net *net, struct sock *sk, struct sk_buff *newskb); -u16 dev_pick_tx_zero(struct net_device *dev, struct sk_buff *skb, - struct net_device *sb_dev); -u16 dev_pick_tx_cpu_id(struct net_device *dev, struct sk_buff *skb, - struct net_device *sb_dev); - int dev_queue_xmit(struct sk_buff *skb); -int dev_queue_xmit_accel(struct sk_buff *skb, struct net_device *sb_dev); -int __dev_direct_xmit(struct sk_buff *skb, u16 queue_id); - -static inline int dev_direct_xmit(struct sk_buff *skb, u16 queue_id) -{ - int ret; - - ret = __dev_direct_xmit(skb, queue_id); - if (!dev_xmit_complete(ret)) - kfree_skb(skb); - return ret; -} - +int dev_queue_xmit_accel(struct sk_buff *skb, void *accel_priv); int register_netdevice(struct net_device *dev); void unregister_netdevice_queue(struct net_device *dev, struct list_head *head); void unregister_netdevice_many(struct list_head *head); @@ -2991,21 +2461,23 @@ static inline void unregister_netdevice(struct net_device *dev) int netdev_refcnt_read(const struct net_device *dev); void free_netdev(struct net_device *dev); void netdev_freemem(struct net_device *dev); +void synchronize_net(void); int init_dummy_netdev(struct net_device *dev); -struct net_device *netdev_get_xmit_slave(struct net_device *dev, - struct sk_buff *skb, - bool all_slaves); -struct net_device *netdev_sk_get_lowest_dev(struct net_device *dev, - struct sock *sk); +DECLARE_PER_CPU(int, xmit_recursion); +#define XMIT_RECURSION_LIMIT 10 + +static inline int dev_recursion_level(void) +{ + return this_cpu_read(xmit_recursion); +} + struct net_device *dev_get_by_index(struct net *net, int ifindex); struct net_device *__dev_get_by_index(struct net *net, int 
ifindex); struct net_device *dev_get_by_index_rcu(struct net *net, int ifindex); -struct net_device *dev_get_by_napi_id(unsigned int napi_id); int netdev_get_name(struct net *net, char *name, int ifindex); int dev_restart(struct net_device *dev); -int skb_gro_receive(struct sk_buff *p, struct sk_buff *skb); -int skb_gro_receive_list(struct sk_buff *p, struct sk_buff *skb); +int skb_gro_receive(struct sk_buff **head, struct sk_buff *skb); static inline unsigned int skb_gro_offset(const struct sk_buff *skb) { @@ -3120,7 +2592,9 @@ static inline void skb_gro_incr_csum_unnecessary(struct sk_buff *skb) if (__skb_gro_checksum_validate_needed(skb, zero_okay, check)) \ __ret = __skb_gro_checksum_validate_complete(skb, \ compute_pseudo(skb, proto)); \ - if (!__ret) \ + if (__ret) \ + __skb_mark_checksum_bad(skb); \ + else \ skb_gro_incr_csum_unnecessary(skb); \ __ret; \ }) @@ -3142,16 +2616,16 @@ static inline bool __skb_gro_checksum_convert_check(struct sk_buff *skb) } static inline void __skb_gro_checksum_convert(struct sk_buff *skb, - __wsum pseudo) + __sum16 check, __wsum pseudo) { NAPI_GRO_CB(skb)->csum = ~pseudo; NAPI_GRO_CB(skb)->csum_valid = 1; } -#define skb_gro_checksum_try_convert(skb, proto, compute_pseudo) \ +#define skb_gro_checksum_try_convert(skb, proto, check, compute_pseudo) \ do { \ if (__skb_gro_checksum_convert_check(skb)) \ - __skb_gro_checksum_convert(skb, \ + __skb_gro_checksum_convert(skb, check, \ compute_pseudo(skb, proto)); \ } while (0) @@ -3220,38 +2694,70 @@ static inline void skb_gro_remcsum_cleanup(struct sk_buff *skb, remcsum_unadjust((__sum16 *)ptr, grc->delta); } -#ifdef CONFIG_XFRM_OFFLOAD -static inline void skb_gro_flush_final(struct sk_buff *skb, struct sk_buff *pp, int flush) +struct skb_csum_offl_spec { + __u16 ipv4_okay:1, + ipv6_okay:1, + encap_okay:1, + ip_options_okay:1, + ext_hdrs_okay:1, + tcp_okay:1, + udp_okay:1, + sctp_okay:1, + vlan_okay:1, + no_encapped_ipv6:1, + no_not_encapped:1; +}; + +bool 
__skb_csum_offload_chk(struct sk_buff *skb, + const struct skb_csum_offl_spec *spec, + bool *csum_encapped, + bool csum_help); + +static inline bool skb_csum_offload_chk(struct sk_buff *skb, + const struct skb_csum_offl_spec *spec, + bool *csum_encapped, + bool csum_help) { - if (PTR_ERR(pp) != -EINPROGRESS) - NAPI_GRO_CB(skb)->flush |= flush; + if (skb->ip_summed != CHECKSUM_PARTIAL) + return false; + + return __skb_csum_offload_chk(skb, spec, csum_encapped, csum_help); } -static inline void skb_gro_flush_final_remcsum(struct sk_buff *skb, - struct sk_buff *pp, - int flush, - struct gro_remcsum *grc) + +static inline bool skb_csum_offload_chk_help(struct sk_buff *skb, + const struct skb_csum_offl_spec *spec) { - if (PTR_ERR(pp) != -EINPROGRESS) { - NAPI_GRO_CB(skb)->flush |= flush; - skb_gro_remcsum_cleanup(skb, grc); - skb->remcsum_offload = 0; - } + bool csum_encapped; + + return skb_csum_offload_chk(skb, spec, &csum_encapped, true); } -#else -static inline void skb_gro_flush_final(struct sk_buff *skb, struct sk_buff *pp, int flush) + +static inline bool skb_csum_off_chk_help_cmn(struct sk_buff *skb) { - NAPI_GRO_CB(skb)->flush |= flush; + static const struct skb_csum_offl_spec csum_offl_spec = { + .ipv4_okay = 1, + .ip_options_okay = 1, + .ipv6_okay = 1, + .vlan_okay = 1, + .tcp_okay = 1, + .udp_okay = 1, + }; + + return skb_csum_offload_chk_help(skb, &csum_offl_spec); } -static inline void skb_gro_flush_final_remcsum(struct sk_buff *skb, - struct sk_buff *pp, - int flush, - struct gro_remcsum *grc) + +static inline bool skb_csum_off_chk_help_cmn_v4_only(struct sk_buff *skb) { - NAPI_GRO_CB(skb)->flush |= flush; - skb_gro_remcsum_cleanup(skb, grc); - skb->remcsum_offload = 0; + static const struct skb_csum_offl_spec csum_offl_spec = { + .ipv4_okay = 1, + .ip_options_okay = 1, + .tcp_okay = 1, + .udp_okay = 1, + .vlan_okay = 1, + }; + + return skb_csum_offload_chk_help(skb, &csum_offl_spec); } -#endif static inline int dev_hard_header(struct sk_buff *skb, struct 
net_device *dev, unsigned short type, @@ -3274,15 +2780,6 @@ static inline int dev_parse_header(const struct sk_buff *skb, return dev->header_ops->parse(skb, haddr); } -static inline __be16 dev_parse_header_protocol(const struct sk_buff *skb) -{ - const struct net_device *dev = skb->dev; - - if (!dev->header_ops || !dev->header_ops->parse_protocol) - return 0; - return dev->header_ops->parse_protocol(skb); -} - /* ll_header must have at least hard_header_len allocated */ static inline bool dev_validate_header(const struct net_device *dev, char *ll_header, int len) @@ -3303,9 +2800,11 @@ static inline bool dev_validate_header(const struct net_device *dev, return false; } -static inline bool dev_has_header(const struct net_device *dev) +typedef int gifconf_func_t(struct net_device * dev, char __user * bufptr, int len); +int register_gifconf(unsigned int family, gifconf_func_t *gifconf); +static inline int unregister_gifconf(unsigned int family) { - return dev->header_ops && dev->header_ops->create; + return register_gifconf(family, NULL); } #ifdef CONFIG_NET_FLOW_LIMIT @@ -3341,14 +2840,7 @@ struct softnet_data { struct Qdisc *output_queue; struct Qdisc **output_queue_tailp; struct sk_buff *completion_queue; -#ifdef CONFIG_XFRM_OFFLOAD - struct sk_buff_head xfrm_backlog; -#endif - /* written and read only by owning cpu: */ - struct { - u16 recursion; - u8 more; - } xmit; + #ifdef CONFIG_RPS /* input_queue_head should be written by cpu owning this struct, * and only read by other cpus. Worth using a cache line. 
@@ -3356,7 +2848,7 @@ struct softnet_data { unsigned int input_queue_head ____cacheline_aligned_in_smp; /* Elements below can be accessed between CPUs for RPS/RFS */ - call_single_data_t csd ____cacheline_aligned_in_smp; + struct call_single_data csd ____cacheline_aligned_in_smp; struct softnet_data *rps_ipi_next; unsigned int cpu; unsigned int input_queue_tail; @@ -3384,28 +2876,6 @@ static inline void input_queue_tail_incr_save(struct softnet_data *sd, DECLARE_PER_CPU_ALIGNED(struct softnet_data, softnet_data); -static inline int dev_recursion_level(void) -{ - return this_cpu_read(softnet_data.xmit.recursion); -} - -#define XMIT_RECURSION_LIMIT 8 -static inline bool dev_xmit_recursion(void) -{ - return unlikely(__this_cpu_read(softnet_data.xmit.recursion) > - XMIT_RECURSION_LIMIT); -} - -static inline void dev_xmit_recursion_inc(void) -{ - __this_cpu_inc(softnet_data.xmit.recursion); -} - -static inline void dev_xmit_recursion_dec(void) -{ - __this_cpu_dec(softnet_data.xmit.recursion); -} - void __netif_schedule(struct Qdisc *q); void netif_schedule_queue(struct netdev_queue *txq); @@ -3519,24 +2989,6 @@ netif_xmit_frozen_or_drv_stopped(const struct netdev_queue *dev_queue) return dev_queue->state & QUEUE_STATE_DRV_XOFF_OR_FROZEN; } -/** - * netdev_queue_set_dql_min_limit - set dql minimum limit - * @dev_queue: pointer to transmit queue - * @min_limit: dql minimum limit - * - * Forces xmit_more() to return true until the minimum threshold - * defined by @min_limit is reached (or until the tx queue is - * empty). Warning: to be use with care, misuse will impact the - * latency. 
- */ -static inline void netdev_queue_set_dql_min_limit(struct netdev_queue *dev_queue, - unsigned int min_limit) -{ -#ifdef CONFIG_BQL - dev_queue->dql.min_limit = min_limit; -#endif -} - /** * netdev_txq_bql_enqueue_prefetchw - prefetch bql data for write * @dev_queue: pointer to transmit queue @@ -3589,26 +3041,6 @@ static inline void netdev_tx_sent_queue(struct netdev_queue *dev_queue, #endif } -/* Variant of netdev_tx_sent_queue() for drivers that are aware - * that they should not test BQL status themselves. - * We do want to change __QUEUE_STATE_STACK_XOFF only for the last - * skb of a batch. - * Returns true if the doorbell must be used to kick the NIC. - */ -static inline bool __netdev_tx_sent_queue(struct netdev_queue *dev_queue, - unsigned int bytes, - bool xmit_more) -{ - if (xmit_more) { -#ifdef CONFIG_BQL - dql_queued(&dev_queue->dql, bytes); -#endif - return netif_tx_queue_stopped(dev_queue); - } - netdev_tx_sent_queue(dev_queue, bytes); - return true; -} - /** * netdev_sent_queue - report the number of bytes queued to hardware * @dev: network device @@ -3623,14 +3055,6 @@ static inline void netdev_sent_queue(struct net_device *dev, unsigned int bytes) netdev_tx_sent_queue(netdev_get_tx_queue(dev, 0), bytes); } -static inline bool __netdev_sent_queue(struct net_device *dev, - unsigned int bytes, - bool xmit_more) -{ - return __netdev_tx_sent_queue(netdev_get_tx_queue(dev, 0), bytes, - xmit_more); -} - static inline void netdev_tx_completed_queue(struct netdev_queue *dev_queue, unsigned int pkts, unsigned int bytes) { @@ -3647,7 +3071,7 @@ static inline void netdev_tx_completed_queue(struct netdev_queue *dev_queue, */ smp_mb(); - if (unlikely(dql_avail(&dev_queue->dql) < 0)) + if (dql_avail(&dev_queue->dql) < 0) return; if (test_and_clear_bit(__QUEUE_STATE_STACK_XOFF, &dev_queue->state)) @@ -3757,7 +3181,7 @@ static inline void netif_stop_subqueue(struct net_device *dev, u16 queue_index) } /** - * __netif_subqueue_stopped - test status of subqueue + 
* netif_subqueue_stopped - test status of subqueue * @dev: network device * @queue_index: sub queue index * @@ -3771,122 +3195,17 @@ static inline bool __netif_subqueue_stopped(const struct net_device *dev, return netif_tx_queue_stopped(txq); } -/** - * netif_subqueue_stopped - test status of subqueue - * @dev: network device - * @skb: sub queue buffer pointer - * - * Check individual transmit queue of a device with multiple transmit queues. - */ static inline bool netif_subqueue_stopped(const struct net_device *dev, struct sk_buff *skb) { return __netif_subqueue_stopped(dev, skb_get_queue_mapping(skb)); } -/** - * netif_wake_subqueue - allow sending packets on subqueue - * @dev: network device - * @queue_index: sub queue index - * - * Resume individual transmit queue of a device with multiple transmit queues. - */ -static inline void netif_wake_subqueue(struct net_device *dev, u16 queue_index) -{ - struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index); - - netif_tx_wake_queue(txq); -} +void netif_wake_subqueue(struct net_device *dev, u16 queue_index); #ifdef CONFIG_XPS int netif_set_xps_queue(struct net_device *dev, const struct cpumask *mask, u16 index); -int __netif_set_xps_queue(struct net_device *dev, const unsigned long *mask, - u16 index, enum xps_map_type type); - -/** - * netif_attr_test_mask - Test a CPU or Rx queue set in a mask - * @j: CPU/Rx queue index - * @mask: bitmask of all cpus/rx queues - * @nr_bits: number of bits in the bitmask - * - * Test if a CPU or Rx queue index is set in a mask of all CPU/Rx queues. - */ -static inline bool netif_attr_test_mask(unsigned long j, - const unsigned long *mask, - unsigned int nr_bits) -{ - cpu_max_bits_warn(j, nr_bits); - return test_bit(j, mask); -} - -/** - * netif_attr_test_online - Test for online CPU/Rx queue - * @j: CPU/Rx queue index - * @online_mask: bitmask for CPUs/Rx queues that are online - * @nr_bits: number of bits in the bitmask - * - * Returns true if a CPU/Rx queue is online. 
- */ -static inline bool netif_attr_test_online(unsigned long j, - const unsigned long *online_mask, - unsigned int nr_bits) -{ - cpu_max_bits_warn(j, nr_bits); - - if (online_mask) - return test_bit(j, online_mask); - - return (j < nr_bits); -} - -/** - * netif_attrmask_next - get the next CPU/Rx queue in a cpu/Rx queues mask - * @n: CPU/Rx queue index - * @srcp: the cpumask/Rx queue mask pointer - * @nr_bits: number of bits in the bitmask - * - * Returns >= nr_bits if no further CPUs/Rx queues set. - */ -static inline unsigned int netif_attrmask_next(int n, const unsigned long *srcp, - unsigned int nr_bits) -{ - /* -1 is a legal arg here. */ - if (n != -1) - cpu_max_bits_warn(n, nr_bits); - - if (srcp) - return find_next_bit(srcp, nr_bits, n + 1); - - return n + 1; -} - -/** - * netif_attrmask_next_and - get the next CPU/Rx queue in \*src1p & \*src2p - * @n: CPU/Rx queue index - * @src1p: the first CPUs/Rx queues mask pointer - * @src2p: the second CPUs/Rx queues mask pointer - * @nr_bits: number of bits in the bitmask - * - * Returns >= nr_bits if no further CPUs/Rx queues set in both. - */ -static inline int netif_attrmask_next_and(int n, const unsigned long *src1p, - const unsigned long *src2p, - unsigned int nr_bits) -{ - /* -1 is a legal arg here. 
*/ - if (n != -1) - cpu_max_bits_warn(n, nr_bits); - - if (src1p && src2p) - return find_next_and_bit(src1p, src2p, nr_bits, n + 1); - else if (src1p) - return find_next_bit(src1p, nr_bits, n + 1); - else if (src2p) - return find_next_bit(src2p, nr_bits, n + 1); - - return n + 1; -} #else static inline int netif_set_xps_queue(struct net_device *dev, const struct cpumask *mask, @@ -3894,15 +3213,21 @@ static inline int netif_set_xps_queue(struct net_device *dev, { return 0; } - -static inline int __netif_set_xps_queue(struct net_device *dev, - const unsigned long *mask, - u16 index, enum xps_map_type type) -{ - return 0; -} #endif +u16 __skb_tx_hash(const struct net_device *dev, struct sk_buff *skb, + unsigned int num_tx_queues); + +/* + * Returns a Tx hash for the given packet when dev->real_num_tx_queues is used + * as a distribution range limit for the returned value. + */ +static inline u16 skb_tx_hash(const struct net_device *dev, + struct sk_buff *skb) +{ + return __skb_tx_hash(dev, skb, dev->real_num_tx_queues); +} + /** * netif_is_multiqueue - test if device has multiple transmit queues * @dev: network device @@ -3920,20 +3245,11 @@ int netif_set_real_num_tx_queues(struct net_device *dev, unsigned int txq); int netif_set_real_num_rx_queues(struct net_device *dev, unsigned int rxq); #else static inline int netif_set_real_num_rx_queues(struct net_device *dev, - unsigned int rxqs) + unsigned int rxq) { - dev->real_num_rx_queues = rxqs; return 0; } #endif -int netif_set_real_num_queues(struct net_device *dev, - unsigned int txq, unsigned int rxq); - -static inline struct netdev_rx_queue * -__netif_get_rx_queue(struct net_device *dev, unsigned int rxq) -{ - return dev->_rx + rxq; -} #ifdef CONFIG_SYSFS static inline unsigned int get_netdev_rx_queue_index( @@ -3961,7 +3277,7 @@ void __dev_kfree_skb_any(struct sk_buff *skb, enum skb_free_reason reason); /* * It is not allowed to call kfree_skb() or consume_skb() from hardware * interrupt context or with hardware 
interrupts being disabled. - * (in_hardirq() || irqs_disabled()) + * (in_irq() || irqs_disabled()) * * We provide four helpers that can be used in following contexts : * @@ -3997,16 +3313,9 @@ static inline void dev_consume_skb_any(struct sk_buff *skb) __dev_kfree_skb_any(skb, SKB_REASON_CONSUMED); } -u32 bpf_prog_run_generic_xdp(struct sk_buff *skb, struct xdp_buff *xdp, - struct bpf_prog *xdp_prog); -void generic_xdp_tx(struct sk_buff *skb, struct bpf_prog *xdp_prog); -int do_xdp_generic(struct bpf_prog *xdp_prog, struct sk_buff *skb); int netif_rx(struct sk_buff *skb); int netif_rx_ni(struct sk_buff *skb); -int netif_rx_any_context(struct sk_buff *skb); int netif_receive_skb(struct sk_buff *skb); -int netif_receive_skb_core(struct sk_buff *skb); -void netif_receive_skb_list(struct list_head *head); gro_result_t napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb); void napi_gro_flush(struct napi_struct *napi, bool flush_old); struct sk_buff *napi_get_frags(struct napi_struct *napi); @@ -4027,125 +3336,52 @@ int netdev_rx_handler_register(struct net_device *dev, void netdev_rx_handler_unregister(struct net_device *dev); bool dev_valid_name(const char *name); -static inline bool is_socket_ioctl_cmd(unsigned int cmd) -{ - return _IOC_TYPE(cmd) == SOCK_IOC_TYPE; -} -int get_user_ifreq(struct ifreq *ifr, void __user **ifrdata, void __user *arg); -int put_user_ifreq(struct ifreq *ifr, void __user *arg); -int dev_ioctl(struct net *net, unsigned int cmd, struct ifreq *ifr, - void __user *data, bool *need_copyout); -int dev_ifconf(struct net *net, struct ifconf __user *ifc); -int dev_ethtool(struct net *net, struct ifreq *ifr, void __user *userdata); +int dev_ioctl(struct net *net, unsigned int cmd, void __user *); +int dev_ethtool(struct net *net, struct ifreq *); unsigned int dev_get_flags(const struct net_device *); -int __dev_change_flags(struct net_device *dev, unsigned int flags, - struct netlink_ext_ack *extack); -int dev_change_flags(struct net_device 
*dev, unsigned int flags, - struct netlink_ext_ack *extack); +int __dev_change_flags(struct net_device *, unsigned int flags); +int dev_change_flags(struct net_device *, unsigned int); void __dev_notify_flags(struct net_device *, unsigned int old_flags, unsigned int gchanges); int dev_change_name(struct net_device *, const char *); int dev_set_alias(struct net_device *, const char *, size_t); -int dev_get_alias(const struct net_device *, char *, size_t); -int __dev_change_net_namespace(struct net_device *dev, struct net *net, - const char *pat, int new_ifindex); -static inline -int dev_change_net_namespace(struct net_device *dev, struct net *net, - const char *pat) -{ - return __dev_change_net_namespace(dev, net, pat, 0); -} -int __dev_set_mtu(struct net_device *, int); -int dev_validate_mtu(struct net_device *dev, int mtu, - struct netlink_ext_ack *extack); -int dev_set_mtu_ext(struct net_device *dev, int mtu, - struct netlink_ext_ack *extack); +int dev_change_net_namespace(struct net_device *, struct net *, const char *); int dev_set_mtu(struct net_device *, int); -int dev_change_tx_queue_len(struct net_device *, unsigned long); void dev_set_group(struct net_device *, int); -int dev_pre_changeaddr_notify(struct net_device *dev, const char *addr, - struct netlink_ext_ack *extack); -int dev_set_mac_address(struct net_device *dev, struct sockaddr *sa, - struct netlink_ext_ack *extack); -int dev_set_mac_address_user(struct net_device *dev, struct sockaddr *sa, - struct netlink_ext_ack *extack); -int dev_get_mac_address(struct sockaddr *sa, struct net *net, char *dev_name); +int dev_set_mac_address(struct net_device *, struct sockaddr *); int dev_change_carrier(struct net_device *, bool new_carrier); int dev_get_phys_port_id(struct net_device *dev, struct netdev_phys_item_id *ppid); int dev_get_phys_port_name(struct net_device *dev, char *name, size_t len); -int dev_get_port_parent_id(struct net_device *dev, - struct netdev_phys_item_id *ppid, bool recurse); -bool 
netdev_port_same_parent_id(struct net_device *a, struct net_device *b); int dev_change_proto_down(struct net_device *dev, bool proto_down); -int dev_change_proto_down_generic(struct net_device *dev, bool proto_down); -void dev_change_proto_down_reason(struct net_device *dev, unsigned long mask, - u32 value); -struct sk_buff *validate_xmit_skb_list(struct sk_buff *skb, struct net_device *dev, bool *again); +int dev_change_xdp_fd(struct net_device *dev, int fd); +struct sk_buff *validate_xmit_skb_list(struct sk_buff *skb, struct net_device *dev); struct sk_buff *dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev, struct netdev_queue *txq, int *ret); - -typedef int (*bpf_op_t)(struct net_device *dev, struct netdev_bpf *bpf); -int dev_change_xdp_fd(struct net_device *dev, struct netlink_ext_ack *extack, - int fd, int expected_fd, u32 flags); -int bpf_xdp_link_attach(const union bpf_attr *attr, struct bpf_prog *prog); -u8 dev_xdp_prog_count(struct net_device *dev); -u32 dev_xdp_prog_id(struct net_device *dev, enum bpf_xdp_mode mode); - int __dev_forward_skb(struct net_device *dev, struct sk_buff *skb); int dev_forward_skb(struct net_device *dev, struct sk_buff *skb); -int dev_forward_skb_nomtu(struct net_device *dev, struct sk_buff *skb); bool is_skb_forwardable(const struct net_device *dev, const struct sk_buff *skb); -static __always_inline bool __is_skb_forwardable(const struct net_device *dev, - const struct sk_buff *skb, - const bool check_mtu) -{ - const u32 vlan_hdr_len = 4; /* VLAN_HLEN */ - unsigned int len; - - if (!(dev->flags & IFF_UP)) - return false; - - if (!check_mtu) - return true; - - len = dev->mtu + dev->hard_header_len + vlan_hdr_len; - if (skb->len <= len) - return true; - - /* if TSO is enabled, we don't care about the length as the packet - * could be forwarded without being segmented before - */ - if (skb_is_gso(skb)) - return true; - - return false; -} - static __always_inline int ____dev_forward_skb(struct net_device *dev, - 
struct sk_buff *skb, - const bool check_mtu) + struct sk_buff *skb) { if (skb_orphan_frags(skb, GFP_ATOMIC) || - unlikely(!__is_skb_forwardable(dev, skb, check_mtu))) { - atomic_long_inc(&dev->rx_dropped); + unlikely(!is_skb_forwardable(dev, skb))) { + atomic_long_inc_unchecked(&dev->rx_dropped); kfree_skb(skb); return NET_RX_DROP; } - skb_scrub_packet(skb, !net_eq(dev_net(dev), dev_net(skb->dev))); + skb_scrub_packet(skb, true); skb->priority = 0; return 0; } -bool dev_nit_active(struct net_device *dev); void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev); extern int netdev_budget; -extern unsigned int netdev_budget_usecs; /* Called by rtnetlink.c:rtnl_unlock() */ void netdev_run_todo(void); @@ -4158,13 +3394,7 @@ void netdev_run_todo(void); */ static inline void dev_put(struct net_device *dev) { - if (dev) { -#ifdef CONFIG_PCPU_DEV_REFCNT - this_cpu_dec(*dev->pcpu_refcnt); -#else - refcount_dec(&dev->dev_refcnt); -#endif - } + this_cpu_dec(*dev->pcpu_refcnt); } /** @@ -4175,13 +3405,7 @@ static inline void dev_put(struct net_device *dev) */ static inline void dev_hold(struct net_device *dev) { - if (dev) { -#ifdef CONFIG_PCPU_DEV_REFCNT - this_cpu_inc(*dev->pcpu_refcnt); -#else - refcount_inc(&dev->dev_refcnt); -#endif - } + this_cpu_inc(*dev->pcpu_refcnt); } /* Carrier loss detection, dial on demand. The functions netif_carrier_on @@ -4213,8 +3437,8 @@ unsigned long dev_trans_start(struct net_device *dev); void __netdev_watchdog_up(struct net_device *dev); void netif_carrier_on(struct net_device *dev); + void netif_carrier_off(struct net_device *dev); -void netif_carrier_event(struct net_device *dev); /** * netif_dormant_on - mark device as dormant. @@ -4247,10 +3471,10 @@ static inline void netif_dormant_off(struct net_device *dev) } /** - * netif_dormant - test if device is dormant + * netif_dormant - test if carrier present * @dev: network device * - * Check if device is dormant. 
+ * Check if carrier is present on device */ static inline bool netif_dormant(const struct net_device *dev) { @@ -4258,46 +3482,6 @@ static inline bool netif_dormant(const struct net_device *dev) } -/** - * netif_testing_on - mark device as under test. - * @dev: network device - * - * Mark device as under test (as per RFC2863). - * - * The testing state indicates that some test(s) must be performed on - * the interface. After completion, of the test, the interface state - * will change to up, dormant, or down, as appropriate. - */ -static inline void netif_testing_on(struct net_device *dev) -{ - if (!test_and_set_bit(__LINK_STATE_TESTING, &dev->state)) - linkwatch_fire_event(dev); -} - -/** - * netif_testing_off - set device as not under test. - * @dev: network device - * - * Device is not in testing state. - */ -static inline void netif_testing_off(struct net_device *dev) -{ - if (test_and_clear_bit(__LINK_STATE_TESTING, &dev->state)) - linkwatch_fire_event(dev); -} - -/** - * netif_testing - test if device is under test - * @dev: network device - * - * Check if device is under test - */ -static inline bool netif_testing(const struct net_device *dev) -{ - return test_bit(__LINK_STATE_TESTING, &dev->state); -} - - /** * netif_oper_up - test if device is operational * @dev: network device @@ -4316,7 +3500,7 @@ static inline bool netif_oper_up(const struct net_device *dev) * * Check if device has not been removed from system. 
*/ -static inline bool netif_device_present(const struct net_device *dev) +static inline bool netif_device_present(struct net_device *dev) { return test_bit(__LINK_STATE_PRESENT, &dev->state); } @@ -4330,48 +3514,22 @@ void netif_device_attach(struct net_device *dev); */ enum { - NETIF_MSG_DRV_BIT, - NETIF_MSG_PROBE_BIT, - NETIF_MSG_LINK_BIT, - NETIF_MSG_TIMER_BIT, - NETIF_MSG_IFDOWN_BIT, - NETIF_MSG_IFUP_BIT, - NETIF_MSG_RX_ERR_BIT, - NETIF_MSG_TX_ERR_BIT, - NETIF_MSG_TX_QUEUED_BIT, - NETIF_MSG_INTR_BIT, - NETIF_MSG_TX_DONE_BIT, - NETIF_MSG_RX_STATUS_BIT, - NETIF_MSG_PKTDATA_BIT, - NETIF_MSG_HW_BIT, - NETIF_MSG_WOL_BIT, - - /* When you add a new bit above, update netif_msg_class_names array - * in net/ethtool/common.c - */ - NETIF_MSG_CLASS_COUNT, + NETIF_MSG_DRV = 0x0001, + NETIF_MSG_PROBE = 0x0002, + NETIF_MSG_LINK = 0x0004, + NETIF_MSG_TIMER = 0x0008, + NETIF_MSG_IFDOWN = 0x0010, + NETIF_MSG_IFUP = 0x0020, + NETIF_MSG_RX_ERR = 0x0040, + NETIF_MSG_TX_ERR = 0x0080, + NETIF_MSG_TX_QUEUED = 0x0100, + NETIF_MSG_INTR = 0x0200, + NETIF_MSG_TX_DONE = 0x0400, + NETIF_MSG_RX_STATUS = 0x0800, + NETIF_MSG_PKTDATA = 0x1000, + NETIF_MSG_HW = 0x2000, + NETIF_MSG_WOL = 0x4000, }; -/* Both ethtool_ops interface and internal driver implementation use u32 */ -static_assert(NETIF_MSG_CLASS_COUNT <= 32); - -#define __NETIF_MSG_BIT(bit) ((u32)1 << (bit)) -#define __NETIF_MSG(name) __NETIF_MSG_BIT(NETIF_MSG_ ## name ## _BIT) - -#define NETIF_MSG_DRV __NETIF_MSG(DRV) -#define NETIF_MSG_PROBE __NETIF_MSG(PROBE) -#define NETIF_MSG_LINK __NETIF_MSG(LINK) -#define NETIF_MSG_TIMER __NETIF_MSG(TIMER) -#define NETIF_MSG_IFDOWN __NETIF_MSG(IFDOWN) -#define NETIF_MSG_IFUP __NETIF_MSG(IFUP) -#define NETIF_MSG_RX_ERR __NETIF_MSG(RX_ERR) -#define NETIF_MSG_TX_ERR __NETIF_MSG(TX_ERR) -#define NETIF_MSG_TX_QUEUED __NETIF_MSG(TX_QUEUED) -#define NETIF_MSG_INTR __NETIF_MSG(INTR) -#define NETIF_MSG_TX_DONE __NETIF_MSG(TX_DONE) -#define NETIF_MSG_RX_STATUS __NETIF_MSG(RX_STATUS) -#define 
NETIF_MSG_PKTDATA __NETIF_MSG(PKTDATA) -#define NETIF_MSG_HW __NETIF_MSG(HW) -#define NETIF_MSG_WOL __NETIF_MSG(WOL) #define netif_msg_drv(p) ((p)->msg_enable & NETIF_MSG_DRV) #define netif_msg_probe(p) ((p)->msg_enable & NETIF_MSG_PROBE) @@ -4397,7 +3555,7 @@ static inline u32 netif_msg_init(int debug_value, int default_msg_enable_bits) if (debug_value == 0) /* no output */ return 0; /* set low N bits */ - return (1U << debug_value) - 1; + return (1 << debug_value) - 1; } static inline void __netif_tx_lock(struct netdev_queue *txq, int cpu) @@ -4406,17 +3564,6 @@ static inline void __netif_tx_lock(struct netdev_queue *txq, int cpu) txq->xmit_lock_owner = cpu; } -static inline bool __netif_tx_acquire(struct netdev_queue *txq) -{ - __acquire(&txq->_xmit_lock); - return true; -} - -static inline void __netif_tx_release(struct netdev_queue *txq) -{ - __release(&txq->_xmit_lock); -} - static inline void __netif_tx_lock_bh(struct netdev_queue *txq) { spin_lock_bh(&txq->_xmit_lock); @@ -4518,21 +3665,17 @@ static inline void netif_tx_unlock_bh(struct net_device *dev) #define HARD_TX_LOCK(dev, txq, cpu) { \ if ((dev->features & NETIF_F_LLTX) == 0) { \ __netif_tx_lock(txq, cpu); \ - } else { \ - __netif_tx_acquire(txq); \ } \ } #define HARD_TX_TRYLOCK(dev, txq) \ (((dev->features & NETIF_F_LLTX) == 0) ? 
\ __netif_tx_trylock(txq) : \ - __netif_tx_acquire(txq)) + true ) #define HARD_TX_UNLOCK(dev, txq) { \ if ((dev->features & NETIF_F_LLTX) == 0) { \ __netif_tx_unlock(txq); \ - } else { \ - __netif_tx_release(txq); \ } \ } @@ -4543,7 +3686,6 @@ static inline void netif_tx_disable(struct net_device *dev) local_bh_disable(); cpu = smp_processor_id(); - spin_lock(&dev->tx_global_lock); for (i = 0; i < dev->num_tx_queues; i++) { struct netdev_queue *txq = netdev_get_tx_queue(dev, i); @@ -4551,29 +3693,27 @@ static inline void netif_tx_disable(struct net_device *dev) netif_tx_stop_queue(txq); __netif_tx_unlock(txq); } - spin_unlock(&dev->tx_global_lock); local_bh_enable(); } static inline void netif_addr_lock(struct net_device *dev) { - unsigned char nest_level = 0; + spin_lock(&dev->addr_list_lock); +} -#ifdef CONFIG_LOCKDEP - nest_level = dev->nested_level; -#endif - spin_lock_nested(&dev->addr_list_lock, nest_level); +static inline void netif_addr_lock_nested(struct net_device *dev) +{ + int subclass = SINGLE_DEPTH_NESTING; + + if (dev->netdev_ops->ndo_get_lock_subclass) + subclass = dev->netdev_ops->ndo_get_lock_subclass(dev); + + spin_lock_nested(&dev->addr_list_lock, subclass); } static inline void netif_addr_lock_bh(struct net_device *dev) { - unsigned char nest_level = 0; - -#ifdef CONFIG_LOCKDEP - nest_level = dev->nested_level; -#endif - local_bh_disable(); - spin_lock_nested(&dev->addr_list_lock, nest_level); + spin_lock_bh(&dev->addr_list_lock); } static inline void netif_addr_unlock(struct net_device *dev) @@ -4612,8 +3752,6 @@ struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name, int register_netdev(struct net_device *dev); void unregister_netdev(struct net_device *dev); -int devm_register_netdev(struct device *dev, struct net_device *ndev); - /* General hardware address lists handling functions */ int __hw_addr_sync(struct netdev_hw_addr_list *to_list, struct netdev_hw_addr_list *from_list, int addr_len); @@ -4624,16 +3762,6 @@ int 
__hw_addr_sync_dev(struct netdev_hw_addr_list *list, int (*sync)(struct net_device *, const unsigned char *), int (*unsync)(struct net_device *, const unsigned char *)); -int __hw_addr_ref_sync_dev(struct netdev_hw_addr_list *list, - struct net_device *dev, - int (*sync)(struct net_device *, - const unsigned char *, int), - int (*unsync)(struct net_device *, - const unsigned char *, int)); -void __hw_addr_ref_unsync_dev(struct netdev_hw_addr_list *list, - struct net_device *dev, - int (*unsync)(struct net_device *, - const unsigned char *, int)); void __hw_addr_unsync_dev(struct netdev_hw_addr_list *list, struct net_device *dev, int (*unsync)(struct net_device *, @@ -4641,24 +3769,6 @@ void __hw_addr_unsync_dev(struct netdev_hw_addr_list *list, void __hw_addr_init(struct netdev_hw_addr_list *list); /* Functions used for device addresses handling */ -static inline void -__dev_addr_set(struct net_device *dev, const u8 *addr, size_t len) -{ - memcpy(dev->dev_addr, addr, len); -} - -static inline void dev_addr_set(struct net_device *dev, const u8 *addr) -{ - __dev_addr_set(dev, addr, dev->addr_len); -} - -static inline void -dev_addr_mod(struct net_device *dev, unsigned int offset, - const u8 *addr, size_t len) -{ - memcpy(&dev->dev_addr[offset], addr, len); -} - int dev_addr_add(struct net_device *dev, const unsigned char *addr, unsigned char addr_type); int dev_addr_del(struct net_device *dev, const unsigned char *addr, @@ -4758,7 +3868,6 @@ void __dev_set_rx_mode(struct net_device *dev); int dev_set_promiscuity(struct net_device *dev, int inc); int dev_set_allmulti(struct net_device *dev, int inc); void netdev_state_change(struct net_device *dev); -void __netdev_notify_peers(struct net_device *dev); void netdev_notify_peers(struct net_device *dev); void netdev_features_change(struct net_device *dev); /* Load a device via the kmod */ @@ -4767,35 +3876,10 @@ struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev, struct rtnl_link_stats64 *storage); void 
netdev_stats_to_stats64(struct rtnl_link_stats64 *stats64, const struct net_device_stats *netdev_stats); -void dev_fetch_sw_netstats(struct rtnl_link_stats64 *s, - const struct pcpu_sw_netstats __percpu *netstats); -void dev_get_tstats64(struct net_device *dev, struct rtnl_link_stats64 *s); extern int netdev_max_backlog; extern int netdev_tstamp_prequeue; -extern int netdev_unregister_timeout_secs; extern int weight_p; -extern int dev_weight_rx_bias; -extern int dev_weight_tx_bias; -extern int dev_rx_weight; -extern int dev_tx_weight; -extern int gro_normal_batch; - -enum { - NESTED_SYNC_IMM_BIT, - NESTED_SYNC_TODO_BIT, -}; - -#define __NESTED_SYNC_BIT(bit) ((u32)1 << (bit)) -#define __NESTED_SYNC(name) __NESTED_SYNC_BIT(NESTED_SYNC_ ## name ## _BIT) - -#define NESTED_SYNC_IMM __NESTED_SYNC(IMM) -#define NESTED_SYNC_TODO __NESTED_SYNC(TODO) - -struct netdev_nested_priv { - unsigned char flags; - void *data; -}; bool netdev_has_upper_dev(struct net_device *dev, struct net_device *upper_dev); struct net_device *netdev_upper_get_next_dev_rcu(struct net_device *dev, @@ -4803,16 +3887,6 @@ struct net_device *netdev_upper_get_next_dev_rcu(struct net_device *dev, struct net_device *netdev_all_upper_get_next_dev_rcu(struct net_device *dev, struct list_head **iter); -#ifdef CONFIG_LOCKDEP -static LIST_HEAD(net_unlink_list); - -static inline void net_unlink_todo(struct net_device *dev) -{ - if (list_empty(&dev->unlink_list)) - list_add_tail(&dev->unlink_list, &net_unlink_list); -} -#endif - /* iterate through upper list, must be called under RCU read lock */ #define netdev_for_each_upper_dev_rcu(dev, updev, iter) \ for (iter = &(dev)->adj_list.upper, \ @@ -4820,15 +3894,12 @@ static inline void net_unlink_todo(struct net_device *dev) updev; \ updev = netdev_upper_get_next_dev_rcu(dev, &(iter))) -int netdev_walk_all_upper_dev_rcu(struct net_device *dev, - int (*fn)(struct net_device *upper_dev, - struct netdev_nested_priv *priv), - struct netdev_nested_priv *priv); - -bool 
netdev_has_upper_dev_all_rcu(struct net_device *dev, - struct net_device *upper_dev); - -bool netdev_has_any_upper_dev(struct net_device *dev); +/* iterate through upper list, must be called under RCU read lock */ +#define netdev_for_each_all_upper_dev_rcu(dev, updev, iter) \ + for (iter = &(dev)->all_adj_list.upper, \ + updev = netdev_all_upper_get_next_dev_rcu(dev, &(iter)); \ + updev; \ + updev = netdev_all_upper_get_next_dev_rcu(dev, &(iter))) void *netdev_lower_get_next_private(struct net_device *dev, struct list_head **iter); @@ -4856,55 +3927,50 @@ void *netdev_lower_get_next(struct net_device *dev, ldev; \ ldev = netdev_lower_get_next(dev, &(iter))) -struct net_device *netdev_next_lower_dev_rcu(struct net_device *dev, +struct net_device *netdev_all_lower_get_next(struct net_device *dev, struct list_head **iter); -int netdev_walk_all_lower_dev(struct net_device *dev, - int (*fn)(struct net_device *lower_dev, - struct netdev_nested_priv *priv), - struct netdev_nested_priv *priv); -int netdev_walk_all_lower_dev_rcu(struct net_device *dev, - int (*fn)(struct net_device *lower_dev, - struct netdev_nested_priv *priv), - struct netdev_nested_priv *priv); +struct net_device *netdev_all_lower_get_next_rcu(struct net_device *dev, + struct list_head **iter); + +#define netdev_for_each_all_lower_dev(dev, ldev, iter) \ + for (iter = (dev)->all_adj_list.lower.next, \ + ldev = netdev_all_lower_get_next(dev, &(iter)); \ + ldev; \ + ldev = netdev_all_lower_get_next(dev, &(iter))) + +#define netdev_for_each_all_lower_dev_rcu(dev, ldev, iter) \ + for (iter = &(dev)->all_adj_list.lower, \ + ldev = netdev_all_lower_get_next_rcu(dev, &(iter)); \ + ldev; \ + ldev = netdev_all_lower_get_next_rcu(dev, &(iter))) void *netdev_adjacent_get_private(struct list_head *adj_list); void *netdev_lower_get_first_private_rcu(struct net_device *dev); struct net_device *netdev_master_upper_dev_get(struct net_device *dev); struct net_device *netdev_master_upper_dev_get_rcu(struct net_device 
*dev); -int netdev_upper_dev_link(struct net_device *dev, struct net_device *upper_dev, - struct netlink_ext_ack *extack); +int netdev_upper_dev_link(struct net_device *dev, struct net_device *upper_dev); int netdev_master_upper_dev_link(struct net_device *dev, struct net_device *upper_dev, - void *upper_priv, void *upper_info, - struct netlink_ext_ack *extack); + void *upper_priv, void *upper_info); void netdev_upper_dev_unlink(struct net_device *dev, struct net_device *upper_dev); -int netdev_adjacent_change_prepare(struct net_device *old_dev, - struct net_device *new_dev, - struct net_device *dev, - struct netlink_ext_ack *extack); -void netdev_adjacent_change_commit(struct net_device *old_dev, - struct net_device *new_dev, - struct net_device *dev); -void netdev_adjacent_change_abort(struct net_device *old_dev, - struct net_device *new_dev, - struct net_device *dev); void netdev_adjacent_rename_links(struct net_device *dev, char *oldname); void *netdev_lower_dev_get_private(struct net_device *dev, struct net_device *lower_dev); void netdev_lower_state_changed(struct net_device *lower_dev, void *lower_state_info); +int netdev_default_l2upper_neigh_construct(struct net_device *dev, + struct neighbour *n); +void netdev_default_l2upper_neigh_destroy(struct net_device *dev, + struct neighbour *n); /* RSS keys are 40 or 52 bytes long */ #define NETDEV_RSS_KEY_LEN 52 extern u8 netdev_rss_key[NETDEV_RSS_KEY_LEN] __read_mostly; void netdev_rss_key_fill(void *buffer, size_t len); +int dev_get_nest_level(struct net_device *dev); int skb_checksum_help(struct sk_buff *skb); -int skb_crc32c_csum_help(struct sk_buff *skb); -int skb_csum_hwoffload_help(struct sk_buff *skb, - const netdev_features_t features); - struct sk_buff *__skb_gso_segment(struct sk_buff *skb, netdev_features_t features, bool tx_path); struct sk_buff *skb_mac_gso_segment(struct sk_buff *skb, @@ -4923,15 +3989,6 @@ struct netdev_notifier_bonding_info { void netdev_bonding_info_change(struct net_device 
*dev, struct netdev_bonding_info *bonding_info); -#if IS_ENABLED(CONFIG_ETHTOOL_NETLINK) -void ethtool_notify(struct net_device *dev, unsigned int cmd, const void *data); -#else -static inline void ethtool_notify(struct net_device *dev, unsigned int cmd, - const void *data) -{ -} -#endif - static inline struct sk_buff *skb_gso_segment(struct sk_buff *skb, netdev_features_t features) { @@ -4962,11 +4019,23 @@ static inline bool can_checksum_protocol(netdev_features_t features, } } +/* Map an ethertype into IP protocol if possible */ +static inline int eproto_to_ipproto(int eproto) +{ + switch (eproto) { + case htons(ETH_P_IP): + return IPPROTO_IP; + case htons(ETH_P_IPV6): + return IPPROTO_IPV6; + default: + return -1; + } +} + #ifdef CONFIG_BUG -void netdev_rx_csum_fault(struct net_device *dev, struct sk_buff *skb); +void netdev_rx_csum_fault(struct net_device *dev); #else -static inline void netdev_rx_csum_fault(struct net_device *dev, - struct sk_buff *skb) +static inline void netdev_rx_csum_fault(struct net_device *dev) { } #endif @@ -4984,20 +4053,15 @@ static inline netdev_tx_t __netdev_start_xmit(const struct net_device_ops *ops, struct sk_buff *skb, struct net_device *dev, bool more) { - __this_cpu_write(softnet_data.xmit.more, more); + skb->xmit_more = more ? 
1 : 0; return ops->ndo_start_xmit(skb, dev); } -static inline bool netdev_xmit_more(void) -{ - return __this_cpu_read(softnet_data.xmit.more); -} - static inline netdev_tx_t netdev_start_xmit(struct sk_buff *skb, struct net_device *dev, struct netdev_queue *txq, bool more) { const struct net_device_ops *ops = dev->netdev_ops; - netdev_tx_t rc; + int rc; rc = __netdev_start_xmit(ops, skb, dev, more); if (rc == NETDEV_TX_OK) @@ -5006,12 +4070,22 @@ static inline netdev_tx_t netdev_start_xmit(struct sk_buff *skb, struct net_devi return rc; } -int netdev_class_create_file_ns(const struct class_attribute *class_attr, +int netdev_class_create_file_ns(struct class_attribute *class_attr, const void *ns); -void netdev_class_remove_file_ns(const struct class_attribute *class_attr, +void netdev_class_remove_file_ns(struct class_attribute *class_attr, const void *ns); -extern const struct kobj_ns_type_operations net_ns_type_operations; +static inline int netdev_class_create_file(struct class_attribute *class_attr) +{ + return netdev_class_create_file_ns(class_attr, NULL); +} + +static inline void netdev_class_remove_file(struct class_attribute *class_attr) +{ + netdev_class_remove_file_ns(class_attr, NULL); +} + +extern struct kobj_ns_type_operations net_ns_type_operations; const char *netdev_drivername(const struct net_device *dev); @@ -5066,6 +4140,7 @@ static inline bool net_gso_ok(netdev_features_t features, int gso_type) /* check flags correspondence */ BUILD_BUG_ON(SKB_GSO_TCPV4 != (NETIF_F_TSO >> NETIF_F_GSO_SHIFT)); + BUILD_BUG_ON(SKB_GSO_UDP != (NETIF_F_UFO >> NETIF_F_GSO_SHIFT)); BUILD_BUG_ON(SKB_GSO_DODGY != (NETIF_F_GSO_ROBUST >> NETIF_F_GSO_SHIFT)); BUILD_BUG_ON(SKB_GSO_TCP_ECN != (NETIF_F_TSO_ECN >> NETIF_F_GSO_SHIFT)); BUILD_BUG_ON(SKB_GSO_TCP_FIXEDID != (NETIF_F_TSO_MANGLEID >> NETIF_F_GSO_SHIFT)); @@ -5080,10 +4155,6 @@ static inline bool net_gso_ok(netdev_features_t features, int gso_type) BUILD_BUG_ON(SKB_GSO_PARTIAL != (NETIF_F_GSO_PARTIAL >> 
NETIF_F_GSO_SHIFT)); BUILD_BUG_ON(SKB_GSO_TUNNEL_REMCSUM != (NETIF_F_GSO_TUNNEL_REMCSUM >> NETIF_F_GSO_SHIFT)); BUILD_BUG_ON(SKB_GSO_SCTP != (NETIF_F_GSO_SCTP >> NETIF_F_GSO_SHIFT)); - BUILD_BUG_ON(SKB_GSO_ESP != (NETIF_F_GSO_ESP >> NETIF_F_GSO_SHIFT)); - BUILD_BUG_ON(SKB_GSO_UDP != (NETIF_F_GSO_UDP >> NETIF_F_GSO_SHIFT)); - BUILD_BUG_ON(SKB_GSO_UDP_L4 != (NETIF_F_GSO_UDP_L4 >> NETIF_F_GSO_SHIFT)); - BUILD_BUG_ON(SKB_GSO_FRAGLIST != (NETIF_F_GSO_FRAGLIST >> NETIF_F_GSO_SHIFT)); return (features & feature) == feature; } @@ -5136,6 +4207,16 @@ static inline bool netif_is_macvlan_port(const struct net_device *dev) return dev->priv_flags & IFF_MACVLAN_PORT; } +static inline bool netif_is_ipvlan(const struct net_device *dev) +{ + return dev->priv_flags & IFF_IPVLAN_SLAVE; +} + +static inline bool netif_is_ipvlan_port(const struct net_device *dev) +{ + return dev->priv_flags & IFF_IPVLAN_MASTER; +} + static inline bool netif_is_bond_master(const struct net_device *dev) { return dev->flags & IFF_MASTER && dev->priv_flags & IFF_BONDING; @@ -5151,11 +4232,6 @@ static inline bool netif_supports_nofcs(struct net_device *dev) return dev->priv_flags & IFF_SUPP_NOFCS; } -static inline bool netif_has_l3_rx_handler(const struct net_device *dev) -{ - return dev->priv_flags & IFF_L3MDEV_RX_HANDLER; -} - static inline bool netif_is_l3_master(const struct net_device *dev) { return dev->priv_flags & IFF_L3MDEV_MASTER; @@ -5181,16 +4257,6 @@ static inline bool netif_is_ovs_master(const struct net_device *dev) return dev->priv_flags & IFF_OPENVSWITCH; } -static inline bool netif_is_ovs_port(const struct net_device *dev) -{ - return dev->priv_flags & IFF_OVS_DATAPATH; -} - -static inline bool netif_is_any_bridge_port(const struct net_device *dev) -{ - return netif_is_bridge_port(dev) || netif_is_ovs_port(dev); -} - static inline bool netif_is_team_master(const struct net_device *dev) { return dev->priv_flags & IFF_TEAM; @@ -5216,16 +4282,6 @@ static inline bool 
netif_is_rxfh_configured(const struct net_device *dev) return dev->priv_flags & IFF_RXFH_CONFIGURED; } -static inline bool netif_is_failover(const struct net_device *dev) -{ - return dev->priv_flags & IFF_FAILOVER; -} - -static inline bool netif_is_failover_slave(const struct net_device *dev) -{ - return dev->priv_flags & IFF_FAILOVER_SLAVE; -} - /* This device needs to keep skb dst for qdisc enqueue or ndo_start_xmit() */ static inline void netif_keep_dst(struct net_device *dev) { @@ -5239,7 +4295,7 @@ static inline bool netif_reduces_vlan_mtu(struct net_device *dev) return dev->priv_flags & IFF_MACSEC; } -extern struct pernet_operations __net_initdata loopback_net_ops; +extern struct pernet_operations __net_initconst loopback_net_ops; /* Logging, debugging and troubleshooting/diagnostic helpers. */ @@ -5252,11 +4308,6 @@ static inline const char *netdev_name(const struct net_device *dev) return dev->name; } -static inline bool netdev_unregistering(const struct net_device *dev) -{ - return dev->reg_state == NETREG_UNREGISTERING; -} - static inline const char *netdev_reg_state(const struct net_device *dev) { switch (dev->reg_state) { @@ -5272,54 +4323,28 @@ static inline const char *netdev_reg_state(const struct net_device *dev) return " (unknown)"; } -__printf(3, 4) __cold +__printf(3, 4) void netdev_printk(const char *level, const struct net_device *dev, const char *format, ...); -__printf(2, 3) __cold +__printf(2, 3) void netdev_emerg(const struct net_device *dev, const char *format, ...); -__printf(2, 3) __cold +__printf(2, 3) void netdev_alert(const struct net_device *dev, const char *format, ...); -__printf(2, 3) __cold +__printf(2, 3) void netdev_crit(const struct net_device *dev, const char *format, ...); -__printf(2, 3) __cold +__printf(2, 3) void netdev_err(const struct net_device *dev, const char *format, ...); -__printf(2, 3) __cold +__printf(2, 3) void netdev_warn(const struct net_device *dev, const char *format, ...); -__printf(2, 3) __cold 
+__printf(2, 3) void netdev_notice(const struct net_device *dev, const char *format, ...); -__printf(2, 3) __cold +__printf(2, 3) void netdev_info(const struct net_device *dev, const char *format, ...); -#define netdev_level_once(level, dev, fmt, ...) \ -do { \ - static bool __print_once __read_mostly; \ - \ - if (!__print_once) { \ - __print_once = true; \ - netdev_printk(level, dev, fmt, ##__VA_ARGS__); \ - } \ -} while (0) - -#define netdev_emerg_once(dev, fmt, ...) \ - netdev_level_once(KERN_EMERG, dev, fmt, ##__VA_ARGS__) -#define netdev_alert_once(dev, fmt, ...) \ - netdev_level_once(KERN_ALERT, dev, fmt, ##__VA_ARGS__) -#define netdev_crit_once(dev, fmt, ...) \ - netdev_level_once(KERN_CRIT, dev, fmt, ##__VA_ARGS__) -#define netdev_err_once(dev, fmt, ...) \ - netdev_level_once(KERN_ERR, dev, fmt, ##__VA_ARGS__) -#define netdev_warn_once(dev, fmt, ...) \ - netdev_level_once(KERN_WARNING, dev, fmt, ##__VA_ARGS__) -#define netdev_notice_once(dev, fmt, ...) \ - netdev_level_once(KERN_NOTICE, dev, fmt, ##__VA_ARGS__) -#define netdev_info_once(dev, fmt, ...) \ - netdev_level_once(KERN_INFO, dev, fmt, ##__VA_ARGS__) - #define MODULE_ALIAS_NETDEV(device) \ MODULE_ALIAS("netdev-" device) -#if defined(CONFIG_DYNAMIC_DEBUG) || \ - (defined(CONFIG_DYNAMIC_DEBUG_CORE) && defined(DYNAMIC_DEBUG_MODULE)) +#if defined(CONFIG_DYNAMIC_DEBUG) #define netdev_dbg(__dev, format, args...) \ do { \ dynamic_netdev_dbg(__dev, format, ##args); \ @@ -5353,13 +4378,9 @@ do { \ * file/line information and a backtrace. */ #define netdev_WARN(dev, format, args...) \ - WARN(1, "netdevice: %s%s: " format, netdev_name(dev), \ + WARN(1, "netdevice: %s%s\n" format, netdev_name(dev), \ netdev_reg_state(dev), ##args) -#define netdev_WARN_ONCE(dev, format, args...) \ - WARN_ONCE(1, "netdevice: %s%s: " format, netdev_name(dev), \ - netdev_reg_state(dev), ##args) - /* netif printk helpers, similar to netdev_printk */ #define netif_printk(priv, type, level, dev, fmt, args...) 
\ @@ -5389,8 +4410,7 @@ do { \ #define netif_info(priv, type, dev, fmt, args...) \ netif_level(info, priv, type, dev, fmt, ##args) -#if defined(CONFIG_DYNAMIC_DEBUG) || \ - (defined(CONFIG_DYNAMIC_DEBUG_CORE) && defined(DYNAMIC_DEBUG_MODULE)) +#if defined(CONFIG_DYNAMIC_DEBUG) #define netif_dbg(priv, type, netdev, format, args...) \ do { \ if (netif_msg_##type(priv)) \ @@ -5408,15 +4428,6 @@ do { \ }) #endif -/* if @cond then downgrade to debug, else print at @level */ -#define netif_cond_dbg(priv, type, netdev, cond, level, fmt, args...) \ - do { \ - if (cond) \ - netif_dbg(priv, type, netdev, fmt, ##args); \ - else \ - netif_ ## level(priv, type, netdev, fmt, ##args); \ - } while (0) - #if defined(VERBOSE_DEBUG) #define netif_vdbg netif_dbg #else @@ -5435,7 +4446,15 @@ do { \ * Why 16. Because with 16 the only overlap we get on a hash of the * low nibble of the protocol value is RARP/SNAP/X.25. * + * NOTE: That is no longer true with the addition of VLAN tags. Not + * sure which should go first, but I bet it won't make much + * difference if we are running VLANs. The good news is that + * this protocol won't be in the list unless compiled in, so + * the average user (w/out VLANs) will not be adversely affected. 
+ * --BLG + * * 0800 IP + * 8100 802.1Q VLAN * 0001 802.3 * 0002 AX.25 * 0004 802.2 @@ -5450,9 +4469,4 @@ do { \ #define PTYPE_HASH_SIZE (16) #define PTYPE_HASH_MASK (PTYPE_HASH_SIZE - 1) -extern struct list_head ptype_all __read_mostly; -extern struct list_head ptype_base[PTYPE_HASH_SIZE] __read_mostly; - -extern struct net_device *blackhole_netdev; - #endif /* _LINUX_NETDEVICE_H */ diff --git a/include/linux/netfilter.h b/include/linux/netfilter.h index 3fda1a5087..455cf2e7d3 100644 --- a/include/linux/netfilter.h +++ b/include/linux/netfilter.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef __LINUX_NETFILTER_H #define __LINUX_NETFILTER_H @@ -13,9 +12,9 @@ #include #include #include -#include #include +#ifdef CONFIG_NETFILTER static inline int NF_DROP_GETERR(int verdict) { return -(verdict >> NF_VERDICT_QBITS); @@ -24,36 +23,20 @@ static inline int NF_DROP_GETERR(int verdict) static inline int nf_inet_addr_cmp(const union nf_inet_addr *a1, const union nf_inet_addr *a2) { -#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) && BITS_PER_LONG == 64 - const unsigned long *ul1 = (const unsigned long *)a1; - const unsigned long *ul2 = (const unsigned long *)a2; - - return ((ul1[0] ^ ul2[0]) | (ul1[1] ^ ul2[1])) == 0UL; -#else return a1->all[0] == a2->all[0] && a1->all[1] == a2->all[1] && a1->all[2] == a2->all[2] && a1->all[3] == a2->all[3]; -#endif } static inline void nf_inet_addr_mask(const union nf_inet_addr *a1, union nf_inet_addr *result, const union nf_inet_addr *mask) { -#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) && BITS_PER_LONG == 64 - const unsigned long *ua = (const unsigned long *)a1; - unsigned long *ur = (unsigned long *)result; - const unsigned long *um = (const unsigned long *)mask; - - ur[0] = ua[0] & um[0]; - ur[1] = ua[1] & um[1]; -#else result->all[0] = a1->all[0] & mask->all[0]; result->all[1] = a1->all[1] & mask->all[1]; result->all[2] = a1->all[2] & mask->all[2]; result->all[3] = a1->all[3] & mask->all[3]; -#endif } int 
netfilter_init(void); @@ -65,86 +48,43 @@ struct nf_hook_ops; struct sock; struct nf_hook_state { - u8 hook; - u8 pf; + unsigned int hook; + int thresh; + u_int8_t pf; struct net_device *in; struct net_device *out; struct sock *sk; struct net *net; + struct nf_hook_entry __rcu *hook_entries; int (*okfn)(struct net *, struct sock *, struct sk_buff *); }; typedef unsigned int nf_hookfn(void *priv, struct sk_buff *skb, const struct nf_hook_state *state); -enum nf_hook_ops_type { - NF_HOOK_OP_UNDEFINED, - NF_HOOK_OP_NF_TABLES, -}; - struct nf_hook_ops { + struct list_head list; + /* User fills in from here down. */ nf_hookfn *hook; struct net_device *dev; void *priv; - u8 pf; - enum nf_hook_ops_type hook_ops_type:8; + u_int8_t pf; unsigned int hooknum; /* Hooks are ordered in ascending priority. */ int priority; }; struct nf_hook_entry { - nf_hookfn *hook; - void *priv; + struct nf_hook_entry __rcu *next; + struct nf_hook_ops ops; + const struct nf_hook_ops *orig_ops; }; -struct nf_hook_entries_rcu_head { - struct rcu_head head; - void *allocation; -}; - -struct nf_hook_entries { - u16 num_hook_entries; - /* padding */ - struct nf_hook_entry hooks[]; - - /* trailer: pointers to original orig_ops of each hook, - * followed by rcu_head and scratch space used for freeing - * the structure via call_rcu. - * - * This is not part of struct nf_hook_entry since its only - * needed in slow path (hook register/unregister): - * const struct nf_hook_ops *orig_ops[] - * - * For the same reason, we store this at end -- its - * only needed when a hook is deleted, not during - * packet path processing: - * struct nf_hook_entries_rcu_head head - */ -}; - -#ifdef CONFIG_NETFILTER -static inline struct nf_hook_ops **nf_hook_entries_get_hook_ops(const struct nf_hook_entries *e) -{ - unsigned int n = e->num_hook_entries; - const void *hook_end; - - hook_end = &e->hooks[n]; /* this is *past* ->hooks[]! 
*/ - - return (struct nf_hook_ops **)hook_end; -} - -static inline int -nf_hook_entry_hookfn(const struct nf_hook_entry *entry, struct sk_buff *skb, - struct nf_hook_state *state) -{ - return entry->hook(entry->priv, skb, state); -} - static inline void nf_hook_state_init(struct nf_hook_state *p, + struct nf_hook_entry *hook_entry, unsigned int hook, - u_int8_t pf, + int thresh, u_int8_t pf, struct net_device *indev, struct net_device *outdev, struct sock *sk, @@ -152,11 +92,13 @@ static inline void nf_hook_state_init(struct nf_hook_state *p, int (*okfn)(struct net *, struct sock *, struct sk_buff *)) { p->hook = hook; + p->thresh = thresh; p->pf = pf; p->in = indev; p->out = outdev; p->sk = sk; p->net = net; + RCU_INIT_POINTER(p->hook_entries, hook_entry); p->okfn = okfn; } @@ -170,14 +112,21 @@ struct nf_sockopt_ops { /* Non-inclusive ranges: use 0/0/NULL to never get called. */ int set_optmin; int set_optmax; - int (*set)(struct sock *sk, int optval, sockptr_t arg, - unsigned int len); + int (*set)(struct sock *sk, int optval, void __user *user, unsigned int len); +#ifdef CONFIG_COMPAT + int (*compat_set)(struct sock *sk, int optval, + void __user *user, unsigned int len); +#endif int get_optmin; int get_optmax; int (*get)(struct sock *sk, int optval, void __user *user, int *len); +#ifdef CONFIG_COMPAT + int (*compat_get)(struct sock *sk, int optval, + void __user *user, int *len); +#endif /* Use the module struct to lock set/get code in place */ struct module *owner; -}; +} __do_const; /* Function to register/unregister hook points. 
*/ int nf_register_net_hook(struct net *net, const struct nf_hook_ops *ops); @@ -187,36 +136,44 @@ int nf_register_net_hooks(struct net *net, const struct nf_hook_ops *reg, void nf_unregister_net_hooks(struct net *net, const struct nf_hook_ops *reg, unsigned int n); +int nf_register_hook(struct nf_hook_ops *reg); +void nf_unregister_hook(struct nf_hook_ops *reg); +int nf_register_hooks(struct nf_hook_ops *reg, unsigned int n); +void nf_unregister_hooks(struct nf_hook_ops *reg, unsigned int n); +int _nf_register_hooks(struct nf_hook_ops *reg, unsigned int n); +void _nf_unregister_hooks(struct nf_hook_ops *reg, unsigned int n); + /* Functions to register get/setsockopt ranges (non-inclusive). You need to check permissions yourself! */ int nf_register_sockopt(struct nf_sockopt_ops *reg); void nf_unregister_sockopt(struct nf_sockopt_ops *reg); -#ifdef CONFIG_JUMP_LABEL +#ifdef HAVE_JUMP_LABEL extern struct static_key nf_hooks_needed[NFPROTO_NUMPROTO][NF_MAX_HOOKS]; #endif -int nf_hook_slow(struct sk_buff *skb, struct nf_hook_state *state, - const struct nf_hook_entries *e, unsigned int i); +int nf_hook_slow(struct sk_buff *skb, struct nf_hook_state *state); -void nf_hook_slow_list(struct list_head *head, struct nf_hook_state *state, - const struct nf_hook_entries *e); /** - * nf_hook - call a netfilter hook + * nf_hook_thresh - call a netfilter hook * * Returns 1 if the hook has allowed the packet to pass. The function * okfn must be invoked by the caller in this case. Any other return * value indicates the packet has been consumed by the hook. 
*/ -static inline int nf_hook(u_int8_t pf, unsigned int hook, struct net *net, - struct sock *sk, struct sk_buff *skb, - struct net_device *indev, struct net_device *outdev, - int (*okfn)(struct net *, struct sock *, struct sk_buff *)) +static inline int nf_hook_thresh(u_int8_t pf, unsigned int hook, + struct net *net, + struct sock *sk, + struct sk_buff *skb, + struct net_device *indev, + struct net_device *outdev, + int (*okfn)(struct net *, struct sock *, struct sk_buff *), + int thresh) { - struct nf_hook_entries *hook_head = NULL; + struct nf_hook_entry *hook_head; int ret = 1; -#ifdef CONFIG_JUMP_LABEL +#ifdef HAVE_JUMP_LABEL if (__builtin_constant_p(pf) && __builtin_constant_p(hook) && !static_key_false(&nf_hooks_needed[pf][hook])) @@ -224,48 +181,28 @@ static inline int nf_hook(u_int8_t pf, unsigned int hook, struct net *net, #endif rcu_read_lock(); - switch (pf) { - case NFPROTO_IPV4: - hook_head = rcu_dereference(net->nf.hooks_ipv4[hook]); - break; - case NFPROTO_IPV6: - hook_head = rcu_dereference(net->nf.hooks_ipv6[hook]); - break; - case NFPROTO_ARP: -#ifdef CONFIG_NETFILTER_FAMILY_ARP - if (WARN_ON_ONCE(hook >= ARRAY_SIZE(net->nf.hooks_arp))) - break; - hook_head = rcu_dereference(net->nf.hooks_arp[hook]); -#endif - break; - case NFPROTO_BRIDGE: -#ifdef CONFIG_NETFILTER_FAMILY_BRIDGE - hook_head = rcu_dereference(net->nf.hooks_bridge[hook]); -#endif - break; -#if IS_ENABLED(CONFIG_DECNET) - case NFPROTO_DECNET: - hook_head = rcu_dereference(net->nf.hooks_decnet[hook]); - break; -#endif - default: - WARN_ON_ONCE(1); - break; - } - + hook_head = rcu_dereference(net->nf.hooks[pf][hook]); if (hook_head) { struct nf_hook_state state; - nf_hook_state_init(&state, hook, pf, indev, outdev, - sk, net, okfn); + nf_hook_state_init(&state, hook_head, hook, thresh, + pf, indev, outdev, sk, net, okfn); - ret = nf_hook_slow(skb, &state, hook_head, 0); + ret = nf_hook_slow(skb, &state); } rcu_read_unlock(); return ret; } +static inline int nf_hook(u_int8_t pf, 
unsigned int hook, struct net *net, + struct sock *sk, struct sk_buff *skb, + struct net_device *indev, struct net_device *outdev, + int (*okfn)(struct net *, struct sock *, struct sk_buff *)) +{ + return nf_hook_thresh(pf, hook, net, sk, skb, indev, outdev, okfn, INT_MIN); +} + /* Activate hook; either okfn or kfree_skb called, unless a hook returns NF_STOLEN (in which case, it's up to the hook to deal with the consequences). @@ -283,6 +220,19 @@ static inline int nf_hook(u_int8_t pf, unsigned int hook, struct net *net, coders :) */ +static inline int +NF_HOOK_THRESH(uint8_t pf, unsigned int hook, struct net *net, struct sock *sk, + struct sk_buff *skb, struct net_device *in, + struct net_device *out, + int (*okfn)(struct net *, struct sock *, struct sk_buff *), + int thresh) +{ + int ret = nf_hook_thresh(pf, hook, net, sk, skb, in, out, okfn, thresh); + if (ret == 1) + ret = okfn(net, sk, skb); + return ret; +} + static inline int NF_HOOK_COND(uint8_t pf, unsigned int hook, struct net *net, struct sock *sk, struct sk_buff *skb, struct net_device *in, struct net_device *out, @@ -292,7 +242,7 @@ NF_HOOK_COND(uint8_t pf, unsigned int hook, struct net *net, struct sock *sk, int ret; if (!cond || - ((ret = nf_hook(pf, hook, net, sk, skb, in, out, okfn)) == 1)) + ((ret = nf_hook_thresh(pf, hook, net, sk, skb, in, out, okfn, INT_MIN)) == 1)) ret = okfn(net, sk, skb); return ret; } @@ -302,97 +252,101 @@ NF_HOOK(uint8_t pf, unsigned int hook, struct net *net, struct sock *sk, struct struct net_device *in, struct net_device *out, int (*okfn)(struct net *, struct sock *, struct sk_buff *)) { - int ret = nf_hook(pf, hook, net, sk, skb, in, out, okfn); - if (ret == 1) - ret = okfn(net, sk, skb); - return ret; -} - -static inline void -NF_HOOK_LIST(uint8_t pf, unsigned int hook, struct net *net, struct sock *sk, - struct list_head *head, struct net_device *in, struct net_device *out, - int (*okfn)(struct net *, struct sock *, struct sk_buff *)) -{ - struct nf_hook_entries 
*hook_head = NULL; - -#ifdef CONFIG_JUMP_LABEL - if (__builtin_constant_p(pf) && - __builtin_constant_p(hook) && - !static_key_false(&nf_hooks_needed[pf][hook])) - return; -#endif - - rcu_read_lock(); - switch (pf) { - case NFPROTO_IPV4: - hook_head = rcu_dereference(net->nf.hooks_ipv4[hook]); - break; - case NFPROTO_IPV6: - hook_head = rcu_dereference(net->nf.hooks_ipv6[hook]); - break; - default: - WARN_ON_ONCE(1); - break; - } - - if (hook_head) { - struct nf_hook_state state; - - nf_hook_state_init(&state, hook, pf, in, out, sk, net, okfn); - - nf_hook_slow_list(head, &state, hook_head); - } - rcu_read_unlock(); + return NF_HOOK_THRESH(pf, hook, net, sk, skb, in, out, okfn, INT_MIN); } /* Call setsockopt() */ -int nf_setsockopt(struct sock *sk, u_int8_t pf, int optval, sockptr_t opt, +int nf_setsockopt(struct sock *sk, u_int8_t pf, int optval, char __user *opt, unsigned int len); int nf_getsockopt(struct sock *sk, u_int8_t pf, int optval, char __user *opt, int *len); +#ifdef CONFIG_COMPAT +int compat_nf_setsockopt(struct sock *sk, u_int8_t pf, int optval, + char __user *opt, unsigned int len); +int compat_nf_getsockopt(struct sock *sk, u_int8_t pf, int optval, + char __user *opt, int *len); +#endif + +/* Call this before modifying an existing packet: ensures it is + modifiable and linear to the point you care about (writable_len). + Returns true or false. 
*/ +int skb_make_writable(struct sk_buff *skb, unsigned int writable_len); struct flowi; struct nf_queue_entry; -__sum16 nf_checksum(struct sk_buff *skb, unsigned int hook, - unsigned int dataoff, u_int8_t protocol, - unsigned short family); - -__sum16 nf_checksum_partial(struct sk_buff *skb, unsigned int hook, - unsigned int dataoff, unsigned int len, - u_int8_t protocol, unsigned short family); -int nf_route(struct net *net, struct dst_entry **dst, struct flowi *fl, - bool strict, unsigned short family); -int nf_reroute(struct sk_buff *skb, struct nf_queue_entry *entry); - -#include - -struct nf_conn; -enum nf_nat_manip_type; -struct nlattr; -enum ip_conntrack_dir; - -struct nf_nat_hook { - int (*parse_nat_setup)(struct nf_conn *ct, enum nf_nat_manip_type manip, - const struct nlattr *attr); - void (*decode_session)(struct sk_buff *skb, struct flowi *fl); - unsigned int (*manip_pkt)(struct sk_buff *skb, struct nf_conn *ct, - enum nf_nat_manip_type mtype, - enum ip_conntrack_dir dir); +struct nf_afinfo { + unsigned short family; + __sum16 (*checksum)(struct sk_buff *skb, unsigned int hook, + unsigned int dataoff, u_int8_t protocol); + __sum16 (*checksum_partial)(struct sk_buff *skb, + unsigned int hook, + unsigned int dataoff, + unsigned int len, + u_int8_t protocol); + int (*route)(struct net *net, struct dst_entry **dst, + struct flowi *fl, bool strict); + void (*saveroute)(const struct sk_buff *skb, + struct nf_queue_entry *entry); + int (*reroute)(struct net *net, struct sk_buff *skb, + const struct nf_queue_entry *entry); + int route_key_size; }; -extern struct nf_nat_hook __rcu *nf_nat_hook; +extern const struct nf_afinfo __rcu *nf_afinfo[NFPROTO_NUMPROTO]; +static inline const struct nf_afinfo *nf_get_afinfo(unsigned short family) +{ + return rcu_dereference(nf_afinfo[family]); +} + +static inline __sum16 +nf_checksum(struct sk_buff *skb, unsigned int hook, unsigned int dataoff, + u_int8_t protocol, unsigned short family) +{ + const struct nf_afinfo 
*afinfo; + __sum16 csum = 0; + + rcu_read_lock(); + afinfo = nf_get_afinfo(family); + if (afinfo) + csum = afinfo->checksum(skb, hook, dataoff, protocol); + rcu_read_unlock(); + return csum; +} + +static inline __sum16 +nf_checksum_partial(struct sk_buff *skb, unsigned int hook, + unsigned int dataoff, unsigned int len, + u_int8_t protocol, unsigned short family) +{ + const struct nf_afinfo *afinfo; + __sum16 csum = 0; + + rcu_read_lock(); + afinfo = nf_get_afinfo(family); + if (afinfo) + csum = afinfo->checksum_partial(skb, hook, dataoff, len, + protocol); + rcu_read_unlock(); + return csum; +} + +int nf_register_afinfo(const struct nf_afinfo *afinfo); +void nf_unregister_afinfo(const struct nf_afinfo *afinfo); + +#include +extern void (*nf_nat_decode_session_hook)(struct sk_buff *, struct flowi *); static inline void nf_nat_decode_session(struct sk_buff *skb, struct flowi *fl, u_int8_t family) { -#if IS_ENABLED(CONFIG_NF_NAT) - struct nf_nat_hook *nat_hook; +#ifdef CONFIG_NF_NAT_NEEDED + void (*decodefn)(struct sk_buff *, struct flowi *); rcu_read_lock(); - nat_hook = rcu_dereference(nf_nat_hook); - if (nat_hook && nat_hook->decode_session) - nat_hook->decode_session(skb, fl); + decodefn = rcu_dereference(nf_nat_decode_session_hook); + if (decodefn) + decodefn(skb, fl); rcu_read_unlock(); #endif } @@ -415,14 +369,6 @@ NF_HOOK(uint8_t pf, unsigned int hook, struct net *net, struct sock *sk, return okfn(net, sk, skb); } -static inline void -NF_HOOK_LIST(uint8_t pf, unsigned int hook, struct net *net, struct sock *sk, - struct list_head *head, struct net_device *in, struct net_device *out, - int (*okfn)(struct net *, struct sock *, struct sk_buff *)) -{ - /* nothing to do */ -} - static inline int nf_hook(u_int8_t pf, unsigned int hook, struct net *net, struct sock *sk, struct sk_buff *skb, struct net_device *indev, struct net_device *outdev, @@ -437,38 +383,23 @@ nf_nat_decode_session(struct sk_buff *skb, struct flowi *fl, u_int8_t family) } #endif 
/*CONFIG_NETFILTER*/ -#if IS_ENABLED(CONFIG_NF_CONNTRACK) +#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE) #include extern void (*ip_ct_attach)(struct sk_buff *, const struct sk_buff *) __rcu; void nf_ct_attach(struct sk_buff *, const struct sk_buff *); -struct nf_conntrack_tuple; -bool nf_ct_get_tuple_skb(struct nf_conntrack_tuple *dst_tuple, - const struct sk_buff *skb); +extern void (*nf_ct_destroy)(struct nf_conntrack *) __rcu; #else static inline void nf_ct_attach(struct sk_buff *new, struct sk_buff *skb) {} -struct nf_conntrack_tuple; -static inline bool nf_ct_get_tuple_skb(struct nf_conntrack_tuple *dst_tuple, - const struct sk_buff *skb) -{ - return false; -} #endif struct nf_conn; enum ip_conntrack_info; - -struct nf_ct_hook { - int (*update)(struct net *net, struct sk_buff *skb); - void (*destroy)(struct nf_conntrack *); - bool (*get_tuple_skb)(struct nf_conntrack_tuple *, - const struct sk_buff *); -}; -extern struct nf_ct_hook __rcu *nf_ct_hook; - struct nlattr; struct nfnl_ct_hook { + struct nf_conn *(*get_ct)(const struct sk_buff *skb, + enum ip_conntrack_info *ctinfo); size_t (*build_size)(const struct nf_conn *ct); int (*build)(struct sk_buff *skb, struct nf_conn *ct, enum ip_conntrack_info ctinfo, diff --git a/include/linux/netfilter/ipset/ip_set.h b/include/linux/netfilter/ipset/ip_set.h index ada1296c87..5266f3bf4b 100644 --- a/include/linux/netfilter/ipset/ip_set.h +++ b/include/linux/netfilter/ipset/ip_set.h @@ -1,8 +1,11 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* Copyright (C) 2000-2002 Joakim Axelsson * Patrick Schaaf * Martin Josefsson - * Copyright (C) 2003-2013 Jozsef Kadlecsik + * Copyright (C) 2003-2013 Jozsef Kadlecsik + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
*/ #ifndef _IP_SET_H #define _IP_SET_H @@ -76,12 +79,10 @@ enum ip_set_ext_id { IPSET_EXT_ID_MAX, }; -struct ip_set; - /* Extension type */ struct ip_set_ext_type { /* Destroy extension private data (can be NULL) */ - void (*destroy)(struct ip_set *set, void *ext); + void (*destroy)(void *ext); enum ip_set_extension type; enum ipset_cadt_flags flag; /* Size and minimal alignment */ @@ -91,14 +92,25 @@ struct ip_set_ext_type { extern const struct ip_set_ext_type ip_set_extensions[]; +struct ip_set_ext { + u64 packets; + u64 bytes; + u32 timeout; + u32 skbmark; + u32 skbmarkmask; + u32 skbprio; + u16 skbqueue; + char *comment; +}; + struct ip_set_counter { - atomic64_t bytes; - atomic64_t packets; + atomic64_unchecked_t bytes; + atomic64_unchecked_t packets; }; struct ip_set_comment_rcu { struct rcu_head rcu; - char str[]; + char str[0]; }; struct ip_set_comment { @@ -110,19 +122,9 @@ struct ip_set_skbinfo { u32 skbmarkmask; u32 skbprio; u16 skbqueue; - u16 __pad; }; -struct ip_set_ext { - struct ip_set_skbinfo skbinfo; - u64 packets; - u64 bytes; - char *comment; - u32 timeout; - u8 packets_op; - u8 bytes_op; - bool target; -}; +struct ip_set; #define ext_timeout(e, s) \ ((unsigned long *)(((void *)(e)) + (s)->offset[IPSET_EXT_ID_TIMEOUT])) @@ -186,22 +188,8 @@ struct ip_set_type_variant { /* Return true if "b" set is the same as "a" * according to the create set parameters */ bool (*same_set)(const struct ip_set *a, const struct ip_set *b); - /* Region-locking is used */ - bool region_lock; }; -struct ip_set_region { - spinlock_t lock; /* Region lock */ - size_t ext_size; /* Size of the dynamic extensions */ - u32 elements; /* Number of elements vs timeout */ -}; - -/* Max range where every element is added/deleted in one step */ -#define IPSET_MAX_RANGE (1<<20) - -/* The max revision number supported by any set type + 1 */ -#define IPSET_REVISION_MAX 9 - /* The core set type structure */ struct ip_set_type { struct list_head list; @@ -219,8 +207,6 @@ struct 
ip_set_type { u8 family; /* Type revisions */ u8 revision_min, revision_max; - /* Revision-specific supported (create) flags */ - u8 create_flags[IPSET_REVISION_MAX+1]; /* Set features to control swapping */ u16 features; @@ -266,10 +252,6 @@ struct ip_set { u8 flags; /* Default timeout value, if enabled */ u32 timeout; - /* Number of elements (vs timeout) */ - u32 elements; - /* Size of the dynamic extensions (vs timeout) */ - size_t ext_size; /* Element data size */ size_t dsize; /* Offsets to extensions in elements */ @@ -284,30 +266,147 @@ ip_set_ext_destroy(struct ip_set *set, void *data) /* Check that the extension is enabled for the set and * call it's destroy function for its extension part in data. */ - if (SET_WITH_COMMENT(set)) { - struct ip_set_comment *c = ext_comment(data, set); + if (SET_WITH_COMMENT(set)) + ip_set_extensions[IPSET_EXT_ID_COMMENT].destroy( + ext_comment(data, set)); +} - ip_set_extensions[IPSET_EXT_ID_COMMENT].destroy(set, c); +static inline int +ip_set_put_flags(struct sk_buff *skb, struct ip_set *set) +{ + u32 cadt_flags = 0; + + if (SET_WITH_TIMEOUT(set)) + if (unlikely(nla_put_net32(skb, IPSET_ATTR_TIMEOUT, + htonl(set->timeout)))) + return -EMSGSIZE; + if (SET_WITH_COUNTER(set)) + cadt_flags |= IPSET_FLAG_WITH_COUNTERS; + if (SET_WITH_COMMENT(set)) + cadt_flags |= IPSET_FLAG_WITH_COMMENT; + if (SET_WITH_SKBINFO(set)) + cadt_flags |= IPSET_FLAG_WITH_SKBINFO; + if (SET_WITH_FORCEADD(set)) + cadt_flags |= IPSET_FLAG_WITH_FORCEADD; + + if (!cadt_flags) + return 0; + return nla_put_net32(skb, IPSET_ATTR_CADT_FLAGS, htonl(cadt_flags)); +} + +static inline void +ip_set_add_bytes(u64 bytes, struct ip_set_counter *counter) +{ + atomic64_add_unchecked((long long)bytes, &(counter)->bytes); +} + +static inline void +ip_set_add_packets(u64 packets, struct ip_set_counter *counter) +{ + atomic64_add_unchecked((long long)packets, &(counter)->packets); +} + +static inline u64 +ip_set_get_bytes(const struct ip_set_counter *counter) +{ + return 
(u64)atomic64_read_unchecked(&(counter)->bytes); +} + +static inline u64 +ip_set_get_packets(const struct ip_set_counter *counter) +{ + return (u64)atomic64_read_unchecked(&(counter)->packets); +} + +static inline void +ip_set_update_counter(struct ip_set_counter *counter, + const struct ip_set_ext *ext, + struct ip_set_ext *mext, u32 flags) +{ + if (ext->packets != ULLONG_MAX && + !(flags & IPSET_FLAG_SKIP_COUNTER_UPDATE)) { + ip_set_add_bytes(ext->bytes, counter); + ip_set_add_packets(ext->packets, counter); + } + if (flags & IPSET_FLAG_MATCH_COUNTERS) { + mext->packets = ip_set_get_packets(counter); + mext->bytes = ip_set_get_bytes(counter); } } -int ip_set_put_flags(struct sk_buff *skb, struct ip_set *set); +static inline void +ip_set_get_skbinfo(struct ip_set_skbinfo *skbinfo, + const struct ip_set_ext *ext, + struct ip_set_ext *mext, u32 flags) +{ + mext->skbmark = skbinfo->skbmark; + mext->skbmarkmask = skbinfo->skbmarkmask; + mext->skbprio = skbinfo->skbprio; + mext->skbqueue = skbinfo->skbqueue; +} +static inline bool +ip_set_put_skbinfo(struct sk_buff *skb, struct ip_set_skbinfo *skbinfo) +{ + /* Send nonzero parameters only */ + return ((skbinfo->skbmark || skbinfo->skbmarkmask) && + nla_put_net64(skb, IPSET_ATTR_SKBMARK, + cpu_to_be64((u64)skbinfo->skbmark << 32 | + skbinfo->skbmarkmask), + IPSET_ATTR_PAD)) || + (skbinfo->skbprio && + nla_put_net32(skb, IPSET_ATTR_SKBPRIO, + cpu_to_be32(skbinfo->skbprio))) || + (skbinfo->skbqueue && + nla_put_net16(skb, IPSET_ATTR_SKBQUEUE, + cpu_to_be16(skbinfo->skbqueue))); +} + +static inline void +ip_set_init_skbinfo(struct ip_set_skbinfo *skbinfo, + const struct ip_set_ext *ext) +{ + skbinfo->skbmark = ext->skbmark; + skbinfo->skbmarkmask = ext->skbmarkmask; + skbinfo->skbprio = ext->skbprio; + skbinfo->skbqueue = ext->skbqueue; +} + +static inline bool +ip_set_put_counter(struct sk_buff *skb, struct ip_set_counter *counter) +{ + return nla_put_net64(skb, IPSET_ATTR_BYTES, + cpu_to_be64(ip_set_get_bytes(counter)), 
+ IPSET_ATTR_PAD) || + nla_put_net64(skb, IPSET_ATTR_PACKETS, + cpu_to_be64(ip_set_get_packets(counter)), + IPSET_ATTR_PAD); +} + +static inline void +ip_set_init_counter(struct ip_set_counter *counter, + const struct ip_set_ext *ext) +{ + if (ext->bytes != ULLONG_MAX) + atomic64_set_unchecked(&(counter)->bytes, (long long)(ext->bytes)); + if (ext->packets != ULLONG_MAX) + atomic64_set_unchecked(&(counter)->packets, (long long)(ext->packets)); +} /* Netlink CB args */ enum { IPSET_CB_NET = 0, /* net namespace */ - IPSET_CB_PROTO, /* ipset protocol */ IPSET_CB_DUMP, /* dump single set/all sets */ IPSET_CB_INDEX, /* set index */ IPSET_CB_PRIVATE, /* set private data */ IPSET_CB_ARG0, /* type specific */ + IPSET_CB_ARG1, }; /* register and unregister set references */ extern ip_set_id_t ip_set_get_byname(struct net *net, const char *name, struct ip_set **set); extern void ip_set_put_byindex(struct net *net, ip_set_id_t index); -extern void ip_set_name_byindex(struct net *net, ip_set_id_t index, char *name); +extern const char *ip_set_name_byindex(struct net *net, ip_set_id_t index); extern ip_set_id_t ip_set_nfnl_get_byindex(struct net *net, ip_set_id_t index); extern void ip_set_nfnl_put(struct net *net, ip_set_id_t index); @@ -332,12 +431,6 @@ extern size_t ip_set_elem_len(struct ip_set *set, struct nlattr *tb[], size_t len, size_t align); extern int ip_set_get_extensions(struct ip_set *set, struct nlattr *tb[], struct ip_set_ext *ext); -extern int ip_set_put_extensions(struct sk_buff *skb, const struct ip_set *set, - const void *e, bool active); -extern bool ip_set_match_extensions(struct ip_set *set, - const struct ip_set_ext *ext, - struct ip_set_ext *mext, - u32 flags, void *data); static inline int ip_set_get_hostipaddr4(struct nlattr *nla, u32 *ipaddr) @@ -394,30 +487,33 @@ ip_set_get_h16(const struct nlattr *attr) return ntohs(nla_get_be16(attr)); } +#define ipset_nest_start(skb, attr) nla_nest_start(skb, attr | NLA_F_NESTED) +#define ipset_nest_end(skb, 
start) nla_nest_end(skb, start) + static inline int nla_put_ipaddr4(struct sk_buff *skb, int type, __be32 ipaddr) { - struct nlattr *__nested = nla_nest_start(skb, type); + struct nlattr *__nested = ipset_nest_start(skb, type); int ret; if (!__nested) return -EMSGSIZE; ret = nla_put_in_addr(skb, IPSET_ATTR_IPADDR_IPV4, ipaddr); if (!ret) - nla_nest_end(skb, __nested); + ipset_nest_end(skb, __nested); return ret; } static inline int nla_put_ipaddr6(struct sk_buff *skb, int type, const struct in6_addr *ipaddrptr) { - struct nlattr *__nested = nla_nest_start(skb, type); + struct nlattr *__nested = ipset_nest_start(skb, type); int ret; if (!__nested) return -EMSGSIZE; ret = nla_put_in6_addr(skb, IPSET_ATTR_IPADDR_IPV6, ipaddrptr); if (!ret) - nla_nest_end(skb, __nested); + ipset_nest_end(skb, __nested); return ret; } @@ -441,82 +537,22 @@ ip6addrptr(const struct sk_buff *skb, bool src, struct in6_addr *addr) sizeof(*addr)); } -/* How often should the gc be run by default */ -#define IPSET_GC_TIME (3 * 60) - -/* Timeout period depending on the timeout value of the given set */ -#define IPSET_GC_PERIOD(timeout) \ - ((timeout/3) ? min_t(u32, (timeout)/3, IPSET_GC_TIME) : 1) - -/* Entry is set with no timeout value */ -#define IPSET_ELEM_PERMANENT 0 - -/* Set is defined with timeout support: timeout value may be 0 */ -#define IPSET_NO_TIMEOUT UINT_MAX - -/* Max timeout value, see msecs_to_jiffies() in jiffies.h */ -#define IPSET_MAX_TIMEOUT (UINT_MAX >> 1)/MSEC_PER_SEC - -#define ip_set_adt_opt_timeout(opt, set) \ -((opt)->ext.timeout != IPSET_NO_TIMEOUT ? 
(opt)->ext.timeout : (set)->timeout) - -static inline unsigned int -ip_set_timeout_uget(struct nlattr *tb) +/* Calculate the bytes required to store the inclusive range of a-b */ +static inline int +bitmap_bytes(u32 a, u32 b) { - unsigned int timeout = ip_set_get_h32(tb); - - /* Normalize to fit into jiffies */ - if (timeout > IPSET_MAX_TIMEOUT) - timeout = IPSET_MAX_TIMEOUT; - - return timeout; + return 4 * ((((b - a + 8) / 8) + 3) / 4); } -static inline bool -ip_set_timeout_expired(const unsigned long *t) -{ - return *t != IPSET_ELEM_PERMANENT && time_is_before_jiffies(*t); -} +#include +#include -static inline void -ip_set_timeout_set(unsigned long *timeout, u32 value) -{ - unsigned long t; - - if (!value) { - *timeout = IPSET_ELEM_PERMANENT; - return; - } - - t = msecs_to_jiffies(value * MSEC_PER_SEC) + jiffies; - if (t == IPSET_ELEM_PERMANENT) - /* Bingo! :-) */ - t--; - *timeout = t; -} - -void ip_set_init_comment(struct ip_set *set, struct ip_set_comment *comment, - const struct ip_set_ext *ext); - -static inline void -ip_set_init_counter(struct ip_set_counter *counter, - const struct ip_set_ext *ext) -{ - if (ext->bytes != ULLONG_MAX) - atomic64_set(&(counter)->bytes, (long long)(ext->bytes)); - if (ext->packets != ULLONG_MAX) - atomic64_set(&(counter)->packets, (long long)(ext->packets)); -} - -static inline void -ip_set_init_skbinfo(struct ip_set_skbinfo *skbinfo, - const struct ip_set_ext *ext) -{ - *skbinfo = ext->skbinfo; -} +int +ip_set_put_extensions(struct sk_buff *skb, const struct ip_set *set, + const void *e, bool active); #define IP_SET_INIT_KEXT(skb, opt, set) \ - { .bytes = (skb)->len, .packets = 1, .target = true,\ + { .bytes = (skb)->len, .packets = 1, \ .timeout = ip_set_adt_opt_timeout(opt, set) } #define IP_SET_INIT_UEXT(set) \ diff --git a/include/linux/netfilter/ipset/ip_set_bitmap.h b/include/linux/netfilter/ipset/ip_set_bitmap.h index fcc4d214a7..5e4662a71e 100644 --- a/include/linux/netfilter/ipset/ip_set_bitmap.h +++ 
b/include/linux/netfilter/ipset/ip_set_bitmap.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef __IP_SET_BITMAP_H #define __IP_SET_BITMAP_H @@ -7,9 +6,23 @@ #define IPSET_BITMAP_MAX_RANGE 0x0000FFFF enum { - IPSET_ADD_STORE_PLAIN_TIMEOUT = -1, IPSET_ADD_FAILED = 1, + IPSET_ADD_STORE_PLAIN_TIMEOUT, IPSET_ADD_START_STORED_TIMEOUT, }; +/* Common functions */ + +static inline u32 +range_to_mask(u32 from, u32 to, u8 *bits) +{ + u32 mask = 0xFFFFFFFE; + + *bits = 32; + while (--(*bits) > 0 && mask && (to & mask) != from) + mask <<= 1; + + return mask; +} + #endif /* __IP_SET_BITMAP_H */ diff --git a/include/linux/netfilter/ipset/ip_set_comment.h b/include/linux/netfilter/ipset/ip_set_comment.h new file mode 100644 index 0000000000..a1e1aa5a55 --- /dev/null +++ b/include/linux/netfilter/ipset/ip_set_comment.h @@ -0,0 +1,74 @@ +#ifndef _IP_SET_COMMENT_H +#define _IP_SET_COMMENT_H + +/* Copyright (C) 2013 Oliver Smith + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#ifdef __KERNEL__ + +static inline char* +ip_set_comment_uget(struct nlattr *tb) +{ + return nla_data(tb); +} + +/* Called from uadd only, protected by the set spinlock. + * The kadt functions don't use the comment extensions in any way. + */ +static inline void +ip_set_init_comment(struct ip_set_comment *comment, + const struct ip_set_ext *ext) +{ + struct ip_set_comment_rcu *c = rcu_dereference_protected(comment->c, 1); + size_t len = ext->comment ? 
strlen(ext->comment) : 0; + + if (unlikely(c)) { + kfree_rcu(c, rcu); + rcu_assign_pointer(comment->c, NULL); + } + if (!len) + return; + if (unlikely(len > IPSET_MAX_COMMENT_SIZE)) + len = IPSET_MAX_COMMENT_SIZE; + c = kzalloc(sizeof(*c) + len + 1, GFP_ATOMIC); + if (unlikely(!c)) + return; + strlcpy(c->str, ext->comment, len + 1); + rcu_assign_pointer(comment->c, c); +} + +/* Used only when dumping a set, protected by rcu_read_lock_bh() */ +static inline int +ip_set_put_comment(struct sk_buff *skb, struct ip_set_comment *comment) +{ + struct ip_set_comment_rcu *c = rcu_dereference_bh(comment->c); + + if (!c) + return 0; + return nla_put_string(skb, IPSET_ATTR_COMMENT, c->str); +} + +/* Called from uadd/udel, flush or the garbage collectors protected + * by the set spinlock. + * Called when the set is destroyed and when there can't be any user + * of the set data anymore. + */ +static inline void +ip_set_comment_free(void *_comment) +{ + struct ip_set_comment *comment = _comment; + struct ip_set_comment_rcu *c; + + c = rcu_dereference_protected(comment->c, 1); + if (unlikely(!c)) + return; + kfree_rcu(c, rcu); + rcu_assign_pointer(comment->c, NULL); +} + +#endif +#endif diff --git a/include/linux/netfilter/ipset/ip_set_getport.h b/include/linux/netfilter/ipset/ip_set_getport.h index 1ecaabd9a0..90d09300e9 100644 --- a/include/linux/netfilter/ipset/ip_set_getport.h +++ b/include/linux/netfilter/ipset/ip_set_getport.h @@ -1,15 +1,10 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _IP_SET_GETPORT_H #define _IP_SET_GETPORT_H -#include -#include -#include - extern bool ip_set_get_ip4_port(const struct sk_buff *skb, bool src, __be16 *port, u8 *proto); -#if IS_ENABLED(CONFIG_IP6_NF_IPTABLES) +#if defined(CONFIG_IP6_NF_IPTABLES) || defined(CONFIG_IP6_NF_IPTABLES_MODULE) extern bool ip_set_get_ip6_port(const struct sk_buff *skb, bool src, __be16 *port, u8 *proto); #else @@ -20,6 +15,9 @@ static inline bool ip_set_get_ip6_port(const struct sk_buff *skb, bool src, } 
#endif +extern bool ip_set_get_ip_port(const struct sk_buff *skb, u8 pf, bool src, + __be16 *port); + static inline bool ip_set_proto_with_ports(u8 proto) { switch (proto) { diff --git a/include/linux/netfilter/ipset/ip_set_hash.h b/include/linux/netfilter/ipset/ip_set_hash.h index 838abab672..f98ddfb094 100644 --- a/include/linux/netfilter/ipset/ip_set_hash.h +++ b/include/linux/netfilter/ipset/ip_set_hash.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef __IP_SET_HASH_H #define __IP_SET_HASH_H diff --git a/include/linux/netfilter/ipset/ip_set_list.h b/include/linux/netfilter/ipset/ip_set_list.h index a61fe2a7e6..fe2622a001 100644 --- a/include/linux/netfilter/ipset/ip_set_list.h +++ b/include/linux/netfilter/ipset/ip_set_list.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef __IP_SET_LIST_H #define __IP_SET_LIST_H diff --git a/include/linux/netfilter/ipset/ip_set_timeout.h b/include/linux/netfilter/ipset/ip_set_timeout.h new file mode 100644 index 0000000000..1d6a935c1a --- /dev/null +++ b/include/linux/netfilter/ipset/ip_set_timeout.h @@ -0,0 +1,73 @@ +#ifndef _IP_SET_TIMEOUT_H +#define _IP_SET_TIMEOUT_H + +/* Copyright (C) 2003-2013 Jozsef Kadlecsik + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#ifdef __KERNEL__ + +/* How often should the gc be run by default */ +#define IPSET_GC_TIME (3 * 60) + +/* Timeout period depending on the timeout value of the given set */ +#define IPSET_GC_PERIOD(timeout) \ + ((timeout/3) ? min_t(u32, (timeout)/3, IPSET_GC_TIME) : 1) + +/* Entry is set with no timeout value */ +#define IPSET_ELEM_PERMANENT 0 + +/* Set is defined with timeout support: timeout value may be 0 */ +#define IPSET_NO_TIMEOUT UINT_MAX + +#define ip_set_adt_opt_timeout(opt, set) \ +((opt)->ext.timeout != IPSET_NO_TIMEOUT ? 
(opt)->ext.timeout : (set)->timeout) + +static inline unsigned int +ip_set_timeout_uget(struct nlattr *tb) +{ + unsigned int timeout = ip_set_get_h32(tb); + + /* Normalize to fit into jiffies */ + if (timeout > UINT_MAX/MSEC_PER_SEC) + timeout = UINT_MAX/MSEC_PER_SEC; + + /* Userspace supplied TIMEOUT parameter: adjust crazy size */ + return timeout == IPSET_NO_TIMEOUT ? IPSET_NO_TIMEOUT - 1 : timeout; +} + +static inline bool +ip_set_timeout_expired(unsigned long *t) +{ + return *t != IPSET_ELEM_PERMANENT && time_is_before_jiffies(*t); +} + +static inline void +ip_set_timeout_set(unsigned long *timeout, u32 value) +{ + unsigned long t; + + if (!value) { + *timeout = IPSET_ELEM_PERMANENT; + return; + } + + t = msecs_to_jiffies(value * MSEC_PER_SEC) + jiffies; + if (t == IPSET_ELEM_PERMANENT) + /* Bingo! :-) */ + t--; + *timeout = t; +} + +static inline u32 +ip_set_timeout_get(unsigned long *timeout) +{ + return *timeout == IPSET_ELEM_PERMANENT ? 0 : + jiffies_to_msecs(*timeout - jiffies)/MSEC_PER_SEC; +} + +#endif /* __KERNEL__ */ +#endif /* _IP_SET_TIMEOUT_H */ diff --git a/include/linux/netfilter/ipset/pfxlen.h b/include/linux/netfilter/ipset/pfxlen.h index f59094e615..1afbb94b4b 100644 --- a/include/linux/netfilter/ipset/pfxlen.h +++ b/include/linux/netfilter/ipset/pfxlen.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _PFXLEN_H #define _PFXLEN_H diff --git a/include/linux/netfilter/nf_conntrack_amanda.h b/include/linux/netfilter/nf_conntrack_amanda.h index 6f0ac896fc..4b59a15849 100644 --- a/include/linux/netfilter/nf_conntrack_amanda.h +++ b/include/linux/netfilter/nf_conntrack_amanda.h @@ -1,12 +1,7 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _NF_CONNTRACK_AMANDA_H #define _NF_CONNTRACK_AMANDA_H /* AMANDA tracking. 
*/ -#include -#include -#include - extern unsigned int (*nf_nat_amanda_hook)(struct sk_buff *skb, enum ip_conntrack_info ctinfo, unsigned int protoff, diff --git a/include/linux/netfilter/nf_conntrack_common.h b/include/linux/netfilter/nf_conntrack_common.h index 700ea077ce..1d1ef4e205 100644 --- a/include/linux/netfilter/nf_conntrack_common.h +++ b/include/linux/netfilter/nf_conntrack_common.h @@ -1,16 +1,14 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _NF_CONNTRACK_COMMON_H #define _NF_CONNTRACK_COMMON_H -#include #include struct ip_conntrack_stat { unsigned int found; unsigned int invalid; + unsigned int ignore; unsigned int insert; unsigned int insert_failed; - unsigned int clash_resolve; unsigned int drop; unsigned int early_drop; unsigned int error; @@ -18,26 +16,9 @@ struct ip_conntrack_stat { unsigned int expect_create; unsigned int expect_delete; unsigned int search_restart; - unsigned int chaintoolong; }; -#define NFCT_INFOMASK 7UL -#define NFCT_PTRMASK ~(NFCT_INFOMASK) - -struct nf_conntrack { - atomic_t use; -}; - -void nf_conntrack_destroy(struct nf_conntrack *nfct); -static inline void nf_conntrack_put(struct nf_conntrack *nfct) -{ - if (nfct && atomic_dec_and_test(&nfct->use)) - nf_conntrack_destroy(nfct); -} -static inline void nf_conntrack_get(struct nf_conntrack *nfct) -{ - if (nfct) - atomic_inc(&nfct->use); -} +/* call to create an explicit dependency on nf_conntrack. 
*/ +void need_conntrack(void); #endif /* _NF_CONNTRACK_COMMON_H */ diff --git a/include/linux/netfilter/nf_conntrack_dccp.h b/include/linux/netfilter/nf_conntrack_dccp.h index c509ed76e7..40dcc82058 100644 --- a/include/linux/netfilter/nf_conntrack_dccp.h +++ b/include/linux/netfilter/nf_conntrack_dccp.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _NF_CONNTRACK_DCCP_H #define _NF_CONNTRACK_DCCP_H @@ -25,7 +24,8 @@ enum ct_dccp_roles { }; #define CT_DCCP_ROLE_MAX (__CT_DCCP_ROLE_MAX - 1) -#include +#ifdef __KERNEL__ +#include struct nf_ct_dccp { u_int8_t role[IP_CT_DIR_MAX]; @@ -35,4 +35,6 @@ struct nf_ct_dccp { u_int64_t handshake_seq; }; +#endif /* __KERNEL__ */ + #endif /* _NF_CONNTRACK_DCCP_H */ diff --git a/include/linux/netfilter/nf_conntrack_ftp.h b/include/linux/netfilter/nf_conntrack_ftp.h index 0e38302820..5f818b01e0 100644 --- a/include/linux/netfilter/nf_conntrack_ftp.h +++ b/include/linux/netfilter/nf_conntrack_ftp.h @@ -1,13 +1,8 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _NF_CONNTRACK_FTP_H #define _NF_CONNTRACK_FTP_H -#include -#include -#include -#include #include -#include + #define FTP_PORT 21 @@ -24,6 +19,8 @@ struct nf_ct_ftp_master { u_int16_t flags[IP_CT_DIR_MAX]; }; +struct nf_conntrack_expect; + /* For NAT to hook in when we find a packet which describes what other * connection we should expect. 
*/ extern unsigned int (*nf_nat_ftp_hook)(struct sk_buff *skb, diff --git a/include/linux/netfilter/nf_conntrack_h323.h b/include/linux/netfilter/nf_conntrack_h323.h index 4561ec0fce..858d9b2140 100644 --- a/include/linux/netfilter/nf_conntrack_h323.h +++ b/include/linux/netfilter/nf_conntrack_h323.h @@ -1,13 +1,9 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _NF_CONNTRACK_H323_H #define _NF_CONNTRACK_H323_H -#include -#include -#include +#ifdef __KERNEL__ + #include -#include -#include #define RAS_PORT 1719 #define Q931_PORT 1720 @@ -31,6 +27,8 @@ struct nf_ct_h323_master { }; }; +struct nf_conn; + int get_h225_addr(struct nf_conn *ct, unsigned char *data, TransportAddress *taddr, union nf_inet_addr *addr, __be16 *port); @@ -95,3 +93,5 @@ extern int (*nat_q931_hook) (struct sk_buff *skb, struct nf_conn *ct, struct nf_conntrack_expect *exp); #endif + +#endif diff --git a/include/linux/netfilter/nf_conntrack_h323_asn1.h b/include/linux/netfilter/nf_conntrack_h323_asn1.h index bd6797f823..3176a277ee 100644 --- a/include/linux/netfilter/nf_conntrack_h323_asn1.h +++ b/include/linux/netfilter/nf_conntrack_h323_asn1.h @@ -1,9 +1,12 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /**************************************************************************** - * BER and PER decoding library for H.323 conntrack/NAT module. + * ip_conntrack_h323_asn1.h - BER and PER decoding library for H.323 + * conntrack/NAT module. * * Copyright (c) 2006 by Jing Min Zhao * + * This source code is licensed under General Public License version 2. + * + * * This library is based on H.225 version 4, H.235 version 2 and H.245 * version 7. 
It is extremely optimized to decode only the absolutely * necessary objects in a signal for Linux kernel NAT module use, so don't @@ -37,8 +40,6 @@ /***************************************************************************** * H.323 Types ****************************************************************************/ - -#include #include typedef struct { diff --git a/include/linux/netfilter/nf_conntrack_h323_types.h b/include/linux/netfilter/nf_conntrack_h323_types.h index 74c6f92419..b0821f45fb 100644 --- a/include/linux/netfilter/nf_conntrack_h323_types.h +++ b/include/linux/netfilter/nf_conntrack_h323_types.h @@ -1,12 +1,10 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* Generated by Jing Min Zhao's ASN.1 parser, May 16 2007 * * Copyright (c) 2006 Jing Min Zhao + * + * This source code is licensed under General Public License version 2. */ -#ifndef _NF_CONNTRACK_H323_TYPES_H -#define _NF_CONNTRACK_H323_TYPES_H - typedef struct TransportAddress_ipAddress { /* SEQUENCE */ int options; /* No use */ unsigned int ip; @@ -934,5 +932,3 @@ typedef struct RasMessage { /* CHOICE */ InfoRequestResponse infoRequestResponse; }; } RasMessage; - -#endif /* _NF_CONNTRACK_H323_TYPES_H */ diff --git a/include/linux/netfilter/nf_conntrack_irc.h b/include/linux/netfilter/nf_conntrack_irc.h index d02255f721..4bb9bae671 100644 --- a/include/linux/netfilter/nf_conntrack_irc.h +++ b/include/linux/netfilter/nf_conntrack_irc.h @@ -1,10 +1,7 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _NF_CONNTRACK_IRC_H #define _NF_CONNTRACK_IRC_H -#include -#include -#include +#ifdef __KERNEL__ #define IRC_PORT 6667 @@ -15,4 +12,5 @@ extern unsigned int (*nf_nat_irc_hook)(struct sk_buff *skb, unsigned int matchlen, struct nf_conntrack_expect *exp); +#endif /* __KERNEL__ */ #endif /* _NF_CONNTRACK_IRC_H */ diff --git a/include/linux/netfilter/nf_conntrack_pptp.h b/include/linux/netfilter/nf_conntrack_pptp.h index a28aa289af..2ab2830316 100644 --- a/include/linux/netfilter/nf_conntrack_pptp.h 
+++ b/include/linux/netfilter/nf_conntrack_pptp.h @@ -1,16 +1,10 @@ -/* SPDX-License-Identifier: GPL-2.0 */ /* PPTP constants and structs */ #ifndef _NF_CONNTRACK_PPTP_H #define _NF_CONNTRACK_PPTP_H -#include -#include -#include #include -#include -#include -const char *pptp_msg_name(u_int16_t msg); +extern const char *const pptp_msg_name[]; /* state of the control session */ enum pptp_ctrlsess_state { @@ -50,6 +44,8 @@ struct nf_nat_pptp { __be16 pac_call_id; /* NAT'ed PAC call id */ }; +#ifdef __KERNEL__ + #define PPTP_CONTROL_PORT 1723 #define PPTP_PACKET_CONTROL 1 @@ -300,6 +296,10 @@ union pptp_ctrl_union { struct PptpSetLinkInfo setlink; }; +/* crap needed for nf_conntrack_compat.h */ +struct nf_conn; +struct nf_conntrack_expect; + extern int (*nf_nat_pptp_hook_outbound)(struct sk_buff *skb, struct nf_conn *ct, enum ip_conntrack_info ctinfo, @@ -322,4 +322,5 @@ extern void (*nf_nat_pptp_hook_expectfn)(struct nf_conn *ct, struct nf_conntrack_expect *exp); +#endif /* __KERNEL__ */ #endif /* _NF_CONNTRACK_PPTP_H */ diff --git a/include/linux/netfilter/nf_conntrack_proto_gre.h b/include/linux/netfilter/nf_conntrack_proto_gre.h index f33aa60213..dee0acd0dd 100644 --- a/include/linux/netfilter/nf_conntrack_proto_gre.h +++ b/include/linux/netfilter/nf_conntrack_proto_gre.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _CONNTRACK_PROTO_GRE_H #define _CONNTRACK_PROTO_GRE_H #include @@ -10,6 +9,7 @@ struct nf_ct_gre { unsigned int timeout; }; +#ifdef __KERNEL__ #include struct nf_conn; @@ -18,17 +18,16 @@ struct nf_conn; struct nf_ct_gre_keymap { struct list_head list; struct nf_conntrack_tuple tuple; - struct rcu_head rcu; }; /* add new tuple->key_reply pair to keymap */ int nf_ct_gre_keymap_add(struct nf_conn *ct, enum ip_conntrack_dir dir, struct nf_conntrack_tuple *t); -void nf_ct_gre_keymap_flush(struct net *net); /* delete keymap entries */ void nf_ct_gre_keymap_destroy(struct nf_conn *ct); -bool gre_pkt_to_tuple(const struct sk_buff *skb, 
unsigned int dataoff, - struct net *net, struct nf_conntrack_tuple *tuple); +void nf_nat_need_gre(void); + +#endif /* __KERNEL__ */ #endif /* _CONNTRACK_PROTO_GRE_H */ diff --git a/include/linux/netfilter/nf_conntrack_sane.h b/include/linux/netfilter/nf_conntrack_sane.h index 46c7acd1b4..4767d6e23e 100644 --- a/include/linux/netfilter/nf_conntrack_sane.h +++ b/include/linux/netfilter/nf_conntrack_sane.h @@ -1,8 +1,9 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _NF_CONNTRACK_SANE_H #define _NF_CONNTRACK_SANE_H /* SANE tracking. */ +#ifdef __KERNEL__ + #define SANE_PORT 6566 enum sane_state { @@ -15,4 +16,6 @@ struct nf_ct_sane_master { enum sane_state state; }; +#endif /* __KERNEL__ */ + #endif /* _NF_CONNTRACK_SANE_H */ diff --git a/include/linux/netfilter/nf_conntrack_sctp.h b/include/linux/netfilter/nf_conntrack_sctp.h index 625f491b95..22a16a23cd 100644 --- a/include/linux/netfilter/nf_conntrack_sctp.h +++ b/include/linux/netfilter/nf_conntrack_sctp.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _NF_CONNTRACK_SCTP_H #define _NF_CONNTRACK_SCTP_H /* SCTP tracking. 
*/ @@ -9,8 +8,6 @@ struct ip_ct_sctp { enum sctp_conntrack state; __be32 vtag[IP_CT_DIR_MAX]; - u8 last_dir; - u8 flags; }; #endif /* _NF_CONNTRACK_SCTP_H */ diff --git a/include/linux/netfilter/nf_conntrack_sip.h b/include/linux/netfilter/nf_conntrack_sip.h index c620521c42..d5af3c27fb 100644 --- a/include/linux/netfilter/nf_conntrack_sip.h +++ b/include/linux/netfilter/nf_conntrack_sip.h @@ -1,11 +1,11 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef __NF_CONNTRACK_SIP_H__ #define __NF_CONNTRACK_SIP_H__ +#ifdef __KERNEL__ -#include -#include #include +#include + #define SIP_PORT 5060 #define SIP_TIMEOUT 3600 @@ -195,4 +195,5 @@ int ct_sip_get_sdp_header(const struct nf_conn *ct, const char *dptr, enum sdp_header_types term, unsigned int *matchoff, unsigned int *matchlen); +#endif /* __KERNEL__ */ #endif /* __NF_CONNTRACK_SIP_H__ */ diff --git a/include/linux/netfilter/nf_conntrack_snmp.h b/include/linux/netfilter/nf_conntrack_snmp.h index 87e4f33eb5..064bc63a53 100644 --- a/include/linux/netfilter/nf_conntrack_snmp.h +++ b/include/linux/netfilter/nf_conntrack_snmp.h @@ -1,10 +1,6 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _NF_CONNTRACK_SNMP_H #define _NF_CONNTRACK_SNMP_H -#include -#include - extern int (*nf_nat_snmp_hook)(struct sk_buff *skb, unsigned int protoff, struct nf_conn *ct, diff --git a/include/linux/netfilter/nf_conntrack_tcp.h b/include/linux/netfilter/nf_conntrack_tcp.h index f9e3a66303..22db9614b5 100644 --- a/include/linux/netfilter/nf_conntrack_tcp.h +++ b/include/linux/netfilter/nf_conntrack_tcp.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _NF_CONNTRACK_TCP_H #define _NF_CONNTRACK_TCP_H diff --git a/include/linux/netfilter/nf_conntrack_tftp.h b/include/linux/netfilter/nf_conntrack_tftp.h index dc4c1b9bea..c78d38fdb0 100644 --- a/include/linux/netfilter/nf_conntrack_tftp.h +++ b/include/linux/netfilter/nf_conntrack_tftp.h @@ -1,14 +1,8 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _NF_CONNTRACK_TFTP_H #define 
_NF_CONNTRACK_TFTP_H #define TFTP_PORT 69 -#include -#include -#include -#include - struct tftphdr { __be16 opcode; }; diff --git a/include/linux/netfilter/nf_conntrack_zones_common.h b/include/linux/netfilter/nf_conntrack_zones_common.h index 8f3905e12a..5d7cf36d47 100644 --- a/include/linux/netfilter/nf_conntrack_zones_common.h +++ b/include/linux/netfilter/nf_conntrack_zones_common.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _NF_CONNTRACK_ZONES_COMMON_H #define _NF_CONNTRACK_ZONES_COMMON_H diff --git a/include/linux/netfilter/nfnetlink.h b/include/linux/netfilter/nfnetlink.h index 241e005f29..d6b384c9d2 100644 --- a/include/linux/netfilter/nfnetlink.h +++ b/include/linux/netfilter/nfnetlink.h @@ -1,52 +1,33 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _NFNETLINK_H #define _NFNETLINK_H + #include #include #include #include -struct nfnl_info { - struct net *net; - struct sock *sk; - const struct nlmsghdr *nlh; - const struct nfgenmsg *nfmsg; - struct netlink_ext_ack *extack; -}; - -enum nfnl_callback_type { - NFNL_CB_UNSPEC = 0, - NFNL_CB_MUTEX, - NFNL_CB_RCU, - NFNL_CB_BATCH, -}; - struct nfnl_callback { - int (*call)(struct sk_buff *skb, const struct nfnl_info *info, + int (*call)(struct net *net, struct sock *nl, struct sk_buff *skb, + const struct nlmsghdr *nlh, const struct nlattr * const cda[]); - const struct nla_policy *policy; - enum nfnl_callback_type type; - __u16 attr_count; -}; - -enum nfnl_abort_action { - NFNL_ABORT_NONE = 0, - NFNL_ABORT_AUTOLOAD, - NFNL_ABORT_VALIDATE, -}; + int (*call_rcu)(struct net *net, struct sock *nl, struct sk_buff *skb, + const struct nlmsghdr *nlh, + const struct nlattr * const cda[]); + int (*call_batch)(struct net *net, struct sock *nl, struct sk_buff *skb, + const struct nlmsghdr *nlh, + const struct nlattr * const cda[]); + const struct nla_policy *policy; /* netlink attribute policy */ + const u_int16_t attr_count; /* number of nlattr's */ +} __do_const; struct nfnetlink_subsystem { const 
char *name; __u8 subsys_id; /* nfnetlink subsystem ID */ __u8 cb_count; /* number of callbacks */ const struct nfnl_callback *cb; /* callback for individual types */ - struct module *owner; int (*commit)(struct net *net, struct sk_buff *skb); - int (*abort)(struct net *net, struct sk_buff *skb, - enum nfnl_abort_action action); - void (*cleanup)(struct net *net); - bool (*valid_genid)(struct net *net, u32 genid); + int (*abort)(struct net *net, struct sk_buff *skb); }; int nfnetlink_subsys_register(const struct nfnetlink_subsystem *n); @@ -56,41 +37,8 @@ int nfnetlink_has_listeners(struct net *net, unsigned int group); int nfnetlink_send(struct sk_buff *skb, struct net *net, u32 portid, unsigned int group, int echo, gfp_t flags); int nfnetlink_set_err(struct net *net, u32 portid, u32 group, int error); -int nfnetlink_unicast(struct sk_buff *skb, struct net *net, u32 portid); -void nfnetlink_broadcast(struct net *net, struct sk_buff *skb, __u32 portid, - __u32 group, gfp_t allocation); - -static inline u16 nfnl_msg_type(u8 subsys, u8 msg_type) -{ - return subsys << 8 | msg_type; -} - -static inline void nfnl_fill_hdr(struct nlmsghdr *nlh, u8 family, u8 version, - __be16 res_id) -{ - struct nfgenmsg *nfmsg; - - nfmsg = nlmsg_data(nlh); - nfmsg->nfgen_family = family; - nfmsg->version = version; - nfmsg->res_id = res_id; -} - -static inline struct nlmsghdr *nfnl_msg_put(struct sk_buff *skb, u32 portid, - u32 seq, int type, int flags, - u8 family, u8 version, - __be16 res_id) -{ - struct nlmsghdr *nlh; - - nlh = nlmsg_put(skb, portid, seq, type, sizeof(struct nfgenmsg), flags); - if (!nlh) - return NULL; - - nfnl_fill_hdr(nlh, family, version, res_id); - - return nlh; -} +int nfnetlink_unicast(struct sk_buff *skb, struct net *net, u32 portid, + int flags); void nfnl_lock(__u8 subsys_id); void nfnl_unlock(__u8 subsys_id); @@ -103,6 +51,19 @@ static inline bool lockdep_nfnl_is_held(__u8 subsys_id) } #endif /* CONFIG_PROVE_LOCKING */ +/* + * nfnl_dereference - fetch RCU 
pointer when updates are prevented by subsys mutex + * + * @p: The pointer to read, prior to dereferencing + * @ss: The nfnetlink subsystem ID + * + * Return the value of the specified RCU-protected pointer, but omit + * both the smp_read_barrier_depends() and the ACCESS_ONCE(), because + * caller holds the NFNL subsystem mutex. + */ +#define nfnl_dereference(p, ss) \ + rcu_dereference_protected(p, lockdep_nfnl_is_held(ss)) + #define MODULE_ALIAS_NFNL_SUBSYS(subsys) \ MODULE_ALIAS("nfnetlink-subsys-" __stringify(subsys)) diff --git a/include/linux/netfilter/nfnetlink_acct.h b/include/linux/netfilter/nfnetlink_acct.h index beee8bffe4..664da00486 100644 --- a/include/linux/netfilter/nfnetlink_acct.h +++ b/include/linux/netfilter/nfnetlink_acct.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _NFNL_ACCT_H_ #define _NFNL_ACCT_H_ @@ -16,5 +15,6 @@ struct nf_acct; struct nf_acct *nfnl_acct_find_get(struct net *net, const char *filter_name); void nfnl_acct_put(struct nf_acct *acct); void nfnl_acct_update(const struct sk_buff *skb, struct nf_acct *nfacct); -int nfnl_acct_overquota(struct net *net, struct nf_acct *nfacct); +int nfnl_acct_overquota(struct net *net, const struct sk_buff *skb, + struct nf_acct *nfacct); #endif /* _NFNL_ACCT_H */ diff --git a/include/linux/netfilter/x_tables.h b/include/linux/netfilter/x_tables.h index 5897f3dbaf..2ad1a2b289 100644 --- a/include/linux/netfilter/x_tables.h +++ b/include/linux/netfilter/x_tables.h @@ -1,11 +1,9 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _X_TABLES_H #define _X_TABLES_H #include #include -#include #include /* Test a struct->invflags and a boolean for inequality */ @@ -19,9 +17,14 @@ * @target: the target extension * @matchinfo: per-match data * @targetinfo: per-target data - * @state: pointer to hook state this packet came from + * @net network namespace through which the action was invoked + * @in: input netdevice + * @out: output netdevice * @fragoff: packet is a fragment, this is the 
data offset * @thoff: position of transport header relative to skb->data + * @hook: hook number given packet came from + * @family: Actual NFPROTO_* through which the function is invoked + * (helpful when match->family == NFPROTO_UNSPEC) * * Fields written to by extensions: * @@ -35,47 +38,15 @@ struct xt_action_param { union { const void *matchinfo, *targinfo; }; - const struct nf_hook_state *state; + struct net *net; + const struct net_device *in, *out; + int fragoff; unsigned int thoff; - u16 fragoff; + unsigned int hooknum; + u_int8_t family; bool hotdrop; }; -static inline struct net *xt_net(const struct xt_action_param *par) -{ - return par->state->net; -} - -static inline struct net_device *xt_in(const struct xt_action_param *par) -{ - return par->state->in; -} - -static inline const char *xt_inname(const struct xt_action_param *par) -{ - return par->state->in->name; -} - -static inline struct net_device *xt_out(const struct xt_action_param *par) -{ - return par->state->out; -} - -static inline const char *xt_outname(const struct xt_action_param *par) -{ - return par->state->out->name; -} - -static inline unsigned int xt_hooknum(const struct xt_action_param *par) -{ - return par->state->hook; -} - -static inline u_int8_t xt_family(const struct xt_action_param *par) -{ - return par->state->pf; -} - /** * struct xt_mtchk_param - parameters for match extensions' * checkentry functions @@ -158,7 +129,7 @@ struct xt_match { /* Called when entry of this type deleted. 
*/ void (*destroy)(const struct xt_mtdtor_param *); -#ifdef CONFIG_NETFILTER_XTABLES_COMPAT +#ifdef CONFIG_COMPAT /* Called when userspace align differs from kernel space one */ void (*compat_from_user)(void *dst, const void *src); int (*compat_to_user)(void __user *dst, const void *src); @@ -168,8 +139,7 @@ struct xt_match { const char *table; unsigned int matchsize; - unsigned int usersize; -#ifdef CONFIG_NETFILTER_XTABLES_COMPAT +#ifdef CONFIG_COMPAT unsigned int compatsize; #endif unsigned int hooks; @@ -199,7 +169,7 @@ struct xt_target { /* Called when entry of this type deleted. */ void (*destroy)(const struct xt_tgdtor_param *); -#ifdef CONFIG_NETFILTER_XTABLES_COMPAT +#ifdef CONFIG_COMPAT /* Called when userspace align differs from kernel space one */ void (*compat_from_user)(void *dst, const void *src); int (*compat_to_user)(void __user *dst, const void *src); @@ -209,8 +179,7 @@ struct xt_target { const char *table; unsigned int targetsize; - unsigned int usersize; -#ifdef CONFIG_NETFILTER_XTABLES_COMPAT +#ifdef CONFIG_COMPAT unsigned int compatsize; #endif unsigned int hooks; @@ -229,15 +198,15 @@ struct xt_table { /* Man behind the curtain... */ struct xt_table_info *private; - /* hook ops that register the table with the netfilter core */ - struct nf_hook_ops *ops; - /* Set this to THIS_MODULE if you are a module, otherwise NULL */ struct module *me; u_int8_t af; /* address/protocol family */ int priority; /* hook order */ + /* called when table is needed in the given netns */ + int (*table_init)(struct net *net); + /* A unique name... 
*/ const char name[XT_TABLE_MAXNAMELEN]; }; @@ -264,7 +233,7 @@ struct xt_table_info { unsigned int stacksize; void ***jumpstack; - unsigned char entries[] __aligned(8); + unsigned char entries[0] __aligned(8); }; int xt_register_target(struct xt_target *target); @@ -281,29 +250,17 @@ int xt_check_entry_offsets(const void *base, const char *elems, unsigned int target_offset, unsigned int next_offset); -int xt_check_table_hooks(const struct xt_table_info *info, unsigned int valid_hooks); - unsigned int *xt_alloc_entry_offsets(unsigned int size); bool xt_find_jump_offset(const unsigned int *offsets, unsigned int target, unsigned int size); -int xt_check_proc_name(const char *name, unsigned int size); - -int xt_check_match(struct xt_mtchk_param *, unsigned int size, u16 proto, +int xt_check_match(struct xt_mtchk_param *, unsigned int size, u_int8_t proto, bool inv_proto); -int xt_check_target(struct xt_tgchk_param *, unsigned int size, u16 proto, +int xt_check_target(struct xt_tgchk_param *, unsigned int size, u_int8_t proto, bool inv_proto); -int xt_match_to_user(const struct xt_entry_match *m, - struct xt_entry_match __user *u); -int xt_target_to_user(const struct xt_entry_target *t, - struct xt_entry_target __user *u); -int xt_data_to_user(void __user *dst, const void *src, - int usersize, int size, int aligned_size); - -void *xt_copy_counters(sockptr_t arg, unsigned int len, - struct xt_counters_info *info); -struct xt_counters *xt_counters_alloc(unsigned int counters); +void *xt_copy_counters_from_user(const void __user *user, unsigned int len, + struct xt_counters_info *info, bool compat); struct xt_table *xt_register_table(struct net *net, const struct xt_table *table, @@ -317,16 +274,14 @@ struct xt_table_info *xt_replace_table(struct xt_table *table, int *error); struct xt_match *xt_find_match(u8 af, const char *name, u8 revision); +struct xt_target *xt_find_target(u8 af, const char *name, u8 revision); struct xt_match *xt_request_find_match(u8 af, const char 
*name, u8 revision); struct xt_target *xt_request_find_target(u8 af, const char *name, u8 revision); int xt_find_revision(u8 af, const char *name, u8 revision, int target, int *err); -struct xt_table *xt_find_table(struct net *net, u8 af, const char *name); struct xt_table *xt_find_table_lock(struct net *net, u_int8_t af, const char *name); -struct xt_table *xt_request_find_table_lock(struct net *net, u_int8_t af, - const char *name); void xt_table_unlock(struct xt_table *t); int xt_proto_init(struct net *net, u_int8_t af); @@ -337,7 +292,7 @@ void xt_free_table_info(struct xt_table_info *info); /** * xt_recseq - recursive seqcount for netfilter use - * + * * Packet processing changes the seqcount only if no recursion happened * get_counters() can use read_seqcount_begin()/read_seqcount_retry(), * because we use the normal seqcount convention : @@ -377,7 +332,7 @@ static inline unsigned int xt_write_recseq_begin(void) * since addend is most likely 1 */ __this_cpu_add(xt_recseq.sequence, addend); - smp_mb(); + smp_wmb(); return addend; } @@ -420,14 +375,38 @@ static inline unsigned long ifname_compare_aligned(const char *_a, return ret; } -struct xt_percpu_counter_alloc_state { - unsigned int off; - const char __percpu *mem; -}; -bool xt_percpu_counter_alloc(struct xt_percpu_counter_alloc_state *state, - struct xt_counters *counter); -void xt_percpu_counter_free(struct xt_counters *cnt); +/* On SMP, ip(6)t_entry->counters.pcnt holds address of the + * real (percpu) counter. On !SMP, its just the packet count, + * so nothing needs to be done there. + * + * xt_percpu_counter_alloc returns the address of the percpu + * counter, or 0 on !SMP. We force an alignment of 16 bytes + * so that bytes/packets share a common cache line. + * + * Hence caller must use IS_ERR_VALUE to check for error, this + * allows us to return 0 for single core systems without forcing + * callers to deal with SMP vs. NONSMP issues. 
+ */ +static inline unsigned long xt_percpu_counter_alloc(void) +{ + if (nr_cpu_ids > 1) { + void __percpu *res = __alloc_percpu(sizeof(struct xt_counters), + sizeof(struct xt_counters)); + + if (res == NULL) + return -ENOMEM; + + return (__force unsigned long) res; + } + + return 0; +} +static inline void xt_percpu_counter_free(u64 pcnt) +{ + if (nr_cpu_ids > 1) + free_percpu((void __percpu *) (unsigned long) pcnt); +} static inline struct xt_counters * xt_get_this_cpu_counter(struct xt_counters *cnt) @@ -449,10 +428,7 @@ xt_get_per_cpu_counter(struct xt_counters *cnt, unsigned int cpu) struct nf_hook_ops *xt_hook_ops_alloc(const struct xt_table *, nf_hookfn *); -int xt_register_template(const struct xt_table *t, int(*table_init)(struct net *net)); -void xt_unregister_template(const struct xt_table *t); - -#ifdef CONFIG_NETFILTER_XTABLES_COMPAT +#ifdef CONFIG_COMPAT #include struct compat_xt_entry_match { @@ -468,7 +444,7 @@ struct compat_xt_entry_match { } kernel; u_int16_t match_size; } u; - unsigned char data[]; + unsigned char data[0]; }; struct compat_xt_entry_target { @@ -484,7 +460,7 @@ struct compat_xt_entry_target { } kernel; u_int16_t target_size; } u; - unsigned char data[]; + unsigned char data[0]; }; /* FIXME: this works only on 32 bit tasks @@ -498,7 +474,7 @@ struct compat_xt_counters { struct compat_xt_counters_info { char name[XT_TABLE_MAXNAMELEN]; compat_uint_t num_counters; - struct compat_xt_counters counters[]; + struct compat_xt_counters counters[0]; }; struct _compat_xt_align { @@ -515,7 +491,7 @@ void xt_compat_unlock(u_int8_t af); int xt_compat_add_offset(u_int8_t af, unsigned int offset, int delta); void xt_compat_flush_offsets(u_int8_t af); -int xt_compat_init_offsets(u8 af, unsigned int number); +void xt_compat_init_offsets(u_int8_t af, unsigned int number); int xt_compat_calc_jump(u_int8_t af, unsigned int offset); int xt_compat_match_offset(const struct xt_match *match); @@ -533,5 +509,5 @@ int xt_compat_check_entry_offsets(const void 
*base, const char *elems, unsigned int target_offset, unsigned int next_offset); -#endif /* CONFIG_NETFILTER_XTABLES_COMPAT */ +#endif /* CONFIG_COMPAT */ #endif /* _X_TABLES_H */ diff --git a/include/linux/netfilter/xt_gradm.h b/include/linux/netfilter/xt_gradm.h new file mode 100644 index 0000000000..33f4af8935 --- /dev/null +++ b/include/linux/netfilter/xt_gradm.h @@ -0,0 +1,9 @@ +#ifndef _LINUX_NETFILTER_XT_GRADM_H +#define _LINUX_NETFILTER_XT_GRADM_H 1 + +struct xt_gradm_mtinfo { + __u16 flags; + __u16 invflags; +}; + +#endif diff --git a/include/linux/netfilter/xt_hashlimit.h b/include/linux/netfilter/xt_hashlimit.h new file mode 100644 index 0000000000..074790c0cf --- /dev/null +++ b/include/linux/netfilter/xt_hashlimit.h @@ -0,0 +1,9 @@ +#ifndef _XT_HASHLIMIT_H +#define _XT_HASHLIMIT_H + +#include + +#define XT_HASHLIMIT_ALL (XT_HASHLIMIT_HASH_DIP | XT_HASHLIMIT_HASH_DPT | \ + XT_HASHLIMIT_HASH_SIP | XT_HASHLIMIT_HASH_SPT | \ + XT_HASHLIMIT_INVERT | XT_HASHLIMIT_BYTES) +#endif /*_XT_HASHLIMIT_H*/ diff --git a/include/linux/netfilter/xt_physdev.h b/include/linux/netfilter/xt_physdev.h new file mode 100644 index 0000000000..5b5e41716d --- /dev/null +++ b/include/linux/netfilter/xt_physdev.h @@ -0,0 +1,7 @@ +#ifndef _XT_PHYSDEV_H +#define _XT_PHYSDEV_H + +#include +#include + +#endif /*_XT_PHYSDEV_H*/ diff --git a/include/linux/netfilter_arp/arp_tables.h b/include/linux/netfilter_arp/arp_tables.h index 4f9a4b3c58..029b95e892 100644 --- a/include/linux/netfilter_arp/arp_tables.h +++ b/include/linux/netfilter_arp/arp_tables.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ /* * Format of an ARP firewall descriptor * @@ -51,14 +50,14 @@ struct arpt_error { extern void *arpt_alloc_initial_table(const struct xt_table *); int arpt_register_table(struct net *net, const struct xt_table *table, const struct arpt_replace *repl, - const struct nf_hook_ops *ops); -void arpt_unregister_table(struct net *net, const char *name); -void 
arpt_unregister_table_pre_exit(struct net *net, const char *name); + const struct nf_hook_ops *ops, struct xt_table **res); +void arpt_unregister_table(struct net *net, struct xt_table *table, + const struct nf_hook_ops *ops); extern unsigned int arpt_do_table(struct sk_buff *skb, const struct nf_hook_state *state, struct xt_table *table); -#ifdef CONFIG_NETFILTER_XTABLES_COMPAT +#ifdef CONFIG_COMPAT #include struct compat_arpt_entry { @@ -67,7 +66,7 @@ struct compat_arpt_entry { __u16 next_offset; compat_uint_t comefrom; struct compat_xt_counters counters; - unsigned char elems[]; + unsigned char elems[0]; }; static inline struct xt_entry_target * diff --git a/include/linux/netfilter_bridge.h b/include/linux/netfilter_bridge.h index f980edfdd2..2ed40c402b 100644 --- a/include/linux/netfilter_bridge.h +++ b/include/linux/netfilter_bridge.h @@ -1,15 +1,18 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef __LINUX_BRIDGE_NETFILTER_H #define __LINUX_BRIDGE_NETFILTER_H #include #include -struct nf_bridge_frag_data { - char mac[ETH_HLEN]; - bool vlan_present; - u16 vlan_tci; - __be16 vlan_proto; +enum nf_br_hook_priorities { + NF_BR_PRI_FIRST = INT_MIN, + NF_BR_PRI_NAT_DST_BRIDGED = -300, + NF_BR_PRI_FILTER_BRIDGED = -200, + NF_BR_PRI_BRNF = 0, + NF_BR_PRI_NAT_DST_OTHER = 100, + NF_BR_PRI_FILTER_OTHER = 200, + NF_BR_PRI_NAT_SRC = 300, + NF_BR_PRI_LAST = INT_MAX, }; #if IS_ENABLED(CONFIG_BRIDGE_NETFILTER) @@ -24,58 +27,43 @@ static inline void br_drop_fake_rtable(struct sk_buff *skb) skb_dst_drop(skb); } -static inline struct nf_bridge_info * -nf_bridge_info_get(const struct sk_buff *skb) -{ - return skb_ext_find(skb, SKB_EXT_BRIDGE_NF); -} - -static inline bool nf_bridge_info_exists(const struct sk_buff *skb) -{ - return skb_ext_exist(skb, SKB_EXT_BRIDGE_NF); -} - static inline int nf_bridge_get_physinif(const struct sk_buff *skb) { - const struct nf_bridge_info *nf_bridge = nf_bridge_info_get(skb); + struct nf_bridge_info *nf_bridge; - if (!nf_bridge) + if 
(skb->nf_bridge == NULL) return 0; + nf_bridge = skb->nf_bridge; return nf_bridge->physindev ? nf_bridge->physindev->ifindex : 0; } static inline int nf_bridge_get_physoutif(const struct sk_buff *skb) { - const struct nf_bridge_info *nf_bridge = nf_bridge_info_get(skb); + struct nf_bridge_info *nf_bridge; - if (!nf_bridge) + if (skb->nf_bridge == NULL) return 0; + nf_bridge = skb->nf_bridge; return nf_bridge->physoutdev ? nf_bridge->physoutdev->ifindex : 0; } static inline struct net_device * nf_bridge_get_physindev(const struct sk_buff *skb) { - const struct nf_bridge_info *nf_bridge = nf_bridge_info_get(skb); - - return nf_bridge ? nf_bridge->physindev : NULL; + return skb->nf_bridge ? skb->nf_bridge->physindev : NULL; } static inline struct net_device * nf_bridge_get_physoutdev(const struct sk_buff *skb) { - const struct nf_bridge_info *nf_bridge = nf_bridge_info_get(skb); - - return nf_bridge ? nf_bridge->physoutdev : NULL; + return skb->nf_bridge ? skb->nf_bridge->physoutdev : NULL; } static inline bool nf_bridge_in_prerouting(const struct sk_buff *skb) { - const struct nf_bridge_info *nf_bridge = nf_bridge_info_get(skb); - - return nf_bridge && nf_bridge->in_prerouting; + return skb->nf_bridge && skb->nf_bridge->in_prerouting; } #else #define br_drop_fake_rtable(skb) do { } while (0) diff --git a/include/linux/netfilter_bridge/ebt_802_3.h b/include/linux/netfilter_bridge/ebt_802_3.h new file mode 100644 index 0000000000..e17e8bfb4e --- /dev/null +++ b/include/linux/netfilter_bridge/ebt_802_3.h @@ -0,0 +1,11 @@ +#ifndef __LINUX_BRIDGE_EBT_802_3_H +#define __LINUX_BRIDGE_EBT_802_3_H + +#include +#include + +static inline struct ebt_802_3_hdr *ebt_802_3_hdr(const struct sk_buff *skb) +{ + return (struct ebt_802_3_hdr *)skb_mac_header(skb); +} +#endif diff --git a/include/linux/netfilter_bridge/ebtables.h b/include/linux/netfilter_bridge/ebtables.h index 10a01978bc..984b2112c7 100644 --- a/include/linux/netfilter_bridge/ebtables.h +++ 
b/include/linux/netfilter_bridge/ebtables.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ /* * ebtables * @@ -17,6 +16,10 @@ #include #include +/* return values for match() functions */ +#define EBT_MATCH 0 +#define EBT_NOMATCH 1 + struct ebt_match { struct list_head list; const char name[EBT_FUNCTION_MAXNAMELEN]; @@ -85,7 +88,7 @@ struct ebt_table_info { /* room to maintain the stack used for jumping from and into udc */ struct ebt_chainstack **chainstack; char *entries; - struct ebt_counter counters[] ____cacheline_aligned; + struct ebt_counter counters[0] ____cacheline_aligned; }; struct ebt_table { @@ -100,18 +103,14 @@ struct ebt_table { unsigned int valid_hooks); /* the data used by the kernel */ struct ebt_table_info *private; - struct nf_hook_ops *ops; struct module *me; }; #define EBT_ALIGN(s) (((s) + (__alignof__(struct _xt_align)-1)) & \ ~(__alignof__(struct _xt_align)-1)) - -extern int ebt_register_table(struct net *net, - const struct ebt_table *table, - const struct nf_hook_ops *ops); -extern void ebt_unregister_table(struct net *net, const char *tablename); -void ebt_unregister_table_pre_exit(struct net *net, const char *tablename); +extern struct ebt_table *ebt_register_table(struct net *net, + const struct ebt_table *table); +extern void ebt_unregister_table(struct net *net, struct ebt_table *table); extern unsigned int ebt_do_table(struct sk_buff *skb, const struct nf_hook_state *state, struct ebt_table *table); @@ -121,12 +120,7 @@ extern unsigned int ebt_do_table(struct sk_buff *skb, #define BASE_CHAIN (par->hook_mask & (1 << NF_BR_NUMHOOKS)) /* Clear the bit in the hook mask that tells if the rule is on a base chain */ #define CLEAR_BASE_CHAIN_BIT (par->hook_mask &= ~(1 << NF_BR_NUMHOOKS)) +/* True if the target is not a standard target */ +#define INVALID_TARGET (info->target < -NUM_STANDARD_TARGETS || info->target >= 0) -static inline bool ebt_invalid_target(int target) -{ - return (target < -NUM_STANDARD_TARGETS || target >= 0); -} 
- -int ebt_register_template(const struct ebt_table *t, int(*table_init)(struct net *net)); -void ebt_unregister_template(const struct ebt_table *t); #endif diff --git a/include/linux/netfilter_defs.h b/include/linux/netfilter_defs.h index 8dddfb151f..d3a7f8597e 100644 --- a/include/linux/netfilter_defs.h +++ b/include/linux/netfilter_defs.h @@ -1,20 +1,9 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef __LINUX_NETFILTER_CORE_H_ #define __LINUX_NETFILTER_CORE_H_ #include -/* in/out/forward only */ -#define NF_ARP_NUMHOOKS 3 - -/* max hook is NF_DN_ROUTE (6), also see uapi/linux/netfilter_decnet.h */ -#define NF_DN_NUMHOOKS 7 - -#if IS_ENABLED(CONFIG_DECNET) /* Largest hook number + 1, see uapi/linux/netfilter_decnet.h */ -#define NF_MAX_HOOKS NF_DN_NUMHOOKS -#else -#define NF_MAX_HOOKS NF_INET_NUMHOOKS -#endif +#define NF_MAX_HOOKS 8 #endif diff --git a/include/linux/netfilter_ingress.h b/include/linux/netfilter_ingress.h index a13774be2e..33e37fb41d 100644 --- a/include/linux/netfilter_ingress.h +++ b/include/linux/netfilter_ingress.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _NETFILTER_INGRESS_H_ #define _NETFILTER_INGRESS_H_ @@ -8,7 +7,7 @@ #ifdef CONFIG_NETFILTER_INGRESS static inline bool nf_hook_ingress_active(const struct sk_buff *skb) { -#ifdef CONFIG_JUMP_LABEL +#ifdef HAVE_JUMP_LABEL if (!static_key_false(&nf_hooks_needed[NFPROTO_NETDEV][NF_NETDEV_INGRESS])) return false; #endif @@ -18,9 +17,8 @@ static inline bool nf_hook_ingress_active(const struct sk_buff *skb) /* caller must hold rcu_read_lock */ static inline int nf_hook_ingress(struct sk_buff *skb) { - struct nf_hook_entries *e = rcu_dereference(skb->dev->nf_hooks_ingress); + struct nf_hook_entry *e = rcu_dereference(skb->dev->nf_hooks_ingress); struct nf_hook_state state; - int ret; /* Must recheck the ingress hook head, in the event it became NULL * after the check in nf_hook_ingress_active evaluated to true. 
@@ -28,14 +26,10 @@ static inline int nf_hook_ingress(struct sk_buff *skb) if (unlikely(!e)) return 0; - nf_hook_state_init(&state, NF_NETDEV_INGRESS, + nf_hook_state_init(&state, e, NF_NETDEV_INGRESS, INT_MIN, NFPROTO_NETDEV, skb->dev, NULL, NULL, dev_net(skb->dev), NULL); - ret = nf_hook_slow(skb, &state, e, 0); - if (ret == 0) - return -1; - - return ret; + return nf_hook_slow(skb, &state); } static inline void nf_hook_ingress_init(struct net_device *dev) diff --git a/include/linux/netfilter_ipv4.h b/include/linux/netfilter_ipv4.h index 5b70ca868b..98c03b2462 100644 --- a/include/linux/netfilter_ipv4.h +++ b/include/linux/netfilter_ipv4.h @@ -6,36 +6,7 @@ #include -/* Extra routing may needed on local out, as the QUEUE target never returns - * control to the table. - */ -struct ip_rt_info { - __be32 daddr; - __be32 saddr; - u_int8_t tos; - u_int32_t mark; -}; - -int ip_route_me_harder(struct net *net, struct sock *sk, struct sk_buff *skb, unsigned addr_type); - -struct nf_queue_entry; - -#ifdef CONFIG_INET +int ip_route_me_harder(struct net *net, struct sk_buff *skb, unsigned addr_type); __sum16 nf_ip_checksum(struct sk_buff *skb, unsigned int hook, unsigned int dataoff, u_int8_t protocol); -int nf_ip_route(struct net *net, struct dst_entry **dst, struct flowi *fl, - bool strict); -#else -static inline __sum16 nf_ip_checksum(struct sk_buff *skb, unsigned int hook, - unsigned int dataoff, u_int8_t protocol) -{ - return 0; -} -static inline int nf_ip_route(struct net *net, struct dst_entry **dst, - struct flowi *fl, bool strict) -{ - return -EOPNOTSUPP; -} -#endif /* CONFIG_INET */ - #endif /*__LINUX_IP_NETFILTER_H*/ diff --git a/include/linux/netfilter_ipv4/ip_tables.h b/include/linux/netfilter_ipv4/ip_tables.h index 8d09bfe850..7bfc5893ec 100644 --- a/include/linux/netfilter_ipv4/ip_tables.h +++ b/include/linux/netfilter_ipv4/ip_tables.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ /* * 25-Jul-1998 Major changes to allow for ip chain table * @@ -17,17 
+16,19 @@ #include #include -#include #include #include + +#include #include +extern void ipt_init(void) __init; + int ipt_register_table(struct net *net, const struct xt_table *table, const struct ipt_replace *repl, - const struct nf_hook_ops *ops); - -void ipt_unregister_table_pre_exit(struct net *net, const char *name); -void ipt_unregister_table_exit(struct net *net, const char *name); + const struct nf_hook_ops *ops, struct xt_table **res); +void ipt_unregister_table(struct net *net, struct xt_table *table, + const struct nf_hook_ops *ops); /* Standard entry. */ struct ipt_standard { @@ -67,7 +68,7 @@ extern unsigned int ipt_do_table(struct sk_buff *skb, const struct nf_hook_state *state, struct xt_table *table); -#ifdef CONFIG_NETFILTER_XTABLES_COMPAT +#ifdef CONFIG_COMPAT #include struct compat_ipt_entry { @@ -77,7 +78,7 @@ struct compat_ipt_entry { __u16 next_offset; compat_uint_t comefrom; struct compat_xt_counters counters; - unsigned char elems[]; + unsigned char elems[0]; }; /* Helper functions */ diff --git a/include/linux/netfilter_ipv6.h b/include/linux/netfilter_ipv6.h index 48314ade15..47c6b04c28 100644 --- a/include/linux/netfilter_ipv6.h +++ b/include/linux/netfilter_ipv6.h @@ -1,74 +1,33 @@ /* IPv6-specific defines for netfilter. * (C)1998 Rusty Russell -- This code is GPL. 
* (C)1999 David Jeffery - * this header was blatantly ripped from netfilter_ipv4.h + * this header was blatantly ripped from netfilter_ipv4.h * it's amazing what adding a bunch of 6s can do =8^) */ #ifndef __LINUX_IP6_NETFILTER_H #define __LINUX_IP6_NETFILTER_H #include -#include - -/* Check for an extension */ -static inline int -nf_ip6_ext_hdr(u8 nexthdr) -{ return (nexthdr == IPPROTO_HOPOPTS) || - (nexthdr == IPPROTO_ROUTING) || - (nexthdr == IPPROTO_FRAGMENT) || - (nexthdr == IPPROTO_ESP) || - (nexthdr == IPPROTO_AH) || - (nexthdr == IPPROTO_NONE) || - (nexthdr == IPPROTO_DSTOPTS); -} - -/* Extra routing may needed on local out, as the QUEUE target never returns - * control to the table. - */ -struct ip6_rt_info { - struct in6_addr daddr; - struct in6_addr saddr; - u_int32_t mark; -}; - -struct nf_queue_entry; -struct nf_bridge_frag_data; /* * Hook functions for ipv6 to allow xt_* modules to be built-in even * if IPv6 is a module. */ struct nf_ipv6_ops { -#if IS_MODULE(CONFIG_IPV6) int (*chk_addr)(struct net *net, const struct in6_addr *addr, const struct net_device *dev, int strict); - int (*route_me_harder)(struct net *net, struct sock *sk, struct sk_buff *skb); - int (*dev_get_saddr)(struct net *net, const struct net_device *dev, - const struct in6_addr *daddr, unsigned int srcprefs, - struct in6_addr *saddr); - int (*route)(struct net *net, struct dst_entry **dst, struct flowi *fl, - bool strict); - u32 (*cookie_init_sequence)(const struct ipv6hdr *iph, - const struct tcphdr *th, u16 *mssp); - int (*cookie_v6_check)(const struct ipv6hdr *iph, - const struct tcphdr *th, __u32 cookie); -#endif void (*route_input)(struct sk_buff *skb); int (*fragment)(struct net *net, struct sock *sk, struct sk_buff *skb, int (*output)(struct net *, struct sock *, struct sk_buff *)); - int (*reroute)(struct sk_buff *skb, const struct nf_queue_entry *entry); -#if IS_MODULE(CONFIG_IPV6) - int (*br_fragment)(struct net *net, struct sock *sk, - struct sk_buff *skb, - struct 
nf_bridge_frag_data *data, - int (*output)(struct net *, struct sock *sk, - const struct nf_bridge_frag_data *data, - struct sk_buff *)); -#endif }; #ifdef CONFIG_NETFILTER -#include +int ip6_route_me_harder(struct net *net, struct sk_buff *skb); +__sum16 nf_ip6_checksum(struct sk_buff *skb, unsigned int hook, + unsigned int dataoff, u_int8_t protocol); + +int ipv6_netfilter_init(void); +void ipv6_netfilter_fini(void); extern const struct nf_ipv6_ops __rcu *nf_ipv6_ops; static inline const struct nf_ipv6_ops *nf_get_ipv6_ops(void) @@ -76,130 +35,6 @@ static inline const struct nf_ipv6_ops *nf_get_ipv6_ops(void) return rcu_dereference(nf_ipv6_ops); } -static inline int nf_ipv6_chk_addr(struct net *net, const struct in6_addr *addr, - const struct net_device *dev, int strict) -{ -#if IS_MODULE(CONFIG_IPV6) - const struct nf_ipv6_ops *v6_ops = nf_get_ipv6_ops(); - - if (!v6_ops) - return 1; - - return v6_ops->chk_addr(net, addr, dev, strict); -#elif IS_BUILTIN(CONFIG_IPV6) - return ipv6_chk_addr(net, addr, dev, strict); -#else - return 1; -#endif -} - -int __nf_ip6_route(struct net *net, struct dst_entry **dst, - struct flowi *fl, bool strict); - -static inline int nf_ip6_route(struct net *net, struct dst_entry **dst, - struct flowi *fl, bool strict) -{ -#if IS_MODULE(CONFIG_IPV6) - const struct nf_ipv6_ops *v6ops = nf_get_ipv6_ops(); - - if (v6ops) - return v6ops->route(net, dst, fl, strict); - - return -EHOSTUNREACH; -#endif -#if IS_BUILTIN(CONFIG_IPV6) - return __nf_ip6_route(net, dst, fl, strict); -#else - return -EHOSTUNREACH; -#endif -} - -#include - -int br_ip6_fragment(struct net *net, struct sock *sk, struct sk_buff *skb, - struct nf_bridge_frag_data *data, - int (*output)(struct net *, struct sock *sk, - const struct nf_bridge_frag_data *data, - struct sk_buff *)); - -static inline int nf_br_ip6_fragment(struct net *net, struct sock *sk, - struct sk_buff *skb, - struct nf_bridge_frag_data *data, - int (*output)(struct net *, struct sock *sk, - const struct 
nf_bridge_frag_data *data, - struct sk_buff *)) -{ -#if IS_MODULE(CONFIG_IPV6) - const struct nf_ipv6_ops *v6_ops = nf_get_ipv6_ops(); - - if (!v6_ops) - return 1; - - return v6_ops->br_fragment(net, sk, skb, data, output); -#elif IS_BUILTIN(CONFIG_IPV6) - return br_ip6_fragment(net, sk, skb, data, output); -#else - return 1; -#endif -} - -int ip6_route_me_harder(struct net *net, struct sock *sk, struct sk_buff *skb); - -static inline int nf_ip6_route_me_harder(struct net *net, struct sock *sk, struct sk_buff *skb) -{ -#if IS_MODULE(CONFIG_IPV6) - const struct nf_ipv6_ops *v6_ops = nf_get_ipv6_ops(); - - if (!v6_ops) - return -EHOSTUNREACH; - - return v6_ops->route_me_harder(net, sk, skb); -#elif IS_BUILTIN(CONFIG_IPV6) - return ip6_route_me_harder(net, sk, skb); -#else - return -EHOSTUNREACH; -#endif -} - -static inline u32 nf_ipv6_cookie_init_sequence(const struct ipv6hdr *iph, - const struct tcphdr *th, - u16 *mssp) -{ -#if IS_ENABLED(CONFIG_SYN_COOKIES) -#if IS_MODULE(CONFIG_IPV6) - const struct nf_ipv6_ops *v6_ops = nf_get_ipv6_ops(); - - if (v6_ops) - return v6_ops->cookie_init_sequence(iph, th, mssp); -#elif IS_BUILTIN(CONFIG_IPV6) - return __cookie_v6_init_sequence(iph, th, mssp); -#endif -#endif - return 0; -} - -static inline int nf_cookie_v6_check(const struct ipv6hdr *iph, - const struct tcphdr *th, __u32 cookie) -{ -#if IS_ENABLED(CONFIG_SYN_COOKIES) -#if IS_MODULE(CONFIG_IPV6) - const struct nf_ipv6_ops *v6_ops = nf_get_ipv6_ops(); - - if (v6_ops) - return v6_ops->cookie_v6_check(iph, th, cookie); -#elif IS_BUILTIN(CONFIG_IPV6) - return __cookie_v6_check(iph, th, cookie); -#endif -#endif - return 0; -} - -__sum16 nf_ip6_checksum(struct sk_buff *skb, unsigned int hook, - unsigned int dataoff, u_int8_t protocol); - -int ipv6_netfilter_init(void); -void ipv6_netfilter_fini(void); - #else /* CONFIG_NETFILTER */ static inline int ipv6_netfilter_init(void) { return 0; } static inline void ipv6_netfilter_fini(void) { return; } diff --git 
a/include/linux/netfilter_ipv6/ip6_tables.h b/include/linux/netfilter_ipv6/ip6_tables.h index 79e73fd7d9..b21c392d60 100644 --- a/include/linux/netfilter_ipv6/ip6_tables.h +++ b/include/linux/netfilter_ipv6/ip6_tables.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ /* * 25-Jul-1998 Major changes to allow for ip chain table * @@ -17,23 +16,37 @@ #include #include -#include #include #include + +#include #include -extern void *ip6t_alloc_initial_table(const struct xt_table *); +extern void ip6t_init(void) __init; +extern void *ip6t_alloc_initial_table(const struct xt_table *); int ip6t_register_table(struct net *net, const struct xt_table *table, const struct ip6t_replace *repl, - const struct nf_hook_ops *ops); -void ip6t_unregister_table_pre_exit(struct net *net, const char *name); -void ip6t_unregister_table_exit(struct net *net, const char *name); + const struct nf_hook_ops *ops, struct xt_table **res); +void ip6t_unregister_table(struct net *net, struct xt_table *table, + const struct nf_hook_ops *ops); extern unsigned int ip6t_do_table(struct sk_buff *skb, const struct nf_hook_state *state, struct xt_table *table); -#ifdef CONFIG_NETFILTER_XTABLES_COMPAT +/* Check for an extension */ +static inline int +ip6t_ext_hdr(u8 nexthdr) +{ return (nexthdr == IPPROTO_HOPOPTS) || + (nexthdr == IPPROTO_ROUTING) || + (nexthdr == IPPROTO_FRAGMENT) || + (nexthdr == IPPROTO_ESP) || + (nexthdr == IPPROTO_AH) || + (nexthdr == IPPROTO_NONE) || + (nexthdr == IPPROTO_DSTOPTS); +} + +#ifdef CONFIG_COMPAT #include struct compat_ip6t_entry { @@ -43,7 +56,7 @@ struct compat_ip6t_entry { __u16 next_offset; compat_uint_t comefrom; struct compat_xt_counters counters; - unsigned char elems[]; + unsigned char elems[0]; }; static inline struct xt_entry_target * diff --git a/include/linux/netlink.h b/include/linux/netlink.h index 61b1c7fcc4..874abff865 100644 --- a/include/linux/netlink.h +++ b/include/linux/netlink.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef 
__LINUX_NETLINK_H #define __LINUX_NETLINK_H @@ -11,14 +10,15 @@ struct net; -void do_trace_netlink_extack(const char *msg); - static inline struct nlmsghdr *nlmsg_hdr(const struct sk_buff *skb) { return (struct nlmsghdr *)skb->data; } enum netlink_skb_flags { + NETLINK_SKB_MMAPED = 0x1, /* Packet data is mmaped */ + NETLINK_SKB_TX = 0x2, /* Packet was sent by userspace */ + NETLINK_SKB_DELIVERED = 0x4, /* Packet was delivered */ NETLINK_SKB_DST = 0x8, /* Dst set in sendto or sendmsg */ }; @@ -36,8 +36,8 @@ struct netlink_skb_parms { #define NETLINK_CREDS(skb) (&NETLINK_CB((skb)).creds) -void netlink_table_grab(void); -void netlink_table_ungrab(void); +extern void netlink_table_grab(void); +extern void netlink_table_ungrab(void); #define NL_CFG_F_NONROOT_RECV (1 << 0) #define NL_CFG_F_NONROOT_SEND (1 << 1) @@ -53,7 +53,7 @@ struct netlink_kernel_cfg { bool (*compare)(struct net *net, struct sock *sk); }; -struct sock *__netlink_kernel_create(struct net *net, int unit, +extern struct sock *__netlink_kernel_create(struct net *net, int unit, struct module *module, struct netlink_kernel_cfg *cfg); static inline struct sock * @@ -62,107 +62,23 @@ netlink_kernel_create(struct net *net, int unit, struct netlink_kernel_cfg *cfg) return __netlink_kernel_create(net, unit, THIS_MODULE, cfg); } -/* this can be increased when necessary - don't expose to userland */ -#define NETLINK_MAX_COOKIE_LEN 20 +extern void netlink_kernel_release(struct sock *sk); +extern int __netlink_change_ngroups(struct sock *sk, unsigned int groups); +extern int netlink_change_ngroups(struct sock *sk, unsigned int groups); +extern void __netlink_clear_multicast_users(struct sock *sk, unsigned int group); +extern void netlink_ack(struct sk_buff *in_skb, struct nlmsghdr *nlh, int err); +extern int netlink_has_listeners(struct sock *sk, unsigned int group); -/** - * struct netlink_ext_ack - netlink extended ACK report struct - * @_msg: message string to report - don't access directly, use - * 
%NL_SET_ERR_MSG - * @bad_attr: attribute with error - * @policy: policy for a bad attribute - * @cookie: cookie data to return to userspace (for success) - * @cookie_len: actual cookie data length - */ -struct netlink_ext_ack { - const char *_msg; - const struct nlattr *bad_attr; - const struct nla_policy *policy; - u8 cookie[NETLINK_MAX_COOKIE_LEN]; - u8 cookie_len; -}; - -/* Always use this macro, this allows later putting the - * message into a separate section or such for things - * like translation or listing all possible messages. - * Currently string formatting is not supported (due - * to the lack of an output buffer.) - */ -#define NL_SET_ERR_MSG(extack, msg) do { \ - static const char __msg[] = msg; \ - struct netlink_ext_ack *__extack = (extack); \ - \ - do_trace_netlink_extack(__msg); \ - \ - if (__extack) \ - __extack->_msg = __msg; \ -} while (0) - -#define NL_SET_ERR_MSG_MOD(extack, msg) \ - NL_SET_ERR_MSG((extack), KBUILD_MODNAME ": " msg) - -#define NL_SET_BAD_ATTR_POLICY(extack, attr, pol) do { \ - if ((extack)) { \ - (extack)->bad_attr = (attr); \ - (extack)->policy = (pol); \ - } \ -} while (0) - -#define NL_SET_BAD_ATTR(extack, attr) NL_SET_BAD_ATTR_POLICY(extack, attr, NULL) - -#define NL_SET_ERR_MSG_ATTR_POL(extack, attr, pol, msg) do { \ - static const char __msg[] = msg; \ - struct netlink_ext_ack *__extack = (extack); \ - \ - do_trace_netlink_extack(__msg); \ - \ - if (__extack) { \ - __extack->_msg = __msg; \ - __extack->bad_attr = (attr); \ - __extack->policy = (pol); \ - } \ -} while (0) - -#define NL_SET_ERR_MSG_ATTR(extack, attr, msg) \ - NL_SET_ERR_MSG_ATTR_POL(extack, attr, NULL, msg) - -static inline void nl_set_extack_cookie_u64(struct netlink_ext_ack *extack, - u64 cookie) -{ - if (!extack) - return; - memcpy(extack->cookie, &cookie, sizeof(cookie)); - extack->cookie_len = sizeof(cookie); -} - -static inline void nl_set_extack_cookie_u32(struct netlink_ext_ack *extack, - u32 cookie) -{ - if (!extack) - return; - 
memcpy(extack->cookie, &cookie, sizeof(cookie)); - extack->cookie_len = sizeof(cookie); -} - -void netlink_kernel_release(struct sock *sk); -int __netlink_change_ngroups(struct sock *sk, unsigned int groups); -int netlink_change_ngroups(struct sock *sk, unsigned int groups); -void __netlink_clear_multicast_users(struct sock *sk, unsigned int group); -void netlink_ack(struct sk_buff *in_skb, struct nlmsghdr *nlh, int err, - const struct netlink_ext_ack *extack); -int netlink_has_listeners(struct sock *sk, unsigned int group); -bool netlink_strict_get_check(struct sk_buff *skb); - -int netlink_unicast(struct sock *ssk, struct sk_buff *skb, __u32 portid, int nonblock); -int netlink_broadcast(struct sock *ssk, struct sk_buff *skb, __u32 portid, - __u32 group, gfp_t allocation); -int netlink_broadcast_filtered(struct sock *ssk, struct sk_buff *skb, - __u32 portid, __u32 group, gfp_t allocation, - int (*filter)(struct sock *dsk, struct sk_buff *skb, void *data), - void *filter_data); -int netlink_set_err(struct sock *ssk, __u32 portid, __u32 group, int code); -int netlink_register_notifier(struct notifier_block *nb); -int netlink_unregister_notifier(struct notifier_block *nb); +extern int netlink_unicast(struct sock *ssk, struct sk_buff *skb, __u32 portid, int nonblock); +extern int netlink_broadcast(struct sock *ssk, struct sk_buff *skb, __u32 portid, + __u32 group, gfp_t allocation); +extern int netlink_broadcast_filtered(struct sock *ssk, struct sk_buff *skb, + __u32 portid, __u32 group, gfp_t allocation, + int (*filter)(struct sock *dsk, struct sk_buff *skb, void *data), + void *filter_data); +extern int netlink_set_err(struct sock *ssk, __u32 portid, __u32 group, int code); +extern int netlink_register_notifier(struct notifier_block *nb); +extern int netlink_unregister_notifier(struct notifier_block *nb); /* finegrained unicast helpers: */ struct sock *netlink_getsockbyfilp(struct file *filp); @@ -205,26 +121,17 @@ netlink_skb_clone(struct sk_buff *skb, gfp_t 
gfp_mask) struct netlink_callback { struct sk_buff *skb; const struct nlmsghdr *nlh; + int (*start)(struct netlink_callback *); int (*dump)(struct sk_buff * skb, struct netlink_callback *cb); int (*done)(struct netlink_callback *cb); void *data; /* the module that dump function belong to */ struct module *module; - struct netlink_ext_ack *extack; u16 family; - u16 answer_flags; - u32 min_dump_alloc; + u16 min_dump_alloc; unsigned int prev_seq, seq; - bool strict_check; - union { - u8 ctx[48]; - - /* args is deprecated. Cast a struct over ctx instead - * for proper type safety. - */ - long args[6]; - }; + long args[6]; }; struct netlink_notify { @@ -242,20 +149,20 @@ struct netlink_dump_control { int (*done)(struct netlink_callback *); void *data; struct module *module; - u32 min_dump_alloc; -}; + u16 min_dump_alloc; +} __do_const; +typedef struct netlink_dump_control __no_const netlink_dump_control_no_const; -int __netlink_dump_start(struct sock *ssk, struct sk_buff *skb, +extern int __netlink_dump_start(struct sock *ssk, struct sk_buff *skb, const struct nlmsghdr *nlh, - struct netlink_dump_control *control); + struct netlink_dump_control *control, + void *data, + struct module *module); static inline int netlink_dump_start(struct sock *ssk, struct sk_buff *skb, const struct nlmsghdr *nlh, struct netlink_dump_control *control) { - if (!control->module) - control->module = THIS_MODULE; - - return __netlink_dump_start(ssk, skb, nlh, control); + return __netlink_dump_start(ssk, skb, nlh, control, control->data, control->module ? 
: THIS_MODULE); } struct netlink_tap { @@ -264,8 +171,8 @@ struct netlink_tap { struct list_head list; }; -int netlink_add_tap(struct netlink_tap *nt); -int netlink_remove_tap(struct netlink_tap *nt); +extern int netlink_add_tap(struct netlink_tap *nt); +extern int netlink_remove_tap(struct netlink_tap *nt); bool __netlink_ns_capable(const struct netlink_skb_parms *nsp, struct user_namespace *ns, int cap); diff --git a/include/linux/netpoll.h b/include/linux/netpoll.h index e6a2d72e0d..b25ee9ffdb 100644 --- a/include/linux/netpoll.h +++ b/include/linux/netpoll.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ /* * Common code for low-level network console, dump, and debugger code * @@ -12,7 +11,6 @@ #include #include #include -#include union inet_addr { __u32 all[4]; @@ -31,10 +29,12 @@ struct netpoll { bool ipv6; u16 local_port, remote_port; u8 remote_mac[ETH_ALEN]; + + struct work_struct cleanup_work; }; struct netpoll_info { - refcount_t refcnt; + atomic_t refcnt; struct semaphore dev_lock; @@ -47,9 +47,8 @@ struct netpoll_info { }; #ifdef CONFIG_NETPOLL -void netpoll_poll_dev(struct net_device *dev); -void netpoll_poll_disable(struct net_device *dev); -void netpoll_poll_enable(struct net_device *dev); +extern void netpoll_poll_disable(struct net_device *dev); +extern void netpoll_poll_enable(struct net_device *dev); #else static inline void netpoll_poll_disable(struct net_device *dev) { return; } static inline void netpoll_poll_enable(struct net_device *dev) { return; } @@ -61,9 +60,17 @@ int netpoll_parse_options(struct netpoll *np, char *opt); int __netpoll_setup(struct netpoll *np, struct net_device *ndev); int netpoll_setup(struct netpoll *np); void __netpoll_cleanup(struct netpoll *np); -void __netpoll_free(struct netpoll *np); +void __netpoll_free_async(struct netpoll *np); void netpoll_cleanup(struct netpoll *np); -netdev_tx_t netpoll_send_skb(struct netpoll *np, struct sk_buff *skb); +void netpoll_send_skb_on_dev(struct netpoll *np, struct 
sk_buff *skb, + struct net_device *dev); +static inline void netpoll_send_skb(struct netpoll *np, struct sk_buff *skb) +{ + unsigned long flags; + local_irq_save(flags); + netpoll_send_skb_on_dev(np, skb, np->dev); + local_irq_restore(flags); +} #ifdef CONFIG_NETPOLL static inline void *netpoll_poll_lock(struct napi_struct *napi) @@ -71,11 +78,8 @@ static inline void *netpoll_poll_lock(struct napi_struct *napi) struct net_device *dev = napi->dev; if (dev && dev->npinfo) { - int owner = smp_processor_id(); - - while (cmpxchg(&napi->poll_owner, -1, owner) != -1) - cpu_relax(); - + spin_lock(&napi->poll_lock); + napi->poll_owner = smp_processor_id(); return napi; } return NULL; @@ -85,8 +89,10 @@ static inline void netpoll_poll_unlock(void *have) { struct napi_struct *napi = have; - if (napi) - smp_store_release(&napi->poll_owner, -1); + if (napi) { + napi->poll_owner = -1; + spin_unlock(&napi->poll_lock); + } } static inline bool netpoll_tx_running(struct net_device *dev) @@ -102,6 +108,9 @@ static inline void *netpoll_poll_lock(struct napi_struct *napi) static inline void netpoll_poll_unlock(void *have) { } +static inline void netpoll_netdev_init(struct net_device *dev) +{ +} static inline bool netpoll_tx_running(struct net_device *dev) { return false; diff --git a/include/linux/nfs.h b/include/linux/nfs.h index 0dc7ad38a0..610af5155e 100644 --- a/include/linux/nfs.h +++ b/include/linux/nfs.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ /* * NFS protocol definitions * diff --git a/include/linux/nfs3.h b/include/linux/nfs3.h index 404b8f724f..a778ad8e3a 100644 --- a/include/linux/nfs3.h +++ b/include/linux/nfs3.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ /* * NFSv3 protocol definitions */ diff --git a/include/linux/nfs4.h b/include/linux/nfs4.h index 15004c4698..039e76e918 100644 --- a/include/linux/nfs4.h +++ b/include/linux/nfs4.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ /* * include/linux/nfs4.h * @@ -16,7 +15,6 @@ #include 
#include #include -#include enum nfs4_acl_whotype { NFS4_ACL_WHO_NAMED = 0, @@ -38,7 +36,7 @@ struct nfs4_ace { struct nfs4_acl { uint32_t naces; - struct nfs4_ace aces[]; + struct nfs4_ace aces[0]; }; #define NFS4_MAXLABELLEN 2048 @@ -150,12 +148,6 @@ enum nfs_opnum4 { OP_WRITE_SAME = 70, OP_CLONE = 71, - /* xattr support (RFC8726) */ - OP_GETXATTR = 72, - OP_SETXATTR = 73, - OP_LISTXATTRS = 74, - OP_REMOVEXATTR = 75, - OP_ILLEGAL = 10044, }; @@ -165,7 +157,7 @@ Needs to be updated if more operations are defined in future.*/ #define FIRST_NFS4_OP OP_ACCESS #define LAST_NFS40_OP OP_RELEASE_LOCKOWNER #define LAST_NFS41_OP OP_RECLAIM_COMPLETE -#define LAST_NFS42_OP OP_REMOVEXATTR +#define LAST_NFS42_OP OP_CLONE #define LAST_NFS4_OP LAST_NFS42_OP enum nfsstat4 { @@ -286,10 +278,6 @@ enum nfsstat4 { NFS4ERR_WRONG_LFS = 10092, NFS4ERR_BADLABEL = 10093, NFS4ERR_OFFLOAD_NO_REQS = 10094, - - /* xattr (RFC8276) */ - NFS4ERR_NOXATTR = 10095, - NFS4ERR_XATTR2BIG = 10096, }; static inline bool seqid_mutating_err(u32 err) @@ -305,7 +293,7 @@ static inline bool seqid_mutating_err(u32 err) case NFS4ERR_NOFILEHANDLE: case NFS4ERR_MOVED: return false; - } + }; return true; } @@ -452,10 +440,7 @@ enum lock_type4 { #define FATTR4_WORD2_LAYOUT_BLKSIZE (1UL << 1) #define FATTR4_WORD2_MDSTHRESHOLD (1UL << 4) #define FATTR4_WORD2_CLONE_BLKSIZE (1UL << 13) -#define FATTR4_WORD2_CHANGE_ATTR_TYPE (1UL << 15) #define FATTR4_WORD2_SECURITY_LABEL (1UL << 16) -#define FATTR4_WORD2_MODE_UMASK (1UL << 17) -#define FATTR4_WORD2_XATTR_SUPPORT (1UL << 18) /* MDS threshold bitmap bits */ #define THRESHOLD_RD (1UL << 0) @@ -470,12 +455,7 @@ enum lock_type4 { #define NFS4_DEBUG 1 -/* - * Index of predefined Linux client operations - * - * To ensure that /proc/net/rpc/nfs remains correctly ordered, please - * append only to this enum when adding new client operations. 
- */ +/* Index of predefined Linux client operations */ enum { NFSPROC4_CLNT_NULL = 0, /* Unused */ @@ -517,6 +497,7 @@ enum { NFSPROC4_CLNT_SECINFO, NFSPROC4_CLNT_FSID_PRESENT, + /* nfs41 */ NFSPROC4_CLNT_EXCHANGE_ID, NFSPROC4_CLNT_CREATE_SESSION, NFSPROC4_CLNT_DESTROY_SESSION, @@ -534,23 +515,13 @@ enum { NFSPROC4_CLNT_BIND_CONN_TO_SESSION, NFSPROC4_CLNT_DESTROY_CLIENTID, + /* nfs42 */ NFSPROC4_CLNT_SEEK, NFSPROC4_CLNT_ALLOCATE, NFSPROC4_CLNT_DEALLOCATE, NFSPROC4_CLNT_LAYOUTSTATS, NFSPROC4_CLNT_CLONE, NFSPROC4_CLNT_COPY, - NFSPROC4_CLNT_OFFLOAD_CANCEL, - - NFSPROC4_CLNT_LOOKUPP, - NFSPROC4_CLNT_LAYOUTERROR, - NFSPROC4_CLNT_COPY_NOTIFY, - - NFSPROC4_CLNT_GETXATTR, - NFSPROC4_CLNT_SETXATTR, - NFSPROC4_CLNT_LISTXATTRS, - NFSPROC4_CLNT_REMOVEXATTR, - NFSPROC4_CLNT_READ_PLUS, }; /* nfs41 types */ @@ -672,7 +643,6 @@ enum pnfs_update_layout_reason { PNFS_UPDATE_LAYOUT_BLOCKED, PNFS_UPDATE_LAYOUT_INVALID_OPEN, PNFS_UPDATE_LAYOUT_SEND_LAYOUTGET, - PNFS_UPDATE_LAYOUT_EXIT, }; #define NFS4_OP_MAP_NUM_LONGS \ @@ -686,44 +656,4 @@ struct nfs4_op_map { } u; }; -struct nfs42_netaddr { - char netid[RPCBIND_MAXNETIDLEN]; - char addr[RPCBIND_MAXUADDRLEN + 1]; - u32 netid_len; - u32 addr_len; -}; - -enum netloc_type4 { - NL4_NAME = 1, - NL4_URL = 2, - NL4_NETADDR = 3, -}; - -struct nl4_server { - enum netloc_type4 nl4_type; - union { - struct { /* NL4_NAME, NL4_URL */ - int nl4_str_sz; - char nl4_str[NFS4_OPAQUE_LIMIT + 1]; - }; - struct nfs42_netaddr nl4_addr; /* NL4_NETADDR */ - } u; -}; - -enum nfs4_change_attr_type { - NFS4_CHANGE_TYPE_IS_MONOTONIC_INCR = 0, - NFS4_CHANGE_TYPE_IS_VERSION_COUNTER = 1, - NFS4_CHANGE_TYPE_IS_VERSION_COUNTER_NOPNFS = 2, - NFS4_CHANGE_TYPE_IS_TIME_METADATA = 3, - NFS4_CHANGE_TYPE_IS_UNDEFINED = 4, -}; - -/* - * Options for setxattr. These match the flags for setxattr(2). 
- */ -enum nfs4_setxattr_options { - SETXATTR4_EITHER = 0, - SETXATTR4_CREATE = 1, - SETXATTR4_REPLACE = 2, -}; #endif diff --git a/include/linux/nfs_fs.h b/include/linux/nfs_fs.h index b9a8b925db..810124b333 100644 --- a/include/linux/nfs_fs.h +++ b/include/linux/nfs_fs.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ /* * linux/include/linux/nfs_fs.h * @@ -23,7 +22,6 @@ #include #include #include -#include #include #include @@ -40,67 +38,58 @@ #include -/* - * These are the default for number of transports to different server IPs - */ -#define NFS_MAX_TRANSPORTS 16 - /* * These are the default flags for swap requests */ #define NFS_RPC_SWAPFLAGS (RPC_TASK_SWAPPER|RPC_TASK_ROOTCREDS) -/* - * Size of the NFS directory verifier - */ -#define NFS_DIR_VERIFIER_SIZE 2 - /* * NFSv3/v4 Access mode cache entry */ struct nfs_access_entry { struct rb_node rb_node; struct list_head lru; - const struct cred * cred; - __u32 mask; + unsigned long jiffies; + struct rpc_cred * cred; + int mask; struct rcu_head rcu_head; }; +struct nfs_lockowner { + fl_owner_t l_owner; + pid_t l_pid; +}; + struct nfs_lock_context { - refcount_t count; + atomic_t count; struct list_head list; struct nfs_open_context *open_context; - fl_owner_t lockowner; + struct nfs_lockowner lockowner; atomic_t io_count; - struct rcu_head rcu_head; }; struct nfs4_state; struct nfs_open_context { struct nfs_lock_context lock_context; - fl_owner_t flock_owner; struct dentry *dentry; - const struct cred *cred; - struct rpc_cred *ll_cred; /* low-level cred - use to check for expiry */ + struct rpc_cred *cred; struct nfs4_state *state; fmode_t mode; unsigned long flags; +#define NFS_CONTEXT_ERROR_WRITE (0) #define NFS_CONTEXT_RESEND_WRITES (1) #define NFS_CONTEXT_BAD (2) -#define NFS_CONTEXT_UNLOCK (3) -#define NFS_CONTEXT_FILE_OPEN (4) int error; struct list_head list; struct nfs4_threshold *mdsthreshold; - struct rcu_head rcu_head; }; struct nfs_open_dir_context { struct list_head list; + struct rpc_cred 
*cred; unsigned long attr_gencount; - __be32 verf[NFS_DIR_VERIFIER_SIZE]; __u64 dir_cookie; __u64 dup_cookie; signed char duped; @@ -113,8 +102,6 @@ struct nfs_delegation; struct posix_acl; -struct nfs4_xattr_cache; - /* * nfs fs inode data in memory */ @@ -168,9 +155,9 @@ struct nfs_inode { * This is the cookie verifier used for NFSv3 readdir * operations */ - __be32 cookieverf[NFS_DIR_VERIFIER_SIZE]; + __be32 cookieverf[2]; - atomic_long_t nrequests; + unsigned long nrequests; struct nfs_mds_commit_info commit_info; /* Open contexts for shared mmap writes */ @@ -179,10 +166,6 @@ struct nfs_inode { /* Readers: in-flight sillydelete RPC calls */ /* Writers: rmdir */ struct rw_semaphore rmdir_sem; - struct mutex commit_mutex; - - /* track last access to cached pages */ - unsigned long page_index; #if IS_ENABLED(CONFIG_NFS_V4) struct nfs4_cached_acl *nfs4_acl; @@ -201,67 +184,19 @@ struct nfs_inode { struct fscache_cookie *fscache; #endif struct inode vfs_inode; - -#ifdef CONFIG_NFS_V4_2 - struct nfs4_xattr_cache *xattr_cache; -#endif }; -struct nfs4_copy_state { - struct list_head copies; - struct list_head src_copies; - nfs4_stateid stateid; - struct completion completion; - uint64_t count; - struct nfs_writeverf verf; - int error; - int flags; - struct nfs4_state *parent_src_state; - struct nfs4_state *parent_dst_state; -}; - -/* - * Access bit flags - */ -#define NFS_ACCESS_READ 0x0001 -#define NFS_ACCESS_LOOKUP 0x0002 -#define NFS_ACCESS_MODIFY 0x0004 -#define NFS_ACCESS_EXTEND 0x0008 -#define NFS_ACCESS_DELETE 0x0010 -#define NFS_ACCESS_EXECUTE 0x0020 -#define NFS_ACCESS_XAREAD 0x0040 -#define NFS_ACCESS_XAWRITE 0x0080 -#define NFS_ACCESS_XALIST 0x0100 - /* * Cache validity bit flags */ -#define NFS_INO_INVALID_DATA BIT(1) /* cached data is invalid */ -#define NFS_INO_INVALID_ATIME BIT(2) /* cached atime is invalid */ -#define NFS_INO_INVALID_ACCESS BIT(3) /* cached access cred invalid */ -#define NFS_INO_INVALID_ACL BIT(4) /* cached acls are invalid */ 
-#define NFS_INO_REVAL_PAGECACHE BIT(5) /* must revalidate pagecache */ -#define NFS_INO_REVAL_FORCED BIT(6) /* force revalidation ignoring a delegation */ -#define NFS_INO_INVALID_LABEL BIT(7) /* cached label is invalid */ -#define NFS_INO_INVALID_CHANGE BIT(8) /* cached change is invalid */ -#define NFS_INO_INVALID_CTIME BIT(9) /* cached ctime is invalid */ -#define NFS_INO_INVALID_MTIME BIT(10) /* cached mtime is invalid */ -#define NFS_INO_INVALID_SIZE BIT(11) /* cached size is invalid */ -#define NFS_INO_INVALID_OTHER BIT(12) /* other attrs are invalid */ -#define NFS_INO_DATA_INVAL_DEFER \ - BIT(13) /* Deferred cache invalidation */ -#define NFS_INO_INVALID_BLOCKS BIT(14) /* cached blocks are invalid */ -#define NFS_INO_INVALID_XATTR BIT(15) /* xattrs are invalid */ -#define NFS_INO_INVALID_NLINK BIT(16) /* cached nlinks is invalid */ -#define NFS_INO_INVALID_MODE BIT(17) /* cached mode is invalid */ - -#define NFS_INO_INVALID_ATTR (NFS_INO_INVALID_CHANGE \ - | NFS_INO_INVALID_CTIME \ - | NFS_INO_INVALID_MTIME \ - | NFS_INO_INVALID_SIZE \ - | NFS_INO_INVALID_NLINK \ - | NFS_INO_INVALID_MODE \ - | NFS_INO_INVALID_OTHER) /* inode metadata is invalid */ +#define NFS_INO_INVALID_ATTR 0x0001 /* cached attrs are invalid */ +#define NFS_INO_INVALID_DATA 0x0002 /* cached data is invalid */ +#define NFS_INO_INVALID_ATIME 0x0004 /* cached atime is invalid */ +#define NFS_INO_INVALID_ACCESS 0x0008 /* cached access cred invalid */ +#define NFS_INO_INVALID_ACL 0x0010 /* cached acls are invalid */ +#define NFS_INO_REVAL_PAGECACHE 0x0020 /* must revalidate pagecache */ +#define NFS_INO_REVAL_FORCED 0x0040 /* force revalidation ignoring a delegation */ +#define NFS_INO_INVALID_LABEL 0x0080 /* cached label is invalid */ /* * Bit offsets in flags field @@ -348,11 +283,10 @@ static inline void nfs_mark_for_revalidate(struct inode *inode) struct nfs_inode *nfsi = NFS_I(inode); spin_lock(&inode->i_lock); - nfsi->cache_validity |= NFS_INO_REVAL_PAGECACHE - | NFS_INO_INVALID_ACCESS 
- | NFS_INO_INVALID_ACL - | NFS_INO_INVALID_CHANGE - | NFS_INO_INVALID_CTIME; + nfsi->cache_validity |= NFS_INO_INVALID_ATTR | + NFS_INO_REVAL_PAGECACHE | + NFS_INO_INVALID_ACCESS | + NFS_INO_INVALID_ACL; if (S_ISDIR(inode->i_mode)) nfsi->cache_validity |= NFS_INO_INVALID_DATA; spin_unlock(&inode->i_lock); @@ -363,53 +297,68 @@ static inline int nfs_server_capable(struct inode *inode, int cap) return NFS_SERVER(inode)->caps & cap; } +static inline void nfs_set_verifier(struct dentry * dentry, unsigned long verf) +{ + dentry->d_time = verf; +} + /** * nfs_save_change_attribute - Returns the inode attribute change cookie * @dir - pointer to parent directory inode - * The "cache change attribute" is updated when we need to revalidate - * our dentry cache after a directory was seen to change on the server. + * The "change attribute" is updated every time we finish an operation + * that will result in a metadata change on the server. */ static inline unsigned long nfs_save_change_attribute(struct inode *dir) { return NFS_I(dir)->cache_change_attribute; } +/** + * nfs_verify_change_attribute - Detects NFS remote directory changes + * @dir - pointer to parent directory inode + * @chattr - previously saved change attribute + * Return "false" if the verifiers doesn't match the change attribute. + * This would usually indicate that the directory contents have changed on + * the server, and that any dentries need revalidating. 
+ */ +static inline int nfs_verify_change_attribute(struct inode *dir, unsigned long chattr) +{ + return chattr == NFS_I(dir)->cache_change_attribute; +} + /* * linux/fs/nfs/inode.c */ extern int nfs_sync_mapping(struct address_space *mapping); extern void nfs_zap_mapping(struct inode *inode, struct address_space *mapping); extern void nfs_zap_caches(struct inode *); -extern void nfs_set_inode_stale(struct inode *inode); extern void nfs_invalidate_atime(struct inode *); extern struct inode *nfs_fhget(struct super_block *, struct nfs_fh *, struct nfs_fattr *, struct nfs4_label *); -struct inode *nfs_ilookup(struct super_block *sb, struct nfs_fattr *, struct nfs_fh *); extern int nfs_refresh_inode(struct inode *, struct nfs_fattr *); extern int nfs_post_op_update_inode(struct inode *inode, struct nfs_fattr *fattr); extern int nfs_post_op_update_inode_force_wcc(struct inode *inode, struct nfs_fattr *fattr); extern int nfs_post_op_update_inode_force_wcc_locked(struct inode *inode, struct nfs_fattr *fattr); -extern int nfs_getattr(struct user_namespace *, const struct path *, - struct kstat *, u32, unsigned int); +extern int nfs_getattr(struct vfsmount *, struct dentry *, struct kstat *); extern void nfs_access_add_cache(struct inode *, struct nfs_access_entry *); extern void nfs_access_set_mask(struct nfs_access_entry *, u32); -extern int nfs_permission(struct user_namespace *, struct inode *, int); +extern int nfs_permission(struct inode *, int); extern int nfs_open(struct inode *, struct file *); +extern int nfs_attribute_timeout(struct inode *inode); extern int nfs_attribute_cache_expired(struct inode *inode); -extern int nfs_revalidate_inode(struct inode *inode, unsigned long flags); +extern int nfs_revalidate_inode(struct nfs_server *server, struct inode *inode); +extern int nfs_revalidate_inode_rcu(struct nfs_server *server, struct inode *inode); extern int __nfs_revalidate_inode(struct nfs_server *, struct inode *); -extern int nfs_clear_invalid_mapping(struct 
address_space *mapping); -extern bool nfs_mapping_need_revalidate_inode(struct inode *inode); extern int nfs_revalidate_mapping(struct inode *inode, struct address_space *mapping); extern int nfs_revalidate_mapping_rcu(struct inode *inode); -extern int nfs_setattr(struct user_namespace *, struct dentry *, struct iattr *); +extern int nfs_setattr(struct dentry *, struct iattr *); extern void nfs_setattr_update_inode(struct inode *inode, struct iattr *attr, struct nfs_fattr *); extern void nfs_setsecurity(struct inode *inode, struct nfs_fattr *fattr, struct nfs4_label *label); extern struct nfs_open_context *get_nfs_open_context(struct nfs_open_context *ctx); extern void put_nfs_open_context(struct nfs_open_context *ctx); -extern struct nfs_open_context *nfs_find_open_context(struct inode *inode, const struct cred *cred, fmode_t mode); -extern struct nfs_open_context *alloc_nfs_open_context(struct dentry *dentry, fmode_t f_mode, struct file *filp); +extern struct nfs_open_context *nfs_find_open_context(struct inode *inode, struct rpc_cred *cred, fmode_t mode); +extern struct nfs_open_context *alloc_nfs_open_context(struct dentry *dentry, fmode_t f_mode); extern void nfs_inode_attach_open_context(struct nfs_open_context *ctx); extern void nfs_file_set_open_context(struct file *filp, struct nfs_open_context *ctx); extern void nfs_file_clear_open_context(struct file *flip); @@ -479,7 +428,7 @@ static inline struct nfs_open_context *nfs_file_open_context(struct file *filp) return filp->private_data; } -static inline const struct cred *nfs_file_cred(struct file *file) +static inline struct rpc_cred *nfs_file_cred(struct file *file) { if (file != NULL) { struct nfs_open_context *ctx = @@ -506,19 +455,10 @@ extern const struct file_operations nfs_dir_operations; extern const struct dentry_operations nfs_dentry_operations; extern void nfs_force_lookup_revalidate(struct inode *dir); -extern void nfs_set_verifier(struct dentry * dentry, unsigned long verf); -#if 
IS_ENABLED(CONFIG_NFS_V4) -extern void nfs_clear_verifier_delegated(struct inode *inode); -#endif /* IS_ENABLED(CONFIG_NFS_V4) */ -extern struct dentry *nfs_add_or_obtain(struct dentry *dentry, - struct nfs_fh *fh, struct nfs_fattr *fattr, - struct nfs4_label *label); extern int nfs_instantiate(struct dentry *dentry, struct nfs_fh *fh, struct nfs_fattr *fattr, struct nfs4_label *label); -extern int nfs_may_open(struct inode *inode, const struct cred *cred, int openflags); +extern int nfs_may_open(struct inode *inode, struct rpc_cred *cred, int openflags); extern void nfs_access_zap_cache(struct inode *inode); -extern int nfs_access_get_cached(struct inode *inode, const struct cred *cred, struct nfs_access_entry *res, - bool may_block); /* * linux/fs/nfs/symlink.c @@ -564,16 +504,28 @@ extern int nfs_updatepage(struct file *, struct page *, unsigned int, unsigned */ extern int nfs_sync_inode(struct inode *inode); extern int nfs_wb_all(struct inode *inode); -extern int nfs_wb_page(struct inode *inode, struct page *page); +extern int nfs_wb_single_page(struct inode *inode, struct page *page, bool launder); extern int nfs_wb_page_cancel(struct inode *inode, struct page* page); extern int nfs_commit_inode(struct inode *, int); -extern struct nfs_commit_data *nfs_commitdata_alloc(bool never_fail); +extern struct nfs_commit_data *nfs_commitdata_alloc(void); extern void nfs_commit_free(struct nfs_commit_data *data); +static inline int +nfs_wb_launder_page(struct inode *inode, struct page *page) +{ + return nfs_wb_single_page(inode, page, true); +} + +static inline int +nfs_wb_page(struct inode *inode, struct page *page) +{ + return nfs_wb_single_page(inode, page, false); +} + static inline int nfs_have_writebacks(struct inode *inode) { - return atomic_long_read(&NFS_I(inode)->nrequests) != 0; + return NFS_I(inode)->nrequests != 0; } /* @@ -582,6 +534,8 @@ nfs_have_writebacks(struct inode *inode) extern int nfs_readpage(struct file *, struct page *); extern int 
nfs_readpages(struct file *, struct address_space *, struct list_head *, unsigned); +extern int nfs_readpage_async(struct nfs_open_context *, struct inode *, + struct page *); /* * inline functions diff --git a/include/linux/nfs_fs_i.h b/include/linux/nfs_fs_i.h index 98f9268fcf..a5c50d9734 100644 --- a/include/linux/nfs_fs_i.h +++ b/include/linux/nfs_fs_i.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _NFS_FS_I #define _NFS_FS_I diff --git a/include/linux/nfs_fs_sb.h b/include/linux/nfs_fs_sb.h index 2a9acbfe00..b34097c678 100644 --- a/include/linux/nfs_fs_sb.h +++ b/include/linux/nfs_fs_sb.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _NFS_FS_SB #define _NFS_FS_SB @@ -10,7 +9,6 @@ #include #include -#include struct nfs4_session; struct nfs_iostats; @@ -26,7 +24,7 @@ struct nfs41_impl_id; * The nfs_client identifies our client state to the server. */ struct nfs_client { - refcount_t cl_count; + atomic_t cl_count; atomic_t cl_mds_count; int cl_cons_state; /* current construction state (-ve: init error) */ #define NFS_CS_READY 0 /* ready to be used */ @@ -44,10 +42,6 @@ struct nfs_client { #define NFS_CS_MIGRATION 2 /* - transparent state migr */ #define NFS_CS_INFINITE_SLOTS 3 /* - don't limit TCP slots */ #define NFS_CS_NO_RETRANS_TIMEOUT 4 /* - Disable retransmit timeouts */ -#define NFS_CS_TSM_POSSIBLE 5 /* - Maybe state migration */ -#define NFS_CS_NOPING 6 /* - don't ping on connect */ -#define NFS_CS_DS 7 /* - Server is a DS */ -#define NFS_CS_REUSEPORT 8 /* - reuse src port on reconnect */ struct sockaddr_storage cl_addr; /* server identifier */ size_t cl_addrlen; char * cl_hostname; /* hostname of server */ @@ -61,9 +55,7 @@ struct nfs_client { struct nfs_subversion * cl_nfs_mod; /* pointer to nfs version module */ u32 cl_minorversion;/* NFSv4 minorversion */ - unsigned int cl_nconnect; /* Number of connections */ - unsigned int cl_max_connect; /* max number of xprts allowed */ - const char * cl_principal; /* used for 
machine cred */ + struct rpc_cred *cl_machine_cred; #if IS_ENABLED(CONFIG_NFS_V4) struct list_head cl_ds_clients; /* auth flavor data servers */ @@ -126,7 +118,6 @@ struct nfs_client { #endif struct net *cl_net; - struct list_head pending_cb_stateids; }; /* @@ -142,22 +133,9 @@ struct nfs_server { struct rpc_clnt * client_acl; /* ACL RPC client handle */ struct nlm_host *nlm_host; /* NLM client handle */ struct nfs_iostats __percpu *io_stats; /* I/O statistics */ + struct backing_dev_info backing_dev_info; atomic_long_t writeback; /* number of writeback pages */ - unsigned int flags; /* various flags */ - -/* The following are for internal use only. Also see uapi/linux/nfs_mount.h */ -#define NFS_MOUNT_LOOKUP_CACHE_NONEG 0x10000 -#define NFS_MOUNT_LOOKUP_CACHE_NONE 0x20000 -#define NFS_MOUNT_NORESVPORT 0x40000 -#define NFS_MOUNT_LEGACY_INTERFACE 0x80000 -#define NFS_MOUNT_LOCAL_FLOCK 0x100000 -#define NFS_MOUNT_LOCAL_FCNTL 0x200000 -#define NFS_MOUNT_SOFTERR 0x400000 -#define NFS_MOUNT_SOFTREVAL 0x800000 -#define NFS_MOUNT_WRITE_EAGER 0x01000000 -#define NFS_MOUNT_WRITE_WAIT 0x02000000 - - unsigned int fattr_valid; /* Valid attributes */ + int flags; /* various flags */ unsigned int caps; /* server capabilities */ unsigned int rsize; /* read size */ unsigned int rpages; /* read size (in pages) */ @@ -167,11 +145,6 @@ struct nfs_server { unsigned int dtsize; /* readdir size */ unsigned short port; /* "port=" setting */ unsigned int bsize; /* server block size */ -#ifdef CONFIG_NFS_V4_2 - unsigned int gxasize; /* getxattr size */ - unsigned int sxasize; /* setxattr size */ - unsigned int lxasize; /* listxattr size */ -#endif unsigned int acregmin; /* attr cache timeouts */ unsigned int acregmax; unsigned int acdirmin; @@ -182,12 +155,9 @@ struct nfs_server { #define NFS_OPTION_FSCACHE 0x00000001 /* - local caching enabled */ #define NFS_OPTION_MIGRATION 0x00000002 /* - NFSv4 migration enabled */ - enum nfs4_change_attr_type - change_attr_type;/* Description of change 
attribute */ - struct nfs_fsid fsid; __u64 maxfilesize; /* maximum file size */ - struct timespec64 time_delta; /* smallest time granularity */ + struct timespec time_delta; /* smallest time granularity */ unsigned long mount_time; /* when this fs was mounted */ struct super_block *super; /* VFS super block */ dev_t s_dev; /* superblock dev numbers */ @@ -236,13 +206,11 @@ struct nfs_server { struct list_head state_owners_lru; struct list_head layouts; struct list_head delegations; - struct list_head ss_copies; unsigned long mig_gen; unsigned long mig_status; #define NFS_MIG_IN_TRANSITION (1) #define NFS_MIG_FAILED (2) -#define NFS_MIG_TSM_POSSIBLE (3) void (*destroy)(struct nfs_server *); @@ -254,14 +222,6 @@ struct nfs_server { u32 mountd_version; unsigned short mountd_port; unsigned short mountd_protocol; - struct rpc_wait_queue uoc_rpcwaitq; - - /* XDR related information */ - unsigned int read_hdrsize; - - /* User namespace info */ - const struct cred *cred; - bool has_sec_mnt_opts; }; /* Server capabilities */ @@ -270,7 +230,15 @@ struct nfs_server { #define NFS_CAP_SYMLINKS (1U << 2) #define NFS_CAP_ACLS (1U << 3) #define NFS_CAP_ATOMIC_OPEN (1U << 4) -#define NFS_CAP_LGOPEN (1U << 5) +/* #define NFS_CAP_CHANGE_ATTR (1U << 5) */ +#define NFS_CAP_FILEID (1U << 6) +#define NFS_CAP_MODE (1U << 7) +#define NFS_CAP_NLINK (1U << 8) +#define NFS_CAP_OWNER (1U << 9) +#define NFS_CAP_OWNER_GROUP (1U << 10) +#define NFS_CAP_ATIME (1U << 11) +#define NFS_CAP_CTIME (1U << 12) +#define NFS_CAP_MTIME (1U << 13) #define NFS_CAP_POSIX_LOCK (1U << 14) #define NFS_CAP_UIDGID_NOMAP (1U << 15) #define NFS_CAP_STATEID_NFSV41 (1U << 16) @@ -282,10 +250,5 @@ struct nfs_server { #define NFS_CAP_LAYOUTSTATS (1U << 22) #define NFS_CAP_CLONE (1U << 23) #define NFS_CAP_COPY (1U << 24) -#define NFS_CAP_OFFLOAD_CANCEL (1U << 25) -#define NFS_CAP_LAYOUTERROR (1U << 26) -#define NFS_CAP_COPY_NOTIFY (1U << 27) -#define NFS_CAP_XATTR (1U << 28) -#define NFS_CAP_READ_PLUS (1U << 29) #endif 
diff --git a/include/linux/nfs_iostat.h b/include/linux/nfs_iostat.h index 027874c36c..9dcbbe9a51 100644 --- a/include/linux/nfs_iostat.h +++ b/include/linux/nfs_iostat.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ /* * User-space visible declarations for NFS client per-mount * point statistics diff --git a/include/linux/nfs_page.h b/include/linux/nfs_page.h index f0373a6cb5..957049f722 100644 --- a/include/linux/nfs_page.h +++ b/include/linux/nfs_page.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ /* * linux/include/linux/nfs_page.h * @@ -34,14 +33,13 @@ enum { PG_UPTODATE, /* page group sync bit in read path */ PG_WB_END, /* page group sync bit in write path */ PG_REMOVE, /* page group sync bit in write path */ - PG_CONTENDED1, /* Is someone waiting for a lock? */ - PG_CONTENDED2, /* Is someone waiting for a lock? */ }; struct nfs_inode; struct nfs_page { struct list_head wb_list; /* Defines state of page: */ struct page *wb_page; /* page to read in/write out */ + struct nfs_open_context *wb_context; /* File state context info */ struct nfs_lock_context *wb_lock_context; /* lock context info */ pgoff_t wb_index; /* Offset >> PAGE_SHIFT */ unsigned int wb_offset, /* Offset & ~PAGE_MASK */ @@ -52,10 +50,8 @@ struct nfs_page { struct nfs_write_verifier wb_verf; /* Commit cookie */ struct nfs_page *wb_this_page; /* list of reqs for this page */ struct nfs_page *wb_head; /* head pointer for req list */ - unsigned short wb_nio; /* Number of I/O attempts */ }; -struct nfs_pgio_mirror; struct nfs_pageio_descriptor; struct nfs_pageio_ops { void (*pg_init)(struct nfs_pageio_descriptor *, struct nfs_page *); @@ -65,12 +61,10 @@ struct nfs_pageio_ops { unsigned int (*pg_get_mirror_count)(struct nfs_pageio_descriptor *, struct nfs_page *); void (*pg_cleanup)(struct nfs_pageio_descriptor *); - struct nfs_pgio_mirror * - (*pg_get_mirror)(struct nfs_pageio_descriptor *, u32); - u32 (*pg_set_mirror)(struct nfs_pageio_descriptor *, u32); }; struct nfs_rw_ops 
{ + const fmode_t rw_mode; struct nfs_pgio_header *(*rw_alloc_header)(void); void (*rw_free_header)(struct nfs_pgio_header *); int (*rw_done)(struct rpc_task *, struct nfs_pgio_header *, @@ -91,6 +85,7 @@ struct nfs_pgio_mirror { }; struct nfs_pageio_descriptor { + unsigned char pg_moreio : 1; struct inode *pg_inode; const struct nfs_pageio_ops *pg_ops; const struct nfs_rw_ops *pg_rw_ops; @@ -99,8 +94,8 @@ struct nfs_pageio_descriptor { const struct rpc_call_ops *pg_rpc_callops; const struct nfs_pgio_completion_ops *pg_completion_ops; struct pnfs_layout_segment *pg_lseg; - struct nfs_io_completion *pg_io_completion; struct nfs_direct_req *pg_dreq; + void *pg_layout_private; unsigned int pg_bsize; /* default bsize for mirrors */ u32 pg_mirror_count; @@ -108,8 +103,6 @@ struct nfs_pageio_descriptor { struct nfs_pgio_mirror pg_mirrors_static[1]; struct nfs_pgio_mirror *pg_mirrors_dynamic; u32 pg_mirror_idx; /* current mirror */ - unsigned short pg_maxretrans; - unsigned char pg_moreio : 1; }; /* arbitrarily selected limit to number of mirrors */ @@ -119,6 +112,7 @@ struct nfs_pageio_descriptor { extern struct nfs_page *nfs_create_request(struct nfs_open_context *ctx, struct page *page, + struct nfs_page *last, unsigned int offset, unsigned int count); extern void nfs_release_request(struct nfs_page *); @@ -143,15 +137,10 @@ extern size_t nfs_generic_pg_test(struct nfs_pageio_descriptor *desc, extern int nfs_wait_on_request(struct nfs_page *); extern void nfs_unlock_request(struct nfs_page *req); extern void nfs_unlock_and_release_request(struct nfs_page *); -extern struct nfs_page *nfs_page_group_lock_head(struct nfs_page *req); -extern int nfs_page_group_lock_subrequests(struct nfs_page *head); -extern void nfs_join_page_group(struct nfs_page *head, struct inode *inode); -extern int nfs_page_group_lock(struct nfs_page *); +extern int nfs_page_group_lock(struct nfs_page *, bool); +extern void nfs_page_group_lock_wait(struct nfs_page *); extern void 
nfs_page_group_unlock(struct nfs_page *); extern bool nfs_page_group_sync_on_bit(struct nfs_page *, unsigned int); -extern int nfs_page_set_headlock(struct nfs_page *req); -extern void nfs_page_clear_headlock(struct nfs_page *req); -extern bool nfs_async_iocounter_wait(struct rpc_task *, struct nfs_lock_context *); /* * Lock the page of an asynchronous request @@ -173,16 +162,6 @@ nfs_list_add_request(struct nfs_page *req, struct list_head *head) list_add_tail(&req->wb_list, head); } -/** - * nfs_list_move_request - Move a request to a new list - * @req: request - * @head: head of list into which to insert the request. - */ -static inline void -nfs_list_move_request(struct nfs_page *req, struct list_head *head) -{ - list_move_tail(&req->wb_list, head); -} /** * nfs_list_remove_request - Remove a request from its wb_list @@ -208,10 +187,4 @@ loff_t req_offset(struct nfs_page *req) return (((loff_t)req->wb_index) << PAGE_SHIFT) + req->wb_offset; } -static inline struct nfs_open_context * -nfs_req_openctx(struct nfs_page *req) -{ - return req->wb_lock_context->open_context; -} - #endif /* _LINUX_NFS_PAGE_H */ diff --git a/include/linux/nfs_xdr.h b/include/linux/nfs_xdr.h index e9698b6278..beb1e10f44 100644 --- a/include/linux/nfs_xdr.h +++ b/include/linux/nfs_xdr.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_NFS_XDR_H #define _LINUX_NFS_XDR_H @@ -15,8 +14,6 @@ #define NFS_DEF_FILE_IO_SIZE (4096U) #define NFS_MIN_FILE_IO_SIZE (1024U) -#define NFS_BITMASK_SZ 3 - struct nfs4_string { unsigned int len; char *data; @@ -64,20 +61,19 @@ struct nfs_fattr { struct nfs_fsid fsid; __u64 fileid; __u64 mounted_on_fileid; - struct timespec64 atime; - struct timespec64 mtime; - struct timespec64 ctime; + struct timespec atime; + struct timespec mtime; + struct timespec ctime; __u64 change_attr; /* NFSv4 change attribute */ __u64 pre_change_attr;/* pre-op NFSv4 change attribute */ __u64 pre_size; /* pre_op_attr.size */ - struct timespec64 pre_mtime; /* 
pre_op_attr.mtime */ - struct timespec64 pre_ctime; /* pre_op_attr.ctime */ + struct timespec pre_mtime; /* pre_op_attr.mtime */ + struct timespec pre_ctime; /* pre_op_attr.ctime */ unsigned long time_start; unsigned long gencount; struct nfs4_string *owner_name; struct nfs4_string *group_name; struct nfs4_threshold *mdsthreshold; /* pNFS threshold hints */ - struct nfs4_label *label; }; #define NFS_ATTR_FATTR_TYPE (1U << 0) @@ -146,15 +142,12 @@ struct nfs_fsinfo { __u32 wtmult; /* writes should be multiple of this */ __u32 dtpref; /* pref. readdir transfer size */ __u64 maxfilesize; - struct timespec64 time_delta; /* server time granularity */ + struct timespec time_delta; /* server time granularity */ __u32 lease_time; /* in seconds */ __u32 nlayouttypes; /* number of layouttypes */ __u32 layouttype[NFS_MAX_LAYOUT_TYPES]; /* supported pnfs layout driver */ __u32 blksize; /* preferred pnfs io block size */ __u32 clone_blksize; /* granularity of a CLONE operation */ - enum nfs4_change_attr_type - change_attr_type; /* Info about change attr */ - __u32 xattr_support; /* User xattrs supported */ }; struct nfs_fsstat { @@ -223,20 +216,6 @@ struct nfs4_get_lease_time_res { struct nfs_fsinfo *lr_fsinfo; }; -struct xdr_stream; -struct nfs4_xdr_opaque_data; - -struct nfs4_xdr_opaque_ops { - void (*encode)(struct xdr_stream *, const void *args, - const struct nfs4_xdr_opaque_data *); - void (*free)(struct nfs4_xdr_opaque_data *); -}; - -struct nfs4_xdr_opaque_data { - const struct nfs4_xdr_opaque_ops *ops; - void *data; -}; - #define PNFS_LAYOUT_MAXSIZE 4096 struct nfs4_layoutdriver_data { @@ -265,7 +244,6 @@ struct nfs4_layoutget_args { struct nfs4_layoutget_res { struct nfs4_sequence_res seq_res; - int status; __u32 return_on_close; struct pnfs_layout_range range; __u32 type; @@ -276,8 +254,7 @@ struct nfs4_layoutget_res { struct nfs4_layoutget { struct nfs4_layoutget_args args; struct nfs4_layoutget_res res; - const struct cred *cred; - struct pnfs_layout_hdr *lo; + 
struct rpc_cred *cred; gfp_t gfp_flags; }; @@ -316,7 +293,7 @@ struct nfs4_layoutcommit_data { struct rpc_task task; struct nfs_fattr fattr; struct list_head lseg_list; - const struct cred *cred; + struct rpc_cred *cred; struct inode *inode; struct nfs4_layoutcommit_args args; struct nfs4_layoutcommit_res res; @@ -329,7 +306,6 @@ struct nfs4_layoutreturn_args { struct pnfs_layout_range range; nfs4_stateid stateid; __u32 layout_type; - struct nfs4_xdr_opaque_data *ld_private; }; struct nfs4_layoutreturn_res { @@ -341,11 +317,10 @@ struct nfs4_layoutreturn_res { struct nfs4_layoutreturn { struct nfs4_layoutreturn_args args; struct nfs4_layoutreturn_res res; - const struct cred *cred; + struct rpc_cred *cred; struct nfs_client *clp; struct inode *inode; int rpc_status; - struct nfs4_xdr_opaque_data ld_private; }; #define PNFS_LAYOUTSTATS_MAXSIZE 256 @@ -366,7 +341,8 @@ struct nfs42_layoutstat_devinfo { __u64 write_count; __u64 write_bytes; __u32 layout_type; - struct nfs4_xdr_opaque_data ld_private; + layoutstats_encode_t layoutstats_encode; + void *layout_private; }; struct nfs42_layoutstat_args { @@ -390,41 +366,6 @@ struct nfs42_layoutstat_data { struct nfs42_layoutstat_res res; }; -struct nfs42_device_error { - struct nfs4_deviceid dev_id; - int status; - enum nfs_opnum4 opnum; -}; - -struct nfs42_layout_error { - __u64 offset; - __u64 length; - nfs4_stateid stateid; - struct nfs42_device_error errors[1]; -}; - -#define NFS42_LAYOUTERROR_MAX 5 - -struct nfs42_layouterror_args { - struct nfs4_sequence_args seq_args; - struct inode *inode; - unsigned int num_errors; - struct nfs42_layout_error errors[NFS42_LAYOUTERROR_MAX]; -}; - -struct nfs42_layouterror_res { - struct nfs4_sequence_res seq_res; - unsigned int num_errors; - int rpc_status; -}; - -struct nfs42_layouterror_data { - struct nfs42_layouterror_args args; - struct nfs42_layouterror_res res; - struct inode *inode; - struct pnfs_layout_segment *lseg; -}; - struct nfs42_clone_args { struct nfs4_sequence_args 
seq_args; struct nfs_fh *src_fh; @@ -477,8 +418,6 @@ struct nfs_openargs { enum open_claim_type4 claim; enum createmode4 createmode; const struct nfs4_label *label; - umode_t umask; - struct nfs4_layoutget_args *lg_args; }; struct nfs_openres { @@ -501,7 +440,6 @@ struct nfs_openres { __u32 access_request; __u32 access_supported; __u32 access_result; - struct nfs4_layoutget_res *lg_res; }; /* @@ -531,8 +469,6 @@ struct nfs_closeargs { fmode_t fmode; u32 share_access; const u32 * bitmask; - u32 bitmask_store[NFS_BITMASK_SZ]; - struct nfs4_layoutreturn_args *lr_args; }; struct nfs_closeres { @@ -541,8 +477,6 @@ struct nfs_closeres { struct nfs_fattr * fattr; struct nfs_seqid * seqid; const struct nfs_server *server; - struct nfs4_layoutreturn_res *lr_res; - int lr_ret; }; /* * * Arguments to the lock,lockt, and locku call. @@ -614,17 +548,13 @@ struct nfs4_delegreturnargs { struct nfs4_sequence_args seq_args; const struct nfs_fh *fhandle; const nfs4_stateid *stateid; - const u32 *bitmask; - u32 bitmask_store[NFS_BITMASK_SZ]; - struct nfs4_layoutreturn_args *lr_args; + const u32 * bitmask; }; struct nfs4_delegreturnres { struct nfs4_sequence_res seq_res; struct nfs_fattr * fattr; struct nfs_server *server; - struct nfs4_layoutreturn_res *lr_res; - int lr_ret; }; /* @@ -652,31 +582,19 @@ struct nfs_pgio_args { __u32 count; unsigned int pgbase; struct page ** pages; - union { - unsigned int replen; /* used by read */ - struct { - const u32 * bitmask; /* used by write */ - u32 bitmask_store[NFS_BITMASK_SZ]; /* used by write */ - enum nfs3_stable_how stable; /* used by write */ - }; - }; + const u32 * bitmask; /* used by write */ + enum nfs3_stable_how stable; /* used by write */ }; struct nfs_pgio_res { struct nfs4_sequence_res seq_res; struct nfs_fattr * fattr; - __u64 count; + __u32 count; __u32 op_status; - union { - struct { - unsigned int replen; /* used by read */ - int eof; /* used by read */ - }; - struct { - struct nfs_writeverf * verf; /* used by write */ - 
const struct nfs_server *server; /* used by write */ - }; - }; + int eof; /* used by read */ + struct nfs_writeverf * verf; /* used by write */ + const struct nfs_server *server; /* used by write */ + }; /* @@ -758,20 +676,6 @@ struct nfs_entry { struct nfs_server * server; }; -struct nfs_readdir_arg { - struct dentry *dentry; - const struct cred *cred; - __be32 *verf; - u64 cookie; - struct page **pages; - unsigned int page_len; - bool plus; -}; - -struct nfs_readdir_res { - __be32 *verf; -}; - /* * The following types are for NFSv2 only. */ @@ -893,7 +797,7 @@ struct nfs3_sattrargs { struct nfs_fh * fh; struct iattr * sattr; unsigned int guard; - struct timespec64 guardtime; + struct timespec guardtime; }; struct nfs3_diropargs { @@ -952,7 +856,7 @@ struct nfs3_readdirargs { struct nfs_fh * fh; __u64 cookie; __be32 verf[2]; - bool plus; + int plus; unsigned int count; struct page ** pages; }; @@ -983,7 +887,7 @@ struct nfs3_linkres { struct nfs3_readdirres { struct nfs_fattr * dir_attr; __be32 * verf; - bool plus; + int plus; }; struct nfs3_getaclres { @@ -1033,7 +937,6 @@ struct nfs4_create_arg { const struct nfs_fh * dir_fh; const u32 * bitmask; const struct nfs4_label *label; - umode_t umask; }; struct nfs4_create_res { @@ -1086,6 +989,7 @@ struct nfs4_link_res { struct nfs_fattr * dir_attr; }; + struct nfs4_lookup_arg { struct nfs4_sequence_args seq_args; const struct nfs_fh * dir_fh; @@ -1101,20 +1005,6 @@ struct nfs4_lookup_res { struct nfs4_label *label; }; -struct nfs4_lookupp_arg { - struct nfs4_sequence_args seq_args; - const struct nfs_fh *fh; - const u32 *bitmask; -}; - -struct nfs4_lookupp_res { - struct nfs4_sequence_res seq_res; - const struct nfs_server *server; - struct nfs_fattr *fattr; - struct nfs_fh *fh; - struct nfs4_label *label; -}; - struct nfs4_lookup_root_arg { struct nfs4_sequence_args seq_args; const u32 * bitmask; @@ -1140,7 +1030,7 @@ struct nfs4_readdir_arg { struct page ** pages; /* zero-copy data */ unsigned int pgbase; /* 
zero-copy data */ const u32 * bitmask; - bool plus; + int plus; }; struct nfs4_readdir_res { @@ -1250,7 +1140,7 @@ struct nfs4_secinfo4 { struct nfs4_secinfo_flavors { unsigned int num_flavors; - struct nfs4_secinfo4 flavors[]; + struct nfs4_secinfo4 flavors[0]; }; struct nfs4_secinfo_arg { @@ -1289,25 +1179,16 @@ struct nfstime4 { struct pnfs_commit_bucket { struct list_head written; struct list_head committing; - struct pnfs_layout_segment *lseg; + struct pnfs_layout_segment *wlseg; + struct pnfs_layout_segment *clseg; struct nfs_writeverf direct_verf; }; -struct pnfs_commit_array { - struct list_head cinfo_list; - struct list_head lseg_list; - struct pnfs_layout_segment *lseg; - struct rcu_head rcu; - refcount_t refcount; - unsigned int nbuckets; - struct pnfs_commit_bucket buckets[]; -}; - struct pnfs_ds_commit_info { - struct list_head commits; - unsigned int nwritten; - unsigned int ncommitting; - const struct pnfs_commit_ops *ops; + int nwritten; + int ncommitting; + int nbuckets; + struct pnfs_commit_bucket *buckets; }; struct nfs41_state_protection { @@ -1318,7 +1199,7 @@ struct nfs41_state_protection { struct nfs41_exchange_id_args { struct nfs_client *client; - nfs4_verifier verifier; + nfs4_verifier *verifier; u32 flags; struct nfs41_state_protection state_protect; }; @@ -1340,13 +1221,11 @@ struct nfs41_impl_id { struct nfstime4 date; }; -#define MAX_BIND_CONN_TO_SESSION_RETRIES 3 struct nfs41_bind_conn_to_session_args { struct nfs_client *client; struct nfs4_sessionid sessionid; u32 dir; bool use_conn_in_rdma_mode; - int retries; }; struct nfs41_bind_conn_to_session_res { @@ -1420,11 +1299,22 @@ struct nfs41_free_stateid_res { unsigned int status; }; +static inline void +nfs_free_pnfs_ds_cinfo(struct pnfs_ds_commit_info *cinfo) +{ + kfree(cinfo->buckets); +} + #else struct pnfs_ds_commit_info { }; +static inline void +nfs_free_pnfs_ds_cinfo(struct pnfs_ds_commit_info *cinfo) +{ +} + #endif /* CONFIG_NFS_V4_1 */ #ifdef CONFIG_NFS_V4_2 @@ -1458,12 
+1348,9 @@ struct nfs42_copy_args { u64 dst_pos; u64 count; - bool sync; - struct nl4_server *cp_src; }; struct nfs42_write_res { - nfs4_stateid stateid; u64 count; struct nfs_writeverf verifier; }; @@ -1473,35 +1360,6 @@ struct nfs42_copy_res { struct nfs42_write_res write_res; bool consecutive; bool synchronous; - struct nfs_commitres commit_res; -}; - -struct nfs42_offload_status_args { - struct nfs4_sequence_args osa_seq_args; - struct nfs_fh *osa_src_fh; - nfs4_stateid osa_stateid; -}; - -struct nfs42_offload_status_res { - struct nfs4_sequence_res osr_seq_res; - uint64_t osr_count; - int osr_status; -}; - -struct nfs42_copy_notify_args { - struct nfs4_sequence_args cna_seq_args; - - struct nfs_fh *cna_src_fh; - nfs4_stateid cna_src_stateid; - struct nl4_server cna_dst; -}; - -struct nfs42_copy_notify_res { - struct nfs4_sequence_res cnr_seq_res; - - struct nfstime4 cnr_lease_time; - nfs4_stateid cnr_stateid; - struct nl4_server cnr_src; }; struct nfs42_seek_args { @@ -1520,64 +1378,7 @@ struct nfs42_seek_res { u32 sr_eof; u64 sr_offset; }; - -struct nfs42_setxattrargs { - struct nfs4_sequence_args seq_args; - struct nfs_fh *fh; - const char *xattr_name; - u32 xattr_flags; - size_t xattr_len; - struct page **xattr_pages; -}; - -struct nfs42_setxattrres { - struct nfs4_sequence_res seq_res; - struct nfs4_change_info cinfo; -}; - -struct nfs42_getxattrargs { - struct nfs4_sequence_args seq_args; - struct nfs_fh *fh; - const char *xattr_name; - size_t xattr_len; - struct page **xattr_pages; -}; - -struct nfs42_getxattrres { - struct nfs4_sequence_res seq_res; - size_t xattr_len; -}; - -struct nfs42_listxattrsargs { - struct nfs4_sequence_args seq_args; - struct nfs_fh *fh; - u32 count; - u64 cookie; - struct page **xattr_pages; -}; - -struct nfs42_listxattrsres { - struct nfs4_sequence_res seq_res; - struct page *scratch; - void *xattr_buf; - size_t xattr_len; - u64 cookie; - bool eof; - size_t copied; -}; - -struct nfs42_removexattrargs { - struct 
nfs4_sequence_args seq_args; - struct nfs_fh *fh; - const char *xattr_name; -}; - -struct nfs42_removexattrres { - struct nfs4_sequence_res seq_res; - struct nfs4_change_info cinfo; -}; - -#endif /* CONFIG_NFS_V4_2 */ +#endif struct nfs_page; @@ -1595,30 +1396,27 @@ enum { NFS_IOHDR_EOF, NFS_IOHDR_REDO, NFS_IOHDR_STAT, - NFS_IOHDR_RESEND_PNFS, - NFS_IOHDR_RESEND_MDS, }; -struct nfs_io_completion; struct nfs_pgio_header { struct inode *inode; - const struct cred *cred; + struct rpc_cred *cred; struct list_head pages; struct nfs_page *req; struct nfs_writeverf verf; /* Used for writes */ - fmode_t rw_mode; struct pnfs_layout_segment *lseg; loff_t io_start; const struct rpc_call_ops *mds_ops; void (*release) (struct nfs_pgio_header *hdr); const struct nfs_pgio_completion_ops *completion_ops; const struct nfs_rw_ops *rw_ops; - struct nfs_io_completion *io_completion; struct nfs_direct_req *dreq; - + void *layout_private; + spinlock_t lock; + /* fields protected by lock */ int pnfs_error; int error; /* merge with pnfs_error */ - unsigned int good_bytes; /* boundary of good data */ + unsigned long good_bytes; /* boundary of good data */ unsigned long flags; /* @@ -1633,13 +1431,13 @@ struct nfs_pgio_header { __u64 mds_offset; /* Filelayout dense stripe */ struct nfs_page_array page_array; struct nfs_client *ds_clp; /* pNFS data server */ - u32 ds_commit_idx; /* ds index if ds_clp is set */ - u32 pgio_mirror_idx;/* mirror index in pgio layer */ + int ds_commit_idx; /* ds index if ds_clp is set */ + int pgio_mirror_idx;/* mirror index in pgio layer */ }; struct nfs_mds_commit_info { atomic_t rpcs_out; - atomic_long_t ncommit; + unsigned long ncommit; struct list_head list; }; @@ -1662,7 +1460,7 @@ struct nfs_commit_info { struct nfs_commit_data { struct rpc_task task; struct inode *inode; - const struct cred *cred; + struct rpc_cred *cred; struct nfs_fattr fattr; struct nfs_writeverf verf; struct list_head pages; /* Coalesced requests we wish to flush */ @@ -1682,7 +1480,7 
@@ struct nfs_commit_data { }; struct nfs_pgio_completion_ops { - void (*error_cleanup)(struct list_head *head, int); + void (*error_cleanup)(struct list_head *head); void (*init_hdr)(struct nfs_pgio_header *hdr); void (*completion)(struct nfs_pgio_header *hdr); void (*reschedule_io)(struct nfs_pgio_header *hdr); @@ -1693,7 +1491,7 @@ struct nfs_unlinkdata { struct nfs_removeres res; struct dentry *dentry; wait_queue_head_t wq; - const struct cred *cred; + struct rpc_cred *cred; struct nfs_fattr dir_attr; long timeout; }; @@ -1701,7 +1499,7 @@ struct nfs_unlinkdata { struct nfs_renamedata { struct nfs_renameargs args; struct nfs_renameres res; - const struct cred *cred; + struct rpc_cred *cred; struct inode *old_dir; struct dentry *old_dentry; struct nfs_fattr old_fattr; @@ -1710,7 +1508,6 @@ struct nfs_renamedata { struct nfs_fattr new_fattr; void (*complete)(struct rpc_task *, struct nfs_renamedata *); long timeout; - bool cancelled; }; struct nfs_access_entry; @@ -1720,7 +1517,6 @@ struct nfs_subversion; struct nfs_mount_info; struct nfs_client_initdata; struct nfs_pageio_descriptor; -struct fs_context; /* * RPC procedure vector for NFSv2/NFSv3 demuxing @@ -1731,34 +1527,30 @@ struct nfs_rpc_ops { const struct inode_operations *dir_inode_ops; const struct inode_operations *file_inode_ops; const struct file_operations *file_ops; - const struct nlmclnt_operations *nlmclnt_ops; int (*getroot) (struct nfs_server *, struct nfs_fh *, struct nfs_fsinfo *); - int (*submount) (struct fs_context *, struct nfs_server *); - int (*try_get_tree) (struct fs_context *); + struct vfsmount *(*submount) (struct nfs_server *, struct dentry *, + struct nfs_fh *, struct nfs_fattr *); + struct dentry *(*try_mount) (int, const char *, struct nfs_mount_info *, + struct nfs_subversion *); int (*getattr) (struct nfs_server *, struct nfs_fh *, - struct nfs_fattr *, struct nfs4_label *, - struct inode *); + struct nfs_fattr *, struct nfs4_label *); int (*setattr) (struct dentry *, struct 
nfs_fattr *, struct iattr *); - int (*lookup) (struct inode *, struct dentry *, + int (*lookup) (struct inode *, const struct qstr *, struct nfs_fh *, struct nfs_fattr *, struct nfs4_label *); - int (*lookupp) (struct inode *, struct nfs_fh *, - struct nfs_fattr *, struct nfs4_label *); int (*access) (struct inode *, struct nfs_access_entry *); int (*readlink)(struct inode *, struct page *, unsigned int, unsigned int); int (*create) (struct inode *, struct dentry *, struct iattr *, int); - int (*remove) (struct inode *, struct dentry *); - void (*unlink_setup) (struct rpc_message *, struct dentry *, struct inode *); + int (*remove) (struct inode *, const struct qstr *); + void (*unlink_setup) (struct rpc_message *, struct inode *dir); void (*unlink_rpc_prepare) (struct rpc_task *, struct nfs_unlinkdata *); int (*unlink_done) (struct rpc_task *, struct inode *); - void (*rename_setup) (struct rpc_message *msg, - struct dentry *old_dentry, - struct dentry *new_dentry); + void (*rename_setup) (struct rpc_message *msg, struct inode *dir); void (*rename_rpc_prepare)(struct rpc_task *task, struct nfs_renamedata *); int (*rename_done) (struct rpc_task *task, struct inode *old_dir, struct inode *new_dir); int (*link) (struct inode *, struct inode *, const struct qstr *); @@ -1766,7 +1558,8 @@ struct nfs_rpc_ops { unsigned int, struct iattr *); int (*mkdir) (struct inode *, struct dentry *, struct iattr *); int (*rmdir) (struct inode *, const struct qstr *); - int (*readdir) (struct nfs_readdir_arg *, struct nfs_readdir_res *); + int (*readdir) (struct dentry *, struct rpc_cred *, + u64, struct page **, unsigned int, int); int (*mknod) (struct inode *, struct dentry *, struct iattr *, dev_t); int (*statfs) (struct nfs_server *, struct nfs_fh *, @@ -1776,16 +1569,14 @@ struct nfs_rpc_ops { int (*pathconf) (struct nfs_server *, struct nfs_fh *, struct nfs_pathconf *); int (*set_capabilities)(struct nfs_server *, struct nfs_fh *); - int (*decode_dirent)(struct xdr_stream *, 
struct nfs_entry *, bool); + int (*decode_dirent)(struct xdr_stream *, struct nfs_entry *, int); int (*pgio_rpc_prepare)(struct rpc_task *, struct nfs_pgio_header *); void (*read_setup)(struct nfs_pgio_header *, struct rpc_message *); int (*read_done)(struct rpc_task *, struct nfs_pgio_header *); - void (*write_setup)(struct nfs_pgio_header *, struct rpc_message *, - struct rpc_clnt **); + void (*write_setup)(struct nfs_pgio_header *, struct rpc_message *); int (*write_done)(struct rpc_task *, struct nfs_pgio_header *); - void (*commit_setup) (struct nfs_commit_data *, struct rpc_message *, - struct rpc_clnt **); + void (*commit_setup) (struct nfs_commit_data *, struct rpc_message *); void (*commit_rpc_prepare)(struct rpc_task *, struct nfs_commit_data *); int (*commit_done) (struct rpc_task *, struct nfs_commit_data *); int (*lock)(struct file *, int, struct file_lock *); @@ -1798,11 +1589,12 @@ struct nfs_rpc_ops { struct iattr *iattr, int *); int (*have_delegation)(struct inode *, fmode_t); + int (*return_delegation)(struct inode *); struct nfs_client *(*alloc_client) (const struct nfs_client_initdata *); struct nfs_client *(*init_client) (struct nfs_client *, const struct nfs_client_initdata *); void (*free_client) (struct nfs_client *); - struct nfs_server *(*create_server)(struct fs_context *); + struct nfs_server *(*create_server)(struct nfs_mount_info *, struct nfs_subversion *); struct nfs_server *(*clone_server)(struct nfs_server *, struct nfs_fh *, struct nfs_fattr *, rpc_authflavor_t); }; diff --git a/include/linux/nfsacl.h b/include/linux/nfsacl.h index 8e76a79cdc..5e69e67b31 100644 --- a/include/linux/nfsacl.h +++ b/include/linux/nfsacl.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ /* * File: linux/nfsacl.h * @@ -38,11 +37,5 @@ nfsacl_encode(struct xdr_buf *buf, unsigned int base, struct inode *inode, extern int nfsacl_decode(struct xdr_buf *buf, unsigned int base, unsigned int *aclcnt, struct posix_acl **pacl); -extern bool 
-nfs_stream_decode_acl(struct xdr_stream *xdr, unsigned int *aclcnt, - struct posix_acl **pacl); -extern bool -nfs_stream_encode_acl(struct xdr_stream *xdr, struct inode *inode, - struct posix_acl *acl, int encode_entries, int typeflag); #endif /* __LINUX_NFSACL_H */ diff --git a/include/linux/nl802154.h b/include/linux/nl802154.h index b22782225f..0f6f6607f5 100644 --- a/include/linux/nl802154.h +++ b/include/linux/nl802154.h @@ -1,8 +1,17 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * nl802154.h * * Copyright (C) 2007, 2008, 2009 Siemens AG + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * */ #ifndef NL802154_H diff --git a/include/linux/nls.h b/include/linux/nls.h index 499e486b37..2b7fabb994 100644 --- a/include/linux/nls.h +++ b/include/linux/nls.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_NLS_H #define _LINUX_NLS_H @@ -32,7 +31,7 @@ struct nls_table { const unsigned char *charset2upper; struct module *owner; struct nls_table *next; -}; +} __do_const; /* this value hold the maximum octet of charset */ #define NLS_MAX_CHARSET_SIZE 6 /* for UTF-8 */ @@ -47,7 +46,7 @@ enum utf16_endian { /* nls_base.c */ extern int __register_nls(struct nls_table *, struct module *); extern int unregister_nls(struct nls_table *); -extern struct nls_table *load_nls(char *); +extern struct nls_table *load_nls(const char *); extern void unload_nls(struct nls_table *); extern struct nls_table *load_nls_default(void); #define register_nls(nls) __register_nls((nls), THIS_MODULE) diff --git a/include/linux/nmi.h b/include/linux/nmi.h index 750c7f395c..a78c35cff1 100644 --- a/include/linux/nmi.h +++ b/include/linux/nmi.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ /* * linux/include/linux/nmi.h */ @@ -7,133 +6,29 @@ #include #include -#if defined(CONFIG_HAVE_NMI_WATCHDOG) -#include -#endif - -#ifdef CONFIG_LOCKUP_DETECTOR -void lockup_detector_init(void); -void lockup_detector_soft_poweroff(void); -void lockup_detector_cleanup(void); -bool is_hardlockup(void); - -extern int watchdog_user_enabled; -extern int nmi_watchdog_user_enabled; -extern int soft_watchdog_user_enabled; -extern int watchdog_thresh; -extern unsigned long watchdog_enabled; - -extern struct cpumask watchdog_cpumask; -extern unsigned long *watchdog_cpumask_bits; -#ifdef CONFIG_SMP -extern int sysctl_softlockup_all_cpu_backtrace; -extern int sysctl_hardlockup_all_cpu_backtrace; -#else -#define sysctl_softlockup_all_cpu_backtrace 0 -#define sysctl_hardlockup_all_cpu_backtrace 0 -#endif /* !CONFIG_SMP */ - -#else /* CONFIG_LOCKUP_DETECTOR */ -static inline void 
lockup_detector_init(void) { } -static inline void lockup_detector_soft_poweroff(void) { } -static inline void lockup_detector_cleanup(void) { } -#endif /* !CONFIG_LOCKUP_DETECTOR */ - -#ifdef CONFIG_SOFTLOCKUP_DETECTOR -extern void touch_softlockup_watchdog_sched(void); -extern void touch_softlockup_watchdog(void); -extern void touch_softlockup_watchdog_sync(void); -extern void touch_all_softlockup_watchdogs(void); -extern unsigned int softlockup_panic; - -extern int lockup_detector_online_cpu(unsigned int cpu); -extern int lockup_detector_offline_cpu(unsigned int cpu); -#else /* CONFIG_SOFTLOCKUP_DETECTOR */ -static inline void touch_softlockup_watchdog_sched(void) { } -static inline void touch_softlockup_watchdog(void) { } -static inline void touch_softlockup_watchdog_sync(void) { } -static inline void touch_all_softlockup_watchdogs(void) { } - -#define lockup_detector_online_cpu NULL -#define lockup_detector_offline_cpu NULL -#endif /* CONFIG_SOFTLOCKUP_DETECTOR */ - -#ifdef CONFIG_DETECT_HUNG_TASK -void reset_hung_task_detector(void); -#else -static inline void reset_hung_task_detector(void) { } -#endif - -/* - * The run state of the lockup detectors is controlled by the content of the - * 'watchdog_enabled' variable. Each lockup detector has its dedicated bit - - * bit 0 for the hard lockup detector and bit 1 for the soft lockup detector. - * - * 'watchdog_user_enabled', 'nmi_watchdog_user_enabled' and - * 'soft_watchdog_user_enabled' are variables that are only used as an - * 'interface' between the parameters in /proc/sys/kernel and the internal - * state bits in 'watchdog_enabled'. The 'watchdog_thresh' variable is - * handled differently because its value is not boolean, and the lockup - * detectors are 'suspended' while 'watchdog_thresh' is equal zero. 
- */ -#define NMI_WATCHDOG_ENABLED_BIT 0 -#define SOFT_WATCHDOG_ENABLED_BIT 1 -#define NMI_WATCHDOG_ENABLED (1 << NMI_WATCHDOG_ENABLED_BIT) -#define SOFT_WATCHDOG_ENABLED (1 << SOFT_WATCHDOG_ENABLED_BIT) - -#if defined(CONFIG_HARDLOCKUP_DETECTOR) -extern void hardlockup_detector_disable(void); -extern unsigned int hardlockup_panic; -#else -static inline void hardlockup_detector_disable(void) {} -#endif - -#if defined(CONFIG_HAVE_NMI_WATCHDOG) || defined(CONFIG_HARDLOCKUP_DETECTOR) -# define NMI_WATCHDOG_SYSCTL_PERM 0644 -#else -# define NMI_WATCHDOG_SYSCTL_PERM 0444 -#endif - -#if defined(CONFIG_HARDLOCKUP_DETECTOR_PERF) -extern void arch_touch_nmi_watchdog(void); -extern void hardlockup_detector_perf_stop(void); -extern void hardlockup_detector_perf_restart(void); -extern void hardlockup_detector_perf_disable(void); -extern void hardlockup_detector_perf_enable(void); -extern void hardlockup_detector_perf_cleanup(void); -extern int hardlockup_detector_perf_init(void); -#else -static inline void hardlockup_detector_perf_stop(void) { } -static inline void hardlockup_detector_perf_restart(void) { } -static inline void hardlockup_detector_perf_disable(void) { } -static inline void hardlockup_detector_perf_enable(void) { } -static inline void hardlockup_detector_perf_cleanup(void) { } -# if !defined(CONFIG_HAVE_NMI_WATCHDOG) -static inline int hardlockup_detector_perf_init(void) { return -ENODEV; } -static inline void arch_touch_nmi_watchdog(void) {} -# else -static inline int hardlockup_detector_perf_init(void) { return 0; } -# endif -#endif - -void watchdog_nmi_stop(void); -void watchdog_nmi_start(void); -int watchdog_nmi_probe(void); -int watchdog_nmi_enable(unsigned int cpu); -void watchdog_nmi_disable(unsigned int cpu); /** * touch_nmi_watchdog - restart NMI watchdog timeout. - * + * * If the architecture supports the NMI watchdog, touch_nmi_watchdog() * may be used to reset the timeout - for code which intentionally * disables interrupts for a long time. 
This call is stateless. */ +#if defined(CONFIG_HAVE_NMI_WATCHDOG) || defined(CONFIG_HARDLOCKUP_DETECTOR) +#include +extern void touch_nmi_watchdog(void); +#else static inline void touch_nmi_watchdog(void) { - arch_touch_nmi_watchdog(); touch_softlockup_watchdog(); } +#endif + +#if defined(CONFIG_HARDLOCKUP_DETECTOR) +extern void hardlockup_detector_disable(void); +#else +static inline void hardlockup_detector_disable(void) {} +#endif /* * Create trigger_all_cpu_backtrace() out of the arch-provided @@ -190,23 +85,38 @@ static inline bool trigger_single_cpu_backtrace(int cpu) } #endif -#ifdef CONFIG_HARDLOCKUP_DETECTOR_PERF +#ifdef CONFIG_LOCKUP_DETECTOR u64 hw_nmi_get_sample_period(int watchdog_thresh); -#endif - -#if defined(CONFIG_HARDLOCKUP_CHECK_TIMESTAMP) && \ - defined(CONFIG_HARDLOCKUP_DETECTOR) -void watchdog_update_hrtimer_threshold(u64 period); -#else -static inline void watchdog_update_hrtimer_threshold(u64 period) { } -#endif - +extern int nmi_watchdog_enabled; +extern int soft_watchdog_enabled; +extern int watchdog_user_enabled; +extern int watchdog_thresh; +extern unsigned long *watchdog_cpumask_bits; +extern int sysctl_softlockup_all_cpu_backtrace; +extern int sysctl_hardlockup_all_cpu_backtrace; struct ctl_table; -int proc_watchdog(struct ctl_table *, int, void *, size_t *, loff_t *); -int proc_nmi_watchdog(struct ctl_table *, int , void *, size_t *, loff_t *); -int proc_soft_watchdog(struct ctl_table *, int , void *, size_t *, loff_t *); -int proc_watchdog_thresh(struct ctl_table *, int , void *, size_t *, loff_t *); -int proc_watchdog_cpumask(struct ctl_table *, int, void *, size_t *, loff_t *); +extern int proc_watchdog(struct ctl_table *, int , + void __user *, size_t *, loff_t *); +extern int proc_nmi_watchdog(struct ctl_table *, int , + void __user *, size_t *, loff_t *); +extern int proc_soft_watchdog(struct ctl_table *, int , + void __user *, size_t *, loff_t *); +extern int proc_watchdog_thresh(struct ctl_table *, int , + void __user *, 
size_t *, loff_t *); +extern int proc_watchdog_cpumask(struct ctl_table *, int, + void __user *, size_t *, loff_t *); +extern int lockup_detector_suspend(void); +extern void lockup_detector_resume(void); +#else +static inline int lockup_detector_suspend(void) +{ + return 0; +} + +static inline void lockup_detector_resume(void) +{ +} +#endif #ifdef CONFIG_HAVE_ACPI_APEI_NMI #include diff --git a/include/linux/node.h b/include/linux/node.h index 8e5a298979..2115ad5d6f 100644 --- a/include/linux/node.h +++ b/include/linux/node.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ /* * include/linux/node.h - generic node definition * @@ -17,142 +16,36 @@ #include #include -#include #include -/** - * struct node_hmem_attrs - heterogeneous memory performance attributes - * - * @read_bandwidth: Read bandwidth in MB/s - * @write_bandwidth: Write bandwidth in MB/s - * @read_latency: Read latency in nanoseconds - * @write_latency: Write latency in nanoseconds - */ -struct node_hmem_attrs { - unsigned int read_bandwidth; - unsigned int write_bandwidth; - unsigned int read_latency; - unsigned int write_latency; -}; - -enum cache_indexing { - NODE_CACHE_DIRECT_MAP, - NODE_CACHE_INDEXED, - NODE_CACHE_OTHER, -}; - -enum cache_write_policy { - NODE_CACHE_WRITE_BACK, - NODE_CACHE_WRITE_THROUGH, - NODE_CACHE_WRITE_OTHER, -}; - -/** - * struct node_cache_attrs - system memory caching attributes - * - * @indexing: The ways memory blocks may be placed in cache - * @write_policy: Write back or write through policy - * @size: Total size of cache in bytes - * @line_size: Number of bytes fetched on a cache miss - * @level: The cache hierarchy level - */ -struct node_cache_attrs { - enum cache_indexing indexing; - enum cache_write_policy write_policy; - u64 size; - u16 line_size; - u8 level; -}; - -#ifdef CONFIG_HMEM_REPORTING -void node_add_cache(unsigned int nid, struct node_cache_attrs *cache_attrs); -void node_set_perf_attrs(unsigned int nid, struct node_hmem_attrs *hmem_attrs, - 
unsigned access); -#else -static inline void node_add_cache(unsigned int nid, - struct node_cache_attrs *cache_attrs) -{ -} - -static inline void node_set_perf_attrs(unsigned int nid, - struct node_hmem_attrs *hmem_attrs, - unsigned access) -{ -} -#endif - struct node { struct device dev; - struct list_head access_list; #if defined(CONFIG_MEMORY_HOTPLUG_SPARSE) && defined(CONFIG_HUGETLBFS) struct work_struct node_work; #endif -#ifdef CONFIG_HMEM_REPORTING - struct list_head cache_attrs; - struct device *cache_dev; -#endif }; struct memory_block; extern struct node *node_devices[]; typedef void (*node_registration_func_t)(struct node *); -#if defined(CONFIG_MEMORY_HOTPLUG_SPARSE) && defined(CONFIG_NUMA) -void link_mem_sections(int nid, unsigned long start_pfn, - unsigned long end_pfn, - enum meminit_context context); -#else -static inline void link_mem_sections(int nid, unsigned long start_pfn, - unsigned long end_pfn, - enum meminit_context context) -{ -} -#endif - extern void unregister_node(struct node *node); #ifdef CONFIG_NUMA -/* Core of the node registration - only memory hotplug should use this */ -extern int __register_one_node(int nid); - -/* Registers an online node */ -static inline int register_one_node(int nid) -{ - int error = 0; - - if (node_online(nid)) { - struct pglist_data *pgdat = NODE_DATA(nid); - unsigned long start_pfn = pgdat->node_start_pfn; - unsigned long end_pfn = start_pfn + pgdat->node_spanned_pages; - - error = __register_one_node(nid); - if (error) - return error; - /* link memory sections under this node */ - link_mem_sections(nid, start_pfn, end_pfn, MEMINIT_EARLY); - } - - return error; -} - +extern int register_one_node(int nid); extern void unregister_one_node(int nid); extern int register_cpu_under_node(unsigned int cpu, unsigned int nid); extern int unregister_cpu_under_node(unsigned int cpu, unsigned int nid); -extern void unregister_memory_block_under_nodes(struct memory_block *mem_blk); - -extern int 
register_memory_node_under_compute_node(unsigned int mem_nid, - unsigned int cpu_nid, - unsigned access); +extern int register_mem_sect_under_node(struct memory_block *mem_blk, + int nid); +extern int unregister_mem_sect_under_nodes(struct memory_block *mem_blk, + unsigned long phys_index); #ifdef CONFIG_HUGETLBFS extern void register_hugetlbfs_with_node(node_registration_func_t doregister, node_registration_func_t unregister); #endif #else -static inline int __register_one_node(int nid) -{ - return 0; -} static inline int register_one_node(int nid) { return 0; @@ -169,8 +62,15 @@ static inline int unregister_cpu_under_node(unsigned int cpu, unsigned int nid) { return 0; } -static inline void unregister_memory_block_under_nodes(struct memory_block *mem_blk) +static inline int register_mem_sect_under_node(struct memory_block *mem_blk, + int nid) { + return 0; +} +static inline int unregister_mem_sect_under_nodes(struct memory_block *mem_blk, + unsigned long phys_index) +{ + return 0; } static inline void register_hugetlbfs_with_node(node_registration_func_t reg, diff --git a/include/linux/nodemask.h b/include/linux/nodemask.h index 567c3ddba2..f746e44d40 100644 --- a/include/linux/nodemask.h +++ b/include/linux/nodemask.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef __LINUX_NODEMASK_H #define __LINUX_NODEMASK_H @@ -90,9 +89,9 @@ * for such situations. See below and CPUMASK_ALLOC also. */ +#include #include #include -#include #include typedef struct { DECLARE_BITMAP(bits, MAX_NUMNODES); } nodemask_t; @@ -104,22 +103,13 @@ extern nodemask_t _unused_nodemask_arg_; * * Can be used to provide arguments for '%*pb[l]' when printing a nodemask. */ -#define nodemask_pr_args(maskp) __nodemask_pr_numnodes(maskp), \ - __nodemask_pr_bits(maskp) -static inline unsigned int __nodemask_pr_numnodes(const nodemask_t *m) -{ - return m ? MAX_NUMNODES : 0; -} -static inline const unsigned long *__nodemask_pr_bits(const nodemask_t *m) -{ - return m ? 
m->bits : NULL; -} +#define nodemask_pr_args(maskp) MAX_NUMNODES, (maskp)->bits /* * The inline keyword gives the compiler room to decide to inline, or * not inline a function as it sees best. However, as these functions * are called in both __init and non-__init functions, if they are not - * inlined we will end up with a section mismatch error (of the type of + * inlined we will end up with a section mis-match error (of the type of * freeable items not being freed). So we must use __always_inline here * to fix the problem. If other functions in the future also end up in * this situation they will also need to be annotated as __always_inline @@ -397,9 +387,12 @@ enum node_states { #else N_HIGH_MEMORY = N_NORMAL_MEMORY, #endif +#ifdef CONFIG_MOVABLE_NODE N_MEMORY, /* The node has memory(regular, high, movable) */ +#else + N_MEMORY = N_HIGH_MEMORY, +#endif N_CPU, /* The node has one or more cpus */ - N_GENERIC_INITIATOR, /* The node has one or more Generic Initiators */ NR_NODE_STATES }; @@ -445,8 +438,8 @@ static inline int next_memory_node(int nid) return next_node(nid, node_states[N_MEMORY]); } -extern unsigned int nr_node_ids; -extern unsigned int nr_online_nodes; +extern int nr_node_ids; +extern int nr_online_nodes; static inline void node_set_online(int nid) { @@ -486,8 +479,8 @@ static inline int num_node_state(enum node_states state) #define first_online_node 0 #define first_memory_node 0 #define next_online_node(nid) (MAX_NUMNODES) -#define nr_node_ids 1U -#define nr_online_nodes 1U +#define nr_node_ids 1 +#define nr_online_nodes 1 #define node_set_online(node) node_set_state((node), N_ONLINE) #define node_set_offline(node) node_clear_state((node), N_ONLINE) @@ -515,11 +508,11 @@ static inline int node_random(const nodemask_t *mask) #define for_each_online_node(node) for_each_node_state(node, N_ONLINE) /* - * For nodemask scratch area. + * For nodemask scrach area. * NODEMASK_ALLOC(type, name) allocates an object with a specified type and * name. 
*/ -#if NODES_SHIFT > 8 /* nodemask_t > 32 bytes */ +#if NODES_SHIFT > 8 /* nodemask_t > 256 bytes */ #define NODEMASK_ALLOC(type, name, gfp_flags) \ type *name = kmalloc(sizeof(*name), gfp_flags) #define NODEMASK_FREE(m) kfree(m) @@ -528,7 +521,7 @@ static inline int node_random(const nodemask_t *mask) #define NODEMASK_FREE(m) do {} while (0) #endif -/* Example structure for using NODEMASK_ALLOC, used in mempolicy. */ +/* A example struture for using NODEMASK_ALLOC, used in mempolicy. */ struct nodemask_scratch { nodemask_t mask1; nodemask_t mask2; diff --git a/include/linux/notifier.h b/include/linux/notifier.h index 87069b8459..0971cea9fa 100644 --- a/include/linux/notifier.h +++ b/include/linux/notifier.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ /* * Routines to manage notifier chains for passing status changes to any * interested routines. We need this instead of hard coded call lists so @@ -43,7 +42,9 @@ * in srcu_notifier_call_chain(): no cache bounces and no memory barriers. * As compensation, srcu_notifier_chain_unregister() is rather expensive. * SRCU notifier chains should be used when the chain will be called very - * often but notifier_blocks will seldom be removed. + * often but notifier_blocks will seldom be removed. Also, SRCU notifier + * chains are slightly more difficult to use because they require special + * runtime initialization. 
*/ struct notifier_block; @@ -55,7 +56,8 @@ struct notifier_block { notifier_fn_t notifier_call; struct notifier_block __rcu *next; int priority; -}; +} __do_const; +typedef struct notifier_block __no_const notifier_block_no_const; struct atomic_notifier_head { spinlock_t lock; @@ -89,7 +91,7 @@ struct srcu_notifier_head { (name)->head = NULL; \ } while (0) -/* srcu_notifier_heads must be cleaned up dynamically */ +/* srcu_notifier_heads must be initialized and cleaned up dynamically */ extern void srcu_init_notifier_head(struct srcu_notifier_head *nh); #define srcu_cleanup_notifier_head(name) \ cleanup_srcu_struct(&(name)->srcu); @@ -102,13 +104,7 @@ extern void srcu_init_notifier_head(struct srcu_notifier_head *nh); .head = NULL } #define RAW_NOTIFIER_INIT(name) { \ .head = NULL } - -#define SRCU_NOTIFIER_INIT(name, pcpu) \ - { \ - .mutex = __MUTEX_INITIALIZER(name.mutex), \ - .head = NULL, \ - .srcu = __SRCU_STRUCT_INIT(name.srcu, pcpu), \ - } +/* srcu_notifier_heads cannot be initialized statically */ #define ATOMIC_NOTIFIER_HEAD(name) \ struct atomic_notifier_head name = \ @@ -120,25 +116,6 @@ extern void srcu_init_notifier_head(struct srcu_notifier_head *nh); struct raw_notifier_head name = \ RAW_NOTIFIER_INIT(name) -#ifdef CONFIG_TREE_SRCU -#define _SRCU_NOTIFIER_HEAD(name, mod) \ - static DEFINE_PER_CPU(struct srcu_data, name##_head_srcu_data); \ - mod struct srcu_notifier_head name = \ - SRCU_NOTIFIER_INIT(name, name##_head_srcu_data) - -#else -#define _SRCU_NOTIFIER_HEAD(name, mod) \ - mod struct srcu_notifier_head name = \ - SRCU_NOTIFIER_INIT(name, name) - -#endif - -#define SRCU_NOTIFIER_HEAD(name) \ - _SRCU_NOTIFIER_HEAD(name, /* not static */) - -#define SRCU_NOTIFIER_HEAD_STATIC(name) \ - _SRCU_NOTIFIER_HEAD(name, static) - #ifdef __KERNEL__ extern int atomic_notifier_chain_register(struct atomic_notifier_head *nh, @@ -150,6 +127,10 @@ extern int raw_notifier_chain_register(struct raw_notifier_head *nh, extern int srcu_notifier_chain_register(struct 
srcu_notifier_head *nh, struct notifier_block *nb); +extern int blocking_notifier_chain_cond_register( + struct blocking_notifier_head *nh, + struct notifier_block *nb); + extern int atomic_notifier_chain_unregister(struct atomic_notifier_head *nh, struct notifier_block *nb); extern int blocking_notifier_chain_unregister(struct blocking_notifier_head *nh, @@ -161,17 +142,20 @@ extern int srcu_notifier_chain_unregister(struct srcu_notifier_head *nh, extern int atomic_notifier_call_chain(struct atomic_notifier_head *nh, unsigned long val, void *v); +extern int __atomic_notifier_call_chain(struct atomic_notifier_head *nh, + unsigned long val, void *v, int nr_to_call, int *nr_calls); extern int blocking_notifier_call_chain(struct blocking_notifier_head *nh, unsigned long val, void *v); +extern int __blocking_notifier_call_chain(struct blocking_notifier_head *nh, + unsigned long val, void *v, int nr_to_call, int *nr_calls); extern int raw_notifier_call_chain(struct raw_notifier_head *nh, unsigned long val, void *v); +extern int __raw_notifier_call_chain(struct raw_notifier_head *nh, + unsigned long val, void *v, int nr_to_call, int *nr_calls); extern int srcu_notifier_call_chain(struct srcu_notifier_head *nh, unsigned long val, void *v); - -extern int blocking_notifier_call_chain_robust(struct blocking_notifier_head *nh, - unsigned long val_up, unsigned long val_down, void *v); -extern int raw_notifier_call_chain_robust(struct raw_notifier_head *nh, - unsigned long val_up, unsigned long val_down, void *v); +extern int __srcu_notifier_call_chain(struct srcu_notifier_head *nh, + unsigned long val, void *v, int nr_to_call, int *nr_calls); #define NOTIFY_DONE 0x0000 /* Don't care */ #define NOTIFY_OK 0x0001 /* Suits me */ diff --git a/include/linux/ns_common.h b/include/linux/ns_common.h index 0f1d024bd9..85a5c8c16b 100644 --- a/include/linux/ns_common.h +++ b/include/linux/ns_common.h @@ -1,16 +1,12 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_NS_COMMON_H 
#define _LINUX_NS_COMMON_H -#include - struct proc_ns_operations; struct ns_common { atomic_long_t stashed; const struct proc_ns_operations *ops; unsigned int inum; - refcount_t count; }; #endif diff --git a/include/linux/nsc_gpio.h b/include/linux/nsc_gpio.h index d7a04a6e37..7da0cf3702 100644 --- a/include/linux/nsc_gpio.h +++ b/include/linux/nsc_gpio.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ /** nsc_gpio.c diff --git a/include/linux/nsproxy.h b/include/linux/nsproxy.h index cdb171efc7..ac0d65bef5 100644 --- a/include/linux/nsproxy.h +++ b/include/linux/nsproxy.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_NSPROXY_H #define _LINUX_NSPROXY_H @@ -35,36 +34,10 @@ struct nsproxy { struct mnt_namespace *mnt_ns; struct pid_namespace *pid_ns_for_children; struct net *net_ns; - struct time_namespace *time_ns; - struct time_namespace *time_ns_for_children; struct cgroup_namespace *cgroup_ns; }; extern struct nsproxy init_nsproxy; -/* - * A structure to encompass all bits needed to install - * a partial or complete new set of namespaces. - * - * If a new user namespace is requested cred will - * point to a modifiable set of credentials. If a pointer - * to a modifiable set is needed nsset_cred() must be - * used and tested. - */ -struct nsset { - unsigned flags; - struct nsproxy *nsproxy; - struct fs_struct *fs; - const struct cred *cred; -}; - -static inline struct cred *nsset_cred(struct nsset *set) -{ - if (set->flags & CLONE_NEWUSER) - return (struct cred *)set->cred; - - return NULL; -} - /* * the namespaces access rules are: * diff --git a/include/linux/ntb.h b/include/linux/ntb.h index 191b524e5c..6f47562d47 100644 --- a/include/linux/ntb.h +++ b/include/linux/ntb.h @@ -5,7 +5,6 @@ * GPL LICENSE SUMMARY * * Copyright (C) 2015 EMC Corporation. All Rights Reserved. - * Copyright (C) 2016 T-Platforms. All Rights Reserved. 
* * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -19,7 +18,6 @@ * BSD LICENSE * * Copyright (C) 2015 EMC Corporation. All Rights Reserved. - * Copyright (C) 2016 T-Platforms. All Rights Reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions @@ -58,11 +56,9 @@ #include #include -#include struct ntb_client; struct ntb_dev; -struct ntb_msi; struct pci_dev; /** @@ -72,8 +68,6 @@ struct pci_dev; * @NTB_TOPO_SEC: On secondary side of remote ntb. * @NTB_TOPO_B2B_USD: On primary side of local ntb upstream of remote ntb. * @NTB_TOPO_B2B_DSD: On primary side of local ntb downstream of remote ntb. - * @NTB_TOPO_SWITCH: Connected via a switch which supports ntb. - * @NTB_TOPO_CROSSLINK: Connected via two symmetric switchecs */ enum ntb_topo { NTB_TOPO_NONE = -1, @@ -81,8 +75,6 @@ enum ntb_topo { NTB_TOPO_SEC, NTB_TOPO_B2B_USD, NTB_TOPO_B2B_DSD, - NTB_TOPO_SWITCH, - NTB_TOPO_CROSSLINK, }; static inline int ntb_topo_is_b2b(enum ntb_topo topo) @@ -98,13 +90,11 @@ static inline int ntb_topo_is_b2b(enum ntb_topo topo) static inline char *ntb_topo_string(enum ntb_topo topo) { switch (topo) { - case NTB_TOPO_NONE: return "NTB_TOPO_NONE"; - case NTB_TOPO_PRI: return "NTB_TOPO_PRI"; - case NTB_TOPO_SEC: return "NTB_TOPO_SEC"; - case NTB_TOPO_B2B_USD: return "NTB_TOPO_B2B_USD"; - case NTB_TOPO_B2B_DSD: return "NTB_TOPO_B2B_DSD"; - case NTB_TOPO_SWITCH: return "NTB_TOPO_SWITCH"; - case NTB_TOPO_CROSSLINK: return "NTB_TOPO_CROSSLINK"; + case NTB_TOPO_NONE: return "NTB_TOPO_NONE"; + case NTB_TOPO_PRI: return "NTB_TOPO_PRI"; + case NTB_TOPO_SEC: return "NTB_TOPO_SEC"; + case NTB_TOPO_B2B_USD: return "NTB_TOPO_B2B_USD"; + case NTB_TOPO_B2B_DSD: return "NTB_TOPO_B2B_DSD"; } return "NTB_TOPO_INVALID"; } @@ -116,7 +106,6 @@ static inline char *ntb_topo_string(enum ntb_topo topo) * @NTB_SPEED_GEN1: Link is 
trained to gen1 speed. * @NTB_SPEED_GEN2: Link is trained to gen2 speed. * @NTB_SPEED_GEN3: Link is trained to gen3 speed. - * @NTB_SPEED_GEN4: Link is trained to gen4 speed. */ enum ntb_speed { NTB_SPEED_AUTO = -1, @@ -124,7 +113,6 @@ enum ntb_speed { NTB_SPEED_GEN1 = 1, NTB_SPEED_GEN2 = 2, NTB_SPEED_GEN3 = 3, - NTB_SPEED_GEN4 = 4 }; /** @@ -151,20 +139,6 @@ enum ntb_width { NTB_WIDTH_32 = 32, }; -/** - * enum ntb_default_port - NTB default port number - * @NTB_PORT_PRI_USD: Default port of the NTB_TOPO_PRI/NTB_TOPO_B2B_USD - * topologies - * @NTB_PORT_SEC_DSD: Default port of the NTB_TOPO_SEC/NTB_TOPO_B2B_DSD - * topologies - */ -enum ntb_default_port { - NTB_PORT_PRI_USD, - NTB_PORT_SEC_DSD -}; -#define NTB_DEF_PEER_CNT (1) -#define NTB_DEF_PEER_IDX (0) - /** * struct ntb_client_ops - ntb client operations * @probe: Notify client of a new device. @@ -188,12 +162,10 @@ static inline int ntb_client_ops_is_valid(const struct ntb_client_ops *ops) * struct ntb_ctx_ops - ntb driver context operations * @link_event: See ntb_link_event(). * @db_event: See ntb_db_event(). - * @msg_event: See ntb_msg_event(). */ struct ntb_ctx_ops { void (*link_event)(void *ctx); void (*db_event)(void *ctx, int db_vector); - void (*msg_event)(void *ctx); }; static inline int ntb_ctx_ops_is_valid(const struct ntb_ctx_ops *ops) @@ -202,27 +174,18 @@ static inline int ntb_ctx_ops_is_valid(const struct ntb_ctx_ops *ops) return /* ops->link_event && */ /* ops->db_event && */ - /* ops->msg_event && */ 1; } /** - * struct ntb_dev_ops - ntb device operations - * @port_number: See ntb_port_number(). - * @peer_port_count: See ntb_peer_port_count(). - * @peer_port_number: See ntb_peer_port_number(). - * @peer_port_idx: See ntb_peer_port_idx(). + * struct ntb_ctx_ops - ntb device operations + * @mw_count: See ntb_mw_count(). + * @mw_get_range: See ntb_mw_get_range(). + * @mw_set_trans: See ntb_mw_set_trans(). + * @mw_clear_trans: See ntb_mw_clear_trans(). * @link_is_up: See ntb_link_is_up(). 
* @link_enable: See ntb_link_enable(). * @link_disable: See ntb_link_disable(). - * @mw_count: See ntb_mw_count(). - * @mw_get_align: See ntb_mw_get_align(). - * @mw_set_trans: See ntb_mw_set_trans(). - * @mw_clear_trans: See ntb_mw_clear_trans(). - * @peer_mw_count: See ntb_peer_mw_count(). - * @peer_mw_get_addr: See ntb_peer_mw_get_addr(). - * @peer_mw_set_trans: See ntb_peer_mw_set_trans(). - * @peer_mw_clear_trans:See ntb_peer_mw_clear_trans(). * @db_is_unsafe: See ntb_db_is_unsafe(). * @db_valid_mask: See ntb_db_valid_mask(). * @db_vector_count: See ntb_db_vector_count(). @@ -247,43 +210,22 @@ static inline int ntb_ctx_ops_is_valid(const struct ntb_ctx_ops *ops) * @peer_spad_addr: See ntb_peer_spad_addr(). * @peer_spad_read: See ntb_peer_spad_read(). * @peer_spad_write: See ntb_peer_spad_write(). - * @msg_count: See ntb_msg_count(). - * @msg_inbits: See ntb_msg_inbits(). - * @msg_outbits: See ntb_msg_outbits(). - * @msg_read_sts: See ntb_msg_read_sts(). - * @msg_clear_sts: See ntb_msg_clear_sts(). - * @msg_set_mask: See ntb_msg_set_mask(). - * @msg_clear_mask: See ntb_msg_clear_mask(). - * @msg_read: See ntb_msg_read(). - * @peer_msg_write: See ntb_peer_msg_write(). 
*/ struct ntb_dev_ops { - int (*port_number)(struct ntb_dev *ntb); - int (*peer_port_count)(struct ntb_dev *ntb); - int (*peer_port_number)(struct ntb_dev *ntb, int pidx); - int (*peer_port_idx)(struct ntb_dev *ntb, int port); + int (*mw_count)(struct ntb_dev *ntb); + int (*mw_get_range)(struct ntb_dev *ntb, int idx, + phys_addr_t *base, resource_size_t *size, + resource_size_t *align, resource_size_t *align_size); + int (*mw_set_trans)(struct ntb_dev *ntb, int idx, + dma_addr_t addr, resource_size_t size); + int (*mw_clear_trans)(struct ntb_dev *ntb, int idx); - u64 (*link_is_up)(struct ntb_dev *ntb, + int (*link_is_up)(struct ntb_dev *ntb, enum ntb_speed *speed, enum ntb_width *width); int (*link_enable)(struct ntb_dev *ntb, enum ntb_speed max_speed, enum ntb_width max_width); int (*link_disable)(struct ntb_dev *ntb); - int (*mw_count)(struct ntb_dev *ntb, int pidx); - int (*mw_get_align)(struct ntb_dev *ntb, int pidx, int widx, - resource_size_t *addr_align, - resource_size_t *size_align, - resource_size_t *size_max); - int (*mw_set_trans)(struct ntb_dev *ntb, int pidx, int widx, - dma_addr_t addr, resource_size_t size); - int (*mw_clear_trans)(struct ntb_dev *ntb, int pidx, int widx); - int (*peer_mw_count)(struct ntb_dev *ntb); - int (*peer_mw_get_addr)(struct ntb_dev *ntb, int widx, - phys_addr_t *base, resource_size_t *size); - int (*peer_mw_set_trans)(struct ntb_dev *ntb, int pidx, int widx, - u64 addr, resource_size_t size); - int (*peer_mw_clear_trans)(struct ntb_dev *ntb, int pidx, int widx); - int (*db_is_unsafe)(struct ntb_dev *ntb); u64 (*db_valid_mask)(struct ntb_dev *ntb); int (*db_vector_count)(struct ntb_dev *ntb); @@ -298,8 +240,7 @@ struct ntb_dev_ops { int (*db_clear_mask)(struct ntb_dev *ntb, u64 db_bits); int (*peer_db_addr)(struct ntb_dev *ntb, - phys_addr_t *db_addr, resource_size_t *db_size, - u64 *db_data, int db_bit); + phys_addr_t *db_addr, resource_size_t *db_size); u64 (*peer_db_read)(struct ntb_dev *ntb); int (*peer_db_set)(struct 
ntb_dev *ntb, u64 db_bits); int (*peer_db_clear)(struct ntb_dev *ntb, u64 db_bits); @@ -311,55 +252,32 @@ struct ntb_dev_ops { int (*spad_is_unsafe)(struct ntb_dev *ntb); int (*spad_count)(struct ntb_dev *ntb); - u32 (*spad_read)(struct ntb_dev *ntb, int sidx); - int (*spad_write)(struct ntb_dev *ntb, int sidx, u32 val); + u32 (*spad_read)(struct ntb_dev *ntb, int idx); + int (*spad_write)(struct ntb_dev *ntb, int idx, u32 val); - int (*peer_spad_addr)(struct ntb_dev *ntb, int pidx, int sidx, + int (*peer_spad_addr)(struct ntb_dev *ntb, int idx, phys_addr_t *spad_addr); - u32 (*peer_spad_read)(struct ntb_dev *ntb, int pidx, int sidx); - int (*peer_spad_write)(struct ntb_dev *ntb, int pidx, int sidx, - u32 val); - - int (*msg_count)(struct ntb_dev *ntb); - u64 (*msg_inbits)(struct ntb_dev *ntb); - u64 (*msg_outbits)(struct ntb_dev *ntb); - u64 (*msg_read_sts)(struct ntb_dev *ntb); - int (*msg_clear_sts)(struct ntb_dev *ntb, u64 sts_bits); - int (*msg_set_mask)(struct ntb_dev *ntb, u64 mask_bits); - int (*msg_clear_mask)(struct ntb_dev *ntb, u64 mask_bits); - u32 (*msg_read)(struct ntb_dev *ntb, int *pidx, int midx); - int (*peer_msg_write)(struct ntb_dev *ntb, int pidx, int midx, u32 msg); + u32 (*peer_spad_read)(struct ntb_dev *ntb, int idx); + int (*peer_spad_write)(struct ntb_dev *ntb, int idx, u32 val); }; static inline int ntb_dev_ops_is_valid(const struct ntb_dev_ops *ops) { /* commented callbacks are not required: */ return - /* Port operations are required for multiport devices */ - !ops->peer_port_count == !ops->port_number && - !ops->peer_port_number == !ops->port_number && - !ops->peer_port_idx == !ops->port_number && - - /* Link operations are required */ + ops->mw_count && + ops->mw_get_range && + ops->mw_set_trans && + /* ops->mw_clear_trans && */ ops->link_is_up && ops->link_enable && ops->link_disable && - - /* One or both MW interfaces should be developed */ - ops->mw_count && - ops->mw_get_align && - (ops->mw_set_trans || - ops->peer_mw_set_trans) 
&& - /* ops->mw_clear_trans && */ - ops->peer_mw_count && - ops->peer_mw_get_addr && - /* ops->peer_mw_clear_trans && */ - - /* Doorbell operations are mostly required */ /* ops->db_is_unsafe && */ ops->db_valid_mask && + /* both set, or both unset */ - (!ops->db_vector_count == !ops->db_vector_mask) && + (!ops->db_vector_count == !ops->db_vector_mask) && + ops->db_read && /* ops->db_set && */ ops->db_clear && @@ -373,24 +291,13 @@ static inline int ntb_dev_ops_is_valid(const struct ntb_dev_ops *ops) /* ops->peer_db_read_mask && */ /* ops->peer_db_set_mask && */ /* ops->peer_db_clear_mask && */ - - /* Scrachpads interface is optional */ - /* !ops->spad_is_unsafe == !ops->spad_count && */ - !ops->spad_read == !ops->spad_count && - !ops->spad_write == !ops->spad_count && - /* !ops->peer_spad_addr == !ops->spad_count && */ - /* !ops->peer_spad_read == !ops->spad_count && */ - !ops->peer_spad_write == !ops->spad_count && - - /* Messaging interface is optional */ - !ops->msg_inbits == !ops->msg_count && - !ops->msg_outbits == !ops->msg_count && - !ops->msg_read_sts == !ops->msg_count && - !ops->msg_clear_sts == !ops->msg_count && - /* !ops->msg_set_mask == !ops->msg_count && */ - /* !ops->msg_clear_mask == !ops->msg_count && */ - !ops->msg_read == !ops->msg_count && - !ops->peer_msg_write == !ops->msg_count && + /* ops->spad_is_unsafe && */ + ops->spad_count && + ops->spad_read && + ops->spad_write && + /* ops->peer_spad_addr && */ + /* ops->peer_spad_read && */ + ops->peer_spad_write && 1; } @@ -403,12 +310,13 @@ struct ntb_client { struct device_driver drv; const struct ntb_client_ops ops; }; + #define drv_ntb_client(__drv) container_of((__drv), struct ntb_client, drv) /** - * struct ntb_dev - ntb device + * struct ntb_device - ntb device * @dev: Linux device object. - * @pdev: PCI device entry of the ntb. + * @pdev: Pci device entry of the ntb. * @topo: Detected topology of the ntb. * @ops: See &ntb_dev_ops. * @ctx: See &ntb_ctx_ops. 
@@ -428,11 +336,8 @@ struct ntb_dev { spinlock_t ctx_lock; /* block unregister until device is fully released */ struct completion released; - -#ifdef CONFIG_NTB_MSI - struct ntb_msi *msi; -#endif }; + #define dev_ntb(__dev) container_of((__dev), struct ntb_dev, dev) /** @@ -478,7 +383,7 @@ void ntb_unregister_client(struct ntb_client *client); int ntb_register_device(struct ntb_dev *ntb); /** - * ntb_unregister_device() - unregister a ntb device + * ntb_register_device() - unregister a ntb device * @ntb: NTB device context. * * The device will be removed from the list of ntb devices. If the ntb device @@ -529,203 +434,86 @@ void ntb_link_event(struct ntb_dev *ntb); * multiple interrupt vectors for doorbells, the vector number indicates which * vector received the interrupt. The vector number is relative to the first * vector used for doorbells, starting at zero, and must be less than - * ntb_db_vector_count(). The driver may call ntb_db_read() to check which + ** ntb_db_vector_count(). The driver may call ntb_db_read() to check which * doorbell bits need service, and ntb_db_vector_mask() to determine which of * those bits are associated with the vector number. */ void ntb_db_event(struct ntb_dev *ntb, int vector); /** - * ntb_msg_event() - notify driver context of a message event + * ntb_mw_count() - get the number of memory windows * @ntb: NTB device context. * - * Notify the driver context of a message event. If hardware supports - * message registers, this event indicates, that a new message arrived in - * some incoming message register or last sent message couldn't be delivered. - * The events can be masked/unmasked by the methods ntb_msg_set_mask() and - * ntb_msg_clear_mask(). + * Hardware and topology may support a different number of memory windows. + * + * Return: the number of memory windows. */ -void ntb_msg_event(struct ntb_dev *ntb); - -/** - * ntb_default_port_number() - get the default local port number - * @ntb: NTB device context. 
- * - * If hardware driver doesn't specify port_number() callback method, the NTB - * is considered with just two ports. So this method returns default local - * port number in compliance with topology. - * - * NOTE Don't call this method directly. The ntb_port_number() function should - * be used instead. - * - * Return: the default local port number - */ -int ntb_default_port_number(struct ntb_dev *ntb); - -/** - * ntb_default_port_count() - get the default number of peer device ports - * @ntb: NTB device context. - * - * By default hardware driver supports just one peer device. - * - * NOTE Don't call this method directly. The ntb_peer_port_count() function - * should be used instead. - * - * Return: the default number of peer ports - */ -int ntb_default_peer_port_count(struct ntb_dev *ntb); - -/** - * ntb_default_peer_port_number() - get the default peer port by given index - * @ntb: NTB device context. - * @idx: Peer port index (should not differ from zero). - * - * By default hardware driver supports just one peer device, so this method - * shall return the corresponding value from enum ntb_default_port. - * - * NOTE Don't call this method directly. The ntb_peer_port_number() function - * should be used instead. - * - * Return: the peer device port or negative value indicating an error - */ -int ntb_default_peer_port_number(struct ntb_dev *ntb, int pidx); - -/** - * ntb_default_peer_port_idx() - get the default peer device port index by - * given port number - * @ntb: NTB device context. - * @port: Peer port number (should be one of enum ntb_default_port). - * - * By default hardware driver supports just one peer device, so while - * specified port-argument indicates peer port from enum ntb_default_port, - * the return value shall be zero. - * - * NOTE Don't call this method directly. The ntb_peer_port_idx() function - * should be used instead. 
- * - * Return: the peer port index or negative value indicating an error - */ -int ntb_default_peer_port_idx(struct ntb_dev *ntb, int port); - -/** - * ntb_port_number() - get the local port number - * @ntb: NTB device context. - * - * Hardware must support at least simple two-ports ntb connection - * - * Return: the local port number - */ -static inline int ntb_port_number(struct ntb_dev *ntb) +static inline int ntb_mw_count(struct ntb_dev *ntb) { - if (!ntb->ops->port_number) - return ntb_default_port_number(ntb); - - return ntb->ops->port_number(ntb); -} -/** - * ntb_peer_port_count() - get the number of peer device ports - * @ntb: NTB device context. - * - * Hardware may support an access to memory of several remote domains - * over multi-port NTB devices. This method returns the number of peers, - * local device can have shared memory with. - * - * Return: the number of peer ports - */ -static inline int ntb_peer_port_count(struct ntb_dev *ntb) -{ - if (!ntb->ops->peer_port_count) - return ntb_default_peer_port_count(ntb); - - return ntb->ops->peer_port_count(ntb); + return ntb->ops->mw_count(ntb); } /** - * ntb_peer_port_number() - get the peer port by given index + * ntb_mw_get_range() - get the range of a memory window * @ntb: NTB device context. - * @pidx: Peer port index. + * @idx: Memory window number. + * @base: OUT - the base address for mapping the memory window + * @size: OUT - the size for mapping the memory window + * @align: OUT - the base alignment for translating the memory window + * @align_size: OUT - the size alignment for translating the memory window * - * Peer ports are continuously enumerated by NTB API logic, so this method - * lets to retrieve port real number by its index. + * Get the range of a memory window. NULL may be given for any output + * parameter if the value is not needed. The base and size may be used for + * mapping the memory window, to access the peer memory. 
The alignment and + * size may be used for translating the memory window, for the peer to access + * memory on the local system. * - * Return: the peer device port or negative value indicating an error + * Return: Zero on success, otherwise an error number. */ -static inline int ntb_peer_port_number(struct ntb_dev *ntb, int pidx) +static inline int ntb_mw_get_range(struct ntb_dev *ntb, int idx, + phys_addr_t *base, resource_size_t *size, + resource_size_t *align, resource_size_t *align_size) { - if (!ntb->ops->peer_port_number) - return ntb_default_peer_port_number(ntb, pidx); - - return ntb->ops->peer_port_number(ntb, pidx); + return ntb->ops->mw_get_range(ntb, idx, base, size, + align, align_size); } /** - * ntb_logical_port_number() - get the logical port number of the local port + * ntb_mw_set_trans() - set the translation of a memory window * @ntb: NTB device context. + * @idx: Memory window number. + * @addr: The dma address local memory to expose to the peer. + * @size: The size of the local memory to expose to the peer. * - * The Logical Port Number is defined to be a unique number for each - * port starting from zero through to the number of ports minus one. - * This is in contrast to the Port Number where each port can be assigned - * any unique physical number by the hardware. + * Set the translation of a memory window. The peer may access local memory + * through the window starting at the address, up to the size. The address + * must be aligned to the alignment specified by ntb_mw_get_range(). The size + * must be aligned to the size alignment specified by ntb_mw_get_range(). * - * The logical port number is useful for calculating the resource indexes - * used by peers. - * - * Return: the logical port number or negative value indicating an error + * Return: Zero on success, otherwise an error number. 
*/ -static inline int ntb_logical_port_number(struct ntb_dev *ntb) +static inline int ntb_mw_set_trans(struct ntb_dev *ntb, int idx, + dma_addr_t addr, resource_size_t size) { - int lport = ntb_port_number(ntb); - int pidx; - - if (lport < 0) - return lport; - - for (pidx = 0; pidx < ntb_peer_port_count(ntb); pidx++) - if (lport <= ntb_peer_port_number(ntb, pidx)) - return pidx; - - return pidx; + return ntb->ops->mw_set_trans(ntb, idx, addr, size); } /** - * ntb_peer_logical_port_number() - get the logical peer port by given index + * ntb_mw_clear_trans() - clear the translation of a memory window * @ntb: NTB device context. - * @pidx: Peer port index. + * @idx: Memory window number. * - * The Logical Port Number is defined to be a unique number for each - * port starting from zero through to the number of ports minus one. - * This is in contrast to the Port Number where each port can be assigned - * any unique physical number by the hardware. + * Clear the translation of a memory window. The peer may no longer access + * local memory through the window. * - * The logical port number is useful for calculating the resource indexes - * used by peers. - * - * Return: the peer's logical port number or negative value indicating an error + * Return: Zero on success, otherwise an error number. */ -static inline int ntb_peer_logical_port_number(struct ntb_dev *ntb, int pidx) +static inline int ntb_mw_clear_trans(struct ntb_dev *ntb, int idx) { - if (ntb_peer_port_number(ntb, pidx) < ntb_port_number(ntb)) - return pidx; - else - return pidx + 1; -} + if (!ntb->ops->mw_clear_trans) + return ntb->ops->mw_set_trans(ntb, idx, 0, 0); -/** - * ntb_peer_port_idx() - get the peer device port index by given port number - * @ntb: NTB device context. - * @port: Peer port number. - * - * Inverse operation of ntb_peer_port_number(), so one can get port index - * by specified port number. 
- * - * Return: the peer port index or negative value indicating an error - */ -static inline int ntb_peer_port_idx(struct ntb_dev *ntb, int port) -{ - if (!ntb->ops->peer_port_idx) - return ntb_default_peer_port_idx(ntb, port); - - return ntb->ops->peer_port_idx(ntb, port); + return ntb->ops->mw_clear_trans(ntb, idx); } /** @@ -738,26 +526,25 @@ static inline int ntb_peer_port_idx(struct ntb_dev *ntb, int port) * state once after every link event. It is safe to query the link state in * the context of the link event callback. * - * Return: bitfield of indexed ports link state: bit is set/cleared if the - * link is up/down respectively. + * Return: One if the link is up, zero if the link is down, otherwise a + * negative value indicating the error number. */ -static inline u64 ntb_link_is_up(struct ntb_dev *ntb, +static inline int ntb_link_is_up(struct ntb_dev *ntb, enum ntb_speed *speed, enum ntb_width *width) { return ntb->ops->link_is_up(ntb, speed, width); } /** - * ntb_link_enable() - enable the local port ntb connection + * ntb_link_enable() - enable the link on the secondary side of the ntb * @ntb: NTB device context. * @max_speed: The maximum link speed expressed as PCIe generation number. * @max_width: The maximum link width expressed as the number of PCIe lanes. * - * Enable the NTB/PCIe link on the local or remote (for bridge-to-bridge - * topology) side of the bridge. If it's supported the ntb device should train - * the link to its maximum speed and width, or the requested speed and width, - * whichever is smaller. Some hardware doesn't support PCIe link training, so - * the last two arguments will be ignored then. + * Enable the link on the secondary side of the ntb. This can only be done + * from the primary side of the ntb in primary or b2b topology. The ntb device + * should train the link to its maximum speed and width, or the requested speed + * and width, whichever is smaller, if supported. * * Return: Zero on success, otherwise an error number. 
*/ @@ -769,14 +556,14 @@ static inline int ntb_link_enable(struct ntb_dev *ntb, } /** - * ntb_link_disable() - disable the local port ntb connection + * ntb_link_disable() - disable the link on the secondary side of the ntb * @ntb: NTB device context. * - * Disable the link on the local or remote (for b2b topology) of the ntb. - * The ntb device should disable the link. Returning from this call must - * indicate that a barrier has passed, though with no more writes may pass in - * either direction across the link, except if this call returns an error - * number. + * Disable the link on the secondary side of the ntb. This can only be + * done from the primary side of the ntb in primary or b2b topology. The ntb + * device should disable the link. Returning from this call must indicate that + * a barrier has passed, though with no more writes may pass in either + * direction across the link, except if this call returns an error number. * * Return: Zero on success, otherwise an error number. */ @@ -785,187 +572,6 @@ static inline int ntb_link_disable(struct ntb_dev *ntb) return ntb->ops->link_disable(ntb); } -/** - * ntb_mw_count() - get the number of inbound memory windows, which could - * be created for a specified peer device - * @ntb: NTB device context. - * @pidx: Port index of peer device. - * - * Hardware and topology may support a different number of memory windows. - * Moreover different peer devices can support different number of memory - * windows. Simply speaking this method returns the number of possible inbound - * memory windows to share with specified peer device. Note: this may return - * zero if the link is not up yet. - * - * Return: the number of memory windows. - */ -static inline int ntb_mw_count(struct ntb_dev *ntb, int pidx) -{ - return ntb->ops->mw_count(ntb, pidx); -} - -/** - * ntb_mw_get_align() - get the restriction parameters of inbound memory window - * @ntb: NTB device context. - * @pidx: Port index of peer device. 
- * @widx: Memory window index. - * @addr_align: OUT - the base alignment for translating the memory window - * @size_align: OUT - the size alignment for translating the memory window - * @size_max: OUT - the maximum size of the memory window - * - * Get the alignments of an inbound memory window with specified index. - * NULL may be given for any output parameter if the value is not needed. - * The alignment and size parameters may be used for allocation of proper - * shared memory. Note: this must only be called when the link is up. - * - * Return: Zero on success, otherwise a negative error number. - */ -static inline int ntb_mw_get_align(struct ntb_dev *ntb, int pidx, int widx, - resource_size_t *addr_align, - resource_size_t *size_align, - resource_size_t *size_max) -{ - if (!(ntb_link_is_up(ntb, NULL, NULL) & BIT_ULL(pidx))) - return -ENOTCONN; - - return ntb->ops->mw_get_align(ntb, pidx, widx, addr_align, size_align, - size_max); -} - -/** - * ntb_mw_set_trans() - set the translation of an inbound memory window - * @ntb: NTB device context. - * @pidx: Port index of peer device. - * @widx: Memory window index. - * @addr: The dma address of local memory to expose to the peer. - * @size: The size of the local memory to expose to the peer. - * - * Set the translation of a memory window. The peer may access local memory - * through the window starting at the address, up to the size. The address - * and size must be aligned in compliance with restrictions of - * ntb_mw_get_align(). The region size should not exceed the size_max parameter - * of that method. - * - * This method may not be implemented due to the hardware specific memory - * windows interface. - * - * Return: Zero on success, otherwise an error number. 
- */ -static inline int ntb_mw_set_trans(struct ntb_dev *ntb, int pidx, int widx, - dma_addr_t addr, resource_size_t size) -{ - if (!ntb->ops->mw_set_trans) - return 0; - - return ntb->ops->mw_set_trans(ntb, pidx, widx, addr, size); -} - -/** - * ntb_mw_clear_trans() - clear the translation address of an inbound memory - * window - * @ntb: NTB device context. - * @pidx: Port index of peer device. - * @widx: Memory window index. - * - * Clear the translation of an inbound memory window. The peer may no longer - * access local memory through the window. - * - * Return: Zero on success, otherwise an error number. - */ -static inline int ntb_mw_clear_trans(struct ntb_dev *ntb, int pidx, int widx) -{ - if (!ntb->ops->mw_clear_trans) - return ntb_mw_set_trans(ntb, pidx, widx, 0, 0); - - return ntb->ops->mw_clear_trans(ntb, pidx, widx); -} - -/** - * ntb_peer_mw_count() - get the number of outbound memory windows, which could - * be mapped to access a shared memory - * @ntb: NTB device context. - * - * Hardware and topology may support a different number of memory windows. - * This method returns the number of outbound memory windows supported by - * local device. - * - * Return: the number of memory windows. - */ -static inline int ntb_peer_mw_count(struct ntb_dev *ntb) -{ - return ntb->ops->peer_mw_count(ntb); -} - -/** - * ntb_peer_mw_get_addr() - get map address of an outbound memory window - * @ntb: NTB device context. - * @widx: Memory window index (within ntb_peer_mw_count() return value). - * @base: OUT - the base address of mapping region. - * @size: OUT - the size of mapping region. - * - * Get base and size of memory region to map. NULL may be given for any output - * parameter if the value is not needed. The base and size may be used for - * mapping the memory window, to access the peer memory. - * - * Return: Zero on success, otherwise a negative error number. 
- */ -static inline int ntb_peer_mw_get_addr(struct ntb_dev *ntb, int widx, - phys_addr_t *base, resource_size_t *size) -{ - return ntb->ops->peer_mw_get_addr(ntb, widx, base, size); -} - -/** - * ntb_peer_mw_set_trans() - set a translation address of a memory window - * retrieved from a peer device - * @ntb: NTB device context. - * @pidx: Port index of peer device the translation address received from. - * @widx: Memory window index. - * @addr: The dma address of the shared memory to access. - * @size: The size of the shared memory to access. - * - * Set the translation of an outbound memory window. The local device may - * access shared memory allocated by a peer device sent the address. - * - * This method may not be implemented due to the hardware specific memory - * windows interface, so a translation address can be only set on the side, - * where shared memory (inbound memory windows) is allocated. - * - * Return: Zero on success, otherwise an error number. - */ -static inline int ntb_peer_mw_set_trans(struct ntb_dev *ntb, int pidx, int widx, - u64 addr, resource_size_t size) -{ - if (!ntb->ops->peer_mw_set_trans) - return 0; - - return ntb->ops->peer_mw_set_trans(ntb, pidx, widx, addr, size); -} - -/** - * ntb_peer_mw_clear_trans() - clear the translation address of an outbound - * memory window - * @ntb: NTB device context. - * @pidx: Port index of peer device. - * @widx: Memory window index. - * - * Clear the translation of a outbound memory window. The local device may no - * longer access a shared memory through the window. - * - * This method may not be implemented due to the hardware specific memory - * windows interface. - * - * Return: Zero on success, otherwise an error number. 
- */ -static inline int ntb_peer_mw_clear_trans(struct ntb_dev *ntb, int pidx, - int widx) -{ - if (!ntb->ops->peer_mw_clear_trans) - return ntb_peer_mw_set_trans(ntb, pidx, widx, 0, 0); - - return ntb->ops->peer_mw_clear_trans(ntb, pidx, widx); -} - /** * ntb_db_is_unsafe() - check if it is safe to use hardware doorbell * @ntb: NTB device context. @@ -1136,8 +742,6 @@ static inline int ntb_db_clear_mask(struct ntb_dev *ntb, u64 db_bits) * @ntb: NTB device context. * @db_addr: OUT - The address of the peer doorbell register. * @db_size: OUT - The number of bytes to write the peer doorbell register. - * @db_data: OUT - The data of peer doorbell register - * @db_bit: door bell bit number * * Return the address of the peer doorbell register. This may be used, for * example, by drivers that offload memory copy operations to a dma engine. @@ -1151,13 +755,12 @@ static inline int ntb_db_clear_mask(struct ntb_dev *ntb, u64 db_bits) */ static inline int ntb_peer_db_addr(struct ntb_dev *ntb, phys_addr_t *db_addr, - resource_size_t *db_size, - u64 *db_data, int db_bit) + resource_size_t *db_size) { if (!ntb->ops->peer_db_addr) return -EINVAL; - return ntb->ops->peer_db_addr(ntb, db_addr, db_size, db_data, db_bit); + return ntb->ops->peer_db_addr(ntb, db_addr, db_size); } /** @@ -1293,411 +896,94 @@ static inline int ntb_spad_is_unsafe(struct ntb_dev *ntb) } /** - * ntb_spad_count() - get the number of scratchpads + * ntb_mw_count() - get the number of scratchpads * @ntb: NTB device context. * * Hardware and topology may support a different number of scratchpads. - * Although it must be the same for all ports per NTB device. * * Return: the number of scratchpads. */ static inline int ntb_spad_count(struct ntb_dev *ntb) { - if (!ntb->ops->spad_count) - return 0; - return ntb->ops->spad_count(ntb); } /** * ntb_spad_read() - read the local scratchpad register * @ntb: NTB device context. - * @sidx: Scratchpad index. + * @idx: Scratchpad index. 
* * Read the local scratchpad register, and return the value. * * Return: The value of the local scratchpad register. */ -static inline u32 ntb_spad_read(struct ntb_dev *ntb, int sidx) +static inline u32 ntb_spad_read(struct ntb_dev *ntb, int idx) { - if (!ntb->ops->spad_read) - return ~(u32)0; - - return ntb->ops->spad_read(ntb, sidx); + return ntb->ops->spad_read(ntb, idx); } /** * ntb_spad_write() - write the local scratchpad register * @ntb: NTB device context. - * @sidx: Scratchpad index. + * @idx: Scratchpad index. * @val: Scratchpad value. * * Write the value to the local scratchpad register. * * Return: Zero on success, otherwise an error number. */ -static inline int ntb_spad_write(struct ntb_dev *ntb, int sidx, u32 val) +static inline int ntb_spad_write(struct ntb_dev *ntb, int idx, u32 val) { - if (!ntb->ops->spad_write) - return -EINVAL; - - return ntb->ops->spad_write(ntb, sidx, val); + return ntb->ops->spad_write(ntb, idx, val); } /** * ntb_peer_spad_addr() - address of the peer scratchpad register * @ntb: NTB device context. - * @pidx: Port index of peer device. - * @sidx: Scratchpad index. + * @idx: Scratchpad index. * @spad_addr: OUT - The address of the peer scratchpad register. * - * Return the address of the peer scratchpad register. This may be used, for + * Return the address of the peer doorbell register. This may be used, for * example, by drivers that offload memory copy operations to a dma engine. * * Return: Zero on success, otherwise an error number. */ -static inline int ntb_peer_spad_addr(struct ntb_dev *ntb, int pidx, int sidx, +static inline int ntb_peer_spad_addr(struct ntb_dev *ntb, int idx, phys_addr_t *spad_addr) { if (!ntb->ops->peer_spad_addr) return -EINVAL; - return ntb->ops->peer_spad_addr(ntb, pidx, sidx, spad_addr); + return ntb->ops->peer_spad_addr(ntb, idx, spad_addr); } /** * ntb_peer_spad_read() - read the peer scratchpad register * @ntb: NTB device context. - * @pidx: Port index of peer device. 
- * @sidx: Scratchpad index. + * @idx: Scratchpad index. * * Read the peer scratchpad register, and return the value. * - * Return: The value of the peer scratchpad register. + * Return: The value of the local scratchpad register. */ -static inline u32 ntb_peer_spad_read(struct ntb_dev *ntb, int pidx, int sidx) +static inline u32 ntb_peer_spad_read(struct ntb_dev *ntb, int idx) { - if (!ntb->ops->peer_spad_read) - return ~(u32)0; - - return ntb->ops->peer_spad_read(ntb, pidx, sidx); + return ntb->ops->peer_spad_read(ntb, idx); } /** * ntb_peer_spad_write() - write the peer scratchpad register * @ntb: NTB device context. - * @pidx: Port index of peer device. - * @sidx: Scratchpad index. + * @idx: Scratchpad index. * @val: Scratchpad value. * * Write the value to the peer scratchpad register. * * Return: Zero on success, otherwise an error number. */ -static inline int ntb_peer_spad_write(struct ntb_dev *ntb, int pidx, int sidx, - u32 val) +static inline int ntb_peer_spad_write(struct ntb_dev *ntb, int idx, u32 val) { - if (!ntb->ops->peer_spad_write) - return -EINVAL; - - return ntb->ops->peer_spad_write(ntb, pidx, sidx, val); -} - -/** - * ntb_msg_count() - get the number of message registers - * @ntb: NTB device context. - * - * Hardware may support a different number of message registers. - * - * Return: the number of message registers. - */ -static inline int ntb_msg_count(struct ntb_dev *ntb) -{ - if (!ntb->ops->msg_count) - return 0; - - return ntb->ops->msg_count(ntb); -} - -/** - * ntb_msg_inbits() - get a bitfield of inbound message registers status - * @ntb: NTB device context. - * - * The method returns the bitfield of status and mask registers, which related - * to inbound message registers. - * - * Return: bitfield of inbound message registers. 
- */ -static inline u64 ntb_msg_inbits(struct ntb_dev *ntb) -{ - if (!ntb->ops->msg_inbits) - return 0; - - return ntb->ops->msg_inbits(ntb); -} - -/** - * ntb_msg_outbits() - get a bitfield of outbound message registers status - * @ntb: NTB device context. - * - * The method returns the bitfield of status and mask registers, which related - * to outbound message registers. - * - * Return: bitfield of outbound message registers. - */ -static inline u64 ntb_msg_outbits(struct ntb_dev *ntb) -{ - if (!ntb->ops->msg_outbits) - return 0; - - return ntb->ops->msg_outbits(ntb); -} - -/** - * ntb_msg_read_sts() - read the message registers status - * @ntb: NTB device context. - * - * Read the status of message register. Inbound and outbound message registers - * related bits can be filtered by masks retrieved from ntb_msg_inbits() and - * ntb_msg_outbits(). - * - * Return: status bits of message registers - */ -static inline u64 ntb_msg_read_sts(struct ntb_dev *ntb) -{ - if (!ntb->ops->msg_read_sts) - return 0; - - return ntb->ops->msg_read_sts(ntb); -} - -/** - * ntb_msg_clear_sts() - clear status bits of message registers - * @ntb: NTB device context. - * @sts_bits: Status bits to clear. - * - * Clear bits in the status register. - * - * Return: Zero on success, otherwise a negative error number. - */ -static inline int ntb_msg_clear_sts(struct ntb_dev *ntb, u64 sts_bits) -{ - if (!ntb->ops->msg_clear_sts) - return -EINVAL; - - return ntb->ops->msg_clear_sts(ntb, sts_bits); -} - -/** - * ntb_msg_set_mask() - set mask of message register status bits - * @ntb: NTB device context. - * @mask_bits: Mask bits. - * - * Mask the message registers status bits from raising the message event. - * - * Return: Zero on success, otherwise a negative error number. 
- */ -static inline int ntb_msg_set_mask(struct ntb_dev *ntb, u64 mask_bits) -{ - if (!ntb->ops->msg_set_mask) - return -EINVAL; - - return ntb->ops->msg_set_mask(ntb, mask_bits); -} - -/** - * ntb_msg_clear_mask() - clear message registers mask - * @ntb: NTB device context. - * @mask_bits: Mask bits to clear. - * - * Clear bits in the message events mask register. - * - * Return: Zero on success, otherwise a negative error number. - */ -static inline int ntb_msg_clear_mask(struct ntb_dev *ntb, u64 mask_bits) -{ - if (!ntb->ops->msg_clear_mask) - return -EINVAL; - - return ntb->ops->msg_clear_mask(ntb, mask_bits); -} - -/** - * ntb_msg_read() - read inbound message register with specified index - * @ntb: NTB device context. - * @pidx: OUT - Port index of peer device a message retrieved from - * @midx: Message register index - * - * Read data from the specified message register. Source port index of a - * message is retrieved as well. - * - * Return: The value of the inbound message register. - */ -static inline u32 ntb_msg_read(struct ntb_dev *ntb, int *pidx, int midx) -{ - if (!ntb->ops->msg_read) - return ~(u32)0; - - return ntb->ops->msg_read(ntb, pidx, midx); -} - -/** - * ntb_peer_msg_write() - write data to the specified peer message register - * @ntb: NTB device context. - * @pidx: Port index of peer device a message being sent to - * @midx: Message register index - * @msg: Data to send - * - * Send data to a specified peer device using the defined message register. - * Message event can be raised if the midx registers isn't empty while - * calling this method and the corresponding interrupt isn't masked. - * - * Return: Zero on success, otherwise a negative error number. 
- */ -static inline int ntb_peer_msg_write(struct ntb_dev *ntb, int pidx, int midx, - u32 msg) -{ - if (!ntb->ops->peer_msg_write) - return -EINVAL; - - return ntb->ops->peer_msg_write(ntb, pidx, midx, msg); -} - -/** - * ntb_peer_resource_idx() - get a resource index for a given peer idx - * @ntb: NTB device context. - * @pidx: Peer port index. - * - * When constructing a graph of peers, each remote peer must use a different - * resource index (mw, doorbell, etc) to communicate with each other - * peer. - * - * In a two peer system, this function should always return 0 such that - * resource 0 points to the remote peer on both ports. - * - * In a 5 peer system, this function will return the following matrix - * - * pidx \ port 0 1 2 3 4 - * 0 0 0 1 2 3 - * 1 0 1 1 2 3 - * 2 0 1 2 2 3 - * 3 0 1 2 3 3 - * - * For example, if this function is used to program peer's memory - * windows, port 0 will program MW 0 on all it's peers to point to itself. - * port 1 will program MW 0 in port 0 to point to itself and MW 1 on all - * other ports. etc. - * - * For the legacy two host case, ntb_port_number() and ntb_peer_port_number() - * both return zero and therefore this function will always return zero. - * So MW 0 on each host would be programmed to point to the other host. - * - * Return: the resource index to use for that peer. - */ -static inline int ntb_peer_resource_idx(struct ntb_dev *ntb, int pidx) -{ - int local_port, peer_port; - - if (pidx >= ntb_peer_port_count(ntb)) - return -EINVAL; - - local_port = ntb_logical_port_number(ntb); - peer_port = ntb_peer_logical_port_number(ntb, pidx); - - if (peer_port < local_port) - return local_port - 1; - else - return local_port; -} - -/** - * ntb_peer_highest_mw_idx() - get a memory window index for a given peer idx - * using the highest index memory windows first - * - * @ntb: NTB device context. - * @pidx: Peer port index. 
- * - * Like ntb_peer_resource_idx(), except it returns indexes starting with - * last memory window index. - * - * Return: the resource index to use for that peer. - */ -static inline int ntb_peer_highest_mw_idx(struct ntb_dev *ntb, int pidx) -{ - int ret; - - ret = ntb_peer_resource_idx(ntb, pidx); - if (ret < 0) - return ret; - - return ntb_mw_count(ntb, pidx) - ret - 1; -} - -struct ntb_msi_desc { - u32 addr_offset; - u32 data; -}; - -#ifdef CONFIG_NTB_MSI - -int ntb_msi_init(struct ntb_dev *ntb, void (*desc_changed)(void *ctx)); -int ntb_msi_setup_mws(struct ntb_dev *ntb); -void ntb_msi_clear_mws(struct ntb_dev *ntb); -int ntbm_msi_request_threaded_irq(struct ntb_dev *ntb, irq_handler_t handler, - irq_handler_t thread_fn, - const char *name, void *dev_id, - struct ntb_msi_desc *msi_desc); -void ntbm_msi_free_irq(struct ntb_dev *ntb, unsigned int irq, void *dev_id); -int ntb_msi_peer_trigger(struct ntb_dev *ntb, int peer, - struct ntb_msi_desc *desc); -int ntb_msi_peer_addr(struct ntb_dev *ntb, int peer, - struct ntb_msi_desc *desc, - phys_addr_t *msi_addr); - -#else /* not CONFIG_NTB_MSI */ - -static inline int ntb_msi_init(struct ntb_dev *ntb, - void (*desc_changed)(void *ctx)) -{ - return -EOPNOTSUPP; -} -static inline int ntb_msi_setup_mws(struct ntb_dev *ntb) -{ - return -EOPNOTSUPP; -} -static inline void ntb_msi_clear_mws(struct ntb_dev *ntb) {} -static inline int ntbm_msi_request_threaded_irq(struct ntb_dev *ntb, - irq_handler_t handler, - irq_handler_t thread_fn, - const char *name, void *dev_id, - struct ntb_msi_desc *msi_desc) -{ - return -EOPNOTSUPP; -} -static inline void ntbm_msi_free_irq(struct ntb_dev *ntb, unsigned int irq, - void *dev_id) {} -static inline int ntb_msi_peer_trigger(struct ntb_dev *ntb, int peer, - struct ntb_msi_desc *desc) -{ - return -EOPNOTSUPP; -} -static inline int ntb_msi_peer_addr(struct ntb_dev *ntb, int peer, - struct ntb_msi_desc *desc, - phys_addr_t *msi_addr) -{ - return -EOPNOTSUPP; - -} - -#endif /* CONFIG_NTB_MSI 
*/ - -static inline int ntbm_msi_request_irq(struct ntb_dev *ntb, - irq_handler_t handler, - const char *name, void *dev_id, - struct ntb_msi_desc *msi_desc) -{ - return ntbm_msi_request_threaded_irq(ntb, handler, NULL, name, - dev_id, msi_desc); + return ntb->ops->peer_spad_write(ntb, idx, val); } #endif diff --git a/include/linux/nubus.h b/include/linux/nubus.h index 392fc6c53e..6165b2c620 100644 --- a/include/linux/nubus.h +++ b/include/linux/nubus.h @@ -1,40 +1,23 @@ -/* SPDX-License-Identifier: GPL-2.0 */ /* nubus.h: various definitions and prototypes for NuBus drivers to use. Originally written by Alan Cox. Hacked to death by C. Scott Ananian and David Huggins-Daines. -*/ - + + Some of the constants in here are from the corresponding + NetBSD/OpenBSD header file, by Allen Briggs. We figured out the + rest of them on our own. */ #ifndef LINUX_NUBUS_H #define LINUX_NUBUS_H -#include #include #include -struct proc_dir_entry; -struct seq_file; - -struct nubus_dir { - unsigned char *base; - unsigned char *ptr; - int done; - int mask; - struct proc_dir_entry *procdir; -}; - -struct nubus_dirent { - unsigned char *base; - unsigned char type; - __u32 data; /* Actually 24 bits used */ - int mask; -}; - struct nubus_board { - struct device dev; - + struct nubus_board* next; + struct nubus_dev* first_dev; + /* Only 9-E actually exist, though 0-8 are also theoretically possible, and 0 is a special case which represents the motherboard and onboard peripherals (Ethernet, video) */ @@ -43,10 +26,10 @@ struct nubus_board { char name[64]; /* Format block */ - unsigned char *fblock; + unsigned char* fblock; /* Root directory (does *not* always equal fblock + doffset!) 
*/ - unsigned char *directory; - + unsigned char* directory; + unsigned long slot_addr; /* Offset to root directory (sometimes) */ unsigned long doffset; @@ -57,15 +40,15 @@ struct nubus_board { unsigned char rev; unsigned char format; unsigned char lanes; - - /* Directory entry in /proc/bus/nubus */ - struct proc_dir_entry *procdir; }; -struct nubus_rsrc { - struct list_head list; +struct nubus_dev { + /* Next link in device list */ + struct nubus_dev* next; + /* Directory entry in /proc/bus/nubus */ + struct proc_dir_entry* procdir; - /* The functional resource ID */ + /* The functional resource ID of this device */ unsigned char resid; /* These are mostly here for convenience; we could always read them from the ROMs if we wanted to */ @@ -73,116 +56,79 @@ struct nubus_rsrc { unsigned short type; unsigned short dr_sw; unsigned short dr_hw; - + /* This is the device's name rather than the board's. + Sometimes they are different. Usually the board name is + more correct. */ + char name[64]; + /* MacOS driver (I kid you not) */ + unsigned char* driver; + /* Actually this is an offset */ + unsigned long iobase; + unsigned long iosize; + unsigned char flags, hwdevid; + /* Functional directory */ - unsigned char *directory; + unsigned char* directory; /* Much of our info comes from here */ - struct nubus_board *board; + struct nubus_board* board; }; -/* This is all NuBus functional resources (used to find devices later on) */ -extern struct list_head nubus_func_rsrcs; - -struct nubus_driver { - struct device_driver driver; - int (*probe)(struct nubus_board *board); - void (*remove)(struct nubus_board *board); -}; - -extern struct bus_type nubus_bus_type; +/* This is all NuBus devices (used to find devices later on) */ +extern struct nubus_dev* nubus_devices; +/* This is all NuBus cards */ +extern struct nubus_board* nubus_boards; /* Generic NuBus interface functions, modelled after the PCI interface */ +void nubus_scan_bus(void); #ifdef CONFIG_PROC_FS -void 
nubus_proc_init(void); -struct proc_dir_entry *nubus_proc_add_board(struct nubus_board *board); -struct proc_dir_entry *nubus_proc_add_rsrc_dir(struct proc_dir_entry *procdir, - const struct nubus_dirent *ent, - struct nubus_board *board); -void nubus_proc_add_rsrc_mem(struct proc_dir_entry *procdir, - const struct nubus_dirent *ent, - unsigned int size); -void nubus_proc_add_rsrc(struct proc_dir_entry *procdir, - const struct nubus_dirent *ent); +extern void nubus_proc_init(void); #else static inline void nubus_proc_init(void) {} -static inline -struct proc_dir_entry *nubus_proc_add_board(struct nubus_board *board) -{ return NULL; } -static inline -struct proc_dir_entry *nubus_proc_add_rsrc_dir(struct proc_dir_entry *procdir, - const struct nubus_dirent *ent, - struct nubus_board *board) -{ return NULL; } -static inline void nubus_proc_add_rsrc_mem(struct proc_dir_entry *procdir, - const struct nubus_dirent *ent, - unsigned int size) {} -static inline void nubus_proc_add_rsrc(struct proc_dir_entry *procdir, - const struct nubus_dirent *ent) {} #endif - -struct nubus_rsrc *nubus_first_rsrc_or_null(void); -struct nubus_rsrc *nubus_next_rsrc_or_null(struct nubus_rsrc *from); - -#define for_each_func_rsrc(f) \ - for (f = nubus_first_rsrc_or_null(); f; f = nubus_next_rsrc_or_null(f)) - -#define for_each_board_func_rsrc(b, f) \ - for_each_func_rsrc(f) if (f->board != b) {} else +int get_nubus_list(char *buf); +int nubus_proc_attach_device(struct nubus_dev *dev); +/* If we need more precision we can add some more of these */ +struct nubus_dev* nubus_find_device(unsigned short category, + unsigned short type, + unsigned short dr_hw, + unsigned short dr_sw, + const struct nubus_dev* from); +struct nubus_dev* nubus_find_type(unsigned short category, + unsigned short type, + const struct nubus_dev* from); +/* Might have more than one device in a slot, you know... 
*/ +struct nubus_dev* nubus_find_slot(unsigned int slot, + const struct nubus_dev* from); /* These are somewhat more NuBus-specific. They all return 0 for success and -1 for failure, as you'd expect. */ /* The root directory which contains the board and functional directories */ -int nubus_get_root_dir(const struct nubus_board *board, - struct nubus_dir *dir); +int nubus_get_root_dir(const struct nubus_board* board, + struct nubus_dir* dir); /* The board directory */ -int nubus_get_board_dir(const struct nubus_board *board, - struct nubus_dir *dir); +int nubus_get_board_dir(const struct nubus_board* board, + struct nubus_dir* dir); /* The functional directory */ -int nubus_get_func_dir(const struct nubus_rsrc *fres, struct nubus_dir *dir); +int nubus_get_func_dir(const struct nubus_dev* dev, + struct nubus_dir* dir); /* These work on any directory gotten via the above */ -int nubus_readdir(struct nubus_dir *dir, - struct nubus_dirent *ent); -int nubus_find_rsrc(struct nubus_dir *dir, +int nubus_readdir(struct nubus_dir* dir, + struct nubus_dirent* ent); +int nubus_find_rsrc(struct nubus_dir* dir, unsigned char rsrc_type, - struct nubus_dirent *ent); -int nubus_rewinddir(struct nubus_dir *dir); + struct nubus_dirent* ent); +int nubus_rewinddir(struct nubus_dir* dir); /* Things to do with directory entries */ -int nubus_get_subdir(const struct nubus_dirent *ent, - struct nubus_dir *dir); -void nubus_get_rsrc_mem(void *dest, const struct nubus_dirent *dirent, - unsigned int len); -unsigned int nubus_get_rsrc_str(char *dest, const struct nubus_dirent *dirent, - unsigned int len); -void nubus_seq_write_rsrc_mem(struct seq_file *m, - const struct nubus_dirent *dirent, - unsigned int len); -unsigned char *nubus_dirptr(const struct nubus_dirent *nd); - -/* Declarations relating to driver model objects */ -int nubus_parent_device_register(void); -int nubus_device_register(struct nubus_board *board); -int nubus_driver_register(struct nubus_driver *ndrv); -void 
nubus_driver_unregister(struct nubus_driver *ndrv); -int nubus_proc_show(struct seq_file *m, void *data); - -static inline void nubus_set_drvdata(struct nubus_board *board, void *data) -{ - dev_set_drvdata(&board->dev, data); -} - -static inline void *nubus_get_drvdata(struct nubus_board *board) -{ - return dev_get_drvdata(&board->dev); -} - -/* Returns a pointer to the "standard" slot space. */ -static inline void *nubus_slot_addr(int slot) -{ - return (void *)(0xF0000000 | (slot << 24)); -} - +int nubus_get_subdir(const struct nubus_dirent* ent, + struct nubus_dir* dir); +void nubus_get_rsrc_mem(void* dest, + const struct nubus_dirent *dirent, + int len); +void nubus_get_rsrc_str(void* dest, + const struct nubus_dirent *dirent, + int maxlen); #endif /* LINUX_NUBUS_H */ diff --git a/include/linux/numa.h b/include/linux/numa.h index cb44cfe2b7..3aaa31603a 100644 --- a/include/linux/numa.h +++ b/include/linux/numa.h @@ -1,7 +1,6 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_NUMA_H #define _LINUX_NUMA_H -#include + #ifdef CONFIG_NODES_SHIFT #define NODES_SHIFT CONFIG_NODES_SHIFT @@ -13,49 +12,4 @@ #define NUMA_NO_NODE (-1) -/* optionally keep NUMA memory info available post init */ -#ifdef CONFIG_NUMA_KEEP_MEMINFO -#define __initdata_or_meminfo -#else -#define __initdata_or_meminfo __initdata -#endif - -#ifdef CONFIG_NUMA -#include -#include - -/* Generic implementation available */ -int numa_map_to_online_node(int node); - -#ifndef memory_add_physaddr_to_nid -static inline int memory_add_physaddr_to_nid(u64 start) -{ - pr_info_once("Unknown online node for memory at 0x%llx, assuming node 0\n", - start); - return 0; -} -#endif -#ifndef phys_to_target_node -static inline int phys_to_target_node(u64 start) -{ - pr_info_once("Unknown target node for memory at 0x%llx, assuming node 0\n", - start); - return 0; -} -#endif -#else /* !CONFIG_NUMA */ -static inline int numa_map_to_online_node(int node) -{ - return NUMA_NO_NODE; -} -static inline int 
memory_add_physaddr_to_nid(u64 start) -{ - return 0; -} -static inline int phys_to_target_node(u64 start) -{ - return 0; -} -#endif - #endif /* _LINUX_NUMA_H */ diff --git a/include/linux/nvme-rdma.h b/include/linux/nvme-rdma.h index 3ec8e50efa..bf240a3cbf 100644 --- a/include/linux/nvme-rdma.h +++ b/include/linux/nvme-rdma.h @@ -1,6 +1,14 @@ -/* SPDX-License-Identifier: GPL-2.0 */ /* * Copyright (c) 2015 Mellanox Technologies. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. */ #ifndef _LINUX_NVME_RDMA_H @@ -21,30 +29,6 @@ enum nvme_rdma_cm_status { NVME_RDMA_CM_INVALID_ORD = 0x08, }; -static inline const char *nvme_rdma_cm_msg(enum nvme_rdma_cm_status status) -{ - switch (status) { - case NVME_RDMA_CM_INVALID_LEN: - return "invalid length"; - case NVME_RDMA_CM_INVALID_RECFMT: - return "invalid record format"; - case NVME_RDMA_CM_INVALID_QID: - return "invalid queue ID"; - case NVME_RDMA_CM_INVALID_HSQSIZE: - return "invalid host SQ size"; - case NVME_RDMA_CM_INVALID_HRQSIZE: - return "invalid host RQ size"; - case NVME_RDMA_CM_NO_RSC: - return "resource not found"; - case NVME_RDMA_CM_INVALID_IRD: - return "invalid IRD"; - case NVME_RDMA_CM_INVALID_ORD: - return "Invalid ORD"; - default: - return "unrecognized reason"; - } -} - /** * struct nvme_rdma_cm_req - rdma connect request * @@ -77,7 +61,7 @@ struct nvme_rdma_cm_rep { * struct nvme_rdma_cm_rej - rdma connect reject * * @recfmt: format of the RDMA Private Data - * @sts: error status for the associated connect request + * @fsts: error status for the associated connect request */ struct 
nvme_rdma_cm_rej { __le16 recfmt; diff --git a/include/linux/nvme.h b/include/linux/nvme.h index b7c4c4130b..fc3c242065 100644 --- a/include/linux/nvme.h +++ b/include/linux/nvme.h @@ -1,14 +1,21 @@ -/* SPDX-License-Identifier: GPL-2.0 */ /* * Definitions for the NVM Express interface * Copyright (c) 2011-2014, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. */ #ifndef _LINUX_NVME_H #define _LINUX_NVME_H #include -#include /* NQN names in commands fields specified one size */ #define NVMF_NQN_FIELD_LEN 256 @@ -24,8 +31,6 @@ #define NVME_RDMA_IP_PORT 4420 -#define NVME_NSID_ALL 0xffffffff - enum nvme_subsys_type { NVME_NQN_DISC = 1, /* Discovery type target subsystem */ NVME_NQN_NVME = 2, /* NVME type target subsystem */ @@ -38,65 +43,50 @@ enum { NVMF_ADDR_FAMILY_IP6 = 2, /* IP6 */ NVMF_ADDR_FAMILY_IB = 3, /* InfiniBand */ NVMF_ADDR_FAMILY_FC = 4, /* Fibre Channel */ - NVMF_ADDR_FAMILY_LOOP = 254, /* Reserved for host usage */ - NVMF_ADDR_FAMILY_MAX, }; /* Transport Type codes for Discovery Log Page entry TRTYPE field */ enum { NVMF_TRTYPE_RDMA = 1, /* RDMA */ NVMF_TRTYPE_FC = 2, /* Fibre Channel */ - NVMF_TRTYPE_TCP = 3, /* TCP/IP */ NVMF_TRTYPE_LOOP = 254, /* Reserved for host usage */ NVMF_TRTYPE_MAX, }; /* Transport Requirements codes for Discovery Log Page entry TREQ field */ enum { - NVMF_TREQ_NOT_SPECIFIED = 0, /* Not specified */ - NVMF_TREQ_REQUIRED = 1, /* Required */ - NVMF_TREQ_NOT_REQUIRED = 2, /* Not Required */ -#define NVME_TREQ_SECURE_CHANNEL_MASK \ - (NVMF_TREQ_REQUIRED | NVMF_TREQ_NOT_REQUIRED) - - NVMF_TREQ_DISABLE_SQFLOW = 
(1 << 2), /* Supports SQ flow control disable */ + NVMF_TREQ_NOT_SPECIFIED = 0, /* Not specified */ + NVMF_TREQ_REQUIRED = 1, /* Required */ + NVMF_TREQ_NOT_REQUIRED = 2, /* Not Required */ }; /* RDMA QP Service Type codes for Discovery Log Page entry TSAS * RDMA_QPTYPE field */ enum { - NVMF_RDMA_QPTYPE_CONNECTED = 1, /* Reliable Connected */ - NVMF_RDMA_QPTYPE_DATAGRAM = 2, /* Reliable Datagram */ + NVMF_RDMA_QPTYPE_CONNECTED = 0, /* Reliable Connected */ + NVMF_RDMA_QPTYPE_DATAGRAM = 1, /* Reliable Datagram */ }; /* RDMA QP Service Type codes for Discovery Log Page entry TSAS * RDMA_QPTYPE field */ enum { - NVMF_RDMA_PRTYPE_NOT_SPECIFIED = 1, /* No Provider Specified */ - NVMF_RDMA_PRTYPE_IB = 2, /* InfiniBand */ - NVMF_RDMA_PRTYPE_ROCE = 3, /* InfiniBand RoCE */ - NVMF_RDMA_PRTYPE_ROCEV2 = 4, /* InfiniBand RoCEV2 */ - NVMF_RDMA_PRTYPE_IWARP = 5, /* IWARP */ + NVMF_RDMA_PRTYPE_NOT_SPECIFIED = 0, /* No Provider Specified */ + NVMF_RDMA_PRTYPE_IB = 1, /* InfiniBand */ + NVMF_RDMA_PRTYPE_ROCE = 2, /* InfiniBand RoCE */ + NVMF_RDMA_PRTYPE_ROCEV2 = 3, /* InfiniBand RoCEV2 */ + NVMF_RDMA_PRTYPE_IWARP = 4, /* IWARP */ }; /* RDMA Connection Management Service Type codes for Discovery Log Page * entry TSAS RDMA_CMS field */ enum { - NVMF_RDMA_CMS_RDMA_CM = 1, /* Sockets based endpoint addressing */ + NVMF_RDMA_CMS_RDMA_CM = 0, /* Sockets based enpoint addressing */ }; -#define NVME_AQ_DEPTH 32 -#define NVME_NR_AEN_COMMANDS 1 -#define NVME_AQ_BLK_MQ_DEPTH (NVME_AQ_DEPTH - NVME_NR_AEN_COMMANDS) - -/* - * Subtract one to leave an empty queue entry for 'Full Queue' condition. See - * NVM-Express 1.2 specification, section 4.1.2. 
- */ -#define NVME_AQ_MQ_TAG_DEPTH (NVME_AQ_BLK_MQ_DEPTH - 1) +#define NVMF_AQ_DEPTH 32 enum { NVME_REG_CAP = 0x0000, /* Controller Capabilities */ @@ -109,95 +99,55 @@ enum { NVME_REG_AQA = 0x0024, /* Admin Queue Attributes */ NVME_REG_ASQ = 0x0028, /* Admin SQ Base Address */ NVME_REG_ACQ = 0x0030, /* Admin CQ Base Address */ - NVME_REG_CMBLOC = 0x0038, /* Controller Memory Buffer Location */ + NVME_REG_CMBLOC = 0x0038, /* Controller Memory Buffer Location */ NVME_REG_CMBSZ = 0x003c, /* Controller Memory Buffer Size */ - NVME_REG_BPINFO = 0x0040, /* Boot Partition Information */ - NVME_REG_BPRSEL = 0x0044, /* Boot Partition Read Select */ - NVME_REG_BPMBL = 0x0048, /* Boot Partition Memory Buffer - * Location - */ - NVME_REG_CMBMSC = 0x0050, /* Controller Memory Buffer Memory - * Space Control - */ - NVME_REG_PMRCAP = 0x0e00, /* Persistent Memory Capabilities */ - NVME_REG_PMRCTL = 0x0e04, /* Persistent Memory Region Control */ - NVME_REG_PMRSTS = 0x0e08, /* Persistent Memory Region Status */ - NVME_REG_PMREBS = 0x0e0c, /* Persistent Memory Region Elasticity - * Buffer Size - */ - NVME_REG_PMRSWTP = 0x0e10, /* Persistent Memory Region Sustained - * Write Throughput - */ - NVME_REG_DBS = 0x1000, /* SQ 0 Tail Doorbell */ }; #define NVME_CAP_MQES(cap) ((cap) & 0xffff) #define NVME_CAP_TIMEOUT(cap) (((cap) >> 24) & 0xff) #define NVME_CAP_STRIDE(cap) (((cap) >> 32) & 0xf) #define NVME_CAP_NSSRC(cap) (((cap) >> 36) & 0x1) -#define NVME_CAP_CSS(cap) (((cap) >> 37) & 0xff) #define NVME_CAP_MPSMIN(cap) (((cap) >> 48) & 0xf) #define NVME_CAP_MPSMAX(cap) (((cap) >> 52) & 0xf) -#define NVME_CAP_CMBS(cap) (((cap) >> 57) & 0x1) #define NVME_CMB_BIR(cmbloc) ((cmbloc) & 0x7) #define NVME_CMB_OFST(cmbloc) (((cmbloc) >> 12) & 0xfffff) +#define NVME_CMB_SZ(cmbsz) (((cmbsz) >> 12) & 0xfffff) +#define NVME_CMB_SZU(cmbsz) (((cmbsz) >> 8) & 0xf) -enum { - NVME_CMBSZ_SQS = 1 << 0, - NVME_CMBSZ_CQS = 1 << 1, - NVME_CMBSZ_LISTS = 1 << 2, - NVME_CMBSZ_RDS = 1 << 3, - NVME_CMBSZ_WDS = 1 << 
4, - - NVME_CMBSZ_SZ_SHIFT = 12, - NVME_CMBSZ_SZ_MASK = 0xfffff, - - NVME_CMBSZ_SZU_SHIFT = 8, - NVME_CMBSZ_SZU_MASK = 0xf, -}; +#define NVME_CMB_WDS(cmbsz) ((cmbsz) & 0x10) +#define NVME_CMB_RDS(cmbsz) ((cmbsz) & 0x8) +#define NVME_CMB_LISTS(cmbsz) ((cmbsz) & 0x4) +#define NVME_CMB_CQS(cmbsz) ((cmbsz) & 0x2) +#define NVME_CMB_SQS(cmbsz) ((cmbsz) & 0x1) /* * Submission and Completion Queue Entry Sizes for the NVM command set. * (In bytes and specified as a power of two (2^n)). */ -#define NVME_ADM_SQES 6 #define NVME_NVM_IOSQES 6 #define NVME_NVM_IOCQES 4 enum { NVME_CC_ENABLE = 1 << 0, - NVME_CC_EN_SHIFT = 0, - NVME_CC_CSS_SHIFT = 4, + NVME_CC_CSS_NVM = 0 << 4, NVME_CC_MPS_SHIFT = 7, - NVME_CC_AMS_SHIFT = 11, - NVME_CC_SHN_SHIFT = 14, - NVME_CC_IOSQES_SHIFT = 16, - NVME_CC_IOCQES_SHIFT = 20, - NVME_CC_CSS_NVM = 0 << NVME_CC_CSS_SHIFT, - NVME_CC_CSS_CSI = 6 << NVME_CC_CSS_SHIFT, - NVME_CC_CSS_MASK = 7 << NVME_CC_CSS_SHIFT, - NVME_CC_AMS_RR = 0 << NVME_CC_AMS_SHIFT, - NVME_CC_AMS_WRRU = 1 << NVME_CC_AMS_SHIFT, - NVME_CC_AMS_VS = 7 << NVME_CC_AMS_SHIFT, - NVME_CC_SHN_NONE = 0 << NVME_CC_SHN_SHIFT, - NVME_CC_SHN_NORMAL = 1 << NVME_CC_SHN_SHIFT, - NVME_CC_SHN_ABRUPT = 2 << NVME_CC_SHN_SHIFT, - NVME_CC_SHN_MASK = 3 << NVME_CC_SHN_SHIFT, - NVME_CC_IOSQES = NVME_NVM_IOSQES << NVME_CC_IOSQES_SHIFT, - NVME_CC_IOCQES = NVME_NVM_IOCQES << NVME_CC_IOCQES_SHIFT, - NVME_CAP_CSS_NVM = 1 << 0, - NVME_CAP_CSS_CSI = 1 << 6, + NVME_CC_ARB_RR = 0 << 11, + NVME_CC_ARB_WRRU = 1 << 11, + NVME_CC_ARB_VS = 7 << 11, + NVME_CC_SHN_NONE = 0 << 14, + NVME_CC_SHN_NORMAL = 1 << 14, + NVME_CC_SHN_ABRUPT = 2 << 14, + NVME_CC_SHN_MASK = 3 << 14, + NVME_CC_IOSQES = NVME_NVM_IOSQES << 16, + NVME_CC_IOCQES = NVME_NVM_IOCQES << 20, NVME_CSTS_RDY = 1 << 0, NVME_CSTS_CFS = 1 << 1, NVME_CSTS_NSSRO = 1 << 4, - NVME_CSTS_PP = 1 << 5, NVME_CSTS_SHST_NORMAL = 0 << 2, NVME_CSTS_SHST_OCCUR = 1 << 2, NVME_CSTS_SHST_CMPLT = 2 << 2, NVME_CSTS_SHST_MASK = 3 << 2, - NVME_CMBMSC_CRE = 1 << 0, - NVME_CMBMSC_CMSE = 1 
<< 1, }; struct nvme_id_power_state { @@ -223,11 +173,6 @@ enum { NVME_PS_FLAGS_NON_OP_STATE = 1 << 1, }; -enum nvme_ctrl_attr { - NVME_CTRL_ATTR_HID_128_BIT = (1 << 0), - NVME_CTRL_ATTR_TBKAS = (1 << 6), -}; - struct nvme_id_ctrl { __le16 vid; __le16 ssvid; @@ -244,11 +189,7 @@ struct nvme_id_ctrl { __le32 rtd3e; __le32 oaes; __le32 ctratt; - __u8 rsvd100[28]; - __le16 crdt1; - __le16 crdt2; - __le16 crdt3; - __u8 rsvd134[122]; + __u8 rsvd100[156]; __le16 oacs; __u8 acl; __u8 aerl; @@ -266,22 +207,9 @@ struct nvme_id_ctrl { __u8 tnvmcap[16]; __u8 unvmcap[16]; __le32 rpmbs; - __le16 edstt; - __u8 dsto; - __u8 fwug; + __u8 rsvd316[4]; __le16 kas; - __le16 hctma; - __le16 mntmt; - __le16 mxtmt; - __le32 sanicap; - __le32 hmminds; - __le16 hmmaxd; - __u8 rsvd338[4]; - __u8 anatt; - __u8 anacap; - __le32 anagrpmax; - __le32 nanagrpid; - __u8 rsvd352[160]; + __u8 rsvd322[190]; __u8 sqes; __u8 cqes; __le16 maxcmd; @@ -293,12 +221,11 @@ struct nvme_id_ctrl { __le16 awun; __le16 awupf; __u8 nvscc; - __u8 nwpc; + __u8 rsvd531; __le16 acwu; __u8 rsvd534[2]; __le32 sgls; - __le32 mnan; - __u8 rsvd544[224]; + __u8 rsvd540[228]; char subnqn[256]; __u8 rsvd1024[768]; __le32 ioccsz; @@ -312,27 +239,10 @@ struct nvme_id_ctrl { }; enum { - NVME_CTRL_CMIC_MULTI_CTRL = 1 << 1, - NVME_CTRL_CMIC_ANA = 1 << 3, NVME_CTRL_ONCS_COMPARE = 1 << 0, NVME_CTRL_ONCS_WRITE_UNCORRECTABLE = 1 << 1, NVME_CTRL_ONCS_DSM = 1 << 2, - NVME_CTRL_ONCS_WRITE_ZEROES = 1 << 3, - NVME_CTRL_ONCS_RESERVATIONS = 1 << 5, - NVME_CTRL_ONCS_TIMESTAMP = 1 << 6, NVME_CTRL_VWC_PRESENT = 1 << 0, - NVME_CTRL_OACS_SEC_SUPP = 1 << 0, - NVME_CTRL_OACS_DIRECTIVES = 1 << 5, - NVME_CTRL_OACS_DBBUF_SUPP = 1 << 8, - NVME_CTRL_LPA_CMD_EFFECTS_LOG = 1 << 1, - NVME_CTRL_CTRATT_128_ID = 1 << 0, - NVME_CTRL_CTRATT_NON_OP_PSP = 1 << 1, - NVME_CTRL_CTRATT_NVM_SETS = 1 << 2, - NVME_CTRL_CTRATT_READ_RECV_LVLS = 1 << 3, - NVME_CTRL_CTRATT_ENDURANCE_GROUPS = 1 << 4, - NVME_CTRL_CTRATT_PREDICTABLE_LAT = 1 << 5, - 
NVME_CTRL_CTRATT_NAMESPACE_GRANULARITY = 1 << 7, - NVME_CTRL_CTRATT_UUID_LIST = 1 << 9, }; struct nvme_lbaf { @@ -354,26 +264,16 @@ struct nvme_id_ns { __u8 nmic; __u8 rescap; __u8 fpi; - __u8 dlfeat; + __u8 rsvd33; __le16 nawun; __le16 nawupf; __le16 nacwu; __le16 nabsn; __le16 nabo; __le16 nabspf; - __le16 noiob; + __u16 rsvd46; __u8 nvmcap[16]; - __le16 npwg; - __le16 npwa; - __le16 npdg; - __le16 npda; - __le16 nows; - __u8 rsvd74[18]; - __le32 anagrpid; - __u8 rsvd96[3]; - __u8 nsattr; - __le16 nvmsetid; - __le16 endgid; + __u8 rsvd64[40]; __u8 nguid[16]; __u8 eui64[8]; struct nvme_lbaf lbaf[16]; @@ -381,82 +281,20 @@ struct nvme_id_ns { __u8 vs[3712]; }; -struct nvme_zns_lbafe { - __le64 zsze; - __u8 zdes; - __u8 rsvd9[7]; -}; - -struct nvme_id_ns_zns { - __le16 zoc; - __le16 ozcs; - __le32 mar; - __le32 mor; - __le32 rrl; - __le32 frl; - __u8 rsvd20[2796]; - struct nvme_zns_lbafe lbafe[16]; - __u8 rsvd3072[768]; - __u8 vs[256]; -}; - -struct nvme_id_ctrl_zns { - __u8 zasl; - __u8 rsvd1[4095]; -}; - -struct nvme_id_ctrl_nvm { - __u8 vsl; - __u8 wzsl; - __u8 wusl; - __u8 dmrl; - __le32 dmrsl; - __le64 dmsl; - __u8 rsvd16[4080]; -}; - enum { NVME_ID_CNS_NS = 0x00, NVME_ID_CNS_CTRL = 0x01, NVME_ID_CNS_NS_ACTIVE_LIST = 0x02, - NVME_ID_CNS_NS_DESC_LIST = 0x03, - NVME_ID_CNS_CS_NS = 0x05, - NVME_ID_CNS_CS_CTRL = 0x06, NVME_ID_CNS_NS_PRESENT_LIST = 0x10, NVME_ID_CNS_NS_PRESENT = 0x11, NVME_ID_CNS_CTRL_NS_LIST = 0x12, NVME_ID_CNS_CTRL_LIST = 0x13, - NVME_ID_CNS_SCNDRY_CTRL_LIST = 0x15, - NVME_ID_CNS_NS_GRANULARITY = 0x16, - NVME_ID_CNS_UUID_LIST = 0x17, -}; - -enum { - NVME_CSI_NVM = 0, - NVME_CSI_ZNS = 2, -}; - -enum { - NVME_DIR_IDENTIFY = 0x00, - NVME_DIR_STREAMS = 0x01, - NVME_DIR_SND_ID_OP_ENABLE = 0x01, - NVME_DIR_SND_ST_OP_REL_ID = 0x01, - NVME_DIR_SND_ST_OP_REL_RSC = 0x02, - NVME_DIR_RCV_ID_OP_PARAM = 0x01, - NVME_DIR_RCV_ST_OP_PARAM = 0x01, - NVME_DIR_RCV_ST_OP_STATUS = 0x02, - NVME_DIR_RCV_ST_OP_RESOURCE = 0x03, - NVME_DIR_ENDIR = 0x01, }; enum { 
NVME_NS_FEAT_THIN = 1 << 0, - NVME_NS_FEAT_ATOMICS = 1 << 1, - NVME_NS_FEAT_IO_OPT = 1 << 4, - NVME_NS_ATTR_RO = 1 << 0, NVME_NS_FLBAS_LBA_MASK = 0xf, NVME_NS_FLBAS_META_EXT = 0x10, - NVME_NS_NMIC_SHARED = 1 << 0, NVME_LBAF_RP_BEST = 0, NVME_LBAF_RP_BETTER = 1, NVME_LBAF_RP_GOOD = 2, @@ -473,38 +311,13 @@ enum { NVME_NS_DPS_PI_TYPE3 = 3, }; -/* Identify Namespace Metadata Capabilities (MC): */ -enum { - NVME_MC_EXTENDED_LBA = (1 << 0), - NVME_MC_METADATA_PTR = (1 << 1), -}; - -struct nvme_ns_id_desc { - __u8 nidt; - __u8 nidl; - __le16 reserved; -}; - -#define NVME_NIDT_EUI64_LEN 8 -#define NVME_NIDT_NGUID_LEN 16 -#define NVME_NIDT_UUID_LEN 16 -#define NVME_NIDT_CSI_LEN 1 - -enum { - NVME_NIDT_EUI64 = 0x01, - NVME_NIDT_NGUID = 0x02, - NVME_NIDT_UUID = 0x03, - NVME_NIDT_CSI = 0x04, -}; - struct nvme_smart_log { __u8 critical_warning; __u8 temperature[2]; __u8 avail_spare; __u8 spare_thresh; __u8 percent_used; - __u8 endu_grp_crit_warn_sumry; - __u8 rsvd7[25]; + __u8 rsvd6[26]; __u8 data_units_read[16]; __u8 data_units_written[16]; __u8 host_reads[16]; @@ -518,81 +331,7 @@ struct nvme_smart_log { __le32 warning_temp_time; __le32 critical_comp_time; __le16 temp_sensor[8]; - __le32 thm_temp1_trans_count; - __le32 thm_temp2_trans_count; - __le32 thm_temp1_total_time; - __le32 thm_temp2_total_time; - __u8 rsvd232[280]; -}; - -struct nvme_fw_slot_info_log { - __u8 afi; - __u8 rsvd1[7]; - __le64 frs[7]; - __u8 rsvd64[448]; -}; - -enum { - NVME_CMD_EFFECTS_CSUPP = 1 << 0, - NVME_CMD_EFFECTS_LBCC = 1 << 1, - NVME_CMD_EFFECTS_NCC = 1 << 2, - NVME_CMD_EFFECTS_NIC = 1 << 3, - NVME_CMD_EFFECTS_CCC = 1 << 4, - NVME_CMD_EFFECTS_CSE_MASK = 3 << 16, - NVME_CMD_EFFECTS_UUID_SEL = 1 << 19, -}; - -struct nvme_effects_log { - __le32 acs[256]; - __le32 iocs[256]; - __u8 resv[2048]; -}; - -enum nvme_ana_state { - NVME_ANA_OPTIMIZED = 0x01, - NVME_ANA_NONOPTIMIZED = 0x02, - NVME_ANA_INACCESSIBLE = 0x03, - NVME_ANA_PERSISTENT_LOSS = 0x04, - NVME_ANA_CHANGE = 0x0f, -}; - -struct 
nvme_ana_group_desc { - __le32 grpid; - __le32 nnsids; - __le64 chgcnt; - __u8 state; - __u8 rsvd17[15]; - __le32 nsids[]; -}; - -/* flag for the log specific field of the ANA log */ -#define NVME_ANA_LOG_RGO (1 << 0) - -struct nvme_ana_rsp_hdr { - __le64 chgcnt; - __le16 ngrps; - __le16 rsvd10[3]; -}; - -struct nvme_zone_descriptor { - __u8 zt; - __u8 zs; - __u8 za; - __u8 rsvd3[5]; - __le64 zcap; - __le64 zslba; - __le64 wp; - __u8 rsvd32[32]; -}; - -enum { - NVME_ZONE_TYPE_SEQWRITE_REQ = 0x2, -}; - -struct nvme_zone_report { - __le64 nr_zones; - __u8 resv8[56]; - struct nvme_zone_descriptor entries[]; + __u8 rsvd216[296]; }; enum { @@ -604,40 +343,15 @@ enum { }; enum { - NVME_AER_ERROR = 0, - NVME_AER_SMART = 1, - NVME_AER_NOTICE = 2, - NVME_AER_CSS = 6, - NVME_AER_VS = 7, -}; - -enum { - NVME_AER_NOTICE_NS_CHANGED = 0x00, - NVME_AER_NOTICE_FW_ACT_STARTING = 0x01, - NVME_AER_NOTICE_ANA = 0x03, - NVME_AER_NOTICE_DISC_CHANGED = 0xf0, -}; - -enum { - NVME_AEN_BIT_NS_ATTR = 8, - NVME_AEN_BIT_FW_ACT = 9, - NVME_AEN_BIT_ANA_CHANGE = 11, - NVME_AEN_BIT_DISC_CHANGE = 31, -}; - -enum { - NVME_AEN_CFG_NS_ATTR = 1 << NVME_AEN_BIT_NS_ATTR, - NVME_AEN_CFG_FW_ACT = 1 << NVME_AEN_BIT_FW_ACT, - NVME_AEN_CFG_ANA_CHANGE = 1 << NVME_AEN_BIT_ANA_CHANGE, - NVME_AEN_CFG_DISC_CHANGE = 1 << NVME_AEN_BIT_DISC_CHANGE, + NVME_AER_NOTICE_NS_CHANGED = 0x0002, }; struct nvme_lba_range_type { __u8 type; __u8 attributes; __u8 rsvd2[14]; - __le64 slba; - __le64 nlb; + __u64 slba; + __u64 nlb; __u8 guid[16]; __u8 rsvd48[16]; }; @@ -684,49 +398,23 @@ enum nvme_opcode { nvme_cmd_compare = 0x05, nvme_cmd_write_zeroes = 0x08, nvme_cmd_dsm = 0x09, - nvme_cmd_verify = 0x0c, nvme_cmd_resv_register = 0x0d, nvme_cmd_resv_report = 0x0e, nvme_cmd_resv_acquire = 0x11, nvme_cmd_resv_release = 0x15, - nvme_cmd_zone_mgmt_send = 0x79, - nvme_cmd_zone_mgmt_recv = 0x7a, - nvme_cmd_zone_append = 0x7d, }; -#define nvme_opcode_name(opcode) { opcode, #opcode } -#define show_nvm_opcode_name(val) \ - 
__print_symbolic(val, \ - nvme_opcode_name(nvme_cmd_flush), \ - nvme_opcode_name(nvme_cmd_write), \ - nvme_opcode_name(nvme_cmd_read), \ - nvme_opcode_name(nvme_cmd_write_uncor), \ - nvme_opcode_name(nvme_cmd_compare), \ - nvme_opcode_name(nvme_cmd_write_zeroes), \ - nvme_opcode_name(nvme_cmd_dsm), \ - nvme_opcode_name(nvme_cmd_resv_register), \ - nvme_opcode_name(nvme_cmd_resv_report), \ - nvme_opcode_name(nvme_cmd_resv_acquire), \ - nvme_opcode_name(nvme_cmd_resv_release), \ - nvme_opcode_name(nvme_cmd_zone_mgmt_send), \ - nvme_opcode_name(nvme_cmd_zone_mgmt_recv), \ - nvme_opcode_name(nvme_cmd_zone_append)) - - - /* * Descriptor subtype - lower 4 bits of nvme_(keyed_)sgl_desc identifier * * @NVME_SGL_FMT_ADDRESS: absolute address of the data block * @NVME_SGL_FMT_OFFSET: relative offset of the in-capsule data block - * @NVME_SGL_FMT_TRANSPORT_A: transport defined format, value 0xA * @NVME_SGL_FMT_INVALIDATE: RDMA transport specific remote invalidation * request subtype */ enum { NVME_SGL_FMT_ADDRESS = 0x00, NVME_SGL_FMT_OFFSET = 0x01, - NVME_SGL_FMT_TRANSPORT_A = 0x0A, NVME_SGL_FMT_INVALIDATE = 0x0f, }; @@ -740,16 +428,12 @@ enum { * * For struct nvme_keyed_sgl_desc: * @NVME_KEY_SGL_FMT_DATA_DESC: keyed data block descriptor - * - * Transport-specific SGL types: - * @NVME_TRANSPORT_SGL_DATA_DESC: Transport SGL data dlock descriptor */ enum { NVME_SGL_FMT_DATA_DESC = 0x00, NVME_SGL_FMT_SEG_DESC = 0x02, NVME_SGL_FMT_LAST_SEG_DESC = 0x03, NVME_KEY_SGL_FMT_DATA_DESC = 0x04, - NVME_TRANSPORT_SGL_DATA_DESC = 0x05, }; struct nvme_sgl_desc { @@ -806,12 +490,7 @@ struct nvme_common_command { __le32 cdw2[2]; __le64 metadata; union nvme_data_ptr dptr; - __le32 cdw10; - __le32 cdw11; - __le32 cdw12; - __le32 cdw13; - __le32 cdw14; - __le32 cdw15; + __le32 cdw10[6]; }; struct nvme_rw_command { @@ -834,7 +513,6 @@ struct nvme_rw_command { enum { NVME_RW_LR = 1 << 15, NVME_RW_FUA = 1 << 14, - NVME_RW_APPEND_PIREMAP = 1 << 9, NVME_RW_DSM_FREQ_UNSPEC = 0, 
NVME_RW_DSM_FREQ_TYPICAL = 1, NVME_RW_DSM_FREQ_RARE = 2, @@ -854,7 +532,6 @@ enum { NVME_RW_PRINFO_PRCHK_APP = 1 << 11, NVME_RW_PRINFO_PRCHK_GUARD = 1 << 12, NVME_RW_PRINFO_PRACT = 1 << 13, - NVME_RW_DTYPE_STREAMS = 1 << 4, }; struct nvme_dsm_cmd { @@ -875,111 +552,12 @@ enum { NVME_DSMGMT_AD = 1 << 2, }; -#define NVME_DSM_MAX_RANGES 256 - struct nvme_dsm_range { __le32 cattr; __le32 nlb; __le64 slba; }; -struct nvme_write_zeroes_cmd { - __u8 opcode; - __u8 flags; - __u16 command_id; - __le32 nsid; - __u64 rsvd2; - __le64 metadata; - union nvme_data_ptr dptr; - __le64 slba; - __le16 length; - __le16 control; - __le32 dsmgmt; - __le32 reftag; - __le16 apptag; - __le16 appmask; -}; - -enum nvme_zone_mgmt_action { - NVME_ZONE_CLOSE = 0x1, - NVME_ZONE_FINISH = 0x2, - NVME_ZONE_OPEN = 0x3, - NVME_ZONE_RESET = 0x4, - NVME_ZONE_OFFLINE = 0x5, - NVME_ZONE_SET_DESC_EXT = 0x10, -}; - -struct nvme_zone_mgmt_send_cmd { - __u8 opcode; - __u8 flags; - __u16 command_id; - __le32 nsid; - __le32 cdw2[2]; - __le64 metadata; - union nvme_data_ptr dptr; - __le64 slba; - __le32 cdw12; - __u8 zsa; - __u8 select_all; - __u8 rsvd13[2]; - __le32 cdw14[2]; -}; - -struct nvme_zone_mgmt_recv_cmd { - __u8 opcode; - __u8 flags; - __u16 command_id; - __le32 nsid; - __le64 rsvd2[2]; - union nvme_data_ptr dptr; - __le64 slba; - __le32 numd; - __u8 zra; - __u8 zrasf; - __u8 pr; - __u8 rsvd13; - __le32 cdw14[2]; -}; - -enum { - NVME_ZRA_ZONE_REPORT = 0, - NVME_ZRASF_ZONE_REPORT_ALL = 0, - NVME_ZRASF_ZONE_STATE_EMPTY = 0x01, - NVME_ZRASF_ZONE_STATE_IMP_OPEN = 0x02, - NVME_ZRASF_ZONE_STATE_EXP_OPEN = 0x03, - NVME_ZRASF_ZONE_STATE_CLOSED = 0x04, - NVME_ZRASF_ZONE_STATE_READONLY = 0x05, - NVME_ZRASF_ZONE_STATE_FULL = 0x06, - NVME_ZRASF_ZONE_STATE_OFFLINE = 0x07, - NVME_REPORT_ZONE_PARTIAL = 1, -}; - -/* Features */ - -enum { - NVME_TEMP_THRESH_MASK = 0xffff, - NVME_TEMP_THRESH_SELECT_SHIFT = 16, - NVME_TEMP_THRESH_TYPE_UNDER = 0x100000, -}; - -struct nvme_feat_auto_pst { - __le64 entries[32]; -}; - 
-enum { - NVME_HOST_MEM_ENABLE = (1 << 0), - NVME_HOST_MEM_RETURN = (1 << 1), -}; - -struct nvme_feat_host_behavior { - __u8 acre; - __u8 resv1[511]; -}; - -enum { - NVME_ENABLE_ACRE = 1, -}; - /* Admin commands */ enum nvme_admin_opcode { @@ -996,50 +574,13 @@ enum nvme_admin_opcode { nvme_admin_ns_mgmt = 0x0d, nvme_admin_activate_fw = 0x10, nvme_admin_download_fw = 0x11, - nvme_admin_dev_self_test = 0x14, nvme_admin_ns_attach = 0x15, nvme_admin_keep_alive = 0x18, - nvme_admin_directive_send = 0x19, - nvme_admin_directive_recv = 0x1a, - nvme_admin_virtual_mgmt = 0x1c, - nvme_admin_nvme_mi_send = 0x1d, - nvme_admin_nvme_mi_recv = 0x1e, - nvme_admin_dbbuf = 0x7C, nvme_admin_format_nvm = 0x80, nvme_admin_security_send = 0x81, nvme_admin_security_recv = 0x82, - nvme_admin_sanitize_nvm = 0x84, - nvme_admin_get_lba_status = 0x86, - nvme_admin_vendor_start = 0xC0, }; -#define nvme_admin_opcode_name(opcode) { opcode, #opcode } -#define show_admin_opcode_name(val) \ - __print_symbolic(val, \ - nvme_admin_opcode_name(nvme_admin_delete_sq), \ - nvme_admin_opcode_name(nvme_admin_create_sq), \ - nvme_admin_opcode_name(nvme_admin_get_log_page), \ - nvme_admin_opcode_name(nvme_admin_delete_cq), \ - nvme_admin_opcode_name(nvme_admin_create_cq), \ - nvme_admin_opcode_name(nvme_admin_identify), \ - nvme_admin_opcode_name(nvme_admin_abort_cmd), \ - nvme_admin_opcode_name(nvme_admin_set_features), \ - nvme_admin_opcode_name(nvme_admin_get_features), \ - nvme_admin_opcode_name(nvme_admin_async_event), \ - nvme_admin_opcode_name(nvme_admin_ns_mgmt), \ - nvme_admin_opcode_name(nvme_admin_activate_fw), \ - nvme_admin_opcode_name(nvme_admin_download_fw), \ - nvme_admin_opcode_name(nvme_admin_ns_attach), \ - nvme_admin_opcode_name(nvme_admin_keep_alive), \ - nvme_admin_opcode_name(nvme_admin_directive_send), \ - nvme_admin_opcode_name(nvme_admin_directive_recv), \ - nvme_admin_opcode_name(nvme_admin_dbbuf), \ - nvme_admin_opcode_name(nvme_admin_format_nvm), \ - 
nvme_admin_opcode_name(nvme_admin_security_send), \ - nvme_admin_opcode_name(nvme_admin_security_recv), \ - nvme_admin_opcode_name(nvme_admin_sanitize_nvm), \ - nvme_admin_opcode_name(nvme_admin_get_lba_status)) - enum { NVME_QUEUE_PHYS_CONTIG = (1 << 0), NVME_CQ_IRQ_ENABLED = (1 << 1), @@ -1060,32 +601,14 @@ enum { NVME_FEAT_ASYNC_EVENT = 0x0b, NVME_FEAT_AUTO_PST = 0x0c, NVME_FEAT_HOST_MEM_BUF = 0x0d, - NVME_FEAT_TIMESTAMP = 0x0e, NVME_FEAT_KATO = 0x0f, - NVME_FEAT_HCTM = 0x10, - NVME_FEAT_NOPSC = 0x11, - NVME_FEAT_RRL = 0x12, - NVME_FEAT_PLM_CONFIG = 0x13, - NVME_FEAT_PLM_WINDOW = 0x14, - NVME_FEAT_HOST_BEHAVIOR = 0x16, - NVME_FEAT_SANITIZE = 0x17, NVME_FEAT_SW_PROGRESS = 0x80, NVME_FEAT_HOST_ID = 0x81, NVME_FEAT_RESV_MASK = 0x82, NVME_FEAT_RESV_PERSIST = 0x83, - NVME_FEAT_WRITE_PROTECT = 0x84, - NVME_FEAT_VENDOR_START = 0xC0, - NVME_FEAT_VENDOR_END = 0xFF, NVME_LOG_ERROR = 0x01, NVME_LOG_SMART = 0x02, NVME_LOG_FW_SLOT = 0x03, - NVME_LOG_CHANGED_NS = 0x04, - NVME_LOG_CMD_EFFECTS = 0x05, - NVME_LOG_DEVICE_SELF_TEST = 0x06, - NVME_LOG_TELEMETRY_HOST = 0x07, - NVME_LOG_TELEMETRY_CTRL = 0x08, - NVME_LOG_ENDURANCE_GROUP = 0x09, - NVME_LOG_ANA = 0x0c, NVME_LOG_DISC = 0x70, NVME_LOG_RESERVATION = 0x80, NVME_FWACT_REPL = (0 << 3), @@ -1093,16 +616,6 @@ enum { NVME_FWACT_ACTV = (2 << 3), }; -/* NVMe Namespace Write Protect State */ -enum { - NVME_NS_NO_WRITE_PROTECT = 0, - NVME_NS_WRITE_PROTECT, - NVME_NS_WRITE_PROTECT_POWER_CYCLE, - NVME_NS_WRITE_PROTECT_PERMANENT, -}; - -#define NVME_MAX_CHANGED_NAMESPACES 1024 - struct nvme_identify { __u8 opcode; __u8 flags; @@ -1110,16 +623,10 @@ struct nvme_identify { __le32 nsid; __u64 rsvd2[2]; union nvme_data_ptr dptr; - __u8 cns; - __u8 rsvd3; - __le16 ctrlid; - __u8 rsvd11[3]; - __u8 csi; - __u32 rsvd12[4]; + __le32 cns; + __u32 rsvd11[5]; }; -#define NVME_IDENTIFY_DATA_SIZE 4096 - struct nvme_features { __u8 opcode; __u8 flags; @@ -1129,16 +636,7 @@ struct nvme_features { union nvme_data_ptr dptr; __le32 fid; __le32 dword11; - 
__le32 dword12; - __le32 dword13; - __le32 dword14; - __le32 dword15; -}; - -struct nvme_host_mem_buf_desc { - __le64 addr; - __le32 size; - __u32 rsvd; + __u32 rsvd12[4]; }; struct nvme_create_cq { @@ -1218,38 +716,13 @@ struct nvme_get_log_page_command { __u64 rsvd2[2]; union nvme_data_ptr dptr; __u8 lid; - __u8 lsp; /* upper 4 bits reserved */ + __u8 rsvd10; __le16 numdl; __le16 numdu; __u16 rsvd11; - union { - struct { - __le32 lpol; - __le32 lpou; - }; - __le64 lpo; - }; - __u8 rsvd14[3]; - __u8 csi; - __u32 rsvd15; -}; - -struct nvme_directive_cmd { - __u8 opcode; - __u8 flags; - __u16 command_id; - __le32 nsid; - __u64 rsvd2[2]; - union nvme_data_ptr dptr; - __le32 numd; - __u8 doper; - __u8 dtype; - __le16 dspec; - __u8 endir; - __u8 tdtype; - __u16 rsvd15; - - __u32 rsvd16[3]; + __le32 lpol; + __le32 lpou; + __u32 rsvd14[2]; }; /* @@ -1265,23 +738,6 @@ enum nvmf_capsule_command { nvme_fabrics_type_property_get = 0x04, }; -#define nvme_fabrics_type_name(type) { type, #type } -#define show_fabrics_type_name(type) \ - __print_symbolic(type, \ - nvme_fabrics_type_name(nvme_fabrics_type_property_set), \ - nvme_fabrics_type_name(nvme_fabrics_type_connect), \ - nvme_fabrics_type_name(nvme_fabrics_type_property_get)) - -/* - * If not fabrics command, fctype will be ignored. - */ -#define show_opcode_name(qid, opcode, fctype) \ - ((opcode) == nvme_fabrics_command ? \ - show_fabrics_type_name(fctype) : \ - ((qid) ? 
\ - show_nvm_opcode_name(opcode) : \ - show_admin_opcode_name(opcode))) - struct nvmf_common_command { __u8 opcode; __u8 resv1; @@ -1336,11 +792,7 @@ struct nvmf_disc_rsp_page_hdr { __le64 numrec; __le16 recfmt; __u8 resv14[1006]; - struct nvmf_disc_rsp_page_entry entries[]; -}; - -enum { - NVME_CONNECT_DISABLE_SQFLOW = (1 << 2), + struct nvmf_disc_rsp_page_entry entries[0]; }; struct nvmf_connect_command { @@ -1360,7 +812,7 @@ struct nvmf_connect_command { }; struct nvmf_connect_data { - uuid_t hostid; + __u8 hostid[16]; __le16 cntlid; char resv4[238]; char subsysnqn[NVMF_NQN_FIELD_LEN]; @@ -1393,28 +845,6 @@ struct nvmf_property_get_command { __u8 resv4[16]; }; -struct nvme_dbbuf { - __u8 opcode; - __u8 flags; - __u16 command_id; - __u32 rsvd1[5]; - __le64 prp1; - __le64 prp2; - __u32 rsvd12[6]; -}; - -struct streams_directive_params { - __le16 msl; - __le16 nssa; - __le16 nsso; - __u8 rsvd[10]; - __le32 sws; - __le16 sgs; - __le16 nsa; - __le16 nso; - __u8 rsvd2[6]; -}; - struct nvme_command { union { struct nvme_common_command common; @@ -1427,39 +857,15 @@ struct nvme_command { struct nvme_download_firmware dlfw; struct nvme_format_cmd format; struct nvme_dsm_cmd dsm; - struct nvme_write_zeroes_cmd write_zeroes; - struct nvme_zone_mgmt_send_cmd zms; - struct nvme_zone_mgmt_recv_cmd zmr; struct nvme_abort_cmd abort; struct nvme_get_log_page_command get_log_page; struct nvmf_common_command fabrics; struct nvmf_connect_command connect; struct nvmf_property_set_command prop_set; struct nvmf_property_get_command prop_get; - struct nvme_dbbuf dbbuf; - struct nvme_directive_cmd directive; }; }; -static inline bool nvme_is_fabrics(struct nvme_command *cmd) -{ - return cmd->common.opcode == nvme_fabrics_command; -} - -struct nvme_error_slot { - __le64 error_count; - __le16 sqid; - __le16 cmdid; - __le16 status_field; - __le16 param_error_location; - __le64 lba; - __le32 nsid; - __u8 vs; - __u8 resv[3]; - __le64 cs; - __u8 resv2[24]; -}; - static inline bool 
nvme_is_write(struct nvme_command *cmd) { /* @@ -1467,8 +873,8 @@ static inline bool nvme_is_write(struct nvme_command *cmd) * * Why can't we simply have a Fabrics In and Fabrics out command? */ - if (unlikely(nvme_is_fabrics(cmd))) - return cmd->fabrics.fctype & 1; + if (unlikely(cmd->common.opcode == nvme_fabrics_command)) + return cmd->fabrics.opcode & 1; return cmd->common.opcode & 1; } @@ -1494,30 +900,14 @@ enum { NVME_SC_SGL_INVALID_DATA = 0xf, NVME_SC_SGL_INVALID_METADATA = 0x10, NVME_SC_SGL_INVALID_TYPE = 0x11, - NVME_SC_CMB_INVALID_USE = 0x12, - NVME_SC_PRP_INVALID_OFFSET = 0x13, - NVME_SC_ATOMIC_WU_EXCEEDED = 0x14, - NVME_SC_OP_DENIED = 0x15, + NVME_SC_SGL_INVALID_OFFSET = 0x16, - NVME_SC_RESERVED = 0x17, - NVME_SC_HOST_ID_INCONSIST = 0x18, - NVME_SC_KA_TIMEOUT_EXPIRED = 0x19, - NVME_SC_KA_TIMEOUT_INVALID = 0x1A, - NVME_SC_ABORTED_PREEMPT_ABORT = 0x1B, - NVME_SC_SANITIZE_FAILED = 0x1C, - NVME_SC_SANITIZE_IN_PROGRESS = 0x1D, - NVME_SC_SGL_INVALID_GRANULARITY = 0x1E, - NVME_SC_CMD_NOT_SUP_CMB_QUEUE = 0x1F, - NVME_SC_NS_WRITE_PROTECTED = 0x20, - NVME_SC_CMD_INTERRUPTED = 0x21, - NVME_SC_TRANSIENT_TR_ERR = 0x22, - NVME_SC_INVALID_IO_CMD_SET = 0x2C, + NVME_SC_SGL_INVALID_SUBTYPE = 0x17, NVME_SC_LBA_RANGE = 0x80, NVME_SC_CAP_EXCEEDED = 0x81, NVME_SC_NS_NOT_READY = 0x82, NVME_SC_RESERVATION_CONFLICT = 0x83, - NVME_SC_FORMAT_IN_PROGRESS = 0x84, /* * Command Specific Status: @@ -1541,24 +931,15 @@ enum { NVME_SC_FW_NEEDS_SUBSYS_RESET = 0x110, NVME_SC_FW_NEEDS_RESET = 0x111, NVME_SC_FW_NEEDS_MAX_TIME = 0x112, - NVME_SC_FW_ACTIVATE_PROHIBITED = 0x113, + NVME_SC_FW_ACIVATE_PROHIBITED = 0x113, NVME_SC_OVERLAPPING_RANGE = 0x114, - NVME_SC_NS_INSUFFICIENT_CAP = 0x115, + NVME_SC_NS_INSUFFICENT_CAP = 0x115, NVME_SC_NS_ID_UNAVAILABLE = 0x116, NVME_SC_NS_ALREADY_ATTACHED = 0x118, NVME_SC_NS_IS_PRIVATE = 0x119, NVME_SC_NS_NOT_ATTACHED = 0x11a, NVME_SC_THIN_PROV_NOT_SUPP = 0x11b, NVME_SC_CTRL_LIST_INVALID = 0x11c, - NVME_SC_SELT_TEST_IN_PROGRESS = 0x11d, - 
NVME_SC_BP_WRITE_PROHIBITED = 0x11e, - NVME_SC_CTRL_ID_INVALID = 0x11f, - NVME_SC_SEC_CTRL_STATE_INVALID = 0x120, - NVME_SC_CTRL_RES_NUM_INVALID = 0x121, - NVME_SC_RES_ID_INVALID = 0x122, - NVME_SC_PMR_SAN_PROHIBITED = 0x123, - NVME_SC_ANA_GROUP_ID_INVALID = 0x124, - NVME_SC_ANA_ATTACH_FAILED = 0x125, /* * I/O Command Set Specific - NVM commands: @@ -1566,7 +947,6 @@ enum { NVME_SC_BAD_ATTRIBUTES = 0x180, NVME_SC_INVALID_PI = 0x181, NVME_SC_READ_ONLY = 0x182, - NVME_SC_ONCS_NOT_SUPPORTED = 0x183, /* * I/O Command Set Specific - Fabrics commands: @@ -1580,18 +960,6 @@ enum { NVME_SC_DISCOVERY_RESTART = 0x190, NVME_SC_AUTH_REQUIRED = 0x191, - /* - * I/O Command Set Specific - Zoned commands: - */ - NVME_SC_ZONE_BOUNDARY_ERROR = 0x1b8, - NVME_SC_ZONE_FULL = 0x1b9, - NVME_SC_ZONE_READ_ONLY = 0x1ba, - NVME_SC_ZONE_OFFLINE = 0x1bb, - NVME_SC_ZONE_INVALID_WRITE = 0x1bc, - NVME_SC_ZONE_TOO_MANY_ACTIVE = 0x1bd, - NVME_SC_ZONE_TOO_MANY_OPEN = 0x1be, - NVME_SC_ZONE_INVALID_TRANSITION = 0x1bf, - /* * Media and Data Integrity Errors: */ @@ -1604,16 +972,6 @@ enum { NVME_SC_ACCESS_DENIED = 0x286, NVME_SC_UNWRITTEN_BLOCK = 0x287, - /* - * Path-related Errors: - */ - NVME_SC_ANA_PERSISTENT_LOSS = 0x301, - NVME_SC_ANA_INACCESSIBLE = 0x302, - NVME_SC_ANA_TRANSITION = 0x303, - NVME_SC_HOST_PATH_ERROR = 0x370, - NVME_SC_HOST_ABORTED_CMD = 0x371, - - NVME_SC_CRD = 0x1800, NVME_SC_DNR = 0x4000, }; @@ -1621,11 +979,11 @@ struct nvme_completion { /* * Used by Admin and Fabrics commands to return data: */ - union nvme_result { - __le16 u16; - __le32 u32; - __le64 u64; - } result; + union { + __le16 result16; + __le32 result; + __le64 result64; + }; __le16 sq_head; /* how much of this queue may be reclaimed */ __le16 sq_id; /* submission queue that generated this entry */ __u16 command_id; /* of the command which completed */ @@ -1635,8 +993,4 @@ struct nvme_completion { #define NVME_VS(major, minor, tertiary) \ (((major) << 16) | ((minor) << 8) | (tertiary)) -#define NVME_MAJOR(ver) ((ver) 
>> 16) -#define NVME_MINOR(ver) (((ver) >> 8) & 0xff) -#define NVME_TERTIARY(ver) ((ver) & 0xff) - #endif /* _LINUX_NVME_H */ diff --git a/include/linux/nvmem-consumer.h b/include/linux/nvmem-consumer.h index c0c0cefc3b..c2256d7465 100644 --- a/include/linux/nvmem-consumer.h +++ b/include/linux/nvmem-consumer.h @@ -1,18 +1,17 @@ -/* SPDX-License-Identifier: GPL-2.0 */ /* * nvmem framework consumer. * * Copyright (C) 2015 Srinivas Kandagatla * Copyright (C) 2013 Maxime Ripard + * + * This file is licensed under the terms of the GNU General Public + * License version 2. This program is licensed "as is" without any + * warranty of any kind, whether express or implied. */ #ifndef _LINUX_NVMEM_CONSUMER_H #define _LINUX_NVMEM_CONSUMER_H -#include -#include -#include - struct device; struct device_node; /* consumer cookie */ @@ -27,48 +26,15 @@ struct nvmem_cell_info { unsigned int nbits; }; -/** - * struct nvmem_cell_lookup - cell lookup entry - * - * @nvmem_name: Name of the provider. - * @cell_name: Name of the nvmem cell as defined in the name field of - * struct nvmem_cell_info. - * @dev_id: Name of the consumer device that will be associated with - * this cell. - * @con_id: Connector id for this cell lookup. 
- */ -struct nvmem_cell_lookup { - const char *nvmem_name; - const char *cell_name; - const char *dev_id; - const char *con_id; - struct list_head node; -}; - -enum { - NVMEM_ADD = 1, - NVMEM_REMOVE, - NVMEM_CELL_ADD, - NVMEM_CELL_REMOVE, -}; - #if IS_ENABLED(CONFIG_NVMEM) /* Cell based interface */ -struct nvmem_cell *nvmem_cell_get(struct device *dev, const char *id); -struct nvmem_cell *devm_nvmem_cell_get(struct device *dev, const char *id); +struct nvmem_cell *nvmem_cell_get(struct device *dev, const char *name); +struct nvmem_cell *devm_nvmem_cell_get(struct device *dev, const char *name); void nvmem_cell_put(struct nvmem_cell *cell); void devm_nvmem_cell_put(struct device *dev, struct nvmem_cell *cell); void *nvmem_cell_read(struct nvmem_cell *cell, size_t *len); int nvmem_cell_write(struct nvmem_cell *cell, void *buf, size_t len); -int nvmem_cell_read_u8(struct device *dev, const char *cell_id, u8 *val); -int nvmem_cell_read_u16(struct device *dev, const char *cell_id, u16 *val); -int nvmem_cell_read_u32(struct device *dev, const char *cell_id, u32 *val); -int nvmem_cell_read_u64(struct device *dev, const char *cell_id, u64 *val); -int nvmem_cell_read_variable_le_u32(struct device *dev, const char *cell_id, - u32 *val); -int nvmem_cell_read_variable_le_u64(struct device *dev, const char *cell_id, - u64 *val); /* direct nvmem device read/write interface */ struct nvmem_device *nvmem_device_get(struct device *dev, const char *name); @@ -85,31 +51,18 @@ ssize_t nvmem_device_cell_read(struct nvmem_device *nvmem, int nvmem_device_cell_write(struct nvmem_device *nvmem, struct nvmem_cell_info *info, void *buf); -const char *nvmem_dev_name(struct nvmem_device *nvmem); - -void nvmem_add_cell_lookups(struct nvmem_cell_lookup *entries, - size_t nentries); -void nvmem_del_cell_lookups(struct nvmem_cell_lookup *entries, - size_t nentries); - -int nvmem_register_notifier(struct notifier_block *nb); -int nvmem_unregister_notifier(struct notifier_block *nb); - -struct 
nvmem_device *nvmem_device_find(void *data, - int (*match)(struct device *dev, const void *data)); - #else static inline struct nvmem_cell *nvmem_cell_get(struct device *dev, - const char *id) + const char *name) { - return ERR_PTR(-EOPNOTSUPP); + return ERR_PTR(-ENOSYS); } static inline struct nvmem_cell *devm_nvmem_cell_get(struct device *dev, - const char *id) + const char *name) { - return ERR_PTR(-EOPNOTSUPP); + return ERR_PTR(-ENOSYS); } static inline void devm_nvmem_cell_put(struct device *dev, @@ -123,57 +76,25 @@ static inline void nvmem_cell_put(struct nvmem_cell *cell) static inline void *nvmem_cell_read(struct nvmem_cell *cell, size_t *len) { - return ERR_PTR(-EOPNOTSUPP); + return ERR_PTR(-ENOSYS); } static inline int nvmem_cell_write(struct nvmem_cell *cell, - void *buf, size_t len) + const char *buf, size_t len) { - return -EOPNOTSUPP; -} - -static inline int nvmem_cell_read_u16(struct device *dev, - const char *cell_id, u16 *val) -{ - return -EOPNOTSUPP; -} - -static inline int nvmem_cell_read_u32(struct device *dev, - const char *cell_id, u32 *val) -{ - return -EOPNOTSUPP; -} - -static inline int nvmem_cell_read_u64(struct device *dev, - const char *cell_id, u64 *val) -{ - return -EOPNOTSUPP; -} - -static inline int nvmem_cell_read_variable_le_u32(struct device *dev, - const char *cell_id, - u32 *val) -{ - return -EOPNOTSUPP; -} - -static inline int nvmem_cell_read_variable_le_u64(struct device *dev, - const char *cell_id, - u64 *val) -{ - return -EOPNOTSUPP; + return -ENOSYS; } static inline struct nvmem_device *nvmem_device_get(struct device *dev, const char *name) { - return ERR_PTR(-EOPNOTSUPP); + return ERR_PTR(-ENOSYS); } static inline struct nvmem_device *devm_nvmem_device_get(struct device *dev, const char *name) { - return ERR_PTR(-EOPNOTSUPP); + return ERR_PTR(-ENOSYS); } static inline void nvmem_device_put(struct nvmem_device *nvmem) @@ -189,74 +110,47 @@ static inline ssize_t nvmem_device_cell_read(struct nvmem_device *nvmem, struct 
nvmem_cell_info *info, void *buf) { - return -EOPNOTSUPP; + return -ENOSYS; } static inline int nvmem_device_cell_write(struct nvmem_device *nvmem, struct nvmem_cell_info *info, void *buf) { - return -EOPNOTSUPP; + return -ENOSYS; } static inline int nvmem_device_read(struct nvmem_device *nvmem, unsigned int offset, size_t bytes, void *buf) { - return -EOPNOTSUPP; + return -ENOSYS; } static inline int nvmem_device_write(struct nvmem_device *nvmem, unsigned int offset, size_t bytes, void *buf) { - return -EOPNOTSUPP; + return -ENOSYS; } - -static inline const char *nvmem_dev_name(struct nvmem_device *nvmem) -{ - return NULL; -} - -static inline void -nvmem_add_cell_lookups(struct nvmem_cell_lookup *entries, size_t nentries) {} -static inline void -nvmem_del_cell_lookups(struct nvmem_cell_lookup *entries, size_t nentries) {} - -static inline int nvmem_register_notifier(struct notifier_block *nb) -{ - return -EOPNOTSUPP; -} - -static inline int nvmem_unregister_notifier(struct notifier_block *nb) -{ - return -EOPNOTSUPP; -} - -static inline struct nvmem_device *nvmem_device_find(void *data, - int (*match)(struct device *dev, const void *data)) -{ - return NULL; -} - #endif /* CONFIG_NVMEM */ #if IS_ENABLED(CONFIG_NVMEM) && IS_ENABLED(CONFIG_OF) struct nvmem_cell *of_nvmem_cell_get(struct device_node *np, - const char *id); + const char *name); struct nvmem_device *of_nvmem_device_get(struct device_node *np, const char *name); #else static inline struct nvmem_cell *of_nvmem_cell_get(struct device_node *np, - const char *id) + const char *name) { - return ERR_PTR(-EOPNOTSUPP); + return ERR_PTR(-ENOSYS); } static inline struct nvmem_device *of_nvmem_device_get(struct device_node *np, const char *name) { - return ERR_PTR(-EOPNOTSUPP); + return ERR_PTR(-ENOSYS); } #endif /* CONFIG_NVMEM && CONFIG_OF */ diff --git a/include/linux/nvmem-provider.h b/include/linux/nvmem-provider.h index 104505e902..cd93416d76 100644 --- a/include/linux/nvmem-provider.h +++ 
b/include/linux/nvmem-provider.h @@ -1,18 +1,17 @@ -/* SPDX-License-Identifier: GPL-2.0 */ /* * nvmem framework provider. * * Copyright (C) 2015 Srinivas Kandagatla * Copyright (C) 2013 Maxime Ripard + * + * This file is licensed under the terms of the GNU General Public + * License version 2. This program is licensed "as is" without any + * warranty of any kind, whether express or implied. */ #ifndef _LINUX_NVMEM_PROVIDER_H #define _LINUX_NVMEM_PROVIDER_H -#include -#include -#include - struct nvmem_device; struct nvmem_cell_info; typedef int (*nvmem_reg_read_t)(void *priv, unsigned int offset, @@ -20,76 +19,15 @@ typedef int (*nvmem_reg_read_t)(void *priv, unsigned int offset, typedef int (*nvmem_reg_write_t)(void *priv, unsigned int offset, void *val, size_t bytes); -enum nvmem_type { - NVMEM_TYPE_UNKNOWN = 0, - NVMEM_TYPE_EEPROM, - NVMEM_TYPE_OTP, - NVMEM_TYPE_BATTERY_BACKED, - NVMEM_TYPE_FRAM, -}; - -#define NVMEM_DEVID_NONE (-1) -#define NVMEM_DEVID_AUTO (-2) - -/** - * struct nvmem_keepout - NVMEM register keepout range. - * - * @start: The first byte offset to avoid. - * @end: One beyond the last byte offset to avoid. - * @value: The byte to fill reads with for this region. - */ -struct nvmem_keepout { - unsigned int start; - unsigned int end; - unsigned char value; -}; - -/** - * struct nvmem_config - NVMEM device configuration - * - * @dev: Parent device. - * @name: Optional name. - * @id: Optional device ID used in full name. Ignored if name is NULL. - * @owner: Pointer to exporter module. Used for refcounting. - * @cells: Optional array of pre-defined NVMEM cells. - * @ncells: Number of elements in cells. - * @keepout: Optional array of keepout ranges (sorted ascending by start). - * @nkeepout: Number of elements in the keepout array. - * @type: Type of the nvmem storage - * @read_only: Device is read-only. - * @root_only: Device is accessibly to root only. - * @of_node: If given, this will be used instead of the parent's of_node. 
- * @no_of_node: Device should not use the parent's of_node even if it's !NULL. - * @reg_read: Callback to read data. - * @reg_write: Callback to write data. - * @size: Device size. - * @word_size: Minimum read/write access granularity. - * @stride: Minimum read/write access stride. - * @priv: User context passed to read/write callbacks. - * @wp-gpio: Write protect pin - * - * Note: A default "nvmem" name will be assigned to the device if - * no name is specified in its configuration. In such case "" is - * generated with ida_simple_get() and provided id field is ignored. - * - * Note: Specifying name and setting id to -1 implies a unique device - * whose name is provided as-is (kept unaltered). - */ struct nvmem_config { struct device *dev; const char *name; int id; struct module *owner; - struct gpio_desc *wp_gpio; const struct nvmem_cell_info *cells; int ncells; - const struct nvmem_keepout *keepout; - unsigned int nkeepout; - enum nvmem_type type; bool read_only; bool root_only; - struct device_node *of_node; - bool no_of_node; nvmem_reg_read_t reg_read; nvmem_reg_write_t reg_write; int size; @@ -101,61 +39,22 @@ struct nvmem_config { struct device *base_dev; }; -/** - * struct nvmem_cell_table - NVMEM cell definitions for given provider - * - * @nvmem_name: Provider name. - * @cells: Array of cell definitions. - * @ncells: Number of cell definitions in the array. - * @node: List node. - * - * This structure together with related helper functions is provided for users - * that don't can't access the nvmem provided structure but wish to register - * cell definitions for it e.g. board files registering an EEPROM device. 
- */ -struct nvmem_cell_table { - const char *nvmem_name; - const struct nvmem_cell_info *cells; - size_t ncells; - struct list_head node; -}; - #if IS_ENABLED(CONFIG_NVMEM) struct nvmem_device *nvmem_register(const struct nvmem_config *cfg); -void nvmem_unregister(struct nvmem_device *nvmem); - -struct nvmem_device *devm_nvmem_register(struct device *dev, - const struct nvmem_config *cfg); - -int devm_nvmem_unregister(struct device *dev, struct nvmem_device *nvmem); - -void nvmem_add_cell_table(struct nvmem_cell_table *table); -void nvmem_del_cell_table(struct nvmem_cell_table *table); +int nvmem_unregister(struct nvmem_device *nvmem); #else static inline struct nvmem_device *nvmem_register(const struct nvmem_config *c) { - return ERR_PTR(-EOPNOTSUPP); + return ERR_PTR(-ENOSYS); } -static inline void nvmem_unregister(struct nvmem_device *nvmem) {} - -static inline struct nvmem_device * -devm_nvmem_register(struct device *dev, const struct nvmem_config *c) +static inline int nvmem_unregister(struct nvmem_device *nvmem) { - return nvmem_register(c); + return -ENOSYS; } -static inline int -devm_nvmem_unregister(struct device *dev, struct nvmem_device *nvmem) -{ - return -EOPNOTSUPP; -} - -static inline void nvmem_add_cell_table(struct nvmem_cell_table *table) {} -static inline void nvmem_del_cell_table(struct nvmem_cell_table *table) {} - #endif /* CONFIG_NVMEM */ #endif /* ifndef _LINUX_NVMEM_PROVIDER_H */ diff --git a/include/linux/nvram.h b/include/linux/nvram.h index d29d9c93a9..cf0ff555a6 100644 --- a/include/linux/nvram.h +++ b/include/linux/nvram.h @@ -1,133 +1,13 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_NVRAM_H #define _LINUX_NVRAM_H -#include #include -#ifdef CONFIG_PPC -#include -#endif - -/** - * struct nvram_ops - NVRAM functionality made available to drivers - * @read: validate checksum (if any) then load a range of bytes from NVRAM - * @write: store a range of bytes to NVRAM then update checksum (if any) - * @read_byte: load a single 
byte from NVRAM - * @write_byte: store a single byte to NVRAM - * @get_size: return the fixed number of bytes in the NVRAM - * - * Architectures which provide an nvram ops struct need not implement all - * of these methods. If the NVRAM hardware can be accessed only one byte - * at a time then it may be sufficient to provide .read_byte and .write_byte. - * If the NVRAM has a checksum (and it is to be checked) the .read and - * .write methods can be used to implement that efficiently. - * - * Portable drivers may use the wrapper functions defined here. - * The nvram_read() and nvram_write() functions call the .read and .write - * methods when available and fall back on the .read_byte and .write_byte - * methods otherwise. - */ - -struct nvram_ops { - ssize_t (*get_size)(void); - unsigned char (*read_byte)(int); - void (*write_byte)(unsigned char, int); - ssize_t (*read)(char *, size_t, loff_t *); - ssize_t (*write)(char *, size_t, loff_t *); -#if defined(CONFIG_X86) || defined(CONFIG_M68K) - long (*initialize)(void); - long (*set_checksum)(void); -#endif -}; - -extern const struct nvram_ops arch_nvram_ops; - -static inline ssize_t nvram_get_size(void) -{ -#ifdef CONFIG_PPC - if (ppc_md.nvram_size) - return ppc_md.nvram_size(); -#else - if (arch_nvram_ops.get_size) - return arch_nvram_ops.get_size(); -#endif - return -ENODEV; -} - -static inline unsigned char nvram_read_byte(int addr) -{ -#ifdef CONFIG_PPC - if (ppc_md.nvram_read_val) - return ppc_md.nvram_read_val(addr); -#else - if (arch_nvram_ops.read_byte) - return arch_nvram_ops.read_byte(addr); -#endif - return 0xFF; -} - -static inline void nvram_write_byte(unsigned char val, int addr) -{ -#ifdef CONFIG_PPC - if (ppc_md.nvram_write_val) - ppc_md.nvram_write_val(addr, val); -#else - if (arch_nvram_ops.write_byte) - arch_nvram_ops.write_byte(val, addr); -#endif -} - -static inline ssize_t nvram_read_bytes(char *buf, size_t count, loff_t *ppos) -{ - ssize_t nvram_size = nvram_get_size(); - loff_t i; - char *p = 
buf; - - if (nvram_size < 0) - return nvram_size; - for (i = *ppos; count > 0 && i < nvram_size; ++i, ++p, --count) - *p = nvram_read_byte(i); - *ppos = i; - return p - buf; -} - -static inline ssize_t nvram_write_bytes(char *buf, size_t count, loff_t *ppos) -{ - ssize_t nvram_size = nvram_get_size(); - loff_t i; - char *p = buf; - - if (nvram_size < 0) - return nvram_size; - for (i = *ppos; count > 0 && i < nvram_size; ++i, ++p, --count) - nvram_write_byte(*p, i); - *ppos = i; - return p - buf; -} - -static inline ssize_t nvram_read(char *buf, size_t count, loff_t *ppos) -{ -#ifdef CONFIG_PPC - if (ppc_md.nvram_read) - return ppc_md.nvram_read(buf, count, ppos); -#else - if (arch_nvram_ops.read) - return arch_nvram_ops.read(buf, count, ppos); -#endif - return nvram_read_bytes(buf, count, ppos); -} - -static inline ssize_t nvram_write(char *buf, size_t count, loff_t *ppos) -{ -#ifdef CONFIG_PPC - if (ppc_md.nvram_write) - return ppc_md.nvram_write(buf, count, ppos); -#else - if (arch_nvram_ops.write) - return arch_nvram_ops.write(buf, count, ppos); -#endif - return nvram_write_bytes(buf, count, ppos); -} - +/* __foo is foo without grabbing the rtc_lock - get it yourself */ +extern unsigned char __nvram_read_byte(int i); +extern unsigned char nvram_read_byte(int i); +extern void __nvram_write_byte(unsigned char c, int i); +extern void nvram_write_byte(unsigned char c, int i); +extern int __nvram_check_checksum(void); +extern int nvram_check_checksum(void); #endif /* _LINUX_NVRAM_H */ diff --git a/include/linux/of.h b/include/linux/of.h index 6f1c41f109..299aeb1927 100644 --- a/include/linux/of.h +++ b/include/linux/of.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0+ */ #ifndef _LINUX_OF_H #define _LINUX_OF_H /* @@ -10,6 +9,11 @@ * Updates for PPC64 by Peter Bergner & David Engebretsen, IBM Corp. * Updates for SPARC64 by David S. Miller * Derived from PowerPC and Sparc prom.h files by Stephen Rothwell, IBM Corp. 
+ * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. */ #include #include @@ -33,15 +37,9 @@ struct property { int length; void *value; struct property *next; -#if defined(CONFIG_OF_DYNAMIC) || defined(CONFIG_SPARC) unsigned long _flags; -#endif -#if defined(CONFIG_OF_PROMTREE) unsigned int unique_id; -#endif -#if defined(CONFIG_OF_KOBJ) struct bin_attribute attr; -#endif }; #if defined(CONFIG_SPARC) @@ -50,6 +48,7 @@ struct of_irq_controller; struct device_node { const char *name; + const char *type; phandle phandle; const char *full_name; struct fwnode_handle fwnode; @@ -59,12 +58,11 @@ struct device_node { struct device_node *parent; struct device_node *child; struct device_node *sibling; -#if defined(CONFIG_OF_KOBJ) struct kobject kobj; -#endif unsigned long _flags; void *data; #if defined(CONFIG_SPARC) + const char *path_component_name; unsigned int unique_id; struct of_irq_controller *irq_trans; #endif @@ -102,20 +100,23 @@ struct of_reconfig_data { /* initialize a node */ extern struct kobj_type of_node_ktype; -extern const struct fwnode_operations of_fwnode_ops; static inline void of_node_init(struct device_node *node) { -#if defined(CONFIG_OF_KOBJ) kobject_init(&node->kobj, &of_node_ktype); -#endif - fwnode_init(&node->fwnode, &of_fwnode_ops); + node->fwnode.type = FWNODE_OF; } -#if defined(CONFIG_OF_KOBJ) -#define of_node_kobj(n) (&(n)->kobj) -#else -#define of_node_kobj(n) NULL -#endif +/* true when node is initialized */ +static inline int of_node_is_initialized(struct device_node *node) +{ + return node && node->kobj.state_initialized; +} + +/* true when node is attached (i.e. 
present on sysfs) */ +static inline int of_node_is_attached(struct device_node *node) +{ + return node && node->kobj.state_in_sysfs; +} #ifdef CONFIG_OF_DYNAMIC extern struct device_node *of_node_get(struct device_node *node); @@ -136,44 +137,27 @@ extern struct device_node *of_aliases; extern struct device_node *of_stdout; extern raw_spinlock_t devtree_lock; -/* - * struct device_node flag descriptions - * (need to be visible even when !CONFIG_OF) - */ -#define OF_DYNAMIC 1 /* (and properties) allocated via kmalloc */ -#define OF_DETACHED 2 /* detached from the device tree */ -#define OF_POPULATED 3 /* device already created */ -#define OF_POPULATED_BUS 4 /* platform bus created for children */ -#define OF_OVERLAY 5 /* allocated for an overlay */ -#define OF_OVERLAY_FREE_CSET 6 /* in overlay cset being freed */ +/* flag descriptions (need to be visible even when !CONFIG_OF) */ +#define OF_DYNAMIC 1 /* node and properties were allocated via kmalloc */ +#define OF_DETACHED 2 /* node has been detached from the device tree */ +#define OF_POPULATED 3 /* device already created for the node */ +#define OF_POPULATED_BUS 4 /* of_platform_populate recursed to children of this node */ #define OF_BAD_ADDR ((u64)-1) #ifdef CONFIG_OF void of_core_init(void); -static inline bool is_of_node(const struct fwnode_handle *fwnode) +static inline bool is_of_node(struct fwnode_handle *fwnode) { - return !IS_ERR_OR_NULL(fwnode) && fwnode->ops == &of_fwnode_ops; + return !IS_ERR_OR_NULL(fwnode) && fwnode->type == FWNODE_OF; } -#define to_of_node(__fwnode) \ - ({ \ - typeof(__fwnode) __to_of_node_fwnode = (__fwnode); \ - \ - is_of_node(__to_of_node_fwnode) ? \ - container_of(__to_of_node_fwnode, \ - struct device_node, fwnode) : \ - NULL; \ - }) - -#define of_fwnode_handle(node) \ - ({ \ - typeof(node) __of_fwnode_handle_node = (node); \ - \ - __of_fwnode_handle_node ? 
\ - &__of_fwnode_handle_node->fwnode : NULL; \ - }) +static inline struct device_node *to_of_node(struct fwnode_handle *fwnode) +{ + return is_of_node(fwnode) ? + container_of(fwnode, struct device_node, fwnode) : NULL; +} static inline bool of_have_populated_dt(void) { @@ -206,7 +190,6 @@ static inline void of_node_clear_flag(struct device_node *n, unsigned long flag) clear_bit(flag, &n->_flags); } -#if defined(CONFIG_OF_DYNAMIC) || defined(CONFIG_SPARC) static inline int of_property_check_flag(struct property *p, unsigned long flag) { return test_bit(flag, &p->_flags); @@ -221,7 +204,6 @@ static inline void of_property_clear_flag(struct property *p, unsigned long flag { clear_bit(flag, &p->_flags); } -#endif extern struct device_node *__of_find_all_nodes(struct device_node *prev); extern struct device_node *of_find_all_nodes(struct device_node *prev); @@ -234,8 +216,8 @@ extern struct device_node *of_find_all_nodes(struct device_node *prev); static inline u64 of_read_number(const __be32 *cell, int size) { u64 r = 0; - for (; size--; cell++) - r = (r << 32) | be32_to_cpu(*cell); + while (size--) + r = (r << 32) | be32_to_cpu(*(cell++)); return r; } @@ -250,12 +232,15 @@ static inline unsigned long of_read_ulong(const __be32 *cell, int size) #include #endif +/* Default #address and #size cells. Allow arch asm/prom.h to override */ +#if !defined(OF_ROOT_NODE_ADDR_CELLS_DEFAULT) +#define OF_ROOT_NODE_ADDR_CELLS_DEFAULT 1 +#define OF_ROOT_NODE_SIZE_CELLS_DEFAULT 1 +#endif + #define OF_IS_DYNAMIC(x) test_bit(OF_DYNAMIC, &x->_flags) #define OF_MARK_DYNAMIC(x) set_bit(OF_DYNAMIC, &x->_flags) -extern bool of_node_name_eq(const struct device_node *np, const char *name); -extern bool of_node_name_prefix(const struct device_node *np, const char *prefix); - static inline const char *of_node_full_name(const struct device_node *np) { return np ? 
np->full_name : ""; @@ -290,14 +275,11 @@ extern struct device_node *of_get_next_child(const struct device_node *node, extern struct device_node *of_get_next_available_child( const struct device_node *node, struct device_node *prev); -extern struct device_node *of_get_compatible_child(const struct device_node *parent, - const char *compatible); extern struct device_node *of_get_child_by_name(const struct device_node *node, const char *name); /* cache lookup */ extern struct device_node *of_find_next_cache_node(const struct device_node *); -extern int of_find_last_cache_level(unsigned int cpu); extern struct device_node *of_find_node_with_property( struct device_node *from, const char *prop_name); @@ -309,9 +291,6 @@ extern int of_property_count_elems_of_size(const struct device_node *np, extern int of_property_read_u32_index(const struct device_node *np, const char *propname, u32 index, u32 *out_value); -extern int of_property_read_u64_index(const struct device_node *np, - const char *propname, - u32 index, u64 *out_value); extern int of_property_read_variable_u8_array(const struct device_node *np, const char *propname, u8 *out_values, size_t sz_min, size_t sz_max); @@ -350,10 +329,6 @@ extern const void *of_get_property(const struct device_node *node, const char *name, int *lenp); extern struct device_node *of_get_cpu_node(int cpu, unsigned int *thread); -extern struct device_node *of_get_next_cpu_node(struct device_node *prev); -extern struct device_node *of_get_cpu_state_node(struct device_node *cpu_node, - int index); - #define for_each_property_of_node(dn, pp) \ for (pp = dn->properties; pp != NULL; pp = pp->next) @@ -369,9 +344,6 @@ extern struct device_node *of_parse_phandle(const struct device_node *np, extern int of_parse_phandle_with_args(const struct device_node *np, const char *list_name, const char *cells_name, int index, struct of_phandle_args *out_args); -extern int of_parse_phandle_with_args_map(const struct device_node *np, - const char *list_name, 
const char *stem_name, int index, - struct of_phandle_args *out_args); extern int of_parse_phandle_with_fixed_args(const struct device_node *np, const char *list_name, int cells_count, int index, struct of_phandle_args *out_args); @@ -393,9 +365,6 @@ extern int of_phandle_iterator_args(struct of_phandle_iterator *it, extern void of_alias_scan(void * (*dt_alloc)(u64 size, u64 align)); extern int of_alias_get_id(struct device_node *np, const char *stem); extern int of_alias_get_highest_id(const char *stem); -extern int of_alias_get_alias_list(const struct of_device_id *matches, - const char *stem, unsigned long *bitmap, - unsigned int nbits); extern int of_machine_is_compatible(const char *compat); @@ -424,15 +393,13 @@ extern int of_detach_node(struct device_node *); * @sz: number of array elements to read * * Search for a property in a device node and read 8-bit value(s) from - * it. - * - * dts entry of array should be like: - * ``property = /bits/ 8 <0x50 0x60 0x70>;`` - * - * Return: 0 on success, -EINVAL if the property does not exist, + * it. Returns 0 on success, -EINVAL if the property does not exist, * -ENODATA if property does not have a value, and -EOVERFLOW if the * property data isn't large enough. * + * dts entry of array should be like: + * property = /bits/ 8 <0x50 0x60 0x70>; + * * The out_values is modified only if a valid u8 value can be decoded. */ static inline int of_property_read_u8_array(const struct device_node *np, @@ -456,15 +423,13 @@ static inline int of_property_read_u8_array(const struct device_node *np, * @sz: number of array elements to read * * Search for a property in a device node and read 16-bit value(s) from - * it. - * - * dts entry of array should be like: - * ``property = /bits/ 16 <0x5000 0x6000 0x7000>;`` - * - * Return: 0 on success, -EINVAL if the property does not exist, + * it. 
Returns 0 on success, -EINVAL if the property does not exist, * -ENODATA if property does not have a value, and -EOVERFLOW if the * property data isn't large enough. * + * dts entry of array should be like: + * property = /bits/ 16 <0x5000 0x6000 0x7000>; + * * The out_values is modified only if a valid u16 value can be decoded. */ static inline int of_property_read_u16_array(const struct device_node *np, @@ -489,9 +454,7 @@ static inline int of_property_read_u16_array(const struct device_node *np, * @sz: number of array elements to read * * Search for a property in a device node and read 32-bit value(s) from - * it. - * - * Return: 0 on success, -EINVAL if the property does not exist, + * it. Returns 0 on success, -EINVAL if the property does not exist, * -ENODATA if property does not have a value, and -EOVERFLOW if the * property data isn't large enough. * @@ -519,9 +482,7 @@ static inline int of_property_read_u32_array(const struct device_node *np, * @sz: number of array elements to read * * Search for a property in a device node and read 64-bit value(s) from - * it. - * - * Return: 0 on success, -EINVAL if the property does not exist, + * it. Returns 0 on success, -EINVAL if the property does not exist, * -ENODATA if property does not have a value, and -EOVERFLOW if the * property data isn't large enough. 
* @@ -560,47 +521,22 @@ const char *of_prop_next_string(struct property *prop, const char *cur); bool of_console_check(struct device_node *dn, char *name, int index); -extern int of_cpu_node_to_id(struct device_node *np); - -int of_map_id(struct device_node *np, u32 id, - const char *map_name, const char *map_mask_name, - struct device_node **target, u32 *id_out); - -phys_addr_t of_dma_get_max_cpu_address(struct device_node *np); - -struct kimage; -void *of_kexec_alloc_and_setup_fdt(const struct kimage *image, - unsigned long initrd_load_addr, - unsigned long initrd_len, - const char *cmdline, size_t extra_fdt_size); -int ima_get_kexec_buffer(void **addr, size_t *size); -int ima_free_kexec_buffer(void); #else /* CONFIG_OF */ static inline void of_core_init(void) { } -static inline bool is_of_node(const struct fwnode_handle *fwnode) +static inline bool is_of_node(struct fwnode_handle *fwnode) { return false; } -static inline struct device_node *to_of_node(const struct fwnode_handle *fwnode) +static inline struct device_node *to_of_node(struct fwnode_handle *fwnode) { return NULL; } -static inline bool of_node_name_eq(const struct device_node *np, const char *name) -{ - return false; -} - -static inline bool of_node_name_prefix(const struct device_node *np, const char *prefix) -{ - return false; -} - static inline const char* of_node_full_name(const struct device_node *np) { return ""; @@ -647,11 +583,6 @@ static inline struct device_node *of_get_parent(const struct device_node *node) return NULL; } -static inline struct device_node *of_get_next_parent(struct device_node *node) -{ - return NULL; -} - static inline struct device_node *of_get_next_child( const struct device_node *node, struct device_node *prev) { @@ -670,19 +601,11 @@ static inline struct device_node *of_find_node_with_property( return NULL; } -#define of_fwnode_handle(node) NULL - static inline bool of_have_populated_dt(void) { return false; } -static inline struct device_node 
*of_get_compatible_child(const struct device_node *parent, - const char *compatible) -{ - return NULL; -} - static inline struct device_node *of_get_child_by_name( const struct device_node *node, const char *name) @@ -696,12 +619,6 @@ static inline int of_device_is_compatible(const struct device_node *device, return 0; } -static inline int of_device_compatible_match(struct device_node *device, - const char *const *compat) -{ - return 0; -} - static inline bool of_device_is_available(const struct device_node *device) { return false; @@ -733,6 +650,12 @@ static inline int of_property_count_elems_of_size(const struct device_node *np, return -ENOSYS; } +static inline int of_property_read_u32_index(const struct device_node *np, + const char *propname, u32 index, u32 *out_value) +{ + return -ENOSYS; +} + static inline int of_property_read_u8_array(const struct device_node *np, const char *propname, u8 *out_values, size_t sz) { @@ -759,14 +682,16 @@ static inline int of_property_read_u64_array(const struct device_node *np, return -ENOSYS; } -static inline int of_property_read_u32_index(const struct device_node *np, - const char *propname, u32 index, u32 *out_value) +static inline int of_property_read_string(const struct device_node *np, + const char *propname, + const char **out_string) { return -ENOSYS; } -static inline int of_property_read_u64_index(const struct device_node *np, - const char *propname, u32 index, u64 *out_value) +static inline int of_property_read_string_helper(const struct device_node *np, + const char *propname, + const char **out_strs, size_t sz, int index) { return -ENOSYS; } @@ -784,72 +709,12 @@ static inline struct device_node *of_get_cpu_node(int cpu, return NULL; } -static inline struct device_node *of_get_next_cpu_node(struct device_node *prev) -{ - return NULL; -} - -static inline struct device_node *of_get_cpu_state_node(struct device_node *cpu_node, - int index) -{ - return NULL; -} - -static inline int of_n_addr_cells(struct device_node 
*np) -{ - return 0; - -} -static inline int of_n_size_cells(struct device_node *np) -{ - return 0; -} - -static inline int of_property_read_variable_u8_array(const struct device_node *np, - const char *propname, u8 *out_values, - size_t sz_min, size_t sz_max) -{ - return -ENOSYS; -} - -static inline int of_property_read_variable_u16_array(const struct device_node *np, - const char *propname, u16 *out_values, - size_t sz_min, size_t sz_max) -{ - return -ENOSYS; -} - -static inline int of_property_read_variable_u32_array(const struct device_node *np, - const char *propname, - u32 *out_values, - size_t sz_min, - size_t sz_max) -{ - return -ENOSYS; -} - static inline int of_property_read_u64(const struct device_node *np, const char *propname, u64 *out_value) { return -ENOSYS; } -static inline int of_property_read_variable_u64_array(const struct device_node *np, - const char *propname, - u64 *out_values, - size_t sz_min, - size_t sz_max) -{ - return -ENOSYS; -} - -static inline int of_property_read_string(const struct device_node *np, - const char *propname, - const char **out_string) -{ - return -ENOSYS; -} - static inline int of_property_match_string(const struct device_node *np, const char *propname, const char *string) @@ -857,13 +722,6 @@ static inline int of_property_match_string(const struct device_node *np, return -ENOSYS; } -static inline int of_property_read_string_helper(const struct device_node *np, - const char *propname, - const char **out_strs, size_t sz, int index) -{ - return -ENOSYS; -} - static inline struct device_node *of_parse_phandle(const struct device_node *np, const char *phandle_name, int index) @@ -880,15 +738,6 @@ static inline int of_parse_phandle_with_args(const struct device_node *np, return -ENOSYS; } -static inline int of_parse_phandle_with_args_map(const struct device_node *np, - const char *list_name, - const char *stem_name, - int index, - struct of_phandle_args *out_args) -{ - return -ENOSYS; -} - static inline int 
of_parse_phandle_with_fixed_args(const struct device_node *np, const char *list_name, int cells_count, int index, struct of_phandle_args *out_args) @@ -896,7 +745,7 @@ static inline int of_parse_phandle_with_fixed_args(const struct device_node *np, return -ENOSYS; } -static inline int of_count_phandle_with_args(const struct device_node *np, +static inline int of_count_phandle_with_args(struct device_node *np, const char *list_name, const char *cells_name) { @@ -934,28 +783,11 @@ static inline int of_alias_get_highest_id(const char *stem) return -ENOSYS; } -static inline int of_alias_get_alias_list(const struct of_device_id *matches, - const char *stem, unsigned long *bitmap, - unsigned int nbits) -{ - return -ENOSYS; -} - static inline int of_machine_is_compatible(const char *compat) { return 0; } -static inline int of_add_property(struct device_node *np, struct property *prop) -{ - return 0; -} - -static inline int of_remove_property(struct device_node *np, struct property *prop) -{ - return 0; -} - static inline bool of_console_check(const struct device_node *dn, const char *name, int index) { return false; @@ -1005,23 +837,6 @@ static inline void of_property_clear_flag(struct property *p, unsigned long flag { } -static inline int of_cpu_node_to_id(struct device_node *np) -{ - return -ENODEV; -} - -static inline int of_map_id(struct device_node *np, u32 id, - const char *map_name, const char *map_mask_name, - struct device_node **target, u32 *id_out) -{ - return -EINVAL; -} - -static inline phys_addr_t of_dma_get_max_cpu_address(struct device_node *np) -{ - return PHYS_ADDR_MAX; -} - #define of_match_ptr(_ptr) NULL #define of_match_node(_matches, _node) NULL #endif /* CONFIG_OF */ @@ -1033,12 +848,6 @@ static inline phys_addr_t of_dma_get_max_cpu_address(struct device_node *np) #define of_node_cmp(s1, s2) strcasecmp((s1), (s2)) #endif -static inline int of_prop_val_eq(struct property *p1, struct property *p2) -{ - return p1->length == p2->length && - 
!memcmp(p1->value, p2->value, (size_t)p1->length); -} - #if defined(CONFIG_OF) && defined(CONFIG_NUMA) extern int of_node_to_nid(struct device_node *np); #else @@ -1064,18 +873,6 @@ static inline struct device_node *of_find_matching_node( return of_find_matching_node_and_match(from, matches, NULL); } -static inline const char *of_node_get_device_type(const struct device_node *np) -{ - return of_get_property(np, "device_type", NULL); -} - -static inline bool of_node_is_type(const struct device_node *np, const char *type) -{ - const char *match = of_node_get_device_type(np); - - return np && match && type && !strcmp(match, type); -} - /** * of_property_count_u8_elems - Count the number of u8 elements in a property * @@ -1083,9 +880,7 @@ static inline bool of_node_is_type(const struct device_node *np, const char *typ * @propname: name of the property to be searched. * * Search for a property in a device node and count the number of u8 elements - * in it. - * - * Return: The number of elements on sucess, -EINVAL if the property does + * in it. Returns number of elements on sucess, -EINVAL if the property does * not exist or its length does not match a multiple of u8 and -ENODATA if the * property does not have a value. */ @@ -1102,9 +897,7 @@ static inline int of_property_count_u8_elems(const struct device_node *np, * @propname: name of the property to be searched. * * Search for a property in a device node and count the number of u16 elements - * in it. - * - * Return: The number of elements on sucess, -EINVAL if the property does + * in it. Returns number of elements on sucess, -EINVAL if the property does * not exist or its length does not match a multiple of u16 and -ENODATA if the * property does not have a value. */ @@ -1121,9 +914,7 @@ static inline int of_property_count_u16_elems(const struct device_node *np, * @propname: name of the property to be searched. * * Search for a property in a device node and count the number of u32 elements - * in it. 
- * - * Return: The number of elements on sucess, -EINVAL if the property does + * in it. Returns number of elements on sucess, -EINVAL if the property does * not exist or its length does not match a multiple of u32 and -ENODATA if the * property does not have a value. */ @@ -1140,9 +931,7 @@ static inline int of_property_count_u32_elems(const struct device_node *np, * @propname: name of the property to be searched. * * Search for a property in a device node and count the number of u64 elements - * in it. - * - * Return: The number of elements on sucess, -EINVAL if the property does + * in it. Returns number of elements on sucess, -EINVAL if the property does * not exist or its length does not match a multiple of u64 and -ENODATA if the * property does not have a value. */ @@ -1163,7 +952,7 @@ static inline int of_property_count_u64_elems(const struct device_node *np, * Search for a property in a device tree node and retrieve a list of * terminated string values (pointer to data, not a copy) in that property. * - * Return: If @out_strs is NULL, the number of strings in the property is returned. + * If @out_strs is NULL, the number of strings in the property is returned. */ static inline int of_property_read_string_array(const struct device_node *np, const char *propname, const char **out_strs, @@ -1179,11 +968,10 @@ static inline int of_property_read_string_array(const struct device_node *np, * @propname: name of the property to be searched. * * Search for a property in a device tree node and retrieve the number of null - * terminated string contain in it. - * - * Return: The number of strings on success, -EINVAL if the property does not - * exist, -ENODATA if property does not have a value, and -EILSEQ if the string - * is not null-terminated within the length of the property data. + * terminated string contain in it. 
Returns the number of strings on + * success, -EINVAL if the property does not exist, -ENODATA if property + * does not have a value, and -EILSEQ if the string is not null-terminated + * within the length of the property data. */ static inline int of_property_count_strings(const struct device_node *np, const char *propname) @@ -1197,14 +985,13 @@ static inline int of_property_count_strings(const struct device_node *np, * @np: device node from which the property value is to be read. * @propname: name of the property to be searched. * @index: index of the string in the list of strings - * @output: pointer to null terminated return string, modified only if + * @out_string: pointer to null terminated return string, modified only if * return value is 0. * * Search for a property in a device tree node and retrieve a null * terminated string value (pointer to data, not a copy) in the list of strings * contained in that property. - * - * Return: 0 on success, -EINVAL if the property does not exist, -ENODATA if + * Returns 0 on success, -EINVAL if the property does not exist, -ENODATA if * property does not have a value, and -EILSEQ if the string is not * null-terminated within the length of the property data. * @@ -1219,13 +1006,12 @@ static inline int of_property_read_string_index(const struct device_node *np, } /** - * of_property_read_bool - Find a property + * of_property_read_bool - Findfrom a property * @np: device node from which the property value is to be read. * @propname: name of the property to be searched. * * Search for a property in a device node. - * - * Return: true if the property exists false otherwise. + * Returns true if the property exists false otherwise. 
*/ static inline bool of_property_read_bool(const struct device_node *np, const char *propname) @@ -1304,10 +1090,6 @@ static inline int of_property_read_s32(const struct device_node *np, for (child = of_get_next_available_child(parent, NULL); child != NULL; \ child = of_get_next_available_child(parent, child)) -#define for_each_of_cpu_node(cpu) \ - for (cpu = of_get_next_cpu_node(NULL); cpu != NULL; \ - cpu = of_get_next_cpu_node(cpu)) - #define for_each_node_with_property(dn, prop_name) \ for (dn = of_find_node_with_property(NULL, prop_name); dn; \ dn = of_find_node_with_property(dn, prop_name)) @@ -1334,22 +1116,18 @@ static inline int of_get_available_child_count(const struct device_node *np) return num; } -#define _OF_DECLARE_STUB(table, name, compat, fn, fn_type) \ - static const struct of_device_id __of_table_##name \ - __attribute__((unused)) \ - = { .compatible = compat, \ - .data = (fn == (fn_type)NULL) ? fn : fn } - #if defined(CONFIG_OF) && !defined(MODULE) #define _OF_DECLARE(table, name, compat, fn, fn_type) \ static const struct of_device_id __of_table_##name \ - __used __section("__" #table "_of_table") \ - __aligned(__alignof__(struct of_device_id)) \ + __used __section(__##table##_of_table) \ = { .compatible = compat, \ .data = (fn == (fn_type)NULL) ? fn : fn } #else #define _OF_DECLARE(table, name, compat, fn, fn_type) \ - _OF_DECLARE_STUB(table, name, compat, fn, fn_type) + static const struct of_device_id __of_table_##name \ + __attribute__((unused)) \ + = { .compatible = compat, \ + .data = (fn == (fn_type)NULL) ? 
fn : fn } #endif typedef int (*of_init_fn_2)(struct device_node *, struct device_node *); @@ -1470,71 +1248,48 @@ static inline int of_reconfig_get_state_change(unsigned long action, } #endif /* CONFIG_OF_DYNAMIC */ +/* CONFIG_OF_RESOLVE api */ +extern int of_resolve_phandles(struct device_node *tree); + /** * of_device_is_system_power_controller - Tells if system-power-controller is found for device_node * @np: Pointer to the given device_node * - * Return: true if present false otherwise + * return true if present false otherwise */ static inline bool of_device_is_system_power_controller(const struct device_node *np) { return of_property_read_bool(np, "system-power-controller"); } -/* +/** * Overlay support */ -enum of_overlay_notify_action { - OF_OVERLAY_PRE_APPLY = 0, - OF_OVERLAY_POST_APPLY, - OF_OVERLAY_PRE_REMOVE, - OF_OVERLAY_POST_REMOVE, -}; - -struct of_overlay_notify_data { - struct device_node *overlay; - struct device_node *target; -}; - #ifdef CONFIG_OF_OVERLAY -int of_overlay_fdt_apply(const void *overlay_fdt, u32 overlay_fdt_size, - int *ovcs_id); -int of_overlay_remove(int *ovcs_id); -int of_overlay_remove_all(void); - -int of_overlay_notifier_register(struct notifier_block *nb); -int of_overlay_notifier_unregister(struct notifier_block *nb); +/* ID based overlays; the API for external users */ +int of_overlay_create(struct device_node *tree); +int of_overlay_destroy(int id); +int of_overlay_destroy_all(void); #else -static inline int of_overlay_fdt_apply(void *overlay_fdt, u32 overlay_fdt_size, - int *ovcs_id) +static inline int of_overlay_create(struct device_node *tree) { return -ENOTSUPP; } -static inline int of_overlay_remove(int *ovcs_id) +static inline int of_overlay_destroy(int id) { return -ENOTSUPP; } -static inline int of_overlay_remove_all(void) +static inline int of_overlay_destroy_all(void) { return -ENOTSUPP; } -static inline int of_overlay_notifier_register(struct notifier_block *nb) -{ - return 0; -} - -static inline int 
of_overlay_notifier_unregister(struct notifier_block *nb) -{ - return 0; -} - #endif #endif /* _LINUX_OF_H */ diff --git a/include/linux/of_address.h b/include/linux/of_address.h index 45598dbec2..37864734ca 100644 --- a/include/linux/of_address.h +++ b/include/linux/of_address.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef __OF_ADDRESS_H #define __OF_ADDRESS_H #include @@ -6,34 +5,24 @@ #include #include -struct of_bus; - struct of_pci_range_parser { struct device_node *node; - struct of_bus *bus; const __be32 *range; const __be32 *end; - int na; - int ns; + int np; int pna; - bool dma; }; -#define of_range_parser of_pci_range_parser struct of_pci_range { - union { - u64 pci_addr; - u64 bus_addr; - }; + u32 pci_space; + u64 pci_addr; u64 cpu_addr; u64 size; u32 flags; }; -#define of_range of_pci_range #define for_each_of_pci_range(parser, range) \ for (; of_pci_range_parser_one(parser, range);) -#define for_each_of_range for_each_of_pci_range /* Translate a DMA address from device space to CPU space */ extern u64 of_translate_dma_address(struct device_node *dev, @@ -43,6 +32,10 @@ extern u64 of_translate_dma_address(struct device_node *dev, extern u64 of_translate_address(struct device_node *np, const __be32 *addr); extern int of_address_to_resource(struct device_node *dev, int index, struct resource *r); +extern struct device_node *of_find_matching_node_by_address( + struct device_node *from, + const struct of_device_id *matches, + u64 base_address); extern void __iomem *of_iomap(struct device_node *device, int index); void __iomem *of_io_request_and_map(struct device_node *device, int index, const char *name); @@ -51,21 +44,16 @@ void __iomem *of_io_request_and_map(struct device_node *device, * the address space flags too. 
The PCI version uses a BAR number * instead of an absolute index */ -extern const __be32 *__of_get_address(struct device_node *dev, int index, int bar_no, - u64 *size, unsigned int *flags); +extern const __be32 *of_get_address(struct device_node *dev, int index, + u64 *size, unsigned int *flags); extern int of_pci_range_parser_init(struct of_pci_range_parser *parser, struct device_node *node); -extern int of_pci_dma_range_parser_init(struct of_pci_range_parser *parser, - struct device_node *node); extern struct of_pci_range *of_pci_range_parser_one( struct of_pci_range_parser *parser, struct of_pci_range *range); -extern int of_pci_address_to_resource(struct device_node *dev, int bar, - struct resource *r); -extern int of_pci_range_to_resource(struct of_pci_range *range, - struct device_node *np, - struct resource *res); +extern int of_dma_get_range(struct device_node *np, u64 *dma_addr, + u64 *paddr, u64 *size); extern bool of_dma_is_coherent(struct device_node *np); #else /* CONFIG_OF_ADDRESS */ static inline void __iomem *of_io_request_and_map(struct device_node *device, @@ -80,8 +68,16 @@ static inline u64 of_translate_address(struct device_node *np, return OF_BAD_ADDR; } -static inline const __be32 *__of_get_address(struct device_node *dev, int index, int bar_no, - u64 *size, unsigned int *flags) +static inline struct device_node *of_find_matching_node_by_address( + struct device_node *from, + const struct of_device_id *matches, + u64 base_address) +{ + return NULL; +} + +static inline const __be32 *of_get_address(struct device_node *dev, int index, + u64 *size, unsigned int *flags) { return NULL; } @@ -89,13 +85,7 @@ static inline const __be32 *__of_get_address(struct device_node *dev, int index, static inline int of_pci_range_parser_init(struct of_pci_range_parser *parser, struct device_node *node) { - return -ENOSYS; -} - -static inline int of_pci_dma_range_parser_init(struct of_pci_range_parser *parser, - struct device_node *node) -{ - return -ENOSYS; + 
return -1; } static inline struct of_pci_range *of_pci_range_parser_one( @@ -105,17 +95,10 @@ static inline struct of_pci_range *of_pci_range_parser_one( return NULL; } -static inline int of_pci_address_to_resource(struct device_node *dev, int bar, - struct resource *r) +static inline int of_dma_get_range(struct device_node *np, u64 *dma_addr, + u64 *paddr, u64 *size) { - return -ENOSYS; -} - -static inline int of_pci_range_to_resource(struct of_pci_range *range, - struct device_node *np, - struct resource *res) -{ - return -ENOSYS; + return -ENODEV; } static inline bool of_dma_is_coherent(struct device_node *np) @@ -140,18 +123,34 @@ static inline void __iomem *of_iomap(struct device_node *device, int index) return NULL; } #endif -#define of_range_parser_init of_pci_range_parser_init -static inline const __be32 *of_get_address(struct device_node *dev, int index, - u64 *size, unsigned int *flags) +#if defined(CONFIG_OF_ADDRESS) && defined(CONFIG_PCI) +extern const __be32 *of_get_pci_address(struct device_node *dev, int bar_no, + u64 *size, unsigned int *flags); +extern int of_pci_address_to_resource(struct device_node *dev, int bar, + struct resource *r); +extern int of_pci_range_to_resource(struct of_pci_range *range, + struct device_node *np, + struct resource *res); +#else /* CONFIG_OF_ADDRESS && CONFIG_PCI */ +static inline int of_pci_address_to_resource(struct device_node *dev, int bar, + struct resource *r) { - return __of_get_address(dev, index, -1, size, flags); + return -ENOSYS; } -static inline const __be32 *of_get_pci_address(struct device_node *dev, int bar_no, - u64 *size, unsigned int *flags) +static inline const __be32 *of_get_pci_address(struct device_node *dev, + int bar_no, u64 *size, unsigned int *flags) { - return __of_get_address(dev, -1, bar_no, size, flags); + return NULL; } +static inline int of_pci_range_to_resource(struct of_pci_range *range, + struct device_node *np, + struct resource *res) +{ + return -ENOSYS; +} +#endif /* 
CONFIG_OF_ADDRESS && CONFIG_PCI */ #endif /* __OF_ADDRESS_H */ + diff --git a/include/linux/of_device.h b/include/linux/of_device.h index 1d7992a02e..cc7dd687a8 100644 --- a/include/linux/of_device.h +++ b/include/linux/of_device.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_OF_DEVICE_H #define _LINUX_OF_DEVICE_H @@ -14,6 +13,7 @@ struct device; #ifdef CONFIG_OF extern const struct of_device_id *of_match_device( const struct of_device_id *matches, const struct device *dev); +extern void of_device_make_bus_id(struct device *dev); /** * of_driver_match_device - Tell if a driver's of_match_table matches a device. @@ -26,36 +26,36 @@ static inline int of_driver_match_device(struct device *dev, return of_match_device(drv->of_match_table, dev) != NULL; } +extern struct platform_device *of_dev_get(struct platform_device *dev); +extern void of_dev_put(struct platform_device *dev); + extern int of_device_add(struct platform_device *pdev); extern int of_device_register(struct platform_device *ofdev); extern void of_device_unregister(struct platform_device *ofdev); extern const void *of_device_get_match_data(const struct device *dev); -extern ssize_t of_device_modalias(struct device *dev, char *str, ssize_t len); -extern int of_device_request_module(struct device *dev); +extern ssize_t of_device_get_modalias(struct device *dev, + char *str, ssize_t len); extern void of_device_uevent(struct device *dev, struct kobj_uevent_env *env); extern int of_device_uevent_modalias(struct device *dev, struct kobj_uevent_env *env); +static inline void of_device_node_put(struct device *dev) +{ + of_node_put(dev->of_node); +} + static inline struct device_node *of_cpu_device_node_get(int cpu) { struct device *cpu_dev; cpu_dev = get_cpu_device(cpu); if (!cpu_dev) - return of_get_cpu_node(cpu, NULL); + return NULL; return of_node_get(cpu_dev->of_node); } -int of_dma_configure_id(struct device *dev, - struct device_node *np, - bool force_dma, const u32 *id); -static 
inline int of_dma_configure(struct device *dev, - struct device_node *np, - bool force_dma) -{ - return of_dma_configure_id(dev, np, force_dma, NULL); -} +void of_dma_configure(struct device *dev, struct device_node *np); #else /* CONFIG_OF */ static inline int of_driver_match_device(struct device *dev, @@ -72,13 +72,8 @@ static inline const void *of_device_get_match_data(const struct device *dev) return NULL; } -static inline int of_device_modalias(struct device *dev, - char *str, ssize_t len) -{ - return -ENODEV; -} - -static inline int of_device_request_module(struct device *dev) +static inline int of_device_get_modalias(struct device *dev, + char *str, ssize_t len) { return -ENODEV; } @@ -89,29 +84,22 @@ static inline int of_device_uevent_modalias(struct device *dev, return -ENODEV; } -static inline const struct of_device_id *of_match_device( +static inline void of_device_node_put(struct device *dev) { } + +static inline const struct of_device_id *__of_match_device( const struct of_device_id *matches, const struct device *dev) { return NULL; } +#define of_match_device(matches, dev) \ + __of_match_device(of_match_ptr(matches), (dev)) static inline struct device_node *of_cpu_device_node_get(int cpu) { return NULL; } - -static inline int of_dma_configure_id(struct device *dev, - struct device_node *np, - bool force_dma) -{ - return 0; -} -static inline int of_dma_configure(struct device *dev, - struct device_node *np, - bool force_dma) -{ - return 0; -} +static inline void of_dma_configure(struct device *dev, struct device_node *np) +{} #endif /* CONFIG_OF */ #endif /* _LINUX_OF_DEVICE_H */ diff --git a/include/linux/of_dma.h b/include/linux/of_dma.h index fd706cdf25..b90d8ec57c 100644 --- a/include/linux/of_dma.h +++ b/include/linux/of_dma.h @@ -1,10 +1,13 @@ -/* SPDX-License-Identifier: GPL-2.0 */ /* * OF helpers for DMA request / controller * * Based on of_gpio.h * * Copyright (C) 2012 Texas Instruments Incorporated - http://www.ti.com/ + * + * This program is 
free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. */ #ifndef __LINUX_OF_DMA_H diff --git a/include/linux/of_fdt.h b/include/linux/of_fdt.h index cf6a65b94d..4341f32516 100644 --- a/include/linux/of_fdt.h +++ b/include/linux/of_fdt.h @@ -1,9 +1,12 @@ -/* SPDX-License-Identifier: GPL-2.0 */ /* * Definitions for working with the Flattened Device Tree data format * * Copyright 2009 Benjamin Herrenschmidt, IBM Corp * benh@kernel.crashing.org + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * version 2 as published by the Free Software Foundation. */ #ifndef _LINUX_OF_FDT_H @@ -23,6 +26,18 @@ struct device_node; /* For scanning an arbitrary device-tree at any time */ +extern char *of_fdt_get_string(const void *blob, u32 offset); +extern void *of_fdt_get_property(const void *blob, + unsigned long node, + const char *name, + int *size); +extern int of_fdt_is_compatible(const void *blob, + unsigned long node, + const char *compat); +extern bool of_fdt_is_big_endian(const void *blob, + unsigned long node); +extern int of_fdt_match(const void *blob, unsigned long node, + const char *const *compat); extern void *of_fdt_unflatten_tree(const unsigned long *blob, struct device_node *dad, struct device_node **mynodes); @@ -35,28 +50,18 @@ extern void *initial_boot_params; extern char __dtb_start[]; extern char __dtb_end[]; -/* Other Prototypes */ -extern u64 of_flat_dt_translate_address(unsigned long node); -extern void of_fdt_limit_memory(int limit); -#endif /* CONFIG_OF_FLATTREE */ - -#ifdef CONFIG_OF_EARLY_FLATTREE /* For scanning the flat device-tree at boot time */ extern int of_scan_flat_dt(int (*it)(unsigned long node, const char *uname, int depth, void *data), void *data); -extern int of_scan_flat_dt_subnodes(unsigned long node, - int (*it)(unsigned long node, - const 
char *uname, - void *data), - void *data); extern int of_get_flat_dt_subnode_by_name(unsigned long node, const char *uname); extern const void *of_get_flat_dt_prop(unsigned long node, const char *name, int *size); extern int of_flat_dt_is_compatible(unsigned long node, const char *name); +extern int of_flat_dt_match(unsigned long node, const char *const *matches); extern unsigned long of_get_flat_dt_root(void); -extern uint32_t of_get_flat_dt_phandle(unsigned long node); +extern int of_get_flat_dt_size(void); extern int early_init_dt_scan_chosen(unsigned long node, const char *uname, int depth, void *data); @@ -65,8 +70,10 @@ extern int early_init_dt_scan_memory(unsigned long node, const char *uname, extern int early_init_dt_scan_chosen_stdout(void); extern void early_init_fdt_scan_reserved_mem(void); extern void early_init_fdt_reserve_self(void); -extern void __init early_init_dt_scan_chosen_arch(unsigned long node); extern void early_init_dt_add_memory_arch(u64 base, u64 size); +extern int early_init_dt_reserve_memory_arch(phys_addr_t base, phys_addr_t size, + bool no_map); +extern void * early_init_dt_alloc_memory_arch(u64 size, u64 align); extern u64 dt_mem_next_cell(int s, const __be32 **cellp); /* Early flat tree scan hooks */ @@ -86,14 +93,16 @@ extern void unflatten_device_tree(void); extern void unflatten_and_copy_device_tree(void); extern void early_init_devtree(void *); extern void early_get_first_memblock_info(void *, phys_addr_t *); -#else /* CONFIG_OF_EARLY_FLATTREE */ +extern u64 of_flat_dt_translate_address(unsigned long node); +extern void of_fdt_limit_memory(int limit); +#else /* CONFIG_OF_FLATTREE */ static inline int early_init_dt_scan_chosen_stdout(void) { return -ENODEV; } static inline void early_init_fdt_scan_reserved_mem(void) {} static inline void early_init_fdt_reserve_self(void) {} static inline const char *of_flat_dt_get_machine_name(void) { return NULL; } static inline void unflatten_device_tree(void) {} static inline void 
unflatten_and_copy_device_tree(void) {} -#endif /* CONFIG_OF_EARLY_FLATTREE */ +#endif /* CONFIG_OF_FLATTREE */ #endif /* __ASSEMBLY__ */ #endif /* _LINUX_OF_FDT_H */ diff --git a/include/linux/of_gpio.h b/include/linux/of_gpio.h index 8bf2ea8596..3f87ea5b8b 100644 --- a/include/linux/of_gpio.h +++ b/include/linux/of_gpio.h @@ -1,18 +1,23 @@ -/* SPDX-License-Identifier: GPL-2.0+ */ /* * OF helpers for the GPIO API * * Copyright (c) 2007-2008 MontaVista Software, Inc. * * Author: Anton Vorontsov + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. */ #ifndef __LINUX_OF_GPIO_H #define __LINUX_OF_GPIO_H #include -#include -#include /* FIXME: Shouldn't be here */ +#include +#include +#include #include struct device_node; @@ -25,16 +30,10 @@ struct device_node; enum of_gpio_flags { OF_GPIO_ACTIVE_LOW = 0x1, OF_GPIO_SINGLE_ENDED = 0x2, - OF_GPIO_OPEN_DRAIN = 0x4, - OF_GPIO_TRANSITORY = 0x8, - OF_GPIO_PULL_UP = 0x10, - OF_GPIO_PULL_DOWN = 0x20, }; #ifdef CONFIG_OF_GPIO -#include - /* * OF GPIO chip for memory mapped banks */ @@ -49,7 +48,7 @@ static inline struct of_mm_gpio_chip *to_of_mm_gpio_chip(struct gpio_chip *gc) return container_of(gc, struct of_mm_gpio_chip, gc); } -extern int of_get_named_gpio_flags(const struct device_node *np, +extern int of_get_named_gpio_flags(struct device_node *np, const char *list_name, int index, enum of_gpio_flags *flags); extern int of_mm_gpiochip_add_data(struct device_node *np, @@ -62,12 +61,14 @@ static inline int of_mm_gpiochip_add(struct device_node *np, } extern void of_mm_gpiochip_remove(struct of_mm_gpio_chip *mm_gc); +extern int of_gpio_simple_xlate(struct gpio_chip *gc, + const struct of_phandle_args *gpiospec, + u32 *flags); + #else /* CONFIG_OF_GPIO */ -#include - /* Drivers may not strictly depend on the GPIO support, 
so let them link. */ -static inline int of_get_named_gpio_flags(const struct device_node *np, +static inline int of_get_named_gpio_flags(struct device_node *np, const char *list_name, int index, enum of_gpio_flags *flags) { if (flags) @@ -76,6 +77,13 @@ static inline int of_get_named_gpio_flags(const struct device_node *np, return -ENOSYS; } +static inline int of_gpio_simple_xlate(struct gpio_chip *gc, + const struct of_phandle_args *gpiospec, + u32 *flags) +{ + return -ENOSYS; +} + #endif /* CONFIG_OF_GPIO */ /** @@ -98,8 +106,7 @@ static inline int of_get_named_gpio_flags(const struct device_node *np, * The above example defines four GPIOs, two of which are not specified. * This function will return '4' */ -static inline int of_gpio_named_count(const struct device_node *np, - const char *propname) +static inline int of_gpio_named_count(struct device_node *np, const char* propname) { return of_count_phandle_with_args(np, propname, "#gpio-cells"); } @@ -110,12 +117,12 @@ static inline int of_gpio_named_count(const struct device_node *np, * * Same as of_gpio_named_count, but hard coded to use the 'gpios' property */ -static inline int of_gpio_count(const struct device_node *np) +static inline int of_gpio_count(struct device_node *np) { return of_gpio_named_count(np, "gpios"); } -static inline int of_get_gpio_flags(const struct device_node *np, int index, +static inline int of_get_gpio_flags(struct device_node *np, int index, enum of_gpio_flags *flags) { return of_get_named_gpio_flags(np, "gpios", index, flags); @@ -130,7 +137,7 @@ static inline int of_get_gpio_flags(const struct device_node *np, int index, * Returns GPIO number to use with Linux generic GPIO API, or one of the errno * value on the error condition. 
*/ -static inline int of_get_named_gpio(const struct device_node *np, +static inline int of_get_named_gpio(struct device_node *np, const char *propname, int index) { return of_get_named_gpio_flags(np, propname, index, NULL); @@ -144,7 +151,7 @@ static inline int of_get_named_gpio(const struct device_node *np, * Returns GPIO number to use with Linux generic GPIO API, or one of the errno * value on the error condition. */ -static inline int of_get_gpio(const struct device_node *np, int index) +static inline int of_get_gpio(struct device_node *np, int index) { return of_get_gpio_flags(np, index, NULL); } diff --git a/include/linux/of_graph.h b/include/linux/of_graph.h index 4d7756087b..bb3a5a2cd5 100644 --- a/include/linux/of_graph.h +++ b/include/linux/of_graph.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ /* * OF graph binding parsing helpers * @@ -7,6 +6,10 @@ * * Copyright (C) 2012 Renesas Electronics Corp. * Author: Guennadi Liakhovetski + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. 
*/ #ifndef __LINUX_OF_GRAPH_H #define __LINUX_OF_GRAPH_H @@ -38,41 +41,24 @@ struct of_endpoint { child = of_graph_get_next_endpoint(parent, child)) #ifdef CONFIG_OF -bool of_graph_is_present(const struct device_node *node); int of_graph_parse_endpoint(const struct device_node *node, struct of_endpoint *endpoint); -int of_graph_get_endpoint_count(const struct device_node *np); struct device_node *of_graph_get_port_by_id(struct device_node *node, u32 id); struct device_node *of_graph_get_next_endpoint(const struct device_node *parent, struct device_node *previous); struct device_node *of_graph_get_endpoint_by_regs( const struct device_node *parent, int port_reg, int reg); -struct device_node *of_graph_get_remote_endpoint( - const struct device_node *node); -struct device_node *of_graph_get_port_parent(struct device_node *node); struct device_node *of_graph_get_remote_port_parent( const struct device_node *node); struct device_node *of_graph_get_remote_port(const struct device_node *node); -struct device_node *of_graph_get_remote_node(const struct device_node *node, - u32 port, u32 endpoint); #else -static inline bool of_graph_is_present(const struct device_node *node) -{ - return false; -} - static inline int of_graph_parse_endpoint(const struct device_node *node, struct of_endpoint *endpoint) { return -ENOSYS; } -static inline int of_graph_get_endpoint_count(const struct device_node *np) -{ - return 0; -} - static inline struct device_node *of_graph_get_port_by_id( struct device_node *node, u32 id) { @@ -92,18 +78,6 @@ static inline struct device_node *of_graph_get_endpoint_by_regs( return NULL; } -static inline struct device_node *of_graph_get_remote_endpoint( - const struct device_node *node) -{ - return NULL; -} - -static inline struct device_node *of_graph_get_port_parent( - struct device_node *node) -{ - return NULL; -} - static inline struct device_node *of_graph_get_remote_port_parent( const struct device_node *node) { @@ -115,12 +89,6 @@ static inline 
struct device_node *of_graph_get_remote_port( { return NULL; } -static inline struct device_node *of_graph_get_remote_node( - const struct device_node *node, - u32 port, u32 endpoint) -{ - return NULL; -} #endif /* CONFIG_OF */ diff --git a/include/linux/of_iommu.h b/include/linux/of_iommu.h index 55c1eb300a..e80b9c762a 100644 --- a/include/linux/of_iommu.h +++ b/include/linux/of_iommu.h @@ -1,26 +1,44 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef __OF_IOMMU_H #define __OF_IOMMU_H -struct device; -struct device_node; -struct iommu_ops; +#include +#include +#include #ifdef CONFIG_OF_IOMMU +extern int of_get_dma_window(struct device_node *dn, const char *prefix, + int index, unsigned long *busno, dma_addr_t *addr, + size_t *size); + extern const struct iommu_ops *of_iommu_configure(struct device *dev, - struct device_node *master_np, - const u32 *id); + struct device_node *master_np); #else +static inline int of_get_dma_window(struct device_node *dn, const char *prefix, + int index, unsigned long *busno, dma_addr_t *addr, + size_t *size) +{ + return -EINVAL; +} + static inline const struct iommu_ops *of_iommu_configure(struct device *dev, - struct device_node *master_np, - const u32 *id) + struct device_node *master_np) { return NULL; } #endif /* CONFIG_OF_IOMMU */ +void of_iommu_set_ops(struct device_node *np, const struct iommu_ops *ops); +const struct iommu_ops *of_iommu_get_ops(struct device_node *np); + +extern struct of_device_id __iommu_of_table; + +typedef int (*of_iommu_init_fn)(struct device_node *); + +#define IOMMU_OF_DECLARE(name, compat, fn) \ + _OF_DECLARE(iommu, name, compat, fn, of_iommu_init_fn) + #endif /* __OF_IOMMU_H */ diff --git a/include/linux/of_irq.h b/include/linux/of_irq.h index aaf219bd03..1e0deb8e84 100644 --- a/include/linux/of_irq.h +++ b/include/linux/of_irq.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef __OF_IRQ_H #define __OF_IRQ_H @@ -33,6 +32,8 @@ static inline int of_irq_parse_oldworld(struct device_node 
*device, int index, #endif /* CONFIG_PPC32 && CONFIG_PPC_PMAC */ extern int of_irq_parse_raw(const __be32 *addr, struct of_phandle_args *out_irq); +extern int of_irq_parse_one(struct device_node *device, int index, + struct of_phandle_args *out_irq); extern unsigned int irq_create_of_mapping(struct of_phandle_args *irq_data); extern int of_irq_to_resource(struct device_node *dev, int index, struct resource *r); @@ -40,8 +41,6 @@ extern int of_irq_to_resource(struct device_node *dev, int index, extern void of_irq_init(const struct of_device_id *matches); #ifdef CONFIG_OF_IRQ -extern int of_irq_parse_one(struct device_node *device, int index, - struct of_phandle_args *out_irq); extern int of_irq_count(struct device_node *dev); extern int of_irq_get(struct device_node *dev, int index); extern int of_irq_get_byname(struct device_node *dev, const char *name); @@ -52,16 +51,10 @@ extern struct irq_domain *of_msi_get_domain(struct device *dev, struct device_node *np, enum irq_domain_bus_token token); extern struct irq_domain *of_msi_map_get_device_domain(struct device *dev, - u32 id, - u32 bus_token); + u32 rid); extern void of_msi_configure(struct device *dev, struct device_node *np); -u32 of_msi_map_id(struct device *dev, struct device_node *msi_np, u32 id_in); +u32 of_msi_map_rid(struct device *dev, struct device_node *msi_np, u32 rid_in); #else -static inline int of_irq_parse_one(struct device_node *device, int index, - struct of_phandle_args *out_irq) -{ - return -EINVAL; -} static inline int of_irq_count(struct device_node *dev) { return 0; @@ -91,17 +84,17 @@ static inline struct irq_domain *of_msi_get_domain(struct device *dev, return NULL; } static inline struct irq_domain *of_msi_map_get_device_domain(struct device *dev, - u32 id, u32 bus_token) + u32 rid) { return NULL; } static inline void of_msi_configure(struct device *dev, struct device_node *np) { } -static inline u32 of_msi_map_id(struct device *dev, - struct device_node *msi_np, u32 id_in) +static inline 
u32 of_msi_map_rid(struct device *dev, + struct device_node *msi_np, u32 rid_in) { - return id_in; + return rid_in; } #endif diff --git a/include/linux/of_mdio.h b/include/linux/of_mdio.h index da633d34ab..a58cca8bcb 100644 --- a/include/linux/of_mdio.h +++ b/include/linux/of_mdio.h @@ -1,67 +1,38 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * OF helpers for the MDIO (Ethernet PHY) API * * Copyright (c) 2009 Secret Lab Technologies, Ltd. + * + * This file is released under the GPLv2 */ #ifndef __LINUX_OF_MDIO_H #define __LINUX_OF_MDIO_H -#include #include #include -#if IS_ENABLED(CONFIG_OF_MDIO) -bool of_mdiobus_child_is_phy(struct device_node *child); -int of_mdiobus_register(struct mii_bus *mdio, struct device_node *np); -int devm_of_mdiobus_register(struct device *dev, struct mii_bus *mdio, - struct device_node *np); -struct mdio_device *of_mdio_find_device(struct device_node *np); -struct phy_device *of_phy_find_device(struct device_node *phy_np); -struct phy_device * -of_phy_connect(struct net_device *dev, struct device_node *phy_np, - void (*hndlr)(struct net_device *), u32 flags, - phy_interface_t iface); -struct phy_device * +#ifdef CONFIG_OF +extern int of_mdiobus_register(struct mii_bus *mdio, struct device_node *np); +extern struct phy_device *of_phy_find_device(struct device_node *phy_np); +extern struct phy_device *of_phy_connect(struct net_device *dev, + struct device_node *phy_np, + void (*hndlr)(struct net_device *), + u32 flags, phy_interface_t iface); +extern struct phy_device * of_phy_get_and_connect(struct net_device *dev, struct device_node *np, void (*hndlr)(struct net_device *)); +struct phy_device *of_phy_attach(struct net_device *dev, + struct device_node *phy_np, u32 flags, + phy_interface_t iface); -struct mii_bus *of_mdio_find_bus(struct device_node *mdio_np); -int of_phy_register_fixed_link(struct device_node *np); -void of_phy_deregister_fixed_link(struct device_node *np); -bool of_phy_is_fixed_link(struct device_node *np); -int 
of_mdiobus_phy_device_register(struct mii_bus *mdio, struct phy_device *phy, - struct device_node *child, u32 addr); - -static inline int of_mdio_parse_addr(struct device *dev, - const struct device_node *np) -{ - u32 addr; - int ret; - - ret = of_property_read_u32(np, "reg", &addr); - if (ret < 0) { - dev_err(dev, "%s has invalid PHY address\n", np->full_name); - return ret; - } - - /* A PHY must have a reg property in the range [0-31] */ - if (addr >= PHY_MAX_ADDR) { - dev_err(dev, "%s PHY address %i is too large\n", - np->full_name, addr); - return -EINVAL; - } - - return addr; -} - -#else /* CONFIG_OF_MDIO */ -static inline bool of_mdiobus_child_is_phy(struct device_node *child) -{ - return false; -} +extern struct mii_bus *of_mdio_find_bus(struct device_node *mdio_np); +extern int of_mdio_parse_addr(struct device *dev, const struct device_node *np); +extern int of_phy_register_fixed_link(struct device_node *np); +extern void of_phy_deregister_fixed_link(struct device_node *np); +extern bool of_phy_is_fixed_link(struct device_node *np); +#else /* CONFIG_OF */ static inline int of_mdiobus_register(struct mii_bus *mdio, struct device_node *np) { /* @@ -72,18 +43,6 @@ static inline int of_mdiobus_register(struct mii_bus *mdio, struct device_node * return mdiobus_register(mdio); } -static inline int devm_of_mdiobus_register(struct device *dev, - struct mii_bus *mdio, - struct device_node *np) -{ - return devm_mdiobus_register(dev, mdio); -} - -static inline struct mdio_device *of_mdio_find_device(struct device_node *np) -{ - return NULL; -} - static inline struct phy_device *of_phy_find_device(struct device_node *phy_np) { return NULL; @@ -104,6 +63,13 @@ of_phy_get_and_connect(struct net_device *dev, struct device_node *np, return NULL; } +static inline struct phy_device *of_phy_attach(struct net_device *dev, + struct device_node *phy_np, + u32 flags, phy_interface_t iface) +{ + return NULL; +} + static inline struct mii_bus *of_mdio_find_bus(struct device_node 
*mdio_np) { return NULL; @@ -125,13 +91,6 @@ static inline bool of_phy_is_fixed_link(struct device_node *np) { return false; } - -static inline int of_mdiobus_phy_device_register(struct mii_bus *mdio, - struct phy_device *phy, - struct device_node *child, u32 addr) -{ - return -ENOSYS; -} #endif diff --git a/include/linux/of_net.h b/include/linux/of_net.h index daef3b0d92..9cd72aab76 100644 --- a/include/linux/of_net.h +++ b/include/linux/of_net.h @@ -1,30 +1,28 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * OF helpers for network devices. + * + * This file is released under the GPLv2 */ #ifndef __LINUX_OF_NET_H #define __LINUX_OF_NET_H -#include - #ifdef CONFIG_OF_NET #include struct net_device; -extern int of_get_phy_mode(struct device_node *np, phy_interface_t *interface); -extern int of_get_mac_address(struct device_node *np, u8 *mac); +extern int of_get_phy_mode(struct device_node *np); +extern const void *of_get_mac_address(struct device_node *np); extern struct net_device *of_find_net_device_by_node(struct device_node *np); #else -static inline int of_get_phy_mode(struct device_node *np, - phy_interface_t *interface) +static inline int of_get_phy_mode(struct device_node *np) { return -ENODEV; } -static inline int of_get_mac_address(struct device_node *np, u8 *mac) +static inline const void *of_get_mac_address(struct device_node *np) { - return -ENODEV; + return NULL; } static inline struct net_device *of_find_net_device_by_node(struct device_node *np) diff --git a/include/linux/of_pci.h b/include/linux/of_pci.h index 29658c0ee7..7fd5cfce91 100644 --- a/include/linux/of_pci.h +++ b/include/linux/of_pci.h @@ -1,19 +1,31 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef __OF_PCI_H #define __OF_PCI_H -#include -#include +#include +#include struct pci_dev; +struct of_phandle_args; struct device_node; -#if IS_ENABLED(CONFIG_OF) && IS_ENABLED(CONFIG_PCI) +#ifdef CONFIG_OF_PCI +int of_irq_parse_pci(const struct pci_dev *pdev, struct of_phandle_args *out_irq); 
struct device_node *of_pci_find_child_device(struct device_node *parent, unsigned int devfn); int of_pci_get_devfn(struct device_node *np); +int of_irq_parse_and_map_pci(const struct pci_dev *dev, u8 slot, u8 pin); +int of_pci_parse_bus_range(struct device_node *node, struct resource *res); +int of_get_pci_domain_nr(struct device_node *node); void of_pci_check_probe_only(void); +int of_pci_map_rid(struct device_node *np, u32 rid, + const char *map_name, const char *map_mask_name, + struct device_node **target, u32 *id_out); #else +static inline int of_irq_parse_pci(const struct pci_dev *pdev, struct of_phandle_args *out_irq) +{ + return 0; +} + static inline struct device_node *of_pci_find_child_device(struct device_node *parent, unsigned int devfn) { @@ -25,17 +37,56 @@ static inline int of_pci_get_devfn(struct device_node *np) return -EINVAL; } -static inline void of_pci_check_probe_only(void) { } -#endif - -#if IS_ENABLED(CONFIG_OF_IRQ) -int of_irq_parse_and_map_pci(const struct pci_dev *dev, u8 slot, u8 pin); -#else static inline int of_irq_parse_and_map_pci(const struct pci_dev *dev, u8 slot, u8 pin) { return 0; } + +static inline int +of_pci_parse_bus_range(struct device_node *node, struct resource *res) +{ + return -EINVAL; +} + +static inline int +of_get_pci_domain_nr(struct device_node *node) +{ + return -1; +} + +static inline int of_pci_map_rid(struct device_node *np, u32 rid, + const char *map_name, const char *map_mask_name, + struct device_node **target, u32 *id_out) +{ + return -EINVAL; +} + +static inline void of_pci_check_probe_only(void) { } +#endif + +#if defined(CONFIG_OF_ADDRESS) +int of_pci_get_host_bridge_resources(struct device_node *dev, + unsigned char busno, unsigned char bus_max, + struct list_head *resources, resource_size_t *io_base); +#else +static inline int of_pci_get_host_bridge_resources(struct device_node *dev, + unsigned char busno, unsigned char bus_max, + struct list_head *resources, resource_size_t *io_base) +{ + return 
-EINVAL; +} +#endif + +#if defined(CONFIG_OF) && defined(CONFIG_PCI_MSI) +int of_pci_msi_chip_add(struct msi_controller *chip); +void of_pci_msi_chip_remove(struct msi_controller *chip); +struct msi_controller *of_pci_find_msi_chip_by_node(struct device_node *of_node); +#else +static inline int of_pci_msi_chip_add(struct msi_controller *chip) { return -EINVAL; } +static inline void of_pci_msi_chip_remove(struct msi_controller *chip) { } +static inline struct msi_controller * +of_pci_find_msi_chip_by_node(struct device_node *of_node) { return NULL; } #endif #endif diff --git a/include/linux/of_pdt.h b/include/linux/of_pdt.h index 89e4eb076a..7e09244bb6 100644 --- a/include/linux/of_pdt.h +++ b/include/linux/of_pdt.h @@ -1,9 +1,13 @@ -/* SPDX-License-Identifier: GPL-2.0+ */ /* * Definitions for building a device tree by calling into the * Open Firmware PROM. * * Copyright (C) 2010 Andres Salomon + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. */ #ifndef _LINUX_OF_PDT_H @@ -35,4 +39,6 @@ extern void *prom_early_alloc(unsigned long size); /* for building the device tree */ extern void of_pdt_build_devicetree(phandle root_node, struct of_pdt_ops *ops); +extern void (*of_pdt_build_more)(struct device_node *dp); + #endif /* _LINUX_OF_PDT_H */ diff --git a/include/linux/of_platform.h b/include/linux/of_platform.h index 84a966623e..956a1006ae 100644 --- a/include/linux/of_platform.h +++ b/include/linux/of_platform.h @@ -1,9 +1,14 @@ -/* SPDX-License-Identifier: GPL-2.0+ */ #ifndef _LINUX_OF_PLATFORM_H #define _LINUX_OF_PLATFORM_H /* * Copyright (C) 2006 Benjamin Herrenschmidt, IBM Corp. 
* + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. + * */ #include @@ -52,21 +57,13 @@ extern const struct of_device_id of_default_bus_match_table[]; extern struct platform_device *of_device_alloc(struct device_node *np, const char *bus_id, struct device *parent); -#ifdef CONFIG_OF extern struct platform_device *of_find_device_by_node(struct device_node *np); -#else -static inline struct platform_device *of_find_device_by_node(struct device_node *np) -{ - return NULL; -} -#endif /* Platform devices and busses creation */ extern struct platform_device *of_platform_device_create(struct device_node *np, const char *bus_id, struct device *parent); -extern int of_platform_device_destroy(struct device *dev, void *data); extern int of_platform_bus_probe(struct device_node *root, const struct of_device_id *matches, struct device *parent); @@ -79,10 +76,6 @@ extern int of_platform_default_populate(struct device_node *root, const struct of_dev_auxdata *lookup, struct device *parent); extern void of_platform_depopulate(struct device *parent); - -extern int devm_of_platform_populate(struct device *dev); - -extern void devm_of_platform_depopulate(struct device *dev); #else static inline int of_platform_populate(struct device_node *root, const struct of_device_id *matches, @@ -98,13 +91,6 @@ static inline int of_platform_default_populate(struct device_node *root, return -ENODEV; } static inline void of_platform_depopulate(struct device *parent) { } - -static inline int devm_of_platform_populate(struct device *dev) -{ - return -ENODEV; -} - -static inline void devm_of_platform_depopulate(struct device *dev) { } #endif #if defined(CONFIG_OF_DYNAMIC) && defined(CONFIG_OF_ADDRESS) diff --git a/include/linux/of_reserved_mem.h b/include/linux/of_reserved_mem.h index 
4de2a24cad..f8e1992d64 100644 --- a/include/linux/of_reserved_mem.h +++ b/include/linux/of_reserved_mem.h @@ -1,9 +1,7 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef __OF_RESERVED_MEM_H #define __OF_RESERVED_MEM_H #include -#include struct of_phandle_args; struct reserved_mem_ops; @@ -27,43 +25,36 @@ struct reserved_mem_ops { typedef int (*reservedmem_of_init_fn)(struct reserved_mem *rmem); -#ifdef CONFIG_OF_RESERVED_MEM - #define RESERVEDMEM_OF_DECLARE(name, compat, init) \ _OF_DECLARE(reservedmem, name, compat, init, reservedmem_of_init_fn) +#ifdef CONFIG_OF_RESERVED_MEM + int of_reserved_mem_device_init_by_idx(struct device *dev, struct device_node *np, int idx); -int of_reserved_mem_device_init_by_name(struct device *dev, - struct device_node *np, - const char *name); void of_reserved_mem_device_release(struct device *dev); -struct reserved_mem *of_reserved_mem_lookup(struct device_node *np); +int early_init_dt_alloc_reserved_memory_arch(phys_addr_t size, + phys_addr_t align, + phys_addr_t start, + phys_addr_t end, + bool nomap, + phys_addr_t *res_base); + +void fdt_init_reserved_mem(void); +void fdt_reserved_mem_save_node(unsigned long node, const char *uname, + phys_addr_t base, phys_addr_t size); #else - -#define RESERVEDMEM_OF_DECLARE(name, compat, init) \ - _OF_DECLARE_STUB(reservedmem, name, compat, init, reservedmem_of_init_fn) - static inline int of_reserved_mem_device_init_by_idx(struct device *dev, struct device_node *np, int idx) { return -ENOSYS; } - -static inline int of_reserved_mem_device_init_by_name(struct device *dev, - struct device_node *np, - const char *name) -{ - return -ENOSYS; -} - static inline void of_reserved_mem_device_release(struct device *pdev) { } -static inline struct reserved_mem *of_reserved_mem_lookup(struct device_node *np) -{ - return NULL; -} +static inline void fdt_init_reserved_mem(void) { } +static inline void fdt_reserved_mem_save_node(unsigned long node, + const char *uname, phys_addr_t base, phys_addr_t size) { 
} #endif /** diff --git a/include/linux/oid_registry.h b/include/linux/oid_registry.h index 0f4a890392..d2fa9ca42e 100644 --- a/include/linux/oid_registry.h +++ b/include/linux/oid_registry.h @@ -1,8 +1,12 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ /* ASN.1 Object identifier (OID) registry * * Copyright (C) 2012 Red Hat, Inc. All Rights Reserved. * Written by David Howells (dhowells@redhat.com) + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public Licence + * as published by the Free Software Foundation; either version + * 2 of the Licence, or (at your option) any later version. */ #ifndef _LINUX_OID_REGISTRY_H @@ -19,14 +23,8 @@ enum OID { OID_id_dsa_with_sha1, /* 1.2.840.10030.4.3 */ OID_id_dsa, /* 1.2.840.10040.4.1 */ - OID_id_ecPublicKey, /* 1.2.840.10045.2.1 */ - OID_id_prime192v1, /* 1.2.840.10045.3.1.1 */ - OID_id_prime256v1, /* 1.2.840.10045.3.1.7 */ OID_id_ecdsa_with_sha1, /* 1.2.840.10045.4.1 */ - OID_id_ecdsa_with_sha224, /* 1.2.840.10045.4.3.1 */ - OID_id_ecdsa_with_sha256, /* 1.2.840.10045.4.3.2 */ - OID_id_ecdsa_with_sha384, /* 1.2.840.10045.4.3.3 */ - OID_id_ecdsa_with_sha512, /* 1.2.840.10045.4.3.4 */ + OID_id_ecPublicKey, /* 1.2.840.10045.2.1 */ /* PKCS#1 {iso(1) member-body(2) us(840) rsadsi(113549) pkcs(1) pkcs-1(1)} */ OID_rsaEncryption, /* 1.2.840.113549.1.1.1 */ @@ -54,10 +52,6 @@ enum OID { OID_md4, /* 1.2.840.113549.2.4 */ OID_md5, /* 1.2.840.113549.2.5 */ - OID_mskrb5, /* 1.2.840.48018.1.2.2 */ - OID_krb5, /* 1.2.840.113554.1.2.2 */ - OID_krb5u2u, /* 1.2.840.113554.1.2.2.3 */ - /* Microsoft Authenticode & Software Publishing */ OID_msIndirectData, /* 1.3.6.1.4.1.311.2.1.4 */ OID_msStatementType, /* 1.3.6.1.4.1.311.2.1.11 */ @@ -66,16 +60,8 @@ enum OID { OID_msIndividualSPKeyPurpose, /* 1.3.6.1.4.1.311.2.1.21 */ OID_msOutlookExpress, /* 1.3.6.1.4.1.311.16.4 */ - OID_ntlmssp, /* 1.3.6.1.4.1.311.2.2.10 */ - - OID_spnego, /* 1.3.6.1.5.5.2 */ - - OID_IAKerb, /* 
1.3.6.1.5.2.5 */ - OID_PKU2U, /* 1.3.5.1.5.2.7 */ - OID_Scram, /* 1.3.6.1.5.5.14 */ OID_certAuthInfoAccess, /* 1.3.6.1.5.5.7.1.1 */ OID_sha1, /* 1.3.14.3.2.26 */ - OID_id_ansip384r1, /* 1.3.132.0.34 */ OID_sha256, /* 2.16.840.1.101.3.4.2.1 */ OID_sha384, /* 2.16.840.1.101.3.4.2.2 */ OID_sha512, /* 2.16.840.1.101.3.4.2.3 */ @@ -107,44 +93,10 @@ enum OID { OID_authorityKeyIdentifier, /* 2.5.29.35 */ OID_extKeyUsage, /* 2.5.29.37 */ - /* Heimdal mechanisms */ - OID_NetlogonMechanism, /* 1.2.752.43.14.2 */ - OID_appleLocalKdcSupported, /* 1.2.752.43.14.3 */ - - /* EC-RDSA */ - OID_gostCPSignA, /* 1.2.643.2.2.35.1 */ - OID_gostCPSignB, /* 1.2.643.2.2.35.2 */ - OID_gostCPSignC, /* 1.2.643.2.2.35.3 */ - OID_gost2012PKey256, /* 1.2.643.7.1.1.1.1 */ - OID_gost2012PKey512, /* 1.2.643.7.1.1.1.2 */ - OID_gost2012Digest256, /* 1.2.643.7.1.1.2.2 */ - OID_gost2012Digest512, /* 1.2.643.7.1.1.2.3 */ - OID_gost2012Signature256, /* 1.2.643.7.1.1.3.2 */ - OID_gost2012Signature512, /* 1.2.643.7.1.1.3.3 */ - OID_gostTC26Sign256A, /* 1.2.643.7.1.2.1.1.1 */ - OID_gostTC26Sign256B, /* 1.2.643.7.1.2.1.1.2 */ - OID_gostTC26Sign256C, /* 1.2.643.7.1.2.1.1.3 */ - OID_gostTC26Sign256D, /* 1.2.643.7.1.2.1.1.4 */ - OID_gostTC26Sign512A, /* 1.2.643.7.1.2.1.2.1 */ - OID_gostTC26Sign512B, /* 1.2.643.7.1.2.1.2.2 */ - OID_gostTC26Sign512C, /* 1.2.643.7.1.2.1.2.3 */ - - /* OSCCA */ - OID_sm2, /* 1.2.156.10197.1.301 */ - OID_sm3, /* 1.2.156.10197.1.401 */ - OID_SM2_with_SM3, /* 1.2.156.10197.1.501 */ - OID_sm3WithRSAEncryption, /* 1.2.156.10197.1.504 */ - - /* TCG defined OIDS for TPM based keys */ - OID_TPMLoadableKey, /* 2.23.133.10.1.3 */ - OID_TPMImportableKey, /* 2.23.133.10.1.4 */ - OID_TPMSealedData, /* 2.23.133.10.1.5 */ - OID__NR }; extern enum OID look_up_OID(const void *data, size_t datasize); -extern int parse_OID(const void *data, size_t datasize, enum OID *oid); extern int sprint_oid(const void *, size_t, char *, size_t); extern int sprint_OID(enum OID, char *, size_t); diff --git 
a/include/linux/olpc-ec.h b/include/linux/olpc-ec.h index c4602364e9..2925df3ce7 100644 --- a/include/linux/olpc-ec.h +++ b/include/linux/olpc-ec.h @@ -1,9 +1,6 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_OLPC_EC_H #define _LINUX_OLPC_EC_H -#include - /* XO-1 EC commands */ #define EC_FIRMWARE_REV 0x08 #define EC_WRITE_SCI_MASK 0x1b @@ -18,57 +15,28 @@ #define EC_SCI_QUERY 0x84 #define EC_EXT_SCI_QUERY 0x85 -/* SCI source values */ -#define EC_SCI_SRC_GAME BIT(0) -#define EC_SCI_SRC_BATTERY BIT(1) -#define EC_SCI_SRC_BATSOC BIT(2) -#define EC_SCI_SRC_BATERR BIT(3) -#define EC_SCI_SRC_EBOOK BIT(4) /* XO-1 only */ -#define EC_SCI_SRC_WLAN BIT(5) /* XO-1 only */ -#define EC_SCI_SRC_ACPWR BIT(6) -#define EC_SCI_SRC_BATCRIT BIT(7) -#define EC_SCI_SRC_GPWAKE BIT(8) /* XO-1.5 only */ -#define EC_SCI_SRC_ALL GENMASK(8, 0) - struct platform_device; struct olpc_ec_driver { + int (*probe)(struct platform_device *); int (*suspend)(struct platform_device *); int (*resume)(struct platform_device *); int (*ec_cmd)(u8, u8 *, size_t, u8 *, size_t, void *); - - bool wakeup_available; }; -#ifdef CONFIG_OLPC_EC +#ifdef CONFIG_OLPC extern void olpc_ec_driver_register(struct olpc_ec_driver *drv, void *arg); extern int olpc_ec_cmd(u8 cmd, u8 *inbuf, size_t inlen, u8 *outbuf, size_t outlen); -extern void olpc_ec_wakeup_set(u16 value); -extern void olpc_ec_wakeup_clear(u16 value); - -extern int olpc_ec_mask_write(u16 bits); -extern int olpc_ec_sci_query(u16 *sci_value); - -extern bool olpc_ec_wakeup_available(void); - #else static inline int olpc_ec_cmd(u8 cmd, u8 *inbuf, size_t inlen, u8 *outbuf, size_t outlen) { return -ENODEV; } -static inline void olpc_ec_wakeup_set(u16 value) { } -static inline void olpc_ec_wakeup_clear(u16 value) { } - -static inline bool olpc_ec_wakeup_available(void) -{ - return false; -} - -#endif /* CONFIG_OLPC_EC */ +#endif /* CONFIG_OLPC */ #endif /* _LINUX_OLPC_EC_H */ diff --git a/include/linux/omap-dma.h b/include/linux/omap-dma.h index 
5c5c93ad6b..290081620b 100644 --- a/include/linux/omap-dma.h +++ b/include/linux/omap-dma.h @@ -1,6 +1,7 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef __LINUX_OMAP_DMA_H #define __LINUX_OMAP_DMA_H +#include + /* * Legacy OMAP DMA handling defines and functions * @@ -129,6 +130,7 @@ #define IS_WORD_16 BIT(0xd) #define ENABLE_16XX_MODE BIT(0xe) #define HS_CHANNELS_RESERVED BIT(0xf) +#define DMA_ENGINE_HANDLE_IRQ BIT(0x10) /* Defines for DMA Capabilities */ #define DMA_HAS_TRANSPARENT_CAPS (0x1 << 18) @@ -238,6 +240,9 @@ struct omap_dma_lch { void (*callback)(int lch, u16 ch_status, void *data); void *data; long flags; + /* required for Dynamic chaining */ + int prev_linked_ch; + int next_linked_ch; int state; int chain_id; int status; @@ -299,6 +304,7 @@ extern void omap_set_dma_priority(int lch, int dst_port, int priority); extern int omap_request_dma(int dev_id, const char *dev_name, void (*callback)(int lch, u16 ch_status, void *data), void *data, int *dma_ch); +extern void omap_enable_dma_irq(int ch, u16 irq_bits); extern void omap_disable_dma_irq(int ch, u16 irq_bits); extern void omap_free_dma(int ch); extern void omap_start_dma(int lch); @@ -307,6 +313,7 @@ extern void omap_set_dma_transfer_params(int lch, int data_type, int elem_count, int frame_count, int sync_mode, int dma_trigger, int src_or_dst_synch); +extern void omap_set_dma_write_mode(int lch, enum omap_dma_write_mode mode); extern void omap_set_dma_channel_mode(int lch, enum omap_dma_channel_mode mode); extern void omap_set_dma_src_params(int lch, int src_port, int src_amode, @@ -323,10 +330,22 @@ extern void omap_set_dma_dest_data_pack(int lch, int enable); extern void omap_set_dma_dest_burst_mode(int lch, enum omap_dma_burst_mode burst_mode); +extern void omap_set_dma_params(int lch, + struct omap_dma_channel_params *params); + +extern void omap_dma_link_lch(int lch_head, int lch_queue); + +extern int omap_set_dma_callback(int lch, + void (*callback)(int lch, u16 ch_status, void *data), + void 
*data); extern dma_addr_t omap_get_dma_src_pos(int lch); extern dma_addr_t omap_get_dma_dst_pos(int lch); extern int omap_get_dma_active_status(int lch); extern int omap_dma_running(void); +extern void omap_dma_set_global_params(int arb_rate, int max_fifo_depth, + int tparams); +void omap_dma_global_context_save(void); +void omap_dma_global_context_restore(void); #if defined(CONFIG_ARCH_OMAP1) && IS_ENABLED(CONFIG_FB_OMAP) #include diff --git a/include/linux/omap-dmaengine.h b/include/linux/omap-dmaengine.h new file mode 100644 index 0000000000..8e6906c72e --- /dev/null +++ b/include/linux/omap-dmaengine.h @@ -0,0 +1,21 @@ +/* + * OMAP DMA Engine support + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ +#ifndef __LINUX_OMAP_DMAENGINE_H +#define __LINUX_OMAP_DMAENGINE_H + +struct dma_chan; + +#if defined(CONFIG_DMA_OMAP) || (defined(CONFIG_DMA_OMAP_MODULE) && defined(MODULE)) +bool omap_dma_filter_fn(struct dma_chan *, void *); +#else +static inline bool omap_dma_filter_fn(struct dma_chan *c, void *d) +{ + return false; +} +#endif +#endif /* __LINUX_OMAP_DMAENGINE_H */ diff --git a/include/linux/omap-gpmc.h b/include/linux/omap-gpmc.h index 082841908f..35d0fd7a49 100644 --- a/include/linux/omap-gpmc.h +++ b/include/linux/omap-gpmc.h @@ -1,6 +1,10 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * OMAP GPMC (General Purpose Memory Controller) defines + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. 
*/ #include @@ -21,44 +25,28 @@ struct gpmc_nand_ops { struct gpmc_nand_regs; -struct gpmc_onenand_info { - bool sync_read; - bool sync_write; - int burst_len; -}; - #if IS_ENABLED(CONFIG_OMAP_GPMC) struct gpmc_nand_ops *gpmc_omap_get_nand_ops(struct gpmc_nand_regs *regs, int cs); -/** - * gpmc_omap_onenand_set_timings - set optimized sync timings. - * @cs: Chip Select Region - * @freq: Chip frequency - * @latency: Burst latency cycle count - * @info: Structure describing parameters used - * - * Sets optimized timings for the @cs region based on @freq and @latency. - * Updates the @info structure based on the GPMC settings. - */ -int gpmc_omap_onenand_set_timings(struct device *dev, int cs, int freq, - int latency, - struct gpmc_onenand_info *info); - #else static inline struct gpmc_nand_ops *gpmc_omap_get_nand_ops(struct gpmc_nand_regs *regs, int cs) { return NULL; } +#endif /* CONFIG_OMAP_GPMC */ -static inline -int gpmc_omap_onenand_set_timings(struct device *dev, int cs, int freq, - int latency, - struct gpmc_onenand_info *info) +/*--------------------------------*/ + +/* deprecated APIs */ +#if IS_ENABLED(CONFIG_OMAP_GPMC) +void gpmc_update_nand_reg(struct gpmc_nand_regs *reg, int cs); +#else +static inline void gpmc_update_nand_reg(struct gpmc_nand_regs *reg, int cs) { - return -EINVAL; } #endif /* CONFIG_OMAP_GPMC */ +/*--------------------------------*/ extern int gpmc_calc_timings(struct gpmc_timings *gpmc_t, struct gpmc_settings *gpmc_s, @@ -81,16 +69,29 @@ extern int gpmc_configure(int cmd, int wval); extern void gpmc_read_settings_dt(struct device_node *np, struct gpmc_settings *p); +extern void omap3_gpmc_save_context(void); +extern void omap3_gpmc_restore_context(void); + struct gpmc_timings; struct omap_nand_platform_data; struct omap_onenand_platform_data; -#if IS_ENABLED(CONFIG_MTD_ONENAND_OMAP2) -extern int gpmc_onenand_init(struct omap_onenand_platform_data *d); +#if IS_ENABLED(CONFIG_MTD_NAND_OMAP2) +extern int gpmc_nand_init(struct 
omap_nand_platform_data *d, + struct gpmc_timings *gpmc_t); #else -#define board_onenand_data NULL -static inline int gpmc_onenand_init(struct omap_onenand_platform_data *d) +static inline int gpmc_nand_init(struct omap_nand_platform_data *d, + struct gpmc_timings *gpmc_t) { return 0; } #endif + +#if IS_ENABLED(CONFIG_MTD_ONENAND_OMAP2) +extern void gpmc_onenand_init(struct omap_onenand_platform_data *d); +#else +#define board_onenand_data NULL +static inline void gpmc_onenand_init(struct omap_onenand_platform_data *d) +{ +} +#endif diff --git a/include/linux/omap-iommu.h b/include/linux/omap-iommu.h index 2c32ca09df..c1aede4671 100644 --- a/include/linux/omap-iommu.h +++ b/include/linux/omap-iommu.h @@ -1,36 +1,19 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * omap iommu: simple virtual address space management * * Copyright (C) 2008-2009 Nokia Corporation * * Written by Hiroshi DOYU + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
*/ #ifndef _OMAP_IOMMU_H_ #define _OMAP_IOMMU_H_ -struct iommu_domain; - -#ifdef CONFIG_OMAP_IOMMU extern void omap_iommu_save_ctx(struct device *dev); extern void omap_iommu_restore_ctx(struct device *dev); -int omap_iommu_domain_deactivate(struct iommu_domain *domain); -int omap_iommu_domain_activate(struct iommu_domain *domain); -#else -static inline void omap_iommu_save_ctx(struct device *dev) {} -static inline void omap_iommu_restore_ctx(struct device *dev) {} - -static inline int omap_iommu_domain_deactivate(struct iommu_domain *domain) -{ - return -ENODEV; -} - -static inline int omap_iommu_domain_activate(struct iommu_domain *domain) -{ - return -ENODEV; -} -#endif - #endif diff --git a/include/linux/omap-mailbox.h b/include/linux/omap-mailbox.h index 8aa984ec1f..c726bd8337 100644 --- a/include/linux/omap-mailbox.h +++ b/include/linux/omap-mailbox.h @@ -1,14 +1,15 @@ -/* SPDX-License-Identifier: GPL-2.0 */ /* * omap-mailbox: interprocessor communication module for OMAP + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
*/ #ifndef OMAP_MAILBOX_H #define OMAP_MAILBOX_H -typedef uintptr_t mbox_msg_t; - -#define omap_mbox_message(data) (u32)(mbox_msg_t)(data) +typedef u32 mbox_msg_t; typedef int __bitwise omap_mbox_irq_t; #define IRQ_TX ((__force omap_mbox_irq_t) 1) diff --git a/include/linux/omapfb.h b/include/linux/omapfb.h index 63c9d473e7..d1f4dccaee 100644 --- a/include/linux/omapfb.h +++ b/include/linux/omapfb.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * File: include/linux/omapfb.h * @@ -6,6 +5,20 @@ * * Copyright (C) 2004 Nokia Corporation * Author: Imre Deak + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write to the Free Software Foundation, Inc., + * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ #ifndef __LINUX_OMAPFB_H__ #define __LINUX_OMAPFB_H__ diff --git a/include/linux/once.h b/include/linux/once.h index d361fb14ac..285f12cb40 100644 --- a/include/linux/once.h +++ b/include/linux/once.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_ONCE_H #define _LINUX_ONCE_H @@ -6,8 +5,8 @@ #include bool __do_once_start(bool *done, unsigned long *flags); -void __do_once_done(bool *done, struct static_key_true *once_key, - unsigned long *flags, struct module *mod); +void __do_once_done(bool *done, struct static_key *once_key, + unsigned long *flags); /* Call a function exactly once. 
The idea of DO_ONCE() is to perform * a function call such as initialization of random seeds, etc, only @@ -16,7 +15,7 @@ void __do_once_done(bool *done, struct static_key_true *once_key, * out the condition into a nop. DO_ONCE() guarantees type safety of * arguments! * - * Note that the following is not equivalent ... + * Not that the following is not equivalent ... * * DO_ONCE(func, arg); * DO_ONCE(func, arg); @@ -39,14 +38,14 @@ void __do_once_done(bool *done, struct static_key_true *once_key, ({ \ bool ___ret = false; \ static bool ___done = false; \ - static DEFINE_STATIC_KEY_TRUE(___once_key); \ - if (static_branch_unlikely(&___once_key)) { \ + static struct static_key ___once_key = STATIC_KEY_INIT_TRUE; \ + if (static_key_true(&___once_key)) { \ unsigned long ___flags; \ ___ret = __do_once_start(&___done, &___flags); \ if (unlikely(___ret)) { \ func(__VA_ARGS__); \ __do_once_done(&___done, &___once_key, \ - &___flags, THIS_MODULE); \ + &___flags); \ } \ } \ ___ret; \ @@ -54,7 +53,5 @@ void __do_once_done(bool *done, struct static_key_true *once_key, #define get_random_once(buf, nbytes) \ DO_ONCE(get_random_bytes, (buf), (nbytes)) -#define get_random_once_wait(buf, nbytes) \ - DO_ONCE(get_random_bytes_wait, (buf), (nbytes)) \ #endif /* _LINUX_ONCE_H */ diff --git a/include/linux/oom.h b/include/linux/oom.h index 2db9a14325..b4e36e92bc 100644 --- a/include/linux/oom.h +++ b/include/linux/oom.h @@ -1,27 +1,17 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef __INCLUDE_LINUX_OOM_H #define __INCLUDE_LINUX_OOM_H -#include +#include #include #include #include -#include /* MMF_* */ -#include /* VM_FAULT* */ struct zonelist; struct notifier_block; struct mem_cgroup; struct task_struct; -enum oom_constraint { - CONSTRAINT_NONE, - CONSTRAINT_CPUSET, - CONSTRAINT_MEMORY_POLICY, - CONSTRAINT_MEMCG, -}; - /* * Details of the page allocation that triggered the oom killer that are used to * determine what should be killed. 
@@ -48,14 +38,10 @@ struct oom_control { /* Used by oom implementation, do not set */ unsigned long totalpages; struct task_struct *chosen; - long chosen_points; - - /* Used to print the constraint info. */ - enum oom_constraint constraint; + unsigned long chosen_points; }; extern struct mutex oom_lock; -extern struct mutex oom_adj_mutex; static inline void set_current_oom_origin(void) { @@ -77,38 +63,8 @@ static inline bool tsk_is_oom_victim(struct task_struct * tsk) return tsk->signal->oom_mm; } -/* - * Use this helper if tsk->mm != mm and the victim mm needs a special - * handling. This is guaranteed to stay true after once set. - */ -static inline bool mm_is_oom_victim(struct mm_struct *mm) -{ - return test_bit(MMF_OOM_VICTIM, &mm->flags); -} - -/* - * Checks whether a page fault on the given mm is still reliable. - * This is no longer true if the oom reaper started to reap the - * address space which is reflected by MMF_UNSTABLE flag set in - * the mm. At that moment any !shared mapping would lose the content - * and could cause a memory corruption (zero pages instead of the - * original content). - * - * User should call this before establishing a page table entry for - * a !shared mapping and under the proper page table lock. - * - * Return 0 when the PF is safe VM_FAULT_SIGBUS otherwise. 
- */ -static inline vm_fault_t check_stable_address_space(struct mm_struct *mm) -{ - if (unlikely(test_bit(MMF_UNSTABLE, &mm->flags))) - return VM_FAULT_SIGBUS; - return 0; -} - -bool __oom_reap_task_mm(struct mm_struct *mm); - -long oom_badness(struct task_struct *p, +extern unsigned long oom_badness(struct task_struct *p, + struct mem_cgroup *memcg, const nodemask_t *nodemask, unsigned long totalpages); extern bool out_of_memory(struct oom_control *oc); diff --git a/include/linux/openvswitch.h b/include/linux/openvswitch.h index 3b037bab32..e6b240b619 100644 --- a/include/linux/openvswitch.h +++ b/include/linux/openvswitch.h @@ -1,6 +1,19 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright (c) 2007-2011 Nicira Networks. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of version 2 of the GNU General Public + * License as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA */ #ifndef _LINUX_OPENVSWITCH_H @@ -8,9 +21,4 @@ #include -#define OVS_CLONE_ATTR_EXEC 0 /* Specify an u32 value. When nonzero, - * actions in clone will not change flow - * keys. False otherwise. 
- */ - #endif /* _LINUX_OPENVSWITCH_H */ diff --git a/include/linux/oprofile.h b/include/linux/oprofile.h index b2a0f15f11..4d7da327ec 100644 --- a/include/linux/oprofile.h +++ b/include/linux/oprofile.h @@ -138,9 +138,9 @@ int oprofilefs_create_ulong(struct dentry * root, int oprofilefs_create_ro_ulong(struct dentry * root, char const * name, ulong * val); -/** Create a file for read-only access to an atomic_t. */ +/** Create a file for read-only access to an atomic_unchecked_t. */ int oprofilefs_create_ro_atomic(struct dentry * root, - char const * name, atomic_t * val); + char const * name, atomic_unchecked_t * val); /** create a directory */ struct dentry *oprofilefs_mkdir(struct dentry *parent, char const *name); diff --git a/include/linux/osq_lock.h b/include/linux/osq_lock.h index 5581dbd3bd..703ea5c30a 100644 --- a/include/linux/osq_lock.h +++ b/include/linux/osq_lock.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef __LINUX_OSQ_LOCK_H #define __LINUX_OSQ_LOCK_H diff --git a/include/linux/oxu210hp.h b/include/linux/oxu210hp.h new file mode 100644 index 0000000000..0bf96eae53 --- /dev/null +++ b/include/linux/oxu210hp.h @@ -0,0 +1,7 @@ +/* platform data for the OXU210HP HCD */ + +struct oxu210hp_platform_data { + unsigned int bus16:1; + unsigned int use_hcd_otg:1; + unsigned int use_hcd_sph:1; +}; diff --git a/include/linux/padata.h b/include/linux/padata.h index 495b16b6b4..3c370bddd2 100644 --- a/include/linux/padata.h +++ b/include/linux/padata.h @@ -1,29 +1,38 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * padata.h - header for the padata parallelization interface * * Copyright (C) 2008, 2009 secunet Security Networks AG * Copyright (C) 2008, 2009 Steffen Klassert * - * Copyright (c) 2020 Oracle and/or its affiliates. 
- * Author: Daniel Jordan + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. */ #ifndef PADATA_H #define PADATA_H -#include -#include #include #include #include +#include +#include #include #define PADATA_CPU_SERIAL 0x01 #define PADATA_CPU_PARALLEL 0x02 /** - * struct padata_priv - Represents one job + * struct padata_priv - Embedded to the users data structure. * * @list: List entry, to attach to the padata lists. * @pd: Pointer to the internal control structure. @@ -37,14 +46,13 @@ struct padata_priv { struct list_head list; struct parallel_data *pd; int cb_cpu; - unsigned int seq_nr; int info; void (*parallel)(struct padata_priv *padata); void (*serial)(struct padata_priv *padata); }; /** - * struct padata_list - one per work type per CPU + * struct padata_list * * @list: List head. * @lock: List lock. @@ -67,6 +75,28 @@ struct padata_serial_queue { struct parallel_data *pd; }; +/** + * struct padata_parallel_queue - The percpu padata parallel queue + * + * @parallel: List to wait for parallelization. + * @reorder: List to wait for reordering after parallel processing. + * @serial: List to wait for serialization after reordering. + * @pwork: work struct for parallelization. + * @swork: work struct for serialization. + * @pd: Backpointer to the internal control structure. + * @work: work struct for parallelization. 
+ * @num_obj: Number of objects that are processed by this cpu. + * @cpu_index: Index of the cpu. + */ +struct padata_parallel_queue { + struct padata_list parallel; + struct padata_list reorder; + struct parallel_data *pd; + struct work_struct work; + atomic_t num_obj; + int cpu_index; +}; + /** * struct padata_cpumask - The cpumasks for the parallel/serial workers * @@ -82,92 +112,50 @@ struct padata_cpumask { * struct parallel_data - Internal control structure, covers everything * that depends on the cpumask in use. * - * @ps: padata_shell object. - * @reorder_list: percpu reorder lists + * @pinst: padata instance. + * @pqueue: percpu padata queues used for parallelization. * @squeue: percpu padata queues used for serialuzation. + * @reorder_objects: Number of objects waiting in the reorder queues. * @refcnt: Number of objects holding a reference on this parallel_data. - * @seq_nr: Sequence number of the parallelized data object. - * @processed: Number of already processed objects. - * @cpu: Next CPU to be processed. + * @max_seq_nr: Maximal used sequence number. * @cpumask: The cpumasks in use for parallel and serial workers. - * @reorder_work: work struct for reordering. * @lock: Reorder lock. + * @processed: Number of already processed objects. + * @timer: Reorder timer. */ struct parallel_data { - struct padata_shell *ps; - struct padata_list __percpu *reorder_list; - struct padata_serial_queue __percpu *squeue; - refcount_t refcnt; - unsigned int seq_nr; - unsigned int processed; - int cpu; - struct padata_cpumask cpumask; - struct work_struct reorder_work; - spinlock_t ____cacheline_aligned lock; -}; - -/** - * struct padata_shell - Wrapper around struct parallel_data, its - * purpose is to allow the underlying control structure to be replaced - * on the fly using RCU. - * - * @pinst: padat instance. - * @pd: Actual parallel_data structure which may be substituted on the fly. - * @opd: Pointer to old pd to be freed by padata_replace. 
- * @list: List entry in padata_instance list. - */ -struct padata_shell { struct padata_instance *pinst; - struct parallel_data __rcu *pd; - struct parallel_data *opd; - struct list_head list; -}; - -/** - * struct padata_mt_job - represents one multithreaded job - * - * @thread_fn: Called for each chunk of work that a padata thread does. - * @fn_arg: The thread function argument. - * @start: The start of the job (units are job-specific). - * @size: size of this node's work (units are job-specific). - * @align: Ranges passed to the thread function fall on this boundary, with the - * possible exceptions of the beginning and end of the job. - * @min_chunk: The minimum chunk size in job-specific units. This allows - * the client to communicate the minimum amount of work that's - * appropriate for one worker thread to do at once. - * @max_threads: Max threads to use for the job, actual number may be less - * depending on task size and minimum chunk size. - */ -struct padata_mt_job { - void (*thread_fn)(unsigned long start, unsigned long end, void *arg); - void *fn_arg; - unsigned long start; - unsigned long size; - unsigned long align; - unsigned long min_chunk; - int max_threads; + struct padata_parallel_queue __percpu *pqueue; + struct padata_serial_queue __percpu *squeue; + atomic_t reorder_objects; + atomic_t refcnt; + atomic_unchecked_t seq_nr; + struct padata_cpumask cpumask; + spinlock_t lock ____cacheline_aligned; + unsigned int processed; + struct timer_list timer; }; /** * struct padata_instance - The overall control structure. * - * @cpu_online_node: Linkage for CPU online callback. - * @cpu_dead_node: Linkage for CPU offline callback. - * @parallel_wq: The workqueue used for parallel work. - * @serial_wq: The workqueue used for serial work. - * @pslist: List of padata_shell objects attached to this instance. + * @cpu_notifier: cpu hotplug notifier. + * @wq: The workqueue in use. + * @pd: The internal control structure. 
* @cpumask: User supplied cpumasks for parallel and serial works. + * @cpumask_change_notifier: Notifiers chain for user-defined notify + * callbacks that will be called when either @pcpu or @cbcpu + * or both cpumasks change. * @kobj: padata instance kernel object. * @lock: padata instance lock. * @flags: padata flags. */ struct padata_instance { - struct hlist_node cpu_online_node; - struct hlist_node cpu_dead_node; - struct workqueue_struct *parallel_wq; - struct workqueue_struct *serial_wq; - struct list_head pslist; + struct hlist_node node; + struct workqueue_struct *wq; + struct parallel_data *pd; struct padata_cpumask cpumask; + struct blocking_notifier_head cpumask_change_notifier; struct kobject kobj; struct mutex lock; u8 flags; @@ -176,20 +164,21 @@ struct padata_instance { #define PADATA_INVALID 4 }; -#ifdef CONFIG_PADATA -extern void __init padata_init(void); -#else -static inline void __init padata_init(void) {} -#endif - -extern struct padata_instance *padata_alloc(const char *name); +extern struct padata_instance *padata_alloc_possible( + struct workqueue_struct *wq); +extern struct padata_instance *padata_alloc(struct workqueue_struct *wq, + const struct cpumask *pcpumask, + const struct cpumask *cbcpumask); extern void padata_free(struct padata_instance *pinst); -extern struct padata_shell *padata_alloc_shell(struct padata_instance *pinst); -extern void padata_free_shell(struct padata_shell *ps); -extern int padata_do_parallel(struct padata_shell *ps, - struct padata_priv *padata, int *cb_cpu); +extern int padata_do_parallel(struct padata_instance *pinst, + struct padata_priv *padata, int cb_cpu); extern void padata_do_serial(struct padata_priv *padata); -extern void __init padata_do_multithreaded(struct padata_mt_job *job); extern int padata_set_cpumask(struct padata_instance *pinst, int cpumask_type, cpumask_var_t cpumask); +extern int padata_start(struct padata_instance *pinst); +extern void padata_stop(struct padata_instance *pinst); +extern 
int padata_register_cpumask_notifier(struct padata_instance *pinst, + struct notifier_block *nblock); +extern int padata_unregister_cpumask_notifier(struct padata_instance *pinst, + struct notifier_block *nblock); #endif diff --git a/include/linux/page-flags-layout.h b/include/linux/page-flags-layout.h index ef1e3e736e..77b078c103 100644 --- a/include/linux/page-flags-layout.h +++ b/include/linux/page-flags-layout.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef PAGE_FLAGS_LAYOUT_H #define PAGE_FLAGS_LAYOUT_H @@ -21,19 +20,17 @@ #elif MAX_NR_ZONES <= 8 #define ZONES_SHIFT 3 #else -#error ZONES_SHIFT "Too many zones configured" +#error ZONES_SHIFT -- too many zones configured adjust calculation #endif -#define ZONES_WIDTH ZONES_SHIFT - #ifdef CONFIG_SPARSEMEM #include -#define SECTIONS_SHIFT (MAX_PHYSMEM_BITS - SECTION_SIZE_BITS) -#else -#define SECTIONS_SHIFT 0 -#endif -#ifndef BUILD_VDSO32_64 +/* SECTION_SHIFT #bits space required to store a section # */ +#define SECTIONS_SHIFT (MAX_PHYSMEM_BITS - SECTION_SIZE_BITS) + +#endif /* CONFIG_SPARSEMEM */ + /* * page->flags layout: * @@ -55,28 +52,17 @@ #define SECTIONS_WIDTH 0 #endif -#if ZONES_WIDTH + SECTIONS_WIDTH + NODES_SHIFT <= BITS_PER_LONG - NR_PAGEFLAGS +#define ZONES_WIDTH ZONES_SHIFT + +#if SECTIONS_WIDTH+ZONES_WIDTH+NODES_SHIFT <= BITS_PER_LONG - NR_PAGEFLAGS #define NODES_WIDTH NODES_SHIFT -#elif defined(CONFIG_SPARSEMEM_VMEMMAP) +#else +#ifdef CONFIG_SPARSEMEM_VMEMMAP #error "Vmemmap: No space for nodes field in page flags" -#else +#endif #define NODES_WIDTH 0 #endif -/* - * Note that this #define MUST have a value so that it can be tested with - * the IS_ENABLED() macro. 
- */ -#if NODES_SHIFT != 0 && NODES_WIDTH == 0 -#define NODE_NOT_IN_PAGE_FLAGS 1 -#endif - -#if defined(CONFIG_KASAN_SW_TAGS) || defined(CONFIG_KASAN_HW_TAGS) -#define KASAN_TAG_WIDTH 8 -#else -#define KASAN_TAG_WIDTH 0 -#endif - #ifdef CONFIG_NUMA_BALANCING #define LAST__PID_SHIFT 8 #define LAST__PID_MASK ((1 << LAST__PID_SHIFT)-1) @@ -89,21 +75,22 @@ #define LAST_CPUPID_SHIFT 0 #endif -#if ZONES_WIDTH + SECTIONS_WIDTH + NODES_WIDTH + KASAN_TAG_WIDTH + LAST_CPUPID_SHIFT \ - <= BITS_PER_LONG - NR_PAGEFLAGS +#if SECTIONS_WIDTH+ZONES_WIDTH+NODES_SHIFT+LAST_CPUPID_SHIFT <= BITS_PER_LONG - NR_PAGEFLAGS #define LAST_CPUPID_WIDTH LAST_CPUPID_SHIFT #else #define LAST_CPUPID_WIDTH 0 #endif -#if LAST_CPUPID_SHIFT != 0 && LAST_CPUPID_WIDTH == 0 +/* + * We are going to use the flags for the page to node mapping if its in + * there. This includes the case where there is no node, so it is implicit. + */ +#if !(NODES_WIDTH > 0 || NODES_SHIFT == 0) +#define NODE_NOT_IN_PAGE_FLAGS +#endif + +#if defined(CONFIG_NUMA_BALANCING) && LAST_CPUPID_WIDTH == 0 #define LAST_CPUPID_NOT_IN_PAGE_FLAGS #endif -#if ZONES_WIDTH + SECTIONS_WIDTH + NODES_WIDTH + KASAN_TAG_WIDTH + LAST_CPUPID_WIDTH \ - > BITS_PER_LONG - NR_PAGEFLAGS -#error "Not enough bits in page flags" -#endif - -#endif #endif /* _LINUX_PAGE_FLAGS_LAYOUT */ diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h index fbfd3fad48..74e4dda912 100644 --- a/include/linux/page-flags.h +++ b/include/linux/page-flags.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ /* * Macros for manipulating and testing page->flags */ @@ -17,37 +16,8 @@ /* * Various page->flags bits: * - * PG_reserved is set for special pages. The "struct page" of such a page - * should in general not be touched (e.g. set dirty) except by its owner. - * Pages marked as PG_reserved include: - * - Pages part of the kernel image (including vDSO) and similar (e.g. 
BIOS, - * initrd, HW tables) - * - Pages reserved or allocated early during boot (before the page allocator - * was initialized). This includes (depending on the architecture) the - * initial vmemmap, initial page tables, crashkernel, elfcorehdr, and much - * much more. Once (if ever) freed, PG_reserved is cleared and they will - * be given to the page allocator. - * - Pages falling into physical memory gaps - not IORESOURCE_SYSRAM. Trying - * to read/write these pages might end badly. Don't touch! - * - The zero page(s) - * - Pages not added to the page allocator when onlining a section because - * they were excluded via the online_page_callback() or because they are - * PG_hwpoison. - * - Pages allocated in the context of kexec/kdump (loaded kernel image, - * control pages, vmcoreinfo) - * - MMIO/DMA pages. Some architectures don't allow to ioremap pages that are - * not marked PG_reserved (as they might be in use by somebody else who does - * not respect the caching strategy). - * - Pages part of an offline section (struct pages of offline sections should - * not be trusted as they will be initialized when first onlined). - * - MCA pages on ia64 - * - Pages holding CPU notes for POWER Firmware Assisted Dump - * - Device memory (e.g. PMEM, DAX, HMM) - * Some PG_reserved pages will be excluded from the hibernation image. - * PG_reserved does in general not hinder anybody from dumping or swapping - * and is no longer required for remap_pfn_range(). ioremap might require it. - * Consequently, PG_reserved for a page mapped into user space can indicate - * the zero page, the vDSO, MMIO pages or device memory. + * PG_reserved is set for special pages, which can never be swapped out. Some + * of them might not even exist (eg empty_bad_page)... * * The PG_private bitflag is set on pagecache pages if they contain filesystem * specific data (which is normally at page->private). 
It can be used by @@ -63,11 +33,6 @@ * page_waitqueue(page) is a wait queue of all tasks waiting for the page * to become unlocked. * - * PG_swapbacked is set when a page uses swap as a backing storage. This are - * usually PageAnon or shmem pages but please note that even anonymous pages - * might lose their PG_swapbacked flag when they simply can be dropped (e.g. as - * a result of MADV_FREE). - * * PG_uptodate tells whether the page's contents is valid. When a read * completes, the page becomes uptodate, unless a disk I/O error happened. * @@ -80,13 +45,19 @@ * guarantees that this bit is cleared for a page when it first is entered into * the page cache. * + * PG_highmem pages are not permanently mapped into the kernel virtual address + * space, they need to be kmapped separately for doing IO on the pages. The + * struct page (these bits with information) are always mapped into kernel + * address space... + * * PG_hwpoison indicates that a page got corrupted in hardware and contains * data with incorrect ECC bits that triggered a machine check. Accessing is * not safe since it may cause another machine check. Don't touch! */ /* - * Don't use the pageflags directly. Use the PageFoo macros. + * Don't use the *_dontuse flags. Use the macros. Otherwise you'll break + * locked- and dirty-page accounting. * * The page flags field is split into two parts, the main flags area * which extends from the low bits upwards, and the fields area which @@ -102,14 +73,12 @@ */ enum pageflags { PG_locked, /* Page is locked. Don't touch. */ + PG_error, PG_referenced, PG_uptodate, PG_dirty, PG_lru, PG_active, - PG_workingset, - PG_waiters, /* Page has waiters, check its waitqueue. Must be bit #7 and in the same byte as "PG_locked" */ - PG_error, PG_slab, PG_owner_priv_1, /* Owner use. 
If pagecache, fs may use*/ PG_arch_1, @@ -118,6 +87,7 @@ enum pageflags { PG_private_2, /* If pagecache, has fs aux data */ PG_writeback, /* Page is under writeback */ PG_head, /* A head page */ + PG_swapcache, /* Swap page: swp_entry_t in private */ PG_mappedtodisk, /* Has blocks allocated on-disk */ PG_reclaim, /* To be reclaimed asap */ PG_swapbacked, /* Page is backed by RAM/swap */ @@ -131,24 +101,15 @@ enum pageflags { #ifdef CONFIG_MEMORY_FAILURE PG_hwpoison, /* hardware poisoned page. Don't touch */ #endif -#if defined(CONFIG_PAGE_IDLE_FLAG) && defined(CONFIG_64BIT) +#if defined(CONFIG_IDLE_PAGE_TRACKING) && defined(CONFIG_64BIT) PG_young, PG_idle, -#endif -#ifdef CONFIG_64BIT - PG_arch_2, -#endif -#ifdef CONFIG_KASAN_HW_TAGS - PG_skip_kasan_poison, #endif __NR_PAGEFLAGS, /* Filesystems */ PG_checked = PG_owner_priv_1, - /* SwapBacked */ - PG_swapcache = PG_owner_priv_1, /* Swap page: swp_entry_t in private */ - /* Two page bits are conscripted by FS-Cache to maintain local caching * state. These bits are set on pages belonging to the netfs's inodes * when those inodes are being locally cached. @@ -162,46 +123,30 @@ enum pageflags { PG_savepinned = PG_dirty, /* Has a grant mapping of another (foreign) domain's page. */ PG_foreign = PG_owner_priv_1, - /* Remapped by swiotlb-xen. */ - PG_xen_remapped = PG_owner_priv_1, /* SLOB */ PG_slob_free = PG_private, /* Compound pages. Stored in first tail page's flags */ - PG_double_map = PG_workingset, - -#ifdef CONFIG_MEMORY_FAILURE - /* - * Compound pages. Stored in first tail page's flags. - * Indicates that at least one subpage is hwpoisoned in the - * THP. - */ - PG_has_hwpoisoned = PG_mappedtodisk, -#endif + PG_double_map = PG_private_2, /* non-lru isolated movable page */ PG_isolated = PG_reclaim, - - /* Only valid for buddy pages. 
Used to track pages that are reported */ - PG_reported = PG_uptodate, }; -#define PAGEFLAGS_MASK ((1UL << NR_PAGEFLAGS) - 1) - #ifndef __GENERATING_BOUNDS_H -static inline unsigned long _compound_head(const struct page *page) +struct page; /* forward declaration */ + +static inline struct page *compound_head(struct page *page) { unsigned long head = READ_ONCE(page->compound_head); if (unlikely(head & 1)) - return head - 1; - return (unsigned long)page; + return (struct page *) (head - 1); + return page; } -#define compound_head(page) ((typeof(page))_compound_head(page)) - static __always_inline int PageTail(struct page *page) { return READ_ONCE(page->compound_head) & 1; @@ -212,26 +157,9 @@ static __always_inline int PageCompound(struct page *page) return test_bit(PG_head, &page->flags) || PageTail(page); } -#define PAGE_POISON_PATTERN -1l -static inline int PagePoisoned(const struct page *page) -{ - return page->flags == PAGE_POISON_PATTERN; -} - -#ifdef CONFIG_DEBUG_VM -void page_init_poison(struct page *page, size_t size); -#else -static inline void page_init_poison(struct page *page, size_t size) -{ -} -#endif - /* * Page flags policies wrt compound pages * - * PF_POISONED_CHECK - * check if this struct page poisoned/uninitialized - * * PF_ANY: * the page flag is relevant for small, head and tail pages. * @@ -239,36 +167,21 @@ static inline void page_init_poison(struct page *page, size_t size) * for compound page all operations related to the page flag applied to * head page. * - * PF_ONLY_HEAD: - * for compound page, callers only ever operate on the head page. - * * PF_NO_TAIL: * modifications of the page flag must be done on small or head pages, * checks can be done on tail pages too. * * PF_NO_COMPOUND: * the page flag is not relevant for compound pages. - * - * PF_SECOND: - * the page flag is stored in the first tail page. 
*/ -#define PF_POISONED_CHECK(page) ({ \ - VM_BUG_ON_PGFLAGS(PagePoisoned(page), page); \ - page; }) -#define PF_ANY(page, enforce) PF_POISONED_CHECK(page) -#define PF_HEAD(page, enforce) PF_POISONED_CHECK(compound_head(page)) -#define PF_ONLY_HEAD(page, enforce) ({ \ - VM_BUG_ON_PGFLAGS(PageTail(page), page); \ - PF_POISONED_CHECK(page); }) +#define PF_ANY(page, enforce) page +#define PF_HEAD(page, enforce) compound_head(page) #define PF_NO_TAIL(page, enforce) ({ \ VM_BUG_ON_PGFLAGS(enforce && PageTail(page), page); \ - PF_POISONED_CHECK(compound_head(page)); }) + compound_head(page);}) #define PF_NO_COMPOUND(page, enforce) ({ \ VM_BUG_ON_PGFLAGS(enforce && PageCompound(page), page); \ - PF_POISONED_CHECK(page); }) -#define PF_SECOND(page, enforce) ({ \ - VM_BUG_ON_PGFLAGS(!PageHead(page), page); \ - PF_POISONED_CHECK(&page[1]); }) + page;}) /* * Macros to create function definitions for page flags @@ -340,19 +253,15 @@ static inline int TestClearPage##uname(struct page *page) { return 0; } TESTSETFLAG_FALSE(uname) TESTCLEARFLAG_FALSE(uname) __PAGEFLAG(Locked, locked, PF_NO_TAIL) -PAGEFLAG(Waiters, waiters, PF_ONLY_HEAD) __CLEARPAGEFLAG(Waiters, waiters, PF_ONLY_HEAD) -PAGEFLAG(Error, error, PF_NO_TAIL) TESTCLEARFLAG(Error, error, PF_NO_TAIL) +PAGEFLAG(Error, error, PF_NO_COMPOUND) TESTCLEARFLAG(Error, error, PF_NO_COMPOUND) PAGEFLAG(Referenced, referenced, PF_HEAD) TESTCLEARFLAG(Referenced, referenced, PF_HEAD) __SETPAGEFLAG(Referenced, referenced, PF_HEAD) PAGEFLAG(Dirty, dirty, PF_HEAD) TESTSCFLAG(Dirty, dirty, PF_HEAD) __CLEARPAGEFLAG(Dirty, dirty, PF_HEAD) PAGEFLAG(LRU, lru, PF_HEAD) __CLEARPAGEFLAG(LRU, lru, PF_HEAD) - TESTCLEARFLAG(LRU, lru, PF_HEAD) PAGEFLAG(Active, active, PF_HEAD) __CLEARPAGEFLAG(Active, active, PF_HEAD) TESTCLEARFLAG(Active, active, PF_HEAD) -PAGEFLAG(Workingset, workingset, PF_HEAD) - TESTCLEARFLAG(Workingset, workingset, PF_HEAD) __PAGEFLAG(Slab, slab, PF_NO_TAIL) __PAGEFLAG(SlobFree, slob_free, PF_NO_TAIL) PAGEFLAG(Checked, checked, 
PF_NO_COMPOUND) /* Used by some filesystems */ @@ -362,12 +271,9 @@ PAGEFLAG(Pinned, pinned, PF_NO_COMPOUND) TESTSCFLAG(Pinned, pinned, PF_NO_COMPOUND) PAGEFLAG(SavePinned, savepinned, PF_NO_COMPOUND); PAGEFLAG(Foreign, foreign, PF_NO_COMPOUND); -PAGEFLAG(XenRemapped, xen_remapped, PF_NO_COMPOUND) - TESTCLEARFLAG(XenRemapped, xen_remapped, PF_NO_COMPOUND) PAGEFLAG(Reserved, reserved, PF_NO_COMPOUND) __CLEARPAGEFLAG(Reserved, reserved, PF_NO_COMPOUND) - __SETPAGEFLAG(Reserved, reserved, PF_NO_COMPOUND) PAGEFLAG(SwapBacked, swapbacked, PF_NO_TAIL) __CLEARPAGEFLAG(SwapBacked, swapbacked, PF_NO_TAIL) __SETPAGEFLAG(SwapBacked, swapbacked, PF_NO_TAIL) @@ -377,7 +283,8 @@ PAGEFLAG(SwapBacked, swapbacked, PF_NO_TAIL) * for its own purposes. * - PG_private and PG_private_2 cause releasepage() and co to be invoked */ -PAGEFLAG(Private, private, PF_ANY) +PAGEFLAG(Private, private, PF_ANY) __SETPAGEFLAG(Private, private, PF_ANY) + __CLEARPAGEFLAG(Private, private, PF_ANY) PAGEFLAG(Private2, private_2, PF_ANY) TESTSCFLAG(Private2, private_2, PF_ANY) PAGEFLAG(OwnerPriv1, owner_priv_1, PF_ANY) TESTCLEARFLAG(OwnerPriv1, owner_priv_1, PF_ANY) @@ -386,8 +293,8 @@ PAGEFLAG(OwnerPriv1, owner_priv_1, PF_ANY) * Only test-and-set exist for PG_writeback. The unconditional operators are * risky: they bypass page accounting. 
*/ -TESTPAGEFLAG(Writeback, writeback, PF_NO_TAIL) - TESTSCFLAG(Writeback, writeback, PF_NO_TAIL) +TESTPAGEFLAG(Writeback, writeback, PF_NO_COMPOUND) + TESTSCFLAG(Writeback, writeback, PF_NO_COMPOUND) PAGEFLAG(MappedToDisk, mappedtodisk, PF_NO_TAIL) /* PG_readahead is only used for reads; PG_reclaim is only for writes */ @@ -407,16 +314,7 @@ PAGEFLAG_FALSE(HighMem) #endif #ifdef CONFIG_SWAP -static __always_inline int PageSwapCache(struct page *page) -{ -#ifdef CONFIG_THP_SWAP - page = compound_head(page); -#endif - return PageSwapBacked(page) && test_bit(PG_swapcache, &page->flags); - -} -SETPAGEFLAG(SwapCache, swapcache, PF_NO_TAIL) -CLEARPAGEFLAG(SwapCache, swapcache, PF_NO_TAIL) +PAGEFLAG(SwapCache, swapcache, PF_NO_COMPOUND) #else PAGEFLAG_FALSE(SwapCache) #endif @@ -444,33 +342,18 @@ PAGEFLAG_FALSE(Uncached) PAGEFLAG(HWPoison, hwpoison, PF_ANY) TESTSCFLAG(HWPoison, hwpoison, PF_ANY) #define __PG_HWPOISON (1UL << PG_hwpoison) -extern bool take_page_off_buddy(struct page *page); #else PAGEFLAG_FALSE(HWPoison) #define __PG_HWPOISON 0 #endif -#if defined(CONFIG_PAGE_IDLE_FLAG) && defined(CONFIG_64BIT) +#if defined(CONFIG_IDLE_PAGE_TRACKING) && defined(CONFIG_64BIT) TESTPAGEFLAG(Young, young, PF_ANY) SETPAGEFLAG(Young, young, PF_ANY) TESTCLEARFLAG(Young, young, PF_ANY) PAGEFLAG(Idle, idle, PF_ANY) #endif -#ifdef CONFIG_KASAN_HW_TAGS -PAGEFLAG(SkipKASanPoison, skip_kasan_poison, PF_HEAD) -#else -PAGEFLAG_FALSE(SkipKASanPoison) -#endif - -/* - * PageReported() is used to track reported free pages within the Buddy - * allocator. We can use the non-atomic version of the test and set - * operations as both should be shielded with the zone lock to prevent - * any possible races on the setting or clearing of the bit. 
- */ -__PAGEFLAG(Reported, reported, PF_NO_COMPOUND) - /* * On an anonymous page mapped into a user virtual memory area, * page->mapping points to its anon_vma, not to a struct address_space; @@ -612,9 +495,15 @@ static inline void ClearPageCompound(struct page *page) #ifdef CONFIG_HUGETLB_PAGE int PageHuge(struct page *page); int PageHeadHuge(struct page *page); +bool page_huge_active(struct page *page); #else TESTPAGEFLAG_FALSE(Huge) TESTPAGEFLAG_FALSE(HeadHuge) + +static inline bool page_huge_active(struct page *page) +{ + return 0; +} #endif @@ -643,6 +532,27 @@ static inline int PageTransCompound(struct page *page) return PageCompound(page); } +/* + * PageTransCompoundMap is the same as PageTransCompound, but it also + * guarantees the primary MMU has the entire compound page mapped + * through pmd_trans_huge, which in turn guarantees the secondary MMUs + * can also map the entire compound page. This allows the secondary + * MMUs to call get_user_pages() only once for each compound page and + * to immediately map the entire compound page with a single secondary + * MMU fault. If there will be a pmd split later, the secondary MMUs + * will get an update through the MMU notifier invalidation through + * split_huge_pmd(). + * + * Unlike PageTransCompound, this is safe to be called only while + * split_huge_pmd() cannot run from under us, like if protected by the + * MMU notifier, otherwise it may result in page->_mapcount < 0 false + * positives. + */ +static inline int PageTransCompoundMap(struct page *page) +{ + return PageTransCompound(page) && atomic_read(&page->_mapcount) < 0; +} + /* * PageTransTail returns true for both transparent huge pages * and hugetlbfs pages, so it should only be called when it's known @@ -666,128 +576,88 @@ static inline int PageTransTail(struct page *page) * * See also __split_huge_pmd_locked() and page_remove_anon_compound_rmap(). 
*/ -PAGEFLAG(DoubleMap, double_map, PF_SECOND) - TESTSCFLAG(DoubleMap, double_map, PF_SECOND) +static inline int PageDoubleMap(struct page *page) +{ + return PageHead(page) && test_bit(PG_double_map, &page[1].flags); +} + +static inline void SetPageDoubleMap(struct page *page) +{ + VM_BUG_ON_PAGE(!PageHead(page), page); + set_bit(PG_double_map, &page[1].flags); +} + +static inline void ClearPageDoubleMap(struct page *page) +{ + VM_BUG_ON_PAGE(!PageHead(page), page); + clear_bit(PG_double_map, &page[1].flags); +} +static inline int TestSetPageDoubleMap(struct page *page) +{ + VM_BUG_ON_PAGE(!PageHead(page), page); + return test_and_set_bit(PG_double_map, &page[1].flags); +} + +static inline int TestClearPageDoubleMap(struct page *page) +{ + VM_BUG_ON_PAGE(!PageHead(page), page); + return test_and_clear_bit(PG_double_map, &page[1].flags); +} + #else TESTPAGEFLAG_FALSE(TransHuge) TESTPAGEFLAG_FALSE(TransCompound) TESTPAGEFLAG_FALSE(TransCompoundMap) TESTPAGEFLAG_FALSE(TransTail) PAGEFLAG_FALSE(DoubleMap) - TESTSCFLAG_FALSE(DoubleMap) -#endif - -#if defined(CONFIG_MEMORY_FAILURE) && defined(CONFIG_TRANSPARENT_HUGEPAGE) -/* - * PageHasHWPoisoned indicates that at least one subpage is hwpoisoned in the - * compound page. - * - * This flag is set by hwpoison handler. Cleared by THP split or free page. - */ -PAGEFLAG(HasHWPoisoned, has_hwpoisoned, PF_SECOND) - TESTSCFLAG(HasHWPoisoned, has_hwpoisoned, PF_SECOND) -#else -PAGEFLAG_FALSE(HasHWPoisoned) - TESTSCFLAG_FALSE(HasHWPoisoned) + TESTSETFLAG_FALSE(DoubleMap) + TESTCLEARFLAG_FALSE(DoubleMap) #endif /* - * Check if a page is currently marked HWPoisoned. Note that this check is - * best effort only and inherently racy: there is no way to synchronize with - * failing hardware. + * For pages that are never mapped to userspace, page->mapcount may be + * used for storing extra information about page type. 
Any value used + * for this purpose must be <= -2, but it's better start not too close + * to -2 so that an underflow of the page_mapcount() won't be mistaken + * for a special page. */ -static inline bool is_page_hwpoison(struct page *page) -{ - if (PageHWPoison(page)) - return true; - return PageHuge(page) && PageHWPoison(compound_head(page)); -} - -/* - * For pages that are never mapped to userspace (and aren't PageSlab), - * page_type may be used. Because it is initialised to -1, we invert the - * sense of the bit, so __SetPageFoo *clears* the bit used for PageFoo, and - * __ClearPageFoo *sets* the bit used for PageFoo. We reserve a few high and - * low bits so that an underflow or overflow of page_mapcount() won't be - * mistaken for a page type value. - */ - -#define PAGE_TYPE_BASE 0xf0000000 -/* Reserve 0x0000007f to catch underflows of page_mapcount */ -#define PAGE_MAPCOUNT_RESERVE -128 -#define PG_buddy 0x00000080 -#define PG_offline 0x00000100 -#define PG_table 0x00000200 -#define PG_guard 0x00000400 - -#define PageType(page, flag) \ - ((page->page_type & (PAGE_TYPE_BASE | flag)) == PAGE_TYPE_BASE) - -static inline int page_has_type(struct page *page) -{ - return (int)page->page_type < PAGE_MAPCOUNT_RESERVE; -} - -#define PAGE_TYPE_OPS(uname, lname) \ +#define PAGE_MAPCOUNT_OPS(uname, lname) \ static __always_inline int Page##uname(struct page *page) \ { \ - return PageType(page, PG_##lname); \ + return atomic_read(&page->_mapcount) == \ + PAGE_##lname##_MAPCOUNT_VALUE; \ } \ static __always_inline void __SetPage##uname(struct page *page) \ { \ - VM_BUG_ON_PAGE(!PageType(page, 0), page); \ - page->page_type &= ~PG_##lname; \ + VM_BUG_ON_PAGE(atomic_read(&page->_mapcount) != -1, page); \ + atomic_set(&page->_mapcount, PAGE_##lname##_MAPCOUNT_VALUE); \ } \ static __always_inline void __ClearPage##uname(struct page *page) \ { \ VM_BUG_ON_PAGE(!Page##uname(page), page); \ - page->page_type |= PG_##lname; \ + atomic_set(&page->_mapcount, -1); \ } /* - * 
PageBuddy() indicates that the page is free and in the buddy system + * PageBuddy() indicate that the page is free and in the buddy system * (see mm/page_alloc.c). */ -PAGE_TYPE_OPS(Buddy, buddy) +#define PAGE_BUDDY_MAPCOUNT_VALUE (-128) +PAGE_MAPCOUNT_OPS(Buddy, BUDDY) /* - * PageOffline() indicates that the page is logically offline although the - * containing section is online. (e.g. inflated in a balloon driver or - * not onlined when onlining the section). - * The content of these pages is effectively stale. Such pages should not - * be touched (read/write/dump/save) except by their owner. - * - * If a driver wants to allow to offline unmovable PageOffline() pages without - * putting them back to the buddy, it can do so via the memory notifier by - * decrementing the reference count in MEM_GOING_OFFLINE and incrementing the - * reference count in MEM_CANCEL_OFFLINE. When offlining, the PageOffline() - * pages (now with a reference count of zero) are treated like free pages, - * allowing the containing memory block to get offlined. A driver that - * relies on this feature is aware that re-onlining the memory block will - * require to re-set the pages PageOffline() and not giving them to the - * buddy via online_page_callback_t. - * - * There are drivers that mark a page PageOffline() and expect there won't be - * any further access to page content. PFN walkers that read content of random - * pages should check PageOffline() and synchronize with such drivers using - * page_offline_freeze()/page_offline_thaw(). + * PageBalloon() is set on pages that are on the balloon page list + * (see mm/balloon_compaction.c). */ -PAGE_TYPE_OPS(Offline, offline) - -extern void page_offline_freeze(void); -extern void page_offline_thaw(void); -extern void page_offline_begin(void); -extern void page_offline_end(void); +#define PAGE_BALLOON_MAPCOUNT_VALUE (-256) +PAGE_MAPCOUNT_OPS(Balloon, BALLOON) /* - * Marks pages in use as page tables. 
+ * If kmemcg is enabled, the buddy allocator will set PageKmemcg() on + * pages allocated with __GFP_ACCOUNT. It gets cleared on page free. */ -PAGE_TYPE_OPS(Table, table) - -/* - * Marks guardpages used with debug_pagealloc. - */ -PAGE_TYPE_OPS(Guard, guard) +#define PAGE_KMEMCG_MAPCOUNT_VALUE (-512) +PAGE_MAPCOUNT_OPS(Kmemcg, KMEMCG) extern bool is_free_buddy_page(struct page *page); @@ -803,15 +673,6 @@ static inline int PageSlabPfmemalloc(struct page *page) return PageActive(page); } -/* - * A version of PageSlabPfmemalloc() for opportunistic checks where the page - * might have been freed under us and not be a PageSlab anymore. - */ -static inline int __PageSlabPfmemalloc(struct page *page) -{ - return PageActive(page); -} - static inline void SetPageSlabPfmemalloc(struct page *page) { VM_BUG_ON_PAGE(!PageSlab(page), page); @@ -838,25 +699,25 @@ static inline void ClearPageSlabPfmemalloc(struct page *page) /* * Flags checked when a page is freed. Pages being freed should not have - * these flags set. If they are, there is a problem. + * these flags set. It they are, there is a problem. */ -#define PAGE_FLAGS_CHECK_AT_FREE \ - (1UL << PG_lru | 1UL << PG_locked | \ - 1UL << PG_private | 1UL << PG_private_2 | \ - 1UL << PG_writeback | 1UL << PG_reserved | \ - 1UL << PG_slab | 1UL << PG_active | \ - 1UL << PG_unevictable | __PG_MLOCKED) +#define PAGE_FLAGS_CHECK_AT_FREE \ + (1UL << PG_lru | 1UL << PG_locked | \ + 1UL << PG_private | 1UL << PG_private_2 | \ + 1UL << PG_writeback | 1UL << PG_reserved | \ + 1UL << PG_slab | 1UL << PG_swapcache | 1UL << PG_active | \ + 1UL << PG_unevictable | __PG_MLOCKED) /* * Flags checked when a page is prepped for return by the page allocator. - * Pages being prepped should not have these flags set. If they are set, + * Pages being prepped should not have these flags set. It they are set, * there has been a kernel bug or struct page corruption. 
* * __PG_HWPOISON is exceptional because it needs to be kept beyond page's * alloc-free cycle to prevent from reusing the page. */ #define PAGE_FLAGS_CHECK_AT_PREP \ - (PAGEFLAGS_MASK & ~__PG_HWPOISON) + (((1UL << NR_PAGEFLAGS) - 1) & ~__PG_HWPOISON) #define PAGE_FLAGS_PRIVATE \ (1UL << PG_private | 1UL << PG_private_2) @@ -874,10 +735,8 @@ static inline int page_has_private(struct page *page) #undef PF_ANY #undef PF_HEAD -#undef PF_ONLY_HEAD #undef PF_NO_TAIL #undef PF_NO_COMPOUND -#undef PF_SECOND #endif /* !__GENERATING_BOUNDS_H */ #endif /* PAGE_FLAGS_H */ diff --git a/include/linux/page-isolation.h b/include/linux/page-isolation.h index 5724580163..047d64706f 100644 --- a/include/linux/page-isolation.h +++ b/include/linux/page-isolation.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef __LINUX_PAGEISOLATION_H #define __LINUX_PAGEISOLATION_H @@ -30,27 +29,33 @@ static inline bool is_migrate_isolate(int migratetype) } #endif -#define MEMORY_OFFLINE 0x1 -#define REPORT_FAILURE 0x2 - -struct page *has_unmovable_pages(struct zone *zone, struct page *page, - int migratetype, int flags); +bool has_unmovable_pages(struct zone *zone, struct page *page, int count, + bool skip_hwpoisoned_pages); void set_pageblock_migratetype(struct page *page, int migratetype); int move_freepages_block(struct zone *zone, struct page *page, - int migratetype, int *num_movable); + int migratetype); +int move_freepages(struct zone *zone, + struct page *start_page, struct page *end_page, + int migratetype); /* * Changes migrate type in [start_pfn, end_pfn) to be MIGRATE_ISOLATE. + * If specified range includes migrate types other than MOVABLE or CMA, + * this will fail with -EBUSY. + * + * For isolating all pages in the range finally, the caller have to + * free all pages in the range. test_page_isolated() can be used for + * test it. 
*/ int start_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn, - unsigned migratetype, int flags); + unsigned migratetype, bool skip_hwpoisoned_pages); /* * Changes MIGRATE_ISOLATE to MIGRATE_MOVABLE. * target range is [start_pfn, end_pfn) */ -void +int undo_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn, unsigned migratetype); @@ -58,8 +63,9 @@ undo_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn, * Test all pages in [start_pfn, end_pfn) are isolated or not. */ int test_pages_isolated(unsigned long start_pfn, unsigned long end_pfn, - int isol_flags); + bool skip_hwpoisoned_pages); -struct page *alloc_migrate_target(struct page *page, unsigned long private); +struct page *alloc_migrate_target(struct page *page, unsigned long private, + int **resultp); #endif diff --git a/include/linux/page_counter.h b/include/linux/page_counter.h index 6795913019..7e62920a3a 100644 --- a/include/linux/page_counter.h +++ b/include/linux/page_counter.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_PAGE_COUNTER_H #define _LINUX_PAGE_COUNTER_H @@ -7,33 +6,13 @@ #include struct page_counter { - atomic_long_t usage; - unsigned long min; - unsigned long low; - unsigned long high; - unsigned long max; - - /* effective memory.min and memory.min usage tracking */ - unsigned long emin; - atomic_long_t min_usage; - atomic_long_t children_min_usage; - - /* effective memory.low and memory.low usage tracking */ - unsigned long elow; - atomic_long_t low_usage; - atomic_long_t children_low_usage; + atomic_long_t count; + unsigned long limit; + struct page_counter *parent; /* legacy */ unsigned long watermark; unsigned long failcnt; - - /* - * 'parent' is placed here to be far from 'usage' to reduce - * cache false sharing, as 'usage' is written mostly while - * parent is frequently read for cgroup's hierarchical - * counting nature. 
- */ - struct page_counter *parent; }; #if BITS_PER_LONG == 32 @@ -45,14 +24,14 @@ struct page_counter { static inline void page_counter_init(struct page_counter *counter, struct page_counter *parent) { - atomic_long_set(&counter->usage, 0); - counter->max = PAGE_COUNTER_MAX; + atomic_long_set(&counter->count, 0); + counter->limit = PAGE_COUNTER_MAX; counter->parent = parent; } static inline unsigned long page_counter_read(struct page_counter *counter) { - return atomic_long_read(&counter->usage); + return atomic_long_read(&counter->count); } void page_counter_cancel(struct page_counter *counter, unsigned long nr_pages); @@ -61,16 +40,7 @@ bool page_counter_try_charge(struct page_counter *counter, unsigned long nr_pages, struct page_counter **fail); void page_counter_uncharge(struct page_counter *counter, unsigned long nr_pages); -void page_counter_set_min(struct page_counter *counter, unsigned long nr_pages); -void page_counter_set_low(struct page_counter *counter, unsigned long nr_pages); - -static inline void page_counter_set_high(struct page_counter *counter, - unsigned long nr_pages) -{ - WRITE_ONCE(counter->high, nr_pages); -} - -int page_counter_set_max(struct page_counter *counter, unsigned long nr_pages); +int page_counter_limit(struct page_counter *counter, unsigned long limit); int page_counter_memparse(const char *buf, const char *max, unsigned long *nr_pages); diff --git a/include/linux/page_ext.h b/include/linux/page_ext.h index fabb2e1e08..9298c393dd 100644 --- a/include/linux/page_ext.h +++ b/include/linux/page_ext.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef __LINUX_PAGE_EXT_H #define __LINUX_PAGE_EXT_H @@ -16,10 +15,21 @@ struct page_ext_operations { #ifdef CONFIG_PAGE_EXTENSION +/* + * page_ext->flags bits: + * + * PAGE_EXT_DEBUG_POISON is set for poisoned pages. This is used to + * implement generic debug pagealloc feature. The pages are filled with + * poison patterns and set this flag after free_pages(). 
The poisoned + * pages are verified whether the patterns are not corrupted and clear + * the flag before alloc_pages(). + */ + enum page_ext_flags { + PAGE_EXT_DEBUG_POISON, /* Page is poisoned */ + PAGE_EXT_DEBUG_GUARD, PAGE_EXT_OWNER, - PAGE_EXT_OWNER_ALLOCATED, -#if defined(CONFIG_PAGE_IDLE_FLAG) && !defined(CONFIG_64BIT) +#if defined(CONFIG_IDLE_PAGE_TRACKING) && !defined(CONFIG_64BIT) PAGE_EXT_YOUNG, PAGE_EXT_IDLE, #endif @@ -36,7 +46,6 @@ struct page_ext { unsigned long flags; }; -extern unsigned long page_ext_size; extern void pgdat_page_ext_init(struct pglist_data *pgdat); #ifdef CONFIG_SPARSEMEM @@ -44,25 +53,14 @@ static inline void page_ext_init_flatmem(void) { } extern void page_ext_init(void); -static inline void page_ext_init_flatmem_late(void) -{ -} #else extern void page_ext_init_flatmem(void); -extern void page_ext_init_flatmem_late(void); static inline void page_ext_init(void) { } #endif -struct page_ext *lookup_page_ext(const struct page *page); - -static inline struct page_ext *page_ext_next(struct page_ext *curr) -{ - void *next = curr; - next += page_ext_size; - return next; -} +struct page_ext *lookup_page_ext(struct page *page); #else /* !CONFIG_PAGE_EXTENSION */ struct page_ext; @@ -71,7 +69,7 @@ static inline void pgdat_page_ext_init(struct pglist_data *pgdat) { } -static inline struct page_ext *lookup_page_ext(const struct page *page) +static inline struct page_ext *lookup_page_ext(struct page *page) { return NULL; } @@ -80,10 +78,6 @@ static inline void page_ext_init(void) { } -static inline void page_ext_init_flatmem_late(void) -{ -} - static inline void page_ext_init_flatmem(void) { } diff --git a/include/linux/page_idle.h b/include/linux/page_idle.h index d8a6aecf99..fec4027133 100644 --- a/include/linux/page_idle.h +++ b/include/linux/page_idle.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_MM_PAGE_IDLE_H #define _LINUX_MM_PAGE_IDLE_H @@ -6,7 +5,7 @@ #include #include -#ifdef CONFIG_PAGE_IDLE_FLAG +#ifdef 
CONFIG_IDLE_PAGE_TRACKING #ifdef CONFIG_64BIT static inline bool page_is_young(struct page *page) @@ -106,7 +105,7 @@ static inline void clear_page_idle(struct page *page) } #endif /* CONFIG_64BIT */ -#else /* !CONFIG_PAGE_IDLE_FLAG */ +#else /* !CONFIG_IDLE_PAGE_TRACKING */ static inline bool page_is_young(struct page *page) { @@ -135,6 +134,6 @@ static inline void clear_page_idle(struct page *page) { } -#endif /* CONFIG_PAGE_IDLE_FLAG */ +#endif /* CONFIG_IDLE_PAGE_TRACKING */ #endif /* _LINUX_MM_PAGE_IDLE_H */ diff --git a/include/linux/page_owner.h b/include/linux/page_owner.h index 719bfe5108..2be728d156 100644 --- a/include/linux/page_owner.h +++ b/include/linux/page_owner.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef __LINUX_PAGE_OWNER_H #define __LINUX_PAGE_OWNER_H @@ -11,10 +10,10 @@ extern struct page_ext_operations page_owner_ops; extern void __reset_page_owner(struct page *page, unsigned int order); extern void __set_page_owner(struct page *page, unsigned int order, gfp_t gfp_mask); -extern void __split_page_owner(struct page *page, unsigned int nr); +extern void __split_page_owner(struct page *page, unsigned int order); extern void __copy_page_owner(struct page *oldpage, struct page *newpage); extern void __set_page_owner_migrate_reason(struct page *page, int reason); -extern void __dump_page_owner(const struct page *page); +extern void __dump_page_owner(struct page *page); extern void pagetypeinfo_showmixedcount_print(struct seq_file *m, pg_data_t *pgdat, struct zone *zone); @@ -31,10 +30,10 @@ static inline void set_page_owner(struct page *page, __set_page_owner(page, order, gfp_mask); } -static inline void split_page_owner(struct page *page, unsigned int nr) +static inline void split_page_owner(struct page *page, unsigned int order) { if (static_branch_unlikely(&page_owner_inited)) - __split_page_owner(page, nr); + __split_page_owner(page, order); } static inline void copy_page_owner(struct page *oldpage, struct page *newpage) { 
@@ -46,7 +45,7 @@ static inline void set_page_owner_migrate_reason(struct page *page, int reason) if (static_branch_unlikely(&page_owner_inited)) __set_page_owner_migrate_reason(page, reason); } -static inline void dump_page_owner(const struct page *page) +static inline void dump_page_owner(struct page *page) { if (static_branch_unlikely(&page_owner_inited)) __dump_page_owner(page); @@ -69,7 +68,7 @@ static inline void copy_page_owner(struct page *oldpage, struct page *newpage) static inline void set_page_owner_migrate_reason(struct page *page, int reason) { } -static inline void dump_page_owner(const struct page *page) +static inline void dump_page_owner(struct page *page) { } #endif /* CONFIG_PAGE_OWNER */ diff --git a/include/linux/page_ref.h b/include/linux/page_ref.h index 7ad46f45df..610e132719 100644 --- a/include/linux/page_ref.h +++ b/include/linux/page_ref.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_PAGE_REF_H #define _LINUX_PAGE_REF_H @@ -7,13 +6,13 @@ #include #include -DECLARE_TRACEPOINT(page_ref_set); -DECLARE_TRACEPOINT(page_ref_mod); -DECLARE_TRACEPOINT(page_ref_mod_and_test); -DECLARE_TRACEPOINT(page_ref_mod_and_return); -DECLARE_TRACEPOINT(page_ref_mod_unless); -DECLARE_TRACEPOINT(page_ref_freeze); -DECLARE_TRACEPOINT(page_ref_unfreeze); +extern struct tracepoint __tracepoint_page_ref_set; +extern struct tracepoint __tracepoint_page_ref_mod; +extern struct tracepoint __tracepoint_page_ref_mod_and_test; +extern struct tracepoint __tracepoint_page_ref_mod_and_return; +extern struct tracepoint __tracepoint_page_ref_mod_unless; +extern struct tracepoint __tracepoint_page_ref_freeze; +extern struct tracepoint __tracepoint_page_ref_unfreeze; #ifdef CONFIG_DEBUG_PAGE_REF @@ -24,7 +23,7 @@ DECLARE_TRACEPOINT(page_ref_unfreeze); * * See trace_##name##_enabled(void) in include/linux/tracepoint.h */ -#define page_ref_tracepoint_active(t) tracepoint_enabled(t) +#define page_ref_tracepoint_active(t) static_key_false(&(t).key) 
extern void __page_ref_set(struct page *page, int v); extern void __page_ref_mod(struct page *page, int v); @@ -62,12 +61,12 @@ static inline void __page_ref_unfreeze(struct page *page, int v) #endif -static inline int page_ref_count(const struct page *page) +static inline int page_ref_count(struct page *page) { return atomic_read(&page->_refcount); } -static inline int page_count(const struct page *page) +static inline int page_count(struct page *page) { return atomic_read(&compound_head(page)->_refcount); } @@ -75,7 +74,7 @@ static inline int page_count(const struct page *page) static inline void set_page_count(struct page *page, int v) { atomic_set(&page->_refcount, v); - if (page_ref_tracepoint_active(page_ref_set)) + if (page_ref_tracepoint_active(__tracepoint_page_ref_set)) __page_ref_set(page, v); } @@ -91,37 +90,28 @@ static inline void init_page_count(struct page *page) static inline void page_ref_add(struct page *page, int nr) { atomic_add(nr, &page->_refcount); - if (page_ref_tracepoint_active(page_ref_mod)) + if (page_ref_tracepoint_active(__tracepoint_page_ref_mod)) __page_ref_mod(page, nr); } static inline void page_ref_sub(struct page *page, int nr) { atomic_sub(nr, &page->_refcount); - if (page_ref_tracepoint_active(page_ref_mod)) + if (page_ref_tracepoint_active(__tracepoint_page_ref_mod)) __page_ref_mod(page, -nr); } -static inline int page_ref_sub_return(struct page *page, int nr) -{ - int ret = atomic_sub_return(nr, &page->_refcount); - - if (page_ref_tracepoint_active(page_ref_mod_and_return)) - __page_ref_mod_and_return(page, -nr, ret); - return ret; -} - static inline void page_ref_inc(struct page *page) { atomic_inc(&page->_refcount); - if (page_ref_tracepoint_active(page_ref_mod)) + if (page_ref_tracepoint_active(__tracepoint_page_ref_mod)) __page_ref_mod(page, 1); } static inline void page_ref_dec(struct page *page) { atomic_dec(&page->_refcount); - if (page_ref_tracepoint_active(page_ref_mod)) + if 
(page_ref_tracepoint_active(__tracepoint_page_ref_mod)) __page_ref_mod(page, -1); } @@ -129,7 +119,7 @@ static inline int page_ref_sub_and_test(struct page *page, int nr) { int ret = atomic_sub_and_test(nr, &page->_refcount); - if (page_ref_tracepoint_active(page_ref_mod_and_test)) + if (page_ref_tracepoint_active(__tracepoint_page_ref_mod_and_test)) __page_ref_mod_and_test(page, -nr, ret); return ret; } @@ -138,7 +128,7 @@ static inline int page_ref_inc_return(struct page *page) { int ret = atomic_inc_return(&page->_refcount); - if (page_ref_tracepoint_active(page_ref_mod_and_return)) + if (page_ref_tracepoint_active(__tracepoint_page_ref_mod_and_return)) __page_ref_mod_and_return(page, 1, ret); return ret; } @@ -147,7 +137,7 @@ static inline int page_ref_dec_and_test(struct page *page) { int ret = atomic_dec_and_test(&page->_refcount); - if (page_ref_tracepoint_active(page_ref_mod_and_test)) + if (page_ref_tracepoint_active(__tracepoint_page_ref_mod_and_test)) __page_ref_mod_and_test(page, -1, ret); return ret; } @@ -156,7 +146,7 @@ static inline int page_ref_dec_return(struct page *page) { int ret = atomic_dec_return(&page->_refcount); - if (page_ref_tracepoint_active(page_ref_mod_and_return)) + if (page_ref_tracepoint_active(__tracepoint_page_ref_mod_and_return)) __page_ref_mod_and_return(page, -1, ret); return ret; } @@ -165,7 +155,7 @@ static inline int page_ref_add_unless(struct page *page, int nr, int u) { int ret = atomic_add_unless(&page->_refcount, nr, u); - if (page_ref_tracepoint_active(page_ref_mod_unless)) + if (page_ref_tracepoint_active(__tracepoint_page_ref_mod_unless)) __page_ref_mod_unless(page, nr, ret); return ret; } @@ -174,7 +164,7 @@ static inline int page_ref_freeze(struct page *page, int count) { int ret = likely(atomic_cmpxchg(&page->_refcount, count, 0) == count); - if (page_ref_tracepoint_active(page_ref_freeze)) + if (page_ref_tracepoint_active(__tracepoint_page_ref_freeze)) __page_ref_freeze(page, count, ret); return ret; } @@ -184,8 
+174,8 @@ static inline void page_ref_unfreeze(struct page *page, int count) VM_BUG_ON_PAGE(page_count(page) != 0, page); VM_BUG_ON(count == 0); - atomic_set_release(&page->_refcount, count); - if (page_ref_tracepoint_active(page_ref_unfreeze)) + atomic_set(&page->_refcount, count); + if (page_ref_tracepoint_active(__tracepoint_page_ref_unfreeze)) __page_ref_unfreeze(page, count); } diff --git a/include/linux/pageblock-flags.h b/include/linux/pageblock-flags.h index 973fd731a5..e942558b35 100644 --- a/include/linux/pageblock-flags.h +++ b/include/linux/pageblock-flags.h @@ -1,8 +1,20 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * Macros for manipulating and testing flags related to a * pageblock_nr_pages number of pages. * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation version 2 of the License + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 
+ * * Copyright (C) IBM Corporation, 2006 * * Original author, Mel Gorman @@ -13,11 +25,10 @@ #include -#define PB_migratetype_bits 3 /* Bit indices that affect a whole block of pages */ enum pageblock_bits { PB_migrate, - PB_migrate_end = PB_migrate + PB_migratetype_bits - 1, + PB_migrate_end = PB_migrate + 3 - 1, /* 3 bits required for migrate types */ PB_migrate_skip,/* If set the block is skipped by compaction */ @@ -54,38 +65,37 @@ extern unsigned int pageblock_order; /* Forward declaration */ struct page; -unsigned long get_pfnblock_flags_mask(const struct page *page, +unsigned long get_pfnblock_flags_mask(struct page *page, unsigned long pfn, + unsigned long end_bitidx, unsigned long mask); void set_pfnblock_flags_mask(struct page *page, unsigned long flags, unsigned long pfn, + unsigned long end_bitidx, unsigned long mask); /* Declarations for getting and setting flags. See mm/page_alloc.c */ +#define get_pageblock_flags_group(page, start_bitidx, end_bitidx) \ + get_pfnblock_flags_mask(page, page_to_pfn(page), \ + end_bitidx, \ + (1 << (end_bitidx - start_bitidx + 1)) - 1) +#define set_pageblock_flags_group(page, flags, start_bitidx, end_bitidx) \ + set_pfnblock_flags_mask(page, flags, page_to_pfn(page), \ + end_bitidx, \ + (1 << (end_bitidx - start_bitidx + 1)) - 1) + #ifdef CONFIG_COMPACTION #define get_pageblock_skip(page) \ - get_pfnblock_flags_mask(page, page_to_pfn(page), \ - (1 << (PB_migrate_skip))) + get_pageblock_flags_group(page, PB_migrate_skip, \ + PB_migrate_skip) #define clear_pageblock_skip(page) \ - set_pfnblock_flags_mask(page, 0, page_to_pfn(page), \ - (1 << PB_migrate_skip)) + set_pageblock_flags_group(page, 0, PB_migrate_skip, \ + PB_migrate_skip) #define set_pageblock_skip(page) \ - set_pfnblock_flags_mask(page, (1 << PB_migrate_skip), \ - page_to_pfn(page), \ - (1 << PB_migrate_skip)) -#else -static inline bool get_pageblock_skip(struct page *page) -{ - return false; -} -static inline void clear_pageblock_skip(struct page *page) -{ -} 
-static inline void set_pageblock_skip(struct page *page) -{ -} + set_pageblock_flags_group(page, 1, PB_migrate_skip, \ + PB_migrate_skip) #endif /* CONFIG_COMPACTION */ #endif /* PAGEBLOCK_FLAGS_H */ diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h index 62db6b0176..0741609bf4 100644 --- a/include/linux/pagemap.h +++ b/include/linux/pagemap.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_PAGEMAP_H #define _LINUX_PAGEMAP_H @@ -10,19 +9,12 @@ #include #include #include -#include +#include #include #include #include /* for in_interrupt() */ #include -struct pagevec; - -static inline bool mapping_empty(struct address_space *mapping) -{ - return xa_empty(&mapping->i_pages); -} - /* * Bits in mapping->flags. */ @@ -34,40 +26,16 @@ enum mapping_flags { AS_EXITING = 4, /* final truncate in progress */ /* writeback related tags are not used */ AS_NO_WRITEBACK_TAGS = 5, - AS_THP_SUPPORT = 6, /* THPs supported */ }; -/** - * mapping_set_error - record a writeback error in the address_space - * @mapping: the mapping in which an error should be set - * @error: the error to set in the mapping - * - * When writeback fails in some way, we must record that error so that - * userspace can be informed when fsync and the like are called. We endeavor - * to report errors on any file that was open at the time of the error. Some - * internal callers also need to know when writeback errors have occurred. - * - * When a writeback error occurs, most filesystems will want to call - * mapping_set_error to record the error in the mapping so that it can be - * reported when the application calls fsync(2). 
- */ static inline void mapping_set_error(struct address_space *mapping, int error) { - if (likely(!error)) - return; - - /* Record in wb_err for checkers using errseq_t based tracking */ - __filemap_set_wb_err(mapping, error); - - /* Record it in superblock */ - if (mapping->host) - errseq_set(&mapping->host->i_sb->s_wb_err, error); - - /* Record it in flags for now, for legacy callers */ - if (error == -ENOSPC) - set_bit(AS_ENOSPC, &mapping->flags); - else - set_bit(AS_EIO, &mapping->flags); + if (unlikely(error)) { + if (error == -ENOSPC) + set_bit(AS_ENOSPC, &mapping->flags); + else + set_bit(AS_EIO, &mapping->flags); + } } static inline void mapping_set_unevictable(struct address_space *mapping) @@ -80,9 +48,11 @@ static inline void mapping_clear_unevictable(struct address_space *mapping) clear_bit(AS_UNEVICTABLE, &mapping->flags); } -static inline bool mapping_unevictable(struct address_space *mapping) +static inline int mapping_unevictable(struct address_space *mapping) { - return mapping && test_bit(AS_UNEVICTABLE, &mapping->flags); + if (mapping) + return test_bit(AS_UNEVICTABLE, &mapping->flags); + return !!mapping; } static inline void mapping_set_exiting(struct address_space *mapping) @@ -126,51 +96,7 @@ static inline void mapping_set_gfp_mask(struct address_space *m, gfp_t mask) m->gfp_mask = mask; } -static inline bool mapping_thp_support(struct address_space *mapping) -{ - return test_bit(AS_THP_SUPPORT, &mapping->flags); -} - -static inline int filemap_nr_thps(struct address_space *mapping) -{ -#ifdef CONFIG_READ_ONLY_THP_FOR_FS - return atomic_read(&mapping->nr_thps); -#else - return 0; -#endif -} - -static inline void filemap_nr_thps_inc(struct address_space *mapping) -{ -#ifdef CONFIG_READ_ONLY_THP_FOR_FS - if (!mapping_thp_support(mapping)) - atomic_inc(&mapping->nr_thps); -#else - WARN_ON_ONCE(1); -#endif -} - -static inline void filemap_nr_thps_dec(struct address_space *mapping) -{ -#ifdef CONFIG_READ_ONLY_THP_FOR_FS - if 
(!mapping_thp_support(mapping)) - atomic_dec(&mapping->nr_thps); -#else - WARN_ON_ONCE(1); -#endif -} - -void release_pages(struct page **pages, int nr); - -/* - * For file cache pages, return the address_space, otherwise return NULL - */ -static inline struct address_space *page_mapping_file(struct page *page) -{ - if (unlikely(PageSwapCache(page))) - return NULL; - return page_mapping(page); -} +void release_pages(struct page **pages, int nr, bool cold); /* * speculatively take a reference to a page. @@ -196,7 +122,7 @@ static inline struct address_space *page_mapping_file(struct page *page) * 3. check the page is still in pagecache (if no, goto 1) * * Remove-side that cares about stability of _refcount (eg. reclaim) has the - * following (with the i_pages lock held): + * following (with tree_lock held for write): * A. atomically check refcount is correct and set it to 0 (atomic_cmpxchg) * B. remove page from pagecache * C. free the page @@ -209,18 +135,20 @@ static inline struct address_space *page_mapping_file(struct page *page) * * It is possible that between 1 and 2, the page is removed then the exact same * page is inserted into the same position in pagecache. That's OK: the - * old find_get_page using a lock could equally have run before or after + * old find_get_page using tree_lock could equally have run before or after * such a re-insertion, depending on order that locks are granted. * * Lookups racing against pagecache insertion isn't a big problem: either 1 * will find the page or it will not. Likewise, the old find_get_page could run * either before the insertion or afterwards, depending on timing. 
*/ -static inline int __page_cache_add_speculative(struct page *page, int count) +static inline int page_cache_get_speculative(struct page *page) { + VM_BUG_ON(in_interrupt()); + #ifdef CONFIG_TINY_RCU # ifdef CONFIG_PREEMPT_COUNT - VM_BUG_ON(!in_atomic() && !irqs_disabled()); + VM_BUG_ON(!in_atomic()); # endif /* * Preempt must be disabled here - we rely on rcu_read_lock doing @@ -232,10 +160,10 @@ static inline int __page_cache_add_speculative(struct page *page, int count) * SMP requires. */ VM_BUG_ON_PAGE(page_count(page) == 0, page); - page_ref_add(page, count); + page_ref_inc(page); #else - if (unlikely(!page_ref_add_unless(page, count, 0))) { + if (unlikely(!get_page_unless_zero(page))) { /* * Either the page has been freed, or will be freed. * In either case, retry here and the caller should @@ -249,51 +177,27 @@ static inline int __page_cache_add_speculative(struct page *page, int count) return 1; } -static inline int page_cache_get_speculative(struct page *page) -{ - return __page_cache_add_speculative(page, 1); -} - +/* + * Same as above, but add instead of inc (could just be merged) + */ static inline int page_cache_add_speculative(struct page *page, int count) { - return __page_cache_add_speculative(page, count); -} + VM_BUG_ON(in_interrupt()); -/** - * attach_page_private - Attach private data to a page. - * @page: Page to attach data to. - * @data: Data to attach to page. - * - * Attaching private data to a page increments the page's reference count. - * The data must be detached before the page will be freed. - */ -static inline void attach_page_private(struct page *page, void *data) -{ - get_page(page); - set_page_private(page, (unsigned long)data); - SetPagePrivate(page); -} +#if !defined(CONFIG_SMP) && defined(CONFIG_TREE_RCU) +# ifdef CONFIG_PREEMPT_COUNT + VM_BUG_ON(!in_atomic()); +# endif + VM_BUG_ON_PAGE(page_count(page) == 0, page); + page_ref_add(page, count); -/** - * detach_page_private - Detach private data from a page. 
- * @page: Page to detach data from. - * - * Removes the data that was previously attached to the page and decrements - * the refcount on the page. - * - * Return: Data that was attached to the page. - */ -static inline void *detach_page_private(struct page *page) -{ - void *data = (void *)page_private(page); +#else + if (unlikely(!page_ref_add_unless(page, count, 0))) + return 0; +#endif + VM_BUG_ON_PAGE(PageCompound(page) && page != compound_head(page), page); - if (!PagePrivate(page)) - return NULL; - ClearPagePrivate(page); - set_page_private(page, 0); - put_page(page); - - return data; + return 1; } #ifdef CONFIG_NUMA @@ -310,16 +214,22 @@ static inline struct page *page_cache_alloc(struct address_space *x) return __page_cache_alloc(mapping_gfp_mask(x)); } -static inline gfp_t readahead_gfp_mask(struct address_space *x) +static inline struct page *page_cache_alloc_cold(struct address_space *x) { - return mapping_gfp_mask(x) | __GFP_NORETRY | __GFP_NOWARN; + return __page_cache_alloc(mapping_gfp_mask(x)|__GFP_COLD); } -typedef int filler_t(void *, struct page *); +static inline gfp_t readahead_gfp_mask(struct address_space *x) +{ + return mapping_gfp_mask(x) | + __GFP_COLD | __GFP_NORETRY | __GFP_NOWARN; +} -pgoff_t page_cache_next_miss(struct address_space *mapping, +typedef int filler_t(struct file *, struct page *); + +pgoff_t page_cache_next_hole(struct address_space *mapping, pgoff_t index, unsigned long max_scan); -pgoff_t page_cache_prev_miss(struct address_space *mapping, +pgoff_t page_cache_prev_hole(struct address_space *mapping, pgoff_t index, unsigned long max_scan); #define FGP_ACCESSED 0x00000001 @@ -328,9 +238,6 @@ pgoff_t page_cache_prev_miss(struct address_space *mapping, #define FGP_WRITE 0x00000008 #define FGP_NOFS 0x00000010 #define FGP_NOWAIT 0x00000020 -#define FGP_FOR_MMAP 0x00000040 -#define FGP_HEAD 0x00000080 -#define FGP_ENTRY 0x00000100 struct page *pagecache_get_page(struct address_space *mapping, pgoff_t offset, int fgp_flags, 
gfp_t cache_gfp_mask); @@ -359,40 +266,22 @@ static inline struct page *find_get_page_flags(struct address_space *mapping, /** * find_lock_page - locate, pin and lock a pagecache page + * pagecache_get_page - find and get a page reference * @mapping: the address_space to search - * @index: the page index + * @offset: the page index * - * Looks up the page cache entry at @mapping & @index. If there is a + * Looks up the page cache slot at @mapping & @offset. If there is a * page cache page, it is returned locked and with an increased * refcount. * - * Context: May sleep. - * Return: A struct page or %NULL if there is no page in the cache for this - * index. + * Otherwise, %NULL is returned. + * + * find_lock_page() may sleep. */ static inline struct page *find_lock_page(struct address_space *mapping, - pgoff_t index) + pgoff_t offset) { - return pagecache_get_page(mapping, index, FGP_LOCK, 0); -} - -/** - * find_lock_head - Locate, pin and lock a pagecache page. - * @mapping: The address_space to search. - * @index: The page index. - * - * Looks up the page cache entry at @mapping & @index. If there is a - * page cache page, its head page is returned locked and with an increased - * refcount. - * - * Context: May sleep. - * Return: A struct page which is !PageTail, or %NULL if there is no page - * in the cache for this index. - */ -static inline struct page *find_lock_head(struct address_space *mapping, - pgoff_t index) -{ - return pagecache_get_page(mapping, index, FGP_LOCK | FGP_HEAD, 0); + return pagecache_get_page(mapping, offset, FGP_LOCK, 0); } /** @@ -415,9 +304,9 @@ static inline struct page *find_lock_head(struct address_space *mapping, * atomic allocation! 
*/ static inline struct page *find_or_create_page(struct address_space *mapping, - pgoff_t index, gfp_t gfp_mask) + pgoff_t offset, gfp_t gfp_mask) { - return pagecache_get_page(mapping, index, + return pagecache_get_page(mapping, offset, FGP_LOCK|FGP_ACCESSED|FGP_CREAT, gfp_mask); } @@ -443,52 +332,20 @@ static inline struct page *grab_cache_page_nowait(struct address_space *mapping, mapping_gfp_mask(mapping)); } -/* Does this page contain this index? */ -static inline bool thp_contains(struct page *head, pgoff_t index) -{ - /* HugeTLBfs indexes the page cache in units of hpage_size */ - if (PageHuge(head)) - return head->index == index; - return page_index(head) == (index & ~(thp_nr_pages(head) - 1UL)); -} - -/* - * Given the page we found in the page cache, return the page corresponding - * to this index in the file - */ -static inline struct page *find_subpage(struct page *head, pgoff_t index) -{ - /* HugeTLBfs wants the head page regardless */ - if (PageHuge(head)) - return head; - - return head + (index & (thp_nr_pages(head) - 1)); -} - +struct page *find_get_entry(struct address_space *mapping, pgoff_t offset); +struct page *find_lock_entry(struct address_space *mapping, pgoff_t offset); unsigned find_get_entries(struct address_space *mapping, pgoff_t start, - pgoff_t end, struct pagevec *pvec, pgoff_t *indices); -unsigned find_get_pages_range(struct address_space *mapping, pgoff_t *start, - pgoff_t end, unsigned int nr_pages, - struct page **pages); -static inline unsigned find_get_pages(struct address_space *mapping, - pgoff_t *start, unsigned int nr_pages, - struct page **pages) -{ - return find_get_pages_range(mapping, start, (pgoff_t)-1, nr_pages, - pages); -} + unsigned int nr_entries, struct page **entries, + pgoff_t *indices); +unsigned find_get_pages(struct address_space *mapping, pgoff_t start, + unsigned int nr_pages, struct page **pages); unsigned find_get_pages_contig(struct address_space *mapping, pgoff_t start, unsigned int nr_pages, struct 
page **pages); -unsigned find_get_pages_range_tag(struct address_space *mapping, pgoff_t *index, - pgoff_t end, xa_mark_t tag, unsigned int nr_pages, - struct page **pages); -static inline unsigned find_get_pages_tag(struct address_space *mapping, - pgoff_t *index, xa_mark_t tag, unsigned int nr_pages, - struct page **pages) -{ - return find_get_pages_range_tag(mapping, index, (pgoff_t)-1, tag, - nr_pages, pages); -} +unsigned find_get_pages_tag(struct address_space *mapping, pgoff_t *index, + int tag, unsigned int nr_pages, struct page **pages); +unsigned find_get_entries_tag(struct address_space *mapping, pgoff_t start, + int tag, unsigned int nr_entries, + struct page **entries, pgoff_t *indices); struct page *grab_cache_page_write_begin(struct address_space *mapping, pgoff_t index, unsigned flags); @@ -512,38 +369,39 @@ extern int read_cache_pages(struct address_space *mapping, static inline struct page *read_mapping_page(struct address_space *mapping, pgoff_t index, void *data) { - return read_cache_page(mapping, index, NULL, data); + filler_t *filler = mapping->a_ops->readpage; + return read_cache_page(mapping, index, filler, data); } /* - * Get index of the page within radix-tree (but not for hugetlb pages). + * Get index of the page with in radix-tree * (TODO: remove once hugetlb pages will have ->index in PAGE_SIZE) */ static inline pgoff_t page_to_index(struct page *page) { - struct page *head; + pgoff_t pgoff; if (likely(!PageTransTail(page))) return page->index; - head = compound_head(page); /* * We don't initialize ->index for tail pages: calculate based on * head page */ - return head->index + page - head; + pgoff = compound_head(page)->index; + pgoff += page - compound_head(page); + return pgoff; } -extern pgoff_t hugetlb_basepage_index(struct page *page); - /* - * Get the offset in PAGE_SIZE (even for hugetlb pages). - * (TODO: hugetlb pages should have ->index in PAGE_SIZE) + * Get the offset in PAGE_SIZE. 
+ * (TODO: hugepage should have ->index in PAGE_SIZE) */ static inline pgoff_t page_to_pgoff(struct page *page) { - if (unlikely(PageHuge(page))) - return hugetlb_basepage_index(page); + if (unlikely(PageHeadHuge(page))) + return page->index << compound_order(page); + return page_to_index(page); } @@ -574,41 +432,12 @@ static inline pgoff_t linear_page_index(struct vm_area_struct *vma, return pgoff; } -struct wait_page_key { - struct page *page; - int bit_nr; - int page_match; -}; - -struct wait_page_queue { - struct page *page; - int bit_nr; - wait_queue_entry_t wait; -}; - -static inline bool wake_page_match(struct wait_page_queue *wait_page, - struct wait_page_key *key) -{ - if (wait_page->page != key->page) - return false; - key->page_match = 1; - - if (wait_page->bit_nr != key->bit_nr) - return false; - - return true; -} - extern void __lock_page(struct page *page); extern int __lock_page_killable(struct page *page); -extern int __lock_page_async(struct page *page, struct wait_page_queue *wait); extern int __lock_page_or_retry(struct page *page, struct mm_struct *mm, unsigned int flags); extern void unlock_page(struct page *page); -/* - * Return true if the page was successfully locked - */ static inline int trylock_page(struct page *page) { page = compound_head(page); @@ -638,27 +467,11 @@ static inline int lock_page_killable(struct page *page) return 0; } -/* - * lock_page_async - Lock the page, unless this would block. If the page - * is already locked, then queue a callback when the page becomes unlocked. - * This callback can then retry the operation. - * - * Returns 0 if the page is locked successfully, or -EIOCBQUEUED if the page - * was already locked and the callback defined in 'wait' was queued. 
- */ -static inline int lock_page_async(struct page *page, - struct wait_page_queue *wait) -{ - if (!trylock_page(page)) - return __lock_page_async(page, wait); - return 0; -} - /* * lock_page_or_retry - Lock the page, unless this would block and the * caller indicated that it can handle a retry. * - * Return value and mmap_lock implications depend on flags; see + * Return value and mmap_sem implications depend on flags; see * __lock_page_or_retry(). */ static inline int lock_page_or_retry(struct page *page, struct mm_struct *mm, @@ -669,11 +482,27 @@ static inline int lock_page_or_retry(struct page *page, struct mm_struct *mm, } /* - * This is exported only for wait_on_page_locked/wait_on_page_writeback, etc., - * and should not be used directly. + * This is exported only for wait_on_page_locked/wait_on_page_writeback, + * and for filesystems which need to wait on PG_private. */ extern void wait_on_page_bit(struct page *page, int bit_nr); + extern int wait_on_page_bit_killable(struct page *page, int bit_nr); +extern int wait_on_page_bit_killable_timeout(struct page *page, + int bit_nr, unsigned long timeout); + +static inline int wait_on_page_locked_killable(struct page *page) +{ + if (!PageLocked(page)) + return 0; + return wait_on_page_bit_killable(compound_head(page), PG_locked); +} + +extern wait_queue_head_t *page_waitqueue(struct page *page); +static inline void wake_up_page(struct page *page, int bit) +{ + __wake_up_bit(page_waitqueue(page), &page->flags, bit); +} /* * Wait for a page to be unlocked. 
@@ -688,54 +517,29 @@ static inline void wait_on_page_locked(struct page *page) wait_on_page_bit(compound_head(page), PG_locked); } -static inline int wait_on_page_locked_killable(struct page *page) +/* + * Wait for a page to complete writeback + */ +static inline void wait_on_page_writeback(struct page *page) { - if (!PageLocked(page)) - return 0; - return wait_on_page_bit_killable(compound_head(page), PG_locked); + if (PageWriteback(page)) + wait_on_page_bit(page, PG_writeback); } -int put_and_wait_on_page_locked(struct page *page, int state); -void wait_on_page_writeback(struct page *page); -int wait_on_page_writeback_killable(struct page *page); extern void end_page_writeback(struct page *page); void wait_for_stable_page(struct page *page); -void __set_page_dirty(struct page *, struct address_space *, int warn); -int __set_page_dirty_nobuffers(struct page *page); -int __set_page_dirty_no_writeback(struct page *page); - void page_endio(struct page *page, bool is_write, int err); -/** - * set_page_private_2 - Set PG_private_2 on a page and take a ref - * @page: The page. - * - * Set the PG_private_2 flag on a page and take the reference needed for the VM - * to handle its lifetime correctly. This sets the flag and takes the - * reference unconditionally, so care must be taken not to set the flag again - * if it's already set. - */ -static inline void set_page_private_2(struct page *page) -{ - page = compound_head(page); - get_page(page); - SetPagePrivate2(page); -} - -void end_page_private_2(struct page *page); -void wait_on_page_private_2(struct page *page); -int wait_on_page_private_2_killable(struct page *page); - /* * Add an arbitrary waiter to a page's wait queue */ -extern void add_page_wait_queue(struct page *page, wait_queue_entry_t *waiter); +extern void add_page_wait_queue(struct page *page, wait_queue_t *waiter); /* * Fault everything in given userspace address range in. 
*/ -static inline int fault_in_pages_writeable(char __user *uaddr, size_t size) +static inline int fault_in_pages_writeable(char __user *uaddr, int size) { char __user *end = uaddr + size - 1; @@ -762,7 +566,7 @@ static inline int fault_in_pages_writeable(char __user *uaddr, size_t size) return 0; } -static inline int fault_in_pages_readable(const char __user *uaddr, size_t size) +static inline int fault_in_pages_readable(const char __user *uaddr, int size) { volatile char c; const char __user *end = uaddr + size - 1; @@ -795,11 +599,7 @@ int add_to_page_cache_lru(struct page *page, struct address_space *mapping, pgoff_t index, gfp_t gfp_mask); extern void delete_from_page_cache(struct page *page); extern void __delete_from_page_cache(struct page *page, void *shadow); -void replace_page_cache_page(struct page *old, struct page *new); -void delete_from_page_cache_batch(struct address_space *mapping, - struct pagevec *pvec); -loff_t mapping_seek_hole_data(struct address_space *, loff_t start, loff_t end, - int whence); +int replace_page_cache_page(struct page *old, struct page *new, gfp_t gfp_mask); /* * Like add_to_page_cache_locked, but used to add newly allocated pages: @@ -817,270 +617,10 @@ static inline int add_to_page_cache(struct page *page, return error; } -/** - * struct readahead_control - Describes a readahead request. - * - * A readahead request is for consecutive pages. Filesystems which - * implement the ->readahead method should call readahead_page() or - * readahead_page_batch() in a loop and attempt to start I/O against - * each page in the request. - * - * Most of the fields in this struct are private and should be accessed - * by the functions below. - * - * @file: The file, used primarily by network filesystems for authentication. - * May be NULL if invoked internally by the filesystem. - * @mapping: Readahead this filesystem object. - * @ra: File readahead state. May be NULL. 
- */ -struct readahead_control { - struct file *file; - struct address_space *mapping; - struct file_ra_state *ra; -/* private: use the readahead_* accessors instead */ - pgoff_t _index; - unsigned int _nr_pages; - unsigned int _batch_count; -}; - -#define DEFINE_READAHEAD(ractl, f, r, m, i) \ - struct readahead_control ractl = { \ - .file = f, \ - .mapping = m, \ - .ra = r, \ - ._index = i, \ - } - -#define VM_READAHEAD_PAGES (SZ_128K / PAGE_SIZE) - -void page_cache_ra_unbounded(struct readahead_control *, - unsigned long nr_to_read, unsigned long lookahead_count); -void page_cache_sync_ra(struct readahead_control *, unsigned long req_count); -void page_cache_async_ra(struct readahead_control *, struct page *, - unsigned long req_count); -void readahead_expand(struct readahead_control *ractl, - loff_t new_start, size_t new_len); - -/** - * page_cache_sync_readahead - generic file readahead - * @mapping: address_space which holds the pagecache and I/O vectors - * @ra: file_ra_state which holds the readahead state - * @file: Used by the filesystem for authentication. - * @index: Index of first page to be read. - * @req_count: Total number of pages being read by the caller. - * - * page_cache_sync_readahead() should be called when a cache miss happened: - * it will submit the read. The readahead logic may decide to piggyback more - * pages onto the read request if access patterns suggest it will improve - * performance. - */ -static inline -void page_cache_sync_readahead(struct address_space *mapping, - struct file_ra_state *ra, struct file *file, pgoff_t index, - unsigned long req_count) -{ - DEFINE_READAHEAD(ractl, file, ra, mapping, index); - page_cache_sync_ra(&ractl, req_count); -} - -/** - * page_cache_async_readahead - file readahead for marked pages - * @mapping: address_space which holds the pagecache and I/O vectors - * @ra: file_ra_state which holds the readahead state - * @file: Used by the filesystem for authentication. 
- * @page: The page at @index which triggered the readahead call. - * @index: Index of first page to be read. - * @req_count: Total number of pages being read by the caller. - * - * page_cache_async_readahead() should be called when a page is used which - * is marked as PageReadahead; this is a marker to suggest that the application - * has used up enough of the readahead window that we should start pulling in - * more pages. - */ -static inline -void page_cache_async_readahead(struct address_space *mapping, - struct file_ra_state *ra, struct file *file, - struct page *page, pgoff_t index, unsigned long req_count) -{ - DEFINE_READAHEAD(ractl, file, ra, mapping, index); - page_cache_async_ra(&ractl, page, req_count); -} - -/** - * readahead_page - Get the next page to read. - * @rac: The current readahead request. - * - * Context: The page is locked and has an elevated refcount. The caller - * should decreases the refcount once the page has been submitted for I/O - * and unlock the page once all I/O to that page has completed. - * Return: A pointer to the next page, or %NULL if we are done. 
- */ -static inline struct page *readahead_page(struct readahead_control *rac) -{ - struct page *page; - - BUG_ON(rac->_batch_count > rac->_nr_pages); - rac->_nr_pages -= rac->_batch_count; - rac->_index += rac->_batch_count; - - if (!rac->_nr_pages) { - rac->_batch_count = 0; - return NULL; - } - - page = xa_load(&rac->mapping->i_pages, rac->_index); - VM_BUG_ON_PAGE(!PageLocked(page), page); - rac->_batch_count = thp_nr_pages(page); - - return page; -} - -static inline unsigned int __readahead_batch(struct readahead_control *rac, - struct page **array, unsigned int array_sz) -{ - unsigned int i = 0; - XA_STATE(xas, &rac->mapping->i_pages, 0); - struct page *page; - - BUG_ON(rac->_batch_count > rac->_nr_pages); - rac->_nr_pages -= rac->_batch_count; - rac->_index += rac->_batch_count; - rac->_batch_count = 0; - - xas_set(&xas, rac->_index); - rcu_read_lock(); - xas_for_each(&xas, page, rac->_index + rac->_nr_pages - 1) { - if (xas_retry(&xas, page)) - continue; - VM_BUG_ON_PAGE(!PageLocked(page), page); - VM_BUG_ON_PAGE(PageTail(page), page); - array[i++] = page; - rac->_batch_count += thp_nr_pages(page); - - /* - * The page cache isn't using multi-index entries yet, - * so the xas cursor needs to be manually moved to the - * next index. This can be removed once the page cache - * is converted. - */ - if (PageHead(page)) - xas_set(&xas, rac->_index + rac->_batch_count); - - if (i == array_sz) - break; - } - rcu_read_unlock(); - - return i; -} - -/** - * readahead_page_batch - Get a batch of pages to read. - * @rac: The current readahead request. - * @array: An array of pointers to struct page. - * - * Context: The pages are locked and have an elevated refcount. The caller - * should decreases the refcount once the page has been submitted for I/O - * and unlock the page once all I/O to that page has completed. - * Return: The number of pages placed in the array. 0 indicates the request - * is complete. 
- */ -#define readahead_page_batch(rac, array) \ - __readahead_batch(rac, array, ARRAY_SIZE(array)) - -/** - * readahead_pos - The byte offset into the file of this readahead request. - * @rac: The readahead request. - */ -static inline loff_t readahead_pos(struct readahead_control *rac) -{ - return (loff_t)rac->_index * PAGE_SIZE; -} - -/** - * readahead_length - The number of bytes in this readahead request. - * @rac: The readahead request. - */ -static inline size_t readahead_length(struct readahead_control *rac) -{ - return rac->_nr_pages * PAGE_SIZE; -} - -/** - * readahead_index - The index of the first page in this readahead request. - * @rac: The readahead request. - */ -static inline pgoff_t readahead_index(struct readahead_control *rac) -{ - return rac->_index; -} - -/** - * readahead_count - The number of pages in this readahead request. - * @rac: The readahead request. - */ -static inline unsigned int readahead_count(struct readahead_control *rac) -{ - return rac->_nr_pages; -} - -/** - * readahead_batch_length - The number of bytes in the current batch. - * @rac: The readahead request. - */ -static inline size_t readahead_batch_length(struct readahead_control *rac) -{ - return rac->_batch_count * PAGE_SIZE; -} - static inline unsigned long dir_pages(struct inode *inode) { return (unsigned long)(inode->i_size + PAGE_SIZE - 1) >> PAGE_SHIFT; } -/** - * page_mkwrite_check_truncate - check if page was truncated - * @page: the page to check - * @inode: the inode to check the page against - * - * Returns the number of bytes in the page up to EOF, - * or -EFAULT if the page was truncated. 
- */ -static inline int page_mkwrite_check_truncate(struct page *page, - struct inode *inode) -{ - loff_t size = i_size_read(inode); - pgoff_t index = size >> PAGE_SHIFT; - int offset = offset_in_page(size); - - if (page->mapping != inode->i_mapping) - return -EFAULT; - - /* page is wholly inside EOF */ - if (page->index < index) - return PAGE_SIZE; - /* page is wholly past EOF */ - if (page->index > index || !offset) - return -EFAULT; - /* page is partially inside EOF */ - return offset; -} - -/** - * i_blocks_per_page - How many blocks fit in this page. - * @inode: The inode which contains the blocks. - * @page: The page (head page if the page is a THP). - * - * If the block size is larger than the size of this page, return zero. - * - * Context: The caller should hold a refcount on the page to prevent it - * from being split. - * Return: The number of filesystem blocks covered by this page. - */ -static inline -unsigned int i_blocks_per_page(struct inode *inode, struct page *page) -{ - return thp_size(page) >> inode->i_blkbits; -} #endif /* _LINUX_PAGEMAP_H */ diff --git a/include/linux/pagevec.h b/include/linux/pagevec.h index 7f3f19065a..b45d391b45 100644 --- a/include/linux/pagevec.h +++ b/include/linux/pagevec.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ /* * include/linux/pagevec.h * @@ -9,46 +8,35 @@ #ifndef _LINUX_PAGEVEC_H #define _LINUX_PAGEVEC_H -#include - -/* 15 pointers + header align the pagevec structure to a power of two */ -#define PAGEVEC_SIZE 15 +/* 14 pointers + two long's align the pagevec structure to a power of two */ +#define PAGEVEC_SIZE 14 struct page; struct address_space; struct pagevec { - unsigned char nr; - bool percpu_pvec_drained; + unsigned long nr; + unsigned long cold; struct page *pages[PAGEVEC_SIZE]; }; void __pagevec_release(struct pagevec *pvec); void __pagevec_lru_add(struct pagevec *pvec); +unsigned pagevec_lookup_entries(struct pagevec *pvec, + struct address_space *mapping, + pgoff_t start, unsigned 
nr_entries, + pgoff_t *indices); void pagevec_remove_exceptionals(struct pagevec *pvec); -unsigned pagevec_lookup_range(struct pagevec *pvec, - struct address_space *mapping, - pgoff_t *start, pgoff_t end); -static inline unsigned pagevec_lookup(struct pagevec *pvec, - struct address_space *mapping, - pgoff_t *start) -{ - return pagevec_lookup_range(pvec, mapping, start, (pgoff_t)-1); -} +unsigned pagevec_lookup(struct pagevec *pvec, struct address_space *mapping, + pgoff_t start, unsigned nr_pages); +unsigned pagevec_lookup_tag(struct pagevec *pvec, + struct address_space *mapping, pgoff_t *index, int tag, + unsigned nr_pages); -unsigned pagevec_lookup_range_tag(struct pagevec *pvec, - struct address_space *mapping, pgoff_t *index, pgoff_t end, - xa_mark_t tag); -static inline unsigned pagevec_lookup_tag(struct pagevec *pvec, - struct address_space *mapping, pgoff_t *index, xa_mark_t tag) -{ - return pagevec_lookup_range_tag(pvec, mapping, index, (pgoff_t)-1, tag); -} - -static inline void pagevec_init(struct pagevec *pvec) +static inline void pagevec_init(struct pagevec *pvec, int cold) { pvec->nr = 0; - pvec->percpu_pvec_drained = false; + pvec->cold = cold; } static inline void pagevec_reinit(struct pagevec *pvec) diff --git a/include/linux/parport.h b/include/linux/parport.h index 1c16ffb8b9..58e3c64c6b 100644 --- a/include/linux/parport.h +++ b/include/linux/parport.h @@ -225,7 +225,6 @@ struct parport { struct pardevice *waittail; struct list_head list; - struct timer_list timer; unsigned int flags; void *sysctl_table; @@ -297,54 +296,13 @@ int __must_check __parport_register_driver(struct parport_driver *, * parport_register_driver must be a macro so that KBUILD_MODNAME can * be expanded */ - -/** - * parport_register_driver - register a parallel port device driver - * @driver: structure describing the driver - * - * This can be called by a parallel port device driver in order - * to receive notifications about ports being found in the - * system, as well 
as ports no longer available. - * - * If devmodel is true then the new device model is used - * for registration. - * - * The @driver structure is allocated by the caller and must not be - * deallocated until after calling parport_unregister_driver(). - * - * If using the non device model: - * The driver's attach() function may block. The port that - * attach() is given will be valid for the duration of the - * callback, but if the driver wants to take a copy of the - * pointer it must call parport_get_port() to do so. Calling - * parport_register_device() on that port will do this for you. - * - * The driver's detach() function may block. The port that - * detach() is given will be valid for the duration of the - * callback, but if the driver wants to take a copy of the - * pointer it must call parport_get_port() to do so. - * - * - * Returns 0 on success. The non device model will always succeeds. - * but the new device model can fail and will return the error code. - **/ #define parport_register_driver(driver) \ __parport_register_driver(driver, THIS_MODULE, KBUILD_MODNAME) /* Unregister a high-level driver. */ +extern void parport_unregister_driver (struct parport_driver *); void parport_unregister_driver(struct parport_driver *); -/** - * module_parport_driver() - Helper macro for registering a modular parport driver - * @__parport_driver: struct parport_driver to be used - * - * Helper macro for parport drivers which do not do anything special in module - * init and exit. This eliminates a lot of boilerplate. Each module may only - * use this macro once, and calling it replaces module_init() and module_exit(). - */ -#define module_parport_driver(__parport_driver) \ - module_driver(__parport_driver, parport_register_driver, parport_unregister_driver) - /* If parport_register_driver doesn't fit your needs, perhaps * parport_find_xxx does. 
*/ extern struct parport *parport_find_number (int); @@ -366,10 +324,18 @@ struct pardev_cb { unsigned int flags; }; -/* - * parport_register_dev_model declares that a device is connected to a - * port, and tells the kernel all it needs to know. - */ +/* parport_register_device declares that a device is connected to a + port, and tells the kernel all it needs to know. + - pf is the preemption function (may be NULL for no callback) + - kf is the wake-up function (may be NULL for no callback) + - irq_func is the interrupt handler (may be NULL for no interrupts) + - handle is a user pointer that gets handed to callback functions. */ +struct pardevice *parport_register_device(struct parport *port, + const char *name, + int (*pf)(void *), void (*kf)(void *), + void (*irq_func)(void *), + int flags, void *handle); + struct pardevice * parport_register_dev_model(struct parport *port, const char *name, const struct pardev_cb *par_dev_cb, int cnt); @@ -493,7 +459,6 @@ extern size_t parport_ieee1284_epp_read_addr (struct parport *, void *, size_t, int); /* IEEE1284.3 functions */ -#define daisy_dev_name "Device ID probe" extern int parport_daisy_init (struct parport *port); extern void parport_daisy_fini (struct parport *port); extern struct pardevice *parport_open (int devnum, const char *name); diff --git a/include/linux/parport_pc.h b/include/linux/parport_pc.h index 3d6fc576d6..cc1767f5cc 100644 --- a/include/linux/parport_pc.h +++ b/include/linux/parport_pc.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef __LINUX_PARPORT_PC_H #define __LINUX_PARPORT_PC_H diff --git a/include/linux/parser.h b/include/linux/parser.h index dd79f45a37..39d5b7955b 100644 --- a/include/linux/parser.h +++ b/include/linux/parser.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ /* * linux/include/linux/parser.h * @@ -7,8 +6,7 @@ * but could potentially be used anywhere else that simple option=arg * parsing is required. 
*/ -#ifndef _LINUX_PARSER_H -#define _LINUX_PARSER_H + /* associates an integer enumerator with a pattern string. */ struct match_token { @@ -29,12 +27,8 @@ typedef struct { int match_token(char *, const match_table_t table, substring_t args[]); int match_int(substring_t *, int *result); -int match_uint(substring_t *s, unsigned int *result); -int match_u64(substring_t *, u64 *result); int match_octal(substring_t *, int *result); int match_hex(substring_t *, int *result); bool match_wildcard(const char *pattern, const char *str); size_t match_strlcpy(char *, const substring_t *, size_t); char *match_strdup(const substring_t *); - -#endif /* _LINUX_PARSER_H */ diff --git a/include/linux/patchkey.h b/include/linux/patchkey.h index f581defb2d..97a919fc99 100644 --- a/include/linux/patchkey.h +++ b/include/linux/patchkey.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ /* * -- definition of _PATCHKEY macro * diff --git a/include/linux/path.h b/include/linux/path.h index 475225a03d..be0c176b20 100644 --- a/include/linux/path.h +++ b/include/linux/path.h @@ -1,7 +1,8 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_PATH_H #define _LINUX_PATH_H +#include + struct dentry; struct vfsmount; @@ -18,10 +19,4 @@ static inline int path_equal(const struct path *path1, const struct path *path2) return path1->mnt == path2->mnt && path1->dentry == path2->dentry; } -static inline void path_put_init(struct path *path) -{ - path_put(path); - *path = (struct path) { }; -} - #endif /* _LINUX_PATH_H */ diff --git a/include/linux/pch_dma.h b/include/linux/pch_dma.h index d5a6a4b6b4..fdafe529ef 100644 --- a/include/linux/pch_dma.h +++ b/include/linux/pch_dma.h @@ -1,6 +1,18 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright (c) 2010 Intel Corporation + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ #ifndef PCH_DMA_H diff --git a/include/linux/pci-acpi.h b/include/linux/pci-acpi.h index f16de399d2..7d63a66e8e 100644 --- a/include/linux/pci-acpi.h +++ b/include/linux/pci-acpi.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ /* * File pci-acpi.h * @@ -25,9 +24,7 @@ static inline acpi_status pci_acpi_remove_pm_notifier(struct acpi_device *dev) } extern phys_addr_t acpi_pci_root_get_mcfg_addr(acpi_handle handle); -struct pci_ecam_ops; -extern int pci_mcfg_lookup(struct acpi_pci_root *root, struct resource *cfgres, - const struct pci_ecam_ops **ecam_ops); +extern phys_addr_t pci_mcfg_lookup(u16 domain, struct resource *bus_res); static inline acpi_handle acpi_find_root_bridge_handle(struct pci_dev *pdev) { @@ -106,28 +103,20 @@ static inline void acpiphp_remove_slots(struct pci_bus *bus) { } static inline void acpiphp_check_host_bridge(struct acpi_device *adev) { } #endif -extern const guid_t pci_acpi_dsm_guid; - -/* _DSM Definitions for PCI */ -#define DSM_PCI_PRESERVE_BOOT_CONFIG 0x05 -#define DSM_PCI_DEVICE_NAME 0x07 -#define DSM_PCI_POWER_ON_RESET_DELAY 0x08 -#define DSM_PCI_DEVICE_READINESS_DURATIONS 0x09 - -#ifdef CONFIG_PCIE_EDR -void pci_acpi_add_edr_notifier(struct pci_dev *pdev); -void pci_acpi_remove_edr_notifier(struct pci_dev *pdev); -#else -static inline void pci_acpi_add_edr_notifier(struct pci_dev *pdev) { } -static inline void pci_acpi_remove_edr_notifier(struct pci_dev *pdev) { } -#endif /* CONFIG_PCIE_EDR */ - -int pci_acpi_set_companion_lookup_hook(struct acpi_device *(*func)(struct 
pci_dev *)); -void pci_acpi_clear_companion_lookup_hook(void); +extern const u8 pci_acpi_dsm_uuid[]; +#define DEVICE_LABEL_DSM 0x07 +#define RESET_DELAY_DSM 0x08 +#define FUNCTION_DELAY_DSM 0x09 #else /* CONFIG_ACPI */ static inline void acpi_pci_add_bus(struct pci_bus *bus) { } static inline void acpi_pci_remove_bus(struct pci_bus *bus) { } #endif /* CONFIG_ACPI */ +#ifdef CONFIG_ACPI_APEI +extern bool aer_acpi_firmware_first(void); +#else +static inline bool aer_acpi_firmware_first(void) { return false; } +#endif + #endif /* _PCI_ACPI_H_ */ diff --git a/include/linux/pci-aspm.h b/include/linux/pci-aspm.h new file mode 100644 index 0000000000..207c561fb4 --- /dev/null +++ b/include/linux/pci-aspm.h @@ -0,0 +1,65 @@ +/* + * aspm.h + * + * PCI Express ASPM defines and function prototypes + * + * Copyright (C) 2007 Intel Corp. + * Zhang Yanmin (yanmin.zhang@intel.com) + * Shaohua Li (shaohua.li@intel.com) + * + * For more information, please consult the following manuals (look at + * http://www.pcisig.com/ for how to get them): + * + * PCI Express Specification + */ + +#ifndef LINUX_ASPM_H +#define LINUX_ASPM_H + +#include + +#define PCIE_LINK_STATE_L0S 1 +#define PCIE_LINK_STATE_L1 2 +#define PCIE_LINK_STATE_CLKPM 4 + +#ifdef CONFIG_PCIEASPM +void pcie_aspm_init_link_state(struct pci_dev *pdev); +void pcie_aspm_exit_link_state(struct pci_dev *pdev); +void pcie_aspm_pm_state_change(struct pci_dev *pdev); +void pcie_aspm_powersave_config_link(struct pci_dev *pdev); +void pci_disable_link_state(struct pci_dev *pdev, int state); +void pci_disable_link_state_locked(struct pci_dev *pdev, int state); +void pcie_no_aspm(void); +#else +static inline void pcie_aspm_init_link_state(struct pci_dev *pdev) +{ +} +static inline void pcie_aspm_exit_link_state(struct pci_dev *pdev) +{ +} +static inline void pcie_aspm_pm_state_change(struct pci_dev *pdev) +{ +} +static inline void pcie_aspm_powersave_config_link(struct pci_dev *pdev) +{ +} +static inline void 
pci_disable_link_state(struct pci_dev *pdev, int state) +{ +} +static inline void pcie_no_aspm(void) +{ +} +#endif + +#ifdef CONFIG_PCIEASPM_DEBUG /* this depends on CONFIG_PCIEASPM */ +void pcie_aspm_create_sysfs_dev_files(struct pci_dev *pdev); +void pcie_aspm_remove_sysfs_dev_files(struct pci_dev *pdev); +#else +static inline void pcie_aspm_create_sysfs_dev_files(struct pci_dev *pdev) +{ +} +static inline void pcie_aspm_remove_sysfs_dev_files(struct pci_dev *pdev) +{ +} +#endif +#endif /* LINUX_ASPM_H */ diff --git a/include/linux/pci-ats.h b/include/linux/pci-ats.h index df54cd5b15..57e0b82509 100644 --- a/include/linux/pci-ats.h +++ b/include/linux/pci-ats.h @@ -1,52 +1,61 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef LINUX_PCI_ATS_H #define LINUX_PCI_ATS_H #include -#ifdef CONFIG_PCI_ATS -/* Address Translation Service */ -bool pci_ats_supported(struct pci_dev *dev); -int pci_enable_ats(struct pci_dev *dev, int ps); -void pci_disable_ats(struct pci_dev *dev); -int pci_ats_queue_depth(struct pci_dev *dev); -int pci_ats_page_aligned(struct pci_dev *dev); -#else /* CONFIG_PCI_ATS */ -static inline bool pci_ats_supported(struct pci_dev *d) -{ return false; } -static inline int pci_enable_ats(struct pci_dev *d, int ps) -{ return -ENODEV; } -static inline void pci_disable_ats(struct pci_dev *d) { } -static inline int pci_ats_queue_depth(struct pci_dev *d) -{ return -ENODEV; } -static inline int pci_ats_page_aligned(struct pci_dev *dev) -{ return 0; } -#endif /* CONFIG_PCI_ATS */ - #ifdef CONFIG_PCI_PRI + int pci_enable_pri(struct pci_dev *pdev, u32 reqs); void pci_disable_pri(struct pci_dev *pdev); int pci_reset_pri(struct pci_dev *pdev); -int pci_prg_resp_pasid_required(struct pci_dev *pdev); -bool pci_pri_supported(struct pci_dev *pdev); -#else -static inline bool pci_pri_supported(struct pci_dev *pdev) -{ return false; } + +#else /* CONFIG_PCI_PRI */ + +static inline int pci_enable_pri(struct pci_dev *pdev, u32 reqs) +{ + return -ENODEV; +} + +static inline 
void pci_disable_pri(struct pci_dev *pdev) +{ +} + +static inline int pci_reset_pri(struct pci_dev *pdev) +{ + return -ENODEV; +} + #endif /* CONFIG_PCI_PRI */ #ifdef CONFIG_PCI_PASID + int pci_enable_pasid(struct pci_dev *pdev, int features); void pci_disable_pasid(struct pci_dev *pdev); int pci_pasid_features(struct pci_dev *pdev); int pci_max_pasids(struct pci_dev *pdev); -#else /* CONFIG_PCI_PASID */ + +#else /* CONFIG_PCI_PASID */ + static inline int pci_enable_pasid(struct pci_dev *pdev, int features) -{ return -EINVAL; } -static inline void pci_disable_pasid(struct pci_dev *pdev) { } +{ + return -EINVAL; +} + +static inline void pci_disable_pasid(struct pci_dev *pdev) +{ +} + static inline int pci_pasid_features(struct pci_dev *pdev) -{ return -EINVAL; } +{ + return -EINVAL; +} + static inline int pci_max_pasids(struct pci_dev *pdev) -{ return -EINVAL; } +{ + return -EINVAL; +} + #endif /* CONFIG_PCI_PASID */ -#endif /* LINUX_PCI_ATS_H */ + +#endif /* LINUX_PCI_ATS_H*/ diff --git a/include/linux/pci-dma-compat.h b/include/linux/pci-dma-compat.h index 249d4d7fbf..39726caef5 100644 --- a/include/linux/pci-dma-compat.h +++ b/include/linux/pci-dma-compat.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ /* include this file if the platform implements the dma_ DMA Mapping API * and wants to provide the pci_ DMA Mapping API in terms of it */ @@ -8,99 +7,100 @@ #include /* This defines the direction arg to the DMA mapping routines. */ -#define PCI_DMA_BIDIRECTIONAL DMA_BIDIRECTIONAL -#define PCI_DMA_TODEVICE DMA_TO_DEVICE -#define PCI_DMA_FROMDEVICE DMA_FROM_DEVICE -#define PCI_DMA_NONE DMA_NONE +#define PCI_DMA_BIDIRECTIONAL 0 +#define PCI_DMA_TODEVICE 1 +#define PCI_DMA_FROMDEVICE 2 +#define PCI_DMA_NONE 3 static inline void * pci_alloc_consistent(struct pci_dev *hwdev, size_t size, dma_addr_t *dma_handle) { - return dma_alloc_coherent(&hwdev->dev, size, dma_handle, GFP_ATOMIC); + return dma_alloc_coherent(hwdev == NULL ? 
NULL : &hwdev->dev, size, dma_handle, GFP_ATOMIC); } static inline void * pci_zalloc_consistent(struct pci_dev *hwdev, size_t size, dma_addr_t *dma_handle) { - return dma_alloc_coherent(&hwdev->dev, size, dma_handle, GFP_ATOMIC); + return dma_zalloc_coherent(hwdev == NULL ? NULL : &hwdev->dev, + size, dma_handle, GFP_ATOMIC); } static inline void pci_free_consistent(struct pci_dev *hwdev, size_t size, void *vaddr, dma_addr_t dma_handle) { - dma_free_coherent(&hwdev->dev, size, vaddr, dma_handle); + dma_free_coherent(hwdev == NULL ? NULL : &hwdev->dev, size, vaddr, dma_handle); } static inline dma_addr_t pci_map_single(struct pci_dev *hwdev, void *ptr, size_t size, int direction) { - return dma_map_single(&hwdev->dev, ptr, size, (enum dma_data_direction)direction); + return dma_map_single(hwdev == NULL ? NULL : &hwdev->dev, ptr, size, (enum dma_data_direction)direction); } static inline void pci_unmap_single(struct pci_dev *hwdev, dma_addr_t dma_addr, size_t size, int direction) { - dma_unmap_single(&hwdev->dev, dma_addr, size, (enum dma_data_direction)direction); + dma_unmap_single(hwdev == NULL ? NULL : &hwdev->dev, dma_addr, size, (enum dma_data_direction)direction); } static inline dma_addr_t pci_map_page(struct pci_dev *hwdev, struct page *page, unsigned long offset, size_t size, int direction) { - return dma_map_page(&hwdev->dev, page, offset, size, (enum dma_data_direction)direction); + return dma_map_page(hwdev == NULL ? NULL : &hwdev->dev, page, offset, size, (enum dma_data_direction)direction); } static inline void pci_unmap_page(struct pci_dev *hwdev, dma_addr_t dma_address, size_t size, int direction) { - dma_unmap_page(&hwdev->dev, dma_address, size, (enum dma_data_direction)direction); + dma_unmap_page(hwdev == NULL ? 
NULL : &hwdev->dev, dma_address, size, (enum dma_data_direction)direction); } static inline int pci_map_sg(struct pci_dev *hwdev, struct scatterlist *sg, int nents, int direction) { - return dma_map_sg(&hwdev->dev, sg, nents, (enum dma_data_direction)direction); + return dma_map_sg(hwdev == NULL ? NULL : &hwdev->dev, sg, nents, (enum dma_data_direction)direction); } static inline void pci_unmap_sg(struct pci_dev *hwdev, struct scatterlist *sg, int nents, int direction) { - dma_unmap_sg(&hwdev->dev, sg, nents, (enum dma_data_direction)direction); + dma_unmap_sg(hwdev == NULL ? NULL : &hwdev->dev, sg, nents, (enum dma_data_direction)direction); } static inline void pci_dma_sync_single_for_cpu(struct pci_dev *hwdev, dma_addr_t dma_handle, size_t size, int direction) { - dma_sync_single_for_cpu(&hwdev->dev, dma_handle, size, (enum dma_data_direction)direction); + dma_sync_single_for_cpu(hwdev == NULL ? NULL : &hwdev->dev, dma_handle, size, (enum dma_data_direction)direction); } static inline void pci_dma_sync_single_for_device(struct pci_dev *hwdev, dma_addr_t dma_handle, size_t size, int direction) { - dma_sync_single_for_device(&hwdev->dev, dma_handle, size, (enum dma_data_direction)direction); + dma_sync_single_for_device(hwdev == NULL ? NULL : &hwdev->dev, dma_handle, size, (enum dma_data_direction)direction); } static inline void pci_dma_sync_sg_for_cpu(struct pci_dev *hwdev, struct scatterlist *sg, int nelems, int direction) { - dma_sync_sg_for_cpu(&hwdev->dev, sg, nelems, (enum dma_data_direction)direction); + dma_sync_sg_for_cpu(hwdev == NULL ? NULL : &hwdev->dev, sg, nelems, (enum dma_data_direction)direction); } static inline void pci_dma_sync_sg_for_device(struct pci_dev *hwdev, struct scatterlist *sg, int nelems, int direction) { - dma_sync_sg_for_device(&hwdev->dev, sg, nelems, (enum dma_data_direction)direction); + dma_sync_sg_for_device(hwdev == NULL ? 
NULL : &hwdev->dev, sg, nelems, (enum dma_data_direction)direction); } static inline int @@ -119,11 +119,29 @@ static inline int pci_set_consistent_dma_mask(struct pci_dev *dev, u64 mask) { return dma_set_coherent_mask(&dev->dev, mask); } + +static inline int pci_set_dma_max_seg_size(struct pci_dev *dev, + unsigned int size) +{ + return dma_set_max_seg_size(&dev->dev, size); +} + +static inline int pci_set_dma_seg_boundary(struct pci_dev *dev, + unsigned long mask) +{ + return dma_set_seg_boundary(&dev->dev, mask); +} #else static inline int pci_set_dma_mask(struct pci_dev *dev, u64 mask) { return -EIO; } static inline int pci_set_consistent_dma_mask(struct pci_dev *dev, u64 mask) { return -EIO; } +static inline int pci_set_dma_max_seg_size(struct pci_dev *dev, + unsigned int size) +{ return -EIO; } +static inline int pci_set_dma_seg_boundary(struct pci_dev *dev, + unsigned long mask) +{ return -EIO; } #endif #endif diff --git a/include/linux/pci-dma.h b/include/linux/pci-dma.h new file mode 100644 index 0000000000..549a041f9c --- /dev/null +++ b/include/linux/pci-dma.h @@ -0,0 +1,11 @@ +#ifndef _LINUX_PCI_DMA_H +#define _LINUX_PCI_DMA_H + +#define DECLARE_PCI_UNMAP_ADDR(ADDR_NAME) DEFINE_DMA_UNMAP_ADDR(ADDR_NAME); +#define DECLARE_PCI_UNMAP_LEN(LEN_NAME) DEFINE_DMA_UNMAP_LEN(LEN_NAME); +#define pci_unmap_addr dma_unmap_addr +#define pci_unmap_addr_set dma_unmap_addr_set +#define pci_unmap_len dma_unmap_len +#define pci_unmap_len_set dma_unmap_len_set + +#endif diff --git a/include/linux/pci-ecam.h b/include/linux/pci-ecam.h index adea5a4771..7adad206b1 100644 --- a/include/linux/pci-ecam.h +++ b/include/linux/pci-ecam.h @@ -1,41 +1,24 @@ -/* SPDX-License-Identifier: GPL-2.0 */ /* * Copyright 2016 Broadcom + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License, version 2, as + * published by the Free Software Foundation (the "GPL"). 
+ * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License version 2 (GPLv2) for more details. + * + * You should have received a copy of the GNU General Public License + * version 2 (GPLv2) along with this source code. */ #ifndef DRIVERS_PCI_ECAM_H #define DRIVERS_PCI_ECAM_H -#include #include #include -/* - * Memory address shift values for the byte-level address that - * can be used when accessing the PCI Express Configuration Space. - */ - -/* - * Enhanced Configuration Access Mechanism (ECAM) - * - * See PCI Express Base Specification, Revision 5.0, Version 1.0, - * Section 7.2.2, Table 7-1, p. 677. - */ -#define PCIE_ECAM_BUS_SHIFT 20 /* Bus number */ -#define PCIE_ECAM_DEVFN_SHIFT 12 /* Device and Function number */ - -#define PCIE_ECAM_BUS_MASK 0xff -#define PCIE_ECAM_DEVFN_MASK 0xff -#define PCIE_ECAM_REG_MASK 0xfff /* Limit offset to a maximum of 4K */ - -#define PCIE_ECAM_BUS(x) (((x) & PCIE_ECAM_BUS_MASK) << PCIE_ECAM_BUS_SHIFT) -#define PCIE_ECAM_DEVFN(x) (((x) & PCIE_ECAM_DEVFN_MASK) << PCIE_ECAM_DEVFN_SHIFT) -#define PCIE_ECAM_REG(x) ((x) & PCIE_ECAM_REG_MASK) - -#define PCIE_ECAM_OFFSET(bus, devfn, where) \ - (PCIE_ECAM_BUS(bus) | \ - PCIE_ECAM_DEVFN(devfn) | \ - PCIE_ECAM_REG(where)) - /* * struct to hold pci ops and bus shift of the config window * for a PCI controller. 
@@ -55,9 +38,8 @@ struct pci_ecam_ops { struct pci_config_window { struct resource res; struct resource busr; - unsigned int bus_shift; void *priv; - const struct pci_ecam_ops *ops; + struct pci_ecam_ops *ops; union { void __iomem *win; /* 64-bit single mapping */ void __iomem **winp; /* 32-bit per-bus mapping */ @@ -68,30 +50,18 @@ struct pci_config_window { /* create and free pci_config_window */ struct pci_config_window *pci_ecam_create(struct device *dev, struct resource *cfgres, struct resource *busr, - const struct pci_ecam_ops *ops); + struct pci_ecam_ops *ops); void pci_ecam_free(struct pci_config_window *cfg); /* map_bus when ->sysdata is an instance of pci_config_window */ void __iomem *pci_ecam_map_bus(struct pci_bus *bus, unsigned int devfn, int where); /* default ECAM ops */ -extern const struct pci_ecam_ops pci_generic_ecam_ops; +extern struct pci_ecam_ops pci_generic_ecam_ops; -#if defined(CONFIG_ACPI) && defined(CONFIG_PCI_QUIRKS) -extern const struct pci_ecam_ops pci_32b_ops; /* 32-bit accesses only */ -extern const struct pci_ecam_ops pci_32b_read_ops; /* 32-bit read only */ -extern const struct pci_ecam_ops hisi_pcie_ops; /* HiSilicon */ -extern const struct pci_ecam_ops thunder_pem_ecam_ops; /* Cavium ThunderX 1.x & 2.x */ -extern const struct pci_ecam_ops pci_thunder_ecam_ops; /* Cavium ThunderX 1.x */ -extern const struct pci_ecam_ops xgene_v1_pcie_ecam_ops; /* APM X-Gene PCIe v1 */ -extern const struct pci_ecam_ops xgene_v2_pcie_ecam_ops; /* APM X-Gene PCIe v2.x */ -extern const struct pci_ecam_ops al_pcie_ops; /* Amazon Annapurna Labs PCIe */ -extern const struct pci_ecam_ops tegra194_pcie_ops; /* Tegra194 PCIe */ -#endif - -#if IS_ENABLED(CONFIG_PCI_HOST_COMMON) +#ifdef CONFIG_PCI_HOST_GENERIC /* for DT-based PCI controllers that support ECAM */ -int pci_host_common_probe(struct platform_device *pdev); -int pci_host_common_remove(struct platform_device *pdev); +int pci_host_common_probe(struct platform_device *pdev, + struct pci_ecam_ops 
*ops); #endif #endif diff --git a/include/linux/pci.h b/include/linux/pci.h index cd8aa6fce2..a38772a855 100644 --- a/include/linux/pci.h +++ b/include/linux/pci.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ /* * pci.h * @@ -6,18 +5,12 @@ * Copyright 1994, Drew Eckhardt * Copyright 1997--1999 Martin Mares * - * PCI Express ASPM defines and function prototypes - * Copyright (c) 2007 Intel Corp. - * Zhang Yanmin (yanmin.zhang@intel.com) - * Shaohua Li (shaohua.li@intel.com) - * * For more information, please consult the following manuals (look at * http://www.pcisig.com/ for how to get them): * * PCI BIOS Specification * PCI Local Bus Specification * PCI to PCI Bridge Specification - * PCI Express Specification * PCI System Design Guide */ #ifndef LINUX_PCI_H @@ -35,26 +28,12 @@ #include #include #include -#include #include #include #include #include -#define PCI_STATUS_ERROR_BITS (PCI_STATUS_DETECTED_PARITY | \ - PCI_STATUS_SIG_SYSTEM_ERROR | \ - PCI_STATUS_REC_MASTER_ABORT | \ - PCI_STATUS_REC_TARGET_ABORT | \ - PCI_STATUS_SIG_TARGET_ABORT | \ - PCI_STATUS_PARITY) - -/* Number of reset methods used in pci_reset_fn_methods array in pci.c */ -#define PCI_NUM_RESET_METHODS 7 - -#define PCI_RESET_PROBE true -#define PCI_RESET_DO_RESET false - /* * The PCI interface treats multi-function devices as independent * devices. The slot/function address of each device is encoded @@ -67,17 +46,17 @@ * In the interest of not exposing interfaces to user-space unnecessarily, * the following kernel-only defines are being added here. 
*/ -#define PCI_DEVID(bus, devfn) ((((u16)(bus)) << 8) | (devfn)) +#define PCI_DEVID(bus, devfn) ((((u16)(bus)) << 8) | (devfn)) /* return bus from PCI devid = ((u16)bus_number) << 8) | devfn */ #define PCI_BUS_NUM(x) (((x) >> 8) & 0xff) /* pci_slot represents a physical slot */ struct pci_slot { - struct pci_bus *bus; /* Bus this slot is on */ - struct list_head list; /* Node in list of slots */ - struct hotplug_slot *hotplug; /* Hotplug info (move here) */ - unsigned char number; /* PCI_SLOT(pci_dev->devfn) */ - struct kobject kobj; + struct pci_bus *bus; /* The bus this slot is on */ + struct list_head list; /* node in list of slots on this bus */ + struct hotplug_slot *hotplug; /* Hotplug info (migrate over time) */ + unsigned char number; /* PCI_SLOT(pci_dev->devfn) */ + struct kobject kobj; }; static inline const char *pci_slot_name(const struct pci_slot *slot) @@ -91,69 +70,37 @@ enum pci_mmap_state { pci_mmap_mem }; -/* For PCI devices, the region numbers are assigned this way: */ +/* + * For PCI devices, the region numbers are assigned this way: + */ enum { /* #0-5: standard PCI resources */ PCI_STD_RESOURCES, - PCI_STD_RESOURCE_END = PCI_STD_RESOURCES + PCI_STD_NUM_BARS - 1, + PCI_STD_RESOURCE_END = 5, /* #6: expansion ROM resource */ PCI_ROM_RESOURCE, - /* Device-specific resources */ + /* device specific resources */ #ifdef CONFIG_PCI_IOV PCI_IOV_RESOURCES, PCI_IOV_RESOURCE_END = PCI_IOV_RESOURCES + PCI_SRIOV_NUM_BARS - 1, #endif -/* PCI-to-PCI (P2P) bridge windows */ -#define PCI_BRIDGE_IO_WINDOW (PCI_BRIDGE_RESOURCES + 0) -#define PCI_BRIDGE_MEM_WINDOW (PCI_BRIDGE_RESOURCES + 1) -#define PCI_BRIDGE_PREF_MEM_WINDOW (PCI_BRIDGE_RESOURCES + 2) - -/* CardBus bridge windows */ -#define PCI_CB_BRIDGE_IO_0_WINDOW (PCI_BRIDGE_RESOURCES + 0) -#define PCI_CB_BRIDGE_IO_1_WINDOW (PCI_BRIDGE_RESOURCES + 1) -#define PCI_CB_BRIDGE_MEM_0_WINDOW (PCI_BRIDGE_RESOURCES + 2) -#define PCI_CB_BRIDGE_MEM_1_WINDOW (PCI_BRIDGE_RESOURCES + 3) - -/* Total number of bridge 
resources for P2P and CardBus */ + /* resources assigned to buses behind the bridge */ #define PCI_BRIDGE_RESOURCE_NUM 4 - /* Resources assigned to buses behind the bridge */ PCI_BRIDGE_RESOURCES, PCI_BRIDGE_RESOURCE_END = PCI_BRIDGE_RESOURCES + PCI_BRIDGE_RESOURCE_NUM - 1, - /* Total resources associated with a PCI device */ + /* total resources associated with a PCI device */ PCI_NUM_RESOURCES, - /* Preserve this for compatibility */ + /* preserve this for compatibility */ DEVICE_COUNT_RESOURCE = PCI_NUM_RESOURCES, }; -/** - * enum pci_interrupt_pin - PCI INTx interrupt values - * @PCI_INTERRUPT_UNKNOWN: Unknown or unassigned interrupt - * @PCI_INTERRUPT_INTA: PCI INTA pin - * @PCI_INTERRUPT_INTB: PCI INTB pin - * @PCI_INTERRUPT_INTC: PCI INTC pin - * @PCI_INTERRUPT_INTD: PCI INTD pin - * - * Corresponds to values for legacy PCI INTx interrupts, as can be found in the - * PCI_INTERRUPT_PIN register. - */ -enum pci_interrupt_pin { - PCI_INTERRUPT_UNKNOWN, - PCI_INTERRUPT_INTA, - PCI_INTERRUPT_INTB, - PCI_INTERRUPT_INTC, - PCI_INTERRUPT_INTD, -}; - -/* The number of legacy PCI INTx interrupts */ -#define PCI_NUM_INTX 4 - /* * pci_power_t values must match the bits in the Capabilities PME_Support * and Control/Status PowerState fields in the Power Management capability. @@ -176,16 +123,18 @@ static inline const char *pci_power_name(pci_power_t state) return pci_power_names[1 + (__force int) state]; } -/** - * typedef pci_channel_state_t - * - * The pci_channel state describes connectivity between the CPU and - * the PCI device. If some PCI bus between here and the PCI device - * has crashed or locked up, this info is reflected here. +#define PCI_PM_D2_DELAY 200 +#define PCI_PM_D3_WAIT 10 +#define PCI_PM_D3COLD_WAIT 100 +#define PCI_PM_BUS_WAIT 50 + +/** The pci_channel state describes connectivity between the CPU and + * the pci device. If some PCI bus between here and the pci device + * has crashed or locked up, this info is reflected here. 
*/ typedef unsigned int __bitwise pci_channel_state_t; -enum { +enum pci_channel_state { /* I/O channel is in normal state */ pci_channel_io_normal = (__force pci_channel_state_t) 1, @@ -211,7 +160,9 @@ enum pcie_reset_state { typedef unsigned short __bitwise pci_dev_flags_t; enum pci_dev_flags { - /* INTX_DISABLE in PCI_COMMAND register disables MSI too */ + /* INTX_DISABLE in PCI_COMMAND register disables MSI + * generation too. + */ PCI_DEV_FLAGS_MSI_INTX_DISABLE_BUG = (__force pci_dev_flags_t) (1 << 0), /* Device configuration is irrevocably lost if disabled into D3 */ PCI_DEV_FLAGS_NO_D3 = (__force pci_dev_flags_t) (1 << 1), @@ -227,12 +178,6 @@ enum pci_dev_flags { PCI_DEV_FLAGS_NO_PM_RESET = (__force pci_dev_flags_t) (1 << 7), /* Get VPD from function 0 VPD */ PCI_DEV_FLAGS_VPD_REF_F0 = (__force pci_dev_flags_t) (1 << 8), - /* A non-root bridge where translation occurs, stop alias search here */ - PCI_DEV_FLAGS_BRIDGE_XLATE_ROOT = (__force pci_dev_flags_t) (1 << 9), - /* Do not use FLR even if device advertises PCI_AF_CAP */ - PCI_DEV_FLAGS_NO_FLR_RESET = (__force pci_dev_flags_t) (1 << 10), - /* Don't use Relaxed Ordering for TLPs directed at this device */ - PCI_DEV_FLAGS_NO_RELAXED_ORDERING = (__force pci_dev_flags_t) (1 << 11), }; enum pci_irq_reroute_variant { @@ -245,23 +190,22 @@ enum pci_bus_flags { PCI_BUS_FLAGS_NO_MSI = (__force pci_bus_flags_t) 1, PCI_BUS_FLAGS_NO_MMRBC = (__force pci_bus_flags_t) 2, PCI_BUS_FLAGS_NO_AERSID = (__force pci_bus_flags_t) 4, - PCI_BUS_FLAGS_NO_EXTCFG = (__force pci_bus_flags_t) 8, }; -/* Values from Link Status register, PCIe r3.1, sec 7.8.8 */ +/* These values come from the PCI Express Spec */ enum pcie_link_width { PCIE_LNK_WIDTH_RESRV = 0x00, PCIE_LNK_X1 = 0x01, PCIE_LNK_X2 = 0x02, PCIE_LNK_X4 = 0x04, PCIE_LNK_X8 = 0x08, - PCIE_LNK_X12 = 0x0c, + PCIE_LNK_X12 = 0x0C, PCIE_LNK_X16 = 0x10, PCIE_LNK_X32 = 0x20, - PCIE_LNK_WIDTH_UNKNOWN = 0xff, + PCIE_LNK_WIDTH_UNKNOWN = 0xFF, }; -/* See matching string table in 
pci_speed_string() */ +/* Based on the PCI Hotplug Spec, but some values are made up by us */ enum pci_bus_speed { PCI_SPEED_33MHz = 0x00, PCI_SPEED_66MHz = 0x01, @@ -285,38 +229,39 @@ enum pci_bus_speed { PCIE_SPEED_2_5GT = 0x14, PCIE_SPEED_5_0GT = 0x15, PCIE_SPEED_8_0GT = 0x16, - PCIE_SPEED_16_0GT = 0x17, - PCIE_SPEED_32_0GT = 0x18, - PCIE_SPEED_64_0GT = 0x19, PCI_SPEED_UNKNOWN = 0xff, }; -enum pci_bus_speed pcie_get_speed_cap(struct pci_dev *dev); -enum pcie_link_width pcie_get_width_cap(struct pci_dev *dev); - -struct pci_vpd { - struct mutex lock; - unsigned int len; - u8 cap; +struct pci_cap_saved_data { + u16 cap_nr; + bool cap_extended; + unsigned int size; + u32 data[0]; +}; + +struct pci_cap_saved_state { + struct hlist_node next; + struct pci_cap_saved_data cap; }; -struct irq_affinity; struct pcie_link_state; +struct pci_vpd; struct pci_sriov; -struct pci_p2pdma; -struct rcec_ea; +struct pci_ats; -/* The pci_dev structure describes PCI devices */ +/* + * The pci_dev structure is used to describe PCI devices. 
+ */ struct pci_dev { - struct list_head bus_list; /* Node in per-bus list */ - struct pci_bus *bus; /* Bus this device is on */ - struct pci_bus *subordinate; /* Bus this device bridges to */ + struct list_head bus_list; /* node in per-bus list */ + struct pci_bus *bus; /* bus this device is on */ + struct pci_bus *subordinate; /* bus this device bridges to */ - void *sysdata; /* Hook for sys-specific extension */ - struct proc_dir_entry *procent; /* Device entry in /proc/bus/pci */ + void *sysdata; /* hook for sys-specific extension */ + struct proc_dir_entry *procent; /* device entry in /proc/bus/pci */ struct pci_slot *slot; /* Physical slot this device is in */ - unsigned int devfn; /* Encoded device & function index */ + unsigned int devfn; /* encoded device & function index */ unsigned short vendor; unsigned short device; unsigned short subsystem_vendor; @@ -326,23 +271,17 @@ struct pci_dev { u8 hdr_type; /* PCI header type (`multi' flag masked out) */ #ifdef CONFIG_PCIEAER u16 aer_cap; /* AER capability offset */ - struct aer_stats *aer_stats; /* AER stats for this device */ #endif -#ifdef CONFIG_PCIEPORTBUS - struct rcec_ea *rcec_ea; /* RCEC cached endpoint association */ - struct pci_dev *rcec; /* Associated RCEC device */ -#endif - u32 devcap; /* PCIe Device Capabilities */ u8 pcie_cap; /* PCIe capability offset */ u8 msi_cap; /* MSI capability offset */ u8 msix_cap; /* MSI-X capability offset */ u8 pcie_mpss:3; /* PCIe Max Payload Size Supported */ - u8 rom_base_reg; /* Config register controlling ROM */ - u8 pin; /* Interrupt pin this device uses */ - u16 pcie_flags_reg; /* Cached PCIe Capabilities Register */ - unsigned long *dma_alias_mask;/* Mask of enabled devfn aliases */ + u8 rom_base_reg; /* which config register controls the ROM */ + u8 pin; /* which interrupt pin this device uses */ + u16 pcie_flags_reg; /* cached PCIe Capabilities Register */ + unsigned long *dma_alias_mask;/* mask of enabled devfn aliases */ - struct pci_driver *driver; /* 
Driver bound to this device */ + struct pci_driver *driver; /* which driver has allocated this device */ u64 dma_mask; /* Mask of the bits of bus address this device implements. Normally this is 0xffffffff. You only need to change @@ -351,13 +290,13 @@ struct pci_dev { struct device_dma_parameters dma_parms; - pci_power_t current_state; /* Current operating state. In ACPI, - this is D0-D3, D0 being fully - functional, and D3 being off. */ - unsigned int imm_ready:1; /* Supports Immediate Readiness */ + pci_power_t current_state; /* Current operating state. In ACPI-speak, + this is D0-D3, D0 being fully functional, + and D3 being off. */ u8 pm_cap; /* PM capability offset */ unsigned int pme_support:5; /* Bitmask of states from which PME# can be generated */ + unsigned int pme_interrupt:1; unsigned int pme_poll:1; /* Poll device's PME status bit */ unsigned int d1_support:1; /* Low power state D1 is supported */ unsigned int d2_support:1; /* Low power state D2 is supported */ @@ -365,106 +304,77 @@ struct pci_dev { unsigned int no_d3cold:1; /* D3cold is forbidden */ unsigned int bridge_d3:1; /* Allow D3 for bridge */ unsigned int d3cold_allowed:1; /* D3cold is allowed by user */ - unsigned int mmio_always_on:1; /* Disallow turning off io/mem - decoding during BAR sizing */ + unsigned int mmio_always_on:1; /* disallow turning off io/mem + decoding during bar sizing */ unsigned int wakeup_prepared:1; - unsigned int runtime_d3cold:1; /* Whether go through runtime + unsigned int runtime_d3cold:1; /* whether go through runtime D3cold, not set for devices powered on/off by the corresponding bridge */ - unsigned int skip_bus_pm:1; /* Internal: Skip bus-level PM */ unsigned int ignore_hotplug:1; /* Ignore hotplug events */ unsigned int hotplug_user_indicators:1; /* SlotCtl indicators controlled exclusively by user sysfs */ - unsigned int clear_retrain_link:1; /* Need to clear Retrain Link - bit manually */ - unsigned int d3hot_delay; /* D3hot->D0 transition time in ms */ + 
unsigned int d3_delay; /* D3->D0 transition time in ms */ unsigned int d3cold_delay; /* D3cold->D0 transition time in ms */ #ifdef CONFIG_PCIEASPM struct pcie_link_state *link_state; /* ASPM link state */ - unsigned int ltr_path:1; /* Latency Tolerance Reporting - supported from root to here */ - u16 l1ss; /* L1SS Capability pointer */ #endif - unsigned int pasid_no_tlp:1; /* PASID works without TLP Prefix */ - unsigned int eetlp_prefix_path:1; /* End-to-End TLP Prefix */ - pci_channel_state_t error_state; /* Current connectivity state */ - struct device dev; /* Generic device interface */ + pci_channel_state_t error_state; /* current connectivity state */ + struct device dev; /* Generic device interface */ - int cfg_size; /* Size of config space */ + int cfg_size; /* Size of configuration space */ /* * Instead of touching interrupt line and base address registers * directly, use the values stored here. They might be different! */ unsigned int irq; + struct cpumask *irq_affinity; struct resource resource[DEVICE_COUNT_RESOURCE]; /* I/O and memory regions + expansion ROMs */ - bool match_driver; /* Skip attaching driver */ - - unsigned int transparent:1; /* Subtractive decode bridge */ - unsigned int io_window:1; /* Bridge has I/O window */ - unsigned int pref_window:1; /* Bridge has pref mem window */ - unsigned int pref_64_window:1; /* Pref mem window is 64-bit */ - unsigned int multifunction:1; /* Multi-function device */ - - unsigned int is_busmaster:1; /* Is busmaster */ - unsigned int no_msi:1; /* May not use MSI */ - unsigned int no_64bit_msi:1; /* May only use 32-bit MSIs */ - unsigned int block_cfg_access:1; /* Config space access blocked */ - unsigned int broken_parity_status:1; /* Generates false positive parity */ - unsigned int irq_reroute_variant:2; /* Needs IRQ rerouting variant */ + bool match_driver; /* Skip attaching driver */ + /* These fields are used by common fixups */ + unsigned int transparent:1; /* Subtractive decode PCI bridge */ + unsigned 
int multifunction:1;/* Part of multi-function device */ + /* keep track of device state */ + unsigned int is_added:1; + unsigned int is_busmaster:1; /* device is busmaster */ + unsigned int no_msi:1; /* device may not use msi */ + unsigned int no_64bit_msi:1; /* device may only use 32-bit MSIs */ + unsigned int block_cfg_access:1; /* config space access is blocked */ + unsigned int broken_parity_status:1; /* Device generates false positive parity */ + unsigned int irq_reroute_variant:2; /* device needs IRQ rerouting variant */ unsigned int msi_enabled:1; unsigned int msix_enabled:1; - unsigned int ari_enabled:1; /* ARI forwarding */ - unsigned int ats_enabled:1; /* Address Translation Svc */ - unsigned int pasid_enabled:1; /* Process Address Space ID */ - unsigned int pri_enabled:1; /* Page Request Interface */ + unsigned int ari_enabled:1; /* ARI forwarding */ + unsigned int ats_enabled:1; /* Address Translation Service */ unsigned int is_managed:1; - unsigned int needs_freset:1; /* Requires fundamental reset */ + unsigned int needs_freset:1; /* Dev requires fundamental reset */ unsigned int state_saved:1; unsigned int is_physfn:1; unsigned int is_virtfn:1; - unsigned int is_hotplug_bridge:1; - unsigned int shpc_managed:1; /* SHPC owned by shpchp */ - unsigned int is_thunderbolt:1; /* Thunderbolt controller */ - /* - * Devices marked being untrusted are the ones that can potentially - * execute DMA attacks and similar. They are typically connected - * through external ports such as Thunderbolt but not limited to - * that. When an IOMMU is enabled they should be getting full - * mappings to make sure they cannot access arbitrary memory. - */ - unsigned int untrusted:1; - /* - * Info from the platform, e.g., ACPI or device tree, may mark a - * device as "external-facing". An external-facing device is - * itself internal but devices downstream from it are external. 
- */ - unsigned int external_facing:1; - unsigned int broken_intx_masking:1; /* INTx masking can't be used */ - unsigned int io_window_1k:1; /* Intel bridge 1K I/O windows */ + unsigned int reset_fn:1; + unsigned int is_hotplug_bridge:1; + unsigned int __aer_firmware_first_valid:1; + unsigned int __aer_firmware_first:1; + unsigned int broken_intx_masking:1; + unsigned int io_window_1k:1; /* Intel P2P bridge 1K I/O windows */ unsigned int irq_managed:1; - unsigned int non_compliant_bars:1; /* Broken BARs; ignore them */ - unsigned int is_probed:1; /* Device probing in progress */ - unsigned int link_active_reporting:1;/* Device capable of reporting link active */ - unsigned int no_vf_scan:1; /* Don't scan for VFs after IOV enablement */ - unsigned int no_command_memory:1; /* No PCI_COMMAND_MEMORY */ + unsigned int has_secondary_link:1; + unsigned int non_compliant_bars:1; /* broken BARs; ignore them */ pci_dev_flags_t dev_flags; atomic_t enable_cnt; /* pci_enable_device has been called */ - u32 saved_config_space[16]; /* Config space saved at suspend time */ + u32 saved_config_space[16]; /* config space saved at suspend time */ struct hlist_head saved_cap_space; - int rom_attr_enabled; /* Display of ROM attribute enabled? */ + struct bin_attribute *rom_attr; /* attribute descriptor for sysfs ROM entry */ + int rom_attr_enabled; /* has display of the rom attribute been enabled? 
*/ struct bin_attribute *res_attr[DEVICE_COUNT_RESOURCE]; /* sysfs file for resources */ struct bin_attribute *res_attr_wc[DEVICE_COUNT_RESOURCE]; /* sysfs file for WC mapping of resources */ -#ifdef CONFIG_HOTPLUG_PCI_PCIE - unsigned int broken_cmd_compl:1; /* No compl for some cmds */ -#endif #ifdef CONFIG_PCIE_PTM unsigned int ptm_root:1; unsigned int ptm_enabled:1; @@ -473,41 +383,19 @@ struct pci_dev { #ifdef CONFIG_PCI_MSI const struct attribute_group **msi_irq_groups; #endif - struct pci_vpd vpd; -#ifdef CONFIG_PCIE_DPC - u16 dpc_cap; - unsigned int dpc_rp_extensions:1; - u8 dpc_rp_log_size; -#endif + struct pci_vpd *vpd; #ifdef CONFIG_PCI_ATS union { - struct pci_sriov *sriov; /* PF: SR-IOV info */ - struct pci_dev *physfn; /* VF: related PF */ + struct pci_sriov *sriov; /* SR-IOV capability related */ + struct pci_dev *physfn; /* the PF this VF is associated with */ }; u16 ats_cap; /* ATS Capability offset */ u8 ats_stu; /* ATS Smallest Translation Unit */ + atomic_t ats_ref_cnt; /* number of VFs with ATS enabled */ #endif -#ifdef CONFIG_PCI_PRI - u16 pri_cap; /* PRI Capability offset */ - u32 pri_reqs_alloc; /* Number of PRI requests allocated */ - unsigned int pasid_required:1; /* PRG Response PASID Required */ -#endif -#ifdef CONFIG_PCI_PASID - u16 pasid_cap; /* PASID Capability offset */ - u16 pasid_features; -#endif -#ifdef CONFIG_PCI_P2PDMA - struct pci_p2pdma __rcu *p2pdma; -#endif - u16 acs_cap; /* ACS Capability offset */ - phys_addr_t rom; /* Physical address if not from BAR */ - size_t romlen; /* Length if not from BAR */ - char *driver_override; /* Driver name to force a match */ - - unsigned long priv_flags; /* Private flags for the PCI driver */ - - /* These methods index pci_reset_fn_methods[] */ - u8 reset_methods[PCI_NUM_RESET_METHODS]; /* In priority order */ + phys_addr_t rom; /* Physical address of ROM if it's not from the BAR */ + size_t romlen; /* Length of ROM if it's not from the BAR */ + char *driver_override; /* Driver name to 
force a match */ }; static inline struct pci_dev *pci_physfn(struct pci_dev *dev) @@ -529,72 +417,28 @@ static inline int pci_channel_offline(struct pci_dev *pdev) return (pdev->error_state != pci_channel_io_normal); } -/* - * Currently in ACPI spec, for each PCI host bridge, PCI Segment - * Group number is limited to a 16-bit value, therefore (int)-1 is - * not a valid PCI domain number, and can be used as a sentinel - * value indicating ->domain_nr is not set by the driver (and - * CONFIG_PCI_DOMAINS_GENERIC=y archs will set it with - * pci_bus_find_domain_nr()). - */ -#define PCI_DOMAIN_NR_NOT_SET (-1) - struct pci_host_bridge { - struct device dev; - struct pci_bus *bus; /* Root bus */ - struct pci_ops *ops; - struct pci_ops *child_ops; - void *sysdata; - int busnr; - int domain_nr; + struct device dev; + struct pci_bus *bus; /* root bus */ struct list_head windows; /* resource_entry */ - struct list_head dma_ranges; /* dma ranges resource list */ - u8 (*swizzle_irq)(struct pci_dev *, u8 *); /* Platform IRQ swizzler */ - int (*map_irq)(const struct pci_dev *, u8, u8); void (*release_fn)(struct pci_host_bridge *); - void *release_data; - unsigned int ignore_reset_delay:1; /* For entire hierarchy */ - unsigned int no_ext_tags:1; /* No Extended Tags */ - unsigned int native_aer:1; /* OS may use PCIe AER */ - unsigned int native_pcie_hotplug:1; /* OS may use PCIe hotplug */ - unsigned int native_shpc_hotplug:1; /* OS may use SHPC hotplug */ - unsigned int native_pme:1; /* OS may use PCIe PME */ - unsigned int native_ltr:1; /* OS may use PCIe LTR */ - unsigned int native_dpc:1; /* OS may use PCIe DPC */ - unsigned int preserve_config:1; /* Preserve FW resource setup */ - unsigned int size_windows:1; /* Enable root bus sizing */ - unsigned int msi_domain:1; /* Bridge wants MSI domain */ - + void *release_data; + unsigned int ignore_reset_delay:1; /* for entire hierarchy */ /* Resource alignment requirements */ resource_size_t (*align_resource)(struct pci_dev *dev, 
const struct resource *res, resource_size_t start, resource_size_t size, resource_size_t align); - unsigned long private[] ____cacheline_aligned; }; #define to_pci_host_bridge(n) container_of(n, struct pci_host_bridge, dev) -static inline void *pci_host_bridge_priv(struct pci_host_bridge *bridge) -{ - return (void *)bridge->private; -} - -static inline struct pci_host_bridge *pci_host_bridge_from_priv(void *priv) -{ - return container_of(priv, struct pci_host_bridge, private); -} - -struct pci_host_bridge *pci_alloc_host_bridge(size_t priv); -struct pci_host_bridge *devm_pci_alloc_host_bridge(struct device *dev, - size_t priv); -void pci_free_host_bridge(struct pci_host_bridge *bridge); struct pci_host_bridge *pci_find_host_bridge(struct pci_bus *bus); void pci_set_host_bridge_release(struct pci_host_bridge *bridge, - void (*release_fn)(struct pci_host_bridge *), - void *release_data); + void (*release_fn)(struct pci_host_bridge *), + void *release_data); int pcibios_root_bridge_prepare(struct pci_host_bridge *bridge); @@ -614,31 +458,32 @@ int pcibios_root_bridge_prepare(struct pci_host_bridge *bridge); #define PCI_SUBTRACTIVE_DECODE 0x1 struct pci_bus_resource { - struct list_head list; - struct resource *res; - unsigned int flags; + struct list_head list; + struct resource *res; + unsigned int flags; }; #define PCI_REGION_FLAG_MASK 0x0fU /* These bits of resource flags tell us the PCI region flags */ struct pci_bus { - struct list_head node; /* Node in list of buses */ - struct pci_bus *parent; /* Parent bus this bridge is on */ - struct list_head children; /* List of child buses */ - struct list_head devices; /* List of devices on this bus */ - struct pci_dev *self; /* Bridge device as seen by parent */ - struct list_head slots; /* List of slots on this bus; + struct list_head node; /* node in list of buses */ + struct pci_bus *parent; /* parent bus this bridge is on */ + struct list_head children; /* list of child buses */ + struct list_head devices; /* list 
of devices on this bus */ + struct pci_dev *self; /* bridge device as seen by parent */ + struct list_head slots; /* list of slots on this bus; protected by pci_slot_mutex */ struct resource *resource[PCI_BRIDGE_RESOURCE_NUM]; - struct list_head resources; /* Address space routed to this bus */ - struct resource busn_res; /* Bus numbers routed to this bus */ + struct list_head resources; /* address space routed to this bus */ + struct resource busn_res; /* bus numbers routed to this bus */ - struct pci_ops *ops; /* Configuration access functions */ - void *sysdata; /* Hook for sys-specific extension */ - struct proc_dir_entry *procdir; /* Directory entry in /proc/bus/pci */ + struct pci_ops *ops; /* configuration access functions */ + struct msi_controller *msi; /* MSI controller */ + void *sysdata; /* hook for sys-specific extension */ + struct proc_dir_entry *procdir; /* directory entry in /proc/bus/pci */ - unsigned char number; /* Bus number */ - unsigned char primary; /* Number of primary bridge */ + unsigned char number; /* bus number */ + unsigned char primary; /* number of primary bridge */ unsigned char max_bus_speed; /* enum pci_bus_speed */ unsigned char cur_bus_speed; /* enum pci_bus_speed */ #ifdef CONFIG_PCI_DOMAINS_GENERIC @@ -647,22 +492,17 @@ struct pci_bus { char name[48]; - unsigned short bridge_ctl; /* Manage NO_ISA/FBB/et al behaviors */ - pci_bus_flags_t bus_flags; /* Inherited by child buses */ + unsigned short bridge_ctl; /* manage NO_ISA/FBB/et al behaviors */ + pci_bus_flags_t bus_flags; /* inherited by child buses */ struct device *bridge; struct device dev; - struct bin_attribute *legacy_io; /* Legacy I/O for this bus */ - struct bin_attribute *legacy_mem; /* Legacy mem */ + struct bin_attribute *legacy_io; /* legacy I/O for this bus */ + struct bin_attribute *legacy_mem; /* legacy mem */ unsigned int is_added:1; }; #define to_pci_bus(n) container_of(n, struct pci_bus, dev) -static inline u16 pci_dev_id(struct pci_dev *dev) -{ - return 
PCI_DEVID(dev->bus->number, dev->devfn); -} - /* * Returns true if the PCI bus is root (behind host-PCI bridge), * false otherwise @@ -689,10 +529,6 @@ static inline bool pci_is_bridge(struct pci_dev *dev) dev->hdr_type == PCI_HEADER_TYPE_CARDBUS; } -#define for_each_pci_bridge(dev, bus) \ - list_for_each_entry(dev, &bus->devices, bus_list) \ - if (!pci_is_bridge(dev)) {} else - static inline struct pci_dev *pci_upstream_bridge(struct pci_dev *dev) { dev = pci_physfn(dev); @@ -702,6 +538,9 @@ static inline struct pci_dev *pci_upstream_bridge(struct pci_dev *dev) return dev->bus->self; } +struct device *pci_get_host_bridge_device(struct pci_dev *dev); +void pci_put_host_bridge_device(struct device *dev); + #ifdef CONFIG_PCI_MSI static inline bool pci_dev_msi_enabled(struct pci_dev *pci_dev) { @@ -711,7 +550,9 @@ static inline bool pci_dev_msi_enabled(struct pci_dev *pci_dev) static inline bool pci_dev_msi_enabled(struct pci_dev *pci_dev) { return false; } #endif -/* Error values that may be returned by PCI functions */ +/* + * Error values that may be returned by PCI functions. + */ #define PCIBIOS_SUCCESSFUL 0x00 #define PCIBIOS_FUNC_NOT_SUPPORTED 0x81 #define PCIBIOS_BAD_VENDOR_ID 0x83 @@ -720,7 +561,9 @@ static inline bool pci_dev_msi_enabled(struct pci_dev *pci_dev) { return false; #define PCIBIOS_SET_FAILED 0x88 #define PCIBIOS_BUFFER_TOO_SMALL 0x89 -/* Translate above to generic errno for passing back through non-PCI code */ +/* + * Translate above to generic errno for passing back through non-PCI code. 
+ */ static inline int pcibios_err_to_errno(int err) { if (err <= PCIBIOS_SUCCESSFUL) @@ -763,20 +606,20 @@ int raw_pci_read(unsigned int domain, unsigned int bus, unsigned int devfn, int raw_pci_write(unsigned int domain, unsigned int bus, unsigned int devfn, int reg, int len, u32 val); -#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT +#ifdef CONFIG_PCI_BUS_ADDR_T_64BIT typedef u64 pci_bus_addr_t; #else typedef u32 pci_bus_addr_t; #endif struct pci_bus_region { - pci_bus_addr_t start; - pci_bus_addr_t end; + pci_bus_addr_t start; + pci_bus_addr_t end; }; struct pci_dynids { - spinlock_t lock; /* Protects list, index */ - struct list_head list; /* For IDs added at runtime */ + spinlock_t lock; /* protects list, index */ + struct list_head list; /* for IDs added at runtime */ }; @@ -790,13 +633,13 @@ struct pci_dynids { typedef unsigned int __bitwise pci_ers_result_t; enum pci_ers_result { - /* No result/none/not supported in device driver */ + /* no result/none/not supported in device driver */ PCI_ERS_RESULT_NONE = (__force pci_ers_result_t) 1, /* Device driver can recover without slot reset */ PCI_ERS_RESULT_CAN_RECOVER = (__force pci_ers_result_t) 2, - /* Device driver wants slot to be reset */ + /* Device driver wants slot to be reset. 
*/ PCI_ERS_RESULT_NEED_RESET = (__force pci_ers_result_t) 3, /* Device has completely failed, is unrecoverable */ @@ -813,17 +656,19 @@ enum pci_ers_result { struct pci_error_handlers { /* PCI bus error detected on this device */ pci_ers_result_t (*error_detected)(struct pci_dev *dev, - pci_channel_state_t error); + enum pci_channel_state error); /* MMIO has been re-enabled, but not DMA */ pci_ers_result_t (*mmio_enabled)(struct pci_dev *dev); + /* PCI Express link has been reset */ + pci_ers_result_t (*link_reset)(struct pci_dev *dev); + /* PCI slot has been reset */ pci_ers_result_t (*slot_reset)(struct pci_dev *dev); /* PCI function reset prepare or completed */ - void (*reset_prepare)(struct pci_dev *dev); - void (*reset_done)(struct pci_dev *dev); + void (*reset_notify)(struct pci_dev *dev, bool prepare); /* Device driver may resume normal operations */ void (*resume)(struct pci_dev *dev); @@ -831,79 +676,27 @@ struct pci_error_handlers { struct module; - -/** - * struct pci_driver - PCI driver structure - * @node: List of driver structures. - * @name: Driver name. - * @id_table: Pointer to table of device IDs the driver is - * interested in. Most drivers should export this - * table using MODULE_DEVICE_TABLE(pci,...). - * @probe: This probing function gets called (during execution - * of pci_register_driver() for already existing - * devices or later if a new device gets inserted) for - * all PCI devices which match the ID table and are not - * "owned" by the other drivers yet. This function gets - * passed a "struct pci_dev \*" for each device whose - * entry in the ID table matches the device. The probe - * function returns zero when the driver chooses to - * take "ownership" of the device or an error code - * (negative number) otherwise. - * The probe function always gets called from process - * context, so it can sleep. 
- * @remove: The remove() function gets called whenever a device - * being handled by this driver is removed (either during - * deregistration of the driver or when it's manually - * pulled out of a hot-pluggable slot). - * The remove function always gets called from process - * context, so it can sleep. - * @suspend: Put device into low power state. - * @resume: Wake device from low power state. - * (Please see Documentation/power/pci.rst for descriptions - * of PCI Power Management and the related functions.) - * @shutdown: Hook into reboot_notifier_list (kernel/sys.c). - * Intended to stop any idling DMA operations. - * Useful for enabling wake-on-lan (NIC) or changing - * the power state of a device before reboot. - * e.g. drivers/net/e100.c. - * @sriov_configure: Optional driver callback to allow configuration of - * number of VFs to enable via sysfs "sriov_numvfs" file. - * @sriov_set_msix_vec_count: PF Driver callback to change number of MSI-X - * vectors on a VF. Triggered via sysfs "sriov_vf_msix_count". - * This will change MSI-X Table Size in the VF Message Control - * registers. - * @sriov_get_vf_total_msix: PF driver callback to get the total number of - * MSI-X vectors available for distribution to the VFs. - * @err_handler: See Documentation/PCI/pci-error-recovery.rst - * @groups: Sysfs attribute groups. - * @dev_groups: Attributes attached to the device that will be - * created once it is bound to the driver. - * @driver: Driver model structure. - * @dynids: List of dynamically added device IDs. 
- */ struct pci_driver { - struct list_head node; - const char *name; - const struct pci_device_id *id_table; /* Must be non-NULL for probe to be called */ - int (*probe)(struct pci_dev *dev, const struct pci_device_id *id); /* New device inserted */ - void (*remove)(struct pci_dev *dev); /* Device removed (NULL if not a hot-plug capable driver) */ - int (*suspend)(struct pci_dev *dev, pm_message_t state); /* Device suspended */ - int (*resume)(struct pci_dev *dev); /* Device woken up */ - void (*shutdown)(struct pci_dev *dev); - int (*sriov_configure)(struct pci_dev *dev, int num_vfs); /* On PF */ - int (*sriov_set_msix_vec_count)(struct pci_dev *vf, int msix_vec_count); /* On PF */ - u32 (*sriov_get_vf_total_msix)(struct pci_dev *pf); + struct list_head node; + const char *name; + const struct pci_device_id *id_table; /* must be non-NULL for probe to be called */ + int (*probe) (struct pci_dev *dev, const struct pci_device_id *id); /* New device inserted */ + void (*remove) (struct pci_dev *dev); /* Device removed (NULL if not a hot-plug capable driver) */ + int (*suspend) (struct pci_dev *dev, pm_message_t state); /* Device suspended */ + int (*suspend_late) (struct pci_dev *dev, pm_message_t state); + int (*resume_early) (struct pci_dev *dev); + int (*resume) (struct pci_dev *dev); /* Device woken up */ + void (*shutdown) (struct pci_dev *dev); + int (*sriov_configure) (struct pci_dev *dev, int num_vfs); /* PF pdev */ const struct pci_error_handlers *err_handler; - const struct attribute_group **groups; - const struct attribute_group **dev_groups; struct device_driver driver; - struct pci_dynids dynids; + struct pci_dynids dynids; }; #define to_pci_driver(drv) container_of(drv, struct pci_driver, driver) /** - * PCI_DEVICE - macro used to describe a specific PCI device + * PCI_DEVICE - macro used to describe a specific pci device * @vend: the 16 bit PCI Vendor ID * @dev: the 16 bit PCI Device ID * @@ -916,36 +709,7 @@ struct pci_driver { .subvendor = 
PCI_ANY_ID, .subdevice = PCI_ANY_ID /** - * PCI_DEVICE_DRIVER_OVERRIDE - macro used to describe a PCI device with - * override_only flags. - * @vend: the 16 bit PCI Vendor ID - * @dev: the 16 bit PCI Device ID - * @driver_override: the 32 bit PCI Device override_only - * - * This macro is used to create a struct pci_device_id that matches only a - * driver_override device. The subvendor and subdevice fields will be set to - * PCI_ANY_ID. - */ -#define PCI_DEVICE_DRIVER_OVERRIDE(vend, dev, driver_override) \ - .vendor = (vend), .device = (dev), .subvendor = PCI_ANY_ID, \ - .subdevice = PCI_ANY_ID, .override_only = (driver_override) - -/** - * PCI_DRIVER_OVERRIDE_DEVICE_VFIO - macro used to describe a VFIO - * "driver_override" PCI device. - * @vend: the 16 bit PCI Vendor ID - * @dev: the 16 bit PCI Device ID - * - * This macro is used to create a struct pci_device_id that matches a - * specific device. The subvendor and subdevice fields will be set to - * PCI_ANY_ID and the driver_override will be set to - * PCI_ID_F_VFIO_DRIVER_OVERRIDE. 
- */ -#define PCI_DRIVER_OVERRIDE_DEVICE_VFIO(vend, dev) \ - PCI_DEVICE_DRIVER_OVERRIDE(vend, dev, PCI_ID_F_VFIO_DRIVER_OVERRIDE) - -/** - * PCI_DEVICE_SUB - macro used to describe a specific PCI device with subsystem + * PCI_DEVICE_SUB - macro used to describe a specific pci device with subsystem * @vend: the 16 bit PCI Vendor ID * @dev: the 16 bit PCI Device ID * @subvend: the 16 bit PCI Subvendor ID @@ -959,7 +723,7 @@ struct pci_driver { .subvendor = (subvend), .subdevice = (subdev) /** - * PCI_DEVICE_CLASS - macro used to describe a specific PCI device class + * PCI_DEVICE_CLASS - macro used to describe a specific pci device class * @dev_class: the class, subclass, prog-if triple for this device * @dev_class_mask: the class mask for this device * @@ -973,7 +737,7 @@ struct pci_driver { .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID /** - * PCI_VDEVICE - macro used to describe a specific PCI device in short form + * PCI_VDEVICE - macro used to describe a specific pci device in short form * @vend: the vendor name * @dev: the 16 bit PCI Device ID * @@ -982,41 +746,22 @@ struct pci_driver { * to PCI_ANY_ID. The macro allows the next field to follow as the device * private data. */ + #define PCI_VDEVICE(vend, dev) \ .vendor = PCI_VENDOR_ID_##vend, .device = (dev), \ .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID, 0, 0 -/** - * PCI_DEVICE_DATA - macro used to describe a specific PCI device in very short form - * @vend: the vendor name (without PCI_VENDOR_ID_ prefix) - * @dev: the device name (without PCI_DEVICE_ID__ prefix) - * @data: the driver data to be filled - * - * This macro is used to create a struct pci_device_id that matches a - * specific PCI device. The subvendor, and subdevice fields will be set - * to PCI_ANY_ID. 
- */ -#define PCI_DEVICE_DATA(vend, dev, data) \ - .vendor = PCI_VENDOR_ID_##vend, .device = PCI_DEVICE_ID_##vend##_##dev, \ - .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID, 0, 0, \ - .driver_data = (kernel_ulong_t)(data) - enum { - PCI_REASSIGN_ALL_RSRC = 0x00000001, /* Ignore firmware setup */ - PCI_REASSIGN_ALL_BUS = 0x00000002, /* Reassign all bus numbers */ - PCI_PROBE_ONLY = 0x00000004, /* Use existing setup */ - PCI_CAN_SKIP_ISA_ALIGN = 0x00000008, /* Don't do ISA alignment */ - PCI_ENABLE_PROC_DOMAINS = 0x00000010, /* Enable domains in /proc */ + PCI_REASSIGN_ALL_RSRC = 0x00000001, /* ignore firmware setup */ + PCI_REASSIGN_ALL_BUS = 0x00000002, /* reassign all bus numbers */ + PCI_PROBE_ONLY = 0x00000004, /* use existing setup */ + PCI_CAN_SKIP_ISA_ALIGN = 0x00000008, /* don't do ISA alignment */ + PCI_ENABLE_PROC_DOMAINS = 0x00000010, /* enable domains in /proc */ PCI_COMPAT_DOMAIN_0 = 0x00000020, /* ... except domain 0 */ - PCI_SCAN_ALL_PCIE_DEVS = 0x00000040, /* Scan all, not just dev 0 */ + PCI_SCAN_ALL_PCIE_DEVS = 0x00000040, /* scan all, not just dev 0 */ }; -#define PCI_IRQ_LEGACY (1 << 0) /* Allow legacy interrupts */ -#define PCI_IRQ_MSI (1 << 1) /* Allow MSI interrupts */ -#define PCI_IRQ_MSIX (1 << 2) /* Allow MSI-X interrupts */ -#define PCI_IRQ_AFFINITY (1 << 3) /* Auto-assign affinity */ - -/* These external functions are only available when PCI support is enabled */ +/* these external functions are only available when PCI support is enabled */ #ifdef CONFIG_PCI extern unsigned int pci_flags; @@ -1029,11 +774,11 @@ static inline int pci_has_flag(int flag) { return pci_flags & flag; } void pcie_bus_configure_settings(struct pci_bus *bus); enum pcie_bus_config_types { - PCIE_BUS_TUNE_OFF, /* Don't touch MPS at all */ - PCIE_BUS_DEFAULT, /* Ensure MPS matches upstream bridge */ - PCIE_BUS_SAFE, /* Use largest MPS boot-time devices support */ - PCIE_BUS_PERFORMANCE, /* Use MPS and MRRS for best performance */ - PCIE_BUS_PEER2PEER, /* Set MPS = 
128 for all devices */ + PCIE_BUS_TUNE_OFF, /* don't touch MPS at all */ + PCIE_BUS_DEFAULT, /* ensure MPS matches upstream bridge */ + PCIE_BUS_SAFE, /* use largest MPS boot-time devices support */ + PCIE_BUS_PERFORMANCE, /* use MPS and MRRS for best performance */ + PCIE_BUS_PEER2PEER, /* set MPS = 128 for all devices */ }; extern enum pcie_bus_config_types pcie_bus_config; @@ -1042,7 +787,7 @@ extern struct bus_type pci_bus_type; /* Do NOT directly access these two variables, unless you are arch-specific PCI * code, or PCI core code. */ -extern struct list_head pci_root_buses; /* List of all known PCI buses */ +extern struct list_head pci_root_buses; /* list of all known PCI buses */ /* Some device drivers need know if PCI is initiated */ int no_pci_devices(void); @@ -1059,8 +804,9 @@ char *pcibios_setup(char *str); resource_size_t pcibios_align_resource(void *, const struct resource *, resource_size_t, resource_size_t); +void pcibios_update_irq(struct pci_dev *, int irq); -/* Weak but can be overridden by arch */ +/* Weak but can be overriden by arch */ void pci_fixup_cardbus(struct pci_bus *); /* Generic PCI functions used internally */ @@ -1076,16 +822,19 @@ struct pci_bus *pci_scan_bus(int bus, struct pci_ops *ops, void *sysdata); struct pci_bus *pci_create_root_bus(struct device *parent, int bus, struct pci_ops *ops, void *sysdata, struct list_head *resources); -int pci_host_probe(struct pci_host_bridge *bridge); int pci_bus_insert_busn_res(struct pci_bus *b, int bus, int busmax); int pci_bus_update_busn_res_end(struct pci_bus *b, int busmax); void pci_bus_release_busn_res(struct pci_bus *b); +struct pci_bus *pci_scan_root_bus_msi(struct device *parent, int bus, + struct pci_ops *ops, void *sysdata, + struct list_head *resources, + struct msi_controller *msi); struct pci_bus *pci_scan_root_bus(struct device *parent, int bus, - struct pci_ops *ops, void *sysdata, - struct list_head *resources); -int pci_scan_root_bus_bridge(struct pci_host_bridge *bridge); + 
struct pci_ops *ops, void *sysdata, + struct list_head *resources); struct pci_bus *pci_add_new_bus(struct pci_bus *parent, struct pci_dev *dev, int busnr); +void pcie_update_link_speed(struct pci_bus *bus, u16 link_status); struct pci_slot *pci_create_slot(struct pci_bus *parent, int slot_nr, const char *name, struct hotplug_slot *hotplug); @@ -1103,6 +852,7 @@ void pci_bus_add_device(struct pci_dev *dev); void pci_read_bridge_bases(struct pci_bus *child); struct resource *pci_find_parent_resource(const struct pci_dev *dev, struct resource *res); +struct pci_dev *pci_find_pcie_root_port(struct pci_dev *dev); u8 pci_swizzle_interrupt_pin(const struct pci_dev *dev, u8 pin); int pci_get_interrupt_pin(struct pci_dev *dev, struct pci_dev **bridge); u8 pci_common_swizzle(struct pci_dev *dev, u8 *pinp); @@ -1118,29 +868,38 @@ void pcibios_setup_bridge(struct pci_bus *bus, unsigned long type); void pci_sort_breadthfirst(void); #define dev_is_pci(d) ((d)->bus == &pci_bus_type) #define dev_is_pf(d) ((dev_is_pci(d) ? to_pci_dev(d)->is_physfn : false)) +#define dev_num_vf(d) ((dev_is_pci(d) ? 
pci_num_vf(to_pci_dev(d)) : 0)) /* Generic PCI functions exported to card drivers */ -u8 pci_bus_find_capability(struct pci_bus *bus, unsigned int devfn, int cap); -u8 pci_find_capability(struct pci_dev *dev, int cap); -u8 pci_find_next_capability(struct pci_dev *dev, u8 pos, int cap); -u8 pci_find_ht_capability(struct pci_dev *dev, int ht_cap); -u8 pci_find_next_ht_capability(struct pci_dev *dev, u8 pos, int ht_cap); -u16 pci_find_ext_capability(struct pci_dev *dev, int cap); -u16 pci_find_next_ext_capability(struct pci_dev *dev, u16 pos, int cap); +enum pci_lost_interrupt_reason { + PCI_LOST_IRQ_NO_INFORMATION = 0, + PCI_LOST_IRQ_DISABLE_MSI, + PCI_LOST_IRQ_DISABLE_MSIX, + PCI_LOST_IRQ_DISABLE_ACPI, +}; +enum pci_lost_interrupt_reason pci_lost_interrupt(struct pci_dev *dev); +int pci_find_capability(struct pci_dev *dev, int cap); +int pci_find_next_capability(struct pci_dev *dev, u8 pos, int cap); +int pci_find_ext_capability(struct pci_dev *dev, int cap); +int pci_find_next_ext_capability(struct pci_dev *dev, int pos, int cap); +int pci_find_ht_capability(struct pci_dev *dev, int ht_cap); +int pci_find_next_ht_capability(struct pci_dev *dev, int pos, int ht_cap); struct pci_bus *pci_find_next_bus(const struct pci_bus *from); -u16 pci_find_vsec_capability(struct pci_dev *dev, u16 vendor, int cap); - -u64 pci_get_dsn(struct pci_dev *dev); struct pci_dev *pci_get_device(unsigned int vendor, unsigned int device, - struct pci_dev *from); + struct pci_dev *from); struct pci_dev *pci_get_subsys(unsigned int vendor, unsigned int device, - unsigned int ss_vendor, unsigned int ss_device, - struct pci_dev *from); + unsigned int ss_vendor, unsigned int ss_device, + struct pci_dev *from); struct pci_dev *pci_get_slot(struct pci_bus *bus, unsigned int devfn); struct pci_dev *pci_get_domain_bus_and_slot(int domain, unsigned int bus, unsigned int devfn); +static inline struct pci_dev *pci_get_bus_and_slot(unsigned int bus, + unsigned int devfn) +{ + return 
pci_get_domain_bus_and_slot(0, bus, devfn); +} struct pci_dev *pci_get_class(unsigned int class, struct pci_dev *from); int pci_dev_present(const struct pci_device_id *ids); @@ -1168,12 +927,32 @@ int pci_generic_config_write32(struct pci_bus *bus, unsigned int devfn, struct pci_ops *pci_bus_set_ops(struct pci_bus *bus, struct pci_ops *ops); -int pci_read_config_byte(const struct pci_dev *dev, int where, u8 *val); -int pci_read_config_word(const struct pci_dev *dev, int where, u16 *val); -int pci_read_config_dword(const struct pci_dev *dev, int where, u32 *val); -int pci_write_config_byte(const struct pci_dev *dev, int where, u8 val); -int pci_write_config_word(const struct pci_dev *dev, int where, u16 val); -int pci_write_config_dword(const struct pci_dev *dev, int where, u32 val); +static inline int pci_read_config_byte(const struct pci_dev *dev, int where, u8 *val) +{ + return pci_bus_read_config_byte(dev->bus, dev->devfn, where, val); +} +static inline int pci_read_config_word(const struct pci_dev *dev, int where, u16 *val) +{ + return pci_bus_read_config_word(dev->bus, dev->devfn, where, val); +} +static inline int pci_read_config_dword(const struct pci_dev *dev, int where, + u32 *val) +{ + return pci_bus_read_config_dword(dev->bus, dev->devfn, where, val); +} +static inline int pci_write_config_byte(const struct pci_dev *dev, int where, u8 val) +{ + return pci_bus_write_config_byte(dev->bus, dev->devfn, where, val); +} +static inline int pci_write_config_word(const struct pci_dev *dev, int where, u16 val) +{ + return pci_bus_write_config_word(dev->bus, dev->devfn, where, val); +} +static inline int pci_write_config_dword(const struct pci_dev *dev, int where, + u32 val) +{ + return pci_bus_write_config_dword(dev->bus, dev->devfn, where, val); +} int pcie_capability_read_word(struct pci_dev *dev, int pos, u16 *val); int pcie_capability_read_dword(struct pci_dev *dev, int pos, u32 *val); @@ -1208,7 +987,7 @@ static inline int pcie_capability_clear_dword(struct 
pci_dev *dev, int pos, return pcie_capability_clear_and_set_dword(dev, pos, clear, 0); } -/* User-space driven config access */ +/* user-space driven config access */ int pci_user_read_config_byte(struct pci_dev *dev, int where, u8 *val); int pci_user_read_config_word(struct pci_dev *dev, int where, u16 *val); int pci_user_read_config_dword(struct pci_dev *dev, int where, u32 *val); @@ -1223,15 +1002,6 @@ int __must_check pci_reenable_device(struct pci_dev *); int __must_check pcim_enable_device(struct pci_dev *pdev); void pcim_pin_device(struct pci_dev *pdev); -static inline bool pci_intx_mask_supported(struct pci_dev *pdev) -{ - /* - * INTx masking is supported if PCI_COMMAND_INTX_DISABLE is - * writable and no quirk has marked the feature broken. - */ - return !pdev->broken_intx_masking; -} - static inline int pci_is_enabled(struct pci_dev *pdev) { return (atomic_read(&pdev->enable_cnt) > 0); @@ -1250,12 +1020,12 @@ void pci_clear_master(struct pci_dev *dev); int pci_set_pcie_reset_state(struct pci_dev *dev, enum pcie_reset_state state); int pci_set_cacheline_size(struct pci_dev *dev); +#define HAVE_PCI_SET_MWI int __must_check pci_set_mwi(struct pci_dev *dev); -int __must_check pcim_set_mwi(struct pci_dev *dev); int pci_try_set_mwi(struct pci_dev *dev); void pci_clear_mwi(struct pci_dev *dev); -void pci_disable_parity(struct pci_dev *dev); void pci_intx(struct pci_dev *dev, int enable); +bool pci_intx_mask_supported(struct pci_dev *dev); bool pci_check_and_mask_intx(struct pci_dev *dev); bool pci_check_and_unmask_intx(struct pci_dev *dev); int pci_wait_for_pending(struct pci_dev *dev, int pos, u16 mask); @@ -1267,51 +1037,35 @@ int pcie_get_readrq(struct pci_dev *dev); int pcie_set_readrq(struct pci_dev *dev, int rq); int pcie_get_mps(struct pci_dev *dev); int pcie_set_mps(struct pci_dev *dev, int mps); -u32 pcie_bandwidth_available(struct pci_dev *dev, struct pci_dev **limiting_dev, - enum pci_bus_speed *speed, - enum pcie_link_width *width); -void 
pcie_print_link_status(struct pci_dev *dev); -int pcie_reset_flr(struct pci_dev *dev, bool probe); -int pcie_flr(struct pci_dev *dev); +int pcie_get_minimum_link(struct pci_dev *dev, enum pci_bus_speed *speed, + enum pcie_link_width *width); +int __pci_reset_function(struct pci_dev *dev); int __pci_reset_function_locked(struct pci_dev *dev); int pci_reset_function(struct pci_dev *dev); -int pci_reset_function_locked(struct pci_dev *dev); int pci_try_reset_function(struct pci_dev *dev); int pci_probe_reset_slot(struct pci_slot *slot); +int pci_reset_slot(struct pci_slot *slot); +int pci_try_reset_slot(struct pci_slot *slot); int pci_probe_reset_bus(struct pci_bus *bus); -int pci_reset_bus(struct pci_dev *dev); +int pci_reset_bus(struct pci_bus *bus); +int pci_try_reset_bus(struct pci_bus *bus); void pci_reset_secondary_bus(struct pci_dev *dev); void pcibios_reset_secondary_bus(struct pci_dev *dev); +void pci_reset_bridge_secondary_bus(struct pci_dev *dev); void pci_update_resource(struct pci_dev *dev, int resno); int __must_check pci_assign_resource(struct pci_dev *dev, int i); int __must_check pci_reassign_resource(struct pci_dev *dev, int i, resource_size_t add_size, resource_size_t align); -void pci_release_resource(struct pci_dev *dev, int resno); -static inline int pci_rebar_bytes_to_size(u64 bytes) -{ - bytes = roundup_pow_of_two(bytes); - - /* Return BAR size as defined in the resizable BAR specification */ - return max(ilog2(bytes), 20) - 20; -} - -u32 pci_rebar_get_possible_sizes(struct pci_dev *pdev, int bar); -int __must_check pci_resize_resource(struct pci_dev *dev, int i, int size); int pci_select_bars(struct pci_dev *dev, unsigned long flags); bool pci_device_is_present(struct pci_dev *pdev); void pci_ignore_hotplug(struct pci_dev *dev); -struct pci_dev *pci_real_dma_dev(struct pci_dev *dev); -int pci_status_get_and_clear_errors(struct pci_dev *pdev); - -int __printf(6, 7) pci_request_irq(struct pci_dev *dev, unsigned int nr, - irq_handler_t handler, 
irq_handler_t thread_fn, void *dev_id, - const char *fmt, ...); -void pci_free_irq(struct pci_dev *dev, unsigned int nr, void *dev_id); /* ROM control related routines */ int pci_enable_rom(struct pci_dev *pdev); void pci_disable_rom(struct pci_dev *pdev); void __iomem __must_check *pci_map_rom(struct pci_dev *pdev, size_t *size); void pci_unmap_rom(struct pci_dev *pdev, void __iomem *rom); +size_t pci_get_rom_size(struct pci_dev *pdev, void __iomem *rom, size_t size); +void __iomem __must_check *pci_platform_rom(struct pci_dev *pdev, size_t *size); /* Power management related routines */ int pci_save_state(struct pci_dev *dev); @@ -1321,35 +1075,54 @@ int pci_load_saved_state(struct pci_dev *dev, struct pci_saved_state *state); int pci_load_and_free_saved_state(struct pci_dev *dev, struct pci_saved_state **state); -int pci_platform_power_transition(struct pci_dev *dev, pci_power_t state); +struct pci_cap_saved_state *pci_find_saved_cap(struct pci_dev *dev, char cap); +struct pci_cap_saved_state *pci_find_saved_ext_cap(struct pci_dev *dev, + u16 cap); +int pci_add_cap_save_buffer(struct pci_dev *dev, char cap, unsigned int size); +int pci_add_ext_cap_save_buffer(struct pci_dev *dev, + u16 cap, unsigned int size); +int __pci_complete_power_transition(struct pci_dev *dev, pci_power_t state); int pci_set_power_state(struct pci_dev *dev, pci_power_t state); pci_power_t pci_choose_state(struct pci_dev *dev, pm_message_t state); bool pci_pme_capable(struct pci_dev *dev, pci_power_t state); void pci_pme_active(struct pci_dev *dev, bool enable); -int pci_enable_wake(struct pci_dev *dev, pci_power_t state, bool enable); +int __pci_enable_wake(struct pci_dev *dev, pci_power_t state, + bool runtime, bool enable); int pci_wake_from_d3(struct pci_dev *dev, bool enable); int pci_prepare_to_sleep(struct pci_dev *dev); int pci_back_from_sleep(struct pci_dev *dev); bool pci_dev_run_wake(struct pci_dev *dev); +bool pci_check_pme_status(struct pci_dev *dev); +void 
pci_pme_wakeup_bus(struct pci_bus *bus); void pci_d3cold_enable(struct pci_dev *dev); void pci_d3cold_disable(struct pci_dev *dev); -bool pcie_relaxed_ordering_enabled(struct pci_dev *dev); -void pci_resume_bus(struct pci_bus *bus); -void pci_bus_set_current_state(struct pci_bus *bus, pci_power_t state); + +static inline int pci_enable_wake(struct pci_dev *dev, pci_power_t state, + bool enable) +{ + return __pci_enable_wake(dev, state, false, enable); +} + +/* PCI Virtual Channel */ +int pci_save_vc_state(struct pci_dev *dev); +void pci_restore_vc_state(struct pci_dev *dev); +void pci_allocate_vc_save_buffers(struct pci_dev *dev); /* For use by arch with custom probe code */ void set_pcie_port_type(struct pci_dev *pdev); void set_pcie_hotplug_bridge(struct pci_dev *pdev); /* Functions for PCI Hotplug drivers to use */ +int pci_bus_find_capability(struct pci_bus *bus, unsigned int devfn, int cap); unsigned int pci_rescan_bus_bridge_resize(struct pci_dev *bridge); unsigned int pci_rescan_bus(struct pci_bus *bus); void pci_lock_rescan_remove(void); void pci_unlock_rescan_remove(void); -/* Vital Product Data routines */ +/* Vital product data routines */ ssize_t pci_read_vpd(struct pci_dev *dev, loff_t pos, size_t count, void *buf); ssize_t pci_write_vpd(struct pci_dev *dev, loff_t pos, size_t count, const void *buf); +int pci_set_vpd_size(struct pci_dev *dev, size_t len); /* Helper functions for low-level code (drivers/pci/setup-[bus,res].c) */ resource_size_t pcibios_retrieve_fw_addr(struct pci_dev *dev, int idx); @@ -1362,22 +1135,25 @@ void pci_assign_unassigned_resources(void); void pci_assign_unassigned_bridge_resources(struct pci_dev *bridge); void pci_assign_unassigned_bus_resources(struct pci_bus *bus); void pci_assign_unassigned_root_bus_resources(struct pci_bus *bus); -int pci_reassign_bridge_resources(struct pci_dev *bridge, unsigned long type); void pdev_enable_device(struct pci_dev *); int pci_enable_resources(struct pci_dev *, int mask); -void 
pci_assign_irq(struct pci_dev *dev); +void pci_fixup_irqs(u8 (*)(struct pci_dev *, u8 *), + int (*)(const struct pci_dev *, u8, u8)); struct resource *pci_find_resource(struct pci_dev *dev, struct resource *res); #define HAVE_PCI_REQ_REGIONS 2 int __must_check pci_request_regions(struct pci_dev *, const char *); int __must_check pci_request_regions_exclusive(struct pci_dev *, const char *); void pci_release_regions(struct pci_dev *); int __must_check pci_request_region(struct pci_dev *, int, const char *); +int __must_check pci_request_region_exclusive(struct pci_dev *, int, const char *); void pci_release_region(struct pci_dev *, int); int pci_request_selected_regions(struct pci_dev *, int, const char *); int pci_request_selected_regions_exclusive(struct pci_dev *, int, const char *); void pci_release_selected_regions(struct pci_dev *, int); /* drivers/pci/bus.c */ +struct pci_bus *pci_bus_get(struct pci_bus *bus); +void pci_bus_put(struct pci_bus *bus); void pci_add_resource(struct list_head *resources, struct resource *res); void pci_add_resource_offset(struct list_head *resources, struct resource *res, resource_size_t offset); @@ -1389,9 +1165,6 @@ void pci_bus_remove_resources(struct pci_bus *bus); int devm_request_pci_bus_resources(struct device *dev, struct list_head *resources); -/* Temporary until new and working PCI SBR API in place */ -int pci_bridge_secondary_bus_reset(struct pci_dev *dev); - #define pci_bus_for_each_resource(bus, res, i) \ for (i = 0; \ (res = pci_bus_resource_n(bus, i)) || i < PCI_BRIDGE_RESOURCE_NUM; \ @@ -1408,19 +1181,11 @@ int __must_check pci_bus_alloc_resource(struct pci_bus *bus, void *alignf_data); -int pci_register_io_range(struct fwnode_handle *fwnode, phys_addr_t addr, - resource_size_t size); +int pci_register_io_range(phys_addr_t addr, resource_size_t size); unsigned long pci_address_to_pio(phys_addr_t addr); phys_addr_t pci_pio_to_address(unsigned long pio); int pci_remap_iospace(const struct resource *res, phys_addr_t 
phys_addr); -int devm_pci_remap_iospace(struct device *dev, const struct resource *res, - phys_addr_t phys_addr); void pci_unmap_iospace(struct resource *res); -void __iomem *devm_pci_remap_cfgspace(struct device *dev, - resource_size_t offset, - resource_size_t size); -void __iomem *devm_pci_remap_cfg_resource(struct device *dev, - struct resource *res); static inline pci_bus_addr_t pci_bus_address(struct pci_dev *pdev, int bar) { @@ -1434,7 +1199,9 @@ static inline pci_bus_addr_t pci_bus_address(struct pci_dev *pdev, int bar) int __must_check __pci_register_driver(struct pci_driver *, struct module *, const char *mod_name); -/* pci_register_driver() must be a macro so KBUILD_MODNAME can be expanded */ +/* + * pci_register_driver must be a macro so that KBUILD_MODNAME can be expanded + */ #define pci_register_driver(driver) \ __pci_register_driver(driver, THIS_MODULE, KBUILD_MODNAME) @@ -1449,7 +1216,8 @@ void pci_unregister_driver(struct pci_driver *dev); * use this macro once, and calling it replaces module_init() and module_exit() */ #define module_pci_driver(__pci_driver) \ - module_driver(__pci_driver, pci_register_driver, pci_unregister_driver) + module_driver(__pci_driver, pci_register_driver, \ + pci_unregister_driver) /** * builtin_pci_driver() - Helper macro for registering a PCI driver @@ -1480,6 +1248,7 @@ unsigned char pci_bus_max_busnr(struct pci_bus *bus); void pci_setup_bridge(struct pci_bus *bus); resource_size_t pcibios_window_alignment(struct pci_bus *bus, unsigned long type); +resource_size_t pcibios_iov_resource_alignment(struct pci_dev *dev, int resno); #define PCI_VGA_STATE_CHANGE_BRIDGE (1 << 0) #define PCI_VGA_STATE_CHANGE_DECODES (1 << 1) @@ -1487,19 +1256,16 @@ resource_size_t pcibios_window_alignment(struct pci_bus *bus, int pci_set_vga_state(struct pci_dev *pdev, bool decode, unsigned int command_bits, u32 flags); -/* - * Virtual interrupts allow for more interrupts to be allocated - * than the device has interrupts for. 
These are not programmed - * into the device's MSI-X table and must be handled by some - * other driver means. - */ -#define PCI_IRQ_VIRTUAL (1 << 4) - +#define PCI_IRQ_LEGACY (1 << 0) /* allow legacy interrupts */ +#define PCI_IRQ_MSI (1 << 1) /* allow MSI interrupts */ +#define PCI_IRQ_MSIX (1 << 2) /* allow MSI-X interrupts */ +#define PCI_IRQ_AFFINITY (1 << 3) /* auto-assign affinity */ #define PCI_IRQ_ALL_TYPES \ (PCI_IRQ_LEGACY | PCI_IRQ_MSI | PCI_IRQ_MSIX) /* kmem_cache style wrapper around pci_alloc_consistent() */ +#include #include #define pci_pool dma_pool @@ -1512,18 +1278,28 @@ int pci_set_vga_state(struct pci_dev *pdev, bool decode, #define pci_pool_free(pool, vaddr, addr) dma_pool_free(pool, vaddr, addr) struct msix_entry { - u32 vector; /* Kernel uses to write allocated vector */ - u16 entry; /* Driver uses to specify entry, OS writes */ + u32 vector; /* kernel uses to write allocated vector */ + u16 entry; /* driver uses to specify entry, OS writes */ }; #ifdef CONFIG_PCI_MSI int pci_msi_vec_count(struct pci_dev *dev); +void pci_msi_shutdown(struct pci_dev *dev); void pci_disable_msi(struct pci_dev *dev); int pci_msix_vec_count(struct pci_dev *dev); +int pci_enable_msix(struct pci_dev *dev, struct msix_entry *entries, int nvec); +void pci_msix_shutdown(struct pci_dev *dev); void pci_disable_msix(struct pci_dev *dev); void pci_restore_msi_state(struct pci_dev *dev); int pci_msi_enabled(void); -int pci_enable_msi(struct pci_dev *dev); +int pci_enable_msi_range(struct pci_dev *dev, int minvec, int maxvec); +static inline int pci_enable_msi_exact(struct pci_dev *dev, int nvec) +{ + int rc = pci_enable_msi_range(dev, nvec, nvec); + if (rc < 0) + return rc; + return 0; +} int pci_enable_msix_range(struct pci_dev *dev, struct msix_entry *entries, int minvec, int maxvec); static inline int pci_enable_msix_exact(struct pci_dev *dev, @@ -1534,40 +1310,43 @@ static inline int pci_enable_msix_exact(struct pci_dev *dev, return rc; return 0; } -int 
pci_alloc_irq_vectors_affinity(struct pci_dev *dev, unsigned int min_vecs, - unsigned int max_vecs, unsigned int flags, - struct irq_affinity *affd); - +int pci_alloc_irq_vectors(struct pci_dev *dev, unsigned int min_vecs, + unsigned int max_vecs, unsigned int flags); void pci_free_irq_vectors(struct pci_dev *dev); int pci_irq_vector(struct pci_dev *dev, unsigned int nr); const struct cpumask *pci_irq_get_affinity(struct pci_dev *pdev, int vec); #else static inline int pci_msi_vec_count(struct pci_dev *dev) { return -ENOSYS; } +static inline void pci_msi_shutdown(struct pci_dev *dev) { } static inline void pci_disable_msi(struct pci_dev *dev) { } static inline int pci_msix_vec_count(struct pci_dev *dev) { return -ENOSYS; } +static inline int pci_enable_msix(struct pci_dev *dev, + struct msix_entry *entries, int nvec) +{ return -ENOSYS; } +static inline void pci_msix_shutdown(struct pci_dev *dev) { } static inline void pci_disable_msix(struct pci_dev *dev) { } static inline void pci_restore_msi_state(struct pci_dev *dev) { } static inline int pci_msi_enabled(void) { return 0; } -static inline int pci_enable_msi(struct pci_dev *dev) +static inline int pci_enable_msi_range(struct pci_dev *dev, int minvec, + int maxvec) +{ return -ENOSYS; } +static inline int pci_enable_msi_exact(struct pci_dev *dev, int nvec) { return -ENOSYS; } static inline int pci_enable_msix_range(struct pci_dev *dev, - struct msix_entry *entries, int minvec, int maxvec) + struct msix_entry *entries, int minvec, int maxvec) { return -ENOSYS; } static inline int pci_enable_msix_exact(struct pci_dev *dev, - struct msix_entry *entries, int nvec) + struct msix_entry *entries, int nvec) { return -ENOSYS; } - -static inline int -pci_alloc_irq_vectors_affinity(struct pci_dev *dev, unsigned int min_vecs, - unsigned int max_vecs, unsigned int flags, - struct irq_affinity *aff_desc) +static inline int pci_alloc_irq_vectors(struct pci_dev *dev, + unsigned int min_vecs, unsigned int max_vecs, + unsigned int 
flags) { - if ((flags & PCI_IRQ_LEGACY) && min_vecs == 1 && dev->irq) - return 1; - return -ENOSPC; + if (min_vecs > 1) + return -EINVAL; + return 1; } - static inline void pci_free_irq_vectors(struct pci_dev *dev) { } @@ -1585,95 +1364,70 @@ static inline const struct cpumask *pci_irq_get_affinity(struct pci_dev *pdev, } #endif -/** - * pci_irqd_intx_xlate() - Translate PCI INTx value to an IRQ domain hwirq - * @d: the INTx IRQ domain - * @node: the DT node for the device whose interrupt we're translating - * @intspec: the interrupt specifier data from the DT - * @intsize: the number of entries in @intspec - * @out_hwirq: pointer at which to write the hwirq number - * @out_type: pointer at which to write the interrupt type - * - * Translate a PCI INTx interrupt number from device tree in the range 1-4, as - * stored in the standard PCI_INTERRUPT_PIN register, to a value in the range - * 0-3 suitable for use in a 4 entry IRQ domain. That is, subtract one from the - * INTx value to obtain the hwirq number. - * - * Returns 0 on success, or -EINVAL if the interrupt specifier is out of range. 
- */ -static inline int pci_irqd_intx_xlate(struct irq_domain *d, - struct device_node *node, - const u32 *intspec, - unsigned int intsize, - unsigned long *out_hwirq, - unsigned int *out_type) -{ - const u32 intx = intspec[0]; - - if (intx < PCI_INTERRUPT_INTA || intx > PCI_INTERRUPT_INTD) - return -EINVAL; - - *out_hwirq = intx - PCI_INTERRUPT_INTA; - return 0; -} - #ifdef CONFIG_PCIEPORTBUS extern bool pcie_ports_disabled; -extern bool pcie_ports_native; +extern bool pcie_ports_auto; #else #define pcie_ports_disabled true -#define pcie_ports_native false +#define pcie_ports_auto false #endif -#define PCIE_LINK_STATE_L0S BIT(0) -#define PCIE_LINK_STATE_L1 BIT(1) -#define PCIE_LINK_STATE_CLKPM BIT(2) -#define PCIE_LINK_STATE_L1_1 BIT(3) -#define PCIE_LINK_STATE_L1_2 BIT(4) -#define PCIE_LINK_STATE_L1_1_PCIPM BIT(5) -#define PCIE_LINK_STATE_L1_2_PCIPM BIT(6) - #ifdef CONFIG_PCIEASPM -int pci_disable_link_state(struct pci_dev *pdev, int state); -int pci_disable_link_state_locked(struct pci_dev *pdev, int state); -void pcie_no_aspm(void); bool pcie_aspm_support_enabled(void); -bool pcie_aspm_enabled(struct pci_dev *pdev); #else -static inline int pci_disable_link_state(struct pci_dev *pdev, int state) -{ return 0; } -static inline int pci_disable_link_state_locked(struct pci_dev *pdev, int state) -{ return 0; } -static inline void pcie_no_aspm(void) { } static inline bool pcie_aspm_support_enabled(void) { return false; } -static inline bool pcie_aspm_enabled(struct pci_dev *pdev) { return false; } #endif #ifdef CONFIG_PCIEAER +void pci_no_aer(void); bool pci_aer_available(void); +int pci_aer_init(struct pci_dev *dev); #else +static inline void pci_no_aer(void) { } static inline bool pci_aer_available(void) { return false; } +static inline int pci_aer_init(struct pci_dev *d) { return -ENODEV; } #endif -bool pci_ats_disabled(void); +#ifdef CONFIG_PCIE_ECRC +void pcie_set_ecrc_checking(struct pci_dev *dev); +void pcie_ecrc_get_policy(char *str); +#else +static inline 
void pcie_set_ecrc_checking(struct pci_dev *dev) { } +static inline void pcie_ecrc_get_policy(char *str) { } +#endif + +#define pci_enable_msi(pdev) pci_enable_msi_exact(pdev, 1) + +#ifdef CONFIG_HT_IRQ +/* The functions a driver should call */ +int ht_create_irq(struct pci_dev *dev, int idx); +void ht_destroy_irq(unsigned int irq); +#endif /* CONFIG_HT_IRQ */ + +#ifdef CONFIG_PCI_ATS +/* Address Translation Service */ +void pci_ats_init(struct pci_dev *dev); +int pci_enable_ats(struct pci_dev *dev, int ps); +void pci_disable_ats(struct pci_dev *dev); +int pci_ats_queue_depth(struct pci_dev *dev); +#else +static inline void pci_ats_init(struct pci_dev *d) { } +static inline int pci_enable_ats(struct pci_dev *d, int ps) { return -ENODEV; } +static inline void pci_disable_ats(struct pci_dev *d) { } +static inline int pci_ats_queue_depth(struct pci_dev *d) { return -ENODEV; } +#endif #ifdef CONFIG_PCIE_PTM int pci_enable_ptm(struct pci_dev *dev, u8 *granularity); -bool pcie_ptm_enabled(struct pci_dev *dev); #else static inline int pci_enable_ptm(struct pci_dev *dev, u8 *granularity) { return -EINVAL; } -static inline bool pcie_ptm_enabled(struct pci_dev *dev) -{ return false; } #endif void pci_cfg_access_lock(struct pci_dev *dev); bool pci_cfg_access_trylock(struct pci_dev *dev); void pci_cfg_access_unlock(struct pci_dev *dev); -int pci_dev_trylock(struct pci_dev *dev); -void pci_dev_unlock(struct pci_dev *dev); - /* * PCI domain support. 
Sometimes called PCI segment (eg by ACPI), * a PCI domain is defined to be a set of PCI buses which share @@ -1681,10 +1435,12 @@ void pci_dev_unlock(struct pci_dev *dev); */ #ifdef CONFIG_PCI_DOMAINS extern int pci_domains_supported; +int pci_get_new_domain_nr(void); #else enum { pci_domains_supported = 0 }; static inline int pci_domain_nr(struct pci_bus *bus) { return 0; } static inline int pci_proc_domain(struct pci_bus *bus) { return 0; } +static inline int pci_get_new_domain_nr(void) { return -ENOSYS; } #endif /* CONFIG_PCI_DOMAINS */ /* @@ -1706,9 +1462,9 @@ static inline int acpi_pci_bus_find_domain_nr(struct pci_bus *bus) int pci_bus_find_domain_nr(struct pci_bus *bus, struct device *parent); #endif -/* Some architectures require additional setup to direct VGA traffic */ +/* some architectures require additional setup to direct VGA traffic */ typedef int (*arch_set_vga_state_t)(struct pci_dev *pdev, bool decode, - unsigned int command_bits, u32 flags); + unsigned int command_bits, u32 flags); void pci_register_set_vga_state(arch_set_vga_state_t func); static inline int @@ -1747,9 +1503,10 @@ static inline void pci_clear_flags(int flags) { } static inline int pci_has_flag(int flag) { return 0; } /* - * If the system does not have PCI, clearly these return errors. Define - * these as simple inline functions to avoid hair in drivers. + * If the system does not have PCI, clearly these return errors. Define + * these as simple inline functions to avoid hair in drivers. 
*/ + #define _PCI_NOP(o, s, t) \ static inline int pci_##o##_config_##s(struct pci_dev *dev, \ int where, t val) \ @@ -1784,17 +1541,15 @@ static inline struct pci_dev *pci_get_class(unsigned int class, static inline void pci_set_master(struct pci_dev *dev) { } static inline int pci_enable_device(struct pci_dev *dev) { return -EIO; } static inline void pci_disable_device(struct pci_dev *dev) { } -static inline int pcim_enable_device(struct pci_dev *pdev) { return -EIO; } static inline int pci_assign_resource(struct pci_dev *dev, int i) { return -EBUSY; } -static inline int __must_check __pci_register_driver(struct pci_driver *drv, - struct module *owner, - const char *mod_name) +static inline int __pci_register_driver(struct pci_driver *drv, + struct module *owner) { return 0; } static inline int pci_register_driver(struct pci_driver *drv) { return 0; } static inline void pci_unregister_driver(struct pci_driver *drv) { } -static inline u8 pci_find_capability(struct pci_dev *dev, int cap) +static inline int pci_find_capability(struct pci_dev *dev, int cap) { return 0; } static inline int pci_find_next_capability(struct pci_dev *dev, u8 post, int cap) @@ -1802,9 +1557,6 @@ static inline int pci_find_next_capability(struct pci_dev *dev, u8 post, static inline int pci_find_ext_capability(struct pci_dev *dev, int cap) { return 0; } -static inline u64 pci_get_dsn(struct pci_dev *dev) -{ return 0; } - /* Power management related routines */ static inline int pci_save_state(struct pci_dev *dev) { return 0; } static inline void pci_restore_state(struct pci_dev *dev) { } @@ -1826,116 +1578,53 @@ static inline int pci_request_regions(struct pci_dev *dev, const char *res_name) { return -EIO; } static inline void pci_release_regions(struct pci_dev *dev) { } -static inline int pci_register_io_range(struct fwnode_handle *fwnode, - phys_addr_t addr, resource_size_t size) -{ return -EINVAL; } - static inline unsigned long pci_address_to_pio(phys_addr_t addr) { return -1; } +static 
inline void pci_block_cfg_access(struct pci_dev *dev) { } +static inline int pci_block_cfg_access_in_atomic(struct pci_dev *dev) +{ return 0; } +static inline void pci_unblock_cfg_access(struct pci_dev *dev) { } + static inline struct pci_bus *pci_find_next_bus(const struct pci_bus *from) { return NULL; } static inline struct pci_dev *pci_get_slot(struct pci_bus *bus, unsigned int devfn) { return NULL; } -static inline struct pci_dev *pci_get_domain_bus_and_slot(int domain, - unsigned int bus, unsigned int devfn) +static inline struct pci_dev *pci_get_bus_and_slot(unsigned int bus, + unsigned int devfn) { return NULL; } static inline int pci_domain_nr(struct pci_bus *bus) { return 0; } static inline struct pci_dev *pci_dev_get(struct pci_dev *dev) { return NULL; } +static inline int pci_get_new_domain_nr(void) { return -ENOSYS; } #define dev_is_pci(d) (false) #define dev_is_pf(d) (false) -static inline bool pci_acs_enabled(struct pci_dev *pdev, u16 acs_flags) -{ return false; } -static inline int pci_irqd_intx_xlate(struct irq_domain *d, - struct device_node *node, - const u32 *intspec, - unsigned int intsize, - unsigned long *out_hwirq, - unsigned int *out_type) -{ return -EINVAL; } - -static inline const struct pci_device_id *pci_match_id(const struct pci_device_id *ids, - struct pci_dev *dev) -{ return NULL; } -static inline bool pci_ats_disabled(void) { return true; } - -static inline int pci_irq_vector(struct pci_dev *dev, unsigned int nr) -{ - return -EINVAL; -} - -static inline int -pci_alloc_irq_vectors_affinity(struct pci_dev *dev, unsigned int min_vecs, - unsigned int max_vecs, unsigned int flags, - struct irq_affinity *aff_desc) -{ - return -ENOSPC; -} +#define dev_num_vf(d) (0) #endif /* CONFIG_PCI */ -static inline int -pci_alloc_irq_vectors(struct pci_dev *dev, unsigned int min_vecs, - unsigned int max_vecs, unsigned int flags) -{ - return pci_alloc_irq_vectors_affinity(dev, min_vecs, max_vecs, flags, - NULL); -} - /* Include architecture-dependent 
settings and functions */ #include -/* These two functions provide almost identical functionality. Depending - * on the architecture, one will be implemented as a wrapper around the - * other (in drivers/pci/mmap.c). - * - * pci_mmap_resource_range() maps a specific BAR, and vm->vm_pgoff - * is expected to be an offset within that region. - * - * pci_mmap_page_range() is the legacy architecture-specific interface, - * which accepts a "user visible" resource address converted by - * pci_resource_to_user(), as used in the legacy mmap() interface in - * /proc/bus/pci/. - */ -int pci_mmap_resource_range(struct pci_dev *dev, int bar, - struct vm_area_struct *vma, - enum pci_mmap_state mmap_state, int write_combine); -int pci_mmap_page_range(struct pci_dev *pdev, int bar, - struct vm_area_struct *vma, - enum pci_mmap_state mmap_state, int write_combine); - -#ifndef arch_can_pci_mmap_wc -#define arch_can_pci_mmap_wc() 0 -#endif - -#ifndef arch_can_pci_mmap_io -#define arch_can_pci_mmap_io() 0 -#define pci_iobar_pfn(pdev, bar, vma) (-EINVAL) -#else -int pci_iobar_pfn(struct pci_dev *pdev, int bar, struct vm_area_struct *vma); -#endif - #ifndef pci_root_bus_fwnode #define pci_root_bus_fwnode(bus) NULL #endif -/* - * These helpers provide future and backwards compatibility - * for accessing popular PCI BAR info - */ +/* these helpers provide future and backwards compatibility + * for accessing popular PCI BAR info */ #define pci_resource_start(dev, bar) ((dev)->resource[(bar)].start) #define pci_resource_end(dev, bar) ((dev)->resource[(bar)].end) #define pci_resource_flags(dev, bar) ((dev)->resource[(bar)].flags) #define pci_resource_len(dev,bar) \ - ((pci_resource_end((dev), (bar)) == 0) ? 0 : \ + ((pci_resource_start((dev), (bar)) == 0 && \ + pci_resource_end((dev), (bar)) == \ + pci_resource_start((dev), (bar))) ? 
0 : \ \ (pci_resource_end((dev), (bar)) - \ pci_resource_start((dev), (bar)) + 1)) -/* - * Similar to the helpers above, these manipulate per-pci_dev +/* Similar to the helpers above, these manipulate per-pci_dev * driver-specific data. They are really just a wrapper around * the generic device structure functions of these calls. */ @@ -1949,32 +1638,46 @@ static inline void pci_set_drvdata(struct pci_dev *pdev, void *data) dev_set_drvdata(&pdev->dev, data); } +/* If you want to know what to call your pci_dev, ask this function. + * Again, it's a wrapper around the generic device. + */ static inline const char *pci_name(const struct pci_dev *pdev) { return dev_name(&pdev->dev); } + +/* Some archs don't want to expose struct resource to userland as-is + * in sysfs and /proc + */ +#ifdef HAVE_ARCH_PCI_RESOURCE_TO_USER void pci_resource_to_user(const struct pci_dev *dev, int bar, const struct resource *rsrc, resource_size_t *start, resource_size_t *end); +#else +static inline void pci_resource_to_user(const struct pci_dev *dev, int bar, + const struct resource *rsrc, resource_size_t *start, + resource_size_t *end) +{ + *start = rsrc->start; + *end = rsrc->end; +} +#endif /* HAVE_ARCH_PCI_RESOURCE_TO_USER */ + /* - * The world is not perfect and supplies us with broken PCI devices. - * For at least a part of these bugs we need a work-around, so both - * generic (drivers/pci/quirks.c) and per-architecture code can define - * fixup hooks to be called for particular buggy devices. + * The world is not perfect and supplies us with broken PCI devices. + * For at least a part of these bugs we need a work-around, so both + * generic (drivers/pci/quirks.c) and per-architecture code can define + * fixup hooks to be called for particular buggy devices. 
*/ struct pci_fixup { - u16 vendor; /* Or PCI_ANY_ID */ - u16 device; /* Or PCI_ANY_ID */ - u32 class; /* Or PCI_ANY_ID */ + u16 vendor; /* You can use PCI_ANY_ID here of course */ + u16 device; /* You can use PCI_ANY_ID here of course */ + u32 class; /* You can use PCI_ANY_ID here too */ unsigned int class_shift; /* should be 0, 8, 16 */ -#ifdef CONFIG_HAVE_ARCH_PREL32_RELOCATIONS - int hook_offset; -#else void (*hook)(struct pci_dev *dev); -#endif }; enum pci_fixup_pass { @@ -1988,51 +1691,12 @@ enum pci_fixup_pass { pci_fixup_suspend_late, /* pci_device_suspend_late() */ }; -#ifdef CONFIG_HAVE_ARCH_PREL32_RELOCATIONS -#define ___DECLARE_PCI_FIXUP_SECTION(sec, name, vendor, device, class, \ - class_shift, hook) \ - __ADDRESSABLE(hook) \ - asm(".section " #sec ", \"a\" \n" \ - ".balign 16 \n" \ - ".short " #vendor ", " #device " \n" \ - ".long " #class ", " #class_shift " \n" \ - ".long " #hook " - . \n" \ - ".previous \n"); - -/* - * Clang's LTO may rename static functions in C, but has no way to - * handle such renamings when referenced from inline asm. To work - * around this, create global C stubs for these cases. - */ -#ifdef CONFIG_LTO_CLANG -#define __DECLARE_PCI_FIXUP_SECTION(sec, name, vendor, device, class, \ - class_shift, hook, stub) \ - void __cficanonical stub(struct pci_dev *dev); \ - void __cficanonical stub(struct pci_dev *dev) \ - { \ - hook(dev); \ - } \ - ___DECLARE_PCI_FIXUP_SECTION(sec, name, vendor, device, class, \ - class_shift, stub) -#else -#define __DECLARE_PCI_FIXUP_SECTION(sec, name, vendor, device, class, \ - class_shift, hook, stub) \ - ___DECLARE_PCI_FIXUP_SECTION(sec, name, vendor, device, class, \ - class_shift, hook) -#endif - -#define DECLARE_PCI_FIXUP_SECTION(sec, name, vendor, device, class, \ - class_shift, hook) \ - __DECLARE_PCI_FIXUP_SECTION(sec, name, vendor, device, class, \ - class_shift, hook, __UNIQUE_ID(hook)) -#else /* Anonymous variables would be nice... 
*/ #define DECLARE_PCI_FIXUP_SECTION(section, name, vendor, device, class, \ class_shift, hook) \ static const struct pci_fixup __PASTE(__pci_fixup_##name,__LINE__) __used \ __attribute__((__section__(#section), aligned((sizeof(void *))))) \ = { vendor, device, class, class_shift, hook }; -#endif #define DECLARE_PCI_FIXUP_CLASS_EARLY(vendor, device, class, \ class_shift, hook) \ @@ -2053,19 +1717,23 @@ enum pci_fixup_pass { #define DECLARE_PCI_FIXUP_CLASS_RESUME(vendor, device, class, \ class_shift, hook) \ DECLARE_PCI_FIXUP_SECTION(.pci_fixup_resume, \ - resume##hook, vendor, device, class, class_shift, hook) + resume##hook, vendor, device, class, \ + class_shift, hook) #define DECLARE_PCI_FIXUP_CLASS_RESUME_EARLY(vendor, device, class, \ class_shift, hook) \ DECLARE_PCI_FIXUP_SECTION(.pci_fixup_resume_early, \ - resume_early##hook, vendor, device, class, class_shift, hook) + resume_early##hook, vendor, device, \ + class, class_shift, hook) #define DECLARE_PCI_FIXUP_CLASS_SUSPEND(vendor, device, class, \ class_shift, hook) \ DECLARE_PCI_FIXUP_SECTION(.pci_fixup_suspend, \ - suspend##hook, vendor, device, class, class_shift, hook) + suspend##hook, vendor, device, class, \ + class_shift, hook) #define DECLARE_PCI_FIXUP_CLASS_SUSPEND_LATE(vendor, device, class, \ class_shift, hook) \ DECLARE_PCI_FIXUP_SECTION(.pci_fixup_suspend_late, \ - suspend_late##hook, vendor, device, class, class_shift, hook) + suspend_late##hook, vendor, device, \ + class, class_shift, hook) #define DECLARE_PCI_FIXUP_EARLY(vendor, device, hook) \ DECLARE_PCI_FIXUP_SECTION(.pci_fixup_early, \ @@ -2081,22 +1749,37 @@ enum pci_fixup_pass { hook, vendor, device, PCI_ANY_ID, 0, hook) #define DECLARE_PCI_FIXUP_RESUME(vendor, device, hook) \ DECLARE_PCI_FIXUP_SECTION(.pci_fixup_resume, \ - resume##hook, vendor, device, PCI_ANY_ID, 0, hook) + resume##hook, vendor, device, \ + PCI_ANY_ID, 0, hook) #define DECLARE_PCI_FIXUP_RESUME_EARLY(vendor, device, hook) \ 
DECLARE_PCI_FIXUP_SECTION(.pci_fixup_resume_early, \ - resume_early##hook, vendor, device, PCI_ANY_ID, 0, hook) + resume_early##hook, vendor, device, \ + PCI_ANY_ID, 0, hook) #define DECLARE_PCI_FIXUP_SUSPEND(vendor, device, hook) \ DECLARE_PCI_FIXUP_SECTION(.pci_fixup_suspend, \ - suspend##hook, vendor, device, PCI_ANY_ID, 0, hook) + suspend##hook, vendor, device, \ + PCI_ANY_ID, 0, hook) #define DECLARE_PCI_FIXUP_SUSPEND_LATE(vendor, device, hook) \ DECLARE_PCI_FIXUP_SECTION(.pci_fixup_suspend_late, \ - suspend_late##hook, vendor, device, PCI_ANY_ID, 0, hook) + suspend_late##hook, vendor, device, \ + PCI_ANY_ID, 0, hook) #ifdef CONFIG_PCI_QUIRKS void pci_fixup_device(enum pci_fixup_pass pass, struct pci_dev *dev); +int pci_dev_specific_acs_enabled(struct pci_dev *dev, u16 acs_flags); +int pci_dev_specific_enable_acs(struct pci_dev *dev); #else static inline void pci_fixup_device(enum pci_fixup_pass pass, struct pci_dev *dev) { } +static inline int pci_dev_specific_acs_enabled(struct pci_dev *dev, + u16 acs_flags) +{ + return -ENOTTY; +} +static inline int pci_dev_specific_enable_acs(struct pci_dev *dev) +{ + return -ENOTTY; +} #endif void __iomem *pcim_iomap(struct pci_dev *pdev, int bar, unsigned long maxlen); @@ -2121,6 +1804,10 @@ extern unsigned long pci_cardbus_mem_size; extern u8 pci_dfl_cache_line_size; extern u8 pci_cache_line_size; +extern unsigned long pci_hotplug_io_size; +extern unsigned long pci_hotplug_mem_size; +extern unsigned long pci_hotplug_bus_size; + /* Architecture-specific versions may override these (weak) */ void pcibios_disable_device(struct pci_dev *dev); void pcibios_set_master(struct pci_dev *dev); @@ -2128,14 +1815,13 @@ int pcibios_set_pcie_reset_state(struct pci_dev *dev, enum pcie_reset_state state); int pcibios_add_device(struct pci_dev *dev); void pcibios_release_device(struct pci_dev *dev); -#ifdef CONFIG_PCI void pcibios_penalize_isa_irq(int irq, int active); -#else -static inline void pcibios_penalize_isa_irq(int irq, int 
active) {} -#endif int pcibios_alloc_irq(struct pci_dev *dev); void pcibios_free_irq(struct pci_dev *dev); -resource_size_t pcibios_default_alignment(void); + +#ifdef CONFIG_HIBERNATE_CALLBACKS +extern struct dev_pm_ops pcibios_pm_ops; +#endif #if defined(CONFIG_PCI_MMCONFIG) || defined(CONFIG_ACPI_MCFG) void __init pci_mmcfg_early_init(void); @@ -2156,22 +1842,13 @@ int pci_iov_virtfn_devfn(struct pci_dev *dev, int id); int pci_enable_sriov(struct pci_dev *dev, int nr_virtfn); void pci_disable_sriov(struct pci_dev *dev); - -int pci_iov_sysfs_link(struct pci_dev *dev, struct pci_dev *virtfn, int id); -int pci_iov_add_virtfn(struct pci_dev *dev, int id); -void pci_iov_remove_virtfn(struct pci_dev *dev, int id); +int pci_iov_add_virtfn(struct pci_dev *dev, int id, int reset); +void pci_iov_remove_virtfn(struct pci_dev *dev, int id, int reset); int pci_num_vf(struct pci_dev *dev); int pci_vfs_assigned(struct pci_dev *dev); int pci_sriov_set_totalvfs(struct pci_dev *dev, u16 numvfs); int pci_sriov_get_totalvfs(struct pci_dev *dev); -int pci_sriov_configure_simple(struct pci_dev *dev, int nr_virtfn); resource_size_t pci_iov_resource_size(struct pci_dev *dev, int resno); -void pci_vf_drivers_autoprobe(struct pci_dev *dev, bool probe); - -/* Arch may override these (weak) */ -int pcibios_sriov_enable(struct pci_dev *pdev, u16 num_vfs); -int pcibios_sriov_disable(struct pci_dev *pdev); -resource_size_t pcibios_iov_resource_alignment(struct pci_dev *dev, int resno); #else static inline int pci_iov_virtfn_bus(struct pci_dev *dev, int id) { @@ -2183,18 +1860,12 @@ static inline int pci_iov_virtfn_devfn(struct pci_dev *dev, int id) } static inline int pci_enable_sriov(struct pci_dev *dev, int nr_virtfn) { return -ENODEV; } - -static inline int pci_iov_sysfs_link(struct pci_dev *dev, - struct pci_dev *virtfn, int id) -{ - return -ENODEV; -} -static inline int pci_iov_add_virtfn(struct pci_dev *dev, int id) +static inline int pci_iov_add_virtfn(struct pci_dev *dev, int id, int 
reset) { return -ENOSYS; } static inline void pci_iov_remove_virtfn(struct pci_dev *dev, - int id) { } + int id, int reset) { } static inline void pci_disable_sriov(struct pci_dev *dev) { } static inline int pci_num_vf(struct pci_dev *dev) { return 0; } static inline int pci_vfs_assigned(struct pci_dev *dev) @@ -2203,10 +1874,8 @@ static inline int pci_sriov_set_totalvfs(struct pci_dev *dev, u16 numvfs) { return 0; } static inline int pci_sriov_get_totalvfs(struct pci_dev *dev) { return 0; } -#define pci_sriov_configure_simple NULL static inline resource_size_t pci_iov_resource_size(struct pci_dev *dev, int resno) { return 0; } -static inline void pci_vf_drivers_autoprobe(struct pci_dev *dev, bool probe) { } #endif #if defined(CONFIG_HOTPLUG_PCI) || defined(CONFIG_HOTPLUG_PCI_MODULE) @@ -2259,22 +1928,17 @@ static inline int pci_pcie_type(const struct pci_dev *dev) return (pcie_caps_reg(dev) & PCI_EXP_FLAGS_TYPE) >> 4; } -/** - * pcie_find_root_port - Get the PCIe root port device - * @dev: PCI device - * - * Traverse up the parent chain and return the PCIe Root Port PCI Device - * for a given PCI/PCIe Device. 
- */ static inline struct pci_dev *pcie_find_root_port(struct pci_dev *dev) { - while (dev) { - if (pci_is_pcie(dev) && - pci_pcie_type(dev) == PCI_EXP_TYPE_ROOT_PORT) + while (1) { + if (!pci_is_pcie(dev)) + break; + if (pci_pcie_type(dev) == PCI_EXP_TYPE_ROOT_PORT) return dev; - dev = pci_upstream_bridge(dev); + if (!dev->bus->self) + break; + dev = dev->bus->self; } - return NULL; } @@ -2282,7 +1946,6 @@ void pci_request_acs(void); bool pci_acs_enabled(struct pci_dev *pdev, u16 acs_flags); bool pci_acs_path_enabled(struct pci_dev *start, struct pci_dev *end, u16 acs_flags); -int pci_enable_atomic_ops_to_root(struct pci_dev *dev, u32 cap_mask); #define PCI_VPD_LRDT 0x80 /* Large Resource Data Type */ #define PCI_VPD_LRDT_ID(x) ((x) | PCI_VPD_LRDT) @@ -2296,69 +1959,118 @@ int pci_enable_atomic_ops_to_root(struct pci_dev *dev, u32 cap_mask); #define PCI_VPD_LRDT_RO_DATA PCI_VPD_LRDT_ID(PCI_VPD_LTIN_RO_DATA) #define PCI_VPD_LRDT_RW_DATA PCI_VPD_LRDT_ID(PCI_VPD_LTIN_RW_DATA) +/* Small Resource Data Type Tag Item Names */ +#define PCI_VPD_STIN_END 0x0f /* End */ + +#define PCI_VPD_SRDT_END (PCI_VPD_STIN_END << 3) + +#define PCI_VPD_SRDT_TIN_MASK 0x78 +#define PCI_VPD_SRDT_LEN_MASK 0x07 +#define PCI_VPD_LRDT_TIN_MASK 0x7f + +#define PCI_VPD_LRDT_TAG_SIZE 3 +#define PCI_VPD_SRDT_TAG_SIZE 1 + +#define PCI_VPD_INFO_FLD_HDR_SIZE 3 + #define PCI_VPD_RO_KEYWORD_PARTNO "PN" -#define PCI_VPD_RO_KEYWORD_SERIALNO "SN" #define PCI_VPD_RO_KEYWORD_MFR_ID "MN" #define PCI_VPD_RO_KEYWORD_VENDOR0 "V0" #define PCI_VPD_RO_KEYWORD_CHKSUM "RV" /** - * pci_vpd_alloc - Allocate buffer and read VPD into it - * @dev: PCI device - * @size: pointer to field where VPD length is returned + * pci_vpd_lrdt_size - Extracts the Large Resource Data Type length + * @lrdt: Pointer to the beginning of the Large Resource Data Type tag * - * Returns pointer to allocated buffer or an ERR_PTR in case of failure + * Returns the extracted Large Resource Data Type length. 
*/ -void *pci_vpd_alloc(struct pci_dev *dev, unsigned int *size); +static inline u16 pci_vpd_lrdt_size(const u8 *lrdt) +{ + return (u16)lrdt[1] + ((u16)lrdt[2] << 8); +} /** - * pci_vpd_find_id_string - Locate id string in VPD - * @buf: Pointer to buffered VPD data - * @len: The length of the buffer area in which to search - * @size: Pointer to field where length of id string is returned + * pci_vpd_lrdt_tag - Extracts the Large Resource Data Type Tag Item + * @lrdt: Pointer to the beginning of the Large Resource Data Type tag * - * Returns the index of the id string or -ENOENT if not found. + * Returns the extracted Large Resource Data Type Tag item. */ -int pci_vpd_find_id_string(const u8 *buf, unsigned int len, unsigned int *size); +static inline u16 pci_vpd_lrdt_tag(const u8 *lrdt) +{ + return (u16)(lrdt[0] & PCI_VPD_LRDT_TIN_MASK); +} /** - * pci_vpd_find_ro_info_keyword - Locate info field keyword in VPD RO section - * @buf: Pointer to buffered VPD data - * @len: The length of the buffer area in which to search + * pci_vpd_srdt_size - Extracts the Small Resource Data Type length + * @lrdt: Pointer to the beginning of the Small Resource Data Type tag + * + * Returns the extracted Small Resource Data Type length. + */ +static inline u8 pci_vpd_srdt_size(const u8 *srdt) +{ + return (*srdt) & PCI_VPD_SRDT_LEN_MASK; +} + +/** + * pci_vpd_srdt_tag - Extracts the Small Resource Data Type Tag Item + * @lrdt: Pointer to the beginning of the Small Resource Data Type tag + * + * Returns the extracted Small Resource Data Type Tag Item. + */ +static inline u8 pci_vpd_srdt_tag(const u8 *srdt) +{ + return ((*srdt) & PCI_VPD_SRDT_TIN_MASK) >> 3; +} + +/** + * pci_vpd_info_field_size - Extracts the information field length + * @lrdt: Pointer to the beginning of an information field header + * + * Returns the extracted information field length. 
+ */ +static inline u8 pci_vpd_info_field_size(const u8 *info_field) +{ + return info_field[2]; +} + +/** + * pci_vpd_find_tag - Locates the Resource Data Type tag provided + * @buf: Pointer to buffered vpd data + * @off: The offset into the buffer at which to begin the search + * @len: The length of the vpd buffer + * @rdt: The Resource Data Type to search for + * + * Returns the index where the Resource Data Type was found or + * -ENOENT otherwise. + */ +int pci_vpd_find_tag(const u8 *buf, unsigned int off, unsigned int len, u8 rdt); + +/** + * pci_vpd_find_info_keyword - Locates an information field keyword in the VPD + * @buf: Pointer to buffered vpd data + * @off: The offset into the buffer at which to begin the search + * @len: The length of the buffer area, relative to off, in which to search * @kw: The keyword to search for - * @size: Pointer to field where length of found keyword data is returned * - * Returns the index of the information field keyword data or -ENOENT if - * not found. + * Returns the index where the information field keyword was found or + * -ENOENT otherwise. 
*/ -int pci_vpd_find_ro_info_keyword(const void *buf, unsigned int len, - const char *kw, unsigned int *size); - -/** - * pci_vpd_check_csum - Check VPD checksum - * @buf: Pointer to buffered VPD data - * @len: VPD size - * - * Returns 1 if VPD has no checksum, otherwise 0 or an errno - */ -int pci_vpd_check_csum(const void *buf, unsigned int len); +int pci_vpd_find_info_keyword(const u8 *buf, unsigned int off, + unsigned int len, const char *kw); /* PCI <-> OF binding helpers */ #ifdef CONFIG_OF struct device_node; struct irq_domain; +void pci_set_of_node(struct pci_dev *dev); +void pci_release_of_node(struct pci_dev *dev); +void pci_set_bus_of_node(struct pci_bus *bus); +void pci_release_bus_of_node(struct pci_bus *bus); struct irq_domain *pci_host_bridge_of_msi_domain(struct pci_bus *bus); -bool pci_host_of_has_msi_map(struct device *dev); /* Arch may override this (weak) */ struct device_node *pcibios_get_phb_of_node(struct pci_bus *bus); -#else /* CONFIG_OF */ -static inline struct irq_domain * -pci_host_bridge_of_msi_domain(struct pci_bus *bus) { return NULL; } -static inline bool pci_host_of_has_msi_map(struct device *dev) { return false; } -#endif /* CONFIG_OF */ - static inline struct device_node * pci_device_to_OF_node(const struct pci_dev *pdev) { @@ -2370,16 +2082,25 @@ static inline struct device_node *pci_bus_to_OF_node(struct pci_bus *bus) return bus ? 
bus->dev.of_node : NULL; } +#else /* CONFIG_OF */ +static inline void pci_set_of_node(struct pci_dev *dev) { } +static inline void pci_release_of_node(struct pci_dev *dev) { } +static inline void pci_set_bus_of_node(struct pci_bus *bus) { } +static inline void pci_release_bus_of_node(struct pci_bus *bus) { } +static inline struct device_node * +pci_device_to_OF_node(const struct pci_dev *pdev) { return NULL; } +static inline struct irq_domain * +pci_host_bridge_of_msi_domain(struct pci_bus *bus) { return NULL; } +#endif /* CONFIG_OF */ + #ifdef CONFIG_ACPI struct irq_domain *pci_host_bridge_acpi_msi_domain(struct pci_bus *bus); void pci_msi_register_fwnode_provider(struct fwnode_handle *(*fn)(struct device *)); -bool pci_pr3_present(struct pci_dev *pdev); #else static inline struct irq_domain * pci_host_bridge_acpi_msi_domain(struct pci_bus *bus) { return NULL; } -static inline bool pci_pr3_present(struct pci_dev *pdev) { return false; } #endif #ifdef CONFIG_EEH @@ -2389,13 +2110,13 @@ static inline struct eeh_dev *pci_dev_to_eeh_dev(struct pci_dev *pdev) } #endif -void pci_add_dma_alias(struct pci_dev *dev, u8 devfn_from, unsigned nr_devfns); +void pci_add_dma_alias(struct pci_dev *dev, u8 devfn); bool pci_devs_are_dma_aliases(struct pci_dev *dev1, struct pci_dev *dev2); int pci_for_each_dma_alias(struct pci_dev *pdev, int (*fn)(struct pci_dev *pdev, u16 alias, void *data), void *data); -/* Helper functions for operation of device flag */ +/* helper functions for operation of device flag */ static inline void pci_set_dev_assigned(struct pci_dev *pdev) { pdev->dev_flags |= PCI_DEV_FLAGS_ASSIGNED; @@ -2420,59 +2141,7 @@ static inline bool pci_ari_enabled(struct pci_bus *bus) return bus->self && bus->self->ari_enabled; } -/** - * pci_is_thunderbolt_attached - whether device is on a Thunderbolt daisy chain - * @pdev: PCI device to check - * - * Walk upwards from @pdev and check for each encountered bridge if it's part - * of a Thunderbolt controller. 
Reaching the host bridge means @pdev is not - * Thunderbolt-attached. (But rather soldered to the mainboard usually.) - */ -static inline bool pci_is_thunderbolt_attached(struct pci_dev *pdev) -{ - struct pci_dev *parent = pdev; - - if (pdev->is_thunderbolt) - return true; - - while ((parent = pci_upstream_bridge(parent))) - if (parent->is_thunderbolt) - return true; - - return false; -} - -#if defined(CONFIG_PCIEPORTBUS) || defined(CONFIG_EEH) -void pci_uevent_ers(struct pci_dev *pdev, enum pci_ers_result err_type); -#endif - -/* Provide the legacy pci_dma_* API */ +/* provide the legacy pci_dma_* API */ #include -#define pci_printk(level, pdev, fmt, arg...) \ - dev_printk(level, &(pdev)->dev, fmt, ##arg) - -#define pci_emerg(pdev, fmt, arg...) dev_emerg(&(pdev)->dev, fmt, ##arg) -#define pci_alert(pdev, fmt, arg...) dev_alert(&(pdev)->dev, fmt, ##arg) -#define pci_crit(pdev, fmt, arg...) dev_crit(&(pdev)->dev, fmt, ##arg) -#define pci_err(pdev, fmt, arg...) dev_err(&(pdev)->dev, fmt, ##arg) -#define pci_warn(pdev, fmt, arg...) dev_warn(&(pdev)->dev, fmt, ##arg) -#define pci_notice(pdev, fmt, arg...) dev_notice(&(pdev)->dev, fmt, ##arg) -#define pci_info(pdev, fmt, arg...) dev_info(&(pdev)->dev, fmt, ##arg) -#define pci_dbg(pdev, fmt, arg...) dev_dbg(&(pdev)->dev, fmt, ##arg) - -#define pci_notice_ratelimited(pdev, fmt, arg...) \ - dev_notice_ratelimited(&(pdev)->dev, fmt, ##arg) - -#define pci_info_ratelimited(pdev, fmt, arg...) \ - dev_info_ratelimited(&(pdev)->dev, fmt, ##arg) - -#define pci_WARN(pdev, condition, fmt, arg...) \ - WARN(condition, "%s %s: " fmt, \ - dev_driver_string(&(pdev)->dev), pci_name(pdev), ##arg) - -#define pci_WARN_ONCE(pdev, condition, fmt, arg...) 
\ - WARN_ONCE(condition, "%s %s: " fmt, \ - dev_driver_string(&(pdev)->dev), pci_name(pdev), ##arg) - #endif /* LINUX_PCI_H */ diff --git a/include/linux/pci_hotplug.h b/include/linux/pci_hotplug.h index 3a10d6ec3e..0d74ed91f2 100644 --- a/include/linux/pci_hotplug.h +++ b/include/linux/pci_hotplug.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0+ */ /* * PCI HotPlug Core Functions * @@ -8,6 +7,21 @@ * * All rights reserved. * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or (at + * your option) any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or + * NON INFRINGEMENT. See the GNU General Public License for more + * details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. + * * Send feedback to * */ @@ -16,6 +30,8 @@ /** * struct hotplug_slot_ops -the callbacks that the hotplug pci core can use + * @owner: The module owner of this structure + * @mod_name: The module name (KBUILD_MODNAME) of this structure * @enable_slot: Called when the user wants to enable a specific pci slot * @disable_slot: Called when the user wants to disable a specific pci slot * @set_attention_status: Called to set the specific slot's attention LED to @@ -23,9 +39,17 @@ * @hardware_test: Called to run a specified hardware test on the specified * slot. * @get_power_status: Called to get the current power status of a slot. + * If this field is NULL, the value passed in the struct hotplug_slot_info + * will be used when this value is requested by a user. 
* @get_attention_status: Called to get the current attention status of a slot. + * If this field is NULL, the value passed in the struct hotplug_slot_info + * will be used when this value is requested by a user. * @get_latch_status: Called to get the current latch status of a slot. + * If this field is NULL, the value passed in the struct hotplug_slot_info + * will be used when this value is requested by a user. * @get_adapter_status: Called to get see if an adapter is present in the slot or not. + * If this field is NULL, the value passed in the struct hotplug_slot_info + * will be used when this value is requested by a user. * @reset_slot: Optional interface to allow override of a bus reset for the * slot for cases where a secondary bus reset can result in spurious * hotplug events or where a slot can be reset independent of the bus. @@ -36,6 +60,8 @@ * set an LED, enable / disable power, etc.) */ struct hotplug_slot_ops { + struct module *owner; + const char *mod_name; int (*enable_slot) (struct hotplug_slot *slot); int (*disable_slot) (struct hotplug_slot *slot); int (*set_attention_status) (struct hotplug_slot *slot, u8 value); @@ -44,25 +70,45 @@ struct hotplug_slot_ops { int (*get_attention_status) (struct hotplug_slot *slot, u8 *value); int (*get_latch_status) (struct hotplug_slot *slot, u8 *value); int (*get_adapter_status) (struct hotplug_slot *slot, u8 *value); - int (*reset_slot) (struct hotplug_slot *slot, bool probe); + int (*reset_slot) (struct hotplug_slot *slot, int probe); +} __do_const; +typedef struct hotplug_slot_ops __no_const hotplug_slot_ops_no_const; + +/** + * struct hotplug_slot_info - used to notify the hotplug pci core of the state of the slot + * @power_status: if power is enabled or not (1/0) + * @attention_status: if the attention light is enabled or not (1/0) + * @latch_status: if the latch (if any) is open or closed (1/0) + * @adapter_status: if there is a pci board present in the slot or not (1/0) + * + * Used to notify the 
hotplug pci core of the status of a specific slot. + */ +struct hotplug_slot_info { + u8 power_status; + u8 attention_status; + u8 latch_status; + u8 adapter_status; }; /** * struct hotplug_slot - used to register a physical slot with the hotplug pci core * @ops: pointer to the &struct hotplug_slot_ops to be used for this slot - * @slot_list: internal list used to track hotplug PCI slots - * @pci_slot: represents a physical slot - * @owner: The module owner of this structure - * @mod_name: The module name (KBUILD_MODNAME) of this structure + * @info: pointer to the &struct hotplug_slot_info for the initial values for + * this slot. + * @release: called during pci_hp_deregister to free memory allocated in a + * hotplug_slot structure. + * @private: used by the hotplug pci controller driver to store whatever it + * needs. */ struct hotplug_slot { - const struct hotplug_slot_ops *ops; + struct hotplug_slot_ops *ops; + struct hotplug_slot_info *info; + void (*release) (struct hotplug_slot *slot); + void *private; /* Variables below this are for use only by the hotplug pci core. 
*/ struct list_head slot_list; struct pci_slot *pci_slot; - struct module *owner; - const char *mod_name; }; static inline const char *hotplug_slot_name(const struct hotplug_slot *slot) @@ -73,39 +119,72 @@ static inline const char *hotplug_slot_name(const struct hotplug_slot *slot) int __pci_hp_register(struct hotplug_slot *slot, struct pci_bus *pbus, int nr, const char *name, struct module *owner, const char *mod_name); -int __pci_hp_initialize(struct hotplug_slot *slot, struct pci_bus *bus, int nr, - const char *name, struct module *owner, - const char *mod_name); -int pci_hp_add(struct hotplug_slot *slot); - -void pci_hp_del(struct hotplug_slot *slot); -void pci_hp_destroy(struct hotplug_slot *slot); -void pci_hp_deregister(struct hotplug_slot *slot); +int pci_hp_deregister(struct hotplug_slot *slot); +int __must_check pci_hp_change_slot_info(struct hotplug_slot *slot, + struct hotplug_slot_info *info); /* use a define to avoid include chaining to get THIS_MODULE & friends */ #define pci_hp_register(slot, pbus, devnr, name) \ __pci_hp_register(slot, pbus, devnr, name, THIS_MODULE, KBUILD_MODNAME) -#define pci_hp_initialize(slot, bus, nr, name) \ - __pci_hp_initialize(slot, bus, nr, name, THIS_MODULE, KBUILD_MODNAME) + +/* PCI Setting Record (Type 0) */ +struct hpp_type0 { + u32 revision; + u8 cache_line_size; + u8 latency_timer; + u8 enable_serr; + u8 enable_perr; +}; + +/* PCI-X Setting Record (Type 1) */ +struct hpp_type1 { + u32 revision; + u8 max_mem_read; + u8 avg_max_split; + u16 tot_max_split; +}; + +/* PCI Express Setting Record (Type 2) */ +struct hpp_type2 { + u32 revision; + u32 unc_err_mask_and; + u32 unc_err_mask_or; + u32 unc_err_sever_and; + u32 unc_err_sever_or; + u32 cor_err_mask_and; + u32 cor_err_mask_or; + u32 adv_err_cap_and; + u32 adv_err_cap_or; + u16 pci_exp_devctl_and; + u16 pci_exp_devctl_or; + u16 pci_exp_lnkctl_and; + u16 pci_exp_lnkctl_or; + u32 sec_unc_err_sever_and; + u32 sec_unc_err_sever_or; + u32 sec_unc_err_mask_and; + u32 
sec_unc_err_mask_or; +}; + +struct hotplug_params { + struct hpp_type0 *t0; /* Type0: NULL if not available */ + struct hpp_type1 *t1; /* Type1: NULL if not available */ + struct hpp_type2 *t2; /* Type2: NULL if not available */ + struct hpp_type0 type0_data; + struct hpp_type1 type1_data; + struct hpp_type2 type2_data; +}; #ifdef CONFIG_ACPI #include -bool pciehp_is_native(struct pci_dev *bridge); -int acpi_get_hp_hw_control_from_firmware(struct pci_dev *bridge); -bool shpchp_is_native(struct pci_dev *bridge); +int pci_get_hp_params(struct pci_dev *dev, struct hotplug_params *hpp); +int acpi_get_hp_hw_control_from_firmware(struct pci_dev *dev, u32 flags); int acpi_pci_check_ejectable(struct pci_bus *pbus, acpi_handle handle); int acpi_pci_detect_ejectable(acpi_handle handle); #else -static inline int acpi_get_hp_hw_control_from_firmware(struct pci_dev *bridge) +static inline int pci_get_hp_params(struct pci_dev *dev, + struct hotplug_params *hpp) { - return 0; -} -static inline bool pciehp_is_native(struct pci_dev *bridge) { return true; } -static inline bool shpchp_is_native(struct pci_dev *bridge) { return true; } -#endif - -static inline bool hotplug_is_native(struct pci_dev *bridge) -{ - return pciehp_is_native(bridge) || shpchp_is_native(bridge); + return -ENODEV; } #endif +#endif diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h index 011f2f1ea5..3e5dbbe75f 100644 --- a/include/linux/pci_ids.h +++ b/include/linux/pci_ids.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ /* * PCI Class, Vendor and Device IDs * @@ -24,10 +23,8 @@ #define PCI_CLASS_STORAGE_SATA 0x0106 #define PCI_CLASS_STORAGE_SATA_AHCI 0x010601 #define PCI_CLASS_STORAGE_SAS 0x0107 -#define PCI_CLASS_STORAGE_EXPRESS 0x010802 #define PCI_CLASS_STORAGE_OTHER 0x0180 - #define PCI_BASE_CLASS_NETWORK 0x02 #define PCI_CLASS_NETWORK_ETHERNET 0x0200 #define PCI_CLASS_NETWORK_TOKEN_RING 0x0201 @@ -45,13 +42,11 @@ #define PCI_CLASS_MULTIMEDIA_VIDEO 0x0400 #define 
PCI_CLASS_MULTIMEDIA_AUDIO 0x0401 #define PCI_CLASS_MULTIMEDIA_PHONE 0x0402 -#define PCI_CLASS_MULTIMEDIA_HD_AUDIO 0x0403 #define PCI_CLASS_MULTIMEDIA_OTHER 0x0480 #define PCI_BASE_CLASS_MEMORY 0x05 #define PCI_CLASS_MEMORY_RAM 0x0500 #define PCI_CLASS_MEMORY_FLASH 0x0501 -#define PCI_CLASS_MEMORY_CXL 0x0502 #define PCI_CLASS_MEMORY_OTHER 0x0580 #define PCI_BASE_CLASS_BRIDGE 0x06 @@ -82,7 +77,6 @@ #define PCI_CLASS_SYSTEM_RTC 0x0803 #define PCI_CLASS_SYSTEM_PCI_HOTPLUG 0x0804 #define PCI_CLASS_SYSTEM_SDHCI 0x0805 -#define PCI_CLASS_SYSTEM_RCEC 0x0807 #define PCI_CLASS_SYSTEM_OTHER 0x0880 #define PCI_BASE_CLASS_INPUT 0x09 @@ -119,10 +113,6 @@ #define PCI_CLASS_SERIAL_USB_DEVICE 0x0c03fe #define PCI_CLASS_SERIAL_FIBER 0x0c04 #define PCI_CLASS_SERIAL_SMBUS 0x0c05 -#define PCI_CLASS_SERIAL_IPMI 0x0c07 -#define PCI_CLASS_SERIAL_IPMI_SMIC 0x0c0700 -#define PCI_CLASS_SERIAL_IPMI_KCS 0x0c0701 -#define PCI_CLASS_SERIAL_IPMI_BT 0x0c0702 #define PCI_BASE_CLASS_WIRELESS 0x0d #define PCI_CLASS_WIRELESS_RF_CONTROLLER 0x0d10 @@ -150,16 +140,12 @@ /* Vendors and devices. Sort key: vendor first, device next. 
*/ -#define PCI_VENDOR_ID_LOONGSON 0x0014 - #define PCI_VENDOR_ID_TTTECH 0x0357 #define PCI_DEVICE_ID_TTTECH_MC322 0x000a #define PCI_VENDOR_ID_DYNALINK 0x0675 #define PCI_DEVICE_ID_DYNALINK_IS64PH 0x1702 -#define PCI_VENDOR_ID_UBIQUITI 0x0777 - #define PCI_VENDOR_ID_BERKOM 0x0871 #define PCI_DEVICE_ID_BERKOM_A1T 0xffa1 #define PCI_DEVICE_ID_BERKOM_T_CONCEPT 0xffa2 @@ -549,14 +535,6 @@ #define PCI_DEVICE_ID_AMD_16H_NB_F4 0x1534 #define PCI_DEVICE_ID_AMD_16H_M30H_NB_F3 0x1583 #define PCI_DEVICE_ID_AMD_16H_M30H_NB_F4 0x1584 -#define PCI_DEVICE_ID_AMD_17H_DF_F3 0x1463 -#define PCI_DEVICE_ID_AMD_17H_M10H_DF_F3 0x15eb -#define PCI_DEVICE_ID_AMD_17H_M30H_DF_F3 0x1493 -#define PCI_DEVICE_ID_AMD_17H_M60H_DF_F3 0x144b -#define PCI_DEVICE_ID_AMD_17H_M70H_DF_F3 0x1443 -#define PCI_DEVICE_ID_AMD_19H_DF_F3 0x1653 -#define PCI_DEVICE_ID_AMD_19H_M40H_DF_F3 0x167c -#define PCI_DEVICE_ID_AMD_19H_M50H_DF_F3 0x166d #define PCI_DEVICE_ID_AMD_CNB17H_F3 0x1703 #define PCI_DEVICE_ID_AMD_LANCE 0x2000 #define PCI_DEVICE_ID_AMD_LANCE_HOME 0x2001 @@ -577,7 +555,6 @@ #define PCI_DEVICE_ID_AMD_OPUS_7443 0x7443 #define PCI_DEVICE_ID_AMD_VIPER_7443 0x7443 #define PCI_DEVICE_ID_AMD_OPUS_7445 0x7445 -#define PCI_DEVICE_ID_AMD_GOLAM_7450 0x7450 #define PCI_DEVICE_ID_AMD_8111_PCI 0x7460 #define PCI_DEVICE_ID_AMD_8111_LPC 0x7468 #define PCI_DEVICE_ID_AMD_8111_IDE 0x7469 @@ -597,7 +574,6 @@ #define PCI_DEVICE_ID_AMD_CS5536_EHC 0x2095 #define PCI_DEVICE_ID_AMD_CS5536_UDC 0x2096 #define PCI_DEVICE_ID_AMD_CS5536_UOC 0x2097 -#define PCI_DEVICE_ID_AMD_CS5536_DEV_IDE 0x2092 #define PCI_DEVICE_ID_AMD_CS5536_IDE 0x209A #define PCI_DEVICE_ID_AMD_LX_VIDEO 0x2081 #define PCI_DEVICE_ID_AMD_LX_AES 0x2082 @@ -633,8 +609,6 @@ #define PCI_DEVICE_ID_DELL_RAC4 0x0012 #define PCI_DEVICE_ID_DELL_PERC5 0x0015 -#define PCI_SUBVENDOR_ID_DELL 0x1028 - #define PCI_VENDOR_ID_MATROX 0x102B #define PCI_DEVICE_ID_MATROX_MGA_2 0x0518 #define PCI_DEVICE_ID_MATROX_MIL 0x0519 @@ -886,9 +860,6 @@ #define PCI_DEVICE_ID_TI_X620 0xac8d 
#define PCI_DEVICE_ID_TI_X420 0xac8e #define PCI_DEVICE_ID_TI_XX20_FM 0xac8f -#define PCI_DEVICE_ID_TI_J721E 0xb00d -#define PCI_DEVICE_ID_TI_DRA74x 0xb500 -#define PCI_DEVICE_ID_TI_DRA72x 0xb501 #define PCI_VENDOR_ID_SONY 0x104d @@ -1082,6 +1053,7 @@ #define PCI_VENDOR_ID_SGI 0x10a9 #define PCI_DEVICE_ID_SGI_IOC3 0x0003 #define PCI_DEVICE_ID_SGI_LITHIUM 0x1002 +#define PCI_DEVICE_ID_SGI_IOC4 0x100a #define PCI_VENDOR_ID_WINBOND 0x10ad #define PCI_DEVICE_ID_WINBOND_82C105 0x0105 @@ -1122,9 +1094,8 @@ #define PCI_DEVICE_ID_3COM_3CR990SVR 0x990a #define PCI_VENDOR_ID_AL 0x10b9 -#define PCI_DEVICE_ID_AL_M1489 0x1489 #define PCI_DEVICE_ID_AL_M1533 0x1533 -#define PCI_DEVICE_ID_AL_M1535 0x1535 +#define PCI_DEVICE_ID_AL_M1535 0x1535 #define PCI_DEVICE_ID_AL_M1541 0x1541 #define PCI_DEVICE_ID_AL_M1563 0x1563 #define PCI_DEVICE_ID_AL_M1621 0x1621 @@ -1152,8 +1123,6 @@ #define PCI_VENDOR_ID_TCONRAD 0x10da #define PCI_DEVICE_ID_TCONRAD_TOKENRING 0x0508 -#define PCI_VENDOR_ID_ROHM 0x10db - #define PCI_VENDOR_ID_NVIDIA 0x10de #define PCI_DEVICE_ID_NVIDIA_TNT 0x0020 #define PCI_DEVICE_ID_NVIDIA_TNT2 0x0028 @@ -1348,7 +1317,6 @@ #define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP78S_SMBUS 0x0752 #define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP77_IDE 0x0759 #define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP73_SMBUS 0x07D8 -#define PCI_DEVICE_ID_NVIDIA_GEFORCE_320M 0x08A0 #define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP79_SMBUS 0x0AA2 #define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP89_SATA 0x0D85 @@ -1357,7 +1325,6 @@ #define PCI_DEVICE_ID_IMS_TT3D 0x9135 #define PCI_VENDOR_ID_AMCC 0x10e8 -#define PCI_VENDOR_ID_AMPERE 0x1def #define PCI_VENDOR_ID_INTERG 0x10ea #define PCI_DEVICE_ID_INTERG_1682 0x1682 @@ -1402,8 +1369,6 @@ #define PCI_DEVICE_ID_TTI_HPT374 0x0008 #define PCI_DEVICE_ID_TTI_HPT372N 0x0009 /* apparently a 372N variant? 
*/ -#define PCI_VENDOR_ID_SIGMA 0x1105 - #define PCI_VENDOR_ID_VIA 0x1106 #define PCI_DEVICE_ID_VIA_8763_0 0x0198 #define PCI_DEVICE_ID_VIA_8380_0 0x0204 @@ -1586,8 +1551,6 @@ #define PCI_DEVICE_ID_SERVERWORKS_CSB6LPC 0x0227 #define PCI_DEVICE_ID_SERVERWORKS_HT1100LD 0x0408 -#define PCI_VENDOR_ID_ALTERA 0x1172 - #define PCI_VENDOR_ID_SBE 0x1176 #define PCI_DEVICE_ID_SBE_WANXL100 0x0301 #define PCI_DEVICE_ID_SBE_WANXL200 0x0302 @@ -1690,11 +1653,39 @@ #define PCI_DEVICE_ID_COMPEX_ENET100VG4 0x0112 #define PCI_VENDOR_ID_PMC_Sierra 0x11f8 -#define PCI_VENDOR_ID_MICROSEMI 0x11f8 #define PCI_VENDOR_ID_RP 0x11fe +#define PCI_DEVICE_ID_RP32INTF 0x0001 +#define PCI_DEVICE_ID_RP8INTF 0x0002 +#define PCI_DEVICE_ID_RP16INTF 0x0003 +#define PCI_DEVICE_ID_RP4QUAD 0x0004 +#define PCI_DEVICE_ID_RP8OCTA 0x0005 +#define PCI_DEVICE_ID_RP8J 0x0006 +#define PCI_DEVICE_ID_RP4J 0x0007 +#define PCI_DEVICE_ID_RP8SNI 0x0008 +#define PCI_DEVICE_ID_RP16SNI 0x0009 +#define PCI_DEVICE_ID_RPP4 0x000A +#define PCI_DEVICE_ID_RPP8 0x000B +#define PCI_DEVICE_ID_RP4M 0x000D +#define PCI_DEVICE_ID_RP2_232 0x000E +#define PCI_DEVICE_ID_RP2_422 0x000F +#define PCI_DEVICE_ID_URP32INTF 0x0801 +#define PCI_DEVICE_ID_URP8INTF 0x0802 +#define PCI_DEVICE_ID_URP16INTF 0x0803 +#define PCI_DEVICE_ID_URP8OCTA 0x0805 +#define PCI_DEVICE_ID_UPCI_RM3_8PORT 0x080C +#define PCI_DEVICE_ID_UPCI_RM3_4PORT 0x080D +#define PCI_DEVICE_ID_CRP16INTF 0x0903 #define PCI_VENDOR_ID_CYCLADES 0x120e +#define PCI_DEVICE_ID_CYCLOM_Y_Lo 0x0100 +#define PCI_DEVICE_ID_CYCLOM_Y_Hi 0x0101 +#define PCI_DEVICE_ID_CYCLOM_4Y_Lo 0x0102 +#define PCI_DEVICE_ID_CYCLOM_4Y_Hi 0x0103 +#define PCI_DEVICE_ID_CYCLOM_8Y_Lo 0x0104 +#define PCI_DEVICE_ID_CYCLOM_8Y_Hi 0x0105 +#define PCI_DEVICE_ID_CYCLOM_Z_Lo 0x0200 +#define PCI_DEVICE_ID_CYCLOM_Z_Hi 0x0201 #define PCI_DEVICE_ID_PC300_RX_2 0x0300 #define PCI_DEVICE_ID_PC300_RX_1 0x0301 #define PCI_DEVICE_ID_PC300_TE_2 0x0310 @@ -1736,7 +1727,7 @@ #define PCI_VENDOR_ID_STALLION 0x124d /* Allied Telesyn */ 
-#define PCI_VENDOR_ID_AT 0x1259 +#define PCI_VENDOR_ID_AT 0x1259 #define PCI_SUBDEVICE_ID_AT_2700FX 0x2701 #define PCI_SUBDEVICE_ID_AT_2701FX 0x2703 @@ -1812,12 +1803,6 @@ #define PCI_VENDOR_ID_NVIDIA_SGS 0x12d2 #define PCI_DEVICE_ID_NVIDIA_SGS_RIVA128 0x0018 -#define PCI_VENDOR_ID_PERICOM 0x12D8 -#define PCI_DEVICE_ID_PERICOM_PI7C9X7951 0x7951 -#define PCI_DEVICE_ID_PERICOM_PI7C9X7952 0x7952 -#define PCI_DEVICE_ID_PERICOM_PI7C9X7954 0x7954 -#define PCI_DEVICE_ID_PERICOM_PI7C9X7958 0x7958 - #define PCI_SUBVENDOR_ID_CHASE_PCIFAST 0x12E0 #define PCI_SUBDEVICE_ID_CHASE_PCIFAST4 0x0031 #define PCI_SUBDEVICE_ID_CHASE_PCIFAST8 0x0021 @@ -1940,8 +1925,6 @@ #define PCI_VENDOR_ID_DIGIGRAM 0x1369 #define PCI_SUBDEVICE_ID_DIGIGRAM_LX6464ES_SERIAL_SUBSYSTEM 0xc001 #define PCI_SUBDEVICE_ID_DIGIGRAM_LX6464ES_CAE_SERIAL_SUBSYSTEM 0xc002 -#define PCI_SUBDEVICE_ID_DIGIGRAM_LX6464ESE_SERIAL_SUBSYSTEM 0xc021 -#define PCI_SUBDEVICE_ID_DIGIGRAM_LX6464ESE_CAE_SERIAL_SUBSYSTEM 0xc022 #define PCI_VENDOR_ID_KAWASAKI 0x136b #define PCI_DEVICE_ID_MCHIP_KL5A72002 0xff01 @@ -2041,6 +2024,8 @@ #define PCI_DEVICE_ID_EXAR_XR17V358 0x0358 #define PCI_VENDOR_ID_MICROGATE 0x13c0 +#define PCI_DEVICE_ID_MICROGATE_USC 0x0010 +#define PCI_DEVICE_ID_MICROGATE_SCA 0x0030 #define PCI_VENDOR_ID_3WARE 0x13C1 #define PCI_DEVICE_ID_3WARE_1000 0x1000 @@ -2120,9 +2105,6 @@ #define PCI_VENDOR_ID_MYRICOM 0x14c1 -#define PCI_VENDOR_ID_MEDIATEK 0x14c3 -#define PCI_DEVICE_ID_MEDIATEK_7629 0x7629 - #define PCI_VENDOR_ID_TITAN 0x14D2 #define PCI_DEVICE_ID_TITAN_010L 0x8001 #define PCI_DEVICE_ID_TITAN_100L 0x8010 @@ -2269,7 +2251,6 @@ #define PCI_DEVICE_ID_RASTEL_2PORT 0x2000 #define PCI_VENDOR_ID_VMWARE 0x15ad -#define PCI_DEVICE_ID_VMWARE_VMXNET3 0x07b0 #define PCI_VENDOR_ID_ZOLTRIX 0x15b0 #define PCI_DEVICE_ID_ZOLTRIX_2BD0 0x2bd0 @@ -2354,12 +2335,6 @@ #define PCI_DEVICE_ID_CENATEK_IDE 0x0001 #define PCI_VENDOR_ID_SYNOPSYS 0x16c3 -#define PCI_DEVICE_ID_SYNOPSYS_HAPSUSB3 0xabcd -#define 
PCI_DEVICE_ID_SYNOPSYS_HAPSUSB3_AXI 0xabce -#define PCI_DEVICE_ID_SYNOPSYS_HAPSUSB31 0xabcf -#define PCI_DEVICE_ID_SYNOPSYS_EDDA 0xedda - -#define PCI_VENDOR_ID_USR 0x16ec #define PCI_VENDOR_ID_VITESSE 0x1725 #define PCI_DEVICE_ID_VITESSE_VSC7174 0x7174 @@ -2395,14 +2370,8 @@ #define PCI_DEVICE_ID_RDC_R6061 0x6061 #define PCI_DEVICE_ID_RDC_D1010 0x1010 -#define PCI_VENDOR_ID_GLI 0x17a0 - #define PCI_VENDOR_ID_LENOVO 0x17aa -#define PCI_VENDOR_ID_QCOM 0x17cb - -#define PCI_VENDOR_ID_CDNS 0x17cd - #define PCI_VENDOR_ID_ARECA 0x17d3 #define PCI_DEVICE_ID_ARECA_1110 0x1110 #define PCI_DEVICE_ID_ARECA_1120 0x1120 @@ -2453,8 +2422,7 @@ #define PCI_VENDOR_ID_TDI 0x192E #define PCI_DEVICE_ID_TDI_EHCI 0x0101 -#define PCI_VENDOR_ID_FREESCALE 0x1957 /* duplicate: NXP */ -#define PCI_VENDOR_ID_NXP 0x1957 /* duplicate: FREESCALE */ +#define PCI_VENDOR_ID_FREESCALE 0x1957 #define PCI_DEVICE_ID_MPC8308 0xc006 #define PCI_DEVICE_ID_MPC8315E 0x00b4 #define PCI_DEVICE_ID_MPC8315 0x00b5 @@ -2545,11 +2513,12 @@ #define PCI_DEVICE_ID_KORENIX_JETCARDF2 0x1700 #define PCI_DEVICE_ID_KORENIX_JETCARDF3 0x17ff -#define PCI_VENDOR_ID_HUAWEI 0x19e5 +#define PCI_VENDOR_ID_HUAWEI 0x19e5 #define PCI_VENDOR_ID_NETRONOME 0x19ee +#define PCI_DEVICE_ID_NETRONOME_NFP3200 0x3200 +#define PCI_DEVICE_ID_NETRONOME_NFP3240 0x3240 #define PCI_DEVICE_ID_NETRONOME_NFP4000 0x4000 -#define PCI_DEVICE_ID_NETRONOME_NFP5000 0x5000 #define PCI_DEVICE_ID_NETRONOME_NFP6000 0x6000 #define PCI_DEVICE_ID_NETRONOME_NFP6000_VF 0x6003 @@ -2563,23 +2532,9 @@ #define PCI_VENDOR_ID_ASMEDIA 0x1b21 -#define PCI_VENDOR_ID_REDHAT 0x1b36 - -#define PCI_VENDOR_ID_SILICOM_DENMARK 0x1c2c - -#define PCI_VENDOR_ID_AMAZON_ANNAPURNA_LABS 0x1c36 - #define PCI_VENDOR_ID_CIRCUITCO 0x1cc8 #define PCI_SUBSYSTEM_ID_CIRCUITCO_MINNOWBOARD 0x0001 -#define PCI_VENDOR_ID_AMAZON 0x1d0f - -#define PCI_VENDOR_ID_ZHAOXIN 0x1d17 - -#define PCI_VENDOR_ID_HYGON 0x1d94 - -#define PCI_VENDOR_ID_HXT 0x1dbf - #define PCI_VENDOR_ID_TEKRAM 0x1de1 #define 
PCI_DEVICE_ID_TEKRAM_DC290 0xdc29 @@ -2588,9 +2543,6 @@ #define PCI_DEVICE_ID_TEHUTI_3010 0x3010 #define PCI_DEVICE_ID_TEHUTI_3014 0x3014 -#define PCI_VENDOR_ID_SUNIX 0x1fd4 -#define PCI_DEVICE_ID_SUNIX_1999 0x1999 - #define PCI_VENDOR_ID_HINT 0x3388 #define PCI_DEVICE_ID_HINT_VXPROII_IDE 0x8013 @@ -2641,12 +2593,9 @@ #define PCI_DEVICE_ID_INTEL_80332_1 0x0332 #define PCI_DEVICE_ID_INTEL_80333_0 0x0370 #define PCI_DEVICE_ID_INTEL_80333_1 0x0372 -#define PCI_DEVICE_ID_INTEL_QAT_DH895XCC 0x0435 -#define PCI_DEVICE_ID_INTEL_QAT_DH895XCC_VF 0x0443 #define PCI_DEVICE_ID_INTEL_82375 0x0482 #define PCI_DEVICE_ID_INTEL_82424 0x0483 #define PCI_DEVICE_ID_INTEL_82378 0x0484 -#define PCI_DEVICE_ID_INTEL_82425 0x0486 #define PCI_DEVICE_ID_INTEL_MRST_SD0 0x0807 #define PCI_DEVICE_ID_INTEL_MRST_SD1 0x0808 #define PCI_DEVICE_ID_INTEL_MFD_SD 0x0820 @@ -2693,8 +2642,6 @@ #define PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_4C_NHI 0x1577 #define PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_4C_BRIDGE 0x1578 #define PCI_DEVICE_ID_INTEL_80960_RP 0x1960 -#define PCI_DEVICE_ID_INTEL_QAT_C3XXX 0x19e2 -#define PCI_DEVICE_ID_INTEL_QAT_C3XXX_VF 0x19e3 #define PCI_DEVICE_ID_INTEL_82840_HB 0x1a21 #define PCI_DEVICE_ID_INTEL_82845_HB 0x1a30 #define PCI_DEVICE_ID_INTEL_IOAT 0x1a38 @@ -2705,7 +2652,6 @@ #define PCI_DEVICE_ID_INTEL_PANTHERPOINT_XHCI 0x1e31 #define PCI_DEVICE_ID_INTEL_PANTHERPOINT_LPC_MIN 0x1e40 #define PCI_DEVICE_ID_INTEL_PANTHERPOINT_LPC_MAX 0x1e5f -#define PCI_DEVICE_ID_INTEL_VMD_201D 0x201d #define PCI_DEVICE_ID_INTEL_DH89XXCC_LPC_MIN 0x2310 #define PCI_DEVICE_ID_INTEL_DH89XXCC_LPC_MAX 0x231f #define PCI_DEVICE_ID_INTEL_82801AA_0 0x2410 @@ -2810,7 +2756,6 @@ #define PCI_DEVICE_ID_INTEL_ICH8_4 0x2815 #define PCI_DEVICE_ID_INTEL_ICH8_5 0x283e #define PCI_DEVICE_ID_INTEL_ICH8_6 0x2850 -#define PCI_DEVICE_ID_INTEL_VMD_28C0 0x28c0 #define PCI_DEVICE_ID_INTEL_ICH9_0 0x2910 #define PCI_DEVICE_ID_INTEL_ICH9_1 0x2917 #define PCI_DEVICE_ID_INTEL_ICH9_2 0x2912 @@ -2911,8 +2856,6 @@ #define 
PCI_DEVICE_ID_INTEL_IOAT_JSF7 0x3717 #define PCI_DEVICE_ID_INTEL_IOAT_JSF8 0x3718 #define PCI_DEVICE_ID_INTEL_IOAT_JSF9 0x3719 -#define PCI_DEVICE_ID_INTEL_QAT_C62X 0x37c8 -#define PCI_DEVICE_ID_INTEL_QAT_C62X_VF 0x37c9 #define PCI_DEVICE_ID_INTEL_ICH10_0 0x3a14 #define PCI_DEVICE_ID_INTEL_ICH10_1 0x3a16 #define PCI_DEVICE_ID_INTEL_ICH10_2 0x3a18 @@ -3007,7 +2950,6 @@ #define PCI_DEVICE_ID_INTEL_84460GX 0x84ea #define PCI_DEVICE_ID_INTEL_IXP4XX 0x8500 #define PCI_DEVICE_ID_INTEL_IXP2800 0x9004 -#define PCI_DEVICE_ID_INTEL_VMD_9A0B 0x9a0b #define PCI_DEVICE_ID_INTEL_S21152BB 0xb152 #define PCI_VENDOR_ID_SCALEMP 0x8686 @@ -3111,6 +3053,4 @@ #define PCI_VENDOR_ID_OCZ 0x1b85 -#define PCI_VENDOR_ID_NCUBE 0x10ff - #endif /* _LINUX_PCI_IDS_H */ diff --git a/include/linux/pcieport_if.h b/include/linux/pcieport_if.h new file mode 100644 index 0000000000..afcd130ab3 --- /dev/null +++ b/include/linux/pcieport_if.h @@ -0,0 +1,70 @@ +/* + * File: pcieport_if.h + * Purpose: PCI Express Port Bus Driver's IF Data Structure + * + * Copyright (C) 2004 Intel + * Copyright (C) Tom Long Nguyen (tom.l.nguyen@intel.com) + */ + +#ifndef _PCIEPORT_IF_H_ +#define _PCIEPORT_IF_H_ + +/* Port Type */ +#define PCIE_ANY_PORT (~0) + +/* Service Type */ +#define PCIE_PORT_SERVICE_PME_SHIFT 0 /* Power Management Event */ +#define PCIE_PORT_SERVICE_PME (1 << PCIE_PORT_SERVICE_PME_SHIFT) +#define PCIE_PORT_SERVICE_AER_SHIFT 1 /* Advanced Error Reporting */ +#define PCIE_PORT_SERVICE_AER (1 << PCIE_PORT_SERVICE_AER_SHIFT) +#define PCIE_PORT_SERVICE_HP_SHIFT 2 /* Native Hotplug */ +#define PCIE_PORT_SERVICE_HP (1 << PCIE_PORT_SERVICE_HP_SHIFT) +#define PCIE_PORT_SERVICE_VC_SHIFT 3 /* Virtual Channel */ +#define PCIE_PORT_SERVICE_VC (1 << PCIE_PORT_SERVICE_VC_SHIFT) +#define PCIE_PORT_SERVICE_DPC_SHIFT 4 /* Downstream Port Containment */ +#define PCIE_PORT_SERVICE_DPC (1 << PCIE_PORT_SERVICE_DPC_SHIFT) + +struct pcie_device { + int irq; /* Service IRQ/MSI/MSI-X Vector */ + struct pci_dev *port; /* 
Root/Upstream/Downstream Port */ + u32 service; /* Port service this device represents */ + void *priv_data; /* Service Private Data */ + struct device device; /* Generic Device Interface */ +}; +#define to_pcie_device(d) container_of(d, struct pcie_device, device) + +static inline void set_service_data(struct pcie_device *dev, void *data) +{ + dev->priv_data = data; +} + +static inline void* get_service_data(struct pcie_device *dev) +{ + return dev->priv_data; +} + +struct pcie_port_service_driver { + const char *name; + int (*probe) (struct pcie_device *dev); + void (*remove) (struct pcie_device *dev); + int (*suspend) (struct pcie_device *dev); + int (*resume) (struct pcie_device *dev); + + /* Service Error Recovery Handler */ + const struct pci_error_handlers *err_handler; + + /* Link Reset Capability - AER service driver specific */ + pci_ers_result_t (*reset_link) (struct pci_dev *dev); + + int port_type; /* Type of the port this driver can handle */ + u32 service; /* Port service this device represents */ + + struct device_driver driver; +}; +#define to_service_driver(d) \ + container_of(d, struct pcie_port_service_driver, driver) + +int pcie_port_service_register(struct pcie_port_service_driver *new); +void pcie_port_service_unregister(struct pcie_port_service_driver *new); + +#endif /* _PCIEPORT_IF_H_ */ diff --git a/include/linux/pda_power.h b/include/linux/pda_power.h index 2a69db4b60..2bb62bf296 100644 --- a/include/linux/pda_power.h +++ b/include/linux/pda_power.h @@ -1,10 +1,13 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * Common power driver for PDAs and phones with one or two external * power supplies (AC/USB) connected to main and backup batteries, * and optional builtin charger. * * Copyright © 2007 Anton Vorontsov + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
*/ #ifndef __PDA_POWER_H__ diff --git a/include/linux/pe.h b/include/linux/pe.h index daf09ffffe..e170b95e76 100644 --- a/include/linux/pe.h +++ b/include/linux/pe.h @@ -1,8 +1,19 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright 2011 Red Hat, Inc. * All rights reserved. * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; version 2 of the License. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + * * Author(s): Peter Jones */ #ifndef __LINUX_PE_H @@ -10,29 +21,36 @@ #include -/* - * Linux EFI stub v1.0 adds the following functionality: - * - Loading initrd from the LINUX_EFI_INITRD_MEDIA_GUID device path, - * - Loading/starting the kernel from firmware that targets a different - * machine type, via the entrypoint exposed in the .compat PE/COFF section. - * - * The recommended way of loading and starting v1.0 or later kernels is to use - * the LoadImage() and StartImage() EFI boot services, and expose the initrd - * via the LINUX_EFI_INITRD_MEDIA_GUID device path. - * - * Versions older than v1.0 support initrd loading via the image load options - * (using initrd=, limited to the volume from which the kernel itself was - * loaded), or via arch specific means (bootparams, DT, etc). - * - * On x86, LoadImage() and StartImage() can be omitted if the EFI handover - * protocol is implemented, which can be inferred from the version, - * handover_offset and xloadflags fields in the bootparams structure. 
- */ -#define LINUX_EFISTUB_MAJOR_VERSION 0x1 -#define LINUX_EFISTUB_MINOR_VERSION 0x0 - #define MZ_MAGIC 0x5a4d /* "MZ" */ +struct mz_hdr { + uint16_t magic; /* MZ_MAGIC */ + uint16_t lbsize; /* size of last used block */ + uint16_t blocks; /* pages in file, 0x3 */ + uint16_t relocs; /* relocations */ + uint16_t hdrsize; /* header size in "paragraphs" */ + uint16_t min_extra_pps; /* .bss */ + uint16_t max_extra_pps; /* runtime limit for the arena size */ + uint16_t ss; /* relative stack segment */ + uint16_t sp; /* initial %sp register */ + uint16_t checksum; /* word checksum */ + uint16_t ip; /* initial %ip register */ + uint16_t cs; /* initial %cs relative to load segment */ + uint16_t reloc_table_offset; /* offset of the first relocation */ + uint16_t overlay_num; /* overlay number. set to 0. */ + uint16_t reserved0[4]; /* reserved */ + uint16_t oem_id; /* oem identifier */ + uint16_t oem_info; /* oem specific */ + uint16_t reserved1[10]; /* reserved */ + uint32_t peaddr; /* address of pe header */ + char message[64]; /* message to print */ +}; + +struct mz_reloc { + uint16_t offset; + uint16_t segment; +}; + #define PE_MAGIC 0x00004550 /* "PE\0\0" */ #define PE_OPT_MAGIC_PE32 0x010b #define PE_OPT_MAGIC_PE32_ROM 0x0107 @@ -44,7 +62,6 @@ #define IMAGE_FILE_MACHINE_AMD64 0x8664 #define IMAGE_FILE_MACHINE_ARM 0x01c0 #define IMAGE_FILE_MACHINE_ARMV7 0x01c4 -#define IMAGE_FILE_MACHINE_ARM64 0xaa64 #define IMAGE_FILE_MACHINE_EBC 0x0ebc #define IMAGE_FILE_MACHINE_I386 0x014c #define IMAGE_FILE_MACHINE_IA64 0x0200 @@ -55,9 +72,6 @@ #define IMAGE_FILE_MACHINE_POWERPC 0x01f0 #define IMAGE_FILE_MACHINE_POWERPCFP 0x01f1 #define IMAGE_FILE_MACHINE_R4000 0x0166 -#define IMAGE_FILE_MACHINE_RISCV32 0x5032 -#define IMAGE_FILE_MACHINE_RISCV64 0x5064 -#define IMAGE_FILE_MACHINE_RISCV128 0x5128 #define IMAGE_FILE_MACHINE_SH3 0x01a2 #define IMAGE_FILE_MACHINE_SH3DSP 0x01a3 #define IMAGE_FILE_MACHINE_SH3E 0x01a4 @@ -84,6 +98,17 @@ #define IMAGE_FILE_UP_SYSTEM_ONLY 0x4000 #define 
IMAGE_FILE_BYTES_REVERSED_HI 0x8000 +struct pe_hdr { + uint32_t magic; /* PE magic */ + uint16_t machine; /* machine type */ + uint16_t sections; /* number of sections */ + uint32_t timestamp; /* time_t */ + uint32_t symbol_table; /* symbol table offset */ + uint32_t symbols; /* number of symbols */ + uint16_t opt_hdr_size; /* size of optional header */ + uint16_t flags; /* flags */ +}; + #define IMAGE_FILE_OPT_ROM_MAGIC 0x107 #define IMAGE_FILE_OPT_PE32_MAGIC 0x10b #define IMAGE_FILE_OPT_PE32_PLUS_MAGIC 0x20b @@ -109,95 +134,6 @@ #define IMAGE_DLLCHARACTERISTICS_WDM_DRIVER 0x2000 #define IMAGE_DLLCHARACTERISTICS_TERMINAL_SERVER_AWARE 0x8000 -/* they actually defined 0x00000000 as well, but I think we'll skip that one. */ -#define IMAGE_SCN_RESERVED_0 0x00000001 -#define IMAGE_SCN_RESERVED_1 0x00000002 -#define IMAGE_SCN_RESERVED_2 0x00000004 -#define IMAGE_SCN_TYPE_NO_PAD 0x00000008 /* don't pad - obsolete */ -#define IMAGE_SCN_RESERVED_3 0x00000010 -#define IMAGE_SCN_CNT_CODE 0x00000020 /* .text */ -#define IMAGE_SCN_CNT_INITIALIZED_DATA 0x00000040 /* .data */ -#define IMAGE_SCN_CNT_UNINITIALIZED_DATA 0x00000080 /* .bss */ -#define IMAGE_SCN_LNK_OTHER 0x00000100 /* reserved */ -#define IMAGE_SCN_LNK_INFO 0x00000200 /* .drectve comments */ -#define IMAGE_SCN_RESERVED_4 0x00000400 -#define IMAGE_SCN_LNK_REMOVE 0x00000800 /* .o only - scn to be rm'd*/ -#define IMAGE_SCN_LNK_COMDAT 0x00001000 /* .o only - COMDAT data */ -#define IMAGE_SCN_RESERVED_5 0x00002000 /* spec omits this */ -#define IMAGE_SCN_RESERVED_6 0x00004000 /* spec omits this */ -#define IMAGE_SCN_GPREL 0x00008000 /* global pointer referenced data */ -/* spec lists 0x20000 twice, I suspect they meant 0x10000 for one of them */ -#define IMAGE_SCN_MEM_PURGEABLE 0x00010000 /* reserved for "future" use */ -#define IMAGE_SCN_16BIT 0x00020000 /* reserved for "future" use */ -#define IMAGE_SCN_LOCKED 0x00040000 /* reserved for "future" use */ -#define IMAGE_SCN_PRELOAD 0x00080000 /* reserved for "future" use 
*/ -/* and here they just stuck a 1-byte integer in the middle of a bitfield */ -#define IMAGE_SCN_ALIGN_1BYTES 0x00100000 /* it does what it says on the box */ -#define IMAGE_SCN_ALIGN_2BYTES 0x00200000 -#define IMAGE_SCN_ALIGN_4BYTES 0x00300000 -#define IMAGE_SCN_ALIGN_8BYTES 0x00400000 -#define IMAGE_SCN_ALIGN_16BYTES 0x00500000 -#define IMAGE_SCN_ALIGN_32BYTES 0x00600000 -#define IMAGE_SCN_ALIGN_64BYTES 0x00700000 -#define IMAGE_SCN_ALIGN_128BYTES 0x00800000 -#define IMAGE_SCN_ALIGN_256BYTES 0x00900000 -#define IMAGE_SCN_ALIGN_512BYTES 0x00a00000 -#define IMAGE_SCN_ALIGN_1024BYTES 0x00b00000 -#define IMAGE_SCN_ALIGN_2048BYTES 0x00c00000 -#define IMAGE_SCN_ALIGN_4096BYTES 0x00d00000 -#define IMAGE_SCN_ALIGN_8192BYTES 0x00e00000 -#define IMAGE_SCN_LNK_NRELOC_OVFL 0x01000000 /* extended relocations */ -#define IMAGE_SCN_MEM_DISCARDABLE 0x02000000 /* scn can be discarded */ -#define IMAGE_SCN_MEM_NOT_CACHED 0x04000000 /* cannot be cached */ -#define IMAGE_SCN_MEM_NOT_PAGED 0x08000000 /* not pageable */ -#define IMAGE_SCN_MEM_SHARED 0x10000000 /* can be shared */ -#define IMAGE_SCN_MEM_EXECUTE 0x20000000 /* can be executed as code */ -#define IMAGE_SCN_MEM_READ 0x40000000 /* readable */ -#define IMAGE_SCN_MEM_WRITE 0x80000000 /* writeable */ - -#define IMAGE_DEBUG_TYPE_CODEVIEW 2 - -#ifndef __ASSEMBLY__ - -struct mz_hdr { - uint16_t magic; /* MZ_MAGIC */ - uint16_t lbsize; /* size of last used block */ - uint16_t blocks; /* pages in file, 0x3 */ - uint16_t relocs; /* relocations */ - uint16_t hdrsize; /* header size in "paragraphs" */ - uint16_t min_extra_pps; /* .bss */ - uint16_t max_extra_pps; /* runtime limit for the arena size */ - uint16_t ss; /* relative stack segment */ - uint16_t sp; /* initial %sp register */ - uint16_t checksum; /* word checksum */ - uint16_t ip; /* initial %ip register */ - uint16_t cs; /* initial %cs relative to load segment */ - uint16_t reloc_table_offset; /* offset of the first relocation */ - uint16_t overlay_num; /* overlay number. 
set to 0. */ - uint16_t reserved0[4]; /* reserved */ - uint16_t oem_id; /* oem identifier */ - uint16_t oem_info; /* oem specific */ - uint16_t reserved1[10]; /* reserved */ - uint32_t peaddr; /* address of pe header */ - char message[]; /* message to print */ -}; - -struct mz_reloc { - uint16_t offset; - uint16_t segment; -}; - -struct pe_hdr { - uint32_t magic; /* PE magic */ - uint16_t machine; /* machine type */ - uint16_t sections; /* number of sections */ - uint32_t timestamp; /* time_t */ - uint32_t symbol_table; /* symbol table offset */ - uint32_t symbols; /* number of symbols */ - uint16_t opt_hdr_size; /* size of optional header */ - uint16_t flags; /* flags */ -}; - /* the fact that pe32 isn't padded where pe32+ is 64-bit means union won't * work right. vomit. */ struct pe32_opt_hdr { @@ -307,6 +243,52 @@ struct section_header { uint32_t flags; }; +/* they actually defined 0x00000000 as well, but I think we'll skip that one. */ +#define IMAGE_SCN_RESERVED_0 0x00000001 +#define IMAGE_SCN_RESERVED_1 0x00000002 +#define IMAGE_SCN_RESERVED_2 0x00000004 +#define IMAGE_SCN_TYPE_NO_PAD 0x00000008 /* don't pad - obsolete */ +#define IMAGE_SCN_RESERVED_3 0x00000010 +#define IMAGE_SCN_CNT_CODE 0x00000020 /* .text */ +#define IMAGE_SCN_CNT_INITIALIZED_DATA 0x00000040 /* .data */ +#define IMAGE_SCN_CNT_UNINITIALIZED_DATA 0x00000080 /* .bss */ +#define IMAGE_SCN_LNK_OTHER 0x00000100 /* reserved */ +#define IMAGE_SCN_LNK_INFO 0x00000200 /* .drectve comments */ +#define IMAGE_SCN_RESERVED_4 0x00000400 +#define IMAGE_SCN_LNK_REMOVE 0x00000800 /* .o only - scn to be rm'd*/ +#define IMAGE_SCN_LNK_COMDAT 0x00001000 /* .o only - COMDAT data */ +#define IMAGE_SCN_RESERVED_5 0x00002000 /* spec omits this */ +#define IMAGE_SCN_RESERVED_6 0x00004000 /* spec omits this */ +#define IMAGE_SCN_GPREL 0x00008000 /* global pointer referenced data */ +/* spec lists 0x20000 twice, I suspect they meant 0x10000 for one of them */ +#define IMAGE_SCN_MEM_PURGEABLE 0x00010000 /* reserved 
for "future" use */ +#define IMAGE_SCN_16BIT 0x00020000 /* reserved for "future" use */ +#define IMAGE_SCN_LOCKED 0x00040000 /* reserved for "future" use */ +#define IMAGE_SCN_PRELOAD 0x00080000 /* reserved for "future" use */ +/* and here they just stuck a 1-byte integer in the middle of a bitfield */ +#define IMAGE_SCN_ALIGN_1BYTES 0x00100000 /* it does what it says on the box */ +#define IMAGE_SCN_ALIGN_2BYTES 0x00200000 +#define IMAGE_SCN_ALIGN_4BYTES 0x00300000 +#define IMAGE_SCN_ALIGN_8BYTES 0x00400000 +#define IMAGE_SCN_ALIGN_16BYTES 0x00500000 +#define IMAGE_SCN_ALIGN_32BYTES 0x00600000 +#define IMAGE_SCN_ALIGN_64BYTES 0x00700000 +#define IMAGE_SCN_ALIGN_128BYTES 0x00800000 +#define IMAGE_SCN_ALIGN_256BYTES 0x00900000 +#define IMAGE_SCN_ALIGN_512BYTES 0x00a00000 +#define IMAGE_SCN_ALIGN_1024BYTES 0x00b00000 +#define IMAGE_SCN_ALIGN_2048BYTES 0x00c00000 +#define IMAGE_SCN_ALIGN_4096BYTES 0x00d00000 +#define IMAGE_SCN_ALIGN_8192BYTES 0x00e00000 +#define IMAGE_SCN_LNK_NRELOC_OVFL 0x01000000 /* extended relocations */ +#define IMAGE_SCN_MEM_DISCARDABLE 0x02000000 /* scn can be discarded */ +#define IMAGE_SCN_MEM_NOT_CACHED 0x04000000 /* cannot be cached */ +#define IMAGE_SCN_MEM_NOT_PAGED 0x08000000 /* not pageable */ +#define IMAGE_SCN_MEM_SHARED 0x10000000 /* can be shared */ +#define IMAGE_SCN_MEM_EXECUTE 0x20000000 /* can be executed as code */ +#define IMAGE_SCN_MEM_READ 0x40000000 /* readable */ +#define IMAGE_SCN_MEM_WRITE 0x80000000 /* writeable */ + enum x64_coff_reloc_type { IMAGE_REL_AMD64_ABSOLUTE = 0, IMAGE_REL_AMD64_ADDR64, @@ -463,6 +445,4 @@ struct win_certificate { uint16_t cert_type; }; -#endif /* !__ASSEMBLY__ */ - #endif /* __LINUX_PE_H */ diff --git a/include/linux/percpu-defs.h b/include/linux/percpu-defs.h index af1071535d..8bf7cf025c 100644 --- a/include/linux/percpu-defs.h +++ b/include/linux/percpu-defs.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * linux/percpu-defs.h - basic definitions for percpu areas * @@ 
-51,7 +50,7 @@ PER_CPU_ATTRIBUTES #define __PCPU_DUMMY_ATTRS \ - __section(".discard") __attribute__((unused)) + __attribute__((section(".discard"), unused)) /* * s390 and alpha modules require percpu variables to be defined as @@ -92,7 +91,8 @@ extern __PCPU_DUMMY_ATTRS char __pcpu_unique_##name; \ __PCPU_DUMMY_ATTRS char __pcpu_unique_##name; \ extern __PCPU_ATTRS(sec) __typeof__(type) name; \ - __PCPU_ATTRS(sec) __weak __typeof__(type) name + __PCPU_ATTRS(sec) PER_CPU_DEF_ATTRIBUTES __weak \ + __typeof__(type) name #else /* * Normal declaration and definition macros. @@ -101,7 +101,8 @@ extern __PCPU_ATTRS(sec) __typeof__(type) name #define DEFINE_PER_CPU_SECTION(type, name, sec) \ - __PCPU_ATTRS(sec) __typeof__(type) name + __PCPU_ATTRS(sec) PER_CPU_DEF_ATTRIBUTES \ + __typeof__(type) name #endif /* @@ -172,18 +173,13 @@ DEFINE_PER_CPU_SECTION(type, name, "..read_mostly") /* - * Declaration/definition used for per-CPU variables that should be accessed - * as decrypted when memory encryption is enabled in the guest. + * Declaration/definition used for per-CPU variables that must be read only. */ -#ifdef CONFIG_AMD_MEM_ENCRYPT -#define DECLARE_PER_CPU_DECRYPTED(type, name) \ - DECLARE_PER_CPU_SECTION(type, name, "..decrypted") +#define DECLARE_PER_CPU_READ_ONLY(type, name) \ + DECLARE_PER_CPU_SECTION(type, name, "..read_only") -#define DEFINE_PER_CPU_DECRYPTED(type, name) \ - DEFINE_PER_CPU_SECTION(type, name, "..decrypted") -#else -#define DEFINE_PER_CPU_DECRYPTED(type, name) DEFINE_PER_CPU(type, name) -#endif +#define DEFINE_PER_CPU_READ_ONLY(type, name) \ + DEFINE_PER_CPU_SECTION(type, name, "..read_only") /* * Intermodule exports for per-CPU variables. sparse forgets about @@ -412,7 +408,7 @@ do { \ * instead. 
* * If there is no other protection through preempt disable and/or disabling - * interrupts then one of these RMW operations can show unexpected behavior + * interupts then one of these RMW operations can show unexpected behavior * because the execution thread was rescheduled on another processor or an * interrupt occurred and the same percpu variable was modified from the * interrupt context. diff --git a/include/linux/percpu-refcount.h b/include/linux/percpu-refcount.h index 16c35a728b..3a481a4954 100644 --- a/include/linux/percpu-refcount.h +++ b/include/linux/percpu-refcount.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ /* * Percpu refcounts: * (C) 2012 Google, Inc. @@ -30,14 +29,10 @@ * calls io_destroy() or the process exits. * * In the aio code, kill_ioctx() is called when we wish to destroy a kioctx; it - * removes the kioctx from the proccess's table of kioctxs and kills percpu_ref. - * After that, there can't be any new users of the kioctx (from lookup_ioctx()) - * and it's then safe to drop the initial ref with percpu_ref_put(). - * - * Note that the free path, free_ioctx(), needs to go through explicit call_rcu() - * to synchronize with RCU protected lookup_ioctx(). percpu_ref operations don't - * imply RCU grace periods of any kind and if a user wants to combine percpu_ref - * with RCU protection, it must be done explicitly. + * calls percpu_ref_kill(), then hlist_del_rcu() and synchronize_rcu() to remove + * the kioctx from the proccess's list of kioctxs - after that, there can't be + * any new users of the kioctx (from lookup_ioctx()) and it's then safe to drop + * the initial ref with percpu_ref_put(). * * Code that does a two stage shutdown like this often needs some kind of * explicit synchronization to ensure the initial refcount can only be dropped @@ -75,47 +70,27 @@ enum { * operation using percpu_ref_switch_to_percpu(). 
If initialized * with this flag, the ref will stay in atomic mode until * percpu_ref_switch_to_percpu() is invoked on it. - * Implies ALLOW_REINIT. */ PERCPU_REF_INIT_ATOMIC = 1 << 0, /* * Start dead w/ ref == 0 in atomic mode. Must be revived with - * percpu_ref_reinit() before used. Implies INIT_ATOMIC and - * ALLOW_REINIT. + * percpu_ref_reinit() before used. Implies INIT_ATOMIC. */ PERCPU_REF_INIT_DEAD = 1 << 1, - - /* - * Allow switching from atomic mode to percpu mode. - */ - PERCPU_REF_ALLOW_REINIT = 1 << 2, -}; - -struct percpu_ref_data { - atomic_long_t count; - percpu_ref_func_t *release; - percpu_ref_func_t *confirm_switch; - bool force_atomic:1; - bool allow_reinit:1; - struct rcu_head rcu; - struct percpu_ref *ref; }; struct percpu_ref { + atomic_long_t count; /* * The low bit of the pointer indicates whether the ref is in percpu * mode; if set, then get/put will manipulate the atomic_t. */ unsigned long percpu_count_ptr; - - /* - * 'percpu_ref' is often embedded into user structure, and only - * 'percpu_count_ptr' is required in fast path, move other fields - * into 'percpu_ref_data', so we can reduce memory footprint in - * fast path. 
- */ - struct percpu_ref_data *data; + percpu_ref_func_t *release; + percpu_ref_func_t *confirm_switch; + bool force_atomic:1; + struct rcu_head rcu; }; int __must_check percpu_ref_init(struct percpu_ref *ref, @@ -124,13 +99,10 @@ int __must_check percpu_ref_init(struct percpu_ref *ref, void percpu_ref_exit(struct percpu_ref *ref); void percpu_ref_switch_to_atomic(struct percpu_ref *ref, percpu_ref_func_t *confirm_switch); -void percpu_ref_switch_to_atomic_sync(struct percpu_ref *ref); void percpu_ref_switch_to_percpu(struct percpu_ref *ref); void percpu_ref_kill_and_confirm(struct percpu_ref *ref, percpu_ref_func_t *confirm_kill); -void percpu_ref_resurrect(struct percpu_ref *ref); void percpu_ref_reinit(struct percpu_ref *ref); -bool percpu_ref_is_zero(struct percpu_ref *ref); /** * percpu_ref_kill - drop the initial ref @@ -139,10 +111,8 @@ bool percpu_ref_is_zero(struct percpu_ref *ref); * Must be used to drop the initial ref on a percpu refcount; must be called * precisely once before shutdown. * - * Switches @ref into atomic mode before gathering up the percpu counters - * and dropping the initial ref. - * - * There are no implied RCU grace periods between kill and release. + * Puts @ref in non percpu mode, then does a call_rcu() before gathering up the + * percpu counters and dropping the initial ref. */ static inline void percpu_ref_kill(struct percpu_ref *ref) { @@ -167,12 +137,12 @@ static inline bool __ref_is_percpu(struct percpu_ref *ref, * when using it as a pointer, __PERCPU_REF_ATOMIC may be set in * between contaminating the pointer value, meaning that * READ_ONCE() is required when fetching it. - * - * The dependency ordering from the READ_ONCE() pairs - * with smp_store_release() in __percpu_ref_switch_to_percpu(). 
*/ percpu_ptr = READ_ONCE(ref->percpu_count_ptr); + /* paired with smp_store_release() in __percpu_ref_switch_to_percpu() */ + smp_read_barrier_depends(); + /* * Theoretically, the following could test just ATOMIC; however, * then we'd have to mask off DEAD separately as DEAD may be @@ -199,14 +169,14 @@ static inline void percpu_ref_get_many(struct percpu_ref *ref, unsigned long nr) { unsigned long __percpu *percpu_count; - rcu_read_lock(); + rcu_read_lock_sched(); if (__ref_is_percpu(ref, &percpu_count)) this_cpu_add(*percpu_count, nr); else - atomic_long_add(nr, &ref->data->count); + atomic_long_add(nr, &ref->count); - rcu_read_unlock(); + rcu_read_unlock_sched(); } /** @@ -222,36 +192,6 @@ static inline void percpu_ref_get(struct percpu_ref *ref) percpu_ref_get_many(ref, 1); } -/** - * percpu_ref_tryget_many - try to increment a percpu refcount - * @ref: percpu_ref to try-get - * @nr: number of references to get - * - * Increment a percpu refcount by @nr unless its count already reached zero. - * Returns %true on success; %false on failure. - * - * This function is safe to call as long as @ref is between init and exit. 
- */ -static inline bool percpu_ref_tryget_many(struct percpu_ref *ref, - unsigned long nr) -{ - unsigned long __percpu *percpu_count; - bool ret; - - rcu_read_lock(); - - if (__ref_is_percpu(ref, &percpu_count)) { - this_cpu_add(*percpu_count, nr); - ret = true; - } else { - ret = atomic_long_add_unless(&ref->data->count, nr, 0); - } - - rcu_read_unlock(); - - return ret; -} - /** * percpu_ref_tryget - try to increment a percpu refcount * @ref: percpu_ref to try-get @@ -263,7 +203,21 @@ static inline bool percpu_ref_tryget_many(struct percpu_ref *ref, */ static inline bool percpu_ref_tryget(struct percpu_ref *ref) { - return percpu_ref_tryget_many(ref, 1); + unsigned long __percpu *percpu_count; + bool ret; + + rcu_read_lock_sched(); + + if (__ref_is_percpu(ref, &percpu_count)) { + this_cpu_inc(*percpu_count); + ret = true; + } else { + ret = atomic_long_inc_not_zero(&ref->count); + } + + rcu_read_unlock_sched(); + + return ret; } /** @@ -286,16 +240,16 @@ static inline bool percpu_ref_tryget_live(struct percpu_ref *ref) unsigned long __percpu *percpu_count; bool ret = false; - rcu_read_lock(); + rcu_read_lock_sched(); if (__ref_is_percpu(ref, &percpu_count)) { this_cpu_inc(*percpu_count); ret = true; } else if (!(ref->percpu_count_ptr & __PERCPU_REF_DEAD)) { - ret = atomic_long_inc_not_zero(&ref->data->count); + ret = atomic_long_inc_not_zero(&ref->count); } - rcu_read_unlock(); + rcu_read_unlock_sched(); return ret; } @@ -314,14 +268,14 @@ static inline void percpu_ref_put_many(struct percpu_ref *ref, unsigned long nr) { unsigned long __percpu *percpu_count; - rcu_read_lock(); + rcu_read_lock_sched(); if (__ref_is_percpu(ref, &percpu_count)) this_cpu_sub(*percpu_count, nr); - else if (unlikely(atomic_long_sub_and_test(nr, &ref->data->count))) - ref->data->release(ref); + else if (unlikely(atomic_long_sub_and_test(nr, &ref->count))) + ref->release(ref); - rcu_read_unlock(); + rcu_read_unlock_sched(); } /** @@ -352,4 +306,21 @@ static inline bool 
percpu_ref_is_dying(struct percpu_ref *ref) return ref->percpu_count_ptr & __PERCPU_REF_DEAD; } +/** + * percpu_ref_is_zero - test whether a percpu refcount reached zero + * @ref: percpu_ref to test + * + * Returns %true if @ref reached zero. + * + * This function is safe to call as long as @ref is between init and exit. + */ +static inline bool percpu_ref_is_zero(struct percpu_ref *ref) +{ + unsigned long __percpu *percpu_count; + + if (__ref_is_percpu(ref, &percpu_count)) + return false; + return !atomic_long_read(&ref->count); +} + #endif diff --git a/include/linux/percpu-rwsem.h b/include/linux/percpu-rwsem.h index 5fda40f97f..5b2e6159b7 100644 --- a/include/linux/percpu-rwsem.h +++ b/include/linux/percpu-rwsem.h @@ -1,10 +1,9 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_PERCPU_RWSEM_H #define _LINUX_PERCPU_RWSEM_H #include +#include #include -#include #include #include #include @@ -12,43 +11,28 @@ struct percpu_rw_semaphore { struct rcu_sync rss; unsigned int __percpu *read_count; - struct rcuwait writer; - wait_queue_head_t waiters; - atomic_t block; -#ifdef CONFIG_DEBUG_LOCK_ALLOC - struct lockdep_map dep_map; -#endif + struct rw_semaphore rw_sem; + wait_queue_head_t writer; + int readers_block; }; -#ifdef CONFIG_DEBUG_LOCK_ALLOC -#define __PERCPU_RWSEM_DEP_MAP_INIT(lockname) .dep_map = { .name = #lockname }, -#else -#define __PERCPU_RWSEM_DEP_MAP_INIT(lockname) -#endif - -#define __DEFINE_PERCPU_RWSEM(name, is_static) \ +#define DEFINE_STATIC_PERCPU_RWSEM(name) \ static DEFINE_PER_CPU(unsigned int, __percpu_rwsem_rc_##name); \ -is_static struct percpu_rw_semaphore name = { \ - .rss = __RCU_SYNC_INITIALIZER(name.rss), \ +static struct percpu_rw_semaphore name = { \ + .rss = __RCU_SYNC_INITIALIZER(name.rss, RCU_SCHED_SYNC), \ .read_count = &__percpu_rwsem_rc_##name, \ - .writer = __RCUWAIT_INITIALIZER(name.writer), \ - .waiters = __WAIT_QUEUE_HEAD_INITIALIZER(name.waiters), \ - .block = ATOMIC_INIT(0), \ - __PERCPU_RWSEM_DEP_MAP_INIT(name) \ + 
.rw_sem = __RWSEM_INITIALIZER(name.rw_sem), \ + .writer = __WAIT_QUEUE_HEAD_INITIALIZER(name.writer), \ } -#define DEFINE_PERCPU_RWSEM(name) \ - __DEFINE_PERCPU_RWSEM(name, /* not static */) -#define DEFINE_STATIC_PERCPU_RWSEM(name) \ - __DEFINE_PERCPU_RWSEM(name, static) +extern int __percpu_down_read(struct percpu_rw_semaphore *, int); +extern void __percpu_up_read(struct percpu_rw_semaphore *); -extern bool __percpu_down_read(struct percpu_rw_semaphore *, bool); - -static inline void percpu_down_read(struct percpu_rw_semaphore *sem) +static inline void percpu_down_read_preempt_disable(struct percpu_rw_semaphore *sem) { might_sleep(); - rwsem_acquire_read(&sem->dep_map, 0, 0, _RET_IP_); + rwsem_acquire_read(&sem->rw_sem.dep_map, 0, 0, _RET_IP_); preempt_disable(); /* @@ -56,31 +40,35 @@ static inline void percpu_down_read(struct percpu_rw_semaphore *sem) * cannot both change sem->state from readers_fast and start checking * counters while we are here. So if we see !sem->state, we know that * the writer won't be checking until we're past the preempt_enable() - * and that once the synchronize_rcu() is done, the writer will see + * and that one the synchronize_sched() is done, the writer will see * anything we did within this RCU-sched read-size critical section. */ - if (likely(rcu_sync_is_idle(&sem->rss))) - this_cpu_inc(*sem->read_count); - else + __this_cpu_inc(*sem->read_count); + if (unlikely(!rcu_sync_is_idle(&sem->rss))) __percpu_down_read(sem, false); /* Unconditional memory barrier */ + barrier(); /* - * The preempt_enable() prevents the compiler from + * The barrier() prevents the compiler from * bleeding the critical section out. 
*/ +} + +static inline void percpu_down_read(struct percpu_rw_semaphore *sem) +{ + percpu_down_read_preempt_disable(sem); preempt_enable(); } -static inline bool percpu_down_read_trylock(struct percpu_rw_semaphore *sem) +static inline int percpu_down_read_trylock(struct percpu_rw_semaphore *sem) { - bool ret = true; + int ret = 1; preempt_disable(); /* * Same as in percpu_down_read(). */ - if (likely(rcu_sync_is_idle(&sem->rss))) - this_cpu_inc(*sem->read_count); - else + __this_cpu_inc(*sem->read_count); + if (unlikely(!rcu_sync_is_idle(&sem->rss))) ret = __percpu_down_read(sem, true); /* Unconditional memory barrier */ preempt_enable(); /* @@ -89,36 +77,34 @@ static inline bool percpu_down_read_trylock(struct percpu_rw_semaphore *sem) */ if (ret) - rwsem_acquire_read(&sem->dep_map, 0, 1, _RET_IP_); + rwsem_acquire_read(&sem->rw_sem.dep_map, 0, 1, _RET_IP_); return ret; } -static inline void percpu_up_read(struct percpu_rw_semaphore *sem) +static inline void percpu_up_read_preempt_enable(struct percpu_rw_semaphore *sem) { - rwsem_release(&sem->dep_map, _RET_IP_); - - preempt_disable(); + /* + * The barrier() prevents the compiler from + * bleeding the critical section out. + */ + barrier(); /* * Same as in percpu_down_read(). */ - if (likely(rcu_sync_is_idle(&sem->rss))) { - this_cpu_dec(*sem->read_count); - } else { - /* - * slowpath; reader will only ever wake a single blocked - * writer. - */ - smp_mb(); /* B matches C */ - /* - * In other words, if they see our decrement (presumably to - * aggregate zero, as that is the only time it matters) they - * will also see our critical section. 
- */ - this_cpu_dec(*sem->read_count); - rcuwait_wake_up(&sem->writer); - } + if (likely(rcu_sync_is_idle(&sem->rss))) + __this_cpu_dec(*sem->read_count); + else + __percpu_up_read(sem); /* Unconditional memory barrier */ preempt_enable(); + + rwsem_release(&sem->rw_sem.dep_map, 1, _RET_IP_); +} + +static inline void percpu_up_read(struct percpu_rw_semaphore *sem) +{ + preempt_disable(); + percpu_up_read_preempt_enable(sem); } extern void percpu_down_write(struct percpu_rw_semaphore *); @@ -135,19 +121,25 @@ extern void percpu_free_rwsem(struct percpu_rw_semaphore *); __percpu_init_rwsem(sem, #sem, &rwsem_key); \ }) -#define percpu_rwsem_is_held(sem) lockdep_is_held(sem) -#define percpu_rwsem_assert_held(sem) lockdep_assert_held(sem) +#define percpu_rwsem_is_held(sem) lockdep_is_held(&(sem)->rw_sem) + +#define percpu_rwsem_assert_held(sem) \ + lockdep_assert_held(&(sem)->rw_sem) static inline void percpu_rwsem_release(struct percpu_rw_semaphore *sem, bool read, unsigned long ip) { - lock_release(&sem->dep_map, ip); + lock_release(&sem->rw_sem.dep_map, 1, ip); +#ifdef CONFIG_RWSEM_SPIN_ON_OWNER + if (!read) + sem->rw_sem.owner = NULL; +#endif } static inline void percpu_rwsem_acquire(struct percpu_rw_semaphore *sem, bool read, unsigned long ip) { - lock_acquire(&sem->dep_map, 0, 1, read, 1, NULL, ip); + lock_acquire(&sem->rw_sem.dep_map, 0, 1, read, 1, NULL, ip); } #endif diff --git a/include/linux/percpu.h b/include/linux/percpu.h index 5e76af742c..7fb18e3118 100644 --- a/include/linux/percpu.h +++ b/include/linux/percpu.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef __LINUX_PERCPU_H #define __LINUX_PERCPU_H @@ -22,19 +21,6 @@ /* minimum unit size, also is the maximum supported allocation size */ #define PCPU_MIN_UNIT_SIZE PFN_ALIGN(32 << 10) -/* minimum allocation size and shift in bytes */ -#define PCPU_MIN_ALLOC_SHIFT 2 -#define PCPU_MIN_ALLOC_SIZE (1 << PCPU_MIN_ALLOC_SHIFT) - -/* - * The PCPU_BITMAP_BLOCK_SIZE must be the same size as 
PAGE_SIZE as the - * updating of hints is used to manage the nr_empty_pop_pages in both - * the chunk and globally. - */ -#define PCPU_BITMAP_BLOCK_SIZE PAGE_SIZE -#define PCPU_BITMAP_BLOCK_BITS (PCPU_BITMAP_BLOCK_SIZE >> \ - PCPU_MIN_ALLOC_SHIFT) - /* * Percpu allocator can serve percpu allocations before slab is * initialized which allows slab to depend on the percpu allocator. @@ -42,7 +28,7 @@ * preallocate for this. Keep PERCPU_DYNAMIC_RESERVE equal to or * larger than PERCPU_DYNAMIC_EARLY_SIZE. */ -#define PERCPU_DYNAMIC_EARLY_SLOTS 128 +#define PERCPU_DYNAMIC_EARLY_SLOTS 256 #define PERCPU_DYNAMIC_EARLY_SIZE (12 << 10) /* @@ -105,7 +91,7 @@ extern struct pcpu_alloc_info * __init pcpu_alloc_alloc_info(int nr_groups, int nr_units); extern void __init pcpu_free_alloc_info(struct pcpu_alloc_info *ai); -extern void __init pcpu_setup_first_chunk(const struct pcpu_alloc_info *ai, +extern int __init pcpu_setup_first_chunk(const struct pcpu_alloc_info *ai, void *base_addr); #ifdef CONFIG_NEED_PER_CPU_EMBED_FIRST_CHUNK @@ -124,12 +110,12 @@ extern int __init pcpu_page_first_chunk(size_t reserved_size, #endif extern void __percpu *__alloc_reserved_percpu(size_t size, size_t align); -extern bool __is_kernel_percpu_address(unsigned long addr, unsigned long *can_addr); extern bool is_kernel_percpu_address(unsigned long addr); #if !defined(CONFIG_SMP) || !defined(CONFIG_HAVE_SETUP_PER_CPU_AREA) extern void __init setup_per_cpu_areas(void); #endif +extern void __init percpu_init_late(void); extern void __percpu *__alloc_percpu_gfp(size_t size, size_t align, gfp_t gfp); extern void __percpu *__alloc_percpu(size_t size, size_t align); @@ -143,6 +129,4 @@ extern phys_addr_t per_cpu_ptr_to_phys(void *addr); (typeof(type) __percpu *)__alloc_percpu(sizeof(type), \ __alignof__(type)) -extern unsigned long pcpu_nr_pages(void); - #endif /* __LINUX_PERCPU_H */ diff --git a/include/linux/percpu_counter.h b/include/linux/percpu_counter.h index 01861eebed..84a1094496 100644 --- 
a/include/linux/percpu_counter.h +++ b/include/linux/percpu_counter.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_PERCPU_COUNTER_H #define _LINUX_PERCPU_COUNTER_H /* @@ -40,11 +39,9 @@ int __percpu_counter_init(struct percpu_counter *fbc, s64 amount, gfp_t gfp, void percpu_counter_destroy(struct percpu_counter *fbc); void percpu_counter_set(struct percpu_counter *fbc, s64 amount); -void percpu_counter_add_batch(struct percpu_counter *fbc, s64 amount, - s32 batch); +void __percpu_counter_add(struct percpu_counter *fbc, s64 amount, s32 batch); s64 __percpu_counter_sum(struct percpu_counter *fbc); int __percpu_counter_compare(struct percpu_counter *fbc, s64 rhs, s32 batch); -void percpu_counter_sync(struct percpu_counter *fbc); static inline int percpu_counter_compare(struct percpu_counter *fbc, s64 rhs) { @@ -53,7 +50,7 @@ static inline int percpu_counter_compare(struct percpu_counter *fbc, s64 rhs) static inline void percpu_counter_add(struct percpu_counter *fbc, s64 amount) { - percpu_counter_add_batch(fbc, amount, percpu_counter_batch); + __percpu_counter_add(fbc, amount, percpu_counter_batch); } static inline s64 percpu_counter_sum_positive(struct percpu_counter *fbc) @@ -79,15 +76,15 @@ static inline s64 percpu_counter_read(struct percpu_counter *fbc) */ static inline s64 percpu_counter_read_positive(struct percpu_counter *fbc) { - /* Prevent reloads of fbc->count */ - s64 ret = READ_ONCE(fbc->count); + s64 ret = fbc->count; + barrier(); /* Prevent reloads of fbc->count */ if (ret >= 0) return ret; return 0; } -static inline bool percpu_counter_initialized(struct percpu_counter *fbc) +static inline int percpu_counter_initialized(struct percpu_counter *fbc) { return (fbc->counters != NULL); } @@ -139,7 +136,7 @@ percpu_counter_add(struct percpu_counter *fbc, s64 amount) } static inline void -percpu_counter_add_batch(struct percpu_counter *fbc, s64 amount, s32 batch) +__percpu_counter_add(struct percpu_counter *fbc, s64 amount, s32 
batch) { percpu_counter_add(fbc, amount); } @@ -168,14 +165,11 @@ static inline s64 percpu_counter_sum(struct percpu_counter *fbc) return percpu_counter_read(fbc); } -static inline bool percpu_counter_initialized(struct percpu_counter *fbc) +static inline int percpu_counter_initialized(struct percpu_counter *fbc) { - return true; + return 1; } -static inline void percpu_counter_sync(struct percpu_counter *fbc) -{ -} #endif /* CONFIG_SMP */ static inline void percpu_counter_inc(struct percpu_counter *fbc) diff --git a/include/linux/percpu_ida.h b/include/linux/percpu_ida.h new file mode 100644 index 0000000000..f5cfdd6a55 --- /dev/null +++ b/include/linux/percpu_ida.h @@ -0,0 +1,82 @@ +#ifndef __PERCPU_IDA_H__ +#define __PERCPU_IDA_H__ + +#include +#include +#include +#include +#include +#include +#include + +struct percpu_ida_cpu; + +struct percpu_ida { + /* + * number of tags available to be allocated, as passed to + * percpu_ida_init() + */ + unsigned nr_tags; + unsigned percpu_max_size; + unsigned percpu_batch_size; + + struct percpu_ida_cpu __percpu *tag_cpu; + + /* + * Bitmap of cpus that (may) have tags on their percpu freelists: + * steal_tags() uses this to decide when to steal tags, and which cpus + * to try stealing from. + * + * It's ok for a freelist to be empty when its bit is set - steal_tags() + * will just keep looking - but the bitmap _must_ be set whenever a + * percpu freelist does have tags. + */ + cpumask_t cpus_have_tags; + + struct { + spinlock_t lock; + /* + * When we go to steal tags from another cpu (see steal_tags()), + * we want to pick a cpu at random. 
Cycling through them every + * time we steal is a bit easier and more or less equivalent: + */ + unsigned cpu_last_stolen; + + /* For sleeping on allocation failure */ + wait_queue_head_t wait; + + /* + * Global freelist - it's a stack where nr_free points to the + * top + */ + unsigned nr_free; + unsigned *freelist; + } ____cacheline_aligned_in_smp; +}; + +/* + * Number of tags we move between the percpu freelist and the global freelist at + * a time + */ +#define IDA_DEFAULT_PCPU_BATCH_MOVE 32U +/* Max size of percpu freelist, */ +#define IDA_DEFAULT_PCPU_SIZE ((IDA_DEFAULT_PCPU_BATCH_MOVE * 3) / 2) + +int percpu_ida_alloc(struct percpu_ida *pool, int state); +void percpu_ida_free(struct percpu_ida *pool, unsigned tag); + +void percpu_ida_destroy(struct percpu_ida *pool); +int __percpu_ida_init(struct percpu_ida *pool, unsigned long nr_tags, + unsigned long max_size, unsigned long batch_size); +static inline int percpu_ida_init(struct percpu_ida *pool, unsigned long nr_tags) +{ + return __percpu_ida_init(pool, nr_tags, IDA_DEFAULT_PCPU_SIZE, + IDA_DEFAULT_PCPU_BATCH_MOVE); +} + +typedef int (*percpu_ida_cb)(unsigned, void *); +int percpu_ida_for_each_free(struct percpu_ida *pool, percpu_ida_cb fn, + void *data); + +unsigned percpu_ida_free_tags(struct percpu_ida *pool, int cpu); +#endif /* __PERCPU_IDA_H__ */ diff --git a/include/linux/perf/arm_pmu.h b/include/linux/perf/arm_pmu.h index 2512e2f9cd..8462da2660 100644 --- a/include/linux/perf/arm_pmu.h +++ b/include/linux/perf/arm_pmu.h @@ -1,8 +1,12 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * linux/arch/arm/include/asm/pmu.h * * Copyright (C) 2009 picoChip Designs Ltd, Jamie Iles + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ * */ #ifndef __ARM_PMU_H__ @@ -10,10 +14,22 @@ #include #include -#include #include #include +/* + * struct arm_pmu_platdata - ARM PMU platform data + * + * @handle_irq: an optional handler which will be called from the + * interrupt and passed the address of the low level handler, + * and can be used to implement any platform specific handling + * before or after calling it. + */ +struct arm_pmu_platdata { + irqreturn_t (*handle_irq)(int irq, void *dev, + irq_handler_t pmu_handler); +}; + #ifdef CONFIG_ARM_PMU /* @@ -21,12 +37,6 @@ */ #define ARMPMU_MAX_HWEVENTS 32 -/* - * ARM PMU hw_event flags - */ -/* Event uses a 64bit counter */ -#define ARMPMU_EVT_64BIT 1 - #define HW_OP_UNSUPPORTED 0xFFFF #define C(_x) PERF_COUNT_HW_CACHE_##_x #define CACHE_OP_UNSUPPORTED 0xFFFF @@ -65,24 +75,22 @@ struct pmu_hw_events { * already have to allocate this struct per cpu. */ struct arm_pmu *percpu_pmu; - - int irq; }; enum armpmu_attr_groups { ARMPMU_ATTR_GROUP_COMMON, ARMPMU_ATTR_GROUP_EVENTS, ARMPMU_ATTR_GROUP_FORMATS, - ARMPMU_ATTR_GROUP_CAPS, ARMPMU_NR_ATTR_GROUPS }; struct arm_pmu { struct pmu pmu; + cpumask_t active_irqs; cpumask_t supported_cpus; + int *irq_affinity; char *name; - int pmuver; - irqreturn_t (*handle_irq)(struct arm_pmu *pmu); + irqreturn_t (*handle_irq)(int irq_num, void *dev); void (*enable)(struct perf_event *event); void (*disable)(struct perf_event *event); int (*get_event_idx)(struct pmu_hw_events *hw_events, @@ -91,30 +99,27 @@ struct arm_pmu { struct perf_event *event); int (*set_event_filter)(struct hw_perf_event *evt, struct perf_event_attr *attr); - u64 (*read_counter)(struct perf_event *event); - void (*write_counter)(struct perf_event *event, u64 val); + u32 (*read_counter)(struct perf_event *event); + void (*write_counter)(struct perf_event *event, u32 val); void (*start)(struct arm_pmu *); void (*stop)(struct arm_pmu *); void (*reset)(void *); + int (*request_irq)(struct arm_pmu *, irq_handler_t handler); + void (*free_irq)(struct arm_pmu 
*); int (*map_event)(struct perf_event *event); - int (*filter_match)(struct perf_event *event); int num_events; + atomic_t active_events; + struct mutex reserve_mutex; + u64 max_period; bool secure_access; /* 32-bit ARM only */ -#define ARMV8_PMUV3_MAX_COMMON_EVENTS 0x40 +#define ARMV8_PMUV3_MAX_COMMON_EVENTS 0x40 DECLARE_BITMAP(pmceid_bitmap, ARMV8_PMUV3_MAX_COMMON_EVENTS); -#define ARMV8_PMUV3_EXT_COMMON_EVENT_BASE 0x4000 - DECLARE_BITMAP(pmceid_ext_bitmap, ARMV8_PMUV3_MAX_COMMON_EVENTS); struct platform_device *plat_device; struct pmu_hw_events __percpu *hw_events; struct hlist_node node; struct notifier_block cpu_pm_nb; /* the attr_groups array must be NULL-terminated */ const struct attribute_group *attr_groups[ARMPMU_NR_ATTR_GROUPS + 1]; - /* store the PMMIR_EL1 to expose slots */ - u64 reg_pmmir; - - /* Only to be used by ACPI probing code */ - unsigned long acpi_cpuid; }; #define to_arm_pmu(p) (container_of(p, struct arm_pmu, pmu)) @@ -130,12 +135,10 @@ int armpmu_map_event(struct perf_event *event, [PERF_COUNT_HW_CACHE_RESULT_MAX], u32 raw_event_mask); -typedef int (*armpmu_init_fn)(struct arm_pmu *); - struct pmu_probe_info { unsigned int cpuid; unsigned int mask; - armpmu_init_fn init; + int (*init)(struct arm_pmu *); }; #define PMU_PROBE(_cpuid, _mask, _fn) \ @@ -157,30 +160,8 @@ int arm_pmu_device_probe(struct platform_device *pdev, const struct of_device_id *of_table, const struct pmu_probe_info *probe_table); -#ifdef CONFIG_ACPI -int arm_pmu_acpi_probe(armpmu_init_fn init_fn); -#else -static inline int arm_pmu_acpi_probe(armpmu_init_fn init_fn) { return 0; } -#endif - -#ifdef CONFIG_KVM -void kvm_host_pmu_init(struct arm_pmu *pmu); -#else -#define kvm_host_pmu_init(x) do { } while(0) -#endif - -/* Internal functions only for core arm_pmu code */ -struct arm_pmu *armpmu_alloc(void); -struct arm_pmu *armpmu_alloc_atomic(void); -void armpmu_free(struct arm_pmu *pmu); -int armpmu_register(struct arm_pmu *pmu); -int armpmu_request_irq(int irq, int cpu); 
-void armpmu_free_irq(int irq, int cpu); - #define ARMV8_PMU_PDEV_NAME "armv8-pmu" #endif /* CONFIG_ARM_PMU */ -#define ARMV8_SPE_PDEV_NAME "arm,spe-v1" - #endif /* __ARM_PMU_H__ */ diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h index 9b60bb89d8..fd324404c2 100644 --- a/include/linux/perf_event.h +++ b/include/linux/perf_event.h @@ -15,7 +15,6 @@ #define _LINUX_PERF_EVENT_H #include -#include /* * Kernel-internal data types and definitions: @@ -30,7 +29,6 @@ struct perf_guest_info_callbacks { int (*is_in_guest)(void); int (*is_user_mode)(void); unsigned long (*get_guest_ip)(void); - void (*handle_intel_pt_intr)(void); }; #ifdef CONFIG_HAVE_HW_BREAKPOINT @@ -54,14 +52,13 @@ struct perf_guest_info_callbacks { #include #include #include +#include #include -#include -#include #include struct perf_callchain_entry { __u64 nr; - __u64 ip[]; /* /proc/sys/kernel/perf_event_max_stack */ + __u64 ip[0]; /* /proc/sys/kernel/perf_event_max_stack */ }; struct perf_callchain_entry_ctx { @@ -93,27 +90,15 @@ struct perf_raw_record { /* * branch stack layout: * nr: number of taken branches stored in entries[] - * hw_idx: The low level index of raw branch records - * for the most recent branch. - * -1ULL means invalid/unknown. * * Note that nr can vary from sample to sample * branches (to, from) are stored from most recent * to least recent, i.e., entries[0] contains the most * recent branch. - * The entries[] is an abstraction of raw branch records, - * which may not be stored in age order in HW, e.g. Intel LBR. - * The hw_idx is to expose the low level index of raw - * branch record for the most recent branch aka entries[0]. - * The hw_idx index is between -1 (unknown) and max depth, - * which can be retrieved in /sys/devices/cpu/caps/branches. - * For the architectures whose raw branch records are - * already stored in age order, the hw_idx should be 0. 
*/ struct perf_branch_stack { __u64 nr; - __u64 hw_idx; - struct perf_branch_entry entries[]; + struct perf_branch_entry entries[0]; }; struct task_struct; @@ -154,6 +139,17 @@ struct hw_perf_event { /* for tp_event->class */ struct list_head tp_list; }; + struct { /* intel_cqm */ + int cqm_state; + u32 cqm_rmid; + int is_group_event; + struct list_head cqm_events_entry; + struct list_head cqm_groups_entry; + struct list_head cqm_group_entry; + }; + struct { /* itrace */ + int itrace_started; + }; struct { /* amd_power */ u64 pwr_acc; u64 ptsc; @@ -169,13 +165,6 @@ struct hw_perf_event { struct list_head bp_list; }; #endif - struct { /* amd_iommu */ - u8 iommu_bank; - u8 iommu_cntr; - u16 padding; - u64 conf; - u64 conf1; - }; }; /* * If the event is a per task event, this will point to the task in @@ -212,26 +201,17 @@ struct hw_perf_event { */ u64 sample_period; - union { - struct { /* Sampling */ - /* - * The period we started this sample with. - */ - u64 last_period; + /* + * The period we started this sample with. + */ + u64 last_period; - /* - * However much is left of the current period; - * note that this is a full 64bit value and - * allows for generation of periods longer - * than hardware might allow. - */ - local64_t period_left; - }; - struct { /* Topdown events counting for context switch */ - u64 saved_metric; - u64 saved_slots; - }; - }; + /* + * However much is left of the current period; note that this is + * a full 64bit value and allows for generation of periods longer + * than hardware might allow. 
+ */ + local64_t period_left; /* * State for throttling the event, see __perf_event_overflow() and @@ -260,18 +240,13 @@ struct perf_event; /** * pmu::capabilities flags */ -#define PERF_PMU_CAP_NO_INTERRUPT 0x0001 -#define PERF_PMU_CAP_NO_NMI 0x0002 -#define PERF_PMU_CAP_AUX_NO_SG 0x0004 -#define PERF_PMU_CAP_EXTENDED_REGS 0x0008 -#define PERF_PMU_CAP_EXCLUSIVE 0x0010 -#define PERF_PMU_CAP_ITRACE 0x0020 -#define PERF_PMU_CAP_HETEROGENEOUS_CPUS 0x0040 -#define PERF_PMU_CAP_NO_EXCLUDE 0x0080 -#define PERF_PMU_CAP_AUX_OUTPUT 0x0100 -#define PERF_PMU_CAP_EXTENDED_HW_TYPE 0x0200 - -struct perf_output_handle; +#define PERF_PMU_CAP_NO_INTERRUPT 0x01 +#define PERF_PMU_CAP_NO_NMI 0x02 +#define PERF_PMU_CAP_AUX_NO_SG 0x04 +#define PERF_PMU_CAP_AUX_SW_DOUBLEBUF 0x08 +#define PERF_PMU_CAP_EXCLUSIVE 0x10 +#define PERF_PMU_CAP_ITRACE 0x20 +#define PERF_PMU_CAP_HETEROGENEOUS_CPUS 0x40 /** * struct pmu - generic performance monitoring unit @@ -282,7 +257,6 @@ struct pmu { struct module *module; struct device *dev; const struct attribute_group **attr_groups; - const struct attribute_group **attr_update; const char *name; int type; @@ -291,8 +265,8 @@ struct pmu { */ int capabilities; - int __percpu *pmu_disable_count; - struct perf_cpu_context __percpu *pmu_cpu_context; + int * __percpu pmu_disable_count; + struct perf_cpu_context * __percpu pmu_cpu_context; atomic_t exclusive_cnt; /* < 0: cpu; > 0: tsk */ int task_ctx_nr; int hrtimer_interval_ms; @@ -317,7 +291,7 @@ struct pmu { * -EBUSY -- @event is for this PMU but PMU temporarily unavailable * -EINVAL -- @event is for this PMU but @event is not valid * -EOPNOTSUPP -- @event is for this PMU, @event is valid, but not supported - * -EACCES -- @event is for this PMU, @event is valid, but no privileges + * -EACCESS -- @event is for this PMU, @event is valid, but no privilidges * * 0 -- @event is for this PMU and valid * @@ -329,8 +303,8 @@ struct pmu { * Notification that the event was mapped or unmapped. 
Called * in the context of the mapping task. */ - void (*event_mapped) (struct perf_event *event, struct mm_struct *mm); /* optional */ - void (*event_unmapped) (struct perf_event *event, struct mm_struct *mm); /* optional */ + void (*event_mapped) (struct perf_event *event); /*optional*/ + void (*event_unmapped) (struct perf_event *event); /*optional*/ /* * Flags for ->add()/->del()/ ->start()/->stop(). There are @@ -376,7 +350,7 @@ struct pmu { * ->stop() with PERF_EF_UPDATE will read the counter and update * period/count values like ->read() would. * - * ->start() with PERF_EF_RELOAD will reprogram the counter + * ->start() with PERF_EF_RELOAD will reprogram the the counter * value, must be preceded by a ->stop() with PERF_EF_UPDATE. */ void (*start) (struct perf_event *event, int flags); @@ -429,26 +403,21 @@ struct pmu { */ void (*sched_task) (struct perf_event_context *ctx, bool sched_in); + /* + * PMU specific data size + */ + size_t task_ctx_size; + /* - * Kmem cache of PMU specific data + * Return the count value for a counter. */ - struct kmem_cache *task_ctx_cache; - - /* - * PMU specific parts of task perf event context (i.e. ctx->task_ctx_data) - * can be synchronized using this function. See Intel LBR callstack support - * implementation and Perf core context switch handling callbacks for usage - * examples. - */ - void (*swap_task_ctx) (struct perf_event_context *prev, - struct perf_event_context *next); - /* optional */ + u64 (*count) (struct perf_event *event); /*optional*/ /* * Set up pmu-private data structures for an AUX area */ - void *(*setup_aux) (struct perf_event *event, void **pages, + void *(*setup_aux) (int cpu, void **pages, int nr_pages, bool overwrite); /* optional */ @@ -457,19 +426,6 @@ struct pmu { */ void (*free_aux) (void *aux); /* optional */ - /* - * Take a snapshot of the AUX buffer without touching the event - * state, so that preempting ->start()/->stop() callbacks does - * not interfere with their logic. 
Called in PMI context. - * - * Returns the size of AUX data copied to the output handle. - * - * Optional. - */ - long (*snapshot_aux) (struct perf_event *event, - struct perf_output_handle *handle, - unsigned long size); - /* * Validate address range filters: make sure the HW supports the * requested configuration and number of filters; return 0 if the @@ -495,49 +451,30 @@ struct pmu { void (*addr_filters_sync) (struct perf_event *event); /* optional */ - /* - * Check if event can be used for aux_output purposes for - * events of this PMU. - * - * Runs from perf_event_open(). Should return 0 for "no match" - * or non-zero for "match". - */ - int (*aux_output_match) (struct perf_event *event); - /* optional */ - /* * Filter events for PMU-specific reasons. */ int (*filter_match) (struct perf_event *event); /* optional */ - - /* - * Check period value for PERF_EVENT_IOC_PERIOD ioctl. - */ - int (*check_period) (struct perf_event *event, u64 value); /* optional */ -}; - -enum perf_addr_filter_action_t { - PERF_ADDR_FILTER_ACTION_STOP = 0, - PERF_ADDR_FILTER_ACTION_START, - PERF_ADDR_FILTER_ACTION_FILTER, }; /** * struct perf_addr_filter - address range filter definition * @entry: event's filter list linkage - * @path: object file's path for file-based filters + * @inode: object file's inode for file-based filters * @offset: filter range offset - * @size: filter range size (size==0 means single address trigger) - * @action: filter/start/stop + * @size: filter range size + * @range: 1: range, 0: address + * @filter: 1: filter/start, 0: stop * * This is a hardware-agnostic filter configuration as specified by the user. 
*/ struct perf_addr_filter { struct list_head entry; - struct path path; + struct inode *inode; unsigned long offset; unsigned long size; - enum perf_addr_filter_action_t action; + unsigned int range : 1, + filter : 1; }; /** @@ -545,7 +482,6 @@ struct perf_addr_filter { * @list: list of filters for this event * @lock: spinlock that serializes accesses to the @list and event's * (and its children's) filter generations. - * @nr_file_filters: number of file-based filters * * A child event will use parent's @list (and therefore @lock), so they are * bundled together; see perf_event_addr_filters(). @@ -553,18 +489,12 @@ struct perf_addr_filter { struct perf_addr_filters_head { struct list_head list; raw_spinlock_t lock; - unsigned int nr_file_filters; -}; - -struct perf_addr_filter_range { - unsigned long start; - unsigned long size; }; /** - * enum perf_event_state - the states of an event: + * enum perf_event_active_state - the states of a event */ -enum perf_event_state { +enum perf_event_active_state { PERF_EVENT_STATE_DEAD = -4, PERF_EVENT_STATE_EXIT = -3, PERF_EVENT_STATE_ERROR = -2, @@ -586,13 +516,9 @@ typedef void (*perf_overflow_handler_t)(struct perf_event *, * PERF_EV_CAP_SOFTWARE: Is a software event. * PERF_EV_CAP_READ_ACTIVE_PKG: A CPU event (or cgroup event) that can be read * from any CPU in the package where it is active. - * PERF_EV_CAP_SIBLING: An event with this flag must be a group sibling and - * cannot be a group leader. If an event with this flag is detached from the - * group it is scheduled out and moved into an unrecoverable ERROR state. 
*/ #define PERF_EV_CAP_SOFTWARE BIT(0) #define PERF_EV_CAP_READ_ACTIVE_PKG BIT(1) -#define PERF_EV_CAP_SIBLING BIT(2) #define SWEVENT_HLIST_BITS 8 #define SWEVENT_HLIST_SIZE (1 << SWEVENT_HLIST_BITS) @@ -606,22 +532,15 @@ struct swevent_hlist { #define PERF_ATTACH_GROUP 0x02 #define PERF_ATTACH_TASK 0x04 #define PERF_ATTACH_TASK_DATA 0x08 -#define PERF_ATTACH_ITRACE 0x10 -#define PERF_ATTACH_SCHED_CB 0x20 -#define PERF_ATTACH_CHILD 0x40 struct perf_cgroup; -struct perf_buffer; +struct ring_buffer; struct pmu_event_list { raw_spinlock_t lock; struct list_head list; }; -#define for_each_sibling_event(sibling, event) \ - if ((event)->group_leader == (event)) \ - list_for_each_entry((sibling), &(event)->sibling_list, sibling_list) - /** * struct perf_event - performance event kernel representation: */ @@ -635,16 +554,16 @@ struct perf_event { struct list_head event_entry; /* + * XXX: group_entry and sibling_list should be mutually exclusive; + * either you're a sibling on a group, or you're the group leader. + * Rework the code to always use the same list element. + * * Locked for modification by both ctx->mutex and ctx->lock; holding * either sufficies for read. */ + struct list_head group_entry; struct list_head sibling_list; - struct list_head active_list; - /* - * Node on the pinned or flexible tree located at the event context; - */ - struct rb_node group_node; - u64 group_index; + /* * We need storage to track the entries in perf_pmu_migrate_context; we * cannot use the event_entry because of RCU and we want to keep the @@ -665,27 +584,41 @@ struct perf_event { struct pmu *pmu; void *pmu_private; - enum perf_event_state state; + enum perf_event_active_state state; unsigned int attach_state; - local64_t count; - atomic64_t child_count; + local64_t count; /* PaX: fix it one day */ + atomic64_unchecked_t child_count; /* * These are the total time in nanoseconds that the event * has been enabled (i.e. 
eligible to run, and the task has * been scheduled in, if this is a per-task event) * and running (scheduled onto the CPU), respectively. + * + * They are computed from tstamp_enabled, tstamp_running and + * tstamp_stopped when the event is in INACTIVE or ACTIVE state. */ u64 total_time_enabled; u64 total_time_running; - u64 tstamp; + + /* + * These are timestamps used for computing total_time_enabled + * and total_time_running when the event is in INACTIVE or + * ACTIVE state, measured in nanoseconds from an arbitrary point + * in time. + * tstamp_enabled: the notional time when the event was enabled + * tstamp_running: the notional time when the event was scheduled on + * tstamp_stopped: in INACTIVE state, the notional time when the + * event was scheduled off. + */ + u64 tstamp_enabled; + u64 tstamp_running; + u64 tstamp_stopped; /* * timestamp shadows the actual context timing but it can * be safely used in NMI interrupt context. It reflects the - * context time as it was when the event was last scheduled in, - * or when ctx_sched_in failed to schedule the event because we - * run out of PMC. + * context time as it was when the event was last scheduled in. * * ctx_time already accounts for ctx->timestamp. Therefore to * compute ctx_time for a sample, simply add perf_clock(). @@ -705,8 +638,8 @@ struct perf_event { * These accumulate total time (in nanoseconds) that children * events have been enabled and running, respectively. 
*/ - atomic64_t child_total_time_enabled; - atomic64_t child_total_time_running; + atomic64_unchecked_t child_total_time_enabled; + atomic64_unchecked_t child_total_time_running; /* * Protect attach/detach and child_list: @@ -725,7 +658,7 @@ struct perf_event { struct mutex mmap_mutex; atomic_t mmap_count; - struct perf_buffer *rb; + struct ring_buffer *rb; struct list_head rb_entry; unsigned long rcu_batches; int rcu_pending; @@ -738,7 +671,6 @@ struct perf_event { int pending_wakeup; int pending_kill; int pending_disable; - unsigned long pending_addr; /* SIGTRAP */ struct irq_work pending; atomic_t event_limit; @@ -746,12 +678,9 @@ struct perf_event { /* address range filters */ struct perf_addr_filters_head addr_filters; /* vma address array for file-based filders */ - struct perf_addr_filter_range *addr_filter_ranges; + unsigned long *addr_filters_offs; unsigned long addr_filters_gen; - /* for aux_output events */ - struct perf_event *aux_event; - void (*destroy)(struct perf_event *); struct rcu_head rcu_head; @@ -764,7 +693,6 @@ struct perf_event { #ifdef CONFIG_BPF_SYSCALL perf_overflow_handler_t orig_overflow_handler; struct bpf_prog *prog; - u64 bpf_cookie; #endif #ifdef CONFIG_EVENT_TRACING @@ -777,21 +705,13 @@ struct perf_event { #ifdef CONFIG_CGROUP_PERF struct perf_cgroup *cgrp; /* cgroup event is attach to */ + int cgrp_defer_enabled; #endif -#ifdef CONFIG_SECURITY - void *security; -#endif struct list_head sb_list; #endif /* CONFIG_PERF_EVENTS */ }; - -struct perf_event_groups { - struct rb_root tree; - u64 index; -}; - /** * struct perf_event_context - event context structure * @@ -812,25 +732,16 @@ struct perf_event_context { struct mutex mutex; struct list_head active_ctx_list; - struct perf_event_groups pinned_groups; - struct perf_event_groups flexible_groups; + struct list_head pinned_groups; + struct list_head flexible_groups; struct list_head event_list; - - struct list_head pinned_active; - struct list_head flexible_active; - int nr_events; 
int nr_active; int is_active; int nr_stat; int nr_freq; int rotate_disable; - /* - * Set when nr_events != nr_active, except tolerant to events not - * necessary to be active due to scheduling constraints, such as cgroups. - */ - int rotate_necessary; - refcount_t refcount; + atomic_t refcount; struct task_struct *task; /* @@ -874,30 +785,20 @@ struct perf_cpu_context { ktime_t hrtimer_interval; unsigned int hrtimer_active; + struct pmu *unique_pmu; #ifdef CONFIG_CGROUP_PERF struct perf_cgroup *cgrp; - struct list_head cgrp_cpuctx_entry; #endif struct list_head sched_cb_entry; int sched_cb_usage; - - int online; - /* - * Per-CPU storage for iterators used in visit_groups_merge. The default - * storage is of size 2 to hold the CPU and any CPU event iterators. - */ - int heap_size; - struct perf_event **heap; - struct perf_event *heap_default[2]; }; struct perf_output_handle { struct perf_event *event; - struct perf_buffer *rb; + struct ring_buffer *rb; unsigned long wakeup; unsigned long size; - u64 aux_flags; union { void *addr; unsigned long head; @@ -906,9 +807,8 @@ struct perf_output_handle { }; struct bpf_perf_event_data_kern { - bpf_user_pt_regs_t *regs; + struct pt_regs *regs; struct perf_sample_data *data; - struct perf_event *event; }; #ifdef CONFIG_CGROUP_PERF @@ -947,26 +847,25 @@ perf_cgroup_from_task(struct task_struct *task, struct perf_event_context *ctx) extern void *perf_aux_output_begin(struct perf_output_handle *handle, struct perf_event *event); extern void perf_aux_output_end(struct perf_output_handle *handle, - unsigned long size); + unsigned long size, bool truncated); extern int perf_aux_output_skip(struct perf_output_handle *handle, unsigned long size); extern void *perf_get_aux(struct perf_output_handle *handle); -extern void perf_aux_output_flag(struct perf_output_handle *handle, u64 flags); -extern void perf_event_itrace_started(struct perf_event *event); extern int perf_pmu_register(struct pmu *pmu, const char *name, int type); extern 
void perf_pmu_unregister(struct pmu *pmu); +extern int perf_num_counters(void); +extern const char *perf_pmu_name(void); extern void __perf_event_task_sched_in(struct task_struct *prev, struct task_struct *task); extern void __perf_event_task_sched_out(struct task_struct *prev, struct task_struct *next); -extern int perf_event_init_task(struct task_struct *child, u64 clone_flags); +extern int perf_event_init_task(struct task_struct *child); extern void perf_event_exit_task(struct task_struct *child); extern void perf_event_free_task(struct task_struct *task); extern void perf_event_delayed_put(struct task_struct *task); extern struct file *perf_event_get(unsigned int fd); -extern const struct perf_event *perf_get_event(struct file *file); extern const struct perf_event_attr *perf_event_attrs(struct perf_event *event); extern void perf_event_print_debug(void); extern void perf_pmu_disable(struct pmu *pmu); @@ -975,9 +874,6 @@ extern void perf_sched_cb_dec(struct pmu *pmu); extern void perf_sched_cb_inc(struct pmu *pmu); extern int perf_event_task_disable(void); extern int perf_event_task_enable(void); - -extern void perf_pmu_resched(struct pmu *pmu); - extern int perf_event_refresh(struct perf_event *event, int refresh); extern void perf_event_update_userpage(struct perf_event *event); extern int perf_event_release_kernel(struct perf_event *event); @@ -989,8 +885,7 @@ perf_event_create_kernel_counter(struct perf_event_attr *attr, void *context); extern void perf_pmu_migrate_context(struct pmu *pmu, int src_cpu, int dst_cpu); -int perf_event_read_local(struct perf_event *event, u64 *value, - u64 *enabled, u64 *running); +extern u64 perf_event_read_local(struct perf_event *event); extern u64 perf_event_read_value(struct perf_event *event, u64 *enabled, u64 *running); @@ -1004,7 +899,7 @@ struct perf_sample_data { struct perf_raw_record *raw; struct perf_branch_stack *br_stack; u64 period; - union perf_sample_weight weight; + u64 weight; u64 txn; union 
perf_mem_data_src data_src; @@ -1026,16 +921,16 @@ struct perf_sample_data { u32 reserved; } cpu_entry; struct perf_callchain_entry *callchain; - u64 aux_size; + /* + * regs_user may point to task_pt_regs or to regs_user_copy, depending + * on arch details. + */ struct perf_regs regs_user; + struct pt_regs regs_user_copy; + struct perf_regs regs_intr; u64 stack_user_size; - - u64 phys_addr; - u64 cgroup; - u64 data_page_size; - u64 code_page_size; } ____cacheline_aligned; /* default value for data source */ @@ -1053,7 +948,7 @@ static inline void perf_sample_data_init(struct perf_sample_data *data, data->raw = NULL; data->br_stack = NULL; data->period = period; - data->weight.full = 0; + data->weight = 0; data->data_src.val = PERF_MEM_NA; data->txn = 0; } @@ -1077,9 +972,9 @@ extern void perf_event_output_forward(struct perf_event *event, extern void perf_event_output_backward(struct perf_event *event, struct perf_sample_data *data, struct pt_regs *regs); -extern int perf_event_output(struct perf_event *event, - struct perf_sample_data *data, - struct pt_regs *regs); +extern void perf_event_output(struct perf_event *event, + struct perf_sample_data *data, + struct pt_regs *regs); static inline bool is_default_overflow_handler(struct perf_event *event) @@ -1103,15 +998,6 @@ perf_event__output_id_sample(struct perf_event *event, extern void perf_log_lost_samples(struct perf_event *event, u64 lost); -static inline bool event_has_any_exclude_flag(struct perf_event *event) -{ - struct perf_event_attr *attr = &event->attr; - - return attr->exclude_idle || attr->exclude_user || - attr->exclude_kernel || attr->exclude_hv || - attr->exclude_guest || attr->exclude_host; -} - static inline bool is_sampling_event(struct perf_event *event) { return event->attr.sample_period != 0; @@ -1125,19 +1011,6 @@ static inline int is_software_event(struct perf_event *event) return event->event_caps & PERF_EV_CAP_SOFTWARE; } -/* - * Return 1 for event in sw context, 0 for event in hw 
context - */ -static inline int in_software_context(struct perf_event *event) -{ - return event->ctx->pmu->task_ctx_nr == perf_sw_context; -} - -static inline int is_exclusive_pmu(struct pmu *pmu) -{ - return pmu->capabilities & PERF_PMU_CAP_EXCLUSIVE; -} - extern struct static_key perf_swevent_enabled[PERF_COUNT_SW_MAX]; extern void ___perf_sw_event(u32, u64, struct pt_regs *, u64); @@ -1148,18 +1021,12 @@ static inline void perf_arch_fetch_caller_regs(struct pt_regs *regs, unsigned lo #endif /* - * When generating a perf sample in-line, instead of from an interrupt / - * exception, we lack a pt_regs. This is typically used from software events - * like: SW_CONTEXT_SWITCHES, SW_MIGRATIONS and the tie-in with tracepoints. - * - * We typically don't need a full set, but (for x86) do require: + * Take a snapshot of the regs. Skip ip and frame pointer to + * the nth caller. We only need a few of the regs: * - ip for PERF_SAMPLE_IP * - cs for user_mode() tests - * - sp for PERF_SAMPLE_CALLCHAIN - * - eflags for MISC bits and CALLCHAIN (see: perf_hw_regs()) - * - * NOTE: assumes @regs is otherwise already 0 filled; this is important for - * things like PERF_SAMPLE_REGS_INTR. + * - bp for callchains + * - eflags, for future purposes, just in case */ static inline void perf_fetch_caller_regs(struct pt_regs *regs) { @@ -1180,24 +1047,30 @@ DECLARE_PER_CPU(struct pt_regs, __perf_regs[4]); * which is guaranteed by us not actually scheduling inside other swevents * because those disable preemption. 
*/ -static __always_inline void __perf_sw_event_sched(u32 event_id, u64 nr, u64 addr) +static __always_inline void +perf_sw_event_sched(u32 event_id, u64 nr, u64 addr) { - struct pt_regs *regs = this_cpu_ptr(&__perf_regs[0]); + if (static_key_false(&perf_swevent_enabled[event_id])) { + struct pt_regs *regs = this_cpu_ptr(&__perf_regs[0]); - perf_fetch_caller_regs(regs); - ___perf_sw_event(event_id, nr, regs, addr); + perf_fetch_caller_regs(regs); + ___perf_sw_event(event_id, nr, regs, addr); + } } extern struct static_key_false perf_sched_events; -static __always_inline bool __perf_sw_enabled(int swevt) +static __always_inline bool +perf_sw_migrate_enabled(void) { - return static_key_false(&perf_swevent_enabled[swevt]); + if (static_key_false(&perf_swevent_enabled[PERF_COUNT_SW_CPU_MIGRATIONS])) + return true; + return false; } static inline void perf_event_task_migrate(struct task_struct *task) { - if (__perf_sw_enabled(PERF_COUNT_SW_CPU_MIGRATIONS)) + if (perf_sw_migrate_enabled()) task->sched_migrated = 1; } @@ -1207,9 +1080,11 @@ static inline void perf_event_task_sched_in(struct task_struct *prev, if (static_branch_unlikely(&perf_sched_events)) __perf_event_task_sched_in(prev, task); - if (__perf_sw_enabled(PERF_COUNT_SW_CPU_MIGRATIONS) && - task->sched_migrated) { - __perf_sw_event_sched(PERF_COUNT_SW_CPU_MIGRATIONS, 1, 0); + if (perf_sw_migrate_enabled() && task->sched_migrated) { + struct pt_regs *regs = this_cpu_ptr(&__perf_regs[0]); + + perf_fetch_caller_regs(regs); + ___perf_sw_event(PERF_COUNT_SW_CPU_MIGRATIONS, 1, regs, 0); task->sched_migrated = 0; } } @@ -1217,39 +1092,25 @@ static inline void perf_event_task_sched_in(struct task_struct *prev, static inline void perf_event_task_sched_out(struct task_struct *prev, struct task_struct *next) { - if (__perf_sw_enabled(PERF_COUNT_SW_CONTEXT_SWITCHES)) - __perf_sw_event_sched(PERF_COUNT_SW_CONTEXT_SWITCHES, 1, 0); - -#ifdef CONFIG_CGROUP_PERF - if (__perf_sw_enabled(PERF_COUNT_SW_CGROUP_SWITCHES) && - 
perf_cgroup_from_task(prev, NULL) != - perf_cgroup_from_task(next, NULL)) - __perf_sw_event_sched(PERF_COUNT_SW_CGROUP_SWITCHES, 1, 0); -#endif + perf_sw_event_sched(PERF_COUNT_SW_CONTEXT_SWITCHES, 1, 0); if (static_branch_unlikely(&perf_sched_events)) __perf_event_task_sched_out(prev, next); } +static inline u64 __perf_event_count(struct perf_event *event) +{ + return local64_read(&event->count) + atomic64_read_unchecked(&event->child_count); +} + extern void perf_event_mmap(struct vm_area_struct *vma); - -extern void perf_event_ksymbol(u16 ksym_type, u64 addr, u32 len, - bool unregister, const char *sym); -extern void perf_event_bpf_event(struct bpf_prog *prog, - enum perf_bpf_event_type type, - u16 flags); - extern struct perf_guest_info_callbacks *perf_guest_cbs; extern int perf_register_guest_info_callbacks(struct perf_guest_info_callbacks *callbacks); extern int perf_unregister_guest_info_callbacks(struct perf_guest_info_callbacks *callbacks); extern void perf_event_exec(void); extern void perf_event_comm(struct task_struct *tsk, bool exec); -extern void perf_event_namespaces(struct task_struct *tsk); extern void perf_event_fork(struct task_struct *tsk); -extern void perf_event_text_poke(const void *addr, - const void *old_bytes, size_t old_len, - const void *new_bytes, size_t new_len); /* Callchains */ DECLARE_PER_CPU(struct perf_callchain_entry, perf_callchain_entry); @@ -1259,11 +1120,8 @@ extern void perf_callchain_kernel(struct perf_callchain_entry_ctx *entry, struct extern struct perf_callchain_entry * get_perf_callchain(struct pt_regs *regs, u32 init_nr, bool kernel, bool user, u32 max_stack, bool crosstask, bool add_mark); -extern struct perf_callchain_entry *perf_callchain(struct perf_event *event, struct pt_regs *regs); extern int get_callchain_buffers(int max_stack); extern void put_callchain_buffers(void); -extern struct perf_callchain_entry *get_callchain_entry(int *rctx); -extern void put_callchain_entry(int rctx); extern int 
sysctl_perf_event_max_stack; extern int sysctl_perf_event_max_contexts_per_stack; @@ -1293,55 +1151,41 @@ static inline int perf_callchain_store(struct perf_callchain_entry_ctx *ctx, u64 } } -extern int sysctl_perf_event_paranoid; +extern int sysctl_perf_event_legitimately_concerned; extern int sysctl_perf_event_mlock; extern int sysctl_perf_event_sample_rate; extern int sysctl_perf_cpu_time_max_percent; extern void perf_sample_event_took(u64 sample_len_ns); -int perf_proc_update_handler(struct ctl_table *table, int write, - void *buffer, size_t *lenp, loff_t *ppos); -int perf_cpu_time_max_percent_handler(struct ctl_table *table, int write, - void *buffer, size_t *lenp, loff_t *ppos); +extern int perf_proc_update_handler(struct ctl_table *table, int write, + void __user *buffer, size_t *lenp, + loff_t *ppos); +extern int perf_cpu_time_max_percent_handler(struct ctl_table *table, int write, + void __user *buffer, size_t *lenp, + loff_t *ppos); + int perf_event_max_stack_handler(struct ctl_table *table, int write, - void *buffer, size_t *lenp, loff_t *ppos); + void __user *buffer, size_t *lenp, loff_t *ppos); -/* Access to perf_event_open(2) syscall. */ -#define PERF_SECURITY_OPEN 0 - -/* Finer grained perf_event_open(2) access control. 
*/ -#define PERF_SECURITY_CPU 1 -#define PERF_SECURITY_KERNEL 2 -#define PERF_SECURITY_TRACEPOINT 3 - -static inline int perf_is_paranoid(void) +static inline bool perf_paranoid_any(void) { - return sysctl_perf_event_paranoid > -1; + return sysctl_perf_event_legitimately_concerned > 2; } -static inline int perf_allow_kernel(struct perf_event_attr *attr) +static inline bool perf_paranoid_tracepoint_raw(void) { - if (sysctl_perf_event_paranoid > 1 && !perfmon_capable()) - return -EACCES; - - return security_perf_event_open(attr, PERF_SECURITY_KERNEL); + return sysctl_perf_event_legitimately_concerned > -1; } -static inline int perf_allow_cpu(struct perf_event_attr *attr) +static inline bool perf_paranoid_cpu(void) { - if (sysctl_perf_event_paranoid > 0 && !perfmon_capable()) - return -EACCES; - - return security_perf_event_open(attr, PERF_SECURITY_CPU); + return sysctl_perf_event_legitimately_concerned > 0; } -static inline int perf_allow_tracepoint(struct perf_event_attr *attr) +static inline bool perf_paranoid_kernel(void) { - if (sysctl_perf_event_paranoid > -1 && !perfmon_capable()) - return -EPERM; - - return security_perf_event_open(attr, PERF_SECURITY_TRACEPOINT); + return sysctl_perf_event_legitimately_concerned > 1; } extern void perf_event_init(void); @@ -1356,9 +1200,6 @@ extern void perf_bp_event(struct perf_event *event, void *data); (user_mode(regs) ? 
PERF_RECORD_MISC_USER : PERF_RECORD_MISC_KERNEL) # define perf_instruction_pointer(regs) instruction_pointer(regs) #endif -#ifndef perf_arch_bpf_user_pt_regs -# define perf_arch_bpf_user_pt_regs(regs) regs -#endif static inline bool has_branch_stack(struct perf_event *event) { @@ -1402,14 +1243,11 @@ perf_event_addr_filters(struct perf_event *event) extern void perf_event_addr_filters_sync(struct perf_event *event); extern int perf_output_begin(struct perf_output_handle *handle, - struct perf_sample_data *data, struct perf_event *event, unsigned int size); extern int perf_output_begin_forward(struct perf_output_handle *handle, - struct perf_sample_data *data, - struct perf_event *event, - unsigned int size); + struct perf_event *event, + unsigned int size); extern int perf_output_begin_backward(struct perf_output_handle *handle, - struct perf_sample_data *data, struct perf_event *event, unsigned int size); @@ -1418,9 +1256,6 @@ extern unsigned int perf_output_copy(struct perf_output_handle *handle, const void *buf, unsigned int len); extern unsigned int perf_output_skip(struct perf_output_handle *handle, unsigned int len); -extern long perf_output_copy_aux(struct perf_output_handle *aux_handle, - struct perf_output_handle *handle, - unsigned long from, unsigned long to); extern int perf_swevent_get_recursion_context(void); extern void perf_swevent_put_recursion_context(int rctx); extern u64 perf_swevent_set_period(struct perf_event *event); @@ -1429,16 +1264,13 @@ extern void perf_event_disable(struct perf_event *event); extern void perf_event_disable_local(struct perf_event *event); extern void perf_event_disable_inatomic(struct perf_event *event); extern void perf_event_task_tick(void); -extern int perf_event_account_interrupt(struct perf_event *event); -extern int perf_event_period(struct perf_event *event, u64 value); -extern u64 perf_event_pause(struct perf_event *event, bool reset); #else /* !CONFIG_PERF_EVENTS: */ static inline void * 
perf_aux_output_begin(struct perf_output_handle *handle, struct perf_event *event) { return NULL; } static inline void -perf_aux_output_end(struct perf_output_handle *handle, unsigned long size) - { } +perf_aux_output_end(struct perf_output_handle *handle, unsigned long size, + bool truncated) { } static inline int perf_aux_output_skip(struct perf_output_handle *handle, unsigned long size) { return -EINVAL; } @@ -1452,25 +1284,16 @@ perf_event_task_sched_in(struct task_struct *prev, static inline void perf_event_task_sched_out(struct task_struct *prev, struct task_struct *next) { } -static inline int perf_event_init_task(struct task_struct *child, - u64 clone_flags) { return 0; } +static inline int perf_event_init_task(struct task_struct *child) { return 0; } static inline void perf_event_exit_task(struct task_struct *child) { } static inline void perf_event_free_task(struct task_struct *task) { } static inline void perf_event_delayed_put(struct task_struct *task) { } static inline struct file *perf_event_get(unsigned int fd) { return ERR_PTR(-EINVAL); } -static inline const struct perf_event *perf_get_event(struct file *file) -{ - return ERR_PTR(-EINVAL); -} static inline const struct perf_event_attr *perf_event_attrs(struct perf_event *event) { return ERR_PTR(-EINVAL); } -static inline int perf_event_read_local(struct perf_event *event, u64 *value, - u64 *enabled, u64 *running) -{ - return -EINVAL; -} +static inline u64 perf_event_read_local(struct perf_event *event) { return -EINVAL; } static inline void perf_event_print_debug(void) { } static inline int perf_event_task_disable(void) { return -EINVAL; } static inline int perf_event_task_enable(void) { return -EINVAL; } @@ -1482,6 +1305,8 @@ static inline int perf_event_refresh(struct perf_event *event, int refresh) static inline void perf_sw_event(u32 event_id, u64 nr, struct pt_regs *regs, u64 addr) { } static inline void +perf_sw_event_sched(u32 event_id, u64 nr, u64 addr) { } +static inline void 
perf_bp_event(struct perf_event *event, void *data) { } static inline int perf_register_guest_info_callbacks @@ -1490,22 +1315,9 @@ static inline int perf_unregister_guest_info_callbacks (struct perf_guest_info_callbacks *callbacks) { return 0; } static inline void perf_event_mmap(struct vm_area_struct *vma) { } - -typedef int (perf_ksymbol_get_name_f)(char *name, int name_len, void *data); -static inline void perf_event_ksymbol(u16 ksym_type, u64 addr, u32 len, - bool unregister, const char *sym) { } -static inline void perf_event_bpf_event(struct bpf_prog *prog, - enum perf_bpf_event_type type, - u16 flags) { } static inline void perf_event_exec(void) { } static inline void perf_event_comm(struct task_struct *tsk, bool exec) { } -static inline void perf_event_namespaces(struct task_struct *tsk) { } static inline void perf_event_fork(struct task_struct *tsk) { } -static inline void perf_event_text_poke(const void *addr, - const void *old_bytes, - size_t old_len, - const void *new_bytes, - size_t new_len) { } static inline void perf_event_init(void) { } static inline int perf_swevent_get_recursion_context(void) { return -1; } static inline void perf_swevent_put_recursion_context(int rctx) { } @@ -1515,14 +1327,6 @@ static inline void perf_event_disable(struct perf_event *event) { } static inline int __perf_event_disable(void *info) { return -1; } static inline void perf_event_task_tick(void) { } static inline int perf_event_release_kernel(struct perf_event *event) { return 0; } -static inline int perf_event_period(struct perf_event *event, u64 value) -{ - return -EINVAL; -} -static inline u64 perf_event_pause(struct perf_event *event, bool reset) -{ - return 0; -} #endif #if defined(CONFIG_PERF_EVENTS) && defined(CONFIG_CPU_SUP_INTEL) @@ -1542,7 +1346,7 @@ struct perf_pmu_events_attr { struct device_attribute attr; u64 id; const char *event_str; -}; +} __do_const; struct perf_pmu_events_ht_attr { struct device_attribute attr; @@ -1551,18 +1355,6 @@ struct 
perf_pmu_events_ht_attr { const char *event_str_noht; }; -struct perf_pmu_events_hybrid_attr { - struct device_attribute attr; - u64 id; - const char *event_str; - u64 pmu_type; -}; - -struct perf_pmu_format_hybrid_attr { - struct device_attribute attr; - u64 pmu_type; -}; - ssize_t perf_event_sysfs_show(struct device *dev, struct device_attribute *attr, char *page); @@ -1579,12 +1371,6 @@ static struct perf_pmu_events_attr _var = { \ .event_str = _str, \ }; -#define PMU_EVENT_ATTR_ID(_name, _show, _id) \ - (&((struct perf_pmu_events_attr[]) { \ - { .attr = __ATTR(_name, 0444, _show, NULL), \ - .id = _id, } \ - })[0].attr.attr) - #define PMU_FORMAT_ATTR(_name, _format) \ static ssize_t \ _name##_show(struct device *dev, \ @@ -1606,12 +1392,4 @@ int perf_event_exit_cpu(unsigned int cpu); #define perf_event_exit_cpu NULL #endif -extern void __weak arch_perf_update_userpage(struct perf_event *event, - struct perf_event_mmap_page *userpg, - u64 now); - -#ifdef CONFIG_MMU -extern __weak u64 arch_perf_get_page_size(struct mm_struct *mm, unsigned long addr); -#endif - #endif /* _LINUX_PERF_EVENT_H */ diff --git a/include/linux/perf_regs.h b/include/linux/perf_regs.h index f632c5725f..a5f98d53d7 100644 --- a/include/linux/perf_regs.h +++ b/include/linux/perf_regs.h @@ -1,9 +1,6 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_PERF_REGS_H #define _LINUX_PERF_REGS_H -#include - struct perf_regs { __u64 abi; struct pt_regs *regs; @@ -11,20 +8,13 @@ struct perf_regs { #ifdef CONFIG_HAVE_PERF_REGS #include - -#ifndef PERF_REG_EXTENDED_MASK -#define PERF_REG_EXTENDED_MASK 0 -#endif - u64 perf_reg_value(struct pt_regs *regs, int idx); int perf_reg_validate(u64 mask); u64 perf_reg_abi(struct task_struct *task); void perf_get_regs_user(struct perf_regs *regs_user, - struct pt_regs *regs); + struct pt_regs *regs, + struct pt_regs *regs_user_copy); #else - -#define PERF_REG_EXTENDED_MASK 0 - static inline u64 perf_reg_value(struct pt_regs *regs, int idx) { return 0; @@ -41,7 
+31,8 @@ static inline u64 perf_reg_abi(struct task_struct *task) } static inline void perf_get_regs_user(struct perf_regs *regs_user, - struct pt_regs *regs) + struct pt_regs *regs, + struct pt_regs *regs_user_copy) { regs_user->regs = task_pt_regs(current); regs_user->abi = perf_reg_abi(current); diff --git a/include/linux/personality.h b/include/linux/personality.h index fc16fbc659..aeb7892b24 100644 --- a/include/linux/personality.h +++ b/include/linux/personality.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_PERSONALITY_H #define _LINUX_PERSONALITY_H diff --git a/include/linux/pfn.h b/include/linux/pfn.h index 14bc053c53..1132953235 100644 --- a/include/linux/pfn.h +++ b/include/linux/pfn.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_PFN_H_ #define _LINUX_PFN_H_ diff --git a/include/linux/pfn_t.h b/include/linux/pfn_t.h index 2d9148221e..a3d90b9da1 100644 --- a/include/linux/pfn_t.h +++ b/include/linux/pfn_t.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_PFN_T_H_ #define _LINUX_PFN_T_H_ #include @@ -9,22 +8,12 @@ * PFN_SG_LAST - pfn references a page and is the last scatterlist entry * PFN_DEV - pfn is not covered by system memmap by default * PFN_MAP - pfn has a dynamic page mapping established by a device driver - * PFN_SPECIAL - for CONFIG_FS_DAX_LIMITED builds to allow XIP, but not - * get_user_pages */ -#define PFN_FLAGS_MASK (((u64) (~PAGE_MASK)) << (BITS_PER_LONG_LONG - PAGE_SHIFT)) +#define PFN_FLAGS_MASK (((u64) ~PAGE_MASK) << (BITS_PER_LONG_LONG - PAGE_SHIFT)) #define PFN_SG_CHAIN (1ULL << (BITS_PER_LONG_LONG - 1)) #define PFN_SG_LAST (1ULL << (BITS_PER_LONG_LONG - 2)) #define PFN_DEV (1ULL << (BITS_PER_LONG_LONG - 3)) #define PFN_MAP (1ULL << (BITS_PER_LONG_LONG - 4)) -#define PFN_SPECIAL (1ULL << (BITS_PER_LONG_LONG - 5)) - -#define PFN_FLAGS_TRACE \ - { PFN_SPECIAL, "SPECIAL" }, \ - { PFN_SG_CHAIN, "SG_CHAIN" }, \ - { PFN_SG_LAST, "SG_LAST" }, \ - { PFN_DEV, "DEV" }, \ 
- { PFN_MAP, "MAP" } static inline pfn_t __pfn_to_pfn_t(unsigned long pfn, u64 flags) { @@ -66,6 +55,13 @@ static inline phys_addr_t pfn_t_to_phys(pfn_t pfn) return PFN_PHYS(pfn_t_to_pfn(pfn)); } +static inline void *pfn_t_to_virt(pfn_t pfn) +{ + if (pfn_t_has_page(pfn)) + return __va(pfn_t_to_phys(pfn)); + return NULL; +} + static inline pfn_t page_to_pfn_t(struct page *page) { return pfn_to_pfn_t(page_to_pfn(page)); @@ -88,16 +84,9 @@ static inline pmd_t pfn_t_pmd(pfn_t pfn, pgprot_t pgprot) { return pfn_pmd(pfn_t_to_pfn(pfn), pgprot); } - -#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD -static inline pud_t pfn_t_pud(pfn_t pfn, pgprot_t pgprot) -{ - return pfn_pud(pfn_t_to_pfn(pfn), pgprot); -} -#endif #endif -#ifdef CONFIG_ARCH_HAS_PTE_DEVMAP +#ifdef __HAVE_ARCH_PTE_DEVMAP static inline bool pfn_t_devmap(pfn_t pfn) { const u64 flags = PFN_DEV|PFN_MAP; @@ -111,21 +100,5 @@ static inline bool pfn_t_devmap(pfn_t pfn) } pte_t pte_mkdevmap(pte_t pte); pmd_t pmd_mkdevmap(pmd_t pmd); -#if defined(CONFIG_TRANSPARENT_HUGEPAGE) && \ - defined(CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD) -pud_t pud_mkdevmap(pud_t pud); #endif -#endif /* CONFIG_ARCH_HAS_PTE_DEVMAP */ - -#ifdef CONFIG_ARCH_HAS_PTE_SPECIAL -static inline bool pfn_t_special(pfn_t pfn) -{ - return (pfn.val & PFN_SPECIAL) == PFN_SPECIAL; -} -#else -static inline bool pfn_t_special(pfn_t pfn) -{ - return false; -} -#endif /* CONFIG_ARCH_HAS_PTE_SPECIAL */ #endif /* _LINUX_PFN_T_H_ */ diff --git a/include/linux/phonet.h b/include/linux/phonet.h index bc7d1e529e..f691b04fc5 100644 --- a/include/linux/phonet.h +++ b/include/linux/phonet.h @@ -1,10 +1,23 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /** * file phonet.h * * Phonet sockets kernel interface * * Copyright (C) 2008 Nokia Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * version 2 as published by the Free Software Foundation. 
+ * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA + * 02110-1301 USA */ #ifndef LINUX_PHONET_H #define LINUX_PHONET_H diff --git a/include/linux/phy.h b/include/linux/phy.h index 736e1d1a47..e25f1830fb 100644 --- a/include/linux/phy.h +++ b/include/linux/phy.h @@ -1,11 +1,16 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * Framework and drivers for configuring and reading different PHYs - * Based on code in sungem_phy.c and (long-removed) gianfar_phy.c + * Based on code in sungem_phy.c and gianfar_phy.c * * Author: Andy Fleming * * Copyright (c) 2004 Freescale Semiconductor, Inc. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. 
+ * */ #ifndef __PHY_H @@ -14,19 +19,12 @@ #include #include #include -#include -#include #include #include -#include #include #include #include #include -#include -#include -#include -#include #include @@ -43,92 +41,36 @@ #define PHY_1000BT_FEATURES (SUPPORTED_1000baseT_Half | \ SUPPORTED_1000baseT_Full) -extern __ETHTOOL_DECLARE_LINK_MODE_MASK(phy_basic_features) __ro_after_init; -extern __ETHTOOL_DECLARE_LINK_MODE_MASK(phy_basic_t1_features) __ro_after_init; -extern __ETHTOOL_DECLARE_LINK_MODE_MASK(phy_gbit_features) __ro_after_init; -extern __ETHTOOL_DECLARE_LINK_MODE_MASK(phy_gbit_fibre_features) __ro_after_init; -extern __ETHTOOL_DECLARE_LINK_MODE_MASK(phy_gbit_all_ports_features) __ro_after_init; -extern __ETHTOOL_DECLARE_LINK_MODE_MASK(phy_10gbit_features) __ro_after_init; -extern __ETHTOOL_DECLARE_LINK_MODE_MASK(phy_10gbit_fec_features) __ro_after_init; -extern __ETHTOOL_DECLARE_LINK_MODE_MASK(phy_10gbit_full_features) __ro_after_init; +#define PHY_BASIC_FEATURES (PHY_10BT_FEATURES | \ + PHY_100BT_FEATURES | \ + PHY_DEFAULT_FEATURES) -#define PHY_BASIC_FEATURES ((unsigned long *)&phy_basic_features) -#define PHY_BASIC_T1_FEATURES ((unsigned long *)&phy_basic_t1_features) -#define PHY_GBIT_FEATURES ((unsigned long *)&phy_gbit_features) -#define PHY_GBIT_FIBRE_FEATURES ((unsigned long *)&phy_gbit_fibre_features) -#define PHY_GBIT_ALL_PORTS_FEATURES ((unsigned long *)&phy_gbit_all_ports_features) -#define PHY_10GBIT_FEATURES ((unsigned long *)&phy_10gbit_features) -#define PHY_10GBIT_FEC_FEATURES ((unsigned long *)&phy_10gbit_fec_features) -#define PHY_10GBIT_FULL_FEATURES ((unsigned long *)&phy_10gbit_full_features) +#define PHY_GBIT_FEATURES (PHY_BASIC_FEATURES | \ + PHY_1000BT_FEATURES) -extern const int phy_basic_ports_array[3]; -extern const int phy_fibre_port_array[1]; -extern const int phy_all_ports_features_array[7]; -extern const int phy_10_100_features_array[4]; -extern const int phy_basic_t1_features_array[2]; -extern const int 
phy_gbit_features_array[2]; -extern const int phy_10gbit_features_array[1]; /* * Set phydev->irq to PHY_POLL if interrupts are not supported, - * or not desired for this PHY. Set to PHY_MAC_INTERRUPT if - * the attached MAC driver handles the interrupt + * or not desired for this PHY. Set to PHY_IGNORE_INTERRUPT if + * the attached driver handles the interrupt */ #define PHY_POLL -1 -#define PHY_MAC_INTERRUPT -2 +#define PHY_IGNORE_INTERRUPT -2 -#define PHY_IS_INTERNAL 0x00000001 -#define PHY_RST_AFTER_CLK_EN 0x00000002 -#define PHY_POLL_CABLE_TEST 0x00000004 +#define PHY_HAS_INTERRUPT 0x00000001 +#define PHY_HAS_MAGICANEG 0x00000002 +#define PHY_IS_INTERNAL 0x00000004 #define MDIO_DEVICE_IS_PHY 0x80000000 -/** - * enum phy_interface_t - Interface Mode definitions - * - * @PHY_INTERFACE_MODE_NA: Not Applicable - don't touch - * @PHY_INTERFACE_MODE_INTERNAL: No interface, MAC and PHY combined - * @PHY_INTERFACE_MODE_MII: Median-independent interface - * @PHY_INTERFACE_MODE_GMII: Gigabit median-independent interface - * @PHY_INTERFACE_MODE_SGMII: Serial gigabit media-independent interface - * @PHY_INTERFACE_MODE_TBI: Ten Bit Interface - * @PHY_INTERFACE_MODE_REVMII: Reverse Media Independent Interface - * @PHY_INTERFACE_MODE_RMII: Reduced Media Independent Interface - * @PHY_INTERFACE_MODE_REVRMII: Reduced Media Independent Interface in PHY role - * @PHY_INTERFACE_MODE_RGMII: Reduced gigabit media-independent interface - * @PHY_INTERFACE_MODE_RGMII_ID: RGMII with Internal RX+TX delay - * @PHY_INTERFACE_MODE_RGMII_RXID: RGMII with Internal RX delay - * @PHY_INTERFACE_MODE_RGMII_TXID: RGMII with Internal RX delay - * @PHY_INTERFACE_MODE_RTBI: Reduced TBI - * @PHY_INTERFACE_MODE_SMII: ??? 
MII - * @PHY_INTERFACE_MODE_XGMII: 10 gigabit media-independent interface - * @PHY_INTERFACE_MODE_XLGMII:40 gigabit media-independent interface - * @PHY_INTERFACE_MODE_MOCA: Multimedia over Coax - * @PHY_INTERFACE_MODE_QSGMII: Quad SGMII - * @PHY_INTERFACE_MODE_TRGMII: Turbo RGMII - * @PHY_INTERFACE_MODE_100BASEX: 100 BaseX - * @PHY_INTERFACE_MODE_1000BASEX: 1000 BaseX - * @PHY_INTERFACE_MODE_2500BASEX: 2500 BaseX - * @PHY_INTERFACE_MODE_5GBASER: 5G BaseR - * @PHY_INTERFACE_MODE_RXAUI: Reduced XAUI - * @PHY_INTERFACE_MODE_XAUI: 10 Gigabit Attachment Unit Interface - * @PHY_INTERFACE_MODE_10GBASER: 10G BaseR - * @PHY_INTERFACE_MODE_25GBASER: 25G BaseR - * @PHY_INTERFACE_MODE_USXGMII: Universal Serial 10GE MII - * @PHY_INTERFACE_MODE_10GKR: 10GBASE-KR - with Clause 73 AN - * @PHY_INTERFACE_MODE_MAX: Book keeping - * - * Describes the interface between the MAC and PHY. - */ +/* Interface Mode definitions */ typedef enum { PHY_INTERFACE_MODE_NA, - PHY_INTERFACE_MODE_INTERNAL, PHY_INTERFACE_MODE_MII, PHY_INTERFACE_MODE_GMII, PHY_INTERFACE_MODE_SGMII, PHY_INTERFACE_MODE_TBI, PHY_INTERFACE_MODE_REVMII, PHY_INTERFACE_MODE_RMII, - PHY_INTERFACE_MODE_REVRMII, PHY_INTERFACE_MODE_RGMII, PHY_INTERFACE_MODE_RGMII_ID, PHY_INTERFACE_MODE_RGMII_RXID, @@ -136,47 +78,22 @@ typedef enum { PHY_INTERFACE_MODE_RTBI, PHY_INTERFACE_MODE_SMII, PHY_INTERFACE_MODE_XGMII, - PHY_INTERFACE_MODE_XLGMII, PHY_INTERFACE_MODE_MOCA, PHY_INTERFACE_MODE_QSGMII, PHY_INTERFACE_MODE_TRGMII, - PHY_INTERFACE_MODE_100BASEX, - PHY_INTERFACE_MODE_1000BASEX, - PHY_INTERFACE_MODE_2500BASEX, - PHY_INTERFACE_MODE_5GBASER, - PHY_INTERFACE_MODE_RXAUI, - PHY_INTERFACE_MODE_XAUI, - /* 10GBASE-R, XFI, SFI - single lane 10G Serdes */ - PHY_INTERFACE_MODE_10GBASER, - PHY_INTERFACE_MODE_25GBASER, - PHY_INTERFACE_MODE_USXGMII, - /* 10GBASE-KR - with Clause 73 AN */ - PHY_INTERFACE_MODE_10GKR, PHY_INTERFACE_MODE_MAX, } phy_interface_t; -/* - * phy_supported_speeds - return all speeds currently supported by a PHY device - */ 
-unsigned int phy_supported_speeds(struct phy_device *phy, - unsigned int *speeds, - unsigned int size); - /** - * phy_modes - map phy_interface_t enum to device tree binding of phy-mode - * @interface: enum phy_interface_t value - * - * Description: maps enum &phy_interface_t defined in this file + * It maps 'enum phy_interface_t' found in include/linux/phy.h * into the device tree binding of 'phy-mode', so that Ethernet - * device driver can get PHY interface from device tree. + * device driver can get phy interface from device tree. */ static inline const char *phy_modes(phy_interface_t interface) { switch (interface) { case PHY_INTERFACE_MODE_NA: return ""; - case PHY_INTERFACE_MODE_INTERNAL: - return "internal"; case PHY_INTERFACE_MODE_MII: return "mii"; case PHY_INTERFACE_MODE_GMII: @@ -189,8 +106,6 @@ static inline const char *phy_modes(phy_interface_t interface) return "rev-mii"; case PHY_INTERFACE_MODE_RMII: return "rmii"; - case PHY_INTERFACE_MODE_REVRMII: - return "rev-rmii"; case PHY_INTERFACE_MODE_RGMII: return "rgmii"; case PHY_INTERFACE_MODE_RGMII_ID: @@ -205,34 +120,12 @@ static inline const char *phy_modes(phy_interface_t interface) return "smii"; case PHY_INTERFACE_MODE_XGMII: return "xgmii"; - case PHY_INTERFACE_MODE_XLGMII: - return "xlgmii"; case PHY_INTERFACE_MODE_MOCA: return "moca"; case PHY_INTERFACE_MODE_QSGMII: return "qsgmii"; case PHY_INTERFACE_MODE_TRGMII: return "trgmii"; - case PHY_INTERFACE_MODE_1000BASEX: - return "1000base-x"; - case PHY_INTERFACE_MODE_2500BASEX: - return "2500base-x"; - case PHY_INTERFACE_MODE_5GBASER: - return "5gbase-r"; - case PHY_INTERFACE_MODE_RXAUI: - return "rxaui"; - case PHY_INTERFACE_MODE_XAUI: - return "xaui"; - case PHY_INTERFACE_MODE_10GBASER: - return "10gbase-r"; - case PHY_INTERFACE_MODE_25GBASER: - return "25gbase-r"; - case PHY_INTERFACE_MODE_USXGMII: - return "usxgmii"; - case PHY_INTERFACE_MODE_10GKR: - return "10gbase-kr"; - case PHY_INTERFACE_MODE_100BASEX: - return "100base-x"; default: 
return "unknown"; } @@ -240,77 +133,29 @@ static inline const char *phy_modes(phy_interface_t interface) #define PHY_INIT_TIMEOUT 100000 +#define PHY_STATE_TIME 1 #define PHY_FORCE_TIMEOUT 10 +#define PHY_AN_TIMEOUT 10 #define PHY_MAX_ADDR 32 /* Used when trying to connect to a specific phy (mii bus id:phy device id) */ #define PHY_ID_FMT "%s:%02x" -#define MII_BUS_ID_SIZE 61 +/* + * Need to be a little smaller than phydev->dev.bus_id to leave room + * for the ":%02x" + */ +#define MII_BUS_ID_SIZE (20 - 3) + +/* Or MII_ADDR_C45 into regnum for read/write on mii_bus to enable the 21 bit + IEEE 802.3ae clause 45 addressing mode used by 10GIGE phy chips. */ +#define MII_ADDR_C45 (1<<30) struct device; -struct phylink; -struct sfp_bus; -struct sfp_upstream_ops; struct sk_buff; -/** - * struct mdio_bus_stats - Statistics counters for MDIO busses - * @transfers: Total number of transfers, i.e. @writes + @reads - * @errors: Number of MDIO transfers that returned an error - * @writes: Number of write transfers - * @reads: Number of read transfers - * @syncp: Synchronisation for incrementing statistics - */ -struct mdio_bus_stats { - u64_stats_t transfers; - u64_stats_t errors; - u64_stats_t writes; - u64_stats_t reads; - /* Must be last, add new statistics above */ - struct u64_stats_sync syncp; -}; - -/** - * struct phy_package_shared - Shared information in PHY packages - * @addr: Common PHY address used to combine PHYs in one package - * @refcnt: Number of PHYs connected to this shared data - * @flags: Initialization of PHY package - * @priv_size: Size of the shared private data @priv - * @priv: Driver private data shared across a PHY package - * - * Represents a shared structure between different phydev's in the same - * package, for example a quad PHY. See phy_package_join() and - * phy_package_leave(). 
- */ -struct phy_package_shared { - int addr; - refcount_t refcnt; - unsigned long flags; - size_t priv_size; - - /* private data pointer */ - /* note that this pointer is shared between different phydevs and - * the user has to take care of appropriate locking. It is allocated - * and freed automatically by phy_package_join() and - * phy_package_leave(). - */ - void *priv; -}; - -/* used as bit number in atomic bitops */ -#define PHY_SHARED_F_INIT_DONE 0 -#define PHY_SHARED_F_PROBE_DONE 1 - -/** - * struct mii_bus - Represents an MDIO bus - * - * @owner: Who owns this device - * @name: User friendly name for this MDIO device, or driver name - * @id: Unique identifier for this bus, typical from bus hierarchy - * @priv: Driver private data - * +/* * The Bus class for PHYs. Devices which provide access to * PHYs should register using this structure */ @@ -319,93 +164,50 @@ struct mii_bus { const char *name; char id[MII_BUS_ID_SIZE]; void *priv; - /** @read: Perform a read transfer on the bus */ int (*read)(struct mii_bus *bus, int addr, int regnum); - /** @write: Perform a write transfer on the bus */ int (*write)(struct mii_bus *bus, int addr, int regnum, u16 val); - /** @reset: Perform a reset of the bus */ int (*reset)(struct mii_bus *bus); - /** @stats: Statistic counters per device on the bus */ - struct mdio_bus_stats stats[PHY_MAX_ADDR]; - - /** - * @mdio_lock: A lock to ensure that only one thing can read/write + /* + * A lock to ensure that only one thing can read/write * the MDIO bus at a time */ struct mutex mdio_lock; - /** @parent: Parent device of this bus */ struct device *parent; - /** @state: State of bus structure */ enum { MDIOBUS_ALLOCATED = 1, MDIOBUS_REGISTERED, MDIOBUS_UNREGISTERED, MDIOBUS_RELEASED, } state; - - /** @dev: Kernel device representation */ struct device dev; - /** @mdio_map: list of all MDIO devices on bus */ + /* list of all PHYs on bus */ struct mdio_device *mdio_map[PHY_MAX_ADDR]; - /** @phy_mask: PHY addresses to be ignored 
when probing */ + /* PHY addresses to be ignored when probing */ u32 phy_mask; - /** @phy_ignore_ta_mask: PHY addresses to ignore the TA/read failure */ + /* PHY addresses to ignore the TA/read failure */ u32 phy_ignore_ta_mask; - /** - * @irq: An array of interrupts, each PHY's interrupt at the index + /* + * An array of interrupts, each PHY's interrupt at the index * matching its address */ int irq[PHY_MAX_ADDR]; - - /** @reset_delay_us: GPIO reset pulse width in microseconds */ - int reset_delay_us; - /** @reset_post_delay_us: GPIO reset deassert delay in microseconds */ - int reset_post_delay_us; - /** @reset_gpiod: Reset GPIO descriptor pointer */ - struct gpio_desc *reset_gpiod; - - /** @probe_capabilities: bus capabilities, used for probing */ - enum { - MDIOBUS_NO_CAP = 0, - MDIOBUS_C22, - MDIOBUS_C45, - MDIOBUS_C22_C45, - } probe_capabilities; - - /** @shared_lock: protect access to the shared element */ - struct mutex shared_lock; - - /** @shared: shared state across different PHYs */ - struct phy_package_shared *shared[PHY_MAX_ADDR]; }; #define to_mii_bus(d) container_of(d, struct mii_bus, dev) -struct mii_bus *mdiobus_alloc_size(size_t size); - -/** - * mdiobus_alloc - Allocate an MDIO bus structure - * - * The internal state of the MDIO bus will be set of MDIOBUS_ALLOCATED ready - * for the driver to register the bus. 
- */ +struct mii_bus *mdiobus_alloc_size(size_t); static inline struct mii_bus *mdiobus_alloc(void) { return mdiobus_alloc_size(0); } int __mdiobus_register(struct mii_bus *bus, struct module *owner); -int __devm_mdiobus_register(struct device *dev, struct mii_bus *bus, - struct module *owner); #define mdiobus_register(bus) __mdiobus_register(bus, THIS_MODULE) -#define devm_mdiobus_register(dev, bus) \ - __devm_mdiobus_register(dev, bus, THIS_MODULE) - void mdiobus_unregister(struct mii_bus *bus); void mdiobus_free(struct mii_bus *bus); struct mii_bus *devm_mdiobus_alloc_size(struct device *dev, int sizeof_priv); @@ -414,144 +216,140 @@ static inline struct mii_bus *devm_mdiobus_alloc(struct device *dev) return devm_mdiobus_alloc_size(dev, 0); } -struct mii_bus *mdio_find_bus(const char *mdio_name); +void devm_mdiobus_free(struct device *dev, struct mii_bus *bus); struct phy_device *mdiobus_scan(struct mii_bus *bus, int addr); -#define PHY_INTERRUPT_DISABLED false -#define PHY_INTERRUPT_ENABLED true +#define PHY_INTERRUPT_DISABLED 0x0 +#define PHY_INTERRUPT_ENABLED 0x80000000 -/** - * enum phy_state - PHY state machine states: +/* PHY state machine states: * - * @PHY_DOWN: PHY device and driver are not ready for anything. probe + * DOWN: PHY device and driver are not ready for anything. probe * should be called if and only if the PHY is in this state, * given that the PHY device exists. - * - PHY driver probe function will set the state to @PHY_READY + * - PHY driver probe function will, depending on the PHY, set + * the state to STARTING or READY * - * @PHY_READY: PHY is ready to send and receive packets, but the + * STARTING: PHY device is coming up, and the ethernet driver is + * not ready. PHY drivers may set this in the probe function. + * If they do, they are responsible for making sure the state is + * eventually set to indicate whether the PHY is UP or READY, + * depending on the state when the PHY is done starting up. 
+ * - PHY driver will set the state to READY + * - start will set the state to PENDING + * + * READY: PHY is ready to send and receive packets, but the * controller is not. By default, PHYs which do not implement - * probe will be set to this state by phy_probe(). + * probe will be set to this state by phy_probe(). If the PHY + * driver knows the PHY is ready, and the PHY state is STARTING, + * then it sets this STATE. * - start will set the state to UP * - * @PHY_UP: The PHY and attached device are ready to do work. + * PENDING: PHY device is coming up, but the ethernet driver is + * ready. phy_start will set this state if the PHY state is + * STARTING. + * - PHY driver will set the state to UP when the PHY is ready + * + * UP: The PHY and attached device are ready to do work. * Interrupts should be started here. - * - timer moves to @PHY_NOLINK or @PHY_RUNNING + * - timer moves to AN * - * @PHY_NOLINK: PHY is up, but not currently plugged in. - * - irq or timer will set @PHY_RUNNING if link comes back - * - phy_stop moves to @PHY_HALTED + * AN: The PHY is currently negotiating the link state. Link is + * therefore down for now. phy_timer will set this state when it + * detects the state is UP. config_aneg will set this state + * whenever called with phydev->autoneg set to AUTONEG_ENABLE. + * - If autonegotiation finishes, but there's no link, it sets + * the state to NOLINK. + * - If aneg finishes with link, it sets the state to RUNNING, + * and calls adjust_link + * - If autonegotiation did not finish after an arbitrary amount + * of time, autonegotiation should be tried again if the PHY + * supports "magic" autonegotiation (back to AN) + * - If it didn't finish, and no magic_aneg, move to FORCING. * - * @PHY_RUNNING: PHY is currently up, running, and possibly sending + * NOLINK: PHY is up, but not currently plugged in. 
+ * - If the timer notes that the link comes back, we move to RUNNING + * - config_aneg moves to AN + * - phy_stop moves to HALTED + * + * FORCING: PHY is being configured with forced settings + * - if link is up, move to RUNNING + * - If link is down, we drop to the next highest setting, and + * retry (FORCING) after a timeout + * - phy_stop moves to HALTED + * + * RUNNING: PHY is currently up, running, and possibly sending * and/or receiving packets - * - irq or timer will set @PHY_NOLINK if link goes down - * - phy_stop moves to @PHY_HALTED + * - timer will set CHANGELINK if we're polling (this ensures the + * link state is polled every other cycle of this state machine, + * which makes it every other second) + * - irq will set CHANGELINK + * - config_aneg will set AN + * - phy_stop moves to HALTED * - * @PHY_CABLETEST: PHY is performing a cable test. Packet reception/sending - * is not expected to work, carrier will be indicated as down. PHY will be - * poll once per second, or on interrupt for it current state. - * Once complete, move to UP to restart the PHY. - * - phy_stop aborts the running test and moves to @PHY_HALTED + * CHANGELINK: PHY experienced a change in link state + * - timer moves to RUNNING if link + * - timer moves to NOLINK if the link is down + * - phy_stop moves to HALTED * - * @PHY_HALTED: PHY is up, but no polling or interrupts are done. Or + * HALTED: PHY is up, but no polling or interrupts are done. Or * PHY is in an error state. - * - phy_start moves to @PHY_UP + * + * - phy_start moves to RESUMING + * + * RESUMING: PHY was halted, but now wants to run again. 
+ * - If we are forcing, or aneg is done, timer moves to RUNNING + * - If aneg is not done, timer moves to AN + * - phy_stop moves to HALTED */ enum phy_state { PHY_DOWN = 0, + PHY_STARTING, PHY_READY, - PHY_HALTED, + PHY_PENDING, PHY_UP, + PHY_AN, PHY_RUNNING, PHY_NOLINK, - PHY_CABLETEST, + PHY_FORCING, + PHY_CHANGELINK, + PHY_HALTED, + PHY_RESUMING }; -#define MDIO_MMD_NUM 32 - /** * struct phy_c45_device_ids - 802.3-c45 Device Identifiers - * @devices_in_package: IEEE 802.3 devices in package register value. - * @mmds_present: bit vector of MMDs present. + * @devices_in_package: Bit vector of devices present. * @device_ids: The device identifer for each present device. */ struct phy_c45_device_ids { u32 devices_in_package; - u32 mmds_present; - u32 device_ids[MDIO_MMD_NUM]; + u32 device_ids[8]; }; -struct macsec_context; -struct macsec_ops; - -/** - * struct phy_device - An instance of a PHY +/* phy_device: An instance of a PHY * - * @mdio: MDIO bus this PHY is on - * @drv: Pointer to the driver for this PHY instance - * @phy_id: UID for this device found during discovery - * @c45_ids: 802.3-c45 Device Identifiers if is_c45. - * @is_c45: Set to true if this PHY uses clause 45 addressing. - * @is_internal: Set to true if this PHY is internal to a MAC. - * @is_pseudo_fixed_link: Set to true if this PHY is an Ethernet switch, etc. - * @is_gigabit_capable: Set to true if PHY supports 1000Mbps - * @has_fixups: Set to true if this PHY has fixups/quirks. - * @suspended: Set to true if this PHY has been suspended successfully. - * @suspended_by_mdio_bus: Set to true if this PHY was suspended by MDIO bus. - * @sysfs_links: Internal boolean tracking sysfs symbolic links setup/removal. - * @loopback_enabled: Set true if this PHY has been loopbacked successfully. - * @downshifted_rate: Set true if link speed has been downshifted. - * @is_on_sfp_module: Set true if PHY is located on an SFP module. 
- * @mac_managed_pm: Set true if MAC driver takes of suspending/resuming PHY - * @state: State of the PHY for management purposes - * @dev_flags: Device-specific flags used by the PHY driver. - * Bits [15:0] are free to use by the PHY driver to communicate - * driver specific behavior. - * Bits [23:16] are currently reserved for future use. - * Bits [31:24] are reserved for defining generic - * PHY driver behavior. - * @irq: IRQ number of the PHY's interrupt (-1 if none) - * @phy_timer: The timer for handling the state machine - * @phylink: Pointer to phylink instance for this PHY - * @sfp_bus_attached: Flag indicating whether the SFP bus has been attached - * @sfp_bus: SFP bus attached to this PHY's fiber port - * @attached_dev: The attached enet driver's device instance ptr - * @adjust_link: Callback for the enet controller to respond to changes: in the - * link state. - * @phy_link_change: Callback for phylink for notification of link change - * @macsec_ops: MACsec offloading ops. + * drv: Pointer to the driver for this PHY instance + * phy_id: UID for this device found during discovery + * c45_ids: 802.3-c45 Device Identifers if is_c45. + * is_c45: Set to true if this phy uses clause 45 addressing. + * is_internal: Set to true if this phy is internal to a MAC. + * is_pseudo_fixed_link: Set to true if this phy is an Ethernet switch, etc. + * has_fixups: Set to true if this phy has fixups/quirks. + * suspended: Set to true if this phy has been suspended successfully. + * state: state of the PHY for management purposes + * dev_flags: Device-specific flags used by the PHY driver. 
+ * link_timeout: The number of timer firings to wait before the + * giving up on the current attempt at acquiring a link + * irq: IRQ number of the PHY's interrupt (-1 if none) + * phy_timer: The timer for handling the state machine + * phy_queue: A work_queue for the interrupt + * attached_dev: The attached enet driver's device instance ptr + * adjust_link: Callback for the enet controller to respond to + * changes in the link state. * - * @speed: Current link speed - * @duplex: Current duplex - * @port: Current port - * @pause: Current pause - * @asym_pause: Current asymmetric pause - * @supported: Combined MAC/PHY supported linkmodes - * @advertising: Currently advertised linkmodes - * @adv_old: Saved advertised while power saving for WoL - * @lp_advertising: Current link partner advertised linkmodes - * @eee_broken_modes: Energy efficient ethernet modes which should be prohibited - * @autoneg: Flag autoneg being used - * @link: Current link state - * @autoneg_complete: Flag auto negotiation of the link has completed - * @mdix: Current crossover - * @mdix_ctrl: User setting of crossover - * @interrupts: Flag interrupts have been enabled - * @interface: enum phy_interface_t value - * @skb: Netlink message for cable diagnostics - * @nest: Netlink nest used for cable diagnostics - * @ehdr: nNtlink header for cable diagnostics - * @phy_led_triggers: Array of LED triggers - * @phy_num_led_triggers: Number of triggers in @phy_led_triggers - * @led_link_trigger: LED trigger for link up/down - * @last_triggered: last LED trigger for link speed - * @master_slave_set: User requested master/slave configuration - * @master_slave_get: Current master/slave advertisement - * @master_slave_state: Current master/slave configuration - * @mii_ts: Pointer to time stamper callbacks - * @lock: Mutex for serialization access to PHY - * @state_queue: Work queue for state machine - * @shared: Pointer to private data shared by phys in one package - * @priv: Pointer to driver private 
data + * speed, duplex, pause, supported, advertising, lp_advertising, + * and autoneg are used like in mii_if_info * * interrupts currently only supports enabled or disabled, * but could be changed in the future to support enabling @@ -570,26 +368,11 @@ struct phy_device { u32 phy_id; struct phy_c45_device_ids c45_ids; - unsigned is_c45:1; - unsigned is_internal:1; - unsigned is_pseudo_fixed_link:1; - unsigned is_gigabit_capable:1; - unsigned has_fixups:1; - unsigned suspended:1; - unsigned suspended_by_mdio_bus:1; - unsigned sysfs_links:1; - unsigned loopback_enabled:1; - unsigned downshifted_rate:1; - unsigned is_on_sfp_module:1; - unsigned mac_managed_pm:1; - - unsigned autoneg:1; - /* The most recently read link state */ - unsigned link:1; - unsigned autoneg_complete:1; - - /* Interrupts are enabled */ - unsigned interrupts:1; + bool is_c45; + bool is_internal; + bool is_pseudo_fixed_link; + bool has_fixups; + bool suspended; enum phy_state state; @@ -603,31 +386,24 @@ struct phy_device { */ int speed; int duplex; - int port; int pause; int asym_pause; - u8 master_slave_get; - u8 master_slave_set; - u8 master_slave_state; - /* Union of PHY and Attached devices' supported link modes */ - /* See ethtool.h for more info */ - __ETHTOOL_DECLARE_LINK_MODE_MASK(supported); - __ETHTOOL_DECLARE_LINK_MODE_MASK(advertising); - __ETHTOOL_DECLARE_LINK_MODE_MASK(lp_advertising); - /* used with phy_speed_down */ - __ETHTOOL_DECLARE_LINK_MODE_MASK(adv_old); + /* The most recently read link state */ + int link; - /* Energy efficient ethernet modes which should be prohibited */ - u32 eee_broken_modes; + /* Enabled Interrupts */ + u32 interrupts; -#ifdef CONFIG_LED_TRIGGER_PHY - struct phy_led_trigger *phy_led_triggers; - unsigned int phy_num_led_triggers; - struct phy_led_trigger *last_triggered; + /* Union of PHY and Attached devices' supported modes */ + /* See mii.h for more info */ + u32 supported; + u32 advertising; + u32 lp_advertising; - struct phy_led_trigger 
*led_link_trigger; -#endif + int autoneg; + + int link_timeout; /* * Interrupt number for this PHY @@ -639,264 +415,180 @@ struct phy_device { /* For use by PHYs to maintain extra state */ void *priv; - /* shared data pointer */ - /* For use by PHYs inside the same package that need a shared state. */ - struct phy_package_shared *shared; - - /* Reporting cable test results */ - struct sk_buff *skb; - void *ehdr; - struct nlattr *nest; - /* Interrupt and Polling infrastructure */ + struct work_struct phy_queue; struct delayed_work state_queue; + atomic_t irq_disable; struct mutex lock; - /* This may be modified under the rtnl lock */ - bool sfp_bus_attached; - struct sfp_bus *sfp_bus; - struct phylink *phylink; struct net_device *attached_dev; - struct mii_timestamper *mii_ts; u8 mdix; - u8 mdix_ctrl; - void (*phy_link_change)(struct phy_device *phydev, bool up); void (*adjust_link)(struct net_device *dev); - -#if IS_ENABLED(CONFIG_MACSEC) - /* MACsec management functions */ - const struct macsec_ops *macsec_ops; -#endif }; +#define to_phy_device(d) container_of(to_mdio_device(d), \ + struct phy_device, mdio) -static inline struct phy_device *to_phy_device(const struct device *dev) -{ - return container_of(to_mdio_device(dev), struct phy_device, mdio); -} - -/** - * struct phy_tdr_config - Configuration of a TDR raw test +/* struct phy_driver: Driver structure for a particular PHY type * - * @first: Distance for first data collection point - * @last: Distance for last data collection point - * @step: Step between data collection points - * @pair: Bitmap of cable pairs to collect data for - * - * A structure containing possible configuration parameters - * for a TDR cable test. The driver does not need to implement - * all the parameters, but should report what is actually used. - * All distances are in centimeters. 
- */ -struct phy_tdr_config { - u32 first; - u32 last; - u32 step; - s8 pair; -}; -#define PHY_PAIR_ALL -1 - -/** - * struct phy_driver - Driver structure for a particular PHY type - * - * @mdiodrv: Data common to all MDIO devices - * @phy_id: The result of reading the UID registers of this PHY + * driver_data: static driver data + * phy_id: The result of reading the UID registers of this PHY * type, and ANDing them with the phy_id_mask. This driver * only works for PHYs with IDs which match this field - * @name: The friendly name of this PHY type - * @phy_id_mask: Defines the important bits of the phy_id - * @features: A mandatory list of features (speed, duplex, etc) - * supported by this PHY - * @flags: A bitfield defining certain other features this PHY + * name: The friendly name of this PHY type + * phy_id_mask: Defines the important bits of the phy_id + * features: A list of features (speed, duplex, etc) supported + * by this PHY + * flags: A bitfield defining certain other features this PHY * supports (like interrupts) - * @driver_data: Static driver data * - * All functions are optional. If config_aneg or read_status - * are not implemented, the phy core uses the genphy versions. - * Note that none of these functions should be called from - * interrupt time. The goal is for the bus read/write functions - * to be able to block when the bus transaction is happening, - * and be freed up by an interrupt (The MPC85xx has this ability, - * though it is not currently supported in the driver). + * The drivers must implement config_aneg and read_status. All + * other functions are optional. Note that none of these + * functions should be called from interrupt time. The goal is + * for the bus read/write functions to be able to block when the + * bus transaction is happening, and be freed up by an interrupt + * (The MPC85xx has this ability, though it is not currently + * supported in the driver). 
*/ struct phy_driver { struct mdio_driver_common mdiodrv; u32 phy_id; char *name; - u32 phy_id_mask; - const unsigned long * const features; + unsigned int phy_id_mask; + u32 features; u32 flags; const void *driver_data; - /** - * @soft_reset: Called to issue a PHY software reset + /* + * Called to issue a PHY software reset */ int (*soft_reset)(struct phy_device *phydev); - /** - * @config_init: Called to initialize the PHY, + /* + * Called to initialize the PHY, * including after a reset */ int (*config_init)(struct phy_device *phydev); - /** - * @probe: Called during discovery. Used to set + /* + * Called during discovery. Used to set * up device-specific structures, if any */ int (*probe)(struct phy_device *phydev); - /** - * @get_features: Probe the hardware to determine what - * abilities it has. Should only set phydev->supported. - */ - int (*get_features)(struct phy_device *phydev); - /* PHY Power Management */ - /** @suspend: Suspend the hardware, saving state if needed */ int (*suspend)(struct phy_device *phydev); - /** @resume: Resume the hardware, restoring state if needed */ int (*resume)(struct phy_device *phydev); - /** - * @config_aneg: Configures the advertisement and resets + /* + * Configures the advertisement and resets * autonegotiation if phydev->autoneg is on, * forces the speed to the current settings in phydev * if phydev->autoneg is off */ int (*config_aneg)(struct phy_device *phydev); - /** @aneg_done: Determines the auto negotiation result */ + /* Determines the auto negotiation result */ int (*aneg_done)(struct phy_device *phydev); - /** @read_status: Determines the negotiated speed and duplex */ + /* Determines the negotiated speed and duplex */ int (*read_status)(struct phy_device *phydev); - /** - * @config_intr: Enables or disables interrupts. - * It should also clear any pending interrupts prior to enabling the - * IRQs and after disabling them. 
- */ + /* Clears any pending interrupts */ + int (*ack_interrupt)(struct phy_device *phydev); + + /* Enables or disables interrupts */ int (*config_intr)(struct phy_device *phydev); - /** @handle_interrupt: Override default interrupt handling */ - irqreturn_t (*handle_interrupt)(struct phy_device *phydev); + /* + * Checks if the PHY generated an interrupt. + * For multi-PHY devices with shared PHY interrupt pin + */ + int (*did_interrupt)(struct phy_device *phydev); - /** @remove: Clears up any memory if needed */ + /* Clears up any memory if needed */ void (*remove)(struct phy_device *phydev); - /** - * @match_phy_device: Returns true if this is a suitable - * driver for the given phydev. If NULL, matching is based on - * phy_id and phy_id_mask. + /* Returns true if this is a suitable driver for the given + * phydev. If NULL, matching is based on phy_id and + * phy_id_mask. */ int (*match_phy_device)(struct phy_device *phydev); - /** - * @set_wol: Some devices (e.g. qnap TS-119P II) require PHY - * register changes to enable Wake on LAN, so set_wol is - * provided to be called in the ethernet driver's set_wol - * function. + /* Handles ethtool queries for hardware time stamping. */ + int (*ts_info)(struct phy_device *phydev, struct ethtool_ts_info *ti); + + /* Handles SIOCSHWTSTAMP ioctl for hardware time stamping. */ + int (*hwtstamp)(struct phy_device *phydev, struct ifreq *ifr); + + /* + * Requests a Rx timestamp for 'skb'. If the skb is accepted, + * the phy driver promises to deliver it using netif_rx() as + * soon as a timestamp becomes available. One of the + * PTP_CLASS_ values is passed in 'type'. The function must + * return true if the skb is accepted for delivery. */ + bool (*rxtstamp)(struct phy_device *dev, struct sk_buff *skb, int type); + + /* + * Requests a Tx timestamp for 'skb'. The phy driver promises + * to deliver it using skb_complete_tx_timestamp() as soon as a + * timestamp becomes available. 
One of the PTP_CLASS_ values + * is passed in 'type'. + */ + void (*txtstamp)(struct phy_device *dev, struct sk_buff *skb, int type); + + /* Some devices (e.g. qnap TS-119P II) require PHY register changes to + * enable Wake on LAN, so set_wol is provided to be called in the + * ethernet driver's set_wol function. */ int (*set_wol)(struct phy_device *dev, struct ethtool_wolinfo *wol); - /** - * @get_wol: See set_wol, but for checking whether Wake on LAN - * is enabled. - */ + /* See set_wol, but for checking whether Wake on LAN is enabled. */ void (*get_wol)(struct phy_device *dev, struct ethtool_wolinfo *wol); - /** - * @link_change_notify: Called to inform a PHY device driver - * when the core is about to change the link state. This - * callback is supposed to be used as fixup hook for drivers - * that need to take action when the link state - * changes. Drivers are by no means allowed to mess with the + /* + * Called to inform a PHY device driver when the core is about to + * change the link state. This callback is supposed to be used as + * fixup hook for drivers that need to take action when the link + * state changes. Drivers are by no means allowed to mess with the * PHY device structure in their implementations. */ void (*link_change_notify)(struct phy_device *dev); - /** - * @read_mmd: PHY specific driver override for reading a MMD - * register. This function is optional for PHY specific - * drivers. When not provided, the default MMD read function - * will be used by phy_read_mmd(), which will use either a - * direct read for Clause 45 PHYs or an indirect read for - * Clause 22 PHYs. devnum is the MMD device number within the - * PHY device, regnum is the register within the selected MMD - * device. + /* A function provided by a phy specific driver to override the + * the PHY driver framework support for reading a MMD register + * from the PHY. If not supported, return -1. 
This function is + * optional for PHY specific drivers, if not provided then the + * default MMD read function is used by the PHY framework. */ - int (*read_mmd)(struct phy_device *dev, int devnum, u16 regnum); + int (*read_mmd_indirect)(struct phy_device *dev, int ptrad, + int devnum, int regnum); - /** - * @write_mmd: PHY specific driver override for writing a MMD - * register. This function is optional for PHY specific - * drivers. When not provided, the default MMD write function - * will be used by phy_write_mmd(), which will use either a - * direct write for Clause 45 PHYs, or an indirect write for - * Clause 22 PHYs. devnum is the MMD device number within the - * PHY device, regnum is the register within the selected MMD - * device. val is the value to be written. + /* A function provided by a phy specific driver to override the + * the PHY driver framework support for writing a MMD register + * from the PHY. This function is optional for PHY specific drivers, + * if not provided then the default MMD read function is used by + * the PHY framework. 
*/ - int (*write_mmd)(struct phy_device *dev, int devnum, u16 regnum, - u16 val); + void (*write_mmd_indirect)(struct phy_device *dev, int ptrad, + int devnum, int regnum, u32 val); - /** @read_page: Return the current PHY register page number */ - int (*read_page)(struct phy_device *dev); - /** @write_page: Set the current PHY register page number */ - int (*write_page)(struct phy_device *dev, int page); - - /** - * @module_info: Get the size and type of the eeprom contained - * within a plug-in module - */ + /* Get the size and type of the eeprom contained within a plug-in + * module */ int (*module_info)(struct phy_device *dev, struct ethtool_modinfo *modinfo); - /** - * @module_eeprom: Get the eeprom information from the plug-in - * module - */ + /* Get the eeprom information from the plug-in module */ int (*module_eeprom)(struct phy_device *dev, struct ethtool_eeprom *ee, u8 *data); - /** @cable_test_start: Start a cable test */ - int (*cable_test_start)(struct phy_device *dev); - - /** @cable_test_tdr_start: Start a raw TDR cable test */ - int (*cable_test_tdr_start)(struct phy_device *dev, - const struct phy_tdr_config *config); - - /** - * @cable_test_get_status: Once per second, or on interrupt, - * request the status of the test. 
- */ - int (*cable_test_get_status)(struct phy_device *dev, bool *finished); - - /* Get statistics from the PHY using ethtool */ - /** @get_sset_count: Number of statistic counters */ + /* Get statistics from the phy using ethtool */ int (*get_sset_count)(struct phy_device *dev); - /** @get_strings: Names of the statistic counters */ void (*get_strings)(struct phy_device *dev, u8 *data); - /** @get_stats: Return the statistic counter values */ void (*get_stats)(struct phy_device *dev, struct ethtool_stats *stats, u64 *data); - - /* Get and Set PHY tunables */ - /** @get_tunable: Return the value of a tunable */ - int (*get_tunable)(struct phy_device *dev, - struct ethtool_tunable *tuna, void *data); - /** @set_tunable: Set the value of a tunable */ - int (*set_tunable)(struct phy_device *dev, - struct ethtool_tunable *tuna, - const void *data); - /** @set_loopback: Set the loopback mood of the PHY */ - int (*set_loopback)(struct phy_device *dev, bool enable); - /** @get_sqi: Get the signal quality indication */ - int (*get_sqi)(struct phy_device *dev); - /** @get_sqi_max: Get the maximum signal quality indication */ - int (*get_sqi_max)(struct phy_device *dev); }; #define to_phy_driver(d) container_of(to_mdio_common_driver(d), \ struct phy_driver, mdiodrv) @@ -904,52 +596,43 @@ struct phy_driver { #define PHY_ANY_ID "MATCH ANY PHY" #define PHY_ANY_UID 0xffffffff -#define PHY_ID_MATCH_EXACT(id) .phy_id = (id), .phy_id_mask = GENMASK(31, 0) -#define PHY_ID_MATCH_MODEL(id) .phy_id = (id), .phy_id_mask = GENMASK(31, 4) -#define PHY_ID_MATCH_VENDOR(id) .phy_id = (id), .phy_id_mask = GENMASK(31, 10) - /* A Structure for boards to register fixups with the PHY Lib */ struct phy_fixup { struct list_head list; - char bus_id[MII_BUS_ID_SIZE + 3]; + char bus_id[20]; u32 phy_uid; u32 phy_uid_mask; int (*run)(struct phy_device *phydev); }; -const char *phy_speed_to_str(int speed); -const char *phy_duplex_to_str(unsigned int duplex); - -/* A structure for mapping a particular 
speed and duplex - * combination to a particular SUPPORTED and ADVERTISED value - */ -struct phy_setting { - u32 speed; - u8 duplex; - u8 bit; -}; - -const struct phy_setting * -phy_lookup_setting(int speed, int duplex, const unsigned long *mask, - bool exact); -size_t phy_speeds(unsigned int *speeds, size_t size, - unsigned long *mask); -void of_set_phy_supported(struct phy_device *phydev); -void of_set_phy_eee_broken(struct phy_device *phydev); -int phy_speed_down_core(struct phy_device *phydev); - /** - * phy_is_started - Convenience function to check whether PHY is started + * phy_read_mmd - Convenience function for reading a register + * from an MMD on a given PHY. * @phydev: The phy_device struct + * @devad: The MMD to read from + * @regnum: The register on the MMD to read + * + * Same rules as for phy_read(); */ -static inline bool phy_is_started(struct phy_device *phydev) +static inline int phy_read_mmd(struct phy_device *phydev, int devad, u32 regnum) { - return phydev->state >= PHY_UP; + if (!phydev->is_c45) + return -EOPNOTSUPP; + + return mdiobus_read(phydev->mdio.bus, phydev->mdio.addr, + MII_ADDR_C45 | (devad << 16) | (regnum & 0xffff)); } -void phy_resolve_aneg_pause(struct phy_device *phydev); -void phy_resolve_aneg_linkmode(struct phy_device *phydev); -void phy_check_downshift(struct phy_device *phydev); +/** + * phy_read_mmd_indirect - reads data from the MMD registers + * @phydev: The PHY device bus + * @prtad: MMD Address + * @addr: PHY address on the MII bus + * + * Description: it reads data from the MMD registers (clause 22 to access to + * clause 45) of the specified phy address. 
+ */ +int phy_read_mmd_indirect(struct phy_device *phydev, int prtad, int devad); /** * phy_read - Convenience function for reading a given PHY register @@ -965,31 +648,6 @@ static inline int phy_read(struct phy_device *phydev, u32 regnum) return mdiobus_read(phydev->mdio.bus, phydev->mdio.addr, regnum); } -#define phy_read_poll_timeout(phydev, regnum, val, cond, sleep_us, \ - timeout_us, sleep_before_read) \ -({ \ - int __ret = read_poll_timeout(phy_read, val, (cond) || val < 0, \ - sleep_us, timeout_us, sleep_before_read, phydev, regnum); \ - if (val < 0) \ - __ret = val; \ - if (__ret) \ - phydev_err(phydev, "%s failed: %d\n", __func__, __ret); \ - __ret; \ -}) - - -/** - * __phy_read - convenience function for reading a given PHY register - * @phydev: the phy_device struct - * @regnum: register number to read - * - * The caller must have taken the MDIO bus lock. - */ -static inline int __phy_read(struct phy_device *phydev, u32 regnum) -{ - return __mdiobus_read(phydev->mdio.bus, phydev->mdio.addr, regnum); -} - /** * phy_write - Convenience function for writing a given PHY register * @phydev: the phy_device struct @@ -1005,303 +663,16 @@ static inline int phy_write(struct phy_device *phydev, u32 regnum, u16 val) return mdiobus_write(phydev->mdio.bus, phydev->mdio.addr, regnum, val); } -/** - * __phy_write - Convenience function for writing a given PHY register - * @phydev: the phy_device struct - * @regnum: register number to write - * @val: value to write to @regnum - * - * The caller must have taken the MDIO bus lock. 
- */ -static inline int __phy_write(struct phy_device *phydev, u32 regnum, u16 val) -{ - return __mdiobus_write(phydev->mdio.bus, phydev->mdio.addr, regnum, - val); -} - -/** - * __phy_modify_changed() - Convenience function for modifying a PHY register - * @phydev: a pointer to a &struct phy_device - * @regnum: register number - * @mask: bit mask of bits to clear - * @set: bit mask of bits to set - * - * Unlocked helper function which allows a PHY register to be modified as - * new register value = (old register value & ~mask) | set - * - * Returns negative errno, 0 if there was no change, and 1 in case of change - */ -static inline int __phy_modify_changed(struct phy_device *phydev, u32 regnum, - u16 mask, u16 set) -{ - return __mdiobus_modify_changed(phydev->mdio.bus, phydev->mdio.addr, - regnum, mask, set); -} - -/* - * phy_read_mmd - Convenience function for reading a register - * from an MMD on a given PHY. - */ -int phy_read_mmd(struct phy_device *phydev, int devad, u32 regnum); - -/** - * phy_read_mmd_poll_timeout - Periodically poll a PHY register until a - * condition is met or a timeout occurs - * - * @phydev: The phy_device struct - * @devaddr: The MMD to read from - * @regnum: The register on the MMD to read - * @val: Variable to read the register into - * @cond: Break condition (usually involving @val) - * @sleep_us: Maximum time to sleep between reads in us (0 - * tight-loops). Should be less than ~20ms since usleep_range - * is used (see Documentation/timers/timers-howto.rst). - * @timeout_us: Timeout in us, 0 means never timeout - * @sleep_before_read: if it is true, sleep @sleep_us before read. - * Returns 0 on success and -ETIMEDOUT upon a timeout. In either - * case, the last read value at @args is stored in @val. Must not - * be called from atomic context if sleep_us or timeout_us are used. 
- */ -#define phy_read_mmd_poll_timeout(phydev, devaddr, regnum, val, cond, \ - sleep_us, timeout_us, sleep_before_read) \ -({ \ - int __ret = read_poll_timeout(phy_read_mmd, val, (cond) || val < 0, \ - sleep_us, timeout_us, sleep_before_read, \ - phydev, devaddr, regnum); \ - if (val < 0) \ - __ret = val; \ - if (__ret) \ - phydev_err(phydev, "%s failed: %d\n", __func__, __ret); \ - __ret; \ -}) - -/* - * __phy_read_mmd - Convenience function for reading a register - * from an MMD on a given PHY. - */ -int __phy_read_mmd(struct phy_device *phydev, int devad, u32 regnum); - -/* - * phy_write_mmd - Convenience function for writing a register - * on an MMD on a given PHY. - */ -int phy_write_mmd(struct phy_device *phydev, int devad, u32 regnum, u16 val); - -/* - * __phy_write_mmd - Convenience function for writing a register - * on an MMD on a given PHY. - */ -int __phy_write_mmd(struct phy_device *phydev, int devad, u32 regnum, u16 val); - -int __phy_modify_changed(struct phy_device *phydev, u32 regnum, u16 mask, - u16 set); -int phy_modify_changed(struct phy_device *phydev, u32 regnum, u16 mask, - u16 set); -int __phy_modify(struct phy_device *phydev, u32 regnum, u16 mask, u16 set); -int phy_modify(struct phy_device *phydev, u32 regnum, u16 mask, u16 set); - -int __phy_modify_mmd_changed(struct phy_device *phydev, int devad, u32 regnum, - u16 mask, u16 set); -int phy_modify_mmd_changed(struct phy_device *phydev, int devad, u32 regnum, - u16 mask, u16 set); -int __phy_modify_mmd(struct phy_device *phydev, int devad, u32 regnum, - u16 mask, u16 set); -int phy_modify_mmd(struct phy_device *phydev, int devad, u32 regnum, - u16 mask, u16 set); - -/** - * __phy_set_bits - Convenience function for setting bits in a PHY register - * @phydev: the phy_device struct - * @regnum: register number to write - * @val: bits to set - * - * The caller must have taken the MDIO bus lock. 
- */ -static inline int __phy_set_bits(struct phy_device *phydev, u32 regnum, u16 val) -{ - return __phy_modify(phydev, regnum, 0, val); -} - -/** - * __phy_clear_bits - Convenience function for clearing bits in a PHY register - * @phydev: the phy_device struct - * @regnum: register number to write - * @val: bits to clear - * - * The caller must have taken the MDIO bus lock. - */ -static inline int __phy_clear_bits(struct phy_device *phydev, u32 regnum, - u16 val) -{ - return __phy_modify(phydev, regnum, val, 0); -} - -/** - * phy_set_bits - Convenience function for setting bits in a PHY register - * @phydev: the phy_device struct - * @regnum: register number to write - * @val: bits to set - */ -static inline int phy_set_bits(struct phy_device *phydev, u32 regnum, u16 val) -{ - return phy_modify(phydev, regnum, 0, val); -} - -/** - * phy_clear_bits - Convenience function for clearing bits in a PHY register - * @phydev: the phy_device struct - * @regnum: register number to write - * @val: bits to clear - */ -static inline int phy_clear_bits(struct phy_device *phydev, u32 regnum, u16 val) -{ - return phy_modify(phydev, regnum, val, 0); -} - -/** - * __phy_set_bits_mmd - Convenience function for setting bits in a register - * on MMD - * @phydev: the phy_device struct - * @devad: the MMD containing register to modify - * @regnum: register number to modify - * @val: bits to set - * - * The caller must have taken the MDIO bus lock. - */ -static inline int __phy_set_bits_mmd(struct phy_device *phydev, int devad, - u32 regnum, u16 val) -{ - return __phy_modify_mmd(phydev, devad, regnum, 0, val); -} - -/** - * __phy_clear_bits_mmd - Convenience function for clearing bits in a register - * on MMD - * @phydev: the phy_device struct - * @devad: the MMD containing register to modify - * @regnum: register number to modify - * @val: bits to clear - * - * The caller must have taken the MDIO bus lock. 
- */ -static inline int __phy_clear_bits_mmd(struct phy_device *phydev, int devad, - u32 regnum, u16 val) -{ - return __phy_modify_mmd(phydev, devad, regnum, val, 0); -} - -/** - * phy_set_bits_mmd - Convenience function for setting bits in a register - * on MMD - * @phydev: the phy_device struct - * @devad: the MMD containing register to modify - * @regnum: register number to modify - * @val: bits to set - */ -static inline int phy_set_bits_mmd(struct phy_device *phydev, int devad, - u32 regnum, u16 val) -{ - return phy_modify_mmd(phydev, devad, regnum, 0, val); -} - -/** - * phy_clear_bits_mmd - Convenience function for clearing bits in a register - * on MMD - * @phydev: the phy_device struct - * @devad: the MMD containing register to modify - * @regnum: register number to modify - * @val: bits to clear - */ -static inline int phy_clear_bits_mmd(struct phy_device *phydev, int devad, - u32 regnum, u16 val) -{ - return phy_modify_mmd(phydev, devad, regnum, val, 0); -} - /** * phy_interrupt_is_valid - Convenience function for testing a given PHY irq * @phydev: the phy_device struct * * NOTE: must be kept in sync with addition/removal of PHY_POLL and - * PHY_MAC_INTERRUPT + * PHY_IGNORE_INTERRUPT */ static inline bool phy_interrupt_is_valid(struct phy_device *phydev) { - return phydev->irq != PHY_POLL && phydev->irq != PHY_MAC_INTERRUPT; -} - -/** - * phy_polling_mode - Convenience function for testing whether polling is - * used to detect PHY status changes - * @phydev: the phy_device struct - */ -static inline bool phy_polling_mode(struct phy_device *phydev) -{ - if (phydev->state == PHY_CABLETEST) - if (phydev->drv->flags & PHY_POLL_CABLE_TEST) - return true; - - return phydev->irq == PHY_POLL; -} - -/** - * phy_has_hwtstamp - Tests whether a PHY time stamp configuration. 
- * @phydev: the phy_device struct - */ -static inline bool phy_has_hwtstamp(struct phy_device *phydev) -{ - return phydev && phydev->mii_ts && phydev->mii_ts->hwtstamp; -} - -/** - * phy_has_rxtstamp - Tests whether a PHY supports receive time stamping. - * @phydev: the phy_device struct - */ -static inline bool phy_has_rxtstamp(struct phy_device *phydev) -{ - return phydev && phydev->mii_ts && phydev->mii_ts->rxtstamp; -} - -/** - * phy_has_tsinfo - Tests whether a PHY reports time stamping and/or - * PTP hardware clock capabilities. - * @phydev: the phy_device struct - */ -static inline bool phy_has_tsinfo(struct phy_device *phydev) -{ - return phydev && phydev->mii_ts && phydev->mii_ts->ts_info; -} - -/** - * phy_has_txtstamp - Tests whether a PHY supports transmit time stamping. - * @phydev: the phy_device struct - */ -static inline bool phy_has_txtstamp(struct phy_device *phydev) -{ - return phydev && phydev->mii_ts && phydev->mii_ts->txtstamp; -} - -static inline int phy_hwtstamp(struct phy_device *phydev, struct ifreq *ifr) -{ - return phydev->mii_ts->hwtstamp(phydev->mii_ts, ifr); -} - -static inline bool phy_rxtstamp(struct phy_device *phydev, struct sk_buff *skb, - int type) -{ - return phydev->mii_ts->rxtstamp(phydev->mii_ts, skb, type); -} - -static inline int phy_ts_info(struct phy_device *phydev, - struct ethtool_ts_info *tsinfo) -{ - return phydev->mii_ts->ts_info(phydev->mii_ts, tsinfo); -} - -static inline void phy_txtstamp(struct phy_device *phydev, struct sk_buff *skb, - int type) -{ - phydev->mii_ts->txtstamp(phydev->mii_ts, skb, type); + return phydev->irq != PHY_POLL && phydev->irq != PHY_IGNORE_INTERRUPT; } /** @@ -1313,40 +684,6 @@ static inline bool phy_is_internal(struct phy_device *phydev) return phydev->is_internal; } -/** - * phy_on_sfp - Convenience function for testing if a PHY is on an SFP module - * @phydev: the phy_device struct - */ -static inline bool phy_on_sfp(struct phy_device *phydev) -{ - return phydev->is_on_sfp_module; -} 
- -/** - * phy_interface_mode_is_rgmii - Convenience function for testing if a - * PHY interface mode is RGMII (all variants) - * @mode: the &phy_interface_t enum - */ -static inline bool phy_interface_mode_is_rgmii(phy_interface_t mode) -{ - return mode >= PHY_INTERFACE_MODE_RGMII && - mode <= PHY_INTERFACE_MODE_RGMII_TXID; -}; - -/** - * phy_interface_mode_is_8023z() - does the PHY interface mode use 802.3z - * negotiation - * @mode: one of &enum phy_interface_t - * - * Returns true if the PHY interface mode uses the 16-bit negotiation - * word as defined in 802.3z. (See 802.3-2015 37.2.1 Config_Reg encoding) - */ -static inline bool phy_interface_mode_is_8023z(phy_interface_t mode) -{ - return mode == PHY_INTERFACE_MODE_1000BASEX || - mode == PHY_INTERFACE_MODE_2500BASEX; -} - /** * phy_interface_is_rgmii - Convenience function for testing if a PHY interface * is RGMII (all variants) @@ -1354,10 +691,11 @@ static inline bool phy_interface_mode_is_8023z(phy_interface_t mode) */ static inline bool phy_interface_is_rgmii(struct phy_device *phydev) { - return phy_interface_mode_is_rgmii(phydev->interface); + return phydev->interface >= PHY_INTERFACE_MODE_RGMII && + phydev->interface <= PHY_INTERFACE_MODE_RGMII_TXID; }; -/** +/* * phy_is_pseudo_fixed_link - Convenience function for testing if this * PHY is the CPU port facing side of an Ethernet switch, or similar. 
* @phydev: the phy_device struct @@ -1367,80 +705,49 @@ static inline bool phy_is_pseudo_fixed_link(struct phy_device *phydev) return phydev->is_pseudo_fixed_link; } -int phy_save_page(struct phy_device *phydev); -int phy_select_page(struct phy_device *phydev, int page); -int phy_restore_page(struct phy_device *phydev, int oldpage, int ret); -int phy_read_paged(struct phy_device *phydev, int page, u32 regnum); -int phy_write_paged(struct phy_device *phydev, int page, u32 regnum, u16 val); -int phy_modify_paged_changed(struct phy_device *phydev, int page, u32 regnum, - u16 mask, u16 set); -int phy_modify_paged(struct phy_device *phydev, int page, u32 regnum, - u16 mask, u16 set); +/** + * phy_write_mmd - Convenience function for writing a register + * on an MMD on a given PHY. + * @phydev: The phy_device struct + * @devad: The MMD to read from + * @regnum: The register on the MMD to read + * @val: value to write to @regnum + * + * Same rules as for phy_write(); + */ +static inline int phy_write_mmd(struct phy_device *phydev, int devad, + u32 regnum, u16 val) +{ + if (!phydev->is_c45) + return -EOPNOTSUPP; -struct phy_device *phy_device_create(struct mii_bus *bus, int addr, u32 phy_id, + regnum = MII_ADDR_C45 | ((devad & 0x1f) << 16) | (regnum & 0xffff); + + return mdiobus_write(phydev->mdio.bus, phydev->mdio.addr, regnum, val); +} + +/** + * phy_write_mmd_indirect - writes data to the MMD registers + * @phydev: The PHY device + * @prtad: MMD Address + * @devad: MMD DEVAD + * @data: data to write in the MMD register + * + * Description: Write data from the MMD registers of the specified + * phy address. 
+ */ +void phy_write_mmd_indirect(struct phy_device *phydev, int prtad, + int devad, u32 data); + +struct phy_device *phy_device_create(struct mii_bus *bus, int addr, int phy_id, bool is_c45, struct phy_c45_device_ids *c45_ids); -#if IS_ENABLED(CONFIG_PHYLIB) -int fwnode_get_phy_id(struct fwnode_handle *fwnode, u32 *phy_id); -struct mdio_device *fwnode_mdio_find_device(struct fwnode_handle *fwnode); -struct phy_device *fwnode_phy_find_device(struct fwnode_handle *phy_fwnode); -struct phy_device *device_phy_find_device(struct device *dev); -struct fwnode_handle *fwnode_get_phy_node(struct fwnode_handle *fwnode); struct phy_device *get_phy_device(struct mii_bus *bus, int addr, bool is_c45); int phy_device_register(struct phy_device *phy); -void phy_device_free(struct phy_device *phydev); -#else -static inline int fwnode_get_phy_id(struct fwnode_handle *fwnode, u32 *phy_id) -{ - return 0; -} -static inline -struct mdio_device *fwnode_mdio_find_device(struct fwnode_handle *fwnode) -{ - return 0; -} - -static inline -struct phy_device *fwnode_phy_find_device(struct fwnode_handle *phy_fwnode) -{ - return NULL; -} - -static inline struct phy_device *device_phy_find_device(struct device *dev) -{ - return NULL; -} - -static inline -struct fwnode_handle *fwnode_get_phy_node(struct fwnode_handle *fwnode) -{ - return NULL; -} - -static inline -struct phy_device *get_phy_device(struct mii_bus *bus, int addr, bool is_c45) -{ - return NULL; -} - -static inline int phy_device_register(struct phy_device *phy) -{ - return 0; -} - -static inline void phy_device_free(struct phy_device *phydev) { } -#endif /* CONFIG_PHYLIB */ void phy_device_remove(struct phy_device *phydev); -int phy_get_c45_ids(struct phy_device *phydev); int phy_init_hw(struct phy_device *phydev); int phy_suspend(struct phy_device *phydev); int phy_resume(struct phy_device *phydev); -int __phy_resume(struct phy_device *phydev); -int phy_loopback(struct phy_device *phydev, bool enable); -void phy_sfp_attach(void 
*upstream, struct sfp_bus *bus); -void phy_sfp_detach(void *upstream, struct sfp_bus *bus); -int phy_sfp_probe(struct phy_device *phydev, - const struct sfp_upstream_ops *ops); struct phy_device *phy_attach(struct net_device *dev, const char *bus_id, phy_interface_t interface); struct phy_device *phy_find_first(struct mii_bus *bus); @@ -1456,192 +763,60 @@ void phy_disconnect(struct phy_device *phydev); void phy_detach(struct phy_device *phydev); void phy_start(struct phy_device *phydev); void phy_stop(struct phy_device *phydev); -int phy_config_aneg(struct phy_device *phydev); int phy_start_aneg(struct phy_device *phydev); -int phy_aneg_done(struct phy_device *phydev); -int phy_speed_down(struct phy_device *phydev, bool sync); -int phy_speed_up(struct phy_device *phydev); -int phy_restart_aneg(struct phy_device *phydev); -int phy_reset_after_clk_enable(struct phy_device *phydev); +int phy_stop_interrupts(struct phy_device *phydev); -#if IS_ENABLED(CONFIG_PHYLIB) -int phy_start_cable_test(struct phy_device *phydev, - struct netlink_ext_ack *extack); -int phy_start_cable_test_tdr(struct phy_device *phydev, - struct netlink_ext_ack *extack, - const struct phy_tdr_config *config); -#else -static inline -int phy_start_cable_test(struct phy_device *phydev, - struct netlink_ext_ack *extack) +static inline int phy_read_status(struct phy_device *phydev) { - NL_SET_ERR_MSG(extack, "Kernel not compiled with PHYLIB support"); - return -EOPNOTSUPP; -} -static inline -int phy_start_cable_test_tdr(struct phy_device *phydev, - struct netlink_ext_ack *extack, - const struct phy_tdr_config *config) -{ - NL_SET_ERR_MSG(extack, "Kernel not compiled with PHYLIB support"); - return -EOPNOTSUPP; -} -#endif - -int phy_cable_test_result(struct phy_device *phydev, u8 pair, u16 result); -int phy_cable_test_fault_length(struct phy_device *phydev, u8 pair, - u16 cm); - -static inline void phy_device_reset(struct phy_device *phydev, int value) -{ - mdio_device_reset(&phydev->mdio, value); + 
return phydev->drv->read_status(phydev); } #define phydev_err(_phydev, format, args...) \ dev_err(&_phydev->mdio.dev, format, ##args) -#define phydev_info(_phydev, format, args...) \ - dev_info(&_phydev->mdio.dev, format, ##args) - -#define phydev_warn(_phydev, format, args...) \ - dev_warn(&_phydev->mdio.dev, format, ##args) - #define phydev_dbg(_phydev, format, args...) \ - dev_dbg(&_phydev->mdio.dev, format, ##args) + dev_dbg(&_phydev->mdio.dev, format, ##args); static inline const char *phydev_name(const struct phy_device *phydev) { return dev_name(&phydev->mdio.dev); } -static inline void phy_lock_mdio_bus(struct phy_device *phydev) -{ - mutex_lock(&phydev->mdio.bus->mdio_lock); -} - -static inline void phy_unlock_mdio_bus(struct phy_device *phydev) -{ - mutex_unlock(&phydev->mdio.bus->mdio_lock); -} - void phy_attached_print(struct phy_device *phydev, const char *fmt, ...) __printf(2, 3); -char *phy_attached_info_irq(struct phy_device *phydev) - __malloc; void phy_attached_info(struct phy_device *phydev); - -/* Clause 22 PHY */ -int genphy_read_abilities(struct phy_device *phydev); +int genphy_config_init(struct phy_device *phydev); int genphy_setup_forced(struct phy_device *phydev); int genphy_restart_aneg(struct phy_device *phydev); -int genphy_check_and_restart_aneg(struct phy_device *phydev, bool restart); -int genphy_config_eee_advert(struct phy_device *phydev); -int __genphy_config_aneg(struct phy_device *phydev, bool changed); +int genphy_config_aneg(struct phy_device *phydev); int genphy_aneg_done(struct phy_device *phydev); int genphy_update_link(struct phy_device *phydev); -int genphy_read_lpa(struct phy_device *phydev); -int genphy_read_status_fixed(struct phy_device *phydev); int genphy_read_status(struct phy_device *phydev); int genphy_suspend(struct phy_device *phydev); int genphy_resume(struct phy_device *phydev); -int genphy_loopback(struct phy_device *phydev, bool enable); int genphy_soft_reset(struct phy_device *phydev); -irqreturn_t 
genphy_handle_interrupt_no_ack(struct phy_device *phydev); - -static inline int genphy_config_aneg(struct phy_device *phydev) -{ - return __genphy_config_aneg(phydev, false); -} - -static inline int genphy_no_config_intr(struct phy_device *phydev) -{ - return 0; -} -int genphy_read_mmd_unsupported(struct phy_device *phdev, int devad, - u16 regnum); -int genphy_write_mmd_unsupported(struct phy_device *phdev, int devnum, - u16 regnum, u16 val); - -/* Clause 37 */ -int genphy_c37_config_aneg(struct phy_device *phydev); -int genphy_c37_read_status(struct phy_device *phydev); - -/* Clause 45 PHY */ -int genphy_c45_restart_aneg(struct phy_device *phydev); -int genphy_c45_check_and_restart_aneg(struct phy_device *phydev, bool restart); -int genphy_c45_aneg_done(struct phy_device *phydev); -int genphy_c45_read_link(struct phy_device *phydev); -int genphy_c45_read_lpa(struct phy_device *phydev); -int genphy_c45_read_pma(struct phy_device *phydev); -int genphy_c45_pma_setup_forced(struct phy_device *phydev); -int genphy_c45_an_config_aneg(struct phy_device *phydev); -int genphy_c45_an_disable_aneg(struct phy_device *phydev); -int genphy_c45_read_mdix(struct phy_device *phydev); -int genphy_c45_pma_read_abilities(struct phy_device *phydev); -int genphy_c45_read_status(struct phy_device *phydev); -int genphy_c45_config_aneg(struct phy_device *phydev); -int genphy_c45_loopback(struct phy_device *phydev, bool enable); -int genphy_c45_pma_resume(struct phy_device *phydev); -int genphy_c45_pma_suspend(struct phy_device *phydev); - -/* Generic C45 PHY driver */ -extern struct phy_driver genphy_c45_driver; - -/* The gen10g_* functions are the old Clause 45 stub */ -int gen10g_config_aneg(struct phy_device *phydev); - -static inline int phy_read_status(struct phy_device *phydev) -{ - if (!phydev->drv) - return -EIO; - - if (phydev->drv->read_status) - return phydev->drv->read_status(phydev); - else - return genphy_read_status(phydev); -} - void phy_driver_unregister(struct phy_driver 
*drv); void phy_drivers_unregister(struct phy_driver *drv, int n); int phy_driver_register(struct phy_driver *new_driver, struct module *owner); int phy_drivers_register(struct phy_driver *new_driver, int n, struct module *owner); -void phy_error(struct phy_device *phydev); void phy_state_machine(struct work_struct *work); -void phy_queue_state_machine(struct phy_device *phydev, unsigned long jiffies); -void phy_trigger_machine(struct phy_device *phydev); -void phy_mac_interrupt(struct phy_device *phydev); +void phy_change(struct work_struct *work); +void phy_mac_interrupt(struct phy_device *phydev, int new_link); void phy_start_machine(struct phy_device *phydev); void phy_stop_machine(struct phy_device *phydev); -void phy_ethtool_ksettings_get(struct phy_device *phydev, - struct ethtool_link_ksettings *cmd); +int phy_ethtool_sset(struct phy_device *phydev, struct ethtool_cmd *cmd); +int phy_ethtool_gset(struct phy_device *phydev, struct ethtool_cmd *cmd); +int phy_ethtool_ksettings_get(struct phy_device *phydev, + struct ethtool_link_ksettings *cmd); int phy_ethtool_ksettings_set(struct phy_device *phydev, const struct ethtool_link_ksettings *cmd); int phy_mii_ioctl(struct phy_device *phydev, struct ifreq *ifr, int cmd); -int phy_do_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd); -int phy_do_ioctl_running(struct net_device *dev, struct ifreq *ifr, int cmd); -int phy_disable_interrupts(struct phy_device *phydev); -void phy_request_interrupt(struct phy_device *phydev); -void phy_free_interrupt(struct phy_device *phydev); +int phy_start_interrupts(struct phy_device *phydev); void phy_print_status(struct phy_device *phydev); +void phy_device_free(struct phy_device *phydev); int phy_set_max_speed(struct phy_device *phydev, u32 max_speed); -void phy_remove_link_mode(struct phy_device *phydev, u32 link_mode); -void phy_advertise_supported(struct phy_device *phydev); -void phy_support_sym_pause(struct phy_device *phydev); -void phy_support_asym_pause(struct 
phy_device *phydev); -void phy_set_sym_pause(struct phy_device *phydev, bool rx, bool tx, - bool autoneg); -void phy_set_asym_pause(struct phy_device *phydev, bool rx, bool tx); -bool phy_validate_pause(struct phy_device *phydev, - struct ethtool_pauseparam *pp); -void phy_get_pause(struct phy_device *phydev, bool *tx_pause, bool *rx_pause); - -s32 phy_get_internal_delay(struct phy_device *phydev, struct device *dev, - const int *delay_values, int size, bool is_rx); - -void phy_resolve_pause(unsigned long *local_adv, unsigned long *partner_adv, - bool *tx_pause, bool *rx_pause); int phy_register_fixup(const char *bus_id, u32 phy_uid, u32 phy_uid_mask, int (*run)(struct phy_device *)); @@ -1650,10 +825,6 @@ int phy_register_fixup_for_id(const char *bus_id, int phy_register_fixup_for_uid(u32 phy_uid, u32 phy_uid_mask, int (*run)(struct phy_device *)); -int phy_unregister_fixup(const char *bus_id, u32 phy_uid, u32 phy_uid_mask); -int phy_unregister_fixup_for_id(const char *bus_id); -int phy_unregister_fixup_for_uid(u32 phy_uid, u32 phy_uid_mask); - int phy_init_eee(struct phy_device *phydev, bool clk_stop_enable); int phy_get_eee_err(struct phy_device *phydev); int phy_ethtool_set_eee(struct phy_device *phydev, struct ethtool_eee *data); @@ -1665,110 +836,15 @@ int phy_ethtool_get_link_ksettings(struct net_device *ndev, struct ethtool_link_ksettings *cmd); int phy_ethtool_set_link_ksettings(struct net_device *ndev, const struct ethtool_link_ksettings *cmd); -int phy_ethtool_nway_reset(struct net_device *ndev); -int phy_package_join(struct phy_device *phydev, int addr, size_t priv_size); -void phy_package_leave(struct phy_device *phydev); -int devm_phy_package_join(struct device *dev, struct phy_device *phydev, - int addr, size_t priv_size); -#if IS_ENABLED(CONFIG_PHYLIB) int __init mdio_bus_init(void); void mdio_bus_exit(void); -#endif - -int phy_ethtool_get_strings(struct phy_device *phydev, u8 *data); -int phy_ethtool_get_sset_count(struct phy_device *phydev); -int 
phy_ethtool_get_stats(struct phy_device *phydev, - struct ethtool_stats *stats, u64 *data); - -static inline int phy_package_read(struct phy_device *phydev, u32 regnum) -{ - struct phy_package_shared *shared = phydev->shared; - - if (!shared) - return -EIO; - - return mdiobus_read(phydev->mdio.bus, shared->addr, regnum); -} - -static inline int __phy_package_read(struct phy_device *phydev, u32 regnum) -{ - struct phy_package_shared *shared = phydev->shared; - - if (!shared) - return -EIO; - - return __mdiobus_read(phydev->mdio.bus, shared->addr, regnum); -} - -static inline int phy_package_write(struct phy_device *phydev, - u32 regnum, u16 val) -{ - struct phy_package_shared *shared = phydev->shared; - - if (!shared) - return -EIO; - - return mdiobus_write(phydev->mdio.bus, shared->addr, regnum, val); -} - -static inline int __phy_package_write(struct phy_device *phydev, - u32 regnum, u16 val) -{ - struct phy_package_shared *shared = phydev->shared; - - if (!shared) - return -EIO; - - return __mdiobus_write(phydev->mdio.bus, shared->addr, regnum, val); -} - -static inline bool __phy_package_set_once(struct phy_device *phydev, - unsigned int b) -{ - struct phy_package_shared *shared = phydev->shared; - - if (!shared) - return false; - - return !test_and_set_bit(b, &shared->flags); -} - -static inline bool phy_package_init_once(struct phy_device *phydev) -{ - return __phy_package_set_once(phydev, PHY_SHARED_F_INIT_DONE); -} - -static inline bool phy_package_probe_once(struct phy_device *phydev) -{ - return __phy_package_set_once(phydev, PHY_SHARED_F_PROBE_DONE); -} extern struct bus_type mdio_bus_type; -struct mdio_board_info { - const char *bus_id; - char modalias[MDIO_NAME_SIZE]; - int mdio_addr; - const void *platform_data; -}; - -#if IS_ENABLED(CONFIG_MDIO_DEVICE) -int mdiobus_register_board_info(const struct mdio_board_info *info, - unsigned int n); -#else -static inline int mdiobus_register_board_info(const struct mdio_board_info *i, - unsigned int n) -{ - 
return 0; -} -#endif - - /** - * phy_module_driver() - Helper macro for registering PHY drivers + * module_phy_driver() - Helper macro for registering PHY drivers * @__phy_drivers: array of PHY drivers to register - * @__count: Numbers of members in array * * Helper macro for PHY drivers which do not do anything special in module * init/exit. Each module may only use this macro once, and calling it @@ -1789,7 +865,4 @@ module_exit(phy_module_exit) #define module_phy_driver(__phy_drivers) \ phy_module_driver(__phy_drivers, ARRAY_SIZE(__phy_drivers)) -bool phy_driver_is_genphy(struct phy_device *phydev); -bool phy_driver_is_genphy_10g(struct phy_device *phydev); - #endif /* __PHY_H */ diff --git a/include/linux/phy/omap_control_phy.h b/include/linux/phy/omap_control_phy.h index aec57dd784..eb7d4a135a 100644 --- a/include/linux/phy/omap_control_phy.h +++ b/include/linux/phy/omap_control_phy.h @@ -1,9 +1,19 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * omap_control_phy.h - Header file for the PHY part of control module. * * Copyright (C) 2013 Texas Instruments Incorporated - http://www.ti.com + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * * Author: Kishon Vijay Abraham I + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * */ #ifndef __OMAP_CONTROL_PHY_H__ diff --git a/include/linux/phy/omap_usb.h b/include/linux/phy/omap_usb.h index e23b52df93..2e5fb870ef 100644 --- a/include/linux/phy/omap_usb.h +++ b/include/linux/phy/omap_usb.h @@ -1,15 +1,79 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * omap_usb.h -- omap usb2 phy header file * - * Copyright (C) 2012-2020 Texas Instruments Incorporated - http://www.ti.com + * Copyright (C) 2012 Texas Instruments Incorporated - http://www.ti.com + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * * Author: Kishon Vijay Abraham I + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * */ #ifndef __DRIVERS_OMAP_USB2_H #define __DRIVERS_OMAP_USB2_H -#include +#include +#include + +struct usb_dpll_params { + u16 m; + u8 n; + u8 freq:3; + u8 sd; + u32 mf; +}; + +enum omap_usb_phy_type { + TYPE_USB2, /* USB2_PHY, power down in CONTROL_DEV_CONF */ + TYPE_DRA7USB2, /* USB2 PHY, power and power_aux e.g. DRA7 */ + TYPE_AM437USB2, /* USB2 PHY, power e.g. AM437x */ +}; + +struct omap_usb { + struct usb_phy phy; + struct phy_companion *comparator; + void __iomem *pll_ctrl_base; + void __iomem *phy_base; + struct device *dev; + struct device *control_dev; + struct clk *wkupclk; + struct clk *optclk; + u8 flags; + enum omap_usb_phy_type type; + struct regmap *syscon_phy_power; /* ctrl. reg. acces */ + unsigned int power_reg; /* power reg. 
index within syscon */ + u32 mask; + u32 power_on; + u32 power_off; +}; + +struct usb_phy_data { + const char *label; + u8 flags; + u32 mask; + u32 power_on; + u32 power_off; +}; + +/* Driver Flags */ +#define OMAP_USB2_HAS_START_SRP (1 << 0) +#define OMAP_USB2_HAS_SET_VBUS (1 << 1) +#define OMAP_USB2_CALIBRATE_FALSE_DISCONNECT (1 << 2) + +#define OMAP_DEV_PHY_PD BIT(0) +#define OMAP_USB2_PHY_PD BIT(28) + +#define AM437X_USB2_PHY_PD BIT(0) +#define AM437X_USB2_OTG_PD BIT(1) +#define AM437X_USB2_OTGVDET_EN BIT(19) +#define AM437X_USB2_OTGSESSEND_EN BIT(20) #define phy_to_omapusb(x) container_of((x), struct omap_usb, phy) @@ -22,4 +86,15 @@ static inline int omap_usb2_set_comparator(struct phy_companion *comparator) } #endif +static inline u32 omap_usb_readl(void __iomem *addr, unsigned offset) +{ + return __raw_readl(addr + offset); +} + +static inline void omap_usb_writel(void __iomem *addr, unsigned offset, + u32 data) +{ + __raw_writel(data, addr + offset); +} + #endif /* __DRIVERS_OMAP_USB_H */ diff --git a/include/linux/phy/phy-qcom-ufs.h b/include/linux/phy/phy-qcom-ufs.h new file mode 100644 index 0000000000..9d18e9f948 --- /dev/null +++ b/include/linux/phy/phy-qcom-ufs.h @@ -0,0 +1,59 @@ +/* + * Copyright (c) 2013-2015, Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#ifndef PHY_QCOM_UFS_H_ +#define PHY_QCOM_UFS_H_ + +#include "phy.h" + +/** + * ufs_qcom_phy_enable_ref_clk() - Enable the phy + * ref clock. + * @phy: reference to a generic phy + * + * returns 0 for success, and non-zero for error. 
+ */ +int ufs_qcom_phy_enable_ref_clk(struct phy *phy); + +/** + * ufs_qcom_phy_disable_ref_clk() - Disable the phy + * ref clock. + * @phy: reference to a generic phy. + */ +void ufs_qcom_phy_disable_ref_clk(struct phy *phy); + +/** + * ufs_qcom_phy_enable_dev_ref_clk() - Enable the device + * ref clock. + * @phy: reference to a generic phy. + */ +void ufs_qcom_phy_enable_dev_ref_clk(struct phy *phy); + +/** + * ufs_qcom_phy_disable_dev_ref_clk() - Disable the device + * ref clock. + * @phy: reference to a generic phy. + */ +void ufs_qcom_phy_disable_dev_ref_clk(struct phy *phy); + +int ufs_qcom_phy_enable_iface_clk(struct phy *phy); +void ufs_qcom_phy_disable_iface_clk(struct phy *phy); +int ufs_qcom_phy_start_serdes(struct phy *phy); +int ufs_qcom_phy_set_tx_lane_enable(struct phy *phy, u32 tx_lanes); +int ufs_qcom_phy_calibrate_phy(struct phy *phy, bool is_rate_B); +int ufs_qcom_phy_is_pcs_ready(struct phy *phy); +void ufs_qcom_phy_save_controller_version(struct phy *phy, + u8 major, u16 minor, u16 step); + +#endif /* PHY_QCOM_UFS_H_ */ diff --git a/include/linux/phy/phy-sun4i-usb.h b/include/linux/phy/phy-sun4i-usb.h index 91eb755ee7..50aed92ea8 100644 --- a/include/linux/phy/phy-sun4i-usb.h +++ b/include/linux/phy/phy-sun4i-usb.h @@ -1,6 +1,14 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright (c) 2015 Hans de Goede + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
*/ #ifndef PHY_SUN4I_USB_H_ diff --git a/include/linux/phy/phy.h b/include/linux/phy/phy.h index f3286f4cd3..78bb0d7f6b 100644 --- a/include/linux/phy/phy.h +++ b/include/linux/phy/phy.h @@ -1,10 +1,14 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * phy.h -- generic phy header file * * Copyright (C) 2013 Texas Instruments Incorporated - http://www.ti.com * * Author: Kishon Vijay Abraham I + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. */ #ifndef __DRIVERS_PHY_H @@ -16,51 +20,13 @@ #include #include -#include -#include - struct phy; enum phy_mode { PHY_MODE_INVALID, PHY_MODE_USB_HOST, - PHY_MODE_USB_HOST_LS, - PHY_MODE_USB_HOST_FS, - PHY_MODE_USB_HOST_HS, - PHY_MODE_USB_HOST_SS, PHY_MODE_USB_DEVICE, - PHY_MODE_USB_DEVICE_LS, - PHY_MODE_USB_DEVICE_FS, - PHY_MODE_USB_DEVICE_HS, - PHY_MODE_USB_DEVICE_SS, PHY_MODE_USB_OTG, - PHY_MODE_UFS_HS_A, - PHY_MODE_UFS_HS_B, - PHY_MODE_PCIE, - PHY_MODE_ETHERNET, - PHY_MODE_MIPI_DPHY, - PHY_MODE_SATA, - PHY_MODE_LVDS, - PHY_MODE_DP -}; - -enum phy_media { - PHY_MEDIA_DEFAULT, - PHY_MEDIA_SR, - PHY_MEDIA_DAC, -}; - -/** - * union phy_configure_opts - Opaque generic phy configuration - * - * @mipi_dphy: Configuration set applicable for phys supporting - * the MIPI_DPHY phy mode. - * @dp: Configuration set applicable for phys supporting - * the DisplayPort protocol. 
- */ -union phy_configure_opts { - struct phy_configure_opts_mipi_dphy mipi_dphy; - struct phy_configure_opts_dp dp; }; /** @@ -70,11 +36,7 @@ union phy_configure_opts { * @power_on: powering on the phy * @power_off: powering off the phy * @set_mode: set the mode of the phy - * @set_media: set the media type of the phy (optional) - * @set_speed: set the speed of the phy (optional) * @reset: resetting the phy - * @calibrate: calibrate the phy - * @release: ops to be performed while the consumer relinquishes the PHY * @owner: the module owner containing the ops */ struct phy_ops { @@ -82,56 +44,17 @@ struct phy_ops { int (*exit)(struct phy *phy); int (*power_on)(struct phy *phy); int (*power_off)(struct phy *phy); - int (*set_mode)(struct phy *phy, enum phy_mode mode, int submode); - int (*set_media)(struct phy *phy, enum phy_media media); - int (*set_speed)(struct phy *phy, int speed); - - /** - * @configure: - * - * Optional. - * - * Used to change the PHY parameters. phy_init() must have - * been called on the phy. - * - * Returns: 0 if successful, an negative error code otherwise - */ - int (*configure)(struct phy *phy, union phy_configure_opts *opts); - - /** - * @validate: - * - * Optional. - * - * Used to check that the current set of parameters can be - * handled by the phy. Implementations are free to tune the - * parameters passed as arguments if needed by some - * implementation detail or constraints. It must not change - * any actual configuration of the PHY, so calling it as many - * times as deemed fit by the consumer must have no side - * effect. 
- * - * Returns: 0 if the configuration can be applied, an negative - * error code otherwise - */ - int (*validate)(struct phy *phy, enum phy_mode mode, int submode, - union phy_configure_opts *opts); + int (*set_mode)(struct phy *phy, enum phy_mode mode); int (*reset)(struct phy *phy); - int (*calibrate)(struct phy *phy); - void (*release)(struct phy *phy); struct module *owner; }; /** * struct phy_attrs - represents phy attributes * @bus_width: Data path width implemented by PHY - * @max_link_rate: Maximum link rate supported by PHY (units to be decided by producer and consumer) - * @mode: PHY mode */ struct phy_attrs { u32 bus_width; - u32 max_link_rate; - enum phy_mode mode; }; /** @@ -139,11 +62,11 @@ struct phy_attrs { * @dev: phy device * @id: id of the phy device * @ops: function pointers for performing phy operations + * @init_data: list of PHY consumers (non-dt only) * @mutex: mutex to protect phy_ops * @init_count: used to protect when the PHY is used by multiple consumers * @power_count: used to protect when the PHY is used by multiple consumers - * @attrs: used to specify PHY specific attributes - * @pwr: power regulator associated with the phy + * @phy_attrs: used to specify PHY specific attributes */ struct phy { struct device dev; @@ -159,10 +82,9 @@ struct phy { /** * struct phy_provider - represents the phy provider * @dev: phy provider device - * @children: can be used to override the default (dev->of_node) child node * @owner: the module owner having of_xlate - * @list: to maintain a linked list of PHY providers * @of_xlate: function pointer to obtain phy instance from phy pointer + * @list: to maintain a linked list of PHY providers */ struct phy_provider { struct device *dev; @@ -173,13 +95,6 @@ struct phy_provider { struct of_phandle_args *args); }; -/** - * struct phy_lookup - PHY association in list of phys managed by the phy driver - * @node: list node - * @dev_id: the device of the association - * @con_id: connection ID string on device - 
* @phy: the phy of the association - */ struct phy_lookup { struct list_head node; const char *dev_id; @@ -222,21 +137,8 @@ int phy_init(struct phy *phy); int phy_exit(struct phy *phy); int phy_power_on(struct phy *phy); int phy_power_off(struct phy *phy); -int phy_set_mode_ext(struct phy *phy, enum phy_mode mode, int submode); -#define phy_set_mode(phy, mode) \ - phy_set_mode_ext(phy, mode, 0) -int phy_set_media(struct phy *phy, enum phy_media media); -int phy_set_speed(struct phy *phy, int speed); -int phy_configure(struct phy *phy, union phy_configure_opts *opts); -int phy_validate(struct phy *phy, enum phy_mode mode, int submode, - union phy_configure_opts *opts); - -static inline enum phy_mode phy_get_mode(struct phy *phy) -{ - return phy->attrs.mode; -} +int phy_set_mode(struct phy *phy, enum phy_mode mode); int phy_reset(struct phy *phy); -int phy_calibrate(struct phy *phy); static inline int phy_get_bus_width(struct phy *phy) { return phy->attrs.bus_width; @@ -253,8 +155,7 @@ struct phy *devm_of_phy_get(struct device *dev, struct device_node *np, const char *con_id); struct phy *devm_of_phy_get_by_index(struct device *dev, struct device_node *np, int index); -void of_phy_put(struct phy *phy); -void phy_put(struct device *dev, struct phy *phy); +void phy_put(struct phy *phy); void devm_phy_put(struct device *dev, struct phy *phy); struct phy *of_phy_get(struct device_node *np, const char *con_id); struct phy *of_phy_simple_xlate(struct device *dev, @@ -345,36 +246,13 @@ static inline int phy_power_off(struct phy *phy) return -ENOSYS; } -static inline int phy_set_mode_ext(struct phy *phy, enum phy_mode mode, - int submode) +static inline int phy_set_mode(struct phy *phy, enum phy_mode mode) { if (!phy) return 0; return -ENOSYS; } -#define phy_set_mode(phy, mode) \ - phy_set_mode_ext(phy, mode, 0) - -static inline int phy_set_media(struct phy *phy, enum phy_media media) -{ - if (!phy) - return 0; - return -ENODEV; -} - -static inline int phy_set_speed(struct 
phy *phy, int speed) -{ - if (!phy) - return 0; - return -ENODEV; -} - -static inline enum phy_mode phy_get_mode(struct phy *phy) -{ - return PHY_MODE_INVALID; -} - static inline int phy_reset(struct phy *phy) { if (!phy) @@ -382,31 +260,6 @@ static inline int phy_reset(struct phy *phy) return -ENOSYS; } -static inline int phy_calibrate(struct phy *phy) -{ - if (!phy) - return 0; - return -ENOSYS; -} - -static inline int phy_configure(struct phy *phy, - union phy_configure_opts *opts) -{ - if (!phy) - return 0; - - return -ENOSYS; -} - -static inline int phy_validate(struct phy *phy, enum phy_mode mode, int submode, - union phy_configure_opts *opts) -{ - if (!phy) - return 0; - - return -ENOSYS; -} - static inline int phy_get_bus_width(struct phy *phy) { return -ENOSYS; @@ -436,7 +289,7 @@ static inline struct phy *devm_phy_get(struct device *dev, const char *string) static inline struct phy *devm_phy_optional_get(struct device *dev, const char *string) { - return NULL; + return ERR_PTR(-ENOSYS); } static inline struct phy *devm_of_phy_get(struct device *dev, @@ -453,11 +306,7 @@ static inline struct phy *devm_of_phy_get_by_index(struct device *dev, return ERR_PTR(-ENOSYS); } -static inline void of_phy_put(struct phy *phy) -{ -} - -static inline void phy_put(struct device *dev, struct phy *phy) +static inline void phy_put(struct phy *phy) { } diff --git a/include/linux/phy/tegra/xusb.h b/include/linux/phy/tegra/xusb.h index 3a35e74cdc..8e1a57a78d 100644 --- a/include/linux/phy/tegra/xusb.h +++ b/include/linux/phy/tegra/xusb.h @@ -1,6 +1,14 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* - * Copyright (c) 2016-2020, NVIDIA CORPORATION. All rights reserved. + * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. 
+ * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. */ #ifndef PHY_TEGRA_XUSB_H @@ -8,7 +16,6 @@ struct tegra_xusb_padctl; struct device; -enum usb_device_speed; struct tegra_xusb_padctl *tegra_xusb_padctl_get(struct device *dev); void tegra_xusb_padctl_put(struct tegra_xusb_padctl *padctl); @@ -19,16 +26,5 @@ int tegra_xusb_padctl_hsic_set_idle(struct tegra_xusb_padctl *padctl, unsigned int port, bool idle); int tegra_xusb_padctl_usb3_set_lfps_detect(struct tegra_xusb_padctl *padctl, unsigned int port, bool enable); -int tegra_xusb_padctl_set_vbus_override(struct tegra_xusb_padctl *padctl, - bool val); -int tegra_phy_xusb_utmi_port_reset(struct phy *phy); -int tegra_xusb_padctl_get_usb3_companion(struct tegra_xusb_padctl *padctl, - unsigned int port); -int tegra_xusb_padctl_enable_phy_sleepwalk(struct tegra_xusb_padctl *padctl, struct phy *phy, - enum usb_device_speed speed); -int tegra_xusb_padctl_disable_phy_sleepwalk(struct tegra_xusb_padctl *padctl, struct phy *phy); -int tegra_xusb_padctl_enable_phy_wake(struct tegra_xusb_padctl *padctl, struct phy *phy); -int tegra_xusb_padctl_disable_phy_wake(struct tegra_xusb_padctl *padctl, struct phy *phy); -bool tegra_xusb_padctl_remote_wake_detected(struct tegra_xusb_padctl *padctl, struct phy *phy); #endif /* PHY_TEGRA_XUSB_H */ diff --git a/include/linux/phy_fixed.h b/include/linux/phy_fixed.h index 52bc8e487e..1d41ec44e3 100644 --- a/include/linux/phy_fixed.h +++ b/include/linux/phy_fixed.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef __PHY_FIXED_H #define __PHY_FIXED_H @@ -11,46 +10,36 @@ struct fixed_phy_status { }; struct device_node; -struct gpio_desc; #if IS_ENABLED(CONFIG_FIXED_PHY) -extern int fixed_phy_change_carrier(struct net_device *dev, bool new_carrier); extern int fixed_phy_add(unsigned int 
irq, int phy_id, - struct fixed_phy_status *status); + struct fixed_phy_status *status, + int link_gpio); extern struct phy_device *fixed_phy_register(unsigned int irq, struct fixed_phy_status *status, + int link_gpio, struct device_node *np); - -extern struct phy_device * -fixed_phy_register_with_gpiod(unsigned int irq, - struct fixed_phy_status *status, - struct gpio_desc *gpiod); - extern void fixed_phy_unregister(struct phy_device *phydev); extern int fixed_phy_set_link_update(struct phy_device *phydev, int (*link_update)(struct net_device *, struct fixed_phy_status *)); +extern int fixed_phy_update_state(struct phy_device *phydev, + const struct fixed_phy_status *status, + const struct fixed_phy_status *changed); #else static inline int fixed_phy_add(unsigned int irq, int phy_id, - struct fixed_phy_status *status) + struct fixed_phy_status *status, + int link_gpio) { return -ENODEV; } static inline struct phy_device *fixed_phy_register(unsigned int irq, struct fixed_phy_status *status, + int gpio_link, struct device_node *np) { return ERR_PTR(-ENODEV); } - -static inline struct phy_device * -fixed_phy_register_with_gpiod(unsigned int irq, - struct fixed_phy_status *status, - struct gpio_desc *gpiod) -{ - return ERR_PTR(-ENODEV); -} - static inline void fixed_phy_unregister(struct phy_device *phydev) { } @@ -60,9 +49,11 @@ static inline int fixed_phy_set_link_update(struct phy_device *phydev, { return -ENODEV; } -static inline int fixed_phy_change_carrier(struct net_device *dev, bool new_carrier) +static inline int fixed_phy_update_state(struct phy_device *phydev, + const struct fixed_phy_status *status, + const struct fixed_phy_status *changed) { - return -EINVAL; + return -ENODEV; } #endif /* CONFIG_FIXED_PHY */ diff --git a/include/linux/pid.h b/include/linux/pid.h index af308e15f1..af2bfb481e 100644 --- a/include/linux/pid.h +++ b/include/linux/pid.h @@ -1,18 +1,14 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_PID_H #define _LINUX_PID_H 
-#include -#include -#include +#include enum pid_type { PIDTYPE_PID, - PIDTYPE_TGID, PIDTYPE_PGID, PIDTYPE_SID, - PIDTYPE_MAX, + PIDTYPE_MAX }; /* @@ -52,47 +48,39 @@ enum pid_type */ struct upid { + /* Try to keep pid_chain in the same cacheline as nr for find_vpid */ int nr; struct pid_namespace *ns; + struct hlist_node pid_chain; }; struct pid { - refcount_t count; + atomic_t count; unsigned int level; - spinlock_t lock; /* lists of tasks that use this pid */ struct hlist_head tasks[PIDTYPE_MAX]; - struct hlist_head inodes; - /* wait queue for pidfd notifications */ - wait_queue_head_t wait_pidfd; struct rcu_head rcu; struct upid numbers[1]; }; extern struct pid init_struct_pid; -extern const struct file_operations pidfd_fops; - -struct file; - -extern struct pid *pidfd_pid(const struct file *file); -struct pid *pidfd_get_pid(unsigned int fd, unsigned int *flags); -int pidfd_create(struct pid *pid, unsigned int flags); +struct pid_link +{ + struct hlist_node node; + struct pid *pid; +}; static inline struct pid *get_pid(struct pid *pid) { if (pid) - refcount_inc(&pid->count); + atomic_inc(&pid->count); return pid; } extern void put_pid(struct pid *pid); extern struct task_struct *pid_task(struct pid *pid, enum pid_type); -static inline bool pid_has_task(struct pid *pid, enum pid_type type) -{ - return !hlist_empty(&pid->tasks[type]); -} extern struct task_struct *get_pid_task(struct pid *pid, enum pid_type); extern struct pid *get_task_pid(struct task_struct *task, enum pid_type type); @@ -104,16 +92,12 @@ extern void attach_pid(struct task_struct *task, enum pid_type); extern void detach_pid(struct task_struct *task, enum pid_type); extern void change_pid(struct task_struct *task, enum pid_type, struct pid *pid); -extern void exchange_tids(struct task_struct *task, struct task_struct *old); extern void transfer_pid(struct task_struct *old, struct task_struct *new, enum pid_type); struct pid_namespace; extern struct pid_namespace init_pid_ns; -extern int 
pid_max; -extern int pid_max_min, pid_max_max; - /* * look up a PID in the hash table. Must be called with the tasklist_lock * or rcu_read_lock() held. @@ -131,9 +115,9 @@ extern struct pid *find_vpid(int nr); */ extern struct pid *find_get_pid(int nr); extern struct pid *find_ge_pid(int nr, struct pid_namespace *); +int next_pidmap(struct pid_namespace *pid_ns, unsigned int last); -extern struct pid *alloc_pid(struct pid_namespace *ns, pid_t *set_tid, - size_t set_tid_size); +extern struct pid *alloc_pid(struct pid_namespace *ns); extern void free_pid(struct pid *pid); extern void disable_pid_allocation(struct pid_namespace *ns); @@ -185,14 +169,14 @@ static inline pid_t pid_nr(struct pid *pid) return nr; } -pid_t pid_nr_ns(struct pid *pid, struct pid_namespace *ns); -pid_t pid_vnr(struct pid *pid); +pid_t pid_nr_ns(const struct pid *pid, const struct pid_namespace *ns); +pid_t pid_vnr(const struct pid *pid); #define do_each_pid_task(pid, type, task) \ do { \ if ((pid) != NULL) \ hlist_for_each_entry_rcu((task), \ - &(pid)->tasks[type], pid_links[type]) { + &(pid)->tasks[type], pids[type].node) { /* * Both old and new leaders may be attached to @@ -207,10 +191,10 @@ pid_t pid_vnr(struct pid *pid); #define do_each_pid_thread(pid, type, task) \ do_each_pid_task(pid, type, task) { \ struct task_struct *tg___ = task; \ - for_each_thread(tg___, task) { + do { #define while_each_pid_thread(pid, type, task) \ - } \ + } while_each_thread(tg___, task); \ task = tg___; \ } while_each_pid_task(pid, type, task) #endif /* _LINUX_PID_H */ diff --git a/include/linux/pid_namespace.h b/include/linux/pid_namespace.h index 7c7e627503..c925afb667 100644 --- a/include/linux/pid_namespace.h +++ b/include/linux/pid_namespace.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_PID_NS_H #define _LINUX_PID_NS_H @@ -8,40 +7,56 @@ #include #include #include +#include #include -#include -/* MAX_PID_NS_LEVEL is needed for limiting size of 'struct pid' */ -#define 
MAX_PID_NS_LEVEL 32 +struct pidmap { + atomic_t nr_free; + void *page; +}; + +#define BITS_PER_PAGE (PAGE_SIZE * 8) +#define BITS_PER_PAGE_MASK (BITS_PER_PAGE-1) +#define PIDMAP_ENTRIES ((PID_MAX_LIMIT+BITS_PER_PAGE-1)/BITS_PER_PAGE) struct fs_pin; struct pid_namespace { - struct idr idr; + struct kref kref; + struct pidmap pidmap[PIDMAP_ENTRIES]; struct rcu_head rcu; - unsigned int pid_allocated; + int last_pid; + unsigned int nr_hashed; struct task_struct *child_reaper; struct kmem_cache *pid_cachep; unsigned int level; struct pid_namespace *parent; +#ifdef CONFIG_PROC_FS + struct vfsmount *proc_mnt; + struct dentry *proc_self; + struct dentry *proc_thread_self; +#endif #ifdef CONFIG_BSD_PROCESS_ACCT struct fs_pin *bacct; #endif struct user_namespace *user_ns; struct ucounts *ucounts; + struct work_struct proc_work; + kgid_t pid_gid; + int hide_pid; int reboot; /* group exit code if this pidns was rebooted */ struct ns_common ns; } __randomize_layout; extern struct pid_namespace init_pid_ns; -#define PIDNS_ADDING (1U << 31) +#define PIDNS_HASH_ADDING (1U << 31) #ifdef CONFIG_PID_NS static inline struct pid_namespace *get_pid_ns(struct pid_namespace *ns) { if (ns != &init_pid_ns) - refcount_inc(&ns->ns.count); + kref_get(&ns->kref); return ns; } @@ -84,6 +99,6 @@ static inline int reboot_pid_ns(struct pid_namespace *pid_ns, int cmd) extern struct pid_namespace *task_active_pid_ns(struct task_struct *tsk); void pidhash_init(void); -void pid_idr_init(void); +void pidmap_init(void); #endif /* _LINUX_PID_NS_H */ diff --git a/include/linux/pim.h b/include/linux/pim.h index 290d4d2ed9..e1d756f813 100644 --- a/include/linux/pim.h +++ b/include/linux/pim.h @@ -1,8 +1,6 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef __LINUX_PIM_H #define __LINUX_PIM_H -#include #include /* Message types - V1 */ @@ -11,86 +9,24 @@ /* Message types - V2 */ #define PIM_VERSION 2 - -/* RFC7761, sec 4.9: - * Type - * Types for specific PIM messages. 
PIM Types are: - * - * Message Type Destination - * --------------------------------------------------------------------- - * 0 = Hello Multicast to ALL-PIM-ROUTERS - * 1 = Register Unicast to RP - * 2 = Register-Stop Unicast to source of Register - * packet - * 3 = Join/Prune Multicast to ALL-PIM-ROUTERS - * 4 = Bootstrap Multicast to ALL-PIM-ROUTERS - * 5 = Assert Multicast to ALL-PIM-ROUTERS - * 6 = Graft (used in PIM-DM only) Unicast to RPF'(S) - * 7 = Graft-Ack (used in PIM-DM only) Unicast to source of Graft - * packet - * 8 = Candidate-RP-Advertisement Unicast to Domain's BSR - */ -enum { - PIM_TYPE_HELLO, - PIM_TYPE_REGISTER, - PIM_TYPE_REGISTER_STOP, - PIM_TYPE_JOIN_PRUNE, - PIM_TYPE_BOOTSTRAP, - PIM_TYPE_ASSERT, - PIM_TYPE_GRAFT, - PIM_TYPE_GRAFT_ACK, - PIM_TYPE_CANDIDATE_RP_ADV -}; +#define PIM_REGISTER 1 #define PIM_NULL_REGISTER cpu_to_be32(0x40000000) -/* RFC7761, sec 4.9: - * The PIM header common to all PIM messages is: - * 0 1 2 3 - * 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 - * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ - * |PIM Ver| Type | Reserved | Checksum | - * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ - */ -struct pimhdr { - __u8 type; - __u8 reserved; - __be16 csum; -}; - -/* PIMv2 register message header layout (ietf-draft-idmr-pimvsm-v2-00.ps */ -struct pimreghdr { - __u8 type; - __u8 reserved; - __be16 csum; - __be32 flags; -}; - -int pim_rcv_v1(struct sk_buff *skb); - static inline bool ipmr_pimsm_enabled(void) { return IS_BUILTIN(CONFIG_IP_PIMSM_V1) || IS_BUILTIN(CONFIG_IP_PIMSM_V2); } -static inline struct pimhdr *pim_hdr(const struct sk_buff *skb) +/* PIMv2 register message header layout (ietf-draft-idmr-pimvsm-v2-00.ps */ +struct pimreghdr { - return (struct pimhdr *)skb_transport_header(skb); -} + __u8 type; + __u8 reserved; + __be16 csum; + __be32 flags; +}; -static inline u8 pim_hdr_version(const struct pimhdr *pimhdr) -{ - return pimhdr->type >> 4; -} - 
-static inline u8 pim_hdr_type(const struct pimhdr *pimhdr) -{ - return pimhdr->type & 0xf; -} - -/* check if the address is 224.0.0.13, RFC7761 sec 4.3.1 */ -static inline bool pim_ipv4_all_pim_routers(__be32 addr) -{ - return addr == htonl(0xE000000D); -} +struct sk_buff; +extern int pim_rcv_v1(struct sk_buff *); #endif diff --git a/include/linux/pinctrl/consumer.h b/include/linux/pinctrl/consumer.h index 019fecd75d..d7e5d608fa 100644 --- a/include/linux/pinctrl/consumer.h +++ b/include/linux/pinctrl/consumer.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * Consumer interface the pin control subsystem * @@ -7,6 +6,8 @@ * Based on bits of regulator core, gpio core and clk core * * Author: Linus Walleij + * + * License terms: GNU General Public License (GPL) version 2 */ #ifndef __LINUX_PINCTRL_CONSUMER_H #define __LINUX_PINCTRL_CONSUMER_H @@ -24,12 +25,10 @@ struct device; #ifdef CONFIG_PINCTRL /* External interface to pin control */ -extern bool pinctrl_gpio_can_use_line(unsigned gpio); -extern int pinctrl_gpio_request(unsigned gpio); -extern void pinctrl_gpio_free(unsigned gpio); +extern int pinctrl_request_gpio(unsigned gpio); +extern void pinctrl_free_gpio(unsigned gpio); extern int pinctrl_gpio_direction_input(unsigned gpio); extern int pinctrl_gpio_direction_output(unsigned gpio); -extern int pinctrl_gpio_set_config(unsigned gpio, unsigned long config); extern struct pinctrl * __must_check pinctrl_get(struct device *dev); extern void pinctrl_put(struct pinctrl *p); @@ -40,7 +39,6 @@ extern int pinctrl_select_state(struct pinctrl *p, struct pinctrl_state *s); extern struct pinctrl * __must_check devm_pinctrl_get(struct device *dev); extern void devm_pinctrl_put(struct pinctrl *p); -extern int pinctrl_select_default_state(struct device *dev); #ifdef CONFIG_PM extern int pinctrl_pm_select_default_state(struct device *dev); @@ -63,17 +61,12 @@ static inline int pinctrl_pm_select_idle_state(struct device *dev) #else /* !CONFIG_PINCTRL */ 
-static inline bool pinctrl_gpio_can_use_line(unsigned gpio) -{ - return true; -} - -static inline int pinctrl_gpio_request(unsigned gpio) +static inline int pinctrl_request_gpio(unsigned gpio) { return 0; } -static inline void pinctrl_gpio_free(unsigned gpio) +static inline void pinctrl_free_gpio(unsigned gpio) { } @@ -87,11 +80,6 @@ static inline int pinctrl_gpio_direction_output(unsigned gpio) return 0; } -static inline int pinctrl_gpio_set_config(unsigned gpio, unsigned long config) -{ - return 0; -} - static inline struct pinctrl * __must_check pinctrl_get(struct device *dev) { return NULL; @@ -123,11 +111,6 @@ static inline void devm_pinctrl_put(struct pinctrl *p) { } -static inline int pinctrl_select_default_state(struct device *dev) -{ - return 0; -} - static inline int pinctrl_pm_select_default_state(struct device *dev) { return 0; diff --git a/include/linux/pinctrl/devinfo.h b/include/linux/pinctrl/devinfo.h index a48ff69acd..05082e407c 100644 --- a/include/linux/pinctrl/devinfo.h +++ b/include/linux/pinctrl/devinfo.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * Per-device information from the pin control system. * This is the stuff that get included into the device @@ -9,6 +8,8 @@ * This interface is used in the core to keep track of pins. * * Author: Linus Walleij + * + * License terms: GNU General Public License (GPL) version 2 */ #ifndef PINCTRL_DEVINFO_H @@ -42,8 +43,6 @@ extern int pinctrl_init_done(struct device *dev); #else -struct device; - /* Stubs if we're not using pinctrl */ static inline int pinctrl_bind_pins(struct device *dev) diff --git a/include/linux/pinctrl/machine.h b/include/linux/pinctrl/machine.h index e987dc9fd2..e5b1716f98 100644 --- a/include/linux/pinctrl/machine.h +++ b/include/linux/pinctrl/machine.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * Machine interface for the pinctrl subsystem. 
* @@ -7,6 +6,8 @@ * Based on bits of regulator core, gpio core and clk core * * Author: Linus Walleij + * + * License terms: GNU General Public License (GPL) version 2 */ #ifndef __LINUX_PINCTRL_MACHINE_H #define __LINUX_PINCTRL_MACHINE_H @@ -151,22 +152,17 @@ struct pinctrl_map { #ifdef CONFIG_PINCTRL -extern int pinctrl_register_mappings(const struct pinctrl_map *map, +extern int pinctrl_register_mappings(struct pinctrl_map const *map, unsigned num_maps); -extern void pinctrl_unregister_mappings(const struct pinctrl_map *map); extern void pinctrl_provide_dummies(void); #else -static inline int pinctrl_register_mappings(const struct pinctrl_map *map, +static inline int pinctrl_register_mappings(struct pinctrl_map const *map, unsigned num_maps) { return 0; } -static inline void pinctrl_unregister_mappings(const struct pinctrl_map *map) -{ -} - static inline void pinctrl_provide_dummies(void) { } diff --git a/include/linux/pinctrl/pinconf-generic.h b/include/linux/pinctrl/pinconf-generic.h index eee0e39485..12343caa11 100644 --- a/include/linux/pinctrl/pinconf-generic.h +++ b/include/linux/pinctrl/pinconf-generic.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * Interface the generic pinconfig portions of the pinctrl subsystem * @@ -7,15 +6,17 @@ * This interface is used in the core to keep track of pins. * * Author: Linus Walleij + * + * License terms: GNU General Public License (GPL) version 2 */ #ifndef __LINUX_PINCTRL_PINCONF_GENERIC_H #define __LINUX_PINCTRL_PINCONF_GENERIC_H -#include -#include - -struct pinctrl_dev; -struct pinctrl_map; +/* + * You shouldn't even be able to compile with these enums etc unless you're + * using generic pin config. That is why this is defined out. + */ +#ifdef CONFIG_GENERIC_PINCONF /** * enum pin_config_param - possible pin configuration parameters @@ -60,8 +61,6 @@ struct pinctrl_map; * push-pull mode, the argument is ignored. 
* @PIN_CONFIG_DRIVE_STRENGTH: the pin will sink or source at most the current * passed as argument. The argument is in mA. - * @PIN_CONFIG_DRIVE_STRENGTH_UA: the pin will sink or source at most the current - * passed as argument. The argument is in uA. * @PIN_CONFIG_INPUT_DEBOUNCE: this will configure the pin to debounce mode, * which means it will wait for signals to settle when reading inputs. The * argument gives the debounce time in usecs. Setting the @@ -76,38 +75,23 @@ struct pinctrl_map; * @PIN_CONFIG_INPUT_SCHMITT_ENABLE: control schmitt-trigger mode on the pin. * If the argument != 0, schmitt-trigger mode is enabled. If it's 0, * schmitt-trigger mode is disabled. - * @PIN_CONFIG_MODE_LOW_POWER: this will configure the pin for low power + * @PIN_CONFIG_LOW_POWER_MODE: this will configure the pin for low power * operation, if several modes of operation are supported these can be * passed in the argument on a custom form, else just use argument 1 * to indicate low power mode, argument 0 turns low power mode off. - * @PIN_CONFIG_MODE_PWM: this will configure the pin for PWM - * @PIN_CONFIG_OUTPUT: this will configure the pin as an output and drive a - * value on the line. Use argument 1 to indicate high level, argument 0 to - * indicate low level. (Please see Documentation/driver-api/pin-control.rst, - * section "GPIO mode pitfalls" for a discussion around this parameter.) - * @PIN_CONFIG_OUTPUT_ENABLE: this will enable the pin's output mode - * without driving a value there. For most platforms this reduces to - * enable the output buffers and then let the pin controller current - * configuration (eg. the currently selected mux function) drive values on - * the line. Use argument 1 to enable output mode, argument 0 to disable - * it. - * @PIN_CONFIG_PERSIST_STATE: retain pin state across sleep or controller reset + * @PIN_CONFIG_OUTPUT: this will configure the pin as an output. Use argument + * 1 to indicate high level, argument 0 to indicate low level. 
(Please + * see Documentation/pinctrl.txt, section "GPIO mode pitfalls" for a + * discussion around this parameter.) * @PIN_CONFIG_POWER_SOURCE: if the pin can select between different power * supplies, the argument to this parameter (on a custom format) tells * the driver which alternative power source to use. - * @PIN_CONFIG_SKEW_DELAY: if the pin has programmable skew rate (on inputs) - * or latch delay (on outputs) this parameter (in a custom format) - * specifies the clock skew or latch delay. It typically controls how - * many double inverters are put in front of the line. - * @PIN_CONFIG_SLEEP_HARDWARE_STATE: indicate this is sleep related state. * @PIN_CONFIG_SLEW_RATE: if the pin can select slew rate, the argument to * this parameter (on a custom format) tells the driver which alternative * slew rate to use. * @PIN_CONFIG_END: this is the last enumerator for pin configurations, if * you need to pass in custom configurations to the pin controller, use * PIN_CONFIG_END+1 as the base offset. - * @PIN_CONFIG_MAX: this is the maximum configuration value that can be - * presented using the packed format. */ enum pin_config_param { PIN_CONFIG_BIAS_BUS_HOLD, @@ -120,55 +104,20 @@ enum pin_config_param { PIN_CONFIG_DRIVE_OPEN_SOURCE, PIN_CONFIG_DRIVE_PUSH_PULL, PIN_CONFIG_DRIVE_STRENGTH, - PIN_CONFIG_DRIVE_STRENGTH_UA, PIN_CONFIG_INPUT_DEBOUNCE, PIN_CONFIG_INPUT_ENABLE, PIN_CONFIG_INPUT_SCHMITT, PIN_CONFIG_INPUT_SCHMITT_ENABLE, - PIN_CONFIG_MODE_LOW_POWER, - PIN_CONFIG_MODE_PWM, + PIN_CONFIG_LOW_POWER_MODE, PIN_CONFIG_OUTPUT, - PIN_CONFIG_OUTPUT_ENABLE, - PIN_CONFIG_PERSIST_STATE, PIN_CONFIG_POWER_SOURCE, - PIN_CONFIG_SKEW_DELAY, - PIN_CONFIG_SLEEP_HARDWARE_STATE, PIN_CONFIG_SLEW_RATE, - PIN_CONFIG_END = 0x7F, - PIN_CONFIG_MAX = 0xFF, + PIN_CONFIG_END = 0x7FFF, }; -/* - * Helpful configuration macro to be used in tables etc. 
- */ -#define PIN_CONF_PACKED(p, a) ((a << 8) | ((unsigned long) p & 0xffUL)) - -/* - * The following inlines stuffs a configuration parameter and data value - * into and out of an unsigned long argument, as used by the generic pin config - * system. We put the parameter in the lower 8 bits and the argument in the - * upper 24 bits. - */ - -static inline enum pin_config_param pinconf_to_config_param(unsigned long config) -{ - return (enum pin_config_param) (config & 0xffUL); -} - -static inline u32 pinconf_to_config_argument(unsigned long config) -{ - return (u32) ((config >> 8) & 0xffffffUL); -} - -static inline unsigned long pinconf_to_config_packed(enum pin_config_param param, - u32 argument) -{ - return PIN_CONF_PACKED(param, argument); -} - -#define PCONFDUMP(a, b, c, d) { \ - .param = a, .display = b, .format = c, .has_arg = d \ - } +#ifdef CONFIG_DEBUG_FS +#define PCONFDUMP(a, b, c, d) { .param = a, .display = b, .format = c, \ + .has_arg = d } struct pin_config_item { const enum pin_config_param param; @@ -176,6 +125,42 @@ struct pin_config_item { const char * const format; bool has_arg; }; +#endif /* CONFIG_DEBUG_FS */ + +/* + * Helpful configuration macro to be used in tables etc. + */ +#define PIN_CONF_PACKED(p, a) ((a << 16) | ((unsigned long) p & 0xffffUL)) + +/* + * The following inlines stuffs a configuration parameter and data value + * into and out of an unsigned long argument, as used by the generic pin config + * system. We put the parameter in the lower 16 bits and the argument in the + * upper 16 bits. 
+ */ + +static inline enum pin_config_param pinconf_to_config_param(unsigned long config) +{ + return (enum pin_config_param) (config & 0xffffUL); +} + +static inline u16 pinconf_to_config_argument(unsigned long config) +{ + return (enum pin_config_param) ((config >> 16) & 0xffffUL); +} + +static inline unsigned long pinconf_to_config_packed(enum pin_config_param param, + u16 argument) +{ + return PIN_CONF_PACKED(param, argument); +} + +#ifdef CONFIG_OF + +#include +#include +struct pinctrl_dev; +struct pinctrl_map; struct pinconf_generic_params { const char * const property; @@ -220,5 +205,8 @@ static inline int pinconf_generic_dt_node_to_map_all( return pinconf_generic_dt_node_to_map(pctldev, np_config, map, num_maps, PIN_MAP_TYPE_INVALID); } +#endif + +#endif /* CONFIG_GENERIC_PINCONF */ #endif /* __LINUX_PINCTRL_PINCONF_GENERIC_H */ diff --git a/include/linux/pinctrl/pinconf.h b/include/linux/pinctrl/pinconf.h index f8a8215e90..09eb80f257 100644 --- a/include/linux/pinctrl/pinconf.h +++ b/include/linux/pinctrl/pinconf.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * Interface the pinconfig portions of the pinctrl subsystem * @@ -7,11 +6,15 @@ * This interface is used in the core to keep track of pins. * * Author: Linus Walleij + * + * License terms: GNU General Public License (GPL) version 2 */ #ifndef __LINUX_PINCTRL_PINCONF_H #define __LINUX_PINCTRL_PINCONF_H -#include +#ifdef CONFIG_PINCONF + +#include struct pinctrl_dev; struct seq_file; @@ -25,9 +28,9 @@ struct seq_file; * is not available on this controller this should return -ENOTSUPP * and if it is available but disabled it should return -EINVAL * @pin_config_set: configure an individual pin - * @pin_config_group_get: get configurations for an entire pin group; should - * return -ENOTSUPP and -EINVAL using the same rules as pin_config_get. 
+ * @pin_config_group_get: get configurations for an entire pin group * @pin_config_group_set: configure all pins in a group + * @pin_config_dbg_parse_modify: optional debugfs to modify a pin configuration * @pin_config_dbg_show: optional debugfs display hook that will provide * per-device info for a certain pin in debugfs * @pin_config_group_dbg_show: optional debugfs display hook that will provide @@ -53,6 +56,9 @@ struct pinconf_ops { unsigned selector, unsigned long *configs, unsigned num_configs); + int (*pin_config_dbg_parse_modify) (struct pinctrl_dev *pctldev, + const char *arg, + unsigned long *config); void (*pin_config_dbg_show) (struct pinctrl_dev *pctldev, struct seq_file *s, unsigned offset); @@ -64,4 +70,6 @@ struct pinconf_ops { unsigned long config); }; +#endif + #endif /* __LINUX_PINCTRL_PINCONF_H */ diff --git a/include/linux/pinctrl/pinctrl-state.h b/include/linux/pinctrl/pinctrl-state.h index 635d97e928..2307351933 100644 --- a/include/linux/pinctrl/pinctrl-state.h +++ b/include/linux/pinctrl/pinctrl-state.h @@ -1,11 +1,7 @@ -/* SPDX-License-Identifier: GPL-2.0 */ /* * Standard pin control state definitions */ -#ifndef __LINUX_PINCTRL_PINCTRL_STATE_H -#define __LINUX_PINCTRL_PINCTRL_STATE_H - /** * @PINCTRL_STATE_DEFAULT: the state the pinctrl handle shall be put * into as default, usually this means the pins are up and ready to @@ -34,5 +30,3 @@ #define PINCTRL_STATE_INIT "init" #define PINCTRL_STATE_IDLE "idle" #define PINCTRL_STATE_SLEEP "sleep" - -#endif /* __LINUX_PINCTRL_PINCTRL_STATE_H */ diff --git a/include/linux/pinctrl/pinctrl.h b/include/linux/pinctrl/pinctrl.h index 70b45d28e7..a42e57da27 100644 --- a/include/linux/pinctrl/pinctrl.h +++ b/include/linux/pinctrl/pinctrl.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * Interface the pinctrl subsystem * @@ -7,15 +6,18 @@ * This interface is used in the core to keep track of pins. 
* * Author: Linus Walleij + * + * License terms: GNU General Public License (GPL) version 2 */ #ifndef __LINUX_PINCTRL_PINCTRL_H #define __LINUX_PINCTRL_PINCTRL_H +#ifdef CONFIG_PINCTRL + #include #include #include #include -#include struct device; struct pinctrl_dev; @@ -51,8 +53,8 @@ struct pinctrl_pin_desc { * @id: an ID number for the chip in this range * @base: base offset of the GPIO range * @pin_base: base pin number of the GPIO range if pins == NULL - * @npins: number of pins in the GPIO range, including the base number * @pins: enumeration of pins in GPIO range or NULL + * @npins: number of pins in the GPIO range, including the base number * @gc: an optional pointer to a gpio_chip */ struct pinctrl_gpio_range { @@ -61,8 +63,8 @@ struct pinctrl_gpio_range { unsigned int id; unsigned int base; unsigned int pin_base; - unsigned int npins; unsigned const *pins; + unsigned int npins; struct gpio_chip *gc; }; @@ -122,10 +124,6 @@ struct pinctrl_ops { * the hardware description * @custom_conf_items: Information how to print @params in debugfs, must be * the same size as the @custom_params, i.e. @num_custom_params - * @link_consumers: If true create a device link between pinctrl and its - * consumers (i.e. the devices requesting pin control states). This is - * sometimes necessary to ascertain the right suspend/resume order for - * example. 
*/ struct pinctrl_desc { const char *name; @@ -140,35 +138,19 @@ struct pinctrl_desc { const struct pinconf_generic_params *custom_params; const struct pin_config_item *custom_conf_items; #endif - bool link_consumers; }; /* External interface to pin controller */ - -extern int pinctrl_register_and_init(struct pinctrl_desc *pctldesc, - struct device *dev, void *driver_data, - struct pinctrl_dev **pctldev); -extern int pinctrl_enable(struct pinctrl_dev *pctldev); - -/* Please use pinctrl_register_and_init() and pinctrl_enable() instead */ extern struct pinctrl_dev *pinctrl_register(struct pinctrl_desc *pctldesc, struct device *dev, void *driver_data); - extern void pinctrl_unregister(struct pinctrl_dev *pctldev); - -extern int devm_pinctrl_register_and_init(struct device *dev, - struct pinctrl_desc *pctldesc, - void *driver_data, - struct pinctrl_dev **pctldev); - -/* Please use devm_pinctrl_register_and_init() instead */ extern struct pinctrl_dev *devm_pinctrl_register(struct device *dev, struct pinctrl_desc *pctldesc, void *driver_data); - extern void devm_pinctrl_unregister(struct device *dev, struct pinctrl_dev *pctldev); +extern bool pin_is_valid(struct pinctrl_dev *pctldev, int pin); extern void pinctrl_add_gpio_range(struct pinctrl_dev *pctldev, struct pinctrl_gpio_range *range); extern void pinctrl_add_gpio_ranges(struct pinctrl_dev *pctldev, @@ -186,7 +168,7 @@ extern int pinctrl_get_group_pins(struct pinctrl_dev *pctldev, const char *pin_group, const unsigned **pins, unsigned *num_pins); -#if IS_ENABLED(CONFIG_OF) && IS_ENABLED(CONFIG_PINCTRL) +#ifdef CONFIG_OF extern struct pinctrl_dev *of_pinctrl_get(struct device_node *np); #else static inline @@ -199,5 +181,16 @@ struct pinctrl_dev *of_pinctrl_get(struct device_node *np) extern const char *pinctrl_dev_get_name(struct pinctrl_dev *pctldev); extern const char *pinctrl_dev_get_devname(struct pinctrl_dev *pctldev); extern void *pinctrl_dev_get_drvdata(struct pinctrl_dev *pctldev); +#else + +struct 
pinctrl_dev; + +/* Sufficiently stupid default functions when pinctrl is not in use */ +static inline bool pin_is_valid(struct pinctrl_dev *pctldev, int pin) +{ + return pin >= 0; +} + +#endif /* !CONFIG_PINCTRL */ #endif /* __LINUX_PINCTRL_PINCTRL_H */ diff --git a/include/linux/pinctrl/pinmux.h b/include/linux/pinctrl/pinmux.h index 9a647fa5c8..ace60d775b 100644 --- a/include/linux/pinctrl/pinmux.h +++ b/include/linux/pinctrl/pinmux.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * Interface the pinmux subsystem * @@ -7,6 +6,8 @@ * Based on bits of regulator core, gpio core and clk core * * Author: Linus Walleij + * + * License terms: GNU General Public License (GPL) version 2 */ #ifndef __LINUX_PINCTRL_PINMUX_H #define __LINUX_PINCTRL_PINMUX_H @@ -15,6 +16,8 @@ #include #include +#ifdef CONFIG_PINMUX + struct pinctrl_dev; /** @@ -82,4 +85,6 @@ struct pinmux_ops { bool strict; }; +#endif /* CONFIG_PINMUX */ + #endif /* __LINUX_PINCTRL_PINMUX_H */ diff --git a/include/linux/pipe_fs_i.h b/include/linux/pipe_fs_i.h index fc5642431b..43ebf073f1 100644 --- a/include/linux/pipe_fs_i.h +++ b/include/linux/pipe_fs_i.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_PIPE_FS_I_H #define _LINUX_PIPE_FS_I_H @@ -8,11 +7,6 @@ #define PIPE_BUF_FLAG_ATOMIC 0x02 /* was atomically mapped */ #define PIPE_BUF_FLAG_GIFT 0x04 /* page is a gift */ #define PIPE_BUF_FLAG_PACKET 0x08 /* read() as a packet */ -#define PIPE_BUF_FLAG_CAN_MERGE 0x10 /* can merge buffers */ -#define PIPE_BUF_FLAG_WHOLE 0x20 /* read() must return entire buffer or error */ -#ifdef CONFIG_WATCH_QUEUE -#define PIPE_BUF_FLAG_LOSS 0x40 /* Message loss happened after this buffer */ -#endif /** * struct pipe_buffer - a linux kernel pipe buffer @@ -34,71 +28,63 @@ struct pipe_buffer { /** * struct pipe_inode_info - a linux kernel pipe * @mutex: mutex protecting the whole thing - * @rd_wait: reader wait point in case of empty pipe - * @wr_wait: writer wait point in case of full 
pipe - * @head: The point of buffer production - * @tail: The point of buffer consumption - * @note_loss: The next read() should insert a data-lost message - * @max_usage: The maximum number of slots that may be used in the ring - * @ring_size: total number of buffers (should be a power of 2) - * @nr_accounted: The amount this pipe accounts for in user->pipe_bufs + * @wait: reader/writer wait point in case of empty/full pipe + * @nrbufs: the number of non-empty pipe buffers in this pipe + * @buffers: total number of buffers (should be a power of 2) + * @curbuf: the current pipe buffer entry * @tmp_page: cached released page * @readers: number of current readers of this pipe * @writers: number of current writers of this pipe * @files: number of struct file referring this pipe (protected by ->i_lock) + * @waiting_writers: number of writers blocked waiting for room * @r_counter: reader counter * @w_counter: writer counter - * @poll_usage: is this pipe used for epoll, which has crazy wakeups? 
* @fasync_readers: reader side fasync * @fasync_writers: writer side fasync * @bufs: the circular array of pipe buffers * @user: the user who created this pipe - * @watch_queue: If this pipe is a watch_queue, this is the stuff for that **/ struct pipe_inode_info { struct mutex mutex; - wait_queue_head_t rd_wait, wr_wait; - unsigned int head; - unsigned int tail; - unsigned int max_usage; - unsigned int ring_size; -#ifdef CONFIG_WATCH_QUEUE - bool note_loss; -#endif - unsigned int nr_accounted; - unsigned int readers; - unsigned int writers; - unsigned int files; + wait_queue_head_t wait; + unsigned int nrbufs, curbuf, buffers; + atomic_t readers; + atomic_t writers; + atomic_t files; + atomic_t waiting_writers; unsigned int r_counter; unsigned int w_counter; - unsigned int poll_usage; struct page *tmp_page; struct fasync_struct *fasync_readers; struct fasync_struct *fasync_writers; struct pipe_buffer *bufs; struct user_struct *user; -#ifdef CONFIG_WATCH_QUEUE - struct watch_queue *watch_queue; -#endif }; /* * Note on the nesting of these functions: * * ->confirm() - * ->try_steal() + * ->steal() * - * That is, ->try_steal() must be called on a confirmed buffer. See below for - * the meaning of each operation. Also see the kerneldoc in fs/pipe.c for the - * pipe and generic variants of these hooks. + * That is, ->steal() must be called on a confirmed buffer. + * See below for the meaning of each operation. Also see kerneldoc + * in fs/pipe.c for the pipe and generic variants of these hooks. */ struct pipe_buf_operations { + /* + * This is set to 1, if the generic pipe read/write may coalesce + * data into an existing buffer. If this is set to 0, a new pipe + * page segment is always used for new data. + */ + int can_merge; + /* * ->confirm() verifies that the data in the pipe buffer is there * and that the contents are good. If the pages in the pipe belong * to a file system, we may need to wait for IO completion in this * hook. 
Returns 0 for good, or a negative error value in case of - * error. If not present all pages are considered good. + * error. */ int (*confirm)(struct pipe_inode_info *, struct pipe_buffer *); @@ -110,83 +96,29 @@ struct pipe_buf_operations { /* * Attempt to take ownership of the pipe buffer and its contents. - * ->try_steal() returns %true for success, in which case the contents - * of the pipe (the buf->page) is locked and now completely owned by the - * caller. The page may then be transferred to a different mapping, the - * most often used case is insertion into different file address space - * cache. + * ->steal() returns 0 for success, in which case the contents + * of the pipe (the buf->page) is locked and now completely owned + * by the caller. The page may then be transferred to a different + * mapping, the most often used case is insertion into different + * file address space cache. */ - bool (*try_steal)(struct pipe_inode_info *, struct pipe_buffer *); + int (*steal)(struct pipe_inode_info *, struct pipe_buffer *); /* * Get a reference to the pipe buffer. */ - bool (*get)(struct pipe_inode_info *, struct pipe_buffer *); + void (*get)(struct pipe_inode_info *, struct pipe_buffer *); }; -/** - * pipe_empty - Return true if the pipe is empty - * @head: The pipe ring head pointer - * @tail: The pipe ring tail pointer - */ -static inline bool pipe_empty(unsigned int head, unsigned int tail) -{ - return head == tail; -} - -/** - * pipe_occupancy - Return number of slots used in the pipe - * @head: The pipe ring head pointer - * @tail: The pipe ring tail pointer - */ -static inline unsigned int pipe_occupancy(unsigned int head, unsigned int tail) -{ - return head - tail; -} - -/** - * pipe_full - Return true if the pipe is full - * @head: The pipe ring head pointer - * @tail: The pipe ring tail pointer - * @limit: The maximum amount of slots available. 
- */ -static inline bool pipe_full(unsigned int head, unsigned int tail, - unsigned int limit) -{ - return pipe_occupancy(head, tail) >= limit; -} - -/** - * pipe_space_for_user - Return number of slots available to userspace - * @head: The pipe ring head pointer - * @tail: The pipe ring tail pointer - * @pipe: The pipe info structure - */ -static inline unsigned int pipe_space_for_user(unsigned int head, unsigned int tail, - struct pipe_inode_info *pipe) -{ - unsigned int p_occupancy, p_space; - - p_occupancy = pipe_occupancy(head, tail); - if (p_occupancy >= pipe->max_usage) - return 0; - p_space = pipe->ring_size - p_occupancy; - if (p_space > pipe->max_usage) - p_space = pipe->max_usage; - return p_space; -} - /** * pipe_buf_get - get a reference to a pipe_buffer * @pipe: the pipe that the buffer belongs to * @buf: the buffer to get a reference to - * - * Return: %true if the reference was successfully obtained. */ -static inline __must_check bool pipe_buf_get(struct pipe_inode_info *pipe, +static inline void pipe_buf_get(struct pipe_inode_info *pipe, struct pipe_buffer *buf) { - return buf->ops->get(pipe, buf); + buf->ops->get(pipe, buf); } /** @@ -211,22 +143,18 @@ static inline void pipe_buf_release(struct pipe_inode_info *pipe, static inline int pipe_buf_confirm(struct pipe_inode_info *pipe, struct pipe_buffer *buf) { - if (!buf->ops->confirm) - return 0; return buf->ops->confirm(pipe, buf); } /** - * pipe_buf_try_steal - attempt to take ownership of a pipe_buffer + * pipe_buf_steal - attempt to take ownership of a pipe_buffer * @pipe: the pipe that the buffer belongs to * @buf: the buffer to attempt to steal */ -static inline bool pipe_buf_try_steal(struct pipe_inode_info *pipe, - struct pipe_buffer *buf) +static inline int pipe_buf_steal(struct pipe_inode_info *pipe, + struct pipe_buffer *buf) { - if (!buf->ops->try_steal) - return false; - return buf->ops->try_steal(pipe, buf); + return buf->ops->steal(pipe, buf); } /* Differs from PIPE_BUF in that 
PIPE_SIZE is the length of the actual @@ -238,40 +166,29 @@ void pipe_lock(struct pipe_inode_info *); void pipe_unlock(struct pipe_inode_info *); void pipe_double_lock(struct pipe_inode_info *, struct pipe_inode_info *); -extern unsigned int pipe_max_size; +extern unsigned int pipe_max_size, pipe_min_size; extern unsigned long pipe_user_pages_hard; extern unsigned long pipe_user_pages_soft; +int pipe_proc_fn(struct ctl_table *, int, void __user *, size_t *, loff_t *); -/* Wait for a pipe to be readable/writable while dropping the pipe lock */ -void pipe_wait_readable(struct pipe_inode_info *); -void pipe_wait_writable(struct pipe_inode_info *); +/* Drop the inode semaphore and wait for a pipe event, atomically */ +void pipe_wait(struct pipe_inode_info *pipe); struct pipe_inode_info *alloc_pipe_info(void); void free_pipe_info(struct pipe_inode_info *); /* Generic pipe buffer ops functions */ -bool generic_pipe_buf_get(struct pipe_inode_info *, struct pipe_buffer *); -bool generic_pipe_buf_try_steal(struct pipe_inode_info *, struct pipe_buffer *); +void generic_pipe_buf_get(struct pipe_inode_info *, struct pipe_buffer *); +int generic_pipe_buf_confirm(struct pipe_inode_info *, struct pipe_buffer *); +int generic_pipe_buf_steal(struct pipe_inode_info *, struct pipe_buffer *); void generic_pipe_buf_release(struct pipe_inode_info *, struct pipe_buffer *); extern const struct pipe_buf_operations nosteal_pipe_buf_ops; -#ifdef CONFIG_WATCH_QUEUE -unsigned long account_pipe_buffers(struct user_struct *user, - unsigned long old, unsigned long new); -bool too_many_pipe_buffers_soft(unsigned long user_bufs); -bool too_many_pipe_buffers_hard(unsigned long user_bufs); -bool pipe_is_unprivileged_user(void); -#endif - /* for F_SETPIPE_SZ and F_GETPIPE_SZ */ -#ifdef CONFIG_WATCH_QUEUE -int pipe_resize_ring(struct pipe_inode_info *pipe, unsigned int nr_slots); -#endif long pipe_fcntl(struct file *, unsigned int, unsigned long arg); -struct pipe_inode_info *get_pipe_info(struct file 
*file, bool for_splice); +struct pipe_inode_info *get_pipe_info(struct file *file); int create_pipe_files(struct file **, int); -unsigned int round_pipe_size(unsigned long size); #endif diff --git a/include/linux/pkeys.h b/include/linux/pkeys.h index 86be8bf27b..a1bacf1150 100644 --- a/include/linux/pkeys.h +++ b/include/linux/pkeys.h @@ -1,10 +1,8 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_PKEYS_H #define _LINUX_PKEYS_H -#include - -#define ARCH_DEFAULT_PKEY 0 +#include +#include #ifdef CONFIG_ARCH_HAS_PKEYS #include @@ -15,11 +13,6 @@ #define PKEY_DEDICATED_EXECUTE_ONLY 0 #define ARCH_VM_PKEY_FLAGS 0 -static inline int vma_pkey(struct vm_area_struct *vma) -{ - return 0; -} - static inline bool mm_pkey_is_allocated(struct mm_struct *mm, int pkey) { return (pkey == 0); @@ -41,9 +34,8 @@ static inline int arch_set_user_pkey_access(struct task_struct *tsk, int pkey, return 0; } -static inline bool arch_pkeys_enabled(void) +static inline void copy_init_pkru_to_fpregs(void) { - return false; } #endif /* ! CONFIG_ARCH_HAS_PKEYS */ diff --git a/include/linux/pktcdvd.h b/include/linux/pktcdvd.h index 174601554b..93d142ad15 100644 --- a/include/linux/pktcdvd.h +++ b/include/linux/pktcdvd.h @@ -186,7 +186,7 @@ struct pktcdvd_device sector_t current_sector; /* Keep track of where the elevator is */ atomic_t scan_queue; /* Set to non-zero when pkt_handle_queue */ /* needs to be run. 
*/ - mempool_t rb_pool; /* mempool for pkt_rb_node allocations */ + mempool_t *rb_pool; /* mempool for pkt_rb_node allocations */ struct packet_iosched iosched; struct gendisk *disk; diff --git a/include/linux/pl320-ipc.h b/include/linux/pl320-ipc.h index 4b29e172ee..5161f63ec1 100644 --- a/include/linux/pl320-ipc.h +++ b/include/linux/pl320-ipc.h @@ -1,5 +1,15 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program. If not, see . */ int pl320_ipc_transmit(u32 *data); diff --git a/include/linux/platform_data/ad5449.h b/include/linux/platform_data/ad5449.h index d687ef5726..bd712bd4b9 100644 --- a/include/linux/platform_data/ad5449.h +++ b/include/linux/platform_data/ad5449.h @@ -1,10 +1,11 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * AD5415, AD5426, AD5429, AD5432, AD5439, AD5443, AD5449 Digital to Analog * Converter driver. * * Copyright 2012 Analog Devices Inc. * Author: Lars-Peter Clausen + * + * Licensed under the GPL-2. */ #ifndef __LINUX_PLATFORM_DATA_AD5449_H__ diff --git a/include/linux/platform_data/ad5755.h b/include/linux/platform_data/ad5755.h index e371e08f04..a5a1cb7518 100644 --- a/include/linux/platform_data/ad5755.h +++ b/include/linux/platform_data/ad5755.h @@ -1,6 +1,7 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright 2012 Analog Devices Inc. + * + * Licensed under the GPL-2. 
*/ #ifndef __LINUX_PLATFORM_DATA_AD5755_H__ #define __LINUX_PLATFORM_DATA_AD5755_H__ diff --git a/include/linux/platform_data/ad5761.h b/include/linux/platform_data/ad5761.h index 69e261e2ca..7bd8ed7d97 100644 --- a/include/linux/platform_data/ad5761.h +++ b/include/linux/platform_data/ad5761.h @@ -1,9 +1,10 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * AD5721, AD5721R, AD5761, AD5761R, Voltage Output Digital to Analog Converter * * Copyright 2016 Qtechnology A/S - * 2016 Ricardo Ribalda + * 2016 Ricardo Ribalda + * + * Licensed under the GPL-2. */ #ifndef __LINUX_PLATFORM_DATA_AD5761_H__ #define __LINUX_PLATFORM_DATA_AD5761_H__ diff --git a/include/linux/platform_data/ad7266.h b/include/linux/platform_data/ad7266.h index f0652567af..eabfdcb269 100644 --- a/include/linux/platform_data/ad7266.h +++ b/include/linux/platform_data/ad7266.h @@ -1,8 +1,9 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * AD7266/65 SPI ADC driver * * Copyright 2012 Analog Devices Inc. + * + * Licensed under the GPL-2. */ #ifndef __IIO_ADC_AD7266_H__ @@ -40,11 +41,14 @@ enum ad7266_mode { * @range: Reference voltage range the device is configured for * @mode: Sample mode the device is configured for * @fixed_addr: Whether the address pins are hard-wired + * @addr_gpios: GPIOs used for controlling the address pins, only used if + * fixed_addr is set to false. 
*/ struct ad7266_platform_data { enum ad7266_range range; enum ad7266_mode mode; bool fixed_addr; + unsigned int addr_gpios[3]; }; #endif diff --git a/include/linux/platform_data/ad7291.h b/include/linux/platform_data/ad7291.h new file mode 100644 index 0000000000..bbd89fa511 --- /dev/null +++ b/include/linux/platform_data/ad7291.h @@ -0,0 +1,12 @@ +#ifndef __IIO_AD7291_H__ +#define __IIO_AD7291_H__ + +/** + * struct ad7291_platform_data - AD7291 platform data + * @use_external_ref: Whether to use an external or internal reference voltage + */ +struct ad7291_platform_data { + bool use_external_ref; +}; + +#endif diff --git a/include/linux/platform_data/ad7298.h b/include/linux/platform_data/ad7298.h index 3e0ffe2d5d..fbf8adf136 100644 --- a/include/linux/platform_data/ad7298.h +++ b/include/linux/platform_data/ad7298.h @@ -1,8 +1,9 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * AD7298 SPI ADC driver * * Copyright 2011 Analog Devices Inc. + * + * Licensed under the GPL-2. */ #ifndef __LINUX_PLATFORM_DATA_AD7298_H__ diff --git a/include/linux/platform_data/ad7303.h b/include/linux/platform_data/ad7303.h index c2bd0a13be..de6a7a6b4b 100644 --- a/include/linux/platform_data/ad7303.h +++ b/include/linux/platform_data/ad7303.h @@ -1,8 +1,9 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * Analog Devices AD7303 DAC driver * * Copyright 2013 Analog Devices Inc. + * + * Licensed under the GPL-2. 
*/ #ifndef __IIO_ADC_AD7303_H__ diff --git a/include/linux/platform_data/ad7791.h b/include/linux/platform_data/ad7791.h index cc7533049b..f9e4db1b82 100644 --- a/include/linux/platform_data/ad7791.h +++ b/include/linux/platform_data/ad7791.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef __LINUX_PLATFORM_DATA_AD7791__ #define __LINUX_PLATFORM_DATA_AD7791__ diff --git a/include/linux/platform_data/ad7793.h b/include/linux/platform_data/ad7793.h index 7c697e58f0..7ea6751aae 100644 --- a/include/linux/platform_data/ad7793.h +++ b/include/linux/platform_data/ad7793.h @@ -1,8 +1,9 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * AD7792/AD7793 SPI ADC driver * * Copyright 2011 Analog Devices Inc. + * + * Licensed under the GPL-2. */ #ifndef __LINUX_PLATFORM_DATA_AD7793_H__ #define __LINUX_PLATFORM_DATA_AD7793_H__ @@ -40,7 +41,7 @@ enum ad7793_bias_voltage { * enum ad7793_refsel - AD7793 reference voltage selection * @AD7793_REFSEL_REFIN1: External reference applied between REFIN1(+) * and REFIN1(-). - * @AD7793_REFSEL_REFIN2: External reference applied between REFIN2(+) + * @AD7793_REFSEL_REFIN2: External reference applied between REFIN2(+) and * and REFIN1(-). Only valid for AD7795/AD7796. * @AD7793_REFSEL_INTERNAL: Internal 1.17 V reference. */ diff --git a/include/linux/platform_data/ad7879.h b/include/linux/platform_data/ad7879.h new file mode 100644 index 0000000000..69e2e1fd2b --- /dev/null +++ b/include/linux/platform_data/ad7879.h @@ -0,0 +1,41 @@ +/* linux/platform_data/ad7879.h */ + +/* Touchscreen characteristics vary between boards and models. The + * platform_data for the device's "struct device" holds this information. + * + * It's OK if the min/max values are zero. 
+ */ +struct ad7879_platform_data { + u16 model; /* 7879 */ + u16 x_plate_ohms; + u16 x_min, x_max; + u16 y_min, y_max; + u16 pressure_min, pressure_max; + + bool swap_xy; /* swap x and y axes */ + + /* [0..255] 0=OFF Starts at 1=550us and goes + * all the way to 9.440ms in steps of 35us. + */ + u8 pen_down_acc_interval; + /* [0..15] Starts at 0=128us and goes all the + * way to 4.096ms in steps of 128us. + */ + u8 first_conversion_delay; + /* [0..3] 0 = 2us, 1 = 4us, 2 = 8us, 3 = 16us */ + u8 acquisition_time; + /* [0..3] Average X middle samples 0 = 2, 1 = 4, 2 = 8, 3 = 16 */ + u8 averaging; + /* [0..3] Perform X measurements 0 = OFF, + * 1 = 4, 2 = 8, 3 = 16 (median > averaging) + */ + u8 median; + /* 1 = AUX/VBAT/GPIO export GPIO to gpiolib + * requires CONFIG_GPIOLIB + */ + bool gpio_export; + /* identifies the first GPIO number handled by this chip; + * or, if negative, requests dynamic ID allocation. + */ + s32 gpio_base; +}; diff --git a/include/linux/platform_data/ad7887.h b/include/linux/platform_data/ad7887.h index 9b4dca6ae7..1e06eac317 100644 --- a/include/linux/platform_data/ad7887.h +++ b/include/linux/platform_data/ad7887.h @@ -1,8 +1,9 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * AD7887 SPI ADC driver * * Copyright 2010 Analog Devices Inc. + * + * Licensed under the GPL-2 or later. */ #ifndef IIO_ADC_AD7887_H_ #define IIO_ADC_AD7887_H_ @@ -13,9 +14,13 @@ * second input channel, and Vref is internally connected to Vdd. If set to * false the device is used in single channel mode and AIN1/Vref is used as * VREF input. + * @use_onchip_ref: Whether to use the onchip reference. If set to true the + * internal 2.5V reference is used. If set to false a external reference is + * used. 
*/ struct ad7887_platform_data { bool en_dual; + bool use_onchip_ref; }; #endif /* IIO_ADC_AD7887_H_ */ diff --git a/include/linux/platform_data/adau17x1.h b/include/linux/platform_data/adau17x1.h index 27a39cc6fa..9db1b905df 100644 --- a/include/linux/platform_data/adau17x1.h +++ b/include/linux/platform_data/adau17x1.h @@ -1,9 +1,10 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * Driver for ADAU1361/ADAU1461/ADAU1761/ADAU1961/ADAU1381/ADAU1781 codecs * * Copyright 2011-2014 Analog Devices Inc. * Author: Lars-Peter Clausen + * + * Licensed under the GPL-2 or later. */ #ifndef __LINUX_PLATFORM_DATA_ADAU17X1_H__ diff --git a/include/linux/platform_data/adau1977.h b/include/linux/platform_data/adau1977.h index 8666723507..bed11d908f 100644 --- a/include/linux/platform_data/adau1977.h +++ b/include/linux/platform_data/adau1977.h @@ -1,9 +1,10 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * ADAU1977/ADAU1978/ADAU1979 driver * * Copyright 2014 Analog Devices Inc. * Author: Lars-Peter Clausen + * + * Licensed under the GPL-2. */ #ifndef __LINUX_PLATFORM_DATA_ADAU1977_H__ diff --git a/include/linux/platform_data/ads7828.h b/include/linux/platform_data/ads7828.h index 0fa4186c61..3245f45f9d 100644 --- a/include/linux/platform_data/ads7828.h +++ b/include/linux/platform_data/ads7828.h @@ -1,11 +1,14 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * TI ADS7828 A/D Converter platform data definition * * Copyright (c) 2012 Savoir-faire Linux Inc. * Vivien Didelot * - * For further information, see the Documentation/hwmon/ads7828.rst file. + * For further information, see the Documentation/hwmon/ads7828 file. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
*/ #ifndef _PDATA_ADS7828_H diff --git a/include/linux/platform_data/arm-ux500-pm.h b/include/linux/platform_data/arm-ux500-pm.h index 9f6f01cfdd..8dff64b29e 100644 --- a/include/linux/platform_data/arm-ux500-pm.h +++ b/include/linux/platform_data/arm-ux500-pm.h @@ -1,9 +1,10 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright (C) ST-Ericsson SA 2010-2013 * Author: Rickard Andersson for * ST-Ericsson. * Author: Daniel Lezcano for Linaro. + * License terms: GNU General Public License (GPL) version 2 + * */ #ifndef ARM_UX500_PM_H diff --git a/include/linux/platform_data/asoc-imx-ssi.h b/include/linux/platform_data/asoc-imx-ssi.h index 902851aeb0..92c7fd72f6 100644 --- a/include/linux/platform_data/asoc-imx-ssi.h +++ b/include/linux/platform_data/asoc-imx-ssi.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef __MACH_SSI_H #define __MACH_SSI_H diff --git a/include/linux/platform_data/asoc-kirkwood.h b/include/linux/platform_data/asoc-kirkwood.h index d442cefa39..d6a55bd2e5 100644 --- a/include/linux/platform_data/asoc-kirkwood.h +++ b/include/linux/platform_data/asoc-kirkwood.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef __PLAT_AUDIO_H #define __PLAT_AUDIO_H diff --git a/include/linux/platform_data/asoc-mx27vis.h b/include/linux/platform_data/asoc-mx27vis.h index 2107d0d992..409adcd04d 100644 --- a/include/linux/platform_data/asoc-mx27vis.h +++ b/include/linux/platform_data/asoc-mx27vis.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef __PLATFORM_DATA_ASOC_MX27VIS_H #define __PLATFORM_DATA_ASOC_MX27VIS_H diff --git a/include/linux/platform_data/asoc-palm27x.h b/include/linux/platform_data/asoc-palm27x.h index 22b69a393a..58afb30d52 100644 --- a/include/linux/platform_data/asoc-palm27x.h +++ b/include/linux/platform_data/asoc-palm27x.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _INCLUDE_PALMASOC_H_ #define _INCLUDE_PALMASOC_H_ diff --git a/include/linux/platform_data/asoc-s3c.h 
b/include/linux/platform_data/asoc-s3c.h index f9c00f839e..15bf56ee8a 100644 --- a/include/linux/platform_data/asoc-s3c.h +++ b/include/linux/platform_data/asoc-s3c.h @@ -1,7 +1,10 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright (c) 2009 Samsung Electronics Co. Ltd * Author: Jaswinder Singh + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. */ /* The machine init code calls s3c*_ac97_setup_gpio with @@ -15,7 +18,7 @@ extern void s3c64xx_ac97_setup_gpio(int); -struct samsung_i2s_type { +struct samsung_i2s { /* If the Primary DAI has 5.1 Channels */ #define QUIRK_PRI_6CHAN (1 << 0) /* If the I2S block has a Stereo Overlay Channel */ @@ -44,5 +47,7 @@ struct s3c_audio_pdata { void *dma_capture; void *dma_play_sec; void *dma_capture_mic; - struct samsung_i2s_type type; + union { + struct samsung_i2s i2s; + } type; }; diff --git a/include/linux/platform_data/asoc-s3c24xx_simtec.h b/include/linux/platform_data/asoc-s3c24xx_simtec.h index 1a7efc98d1..d220e54123 100644 --- a/include/linux/platform_data/asoc-s3c24xx_simtec.h +++ b/include/linux/platform_data/asoc-s3c24xx_simtec.h @@ -1,9 +1,12 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright 2008 Simtec Electronics * http://armlinux.simtec.co.uk/ * Ben Dooks * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * * Simtec Audio support. 
*/ diff --git a/include/linux/platform_data/asoc-ti-mcbsp.h b/include/linux/platform_data/asoc-ti-mcbsp.h index cc81977600..e684543254 100644 --- a/include/linux/platform_data/asoc-ti-mcbsp.h +++ b/include/linux/platform_data/asoc-ti-mcbsp.h @@ -1,9 +1,23 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * Defines for Multi-Channel Buffered Serial Port * * Copyright (C) 2002 RidgeRun, Inc. * Author: Steve Johnson + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + * */ #ifndef __ASOC_TI_MCBSP_H #define __ASOC_TI_MCBSP_H @@ -11,6 +25,10 @@ #include #include +#define MCBSP_CONFIG_TYPE2 0x2 +#define MCBSP_CONFIG_TYPE3 0x3 +#define MCBSP_CONFIG_TYPE4 0x4 + /* Platform specific configuration */ struct omap_mcbsp_ops { void (*request)(unsigned int); @@ -29,6 +47,14 @@ struct omap_mcbsp_platform_data { int (*force_ick_on)(struct clk *clk, bool force_on); }; +/** + * omap_mcbsp_dev_attr - OMAP McBSP device attributes for omap_hwmod + * @sidetone: name of the sidetone device + */ +struct omap_mcbsp_dev_attr { + const char *sidetone; +}; + void omap3_mcbsp_init_pdata_callback(struct omap_mcbsp_platform_data *pdata); #endif diff --git a/include/linux/platform_data/asoc-ux500-msp.h b/include/linux/platform_data/asoc-ux500-msp.h index b8d0f730dd..2f34bb98fe 100644 --- 
a/include/linux/platform_data/asoc-ux500-msp.h +++ b/include/linux/platform_data/asoc-ux500-msp.h @@ -1,8 +1,8 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright (C) ST-Ericsson SA 2010 * * Author: Rabin Vincent for ST-Ericsson + * License terms: GNU General Public License (GPL), version 2. */ #ifndef __MSP_H diff --git a/include/linux/platform_data/at24.h b/include/linux/platform_data/at24.h new file mode 100644 index 0000000000..271a4e25af --- /dev/null +++ b/include/linux/platform_data/at24.h @@ -0,0 +1,58 @@ +/* + * at24.h - platform_data for the at24 (generic eeprom) driver + * (C) Copyright 2008 by Pengutronix + * (C) Copyright 2012 by Wolfram Sang + * same license as the driver + */ + +#ifndef _LINUX_AT24_H +#define _LINUX_AT24_H + +#include +#include +#include + +/** + * struct at24_platform_data - data to set up at24 (generic eeprom) driver + * @byte_len: size of eeprom in byte + * @page_size: number of byte which can be written in one go + * @flags: tunable options, check AT24_FLAG_* defines + * @setup: an optional callback invoked after eeprom is probed; enables kernel + code to access eeprom via nvmem, see example + * @context: optional parameter passed to setup() + * + * If you set up a custom eeprom type, please double-check the parameters. + * Especially page_size needs extra care, as you risk data loss if your value + * is bigger than what the chip actually supports! + * + * An example in pseudo code for a setup() callback: + * + * void get_mac_addr(struct nvmem_device *nvmem, void *context) + * { + * u8 *mac_addr = ethernet_pdata->mac_addr; + * off_t offset = context; + * + * // Read MAC addr from EEPROM + * if (nvmem_device_read(nvmem, offset, ETH_ALEN, mac_addr) == ETH_ALEN) + * pr_info("Read MAC addr from EEPROM: %pM\n", mac_addr); + * } + * + * This function pointer and context can now be set up in at24_platform_data. 
+ */ + +struct at24_platform_data { + u32 byte_len; /* size (sum of all addr) */ + u16 page_size; /* for writes */ + u8 flags; +#define AT24_FLAG_ADDR16 BIT(7) /* address pointer is 16 bit */ +#define AT24_FLAG_READONLY BIT(6) /* sysfs-entry will be read-only */ +#define AT24_FLAG_IRUGO BIT(5) /* sysfs-entry will be world-readable */ +#define AT24_FLAG_TAKE8ADDR BIT(4) /* take always 8 addresses (24c00) */ +#define AT24_FLAG_SERIAL BIT(3) /* factory-programmed serial number */ +#define AT24_FLAG_MAC BIT(2) /* factory-programmed mac address */ + + void (*setup)(struct nvmem_device *nvmem, void *context); + void *context; +}; + +#endif /* _LINUX_AT24_H */ diff --git a/include/linux/platform_data/at91_adc.h b/include/linux/platform_data/at91_adc.h index f20eaeb827..7819fc7877 100644 --- a/include/linux/platform_data/at91_adc.h +++ b/include/linux/platform_data/at91_adc.h @@ -1,6 +1,7 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * Copyright (C) 2011 Free Electrons + * + * Licensed under the GPLv2 or later. */ #ifndef _AT91_ADC_H_ diff --git a/include/linux/platform_data/ata-pxa.h b/include/linux/platform_data/ata-pxa.h index 0b65fd0aa5..6cf7df1d58 100644 --- a/include/linux/platform_data/ata-pxa.h +++ b/include/linux/platform_data/ata-pxa.h @@ -1,8 +1,21 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * Generic PXA PATA driver * * Copyright (C) 2010 Marek Vasut + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2, or (at your option) + * any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; see the file COPYING. If not, write to + * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. */ #ifndef __MACH_PATA_PXA_H__ diff --git a/include/linux/platform_data/ata-samsung_cf.h b/include/linux/platform_data/ata-samsung_cf.h index fccf969dc4..748e71642c 100644 --- a/include/linux/platform_data/ata-samsung_cf.h +++ b/include/linux/platform_data/ata-samsung_cf.h @@ -1,9 +1,12 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright (c) 2010 Samsung Electronics Co., Ltd. * http://www.samsung.com * * Samsung CF-ATA platform_device info + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. */ #ifndef __ATA_SAMSUNG_CF_H diff --git a/include/linux/platform_data/atmel.h b/include/linux/platform_data/atmel.h index 73f63be509..3c8825b672 100644 --- a/include/linux/platform_data/atmel.h +++ b/include/linux/platform_data/atmel.h @@ -1,19 +1,57 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * atmel platform data + * + * GPL v2 Only */ #ifndef __ATMEL_H__ #define __ATMEL_H__ +#include +#include +#include + + /* Compact Flash */ +struct at91_cf_data { + int irq_pin; /* I/O IRQ */ + int det_pin; /* Card detect */ + int vcc_pin; /* power switching */ + int rst_pin; /* card reset */ + u8 chipselect; /* EBI Chip Select number */ + u8 flags; +#define AT91_CF_TRUE_IDE 0x01 +#define AT91_IDE_SWAP_A0_A2 0x02 +}; + + /* NAND / SmartMedia */ +struct atmel_nand_data { + int enable_pin; /* chip enable */ + int det_pin; /* card detect */ + int rdy_pin; /* ready/busy */ + u8 rdy_pin_active_low; /* rdy_pin value is inverted */ + u8 ale; /* address line number connected to ALE */ + u8 cle; /* address line number connected to CLE */ + u8 bus_width_16; /* buswidth is 16 bit */ + u8 ecc_mode; /* ecc mode */ + u8 on_flash_bbt; 
/* bbt on flash */ + struct mtd_partition *parts; + unsigned int num_parts; + bool has_dma; /* support dma transfer */ + + /* default is false, only for at32ap7000 chip is true */ + bool need_reset_workaround; +}; + + /* Serial */ +struct atmel_uart_data { + int num; /* port num */ + short use_dma_tx; /* use transmit DMA? */ + short use_dma_rx; /* use receive DMA? */ + void __iomem *regs; /* virt. base address, if any */ + struct serial_rs485 rs485; /* rs485 settings */ +}; + /* FIXME: this needs a better location, but gets stuff building again */ -#ifdef CONFIG_ATMEL_PM extern int at91_suspend_entering_slow_clock(void); -#else -static inline int at91_suspend_entering_slow_clock(void) -{ - return 0; -} -#endif #endif /* __ATMEL_H__ */ diff --git a/include/linux/platform_data/atmel_mxt_ts.h b/include/linux/platform_data/atmel_mxt_ts.h new file mode 100644 index 0000000000..695035a8d7 --- /dev/null +++ b/include/linux/platform_data/atmel_mxt_ts.h @@ -0,0 +1,31 @@ +/* + * Atmel maXTouch Touchscreen driver + * + * Copyright (C) 2010 Samsung Electronics Co.Ltd + * Author: Joonyoung Shim + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. 
+ */ + +#ifndef __LINUX_PLATFORM_DATA_ATMEL_MXT_TS_H +#define __LINUX_PLATFORM_DATA_ATMEL_MXT_TS_H + +#include + +enum mxt_suspend_mode { + MXT_SUSPEND_DEEP_SLEEP = 0, + MXT_SUSPEND_T9_CTRL = 1, +}; + +/* The platform data for the Atmel maXTouch touchscreen driver */ +struct mxt_platform_data { + unsigned long irqflags; + u8 t19_num_keys; + const unsigned int *t19_keymap; + enum mxt_suspend_mode suspend_mode; +}; + +#endif /* __LINUX_PLATFORM_DATA_ATMEL_MXT_TS_H */ diff --git a/include/linux/platform_data/b53.h b/include/linux/platform_data/b53.h index 6f6fed2b17..69d279c0da 100644 --- a/include/linux/platform_data/b53.h +++ b/include/linux/platform_data/b53.h @@ -19,13 +19,9 @@ #ifndef __B53_H #define __B53_H -#include -#include +#include struct b53_platform_data { - /* Must be first such that dsa_register_switch() can access it */ - struct dsa_chip_data cd; - u32 chip_id; u16 enabled_ports; diff --git a/include/linux/platform_data/bcmgenet.h b/include/linux/platform_data/bcmgenet.h index d8f8738629..26af543219 100644 --- a/include/linux/platform_data/bcmgenet.h +++ b/include/linux/platform_data/bcmgenet.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef __LINUX_PLATFORM_DATA_BCMGENET_H__ #define __LINUX_PLATFORM_DATA_BCMGENET_H__ diff --git a/include/linux/platform_data/bd6107.h b/include/linux/platform_data/bd6107.h index 54a06a4d26..671d6502d2 100644 --- a/include/linux/platform_data/bd6107.h +++ b/include/linux/platform_data/bd6107.h @@ -1,6 +1,9 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * bd6107.h - Rohm BD6107 LEDs Driver + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
*/ #ifndef __BD6107_H__ #define __BD6107_H__ @@ -9,6 +12,7 @@ struct device; struct bd6107_platform_data { struct device *fbdev; + int reset; /* Reset GPIO */ unsigned int def_value; }; diff --git a/include/linux/platform_data/bfin_rotary.h b/include/linux/platform_data/bfin_rotary.h new file mode 100644 index 0000000000..98829370fe --- /dev/null +++ b/include/linux/platform_data/bfin_rotary.h @@ -0,0 +1,117 @@ +/* + * board initialization should put one of these structures into platform_data + * and place the bfin-rotary onto platform_bus named "bfin-rotary". + * + * Copyright 2008-2010 Analog Devices Inc. + * + * Licensed under the GPL-2 or later. + */ + +#ifndef _BFIN_ROTARY_H +#define _BFIN_ROTARY_H + +/* mode bitmasks */ +#define ROT_QUAD_ENC CNTMODE_QUADENC /* quadrature/grey code encoder mode */ +#define ROT_BIN_ENC CNTMODE_BINENC /* binary encoder mode */ +#define ROT_UD_CNT CNTMODE_UDCNT /* rotary counter mode */ +#define ROT_DIR_CNT CNTMODE_DIRCNT /* direction counter mode */ + +#define ROT_DEBE DEBE /* Debounce Enable */ + +#define ROT_CDGINV CDGINV /* CDG Pin Polarity Invert */ +#define ROT_CUDINV CUDINV /* CUD Pin Polarity Invert */ +#define ROT_CZMINV CZMINV /* CZM Pin Polarity Invert */ + +struct bfin_rotary_platform_data { + /* set rotary UP KEY_### or BTN_### in case you prefer + * bfin-rotary to send EV_KEY otherwise set 0 + */ + unsigned int rotary_up_key; + /* set rotary DOWN KEY_### or BTN_### in case you prefer + * bfin-rotary to send EV_KEY otherwise set 0 + */ + unsigned int rotary_down_key; + /* set rotary BUTTON KEY_### or BTN_### */ + unsigned int rotary_button_key; + /* set rotary Relative Axis REL_### in case you prefer + * bfin-rotary to send EV_REL otherwise set 0 + */ + unsigned int rotary_rel_code; + unsigned short debounce; /* 0..17 */ + unsigned short mode; + unsigned short pm_wakeup; + unsigned short *pin_list; +}; + +/* CNT_CONFIG bitmasks */ +#define CNTE (1 << 0) /* Counter Enable */ +#define DEBE (1 << 1) /* Debounce Enable 
*/ +#define CDGINV (1 << 4) /* CDG Pin Polarity Invert */ +#define CUDINV (1 << 5) /* CUD Pin Polarity Invert */ +#define CZMINV (1 << 6) /* CZM Pin Polarity Invert */ +#define CNTMODE_SHIFT 8 +#define CNTMODE (0x7 << CNTMODE_SHIFT) /* Counter Operating Mode */ +#define ZMZC (1 << 1) /* CZM Zeroes Counter Enable */ +#define BNDMODE_SHIFT 12 +#define BNDMODE (0x3 << BNDMODE_SHIFT) /* Boundary register Mode */ +#define INPDIS (1 << 15) /* CUG and CDG Input Disable */ + +#define CNTMODE_QUADENC (0 << CNTMODE_SHIFT) /* quadrature encoder mode */ +#define CNTMODE_BINENC (1 << CNTMODE_SHIFT) /* binary encoder mode */ +#define CNTMODE_UDCNT (2 << CNTMODE_SHIFT) /* up/down counter mode */ +#define CNTMODE_DIRCNT (4 << CNTMODE_SHIFT) /* direction counter mode */ +#define CNTMODE_DIRTMR (5 << CNTMODE_SHIFT) /* direction timer mode */ + +#define BNDMODE_COMP (0 << BNDMODE_SHIFT) /* boundary compare mode */ +#define BNDMODE_ZERO (1 << BNDMODE_SHIFT) /* boundary compare and zero mode */ +#define BNDMODE_CAPT (2 << BNDMODE_SHIFT) /* boundary capture mode */ +#define BNDMODE_AEXT (3 << BNDMODE_SHIFT) /* boundary auto-extend mode */ + +/* CNT_IMASK bitmasks */ +#define ICIE (1 << 0) /* Illegal Gray/Binary Code Interrupt Enable */ +#define UCIE (1 << 1) /* Up count Interrupt Enable */ +#define DCIE (1 << 2) /* Down count Interrupt Enable */ +#define MINCIE (1 << 3) /* Min Count Interrupt Enable */ +#define MAXCIE (1 << 4) /* Max Count Interrupt Enable */ +#define COV31IE (1 << 5) /* Bit 31 Overflow Interrupt Enable */ +#define COV15IE (1 << 6) /* Bit 15 Overflow Interrupt Enable */ +#define CZEROIE (1 << 7) /* Count to Zero Interrupt Enable */ +#define CZMIE (1 << 8) /* CZM Pin Interrupt Enable */ +#define CZMEIE (1 << 9) /* CZM Error Interrupt Enable */ +#define CZMZIE (1 << 10) /* CZM Zeroes Counter Interrupt Enable */ + +/* CNT_STATUS bitmasks */ +#define ICII (1 << 0) /* Illegal Gray/Binary Code Interrupt Identifier */ +#define UCII (1 << 1) /* Up count Interrupt Identifier */ 
+#define DCII (1 << 2) /* Down count Interrupt Identifier */ +#define MINCII (1 << 3) /* Min Count Interrupt Identifier */ +#define MAXCII (1 << 4) /* Max Count Interrupt Identifier */ +#define COV31II (1 << 5) /* Bit 31 Overflow Interrupt Identifier */ +#define COV15II (1 << 6) /* Bit 15 Overflow Interrupt Identifier */ +#define CZEROII (1 << 7) /* Count to Zero Interrupt Identifier */ +#define CZMII (1 << 8) /* CZM Pin Interrupt Identifier */ +#define CZMEII (1 << 9) /* CZM Error Interrupt Identifier */ +#define CZMZII (1 << 10) /* CZM Zeroes Counter Interrupt Identifier */ + +/* CNT_COMMAND bitmasks */ +#define W1LCNT 0xf /* Load Counter Register */ +#define W1LMIN 0xf0 /* Load Min Register */ +#define W1LMAX 0xf00 /* Load Max Register */ +#define W1ZMONCE (1 << 12) /* Enable CZM Clear Counter Once */ + +#define W1LCNT_ZERO (1 << 0) /* write 1 to load CNT_COUNTER with zero */ +#define W1LCNT_MIN (1 << 2) /* write 1 to load CNT_COUNTER from CNT_MIN */ +#define W1LCNT_MAX (1 << 3) /* write 1 to load CNT_COUNTER from CNT_MAX */ + +#define W1LMIN_ZERO (1 << 4) /* write 1 to load CNT_MIN with zero */ +#define W1LMIN_CNT (1 << 5) /* write 1 to load CNT_MIN from CNT_COUNTER */ +#define W1LMIN_MAX (1 << 7) /* write 1 to load CNT_MIN from CNT_MAX */ + +#define W1LMAX_ZERO (1 << 8) /* write 1 to load CNT_MAX with zero */ +#define W1LMAX_CNT (1 << 9) /* write 1 to load CNT_MAX from CNT_COUNTER */ +#define W1LMAX_MIN (1 << 10) /* write 1 to load CNT_MAX from CNT_MIN */ + +/* CNT_DEBOUNCE bitmasks */ +#define DPRESCALE 0xf /* Load Counter Register */ + +#endif diff --git a/include/linux/platform_data/bt-nokia-h4p.h b/include/linux/platform_data/bt-nokia-h4p.h new file mode 100644 index 0000000000..30d169dfad --- /dev/null +++ b/include/linux/platform_data/bt-nokia-h4p.h @@ -0,0 +1,38 @@ +/* + * This file is part of Nokia H4P bluetooth driver + * + * Copyright (C) 2010 Nokia Corporation. 
+ * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA + * 02110-1301 USA + * + */ + + +/** + * struct hci_h4p_platform_data - hci_h4p Platform data structure + */ +struct hci_h4p_platform_data { + int chip_type; + int bt_sysclk; + unsigned int bt_wakeup_gpio; + unsigned int host_wakeup_gpio; + unsigned int reset_gpio; + int reset_gpio_shared; + unsigned int uart_irq; + phys_addr_t uart_base; + const char *uart_iclk; + const char *uart_fclk; + void (*set_pm_limits)(struct device *dev, bool set); +}; diff --git a/include/linux/platform_data/clk-integrator.h b/include/linux/platform_data/clk-integrator.h new file mode 100644 index 0000000000..addd48cac6 --- /dev/null +++ b/include/linux/platform_data/clk-integrator.h @@ -0,0 +1,2 @@ +void integrator_impd1_clk_init(void __iomem *base, unsigned int id); +void integrator_impd1_clk_exit(unsigned int id); diff --git a/include/linux/platform_data/clk-lpss.h b/include/linux/platform_data/clk-lpss.h new file mode 100644 index 0000000000..23901992b9 --- /dev/null +++ b/include/linux/platform_data/clk-lpss.h @@ -0,0 +1,23 @@ +/* + * Intel Low Power Subsystem clocks. + * + * Copyright (C) 2013, Intel Corporation + * Authors: Mika Westerberg + * Rafael J. 
Wysocki + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#ifndef __CLK_LPSS_H +#define __CLK_LPSS_H + +struct lpss_clk_data { + const char *name; + struct clk *clk; +}; + +extern int lpt_clk_init(void); + +#endif /* __CLK_LPSS_H */ diff --git a/include/linux/platform_data/clk-realview.h b/include/linux/platform_data/clk-realview.h new file mode 100644 index 0000000000..2e426a7dbc --- /dev/null +++ b/include/linux/platform_data/clk-realview.h @@ -0,0 +1 @@ +void realview_clk_init(void __iomem *sysbase, bool is_pb1176); diff --git a/include/linux/platform_data/cpuidle-exynos.h b/include/linux/platform_data/cpuidle-exynos.h index 075cbf0302..bfa40e4c5d 100644 --- a/include/linux/platform_data/cpuidle-exynos.h +++ b/include/linux/platform_data/cpuidle-exynos.h @@ -1,7 +1,10 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright (c) 2014 Samsung Electronics Co., Ltd. * http://www.samsung.com + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. */ #ifndef __CPUIDLE_EXYNOS_H diff --git a/include/linux/platform_data/crypto-atmel.h b/include/linux/platform_data/crypto-atmel.h new file mode 100644 index 0000000000..b46e0d9062 --- /dev/null +++ b/include/linux/platform_data/crypto-atmel.h @@ -0,0 +1,22 @@ +#ifndef __LINUX_CRYPTO_ATMEL_H +#define __LINUX_CRYPTO_ATMEL_H + +#include + +/** + * struct crypto_dma_data - DMA data for AES/TDES/SHA + */ +struct crypto_dma_data { + struct at_dma_slave txdata; + struct at_dma_slave rxdata; +}; + +/** + * struct crypto_platform_data - board-specific AES/TDES/SHA configuration + * @dma_slave: DMA slave interface to use in data transfers. 
+ */ +struct crypto_platform_data { + struct crypto_dma_data *dma_slave; +}; + +#endif /* __LINUX_CRYPTO_ATMEL_H */ diff --git a/include/linux/platform_data/crypto-ux500.h b/include/linux/platform_data/crypto-ux500.h index 5d43350e32..94df96d9a3 100644 --- a/include/linux/platform_data/crypto-ux500.h +++ b/include/linux/platform_data/crypto-ux500.h @@ -1,8 +1,8 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright (C) ST-Ericsson SA 2011 * * Author: Joakim Bech for ST-Ericsson + * License terms: GNU General Public License (GPL) version 2 */ #ifndef _CRYPTO_UX500_H #define _CRYPTO_UX500_H diff --git a/include/linux/platform_data/cyttsp4.h b/include/linux/platform_data/cyttsp4.h index 5dc9d2be38..6eba54aff1 100644 --- a/include/linux/platform_data/cyttsp4.h +++ b/include/linux/platform_data/cyttsp4.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * Header file for: * Cypress TrueTouch(TM) Standard Product (TTSP) touchscreen drivers. @@ -10,7 +9,22 @@ * Copyright (C) 2009, 2010, 2011 Cypress Semiconductor, Inc. * Copyright (C) 2012 Javier Martinez Canillas * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * version 2, and only version 2, as published by the + * Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. 
+ * * Contact Cypress Semiconductor at www.cypress.com (kev@cypress.com) + * */ #ifndef _CYTTSP4_H_ #define _CYTTSP4_H_ diff --git a/include/linux/platform_data/davinci_asp.h b/include/linux/platform_data/davinci_asp.h index 76b13ef675..85ad68f920 100644 --- a/include/linux/platform_data/davinci_asp.h +++ b/include/linux/platform_data/davinci_asp.h @@ -1,7 +1,7 @@ /* * TI DaVinci Audio Serial Port support * - * Copyright (C) 2012 Texas Instruments Incorporated - https://www.ti.com/ + * Copyright (C) 2012 Texas Instruments Incorporated - http://www.ti.com/ * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License as @@ -79,7 +79,6 @@ struct davinci_mcasp_pdata { /* McASP specific fields */ int tdm_slots; u8 op_mode; - u8 dismod; u8 num_serializer; u8 *serial_dir; u8 version; @@ -96,7 +95,6 @@ enum { MCASP_VERSION_2, /* DA8xx/OMAPL1x */ MCASP_VERSION_3, /* TI81xx/AM33xx */ MCASP_VERSION_4, /* DRA7xxx */ - MCASP_VERSION_OMAP, /* OMAP4/5 */ }; enum mcbsp_clk_input_pin { diff --git a/include/linux/platform_data/db8500_thermal.h b/include/linux/platform_data/db8500_thermal.h new file mode 100644 index 0000000000..3bf60902e9 --- /dev/null +++ b/include/linux/platform_data/db8500_thermal.h @@ -0,0 +1,38 @@ +/* + * db8500_thermal.h - DB8500 Thermal Management Implementation + * + * Copyright (C) 2012 ST-Ericsson + * Copyright (C) 2012 Linaro Ltd. + * + * Author: Hongbo Zhang + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#ifndef _DB8500_THERMAL_H_ +#define _DB8500_THERMAL_H_ + +#include + +#define COOLING_DEV_MAX 8 + +struct db8500_trip_point { + unsigned long temp; + enum thermal_trip_type type; + char cdev_name[COOLING_DEV_MAX][THERMAL_NAME_LENGTH]; +}; + +struct db8500_thsens_platform_data { + struct db8500_trip_point trip_points[THERMAL_MAX_TRIPS]; + int num_trips; +}; + +#endif /* _DB8500_THERMAL_H_ */ diff --git a/include/linux/platform_data/dma-atmel.h b/include/linux/platform_data/dma-atmel.h index 069637e600..e95f19c658 100644 --- a/include/linux/platform_data/dma-atmel.h +++ b/include/linux/platform_data/dma-atmel.h @@ -1,8 +1,12 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * Header file for the Atmel AHB DMA Controller driver * * Copyright (C) 2008 Atmel Corporation + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. 
*/ #ifndef AT_HDMAC_H #define AT_HDMAC_H diff --git a/include/linux/platform_data/dma-coh901318.h b/include/linux/platform_data/dma-coh901318.h index 4cca529f8d..c4cb9590d1 100644 --- a/include/linux/platform_data/dma-coh901318.h +++ b/include/linux/platform_data/dma-coh901318.h @@ -1,7 +1,7 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * Platform data for the COH901318 DMA controller * Copyright (C) 2007-2013 ST-Ericsson + * License terms: GNU General Public License (GPL) version 2 */ #ifndef PLAT_COH901318_H diff --git a/include/linux/platform_data/dma-dw.h b/include/linux/platform_data/dma-dw.h index 860ba4bc5e..5f0e11e735 100644 --- a/include/linux/platform_data/dma-dw.h +++ b/include/linux/platform_data/dma-dw.h @@ -1,22 +1,19 @@ -/* SPDX-License-Identifier: GPL-2.0 */ /* * Driver for the Synopsys DesignWare DMA Controller * * Copyright (C) 2007 Atmel Corporation * Copyright (C) 2010-2011 ST Microelectronics + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
 */ #ifndef _PLATFORM_DATA_DMA_DW_H #define _PLATFORM_DATA_DMA_DW_H -#include -#include +#include #define DW_DMA_MAX_NR_MASTERS 4 -#define DW_DMA_MAX_NR_CHANNELS 8 -#define DW_DMA_MIN_BURST 1 -#define DW_DMA_MAX_BURST 256 - -struct device; /** * struct dw_dma_slave - Controller-specific information about a slave @@ -26,7 +23,6 @@ struct device; * @dst_id: dst request line * @m_master: memory master for transfers on allocated channel * @p_master: peripheral master for transfers on allocated channel - * @channels: mask of the channels permitted for allocation (zero value means any) * @hs_polarity:set active low polarity of handshake interface */ struct dw_dma_slave { @@ -35,45 +31,37 @@ struct dw_dma_slave { u8 dst_id; u8 m_master; u8 p_master; - u8 channels; bool hs_polarity; }; /** * struct dw_dma_platform_data - Controller configuration parameters - * @nr_masters: Number of AHB masters supported by the controller * @nr_channels: Number of channels supported by hardware (max 8) + * @is_private: The device channels should be marked as private and not for use + * by the general purpose DMA channel allocator. + * @is_memcpy: The device channels do support memory-to-memory transfers. + * @is_nollp: The device channels does not support multi block transfers. * @chan_allocation_order: Allocate channels starting from 0 or 7 * @chan_priority: Set channel priority increasing from 0 to 7 or 7 to 0. * @block_size: Maximum block size supported by the controller + * @nr_masters: Number of AHB masters supported by the controller * @data_width: Maximum data width supported by hardware per AHB master * (in bytes, power of 2) - * @multi_block: Multi block transfers supported by hardware per channel. - * @max_burst: Maximum value of burst transaction size supported by hardware - * per channel (in units of CTL.SRC_TR_WIDTH/CTL.DST_TR_WIDTH). - * @protctl: Protection control signals setting per channel. - * @quirks: Optional platform quirks. 
*/ struct dw_dma_platform_data { - u32 nr_masters; - u32 nr_channels; + unsigned int nr_channels; + bool is_private; + bool is_memcpy; + bool is_nollp; #define CHAN_ALLOCATION_ASCENDING 0 /* zero to seven */ #define CHAN_ALLOCATION_DESCENDING 1 /* seven to zero */ - u32 chan_allocation_order; + unsigned char chan_allocation_order; #define CHAN_PRIORITY_ASCENDING 0 /* chan0 highest */ #define CHAN_PRIORITY_DESCENDING 1 /* chan7 highest */ - u32 chan_priority; - u32 block_size; - u32 data_width[DW_DMA_MAX_NR_MASTERS]; - u32 multi_block[DW_DMA_MAX_NR_CHANNELS]; - u32 max_burst[DW_DMA_MAX_NR_CHANNELS]; -#define CHAN_PROTCTL_PRIVILEGED BIT(0) -#define CHAN_PROTCTL_BUFFERABLE BIT(1) -#define CHAN_PROTCTL_CACHEABLE BIT(2) -#define CHAN_PROTCTL_MASK GENMASK(2, 0) - u32 protctl; -#define DW_DMA_QUIRK_XBAR_PRESENT BIT(0) - u32 quirks; + unsigned char chan_priority; + unsigned int block_size; + unsigned char nr_masters; + unsigned char data_width[DW_DMA_MAX_NR_MASTERS]; }; #endif /* _PLATFORM_DATA_DMA_DW_H */ diff --git a/include/linux/platform_data/dma-ep93xx.h b/include/linux/platform_data/dma-ep93xx.h index eb9805bb3f..e82c642fa5 100644 --- a/include/linux/platform_data/dma-ep93xx.h +++ b/include/linux/platform_data/dma-ep93xx.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef __ASM_ARCH_DMA_H #define __ASM_ARCH_DMA_H @@ -85,7 +84,7 @@ static inline enum dma_transfer_direction ep93xx_dma_chan_direction(struct dma_chan *chan) { if (!ep93xx_dma_chan_is_m2p(chan)) - return DMA_TRANS_NONE; + return DMA_NONE; /* even channels are for TX, odd for RX */ return (chan->chan_id % 2 == 0) ? 
DMA_MEM_TO_DEV : DMA_DEV_TO_MEM; diff --git a/include/linux/platform_data/dma-hsu.h b/include/linux/platform_data/dma-hsu.h index c65b412b2b..3453fa6555 100644 --- a/include/linux/platform_data/dma-hsu.h +++ b/include/linux/platform_data/dma-hsu.h @@ -1,8 +1,11 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * Driver for the High Speed UART DMA * * Copyright (C) 2015 Intel Corporation + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. */ #ifndef _PLATFORM_DATA_DMA_HSU_H diff --git a/include/linux/platform_data/dma-imx-sdma.h b/include/linux/platform_data/dma-imx-sdma.h index 30e676b36b..2d08816720 100644 --- a/include/linux/platform_data/dma-imx-sdma.h +++ b/include/linux/platform_data/dma-imx-sdma.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef __MACH_MXC_SDMA_H__ #define __MACH_MXC_SDMA_H__ @@ -51,10 +50,7 @@ struct sdma_script_start_addrs { /* End of v2 array */ s32 zcanfd_2_mcu_addr; s32 zqspi_2_mcu_addr; - s32 mcu_2_ecspi_addr; /* End of v3 array */ - s32 mcu_2_zqspi_addr; - /* End of v4 array */ }; /** diff --git a/include/linux/platform_data/dma-imx.h b/include/linux/platform_data/dma-imx.h index 281adbb26e..7d964e7872 100644 --- a/include/linux/platform_data/dma-imx.h +++ b/include/linux/platform_data/dma-imx.h @@ -1,6 +1,9 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright 2004-2009 Freescale Semiconductor, Inc. All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
*/ #ifndef __ASM_ARCH_MXC_DMA_H__ diff --git a/include/linux/platform_data/dma-mmp_tdma.h b/include/linux/platform_data/dma-mmp_tdma.h index 8bec5484dc..422d4504db 100644 --- a/include/linux/platform_data/dma-mmp_tdma.h +++ b/include/linux/platform_data/dma-mmp_tdma.h @@ -1,8 +1,12 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * SRAM Memory Management * * Copyright (c) 2011 Marvell Semiconductors Inc. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * */ #ifndef __DMA_MMP_TDMA_H diff --git a/include/linux/platform_data/dma-mv_xor.h b/include/linux/platform_data/dma-mv_xor.h index 6867a7ea32..92ffd3245f 100644 --- a/include/linux/platform_data/dma-mv_xor.h +++ b/include/linux/platform_data/dma-mv_xor.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ /* * Marvell XOR platform device data definition file. */ diff --git a/include/linux/platform_data/dma-s3c24xx.h b/include/linux/platform_data/dma-s3c24xx.h index 96d02dbeea..4f9aba405e 100644 --- a/include/linux/platform_data/dma-s3c24xx.h +++ b/include/linux/platform_data/dma-s3c24xx.h @@ -1,8 +1,12 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * S3C24XX DMA handling * * Copyright (c) 2013 Heiko Stuebner + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 2 of the License, or (at your option) + * any later version. */ /* Helper to encode the source selection constraints for early s3c socs. 
*/ diff --git a/include/linux/platform_data/dma-ste-dma40.h b/include/linux/platform_data/dma-ste-dma40.h index 10641633fa..1bb9b18522 100644 --- a/include/linux/platform_data/dma-ste-dma40.h +++ b/include/linux/platform_data/dma-ste-dma40.h @@ -1,8 +1,8 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright (C) ST-Ericsson SA 2007-2010 * Author: Per Forlin for ST-Ericsson * Author: Jonas Aaberg for ST-Ericsson + * License terms: GNU General Public License (GPL) version 2 */ diff --git a/include/linux/platform_data/dmtimer-omap.h b/include/linux/platform_data/dmtimer-omap.h index 95d852aef1..a19b78d826 100644 --- a/include/linux/platform_data/dmtimer-omap.h +++ b/include/linux/platform_data/dmtimer-omap.h @@ -1,58 +1,31 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * DMTIMER platform data for TI OMAP platforms * * Copyright (C) 2012 Texas Instruments * Author: Jon Hunter + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published by + * the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program. If not, see . 
*/ #ifndef __PLATFORM_DATA_DMTIMER_OMAP_H__ #define __PLATFORM_DATA_DMTIMER_OMAP_H__ -struct omap_dm_timer_ops { - struct omap_dm_timer *(*request_by_node)(struct device_node *np); - struct omap_dm_timer *(*request_specific)(int timer_id); - struct omap_dm_timer *(*request)(void); - - int (*free)(struct omap_dm_timer *timer); - - void (*enable)(struct omap_dm_timer *timer); - void (*disable)(struct omap_dm_timer *timer); - - int (*get_irq)(struct omap_dm_timer *timer); - int (*set_int_enable)(struct omap_dm_timer *timer, - unsigned int value); - int (*set_int_disable)(struct omap_dm_timer *timer, u32 mask); - - struct clk *(*get_fclk)(struct omap_dm_timer *timer); - - int (*start)(struct omap_dm_timer *timer); - int (*stop)(struct omap_dm_timer *timer); - int (*set_source)(struct omap_dm_timer *timer, int source); - - int (*set_load)(struct omap_dm_timer *timer, unsigned int value); - int (*set_match)(struct omap_dm_timer *timer, int enable, - unsigned int match); - int (*set_pwm)(struct omap_dm_timer *timer, int def_on, - int toggle, int trigger, int autoreload); - int (*get_pwm_status)(struct omap_dm_timer *timer); - int (*set_prescaler)(struct omap_dm_timer *timer, int prescaler); - - unsigned int (*read_counter)(struct omap_dm_timer *timer); - int (*write_counter)(struct omap_dm_timer *timer, - unsigned int value); - unsigned int (*read_status)(struct omap_dm_timer *timer); - int (*write_status)(struct omap_dm_timer *timer, - unsigned int value); -}; - struct dmtimer_platform_data { /* set_timer_src - Only used for OMAP1 devices */ int (*set_timer_src)(struct platform_device *pdev, int source); u32 timer_capability; u32 timer_errata; int (*get_context_loss_count)(struct device *); - const struct omap_dm_timer_ops *timer_ops; }; #endif /* __PLATFORM_DATA_DMTIMER_OMAP_H__ */ diff --git a/include/linux/platform_data/drv260x-pdata.h b/include/linux/platform_data/drv260x-pdata.h new file mode 100644 index 0000000000..0a03b09444 --- /dev/null +++ 
b/include/linux/platform_data/drv260x-pdata.h @@ -0,0 +1,28 @@ +/* + * Platform data for DRV260X haptics driver family + * + * Author: Dan Murphy + * + * Copyright: (C) 2014 Texas Instruments, Inc. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + */ + +#ifndef _LINUX_DRV260X_PDATA_H +#define _LINUX_DRV260X_PDATA_H + +struct drv260x_platform_data { + u32 library_selection; + u32 mode; + u32 vib_rated_voltage; + u32 vib_overdrive_voltage; +}; + +#endif diff --git a/include/linux/platform_data/dwc3-omap.h b/include/linux/platform_data/dwc3-omap.h new file mode 100644 index 0000000000..1d36ca874c --- /dev/null +++ b/include/linux/platform_data/dwc3-omap.h @@ -0,0 +1,43 @@ +/** + * dwc3-omap.h - OMAP Specific Glue layer, header. + * + * Copyright (C) 2010-2011 Texas Instruments Incorporated - http://www.ti.com + * All rights reserved. + * + * Author: Felipe Balbi + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions, and the following disclaimer, + * without modification. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. The names of the above-listed copyright holders may not be used + * to endorse or promote products derived from this software without + * specific prior written permission. 
+ * + * ALTERNATIVELY, this software may be distributed under the terms of the + * GNU General Public License ("GPL") version 2, as published by the Free + * Software Foundation. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, + * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF + * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +enum dwc3_omap_utmi_mode { + DWC3_OMAP_UTMI_MODE_UNKNOWN = 0, + DWC3_OMAP_UTMI_MODE_HW, + DWC3_OMAP_UTMI_MODE_SW, +}; diff --git a/include/linux/platform_data/edma.h b/include/linux/platform_data/edma.h index ee13d5ca63..0a533f9443 100644 --- a/include/linux/platform_data/edma.h +++ b/include/linux/platform_data/edma.h @@ -1,8 +1,12 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * TI EDMA definitions * * Copyright (C) 2006-2013 Texas Instruments. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. 
*/ /* diff --git a/include/linux/platform_data/efm32-spi.h b/include/linux/platform_data/efm32-spi.h index a2c56fcd05..31b19ca1d7 100644 --- a/include/linux/platform_data/efm32-spi.h +++ b/include/linux/platform_data/efm32-spi.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef __LINUX_PLATFORM_DATA_EFM32_SPI_H__ #define __LINUX_PLATFORM_DATA_EFM32_SPI_H__ diff --git a/include/linux/platform_data/efm32-uart.h b/include/linux/platform_data/efm32-uart.h index ccbb8f11db..ed0e975b3c 100644 --- a/include/linux/platform_data/efm32-uart.h +++ b/include/linux/platform_data/efm32-uart.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ /* * * diff --git a/include/linux/platform_data/ehci-sh.h b/include/linux/platform_data/ehci-sh.h new file mode 100644 index 0000000000..5c15a738e1 --- /dev/null +++ b/include/linux/platform_data/ehci-sh.h @@ -0,0 +1,28 @@ +/* + * EHCI SuperH driver platform data + * + * Copyright (C) 2012 Nobuhiro Iwamatsu + * Copyright (C) 2012 Renesas Solutions Corp. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; version 2 of the License. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef __USB_EHCI_SH_H +#define __USB_EHCI_SH_H + +struct ehci_sh_platdata { + void (*phy_init)(void); /* Phy init function */ +}; + +#endif /* __USB_EHCI_SH_H */ diff --git a/include/linux/platform_data/elm.h b/include/linux/platform_data/elm.h index 3cc78f0447..b8686c00f1 100644 --- a/include/linux/platform_data/elm.h +++ b/include/linux/platform_data/elm.h @@ -1,8 +1,18 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * BCH Error Location Module * - * Copyright (C) 2012 Texas Instruments Incorporated - https://www.ti.com/ + * Copyright (C) 2012 Texas Instruments Incorporated - http://www.ti.com/ + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * */ #ifndef __ELM_H @@ -50,6 +60,6 @@ static inline int elm_config(struct device *dev, enum bch_ecc bch_type, { return -ENOSYS; } -#endif /* CONFIG_MTD_NAND_OMAP_BCH */ +#endif /* CONFIG_MTD_NAND_ECC_BCH */ #endif /* __ELM_H */ diff --git a/include/linux/platform_data/emif_plat.h b/include/linux/platform_data/emif_plat.h index b93feef5d5..5c19a2a647 100644 --- a/include/linux/platform_data/emif_plat.h +++ b/include/linux/platform_data/emif_plat.h @@ -1,10 +1,13 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * Definitions for TI EMIF device platform data * * Copyright (C) 2012 Texas Instruments, Inc. 
* * Aneesh V + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. */ #ifndef __EMIF_PLAT_H #define __EMIF_PLAT_H diff --git a/include/linux/platform_data/eth-netx.h b/include/linux/platform_data/eth-netx.h new file mode 100644 index 0000000000..a395159725 --- /dev/null +++ b/include/linux/platform_data/eth-netx.h @@ -0,0 +1,25 @@ +/* + * Copyright (c) 2005 Sascha Hauer , Pengutronix + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ + +#ifndef __ETH_NETX_H +#define __ETH_NETX_H + +struct netxeth_platform_data { + unsigned int xcno; /* number of xmac/xpec engine this eth uses */ +}; + +#endif diff --git a/include/linux/platform_data/fsa9480.h b/include/linux/platform_data/fsa9480.h new file mode 100644 index 0000000000..72dddcb4be --- /dev/null +++ b/include/linux/platform_data/fsa9480.h @@ -0,0 +1,27 @@ +/* + * Copyright (C) 2010 Samsung Electronics + * Minkyu Kang + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */ + +#ifndef _FSA9480_H_ +#define _FSA9480_H_ + +#define FSA9480_ATTACHED 1 +#define FSA9480_DETACHED 0 + +struct fsa9480_platform_data { + void (*cfg_gpio) (void); + void (*usb_cb) (u8 attached); + void (*uart_cb) (u8 attached); + void (*charger_cb) (u8 attached); + void (*jig_cb) (u8 attached); + void (*reset_cb) (void); + void (*usb_power) (u8 on); + int wakeup; +}; + +#endif /* _FSA9480_H_ */ diff --git a/include/linux/platform_data/g762.h b/include/linux/platform_data/g762.h index 249257ee21..d3c5128376 100644 --- a/include/linux/platform_data/g762.h +++ b/include/linux/platform_data/g762.h @@ -1,8 +1,21 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * Platform data structure for g762 fan controller driver * * Copyright (C) 2013, Arnaud EBALARD + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #ifndef __LINUX_PLATFORM_DATA_G762_H__ #define __LINUX_PLATFORM_DATA_G762_H__ diff --git a/include/linux/platform_data/gpio-ath79.h b/include/linux/platform_data/gpio-ath79.h index 3ea6dd942c..88b0db7bee 100644 --- a/include/linux/platform_data/gpio-ath79.h +++ b/include/linux/platform_data/gpio-ath79.h @@ -1,8 +1,11 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * Atheros AR7XXX/AR9XXX GPIO controller platform data * * Copyright (C) 2015 Alban Bedel + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. */ #ifndef __LINUX_PLATFORM_DATA_GPIO_ATH79_H diff --git a/include/linux/platform_data/gpio-davinci.h b/include/linux/platform_data/gpio-davinci.h index e182a46e60..6ace3fd32b 100644 --- a/include/linux/platform_data/gpio-davinci.h +++ b/include/linux/platform_data/gpio-davinci.h @@ -1,7 +1,7 @@ /* * DaVinci GPIO Platform Related Defines * - * Copyright (C) 2013 Texas Instruments Incorporated - https://www.ti.com/ + * Copyright (C) 2013 Texas Instruments Incorporated - http://www.ti.com/ * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License as @@ -16,14 +16,40 @@ #ifndef __DAVINCI_GPIO_PLATFORM_H #define __DAVINCI_GPIO_PLATFORM_H +#include +#include + +#include + struct davinci_gpio_platform_data { - bool no_auto_base; - u32 base; u32 ngpio; u32 gpio_unbanked; }; + +struct davinci_gpio_controller { + struct gpio_chip chip; + struct irq_domain *irq_domain; + /* Serialize access to GPIO registers */ + spinlock_t lock; + void __iomem *regs; + void __iomem *set_data; + void __iomem *clr_data; + void __iomem *in_data; + int gpio_unbanked; + 
unsigned gpio_irq; +}; + +/* + * basic gpio routines + */ +#define GPIO(X) (X) /* 0 <= X <= (DAVINCI_N_GPIO - 1) */ + /* Convert GPIO signal to GPIO pin number */ #define GPIO_TO_PIN(bank, gpio) (16 * (bank) + (gpio)) +static inline u32 __gpio_mask(unsigned gpio) +{ + return 1 << (gpio % 32); +} #endif diff --git a/include/linux/platform_data/gpio-dwapb.h b/include/linux/platform_data/gpio-dwapb.h index 0aa5c67202..2dc7f4a8ab 100644 --- a/include/linux/platform_data/gpio-dwapb.h +++ b/include/linux/platform_data/gpio-dwapb.h @@ -1,19 +1,25 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright(c) 2014 Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. */ #ifndef GPIO_DW_APB_H #define GPIO_DW_APB_H -#define DWAPB_MAX_GPIOS 32 - struct dwapb_port_property { struct fwnode_handle *fwnode; unsigned int idx; unsigned int ngpio; unsigned int gpio_base; - int irq[DWAPB_MAX_GPIOS]; + unsigned int irq; bool irq_shared; }; diff --git a/include/linux/platform_data/gpio-htc-egpio.h b/include/linux/platform_data/gpio-htc-egpio.h index eaefba0b64..b4201c9713 100644 --- a/include/linux/platform_data/gpio-htc-egpio.h +++ b/include/linux/platform_data/gpio-htc-egpio.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ /* * HTC simple EGPIO irq and gpio extender */ @@ -6,6 +5,8 @@ #ifndef __HTC_EGPIO_H__ #define __HTC_EGPIO_H__ +#include + /* Descriptive values for all-in or all-out htc_egpio_chip descriptors. 
*/ #define HTC_EGPIO_OUTPUT (~0) #define HTC_EGPIO_INPUT 0 @@ -50,4 +51,7 @@ struct htc_egpio_platform_data { int num_chips; }; +/* Determine the wakeup irq, to be called during early resume */ +extern int htc_egpio_get_wakeup_irq(struct device *dev); + #endif diff --git a/include/linux/platform_data/gpio-omap.h b/include/linux/platform_data/gpio-omap.h index f377817ce7..cb2618147c 100644 --- a/include/linux/platform_data/gpio-omap.h +++ b/include/linux/platform_data/gpio-omap.h @@ -1,19 +1,31 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * OMAP GPIO handling defines and functions * * Copyright (C) 2003-2005 Nokia Corporation * * Written by Juha Yrjölä + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + * */ #ifndef __ASM_ARCH_OMAP_GPIO_H #define __ASM_ARCH_OMAP_GPIO_H -#ifndef __ASSEMBLER__ #include #include -#endif #define OMAP1_MPUIO_BASE 0xfffb5000 @@ -85,7 +97,6 @@ * omap2+ specific GPIO registers */ #define OMAP24XX_GPIO_REVISION 0x0000 -#define OMAP24XX_GPIO_SYSCONFIG 0x0010 #define OMAP24XX_GPIO_IRQSTATUS1 0x0018 #define OMAP24XX_GPIO_IRQSTATUS2 0x0028 #define OMAP24XX_GPIO_IRQENABLE2 0x002c @@ -109,7 +120,6 @@ #define OMAP24XX_GPIO_SETDATAOUT 0x0094 #define OMAP4_GPIO_REVISION 0x0000 -#define OMAP4_GPIO_SYSCONFIG 0x0010 #define OMAP4_GPIO_EOI 0x0020 #define OMAP4_GPIO_IRQSTATUSRAW0 0x0024 #define OMAP4_GPIO_IRQSTATUSRAW1 0x0028 @@ -147,10 +157,13 @@ #define OMAP_MPUIO(nr) (OMAP_MAX_GPIO_LINES + (nr)) #define OMAP_GPIO_IS_MPUIO(nr) ((nr) >= OMAP_MAX_GPIO_LINES) -#ifndef __ASSEMBLER__ +struct omap_gpio_dev_attr { + int bank_width; /* GPIO bank width */ + bool dbck_flag; /* dbck required or not - True for OMAP3&4 */ +}; + struct omap_gpio_reg_offs { u16 revision; - u16 sysconfig; u16 direction; u16 datain; u16 dataout; @@ -189,12 +202,23 @@ struct omap_gpio_platform_data { bool is_mpuio; /* whether the bank is of type MPUIO */ u32 non_wakeup_gpios; - const struct omap_gpio_reg_offs *regs; + struct omap_gpio_reg_offs *regs; /* Return context loss count due to PM states changing */ int (*get_context_loss_count)(struct device *dev); }; -#endif /* __ASSEMBLER__ */ +#if IS_BUILTIN(CONFIG_GPIO_OMAP) +extern void omap2_gpio_prepare_for_idle(int off_mode); +extern void omap2_gpio_resume_after_idle(void); +#else +static inline void omap2_gpio_prepare_for_idle(int off_mode) +{ +} + +static inline void omap2_gpio_resume_after_idle(void) +{ +} +#endif #endif diff --git a/include/linux/platform_data/gpio-ts5500.h b/include/linux/platform_data/gpio-ts5500.h 
new file mode 100644 index 0000000000..b10d11c9bb --- /dev/null +++ b/include/linux/platform_data/gpio-ts5500.h @@ -0,0 +1,27 @@ +/* + * GPIO (DIO) header for Technologic Systems TS-5500 + * + * Copyright (c) 2012 Savoir-faire Linux Inc. + * Vivien Didelot + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#ifndef _PDATA_GPIO_TS5500_H +#define _PDATA_GPIO_TS5500_H + +/** + * struct ts5500_dio_platform_data - TS-5500 pin block configuration + * @base: The GPIO base number to use. + * @strap: The only pin connected to an interrupt in a block is input-only. + * If you need a bidirectional line which can trigger an IRQ, you + * may strap it with an in/out pin. This flag indicates this case. + */ +struct ts5500_dio_platform_data { + int base; + bool strap; +}; + +#endif /* _PDATA_GPIO_TS5500_H */ diff --git a/include/linux/platform_data/gpio_backlight.h b/include/linux/platform_data/gpio_backlight.h index 1a8b5b1946..5ae0d9c80d 100644 --- a/include/linux/platform_data/gpio_backlight.h +++ b/include/linux/platform_data/gpio_backlight.h @@ -1,6 +1,9 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * gpio_backlight.h - Simple GPIO-controlled backlight + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
*/ #ifndef __GPIO_BACKLIGHT_H__ #define __GPIO_BACKLIGHT_H__ @@ -9,6 +12,10 @@ struct device; struct gpio_backlight_platform_data { struct device *fbdev; + int gpio; + int def_value; + bool active_low; + const char *name; }; #endif diff --git a/include/linux/platform_data/gpmc-omap.h b/include/linux/platform_data/gpmc-omap.h index c9cc4e3243..67ccdb0e16 100644 --- a/include/linux/platform_data/gpmc-omap.h +++ b/include/linux/platform_data/gpmc-omap.h @@ -1,9 +1,12 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * OMAP GPMC Platform data * - * Copyright (C) 2014 Texas Instruments, Inc. - https://www.ti.com + * Copyright (C) 2014 Texas Instruments, Inc. - http://www.ti.com * Roger Quadros + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. */ #ifndef _GPMC_OMAP_H_ diff --git a/include/linux/platform_data/hsmmc-omap.h b/include/linux/platform_data/hsmmc-omap.h index 7124a5f4bf..8e981be2e2 100644 --- a/include/linux/platform_data/hsmmc-omap.h +++ b/include/linux/platform_data/hsmmc-omap.h @@ -1,8 +1,11 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * MMC definitions for OMAP2 * * Copyright (C) 2006 Nokia Corporation + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. */ /* @@ -52,6 +55,9 @@ struct omap_hsmmc_platform_data { u32 caps; /* Used for the MMC driver on 2430 and later */ u32 pm_caps; /* PM capabilities of the mmc */ + /* use the internal clock */ + unsigned internal_clock:1; + /* nonremovable e.g. 
eMMC */ unsigned nonremovable:1; @@ -64,8 +70,18 @@ struct omap_hsmmc_platform_data { #define HSMMC_HAS_HSPE_SUPPORT (1 << 2) unsigned features; - /* string specifying a particular variant of hardware */ - char *version; + int gpio_cd; /* gpio (card detect) */ + int gpio_cod; /* gpio (cover detect) */ + int gpio_wp; /* gpio (write protect) */ + + int (*set_power)(struct device *dev, int power_on, int vdd); + void (*remux)(struct device *dev, int power_on); + /* Call back before enabling / disabling regulators */ + void (*before_set_reg)(struct device *dev, int power_on, int vdd); + /* Call back after enabling / disabling regulators */ + void (*after_set_reg)(struct device *dev, int power_on, int vdd); + /* if we have special card, init it using this callback */ + void (*init_card)(struct mmc_card *card); const char *name; u32 ocr_mask; diff --git a/include/linux/platform_data/hwmon-s3c.h b/include/linux/platform_data/hwmon-s3c.h index 1707ad4147..0e3cce130f 100644 --- a/include/linux/platform_data/hwmon-s3c.h +++ b/include/linux/platform_data/hwmon-s3c.h @@ -1,10 +1,13 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright 2005 Simtec Electronics * Ben Dooks * http://armlinux.simtec.co.uk/ * * S3C - HWMon interface for ADC + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. */ #ifndef __HWMON_S3C_H__ diff --git a/include/linux/platform_data/i2c-cbus-gpio.h b/include/linux/platform_data/i2c-cbus-gpio.h new file mode 100644 index 0000000000..6faa992a95 --- /dev/null +++ b/include/linux/platform_data/i2c-cbus-gpio.h @@ -0,0 +1,27 @@ +/* + * i2c-cbus-gpio.h - CBUS I2C platform_data definition + * + * Copyright (C) 2004-2009 Nokia Corporation + * + * Written by Felipe Balbi and Aaro Koskinen. + * + * This file is subject to the terms and conditions of the GNU General + * Public License. 
See the file "COPYING" in the main directory of this + * archive for more details. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#ifndef __INCLUDE_LINUX_I2C_CBUS_GPIO_H +#define __INCLUDE_LINUX_I2C_CBUS_GPIO_H + +struct i2c_cbus_platform_data { + int dat_gpio; + int clk_gpio; + int sel_gpio; +}; + +#endif /* __INCLUDE_LINUX_I2C_CBUS_GPIO_H */ diff --git a/include/linux/platform_data/i2c-davinci.h b/include/linux/platform_data/i2c-davinci.h index 98967df074..89fd34727a 100644 --- a/include/linux/platform_data/i2c-davinci.h +++ b/include/linux/platform_data/i2c-davinci.h @@ -16,8 +16,9 @@ struct davinci_i2c_platform_data { unsigned int bus_freq; /* standard bus frequency (kHz) */ unsigned int bus_delay; /* post-transaction delay (usec) */ - bool gpio_recovery; /* Use GPIO recovery method */ - bool has_pfunc; /* Chip has a ICPFUNC register */ + unsigned int sda_pin; /* GPIO pin ID to use for SDA */ + unsigned int scl_pin; /* GPIO pin ID to use for SCL */ + bool has_pfunc; /*chip has a ICPFUNC register */ }; /* for board setup code */ diff --git a/include/linux/platform_data/i2c-designware.h b/include/linux/platform_data/i2c-designware.h index 014c4a5a7e..7a61fb27c2 100644 --- a/include/linux/platform_data/i2c-designware.h +++ b/include/linux/platform_data/i2c-designware.h @@ -1,6 +1,14 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright(c) 2014 Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. 
+ * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. */ #ifndef I2C_DESIGNWARE_H diff --git a/include/linux/platform_data/i2c-imx.h b/include/linux/platform_data/i2c-imx.h index 962bfc3273..8289d915e6 100644 --- a/include/linux/platform_data/i2c-imx.h +++ b/include/linux/platform_data/i2c-imx.h @@ -1,8 +1,9 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * i2c.h - i.MX I2C driver header file * * Copyright (c) 2008, Darius Augulis + * + * This file is released under the GPLv2 */ #ifndef __ASM_ARCH_I2C_H_ diff --git a/include/linux/platform_data/i2c-mux-reg.h b/include/linux/platform_data/i2c-mux-reg.h index 2543c2a1c9..c68712aadf 100644 --- a/include/linux/platform_data/i2c-mux-reg.h +++ b/include/linux/platform_data/i2c-mux-reg.h @@ -1,9 +1,13 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * I2C multiplexer using a single register * * Copyright 2015 Freescale Semiconductor * York Sun + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. 
*/ #ifndef __LINUX_PLATFORM_DATA_I2C_MUX_REG_H diff --git a/include/linux/platform_data/i2c-nuc900.h b/include/linux/platform_data/i2c-nuc900.h new file mode 100644 index 0000000000..9ffb12d06e --- /dev/null +++ b/include/linux/platform_data/i2c-nuc900.h @@ -0,0 +1,9 @@ +#ifndef __ASM_ARCH_NUC900_I2C_H +#define __ASM_ARCH_NUC900_I2C_H + +struct nuc900_platform_i2c { + int bus_num; + unsigned long bus_freq; +}; + +#endif /* __ASM_ARCH_NUC900_I2C_H */ diff --git a/include/linux/platform_data/i2c-s3c2410.h b/include/linux/platform_data/i2c-s3c2410.h index 5507467151..05af66b840 100644 --- a/include/linux/platform_data/i2c-s3c2410.h +++ b/include/linux/platform_data/i2c-s3c2410.h @@ -1,9 +1,12 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright 2004-2009 Simtec Electronics * Ben Dooks * * S3C - I2C Controller platform_device info + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. */ #ifndef __I2C_S3C2410_H diff --git a/include/linux/platform_data/ina2xx.h b/include/linux/platform_data/ina2xx.h index 2aa5ee9a90..9abc0ca725 100644 --- a/include/linux/platform_data/ina2xx.h +++ b/include/linux/platform_data/ina2xx.h @@ -1,10 +1,13 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * Driver for Texas Instruments INA219, INA226 power monitor chips * - * Copyright (C) 2012 Lothar Felten + * Copyright (C) 2012 Lothar Felten * - * For further information, see the Documentation/hwmon/ina2xx.rst file. + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * For further information, see the Documentation/hwmon/ina2xx file. 
*/ /** diff --git a/include/linux/platform_data/intel-mid_wdt.h b/include/linux/platform_data/intel-mid_wdt.h index 8dba70b4b0..b98253466a 100644 --- a/include/linux/platform_data/intel-mid_wdt.h +++ b/include/linux/platform_data/intel-mid_wdt.h @@ -1,9 +1,12 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * intel-mid_wdt: generic Intel MID SCU watchdog driver * * Copyright (C) 2014 Intel Corporation. All rights reserved. * Contact: David Cohen + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of version 2 of the GNU General + * Public License as published by the Free Software Foundation. */ #ifndef __INTEL_MID_WDT_H__ diff --git a/include/linux/platform_data/invensense_mpu6050.h b/include/linux/platform_data/invensense_mpu6050.h index f05b37521f..554b59801a 100644 --- a/include/linux/platform_data/invensense_mpu6050.h +++ b/include/linux/platform_data/invensense_mpu6050.h @@ -1,6 +1,14 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright (C) 2012 Invensense, Inc. +* +* This software is licensed under the terms of the GNU General Public +* License version 2, as published by the Free Software Foundation, and +* may be copied, distributed, and modified under those terms. +* +* This program is distributed in the hope that it will be useful, +* but WITHOUT ANY WARRANTY; without even the implied warranty of +* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +* GNU General Public License for more details. */ #ifndef __INV_MPU6050_PLATFORM_H_ @@ -12,7 +20,7 @@ * mounting matrix retrieved from device-tree) * * Contains platform specific information on how to configure the MPU6050 to - * work on this platform. The orientation matrices are 3x3 rotation matrices + * work on this platform. The orientation matricies are 3x3 rotation matricies * that are applied to the data to rotate from the mounting orientation to the * platform orientation. 
The values must be one of 0, 1, or -1 and each row and * column should have exactly 1 non-zero value. diff --git a/include/linux/platform_data/iommu-omap.h b/include/linux/platform_data/iommu-omap.h index 8474a0208b..0496d17170 100644 --- a/include/linux/platform_data/iommu-omap.h +++ b/include/linux/platform_data/iommu-omap.h @@ -1,20 +1,39 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * omap iommu: main structures * * Copyright (C) 2008-2009 Nokia Corporation * * Written by Hiroshi DOYU + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. */ #include +#define MMU_REG_SIZE 256 + +/** + * struct iommu_arch_data - omap iommu private data + * @name: name of the iommu device + * @iommu_dev: handle of the iommu device + * + * This is an omap iommu private data object, which binds an iommu user + * to its iommu device. This object should be placed at the iommu user's + * dev_archdata so generic IOMMU API can be used without having to + * utilize omap-specific plumbing anymore. 
+ */ +struct omap_iommu_arch_data { + const char *name; + struct omap_iommu *iommu_dev; +}; + struct iommu_platform_data { + const char *name; const char *reset_name; + int nr_tlb_entries; + int (*assert_reset)(struct platform_device *pdev, const char *name); int (*deassert_reset)(struct platform_device *pdev, const char *name); - int (*device_enable)(struct platform_device *pdev); - int (*device_idle)(struct platform_device *pdev); - int (*set_pwrdm_constraint)(struct platform_device *pdev, bool request, - u8 *pwrst); }; diff --git a/include/linux/platform_data/irda-pxaficp.h b/include/linux/platform_data/irda-pxaficp.h index bd35ddcf30..3cd41f77dd 100644 --- a/include/linux/platform_data/irda-pxaficp.h +++ b/include/linux/platform_data/irda-pxaficp.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef ASMARM_ARCH_IRDA_H #define ASMARM_ARCH_IRDA_H diff --git a/include/linux/platform_data/irda-sa11x0.h b/include/linux/platform_data/irda-sa11x0.h index 7db59c9175..38f77b5e56 100644 --- a/include/linux/platform_data/irda-sa11x0.h +++ b/include/linux/platform_data/irda-sa11x0.h @@ -1,8 +1,11 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * arch/arm/include/asm/mach/irda.h * * Copyright (C) 2004 Russell King. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
*/ #ifndef __ASM_ARM_MACH_IRDA_H #define __ASM_ARM_MACH_IRDA_H diff --git a/include/linux/platform_data/isl9305.h b/include/linux/platform_data/isl9305.h index 6893fdaae7..1419133fa6 100644 --- a/include/linux/platform_data/isl9305.h +++ b/include/linux/platform_data/isl9305.h @@ -1,10 +1,14 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * isl9305 - Intersil ISL9305 DCDC regulator * * Copyright 2014 Linaro Ltd * * Author: Mark Brown + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. */ #ifndef __ISL9305_H @@ -20,7 +24,7 @@ struct regulator_init_data; struct isl9305_pdata { - struct regulator_init_data *init_data[ISL9305_MAX_REGULATOR + 1]; + struct regulator_init_data *init_data[ISL9305_MAX_REGULATOR]; }; #endif diff --git a/include/linux/platform_data/itco_wdt.h b/include/linux/platform_data/itco_wdt.h index 45d860cac2..f16542c77f 100644 --- a/include/linux/platform_data/itco_wdt.h +++ b/include/linux/platform_data/itco_wdt.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ /* * Platform data for the Intel TCO Watchdog */ @@ -12,16 +11,9 @@ #define ICH_RES_MEM_OFF 2 #define ICH_RES_MEM_GCS_PMC 0 -/** - * struct itco_wdt_platform_data - iTCO_wdt platform data - * @name: Name of the platform - * @version: iTCO version - * @no_reboot_use_pmc: Use PMC BXT API to set and clear NO_REBOOT bit - */ struct itco_wdt_platform_data { char name[32]; unsigned int version; - bool no_reboot_use_pmc; }; #endif /* _ITCO_WDT_H_ */ diff --git a/include/linux/platform_data/keyboard-pxa930_rotary.h b/include/linux/platform_data/keyboard-pxa930_rotary.h index 3271aa01cb..053587caff 100644 --- a/include/linux/platform_data/keyboard-pxa930_rotary.h +++ b/include/linux/platform_data/keyboard-pxa930_rotary.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef 
__ASM_ARCH_PXA930_ROTARY_H #define __ASM_ARCH_PXA930_ROTARY_H diff --git a/include/linux/platform_data/keypad-ep93xx.h b/include/linux/platform_data/keypad-ep93xx.h index 3054fced85..adccee25b1 100644 --- a/include/linux/platform_data/keypad-ep93xx.h +++ b/include/linux/platform_data/keypad-ep93xx.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef __KEYPAD_EP93XX_H #define __KEYPAD_EP93XX_H @@ -9,7 +8,8 @@ struct matrix_keymap_data; #define EP93XX_KEYPAD_DIAG_MODE (1<<1) /* diagnostic mode */ #define EP93XX_KEYPAD_BACK_DRIVE (1<<2) /* back driving mode */ #define EP93XX_KEYPAD_TEST_MODE (1<<3) /* scan only column 0 */ -#define EP93XX_KEYPAD_AUTOREPEAT (1<<4) /* enable key autorepeat */ +#define EP93XX_KEYPAD_KDIV (1<<4) /* 1/4 clock or 1/16 clock */ +#define EP93XX_KEYPAD_AUTOREPEAT (1<<5) /* enable key autorepeat */ /** * struct ep93xx_keypad_platform_data - platform specific device structure @@ -23,7 +23,6 @@ struct ep93xx_keypad_platform_data { unsigned int debounce; unsigned int prescale; unsigned int flags; - unsigned int clk_rate; }; #define EP93XX_MATRIX_ROWS (8) diff --git a/include/linux/platform_data/keypad-nomadik-ske.h b/include/linux/platform_data/keypad-nomadik-ske.h index 7efabbca1d..31382fbc07 100644 --- a/include/linux/platform_data/keypad-nomadik-ske.h +++ b/include/linux/platform_data/keypad-nomadik-ske.h @@ -1,7 +1,7 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright (C) ST-Ericsson SA 2010 * + * License Terms: GNU General Public License v2 * Author: Naveen Kumar Gaddipati * * ux500 Scroll key and Keypad Encoder (SKE) header diff --git a/include/linux/platform_data/keypad-omap.h b/include/linux/platform_data/keypad-omap.h index 3e7c64c854..c3a3abae98 100644 --- a/include/linux/platform_data/keypad-omap.h +++ b/include/linux/platform_data/keypad-omap.h @@ -1,6 +1,9 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright (C) 2006 Komal Shah + * + * This program is free software; you can redistribute it and/or 
modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. */ #ifndef __KEYPAD_OMAP_H #define __KEYPAD_OMAP_H diff --git a/include/linux/platform_data/keypad-pxa27x.h b/include/linux/platform_data/keypad-pxa27x.h index a376442b99..24625569d1 100644 --- a/include/linux/platform_data/keypad-pxa27x.h +++ b/include/linux/platform_data/keypad-pxa27x.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef __ASM_ARCH_PXA27x_KEYPAD_H #define __ASM_ARCH_PXA27x_KEYPAD_H diff --git a/include/linux/platform_data/keypad-w90p910.h b/include/linux/platform_data/keypad-w90p910.h new file mode 100644 index 0000000000..556778e8dd --- /dev/null +++ b/include/linux/platform_data/keypad-w90p910.h @@ -0,0 +1,15 @@ +#ifndef __ASM_ARCH_W90P910_KEYPAD_H +#define __ASM_ARCH_W90P910_KEYPAD_H + +#include + +extern void mfp_set_groupi(struct device *dev); + +struct w90p910_keypad_platform_data { + const struct matrix_keymap_data *keymap_data; + + unsigned int prescale; + unsigned int debounce; +}; + +#endif /* __ASM_ARCH_W90P910_KEYPAD_H */ diff --git a/include/linux/platform_data/keyscan-davinci.h b/include/linux/platform_data/keyscan-davinci.h index 260d596ba0..7a560e05bd 100644 --- a/include/linux/platform_data/keyscan-davinci.h +++ b/include/linux/platform_data/keyscan-davinci.h @@ -1,8 +1,21 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * Copyright (C) 2009 Texas Instruments, Inc * * Author: Miguel Aguilar + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #ifndef DAVINCI_KEYSCAN_H diff --git a/include/linux/platform_data/lcd-mipid.h b/include/linux/platform_data/lcd-mipid.h index 63f05eb238..8e52c65722 100644 --- a/include/linux/platform_data/lcd-mipid.h +++ b/include/linux/platform_data/lcd-mipid.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef __LCD_MIPID_H #define __LCD_MIPID_H diff --git a/include/linux/platform_data/leds-kirkwood-netxbig.h b/include/linux/platform_data/leds-kirkwood-netxbig.h new file mode 100644 index 0000000000..3c85a735c3 --- /dev/null +++ b/include/linux/platform_data/leds-kirkwood-netxbig.h @@ -0,0 +1,54 @@ +/* + * Platform data structure for netxbig LED driver + * + * This file is licensed under the terms of the GNU General Public + * License version 2. This program is licensed "as is" without any + * warranty of any kind, whether express or implied. 
+ */ + +#ifndef __LEDS_KIRKWOOD_NETXBIG_H +#define __LEDS_KIRKWOOD_NETXBIG_H + +struct netxbig_gpio_ext { + unsigned *addr; + int num_addr; + unsigned *data; + int num_data; + unsigned enable; +}; + +enum netxbig_led_mode { + NETXBIG_LED_OFF, + NETXBIG_LED_ON, + NETXBIG_LED_SATA, + NETXBIG_LED_TIMER1, + NETXBIG_LED_TIMER2, + NETXBIG_LED_MODE_NUM, +}; + +#define NETXBIG_LED_INVALID_MODE NETXBIG_LED_MODE_NUM + +struct netxbig_led_timer { + unsigned long delay_on; + unsigned long delay_off; + enum netxbig_led_mode mode; +}; + +struct netxbig_led { + const char *name; + const char *default_trigger; + int mode_addr; + int *mode_val; + int bright_addr; + int bright_max; +}; + +struct netxbig_led_platform_data { + struct netxbig_gpio_ext *gpio_ext; + struct netxbig_led_timer *timer; + int num_timer; + struct netxbig_led *leds; + int num_leds; +}; + +#endif /* __LEDS_KIRKWOOD_NETXBIG_H */ diff --git a/include/linux/platform_data/leds-kirkwood-ns2.h b/include/linux/platform_data/leds-kirkwood-ns2.h new file mode 100644 index 0000000000..eb8a6860e8 --- /dev/null +++ b/include/linux/platform_data/leds-kirkwood-ns2.h @@ -0,0 +1,38 @@ +/* + * Platform data structure for Network Space v2 LED driver + * + * This file is licensed under the terms of the GNU General Public + * License version 2. This program is licensed "as is" without any + * warranty of any kind, whether express or implied. 
+ */ + +#ifndef __LEDS_KIRKWOOD_NS2_H +#define __LEDS_KIRKWOOD_NS2_H + +enum ns2_led_modes { + NS_V2_LED_OFF, + NS_V2_LED_ON, + NS_V2_LED_SATA, +}; + +struct ns2_led_modval { + enum ns2_led_modes mode; + int cmd_level; + int slow_level; +}; + +struct ns2_led { + const char *name; + const char *default_trigger; + unsigned cmd; + unsigned slow; + int num_modes; + struct ns2_led_modval *modval; +}; + +struct ns2_led_platform_data { + int num_leds; + struct ns2_led *leds; +}; + +#endif /* __LEDS_KIRKWOOD_NS2_H */ diff --git a/include/linux/platform_data/leds-lm355x.h b/include/linux/platform_data/leds-lm355x.h index b1090487b4..b88724bb0b 100644 --- a/include/linux/platform_data/leds-lm355x.h +++ b/include/linux/platform_data/leds-lm355x.h @@ -1,7 +1,8 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright (C) 2012 Texas Instruments * + * License Terms: GNU General Public License v2 + * * Simple driver for Texas Instruments LM355x LED driver chip * * Author: G.Shark Jeong diff --git a/include/linux/platform_data/leds-lm3642.h b/include/linux/platform_data/leds-lm3642.h index 2490a2fb65..72d6ee6ade 100644 --- a/include/linux/platform_data/leds-lm3642.h +++ b/include/linux/platform_data/leds-lm3642.h @@ -1,7 +1,8 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright (C) 2012 Texas Instruments * +* License Terms: GNU General Public License v2 +* * Simple driver for Texas Instruments LM3642 LED driver chip * * Author: G.Shark Jeong diff --git a/include/linux/platform_data/leds-lp55xx.h b/include/linux/platform_data/leds-lp55xx.h index 3441064713..624ff9edad 100644 --- a/include/linux/platform_data/leds-lp55xx.h +++ b/include/linux/platform_data/leds-lp55xx.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * LP55XX Platform Data Header * @@ -6,32 +5,27 @@ * * Author: Milo(Woogyom) Kim * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * version 2 as published 
by the Free Software Foundation. + * * Derived from leds-lp5521.h, leds-lp5523.h */ #ifndef _LEDS_LP55XX_H #define _LEDS_LP55XX_H -#include -#include - /* Clock configuration */ #define LP55XX_CLOCK_AUTO 0 #define LP55XX_CLOCK_INT 1 #define LP55XX_CLOCK_EXT 2 -#define LP55XX_MAX_GROUPED_CHAN 4 - struct lp55xx_led_config { const char *name; const char *default_trigger; u8 chan_nr; u8 led_current; /* mA x10, 0 if led is not connected */ u8 max_current; - int num_colors; - unsigned int max_channel; - int color_id[LED_COLOR_ID_MAX]; - int output_num[LED_COLOR_ID_MAX]; }; struct lp55xx_predef_pattern { @@ -58,7 +52,7 @@ enum lp8501_pwr_sel { * @clock_mode : Input clock mode. LP55XX_CLOCK_AUTO or _INT or _EXT * @setup_resources : Platform specific function before enabling the chip * @release_resources : Platform specific function after disabling the chip - * @enable_gpiod : enable GPIO descriptor + * @enable : EN pin control by platform side * @patterns : Predefined pattern data for RGB channels * @num_patterns : Number of patterns * @update_config : Value of CONFIG register @@ -74,7 +68,7 @@ struct lp55xx_platform_data { u8 clock_mode; /* optional enable GPIO */ - struct gpio_desc *enable_gpiod; + int enable_gpio; /* Predefined pattern data */ struct lp55xx_predef_pattern *patterns; diff --git a/include/linux/platform_data/leds-omap.h b/include/linux/platform_data/leds-omap.h index dd1a3ec86f..56c9b2a0ad 100644 --- a/include/linux/platform_data/leds-omap.h +++ b/include/linux/platform_data/leds-omap.h @@ -1,7 +1,10 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright (C) 2006 Samsung Electronics * Kyungmin Park + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
*/ #ifndef ASMARM_ARCH_LED_H #define ASMARM_ARCH_LED_H diff --git a/include/linux/platform_data/leds-pca963x.h b/include/linux/platform_data/leds-pca963x.h new file mode 100644 index 0000000000..e731f00363 --- /dev/null +++ b/include/linux/platform_data/leds-pca963x.h @@ -0,0 +1,42 @@ +/* + * PCA963X LED chip driver. + * + * Copyright 2012 bct electronic GmbH + * Copyright 2013 Qtechnology A/S + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA + * 02110-1301 USA + */ + +#ifndef __LINUX_PCA963X_H +#define __LINUX_PCA963X_H +#include + +enum pca963x_outdrv { + PCA963X_OPEN_DRAIN, + PCA963X_TOTEM_POLE, /* aka push-pull */ +}; + +enum pca963x_blink_type { + PCA963X_SW_BLINK, + PCA963X_HW_BLINK, +}; + +struct pca963x_platform_data { + struct led_platform_data leds; + enum pca963x_outdrv outdrv; + enum pca963x_blink_type blink_type; +}; + +#endif /* __LINUX_PCA963X_H*/ diff --git a/include/linux/platform_data/leds-s3c24xx.h b/include/linux/platform_data/leds-s3c24xx.h index 64f8d14876..441a6f2906 100644 --- a/include/linux/platform_data/leds-s3c24xx.h +++ b/include/linux/platform_data/leds-s3c24xx.h @@ -1,16 +1,25 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright (c) 2006 Simtec Electronics * http://armlinux.simtec.co.uk/ * Ben Dooks * * S3C24XX - LEDs GPIO connector + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the 
GNU General Public License version 2 as + * published by the Free Software Foundation. */ #ifndef __LEDS_S3C24XX_H #define __LEDS_S3C24XX_H +#define S3C24XX_LEDF_ACTLOW (1<<0) /* LED is on when GPIO low */ +#define S3C24XX_LEDF_TRISTATE (1<<1) /* tristate to turn off */ + struct s3c24xx_led_platdata { + unsigned int gpio; + unsigned int flags; + char *name; char *def_trigger; }; diff --git a/include/linux/platform_data/lm3630a_bl.h b/include/linux/platform_data/lm3630a_bl.h index 530be93187..7538e38e27 100644 --- a/include/linux/platform_data/lm3630a_bl.h +++ b/include/linux/platform_data/lm3630a_bl.h @@ -1,7 +1,11 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * Simple driver for Texas Instruments LM3630A LED Flash driver chip * Copyright (C) 2012 Texas Instruments +* +* This program is free software; you can redistribute it and/or modify +* it under the terms of the GNU General Public License version 2 as +* published by the Free Software Foundation. +* */ #ifndef __LINUX_LM3630A_H @@ -34,11 +38,9 @@ enum lm3630a_ledb_ctrl { #define LM3630A_MAX_BRIGHTNESS 255 /* - *@leda_label : optional led a label. *@leda_init_brt : led a init brightness. 4~255 *@leda_max_brt : led a max brightness. 4~255 *@leda_ctrl : led a disable, enable linear, enable exponential - *@ledb_label : optional led b label. *@ledb_init_brt : led b init brightness. 4~255 *@ledb_max_brt : led b max brightness. 4~255 *@ledb_ctrl : led b disable, enable linear, enable exponential @@ -48,12 +50,10 @@ enum lm3630a_ledb_ctrl { struct lm3630a_platform_data { /* led a config. */ - const char *leda_label; int leda_init_brt; int leda_max_brt; enum lm3630a_leda_ctrl leda_ctrl; /* led b config. 
*/ - const char *ledb_label; int ledb_init_brt; int ledb_max_brt; enum lm3630a_ledb_ctrl ledb_ctrl; diff --git a/include/linux/platform_data/lm3639_bl.h b/include/linux/platform_data/lm3639_bl.h index 341f24051d..5234cd5ed1 100644 --- a/include/linux/platform_data/lm3639_bl.h +++ b/include/linux/platform_data/lm3639_bl.h @@ -1,7 +1,11 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * Simple driver for Texas Instruments LM3630 LED Flash driver chip * Copyright (C) 2012 Texas Instruments +* +* This program is free software; you can redistribute it and/or modify +* it under the terms of the GNU General Public License version 2 as +* published by the Free Software Foundation. +* */ #ifndef __LINUX_LM3639_H diff --git a/include/linux/platform_data/lp855x.h b/include/linux/platform_data/lp855x.h index ab222dd05b..1b2ba24e4e 100644 --- a/include/linux/platform_data/lp855x.h +++ b/include/linux/platform_data/lp855x.h @@ -1,8 +1,12 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * LP855x Backlight Driver * * Copyright (C) 2011 Texas Instruments + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * */ #ifndef _LP855X_H diff --git a/include/linux/platform_data/lp8727.h b/include/linux/platform_data/lp8727.h index c701a7b96f..47128a50e0 100644 --- a/include/linux/platform_data/lp8727.h +++ b/include/linux/platform_data/lp8727.h @@ -1,9 +1,12 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * LP8727 Micro/Mini USB IC with integrated charger * * Copyright (C) 2011 Texas Instruments * Copyright (C) 2011 National Semiconductor + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
*/ #ifndef _LP8727_H diff --git a/include/linux/platform_data/lp8755.h b/include/linux/platform_data/lp8755.h index 7bf4221d62..a7fd0776c9 100644 --- a/include/linux/platform_data/lp8755.h +++ b/include/linux/platform_data/lp8755.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * LP8755 High Performance Power Management Unit Driver:System Interface Driver * @@ -6,6 +5,11 @@ * * Author: Daniel(Geon Si) Jeong * G.Shark Jeong + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * */ #ifndef _LP8755_H diff --git a/include/linux/platform_data/lv5207lp.h b/include/linux/platform_data/lv5207lp.h index c9da8d4027..7dc4d9a219 100644 --- a/include/linux/platform_data/lv5207lp.h +++ b/include/linux/platform_data/lv5207lp.h @@ -1,6 +1,9 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * lv5207lp.h - Sanyo LV5207LP LEDs Driver + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. */ #ifndef __LV5207LP_H__ #define __LV5207LP_H__ diff --git a/include/linux/platform_data/macb.h b/include/linux/platform_data/macb.h new file mode 100644 index 0000000000..21b15f6fee --- /dev/null +++ b/include/linux/platform_data/macb.h @@ -0,0 +1,26 @@ +/* + * Copyright (C) 2004-2006 Atmel Corporation + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ +#ifndef __MACB_PDATA_H__ +#define __MACB_PDATA_H__ + +/** + * struct macb_platform_data - platform data for MACB Ethernet + * @phy_mask: phy mask passed when register the MDIO bus + * within the driver + * @phy_irq_pin: PHY IRQ + * @is_rmii: using RMII interface? 
+ * @rev_eth_addr: reverse Ethernet address byte order + */ +struct macb_platform_data { + u32 phy_mask; + int phy_irq_pin; + u8 is_rmii; + u8 rev_eth_addr; +}; + +#endif /* __MACB_PDATA_H__ */ diff --git a/include/linux/platform_data/max197.h b/include/linux/platform_data/max197.h index 03ef46f9cd..8da8f94ee1 100644 --- a/include/linux/platform_data/max197.h +++ b/include/linux/platform_data/max197.h @@ -1,11 +1,14 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * Maxim MAX197 A/D Converter Driver * * Copyright (c) 2012 Savoir-faire Linux Inc. * Vivien Didelot * - * For further information, see the Documentation/hwmon/max197.rst file. + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * For further information, see the Documentation/hwmon/max197 file. */ #ifndef _PDATA_MAX197_H diff --git a/include/linux/platform_data/max3421-hcd.h b/include/linux/platform_data/max3421-hcd.h index 5947a6f43d..0303d19700 100644 --- a/include/linux/platform_data/max3421-hcd.h +++ b/include/linux/platform_data/max3421-hcd.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ /* * Copyright (c) 2014 eGauge Systems LLC * Contributed by David Mosberger-Tang diff --git a/include/linux/platform_data/max6697.h b/include/linux/platform_data/max6697.h index 6fbb700055..ed9d3b3daf 100644 --- a/include/linux/platform_data/max6697.h +++ b/include/linux/platform_data/max6697.h @@ -1,7 +1,10 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * max6697.h * Copyright (c) 2012 Guenter Roeck + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
*/ #ifndef MAX6697_H diff --git a/include/linux/platform_data/mdio-gpio.h b/include/linux/platform_data/mdio-gpio.h index 13874fa6e7..11f00cdabe 100644 --- a/include/linux/platform_data/mdio-gpio.h +++ b/include/linux/platform_data/mdio-gpio.h @@ -1,14 +1,33 @@ -/* SPDX-License-Identifier: GPL-2.0 */ /* - * MDIO-GPIO bus platform data structure + * MDIO-GPIO bus platform data structures + * + * Copyright (C) 2008, Paulius Zaleckas + * + * This file is licensed under the terms of the GNU General Public License + * version 2. This program is licensed "as is" without any warranty of any + * kind, whether express or implied. */ -#ifndef __LINUX_MDIO_GPIO_PDATA_H -#define __LINUX_MDIO_GPIO_PDATA_H +#ifndef __LINUX_MDIO_GPIO_H +#define __LINUX_MDIO_GPIO_H + +#include struct mdio_gpio_platform_data { + /* GPIO numbers for bus pins */ + unsigned int mdc; + unsigned int mdio; + unsigned int mdo; + + bool mdc_active_low; + bool mdio_active_low; + bool mdo_active_low; + u32 phy_mask; u32 phy_ignore_ta_mask; + int irqs[PHY_MAX_ADDR]; + /* reset callback */ + int (*reset)(struct mii_bus *bus); }; -#endif /* __LINUX_MDIO_GPIO_PDATA_H */ +#endif /* __LINUX_MDIO_GPIO_H */ diff --git a/include/linux/platform_data/media/camera-mx2.h b/include/linux/platform_data/media/camera-mx2.h index 8cfa76b6e1..7ded6f1f74 100644 --- a/include/linux/platform_data/media/camera-mx2.h +++ b/include/linux/platform_data/media/camera-mx2.h @@ -1,10 +1,23 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * mx2-cam.h - i.MX27/i.MX25 camera driver header file * * Copyright (C) 2003, Intel Corporation * Copyright (C) 2008, Sascha Hauer * Copyright (C) 2010, Baruch Siach + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. 
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. */ #ifndef __MACH_MX2_CAM_H_ diff --git a/include/linux/platform_data/media/camera-mx3.h b/include/linux/platform_data/media/camera-mx3.h index 781c004e55..a910dadc82 100644 --- a/include/linux/platform_data/media/camera-mx3.h +++ b/include/linux/platform_data/media/camera-mx3.h @@ -1,8 +1,17 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * mx3_camera.h - i.MX3x camera driver header file * * Copyright (C) 2008, Guennadi Liakhovetski, DENX Software Engineering, + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
*/ #ifndef _MX3_CAMERA_H_ diff --git a/include/linux/platform_data/media/camera-pxa.h b/include/linux/platform_data/media/camera-pxa.h index 846a47b8c5..ce5d90e1a6 100644 --- a/include/linux/platform_data/media/camera-pxa.h +++ b/include/linux/platform_data/media/camera-pxa.h @@ -1,10 +1,22 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ /* camera.h - PXA camera driver header file Copyright (C) 2003, Intel Corporation Copyright (C) 2008, Guennadi Liakhovetski + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ #ifndef __ASM_ARCH_CAMERA_H_ diff --git a/include/linux/platform_data/media/coda.h b/include/linux/platform_data/media/coda.h index 293b61b60c..6ad4410d9e 100644 --- a/include/linux/platform_data/media/coda.h +++ b/include/linux/platform_data/media/coda.h @@ -1,6 +1,10 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * Copyright (C) 2013 Philipp Zabel, Pengutronix + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. 
*/ #ifndef PLATFORM_CODA_H #define PLATFORM_CODA_H diff --git a/include/linux/platform_data/media/gpio-ir-recv.h b/include/linux/platform_data/media/gpio-ir-recv.h new file mode 100644 index 0000000000..0c298f569d --- /dev/null +++ b/include/linux/platform_data/media/gpio-ir-recv.h @@ -0,0 +1,23 @@ +/* Copyright (c) 2012, Code Aurora Forum. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#ifndef __GPIO_IR_RECV_H__ +#define __GPIO_IR_RECV_H__ + +struct gpio_ir_recv_platform_data { + int gpio_nr; + bool active_low; + u64 allowed_protos; + const char *map_name; +}; + +#endif /* __GPIO_IR_RECV_H__ */ diff --git a/include/linux/platform_data/media/ir-rx51.h b/include/linux/platform_data/media/ir-rx51.h new file mode 100644 index 0000000000..812d873078 --- /dev/null +++ b/include/linux/platform_data/media/ir-rx51.h @@ -0,0 +1,8 @@ +#ifndef _LIRC_RX51_H +#define _LIRC_RX51_H + +struct lirc_rx51_platform_data { + int(*set_max_mpu_wakeup_lat)(struct device *dev, long t); +}; + +#endif diff --git a/include/linux/platform_data/media/mmp-camera.h b/include/linux/platform_data/media/mmp-camera.h index 53adaab64f..7611963a25 100644 --- a/include/linux/platform_data/media/mmp-camera.h +++ b/include/linux/platform_data/media/mmp-camera.h @@ -1,25 +1,9 @@ -/* SPDX-License-Identifier: GPL-2.0 */ /* * Information for the Marvell Armada MMP camera */ -#include - -enum dphy3_algo { - DPHY3_ALGO_DEFAULT = 0, - DPHY3_ALGO_PXA910, - DPHY3_ALGO_PXA2128 -}; - struct mmp_camera_platform_data { - enum v4l2_mbus_type bus_type; - int mclk_src; /* which 
clock source the MCLK derives from */ - int mclk_div; /* Clock Divider Value for MCLK */ - /* - * MIPI support - */ - int dphy[3]; /* DPHY: CSI2_DPHY3, CSI2_DPHY5, CSI2_DPHY6 */ - enum dphy3_algo dphy3_algo; /* algos for calculate CSI2_DPHY3 */ - int lane; /* ccic used lane number; 0 means DVP mode */ - int lane_clk; + struct platform_device *i2c_device; + int sensor_power_gpio; + int sensor_reset_gpio; }; diff --git a/include/linux/platform_data/media/omap1_camera.h b/include/linux/platform_data/media/omap1_camera.h new file mode 100644 index 0000000000..819767cf04 --- /dev/null +++ b/include/linux/platform_data/media/omap1_camera.h @@ -0,0 +1,35 @@ +/* + * Header for V4L2 SoC Camera driver for OMAP1 Camera Interface + * + * Copyright (C) 2010, Janusz Krzysztofik + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#ifndef __MEDIA_OMAP1_CAMERA_H_ +#define __MEDIA_OMAP1_CAMERA_H_ + +#include + +#define OMAP1_CAMERA_IOSIZE 0x1c + +enum omap1_cam_vb_mode { + OMAP1_CAM_DMA_CONTIG = 0, + OMAP1_CAM_DMA_SG, +}; + +#define OMAP1_CAMERA_MIN_BUF_COUNT(x) ((x) == OMAP1_CAM_DMA_CONTIG ? 
3 : 2) + +struct omap1_cam_platform_data { + unsigned long camexclk_khz; + unsigned long lclk_khz_max; + unsigned long flags; +}; + +#define OMAP1_CAMERA_LCLK_RISING BIT(0) +#define OMAP1_CAMERA_RST_LOW BIT(1) +#define OMAP1_CAMERA_RST_HIGH BIT(2) + +#endif /* __MEDIA_OMAP1_CAMERA_H_ */ diff --git a/include/linux/platform_data/media/omap4iss.h b/include/linux/platform_data/media/omap4iss.h index 2a511a8fcd..0d7620db5e 100644 --- a/include/linux/platform_data/media/omap4iss.h +++ b/include/linux/platform_data/media/omap4iss.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef ARCH_ARM_PLAT_OMAP4_ISS_H #define ARCH_ARM_PLAT_OMAP4_ISS_H diff --git a/include/linux/platform_data/media/s5p_hdmi.h b/include/linux/platform_data/media/s5p_hdmi.h index 457321e917..bb9cacb0cb 100644 --- a/include/linux/platform_data/media/s5p_hdmi.h +++ b/include/linux/platform_data/media/s5p_hdmi.h @@ -1,9 +1,13 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * Driver header for S5P HDMI chip. * * Copyright (c) 2011 Samsung Electronics, Co. Ltd * Contact: Tomasz Stanislawski + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. 
*/ #ifndef S5P_HDMI_H diff --git a/include/linux/platform_data/media/si4713.h b/include/linux/platform_data/media/si4713.h index 13b3eb7a90..932668ad54 100644 --- a/include/linux/platform_data/media/si4713.h +++ b/include/linux/platform_data/media/si4713.h @@ -31,7 +31,7 @@ struct si4713_platform_data { */ struct si4713_rnl { __u32 index; /* modulator index */ - __u32 frequency; /* frequency to perform rnl measurement */ + __u32 frequency; /* frequency to peform rnl measurement */ __s32 rnl; /* result of measurement in dBuV */ __u32 reserved[4]; /* drivers and apps must init this to 0 */ }; @@ -40,7 +40,7 @@ struct si4713_rnl { * This is the ioctl number to query for rnl. Users must pass a * struct si4713_rnl pointer specifying desired frequency in 'frequency' field * following driver capabilities (i.e V4L2_TUNER_CAP_LOW). - * Driver must return measured value in the same structure, filling 'rnl' field. + * Driver must return measured value in the same struture, filling 'rnl' field. */ #define SI4713_IOC_MEASURE_RNL _IOWR('V', BASE_VIDIOC_PRIVATE + 0, \ struct si4713_rnl) diff --git a/include/linux/platform_data/media/sii9234.h b/include/linux/platform_data/media/sii9234.h new file mode 100644 index 0000000000..6a4a809fe9 --- /dev/null +++ b/include/linux/platform_data/media/sii9234.h @@ -0,0 +1,24 @@ +/* + * Driver header for SII9234 MHL converter chip. + * + * Copyright (c) 2011 Samsung Electronics, Co. Ltd + * Contact: Tomasz Stanislawski + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. 
+ */ + +#ifndef SII9234_H +#define SII9234_H + +/** + * @gpio_n_reset: GPIO driving nRESET pin + */ + +struct sii9234_platform_data { + int gpio_n_reset; +}; + +#endif /* SII9234_H */ diff --git a/include/linux/platform_data/media/soc_camera_platform.h b/include/linux/platform_data/media/soc_camera_platform.h new file mode 100644 index 0000000000..1e5065dab4 --- /dev/null +++ b/include/linux/platform_data/media/soc_camera_platform.h @@ -0,0 +1,83 @@ +/* + * Generic Platform Camera Driver Header + * + * Copyright (C) 2008 Magnus Damm + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#ifndef __SOC_CAMERA_H__ +#define __SOC_CAMERA_H__ + +#include +#include +#include + +struct device; + +struct soc_camera_platform_info { + const char *format_name; + unsigned long format_depth; + struct v4l2_mbus_framefmt format; + unsigned long mbus_param; + enum v4l2_mbus_type mbus_type; + struct soc_camera_device *icd; + int (*set_capture)(struct soc_camera_platform_info *info, int enable); +}; + +static inline void soc_camera_platform_release(struct platform_device **pdev) +{ + *pdev = NULL; +} + +static inline int soc_camera_platform_add(struct soc_camera_device *icd, + struct platform_device **pdev, + struct soc_camera_link *plink, + void (*release)(struct device *dev), + int id) +{ + struct soc_camera_subdev_desc *ssdd = + (struct soc_camera_subdev_desc *)plink; + struct soc_camera_platform_info *info = ssdd->drv_priv; + int ret; + + if (&icd->sdesc->subdev_desc != ssdd) + return -ENODEV; + + if (*pdev) + return -EBUSY; + + *pdev = platform_device_alloc("soc_camera_platform", id); + if (!*pdev) + return -ENOMEM; + + info->icd = icd; + + (*pdev)->dev.platform_data = info; + (*pdev)->dev.release = release; + + ret = platform_device_add(*pdev); + if (ret < 0) { + platform_device_put(*pdev); + *pdev = NULL; + info->icd = NULL; + } + + 
return ret; +} + +static inline void soc_camera_platform_del(const struct soc_camera_device *icd, + struct platform_device *pdev, + const struct soc_camera_link *plink) +{ + const struct soc_camera_subdev_desc *ssdd = + (const struct soc_camera_subdev_desc *)plink; + if (&icd->sdesc->subdev_desc != ssdd || !pdev) + return; + + platform_device_unregister(pdev); +} + +#endif /* __SOC_CAMERA_H__ */ diff --git a/include/linux/platform_data/media/timb_radio.h b/include/linux/platform_data/media/timb_radio.h index 109a0d4a4f..a40a6a348d 100644 --- a/include/linux/platform_data/media/timb_radio.h +++ b/include/linux/platform_data/media/timb_radio.h @@ -1,7 +1,19 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * timb_radio.h Platform struct for the Timberdale radio driver * Copyright (c) 2009 Intel Corporation + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 
*/ #ifndef _TIMB_RADIO_ diff --git a/include/linux/platform_data/media/timb_video.h b/include/linux/platform_data/media/timb_video.h index 38764cc09b..70ae43970a 100644 --- a/include/linux/platform_data/media/timb_video.h +++ b/include/linux/platform_data/media/timb_video.h @@ -1,7 +1,19 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * timb_video.h Platform struct for the Timberdale video driver * Copyright (c) 2009-2010 Intel Corporation + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ #ifndef _TIMB_VIDEO_ diff --git a/include/linux/platform_data/mfd-mcp-sa11x0.h b/include/linux/platform_data/mfd-mcp-sa11x0.h index b589e61bbc..747cd6baf7 100644 --- a/include/linux/platform_data/mfd-mcp-sa11x0.h +++ b/include/linux/platform_data/mfd-mcp-sa11x0.h @@ -1,6 +1,9 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright (C) 2005 Russell King. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
*/ #ifndef __MFD_MCP_SA11X0_H #define __MFD_MCP_SA11X0_H diff --git a/include/linux/platform_data/mmc-davinci.h b/include/linux/platform_data/mmc-davinci.h index 87a8bed3b6..9cea4ee377 100644 --- a/include/linux/platform_data/mmc-davinci.h +++ b/include/linux/platform_data/mmc-davinci.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ /* * Board-specific MMC configuration */ diff --git a/include/linux/platform_data/mmc-esdhc-imx.h b/include/linux/platform_data/mmc-esdhc-imx.h index cba1184b36..7daa78a2f3 100644 --- a/include/linux/platform_data/mmc-esdhc-imx.h +++ b/include/linux/platform_data/mmc-esdhc-imx.h @@ -1,6 +1,10 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* - * Copyright 2010 Wolfram Sang + * Copyright 2010 Wolfram Sang + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; version 2 + * of the License. */ #ifndef __ASM_ARCH_IMX_ESDHC_H @@ -26,17 +30,22 @@ enum cd_types { * * ESDHC_WP(CD)_CONTROLLER type is not available on i.MX25/35. 
* + * @wp_gpio: gpio for write_protect + * @cd_gpio: gpio for card_detect interrupt * @wp_type: type of write_protect method (see wp_types enum above) * @cd_type: type of card_detect method (see cd_types enum above) + * @support_vsel: indicate it supports 1.8v switching */ struct esdhc_platform_data { + unsigned int wp_gpio; + unsigned int cd_gpio; enum wp_types wp_type; enum cd_types cd_type; int max_bus_width; + bool support_vsel; unsigned int delay_line; unsigned int tuning_step; /* The delay cell steps in tuning procedure */ unsigned int tuning_start_tap; /* The start delay cell point in tuning procedure */ - unsigned int strobe_dll_delay_target; /* The delay cell for strobe pad (read clock) */ }; #endif /* __ASM_ARCH_IMX_ESDHC_H */ diff --git a/include/linux/platform_data/mmc-mxcmmc.h b/include/linux/platform_data/mmc-mxcmmc.h index ac67735131..29115f405a 100644 --- a/include/linux/platform_data/mmc-mxcmmc.h +++ b/include/linux/platform_data/mmc-mxcmmc.h @@ -1,8 +1,6 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef ASMARM_ARCH_MMC_H #define ASMARM_ARCH_MMC_H -#include #include struct device; diff --git a/include/linux/platform_data/mmc-omap.h b/include/linux/platform_data/mmc-omap.h index 91051e9907..9294692914 100644 --- a/include/linux/platform_data/mmc-omap.h +++ b/include/linux/platform_data/mmc-omap.h @@ -1,8 +1,11 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * MMC definitions for OMAP2 * * Copyright (C) 2006 Nokia Corporation + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
*/ #define OMAP_MMC_MAX_SLOTS 2 @@ -108,13 +111,11 @@ struct omap_mmc_platform_data { const char *name; u32 ocr_mask; - /* Card detection */ + /* Card detection IRQs */ + int card_detect_irq; int (*card_detect)(struct device *dev, int slot); unsigned int ban_openended:1; } slots[OMAP_MMC_MAX_SLOTS]; }; - -extern void omap_mmc_notify_cover_event(struct device *dev, int slot, - int is_closed); diff --git a/include/linux/platform_data/mmc-pxamci.h b/include/linux/platform_data/mmc-pxamci.h index 7e44e84e71..1706b3597c 100644 --- a/include/linux/platform_data/mmc-pxamci.h +++ b/include/linux/platform_data/mmc-pxamci.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef ASMARM_ARCH_MMC_H #define ASMARM_ARCH_MMC_H @@ -15,7 +14,11 @@ struct pxamci_platform_data { int (*get_ro)(struct device *); int (*setpower)(struct device *, unsigned int); void (*exit)(struct device *, void *); + int gpio_card_detect; /* gpio detecting card insertion */ + int gpio_card_ro; /* gpio detecting read only toggle */ bool gpio_card_ro_invert; /* gpio ro is inverted */ + int gpio_power; /* gpio powering up MMC bus */ + bool gpio_power_invert; /* gpio power is inverted */ }; extern void pxa_set_mci_info(struct pxamci_platform_data *info); diff --git a/include/linux/platform_data/mmc-s3cmci.h b/include/linux/platform_data/mmc-s3cmci.h index bacb86db31..c42d317119 100644 --- a/include/linux/platform_data/mmc-s3cmci.h +++ b/include/linux/platform_data/mmc-s3cmci.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _ARCH_MCI_H #define _ARCH_MCI_H @@ -7,6 +6,7 @@ * @no_wprotect: Set this to indicate there is no write-protect switch. * @no_detect: Set this if there is no detect switch. * @wprotect_invert: Invert the default sense of the write protect switch. + * @detect_invert: Invert the default sense of the write protect switch. * @use_dma: Set to allow the use of DMA. * @gpio_detect: GPIO number for the card detect line. 
* @gpio_wprotect: GPIO number for the write protect line. @@ -30,12 +30,14 @@ struct s3c24xx_mci_pdata { unsigned int no_wprotect:1; unsigned int no_detect:1; unsigned int wprotect_invert:1; + unsigned int detect_invert:1; /* set => detect active high */ unsigned int use_dma:1; + unsigned int gpio_detect; + unsigned int gpio_wprotect; unsigned long ocr_avail; void (*set_power)(unsigned char power_mode, unsigned short vdd); - struct gpio_desc *bus[6]; }; /** @@ -45,7 +47,6 @@ struct s3c24xx_mci_pdata { * Copy the platform data supplied by @pdata so that this can be marked * __initdata. */ -extern void s3c24xx_mci_def_set_power(unsigned char power_mode, unsigned short vdd); extern void s3c24xx_mci_set_platdata(struct s3c24xx_mci_pdata *pdata); #endif /* _ARCH_NCI_H */ diff --git a/include/linux/platform_data/mmc-sdhci-s3c.h b/include/linux/platform_data/mmc-sdhci-s3c.h index 74a54eeb27..249f02387a 100644 --- a/include/linux/platform_data/mmc-sdhci-s3c.h +++ b/include/linux/platform_data/mmc-sdhci-s3c.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef __PLATFORM_DATA_SDHCI_S3C_H #define __PLATFORM_DATA_SDHCI_S3C_H diff --git a/include/linux/platform_data/mmp_audio.h b/include/linux/platform_data/mmp_audio.h index 83428d8ee1..0f25d165ab 100644 --- a/include/linux/platform_data/mmp_audio.h +++ b/include/linux/platform_data/mmp_audio.h @@ -1,8 +1,12 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * MMP Platform AUDIO Management * * Copyright (c) 2011 Marvell Semiconductors Inc. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ * */ #ifndef MMP_AUDIO_H diff --git a/include/linux/platform_data/mmp_dma.h b/include/linux/platform_data/mmp_dma.h index 030241cb9c..d1397c8ed9 100644 --- a/include/linux/platform_data/mmp_dma.h +++ b/include/linux/platform_data/mmp_dma.h @@ -1,20 +1,20 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * MMP Platform DMA Management * * Copyright (c) 2011 Marvell Semiconductors Inc. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * */ #ifndef MMP_DMA_H #define MMP_DMA_H -struct dma_slave_map; - struct mmp_dma_platdata { int dma_channels; int nb_requestors; - int slave_map_cnt; - const struct dma_slave_map *slave_map; }; #endif /* MMP_DMA_H */ diff --git a/include/linux/platform_data/mouse-pxa930_trkball.h b/include/linux/platform_data/mouse-pxa930_trkball.h index ba0ac7a30d..5e0789bc47 100644 --- a/include/linux/platform_data/mouse-pxa930_trkball.h +++ b/include/linux/platform_data/mouse-pxa930_trkball.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef __ASM_ARCH_PXA930_TRKBALL_H #define __ASM_ARCH_PXA930_TRKBALL_H diff --git a/include/linux/platform_data/mtd-davinci-aemif.h b/include/linux/platform_data/mtd-davinci-aemif.h index a49826214a..97948ac2bb 100644 --- a/include/linux/platform_data/mtd-davinci-aemif.h +++ b/include/linux/platform_data/mtd-davinci-aemif.h @@ -1,7 +1,7 @@ /* * TI DaVinci AEMIF support * - * Copyright 2010 (C) Texas Instruments, Inc. https://www.ti.com/ + * Copyright 2010 (C) Texas Instruments, Inc. http://www.ti.com/ * * This file is licensed under the terms of the GNU General Public License * version 2. 
This program is licensed "as is" without any warranty of any @@ -33,4 +33,5 @@ struct davinci_aemif_timing { u8 ta; }; +int davinci_aemif_setup(struct platform_device *pdev); #endif diff --git a/include/linux/platform_data/mtd-davinci.h b/include/linux/platform_data/mtd-davinci.h index dd474dd448..1cf555aef8 100644 --- a/include/linux/platform_data/mtd-davinci.h +++ b/include/linux/platform_data/mtd-davinci.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * mach-davinci/nand.h * @@ -10,12 +9,26 @@ * Dirk Behme * * -------------------------------------------------------------------------- + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ #ifndef __ARCH_ARM_DAVINCI_NAND_H #define __ARCH_ARM_DAVINCI_NAND_H -#include +#include #define NANDFCR_OFFSET 0x60 #define NANDFSR_OFFSET 0x64 @@ -43,16 +56,6 @@ struct davinci_nand_pdata { /* platform_data */ uint32_t mask_ale; uint32_t mask_cle; - /* - * 0-indexed chip-select number of the asynchronous - * interface to which the NAND device has been connected. - * - * So, if you have NAND connected to CS3 of DA850, you - * will pass '1' here. Since the asynchronous interface - * on DA850 starts from CS2. 
- */ - uint32_t core_chipsel; - /* for packages using two chipselects */ uint32_t mask_chipsel; @@ -60,16 +63,15 @@ struct davinci_nand_pdata { /* platform_data */ struct mtd_partition *parts; unsigned nr_parts; - /* none == NAND_ECC_ENGINE_TYPE_NONE (strongly *not* advised!!) - * soft == NAND_ECC_ENGINE_TYPE_SOFT - * else == NAND_ECC_ENGINE_TYPE_ON_HOST, according to ecc_bits + /* none == NAND_ECC_NONE (strongly *not* advised!!) + * soft == NAND_ECC_SOFT + * else == NAND_ECC_HW, according to ecc_bits * * All DaVinci-family chips support 1-bit hardware ECC. * Newer ones also support 4-bit ECC, but are awkward * using it with large page chips. */ - enum nand_ecc_engine_type engine_type; - enum nand_ecc_placement ecc_placement; + nand_ecc_modes_t ecc_mode; u8 ecc_bits; /* e.g. NAND_BUSWIDTH_16 */ diff --git a/include/linux/platform_data/mtd-mxc_nand.h b/include/linux/platform_data/mtd-mxc_nand.h index d1230030c6..6bb96ef160 100644 --- a/include/linux/platform_data/mtd-mxc_nand.h +++ b/include/linux/platform_data/mtd-mxc_nand.h @@ -1,7 +1,20 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * Copyright 2004-2007 Freescale Semiconductor, Inc. All Rights Reserved. * Copyright 2008 Sascha Hauer, kernel@pengutronix.de + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, + * MA 02110-1301, USA. 
*/ #ifndef __ASM_ARCH_NAND_H diff --git a/include/linux/platform_data/mtd-nand-omap2.h b/include/linux/platform_data/mtd-nand-omap2.h index de6ada7391..17d57a18ba 100644 --- a/include/linux/platform_data/mtd-nand-omap2.h +++ b/include/linux/platform_data/mtd-nand-omap2.h @@ -1,6 +1,9 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright (C) 2006 Micron Technology Inc. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. */ #ifndef _MTD_NAND_OMAP2_H @@ -60,5 +63,24 @@ struct gpmc_nand_regs { void __iomem *gpmc_bch_result4[GPMC_BCH_NUM_REMAINDER]; void __iomem *gpmc_bch_result5[GPMC_BCH_NUM_REMAINDER]; void __iomem *gpmc_bch_result6[GPMC_BCH_NUM_REMAINDER]; + /* Deprecated. Do not use */ + void __iomem *gpmc_status; +}; + +struct omap_nand_platform_data { + int cs; + struct mtd_partition *parts; + int nr_parts; + bool flash_bbt; + enum nand_io xfer_type; + int devsize; + enum omap_ecc ecc_opt; + + struct device_node *elm_of_node; + + /* deprecated */ + struct gpmc_nand_regs reg; + struct device_node *of_node; + bool dev_ready; }; #endif diff --git a/include/linux/platform_data/mtd-nand-pxa3xx.h b/include/linux/platform_data/mtd-nand-pxa3xx.h index 4fd0f592a2..394d15597d 100644 --- a/include/linux/platform_data/mtd-nand-pxa3xx.h +++ b/include/linux/platform_data/mtd-nand-pxa3xx.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef __ASM_ARCH_PXA3XX_NAND_H #define __ASM_ARCH_PXA3XX_NAND_H @@ -6,22 +5,41 @@ #include /* - * Current pxa3xx_nand controller has two chip select which both be workable but - * historically all platforms remaining on platform data used only one. Switch - * to device tree if you need more. + * Current pxa3xx_nand controller has two chip select which + * both be workable. 
+ * + * Notice should be taken that: + * When you want to use this feature, you should not enable the + * keep configuration feature, for two chip select could be + * attached with different nand chip. The different page size + * and timing requirement make the keep configuration impossible. */ + +/* The max num of chip select current support */ +#define NUM_CHIP_SELECT (2) struct pxa3xx_nand_platform_data { - /* Keep OBM/bootloader NFC timing configuration */ - bool keep_config; - /* Use a flash-based bad block table */ - bool flash_bbt; - /* Requested ECC strength and ECC step size */ + + /* the data flash bus is shared between the Static Memory + * Controller and the Data Flash Controller, the arbiter + * controls the ownership of the bus + */ + int enable_arbiter; + + /* allow platform code to keep OBM/bootloader defined NFC config */ + int keep_config; + + /* indicate how many chip selects will be used */ + int num_cs; + + /* use an flash-based bad block table */ + bool flash_bbt; + + /* requested ECC strength and ECC step size */ int ecc_strength, ecc_step_size; - /* Partitions */ - const struct mtd_partition *parts; - unsigned int nr_parts; + + const struct mtd_partition *parts[NUM_CHIP_SELECT]; + unsigned int nr_parts[NUM_CHIP_SELECT]; }; extern void pxa3xx_set_nand_info(struct pxa3xx_nand_platform_data *info); - #endif /* __ASM_ARCH_PXA3XX_NAND_H */ diff --git a/include/linux/platform_data/mtd-nand-s3c2410.h b/include/linux/platform_data/mtd-nand-s3c2410.h index 25390fc3e7..c55e42ee57 100644 --- a/include/linux/platform_data/mtd-nand-s3c2410.h +++ b/include/linux/platform_data/mtd-nand-s3c2410.h @@ -1,18 +1,20 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright (c) 2004 Simtec Electronics * Ben Dooks * * S3C2410 - NAND device controller platform_device info + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software 
Foundation. */ #ifndef __MTD_NAND_S3C2410_H #define __MTD_NAND_S3C2410_H -#include - /** * struct s3c2410_nand_set - define a set of one or more nand chips + * @disable_ecc: Entirely disable ECC - Dangerous * @flash_bbt: Openmoko u-boot can create a Bad Block Table * Setting this flag will allow the kernel to * look for it at boot time and also skip the NAND @@ -29,6 +31,7 @@ * a warning at boot time. */ struct s3c2410_nand_set { + unsigned int disable_ecc:1; unsigned int flash_bbt:1; unsigned int options; @@ -37,7 +40,6 @@ struct s3c2410_nand_set { char *name; int *nr_map; struct mtd_partition *partitions; - struct device_node *of_node; }; struct s3c2410_platform_nand { @@ -49,8 +51,6 @@ struct s3c2410_platform_nand { unsigned int ignore_unset_ecc:1; - enum nand_ecc_engine_type engine_type; - int nr_sets; struct s3c2410_nand_set *sets; diff --git a/include/linux/platform_data/mtd-onenand-omap2.h b/include/linux/platform_data/mtd-onenand-omap2.h new file mode 100644 index 0000000000..56ff0e6f5a --- /dev/null +++ b/include/linux/platform_data/mtd-onenand-omap2.h @@ -0,0 +1,34 @@ +/* + * Copyright (C) 2006 Nokia Corporation + * Author: Juha Yrjola + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */ + +#ifndef __MTD_ONENAND_OMAP2_H +#define __MTD_ONENAND_OMAP2_H + +#include +#include + +#define ONENAND_SYNC_READ (1 << 0) +#define ONENAND_SYNC_READWRITE (1 << 1) +#define ONENAND_IN_OMAP34XX (1 << 2) + +struct omap_onenand_platform_data { + int cs; + int gpio_irq; + struct mtd_partition *parts; + int nr_parts; + int (*onenand_setup)(void __iomem *, int *freq_ptr); + int dma_channel; + u8 flags; + u8 regulator_can_sleep; + u8 skip_initial_unlocking; + + /* for passing the partitions */ + struct device_node *of_node; +}; +#endif diff --git a/include/linux/platform_data/mtd-orion_nand.h b/include/linux/platform_data/mtd-orion_nand.h index 34828eb859..a7ce77c7c1 100644 --- a/include/linux/platform_data/mtd-orion_nand.h +++ b/include/linux/platform_data/mtd-orion_nand.h @@ -12,6 +12,7 @@ */ struct orion_nand_data { struct mtd_partition *parts; + int (*dev_ready)(struct mtd_info *mtd); u32 nr_parts; u8 ale; /* address line number connected to ALE */ u8 cle; /* address line number connected to CLE */ diff --git a/include/linux/platform_data/mv_usb.h b/include/linux/platform_data/mv_usb.h index 20d239c02b..98b7925f1a 100644 --- a/include/linux/platform_data/mv_usb.h +++ b/include/linux/platform_data/mv_usb.h @@ -1,11 +1,23 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * Copyright (C) 2011 Marvell International Ltd. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. 
*/ #ifndef __MV_PLATFORM_USB_H #define __MV_PLATFORM_USB_H +enum pxa_ehci_type { + EHCI_UNDEFINED = 0, + PXA_U2OEHCI, /* pxa 168, 9xx */ + PXA_SPH, /* pxa 168, 9xx SPH */ + MMP3_HSIC, /* mmp3 hsic */ + MMP3_FSIC, /* mmp3 fsic */ +}; + enum { MV_USB_MODE_OTG, MV_USB_MODE_HOST, @@ -36,5 +48,6 @@ struct mv_usb_platform_data { int (*phy_init)(void __iomem *regbase); void (*phy_deinit)(void __iomem *regbase); int (*set_vbus)(unsigned int vbus); + int (*private_init)(void __iomem *opregs, void __iomem *phyregs); }; #endif diff --git a/include/linux/platform_data/net-cw1200.h b/include/linux/platform_data/net-cw1200.h index c510734405..c6fbc3ce4a 100644 --- a/include/linux/platform_data/net-cw1200.h +++ b/include/linux/platform_data/net-cw1200.h @@ -1,8 +1,8 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright (C) ST-Ericsson SA 2011 * * Author: Dmitry Tarnyagin + * License terms: GNU General Public License (GPL) version 2 */ #ifndef CW1200_PLAT_H_INCLUDED diff --git a/include/linux/platform_data/nfcmrvl.h b/include/linux/platform_data/nfcmrvl.h index 9e75ac8d19..a6f9d633f5 100644 --- a/include/linux/platform_data/nfcmrvl.h +++ b/include/linux/platform_data/nfcmrvl.h @@ -23,7 +23,7 @@ struct nfcmrvl_platform_data { */ /* GPIO that is wired to RESET_N signal */ - int reset_n_io; + unsigned int reset_n_io; /* Tell if transport is muxed in HCI one */ unsigned int hci_muxed; diff --git a/include/linux/platform_data/ntc_thermistor.h b/include/linux/platform_data/ntc_thermistor.h index b324d03e58..698d0d59db 100644 --- a/include/linux/platform_data/ntc_thermistor.h +++ b/include/linux/platform_data/ntc_thermistor.h @@ -1,9 +1,22 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * ntc_thermistor.h - NTC Thermistors * * Copyright (C) 2010 Samsung Electronics * MyungJoo Ham + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either 
version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #ifndef _LINUX_NTC_H #define _LINUX_NTC_H @@ -11,11 +24,10 @@ struct iio_channel; enum ntc_thermistor_type { - TYPE_B57330V2103, - TYPE_B57891S0103, TYPE_NCPXXWB473, - TYPE_NCPXXWF104, TYPE_NCPXXWL333, + TYPE_B57330V2103, + TYPE_NCPXXWF104, TYPE_NCPXXXH103, }; @@ -29,7 +41,7 @@ struct ntc_thermistor_platform_data { * read_uV() * * How to setup pullup_ohm, pulldown_ohm, and connect is - * described at Documentation/hwmon/ntc_thermistor.rst + * described at Documentation/hwmon/ntc_thermistor * * pullup/down_ohm: 0 for infinite / not-connected * diff --git a/include/linux/platform_data/nxp-nci.h b/include/linux/platform_data/nxp-nci.h new file mode 100644 index 0000000000..d6ed28679b --- /dev/null +++ b/include/linux/platform_data/nxp-nci.h @@ -0,0 +1,27 @@ +/* + * Generic platform data for the NXP NCI NFC chips. + * + * Copyright (C) 2014 NXP Semiconductors All rights reserved. + * + * Authors: Clément Perrochaud + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#ifndef _NXP_NCI_H_ +#define _NXP_NCI_H_ + +struct nxp_nci_nfc_platform_data { + unsigned int gpio_en; + unsigned int gpio_fw; + unsigned int irq; +}; + +#endif /* _NXP_NCI_H_ */ diff --git a/include/linux/platform_data/omap-twl4030.h b/include/linux/platform_data/omap-twl4030.h index 0dd851ea1c..ee60ef79d7 100644 --- a/include/linux/platform_data/omap-twl4030.h +++ b/include/linux/platform_data/omap-twl4030.h @@ -1,12 +1,25 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /** * omap-twl4030.h - ASoC machine driver for TI SoC based boards with twl4030 * codec, header. * - * Copyright (C) 2012 Texas Instruments Incorporated - https://www.ti.com + * Copyright (C) 2012 Texas Instruments Incorporated - http://www.ti.com * All rights reserved. * * Author: Peter Ujfalusi + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA + * 02110-1301 USA */ #ifndef _OMAP_TWL4030_H_ diff --git a/include/linux/platform_data/omap-wd-timer.h b/include/linux/platform_data/omap-wd-timer.h index f2788ec984..d75f5f802d 100644 --- a/include/linux/platform_data/omap-wd-timer.h +++ b/include/linux/platform_data/omap-wd-timer.h @@ -1,9 +1,13 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * OMAP2+ WDTIMER-specific function prototypes * * Copyright (C) 2012 Texas Instruments, Inc. 
* Paul Walmsley + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. */ #ifndef __LINUX_PLATFORM_DATA_OMAP_WD_TIMER_H diff --git a/include/linux/platform_data/omap1_bl.h b/include/linux/platform_data/omap1_bl.h index 5e8b17d77a..881a8e92d6 100644 --- a/include/linux/platform_data/omap1_bl.h +++ b/include/linux/platform_data/omap1_bl.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef __OMAP1_BL_H__ #define __OMAP1_BL_H__ diff --git a/include/linux/platform_data/omap_drm.h b/include/linux/platform_data/omap_drm.h new file mode 100644 index 0000000000..f4e4a237eb --- /dev/null +++ b/include/linux/platform_data/omap_drm.h @@ -0,0 +1,53 @@ +/* + * DRM/KMS platform data for TI OMAP platforms + * + * Copyright (C) 2012 Texas Instruments + * Author: Rob Clark + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published by + * the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program. If not, see . + */ + +#ifndef __PLATFORM_DATA_OMAP_DRM_H__ +#define __PLATFORM_DATA_OMAP_DRM_H__ + +/* + * Optional platform data to configure the default configuration of which + * pipes/overlays/CRTCs are used.. if this is not provided, then instead the + * first CONFIG_DRM_OMAP_NUM_CRTCS are used, and they are each connected to + * one manager, with priority given to managers that are connected to + * detected devices. 
Remaining overlays are used as video planes. This + * should be a good default behavior for most cases, but yet there still + * might be times when you wish to do something different. + */ +struct omap_kms_platform_data { + /* overlays to use as CRTCs: */ + int ovl_cnt; + const int *ovl_ids; + + /* overlays to use as video planes: */ + int pln_cnt; + const int *pln_ids; + + int mgr_cnt; + const int *mgr_ids; + + int dev_cnt; + const char **dev_names; +}; + +struct omap_drm_platform_data { + uint32_t omaprev; + struct omap_kms_platform_data *kms_pdata; +}; + +#endif /* __PLATFORM_DATA_OMAP_DRM_H__ */ diff --git a/include/linux/platform_data/omapdss.h b/include/linux/platform_data/omapdss.h index a377090d90..6791779290 100644 --- a/include/linux/platform_data/omapdss.h +++ b/include/linux/platform_data/omapdss.h @@ -1,6 +1,10 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * Copyright (C) 2016 Texas Instruments, Inc. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. 
*/ #ifndef __OMAPDSS_PDATA_H @@ -23,6 +27,7 @@ enum omapdss_version { /* Board specific data */ struct omap_dss_board_info { + const char *default_display_name; int (*dsi_enable_pads)(int dsi_id, unsigned int lane_mask); void (*dsi_disable_pads)(int dsi_id, unsigned int lane_mask); int (*set_min_bus_tput)(struct device *dev, unsigned long r); diff --git a/include/linux/platform_data/pca953x.h b/include/linux/platform_data/pca953x.h index 4eb53e0239..3c98dd4f90 100644 --- a/include/linux/platform_data/pca953x.h +++ b/include/linux/platform_data/pca953x.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_PCA953X_H #define _LINUX_PCA953X_H diff --git a/include/linux/platform_data/pcmcia-pxa2xx_viper.h b/include/linux/platform_data/pcmcia-pxa2xx_viper.h index a23b58aff9..d428be4db4 100644 --- a/include/linux/platform_data/pcmcia-pxa2xx_viper.h +++ b/include/linux/platform_data/pcmcia-pxa2xx_viper.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef __ARCOM_PCMCIA_H #define __ARCOM_PCMCIA_H diff --git a/include/linux/platform_data/pinctrl-adi2.h b/include/linux/platform_data/pinctrl-adi2.h new file mode 100644 index 0000000000..8f91300617 --- /dev/null +++ b/include/linux/platform_data/pinctrl-adi2.h @@ -0,0 +1,40 @@ +/* + * Pinctrl Driver for ADI GPIO2 controller + * + * Copyright 2007-2013 Analog Devices Inc. + * + * Licensed under the GPLv2 or later + */ + + +#ifndef PINCTRL_ADI2_H +#define PINCTRL_ADI2_H + +#include +#include + +/** + * struct adi_pinctrl_gpio_platform_data - Pinctrl gpio platform data + * for ADI GPIO2 device. + * + * @port_gpio_base: Optional global GPIO index of the GPIO bank. + * 0 means driver decides. + * @port_pin_base: Pin index of the pin controller device. + * @port_width: PIN number of the GPIO bank device + * @pint_id: GPIO PINT device id that this GPIO bank should map to. + * @pint_assign: The 32-bit GPIO PINT registers can be divided into 2 parts. 
A + * GPIO bank can be mapped into either low 16 bits[0] or high 16 + * bits[1] of each PINT register. + * @pint_map: GIOP bank mapping code in PINT device + */ +struct adi_pinctrl_gpio_platform_data { + unsigned int port_gpio_base; + unsigned int port_pin_base; + unsigned int port_width; + u8 pinctrl_id; + u8 pint_id; + bool pint_assign; + u8 pint_map; +}; + +#endif diff --git a/include/linux/platform_data/pinctrl-single.h b/include/linux/platform_data/pinctrl-single.h index 7473d3c4ca..72eacda9b3 100644 --- a/include/linux/platform_data/pinctrl-single.h +++ b/include/linux/platform_data/pinctrl-single.h @@ -1,8 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ - -#ifndef _PINCTRL_SINGLE_H -#define _PINCTRL_SINGLE_H - /** * irq: optional wake-up interrupt * rearm: optional soc specific rearm function @@ -15,5 +10,3 @@ struct pcs_pdata { int irq; void (*rearm)(void); }; - -#endif /* _PINCTRL_SINGLE_H */ diff --git a/include/linux/platform_data/pixcir_i2c_ts.h b/include/linux/platform_data/pixcir_i2c_ts.h new file mode 100644 index 0000000000..646af6f8b8 --- /dev/null +++ b/include/linux/platform_data/pixcir_i2c_ts.h @@ -0,0 +1,63 @@ +#ifndef _PIXCIR_I2C_TS_H +#define _PIXCIR_I2C_TS_H + +/* + * Register map + */ +#define PIXCIR_REG_POWER_MODE 51 +#define PIXCIR_REG_INT_MODE 52 + +/* + * Power modes: + * active: max scan speed + * idle: lower scan speed with automatic transition to active on touch + * halt: datasheet says sleep but this is more like halt as the chip + * clocks are cut and it can only be brought out of this mode + * using the RESET pin. 
+ */ +enum pixcir_power_mode { + PIXCIR_POWER_ACTIVE, + PIXCIR_POWER_IDLE, + PIXCIR_POWER_HALT, +}; + +#define PIXCIR_POWER_MODE_MASK 0x03 +#define PIXCIR_POWER_ALLOW_IDLE (1UL << 2) + +/* + * Interrupt modes: + * periodical: interrupt is asserted periodicaly + * diff coordinates: interrupt is asserted when coordinates change + * level on touch: interrupt level asserted during touch + * pulse on touch: interrupt pulse asserted druing touch + * + */ +enum pixcir_int_mode { + PIXCIR_INT_PERIODICAL, + PIXCIR_INT_DIFF_COORD, + PIXCIR_INT_LEVEL_TOUCH, + PIXCIR_INT_PULSE_TOUCH, +}; + +#define PIXCIR_INT_MODE_MASK 0x03 +#define PIXCIR_INT_ENABLE (1UL << 3) +#define PIXCIR_INT_POL_HIGH (1UL << 2) + +/** + * struct pixcir_irc_chip_data - chip related data + * @max_fingers: Max number of fingers reported simultaneously by h/w + * @has_hw_ids: Hardware supports finger tracking IDs + * + */ +struct pixcir_i2c_chip_data { + u8 max_fingers; + bool has_hw_ids; +}; + +struct pixcir_ts_platform_data { + int x_max; + int y_max; + struct pixcir_i2c_chip_data chip; +}; + +#endif diff --git a/include/linux/platform_data/pn544.h b/include/linux/platform_data/pn544.h new file mode 100644 index 0000000000..5ce1ab983f --- /dev/null +++ b/include/linux/platform_data/pn544.h @@ -0,0 +1,43 @@ +/* + * Driver include for the PN544 NFC chip. + * + * Copyright (C) Nokia Corporation + * + * Author: Jari Vanhala + * Contact: Matti Aaltoenn + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, see . 
+ */ + +#ifndef _PN544_H_ +#define _PN544_H_ + +#include + +enum { + NFC_GPIO_ENABLE, + NFC_GPIO_FW_RESET, + NFC_GPIO_IRQ +}; + +/* board config */ +struct pn544_nfc_platform_data { + int (*request_resources) (struct i2c_client *client); + void (*free_resources) (void); + void (*enable) (int fw); + int (*test) (void); + void (*disable) (void); + int (*get_gpio)(int type); +}; + +#endif /* _PN544_H_ */ diff --git a/include/linux/platform_data/pwm_omap_dmtimer.h b/include/linux/platform_data/pwm_omap_dmtimer.h new file mode 100644 index 0000000000..e7d521e488 --- /dev/null +++ b/include/linux/platform_data/pwm_omap_dmtimer.h @@ -0,0 +1,90 @@ +/* + * include/linux/platform_data/pwm_omap_dmtimer.h + * + * OMAP Dual-Mode Timer PWM platform data + * + * Copyright (C) 2010 Texas Instruments Incorporated - http://www.ti.com/ + * Tarun Kanti DebBarma + * Thara Gopinath + * + * Platform device conversion and hwmod support. + * + * Copyright (C) 2005 Nokia Corporation + * Author: Lauri Leukkunen + * PWM and clock framework support by Timo Teras. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + * + * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN + * NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write to the Free Software Foundation, Inc., + * 675 Mass Ave, Cambridge, MA 02139, USA. + */ + +#ifndef __PWM_OMAP_DMTIMER_PDATA_H +#define __PWM_OMAP_DMTIMER_PDATA_H + +/* clock sources */ +#define PWM_OMAP_DMTIMER_SRC_SYS_CLK 0x00 +#define PWM_OMAP_DMTIMER_SRC_32_KHZ 0x01 +#define PWM_OMAP_DMTIMER_SRC_EXT_CLK 0x02 + +/* timer interrupt enable bits */ +#define PWM_OMAP_DMTIMER_INT_CAPTURE (1 << 2) +#define PWM_OMAP_DMTIMER_INT_OVERFLOW (1 << 1) +#define PWM_OMAP_DMTIMER_INT_MATCH (1 << 0) + +/* trigger types */ +#define PWM_OMAP_DMTIMER_TRIGGER_NONE 0x00 +#define PWM_OMAP_DMTIMER_TRIGGER_OVERFLOW 0x01 +#define PWM_OMAP_DMTIMER_TRIGGER_OVERFLOW_AND_COMPARE 0x02 + +struct omap_dm_timer; +typedef struct omap_dm_timer pwm_omap_dmtimer; + +struct pwm_omap_dmtimer_pdata { + pwm_omap_dmtimer *(*request_by_node)(struct device_node *np); + pwm_omap_dmtimer *(*request_specific)(int timer_id); + pwm_omap_dmtimer *(*request)(void); + + int (*free)(pwm_omap_dmtimer *timer); + + void (*enable)(pwm_omap_dmtimer *timer); + void (*disable)(pwm_omap_dmtimer *timer); + + int (*get_irq)(pwm_omap_dmtimer *timer); + int (*set_int_enable)(pwm_omap_dmtimer *timer, unsigned int value); + int (*set_int_disable)(pwm_omap_dmtimer *timer, u32 mask); + + struct clk *(*get_fclk)(pwm_omap_dmtimer *timer); + + int (*start)(pwm_omap_dmtimer *timer); + int (*stop)(pwm_omap_dmtimer *timer); + int (*set_source)(pwm_omap_dmtimer *timer, int source); + + int (*set_load)(pwm_omap_dmtimer *timer, int autoreload, + unsigned int value); + int (*set_match)(pwm_omap_dmtimer *timer, int enable, + unsigned int match); + int (*set_pwm)(pwm_omap_dmtimer *timer, int def_on, + int toggle, int trigger); + int (*set_prescaler)(pwm_omap_dmtimer *timer, int prescaler); + + unsigned int (*read_counter)(pwm_omap_dmtimer *timer); + int (*write_counter)(pwm_omap_dmtimer *timer, unsigned int value); + unsigned 
int (*read_status)(pwm_omap_dmtimer *timer); + int (*write_status)(pwm_omap_dmtimer *timer, unsigned int value); +}; + +#endif /* __PWM_OMAP_DMTIMER_PDATA_H */ diff --git a/include/linux/platform_data/pxa2xx_udc.h b/include/linux/platform_data/pxa2xx_udc.h index ff9c35dca5..c6c5e98b5b 100644 --- a/include/linux/platform_data/pxa2xx_udc.h +++ b/include/linux/platform_data/pxa2xx_udc.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ /* * This supports machine-specific differences in how the PXA2xx * USB Device Controller (UDC) is wired. diff --git a/include/linux/platform_data/pxa_sdhci.h b/include/linux/platform_data/pxa_sdhci.h index 899457cee4..9e20c2fb4f 100644 --- a/include/linux/platform_data/pxa_sdhci.h +++ b/include/linux/platform_data/pxa_sdhci.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * include/linux/platform_data/pxa_sdhci.h * @@ -6,6 +5,10 @@ * Zhangfei Gao * * PXA Platform - SDHCI platform data definitions + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. */ #ifndef _PXA_SDHCI_H_ @@ -30,6 +33,8 @@ * 1: choose feedback clk + delay value * 2: choose internal clk * @clk_delay_enable: enable clk_delay or not, used on pxa910 + * @ext_cd_gpio: gpio pin used for external CD line + * @ext_cd_gpio_invert: invert values for external CD gpio line * @max_speed: the maximum speed supported * @host_caps: Standard MMC host capabilities bit field. 
* @quirks: quirks of platfrom @@ -41,6 +46,8 @@ struct sdhci_pxa_platdata { unsigned int clk_delay_cycles; unsigned int clk_delay_sel; bool clk_delay_enable; + unsigned int ext_cd_gpio; + bool ext_cd_gpio_invert; unsigned int max_speed; u32 host_caps; u32 host_caps2; diff --git a/include/linux/platform_data/regulator-haptic.h b/include/linux/platform_data/regulator-haptic.h index 4213e1b013..5658e58e07 100644 --- a/include/linux/platform_data/regulator-haptic.h +++ b/include/linux/platform_data/regulator-haptic.h @@ -1,10 +1,13 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * Regulator Haptic Platform Data * * Copyright (c) 2014 Samsung Electronics Co., Ltd. * Author: Jaewon Kim * Author: Hyunhee Kim + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. */ #ifndef _REGULATOR_HAPTIC_H diff --git a/include/linux/platform_data/remoteproc-omap.h b/include/linux/platform_data/remoteproc-omap.h new file mode 100644 index 0000000000..71a1b2399c --- /dev/null +++ b/include/linux/platform_data/remoteproc-omap.h @@ -0,0 +1,59 @@ +/* + * Remote Processor - omap-specific bits + * + * Copyright (C) 2011 Texas Instruments, Inc. + * Copyright (C) 2011 Google, Inc. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#ifndef _PLAT_REMOTEPROC_H +#define _PLAT_REMOTEPROC_H + +struct rproc_ops; +struct platform_device; + +/* + * struct omap_rproc_pdata - omap remoteproc's platform data + * @name: the remoteproc's name + * @oh_name: omap hwmod device + * @oh_name_opt: optional, secondary omap hwmod device + * @firmware: name of firmware file to load + * @mbox_name: name of omap mailbox device to use with this rproc + * @ops: start/stop rproc handlers + * @device_enable: omap-specific handler for enabling a device + * @device_shutdown: omap-specific handler for shutting down a device + * @set_bootaddr: omap-specific handler for setting the rproc boot address + */ +struct omap_rproc_pdata { + const char *name; + const char *oh_name; + const char *oh_name_opt; + const char *firmware; + const char *mbox_name; + const struct rproc_ops *ops; + int (*device_enable)(struct platform_device *pdev); + int (*device_shutdown)(struct platform_device *pdev); + void (*set_bootaddr)(u32); +}; + +#if defined(CONFIG_OMAP_REMOTEPROC) || defined(CONFIG_OMAP_REMOTEPROC_MODULE) + +void __init omap_rproc_reserve_cma(void); + +#else + +static inline void __init omap_rproc_reserve_cma(void) +{ +} + +#endif + +#endif /* _PLAT_REMOTEPROC_H */ diff --git a/include/linux/platform_data/rtc-m48t86.h b/include/linux/platform_data/rtc-m48t86.h new file mode 100644 index 0000000000..915d6b4f0f --- /dev/null +++ b/include/linux/platform_data/rtc-m48t86.h @@ -0,0 +1,16 @@ +/* + * ST M48T86 / Dallas DS12887 RTC driver + * Copyright (c) 2006 Tower Technologies + * + * Author: Alessandro Zummo + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+*/ + +struct m48t86_ops +{ + void (*writebyte)(unsigned char value, unsigned long addr); + unsigned char (*readbyte)(unsigned long addr); +}; diff --git a/include/linux/platform_data/s3c-hsotg.h b/include/linux/platform_data/s3c-hsotg.h index 004ddaf650..3982586ba6 100644 --- a/include/linux/platform_data/s3c-hsotg.h +++ b/include/linux/platform_data/s3c-hsotg.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* include/linux/platform_data/s3c-hsotg.h * * Copyright 2008 Openmoko, Inc. @@ -7,6 +6,10 @@ * http://armlinux.simtec.co.uk/ * * S3C USB2.0 High-speed / OtG platform information + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. */ #ifndef __LINUX_USB_S3C_HSOTG_H diff --git a/include/linux/platform_data/s3c-hsudc.h b/include/linux/platform_data/s3c-hsudc.h index a170939832..6fa109339b 100644 --- a/include/linux/platform_data/s3c-hsudc.h +++ b/include/linux/platform_data/s3c-hsudc.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * S3C24XX USB 2.0 High-speed USB controller gadget driver * @@ -8,6 +7,10 @@ * The S3C24XX USB 2.0 high-speed USB controller supports upto 9 endpoints. * Each endpoint can be configured as either in or out endpoint. Endpoints * can be configured for Bulk or Interrupt transfer mode. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
*/ #ifndef __LINUX_USB_S3C_HSUDC_H @@ -26,8 +29,6 @@ struct s3c24xx_hsudc_platdata { unsigned int epnum; void (*gpio_init)(void); void (*gpio_uninit)(void); - void (*phy_init)(void); - void (*phy_uninit)(void); }; #endif /* __LINUX_USB_S3C_HSUDC_H */ diff --git a/include/linux/platform_data/sa11x0-serial.h b/include/linux/platform_data/sa11x0-serial.h index 8b79ab08af..009e1d83fe 100644 --- a/include/linux/platform_data/sa11x0-serial.h +++ b/include/linux/platform_data/sa11x0-serial.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ /* * Author: Nicolas Pitre * diff --git a/include/linux/platform_data/sc18is602.h b/include/linux/platform_data/sc18is602.h index 0e91489edf..997b066341 100644 --- a/include/linux/platform_data/sc18is602.h +++ b/include/linux/platform_data/sc18is602.h @@ -1,10 +1,13 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * Platform data for NXP SC18IS602/603 * * Copyright (C) 2012 Guenter Roeck * - * For further information, see the Documentation/spi/spi-sc18is602.rst file. + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * For further information, see the Documentation/spi/sc18is602 file. */ /** diff --git a/include/linux/platform_data/sdhci-pic32.h b/include/linux/platform_data/sdhci-pic32.h index 8a53fd34da..7e0efe64c8 100644 --- a/include/linux/platform_data/sdhci-pic32.h +++ b/include/linux/platform_data/sdhci-pic32.h @@ -1,7 +1,15 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * Purna Chandra Mandal, purna.mandal@microchip.com * Copyright (C) 2015 Microchip Technology Inc. All rights reserved. + * + * This program is free software; you can distribute it and/or modify it + * under the terms of the GNU General Public License (Version 2) as + * published by the Free Software Foundation. 
+ * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. */ #ifndef __PIC32_SDHCI_PDATA_H__ #define __PIC32_SDHCI_PDATA_H__ diff --git a/include/linux/platform_data/serial-imx.h b/include/linux/platform_data/serial-imx.h index 0844b21372..a938eba2f1 100644 --- a/include/linux/platform_data/serial-imx.h +++ b/include/linux/platform_data/serial-imx.h @@ -1,6 +1,19 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * Copyright (C) 2008 by Sascha Hauer + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, + * MA 02110-1301, USA. */ #ifndef ASMARM_ARCH_UART_H diff --git a/include/linux/platform_data/serial-omap.h b/include/linux/platform_data/serial-omap.h index 0061d24519..2ba2c34ca3 100644 --- a/include/linux/platform_data/serial-omap.h +++ b/include/linux/platform_data/serial-omap.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * Driver for OMAP-UART controller. 
* Based on drivers/serial/8250.c @@ -8,6 +7,11 @@ * Authors: * Govindraj R * Thara Gopinath + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. */ #ifndef __OMAP_SERIAL_H__ diff --git a/include/linux/platform_data/serial-sccnxp.h b/include/linux/platform_data/serial-sccnxp.h index dc670f24e7..af0c8c3b89 100644 --- a/include/linux/platform_data/serial-sccnxp.h +++ b/include/linux/platform_data/serial-sccnxp.h @@ -1,10 +1,14 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * NXP (Philips) SCC+++(SCN+++) serial driver * * Copyright (C) 2012 Alexander Shiyan * * Based on sc26xx.c, by Thomas Bogendörfer (tsbogend@alpha.franken.de) + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. */ #ifndef _PLATFORM_DATA_SERIAL_SCCNXP_H_ diff --git a/include/linux/platform_data/sh_ipmmu.h b/include/linux/platform_data/sh_ipmmu.h new file mode 100644 index 0000000000..39f7405cda --- /dev/null +++ b/include/linux/platform_data/sh_ipmmu.h @@ -0,0 +1,18 @@ +/* sh_ipmmu.h + * + * Copyright (C) 2012 Hideki EIRAKU + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; version 2 of the License. 
+ */ + +#ifndef __SH_IPMMU_H__ +#define __SH_IPMMU_H__ + +struct shmobile_ipmmu_platform_data { + const char * const *dev_names; + unsigned int num_dev_names; +}; + +#endif /* __SH_IPMMU_H__ */ diff --git a/include/linux/platform_data/shmob_drm.h b/include/linux/platform_data/shmob_drm.h index d661399b21..7c686d335c 100644 --- a/include/linux/platform_data/shmob_drm.h +++ b/include/linux/platform_data/shmob_drm.h @@ -1,17 +1,26 @@ -/* SPDX-License-Identifier: GPL-2.0+ */ /* * shmob_drm.h -- SH Mobile DRM driver * * Copyright (C) 2012 Renesas Corporation * * Laurent Pinchart (laurent.pinchart@ideasonboard.com) + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. */ #ifndef __SHMOB_DRM_H__ #define __SHMOB_DRM_H__ +#include + #include +struct sh_mobile_meram_cfg; +struct sh_mobile_meram_info; + enum shmob_drm_clk_source { SHMOB_DRM_CLK_BUS, SHMOB_DRM_CLK_PERIPHERAL, @@ -84,6 +93,7 @@ struct shmob_drm_platform_data { struct shmob_drm_interface_data iface; struct shmob_drm_panel_data panel; struct shmob_drm_backlight_data backlight; + const struct sh_mobile_meram_cfg *meram; }; #endif /* __SHMOB_DRM_H__ */ diff --git a/include/linux/platform_data/sht15.h b/include/linux/platform_data/sht15.h new file mode 100644 index 0000000000..12289c1e94 --- /dev/null +++ b/include/linux/platform_data/sht15.h @@ -0,0 +1,38 @@ +/* + * sht15.h - support for the SHT15 Temperature and Humidity Sensor + * + * Copyright (c) 2009 Jonathan Cameron + * + * Copyright (c) 2007 Wouter Horre + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * For further information, see the Documentation/hwmon/sht15 file. 
+ */ + +#ifndef _PDATA_SHT15_H +#define _PDATA_SHT15_H + +/** + * struct sht15_platform_data - sht15 connectivity info + * @gpio_data: no. of gpio to which bidirectional data line is + * connected. + * @gpio_sck: no. of gpio to which the data clock is connected. + * @supply_mv: supply voltage in mv. Overridden by regulator if + * available. + * @checksum: flag to indicate the checksum should be validated. + * @no_otp_reload: flag to indicate no reload from OTP. + * @low_resolution: flag to indicate the temp/humidity resolution to use. + */ +struct sht15_platform_data { + int gpio_data; + int gpio_sck; + int supply_mv; + bool checksum; + bool no_otp_reload; + bool low_resolution; +}; + +#endif /* _PDATA_SHT15_H */ diff --git a/include/linux/platform_data/sht3x.h b/include/linux/platform_data/sht3x.h index 14680d2a98..2e5eea3581 100644 --- a/include/linux/platform_data/sht3x.h +++ b/include/linux/platform_data/sht3x.h @@ -1,8 +1,18 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * Copyright (C) 2016 Sensirion AG, Switzerland * Author: David Frey * Author: Pascal Sachs + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * */ #ifndef __SHT3X_H_ diff --git a/include/linux/platform_data/shtc1.h b/include/linux/platform_data/shtc1.h index 5ba6f8f9a9..7b8c353f7d 100644 --- a/include/linux/platform_data/shtc1.h +++ b/include/linux/platform_data/shtc1.h @@ -1,7 +1,16 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright (C) 2014 Sensirion AG, Switzerland * Author: Johannes Winkelmann + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * */ #ifndef __SHTC1_H_ diff --git a/include/linux/platform_data/si5351.h b/include/linux/platform_data/si5351.h index c71a2dd661..533d9807e5 100644 --- a/include/linux/platform_data/si5351.h +++ b/include/linux/platform_data/si5351.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ /* * Si5351A/B/C programmable clock generator platform_data. 
*/ @@ -86,7 +85,6 @@ enum si5351_disable_state { * @multisynth_src: multisynth source clock * @clkout_src: clkout source clock * @pll_master: if true, clkout can also change pll rate - * @pll_reset: if true, clkout can reset its pll * @drive: output drive strength * @rate: initial clkout rate, or default if 0 */ @@ -96,7 +94,6 @@ struct si5351_clkout_config { enum si5351_drive_strength drive; enum si5351_disable_state disable_state; bool pll_master; - bool pll_reset; unsigned long rate; }; diff --git a/include/linux/platform_data/simplefb.h b/include/linux/platform_data/simplefb.h index 27ea99af6e..077303cedb 100644 --- a/include/linux/platform_data/simplefb.h +++ b/include/linux/platform_data/simplefb.h @@ -1,8 +1,12 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * simplefb.h - Simple Framebuffer Device * * Copyright (C) 2013 David Herrmann + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. 
*/ #ifndef __PLATFORM_DATA_SIMPLEFB_H__ @@ -10,13 +14,12 @@ #include #include -#include +#include /* format array, use it to initialize a "struct simplefb_format" array */ #define SIMPLEFB_FORMATS \ { \ { "r5g6b5", 16, {11, 5}, {5, 6}, {0, 5}, {0, 0}, DRM_FORMAT_RGB565 }, \ - { "r5g5b5a1", 16, {11, 5}, {6, 5}, {1, 5}, {0, 1}, DRM_FORMAT_RGBA5551 }, \ { "x1r5g5b5", 16, {10, 5}, {5, 5}, {0, 5}, {0, 0}, DRM_FORMAT_XRGB1555 }, \ { "a1r5g5b5", 16, {10, 5}, {5, 5}, {0, 5}, {15, 1}, DRM_FORMAT_ARGB1555 }, \ { "r8g8b8", 24, {16, 8}, {8, 8}, {0, 8}, {0, 0}, DRM_FORMAT_RGB888 }, \ diff --git a/include/linux/platform_data/sky81452-backlight.h b/include/linux/platform_data/sky81452-backlight.h new file mode 100644 index 0000000000..1231e9bb00 --- /dev/null +++ b/include/linux/platform_data/sky81452-backlight.h @@ -0,0 +1,46 @@ +/* + * sky81452.h SKY81452 backlight driver + * + * Copyright 2014 Skyworks Solutions Inc. + * Author : Gyungoh Yoo + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, see . + */ + +#ifndef _SKY81452_BACKLIGHT_H +#define _SKY81452_BACKLIGHT_H + +/** + * struct sky81452_platform_data + * @name: backlight driver name. + If it is not defined, default name is lcd-backlight. + * @gpio_enable:GPIO number which control EN pin + * @enable: Enable mask for current sink channel 1, 2, 3, 4, 5 and 6. + * @ignore_pwm: true if DPWMI should be ignored. + * @dpwm_mode: true is DPWM dimming mode, otherwise Analog dimming mode. + * @phase_shift:true is phase shift mode. 
+ * @short_detection_threshold: It should be one of 4, 5, 6 and 7V. + * @boost_current_limit: It should be one of 2300, 2750mA. + */ +struct sky81452_bl_platform_data { + const char *name; + int gpio_enable; + unsigned int enable; + bool ignore_pwm; + bool dpwm_mode; + bool phase_shift; + unsigned int short_detection_threshold; + unsigned int boost_current_limit; +}; + +#endif diff --git a/include/linux/platform_data/spi-clps711x.h b/include/linux/platform_data/spi-clps711x.h index efaa596848..301956e631 100644 --- a/include/linux/platform_data/spi-clps711x.h +++ b/include/linux/platform_data/spi-clps711x.h @@ -1,8 +1,12 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * CLPS711X SPI bus driver definitions * * Copyright (C) 2012 Alexander Shiyan + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. */ #ifndef ____LINUX_PLATFORM_DATA_SPI_CLPS711X_H diff --git a/include/linux/platform_data/spi-davinci.h b/include/linux/platform_data/spi-davinci.h index 2cb5cc70fd..f4edcb03c4 100644 --- a/include/linux/platform_data/spi-davinci.h +++ b/include/linux/platform_data/spi-davinci.h @@ -1,6 +1,19 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * Copyright 2009 Texas Instruments. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ #ifndef __ARCH_ARM_DAVINCI_SPI_H @@ -23,6 +36,9 @@ enum { * @num_chipselect: number of chipselects supported by this SPI master * @intr_line: interrupt line used to connect the SPI IP to the ARM interrupt * controller withn the SoC. Possible values are 0 and 1. + * @chip_sel: list of GPIOs which can act as chip-selects for the SPI. + * SPI_INTERN_CS denotes internal SPI chip-select. Not necessary + * to populate if all chip-selects are internal. * @cshold_bug: set this to true if the SPI controller on your chip requires * a write to CSHOLD bit in between transfers (like in DM355). * @dma_event_q: DMA event queue to use if SPI_IO_TYPE_DMA is used for any @@ -32,6 +48,7 @@ struct davinci_spi_platform_data { u8 version; u8 num_chipselect; u8 intr_line; + u8 *chip_sel; u8 prescaler_limit; bool cshold_bug; enum dma_event_q dma_event_q; diff --git a/include/linux/platform_data/spi-ep93xx.h b/include/linux/platform_data/spi-ep93xx.h index b439f2a896..9bb63ac13f 100644 --- a/include/linux/platform_data/spi-ep93xx.h +++ b/include/linux/platform_data/spi-ep93xx.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef __ASM_MACH_EP93XX_SPI_H #define __ASM_MACH_EP93XX_SPI_H @@ -6,10 +5,25 @@ struct spi_device; /** * struct ep93xx_spi_info - EP93xx specific SPI descriptor + * @num_chipselect: number of chip selects on this board, must be + * at least one * @use_dma: use DMA for the transfers */ struct ep93xx_spi_info { + int num_chipselect; bool use_dma; }; +/** + * struct ep93xx_spi_chip_ops - operation callbacks for SPI slave device + * @setup: setup the chip select mechanism + * @cleanup: cleanup the chip select mechanism + * @cs_control: control the device chip select + */ +struct ep93xx_spi_chip_ops { + int (*setup)(struct spi_device *spi); + void (*cleanup)(struct 
spi_device *spi); + void (*cs_control)(struct spi_device *spi, int value); +}; + #endif /* __ASM_MACH_EP93XX_SPI_H */ diff --git a/include/linux/platform_data/spi-imx.h b/include/linux/platform_data/spi-imx.h new file mode 100644 index 0000000000..08be445e8e --- /dev/null +++ b/include/linux/platform_data/spi-imx.h @@ -0,0 +1,27 @@ + +#ifndef __MACH_SPI_H_ +#define __MACH_SPI_H_ + +/* + * struct spi_imx_master - device.platform_data for SPI controller devices. + * @chipselect: Array of chipselects for this master. Numbers >= 0 mean gpio + * pins, numbers < 0 mean internal CSPI chipselects according + * to MXC_SPI_CS(). Normally you want to use gpio based chip + * selects as the CSPI module tries to be intelligent about + * when to assert the chipselect: The CSPI module deasserts the + * chipselect once it runs out of input data. The other problem + * is that it is not possible to mix between high active and low + * active chipselects on one single bus using the internal + * chipselects. Unfortunately Freescale decided to put some + * chipselects on dedicated pins which are not usable as gpios, + * so we have to support the internal chipselects. + * @num_chipselect: ARRAY_SIZE(chipselect) + */ +struct spi_imx_master { + int *chipselect; + int num_chipselect; +}; + +#define MXC_SPI_CS(no) ((no) - 32) + +#endif /* __MACH_SPI_H_*/ diff --git a/include/linux/platform_data/spi-mt65xx.h b/include/linux/platform_data/spi-mt65xx.h index f0db674f07..54b0448397 100644 --- a/include/linux/platform_data/spi-mt65xx.h +++ b/include/linux/platform_data/spi-mt65xx.h @@ -1,9 +1,12 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * MTK SPI bus driver definitions * * Copyright (c) 2015 MediaTek Inc. * Author: Leilk Liu + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
*/ #ifndef ____LINUX_PLATFORM_DATA_SPI_MTK_H @@ -11,7 +14,7 @@ /* Board specific platform_data */ struct mtk_chip_config { - u32 sample_sel; - u32 tick_delay; + u32 tx_mlsb; + u32 rx_mlsb; }; #endif diff --git a/include/linux/platform_data/spi-nuc900.h b/include/linux/platform_data/spi-nuc900.h new file mode 100644 index 0000000000..4b3f46832e --- /dev/null +++ b/include/linux/platform_data/spi-nuc900.h @@ -0,0 +1,33 @@ +/* + * Copyright (c) 2009 Nuvoton technology corporation. + * + * Wan ZongShun + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation;version 2 of the License. + * + */ + +#ifndef __SPI_NUC900_H +#define __SPI_NUC900_H + +extern void mfp_set_groupg(struct device *dev, const char *subname); + +struct nuc900_spi_info { + unsigned int num_cs; + unsigned int lsb; + unsigned int txneg; + unsigned int rxneg; + unsigned int divider; + unsigned int sleep; + unsigned int txnum; + unsigned int txbitlen; + int bus_num; +}; + +struct nuc900_spi_chip { + unsigned char bits_per_word; +}; + +#endif /* __SPI_NUC900_H */ diff --git a/include/linux/platform_data/spi-omap2-mcspi.h b/include/linux/platform_data/spi-omap2-mcspi.h index 3b400b1919..c100456eab 100644 --- a/include/linux/platform_data/spi-omap2-mcspi.h +++ b/include/linux/platform_data/spi-omap2-mcspi.h @@ -1,7 +1,10 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _OMAP2_MCSPI_H #define _OMAP2_MCSPI_H +#define OMAP2_MCSPI_REV 0 +#define OMAP3_MCSPI_REV 1 +#define OMAP4_MCSPI_REV 2 + #define OMAP4_MCSPI_REG_OFFSET 0x100 #define MCSPI_PINDIR_D0_IN_D1_OUT 0 @@ -11,7 +14,10 @@ struct omap2_mcspi_platform_config { unsigned short num_cs; unsigned int regs_offset; unsigned int pin_dir:1; - size_t max_xfer_len; +}; + +struct omap2_mcspi_dev_attr { + unsigned short num_chipselect; }; struct omap2_mcspi_device_config { diff --git a/include/linux/platform_data/spi-s3c64xx.h 
b/include/linux/platform_data/spi-s3c64xx.h index 773daf7915..5c1e21c872 100644 --- a/include/linux/platform_data/spi-s3c64xx.h +++ b/include/linux/platform_data/spi-s3c64xx.h @@ -1,8 +1,10 @@ -/* SPDX-License-Identifier: GPL-2.0 */ - /* * Copyright (C) 2009 Samsung Electronics Ltd. * Jaswinder Singh + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. */ #ifndef __SPI_S3C64XX_H @@ -38,6 +40,9 @@ struct s3c64xx_spi_info { int num_cs; bool no_cs; int (*cfg_gpio)(void); + dma_filter_fn filter; + void *dma_tx; + void *dma_rx; }; /** diff --git a/include/linux/platform_data/ssm2518.h b/include/linux/platform_data/ssm2518.h index 3f9e632d6f..9a8e3ea287 100644 --- a/include/linux/platform_data/ssm2518.h +++ b/include/linux/platform_data/ssm2518.h @@ -1,9 +1,10 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * SSM2518 amplifier audio driver * * Copyright 2013 Analog Devices Inc. * Author: Lars-Peter Clausen + * + * Licensed under the GPL-2. */ #ifndef __LINUX_PLATFORM_DATA_SSM2518_H__ diff --git a/include/linux/platform_data/st-nci.h b/include/linux/platform_data/st-nci.h new file mode 100644 index 0000000000..f6494b347c --- /dev/null +++ b/include/linux/platform_data/st-nci.h @@ -0,0 +1,31 @@ +/* + * Driver include for ST NCI NFC chip family. + * + * Copyright (C) 2014-2015 STMicroelectronics SAS. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, see . + */ + +#ifndef _ST_NCI_H_ +#define _ST_NCI_H_ + +#define ST_NCI_DRIVER_NAME "st_nci" + +struct st_nci_nfc_platform_data { + unsigned int gpio_reset; + unsigned int irq_polarity; + bool is_ese_present; + bool is_uicc_present; +}; + +#endif /* _ST_NCI_H_ */ diff --git a/include/linux/platform_data/st1232_pdata.h b/include/linux/platform_data/st1232_pdata.h new file mode 100644 index 0000000000..cac3e7b4c4 --- /dev/null +++ b/include/linux/platform_data/st1232_pdata.h @@ -0,0 +1,13 @@ +#ifndef _LINUX_ST1232_PDATA_H +#define _LINUX_ST1232_PDATA_H + +/* + * Optional platform data + * + * Use this if you want the driver to drive the reset pin. + */ +struct st1232_pdata { + int reset_gpio; +}; + +#endif diff --git a/include/linux/platform_data/st21nfca.h b/include/linux/platform_data/st21nfca.h new file mode 100644 index 0000000000..cc2bdafb0c --- /dev/null +++ b/include/linux/platform_data/st21nfca.h @@ -0,0 +1,33 @@ +/* + * Driver include for the ST21NFCA NFC chip. + * + * Copyright (C) 2014 STMicroelectronics SAS. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, see . 
+ */ + +#ifndef _ST21NFCA_HCI_H_ +#define _ST21NFCA_HCI_H_ + +#include + +#define ST21NFCA_HCI_DRIVER_NAME "st21nfca_hci" + +struct st21nfca_nfc_platform_data { + unsigned int gpio_ena; + unsigned int irq_polarity; + bool is_ese_present; + bool is_uicc_present; +}; + +#endif /* _ST21NFCA_HCI_H_ */ diff --git a/include/linux/platform_data/st33zp24.h b/include/linux/platform_data/st33zp24.h index 61db674f36..6f0fb6ebd7 100644 --- a/include/linux/platform_data/st33zp24.h +++ b/include/linux/platform_data/st33zp24.h @@ -1,7 +1,19 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * STMicroelectronics TPM Linux driver for TPM 1.2 ST33ZP24 * Copyright (C) 2009 - 2016 STMicroelectronics + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, see . */ #ifndef __ST33ZP24_H__ #define __ST33ZP24_H__ diff --git a/include/linux/platform_data/st_sensors_pdata.h b/include/linux/platform_data/st_sensors_pdata.h index 897051e51b..79b0e4cdb8 100644 --- a/include/linux/platform_data/st_sensors_pdata.h +++ b/include/linux/platform_data/st_sensors_pdata.h @@ -1,10 +1,11 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * STMicroelectronics sensors platform-data driver * * Copyright 2013 STMicroelectronics Inc. * * Denis Ciocca + * + * Licensed under the GPL-2. 
*/ #ifndef ST_SENSORS_PDATA_H @@ -13,20 +14,13 @@ /** * struct st_sensors_platform_data - Platform data for the ST sensors * @drdy_int_pin: Redirect DRDY on pin 1 (1) or pin 2 (2). - * Available only for accelerometer, magnetometer and pressure sensors. + * Available only for accelerometer and pressure sensors. * Accelerometer DRDY on LSM330 available only on pin 1 (see datasheet). - * Magnetometer DRDY is supported only on LSM9DS0. * @open_drain: set the interrupt line to be open drain if possible. - * @spi_3wire: enable spi-3wire mode. - * @pullups: enable/disable i2c controller pullup resistors. - * @wakeup_source: enable/disable device as wakeup generator. */ struct st_sensors_platform_data { u8 drdy_int_pin; bool open_drain; - bool spi_3wire; - bool pullups; - bool wakeup_source; }; #endif /* ST_SENSORS_PDATA_H */ diff --git a/include/linux/platform_data/syscon.h b/include/linux/platform_data/syscon.h index 2c089dd3e2..2354c6fa37 100644 --- a/include/linux/platform_data/syscon.h +++ b/include/linux/platform_data/syscon.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef PLATFORM_DATA_SYSCON_H #define PLATFORM_DATA_SYSCON_H diff --git a/include/linux/platform_data/touchscreen-s3c2410.h b/include/linux/platform_data/touchscreen-s3c2410.h index bf8d3b9d7c..71eccaa983 100644 --- a/include/linux/platform_data/touchscreen-s3c2410.h +++ b/include/linux/platform_data/touchscreen-s3c2410.h @@ -1,6 +1,9 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright (c) 2005 Arnaud Patard + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
*/ #ifndef __TOUCHSCREEN_S3C2410_H diff --git a/include/linux/platform_data/tsl2563.h b/include/linux/platform_data/tsl2563.h index 9cf9309c3f..c90d7a09dd 100644 --- a/include/linux/platform_data/tsl2563.h +++ b/include/linux/platform_data/tsl2563.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef __LINUX_TSL2563_H #define __LINUX_TSL2563_H diff --git a/include/linux/platform_data/uio_pruss.h b/include/linux/platform_data/uio_pruss.h index 31f2e22661..3d47d21982 100644 --- a/include/linux/platform_data/uio_pruss.h +++ b/include/linux/platform_data/uio_pruss.h @@ -3,7 +3,7 @@ * * Platform data for uio_pruss driver * - * Copyright (C) 2010-11 Texas Instruments Incorporated - https://www.ti.com/ + * Copyright (C) 2010-11 Texas Instruments Incorporated - http://www.ti.com/ * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License as diff --git a/include/linux/platform_data/usb-davinci.h b/include/linux/platform_data/usb-davinci.h index 879f5c78b9..e0bc4abe69 100644 --- a/include/linux/platform_data/usb-davinci.h +++ b/include/linux/platform_data/usb-davinci.h @@ -11,8 +11,45 @@ #ifndef __ASM_ARCH_USB_H #define __ASM_ARCH_USB_H +/* DA8xx CFGCHIP2 (USB 2.0 PHY Control) register bits */ +#define CFGCHIP2_PHYCLKGD (1 << 17) +#define CFGCHIP2_VBUSSENSE (1 << 16) +#define CFGCHIP2_RESET (1 << 15) +#define CFGCHIP2_OTGMODE (3 << 13) +#define CFGCHIP2_NO_OVERRIDE (0 << 13) +#define CFGCHIP2_FORCE_HOST (1 << 13) +#define CFGCHIP2_FORCE_DEVICE (2 << 13) +#define CFGCHIP2_FORCE_HOST_VBUS_LOW (3 << 13) +#define CFGCHIP2_USB1PHYCLKMUX (1 << 12) +#define CFGCHIP2_USB2PHYCLKMUX (1 << 11) +#define CFGCHIP2_PHYPWRDN (1 << 10) +#define CFGCHIP2_OTGPWRDN (1 << 9) +#define CFGCHIP2_DATPOL (1 << 8) +#define CFGCHIP2_USB1SUSPENDM (1 << 7) +#define CFGCHIP2_PHY_PLLON (1 << 6) /* override PLL suspend */ +#define CFGCHIP2_SESENDEN (1 << 5) /* Vsess_end comparator */ +#define CFGCHIP2_VBDTCTEN (1 << 4) /* Vbus 
comparator */ +#define CFGCHIP2_REFFREQ (0xf << 0) +#define CFGCHIP2_REFFREQ_12MHZ (1 << 0) +#define CFGCHIP2_REFFREQ_24MHZ (2 << 0) +#define CFGCHIP2_REFFREQ_48MHZ (3 << 0) + +struct da8xx_ohci_root_hub; + +typedef void (*da8xx_ocic_handler_t)(struct da8xx_ohci_root_hub *hub, + unsigned port); + /* Passed as the platform data to the OHCI driver */ struct da8xx_ohci_root_hub { + /* Switch the port power on/off */ + int (*set_power)(unsigned port, int on); + /* Read the port power status */ + int (*get_power)(unsigned port); + /* Read the port over-current indicator */ + int (*get_oci)(unsigned port); + /* Over-current indicator change notification (pass NULL to disable) */ + int (*ocic_notify)(da8xx_ocic_handler_t handler); + /* Time from power on to power good (in 2 ms units) */ u8 potpgt; }; diff --git a/include/linux/platform_data/usb-ehci-mxc.h b/include/linux/platform_data/usb-ehci-mxc.h index ad9794d09b..157e71f79f 100644 --- a/include/linux/platform_data/usb-ehci-mxc.h +++ b/include/linux/platform_data/usb-ehci-mxc.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef __INCLUDE_ASM_ARCH_MXC_EHCI_H #define __INCLUDE_ASM_ARCH_MXC_EHCI_H diff --git a/include/linux/platform_data/usb-musb-ux500.h b/include/linux/platform_data/usb-musb-ux500.h index 8909f396fe..dd9c83ac7d 100644 --- a/include/linux/platform_data/usb-musb-ux500.h +++ b/include/linux/platform_data/usb-musb-ux500.h @@ -1,8 +1,8 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright (C) ST-Ericsson SA 2011 * * Author: Mian Yousaf Kaukab + * License terms: GNU General Public License (GPL) version 2 */ #ifndef __ASM_ARCH_USB_H #define __ASM_ARCH_USB_H diff --git a/include/linux/platform_data/usb-mx2.h b/include/linux/platform_data/usb-mx2.h index 97a670f3d8..22d0b59626 100644 --- a/include/linux/platform_data/usb-mx2.h +++ b/include/linux/platform_data/usb-mx2.h @@ -1,6 +1,15 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * Copyright (C) 2009 Martin Fuzzey + * + * This 
program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. */ #ifndef __ASM_ARCH_MX21_USBH diff --git a/include/linux/platform_data/usb-ohci-pxa27x.h b/include/linux/platform_data/usb-ohci-pxa27x.h index 69adea7694..95b6e2a6e5 100644 --- a/include/linux/platform_data/usb-ohci-pxa27x.h +++ b/include/linux/platform_data/usb-ohci-pxa27x.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef ASMARM_ARCH_OHCI_H #define ASMARM_ARCH_OHCI_H diff --git a/include/linux/platform_data/usb-ohci-s3c2410.h b/include/linux/platform_data/usb-ohci-s3c2410.h index 558a9605be..7fa1fbefc3 100644 --- a/include/linux/platform_data/usb-ohci-s3c2410.h +++ b/include/linux/platform_data/usb-ohci-s3c2410.h @@ -1,10 +1,13 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* arch/arm/plat-samsung/include/plat/usb-control.h * * Copyright (c) 2004 Simtec Electronics * Ben Dooks * * S3C - USB host port information + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
*/ #ifndef __ASM_ARCH_USBCONTROL_H @@ -28,7 +31,7 @@ struct s3c2410_hcd_info { void (*report_oc)(struct s3c2410_hcd_info *, int ports); }; -static inline void s3c2410_usb_report_oc(struct s3c2410_hcd_info *info, int ports) +static void inline s3c2410_usb_report_oc(struct s3c2410_hcd_info *info, int ports) { if (info->report_oc != NULL) { (info->report_oc)(info, ports); diff --git a/include/linux/platform_data/usb-omap.h b/include/linux/platform_data/usb-omap.h index 5e70d66703..fa579b4c66 100644 --- a/include/linux/platform_data/usb-omap.h +++ b/include/linux/platform_data/usb-omap.h @@ -1,7 +1,7 @@ /* * usb-omap.h - Platform data for the various OMAP USB IPs * - * Copyright (C) 2012 Texas Instruments Incorporated - https://www.ti.com + * Copyright (C) 2012 Texas Instruments Incorporated - http://www.ti.com * * This software is distributed under the terms of the GNU General Public * License ("GPL") version 2, as published by the Free Software Foundation. diff --git a/include/linux/platform_data/usb-omap1.h b/include/linux/platform_data/usb-omap1.h index 878e572a78..43b5ce139c 100644 --- a/include/linux/platform_data/usb-omap1.h +++ b/include/linux/platform_data/usb-omap1.h @@ -48,8 +48,6 @@ struct omap_usb_config { u32 (*usb2_init)(unsigned nwires, unsigned alt_pingroup); int (*ocpi_enable)(void); - - void (*lb_reset)(void); }; #endif /* __LINUX_USB_OMAP1_H */ diff --git a/include/linux/platform_data/usb-pxa3xx-ulpi.h b/include/linux/platform_data/usb-pxa3xx-ulpi.h index 4d31a5cbde..9d82cb65ea 100644 --- a/include/linux/platform_data/usb-pxa3xx-ulpi.h +++ b/include/linux/platform_data/usb-pxa3xx-ulpi.h @@ -1,10 +1,13 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * PXA3xx U2D header * * Copyright (C) 2010 CompuLab Ltd. * * Igor Grinberg + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
*/ #ifndef __PXA310_U2D__ #define __PXA310_U2D__ diff --git a/include/linux/platform_data/usb-s3c2410_udc.h b/include/linux/platform_data/usb-s3c2410_udc.h index 07394819d0..de8e2288a5 100644 --- a/include/linux/platform_data/usb-s3c2410_udc.h +++ b/include/linux/platform_data/usb-s3c2410_udc.h @@ -1,8 +1,13 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* arch/arm/plat-samsung/include/plat/udc.h * * Copyright (c) 2005 Arnaud Patard * + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * * Changelog: * 14-Mar-2005 RTP Created file * 02-Aug-2005 RTP File rename diff --git a/include/linux/platform_data/usb3503.h b/include/linux/platform_data/usb3503.h index d01ef97ddf..1d1b6ef871 100644 --- a/include/linux/platform_data/usb3503.h +++ b/include/linux/platform_data/usb3503.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef __USB3503_H__ #define __USB3503_H__ @@ -17,6 +16,9 @@ enum usb3503_mode { struct usb3503_platform_data { enum usb3503_mode initial_mode; u8 port_off_mask; + int gpio_intn; + int gpio_connect; + int gpio_reset; }; #endif diff --git a/include/linux/platform_data/ux500_wdt.h b/include/linux/platform_data/ux500_wdt.h index de6a4ad41e..1689ff4c3b 100644 --- a/include/linux/platform_data/ux500_wdt.h +++ b/include/linux/platform_data/ux500_wdt.h @@ -1,7 +1,8 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright (C) ST Ericsson SA 2011 * + * License Terms: GNU General Public License v2 + * * STE Ux500 Watchdog platform data */ #ifndef __UX500_WDT_H diff --git a/include/linux/platform_data/video-clcd-versatile.h b/include/linux/platform_data/video-clcd-versatile.h new file mode 100644 index 0000000000..09ccf182af --- /dev/null +++ b/include/linux/platform_data/video-clcd-versatile.h @@ -0,0 +1,27 @@ +#ifndef PLAT_CLCD_H +#define PLAT_CLCD_H + +#ifdef CONFIG_PLAT_VERSATILE_CLCD +struct 
clcd_panel *versatile_clcd_get_panel(const char *); +int versatile_clcd_setup_dma(struct clcd_fb *, unsigned long); +int versatile_clcd_mmap_dma(struct clcd_fb *, struct vm_area_struct *); +void versatile_clcd_remove_dma(struct clcd_fb *); +#else +static inline struct clcd_panel *versatile_clcd_get_panel(const char *s) +{ + return NULL; +} +static inline int versatile_clcd_setup_dma(struct clcd_fb *fb, unsigned long framesize) +{ + return -ENODEV; +} +static inline int versatile_clcd_mmap_dma(struct clcd_fb *fb, struct vm_area_struct *vm) +{ + return -ENODEV; +} +static inline void versatile_clcd_remove_dma(struct clcd_fb *fb) +{ +} +#endif + +#endif diff --git a/include/linux/platform_data/video-ep93xx.h b/include/linux/platform_data/video-ep93xx.h index a6f3ccdec1..699ac41093 100644 --- a/include/linux/platform_data/video-ep93xx.h +++ b/include/linux/platform_data/video-ep93xx.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef __VIDEO_EP93XX_H #define __VIDEO_EP93XX_H diff --git a/include/linux/platform_data/video-imxfb.h b/include/linux/platform_data/video-imxfb.h index 02812651af..18e9083245 100644 --- a/include/linux/platform_data/video-imxfb.h +++ b/include/linux/platform_data/video-imxfb.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ /* * This structure describes the machine which we are running on. 
*/ @@ -48,10 +47,13 @@ #define LSCR1_GRAY2(x) (((x) & 0xf) << 4) #define LSCR1_GRAY1(x) (((x) & 0xf)) +#define DMACR_BURST (1 << 31) +#define DMACR_HM(x) (((x) & 0xf) << 16) +#define DMACR_TM(x) ((x) & 0xf) + struct imx_fb_videomode { struct fb_videomode mode; u32 pcr; - bool aus_mode; unsigned char bpp; }; diff --git a/include/linux/platform_data/video-mx3fb.h b/include/linux/platform_data/video-mx3fb.h index d03dc322a6..fdbe600015 100644 --- a/include/linux/platform_data/video-mx3fb.h +++ b/include/linux/platform_data/video-mx3fb.h @@ -1,7 +1,10 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright (C) 2008 * Guennadi Liakhovetski, DENX Software Engineering, + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. */ #ifndef __ASM_ARCH_MX3FB_H__ diff --git a/include/linux/platform_data/video-nuc900fb.h b/include/linux/platform_data/video-nuc900fb.h new file mode 100644 index 0000000000..cec5ece765 --- /dev/null +++ b/include/linux/platform_data/video-nuc900fb.h @@ -0,0 +1,83 @@ +/* linux/include/asm/arch-nuc900/fb.h + * + * Copyright (c) 2008 Nuvoton technology corporation + * All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * Changelog: + * + * 2008/08/26 vincen.zswan modify this file for LCD. 
+ */ + +#ifndef __ASM_ARM_FB_H +#define __ASM_ARM_FB_H + + + +/* LCD Controller Hardware Desc */ +struct nuc900fb_hw { + unsigned int lcd_dccs; + unsigned int lcd_device_ctrl; + unsigned int lcd_mpulcd_cmd; + unsigned int lcd_int_cs; + unsigned int lcd_crtc_size; + unsigned int lcd_crtc_dend; + unsigned int lcd_crtc_hr; + unsigned int lcd_crtc_hsync; + unsigned int lcd_crtc_vr; + unsigned int lcd_va_baddr0; + unsigned int lcd_va_baddr1; + unsigned int lcd_va_fbctrl; + unsigned int lcd_va_scale; + unsigned int lcd_va_test; + unsigned int lcd_va_win; + unsigned int lcd_va_stuff; +}; + +/* LCD Display Description */ +struct nuc900fb_display { + /* LCD Image type */ + unsigned type; + + /* LCD Screen Size */ + unsigned short width; + unsigned short height; + + /* LCD Screen Info */ + unsigned short xres; + unsigned short yres; + unsigned short bpp; + + unsigned long pixclock; + unsigned short left_margin; + unsigned short right_margin; + unsigned short hsync_len; + unsigned short upper_margin; + unsigned short lower_margin; + unsigned short vsync_len; + + /* hardware special register value */ + unsigned int dccs; + unsigned int devctl; + unsigned int fbctrl; + unsigned int scale; +}; + +struct nuc900fb_mach_info { + struct nuc900fb_display *displays; + unsigned num_displays; + unsigned default_display; + /* GPIO Setting Info */ + unsigned gpio_dir; + unsigned gpio_dir_mask; + unsigned gpio_data; + unsigned gpio_data_mask; +}; + +extern void __init nuc900_fb_set_platdata(struct nuc900fb_mach_info *); + +#endif /* __ASM_ARM_FB_H */ diff --git a/include/linux/platform_data/video-pxafb.h b/include/linux/platform_data/video-pxafb.h index b3d5747783..07c6c1e153 100644 --- a/include/linux/platform_data/video-pxafb.h +++ b/include/linux/platform_data/video-pxafb.h @@ -1,10 +1,13 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * Support for the xscale frame buffer. 
* * Author: Jean-Frederic Clere * Created: Sep 22, 2003 * Copyright: jfclere@sinix.net + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. */ #include diff --git a/include/linux/platform_data/video_s3c.h b/include/linux/platform_data/video_s3c.h index dd7747ba32..48883995f4 100644 --- a/include/linux/platform_data/video_s3c.h +++ b/include/linux/platform_data/video_s3c.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef __PLATFORM_DATA_VIDEO_S3C #define __PLATFORM_DATA_VIDEO_S3C diff --git a/include/linux/platform_data/voltage-omap.h b/include/linux/platform_data/voltage-omap.h index 43e8da9fb4..5be4d5def4 100644 --- a/include/linux/platform_data/voltage-omap.h +++ b/include/linux/platform_data/voltage-omap.h @@ -1,8 +1,11 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * OMAP Voltage Management Routines * * Copyright (C) 2011, Texas Instruments, Inc. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. */ #ifndef __ARCH_ARM_OMAP_VOLTAGE_H diff --git a/include/linux/platform_data/wiznet.h b/include/linux/platform_data/wiznet.h index 1154c4db8a..b5d8c192d8 100644 --- a/include/linux/platform_data/wiznet.h +++ b/include/linux/platform_data/wiznet.h @@ -1,6 +1,7 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * Ethernet driver for the WIZnet W5x00 chip. + * + * Licensed under the GPL-2 or later. 
*/ #ifndef PLATFORM_DATA_WIZNET_H diff --git a/include/linux/platform_data/wkup_m3.h b/include/linux/platform_data/wkup_m3.h index 629660ff58..3f1d77effd 100644 --- a/include/linux/platform_data/wkup_m3.h +++ b/include/linux/platform_data/wkup_m3.h @@ -1,10 +1,18 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * TI Wakeup M3 remote processor platform data * * Copyright (C) 2014-2015 Texas Instruments, Inc. * * Dave Gerlach + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. */ #ifndef _LINUX_PLATFORM_DATA_WKUP_M3_H diff --git a/include/linux/platform_data/zforce_ts.h b/include/linux/platform_data/zforce_ts.h index 2463a4a856..7bdece8ef3 100644 --- a/include/linux/platform_data/zforce_ts.h +++ b/include/linux/platform_data/zforce_ts.h @@ -1,7 +1,15 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* drivers/input/touchscreen/zforce.c * * Copyright (C) 2012-2013 MundoReader S.L. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
*/ #ifndef _LINUX_INPUT_ZFORCE_TS_H diff --git a/include/linux/platform_device.h b/include/linux/platform_device.h index 7c96f169d2..98c2a7c710 100644 --- a/include/linux/platform_device.h +++ b/include/linux/platform_device.h @@ -1,32 +1,30 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * platform_device.h - generic, centralized driver model * * Copyright (c) 2001-2003 Patrick Mochel * - * See Documentation/driver-api/driver-model/ for more information. + * This file is released under the GPLv2 + * + * See Documentation/driver-model/ for more information. */ #ifndef _PLATFORM_DEVICE_H_ #define _PLATFORM_DEVICE_H_ #include +#include #define PLATFORM_DEVID_NONE (-1) #define PLATFORM_DEVID_AUTO (-2) -struct irq_affinity; struct mfd_cell; struct property_entry; -struct platform_device_id; struct platform_device { const char *name; int id; bool id_auto; struct device dev; - u64 platform_dma_mask; - struct device_dma_parameters dma_parms; u32 num_resources; struct resource *resource; @@ -42,7 +40,6 @@ struct platform_device { #define platform_get_device_id(pdev) ((pdev)->id_entry) -#define dev_is_platform(dev) ((dev)->bus == &platform_bus_type) #define to_platform_device(x) container_of((x), struct platform_device, dev) extern int platform_device_register(struct platform_device *); @@ -51,43 +48,20 @@ extern void platform_device_unregister(struct platform_device *); extern struct bus_type platform_bus_type; extern struct device platform_bus; +extern void arch_setup_pdev_archdata(struct platform_device *); extern struct resource *platform_get_resource(struct platform_device *, unsigned int, unsigned int); -extern struct resource *platform_get_mem_or_io(struct platform_device *, - unsigned int); - -extern struct device * -platform_find_device_by_driver(struct device *start, - const struct device_driver *drv); -extern void __iomem * -devm_platform_get_and_ioremap_resource(struct platform_device *pdev, - unsigned int index, struct resource **res); -extern void __iomem * 
-devm_platform_ioremap_resource(struct platform_device *pdev, - unsigned int index); -extern void __iomem * -devm_platform_ioremap_resource_byname(struct platform_device *pdev, - const char *name); extern int platform_get_irq(struct platform_device *, unsigned int); -extern int platform_get_irq_optional(struct platform_device *, unsigned int); extern int platform_irq_count(struct platform_device *); -extern int devm_platform_get_irqs_affinity(struct platform_device *dev, - struct irq_affinity *affd, - unsigned int minvec, - unsigned int maxvec, - int **irqs); extern struct resource *platform_get_resource_byname(struct platform_device *, unsigned int, const char *); extern int platform_get_irq_byname(struct platform_device *, const char *); -extern int platform_get_irq_byname_optional(struct platform_device *dev, - const char *name); extern int platform_add_devices(struct platform_device **, int); struct platform_device_info { struct device *parent; struct fwnode_handle *fwnode; - bool of_node_reused; const char *name; int id; @@ -99,7 +73,7 @@ struct platform_device_info { size_t size_data; u64 dma_mask; - const struct property_entry *properties; + struct property_entry *properties; }; extern struct platform_device *platform_device_register_full( const struct platform_device_info *pdevinfo); @@ -197,6 +171,8 @@ extern int platform_device_add_resources(struct platform_device *pdev, unsigned int num); extern int platform_device_add_data(struct platform_device *pdev, const void *data, size_t size); +extern int platform_device_add_properties(struct platform_device *pdev, + struct property_entry *properties); extern int platform_device_add(struct platform_device *pdev); extern void platform_device_del(struct platform_device *pdev); extern void platform_device_put(struct platform_device *pdev); @@ -308,6 +284,58 @@ void platform_unregister_drivers(struct platform_driver * const *drivers, #define platform_register_drivers(drivers, count) \ 
__platform_register_drivers(drivers, count, THIS_MODULE) +/* early platform driver interface */ +struct early_platform_driver { + const char *class_str; + struct platform_driver *pdrv; + struct list_head list; + int requested_id; + char *buffer; + int bufsize; +}; + +#define EARLY_PLATFORM_ID_UNSET -2 +#define EARLY_PLATFORM_ID_ERROR -3 + +extern int early_platform_driver_register(struct early_platform_driver *epdrv, + char *buf); +extern void early_platform_add_devices(struct platform_device **devs, int num); + +static inline int is_early_platform_device(struct platform_device *pdev) +{ + return !pdev->dev.driver; +} + +extern void early_platform_driver_register_all(char *class_str); +extern int early_platform_driver_probe(char *class_str, + int nr_probe, int user_only); +extern void early_platform_cleanup(void); + +#define early_platform_init(class_string, platdrv) \ + early_platform_init_buffer(class_string, platdrv, NULL, 0) + +#ifndef MODULE +#define early_platform_init_buffer(class_string, platdrv, buf, bufsiz) \ +static __initdata struct early_platform_driver early_driver = { \ + .class_str = class_string, \ + .buffer = buf, \ + .bufsize = bufsiz, \ + .pdrv = platdrv, \ + .requested_id = EARLY_PLATFORM_ID_UNSET, \ +}; \ +static int __init early_platform_driver_setup_func(char *buffer) \ +{ \ + return early_platform_driver_register(&early_driver, buffer); \ +} \ +early_param(class_string, early_platform_driver_setup_func) +#else /* MODULE */ +#define early_platform_init_buffer(class_string, platdrv, buf, bufsiz) \ +static inline char *early_platform_driver_setup_func(void) \ +{ \ + return bufsiz ? 
buf : NULL; \ +} +#endif /* MODULE */ + #ifdef CONFIG_SUSPEND extern int platform_pm_suspend(struct device *dev); extern int platform_pm_resume(struct device *dev); @@ -328,8 +356,6 @@ extern int platform_pm_restore(struct device *dev); #define platform_pm_restore NULL #endif -extern int platform_dma_configure(struct device *dev); - #ifdef CONFIG_PM_SLEEP #define USE_PLATFORM_PM_SLEEP_OPS \ .suspend = platform_pm_suspend, \ @@ -342,19 +368,4 @@ extern int platform_dma_configure(struct device *dev); #define USE_PLATFORM_PM_SLEEP_OPS #endif -#ifndef CONFIG_SUPERH -/* - * REVISIT: This stub is needed for all non-SuperH users of early platform - * drivers. It should go away once we introduce the new platform_device-based - * early driver framework. - */ -static inline int is_sh_early_platform_device(struct platform_device *pdev) -{ - return 0; -} -#endif /* CONFIG_SUPERH */ - -/* For now only SuperH uses it */ -void early_platform_cleanup(void); - #endif /* _PLATFORM_DEVICE_H_ */ diff --git a/include/linux/plist.h b/include/linux/plist.h index 66bab1bca3..97883604a3 100644 --- a/include/linux/plist.h +++ b/include/linux/plist.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * Descending-priority-sorted double-linked list * @@ -13,6 +12,8 @@ * Simplifications of the original code by * Oleg Nesterov * + * Licensed under the FSF's GNU Public License v2 or later. + * * Based on simple lists (include/linux/list.h). * * This is a priority-sorted list of nodes; each node has a @@ -69,6 +70,7 @@ * is lowest priority. * * No locking is done, up to the caller. 
+ * */ #ifndef _LINUX_PLIST_H_ #define _LINUX_PLIST_H_ @@ -229,7 +231,7 @@ static inline int plist_node_empty(const struct plist_node *node) * @type: the type of the struct this is embedded in * @member: the name of the list_head within the struct */ -#ifdef CONFIG_DEBUG_PLIST +#ifdef CONFIG_DEBUG_PI_LIST # define plist_first_entry(head, type, member) \ ({ \ WARN_ON(plist_head_empty(head)); \ @@ -246,7 +248,7 @@ static inline int plist_node_empty(const struct plist_node *node) * @type: the type of the struct this is embedded in * @member: the name of the list_head within the struct */ -#ifdef CONFIG_DEBUG_PLIST +#ifdef CONFIG_DEBUG_PI_LIST # define plist_last_entry(head, type, member) \ ({ \ WARN_ON(plist_head_empty(head)); \ diff --git a/include/linux/pm-trace.h b/include/linux/pm-trace.h index b8604f8847..ecbde7a554 100644 --- a/include/linux/pm-trace.h +++ b/include/linux/pm-trace.h @@ -1,18 +1,11 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef PM_TRACE_H #define PM_TRACE_H -#include #ifdef CONFIG_PM_TRACE #include +#include extern int pm_trace_enabled; -extern bool pm_trace_rtc_abused; - -static inline bool pm_trace_rtc_valid(void) -{ - return !pm_trace_rtc_abused; -} static inline int pm_trace_is_enabled(void) { @@ -31,7 +24,6 @@ extern int show_trace_dev_match(char *buf, size_t size); #else -static inline bool pm_trace_rtc_valid(void) { return true; } static inline int pm_trace_is_enabled(void) { return 0; } #define TRACE_DEVICE(dev) do { } while (0) diff --git a/include/linux/pm.h b/include/linux/pm.h index 1d8209c096..dbf4a341b8 100644 --- a/include/linux/pm.h +++ b/include/linux/pm.h @@ -1,8 +1,21 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * pm.h - Power management interface * * Copyright (C) 2000 Andrew Henroid + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your 
option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #ifndef _LINUX_PM_H @@ -13,7 +26,6 @@ #include #include #include -#include #include /* @@ -39,6 +51,7 @@ static inline void pm_vt_switch_unregister(struct device *dev) * Device power management */ +struct device; #ifdef CONFIG_PM extern const char power_group_name[]; /* = "power" */ @@ -51,7 +64,24 @@ typedef struct pm_message { } pm_message_t; /** - * struct dev_pm_ops - device PM callbacks. + * struct dev_pm_ops - device PM callbacks + * + * Several device power state transitions are externally visible, affecting + * the state of pending I/O queues and (for drivers that touch hardware) + * interrupts, wakeups, DMA, and other hardware state. There may also be + * internal transitions to various low-power modes which are transparent + * to the rest of the driver stack (such as a driver that's ON gating off + * clocks which are not in active use). + * + * The externally visible transitions are handled with the help of callbacks + * included in this structure in such a way that two levels of callbacks are + * involved. First, the PM core executes callbacks provided by PM domains, + * device types, classes and bus types. They are the subsystem-level callbacks + * supposed to execute callbacks provided by device drivers, although they may + * choose not to do that. 
If the driver callbacks are executed, they have to + * collaborate with the subsystem-level callbacks to achieve the goals + * appropriate for the given system transition, given transition phase and the + * subsystem the device belongs to. * * @prepare: The principal role of this callback is to prevent new children of * the device from being registered after it has returned (the driver's @@ -210,6 +240,34 @@ typedef struct pm_message { * driver's interrupt handler, which is guaranteed not to run while * @restore_noirq() is being executed. Analogous to @resume_noirq(). * + * All of the above callbacks, except for @complete(), return error codes. + * However, the error codes returned by the resume operations, @resume(), + * @thaw(), @restore(), @resume_noirq(), @thaw_noirq(), and @restore_noirq(), do + * not cause the PM core to abort the resume transition during which they are + * returned. The error codes returned in those cases are only printed by the PM + * core to the system logs for debugging purposes. Still, it is recommended + * that drivers only return error codes from their resume methods in case of an + * unrecoverable failure (i.e. when the device being handled refuses to resume + * and becomes unusable) to allow us to modify the PM core in the future, so + * that it can avoid attempting to handle devices that failed to resume and + * their children. + * + * It is allowed to unregister devices while the above callbacks are being + * executed. However, a callback routine must NOT try to unregister the device + * it was called for, although it may unregister children of that device (for + * example, if it detects that a child was unplugged while the system was + * asleep). + * + * Refer to Documentation/power/devices.txt for more information about the role + * of the above callbacks in the system suspend process. + * + * There also are callbacks related to runtime power management of devices. 
+ * Again, these callbacks are executed by the PM core only for subsystems + * (PM domains, device types, classes and bus types) and the subsystem-level + * callbacks are supposed to invoke the driver callbacks. Moreover, the exact + * actions to be performed by a device driver's callbacks generally depend on + * the platform and subsystem the device belongs to. + * * @runtime_suspend: Prepare the device for a condition in which it won't be * able to communicate with the CPU(s) and RAM due to power management. * This need not mean that the device should be put into a low-power state. @@ -229,51 +287,11 @@ typedef struct pm_message { * Check these conditions, and return 0 if it's appropriate to let the PM * core queue a suspend request for the device. * - * Several device power state transitions are externally visible, affecting - * the state of pending I/O queues and (for drivers that touch hardware) - * interrupts, wakeups, DMA, and other hardware state. There may also be - * internal transitions to various low-power modes which are transparent - * to the rest of the driver stack (such as a driver that's ON gating off - * clocks which are not in active use). + * Refer to Documentation/power/runtime_pm.txt for more information about the + * role of the above callbacks in device runtime power management. * - * The externally visible transitions are handled with the help of callbacks - * included in this structure in such a way that, typically, two levels of - * callbacks are involved. First, the PM core executes callbacks provided by PM - * domains, device types, classes and bus types. They are the subsystem-level - * callbacks expected to execute callbacks provided by device drivers, although - * they may choose not to do that. If the driver callbacks are executed, they - * have to collaborate with the subsystem-level callbacks to achieve the goals - * appropriate for the given system transition, given transition phase and the - * subsystem the device belongs to. 
- * - * All of the above callbacks, except for @complete(), return error codes. - * However, the error codes returned by @resume(), @thaw(), @restore(), - * @resume_noirq(), @thaw_noirq(), and @restore_noirq(), do not cause the PM - * core to abort the resume transition during which they are returned. The - * error codes returned in those cases are only printed to the system logs for - * debugging purposes. Still, it is recommended that drivers only return error - * codes from their resume methods in case of an unrecoverable failure (i.e. - * when the device being handled refuses to resume and becomes unusable) to - * allow the PM core to be modified in the future, so that it can avoid - * attempting to handle devices that failed to resume and their children. - * - * It is allowed to unregister devices while the above callbacks are being - * executed. However, a callback routine MUST NOT try to unregister the device - * it was called for, although it may unregister children of that device (for - * example, if it detects that a child was unplugged while the system was - * asleep). - * - * There also are callbacks related to runtime power management of devices. - * Again, as a rule these callbacks are executed by the PM core for subsystems - * (PM domains, device types, classes and bus types) and the subsystem-level - * callbacks are expected to invoke the driver callbacks. Moreover, the exact - * actions to be performed by a device driver's callbacks generally depend on - * the platform and subsystem the device belongs to. - * - * Refer to Documentation/power/runtime_pm.rst for more information about the - * role of the @runtime_suspend(), @runtime_resume() and @runtime_idle() - * callbacks in device runtime power management. */ + struct dev_pm_ops { int (*prepare)(struct device *dev); void (*complete)(struct device *dev); @@ -350,7 +368,7 @@ struct dev_pm_ops { * to RAM and hibernation. 
*/ #define SIMPLE_DEV_PM_OPS(name, suspend_fn, resume_fn) \ -const struct dev_pm_ops __maybe_unused name = { \ +const struct dev_pm_ops name = { \ SET_SYSTEM_SLEEP_PM_OPS(suspend_fn, resume_fn) \ } @@ -368,18 +386,12 @@ const struct dev_pm_ops __maybe_unused name = { \ * .runtime_resume(), respectively (and analogously for hibernation). */ #define UNIVERSAL_DEV_PM_OPS(name, suspend_fn, resume_fn, idle_fn) \ -const struct dev_pm_ops __maybe_unused name = { \ +const struct dev_pm_ops name = { \ SET_SYSTEM_SLEEP_PM_OPS(suspend_fn, resume_fn) \ SET_RUNTIME_PM_OPS(suspend_fn, resume_fn, idle_fn) \ } -#ifdef CONFIG_PM -#define pm_ptr(_ptr) (_ptr) -#else -#define pm_ptr(_ptr) NULL -#endif - -/* +/** * PM_EVENT_ messages * * The following PM_EVENT_ messages are defined for the internal use of the PM @@ -475,7 +487,7 @@ const struct dev_pm_ops __maybe_unused name = { \ #define PMSG_IS_AUTO(msg) (((msg).event & PM_EVENT_AUTO) != 0) -/* +/** * Device run-time power management status. * * These status labels are used internally by the PM core to indicate the @@ -505,7 +517,7 @@ enum rpm_status { RPM_SUSPENDING, }; -/* +/** * Device run-time power management request types. * * RPM_REQ_NONE Do nothing. @@ -536,8 +548,6 @@ struct pm_subsys_data { spinlock_t lock; unsigned int refcount; #ifdef CONFIG_PM_CLK - unsigned int clock_op_might_sleep; - struct mutex clock_mutex; struct list_head clock_list; #endif #ifdef CONFIG_PM_GENERIC_DOMAINS @@ -545,37 +555,16 @@ struct pm_subsys_data { #endif }; -/* - * Driver flags to control system suspend/resume behavior. - * - * These flags can be set by device drivers at the probe time. They need not be - * cleared by the drivers as the driver core will take care of that. - * - * NO_DIRECT_COMPLETE: Do not apply direct-complete optimization to the device. - * SMART_PREPARE: Take the driver ->prepare callback return value into account. - * SMART_SUSPEND: Avoid resuming the device from runtime suspend. 
- * MAY_SKIP_RESUME: Allow driver "noirq" and "early" callbacks to be skipped. - * - * See Documentation/driver-api/pm/devices.rst for details. - */ -#define DPM_FLAG_NO_DIRECT_COMPLETE BIT(0) -#define DPM_FLAG_SMART_PREPARE BIT(1) -#define DPM_FLAG_SMART_SUSPEND BIT(2) -#define DPM_FLAG_MAY_SKIP_RESUME BIT(3) - struct dev_pm_info { pm_message_t power_state; unsigned int can_wakeup:1; unsigned int async_suspend:1; - bool in_dpm_list:1; /* Owned by the PM core */ bool is_prepared:1; /* Owned by the PM core */ bool is_suspended:1; /* Ditto */ bool is_noirq_suspended:1; bool is_late_suspended:1; - bool no_pm:1; bool early_init:1; /* Owned by the PM core */ bool direct_complete:1; /* Owned by the PM core */ - u32 driver_flags; spinlock_t lock; #ifdef CONFIG_PM_SLEEP struct list_head entry; @@ -584,14 +573,12 @@ struct dev_pm_info { bool wakeup_path:1; bool syscore:1; bool no_pm_callbacks:1; /* Owned by the PM core */ - unsigned int must_resume:1; /* Owned by the PM core */ - unsigned int may_skip_resume:1; /* Set by subsystems */ #else unsigned int should_wakeup:1; #endif #ifdef CONFIG_PM - struct hrtimer suspend_timer; - u64 timer_expires; + struct timer_list suspend_timer; + unsigned long timer_expires; struct work_struct work; wait_queue_head_t wait_queue; struct wake_irq *wakeirq; @@ -601,7 +588,7 @@ struct dev_pm_info { unsigned int idle_notification:1; unsigned int request_pending:1; unsigned int deferred_resume:1; - unsigned int needs_force_resume:1; + unsigned int run_wake:1; unsigned int runtime_auto:1; bool ignore_children:1; unsigned int no_callbacks:1; @@ -609,46 +596,42 @@ struct dev_pm_info { unsigned int use_autosuspend:1; unsigned int timer_autosuspends:1; unsigned int memalloc_noio:1; - unsigned int links_count; enum rpm_request request; enum rpm_status runtime_status; int runtime_error; int autosuspend_delay; - u64 last_busy; - u64 active_time; - u64 suspended_time; - u64 accounting_timestamp; + unsigned long last_busy; + unsigned long active_jiffies; 
+ unsigned long suspended_jiffies; + unsigned long accounting_timestamp; #endif struct pm_subsys_data *subsys_data; /* Owned by the subsystem. */ void (*set_latency_tolerance)(struct device *, s32); struct dev_pm_qos *qos; }; +extern void update_pm_runtime_accounting(struct device *dev); extern int dev_pm_get_subsys_data(struct device *dev); extern void dev_pm_put_subsys_data(struct device *dev); -/** - * struct dev_pm_domain - power management domain representation. +/* + * Power domains provide callbacks that are executed during system suspend, + * hibernation, system resume and during runtime PM transitions along with + * subsystem-level and driver-level callbacks. * - * @ops: Power management operations associated with this domain. - * @start: Called when a user needs to start the device via the domain. * @detach: Called when removing a device from the domain. * @activate: Called before executing probe routines for bus types and drivers. * @sync: Called after successful driver probe. * @dismiss: Called after unsuccessful driver probe and after driver removal. - * - * Power domains provide callbacks that are executed during system suspend, - * hibernation, system resume and during runtime PM transitions instead of - * subsystem-level and driver-level callbacks. 
*/ struct dev_pm_domain { struct dev_pm_ops ops; - int (*start)(struct device *dev); void (*detach)(struct device *dev, bool power_off); int (*activate)(struct device *dev); void (*sync)(struct device *dev); void (*dismiss)(struct device *dev); }; +typedef struct dev_pm_domain __no_const dev_pm_domain_no_const; /* * The PM_EVENT_ messages are also used by drivers implementing the legacy @@ -751,9 +734,7 @@ extern int pm_generic_poweroff_noirq(struct device *dev); extern int pm_generic_poweroff_late(struct device *dev); extern int pm_generic_poweroff(struct device *dev); extern void pm_generic_complete(struct device *dev); - -extern bool dev_pm_skip_resume(struct device *dev); -extern bool dev_pm_skip_suspend(struct device *dev); +extern void pm_complete_with_resume_check(struct device *dev); #else /* !CONFIG_PM_SLEEP */ diff --git a/include/linux/pm2301_charger.h b/include/linux/pm2301_charger.h index b8fac96f05..85c16defe1 100644 --- a/include/linux/pm2301_charger.h +++ b/include/linux/pm2301_charger.h @@ -1,10 +1,23 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * PM2301 charger driver. * * Copyright (C) 2012 ST Ericsson Corporation * * Contact: Olivier LAUNAY (olivier.launay@stericsson.com + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA + * 02110-1301 USA */ #ifndef __LINUX_PM2301_H diff --git a/include/linux/pm_clock.h b/include/linux/pm_clock.h index ada3a0ab10..09779b0ae7 100644 --- a/include/linux/pm_clock.h +++ b/include/linux/pm_clock.h @@ -1,8 +1,9 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * pm_clock.h - Definitions and headers related to device clocks. * * Copyright (C) 2011 Rafael J. Wysocki , Renesas Electronics Corp. + * + * This file is released under the GPLv2. */ #ifndef _LINUX_PM_CLOCK_H @@ -47,7 +48,6 @@ extern void pm_clk_remove(struct device *dev, const char *con_id); extern void pm_clk_remove_clk(struct device *dev, struct clk *clk); extern int pm_clk_suspend(struct device *dev); extern int pm_clk_resume(struct device *dev); -extern int devm_pm_clk_create(struct device *dev); #else static inline bool pm_clk_no_clocks(struct device *dev) { @@ -84,10 +84,6 @@ static inline void pm_clk_remove(struct device *dev, const char *con_id) static inline void pm_clk_remove_clk(struct device *dev, struct clk *clk) { } -static inline int devm_pm_clk_create(struct device *dev) -{ - return -EINVAL; -} #endif #ifdef CONFIG_HAVE_CLK diff --git a/include/linux/pm_domain.h b/include/linux/pm_domain.h index 67017c9390..2b98af0b89 100644 --- a/include/linux/pm_domain.h +++ b/include/linux/pm_domain.h @@ -1,84 +1,29 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * pm_domain.h - Definitions and headers related to device power domains. * * Copyright (C) 2011 Rafael J. Wysocki , Renesas Electronics Corp. + * + * This file is released under the GPLv2. */ #ifndef _LINUX_PM_DOMAIN_H #define _LINUX_PM_DOMAIN_H #include -#include #include #include #include #include #include -#include -#include -/* - * Flags to control the behaviour of a genpd. 
- * - * These flags may be set in the struct generic_pm_domain's flags field by a - * genpd backend driver. The flags must be set before it calls pm_genpd_init(), - * which initializes a genpd. - * - * GENPD_FLAG_PM_CLK: Instructs genpd to use the PM clk framework, - * while powering on/off attached devices. - * - * GENPD_FLAG_IRQ_SAFE: This informs genpd that its backend callbacks, - * ->power_on|off(), doesn't sleep. Hence, these - * can be invoked from within atomic context, which - * enables genpd to power on/off the PM domain, - * even when pm_runtime_is_irq_safe() returns true, - * for any of its attached devices. Note that, a - * genpd having this flag set, requires its - * masterdomains to also have it set. - * - * GENPD_FLAG_ALWAYS_ON: Instructs genpd to always keep the PM domain - * powered on. - * - * GENPD_FLAG_ACTIVE_WAKEUP: Instructs genpd to keep the PM domain powered - * on, in case any of its attached devices is used - * in the wakeup path to serve system wakeups. - * - * GENPD_FLAG_CPU_DOMAIN: Instructs genpd that it should expect to get - * devices attached, which may belong to CPUs or - * possibly have subdomains with CPUs attached. - * This flag enables the genpd backend driver to - * deploy idle power management support for CPUs - * and groups of CPUs. Note that, the backend - * driver must then comply with the so called, - * last-man-standing algorithm, for the CPUs in the - * PM domain. - * - * GENPD_FLAG_RPM_ALWAYS_ON: Instructs genpd to always keep the PM domain - * powered on except for system suspend. - * - * GENPD_FLAG_MIN_RESIDENCY: Enable the genpd governor to consider its - * components' next wakeup when determining the - * optimal idle state. 
- */ -#define GENPD_FLAG_PM_CLK (1U << 0) -#define GENPD_FLAG_IRQ_SAFE (1U << 1) -#define GENPD_FLAG_ALWAYS_ON (1U << 2) -#define GENPD_FLAG_ACTIVE_WAKEUP (1U << 3) -#define GENPD_FLAG_CPU_DOMAIN (1U << 4) -#define GENPD_FLAG_RPM_ALWAYS_ON (1U << 5) -#define GENPD_FLAG_MIN_RESIDENCY (1U << 6) +/* Defines used for the flags field in the struct generic_pm_domain */ +#define GENPD_FLAG_PM_CLK (1U << 0) /* PM domain uses PM clk */ + +#define GENPD_MAX_NUM_STATES 8 /* Number of possible low power states */ enum gpd_status { - GENPD_STATE_ON = 0, /* PM domain is on */ - GENPD_STATE_OFF, /* PM domain is off */ -}; - -enum genpd_notication { - GENPD_NOTIFY_PRE_OFF = 0, - GENPD_NOTIFY_OFF, - GENPD_NOTIFY_PRE_ON, - GENPD_NOTIFY_ON, + GPD_STATE_ACTIVE = 0, /* PM domain is active */ + GPD_STATE_POWER_OFF, /* PM domain is off */ }; struct dev_power_governor { @@ -89,30 +34,21 @@ struct dev_power_governor { struct gpd_dev_ops { int (*start)(struct device *dev); int (*stop)(struct device *dev); -}; + bool (*active_wakeup)(struct device *dev); +} __no_const; struct genpd_power_state { s64 power_off_latency_ns; s64 power_on_latency_ns; - s64 residency_ns; - u64 usage; - u64 rejected; - struct fwnode_handle *fwnode; - ktime_t idle_time; - void *data; }; -struct genpd_lock_ops; -struct dev_pm_opp; -struct opp_table; - struct generic_pm_domain { - struct device dev; struct dev_pm_domain domain; /* PM domain operations */ struct list_head gpd_list_node; /* Node in the global PM domains list */ - struct list_head parent_links; /* Links with PM domain as a parent */ - struct list_head child_links; /* Links with PM domain as a child */ + struct list_head master_links; /* Links with PM domain as a master */ + struct list_head slave_links; /* Links with PM domain as a slave */ struct list_head dev_list; /* List of devices */ + struct mutex lock; struct dev_power_governor *gov; struct work_struct power_off_work; struct fwnode_handle *provider; /* Identity of the domain provider */ @@ -123,42 
+59,20 @@ struct generic_pm_domain { unsigned int device_count; /* Number of devices */ unsigned int suspended_count; /* System suspend device counter */ unsigned int prepared_count; /* Suspend counter of prepared devices */ - unsigned int performance_state; /* Aggregated max performance state */ - cpumask_var_t cpus; /* A cpumask of the attached CPUs */ int (*power_off)(struct generic_pm_domain *domain); int (*power_on)(struct generic_pm_domain *domain); - struct raw_notifier_head power_notifiers; /* Power on/off notifiers */ - struct opp_table *opp_table; /* OPP table of the genpd */ - unsigned int (*opp_to_performance_state)(struct generic_pm_domain *genpd, - struct dev_pm_opp *opp); - int (*set_performance_state)(struct generic_pm_domain *genpd, - unsigned int state); struct gpd_dev_ops dev_ops; s64 max_off_time_ns; /* Maximum allowed "suspended" time. */ - ktime_t next_wakeup; /* Maintained by the domain governor */ bool max_off_time_changed; bool cached_power_down_ok; - bool cached_power_down_state_idx; int (*attach_dev)(struct generic_pm_domain *domain, struct device *dev); void (*detach_dev)(struct generic_pm_domain *domain, struct device *dev); unsigned int flags; /* Bit field of configs for genpd */ - struct genpd_power_state *states; - void (*free_states)(struct genpd_power_state *states, - unsigned int state_count); + struct genpd_power_state states[GENPD_MAX_NUM_STATES]; unsigned int state_count; /* number of states */ unsigned int state_idx; /* state that genpd will go to when off */ - ktime_t on_time; - ktime_t accounting_time; - const struct genpd_lock_ops *lock_ops; - union { - struct mutex mlock; - struct { - spinlock_t slock; - unsigned long lock_flags; - }; - }; }; @@ -168,14 +82,10 @@ static inline struct generic_pm_domain *pd_to_genpd(struct dev_pm_domain *pd) } struct gpd_link { - struct generic_pm_domain *parent; - struct list_head parent_node; - struct generic_pm_domain *child; - struct list_head child_node; - - /* Sub-domain's per-master 
domain performance state */ - unsigned int performance_state; - unsigned int prev_performance_state; + struct generic_pm_domain *master; + struct list_head master_node; + struct generic_pm_domain *slave; + struct list_head slave_node; }; struct gpd_timing_data { @@ -195,13 +105,6 @@ struct generic_pm_domain_data { struct pm_domain_data base; struct gpd_timing_data td; struct notifier_block nb; - struct notifier_block *power_nb; - int cpu; - unsigned int performance_state; - unsigned int default_pstate; - unsigned int rpm_pstate; - ktime_t next_wakeup; - void *data; }; #ifdef CONFIG_PM_GENERIC_DOMAINS @@ -215,47 +118,46 @@ static inline struct generic_pm_domain_data *dev_gpd_data(struct device *dev) return to_gpd_data(dev->power.subsys_data->domain_data); } -int pm_genpd_add_device(struct generic_pm_domain *genpd, struct device *dev); -int pm_genpd_remove_device(struct device *dev); -int pm_genpd_add_subdomain(struct generic_pm_domain *genpd, - struct generic_pm_domain *subdomain); -int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd, - struct generic_pm_domain *subdomain); -int pm_genpd_init(struct generic_pm_domain *genpd, - struct dev_power_governor *gov, bool is_off); -int pm_genpd_remove(struct generic_pm_domain *genpd); -int dev_pm_genpd_set_performance_state(struct device *dev, unsigned int state); -int dev_pm_genpd_add_notifier(struct device *dev, struct notifier_block *nb); -int dev_pm_genpd_remove_notifier(struct device *dev); -void dev_pm_genpd_set_next_wakeup(struct device *dev, ktime_t next); +extern int __pm_genpd_add_device(struct generic_pm_domain *genpd, + struct device *dev, + struct gpd_timing_data *td); + +extern int pm_genpd_remove_device(struct generic_pm_domain *genpd, + struct device *dev); +extern int pm_genpd_add_subdomain(struct generic_pm_domain *genpd, + struct generic_pm_domain *new_subdomain); +extern int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd, + struct generic_pm_domain *target); +extern int 
pm_genpd_init(struct generic_pm_domain *genpd, + struct dev_power_governor *gov, bool is_off); +extern int pm_genpd_remove(struct generic_pm_domain *genpd); extern struct dev_power_governor simple_qos_governor; extern struct dev_power_governor pm_domain_always_on_gov; -#ifdef CONFIG_CPU_IDLE -extern struct dev_power_governor pm_domain_cpu_gov; -#endif #else static inline struct generic_pm_domain_data *dev_gpd_data(struct device *dev) { return ERR_PTR(-ENOSYS); } -static inline int pm_genpd_add_device(struct generic_pm_domain *genpd, - struct device *dev) +static inline int __pm_genpd_add_device(struct generic_pm_domain *genpd, + struct device *dev, + struct gpd_timing_data *td) { return -ENOSYS; } -static inline int pm_genpd_remove_device(struct device *dev) +static inline int pm_genpd_remove_device(struct generic_pm_domain *genpd, + struct device *dev) { return -ENOSYS; } static inline int pm_genpd_add_subdomain(struct generic_pm_domain *genpd, - struct generic_pm_domain *subdomain) + struct generic_pm_domain *new_sd) { return -ENOSYS; } static inline int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd, - struct generic_pm_domain *subdomain) + struct generic_pm_domain *target) { return -ENOSYS; } @@ -266,51 +168,30 @@ static inline int pm_genpd_init(struct generic_pm_domain *genpd, } static inline int pm_genpd_remove(struct generic_pm_domain *genpd) { - return -EOPNOTSUPP; + return -ENOTSUPP; } - -static inline int dev_pm_genpd_set_performance_state(struct device *dev, - unsigned int state) -{ - return -EOPNOTSUPP; -} - -static inline int dev_pm_genpd_add_notifier(struct device *dev, - struct notifier_block *nb) -{ - return -EOPNOTSUPP; -} - -static inline int dev_pm_genpd_remove_notifier(struct device *dev) -{ - return -EOPNOTSUPP; -} - -static inline void dev_pm_genpd_set_next_wakeup(struct device *dev, ktime_t next) -{ } - -#define simple_qos_governor (*(struct dev_power_governor *)(NULL)) -#define pm_domain_always_on_gov (*(struct dev_power_governor 
*)(NULL)) #endif +static inline int pm_genpd_add_device(struct generic_pm_domain *genpd, + struct device *dev) +{ + return __pm_genpd_add_device(genpd, dev, NULL); +} + #ifdef CONFIG_PM_GENERIC_DOMAINS_SLEEP -void dev_pm_genpd_suspend(struct device *dev); -void dev_pm_genpd_resume(struct device *dev); +extern void pm_genpd_syscore_poweroff(struct device *dev); +extern void pm_genpd_syscore_poweron(struct device *dev); #else -static inline void dev_pm_genpd_suspend(struct device *dev) {} -static inline void dev_pm_genpd_resume(struct device *dev) {} +static inline void pm_genpd_syscore_poweroff(struct device *dev) {} +static inline void pm_genpd_syscore_poweron(struct device *dev) {} #endif /* OF PM domain providers */ struct of_device_id; -typedef struct generic_pm_domain *(*genpd_xlate_t)(struct of_phandle_args *args, - void *data); - struct genpd_onecell_data { struct generic_pm_domain **domains; unsigned int num_domains; - genpd_xlate_t xlate; }; #ifdef CONFIG_PM_GENERIC_DOMAINS_OF @@ -319,33 +200,24 @@ int of_genpd_add_provider_simple(struct device_node *np, int of_genpd_add_provider_onecell(struct device_node *np, struct genpd_onecell_data *data); void of_genpd_del_provider(struct device_node *np); -int of_genpd_add_device(struct of_phandle_args *args, struct device *dev); -int of_genpd_add_subdomain(struct of_phandle_args *parent_spec, - struct of_phandle_args *subdomain_spec); -int of_genpd_remove_subdomain(struct of_phandle_args *parent_spec, - struct of_phandle_args *subdomain_spec); -struct generic_pm_domain *of_genpd_remove_last(struct device_node *np); -int of_genpd_parse_idle_states(struct device_node *dn, - struct genpd_power_state **states, int *n); -unsigned int pm_genpd_opp_to_performance_state(struct device *genpd_dev, - struct dev_pm_opp *opp); +extern int of_genpd_add_device(struct of_phandle_args *args, + struct device *dev); +extern int of_genpd_add_subdomain(struct of_phandle_args *parent, + struct of_phandle_args *new_subdomain); +extern 
struct generic_pm_domain *of_genpd_remove_last(struct device_node *np); int genpd_dev_pm_attach(struct device *dev); -struct device *genpd_dev_pm_attach_by_id(struct device *dev, - unsigned int index); -struct device *genpd_dev_pm_attach_by_name(struct device *dev, - const char *name); #else /* !CONFIG_PM_GENERIC_DOMAINS_OF */ static inline int of_genpd_add_provider_simple(struct device_node *np, struct generic_pm_domain *genpd) { - return -EOPNOTSUPP; + return -ENOTSUPP; } static inline int of_genpd_add_provider_onecell(struct device_node *np, struct genpd_onecell_data *data) { - return -EOPNOTSUPP; + return -ENOTSUPP; } static inline void of_genpd_del_provider(struct device_node *np) {} @@ -356,84 +228,34 @@ static inline int of_genpd_add_device(struct of_phandle_args *args, return -ENODEV; } -static inline int of_genpd_add_subdomain(struct of_phandle_args *parent_spec, - struct of_phandle_args *subdomain_spec) +static inline int of_genpd_add_subdomain(struct of_phandle_args *parent, + struct of_phandle_args *new_subdomain) { return -ENODEV; } -static inline int of_genpd_remove_subdomain(struct of_phandle_args *parent_spec, - struct of_phandle_args *subdomain_spec) -{ - return -ENODEV; -} - -static inline int of_genpd_parse_idle_states(struct device_node *dn, - struct genpd_power_state **states, int *n) -{ - return -ENODEV; -} - -static inline unsigned int -pm_genpd_opp_to_performance_state(struct device *genpd_dev, - struct dev_pm_opp *opp) -{ - return 0; -} - static inline int genpd_dev_pm_attach(struct device *dev) { - return 0; -} - -static inline struct device *genpd_dev_pm_attach_by_id(struct device *dev, - unsigned int index) -{ - return NULL; -} - -static inline struct device *genpd_dev_pm_attach_by_name(struct device *dev, - const char *name) -{ - return NULL; + return -ENODEV; } static inline struct generic_pm_domain *of_genpd_remove_last(struct device_node *np) { - return ERR_PTR(-EOPNOTSUPP); + return ERR_PTR(-ENOTSUPP); } #endif /* 
CONFIG_PM_GENERIC_DOMAINS_OF */ #ifdef CONFIG_PM -int dev_pm_domain_attach(struct device *dev, bool power_on); -struct device *dev_pm_domain_attach_by_id(struct device *dev, - unsigned int index); -struct device *dev_pm_domain_attach_by_name(struct device *dev, - const char *name); -void dev_pm_domain_detach(struct device *dev, bool power_off); -int dev_pm_domain_start(struct device *dev); -void dev_pm_domain_set(struct device *dev, struct dev_pm_domain *pd); +extern int dev_pm_domain_attach(struct device *dev, bool power_on); +extern void dev_pm_domain_detach(struct device *dev, bool power_off); +extern void dev_pm_domain_set(struct device *dev, struct dev_pm_domain *pd); #else static inline int dev_pm_domain_attach(struct device *dev, bool power_on) { - return 0; -} -static inline struct device *dev_pm_domain_attach_by_id(struct device *dev, - unsigned int index) -{ - return NULL; -} -static inline struct device *dev_pm_domain_attach_by_name(struct device *dev, - const char *name) -{ - return NULL; + return -ENODEV; } static inline void dev_pm_domain_detach(struct device *dev, bool power_off) {} -static inline int dev_pm_domain_start(struct device *dev) -{ - return 0; -} static inline void dev_pm_domain_set(struct device *dev, struct dev_pm_domain *pd) {} #endif diff --git a/include/linux/pm_opp.h b/include/linux/pm_opp.h index 84150a22fd..f6bc765019 100644 --- a/include/linux/pm_opp.h +++ b/include/linux/pm_opp.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * Generic OPP Interface * @@ -6,181 +5,72 @@ * Nishanth Menon * Romit Dasgupta * Kevin Hilman + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
*/ #ifndef __LINUX_OPP_H__ #define __LINUX_OPP_H__ -#include #include #include -struct clk; -struct regulator; struct dev_pm_opp; struct device; struct opp_table; enum dev_pm_opp_event { OPP_EVENT_ADD, OPP_EVENT_REMOVE, OPP_EVENT_ENABLE, OPP_EVENT_DISABLE, - OPP_EVENT_ADJUST_VOLTAGE, -}; - -/** - * struct dev_pm_opp_supply - Power supply voltage/current values - * @u_volt: Target voltage in microvolts corresponding to this OPP - * @u_volt_min: Minimum voltage in microvolts corresponding to this OPP - * @u_volt_max: Maximum voltage in microvolts corresponding to this OPP - * @u_amp: Maximum current drawn by the device in microamperes - * - * This structure stores the voltage/current values for a single power supply. - */ -struct dev_pm_opp_supply { - unsigned long u_volt; - unsigned long u_volt_min; - unsigned long u_volt_max; - unsigned long u_amp; -}; - -/** - * struct dev_pm_opp_icc_bw - Interconnect bandwidth values - * @avg: Average bandwidth corresponding to this OPP (in icc units) - * @peak: Peak bandwidth corresponding to this OPP (in icc units) - * - * This structure stores the bandwidth values for a single interconnect path. - */ -struct dev_pm_opp_icc_bw { - u32 avg; - u32 peak; -}; - -/** - * struct dev_pm_opp_info - OPP freq/voltage/current values - * @rate: Target clk rate in hz - * @supplies: Array of voltage/current values for all power supplies - * - * This structure stores the freq/voltage/current values for a single OPP. - */ -struct dev_pm_opp_info { - unsigned long rate; - struct dev_pm_opp_supply *supplies; -}; - -/** - * struct dev_pm_set_opp_data - Set OPP data - * @old_opp: Old OPP info - * @new_opp: New OPP info - * @regulators: Array of regulator pointers - * @regulator_count: Number of regulators - * @clk: Pointer to clk - * @dev: Pointer to the struct device - * - * This structure contains all information required for setting an OPP. 
- */ -struct dev_pm_set_opp_data { - struct dev_pm_opp_info old_opp; - struct dev_pm_opp_info new_opp; - - struct regulator **regulators; - unsigned int regulator_count; - struct clk *clk; - struct device *dev; }; #if defined(CONFIG_PM_OPP) -struct opp_table *dev_pm_opp_get_opp_table(struct device *dev); -void dev_pm_opp_put_opp_table(struct opp_table *opp_table); - unsigned long dev_pm_opp_get_voltage(struct dev_pm_opp *opp); unsigned long dev_pm_opp_get_freq(struct dev_pm_opp *opp); -unsigned int dev_pm_opp_get_level(struct dev_pm_opp *opp); - -unsigned int dev_pm_opp_get_required_pstate(struct dev_pm_opp *opp, - unsigned int index); - bool dev_pm_opp_is_turbo(struct dev_pm_opp *opp); int dev_pm_opp_get_opp_count(struct device *dev); unsigned long dev_pm_opp_get_max_clock_latency(struct device *dev); unsigned long dev_pm_opp_get_max_volt_latency(struct device *dev); unsigned long dev_pm_opp_get_max_transition_latency(struct device *dev); -unsigned long dev_pm_opp_get_suspend_opp_freq(struct device *dev); +struct dev_pm_opp *dev_pm_opp_get_suspend_opp(struct device *dev); struct dev_pm_opp *dev_pm_opp_find_freq_exact(struct device *dev, unsigned long freq, bool available); -struct dev_pm_opp *dev_pm_opp_find_level_exact(struct device *dev, - unsigned int level); -struct dev_pm_opp *dev_pm_opp_find_level_ceil(struct device *dev, - unsigned int *level); struct dev_pm_opp *dev_pm_opp_find_freq_floor(struct device *dev, unsigned long *freq); -struct dev_pm_opp *dev_pm_opp_find_freq_ceil_by_volt(struct device *dev, - unsigned long u_volt); struct dev_pm_opp *dev_pm_opp_find_freq_ceil(struct device *dev, unsigned long *freq); -void dev_pm_opp_put(struct dev_pm_opp *opp); int dev_pm_opp_add(struct device *dev, unsigned long freq, unsigned long u_volt); void dev_pm_opp_remove(struct device *dev, unsigned long freq); -void dev_pm_opp_remove_all_dynamic(struct device *dev); - -int dev_pm_opp_adjust_voltage(struct device *dev, unsigned long freq, - unsigned long u_volt, 
unsigned long u_volt_min, - unsigned long u_volt_max); int dev_pm_opp_enable(struct device *dev, unsigned long freq); int dev_pm_opp_disable(struct device *dev, unsigned long freq); -int dev_pm_opp_register_notifier(struct device *dev, struct notifier_block *nb); -int dev_pm_opp_unregister_notifier(struct device *dev, struct notifier_block *nb); - -struct opp_table *dev_pm_opp_set_supported_hw(struct device *dev, const u32 *versions, unsigned int count); -void dev_pm_opp_put_supported_hw(struct opp_table *opp_table); -int devm_pm_opp_set_supported_hw(struct device *dev, const u32 *versions, unsigned int count); -struct opp_table *dev_pm_opp_set_prop_name(struct device *dev, const char *name); -void dev_pm_opp_put_prop_name(struct opp_table *opp_table); -struct opp_table *dev_pm_opp_set_regulators(struct device *dev, const char * const names[], unsigned int count); -void dev_pm_opp_put_regulators(struct opp_table *opp_table); -int devm_pm_opp_set_regulators(struct device *dev, const char * const names[], unsigned int count); -struct opp_table *dev_pm_opp_set_clkname(struct device *dev, const char *name); -void dev_pm_opp_put_clkname(struct opp_table *opp_table); -int devm_pm_opp_set_clkname(struct device *dev, const char *name); -struct opp_table *dev_pm_opp_register_set_opp_helper(struct device *dev, int (*set_opp)(struct dev_pm_set_opp_data *data)); -void dev_pm_opp_unregister_set_opp_helper(struct opp_table *opp_table); -int devm_pm_opp_register_set_opp_helper(struct device *dev, int (*set_opp)(struct dev_pm_set_opp_data *data)); -struct opp_table *dev_pm_opp_attach_genpd(struct device *dev, const char **names, struct device ***virt_devs); -void dev_pm_opp_detach_genpd(struct opp_table *opp_table); -int devm_pm_opp_attach_genpd(struct device *dev, const char **names, struct device ***virt_devs); -struct dev_pm_opp *dev_pm_opp_xlate_required_opp(struct opp_table *src_table, struct opp_table *dst_table, struct dev_pm_opp *src_opp); -int 
dev_pm_opp_xlate_performance_state(struct opp_table *src_table, struct opp_table *dst_table, unsigned int pstate); +struct srcu_notifier_head *dev_pm_opp_get_notifier(struct device *dev); +int dev_pm_opp_set_supported_hw(struct device *dev, const u32 *versions, + unsigned int count); +void dev_pm_opp_put_supported_hw(struct device *dev); +int dev_pm_opp_set_prop_name(struct device *dev, const char *name); +void dev_pm_opp_put_prop_name(struct device *dev); +struct opp_table *dev_pm_opp_set_regulator(struct device *dev, const char *name); +void dev_pm_opp_put_regulator(struct opp_table *opp_table); int dev_pm_opp_set_rate(struct device *dev, unsigned long target_freq); -int dev_pm_opp_set_opp(struct device *dev, struct dev_pm_opp *opp); int dev_pm_opp_set_sharing_cpus(struct device *cpu_dev, const struct cpumask *cpumask); int dev_pm_opp_get_sharing_cpus(struct device *cpu_dev, struct cpumask *cpumask); void dev_pm_opp_remove_table(struct device *dev); void dev_pm_opp_cpumask_remove_table(const struct cpumask *cpumask); -int dev_pm_opp_sync_regulators(struct device *dev); #else -static inline struct opp_table *dev_pm_opp_get_opp_table(struct device *dev) -{ - return ERR_PTR(-EOPNOTSUPP); -} - -static inline struct opp_table *dev_pm_opp_get_opp_table_indexed(struct device *dev, int index) -{ - return ERR_PTR(-EOPNOTSUPP); -} - -static inline void dev_pm_opp_put_opp_table(struct opp_table *opp_table) {} - static inline unsigned long dev_pm_opp_get_voltage(struct dev_pm_opp *opp) { return 0; @@ -191,18 +81,6 @@ static inline unsigned long dev_pm_opp_get_freq(struct dev_pm_opp *opp) return 0; } -static inline unsigned int dev_pm_opp_get_level(struct dev_pm_opp *opp) -{ - return 0; -} - -static inline -unsigned int dev_pm_opp_get_required_pstate(struct dev_pm_opp *opp, - unsigned int index) -{ - return 0; -} - static inline bool dev_pm_opp_is_turbo(struct dev_pm_opp *opp) { return false; @@ -228,71 +106,39 @@ static inline unsigned long 
dev_pm_opp_get_max_transition_latency(struct device return 0; } -static inline unsigned long dev_pm_opp_get_suspend_opp_freq(struct device *dev) +static inline struct dev_pm_opp *dev_pm_opp_get_suspend_opp(struct device *dev) { - return 0; + return NULL; } static inline struct dev_pm_opp *dev_pm_opp_find_freq_exact(struct device *dev, unsigned long freq, bool available) { - return ERR_PTR(-EOPNOTSUPP); -} - -static inline struct dev_pm_opp *dev_pm_opp_find_level_exact(struct device *dev, - unsigned int level) -{ - return ERR_PTR(-EOPNOTSUPP); -} - -static inline struct dev_pm_opp *dev_pm_opp_find_level_ceil(struct device *dev, - unsigned int *level) -{ - return ERR_PTR(-EOPNOTSUPP); + return ERR_PTR(-ENOTSUPP); } static inline struct dev_pm_opp *dev_pm_opp_find_freq_floor(struct device *dev, unsigned long *freq) { - return ERR_PTR(-EOPNOTSUPP); -} - -static inline struct dev_pm_opp *dev_pm_opp_find_freq_ceil_by_volt(struct device *dev, - unsigned long u_volt) -{ - return ERR_PTR(-EOPNOTSUPP); + return ERR_PTR(-ENOTSUPP); } static inline struct dev_pm_opp *dev_pm_opp_find_freq_ceil(struct device *dev, unsigned long *freq) { - return ERR_PTR(-EOPNOTSUPP); + return ERR_PTR(-ENOTSUPP); } -static inline void dev_pm_opp_put(struct dev_pm_opp *opp) {} - static inline int dev_pm_opp_add(struct device *dev, unsigned long freq, unsigned long u_volt) { - return -EOPNOTSUPP; + return -ENOTSUPP; } static inline void dev_pm_opp_remove(struct device *dev, unsigned long freq) { } -static inline void dev_pm_opp_remove_all_dynamic(struct device *dev) -{ -} - -static inline int -dev_pm_opp_adjust_voltage(struct device *dev, unsigned long freq, - unsigned long u_volt, unsigned long u_volt_min, - unsigned long u_volt_max) -{ - return 0; -} - static inline int dev_pm_opp_enable(struct device *dev, unsigned long freq) { return 0; @@ -303,117 +149,43 @@ static inline int dev_pm_opp_disable(struct device *dev, unsigned long freq) return 0; } -static inline int 
dev_pm_opp_register_notifier(struct device *dev, struct notifier_block *nb) +static inline struct srcu_notifier_head *dev_pm_opp_get_notifier( + struct device *dev) { - return -EOPNOTSUPP; + return ERR_PTR(-ENOTSUPP); } -static inline int dev_pm_opp_unregister_notifier(struct device *dev, struct notifier_block *nb) +static inline int dev_pm_opp_set_supported_hw(struct device *dev, + const u32 *versions, + unsigned int count) { - return -EOPNOTSUPP; + return -ENOTSUPP; } -static inline struct opp_table *dev_pm_opp_set_supported_hw(struct device *dev, - const u32 *versions, - unsigned int count) +static inline void dev_pm_opp_put_supported_hw(struct device *dev) {} + +static inline int dev_pm_opp_set_prop_name(struct device *dev, const char *name) { - return ERR_PTR(-EOPNOTSUPP); + return -ENOTSUPP; } -static inline void dev_pm_opp_put_supported_hw(struct opp_table *opp_table) {} +static inline void dev_pm_opp_put_prop_name(struct device *dev) {} -static inline int devm_pm_opp_set_supported_hw(struct device *dev, - const u32 *versions, - unsigned int count) +static inline struct opp_table *dev_pm_opp_set_regulator(struct device *dev, const char *name) { - return -EOPNOTSUPP; + return ERR_PTR(-ENOTSUPP); } -static inline struct opp_table *dev_pm_opp_register_set_opp_helper(struct device *dev, - int (*set_opp)(struct dev_pm_set_opp_data *data)) -{ - return ERR_PTR(-EOPNOTSUPP); -} - -static inline void dev_pm_opp_unregister_set_opp_helper(struct opp_table *opp_table) {} - -static inline int devm_pm_opp_register_set_opp_helper(struct device *dev, - int (*set_opp)(struct dev_pm_set_opp_data *data)) -{ - return -EOPNOTSUPP; -} - -static inline struct opp_table *dev_pm_opp_set_prop_name(struct device *dev, const char *name) -{ - return ERR_PTR(-EOPNOTSUPP); -} - -static inline void dev_pm_opp_put_prop_name(struct opp_table *opp_table) {} - -static inline struct opp_table *dev_pm_opp_set_regulators(struct device *dev, const char * const names[], unsigned int count) -{ - 
return ERR_PTR(-EOPNOTSUPP); -} - -static inline void dev_pm_opp_put_regulators(struct opp_table *opp_table) {} - -static inline int devm_pm_opp_set_regulators(struct device *dev, - const char * const names[], - unsigned int count) -{ - return -EOPNOTSUPP; -} - -static inline struct opp_table *dev_pm_opp_set_clkname(struct device *dev, const char *name) -{ - return ERR_PTR(-EOPNOTSUPP); -} - -static inline void dev_pm_opp_put_clkname(struct opp_table *opp_table) {} - -static inline int devm_pm_opp_set_clkname(struct device *dev, const char *name) -{ - return -EOPNOTSUPP; -} - -static inline struct opp_table *dev_pm_opp_attach_genpd(struct device *dev, const char **names, struct device ***virt_devs) -{ - return ERR_PTR(-EOPNOTSUPP); -} - -static inline void dev_pm_opp_detach_genpd(struct opp_table *opp_table) {} - -static inline int devm_pm_opp_attach_genpd(struct device *dev, - const char **names, - struct device ***virt_devs) -{ - return -EOPNOTSUPP; -} - -static inline struct dev_pm_opp *dev_pm_opp_xlate_required_opp(struct opp_table *src_table, - struct opp_table *dst_table, struct dev_pm_opp *src_opp) -{ - return ERR_PTR(-EOPNOTSUPP); -} - -static inline int dev_pm_opp_xlate_performance_state(struct opp_table *src_table, struct opp_table *dst_table, unsigned int pstate) -{ - return -EOPNOTSUPP; -} +static inline void dev_pm_opp_put_regulator(struct opp_table *opp_table) {} static inline int dev_pm_opp_set_rate(struct device *dev, unsigned long target_freq) { - return -EOPNOTSUPP; -} - -static inline int dev_pm_opp_set_opp(struct device *dev, struct dev_pm_opp *opp) -{ - return -EOPNOTSUPP; + return -ENOTSUPP; } static inline int dev_pm_opp_set_sharing_cpus(struct device *cpu_dev, const struct cpumask *cpumask) { - return -EOPNOTSUPP; + return -ENOTSUPP; } static inline int dev_pm_opp_get_sharing_cpus(struct device *cpu_dev, struct cpumask *cpumask) @@ -429,59 +201,27 @@ static inline void dev_pm_opp_cpumask_remove_table(const struct cpumask *cpumask { } -static 
inline int dev_pm_opp_sync_regulators(struct device *dev) -{ - return -EOPNOTSUPP; -} - #endif /* CONFIG_PM_OPP */ #if defined(CONFIG_PM_OPP) && defined(CONFIG_OF) int dev_pm_opp_of_add_table(struct device *dev); -int dev_pm_opp_of_add_table_indexed(struct device *dev, int index); -int dev_pm_opp_of_add_table_noclk(struct device *dev, int index); void dev_pm_opp_of_remove_table(struct device *dev); -int devm_pm_opp_of_add_table(struct device *dev); int dev_pm_opp_of_cpumask_add_table(const struct cpumask *cpumask); void dev_pm_opp_of_cpumask_remove_table(const struct cpumask *cpumask); int dev_pm_opp_of_get_sharing_cpus(struct device *cpu_dev, struct cpumask *cpumask); -struct device_node *dev_pm_opp_of_get_opp_desc_node(struct device *dev); -struct device_node *dev_pm_opp_get_of_node(struct dev_pm_opp *opp); -int of_get_required_opp_performance_state(struct device_node *np, int index); -int dev_pm_opp_of_find_icc_paths(struct device *dev, struct opp_table *opp_table); -int dev_pm_opp_of_register_em(struct device *dev, struct cpumask *cpus); -static inline void dev_pm_opp_of_unregister_em(struct device *dev) -{ - em_dev_unregister_perf_domain(dev); -} #else static inline int dev_pm_opp_of_add_table(struct device *dev) { - return -EOPNOTSUPP; -} - -static inline int dev_pm_opp_of_add_table_indexed(struct device *dev, int index) -{ - return -EOPNOTSUPP; -} - -static inline int dev_pm_opp_of_add_table_noclk(struct device *dev, int index) -{ - return -EOPNOTSUPP; + return -ENOTSUPP; } static inline void dev_pm_opp_of_remove_table(struct device *dev) { } -static inline int devm_pm_opp_of_add_table(struct device *dev) -{ - return -EOPNOTSUPP; -} - static inline int dev_pm_opp_of_cpumask_add_table(const struct cpumask *cpumask) { - return -EOPNOTSUPP; + return -ENOTSUPP; } static inline void dev_pm_opp_of_cpumask_remove_table(const struct cpumask *cpumask) @@ -490,37 +230,7 @@ static inline void dev_pm_opp_of_cpumask_remove_table(const struct cpumask *cpum static inline 
int dev_pm_opp_of_get_sharing_cpus(struct device *cpu_dev, struct cpumask *cpumask) { - return -EOPNOTSUPP; -} - -static inline struct device_node *dev_pm_opp_of_get_opp_desc_node(struct device *dev) -{ - return NULL; -} - -static inline struct device_node *dev_pm_opp_get_of_node(struct dev_pm_opp *opp) -{ - return NULL; -} - -static inline int dev_pm_opp_of_register_em(struct device *dev, - struct cpumask *cpus) -{ - return -EOPNOTSUPP; -} - -static inline void dev_pm_opp_of_unregister_em(struct device *dev) -{ -} - -static inline int of_get_required_opp_performance_state(struct device_node *np, int index) -{ - return -EOPNOTSUPP; -} - -static inline int dev_pm_opp_of_find_icc_paths(struct device *dev, struct opp_table *opp_table) -{ - return -EOPNOTSUPP; + return -ENOTSUPP; } #endif diff --git a/include/linux/pm_qos.h b/include/linux/pm_qos.h index 4a69d4af3f..0f65d36c2a 100644 --- a/include/linux/pm_qos.h +++ b/include/linux/pm_qos.h @@ -1,20 +1,25 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* - * Definitions related to Power Management Quality of Service (PM QoS). - * - * Copyright (C) 2020 Intel Corporation - * - * Authors: - * Mark Gross - * Rafael J. Wysocki - */ - #ifndef _LINUX_PM_QOS_H #define _LINUX_PM_QOS_H - +/* interface for the pm_qos_power infrastructure of the linux kernel. 
+ * + * Mark Gross + */ #include #include +#include #include +#include + +enum { + PM_QOS_RESERVED = 0, + PM_QOS_CPU_DMA_LATENCY, + PM_QOS_NETWORK_LATENCY, + PM_QOS_NETWORK_THROUGHPUT, + PM_QOS_MEMORY_BANDWIDTH, + + /* insert new class ID */ + PM_QOS_NUM_CLASSES, +}; enum pm_qos_flags_status { PM_QOS_FLAGS_UNDEFINED = -1, @@ -23,25 +28,51 @@ enum pm_qos_flags_status { PM_QOS_FLAGS_ALL, }; -#define PM_QOS_DEFAULT_VALUE (-1) -#define PM_QOS_LATENCY_ANY S32_MAX -#define PM_QOS_LATENCY_ANY_NS ((s64)PM_QOS_LATENCY_ANY * NSEC_PER_USEC) +#define PM_QOS_DEFAULT_VALUE -1 -#define PM_QOS_CPU_LATENCY_DEFAULT_VALUE (2000 * USEC_PER_SEC) -#define PM_QOS_RESUME_LATENCY_DEFAULT_VALUE PM_QOS_LATENCY_ANY -#define PM_QOS_RESUME_LATENCY_NO_CONSTRAINT PM_QOS_LATENCY_ANY -#define PM_QOS_RESUME_LATENCY_NO_CONSTRAINT_NS PM_QOS_LATENCY_ANY_NS +#define PM_QOS_CPU_DMA_LAT_DEFAULT_VALUE (2000 * USEC_PER_SEC) +#define PM_QOS_NETWORK_LAT_DEFAULT_VALUE (2000 * USEC_PER_SEC) +#define PM_QOS_NETWORK_THROUGHPUT_DEFAULT_VALUE 0 +#define PM_QOS_MEMORY_BANDWIDTH_DEFAULT_VALUE 0 +#define PM_QOS_RESUME_LATENCY_DEFAULT_VALUE 0 #define PM_QOS_LATENCY_TOLERANCE_DEFAULT_VALUE 0 -#define PM_QOS_MIN_FREQUENCY_DEFAULT_VALUE 0 -#define PM_QOS_MAX_FREQUENCY_DEFAULT_VALUE FREQ_QOS_MAX_DEFAULT_VALUE #define PM_QOS_LATENCY_TOLERANCE_NO_CONSTRAINT (-1) +#define PM_QOS_LATENCY_ANY ((s32)(~(__u32)0 >> 1)) #define PM_QOS_FLAG_NO_POWER_OFF (1 << 0) +#define PM_QOS_FLAG_REMOTE_WAKEUP (1 << 1) + +struct pm_qos_request { + struct plist_node node; + int pm_qos_class; + struct delayed_work work; /* for pm_qos_update_request_timeout */ +}; + +struct pm_qos_flags_request { + struct list_head node; + s32 flags; /* Do not change to 64 bit */ +}; + +enum dev_pm_qos_req_type { + DEV_PM_QOS_RESUME_LATENCY = 1, + DEV_PM_QOS_LATENCY_TOLERANCE, + DEV_PM_QOS_FLAGS, +}; + +struct dev_pm_qos_request { + enum dev_pm_qos_req_type type; + union { + struct plist_node pnode; + struct pm_qos_flags_request flr; + } data; + struct device *dev; 
+}; enum pm_qos_type { PM_QOS_UNITIALIZED, PM_QOS_MAX, /* return the largest value */ PM_QOS_MIN, /* return the smallest value */ + PM_QOS_SUM /* return the sum */ }; /* @@ -58,66 +89,14 @@ struct pm_qos_constraints { struct blocking_notifier_head *notifiers; }; -struct pm_qos_request { - struct plist_node node; - struct pm_qos_constraints *qos; -}; - -struct pm_qos_flags_request { - struct list_head node; - s32 flags; /* Do not change to 64 bit */ -}; - struct pm_qos_flags { struct list_head list; s32 effective_flags; /* Do not change to 64 bit */ }; - -#define FREQ_QOS_MIN_DEFAULT_VALUE 0 -#define FREQ_QOS_MAX_DEFAULT_VALUE S32_MAX - -enum freq_qos_req_type { - FREQ_QOS_MIN = 1, - FREQ_QOS_MAX, -}; - -struct freq_constraints { - struct pm_qos_constraints min_freq; - struct blocking_notifier_head min_freq_notifiers; - struct pm_qos_constraints max_freq; - struct blocking_notifier_head max_freq_notifiers; -}; - -struct freq_qos_request { - enum freq_qos_req_type type; - struct plist_node pnode; - struct freq_constraints *qos; -}; - - -enum dev_pm_qos_req_type { - DEV_PM_QOS_RESUME_LATENCY = 1, - DEV_PM_QOS_LATENCY_TOLERANCE, - DEV_PM_QOS_MIN_FREQUENCY, - DEV_PM_QOS_MAX_FREQUENCY, - DEV_PM_QOS_FLAGS, -}; - -struct dev_pm_qos_request { - enum dev_pm_qos_req_type type; - union { - struct plist_node pnode; - struct pm_qos_flags_request flr; - struct freq_qos_request freq; - } data; - struct device *dev; -}; - struct dev_pm_qos { struct pm_qos_constraints resume_latency; struct pm_qos_constraints latency_tolerance; - struct freq_constraints freq; struct pm_qos_flags flags; struct dev_pm_qos_request *resume_latency_req; struct dev_pm_qos_request *latency_tolerance_req; @@ -136,47 +115,40 @@ static inline int dev_pm_qos_request_active(struct dev_pm_qos_request *req) return req->dev != NULL; } -s32 pm_qos_read_value(struct pm_qos_constraints *c); int pm_qos_update_target(struct pm_qos_constraints *c, struct plist_node *node, enum pm_qos_req_action action, int value); bool 
pm_qos_update_flags(struct pm_qos_flags *pqf, struct pm_qos_flags_request *req, enum pm_qos_req_action action, s32 val); +void pm_qos_add_request(struct pm_qos_request *req, int pm_qos_class, + s32 value); +void pm_qos_update_request(struct pm_qos_request *req, + s32 new_value); +void pm_qos_update_request_timeout(struct pm_qos_request *req, + s32 new_value, unsigned long timeout_us); +void pm_qos_remove_request(struct pm_qos_request *req); -#ifdef CONFIG_CPU_IDLE -s32 cpu_latency_qos_limit(void); -bool cpu_latency_qos_request_active(struct pm_qos_request *req); -void cpu_latency_qos_add_request(struct pm_qos_request *req, s32 value); -void cpu_latency_qos_update_request(struct pm_qos_request *req, s32 new_value); -void cpu_latency_qos_remove_request(struct pm_qos_request *req); -#else -static inline s32 cpu_latency_qos_limit(void) { return INT_MAX; } -static inline bool cpu_latency_qos_request_active(struct pm_qos_request *req) -{ - return false; -} -static inline void cpu_latency_qos_add_request(struct pm_qos_request *req, - s32 value) {} -static inline void cpu_latency_qos_update_request(struct pm_qos_request *req, - s32 new_value) {} -static inline void cpu_latency_qos_remove_request(struct pm_qos_request *req) {} -#endif +int pm_qos_request(int pm_qos_class); +int pm_qos_add_notifier(int pm_qos_class, struct notifier_block *notifier); +int pm_qos_remove_notifier(int pm_qos_class, struct notifier_block *notifier); +int pm_qos_request_active(struct pm_qos_request *req); +s32 pm_qos_read_value(struct pm_qos_constraints *c); #ifdef CONFIG_PM enum pm_qos_flags_status __dev_pm_qos_flags(struct device *dev, s32 mask); enum pm_qos_flags_status dev_pm_qos_flags(struct device *dev, s32 mask); -s32 __dev_pm_qos_resume_latency(struct device *dev); -s32 dev_pm_qos_read_value(struct device *dev, enum dev_pm_qos_req_type type); +s32 __dev_pm_qos_read_value(struct device *dev); +s32 dev_pm_qos_read_value(struct device *dev); int dev_pm_qos_add_request(struct device *dev, 
struct dev_pm_qos_request *req, enum dev_pm_qos_req_type type, s32 value); int dev_pm_qos_update_request(struct dev_pm_qos_request *req, s32 new_value); int dev_pm_qos_remove_request(struct dev_pm_qos_request *req); int dev_pm_qos_add_notifier(struct device *dev, - struct notifier_block *notifier, - enum dev_pm_qos_req_type type); + struct notifier_block *notifier); int dev_pm_qos_remove_notifier(struct device *dev, - struct notifier_block *notifier, - enum dev_pm_qos_req_type type); + struct notifier_block *notifier); +int dev_pm_qos_add_global_notifier(struct notifier_block *notifier); +int dev_pm_qos_remove_global_notifier(struct notifier_block *notifier); void dev_pm_qos_constraints_init(struct device *dev); void dev_pm_qos_constraints_destroy(struct device *dev); int dev_pm_qos_add_ancestor_request(struct device *dev, @@ -201,13 +173,6 @@ static inline s32 dev_pm_qos_requested_flags(struct device *dev) { return dev->power.qos->flags_req->data.flr.flags; } - -static inline s32 dev_pm_qos_raw_resume_latency(struct device *dev) -{ - return IS_ERR_OR_NULL(dev->power.qos) ? 
- PM_QOS_RESUME_LATENCY_NO_CONSTRAINT : - pm_qos_read_value(&dev->power.qos->resume_latency); -} #else static inline enum pm_qos_flags_status __dev_pm_qos_flags(struct device *dev, s32 mask) @@ -215,24 +180,10 @@ static inline enum pm_qos_flags_status __dev_pm_qos_flags(struct device *dev, static inline enum pm_qos_flags_status dev_pm_qos_flags(struct device *dev, s32 mask) { return PM_QOS_FLAGS_UNDEFINED; } -static inline s32 __dev_pm_qos_resume_latency(struct device *dev) - { return PM_QOS_RESUME_LATENCY_NO_CONSTRAINT; } -static inline s32 dev_pm_qos_read_value(struct device *dev, - enum dev_pm_qos_req_type type) -{ - switch (type) { - case DEV_PM_QOS_RESUME_LATENCY: - return PM_QOS_RESUME_LATENCY_NO_CONSTRAINT; - case DEV_PM_QOS_MIN_FREQUENCY: - return PM_QOS_MIN_FREQUENCY_DEFAULT_VALUE; - case DEV_PM_QOS_MAX_FREQUENCY: - return PM_QOS_MAX_FREQUENCY_DEFAULT_VALUE; - default: - WARN_ON(1); - return 0; - } -} - +static inline s32 __dev_pm_qos_read_value(struct device *dev) + { return 0; } +static inline s32 dev_pm_qos_read_value(struct device *dev) + { return 0; } static inline int dev_pm_qos_add_request(struct device *dev, struct dev_pm_qos_request *req, enum dev_pm_qos_req_type type, @@ -244,12 +195,16 @@ static inline int dev_pm_qos_update_request(struct dev_pm_qos_request *req, static inline int dev_pm_qos_remove_request(struct dev_pm_qos_request *req) { return 0; } static inline int dev_pm_qos_add_notifier(struct device *dev, - struct notifier_block *notifier, - enum dev_pm_qos_req_type type) + struct notifier_block *notifier) { return 0; } static inline int dev_pm_qos_remove_notifier(struct device *dev, - struct notifier_block *notifier, - enum dev_pm_qos_req_type type) + struct notifier_block *notifier) + { return 0; } +static inline int dev_pm_qos_add_global_notifier( + struct notifier_block *notifier) + { return 0; } +static inline int dev_pm_qos_remove_global_notifier( + struct notifier_block *notifier) { return 0; } static inline void 
dev_pm_qos_constraints_init(struct device *dev) { @@ -280,40 +235,8 @@ static inline int dev_pm_qos_expose_latency_tolerance(struct device *dev) { return 0; } static inline void dev_pm_qos_hide_latency_tolerance(struct device *dev) {} -static inline s32 dev_pm_qos_requested_resume_latency(struct device *dev) -{ - return PM_QOS_RESUME_LATENCY_NO_CONSTRAINT; -} +static inline s32 dev_pm_qos_requested_resume_latency(struct device *dev) { return 0; } static inline s32 dev_pm_qos_requested_flags(struct device *dev) { return 0; } -static inline s32 dev_pm_qos_raw_resume_latency(struct device *dev) -{ - return PM_QOS_RESUME_LATENCY_NO_CONSTRAINT; -} #endif -static inline int freq_qos_request_active(struct freq_qos_request *req) -{ - return !IS_ERR_OR_NULL(req->qos); -} - -void freq_constraints_init(struct freq_constraints *qos); - -s32 freq_qos_read_value(struct freq_constraints *qos, - enum freq_qos_req_type type); - -int freq_qos_add_request(struct freq_constraints *qos, - struct freq_qos_request *req, - enum freq_qos_req_type type, s32 value); -int freq_qos_update_request(struct freq_qos_request *req, s32 new_value); -int freq_qos_remove_request(struct freq_qos_request *req); -int freq_qos_apply(struct freq_qos_request *req, - enum pm_qos_req_action action, s32 value); - -int freq_qos_add_notifier(struct freq_constraints *qos, - enum freq_qos_req_type type, - struct notifier_block *notifier); -int freq_qos_remove_notifier(struct freq_constraints *qos, - enum freq_qos_req_type type, - struct notifier_block *notifier); - #endif diff --git a/include/linux/pm_runtime.h b/include/linux/pm_runtime.h index 222da43b70..aed7c639c8 100644 --- a/include/linux/pm_runtime.h +++ b/include/linux/pm_runtime.h @@ -1,8 +1,9 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * pm_runtime.h - Device run-time power management helper functions. * * Copyright (C) 2009 Rafael J. Wysocki + * + * This file is released under the GPLv2. 
*/ #ifndef _LINUX_PM_RUNTIME_H @@ -38,7 +39,7 @@ extern int pm_runtime_force_resume(struct device *dev); extern int __pm_runtime_idle(struct device *dev, int rpmflags); extern int __pm_runtime_suspend(struct device *dev, int rpmflags); extern int __pm_runtime_resume(struct device *dev, int rpmflags); -extern int pm_runtime_get_if_active(struct device *dev, bool ign_usage_count); +extern int pm_runtime_get_if_in_use(struct device *dev); extern int pm_schedule_suspend(struct device *dev, unsigned int delay); extern int __pm_runtime_set_status(struct device *dev, unsigned int status); extern int pm_runtime_barrier(struct device *dev); @@ -50,169 +51,79 @@ extern void pm_runtime_no_callbacks(struct device *dev); extern void pm_runtime_irq_safe(struct device *dev); extern void __pm_runtime_use_autosuspend(struct device *dev, bool use); extern void pm_runtime_set_autosuspend_delay(struct device *dev, int delay); -extern u64 pm_runtime_autosuspend_expiration(struct device *dev); +extern unsigned long pm_runtime_autosuspend_expiration(struct device *dev); extern void pm_runtime_update_max_time_suspended(struct device *dev, s64 delta_ns); extern void pm_runtime_set_memalloc_noio(struct device *dev, bool enable); -extern void pm_runtime_get_suppliers(struct device *dev); -extern void pm_runtime_put_suppliers(struct device *dev); -extern void pm_runtime_new_link(struct device *dev); -extern void pm_runtime_drop_link(struct device_link *link); -extern int devm_pm_runtime_enable(struct device *dev); - -/** - * pm_runtime_get_if_in_use - Conditionally bump up runtime PM usage counter. - * @dev: Target device. - * - * Increment the runtime PM usage counter of @dev if its runtime PM status is - * %RPM_ACTIVE and its runtime PM usage counter is greater than 0. - */ -static inline int pm_runtime_get_if_in_use(struct device *dev) -{ - return pm_runtime_get_if_active(dev, false); -} - -/** - * pm_suspend_ignore_children - Set runtime PM behavior regarding children. 
- * @dev: Target device. - * @enable: Whether or not to ignore possible dependencies on children. - * - * The dependencies of @dev on its children will not be taken into account by - * the runtime PM framework going forward if @enable is %true, or they will - * be taken into account otherwise. - */ static inline void pm_suspend_ignore_children(struct device *dev, bool enable) { dev->power.ignore_children = enable; } -/** - * pm_runtime_get_noresume - Bump up runtime PM usage counter of a device. - * @dev: Target device. - */ +static inline bool pm_children_suspended(struct device *dev) +{ + return dev->power.ignore_children + || !atomic_read(&dev->power.child_count); +} + static inline void pm_runtime_get_noresume(struct device *dev) { atomic_inc(&dev->power.usage_count); } -/** - * pm_runtime_put_noidle - Drop runtime PM usage counter of a device. - * @dev: Target device. - * - * Decrement the runtime PM usage counter of @dev unless it is 0 already. - */ static inline void pm_runtime_put_noidle(struct device *dev) { atomic_add_unless(&dev->power.usage_count, -1, 0); } -/** - * pm_runtime_suspended - Check whether or not a device is runtime-suspended. - * @dev: Target device. - * - * Return %true if runtime PM is enabled for @dev and its runtime PM status is - * %RPM_SUSPENDED, or %false otherwise. - * - * Note that the return value of this function can only be trusted if it is - * called under the runtime PM lock of @dev or under conditions in which - * runtime PM cannot be either disabled or enabled for @dev and its runtime PM - * status cannot change. 
- */ +static inline bool device_run_wake(struct device *dev) +{ + return dev->power.run_wake; +} + +static inline void device_set_run_wake(struct device *dev, bool enable) +{ + dev->power.run_wake = enable; +} + static inline bool pm_runtime_suspended(struct device *dev) { return dev->power.runtime_status == RPM_SUSPENDED && !dev->power.disable_depth; } -/** - * pm_runtime_active - Check whether or not a device is runtime-active. - * @dev: Target device. - * - * Return %true if runtime PM is enabled for @dev and its runtime PM status is - * %RPM_ACTIVE, or %false otherwise. - * - * Note that the return value of this function can only be trusted if it is - * called under the runtime PM lock of @dev or under conditions in which - * runtime PM cannot be either disabled or enabled for @dev and its runtime PM - * status cannot change. - */ static inline bool pm_runtime_active(struct device *dev) { return dev->power.runtime_status == RPM_ACTIVE || dev->power.disable_depth; } -/** - * pm_runtime_status_suspended - Check if runtime PM status is "suspended". - * @dev: Target device. - * - * Return %true if the runtime PM status of @dev is %RPM_SUSPENDED, or %false - * otherwise, regardless of whether or not runtime PM has been enabled for @dev. - * - * Note that the return value of this function can only be trusted if it is - * called under the runtime PM lock of @dev or under conditions in which the - * runtime PM status of @dev cannot change. - */ static inline bool pm_runtime_status_suspended(struct device *dev) { return dev->power.runtime_status == RPM_SUSPENDED; } -/** - * pm_runtime_enabled - Check if runtime PM is enabled. - * @dev: Target device. - * - * Return %true if runtime PM is enabled for @dev or %false otherwise. - * - * Note that the return value of this function can only be trusted if it is - * called under the runtime PM lock of @dev or under conditions in which - * runtime PM cannot be either disabled or enabled for @dev. 
- */ static inline bool pm_runtime_enabled(struct device *dev) { return !dev->power.disable_depth; } -/** - * pm_runtime_has_no_callbacks - Check if runtime PM callbacks may be present. - * @dev: Target device. - * - * Return %true if @dev is a special device without runtime PM callbacks or - * %false otherwise. - */ -static inline bool pm_runtime_has_no_callbacks(struct device *dev) +static inline bool pm_runtime_callbacks_present(struct device *dev) { - return dev->power.no_callbacks; + return !dev->power.no_callbacks; } -/** - * pm_runtime_mark_last_busy - Update the last access time of a device. - * @dev: Target device. - * - * Update the last access time of @dev used by the runtime PM autosuspend - * mechanism to the current time as returned by ktime_get_mono_fast_ns(). - */ static inline void pm_runtime_mark_last_busy(struct device *dev) { - WRITE_ONCE(dev->power.last_busy, ktime_get_mono_fast_ns()); + ACCESS_ONCE_RW(dev->power.last_busy) = jiffies; } -/** - * pm_runtime_is_irq_safe - Check if runtime PM can work in interrupt context. - * @dev: Target device. - * - * Return %true if @dev has been marked as an "IRQ-safe" device (with respect - * to runtime PM), in which case its runtime PM callabcks can be expected to - * work correctly when invoked from interrupt handlers. 
- */ static inline bool pm_runtime_is_irq_safe(struct device *dev) { return dev->power.irq_safe; } -extern u64 pm_runtime_suspended_time(struct device *dev); - #else /* !CONFIG_PM */ static inline bool queue_pm_work(struct work_struct *work) { return false; } @@ -242,11 +153,6 @@ static inline int pm_runtime_get_if_in_use(struct device *dev) { return -EINVAL; } -static inline int pm_runtime_get_if_active(struct device *dev, - bool ign_usage_count) -{ - return -EINVAL; -} static inline int __pm_runtime_set_status(struct device *dev, unsigned int status) { return 0; } static inline int pm_runtime_barrier(struct device *dev) { return 0; } @@ -255,11 +161,12 @@ static inline void __pm_runtime_disable(struct device *dev, bool c) {} static inline void pm_runtime_allow(struct device *dev) {} static inline void pm_runtime_forbid(struct device *dev) {} -static inline int devm_pm_runtime_enable(struct device *dev) { return 0; } - static inline void pm_suspend_ignore_children(struct device *dev, bool enable) {} +static inline bool pm_children_suspended(struct device *dev) { return false; } static inline void pm_runtime_get_noresume(struct device *dev) {} static inline void pm_runtime_put_noidle(struct device *dev) {} +static inline bool device_run_wake(struct device *dev) { return false; } +static inline void device_set_run_wake(struct device *dev, bool enable) {} static inline bool pm_runtime_suspended(struct device *dev) { return false; } static inline bool pm_runtime_active(struct device *dev) { return true; } static inline bool pm_runtime_status_suspended(struct device *dev) { return false; } @@ -269,291 +176,110 @@ static inline void pm_runtime_no_callbacks(struct device *dev) {} static inline void pm_runtime_irq_safe(struct device *dev) {} static inline bool pm_runtime_is_irq_safe(struct device *dev) { return false; } -static inline bool pm_runtime_has_no_callbacks(struct device *dev) { return false; } +static inline bool pm_runtime_callbacks_present(struct device *dev) 
{ return false; } static inline void pm_runtime_mark_last_busy(struct device *dev) {} static inline void __pm_runtime_use_autosuspend(struct device *dev, bool use) {} static inline void pm_runtime_set_autosuspend_delay(struct device *dev, int delay) {} -static inline u64 pm_runtime_autosuspend_expiration( +static inline unsigned long pm_runtime_autosuspend_expiration( struct device *dev) { return 0; } static inline void pm_runtime_set_memalloc_noio(struct device *dev, bool enable){} -static inline void pm_runtime_get_suppliers(struct device *dev) {} -static inline void pm_runtime_put_suppliers(struct device *dev) {} -static inline void pm_runtime_new_link(struct device *dev) {} -static inline void pm_runtime_drop_link(struct device_link *link) {} #endif /* !CONFIG_PM */ -/** - * pm_runtime_idle - Conditionally set up autosuspend of a device or suspend it. - * @dev: Target device. - * - * Invoke the "idle check" callback of @dev and, depending on its return value, - * set up autosuspend of @dev or suspend it (depending on whether or not - * autosuspend has been enabled for it). - */ static inline int pm_runtime_idle(struct device *dev) { return __pm_runtime_idle(dev, 0); } -/** - * pm_runtime_suspend - Suspend a device synchronously. - * @dev: Target device. - */ static inline int pm_runtime_suspend(struct device *dev) { return __pm_runtime_suspend(dev, 0); } -/** - * pm_runtime_autosuspend - Set up autosuspend of a device or suspend it. - * @dev: Target device. - * - * Set up autosuspend of @dev or suspend it (depending on whether or not - * autosuspend is enabled for it) without engaging its "idle check" callback. - */ static inline int pm_runtime_autosuspend(struct device *dev) { return __pm_runtime_suspend(dev, RPM_AUTO); } -/** - * pm_runtime_resume - Resume a device synchronously. - * @dev: Target device. 
- */ static inline int pm_runtime_resume(struct device *dev) { return __pm_runtime_resume(dev, 0); } -/** - * pm_request_idle - Queue up "idle check" execution for a device. - * @dev: Target device. - * - * Queue up a work item to run an equivalent of pm_runtime_idle() for @dev - * asynchronously. - */ static inline int pm_request_idle(struct device *dev) { return __pm_runtime_idle(dev, RPM_ASYNC); } -/** - * pm_request_resume - Queue up runtime-resume of a device. - * @dev: Target device. - */ static inline int pm_request_resume(struct device *dev) { return __pm_runtime_resume(dev, RPM_ASYNC); } -/** - * pm_request_autosuspend - Queue up autosuspend of a device. - * @dev: Target device. - * - * Queue up a work item to run an equivalent pm_runtime_autosuspend() for @dev - * asynchronously. - */ static inline int pm_request_autosuspend(struct device *dev) { return __pm_runtime_suspend(dev, RPM_ASYNC | RPM_AUTO); } -/** - * pm_runtime_get - Bump up usage counter and queue up resume of a device. - * @dev: Target device. - * - * Bump up the runtime PM usage counter of @dev and queue up a work item to - * carry out runtime-resume of it. - */ static inline int pm_runtime_get(struct device *dev) { return __pm_runtime_resume(dev, RPM_GET_PUT | RPM_ASYNC); } -/** - * pm_runtime_get_sync - Bump up usage counter of a device and resume it. - * @dev: Target device. - * - * Bump up the runtime PM usage counter of @dev and carry out runtime-resume of - * it synchronously. - * - * The possible return values of this function are the same as for - * pm_runtime_resume() and the runtime PM usage counter of @dev remains - * incremented in all cases, even if it returns an error code. - * Consider using pm_runtime_resume_and_get() instead of it, especially - * if its return value is checked by the caller, as this is likely to result - * in cleaner code. 
- */ static inline int pm_runtime_get_sync(struct device *dev) { return __pm_runtime_resume(dev, RPM_GET_PUT); } -/** - * pm_runtime_resume_and_get - Bump up usage counter of a device and resume it. - * @dev: Target device. - * - * Resume @dev synchronously and if that is successful, increment its runtime - * PM usage counter. Return 0 if the runtime PM usage counter of @dev has been - * incremented or a negative error code otherwise. - */ -static inline int pm_runtime_resume_and_get(struct device *dev) -{ - int ret; - - ret = __pm_runtime_resume(dev, RPM_GET_PUT); - if (ret < 0) { - pm_runtime_put_noidle(dev); - return ret; - } - - return 0; -} - -/** - * pm_runtime_put - Drop device usage counter and queue up "idle check" if 0. - * @dev: Target device. - * - * Decrement the runtime PM usage counter of @dev and if it turns out to be - * equal to 0, queue up a work item for @dev like in pm_request_idle(). - */ static inline int pm_runtime_put(struct device *dev) { return __pm_runtime_idle(dev, RPM_GET_PUT | RPM_ASYNC); } -/** - * pm_runtime_put_autosuspend - Drop device usage counter and queue autosuspend if 0. - * @dev: Target device. - * - * Decrement the runtime PM usage counter of @dev and if it turns out to be - * equal to 0, queue up a work item for @dev like in pm_request_autosuspend(). - */ static inline int pm_runtime_put_autosuspend(struct device *dev) { return __pm_runtime_suspend(dev, RPM_GET_PUT | RPM_ASYNC | RPM_AUTO); } -/** - * pm_runtime_put_sync - Drop device usage counter and run "idle check" if 0. - * @dev: Target device. - * - * Decrement the runtime PM usage counter of @dev and if it turns out to be - * equal to 0, invoke the "idle check" callback of @dev and, depending on its - * return value, set up autosuspend of @dev or suspend it (depending on whether - * or not autosuspend has been enabled for it). 
- * - * The possible return values of this function are the same as for - * pm_runtime_idle() and the runtime PM usage counter of @dev remains - * decremented in all cases, even if it returns an error code. - */ static inline int pm_runtime_put_sync(struct device *dev) { return __pm_runtime_idle(dev, RPM_GET_PUT); } -/** - * pm_runtime_put_sync_suspend - Drop device usage counter and suspend if 0. - * @dev: Target device. - * - * Decrement the runtime PM usage counter of @dev and if it turns out to be - * equal to 0, carry out runtime-suspend of @dev synchronously. - * - * The possible return values of this function are the same as for - * pm_runtime_suspend() and the runtime PM usage counter of @dev remains - * decremented in all cases, even if it returns an error code. - */ static inline int pm_runtime_put_sync_suspend(struct device *dev) { return __pm_runtime_suspend(dev, RPM_GET_PUT); } -/** - * pm_runtime_put_sync_autosuspend - Drop device usage counter and autosuspend if 0. - * @dev: Target device. - * - * Decrement the runtime PM usage counter of @dev and if it turns out to be - * equal to 0, set up autosuspend of @dev or suspend it synchronously (depending - * on whether or not autosuspend has been enabled for it). - * - * The possible return values of this function are the same as for - * pm_runtime_autosuspend() and the runtime PM usage counter of @dev remains - * decremented in all cases, even if it returns an error code. - */ static inline int pm_runtime_put_sync_autosuspend(struct device *dev) { return __pm_runtime_suspend(dev, RPM_GET_PUT | RPM_AUTO); } -/** - * pm_runtime_set_active - Set runtime PM status to "active". - * @dev: Target device. - * - * Set the runtime PM status of @dev to %RPM_ACTIVE and ensure that dependencies - * of it will be taken into account. - * - * It is not valid to call this function for devices with runtime PM enabled. 
- */ static inline int pm_runtime_set_active(struct device *dev) { return __pm_runtime_set_status(dev, RPM_ACTIVE); } -/** - * pm_runtime_set_suspended - Set runtime PM status to "suspended". - * @dev: Target device. - * - * Set the runtime PM status of @dev to %RPM_SUSPENDED and ensure that - * dependencies of it will be taken into account. - * - * It is not valid to call this function for devices with runtime PM enabled. - */ -static inline int pm_runtime_set_suspended(struct device *dev) +static inline void pm_runtime_set_suspended(struct device *dev) { - return __pm_runtime_set_status(dev, RPM_SUSPENDED); + __pm_runtime_set_status(dev, RPM_SUSPENDED); } -/** - * pm_runtime_disable - Disable runtime PM for a device. - * @dev: Target device. - * - * Prevent the runtime PM framework from working with @dev (by incrementing its - * "blocking" counter). - * - * For each invocation of this function for @dev there must be a matching - * pm_runtime_enable() call in order for runtime PM to be enabled for it. - */ static inline void pm_runtime_disable(struct device *dev) { __pm_runtime_disable(dev, true); } -/** - * pm_runtime_use_autosuspend - Allow autosuspend to be used for a device. - * @dev: Target device. - * - * Allow the runtime PM autosuspend mechanism to be used for @dev whenever - * requested (or "autosuspend" will be handled as direct runtime-suspend for - * it). - */ static inline void pm_runtime_use_autosuspend(struct device *dev) { __pm_runtime_use_autosuspend(dev, true); } -/** - * pm_runtime_dont_use_autosuspend - Prevent autosuspend from being used. - * @dev: Target device. - * - * Prevent the runtime PM autosuspend mechanism from being used for @dev which - * means that "autosuspend" will be handled as direct runtime-suspend for it - * going forward. 
- */ static inline void pm_runtime_dont_use_autosuspend(struct device *dev) { __pm_runtime_use_autosuspend(dev, false); diff --git a/include/linux/pm_wakeup.h b/include/linux/pm_wakeup.h index 196a157456..a3447932df 100644 --- a/include/linux/pm_wakeup.h +++ b/include/linux/pm_wakeup.h @@ -1,9 +1,22 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * pm_wakeup.h - Power management wakeup interface * * Copyright (C) 2008 Alan Stern * Copyright (C) 2010 Rafael J. Wysocki, Novell Inc. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #ifndef _LINUX_PM_WAKEUP_H @@ -21,7 +34,6 @@ struct wake_irq; * struct wakeup_source - Representation of wakeup sources * * @name: Name of the wakeup source - * @id: Wakeup source id * @entry: Wakeup source list entry * @lock: Wakeup source lock * @wakeirq: Optional device specific wakeirq @@ -36,13 +48,11 @@ struct wake_irq; * @relax_count: Number of times the wakeup source was deactivated. * @expire_count: Number of times the wakeup source's timeout has expired. * @wakeup_count: Number of times the wakeup source might abort suspend. - * @dev: Struct device for sysfs statistics about the wakeup source. * @active: Status of the wakeup source. - * @autosleep_enabled: Autosleep is active, so update @prevent_sleep_time. 
+ * @has_timeout: The wakeup source has been activated with a timeout. */ struct wakeup_source { const char *name; - int id; struct list_head entry; spinlock_t lock; struct wake_irq *wakeirq; @@ -58,16 +68,10 @@ struct wakeup_source { unsigned long relax_count; unsigned long expire_count; unsigned long wakeup_count; - struct device *dev; bool active:1; bool autosleep_enabled:1; }; -#define for_each_wakeup_source(ws) \ - for ((ws) = wakeup_sources_walk_start(); \ - (ws); \ - (ws) = wakeup_sources_walk_next((ws))) - #ifdef CONFIG_PM_SLEEP /* @@ -84,28 +88,15 @@ static inline bool device_may_wakeup(struct device *dev) return dev->power.can_wakeup && !!dev->power.wakeup; } -static inline bool device_wakeup_path(struct device *dev) -{ - return dev->power.wakeup_path; -} - -static inline void device_set_wakeup_path(struct device *dev) -{ - dev->power.wakeup_path = true; -} - /* drivers/base/power/wakeup.c */ +extern void wakeup_source_prepare(struct wakeup_source *ws, const char *name); extern struct wakeup_source *wakeup_source_create(const char *name); +extern void wakeup_source_drop(struct wakeup_source *ws); extern void wakeup_source_destroy(struct wakeup_source *ws); extern void wakeup_source_add(struct wakeup_source *ws); extern void wakeup_source_remove(struct wakeup_source *ws); -extern struct wakeup_source *wakeup_source_register(struct device *dev, - const char *name); +extern struct wakeup_source *wakeup_source_register(const char *name); extern void wakeup_source_unregister(struct wakeup_source *ws); -extern int wakeup_sources_read_lock(void); -extern void wakeup_sources_read_unlock(int idx); -extern struct wakeup_source *wakeup_sources_walk_start(void); -extern struct wakeup_source *wakeup_sources_walk_next(struct wakeup_source *ws); extern int device_wakeup_enable(struct device *dev); extern int device_wakeup_disable(struct device *dev); extern void device_set_wakeup_capable(struct device *dev, bool capable); @@ -115,8 +106,8 @@ extern void 
__pm_stay_awake(struct wakeup_source *ws); extern void pm_stay_awake(struct device *dev); extern void __pm_relax(struct wakeup_source *ws); extern void pm_relax(struct device *dev); -extern void pm_wakeup_ws_event(struct wakeup_source *ws, unsigned int msec, bool hard); -extern void pm_wakeup_dev_event(struct device *dev, unsigned int msec, bool hard); +extern void __pm_wakeup_event(struct wakeup_source *ws, unsigned int msec); +extern void pm_wakeup_event(struct device *dev, unsigned int msec); #else /* !CONFIG_PM_SLEEP */ @@ -130,19 +121,23 @@ static inline bool device_can_wakeup(struct device *dev) return dev->power.can_wakeup; } +static inline void wakeup_source_prepare(struct wakeup_source *ws, + const char *name) {} + static inline struct wakeup_source *wakeup_source_create(const char *name) { return NULL; } +static inline void wakeup_source_drop(struct wakeup_source *ws) {} + static inline void wakeup_source_destroy(struct wakeup_source *ws) {} static inline void wakeup_source_add(struct wakeup_source *ws) {} static inline void wakeup_source_remove(struct wakeup_source *ws) {} -static inline struct wakeup_source *wakeup_source_register(struct device *dev, - const char *name) +static inline struct wakeup_source *wakeup_source_register(const char *name) { return NULL; } @@ -179,13 +174,6 @@ static inline bool device_may_wakeup(struct device *dev) return dev->power.can_wakeup && dev->power.should_wakeup; } -static inline bool device_wakeup_path(struct device *dev) -{ - return false; -} - -static inline void device_set_wakeup_path(struct device *dev) {} - static inline void __pm_stay_awake(struct wakeup_source *ws) {} static inline void pm_stay_awake(struct device *dev) {} @@ -194,27 +182,23 @@ static inline void __pm_relax(struct wakeup_source *ws) {} static inline void pm_relax(struct device *dev) {} -static inline void pm_wakeup_ws_event(struct wakeup_source *ws, - unsigned int msec, bool hard) {} +static inline void __pm_wakeup_event(struct wakeup_source 
*ws, unsigned int msec) {} -static inline void pm_wakeup_dev_event(struct device *dev, unsigned int msec, - bool hard) {} +static inline void pm_wakeup_event(struct device *dev, unsigned int msec) {} #endif /* !CONFIG_PM_SLEEP */ -static inline void __pm_wakeup_event(struct wakeup_source *ws, unsigned int msec) +static inline void wakeup_source_init(struct wakeup_source *ws, + const char *name) { - return pm_wakeup_ws_event(ws, msec, false); + wakeup_source_prepare(ws, name); + wakeup_source_add(ws); } -static inline void pm_wakeup_event(struct device *dev, unsigned int msec) +static inline void wakeup_source_trash(struct wakeup_source *ws) { - return pm_wakeup_dev_event(dev, msec, false); -} - -static inline void pm_wakeup_hard_event(struct device *dev) -{ - return pm_wakeup_dev_event(dev, 0, true); + wakeup_source_remove(ws); + wakeup_source_drop(ws); } #endif /* _LINUX_PM_WAKEUP_H */ diff --git a/include/linux/pmem.h b/include/linux/pmem.h new file mode 100644 index 0000000000..e856c2cb0f --- /dev/null +++ b/include/linux/pmem.h @@ -0,0 +1,165 @@ +/* + * Copyright(c) 2015 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + */ +#ifndef __PMEM_H__ +#define __PMEM_H__ + +#include <linux/io.h> +#include <linux/uio.h> + +#ifdef CONFIG_ARCH_HAS_PMEM_API +#define ARCH_MEMREMAP_PMEM MEMREMAP_WB +#include <asm/pmem.h> +#else +#define ARCH_MEMREMAP_PMEM MEMREMAP_WT +/* + * These are simply here to enable compilation, all call sites gate + * calling these symbols with arch_has_pmem_api() and redirect to the + * implementation in asm/pmem.h. 
+ */ +static inline void arch_memcpy_to_pmem(void *dst, const void *src, size_t n) +{ + BUG(); +} + +static inline int arch_memcpy_from_pmem(void *dst, const void *src, size_t n) +{ + BUG(); + return -EFAULT; +} + +static inline size_t arch_copy_from_iter_pmem(void *addr, size_t bytes, + struct iov_iter *i) +{ + BUG(); + return 0; +} + +static inline void arch_clear_pmem(void *addr, size_t size) +{ + BUG(); +} + +static inline void arch_wb_cache_pmem(void *addr, size_t size) +{ + BUG(); +} + +static inline void arch_invalidate_pmem(void *addr, size_t size) +{ + BUG(); +} +#endif + +static inline bool arch_has_pmem_api(void) +{ + return IS_ENABLED(CONFIG_ARCH_HAS_PMEM_API); +} + +/* + * memcpy_from_pmem - read from persistent memory with error handling + * @dst: destination buffer + * @src: source buffer + * @size: transfer length + * + * Returns 0 on success negative error code on failure. + */ +static inline int memcpy_from_pmem(void *dst, void const *src, size_t size) +{ + if (arch_has_pmem_api()) + return arch_memcpy_from_pmem(dst, src, size); + else + memcpy(dst, src, size); + return 0; +} + +/** + * memcpy_to_pmem - copy data to persistent memory + * @dst: destination buffer for the copy + * @src: source buffer for the copy + * @n: length of the copy in bytes + * + * Perform a memory copy that results in the destination of the copy + * being effectively evicted from, or never written to, the processor + * cache hierarchy after the copy completes. After memcpy_to_pmem() + * data may still reside in cpu or platform buffers, so this operation + * must be followed by a blkdev_issue_flush() on the pmem block device. 
+ */ +static inline void memcpy_to_pmem(void *dst, const void *src, size_t n) +{ + if (arch_has_pmem_api()) + arch_memcpy_to_pmem(dst, src, n); + else + memcpy(dst, src, n); +} + +/** + * copy_from_iter_pmem - copy data from an iterator to PMEM + * @addr: PMEM destination address + * @bytes: number of bytes to copy + * @i: iterator with source data + * + * Copy data from the iterator 'i' to the PMEM buffer starting at 'addr'. + * See blkdev_issue_flush() note for memcpy_to_pmem(). + */ +static inline size_t copy_from_iter_pmem(void *addr, size_t bytes, + struct iov_iter *i) +{ + if (arch_has_pmem_api()) + return arch_copy_from_iter_pmem(addr, bytes, i); + return copy_from_iter_nocache(addr, bytes, i); +} + +/** + * clear_pmem - zero a PMEM memory range + * @addr: virtual start address + * @size: number of bytes to zero + * + * Write zeros into the memory range starting at 'addr' for 'size' bytes. + * See blkdev_issue_flush() note for memcpy_to_pmem(). + */ +static inline void clear_pmem(void *addr, size_t size) +{ + if (arch_has_pmem_api()) + arch_clear_pmem(addr, size); + else + memset(addr, 0, size); +} + +/** + * invalidate_pmem - flush a pmem range from the cache hierarchy + * @addr: virtual start address + * @size: bytes to invalidate (internally aligned to cache line size) + * + * For platforms that support clearing poison this flushes any poisoned + * ranges out of the cache + */ +static inline void invalidate_pmem(void *addr, size_t size) +{ + if (arch_has_pmem_api()) + arch_invalidate_pmem(addr, size); +} + +/** + * wb_cache_pmem - write back processor cache for PMEM memory range + * @addr: virtual start address + * @size: number of bytes to write back + * + * Write back the processor cache range starting at 'addr' for 'size' bytes. + * See blkdev_issue_flush() note for memcpy_to_pmem(). 
+ */ +static inline void wb_cache_pmem(void *addr, size_t size) +{ + if (arch_has_pmem_api()) + arch_wb_cache_pmem(addr, size); +} +#endif /* __PMEM_H__ */ diff --git a/include/linux/pmu.h b/include/linux/pmu.h index 52453a24a2..99b400b8a2 100644 --- a/include/linux/pmu.h +++ b/include/linux/pmu.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ /* * Definitions for talking to the PMU. The PMU is a microcontroller * which controls battery charging and system power on PowerBook 3400 @@ -9,7 +8,6 @@ #ifndef _LINUX_PMU_H #define _LINUX_PMU_H -#include #include @@ -37,9 +35,6 @@ static inline void pmu_resume(void) extern void pmu_enable_irled(int on); -extern time64_t pmu_get_time(void); -extern int pmu_set_rtc_time(struct rtc_time *tm); - extern void pmu_restart(void); extern void pmu_shutdown(void); extern void pmu_unlock(void); diff --git a/include/linux/pnp.h b/include/linux/pnp.h index c2a7cfbca7..b705409b7c 100644 --- a/include/linux/pnp.h +++ b/include/linux/pnp.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ /* * Linux Plug and Play Support * Copyright by Adam Belay @@ -220,8 +219,10 @@ struct pnp_card { #define global_to_pnp_card(n) list_entry(n, struct pnp_card, global_list) #define protocol_to_pnp_card(n) list_entry(n, struct pnp_card, protocol_list) #define to_pnp_card(n) container_of(n, struct pnp_card, dev) -#define pnp_for_each_card(card) \ - list_for_each_entry(card, &pnp_cards, global_list) +#define pnp_for_each_card(card) \ + for((card) = global_to_pnp_card(pnp_cards.next); \ + (card) != global_to_pnp_card(&pnp_cards); \ + (card) = global_to_pnp_card((card)->global_list.next)) struct pnp_card_link { struct pnp_card *card; @@ -274,9 +275,14 @@ struct pnp_dev { #define card_to_pnp_dev(n) list_entry(n, struct pnp_dev, card_list) #define protocol_to_pnp_dev(n) list_entry(n, struct pnp_dev, protocol_list) #define to_pnp_dev(n) container_of(n, struct pnp_dev, dev) -#define pnp_for_each_dev(dev) list_for_each_entry(dev, &pnp_global, 
global_list) -#define card_for_each_dev(card, dev) \ - list_for_each_entry(dev, &(card)->devices, card_list) +#define pnp_for_each_dev(dev) \ + for((dev) = global_to_pnp_dev(pnp_global.next); \ + (dev) != global_to_pnp_dev(&pnp_global); \ + (dev) = global_to_pnp_dev((dev)->global_list.next)) +#define card_for_each_dev(card,dev) \ + for((dev) = card_to_pnp_dev((card)->devices.next); \ + (dev) != card_to_pnp_dev(&(card)->devices); \ + (dev) = card_to_pnp_dev((dev)->card_list.next)) #define pnp_dev_name(dev) (dev)->name static inline void *pnp_get_drvdata(struct pnp_dev *pdev) @@ -292,7 +298,7 @@ static inline void pnp_set_drvdata(struct pnp_dev *pdev, void *data) struct pnp_fixup { char id[7]; void (*quirk_function) (struct pnp_dev * dev); /* fixup function */ -}; +} __do_const; /* config parameters */ #define PNP_CONFIG_NORMAL 0x0001 @@ -372,7 +378,7 @@ struct pnp_id { }; struct pnp_driver { - const char *name; + char *name; const struct pnp_device_id *id_table; unsigned int flags; int (*probe) (struct pnp_dev *dev, const struct pnp_device_id *dev_id); @@ -430,10 +436,14 @@ struct pnp_protocol { }; #define to_pnp_protocol(n) list_entry(n, struct pnp_protocol, protocol_list) -#define protocol_for_each_card(protocol, card) \ - list_for_each_entry(card, &(protocol)->cards, protocol_list) -#define protocol_for_each_dev(protocol, dev) \ - list_for_each_entry(dev, &(protocol)->devices, protocol_list) +#define protocol_for_each_card(protocol,card) \ + for((card) = protocol_to_pnp_card((protocol)->cards.next); \ + (card) != protocol_to_pnp_card(&(protocol)->cards); \ + (card) = protocol_to_pnp_card((card)->protocol_list.next)) +#define protocol_for_each_dev(protocol,dev) \ + for((dev) = protocol_to_pnp_dev((protocol)->devices.next); \ + (dev) != protocol_to_pnp_dev(&(protocol)->devices); \ + (dev) = protocol_to_pnp_dev((dev)->protocol_list.next)) extern struct bus_type pnp_bus_type; diff --git a/include/linux/poison.h b/include/linux/poison.h index d62ef5a6b4..7fda393eb4 
100644 --- a/include/linux/poison.h +++ b/include/linux/poison.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_POISON_H #define _LINUX_POISON_H @@ -20,14 +19,22 @@ * under normal circumstances, used to verify that nobody uses * non-initialized list entries. */ -#define LIST_POISON1 ((void *) 0x100 + POISON_POINTER_DELTA) -#define LIST_POISON2 ((void *) 0x122 + POISON_POINTER_DELTA) +#define LIST_POISON1 ((void *) (long)0xFFFFFF02) +#define LIST_POISON2 ((void *) (long)0xFFFFFF04) /********** include/linux/timer.h **********/ +/* + * Magic number "tsta" to indicate a static timer initializer + * for the object debugging code. + */ #define TIMER_ENTRY_STATIC ((void *) 0x300 + POISON_POINTER_DELTA) -/********** mm/page_poison.c **********/ +/********** mm/debug-pagealloc.c **********/ +#ifdef CONFIG_PAGE_POISONING_ZERO +#define PAGE_POISON 0x00 +#else #define PAGE_POISON 0xaa +#endif /********** mm/page_alloc.c ************/ @@ -73,12 +80,11 @@ /********** kernel/mutexes **********/ #define MUTEX_DEBUG_INIT 0x11 #define MUTEX_DEBUG_FREE 0x22 -#define MUTEX_POISON_WW_CTX ((void *) 0x500 + POISON_POINTER_DELTA) + +/********** lib/flex_array.c **********/ +#define FLEX_ARRAY_FREE 0x6c /* for use-after-free poisoning */ /********** security/ **********/ #define KEY_DESTROY 0xbd -/********** net/core/page_pool.c **********/ -#define PP_SIGNATURE (0x40 + POISON_POINTER_DELTA) - #endif diff --git a/include/linux/poll.h b/include/linux/poll.h index 1cdc32b1f1..37b057b63b 100644 --- a/include/linux/poll.h +++ b/include/linux/poll.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_POLL_H #define _LINUX_POLL_H @@ -9,25 +8,20 @@ #include #include #include -#include +#include #include -#include extern struct ctl_table epoll_table[]; /* for sysctl */ /* ~832 bytes of stack space used max in sys_select/sys_poll before allocating additional memory. 
*/ -#ifdef __clang__ -#define MAX_STACK_ALLOC 768 -#else #define MAX_STACK_ALLOC 832 -#endif #define FRONTEND_STACK_ALLOC 256 #define SELECT_STACK_ALLOC FRONTEND_STACK_ALLOC #define POLL_STACK_ALLOC FRONTEND_STACK_ALLOC #define WQUEUES_STACK_ALLOC (MAX_STACK_ALLOC - FRONTEND_STACK_ALLOC) #define N_INLINE_POLL_ENTRIES (WQUEUES_STACK_ALLOC / sizeof(struct poll_table_entry)) -#define DEFAULT_POLLMASK (EPOLLIN | EPOLLOUT | EPOLLRDNORM | EPOLLWRNORM) +#define DEFAULT_POLLMASK (POLLIN | POLLOUT | POLLRDNORM | POLLWRNORM) struct poll_table_struct; @@ -42,7 +36,7 @@ typedef void (*poll_queue_proc)(struct file *, wait_queue_head_t *, struct poll_ */ typedef struct poll_table_struct { poll_queue_proc _qproc; - __poll_t _key; + unsigned long _key; } poll_table; static inline void poll_wait(struct file * filp, wait_queue_head_t * wait_address, poll_table *p) @@ -67,33 +61,21 @@ static inline bool poll_does_not_wait(const poll_table *p) * to be started implicitly on poll(). You typically only want to do that * if the application is actually polling for POLLIN and/or POLLOUT. */ -static inline __poll_t poll_requested_events(const poll_table *p) +static inline unsigned long poll_requested_events(const poll_table *p) { - return p ? p->_key : ~(__poll_t)0; + return p ? 
p->_key : ~0UL; } static inline void init_poll_funcptr(poll_table *pt, poll_queue_proc qproc) { pt->_qproc = qproc; - pt->_key = ~(__poll_t)0; /* all events enabled */ -} - -static inline bool file_can_poll(struct file *file) -{ - return file->f_op->poll; -} - -static inline __poll_t vfs_poll(struct file *file, struct poll_table_struct *pt) -{ - if (unlikely(!file->f_op->poll)) - return DEFAULT_POLLMASK; - return file->f_op->poll(file, pt); + pt->_key = ~0UL; /* all events enabled */ } struct poll_table_entry { struct file *filp; - __poll_t key; - wait_queue_entry_t wait; + unsigned long key; + wait_queue_t wait; wait_queue_head_t *wait_address; }; @@ -112,38 +94,72 @@ struct poll_wqueues { extern void poll_initwait(struct poll_wqueues *pwq); extern void poll_freewait(struct poll_wqueues *pwq); +extern int poll_schedule_timeout(struct poll_wqueues *pwq, int state, + ktime_t *expires, unsigned long slack); extern u64 select_estimate_accuracy(struct timespec64 *tv); + +static inline int poll_schedule(struct poll_wqueues *pwq, int state) +{ + return poll_schedule_timeout(pwq, state, NULL, 0); +} + +/* + * Scalable version of the fd_set. + */ + +typedef struct { + unsigned long *in, *out, *ex; + unsigned long *res_in, *res_out, *res_ex; +} fd_set_bits; + +/* + * How many longwords for "nr" bits? + */ +#define FDS_BITPERLONG (8*sizeof(long)) +#define FDS_LONGS(nr) (((nr)+FDS_BITPERLONG-1)/FDS_BITPERLONG) +#define FDS_BYTES(nr) (FDS_LONGS(nr)*sizeof(long)) + +/* + * We do a VERIFY_WRITE here even though we are only reading this time: + * we'll write to it eventually.. + * + * Use "unsigned long" accesses to let user-mode fd_set's be long-aligned. + */ +static inline +int get_fd_set(unsigned long nr, void __user *ufdset, unsigned long *fdset) +{ + nr = FDS_BYTES(nr); + if (ufdset) + return copy_from_user(fdset, ufdset, nr) ? 
-EFAULT : 0; + + memset(fdset, 0, nr); + return 0; +} + +static inline unsigned long __must_check +set_fd_set(unsigned long nr, void __user *ufdset, unsigned long *fdset) +{ + if (ufdset) + return __copy_to_user(ufdset, fdset, FDS_BYTES(nr)); + return 0; +} + +static inline +void zero_fd_set(unsigned long nr, unsigned long *fdset) +{ + memset(fdset, 0, FDS_BYTES(nr)); +} + #define MAX_INT64_SECONDS (((s64)(~((u64)0)>>1)/HZ)-1) +extern int do_select(int n, fd_set_bits *fds, struct timespec64 *end_time); +extern int do_sys_poll(struct pollfd __user * ufds, unsigned int nfds, + struct timespec64 *end_time); extern int core_sys_select(int n, fd_set __user *inp, fd_set __user *outp, fd_set __user *exp, struct timespec64 *end_time); extern int poll_select_set_timeout(struct timespec64 *to, time64_t sec, long nsec); -#define __MAP(v, from, to) \ - (from < to ? (v & from) * (to/from) : (v & from) / (from/to)) - -static inline __u16 mangle_poll(__poll_t val) -{ - __u16 v = (__force __u16)val; -#define M(X) __MAP(v, (__force __u16)EPOLL##X, POLL##X) - return M(IN) | M(OUT) | M(PRI) | M(ERR) | M(NVAL) | - M(RDNORM) | M(RDBAND) | M(WRNORM) | M(WRBAND) | - M(HUP) | M(RDHUP) | M(MSG); -#undef M -} - -static inline __poll_t demangle_poll(u16 val) -{ -#define M(X) (__force __poll_t)__MAP(val, POLL##X, (__force __u16)EPOLL##X) - return M(IN) | M(OUT) | M(PRI) | M(ERR) | M(NVAL) | - M(RDNORM) | M(RDBAND) | M(WRNORM) | M(WRBAND) | - M(HUP) | M(RDHUP) | M(MSG); -#undef M -} -#undef __MAP - - #endif /* _LINUX_POLL_H */ diff --git a/include/linux/posix-clock.h b/include/linux/posix-clock.h index 468328b1e1..34c4498b80 100644 --- a/include/linux/posix-clock.h +++ b/include/linux/posix-clock.h @@ -1,8 +1,21 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * posix-clock.h - support for dynamic clock devices * * Copyright (C) 2010 OMICRON electronics GmbH + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public 
License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ #ifndef _LINUX_POSIX_CLOCK_H_ #define _LINUX_POSIX_CLOCK_H_ @@ -29,6 +42,12 @@ struct posix_clock; * @clock_gettime: Read the current time * @clock_getres: Get the clock resolution * @clock_settime: Set the current time value + * @timer_create: Create a new timer + * @timer_delete: Remove a previously created timer + * @timer_gettime: Get remaining time and interval of a timer + * @timer_settime: Set a timer's initial expiration and interval + * @fasync: Optional character device fasync method + * @mmap: Optional character device mmap method * @open: Optional character device open method * @release: Optional character device release method * @ioctl: Optional character device ioctl method @@ -38,24 +57,40 @@ struct posix_clock; struct posix_clock_operations { struct module *owner; - int (*clock_adjtime)(struct posix_clock *pc, struct __kernel_timex *tx); + int (*clock_adjtime)(struct posix_clock *pc, struct timex *tx); - int (*clock_gettime)(struct posix_clock *pc, struct timespec64 *ts); + int (*clock_gettime)(struct posix_clock *pc, struct timespec *ts); - int (*clock_getres) (struct posix_clock *pc, struct timespec64 *ts); + int (*clock_getres) (struct posix_clock *pc, struct timespec *ts); int (*clock_settime)(struct posix_clock *pc, - const struct timespec64 *ts); + const struct timespec *ts); + int (*timer_create) (struct posix_clock *pc, struct k_itimer *kit); + + int 
(*timer_delete) (struct posix_clock *pc, struct k_itimer *kit); + + void (*timer_gettime)(struct posix_clock *pc, + struct k_itimer *kit, struct itimerspec *tsp); + + int (*timer_settime)(struct posix_clock *pc, + struct k_itimer *kit, int flags, + struct itimerspec *tsp, struct itimerspec *old); /* * Optional character device methods: */ + int (*fasync) (struct posix_clock *pc, + int fd, struct file *file, int on); + long (*ioctl) (struct posix_clock *pc, unsigned int cmd, unsigned long arg); + int (*mmap) (struct posix_clock *pc, + struct vm_area_struct *vma); + int (*open) (struct posix_clock *pc, fmode_t f_mode); - __poll_t (*poll) (struct posix_clock *pc, + uint (*poll) (struct posix_clock *pc, struct file *file, poll_table *wait); int (*release) (struct posix_clock *pc); @@ -69,32 +104,29 @@ struct posix_clock_operations { * * @ops: Functional interface to the clock * @cdev: Character device instance for this clock - * @dev: Pointer to the clock's device. + * @kref: Reference count. * @rwsem: Protects the 'zombie' field from concurrent access. * @zombie: If 'zombie' is true, then the hardware has disappeared. + * @release: A function to free the structure when the reference count reaches + * zero. May be NULL if structure is statically allocated. * * Drivers should embed their struct posix_clock within a private * structure, obtaining a reference to it during callbacks using * container_of(). - * - * Drivers should supply an initialized but not exposed struct device - * to posix_clock_register(). It is used to manage lifetime of the - * driver's private structure. It's 'release' field should be set to - * a release function for this private structure. */ struct posix_clock { struct posix_clock_operations ops; struct cdev cdev; - struct device *dev; + struct kref kref; struct rw_semaphore rwsem; bool zombie; + void (*release)(struct posix_clock *clk); }; /** * posix_clock_register() - register a new clock - * @clk: Pointer to the clock. 
Caller must provide 'ops' field - * @dev: Pointer to the initialized device. Caller must provide - * 'release' field + * @clk: Pointer to the clock. Caller must provide 'ops' and 'release' + * @devid: Allocated device id * * A clock driver calls this function to register itself with the * clock device subsystem. If 'clk' points to dynamically allocated @@ -103,7 +135,7 @@ struct posix_clock { * * Returns zero on success, non-zero otherwise. */ -int posix_clock_register(struct posix_clock *clk, struct device *dev); +int posix_clock_register(struct posix_clock *clk, dev_t devid); /** * posix_clock_unregister() - unregister a clock diff --git a/include/linux/posix-timers.h b/include/linux/posix-timers.h index 00fef00643..62d44c1760 100644 --- a/include/linux/posix-timers.h +++ b/include/linux/posix-timers.h @@ -1,15 +1,29 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _linux_POSIX_TIMERS_H #define _linux_POSIX_TIMERS_H #include #include +#include +#include #include -#include -#include -struct kernel_siginfo; -struct task_struct; + +static inline unsigned long long cputime_to_expires(cputime_t expires) +{ + return (__force unsigned long long)expires; +} + +static inline cputime_t expires_to_cputime(unsigned long long expires) +{ + return (__force cputime_t)expires; +} + +struct cpu_timer_list { + struct list_head entry; + unsigned long long expires, incr; + struct task_struct *task; + int firing; +}; /* * Bit fields within a clockid: @@ -36,222 +50,89 @@ struct task_struct; #define CLOCKFD CPUCLOCK_MAX #define CLOCKFD_MASK (CPUCLOCK_PERTHREAD_MASK|CPUCLOCK_CLOCK_MASK) -static inline clockid_t make_process_cpuclock(const unsigned int pid, - const clockid_t clock) -{ - return ((~pid) << 3) | clock; -} -static inline clockid_t make_thread_cpuclock(const unsigned int tid, - const clockid_t clock) -{ - return make_process_cpuclock(tid, clock | CPUCLOCK_PERTHREAD_MASK); -} +#define MAKE_PROCESS_CPUCLOCK(pid, clock) \ + ((~(clockid_t) (pid) << 3) | (clockid_t) (clock)) 
+#define MAKE_THREAD_CPUCLOCK(tid, clock) \ + MAKE_PROCESS_CPUCLOCK((tid), (clock) | CPUCLOCK_PERTHREAD_MASK) -static inline clockid_t fd_to_clockid(const int fd) -{ - return make_process_cpuclock((unsigned int) fd, CLOCKFD); -} +#define FD_TO_CLOCKID(fd) ((~(clockid_t) (fd) << 3) | CLOCKFD) +#define CLOCKID_TO_FD(clk) ((unsigned int) ~((clk) >> 3)) -static inline int clockid_to_fd(const clockid_t clk) -{ - return ~(clk >> 3); -} - -#ifdef CONFIG_POSIX_TIMERS - -/** - * cpu_timer - Posix CPU timer representation for k_itimer - * @node: timerqueue node to queue in the task/sig - * @head: timerqueue head on which this timer is queued - * @task: Pointer to target task - * @elist: List head for the expiry list - * @firing: Timer is currently firing - */ -struct cpu_timer { - struct timerqueue_node node; - struct timerqueue_head *head; - struct pid *pid; - struct list_head elist; - int firing; -}; - -static inline bool cpu_timer_enqueue(struct timerqueue_head *head, - struct cpu_timer *ctmr) -{ - ctmr->head = head; - return timerqueue_add(head, &ctmr->node); -} - -static inline bool cpu_timer_queued(struct cpu_timer *ctmr) -{ - return !!ctmr->head; -} - -static inline bool cpu_timer_dequeue(struct cpu_timer *ctmr) -{ - if (cpu_timer_queued(ctmr)) { - timerqueue_del(ctmr->head, &ctmr->node); - ctmr->head = NULL; - return true; - } - return false; -} - -static inline u64 cpu_timer_getexpires(struct cpu_timer *ctmr) -{ - return ctmr->node.expires; -} - -static inline void cpu_timer_setexpires(struct cpu_timer *ctmr, u64 exp) -{ - ctmr->node.expires = exp; -} - -/** - * posix_cputimer_base - Container per posix CPU clock - * @nextevt: Earliest-expiration cache - * @tqhead: timerqueue head for cpu_timers - */ -struct posix_cputimer_base { - u64 nextevt; - struct timerqueue_head tqhead; -}; - -/** - * posix_cputimers - Container for posix CPU timer related data - * @bases: Base container for posix CPU clocks - * @timers_active: Timers are queued. 
- * @expiry_active: Timer expiry is active. Used for - * process wide timers to avoid multiple - * task trying to handle expiry concurrently - * - * Used in task_struct and signal_struct - */ -struct posix_cputimers { - struct posix_cputimer_base bases[CPUCLOCK_MAX]; - unsigned int timers_active; - unsigned int expiry_active; -}; - -/** - * posix_cputimers_work - Container for task work based posix CPU timer expiry - * @work: The task work to be scheduled - * @scheduled: @work has been scheduled already, no further processing - */ -struct posix_cputimers_work { - struct callback_head work; - unsigned int scheduled; -}; - -static inline void posix_cputimers_init(struct posix_cputimers *pct) -{ - memset(pct, 0, sizeof(*pct)); - pct->bases[0].nextevt = U64_MAX; - pct->bases[1].nextevt = U64_MAX; - pct->bases[2].nextevt = U64_MAX; -} - -void posix_cputimers_group_init(struct posix_cputimers *pct, u64 cpu_limit); - -static inline void posix_cputimers_rt_watchdog(struct posix_cputimers *pct, - u64 runtime) -{ - pct->bases[CPUCLOCK_SCHED].nextevt = runtime; -} - -/* Init task static initializer */ -#define INIT_CPU_TIMERBASE(b) { \ - .nextevt = U64_MAX, \ -} - -#define INIT_CPU_TIMERBASES(b) { \ - INIT_CPU_TIMERBASE(b[0]), \ - INIT_CPU_TIMERBASE(b[1]), \ - INIT_CPU_TIMERBASE(b[2]), \ -} - -#define INIT_CPU_TIMERS(s) \ - .posix_cputimers = { \ - .bases = INIT_CPU_TIMERBASES(s.posix_cputimers.bases), \ - }, -#else -struct posix_cputimers { }; -struct cpu_timer { }; -#define INIT_CPU_TIMERS(s) -static inline void posix_cputimers_init(struct posix_cputimers *pct) { } -static inline void posix_cputimers_group_init(struct posix_cputimers *pct, - u64 cpu_limit) { } -#endif - -#ifdef CONFIG_POSIX_CPU_TIMERS_TASK_WORK -void posix_cputimers_init_work(void); -#else -static inline void posix_cputimers_init_work(void) { } -#endif - -#define REQUEUE_PENDING 1 - -/** - * struct k_itimer - POSIX.1b interval timer structure. 
- * @list: List head for binding the timer to signals->posix_timers - * @t_hash: Entry in the posix timer hash table - * @it_lock: Lock protecting the timer - * @kclock: Pointer to the k_clock struct handling this timer - * @it_clock: The posix timer clock id - * @it_id: The posix timer id for identifying the timer - * @it_active: Marker that timer is active - * @it_overrun: The overrun counter for pending signals - * @it_overrun_last: The overrun at the time of the last delivered signal - * @it_requeue_pending: Indicator that timer waits for being requeued on - * signal delivery - * @it_sigev_notify: The notify word of sigevent struct for signal delivery - * @it_interval: The interval for periodic timers - * @it_signal: Pointer to the creators signal struct - * @it_pid: The pid of the process/task targeted by the signal - * @it_process: The task to wakeup on clock_nanosleep (CPU timers) - * @sigq: Pointer to preallocated sigqueue - * @it: Union representing the various posix timer type - * internals. - * @rcu: RCU head for freeing the timer. - */ +/* POSIX.1b interval timer structure. 
*/ struct k_itimer { - struct list_head list; - struct hlist_node t_hash; - spinlock_t it_lock; - const struct k_clock *kclock; - clockid_t it_clock; - timer_t it_id; - int it_active; - s64 it_overrun; - s64 it_overrun_last; - int it_requeue_pending; - int it_sigev_notify; - ktime_t it_interval; - struct signal_struct *it_signal; + struct list_head list; /* free/ allocate list */ + struct hlist_node t_hash; + spinlock_t it_lock; + clockid_t it_clock; /* which timer type */ + timer_t it_id; /* timer id */ + int it_overrun; /* overrun on pending signal */ + int it_overrun_last; /* overrun on last delivered signal */ + int it_requeue_pending; /* waiting to requeue this timer */ +#define REQUEUE_PENDING 1 + int it_sigev_notify; /* notify word of sigevent struct */ + struct signal_struct *it_signal; union { - struct pid *it_pid; - struct task_struct *it_process; + struct pid *it_pid; /* pid of process to send signal to */ + struct task_struct *it_process; /* for clock_nanosleep */ }; - struct sigqueue *sigq; + struct sigqueue *sigq; /* signal queue entry. 
*/ union { struct { - struct hrtimer timer; + struct hrtimer timer; + ktime_t interval; } real; - struct cpu_timer cpu; + struct cpu_timer_list cpu; struct { - struct alarm alarmtimer; + unsigned int clock; + unsigned int node; + unsigned long incr; + unsigned long expires; + } mmtimer; + struct { + struct alarm alarmtimer; + ktime_t interval; } alarm; + struct rcu_head rcu; } it; - struct rcu_head rcu; }; -void run_posix_cpu_timers(void); +struct k_clock { + int (*clock_getres) (const clockid_t which_clock, struct timespec *tp); + int (*clock_set) (const clockid_t which_clock, + const struct timespec *tp); + int (*clock_get) (const clockid_t which_clock, struct timespec * tp); + int (*clock_adj) (const clockid_t which_clock, struct timex *tx); + int (*timer_create) (struct k_itimer *timer); + int (*nsleep) (const clockid_t which_clock, int flags, + struct timespec *, struct timespec __user *); + long (*nsleep_restart) (struct restart_block *restart_block); + int (*timer_set) (struct k_itimer * timr, int flags, + struct itimerspec * new_setting, + struct itimerspec * old_setting); + int (*timer_del) (struct k_itimer * timr); +#define TIMER_RETRY 1 + void (*timer_get) (struct k_itimer * timr, + struct itimerspec * cur_setting); +}; + +extern struct k_clock clock_posix_cpu; +extern struct k_clock clock_posix_dynamic; + +void posix_timers_register_clock(const clockid_t clock_id, struct k_clock *new_clock); + +/* function to call to trigger timer event */ +int posix_timer_event(struct k_itimer *timr, int si_private); + +void posix_cpu_timer_schedule(struct k_itimer *timer); + +void run_posix_cpu_timers(struct task_struct *task); void posix_cpu_timers_exit(struct task_struct *task); void posix_cpu_timers_exit_group(struct task_struct *task); void set_process_cpu_timer(struct task_struct *task, unsigned int clock_idx, - u64 *newval, u64 *oldval); + cputime_t *newval, cputime_t *oldval); + +long clock_nanosleep_restart(struct restart_block *restart_block); void 
update_rlimit_cpu(struct task_struct *task, unsigned long rlim_new); -void posixtimer_rearm(struct kernel_siginfo *info); #endif diff --git a/include/linux/posix_acl.h b/include/linux/posix_acl.h index b65c877d92..5a9a739acd 100644 --- a/include/linux/posix_acl.h +++ b/include/linux/posix_acl.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ /* File: linux/posix_acl.h @@ -12,11 +11,8 @@ #include #include #include -#include #include -struct user_namespace; - struct posix_acl_entry { short e_tag; unsigned short e_perm; @@ -27,10 +23,10 @@ struct posix_acl_entry { }; struct posix_acl { - refcount_t a_refcount; + atomic_t a_refcount; struct rcu_head a_rcu; unsigned int a_count; - struct posix_acl_entry a_entries[]; + struct posix_acl_entry a_entries[0]; }; #define FOREACH_ACL_ENTRY(pa, acl, pe) \ @@ -44,7 +40,7 @@ static inline struct posix_acl * posix_acl_dup(struct posix_acl *acl) { if (acl) - refcount_inc(&acl->a_refcount); + atomic_inc(&acl->a_refcount); return acl; } @@ -54,7 +50,7 @@ posix_acl_dup(struct posix_acl *acl) static inline void posix_acl_release(struct posix_acl *acl) { - if (acl && refcount_dec_and_test(&acl->a_refcount)) + if (acl && atomic_dec_and_test(&acl->a_refcount)) kfree_rcu(acl, a_rcu); } @@ -63,35 +59,30 @@ posix_acl_release(struct posix_acl *acl) extern void posix_acl_init(struct posix_acl *, int); extern struct posix_acl *posix_acl_alloc(int, gfp_t); +extern int posix_acl_valid(struct user_namespace *, const struct posix_acl *); +extern int posix_acl_permission(struct inode *, const struct posix_acl *, int); extern struct posix_acl *posix_acl_from_mode(umode_t, gfp_t); extern int posix_acl_equiv_mode(const struct posix_acl *, umode_t *); extern int __posix_acl_create(struct posix_acl **, gfp_t, umode_t *); extern int __posix_acl_chmod(struct posix_acl **, gfp_t, umode_t); extern struct posix_acl *get_posix_acl(struct inode *, int); -extern int set_posix_acl(struct user_namespace *, struct inode *, int, - struct posix_acl *); - 
-struct posix_acl *get_cached_acl_rcu(struct inode *inode, int type); +extern int set_posix_acl(struct inode *, int, struct posix_acl *); #ifdef CONFIG_FS_POSIX_ACL -int posix_acl_chmod(struct user_namespace *, struct inode *, umode_t); +extern int posix_acl_chmod(struct inode *, umode_t); extern int posix_acl_create(struct inode *, umode_t *, struct posix_acl **, struct posix_acl **); -int posix_acl_update_mode(struct user_namespace *, struct inode *, umode_t *, - struct posix_acl **); +extern int posix_acl_update_mode(struct inode *, umode_t *, struct posix_acl **); -extern int simple_set_acl(struct user_namespace *, struct inode *, - struct posix_acl *, int); +extern int simple_set_acl(struct inode *, struct posix_acl *, int); extern int simple_acl_create(struct inode *, struct inode *); struct posix_acl *get_cached_acl(struct inode *inode, int type); +struct posix_acl *get_cached_acl_rcu(struct inode *inode, int type); void set_cached_acl(struct inode *inode, int type, struct posix_acl *acl); void forget_cached_acl(struct inode *inode, int type); void forget_all_cached_acls(struct inode *inode); -int posix_acl_valid(struct user_namespace *, const struct posix_acl *); -int posix_acl_permission(struct user_namespace *, struct inode *, - const struct posix_acl *, int); static inline void cache_no_acl(struct inode *inode) { @@ -99,8 +90,7 @@ static inline void cache_no_acl(struct inode *inode) inode->i_default_acl = NULL; } #else -static inline int posix_acl_chmod(struct user_namespace *mnt_userns, - struct inode *inode, umode_t mode) +static inline int posix_acl_chmod(struct inode *inode, umode_t mode) { return 0; } diff --git a/include/linux/posix_acl_xattr.h b/include/linux/posix_acl_xattr.h index 060e8d2031..8b867e3bf3 100644 --- a/include/linux/posix_acl_xattr.h +++ b/include/linux/posix_acl_xattr.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ /* File: linux/posix_acl_xattr.h @@ -33,17 +32,13 @@ posix_acl_xattr_count(size_t size) } #ifdef 
CONFIG_FS_POSIX_ACL -void posix_acl_fix_xattr_from_user(struct user_namespace *mnt_userns, - void *value, size_t size); -void posix_acl_fix_xattr_to_user(struct user_namespace *mnt_userns, - void *value, size_t size); +void posix_acl_fix_xattr_from_user(void *value, size_t size); +void posix_acl_fix_xattr_to_user(void *value, size_t size); #else -static inline void posix_acl_fix_xattr_from_user(struct user_namespace *mnt_userns, - void *value, size_t size) +static inline void posix_acl_fix_xattr_from_user(void *value, size_t size) { } -static inline void posix_acl_fix_xattr_to_user(struct user_namespace *mnt_userns, - void *value, size_t size) +static inline void posix_acl_fix_xattr_to_user(void *value, size_t size) { } #endif diff --git a/include/linux/power/ab8500.h b/include/linux/power/ab8500.h index 51976b52f3..cdbb6c2a8c 100644 --- a/include/linux/power/ab8500.h +++ b/include/linux/power/ab8500.h @@ -1,7 +1,7 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright (C) ST-Ericsson 2013 * Author: Hongbo Zhang + * License terms: GNU General Public License v2 */ #ifndef PWR_AB8500_H diff --git a/include/linux/power/bq2415x_charger.h b/include/linux/power/bq2415x_charger.h index f3c267f2a4..50762af8b8 100644 --- a/include/linux/power/bq2415x_charger.h +++ b/include/linux/power/bq2415x_charger.h @@ -1,8 +1,21 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * bq2415x charger driver * - * Copyright (C) 2011-2013 Pali Rohár + * Copyright (C) 2011-2013 Pali Rohár + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. */ #ifndef BQ2415X_CHARGER_H @@ -14,8 +27,8 @@ * value is -1 then default chip value (specified in datasheet) will be * used. * - * Value resistor_sense is needed for configuring charge and - * termination current. If it is less or equal to zero, configuring charge + * Value resistor_sense is needed for for configuring charge and + * termination current. It it is less or equal to zero, configuring charge * and termination current will not be possible. * * For automode support is needed to provide name of power supply device diff --git a/include/linux/power/bq24190_charger.h b/include/linux/power/bq24190_charger.h index 313e6fbcb7..9f0283721c 100644 --- a/include/linux/power/bq24190_charger.h +++ b/include/linux/power/bq24190_charger.h @@ -1,15 +1,16 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * Platform data for the TI bq24190 battery charger driver. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
*/ #ifndef _BQ24190_CHARGER_H_ #define _BQ24190_CHARGER_H_ -#include - struct bq24190_platform_data { - const struct regulator_init_data *regulator_init_data; + unsigned int gpio_int; /* GPIO pin that's connected to INT# */ }; #endif diff --git a/include/linux/power/bq24735-charger.h b/include/linux/power/bq24735-charger.h index 321dd009ce..b04be59f91 100644 --- a/include/linux/power/bq24735-charger.h +++ b/include/linux/power/bq24735-charger.h @@ -1,5 +1,18 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ /* + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #ifndef __CHARGER_BQ24735_H_ diff --git a/include/linux/power/bq27xxx_battery.h b/include/linux/power/bq27xxx_battery.h index a1aa68141d..bed9557b69 100644 --- a/include/linux/power/bq27xxx_battery.h +++ b/include/linux/power/bq27xxx_battery.h @@ -1,47 +1,37 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef __LINUX_BQ27X00_BATTERY_H__ #define __LINUX_BQ27X00_BATTERY_H__ enum bq27xxx_chip { BQ27000 = 1, /* bq27000, bq27200 */ BQ27010, /* bq27010, bq27210 */ - BQ2750X, /* bq27500 deprecated alias */ - BQ2751X, /* bq27510, bq27520 deprecated alias */ - BQ2752X, - BQ27500, /* bq27500/1 */ - BQ27510G1, /* bq27510G1 */ - BQ27510G2, /* bq27510G2 */ - BQ27510G3, /* bq27510G3 */ - BQ27520G1, /* bq27520G1 */ - BQ27520G2, /* bq27520G2 */ - BQ27520G3, /* bq27520G3 */ - BQ27520G4, /* bq27520G4 */ - BQ27521, /* bq27521 */ + BQ27500, /* bq27500 */ + BQ27510, /* bq27510, bq27520 */ BQ27530, /* bq27530, bq27531 */ - BQ27531, BQ27541, /* bq27541, bq27542, bq27546, bq27742 */ - BQ27542, - BQ27546, - BQ27742, BQ27545, /* bq27545 */ - BQ27411, - BQ27421, /* bq27421, bq27441, bq27621 */ - BQ27425, - BQ27426, - BQ27441, - BQ27621, - BQ27Z561, - BQ28Z610, - BQ34Z100, - BQ78Z100, + BQ27421, /* bq27421, bq27425, bq27441, bq27621 */ +}; + +/** + * struct bq27xxx_plaform_data - Platform data for bq27xxx devices + * @name: Name of the battery. + * @chip: Chip class number of this device. + * @read: HDQ read callback. + * This function should provide access to the HDQ bus the battery is + * connected to. + * The first parameter is a pointer to the battery device, the second the + * register to be read. The return value should either be the content of + * the passed register or an error value. 
+ */ +struct bq27xxx_platform_data { + const char *name; + enum bq27xxx_chip chip; + int (*read)(struct device *dev, unsigned int); }; struct bq27xxx_device_info; struct bq27xxx_access_methods { int (*read)(struct bq27xxx_device_info *di, u8 reg, bool single); - int (*write)(struct bq27xxx_device_info *di, u8 reg, int value, bool single); - int (*read_bulk)(struct bq27xxx_device_info *di, u8 reg, u8 *data, int len); - int (*write_bulk)(struct bq27xxx_device_info *di, u8 reg, u8 *data, int len); }; struct bq27xxx_reg_cache { @@ -54,6 +44,7 @@ struct bq27xxx_reg_cache { int capacity; int energy; int flags; + int power_avg; int health; }; @@ -61,10 +52,7 @@ struct bq27xxx_device_info { struct device *dev; int id; enum bq27xxx_chip chip; - u32 opts; const char *name; - struct bq27xxx_dm_reg *dm_regs; - u32 unseal_key; struct bq27xxx_access_methods bus; struct bq27xxx_reg_cache cache; int charge_design_full; diff --git a/include/linux/power/charger-manager.h b/include/linux/power/charger-manager.h index 45e228b353..c4fa907c8f 100644 --- a/include/linux/power/charger-manager.h +++ b/include/linux/power/charger-manager.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright (C) 2011 Samsung Electronics Co., Ltd. * MyungJoo.Ham @@ -8,6 +7,9 @@ * monitor charging even in the context of suspend-to-RAM with * an interface combining the chargers. * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
**/ #ifndef _CHARGER_MANAGER_H @@ -31,16 +33,22 @@ enum polling_modes { CM_POLL_CHARGING_ONLY, }; -enum cm_batt_temp { - CM_BATT_OK = 0, - CM_BATT_OVERHEAT, - CM_BATT_COLD, +enum cm_event_types { + CM_EVENT_UNKNOWN = 0, + CM_EVENT_BATT_FULL, + CM_EVENT_BATT_IN, + CM_EVENT_BATT_OUT, + CM_EVENT_BATT_OVERHEAT, + CM_EVENT_BATT_COLD, + CM_EVENT_EXT_PWR_IN_OUT, + CM_EVENT_CHG_START_STOP, + CM_EVENT_OTHERS, }; /** * struct charger_cable * @extcon_name: the name of extcon device. - * @name: the name of the cable connector + * @name: the name of charger cable(external connector). * @extcon_dev: the extcon device. * @wq: the workqueue to control charger according to the state of * charger cable. If charger cable is attached, enable charger. @@ -56,10 +64,9 @@ enum cm_batt_temp { struct charger_cable { const char *extcon_name; const char *name; - struct extcon_dev *extcon_dev; - u64 extcon_type; /* The charger-manager use Extcon framework */ + struct extcon_specific_cable_nb extcon_dev; struct work_struct wq; struct notifier_block nb; @@ -112,7 +119,7 @@ struct charger_regulator { struct charger_cable *cables; int num_cables; - struct attribute_group attr_grp; + struct attribute_group attr_g; struct device_attribute attr_name; struct device_attribute attr_state; struct device_attribute attr_externally_control; @@ -126,10 +133,11 @@ struct charger_regulator { * @psy_name: the name of power-supply-class for charger manager * @polling_mode: * Determine which polling mode will be used + * @fullbatt_vchkdrop_ms: * @fullbatt_vchkdrop_uV: * Check voltage drop after the battery is fully charged. - * If it has dropped more than fullbatt_vchkdrop_uV - * CM will restart charging. + * If it has dropped more than fullbatt_vchkdrop_uV after + * fullbatt_vchkdrop_ms, CM will restart charging. * @fullbatt_uV: voltage in microvolt * If VBATT >= fullbatt_uV, it is assumed to be full. 
* @fullbatt_soc: state of Charge in % @@ -166,6 +174,7 @@ struct charger_desc { enum polling_modes polling_mode; unsigned int polling_interval_ms; + unsigned int fullbatt_vchkdrop_ms; unsigned int fullbatt_vchkdrop_uV; unsigned int fullbatt_uV; unsigned int fullbatt_soc; @@ -177,7 +186,6 @@ struct charger_desc { int num_charger_regulators; struct charger_regulator *charger_regulators; - const struct attribute_group **sysfs_groups; const char *psy_fuel_gauge; @@ -204,6 +212,9 @@ struct charger_desc { * @charger_stat: array of power_supply for chargers * @tzd_batt : thermal zone device for battery * @charger_enabled: the state of charger + * @fullbatt_vchk_jiffies_at: + * jiffies at the time full battery check will occur. + * @fullbatt_vchk_work: work queue for full battery check * @emergency_stop: * When setting true, stop charging * @psy_name_buf: the name of power-supply-class for charger manager @@ -214,7 +225,6 @@ struct charger_desc { * saved status of battery before entering suspend-to-RAM * @charging_start_time: saved start time of enabling charging * @charging_end_time: saved end time of disabling charging - * @battery_status: Current battery status */ struct charger_manager { struct list_head entry; @@ -226,6 +236,9 @@ struct charger_manager { #endif bool charger_enabled; + unsigned long fullbatt_vchk_jiffies_at; + struct delayed_work fullbatt_vchk_work; + int emergency_stop; char psy_name_buf[PSY_NAME_MAX + 1]; @@ -234,8 +247,13 @@ struct charger_manager { u64 charging_start_time; u64 charging_end_time; - - int battery_status; }; +#ifdef CONFIG_CHARGER_MANAGER +extern void cm_notify_event(struct power_supply *psy, + enum cm_event_types type, char *msg); +#else +static inline void cm_notify_event(struct power_supply *psy, + enum cm_event_types type, char *msg) { } +#endif #endif /* _CHARGER_MANAGER_H */ diff --git a/include/linux/power/generic-adc-battery.h b/include/linux/power/generic-adc-battery.h index c68cbf34cd..b1ebe08533 100644 --- 
a/include/linux/power/generic-adc-battery.h +++ b/include/linux/power/generic-adc-battery.h @@ -1,6 +1,8 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright (C) 2012, Anish Kumar + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. */ #ifndef GENERIC_ADC_BATTERY_H @@ -11,12 +13,16 @@ * @battery_info: recommended structure to specify static power supply * parameters * @cal_charge: calculate charge level. + * @gpio_charge_finished: gpio for the charger. + * @gpio_inverted: Should be 1 if the GPIO is active low otherwise 0 * @jitter_delay: delay required after the interrupt to check battery * status.Default set is 10ms. */ struct gab_platform_data { struct power_supply_info battery_info; int (*cal_charge)(long value); + int gpio_charge_finished; + bool gpio_inverted; int jitter_delay; }; diff --git a/include/linux/power/gpio-charger.h b/include/linux/power/gpio-charger.h index c0b7657ac1..de1dfe09a0 100644 --- a/include/linux/power/gpio-charger.h +++ b/include/linux/power/gpio-charger.h @@ -1,6 +1,15 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * Copyright (C) 2010, Lars-Peter Clausen + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write to the Free Software Foundation, Inc., + * 675 Mass Ave, Cambridge, MA 02139, USA. 
+ * */ #ifndef __LINUX_POWER_GPIO_CHARGER_H__ @@ -13,12 +22,18 @@ * struct gpio_charger_platform_data - platform_data for gpio_charger devices * @name: Name for the chargers power_supply device * @type: Type of the charger + * @gpio: GPIO which is used to indicate the chargers status + * @gpio_active_low: Should be set to 1 if the GPIO is active low otherwise 0 * @supplied_to: Array of battery names to which this chargers supplies power * @num_supplicants: Number of entries in the supplied_to array */ struct gpio_charger_platform_data { const char *name; enum power_supply_type type; + + int gpio; + int gpio_active_low; + char **supplied_to; size_t num_supplicants; }; diff --git a/include/linux/power/isp1704_charger.h b/include/linux/power/isp1704_charger.h new file mode 100644 index 0000000000..0105d9e7af --- /dev/null +++ b/include/linux/power/isp1704_charger.h @@ -0,0 +1,30 @@ +/* + * ISP1704 USB Charger Detection driver + * + * Copyright (C) 2011 Nokia Corporation + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ + + +#ifndef __ISP1704_CHARGER_H +#define __ISP1704_CHARGER_H + +struct isp1704_charger_data { + void (*set_power)(bool on); + int enable_gpio; +}; + +#endif diff --git a/include/linux/power/jz4740-battery.h b/include/linux/power/jz4740-battery.h index 10da211678..19c9610c72 100644 --- a/include/linux/power/jz4740-battery.h +++ b/include/linux/power/jz4740-battery.h @@ -1,6 +1,15 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * Copyright (C) 2009, Jiejing Zhang + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write to the Free Software Foundation, Inc., + * 675 Mass Ave, Cambridge, MA 02139, USA. + * */ #ifndef __JZ4740_BATTERY_H diff --git a/include/linux/power/max17042_battery.h b/include/linux/power/max17042_battery.h index dd24756a8a..522757ac9c 100644 --- a/include/linux/power/max17042_battery.h +++ b/include/linux/power/max17042_battery.h @@ -1,25 +1,31 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * Fuel gauge driver for Maxim 17042 / 8966 / 8997 * Note that Maxim 8966 and 8997 are mfd and this is its subdevice. * * Copyright (C) 2011 Samsung Electronics * MyungJoo Ham + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. 
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #ifndef __MAX17042_BATTERY_H_ #define __MAX17042_BATTERY_H_ #define MAX17042_STATUS_BattAbsent (1 << 3) -#define MAX17042_BATTERY_FULL (95) /* Recommend. FullSOCThr value */ +#define MAX17042_BATTERY_FULL (100) #define MAX17042_DEFAULT_SNS_RESISTOR (10000) -#define MAX17042_DEFAULT_VMIN (3000) -#define MAX17042_DEFAULT_VMAX (4500) /* LiHV cell max */ -#define MAX17042_DEFAULT_TEMP_MIN (0) /* For sys without temp sensor */ -#define MAX17042_DEFAULT_TEMP_MAX (700) /* 70 degrees Celcius */ - -/* Consider RepCap which is less then 10 units below FullCAP full */ -#define MAX17042_FULL_THRESHOLD 10 #define MAX17042_CHARACTERIZATION_DATA_SIZE 48 @@ -69,7 +75,7 @@ enum max17042_register { MAX17042_RelaxCFG = 0x2A, MAX17042_MiscCFG = 0x2B, MAX17042_TGAIN = 0x2C, - MAX17042_TOFF = 0x2D, + MAx17042_TOFF = 0x2D, MAX17042_CGAIN = 0x2E, MAX17042_COFF = 0x2F, @@ -105,65 +111,18 @@ enum max17042_register { MAX17042_OCV = 0xEE, - MAX17042_OCVInternal = 0xFB, /* MAX17055 VFOCV */ + MAX17042_OCVInternal = 0xFB, MAX17042_VFSOC = 0xFF, }; -/* Registers specific to max17055 only */ -enum max17055_register { - MAX17055_QRes = 0x0C, - MAX17055_RCell = 0x14, - MAX17055_TTF = 0x20, - MAX17055_DieTemp = 0x34, - MAX17055_USER_MEM = 0x40, - MAX17055_RGAIN = 0x43, - - MAX17055_ConvgCfg = 0x49, - MAX17055_VFRemCap = 0x4A, - - MAX17055_STATUS2 = 0xB0, - MAX17055_POWER = 0xB1, - MAX17055_ID = 0xB2, - MAX17055_AvgPower = 0xB3, - MAX17055_IAlrtTh = 0xB4, - MAX17055_TTFCfg = 0xB5, - MAX17055_CVMixCap = 0xB6, - MAX17055_CVHalfTime = 0xB7, - 
MAX17055_CGTempCo = 0xB8, - MAX17055_Curve = 0xB9, - MAX17055_HibCfg = 0xBA, - MAX17055_Config2 = 0xBB, - MAX17055_VRipple = 0xBC, - MAX17055_RippleCfg = 0xBD, - MAX17055_TimerH = 0xBE, - - MAX17055_RSense = 0xD0, - MAX17055_ScOcvLim = 0xD1, - - MAX17055_SOCHold = 0xD3, - MAX17055_MaxPeakPwr = 0xD4, - MAX17055_SusPeakPwr = 0xD5, - MAX17055_PackResistance = 0xD6, - MAX17055_SysResistance = 0xD7, - MAX17055_MinSysV = 0xD8, - MAX17055_MPPCurrent = 0xD9, - MAX17055_SPPCurrent = 0xDA, - MAX17055_ModelCfg = 0xDB, - MAX17055_AtQResidual = 0xDC, - MAX17055_AtTTE = 0xDD, - MAX17055_AtAvSOC = 0xDE, - MAX17055_AtAvCap = 0xDF, -}; - -/* Registers specific to max17047/50/55 */ +/* Registers specific to max17047/50 */ enum max17047_register { MAX17047_QRTbl00 = 0x12, MAX17047_FullSOCThr = 0x13, MAX17047_QRTbl10 = 0x22, MAX17047_QRTbl20 = 0x32, MAX17047_V_empty = 0x3A, - MAX17047_TIMER = 0x3E, MAX17047_QRTbl30 = 0x42, }; @@ -172,7 +131,6 @@ enum max170xx_chip_type { MAXIM_DEVICE_TYPE_MAX17042, MAXIM_DEVICE_TYPE_MAX17047, MAXIM_DEVICE_TYPE_MAX17050, - MAXIM_DEVICE_TYPE_MAX17055, MAXIM_DEVICE_TYPE_NUM }; diff --git a/include/linux/power/max8903_charger.h b/include/linux/power/max8903_charger.h index 02f94a1b32..89d3f1cb34 100644 --- a/include/linux/power/max8903_charger.h +++ b/include/linux/power/max8903_charger.h @@ -1,9 +1,23 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * max8903_charger.h - Maxim 8903 USB/Adapter Charger Driver * * Copyright (C) 2011 Samsung Electronics * MyungJoo Ham + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + * */ #ifndef __MAX8903_CHARGER_H__ diff --git a/include/linux/power/sbs-battery.h b/include/linux/power/sbs-battery.h index ccfe79783c..519b8b4323 100644 --- a/include/linux/power/sbs-battery.h +++ b/include/linux/power/sbs-battery.h @@ -1,8 +1,21 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * Gas Gauge driver for SBS Compliant Gas Gauges * * Copyright (c) 2010, NVIDIA Corporation. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. */ #ifndef __LINUX_POWER_SBS_BATTERY_H_ diff --git a/include/linux/power/smartreflex.h b/include/linux/power/smartreflex.h index 167b9b0400..9a9257ac2e 100644 --- a/include/linux/power/smartreflex.h +++ b/include/linux/power/smartreflex.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ /* * OMAP Smartreflex Defines and Routines * @@ -12,6 +11,10 @@ * * Copyright (C) 2007 Texas Instruments, Inc. 
* Lesly A M + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. */ #ifndef __POWER_SMARTREFLEX_H @@ -140,13 +143,6 @@ #define OMAP3430_SR_ERRWEIGHT 0x04 #define OMAP3430_SR_ERRMAXLIMIT 0x02 -enum sr_instance { - OMAP_SR_MPU, /* shared with iva on omap3 */ - OMAP_SR_CORE, - OMAP_SR_IVA, - OMAP_SR_NR, -}; - struct omap_sr { char *name; struct list_head node; @@ -155,7 +151,6 @@ struct omap_sr { struct voltagedomain *voltdm; struct dentry *dbg_dir; unsigned int irq; - struct clk *fck; int srid; int ip_type; int nvalue_count; @@ -170,7 +165,6 @@ struct omap_sr { u32 senp_mod; u32 senn_mod; void __iomem *base; - unsigned long enabled:1; }; /** @@ -213,6 +207,7 @@ struct omap_smartreflex_dev_attr { const char *sensor_voltdm_name; }; +#ifdef CONFIG_POWER_AVS_OMAP /* * The smart reflex driver supports CLASS1 CLASS2 and CLASS3 SR. * The smartreflex class driver should pass the class type. @@ -243,7 +238,7 @@ struct omap_sr_class_data { int (*notify)(struct omap_sr *sr, u32 status); u8 notify_flags; u8 class_type; -}; +} __do_const; /** * struct omap_sr_nvalue_table - Smartreflex n-target value info @@ -295,16 +290,14 @@ struct omap_sr_data { struct voltagedomain *voltdm; }; - -extern struct omap_sr_data omap_sr_pdata[OMAP_SR_NR]; - -#ifdef CONFIG_POWER_AVS_OMAP - /* Smartreflex module enable/disable interface */ void omap_sr_enable(struct voltagedomain *voltdm); void omap_sr_disable(struct voltagedomain *voltdm); void omap_sr_disable_reset_volt(struct voltagedomain *voltdm); +/* API to register the pmic specific data with the smartreflex driver. 
*/ +void omap_sr_register_pmic(struct omap_sr_pmic_data *pmic_data); + /* Smartreflex driver hooks to be called from Smartreflex class driver */ int sr_enable(struct omap_sr *sr, unsigned long volt); void sr_disable(struct omap_sr *sr); @@ -319,5 +312,7 @@ static inline void omap_sr_enable(struct voltagedomain *voltdm) {} static inline void omap_sr_disable(struct voltagedomain *voltdm) {} static inline void omap_sr_disable_reset_volt( struct voltagedomain *voltdm) {} +static inline void omap_sr_register_pmic( + struct omap_sr_pmic_data *pmic_data) {} #endif #endif diff --git a/include/linux/power/smb347-charger.h b/include/linux/power/smb347-charger.h new file mode 100644 index 0000000000..b3cb20dab5 --- /dev/null +++ b/include/linux/power/smb347-charger.h @@ -0,0 +1,117 @@ +/* + * Summit Microelectronics SMB347 Battery Charger Driver + * + * Copyright (C) 2011, Intel Corporation + * + * Authors: Bruce E. Robertson + * Mika Westerberg + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#ifndef SMB347_CHARGER_H +#define SMB347_CHARGER_H + +#include +#include + +enum { + /* use the default compensation method */ + SMB347_SOFT_TEMP_COMPENSATE_DEFAULT = -1, + + SMB347_SOFT_TEMP_COMPENSATE_NONE, + SMB347_SOFT_TEMP_COMPENSATE_CURRENT, + SMB347_SOFT_TEMP_COMPENSATE_VOLTAGE, +}; + +/* Use default factory programmed value for hard/soft temperature limit */ +#define SMB347_TEMP_USE_DEFAULT -273 + +/* + * Charging enable can be controlled by software (via i2c) by + * smb347-charger driver or by EN pin (active low/high). 
+ */ +enum smb347_chg_enable { + SMB347_CHG_ENABLE_SW, + SMB347_CHG_ENABLE_PIN_ACTIVE_LOW, + SMB347_CHG_ENABLE_PIN_ACTIVE_HIGH, +}; + +/** + * struct smb347_charger_platform_data - platform data for SMB347 charger + * @battery_info: Information about the battery + * @max_charge_current: maximum current (in uA) the battery can be charged + * @max_charge_voltage: maximum voltage (in uV) the battery can be charged + * @pre_charge_current: current (in uA) to use in pre-charging phase + * @termination_current: current (in uA) used to determine when the + * charging cycle terminates + * @pre_to_fast_voltage: voltage (in uV) treshold used for transitioning to + * pre-charge to fast charge mode + * @mains_current_limit: maximum input current drawn from AC/DC input (in uA) + * @usb_hc_current_limit: maximum input high current (in uA) drawn from USB + * input + * @chip_temp_threshold: die temperature where device starts limiting charge + * current [%100 - %130] (in degree C) + * @soft_cold_temp_limit: soft cold temperature limit [%0 - %15] (in degree C), + * granularity is 5 deg C. + * @soft_hot_temp_limit: soft hot temperature limit [%40 - %55] (in degree C), + * granularity is 5 deg C. + * @hard_cold_temp_limit: hard cold temperature limit [%-5 - %10] (in degree C), + * granularity is 5 deg C. + * @hard_hot_temp_limit: hard hot temperature limit [%50 - %65] (in degree C), + * granularity is 5 deg C. 
+ * @suspend_on_hard_temp_limit: suspend charging when hard limit is hit + * @soft_temp_limit_compensation: compensation method when soft temperature + * limit is hit + * @charge_current_compensation: current (in uA) for charging compensation + * current when temperature hits soft limits + * @use_mains: AC/DC input can be used + * @use_usb: USB input can be used + * @use_usb_otg: USB OTG output can be used (not implemented yet) + * @irq_gpio: GPIO number used for interrupts (%-1 if not used) + * @enable_control: how charging enable/disable is controlled + * (driver/pin controls) + * + * @use_main, @use_usb, and @use_usb_otg are means to enable/disable + * hardware support for these. This is useful when we want to have for + * example OTG charging controlled via OTG transceiver driver and not by + * the SMB347 hardware. + * + * Hard and soft temperature limit values are given as described in the + * device data sheet and assuming NTC beta value is %3750. Even if this is + * not the case, these values should be used. They can be mapped to the + * corresponding NTC beta values with the help of table %2 in the data + * sheet. So for example if NTC beta is %3375 and we want to program hard + * hot limit to be %53 deg C, @hard_hot_temp_limit should be set to %50. + * + * If zero value is given in any of the current and voltage values, the + * factory programmed default will be used. For soft/hard temperature + * values, pass in %SMB347_TEMP_USE_DEFAULT instead. 
+ */ +struct smb347_charger_platform_data { + struct power_supply_info battery_info; + unsigned int max_charge_current; + unsigned int max_charge_voltage; + unsigned int pre_charge_current; + unsigned int termination_current; + unsigned int pre_to_fast_voltage; + unsigned int mains_current_limit; + unsigned int usb_hc_current_limit; + unsigned int chip_temp_threshold; + int soft_cold_temp_limit; + int soft_hot_temp_limit; + int hard_cold_temp_limit; + int hard_hot_temp_limit; + bool suspend_on_hard_temp_limit; + unsigned int soft_temp_limit_compensation; + unsigned int charge_current_compensation; + bool use_mains; + bool use_usb; + bool use_usb_otg; + int irq_gpio; + enum smb347_chg_enable enable_control; +}; + +#endif /* SMB347_CHARGER_H */ diff --git a/include/linux/power/twl4030_madc_battery.h b/include/linux/power/twl4030_madc_battery.h index 26517e9dfa..23110dc777 100644 --- a/include/linux/power/twl4030_madc_battery.h +++ b/include/linux/power/twl4030_madc_battery.h @@ -1,9 +1,18 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * Dumb driver for LiIon batteries using TWL4030 madc. * * Copyright 2013 Golden Delicious Computers * Nikolaus Schaller + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write to the Free Software Foundation, Inc., + * 675 Mass Ave, Cambridge, MA 02139, USA. 
+ * */ #ifndef __TWL4030_MADC_BATTERY_H diff --git a/include/linux/power_supply.h b/include/linux/power_supply.h index 9ca1f120a2..3965503315 100644 --- a/include/linux/power_supply.h +++ b/include/linux/power_supply.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * Universal power supply monitor class * @@ -7,6 +6,8 @@ * Copyright © 2003 Ian Molton * * Modified: 2004, Oct Szabolcs Gyurko + * + * You may use this code as per GPL version 2 */ #ifndef __LINUX_POWER_SUPPLY_H__ @@ -39,16 +40,11 @@ enum { POWER_SUPPLY_STATUS_FULL, }; -/* What algorithm is the charger using? */ enum { POWER_SUPPLY_CHARGE_TYPE_UNKNOWN = 0, POWER_SUPPLY_CHARGE_TYPE_NONE, - POWER_SUPPLY_CHARGE_TYPE_TRICKLE, /* slow speed */ - POWER_SUPPLY_CHARGE_TYPE_FAST, /* fast speed */ - POWER_SUPPLY_CHARGE_TYPE_STANDARD, /* normal speed */ - POWER_SUPPLY_CHARGE_TYPE_ADAPTIVE, /* dynamically adjusted speed */ - POWER_SUPPLY_CHARGE_TYPE_CUSTOM, /* use CHARGE_CONTROL_* props */ - POWER_SUPPLY_CHARGE_TYPE_LONGLIFE, /* slow speed, longer life */ + POWER_SUPPLY_CHARGE_TYPE_TRICKLE, + POWER_SUPPLY_CHARGE_TYPE_FAST, }; enum { @@ -61,11 +57,6 @@ enum { POWER_SUPPLY_HEALTH_COLD, POWER_SUPPLY_HEALTH_WATCHDOG_TIMER_EXPIRE, POWER_SUPPLY_HEALTH_SAFETY_TIMER_EXPIRE, - POWER_SUPPLY_HEALTH_OVERCURRENT, - POWER_SUPPLY_HEALTH_CALIBRATION_REQUIRED, - POWER_SUPPLY_HEALTH_WARM, - POWER_SUPPLY_HEALTH_COOL, - POWER_SUPPLY_HEALTH_HOT, }; enum { @@ -130,11 +121,7 @@ enum power_supply_property { POWER_SUPPLY_PROP_CONSTANT_CHARGE_VOLTAGE_MAX, POWER_SUPPLY_PROP_CHARGE_CONTROL_LIMIT, POWER_SUPPLY_PROP_CHARGE_CONTROL_LIMIT_MAX, - POWER_SUPPLY_PROP_CHARGE_CONTROL_START_THRESHOLD, /* in percents! */ - POWER_SUPPLY_PROP_CHARGE_CONTROL_END_THRESHOLD, /* in percents! 
*/ POWER_SUPPLY_PROP_INPUT_CURRENT_LIMIT, - POWER_SUPPLY_PROP_INPUT_VOLTAGE_LIMIT, - POWER_SUPPLY_PROP_INPUT_POWER_LIMIT, POWER_SUPPLY_PROP_ENERGY_FULL_DESIGN, POWER_SUPPLY_PROP_ENERGY_EMPTY_DESIGN, POWER_SUPPLY_PROP_ENERGY_FULL, @@ -144,7 +131,6 @@ enum power_supply_property { POWER_SUPPLY_PROP_CAPACITY, /* in percents! */ POWER_SUPPLY_PROP_CAPACITY_ALERT_MIN, /* in percents! */ POWER_SUPPLY_PROP_CAPACITY_ALERT_MAX, /* in percents! */ - POWER_SUPPLY_PROP_CAPACITY_ERROR_MARGIN, /* in percents! */ POWER_SUPPLY_PROP_CAPACITY_LEVEL, POWER_SUPPLY_PROP_TEMP, POWER_SUPPLY_PROP_TEMP_MAX, @@ -159,14 +145,9 @@ enum power_supply_property { POWER_SUPPLY_PROP_TIME_TO_FULL_NOW, POWER_SUPPLY_PROP_TIME_TO_FULL_AVG, POWER_SUPPLY_PROP_TYPE, /* use power_supply.type instead */ - POWER_SUPPLY_PROP_USB_TYPE, POWER_SUPPLY_PROP_SCOPE, - POWER_SUPPLY_PROP_PRECHARGE_CURRENT, POWER_SUPPLY_PROP_CHARGE_TERM_CURRENT, POWER_SUPPLY_PROP_CALIBRATE, - POWER_SUPPLY_PROP_MANUFACTURE_YEAR, - POWER_SUPPLY_PROP_MANUFACTURE_MONTH, - POWER_SUPPLY_PROP_MANUFACTURE_DAY, /* Properties of type `const char *' */ POWER_SUPPLY_PROP_MODEL_NAME, POWER_SUPPLY_PROP_MANUFACTURER, @@ -178,28 +159,13 @@ enum power_supply_type { POWER_SUPPLY_TYPE_BATTERY, POWER_SUPPLY_TYPE_UPS, POWER_SUPPLY_TYPE_MAINS, - POWER_SUPPLY_TYPE_USB, /* Standard Downstream Port */ - POWER_SUPPLY_TYPE_USB_DCP, /* Dedicated Charging Port */ - POWER_SUPPLY_TYPE_USB_CDP, /* Charging Downstream Port */ - POWER_SUPPLY_TYPE_USB_ACA, /* Accessory Charger Adapters */ - POWER_SUPPLY_TYPE_USB_TYPE_C, /* Type C Port */ - POWER_SUPPLY_TYPE_USB_PD, /* Power Delivery Port */ - POWER_SUPPLY_TYPE_USB_PD_DRP, /* PD Dual Role Port */ - POWER_SUPPLY_TYPE_APPLE_BRICK_ID, /* Apple Charging Method */ - POWER_SUPPLY_TYPE_WIRELESS, /* Wireless */ -}; - -enum power_supply_usb_type { - POWER_SUPPLY_USB_TYPE_UNKNOWN = 0, - POWER_SUPPLY_USB_TYPE_SDP, /* Standard Downstream Port */ - POWER_SUPPLY_USB_TYPE_DCP, /* Dedicated Charging Port */ - POWER_SUPPLY_USB_TYPE_CDP, /* 
Charging Downstream Port */ - POWER_SUPPLY_USB_TYPE_ACA, /* Accessory Charger Adapters */ - POWER_SUPPLY_USB_TYPE_C, /* Type C Port */ - POWER_SUPPLY_USB_TYPE_PD, /* Power Delivery Port */ - POWER_SUPPLY_USB_TYPE_PD_DRP, /* PD Dual Role Port */ - POWER_SUPPLY_USB_TYPE_PD_PPS, /* PD Programmable Power Supply */ - POWER_SUPPLY_USB_TYPE_APPLE_BRICK_ID, /* Apple Charging Method */ + POWER_SUPPLY_TYPE_USB, /* Standard Downstream Port */ + POWER_SUPPLY_TYPE_USB_DCP, /* Dedicated Charging Port */ + POWER_SUPPLY_TYPE_USB_CDP, /* Charging Downstream Port */ + POWER_SUPPLY_TYPE_USB_ACA, /* Accessory Charger Adapters */ + POWER_SUPPLY_TYPE_USB_TYPE_C, /* Type C Port */ + POWER_SUPPLY_TYPE_USB_PD, /* Power Delivery Port */ + POWER_SUPPLY_TYPE_USB_PD_DRP, /* PD Dual Role Port */ }; enum power_supply_notifier_events { @@ -217,14 +183,9 @@ struct power_supply; /* Run-time specific power supply configuration */ struct power_supply_config { struct device_node *of_node; - struct fwnode_handle *fwnode; - /* Driver private data */ void *drv_data; - /* Device specific sysfs attributes */ - const struct attribute_group **attr_grp; - char **supplied_to; size_t num_supplicants; }; @@ -233,9 +194,7 @@ struct power_supply_config { struct power_supply_desc { const char *name; enum power_supply_type type; - const enum power_supply_usb_type *usb_types; - size_t num_usb_types; - const enum power_supply_property *properties; + enum power_supply_property *properties; size_t num_properties; /* @@ -290,7 +249,6 @@ struct power_supply { spinlock_t changed_lock; bool changed; bool initialized; - bool removing; atomic_t use_cnt; #ifdef CONFIG_THERMAL struct thermal_zone_device *tzd; @@ -330,66 +288,11 @@ struct power_supply_info { int use_for_apm; }; -struct power_supply_battery_ocv_table { - int ocv; /* microVolts */ - int capacity; /* percent */ -}; - -struct power_supply_resistance_temp_table { - int temp; /* celsius */ - int resistance; /* internal resistance percent */ -}; - -#define 
POWER_SUPPLY_OCV_TEMP_MAX 20 - -/* - * This is the recommended struct to manage static battery parameters, - * populated by power_supply_get_battery_info(). Most platform drivers should - * use these for consistency. - * Its field names must correspond to elements in enum power_supply_property. - * The default field value is -EINVAL. - * Power supply class itself doesn't use this. - */ - -struct power_supply_battery_info { - unsigned int technology; /* from the enum above */ - int energy_full_design_uwh; /* microWatt-hours */ - int charge_full_design_uah; /* microAmp-hours */ - int voltage_min_design_uv; /* microVolts */ - int voltage_max_design_uv; /* microVolts */ - int tricklecharge_current_ua; /* microAmps */ - int precharge_current_ua; /* microAmps */ - int precharge_voltage_max_uv; /* microVolts */ - int charge_term_current_ua; /* microAmps */ - int charge_restart_voltage_uv; /* microVolts */ - int overvoltage_limit_uv; /* microVolts */ - int constant_charge_current_max_ua; /* microAmps */ - int constant_charge_voltage_max_uv; /* microVolts */ - int factory_internal_resistance_uohm; /* microOhms */ - int ocv_temp[POWER_SUPPLY_OCV_TEMP_MAX];/* celsius */ - int temp_ambient_alert_min; /* celsius */ - int temp_ambient_alert_max; /* celsius */ - int temp_alert_min; /* celsius */ - int temp_alert_max; /* celsius */ - int temp_min; /* celsius */ - int temp_max; /* celsius */ - struct power_supply_battery_ocv_table *ocv_table[POWER_SUPPLY_OCV_TEMP_MAX]; - int ocv_table_size[POWER_SUPPLY_OCV_TEMP_MAX]; - struct power_supply_resistance_temp_table *resist_table; - int resist_table_size; -}; - extern struct atomic_notifier_head power_supply_notifier; extern int power_supply_reg_notifier(struct notifier_block *nb); extern void power_supply_unreg_notifier(struct notifier_block *nb); -#if IS_ENABLED(CONFIG_POWER_SUPPLY) extern struct power_supply *power_supply_get_by_name(const char *name); extern void power_supply_put(struct power_supply *psy); -#else -static inline void 
power_supply_put(struct power_supply *psy) {} -static inline struct power_supply *power_supply_get_by_name(const char *name) -{ return NULL; } -#endif #ifdef CONFIG_OF extern struct power_supply *power_supply_get_by_phandle(struct device_node *np, const char *property); @@ -403,25 +306,8 @@ static inline struct power_supply * devm_power_supply_get_by_phandle(struct device *dev, const char *property) { return NULL; } #endif /* CONFIG_OF */ - -extern int power_supply_get_battery_info(struct power_supply *psy, - struct power_supply_battery_info *info); -extern void power_supply_put_battery_info(struct power_supply *psy, - struct power_supply_battery_info *info); -extern int power_supply_ocv2cap_simple(struct power_supply_battery_ocv_table *table, - int table_len, int ocv); -extern struct power_supply_battery_ocv_table * -power_supply_find_ocv2cap_table(struct power_supply_battery_info *info, - int temp, int *table_len); -extern int power_supply_batinfo_ocv2cap(struct power_supply_battery_info *info, - int ocv, int temp); -extern int -power_supply_temp2resist_simple(struct power_supply_resistance_temp_table *table, - int table_len, int temp); extern void power_supply_changed(struct power_supply *psy); extern int power_supply_am_i_supplied(struct power_supply *psy); -extern int power_supply_set_input_current_limit_from_supplier( - struct power_supply *psy); extern int power_supply_set_battery_charged(struct power_supply *psy); #ifdef CONFIG_POWER_SUPPLY @@ -433,16 +319,9 @@ static inline int power_supply_is_system_supplied(void) { return -ENOSYS; } extern int power_supply_get_property(struct power_supply *psy, enum power_supply_property psp, union power_supply_propval *val); -#if IS_ENABLED(CONFIG_POWER_SUPPLY) extern int power_supply_set_property(struct power_supply *psy, enum power_supply_property psp, const union power_supply_propval *val); -#else -static inline int power_supply_set_property(struct power_supply *psy, - enum power_supply_property psp, - const union 
power_supply_propval *val) -{ return 0; } -#endif extern int power_supply_property_is_writeable(struct power_supply *psy, enum power_supply_property psp); extern void power_supply_external_power_changed(struct power_supply *psy); @@ -466,8 +345,6 @@ devm_power_supply_register_no_ws(struct device *parent, extern void power_supply_unregister(struct power_supply *psy); extern int power_supply_powers(struct power_supply *psy, struct device *dev); -#define to_power_supply(device) container_of(device, struct power_supply, dev) - extern void *power_supply_get_drvdata(struct power_supply *psy); /* For APM emulation, think legacy userspace. */ extern struct class *power_supply_class; @@ -482,20 +359,18 @@ static inline bool power_supply_is_amp_property(enum power_supply_property psp) case POWER_SUPPLY_PROP_CHARGE_NOW: case POWER_SUPPLY_PROP_CHARGE_AVG: case POWER_SUPPLY_PROP_CHARGE_COUNTER: - case POWER_SUPPLY_PROP_PRECHARGE_CURRENT: - case POWER_SUPPLY_PROP_CHARGE_TERM_CURRENT: case POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT: case POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX: case POWER_SUPPLY_PROP_CURRENT_MAX: case POWER_SUPPLY_PROP_CURRENT_NOW: case POWER_SUPPLY_PROP_CURRENT_AVG: case POWER_SUPPLY_PROP_CURRENT_BOOT: - return true; + return 1; default: break; } - return false; + return 0; } static inline bool power_supply_is_watt_property(enum power_supply_property psp) @@ -518,25 +393,12 @@ static inline bool power_supply_is_watt_property(enum power_supply_property psp) case POWER_SUPPLY_PROP_CONSTANT_CHARGE_VOLTAGE: case POWER_SUPPLY_PROP_CONSTANT_CHARGE_VOLTAGE_MAX: case POWER_SUPPLY_PROP_POWER_NOW: - return true; + return 1; default: break; } - return false; -} - -#ifdef CONFIG_POWER_SUPPLY_HWMON -int power_supply_add_hwmon_sysfs(struct power_supply *psy); -void power_supply_remove_hwmon_sysfs(struct power_supply *psy); -#else -static inline int power_supply_add_hwmon_sysfs(struct power_supply *psy) -{ return 0; } -static inline -void 
power_supply_remove_hwmon_sysfs(struct power_supply *psy) {} -#endif - #endif /* __LINUX_POWER_SUPPLY_H__ */ diff --git a/include/linux/powercap.h b/include/linux/powercap.h index 3d557bbcd2..f0a4e6257d 100644 --- a/include/linux/powercap.h +++ b/include/linux/powercap.h @@ -1,7 +1,19 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * powercap.h: Data types and headers for sysfs power capping interface * Copyright (c) 2013, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc. + * */ #ifndef __POWERCAP_H__ @@ -44,18 +56,19 @@ struct powercap_control_type_ops { }; /** - * struct powercap_control_type - Defines a powercap control_type + * struct powercap_control_type- Defines a powercap control_type + * @name: name of control_type * @dev: device for this control_type * @idr: idr to have unique id for its child - * @nr_zones: counter for number of zones of this type + * @root_node: Root holding power zones for this control_type * @ops: Pointer to callback struct - * @lock: mutex for control type + * @node_lock: mutex for control type * @allocated: This is possible that client owns the memory * used by this structure. In this case * this flag is set to false by framework to * prevent deallocation during release process. * Otherwise this flag is set to true. - * @node: linked-list node + * @ctrl_inst: link to the control_type list * * Defines powercap control_type. 
This acts as a container for power * zones, which use same method to control power. E.g. RAPL, RAPL-PCI etc. @@ -128,7 +141,7 @@ struct powercap_zone_ops { * this flag is set to false by framework to * prevent deallocation during release process. * Otherwise this flag is set to true. - * @constraints: List of constraints for this zone. + * @constraint_ptr: List of constraints for this zone. * * This defines a power zone instance. The fields of this structure are * private, and should not be used by client drivers. diff --git a/include/linux/ppp-comp.h b/include/linux/ppp-comp.h index 9d3ffc8f5e..80f4b335ee 100644 --- a/include/linux/ppp-comp.h +++ b/include/linux/ppp-comp.h @@ -1,8 +1,11 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * ppp-comp.h - Definitions for doing PPP packet compression. * * Copyright 1994-1998 Paul Mackerras. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * version 2 as published by the Free Software Foundation. */ #ifndef _NET_PPP_COMP_H #define _NET_PPP_COMP_H @@ -81,7 +84,7 @@ struct compressor { struct module *owner; /* Extra skb space needed by the compressor algorithm */ unsigned int comp_extra; -}; +} __do_const; /* * The return value from decompress routine is the length of the diff --git a/include/linux/ppp_channel.h b/include/linux/ppp_channel.h index 91f9a92834..5d87f810a3 100644 --- a/include/linux/ppp_channel.h +++ b/include/linux/ppp_channel.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ #ifndef _PPP_CHANNEL_H_ #define _PPP_CHANNEL_H_ /* @@ -12,6 +11,11 @@ * * Copyright 1999 Paul Mackerras. * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. 
+ * * ==FILEVERSION 20000322== */ @@ -28,9 +32,6 @@ struct ppp_channel_ops { int (*start_xmit)(struct ppp_channel *, struct sk_buff *); /* Handle an ioctl call that has come in via /dev/ppp. */ int (*ioctl)(struct ppp_channel *, unsigned int, unsigned long); - int (*fill_forward_path)(struct net_device_path_ctx *, - struct net_device_path *, - const struct ppp_channel *); }; struct ppp_channel { diff --git a/include/linux/ppp_defs.h b/include/linux/ppp_defs.h index 9d2b388fae..28aa0237c8 100644 --- a/include/linux/ppp_defs.h +++ b/include/linux/ppp_defs.h @@ -1,8 +1,11 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * ppp_defs.h - PPP definitions. * * Copyright 1994-2000 Paul Mackerras. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * version 2 as published by the Free Software Foundation. */ #ifndef _PPP_DEFS_H_ #define _PPP_DEFS_H_ diff --git a/include/linux/pps-gpio.h b/include/linux/pps-gpio.h index 7bf49908be..0035abe41b 100644 --- a/include/linux/pps-gpio.h +++ b/include/linux/pps-gpio.h @@ -1,19 +1,32 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * pps-gpio.h -- PPS client for GPIOs * + * * Copyright (C) 2011 James Nuss + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 
*/ #ifndef _PPS_GPIO_H #define _PPS_GPIO_H struct pps_gpio_platform_data { - struct gpio_desc *gpio_pin; - struct gpio_desc *echo_pin; bool assert_falling_edge; bool capture_clear; - unsigned int echo_active_ms; + unsigned int gpio_pin; + const char *gpio_label; }; -#endif /* _PPS_GPIO_H */ +#endif diff --git a/include/linux/pps_kernel.h b/include/linux/pps_kernel.h index 78c8ac4951..35ac903956 100644 --- a/include/linux/pps_kernel.h +++ b/include/linux/pps_kernel.h @@ -1,14 +1,28 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * PPS API kernel header * * Copyright (C) 2009 Rodolfo Giometti + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 
*/ #ifndef LINUX_PPS_KERNEL_H #define LINUX_PPS_KERNEL_H #include + #include #include #include @@ -21,9 +35,9 @@ struct pps_device; /* The specific PPS source info */ struct pps_source_info { - char name[PPS_MAX_NAME_LEN]; /* symbolic name */ + char name[PPS_MAX_NAME_LEN]; /* simbolic name */ char path[PPS_MAX_NAME_LEN]; /* path of connected device */ - int mode; /* PPS allowed mode */ + int mode; /* PPS's allowed mode */ void (*echo)(struct pps_device *pps, int event, void *data); /* PPS echo function */ @@ -43,10 +57,10 @@ struct pps_event_time { struct pps_device { struct pps_source_info info; /* PSS source info */ - struct pps_kparams params; /* PPS current params */ + struct pps_kparams params; /* PPS's current params */ - __u32 assert_sequence; /* PPS assert event seq # */ - __u32 clear_sequence; /* PPS clear event seq # */ + __u32 assert_sequence; /* PPS' assert event seq # */ + __u32 clear_sequence; /* PPS' clear event seq # */ struct pps_ktime assert_tu; struct pps_ktime clear_tu; int current_mode; /* PPS mode at event time */ @@ -55,7 +69,7 @@ struct pps_device { wait_queue_head_t queue; /* PPS event queue */ unsigned int id; /* PPS source unique ID */ - void const *lookup_cookie; /* For pps_lookup_dev() only */ + void const *lookup_cookie; /* pps_lookup_dev only */ struct cdev cdev; struct device *dev; struct fasync_struct *async_queue; /* fasync method */ @@ -87,7 +101,7 @@ extern struct pps_device *pps_register_source( extern void pps_unregister_source(struct pps_device *pps); extern void pps_event(struct pps_device *pps, struct pps_event_time *ts, int event, void *data); -/* Look up a pps_device by magic cookie */ +/* Look up a pps device by magic cookie */ struct pps_device *pps_lookup_dev(void const *cookie); static inline void timespec_to_pps_ktime(struct pps_ktime *kt, @@ -118,3 +132,4 @@ static inline void pps_sub_ts(struct pps_event_time *ts, struct timespec64 delta } #endif /* LINUX_PPS_KERNEL_H */ + diff --git a/include/linux/pr.h 
b/include/linux/pr.h index 94ceec713a..65c01c10b3 100644 --- a/include/linux/pr.h +++ b/include/linux/pr.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef LINUX_PR_H #define LINUX_PR_H diff --git a/include/linux/preempt.h b/include/linux/preempt.h index 4d244e295e..fcfde15ae3 100644 --- a/include/linux/preempt.h +++ b/include/linux/preempt.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef __LINUX_PREEMPT_H #define __LINUX_PREEMPT_H @@ -26,13 +25,13 @@ * PREEMPT_MASK: 0x000000ff * SOFTIRQ_MASK: 0x0000ff00 * HARDIRQ_MASK: 0x000f0000 - * NMI_MASK: 0x00f00000 + * NMI_MASK: 0x00100000 * PREEMPT_NEED_RESCHED: 0x80000000 */ #define PREEMPT_BITS 8 #define SOFTIRQ_BITS 8 #define HARDIRQ_BITS 4 -#define NMI_BITS 4 +#define NMI_BITS 1 #define PREEMPT_SHIFT 0 #define SOFTIRQ_SHIFT (PREEMPT_SHIFT + PREEMPT_BITS) @@ -53,61 +52,32 @@ #define SOFTIRQ_DISABLE_OFFSET (2 * SOFTIRQ_OFFSET) -#define PREEMPT_DISABLED (PREEMPT_DISABLE_OFFSET + PREEMPT_ENABLED) - -/* - * Disable preemption until the scheduler is running -- use an unconditional - * value so that it also works on !PREEMPT_COUNT kernels. - * - * Reset by start_kernel()->sched_init()->init_idle()->init_idle_preempt_count(). - */ -#define INIT_PREEMPT_COUNT PREEMPT_OFFSET - -/* - * Initial preempt_count value; reflects the preempt_count schedule invariant - * which states that during context switches: - * - * preempt_count() == 2*PREEMPT_DISABLE_OFFSET - * - * Note: PREEMPT_DISABLE_OFFSET is 0 for !PREEMPT_COUNT kernels. - * Note: See finish_task_switch(). 
- */ -#define FORK_PREEMPT_COUNT (2*PREEMPT_DISABLE_OFFSET + PREEMPT_ENABLED) +/* We use the MSB mostly because its available */ +#define PREEMPT_NEED_RESCHED 0x80000000 /* preempt_count() and related functions, depends on PREEMPT_NEED_RESCHED */ #include -#define nmi_count() (preempt_count() & NMI_MASK) #define hardirq_count() (preempt_count() & HARDIRQ_MASK) -#ifdef CONFIG_PREEMPT_RT -# define softirq_count() (current->softirq_disable_cnt & SOFTIRQ_MASK) -#else -# define softirq_count() (preempt_count() & SOFTIRQ_MASK) -#endif -#define irq_count() (nmi_count() | hardirq_count() | softirq_count()) +#define softirq_count() (preempt_count() & SOFTIRQ_MASK) +#define irq_count() (preempt_count() & (HARDIRQ_MASK | SOFTIRQ_MASK \ + | NMI_MASK)) /* - * Macros to retrieve the current execution context: - * - * in_nmi() - We're in NMI context - * in_hardirq() - We're in hard IRQ context - * in_serving_softirq() - We're in softirq context - * in_task() - We're in task context - */ -#define in_nmi() (nmi_count()) -#define in_hardirq() (hardirq_count()) -#define in_serving_softirq() (softirq_count() & SOFTIRQ_OFFSET) -#define in_task() (!(in_nmi() | in_hardirq() | in_serving_softirq())) - -/* - * The following macros are deprecated and should not be used in new code: - * in_irq() - Obsolete version of in_hardirq() - * in_softirq() - We have BH disabled, or are processing softirqs - * in_interrupt() - We're in NMI,IRQ,SoftIRQ context or have BH disabled + * Are we doing bottom half or hardware interrupt processing? + * Are we in a softirq context? Interrupt context? + * in_softirq - Are we currently processing softirq or have bh disabled? + * in_serving_softirq - Are we currently processing softirq? */ #define in_irq() (hardirq_count()) #define in_softirq() (softirq_count()) #define in_interrupt() (irq_count()) +#define in_serving_softirq() (softirq_count() & SOFTIRQ_OFFSET) + +/* + * Are we in NMI context? 
+ */ +#define in_nmi() (preempt_count() & NMI_MASK) /* * The preempt_count offset after preempt_disable(); @@ -121,11 +91,7 @@ /* * The preempt_count offset after spin_lock() */ -#if !defined(CONFIG_PREEMPT_RT) #define PREEMPT_LOCK_OFFSET PREEMPT_DISABLE_OFFSET -#else -#define PREEMPT_LOCK_OFFSET 0 -#endif /* * The preempt_count offset needed for things like: @@ -157,7 +123,7 @@ */ #define in_atomic_preempt_off() (preempt_count() != PREEMPT_DISABLE_OFFSET) -#if defined(CONFIG_DEBUG_PREEMPT) || defined(CONFIG_TRACE_PREEMPT_TOGGLE) +#if defined(CONFIG_DEBUG_PREEMPT) || defined(CONFIG_PREEMPT_TRACER) extern void preempt_count_add(int val); extern void preempt_count_sub(int val); #define preempt_count_dec_and_test() \ @@ -168,11 +134,16 @@ extern void preempt_count_sub(int val); #define preempt_count_dec_and_test() __preempt_count_dec_and_test() #endif +#define raw_preempt_count_add(val) __preempt_count_add(val) +#define raw_preempt_count_sub(val) __preempt_count_sub(val) + #define __preempt_count_inc() __preempt_count_add(1) #define __preempt_count_dec() __preempt_count_sub(1) #define preempt_count_inc() preempt_count_add(1) +#define raw_preempt_count_inc() raw_preempt_count_add(1) #define preempt_count_dec() preempt_count_sub(1) +#define raw_preempt_count_dec() raw_preempt_count_sub(1) #ifdef CONFIG_PREEMPT_COUNT @@ -182,6 +153,12 @@ do { \ barrier(); \ } while (0) +#define raw_preempt_disable() \ +do { \ + raw_preempt_count_inc(); \ + barrier(); \ +} while (0) + #define sched_preempt_enable_no_resched() \ do { \ barrier(); \ @@ -190,9 +167,15 @@ do { \ #define preempt_enable_no_resched() sched_preempt_enable_no_resched() +#define raw_preempt_enable_no_resched() \ +do { \ + barrier(); \ + raw_preempt_count_dec(); \ +} while (0) + #define preemptible() (preempt_count() == 0 && !irqs_disabled()) -#ifdef CONFIG_PREEMPTION +#ifdef CONFIG_PREEMPT #define preempt_enable() \ do { \ barrier(); \ @@ -213,7 +196,7 @@ do { \ __preempt_schedule(); \ } while (0) -#else /* 
!CONFIG_PREEMPTION */ +#else /* !CONFIG_PREEMPT */ #define preempt_enable() \ do { \ barrier(); \ @@ -227,7 +210,7 @@ do { \ } while (0) #define preempt_check_resched() do { } while (0) -#endif /* CONFIG_PREEMPTION */ +#endif /* CONFIG_PREEMPT */ #define preempt_disable_notrace() \ do { \ @@ -250,8 +233,10 @@ do { \ * region. */ #define preempt_disable() barrier() +#define raw_preempt_disable() barrier() #define sched_preempt_enable_no_resched() barrier() #define preempt_enable_no_resched() barrier() +#define raw_preempt_enable_no_resched() barrier() #define preempt_enable() barrier() #define preempt_check_resched() do { } while (0) @@ -266,11 +251,13 @@ do { \ /* * Modules have no business playing preemption tricks. */ +#ifndef CONFIG_PAX_KERNEXEC #undef sched_preempt_enable_no_resched #undef preempt_enable_no_resched #undef preempt_enable_no_resched_notrace #undef preempt_check_resched #endif +#endif #define preempt_set_need_resched() \ do { \ @@ -332,71 +319,4 @@ static inline void preempt_notifier_init(struct preempt_notifier *notifier, #endif -#ifdef CONFIG_SMP - -/* - * Migrate-Disable and why it is undesired. - * - * When a preempted task becomes elegible to run under the ideal model (IOW it - * becomes one of the M highest priority tasks), it might still have to wait - * for the preemptee's migrate_disable() section to complete. Thereby suffering - * a reduction in bandwidth in the exact duration of the migrate_disable() - * section. - * - * Per this argument, the change from preempt_disable() to migrate_disable() - * gets us: - * - * - a higher priority tasks gains reduced wake-up latency; with preempt_disable() - * it would have had to wait for the lower priority task. 
- * - * - a lower priority tasks; which under preempt_disable() could've instantly - * migrated away when another CPU becomes available, is now constrained - * by the ability to push the higher priority task away, which might itself be - * in a migrate_disable() section, reducing it's available bandwidth. - * - * IOW it trades latency / moves the interference term, but it stays in the - * system, and as long as it remains unbounded, the system is not fully - * deterministic. - * - * - * The reason we have it anyway. - * - * PREEMPT_RT breaks a number of assumptions traditionally held. By forcing a - * number of primitives into becoming preemptible, they would also allow - * migration. This turns out to break a bunch of per-cpu usage. To this end, - * all these primitives employ migirate_disable() to restore this implicit - * assumption. - * - * This is a 'temporary' work-around at best. The correct solution is getting - * rid of the above assumptions and reworking the code to employ explicit - * per-cpu locking or short preempt-disable regions. - * - * The end goal must be to get rid of migrate_disable(), alternatively we need - * a schedulability theory that does not depend on abritrary migration. - * - * - * Notes on the implementation. - * - * The implementation is particularly tricky since existing code patterns - * dictate neither migrate_disable() nor migrate_enable() is allowed to block. - * This means that it cannot use cpus_read_lock() to serialize against hotplug, - * nor can it easily migrate itself into a pending affinity mask change on - * migrate_enable(). - * - * - * Note: even non-work-conserving schedulers like semi-partitioned depends on - * migration, so migrate_disable() is not only a problem for - * work-conserving schedulers. 
- * - */ -extern void migrate_disable(void); -extern void migrate_enable(void); - -#else - -static inline void migrate_disable(void) { } -static inline void migrate_enable(void) { } - -#endif /* CONFIG_SMP */ - #endif /* __LINUX_PREEMPT_H */ diff --git a/include/linux/prefetch.h b/include/linux/prefetch.h index b83a3f944f..a3bfbdf63d 100644 --- a/include/linux/prefetch.h +++ b/include/linux/prefetch.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ /* * Generic cache management functions. Everything is arch-specific, * but this header exists to make sure the defines/functions can be @@ -15,7 +14,6 @@ #include #include -struct page; /* prefetch(x) attempts to pre-emptively get the memory pointed to by address "x" into the CPU L1 cache. @@ -63,11 +61,4 @@ static inline void prefetch_range(void *addr, size_t len) #endif } -static inline void prefetch_page_address(struct page *page) -{ -#if defined(WANT_PAGE_VIRTUAL) || defined(HASHED_PAGE_VIRTUAL) - prefetch(page); -#endif -} - #endif diff --git a/include/linux/printk.h b/include/linux/printk.h index 85b656f82d..a29982a62f 100644 --- a/include/linux/printk.h +++ b/include/linux/printk.h @@ -1,27 +1,21 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef __KERNEL_PRINTK__ #define __KERNEL_PRINTK__ -#include +#include #include #include #include #include -#include -#include extern const char linux_banner[]; extern const char linux_proc_banner[]; -extern int oops_in_progress; /* If set, an oops, panic(), BUG() or die() is in progress */ - -#define PRINTK_MAX_SINGLE_HEADER_LEN 2 - static inline int printk_get_level(const char *buffer) { if (buffer[0] == KERN_SOH_ASCII && buffer[1]) { switch (buffer[1]) { case '0' ... 
'7': + case 'd': /* KERN_DEFAULT */ case 'c': /* KERN_CONT */ return buffer[1]; } @@ -37,14 +31,6 @@ static inline const char *printk_skip_level(const char *buffer) return buffer; } -static inline const char *printk_skip_headers(const char *buffer) -{ - while (printk_get_level(buffer)) - buffer = printk_skip_level(buffer); - - return buffer; -} - #define CONSOLE_EXT_LOG_MAX 8192 /* printk's without a loglevel use this.. */ @@ -53,32 +39,34 @@ static inline const char *printk_skip_headers(const char *buffer) /* We show everything that is MORE important than this.. */ #define CONSOLE_LOGLEVEL_SILENT 0 /* Mum's the word */ #define CONSOLE_LOGLEVEL_MIN 1 /* Minimum loglevel we let people use */ +#define CONSOLE_LOGLEVEL_QUIET 4 /* Shhh ..., when booted with "quiet" */ +#define CONSOLE_LOGLEVEL_DEFAULT 7 /* anything MORE serious than KERN_DEBUG */ #define CONSOLE_LOGLEVEL_DEBUG 10 /* issue debug messages */ #define CONSOLE_LOGLEVEL_MOTORMOUTH 15 /* You can't shut this one up */ -/* - * Default used to be hard-coded at 7, quiet used to be hardcoded at 4, - * we're now allowing both to be set from kernel config. 
- */ -#define CONSOLE_LOGLEVEL_DEFAULT CONFIG_CONSOLE_LOGLEVEL_DEFAULT -#define CONSOLE_LOGLEVEL_QUIET CONFIG_CONSOLE_LOGLEVEL_QUIET - -extern int console_printk[]; +extern int console_printk[4]; #define console_loglevel (console_printk[0]) #define default_message_loglevel (console_printk[1]) #define minimum_console_loglevel (console_printk[2]) #define default_console_loglevel (console_printk[3]) -extern void console_verbose(void); +static inline void console_silent(void) +{ + console_loglevel = CONSOLE_LOGLEVEL_SILENT; +} + +static inline void console_verbose(void) +{ + if (console_loglevel) + console_loglevel = CONSOLE_LOGLEVEL_MOTORMOUTH; +} /* strlen("ratelimit") + 1 */ #define DEVKMSG_STR_MAX_SIZE 10 extern char devkmsg_log_str[]; struct ctl_table; -extern int suppress_printk; - struct va_format { const char *fmt; va_list *va; @@ -128,8 +116,10 @@ struct va_format { */ #define no_printk(fmt, ...) \ ({ \ - if (0) \ - printk(fmt, ##__VA_ARGS__); \ + do { \ + if (0) \ + printk(fmt, ##__VA_ARGS__); \ + } while (0); \ 0; \ }) @@ -141,41 +131,50 @@ static inline __printf(1, 2) __cold void early_printk(const char *s, ...) 
{ } #endif -struct dev_printk_info; +#ifdef CONFIG_PRINTK_NMI +extern void printk_nmi_init(void); +extern void printk_nmi_enter(void); +extern void printk_nmi_exit(void); +extern void printk_nmi_flush(void); +extern void printk_nmi_flush_on_panic(void); +#else +static inline void printk_nmi_init(void) { } +static inline void printk_nmi_enter(void) { } +static inline void printk_nmi_exit(void) { } +static inline void printk_nmi_flush(void) { } +static inline void printk_nmi_flush_on_panic(void) { } +#endif /* PRINTK_NMI */ + +extern int kptr_restrict; #ifdef CONFIG_PRINTK -asmlinkage __printf(4, 0) +asmlinkage __printf(5, 0) int vprintk_emit(int facility, int level, - const struct dev_printk_info *dev_info, + const char *dict, size_t dictlen, const char *fmt, va_list args); asmlinkage __printf(1, 0) int vprintk(const char *fmt, va_list args); +asmlinkage __printf(5, 6) __cold +int printk_emit(int facility, int level, + const char *dict, size_t dictlen, + const char *fmt, ...); + asmlinkage __printf(1, 2) __cold -int _printk(const char *fmt, ...); +int printk(const char *fmt, ...); /* * Special printk facility for scheduler/timekeeping use only, _DO_NOT_USE_ ! */ -__printf(1, 2) __cold int _printk_deferred(const char *fmt, ...); - -extern void __printk_safe_enter(void); -extern void __printk_safe_exit(void); -/* - * The printk_deferred_enter/exit macros are available only as a hack for - * some code paths that need to defer all printk console printing. Interrupts - * must be disabled for the deferred duration. - */ -#define printk_deferred_enter __printk_safe_enter -#define printk_deferred_exit __printk_safe_exit +__printf(1, 2) __cold int printk_deferred(const char *fmt, ...); /* * Please don't use printk_ratelimit(), because it shares ratelimiting state * with all other unrelated printk_ratelimit() callsites. Instead use * printk_ratelimited() or plain old __ratelimit(). 
*/ -extern int __printk_ratelimit(const char *func); +extern int __printk_ratelimit(const char *func) __nocapture(1); #define printk_ratelimit() __printk_ratelimit(__func__) extern bool printk_timed_ratelimit(unsigned long *caller_jiffies, unsigned int interval_msec); @@ -184,20 +183,18 @@ extern int printk_delay_msec; extern int dmesg_restrict; extern int -devkmsg_sysctl_set_loglvl(struct ctl_table *table, int write, void *buf, +devkmsg_sysctl_set_loglvl(struct ctl_table *table, int write, void __user *buf, size_t *lenp, loff_t *ppos); extern void wake_up_klogd(void); char *log_buf_addr_get(void); u32 log_buf_len_get(void); -void log_buf_vmcoreinfo_setup(void); +void log_buf_kexec_setup(void); void __init setup_log_buf(int early); __printf(1, 2) void dump_stack_set_arch_desc(const char *fmt, ...); void dump_stack_print_info(const char *log_lvl); void show_regs_print_info(const char *log_lvl); -extern asmlinkage void dump_stack_lvl(const char *log_lvl) __cold; -extern asmlinkage void dump_stack(void) __cold; #else static inline __printf(1, 0) int vprintk(const char *s, va_list args) @@ -205,24 +202,15 @@ int vprintk(const char *s, va_list args) return 0; } static inline __printf(1, 2) __cold -int _printk(const char *s, ...) +int printk(const char *s, ...) { return 0; } static inline __printf(1, 2) __cold -int _printk_deferred(const char *s, ...) +int printk_deferred(const char *s, ...) 
{ return 0; } - -static inline void printk_deferred_enter(void) -{ -} - -static inline void printk_deferred_exit(void) -{ -} - static inline int printk_ratelimit(void) { return 0; @@ -247,7 +235,7 @@ static inline u32 log_buf_len_get(void) return 0; } -static inline void log_buf_vmcoreinfo_setup(void) +static inline void log_buf_kexec_setup(void) { } @@ -266,280 +254,44 @@ static inline void dump_stack_print_info(const char *log_lvl) static inline void show_regs_print_info(const char *log_lvl) { } - -static inline void dump_stack_lvl(const char *log_lvl) -{ -} - -static inline void dump_stack(void) -{ -} #endif -#ifdef CONFIG_SMP -extern int __printk_cpu_trylock(void); -extern void __printk_wait_on_cpu_lock(void); -extern void __printk_cpu_unlock(void); +extern asmlinkage void dump_stack(void) __cold; -/** - * printk_cpu_lock_irqsave() - Acquire the printk cpu-reentrant spinning - * lock and disable interrupts. - * @flags: Stack-allocated storage for saving local interrupt state, - * to be passed to printk_cpu_unlock_irqrestore(). - * - * If the lock is owned by another CPU, spin until it becomes available. - * Interrupts are restored while spinning. - */ -#define printk_cpu_lock_irqsave(flags) \ - for (;;) { \ - local_irq_save(flags); \ - if (__printk_cpu_trylock()) \ - break; \ - local_irq_restore(flags); \ - __printk_wait_on_cpu_lock(); \ - } - -/** - * printk_cpu_unlock_irqrestore() - Release the printk cpu-reentrant spinning - * lock and restore interrupts. - * @flags: Caller's saved interrupt state, from printk_cpu_lock_irqsave(). 
- */ -#define printk_cpu_unlock_irqrestore(flags) \ - do { \ - __printk_cpu_unlock(); \ - local_irq_restore(flags); \ - } while (0) \ - -#else - -#define printk_cpu_lock_irqsave(flags) ((void)flags) -#define printk_cpu_unlock_irqrestore(flags) ((void)flags) - -#endif /* CONFIG_SMP */ - -extern int kptr_restrict; - -/** - * pr_fmt - used by the pr_*() macros to generate the printk format string - * @fmt: format string passed from a pr_*() macro - * - * This macro can be used to generate a unified format string for pr_*() - * macros. A common use is to prefix all pr_*() messages in a file with a common - * string. For example, defining this at the top of a source file: - * - * #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt - * - * would prefix all pr_info, pr_emerg... messages in the file with the module - * name. - */ #ifndef pr_fmt #define pr_fmt(fmt) fmt #endif -struct module; - -#ifdef CONFIG_PRINTK_INDEX -struct pi_entry { - const char *fmt; - const char *func; - const char *file; - unsigned int line; - - /* - * While printk and pr_* have the level stored in the string at compile - * time, some subsystems dynamically add it at runtime through the - * format string. For these dynamic cases, we allow the subsystem to - * tell us the level at compile time. - * - * NULL indicates that the level, if any, is stored in fmt. - */ - const char *level; - - /* - * The format string used by various subsystem specific printk() - * wrappers to prefix the message. - * - * Note that the static prefix defined by the pr_fmt() macro is stored - * directly in the message format (@fmt), not here. 
- */ - const char *subsys_fmt_prefix; -} __packed; - -#define __printk_index_emit(_fmt, _level, _subsys_fmt_prefix) \ - do { \ - if (__builtin_constant_p(_fmt) && __builtin_constant_p(_level)) { \ - /* - * We check __builtin_constant_p multiple times here - * for the same input because GCC will produce an error - * if we try to assign a static variable to fmt if it - * is not a constant, even with the outer if statement. - */ \ - static const struct pi_entry _entry \ - __used = { \ - .fmt = __builtin_constant_p(_fmt) ? (_fmt) : NULL, \ - .func = __func__, \ - .file = __FILE__, \ - .line = __LINE__, \ - .level = __builtin_constant_p(_level) ? (_level) : NULL, \ - .subsys_fmt_prefix = _subsys_fmt_prefix,\ - }; \ - static const struct pi_entry *_entry_ptr \ - __used __section(".printk_index") = &_entry; \ - } \ - } while (0) - -#else /* !CONFIG_PRINTK_INDEX */ -#define __printk_index_emit(...) do {} while (0) -#endif /* CONFIG_PRINTK_INDEX */ - /* - * Some subsystems have their own custom printk that applies a va_format to a - * generic format, for example, to include a device number or other metadata - * alongside the format supplied by the caller. - * - * In order to store these in the way they would be emitted by the printk - * infrastructure, the subsystem provides us with the start, fixed string, and - * any subsequent text in the format string. - * - * We take a variable argument list as pr_fmt/dev_fmt/etc are sometimes passed - * as multiple arguments (eg: `"%s: ", "blah"`), and we must only take the - * first one. - * - * subsys_fmt_prefix must be known at compile time, or compilation will fail - * (since this is a mistake). If fmt or level is not known at compile time, no - * index entry will be made (since this can legitimately happen). - */ -#define printk_index_subsys_emit(subsys_fmt_prefix, level, fmt, ...) \ - __printk_index_emit(fmt, level, subsys_fmt_prefix) - -#define printk_index_wrap(_p_func, _fmt, ...) 
\ - ({ \ - __printk_index_emit(_fmt, NULL, NULL); \ - _p_func(_fmt, ##__VA_ARGS__); \ - }) - - -/** - * printk - print a kernel message - * @fmt: format string - * - * This is printk(). It can be called from any context. We want it to work. - * - * If printk indexing is enabled, _printk() is called from printk_index_wrap. - * Otherwise, printk is simply #defined to _printk. - * - * We try to grab the console_lock. If we succeed, it's easy - we log the - * output and call the console drivers. If we fail to get the semaphore, we - * place the output into the log buffer and return. The current holder of - * the console_sem will notice the new output in console_unlock(); and will - * send it to the consoles before releasing the lock. - * - * One effect of this deferred printing is that code which calls printk() and - * then changes console_loglevel may break. This is because console_loglevel - * is inspected when the actual printing occurs. - * - * See also: - * printf(3) - * - * See the vsnprintf() documentation for format string extensions over C99. - */ -#define printk(fmt, ...) printk_index_wrap(_printk, fmt, ##__VA_ARGS__) -#define printk_deferred(fmt, ...) \ - printk_index_wrap(_printk_deferred, fmt, ##__VA_ARGS__) - -/** - * pr_emerg - Print an emergency-level message - * @fmt: format string - * @...: arguments for the format string - * - * This macro expands to a printk with KERN_EMERG loglevel. It uses pr_fmt() to - * generate the format string. + * These can be used to print at the various log levels. + * All of these will print unconditionally, although note that pr_debug() + * and other debug macros are compiled out unless either DEBUG is defined + * or CONFIG_DYNAMIC_DEBUG is set. */ #define pr_emerg(fmt, ...) \ printk(KERN_EMERG pr_fmt(fmt), ##__VA_ARGS__) -/** - * pr_alert - Print an alert-level message - * @fmt: format string - * @...: arguments for the format string - * - * This macro expands to a printk with KERN_ALERT loglevel. 
It uses pr_fmt() to - * generate the format string. - */ #define pr_alert(fmt, ...) \ printk(KERN_ALERT pr_fmt(fmt), ##__VA_ARGS__) -/** - * pr_crit - Print a critical-level message - * @fmt: format string - * @...: arguments for the format string - * - * This macro expands to a printk with KERN_CRIT loglevel. It uses pr_fmt() to - * generate the format string. - */ #define pr_crit(fmt, ...) \ printk(KERN_CRIT pr_fmt(fmt), ##__VA_ARGS__) -/** - * pr_err - Print an error-level message - * @fmt: format string - * @...: arguments for the format string - * - * This macro expands to a printk with KERN_ERR loglevel. It uses pr_fmt() to - * generate the format string. - */ #define pr_err(fmt, ...) \ printk(KERN_ERR pr_fmt(fmt), ##__VA_ARGS__) -/** - * pr_warn - Print a warning-level message - * @fmt: format string - * @...: arguments for the format string - * - * This macro expands to a printk with KERN_WARNING loglevel. It uses pr_fmt() - * to generate the format string. - */ -#define pr_warn(fmt, ...) \ +#define pr_warning(fmt, ...) \ printk(KERN_WARNING pr_fmt(fmt), ##__VA_ARGS__) -/** - * pr_notice - Print a notice-level message - * @fmt: format string - * @...: arguments for the format string - * - * This macro expands to a printk with KERN_NOTICE loglevel. It uses pr_fmt() to - * generate the format string. - */ +#define pr_warn pr_warning #define pr_notice(fmt, ...) \ printk(KERN_NOTICE pr_fmt(fmt), ##__VA_ARGS__) -/** - * pr_info - Print an info-level message - * @fmt: format string - * @...: arguments for the format string - * - * This macro expands to a printk with KERN_INFO loglevel. It uses pr_fmt() to - * generate the format string. - */ #define pr_info(fmt, ...) \ printk(KERN_INFO pr_fmt(fmt), ##__VA_ARGS__) - -/** - * pr_cont - Continues a previous log message in the same line. - * @fmt: format string - * @...: arguments for the format string - * - * This macro expands to a printk with KERN_CONT loglevel. 
It should only be - * used when continuing a log message with no newline ('\n') enclosed. Otherwise - * it defaults back to KERN_DEFAULT loglevel. +/* + * Like KERN_CONT, pr_cont() should only be used when continuing + * a line with no newline ('\n') enclosed. Otherwise it defaults + * back to KERN_DEFAULT. */ #define pr_cont(fmt, ...) \ printk(KERN_CONT fmt, ##__VA_ARGS__) -/** - * pr_devel - Print a debug-level message conditionally - * @fmt: format string - * @...: arguments for the format string - * - * This macro expands to a printk with KERN_DEBUG loglevel if DEBUG is - * defined. Otherwise it does nothing. - * - * It uses pr_fmt() to generate the format string. - */ +/* pr_devel() should produce zero code unless DEBUG is defined */ #ifdef DEBUG #define pr_devel(fmt, ...) \ printk(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__) @@ -550,23 +302,11 @@ struct pi_entry { /* If you are writing a driver, please use dev_dbg instead */ -#if defined(CONFIG_DYNAMIC_DEBUG) || \ - (defined(CONFIG_DYNAMIC_DEBUG_CORE) && defined(DYNAMIC_DEBUG_MODULE)) +#if defined(CONFIG_DYNAMIC_DEBUG) #include -/** - * pr_debug - Print a debug-level message conditionally - * @fmt: format string - * @...: arguments for the format string - * - * This macro expands to dynamic_pr_debug() if CONFIG_DYNAMIC_DEBUG is - * set. Otherwise, if DEBUG is defined, it's equivalent to a printk with - * KERN_DEBUG loglevel. If DEBUG is not defined it does nothing. - * - * It uses pr_fmt() to generate the format string (dynamic_pr_debug() uses - * pr_fmt() internally). - */ -#define pr_debug(fmt, ...) \ +/* dynamic_pr_debug() uses pr_fmt() internally so we don't need it here */ +#define pr_debug(fmt, ...) \ dynamic_pr_debug(fmt, ##__VA_ARGS__) #elif defined(DEBUG) #define pr_debug(fmt, ...) \ @@ -582,9 +322,27 @@ struct pi_entry { #ifdef CONFIG_PRINTK #define printk_once(fmt, ...) 
\ - DO_ONCE_LITE(printk, fmt, ##__VA_ARGS__) +({ \ + static bool __print_once __read_mostly; \ + bool __ret_print_once = !__print_once; \ + \ + if (!__print_once) { \ + __print_once = true; \ + printk(fmt, ##__VA_ARGS__); \ + } \ + unlikely(__ret_print_once); \ +}) #define printk_deferred_once(fmt, ...) \ - DO_ONCE_LITE(printk_deferred, fmt, ##__VA_ARGS__) +({ \ + static bool __print_once __read_mostly; \ + bool __ret_print_once = !__print_once; \ + \ + if (!__print_once) { \ + __print_once = true; \ + printk_deferred(fmt, ##__VA_ARGS__); \ + } \ + unlikely(__ret_print_once); \ +}) #else #define printk_once(fmt, ...) \ no_printk(fmt, ##__VA_ARGS__) @@ -606,7 +364,8 @@ struct pi_entry { printk_once(KERN_NOTICE pr_fmt(fmt), ##__VA_ARGS__) #define pr_info_once(fmt, ...) \ printk_once(KERN_INFO pr_fmt(fmt), ##__VA_ARGS__) -/* no pr_cont_once, don't do that... */ +#define pr_cont_once(fmt, ...) \ + printk_once(KERN_CONT pr_fmt(fmt), ##__VA_ARGS__) #if defined(DEBUG) #define pr_devel_once(fmt, ...) \ @@ -669,8 +428,7 @@ struct pi_entry { #endif /* If you are writing a driver, please use dev_dbg instead */ -#if defined(CONFIG_DYNAMIC_DEBUG) || \ - (defined(CONFIG_DYNAMIC_DEBUG_CORE) && defined(DYNAMIC_DEBUG_MODULE)) +#if defined(CONFIG_DYNAMIC_DEBUG) /* descriptor check is first to prevent flooding with "callbacks suppressed" */ #define pr_debug_ratelimited(fmt, ...) 
\ do { \ @@ -678,7 +436,7 @@ do { \ DEFAULT_RATELIMIT_INTERVAL, \ DEFAULT_RATELIMIT_BURST); \ DEFINE_DYNAMIC_DEBUG_METADATA(descriptor, pr_fmt(fmt)); \ - if (DYNAMIC_DEBUG_BRANCH(descriptor) && \ + if (unlikely(descriptor.flags & _DPRINTK_FLAGS_PRINT) && \ __ratelimit(&_rs)) \ __dynamic_pr_debug(&descriptor, pr_fmt(fmt), ##__VA_ARGS__); \ } while (0) @@ -704,6 +462,13 @@ extern int hex_dump_to_buffer(const void *buf, size_t len, int rowsize, extern void print_hex_dump(const char *level, const char *prefix_str, int prefix_type, int rowsize, int groupsize, const void *buf, size_t len, bool ascii); +#if defined(CONFIG_DYNAMIC_DEBUG) +#define print_hex_dump_bytes(prefix_str, prefix_type, buf, len) \ + dynamic_hex_dump(prefix_str, prefix_type, 16, 1, buf, len, true) +#else +extern void print_hex_dump_bytes(const char *prefix_str, int prefix_type, + const void *buf, size_t len); +#endif /* defined(CONFIG_DYNAMIC_DEBUG) */ #else static inline void print_hex_dump(const char *level, const char *prefix_str, int prefix_type, int rowsize, int groupsize, @@ -717,8 +482,7 @@ static inline void print_hex_dump_bytes(const char *prefix_str, int prefix_type, #endif -#if defined(CONFIG_DYNAMIC_DEBUG) || \ - (defined(CONFIG_DYNAMIC_DEBUG_CORE) && defined(DYNAMIC_DEBUG_MODULE)) +#if defined(CONFIG_DYNAMIC_DEBUG) #define print_hex_dump_debug(prefix_str, prefix_type, rowsize, \ groupsize, buf, len, ascii) \ dynamic_hex_dump(prefix_str, prefix_type, rowsize, \ @@ -736,19 +500,4 @@ static inline void print_hex_dump_debug(const char *prefix_str, int prefix_type, } #endif -/** - * print_hex_dump_bytes - shorthand form of print_hex_dump() with default params - * @prefix_str: string to prefix each line with; - * caller supplies trailing spaces for alignment if desired - * @prefix_type: controls whether prefix of an offset, address, or none - * is printed (%DUMP_PREFIX_OFFSET, %DUMP_PREFIX_ADDRESS, %DUMP_PREFIX_NONE) - * @buf: data blob to dump - * @len: number of bytes in the @buf - * - * 
Calls print_hex_dump(), with log level of KERN_DEBUG, - * rowsize of 16, groupsize of 1, and ASCII output included. - */ -#define print_hex_dump_bytes(prefix_str, prefix_type, buf, len) \ - print_hex_dump_debug(prefix_str, prefix_type, 16, 1, buf, len, true) - #endif diff --git a/include/linux/proc_fs.h b/include/linux/proc_fs.h index 069c7fd953..f14c92d4ca 100644 --- a/include/linux/proc_fs.h +++ b/include/linux/proc_fs.h @@ -1,113 +1,55 @@ -/* SPDX-License-Identifier: GPL-2.0 */ /* * The proc filesystem constants/structures */ #ifndef _LINUX_PROC_FS_H #define _LINUX_PROC_FS_H -#include #include #include struct proc_dir_entry; -struct seq_file; -struct seq_operations; - -enum { - /* - * All /proc entries using this ->proc_ops instance are never removed. - * - * If in doubt, ignore this flag. - */ -#ifdef MODULE - PROC_ENTRY_PERMANENT = 0U, -#else - PROC_ENTRY_PERMANENT = 1U << 0, -#endif -}; - -struct proc_ops { - unsigned int proc_flags; - int (*proc_open)(struct inode *, struct file *); - ssize_t (*proc_read)(struct file *, char __user *, size_t, loff_t *); - ssize_t (*proc_read_iter)(struct kiocb *, struct iov_iter *); - ssize_t (*proc_write)(struct file *, const char __user *, size_t, loff_t *); - /* mandatory unless nonseekable_open() or equivalent is used */ - loff_t (*proc_lseek)(struct file *, loff_t, int); - int (*proc_release)(struct inode *, struct file *); - __poll_t (*proc_poll)(struct file *, struct poll_table_struct *); - long (*proc_ioctl)(struct file *, unsigned int, unsigned long); -#ifdef CONFIG_COMPAT - long (*proc_compat_ioctl)(struct file *, unsigned int, unsigned long); -#endif - int (*proc_mmap)(struct file *, struct vm_area_struct *); - unsigned long (*proc_get_unmapped_area)(struct file *, unsigned long, unsigned long, unsigned long, unsigned long); -} __randomize_layout; - -/* definitions for hide_pid field */ -enum proc_hidepid { - HIDEPID_OFF = 0, - HIDEPID_NO_ACCESS = 1, - HIDEPID_INVISIBLE = 2, - HIDEPID_NOT_PTRACEABLE = 4, /* Limit 
pids to only ptraceable pids */ -}; - -/* definitions for proc mount option pidonly */ -enum proc_pidonly { - PROC_PIDONLY_OFF = 0, - PROC_PIDONLY_ON = 1, -}; - -struct proc_fs_info { - struct pid_namespace *pid_ns; - struct dentry *proc_self; /* For /proc/self */ - struct dentry *proc_thread_self; /* For /proc/thread-self */ - kgid_t pid_gid; - enum proc_hidepid hide_pid; - enum proc_pidonly pidonly; -}; - -static inline struct proc_fs_info *proc_sb_info(struct super_block *sb) -{ - return sb->s_fs_info; -} #ifdef CONFIG_PROC_FS -typedef int (*proc_write_t)(struct file *, char *, size_t); - extern void proc_root_init(void); -extern void proc_flush_pid(struct pid *); +extern void proc_flush_task(struct task_struct *); extern struct proc_dir_entry *proc_symlink(const char *, struct proc_dir_entry *, const char *); -struct proc_dir_entry *_proc_mkdir(const char *, umode_t, struct proc_dir_entry *, void *, bool); extern struct proc_dir_entry *proc_mkdir(const char *, struct proc_dir_entry *); +extern struct proc_dir_entry *proc_mkdir_restrict(const char *, struct proc_dir_entry *); extern struct proc_dir_entry *proc_mkdir_data(const char *, umode_t, struct proc_dir_entry *, void *); +extern struct proc_dir_entry *proc_mkdir_data_restrict(const char *, umode_t, + struct proc_dir_entry *, void *); extern struct proc_dir_entry *proc_mkdir_mode(const char *, umode_t, struct proc_dir_entry *); -struct proc_dir_entry *proc_create_mount_point(const char *name); - -struct proc_dir_entry *proc_create_seq_private(const char *name, umode_t mode, - struct proc_dir_entry *parent, const struct seq_operations *ops, - unsigned int state_size, void *data); -#define proc_create_seq_data(name, mode, parent, ops, data) \ - proc_create_seq_private(name, mode, parent, ops, 0, data) -#define proc_create_seq(name, mode, parent, ops) \ - proc_create_seq_private(name, mode, parent, ops, 0, NULL) -struct proc_dir_entry *proc_create_single_data(const char *name, umode_t mode, - struct 
proc_dir_entry *parent, - int (*show)(struct seq_file *, void *), void *data); -#define proc_create_single(name, mode, parent, show) \ - proc_create_single_data(name, mode, parent, show, NULL) extern struct proc_dir_entry *proc_create_data(const char *, umode_t, struct proc_dir_entry *, - const struct proc_ops *, + const struct file_operations *, void *); -struct proc_dir_entry *proc_create(const char *name, umode_t mode, struct proc_dir_entry *parent, const struct proc_ops *proc_ops); +static inline struct proc_dir_entry *proc_create( + const char *name, umode_t mode, struct proc_dir_entry *parent, + const struct file_operations *proc_fops) +{ + return proc_create_data(name, mode, parent, proc_fops, NULL); +} + +static inline struct proc_dir_entry *proc_create_grsec(const char *name, umode_t mode, + struct proc_dir_entry *parent, const struct file_operations *proc_fops) +{ +#ifdef CONFIG_GRKERNSEC_PROC_USER + return proc_create_data(name, S_IRUSR, parent, proc_fops, NULL); +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP) + return proc_create_data(name, S_IRUSR | S_IRGRP, parent, proc_fops, NULL); +#else + return proc_create_data(name, mode, parent, proc_fops, NULL); +#endif +} + + extern void proc_set_size(struct proc_dir_entry *, loff_t); extern void proc_set_user(struct proc_dir_entry *, kuid_t, kgid_t); extern void *PDE_DATA(const struct inode *); @@ -116,46 +58,13 @@ extern void proc_remove(struct proc_dir_entry *); extern void remove_proc_entry(const char *, struct proc_dir_entry *); extern int remove_proc_subtree(const char *, struct proc_dir_entry *); -struct proc_dir_entry *proc_create_net_data(const char *name, umode_t mode, - struct proc_dir_entry *parent, const struct seq_operations *ops, - unsigned int state_size, void *data); -#define proc_create_net(name, mode, parent, ops, state_size) \ - proc_create_net_data(name, mode, parent, ops, state_size, NULL) -struct proc_dir_entry *proc_create_net_single(const char *name, umode_t mode, - struct 
proc_dir_entry *parent, - int (*show)(struct seq_file *, void *), void *data); -struct proc_dir_entry *proc_create_net_data_write(const char *name, umode_t mode, - struct proc_dir_entry *parent, - const struct seq_operations *ops, - proc_write_t write, - unsigned int state_size, void *data); -struct proc_dir_entry *proc_create_net_single_write(const char *name, umode_t mode, - struct proc_dir_entry *parent, - int (*show)(struct seq_file *, void *), - proc_write_t write, - void *data); -extern struct pid *tgid_pidfd_to_pid(const struct file *file); - -struct bpf_iter_aux_info; -extern int bpf_iter_init_seq_net(void *priv_data, struct bpf_iter_aux_info *aux); -extern void bpf_iter_fini_seq_net(void *priv_data); - -#ifdef CONFIG_PROC_PID_ARCH_STATUS -/* - * The architecture which selects CONFIG_PROC_PID_ARCH_STATUS must - * provide proc_pid_arch_status() definition. - */ -int proc_pid_arch_status(struct seq_file *m, struct pid_namespace *ns, - struct pid *pid, struct task_struct *task); -#endif /* CONFIG_PROC_PID_ARCH_STATUS */ - #else /* CONFIG_PROC_FS */ static inline void proc_root_init(void) { } -static inline void proc_flush_pid(struct pid *pid) +static inline void proc_flush_task(struct task_struct *task) { } @@ -163,23 +72,16 @@ static inline struct proc_dir_entry *proc_symlink(const char *name, struct proc_dir_entry *parent,const char *dest) { return NULL;} static inline struct proc_dir_entry *proc_mkdir(const char *name, struct proc_dir_entry *parent) {return NULL;} -static inline struct proc_dir_entry *proc_create_mount_point(const char *name) { return NULL; } -static inline struct proc_dir_entry *_proc_mkdir(const char *name, umode_t mode, - struct proc_dir_entry *parent, void *data, bool force_lookup) -{ - return NULL; -} +static inline struct proc_dir_entry *proc_mkdir_restrict(const char *name, + struct proc_dir_entry *parent) { return NULL; } static inline struct proc_dir_entry *proc_mkdir_data(const char *name, umode_t mode, struct proc_dir_entry 
*parent, void *data) { return NULL; } +static inline struct proc_dir_entry *proc_mkdir_data_restrict(const char *name, + umode_t mode, struct proc_dir_entry *parent, void *data) { return NULL; } static inline struct proc_dir_entry *proc_mkdir_mode(const char *name, umode_t mode, struct proc_dir_entry *parent) { return NULL; } -#define proc_create_seq_private(name, mode, parent, ops, size, data) ({NULL;}) -#define proc_create_seq_data(name, mode, parent, ops, data) ({NULL;}) -#define proc_create_seq(name, mode, parent, ops) ({NULL;}) -#define proc_create_single(name, mode, parent, show) ({NULL;}) -#define proc_create_single_data(name, mode, parent, show, data) ({NULL;}) -#define proc_create(name, mode, parent, proc_ops) ({NULL;}) -#define proc_create_data(name, mode, parent, proc_ops, data) ({NULL;}) +#define proc_create(name, mode, parent, proc_fops) ({NULL;}) +#define proc_create_data(name, mode, parent, proc_fops, data) ({NULL;}) static inline void proc_set_size(struct proc_dir_entry *de, loff_t size) {} static inline void proc_set_user(struct proc_dir_entry *de, kuid_t uid, kgid_t gid) {} @@ -190,15 +92,6 @@ static inline void proc_remove(struct proc_dir_entry *de) {} #define remove_proc_entry(name, parent) do {} while (0) static inline int remove_proc_subtree(const char *name, struct proc_dir_entry *parent) { return 0; } -#define proc_create_net_data(name, mode, parent, ops, state_size, data) ({NULL;}) -#define proc_create_net(name, mode, parent, state_size, ops) ({NULL;}) -#define proc_create_net_single(name, mode, parent, show, data) ({NULL;}) - -static inline struct pid *tgid_pidfd_to_pid(const struct file *file) -{ - return ERR_PTR(-EBADF); -} - #endif /* CONFIG_PROC_FS */ struct net; @@ -206,19 +99,7 @@ struct net; static inline struct proc_dir_entry *proc_net_mkdir( struct net *net, const char *name, struct proc_dir_entry *parent) { - return _proc_mkdir(name, 0, parent, net, true); + return proc_mkdir_data_restrict(name, 0, parent, net); } -struct 
ns_common; -int open_related_ns(struct ns_common *ns, - struct ns_common *(*get_ns)(struct ns_common *ns)); - -/* get the associated pid namespace for a file in procfs */ -static inline struct pid_namespace *proc_pid_ns(struct super_block *sb) -{ - return proc_sb_info(sb)->pid_ns; -} - -bool proc_ns_file(const struct file *file); - #endif /* _LINUX_PROC_FS_H */ diff --git a/include/linux/proc_ns.h b/include/linux/proc_ns.h index 75807ecef8..00d5162045 100644 --- a/include/linux/proc_ns.h +++ b/include/linux/proc_ns.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ /* * procfs namespace bits */ @@ -8,32 +7,28 @@ #include struct pid_namespace; -struct nsset; +struct nsproxy; struct path; struct task_struct; struct inode; struct proc_ns_operations { const char *name; - const char *real_ns_name; int type; struct ns_common *(*get)(struct task_struct *task); void (*put)(struct ns_common *ns); - int (*install)(struct nsset *nsset, struct ns_common *ns); + int (*install)(struct nsproxy *nsproxy, struct ns_common *ns); struct user_namespace *(*owner)(struct ns_common *ns); struct ns_common *(*get_parent)(struct ns_common *ns); -} __randomize_layout; +} __do_const __randomize_layout; extern const struct proc_ns_operations netns_operations; extern const struct proc_ns_operations utsns_operations; extern const struct proc_ns_operations ipcns_operations; extern const struct proc_ns_operations pidns_operations; -extern const struct proc_ns_operations pidns_for_children_operations; extern const struct proc_ns_operations userns_operations; extern const struct proc_ns_operations mntns_operations; extern const struct proc_ns_operations cgroupns_operations; -extern const struct proc_ns_operations timens_operations; -extern const struct proc_ns_operations timens_for_children_operations; /* * We always define these enumerators @@ -45,16 +40,20 @@ enum { PROC_USER_INIT_INO = 0xEFFFFFFDU, PROC_PID_INIT_INO = 0xEFFFFFFCU, PROC_CGROUP_INIT_INO = 0xEFFFFFFBU, - PROC_TIME_INIT_INO = 
0xEFFFFFFAU, }; #ifdef CONFIG_PROC_FS +extern int pid_ns_prepare_proc(struct pid_namespace *ns); +extern void pid_ns_release_proc(struct pid_namespace *ns); extern int proc_alloc_inum(unsigned int *pino); extern void proc_free_inum(unsigned int inum); #else /* CONFIG_PROC_FS */ +static inline int pid_ns_prepare_proc(struct pid_namespace *ns) { return 0; } +static inline void pid_ns_release_proc(struct pid_namespace *ns) {} + static inline int proc_alloc_inum(unsigned int *inum) { *inum = 1; @@ -74,13 +73,8 @@ static inline int ns_alloc_inum(struct ns_common *ns) extern struct file *proc_ns_fget(int fd); #define get_proc_ns(inode) ((struct ns_common *)(inode)->i_private) -extern int ns_get_path(struct path *path, struct task_struct *task, +extern void *ns_get_path(struct path *path, struct task_struct *task, const struct proc_ns_operations *ns_ops); -typedef struct ns_common *ns_get_path_helper_t(void *); -extern int ns_get_path_cb(struct path *path, ns_get_path_helper_t ns_get_cb, - void *private_data); - -extern bool ns_match(const struct ns_common *ns, dev_t dev, ino_t ino); extern int ns_get_name(char *buf, size_t size, struct task_struct *task, const struct proc_ns_operations *ns_ops); diff --git a/include/linux/profile.h b/include/linux/profile.h index fd18ca96f5..b537a25ffa 100644 --- a/include/linux/profile.h +++ b/include/linux/profile.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_PROFILE_H #define _LINUX_PROFILE_H @@ -15,6 +14,7 @@ #define KVM_PROFILING 4 struct proc_dir_entry; +struct pt_regs; struct notifier_block; #if defined(CONFIG_PROFILING) && defined(CONFIG_PROC_FS) @@ -83,6 +83,8 @@ int task_handoff_unregister(struct notifier_block * n); int profile_event_register(enum profile_type, struct notifier_block * n); int profile_event_unregister(enum profile_type, struct notifier_block * n); +struct pt_regs; + #else #define prof_on 0 diff --git a/include/linux/projid.h b/include/linux/projid.h index 613730622a..8c1f2c5522 100644 
--- a/include/linux/projid.h +++ b/include/linux/projid.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_PROJID_H #define _LINUX_PROJID_H diff --git a/include/linux/property.h b/include/linux/property.h index 357513a977..856e50b214 100644 --- a/include/linux/property.h +++ b/include/linux/property.h @@ -1,16 +1,18 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * property.h - Unified device property interface. * * Copyright (C) 2014, Intel Corporation * Authors: Rafael J. Wysocki * Mika Westerberg + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. */ #ifndef _LINUX_PROPERTY_H_ #define _LINUX_PROPERTY_H_ -#include #include #include @@ -22,7 +24,7 @@ enum dev_prop_type { DEV_PROP_U32, DEV_PROP_U64, DEV_PROP_STRING, - DEV_PROP_REF, + DEV_PROP_MAX, }; enum dev_dma_attr { @@ -31,8 +33,6 @@ enum dev_dma_attr { DEV_DMA_COHERENT, }; -struct fwnode_handle *dev_fwnode(struct device *dev); - bool device_property_present(struct device *dev, const char *propname); int device_property_read_u8_array(struct device *dev, const char *propname, u8 *val, size_t nval); @@ -49,78 +49,39 @@ int device_property_read_string(struct device *dev, const char *propname, int device_property_match_string(struct device *dev, const char *propname, const char *string); -bool fwnode_device_is_available(const struct fwnode_handle *fwnode); -bool fwnode_property_present(const struct fwnode_handle *fwnode, - const char *propname); -int fwnode_property_read_u8_array(const struct fwnode_handle *fwnode, +bool fwnode_property_present(struct fwnode_handle *fwnode, const char *propname); +int fwnode_property_read_u8_array(struct fwnode_handle *fwnode, const char *propname, u8 *val, size_t nval); -int fwnode_property_read_u16_array(const struct fwnode_handle *fwnode, +int fwnode_property_read_u16_array(struct fwnode_handle *fwnode, const char 
*propname, u16 *val, size_t nval); -int fwnode_property_read_u32_array(const struct fwnode_handle *fwnode, +int fwnode_property_read_u32_array(struct fwnode_handle *fwnode, const char *propname, u32 *val, size_t nval); -int fwnode_property_read_u64_array(const struct fwnode_handle *fwnode, +int fwnode_property_read_u64_array(struct fwnode_handle *fwnode, const char *propname, u64 *val, size_t nval); -int fwnode_property_read_string_array(const struct fwnode_handle *fwnode, +int fwnode_property_read_string_array(struct fwnode_handle *fwnode, const char *propname, const char **val, size_t nval); -int fwnode_property_read_string(const struct fwnode_handle *fwnode, +int fwnode_property_read_string(struct fwnode_handle *fwnode, const char *propname, const char **val); -int fwnode_property_match_string(const struct fwnode_handle *fwnode, +int fwnode_property_match_string(struct fwnode_handle *fwnode, const char *propname, const char *string); -int fwnode_property_get_reference_args(const struct fwnode_handle *fwnode, - const char *prop, const char *nargs_prop, - unsigned int nargs, unsigned int index, - struct fwnode_reference_args *args); -struct fwnode_handle *fwnode_find_reference(const struct fwnode_handle *fwnode, - const char *name, - unsigned int index); - -const char *fwnode_get_name(const struct fwnode_handle *fwnode); -const char *fwnode_get_name_prefix(const struct fwnode_handle *fwnode); -struct fwnode_handle *fwnode_get_parent(const struct fwnode_handle *fwnode); -struct fwnode_handle *fwnode_get_next_parent( - struct fwnode_handle *fwnode); -struct device *fwnode_get_next_parent_dev(struct fwnode_handle *fwnode); -unsigned int fwnode_count_parents(const struct fwnode_handle *fwn); -struct fwnode_handle *fwnode_get_nth_parent(struct fwnode_handle *fwn, - unsigned int depth); -bool fwnode_is_ancestor_of(struct fwnode_handle *test_ancestor, - struct fwnode_handle *test_child); -struct fwnode_handle *fwnode_get_next_child_node( - const struct fwnode_handle 
*fwnode, struct fwnode_handle *child); -struct fwnode_handle *fwnode_get_next_available_child_node( - const struct fwnode_handle *fwnode, struct fwnode_handle *child); - -#define fwnode_for_each_child_node(fwnode, child) \ - for (child = fwnode_get_next_child_node(fwnode, NULL); child; \ - child = fwnode_get_next_child_node(fwnode, child)) - -#define fwnode_for_each_available_child_node(fwnode, child) \ - for (child = fwnode_get_next_available_child_node(fwnode, NULL); child;\ - child = fwnode_get_next_available_child_node(fwnode, child)) - -struct fwnode_handle *device_get_next_child_node( - struct device *dev, struct fwnode_handle *child); +struct fwnode_handle *device_get_next_child_node(struct device *dev, + struct fwnode_handle *child); #define device_for_each_child_node(dev, child) \ for (child = device_get_next_child_node(dev, NULL); child; \ child = device_get_next_child_node(dev, child)) -struct fwnode_handle *fwnode_get_named_child_node( - const struct fwnode_handle *fwnode, const char *childname); struct fwnode_handle *device_get_named_child_node(struct device *dev, const char *childname); -struct fwnode_handle *fwnode_handle_get(struct fwnode_handle *fwnode); void fwnode_handle_put(struct fwnode_handle *fwnode); -int fwnode_irq_get(const struct fwnode_handle *fwnode, unsigned int index); - unsigned int device_get_child_node_count(struct device *dev); static inline bool device_property_read_bool(struct device *dev, @@ -153,347 +114,143 @@ static inline int device_property_read_u64(struct device *dev, return device_property_read_u64_array(dev, propname, val, 1); } -static inline int device_property_count_u8(struct device *dev, const char *propname) -{ - return device_property_read_u8_array(dev, propname, NULL, 0); -} - -static inline int device_property_count_u16(struct device *dev, const char *propname) -{ - return device_property_read_u16_array(dev, propname, NULL, 0); -} - -static inline int device_property_count_u32(struct device *dev, const char 
*propname) -{ - return device_property_read_u32_array(dev, propname, NULL, 0); -} - -static inline int device_property_count_u64(struct device *dev, const char *propname) -{ - return device_property_read_u64_array(dev, propname, NULL, 0); -} - -static inline int device_property_string_array_count(struct device *dev, - const char *propname) -{ - return device_property_read_string_array(dev, propname, NULL, 0); -} - -static inline bool fwnode_property_read_bool(const struct fwnode_handle *fwnode, +static inline bool fwnode_property_read_bool(struct fwnode_handle *fwnode, const char *propname) { return fwnode_property_present(fwnode, propname); } -static inline int fwnode_property_read_u8(const struct fwnode_handle *fwnode, +static inline int fwnode_property_read_u8(struct fwnode_handle *fwnode, const char *propname, u8 *val) { return fwnode_property_read_u8_array(fwnode, propname, val, 1); } -static inline int fwnode_property_read_u16(const struct fwnode_handle *fwnode, +static inline int fwnode_property_read_u16(struct fwnode_handle *fwnode, const char *propname, u16 *val) { return fwnode_property_read_u16_array(fwnode, propname, val, 1); } -static inline int fwnode_property_read_u32(const struct fwnode_handle *fwnode, +static inline int fwnode_property_read_u32(struct fwnode_handle *fwnode, const char *propname, u32 *val) { return fwnode_property_read_u32_array(fwnode, propname, val, 1); } -static inline int fwnode_property_read_u64(const struct fwnode_handle *fwnode, +static inline int fwnode_property_read_u64(struct fwnode_handle *fwnode, const char *propname, u64 *val) { return fwnode_property_read_u64_array(fwnode, propname, val, 1); } -static inline int fwnode_property_count_u8(const struct fwnode_handle *fwnode, - const char *propname) -{ - return fwnode_property_read_u8_array(fwnode, propname, NULL, 0); -} - -static inline int fwnode_property_count_u16(const struct fwnode_handle *fwnode, - const char *propname) -{ - return 
fwnode_property_read_u16_array(fwnode, propname, NULL, 0); -} - -static inline int fwnode_property_count_u32(const struct fwnode_handle *fwnode, - const char *propname) -{ - return fwnode_property_read_u32_array(fwnode, propname, NULL, 0); -} - -static inline int fwnode_property_count_u64(const struct fwnode_handle *fwnode, - const char *propname) -{ - return fwnode_property_read_u64_array(fwnode, propname, NULL, 0); -} - -static inline int -fwnode_property_string_array_count(const struct fwnode_handle *fwnode, - const char *propname) -{ - return fwnode_property_read_string_array(fwnode, propname, NULL, 0); -} - -struct software_node; - -/** - * struct software_node_ref_args - Reference property with additional arguments - * @node: Reference to a software node - * @nargs: Number of elements in @args array - * @args: Integer arguments - */ -struct software_node_ref_args { - const struct software_node *node; - unsigned int nargs; - u64 args[NR_FWNODE_REFERENCE_ARGS]; -}; - -#define SOFTWARE_NODE_REFERENCE(_ref_, ...) \ -(const struct software_node_ref_args) { \ - .node = _ref_, \ - .nargs = ARRAY_SIZE(((u64[]){ 0, ##__VA_ARGS__ })) - 1, \ - .args = { __VA_ARGS__ }, \ -} - /** * struct property_entry - "Built-in" device property representation. * @name: Name of the property. * @length: Length of data making up the value. - * @is_inline: True when the property value is stored inline. - * @type: Type of the data in unions. - * @pointer: Pointer to the property when it is not stored inline. - * @value: Value of the property when it is stored inline. + * @is_array: True when the property is an array. + * @is_string: True when property is a string. + * @pointer: Pointer to the property (an array of items of the given type). + * @value: Value of the property (when it is a single item of the given type). 
*/ struct property_entry { const char *name; size_t length; - bool is_inline; - enum dev_prop_type type; + bool is_array; + bool is_string; union { - const void *pointer; union { - u8 u8_data[sizeof(u64) / sizeof(u8)]; - u16 u16_data[sizeof(u64) / sizeof(u16)]; - u32 u32_data[sizeof(u64) / sizeof(u32)]; - u64 u64_data[sizeof(u64) / sizeof(u64)]; - const char *str[sizeof(u64) / sizeof(char *)]; + void *raw_data; + u8 *u8_data; + u16 *u16_data; + u32 *u32_data; + u64 *u64_data; + const char **str; + } pointer; + union { + unsigned long long raw_data; + u8 u8_data; + u16 u16_data; + u32 u32_data; + u64 u64_data; + const char *str; } value; }; }; /* - * Note: the below initializers for the anonymous union are carefully + * Note: the below four initializers for the anonymous union are carefully * crafted to avoid gcc-4.4.4's problems with initialization of anon unions * and structs. */ -#define __PROPERTY_ENTRY_ELEMENT_SIZE(_elem_) \ - sizeof(((struct property_entry *)NULL)->value._elem_[0]) - -#define __PROPERTY_ENTRY_ARRAY_ELSIZE_LEN(_name_, _elsize_, _Type_, \ - _val_, _len_) \ -(struct property_entry) { \ - .name = _name_, \ - .length = (_len_) * (_elsize_), \ - .type = DEV_PROP_##_Type_, \ - { .pointer = _val_ }, \ +#define PROPERTY_ENTRY_INTEGER_ARRAY(_name_, _type_, _val_) \ +{ \ + .name = _name_, \ + .length = ARRAY_SIZE(_val_) * sizeof(_type_), \ + .is_array = true, \ + .is_string = false, \ + { .pointer = { ._type_##_data = _val_ } }, \ } -#define __PROPERTY_ENTRY_ARRAY_LEN(_name_, _elem_, _Type_, _val_, _len_)\ - __PROPERTY_ENTRY_ARRAY_ELSIZE_LEN(_name_, \ - __PROPERTY_ENTRY_ELEMENT_SIZE(_elem_), \ - _Type_, _val_, _len_) +#define PROPERTY_ENTRY_U8_ARRAY(_name_, _val_) \ + PROPERTY_ENTRY_INTEGER_ARRAY(_name_, u8, _val_) +#define PROPERTY_ENTRY_U16_ARRAY(_name_, _val_) \ + PROPERTY_ENTRY_INTEGER_ARRAY(_name_, u16, _val_) +#define PROPERTY_ENTRY_U32_ARRAY(_name_, _val_) \ + PROPERTY_ENTRY_INTEGER_ARRAY(_name_, u32, _val_) +#define 
PROPERTY_ENTRY_U64_ARRAY(_name_, _val_) \ + PROPERTY_ENTRY_INTEGER_ARRAY(_name_, u64, _val_) -#define PROPERTY_ENTRY_U8_ARRAY_LEN(_name_, _val_, _len_) \ - __PROPERTY_ENTRY_ARRAY_LEN(_name_, u8_data, U8, _val_, _len_) -#define PROPERTY_ENTRY_U16_ARRAY_LEN(_name_, _val_, _len_) \ - __PROPERTY_ENTRY_ARRAY_LEN(_name_, u16_data, U16, _val_, _len_) -#define PROPERTY_ENTRY_U32_ARRAY_LEN(_name_, _val_, _len_) \ - __PROPERTY_ENTRY_ARRAY_LEN(_name_, u32_data, U32, _val_, _len_) -#define PROPERTY_ENTRY_U64_ARRAY_LEN(_name_, _val_, _len_) \ - __PROPERTY_ENTRY_ARRAY_LEN(_name_, u64_data, U64, _val_, _len_) -#define PROPERTY_ENTRY_STRING_ARRAY_LEN(_name_, _val_, _len_) \ - __PROPERTY_ENTRY_ARRAY_LEN(_name_, str, STRING, _val_, _len_) -#define PROPERTY_ENTRY_REF_ARRAY_LEN(_name_, _val_, _len_) \ - __PROPERTY_ENTRY_ARRAY_ELSIZE_LEN(_name_, \ - sizeof(struct software_node_ref_args), \ - REF, _val_, _len_) - -#define PROPERTY_ENTRY_U8_ARRAY(_name_, _val_) \ - PROPERTY_ENTRY_U8_ARRAY_LEN(_name_, _val_, ARRAY_SIZE(_val_)) -#define PROPERTY_ENTRY_U16_ARRAY(_name_, _val_) \ - PROPERTY_ENTRY_U16_ARRAY_LEN(_name_, _val_, ARRAY_SIZE(_val_)) -#define PROPERTY_ENTRY_U32_ARRAY(_name_, _val_) \ - PROPERTY_ENTRY_U32_ARRAY_LEN(_name_, _val_, ARRAY_SIZE(_val_)) -#define PROPERTY_ENTRY_U64_ARRAY(_name_, _val_) \ - PROPERTY_ENTRY_U64_ARRAY_LEN(_name_, _val_, ARRAY_SIZE(_val_)) -#define PROPERTY_ENTRY_STRING_ARRAY(_name_, _val_) \ - PROPERTY_ENTRY_STRING_ARRAY_LEN(_name_, _val_, ARRAY_SIZE(_val_)) -#define PROPERTY_ENTRY_REF_ARRAY(_name_, _val_) \ - PROPERTY_ENTRY_REF_ARRAY_LEN(_name_, _val_, ARRAY_SIZE(_val_)) - -#define __PROPERTY_ENTRY_ELEMENT(_name_, _elem_, _Type_, _val_) \ -(struct property_entry) { \ - .name = _name_, \ - .length = __PROPERTY_ENTRY_ELEMENT_SIZE(_elem_), \ - .is_inline = true, \ - .type = DEV_PROP_##_Type_, \ - { .value = { ._elem_[0] = _val_ } }, \ +#define PROPERTY_ENTRY_STRING_ARRAY(_name_, _val_) \ +{ \ + .name = _name_, \ + .length = ARRAY_SIZE(_val_) * sizeof(const char 
*), \ + .is_array = true, \ + .is_string = true, \ + { .pointer = { .str = _val_ } }, \ } -#define PROPERTY_ENTRY_U8(_name_, _val_) \ - __PROPERTY_ENTRY_ELEMENT(_name_, u8_data, U8, _val_) -#define PROPERTY_ENTRY_U16(_name_, _val_) \ - __PROPERTY_ENTRY_ELEMENT(_name_, u16_data, U16, _val_) -#define PROPERTY_ENTRY_U32(_name_, _val_) \ - __PROPERTY_ENTRY_ELEMENT(_name_, u32_data, U32, _val_) -#define PROPERTY_ENTRY_U64(_name_, _val_) \ - __PROPERTY_ENTRY_ELEMENT(_name_, u64_data, U64, _val_) -#define PROPERTY_ENTRY_STRING(_name_, _val_) \ - __PROPERTY_ENTRY_ELEMENT(_name_, str, STRING, _val_) +#define PROPERTY_ENTRY_INTEGER(_name_, _type_, _val_) \ +{ \ + .name = _name_, \ + .length = sizeof(_type_), \ + .is_string = false, \ + { .value = { ._type_##_data = _val_ } }, \ +} + +#define PROPERTY_ENTRY_U8(_name_, _val_) \ + PROPERTY_ENTRY_INTEGER(_name_, u8, _val_) +#define PROPERTY_ENTRY_U16(_name_, _val_) \ + PROPERTY_ENTRY_INTEGER(_name_, u16, _val_) +#define PROPERTY_ENTRY_U32(_name_, _val_) \ + PROPERTY_ENTRY_INTEGER(_name_, u32, _val_) +#define PROPERTY_ENTRY_U64(_name_, _val_) \ + PROPERTY_ENTRY_INTEGER(_name_, u64, _val_) + +#define PROPERTY_ENTRY_STRING(_name_, _val_) \ +{ \ + .name = _name_, \ + .length = sizeof(_val_), \ + .is_string = true, \ + { .value = { .str = _val_ } }, \ +} #define PROPERTY_ENTRY_BOOL(_name_) \ -(struct property_entry) { \ +{ \ .name = _name_, \ - .is_inline = true, \ } -#define PROPERTY_ENTRY_REF(_name_, _ref_, ...) 
\ -(struct property_entry) { \ - .name = _name_, \ - .length = sizeof(struct software_node_ref_args), \ - .type = DEV_PROP_REF, \ - { .pointer = &SOFTWARE_NODE_REFERENCE(_ref_, ##__VA_ARGS__), }, \ -} - -struct property_entry * -property_entries_dup(const struct property_entry *properties); - -void property_entries_free(const struct property_entry *properties); - int device_add_properties(struct device *dev, - const struct property_entry *properties); + struct property_entry *properties); void device_remove_properties(struct device *dev); bool device_dma_supported(struct device *dev); enum dev_dma_attr device_get_dma_attr(struct device *dev); -const void *device_get_match_data(struct device *dev); - int device_get_phy_mode(struct device *dev); void *device_get_mac_address(struct device *dev, char *addr, int alen); -int fwnode_get_phy_mode(struct fwnode_handle *fwnode); -void *fwnode_get_mac_address(struct fwnode_handle *fwnode, - char *addr, int alen); -struct fwnode_handle *fwnode_graph_get_next_endpoint( - const struct fwnode_handle *fwnode, struct fwnode_handle *prev); -struct fwnode_handle * -fwnode_graph_get_port_parent(const struct fwnode_handle *fwnode); -struct fwnode_handle *fwnode_graph_get_remote_port_parent( - const struct fwnode_handle *fwnode); -struct fwnode_handle *fwnode_graph_get_remote_port( - const struct fwnode_handle *fwnode); -struct fwnode_handle *fwnode_graph_get_remote_endpoint( - const struct fwnode_handle *fwnode); -struct fwnode_handle * -fwnode_graph_get_remote_node(const struct fwnode_handle *fwnode, u32 port, - u32 endpoint); - -static inline bool fwnode_graph_is_endpoint(struct fwnode_handle *fwnode) -{ - return fwnode_property_present(fwnode, "remote-endpoint"); -} - -/* - * Fwnode lookup flags - * - * @FWNODE_GRAPH_ENDPOINT_NEXT: In the case of no exact match, look for the - * closest endpoint ID greater than the specified - * one. 
- * @FWNODE_GRAPH_DEVICE_DISABLED: That the device to which the remote - * endpoint of the given endpoint belongs to, - * may be disabled. - */ -#define FWNODE_GRAPH_ENDPOINT_NEXT BIT(0) -#define FWNODE_GRAPH_DEVICE_DISABLED BIT(1) - -struct fwnode_handle * -fwnode_graph_get_endpoint_by_id(const struct fwnode_handle *fwnode, - u32 port, u32 endpoint, unsigned long flags); - -#define fwnode_graph_for_each_endpoint(fwnode, child) \ - for (child = NULL; \ - (child = fwnode_graph_get_next_endpoint(fwnode, child)); ) - -int fwnode_graph_parse_endpoint(const struct fwnode_handle *fwnode, - struct fwnode_endpoint *endpoint); - -typedef void *(*devcon_match_fn_t)(struct fwnode_handle *fwnode, const char *id, - void *data); - -void *fwnode_connection_find_match(struct fwnode_handle *fwnode, - const char *con_id, void *data, - devcon_match_fn_t match); - -static inline void *device_connection_find_match(struct device *dev, - const char *con_id, void *data, - devcon_match_fn_t match) -{ - return fwnode_connection_find_match(dev_fwnode(dev), con_id, data, match); -} - -/* -------------------------------------------------------------------------- */ -/* Software fwnode support - when HW description is incomplete or missing */ - -/** - * struct software_node - Software node description - * @name: Name of the software node - * @parent: Parent of the software node - * @properties: Array of device properties - */ -struct software_node { - const char *name; - const struct software_node *parent; - const struct property_entry *properties; -}; - -bool is_software_node(const struct fwnode_handle *fwnode); -const struct software_node * -to_software_node(const struct fwnode_handle *fwnode); -struct fwnode_handle *software_node_fwnode(const struct software_node *node); - -const struct software_node * -software_node_find_by_name(const struct software_node *parent, - const char *name); - -int software_node_register_nodes(const struct software_node *nodes); -void 
software_node_unregister_nodes(const struct software_node *nodes); - -int software_node_register_node_group(const struct software_node **node_group); -void software_node_unregister_node_group(const struct software_node **node_group); - -int software_node_register(const struct software_node *node); -void software_node_unregister(const struct software_node *node); - -struct fwnode_handle * -fwnode_create_software_node(const struct property_entry *properties, - const struct fwnode_handle *parent); -void fwnode_remove_software_node(struct fwnode_handle *fwnode); - -int device_add_software_node(struct device *dev, const struct software_node *node); -void device_remove_software_node(struct device *dev); - -int device_create_managed_software_node(struct device *dev, - const struct property_entry *properties, - const struct software_node *parent); - #endif /* _LINUX_PROPERTY_H_ */ diff --git a/include/linux/psci.h b/include/linux/psci.h index 4ca0060a3f..a094b75ec3 100644 --- a/include/linux/psci.h +++ b/include/linux/psci.h @@ -1,5 +1,12 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
* * Copyright (C) 2015 ARM Limited */ @@ -7,7 +14,6 @@ #ifndef __LINUX_PSCI_H #define __LINUX_PSCI_H -#include #include #include @@ -16,13 +22,10 @@ bool psci_tos_resident_on(int cpu); -int psci_cpu_suspend_enter(u32 state); -bool psci_power_state_is_valid(u32 state); -int psci_set_osi_mode(bool enable); -bool psci_has_osi_support(void); +int psci_cpu_init_idle(unsigned int cpu); +int psci_cpu_suspend_enter(unsigned long index); struct psci_operations { - u32 (*get_version)(void); int (*cpu_suspend)(u32 state, unsigned long entry_point); int (*cpu_off)(u32 state); int (*cpu_on)(unsigned long cpuid, unsigned long entry_point); @@ -30,19 +33,10 @@ struct psci_operations { int (*affinity_info)(unsigned long target_affinity, unsigned long lowest_affinity_level); int (*migrate_info_type)(void); -}; +} __no_const; extern struct psci_operations psci_ops; -struct psci_0_1_function_ids { - u32 cpu_suspend; - u32 cpu_on; - u32 cpu_off; - u32 migrate; -}; - -struct psci_0_1_function_ids get_psci_0_1_function_ids(void); - #if defined(CONFIG_ARM_PSCI_FW) int __init psci_dt_init(void); #else @@ -52,11 +46,10 @@ static inline int psci_dt_init(void) { return 0; } #if defined(CONFIG_ARM_PSCI_FW) && defined(CONFIG_ACPI) int __init psci_acpi_init(void); bool __init acpi_psci_present(void); -bool acpi_psci_use_hvc(void); +bool __init acpi_psci_use_hvc(void); #else static inline int psci_acpi_init(void) { return 0; } static inline bool acpi_psci_present(void) { return false; } -static inline bool acpi_psci_use_hvc(void) {return false; } #endif #endif /* __LINUX_PSCI_H */ diff --git a/include/linux/pstore.h b/include/linux/pstore.h index eb93a54cff..92013cc9cc 100644 --- a/include/linux/pstore.h +++ b/include/linux/pstore.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * Persistent Storage - pstore.h * @@ -6,6 +5,19 @@ * * This code is the generic layer to export data records from platform * level persistent storage via a file system. 
+ * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #ifndef _LINUX_PSTORE_H #define _LINUX_PSTORE_H @@ -14,272 +26,67 @@ #include #include #include -#include +#include #include #include -struct module; - -/* - * pstore record types (see fs/pstore/platform.c for pstore_type_names[]) - * These values may be written to storage (see EFI vars backend), so - * they are kind of an ABI. Be careful changing the mappings. 
- */ +/* types */ enum pstore_type_id { - /* Frontend storage types */ PSTORE_TYPE_DMESG = 0, PSTORE_TYPE_MCE = 1, PSTORE_TYPE_CONSOLE = 2, PSTORE_TYPE_FTRACE = 3, - - /* PPC64-specific partition types */ + /* PPC64 partition types */ PSTORE_TYPE_PPC_RTAS = 4, PSTORE_TYPE_PPC_OF = 5, PSTORE_TYPE_PPC_COMMON = 6, PSTORE_TYPE_PMSG = 7, PSTORE_TYPE_PPC_OPAL = 8, - - /* End of the list */ - PSTORE_TYPE_MAX + PSTORE_TYPE_UNKNOWN = 255 }; -const char *pstore_type_to_name(enum pstore_type_id type); -enum pstore_type_id pstore_name_to_type(const char *name); +struct module; -struct pstore_info; -/** - * struct pstore_record - details of a pstore record entry - * @psi: pstore backend driver information - * @type: pstore record type - * @id: per-type unique identifier for record - * @time: timestamp of the record - * @buf: pointer to record contents - * @size: size of @buf - * @ecc_notice_size: - * ECC information for @buf - * - * Valid for PSTORE_TYPE_DMESG @type: - * - * @count: Oops count since boot - * @reason: kdump reason for notification - * @part: position in a multipart record - * @compressed: whether the buffer is compressed - * - */ -struct pstore_record { - struct pstore_info *psi; - enum pstore_type_id type; - u64 id; - struct timespec64 time; - char *buf; - ssize_t size; - ssize_t ecc_notice_size; - - int count; - enum kmsg_dump_reason reason; - unsigned int part; - bool compressed; -}; - -/** - * struct pstore_info - backend pstore driver structure - * - * @owner: module which is responsible for this backend driver - * @name: name of the backend driver - * - * @buf_lock: semaphore to serialize access to @buf - * @buf: preallocated crash dump buffer - * @bufsize: size of @buf available for crash dump bytes (must match - * smallest number of bytes available for writing to a - * backend entry, since compressed bytes don't take kindly - * to being truncated) - * - * @read_mutex: serializes @open, @read, @close, and @erase callbacks - * @flags: bitfield of frontends 
the backend can accept writes for - * @max_reason: Used when PSTORE_FLAGS_DMESG is set. Contains the - * kmsg_dump_reason enum value. KMSG_DUMP_UNDEF means - * "use existing kmsg_dump() filtering, based on the - * printk.always_kmsg_dump boot param" (which is either - * KMSG_DUMP_OOPS when false, or KMSG_DUMP_MAX when - * true); see printk.always_kmsg_dump for more details. - * @data: backend-private pointer passed back during callbacks - * - * Callbacks: - * - * @open: - * Notify backend that pstore is starting a full read of backend - * records. Followed by one or more @read calls, and a final @close. - * - * @psi: in: pointer to the struct pstore_info for the backend - * - * Returns 0 on success, and non-zero on error. - * - * @close: - * Notify backend that pstore has finished a full read of backend - * records. Always preceded by an @open call and one or more @read - * calls. - * - * @psi: in: pointer to the struct pstore_info for the backend - * - * Returns 0 on success, and non-zero on error. (Though pstore will - * ignore the error.) - * - * @read: - * Read next available backend record. Called after a successful - * @open. - * - * @record: - * pointer to record to populate. @buf should be allocated - * by the backend and filled. At least @type and @id should - * be populated, since these are used when creating pstorefs - * file names. - * - * Returns record size on success, zero when no more records are - * available, or negative on error. - * - * @write: - * A newly generated record needs to be written to backend storage. - * - * @record: - * pointer to record metadata. When @type is PSTORE_TYPE_DMESG, - * @buf will be pointing to the preallocated @psi.buf, since - * memory allocation may be broken during an Oops. Regardless, - * @buf must be proccesed or copied before returning. The - * backend is also expected to write @id with something that - * can help identify this record to a future @erase callback. 
- * The @time field will be prepopulated with the current time, - * when available. The @size field will have the size of data - * in @buf. - * - * Returns 0 on success, and non-zero on error. - * - * @write_user: - * Perform a frontend write to a backend record, using a specified - * buffer that is coming directly from userspace, instead of the - * @record @buf. - * - * @record: pointer to record metadata. - * @buf: pointer to userspace contents to write to backend - * - * Returns 0 on success, and non-zero on error. - * - * @erase: - * Delete a record from backend storage. Different backends - * identify records differently, so entire original record is - * passed back to assist in identification of what the backend - * should remove from storage. - * - * @record: pointer to record metadata. - * - * Returns 0 on success, and non-zero on error. - * - */ struct pstore_info { struct module *owner; - const char *name; - - struct semaphore buf_lock; + char *name; + spinlock_t buf_lock; /* serialize access to 'buf' */ char *buf; size_t bufsize; - - struct mutex read_mutex; - + struct mutex read_mutex; /* serialize open/read/close */ int flags; - int max_reason; - void *data; - int (*open)(struct pstore_info *psi); int (*close)(struct pstore_info *psi); - ssize_t (*read)(struct pstore_record *record); - int (*write)(struct pstore_record *record); - int (*write_user)(struct pstore_record *record, - const char __user *buf); - int (*erase)(struct pstore_record *record); + ssize_t (*read)(u64 *id, enum pstore_type_id *type, + int *count, struct timespec *time, char **buf, + bool *compressed, ssize_t *ecc_notice_size, + struct pstore_info *psi); + int (*write)(enum pstore_type_id type, + enum kmsg_dump_reason reason, u64 *id, + unsigned int part, int count, bool compressed, + size_t size, struct pstore_info *psi); + int (*write_buf)(enum pstore_type_id type, + enum kmsg_dump_reason reason, u64 *id, + unsigned int part, const char *buf, bool compressed, + size_t size, struct 
pstore_info *psi); + int (*write_buf_user)(enum pstore_type_id type, + enum kmsg_dump_reason reason, u64 *id, + unsigned int part, const char __user *buf, + bool compressed, size_t size, struct pstore_info *psi); + int (*erase)(enum pstore_type_id type, u64 id, + int count, struct timespec time, + struct pstore_info *psi); + void *data; }; -/* Supported frontends */ -#define PSTORE_FLAGS_DMESG BIT(0) -#define PSTORE_FLAGS_CONSOLE BIT(1) -#define PSTORE_FLAGS_FTRACE BIT(2) -#define PSTORE_FLAGS_PMSG BIT(3) +#define PSTORE_FLAGS_DMESG (1 << 0) +#define PSTORE_FLAGS_FRAGILE PSTORE_FLAGS_DMESG +#define PSTORE_FLAGS_CONSOLE (1 << 1) +#define PSTORE_FLAGS_FTRACE (1 << 2) +#define PSTORE_FLAGS_PMSG (1 << 3) extern int pstore_register(struct pstore_info *); extern void pstore_unregister(struct pstore_info *); - -struct pstore_ftrace_record { - unsigned long ip; - unsigned long parent_ip; - u64 ts; -}; - -/* - * ftrace related stuff: Both backends and frontends need these so expose - * them here. - */ - -#if NR_CPUS <= 2 && defined(CONFIG_ARM_THUMB) -#define PSTORE_CPU_IN_IP 0x1 -#elif NR_CPUS <= 4 && defined(CONFIG_ARM) -#define PSTORE_CPU_IN_IP 0x3 -#endif - -#define TS_CPU_SHIFT 8 -#define TS_CPU_MASK (BIT(TS_CPU_SHIFT) - 1) - -/* - * If CPU number can be stored in IP, store it there, otherwise store it in - * the time stamp. This means more timestamp resolution is available when - * the CPU can be stored in the IP. 
- */ -#ifdef PSTORE_CPU_IN_IP -static inline void -pstore_ftrace_encode_cpu(struct pstore_ftrace_record *rec, unsigned int cpu) -{ - rec->ip |= cpu; -} - -static inline unsigned int -pstore_ftrace_decode_cpu(struct pstore_ftrace_record *rec) -{ - return rec->ip & PSTORE_CPU_IN_IP; -} - -static inline u64 -pstore_ftrace_read_timestamp(struct pstore_ftrace_record *rec) -{ - return rec->ts; -} - -static inline void -pstore_ftrace_write_timestamp(struct pstore_ftrace_record *rec, u64 val) -{ - rec->ts = val; -} -#else -static inline void -pstore_ftrace_encode_cpu(struct pstore_ftrace_record *rec, unsigned int cpu) -{ - rec->ts &= ~(TS_CPU_MASK); - rec->ts |= cpu; -} - -static inline unsigned int -pstore_ftrace_decode_cpu(struct pstore_ftrace_record *rec) -{ - return rec->ts & TS_CPU_MASK; -} - -static inline u64 -pstore_ftrace_read_timestamp(struct pstore_ftrace_record *rec) -{ - return rec->ts >> TS_CPU_SHIFT; -} - -static inline void -pstore_ftrace_write_timestamp(struct pstore_ftrace_record *rec, u64 val) -{ - rec->ts = (rec->ts & TS_CPU_MASK) | (val << TS_CPU_SHIFT); -} -#endif +extern bool pstore_cannot_block_path(enum kmsg_dump_reason reason); #endif /*_LINUX_PSTORE_H*/ diff --git a/include/linux/pstore_ram.h b/include/linux/pstore_ram.h index 9f16afec72..c668c861c9 100644 --- a/include/linux/pstore_ram.h +++ b/include/linux/pstore_ram.h @@ -1,8 +1,17 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright (C) 2010 Marco Stornelli * Copyright (C) 2011 Kees Cook * Copyright (C) 2011 Google, Inc. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * */ #ifndef __LINUX_PSTORE_RAM_H__ @@ -13,21 +22,8 @@ #include #include #include -#include #include -/* - * Choose whether access to the RAM zone requires locking or not. If a zone - * can be written to from different CPUs like with ftrace for example, then - * PRZ_FLAG_NO_LOCK is used. For all other cases, locking is required. - */ -#define PRZ_FLAG_NO_LOCK BIT(0) -/* - * If a PRZ should only have a single-boot lifetime, this marks it as - * getting wiped after its contents get copied out after boot. - */ -#define PRZ_FLAG_ZAP_OLD BIT(1) - struct persistent_ram_buffer; struct rs_control; @@ -36,58 +32,16 @@ struct persistent_ram_ecc_info { int ecc_size; int symsize; int poly; - uint16_t *par; }; -/** - * struct persistent_ram_zone - Details of a persistent RAM zone (PRZ) - * used as a pstore backend - * - * @paddr: physical address of the mapped RAM area - * @size: size of mapping - * @label: unique name of this PRZ - * @type: frontend type for this PRZ - * @flags: holds PRZ_FLAGS_* bits - * - * @buffer_lock: - * locks access to @buffer "size" bytes and "start" offset - * @buffer: - * pointer to actual RAM area managed by this PRZ - * @buffer_size: - * bytes in @buffer->data (not including any trailing ECC bytes) - * - * @par_buffer: - * pointer into @buffer->data containing ECC bytes for @buffer->data - * @par_header: - * pointer into @buffer->data containing ECC bytes for @buffer header - * (i.e. 
all fields up to @data) - * @rs_decoder: - * RSLIB instance for doing ECC calculations - * @corrected_bytes: - * ECC corrected bytes accounting since boot - * @bad_blocks: - * ECC uncorrectable bytes accounting since boot - * @ecc_info: - * ECC configuration details - * - * @old_log: - * saved copy of @buffer->data prior to most recent wipe - * @old_log_size: - * bytes contained in @old_log - * - */ struct persistent_ram_zone { phys_addr_t paddr; size_t size; void *vaddr; - char *label; - enum pstore_type_id type; - u32 flags; - - raw_spinlock_t buffer_lock; struct persistent_ram_buffer *buffer; size_t buffer_size; + /* ECC correction */ char *par_buffer; char *par_header; struct rs_control *rs_decoder; @@ -101,7 +55,7 @@ struct persistent_ram_zone { struct persistent_ram_zone *persistent_ram_new(phys_addr_t start, size_t size, u32 sig, struct persistent_ram_ecc_info *ecc_info, - unsigned int memtype, u32 flags, char *label); + unsigned int memtype); void persistent_ram_free(struct persistent_ram_zone *prz); void persistent_ram_zap(struct persistent_ram_zone *prz); @@ -123,8 +77,6 @@ ssize_t persistent_ram_ecc_string(struct persistent_ram_zone *prz, * @mem_address physical memory address to contain ramoops */ -#define RAMOOPS_FLAG_FTRACE_PER_CPU BIT(0) - struct ramoops_platform_data { unsigned long mem_size; phys_addr_t mem_address; @@ -133,8 +85,7 @@ struct ramoops_platform_data { unsigned long console_size; unsigned long ftrace_size; unsigned long pmsg_size; - int max_reason; - u32 flags; + int dump_oops; struct persistent_ram_ecc_info ecc_info; }; diff --git a/include/linux/pti.h b/include/linux/pti.h index 1a941efcaa..b3ea01a319 100644 --- a/include/linux/pti.h +++ b/include/linux/pti.h @@ -1,12 +1,43 @@ -// SPDX-License-Identifier: GPL-2.0 -#ifndef _INCLUDE_PTI_H -#define _INCLUDE_PTI_H +/* + * Copyright (C) Intel 2011 + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 
as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + * + * The PTI (Parallel Trace Interface) driver directs trace data routed from + * various parts in the system out through the Intel Penwell PTI port and + * out of the mobile device for analysis with a debugging tool + * (Lauterbach, Fido). This is part of a solution for the MIPI P1149.7, + * compact JTAG, standard. + * + * This header file will allow other parts of the OS to use the + * interface to write out it's contents for debugging a mobile system. + */ -#ifdef CONFIG_PAGE_TABLE_ISOLATION -#include -#else -static inline void pti_init(void) { } -static inline void pti_finalize(void) { } -#endif +#ifndef PTI_H_ +#define PTI_H_ -#endif +/* offset for last dword of any PTI message. Part of MIPI P1149.7 */ +#define PTI_LASTDWORD_DTS 0x30 + +/* basic structure used as a write address to the PTI HW */ +struct pti_masterchannel { + u8 master; + u8 channel; +}; + +/* the following functions are defined in misc/pti.c */ +void pti_writedata(struct pti_masterchannel *mc, u8 *buf, int count); +struct pti_masterchannel *pti_request_masterchannel(u8 type, + const char *thread_name); +void pti_release_masterchannel(struct pti_masterchannel *mc); + +#endif /*PTI_H_*/ diff --git a/include/linux/ptp_classify.h b/include/linux/ptp_classify.h index ae04968a3a..a079656b61 100644 --- a/include/linux/ptp_classify.h +++ b/include/linux/ptp_classify.h @@ -1,10 +1,23 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * PTP 1588 support * * This file implements a BPF that recognizes PTP event messages. 
* * Copyright (C) 2010 OMICRON electronics GmbH + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ #ifndef _PTP_CLASSIFY_H_ @@ -31,16 +44,12 @@ #define PTP_CLASS_V2_VLAN (PTP_CLASS_V2 | PTP_CLASS_VLAN) #define PTP_CLASS_L4 (PTP_CLASS_IPV4 | PTP_CLASS_IPV6) -#define PTP_MSGTYPE_SYNC 0x0 -#define PTP_MSGTYPE_DELAY_REQ 0x1 -#define PTP_MSGTYPE_PDELAY_REQ 0x2 -#define PTP_MSGTYPE_PDELAY_RESP 0x3 - #define PTP_EV_PORT 319 #define PTP_GEN_BIT 0x08 /* indicates general message, if set in message type */ #define OFF_PTP_SOURCE_UUID 22 /* PTPv1 only */ #define OFF_PTP_SEQUENCE_ID 30 +#define OFF_PTP_CONTROL 32 /* PTPv1 only */ /* Below defines should actually be removed at some point in time. 
*/ #define IP6_HLEN 40 @@ -48,30 +57,6 @@ #define OFF_IHL 14 #define IPV4_HLEN(data) (((struct iphdr *)(data + OFF_IHL))->ihl << 2) -struct clock_identity { - u8 id[8]; -} __packed; - -struct port_identity { - struct clock_identity clock_identity; - __be16 port_number; -} __packed; - -struct ptp_header { - u8 tsmt; /* transportSpecific | messageType */ - u8 ver; /* reserved | versionPTP */ - __be16 message_length; - u8 domain_number; - u8 reserved1; - u8 flag_field[2]; - __be64 correction; - __be32 reserved2; - struct port_identity source_port_identity; - __be16 sequence_id; - u8 control; - u8 log_message_interval; -} __packed; - #if defined(CONFIG_NET_PTP_CLASSIFY) /** * ptp_classify_raw - classify a PTP packet @@ -85,67 +70,10 @@ struct ptp_header { */ unsigned int ptp_classify_raw(const struct sk_buff *skb); -/** - * ptp_parse_header - Get pointer to the PTP v2 header - * @skb: packet buffer - * @type: type of the packet (see ptp_classify_raw()) - * - * This function takes care of the VLAN, UDP, IPv4 and IPv6 headers. The length - * is checked. - * - * Note, internally skb_mac_header() is used. Make sure that the @skb is - * initialized accordingly. - * - * Return: Pointer to the ptp v2 header or NULL if not found - */ -struct ptp_header *ptp_parse_header(struct sk_buff *skb, unsigned int type); - -/** - * ptp_get_msgtype - Extract ptp message type from given header - * @hdr: ptp header - * @type: type of the packet (see ptp_classify_raw()) - * - * This function returns the message type for a given ptp header. It takes care - * of the different ptp header versions (v1 or v2). 
- * - * Return: The message type - */ -static inline u8 ptp_get_msgtype(const struct ptp_header *hdr, - unsigned int type) -{ - u8 msgtype; - - if (unlikely(type & PTP_CLASS_V1)) { - /* msg type is located at the control field for ptp v1 */ - msgtype = hdr->control; - } else { - msgtype = hdr->tsmt & 0x0f; - } - - return msgtype; -} - void __init ptp_classifier_init(void); #else static inline void ptp_classifier_init(void) { } -static inline unsigned int ptp_classify_raw(struct sk_buff *skb) -{ - return PTP_CLASS_NONE; -} -static inline struct ptp_header *ptp_parse_header(struct sk_buff *skb, - unsigned int type) -{ - return NULL; -} -static inline u8 ptp_get_msgtype(const struct ptp_header *hdr, - unsigned int type) -{ - /* The return is meaningless. The stub function would not be - * executed since no available header from ptp_parse_header. - */ - return PTP_MSGTYPE_SYNC; -} #endif #endif /* _PTP_CLASSIFY_H_ */ diff --git a/include/linux/ptp_clock_kernel.h b/include/linux/ptp_clock_kernel.h index 2e55650673..5ad54fc66c 100644 --- a/include/linux/ptp_clock_kernel.h +++ b/include/linux/ptp_clock_kernel.h @@ -1,8 +1,21 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * PTP 1588 clock support * * Copyright (C) 2010 OMICRON electronics GmbH + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 
*/ #ifndef _PTP_CLOCK_KERNEL_H_ @@ -11,23 +24,7 @@ #include #include #include -#include -#include -#define PTP_CLOCK_NAME_LEN 32 -/** - * struct ptp_clock_request - request PTP clock event - * - * @type: The type of the request. - * EXTTS: Configure external trigger timestamping - * PEROUT: Configure periodic output signal (e.g. PPS) - * PPS: trigger internal PPS event for input - * into kernel PPS subsystem - * @extts: describes configuration for external trigger timestamping. - * This is only valid when event == PTP_CLK_REQ_EXTTS. - * @perout: describes configuration for periodic output. - * This is only valid when event == PTP_CLK_REQ_PEROUT. - */ struct ptp_clock_request { enum { @@ -42,17 +39,8 @@ struct ptp_clock_request { }; struct system_device_crosststamp; - /** - * struct ptp_system_timestamp - system time corresponding to a PHC timestamp - */ -struct ptp_system_timestamp { - struct timespec64 pre_ts; - struct timespec64 post_ts; -}; - -/** - * struct ptp_clock_info - describes a PTP hardware clock + * struct ptp_clock_info - decribes a PTP hardware clock * * @owner: The clock driver should set to THIS_MODULE. * @name: A short "friendly name" to identify the clock and to @@ -70,36 +58,16 @@ struct ptp_system_timestamp { * * clock operations * - * @adjfine: Adjusts the frequency of the hardware clock. - * parameter scaled_ppm: Desired frequency offset from - * nominal frequency in parts per million, but with a - * 16 bit binary fractional field. - * * @adjfreq: Adjusts the frequency of the hardware clock. - * This method is deprecated. New drivers should implement - * the @adjfine method instead. * parameter delta: Desired frequency offset from nominal frequency * in parts per billion * - * @adjphase: Adjusts the phase offset of the hardware clock. - * parameter delta: Desired change in nanoseconds. - * * @adjtime: Shifts the time of the hardware clock. * parameter delta: Desired change in nanoseconds. 
* * @gettime64: Reads the current time from the hardware clock. - * This method is deprecated. New drivers should implement - * the @gettimex64 method instead. * parameter ts: Holds the result. * - * @gettimex64: Reads the current time from the hardware clock and optionally - * also the system clock. - * parameter ts: Holds the PHC timestamp. - * parameter sts: If not NULL, it holds a pair of timestamps from - * the system clock. The first reading is made right before - * reading the lowest bits of the PHC timestamp and the second - * reading immediately follows that. - * * @getcrosststamp: Reads the current time from the hardware clock and * system clock simultaneously. * parameter cts: Contains timestamp (device,system) pair, @@ -124,11 +92,6 @@ struct ptp_system_timestamp { * parameter func: the desired function to use. * parameter chan: the function channel index to use. * - * @do_aux_work: Request driver to perform auxiliary (periodic) operations - * Driver should return delay of the next auxiliary work - * scheduling time (>=0) or negative value in case further - * scheduling is not required. - * * Drivers should embed their ptp_clock_info within a private * structure, obtaining a reference to it using container_of(). 
* @@ -137,7 +100,7 @@ struct ptp_system_timestamp { struct ptp_clock_info { struct module *owner; - char name[PTP_CLOCK_NAME_LEN]; + char name[16]; s32 max_adj; int n_alarm; int n_ext_ts; @@ -145,13 +108,9 @@ struct ptp_clock_info { int n_pins; int pps; struct ptp_pin_desc *pin_config; - int (*adjfine)(struct ptp_clock_info *ptp, long scaled_ppm); int (*adjfreq)(struct ptp_clock_info *ptp, s32 delta); - int (*adjphase)(struct ptp_clock_info *ptp, s32 phase); int (*adjtime)(struct ptp_clock_info *ptp, s64 delta); int (*gettime64)(struct ptp_clock_info *ptp, struct timespec64 *ts); - int (*gettimex64)(struct ptp_clock_info *ptp, struct timespec64 *ts, - struct ptp_system_timestamp *sts); int (*getcrosststamp)(struct ptp_clock_info *ptp, struct system_device_crosststamp *cts); int (*settime64)(struct ptp_clock_info *p, const struct timespec64 *ts); @@ -159,11 +118,34 @@ struct ptp_clock_info { struct ptp_clock_request *request, int on); int (*verify)(struct ptp_clock_info *ptp, unsigned int pin, enum ptp_pin_function func, unsigned int chan); - long (*do_aux_work)(struct ptp_clock_info *ptp); }; struct ptp_clock; +/** + * ptp_clock_register() - register a PTP hardware clock driver + * + * @info: Structure describing the new clock. + * @parent: Pointer to the parent device of the new clock. + * + * Returns a valid pointer on success or PTR_ERR on failure. If PHC + * support is missing at the configuration level, this function + * returns NULL, and drivers are expected to gracefully handle that + * case separately. + */ + +extern struct ptp_clock *ptp_clock_register(struct ptp_clock_info *info, + struct device *parent); + +/** + * ptp_clock_unregister() - unregister a PTP hardware clock driver + * + * @ptp: The clock to remove from service. 
+ */ + +extern int ptp_clock_unregister(struct ptp_clock *ptp); + + enum ptp_clock_events { PTP_CLOCK_ALARM, PTP_CLOCK_EXTTS, @@ -189,57 +171,6 @@ struct ptp_clock_event { }; }; -/** - * scaled_ppm_to_ppb() - convert scaled ppm to ppb - * - * @ppm: Parts per million, but with a 16 bit binary fractional field - */ -static inline long scaled_ppm_to_ppb(long ppm) -{ - /* - * The 'freq' field in the 'struct timex' is in parts per - * million, but with a 16 bit binary fractional field. - * - * We want to calculate - * - * ppb = scaled_ppm * 1000 / 2^16 - * - * which simplifies to - * - * ppb = scaled_ppm * 125 / 2^13 - */ - s64 ppb = 1 + ppm; - - ppb *= 125; - ppb >>= 13; - return (long)ppb; -} - -#if IS_ENABLED(CONFIG_PTP_1588_CLOCK) - -/** - * ptp_clock_register() - register a PTP hardware clock driver - * - * @info: Structure describing the new clock. - * @parent: Pointer to the parent device of the new clock. - * - * Returns a valid pointer on success or PTR_ERR on failure. If PHC - * support is missing at the configuration level, this function - * returns NULL, and drivers are expected to gracefully handle that - * case separately. - */ - -extern struct ptp_clock *ptp_clock_register(struct ptp_clock_info *info, - struct device *parent); - -/** - * ptp_clock_unregister() - unregister a PTP hardware clock driver - * - * @ptp: The clock to remove from service. - */ - -extern int ptp_clock_unregister(struct ptp_clock *ptp); - /** * ptp_clock_event() - notify the PTP layer about an event * @@ -261,12 +192,6 @@ extern int ptp_clock_index(struct ptp_clock *ptp); /** * ptp_find_pin() - obtain the pin index of a given auxiliary function * - * The caller must hold ptp_clock::pincfg_mux. Drivers do not have - * access to that mutex as ptp_clock is an opaque type. However, the - * core code acquires the mutex before invoking the driver's - * ptp_clock_info::enable() callback, and so drivers may call this - * function from that context. 
- * * @ptp: The clock obtained from ptp_clock_register(). * @func: One of the ptp_pin_function enumerated values. * @chan: The particular functional channel to find. @@ -277,102 +202,4 @@ extern int ptp_clock_index(struct ptp_clock *ptp); int ptp_find_pin(struct ptp_clock *ptp, enum ptp_pin_function func, unsigned int chan); -/** - * ptp_find_pin_unlocked() - wrapper for ptp_find_pin() - * - * This function acquires the ptp_clock::pincfg_mux mutex before - * invoking ptp_find_pin(). Instead of using this function, drivers - * should most likely call ptp_find_pin() directly from their - * ptp_clock_info::enable() method. - * - */ - -int ptp_find_pin_unlocked(struct ptp_clock *ptp, - enum ptp_pin_function func, unsigned int chan); - -/** - * ptp_schedule_worker() - schedule ptp auxiliary work - * - * @ptp: The clock obtained from ptp_clock_register(). - * @delay: number of jiffies to wait before queuing - * See kthread_queue_delayed_work() for more info. - */ - -int ptp_schedule_worker(struct ptp_clock *ptp, unsigned long delay); - -/** - * ptp_cancel_worker_sync() - cancel ptp auxiliary clock - * - * @ptp: The clock obtained from ptp_clock_register(). 
- */ -void ptp_cancel_worker_sync(struct ptp_clock *ptp); - -#else -static inline struct ptp_clock *ptp_clock_register(struct ptp_clock_info *info, - struct device *parent) -{ return NULL; } -static inline int ptp_clock_unregister(struct ptp_clock *ptp) -{ return 0; } -static inline void ptp_clock_event(struct ptp_clock *ptp, - struct ptp_clock_event *event) -{ } -static inline int ptp_clock_index(struct ptp_clock *ptp) -{ return -1; } -static inline int ptp_find_pin(struct ptp_clock *ptp, - enum ptp_pin_function func, unsigned int chan) -{ return -1; } -static inline int ptp_schedule_worker(struct ptp_clock *ptp, - unsigned long delay) -{ return -EOPNOTSUPP; } -static inline void ptp_cancel_worker_sync(struct ptp_clock *ptp) -{ } -#endif - -#if IS_BUILTIN(CONFIG_PTP_1588_CLOCK) -/* - * These are called by the network core, and don't work if PTP is in - * a loadable module. - */ - -/** - * ptp_get_vclocks_index() - get all vclocks index on pclock, and - * caller is responsible to free memory - * of vclock_index - * - * @pclock_index: phc index of ptp pclock. - * @vclock_index: pointer to pointer of vclock index. - * - * return number of vclocks. - */ -int ptp_get_vclocks_index(int pclock_index, int **vclock_index); - -/** - * ptp_convert_timestamp() - convert timestamp to a ptp vclock time - * - * @hwtstamps: skb_shared_hwtstamps structure pointer - * @vclock_index: phc index of ptp vclock. 
- */ -void ptp_convert_timestamp(struct skb_shared_hwtstamps *hwtstamps, - int vclock_index); -#else -static inline int ptp_get_vclocks_index(int pclock_index, int **vclock_index) -{ return 0; } -static inline void ptp_convert_timestamp(struct skb_shared_hwtstamps *hwtstamps, - int vclock_index) -{ } - -#endif - -static inline void ptp_read_system_prets(struct ptp_system_timestamp *sts) -{ - if (sts) - ktime_get_real_ts64(&sts->pre_ts); -} - -static inline void ptp_read_system_postts(struct ptp_system_timestamp *sts) -{ - if (sts) - ktime_get_real_ts64(&sts->post_ts); -} - #endif diff --git a/include/linux/ptr_ring.h b/include/linux/ptr_ring.h index 808f9d3ee5..6c70444da3 100644 --- a/include/linux/ptr_ring.h +++ b/include/linux/ptr_ring.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * Definitions for the 'struct ptr_ring' datastructure. * @@ -7,6 +6,11 @@ * * Copyright (C) 2016 Red Hat, Inc. * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + * * This is a limited-size FIFO maintaining pointers in FIFO order, with * one CPU producing entries and another consuming entries from a FIFO. 
* @@ -22,29 +26,26 @@ #include #include #include +#include #include -#include #include #endif struct ptr_ring { int producer ____cacheline_aligned_in_smp; spinlock_t producer_lock; - int consumer_head ____cacheline_aligned_in_smp; /* next valid entry */ - int consumer_tail; /* next entry to invalidate */ + int consumer ____cacheline_aligned_in_smp; spinlock_t consumer_lock; /* Shared consumer/producer data */ /* Read-only by both the producer and the consumer */ int size ____cacheline_aligned_in_smp; /* max entries in queue */ - int batch; /* number of entries to consume in a batch */ void **queue; }; /* Note: callers invoking this in a loop must use a compiler barrier, - * for example cpu_relax(). - * - * NB: this is unlike __ptr_ring_empty in that callers must hold producer_lock: - * see e.g. ptr_ring_full. + * for example cpu_relax(). If ring is ever resized, callers must hold + * producer_lock - see e.g. ptr_ring_full. Otherwise, if callers don't hold + * producer_lock, the next call to __ptr_ring_produce may fail. */ static inline bool __ptr_ring_full(struct ptr_ring *r) { @@ -98,19 +99,13 @@ static inline bool ptr_ring_full_bh(struct ptr_ring *r) /* Note: callers invoking this in a loop must use a compiler barrier, * for example cpu_relax(). Callers must hold producer_lock. - * Callers are responsible for making sure pointer that is being queued - * points to a valid data. */ static inline int __ptr_ring_produce(struct ptr_ring *r, void *ptr) { if (unlikely(!r->size) || r->queue[r->producer]) return -ENOSPC; - /* Make sure the pointer we are storing points to a valid data. */ - /* Pairs with the dependency ordering in __ptr_ring_consume. 
*/ - smp_wmb(); - - WRITE_ONCE(r->queue[r->producer++], ptr); + r->queue[r->producer++] = ptr; if (unlikely(r->producer >= r->size)) r->producer = 0; return 0; @@ -166,36 +161,26 @@ static inline int ptr_ring_produce_bh(struct ptr_ring *r, void *ptr) return ret; } +/* Note: callers invoking this in a loop must use a compiler barrier, + * for example cpu_relax(). Callers must take consumer_lock + * if they dereference the pointer - see e.g. PTR_RING_PEEK_CALL. + * If ring is never resized, and if the pointer is merely + * tested, there's no need to take the lock - see e.g. __ptr_ring_empty. + */ static inline void *__ptr_ring_peek(struct ptr_ring *r) { if (likely(r->size)) - return READ_ONCE(r->queue[r->consumer_head]); + return r->queue[r->consumer]; return NULL; } -/* - * Test ring empty status without taking any locks. - * - * NB: This is only safe to call if ring is never resized. - * - * However, if some other CPU consumes ring entries at the same time, the value - * returned is not guaranteed to be correct. - * - * In this case - to avoid incorrectly detecting the ring - * as empty - the CPU consuming the ring entries is responsible - * for either consuming all ring entries until the ring is empty, - * or synchronizing with some other CPU and causing it to - * re-test __ptr_ring_empty and/or consume the ring enteries - * after the synchronization point. - * - * Note: callers invoking this in a loop must use a compiler barrier, - * for example cpu_relax(). +/* Note: callers invoking this in a loop must use a compiler barrier, + * for example cpu_relax(). Callers must take consumer_lock + * if the ring is ever resized - see e.g. ptr_ring_empty. 
*/ static inline bool __ptr_ring_empty(struct ptr_ring *r) { - if (likely(r->size)) - return !r->queue[READ_ONCE(r->consumer_head)]; - return true; + return !__ptr_ring_peek(r); } static inline bool ptr_ring_empty(struct ptr_ring *r) @@ -246,56 +231,15 @@ static inline bool ptr_ring_empty_bh(struct ptr_ring *r) /* Must only be called after __ptr_ring_peek returned !NULL */ static inline void __ptr_ring_discard_one(struct ptr_ring *r) { - /* Fundamentally, what we want to do is update consumer - * index and zero out the entry so producer can reuse it. - * Doing it naively at each consume would be as simple as: - * consumer = r->consumer; - * r->queue[consumer++] = NULL; - * if (unlikely(consumer >= r->size)) - * consumer = 0; - * r->consumer = consumer; - * but that is suboptimal when the ring is full as producer is writing - * out new entries in the same cache line. Defer these updates until a - * batch of entries has been consumed. - */ - /* Note: we must keep consumer_head valid at all times for __ptr_ring_empty - * to work correctly. - */ - int consumer_head = r->consumer_head; - int head = consumer_head++; - - /* Once we have processed enough entries invalidate them in - * the ring all at once so producer can reuse their space in the ring. - * We also do this when we reach end of the ring - not mandatory - * but helps keep the implementation simple. - */ - if (unlikely(consumer_head - r->consumer_tail >= r->batch || - consumer_head >= r->size)) { - /* Zero out entries in the reverse order: this way we touch the - * cache line that producer might currently be reading the last; - * producer won't make progress and touch other cache lines - * besides the first one until we write out all entries. 
- */ - while (likely(head >= r->consumer_tail)) - r->queue[head--] = NULL; - r->consumer_tail = consumer_head; - } - if (unlikely(consumer_head >= r->size)) { - consumer_head = 0; - r->consumer_tail = 0; - } - /* matching READ_ONCE in __ptr_ring_empty for lockless tests */ - WRITE_ONCE(r->consumer_head, consumer_head); + r->queue[r->consumer++] = NULL; + if (unlikely(r->consumer >= r->size)) + r->consumer = 0; } static inline void *__ptr_ring_consume(struct ptr_ring *r) { void *ptr; - /* The READ_ONCE in __ptr_ring_peek guarantees that anyone - * accessing data through the pointer is up to date. Pairs - * with smp_wmb in __ptr_ring_produce. - */ ptr = __ptr_ring_peek(r); if (ptr) __ptr_ring_discard_one(r); @@ -303,22 +247,6 @@ static inline void *__ptr_ring_consume(struct ptr_ring *r) return ptr; } -static inline int __ptr_ring_consume_batched(struct ptr_ring *r, - void **array, int n) -{ - void *ptr; - int i; - - for (i = 0; i < n; i++) { - ptr = __ptr_ring_consume(r); - if (!ptr) - break; - array[i] = ptr; - } - - return i; -} - /* * Note: resize (below) nests producer lock within consumer lock, so if you * call this in interrupt or BH context, you must disable interrupts/BH when @@ -369,55 +297,6 @@ static inline void *ptr_ring_consume_bh(struct ptr_ring *r) return ptr; } -static inline int ptr_ring_consume_batched(struct ptr_ring *r, - void **array, int n) -{ - int ret; - - spin_lock(&r->consumer_lock); - ret = __ptr_ring_consume_batched(r, array, n); - spin_unlock(&r->consumer_lock); - - return ret; -} - -static inline int ptr_ring_consume_batched_irq(struct ptr_ring *r, - void **array, int n) -{ - int ret; - - spin_lock_irq(&r->consumer_lock); - ret = __ptr_ring_consume_batched(r, array, n); - spin_unlock_irq(&r->consumer_lock); - - return ret; -} - -static inline int ptr_ring_consume_batched_any(struct ptr_ring *r, - void **array, int n) -{ - unsigned long flags; - int ret; - - spin_lock_irqsave(&r->consumer_lock, flags); - ret = 
__ptr_ring_consume_batched(r, array, n); - spin_unlock_irqrestore(&r->consumer_lock, flags); - - return ret; -} - -static inline int ptr_ring_consume_batched_bh(struct ptr_ring *r, - void **array, int n) -{ - int ret; - - spin_lock_bh(&r->consumer_lock); - ret = __ptr_ring_consume_batched(r, array, n); - spin_unlock_bh(&r->consumer_lock); - - return ret; -} - /* Cast to structure type and call a function without discarding from FIFO. * Function must return a value. * Callers must take consumer_lock. @@ -461,27 +340,9 @@ static inline int ptr_ring_consume_batched_bh(struct ptr_ring *r, __PTR_RING_PEEK_CALL_v; \ }) -/* Not all gfp_t flags (besides GFP_KERNEL) are allowed. See - * documentation for vmalloc for which of them are legal. - */ -static inline void **__ptr_ring_init_queue_alloc(unsigned int size, gfp_t gfp) +static inline void **__ptr_ring_init_queue_alloc(int size, gfp_t gfp) { - if (size > KMALLOC_MAX_SIZE / sizeof(void *)) - return NULL; - return kvmalloc_array(size, sizeof(void *), gfp | __GFP_ZERO); -} - -static inline void __ptr_ring_set_size(struct ptr_ring *r, int size) -{ - r->size = size; - r->batch = SMP_CACHE_BYTES * 2 / sizeof(*(r->queue)); - /* We need to set batch at least to 1 to make logic - * in __ptr_ring_discard_one work correctly. - * Batching too much (because ring is small) would cause a lot of - * burstiness. Needs tuning, for now disable batching. - */ - if (r->batch > r->size / 2 || !r->batch) - r->batch = 1; + return kzalloc(ALIGN(size * sizeof(void *), SMP_CACHE_BYTES), gfp); } static inline int ptr_ring_init(struct ptr_ring *r, int size, gfp_t gfp) @@ -490,71 +351,14 @@ static inline int ptr_ring_init(struct ptr_ring *r, int size, gfp_t gfp) if (!r->queue) return -ENOMEM; - __ptr_ring_set_size(r, size); - r->producer = r->consumer_head = r->consumer_tail = 0; + r->size = size; + r->producer = r->consumer = 0; spin_lock_init(&r->producer_lock); spin_lock_init(&r->consumer_lock); return 0; } -/* - * Return entries into ring. 
Destroy entries that don't fit. - * - * Note: this is expected to be a rare slow path operation. - * - * Note: producer lock is nested within consumer lock, so if you - * resize you must make sure all uses nest correctly. - * In particular if you consume ring in interrupt or BH context, you must - * disable interrupts/BH when doing so. - */ -static inline void ptr_ring_unconsume(struct ptr_ring *r, void **batch, int n, - void (*destroy)(void *)) -{ - unsigned long flags; - int head; - - spin_lock_irqsave(&r->consumer_lock, flags); - spin_lock(&r->producer_lock); - - if (!r->size) - goto done; - - /* - * Clean out buffered entries (for simplicity). This way following code - * can test entries for NULL and if not assume they are valid. - */ - head = r->consumer_head - 1; - while (likely(head >= r->consumer_tail)) - r->queue[head--] = NULL; - r->consumer_tail = r->consumer_head; - - /* - * Go over entries in batch, start moving head back and copy entries. - * Stop when we run into previously unconsumed entries. - */ - while (n) { - head = r->consumer_head - 1; - if (head < 0) - head = r->size - 1; - if (r->queue[head]) { - /* This batch entry will have to be destroyed. */ - goto done; - } - r->queue[head] = batch[--n]; - r->consumer_tail = head; - /* matching READ_ONCE in __ptr_ring_empty for lockless tests */ - WRITE_ONCE(r->consumer_head, head); - } - -done: - /* Destroy all entries left in the batch. 
*/ - while (n) - destroy(batch[--n]); - spin_unlock(&r->producer_lock); - spin_unlock_irqrestore(&r->consumer_lock, flags); -} - static inline void **__ptr_ring_swap_queue(struct ptr_ring *r, void **queue, int size, gfp_t gfp, void (*destroy)(void *)) @@ -569,12 +373,9 @@ static inline void **__ptr_ring_swap_queue(struct ptr_ring *r, void **queue, else if (destroy) destroy(ptr); - if (producer >= size) - producer = 0; - __ptr_ring_set_size(r, size); + r->size = size; r->producer = producer; - r->consumer_head = 0; - r->consumer_tail = 0; + r->consumer = 0; old = r->queue; r->queue = queue; @@ -605,7 +406,7 @@ static inline int ptr_ring_resize(struct ptr_ring *r, int size, gfp_t gfp, spin_unlock(&(r)->producer_lock); spin_unlock_irqrestore(&(r)->consumer_lock, flags); - kvfree(old); + kfree(old); return 0; } @@ -616,8 +417,7 @@ static inline int ptr_ring_resize(struct ptr_ring *r, int size, gfp_t gfp, * In particular if you consume ring in interrupt or BH context, you must * disable interrupts/BH when doing so. 
*/ -static inline int ptr_ring_resize_multiple(struct ptr_ring **rings, - unsigned int nrings, +static inline int ptr_ring_resize_multiple(struct ptr_ring **rings, int nrings, int size, gfp_t gfp, void (*destroy)(void *)) { @@ -625,7 +425,7 @@ static inline int ptr_ring_resize_multiple(struct ptr_ring **rings, void ***queues; int i; - queues = kmalloc_array(nrings, sizeof(*queues), gfp); + queues = kmalloc(nrings * sizeof *queues, gfp); if (!queues) goto noqueues; @@ -645,7 +445,7 @@ static inline int ptr_ring_resize_multiple(struct ptr_ring **rings, } for (i = 0; i < nrings; ++i) - kvfree(queues[i]); + kfree(queues[i]); kfree(queues); @@ -653,7 +453,7 @@ static inline int ptr_ring_resize_multiple(struct ptr_ring **rings, nomem: while (--i >= 0) - kvfree(queues[i]); + kfree(queues[i]); kfree(queues); @@ -668,7 +468,7 @@ static inline void ptr_ring_cleanup(struct ptr_ring *r, void (*destroy)(void *)) if (destroy) while ((ptr = ptr_ring_consume(r))) destroy(ptr); - kvfree(r->queue); + kfree(r->queue); } #endif /* _LINUX_PTR_RING_H */ diff --git a/include/linux/ptrace.h b/include/linux/ptrace.h index b5ebf6c012..e0e539321a 100644 --- a/include/linux/ptrace.h +++ b/include/linux/ptrace.h @@ -1,21 +1,12 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_PTRACE_H #define _LINUX_PTRACE_H #include /* For unlikely. */ #include /* For struct task_struct. */ -#include /* For send_sig(), same_thread_group(), etc. */ #include /* for IS_ERR_VALUE */ #include /* For BUG_ON. */ #include /* For task_active_pid_ns. 
*/ #include -#include - -/* Add sp to seccomp_data, as seccomp is user API, we don't want to modify it */ -struct syscall_info { - __u64 sp; - struct seccomp_data data; -}; extern int ptrace_access_vm(struct task_struct *tsk, unsigned long addr, void *buf, int len, unsigned int gup_flags); @@ -62,15 +53,14 @@ extern int ptrace_request(struct task_struct *child, long request, unsigned long addr, unsigned long data); extern void ptrace_notify(int exit_code); extern void __ptrace_link(struct task_struct *child, - struct task_struct *new_parent, - const struct cred *ptracer_cred); + struct task_struct *new_parent); extern void __ptrace_unlink(struct task_struct *child); extern void exit_ptrace(struct task_struct *tracer, struct list_head *dead); #define PTRACE_MODE_READ 0x01 #define PTRACE_MODE_ATTACH 0x02 #define PTRACE_MODE_NOAUDIT 0x04 -#define PTRACE_MODE_FSCREDS 0x08 -#define PTRACE_MODE_REALCREDS 0x10 +#define PTRACE_MODE_FSCREDS 0x08 +#define PTRACE_MODE_REALCREDS 0x10 /* shorthands for READ/ATTACH and FSCREDS/REALCREDS combinations */ #define PTRACE_MODE_READ_FSCREDS (PTRACE_MODE_READ | PTRACE_MODE_FSCREDS) @@ -171,7 +161,7 @@ static inline void ptrace_event(int event, unsigned long message) * * Check whether @event is enabled and, if so, report @event and @pid * to the ptrace parent. @pid is reported as the pid_t seen from the - * ptrace parent's pid namespace. + * the ptrace parent's pid namespace. * * Called without locks. 
*/ @@ -215,15 +205,15 @@ static inline void ptrace_init_task(struct task_struct *child, bool ptrace) if (unlikely(ptrace) && current->ptrace) { child->ptrace = current->ptrace; - __ptrace_link(child, current->parent, current->ptracer_cred); + __ptrace_link(child, current->parent); if (child->ptrace & PT_SEIZED) task_set_jobctl_pending(child, JOBCTL_TRAP_STOP); else sigaddset(&child->pending.signal, SIGSTOP); + + set_tsk_thread_flag(child, TIF_SIGPENDING); } - else - child->ptracer_cred = NULL; } /** @@ -343,19 +333,15 @@ static inline void user_enable_block_step(struct task_struct *task) extern void user_enable_block_step(struct task_struct *); #endif /* arch_has_block_step */ -#ifdef ARCH_HAS_USER_SINGLE_STEP_REPORT -extern void user_single_step_report(struct pt_regs *regs); +#ifdef ARCH_HAS_USER_SINGLE_STEP_INFO +extern void user_single_step_siginfo(struct task_struct *tsk, + struct pt_regs *regs, siginfo_t *info); #else -static inline void user_single_step_report(struct pt_regs *regs) +static inline void user_single_step_siginfo(struct task_struct *tsk, + struct pt_regs *regs, siginfo_t *info) { - kernel_siginfo_t info; - clear_siginfo(&info); - info.si_signo = SIGTRAP; - info.si_errno = 0; - info.si_code = SI_USER; - info.si_pid = 0; - info.si_uid = 0; - force_sig_info(&info); + memset(info, 0, sizeof(*info)); + info->si_signo = SIGTRAP; } #endif @@ -401,6 +387,10 @@ static inline void user_single_step_report(struct pt_regs *regs) #define current_pt_regs() task_pt_regs(current) #endif +#ifndef ptrace_signal_deliver +#define ptrace_signal_deliver() ((void)0) +#endif + /* * unlike current_pt_regs(), this one is equal to task_pt_regs(current) * on *all* architectures; the only reason to have a per-arch definition @@ -414,7 +404,8 @@ static inline void user_single_step_report(struct pt_regs *regs) #define current_user_stack_pointer() user_stack_pointer(current_pt_regs()) #endif -extern int task_current_syscall(struct task_struct *target, struct syscall_info *info); 
+extern int task_current_syscall(struct task_struct *target, long *callno, + unsigned long args[6], unsigned int maxargs, + unsigned long *sp, unsigned long *pc); -extern void sigaction_compat_abi(struct k_sigaction *act, struct k_sigaction *oact); #endif diff --git a/include/linux/pvclock_gtod.h b/include/linux/pvclock_gtod.h index f63549581f..a71d2dbd36 100644 --- a/include/linux/pvclock_gtod.h +++ b/include/linux/pvclock_gtod.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _PVCLOCK_GTOD_H #define _PVCLOCK_GTOD_H diff --git a/include/linux/pwm.h b/include/linux/pwm.h index 725c9b784e..2c6c5114c0 100644 --- a/include/linux/pwm.h +++ b/include/linux/pwm.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef __LINUX_PWM_H #define __LINUX_PWM_H @@ -39,7 +38,7 @@ enum pwm_polarity { * current PWM hardware state. */ struct pwm_args { - u64 period; + unsigned int period; enum pwm_polarity polarity; }; @@ -54,17 +53,12 @@ enum { * @duty_cycle: PWM duty cycle (in nanoseconds) * @polarity: PWM polarity * @enabled: PWM enabled status - * @usage_power: If set, the PWM driver is only required to maintain the power - * output but has more freedom regarding signal form. - * If supported, the signal can be optimized, for example to - * improve EMI by phase shifting individual channels. 
*/ struct pwm_state { - u64 period; - u64 duty_cycle; + unsigned int period; + unsigned int duty_cycle; enum pwm_polarity polarity; bool enabled; - bool usage_power; }; /** @@ -76,8 +70,7 @@ struct pwm_state { * @chip: PWM chip providing this PWM device * @chip_data: chip-private data associated with the PWM device * @args: PWM arguments - * @state: last applied state - * @last: last implemented state (for PWM_DEBUG) + * @state: curent PWM channel state */ struct pwm_device { const char *label; @@ -89,18 +82,12 @@ struct pwm_device { struct pwm_args args; struct pwm_state state; - struct pwm_state last; }; /** * pwm_get_state() - retrieve the current PWM state * @pwm: PWM device * @state: state to fill with the current PWM state - * - * The returned PWM state represents the state that was applied by a previous call to - * pwm_apply_state(). Drivers may have to slightly tweak that state before programming it to - * hardware. If pwm_apply_state() was never called, this returns either the current hardware - * state (if supported) or the default settings. 
*/ static inline void pwm_get_state(const struct pwm_device *pwm, struct pwm_state *state) @@ -117,13 +104,13 @@ static inline bool pwm_is_enabled(const struct pwm_device *pwm) return state.enabled; } -static inline void pwm_set_period(struct pwm_device *pwm, u64 period) +static inline void pwm_set_period(struct pwm_device *pwm, unsigned int period) { if (pwm) pwm->state.period = period; } -static inline u64 pwm_get_period(const struct pwm_device *pwm) +static inline unsigned int pwm_get_period(const struct pwm_device *pwm) { struct pwm_state state; @@ -138,7 +125,7 @@ static inline void pwm_set_duty_cycle(struct pwm_device *pwm, unsigned int duty) pwm->state.duty_cycle = duty; } -static inline u64 pwm_get_duty_cycle(const struct pwm_device *pwm) +static inline unsigned int pwm_get_duty_cycle(const struct pwm_device *pwm) { struct pwm_state state; @@ -193,7 +180,6 @@ static inline void pwm_init_state(const struct pwm_device *pwm, state->period = args.period; state->polarity = args.polarity; state->duty_cycle = 0; - state->usage_power = false; } /** @@ -255,61 +241,68 @@ pwm_set_relative_duty_cycle(struct pwm_state *state, unsigned int duty_cycle, * struct pwm_ops - PWM controller operations * @request: optional hook for requesting a PWM * @free: optional hook for freeing a PWM + * @config: configure duty cycles and period length for this PWM + * @set_polarity: configure the polarity of this PWM * @capture: capture and report PWM signal - * @apply: atomically apply a new PWM config + * @enable: enable PWM output toggling + * @disable: disable PWM output toggling + * @apply: atomically apply a new PWM config. The state argument + * should be adjusted with the real hardware config (if the + * approximate the period or duty_cycle value, state should + * reflect it) * @get_state: get the current PWM state. This function is only * called once per PWM device when the PWM chip is * registered. 
+ * @dbg_show: optional routine to show contents in debugfs * @owner: helps prevent removal of modules exporting active PWMs - * @config: configure duty cycles and period length for this PWM - * @set_polarity: configure the polarity of this PWM - * @enable: enable PWM output toggling - * @disable: disable PWM output toggling */ struct pwm_ops { int (*request)(struct pwm_chip *chip, struct pwm_device *pwm); void (*free)(struct pwm_chip *chip, struct pwm_device *pwm); - int (*capture)(struct pwm_chip *chip, struct pwm_device *pwm, - struct pwm_capture *result, unsigned long timeout); - int (*apply)(struct pwm_chip *chip, struct pwm_device *pwm, - const struct pwm_state *state); - void (*get_state)(struct pwm_chip *chip, struct pwm_device *pwm, - struct pwm_state *state); - struct module *owner; - - /* Only used by legacy drivers */ int (*config)(struct pwm_chip *chip, struct pwm_device *pwm, int duty_ns, int period_ns); int (*set_polarity)(struct pwm_chip *chip, struct pwm_device *pwm, enum pwm_polarity polarity); + int (*capture)(struct pwm_chip *chip, struct pwm_device *pwm, + struct pwm_capture *result, unsigned long timeout); int (*enable)(struct pwm_chip *chip, struct pwm_device *pwm); void (*disable)(struct pwm_chip *chip, struct pwm_device *pwm); + int (*apply)(struct pwm_chip *chip, struct pwm_device *pwm, + struct pwm_state *state); + void (*get_state)(struct pwm_chip *chip, struct pwm_device *pwm, + struct pwm_state *state); +#ifdef CONFIG_DEBUG_FS + void (*dbg_show)(struct pwm_chip *chip, struct seq_file *s); +#endif + struct module *owner; }; /** * struct pwm_chip - abstract a PWM controller * @dev: device providing the PWMs + * @list: list node for internal use * @ops: callbacks for this PWM controller * @base: number of first PWM controlled by this chip * @npwm: number of PWMs controlled by this chip + * @pwms: array of PWM devices allocated by the framework * @of_xlate: request a PWM device given a device tree PWM specifier * @of_pwm_n_cells: number of 
cells expected in the device tree PWM specifier - * @list: list node for internal use - * @pwms: array of PWM devices allocated by the framework + * @can_sleep: must be true if the .config(), .enable() or .disable() + * operations may sleep */ struct pwm_chip { struct device *dev; + struct list_head list; const struct pwm_ops *ops; int base; unsigned int npwm; + struct pwm_device *pwms; + struct pwm_device * (*of_xlate)(struct pwm_chip *pc, const struct of_phandle_args *args); unsigned int of_pwm_n_cells; - - /* only used internally by the PWM framework */ - struct list_head list; - struct pwm_device *pwms; + bool can_sleep; }; /** @@ -326,7 +319,7 @@ struct pwm_capture { /* PWM user APIs */ struct pwm_device *pwm_request(int pwm_id, const char *label); void pwm_free(struct pwm_device *pwm); -int pwm_apply_state(struct pwm_device *pwm, const struct pwm_state *state); +int pwm_apply_state(struct pwm_device *pwm, struct pwm_state *state); int pwm_adjust_config(struct pwm_device *pwm); /** @@ -357,6 +350,42 @@ static inline int pwm_config(struct pwm_device *pwm, int duty_ns, return pwm_apply_state(pwm, &state); } +/** + * pwm_set_polarity() - configure the polarity of a PWM signal + * @pwm: PWM device + * @polarity: new polarity of the PWM signal + * + * Note that the polarity cannot be configured while the PWM device is + * enabled. + * + * Returns: 0 on success or a negative error code on failure. + */ +static inline int pwm_set_polarity(struct pwm_device *pwm, + enum pwm_polarity polarity) +{ + struct pwm_state state; + + if (!pwm) + return -EINVAL; + + pwm_get_state(pwm, &state); + if (state.polarity == polarity) + return 0; + + /* + * Changing the polarity of a running PWM without adjusting the + * dutycycle/period value is a bit risky (can introduce glitches). + * Return -EBUSY in this case. + * Note that this is allowed when using pwm_apply_state() because + * the user specifies all the parameters. 
+ */ + if (state.enabled) + return -EBUSY; + + state.polarity = polarity; + return pwm_apply_state(pwm, &state); +} + /** * pwm_enable() - start a PWM output toggling * @pwm: PWM device @@ -403,11 +432,10 @@ int pwm_capture(struct pwm_device *pwm, struct pwm_capture *result, int pwm_set_chip_data(struct pwm_device *pwm, void *data); void *pwm_get_chip_data(struct pwm_device *pwm); +int pwmchip_add_with_polarity(struct pwm_chip *chip, + enum pwm_polarity polarity); int pwmchip_add(struct pwm_chip *chip); -void pwmchip_remove(struct pwm_chip *chip); - -int devm_pwmchip_add(struct device *dev, struct pwm_chip *chip); - +int pwmchip_remove(struct pwm_chip *chip); struct pwm_device *pwm_request_from_chip(struct pwm_chip *chip, unsigned int index, const char *label); @@ -416,16 +444,15 @@ struct pwm_device *of_pwm_xlate_with_flags(struct pwm_chip *pc, const struct of_phandle_args *args); struct pwm_device *pwm_get(struct device *dev, const char *con_id); -struct pwm_device *of_pwm_get(struct device *dev, struct device_node *np, - const char *con_id); +struct pwm_device *of_pwm_get(struct device_node *np, const char *con_id); void pwm_put(struct pwm_device *pwm); struct pwm_device *devm_pwm_get(struct device *dev, const char *con_id); struct pwm_device *devm_of_pwm_get(struct device *dev, struct device_node *np, const char *con_id); -struct pwm_device *devm_fwnode_pwm_get(struct device *dev, - struct fwnode_handle *fwnode, - const char *con_id); +void devm_pwm_put(struct device *dev, struct pwm_device *pwm); + +bool pwm_can_sleep(struct pwm_device *pwm); #else static inline struct pwm_device *pwm_request(int pwm_id, const char *label) { @@ -460,6 +487,12 @@ static inline int pwm_capture(struct pwm_device *pwm, return -EINVAL; } +static inline int pwm_set_polarity(struct pwm_device *pwm, + enum pwm_polarity polarity) +{ + return -ENOTSUPP; +} + static inline int pwm_enable(struct pwm_device *pwm) { return -EINVAL; @@ -484,6 +517,11 @@ static inline int pwmchip_add(struct 
pwm_chip *chip) return -EINVAL; } +static inline int pwmchip_add_inversed(struct pwm_chip *chip) +{ + return -EINVAL; +} + static inline int pwmchip_remove(struct pwm_chip *chip) { return -EINVAL; @@ -502,8 +540,7 @@ static inline struct pwm_device *pwm_get(struct device *dev, return ERR_PTR(-ENODEV); } -static inline struct pwm_device *of_pwm_get(struct device *dev, - struct device_node *np, +static inline struct pwm_device *of_pwm_get(struct device_node *np, const char *con_id) { return ERR_PTR(-ENODEV); @@ -526,11 +563,13 @@ static inline struct pwm_device *devm_of_pwm_get(struct device *dev, return ERR_PTR(-ENODEV); } -static inline struct pwm_device * -devm_fwnode_pwm_get(struct device *dev, struct fwnode_handle *fwnode, - const char *con_id) +static inline void devm_pwm_put(struct device *dev, struct pwm_device *pwm) { - return ERR_PTR(-ENODEV); +} + +static inline bool pwm_can_sleep(struct pwm_device *pwm) +{ + return false; } #endif @@ -562,7 +601,6 @@ static inline void pwm_apply_args(struct pwm_device *pwm) state.enabled = false; state.polarity = pwm->args.polarity; state.period = pwm->args.period; - state.usage_power = false; pwm_apply_state(pwm, &state); } @@ -575,24 +613,17 @@ struct pwm_lookup { const char *con_id; unsigned int period; enum pwm_polarity polarity; - const char *module; /* optional, may be NULL */ }; -#define PWM_LOOKUP_WITH_MODULE(_provider, _index, _dev_id, _con_id, \ - _period, _polarity, _module) \ - { \ - .provider = _provider, \ - .index = _index, \ - .dev_id = _dev_id, \ - .con_id = _con_id, \ - .period = _period, \ - .polarity = _polarity, \ - .module = _module, \ - } - #define PWM_LOOKUP(_provider, _index, _dev_id, _con_id, _period, _polarity) \ - PWM_LOOKUP_WITH_MODULE(_provider, _index, _dev_id, _con_id, _period, \ - _polarity, NULL) + { \ + .provider = _provider, \ + .index = _index, \ + .dev_id = _dev_id, \ + .con_id = _con_id, \ + .period = _period, \ + .polarity = _polarity \ + } #if IS_ENABLED(CONFIG_PWM) void 
pwm_add_table(struct pwm_lookup *table, size_t num); @@ -610,6 +641,7 @@ static inline void pwm_remove_table(struct pwm_lookup *table, size_t num) #ifdef CONFIG_PWM_SYSFS void pwmchip_sysfs_export(struct pwm_chip *chip); void pwmchip_sysfs_unexport(struct pwm_chip *chip); +void pwmchip_sysfs_unexport_children(struct pwm_chip *chip); #else static inline void pwmchip_sysfs_export(struct pwm_chip *chip) { @@ -618,6 +650,10 @@ static inline void pwmchip_sysfs_export(struct pwm_chip *chip) static inline void pwmchip_sysfs_unexport(struct pwm_chip *chip) { } + +static inline void pwmchip_sysfs_unexport_children(struct pwm_chip *chip) +{ +} #endif /* CONFIG_PWM_SYSFS */ #endif /* __LINUX_PWM_H */ diff --git a/include/linux/pwm_backlight.h b/include/linux/pwm_backlight.h index 06086cb93b..efdd9227a4 100644 --- a/include/linux/pwm_backlight.h +++ b/include/linux/pwm_backlight.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ /* * Generic PWM backlight driver data - see drivers/video/backlight/pwm_bl.c */ @@ -14,8 +13,8 @@ struct platform_pwm_backlight_data { unsigned int lth_brightness; unsigned int pwm_period_ns; unsigned int *levels; - unsigned int post_pwm_on_delay; - unsigned int pwm_off_delay; + /* TODO remove once all users are switched to gpiod_* API */ + int enable_gpio; int (*init)(struct device *dev); int (*notify)(struct device *dev, int brightness); void (*notify_after)(struct device *dev, int brightness); diff --git a/include/linux/pxa168_eth.h b/include/linux/pxa168_eth.h index fb09c2c7cb..e1ab6e86cd 100644 --- a/include/linux/pxa168_eth.h +++ b/include/linux/pxa168_eth.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ /* *pxa168 ethernet platform device data definition file. 
*/ diff --git a/include/linux/pxa2xx_ssp.h b/include/linux/pxa2xx_ssp.h index a3fec2de51..2d6f0c39ed 100644 --- a/include/linux/pxa2xx_ssp.h +++ b/include/linux/pxa2xx_ssp.h @@ -1,6 +1,11 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* - * Copyright (C) 2003 Russell King, All Rights Reserved. + * pxa2xx_ssp.h + * + * Copyright (C) 2003 Russell King, All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. * * This driver supports the following PXA CPU/SSP ports:- * @@ -11,19 +16,13 @@ * PXA3xx SSP1, SSP2, SSP3, SSP4 */ -#ifndef __LINUX_PXA2XX_SSP_H -#define __LINUX_PXA2XX_SSP_H +#ifndef __LINUX_SSP_H +#define __LINUX_SSP_H -#include -#include -#include -#include #include -#include +#include +#include -struct clk; -struct device; -struct device_node; /* * SSP Serial Port Registers @@ -38,6 +37,7 @@ struct device_node; #define SSDR (0x10) /* SSP Data Write/Data Read Register */ #define SSTO (0x28) /* SSP Time Out Register */ +#define DDS_RATE (0x28) /* SSP DDS Clock Rate Register (Intel Quark) */ #define SSPSP (0x2C) /* SSP Programmable Serial Protocol */ #define SSTSA (0x30) /* SSP Tx Timeslot Active */ #define SSRSA (0x34) /* SSP Rx Timeslot Active */ @@ -46,170 +46,141 @@ struct device_node; #define SSACDD (0x40) /* SSP Audio Clock Dither Divider */ /* Common PXA2xx bits first */ -#define SSCR0_DSS GENMASK(3, 0) /* Data Size Select (mask) */ +#define SSCR0_DSS (0x0000000f) /* Data Size Select (mask) */ #define SSCR0_DataSize(x) ((x) - 1) /* Data Size Select [4..16] */ -#define SSCR0_FRF GENMASK(5, 4) /* FRame Format (mask) */ +#define SSCR0_FRF (0x00000030) /* FRame Format (mask) */ #define SSCR0_Motorola (0x0 << 4) /* Motorola's Serial Peripheral Interface (SPI) */ #define SSCR0_TI (0x1 << 4) /* Texas Instruments' Synchronous Serial Protocol (SSP) */ #define SSCR0_National (0x2 << 4) /* National Microwire 
*/ -#define SSCR0_ECS BIT(6) /* External clock select */ -#define SSCR0_SSE BIT(7) /* Synchronous Serial Port Enable */ +#define SSCR0_ECS (1 << 6) /* External clock select */ +#define SSCR0_SSE (1 << 7) /* Synchronous Serial Port Enable */ #define SSCR0_SCR(x) ((x) << 8) /* Serial Clock Rate (mask) */ /* PXA27x, PXA3xx */ -#define SSCR0_EDSS BIT(20) /* Extended data size select */ -#define SSCR0_NCS BIT(21) /* Network clock select */ -#define SSCR0_RIM BIT(22) /* Receive FIFO overrun interrupt mask */ -#define SSCR0_TUM BIT(23) /* Transmit FIFO underrun interrupt mask */ -#define SSCR0_FRDC GENMASK(26, 24) /* Frame rate divider control (mask) */ +#define SSCR0_EDSS (1 << 20) /* Extended data size select */ +#define SSCR0_NCS (1 << 21) /* Network clock select */ +#define SSCR0_RIM (1 << 22) /* Receive FIFO overrrun interrupt mask */ +#define SSCR0_TUM (1 << 23) /* Transmit FIFO underrun interrupt mask */ +#define SSCR0_FRDC (0x07000000) /* Frame rate divider control (mask) */ #define SSCR0_SlotsPerFrm(x) (((x) - 1) << 24) /* Time slots per frame [1..8] */ -#define SSCR0_FPCKE BIT(29) /* FIFO packing enable */ -#define SSCR0_ACS BIT(30) /* Audio clock select */ -#define SSCR0_MOD BIT(31) /* Mode (normal or network) */ +#define SSCR0_FPCKE (1 << 29) /* FIFO packing enable */ +#define SSCR0_ACS (1 << 30) /* Audio clock select */ +#define SSCR0_MOD (1 << 31) /* Mode (normal or network) */ -#define SSCR1_RIE BIT(0) /* Receive FIFO Interrupt Enable */ -#define SSCR1_TIE BIT(1) /* Transmit FIFO Interrupt Enable */ -#define SSCR1_LBM BIT(2) /* Loop-Back Mode */ -#define SSCR1_SPO BIT(3) /* Motorola SPI SSPSCLK polarity setting */ -#define SSCR1_SPH BIT(4) /* Motorola SPI SSPSCLK phase setting */ -#define SSCR1_MWDS BIT(5) /* Microwire Transmit Data Size */ -#define SSSR_ALT_FRM_MASK GENMASK(1, 0) /* Masks the SFRM signal number */ -#define SSSR_TNF BIT(2) /* Transmit FIFO Not Full */ -#define SSSR_RNE BIT(3) /* Receive FIFO Not Empty */ -#define SSSR_BSY BIT(4) /* SSP Busy 
*/ -#define SSSR_TFS BIT(5) /* Transmit FIFO Service Request */ -#define SSSR_RFS BIT(6) /* Receive FIFO Service Request */ -#define SSSR_ROR BIT(7) /* Receive FIFO Overrun */ +#define SSCR1_RIE (1 << 0) /* Receive FIFO Interrupt Enable */ +#define SSCR1_TIE (1 << 1) /* Transmit FIFO Interrupt Enable */ +#define SSCR1_LBM (1 << 2) /* Loop-Back Mode */ +#define SSCR1_SPO (1 << 3) /* Motorola SPI SSPSCLK polarity setting */ +#define SSCR1_SPH (1 << 4) /* Motorola SPI SSPSCLK phase setting */ +#define SSCR1_MWDS (1 << 5) /* Microwire Transmit Data Size */ + +#define SSSR_ALT_FRM_MASK 3 /* Masks the SFRM signal number */ +#define SSSR_TNF (1 << 2) /* Transmit FIFO Not Full */ +#define SSSR_RNE (1 << 3) /* Receive FIFO Not Empty */ +#define SSSR_BSY (1 << 4) /* SSP Busy */ +#define SSSR_TFS (1 << 5) /* Transmit FIFO Service Request */ +#define SSSR_RFS (1 << 6) /* Receive FIFO Service Request */ +#define SSSR_ROR (1 << 7) /* Receive FIFO Overrun */ #define RX_THRESH_DFLT 8 #define TX_THRESH_DFLT 8 -#define SSSR_TFL_MASK GENMASK(11, 8) /* Transmit FIFO Level mask */ -#define SSSR_RFL_MASK GENMASK(15, 12) /* Receive FIFO Level mask */ +#define SSSR_TFL_MASK (0xf << 8) /* Transmit FIFO Level mask */ +#define SSSR_RFL_MASK (0xf << 12) /* Receive FIFO Level mask */ -#define SSCR1_TFT GENMASK(9, 6) /* Transmit FIFO Threshold (mask) */ -#define SSCR1_TxTresh(x) (((x) - 1) << 6) /* level [1..16] */ -#define SSCR1_RFT GENMASK(13, 10) /* Receive FIFO Threshold (mask) */ -#define SSCR1_RxTresh(x) (((x) - 1) << 10) /* level [1..16] */ +#define SSCR1_TFT (0x000003c0) /* Transmit FIFO Threshold (mask) */ +#define SSCR1_TxTresh(x) (((x) - 1) << 6) /* level [1..16] */ +#define SSCR1_RFT (0x00003c00) /* Receive FIFO Threshold (mask) */ +#define SSCR1_RxTresh(x) (((x) - 1) << 10) /* level [1..16] */ #define RX_THRESH_CE4100_DFLT 2 #define TX_THRESH_CE4100_DFLT 2 -#define CE4100_SSSR_TFL_MASK GENMASK(9, 8) /* Transmit FIFO Level mask */ -#define CE4100_SSSR_RFL_MASK GENMASK(13, 12) /* 
Receive FIFO Level mask */ +#define CE4100_SSSR_TFL_MASK (0x3 << 8) /* Transmit FIFO Level mask */ +#define CE4100_SSSR_RFL_MASK (0x3 << 12) /* Receive FIFO Level mask */ -#define CE4100_SSCR1_TFT GENMASK(7, 6) /* Transmit FIFO Threshold (mask) */ +#define CE4100_SSCR1_TFT (0x000000c0) /* Transmit FIFO Threshold (mask) */ #define CE4100_SSCR1_TxTresh(x) (((x) - 1) << 6) /* level [1..4] */ -#define CE4100_SSCR1_RFT GENMASK(11, 10) /* Receive FIFO Threshold (mask) */ +#define CE4100_SSCR1_RFT (0x00000c00) /* Receive FIFO Threshold (mask) */ #define CE4100_SSCR1_RxTresh(x) (((x) - 1) << 10) /* level [1..4] */ -/* Intel Quark X1000 */ -#define DDS_RATE 0x28 /* SSP DDS Clock Rate Register */ - /* QUARK_X1000 SSCR0 bit definition */ -#define QUARK_X1000_SSCR0_DSS GENMASK(4, 0) /* Data Size Select (mask) */ -#define QUARK_X1000_SSCR0_DataSize(x) ((x) - 1) /* Data Size Select [4..32] */ -#define QUARK_X1000_SSCR0_FRF GENMASK(6, 5) /* FRame Format (mask) */ +#define QUARK_X1000_SSCR0_DSS (0x1F) /* Data Size Select (mask) */ +#define QUARK_X1000_SSCR0_DataSize(x) ((x) - 1) /* Data Size Select [4..32] */ +#define QUARK_X1000_SSCR0_FRF (0x3 << 5) /* FRame Format (mask) */ #define QUARK_X1000_SSCR0_Motorola (0x0 << 5) /* Motorola's Serial Peripheral Interface (SPI) */ #define RX_THRESH_QUARK_X1000_DFLT 1 #define TX_THRESH_QUARK_X1000_DFLT 16 -#define QUARK_X1000_SSSR_TFL_MASK GENMASK(12, 8) /* Transmit FIFO Level mask */ -#define QUARK_X1000_SSSR_RFL_MASK GENMASK(17, 13) /* Receive FIFO Level mask */ +#define QUARK_X1000_SSSR_TFL_MASK (0x1F << 8) /* Transmit FIFO Level mask */ +#define QUARK_X1000_SSSR_RFL_MASK (0x1F << 13) /* Receive FIFO Level mask */ -#define QUARK_X1000_SSCR1_TFT GENMASK(10, 6) /* Transmit FIFO Threshold (mask) */ +#define QUARK_X1000_SSCR1_TFT (0x1F << 6) /* Transmit FIFO Threshold (mask) */ #define QUARK_X1000_SSCR1_TxTresh(x) (((x) - 1) << 6) /* level [1..32] */ -#define QUARK_X1000_SSCR1_RFT GENMASK(15, 11) /* Receive FIFO Threshold (mask) */ +#define 
QUARK_X1000_SSCR1_RFT (0x1F << 11) /* Receive FIFO Threshold (mask) */ #define QUARK_X1000_SSCR1_RxTresh(x) (((x) - 1) << 11) /* level [1..32] */ -#define QUARK_X1000_SSCR1_EFWR BIT(16) /* Enable FIFO Write/Read */ -#define QUARK_X1000_SSCR1_STRF BIT(17) /* Select FIFO or EFWR */ +#define QUARK_X1000_SSCR1_STRF (1 << 17) /* Select FIFO or EFWR */ +#define QUARK_X1000_SSCR1_EFWR (1 << 16) /* Enable FIFO Write/Read */ -/* Extra bits in PXA255, PXA26x and PXA27x SSP ports */ +/* extra bits in PXA255, PXA26x and PXA27x SSP ports */ #define SSCR0_TISSP (1 << 4) /* TI Sync Serial Protocol */ #define SSCR0_PSP (3 << 4) /* PSP - Programmable Serial Protocol */ +#define SSCR1_TTELP (1 << 31) /* TXD Tristate Enable Last Phase */ +#define SSCR1_TTE (1 << 30) /* TXD Tristate Enable */ +#define SSCR1_EBCEI (1 << 29) /* Enable Bit Count Error interrupt */ +#define SSCR1_SCFR (1 << 28) /* Slave Clock free Running */ +#define SSCR1_ECRA (1 << 27) /* Enable Clock Request A */ +#define SSCR1_ECRB (1 << 26) /* Enable Clock request B */ +#define SSCR1_SCLKDIR (1 << 25) /* Serial Bit Rate Clock Direction */ +#define SSCR1_SFRMDIR (1 << 24) /* Frame Direction */ +#define SSCR1_RWOT (1 << 23) /* Receive Without Transmit */ +#define SSCR1_TRAIL (1 << 22) /* Trailing Byte */ +#define SSCR1_TSRE (1 << 21) /* Transmit Service Request Enable */ +#define SSCR1_RSRE (1 << 20) /* Receive Service Request Enable */ +#define SSCR1_TINTE (1 << 19) /* Receiver Time-out Interrupt enable */ +#define SSCR1_PINTE (1 << 18) /* Peripheral Trailing Byte Interrupt Enable */ +#define SSCR1_IFS (1 << 16) /* Invert Frame Signal */ +#define SSCR1_STRF (1 << 15) /* Select FIFO or EFWR */ +#define SSCR1_EFWR (1 << 14) /* Enable FIFO Write/Read */ -#define SSCR1_EFWR BIT(14) /* Enable FIFO Write/Read */ -#define SSCR1_STRF BIT(15) /* Select FIFO or EFWR */ -#define SSCR1_IFS BIT(16) /* Invert Frame Signal */ -#define SSCR1_PINTE BIT(18) /* Peripheral Trailing Byte Interrupt Enable */ -#define SSCR1_TINTE BIT(19) /* 
Receiver Time-out Interrupt enable */ -#define SSCR1_RSRE BIT(20) /* Receive Service Request Enable */ -#define SSCR1_TSRE BIT(21) /* Transmit Service Request Enable */ -#define SSCR1_TRAIL BIT(22) /* Trailing Byte */ -#define SSCR1_RWOT BIT(23) /* Receive Without Transmit */ -#define SSCR1_SFRMDIR BIT(24) /* Frame Direction */ -#define SSCR1_SCLKDIR BIT(25) /* Serial Bit Rate Clock Direction */ -#define SSCR1_ECRB BIT(26) /* Enable Clock request B */ -#define SSCR1_ECRA BIT(27) /* Enable Clock Request A */ -#define SSCR1_SCFR BIT(28) /* Slave Clock free Running */ -#define SSCR1_EBCEI BIT(29) /* Enable Bit Count Error interrupt */ -#define SSCR1_TTE BIT(30) /* TXD Tristate Enable */ -#define SSCR1_TTELP BIT(31) /* TXD Tristate Enable Last Phase */ +#define SSSR_BCE (1 << 23) /* Bit Count Error */ +#define SSSR_CSS (1 << 22) /* Clock Synchronisation Status */ +#define SSSR_TUR (1 << 21) /* Transmit FIFO Under Run */ +#define SSSR_EOC (1 << 20) /* End Of Chain */ +#define SSSR_TINT (1 << 19) /* Receiver Time-out Interrupt */ +#define SSSR_PINT (1 << 18) /* Peripheral Trailing Byte Interrupt */ -#define SSSR_PINT BIT(18) /* Peripheral Trailing Byte Interrupt */ -#define SSSR_TINT BIT(19) /* Receiver Time-out Interrupt */ -#define SSSR_EOC BIT(20) /* End Of Chain */ -#define SSSR_TUR BIT(21) /* Transmit FIFO Under Run */ -#define SSSR_CSS BIT(22) /* Clock Synchronisation Status */ -#define SSSR_BCE BIT(23) /* Bit Count Error */ #define SSPSP_SCMODE(x) ((x) << 0) /* Serial Bit Rate Clock Mode */ -#define SSPSP_SFRMP BIT(2) /* Serial Frame Polarity */ -#define SSPSP_ETDS BIT(3) /* End of Transfer data State */ +#define SSPSP_SFRMP (1 << 2) /* Serial Frame Polarity */ +#define SSPSP_ETDS (1 << 3) /* End of Transfer data State */ #define SSPSP_STRTDLY(x) ((x) << 4) /* Start Delay */ #define SSPSP_DMYSTRT(x) ((x) << 7) /* Dummy Start */ #define SSPSP_SFRMDLY(x) ((x) << 9) /* Serial Frame Delay */ #define SSPSP_SFRMWDTH(x) ((x) << 16) /* Serial Frame Width */ #define 
SSPSP_DMYSTOP(x) ((x) << 23) /* Dummy Stop */ -#define SSPSP_FSRT BIT(25) /* Frame Sync Relative Timing */ +#define SSPSP_FSRT (1 << 25) /* Frame Sync Relative Timing */ /* PXA3xx */ #define SSPSP_EDMYSTRT(x) ((x) << 26) /* Extended Dummy Start */ #define SSPSP_EDMYSTOP(x) ((x) << 28) /* Extended Dummy Stop */ #define SSPSP_TIMING_MASK (0x7f8001f0) -#define SSACD_ACDS(x) ((x) << 0) /* Audio clock divider select */ -#define SSACD_ACDS_1 (0) -#define SSACD_ACDS_2 (1) -#define SSACD_ACDS_4 (2) -#define SSACD_ACDS_8 (3) -#define SSACD_ACDS_16 (4) -#define SSACD_ACDS_32 (5) -#define SSACD_SCDB BIT(3) /* SSPSYSCLK Divider Bypass */ -#define SSACD_SCDB_4X (0) -#define SSACD_SCDB_1X (1) +#define SSACD_SCDB (1 << 3) /* SSPSYSCLK Divider Bypass */ #define SSACD_ACPS(x) ((x) << 4) /* Audio clock PLL select */ -#define SSACD_SCDX8 BIT(7) /* SYSCLK division ratio select */ - -/* Intel Merrifield SSP */ -#define SFIFOL 0x68 /* FIFO level */ -#define SFIFOTT 0x6c /* FIFO trigger threshold */ - -#define RX_THRESH_MRFLD_DFLT 16 -#define TX_THRESH_MRFLD_DFLT 16 - -#define SFIFOL_TFL_MASK GENMASK(15, 0) /* Transmit FIFO Level mask */ -#define SFIFOL_RFL_MASK GENMASK(31, 16) /* Receive FIFO Level mask */ - -#define SFIFOTT_TFT GENMASK(15, 0) /* Transmit FIFO Threshold (mask) */ -#define SFIFOTT_TxThresh(x) (((x) - 1) << 0) /* TX FIFO trigger threshold / level */ -#define SFIFOTT_RFT GENMASK(31, 16) /* Receive FIFO Threshold (mask) */ -#define SFIFOTT_RxThresh(x) (((x) - 1) << 16) /* RX FIFO trigger threshold / level */ +#define SSACD_ACDS(x) ((x) << 0) /* Audio clock divider select */ +#define SSACD_SCDX8 (1 << 7) /* SYSCLK division ratio select */ /* LPSS SSP */ #define SSITF 0x44 /* TX FIFO trigger level */ -#define SSITF_TxHiThresh(x) (((x) - 1) << 0) #define SSITF_TxLoThresh(x) (((x) - 1) << 8) +#define SSITF_TxHiThresh(x) ((x) - 1) #define SSIRF 0x48 /* RX FIFO trigger level */ #define SSIRF_RxThresh(x) ((x) - 1) -/* LPT/WPT SSP */ -#define SSCR2 (0x40) /* SSP Command / Status 2 
*/ -#define SSPSP2 (0x44) /* SSP Programmable Serial Protocol 2 */ - enum pxa_ssp_type { SSP_UNDEFINED = 0, PXA25x_SSP, /* pxa 210, 250, 255, 26x */ @@ -217,22 +188,18 @@ enum pxa_ssp_type { PXA27x_SSP, PXA3xx_SSP, PXA168_SSP, - MMP2_SSP, PXA910_SSP, CE4100_SSP, - MRFLD_SSP, QUARK_X1000_SSP, - /* Keep LPSS types sorted with lpss_platforms[] */ - LPSS_LPT_SSP, + LPSS_LPT_SSP, /* Keep LPSS types sorted with lpss_platforms[] */ LPSS_BYT_SSP, LPSS_BSW_SSP, LPSS_SPT_SSP, LPSS_BXT_SSP, - LPSS_CNL_SSP, }; struct ssp_device { - struct device *dev; + struct platform_device *pdev; struct list_head node; struct clk *clk; @@ -241,9 +208,11 @@ struct ssp_device { const char *label; int port_id; - enum pxa_ssp_type type; + int type; int use_count; int irq; + int drcmr_rx; + int drcmr_tx; struct device_node *of_node; }; @@ -271,22 +240,6 @@ static inline u32 pxa_ssp_read_reg(struct ssp_device *dev, u32 reg) return __raw_readl(dev->mmio_base + reg); } -static inline void pxa_ssp_enable(struct ssp_device *ssp) -{ - u32 sscr0; - - sscr0 = pxa_ssp_read_reg(ssp, SSCR0) | SSCR0_SSE; - pxa_ssp_write_reg(ssp, SSCR0, sscr0); -} - -static inline void pxa_ssp_disable(struct ssp_device *ssp) -{ - u32 sscr0; - - sscr0 = pxa_ssp_read_reg(ssp, SSCR0) & ~SSCR0_SSE; - pxa_ssp_write_reg(ssp, SSCR0, sscr0); -} - #if IS_ENABLED(CONFIG_PXA_SSP) struct ssp_device *pxa_ssp_request(int port, const char *label); void pxa_ssp_free(struct ssp_device *); @@ -305,4 +258,4 @@ static inline struct ssp_device *pxa_ssp_request_of(const struct device_node *n, static inline void pxa_ssp_free(struct ssp_device *ssp) {} #endif -#endif /* __LINUX_PXA2XX_SSP_H */ +#endif diff --git a/include/linux/qcom_scm.h b/include/linux/qcom_scm.h index 81cad9e1e4..cc32ab852f 100644 --- a/include/linux/qcom_scm.h +++ b/include/linux/qcom_scm.h @@ -1,17 +1,21 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ -/* Copyright (c) 2010-2015, 2018-2019 The Linux Foundation. All rights reserved. 
+/* Copyright (c) 2010-2015, The Linux Foundation. All rights reserved. * Copyright (C) 2015 Linaro Ltd. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. */ #ifndef __QCOM_SCM_H #define __QCOM_SCM_H -#include -#include -#include +extern int qcom_scm_set_cold_boot_addr(void *entry, const cpumask_t *cpus); +extern int qcom_scm_set_warm_boot_addr(void *entry, const cpumask_t *cpus); -#define QCOM_SCM_VERSION(major, minor) (((major) << 16) | ((minor) & 0xFF)) -#define QCOM_SCM_CPU_PWR_DOWN_L2_ON 0x0 -#define QCOM_SCM_CPU_PWR_DOWN_L2_OFF 0x1 #define QCOM_SCM_HDCP_MAX_REQ_CNT 5 struct qcom_scm_hdcp_req { @@ -19,99 +23,27 @@ struct qcom_scm_hdcp_req { u32 val; }; -struct qcom_scm_vmperm { - int vmid; - int perm; -}; - -enum qcom_scm_ocmem_client { - QCOM_SCM_OCMEM_UNUSED_ID = 0x0, - QCOM_SCM_OCMEM_GRAPHICS_ID, - QCOM_SCM_OCMEM_VIDEO_ID, - QCOM_SCM_OCMEM_LP_AUDIO_ID, - QCOM_SCM_OCMEM_SENSORS_ID, - QCOM_SCM_OCMEM_OTHER_OS_ID, - QCOM_SCM_OCMEM_DEBUG_ID, -}; - -enum qcom_scm_sec_dev_id { - QCOM_SCM_MDSS_DEV_ID = 1, - QCOM_SCM_OCMEM_DEV_ID = 5, - QCOM_SCM_PCIE0_DEV_ID = 11, - QCOM_SCM_PCIE1_DEV_ID = 12, - QCOM_SCM_GFX_DEV_ID = 18, - QCOM_SCM_UFS_DEV_ID = 19, - QCOM_SCM_ICE_DEV_ID = 20, -}; - -enum qcom_scm_ice_cipher { - QCOM_SCM_ICE_CIPHER_AES_128_XTS = 0, - QCOM_SCM_ICE_CIPHER_AES_128_CBC = 1, - QCOM_SCM_ICE_CIPHER_AES_256_XTS = 3, - QCOM_SCM_ICE_CIPHER_AES_256_CBC = 4, -}; - -#define QCOM_SCM_VMID_HLOS 0x3 -#define QCOM_SCM_VMID_MSS_MSA 0xF -#define QCOM_SCM_VMID_WLAN 0x18 -#define QCOM_SCM_VMID_WLAN_CE 0x19 -#define QCOM_SCM_PERM_READ 0x4 -#define 
QCOM_SCM_PERM_WRITE 0x2 -#define QCOM_SCM_PERM_EXEC 0x1 -#define QCOM_SCM_PERM_RW (QCOM_SCM_PERM_READ | QCOM_SCM_PERM_WRITE) -#define QCOM_SCM_PERM_RWX (QCOM_SCM_PERM_RW | QCOM_SCM_PERM_EXEC) - extern bool qcom_scm_is_available(void); -extern int qcom_scm_set_cold_boot_addr(void *entry, const cpumask_t *cpus); -extern int qcom_scm_set_warm_boot_addr(void *entry, const cpumask_t *cpus); -extern void qcom_scm_cpu_power_down(u32 flags); -extern int qcom_scm_set_remote_state(u32 state, u32 id); - -extern int qcom_scm_pas_init_image(u32 peripheral, const void *metadata, - size_t size); -extern int qcom_scm_pas_mem_setup(u32 peripheral, phys_addr_t addr, - phys_addr_t size); -extern int qcom_scm_pas_auth_and_reset(u32 peripheral); -extern int qcom_scm_pas_shutdown(u32 peripheral); -extern bool qcom_scm_pas_supported(u32 peripheral); - -extern int qcom_scm_io_readl(phys_addr_t addr, unsigned int *val); -extern int qcom_scm_io_writel(phys_addr_t addr, unsigned int val); - -extern bool qcom_scm_restore_sec_cfg_available(void); -extern int qcom_scm_restore_sec_cfg(u32 device_id, u32 spare); -extern int qcom_scm_iommu_secure_ptbl_size(u32 spare, size_t *size); -extern int qcom_scm_iommu_secure_ptbl_init(u64 addr, u32 size, u32 spare); -extern int qcom_scm_mem_protect_video_var(u32 cp_start, u32 cp_size, - u32 cp_nonpixel_start, - u32 cp_nonpixel_size); -extern int qcom_scm_assign_mem(phys_addr_t mem_addr, size_t mem_sz, - unsigned int *src, - const struct qcom_scm_vmperm *newvm, - unsigned int dest_cnt); - -extern bool qcom_scm_ocmem_lock_available(void); -extern int qcom_scm_ocmem_lock(enum qcom_scm_ocmem_client id, u32 offset, - u32 size, u32 mode); -extern int qcom_scm_ocmem_unlock(enum qcom_scm_ocmem_client id, u32 offset, - u32 size); - -extern bool qcom_scm_ice_available(void); -extern int qcom_scm_ice_invalidate_key(u32 index); -extern int qcom_scm_ice_set_key(u32 index, const u8 *key, u32 key_size, - enum qcom_scm_ice_cipher cipher, - u32 data_unit_size); - extern 
bool qcom_scm_hdcp_available(void); extern int qcom_scm_hdcp_req(struct qcom_scm_hdcp_req *req, u32 req_cnt, - u32 *resp); + u32 *resp); -extern int qcom_scm_qsmmu500_wait_safe_toggle(bool en); +extern bool qcom_scm_pas_supported(u32 peripheral); +extern int qcom_scm_pas_init_image(u32 peripheral, const void *metadata, + size_t size); +extern int qcom_scm_pas_mem_setup(u32 peripheral, phys_addr_t addr, + phys_addr_t size); +extern int qcom_scm_pas_auth_and_reset(u32 peripheral); +extern int qcom_scm_pas_shutdown(u32 peripheral); -extern int qcom_scm_lmh_dcvsh(u32 payload_fn, u32 payload_reg, u32 payload_val, - u64 limit_node, u32 node_id, u64 version); -extern int qcom_scm_lmh_profile_change(u32 profile_id); -extern bool qcom_scm_lmh_dcvsh_available(void); +#define QCOM_SCM_CPU_PWR_DOWN_L2_ON 0x0 +#define QCOM_SCM_CPU_PWR_DOWN_L2_OFF 0x1 + +extern void qcom_scm_cpu_power_down(u32 flags); + +#define QCOM_SCM_VERSION(major, minor) (((major) << 16) | ((minor) & 0xFF)) + +extern u32 qcom_scm_get_version(void); #endif diff --git a/include/linux/qed/common_hsi.h b/include/linux/qed/common_hsi.h index 0a3807e927..734deb0946 100644 --- a/include/linux/qed/common_hsi.h +++ b/include/linux/qed/common_hsi.h @@ -1,20 +1,18 @@ -/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */ /* QLogic qed NIC Driver - * Copyright (c) 2015-2016 QLogic Corporation - * Copyright (c) 2019-2020 Marvell International Ltd. + * Copyright (c) 2015 QLogic Corporation + * + * This software is available under the terms of the GNU General Public License + * (GPL) Version 2, available from the file COPYING in the main directory of + * this source tree. 
*/ - #ifndef _COMMON_HSI_H #define _COMMON_HSI_H - #include #include #include #include /* dma_addr_t manip */ -#define PTR_LO(x) ((u32)(((uintptr_t)(x)) & 0xffffffff)) -#define PTR_HI(x) ((u32)((((uintptr_t)(x)) >> 16) >> 16)) #define DMA_LO_LE(x) cpu_to_le32(lower_32_bits(x)) #define DMA_HI_LE(x) cpu_to_le32(upper_32_bits(x)) #define DMA_REGPAIR_LE(x, val) do { \ @@ -22,44 +20,38 @@ (x).lo = DMA_LO_LE((val)); \ } while (0) -#define HILO_GEN(hi, lo, type) ((((type)(hi)) << 32) + (lo)) -#define HILO_64(hi, lo) \ - HILO_GEN(le32_to_cpu(hi), le32_to_cpu(lo), u64) -#define HILO_64_REGPAIR(regpair) ({ \ - typeof(regpair) __regpair = (regpair); \ - HILO_64(__regpair.hi, __regpair.lo); }) +#define HILO_GEN(hi, lo, type) ((((type)(hi)) << 32) + (lo)) +#define HILO_64(hi, lo) HILO_GEN((le32_to_cpu(hi)), (le32_to_cpu(lo)), u64) +#define HILO_64_REGPAIR(regpair) (HILO_64(regpair.hi, regpair.lo)) #define HILO_DMA_REGPAIR(regpair) ((dma_addr_t)HILO_64_REGPAIR(regpair)) #ifndef __COMMON_HSI__ #define __COMMON_HSI__ -/********************************/ -/* PROTOCOL COMMON FW CONSTANTS */ -/********************************/ -#define X_FINAL_CLEANUP_AGG_INT 1 +#define X_FINAL_CLEANUP_AGG_INT 1 -#define EVENT_RING_PAGE_SIZE_BYTES 4096 +#define EVENT_RING_PAGE_SIZE_BYTES 4096 -#define NUM_OF_GLOBAL_QUEUES 128 -#define COMMON_QUEUE_ENTRY_MAX_BYTE_SIZE 64 +#define NUM_OF_GLOBAL_QUEUES 128 +#define COMMON_QUEUE_ENTRY_MAX_BYTE_SIZE 64 -#define ISCSI_CDU_TASK_SEG_TYPE 0 -#define FCOE_CDU_TASK_SEG_TYPE 0 -#define RDMA_CDU_TASK_SEG_TYPE 1 +#define ISCSI_CDU_TASK_SEG_TYPE 0 +#define RDMA_CDU_TASK_SEG_TYPE 1 -#define FW_ASSERT_GENERAL_ATTN_IDX 32 +#define FW_ASSERT_GENERAL_ATTN_IDX 32 +#define MAX_PINNED_CCFC 32 /* Queue Zone sizes in bytes */ -#define TSTORM_QZONE_SIZE 8 -#define MSTORM_QZONE_SIZE 16 -#define USTORM_QZONE_SIZE 8 -#define XSTORM_QZONE_SIZE 8 -#define YSTORM_QZONE_SIZE 0 -#define PSTORM_QZONE_SIZE 0 +#define TSTORM_QZONE_SIZE 8 +#define MSTORM_QZONE_SIZE 16 +#define 
USTORM_QZONE_SIZE 8 +#define XSTORM_QZONE_SIZE 8 +#define YSTORM_QZONE_SIZE 0 +#define PSTORM_QZONE_SIZE 0 -#define MSTORM_VF_ZONE_DEFAULT_SIZE_LOG 7 +#define MSTORM_VF_ZONE_DEFAULT_SIZE_LOG 7 #define ETH_MAX_NUM_RX_QUEUES_PER_VF_DEFAULT 16 #define ETH_MAX_NUM_RX_QUEUES_PER_VF_DOUBLE 48 #define ETH_MAX_NUM_RX_QUEUES_PER_VF_QUAD 112 @@ -78,19 +70,12 @@ #define CORE_SPQE_PAGE_SIZE_BYTES 4096 -/* Number of LL2 RAM based queues */ -#define MAX_NUM_LL2_RX_RAM_QUEUES 32 - -/* Number of LL2 context based queues */ -#define MAX_NUM_LL2_RX_CTX_QUEUES 208 -#define MAX_NUM_LL2_RX_QUEUES \ - (MAX_NUM_LL2_RX_RAM_QUEUES + MAX_NUM_LL2_RX_CTX_QUEUES) - -#define MAX_NUM_LL2_TX_STATS_COUNTERS 48 +#define MAX_NUM_LL2_RX_QUEUES 32 +#define MAX_NUM_LL2_TX_STATS_COUNTERS 32 #define FW_MAJOR_VERSION 8 -#define FW_MINOR_VERSION 42 -#define FW_REVISION_VERSION 2 +#define FW_MINOR_VERSION 10 +#define FW_REVISION_VERSION 10 #define FW_ENGINEERING_VERSION 0 /***********************/ @@ -102,20 +87,20 @@ #define MAX_NUM_PORTS_BB (2) #define MAX_NUM_PORTS (MAX_NUM_PORTS_K2) -#define MAX_NUM_PFS_K2 (16) -#define MAX_NUM_PFS_BB (8) -#define MAX_NUM_PFS (MAX_NUM_PFS_K2) -#define MAX_NUM_OF_PFS_IN_CHIP (16) /* On both engines */ +#define MAX_NUM_PFS_K2 (16) +#define MAX_NUM_PFS_BB (8) +#define MAX_NUM_PFS (MAX_NUM_PFS_K2) +#define MAX_NUM_OF_PFS_IN_CHIP (16) /* On both engines */ #define MAX_NUM_VFS_K2 (192) #define MAX_NUM_VFS_BB (120) #define MAX_NUM_VFS (MAX_NUM_VFS_K2) #define MAX_NUM_FUNCTIONS_BB (MAX_NUM_PFS_BB + MAX_NUM_VFS_BB) +#define MAX_NUM_FUNCTIONS (MAX_NUM_PFS + MAX_NUM_VFS) #define MAX_FUNCTION_NUMBER_BB (MAX_NUM_PFS + MAX_NUM_VFS_BB) -#define MAX_FUNCTION_NUMBER_K2 (MAX_NUM_PFS + MAX_NUM_VFS_K2) -#define MAX_NUM_FUNCTIONS (MAX_FUNCTION_NUMBER_K2) +#define MAX_FUNCTION_NUMBER (MAX_NUM_PFS + MAX_NUM_VFS) #define MAX_NUM_VPORTS_K2 (208) #define MAX_NUM_VPORTS_BB (160) @@ -128,14 +113,29 @@ /* Traffic classes in network-facing blocks (PBF, BTB, NIG, BRB, PRS and QM) */ #define 
NUM_PHYS_TCS_4PORT_K2 (4) #define NUM_OF_PHYS_TCS (8) -#define PURE_LB_TC NUM_OF_PHYS_TCS + #define NUM_TCS_4PORT_K2 (NUM_PHYS_TCS_4PORT_K2 + 1) #define NUM_OF_TCS (NUM_OF_PHYS_TCS + 1) +#define LB_TC (NUM_OF_PHYS_TCS) + +/* Num of possible traffic priority values */ +#define NUM_OF_PRIO (8) + +#define MAX_NUM_VOQS_K2 (NUM_TCS_4PORT_K2 * MAX_NUM_PORTS_K2) +#define MAX_NUM_VOQS_BB (NUM_OF_TCS * MAX_NUM_PORTS_BB) +#define MAX_NUM_VOQS (MAX_NUM_VOQS_K2) +#define MAX_PHYS_VOQS (NUM_OF_PHYS_TCS * MAX_NUM_PORTS_BB) + /* CIDs */ -#define NUM_OF_CONNECTION_TYPES_E4 (8) -#define NUM_OF_LCIDS (320) -#define NUM_OF_LTIDS (320) +#define NUM_OF_CONNECTION_TYPES (8) +#define NUM_OF_LCIDS (320) +#define NUM_OF_LTIDS (320) + +/* Clock values */ +#define MASTER_CLK_FREQ_E4 (375e6) +#define STORM_CLK_FREQ_E4 (1000e6) +#define CLK25M_CLK_FREQ_E4 (25e6) /* Global PXP windows (GTT) */ #define NUM_OF_GTT 19 @@ -144,75 +144,57 @@ #define GTT_DWORD_SIZE BIT(GTT_DWORD_SIZE_BITS) /* Tools Version */ -#define TOOLS_VERSION 10 +#define TOOLS_VERSION 10 /*****************/ /* CDU CONSTANTS */ /*****************/ -#define CDU_SEG_TYPE_OFFSET_REG_TYPE_SHIFT (17) -#define CDU_SEG_TYPE_OFFSET_REG_OFFSET_MASK (0x1ffff) - -#define CDU_VF_FL_SEG_TYPE_OFFSET_REG_TYPE_SHIFT (12) -#define CDU_VF_FL_SEG_TYPE_OFFSET_REG_OFFSET_MASK (0xfff) - -#define CDU_CONTEXT_VALIDATION_CFG_ENABLE_SHIFT (0) -#define CDU_CONTEXT_VALIDATION_CFG_VALIDATION_TYPE_SHIFT (1) -#define CDU_CONTEXT_VALIDATION_CFG_USE_TYPE (2) -#define CDU_CONTEXT_VALIDATION_CFG_USE_REGION (3) -#define CDU_CONTEXT_VALIDATION_CFG_USE_CID (4) -#define CDU_CONTEXT_VALIDATION_CFG_USE_ACTIVE (5) +#define CDU_SEG_TYPE_OFFSET_REG_TYPE_SHIFT (17) +#define CDU_SEG_TYPE_OFFSET_REG_OFFSET_MASK (0x1ffff) +#define CDU_VF_FL_SEG_TYPE_OFFSET_REG_TYPE_SHIFT (12) +#define CDU_VF_FL_SEG_TYPE_OFFSET_REG_OFFSET_MASK (0xfff) /*****************/ /* DQ CONSTANTS */ /*****************/ /* DEMS */ #define DQ_DEMS_LEGACY 0 -#define DQ_DEMS_TOE_MORE_TO_SEND 3 -#define 
DQ_DEMS_TOE_LOCAL_ADV_WND 4 -#define DQ_DEMS_ROCE_CQ_CONS 7 -/* XCM agg val selection (HW) */ -#define DQ_XCM_AGG_VAL_SEL_WORD2 0 -#define DQ_XCM_AGG_VAL_SEL_WORD3 1 -#define DQ_XCM_AGG_VAL_SEL_WORD4 2 -#define DQ_XCM_AGG_VAL_SEL_WORD5 3 -#define DQ_XCM_AGG_VAL_SEL_REG3 4 -#define DQ_XCM_AGG_VAL_SEL_REG4 5 -#define DQ_XCM_AGG_VAL_SEL_REG5 6 -#define DQ_XCM_AGG_VAL_SEL_REG6 7 +/* XCM agg val selection */ +#define DQ_XCM_AGG_VAL_SEL_WORD2 0 +#define DQ_XCM_AGG_VAL_SEL_WORD3 1 +#define DQ_XCM_AGG_VAL_SEL_WORD4 2 +#define DQ_XCM_AGG_VAL_SEL_WORD5 3 +#define DQ_XCM_AGG_VAL_SEL_REG3 4 +#define DQ_XCM_AGG_VAL_SEL_REG4 5 +#define DQ_XCM_AGG_VAL_SEL_REG5 6 +#define DQ_XCM_AGG_VAL_SEL_REG6 7 -/* XCM agg val selection (FW) */ -#define DQ_XCM_CORE_TX_BD_CONS_CMD DQ_XCM_AGG_VAL_SEL_WORD3 -#define DQ_XCM_CORE_TX_BD_PROD_CMD DQ_XCM_AGG_VAL_SEL_WORD4 -#define DQ_XCM_CORE_SPQ_PROD_CMD DQ_XCM_AGG_VAL_SEL_WORD4 -#define DQ_XCM_ETH_EDPM_NUM_BDS_CMD DQ_XCM_AGG_VAL_SEL_WORD2 -#define DQ_XCM_ETH_TX_BD_CONS_CMD DQ_XCM_AGG_VAL_SEL_WORD3 -#define DQ_XCM_ETH_TX_BD_PROD_CMD DQ_XCM_AGG_VAL_SEL_WORD4 -#define DQ_XCM_ETH_GO_TO_BD_CONS_CMD DQ_XCM_AGG_VAL_SEL_WORD5 -#define DQ_XCM_FCOE_SQ_CONS_CMD DQ_XCM_AGG_VAL_SEL_WORD3 -#define DQ_XCM_FCOE_SQ_PROD_CMD DQ_XCM_AGG_VAL_SEL_WORD4 -#define DQ_XCM_FCOE_X_FERQ_PROD_CMD DQ_XCM_AGG_VAL_SEL_WORD5 -#define DQ_XCM_ISCSI_SQ_CONS_CMD DQ_XCM_AGG_VAL_SEL_WORD3 -#define DQ_XCM_ISCSI_SQ_PROD_CMD DQ_XCM_AGG_VAL_SEL_WORD4 -#define DQ_XCM_ISCSI_MORE_TO_SEND_SEQ_CMD DQ_XCM_AGG_VAL_SEL_REG3 -#define DQ_XCM_ISCSI_EXP_STAT_SN_CMD DQ_XCM_AGG_VAL_SEL_REG6 -#define DQ_XCM_ROCE_SQ_PROD_CMD DQ_XCM_AGG_VAL_SEL_WORD4 -#define DQ_XCM_TOE_TX_BD_PROD_CMD DQ_XCM_AGG_VAL_SEL_WORD4 -#define DQ_XCM_TOE_MORE_TO_SEND_SEQ_CMD DQ_XCM_AGG_VAL_SEL_REG3 -#define DQ_XCM_TOE_LOCAL_ADV_WND_SEQ_CMD DQ_XCM_AGG_VAL_SEL_REG4 -#define DQ_XCM_ROCE_ACK_EDPM_DORQ_SEQ_CMD DQ_XCM_AGG_VAL_SEL_WORD5 +/* XCM agg val selection */ +#define DQ_XCM_CORE_TX_BD_CONS_CMD DQ_XCM_AGG_VAL_SEL_WORD3 +#define 
DQ_XCM_CORE_TX_BD_PROD_CMD DQ_XCM_AGG_VAL_SEL_WORD4 +#define DQ_XCM_CORE_SPQ_PROD_CMD DQ_XCM_AGG_VAL_SEL_WORD4 +#define DQ_XCM_ETH_EDPM_NUM_BDS_CMD DQ_XCM_AGG_VAL_SEL_WORD2 +#define DQ_XCM_ETH_TX_BD_CONS_CMD DQ_XCM_AGG_VAL_SEL_WORD3 +#define DQ_XCM_ETH_TX_BD_PROD_CMD DQ_XCM_AGG_VAL_SEL_WORD4 +#define DQ_XCM_ETH_GO_TO_BD_CONS_CMD DQ_XCM_AGG_VAL_SEL_WORD5 +#define DQ_XCM_ISCSI_SQ_CONS_CMD DQ_XCM_AGG_VAL_SEL_WORD3 +#define DQ_XCM_ISCSI_SQ_PROD_CMD DQ_XCM_AGG_VAL_SEL_WORD4 +#define DQ_XCM_ISCSI_MORE_TO_SEND_SEQ_CMD DQ_XCM_AGG_VAL_SEL_REG3 +#define DQ_XCM_ISCSI_EXP_STAT_SN_CMD DQ_XCM_AGG_VAL_SEL_REG6 +#define DQ_XCM_ROCE_SQ_PROD_CMD DQ_XCM_AGG_VAL_SEL_WORD4 /* UCM agg val selection (HW) */ #define DQ_UCM_AGG_VAL_SEL_WORD0 0 #define DQ_UCM_AGG_VAL_SEL_WORD1 1 #define DQ_UCM_AGG_VAL_SEL_WORD2 2 #define DQ_UCM_AGG_VAL_SEL_WORD3 3 -#define DQ_UCM_AGG_VAL_SEL_REG0 4 -#define DQ_UCM_AGG_VAL_SEL_REG1 5 -#define DQ_UCM_AGG_VAL_SEL_REG2 6 -#define DQ_UCM_AGG_VAL_SEL_REG3 7 +#define DQ_UCM_AGG_VAL_SEL_REG0 4 +#define DQ_UCM_AGG_VAL_SEL_REG1 5 +#define DQ_UCM_AGG_VAL_SEL_REG2 6 +#define DQ_UCM_AGG_VAL_SEL_REG3 7 /* UCM agg val selection (FW) */ #define DQ_UCM_ETH_PMD_TX_CONS_CMD DQ_UCM_AGG_VAL_SEL_WORD2 @@ -236,7 +218,7 @@ #define DQ_TCM_ROCE_RQ_PROD_CMD \ DQ_TCM_AGG_VAL_SEL_WORD0 -/* XCM agg counter flag selection (HW) */ +/* XCM agg counter flag selection */ #define DQ_XCM_AGG_FLG_SHIFT_BIT14 0 #define DQ_XCM_AGG_FLG_SHIFT_BIT15 1 #define DQ_XCM_AGG_FLG_SHIFT_CF12 2 @@ -246,20 +228,17 @@ #define DQ_XCM_AGG_FLG_SHIFT_CF22 6 #define DQ_XCM_AGG_FLG_SHIFT_CF23 7 -/* XCM agg counter flag selection (FW) */ -#define DQ_XCM_CORE_DQ_CF_CMD BIT(DQ_XCM_AGG_FLG_SHIFT_CF18) -#define DQ_XCM_CORE_TERMINATE_CMD BIT(DQ_XCM_AGG_FLG_SHIFT_CF19) -#define DQ_XCM_CORE_SLOW_PATH_CMD BIT(DQ_XCM_AGG_FLG_SHIFT_CF22) -#define DQ_XCM_ETH_DQ_CF_CMD BIT(DQ_XCM_AGG_FLG_SHIFT_CF18) -#define DQ_XCM_ETH_TERMINATE_CMD BIT(DQ_XCM_AGG_FLG_SHIFT_CF19) -#define DQ_XCM_ETH_SLOW_PATH_CMD BIT(DQ_XCM_AGG_FLG_SHIFT_CF22) 
-#define DQ_XCM_ETH_TPH_EN_CMD BIT(DQ_XCM_AGG_FLG_SHIFT_CF23) -#define DQ_XCM_FCOE_SLOW_PATH_CMD BIT(DQ_XCM_AGG_FLG_SHIFT_CF22) -#define DQ_XCM_ISCSI_DQ_FLUSH_CMD BIT(DQ_XCM_AGG_FLG_SHIFT_CF19) -#define DQ_XCM_ISCSI_SLOW_PATH_CMD BIT(DQ_XCM_AGG_FLG_SHIFT_CF22) -#define DQ_XCM_ISCSI_PROC_ONLY_CLEANUP_CMD BIT(DQ_XCM_AGG_FLG_SHIFT_CF23) -#define DQ_XCM_TOE_DQ_FLUSH_CMD BIT(DQ_XCM_AGG_FLG_SHIFT_CF19) -#define DQ_XCM_TOE_SLOW_PATH_CMD BIT(DQ_XCM_AGG_FLG_SHIFT_CF22) +/* XCM agg counter flag selection */ +#define DQ_XCM_CORE_DQ_CF_CMD BIT(DQ_XCM_AGG_FLG_SHIFT_CF18) +#define DQ_XCM_CORE_TERMINATE_CMD BIT(DQ_XCM_AGG_FLG_SHIFT_CF19) +#define DQ_XCM_CORE_SLOW_PATH_CMD BIT(DQ_XCM_AGG_FLG_SHIFT_CF22) +#define DQ_XCM_ETH_DQ_CF_CMD BIT(DQ_XCM_AGG_FLG_SHIFT_CF18) +#define DQ_XCM_ETH_TERMINATE_CMD BIT(DQ_XCM_AGG_FLG_SHIFT_CF19) +#define DQ_XCM_ETH_SLOW_PATH_CMD BIT(DQ_XCM_AGG_FLG_SHIFT_CF22) +#define DQ_XCM_ETH_TPH_EN_CMD BIT(DQ_XCM_AGG_FLG_SHIFT_CF23) +#define DQ_XCM_ISCSI_DQ_FLUSH_CMD BIT(DQ_XCM_AGG_FLG_SHIFT_CF19) +#define DQ_XCM_ISCSI_SLOW_PATH_CMD BIT(DQ_XCM_AGG_FLG_SHIFT_CF22) +#define DQ_XCM_ISCSI_PROC_ONLY_CLEANUP_CMD BIT(DQ_XCM_AGG_FLG_SHIFT_CF23) /* UCM agg counter flag selection (HW) */ #define DQ_UCM_AGG_FLG_SHIFT_CF0 0 @@ -276,9 +255,6 @@ #define DQ_UCM_ETH_PMD_RX_ARM_CMD BIT(DQ_UCM_AGG_FLG_SHIFT_CF5) #define DQ_UCM_ROCE_CQ_ARM_SE_CF_CMD BIT(DQ_UCM_AGG_FLG_SHIFT_CF4) #define DQ_UCM_ROCE_CQ_ARM_CF_CMD BIT(DQ_UCM_AGG_FLG_SHIFT_CF5) -#define DQ_UCM_TOE_TIMER_STOP_ALL_CMD BIT(DQ_UCM_AGG_FLG_SHIFT_CF3) -#define DQ_UCM_TOE_SLOW_PATH_CF_CMD BIT(DQ_UCM_AGG_FLG_SHIFT_CF4) -#define DQ_UCM_TOE_DQ_CF_CMD BIT(DQ_UCM_AGG_FLG_SHIFT_CF5) /* TCM agg counter flag selection (HW) */ #define DQ_TCM_AGG_FLG_SHIFT_CF0 0 @@ -290,28 +266,22 @@ #define DQ_TCM_AGG_FLG_SHIFT_CF6 6 #define DQ_TCM_AGG_FLG_SHIFT_CF7 7 /* TCM agg counter flag selection (FW) */ -#define DQ_TCM_FCOE_FLUSH_Q0_CMD BIT(DQ_TCM_AGG_FLG_SHIFT_CF1) -#define DQ_TCM_FCOE_DUMMY_TIMER_CMD BIT(DQ_TCM_AGG_FLG_SHIFT_CF2) -#define 
DQ_TCM_FCOE_TIMER_STOP_ALL_CMD BIT(DQ_TCM_AGG_FLG_SHIFT_CF3) #define DQ_TCM_ISCSI_FLUSH_Q0_CMD BIT(DQ_TCM_AGG_FLG_SHIFT_CF1) #define DQ_TCM_ISCSI_TIMER_STOP_ALL_CMD BIT(DQ_TCM_AGG_FLG_SHIFT_CF3) -#define DQ_TCM_TOE_FLUSH_Q0_CMD BIT(DQ_TCM_AGG_FLG_SHIFT_CF1) -#define DQ_TCM_TOE_TIMER_STOP_ALL_CMD BIT(DQ_TCM_AGG_FLG_SHIFT_CF3) -#define DQ_TCM_IWARP_POST_RQ_CF_CMD BIT(DQ_TCM_AGG_FLG_SHIFT_CF1) /* PWM address mapping */ -#define DQ_PWM_OFFSET_DPM_BASE 0x0 -#define DQ_PWM_OFFSET_DPM_END 0x27 +#define DQ_PWM_OFFSET_DPM_BASE 0x0 +#define DQ_PWM_OFFSET_DPM_END 0x27 #define DQ_PWM_OFFSET_XCM16_BASE 0x40 #define DQ_PWM_OFFSET_XCM32_BASE 0x44 #define DQ_PWM_OFFSET_UCM16_BASE 0x48 #define DQ_PWM_OFFSET_UCM32_BASE 0x4C -#define DQ_PWM_OFFSET_UCM16_4 0x50 +#define DQ_PWM_OFFSET_UCM16_4 0x50 #define DQ_PWM_OFFSET_TCM16_BASE 0x58 #define DQ_PWM_OFFSET_TCM32_BASE 0x5C -#define DQ_PWM_OFFSET_XCM_FLAGS 0x68 -#define DQ_PWM_OFFSET_UCM_FLAGS 0x69 -#define DQ_PWM_OFFSET_TCM_FLAGS 0x6B +#define DQ_PWM_OFFSET_XCM_FLAGS 0x68 +#define DQ_PWM_OFFSET_UCM_FLAGS 0x69 +#define DQ_PWM_OFFSET_TCM_FLAGS 0x6B #define DQ_PWM_OFFSET_XCM_RDMA_SQ_PROD (DQ_PWM_OFFSET_XCM16_BASE + 2) #define DQ_PWM_OFFSET_UCM_RDMA_CQ_CONS_32BIT (DQ_PWM_OFFSET_UCM32_BASE) @@ -320,15 +290,10 @@ #define DQ_PWM_OFFSET_UCM_RDMA_ARM_FLAGS (DQ_PWM_OFFSET_UCM_FLAGS) #define DQ_PWM_OFFSET_TCM_ROCE_RQ_PROD (DQ_PWM_OFFSET_TCM16_BASE + 1) #define DQ_PWM_OFFSET_TCM_IWARP_RQ_PROD (DQ_PWM_OFFSET_TCM16_BASE + 3) - -/* DQ_DEMS_AGG_VAL_BASE */ -#define DQ_PWM_OFFSET_TCM_LL2_PROD_UPDATE \ - (DQ_PWM_OFFSET_TCM32_BASE + DQ_TCM_AGG_VAL_SEL_REG9 - 4) - -#define DQ_REGION_SHIFT (12) +#define DQ_REGION_SHIFT (12) /* DPM */ -#define DQ_DPM_WQE_BUFF_SIZE (320) +#define DQ_DPM_WQE_BUFF_SIZE (320) /* Conn type ranges */ #define DQ_CONN_TYPE_RANGE_SHIFT (4) @@ -337,30 +302,29 @@ /* QM CONSTANTS */ /*****************/ -/* Number of TX queues in the QM */ +/* number of TX queues in the QM */ #define MAX_QM_TX_QUEUES_K2 512 #define MAX_QM_TX_QUEUES_BB 
448 #define MAX_QM_TX_QUEUES MAX_QM_TX_QUEUES_K2 -/* Number of Other queues in the QM */ +/* number of Other queues in the QM */ #define MAX_QM_OTHER_QUEUES_BB 64 #define MAX_QM_OTHER_QUEUES_K2 128 #define MAX_QM_OTHER_QUEUES MAX_QM_OTHER_QUEUES_K2 -/* Number of queues in a PF queue group */ +/* number of queues in a PF queue group */ #define QM_PF_QUEUE_GROUP_SIZE 8 -/* The size of a single queue element in bytes */ -#define QM_PQ_ELEMENT_SIZE 4 +/* the size of a single queue element in bytes */ +#define QM_PQ_ELEMENT_SIZE 4 -/* Base number of Tx PQs in the CM PQ representation. - * Should be used when storing PQ IDs in CM PQ registers and context. +/* base number of Tx PQs in the CM PQ representation. + * should be used when storing PQ IDs in CM PQ registers and context */ -#define CM_TX_PQ_BASE 0x200 +#define CM_TX_PQ_BASE 0x200 -/* Number of global Vport/QCN rate limiters */ +/* number of global Vport/QCN rate limiters */ #define MAX_QM_GLOBAL_RLS 256 - /* QM registers data */ #define QM_LINE_CRD_REG_WIDTH 16 #define QM_LINE_CRD_REG_SIGN_BIT BIT((QM_LINE_CRD_REG_WIDTH - 1)) @@ -379,8 +343,7 @@ #define CAU_FSM_ETH_TX 1 /* Number of Protocol Indices per Status Block */ -#define PIS_PER_SB_E4 12 -#define MAX_PIS_PER_SB PIS_PER_SB +#define PIS_PER_SB 12 #define CAU_HC_STOPPED_STATE 3 #define CAU_HC_DISABLE_STATE 4 @@ -411,6 +374,9 @@ #define IGU_MEM_PBA_MSIX_RESERVED_UPPER 0x03ff #define IGU_CMD_INT_ACK_BASE 0x0400 +#define IGU_CMD_INT_ACK_UPPER (IGU_CMD_INT_ACK_BASE + \ + MAX_TOT_SB_PER_PATH - \ + 1) #define IGU_CMD_INT_ACK_RESERVED_UPPER 0x05ff #define IGU_CMD_ATTN_BIT_UPD_UPPER 0x05f0 @@ -423,6 +389,9 @@ #define IGU_REG_SISR_MDPC_WOMASK_UPPER 0x05f6 #define IGU_CMD_PROD_UPD_BASE 0x0600 +#define IGU_CMD_PROD_UPD_UPPER (IGU_CMD_PROD_UPD_BASE +\ + MAX_TOT_SB_PER_PATH - \ + 1) #define IGU_CMD_PROD_UPD_RESERVED_UPPER 0x07ff /*****************/ @@ -441,6 +410,7 @@ #define PXP_BAR_DQ 1 /* PTT and GTT */ +#define PXP_NUM_PF_WINDOWS 12 #define PXP_PER_PF_ENTRY_SIZE 8 
#define PXP_NUM_GLOBAL_WINDOWS 243 #define PXP_GLOBAL_ENTRY_SIZE 4 @@ -465,7 +435,6 @@ #define PXP_PF_ME_OPAQUE_ADDR 0x1f8 #define PXP_PF_ME_CONCRETE_ADDR 0x1fc -#define PXP_NUM_PF_WINDOWS 12 #define PXP_EXTERNAL_BAR_PF_WINDOW_START 0x1000 #define PXP_EXTERNAL_BAR_PF_WINDOW_NUM PXP_NUM_PF_WINDOWS #define PXP_EXTERNAL_BAR_PF_WINDOW_SINGLE_SIZE 0x1000 @@ -488,161 +457,165 @@ PXP_EXTERNAL_BAR_GLOBAL_WINDOW_LENGTH - 1) /* PF BAR */ -#define PXP_BAR0_START_GRC 0x0000 -#define PXP_BAR0_GRC_LENGTH 0x1C00000 -#define PXP_BAR0_END_GRC (PXP_BAR0_START_GRC + \ - PXP_BAR0_GRC_LENGTH - 1) +#define PXP_BAR0_START_GRC 0x0000 +#define PXP_BAR0_GRC_LENGTH 0x1C00000 +#define PXP_BAR0_END_GRC (PXP_BAR0_START_GRC + \ + PXP_BAR0_GRC_LENGTH - 1) -#define PXP_BAR0_START_IGU 0x1C00000 -#define PXP_BAR0_IGU_LENGTH 0x10000 -#define PXP_BAR0_END_IGU (PXP_BAR0_START_IGU + \ - PXP_BAR0_IGU_LENGTH - 1) +#define PXP_BAR0_START_IGU 0x1C00000 +#define PXP_BAR0_IGU_LENGTH 0x10000 +#define PXP_BAR0_END_IGU (PXP_BAR0_START_IGU + \ + PXP_BAR0_IGU_LENGTH - 1) -#define PXP_BAR0_START_TSDM 0x1C80000 -#define PXP_BAR0_SDM_LENGTH 0x40000 +#define PXP_BAR0_START_TSDM 0x1C80000 +#define PXP_BAR0_SDM_LENGTH 0x40000 #define PXP_BAR0_SDM_RESERVED_LENGTH 0x40000 -#define PXP_BAR0_END_TSDM (PXP_BAR0_START_TSDM + \ - PXP_BAR0_SDM_LENGTH - 1) +#define PXP_BAR0_END_TSDM (PXP_BAR0_START_TSDM + \ + PXP_BAR0_SDM_LENGTH - 1) -#define PXP_BAR0_START_MSDM 0x1D00000 -#define PXP_BAR0_END_MSDM (PXP_BAR0_START_MSDM + \ - PXP_BAR0_SDM_LENGTH - 1) +#define PXP_BAR0_START_MSDM 0x1D00000 +#define PXP_BAR0_END_MSDM (PXP_BAR0_START_MSDM + \ + PXP_BAR0_SDM_LENGTH - 1) -#define PXP_BAR0_START_USDM 0x1D80000 -#define PXP_BAR0_END_USDM (PXP_BAR0_START_USDM + \ - PXP_BAR0_SDM_LENGTH - 1) +#define PXP_BAR0_START_USDM 0x1D80000 +#define PXP_BAR0_END_USDM (PXP_BAR0_START_USDM + \ + PXP_BAR0_SDM_LENGTH - 1) -#define PXP_BAR0_START_XSDM 0x1E00000 -#define PXP_BAR0_END_XSDM (PXP_BAR0_START_XSDM + \ - PXP_BAR0_SDM_LENGTH - 1) +#define 
PXP_BAR0_START_XSDM 0x1E00000 +#define PXP_BAR0_END_XSDM (PXP_BAR0_START_XSDM + \ + PXP_BAR0_SDM_LENGTH - 1) -#define PXP_BAR0_START_YSDM 0x1E80000 -#define PXP_BAR0_END_YSDM (PXP_BAR0_START_YSDM + \ - PXP_BAR0_SDM_LENGTH - 1) +#define PXP_BAR0_START_YSDM 0x1E80000 +#define PXP_BAR0_END_YSDM (PXP_BAR0_START_YSDM + \ + PXP_BAR0_SDM_LENGTH - 1) -#define PXP_BAR0_START_PSDM 0x1F00000 -#define PXP_BAR0_END_PSDM (PXP_BAR0_START_PSDM + \ - PXP_BAR0_SDM_LENGTH - 1) +#define PXP_BAR0_START_PSDM 0x1F00000 +#define PXP_BAR0_END_PSDM (PXP_BAR0_START_PSDM + \ + PXP_BAR0_SDM_LENGTH - 1) #define PXP_BAR0_FIRST_INVALID_ADDRESS (PXP_BAR0_END_PSDM + 1) /* VF BAR */ -#define PXP_VF_BAR0 0 +#define PXP_VF_BAR0 0 -#define PXP_VF_BAR0_START_IGU 0 -#define PXP_VF_BAR0_IGU_LENGTH 0x3000 -#define PXP_VF_BAR0_END_IGU (PXP_VF_BAR0_START_IGU + \ - PXP_VF_BAR0_IGU_LENGTH - 1) +#define PXP_VF_BAR0_START_GRC 0x3E00 +#define PXP_VF_BAR0_GRC_LENGTH 0x200 +#define PXP_VF_BAR0_END_GRC (PXP_VF_BAR0_START_GRC + \ + PXP_VF_BAR0_GRC_LENGTH - 1) -#define PXP_VF_BAR0_START_DQ 0x3000 -#define PXP_VF_BAR0_DQ_LENGTH 0x200 -#define PXP_VF_BAR0_DQ_OPAQUE_OFFSET 0 -#define PXP_VF_BAR0_ME_OPAQUE_ADDRESS (PXP_VF_BAR0_START_DQ + \ - PXP_VF_BAR0_DQ_OPAQUE_OFFSET) -#define PXP_VF_BAR0_ME_CONCRETE_ADDRESS (PXP_VF_BAR0_ME_OPAQUE_ADDRESS \ - + 4) -#define PXP_VF_BAR0_END_DQ (PXP_VF_BAR0_START_DQ + \ - PXP_VF_BAR0_DQ_LENGTH - 1) +#define PXP_VF_BAR0_START_IGU 0 +#define PXP_VF_BAR0_IGU_LENGTH 0x3000 +#define PXP_VF_BAR0_END_IGU (PXP_VF_BAR0_START_IGU + \ + PXP_VF_BAR0_IGU_LENGTH - 1) -#define PXP_VF_BAR0_START_TSDM_ZONE_B 0x3200 -#define PXP_VF_BAR0_SDM_LENGTH_ZONE_B 0x200 -#define PXP_VF_BAR0_END_TSDM_ZONE_B (PXP_VF_BAR0_START_TSDM_ZONE_B + \ - PXP_VF_BAR0_SDM_LENGTH_ZONE_B - 1) +#define PXP_VF_BAR0_START_DQ 0x3000 +#define PXP_VF_BAR0_DQ_LENGTH 0x200 +#define PXP_VF_BAR0_DQ_OPAQUE_OFFSET 0 +#define PXP_VF_BAR0_ME_OPAQUE_ADDRESS (PXP_VF_BAR0_START_DQ + \ + PXP_VF_BAR0_DQ_OPAQUE_OFFSET) +#define 
PXP_VF_BAR0_ME_CONCRETE_ADDRESS (PXP_VF_BAR0_ME_OPAQUE_ADDRESS \ + + 4) +#define PXP_VF_BAR0_END_DQ (PXP_VF_BAR0_START_DQ + \ + PXP_VF_BAR0_DQ_LENGTH - 1) -#define PXP_VF_BAR0_START_MSDM_ZONE_B 0x3400 -#define PXP_VF_BAR0_END_MSDM_ZONE_B (PXP_VF_BAR0_START_MSDM_ZONE_B + \ - PXP_VF_BAR0_SDM_LENGTH_ZONE_B - 1) +#define PXP_VF_BAR0_START_TSDM_ZONE_B 0x3200 +#define PXP_VF_BAR0_SDM_LENGTH_ZONE_B 0x200 +#define PXP_VF_BAR0_END_TSDM_ZONE_B (PXP_VF_BAR0_START_TSDM_ZONE_B \ + + \ + PXP_VF_BAR0_SDM_LENGTH_ZONE_B \ + - 1) -#define PXP_VF_BAR0_START_USDM_ZONE_B 0x3600 -#define PXP_VF_BAR0_END_USDM_ZONE_B (PXP_VF_BAR0_START_USDM_ZONE_B + \ - PXP_VF_BAR0_SDM_LENGTH_ZONE_B - 1) +#define PXP_VF_BAR0_START_MSDM_ZONE_B 0x3400 +#define PXP_VF_BAR0_END_MSDM_ZONE_B (PXP_VF_BAR0_START_MSDM_ZONE_B \ + + \ + PXP_VF_BAR0_SDM_LENGTH_ZONE_B \ + - 1) -#define PXP_VF_BAR0_START_XSDM_ZONE_B 0x3800 -#define PXP_VF_BAR0_END_XSDM_ZONE_B (PXP_VF_BAR0_START_XSDM_ZONE_B + \ - PXP_VF_BAR0_SDM_LENGTH_ZONE_B - 1) +#define PXP_VF_BAR0_START_USDM_ZONE_B 0x3600 +#define PXP_VF_BAR0_END_USDM_ZONE_B (PXP_VF_BAR0_START_USDM_ZONE_B \ + + \ + PXP_VF_BAR0_SDM_LENGTH_ZONE_B \ + - 1) -#define PXP_VF_BAR0_START_YSDM_ZONE_B 0x3a00 -#define PXP_VF_BAR0_END_YSDM_ZONE_B (PXP_VF_BAR0_START_YSDM_ZONE_B + \ - PXP_VF_BAR0_SDM_LENGTH_ZONE_B - 1) +#define PXP_VF_BAR0_START_XSDM_ZONE_B 0x3800 +#define PXP_VF_BAR0_END_XSDM_ZONE_B (PXP_VF_BAR0_START_XSDM_ZONE_B \ + + \ + PXP_VF_BAR0_SDM_LENGTH_ZONE_B \ + - 1) -#define PXP_VF_BAR0_START_PSDM_ZONE_B 0x3c00 -#define PXP_VF_BAR0_END_PSDM_ZONE_B (PXP_VF_BAR0_START_PSDM_ZONE_B + \ - PXP_VF_BAR0_SDM_LENGTH_ZONE_B - 1) +#define PXP_VF_BAR0_START_YSDM_ZONE_B 0x3a00 +#define PXP_VF_BAR0_END_YSDM_ZONE_B (PXP_VF_BAR0_START_YSDM_ZONE_B \ + + \ + PXP_VF_BAR0_SDM_LENGTH_ZONE_B \ + - 1) -#define PXP_VF_BAR0_START_GRC 0x3E00 -#define PXP_VF_BAR0_GRC_LENGTH 0x200 -#define PXP_VF_BAR0_END_GRC (PXP_VF_BAR0_START_GRC + \ - PXP_VF_BAR0_GRC_LENGTH - 1) +#define PXP_VF_BAR0_START_PSDM_ZONE_B 0x3c00 
+#define PXP_VF_BAR0_END_PSDM_ZONE_B (PXP_VF_BAR0_START_PSDM_ZONE_B \ + + \ + PXP_VF_BAR0_SDM_LENGTH_ZONE_B \ + - 1) -#define PXP_VF_BAR0_START_SDM_ZONE_A 0x4000 -#define PXP_VF_BAR0_END_SDM_ZONE_A 0x10000 +#define PXP_VF_BAR0_START_SDM_ZONE_A 0x4000 +#define PXP_VF_BAR0_END_SDM_ZONE_A 0x10000 -#define PXP_VF_BAR0_START_IGU2 0x10000 -#define PXP_VF_BAR0_IGU2_LENGTH 0xD000 -#define PXP_VF_BAR0_END_IGU2 (PXP_VF_BAR0_START_IGU2 + \ - PXP_VF_BAR0_IGU2_LENGTH - 1) +#define PXP_VF_BAR0_GRC_WINDOW_LENGTH 32 -#define PXP_VF_BAR0_GRC_WINDOW_LENGTH 32 - -#define PXP_ILT_PAGE_SIZE_NUM_BITS_MIN 12 -#define PXP_ILT_BLOCK_FACTOR_MULTIPLIER 1024 +#define PXP_ILT_PAGE_SIZE_NUM_BITS_MIN 12 +#define PXP_ILT_BLOCK_FACTOR_MULTIPLIER 1024 /* ILT Records */ #define PXP_NUM_ILT_RECORDS_BB 7600 #define PXP_NUM_ILT_RECORDS_K2 11000 #define MAX_NUM_ILT_RECORDS MAX(PXP_NUM_ILT_RECORDS_BB, PXP_NUM_ILT_RECORDS_K2) - -/* Host Interface */ -#define PXP_QUEUES_ZONE_MAX_NUM 320 - +#define PXP_QUEUES_ZONE_MAX_NUM 320 /*****************/ /* PRM CONSTANTS */ /*****************/ #define PRM_DMA_PAD_BYTES_NUM 2 - -/*****************/ -/* SDMs CONSTANTS */ -/*****************/ - -#define SDM_OP_GEN_TRIG_NONE 0 +/******************/ +/* SDMs CONSTANTS */ +/******************/ +#define SDM_OP_GEN_TRIG_NONE 0 #define SDM_OP_GEN_TRIG_WAKE_THREAD 1 -#define SDM_OP_GEN_TRIG_AGG_INT 2 -#define SDM_OP_GEN_TRIG_LOADER 4 -#define SDM_OP_GEN_TRIG_INDICATE_ERROR 6 -#define SDM_OP_GEN_TRIG_INC_ORDER_CNT 9 +#define SDM_OP_GEN_TRIG_AGG_INT 2 +#define SDM_OP_GEN_TRIG_LOADER 4 +#define SDM_OP_GEN_TRIG_INDICATE_ERROR 6 +#define SDM_OP_GEN_TRIG_RELEASE_THREAD 7 -/********************/ -/* Completion types */ -/********************/ +#define SDM_COMP_TYPE_NONE 0 +#define SDM_COMP_TYPE_WAKE_THREAD 1 +#define SDM_COMP_TYPE_AGG_INT 2 +#define SDM_COMP_TYPE_CM 3 +#define SDM_COMP_TYPE_LOADER 4 +#define SDM_COMP_TYPE_PXP 5 +#define SDM_COMP_TYPE_INDICATE_ERROR 6 +#define SDM_COMP_TYPE_RELEASE_THREAD 7 +#define SDM_COMP_TYPE_RAM 
8 -#define SDM_COMP_TYPE_NONE 0 -#define SDM_COMP_TYPE_WAKE_THREAD 1 -#define SDM_COMP_TYPE_AGG_INT 2 -#define SDM_COMP_TYPE_CM 3 -#define SDM_COMP_TYPE_LOADER 4 -#define SDM_COMP_TYPE_PXP 5 -#define SDM_COMP_TYPE_INDICATE_ERROR 6 -#define SDM_COMP_TYPE_RELEASE_THREAD 7 -#define SDM_COMP_TYPE_RAM 8 -#define SDM_COMP_TYPE_INC_ORDER_CNT 9 - -/*****************/ -/* PBF CONSTANTS */ -/*****************/ +/******************/ +/* PBF CONSTANTS */ +/******************/ /* Number of PBF command queue lines. Each line is 32B. */ -#define PBF_MAX_CMD_LINES 3328 +#define PBF_MAX_CMD_LINES 3328 /* Number of BTB blocks. Each block is 256B. */ -#define BTB_MAX_BLOCKS_BB 1440 -#define BTB_MAX_BLOCKS_K2 1840 +#define BTB_MAX_BLOCKS 1440 + /*****************/ /* PRS CONSTANTS */ /*****************/ #define PRS_GFT_CAM_LINES_NO_MATCH 31 -/* Interrupt coalescing TimeSet */ +/* Async data KCQ CQE */ +struct async_data { + __le32 cid; + __le16 itid; + u8 error_code; + u8 fw_debug_param; +}; + struct coalescing_timeset { u8 value; #define COALESCING_TIMESET_TIMESET_MASK 0x7F @@ -656,32 +629,23 @@ struct common_queue_zone { __le16 reserved; }; -/* ETH Rx producers data */ struct eth_rx_prod_data { __le16 bd_prod; __le16 cqe_prod; }; -struct tcp_ulp_connect_done_params { - __le16 mss; - u8 snd_wnd_scale; - u8 flags; -#define TCP_ULP_CONNECT_DONE_PARAMS_TS_EN_MASK 0x1 -#define TCP_ULP_CONNECT_DONE_PARAMS_TS_EN_SHIFT 0 -#define TCP_ULP_CONNECT_DONE_PARAMS_RESERVED_MASK 0x7F -#define TCP_ULP_CONNECT_DONE_PARAMS_RESERVED_SHIFT 1 +struct regpair { + __le32 lo; + __le32 hi; }; -struct iscsi_connect_done_results { - __le16 icid; - __le16 conn_id; - struct tcp_ulp_connect_done_params params; +struct vf_pf_channel_eqe_data { + struct regpair msg_addr; }; struct iscsi_eqe_data { - __le16 icid; + __le32 cid; __le16 conn_id; - __le16 reserved; u8 error_code; u8 error_pdu_opcode_reserved; #define ISCSI_EQE_DATA_ERROR_PDU_OPCODE_MASK 0x3F @@ -692,6 +656,42 @@ struct iscsi_eqe_data { #define 
ISCSI_EQE_DATA_RESERVED0_SHIFT 7 }; +struct malicious_vf_eqe_data { + u8 vf_id; + u8 err_id; + __le16 reserved[3]; +}; + +struct initial_cleanup_eqe_data { + u8 vf_id; + u8 reserved[7]; +}; + +/* Event Data Union */ +union event_ring_data { + u8 bytes[8]; + struct vf_pf_channel_eqe_data vf_pf_channel; + struct iscsi_eqe_data iscsi_info; + struct malicious_vf_eqe_data malicious_vf; + struct initial_cleanup_eqe_data vf_init_cleanup; + struct regpair roce_handle; +}; + +/* Event Ring Entry */ +struct event_ring_entry { + u8 protocol_id; + u8 opcode; + __le16 reserved0; + __le16 echo; + u8 fw_return_code; + u8 flags; +#define EVENT_RING_ENTRY_ASYNC_MASK 0x1 +#define EVENT_RING_ENTRY_ASYNC_SHIFT 0 +#define EVENT_RING_ENTRY_RESERVED1_MASK 0x7F +#define EVENT_RING_ENTRY_RESERVED1_SHIFT 1 + union event_ring_data data; +}; + /* Multi function mode */ enum mf_mode { ERROR_MODE /* Unsupported mode */, @@ -702,43 +702,19 @@ enum mf_mode { /* Per-protocol connection types */ enum protocol_type { - PROTOCOLID_TCP_ULP, - PROTOCOLID_FCOE, + PROTOCOLID_ISCSI, + PROTOCOLID_RESERVED2, PROTOCOLID_ROCE, PROTOCOLID_CORE, PROTOCOLID_ETH, - PROTOCOLID_IWARP, - PROTOCOLID_RESERVED0, + PROTOCOLID_RESERVED4, + PROTOCOLID_RESERVED5, PROTOCOLID_PREROCE, PROTOCOLID_COMMON, - PROTOCOLID_RESERVED1, - PROTOCOLID_RDMA, - PROTOCOLID_SCSI, + PROTOCOLID_RESERVED6, MAX_PROTOCOL_TYPE }; -struct regpair { - __le32 lo; - __le32 hi; -}; - -/* RoCE Destroy Event Data */ -struct rdma_eqe_destroy_qp { - __le32 cid; - u8 reserved[4]; -}; - -/* RDMA Event Data Union */ -union rdma_eqe_data { - struct regpair async_handle; - struct rdma_eqe_destroy_qp rdma_destroy_qp_data; -}; - -struct tstorm_queue_zone { - __le32 reserved[2]; -}; - -/* Ustorm Queue Zone */ struct ustorm_eth_queue_zone { struct coalescing_timeset int_coalescing_timeset; u8 reserved[3]; @@ -749,71 +725,62 @@ struct ustorm_queue_zone { struct common_queue_zone common; }; -/* Status block structure */ +/* status block structure */ struct 
cau_pi_entry { - __le32 prod; -#define CAU_PI_ENTRY_PROD_VAL_MASK 0xFFFF -#define CAU_PI_ENTRY_PROD_VAL_SHIFT 0 -#define CAU_PI_ENTRY_PI_TIMESET_MASK 0x7F -#define CAU_PI_ENTRY_PI_TIMESET_SHIFT 16 -#define CAU_PI_ENTRY_FSM_SEL_MASK 0x1 -#define CAU_PI_ENTRY_FSM_SEL_SHIFT 23 -#define CAU_PI_ENTRY_RESERVED_MASK 0xFF -#define CAU_PI_ENTRY_RESERVED_SHIFT 24 + u32 prod; +#define CAU_PI_ENTRY_PROD_VAL_MASK 0xFFFF +#define CAU_PI_ENTRY_PROD_VAL_SHIFT 0 +#define CAU_PI_ENTRY_PI_TIMESET_MASK 0x7F +#define CAU_PI_ENTRY_PI_TIMESET_SHIFT 16 +#define CAU_PI_ENTRY_FSM_SEL_MASK 0x1 +#define CAU_PI_ENTRY_FSM_SEL_SHIFT 23 +#define CAU_PI_ENTRY_RESERVED_MASK 0xFF +#define CAU_PI_ENTRY_RESERVED_SHIFT 24 }; -/* Status block structure */ +/* status block structure */ struct cau_sb_entry { - __le32 data; -#define CAU_SB_ENTRY_SB_PROD_MASK 0xFFFFFF -#define CAU_SB_ENTRY_SB_PROD_SHIFT 0 -#define CAU_SB_ENTRY_STATE0_MASK 0xF -#define CAU_SB_ENTRY_STATE0_SHIFT 24 -#define CAU_SB_ENTRY_STATE1_MASK 0xF -#define CAU_SB_ENTRY_STATE1_SHIFT 28 - __le32 params; -#define CAU_SB_ENTRY_SB_TIMESET0_MASK 0x7F -#define CAU_SB_ENTRY_SB_TIMESET0_SHIFT 0 -#define CAU_SB_ENTRY_SB_TIMESET1_MASK 0x7F -#define CAU_SB_ENTRY_SB_TIMESET1_SHIFT 7 -#define CAU_SB_ENTRY_TIMER_RES0_MASK 0x3 -#define CAU_SB_ENTRY_TIMER_RES0_SHIFT 14 -#define CAU_SB_ENTRY_TIMER_RES1_MASK 0x3 -#define CAU_SB_ENTRY_TIMER_RES1_SHIFT 16 -#define CAU_SB_ENTRY_VF_NUMBER_MASK 0xFF -#define CAU_SB_ENTRY_VF_NUMBER_SHIFT 18 -#define CAU_SB_ENTRY_VF_VALID_MASK 0x1 -#define CAU_SB_ENTRY_VF_VALID_SHIFT 26 -#define CAU_SB_ENTRY_PF_NUMBER_MASK 0xF -#define CAU_SB_ENTRY_PF_NUMBER_SHIFT 27 -#define CAU_SB_ENTRY_TPH_MASK 0x1 -#define CAU_SB_ENTRY_TPH_SHIFT 31 + u32 data; +#define CAU_SB_ENTRY_SB_PROD_MASK 0xFFFFFF +#define CAU_SB_ENTRY_SB_PROD_SHIFT 0 +#define CAU_SB_ENTRY_STATE0_MASK 0xF +#define CAU_SB_ENTRY_STATE0_SHIFT 24 +#define CAU_SB_ENTRY_STATE1_MASK 0xF +#define CAU_SB_ENTRY_STATE1_SHIFT 28 + u32 params; +#define CAU_SB_ENTRY_SB_TIMESET0_MASK 
0x7F +#define CAU_SB_ENTRY_SB_TIMESET0_SHIFT 0 +#define CAU_SB_ENTRY_SB_TIMESET1_MASK 0x7F +#define CAU_SB_ENTRY_SB_TIMESET1_SHIFT 7 +#define CAU_SB_ENTRY_TIMER_RES0_MASK 0x3 +#define CAU_SB_ENTRY_TIMER_RES0_SHIFT 14 +#define CAU_SB_ENTRY_TIMER_RES1_MASK 0x3 +#define CAU_SB_ENTRY_TIMER_RES1_SHIFT 16 +#define CAU_SB_ENTRY_VF_NUMBER_MASK 0xFF +#define CAU_SB_ENTRY_VF_NUMBER_SHIFT 18 +#define CAU_SB_ENTRY_VF_VALID_MASK 0x1 +#define CAU_SB_ENTRY_VF_VALID_SHIFT 26 +#define CAU_SB_ENTRY_PF_NUMBER_MASK 0xF +#define CAU_SB_ENTRY_PF_NUMBER_SHIFT 27 +#define CAU_SB_ENTRY_TPH_MASK 0x1 +#define CAU_SB_ENTRY_TPH_SHIFT 31 }; -/* Igu cleanup bit values to distinguish between clean or producer consumer - * update. - */ -enum command_type_bit { - IGU_COMMAND_TYPE_NOP = 0, - IGU_COMMAND_TYPE_SET = 1, - MAX_COMMAND_TYPE_BIT -}; - -/* Core doorbell data */ +/* core doorbell data */ struct core_db_data { u8 params; -#define CORE_DB_DATA_DEST_MASK 0x3 -#define CORE_DB_DATA_DEST_SHIFT 0 -#define CORE_DB_DATA_AGG_CMD_MASK 0x3 -#define CORE_DB_DATA_AGG_CMD_SHIFT 2 -#define CORE_DB_DATA_BYPASS_EN_MASK 0x1 -#define CORE_DB_DATA_BYPASS_EN_SHIFT 4 -#define CORE_DB_DATA_RESERVED_MASK 0x1 -#define CORE_DB_DATA_RESERVED_SHIFT 5 -#define CORE_DB_DATA_AGG_VAL_SEL_MASK 0x3 -#define CORE_DB_DATA_AGG_VAL_SEL_SHIFT 6 - u8 agg_flags; - __le16 spq_prod; +#define CORE_DB_DATA_DEST_MASK 0x3 +#define CORE_DB_DATA_DEST_SHIFT 0 +#define CORE_DB_DATA_AGG_CMD_MASK 0x3 +#define CORE_DB_DATA_AGG_CMD_SHIFT 2 +#define CORE_DB_DATA_BYPASS_EN_MASK 0x1 +#define CORE_DB_DATA_BYPASS_EN_SHIFT 4 +#define CORE_DB_DATA_RESERVED_MASK 0x1 +#define CORE_DB_DATA_RESERVED_SHIFT 5 +#define CORE_DB_DATA_AGG_VAL_SEL_MASK 0x3 +#define CORE_DB_DATA_AGG_VAL_SEL_SHIFT 6 + u8 agg_flags; + __le16 spq_prod; }; /* Enum of doorbell aggregative command selection */ @@ -837,7 +804,7 @@ enum db_dest { /* Enum of doorbell DPM types */ enum db_dpm_type { DPM_LEGACY, - DPM_RDMA, + DPM_ROCE, DPM_L2_INLINE, DPM_L2_BD, MAX_DB_DPM_TYPE @@ -860,8 
+827,8 @@ struct db_l2_dpm_data { #define DB_L2_DPM_DATA_RESERVED0_SHIFT 27 #define DB_L2_DPM_DATA_SGE_NUM_MASK 0x7 #define DB_L2_DPM_DATA_SGE_NUM_SHIFT 28 -#define DB_L2_DPM_DATA_TGFS_SRC_EN_MASK 0x1 -#define DB_L2_DPM_DATA_TGFS_SRC_EN_SHIFT 31 +#define DB_L2_DPM_DATA_RESERVED1_MASK 0x1 +#define DB_L2_DPM_DATA_RESERVED1_SHIFT 31 }; /* Structure for SGE in a DPM doorbell of type DPM_L2_BD */ @@ -869,73 +836,69 @@ struct db_l2_dpm_sge { struct regpair addr; __le16 nbytes; __le16 bitfields; -#define DB_L2_DPM_SGE_TPH_ST_INDEX_MASK 0x1FF -#define DB_L2_DPM_SGE_TPH_ST_INDEX_SHIFT 0 -#define DB_L2_DPM_SGE_RESERVED0_MASK 0x3 -#define DB_L2_DPM_SGE_RESERVED0_SHIFT 9 -#define DB_L2_DPM_SGE_ST_VALID_MASK 0x1 -#define DB_L2_DPM_SGE_ST_VALID_SHIFT 11 -#define DB_L2_DPM_SGE_RESERVED1_MASK 0xF -#define DB_L2_DPM_SGE_RESERVED1_SHIFT 12 +#define DB_L2_DPM_SGE_TPH_ST_INDEX_MASK 0x1FF +#define DB_L2_DPM_SGE_TPH_ST_INDEX_SHIFT 0 +#define DB_L2_DPM_SGE_RESERVED0_MASK 0x3 +#define DB_L2_DPM_SGE_RESERVED0_SHIFT 9 +#define DB_L2_DPM_SGE_ST_VALID_MASK 0x1 +#define DB_L2_DPM_SGE_ST_VALID_SHIFT 11 +#define DB_L2_DPM_SGE_RESERVED1_MASK 0xF +#define DB_L2_DPM_SGE_RESERVED1_SHIFT 12 __le32 reserved2; }; /* Structure for doorbell address, in legacy mode */ struct db_legacy_addr { __le32 addr; -#define DB_LEGACY_ADDR_RESERVED0_MASK 0x3 -#define DB_LEGACY_ADDR_RESERVED0_SHIFT 0 -#define DB_LEGACY_ADDR_DEMS_MASK 0x7 -#define DB_LEGACY_ADDR_DEMS_SHIFT 2 -#define DB_LEGACY_ADDR_ICID_MASK 0x7FFFFFF -#define DB_LEGACY_ADDR_ICID_SHIFT 5 +#define DB_LEGACY_ADDR_RESERVED0_MASK 0x3 +#define DB_LEGACY_ADDR_RESERVED0_SHIFT 0 +#define DB_LEGACY_ADDR_DEMS_MASK 0x7 +#define DB_LEGACY_ADDR_DEMS_SHIFT 2 +#define DB_LEGACY_ADDR_ICID_MASK 0x7FFFFFF +#define DB_LEGACY_ADDR_ICID_SHIFT 5 }; /* Structure for doorbell address, in PWM mode */ struct db_pwm_addr { __le32 addr; #define DB_PWM_ADDR_RESERVED0_MASK 0x7 -#define DB_PWM_ADDR_RESERVED0_SHIFT 0 -#define DB_PWM_ADDR_OFFSET_MASK 0x7F +#define 
DB_PWM_ADDR_RESERVED0_SHIFT 0 +#define DB_PWM_ADDR_OFFSET_MASK 0x7F #define DB_PWM_ADDR_OFFSET_SHIFT 3 -#define DB_PWM_ADDR_WID_MASK 0x3 -#define DB_PWM_ADDR_WID_SHIFT 10 -#define DB_PWM_ADDR_DPI_MASK 0xFFFF -#define DB_PWM_ADDR_DPI_SHIFT 12 +#define DB_PWM_ADDR_WID_MASK 0x3 +#define DB_PWM_ADDR_WID_SHIFT 10 +#define DB_PWM_ADDR_DPI_MASK 0xFFFF +#define DB_PWM_ADDR_DPI_SHIFT 12 #define DB_PWM_ADDR_RESERVED1_MASK 0xF -#define DB_PWM_ADDR_RESERVED1_SHIFT 28 +#define DB_PWM_ADDR_RESERVED1_SHIFT 28 }; -/* Parameters to RDMA firmware, passed in EDPM doorbell */ -struct db_rdma_dpm_params { +/* Parameters to RoCE firmware, passed in EDPM doorbell */ +struct db_roce_dpm_params { __le32 params; -#define DB_RDMA_DPM_PARAMS_SIZE_MASK 0x3F -#define DB_RDMA_DPM_PARAMS_SIZE_SHIFT 0 -#define DB_RDMA_DPM_PARAMS_DPM_TYPE_MASK 0x3 -#define DB_RDMA_DPM_PARAMS_DPM_TYPE_SHIFT 6 -#define DB_RDMA_DPM_PARAMS_OPCODE_MASK 0xFF -#define DB_RDMA_DPM_PARAMS_OPCODE_SHIFT 8 -#define DB_RDMA_DPM_PARAMS_WQE_SIZE_MASK 0x7FF -#define DB_RDMA_DPM_PARAMS_WQE_SIZE_SHIFT 16 -#define DB_RDMA_DPM_PARAMS_RESERVED0_MASK 0x1 -#define DB_RDMA_DPM_PARAMS_RESERVED0_SHIFT 27 -#define DB_RDMA_DPM_PARAMS_ACK_REQUEST_MASK 0x1 -#define DB_RDMA_DPM_PARAMS_ACK_REQUEST_SHIFT 28 -#define DB_RDMA_DPM_PARAMS_S_FLG_MASK 0x1 -#define DB_RDMA_DPM_PARAMS_S_FLG_SHIFT 29 -#define DB_RDMA_DPM_PARAMS_COMPLETION_FLG_MASK 0x1 -#define DB_RDMA_DPM_PARAMS_COMPLETION_FLG_SHIFT 30 -#define DB_RDMA_DPM_PARAMS_CONN_TYPE_IS_IWARP_MASK 0x1 -#define DB_RDMA_DPM_PARAMS_CONN_TYPE_IS_IWARP_SHIFT 31 +#define DB_ROCE_DPM_PARAMS_SIZE_MASK 0x3F +#define DB_ROCE_DPM_PARAMS_SIZE_SHIFT 0 +#define DB_ROCE_DPM_PARAMS_DPM_TYPE_MASK 0x3 +#define DB_ROCE_DPM_PARAMS_DPM_TYPE_SHIFT 6 +#define DB_ROCE_DPM_PARAMS_OPCODE_MASK 0xFF +#define DB_ROCE_DPM_PARAMS_OPCODE_SHIFT 8 +#define DB_ROCE_DPM_PARAMS_WQE_SIZE_MASK 0x7FF +#define DB_ROCE_DPM_PARAMS_WQE_SIZE_SHIFT 16 +#define DB_ROCE_DPM_PARAMS_RESERVED0_MASK 0x1 +#define DB_ROCE_DPM_PARAMS_RESERVED0_SHIFT 27 
+#define DB_ROCE_DPM_PARAMS_COMPLETION_FLG_MASK 0x1 +#define DB_ROCE_DPM_PARAMS_COMPLETION_FLG_SHIFT 28 +#define DB_ROCE_DPM_PARAMS_S_FLG_MASK 0x1 +#define DB_ROCE_DPM_PARAMS_S_FLG_SHIFT 29 +#define DB_ROCE_DPM_PARAMS_RESERVED1_MASK 0x3 +#define DB_ROCE_DPM_PARAMS_RESERVED1_SHIFT 30 }; -/* Structure for doorbell data, in RDMA DPM mode, for the first doorbell in a - * DPM burst. - */ -struct db_rdma_dpm_data { +/* Structure for doorbell data, in ROCE DPM mode, for 1st db in a DPM burst */ +struct db_roce_dpm_data { __le16 icid; __le16 prod_val; - struct db_rdma_dpm_params params; + struct db_roce_dpm_params params; }; /* Igu interrupt command */ @@ -949,22 +912,22 @@ enum igu_int_cmd { /* IGU producer or consumer update command */ struct igu_prod_cons_update { - __le32 sb_id_and_flags; -#define IGU_PROD_CONS_UPDATE_SB_INDEX_MASK 0xFFFFFF -#define IGU_PROD_CONS_UPDATE_SB_INDEX_SHIFT 0 -#define IGU_PROD_CONS_UPDATE_UPDATE_FLAG_MASK 0x1 -#define IGU_PROD_CONS_UPDATE_UPDATE_FLAG_SHIFT 24 -#define IGU_PROD_CONS_UPDATE_ENABLE_INT_MASK 0x3 -#define IGU_PROD_CONS_UPDATE_ENABLE_INT_SHIFT 25 -#define IGU_PROD_CONS_UPDATE_SEGMENT_ACCESS_MASK 0x1 -#define IGU_PROD_CONS_UPDATE_SEGMENT_ACCESS_SHIFT 27 -#define IGU_PROD_CONS_UPDATE_TIMER_MASK_MASK 0x1 -#define IGU_PROD_CONS_UPDATE_TIMER_MASK_SHIFT 28 -#define IGU_PROD_CONS_UPDATE_RESERVED0_MASK 0x3 -#define IGU_PROD_CONS_UPDATE_RESERVED0_SHIFT 29 -#define IGU_PROD_CONS_UPDATE_COMMAND_TYPE_MASK 0x1 -#define IGU_PROD_CONS_UPDATE_COMMAND_TYPE_SHIFT 31 - __le32 reserved1; + u32 sb_id_and_flags; +#define IGU_PROD_CONS_UPDATE_SB_INDEX_MASK 0xFFFFFF +#define IGU_PROD_CONS_UPDATE_SB_INDEX_SHIFT 0 +#define IGU_PROD_CONS_UPDATE_UPDATE_FLAG_MASK 0x1 +#define IGU_PROD_CONS_UPDATE_UPDATE_FLAG_SHIFT 24 +#define IGU_PROD_CONS_UPDATE_ENABLE_INT_MASK 0x3 +#define IGU_PROD_CONS_UPDATE_ENABLE_INT_SHIFT 25 +#define IGU_PROD_CONS_UPDATE_SEGMENT_ACCESS_MASK 0x1 +#define IGU_PROD_CONS_UPDATE_SEGMENT_ACCESS_SHIFT 27 +#define 
IGU_PROD_CONS_UPDATE_TIMER_MASK_MASK 0x1 +#define IGU_PROD_CONS_UPDATE_TIMER_MASK_SHIFT 28 +#define IGU_PROD_CONS_UPDATE_RESERVED0_MASK 0x3 +#define IGU_PROD_CONS_UPDATE_RESERVED0_SHIFT 29 +#define IGU_PROD_CONS_UPDATE_COMMAND_TYPE_MASK 0x1 +#define IGU_PROD_CONS_UPDATE_COMMAND_TYPE_SHIFT 31 + u32 reserved1; }; /* Igu segments access for default status block only */ @@ -974,406 +937,333 @@ enum igu_seg_access { MAX_IGU_SEG_ACCESS }; -/* Enumeration for L3 type field of parsing_and_err_flags. - * L3Type: 0 - unknown (not ip), 1 - Ipv4, 2 - Ipv6 - * (This field can be filled according to the last-ethertype) - */ -enum l3_type { - e_l3_type_unknown, - e_l3_type_ipv4, - e_l3_type_ipv6, - MAX_L3_TYPE -}; - -/* Enumeration for l4Protocol field of parsing_and_err_flags. - * L4-protocol: 0 - none, 1 - TCP, 2 - UDP. - * If the packet is IPv4 fragment, and its not the first fragment, the - * protocol-type should be set to none. - */ -enum l4_protocol { - e_l4_protocol_none, - e_l4_protocol_tcp, - e_l4_protocol_udp, - MAX_L4_PROTOCOL -}; - -/* Parsing and error flags field */ struct parsing_and_err_flags { __le16 flags; -#define PARSING_AND_ERR_FLAGS_L3TYPE_MASK 0x3 -#define PARSING_AND_ERR_FLAGS_L3TYPE_SHIFT 0 -#define PARSING_AND_ERR_FLAGS_L4PROTOCOL_MASK 0x3 -#define PARSING_AND_ERR_FLAGS_L4PROTOCOL_SHIFT 2 -#define PARSING_AND_ERR_FLAGS_IPV4FRAG_MASK 0x1 -#define PARSING_AND_ERR_FLAGS_IPV4FRAG_SHIFT 4 -#define PARSING_AND_ERR_FLAGS_TAG8021QEXIST_MASK 0x1 -#define PARSING_AND_ERR_FLAGS_TAG8021QEXIST_SHIFT 5 -#define PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED_MASK 0x1 -#define PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED_SHIFT 6 -#define PARSING_AND_ERR_FLAGS_TIMESYNCPKT_MASK 0x1 -#define PARSING_AND_ERR_FLAGS_TIMESYNCPKT_SHIFT 7 -#define PARSING_AND_ERR_FLAGS_TIMESTAMPRECORDED_MASK 0x1 -#define PARSING_AND_ERR_FLAGS_TIMESTAMPRECORDED_SHIFT 8 -#define PARSING_AND_ERR_FLAGS_IPHDRERROR_MASK 0x1 -#define PARSING_AND_ERR_FLAGS_IPHDRERROR_SHIFT 9 -#define 
PARSING_AND_ERR_FLAGS_L4CHKSMERROR_MASK 0x1 -#define PARSING_AND_ERR_FLAGS_L4CHKSMERROR_SHIFT 10 -#define PARSING_AND_ERR_FLAGS_TUNNELEXIST_MASK 0x1 -#define PARSING_AND_ERR_FLAGS_TUNNELEXIST_SHIFT 11 -#define PARSING_AND_ERR_FLAGS_TUNNEL8021QTAGEXIST_MASK 0x1 -#define PARSING_AND_ERR_FLAGS_TUNNEL8021QTAGEXIST_SHIFT 12 -#define PARSING_AND_ERR_FLAGS_TUNNELIPHDRERROR_MASK 0x1 -#define PARSING_AND_ERR_FLAGS_TUNNELIPHDRERROR_SHIFT 13 -#define PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMWASCALCULATED_MASK 0x1 -#define PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMWASCALCULATED_SHIFT 14 -#define PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMERROR_MASK 0x1 -#define PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMERROR_SHIFT 15 +#define PARSING_AND_ERR_FLAGS_L3TYPE_MASK 0x3 +#define PARSING_AND_ERR_FLAGS_L3TYPE_SHIFT 0 +#define PARSING_AND_ERR_FLAGS_L4PROTOCOL_MASK 0x3 +#define PARSING_AND_ERR_FLAGS_L4PROTOCOL_SHIFT 2 +#define PARSING_AND_ERR_FLAGS_IPV4FRAG_MASK 0x1 +#define PARSING_AND_ERR_FLAGS_IPV4FRAG_SHIFT 4 +#define PARSING_AND_ERR_FLAGS_TAG8021QEXIST_MASK 0x1 +#define PARSING_AND_ERR_FLAGS_TAG8021QEXIST_SHIFT 5 +#define PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED_MASK 0x1 +#define PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED_SHIFT 6 +#define PARSING_AND_ERR_FLAGS_TIMESYNCPKT_MASK 0x1 +#define PARSING_AND_ERR_FLAGS_TIMESYNCPKT_SHIFT 7 +#define PARSING_AND_ERR_FLAGS_TIMESTAMPRECORDED_MASK 0x1 +#define PARSING_AND_ERR_FLAGS_TIMESTAMPRECORDED_SHIFT 8 +#define PARSING_AND_ERR_FLAGS_IPHDRERROR_MASK 0x1 +#define PARSING_AND_ERR_FLAGS_IPHDRERROR_SHIFT 9 +#define PARSING_AND_ERR_FLAGS_L4CHKSMERROR_MASK 0x1 +#define PARSING_AND_ERR_FLAGS_L4CHKSMERROR_SHIFT 10 +#define PARSING_AND_ERR_FLAGS_TUNNELEXIST_MASK 0x1 +#define PARSING_AND_ERR_FLAGS_TUNNELEXIST_SHIFT 11 +#define PARSING_AND_ERR_FLAGS_TUNNEL8021QTAGEXIST_MASK 0x1 +#define PARSING_AND_ERR_FLAGS_TUNNEL8021QTAGEXIST_SHIFT 12 +#define PARSING_AND_ERR_FLAGS_TUNNELIPHDRERROR_MASK 0x1 +#define PARSING_AND_ERR_FLAGS_TUNNELIPHDRERROR_SHIFT 13 +#define 
PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMWASCALCULATED_MASK 0x1 +#define PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMWASCALCULATED_SHIFT 14 +#define PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMERROR_MASK 0x1 +#define PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMERROR_SHIFT 15 }; -/* Parsing error flags bitmap */ -struct parsing_err_flags { - __le16 flags; -#define PARSING_ERR_FLAGS_MAC_ERROR_MASK 0x1 -#define PARSING_ERR_FLAGS_MAC_ERROR_SHIFT 0 -#define PARSING_ERR_FLAGS_TRUNC_ERROR_MASK 0x1 -#define PARSING_ERR_FLAGS_TRUNC_ERROR_SHIFT 1 -#define PARSING_ERR_FLAGS_PKT_TOO_SMALL_MASK 0x1 -#define PARSING_ERR_FLAGS_PKT_TOO_SMALL_SHIFT 2 -#define PARSING_ERR_FLAGS_ANY_HDR_MISSING_TAG_MASK 0x1 -#define PARSING_ERR_FLAGS_ANY_HDR_MISSING_TAG_SHIFT 3 -#define PARSING_ERR_FLAGS_ANY_HDR_IP_VER_MISMTCH_MASK 0x1 -#define PARSING_ERR_FLAGS_ANY_HDR_IP_VER_MISMTCH_SHIFT 4 -#define PARSING_ERR_FLAGS_ANY_HDR_IP_V4_HDR_LEN_TOO_SMALL_MASK 0x1 -#define PARSING_ERR_FLAGS_ANY_HDR_IP_V4_HDR_LEN_TOO_SMALL_SHIFT 5 -#define PARSING_ERR_FLAGS_ANY_HDR_IP_BAD_TOTAL_LEN_MASK 0x1 -#define PARSING_ERR_FLAGS_ANY_HDR_IP_BAD_TOTAL_LEN_SHIFT 6 -#define PARSING_ERR_FLAGS_IP_V4_CHKSM_ERROR_MASK 0x1 -#define PARSING_ERR_FLAGS_IP_V4_CHKSM_ERROR_SHIFT 7 -#define PARSING_ERR_FLAGS_ANY_HDR_L4_IP_LEN_MISMTCH_MASK 0x1 -#define PARSING_ERR_FLAGS_ANY_HDR_L4_IP_LEN_MISMTCH_SHIFT 8 -#define PARSING_ERR_FLAGS_ZERO_UDP_IP_V6_CHKSM_MASK 0x1 -#define PARSING_ERR_FLAGS_ZERO_UDP_IP_V6_CHKSM_SHIFT 9 -#define PARSING_ERR_FLAGS_INNER_L4_CHKSM_ERROR_MASK 0x1 -#define PARSING_ERR_FLAGS_INNER_L4_CHKSM_ERROR_SHIFT 10 -#define PARSING_ERR_FLAGS_ANY_HDR_ZERO_TTL_OR_HOP_LIM_MASK 0x1 -#define PARSING_ERR_FLAGS_ANY_HDR_ZERO_TTL_OR_HOP_LIM_SHIFT 11 -#define PARSING_ERR_FLAGS_NON_8021Q_TAG_EXISTS_IN_BOTH_HDRS_MASK 0x1 -#define PARSING_ERR_FLAGS_NON_8021Q_TAG_EXISTS_IN_BOTH_HDRS_SHIFT 12 -#define PARSING_ERR_FLAGS_GENEVE_OPTION_OVERSIZED_MASK 0x1 -#define PARSING_ERR_FLAGS_GENEVE_OPTION_OVERSIZED_SHIFT 13 -#define PARSING_ERR_FLAGS_TUNNEL_IP_V4_CHKSM_ERROR_MASK 0x1 
-#define PARSING_ERR_FLAGS_TUNNEL_IP_V4_CHKSM_ERROR_SHIFT 14 -#define PARSING_ERR_FLAGS_TUNNEL_L4_CHKSM_ERROR_MASK 0x1 -#define PARSING_ERR_FLAGS_TUNNEL_L4_CHKSM_ERROR_SHIFT 15 -}; - -/* Pb context */ struct pb_context { __le32 crc[4]; }; -/* Concrete Function ID */ struct pxp_concrete_fid { __le16 fid; -#define PXP_CONCRETE_FID_PFID_MASK 0xF -#define PXP_CONCRETE_FID_PFID_SHIFT 0 -#define PXP_CONCRETE_FID_PORT_MASK 0x3 -#define PXP_CONCRETE_FID_PORT_SHIFT 4 -#define PXP_CONCRETE_FID_PATH_MASK 0x1 -#define PXP_CONCRETE_FID_PATH_SHIFT 6 -#define PXP_CONCRETE_FID_VFVALID_MASK 0x1 -#define PXP_CONCRETE_FID_VFVALID_SHIFT 7 -#define PXP_CONCRETE_FID_VFID_MASK 0xFF -#define PXP_CONCRETE_FID_VFID_SHIFT 8 +#define PXP_CONCRETE_FID_PFID_MASK 0xF +#define PXP_CONCRETE_FID_PFID_SHIFT 0 +#define PXP_CONCRETE_FID_PORT_MASK 0x3 +#define PXP_CONCRETE_FID_PORT_SHIFT 4 +#define PXP_CONCRETE_FID_PATH_MASK 0x1 +#define PXP_CONCRETE_FID_PATH_SHIFT 6 +#define PXP_CONCRETE_FID_VFVALID_MASK 0x1 +#define PXP_CONCRETE_FID_VFVALID_SHIFT 7 +#define PXP_CONCRETE_FID_VFID_MASK 0xFF +#define PXP_CONCRETE_FID_VFID_SHIFT 8 }; -/* Concrete Function ID */ struct pxp_pretend_concrete_fid { __le16 fid; -#define PXP_PRETEND_CONCRETE_FID_PFID_MASK 0xF -#define PXP_PRETEND_CONCRETE_FID_PFID_SHIFT 0 -#define PXP_PRETEND_CONCRETE_FID_RESERVED_MASK 0x7 -#define PXP_PRETEND_CONCRETE_FID_RESERVED_SHIFT 4 -#define PXP_PRETEND_CONCRETE_FID_VFVALID_MASK 0x1 -#define PXP_PRETEND_CONCRETE_FID_VFVALID_SHIFT 7 -#define PXP_PRETEND_CONCRETE_FID_VFID_MASK 0xFF -#define PXP_PRETEND_CONCRETE_FID_VFID_SHIFT 8 +#define PXP_PRETEND_CONCRETE_FID_PFID_MASK 0xF +#define PXP_PRETEND_CONCRETE_FID_PFID_SHIFT 0 +#define PXP_PRETEND_CONCRETE_FID_RESERVED_MASK 0x7 +#define PXP_PRETEND_CONCRETE_FID_RESERVED_SHIFT 4 +#define PXP_PRETEND_CONCRETE_FID_VFVALID_MASK 0x1 +#define PXP_PRETEND_CONCRETE_FID_VFVALID_SHIFT 7 +#define PXP_PRETEND_CONCRETE_FID_VFID_MASK 0xFF +#define PXP_PRETEND_CONCRETE_FID_VFID_SHIFT 8 }; -/* Function ID */ 
union pxp_pretend_fid { struct pxp_pretend_concrete_fid concrete_fid; - __le16 opaque_fid; + __le16 opaque_fid; }; -/* Pxp Pretend Command Register */ +/* Pxp Pretend Command Register. */ struct pxp_pretend_cmd { - union pxp_pretend_fid fid; - __le16 control; -#define PXP_PRETEND_CMD_PATH_MASK 0x1 -#define PXP_PRETEND_CMD_PATH_SHIFT 0 -#define PXP_PRETEND_CMD_USE_PORT_MASK 0x1 -#define PXP_PRETEND_CMD_USE_PORT_SHIFT 1 -#define PXP_PRETEND_CMD_PORT_MASK 0x3 -#define PXP_PRETEND_CMD_PORT_SHIFT 2 -#define PXP_PRETEND_CMD_RESERVED0_MASK 0xF -#define PXP_PRETEND_CMD_RESERVED0_SHIFT 4 -#define PXP_PRETEND_CMD_RESERVED1_MASK 0xF -#define PXP_PRETEND_CMD_RESERVED1_SHIFT 8 -#define PXP_PRETEND_CMD_PRETEND_PATH_MASK 0x1 -#define PXP_PRETEND_CMD_PRETEND_PATH_SHIFT 12 -#define PXP_PRETEND_CMD_PRETEND_PORT_MASK 0x1 -#define PXP_PRETEND_CMD_PRETEND_PORT_SHIFT 13 -#define PXP_PRETEND_CMD_PRETEND_FUNCTION_MASK 0x1 -#define PXP_PRETEND_CMD_PRETEND_FUNCTION_SHIFT 14 -#define PXP_PRETEND_CMD_IS_CONCRETE_MASK 0x1 -#define PXP_PRETEND_CMD_IS_CONCRETE_SHIFT 15 + union pxp_pretend_fid fid; + __le16 control; +#define PXP_PRETEND_CMD_PATH_MASK 0x1 +#define PXP_PRETEND_CMD_PATH_SHIFT 0 +#define PXP_PRETEND_CMD_USE_PORT_MASK 0x1 +#define PXP_PRETEND_CMD_USE_PORT_SHIFT 1 +#define PXP_PRETEND_CMD_PORT_MASK 0x3 +#define PXP_PRETEND_CMD_PORT_SHIFT 2 +#define PXP_PRETEND_CMD_RESERVED0_MASK 0xF +#define PXP_PRETEND_CMD_RESERVED0_SHIFT 4 +#define PXP_PRETEND_CMD_RESERVED1_MASK 0xF +#define PXP_PRETEND_CMD_RESERVED1_SHIFT 8 +#define PXP_PRETEND_CMD_PRETEND_PATH_MASK 0x1 +#define PXP_PRETEND_CMD_PRETEND_PATH_SHIFT 12 +#define PXP_PRETEND_CMD_PRETEND_PORT_MASK 0x1 +#define PXP_PRETEND_CMD_PRETEND_PORT_SHIFT 13 +#define PXP_PRETEND_CMD_PRETEND_FUNCTION_MASK 0x1 +#define PXP_PRETEND_CMD_PRETEND_FUNCTION_SHIFT 14 +#define PXP_PRETEND_CMD_IS_CONCRETE_MASK 0x1 +#define PXP_PRETEND_CMD_IS_CONCRETE_SHIFT 15 }; -/* PTT Record in PXP Admin Window */ +/* PTT Record in PXP Admin Window. 
*/ struct pxp_ptt_entry { - __le32 offset; -#define PXP_PTT_ENTRY_OFFSET_MASK 0x7FFFFF -#define PXP_PTT_ENTRY_OFFSET_SHIFT 0 -#define PXP_PTT_ENTRY_RESERVED0_MASK 0x1FF -#define PXP_PTT_ENTRY_RESERVED0_SHIFT 23 - struct pxp_pretend_cmd pretend; + __le32 offset; +#define PXP_PTT_ENTRY_OFFSET_MASK 0x7FFFFF +#define PXP_PTT_ENTRY_OFFSET_SHIFT 0 +#define PXP_PTT_ENTRY_RESERVED0_MASK 0x1FF +#define PXP_PTT_ENTRY_RESERVED0_SHIFT 23 + struct pxp_pretend_cmd pretend; }; -/* VF Zone A Permission Register */ +/* VF Zone A Permission Register. */ struct pxp_vf_zone_a_permission { __le32 control; -#define PXP_VF_ZONE_A_PERMISSION_VFID_MASK 0xFF -#define PXP_VF_ZONE_A_PERMISSION_VFID_SHIFT 0 -#define PXP_VF_ZONE_A_PERMISSION_VALID_MASK 0x1 -#define PXP_VF_ZONE_A_PERMISSION_VALID_SHIFT 8 -#define PXP_VF_ZONE_A_PERMISSION_RESERVED0_MASK 0x7F -#define PXP_VF_ZONE_A_PERMISSION_RESERVED0_SHIFT 9 -#define PXP_VF_ZONE_A_PERMISSION_RESERVED1_MASK 0xFFFF -#define PXP_VF_ZONE_A_PERMISSION_RESERVED1_SHIFT 16 +#define PXP_VF_ZONE_A_PERMISSION_VFID_MASK 0xFF +#define PXP_VF_ZONE_A_PERMISSION_VFID_SHIFT 0 +#define PXP_VF_ZONE_A_PERMISSION_VALID_MASK 0x1 +#define PXP_VF_ZONE_A_PERMISSION_VALID_SHIFT 8 +#define PXP_VF_ZONE_A_PERMISSION_RESERVED0_MASK 0x7F +#define PXP_VF_ZONE_A_PERMISSION_RESERVED0_SHIFT 9 +#define PXP_VF_ZONE_A_PERMISSION_RESERVED1_MASK 0xFFFF +#define PXP_VF_ZONE_A_PERMISSION_RESERVED1_SHIFT 16 }; -/* Rdif context */ +/* RSS hash type */ struct rdif_task_context { __le32 initial_ref_tag; __le16 app_tag_value; __le16 app_tag_mask; u8 flags0; -#define RDIF_TASK_CONTEXT_IGNORE_APP_TAG_MASK 0x1 -#define RDIF_TASK_CONTEXT_IGNORE_APP_TAG_SHIFT 0 -#define RDIF_TASK_CONTEXT_INITIAL_REF_TAG_VALID_MASK 0x1 -#define RDIF_TASK_CONTEXT_INITIAL_REF_TAG_VALID_SHIFT 1 -#define RDIF_TASK_CONTEXT_HOST_GUARD_TYPE_MASK 0x1 -#define RDIF_TASK_CONTEXT_HOST_GUARD_TYPE_SHIFT 2 -#define RDIF_TASK_CONTEXT_SET_ERROR_WITH_EOP_MASK 0x1 -#define RDIF_TASK_CONTEXT_SET_ERROR_WITH_EOP_SHIFT 3 -#define 
RDIF_TASK_CONTEXT_PROTECTION_TYPE_MASK 0x3 -#define RDIF_TASK_CONTEXT_PROTECTION_TYPE_SHIFT 4 -#define RDIF_TASK_CONTEXT_CRC_SEED_MASK 0x1 -#define RDIF_TASK_CONTEXT_CRC_SEED_SHIFT 6 -#define RDIF_TASK_CONTEXT_KEEP_REF_TAG_CONST_MASK 0x1 -#define RDIF_TASK_CONTEXT_KEEP_REF_TAG_CONST_SHIFT 7 +#define RDIF_TASK_CONTEXT_IGNOREAPPTAG_MASK 0x1 +#define RDIF_TASK_CONTEXT_IGNOREAPPTAG_SHIFT 0 +#define RDIF_TASK_CONTEXT_INITIALREFTAGVALID_MASK 0x1 +#define RDIF_TASK_CONTEXT_INITIALREFTAGVALID_SHIFT 1 +#define RDIF_TASK_CONTEXT_HOSTGUARDTYPE_MASK 0x1 +#define RDIF_TASK_CONTEXT_HOSTGUARDTYPE_SHIFT 2 +#define RDIF_TASK_CONTEXT_SETERRORWITHEOP_MASK 0x1 +#define RDIF_TASK_CONTEXT_SETERRORWITHEOP_SHIFT 3 +#define RDIF_TASK_CONTEXT_PROTECTIONTYPE_MASK 0x3 +#define RDIF_TASK_CONTEXT_PROTECTIONTYPE_SHIFT 4 +#define RDIF_TASK_CONTEXT_CRC_SEED_MASK 0x1 +#define RDIF_TASK_CONTEXT_CRC_SEED_SHIFT 6 +#define RDIF_TASK_CONTEXT_KEEPREFTAGCONST_MASK 0x1 +#define RDIF_TASK_CONTEXT_KEEPREFTAGCONST_SHIFT 7 u8 partial_dif_data[7]; __le16 partial_crc_value; __le16 partial_checksum_value; __le32 offset_in_io; __le16 flags1; -#define RDIF_TASK_CONTEXT_VALIDATE_GUARD_MASK 0x1 -#define RDIF_TASK_CONTEXT_VALIDATE_GUARD_SHIFT 0 -#define RDIF_TASK_CONTEXT_VALIDATE_APP_TAG_MASK 0x1 -#define RDIF_TASK_CONTEXT_VALIDATE_APP_TAG_SHIFT 1 -#define RDIF_TASK_CONTEXT_VALIDATE_REF_TAG_MASK 0x1 -#define RDIF_TASK_CONTEXT_VALIDATE_REF_TAG_SHIFT 2 -#define RDIF_TASK_CONTEXT_FORWARD_GUARD_MASK 0x1 -#define RDIF_TASK_CONTEXT_FORWARD_GUARD_SHIFT 3 -#define RDIF_TASK_CONTEXT_FORWARD_APP_TAG_MASK 0x1 -#define RDIF_TASK_CONTEXT_FORWARD_APP_TAG_SHIFT 4 -#define RDIF_TASK_CONTEXT_FORWARD_REF_TAG_MASK 0x1 -#define RDIF_TASK_CONTEXT_FORWARD_REF_TAG_SHIFT 5 -#define RDIF_TASK_CONTEXT_INTERVAL_SIZE_MASK 0x7 -#define RDIF_TASK_CONTEXT_INTERVAL_SIZE_SHIFT 6 -#define RDIF_TASK_CONTEXT_HOST_INTERFACE_MASK 0x3 -#define RDIF_TASK_CONTEXT_HOST_INTERFACE_SHIFT 9 -#define RDIF_TASK_CONTEXT_DIF_BEFORE_DATA_MASK 0x1 -#define 
RDIF_TASK_CONTEXT_DIF_BEFORE_DATA_SHIFT 11 -#define RDIF_TASK_CONTEXT_RESERVED0_MASK 0x1 -#define RDIF_TASK_CONTEXT_RESERVED0_SHIFT 12 -#define RDIF_TASK_CONTEXT_NETWORK_INTERFACE_MASK 0x1 -#define RDIF_TASK_CONTEXT_NETWORK_INTERFACE_SHIFT 13 -#define RDIF_TASK_CONTEXT_FORWARD_APP_TAG_WITH_MASK_MASK 0x1 -#define RDIF_TASK_CONTEXT_FORWARD_APP_TAG_WITH_MASK_SHIFT 14 -#define RDIF_TASK_CONTEXT_FORWARD_REF_TAG_WITH_MASK_MASK 0x1 -#define RDIF_TASK_CONTEXT_FORWARD_REF_TAG_WITH_MASK_SHIFT 15 +#define RDIF_TASK_CONTEXT_VALIDATEGUARD_MASK 0x1 +#define RDIF_TASK_CONTEXT_VALIDATEGUARD_SHIFT 0 +#define RDIF_TASK_CONTEXT_VALIDATEAPPTAG_MASK 0x1 +#define RDIF_TASK_CONTEXT_VALIDATEAPPTAG_SHIFT 1 +#define RDIF_TASK_CONTEXT_VALIDATEREFTAG_MASK 0x1 +#define RDIF_TASK_CONTEXT_VALIDATEREFTAG_SHIFT 2 +#define RDIF_TASK_CONTEXT_FORWARDGUARD_MASK 0x1 +#define RDIF_TASK_CONTEXT_FORWARDGUARD_SHIFT 3 +#define RDIF_TASK_CONTEXT_FORWARDAPPTAG_MASK 0x1 +#define RDIF_TASK_CONTEXT_FORWARDAPPTAG_SHIFT 4 +#define RDIF_TASK_CONTEXT_FORWARDREFTAG_MASK 0x1 +#define RDIF_TASK_CONTEXT_FORWARDREFTAG_SHIFT 5 +#define RDIF_TASK_CONTEXT_INTERVALSIZE_MASK 0x7 +#define RDIF_TASK_CONTEXT_INTERVALSIZE_SHIFT 6 +#define RDIF_TASK_CONTEXT_HOSTINTERFACE_MASK 0x3 +#define RDIF_TASK_CONTEXT_HOSTINTERFACE_SHIFT 9 +#define RDIF_TASK_CONTEXT_DIFBEFOREDATA_MASK 0x1 +#define RDIF_TASK_CONTEXT_DIFBEFOREDATA_SHIFT 11 +#define RDIF_TASK_CONTEXT_RESERVED0_MASK 0x1 +#define RDIF_TASK_CONTEXT_RESERVED0_SHIFT 12 +#define RDIF_TASK_CONTEXT_NETWORKINTERFACE_MASK 0x1 +#define RDIF_TASK_CONTEXT_NETWORKINTERFACE_SHIFT 13 +#define RDIF_TASK_CONTEXT_FORWARDAPPTAGWITHMASK_MASK 0x1 +#define RDIF_TASK_CONTEXT_FORWARDAPPTAGWITHMASK_SHIFT 14 +#define RDIF_TASK_CONTEXT_FORWARDREFTAGWITHMASK_MASK 0x1 +#define RDIF_TASK_CONTEXT_FORWARDREFTAGWITHMASK_SHIFT 15 __le16 state; -#define RDIF_TASK_CONTEXT_RECEIVED_DIF_BYTES_LEFT_MASK 0xF -#define RDIF_TASK_CONTEXT_RECEIVED_DIF_BYTES_LEFT_SHIFT 0 -#define 
RDIF_TASK_CONTEXT_TRANSMITED_DIF_BYTES_LEFT_MASK 0xF -#define RDIF_TASK_CONTEXT_TRANSMITED_DIF_BYTES_LEFT_SHIFT 4 -#define RDIF_TASK_CONTEXT_ERROR_IN_IO_MASK 0x1 -#define RDIF_TASK_CONTEXT_ERROR_IN_IO_SHIFT 8 -#define RDIF_TASK_CONTEXT_CHECKSUM_OVERFLOW_MASK 0x1 -#define RDIF_TASK_CONTEXT_CHECKSUM_OVERFLOW_SHIFT 9 -#define RDIF_TASK_CONTEXT_REF_TAG_MASK_MASK 0xF -#define RDIF_TASK_CONTEXT_REF_TAG_MASK_SHIFT 10 -#define RDIF_TASK_CONTEXT_RESERVED1_MASK 0x3 -#define RDIF_TASK_CONTEXT_RESERVED1_SHIFT 14 +#define RDIF_TASK_CONTEXT_RECEIVEDDIFBYTESLEFT_MASK 0xF +#define RDIF_TASK_CONTEXT_RECEIVEDDIFBYTESLEFT_SHIFT 0 +#define RDIF_TASK_CONTEXT_TRANSMITEDDIFBYTESLEFT_MASK 0xF +#define RDIF_TASK_CONTEXT_TRANSMITEDDIFBYTESLEFT_SHIFT 4 +#define RDIF_TASK_CONTEXT_ERRORINIO_MASK 0x1 +#define RDIF_TASK_CONTEXT_ERRORINIO_SHIFT 8 +#define RDIF_TASK_CONTEXT_CHECKSUMOVERFLOW_MASK 0x1 +#define RDIF_TASK_CONTEXT_CHECKSUMOVERFLOW_SHIFT 9 +#define RDIF_TASK_CONTEXT_REFTAGMASK_MASK 0xF +#define RDIF_TASK_CONTEXT_REFTAGMASK_SHIFT 10 +#define RDIF_TASK_CONTEXT_RESERVED1_MASK 0x3 +#define RDIF_TASK_CONTEXT_RESERVED1_SHIFT 14 __le32 reserved2; }; -/* Status block structure */ -struct status_block_e4 { - __le16 pi_array[PIS_PER_SB_E4]; - __le32 sb_num; -#define STATUS_BLOCK_E4_SB_NUM_MASK 0x1FF -#define STATUS_BLOCK_E4_SB_NUM_SHIFT 0 -#define STATUS_BLOCK_E4_ZERO_PAD_MASK 0x7F -#define STATUS_BLOCK_E4_ZERO_PAD_SHIFT 9 -#define STATUS_BLOCK_E4_ZERO_PAD2_MASK 0xFFFF -#define STATUS_BLOCK_E4_ZERO_PAD2_SHIFT 16 - __le32 prod_index; -#define STATUS_BLOCK_E4_PROD_INDEX_MASK 0xFFFFFF -#define STATUS_BLOCK_E4_PROD_INDEX_SHIFT 0 -#define STATUS_BLOCK_E4_ZERO_PAD3_MASK 0xFF -#define STATUS_BLOCK_E4_ZERO_PAD3_SHIFT 24 +/* RSS hash type */ +enum rss_hash_type { + RSS_HASH_TYPE_DEFAULT = 0, + RSS_HASH_TYPE_IPV4 = 1, + RSS_HASH_TYPE_TCP_IPV4 = 2, + RSS_HASH_TYPE_IPV6 = 3, + RSS_HASH_TYPE_TCP_IPV6 = 4, + RSS_HASH_TYPE_UDP_IPV4 = 5, + RSS_HASH_TYPE_UDP_IPV6 = 6, + MAX_RSS_HASH_TYPE +}; + +/* status block 
structure */ +struct status_block { + __le16 pi_array[PIS_PER_SB]; + __le32 sb_num; +#define STATUS_BLOCK_SB_NUM_MASK 0x1FF +#define STATUS_BLOCK_SB_NUM_SHIFT 0 +#define STATUS_BLOCK_ZERO_PAD_MASK 0x7F +#define STATUS_BLOCK_ZERO_PAD_SHIFT 9 +#define STATUS_BLOCK_ZERO_PAD2_MASK 0xFFFF +#define STATUS_BLOCK_ZERO_PAD2_SHIFT 16 + __le32 prod_index; +#define STATUS_BLOCK_PROD_INDEX_MASK 0xFFFFFF +#define STATUS_BLOCK_PROD_INDEX_SHIFT 0 +#define STATUS_BLOCK_ZERO_PAD3_MASK 0xFF +#define STATUS_BLOCK_ZERO_PAD3_SHIFT 24 }; -/* Tdif context */ struct tdif_task_context { __le32 initial_ref_tag; __le16 app_tag_value; __le16 app_tag_mask; - __le16 partial_crc_value_b; - __le16 partial_checksum_value_b; + __le16 partial_crc_valueB; + __le16 partial_checksum_valueB; __le16 stateB; -#define TDIF_TASK_CONTEXT_RECEIVED_DIF_BYTES_LEFT_B_MASK 0xF -#define TDIF_TASK_CONTEXT_RECEIVED_DIF_BYTES_LEFT_B_SHIFT 0 -#define TDIF_TASK_CONTEXT_TRANSMITED_DIF_BYTES_LEFT_B_MASK 0xF -#define TDIF_TASK_CONTEXT_TRANSMITED_DIF_BYTES_LEFT_B_SHIFT 4 -#define TDIF_TASK_CONTEXT_ERROR_IN_IO_B_MASK 0x1 -#define TDIF_TASK_CONTEXT_ERROR_IN_IO_B_SHIFT 8 -#define TDIF_TASK_CONTEXT_CHECKSUM_VERFLOW_MASK 0x1 -#define TDIF_TASK_CONTEXT_CHECKSUM_VERFLOW_SHIFT 9 -#define TDIF_TASK_CONTEXT_RESERVED0_MASK 0x3F -#define TDIF_TASK_CONTEXT_RESERVED0_SHIFT 10 +#define TDIF_TASK_CONTEXT_RECEIVEDDIFBYTESLEFTB_MASK 0xF +#define TDIF_TASK_CONTEXT_RECEIVEDDIFBYTESLEFTB_SHIFT 0 +#define TDIF_TASK_CONTEXT_TRANSMITEDDIFBYTESLEFTB_MASK 0xF +#define TDIF_TASK_CONTEXT_TRANSMITEDDIFBYTESLEFTB_SHIFT 4 +#define TDIF_TASK_CONTEXT_ERRORINIOB_MASK 0x1 +#define TDIF_TASK_CONTEXT_ERRORINIOB_SHIFT 8 +#define TDIF_TASK_CONTEXT_CHECKSUMOVERFLOW_MASK 0x1 +#define TDIF_TASK_CONTEXT_CHECKSUMOVERFLOW_SHIFT 9 +#define TDIF_TASK_CONTEXT_RESERVED0_MASK 0x3F +#define TDIF_TASK_CONTEXT_RESERVED0_SHIFT 10 u8 reserved1; u8 flags0; -#define TDIF_TASK_CONTEXT_IGNORE_APP_TAG_MASK 0x1 -#define TDIF_TASK_CONTEXT_IGNORE_APP_TAG_SHIFT 0 -#define 
TDIF_TASK_CONTEXT_INITIAL_REF_TAG_VALID_MASK 0x1 -#define TDIF_TASK_CONTEXT_INITIAL_REF_TAG_VALID_SHIFT 1 -#define TDIF_TASK_CONTEXT_HOST_GUARD_TYPE_MASK 0x1 -#define TDIF_TASK_CONTEXT_HOST_GUARD_TYPE_SHIFT 2 -#define TDIF_TASK_CONTEXT_SET_ERROR_WITH_EOP_MASK 0x1 -#define TDIF_TASK_CONTEXT_SET_ERROR_WITH_EOP_SHIFT 3 -#define TDIF_TASK_CONTEXT_PROTECTION_TYPE_MASK 0x3 -#define TDIF_TASK_CONTEXT_PROTECTION_TYPE_SHIFT 4 -#define TDIF_TASK_CONTEXT_CRC_SEED_MASK 0x1 -#define TDIF_TASK_CONTEXT_CRC_SEED_SHIFT 6 -#define TDIF_TASK_CONTEXT_RESERVED2_MASK 0x1 -#define TDIF_TASK_CONTEXT_RESERVED2_SHIFT 7 +#define TDIF_TASK_CONTEXT_IGNOREAPPTAG_MASK 0x1 +#define TDIF_TASK_CONTEXT_IGNOREAPPTAG_SHIFT 0 +#define TDIF_TASK_CONTEXT_INITIALREFTAGVALID_MASK 0x1 +#define TDIF_TASK_CONTEXT_INITIALREFTAGVALID_SHIFT 1 +#define TDIF_TASK_CONTEXT_HOSTGUARDTYPE_MASK 0x1 +#define TDIF_TASK_CONTEXT_HOSTGUARDTYPE_SHIFT 2 +#define TDIF_TASK_CONTEXT_SETERRORWITHEOP_MASK 0x1 +#define TDIF_TASK_CONTEXT_SETERRORWITHEOP_SHIFT 3 +#define TDIF_TASK_CONTEXT_PROTECTIONTYPE_MASK 0x3 +#define TDIF_TASK_CONTEXT_PROTECTIONTYPE_SHIFT 4 +#define TDIF_TASK_CONTEXT_CRC_SEED_MASK 0x1 +#define TDIF_TASK_CONTEXT_CRC_SEED_SHIFT 6 +#define TDIF_TASK_CONTEXT_RESERVED2_MASK 0x1 +#define TDIF_TASK_CONTEXT_RESERVED2_SHIFT 7 __le32 flags1; -#define TDIF_TASK_CONTEXT_VALIDATE_GUARD_MASK 0x1 -#define TDIF_TASK_CONTEXT_VALIDATE_GUARD_SHIFT 0 -#define TDIF_TASK_CONTEXT_VALIDATE_APP_TAG_MASK 0x1 -#define TDIF_TASK_CONTEXT_VALIDATE_APP_TAG_SHIFT 1 -#define TDIF_TASK_CONTEXT_VALIDATE_REF_TAG_MASK 0x1 -#define TDIF_TASK_CONTEXT_VALIDATE_REF_TAG_SHIFT 2 -#define TDIF_TASK_CONTEXT_FORWARD_GUARD_MASK 0x1 -#define TDIF_TASK_CONTEXT_FORWARD_GUARD_SHIFT 3 -#define TDIF_TASK_CONTEXT_FORWARD_APP_TAG_MASK 0x1 -#define TDIF_TASK_CONTEXT_FORWARD_APP_TAG_SHIFT 4 -#define TDIF_TASK_CONTEXT_FORWARD_REF_TAG_MASK 0x1 -#define TDIF_TASK_CONTEXT_FORWARD_REF_TAG_SHIFT 5 -#define TDIF_TASK_CONTEXT_INTERVAL_SIZE_MASK 0x7 -#define 
TDIF_TASK_CONTEXT_INTERVAL_SIZE_SHIFT 6 -#define TDIF_TASK_CONTEXT_HOST_INTERFACE_MASK 0x3 -#define TDIF_TASK_CONTEXT_HOST_INTERFACE_SHIFT 9 -#define TDIF_TASK_CONTEXT_DIF_BEFORE_DATA_MASK 0x1 -#define TDIF_TASK_CONTEXT_DIF_BEFORE_DATA_SHIFT 11 -#define TDIF_TASK_CONTEXT_RESERVED3_MASK 0x1 -#define TDIF_TASK_CONTEXT_RESERVED3_SHIFT 12 -#define TDIF_TASK_CONTEXT_NETWORK_INTERFACE_MASK 0x1 -#define TDIF_TASK_CONTEXT_NETWORK_INTERFACE_SHIFT 13 -#define TDIF_TASK_CONTEXT_RECEIVED_DIF_BYTES_LEFT_A_MASK 0xF -#define TDIF_TASK_CONTEXT_RECEIVED_DIF_BYTES_LEFT_A_SHIFT 14 -#define TDIF_TASK_CONTEXT_TRANSMITED_DIF_BYTES_LEFT_A_MASK 0xF -#define TDIF_TASK_CONTEXT_TRANSMITED_DIF_BYTES_LEFT_A_SHIFT 18 -#define TDIF_TASK_CONTEXT_ERROR_IN_IO_A_MASK 0x1 -#define TDIF_TASK_CONTEXT_ERROR_IN_IO_A_SHIFT 22 -#define TDIF_TASK_CONTEXT_CHECKSUM_OVERFLOW_A_MASK 0x1 -#define TDIF_TASK_CONTEXT_CHECKSUM_OVERFLOW_A_SHIFT 23 -#define TDIF_TASK_CONTEXT_REF_TAG_MASK_MASK 0xF -#define TDIF_TASK_CONTEXT_REF_TAG_MASK_SHIFT 24 -#define TDIF_TASK_CONTEXT_FORWARD_APP_TAG_WITH_MASK_MASK 0x1 -#define TDIF_TASK_CONTEXT_FORWARD_APP_TAG_WITH_MASK_SHIFT 28 -#define TDIF_TASK_CONTEXT_FORWARD_REF_TAG_WITH_MASK_MASK 0x1 -#define TDIF_TASK_CONTEXT_FORWARD_REF_TAG_WITH_MASK_SHIFT 29 -#define TDIF_TASK_CONTEXT_KEEP_REF_TAG_CONST_MASK 0x1 -#define TDIF_TASK_CONTEXT_KEEP_REF_TAG_CONST_SHIFT 30 -#define TDIF_TASK_CONTEXT_RESERVED4_MASK 0x1 -#define TDIF_TASK_CONTEXT_RESERVED4_SHIFT 31 - __le32 offset_in_io_b; +#define TDIF_TASK_CONTEXT_VALIDATEGUARD_MASK 0x1 +#define TDIF_TASK_CONTEXT_VALIDATEGUARD_SHIFT 0 +#define TDIF_TASK_CONTEXT_VALIDATEAPPTAG_MASK 0x1 +#define TDIF_TASK_CONTEXT_VALIDATEAPPTAG_SHIFT 1 +#define TDIF_TASK_CONTEXT_VALIDATEREFTAG_MASK 0x1 +#define TDIF_TASK_CONTEXT_VALIDATEREFTAG_SHIFT 2 +#define TDIF_TASK_CONTEXT_FORWARDGUARD_MASK 0x1 +#define TDIF_TASK_CONTEXT_FORWARDGUARD_SHIFT 3 +#define TDIF_TASK_CONTEXT_FORWARDAPPTAG_MASK 0x1 +#define TDIF_TASK_CONTEXT_FORWARDAPPTAG_SHIFT 4 +#define 
TDIF_TASK_CONTEXT_FORWARDREFTAG_MASK 0x1 +#define TDIF_TASK_CONTEXT_FORWARDREFTAG_SHIFT 5 +#define TDIF_TASK_CONTEXT_INTERVALSIZE_MASK 0x7 +#define TDIF_TASK_CONTEXT_INTERVALSIZE_SHIFT 6 +#define TDIF_TASK_CONTEXT_HOSTINTERFACE_MASK 0x3 +#define TDIF_TASK_CONTEXT_HOSTINTERFACE_SHIFT 9 +#define TDIF_TASK_CONTEXT_DIFBEFOREDATA_MASK 0x1 +#define TDIF_TASK_CONTEXT_DIFBEFOREDATA_SHIFT 11 +#define TDIF_TASK_CONTEXT_RESERVED3_MASK 0x1 +#define TDIF_TASK_CONTEXT_RESERVED3_SHIFT 12 +#define TDIF_TASK_CONTEXT_NETWORKINTERFACE_MASK 0x1 +#define TDIF_TASK_CONTEXT_NETWORKINTERFACE_SHIFT 13 +#define TDIF_TASK_CONTEXT_RECEIVEDDIFBYTESLEFTA_MASK 0xF +#define TDIF_TASK_CONTEXT_RECEIVEDDIFBYTESLEFTA_SHIFT 14 +#define TDIF_TASK_CONTEXT_TRANSMITEDDIFBYTESLEFTA_MASK 0xF +#define TDIF_TASK_CONTEXT_TRANSMITEDDIFBYTESLEFTA_SHIFT 18 +#define TDIF_TASK_CONTEXT_ERRORINIOA_MASK 0x1 +#define TDIF_TASK_CONTEXT_ERRORINIOA_SHIFT 22 +#define TDIF_TASK_CONTEXT_CHECKSUMOVERFLOWA_MASK 0x1 +#define TDIF_TASK_CONTEXT_CHECKSUMOVERFLOWA_SHIFT 23 +#define TDIF_TASK_CONTEXT_REFTAGMASK_MASK 0xF +#define TDIF_TASK_CONTEXT_REFTAGMASK_SHIFT 24 +#define TDIF_TASK_CONTEXT_FORWARDAPPTAGWITHMASK_MASK 0x1 +#define TDIF_TASK_CONTEXT_FORWARDAPPTAGWITHMASK_SHIFT 28 +#define TDIF_TASK_CONTEXT_FORWARDREFTAGWITHMASK_MASK 0x1 +#define TDIF_TASK_CONTEXT_FORWARDREFTAGWITHMASK_SHIFT 29 +#define TDIF_TASK_CONTEXT_KEEPREFTAGCONST_MASK 0x1 +#define TDIF_TASK_CONTEXT_KEEPREFTAGCONST_SHIFT 30 +#define TDIF_TASK_CONTEXT_RESERVED4_MASK 0x1 +#define TDIF_TASK_CONTEXT_RESERVED4_SHIFT 31 + __le32 offset_in_iob; __le16 partial_crc_value_a; - __le16 partial_checksum_value_a; - __le32 offset_in_io_a; + __le16 partial_checksum_valuea_; + __le32 offset_in_ioa; u8 partial_dif_data_a[8]; u8 partial_dif_data_b[8]; }; -/* Timers context */ struct timers_context { __le32 logical_client_0; -#define TIMERS_CONTEXT_EXPIRATIONTIMELC0_MASK 0x7FFFFFF -#define TIMERS_CONTEXT_EXPIRATIONTIMELC0_SHIFT 0 -#define TIMERS_CONTEXT_RESERVED0_MASK 0x1 -#define 
TIMERS_CONTEXT_RESERVED0_SHIFT 27 -#define TIMERS_CONTEXT_VALIDLC0_MASK 0x1 -#define TIMERS_CONTEXT_VALIDLC0_SHIFT 28 -#define TIMERS_CONTEXT_ACTIVELC0_MASK 0x1 -#define TIMERS_CONTEXT_ACTIVELC0_SHIFT 29 -#define TIMERS_CONTEXT_RESERVED1_MASK 0x3 -#define TIMERS_CONTEXT_RESERVED1_SHIFT 30 +#define TIMERS_CONTEXT_EXPIRATIONTIMELC0_MASK 0xFFFFFFF +#define TIMERS_CONTEXT_EXPIRATIONTIMELC0_SHIFT 0 +#define TIMERS_CONTEXT_VALIDLC0_MASK 0x1 +#define TIMERS_CONTEXT_VALIDLC0_SHIFT 28 +#define TIMERS_CONTEXT_ACTIVELC0_MASK 0x1 +#define TIMERS_CONTEXT_ACTIVELC0_SHIFT 29 +#define TIMERS_CONTEXT_RESERVED0_MASK 0x3 +#define TIMERS_CONTEXT_RESERVED0_SHIFT 30 __le32 logical_client_1; -#define TIMERS_CONTEXT_EXPIRATIONTIMELC1_MASK 0x7FFFFFF -#define TIMERS_CONTEXT_EXPIRATIONTIMELC1_SHIFT 0 -#define TIMERS_CONTEXT_RESERVED2_MASK 0x1 -#define TIMERS_CONTEXT_RESERVED2_SHIFT 27 -#define TIMERS_CONTEXT_VALIDLC1_MASK 0x1 -#define TIMERS_CONTEXT_VALIDLC1_SHIFT 28 -#define TIMERS_CONTEXT_ACTIVELC1_MASK 0x1 -#define TIMERS_CONTEXT_ACTIVELC1_SHIFT 29 -#define TIMERS_CONTEXT_RESERVED3_MASK 0x3 -#define TIMERS_CONTEXT_RESERVED3_SHIFT 30 +#define TIMERS_CONTEXT_EXPIRATIONTIMELC1_MASK 0xFFFFFFF +#define TIMERS_CONTEXT_EXPIRATIONTIMELC1_SHIFT 0 +#define TIMERS_CONTEXT_VALIDLC1_MASK 0x1 +#define TIMERS_CONTEXT_VALIDLC1_SHIFT 28 +#define TIMERS_CONTEXT_ACTIVELC1_MASK 0x1 +#define TIMERS_CONTEXT_ACTIVELC1_SHIFT 29 +#define TIMERS_CONTEXT_RESERVED1_MASK 0x3 +#define TIMERS_CONTEXT_RESERVED1_SHIFT 30 __le32 logical_client_2; -#define TIMERS_CONTEXT_EXPIRATIONTIMELC2_MASK 0x7FFFFFF -#define TIMERS_CONTEXT_EXPIRATIONTIMELC2_SHIFT 0 -#define TIMERS_CONTEXT_RESERVED4_MASK 0x1 -#define TIMERS_CONTEXT_RESERVED4_SHIFT 27 -#define TIMERS_CONTEXT_VALIDLC2_MASK 0x1 -#define TIMERS_CONTEXT_VALIDLC2_SHIFT 28 -#define TIMERS_CONTEXT_ACTIVELC2_MASK 0x1 -#define TIMERS_CONTEXT_ACTIVELC2_SHIFT 29 -#define TIMERS_CONTEXT_RESERVED5_MASK 0x3 -#define TIMERS_CONTEXT_RESERVED5_SHIFT 30 +#define 
TIMERS_CONTEXT_EXPIRATIONTIMELC2_MASK 0xFFFFFFF +#define TIMERS_CONTEXT_EXPIRATIONTIMELC2_SHIFT 0 +#define TIMERS_CONTEXT_VALIDLC2_MASK 0x1 +#define TIMERS_CONTEXT_VALIDLC2_SHIFT 28 +#define TIMERS_CONTEXT_ACTIVELC2_MASK 0x1 +#define TIMERS_CONTEXT_ACTIVELC2_SHIFT 29 +#define TIMERS_CONTEXT_RESERVED2_MASK 0x3 +#define TIMERS_CONTEXT_RESERVED2_SHIFT 30 __le32 host_expiration_fields; -#define TIMERS_CONTEXT_HOSTEXPRIRATIONVALUE_MASK 0x7FFFFFF -#define TIMERS_CONTEXT_HOSTEXPRIRATIONVALUE_SHIFT 0 -#define TIMERS_CONTEXT_RESERVED6_MASK 0x1 -#define TIMERS_CONTEXT_RESERVED6_SHIFT 27 -#define TIMERS_CONTEXT_HOSTEXPRIRATIONVALID_MASK 0x1 -#define TIMERS_CONTEXT_HOSTEXPRIRATIONVALID_SHIFT 28 -#define TIMERS_CONTEXT_RESERVED7_MASK 0x7 -#define TIMERS_CONTEXT_RESERVED7_SHIFT 29 +#define TIMERS_CONTEXT_HOSTEXPRIRATIONVALUE_MASK 0xFFFFFFF +#define TIMERS_CONTEXT_HOSTEXPRIRATIONVALUE_SHIFT 0 +#define TIMERS_CONTEXT_HOSTEXPRIRATIONVALID_MASK 0x1 +#define TIMERS_CONTEXT_HOSTEXPRIRATIONVALID_SHIFT 28 +#define TIMERS_CONTEXT_RESERVED3_MASK 0x7 +#define TIMERS_CONTEXT_RESERVED3_SHIFT 29 }; - -/* Enum for next_protocol field of tunnel_parsing_flags / tunnelTypeDesc */ -enum tunnel_next_protocol { - e_unknown = 0, - e_l2 = 1, - e_ipv4 = 2, - e_ipv6 = 3, - MAX_TUNNEL_NEXT_PROTOCOL -}; - #endif /* __COMMON_HSI__ */ #endif diff --git a/include/linux/qed/eth_common.h b/include/linux/qed/eth_common.h index cd1207ad4a..1aa0727c41 100644 --- a/include/linux/qed/eth_common.h +++ b/include/linux/qed/eth_common.h @@ -1,7 +1,9 @@ -/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */ /* QLogic qed NIC Driver - * Copyright (c) 2015-2017 QLogic Corporation - * Copyright (c) 2019-2020 Marvell International Ltd. + * Copyright (c) 2015 QLogic Corporation + * + * This software is available under the terms of the GNU General Public License + * (GPL) Version 2, available from the file COPYING in the main directory of + * this source tree. 
*/ #ifndef __ETH_COMMON__ @@ -10,170 +12,146 @@ /********************/ /* ETH FW CONSTANTS */ /********************/ +#define ETH_HSI_VER_MAJOR 3 +#define ETH_HSI_VER_MINOR 10 -#define ETH_HSI_VER_MAJOR 3 -#define ETH_HSI_VER_MINOR 11 +#define ETH_HSI_VER_NO_PKT_LEN_TUNN 5 -#define ETH_HSI_VER_NO_PKT_LEN_TUNN 5 -/* Maximum number of pinned L2 connections (CIDs) */ -#define ETH_PINNED_CONN_MAX_NUM 32 +#define ETH_CACHE_LINE_SIZE 64 +#define ETH_RX_CQE_GAP 32 +#define ETH_MAX_RAMROD_PER_CON 8 +#define ETH_TX_BD_PAGE_SIZE_BYTES 4096 +#define ETH_RX_BD_PAGE_SIZE_BYTES 4096 +#define ETH_RX_CQE_PAGE_SIZE_BYTES 4096 +#define ETH_RX_NUM_NEXT_PAGE_BDS 2 -#define ETH_CACHE_LINE_SIZE 64 -#define ETH_RX_CQE_GAP 32 -#define ETH_MAX_RAMROD_PER_CON 8 -#define ETH_TX_BD_PAGE_SIZE_BYTES 4096 -#define ETH_RX_BD_PAGE_SIZE_BYTES 4096 -#define ETH_RX_CQE_PAGE_SIZE_BYTES 4096 -#define ETH_RX_NUM_NEXT_PAGE_BDS 2 +#define ETH_TX_MIN_BDS_PER_NON_LSO_PKT 1 +#define ETH_TX_MAX_BDS_PER_NON_LSO_PACKET 18 +#define ETH_TX_MAX_BDS_PER_LSO_PACKET 255 +#define ETH_TX_MAX_LSO_HDR_NBD 4 +#define ETH_TX_MIN_BDS_PER_LSO_PKT 3 +#define ETH_TX_MIN_BDS_PER_TUNN_IPV6_WITH_EXT_PKT 3 +#define ETH_TX_MIN_BDS_PER_IPV6_WITH_EXT_PKT 2 +#define ETH_TX_MIN_BDS_PER_PKT_W_LOOPBACK_MODE 2 +#define ETH_TX_MAX_NON_LSO_PKT_LEN (9700 - (4 + 4 + 12 + 8)) +#define ETH_TX_MAX_LSO_HDR_BYTES 510 +#define ETH_TX_LSO_WINDOW_BDS_NUM (18 - 1) +#define ETH_TX_LSO_WINDOW_MIN_LEN 9700 +#define ETH_TX_MAX_LSO_PAYLOAD_LEN 0xFE000 +#define ETH_TX_NUM_SAME_AS_LAST_ENTRIES 320 +#define ETH_TX_INACTIVE_SAME_AS_LAST 0xFFFF -#define ETH_MAX_TUNN_LSO_INNER_IPV4_OFFSET 253 -#define ETH_MAX_TUNN_LSO_INNER_IPV6_OFFSET 251 - -#define ETH_TX_MIN_BDS_PER_NON_LSO_PKT 1 -#define ETH_TX_MAX_BDS_PER_NON_LSO_PACKET 18 -#define ETH_TX_MAX_BDS_PER_LSO_PACKET 255 -#define ETH_TX_MAX_LSO_HDR_NBD 4 -#define ETH_TX_MIN_BDS_PER_LSO_PKT 3 -#define ETH_TX_MIN_BDS_PER_TUNN_IPV6_WITH_EXT_PKT 3 -#define ETH_TX_MIN_BDS_PER_IPV6_WITH_EXT_PKT 2 -#define 
ETH_TX_MIN_BDS_PER_PKT_W_LOOPBACK_MODE 2 -#define ETH_TX_MIN_BDS_PER_PKT_W_VPORT_FORWARDING 4 -#define ETH_TX_MAX_NON_LSO_PKT_LEN (9700 - (4 + 4 + 12 + 8)) -#define ETH_TX_MAX_LSO_HDR_BYTES 510 -#define ETH_TX_LSO_WINDOW_BDS_NUM (18 - 1) -#define ETH_TX_LSO_WINDOW_MIN_LEN 9700 -#define ETH_TX_MAX_LSO_PAYLOAD_LEN 0xFE000 -#define ETH_TX_NUM_SAME_AS_LAST_ENTRIES 320 -#define ETH_TX_INACTIVE_SAME_AS_LAST 0xFFFF - -#define ETH_NUM_STATISTIC_COUNTERS MAX_NUM_VPORTS +#define ETH_NUM_STATISTIC_COUNTERS MAX_NUM_VPORTS #define ETH_NUM_STATISTIC_COUNTERS_DOUBLE_VF_ZONE \ (ETH_NUM_STATISTIC_COUNTERS - MAX_NUM_VFS / 2) #define ETH_NUM_STATISTIC_COUNTERS_QUAD_VF_ZONE \ (ETH_NUM_STATISTIC_COUNTERS - 3 * MAX_NUM_VFS / 4) -#define ETH_RX_MAX_BUFF_PER_PKT 5 -#define ETH_RX_BD_THRESHOLD 16 +/* Maximum number of buffers, used for RX packet placement */ +#define ETH_RX_MAX_BUFF_PER_PKT 5 -/* Num of MAC/VLAN filters */ -#define ETH_NUM_MAC_FILTERS 512 -#define ETH_NUM_VLAN_FILTERS 512 +/* num of MAC/VLAN filters */ +#define ETH_NUM_MAC_FILTERS 512 +#define ETH_NUM_VLAN_FILTERS 512 -/* Approx. multicast constants */ -#define ETH_MULTICAST_BIN_FROM_MAC_SEED 0 -#define ETH_MULTICAST_MAC_BINS 256 -#define ETH_MULTICAST_MAC_BINS_IN_REGS (ETH_MULTICAST_MAC_BINS / 32) +/* approx. 
multicast constants */ +#define ETH_MULTICAST_BIN_FROM_MAC_SEED 0 +#define ETH_MULTICAST_MAC_BINS 256 +#define ETH_MULTICAST_MAC_BINS_IN_REGS (ETH_MULTICAST_MAC_BINS / 32) -/* Ethernet vport update constants */ -#define ETH_FILTER_RULES_COUNT 10 -#define ETH_RSS_IND_TABLE_ENTRIES_NUM 128 -#define ETH_RSS_KEY_SIZE_REGS 10 -#define ETH_RSS_ENGINE_NUM_K2 207 -#define ETH_RSS_ENGINE_NUM_BB 127 +/* ethernet vport update constants */ +#define ETH_FILTER_RULES_COUNT 10 +#define ETH_RSS_IND_TABLE_ENTRIES_NUM 128 +#define ETH_RSS_KEY_SIZE_REGS 10 +#define ETH_RSS_ENGINE_NUM_K2 207 +#define ETH_RSS_ENGINE_NUM_BB 127 /* TPA constants */ -#define ETH_TPA_MAX_AGGS_NUM 64 -#define ETH_TPA_CQE_START_BW_LEN_LIST_SIZE 2 -#define ETH_TPA_CQE_CONT_LEN_LIST_SIZE 6 -#define ETH_TPA_CQE_END_LEN_LIST_SIZE 4 +#define ETH_TPA_MAX_AGGS_NUM 64 +#define ETH_TPA_CQE_START_LEN_LIST_SIZE ETH_RX_MAX_BUFF_PER_PKT +#define ETH_TPA_CQE_CONT_LEN_LIST_SIZE 6 +#define ETH_TPA_CQE_END_LEN_LIST_SIZE 4 /* Control frame check constants */ -#define ETH_CTL_FRAME_ETH_TYPE_NUM 4 - -/* GFS constants */ -#define ETH_GFT_TRASHCAN_VPORT 0x1FF /* GFT drop flow vport number */ - -/* Destination port mode */ -enum dst_port_mode { - DST_PORT_PHY, - DST_PORT_LOOPBACK, - DST_PORT_PHY_LOOPBACK, - DST_PORT_DROP, - MAX_DST_PORT_MODE -}; - -/* Ethernet address type */ -enum eth_addr_type { - BROADCAST_ADDRESS, - MULTICAST_ADDRESS, - UNICAST_ADDRESS, - UNKNOWN_ADDRESS, - MAX_ETH_ADDR_TYPE -}; +#define ETH_CTL_FRAME_ETH_TYPE_NUM 4 struct eth_tx_1st_bd_flags { u8 bitfields; -#define ETH_TX_1ST_BD_FLAGS_START_BD_MASK 0x1 -#define ETH_TX_1ST_BD_FLAGS_START_BD_SHIFT 0 -#define ETH_TX_1ST_BD_FLAGS_FORCE_VLAN_MODE_MASK 0x1 -#define ETH_TX_1ST_BD_FLAGS_FORCE_VLAN_MODE_SHIFT 1 -#define ETH_TX_1ST_BD_FLAGS_IP_CSUM_MASK 0x1 -#define ETH_TX_1ST_BD_FLAGS_IP_CSUM_SHIFT 2 -#define ETH_TX_1ST_BD_FLAGS_L4_CSUM_MASK 0x1 -#define ETH_TX_1ST_BD_FLAGS_L4_CSUM_SHIFT 3 -#define ETH_TX_1ST_BD_FLAGS_VLAN_INSERTION_MASK 0x1 -#define 
ETH_TX_1ST_BD_FLAGS_VLAN_INSERTION_SHIFT 4 -#define ETH_TX_1ST_BD_FLAGS_LSO_MASK 0x1 -#define ETH_TX_1ST_BD_FLAGS_LSO_SHIFT 5 -#define ETH_TX_1ST_BD_FLAGS_TUNN_IP_CSUM_MASK 0x1 -#define ETH_TX_1ST_BD_FLAGS_TUNN_IP_CSUM_SHIFT 6 -#define ETH_TX_1ST_BD_FLAGS_TUNN_L4_CSUM_MASK 0x1 -#define ETH_TX_1ST_BD_FLAGS_TUNN_L4_CSUM_SHIFT 7 +#define ETH_TX_1ST_BD_FLAGS_START_BD_MASK 0x1 +#define ETH_TX_1ST_BD_FLAGS_START_BD_SHIFT 0 +#define ETH_TX_1ST_BD_FLAGS_FORCE_VLAN_MODE_MASK 0x1 +#define ETH_TX_1ST_BD_FLAGS_FORCE_VLAN_MODE_SHIFT 1 +#define ETH_TX_1ST_BD_FLAGS_IP_CSUM_MASK 0x1 +#define ETH_TX_1ST_BD_FLAGS_IP_CSUM_SHIFT 2 +#define ETH_TX_1ST_BD_FLAGS_L4_CSUM_MASK 0x1 +#define ETH_TX_1ST_BD_FLAGS_L4_CSUM_SHIFT 3 +#define ETH_TX_1ST_BD_FLAGS_VLAN_INSERTION_MASK 0x1 +#define ETH_TX_1ST_BD_FLAGS_VLAN_INSERTION_SHIFT 4 +#define ETH_TX_1ST_BD_FLAGS_LSO_MASK 0x1 +#define ETH_TX_1ST_BD_FLAGS_LSO_SHIFT 5 +#define ETH_TX_1ST_BD_FLAGS_TUNN_IP_CSUM_MASK 0x1 +#define ETH_TX_1ST_BD_FLAGS_TUNN_IP_CSUM_SHIFT 6 +#define ETH_TX_1ST_BD_FLAGS_TUNN_L4_CSUM_MASK 0x1 +#define ETH_TX_1ST_BD_FLAGS_TUNN_L4_CSUM_SHIFT 7 }; -/* The parsing information data fo rthe first tx bd of a given packet */ +/* The parsing information data fo rthe first tx bd of a given packet. 
*/ struct eth_tx_data_1st_bd { __le16 vlan; u8 nbds; struct eth_tx_1st_bd_flags bd_flags; __le16 bitfields; -#define ETH_TX_DATA_1ST_BD_TUNN_FLAG_MASK 0x1 -#define ETH_TX_DATA_1ST_BD_TUNN_FLAG_SHIFT 0 -#define ETH_TX_DATA_1ST_BD_RESERVED0_MASK 0x1 -#define ETH_TX_DATA_1ST_BD_RESERVED0_SHIFT 1 -#define ETH_TX_DATA_1ST_BD_PKT_LEN_MASK 0x3FFF -#define ETH_TX_DATA_1ST_BD_PKT_LEN_SHIFT 2 +#define ETH_TX_DATA_1ST_BD_TUNN_FLAG_MASK 0x1 +#define ETH_TX_DATA_1ST_BD_TUNN_FLAG_SHIFT 0 +#define ETH_TX_DATA_1ST_BD_RESERVED0_MASK 0x1 +#define ETH_TX_DATA_1ST_BD_RESERVED0_SHIFT 1 +#define ETH_TX_DATA_1ST_BD_PKT_LEN_MASK 0x3FFF +#define ETH_TX_DATA_1ST_BD_PKT_LEN_SHIFT 2 }; -/* The parsing information data for the second tx bd of a given packet */ +/* The parsing information data for the second tx bd of a given packet. */ struct eth_tx_data_2nd_bd { __le16 tunn_ip_size; __le16 bitfields1; -#define ETH_TX_DATA_2ND_BD_TUNN_INNER_L2_HDR_SIZE_W_MASK 0xF -#define ETH_TX_DATA_2ND_BD_TUNN_INNER_L2_HDR_SIZE_W_SHIFT 0 -#define ETH_TX_DATA_2ND_BD_TUNN_INNER_ETH_TYPE_MASK 0x3 -#define ETH_TX_DATA_2ND_BD_TUNN_INNER_ETH_TYPE_SHIFT 4 -#define ETH_TX_DATA_2ND_BD_DST_PORT_MODE_MASK 0x3 -#define ETH_TX_DATA_2ND_BD_DST_PORT_MODE_SHIFT 6 -#define ETH_TX_DATA_2ND_BD_START_BD_MASK 0x1 -#define ETH_TX_DATA_2ND_BD_START_BD_SHIFT 8 -#define ETH_TX_DATA_2ND_BD_TUNN_TYPE_MASK 0x3 -#define ETH_TX_DATA_2ND_BD_TUNN_TYPE_SHIFT 9 -#define ETH_TX_DATA_2ND_BD_TUNN_INNER_IPV6_MASK 0x1 -#define ETH_TX_DATA_2ND_BD_TUNN_INNER_IPV6_SHIFT 11 -#define ETH_TX_DATA_2ND_BD_IPV6_EXT_MASK 0x1 -#define ETH_TX_DATA_2ND_BD_IPV6_EXT_SHIFT 12 -#define ETH_TX_DATA_2ND_BD_TUNN_IPV6_EXT_MASK 0x1 -#define ETH_TX_DATA_2ND_BD_TUNN_IPV6_EXT_SHIFT 13 -#define ETH_TX_DATA_2ND_BD_L4_UDP_MASK 0x1 -#define ETH_TX_DATA_2ND_BD_L4_UDP_SHIFT 14 -#define ETH_TX_DATA_2ND_BD_L4_PSEUDO_CSUM_MODE_MASK 0x1 -#define ETH_TX_DATA_2ND_BD_L4_PSEUDO_CSUM_MODE_SHIFT 15 +#define ETH_TX_DATA_2ND_BD_TUNN_INNER_L2_HDR_SIZE_W_MASK 0xF +#define 
ETH_TX_DATA_2ND_BD_TUNN_INNER_L2_HDR_SIZE_W_SHIFT 0 +#define ETH_TX_DATA_2ND_BD_TUNN_INNER_ETH_TYPE_MASK 0x3 +#define ETH_TX_DATA_2ND_BD_TUNN_INNER_ETH_TYPE_SHIFT 4 +#define ETH_TX_DATA_2ND_BD_DEST_PORT_MODE_MASK 0x3 +#define ETH_TX_DATA_2ND_BD_DEST_PORT_MODE_SHIFT 6 +#define ETH_TX_DATA_2ND_BD_START_BD_MASK 0x1 +#define ETH_TX_DATA_2ND_BD_START_BD_SHIFT 8 +#define ETH_TX_DATA_2ND_BD_TUNN_TYPE_MASK 0x3 +#define ETH_TX_DATA_2ND_BD_TUNN_TYPE_SHIFT 9 +#define ETH_TX_DATA_2ND_BD_TUNN_INNER_IPV6_MASK 0x1 +#define ETH_TX_DATA_2ND_BD_TUNN_INNER_IPV6_SHIFT 11 +#define ETH_TX_DATA_2ND_BD_IPV6_EXT_MASK 0x1 +#define ETH_TX_DATA_2ND_BD_IPV6_EXT_SHIFT 12 +#define ETH_TX_DATA_2ND_BD_TUNN_IPV6_EXT_MASK 0x1 +#define ETH_TX_DATA_2ND_BD_TUNN_IPV6_EXT_SHIFT 13 +#define ETH_TX_DATA_2ND_BD_L4_UDP_MASK 0x1 +#define ETH_TX_DATA_2ND_BD_L4_UDP_SHIFT 14 +#define ETH_TX_DATA_2ND_BD_L4_PSEUDO_CSUM_MODE_MASK 0x1 +#define ETH_TX_DATA_2ND_BD_L4_PSEUDO_CSUM_MODE_SHIFT 15 __le16 bitfields2; -#define ETH_TX_DATA_2ND_BD_L4_HDR_START_OFFSET_W_MASK 0x1FFF -#define ETH_TX_DATA_2ND_BD_L4_HDR_START_OFFSET_W_SHIFT 0 -#define ETH_TX_DATA_2ND_BD_RESERVED0_MASK 0x7 -#define ETH_TX_DATA_2ND_BD_RESERVED0_SHIFT 13 +#define ETH_TX_DATA_2ND_BD_L4_HDR_START_OFFSET_W_MASK 0x1FFF +#define ETH_TX_DATA_2ND_BD_L4_HDR_START_OFFSET_W_SHIFT 0 +#define ETH_TX_DATA_2ND_BD_RESERVED0_MASK 0x7 +#define ETH_TX_DATA_2ND_BD_RESERVED0_SHIFT 13 }; -/* Firmware data for L2-EDPM packet */ +/* Firmware data for L2-EDPM packet. 
*/ struct eth_edpm_fw_data { struct eth_tx_data_1st_bd data_1st_bd; struct eth_tx_data_2nd_bd data_2nd_bd; __le32 reserved; }; -/* Tunneling parsing flags */ +struct eth_fast_path_cqe_fw_debug { + __le16 reserved2; +}; + +/* tunneling parsing flags */ struct eth_tunnel_parsing_flags { u8 flags; #define ETH_TUNNEL_PARSING_FLAGS_TYPE_MASK 0x3 @@ -193,24 +171,24 @@ struct eth_tunnel_parsing_flags { /* PMD flow control bits */ struct eth_pmd_flow_flags { u8 flags; -#define ETH_PMD_FLOW_FLAGS_VALID_MASK 0x1 -#define ETH_PMD_FLOW_FLAGS_VALID_SHIFT 0 -#define ETH_PMD_FLOW_FLAGS_TOGGLE_MASK 0x1 -#define ETH_PMD_FLOW_FLAGS_TOGGLE_SHIFT 1 -#define ETH_PMD_FLOW_FLAGS_RESERVED_MASK 0x3F -#define ETH_PMD_FLOW_FLAGS_RESERVED_SHIFT 2 +#define ETH_PMD_FLOW_FLAGS_VALID_MASK 0x1 +#define ETH_PMD_FLOW_FLAGS_VALID_SHIFT 0 +#define ETH_PMD_FLOW_FLAGS_TOGGLE_MASK 0x1 +#define ETH_PMD_FLOW_FLAGS_TOGGLE_SHIFT 1 +#define ETH_PMD_FLOW_FLAGS_RESERVED_MASK 0x3F +#define ETH_PMD_FLOW_FLAGS_RESERVED_SHIFT 2 }; -/* Regular ETH Rx FP CQE */ +/* Regular ETH Rx FP CQE. 
*/ struct eth_fast_path_rx_reg_cqe { u8 type; u8 bitfields; -#define ETH_FAST_PATH_RX_REG_CQE_RSS_HASH_TYPE_MASK 0x7 -#define ETH_FAST_PATH_RX_REG_CQE_RSS_HASH_TYPE_SHIFT 0 -#define ETH_FAST_PATH_RX_REG_CQE_TC_MASK 0xF -#define ETH_FAST_PATH_RX_REG_CQE_TC_SHIFT 3 -#define ETH_FAST_PATH_RX_REG_CQE_RESERVED0_MASK 0x1 -#define ETH_FAST_PATH_RX_REG_CQE_RESERVED0_SHIFT 7 +#define ETH_FAST_PATH_RX_REG_CQE_RSS_HASH_TYPE_MASK 0x7 +#define ETH_FAST_PATH_RX_REG_CQE_RSS_HASH_TYPE_SHIFT 0 +#define ETH_FAST_PATH_RX_REG_CQE_TC_MASK 0xF +#define ETH_FAST_PATH_RX_REG_CQE_TC_SHIFT 3 +#define ETH_FAST_PATH_RX_REG_CQE_RESERVED0_MASK 0x1 +#define ETH_FAST_PATH_RX_REG_CQE_RESERVED0_SHIFT 7 __le16 pkt_len; struct parsing_and_err_flags pars_flags; __le16 vlan_tag; @@ -219,14 +197,13 @@ struct eth_fast_path_rx_reg_cqe { u8 placement_offset; struct eth_tunnel_parsing_flags tunnel_pars_flags; u8 bd_num; - u8 reserved; - __le16 reserved2; - __le32 flow_id_or_resource_id; - u8 reserved1[7]; + u8 reserved[9]; + struct eth_fast_path_cqe_fw_debug fw_debug; + u8 reserved1[3]; struct eth_pmd_flow_flags pmd_flags; }; -/* TPA-continue ETH Rx FP CQE */ +/* TPA-continue ETH Rx FP CQE. */ struct eth_fast_path_rx_tpa_cont_cqe { u8 type; u8 tpa_agg_index; @@ -238,7 +215,7 @@ struct eth_fast_path_rx_tpa_cont_cqe { struct eth_pmd_flow_flags pmd_flags; }; -/* TPA-end ETH Rx FP CQE */ +/* TPA-end ETH Rx FP CQE. */ struct eth_fast_path_rx_tpa_end_cqe { u8 type; u8 tpa_agg_index; @@ -254,16 +231,16 @@ struct eth_fast_path_rx_tpa_end_cqe { struct eth_pmd_flow_flags pmd_flags; }; -/* TPA-start ETH Rx FP CQE */ +/* TPA-start ETH Rx FP CQE. 
*/ struct eth_fast_path_rx_tpa_start_cqe { u8 type; u8 bitfields; -#define ETH_FAST_PATH_RX_TPA_START_CQE_RSS_HASH_TYPE_MASK 0x7 -#define ETH_FAST_PATH_RX_TPA_START_CQE_RSS_HASH_TYPE_SHIFT 0 -#define ETH_FAST_PATH_RX_TPA_START_CQE_TC_MASK 0xF -#define ETH_FAST_PATH_RX_TPA_START_CQE_TC_SHIFT 3 -#define ETH_FAST_PATH_RX_TPA_START_CQE_RESERVED0_MASK 0x1 -#define ETH_FAST_PATH_RX_TPA_START_CQE_RESERVED0_SHIFT 7 +#define ETH_FAST_PATH_RX_TPA_START_CQE_RSS_HASH_TYPE_MASK 0x7 +#define ETH_FAST_PATH_RX_TPA_START_CQE_RSS_HASH_TYPE_SHIFT 0 +#define ETH_FAST_PATH_RX_TPA_START_CQE_TC_MASK 0xF +#define ETH_FAST_PATH_RX_TPA_START_CQE_TC_SHIFT 3 +#define ETH_FAST_PATH_RX_TPA_START_CQE_RESERVED0_MASK 0x1 +#define ETH_FAST_PATH_RX_TPA_START_CQE_RESERVED0_SHIFT 7 __le16 seg_len; struct parsing_and_err_flags pars_flags; __le16 vlan_tag; @@ -273,10 +250,9 @@ struct eth_fast_path_rx_tpa_start_cqe { struct eth_tunnel_parsing_flags tunnel_pars_flags; u8 tpa_agg_index; u8 header_len; - __le16 bw_ext_bd_len_list[ETH_TPA_CQE_START_BW_LEN_LIST_SIZE]; - __le16 reserved2; - __le32 flow_id_or_resource_id; - u8 reserved[3]; + __le16 ext_bd_len_list[ETH_TPA_CQE_START_LEN_LIST_SIZE]; + struct eth_fast_path_cqe_fw_debug fw_debug; + u8 reserved; struct eth_pmd_flow_flags pmd_flags; }; @@ -291,24 +267,24 @@ struct eth_rx_bd { struct regpair addr; }; -/* Regular ETH Rx SP CQE */ +/* regular ETH Rx SP CQE */ struct eth_slow_path_rx_cqe { - u8 type; - u8 ramrod_cmd_id; - u8 error_flag; - u8 reserved[25]; - __le16 echo; - u8 reserved1; + u8 type; + u8 ramrod_cmd_id; + u8 error_flag; + u8 reserved[25]; + __le16 echo; + u8 reserved1; struct eth_pmd_flow_flags pmd_flags; }; -/* Union for all ETH Rx CQE types */ +/* union for all ETH Rx CQE types */ union eth_rx_cqe { - struct eth_fast_path_rx_reg_cqe fast_path_regular; - struct eth_fast_path_rx_tpa_start_cqe fast_path_tpa_start; - struct eth_fast_path_rx_tpa_cont_cqe fast_path_tpa_cont; - struct eth_fast_path_rx_tpa_end_cqe fast_path_tpa_end; - struct 
eth_slow_path_rx_cqe slow_path; + struct eth_fast_path_rx_reg_cqe fast_path_regular; + struct eth_fast_path_rx_tpa_start_cqe fast_path_tpa_start; + struct eth_fast_path_rx_tpa_cont_cqe fast_path_tpa_cont; + struct eth_fast_path_rx_tpa_end_cqe fast_path_tpa_end; + struct eth_slow_path_rx_cqe slow_path; }; /* ETH Rx CQE type */ @@ -335,7 +311,7 @@ enum eth_rx_tunn_type { MAX_ETH_RX_TUNN_TYPE }; -/* Aggregation end reason. */ +/* Aggregation end reason. */ enum eth_tpa_end_reason { ETH_AGG_END_UNUSED, ETH_AGG_END_SP_UPDATE, @@ -350,89 +326,65 @@ enum eth_tpa_end_reason { /* The first tx bd of a given packet */ struct eth_tx_1st_bd { - struct regpair addr; - __le16 nbytes; - struct eth_tx_data_1st_bd data; + struct regpair addr; + __le16 nbytes; + struct eth_tx_data_1st_bd data; }; /* The second tx bd of a given packet */ struct eth_tx_2nd_bd { - struct regpair addr; - __le16 nbytes; - struct eth_tx_data_2nd_bd data; + struct regpair addr; + __le16 nbytes; + struct eth_tx_data_2nd_bd data; }; -/* The parsing information data for the third tx bd of a given packet */ +/* The parsing information data for the third tx bd of a given packet. 
*/ struct eth_tx_data_3rd_bd { __le16 lso_mss; __le16 bitfields; -#define ETH_TX_DATA_3RD_BD_TCP_HDR_LEN_DW_MASK 0xF -#define ETH_TX_DATA_3RD_BD_TCP_HDR_LEN_DW_SHIFT 0 -#define ETH_TX_DATA_3RD_BD_HDR_NBD_MASK 0xF -#define ETH_TX_DATA_3RD_BD_HDR_NBD_SHIFT 4 -#define ETH_TX_DATA_3RD_BD_START_BD_MASK 0x1 -#define ETH_TX_DATA_3RD_BD_START_BD_SHIFT 8 -#define ETH_TX_DATA_3RD_BD_RESERVED0_MASK 0x7F -#define ETH_TX_DATA_3RD_BD_RESERVED0_SHIFT 9 +#define ETH_TX_DATA_3RD_BD_TCP_HDR_LEN_DW_MASK 0xF +#define ETH_TX_DATA_3RD_BD_TCP_HDR_LEN_DW_SHIFT 0 +#define ETH_TX_DATA_3RD_BD_HDR_NBD_MASK 0xF +#define ETH_TX_DATA_3RD_BD_HDR_NBD_SHIFT 4 +#define ETH_TX_DATA_3RD_BD_START_BD_MASK 0x1 +#define ETH_TX_DATA_3RD_BD_START_BD_SHIFT 8 +#define ETH_TX_DATA_3RD_BD_RESERVED0_MASK 0x7F +#define ETH_TX_DATA_3RD_BD_RESERVED0_SHIFT 9 u8 tunn_l4_hdr_start_offset_w; u8 tunn_hdr_size_w; }; /* The third tx bd of a given packet */ struct eth_tx_3rd_bd { - struct regpair addr; - __le16 nbytes; - struct eth_tx_data_3rd_bd data; + struct regpair addr; + __le16 nbytes; + struct eth_tx_data_3rd_bd data; }; -/* The parsing information data for the forth tx bd of a given packet. 
*/ -struct eth_tx_data_4th_bd { - u8 dst_vport_id; - u8 reserved4; - __le16 bitfields; -#define ETH_TX_DATA_4TH_BD_DST_VPORT_ID_VALID_MASK 0x1 -#define ETH_TX_DATA_4TH_BD_DST_VPORT_ID_VALID_SHIFT 0 -#define ETH_TX_DATA_4TH_BD_RESERVED1_MASK 0x7F -#define ETH_TX_DATA_4TH_BD_RESERVED1_SHIFT 1 -#define ETH_TX_DATA_4TH_BD_START_BD_MASK 0x1 -#define ETH_TX_DATA_4TH_BD_START_BD_SHIFT 8 -#define ETH_TX_DATA_4TH_BD_RESERVED2_MASK 0x7F -#define ETH_TX_DATA_4TH_BD_RESERVED2_SHIFT 9 - __le16 reserved3; -}; - -/* The forth tx bd of a given packet */ -struct eth_tx_4th_bd { - struct regpair addr; /* Single continuous buffer */ - __le16 nbytes; /* Number of bytes in this BD */ - struct eth_tx_data_4th_bd data; /* Parsing information data */ -}; - -/* Complementary information for the regular tx bd of a given packet */ +/* Complementary information for the regular tx bd of a given packet. */ struct eth_tx_data_bd { - __le16 reserved0; - __le16 bitfields; -#define ETH_TX_DATA_BD_RESERVED1_MASK 0xFF -#define ETH_TX_DATA_BD_RESERVED1_SHIFT 0 -#define ETH_TX_DATA_BD_START_BD_MASK 0x1 -#define ETH_TX_DATA_BD_START_BD_SHIFT 8 -#define ETH_TX_DATA_BD_RESERVED2_MASK 0x7F -#define ETH_TX_DATA_BD_RESERVED2_SHIFT 9 + __le16 reserved0; + __le16 bitfields; +#define ETH_TX_DATA_BD_RESERVED1_MASK 0xFF +#define ETH_TX_DATA_BD_RESERVED1_SHIFT 0 +#define ETH_TX_DATA_BD_START_BD_MASK 0x1 +#define ETH_TX_DATA_BD_START_BD_SHIFT 8 +#define ETH_TX_DATA_BD_RESERVED2_MASK 0x7F +#define ETH_TX_DATA_BD_RESERVED2_SHIFT 9 __le16 reserved3; }; /* The common non-special TX BD ring element */ struct eth_tx_bd { - struct regpair addr; - __le16 nbytes; - struct eth_tx_data_bd data; + struct regpair addr; + __le16 nbytes; + struct eth_tx_data_bd data; }; union eth_tx_bd_types { struct eth_tx_1st_bd first_bd; struct eth_tx_2nd_bd second_bd; struct eth_tx_3rd_bd third_bd; - struct eth_tx_4th_bd fourth_bd; struct eth_tx_bd reg_bd; }; @@ -445,12 +397,6 @@ enum eth_tx_tunn_type { MAX_ETH_TX_TUNN_TYPE }; -/* Mstorm 
Queue Zone */ -struct mstorm_eth_queue_zone { - struct eth_rx_prod_data rx_producers; - __le32 reserved[3]; -}; - /* Ystorm Queue Zone */ struct xstorm_eth_queue_zone { struct coalescing_timeset int_coalescing_timeset; @@ -460,30 +406,18 @@ struct xstorm_eth_queue_zone { /* ETH doorbell data */ struct eth_db_data { u8 params; -#define ETH_DB_DATA_DEST_MASK 0x3 -#define ETH_DB_DATA_DEST_SHIFT 0 -#define ETH_DB_DATA_AGG_CMD_MASK 0x3 -#define ETH_DB_DATA_AGG_CMD_SHIFT 2 -#define ETH_DB_DATA_BYPASS_EN_MASK 0x1 -#define ETH_DB_DATA_BYPASS_EN_SHIFT 4 -#define ETH_DB_DATA_RESERVED_MASK 0x1 -#define ETH_DB_DATA_RESERVED_SHIFT 5 -#define ETH_DB_DATA_AGG_VAL_SEL_MASK 0x3 -#define ETH_DB_DATA_AGG_VAL_SEL_SHIFT 6 +#define ETH_DB_DATA_DEST_MASK 0x3 +#define ETH_DB_DATA_DEST_SHIFT 0 +#define ETH_DB_DATA_AGG_CMD_MASK 0x3 +#define ETH_DB_DATA_AGG_CMD_SHIFT 2 +#define ETH_DB_DATA_BYPASS_EN_MASK 0x1 +#define ETH_DB_DATA_BYPASS_EN_SHIFT 4 +#define ETH_DB_DATA_RESERVED_MASK 0x1 +#define ETH_DB_DATA_RESERVED_SHIFT 5 +#define ETH_DB_DATA_AGG_VAL_SEL_MASK 0x3 +#define ETH_DB_DATA_AGG_VAL_SEL_SHIFT 6 u8 agg_flags; __le16 bd_prod; }; -/* RSS hash type */ -enum rss_hash_type { - RSS_HASH_TYPE_DEFAULT = 0, - RSS_HASH_TYPE_IPV4 = 1, - RSS_HASH_TYPE_TCP_IPV4 = 2, - RSS_HASH_TYPE_IPV6 = 3, - RSS_HASH_TYPE_TCP_IPV6 = 4, - RSS_HASH_TYPE_UDP_IPV4 = 5, - RSS_HASH_TYPE_UDP_IPV6 = 6, - MAX_RSS_HASH_TYPE -}; - #endif /* __ETH_COMMON__ */ diff --git a/include/linux/qed/iscsi_common.h b/include/linux/qed/iscsi_common.h index 157019f716..8f64b1223c 100644 --- a/include/linux/qed/iscsi_common.h +++ b/include/linux/qed/iscsi_common.h @@ -1,106 +1,116 @@ -/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */ /* QLogic qed NIC Driver - * Copyright (c) 2015-2017 QLogic Corporation - * Copyright (c) 2019-2020 Marvell International Ltd. 
+ * Copyright (c) 2015 QLogic Corporation + * + * This software is available under the terms of the GNU General Public License + * (GPL) Version 2, available from the file COPYING in the main directory of + * this source tree. */ #ifndef __ISCSI_COMMON__ #define __ISCSI_COMMON__ - /**********************/ /* ISCSI FW CONSTANTS */ /**********************/ /* iSCSI HSI constants */ -#define ISCSI_DEFAULT_MTU (1500) +#define ISCSI_DEFAULT_MTU (1500) + +/* Current iSCSI HSI version number composed of two fields (16 bit) */ +#define ISCSI_HSI_MAJOR_VERSION (0) +#define ISCSI_HSI_MINOR_VERSION (0) /* KWQ (kernel work queue) layer codes */ -#define ISCSI_SLOW_PATH_LAYER_CODE (6) +#define ISCSI_SLOW_PATH_LAYER_CODE (6) + +/* CQE completion status */ +#define ISCSI_EQE_COMPLETION_SUCCESS (0x0) +#define ISCSI_EQE_RST_CONN_RCVD (0x1) /* iSCSI parameter defaults */ -#define ISCSI_DEFAULT_HEADER_DIGEST (0) -#define ISCSI_DEFAULT_DATA_DIGEST (0) -#define ISCSI_DEFAULT_INITIAL_R2T (1) -#define ISCSI_DEFAULT_IMMEDIATE_DATA (1) -#define ISCSI_DEFAULT_MAX_PDU_LENGTH (0x2000) -#define ISCSI_DEFAULT_FIRST_BURST_LENGTH (0x10000) -#define ISCSI_DEFAULT_MAX_BURST_LENGTH (0x40000) -#define ISCSI_DEFAULT_MAX_OUTSTANDING_R2T (1) +#define ISCSI_DEFAULT_HEADER_DIGEST (0) +#define ISCSI_DEFAULT_DATA_DIGEST (0) +#define ISCSI_DEFAULT_INITIAL_R2T (1) +#define ISCSI_DEFAULT_IMMEDIATE_DATA (1) +#define ISCSI_DEFAULT_MAX_PDU_LENGTH (0x2000) +#define ISCSI_DEFAULT_FIRST_BURST_LENGTH (0x10000) +#define ISCSI_DEFAULT_MAX_BURST_LENGTH (0x40000) +#define ISCSI_DEFAULT_MAX_OUTSTANDING_R2T (1) /* iSCSI parameter limits */ -#define ISCSI_MIN_VAL_MAX_PDU_LENGTH (0x200) -#define ISCSI_MAX_VAL_MAX_PDU_LENGTH (0xffffff) -#define ISCSI_MIN_VAL_BURST_LENGTH (0x200) -#define ISCSI_MAX_VAL_BURST_LENGTH (0xffffff) -#define ISCSI_MIN_VAL_MAX_OUTSTANDING_R2T (1) -#define ISCSI_MAX_VAL_MAX_OUTSTANDING_R2T (0xff) - -#define ISCSI_AHS_CNTL_SIZE 4 - -#define ISCSI_WQE_NUM_SGES_SLOWIO (0xf) +#define 
ISCSI_MIN_VAL_MAX_PDU_LENGTH (0x200) +#define ISCSI_MAX_VAL_MAX_PDU_LENGTH (0xffffff) +#define ISCSI_MIN_VAL_BURST_LENGTH (0x200) +#define ISCSI_MAX_VAL_BURST_LENGTH (0xffffff) +#define ISCSI_MIN_VAL_MAX_OUTSTANDING_R2T (1) +#define ISCSI_MAX_VAL_MAX_OUTSTANDING_R2T (0xff) /* iSCSI reserved params */ #define ISCSI_ITT_ALL_ONES (0xffffffff) #define ISCSI_TTT_ALL_ONES (0xffffffff) -#define ISCSI_OPTION_1_OFF_CHIP_TCP 1 -#define ISCSI_OPTION_2_ON_CHIP_TCP 2 +#define ISCSI_OPTION_1_OFF_CHIP_TCP 1 +#define ISCSI_OPTION_2_ON_CHIP_TCP 2 -#define ISCSI_INITIATOR_MODE 0 -#define ISCSI_TARGET_MODE 1 +#define ISCSI_INITIATOR_MODE 0 +#define ISCSI_TARGET_MODE 1 /* iSCSI request op codes */ -#define ISCSI_OPCODE_NOP_OUT (0) -#define ISCSI_OPCODE_SCSI_CMD (1) -#define ISCSI_OPCODE_TMF_REQUEST (2) -#define ISCSI_OPCODE_LOGIN_REQUEST (3) -#define ISCSI_OPCODE_TEXT_REQUEST (4) -#define ISCSI_OPCODE_DATA_OUT (5) -#define ISCSI_OPCODE_LOGOUT_REQUEST (6) +#define ISCSI_OPCODE_NOP_OUT_NO_IMM (0) +#define ISCSI_OPCODE_NOP_OUT ( \ + ISCSI_OPCODE_NOP_OUT_NO_IMM | 0x40) +#define ISCSI_OPCODE_SCSI_CMD_NO_IMM (1) +#define ISCSI_OPCODE_SCSI_CMD ( \ + ISCSI_OPCODE_SCSI_CMD_NO_IMM | 0x40) +#define ISCSI_OPCODE_TMF_REQUEST_NO_IMM (2) +#define ISCSI_OPCODE_TMF_REQUEST ( \ + ISCSI_OPCODE_TMF_REQUEST_NO_IMM | 0x40) +#define ISCSI_OPCODE_LOGIN_REQUEST_NO_IMM (3) +#define ISCSI_OPCODE_LOGIN_REQUEST ( \ + ISCSI_OPCODE_LOGIN_REQUEST_NO_IMM | 0x40) +#define ISCSI_OPCODE_TEXT_REQUEST_NO_IMM (4) +#define ISCSI_OPCODE_TEXT_REQUEST ( \ + ISCSI_OPCODE_TEXT_REQUEST_NO_IMM | 0x40) +#define ISCSI_OPCODE_DATA_OUT (5) +#define ISCSI_OPCODE_LOGOUT_REQUEST_NO_IMM (6) +#define ISCSI_OPCODE_LOGOUT_REQUEST ( \ + ISCSI_OPCODE_LOGOUT_REQUEST_NO_IMM | 0x40) /* iSCSI response/messages op codes */ -#define ISCSI_OPCODE_NOP_IN (0x20) -#define ISCSI_OPCODE_SCSI_RESPONSE (0x21) -#define ISCSI_OPCODE_TMF_RESPONSE (0x22) -#define ISCSI_OPCODE_LOGIN_RESPONSE (0x23) -#define ISCSI_OPCODE_TEXT_RESPONSE (0x24) -#define 
ISCSI_OPCODE_DATA_IN (0x25) -#define ISCSI_OPCODE_LOGOUT_RESPONSE (0x26) -#define ISCSI_OPCODE_R2T (0x31) -#define ISCSI_OPCODE_ASYNC_MSG (0x32) -#define ISCSI_OPCODE_REJECT (0x3f) +#define ISCSI_OPCODE_NOP_IN (0x20) +#define ISCSI_OPCODE_SCSI_RESPONSE (0x21) +#define ISCSI_OPCODE_TMF_RESPONSE (0x22) +#define ISCSI_OPCODE_LOGIN_RESPONSE (0x23) +#define ISCSI_OPCODE_TEXT_RESPONSE (0x24) +#define ISCSI_OPCODE_DATA_IN (0x25) +#define ISCSI_OPCODE_LOGOUT_RESPONSE (0x26) +#define ISCSI_OPCODE_R2T (0x31) +#define ISCSI_OPCODE_ASYNC_MSG (0x32) +#define ISCSI_OPCODE_REJECT (0x3f) /* iSCSI stages */ -#define ISCSI_STAGE_SECURITY_NEGOTIATION (0) -#define ISCSI_STAGE_LOGIN_OPERATIONAL_NEGOTIATION (1) -#define ISCSI_STAGE_FULL_FEATURE_PHASE (3) +#define ISCSI_STAGE_SECURITY_NEGOTIATION (0) +#define ISCSI_STAGE_LOGIN_OPERATIONAL_NEGOTIATION (1) +#define ISCSI_STAGE_FULL_FEATURE_PHASE (3) /* iSCSI CQE errors */ -#define CQE_ERROR_BITMAP_DATA_DIGEST (0x08) -#define CQE_ERROR_BITMAP_RCV_ON_INVALID_CONN (0x10) -#define CQE_ERROR_BITMAP_DATA_TRUNCATED (0x20) +#define CQE_ERROR_BITMAP_DATA_DIGEST (0x08) +#define CQE_ERROR_BITMAP_RCV_ON_INVALID_CONN (0x10) +#define CQE_ERROR_BITMAP_DATA_TRUNCATED (0x20) -/* Union of data bd_opaque/ tq_tid */ -union bd_opaque_tq_union { - __le16 bd_opaque; - __le16 tq_tid; -}; - -/* ISCSI SGL entry */ struct cqe_error_bitmap { u8 cqe_error_status_bits; -#define CQE_ERROR_BITMAP_DIF_ERR_BITS_MASK 0x7 -#define CQE_ERROR_BITMAP_DIF_ERR_BITS_SHIFT 0 -#define CQE_ERROR_BITMAP_DATA_DIGEST_ERR_MASK 0x1 -#define CQE_ERROR_BITMAP_DATA_DIGEST_ERR_SHIFT 3 -#define CQE_ERROR_BITMAP_RCV_ON_INVALID_CONN_MASK 0x1 -#define CQE_ERROR_BITMAP_RCV_ON_INVALID_CONN_SHIFT 4 -#define CQE_ERROR_BITMAP_DATA_TRUNCATED_ERR_MASK 0x1 -#define CQE_ERROR_BITMAP_DATA_TRUNCATED_ERR_SHIFT 5 -#define CQE_ERROR_BITMAP_UNDER_RUN_ERR_MASK 0x1 -#define CQE_ERROR_BITMAP_UNDER_RUN_ERR_SHIFT 6 -#define CQE_ERROR_BITMAP_RESERVED2_MASK 0x1 -#define CQE_ERROR_BITMAP_RESERVED2_SHIFT 7 +#define 
CQE_ERROR_BITMAP_DIF_ERR_BITS_MASK 0x7 +#define CQE_ERROR_BITMAP_DIF_ERR_BITS_SHIFT 0 +#define CQE_ERROR_BITMAP_DATA_DIGEST_ERR_MASK 0x1 +#define CQE_ERROR_BITMAP_DATA_DIGEST_ERR_SHIFT 3 +#define CQE_ERROR_BITMAP_RCV_ON_INVALID_CONN_MASK 0x1 +#define CQE_ERROR_BITMAP_RCV_ON_INVALID_CONN_SHIFT 4 +#define CQE_ERROR_BITMAP_DATA_TRUNCATED_ERR_MASK 0x1 +#define CQE_ERROR_BITMAP_DATA_TRUNCATED_ERR_SHIFT 5 +#define CQE_ERROR_BITMAP_UNDER_RUN_ERR_MASK 0x1 +#define CQE_ERROR_BITMAP_UNDER_RUN_ERR_SHIFT 6 +#define CQE_ERROR_BITMAP_RESERVED2_MASK 0x1 +#define CQE_ERROR_BITMAP_RESERVED2_SHIFT 7 }; union cqe_error_status { @@ -108,168 +118,69 @@ union cqe_error_status { struct cqe_error_bitmap error_bits; }; -/* iSCSI Login Response PDU header */ struct data_hdr { __le32 data[12]; }; -struct lun_mapper_addr_reserved { - struct regpair lun_mapper_addr; - u8 reserved0[8]; -}; - -/* rdif conetxt for dif on immediate */ -struct dif_on_immediate_params { - __le32 initial_ref_tag; - __le16 application_tag; - __le16 application_tag_mask; - __le16 flags1; -#define DIF_ON_IMMEDIATE_PARAMS_VALIDATE_GUARD_MASK 0x1 -#define DIF_ON_IMMEDIATE_PARAMS_VALIDATE_GUARD_SHIFT 0 -#define DIF_ON_IMMEDIATE_PARAMS_VALIDATE_APP_TAG_MASK 0x1 -#define DIF_ON_IMMEDIATE_PARAMS_VALIDATE_APP_TAG_SHIFT 1 -#define DIF_ON_IMMEDIATE_PARAMS_VALIDATE_REF_TAG_MASK 0x1 -#define DIF_ON_IMMEDIATE_PARAMS_VALIDATE_REF_TAG_SHIFT 2 -#define DIF_ON_IMMEDIATE_PARAMS_FORWARD_GUARD_MASK 0x1 -#define DIF_ON_IMMEDIATE_PARAMS_FORWARD_GUARD_SHIFT 3 -#define DIF_ON_IMMEDIATE_PARAMS_FORWARD_APP_TAG_MASK 0x1 -#define DIF_ON_IMMEDIATE_PARAMS_FORWARD_APP_TAG_SHIFT 4 -#define DIF_ON_IMMEDIATE_PARAMS_FORWARD_REF_TAG_MASK 0x1 -#define DIF_ON_IMMEDIATE_PARAMS_FORWARD_REF_TAG_SHIFT 5 -#define DIF_ON_IMMEDIATE_PARAMS_INTERVAL_SIZE_MASK 0x1 -#define DIF_ON_IMMEDIATE_PARAMS_INTERVAL_SIZE_SHIFT 6 -#define DIF_ON_IMMEDIATE_PARAMS_NETWORK_INTERFACE_MASK 0x1 -#define DIF_ON_IMMEDIATE_PARAMS_NETWORK_INTERFACE_SHIFT 7 -#define 
DIF_ON_IMMEDIATE_PARAMS_HOST_INTERFACE_MASK 0x3 -#define DIF_ON_IMMEDIATE_PARAMS_HOST_INTERFACE_SHIFT 8 -#define DIF_ON_IMMEDIATE_PARAMS_REF_TAG_MASK_MASK 0xF -#define DIF_ON_IMMEDIATE_PARAMS_REF_TAG_MASK_SHIFT 10 -#define DIF_ON_IMMEDIATE_PARAMS_FORWARD_APP_TAG_WITH_MASK_MASK 0x1 -#define DIF_ON_IMMEDIATE_PARAMS_FORWARD_APP_TAG_WITH_MASK_SHIFT 14 -#define DIF_ON_IMMEDIATE_PARAMS_FORWARD_REF_TAG_WITH_MASK_MASK 0x1 -#define DIF_ON_IMMEDIATE_PARAMS_FORWARD_REF_TAG_WITH_MASK_SHIFT 15 - u8 flags0; -#define DIF_ON_IMMEDIATE_PARAMS_RESERVED_MASK 0x1 -#define DIF_ON_IMMEDIATE_PARAMS_RESERVED_SHIFT 0 -#define DIF_ON_IMMEDIATE_PARAMS_IGNORE_APP_TAG_MASK 0x1 -#define DIF_ON_IMMEDIATE_PARAMS_IGNORE_APP_TAG_SHIFT 1 -#define DIF_ON_IMMEDIATE_PARAMS_INITIAL_REF_TAG_IS_VALID_MASK 0x1 -#define DIF_ON_IMMEDIATE_PARAMS_INITIAL_REF_TAG_IS_VALID_SHIFT 2 -#define DIF_ON_IMMEDIATE_PARAMS_HOST_GUARD_TYPE_MASK 0x1 -#define DIF_ON_IMMEDIATE_PARAMS_HOST_GUARD_TYPE_SHIFT 3 -#define DIF_ON_IMMEDIATE_PARAMS_PROTECTION_TYPE_MASK 0x3 -#define DIF_ON_IMMEDIATE_PARAMS_PROTECTION_TYPE_SHIFT 4 -#define DIF_ON_IMMEDIATE_PARAMS_CRC_SEED_MASK 0x1 -#define DIF_ON_IMMEDIATE_PARAMS_CRC_SEED_SHIFT 6 -#define DIF_ON_IMMEDIATE_PARAMS_KEEP_REF_TAG_CONST_MASK 0x1 -#define DIF_ON_IMMEDIATE_PARAMS_KEEP_REF_TAG_CONST_SHIFT 7 - u8 reserved_zero[5]; -}; - -/* iSCSI dif on immediate mode attributes union */ -union dif_configuration_params { - struct lun_mapper_addr_reserved lun_mapper_address; - struct dif_on_immediate_params def_dif_conf; -}; - -/* Union of data/r2t sequence number */ -union iscsi_seq_num { - __le16 data_sn; - __le16 r2t_sn; -}; - -/* iSCSI DIF flags */ -struct iscsi_dif_flags { - u8 flags; -#define ISCSI_DIF_FLAGS_PROT_INTERVAL_SIZE_LOG_MASK 0xF -#define ISCSI_DIF_FLAGS_PROT_INTERVAL_SIZE_LOG_SHIFT 0 -#define ISCSI_DIF_FLAGS_DIF_TO_PEER_MASK 0x1 -#define ISCSI_DIF_FLAGS_DIF_TO_PEER_SHIFT 4 -#define ISCSI_DIF_FLAGS_HOST_INTERFACE_MASK 0x7 -#define ISCSI_DIF_FLAGS_HOST_INTERFACE_SHIFT 5 -}; - -/* 
The iscsi storm task context of Ystorm */ -struct ystorm_iscsi_task_state { - struct scsi_cached_sges data_desc; - struct scsi_sgl_params sgl_params; - __le32 exp_r2t_sn; - __le32 buffer_offset; - union iscsi_seq_num seq_num; - struct iscsi_dif_flags dif_flags; - u8 flags; -#define YSTORM_ISCSI_TASK_STATE_LOCAL_COMP_MASK 0x1 -#define YSTORM_ISCSI_TASK_STATE_LOCAL_COMP_SHIFT 0 -#define YSTORM_ISCSI_TASK_STATE_SLOW_IO_MASK 0x1 -#define YSTORM_ISCSI_TASK_STATE_SLOW_IO_SHIFT 1 -#define YSTORM_ISCSI_TASK_STATE_SET_DIF_OFFSET_MASK 0x1 -#define YSTORM_ISCSI_TASK_STATE_SET_DIF_OFFSET_SHIFT 2 -#define YSTORM_ISCSI_TASK_STATE_RESERVED0_MASK 0x1F -#define YSTORM_ISCSI_TASK_STATE_RESERVED0_SHIFT 3 -}; - -/* The iscsi storm task context of Ystorm */ -struct ystorm_iscsi_task_rxmit_opt { - __le32 fast_rxmit_sge_offset; - __le32 scan_start_buffer_offset; - __le32 fast_rxmit_buffer_offset; - u8 scan_start_sgl_index; - u8 fast_rxmit_sgl_index; - __le16 reserved; -}; - -/* iSCSI Common PDU header */ -struct iscsi_common_hdr { - u8 hdr_status; - u8 hdr_response; - u8 hdr_flags; - u8 hdr_first_byte; -#define ISCSI_COMMON_HDR_OPCODE_MASK 0x3F -#define ISCSI_COMMON_HDR_OPCODE_SHIFT 0 -#define ISCSI_COMMON_HDR_IMM_MASK 0x1 -#define ISCSI_COMMON_HDR_IMM_SHIFT 6 -#define ISCSI_COMMON_HDR_RSRV_MASK 0x1 -#define ISCSI_COMMON_HDR_RSRV_SHIFT 7 +struct iscsi_async_msg_hdr { + __le16 reserved0; + u8 flags_attr; +#define ISCSI_ASYNC_MSG_HDR_RSRV_MASK 0x7F +#define ISCSI_ASYNC_MSG_HDR_RSRV_SHIFT 0 +#define ISCSI_ASYNC_MSG_HDR_CONST1_MASK 0x1 +#define ISCSI_ASYNC_MSG_HDR_CONST1_SHIFT 7 + u8 opcode; __le32 hdr_second_dword; -#define ISCSI_COMMON_HDR_DATA_SEG_LEN_MASK 0xFFFFFF -#define ISCSI_COMMON_HDR_DATA_SEG_LEN_SHIFT 0 -#define ISCSI_COMMON_HDR_TOTAL_AHS_LEN_MASK 0xFF -#define ISCSI_COMMON_HDR_TOTAL_AHS_LEN_SHIFT 24 - struct regpair lun_reserved; - __le32 itt; - __le32 ttt; - __le32 cmdstat_sn; - __le32 exp_statcmd_sn; +#define ISCSI_ASYNC_MSG_HDR_DATA_SEG_LEN_MASK 0xFFFFFF +#define 
ISCSI_ASYNC_MSG_HDR_DATA_SEG_LEN_SHIFT 0 +#define ISCSI_ASYNC_MSG_HDR_TOTAL_AHS_LEN_MASK 0xFF +#define ISCSI_ASYNC_MSG_HDR_TOTAL_AHS_LEN_SHIFT 24 + struct regpair lun; + __le32 all_ones; + __le32 reserved1; + __le32 stat_sn; + __le32 exp_cmd_sn; __le32 max_cmd_sn; - __le32 data[3]; + __le16 param1_rsrv; + u8 async_vcode; + u8 async_event; + __le16 param3_rsrv; + __le16 param2_rsrv; + __le32 reserved7; +}; + +struct iscsi_sge { + struct regpair sge_addr; + __le16 sge_len; + __le16 reserved0; + __le32 reserved1; +}; + +struct iscsi_cached_sge_ctx { + struct iscsi_sge sge; + struct regpair reserved; + __le32 dsgl_curr_offset[2]; }; -/* iSCSI Command PDU header */ struct iscsi_cmd_hdr { __le16 reserved1; u8 flags_attr; -#define ISCSI_CMD_HDR_ATTR_MASK 0x7 -#define ISCSI_CMD_HDR_ATTR_SHIFT 0 -#define ISCSI_CMD_HDR_RSRV_MASK 0x3 -#define ISCSI_CMD_HDR_RSRV_SHIFT 3 -#define ISCSI_CMD_HDR_WRITE_MASK 0x1 -#define ISCSI_CMD_HDR_WRITE_SHIFT 5 -#define ISCSI_CMD_HDR_READ_MASK 0x1 -#define ISCSI_CMD_HDR_READ_SHIFT 6 -#define ISCSI_CMD_HDR_FINAL_MASK 0x1 -#define ISCSI_CMD_HDR_FINAL_SHIFT 7 - u8 hdr_first_byte; -#define ISCSI_CMD_HDR_OPCODE_MASK 0x3F -#define ISCSI_CMD_HDR_OPCODE_SHIFT 0 -#define ISCSI_CMD_HDR_IMM_MASK 0x1 -#define ISCSI_CMD_HDR_IMM_SHIFT 6 -#define ISCSI_CMD_HDR_RSRV1_MASK 0x1 -#define ISCSI_CMD_HDR_RSRV1_SHIFT 7 +#define ISCSI_CMD_HDR_ATTR_MASK 0x7 +#define ISCSI_CMD_HDR_ATTR_SHIFT 0 +#define ISCSI_CMD_HDR_RSRV_MASK 0x3 +#define ISCSI_CMD_HDR_RSRV_SHIFT 3 +#define ISCSI_CMD_HDR_WRITE_MASK 0x1 +#define ISCSI_CMD_HDR_WRITE_SHIFT 5 +#define ISCSI_CMD_HDR_READ_MASK 0x1 +#define ISCSI_CMD_HDR_READ_SHIFT 6 +#define ISCSI_CMD_HDR_FINAL_MASK 0x1 +#define ISCSI_CMD_HDR_FINAL_SHIFT 7 + u8 opcode; __le32 hdr_second_dword; -#define ISCSI_CMD_HDR_DATA_SEG_LEN_MASK 0xFFFFFF -#define ISCSI_CMD_HDR_DATA_SEG_LEN_SHIFT 0 -#define ISCSI_CMD_HDR_TOTAL_AHS_LEN_MASK 0xFF -#define ISCSI_CMD_HDR_TOTAL_AHS_LEN_SHIFT 24 +#define ISCSI_CMD_HDR_DATA_SEG_LEN_MASK 0xFFFFFF +#define 
ISCSI_CMD_HDR_DATA_SEG_LEN_SHIFT 0 +#define ISCSI_CMD_HDR_TOTAL_AHS_LEN_MASK 0xFF +#define ISCSI_CMD_HDR_TOTAL_AHS_LEN_SHIFT 24 struct regpair lun; __le32 itt; __le32 expected_transfer_length; @@ -278,55 +189,128 @@ struct iscsi_cmd_hdr { __le32 cdb[4]; }; -/* iSCSI Command PDU header with Extended CDB (Initiator Mode) */ +struct iscsi_common_hdr { + u8 hdr_status; + u8 hdr_response; + u8 hdr_flags; + u8 hdr_first_byte; +#define ISCSI_COMMON_HDR_OPCODE_MASK 0x3F +#define ISCSI_COMMON_HDR_OPCODE_SHIFT 0 +#define ISCSI_COMMON_HDR_IMM_MASK 0x1 +#define ISCSI_COMMON_HDR_IMM_SHIFT 6 +#define ISCSI_COMMON_HDR_RSRV_MASK 0x1 +#define ISCSI_COMMON_HDR_RSRV_SHIFT 7 + __le32 hdr_second_dword; +#define ISCSI_COMMON_HDR_DATA_SEG_LEN_MASK 0xFFFFFF +#define ISCSI_COMMON_HDR_DATA_SEG_LEN_SHIFT 0 +#define ISCSI_COMMON_HDR_TOTAL_AHS_LEN_MASK 0xFF +#define ISCSI_COMMON_HDR_TOTAL_AHS_LEN_SHIFT 24 + __le32 lun_reserved[4]; + __le32 data[6]; +}; + +struct iscsi_conn_offload_params { + struct regpair sq_pbl_addr; + struct regpair r2tq_pbl_addr; + struct regpair xhq_pbl_addr; + struct regpair uhq_pbl_addr; + __le32 initial_ack; + __le16 physical_q0; + __le16 physical_q1; + u8 flags; +#define ISCSI_CONN_OFFLOAD_PARAMS_TCP_ON_CHIP_1B_MASK 0x1 +#define ISCSI_CONN_OFFLOAD_PARAMS_TCP_ON_CHIP_1B_SHIFT 0 +#define ISCSI_CONN_OFFLOAD_PARAMS_TARGET_MODE_MASK 0x1 +#define ISCSI_CONN_OFFLOAD_PARAMS_TARGET_MODE_SHIFT 1 +#define ISCSI_CONN_OFFLOAD_PARAMS_RESERVED1_MASK 0x3F +#define ISCSI_CONN_OFFLOAD_PARAMS_RESERVED1_SHIFT 2 + u8 pbl_page_size_log; + u8 pbe_page_size_log; + u8 default_cq; + __le32 stat_sn; +}; + +struct iscsi_slow_path_hdr { + u8 op_code; + u8 flags; +#define ISCSI_SLOW_PATH_HDR_RESERVED0_MASK 0xF +#define ISCSI_SLOW_PATH_HDR_RESERVED0_SHIFT 0 +#define ISCSI_SLOW_PATH_HDR_LAYER_CODE_MASK 0x7 +#define ISCSI_SLOW_PATH_HDR_LAYER_CODE_SHIFT 4 +#define ISCSI_SLOW_PATH_HDR_RESERVED1_MASK 0x1 +#define ISCSI_SLOW_PATH_HDR_RESERVED1_SHIFT 7 +}; + +struct iscsi_conn_update_ramrod_params { + 
struct iscsi_slow_path_hdr hdr; + __le16 conn_id; + __le32 fw_cid; + u8 flags; +#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_HD_EN_MASK 0x1 +#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_HD_EN_SHIFT 0 +#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_DD_EN_MASK 0x1 +#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_DD_EN_SHIFT 1 +#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_INITIAL_R2T_MASK 0x1 +#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_INITIAL_R2T_SHIFT 2 +#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_IMMEDIATE_DATA_MASK 0x1 +#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_IMMEDIATE_DATA_SHIFT 3 +#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_RESERVED1_MASK 0xF +#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_RESERVED1_SHIFT 4 + u8 reserved0[3]; + __le32 max_seq_size; + __le32 max_send_pdu_length; + __le32 max_recv_pdu_length; + __le32 first_seq_length; + __le32 exp_stat_sn; +}; + struct iscsi_ext_cdb_cmd_hdr { __le16 reserved1; u8 flags_attr; -#define ISCSI_EXT_CDB_CMD_HDR_ATTR_MASK 0x7 -#define ISCSI_EXT_CDB_CMD_HDR_ATTR_SHIFT 0 -#define ISCSI_EXT_CDB_CMD_HDR_RSRV_MASK 0x3 -#define ISCSI_EXT_CDB_CMD_HDR_RSRV_SHIFT 3 -#define ISCSI_EXT_CDB_CMD_HDR_WRITE_MASK 0x1 -#define ISCSI_EXT_CDB_CMD_HDR_WRITE_SHIFT 5 -#define ISCSI_EXT_CDB_CMD_HDR_READ_MASK 0x1 -#define ISCSI_EXT_CDB_CMD_HDR_READ_SHIFT 6 -#define ISCSI_EXT_CDB_CMD_HDR_FINAL_MASK 0x1 -#define ISCSI_EXT_CDB_CMD_HDR_FINAL_SHIFT 7 +#define ISCSI_EXT_CDB_CMD_HDR_ATTR_MASK 0x7 +#define ISCSI_EXT_CDB_CMD_HDR_ATTR_SHIFT 0 +#define ISCSI_EXT_CDB_CMD_HDR_RSRV_MASK 0x3 +#define ISCSI_EXT_CDB_CMD_HDR_RSRV_SHIFT 3 +#define ISCSI_EXT_CDB_CMD_HDR_WRITE_MASK 0x1 +#define ISCSI_EXT_CDB_CMD_HDR_WRITE_SHIFT 5 +#define ISCSI_EXT_CDB_CMD_HDR_READ_MASK 0x1 +#define ISCSI_EXT_CDB_CMD_HDR_READ_SHIFT 6 +#define ISCSI_EXT_CDB_CMD_HDR_FINAL_MASK 0x1 +#define ISCSI_EXT_CDB_CMD_HDR_FINAL_SHIFT 7 u8 opcode; __le32 hdr_second_dword; -#define ISCSI_EXT_CDB_CMD_HDR_DATA_SEG_LEN_MASK 0xFFFFFF -#define ISCSI_EXT_CDB_CMD_HDR_DATA_SEG_LEN_SHIFT 0 -#define ISCSI_EXT_CDB_CMD_HDR_CDB_SIZE_MASK 0xFF -#define 
ISCSI_EXT_CDB_CMD_HDR_CDB_SIZE_SHIFT 24 +#define ISCSI_EXT_CDB_CMD_HDR_DATA_SEG_LEN_MASK 0xFFFFFF +#define ISCSI_EXT_CDB_CMD_HDR_DATA_SEG_LEN_SHIFT 0 +#define ISCSI_EXT_CDB_CMD_HDR_CDB_SIZE_MASK 0xFF +#define ISCSI_EXT_CDB_CMD_HDR_CDB_SIZE_SHIFT 24 struct regpair lun; __le32 itt; __le32 expected_transfer_length; __le32 cmd_sn; __le32 exp_stat_sn; - struct scsi_sge cdb_sge; + struct iscsi_sge cdb_sge; }; -/* iSCSI login request PDU header */ struct iscsi_login_req_hdr { u8 version_min; u8 version_max; u8 flags_attr; -#define ISCSI_LOGIN_REQ_HDR_NSG_MASK 0x3 -#define ISCSI_LOGIN_REQ_HDR_NSG_SHIFT 0 -#define ISCSI_LOGIN_REQ_HDR_CSG_MASK 0x3 -#define ISCSI_LOGIN_REQ_HDR_CSG_SHIFT 2 -#define ISCSI_LOGIN_REQ_HDR_RSRV_MASK 0x3 -#define ISCSI_LOGIN_REQ_HDR_RSRV_SHIFT 4 -#define ISCSI_LOGIN_REQ_HDR_C_MASK 0x1 -#define ISCSI_LOGIN_REQ_HDR_C_SHIFT 6 -#define ISCSI_LOGIN_REQ_HDR_T_MASK 0x1 -#define ISCSI_LOGIN_REQ_HDR_T_SHIFT 7 +#define ISCSI_LOGIN_REQ_HDR_NSG_MASK 0x3 +#define ISCSI_LOGIN_REQ_HDR_NSG_SHIFT 0 +#define ISCSI_LOGIN_REQ_HDR_CSG_MASK 0x3 +#define ISCSI_LOGIN_REQ_HDR_CSG_SHIFT 2 +#define ISCSI_LOGIN_REQ_HDR_RSRV_MASK 0x3 +#define ISCSI_LOGIN_REQ_HDR_RSRV_SHIFT 4 +#define ISCSI_LOGIN_REQ_HDR_C_MASK 0x1 +#define ISCSI_LOGIN_REQ_HDR_C_SHIFT 6 +#define ISCSI_LOGIN_REQ_HDR_T_MASK 0x1 +#define ISCSI_LOGIN_REQ_HDR_T_SHIFT 7 u8 opcode; __le32 hdr_second_dword; -#define ISCSI_LOGIN_REQ_HDR_DATA_SEG_LEN_MASK 0xFFFFFF -#define ISCSI_LOGIN_REQ_HDR_DATA_SEG_LEN_SHIFT 0 -#define ISCSI_LOGIN_REQ_HDR_TOTAL_AHS_LEN_MASK 0xFF -#define ISCSI_LOGIN_REQ_HDR_TOTAL_AHS_LEN_SHIFT 24 +#define ISCSI_LOGIN_REQ_HDR_DATA_SEG_LEN_MASK 0xFFFFFF +#define ISCSI_LOGIN_REQ_HDR_DATA_SEG_LEN_SHIFT 0 +#define ISCSI_LOGIN_REQ_HDR_TOTAL_AHS_LEN_MASK 0xFF +#define ISCSI_LOGIN_REQ_HDR_TOTAL_AHS_LEN_SHIFT 24 __le32 isid_tabc; __le16 tsih; __le16 isid_d; @@ -338,7 +322,6 @@ struct iscsi_login_req_hdr { __le32 reserved2[4]; }; -/* iSCSI logout request PDU header */ struct iscsi_logout_req_hdr { __le16 
reserved0; u8 reason_code; @@ -353,14 +336,13 @@ struct iscsi_logout_req_hdr { __le32 reserved4[4]; }; -/* iSCSI Data-out PDU header */ struct iscsi_data_out_hdr { __le16 reserved1; u8 flags_attr; -#define ISCSI_DATA_OUT_HDR_RSRV_MASK 0x7F -#define ISCSI_DATA_OUT_HDR_RSRV_SHIFT 0 -#define ISCSI_DATA_OUT_HDR_FINAL_MASK 0x1 -#define ISCSI_DATA_OUT_HDR_FINAL_SHIFT 7 +#define ISCSI_DATA_OUT_HDR_RSRV_MASK 0x7F +#define ISCSI_DATA_OUT_HDR_RSRV_SHIFT 0 +#define ISCSI_DATA_OUT_HDR_FINAL_MASK 0x1 +#define ISCSI_DATA_OUT_HDR_FINAL_SHIFT 7 u8 opcode; __le32 reserved2; struct regpair lun; @@ -374,23 +356,22 @@ struct iscsi_data_out_hdr { __le32 reserved5; }; -/* iSCSI Data-in PDU header */ struct iscsi_data_in_hdr { u8 status_rsvd; u8 reserved1; u8 flags; -#define ISCSI_DATA_IN_HDR_STATUS_MASK 0x1 -#define ISCSI_DATA_IN_HDR_STATUS_SHIFT 0 -#define ISCSI_DATA_IN_HDR_UNDERFLOW_MASK 0x1 -#define ISCSI_DATA_IN_HDR_UNDERFLOW_SHIFT 1 -#define ISCSI_DATA_IN_HDR_OVERFLOW_MASK 0x1 -#define ISCSI_DATA_IN_HDR_OVERFLOW_SHIFT 2 -#define ISCSI_DATA_IN_HDR_RSRV_MASK 0x7 -#define ISCSI_DATA_IN_HDR_RSRV_SHIFT 3 -#define ISCSI_DATA_IN_HDR_ACK_MASK 0x1 -#define ISCSI_DATA_IN_HDR_ACK_SHIFT 6 -#define ISCSI_DATA_IN_HDR_FINAL_MASK 0x1 -#define ISCSI_DATA_IN_HDR_FINAL_SHIFT 7 +#define ISCSI_DATA_IN_HDR_STATUS_MASK 0x1 +#define ISCSI_DATA_IN_HDR_STATUS_SHIFT 0 +#define ISCSI_DATA_IN_HDR_UNDERFLOW_MASK 0x1 +#define ISCSI_DATA_IN_HDR_UNDERFLOW_SHIFT 1 +#define ISCSI_DATA_IN_HDR_OVERFLOW_MASK 0x1 +#define ISCSI_DATA_IN_HDR_OVERFLOW_SHIFT 2 +#define ISCSI_DATA_IN_HDR_RSRV_MASK 0x7 +#define ISCSI_DATA_IN_HDR_RSRV_SHIFT 3 +#define ISCSI_DATA_IN_HDR_ACK_MASK 0x1 +#define ISCSI_DATA_IN_HDR_ACK_SHIFT 6 +#define ISCSI_DATA_IN_HDR_FINAL_MASK 0x1 +#define ISCSI_DATA_IN_HDR_FINAL_SHIFT 7 u8 opcode; __le32 reserved2; struct regpair lun; @@ -404,7 +385,6 @@ struct iscsi_data_in_hdr { __le32 residual_count; }; -/* iSCSI R2T PDU header */ struct iscsi_r2t_hdr { u8 reserved0[3]; u8 opcode; @@ -420,14 +400,13 @@ struct 
iscsi_r2t_hdr { __le32 desired_data_trns_len; }; -/* iSCSI NOP-out PDU header */ struct iscsi_nop_out_hdr { __le16 reserved1; u8 flags_attr; -#define ISCSI_NOP_OUT_HDR_RSRV_MASK 0x7F -#define ISCSI_NOP_OUT_HDR_RSRV_SHIFT 0 -#define ISCSI_NOP_OUT_HDR_CONST1_MASK 0x1 -#define ISCSI_NOP_OUT_HDR_CONST1_SHIFT 7 +#define ISCSI_NOP_OUT_HDR_RSRV_MASK 0x7F +#define ISCSI_NOP_OUT_HDR_RSRV_SHIFT 0 +#define ISCSI_NOP_OUT_HDR_CONST1_MASK 0x1 +#define ISCSI_NOP_OUT_HDR_CONST1_SHIFT 7 u8 opcode; __le32 reserved2; struct regpair lun; @@ -441,20 +420,19 @@ struct iscsi_nop_out_hdr { __le32 reserved6; }; -/* iSCSI NOP-in PDU header */ struct iscsi_nop_in_hdr { __le16 reserved0; u8 flags_attr; -#define ISCSI_NOP_IN_HDR_RSRV_MASK 0x7F -#define ISCSI_NOP_IN_HDR_RSRV_SHIFT 0 -#define ISCSI_NOP_IN_HDR_CONST1_MASK 0x1 -#define ISCSI_NOP_IN_HDR_CONST1_SHIFT 7 +#define ISCSI_NOP_IN_HDR_RSRV_MASK 0x7F +#define ISCSI_NOP_IN_HDR_RSRV_SHIFT 0 +#define ISCSI_NOP_IN_HDR_CONST1_MASK 0x1 +#define ISCSI_NOP_IN_HDR_CONST1_SHIFT 7 u8 opcode; __le32 hdr_second_dword; -#define ISCSI_NOP_IN_HDR_DATA_SEG_LEN_MASK 0xFFFFFF -#define ISCSI_NOP_IN_HDR_DATA_SEG_LEN_SHIFT 0 -#define ISCSI_NOP_IN_HDR_TOTAL_AHS_LEN_MASK 0xFF -#define ISCSI_NOP_IN_HDR_TOTAL_AHS_LEN_SHIFT 24 +#define ISCSI_NOP_IN_HDR_DATA_SEG_LEN_MASK 0xFFFFFF +#define ISCSI_NOP_IN_HDR_DATA_SEG_LEN_SHIFT 0 +#define ISCSI_NOP_IN_HDR_TOTAL_AHS_LEN_MASK 0xFF +#define ISCSI_NOP_IN_HDR_TOTAL_AHS_LEN_SHIFT 24 struct regpair lun; __le32 itt; __le32 ttt; @@ -466,27 +444,26 @@ struct iscsi_nop_in_hdr { __le32 reserved7; }; -/* iSCSI Login Response PDU header */ struct iscsi_login_response_hdr { u8 version_active; u8 version_max; u8 flags_attr; -#define ISCSI_LOGIN_RESPONSE_HDR_NSG_MASK 0x3 -#define ISCSI_LOGIN_RESPONSE_HDR_NSG_SHIFT 0 -#define ISCSI_LOGIN_RESPONSE_HDR_CSG_MASK 0x3 -#define ISCSI_LOGIN_RESPONSE_HDR_CSG_SHIFT 2 -#define ISCSI_LOGIN_RESPONSE_HDR_RSRV_MASK 0x3 -#define ISCSI_LOGIN_RESPONSE_HDR_RSRV_SHIFT 4 -#define 
ISCSI_LOGIN_RESPONSE_HDR_C_MASK 0x1 -#define ISCSI_LOGIN_RESPONSE_HDR_C_SHIFT 6 -#define ISCSI_LOGIN_RESPONSE_HDR_T_MASK 0x1 -#define ISCSI_LOGIN_RESPONSE_HDR_T_SHIFT 7 +#define ISCSI_LOGIN_RESPONSE_HDR_NSG_MASK 0x3 +#define ISCSI_LOGIN_RESPONSE_HDR_NSG_SHIFT 0 +#define ISCSI_LOGIN_RESPONSE_HDR_CSG_MASK 0x3 +#define ISCSI_LOGIN_RESPONSE_HDR_CSG_SHIFT 2 +#define ISCSI_LOGIN_RESPONSE_HDR_RSRV_MASK 0x3 +#define ISCSI_LOGIN_RESPONSE_HDR_RSRV_SHIFT 4 +#define ISCSI_LOGIN_RESPONSE_HDR_C_MASK 0x1 +#define ISCSI_LOGIN_RESPONSE_HDR_C_SHIFT 6 +#define ISCSI_LOGIN_RESPONSE_HDR_T_MASK 0x1 +#define ISCSI_LOGIN_RESPONSE_HDR_T_SHIFT 7 u8 opcode; __le32 hdr_second_dword; -#define ISCSI_LOGIN_RESPONSE_HDR_DATA_SEG_LEN_MASK 0xFFFFFF -#define ISCSI_LOGIN_RESPONSE_HDR_DATA_SEG_LEN_SHIFT 0 -#define ISCSI_LOGIN_RESPONSE_HDR_TOTAL_AHS_LEN_MASK 0xFF -#define ISCSI_LOGIN_RESPONSE_HDR_TOTAL_AHS_LEN_SHIFT 24 +#define ISCSI_LOGIN_RESPONSE_HDR_DATA_SEG_LEN_MASK 0xFFFFFF +#define ISCSI_LOGIN_RESPONSE_HDR_DATA_SEG_LEN_SHIFT 0 +#define ISCSI_LOGIN_RESPONSE_HDR_TOTAL_AHS_LEN_MASK 0xFF +#define ISCSI_LOGIN_RESPONSE_HDR_TOTAL_AHS_LEN_SHIFT 24 __le32 isid_tabc; __le16 tsih; __le16 isid_d; @@ -501,17 +478,16 @@ struct iscsi_login_response_hdr { __le32 reserved4[2]; }; -/* iSCSI Logout Response PDU header */ struct iscsi_logout_response_hdr { u8 reserved1; u8 response; u8 flags; u8 opcode; __le32 hdr_second_dword; -#define ISCSI_LOGOUT_RESPONSE_HDR_DATA_SEG_LEN_MASK 0xFFFFFF -#define ISCSI_LOGOUT_RESPONSE_HDR_DATA_SEG_LEN_SHIFT 0 -#define ISCSI_LOGOUT_RESPONSE_HDR_TOTAL_AHS_LEN_MASK 0xFF -#define ISCSI_LOGOUT_RESPONSE_HDR_TOTAL_AHS_LEN_SHIFT 24 +#define ISCSI_LOGOUT_RESPONSE_HDR_DATA_SEG_LEN_MASK 0xFFFFFF +#define ISCSI_LOGOUT_RESPONSE_HDR_DATA_SEG_LEN_SHIFT 0 +#define ISCSI_LOGOUT_RESPONSE_HDR_TOTAL_AHS_LEN_MASK 0xFF +#define ISCSI_LOGOUT_RESPONSE_HDR_TOTAL_AHS_LEN_SHIFT 24 __le32 reserved2[2]; __le32 itt; __le32 reserved3; @@ -519,27 +495,26 @@ struct iscsi_logout_response_hdr { __le32 exp_cmd_sn; 
__le32 max_cmd_sn; __le32 reserved4; - __le16 time_2_retain; - __le16 time_2_wait; + __le16 time2retain; + __le16 time2wait; __le32 reserved5[1]; }; -/* iSCSI Text Request PDU header */ struct iscsi_text_request_hdr { __le16 reserved0; u8 flags_attr; -#define ISCSI_TEXT_REQUEST_HDR_RSRV_MASK 0x3F -#define ISCSI_TEXT_REQUEST_HDR_RSRV_SHIFT 0 -#define ISCSI_TEXT_REQUEST_HDR_C_MASK 0x1 -#define ISCSI_TEXT_REQUEST_HDR_C_SHIFT 6 -#define ISCSI_TEXT_REQUEST_HDR_F_MASK 0x1 -#define ISCSI_TEXT_REQUEST_HDR_F_SHIFT 7 +#define ISCSI_TEXT_REQUEST_HDR_RSRV_MASK 0x3F +#define ISCSI_TEXT_REQUEST_HDR_RSRV_SHIFT 0 +#define ISCSI_TEXT_REQUEST_HDR_C_MASK 0x1 +#define ISCSI_TEXT_REQUEST_HDR_C_SHIFT 6 +#define ISCSI_TEXT_REQUEST_HDR_F_MASK 0x1 +#define ISCSI_TEXT_REQUEST_HDR_F_SHIFT 7 u8 opcode; __le32 hdr_second_dword; -#define ISCSI_TEXT_REQUEST_HDR_DATA_SEG_LEN_MASK 0xFFFFFF -#define ISCSI_TEXT_REQUEST_HDR_DATA_SEG_LEN_SHIFT 0 -#define ISCSI_TEXT_REQUEST_HDR_TOTAL_AHS_LEN_MASK 0xFF -#define ISCSI_TEXT_REQUEST_HDR_TOTAL_AHS_LEN_SHIFT 24 +#define ISCSI_TEXT_REQUEST_HDR_DATA_SEG_LEN_MASK 0xFFFFFF +#define ISCSI_TEXT_REQUEST_HDR_DATA_SEG_LEN_SHIFT 0 +#define ISCSI_TEXT_REQUEST_HDR_TOTAL_AHS_LEN_MASK 0xFF +#define ISCSI_TEXT_REQUEST_HDR_TOTAL_AHS_LEN_SHIFT 24 struct regpair lun; __le32 itt; __le32 ttt; @@ -548,22 +523,21 @@ struct iscsi_text_request_hdr { __le32 reserved4[4]; }; -/* iSCSI Text Response PDU header */ struct iscsi_text_response_hdr { __le16 reserved1; u8 flags; -#define ISCSI_TEXT_RESPONSE_HDR_RSRV_MASK 0x3F -#define ISCSI_TEXT_RESPONSE_HDR_RSRV_SHIFT 0 -#define ISCSI_TEXT_RESPONSE_HDR_C_MASK 0x1 -#define ISCSI_TEXT_RESPONSE_HDR_C_SHIFT 6 -#define ISCSI_TEXT_RESPONSE_HDR_F_MASK 0x1 -#define ISCSI_TEXT_RESPONSE_HDR_F_SHIFT 7 +#define ISCSI_TEXT_RESPONSE_HDR_RSRV_MASK 0x3F +#define ISCSI_TEXT_RESPONSE_HDR_RSRV_SHIFT 0 +#define ISCSI_TEXT_RESPONSE_HDR_C_MASK 0x1 +#define ISCSI_TEXT_RESPONSE_HDR_C_SHIFT 6 +#define ISCSI_TEXT_RESPONSE_HDR_F_MASK 0x1 +#define 
ISCSI_TEXT_RESPONSE_HDR_F_SHIFT 7 u8 opcode; __le32 hdr_second_dword; -#define ISCSI_TEXT_RESPONSE_HDR_DATA_SEG_LEN_MASK 0xFFFFFF -#define ISCSI_TEXT_RESPONSE_HDR_DATA_SEG_LEN_SHIFT 0 -#define ISCSI_TEXT_RESPONSE_HDR_TOTAL_AHS_LEN_MASK 0xFF -#define ISCSI_TEXT_RESPONSE_HDR_TOTAL_AHS_LEN_SHIFT 24 +#define ISCSI_TEXT_RESPONSE_HDR_DATA_SEG_LEN_MASK 0xFFFFFF +#define ISCSI_TEXT_RESPONSE_HDR_DATA_SEG_LEN_SHIFT 0 +#define ISCSI_TEXT_RESPONSE_HDR_TOTAL_AHS_LEN_MASK 0xFF +#define ISCSI_TEXT_RESPONSE_HDR_TOTAL_AHS_LEN_SHIFT 24 struct regpair lun; __le32 itt; __le32 ttt; @@ -573,16 +547,15 @@ struct iscsi_text_response_hdr { __le32 reserved4[3]; }; -/* iSCSI TMF Request PDU header */ struct iscsi_tmf_request_hdr { __le16 reserved0; u8 function; u8 opcode; __le32 hdr_second_dword; -#define ISCSI_TMF_REQUEST_HDR_DATA_SEG_LEN_MASK 0xFFFFFF -#define ISCSI_TMF_REQUEST_HDR_DATA_SEG_LEN_SHIFT 0 -#define ISCSI_TMF_REQUEST_HDR_TOTAL_AHS_LEN_MASK 0xFF -#define ISCSI_TMF_REQUEST_HDR_TOTAL_AHS_LEN_SHIFT 24 +#define ISCSI_TMF_REQUEST_HDR_DATA_SEG_LEN_MASK 0xFFFFFF +#define ISCSI_TMF_REQUEST_HDR_DATA_SEG_LEN_SHIFT 0 +#define ISCSI_TMF_REQUEST_HDR_TOTAL_AHS_LEN_MASK 0xFF +#define ISCSI_TMF_REQUEST_HDR_TOTAL_AHS_LEN_SHIFT 24 struct regpair lun; __le32 itt; __le32 rtt; @@ -599,30 +572,29 @@ struct iscsi_tmf_response_hdr { u8 hdr_flags; u8 opcode; __le32 hdr_second_dword; -#define ISCSI_TMF_RESPONSE_HDR_DATA_SEG_LEN_MASK 0xFFFFFF -#define ISCSI_TMF_RESPONSE_HDR_DATA_SEG_LEN_SHIFT 0 -#define ISCSI_TMF_RESPONSE_HDR_TOTAL_AHS_LEN_MASK 0xFF -#define ISCSI_TMF_RESPONSE_HDR_TOTAL_AHS_LEN_SHIFT 24 +#define ISCSI_TMF_RESPONSE_HDR_DATA_SEG_LEN_MASK 0xFFFFFF +#define ISCSI_TMF_RESPONSE_HDR_DATA_SEG_LEN_SHIFT 0 +#define ISCSI_TMF_RESPONSE_HDR_TOTAL_AHS_LEN_MASK 0xFF +#define ISCSI_TMF_RESPONSE_HDR_TOTAL_AHS_LEN_SHIFT 24 struct regpair reserved0; __le32 itt; - __le32 reserved1; + __le32 rtt; __le32 stat_sn; __le32 exp_cmd_sn; __le32 max_cmd_sn; __le32 reserved4[3]; }; -/* iSCSI Response PDU header */ 
struct iscsi_response_hdr { u8 hdr_status; u8 hdr_response; u8 hdr_flags; u8 opcode; __le32 hdr_second_dword; -#define ISCSI_RESPONSE_HDR_DATA_SEG_LEN_MASK 0xFFFFFF -#define ISCSI_RESPONSE_HDR_DATA_SEG_LEN_SHIFT 0 -#define ISCSI_RESPONSE_HDR_TOTAL_AHS_LEN_MASK 0xFF -#define ISCSI_RESPONSE_HDR_TOTAL_AHS_LEN_SHIFT 24 +#define ISCSI_RESPONSE_HDR_DATA_SEG_LEN_MASK 0xFFFFFF +#define ISCSI_RESPONSE_HDR_DATA_SEG_LEN_SHIFT 0 +#define ISCSI_RESPONSE_HDR_TOTAL_AHS_LEN_MASK 0xFF +#define ISCSI_RESPONSE_HDR_TOTAL_AHS_LEN_SHIFT 24 struct regpair lun; __le32 itt; __le32 snack_tag; @@ -634,19 +606,18 @@ struct iscsi_response_hdr { __le32 residual_count; }; -/* iSCSI Reject PDU header */ struct iscsi_reject_hdr { u8 reserved4; u8 hdr_reason; u8 hdr_flags; u8 opcode; __le32 hdr_second_dword; -#define ISCSI_REJECT_HDR_DATA_SEG_LEN_MASK 0xFFFFFF -#define ISCSI_REJECT_HDR_DATA_SEG_LEN_SHIFT 0 -#define ISCSI_REJECT_HDR_TOTAL_AHS_LEN_MASK 0xFF -#define ISCSI_REJECT_HDR_TOTAL_AHS_LEN_SHIFT 24 +#define ISCSI_REJECT_HDR_DATA_SEG_LEN_MASK 0xFFFFFF +#define ISCSI_REJECT_HDR_DATA_SEG_LEN_SHIFT 0 +#define ISCSI_REJECT_HDR_TOTAL_AHS_LEN_MASK 0xFF +#define ISCSI_REJECT_HDR_TOTAL_AHS_LEN_SHIFT 24 struct regpair reserved0; - __le32 all_ones; + __le32 reserved1; __le32 reserved2; __le32 stat_sn; __le32 exp_cmd_sn; @@ -655,35 +626,6 @@ struct iscsi_reject_hdr { __le32 reserved3[2]; }; -/* iSCSI Asynchronous Message PDU header */ -struct iscsi_async_msg_hdr { - __le16 reserved0; - u8 flags_attr; -#define ISCSI_ASYNC_MSG_HDR_RSRV_MASK 0x7F -#define ISCSI_ASYNC_MSG_HDR_RSRV_SHIFT 0 -#define ISCSI_ASYNC_MSG_HDR_CONST1_MASK 0x1 -#define ISCSI_ASYNC_MSG_HDR_CONST1_SHIFT 7 - u8 opcode; - __le32 hdr_second_dword; -#define ISCSI_ASYNC_MSG_HDR_DATA_SEG_LEN_MASK 0xFFFFFF -#define ISCSI_ASYNC_MSG_HDR_DATA_SEG_LEN_SHIFT 0 -#define ISCSI_ASYNC_MSG_HDR_TOTAL_AHS_LEN_MASK 0xFF -#define ISCSI_ASYNC_MSG_HDR_TOTAL_AHS_LEN_SHIFT 24 - struct regpair lun; - __le32 all_ones; - __le32 reserved1; - __le32 stat_sn; - __le32 
exp_cmd_sn; - __le32 max_cmd_sn; - __le16 param1_rsrv; - u8 async_vcode; - u8 async_event; - __le16 param3_rsrv; - __le16 param2_rsrv; - __le32 reserved7; -}; - -/* PDU header part of Ystorm task context */ union iscsi_task_hdr { struct iscsi_common_hdr common; struct data_hdr data; @@ -707,336 +649,6 @@ union iscsi_task_hdr { struct iscsi_async_msg_hdr async_msg; }; -/* The iscsi storm task context of Ystorm */ -struct ystorm_iscsi_task_st_ctx { - struct ystorm_iscsi_task_state state; - struct ystorm_iscsi_task_rxmit_opt rxmit_opt; - union iscsi_task_hdr pdu_hdr; -}; - -struct e4_ystorm_iscsi_task_ag_ctx { - u8 reserved; - u8 byte1; - __le16 word0; - u8 flags0; -#define E4_YSTORM_ISCSI_TASK_AG_CTX_NIBBLE0_MASK 0xF -#define E4_YSTORM_ISCSI_TASK_AG_CTX_NIBBLE0_SHIFT 0 -#define E4_YSTORM_ISCSI_TASK_AG_CTX_BIT0_MASK 0x1 -#define E4_YSTORM_ISCSI_TASK_AG_CTX_BIT0_SHIFT 4 -#define E4_YSTORM_ISCSI_TASK_AG_CTX_BIT1_MASK 0x1 -#define E4_YSTORM_ISCSI_TASK_AG_CTX_BIT1_SHIFT 5 -#define E4_YSTORM_ISCSI_TASK_AG_CTX_VALID_MASK 0x1 -#define E4_YSTORM_ISCSI_TASK_AG_CTX_VALID_SHIFT 6 -#define E4_YSTORM_ISCSI_TASK_AG_CTX_TTT_VALID_MASK 0x1 /* bit3 */ -#define E4_YSTORM_ISCSI_TASK_AG_CTX_TTT_VALID_SHIFT 7 - u8 flags1; -#define E4_YSTORM_ISCSI_TASK_AG_CTX_CF0_MASK 0x3 -#define E4_YSTORM_ISCSI_TASK_AG_CTX_CF0_SHIFT 0 -#define E4_YSTORM_ISCSI_TASK_AG_CTX_CF1_MASK 0x3 -#define E4_YSTORM_ISCSI_TASK_AG_CTX_CF1_SHIFT 2 -#define E4_YSTORM_ISCSI_TASK_AG_CTX_CF2SPECIAL_MASK 0x3 -#define E4_YSTORM_ISCSI_TASK_AG_CTX_CF2SPECIAL_SHIFT 4 -#define E4_YSTORM_ISCSI_TASK_AG_CTX_CF0EN_MASK 0x1 -#define E4_YSTORM_ISCSI_TASK_AG_CTX_CF0EN_SHIFT 6 -#define E4_YSTORM_ISCSI_TASK_AG_CTX_CF1EN_MASK 0x1 -#define E4_YSTORM_ISCSI_TASK_AG_CTX_CF1EN_SHIFT 7 - u8 flags2; -#define E4_YSTORM_ISCSI_TASK_AG_CTX_BIT4_MASK 0x1 -#define E4_YSTORM_ISCSI_TASK_AG_CTX_BIT4_SHIFT 0 -#define E4_YSTORM_ISCSI_TASK_AG_CTX_RULE0EN_MASK 0x1 -#define E4_YSTORM_ISCSI_TASK_AG_CTX_RULE0EN_SHIFT 1 -#define 
E4_YSTORM_ISCSI_TASK_AG_CTX_RULE1EN_MASK 0x1 -#define E4_YSTORM_ISCSI_TASK_AG_CTX_RULE1EN_SHIFT 2 -#define E4_YSTORM_ISCSI_TASK_AG_CTX_RULE2EN_MASK 0x1 -#define E4_YSTORM_ISCSI_TASK_AG_CTX_RULE2EN_SHIFT 3 -#define E4_YSTORM_ISCSI_TASK_AG_CTX_RULE3EN_MASK 0x1 -#define E4_YSTORM_ISCSI_TASK_AG_CTX_RULE3EN_SHIFT 4 -#define E4_YSTORM_ISCSI_TASK_AG_CTX_RULE4EN_MASK 0x1 -#define E4_YSTORM_ISCSI_TASK_AG_CTX_RULE4EN_SHIFT 5 -#define E4_YSTORM_ISCSI_TASK_AG_CTX_RULE5EN_MASK 0x1 -#define E4_YSTORM_ISCSI_TASK_AG_CTX_RULE5EN_SHIFT 6 -#define E4_YSTORM_ISCSI_TASK_AG_CTX_RULE6EN_MASK 0x1 -#define E4_YSTORM_ISCSI_TASK_AG_CTX_RULE6EN_SHIFT 7 - u8 byte2; - __le32 TTT; - u8 byte3; - u8 byte4; - __le16 word1; -}; - -struct e4_mstorm_iscsi_task_ag_ctx { - u8 cdu_validation; - u8 byte1; - __le16 task_cid; - u8 flags0; -#define E4_MSTORM_ISCSI_TASK_AG_CTX_CONNECTION_TYPE_MASK 0xF -#define E4_MSTORM_ISCSI_TASK_AG_CTX_CONNECTION_TYPE_SHIFT 0 -#define E4_MSTORM_ISCSI_TASK_AG_CTX_EXIST_IN_QM0_MASK 0x1 -#define E4_MSTORM_ISCSI_TASK_AG_CTX_EXIST_IN_QM0_SHIFT 4 -#define E4_MSTORM_ISCSI_TASK_AG_CTX_CONN_CLEAR_SQ_FLAG_MASK 0x1 -#define E4_MSTORM_ISCSI_TASK_AG_CTX_CONN_CLEAR_SQ_FLAG_SHIFT 5 -#define E4_MSTORM_ISCSI_TASK_AG_CTX_VALID_MASK 0x1 -#define E4_MSTORM_ISCSI_TASK_AG_CTX_VALID_SHIFT 6 -#define E4_MSTORM_ISCSI_TASK_AG_CTX_TASK_CLEANUP_FLAG_MASK 0x1 -#define E4_MSTORM_ISCSI_TASK_AG_CTX_TASK_CLEANUP_FLAG_SHIFT 7 - u8 flags1; -#define E4_MSTORM_ISCSI_TASK_AG_CTX_TASK_CLEANUP_CF_MASK 0x3 -#define E4_MSTORM_ISCSI_TASK_AG_CTX_TASK_CLEANUP_CF_SHIFT 0 -#define E4_MSTORM_ISCSI_TASK_AG_CTX_CF1_MASK 0x3 -#define E4_MSTORM_ISCSI_TASK_AG_CTX_CF1_SHIFT 2 -#define E4_MSTORM_ISCSI_TASK_AG_CTX_CF2_MASK 0x3 -#define E4_MSTORM_ISCSI_TASK_AG_CTX_CF2_SHIFT 4 -#define E4_MSTORM_ISCSI_TASK_AG_CTX_TASK_CLEANUP_CF_EN_MASK 0x1 -#define E4_MSTORM_ISCSI_TASK_AG_CTX_TASK_CLEANUP_CF_EN_SHIFT 6 -#define E4_MSTORM_ISCSI_TASK_AG_CTX_CF1EN_MASK 0x1 -#define E4_MSTORM_ISCSI_TASK_AG_CTX_CF1EN_SHIFT 7 - u8 flags2; -#define 
E4_MSTORM_ISCSI_TASK_AG_CTX_CF2EN_MASK 0x1 -#define E4_MSTORM_ISCSI_TASK_AG_CTX_CF2EN_SHIFT 0 -#define E4_MSTORM_ISCSI_TASK_AG_CTX_RULE0EN_MASK 0x1 -#define E4_MSTORM_ISCSI_TASK_AG_CTX_RULE0EN_SHIFT 1 -#define E4_MSTORM_ISCSI_TASK_AG_CTX_RULE1EN_MASK 0x1 -#define E4_MSTORM_ISCSI_TASK_AG_CTX_RULE1EN_SHIFT 2 -#define E4_MSTORM_ISCSI_TASK_AG_CTX_RULE2EN_MASK 0x1 -#define E4_MSTORM_ISCSI_TASK_AG_CTX_RULE2EN_SHIFT 3 -#define E4_MSTORM_ISCSI_TASK_AG_CTX_RULE3EN_MASK 0x1 -#define E4_MSTORM_ISCSI_TASK_AG_CTX_RULE3EN_SHIFT 4 -#define E4_MSTORM_ISCSI_TASK_AG_CTX_RULE4EN_MASK 0x1 -#define E4_MSTORM_ISCSI_TASK_AG_CTX_RULE4EN_SHIFT 5 -#define E4_MSTORM_ISCSI_TASK_AG_CTX_RULE5EN_MASK 0x1 -#define E4_MSTORM_ISCSI_TASK_AG_CTX_RULE5EN_SHIFT 6 -#define E4_MSTORM_ISCSI_TASK_AG_CTX_RULE6EN_MASK 0x1 -#define E4_MSTORM_ISCSI_TASK_AG_CTX_RULE6EN_SHIFT 7 - u8 byte2; - __le32 reg0; - u8 byte3; - u8 byte4; - __le16 word1; -}; - -struct e4_ustorm_iscsi_task_ag_ctx { - u8 reserved; - u8 state; - __le16 icid; - u8 flags0; -#define E4_USTORM_ISCSI_TASK_AG_CTX_CONNECTION_TYPE_MASK 0xF -#define E4_USTORM_ISCSI_TASK_AG_CTX_CONNECTION_TYPE_SHIFT 0 -#define E4_USTORM_ISCSI_TASK_AG_CTX_EXIST_IN_QM0_MASK 0x1 -#define E4_USTORM_ISCSI_TASK_AG_CTX_EXIST_IN_QM0_SHIFT 4 -#define E4_USTORM_ISCSI_TASK_AG_CTX_CONN_CLEAR_SQ_FLAG_MASK 0x1 -#define E4_USTORM_ISCSI_TASK_AG_CTX_CONN_CLEAR_SQ_FLAG_SHIFT 5 -#define E4_USTORM_ISCSI_TASK_AG_CTX_HQ_SCANNED_CF_MASK 0x3 -#define E4_USTORM_ISCSI_TASK_AG_CTX_HQ_SCANNED_CF_SHIFT 6 - u8 flags1; -#define E4_USTORM_ISCSI_TASK_AG_CTX_RESERVED1_MASK 0x3 -#define E4_USTORM_ISCSI_TASK_AG_CTX_RESERVED1_SHIFT 0 -#define E4_USTORM_ISCSI_TASK_AG_CTX_R2T2RECV_MASK 0x3 -#define E4_USTORM_ISCSI_TASK_AG_CTX_R2T2RECV_SHIFT 2 -#define E4_USTORM_ISCSI_TASK_AG_CTX_CF3_MASK 0x3 -#define E4_USTORM_ISCSI_TASK_AG_CTX_CF3_SHIFT 4 -#define E4_USTORM_ISCSI_TASK_AG_CTX_DIF_ERROR_CF_MASK 0x3 -#define E4_USTORM_ISCSI_TASK_AG_CTX_DIF_ERROR_CF_SHIFT 6 - u8 flags2; -#define 
E4_USTORM_ISCSI_TASK_AG_CTX_HQ_SCANNED_CF_EN_MASK 0x1 -#define E4_USTORM_ISCSI_TASK_AG_CTX_HQ_SCANNED_CF_EN_SHIFT 0 -#define E4_USTORM_ISCSI_TASK_AG_CTX_DISABLE_DATA_ACKED_MASK 0x1 -#define E4_USTORM_ISCSI_TASK_AG_CTX_DISABLE_DATA_ACKED_SHIFT 1 -#define E4_USTORM_ISCSI_TASK_AG_CTX_R2T2RECV_EN_MASK 0x1 -#define E4_USTORM_ISCSI_TASK_AG_CTX_R2T2RECV_EN_SHIFT 2 -#define E4_USTORM_ISCSI_TASK_AG_CTX_CF3EN_MASK 0x1 -#define E4_USTORM_ISCSI_TASK_AG_CTX_CF3EN_SHIFT 3 -#define E4_USTORM_ISCSI_TASK_AG_CTX_DIF_ERROR_CF_EN_MASK 0x1 -#define E4_USTORM_ISCSI_TASK_AG_CTX_DIF_ERROR_CF_EN_SHIFT 4 -#define E4_USTORM_ISCSI_TASK_AG_CTX_CMP_DATA_TOTAL_EXP_EN_MASK 0x1 -#define E4_USTORM_ISCSI_TASK_AG_CTX_CMP_DATA_TOTAL_EXP_EN_SHIFT 5 -#define E4_USTORM_ISCSI_TASK_AG_CTX_RULE1EN_MASK 0x1 -#define E4_USTORM_ISCSI_TASK_AG_CTX_RULE1EN_SHIFT 6 -#define E4_USTORM_ISCSI_TASK_AG_CTX_CMP_CONT_RCV_EXP_EN_MASK 0x1 -#define E4_USTORM_ISCSI_TASK_AG_CTX_CMP_CONT_RCV_EXP_EN_SHIFT 7 - u8 flags3; -#define E4_USTORM_ISCSI_TASK_AG_CTX_RULE3EN_MASK 0x1 -#define E4_USTORM_ISCSI_TASK_AG_CTX_RULE3EN_SHIFT 0 -#define E4_USTORM_ISCSI_TASK_AG_CTX_RULE4EN_MASK 0x1 -#define E4_USTORM_ISCSI_TASK_AG_CTX_RULE4EN_SHIFT 1 -#define E4_USTORM_ISCSI_TASK_AG_CTX_RULE5EN_MASK 0x1 -#define E4_USTORM_ISCSI_TASK_AG_CTX_RULE5EN_SHIFT 2 -#define E4_USTORM_ISCSI_TASK_AG_CTX_RULE6EN_MASK 0x1 -#define E4_USTORM_ISCSI_TASK_AG_CTX_RULE6EN_SHIFT 3 -#define E4_USTORM_ISCSI_TASK_AG_CTX_DIF_ERROR_TYPE_MASK 0xF -#define E4_USTORM_ISCSI_TASK_AG_CTX_DIF_ERROR_TYPE_SHIFT 4 - __le32 dif_err_intervals; - __le32 dif_error_1st_interval; - __le32 rcv_cont_len; - __le32 exp_cont_len; - __le32 total_data_acked; - __le32 exp_data_acked; - u8 byte2; - u8 byte3; - __le16 word1; - __le16 next_tid; - __le16 word3; - __le32 hdr_residual_count; - __le32 exp_r2t_sn; -}; - -/* The iscsi storm task context of Mstorm */ -struct mstorm_iscsi_task_st_ctx { - struct scsi_cached_sges data_desc; - struct scsi_sgl_params sgl_params; - __le32 rem_task_size; - __le32 
data_buffer_offset; - u8 task_type; - struct iscsi_dif_flags dif_flags; - __le16 dif_task_icid; - struct regpair sense_db; - __le32 expected_itt; - __le32 reserved1; -}; - -struct iscsi_reg1 { - __le32 reg1_map; -#define ISCSI_REG1_NUM_SGES_MASK 0xF -#define ISCSI_REG1_NUM_SGES_SHIFT 0 -#define ISCSI_REG1_RESERVED1_MASK 0xFFFFFFF -#define ISCSI_REG1_RESERVED1_SHIFT 4 -}; - -struct tqe_opaque { - __le16 opaque[2]; -}; - -/* The iscsi storm task context of Ustorm */ -struct ustorm_iscsi_task_st_ctx { - __le32 rem_rcv_len; - __le32 exp_data_transfer_len; - __le32 exp_data_sn; - struct regpair lun; - struct iscsi_reg1 reg1; - u8 flags2; -#define USTORM_ISCSI_TASK_ST_CTX_AHS_EXIST_MASK 0x1 -#define USTORM_ISCSI_TASK_ST_CTX_AHS_EXIST_SHIFT 0 -#define USTORM_ISCSI_TASK_ST_CTX_RESERVED1_MASK 0x7F -#define USTORM_ISCSI_TASK_ST_CTX_RESERVED1_SHIFT 1 - struct iscsi_dif_flags dif_flags; - __le16 reserved3; - struct tqe_opaque tqe_opaque_list; - __le32 reserved5; - __le32 reserved6; - __le32 reserved7; - u8 task_type; - u8 error_flags; -#define USTORM_ISCSI_TASK_ST_CTX_DATA_DIGEST_ERROR_MASK 0x1 -#define USTORM_ISCSI_TASK_ST_CTX_DATA_DIGEST_ERROR_SHIFT 0 -#define USTORM_ISCSI_TASK_ST_CTX_DATA_TRUNCATED_ERROR_MASK 0x1 -#define USTORM_ISCSI_TASK_ST_CTX_DATA_TRUNCATED_ERROR_SHIFT 1 -#define USTORM_ISCSI_TASK_ST_CTX_UNDER_RUN_ERROR_MASK 0x1 -#define USTORM_ISCSI_TASK_ST_CTX_UNDER_RUN_ERROR_SHIFT 2 -#define USTORM_ISCSI_TASK_ST_CTX_RESERVED8_MASK 0x1F -#define USTORM_ISCSI_TASK_ST_CTX_RESERVED8_SHIFT 3 - u8 flags; -#define USTORM_ISCSI_TASK_ST_CTX_CQE_WRITE_MASK 0x3 -#define USTORM_ISCSI_TASK_ST_CTX_CQE_WRITE_SHIFT 0 -#define USTORM_ISCSI_TASK_ST_CTX_LOCAL_COMP_MASK 0x1 -#define USTORM_ISCSI_TASK_ST_CTX_LOCAL_COMP_SHIFT 2 -#define USTORM_ISCSI_TASK_ST_CTX_Q0_R2TQE_WRITE_MASK 0x1 -#define USTORM_ISCSI_TASK_ST_CTX_Q0_R2TQE_WRITE_SHIFT 3 -#define USTORM_ISCSI_TASK_ST_CTX_TOTAL_DATA_ACKED_DONE_MASK 0x1 -#define USTORM_ISCSI_TASK_ST_CTX_TOTAL_DATA_ACKED_DONE_SHIFT 4 -#define 
USTORM_ISCSI_TASK_ST_CTX_HQ_SCANNED_DONE_MASK 0x1 -#define USTORM_ISCSI_TASK_ST_CTX_HQ_SCANNED_DONE_SHIFT 5 -#define USTORM_ISCSI_TASK_ST_CTX_R2T2RECV_DONE_MASK 0x1 -#define USTORM_ISCSI_TASK_ST_CTX_R2T2RECV_DONE_SHIFT 6 -#define USTORM_ISCSI_TASK_ST_CTX_RESERVED0_MASK 0x1 -#define USTORM_ISCSI_TASK_ST_CTX_RESERVED0_SHIFT 7 - u8 cq_rss_number; -}; - -/* iscsi task context */ -struct e4_iscsi_task_context { - struct ystorm_iscsi_task_st_ctx ystorm_st_context; - struct e4_ystorm_iscsi_task_ag_ctx ystorm_ag_context; - struct regpair ystorm_ag_padding[2]; - struct tdif_task_context tdif_context; - struct e4_mstorm_iscsi_task_ag_ctx mstorm_ag_context; - struct regpair mstorm_ag_padding[2]; - struct e4_ustorm_iscsi_task_ag_ctx ustorm_ag_context; - struct mstorm_iscsi_task_st_ctx mstorm_st_context; - struct ustorm_iscsi_task_st_ctx ustorm_st_context; - struct rdif_task_context rdif_context; -}; - -/* iSCSI connection offload params passed by driver to FW in ISCSI offload - * ramrod. - */ -struct iscsi_conn_offload_params { - struct regpair sq_pbl_addr; - struct regpair r2tq_pbl_addr; - struct regpair xhq_pbl_addr; - struct regpair uhq_pbl_addr; - __le16 physical_q0; - __le16 physical_q1; - u8 flags; -#define ISCSI_CONN_OFFLOAD_PARAMS_TCP_ON_CHIP_1B_MASK 0x1 -#define ISCSI_CONN_OFFLOAD_PARAMS_TCP_ON_CHIP_1B_SHIFT 0 -#define ISCSI_CONN_OFFLOAD_PARAMS_TARGET_MODE_MASK 0x1 -#define ISCSI_CONN_OFFLOAD_PARAMS_TARGET_MODE_SHIFT 1 -#define ISCSI_CONN_OFFLOAD_PARAMS_RESTRICTED_MODE_MASK 0x1 -#define ISCSI_CONN_OFFLOAD_PARAMS_RESTRICTED_MODE_SHIFT 2 -#define ISCSI_CONN_OFFLOAD_PARAMS_RESERVED1_MASK 0x1F -#define ISCSI_CONN_OFFLOAD_PARAMS_RESERVED1_SHIFT 3 - u8 default_cq; - __le16 reserved0; - __le32 stat_sn; - __le32 initial_ack; -}; - -/* iSCSI connection statistics */ -struct iscsi_conn_stats_params { - struct regpair iscsi_tcp_tx_packets_cnt; - struct regpair iscsi_tcp_tx_bytes_cnt; - struct regpair iscsi_tcp_tx_rxmit_cnt; - struct regpair iscsi_tcp_rx_packets_cnt; - struct 
regpair iscsi_tcp_rx_bytes_cnt; - struct regpair iscsi_tcp_rx_dup_ack_cnt; - __le32 iscsi_tcp_rx_chksum_err_cnt; - __le32 reserved; -}; - - -/* iSCSI connection update params passed by driver to FW in ISCSI update - *ramrod. - */ -struct iscsi_conn_update_ramrod_params { - __le16 reserved0; - __le16 conn_id; - __le32 reserved1; - u8 flags; -#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_HD_EN_MASK 0x1 -#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_HD_EN_SHIFT 0 -#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_DD_EN_MASK 0x1 -#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_DD_EN_SHIFT 1 -#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_INITIAL_R2T_MASK 0x1 -#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_INITIAL_R2T_SHIFT 2 -#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_IMMEDIATE_DATA_MASK 0x1 -#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_IMMEDIATE_DATA_SHIFT 3 -#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_DIF_BLOCK_SIZE_MASK 0x1 -#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_DIF_BLOCK_SIZE_SHIFT 4 -#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_DIF_ON_HOST_EN_MASK 0x1 -#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_DIF_ON_HOST_EN_SHIFT 5 -#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_DIF_ON_IMM_EN_MASK 0x1 -#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_DIF_ON_IMM_EN_SHIFT 6 -#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_LUN_MAPPER_EN_MASK 0x1 -#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_LUN_MAPPER_EN_SHIFT 7 - u8 reserved3[3]; - __le32 max_seq_size; - __le32 max_send_pdu_length; - __le32 max_recv_pdu_length; - __le32 first_seq_length; - __le32 exp_stat_sn; - union dif_configuration_params dif_on_imme_params; -}; - -/* iSCSI CQ element */ struct iscsi_cqe_common { __le16 conn_id; u8 cqe_type; @@ -1045,7 +657,6 @@ struct iscsi_cqe_common { union iscsi_task_hdr iscsi_hdr; }; -/* iSCSI CQ element */ struct iscsi_cqe_solicited { __le16 conn_id; u8 cqe_type; @@ -1053,13 +664,10 @@ struct iscsi_cqe_solicited { __le16 itid; u8 task_type; u8 fw_dbg_field; - u8 caused_conn_err; - u8 reserved0[3]; - __le32 data_truncated_bytes; + __le32 reserved1[2]; union iscsi_task_hdr 
iscsi_hdr; }; -/* iSCSI CQ element */ struct iscsi_cqe_unsolicited { __le16 conn_id; u8 cqe_type; @@ -1067,19 +675,16 @@ struct iscsi_cqe_unsolicited { __le16 reserved0; u8 reserved1; u8 unsol_cqe_type; - __le16 rqe_opaque; - __le16 reserved2[3]; + struct regpair rqe_opaque; union iscsi_task_hdr iscsi_hdr; }; -/* iSCSI CQ element */ union iscsi_cqe { struct iscsi_cqe_common cqe_common; struct iscsi_cqe_solicited cqe_solicited; struct iscsi_cqe_unsolicited cqe_unsolicited; }; -/* iSCSI CQE type */ enum iscsi_cqes_type { ISCSI_CQE_TYPE_SOLICITED = 1, ISCSI_CQE_TYPE_UNSOLICITED, @@ -1089,7 +694,6 @@ enum iscsi_cqes_type { MAX_ISCSI_CQES_TYPE }; -/* iSCSI CQE type */ enum iscsi_cqe_unsolicited_type { ISCSI_CQE_UNSOLICITED_NONE, ISCSI_CQE_UNSOLICITED_SINGLE, @@ -1099,28 +703,64 @@ enum iscsi_cqe_unsolicited_type { MAX_ISCSI_CQE_UNSOLICITED_TYPE }; -/* iscsi debug modes */ -struct iscsi_debug_modes { - u8 flags; -#define ISCSI_DEBUG_MODES_ASSERT_IF_RX_CONN_ERROR_MASK 0x1 -#define ISCSI_DEBUG_MODES_ASSERT_IF_RX_CONN_ERROR_SHIFT 0 -#define ISCSI_DEBUG_MODES_ASSERT_IF_RECV_RESET_MASK 0x1 -#define ISCSI_DEBUG_MODES_ASSERT_IF_RECV_RESET_SHIFT 1 -#define ISCSI_DEBUG_MODES_ASSERT_IF_RECV_FIN_MASK 0x1 -#define ISCSI_DEBUG_MODES_ASSERT_IF_RECV_FIN_SHIFT 2 -#define ISCSI_DEBUG_MODES_ASSERT_IF_RECV_CLEANUP_MASK 0x1 -#define ISCSI_DEBUG_MODES_ASSERT_IF_RECV_CLEANUP_SHIFT 3 -#define ISCSI_DEBUG_MODES_ASSERT_IF_RECV_REJECT_OR_ASYNC_MASK 0x1 -#define ISCSI_DEBUG_MODES_ASSERT_IF_RECV_REJECT_OR_ASYNC_SHIFT 4 -#define ISCSI_DEBUG_MODES_ASSERT_IF_RECV_NOP_MASK 0x1 -#define ISCSI_DEBUG_MODES_ASSERT_IF_RECV_NOP_SHIFT 5 -#define ISCSI_DEBUG_MODES_ASSERT_IF_DIF_OR_DATA_DIGEST_ERROR_MASK 0x1 -#define ISCSI_DEBUG_MODES_ASSERT_IF_DIF_OR_DATA_DIGEST_ERROR_SHIFT 6 -#define ISCSI_DEBUG_MODES_ASSERT_IF_HQ_CORRUPT_MASK 0x1 -#define ISCSI_DEBUG_MODES_ASSERT_IF_HQ_CORRUPT_SHIFT 7 +struct iscsi_virt_sgl_ctx { + struct regpair sgl_base; + struct regpair dsgl_base; + __le32 sgl_initial_offset; + __le32 
dsgl_initial_offset; + __le32 dsgl_curr_offset[2]; +}; + +struct iscsi_sgl_var_params { + u8 sgl_ptr; + u8 dsgl_ptr; + __le16 sge_offset; + __le16 dsge_offset; +}; + +struct iscsi_phys_sgl_ctx { + struct regpair sgl_base; + struct regpair dsgl_base; + u8 sgl_size; + u8 dsgl_size; + __le16 reserved; + struct iscsi_sgl_var_params var_params[2]; +}; + +union iscsi_data_desc_ctx { + struct iscsi_virt_sgl_ctx virt_sgl; + struct iscsi_phys_sgl_ctx phys_sgl; + struct iscsi_cached_sge_ctx cached_sge; +}; + +struct iscsi_debug_modes { + u8 flags; +#define ISCSI_DEBUG_MODES_ASSERT_IF_RX_CONN_ERROR_MASK 0x1 +#define ISCSI_DEBUG_MODES_ASSERT_IF_RX_CONN_ERROR_SHIFT 0 +#define ISCSI_DEBUG_MODES_ASSERT_IF_RECV_RESET_MASK 0x1 +#define ISCSI_DEBUG_MODES_ASSERT_IF_RECV_RESET_SHIFT 1 +#define ISCSI_DEBUG_MODES_ASSERT_IF_RECV_FIN_MASK 0x1 +#define ISCSI_DEBUG_MODES_ASSERT_IF_RECV_FIN_SHIFT 2 +#define ISCSI_DEBUG_MODES_ASSERT_IF_RECV_CLEANUP_MASK 0x1 +#define ISCSI_DEBUG_MODES_ASSERT_IF_RECV_CLEANUP_SHIFT 3 +#define ISCSI_DEBUG_MODES_ASSERT_IF_RECV_REJECT_OR_ASYNC_MASK 0x1 +#define ISCSI_DEBUG_MODES_ASSERT_IF_RECV_REJECT_OR_ASYNC_SHIFT 4 +#define ISCSI_DEBUG_MODES_ASSERT_IF_RECV_NOP_MASK 0x1 +#define ISCSI_DEBUG_MODES_ASSERT_IF_RECV_NOP_SHIFT 5 +#define ISCSI_DEBUG_MODES_RESERVED0_MASK 0x3 +#define ISCSI_DEBUG_MODES_RESERVED0_SHIFT 6 +}; + +struct iscsi_dif_flags { + u8 flags; +#define ISCSI_DIF_FLAGS_PROT_INTERVAL_SIZE_LOG_MASK 0xF +#define ISCSI_DIF_FLAGS_PROT_INTERVAL_SIZE_LOG_SHIFT 0 +#define ISCSI_DIF_FLAGS_DIF_TO_PEER_MASK 0x1 +#define ISCSI_DIF_FLAGS_DIF_TO_PEER_SHIFT 4 +#define ISCSI_DIF_FLAGS_HOST_INTERFACE_MASK 0x7 +#define ISCSI_DIF_FLAGS_HOST_INTERFACE_SHIFT 5 }; -/* iSCSI kernel completion queue IDs */ enum iscsi_eqe_opcode { ISCSI_EVENT_TYPE_INIT_FUNC = 0, ISCSI_EVENT_TYPE_DESTROY_FUNC, @@ -1129,9 +769,9 @@ enum iscsi_eqe_opcode { ISCSI_EVENT_TYPE_CLEAR_SQ, ISCSI_EVENT_TYPE_TERMINATE_CONN, ISCSI_EVENT_TYPE_MAC_UPDATE_CONN, - ISCSI_EVENT_TYPE_COLLECT_STATS_CONN, 
ISCSI_EVENT_TYPE_ASYN_CONNECT_COMPLETE, ISCSI_EVENT_TYPE_ASYN_TERMINATE_DONE, + RESERVED9, ISCSI_EVENT_TYPE_START_OF_ERROR_TYPES = 10, ISCSI_EVENT_TYPE_ASYN_ABORT_RCVD, ISCSI_EVENT_TYPE_ASYN_CLOSE_RCVD, @@ -1142,10 +782,10 @@ enum iscsi_eqe_opcode { ISCSI_EVENT_TYPE_ASYN_FIN_WAIT2, ISCSI_EVENT_TYPE_ISCSI_CONN_ERROR, ISCSI_EVENT_TYPE_TCP_CONN_ERROR, + ISCSI_EVENT_TYPE_ASYN_DELETE_OOO_ISLES, MAX_ISCSI_EQE_OPCODE }; -/* iSCSI EQE and CQE completion status */ enum iscsi_error_types { ISCSI_STATUS_NONE = 0, ISCSI_CQE_ERROR_UNSOLICITED_RCV_ON_INVALID_CONN = 1, @@ -1158,9 +798,9 @@ enum iscsi_error_types { ISCSI_CONN_ERROR_LOCAL_COMPLETION_ERROR, ISCSI_CONN_ERROR_DATA_OVERRUN, ISCSI_CONN_ERROR_OUT_OF_SGES_ERROR, - ISCSI_CONN_ERROR_IP_OPTIONS_ERROR, - ISCSI_CONN_ERROR_PRS_ERRORS, - ISCSI_CONN_ERROR_CONNECT_INVALID_TCP_OPTION, + ISCSI_CONN_ERROR_TCP_SEG_PROC_URG_ERROR, + ISCSI_CONN_ERROR_TCP_SEG_PROC_IP_OPTIONS_ERROR, + ISCSI_CONN_ERROR_TCP_SEG_PROC_CONNECT_INVALID_WS_OPTION, ISCSI_CONN_ERROR_TCP_IP_FRAGMENT_ERROR, ISCSI_CONN_ERROR_PROTOCOL_ERR_AHS_LEN, ISCSI_CONN_ERROR_PROTOCOL_ERR_AHS_TYPE, @@ -1192,12 +832,32 @@ enum iscsi_error_types { ISCSI_CONN_ERROR_PROTOCOL_ERR_DIF_TX, ISCSI_CONN_ERROR_SENSE_DATA_LENGTH, ISCSI_CONN_ERROR_DATA_PLACEMENT_ERROR, - ISCSI_CONN_ERROR_INVALID_ITT, ISCSI_ERROR_UNKNOWN, MAX_ISCSI_ERROR_TYPES }; -/* iSCSI Ramrod Command IDs */ +struct iscsi_mflags { + u8 mflags; +#define ISCSI_MFLAGS_SLOW_IO_MASK 0x1 +#define ISCSI_MFLAGS_SLOW_IO_SHIFT 0 +#define ISCSI_MFLAGS_SINGLE_SGE_MASK 0x1 +#define ISCSI_MFLAGS_SINGLE_SGE_SHIFT 1 +#define ISCSI_MFLAGS_RESERVED_MASK 0x3F +#define ISCSI_MFLAGS_RESERVED_SHIFT 2 +}; + +struct iscsi_sgl { + struct regpair sgl_addr; + __le16 updated_sge_size; + __le16 updated_sge_offset; + __le32 byte_offset; +}; + +union iscsi_mstorm_sgl { + struct iscsi_sgl sgl_struct; + struct iscsi_sge single_sge; +}; + enum iscsi_ramrod_cmd_id { ISCSI_RAMROD_CMD_ID_UNUSED = 0, ISCSI_RAMROD_CMD_ID_INIT_FUNC = 1, @@ -1207,88 +867,347 @@ 
enum iscsi_ramrod_cmd_id { ISCSI_RAMROD_CMD_ID_TERMINATION_CONN = 5, ISCSI_RAMROD_CMD_ID_CLEAR_SQ = 6, ISCSI_RAMROD_CMD_ID_MAC_UPDATE = 7, - ISCSI_RAMROD_CMD_ID_CONN_STATS = 8, MAX_ISCSI_RAMROD_CMD_ID }; -/* iSCSI connection termination request */ +struct iscsi_reg1 { + __le32 reg1_map; +#define ISCSI_REG1_NUM_FAST_SGES_MASK 0x7 +#define ISCSI_REG1_NUM_FAST_SGES_SHIFT 0 +#define ISCSI_REG1_RESERVED1_MASK 0x1FFFFFFF +#define ISCSI_REG1_RESERVED1_SHIFT 3 +}; + +union iscsi_seq_num { + __le16 data_sn; + __le16 r2t_sn; +}; + struct iscsi_spe_conn_mac_update { - __le16 reserved0; + struct iscsi_slow_path_hdr hdr; __le16 conn_id; - __le32 reserved1; + __le32 fw_cid; __le16 remote_mac_addr_lo; __le16 remote_mac_addr_mid; __le16 remote_mac_addr_hi; - u8 reserved2[2]; + u8 reserved0[2]; }; -/* iSCSI and TCP connection (Option 1) offload params passed by driver to FW in - * iSCSI offload ramrod. - */ struct iscsi_spe_conn_offload { - __le16 reserved0; + struct iscsi_slow_path_hdr hdr; __le16 conn_id; - __le32 reserved1; + __le32 fw_cid; struct iscsi_conn_offload_params iscsi; struct tcp_offload_params tcp; }; -/* iSCSI and TCP connection(Option 2) offload params passed by driver to FW in - * iSCSI offload ramrod. 
- */ struct iscsi_spe_conn_offload_option2 { - __le16 reserved0; + struct iscsi_slow_path_hdr hdr; __le16 conn_id; - __le32 reserved1; + __le32 fw_cid; struct iscsi_conn_offload_params iscsi; struct tcp_offload_params_opt2 tcp; }; -/* iSCSI collect connection statistics request */ -struct iscsi_spe_conn_statistics { - __le16 reserved0; - __le16 conn_id; - __le32 reserved1; - u8 reset_stats; - u8 reserved2[7]; - struct regpair stats_cnts_addr; -}; - -/* iSCSI connection termination request */ struct iscsi_spe_conn_termination { - __le16 reserved0; + struct iscsi_slow_path_hdr hdr; __le16 conn_id; - __le32 reserved1; + __le32 fw_cid; u8 abortive; - u8 reserved2[7]; + u8 reserved0[7]; struct regpair queue_cnts_addr; struct regpair query_params_addr; }; -/* iSCSI firmware function init parameters */ +struct iscsi_spe_func_dstry { + struct iscsi_slow_path_hdr hdr; + __le16 reserved0; + __le32 reserved1; +}; + struct iscsi_spe_func_init { + struct iscsi_slow_path_hdr hdr; __le16 half_way_close_timeout; u8 num_sq_pages_in_ring; u8 num_r2tq_pages_in_ring; u8 num_uhq_pages_in_ring; u8 ll2_rx_queue_id; - u8 flags; -#define ISCSI_SPE_FUNC_INIT_COUNTERS_EN_MASK 0x1 -#define ISCSI_SPE_FUNC_INIT_COUNTERS_EN_SHIFT 0 -#define ISCSI_SPE_FUNC_INIT_RESERVED0_MASK 0x7F -#define ISCSI_SPE_FUNC_INIT_RESERVED0_SHIFT 1 + u8 ooo_enable; struct iscsi_debug_modes debug_mode; - u8 params; -#define ISCSI_SPE_FUNC_INIT_MAX_SYN_RT_MASK 0xF -#define ISCSI_SPE_FUNC_INIT_MAX_SYN_RT_SHIFT 0 -#define ISCSI_SPE_FUNC_INIT_RESERVED1_MASK 0xF -#define ISCSI_SPE_FUNC_INIT_RESERVED1_SHIFT 4 - u8 reserved2[7]; + __le16 reserved1; + __le32 reserved2; + __le32 reserved3; + __le32 reserved4; struct scsi_init_func_params func_params; struct scsi_init_func_queues q_params; }; -/* iSCSI task type */ +struct ystorm_iscsi_task_state { + union iscsi_data_desc_ctx sgl_ctx_union; + __le32 buffer_offset[2]; + __le16 bytes_nxt_dif; + __le16 rxmit_bytes_nxt_dif; + union iscsi_seq_num seq_num_union; + u8 
dif_bytes_leftover; + u8 rxmit_dif_bytes_leftover; + __le16 reuse_count; + struct iscsi_dif_flags dif_flags; + u8 local_comp; + __le32 exp_r2t_sn; + __le32 sgl_offset[2]; +}; + +struct ystorm_iscsi_task_st_ctx { + struct ystorm_iscsi_task_state state; + union iscsi_task_hdr pdu_hdr; +}; + +struct ystorm_iscsi_task_ag_ctx { + u8 reserved; + u8 byte1; + __le16 word0; + u8 flags0; +#define YSTORM_ISCSI_TASK_AG_CTX_NIBBLE0_MASK 0xF +#define YSTORM_ISCSI_TASK_AG_CTX_NIBBLE0_SHIFT 0 +#define YSTORM_ISCSI_TASK_AG_CTX_BIT0_MASK 0x1 +#define YSTORM_ISCSI_TASK_AG_CTX_BIT0_SHIFT 4 +#define YSTORM_ISCSI_TASK_AG_CTX_BIT1_MASK 0x1 +#define YSTORM_ISCSI_TASK_AG_CTX_BIT1_SHIFT 5 +#define YSTORM_ISCSI_TASK_AG_CTX_VALID_MASK 0x1 +#define YSTORM_ISCSI_TASK_AG_CTX_VALID_SHIFT 6 +#define YSTORM_ISCSI_TASK_AG_CTX_BIT3_MASK 0x1 +#define YSTORM_ISCSI_TASK_AG_CTX_BIT3_SHIFT 7 + u8 flags1; +#define YSTORM_ISCSI_TASK_AG_CTX_CF0_MASK 0x3 +#define YSTORM_ISCSI_TASK_AG_CTX_CF0_SHIFT 0 +#define YSTORM_ISCSI_TASK_AG_CTX_CF1_MASK 0x3 +#define YSTORM_ISCSI_TASK_AG_CTX_CF1_SHIFT 2 +#define YSTORM_ISCSI_TASK_AG_CTX_CF2SPECIAL_MASK 0x3 +#define YSTORM_ISCSI_TASK_AG_CTX_CF2SPECIAL_SHIFT 4 +#define YSTORM_ISCSI_TASK_AG_CTX_CF0EN_MASK 0x1 +#define YSTORM_ISCSI_TASK_AG_CTX_CF0EN_SHIFT 6 +#define YSTORM_ISCSI_TASK_AG_CTX_CF1EN_MASK 0x1 +#define YSTORM_ISCSI_TASK_AG_CTX_CF1EN_SHIFT 7 + u8 flags2; +#define YSTORM_ISCSI_TASK_AG_CTX_BIT4_MASK 0x1 +#define YSTORM_ISCSI_TASK_AG_CTX_BIT4_SHIFT 0 +#define YSTORM_ISCSI_TASK_AG_CTX_RULE0EN_MASK 0x1 +#define YSTORM_ISCSI_TASK_AG_CTX_RULE0EN_SHIFT 1 +#define YSTORM_ISCSI_TASK_AG_CTX_RULE1EN_MASK 0x1 +#define YSTORM_ISCSI_TASK_AG_CTX_RULE1EN_SHIFT 2 +#define YSTORM_ISCSI_TASK_AG_CTX_RULE2EN_MASK 0x1 +#define YSTORM_ISCSI_TASK_AG_CTX_RULE2EN_SHIFT 3 +#define YSTORM_ISCSI_TASK_AG_CTX_RULE3EN_MASK 0x1 +#define YSTORM_ISCSI_TASK_AG_CTX_RULE3EN_SHIFT 4 +#define YSTORM_ISCSI_TASK_AG_CTX_RULE4EN_MASK 0x1 +#define YSTORM_ISCSI_TASK_AG_CTX_RULE4EN_SHIFT 5 +#define 
YSTORM_ISCSI_TASK_AG_CTX_RULE5EN_MASK 0x1 +#define YSTORM_ISCSI_TASK_AG_CTX_RULE5EN_SHIFT 6 +#define YSTORM_ISCSI_TASK_AG_CTX_RULE6EN_MASK 0x1 +#define YSTORM_ISCSI_TASK_AG_CTX_RULE6EN_SHIFT 7 + u8 byte2; + __le32 TTT; + u8 byte3; + u8 byte4; + __le16 word1; +}; + +struct mstorm_iscsi_task_ag_ctx { + u8 cdu_validation; + u8 byte1; + __le16 task_cid; + u8 flags0; +#define MSTORM_ISCSI_TASK_AG_CTX_CONNECTION_TYPE_MASK 0xF +#define MSTORM_ISCSI_TASK_AG_CTX_CONNECTION_TYPE_SHIFT 0 +#define MSTORM_ISCSI_TASK_AG_CTX_EXIST_IN_QM0_MASK 0x1 +#define MSTORM_ISCSI_TASK_AG_CTX_EXIST_IN_QM0_SHIFT 4 +#define MSTORM_ISCSI_TASK_AG_CTX_BIT1_MASK 0x1 +#define MSTORM_ISCSI_TASK_AG_CTX_BIT1_SHIFT 5 +#define MSTORM_ISCSI_TASK_AG_CTX_VALID_MASK 0x1 +#define MSTORM_ISCSI_TASK_AG_CTX_VALID_SHIFT 6 +#define MSTORM_ISCSI_TASK_AG_CTX_TASK_CLEANUP_FLAG_MASK 0x1 +#define MSTORM_ISCSI_TASK_AG_CTX_TASK_CLEANUP_FLAG_SHIFT 7 + u8 flags1; +#define MSTORM_ISCSI_TASK_AG_CTX_TASK_CLEANUP_CF_MASK 0x3 +#define MSTORM_ISCSI_TASK_AG_CTX_TASK_CLEANUP_CF_SHIFT 0 +#define MSTORM_ISCSI_TASK_AG_CTX_CF1_MASK 0x3 +#define MSTORM_ISCSI_TASK_AG_CTX_CF1_SHIFT 2 +#define MSTORM_ISCSI_TASK_AG_CTX_CF2_MASK 0x3 +#define MSTORM_ISCSI_TASK_AG_CTX_CF2_SHIFT 4 +#define MSTORM_ISCSI_TASK_AG_CTX_TASK_CLEANUP_CF_EN_MASK 0x1 +#define MSTORM_ISCSI_TASK_AG_CTX_TASK_CLEANUP_CF_EN_SHIFT 6 +#define MSTORM_ISCSI_TASK_AG_CTX_CF1EN_MASK 0x1 +#define MSTORM_ISCSI_TASK_AG_CTX_CF1EN_SHIFT 7 + u8 flags2; +#define MSTORM_ISCSI_TASK_AG_CTX_CF2EN_MASK 0x1 +#define MSTORM_ISCSI_TASK_AG_CTX_CF2EN_SHIFT 0 +#define MSTORM_ISCSI_TASK_AG_CTX_RULE0EN_MASK 0x1 +#define MSTORM_ISCSI_TASK_AG_CTX_RULE0EN_SHIFT 1 +#define MSTORM_ISCSI_TASK_AG_CTX_RULE1EN_MASK 0x1 +#define MSTORM_ISCSI_TASK_AG_CTX_RULE1EN_SHIFT 2 +#define MSTORM_ISCSI_TASK_AG_CTX_RULE2EN_MASK 0x1 +#define MSTORM_ISCSI_TASK_AG_CTX_RULE2EN_SHIFT 3 +#define MSTORM_ISCSI_TASK_AG_CTX_RULE3EN_MASK 0x1 +#define MSTORM_ISCSI_TASK_AG_CTX_RULE3EN_SHIFT 4 +#define 
MSTORM_ISCSI_TASK_AG_CTX_RULE4EN_MASK 0x1 +#define MSTORM_ISCSI_TASK_AG_CTX_RULE4EN_SHIFT 5 +#define MSTORM_ISCSI_TASK_AG_CTX_RULE5EN_MASK 0x1 +#define MSTORM_ISCSI_TASK_AG_CTX_RULE5EN_SHIFT 6 +#define MSTORM_ISCSI_TASK_AG_CTX_RULE6EN_MASK 0x1 +#define MSTORM_ISCSI_TASK_AG_CTX_RULE6EN_SHIFT 7 + u8 byte2; + __le32 reg0; + u8 byte3; + u8 byte4; + __le16 word1; +}; + +struct ustorm_iscsi_task_ag_ctx { + u8 reserved; + u8 state; + __le16 icid; + u8 flags0; +#define USTORM_ISCSI_TASK_AG_CTX_CONNECTION_TYPE_MASK 0xF +#define USTORM_ISCSI_TASK_AG_CTX_CONNECTION_TYPE_SHIFT 0 +#define USTORM_ISCSI_TASK_AG_CTX_EXIST_IN_QM0_MASK 0x1 +#define USTORM_ISCSI_TASK_AG_CTX_EXIST_IN_QM0_SHIFT 4 +#define USTORM_ISCSI_TASK_AG_CTX_BIT1_MASK 0x1 +#define USTORM_ISCSI_TASK_AG_CTX_BIT1_SHIFT 5 +#define USTORM_ISCSI_TASK_AG_CTX_HQ_SCANNED_CF_MASK 0x3 +#define USTORM_ISCSI_TASK_AG_CTX_HQ_SCANNED_CF_SHIFT 6 + u8 flags1; +#define USTORM_ISCSI_TASK_AG_CTX_RESERVED1_MASK 0x3 +#define USTORM_ISCSI_TASK_AG_CTX_RESERVED1_SHIFT 0 +#define USTORM_ISCSI_TASK_AG_CTX_R2T2RECV_MASK 0x3 +#define USTORM_ISCSI_TASK_AG_CTX_R2T2RECV_SHIFT 2 +#define USTORM_ISCSI_TASK_AG_CTX_CF3_MASK 0x3 +#define USTORM_ISCSI_TASK_AG_CTX_CF3_SHIFT 4 +#define USTORM_ISCSI_TASK_AG_CTX_DIF_ERROR_CF_MASK 0x3 +#define USTORM_ISCSI_TASK_AG_CTX_DIF_ERROR_CF_SHIFT 6 + u8 flags2; +#define USTORM_ISCSI_TASK_AG_CTX_HQ_SCANNED_CF_EN_MASK 0x1 +#define USTORM_ISCSI_TASK_AG_CTX_HQ_SCANNED_CF_EN_SHIFT 0 +#define USTORM_ISCSI_TASK_AG_CTX_DISABLE_DATA_ACKED_MASK 0x1 +#define USTORM_ISCSI_TASK_AG_CTX_DISABLE_DATA_ACKED_SHIFT 1 +#define USTORM_ISCSI_TASK_AG_CTX_R2T2RECV_EN_MASK 0x1 +#define USTORM_ISCSI_TASK_AG_CTX_R2T2RECV_EN_SHIFT 2 +#define USTORM_ISCSI_TASK_AG_CTX_CF3EN_MASK 0x1 +#define USTORM_ISCSI_TASK_AG_CTX_CF3EN_SHIFT 3 +#define USTORM_ISCSI_TASK_AG_CTX_DIF_ERROR_CF_EN_MASK 0x1 +#define USTORM_ISCSI_TASK_AG_CTX_DIF_ERROR_CF_EN_SHIFT 4 +#define USTORM_ISCSI_TASK_AG_CTX_CMP_DATA_TOTAL_EXP_EN_MASK 0x1 +#define 
USTORM_ISCSI_TASK_AG_CTX_CMP_DATA_TOTAL_EXP_EN_SHIFT 5 +#define USTORM_ISCSI_TASK_AG_CTX_RULE1EN_MASK 0x1 +#define USTORM_ISCSI_TASK_AG_CTX_RULE1EN_SHIFT 6 +#define USTORM_ISCSI_TASK_AG_CTX_CMP_CONT_RCV_EXP_EN_MASK 0x1 +#define USTORM_ISCSI_TASK_AG_CTX_CMP_CONT_RCV_EXP_EN_SHIFT 7 + u8 flags3; +#define USTORM_ISCSI_TASK_AG_CTX_RULE3EN_MASK 0x1 +#define USTORM_ISCSI_TASK_AG_CTX_RULE3EN_SHIFT 0 +#define USTORM_ISCSI_TASK_AG_CTX_RULE4EN_MASK 0x1 +#define USTORM_ISCSI_TASK_AG_CTX_RULE4EN_SHIFT 1 +#define USTORM_ISCSI_TASK_AG_CTX_RULE5EN_MASK 0x1 +#define USTORM_ISCSI_TASK_AG_CTX_RULE5EN_SHIFT 2 +#define USTORM_ISCSI_TASK_AG_CTX_RULE6EN_MASK 0x1 +#define USTORM_ISCSI_TASK_AG_CTX_RULE6EN_SHIFT 3 +#define USTORM_ISCSI_TASK_AG_CTX_DIF_ERROR_TYPE_MASK 0xF +#define USTORM_ISCSI_TASK_AG_CTX_DIF_ERROR_TYPE_SHIFT 4 + __le32 dif_err_intervals; + __le32 dif_error_1st_interval; + __le32 rcv_cont_len; + __le32 exp_cont_len; + __le32 total_data_acked; + __le32 exp_data_acked; + u8 next_tid_valid; + u8 byte3; + __le16 word1; + __le16 next_tid; + __le16 word3; + __le32 hdr_residual_count; + __le32 exp_r2t_sn; +}; + +struct mstorm_iscsi_task_st_ctx { + union iscsi_mstorm_sgl sgl_union; + struct iscsi_dif_flags dif_flags; + struct iscsi_mflags flags; + u8 sgl_size; + u8 host_sge_index; + __le16 dix_cur_sge_offset; + __le16 dix_cur_sge_size; + __le32 data_offset_rtid; + u8 dif_offset; + u8 dix_sgl_size; + u8 dix_sge_index; + u8 task_type; + struct regpair sense_db; + struct regpair dix_sgl_cur_sge; + __le32 rem_task_size; + __le16 reuse_count; + __le16 dif_data_residue; + u8 reserved0[4]; + __le32 reserved1[1]; +}; + +struct ustorm_iscsi_task_st_ctx { + __le32 rem_rcv_len; + __le32 exp_data_transfer_len; + __le32 exp_data_sn; + struct regpair lun; + struct iscsi_reg1 reg1; + u8 flags2; +#define USTORM_ISCSI_TASK_ST_CTX_AHS_EXIST_MASK 0x1 +#define USTORM_ISCSI_TASK_ST_CTX_AHS_EXIST_SHIFT 0 +#define USTORM_ISCSI_TASK_ST_CTX_RESERVED1_MASK 0x7F +#define 
USTORM_ISCSI_TASK_ST_CTX_RESERVED1_SHIFT 1 + u8 reserved2; + __le16 reserved3; + __le32 reserved4; + __le32 reserved5; + __le32 reserved6; + __le32 reserved7; + u8 task_type; + u8 error_flags; +#define USTORM_ISCSI_TASK_ST_CTX_DATA_DIGEST_ERROR_MASK 0x1 +#define USTORM_ISCSI_TASK_ST_CTX_DATA_DIGEST_ERROR_SHIFT 0 +#define USTORM_ISCSI_TASK_ST_CTX_DATA_TRUNCATED_ERROR_MASK 0x1 +#define USTORM_ISCSI_TASK_ST_CTX_DATA_TRUNCATED_ERROR_SHIFT 1 +#define USTORM_ISCSI_TASK_ST_CTX_UNDER_RUN_ERROR_MASK 0x1 +#define USTORM_ISCSI_TASK_ST_CTX_UNDER_RUN_ERROR_SHIFT 2 +#define USTORM_ISCSI_TASK_ST_CTX_RESERVED8_MASK 0x1F +#define USTORM_ISCSI_TASK_ST_CTX_RESERVED8_SHIFT 3 + u8 flags; +#define USTORM_ISCSI_TASK_ST_CTX_CQE_WRITE_MASK 0x3 +#define USTORM_ISCSI_TASK_ST_CTX_CQE_WRITE_SHIFT 0 +#define USTORM_ISCSI_TASK_ST_CTX_LOCAL_COMP_MASK 0x1 +#define USTORM_ISCSI_TASK_ST_CTX_LOCAL_COMP_SHIFT 2 +#define USTORM_ISCSI_TASK_ST_CTX_Q0_R2TQE_WRITE_MASK 0x1 +#define USTORM_ISCSI_TASK_ST_CTX_Q0_R2TQE_WRITE_SHIFT 3 +#define USTORM_ISCSI_TASK_ST_CTX_TOTALDATAACKED_DONE_MASK 0x1 +#define USTORM_ISCSI_TASK_ST_CTX_TOTALDATAACKED_DONE_SHIFT 4 +#define USTORM_ISCSI_TASK_ST_CTX_HQSCANNED_DONE_MASK 0x1 +#define USTORM_ISCSI_TASK_ST_CTX_HQSCANNED_DONE_SHIFT 5 +#define USTORM_ISCSI_TASK_ST_CTX_R2T2RECV_DONE_MASK 0x1 +#define USTORM_ISCSI_TASK_ST_CTX_R2T2RECV_DONE_SHIFT 6 +#define USTORM_ISCSI_TASK_ST_CTX_RESERVED0_MASK 0x1 +#define USTORM_ISCSI_TASK_ST_CTX_RESERVED0_SHIFT 7 + u8 cq_rss_number; +}; + +struct iscsi_task_context { + struct ystorm_iscsi_task_st_ctx ystorm_st_context; + struct regpair ystorm_st_padding[2]; + struct ystorm_iscsi_task_ag_ctx ystorm_ag_context; + struct regpair ystorm_ag_padding[2]; + struct tdif_task_context tdif_context; + struct mstorm_iscsi_task_ag_ctx mstorm_ag_context; + struct regpair mstorm_ag_padding[2]; + struct ustorm_iscsi_task_ag_ctx ustorm_ag_context; + struct mstorm_iscsi_task_st_ctx mstorm_st_context; + struct ustorm_iscsi_task_st_ctx ustorm_st_context; + 
struct rdif_task_context rdif_context; +}; + enum iscsi_task_type { ISCSI_TASK_TYPE_INITIATOR_WRITE, ISCSI_TASK_TYPE_INITIATOR_READ, @@ -1300,57 +1219,63 @@ enum iscsi_task_type { ISCSI_TASK_TYPE_TARGET_READ, ISCSI_TASK_TYPE_TARGET_RESPONSE, ISCSI_TASK_TYPE_LOGIN_RESPONSE, - ISCSI_TASK_TYPE_TARGET_IMM_W_DIF, MAX_ISCSI_TASK_TYPE }; -/* iSCSI DesiredDataTransferLength/ttt union */ union iscsi_ttt_txlen_union { __le32 desired_tx_len; __le32 ttt; }; -/* iSCSI uHQ element */ struct iscsi_uhqe { __le32 reg1; -#define ISCSI_UHQE_PDU_PAYLOAD_LEN_MASK 0xFFFFF -#define ISCSI_UHQE_PDU_PAYLOAD_LEN_SHIFT 0 -#define ISCSI_UHQE_LOCAL_COMP_MASK 0x1 -#define ISCSI_UHQE_LOCAL_COMP_SHIFT 20 -#define ISCSI_UHQE_TOGGLE_BIT_MASK 0x1 -#define ISCSI_UHQE_TOGGLE_BIT_SHIFT 21 -#define ISCSI_UHQE_PURE_PAYLOAD_MASK 0x1 -#define ISCSI_UHQE_PURE_PAYLOAD_SHIFT 22 -#define ISCSI_UHQE_LOGIN_RESPONSE_PDU_MASK 0x1 -#define ISCSI_UHQE_LOGIN_RESPONSE_PDU_SHIFT 23 -#define ISCSI_UHQE_TASK_ID_HI_MASK 0xFF -#define ISCSI_UHQE_TASK_ID_HI_SHIFT 24 +#define ISCSI_UHQE_PDU_PAYLOAD_LEN_MASK 0xFFFFF +#define ISCSI_UHQE_PDU_PAYLOAD_LEN_SHIFT 0 +#define ISCSI_UHQE_LOCAL_COMP_MASK 0x1 +#define ISCSI_UHQE_LOCAL_COMP_SHIFT 20 +#define ISCSI_UHQE_TOGGLE_BIT_MASK 0x1 +#define ISCSI_UHQE_TOGGLE_BIT_SHIFT 21 +#define ISCSI_UHQE_PURE_PAYLOAD_MASK 0x1 +#define ISCSI_UHQE_PURE_PAYLOAD_SHIFT 22 +#define ISCSI_UHQE_LOGIN_RESPONSE_PDU_MASK 0x1 +#define ISCSI_UHQE_LOGIN_RESPONSE_PDU_SHIFT 23 +#define ISCSI_UHQE_TASK_ID_HI_MASK 0xFF +#define ISCSI_UHQE_TASK_ID_HI_SHIFT 24 __le32 reg2; -#define ISCSI_UHQE_BUFFER_OFFSET_MASK 0xFFFFFF -#define ISCSI_UHQE_BUFFER_OFFSET_SHIFT 0 -#define ISCSI_UHQE_TASK_ID_LO_MASK 0xFF -#define ISCSI_UHQE_TASK_ID_LO_SHIFT 24 +#define ISCSI_UHQE_BUFFER_OFFSET_MASK 0xFFFFFF +#define ISCSI_UHQE_BUFFER_OFFSET_SHIFT 0 +#define ISCSI_UHQE_TASK_ID_LO_MASK 0xFF +#define ISCSI_UHQE_TASK_ID_LO_SHIFT 24 +}; + +struct iscsi_wqe_field { + __le32 contlen_cdbsize_field; +#define ISCSI_WQE_FIELD_CONT_LEN_MASK 
0xFFFFFF +#define ISCSI_WQE_FIELD_CONT_LEN_SHIFT 0 +#define ISCSI_WQE_FIELD_CDB_SIZE_MASK 0xFF +#define ISCSI_WQE_FIELD_CDB_SIZE_SHIFT 24 +}; + +union iscsi_wqe_field_union { + struct iscsi_wqe_field cont_field; + __le32 prev_tid; }; -/* iSCSI WQ element */ struct iscsi_wqe { __le16 task_id; u8 flags; -#define ISCSI_WQE_WQE_TYPE_MASK 0x7 -#define ISCSI_WQE_WQE_TYPE_SHIFT 0 -#define ISCSI_WQE_NUM_SGES_MASK 0xF -#define ISCSI_WQE_NUM_SGES_SHIFT 3 -#define ISCSI_WQE_RESPONSE_MASK 0x1 -#define ISCSI_WQE_RESPONSE_SHIFT 7 +#define ISCSI_WQE_WQE_TYPE_MASK 0x7 +#define ISCSI_WQE_WQE_TYPE_SHIFT 0 +#define ISCSI_WQE_NUM_FAST_SGES_MASK 0x7 +#define ISCSI_WQE_NUM_FAST_SGES_SHIFT 3 +#define ISCSI_WQE_PTU_INVALIDATE_MASK 0x1 +#define ISCSI_WQE_PTU_INVALIDATE_SHIFT 6 +#define ISCSI_WQE_RESPONSE_MASK 0x1 +#define ISCSI_WQE_RESPONSE_SHIFT 7 struct iscsi_dif_flags prot_flags; - __le32 contlen_cdbsize; -#define ISCSI_WQE_CONT_LEN_MASK 0xFFFFFF -#define ISCSI_WQE_CONT_LEN_SHIFT 0 -#define ISCSI_WQE_CDB_SIZE_MASK 0xFF -#define ISCSI_WQE_CDB_SIZE_SHIFT 24 + union iscsi_wqe_field_union cont_prevtid_union; }; -/* iSCSI wqe type */ enum iscsi_wqe_type { ISCSI_WQE_TYPE_NORMAL, ISCSI_WQE_TYPE_TASK_CLEANUP, @@ -1362,7 +1287,6 @@ enum iscsi_wqe_type { MAX_ISCSI_WQE_TYPE }; -/* iSCSI xHQ element */ struct iscsi_xhqe { union iscsi_ttt_txlen_union ttt_or_txlen; __le32 exp_stat_sn; @@ -1370,134 +1294,138 @@ struct iscsi_xhqe { u8 total_ahs_length; u8 opcode; u8 flags; -#define ISCSI_XHQE_FINAL_MASK 0x1 -#define ISCSI_XHQE_FINAL_SHIFT 0 -#define ISCSI_XHQE_STATUS_BIT_MASK 0x1 -#define ISCSI_XHQE_STATUS_BIT_SHIFT 1 -#define ISCSI_XHQE_NUM_SGES_MASK 0xF -#define ISCSI_XHQE_NUM_SGES_SHIFT 2 -#define ISCSI_XHQE_RESERVED0_MASK 0x3 -#define ISCSI_XHQE_RESERVED0_SHIFT 6 - union iscsi_seq_num seq_num; +#define ISCSI_XHQE_NUM_FAST_SGES_MASK 0x7 +#define ISCSI_XHQE_NUM_FAST_SGES_SHIFT 0 +#define ISCSI_XHQE_FINAL_MASK 0x1 +#define ISCSI_XHQE_FINAL_SHIFT 3 +#define ISCSI_XHQE_SUPER_IO_MASK 0x1 +#define 
ISCSI_XHQE_SUPER_IO_SHIFT 4 +#define ISCSI_XHQE_STATUS_BIT_MASK 0x1 +#define ISCSI_XHQE_STATUS_BIT_SHIFT 5 +#define ISCSI_XHQE_RESERVED_MASK 0x3 +#define ISCSI_XHQE_RESERVED_SHIFT 6 + union iscsi_seq_num seq_num_union; __le16 reserved1; }; -/* Per PF iSCSI receive path statistics - mStorm RAM structure */ struct mstorm_iscsi_stats_drv { struct regpair iscsi_rx_dropped_pdus_task_not_valid; - struct regpair iscsi_rx_dup_ack_cnt; }; -/* Per PF iSCSI transmit path statistics - pStorm RAM structure */ struct pstorm_iscsi_stats_drv { struct regpair iscsi_tx_bytes_cnt; struct regpair iscsi_tx_packet_cnt; }; -/* Per PF iSCSI receive path statistics - tStorm RAM structure */ struct tstorm_iscsi_stats_drv { struct regpair iscsi_rx_bytes_cnt; struct regpair iscsi_rx_packet_cnt; struct regpair iscsi_rx_new_ooo_isle_events_cnt; - struct regpair iscsi_rx_tcp_payload_bytes_cnt; - struct regpair iscsi_rx_tcp_pkt_cnt; - struct regpair iscsi_rx_pure_ack_cnt; __le32 iscsi_cmdq_threshold_cnt; __le32 iscsi_rq_threshold_cnt; __le32 iscsi_immq_threshold_cnt; }; -/* Per PF iSCSI receive path statistics - uStorm RAM structure */ struct ustorm_iscsi_stats_drv { struct regpair iscsi_rx_data_pdu_cnt; struct regpair iscsi_rx_r2t_pdu_cnt; struct regpair iscsi_rx_total_pdu_cnt; }; -/* Per PF iSCSI transmit path statistics - xStorm RAM structure */ struct xstorm_iscsi_stats_drv { struct regpair iscsi_tx_go_to_slow_start_event_cnt; struct regpair iscsi_tx_fast_retransmit_event_cnt; - struct regpair iscsi_tx_pure_ack_cnt; - struct regpair iscsi_tx_delayed_ack_cnt; }; -/* Per PF iSCSI transmit path statistics - yStorm RAM structure */ struct ystorm_iscsi_stats_drv { struct regpair iscsi_tx_data_pdu_cnt; struct regpair iscsi_tx_r2t_pdu_cnt; struct regpair iscsi_tx_total_pdu_cnt; - struct regpair iscsi_tx_tcp_payload_bytes_cnt; - struct regpair iscsi_tx_tcp_pkt_cnt; }; -struct e4_tstorm_iscsi_task_ag_ctx { +struct iscsi_db_data { + u8 params; +#define ISCSI_DB_DATA_DEST_MASK 0x3 +#define 
ISCSI_DB_DATA_DEST_SHIFT 0 +#define ISCSI_DB_DATA_AGG_CMD_MASK 0x3 +#define ISCSI_DB_DATA_AGG_CMD_SHIFT 2 +#define ISCSI_DB_DATA_BYPASS_EN_MASK 0x1 +#define ISCSI_DB_DATA_BYPASS_EN_SHIFT 4 +#define ISCSI_DB_DATA_RESERVED_MASK 0x1 +#define ISCSI_DB_DATA_RESERVED_SHIFT 5 +#define ISCSI_DB_DATA_AGG_VAL_SEL_MASK 0x3 +#define ISCSI_DB_DATA_AGG_VAL_SEL_SHIFT 6 + u8 agg_flags; + __le16 sq_prod; +}; + +struct tstorm_iscsi_task_ag_ctx { u8 byte0; u8 byte1; __le16 word0; u8 flags0; -#define E4_TSTORM_ISCSI_TASK_AG_CTX_NIBBLE0_MASK 0xF -#define E4_TSTORM_ISCSI_TASK_AG_CTX_NIBBLE0_SHIFT 0 -#define E4_TSTORM_ISCSI_TASK_AG_CTX_BIT0_MASK 0x1 -#define E4_TSTORM_ISCSI_TASK_AG_CTX_BIT0_SHIFT 4 -#define E4_TSTORM_ISCSI_TASK_AG_CTX_BIT1_MASK 0x1 -#define E4_TSTORM_ISCSI_TASK_AG_CTX_BIT1_SHIFT 5 -#define E4_TSTORM_ISCSI_TASK_AG_CTX_BIT2_MASK 0x1 -#define E4_TSTORM_ISCSI_TASK_AG_CTX_BIT2_SHIFT 6 -#define E4_TSTORM_ISCSI_TASK_AG_CTX_BIT3_MASK 0x1 -#define E4_TSTORM_ISCSI_TASK_AG_CTX_BIT3_SHIFT 7 +#define TSTORM_ISCSI_TASK_AG_CTX_NIBBLE0_MASK 0xF +#define TSTORM_ISCSI_TASK_AG_CTX_NIBBLE0_SHIFT 0 +#define TSTORM_ISCSI_TASK_AG_CTX_BIT0_MASK 0x1 +#define TSTORM_ISCSI_TASK_AG_CTX_BIT0_SHIFT 4 +#define TSTORM_ISCSI_TASK_AG_CTX_BIT1_MASK 0x1 +#define TSTORM_ISCSI_TASK_AG_CTX_BIT1_SHIFT 5 +#define TSTORM_ISCSI_TASK_AG_CTX_BIT2_MASK 0x1 +#define TSTORM_ISCSI_TASK_AG_CTX_BIT2_SHIFT 6 +#define TSTORM_ISCSI_TASK_AG_CTX_BIT3_MASK 0x1 +#define TSTORM_ISCSI_TASK_AG_CTX_BIT3_SHIFT 7 u8 flags1; -#define E4_TSTORM_ISCSI_TASK_AG_CTX_BIT4_MASK 0x1 -#define E4_TSTORM_ISCSI_TASK_AG_CTX_BIT4_SHIFT 0 -#define E4_TSTORM_ISCSI_TASK_AG_CTX_BIT5_MASK 0x1 -#define E4_TSTORM_ISCSI_TASK_AG_CTX_BIT5_SHIFT 1 -#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF0_MASK 0x3 -#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF0_SHIFT 2 -#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF1_MASK 0x3 -#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF1_SHIFT 4 -#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF2_MASK 0x3 -#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF2_SHIFT 6 +#define 
TSTORM_ISCSI_TASK_AG_CTX_BIT4_MASK 0x1 +#define TSTORM_ISCSI_TASK_AG_CTX_BIT4_SHIFT 0 +#define TSTORM_ISCSI_TASK_AG_CTX_BIT5_MASK 0x1 +#define TSTORM_ISCSI_TASK_AG_CTX_BIT5_SHIFT 1 +#define TSTORM_ISCSI_TASK_AG_CTX_CF0_MASK 0x3 +#define TSTORM_ISCSI_TASK_AG_CTX_CF0_SHIFT 2 +#define TSTORM_ISCSI_TASK_AG_CTX_CF1_MASK 0x3 +#define TSTORM_ISCSI_TASK_AG_CTX_CF1_SHIFT 4 +#define TSTORM_ISCSI_TASK_AG_CTX_CF2_MASK 0x3 +#define TSTORM_ISCSI_TASK_AG_CTX_CF2_SHIFT 6 u8 flags2; -#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF3_MASK 0x3 -#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF3_SHIFT 0 -#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF4_MASK 0x3 -#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF4_SHIFT 2 -#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF5_MASK 0x3 -#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF5_SHIFT 4 -#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF6_MASK 0x3 -#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF6_SHIFT 6 +#define TSTORM_ISCSI_TASK_AG_CTX_CF3_MASK 0x3 +#define TSTORM_ISCSI_TASK_AG_CTX_CF3_SHIFT 0 +#define TSTORM_ISCSI_TASK_AG_CTX_CF4_MASK 0x3 +#define TSTORM_ISCSI_TASK_AG_CTX_CF4_SHIFT 2 +#define TSTORM_ISCSI_TASK_AG_CTX_CF5_MASK 0x3 +#define TSTORM_ISCSI_TASK_AG_CTX_CF5_SHIFT 4 +#define TSTORM_ISCSI_TASK_AG_CTX_CF6_MASK 0x3 +#define TSTORM_ISCSI_TASK_AG_CTX_CF6_SHIFT 6 u8 flags3; -#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF7_MASK 0x3 -#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF7_SHIFT 0 -#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF0EN_MASK 0x1 -#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF0EN_SHIFT 2 -#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF1EN_MASK 0x1 -#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF1EN_SHIFT 3 -#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF2EN_MASK 0x1 -#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF2EN_SHIFT 4 -#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF3EN_MASK 0x1 -#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF3EN_SHIFT 5 -#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF4EN_MASK 0x1 -#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF4EN_SHIFT 6 -#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF5EN_MASK 0x1 -#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF5EN_SHIFT 7 +#define 
TSTORM_ISCSI_TASK_AG_CTX_CF7_MASK 0x3 +#define TSTORM_ISCSI_TASK_AG_CTX_CF7_SHIFT 0 +#define TSTORM_ISCSI_TASK_AG_CTX_CF0EN_MASK 0x1 +#define TSTORM_ISCSI_TASK_AG_CTX_CF0EN_SHIFT 2 +#define TSTORM_ISCSI_TASK_AG_CTX_CF1EN_MASK 0x1 +#define TSTORM_ISCSI_TASK_AG_CTX_CF1EN_SHIFT 3 +#define TSTORM_ISCSI_TASK_AG_CTX_CF2EN_MASK 0x1 +#define TSTORM_ISCSI_TASK_AG_CTX_CF2EN_SHIFT 4 +#define TSTORM_ISCSI_TASK_AG_CTX_CF3EN_MASK 0x1 +#define TSTORM_ISCSI_TASK_AG_CTX_CF3EN_SHIFT 5 +#define TSTORM_ISCSI_TASK_AG_CTX_CF4EN_MASK 0x1 +#define TSTORM_ISCSI_TASK_AG_CTX_CF4EN_SHIFT 6 +#define TSTORM_ISCSI_TASK_AG_CTX_CF5EN_MASK 0x1 +#define TSTORM_ISCSI_TASK_AG_CTX_CF5EN_SHIFT 7 u8 flags4; -#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF6EN_MASK 0x1 -#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF6EN_SHIFT 0 -#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF7EN_MASK 0x1 -#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF7EN_SHIFT 1 -#define E4_TSTORM_ISCSI_TASK_AG_CTX_RULE0EN_MASK 0x1 -#define E4_TSTORM_ISCSI_TASK_AG_CTX_RULE0EN_SHIFT 2 -#define E4_TSTORM_ISCSI_TASK_AG_CTX_RULE1EN_MASK 0x1 -#define E4_TSTORM_ISCSI_TASK_AG_CTX_RULE1EN_SHIFT 3 -#define E4_TSTORM_ISCSI_TASK_AG_CTX_RULE2EN_MASK 0x1 -#define E4_TSTORM_ISCSI_TASK_AG_CTX_RULE2EN_SHIFT 4 -#define E4_TSTORM_ISCSI_TASK_AG_CTX_RULE3EN_MASK 0x1 -#define E4_TSTORM_ISCSI_TASK_AG_CTX_RULE3EN_SHIFT 5 -#define E4_TSTORM_ISCSI_TASK_AG_CTX_RULE4EN_MASK 0x1 -#define E4_TSTORM_ISCSI_TASK_AG_CTX_RULE4EN_SHIFT 6 -#define E4_TSTORM_ISCSI_TASK_AG_CTX_RULE5EN_MASK 0x1 -#define E4_TSTORM_ISCSI_TASK_AG_CTX_RULE5EN_SHIFT 7 +#define TSTORM_ISCSI_TASK_AG_CTX_CF6EN_MASK 0x1 +#define TSTORM_ISCSI_TASK_AG_CTX_CF6EN_SHIFT 0 +#define TSTORM_ISCSI_TASK_AG_CTX_CF7EN_MASK 0x1 +#define TSTORM_ISCSI_TASK_AG_CTX_CF7EN_SHIFT 1 +#define TSTORM_ISCSI_TASK_AG_CTX_RULE0EN_MASK 0x1 +#define TSTORM_ISCSI_TASK_AG_CTX_RULE0EN_SHIFT 2 +#define TSTORM_ISCSI_TASK_AG_CTX_RULE1EN_MASK 0x1 +#define TSTORM_ISCSI_TASK_AG_CTX_RULE1EN_SHIFT 3 +#define TSTORM_ISCSI_TASK_AG_CTX_RULE2EN_MASK 0x1 +#define 
TSTORM_ISCSI_TASK_AG_CTX_RULE2EN_SHIFT 4 +#define TSTORM_ISCSI_TASK_AG_CTX_RULE3EN_MASK 0x1 +#define TSTORM_ISCSI_TASK_AG_CTX_RULE3EN_SHIFT 5 +#define TSTORM_ISCSI_TASK_AG_CTX_RULE4EN_MASK 0x1 +#define TSTORM_ISCSI_TASK_AG_CTX_RULE4EN_SHIFT 6 +#define TSTORM_ISCSI_TASK_AG_CTX_RULE5EN_MASK 0x1 +#define TSTORM_ISCSI_TASK_AG_CTX_RULE5EN_SHIFT 7 u8 byte2; __le16 word1; __le32 reg0; @@ -1510,21 +1438,4 @@ struct e4_tstorm_iscsi_task_ag_ctx { __le32 reg2; }; -/* iSCSI doorbell data */ -struct iscsi_db_data { - u8 params; -#define ISCSI_DB_DATA_DEST_MASK 0x3 -#define ISCSI_DB_DATA_DEST_SHIFT 0 -#define ISCSI_DB_DATA_AGG_CMD_MASK 0x3 -#define ISCSI_DB_DATA_AGG_CMD_SHIFT 2 -#define ISCSI_DB_DATA_BYPASS_EN_MASK 0x1 -#define ISCSI_DB_DATA_BYPASS_EN_SHIFT 4 -#define ISCSI_DB_DATA_RESERVED_MASK 0x1 -#define ISCSI_DB_DATA_RESERVED_SHIFT 5 -#define ISCSI_DB_DATA_AGG_VAL_SEL_MASK 0x3 -#define ISCSI_DB_DATA_AGG_VAL_SEL_SHIFT 6 - u8 agg_flags; - __le16 sq_prod; -}; - #endif /* __ISCSI_COMMON__ */ diff --git a/include/linux/qed/qed_chain.h b/include/linux/qed/qed_chain.h index f34dbd0db7..72d88cf3ca 100644 --- a/include/linux/qed/qed_chain.h +++ b/include/linux/qed/qed_chain.h @@ -1,7 +1,9 @@ -/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */ /* QLogic qed NIC Driver - * Copyright (c) 2015-2017 QLogic Corporation - * Copyright (c) 2019-2020 Marvell International Ltd. + * Copyright (c) 2015 QLogic Corporation + * + * This software is available under the terms of the GNU General Public License + * (GPL) Version 2, available from the file COPYING in the main directory of + * this source tree. 
*/ #ifndef _QED_CHAIN_H @@ -11,7 +13,6 @@ #include #include #include -#include #include #include @@ -19,7 +20,7 @@ enum qed_chain_mode { /* Each Page contains a next pointer at its end */ QED_CHAIN_MODE_NEXT_PTR, - /* Chain is a single page (next ptr) is not required */ + /* Chain is a single page (next ptr) is unrequired */ QED_CHAIN_MODE_SINGLE, /* Page pointers are located in a side list */ @@ -27,9 +28,9 @@ enum qed_chain_mode { }; enum qed_chain_use_mode { - QED_CHAIN_USE_TO_PRODUCE, /* Chain starts empty */ - QED_CHAIN_USE_TO_CONSUME, /* Chain starts full */ - QED_CHAIN_USE_TO_CONSUME_PRODUCE, /* Chain starts empty */ + QED_CHAIN_USE_TO_PRODUCE, /* Chain starts empty */ + QED_CHAIN_USE_TO_CONSUME, /* Chain starts full */ + QED_CHAIN_USE_TO_CONSUME_PRODUCE, /* Chain starts empty */ }; enum qed_chain_cnt_type { @@ -41,236 +42,169 @@ enum qed_chain_cnt_type { }; struct qed_chain_next { - struct regpair next_phys; - void *next_virt; + struct regpair next_phys; + void *next_virt; }; struct qed_chain_pbl_u16 { - u16 prod_page_idx; - u16 cons_page_idx; + u16 prod_page_idx; + u16 cons_page_idx; }; struct qed_chain_pbl_u32 { - u32 prod_page_idx; - u32 cons_page_idx; + u32 prod_page_idx; + u32 cons_page_idx; +}; + +struct qed_chain_pbl { + /* Base address of a pre-allocated buffer for pbl */ + dma_addr_t p_phys_table; + void *p_virt_table; + + /* Table for keeping the virtual addresses of the chain pages, + * respectively to the physical addresses in the pbl table. 
+ */ + void **pp_virt_addr_tbl; + + /* Index to current used page by producer/consumer */ + union { + struct qed_chain_pbl_u16 pbl16; + struct qed_chain_pbl_u32 pbl32; + } u; }; struct qed_chain_u16 { - /* Cyclic index of next element to produce/consume */ - u16 prod_idx; - u16 cons_idx; + /* Cyclic index of next element to produce/consme */ + u16 prod_idx; + u16 cons_idx; }; struct qed_chain_u32 { - /* Cyclic index of next element to produce/consume */ - u32 prod_idx; - u32 cons_idx; -}; - -struct addr_tbl_entry { - void *virt_addr; - dma_addr_t dma_map; + /* Cyclic index of next element to produce/consme */ + u32 prod_idx; + u32 cons_idx; }; struct qed_chain { - /* Fastpath portion of the chain - required for commands such - * as produce / consume. - */ + void *p_virt_addr; + dma_addr_t p_phys_addr; + void *p_prod_elem; + void *p_cons_elem; - /* Point to next element to produce/consume */ - void *p_prod_elem; - void *p_cons_elem; - - /* Fastpath portions of the PBL [if exists] */ - - struct { - /* Table for keeping the virtual and physical addresses of the - * chain pages, respectively to the physical addresses - * in the pbl table. - */ - struct addr_tbl_entry *pp_addr_tbl; - - union { - struct qed_chain_pbl_u16 u16; - struct qed_chain_pbl_u32 u32; - } c; - } pbl; + enum qed_chain_mode mode; + enum qed_chain_use_mode intended_use; /* used to produce/consume */ + enum qed_chain_cnt_type cnt_type; union { - struct qed_chain_u16 chain16; - struct qed_chain_u32 chain32; - } u; + struct qed_chain_u16 chain16; + struct qed_chain_u32 chain32; + } u; - /* Capacity counts only usable elements */ - u32 capacity; - u32 page_cnt; + u32 page_cnt; - enum qed_chain_mode mode; + /* Number of elements - capacity is for usable elements only, + * while size will contain total number of elements [for entire chain]. 
+ */ + u32 capacity; + u32 size; /* Elements information for fast calculations */ - u16 elem_per_page; - u16 elem_per_page_mask; - u16 elem_size; - u16 next_page_mask; - u16 usable_per_page; - u8 elem_unusable; - - enum qed_chain_cnt_type cnt_type; - - /* Slowpath of the chain - required for initialization and destruction, - * but isn't involved in regular functionality. - */ - - u32 page_size; - - /* Base address of a pre-allocated buffer for pbl */ - struct { - __le64 *table_virt; - dma_addr_t table_phys; - size_t table_size; - } pbl_sp; - - /* Address of first page of the chain - the address is required - * for fastpath operation [consume/produce] but only for the SINGLE - * flavour which isn't considered fastpath [== SPQ]. - */ - void *p_virt_addr; - dma_addr_t p_phys_addr; - - /* Total number of elements [for entire chain] */ - u32 size; - - enum qed_chain_use_mode intended_use; - - bool b_external_pbl; + u16 elem_per_page; + u16 elem_per_page_mask; + u16 elem_unusable; + u16 usable_per_page; + u16 elem_size; + u16 next_page_mask; + struct qed_chain_pbl pbl; }; -struct qed_chain_init_params { - enum qed_chain_mode mode; - enum qed_chain_use_mode intended_use; - enum qed_chain_cnt_type cnt_type; +#define QED_CHAIN_PBL_ENTRY_SIZE (8) +#define QED_CHAIN_PAGE_SIZE (0x1000) +#define ELEMS_PER_PAGE(elem_size) (QED_CHAIN_PAGE_SIZE / (elem_size)) - u32 page_size; - u32 num_elems; - size_t elem_size; +#define UNUSABLE_ELEMS_PER_PAGE(elem_size, mode) \ + ((mode == QED_CHAIN_MODE_NEXT_PTR) ? 
\ + (1 + ((sizeof(struct qed_chain_next) - 1) / \ + (elem_size))) : 0) - void *ext_pbl_virt; - dma_addr_t ext_pbl_phys; -}; +#define USABLE_ELEMS_PER_PAGE(elem_size, mode) \ + ((u32)(ELEMS_PER_PAGE(elem_size) - \ + UNUSABLE_ELEMS_PER_PAGE(elem_size, mode))) -#define QED_CHAIN_PAGE_SIZE SZ_4K +#define QED_CHAIN_PAGE_CNT(elem_cnt, elem_size, mode) \ + DIV_ROUND_UP(elem_cnt, USABLE_ELEMS_PER_PAGE(elem_size, mode)) -#define ELEMS_PER_PAGE(elem_size, page_size) \ - ((page_size) / (elem_size)) - -#define UNUSABLE_ELEMS_PER_PAGE(elem_size, mode) \ - (((mode) == QED_CHAIN_MODE_NEXT_PTR) ? \ - (u8)(1 + ((sizeof(struct qed_chain_next) - 1) / (elem_size))) : \ - 0) - -#define USABLE_ELEMS_PER_PAGE(elem_size, page_size, mode) \ - ((u32)(ELEMS_PER_PAGE((elem_size), (page_size)) - \ - UNUSABLE_ELEMS_PER_PAGE((elem_size), (mode)))) - -#define QED_CHAIN_PAGE_CNT(elem_cnt, elem_size, page_size, mode) \ - DIV_ROUND_UP((elem_cnt), \ - USABLE_ELEMS_PER_PAGE((elem_size), (page_size), (mode))) - -#define is_chain_u16(p) \ - ((p)->cnt_type == QED_CHAIN_CNT_TYPE_U16) -#define is_chain_u32(p) \ - ((p)->cnt_type == QED_CHAIN_CNT_TYPE_U32) +#define is_chain_u16(p) ((p)->cnt_type == QED_CHAIN_CNT_TYPE_U16) +#define is_chain_u32(p) ((p)->cnt_type == QED_CHAIN_CNT_TYPE_U32) /* Accessors */ - -static inline u16 qed_chain_get_prod_idx(const struct qed_chain *chain) +static inline u16 qed_chain_get_prod_idx(struct qed_chain *p_chain) { - return chain->u.chain16.prod_idx; + return p_chain->u.chain16.prod_idx; } -static inline u16 qed_chain_get_cons_idx(const struct qed_chain *chain) +static inline u16 qed_chain_get_cons_idx(struct qed_chain *p_chain) { - return chain->u.chain16.cons_idx; + return p_chain->u.chain16.cons_idx; } -static inline u32 qed_chain_get_prod_idx_u32(const struct qed_chain *chain) +static inline u32 qed_chain_get_cons_idx_u32(struct qed_chain *p_chain) { - return chain->u.chain32.prod_idx; + return p_chain->u.chain32.cons_idx; } -static inline u32 
qed_chain_get_cons_idx_u32(const struct qed_chain *chain) +static inline u16 qed_chain_get_elem_left(struct qed_chain *p_chain) { - return chain->u.chain32.cons_idx; -} - -static inline u16 qed_chain_get_elem_used(const struct qed_chain *chain) -{ - u32 prod = qed_chain_get_prod_idx(chain); - u32 cons = qed_chain_get_cons_idx(chain); - u16 elem_per_page = chain->elem_per_page; u16 used; - if (prod < cons) - prod += (u32)U16_MAX + 1; + used = (u16) (((u32)0x10000 + + (u32)p_chain->u.chain16.prod_idx) - + (u32)p_chain->u.chain16.cons_idx); + if (p_chain->mode == QED_CHAIN_MODE_NEXT_PTR) + used -= p_chain->u.chain16.prod_idx / p_chain->elem_per_page - + p_chain->u.chain16.cons_idx / p_chain->elem_per_page; - used = (u16)(prod - cons); - if (chain->mode == QED_CHAIN_MODE_NEXT_PTR) - used -= (u16)(prod / elem_per_page - cons / elem_per_page); - - return used; + return (u16)(p_chain->capacity - used); } -static inline u16 qed_chain_get_elem_left(const struct qed_chain *chain) +static inline u32 qed_chain_get_elem_left_u32(struct qed_chain *p_chain) { - return (u16)(chain->capacity - qed_chain_get_elem_used(chain)); -} - -static inline u32 qed_chain_get_elem_used_u32(const struct qed_chain *chain) -{ - u64 prod = qed_chain_get_prod_idx_u32(chain); - u64 cons = qed_chain_get_cons_idx_u32(chain); - u16 elem_per_page = chain->elem_per_page; u32 used; - if (prod < cons) - prod += (u64)U32_MAX + 1; + used = (u32) (((u64)0x100000000ULL + + (u64)p_chain->u.chain32.prod_idx) - + (u64)p_chain->u.chain32.cons_idx); + if (p_chain->mode == QED_CHAIN_MODE_NEXT_PTR) + used -= p_chain->u.chain32.prod_idx / p_chain->elem_per_page - + p_chain->u.chain32.cons_idx / p_chain->elem_per_page; - used = (u32)(prod - cons); - if (chain->mode == QED_CHAIN_MODE_NEXT_PTR) - used -= (u32)(prod / elem_per_page - cons / elem_per_page); - - return used; + return p_chain->capacity - used; } -static inline u32 qed_chain_get_elem_left_u32(const struct qed_chain *chain) +static inline u16 
qed_chain_get_usable_per_page(struct qed_chain *p_chain) { - return chain->capacity - qed_chain_get_elem_used_u32(chain); + return p_chain->usable_per_page; } -static inline u16 qed_chain_get_usable_per_page(const struct qed_chain *chain) +static inline u16 qed_chain_get_unusable_per_page(struct qed_chain *p_chain) { - return chain->usable_per_page; + return p_chain->elem_unusable; } -static inline u8 qed_chain_get_unusable_per_page(const struct qed_chain *chain) +static inline u32 qed_chain_get_page_cnt(struct qed_chain *p_chain) { - return chain->elem_unusable; + return p_chain->page_cnt; } -static inline u32 qed_chain_get_page_cnt(const struct qed_chain *chain) +static inline dma_addr_t qed_chain_get_pbl_phys(struct qed_chain *p_chain) { - return chain->page_cnt; -} - -static inline dma_addr_t qed_chain_get_pbl_phys(const struct qed_chain *chain) -{ - return chain->pbl_sp.table_phys; + return p_chain->pbl.p_phys_table; } /** * @brief qed_chain_advance_page - * - * Advance the next element across pages for a linked chain + * Advance the next element accros pages for a linked chain * * @param p_chain * @param p_next_elem @@ -280,10 +214,10 @@ static inline dma_addr_t qed_chain_get_pbl_phys(const struct qed_chain *chain) static inline void qed_chain_advance_page(struct qed_chain *p_chain, void **p_next_elem, void *idx_to_inc, void *page_to_inc) + { struct qed_chain_next *p_next = NULL; u32 page_index = 0; - switch (p_chain->mode) { case QED_CHAIN_MODE_NEXT_PTR: p_next = *p_next_elem; @@ -307,7 +241,7 @@ qed_chain_advance_page(struct qed_chain *p_chain, *(u32 *)page_to_inc = 0; page_index = *(u32 *)page_to_inc; } - *p_next_elem = p_chain->pbl.pp_addr_tbl[page_index].virt_addr; + *p_next_elem = p_chain->pbl.pp_virt_addr_tbl[page_index]; } } @@ -371,7 +305,7 @@ static inline void *qed_chain_produce(struct qed_chain *p_chain) if ((p_chain->u.chain16.prod_idx & p_chain->elem_per_page_mask) == p_chain->next_page_mask) { p_prod_idx = &p_chain->u.chain16.prod_idx; - 
p_prod_page_idx = &p_chain->pbl.c.u16.prod_page_idx; + p_prod_page_idx = &p_chain->pbl.u.pbl16.prod_page_idx; qed_chain_advance_page(p_chain, &p_chain->p_prod_elem, p_prod_idx, p_prod_page_idx); } @@ -380,7 +314,7 @@ static inline void *qed_chain_produce(struct qed_chain *p_chain) if ((p_chain->u.chain32.prod_idx & p_chain->elem_per_page_mask) == p_chain->next_page_mask) { p_prod_idx = &p_chain->u.chain32.prod_idx; - p_prod_page_idx = &p_chain->pbl.c.u32.prod_page_idx; + p_prod_page_idx = &p_chain->pbl.u.pbl32.prod_page_idx; qed_chain_advance_page(p_chain, &p_chain->p_prod_elem, p_prod_idx, p_prod_page_idx); } @@ -444,7 +378,7 @@ static inline void *qed_chain_consume(struct qed_chain *p_chain) if ((p_chain->u.chain16.cons_idx & p_chain->elem_per_page_mask) == p_chain->next_page_mask) { p_cons_idx = &p_chain->u.chain16.cons_idx; - p_cons_page_idx = &p_chain->pbl.c.u16.cons_page_idx; + p_cons_page_idx = &p_chain->pbl.u.pbl16.cons_page_idx; qed_chain_advance_page(p_chain, &p_chain->p_cons_elem, p_cons_idx, p_cons_page_idx); } @@ -453,8 +387,8 @@ static inline void *qed_chain_consume(struct qed_chain *p_chain) if ((p_chain->u.chain32.cons_idx & p_chain->elem_per_page_mask) == p_chain->next_page_mask) { p_cons_idx = &p_chain->u.chain32.cons_idx; - p_cons_page_idx = &p_chain->pbl.c.u32.cons_page_idx; - qed_chain_advance_page(p_chain, &p_chain->p_cons_elem, + p_cons_page_idx = &p_chain->pbl.u.pbl32.cons_page_idx; + qed_chain_advance_page(p_chain, &p_chain->p_cons_elem, p_cons_idx, p_cons_page_idx); } p_chain->u.chain32.cons_idx++; @@ -470,7 +404,7 @@ static inline void *qed_chain_consume(struct qed_chain *p_chain) /** * @brief qed_chain_reset - Resets the chain to its start state * - * @param p_chain pointer to a previously allocated chain + * @param p_chain pointer to a previously allocted chain */ static inline void qed_chain_reset(struct qed_chain *p_chain) { @@ -495,29 +429,140 @@ static inline void qed_chain_reset(struct qed_chain *p_chain) u32 reset_val = 
p_chain->page_cnt - 1; if (is_chain_u16(p_chain)) { - p_chain->pbl.c.u16.prod_page_idx = (u16)reset_val; - p_chain->pbl.c.u16.cons_page_idx = (u16)reset_val; + p_chain->pbl.u.pbl16.prod_page_idx = (u16)reset_val; + p_chain->pbl.u.pbl16.cons_page_idx = (u16)reset_val; } else { - p_chain->pbl.c.u32.prod_page_idx = reset_val; - p_chain->pbl.c.u32.cons_page_idx = reset_val; + p_chain->pbl.u.pbl32.prod_page_idx = reset_val; + p_chain->pbl.u.pbl32.cons_page_idx = reset_val; } } switch (p_chain->intended_use) { + case QED_CHAIN_USE_TO_CONSUME_PRODUCE: + case QED_CHAIN_USE_TO_PRODUCE: + /* Do nothing */ + break; + case QED_CHAIN_USE_TO_CONSUME: /* produce empty elements */ for (i = 0; i < p_chain->capacity; i++) qed_chain_recycle_consumed(p_chain); break; - - case QED_CHAIN_USE_TO_CONSUME_PRODUCE: - case QED_CHAIN_USE_TO_PRODUCE: - default: - /* Do nothing */ - break; } } +/** + * @brief qed_chain_init - Initalizes a basic chain struct + * + * @param p_chain + * @param p_virt_addr + * @param p_phys_addr physical address of allocated buffer's beginning + * @param page_cnt number of pages in the allocated buffer + * @param elem_size size of each element in the chain + * @param intended_use + * @param mode + */ +static inline void qed_chain_init_params(struct qed_chain *p_chain, + u32 page_cnt, + u8 elem_size, + enum qed_chain_use_mode intended_use, + enum qed_chain_mode mode, + enum qed_chain_cnt_type cnt_type) +{ + /* chain fixed parameters */ + p_chain->p_virt_addr = NULL; + p_chain->p_phys_addr = 0; + p_chain->elem_size = elem_size; + p_chain->intended_use = intended_use; + p_chain->mode = mode; + p_chain->cnt_type = cnt_type; + + p_chain->elem_per_page = ELEMS_PER_PAGE(elem_size); + p_chain->usable_per_page = USABLE_ELEMS_PER_PAGE(elem_size, mode); + p_chain->elem_per_page_mask = p_chain->elem_per_page - 1; + p_chain->elem_unusable = UNUSABLE_ELEMS_PER_PAGE(elem_size, mode); + p_chain->next_page_mask = (p_chain->usable_per_page & + p_chain->elem_per_page_mask); + + 
p_chain->page_cnt = page_cnt; + p_chain->capacity = p_chain->usable_per_page * page_cnt; + p_chain->size = p_chain->elem_per_page * page_cnt; + + p_chain->pbl.p_phys_table = 0; + p_chain->pbl.p_virt_table = NULL; + p_chain->pbl.pp_virt_addr_tbl = NULL; +} + +/** + * @brief qed_chain_init_mem - + * + * Initalizes a basic chain struct with its chain buffers + * + * @param p_chain + * @param p_virt_addr virtual address of allocated buffer's beginning + * @param p_phys_addr physical address of allocated buffer's beginning + * + */ +static inline void qed_chain_init_mem(struct qed_chain *p_chain, + void *p_virt_addr, dma_addr_t p_phys_addr) +{ + p_chain->p_virt_addr = p_virt_addr; + p_chain->p_phys_addr = p_phys_addr; +} + +/** + * @brief qed_chain_init_pbl_mem - + * + * Initalizes a basic chain struct with its pbl buffers + * + * @param p_chain + * @param p_virt_pbl pointer to a pre allocated side table which will hold + * virtual page addresses. + * @param p_phys_pbl pointer to a pre-allocated side table which will hold + * physical page addresses. + * @param pp_virt_addr_tbl + * pointer to a pre-allocated side table which will hold + * the virtual addresses of the chain pages. 
+ * + */ +static inline void qed_chain_init_pbl_mem(struct qed_chain *p_chain, + void *p_virt_pbl, + dma_addr_t p_phys_pbl, + void **pp_virt_addr_tbl) +{ + p_chain->pbl.p_phys_table = p_phys_pbl; + p_chain->pbl.p_virt_table = p_virt_pbl; + p_chain->pbl.pp_virt_addr_tbl = pp_virt_addr_tbl; +} + +/** + * @brief qed_chain_init_next_ptr_elem - + * + * Initalizes a next pointer element + * + * @param p_chain + * @param p_virt_curr virtual address of a chain page of which the next + * pointer element is initialized + * @param p_virt_next virtual address of the next chain page + * @param p_phys_next physical address of the next chain page + * + */ +static inline void +qed_chain_init_next_ptr_elem(struct qed_chain *p_chain, + void *p_virt_curr, + void *p_virt_next, dma_addr_t p_phys_next) +{ + struct qed_chain_next *p_next; + u32 size; + + size = p_chain->elem_size * p_chain->usable_per_page; + p_next = (struct qed_chain_next *)((u8 *)p_virt_curr + size); + + DMA_REGPAIR_LE(p_next->next_phys, p_phys_next); + + p_next->next_virt = p_virt_next; +} + /** * @brief qed_chain_get_last_elem - * @@ -552,7 +597,7 @@ static inline void *qed_chain_get_last_elem(struct qed_chain *p_chain) break; case QED_CHAIN_MODE_PBL: last_page_idx = p_chain->page_cnt - 1; - p_virt_addr = p_chain->pbl.pp_addr_tbl[last_page_idx].virt_addr; + p_virt_addr = p_chain->pbl.pp_virt_addr_tbl[last_page_idx]; break; } /* p_virt_addr points at this stage to the last page of the chain */ @@ -571,37 +616,6 @@ static inline void *qed_chain_get_last_elem(struct qed_chain *p_chain) static inline void qed_chain_set_prod(struct qed_chain *p_chain, u32 prod_idx, void *p_prod_elem) { - if (p_chain->mode == QED_CHAIN_MODE_PBL) { - u32 cur_prod, page_mask, page_cnt, page_diff; - - cur_prod = is_chain_u16(p_chain) ? 
p_chain->u.chain16.prod_idx : - p_chain->u.chain32.prod_idx; - - /* Assume that number of elements in a page is power of 2 */ - page_mask = ~p_chain->elem_per_page_mask; - - /* Use "cur_prod - 1" and "prod_idx - 1" since producer index - * reaches the first element of next page before the page index - * is incremented. See qed_chain_produce(). - * Index wrap around is not a problem because the difference - * between current and given producer indices is always - * positive and lower than the chain's capacity. - */ - page_diff = (((cur_prod - 1) & page_mask) - - ((prod_idx - 1) & page_mask)) / - p_chain->elem_per_page; - - page_cnt = qed_chain_get_page_cnt(p_chain); - if (is_chain_u16(p_chain)) - p_chain->pbl.c.u16.prod_page_idx = - (p_chain->pbl.c.u16.prod_page_idx - - page_diff + page_cnt) % page_cnt; - else - p_chain->pbl.c.u32.prod_page_idx = - (p_chain->pbl.c.u32.prod_page_idx - - page_diff + page_cnt) % page_cnt; - } - if (is_chain_u16(p_chain)) p_chain->u.chain16.prod_idx = (u16) prod_idx; else @@ -624,8 +638,8 @@ static inline void qed_chain_pbl_zero_mem(struct qed_chain *p_chain) page_cnt = qed_chain_get_page_cnt(p_chain); for (i = 0; i < page_cnt; i++) - memset(p_chain->pbl.pp_addr_tbl[i].virt_addr, 0, - p_chain->page_size); + memset(p_chain->pbl.pp_virt_addr_tbl[i], 0, + QED_CHAIN_PAGE_SIZE); } #endif diff --git a/include/linux/qed/qed_eth_if.h b/include/linux/qed/qed_eth_if.h index 812a4d7511..33c24ebc9b 100644 --- a/include/linux/qed/qed_eth_if.h +++ b/include/linux/qed/qed_eth_if.h @@ -1,7 +1,9 @@ -/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */ /* QLogic qed NIC Driver - * Copyright (c) 2015-2017 QLogic Corporation - * Copyright (c) 2019-2020 Marvell International Ltd. + * Copyright (c) 2015 QLogic Corporation + * + * This software is available under the terms of the GNU General Public License + * (GPL) Version 2, available from the file COPYING in the main directory of + * this source tree. 
*/ #ifndef _QED_ETH_IF_H @@ -13,67 +15,6 @@ #include #include -/* 64 max queues * (1 rx + 4 tx-cos + 1 xdp) */ -#define QED_MIN_L2_CONS (2 + NUM_PHYS_TCS_4PORT_K2) -#define QED_MAX_L2_CONS (64 * (QED_MIN_L2_CONS)) - -struct qed_queue_start_common_params { - /* Should always be relative to entity sending this. */ - u8 vport_id; - u16 queue_id; - - /* Relative, but relevant only for PFs */ - u8 stats_id; - - struct qed_sb_info *p_sb; - u8 sb_idx; - - u8 tc; -}; - -struct qed_rxq_start_ret_params { - void __iomem *p_prod; - void *p_handle; -}; - -struct qed_txq_start_ret_params { - void __iomem *p_doorbell; - void *p_handle; -}; - -enum qed_filter_config_mode { - QED_FILTER_CONFIG_MODE_DISABLE, - QED_FILTER_CONFIG_MODE_5_TUPLE, - QED_FILTER_CONFIG_MODE_L4_PORT, - QED_FILTER_CONFIG_MODE_IP_DEST, - QED_FILTER_CONFIG_MODE_IP_SRC, -}; - -struct qed_ntuple_filter_params { - /* Physically mapped address containing header of buffer to be used - * as filter. - */ - dma_addr_t addr; - - /* Length of header in bytes */ - u16 length; - - /* Relative queue-id to receive classified packet */ -#define QED_RFS_NTUPLE_QID_RSS ((u16)-1) - u16 qid; - - /* Identifier can either be according to vport-id or vfid */ - bool b_is_vf; - u8 vport_id; - u8 vf_id; - - /* true iff this filter is to be added. 
Else to be removed */ - bool b_is_add; - - /* If flow needs to be dropped */ - bool b_is_drop; -}; - struct qed_dev_eth_info { struct qed_dev_info common; @@ -81,18 +22,14 @@ struct qed_dev_eth_info { u8 num_tc; u8 port_mac[ETH_ALEN]; - u16 num_vlan_filters; - u16 num_mac_filters; + u8 num_vlan_filters; /* Legacy VF - this affects the datapath, so qede has to know */ bool is_legacy; - - /* Might depend on available resources [in case of VF] */ - bool xdp_supported; }; struct qed_update_vport_rss_params { - void *rss_ind_table[128]; + u16 rss_ind_table[128]; u32 rss_key[10]; u8 rss_caps; }; @@ -111,7 +48,6 @@ struct qed_update_vport_params { struct qed_start_vport_params { bool remove_inner_vlan; - bool handle_ptp_pkts; bool gro_enable; bool drop_ttl0; u8 vport_id; @@ -119,6 +55,18 @@ struct qed_start_vport_params { bool clear_stats; }; +struct qed_stop_rxq_params { + u8 rss_id; + u8 rx_queue_id; + u8 vport_id; + bool eq_completion_only; +}; + +struct qed_stop_txq_params { + u8 rss_id; + u8 tx_queue_id; +}; + enum qed_filter_rx_mode_type { QED_FILTER_RX_MODE_TYPE_REGULAR, QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC, @@ -163,6 +111,15 @@ struct qed_filter_params { union qed_filter_type_params filter; }; +struct qed_queue_start_common_params { + u8 rss_id; + u8 queue_id; + u8 vport_id; + u16 sb; + u16 sb_idx; + u16 vf_qid; +}; + struct qed_tunn_params { u16 vxlan_port; u8 update_vxlan_port; @@ -172,28 +129,7 @@ struct qed_tunn_params { struct qed_eth_cb_ops { struct qed_common_cb_ops common; - void (*force_mac) (void *dev, u8 *mac, bool forced); - void (*ports_update)(void *dev, u16 vxlan_port, u16 geneve_port); -}; - -#define QED_MAX_PHC_DRIFT_PPB 291666666 - -enum qed_ptp_filter_type { - QED_PTP_FILTER_NONE, - QED_PTP_FILTER_ALL, - QED_PTP_FILTER_V1_L4_EVENT, - QED_PTP_FILTER_V1_L4_GEN, - QED_PTP_FILTER_V2_L4_EVENT, - QED_PTP_FILTER_V2_L4_GEN, - QED_PTP_FILTER_V2_L2_EVENT, - QED_PTP_FILTER_V2_L2_GEN, - QED_PTP_FILTER_V2_EVENT, - QED_PTP_FILTER_V2_GEN -}; - -enum 
qed_ptp_hwtstamp_tx_type { - QED_PTP_HWTSTAMP_TX_OFF, - QED_PTP_HWTSTAMP_TX_ON, + void (*force_mac) (void *dev, u8 *mac); }; #ifdef CONFIG_DCB @@ -255,17 +191,6 @@ struct qed_eth_dcbnl_ops { }; #endif -struct qed_eth_ptp_ops { - int (*cfg_filters)(struct qed_dev *, enum qed_ptp_filter_type, - enum qed_ptp_hwtstamp_tx_type); - int (*read_rx_ts)(struct qed_dev *, u64 *); - int (*read_tx_ts)(struct qed_dev *, u64 *); - int (*read_cc)(struct qed_dev *, u64 *); - int (*disable)(struct qed_dev *); - int (*adjfreq)(struct qed_dev *, s32); - int (*enable)(struct qed_dev *); -}; - struct qed_eth_ops { const struct qed_common_ops *common; #ifdef CONFIG_QED_SRIOV @@ -274,7 +199,6 @@ struct qed_eth_ops { #ifdef CONFIG_DCB const struct qed_eth_dcbnl_ops *dcb; #endif - const struct qed_eth_ptp_ops *ptp; int (*fill_dev_info)(struct qed_dev *cdev, struct qed_dev_eth_info *info); @@ -295,24 +219,24 @@ struct qed_eth_ops { struct qed_update_vport_params *params); int (*q_rx_start)(struct qed_dev *cdev, - u8 rss_num, struct qed_queue_start_common_params *params, u16 bd_max_bytes, dma_addr_t bd_chain_phys_addr, dma_addr_t cqe_pbl_addr, u16 cqe_pbl_size, - struct qed_rxq_start_ret_params *ret_params); + void __iomem **pp_prod); - int (*q_rx_stop)(struct qed_dev *cdev, u8 rss_id, void *handle); + int (*q_rx_stop)(struct qed_dev *cdev, + struct qed_stop_rxq_params *params); int (*q_tx_start)(struct qed_dev *cdev, - u8 rss_num, struct qed_queue_start_common_params *params, dma_addr_t pbl_addr, u16 pbl_size, - struct qed_txq_start_ret_params *ret_params); + void __iomem **pp_doorbell); - int (*q_tx_stop)(struct qed_dev *cdev, u8 rss_id, void *handle); + int (*q_tx_stop)(struct qed_dev *cdev, + struct qed_stop_txq_params *params); int (*filter_config)(struct qed_dev *cdev, struct qed_filter_params *params); @@ -328,15 +252,6 @@ struct qed_eth_ops { int (*tunn_config)(struct qed_dev *cdev, struct qed_tunn_params *params); - - int (*ntuple_filter_config)(struct qed_dev *cdev, - void *cookie, 
- struct qed_ntuple_filter_params *params); - - int (*configure_arfs_searcher)(struct qed_dev *cdev, - enum qed_filter_config_mode mode); - int (*get_coalesce)(struct qed_dev *cdev, u16 *coal, void *handle); - int (*req_bulletin_update_mac)(struct qed_dev *cdev, u8 *mac); }; const struct qed_eth_ops *qed_get_eth_ops(void); diff --git a/include/linux/qed/qed_if.h b/include/linux/qed/qed_if.h index 850b989916..8978a60371 100644 --- a/include/linux/qed/qed_if.h +++ b/include/linux/qed/qed_if.h @@ -1,18 +1,21 @@ -/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */ /* QLogic qed NIC Driver - * Copyright (c) 2015-2017 QLogic Corporation - * Copyright (c) 2019-2020 Marvell International Ltd. + * + * Copyright (c) 2015 QLogic Corporation + * + * This software is available under the terms of the GNU General Public License + * (GPL) Version 2, available from the file COPYING in the main directory of + * this source tree. */ #ifndef _QED_IF_H #define _QED_IF_H -#include #include #include #include #include #include +#include #include #include #include @@ -21,8 +24,6 @@ #include #include #include -#include -#include enum dcbx_protocol_type { DCBX_PROTOCOL_ISCSI, @@ -35,6 +36,7 @@ enum dcbx_protocol_type { #define QED_ROCE_PROTOCOL_INDEX (3) +#ifdef CONFIG_DCB #define QED_LLDP_CHASSIS_ID_STAT_LEN 4 #define QED_LLDP_PORT_ID_STAT_LEN 4 #define QED_DCBX_MAX_APP_PROTOCOL 32 @@ -120,7 +122,6 @@ struct qed_dcbx_operational_params { bool enabled; bool ieee; bool cee; - bool local; u32 err; }; @@ -131,27 +132,7 @@ struct qed_dcbx_get { struct qed_dcbx_remote_params remote; struct qed_dcbx_admin_params local; }; - -enum qed_nvm_images { - QED_NVM_IMAGE_ISCSI_CFG, - QED_NVM_IMAGE_FCOE_CFG, - QED_NVM_IMAGE_MDUMP, - QED_NVM_IMAGE_NVM_CFG1, - QED_NVM_IMAGE_DEFAULT_CFG, - QED_NVM_IMAGE_NVM_META, -}; - -struct qed_link_eee_params { - u32 tx_lpi_timer; -#define QED_EEE_1G_ADV BIT(0) -#define QED_EEE_10G_ADV BIT(1) - - /* Capabilities are represented using QED_EEE_*_ADV values */ - u8 
adv_caps; - u8 lp_adv_caps; - bool enable; - bool tx_lpi_enable; -}; +#endif enum qed_led_mode { QED_LED_MODE_OFF, @@ -159,293 +140,13 @@ enum qed_led_mode { QED_LED_MODE_RESTORE }; -struct qed_mfw_tlv_eth { - u16 lso_maxoff_size; - bool lso_maxoff_size_set; - u16 lso_minseg_size; - bool lso_minseg_size_set; - u8 prom_mode; - bool prom_mode_set; - u16 tx_descr_size; - bool tx_descr_size_set; - u16 rx_descr_size; - bool rx_descr_size_set; - u16 netq_count; - bool netq_count_set; - u32 tcp4_offloads; - bool tcp4_offloads_set; - u32 tcp6_offloads; - bool tcp6_offloads_set; - u16 tx_descr_qdepth; - bool tx_descr_qdepth_set; - u16 rx_descr_qdepth; - bool rx_descr_qdepth_set; - u8 iov_offload; -#define QED_MFW_TLV_IOV_OFFLOAD_NONE (0) -#define QED_MFW_TLV_IOV_OFFLOAD_MULTIQUEUE (1) -#define QED_MFW_TLV_IOV_OFFLOAD_VEB (2) -#define QED_MFW_TLV_IOV_OFFLOAD_VEPA (3) - bool iov_offload_set; - u8 txqs_empty; - bool txqs_empty_set; - u8 rxqs_empty; - bool rxqs_empty_set; - u8 num_txqs_full; - bool num_txqs_full_set; - u8 num_rxqs_full; - bool num_rxqs_full_set; -}; - -#define QED_MFW_TLV_TIME_SIZE 14 -struct qed_mfw_tlv_time { - bool b_set; - u8 month; - u8 day; - u8 hour; - u8 min; - u16 msec; - u16 usec; -}; - -struct qed_mfw_tlv_fcoe { - u8 scsi_timeout; - bool scsi_timeout_set; - u32 rt_tov; - bool rt_tov_set; - u32 ra_tov; - bool ra_tov_set; - u32 ed_tov; - bool ed_tov_set; - u32 cr_tov; - bool cr_tov_set; - u8 boot_type; - bool boot_type_set; - u8 npiv_state; - bool npiv_state_set; - u32 num_npiv_ids; - bool num_npiv_ids_set; - u8 switch_name[8]; - bool switch_name_set; - u16 switch_portnum; - bool switch_portnum_set; - u8 switch_portid[3]; - bool switch_portid_set; - u8 vendor_name[8]; - bool vendor_name_set; - u8 switch_model[8]; - bool switch_model_set; - u8 switch_fw_version[8]; - bool switch_fw_version_set; - u8 qos_pri; - bool qos_pri_set; - u8 port_alias[3]; - bool port_alias_set; - u8 port_state; -#define QED_MFW_TLV_PORT_STATE_OFFLINE (0) -#define 
QED_MFW_TLV_PORT_STATE_LOOP (1) -#define QED_MFW_TLV_PORT_STATE_P2P (2) -#define QED_MFW_TLV_PORT_STATE_FABRIC (3) - bool port_state_set; - u16 fip_tx_descr_size; - bool fip_tx_descr_size_set; - u16 fip_rx_descr_size; - bool fip_rx_descr_size_set; - u16 link_failures; - bool link_failures_set; - u8 fcoe_boot_progress; - bool fcoe_boot_progress_set; - u64 rx_bcast; - bool rx_bcast_set; - u64 tx_bcast; - bool tx_bcast_set; - u16 fcoe_txq_depth; - bool fcoe_txq_depth_set; - u16 fcoe_rxq_depth; - bool fcoe_rxq_depth_set; - u64 fcoe_rx_frames; - bool fcoe_rx_frames_set; - u64 fcoe_rx_bytes; - bool fcoe_rx_bytes_set; - u64 fcoe_tx_frames; - bool fcoe_tx_frames_set; - u64 fcoe_tx_bytes; - bool fcoe_tx_bytes_set; - u16 crc_count; - bool crc_count_set; - u32 crc_err_src_fcid[5]; - bool crc_err_src_fcid_set[5]; - struct qed_mfw_tlv_time crc_err[5]; - u16 losync_err; - bool losync_err_set; - u16 losig_err; - bool losig_err_set; - u16 primtive_err; - bool primtive_err_set; - u16 disparity_err; - bool disparity_err_set; - u16 code_violation_err; - bool code_violation_err_set; - u32 flogi_param[4]; - bool flogi_param_set[4]; - struct qed_mfw_tlv_time flogi_tstamp; - u32 flogi_acc_param[4]; - bool flogi_acc_param_set[4]; - struct qed_mfw_tlv_time flogi_acc_tstamp; - u32 flogi_rjt; - bool flogi_rjt_set; - struct qed_mfw_tlv_time flogi_rjt_tstamp; - u32 fdiscs; - bool fdiscs_set; - u8 fdisc_acc; - bool fdisc_acc_set; - u8 fdisc_rjt; - bool fdisc_rjt_set; - u8 plogi; - bool plogi_set; - u8 plogi_acc; - bool plogi_acc_set; - u8 plogi_rjt; - bool plogi_rjt_set; - u32 plogi_dst_fcid[5]; - bool plogi_dst_fcid_set[5]; - struct qed_mfw_tlv_time plogi_tstamp[5]; - u32 plogi_acc_src_fcid[5]; - bool plogi_acc_src_fcid_set[5]; - struct qed_mfw_tlv_time plogi_acc_tstamp[5]; - u8 tx_plogos; - bool tx_plogos_set; - u8 plogo_acc; - bool plogo_acc_set; - u8 plogo_rjt; - bool plogo_rjt_set; - u32 plogo_src_fcid[5]; - bool plogo_src_fcid_set[5]; - struct qed_mfw_tlv_time plogo_tstamp[5]; - u8 
rx_logos; - bool rx_logos_set; - u8 tx_accs; - bool tx_accs_set; - u8 tx_prlis; - bool tx_prlis_set; - u8 rx_accs; - bool rx_accs_set; - u8 tx_abts; - bool tx_abts_set; - u8 rx_abts_acc; - bool rx_abts_acc_set; - u8 rx_abts_rjt; - bool rx_abts_rjt_set; - u32 abts_dst_fcid[5]; - bool abts_dst_fcid_set[5]; - struct qed_mfw_tlv_time abts_tstamp[5]; - u8 rx_rscn; - bool rx_rscn_set; - u32 rx_rscn_nport[4]; - bool rx_rscn_nport_set[4]; - u8 tx_lun_rst; - bool tx_lun_rst_set; - u8 abort_task_sets; - bool abort_task_sets_set; - u8 tx_tprlos; - bool tx_tprlos_set; - u8 tx_nos; - bool tx_nos_set; - u8 rx_nos; - bool rx_nos_set; - u8 ols; - bool ols_set; - u8 lr; - bool lr_set; - u8 lrr; - bool lrr_set; - u8 tx_lip; - bool tx_lip_set; - u8 rx_lip; - bool rx_lip_set; - u8 eofa; - bool eofa_set; - u8 eofni; - bool eofni_set; - u8 scsi_chks; - bool scsi_chks_set; - u8 scsi_cond_met; - bool scsi_cond_met_set; - u8 scsi_busy; - bool scsi_busy_set; - u8 scsi_inter; - bool scsi_inter_set; - u8 scsi_inter_cond_met; - bool scsi_inter_cond_met_set; - u8 scsi_rsv_conflicts; - bool scsi_rsv_conflicts_set; - u8 scsi_tsk_full; - bool scsi_tsk_full_set; - u8 scsi_aca_active; - bool scsi_aca_active_set; - u8 scsi_tsk_abort; - bool scsi_tsk_abort_set; - u32 scsi_rx_chk[5]; - bool scsi_rx_chk_set[5]; - struct qed_mfw_tlv_time scsi_chk_tstamp[5]; -}; - -struct qed_mfw_tlv_iscsi { - u8 target_llmnr; - bool target_llmnr_set; - u8 header_digest; - bool header_digest_set; - u8 data_digest; - bool data_digest_set; - u8 auth_method; -#define QED_MFW_TLV_AUTH_METHOD_NONE (1) -#define QED_MFW_TLV_AUTH_METHOD_CHAP (2) -#define QED_MFW_TLV_AUTH_METHOD_MUTUAL_CHAP (3) - bool auth_method_set; - u16 boot_taget_portal; - bool boot_taget_portal_set; - u16 frame_size; - bool frame_size_set; - u16 tx_desc_size; - bool tx_desc_size_set; - u16 rx_desc_size; - bool rx_desc_size_set; - u8 boot_progress; - bool boot_progress_set; - u16 tx_desc_qdepth; - bool tx_desc_qdepth_set; - u16 rx_desc_qdepth; - bool 
rx_desc_qdepth_set; - u64 rx_frames; - bool rx_frames_set; - u64 rx_bytes; - bool rx_bytes_set; - u64 tx_frames; - bool tx_frames_set; - u64 tx_bytes; - bool tx_bytes_set; -}; - -enum qed_db_rec_width { - DB_REC_WIDTH_32B, - DB_REC_WIDTH_64B, -}; - -enum qed_db_rec_space { - DB_REC_KERNEL, - DB_REC_USER, -}; - #define DIRECT_REG_WR(reg_addr, val) writel((u32)val, \ (void __iomem *)(reg_addr)) #define DIRECT_REG_RD(reg_addr) readl((void __iomem *)(reg_addr)) -#define DIRECT_REG_WR64(reg_addr, val) writeq((u64)val, \ - (void __iomem *)(reg_addr)) - -#define QED_COALESCE_MAX 0x1FF +#define QED_COALESCE_MAX 0xFF #define QED_DEFAULT_RX_USECS 12 -#define QED_DEFAULT_TX_USECS 48 /* forward */ struct qed_dev; @@ -456,58 +157,20 @@ struct qed_eth_pf_params { * to update_pf_params routine invoked before slowpath start */ u16 num_cons; - - /* per-VF number of CIDs */ - u8 num_vf_cons; -#define ETH_PF_PARAMS_VF_CONS_DEFAULT (32) - - /* To enable arfs, previous to HW-init a positive number needs to be - * set [as filters require allocated searcher ILT memory]. - * This will set the maximal number of configured steering-filters. 
- */ - u32 num_arfs_filters; }; -struct qed_fcoe_pf_params { - /* The following parameters are used during protocol-init */ - u64 glbl_q_params_addr; - u64 bdq_pbl_base_addr[2]; - - /* The following parameters are used during HW-init - * and these parameters need to be passed as arguments - * to update_pf_params routine invoked before slowpath start - */ - u16 num_cons; - u16 num_tasks; - - /* The following parameters are used during protocol-init */ - u16 sq_num_pbl_pages; - - u16 cq_num_entries; - u16 cmdq_num_entries; - u16 rq_buffer_log_size; - u16 mtu; - u16 dummy_icid; - u16 bdq_xoff_threshold[2]; - u16 bdq_xon_threshold[2]; - u16 rq_buffer_size; - u8 num_cqs; /* num of global CQs */ - u8 log_page_size; - u8 gl_rq_pi; - u8 gl_cmd_pi; - u8 debug_mode; - u8 is_target; - u8 bdq_pbl_num_entries[2]; -}; - -/* Most of the parameters below are described in the FW iSCSI / TCP HSI */ +/* Most of the the parameters below are described in the FW iSCSI / TCP HSI */ struct qed_iscsi_pf_params { u64 glbl_q_params_addr; - u64 bdq_pbl_base_addr[3]; + u64 bdq_pbl_base_addr[2]; + u32 max_cwnd; u16 cq_num_entries; u16 cmdq_num_entries; - u32 two_msl_timer; + u16 dup_ack_threshold; u16 tx_sws_timer; + u16 min_rto; + u16 min_rto_rt; + u16 max_rto; /* The following parameters are used during HW-init * and these parameters need to be passed as arguments @@ -518,8 +181,8 @@ struct qed_iscsi_pf_params { /* The following parameters are used during protocol-init */ u16 half_way_close_timeout; - u16 bdq_xoff_threshold[3]; - u16 bdq_xon_threshold[3]; + u16 bdq_xoff_threshold[2]; + u16 bdq_xon_threshold[2]; u16 cmdq_xoff_threshold; u16 cmdq_xon_threshold; u16 rq_buffer_size; @@ -535,27 +198,10 @@ struct qed_iscsi_pf_params { u8 gl_cmd_pi; u8 debug_mode; u8 ll2_ooo_queue_id; + u8 ooo_enable; u8 is_target; - u8 is_soc_en; - u8 soc_num_of_blocks_log; - u8 bdq_pbl_num_entries[3]; -}; - -struct qed_nvmetcp_pf_params { - u64 glbl_q_params_addr; - u16 cq_num_entries; - u16 num_cons; - u16 
num_tasks; - u8 num_sq_pages_in_ring; - u8 num_r2tq_pages_in_ring; - u8 num_uhq_pages_in_ring; - u8 num_queues; - u8 gl_rq_pi; - u8 gl_cmd_pi; - u8 debug_mode; - u8 ll2_ooo_queue_id; - u16 min_rto; + u8 bdq_pbl_num_entries[2]; }; struct qed_rdma_pf_params { @@ -563,6 +209,7 @@ struct qed_rdma_pf_params { * the doorbell BAR). */ u32 min_dpis; /* number of requested DPIs */ + u32 num_mrs; /* number of requested memory regions */ u32 num_qps; /* number of requested Queue Pairs */ u32 num_srqs; /* number of requested SRQ */ u8 roce_edpm_mode; /* see QED_ROCE_EDPM_MODE_ENABLE */ @@ -574,9 +221,7 @@ struct qed_rdma_pf_params { struct qed_pf_params { struct qed_eth_pf_params eth_pf_params; - struct qed_fcoe_pf_params fcoe_pf_params; struct qed_iscsi_pf_params iscsi_pf_params; - struct qed_nvmetcp_pf_params nvmetcp_pf_params; struct qed_rdma_pf_params rdma_pf_params; }; @@ -588,32 +233,16 @@ enum qed_int_mode { }; struct qed_sb_info { - struct status_block_e4 *sb_virt; - dma_addr_t sb_phys; - u32 sb_ack; /* Last given ack */ - u16 igu_sb_id; - void __iomem *igu_addr; - u8 flags; -#define QED_SB_INFO_INIT 0x1 -#define QED_SB_INFO_SETUP 0x2 + struct status_block *sb_virt; + dma_addr_t sb_phys; + u32 sb_ack; /* Last given ack */ + u16 igu_sb_id; + void __iomem *igu_addr; + u8 flags; +#define QED_SB_INFO_INIT 0x1 +#define QED_SB_INFO_SETUP 0x2 - struct qed_dev *cdev; -}; - -enum qed_hw_err_type { - QED_HW_ERR_FAN_FAIL, - QED_HW_ERR_MFW_RESP_FAIL, - QED_HW_ERR_HW_ATTN, - QED_HW_ERR_DMAE_FAIL, - QED_HW_ERR_RAMROD_FAIL, - QED_HW_ERR_FW_ASSERT, - QED_HW_ERR_LAST, -}; - -enum qed_dev_type { - QED_DEV_TYPE_BB, - QED_DEV_TYPE_AH, - QED_DEV_TYPE_E5, + struct qed_dev *cdev; }; struct qed_dev_info { @@ -623,6 +252,7 @@ struct qed_dev_info { u8 num_hwfns; u8 hw_mac[ETH_ALEN]; + bool is_mf_default; /* FW version */ u16 fw_major; @@ -632,123 +262,75 @@ struct qed_dev_info { /* MFW version */ u32 mfw_rev; -#define QED_MFW_VERSION_0_MASK 0x000000FF -#define QED_MFW_VERSION_0_OFFSET 0 
-#define QED_MFW_VERSION_1_MASK 0x0000FF00 -#define QED_MFW_VERSION_1_OFFSET 8 -#define QED_MFW_VERSION_2_MASK 0x00FF0000 -#define QED_MFW_VERSION_2_OFFSET 16 -#define QED_MFW_VERSION_3_MASK 0xFF000000 -#define QED_MFW_VERSION_3_OFFSET 24 u32 flash_size; - bool b_arfs_capable; - bool b_inter_pf_switch; + u8 mf_mode; bool tx_switching; bool rdma_supported; - u16 mtu; - - bool wol_support; - bool smart_an; - - /* MBI version */ - u32 mbi_version; -#define QED_MBI_VERSION_0_MASK 0x000000FF -#define QED_MBI_VERSION_0_OFFSET 0 -#define QED_MBI_VERSION_1_MASK 0x0000FF00 -#define QED_MBI_VERSION_1_OFFSET 8 -#define QED_MBI_VERSION_2_MASK 0x00FF0000 -#define QED_MBI_VERSION_2_OFFSET 16 - - enum qed_dev_type dev_type; - - /* Output parameters for qede */ - bool vxlan_enable; - bool gre_enable; - bool geneve_enable; - - u8 abs_pf_id; }; enum qed_sb_type { QED_SB_TYPE_L2_QUEUE, QED_SB_TYPE_CNQ, - QED_SB_TYPE_STORAGE, }; enum qed_protocol { QED_PROTOCOL_ETH, QED_PROTOCOL_ISCSI, - QED_PROTOCOL_NVMETCP = QED_PROTOCOL_ISCSI, - QED_PROTOCOL_FCOE, }; -enum qed_fec_mode { - QED_FEC_MODE_NONE = BIT(0), - QED_FEC_MODE_FIRECODE = BIT(1), - QED_FEC_MODE_RS = BIT(2), - QED_FEC_MODE_AUTO = BIT(3), - QED_FEC_MODE_UNSUPPORTED = BIT(4), +enum qed_link_mode_bits { + QED_LM_FIBRE_BIT = BIT(0), + QED_LM_Autoneg_BIT = BIT(1), + QED_LM_Asym_Pause_BIT = BIT(2), + QED_LM_Pause_BIT = BIT(3), + QED_LM_1000baseT_Half_BIT = BIT(4), + QED_LM_1000baseT_Full_BIT = BIT(5), + QED_LM_10000baseKR_Full_BIT = BIT(6), + QED_LM_25000baseKR_Full_BIT = BIT(7), + QED_LM_40000baseLR4_Full_BIT = BIT(8), + QED_LM_50000baseKR2_Full_BIT = BIT(9), + QED_LM_100000baseKR4_Full_BIT = BIT(10), + QED_LM_COUNT = 11 }; struct qed_link_params { - bool link_up; + bool link_up; - u32 override_flags; -#define QED_LINK_OVERRIDE_SPEED_AUTONEG BIT(0) -#define QED_LINK_OVERRIDE_SPEED_ADV_SPEEDS BIT(1) -#define QED_LINK_OVERRIDE_SPEED_FORCED_SPEED BIT(2) -#define QED_LINK_OVERRIDE_PAUSE_CONFIG BIT(3) -#define 
QED_LINK_OVERRIDE_LOOPBACK_MODE BIT(4) -#define QED_LINK_OVERRIDE_EEE_CONFIG BIT(5) -#define QED_LINK_OVERRIDE_FEC_CONFIG BIT(6) - - bool autoneg; - __ETHTOOL_DECLARE_LINK_MODE_MASK(adv_speeds); - u32 forced_speed; - - u32 pause_config; -#define QED_LINK_PAUSE_AUTONEG_ENABLE BIT(0) -#define QED_LINK_PAUSE_RX_ENABLE BIT(1) -#define QED_LINK_PAUSE_TX_ENABLE BIT(2) - - u32 loopback_mode; -#define QED_LINK_LOOPBACK_NONE BIT(0) -#define QED_LINK_LOOPBACK_INT_PHY BIT(1) -#define QED_LINK_LOOPBACK_EXT_PHY BIT(2) -#define QED_LINK_LOOPBACK_EXT BIT(3) -#define QED_LINK_LOOPBACK_MAC BIT(4) -#define QED_LINK_LOOPBACK_CNIG_AH_ONLY_0123 BIT(5) -#define QED_LINK_LOOPBACK_CNIG_AH_ONLY_2301 BIT(6) -#define QED_LINK_LOOPBACK_PCS_AH_ONLY BIT(7) -#define QED_LINK_LOOPBACK_REVERSE_MAC_AH_ONLY BIT(8) -#define QED_LINK_LOOPBACK_INT_PHY_FEA_AH_ONLY BIT(9) - - struct qed_link_eee_params eee; - u32 fec; +#define QED_LINK_OVERRIDE_SPEED_AUTONEG BIT(0) +#define QED_LINK_OVERRIDE_SPEED_ADV_SPEEDS BIT(1) +#define QED_LINK_OVERRIDE_SPEED_FORCED_SPEED BIT(2) +#define QED_LINK_OVERRIDE_PAUSE_CONFIG BIT(3) +#define QED_LINK_OVERRIDE_LOOPBACK_MODE BIT(4) + u32 override_flags; + bool autoneg; + u32 adv_speeds; + u32 forced_speed; +#define QED_LINK_PAUSE_AUTONEG_ENABLE BIT(0) +#define QED_LINK_PAUSE_RX_ENABLE BIT(1) +#define QED_LINK_PAUSE_TX_ENABLE BIT(2) + u32 pause_config; +#define QED_LINK_LOOPBACK_NONE BIT(0) +#define QED_LINK_LOOPBACK_INT_PHY BIT(1) +#define QED_LINK_LOOPBACK_EXT_PHY BIT(2) +#define QED_LINK_LOOPBACK_EXT BIT(3) +#define QED_LINK_LOOPBACK_MAC BIT(4) + u32 loopback_mode; }; struct qed_link_output { - bool link_up; + bool link_up; - __ETHTOOL_DECLARE_LINK_MODE_MASK(supported_caps); - __ETHTOOL_DECLARE_LINK_MODE_MASK(advertised_caps); - __ETHTOOL_DECLARE_LINK_MODE_MASK(lp_caps); + /* In QED_LM_* defs */ + u32 supported_caps; + u32 advertised_caps; + u32 lp_caps; - u32 speed; /* In Mb/s */ - u8 duplex; /* In DUPLEX defs */ - u8 port; /* In PORT defs */ - bool autoneg; - u32 
pause_config; - - /* EEE - capability & param */ - bool eee_supported; - bool eee_active; - u8 sup_caps; - struct qed_link_eee_params eee; - - u32 sup_fec; - u32 active_fec; + u32 speed; /* In Mb/s */ + u8 duplex; /* In DUPLEX defs */ + u8 port; /* In PORT defs */ + bool autoneg; + u32 pause_config; }; struct qed_probe_params { @@ -756,7 +338,6 @@ struct qed_probe_params { u32 dp_module; u8 dp_level; bool is_vf; - bool recov_in_prog; }; #define QED_DRV_VER_STR_SIZE 12 @@ -779,42 +360,9 @@ struct qed_int_info { u8 used_cnt; }; -struct qed_generic_tlvs { -#define QED_TLV_IP_CSUM BIT(0) -#define QED_TLV_LSO BIT(1) - u16 feat_flags; -#define QED_TLV_MAC_COUNT 3 - u8 mac[QED_TLV_MAC_COUNT][ETH_ALEN]; -}; - -#define QED_I2C_DEV_ADDR_A0 0xA0 -#define QED_I2C_DEV_ADDR_A2 0xA2 - -#define QED_NVM_SIGNATURE 0x12435687 - -enum qed_nvm_flash_cmd { - QED_NVM_FLASH_CMD_FILE_DATA = 0x2, - QED_NVM_FLASH_CMD_FILE_START = 0x3, - QED_NVM_FLASH_CMD_NVM_CHANGE = 0x4, - QED_NVM_FLASH_CMD_NVM_CFG_ID = 0x5, - QED_NVM_FLASH_CMD_NVM_MAX, -}; - -struct qed_devlink { - struct qed_dev *cdev; - struct devlink_health_reporter *fw_reporter; -}; - struct qed_common_cb_ops { - void (*arfs_filter_op)(void *dev, void *fltr, u8 fw_rc); - void (*link_update)(void *dev, struct qed_link_output *link); - void (*schedule_recovery_handler)(void *dev); - void (*schedule_hw_err_handler)(void *dev, - enum qed_hw_err_type err_type); - void (*dcbx_aen)(void *dev, struct qed_dcbx_get *get, u32 mib_type); - void (*get_generic_tlv_data)(void *dev, struct qed_generic_tlvs *data); - void (*get_protocol_tlv_data)(void *dev, void *data); - void (*bw_update)(void *dev); + void (*link_update)(void *dev, + struct qed_link_output *link); }; struct qed_selftest_ops { @@ -853,15 +401,6 @@ struct qed_selftest_ops { * @return 0 on success, error otherwise. */ int (*selftest_clock)(struct qed_dev *cdev); - -/** - * @brief selftest_nvram - Perform nvram test - * - * @param cdev - * - * @return 0 on success, error otherwise. 
- */ - int (*selftest_nvram) (struct qed_dev *cdev); }; struct qed_common_ops { @@ -870,61 +409,58 @@ struct qed_common_ops { struct qed_dev* (*probe)(struct pci_dev *dev, struct qed_probe_params *params); - void (*remove)(struct qed_dev *cdev); + void (*remove)(struct qed_dev *cdev); - int (*set_power_state)(struct qed_dev *cdev, pci_power_t state); + int (*set_power_state)(struct qed_dev *cdev, + pci_power_t state); - void (*set_name) (struct qed_dev *cdev, char name[]); + void (*set_id)(struct qed_dev *cdev, + char name[], + char ver_str[]); /* Client drivers need to make this call before slowpath_start. * PF params required for the call before slowpath_start is * documented within the qed_pf_params structure definition. */ - void (*update_pf_params)(struct qed_dev *cdev, - struct qed_pf_params *params); + void (*update_pf_params)(struct qed_dev *cdev, + struct qed_pf_params *params); + int (*slowpath_start)(struct qed_dev *cdev, + struct qed_slowpath_params *params); - int (*slowpath_start)(struct qed_dev *cdev, - struct qed_slowpath_params *params); - - int (*slowpath_stop)(struct qed_dev *cdev); + int (*slowpath_stop)(struct qed_dev *cdev); /* Requests to use `cnt' interrupts for fastpath. * upon success, returns number of interrupts allocated for fastpath. 
*/ - int (*set_fp_int)(struct qed_dev *cdev, u16 cnt); + int (*set_fp_int)(struct qed_dev *cdev, + u16 cnt); /* Fills `info' with pointers required for utilizing interrupts */ - int (*get_fp_int)(struct qed_dev *cdev, struct qed_int_info *info); + int (*get_fp_int)(struct qed_dev *cdev, + struct qed_int_info *info); - u32 (*sb_init)(struct qed_dev *cdev, - struct qed_sb_info *sb_info, - void *sb_virt_addr, - dma_addr_t sb_phy_addr, - u16 sb_id, - enum qed_sb_type type); + u32 (*sb_init)(struct qed_dev *cdev, + struct qed_sb_info *sb_info, + void *sb_virt_addr, + dma_addr_t sb_phy_addr, + u16 sb_id, + enum qed_sb_type type); - u32 (*sb_release)(struct qed_dev *cdev, - struct qed_sb_info *sb_info, - u16 sb_id, - enum qed_sb_type type); + u32 (*sb_release)(struct qed_dev *cdev, + struct qed_sb_info *sb_info, + u16 sb_id); - void (*simd_handler_config)(struct qed_dev *cdev, - void *token, - int index, - void (*handler)(void *)); + void (*simd_handler_config)(struct qed_dev *cdev, + void *token, + int index, + void (*handler)(void *)); - void (*simd_handler_clean)(struct qed_dev *cdev, int index); + void (*simd_handler_clean)(struct qed_dev *cdev, + int index); - int (*dbg_grc)(struct qed_dev *cdev, void *buffer, u32 *num_dumped_bytes); + int (*dbg_all_data) (struct qed_dev *cdev, void *buffer); - int (*dbg_grc_size)(struct qed_dev *cdev); - - int (*dbg_all_data)(struct qed_dev *cdev, void *buffer); - - int (*dbg_all_data_size)(struct qed_dev *cdev); - - int (*report_fatal_error)(struct devlink *devlink, - enum qed_hw_err_type err_type); + int (*dbg_all_data_size) (struct qed_dev *cdev); /** * @brief can_link_change - can the instance change the link or not @@ -974,34 +510,25 @@ struct qed_common_ops { u8 dp_level); int (*chain_alloc)(struct qed_dev *cdev, - struct qed_chain *chain, - struct qed_chain_init_params *params); + enum qed_chain_use_mode intended_use, + enum qed_chain_mode mode, + enum qed_chain_cnt_type cnt_type, + u32 num_elems, + size_t elem_size, + struct 
qed_chain *p_chain); void (*chain_free)(struct qed_dev *cdev, struct qed_chain *p_chain); /** - * @brief nvm_flash - Flash nvm data. + * @brief get_coalesce - Get coalesce parameters in usec * * @param cdev - * @param name - file containing the data + * @param rx_coal - Rx coalesce value in usec + * @param tx_coal - Tx coalesce value in usec * - * @return 0 on success, error otherwise. */ - int (*nvm_flash)(struct qed_dev *cdev, const char *name); - -/** - * @brief nvm_get_image - reads an entire image from nvram - * - * @param cdev - * @param type - type of the request nvram image - * @param buf - preallocated buffer to fill with the image - * @param len - length of the allocated buffer - * - * @return 0 on success, error otherwise - */ - int (*nvm_get_image)(struct qed_dev *cdev, - enum qed_nvm_images type, u8 *buf, u16 len); + void (*get_coalesce)(struct qed_dev *cdev, u16 *rx_coal, u16 *tx_coal); /** * @brief set_coalesce - Configure Rx coalesce value in usec @@ -1014,8 +541,8 @@ struct qed_common_ops { * * @return 0 on success, error otherwise. */ - int (*set_coalesce)(struct qed_dev *cdev, - u16 rx_coal, u16 tx_coal, void *handle); + int (*set_coalesce)(struct qed_dev *cdev, u16 rx_coal, u16 tx_coal, + u8 qid, u16 sb_id); /** * @brief set_led - Configure LED mode @@ -1027,146 +554,6 @@ struct qed_common_ops { */ int (*set_led)(struct qed_dev *cdev, enum qed_led_mode mode); - -/** - * @brief attn_clr_enable - Prevent attentions from being reasserted - * - * @param cdev - * @param clr_enable - */ - void (*attn_clr_enable)(struct qed_dev *cdev, bool clr_enable); - -/** - * @brief db_recovery_add - add doorbell information to the doorbell - * recovery mechanism. 
- * - * @param cdev - * @param db_addr - doorbell address - * @param db_data - address of where db_data is stored - * @param db_is_32b - doorbell is 32b pr 64b - * @param db_is_user - doorbell recovery addresses are user or kernel space - */ - int (*db_recovery_add)(struct qed_dev *cdev, - void __iomem *db_addr, - void *db_data, - enum qed_db_rec_width db_width, - enum qed_db_rec_space db_space); - -/** - * @brief db_recovery_del - remove doorbell information from the doorbell - * recovery mechanism. db_data serves as key (db_addr is not unique). - * - * @param cdev - * @param db_addr - doorbell address - * @param db_data - address where db_data is stored. Serves as key for the - * entry to delete. - */ - int (*db_recovery_del)(struct qed_dev *cdev, - void __iomem *db_addr, void *db_data); - -/** - * @brief recovery_process - Trigger a recovery process - * - * @param cdev - * - * @return 0 on success, error otherwise. - */ - int (*recovery_process)(struct qed_dev *cdev); - -/** - * @brief recovery_prolog - Execute the prolog operations of a recovery process - * - * @param cdev - * - * @return 0 on success, error otherwise. - */ - int (*recovery_prolog)(struct qed_dev *cdev); - -/** - * @brief update_drv_state - API to inform the change in the driver state. - * - * @param cdev - * @param active - * - */ - int (*update_drv_state)(struct qed_dev *cdev, bool active); - -/** - * @brief update_mac - API to inform the change in the mac address - * - * @param cdev - * @param mac - * - */ - int (*update_mac)(struct qed_dev *cdev, u8 *mac); - -/** - * @brief update_mtu - API to inform the change in the mtu - * - * @param cdev - * @param mtu - * - */ - int (*update_mtu)(struct qed_dev *cdev, u16 mtu); - -/** - * @brief update_wol - update of changes in the WoL configuration - * - * @param cdev - * @param enabled - true iff WoL should be enabled. 
- */ - int (*update_wol) (struct qed_dev *cdev, bool enabled); - -/** - * @brief read_module_eeprom - * - * @param cdev - * @param buf - buffer - * @param dev_addr - PHY device memory region - * @param offset - offset into eeprom contents to be read - * @param len - buffer length, i.e., max bytes to be read - */ - int (*read_module_eeprom)(struct qed_dev *cdev, - char *buf, u8 dev_addr, u32 offset, u32 len); - -/** - * @brief get_affin_hwfn_idx - * - * @param cdev - */ - u8 (*get_affin_hwfn_idx)(struct qed_dev *cdev); - -/** - * @brief read_nvm_cfg - Read NVM config attribute value. - * @param cdev - * @param buf - buffer - * @param cmd - NVM CFG command id - * @param entity_id - Entity id - * - */ - int (*read_nvm_cfg)(struct qed_dev *cdev, u8 **buf, u32 cmd, - u32 entity_id); -/** - * @brief read_nvm_cfg - Read NVM config attribute value. - * @param cdev - * @param cmd - NVM CFG command id - * - * @return config id length, 0 on error. - */ - int (*read_nvm_cfg_len)(struct qed_dev *cdev, u32 cmd); - -/** - * @brief set_grc_config - Configure value for grc config id. - * @param cdev - * @param cfg_id - grc config id - * @param val - grc config value - * - */ - int (*set_grc_config)(struct qed_dev *cdev, u32 cfg_id, u32 val); - - struct devlink* (*devlink_register)(struct qed_dev *cdev); - - void (*devlink_unregister)(struct devlink *devlink); }; #define MASK_FIELD(_name, _value) \ @@ -1184,25 +571,12 @@ struct qed_common_ops { #define GET_FIELD(value, name) \ (((value) >> (name ## _SHIFT)) & name ## _MASK) -#define GET_MFW_FIELD(name, field) \ - (((name) & (field ## _MASK)) >> (field ## _OFFSET)) - -#define SET_MFW_FIELD(name, field, value) \ - do { \ - (name) &= ~(field ## _MASK); \ - (name) |= (((value) << (field ## _OFFSET)) & (field ## _MASK));\ - } while (0) - -#define DB_ADDR_SHIFT(addr) ((addr) << DB_PWM_ADDR_OFFSET_SHIFT) - /* Debug print definitions */ -#define DP_ERR(cdev, fmt, ...) 
\ - do { \ - pr_err("[%s:%d(%s)]" fmt, \ - __func__, __LINE__, \ - DP_NAME(cdev) ? DP_NAME(cdev) : "", \ - ## __VA_ARGS__); \ - } while (0) +#define DP_ERR(cdev, fmt, ...) \ + pr_err("[%s:%d(%s)]" fmt, \ + __func__, __LINE__, \ + DP_NAME(cdev) ? DP_NAME(cdev) : "", \ + ## __VA_ARGS__) \ #define DP_NOTICE(cdev, fmt, ...) \ do { \ @@ -1269,7 +643,7 @@ enum qed_mf_mode { QED_MF_NPAR, }; -struct qed_eth_stats_common { +struct qed_eth_stats { u64 no_buff_discards; u64 packet_too_big_discard; u64 ttl0_discard; @@ -1281,7 +655,6 @@ struct qed_eth_stats_common { u64 rx_bcast_pkts; u64 mftag_filter_discards; u64 mac_filter_discards; - u64 gft_filter_drop; u64 tx_ucast_bytes; u64 tx_mcast_bytes; u64 tx_bcast_bytes; @@ -1302,6 +675,11 @@ struct qed_eth_stats_common { u64 rx_256_to_511_byte_packets; u64 rx_512_to_1023_byte_packets; u64 rx_1024_to_1518_byte_packets; + u64 rx_1519_to_1522_byte_packets; + u64 rx_1519_to_2047_byte_packets; + u64 rx_2048_to_4095_byte_packets; + u64 rx_4096_to_9216_byte_packets; + u64 rx_9217_to_16383_byte_packets; u64 rx_crc_errors; u64 rx_mac_crtl_frames; u64 rx_pause_frames; @@ -1318,8 +696,14 @@ struct qed_eth_stats_common { u64 tx_256_to_511_byte_packets; u64 tx_512_to_1023_byte_packets; u64 tx_1024_to_1518_byte_packets; + u64 tx_1519_to_2047_byte_packets; + u64 tx_2048_to_4095_byte_packets; + u64 tx_4096_to_9216_byte_packets; + u64 tx_9217_to_16383_byte_packets; u64 tx_pause_frames; u64 tx_pfc_frames; + u64 tx_lpi_entry_count; + u64 tx_total_collisions; u64 brb_truncates; u64 brb_discards; u64 rx_mac_bytes; @@ -1332,35 +716,6 @@ struct qed_eth_stats_common { u64 tx_mac_mc_packets; u64 tx_mac_bc_packets; u64 tx_mac_ctrl_frames; - u64 link_change_count; -}; - -struct qed_eth_stats_bb { - u64 rx_1519_to_1522_byte_packets; - u64 rx_1519_to_2047_byte_packets; - u64 rx_2048_to_4095_byte_packets; - u64 rx_4096_to_9216_byte_packets; - u64 rx_9217_to_16383_byte_packets; - u64 tx_1519_to_2047_byte_packets; - u64 tx_2048_to_4095_byte_packets; - u64 
tx_4096_to_9216_byte_packets; - u64 tx_9217_to_16383_byte_packets; - u64 tx_lpi_entry_count; - u64 tx_total_collisions; -}; - -struct qed_eth_stats_ah { - u64 rx_1519_to_max_byte_packets; - u64 tx_1519_to_max_byte_packets; -}; - -struct qed_eth_stats { - struct qed_eth_stats_common common; - - union { - struct qed_eth_stats_bb bb; - struct qed_eth_stats_ah ah; - }; }; #define QED_SB_IDX 0x0002 @@ -1369,15 +724,9 @@ struct qed_eth_stats { #define TX_PI(tc) (RX_PI + 1 + tc) struct qed_sb_cnt_info { - /* Original, current, and free SBs for PF */ - int orig; - int cnt; - int free_cnt; - - /* Original, current and free SBS for child VFs */ - int iov_orig; - int iov_cnt; - int free_cnt_iov; + int sb_cnt; + int sb_iov_cnt; + int sb_free_blk; }; static inline u16 qed_sb_update_sb_idx(struct qed_sb_info *sb_info) @@ -1386,13 +735,14 @@ static inline u16 qed_sb_update_sb_idx(struct qed_sb_info *sb_info) u16 rc = 0; prod = le32_to_cpu(sb_info->sb_virt->prod_index) & - STATUS_BLOCK_E4_PROD_INDEX_MASK; + STATUS_BLOCK_PROD_INDEX_MASK; if (sb_info->sb_ack != prod) { sb_info->sb_ack = prod; rc |= QED_SB_IDX; } /* Let SB update */ + mmiowb(); return rc; } @@ -1414,19 +764,21 @@ static inline void qed_sb_ack(struct qed_sb_info *sb_info, enum igu_int_cmd int_cmd, u8 upd_flg) { - u32 igu_ack; + struct igu_prod_cons_update igu_ack = { 0 }; - igu_ack = ((sb_info->sb_ack << IGU_PROD_CONS_UPDATE_SB_INDEX_SHIFT) | - (upd_flg << IGU_PROD_CONS_UPDATE_UPDATE_FLAG_SHIFT) | - (int_cmd << IGU_PROD_CONS_UPDATE_ENABLE_INT_SHIFT) | - (IGU_SEG_ACCESS_REG << - IGU_PROD_CONS_UPDATE_SEGMENT_ACCESS_SHIFT)); + igu_ack.sb_id_and_flags = + ((sb_info->sb_ack << IGU_PROD_CONS_UPDATE_SB_INDEX_SHIFT) | + (upd_flg << IGU_PROD_CONS_UPDATE_UPDATE_FLAG_SHIFT) | + (int_cmd << IGU_PROD_CONS_UPDATE_ENABLE_INT_SHIFT) | + (IGU_SEG_ACCESS_REG << + IGU_PROD_CONS_UPDATE_SEGMENT_ACCESS_SHIFT)); - DIRECT_REG_WR(sb_info->igu_addr, igu_ack); + DIRECT_REG_WR(sb_info->igu_addr, igu_ack.sb_id_and_flags); /* Both segments 
(interrupts & acks) are written to same place address; * Need to guarantee all commands will be received (in-order) by HW. */ + mmiowb(); barrier(); } diff --git a/include/linux/qed/qed_iov_if.h b/include/linux/qed/qed_iov_if.h index 8e31a28e51..5a4f8d0899 100644 --- a/include/linux/qed/qed_iov_if.h +++ b/include/linux/qed/qed_iov_if.h @@ -1,7 +1,9 @@ -/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */ /* QLogic qed NIC Driver - * Copyright (c) 2015-2017 QLogic Corporation - * Copyright (c) 2019-2020 Marvell International Ltd. + * Copyright (c) 2015 QLogic Corporation + * + * This software is available under the terms of the GNU General Public License + * (GPL) Version 2, available from the file COPYING in the main directory of + * this source tree. */ #ifndef _QED_IOV_IF_H @@ -27,8 +29,6 @@ struct qed_iov_hv_ops { int (*set_rate) (struct qed_dev *cdev, int vfid, u32 min_rate, u32 max_rate); - - int (*set_trust) (struct qed_dev *cdev, int vfid, bool trust); }; #endif diff --git a/include/linux/qed/qed_ll2_if.h b/include/linux/qed/qed_ll2_if.h index ff808d2488..fd75c265db 100644 --- a/include/linux/qed/qed_ll2_if.h +++ b/include/linux/qed/qed_ll2_if.h @@ -1,7 +1,10 @@ -/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */ /* QLogic qed NIC Driver - * Copyright (c) 2015-2017 QLogic Corporation - * Copyright (c) 2019-2020 Marvell International Ltd. + * + * Copyright (c) 2015 QLogic Corporation + * + * This software is available under the terms of the GNU General Public License + * (GPL) Version 2, available from the file COPYING in the main directory of + * this source tree. 
*/ #ifndef _QED_LL2_IF_H @@ -12,47 +15,11 @@ #include #include #include +#include #include #include #include -enum qed_ll2_conn_type { - QED_LL2_TYPE_FCOE, - QED_LL2_TYPE_TCP_ULP, - QED_LL2_TYPE_TEST, - QED_LL2_TYPE_OOO, - QED_LL2_TYPE_RESERVED2, - QED_LL2_TYPE_ROCE, - QED_LL2_TYPE_IWARP, - QED_LL2_TYPE_RESERVED3, - MAX_QED_LL2_CONN_TYPE -}; - -enum qed_ll2_rx_conn_type { - QED_LL2_RX_TYPE_LEGACY, - QED_LL2_RX_TYPE_CTX, - MAX_QED_LL2_RX_CONN_TYPE -}; - -enum qed_ll2_roce_flavor_type { - QED_LL2_ROCE, - QED_LL2_RROCE, - MAX_QED_LL2_ROCE_FLAVOR_TYPE -}; - -enum qed_ll2_tx_dest { - QED_LL2_TX_DEST_NW, /* Light L2 TX Destination to the Network */ - QED_LL2_TX_DEST_LB, /* Light L2 TX Destination to the Loopback */ - QED_LL2_TX_DEST_DROP, /* Light L2 Drop the TX packet */ - QED_LL2_TX_DEST_MAX -}; - -enum qed_ll2_error_handle { - QED_LL2_DROP_PACKET, - QED_LL2_DO_NOTHING, - QED_LL2_ASSERT, -}; - struct qed_ll2_stats { u64 gsi_invalid_hdr; u64 gsi_invalid_pkt_length; @@ -77,114 +44,6 @@ struct qed_ll2_stats { u64 sent_bcast_pkts; }; -struct qed_ll2_comp_rx_data { - void *cookie; - dma_addr_t rx_buf_addr; - u16 parse_flags; - u16 err_flags; - u16 vlan; - bool b_last_packet; - u8 connection_handle; - - union { - u16 packet_length; - u16 data_length; - } length; - - u32 opaque_data_0; - u32 opaque_data_1; - - /* GSI only */ - u32 src_qp; - u16 qp_id; - - union { - u8 placement_offset; - u8 data_length_error; - } u; -}; - -typedef -void (*qed_ll2_complete_rx_packet_cb)(void *cxt, - struct qed_ll2_comp_rx_data *data); - -typedef -void (*qed_ll2_release_rx_packet_cb)(void *cxt, - u8 connection_handle, - void *cookie, - dma_addr_t rx_buf_addr, - bool b_last_packet); - -typedef -void (*qed_ll2_complete_tx_packet_cb)(void *cxt, - u8 connection_handle, - void *cookie, - dma_addr_t first_frag_addr, - bool b_last_fragment, - bool b_last_packet); - -typedef -void (*qed_ll2_release_tx_packet_cb)(void *cxt, - u8 connection_handle, - void *cookie, - dma_addr_t first_frag_addr, - bool 
b_last_fragment, bool b_last_packet); - -typedef -void (*qed_ll2_slowpath_cb)(void *cxt, u8 connection_handle, - u32 opaque_data_0, u32 opaque_data_1); - -struct qed_ll2_cbs { - qed_ll2_complete_rx_packet_cb rx_comp_cb; - qed_ll2_release_rx_packet_cb rx_release_cb; - qed_ll2_complete_tx_packet_cb tx_comp_cb; - qed_ll2_release_tx_packet_cb tx_release_cb; - qed_ll2_slowpath_cb slowpath_cb; - void *cookie; -}; - -struct qed_ll2_acquire_data_inputs { - enum qed_ll2_rx_conn_type rx_conn_type; - enum qed_ll2_conn_type conn_type; - u16 mtu; - u16 rx_num_desc; - u16 rx_num_ooo_buffers; - u8 rx_drop_ttl0_flg; - u8 rx_vlan_removal_en; - u16 tx_num_desc; - u8 tx_max_bds_per_packet; - u8 tx_tc; - enum qed_ll2_tx_dest tx_dest; - enum qed_ll2_error_handle ai_err_packet_too_big; - enum qed_ll2_error_handle ai_err_no_buf; - bool secondary_queue; - u8 gsi_enable; -}; - -struct qed_ll2_acquire_data { - struct qed_ll2_acquire_data_inputs input; - const struct qed_ll2_cbs *cbs; - - /* Output container for LL2 connection's handle */ - u8 *p_connection_handle; -}; - -struct qed_ll2_tx_pkt_info { - void *cookie; - dma_addr_t first_frag; - enum qed_ll2_tx_dest tx_dest; - enum qed_ll2_roce_flavor_type qed_roce_flavor; - u16 vlan; - u16 l4_hdr_offset_w; /* from start of packet */ - u16 first_frag_len; - u8 num_of_bds; - u8 bd_flags; - bool enable_ip_cksum; - bool enable_l4_cksum; - bool calc_ip_len; - bool remove_stag; -}; - #define QED_LL2_UNUSED_HANDLE (0xff) struct qed_ll2_cb_ops { @@ -201,11 +60,6 @@ struct qed_ll2_params { u8 ll2_mac_address[ETH_ALEN]; }; -enum qed_ll2_xmit_flags { - /* FIP discovery packet */ - QED_LL2_XMIT_FLAGS_FIP_DISCOVERY -}; - struct qed_ll2_ops { /** * @brief start - initializes ll2 @@ -231,12 +85,10 @@ struct qed_ll2_ops { * * @param cdev * @param skb - * @param xmit_flags - Transmit options defined by the enum qed_ll2_xmit_flags. * * @return 0 on success, otherwise error value. 
*/ - int (*start_xmit)(struct qed_dev *cdev, struct sk_buff *skb, - unsigned long xmit_flags); + int (*start_xmit)(struct qed_dev *cdev, struct sk_buff *skb); /** * @brief register_cb_ops - protocol driver register the callback for Rx/Tx diff --git a/include/linux/qed/qed_roce_if.h b/include/linux/qed/qed_roce_if.h new file mode 100644 index 0000000000..53047d3fa6 --- /dev/null +++ b/include/linux/qed/qed_roce_if.h @@ -0,0 +1,604 @@ +/* QLogic qed NIC Driver + * Copyright (c) 2015-2016 QLogic Corporation + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and /or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ +#ifndef _QED_ROCE_IF_H +#define _QED_ROCE_IF_H +#include +#include +#include +#include +#include +#include +#include +#include +#include + +enum qed_roce_ll2_tx_dest { + /* Light L2 TX Destination to the Network */ + QED_ROCE_LL2_TX_DEST_NW, + + /* Light L2 TX Destination to the Loopback */ + QED_ROCE_LL2_TX_DEST_LB, + QED_ROCE_LL2_TX_DEST_MAX +}; + +#define QED_RDMA_MAX_CNQ_SIZE (0xFFFF) + +/* rdma interface */ + +enum qed_roce_qp_state { + QED_ROCE_QP_STATE_RESET, + QED_ROCE_QP_STATE_INIT, + QED_ROCE_QP_STATE_RTR, + QED_ROCE_QP_STATE_RTS, + QED_ROCE_QP_STATE_SQD, + QED_ROCE_QP_STATE_ERR, + QED_ROCE_QP_STATE_SQE +}; + +enum qed_rdma_tid_type { + QED_RDMA_TID_REGISTERED_MR, + QED_RDMA_TID_FMR, + QED_RDMA_TID_MW_TYPE1, + QED_RDMA_TID_MW_TYPE2A +}; + +struct qed_rdma_events { + void *context; + void (*affiliated_event)(void *context, u8 fw_event_code, + void *fw_handle); + void (*unaffiliated_event)(void *context, u8 event_code); +}; + +struct qed_rdma_device { + u32 vendor_id; + u32 vendor_part_id; + u32 hw_ver; + u64 fw_ver; + + u64 node_guid; + u64 sys_image_guid; + + u8 max_cnq; + u8 max_sge; + u8 max_srq_sge; + u16 max_inline; + u32 max_wqe; + u32 max_srq_wqe; + u8 max_qp_resp_rd_atomic_resc; + u8 max_qp_req_rd_atomic_resc; + u64 max_dev_resp_rd_atomic_resc; + u32 max_cq; + u32 max_qp; + u32 max_srq; + u32 max_mr; + u64 max_mr_size; + u32 max_cqe; + u32 max_mw; + u32 max_fmr; + u32 max_mr_mw_fmr_pbl; + u64 max_mr_mw_fmr_size; + u32 max_pd; + u32 max_ah; + u8 max_pkey; + u16 max_srq_wr; + u8 max_stats_queues; + u32 dev_caps; + + /* Abilty to support RNR-NAK generation */ + +#define QED_RDMA_DEV_CAP_RNR_NAK_MASK 0x1 +#define QED_RDMA_DEV_CAP_RNR_NAK_SHIFT 0 + /* Abilty to support shutdown port */ +#define QED_RDMA_DEV_CAP_SHUTDOWN_PORT_MASK 0x1 +#define QED_RDMA_DEV_CAP_SHUTDOWN_PORT_SHIFT 1 + /* Abilty to support port active event */ +#define QED_RDMA_DEV_CAP_PORT_ACTIVE_EVENT_MASK 0x1 +#define QED_RDMA_DEV_CAP_PORT_ACTIVE_EVENT_SHIFT 2 + /* Abilty to 
support port change event */ +#define QED_RDMA_DEV_CAP_PORT_CHANGE_EVENT_MASK 0x1 +#define QED_RDMA_DEV_CAP_PORT_CHANGE_EVENT_SHIFT 3 + /* Abilty to support system image GUID */ +#define QED_RDMA_DEV_CAP_SYS_IMAGE_MASK 0x1 +#define QED_RDMA_DEV_CAP_SYS_IMAGE_SHIFT 4 + /* Abilty to support bad P_Key counter support */ +#define QED_RDMA_DEV_CAP_BAD_PKEY_CNT_MASK 0x1 +#define QED_RDMA_DEV_CAP_BAD_PKEY_CNT_SHIFT 5 + /* Abilty to support atomic operations */ +#define QED_RDMA_DEV_CAP_ATOMIC_OP_MASK 0x1 +#define QED_RDMA_DEV_CAP_ATOMIC_OP_SHIFT 6 +#define QED_RDMA_DEV_CAP_RESIZE_CQ_MASK 0x1 +#define QED_RDMA_DEV_CAP_RESIZE_CQ_SHIFT 7 + /* Abilty to support modifying the maximum number of + * outstanding work requests per QP + */ +#define QED_RDMA_DEV_CAP_RESIZE_MAX_WR_MASK 0x1 +#define QED_RDMA_DEV_CAP_RESIZE_MAX_WR_SHIFT 8 + /* Abilty to support automatic path migration */ +#define QED_RDMA_DEV_CAP_AUTO_PATH_MIG_MASK 0x1 +#define QED_RDMA_DEV_CAP_AUTO_PATH_MIG_SHIFT 9 + /* Abilty to support the base memory management extensions */ +#define QED_RDMA_DEV_CAP_BASE_MEMORY_EXT_MASK 0x1 +#define QED_RDMA_DEV_CAP_BASE_MEMORY_EXT_SHIFT 10 +#define QED_RDMA_DEV_CAP_BASE_QUEUE_EXT_MASK 0x1 +#define QED_RDMA_DEV_CAP_BASE_QUEUE_EXT_SHIFT 11 + /* Abilty to support multipile page sizes per memory region */ +#define QED_RDMA_DEV_CAP_MULTI_PAGE_PER_MR_EXT_MASK 0x1 +#define QED_RDMA_DEV_CAP_MULTI_PAGE_PER_MR_EXT_SHIFT 12 + /* Abilty to support block list physical buffer list */ +#define QED_RDMA_DEV_CAP_BLOCK_MODE_MASK 0x1 +#define QED_RDMA_DEV_CAP_BLOCK_MODE_SHIFT 13 + /* Abilty to support zero based virtual addresses */ +#define QED_RDMA_DEV_CAP_ZBVA_MASK 0x1 +#define QED_RDMA_DEV_CAP_ZBVA_SHIFT 14 + /* Abilty to support local invalidate fencing */ +#define QED_RDMA_DEV_CAP_LOCAL_INV_FENCE_MASK 0x1 +#define QED_RDMA_DEV_CAP_LOCAL_INV_FENCE_SHIFT 15 + /* Abilty to support Loopback on QP */ +#define QED_RDMA_DEV_CAP_LB_INDICATOR_MASK 0x1 +#define QED_RDMA_DEV_CAP_LB_INDICATOR_SHIFT 16 + 
u64 page_size_caps; + u8 dev_ack_delay; + u32 reserved_lkey; + u32 bad_pkey_counter; + struct qed_rdma_events events; +}; + +enum qed_port_state { + QED_RDMA_PORT_UP, + QED_RDMA_PORT_DOWN, +}; + +enum qed_roce_capability { + QED_ROCE_V1 = 1 << 0, + QED_ROCE_V2 = 1 << 1, +}; + +struct qed_rdma_port { + enum qed_port_state port_state; + int link_speed; + u64 max_msg_size; + u8 source_gid_table_len; + void *source_gid_table_ptr; + u8 pkey_table_len; + void *pkey_table_ptr; + u32 pkey_bad_counter; + enum qed_roce_capability capability; +}; + +struct qed_rdma_cnq_params { + u8 num_pbl_pages; + u64 pbl_ptr; +}; + +/* The CQ Mode affects the CQ doorbell transaction size. + * 64/32 bit machines should configure to 32/16 bits respectively. + */ +enum qed_rdma_cq_mode { + QED_RDMA_CQ_MODE_16_BITS, + QED_RDMA_CQ_MODE_32_BITS, +}; + +struct qed_roce_dcqcn_params { + u8 notification_point; + u8 reaction_point; + + /* fields for notification point */ + u32 cnp_send_timeout; + + /* fields for reaction point */ + u32 rl_bc_rate; + u16 rl_max_rate; + u16 rl_r_ai; + u16 rl_r_hai; + u16 dcqcn_g; + u32 dcqcn_k_us; + u32 dcqcn_timeout_us; +}; + +struct qed_rdma_start_in_params { + struct qed_rdma_events *events; + struct qed_rdma_cnq_params cnq_pbl_list[128]; + u8 desired_cnq; + enum qed_rdma_cq_mode cq_mode; + struct qed_roce_dcqcn_params dcqcn_params; + u16 max_mtu; + u8 mac_addr[ETH_ALEN]; + u8 iwarp_flags; +}; + +struct qed_rdma_add_user_out_params { + u16 dpi; + u64 dpi_addr; + u64 dpi_phys_addr; + u32 dpi_size; +}; + +enum roce_mode { + ROCE_V1, + ROCE_V2_IPV4, + ROCE_V2_IPV6, + MAX_ROCE_MODE +}; + +union qed_gid { + u8 bytes[16]; + u16 words[8]; + u32 dwords[4]; + u64 qwords[2]; + u32 ipv4_addr; +}; + +struct qed_rdma_register_tid_in_params { + u32 itid; + enum qed_rdma_tid_type tid_type; + u8 key; + u16 pd; + bool local_read; + bool local_write; + bool remote_read; + bool remote_write; + bool remote_atomic; + bool mw_bind; + u64 pbl_ptr; + bool pbl_two_level; + u8 
pbl_page_size_log; + u8 page_size_log; + u32 fbo; + u64 length; + u64 vaddr; + bool zbva; + bool phy_mr; + bool dma_mr; + + bool dif_enabled; + u64 dif_error_addr; + u64 dif_runt_addr; +}; + +struct qed_rdma_create_cq_in_params { + u32 cq_handle_lo; + u32 cq_handle_hi; + u32 cq_size; + u16 dpi; + bool pbl_two_level; + u64 pbl_ptr; + u16 pbl_num_pages; + u8 pbl_page_size_log; + u8 cnq_id; + u16 int_timeout; +}; + +struct qed_rdma_create_srq_in_params { + u64 pbl_base_addr; + u64 prod_pair_addr; + u16 num_pages; + u16 pd_id; + u16 page_size; +}; + +struct qed_rdma_destroy_cq_in_params { + u16 icid; +}; + +struct qed_rdma_destroy_cq_out_params { + u16 num_cq_notif; +}; + +struct qed_rdma_create_qp_in_params { + u32 qp_handle_lo; + u32 qp_handle_hi; + u32 qp_handle_async_lo; + u32 qp_handle_async_hi; + bool use_srq; + bool signal_all; + bool fmr_and_reserved_lkey; + u16 pd; + u16 dpi; + u16 sq_cq_id; + u16 sq_num_pages; + u64 sq_pbl_ptr; + u8 max_sq_sges; + u16 rq_cq_id; + u16 rq_num_pages; + u64 rq_pbl_ptr; + u16 srq_id; + u8 stats_queue; +}; + +struct qed_rdma_create_qp_out_params { + u32 qp_id; + u16 icid; + void *rq_pbl_virt; + dma_addr_t rq_pbl_phys; + void *sq_pbl_virt; + dma_addr_t sq_pbl_phys; +}; + +struct qed_rdma_modify_qp_in_params { + u32 modify_flags; +#define QED_RDMA_MODIFY_QP_VALID_NEW_STATE_MASK 0x1 +#define QED_RDMA_MODIFY_QP_VALID_NEW_STATE_SHIFT 0 +#define QED_ROCE_MODIFY_QP_VALID_PKEY_MASK 0x1 +#define QED_ROCE_MODIFY_QP_VALID_PKEY_SHIFT 1 +#define QED_RDMA_MODIFY_QP_VALID_RDMA_OPS_EN_MASK 0x1 +#define QED_RDMA_MODIFY_QP_VALID_RDMA_OPS_EN_SHIFT 2 +#define QED_ROCE_MODIFY_QP_VALID_DEST_QP_MASK 0x1 +#define QED_ROCE_MODIFY_QP_VALID_DEST_QP_SHIFT 3 +#define QED_ROCE_MODIFY_QP_VALID_ADDRESS_VECTOR_MASK 0x1 +#define QED_ROCE_MODIFY_QP_VALID_ADDRESS_VECTOR_SHIFT 4 +#define QED_ROCE_MODIFY_QP_VALID_RQ_PSN_MASK 0x1 +#define QED_ROCE_MODIFY_QP_VALID_RQ_PSN_SHIFT 5 +#define QED_ROCE_MODIFY_QP_VALID_SQ_PSN_MASK 0x1 +#define 
QED_ROCE_MODIFY_QP_VALID_SQ_PSN_SHIFT 6 +#define QED_RDMA_MODIFY_QP_VALID_MAX_RD_ATOMIC_REQ_MASK 0x1 +#define QED_RDMA_MODIFY_QP_VALID_MAX_RD_ATOMIC_REQ_SHIFT 7 +#define QED_RDMA_MODIFY_QP_VALID_MAX_RD_ATOMIC_RESP_MASK 0x1 +#define QED_RDMA_MODIFY_QP_VALID_MAX_RD_ATOMIC_RESP_SHIFT 8 +#define QED_ROCE_MODIFY_QP_VALID_ACK_TIMEOUT_MASK 0x1 +#define QED_ROCE_MODIFY_QP_VALID_ACK_TIMEOUT_SHIFT 9 +#define QED_ROCE_MODIFY_QP_VALID_RETRY_CNT_MASK 0x1 +#define QED_ROCE_MODIFY_QP_VALID_RETRY_CNT_SHIFT 10 +#define QED_ROCE_MODIFY_QP_VALID_RNR_RETRY_CNT_MASK 0x1 +#define QED_ROCE_MODIFY_QP_VALID_RNR_RETRY_CNT_SHIFT 11 +#define QED_ROCE_MODIFY_QP_VALID_MIN_RNR_NAK_TIMER_MASK 0x1 +#define QED_ROCE_MODIFY_QP_VALID_MIN_RNR_NAK_TIMER_SHIFT 12 +#define QED_ROCE_MODIFY_QP_VALID_E2E_FLOW_CONTROL_EN_MASK 0x1 +#define QED_ROCE_MODIFY_QP_VALID_E2E_FLOW_CONTROL_EN_SHIFT 13 +#define QED_ROCE_MODIFY_QP_VALID_ROCE_MODE_MASK 0x1 +#define QED_ROCE_MODIFY_QP_VALID_ROCE_MODE_SHIFT 14 + + enum qed_roce_qp_state new_state; + u16 pkey; + bool incoming_rdma_read_en; + bool incoming_rdma_write_en; + bool incoming_atomic_en; + bool e2e_flow_control_en; + u32 dest_qp; + bool lb_indication; + u16 mtu; + u8 traffic_class_tos; + u8 hop_limit_ttl; + u32 flow_label; + union qed_gid sgid; + union qed_gid dgid; + u16 udp_src_port; + + u16 vlan_id; + + u32 rq_psn; + u32 sq_psn; + u8 max_rd_atomic_resp; + u8 max_rd_atomic_req; + u32 ack_timeout; + u8 retry_cnt; + u8 rnr_retry_cnt; + u8 min_rnr_nak_timer; + bool sqd_async; + u8 remote_mac_addr[6]; + u8 local_mac_addr[6]; + bool use_local_mac; + enum roce_mode roce_mode; +}; + +struct qed_rdma_query_qp_out_params { + enum qed_roce_qp_state state; + u32 rq_psn; + u32 sq_psn; + bool draining; + u16 mtu; + u32 dest_qp; + bool incoming_rdma_read_en; + bool incoming_rdma_write_en; + bool incoming_atomic_en; + bool e2e_flow_control_en; + union qed_gid sgid; + union qed_gid dgid; + u32 flow_label; + u8 hop_limit_ttl; + u8 traffic_class_tos; + u32 timeout; + u8 rnr_retry; 
+ u8 retry_cnt; + u8 min_rnr_nak_timer; + u16 pkey_index; + u8 max_rd_atomic; + u8 max_dest_rd_atomic; + bool sqd_async; +}; + +struct qed_rdma_create_srq_out_params { + u16 srq_id; +}; + +struct qed_rdma_destroy_srq_in_params { + u16 srq_id; +}; + +struct qed_rdma_modify_srq_in_params { + u32 wqe_limit; + u16 srq_id; +}; + +struct qed_rdma_stats_out_params { + u64 sent_bytes; + u64 sent_pkts; + u64 rcv_bytes; + u64 rcv_pkts; +}; + +struct qed_rdma_counters_out_params { + u64 pd_count; + u64 max_pd; + u64 dpi_count; + u64 max_dpi; + u64 cq_count; + u64 max_cq; + u64 qp_count; + u64 max_qp; + u64 tid_count; + u64 max_tid; +}; + +#define QED_ROCE_TX_HEAD_FAILURE (1) +#define QED_ROCE_TX_FRAG_FAILURE (2) + +struct qed_roce_ll2_header { + void *vaddr; + dma_addr_t baddr; + size_t len; +}; + +struct qed_roce_ll2_buffer { + dma_addr_t baddr; + size_t len; +}; + +struct qed_roce_ll2_packet { + struct qed_roce_ll2_header header; + int n_seg; + struct qed_roce_ll2_buffer payload[RDMA_MAX_SGE_PER_SQ_WQE]; + int roce_mode; + enum qed_roce_ll2_tx_dest tx_dest; +}; + +struct qed_roce_ll2_tx_params { + int reserved; +}; + +struct qed_roce_ll2_rx_params { + u16 vlan_id; + u8 smac[ETH_ALEN]; + int rc; +}; + +struct qed_roce_ll2_cbs { + void (*tx_cb)(void *pdev, struct qed_roce_ll2_packet *pkt); + + void (*rx_cb)(void *pdev, struct qed_roce_ll2_packet *pkt, + struct qed_roce_ll2_rx_params *params); +}; + +struct qed_roce_ll2_params { + u16 max_rx_buffers; + u16 max_tx_buffers; + u16 mtu; + u8 mac_address[ETH_ALEN]; + struct qed_roce_ll2_cbs cbs; + void *cb_cookie; +}; + +struct qed_roce_ll2_info { + u8 handle; + struct qed_roce_ll2_cbs cbs; + u8 mac_address[ETH_ALEN]; + void *cb_cookie; + + /* Lock to protect ll2 */ + struct mutex lock; +}; + +enum qed_rdma_type { + QED_RDMA_TYPE_ROCE, +}; + +struct qed_dev_rdma_info { + struct qed_dev_info common; + enum qed_rdma_type rdma_type; +}; + +struct qed_rdma_ops { + const struct qed_common_ops *common; + + int (*fill_dev_info)(struct 
qed_dev *cdev, + struct qed_dev_rdma_info *info); + void *(*rdma_get_rdma_ctx)(struct qed_dev *cdev); + + int (*rdma_init)(struct qed_dev *dev, + struct qed_rdma_start_in_params *iparams); + + int (*rdma_add_user)(void *rdma_cxt, + struct qed_rdma_add_user_out_params *oparams); + + void (*rdma_remove_user)(void *rdma_cxt, u16 dpi); + int (*rdma_stop)(void *rdma_cxt); + struct qed_rdma_device* (*rdma_query_device)(void *rdma_cxt); + struct qed_rdma_port* (*rdma_query_port)(void *rdma_cxt); + int (*rdma_get_start_sb)(struct qed_dev *cdev); + int (*rdma_get_min_cnq_msix)(struct qed_dev *cdev); + void (*rdma_cnq_prod_update)(void *rdma_cxt, u8 cnq_index, u16 prod); + int (*rdma_get_rdma_int)(struct qed_dev *cdev, + struct qed_int_info *info); + int (*rdma_set_rdma_int)(struct qed_dev *cdev, u16 cnt); + int (*rdma_alloc_pd)(void *rdma_cxt, u16 *pd); + void (*rdma_dealloc_pd)(void *rdma_cxt, u16 pd); + int (*rdma_create_cq)(void *rdma_cxt, + struct qed_rdma_create_cq_in_params *params, + u16 *icid); + int (*rdma_destroy_cq)(void *rdma_cxt, + struct qed_rdma_destroy_cq_in_params *iparams, + struct qed_rdma_destroy_cq_out_params *oparams); + struct qed_rdma_qp * + (*rdma_create_qp)(void *rdma_cxt, + struct qed_rdma_create_qp_in_params *iparams, + struct qed_rdma_create_qp_out_params *oparams); + + int (*rdma_modify_qp)(void *roce_cxt, struct qed_rdma_qp *qp, + struct qed_rdma_modify_qp_in_params *iparams); + + int (*rdma_query_qp)(void *rdma_cxt, struct qed_rdma_qp *qp, + struct qed_rdma_query_qp_out_params *oparams); + int (*rdma_destroy_qp)(void *rdma_cxt, struct qed_rdma_qp *qp); + int + (*rdma_register_tid)(void *rdma_cxt, + struct qed_rdma_register_tid_in_params *iparams); + int (*rdma_deregister_tid)(void *rdma_cxt, u32 itid); + int (*rdma_alloc_tid)(void *rdma_cxt, u32 *itid); + void (*rdma_free_tid)(void *rdma_cxt, u32 itid); + int (*roce_ll2_start)(struct qed_dev *cdev, + struct qed_roce_ll2_params *params); + int (*roce_ll2_stop)(struct qed_dev *cdev); + int 
(*roce_ll2_tx)(struct qed_dev *cdev, + struct qed_roce_ll2_packet *packet, + struct qed_roce_ll2_tx_params *params); + int (*roce_ll2_post_rx_buffer)(struct qed_dev *cdev, + struct qed_roce_ll2_buffer *buf, + u64 cookie, u8 notify_fw); + int (*roce_ll2_set_mac_filter)(struct qed_dev *cdev, + u8 *old_mac_address, + u8 *new_mac_address); + int (*roce_ll2_stats)(struct qed_dev *cdev, + struct qed_ll2_stats *stats); +}; + +const struct qed_rdma_ops *qed_get_rdma_ops(void); + +#endif diff --git a/include/linux/qed/qede_roce.h b/include/linux/qed/qede_roce.h new file mode 100644 index 0000000000..f48d64b0e2 --- /dev/null +++ b/include/linux/qed/qede_roce.h @@ -0,0 +1,88 @@ +/* QLogic qedr NIC Driver + * Copyright (c) 2015-2016 QLogic Corporation + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and /or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ +#ifndef QEDE_ROCE_H +#define QEDE_ROCE_H + +struct qedr_dev; +struct qed_dev; +struct qede_dev; + +enum qede_roce_event { + QEDE_UP, + QEDE_DOWN, + QEDE_CHANGE_ADDR, + QEDE_CLOSE +}; + +struct qede_roce_event_work { + struct list_head list; + struct work_struct work; + void *ptr; + enum qede_roce_event event; +}; + +struct qedr_driver { + unsigned char name[32]; + + struct qedr_dev* (*add)(struct qed_dev *, struct pci_dev *, + struct net_device *); + + void (*remove)(struct qedr_dev *); + void (*notify)(struct qedr_dev *, enum qede_roce_event); +}; + +/* APIs for RoCE driver to register callback handlers, + * which will be invoked when device is added, removed, ifup, ifdown + */ +int qede_roce_register_driver(struct qedr_driver *drv); +void qede_roce_unregister_driver(struct qedr_driver *drv); + +bool qede_roce_supported(struct qede_dev *dev); + +#if IS_ENABLED(CONFIG_QED_RDMA) +int qede_roce_dev_add(struct qede_dev *dev); +void qede_roce_dev_event_open(struct qede_dev *dev); +void qede_roce_dev_event_close(struct qede_dev *dev); +void qede_roce_dev_remove(struct qede_dev *dev); +void qede_roce_event_changeaddr(struct qede_dev *qedr); +#else +static inline int qede_roce_dev_add(struct qede_dev *dev) +{ + return 0; +} + +static inline void qede_roce_dev_event_open(struct qede_dev *dev) {} +static inline void qede_roce_dev_event_close(struct qede_dev *dev) {} +static inline void qede_roce_dev_remove(struct qede_dev *dev) {} +static inline void qede_roce_event_changeaddr(struct qede_dev *qedr) {} +#endif +#endif diff --git a/include/linux/qed/rdma_common.h b/include/linux/qed/rdma_common.h index bab078b258..7663725faa 100644 --- a/include/linux/qed/rdma_common.h +++ 
b/include/linux/qed/rdma_common.h @@ -1,36 +1,34 @@ -/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */ /* QLogic qed NIC Driver - * Copyright (c) 2015-2017 QLogic Corporation - * Copyright (c) 2019-2020 Marvell International Ltd. + * Copyright (c) 2015 QLogic Corporation + * + * This software is available under the terms of the GNU General Public License + * (GPL) Version 2, available from the file COPYING in the main directory of + * this source tree. */ #ifndef __RDMA_COMMON__ #define __RDMA_COMMON__ - /************************/ /* RDMA FW CONSTANTS */ /************************/ -#define RDMA_RESERVED_LKEY (0) -#define RDMA_RING_PAGE_SIZE (0x1000) +#define RDMA_RESERVED_LKEY (0) +#define RDMA_RING_PAGE_SIZE (0x1000) -#define RDMA_MAX_SGE_PER_SQ_WQE (4) -#define RDMA_MAX_SGE_PER_RQ_WQE (4) +#define RDMA_MAX_SGE_PER_SQ_WQE (4) +#define RDMA_MAX_SGE_PER_RQ_WQE (4) -#define RDMA_MAX_DATA_SIZE_IN_WQE (0x80000000) +#define RDMA_MAX_DATA_SIZE_IN_WQE (0x7FFFFFFF) -#define RDMA_REQ_RD_ATOMIC_ELM_SIZE (0x50) -#define RDMA_RESP_RD_ATOMIC_ELM_SIZE (0x20) +#define RDMA_REQ_RD_ATOMIC_ELM_SIZE (0x50) +#define RDMA_RESP_RD_ATOMIC_ELM_SIZE (0x20) -#define RDMA_MAX_CQS (64 * 1024) -#define RDMA_MAX_TIDS (128 * 1024 - 1) -#define RDMA_MAX_PDS (64 * 1024) -#define RDMA_MAX_XRC_SRQS (1024) -#define RDMA_MAX_SRQS (32 * 1024) +#define RDMA_MAX_CQS (64 * 1024) +#define RDMA_MAX_TIDS (128 * 1024 - 1) +#define RDMA_MAX_PDS (64 * 1024) -#define RDMA_NUM_STATISTIC_COUNTERS MAX_NUM_VPORTS -#define RDMA_NUM_STATISTIC_COUNTERS_K2 MAX_NUM_VPORTS_K2 -#define RDMA_NUM_STATISTIC_COUNTERS_BB MAX_NUM_VPORTS_BB +#define RDMA_NUM_STATISTIC_COUNTERS MAX_NUM_VPORTS +#define RDMA_NUM_STATISTIC_COUNTERS_BB MAX_NUM_VPORTS_BB #define RDMA_TASK_TYPE (PROTOCOLID_ROCE) diff --git a/include/linux/qed/roce_common.h b/include/linux/qed/roce_common.h index ccddd7a96b..2eeaf3dc66 100644 --- a/include/linux/qed/roce_common.h +++ b/include/linux/qed/roce_common.h @@ -1,43 +1,17 @@ -/* 
SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */ /* QLogic qed NIC Driver - * Copyright (c) 2015-2017 QLogic Corporation - * Copyright (c) 2019-2020 Marvell International Ltd. + * Copyright (c) 2015 QLogic Corporation + * + * This software is available under the terms of the GNU General Public License + * (GPL) Version 2, available from the file COPYING in the main directory of + * this source tree. */ #ifndef __ROCE_COMMON__ #define __ROCE_COMMON__ -/************************/ -/* ROCE FW CONSTANTS */ -/************************/ +#define ROCE_REQ_MAX_INLINE_DATA_SIZE (256) +#define ROCE_REQ_MAX_SINGLE_SQ_WQE_SIZE (288) -#define ROCE_REQ_MAX_INLINE_DATA_SIZE (256) -#define ROCE_REQ_MAX_SINGLE_SQ_WQE_SIZE (288) - -#define ROCE_MAX_QPS (32 * 1024) -#define ROCE_DCQCN_NP_MAX_QPS (64) -#define ROCE_DCQCN_RP_MAX_QPS (64) -#define ROCE_LKEY_MW_DIF_EN_BIT (28) - -/* Affiliated asynchronous events / errors enumeration */ -enum roce_async_events_type { - ROCE_ASYNC_EVENT_NONE = 0, - ROCE_ASYNC_EVENT_COMM_EST = 1, - ROCE_ASYNC_EVENT_SQ_DRAINED, - ROCE_ASYNC_EVENT_SRQ_LIMIT, - ROCE_ASYNC_EVENT_LAST_WQE_REACHED, - ROCE_ASYNC_EVENT_CQ_ERR, - ROCE_ASYNC_EVENT_LOCAL_INVALID_REQUEST_ERR, - ROCE_ASYNC_EVENT_LOCAL_CATASTROPHIC_ERR, - ROCE_ASYNC_EVENT_LOCAL_ACCESS_ERR, - ROCE_ASYNC_EVENT_QP_CATASTROPHIC_ERR, - ROCE_ASYNC_EVENT_CQ_OVERFLOW_ERR, - ROCE_ASYNC_EVENT_SRQ_EMPTY, - ROCE_ASYNC_EVENT_DESTROY_QP_DONE, - ROCE_ASYNC_EVENT_XRC_DOMAIN_ERR, - ROCE_ASYNC_EVENT_INVALID_XRCETH_ERR, - ROCE_ASYNC_EVENT_XRC_SRQ_CATASTROPHIC_ERR, - MAX_ROCE_ASYNC_EVENTS_TYPE -}; +#define ROCE_MAX_QPS (32 * 1024) #endif /* __ROCE_COMMON__ */ diff --git a/include/linux/qed/storage_common.h b/include/linux/qed/storage_common.h index 91896e8793..3b8e1efd9b 100644 --- a/include/linux/qed/storage_common.h +++ b/include/linux/qed/storage_common.h @@ -1,92 +1,46 @@ -/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */ /* QLogic qed NIC Driver - * Copyright (c) 2015-2017 QLogic Corporation - * 
Copyright (c) 2019-2020 Marvell International Ltd. + * Copyright (c) 2015 QLogic Corporation + * + * This software is available under the terms of the GNU General Public License + * (GPL) Version 2, available from the file COPYING in the main directory of + * this source tree. */ #ifndef __STORAGE_COMMON__ #define __STORAGE_COMMON__ -/*********************/ -/* SCSI CONSTANTS */ -/*********************/ +#define NUM_OF_CMDQS_CQS (NUM_OF_GLOBAL_QUEUES / 2) +#define BDQ_NUM_RESOURCES (4) -#define SCSI_MAX_NUM_OF_CMDQS (NUM_OF_GLOBAL_QUEUES / 2) -#define BDQ_NUM_RESOURCES (4) +#define BDQ_ID_RQ (0) +#define BDQ_ID_IMM_DATA (1) +#define BDQ_NUM_IDS (2) -#define BDQ_ID_RQ (0) -#define BDQ_ID_IMM_DATA (1) -#define BDQ_ID_TQ (2) -#define BDQ_NUM_IDS (3) +#define BDQ_MAX_EXTERNAL_RING_SIZE (1 << 15) -#define SCSI_NUM_SGES_SLOW_SGL_THR 8 - -#define BDQ_MAX_EXTERNAL_RING_SIZE BIT(15) - -/* SCSI op codes */ -#define SCSI_OPCODE_COMPARE_AND_WRITE (0x89) -#define SCSI_OPCODE_READ_10 (0x28) -#define SCSI_OPCODE_WRITE_6 (0x0A) -#define SCSI_OPCODE_WRITE_10 (0x2A) -#define SCSI_OPCODE_WRITE_12 (0xAA) -#define SCSI_OPCODE_WRITE_16 (0x8A) -#define SCSI_OPCODE_WRITE_AND_VERIFY_10 (0x2E) -#define SCSI_OPCODE_WRITE_AND_VERIFY_12 (0xAE) -#define SCSI_OPCODE_WRITE_AND_VERIFY_16 (0x8E) - -/* iSCSI Drv opaque */ -struct iscsi_drv_opaque { - __le16 reserved_zero[3]; - __le16 opaque; -}; - -/* Scsi 2B/8B opaque union */ -union scsi_opaque { - struct regpair fcoe_opaque; - struct iscsi_drv_opaque iscsi_opaque; -}; - -/* SCSI buffer descriptor */ struct scsi_bd { struct regpair address; - union scsi_opaque opaque; + struct regpair opaque; }; -/* Scsi Drv BDQ struct */ struct scsi_bdq_ram_drv_data { __le16 external_producer; __le16 reserved0[3]; }; -/* SCSI SGE entry */ -struct scsi_sge { - struct regpair sge_addr; - __le32 sge_len; - __le32 reserved; -}; - -/* Cached SGEs section */ -struct scsi_cached_sges { - struct scsi_sge sge[4]; -}; - -/* Scsi Drv CMDQ struct */ struct scsi_drv_cmdq { 
__le16 cmdq_cons; __le16 reserved0; __le32 reserved1; }; -/* Common SCSI init params passed by driver to FW in function init ramrod */ struct scsi_init_func_params { __le16 num_tasks; u8 log_page_size; - u8 log_page_size_conn; u8 debug_mode; - u8 reserved2[11]; + u8 reserved2[12]; }; -/* SCSI RQ/CQ/CMDQ firmware function init parameters */ struct scsi_init_func_queues { struct regpair glbl_q_params_addr; __le16 rq_buffer_size; @@ -94,64 +48,44 @@ struct scsi_init_func_queues { __le16 cmdq_num_entries; u8 bdq_resource_id; u8 q_validity; -#define SCSI_INIT_FUNC_QUEUES_RQ_VALID_MASK 0x1 -#define SCSI_INIT_FUNC_QUEUES_RQ_VALID_SHIFT 0 -#define SCSI_INIT_FUNC_QUEUES_IMM_DATA_VALID_MASK 0x1 -#define SCSI_INIT_FUNC_QUEUES_IMM_DATA_VALID_SHIFT 1 -#define SCSI_INIT_FUNC_QUEUES_CMD_VALID_MASK 0x1 -#define SCSI_INIT_FUNC_QUEUES_CMD_VALID_SHIFT 2 -#define SCSI_INIT_FUNC_QUEUES_TQ_VALID_MASK 0x1 -#define SCSI_INIT_FUNC_QUEUES_TQ_VALID_SHIFT 3 -#define SCSI_INIT_FUNC_QUEUES_SOC_EN_MASK 0x1 -#define SCSI_INIT_FUNC_QUEUES_SOC_EN_SHIFT 4 -#define SCSI_INIT_FUNC_QUEUES_SOC_NUM_OF_BLOCKS_LOG_MASK 0x7 -#define SCSI_INIT_FUNC_QUEUES_SOC_NUM_OF_BLOCKS_LOG_SHIFT 5 - __le16 cq_cmdq_sb_num_arr[SCSI_MAX_NUM_OF_CMDQS]; +#define SCSI_INIT_FUNC_QUEUES_RQ_VALID_MASK 0x1 +#define SCSI_INIT_FUNC_QUEUES_RQ_VALID_SHIFT 0 +#define SCSI_INIT_FUNC_QUEUES_IMM_DATA_VALID_MASK 0x1 +#define SCSI_INIT_FUNC_QUEUES_IMM_DATA_VALID_SHIFT 1 +#define SCSI_INIT_FUNC_QUEUES_CMD_VALID_MASK 0x1 +#define SCSI_INIT_FUNC_QUEUES_CMD_VALID_SHIFT 2 +#define SCSI_INIT_FUNC_QUEUES_RESERVED_VALID_MASK 0x1F +#define SCSI_INIT_FUNC_QUEUES_RESERVED_VALID_SHIFT 3 u8 num_queues; u8 queue_relative_offset; u8 cq_sb_pi; u8 cmdq_sb_pi; + __le16 cq_cmdq_sb_num_arr[NUM_OF_CMDQS_CQS]; + __le16 reserved0; u8 bdq_pbl_num_entries[BDQ_NUM_IDS]; - u8 reserved1; struct regpair bdq_pbl_base_address[BDQ_NUM_IDS]; __le16 bdq_xoff_threshold[BDQ_NUM_IDS]; - __le16 cmdq_xoff_threshold; __le16 bdq_xon_threshold[BDQ_NUM_IDS]; + __le16 
cmdq_xoff_threshold; __le16 cmdq_xon_threshold; + __le32 reserved1; }; -/* Scsi Drv BDQ Data struct (2 BDQ IDs: 0 - RQ, 1 - Immediate Data) */ struct scsi_ram_per_bdq_resource_drv_data { struct scsi_bdq_ram_drv_data drv_data_per_bdq_id[BDQ_NUM_IDS]; }; -/* SCSI SGL types */ -enum scsi_sgl_mode { - SCSI_TX_SLOW_SGL, - SCSI_FAST_SGL, - MAX_SCSI_SGL_MODE +struct scsi_sge { + struct regpair sge_addr; + __le16 sge_len; + __le16 reserved0; + __le32 reserved1; }; -/* SCSI SGL parameters */ -struct scsi_sgl_params { - struct regpair sgl_addr; - __le32 sgl_total_length; - __le32 sge_offset; - __le16 sgl_num_sges; - u8 sgl_index; - u8 reserved; -}; - -/* SCSI terminate connection params */ struct scsi_terminate_extra_params { __le16 unsolicited_cq_count; __le16 cmdq_count; u8 reserved[4]; }; -/* SCSI Task Queue Element */ -struct scsi_tqe { - __le16 itid; -}; - #endif /* __STORAGE_COMMON__ */ diff --git a/include/linux/qed/tcp_common.h b/include/linux/qed/tcp_common.h index 2b2c87d10e..dc3889d1bb 100644 --- a/include/linux/qed/tcp_common.h +++ b/include/linux/qed/tcp_common.h @@ -1,19 +1,16 @@ -/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */ /* QLogic qed NIC Driver - * Copyright (c) 2015-2017 QLogic Corporation - * Copyright (c) 2019-2020 Marvell International Ltd. + * Copyright (c) 2015 QLogic Corporation + * + * This software is available under the terms of the GNU General Public License + * (GPL) Version 2, available from the file COPYING in the main directory of + * this source tree. 
*/ #ifndef __TCP_COMMON__ #define __TCP_COMMON__ -/********************/ -/* TCP FW CONSTANTS */ -/********************/ +#define TCP_INVALID_TIMEOUT_VAL -1 -#define TCP_INVALID_TIMEOUT_VAL -1 - -/* OOO opaque data received from LL2 */ struct ooo_opaque { __le32 cid; u8 drop_isle; @@ -22,29 +19,25 @@ struct ooo_opaque { u8 ooo_isle; }; -/* tcp connect mode enum */ enum tcp_connect_mode { TCP_CONNECT_ACTIVE, TCP_CONNECT_PASSIVE, MAX_TCP_CONNECT_MODE }; -/* tcp function init parameters */ struct tcp_init_params { __le32 two_msl_timer; __le16 tx_sws_timer; - u8 max_fin_rt; + u8 maxfinrt; u8 reserved[9]; }; -/* tcp IPv4/IPv6 enum */ enum tcp_ip_version { TCP_IPV4, TCP_IPV6, MAX_TCP_IP_VERSION }; -/* tcp offload parameters */ struct tcp_offload_params { __le16 local_mac_addr_lo; __le16 local_mac_addr_mid; @@ -53,29 +46,24 @@ struct tcp_offload_params { __le16 remote_mac_addr_mid; __le16 remote_mac_addr_hi; __le16 vlan_id; - __le16 flags; -#define TCP_OFFLOAD_PARAMS_TS_EN_MASK 0x1 -#define TCP_OFFLOAD_PARAMS_TS_EN_SHIFT 0 -#define TCP_OFFLOAD_PARAMS_DA_EN_MASK 0x1 -#define TCP_OFFLOAD_PARAMS_DA_EN_SHIFT 1 -#define TCP_OFFLOAD_PARAMS_KA_EN_MASK 0x1 -#define TCP_OFFLOAD_PARAMS_KA_EN_SHIFT 2 -#define TCP_OFFLOAD_PARAMS_ECN_SENDER_EN_MASK 0x1 -#define TCP_OFFLOAD_PARAMS_ECN_SENDER_EN_SHIFT 3 -#define TCP_OFFLOAD_PARAMS_ECN_RECEIVER_EN_MASK 0x1 -#define TCP_OFFLOAD_PARAMS_ECN_RECEIVER_EN_SHIFT 4 -#define TCP_OFFLOAD_PARAMS_NAGLE_EN_MASK 0x1 -#define TCP_OFFLOAD_PARAMS_NAGLE_EN_SHIFT 5 -#define TCP_OFFLOAD_PARAMS_DA_CNT_EN_MASK 0x1 -#define TCP_OFFLOAD_PARAMS_DA_CNT_EN_SHIFT 6 -#define TCP_OFFLOAD_PARAMS_FIN_SENT_MASK 0x1 -#define TCP_OFFLOAD_PARAMS_FIN_SENT_SHIFT 7 -#define TCP_OFFLOAD_PARAMS_FIN_RECEIVED_MASK 0x1 -#define TCP_OFFLOAD_PARAMS_FIN_RECEIVED_SHIFT 8 -#define TCP_OFFLOAD_PARAMS_RESERVED_MASK 0x7F -#define TCP_OFFLOAD_PARAMS_RESERVED_SHIFT 9 + u8 flags; +#define TCP_OFFLOAD_PARAMS_TS_EN_MASK 0x1 +#define TCP_OFFLOAD_PARAMS_TS_EN_SHIFT 0 +#define 
TCP_OFFLOAD_PARAMS_DA_EN_MASK 0x1 +#define TCP_OFFLOAD_PARAMS_DA_EN_SHIFT 1 +#define TCP_OFFLOAD_PARAMS_KA_EN_MASK 0x1 +#define TCP_OFFLOAD_PARAMS_KA_EN_SHIFT 2 +#define TCP_OFFLOAD_PARAMS_NAGLE_EN_MASK 0x1 +#define TCP_OFFLOAD_PARAMS_NAGLE_EN_SHIFT 3 +#define TCP_OFFLOAD_PARAMS_DA_CNT_EN_MASK 0x1 +#define TCP_OFFLOAD_PARAMS_DA_CNT_EN_SHIFT 4 +#define TCP_OFFLOAD_PARAMS_FIN_SENT_MASK 0x1 +#define TCP_OFFLOAD_PARAMS_FIN_SENT_SHIFT 5 +#define TCP_OFFLOAD_PARAMS_FIN_RECEIVED_MASK 0x1 +#define TCP_OFFLOAD_PARAMS_FIN_RECEIVED_SHIFT 6 +#define TCP_OFFLOAD_PARAMS_RESERVED0_MASK 0x1 +#define TCP_OFFLOAD_PARAMS_RESERVED0_SHIFT 7 u8 ip_version; - u8 reserved0[3]; __le32 remote_ip[4]; __le32 local_ip[4]; __le32 flow_label; @@ -87,22 +75,19 @@ struct tcp_offload_params { u8 rcv_wnd_scale; u8 connect_mode; __le16 srtt; - __le32 ss_thresh; - __le32 rcv_wnd; __le32 cwnd; + __le32 ss_thresh; + __le16 reserved1; u8 ka_max_probe_cnt; u8 dup_ack_theshold; - __le16 reserved1; - __le32 ka_timeout; - __le32 ka_interval; - __le32 max_rt_time; - __le32 initial_rcv_wnd; __le32 rcv_next; __le32 snd_una; __le32 snd_next; __le32 snd_max; __le32 snd_wnd; + __le32 rcv_wnd; __le32 snd_wl1; + __le32 ts_time; __le32 ts_recent; __le32 ts_recent_age; __le32 total_rt; @@ -113,14 +98,17 @@ struct tcp_offload_params { u8 ka_probe_cnt; u8 rt_cnt; __le16 rtt_var; - __le16 fw_internal; + __le16 reserved2; + __le32 ka_timeout; + __le32 ka_interval; + __le32 max_rt_time; + __le32 initial_rcv_wnd; u8 snd_wnd_scale; u8 ack_frequency; __le16 da_timeout_value; - __le32 reserved3; + __le32 ts_ticks_per_second; }; -/* tcp offload parameters */ struct tcp_offload_params_opt2 { __le16 local_mac_addr_lo; __le16 local_mac_addr_mid; @@ -129,19 +117,16 @@ struct tcp_offload_params_opt2 { __le16 remote_mac_addr_mid; __le16 remote_mac_addr_hi; __le16 vlan_id; - __le16 flags; -#define TCP_OFFLOAD_PARAMS_OPT2_TS_EN_MASK 0x1 -#define TCP_OFFLOAD_PARAMS_OPT2_TS_EN_SHIFT 0 -#define TCP_OFFLOAD_PARAMS_OPT2_DA_EN_MASK 0x1 
-#define TCP_OFFLOAD_PARAMS_OPT2_DA_EN_SHIFT 1 -#define TCP_OFFLOAD_PARAMS_OPT2_KA_EN_MASK 0x1 -#define TCP_OFFLOAD_PARAMS_OPT2_KA_EN_SHIFT 2 -#define TCP_OFFLOAD_PARAMS_OPT2_ECN_EN_MASK 0x1 -#define TCP_OFFLOAD_PARAMS_OPT2_ECN_EN_SHIFT 3 -#define TCP_OFFLOAD_PARAMS_OPT2_RESERVED0_MASK 0xFFF -#define TCP_OFFLOAD_PARAMS_OPT2_RESERVED0_SHIFT 4 + u8 flags; +#define TCP_OFFLOAD_PARAMS_OPT2_TS_EN_MASK 0x1 +#define TCP_OFFLOAD_PARAMS_OPT2_TS_EN_SHIFT 0 +#define TCP_OFFLOAD_PARAMS_OPT2_DA_EN_MASK 0x1 +#define TCP_OFFLOAD_PARAMS_OPT2_DA_EN_SHIFT 1 +#define TCP_OFFLOAD_PARAMS_OPT2_KA_EN_MASK 0x1 +#define TCP_OFFLOAD_PARAMS_OPT2_KA_EN_SHIFT 2 +#define TCP_OFFLOAD_PARAMS_OPT2_RESERVED0_MASK 0x1F +#define TCP_OFFLOAD_PARAMS_OPT2_RESERVED0_SHIFT 3 u8 ip_version; - u8 reserved1[3]; __le32 remote_ip[4]; __le32 local_ip[4]; __le32 flow_label; @@ -155,62 +140,53 @@ struct tcp_offload_params_opt2 { __le16 syn_ip_payload_length; __le32 syn_phy_addr_lo; __le32 syn_phy_addr_hi; - __le32 cwnd; - u8 ka_max_probe_cnt; - u8 reserved2[3]; - __le32 ka_timeout; - __le32 ka_interval; - __le32 max_rt_time; - __le32 reserved3[16]; + __le32 reserved1[22]; }; -/* tcp IPv4/IPv6 enum */ enum tcp_seg_placement_event { TCP_EVENT_ADD_PEN, TCP_EVENT_ADD_NEW_ISLE, TCP_EVENT_ADD_ISLE_RIGHT, TCP_EVENT_ADD_ISLE_LEFT, TCP_EVENT_JOIN, - TCP_EVENT_DELETE_ISLES, TCP_EVENT_NOP, MAX_TCP_SEG_PLACEMENT_EVENT }; -/* tcp init parameters */ struct tcp_update_params { __le16 flags; -#define TCP_UPDATE_PARAMS_REMOTE_MAC_ADDR_CHANGED_MASK 0x1 -#define TCP_UPDATE_PARAMS_REMOTE_MAC_ADDR_CHANGED_SHIFT 0 -#define TCP_UPDATE_PARAMS_MSS_CHANGED_MASK 0x1 -#define TCP_UPDATE_PARAMS_MSS_CHANGED_SHIFT 1 -#define TCP_UPDATE_PARAMS_TTL_CHANGED_MASK 0x1 -#define TCP_UPDATE_PARAMS_TTL_CHANGED_SHIFT 2 -#define TCP_UPDATE_PARAMS_TOS_OR_TC_CHANGED_MASK 0x1 -#define TCP_UPDATE_PARAMS_TOS_OR_TC_CHANGED_SHIFT 3 -#define TCP_UPDATE_PARAMS_KA_TIMEOUT_CHANGED_MASK 0x1 -#define TCP_UPDATE_PARAMS_KA_TIMEOUT_CHANGED_SHIFT 4 -#define 
TCP_UPDATE_PARAMS_KA_INTERVAL_CHANGED_MASK 0x1 -#define TCP_UPDATE_PARAMS_KA_INTERVAL_CHANGED_SHIFT 5 -#define TCP_UPDATE_PARAMS_MAX_RT_TIME_CHANGED_MASK 0x1 -#define TCP_UPDATE_PARAMS_MAX_RT_TIME_CHANGED_SHIFT 6 -#define TCP_UPDATE_PARAMS_FLOW_LABEL_CHANGED_MASK 0x1 -#define TCP_UPDATE_PARAMS_FLOW_LABEL_CHANGED_SHIFT 7 -#define TCP_UPDATE_PARAMS_INITIAL_RCV_WND_CHANGED_MASK 0x1 -#define TCP_UPDATE_PARAMS_INITIAL_RCV_WND_CHANGED_SHIFT 8 -#define TCP_UPDATE_PARAMS_KA_MAX_PROBE_CNT_CHANGED_MASK 0x1 -#define TCP_UPDATE_PARAMS_KA_MAX_PROBE_CNT_CHANGED_SHIFT 9 -#define TCP_UPDATE_PARAMS_KA_EN_CHANGED_MASK 0x1 -#define TCP_UPDATE_PARAMS_KA_EN_CHANGED_SHIFT 10 -#define TCP_UPDATE_PARAMS_NAGLE_EN_CHANGED_MASK 0x1 -#define TCP_UPDATE_PARAMS_NAGLE_EN_CHANGED_SHIFT 11 -#define TCP_UPDATE_PARAMS_KA_EN_MASK 0x1 -#define TCP_UPDATE_PARAMS_KA_EN_SHIFT 12 -#define TCP_UPDATE_PARAMS_NAGLE_EN_MASK 0x1 -#define TCP_UPDATE_PARAMS_NAGLE_EN_SHIFT 13 -#define TCP_UPDATE_PARAMS_KA_RESTART_MASK 0x1 -#define TCP_UPDATE_PARAMS_KA_RESTART_SHIFT 14 -#define TCP_UPDATE_PARAMS_RETRANSMIT_RESTART_MASK 0x1 -#define TCP_UPDATE_PARAMS_RETRANSMIT_RESTART_SHIFT 15 +#define TCP_UPDATE_PARAMS_REMOTE_MAC_ADDR_CHANGED_MASK 0x1 +#define TCP_UPDATE_PARAMS_REMOTE_MAC_ADDR_CHANGED_SHIFT 0 +#define TCP_UPDATE_PARAMS_MSS_CHANGED_MASK 0x1 +#define TCP_UPDATE_PARAMS_MSS_CHANGED_SHIFT 1 +#define TCP_UPDATE_PARAMS_TTL_CHANGED_MASK 0x1 +#define TCP_UPDATE_PARAMS_TTL_CHANGED_SHIFT 2 +#define TCP_UPDATE_PARAMS_TOS_OR_TC_CHANGED_MASK 0x1 +#define TCP_UPDATE_PARAMS_TOS_OR_TC_CHANGED_SHIFT 3 +#define TCP_UPDATE_PARAMS_KA_TIMEOUT_CHANGED_MASK 0x1 +#define TCP_UPDATE_PARAMS_KA_TIMEOUT_CHANGED_SHIFT 4 +#define TCP_UPDATE_PARAMS_KA_INTERVAL_CHANGED_MASK 0x1 +#define TCP_UPDATE_PARAMS_KA_INTERVAL_CHANGED_SHIFT 5 +#define TCP_UPDATE_PARAMS_MAX_RT_TIME_CHANGED_MASK 0x1 +#define TCP_UPDATE_PARAMS_MAX_RT_TIME_CHANGED_SHIFT 6 +#define TCP_UPDATE_PARAMS_FLOW_LABEL_CHANGED_MASK 0x1 +#define TCP_UPDATE_PARAMS_FLOW_LABEL_CHANGED_SHIFT 
7 +#define TCP_UPDATE_PARAMS_INITIAL_RCV_WND_CHANGED_MASK 0x1 +#define TCP_UPDATE_PARAMS_INITIAL_RCV_WND_CHANGED_SHIFT 8 +#define TCP_UPDATE_PARAMS_KA_MAX_PROBE_CNT_CHANGED_MASK 0x1 +#define TCP_UPDATE_PARAMS_KA_MAX_PROBE_CNT_CHANGED_SHIFT 9 +#define TCP_UPDATE_PARAMS_KA_EN_CHANGED_MASK 0x1 +#define TCP_UPDATE_PARAMS_KA_EN_CHANGED_SHIFT 10 +#define TCP_UPDATE_PARAMS_NAGLE_EN_CHANGED_MASK 0x1 +#define TCP_UPDATE_PARAMS_NAGLE_EN_CHANGED_SHIFT 11 +#define TCP_UPDATE_PARAMS_KA_EN_MASK 0x1 +#define TCP_UPDATE_PARAMS_KA_EN_SHIFT 12 +#define TCP_UPDATE_PARAMS_NAGLE_EN_MASK 0x1 +#define TCP_UPDATE_PARAMS_NAGLE_EN_SHIFT 13 +#define TCP_UPDATE_PARAMS_KA_RESTART_MASK 0x1 +#define TCP_UPDATE_PARAMS_KA_RESTART_SHIFT 14 +#define TCP_UPDATE_PARAMS_RETRANSMIT_RESTART_MASK 0x1 +#define TCP_UPDATE_PARAMS_RETRANSMIT_RESTART_SHIFT 15 __le16 remote_mac_addr_lo; __le16 remote_mac_addr_mid; __le16 remote_mac_addr_hi; @@ -226,7 +202,6 @@ struct tcp_update_params { u8 reserved1[7]; }; -/* toe upload parameters */ struct tcp_upload_params { __le32 rcv_next; __le32 snd_una; diff --git a/include/linux/qnx6_fs.h b/include/linux/qnx6_fs.h index 13373d437c..26049eab90 100644 --- a/include/linux/qnx6_fs.h +++ b/include/linux/qnx6_fs.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ /* * Name : qnx6_fs.h * Author : Kai Bankett diff --git a/include/linux/quicklist.h b/include/linux/quicklist.h new file mode 100644 index 0000000000..3bdfa70bc6 --- /dev/null +++ b/include/linux/quicklist.h @@ -0,0 +1,93 @@ +#ifndef LINUX_QUICKLIST_H +#define LINUX_QUICKLIST_H +/* + * Fast allocations and disposal of pages. Pages must be in the condition + * as needed after allocation when they are freed. Per cpu lists of pages + * are kept that only contain node local pages. + * + * (C) 2007, SGI. 
Christoph Lameter + */ +#include +#include +#include + +#ifdef CONFIG_QUICKLIST + +struct quicklist { + void *page; + int nr_pages; +}; + +DECLARE_PER_CPU(struct quicklist, quicklist)[CONFIG_NR_QUICK]; + +/* + * The two key functions quicklist_alloc and quicklist_free are inline so + * that they may be custom compiled for the platform. + * Specifying a NULL ctor can remove constructor support. Specifying + * a constant quicklist allows the determination of the exact address + * in the per cpu area. + * + * The fast patch in quicklist_alloc touched only a per cpu cacheline and + * the first cacheline of the page itself. There is minmal overhead involved. + */ +static inline void *quicklist_alloc(int nr, gfp_t flags, void (*ctor)(void *)) +{ + struct quicklist *q; + void **p = NULL; + + q =&get_cpu_var(quicklist)[nr]; + p = q->page; + if (likely(p)) { + q->page = p[0]; + p[0] = NULL; + q->nr_pages--; + } + put_cpu_var(quicklist); + if (likely(p)) + return p; + + p = (void *)__get_free_page(flags | __GFP_ZERO); + if (ctor && p) + ctor(p); + return p; +} + +static inline void __quicklist_free(int nr, void (*dtor)(void *), void *p, + struct page *page) +{ + struct quicklist *q; + + q = &get_cpu_var(quicklist)[nr]; + *(void **)p = q->page; + q->page = p; + q->nr_pages++; + put_cpu_var(quicklist); +} + +static inline void quicklist_free(int nr, void (*dtor)(void *), void *pp) +{ + __quicklist_free(nr, dtor, pp, virt_to_page(pp)); +} + +static inline void quicklist_free_page(int nr, void (*dtor)(void *), + struct page *page) +{ + __quicklist_free(nr, dtor, page_address(page), page); +} + +void quicklist_trim(int nr, void (*dtor)(void *), + unsigned long min_pages, unsigned long max_free); + +unsigned long quicklist_total_size(void); + +#else + +static inline unsigned long quicklist_total_size(void) +{ + return 0; +} + +#endif + +#endif /* LINUX_QUICKLIST_H */ + diff --git a/include/linux/quota.h b/include/linux/quota.h index 18ebd39c94..eb061784c8 100644 --- 
a/include/linux/quota.h +++ b/include/linux/quota.h @@ -76,7 +76,7 @@ struct kqid { /* Type in which we store the quota identifier */ extern bool qid_eq(struct kqid left, struct kqid right); extern bool qid_lt(struct kqid left, struct kqid right); -extern qid_t from_kqid(struct user_namespace *to, struct kqid qid); +extern qid_t from_kqid(struct user_namespace *to, struct kqid qid) __intentional_overflow(-1); extern qid_t from_kqid_munged(struct user_namespace *to, struct kqid qid); extern bool qid_valid(struct kqid qid); @@ -223,12 +223,12 @@ struct mem_dqinfo { struct quota_format_type *dqi_format; int dqi_fmt_id; /* Id of the dqi_format - used when turning * quotas on after remount RW */ - struct list_head dqi_dirty_list; /* List of dirty dquots [dq_list_lock] */ - unsigned long dqi_flags; /* DFQ_ flags [dq_data_lock] */ - unsigned int dqi_bgrace; /* Space grace time [dq_data_lock] */ - unsigned int dqi_igrace; /* Inode grace time [dq_data_lock] */ - qsize_t dqi_max_spc_limit; /* Maximum space limit [static] */ - qsize_t dqi_max_ino_limit; /* Maximum inode limit [static] */ + struct list_head dqi_dirty_list; /* List of dirty dquots */ + unsigned long dqi_flags; + unsigned int dqi_bgrace; + unsigned int dqi_igrace; + qsize_t dqi_max_spc_limit; + qsize_t dqi_max_ino_limit; void *dqi_priv; }; @@ -263,10 +263,11 @@ enum { }; struct dqstats { - unsigned long stat[_DQST_DQSTAT_LAST]; + int stat[_DQST_DQSTAT_LAST]; struct percpu_counter counter[_DQST_DQSTAT_LAST]; }; +extern struct dqstats *dqstats_pcpu; extern struct dqstats dqstats; static inline void dqstats_inc(unsigned int type) @@ -292,18 +293,18 @@ static inline void dqstats_dec(unsigned int type) * clear them when it sees fit. 
*/ struct dquot { - struct hlist_node dq_hash; /* Hash list in memory [dq_list_lock] */ - struct list_head dq_inuse; /* List of all quotas [dq_list_lock] */ - struct list_head dq_free; /* Free list element [dq_list_lock] */ - struct list_head dq_dirty; /* List of dirty dquots [dq_list_lock] */ + struct hlist_node dq_hash; /* Hash list in memory */ + struct list_head dq_inuse; /* List of all quotas */ + struct list_head dq_free; /* Free list element */ + struct list_head dq_dirty; /* List of dirty dquots */ struct mutex dq_lock; /* dquot IO lock */ - spinlock_t dq_dqb_lock; /* Lock protecting dq_dqb changes */ atomic_t dq_count; /* Use count */ + wait_queue_head_t dq_wait_unused; /* Wait queue for dquot to become unused */ struct super_block *dq_sb; /* superblock this applies to */ struct kqid dq_id; /* ID this applies to (uid, gid, projid) */ - loff_t dq_off; /* Offset of dquot on disk [dq_lock, stable once set] */ + loff_t dq_off; /* Offset of dquot on disk */ unsigned long dq_flags; /* See DQ_* */ - struct mem_dqblk dq_dqb; /* Diskquota usage [dq_dqb_lock] */ + struct mem_dqblk dq_dqb; /* Diskquota usage */ }; /* Operations which must be implemented by each quota format */ @@ -331,8 +332,6 @@ struct dquot_operations { * quota code only */ qsize_t *(*get_reserved_space) (struct inode *); int (*get_projid) (struct inode *, kprojid_t *);/* Get project ID */ - /* Get number of inodes that were charged for a given inode */ - int (*get_inode_usage) (struct inode *, qsize_t *); /* Get next ID with active quota structure */ int (*get_next_id) (struct super_block *sb, struct kqid *qid); }; @@ -408,7 +407,13 @@ struct qc_type_state { struct qc_state { unsigned int s_incoredqs; /* Number of dquots in core */ - struct qc_type_state s_state[MAXQUOTAS]; /* Per quota type information */ + /* + * Per quota type information. The array should really have + * max(MAXQUOTAS, XQM_MAXQUOTAS) entries. BUILD_BUG_ON in + * quota_getinfo() makes sure XQM_MAXQUOTAS is large enough. 
Once VFS + * supports project quotas, this can be changed to MAXQUOTAS + */ + struct qc_type_state s_state[XQM_MAXQUOTAS]; }; /* Structure for communicating via ->set_info */ @@ -426,7 +431,7 @@ struct qc_info { /* Operations handling requests from userspace */ struct quotactl_ops { - int (*quota_on)(struct super_block *, int, int, const struct path *); + int (*quota_on)(struct super_block *, int, int, struct path *); int (*quota_off)(struct super_block *, int); int (*quota_enable)(struct super_block *, unsigned int); int (*quota_disable)(struct super_block *, unsigned int); @@ -448,18 +453,17 @@ struct quota_format_type { }; /** - * Quota state flags - they come in three flavors - for users, groups and projects. + * Quota state flags - they actually come in two flavors - for users and groups. * * Actual typed flags layout: - * USRQUOTA GRPQUOTA PRJQUOTA - * DQUOT_USAGE_ENABLED 0x0001 0x0002 0x0004 - * DQUOT_LIMITS_ENABLED 0x0008 0x0010 0x0020 - * DQUOT_SUSPENDED 0x0040 0x0080 0x0100 + * USRQUOTA GRPQUOTA + * DQUOT_USAGE_ENABLED 0x0001 0x0002 + * DQUOT_LIMITS_ENABLED 0x0004 0x0008 + * DQUOT_SUSPENDED 0x0010 0x0020 * * Following bits are used for non-typed flags: - * DQUOT_QUOTA_SYS_FILE 0x0200 - * DQUOT_NEGATIVE_USAGE 0x0400 - * DQUOT_NOLIST_DIRTY 0x0800 + * DQUOT_QUOTA_SYS_FILE 0x0040 + * DQUOT_NEGATIVE_USAGE 0x0080 */ enum { _DQUOT_USAGE_ENABLED = 0, /* Track disk usage for users */ @@ -485,9 +489,6 @@ enum { */ #define DQUOT_NEGATIVE_USAGE (1 << (DQUOT_STATE_LAST + 1)) /* Allow negative quota usage */ -/* Do not track dirty dquots in a list */ -#define DQUOT_NOLIST_DIRTY (1 << (DQUOT_STATE_LAST + 2)) - static inline unsigned int dquot_state_flag(unsigned int flags, int type) { return flags << type; @@ -518,7 +519,8 @@ static inline void quota_send_warning(struct kqid qid, dev_t dev, struct quota_info { unsigned int flags; /* Flags for diskquotas on this device */ - struct rw_semaphore dqio_sem; /* Lock quota file while I/O in progress */ + struct mutex 
dqio_mutex; /* lock device while I/O in progress */ + struct mutex dqonoff_mutex; /* Serialize quotaon & quotaoff */ struct inode *files[MAXQUOTAS]; /* inodes of quotafiles */ struct mem_dqinfo info[MAXQUOTAS]; /* Information for each quota type */ const struct quota_format_ops *ops[MAXQUOTAS]; /* Operations for each type */ diff --git a/include/linux/quotaops.h b/include/linux/quotaops.h index a0f6668924..f00fa86ac9 100644 --- a/include/linux/quotaops.h +++ b/include/linux/quotaops.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ /* * Definitions for diskquota-operations. When diskquota is configured these * macros expand to the right source-code. @@ -22,7 +21,7 @@ static inline struct quota_info *sb_dqopt(struct super_block *sb) /* i_mutex must being held */ static inline bool is_quota_modification(struct inode *inode, struct iattr *ia) { - return (ia->ia_valid & ATTR_SIZE) || + return (ia->ia_valid & ATTR_SIZE && ia->ia_size != inode->i_size) || (ia->ia_valid & ATTR_UID && !uid_eq(ia->ia_uid, inode->i_uid)) || (ia->ia_valid & ATTR_GID && !gid_eq(ia->ia_gid, inode->i_gid)); } @@ -39,8 +38,12 @@ void __quota_error(struct super_block *sb, const char *func, /* * declaration of quota_function calls in kernel. 
*/ +void inode_add_rsv_space(struct inode *inode, qsize_t number); +void inode_claim_rsv_space(struct inode *inode, qsize_t number); +void inode_sub_rsv_space(struct inode *inode, qsize_t number); +void inode_reclaim_rsv_space(struct inode *inode, qsize_t number); + int dquot_initialize(struct inode *inode); -bool dquot_initialize_needed(struct inode *inode); void dquot_drop(struct inode *inode); struct dquot *dqget(struct super_block *sb, struct kqid qid); static inline struct dquot *dqgrab(struct dquot *dquot) @@ -51,16 +54,6 @@ static inline struct dquot *dqgrab(struct dquot *dquot) atomic_inc(&dquot->dq_count); return dquot; } - -static inline bool dquot_is_busy(struct dquot *dquot) -{ - if (test_bit(DQ_MOD_B, &dquot->dq_flags)) - return true; - if (atomic_read(&dquot->dq_count) > 1) - return true; - return false; -} - void dqput(struct dquot *dquot); int dquot_scan_active(struct super_block *sb, int (*fn)(struct dquot *dquot, unsigned long priv), @@ -94,12 +87,10 @@ int dquot_mark_dquot_dirty(struct dquot *dquot); int dquot_file_open(struct inode *inode, struct file *file); -int dquot_load_quota_sb(struct super_block *sb, int type, int format_id, - unsigned int flags); -int dquot_load_quota_inode(struct inode *inode, int type, int format_id, +int dquot_enable(struct inode *inode, int type, int format_id, unsigned int flags); int dquot_quota_on(struct super_block *sb, int type, int format_id, - const struct path *path); + struct path *path); int dquot_quota_on_mount(struct super_block *sb, char *qf_name, int format_id, int type); int dquot_quota_off(struct super_block *sb, int type); @@ -171,6 +162,7 @@ static inline bool sb_has_quota_active(struct super_block *sb, int type) * Operations supported for diskquotas. 
*/ extern const struct dquot_operations dquot_operations; +extern const struct quotactl_ops dquot_quotactl_ops; extern const struct quotactl_ops dquot_quotactl_sysfile_ops; #else @@ -216,11 +208,6 @@ static inline int dquot_initialize(struct inode *inode) return 0; } -static inline bool dquot_initialize_needed(struct inode *inode) -{ - return false; -} - static inline void dquot_drop(struct inode *inode) { } diff --git a/include/linux/radix-tree.h b/include/linux/radix-tree.h index 64ad900ac7..af3581b8a4 100644 --- a/include/linux/radix-tree.h +++ b/include/linux/radix-tree.h @@ -1,54 +1,60 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * Copyright (C) 2001 Momchil Velikov * Portions Copyright (C) 2001 Christoph Hellwig * Copyright (C) 2006 Nick Piggin * Copyright (C) 2012 Konstantin Khlebnikov + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2, or (at + * your option) any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 
*/ #ifndef _LINUX_RADIX_TREE_H #define _LINUX_RADIX_TREE_H #include -#include -#include -#include #include -#include -#include #include -#include -#include - -/* Keep unconverted code working */ -#define radix_tree_root xarray -#define radix_tree_node xa_node - -struct radix_tree_preload { - local_lock_t lock; - unsigned nr; - /* nodes->parent points to next preallocated node */ - struct radix_tree_node *nodes; -}; -DECLARE_PER_CPU(struct radix_tree_preload, radix_tree_preloads); +#include +#include +#include /* * The bottom two bits of the slot determine how the remaining bits in the * slot are interpreted: * * 00 - data pointer - * 10 - internal entry - * x1 - value entry + * 01 - internal entry + * 10 - exceptional entry + * 11 - this bit combination is currently unused/reserved * * The internal entry may be a pointer to the next level in the tree, a * sibling entry, or an indicator that the entry in this slot has been moved * to another location in the tree and the lookup should be restarted. While * NULL fits the 'data pointer' pattern, it means that there is no entry in * the tree for this index (no matter what level of the tree it is found at). - * This means that storing a NULL entry in the tree is the same as deleting - * the entry from the tree. + * This means that you cannot store NULL in the tree as a value for the index. */ #define RADIX_TREE_ENTRY_MASK 3UL -#define RADIX_TREE_INTERNAL_NODE 2UL +#define RADIX_TREE_INTERNAL_NODE 1UL + +/* + * Most users of the radix tree store pointers but shmem/tmpfs stores swap + * entries in the same tree. They are marked as exceptional entries to + * distinguish them from pointers to struct page. + * EXCEPTIONAL_ENTRY tests the bit, EXCEPTIONAL_SHIFT shifts content past it. 
+ */ +#define RADIX_TREE_EXCEPTIONAL_ENTRY 2 +#define RADIX_TREE_EXCEPTIONAL_SHIFT 2 static inline bool radix_tree_is_internal_node(void *ptr) { @@ -58,55 +64,70 @@ static inline bool radix_tree_is_internal_node(void *ptr) /*** radix-tree API starts here ***/ -#define RADIX_TREE_MAP_SHIFT XA_CHUNK_SHIFT +#define RADIX_TREE_MAX_TAGS 3 + +#ifndef RADIX_TREE_MAP_SHIFT +#define RADIX_TREE_MAP_SHIFT (CONFIG_BASE_SMALL ? 4 : 6) +#endif + #define RADIX_TREE_MAP_SIZE (1UL << RADIX_TREE_MAP_SHIFT) #define RADIX_TREE_MAP_MASK (RADIX_TREE_MAP_SIZE-1) -#define RADIX_TREE_MAX_TAGS XA_MAX_MARKS -#define RADIX_TREE_TAG_LONGS XA_MARK_LONGS +#define RADIX_TREE_TAG_LONGS \ + ((RADIX_TREE_MAP_SIZE + BITS_PER_LONG - 1) / BITS_PER_LONG) #define RADIX_TREE_INDEX_BITS (8 /* CHAR_BIT */ * sizeof(unsigned long)) #define RADIX_TREE_MAX_PATH (DIV_ROUND_UP(RADIX_TREE_INDEX_BITS, \ RADIX_TREE_MAP_SHIFT)) -/* The IDR tag is stored in the low bits of xa_flags */ -#define ROOT_IS_IDR ((__force gfp_t)4) -/* The top bits of xa_flags are used to store the root tags */ -#define ROOT_TAG_SHIFT (__GFP_BITS_SHIFT) +/* Internally used bits of node->count */ +#define RADIX_TREE_COUNT_SHIFT (RADIX_TREE_MAP_SHIFT + 1) +#define RADIX_TREE_COUNT_MASK ((1UL << RADIX_TREE_COUNT_SHIFT) - 1) -#define RADIX_TREE_INIT(name, mask) XARRAY_INIT(name, mask) +struct radix_tree_node { + unsigned char shift; /* Bits remaining in each slot */ + unsigned char offset; /* Slot offset in parent */ + unsigned int count; + union { + struct { + /* Used when ascending tree */ + struct radix_tree_node *parent; + /* For tree user */ + void *private_data; + }; + /* Used when freeing node */ + struct rcu_head rcu_head; + }; + /* For tree user */ + struct list_head private_list; + void __rcu *slots[RADIX_TREE_MAP_SIZE]; + unsigned long tags[RADIX_TREE_MAX_TAGS][RADIX_TREE_TAG_LONGS]; +}; -#define RADIX_TREE(name, mask) \ - struct radix_tree_root name = RADIX_TREE_INIT(name, mask) +/* root tags are stored in gfp_mask, shifted by 
__GFP_BITS_SHIFT */ +struct radix_tree_root { + gfp_t gfp_mask; + struct radix_tree_node __rcu *rnode; +}; -#define INIT_RADIX_TREE(root, mask) xa_init_flags(root, mask) - -static inline bool radix_tree_empty(const struct radix_tree_root *root) -{ - return root->xa_head == NULL; +#define RADIX_TREE_INIT(mask) { \ + .gfp_mask = (mask), \ + .rnode = NULL, \ } -/** - * struct radix_tree_iter - radix tree iterator state - * - * @index: index of current slot - * @next_index: one beyond the last index for this chunk - * @tags: bit-mask for tag-iterating - * @node: node that contains current slot - * - * This radix tree iterator works in terms of "chunks" of slots. A chunk is a - * subinterval of slots contained within one radix tree leaf node. It is - * described by a pointer to its first slot and a struct radix_tree_iter - * which holds the chunk's position in the tree and its size. For tagged - * iteration radix_tree_iter also holds the slots' bit-mask for one chosen - * radix tree tag. - */ -struct radix_tree_iter { - unsigned long index; - unsigned long next_index; - unsigned long tags; - struct radix_tree_node *node; -}; +#define RADIX_TREE(name, mask) \ + struct radix_tree_root name = RADIX_TREE_INIT(mask) + +#define INIT_RADIX_TREE(root, mask) \ +do { \ + (root)->gfp_mask = (mask); \ + (root)->rnode = NULL; \ +} while (0) + +static inline bool radix_tree_empty(struct radix_tree_root *root) +{ + return root->rnode == NULL; +} /** * Radix-tree synchronization @@ -131,11 +152,12 @@ struct radix_tree_iter { * radix_tree_lookup_slot * radix_tree_tag_get * radix_tree_gang_lookup + * radix_tree_gang_lookup_slot * radix_tree_gang_lookup_tag * radix_tree_gang_lookup_tag_slot * radix_tree_tagged * - * The first 7 functions are able to be called locklessly, using RCU. The + * The first 8 functions are able to be called locklessly, using RCU. The * caller must ensure calls to these functions are made within rcu_read_lock() * regions. 
Other readers (lock-free or otherwise) and modifications may be * running concurrently. @@ -160,8 +182,10 @@ struct radix_tree_iter { */ /** - * radix_tree_deref_slot - dereference a slot - * @slot: slot pointer, returned by radix_tree_lookup_slot + * radix_tree_deref_slot - dereference a slot + * @pslot: pointer to slot, returned by radix_tree_lookup_slot + * Returns: item that was stored in that slot with any direct pointer flag + * removed. * * For use with radix_tree_lookup_slot(). Caller must hold tree at least read * locked across slot lookup and dereference. Not required if write lock is @@ -169,27 +193,26 @@ struct radix_tree_iter { * * radix_tree_deref_retry must be used to confirm validity of the pointer if * only the read lock is held. - * - * Return: entry stored in that slot. */ -static inline void *radix_tree_deref_slot(void __rcu **slot) +static inline void *radix_tree_deref_slot(void **pslot) { - return rcu_dereference(*slot); + return rcu_dereference(*pslot); } /** - * radix_tree_deref_slot_protected - dereference a slot with tree lock held - * @slot: slot pointer, returned by radix_tree_lookup_slot + * radix_tree_deref_slot_protected - dereference a slot without RCU lock but with tree lock held + * @pslot: pointer to slot, returned by radix_tree_lookup_slot + * Returns: item that was stored in that slot with any direct pointer flag + * removed. * - * Similar to radix_tree_deref_slot. The caller does not hold the RCU read - * lock but it must hold the tree lock to prevent parallel updates. - * - * Return: entry stored in that slot. + * Similar to radix_tree_deref_slot but only used during migration when a pages + * mapping is being moved. The caller does not hold the RCU read lock but it + * must hold the tree lock to prevent parallel updates. 
*/ -static inline void *radix_tree_deref_slot_protected(void __rcu **slot, +static inline void *radix_tree_deref_slot_protected(void **pslot, spinlock_t *treelock) { - return rcu_dereference_protected(*slot, lockdep_is_held(treelock)); + return rcu_dereference_protected(*pslot, lockdep_is_held(treelock)); } /** @@ -204,6 +227,17 @@ static inline int radix_tree_deref_retry(void *arg) return unlikely(radix_tree_is_internal_node(arg)); } +/** + * radix_tree_exceptional_entry - radix_tree_deref_slot gave exceptional entry? + * @arg: value returned by radix_tree_deref_slot + * Returns: 0 if well-aligned pointer, non-0 if exceptional entry. + */ +static inline int radix_tree_exceptional_entry(void *arg) +{ + /* Not unlikely because radix_tree_exception often tested first */ + return (unsigned long)arg & RADIX_TREE_EXCEPTIONAL_ENTRY; +} + /** * radix_tree_exception - radix_tree_deref_slot returned either exception? * @arg: value returned by radix_tree_deref_slot @@ -214,60 +248,114 @@ static inline int radix_tree_exception(void *arg) return unlikely((unsigned long)arg & RADIX_TREE_ENTRY_MASK); } -int radix_tree_insert(struct radix_tree_root *, unsigned long index, - void *); -void *__radix_tree_lookup(const struct radix_tree_root *, unsigned long index, - struct radix_tree_node **nodep, void __rcu ***slotp); -void *radix_tree_lookup(const struct radix_tree_root *, unsigned long); -void __rcu **radix_tree_lookup_slot(const struct radix_tree_root *, - unsigned long index); -void __radix_tree_replace(struct radix_tree_root *, struct radix_tree_node *, - void __rcu **slot, void *entry); -void radix_tree_iter_replace(struct radix_tree_root *, - const struct radix_tree_iter *, void __rcu **slot, void *entry); -void radix_tree_replace_slot(struct radix_tree_root *, - void __rcu **slot, void *entry); -void radix_tree_iter_delete(struct radix_tree_root *, - struct radix_tree_iter *iter, void __rcu **slot); +/** + * radix_tree_replace_slot - replace item in a slot + * @pslot: 
pointer to slot, returned by radix_tree_lookup_slot + * @item: new item to store in the slot. + * + * For use with radix_tree_lookup_slot(). Caller must hold tree write locked + * across slot lookup and replacement. + */ +static inline void radix_tree_replace_slot(void **pslot, void *item) +{ + BUG_ON(radix_tree_is_internal_node(item)); + rcu_assign_pointer(*pslot, item); +} + +int __radix_tree_create(struct radix_tree_root *root, unsigned long index, + unsigned order, struct radix_tree_node **nodep, + void ***slotp); +int __radix_tree_insert(struct radix_tree_root *, unsigned long index, + unsigned order, void *); +static inline int radix_tree_insert(struct radix_tree_root *root, + unsigned long index, void *entry) +{ + return __radix_tree_insert(root, index, 0, entry); +} +void *__radix_tree_lookup(struct radix_tree_root *root, unsigned long index, + struct radix_tree_node **nodep, void ***slotp); +void *radix_tree_lookup(struct radix_tree_root *, unsigned long); +void **radix_tree_lookup_slot(struct radix_tree_root *, unsigned long); +bool __radix_tree_delete_node(struct radix_tree_root *root, + struct radix_tree_node *node); void *radix_tree_delete_item(struct radix_tree_root *, unsigned long, void *); void *radix_tree_delete(struct radix_tree_root *, unsigned long); -unsigned int radix_tree_gang_lookup(const struct radix_tree_root *, +void radix_tree_clear_tags(struct radix_tree_root *root, + struct radix_tree_node *node, + void **slot); +unsigned int radix_tree_gang_lookup(struct radix_tree_root *root, void **results, unsigned long first_index, unsigned int max_items); +unsigned int radix_tree_gang_lookup_slot(struct radix_tree_root *root, + void ***results, unsigned long *indices, + unsigned long first_index, unsigned int max_items); int radix_tree_preload(gfp_t gfp_mask); int radix_tree_maybe_preload(gfp_t gfp_mask); +int radix_tree_maybe_preload_order(gfp_t gfp_mask, int order); void radix_tree_init(void); -void *radix_tree_tag_set(struct radix_tree_root 
*, +void *radix_tree_tag_set(struct radix_tree_root *root, unsigned long index, unsigned int tag); -void *radix_tree_tag_clear(struct radix_tree_root *, +void *radix_tree_tag_clear(struct radix_tree_root *root, unsigned long index, unsigned int tag); -int radix_tree_tag_get(const struct radix_tree_root *, +int radix_tree_tag_get(struct radix_tree_root *root, unsigned long index, unsigned int tag); -void radix_tree_iter_tag_clear(struct radix_tree_root *, - const struct radix_tree_iter *iter, unsigned int tag); -unsigned int radix_tree_gang_lookup_tag(const struct radix_tree_root *, - void **results, unsigned long first_index, - unsigned int max_items, unsigned int tag); -unsigned int radix_tree_gang_lookup_tag_slot(const struct radix_tree_root *, - void __rcu ***results, unsigned long first_index, - unsigned int max_items, unsigned int tag); -int radix_tree_tagged(const struct radix_tree_root *, unsigned int tag); +unsigned int +radix_tree_gang_lookup_tag(struct radix_tree_root *root, void **results, + unsigned long first_index, unsigned int max_items, + unsigned int tag); +unsigned int +radix_tree_gang_lookup_tag_slot(struct radix_tree_root *root, void ***results, + unsigned long first_index, unsigned int max_items, + unsigned int tag); +unsigned long radix_tree_range_tag_if_tagged(struct radix_tree_root *root, + unsigned long *first_indexp, unsigned long last_index, + unsigned long nr_to_tag, + unsigned int fromtag, unsigned int totag); +int radix_tree_tagged(struct radix_tree_root *root, unsigned int tag); +unsigned long radix_tree_locate_item(struct radix_tree_root *root, void *item); static inline void radix_tree_preload_end(void) { - local_unlock(&radix_tree_preloads.lock); + preempt_enable(); } -void __rcu **idr_get_free(struct radix_tree_root *root, - struct radix_tree_iter *iter, gfp_t gfp, - unsigned long max); - -enum { - RADIX_TREE_ITER_TAG_MASK = 0x0f, /* tag index in lower nybble */ - RADIX_TREE_ITER_TAGGED = 0x10, /* lookup tagged slots */ - 
RADIX_TREE_ITER_CONTIG = 0x20, /* stop at first hole */ +/** + * struct radix_tree_iter - radix tree iterator state + * + * @index: index of current slot + * @next_index: one beyond the last index for this chunk + * @tags: bit-mask for tag-iterating + * @shift: shift for the node that holds our slots + * + * This radix tree iterator works in terms of "chunks" of slots. A chunk is a + * subinterval of slots contained within one radix tree leaf node. It is + * described by a pointer to its first slot and a struct radix_tree_iter + * which holds the chunk's position in the tree and its size. For tagged + * iteration radix_tree_iter also holds the slots' bit-mask for one chosen + * radix tree tag. + */ +struct radix_tree_iter { + unsigned long index; + unsigned long next_index; + unsigned long tags; +#ifdef CONFIG_RADIX_TREE_MULTIORDER + unsigned int shift; +#endif }; +static inline unsigned int iter_shift(struct radix_tree_iter *iter) +{ +#ifdef CONFIG_RADIX_TREE_MULTIORDER + return iter->shift; +#else + return 0; +#endif +} + +#define RADIX_TREE_ITER_TAG_MASK 0x00FF /* tag index in lower byte */ +#define RADIX_TREE_ITER_TAGGED 0x0100 /* lookup tagged slots */ +#define RADIX_TREE_ITER_CONTIG 0x0200 /* stop at first hole */ + /** * radix_tree_iter_init - initialize radix tree iterator * @@ -275,7 +363,7 @@ enum { * @start: iteration starting index * Returns: NULL */ -static __always_inline void __rcu ** +static __always_inline void ** radix_tree_iter_init(struct radix_tree_iter *iter, unsigned long start) { /* @@ -304,27 +392,9 @@ radix_tree_iter_init(struct radix_tree_iter *iter, unsigned long start) * Also it fills @iter with data about chunk: position in the tree (index), * its end (next_index), and constructs a bit mask for tagged iterating (tags). 
*/ -void __rcu **radix_tree_next_chunk(const struct radix_tree_root *, +void **radix_tree_next_chunk(struct radix_tree_root *root, struct radix_tree_iter *iter, unsigned flags); -/** - * radix_tree_iter_lookup - look up an index in the radix tree - * @root: radix tree root - * @iter: iterator state - * @index: key to look up - * - * If @index is present in the radix tree, this function returns the slot - * containing it and updates @iter to describe the entry. If @index is not - * present, it returns NULL. - */ -static inline void __rcu ** -radix_tree_iter_lookup(const struct radix_tree_root *root, - struct radix_tree_iter *iter, unsigned long index) -{ - radix_tree_iter_init(iter, index); - return radix_tree_next_chunk(root, iter, RADIX_TREE_ITER_CONTIG); -} - /** * radix_tree_iter_retry - retry this chunk of the iteration * @iter: iterator state @@ -335,7 +405,7 @@ radix_tree_iter_lookup(const struct radix_tree_root *root, * and continue the iteration. */ static inline __must_check -void __rcu **radix_tree_iter_retry(struct radix_tree_iter *iter) +void **radix_tree_iter_retry(struct radix_tree_iter *iter) { iter->next_index = iter->index; iter->tags = 0; @@ -345,21 +415,24 @@ void __rcu **radix_tree_iter_retry(struct radix_tree_iter *iter) static inline unsigned long __radix_tree_iter_add(struct radix_tree_iter *iter, unsigned long slots) { - return iter->index + slots; + return iter->index + (slots << iter_shift(iter)); } /** - * radix_tree_iter_resume - resume iterating when the chunk may be invalid - * @slot: pointer to current slot - * @iter: iterator state - * Returns: New slot pointer + * radix_tree_iter_next - resume iterating when the chunk may be invalid + * @iter: iterator state * * If the iterator needs to release then reacquire a lock, the chunk may * have been invalidated by an insertion or deletion. Call this function - * before releasing the lock to continue the iteration from the next index. + * to continue the iteration from the next index. 
*/ -void __rcu **__must_check radix_tree_iter_resume(void __rcu **slot, - struct radix_tree_iter *iter); +static inline __must_check +void **radix_tree_iter_next(struct radix_tree_iter *iter) +{ + iter->next_index = __radix_tree_iter_add(iter, 1); + iter->tags = 0; + return NULL; +} /** * radix_tree_chunk_size - get current chunk size @@ -370,14 +443,19 @@ void __rcu **__must_check radix_tree_iter_resume(void __rcu **slot, static __always_inline long radix_tree_chunk_size(struct radix_tree_iter *iter) { - return iter->next_index - iter->index; + return (iter->next_index - iter->index) >> iter_shift(iter); +} + +static inline struct radix_tree_node *entry_to_node(void *ptr) +{ + return (void *)((unsigned long)ptr & ~RADIX_TREE_INTERNAL_NODE); } /** * radix_tree_next_slot - find next slot in chunk * * @slot: pointer to current slot - * @iter: pointer to iterator state + * @iter: pointer to interator state * @flags: RADIX_TREE_ITER_*, should be constant * Returns: pointer to next slot, or NULL if there no more left * @@ -385,42 +463,62 @@ radix_tree_chunk_size(struct radix_tree_iter *iter) * For tagged lookup it also eats @iter->tags. * * There are several cases where 'slot' can be passed in as NULL to this - * function. These cases result from the use of radix_tree_iter_resume() or + * function. These cases result from the use of radix_tree_iter_next() or * radix_tree_iter_retry(). In these cases we don't end up dereferencing * 'slot' because either: * a) we are doing tagged iteration and iter->tags has been set to 0, or * b) we are doing non-tagged iteration, and iter->index and iter->next_index * have been set up so that radix_tree_chunk_size() returns 1 or 0. 
*/ -static __always_inline void __rcu **radix_tree_next_slot(void __rcu **slot, - struct radix_tree_iter *iter, unsigned flags) +static __always_inline void ** +radix_tree_next_slot(void **slot, struct radix_tree_iter *iter, unsigned flags) { if (flags & RADIX_TREE_ITER_TAGGED) { + void *canon = slot; + iter->tags >>= 1; if (unlikely(!iter->tags)) return NULL; + while (IS_ENABLED(CONFIG_RADIX_TREE_MULTIORDER) && + radix_tree_is_internal_node(slot[1])) { + if (entry_to_node(slot[1]) == canon) { + iter->tags >>= 1; + iter->index = __radix_tree_iter_add(iter, 1); + slot++; + continue; + } + iter->next_index = __radix_tree_iter_add(iter, 1); + return NULL; + } if (likely(iter->tags & 1ul)) { iter->index = __radix_tree_iter_add(iter, 1); - slot++; - goto found; + return slot + 1; } if (!(flags & RADIX_TREE_ITER_CONTIG)) { unsigned offset = __ffs(iter->tags); - iter->tags >>= offset++; - iter->index = __radix_tree_iter_add(iter, offset); - slot += offset; - goto found; + iter->tags >>= offset; + iter->index = __radix_tree_iter_add(iter, offset + 1); + return slot + offset + 1; } } else { long count = radix_tree_chunk_size(iter); + void *canon = slot; while (--count > 0) { slot++; iter->index = __radix_tree_iter_add(iter, 1); + if (IS_ENABLED(CONFIG_RADIX_TREE_MULTIORDER) && + radix_tree_is_internal_node(*slot)) { + if (entry_to_node(*slot) == canon) + continue; + iter->next_index = iter->index; + break; + } + if (likely(*slot)) - goto found; + return slot; if (flags & RADIX_TREE_ITER_CONTIG) { /* forbid switching to the next chunk */ iter->next_index = 0; @@ -429,9 +527,6 @@ static __always_inline void __rcu **radix_tree_next_slot(void __rcu **slot, } } return NULL; - - found: - return slot; } /** @@ -449,6 +544,23 @@ static __always_inline void __rcu **radix_tree_next_slot(void __rcu **slot, slot || (slot = radix_tree_next_chunk(root, iter, 0)) ; \ slot = radix_tree_next_slot(slot, iter, 0)) +/** + * radix_tree_for_each_contig - iterate over contiguous slots + * + * 
@slot: the void** variable for pointer to slot + * @root: the struct radix_tree_root pointer + * @iter: the struct radix_tree_iter pointer + * @start: iteration starting index + * + * @slot points to radix tree slot, @iter->index contains its index. + */ +#define radix_tree_for_each_contig(slot, root, iter, start) \ + for (slot = radix_tree_iter_init(iter, start) ; \ + slot || (slot = radix_tree_next_chunk(root, iter, \ + RADIX_TREE_ITER_CONTIG)) ; \ + slot = radix_tree_next_slot(slot, iter, \ + RADIX_TREE_ITER_CONTIG)) + /** * radix_tree_for_each_tagged - iterate over tagged slots * @@ -465,6 +577,6 @@ static __always_inline void __rcu **radix_tree_next_slot(void __rcu **slot, slot || (slot = radix_tree_next_chunk(root, iter, \ RADIX_TREE_ITER_TAGGED | tag)) ; \ slot = radix_tree_next_slot(slot, iter, \ - RADIX_TREE_ITER_TAGGED | tag)) + RADIX_TREE_ITER_TAGGED)) #endif /* _LINUX_RADIX_TREE_H */ diff --git a/include/linux/raid/md_u.h b/include/linux/raid/md_u.h new file mode 100644 index 0000000000..358c04bfbe --- /dev/null +++ b/include/linux/raid/md_u.h @@ -0,0 +1,20 @@ +/* + md_u.h : user <=> kernel API between Linux raidtools and RAID drivers + Copyright (C) 1998 Ingo Molnar + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2, or (at your option) + any later version. + + You should have received a copy of the GNU General Public License + (for example /usr/src/linux/COPYING); if not, write to the Free + Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 
+*/ +#ifndef _MD_U_H +#define _MD_U_H + +#include + +extern int mdp_major; +#endif diff --git a/include/linux/raid/pq.h b/include/linux/raid/pq.h index 154e954b71..4d57bbaaa1 100644 --- a/include/linux/raid/pq.h +++ b/include/linux/raid/pq.h @@ -1,8 +1,13 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ /* -*- linux-c -*- ------------------------------------------------------- * * * Copyright 2003 H. Peter Anvin - All Rights Reserved * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, Inc., 53 Temple Place Ste 330, + * Boston MA 02111-1307, USA; either version 2 of the License, or + * (at your option) any later version; incorporated herein by reference. + * * ----------------------------------------------------------------------- */ #ifndef LINUX_RAID_RAID6_H @@ -27,10 +32,9 @@ extern const char raid6_empty_zero_page[PAGE_SIZE]; #include #include +#include #include -#include #include -#include #include /* Not standard, but glibc defines it */ @@ -44,16 +48,11 @@ typedef uint64_t u64; #ifndef PAGE_SIZE # define PAGE_SIZE 4096 #endif -#ifndef PAGE_SHIFT -# define PAGE_SHIFT 12 -#endif extern const char raid6_empty_zero_page[PAGE_SIZE]; #define __init #define __exit -#ifndef __attribute_const__ -# define __attribute_const__ __attribute__((const)) -#endif +#define __attribute_const__ __attribute__((const)) #define noinline __attribute__((noinline)) #define preempt_enable() @@ -62,17 +61,12 @@ extern const char raid6_empty_zero_page[PAGE_SIZE]; #define enable_kernel_altivec() #define disable_kernel_altivec() -#undef EXPORT_SYMBOL #define EXPORT_SYMBOL(sym) -#undef EXPORT_SYMBOL_GPL #define EXPORT_SYMBOL_GPL(sym) #define MODULE_LICENSE(licence) #define MODULE_DESCRIPTION(desc) #define subsys_initcall(x) #define module_exit(x) - -#define IS_ENABLED(x) (x) -#define CONFIG_RAID6_PQ_BENCHMARK 1 #endif /* __KERNEL__ */ /* Routine choices */ @@ 
-111,11 +105,8 @@ extern const struct raid6_calls raid6_avx2x4; extern const struct raid6_calls raid6_avx512x1; extern const struct raid6_calls raid6_avx512x2; extern const struct raid6_calls raid6_avx512x4; +extern const struct raid6_calls raid6_tilegx8; extern const struct raid6_calls raid6_s390vx8; -extern const struct raid6_calls raid6_vpermxor1; -extern const struct raid6_calls raid6_vpermxor2; -extern const struct raid6_calls raid6_vpermxor4; -extern const struct raid6_calls raid6_vpermxor8; struct raid6_recov_calls { void (*data2)(int, size_t, int, int, void **); @@ -130,7 +121,6 @@ extern const struct raid6_recov_calls raid6_recov_ssse3; extern const struct raid6_recov_calls raid6_recov_avx2; extern const struct raid6_recov_calls raid6_recov_avx512; extern const struct raid6_recov_calls raid6_recov_s390xc; -extern const struct raid6_recov_calls raid6_recov_neon; extern const struct raid6_calls raid6_neonx1; extern const struct raid6_calls raid6_neonx2; @@ -152,7 +142,6 @@ int raid6_select_algo(void); extern const u8 raid6_gfmul[256][256] __attribute__((aligned(256))); extern const u8 raid6_vgfmul[256][32] __attribute__((aligned(256))); extern const u8 raid6_gfexp[256] __attribute__((aligned(256))); -extern const u8 raid6_gflog[256] __attribute__((aligned(256))); extern const u8 raid6_gfinv[256] __attribute__((aligned(256))); extern const u8 raid6_gfexi[256] __attribute__((aligned(256))); diff --git a/include/linux/raid/xor.h b/include/linux/raid/xor.h index 2a9fee8dda..5a210959e3 100644 --- a/include/linux/raid/xor.h +++ b/include/linux/raid/xor.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _XOR_H #define _XOR_H diff --git a/include/linux/raid_class.h b/include/linux/raid_class.h index 5cdfcb873a..31e1ff69ef 100644 --- a/include/linux/raid_class.h +++ b/include/linux/raid_class.h @@ -1,8 +1,9 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * raid_class.h - a generic raid visualisation class * * Copyright (c) 2005 - James Bottomley + * 
+ * This file is licensed under GPLv2 */ #include @@ -37,7 +38,6 @@ enum raid_level { RAID_LEVEL_5, RAID_LEVEL_50, RAID_LEVEL_6, - RAID_LEVEL_JBOD, }; struct raid_data { diff --git a/include/linux/ramfs.h b/include/linux/ramfs.h index 917528d102..ecc730977a 100644 --- a/include/linux/ramfs.h +++ b/include/linux/ramfs.h @@ -1,12 +1,10 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_RAMFS_H #define _LINUX_RAMFS_H -#include // bleh... - struct inode *ramfs_get_inode(struct super_block *sb, const struct inode *dir, umode_t mode, dev_t dev); -extern int ramfs_init_fs_context(struct fs_context *fc); +extern struct dentry *ramfs_mount(struct file_system_type *fs_type, + int flags, const char *dev_name, void *data); #ifdef CONFIG_MMU static inline int @@ -18,8 +16,10 @@ ramfs_nommu_expand_for_mapping(struct inode *inode, size_t newsize) extern int ramfs_nommu_expand_for_mapping(struct inode *inode, size_t newsize); #endif -extern const struct fs_parameter_spec ramfs_fs_parameters[]; extern const struct file_operations ramfs_file_operations; extern const struct vm_operations_struct generic_file_vm_ops; +extern int __init init_ramfs_fs(void); + +int ramfs_fill_super(struct super_block *sb, void *data, int silent); #endif diff --git a/include/linux/random.h b/include/linux/random.h index f45b8be3e3..147be0025a 100644 --- a/include/linux/random.h +++ b/include/linux/random.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ /* * include/linux/random.h * @@ -7,8 +6,6 @@ #ifndef _LINUX_RANDOM_H #define _LINUX_RANDOM_H -#include -#include #include #include @@ -21,9 +18,8 @@ struct random_ready_callback { }; extern void add_device_randomness(const void *, unsigned int); -extern void add_bootloader_randomness(const void *, unsigned int); -#if defined(LATENT_ENTROPY_PLUGIN) && !defined(__CHECKER__) +#if defined(CONFIG_GCC_PLUGIN_LATENT_ENTROPY) && !defined(__CHECKER__) static inline void add_latent_entropy(void) { add_device_randomness((const void *)&latent_entropy, 
@@ -38,124 +34,112 @@ extern void add_input_randomness(unsigned int type, unsigned int code, extern void add_interrupt_randomness(int irq, int irq_flags) __latent_entropy; extern void get_random_bytes(void *buf, int nbytes); -extern int wait_for_random_bytes(void); -extern int __init rand_initialize(void); -extern bool rng_is_initialized(void); extern int add_random_ready_callback(struct random_ready_callback *rdy); extern void del_random_ready_callback(struct random_ready_callback *rdy); -extern int __must_check get_random_bytes_arch(void *buf, int nbytes); +extern void get_random_bytes_arch(void *buf, int nbytes); #ifndef MODULE extern const struct file_operations random_fops, urandom_fops; #endif -u32 get_random_u32(void); -u64 get_random_u64(void); -static inline unsigned int get_random_int(void) -{ - return get_random_u32(); -} -static inline unsigned long get_random_long(void) -{ -#if BITS_PER_LONG == 64 - return get_random_u64(); -#else - return get_random_u32(); -#endif -} - -/* - * On 64-bit architectures, protect against non-terminated C string overflows - * by zeroing out the first byte of the canary; this leaves 56 bits of entropy. - */ -#ifdef CONFIG_64BIT -# ifdef __LITTLE_ENDIAN -# define CANARY_MASK 0xffffffffffffff00UL -# else /* big endian, 64 bits: */ -# define CANARY_MASK 0x00ffffffffffffffUL -# endif -#else /* 32 bits: */ -# define CANARY_MASK 0xffffffffUL -#endif - -static inline unsigned long get_random_canary(void) -{ - unsigned long val = get_random_long(); - - return val & CANARY_MASK; -} - -/* Calls wait_for_random_bytes() and then calls get_random_bytes(buf, nbytes). - * Returns the result of the call to wait_for_random_bytes. 
*/ -static inline int get_random_bytes_wait(void *buf, int nbytes) -{ - int ret = wait_for_random_bytes(); - get_random_bytes(buf, nbytes); - return ret; -} - -#define declare_get_random_var_wait(var) \ - static inline int get_random_ ## var ## _wait(var *out) { \ - int ret = wait_for_random_bytes(); \ - if (unlikely(ret)) \ - return ret; \ - *out = get_random_ ## var(); \ - return 0; \ - } -declare_get_random_var_wait(u32) -declare_get_random_var_wait(u64) -declare_get_random_var_wait(int) -declare_get_random_var_wait(long) -#undef declare_get_random_var - +unsigned int get_random_int(void); +unsigned long get_random_long(void); unsigned long randomize_page(unsigned long start, unsigned long range); -/* - * This is designed to be standalone for just prandom - * users, but for now we include it from - * for legacy reasons. +u32 prandom_u32(void); +void prandom_bytes(void *buf, size_t nbytes); +void prandom_seed(u32 seed); +void prandom_reseed_late(void); + +struct rnd_state { + __u32 s1, s2, s3, s4; +}; + +u32 prandom_u32_state(struct rnd_state *state); +void prandom_bytes_state(struct rnd_state *state, void *buf, size_t nbytes); +void prandom_seed_full_state(struct rnd_state __percpu *pcpu_state); + +#define prandom_init_once(pcpu_state) \ + DO_ONCE(prandom_seed_full_state, (pcpu_state)) + +static inline unsigned long __intentional_overflow(-1) pax_get_random_long(void) +{ + return prandom_u32() + (sizeof(long) > 4 ? (unsigned long)prandom_u32() << 32 : 0); +} + +/** + * prandom_u32_max - returns a pseudo-random number in interval [0, ep_ro) + * @ep_ro: right open interval endpoint + * + * Returns a pseudo-random number that is in interval [0, ep_ro). Note + * that the result depends on PRNG being well distributed in [0, ~0U] + * u32 space. Here we use maximally equidistributed combined Tausworthe + * generator, that is, prandom_u32(). This is useful when requesting a + * random index of an array containing ep_ro elements, for example. 
+ * + * Returns: pseudo-random number in interval [0, ep_ro) */ -#include +static inline u32 __intentional_overflow(-1) prandom_u32_max(u32 ep_ro) +{ + return (u32)(((u64) prandom_u32() * ep_ro) >> 32); +} + +/* + * Handle minimum values for seeds + */ +static inline u32 __seed(u32 x, u32 m) +{ + return (x < m) ? x + m : x; +} + +/** + * prandom_seed_state - set seed for prandom_u32_state(). + * @state: pointer to state structure to receive the seed. + * @seed: arbitrary 64-bit value to use as a seed. + */ +static inline void prandom_seed_state(struct rnd_state *state, u64 seed) +{ + u32 i = (seed >> 32) ^ (seed << 10) ^ seed; + + state->s1 = __seed(i, 2U); + state->s2 = __seed(i, 8U); + state->s3 = __seed(i, 16U); + state->s4 = __seed(i, 128U); +} #ifdef CONFIG_ARCH_RANDOM # include #else -static inline bool __must_check arch_get_random_long(unsigned long *v) +static inline bool arch_get_random_long(unsigned long *v) { - return false; + return 0; } -static inline bool __must_check arch_get_random_int(unsigned int *v) +static inline bool arch_get_random_int(unsigned int *v) { - return false; + return 0; } -static inline bool __must_check arch_get_random_seed_long(unsigned long *v) +static inline bool arch_has_random(void) { - return false; + return 0; } -static inline bool __must_check arch_get_random_seed_int(unsigned int *v) +static inline bool arch_get_random_seed_long(unsigned long *v) { - return false; + return 0; +} +static inline bool arch_get_random_seed_int(unsigned int *v) +{ + return 0; +} +static inline bool arch_has_random_seed(void) +{ + return 0; } #endif -/* - * Called from the boot CPU during startup; not valid to call once - * secondary CPUs are up and preemption is possible. - */ -#ifndef arch_get_random_seed_long_early -static inline bool __init arch_get_random_seed_long_early(unsigned long *v) +/* Pseudo random number generator from numerical recipes. 
*/ +static inline u32 next_pseudo_random32(u32 seed) { - WARN_ON(system_state != SYSTEM_BOOTING); - return arch_get_random_seed_long(v); + return seed * 1664525 + 1013904223; } -#endif - -#ifndef arch_get_random_long_early -static inline bool __init arch_get_random_long_early(unsigned long *v) -{ - WARN_ON(system_state != SYSTEM_BOOTING); - return arch_get_random_long(v); -} -#endif #endif /* _LINUX_RANDOM_H */ diff --git a/include/linux/range.h b/include/linux/range.h index 274681cc31..bd184a5db7 100644 --- a/include/linux/range.h +++ b/include/linux/range.h @@ -1,18 +1,11 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_RANGE_H #define _LINUX_RANGE_H -#include struct range { u64 start; u64 end; }; -static inline u64 range_len(const struct range *range) -{ - return range->end - range->start + 1; -} - int add_range(struct range *range, int az, int nr_range, u64 start, u64 end); diff --git a/include/linux/ras.h b/include/linux/ras.h index 1f4048bf26..2aceeafd6f 100644 --- a/include/linux/ras.h +++ b/include/linux/ras.h @@ -1,38 +1,14 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef __RAS_H__ #define __RAS_H__ -#include -#include -#include - #ifdef CONFIG_DEBUG_FS int ras_userspace_consumers(void); void ras_debugfs_init(void); int ras_add_daemon_trace(void); #else static inline int ras_userspace_consumers(void) { return 0; } -static inline void ras_debugfs_init(void) { } +static inline void ras_debugfs_init(void) { return; } static inline int ras_add_daemon_trace(void) { return 0; } #endif -#ifdef CONFIG_RAS_CEC -int __init parse_cec_param(char *str); #endif - -#ifdef CONFIG_RAS -void log_non_standard_event(const guid_t *sec_type, - const guid_t *fru_id, const char *fru_text, - const u8 sev, const u8 *err, const u32 len); -void log_arm_hw_error(struct cper_sec_proc_arm *err); -#else -static inline void -log_non_standard_event(const guid_t *sec_type, - const guid_t *fru_id, const char *fru_text, - const u8 sev, const u8 *err, const u32 len) -{ return; } 
-static inline void -log_arm_hw_error(struct cper_sec_proc_arm *err) { return; } -#endif - -#endif /* __RAS_H__ */ diff --git a/include/linux/ratelimit.h b/include/linux/ratelimit.h index b17e0cd0a3..fe141261a4 100644 --- a/include/linux/ratelimit.h +++ b/include/linux/ratelimit.h @@ -1,11 +1,41 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_RATELIMIT_H #define _LINUX_RATELIMIT_H -#include +#include #include #include +#define DEFAULT_RATELIMIT_INTERVAL (5 * HZ) +#define DEFAULT_RATELIMIT_BURST 10 + +/* issue num suppressed message on exit */ +#define RATELIMIT_MSG_ON_RELEASE BIT(0) + +struct ratelimit_state { + raw_spinlock_t lock; /* protect the state */ + + int interval; + int burst; + int printed; + int missed; + unsigned long begin; + unsigned long flags; +}; + +#define RATELIMIT_STATE_INIT(name, interval_init, burst_init) { \ + .lock = __RAW_SPIN_LOCK_UNLOCKED(name.lock), \ + .interval = interval_init, \ + .burst = burst_init, \ + } + +#define RATELIMIT_STATE_INIT_DISABLED \ + RATELIMIT_STATE_INIT(ratelimit_state, 0, DEFAULT_RATELIMIT_BURST) + +#define DEFINE_RATELIMIT_STATE(name, interval_init, burst_init) \ + \ + struct ratelimit_state name = \ + RATELIMIT_STATE_INIT(name, interval_init, burst_init) \ + static inline void ratelimit_state_init(struct ratelimit_state *rs, int interval, int burst) { @@ -42,13 +72,14 @@ ratelimit_set_flags(struct ratelimit_state *rs, unsigned long flags) extern struct ratelimit_state printk_ratelimit_state; +extern __nocapture(2) +int ___ratelimit(struct ratelimit_state *rs, const char *func); +#define __ratelimit(state) ___ratelimit(state, __func__) + #ifdef CONFIG_PRINTK -#define WARN_ON_RATELIMIT(condition, state) ({ \ - bool __rtn_cond = !!(condition); \ - WARN_ON(__rtn_cond && __ratelimit(state)); \ - __rtn_cond; \ -}) +#define WARN_ON_RATELIMIT(condition, state) \ + WARN_ON((condition) && __ratelimit(state)) #define WARN_RATELIMIT(condition, format, ...) 
\ ({ \ diff --git a/include/linux/rational.h b/include/linux/rational.h index 33f5f5fc3e..bfa6a2bcfb 100644 --- a/include/linux/rational.h +++ b/include/linux/rational.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ /* * rational fractions * diff --git a/include/linux/rbtree.h b/include/linux/rbtree.h index 235047d7a1..e585018498 100644 --- a/include/linux/rbtree.h +++ b/include/linux/rbtree.h @@ -1,8 +1,20 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ /* Red Black Trees (C) 1999 Andrea Arcangeli + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA linux/include/linux/rbtree.h @@ -11,20 +23,31 @@ I know it's not the cleaner way, but in C (not in C++) to get performances and genericity... - See Documentation/core-api/rbtree.rst for documentation and samples. + See Documentation/rbtree.txt for documentation and samples. 
*/ #ifndef _LINUX_RBTREE_H #define _LINUX_RBTREE_H -#include - #include #include #include +struct rb_node { + unsigned long __rb_parent_color; + struct rb_node *rb_right; + struct rb_node *rb_left; +} __attribute__((aligned(sizeof(long)))); + /* The alignment might seem pointless, but allegedly CRIS needs it */ + +struct rb_root { + struct rb_node *rb_node; +}; + + #define rb_parent(r) ((struct rb_node *)((r)->__rb_parent_color & ~3)) +#define RB_ROOT (struct rb_root) { NULL, } #define rb_entry(ptr, type, member) container_of(ptr, type, member) #define RB_EMPTY_ROOT(root) (READ_ONCE((root)->rb_node) == NULL) @@ -102,233 +125,4 @@ static inline void rb_link_node_rcu(struct rb_node *node, struct rb_node *parent typeof(*pos), field); 1; }); \ pos = n) -/* Same as rb_first(), but O(1) */ -#define rb_first_cached(root) (root)->rb_leftmost - -static inline void rb_insert_color_cached(struct rb_node *node, - struct rb_root_cached *root, - bool leftmost) -{ - if (leftmost) - root->rb_leftmost = node; - rb_insert_color(node, &root->rb_root); -} - - -static inline struct rb_node * -rb_erase_cached(struct rb_node *node, struct rb_root_cached *root) -{ - struct rb_node *leftmost = NULL; - - if (root->rb_leftmost == node) - leftmost = root->rb_leftmost = rb_next(node); - - rb_erase(node, &root->rb_root); - - return leftmost; -} - -static inline void rb_replace_node_cached(struct rb_node *victim, - struct rb_node *new, - struct rb_root_cached *root) -{ - if (root->rb_leftmost == victim) - root->rb_leftmost = new; - rb_replace_node(victim, new, &root->rb_root); -} - -/* - * The below helper functions use 2 operators with 3 different - * calling conventions. The operators are related like: - * - * comp(a->key,b) < 0 := less(a,b) - * comp(a->key,b) > 0 := less(b,a) - * comp(a->key,b) == 0 := !less(a,b) && !less(b,a) - * - * If these operators define a partial order on the elements we make no - * guarantee on which of the elements matching the key is found. See - * rb_find(). 
- * - * The reason for this is to allow the find() interface without requiring an - * on-stack dummy object, which might not be feasible due to object size. - */ - -/** - * rb_add_cached() - insert @node into the leftmost cached tree @tree - * @node: node to insert - * @tree: leftmost cached tree to insert @node into - * @less: operator defining the (partial) node order - * - * Returns @node when it is the new leftmost, or NULL. - */ -static __always_inline struct rb_node * -rb_add_cached(struct rb_node *node, struct rb_root_cached *tree, - bool (*less)(struct rb_node *, const struct rb_node *)) -{ - struct rb_node **link = &tree->rb_root.rb_node; - struct rb_node *parent = NULL; - bool leftmost = true; - - while (*link) { - parent = *link; - if (less(node, parent)) { - link = &parent->rb_left; - } else { - link = &parent->rb_right; - leftmost = false; - } - } - - rb_link_node(node, parent, link); - rb_insert_color_cached(node, tree, leftmost); - - return leftmost ? node : NULL; -} - -/** - * rb_add() - insert @node into @tree - * @node: node to insert - * @tree: tree to insert @node into - * @less: operator defining the (partial) node order - */ -static __always_inline void -rb_add(struct rb_node *node, struct rb_root *tree, - bool (*less)(struct rb_node *, const struct rb_node *)) -{ - struct rb_node **link = &tree->rb_node; - struct rb_node *parent = NULL; - - while (*link) { - parent = *link; - if (less(node, parent)) - link = &parent->rb_left; - else - link = &parent->rb_right; - } - - rb_link_node(node, parent, link); - rb_insert_color(node, tree); -} - -/** - * rb_find_add() - find equivalent @node in @tree, or add @node - * @node: node to look-for / insert - * @tree: tree to search / modify - * @cmp: operator defining the node order - * - * Returns the rb_node matching @node, or NULL when no match is found and @node - * is inserted. 
- */ -static __always_inline struct rb_node * -rb_find_add(struct rb_node *node, struct rb_root *tree, - int (*cmp)(struct rb_node *, const struct rb_node *)) -{ - struct rb_node **link = &tree->rb_node; - struct rb_node *parent = NULL; - int c; - - while (*link) { - parent = *link; - c = cmp(node, parent); - - if (c < 0) - link = &parent->rb_left; - else if (c > 0) - link = &parent->rb_right; - else - return parent; - } - - rb_link_node(node, parent, link); - rb_insert_color(node, tree); - return NULL; -} - -/** - * rb_find() - find @key in tree @tree - * @key: key to match - * @tree: tree to search - * @cmp: operator defining the node order - * - * Returns the rb_node matching @key or NULL. - */ -static __always_inline struct rb_node * -rb_find(const void *key, const struct rb_root *tree, - int (*cmp)(const void *key, const struct rb_node *)) -{ - struct rb_node *node = tree->rb_node; - - while (node) { - int c = cmp(key, node); - - if (c < 0) - node = node->rb_left; - else if (c > 0) - node = node->rb_right; - else - return node; - } - - return NULL; -} - -/** - * rb_find_first() - find the first @key in @tree - * @key: key to match - * @tree: tree to search - * @cmp: operator defining node order - * - * Returns the leftmost node matching @key, or NULL. - */ -static __always_inline struct rb_node * -rb_find_first(const void *key, const struct rb_root *tree, - int (*cmp)(const void *key, const struct rb_node *)) -{ - struct rb_node *node = tree->rb_node; - struct rb_node *match = NULL; - - while (node) { - int c = cmp(key, node); - - if (c <= 0) { - if (!c) - match = node; - node = node->rb_left; - } else if (c > 0) { - node = node->rb_right; - } - } - - return match; -} - -/** - * rb_next_match() - find the next @key in @tree - * @key: key to match - * @tree: tree to search - * @cmp: operator defining node order - * - * Returns the next node matching @key, or NULL. 
- */ -static __always_inline struct rb_node * -rb_next_match(const void *key, struct rb_node *node, - int (*cmp)(const void *key, const struct rb_node *)) -{ - node = rb_next(node); - if (node && cmp(key, node)) - node = NULL; - return node; -} - -/** - * rb_for_each() - iterates a subtree matching @key - * @node: iterator - * @key: key to match - * @tree: tree to search - * @cmp: operator defining node order - */ -#define rb_for_each(node, key, tree, cmp) \ - for ((node) = rb_find_first((key), (tree), (cmp)); \ - (node); (node) = rb_next_match((key), (node), (cmp))) - #endif /* _LINUX_RBTREE_H */ diff --git a/include/linux/rbtree_augmented.h b/include/linux/rbtree_augmented.h index d1c53e9d8c..9702b6e183 100644 --- a/include/linux/rbtree_augmented.h +++ b/include/linux/rbtree_augmented.h @@ -1,10 +1,22 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ /* Red Black Trees (C) 1999 Andrea Arcangeli (C) 2002 David Woodhouse (C) 2012 Michel Lespinasse + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA linux/include/linux/rbtree_augmented.h */ @@ -14,14 +26,13 @@ #include #include -#include /* * Please note - only struct rb_augment_callbacks and the prototypes for * rb_insert_augmented() and rb_erase_augmented() are intended to be public. * The rest are implementation details you are not expected to depend on. 
* - * See Documentation/core-api/rbtree.rst for documentation and samples. + * See Documentation/rbtree.txt for documentation and samples. */ struct rb_augment_callbacks { @@ -32,14 +43,13 @@ struct rb_augment_callbacks { extern void __rb_insert_augmented(struct rb_node *node, struct rb_root *root, void (*augment_rotate)(struct rb_node *old, struct rb_node *new)); - /* * Fixup the rbtree and update the augmented information when rebalancing. * * On insertion, the user must update the augmented information on the path * leading to the inserted node, then call rb_link_node() as usual and - * rb_insert_augmented() instead of the usual rb_insert_color() call. - * If rb_insert_augmented() rebalances the rbtree, it will callback into + * rb_augment_inserted() instead of the usual rb_insert_color() call. + * If rb_augment_inserted() rebalances the rbtree, it will callback into * a user provided function to update the augmented information on the * affected subtrees. */ @@ -50,97 +60,41 @@ rb_insert_augmented(struct rb_node *node, struct rb_root *root, __rb_insert_augmented(node, root, augment->rotate); } -static inline void -rb_insert_augmented_cached(struct rb_node *node, - struct rb_root_cached *root, bool newleft, - const struct rb_augment_callbacks *augment) -{ - if (newleft) - root->rb_leftmost = node; - rb_insert_augmented(node, &root->rb_root, augment); -} - -/* - * Template for declaring augmented rbtree callbacks (generic case) - * - * RBSTATIC: 'static' or empty - * RBNAME: name of the rb_augment_callbacks structure - * RBSTRUCT: struct type of the tree nodes - * RBFIELD: name of struct rb_node field within RBSTRUCT - * RBAUGMENTED: name of field within RBSTRUCT holding data for subtree - * RBCOMPUTE: name of function that recomputes the RBAUGMENTED data - */ - -#define RB_DECLARE_CALLBACKS(RBSTATIC, RBNAME, \ - RBSTRUCT, RBFIELD, RBAUGMENTED, RBCOMPUTE) \ +#define RB_DECLARE_CALLBACKS(rbstatic, rbname, rbstruct, rbfield, \ + rbtype, rbaugmented, rbcompute) \ 
static inline void \ -RBNAME ## _propagate(struct rb_node *rb, struct rb_node *stop) \ +rbname ## _propagate(struct rb_node *rb, struct rb_node *stop) \ { \ while (rb != stop) { \ - RBSTRUCT *node = rb_entry(rb, RBSTRUCT, RBFIELD); \ - if (RBCOMPUTE(node, true)) \ + rbstruct *node = rb_entry(rb, rbstruct, rbfield); \ + rbtype augmented = rbcompute(node); \ + if (node->rbaugmented == augmented) \ break; \ - rb = rb_parent(&node->RBFIELD); \ + node->rbaugmented = augmented; \ + rb = rb_parent(&node->rbfield); \ } \ } \ static inline void \ -RBNAME ## _copy(struct rb_node *rb_old, struct rb_node *rb_new) \ +rbname ## _copy(struct rb_node *rb_old, struct rb_node *rb_new) \ { \ - RBSTRUCT *old = rb_entry(rb_old, RBSTRUCT, RBFIELD); \ - RBSTRUCT *new = rb_entry(rb_new, RBSTRUCT, RBFIELD); \ - new->RBAUGMENTED = old->RBAUGMENTED; \ + rbstruct *old = rb_entry(rb_old, rbstruct, rbfield); \ + rbstruct *new = rb_entry(rb_new, rbstruct, rbfield); \ + new->rbaugmented = old->rbaugmented; \ } \ static void \ -RBNAME ## _rotate(struct rb_node *rb_old, struct rb_node *rb_new) \ +rbname ## _rotate(struct rb_node *rb_old, struct rb_node *rb_new) \ { \ - RBSTRUCT *old = rb_entry(rb_old, RBSTRUCT, RBFIELD); \ - RBSTRUCT *new = rb_entry(rb_new, RBSTRUCT, RBFIELD); \ - new->RBAUGMENTED = old->RBAUGMENTED; \ - RBCOMPUTE(old, false); \ + rbstruct *old = rb_entry(rb_old, rbstruct, rbfield); \ + rbstruct *new = rb_entry(rb_new, rbstruct, rbfield); \ + new->rbaugmented = old->rbaugmented; \ + old->rbaugmented = rbcompute(old); \ } \ -RBSTATIC const struct rb_augment_callbacks RBNAME = { \ - .propagate = RBNAME ## _propagate, \ - .copy = RBNAME ## _copy, \ - .rotate = RBNAME ## _rotate \ +rbstatic const struct rb_augment_callbacks rbname = { \ + .propagate = rbname ## _propagate, \ + .copy = rbname ## _copy, \ + .rotate = rbname ## _rotate \ }; -/* - * Template for declaring augmented rbtree callbacks, - * computing RBAUGMENTED scalar as max(RBCOMPUTE(node)) for all subtree nodes. 
- * - * RBSTATIC: 'static' or empty - * RBNAME: name of the rb_augment_callbacks structure - * RBSTRUCT: struct type of the tree nodes - * RBFIELD: name of struct rb_node field within RBSTRUCT - * RBTYPE: type of the RBAUGMENTED field - * RBAUGMENTED: name of RBTYPE field within RBSTRUCT holding data for subtree - * RBCOMPUTE: name of function that returns the per-node RBTYPE scalar - */ - -#define RB_DECLARE_CALLBACKS_MAX(RBSTATIC, RBNAME, RBSTRUCT, RBFIELD, \ - RBTYPE, RBAUGMENTED, RBCOMPUTE) \ -static inline bool RBNAME ## _compute_max(RBSTRUCT *node, bool exit) \ -{ \ - RBSTRUCT *child; \ - RBTYPE max = RBCOMPUTE(node); \ - if (node->RBFIELD.rb_left) { \ - child = rb_entry(node->RBFIELD.rb_left, RBSTRUCT, RBFIELD); \ - if (child->RBAUGMENTED > max) \ - max = child->RBAUGMENTED; \ - } \ - if (node->RBFIELD.rb_right) { \ - child = rb_entry(node->RBFIELD.rb_right, RBSTRUCT, RBFIELD); \ - if (child->RBAUGMENTED > max) \ - max = child->RBAUGMENTED; \ - } \ - if (exit && node->RBAUGMENTED == max) \ - return true; \ - node->RBAUGMENTED = max; \ - return false; \ -} \ -RB_DECLARE_CALLBACKS(RBSTATIC, RBNAME, \ - RBSTRUCT, RBFIELD, RBAUGMENTED, RBNAME ## _compute_max) - #define RB_RED 0 #define RB_BLACK 1 @@ -283,12 +237,14 @@ __rb_erase_augmented(struct rb_node *node, struct rb_root *root, __rb_change_child(node, successor, tmp, root); if (child2) { + successor->__rb_parent_color = pc; rb_set_parent_color(child2, parent, RB_BLACK); rebalance = NULL; } else { - rebalance = rb_is_black(successor) ? parent : NULL; + unsigned long pc2 = successor->__rb_parent_color; + successor->__rb_parent_color = pc; + rebalance = __rb_is_black(pc2) ? 
parent : NULL; } - successor->__rb_parent_color = pc; tmp = successor; } @@ -305,13 +261,4 @@ rb_erase_augmented(struct rb_node *node, struct rb_root *root, __rb_erase_color(rebalance, root, augment->rotate); } -static __always_inline void -rb_erase_augmented_cached(struct rb_node *node, struct rb_root_cached *root, - const struct rb_augment_callbacks *augment) -{ - if (root->rb_leftmost == node) - root->rb_leftmost = rb_next(node); - rb_erase_augmented(node, &root->rb_root, augment); -} - #endif /* _LINUX_RBTREE_AUGMENTED_H */ diff --git a/include/linux/rbtree_latch.h b/include/linux/rbtree_latch.h index 3d1a9e716b..4f3432c61d 100644 --- a/include/linux/rbtree_latch.h +++ b/include/linux/rbtree_latch.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ /* * Latched RB-trees * @@ -35,15 +34,14 @@ #include #include -#include struct latch_tree_node { struct rb_node node[2]; }; struct latch_tree_root { - seqcount_latch_t seq; - struct rb_root tree[2]; + seqcount_t seq; + struct rb_root tree[2]; }; /** @@ -206,7 +204,7 @@ latch_tree_find(void *key, struct latch_tree_root *root, do { seq = raw_read_seqcount_latch(&root->seq); node = __lt_find(key, root, seq & 1, ops->comp); - } while (read_seqcount_latch_retry(&root->seq, seq)); + } while (read_seqcount_retry(&root->seq, seq)); return node; } diff --git a/include/linux/rcu_sync.h b/include/linux/rcu_sync.h index 0027d4c808..ece7ed9a4a 100644 --- a/include/linux/rcu_sync.h +++ b/include/linux/rcu_sync.h @@ -1,7 +1,20 @@ -/* SPDX-License-Identifier: GPL-2.0+ */ /* * RCU-based infrastructure for lightweight reader-writer locking * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. 
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, you can access it online at + * http://www.gnu.org/licenses/gpl-2.0.html. + * * Copyright (c) 2015, Red Hat, Inc. * * Author: Oleg Nesterov @@ -13,42 +26,62 @@ #include #include +enum rcu_sync_type { RCU_SYNC, RCU_SCHED_SYNC, RCU_BH_SYNC }; + /* Structure to mediate between updaters and fastpath-using readers. */ struct rcu_sync { int gp_state; int gp_count; wait_queue_head_t gp_wait; + int cb_state; struct rcu_head cb_head; + + enum rcu_sync_type gp_type; }; +extern void rcu_sync_lockdep_assert(struct rcu_sync *); + /** * rcu_sync_is_idle() - Are readers permitted to use their fastpaths? * @rsp: Pointer to rcu_sync structure to use for synchronization * - * Returns true if readers are permitted to use their fastpaths. Must be - * invoked within some flavor of RCU read-side critical section. + * Returns true if readers are permitted to use their fastpaths. + * Must be invoked within an RCU read-side critical section whose + * flavor matches that of the rcu_sync struture. 
*/ static inline bool rcu_sync_is_idle(struct rcu_sync *rsp) { - RCU_LOCKDEP_WARN(!rcu_read_lock_any_held(), - "suspicious rcu_sync_is_idle() usage"); - return !READ_ONCE(rsp->gp_state); /* GP_IDLE */ +#ifdef CONFIG_PROVE_RCU + rcu_sync_lockdep_assert(rsp); +#endif + return !rsp->gp_state; /* GP_IDLE */ } -extern void rcu_sync_init(struct rcu_sync *); +extern void rcu_sync_init(struct rcu_sync *, enum rcu_sync_type); extern void rcu_sync_enter_start(struct rcu_sync *); extern void rcu_sync_enter(struct rcu_sync *); extern void rcu_sync_exit(struct rcu_sync *); extern void rcu_sync_dtor(struct rcu_sync *); -#define __RCU_SYNC_INITIALIZER(name) { \ +#define __RCU_SYNC_INITIALIZER(name, type) { \ .gp_state = 0, \ .gp_count = 0, \ .gp_wait = __WAIT_QUEUE_HEAD_INITIALIZER(name.gp_wait), \ + .cb_state = 0, \ + .gp_type = type, \ } -#define DEFINE_RCU_SYNC(name) \ - struct rcu_sync name = __RCU_SYNC_INITIALIZER(name) +#define __DEFINE_RCU_SYNC(name, type) \ + struct rcu_sync_struct name = __RCU_SYNC_INITIALIZER(name, type) + +#define DEFINE_RCU_SYNC(name) \ + __DEFINE_RCU_SYNC(name, RCU_SYNC) + +#define DEFINE_RCU_SCHED_SYNC(name) \ + __DEFINE_RCU_SYNC(name, RCU_SCHED_SYNC) + +#define DEFINE_RCU_BH_SYNC(name) \ + __DEFINE_RCU_SYNC(name, RCU_BH_SYNC) #endif /* _LINUX_RCU_SYNC_H_ */ diff --git a/include/linux/rculist.h b/include/linux/rculist.h index d29740be48..c515d454b4 100644 --- a/include/linux/rculist.h +++ b/include/linux/rculist.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_RCULIST_H #define _LINUX_RCULIST_H @@ -10,6 +9,15 @@ #include #include +/* + * Why is there no list_empty_rcu()? Because list_empty() serves this + * purpose. The list_empty() function fetches the RCU-protected pointer + * and compares it to the address of the list head, but neither dereferences + * this pointer itself nor provides this pointer to the caller. 
Therefore, + * it is not necessary to use rcu_dereference(), so that list_empty() can + * be used anywhere you would want to use a list_empty_rcu(). + */ + /* * INIT_LIST_HEAD_RCU - Initialize a list_head visible to RCU readers * @list: list to be initialized @@ -31,59 +39,28 @@ static inline void INIT_LIST_HEAD_RCU(struct list_head *list) */ #define list_next_rcu(list) (*((struct list_head __rcu **)(&(list)->next))) -/** - * list_tail_rcu - returns the prev pointer of the head of the list - * @head: the head of the list - * - * Note: This should only be used with the list header, and even then - * only if list_del() and similar primitives are not also used on the - * list header. - */ -#define list_tail_rcu(head) (*((struct list_head __rcu **)(&(head)->prev))) - -/* - * Check during list traversal that we are within an RCU reader - */ - -#define check_arg_count_one(dummy) - -#ifdef CONFIG_PROVE_RCU_LIST -#define __list_check_rcu(dummy, cond, extra...) \ - ({ \ - check_arg_count_one(extra); \ - RCU_LOCKDEP_WARN(!(cond) && !rcu_read_lock_any_held(), \ - "RCU-list traversed in non-reader section!"); \ - }) - -#define __list_check_srcu(cond) \ - ({ \ - RCU_LOCKDEP_WARN(!(cond), \ - "RCU-list traversed without holding the required lock!");\ - }) -#else -#define __list_check_rcu(dummy, cond, extra...) \ - ({ check_arg_count_one(extra); }) - -#define __list_check_srcu(cond) ({ }) -#endif - /* * Insert a new entry between two known consecutive entries. * * This is only for internal list manipulation where we know * the prev/next entries already! 
*/ +#ifndef CONFIG_DEBUG_LIST static inline void __list_add_rcu(struct list_head *new, struct list_head *prev, struct list_head *next) { - if (!__list_add_valid(new, prev, next)) - return; - new->next = next; new->prev = prev; rcu_assign_pointer(list_next_rcu(prev), new); next->prev = new; } +#else +void __list_add_rcu(struct list_head *new, + struct list_head *prev, struct list_head *next); +#endif + +void __pax_list_add_rcu(struct list_head *new, + struct list_head *prev, struct list_head *next); /** * list_add_rcu - add a new entry to rcu-protected list @@ -106,6 +83,11 @@ static inline void list_add_rcu(struct list_head *new, struct list_head *head) __list_add_rcu(new, head, head->next); } +static inline void pax_list_add_rcu(struct list_head *new, struct list_head *head) +{ + __pax_list_add_rcu(new, head, head->next); +} + /** * list_add_tail_rcu - add a new entry to rcu-protected list * @new: new entry to be added @@ -128,6 +110,12 @@ static inline void list_add_tail_rcu(struct list_head *new, __list_add_rcu(new, head->prev, head); } +static inline void pax_list_add_tail_rcu(struct list_head *new, + struct list_head *head) +{ + __pax_list_add_rcu(new, head->prev, head); +} + /** * list_del_rcu - deletes entry from list without re-initialization * @entry: the element to delete from the list. @@ -158,6 +146,8 @@ static inline void list_del_rcu(struct list_head *entry) entry->prev = LIST_POISON2; } +extern void pax_list_del_rcu(struct list_head *entry); + /** * hlist_del_init_rcu - deletes entry from hash list with re-initialization * @n: the element to delete from the hash list. 
@@ -182,7 +172,7 @@ static inline void hlist_del_init_rcu(struct hlist_node *n) { if (!hlist_unhashed(n)) { __hlist_del(n); - WRITE_ONCE(n->pprev, NULL); + n->pprev = NULL; } } @@ -209,7 +199,7 @@ static inline void list_replace_rcu(struct list_head *old, * @list: the RCU-protected list to splice * @prev: points to the last element of the existing list * @next: points to the first element of the existing list - * @sync: synchronize_rcu, synchronize_rcu_expedited, ... + * @sync: function to sync: synchronize_rcu(), synchronize_sched(), ... * * The list pointed to by @prev and @next can be RCU-read traversed * concurrently with this function. @@ -247,8 +237,6 @@ static inline void __list_splice_init_rcu(struct list_head *list, */ sync(); - ASSERT_EXCLUSIVE_ACCESS(*first); - ASSERT_EXCLUSIVE_ACCESS(*last); /* * Readers are finished with the source list, so perform splice. @@ -269,7 +257,7 @@ static inline void __list_splice_init_rcu(struct list_head *list, * designed for stacks. * @list: the RCU-protected list to splice * @head: the place in the existing list to splice the first list into - * @sync: synchronize_rcu, synchronize_rcu_expedited, ... + * @sync: function to sync: synchronize_rcu(), synchronize_sched(), ... */ static inline void list_splice_init_rcu(struct list_head *list, struct list_head *head, @@ -284,7 +272,7 @@ static inline void list_splice_init_rcu(struct list_head *list, * list, designed for queues. * @list: the RCU-protected list to splice * @head: the place in the existing list to splice the first list into - * @sync: synchronize_rcu, synchronize_rcu_expedited, ... + * @sync: function to sync: synchronize_rcu(), synchronize_sched(), ... */ static inline void list_splice_tail_init_rcu(struct list_head *list, struct list_head *head, @@ -304,34 +292,26 @@ static inline void list_splice_tail_init_rcu(struct list_head *list, * primitives such as list_add_rcu() as long as it's guarded by rcu_read_lock(). 
*/ #define list_entry_rcu(ptr, type, member) \ - container_of(READ_ONCE(ptr), type, member) + container_of(lockless_dereference(ptr), type, member) -/* +/** * Where are list_empty_rcu() and list_first_entry_rcu()? * - * They do not exist because they would lead to subtle race conditions: + * Implementing those functions following their counterparts list_empty() and + * list_first_entry() is not advisable because they lead to subtle race + * conditions as the following snippet shows: * * if (!list_empty_rcu(mylist)) { * struct foo *bar = list_first_entry_rcu(mylist, struct foo, list_member); * do_something(bar); * } * - * The list might be non-empty when list_empty_rcu() checks it, but it - * might have become empty by the time that list_first_entry_rcu() rereads - * the ->next pointer, which would result in a SEGV. + * The list may not be empty when list_empty_rcu checks it, but it may be when + * list_first_entry_rcu rereads the ->next pointer. * - * When not using RCU, it is OK for list_first_entry() to re-read that - * pointer because both functions should be protected by some lock that - * blocks writers. - * - * When using RCU, list_empty() uses READ_ONCE() to fetch the - * RCU-protected ->next pointer and then compares it to the address of the - * list head. However, it neither dereferences this pointer nor provides - * this pointer to its caller. Thus, READ_ONCE() suffices (that is, - * rcu_dereference() is not needed), which means that list_empty() can be - * used anywhere you would want to use list_empty_rcu(). Just don't - * expect anything useful to happen if you do a subsequent lockless - * call to list_first_entry_rcu()!!! + * Rereading the ->next pointer is not a problem for list_empty() and + * list_first_entry() because they would be protected by a lock that blocks + * writers. * * See list_first_or_null_rcu for an alternative. 
*/ @@ -380,35 +360,14 @@ static inline void list_splice_tail_init_rcu(struct list_head *list, * @pos: the type * to use as a loop cursor. * @head: the head for your list. * @member: the name of the list_head within the struct. - * @cond: optional lockdep expression if called from non-RCU protection. * * This list-traversal primitive may safely run concurrently with * the _rcu list-mutation primitives such as list_add_rcu() * as long as the traversal is guarded by rcu_read_lock(). */ -#define list_for_each_entry_rcu(pos, head, member, cond...) \ - for (__list_check_rcu(dummy, ## cond, 0), \ - pos = list_entry_rcu((head)->next, typeof(*pos), member); \ - &pos->member != (head); \ - pos = list_entry_rcu(pos->member.next, typeof(*pos), member)) - -/** - * list_for_each_entry_srcu - iterate over rcu list of given type - * @pos: the type * to use as a loop cursor. - * @head: the head for your list. - * @member: the name of the list_head within the struct. - * @cond: lockdep expression for the lock required to traverse the list. - * - * This list-traversal primitive may safely run concurrently with - * the _rcu list-mutation primitives such as list_add_rcu() - * as long as the traversal is guarded by srcu_read_lock(). - * The lockdep expression srcu_read_lock_held() can be passed as the - * cond argument from read side. - */ -#define list_for_each_entry_srcu(pos, head, member, cond) \ - for (__list_check_srcu(cond), \ - pos = list_entry_rcu((head)->next, typeof(*pos), member); \ - &pos->member != (head); \ +#define list_for_each_entry_rcu(pos, head, member) \ + for (pos = list_entry_rcu((head)->next, typeof(*pos), member); \ + &pos->member != (head); \ pos = list_entry_rcu(pos->member.next, typeof(*pos), member)) /** @@ -417,15 +376,16 @@ static inline void list_splice_tail_init_rcu(struct list_head *list, * @type: the type of the struct this is embedded in. * @member: the name of the list_head within the struct. 
* - * This primitive may safely run concurrently with the _rcu - * list-mutation primitives such as list_add_rcu(), but requires some - * implicit RCU read-side guarding. One example is running within a special - * exception-time environment where preemption is disabled and where lockdep - * cannot be invoked. Another example is when items are added to the list, - * but never deleted. + * This primitive may safely run concurrently with the _rcu list-mutation + * primitives such as list_add_rcu(), but requires some implicit RCU + * read-side guarding. One example is running within a special + * exception-time environment where preemption is disabled and where + * lockdep cannot be invoked (in which case updaters must use RCU-sched, + * as in synchronize_sched(), call_rcu_sched(), and friends). Another + * example is when items are added to the list, but never deleted. */ #define list_entry_lockless(ptr, type, member) \ - container_of((typeof(ptr))READ_ONCE(ptr), type, member) + container_of((typeof(ptr))lockless_dereference(ptr), type, member) /** * list_for_each_entry_lockless - iterate over rcu list of given type @@ -433,12 +393,13 @@ static inline void list_splice_tail_init_rcu(struct list_head *list, * @head: the head for your list. * @member: the name of the list_struct within the struct. * - * This primitive may safely run concurrently with the _rcu - * list-mutation primitives such as list_add_rcu(), but requires some - * implicit RCU read-side guarding. One example is running within a special - * exception-time environment where preemption is disabled and where lockdep - * cannot be invoked. Another example is when items are added to the list, - * but never deleted. + * This primitive may safely run concurrently with the _rcu list-mutation + * primitives such as list_add_rcu(), but requires some implicit RCU + * read-side guarding. 
One example is running within a special + * exception-time environment where preemption is disabled and where + * lockdep cannot be invoked (in which case updaters must use RCU-sched, + * as in synchronize_sched(), call_rcu_sched(), and friends). Another + * example is when items are added to the list, but never deleted. */ #define list_for_each_entry_lockless(pos, head, member) \ for (pos = list_entry_lockless((head)->next, typeof(*pos), member); \ @@ -452,43 +413,13 @@ static inline void list_splice_tail_init_rcu(struct list_head *list, * @member: the name of the list_head within the struct. * * Continue to iterate over list of given type, continuing after - * the current position which must have been in the list when the RCU read - * lock was taken. - * This would typically require either that you obtained the node from a - * previous walk of the list in the same RCU read-side critical section, or - * that you held some sort of non-RCU reference (such as a reference count) - * to keep the node alive *and* in the list. - * - * This iterator is similar to list_for_each_entry_from_rcu() except - * this starts after the given position and that one starts at the given - * position. + * the current position. */ #define list_for_each_entry_continue_rcu(pos, head, member) \ for (pos = list_entry_rcu(pos->member.next, typeof(*pos), member); \ &pos->member != (head); \ pos = list_entry_rcu(pos->member.next, typeof(*pos), member)) -/** - * list_for_each_entry_from_rcu - iterate over a list from current point - * @pos: the type * to use as a loop cursor. - * @head: the head for your list. - * @member: the name of the list_node within the struct. - * - * Iterate over the tail of a list starting from a given position, - * which must have been in the list when the RCU read lock was taken. 
- * This would typically require either that you obtained the node from a - * previous walk of the list in the same RCU read-side critical section, or - * that you held some sort of non-RCU reference (such as a reference count) - * to keep the node alive *and* in the list. - * - * This iterator is similar to list_for_each_entry_continue_rcu() except - * this starts from the given position and that one starts from the position - * after the given position. - */ -#define list_for_each_entry_from_rcu(pos, head, member) \ - for (; &(pos)->member != (head); \ - pos = list_entry_rcu(pos->member.next, typeof(*(pos)), member)) - /** * hlist_del_rcu - deletes entry from hash list without re-initialization * @n: the element to delete from the hash list. @@ -511,7 +442,7 @@ static inline void list_splice_tail_init_rcu(struct list_head *list, static inline void hlist_del_rcu(struct hlist_node *n) { __hlist_del(n); - WRITE_ONCE(n->pprev, LIST_POISON2); + n->pprev = LIST_POISON2; } /** @@ -527,32 +458,11 @@ static inline void hlist_replace_rcu(struct hlist_node *old, struct hlist_node *next = old->next; new->next = next; - WRITE_ONCE(new->pprev, old->pprev); + new->pprev = old->pprev; rcu_assign_pointer(*(struct hlist_node __rcu **)new->pprev, new); if (next) - WRITE_ONCE(new->next->pprev, &new->next); - WRITE_ONCE(old->pprev, LIST_POISON2); -} - -/** - * hlists_swap_heads_rcu - swap the lists the hlist heads point to - * @left: The hlist head on the left - * @right: The hlist head on the right - * - * The lists start out as [@left ][node1 ... ] and - * [@right ][node2 ... ] - * The lists end up as [@left ][node2 ... ] - * [@right ][node1 ... 
] - */ -static inline void hlists_swap_heads_rcu(struct hlist_head *left, struct hlist_head *right) -{ - struct hlist_node *node1 = left->first; - struct hlist_node *node2 = right->first; - - rcu_assign_pointer(left->first, node2); - rcu_assign_pointer(right->first, node1); - WRITE_ONCE(node2->pprev, &left->first); - WRITE_ONCE(node1->pprev, &right->first); + new->next->pprev = &new->next; + old->pprev = LIST_POISON2; } /* @@ -587,10 +497,10 @@ static inline void hlist_add_head_rcu(struct hlist_node *n, struct hlist_node *first = h->first; n->next = first; - WRITE_ONCE(n->pprev, &h->first); + n->pprev = &h->first; rcu_assign_pointer(hlist_first_rcu(h), n); if (first) - WRITE_ONCE(first->pprev, &n->next); + first->pprev = &n->next; } /** @@ -617,13 +527,12 @@ static inline void hlist_add_tail_rcu(struct hlist_node *n, { struct hlist_node *i, *last = NULL; - /* Note: write side code, so rcu accessors are not needed. */ - for (i = h->first; i; i = i->next) + for (i = hlist_first_rcu(h); i; i = hlist_next_rcu(i)) last = i; if (last) { n->next = last->next; - WRITE_ONCE(n->pprev, &last->next); + n->pprev = &last->next; rcu_assign_pointer(hlist_next_rcu(last), n); } else { hlist_add_head_rcu(n, h); @@ -651,10 +560,10 @@ static inline void hlist_add_tail_rcu(struct hlist_node *n, static inline void hlist_add_before_rcu(struct hlist_node *n, struct hlist_node *next) { - WRITE_ONCE(n->pprev, next->pprev); + n->pprev = next->pprev; n->next = next; rcu_assign_pointer(hlist_pprev_rcu(n), n); - WRITE_ONCE(next->pprev, &n->next); + next->pprev = &n->next; } /** @@ -679,10 +588,10 @@ static inline void hlist_add_behind_rcu(struct hlist_node *n, struct hlist_node *prev) { n->next = prev->next; - WRITE_ONCE(n->pprev, &prev->next); + n->pprev = &prev->next; rcu_assign_pointer(hlist_next_rcu(prev), n); if (n->next) - WRITE_ONCE(n->next->pprev, &n->next); + n->next->pprev = &n->next; } #define __hlist_for_each_rcu(pos, head) \ @@ -695,36 +604,13 @@ static inline void 
hlist_add_behind_rcu(struct hlist_node *n, * @pos: the type * to use as a loop cursor. * @head: the head for your list. * @member: the name of the hlist_node within the struct. - * @cond: optional lockdep expression if called from non-RCU protection. * * This list-traversal primitive may safely run concurrently with * the _rcu list-mutation primitives such as hlist_add_head_rcu() * as long as the traversal is guarded by rcu_read_lock(). */ -#define hlist_for_each_entry_rcu(pos, head, member, cond...) \ - for (__list_check_rcu(dummy, ## cond, 0), \ - pos = hlist_entry_safe(rcu_dereference_raw(hlist_first_rcu(head)),\ - typeof(*(pos)), member); \ - pos; \ - pos = hlist_entry_safe(rcu_dereference_raw(hlist_next_rcu(\ - &(pos)->member)), typeof(*(pos)), member)) - -/** - * hlist_for_each_entry_srcu - iterate over rcu list of given type - * @pos: the type * to use as a loop cursor. - * @head: the head for your list. - * @member: the name of the hlist_node within the struct. - * @cond: lockdep expression for the lock required to traverse the list. - * - * This list-traversal primitive may safely run concurrently with - * the _rcu list-mutation primitives such as hlist_add_head_rcu() - * as long as the traversal is guarded by srcu_read_lock(). - * The lockdep expression srcu_read_lock_held() can be passed as the - * cond argument from read side. - */ -#define hlist_for_each_entry_srcu(pos, head, member, cond) \ - for (__list_check_srcu(cond), \ - pos = hlist_entry_safe(rcu_dereference_raw(hlist_first_rcu(head)),\ +#define hlist_for_each_entry_rcu(pos, head, member) \ + for (pos = hlist_entry_safe (rcu_dereference_raw(hlist_first_rcu(head)),\ typeof(*(pos)), member); \ pos; \ pos = hlist_entry_safe(rcu_dereference_raw(hlist_next_rcu(\ @@ -744,10 +630,10 @@ static inline void hlist_add_behind_rcu(struct hlist_node *n, * not do any RCU debugging or tracing. 
*/ #define hlist_for_each_entry_rcu_notrace(pos, head, member) \ - for (pos = hlist_entry_safe(rcu_dereference_raw_check(hlist_first_rcu(head)),\ + for (pos = hlist_entry_safe (rcu_dereference_raw_notrace(hlist_first_rcu(head)),\ typeof(*(pos)), member); \ pos; \ - pos = hlist_entry_safe(rcu_dereference_raw_check(hlist_next_rcu(\ + pos = hlist_entry_safe(rcu_dereference_raw_notrace(hlist_next_rcu(\ &(pos)->member)), typeof(*(pos)), member)) /** diff --git a/include/linux/rculist_bl.h b/include/linux/rculist_bl.h index 0b952d06eb..4f216c59e7 100644 --- a/include/linux/rculist_bl.h +++ b/include/linux/rculist_bl.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_RCULIST_BL_H #define _LINUX_RCULIST_BL_H @@ -24,6 +23,34 @@ static inline struct hlist_bl_node *hlist_bl_first_rcu(struct hlist_bl_head *h) ((unsigned long)rcu_dereference_check(h->first, hlist_bl_is_locked(h)) & ~LIST_BL_LOCKMASK); } +/** + * hlist_bl_del_init_rcu - deletes entry from hash list with re-initialization + * @n: the element to delete from the hash list. + * + * Note: hlist_bl_unhashed() on the node returns true after this. It is + * useful for RCU based read lockfree traversal if the writer side + * must know if the list entry is still hashed or already unhashed. + * + * In particular, it means that we can not poison the forward pointers + * that may still be used for walking the hash list and we can only + * zero the pprev pointer so list_unhashed() will return true after + * this. + * + * The caller must take whatever precautions are necessary (such as + * holding appropriate locks) to avoid racing with another + * list-mutation primitive, such as hlist_bl_add_head_rcu() or + * hlist_bl_del_rcu(), running on this same list. However, it is + * perfectly legal to run concurrently with the _rcu list-traversal + * primitives, such as hlist_bl_for_each_entry_rcu(). 
+ */ +static inline void hlist_bl_del_init_rcu(struct hlist_bl_node *n) +{ + if (!hlist_bl_unhashed(n)) { + __hlist_bl_del(n); + n->pprev = NULL; + } +} + /** * hlist_bl_del_rcu - deletes entry from hash list without re-initialization * @n: the element to delete from the hash list. diff --git a/include/linux/rculist_nulls.h b/include/linux/rculist_nulls.h index d8afdb8784..4ae95f7e85 100644 --- a/include/linux/rculist_nulls.h +++ b/include/linux/rculist_nulls.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_RCULIST_NULLS_H #define _LINUX_RCULIST_NULLS_H @@ -34,21 +33,13 @@ static inline void hlist_nulls_del_init_rcu(struct hlist_nulls_node *n) { if (!hlist_nulls_unhashed(n)) { __hlist_nulls_del(n); - WRITE_ONCE(n->pprev, NULL); + n->pprev = NULL; } } -/** - * hlist_nulls_first_rcu - returns the first element of the hash list. - * @head: the head of the list. - */ #define hlist_nulls_first_rcu(head) \ (*((struct hlist_nulls_node __rcu __force **)&(head)->first)) -/** - * hlist_nulls_next_rcu - returns the element of the list after @node. - * @node: element of the list. - */ #define hlist_nulls_next_rcu(node) \ (*((struct hlist_nulls_node __rcu __force **)&(node)->next)) @@ -74,7 +65,7 @@ static inline void hlist_nulls_del_init_rcu(struct hlist_nulls_node *n) static inline void hlist_nulls_del_rcu(struct hlist_nulls_node *n) { __hlist_nulls_del(n); - WRITE_ONCE(n->pprev, LIST_POISON2); + n->pprev = LIST_POISON2; } /** @@ -102,10 +93,10 @@ static inline void hlist_nulls_add_head_rcu(struct hlist_nulls_node *n, struct hlist_nulls_node *first = h->first; n->next = first; - WRITE_ONCE(n->pprev, &h->first); + n->pprev = &h->first; rcu_assign_pointer(hlist_nulls_first_rcu(h), n); if (!is_a_nulls(first)) - WRITE_ONCE(first->pprev, &n->next); + first->pprev = &n->next; } /** @@ -114,8 +105,9 @@ static inline void hlist_nulls_add_head_rcu(struct hlist_nulls_node *n, * @h: the list to add to. 
* * Description: - * Adds the specified element to the specified hlist_nulls, - * while permitting racing traversals. + * Adds the specified element to the end of the specified hlist_nulls, + * while permitting racing traversals. NOTE: tail insertion requires + * list traversal. * * The caller must take whatever precautions are necessary * (such as holding appropriate locks) to avoid racing @@ -128,41 +120,34 @@ static inline void hlist_nulls_add_head_rcu(struct hlist_nulls_node *n, * list-traversal primitive must be guarded by rcu_read_lock(). */ static inline void hlist_nulls_add_tail_rcu(struct hlist_nulls_node *n, - struct hlist_nulls_head *h) + struct hlist_nulls_head *h) { struct hlist_nulls_node *i, *last = NULL; - /* Note: write side code, so rcu accessors are not needed. */ - for (i = h->first; !is_a_nulls(i); i = i->next) + for (i = hlist_nulls_first_rcu(h); !is_a_nulls(i); + i = hlist_nulls_next_rcu(i)) last = i; if (last) { n->next = last->next; n->pprev = &last->next; - rcu_assign_pointer(hlist_next_rcu(last), n); + rcu_assign_pointer(hlist_nulls_next_rcu(last), n); } else { hlist_nulls_add_head_rcu(n, h); } } -/* after that hlist_nulls_del will work */ -static inline void hlist_nulls_add_fake(struct hlist_nulls_node *n) -{ - n->pprev = &n->next; - n->next = (struct hlist_nulls_node *)NULLS_MARKER(NULL); -} - /** * hlist_nulls_for_each_entry_rcu - iterate over rcu list of given type * @tpos: the type * to use as a loop cursor. * @pos: the &struct hlist_nulls_node to use as a loop cursor. - * @head: the head of the list. + * @head: the head for your list. * @member: the name of the hlist_nulls_node within the struct. 
* * The barrier() is needed to make sure compiler doesn't cache first element [1], * as this loop can be restarted [2] - * [1] Documentation/memory-barriers.txt around line 1533 - * [2] Documentation/RCU/rculist_nulls.rst around line 146 + * [1] Documentation/atomic_ops.txt around line 114 + * [2] Documentation/RCU/rculist_nulls.txt around line 146 */ #define hlist_nulls_for_each_entry_rcu(tpos, pos, head, member) \ for (({barrier();}), \ @@ -171,19 +156,5 @@ static inline void hlist_nulls_add_fake(struct hlist_nulls_node *n) ({ tpos = hlist_nulls_entry(pos, typeof(*tpos), member); 1; }); \ pos = rcu_dereference_raw(hlist_nulls_next_rcu(pos))) -/** - * hlist_nulls_for_each_entry_safe - - * iterate over list of given type safe against removal of list entry - * @tpos: the type * to use as a loop cursor. - * @pos: the &struct hlist_nulls_node to use as a loop cursor. - * @head: the head of the list. - * @member: the name of the hlist_nulls_node within the struct. - */ -#define hlist_nulls_for_each_entry_safe(tpos, pos, head, member) \ - for (({barrier();}), \ - pos = rcu_dereference_raw(hlist_nulls_first_rcu(head)); \ - (!is_a_nulls(pos)) && \ - ({ tpos = hlist_nulls_entry(pos, typeof(*tpos), member); \ - pos = rcu_dereference_raw(hlist_nulls_next_rcu(pos)); 1; });) #endif #endif diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h index 434d12fe2d..20f3e97ea9 100644 --- a/include/linux/rcupdate.h +++ b/include/linux/rcupdate.h @@ -1,12 +1,25 @@ -/* SPDX-License-Identifier: GPL-2.0+ */ /* * Read-Copy Update mechanism for mutual exclusion * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. 
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, you can access it online at + * http://www.gnu.org/licenses/gpl-2.0.html. + * * Copyright IBM Corporation, 2001 * * Author: Dipankar Sarma * - * Based on the original work by Paul McKenney + * Based on the original work by Paul McKenney * and inputs from Rusty Russell, Andrea Arcangeli and Andi Kleen. * Papers: * http://www.rdrop.com/users/paulmck/paper/rclockpdcsproof.pdf @@ -21,31 +34,265 @@ #define __LINUX_RCUPDATE_H #include -#include -#include -#include -#include -#include -#include -#include +#include +#include +#include #include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#ifndef CONFIG_TINY_RCU +extern int rcu_expedited; /* for sysctl */ +extern int rcu_normal; /* also for sysctl */ +#endif /* #ifndef CONFIG_TINY_RCU */ + +#ifdef CONFIG_TINY_RCU +/* Tiny RCU doesn't expedite, as its purpose in life is instead to be tiny. */ +static inline bool rcu_gp_is_normal(void) /* Internal RCU use. */ +{ + return true; +} +static inline bool rcu_gp_is_expedited(void) /* Internal RCU use. */ +{ + return false; +} + +static inline void rcu_expedite_gp(void) +{ +} + +static inline void rcu_unexpedite_gp(void) +{ +} +#else /* #ifdef CONFIG_TINY_RCU */ +bool rcu_gp_is_normal(void); /* Internal RCU use. */ +bool rcu_gp_is_expedited(void); /* Internal RCU use. 
*/ +void rcu_expedite_gp(void); +void rcu_unexpedite_gp(void); +#endif /* #else #ifdef CONFIG_TINY_RCU */ + +enum rcutorture_type { + RCU_FLAVOR, + RCU_BH_FLAVOR, + RCU_SCHED_FLAVOR, + RCU_TASKS_FLAVOR, + SRCU_FLAVOR, + INVALID_RCU_FLAVOR +}; + +#if defined(CONFIG_TREE_RCU) || defined(CONFIG_PREEMPT_RCU) +void rcutorture_get_gp_data(enum rcutorture_type test_type, int *flags, + unsigned long *gpnum, unsigned long *completed); +void rcutorture_record_test_transition(void); +void rcutorture_record_progress(unsigned long vernum); +void do_trace_rcu_torture_read(const char *rcutorturename, + struct rcu_head *rhp, + unsigned long secs, + unsigned long c_old, + unsigned long c); +#else +static inline void rcutorture_get_gp_data(enum rcutorture_type test_type, + int *flags, + unsigned long *gpnum, + unsigned long *completed) +{ + *flags = 0; + *gpnum = 0; + *completed = 0; +} +static inline void rcutorture_record_test_transition(void) +{ +} +static inline void rcutorture_record_progress(unsigned long vernum) +{ +} +#ifdef CONFIG_RCU_TRACE +void do_trace_rcu_torture_read(const char *rcutorturename, + struct rcu_head *rhp, + unsigned long secs, + unsigned long c_old, + unsigned long c); +#else +#define do_trace_rcu_torture_read(rcutorturename, rhp, secs, c_old, c) \ + do { } while (0) +#endif +#endif + +#define UINT_CMP_GE(a, b) (UINT_MAX / 2 >= (a) - (b)) +#define UINT_CMP_LT(a, b) (UINT_MAX / 2 < (a) - (b)) #define ULONG_CMP_GE(a, b) (ULONG_MAX / 2 >= (a) - (b)) #define ULONG_CMP_LT(a, b) (ULONG_MAX / 2 < (a) - (b)) #define ulong2long(a) (*(long *)(&(a))) -#define USHORT_CMP_GE(a, b) (USHRT_MAX / 2 >= (unsigned short)((a) - (b))) -#define USHORT_CMP_LT(a, b) (USHRT_MAX / 2 < (unsigned short)((a) - (b))) /* Exported common interfaces */ -void call_rcu(struct rcu_head *head, rcu_callback_t func); + +#ifdef CONFIG_PREEMPT_RCU + +/** + * call_rcu() - Queue an RCU callback for invocation after a grace period. + * @head: structure to be used for queueing the RCU updates. 
+ * @func: actual callback function to be invoked after the grace period + * + * The callback function will be invoked some time after a full grace + * period elapses, in other words after all pre-existing RCU read-side + * critical sections have completed. However, the callback function + * might well execute concurrently with RCU read-side critical sections + * that started after call_rcu() was invoked. RCU read-side critical + * sections are delimited by rcu_read_lock() and rcu_read_unlock(), + * and may be nested. + * + * Note that all CPUs must agree that the grace period extended beyond + * all pre-existing RCU read-side critical section. On systems with more + * than one CPU, this means that when "func()" is invoked, each CPU is + * guaranteed to have executed a full memory barrier since the end of its + * last RCU read-side critical section whose beginning preceded the call + * to call_rcu(). It also means that each CPU executing an RCU read-side + * critical section that continues beyond the start of "func()" must have + * executed a memory barrier after the call_rcu() but before the beginning + * of that RCU read-side critical section. Note that these guarantees + * include CPUs that are offline, idle, or executing in user mode, as + * well as CPUs that are executing in the kernel. + * + * Furthermore, if CPU A invoked call_rcu() and CPU B invoked the + * resulting RCU callback function "func()", then both CPU A and CPU B are + * guaranteed to execute a full memory barrier during the time interval + * between the call to call_rcu() and the invocation of "func()" -- even + * if CPU A and CPU B are the same CPU (but again only if the system has + * more than one CPU). + */ +void call_rcu(struct rcu_head *head, + rcu_callback_t func); + +#else /* #ifdef CONFIG_PREEMPT_RCU */ + +/* In classic RCU, call_rcu() is just call_rcu_sched(). 
*/ +#define call_rcu call_rcu_sched + +#endif /* #else #ifdef CONFIG_PREEMPT_RCU */ + +/** + * call_rcu_bh() - Queue an RCU for invocation after a quicker grace period. + * @head: structure to be used for queueing the RCU updates. + * @func: actual callback function to be invoked after the grace period + * + * The callback function will be invoked some time after a full grace + * period elapses, in other words after all currently executing RCU + * read-side critical sections have completed. call_rcu_bh() assumes + * that the read-side critical sections end on completion of a softirq + * handler. This means that read-side critical sections in process + * context must not be interrupted by softirqs. This interface is to be + * used when most of the read-side critical sections are in softirq context. + * RCU read-side critical sections are delimited by : + * - rcu_read_lock() and rcu_read_unlock(), if in interrupt context. + * OR + * - rcu_read_lock_bh() and rcu_read_unlock_bh(), if in process context. + * These may be nested. + * + * See the description of call_rcu() for more detailed information on + * memory ordering guarantees. + */ +void call_rcu_bh(struct rcu_head *head, + rcu_callback_t func); + +/** + * call_rcu_sched() - Queue an RCU for invocation after sched grace period. + * @head: structure to be used for queueing the RCU updates. + * @func: actual callback function to be invoked after the grace period + * + * The callback function will be invoked some time after a full grace + * period elapses, in other words after all currently executing RCU + * read-side critical sections have completed. call_rcu_sched() assumes + * that the read-side critical sections end on enabling of preemption + * or on voluntary preemption. + * RCU read-side critical sections are delimited by : + * - rcu_read_lock_sched() and rcu_read_unlock_sched(), + * OR + * anything that disables preemption. + * These may be nested. 
+ * + * See the description of call_rcu() for more detailed information on + * memory ordering guarantees. + */ +void call_rcu_sched(struct rcu_head *head, + rcu_callback_t func); + +void synchronize_sched(void); + +/* + * Structure allowing asynchronous waiting on RCU. + */ +struct rcu_synchronize { + struct rcu_head head; + struct completion completion; +}; +void wakeme_after_rcu(struct rcu_head *head); + +void __wait_rcu_gp(bool checktiny, int n, call_rcu_func_t *crcu_array, + struct rcu_synchronize *rs_array); + +#define _wait_rcu_gp(checktiny, ...) \ +do { \ + call_rcu_func_t __crcu_array[] = { __VA_ARGS__ }; \ + struct rcu_synchronize __rs_array[ARRAY_SIZE(__crcu_array)]; \ + __wait_rcu_gp(checktiny, ARRAY_SIZE(__crcu_array), \ + __crcu_array, __rs_array); \ +} while (0) + +#define wait_rcu_gp(...) _wait_rcu_gp(false, __VA_ARGS__) + +/** + * synchronize_rcu_mult - Wait concurrently for multiple grace periods + * @...: List of call_rcu() functions for the flavors to wait on. + * + * This macro waits concurrently for multiple flavors of RCU grace periods. + * For example, synchronize_rcu_mult(call_rcu, call_rcu_bh) would wait + * on concurrent RCU and RCU-bh grace periods. Waiting on a give SRCU + * domain requires you to write a wrapper function for that SRCU domain's + * call_srcu() function, supplying the corresponding srcu_struct. + * + * If Tiny RCU, tell _wait_rcu_gp() not to bother waiting for RCU + * or RCU-bh, given that anywhere synchronize_rcu_mult() can be called + * is automatically a grace period. + */ +#define synchronize_rcu_mult(...) \ + _wait_rcu_gp(IS_ENABLED(CONFIG_TINY_RCU), __VA_ARGS__) + +/** + * call_rcu_tasks() - Queue an RCU for invocation task-based grace period + * @head: structure to be used for queueing the RCU updates. 
+ * @func: actual callback function to be invoked after the grace period + * + * The callback function will be invoked some time after a full grace + * period elapses, in other words after all currently executing RCU + * read-side critical sections have completed. call_rcu_tasks() assumes + * that the read-side critical sections end at a voluntary context + * switch (not a preemption!), entry into idle, or transition to usermode + * execution. As such, there are no read-side primitives analogous to + * rcu_read_lock() and rcu_read_unlock() because this primitive is intended + * to determine that all tasks have passed through a safe state, not so + * much for data-strcuture synchronization. + * + * See the description of call_rcu() for more detailed information on + * memory ordering guarantees. + */ +void call_rcu_tasks(struct rcu_head *head, rcu_callback_t func); +void synchronize_rcu_tasks(void); void rcu_barrier_tasks(void); -void rcu_barrier_tasks_rude(void); -void synchronize_rcu(void); #ifdef CONFIG_PREEMPT_RCU void __rcu_read_lock(void); void __rcu_read_unlock(void); +void rcu_read_unlock_special(struct task_struct *t); +void synchronize_rcu(void); /* * Defined as a macro as it is a very low level header included from @@ -53,25 +300,25 @@ void __rcu_read_unlock(void); * nesting depth, but makes sense only if CONFIG_PREEMPT_RCU -- in other * types of kernel builds, the rcu_read_lock() nesting depth is unknowable. 
*/ -#define rcu_preempt_depth() READ_ONCE(current->rcu_read_lock_nesting) +#define rcu_preempt_depth() (current->rcu_read_lock_nesting) #else /* #ifdef CONFIG_PREEMPT_RCU */ -#ifdef CONFIG_TINY_RCU -#define rcu_read_unlock_strict() do { } while (0) -#else -void rcu_read_unlock_strict(void); -#endif - static inline void __rcu_read_lock(void) { - preempt_disable(); + if (IS_ENABLED(CONFIG_PREEMPT_COUNT)) + preempt_disable(); } static inline void __rcu_read_unlock(void) { - preempt_enable(); - rcu_read_unlock_strict(); + if (IS_ENABLED(CONFIG_PREEMPT_COUNT)) + preempt_enable(); +} + +static inline void synchronize_rcu(void) +{ + synchronize_sched(); } static inline int rcu_preempt_depth(void) @@ -83,23 +330,28 @@ static inline int rcu_preempt_depth(void) /* Internal to kernel */ void rcu_init(void); -extern int rcu_scheduler_active __read_mostly; -void rcu_sched_clock_irq(int user); +void rcu_sched_qs(void); +void rcu_bh_qs(void); +void rcu_check_callbacks(int user); void rcu_report_dead(unsigned int cpu); -void rcutree_migrate_callbacks(int cpu); +void rcu_cpu_starting(unsigned int cpu); -#ifdef CONFIG_TASKS_RCU_GENERIC -void rcu_init_tasks_generic(void); -#else -static inline void rcu_init_tasks_generic(void) { } -#endif +#ifndef CONFIG_TINY_RCU +void rcu_end_inkernel_boot(void); +#else /* #ifndef CONFIG_TINY_RCU */ +static inline void rcu_end_inkernel_boot(void) { } +#endif /* #ifndef CONFIG_TINY_RCU */ #ifdef CONFIG_RCU_STALL_COMMON void rcu_sysrq_start(void); void rcu_sysrq_end(void); #else /* #ifdef CONFIG_RCU_STALL_COMMON */ -static inline void rcu_sysrq_start(void) { } -static inline void rcu_sysrq_end(void) { } +static inline void rcu_sysrq_start(void) +{ +} +static inline void rcu_sysrq_end(void) +{ +} #endif /* #else #ifdef CONFIG_RCU_STALL_COMMON */ #ifdef CONFIG_NO_HZ_FULL @@ -112,24 +364,21 @@ static inline void rcu_user_exit(void) { } #ifdef CONFIG_RCU_NOCB_CPU void rcu_init_nohz(void); -int rcu_nocb_cpu_offload(int cpu); -int rcu_nocb_cpu_deoffload(int 
cpu); -void rcu_nocb_flush_deferred_wakeup(void); #else /* #ifdef CONFIG_RCU_NOCB_CPU */ -static inline void rcu_init_nohz(void) { } -static inline int rcu_nocb_cpu_offload(int cpu) { return -EINVAL; } -static inline int rcu_nocb_cpu_deoffload(int cpu) { return 0; } -static inline void rcu_nocb_flush_deferred_wakeup(void) { } +static inline void rcu_init_nohz(void) +{ +} #endif /* #else #ifdef CONFIG_RCU_NOCB_CPU */ /** * RCU_NONIDLE - Indicate idle-loop code that needs RCU readers * @a: Code that RCU needs to pay attention to. * - * RCU read-side critical sections are forbidden in the inner idle loop, - * that is, between the rcu_idle_enter() and the rcu_idle_exit() -- RCU - * will happily ignore any such read-side critical sections. However, - * things like powertop need tracepoints in the inner idle loop. + * RCU, RCU-bh, and RCU-sched read-side critical sections are forbidden + * in the inner idle loop, that is, between the rcu_idle_enter() and + * the rcu_idle_exit() -- RCU will happily ignore any such read-side + * critical sections. However, things like powertop need tracepoints + * in the inner idle loop. * * This macro provides the way out: RCU_NONIDLE(do_something_with_RCU()) * will tell RCU that it needs to pay attention, invoke its argument @@ -148,80 +397,46 @@ static inline void rcu_nocb_flush_deferred_wakeup(void) { } } while (0) /* - * Note a quasi-voluntary context switch for RCU-tasks's benefit. - * This is a macro rather than an inline function to avoid #include hell. + * Note a voluntary context switch for RCU-tasks benefit. This is a + * macro rather than an inline function to avoid #include hell. 
*/ -#ifdef CONFIG_TASKS_RCU_GENERIC - -# ifdef CONFIG_TASKS_RCU -# define rcu_tasks_classic_qs(t, preempt) \ - do { \ - if (!(preempt) && READ_ONCE((t)->rcu_tasks_holdout)) \ - WRITE_ONCE((t)->rcu_tasks_holdout, false); \ +#ifdef CONFIG_TASKS_RCU +#define TASKS_RCU(x) x +extern struct srcu_struct tasks_rcu_exit_srcu; +#define rcu_note_voluntary_context_switch(t) \ + do { \ + rcu_all_qs(); \ + if (READ_ONCE((t)->rcu_tasks_holdout)) \ + WRITE_ONCE((t)->rcu_tasks_holdout, false); \ } while (0) -void call_rcu_tasks(struct rcu_head *head, rcu_callback_t func); -void synchronize_rcu_tasks(void); -# else -# define rcu_tasks_classic_qs(t, preempt) do { } while (0) -# define call_rcu_tasks call_rcu -# define synchronize_rcu_tasks synchronize_rcu -# endif - -# ifdef CONFIG_TASKS_TRACE_RCU -# define rcu_tasks_trace_qs(t) \ - do { \ - if (!likely(READ_ONCE((t)->trc_reader_checked)) && \ - !unlikely(READ_ONCE((t)->trc_reader_nesting))) { \ - smp_store_release(&(t)->trc_reader_checked, true); \ - smp_mb(); /* Readers partitioned by store. 
*/ \ - } \ - } while (0) -# else -# define rcu_tasks_trace_qs(t) do { } while (0) -# endif - -#define rcu_tasks_qs(t, preempt) \ -do { \ - rcu_tasks_classic_qs((t), (preempt)); \ - rcu_tasks_trace_qs((t)); \ -} while (0) - -# ifdef CONFIG_TASKS_RUDE_RCU -void call_rcu_tasks_rude(struct rcu_head *head, rcu_callback_t func); -void synchronize_rcu_tasks_rude(void); -# endif - -#define rcu_note_voluntary_context_switch(t) rcu_tasks_qs(t, false) -void exit_tasks_rcu_start(void); -void exit_tasks_rcu_finish(void); -#else /* #ifdef CONFIG_TASKS_RCU_GENERIC */ -#define rcu_tasks_qs(t, preempt) do { } while (0) -#define rcu_note_voluntary_context_switch(t) do { } while (0) -#define call_rcu_tasks call_rcu -#define synchronize_rcu_tasks synchronize_rcu -static inline void exit_tasks_rcu_start(void) { } -static inline void exit_tasks_rcu_finish(void) { } -#endif /* #else #ifdef CONFIG_TASKS_RCU_GENERIC */ +#else /* #ifdef CONFIG_TASKS_RCU */ +#define TASKS_RCU(x) do { } while (0) +#define rcu_note_voluntary_context_switch(t) rcu_all_qs() +#endif /* #else #ifdef CONFIG_TASKS_RCU */ /** - * cond_resched_tasks_rcu_qs - Report potential quiescent states to RCU + * cond_resched_rcu_qs - Report potential quiescent states to RCU * * This macro resembles cond_resched(), except that it is defined to * report potential quiescent states to RCU-tasks even if the cond_resched() - * machinery were to be shut off, as some advocate for PREEMPTION kernels. + * machinery were to be shut off, as some advocate for PREEMPT kernels. 
*/ -#define cond_resched_tasks_rcu_qs() \ +#define cond_resched_rcu_qs() \ do { \ - rcu_tasks_qs(current, false); \ - cond_resched(); \ + if (!cond_resched()) \ + rcu_note_voluntary_context_switch(current); \ } while (0) +#if defined(CONFIG_DEBUG_LOCK_ALLOC) || defined(CONFIG_RCU_TRACE) || defined(CONFIG_SMP) +bool __rcu_is_watching(void); +#endif /* #if defined(CONFIG_DEBUG_LOCK_ALLOC) || defined(CONFIG_RCU_TRACE) || defined(CONFIG_SMP) */ + /* * Infrastructure to implement the synchronize_() primitives in * TREE_RCU and rcu_barrier_() primitives in TINY_RCU. */ -#if defined(CONFIG_TREE_RCU) +#if defined(CONFIG_TREE_RCU) || defined(CONFIG_PREEMPT_RCU) #include #elif defined(CONFIG_TINY_RCU) #include @@ -229,13 +444,15 @@ do { \ #error "Unknown RCU implementation specified to kernel configuration" #endif +#define RCU_SCHEDULER_INACTIVE 0 +#define RCU_SCHEDULER_INIT 1 +#define RCU_SCHEDULER_RUNNING 2 + /* - * The init_rcu_head_on_stack() and destroy_rcu_head_on_stack() calls - * are needed for dynamic initialization and destruction of rcu_head - * on the stack, and init_rcu_head()/destroy_rcu_head() are needed for - * dynamic initialization and destruction of statically allocated rcu_head - * structures. However, rcu_head structures allocated dynamically in the - * heap don't need any initialization. + * init_rcu_head_on_stack()/destroy_rcu_head_on_stack() are needed for dynamic + * initialization and destruction of rcu_head on the stack. rcu_head structures + * allocated dynamically in the heap or defined statically don't need any + * initialization. 
*/ #ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD void init_rcu_head(struct rcu_head *head); @@ -243,23 +460,32 @@ void destroy_rcu_head(struct rcu_head *head); void init_rcu_head_on_stack(struct rcu_head *head); void destroy_rcu_head_on_stack(struct rcu_head *head); #else /* !CONFIG_DEBUG_OBJECTS_RCU_HEAD */ -static inline void init_rcu_head(struct rcu_head *head) { } -static inline void destroy_rcu_head(struct rcu_head *head) { } -static inline void init_rcu_head_on_stack(struct rcu_head *head) { } -static inline void destroy_rcu_head_on_stack(struct rcu_head *head) { } +static inline void init_rcu_head(struct rcu_head *head) +{ +} + +static inline void destroy_rcu_head(struct rcu_head *head) +{ +} + +static inline void init_rcu_head_on_stack(struct rcu_head *head) +{ +} + +static inline void destroy_rcu_head_on_stack(struct rcu_head *head) +{ +} #endif /* #else !CONFIG_DEBUG_OBJECTS_RCU_HEAD */ #if defined(CONFIG_HOTPLUG_CPU) && defined(CONFIG_PROVE_RCU) bool rcu_lockdep_current_cpu_online(void); #else /* #if defined(CONFIG_HOTPLUG_CPU) && defined(CONFIG_PROVE_RCU) */ -static inline bool rcu_lockdep_current_cpu_online(void) { return true; } +static inline bool rcu_lockdep_current_cpu_online(void) +{ + return true; +} #endif /* #else #if defined(CONFIG_HOTPLUG_CPU) && defined(CONFIG_PROVE_RCU) */ -extern struct lockdep_map rcu_lock_map; -extern struct lockdep_map rcu_bh_lock_map; -extern struct lockdep_map rcu_sched_lock_map; -extern struct lockdep_map rcu_callback_map; - #ifdef CONFIG_DEBUG_LOCK_ALLOC static inline void rcu_lock_acquire(struct lockdep_map *map) @@ -269,14 +495,27 @@ static inline void rcu_lock_acquire(struct lockdep_map *map) static inline void rcu_lock_release(struct lockdep_map *map) { - lock_release(map, _THIS_IP_); + lock_release(map, 1, _THIS_IP_); } +extern struct lockdep_map rcu_lock_map; +extern struct lockdep_map rcu_bh_lock_map; +extern struct lockdep_map rcu_sched_lock_map; +extern struct lockdep_map rcu_callback_map; int 
debug_lockdep_rcu_enabled(void); + int rcu_read_lock_held(void); int rcu_read_lock_bh_held(void); + +/** + * rcu_read_lock_sched_held() - might we be in RCU-sched read-side critical section? + * + * If CONFIG_DEBUG_LOCK_ALLOC is selected, returns nonzero iff in an + * RCU-sched read-side critical section. In absence of + * CONFIG_DEBUG_LOCK_ALLOC, this assumes we are in an RCU-sched read-side + * critical section unless it can prove otherwise. + */ int rcu_read_lock_sched_held(void); -int rcu_read_lock_any_held(void); #else /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */ @@ -297,12 +536,6 @@ static inline int rcu_read_lock_sched_held(void) { return !preemptible(); } - -static inline int rcu_read_lock_any_held(void) -{ - return !preemptible(); -} - #endif /* #else #ifdef CONFIG_DEBUG_LOCK_ALLOC */ #ifdef CONFIG_PROVE_RCU @@ -314,8 +547,8 @@ static inline int rcu_read_lock_any_held(void) */ #define RCU_LOCKDEP_WARN(c, s) \ do { \ - static bool __section(".data.unlikely") __warned; \ - if ((c) && debug_lockdep_rcu_enabled() && !__warned) { \ + static bool __section(.data.unlikely) __warned; \ + if (debug_lockdep_rcu_enabled() && !__warned && (c)) { \ __warned = true; \ lockdep_rcu_suspicious(__FILE__, __LINE__, s); \ } \ @@ -328,14 +561,15 @@ static inline void rcu_preempt_sleep_check(void) "Illegal context switch in RCU read-side critical section"); } #else /* #ifdef CONFIG_PROVE_RCU */ -static inline void rcu_preempt_sleep_check(void) { } +static inline void rcu_preempt_sleep_check(void) +{ +} #endif /* #else #ifdef CONFIG_PROVE_RCU */ #define rcu_sleep_check() \ do { \ rcu_preempt_sleep_check(); \ - if (!IS_ENABLED(CONFIG_PREEMPT_RT)) \ - RCU_LOCKDEP_WARN(lock_is_held(&rcu_bh_lock_map), \ + RCU_LOCKDEP_WARN(lock_is_held(&rcu_bh_lock_map), \ "Illegal context switch in RCU-bh read-side critical section"); \ RCU_LOCKDEP_WARN(lock_is_held(&rcu_sched_lock_map), \ "Illegal context switch in RCU-sched read-side critical section"); \ @@ -343,7 +577,7 @@ static inline void 
rcu_preempt_sleep_check(void) { } #else /* #ifdef CONFIG_PROVE_RCU */ -#define RCU_LOCKDEP_WARN(c, s) do { } while (0 && (c)) +#define RCU_LOCKDEP_WARN(c, s) do { } while (0) #define rcu_sleep_check() do { } while (0) #endif /* #else #ifdef CONFIG_PROVE_RCU */ @@ -352,55 +586,42 @@ static inline void rcu_preempt_sleep_check(void) { } * Helper functions for rcu_dereference_check(), rcu_dereference_protected() * and rcu_assign_pointer(). Some of these could be folded into their * callers, but they are left separate in order to ease introduction of - * multiple pointers markings to match different RCU implementations - * (e.g., __srcu), should this make sense in the future. + * multiple flavors of pointers to match the multiple flavors of RCU + * (e.g., __rcu_bh, * __rcu_sched, and __srcu), should this make sense in + * the future. */ #ifdef __CHECKER__ -#define rcu_check_sparse(p, space) \ +#define rcu_dereference_sparse(p, space) \ ((void)(((typeof(*p) space *)p) == p)) #else /* #ifdef __CHECKER__ */ -#define rcu_check_sparse(p, space) +#define rcu_dereference_sparse(p, space) #endif /* #else #ifdef __CHECKER__ */ -/** - * unrcu_pointer - mark a pointer as not being RCU protected - * @p: pointer needing to lose its __rcu property - * - * Converts @p from an __rcu pointer to a __kernel pointer. - * This allows an __rcu pointer to be used with xchg() and friends. - */ -#define unrcu_pointer(p) \ -({ \ - typeof(*p) *_________p1 = (typeof(*p) *__force)(p); \ - rcu_check_sparse(p, __rcu); \ - ((typeof(*p) __force __kernel *)(_________p1)); \ -}) - #define __rcu_access_pointer(p, space) \ ({ \ typeof(*p) *_________p1 = (typeof(*p) *__force)READ_ONCE(p); \ - rcu_check_sparse(p, space); \ + rcu_dereference_sparse(p, space); \ ((typeof(*p) __force __kernel *)(_________p1)); \ }) #define __rcu_dereference_check(p, c, space) \ ({ \ /* Dependency order vs. p above. 
*/ \ - typeof(*p) *________p1 = (typeof(*p) *__force)READ_ONCE(p); \ + typeof(*p) *________p1 = (typeof(*p) *__force)lockless_dereference(p); \ RCU_LOCKDEP_WARN(!(c), "suspicious rcu_dereference_check() usage"); \ - rcu_check_sparse(p, space); \ + rcu_dereference_sparse(p, space); \ ((typeof(*p) __force __kernel *)(________p1)); \ }) #define __rcu_dereference_protected(p, c, space) \ ({ \ RCU_LOCKDEP_WARN(!(c), "suspicious rcu_dereference_protected() usage"); \ - rcu_check_sparse(p, space); \ + rcu_dereference_sparse(p, space); \ ((typeof(*p) __force __kernel *)(p)); \ }) #define rcu_dereference_raw(p) \ ({ \ /* Dependency order vs. p above. */ \ - typeof(p) ________p1 = READ_ONCE(p); \ + typeof(p) ________p1 = lockless_dereference(p); \ ((typeof(*p) __force __kernel *)(________p1)); \ }) @@ -442,32 +663,14 @@ static inline void rcu_preempt_sleep_check(void) { } * other macros that it invokes. */ #define rcu_assign_pointer(p, v) \ -do { \ +({ \ uintptr_t _r_a_p__v = (uintptr_t)(v); \ - rcu_check_sparse(p, __rcu); \ \ if (__builtin_constant_p(v) && (_r_a_p__v) == (uintptr_t)NULL) \ WRITE_ONCE((p), (typeof(p))(_r_a_p__v)); \ else \ smp_store_release(&p, RCU_INITIALIZER((typeof(p))_r_a_p__v)); \ -} while (0) - -/** - * rcu_replace_pointer() - replace an RCU pointer, returning its old value - * @rcu_ptr: RCU pointer, whose old value is returned - * @ptr: regular pointer - * @c: the lockdep conditions under which the dereference will take place - * - * Perform a replacement, where @rcu_ptr is an RCU-annotated - * pointer and @c is the lockdep argument that is passed to the - * rcu_dereference_protected() call used to read that pointer. The old - * value of @rcu_ptr is returned, and @rcu_ptr is set to @ptr. 
- */ -#define rcu_replace_pointer(rcu_ptr, ptr, c) \ -({ \ - typeof(ptr) __tmp = rcu_dereference_protected((rcu_ptr), (c)); \ - rcu_assign_pointer((rcu_ptr), (ptr)); \ - __tmp; \ + _r_a_p__v; \ }) /** @@ -475,12 +678,12 @@ do { \ * @p: The pointer to read * * Return the value of the specified RCU-protected pointer, but omit the - * lockdep checks for being in an RCU read-side critical section. This is - * useful when the value of this pointer is accessed, but the pointer is - * not dereferenced, for example, when testing an RCU-protected pointer - * against NULL. Although rcu_access_pointer() may also be used in cases - * where update-side locks prevent the value of the pointer from changing, - * you should instead use rcu_dereference_protected() for this use case. + * smp_read_barrier_depends() and keep the READ_ONCE(). This is useful + * when the value of this pointer is accessed, but the pointer is not + * dereferenced, for example, when testing an RCU-protected pointer against + * NULL. Although rcu_access_pointer() may also be used in cases where + * update-side locks prevent the value of the pointer from changing, you + * should instead use rcu_dereference_protected() for this use case. * * It is also permissible to use rcu_access_pointer() when read-side * access to the pointer was removed at least one grace period ago, as @@ -532,12 +735,7 @@ do { \ * @p: The pointer to read, prior to dereferencing * @c: The conditions under which the dereference will take place * - * This is the RCU-bh counterpart to rcu_dereference_check(). However, - * please note that starting in v5.0 kernels, vanilla RCU grace periods - * wait for local_bh_disable() regions of code in addition to regions of - * code demarked by rcu_read_lock() and rcu_read_unlock(). This means - * that synchronize_rcu(), call_rcu, and friends all take not only - * rcu_read_lock() but also rcu_read_lock_bh() into account. + * This is the RCU-bh counterpart to rcu_dereference_check(). 
*/ #define rcu_dereference_bh_check(p, c) \ __rcu_dereference_check((p), (c) || rcu_read_lock_bh_held(), __rcu) @@ -548,11 +746,6 @@ do { \ * @c: The conditions under which the dereference will take place * * This is the RCU-sched counterpart to rcu_dereference_check(). - * However, please note that starting in v5.0 kernels, vanilla RCU grace - * periods wait for preempt_disable() regions of code in addition to - * regions of code demarked by rcu_read_lock() and rcu_read_unlock(). - * This means that synchronize_rcu(), call_rcu, and friends all take not - * only rcu_read_lock() but also rcu_read_lock_sched() into account. */ #define rcu_dereference_sched_check(p, c) \ __rcu_dereference_check((p), (c) || rcu_read_lock_sched_held(), \ @@ -565,7 +758,7 @@ do { \ * The no-tracing version of rcu_dereference_raw() must not call * rcu_read_lock_held(). */ -#define rcu_dereference_raw_check(p) __rcu_dereference_check((p), 1, __rcu) +#define rcu_dereference_raw_notrace(p) __rcu_dereference_check((p), 1, __rcu) /** * rcu_dereference_protected() - fetch RCU pointer when updates prevented @@ -573,11 +766,12 @@ do { \ * @c: The conditions under which the dereference will take place * * Return the value of the specified RCU-protected pointer, but omit - * the READ_ONCE(). This is useful in cases where update-side locks - * prevent the value of the pointer from changing. Please note that this - * primitive does *not* prevent the compiler from repeating this reference - * or combining it with other references, so it should not be used without - * protection of appropriate locks. + * both the smp_read_barrier_depends() and the READ_ONCE(). This + * is useful in cases where update-side locks prevent the value of the + * pointer from changing. Please note that this primitive does -not- + * prevent the compiler from repeating this reference or combining it + * with other references, so it should not be used without protection + * of appropriate locks. 
* * This function is only for update-side use. Using this function * when protected only by rcu_read_lock() will result in infrequent @@ -618,7 +812,7 @@ do { \ * This is simply an identity function, but it documents where a pointer * is handed off from RCU to some other synchronization mechanism, for * example, reference counting or locking. In C11, it would map to - * kill_dependency(). It could be used as follows:: + * kill_dependency(). It could be used as follows: * * rcu_read_lock(); * p = rcu_dereference(gp); @@ -644,12 +838,6 @@ do { \ * sections, invocation of the corresponding RCU callback is deferred * until after the all the other CPUs exit their critical sections. * - * In v5.0 and later kernels, synchronize_rcu() and call_rcu() also - * wait for regions of code with preemption disabled, including regions of - * code with interrupts or softirqs disabled. In pre-v5.0 kernels, which - * define synchronize_sched(), only code enclosed within rcu_read_lock() - * and rcu_read_unlock() are guaranteed to be waited for. - * * Note, however, that RCU callbacks are permitted to run concurrently * with new RCU read-side critical sections. One way that this can happen * is via the following sequence of events: (1) CPU 0 enters an RCU @@ -668,19 +856,20 @@ do { \ * * You can avoid reading and understanding the next paragraph by * following this rule: don't put anything in an rcu_read_lock() RCU - * read-side critical section that would block in a !PREEMPTION kernel. + * read-side critical section that would block in a !PREEMPT kernel. * But if you want the full story, read on! * - * In non-preemptible RCU implementations (pure TREE_RCU and TINY_RCU), + * In non-preemptible RCU implementations (TREE_RCU and TINY_RCU), * it is illegal to block while in an RCU read-side critical section. 
- * In preemptible RCU implementations (PREEMPT_RCU) in CONFIG_PREEMPTION + * In preemptible RCU implementations (PREEMPT_RCU) in CONFIG_PREEMPT * kernel builds, RCU read-side critical sections may be preempted, * but explicit blocking is illegal. Finally, in preemptible RCU * implementations in real-time (with -rt patchset) kernel builds, RCU * read-side critical sections may be preempted and they may also block, but * only when acquiring spinlocks that are subject to priority inheritance. */ -static __always_inline void rcu_read_lock(void) +static inline void rcu_read_lock(void) __acquires(RCU); +static inline void rcu_read_lock(void) { __rcu_read_lock(); __acquire(RCU); @@ -702,15 +891,39 @@ static __always_inline void rcu_read_lock(void) /** * rcu_read_unlock() - marks the end of an RCU read-side critical section. * - * In almost all situations, rcu_read_unlock() is immune from deadlock. - * In recent kernels that have consolidated synchronize_sched() and - * synchronize_rcu_bh() into synchronize_rcu(), this deadlock immunity - * also extends to the scheduler's runqueue and priority-inheritance - * spinlocks, courtesy of the quiescent-state deferral that is carried - * out when rcu_read_unlock() is invoked with interrupts disabled. + * In most situations, rcu_read_unlock() is immune from deadlock. + * However, in kernels built with CONFIG_RCU_BOOST, rcu_read_unlock() + * is responsible for deboosting, which it does via rt_mutex_unlock(). + * Unfortunately, this function acquires the scheduler's runqueue and + * priority-inheritance spinlocks. This means that deadlock could result + * if the caller of rcu_read_unlock() already holds one of these locks or + * any lock that is ever acquired while holding them; or any lock which + * can be taken from interrupt context because rcu_boost()->rt_mutex_lock() + * does not disable irqs while taking ->wait_lock. + * + * That said, RCU readers are never priority boosted unless they were + * preempted. 
Therefore, one way to avoid deadlock is to make sure + * that preemption never happens within any RCU read-side critical + * section whose outermost rcu_read_unlock() is called with one of + * rt_mutex_unlock()'s locks held. Such preemption can be avoided in + * a number of ways, for example, by invoking preempt_disable() before + * critical section's outermost rcu_read_lock(). + * + * Given that the set of locks acquired by rt_mutex_unlock() might change + * at any time, a somewhat more future-proofed approach is to make sure + * that that preemption never happens within any RCU read-side critical + * section whose outermost rcu_read_unlock() is called with irqs disabled. + * This approach relies on the fact that rt_mutex_unlock() currently only + * acquires irq-disabled locks. + * + * The second of these two approaches is best in most situations, + * however, the first approach can also be useful, at least to those + * developers willing to keep abreast of the set of locks acquired by + * rt_mutex_unlock(). * * See rcu_read_lock() for more information. */ +static inline void rcu_read_unlock(void) __releases(RCU); static inline void rcu_read_unlock(void) { RCU_LOCKDEP_WARN(!rcu_is_watching(), @@ -723,17 +936,21 @@ static inline void rcu_read_unlock(void) /** * rcu_read_lock_bh() - mark the beginning of an RCU-bh critical section * - * This is equivalent to rcu_read_lock(), but also disables softirqs. - * Note that anything else that disables softirqs can also serve as an RCU - * read-side critical section. However, please note that this equivalence - * applies only to v5.0 and later. Before v5.0, rcu_read_lock() and - * rcu_read_lock_bh() were unrelated. + * This is equivalent of rcu_read_lock(), but to be used when updates + * are being done using call_rcu_bh() or synchronize_rcu_bh(). 
Since + * both call_rcu_bh() and synchronize_rcu_bh() consider completion of a + * softirq handler to be a quiescent state, a process in RCU read-side + * critical section must be protected by disabling softirqs. Read-side + * critical sections in interrupt context can use just rcu_read_lock(), + * though this should at least be commented to avoid confusing people + * reading the code. * * Note that rcu_read_lock_bh() and the matching rcu_read_unlock_bh() * must occur in the same context, for example, it is illegal to invoke * rcu_read_unlock_bh() from one task if the matching rcu_read_lock_bh() * was invoked from some other task. */ +static inline void rcu_read_lock_bh(void) __acquires(RCU_BH); static inline void rcu_read_lock_bh(void) { local_bh_disable(); @@ -743,11 +960,12 @@ static inline void rcu_read_lock_bh(void) "rcu_read_lock_bh() used illegally while idle"); } -/** - * rcu_read_unlock_bh() - marks the end of a softirq-only RCU critical section +/* + * rcu_read_unlock_bh - marks the end of a softirq-only RCU critical section * * See rcu_read_lock_bh() for more information. */ +static inline void rcu_read_unlock_bh(void) __releases(RCU_BH); static inline void rcu_read_unlock_bh(void) { RCU_LOCKDEP_WARN(!rcu_is_watching(), @@ -760,18 +978,17 @@ static inline void rcu_read_unlock_bh(void) /** * rcu_read_lock_sched() - mark the beginning of a RCU-sched critical section * - * This is equivalent to rcu_read_lock(), but also disables preemption. - * Read-side critical sections can also be introduced by anything else that - * disables preemption, including local_irq_disable() and friends. However, - * please note that the equivalence to rcu_read_lock() applies only to - * v5.0 and later. Before v5.0, rcu_read_lock() and rcu_read_lock_sched() - * were unrelated. + * This is equivalent of rcu_read_lock(), but to be used when updates + * are being done using call_rcu_sched() or synchronize_rcu_sched(). 
+ * Read-side critical sections can also be introduced by anything that + * disables preemption, including local_irq_disable() and friends. * * Note that rcu_read_lock_sched() and the matching rcu_read_unlock_sched() * must occur in the same context, for example, it is illegal to invoke * rcu_read_unlock_sched() from process context if the matching * rcu_read_lock_sched() was invoked from an NMI handler. */ +static inline void rcu_read_lock_sched(void) __acquires(RCU_SCHED); static inline void rcu_read_lock_sched(void) { preempt_disable(); @@ -782,17 +999,19 @@ static inline void rcu_read_lock_sched(void) } /* Used by lockdep and tracing: cannot be traced, cannot call lockdep. */ +static inline notrace void rcu_read_lock_sched_notrace(void) __acquires(RCU_SCHED); static inline notrace void rcu_read_lock_sched_notrace(void) { preempt_disable_notrace(); __acquire(RCU_SCHED); } -/** - * rcu_read_unlock_sched() - marks the end of a RCU-classic critical section +/* + * rcu_read_unlock_sched - marks the end of a RCU-classic critical section * - * See rcu_read_lock_sched() for more information. + * See rcu_read_lock_sched for more information. */ +static inline void rcu_read_unlock_sched(void) __releases(RCU_SCHED); static inline void rcu_read_unlock_sched(void) { RCU_LOCKDEP_WARN(!rcu_is_watching(), @@ -803,6 +1022,7 @@ static inline void rcu_read_unlock_sched(void) } /* Used by lockdep and tracing: cannot be traced, cannot call lockdep. */ +static inline notrace void rcu_read_unlock_sched_notrace(void) __releases(RCU_SCHED); static inline notrace void rcu_read_unlock_sched_notrace(void) { __release(RCU_SCHED); @@ -811,21 +1031,18 @@ static inline notrace void rcu_read_unlock_sched_notrace(void) /** * RCU_INIT_POINTER() - initialize an RCU protected pointer - * @p: The pointer to be initialized. - * @v: The value to initialized the pointer to. 
* * Initialize an RCU-protected pointer in special cases where readers * do not need ordering constraints on the CPU or the compiler. These * special cases are: * - * 1. This use of RCU_INIT_POINTER() is NULLing out the pointer *or* + * 1. This use of RCU_INIT_POINTER() is NULLing out the pointer -or- * 2. The caller has taken whatever steps are required to prevent - * RCU readers from concurrently accessing this pointer *or* + * RCU readers from concurrently accessing this pointer -or- * 3. The referenced data structure has already been exposed to - * readers either at compile time or via rcu_assign_pointer() *and* - * - * a. You have not made *any* reader-visible changes to - * this structure since then *or* + * readers either at compile time or via rcu_assign_pointer() -and- + * a. You have not made -any- reader-visible changes to + * this structure since then -or- * b. It is OK for readers accessing this structure from its * new location to see the old state of the structure. (For * example, the changes were to statistical counters or to @@ -841,7 +1058,7 @@ static inline notrace void rcu_read_unlock_sched_notrace(void) * by a single external-to-structure RCU-protected pointer, then you may * use RCU_INIT_POINTER() to initialize the internal RCU-protected * pointers, but you must use rcu_assign_pointer() to initialize the - * external-to-structure pointer *after* you have completely initialized + * external-to-structure pointer -after- you have completely initialized * the reader-accessible portions of the linked structure. * * Note that unlike rcu_assign_pointer(), RCU_INIT_POINTER() provides no @@ -849,14 +1066,12 @@ static inline notrace void rcu_read_unlock_sched_notrace(void) */ #define RCU_INIT_POINTER(p, v) \ do { \ - rcu_check_sparse(p, __rcu); \ + rcu_dereference_sparse(p, __rcu); \ WRITE_ONCE(p, RCU_INITIALIZER(v)); \ } while (0) /** * RCU_POINTER_INITIALIZER() - statically initialize an RCU protected pointer - * @p: The pointer to be initialized. 
- * @v: The value to initialized the pointer to. * * GCC-style initialization for an RCU-protected pointer in a structure field. */ @@ -865,15 +1080,23 @@ static inline notrace void rcu_read_unlock_sched_notrace(void) /* * Does the specified offset indicate that the corresponding rcu_head - * structure can be handled by kvfree_rcu()? + * structure can be handled by kfree_rcu()? */ -#define __is_kvfree_rcu_offset(offset) ((offset) < 4096) +#define __is_kfree_rcu_offset(offset) ((offset) < 4096) + +/* + * Helper macro for kfree_rcu() to prevent argument-expansion eyestrain. + */ +#define __kfree_rcu(head, offset) \ + do { \ + BUILD_BUG_ON(!__is_kfree_rcu_offset(offset)); \ + kfree_call_rcu(head, (rcu_callback_t)(unsigned long)(offset)); \ + } while (0) /** * kfree_rcu() - kfree an object after a grace period. - * @ptr: pointer to kfree for both single- and double-argument invocations. - * @rhf: the name of the struct rcu_head within the type of @ptr, - * but only for double-argument invocations. + * @ptr: pointer to kfree + * @rcu_head: the name of the struct rcu_head within the type of @ptr. * * Many rcu callbacks functions just call kfree() on the base structure. * These functions are trivial, but their size adds up, and furthermore @@ -886,7 +1109,7 @@ static inline notrace void rcu_read_unlock_sched_notrace(void) * Because the functions are not allowed in the low-order 4096 bytes of * kernel virtual memory, offsets up to 4095 bytes can be accommodated. * If the offset is larger than 4095 bytes, a compile-time error will - * be generated in kvfree_rcu_arg_2(). If this error is triggered, you can + * be generated in __kfree_rcu(). If this error is triggered, you can * either fall back to use of call_rcu() or rearrange the structure to * position the rcu_head structure into the first 4096 bytes. 
* @@ -896,113 +1119,55 @@ static inline notrace void rcu_read_unlock_sched_notrace(void) * The BUILD_BUG_ON check must not involve any function calls, hence the * checks are done in macros here. */ -#define kfree_rcu(ptr, rhf...) kvfree_rcu(ptr, ## rhf) +#define kfree_rcu(ptr, rcu_head) \ + __kfree_rcu(&((ptr)->rcu_head), offsetof(typeof(*(ptr)), rcu_head)) -/** - * kvfree_rcu() - kvfree an object after a grace period. - * - * This macro consists of one or two arguments and it is - * based on whether an object is head-less or not. If it - * has a head then a semantic stays the same as it used - * to be before: - * - * kvfree_rcu(ptr, rhf); - * - * where @ptr is a pointer to kvfree(), @rhf is the name - * of the rcu_head structure within the type of @ptr. - * - * When it comes to head-less variant, only one argument - * is passed and that is just a pointer which has to be - * freed after a grace period. Therefore the semantic is - * - * kvfree_rcu(ptr); - * - * where @ptr is a pointer to kvfree(). - * - * Please note, head-less way of freeing is permitted to - * use from a context that has to follow might_sleep() - * annotation. Otherwise, please switch and embed the - * rcu_head structure within the type of @ptr. - */ -#define kvfree_rcu(...) KVFREE_GET_MACRO(__VA_ARGS__, \ - kvfree_rcu_arg_2, kvfree_rcu_arg_1)(__VA_ARGS__) - -#define KVFREE_GET_MACRO(_1, _2, NAME, ...) NAME -#define kvfree_rcu_arg_2(ptr, rhf) \ -do { \ - typeof (ptr) ___p = (ptr); \ - \ - if (___p) { \ - BUILD_BUG_ON(!__is_kvfree_rcu_offset(offsetof(typeof(*(ptr)), rhf))); \ - kvfree_call_rcu(&((___p)->rhf), (rcu_callback_t)(unsigned long) \ - (offsetof(typeof(*(ptr)), rhf))); \ - } \ -} while (0) - -#define kvfree_rcu_arg_1(ptr) \ -do { \ - typeof(ptr) ___p = (ptr); \ - \ - if (___p) \ - kvfree_call_rcu(NULL, (rcu_callback_t) (___p)); \ -} while (0) - -/* - * Place this after a lock-acquisition primitive to guarantee that - * an UNLOCK+LOCK pair acts as a full barrier. 
This guarantee applies - * if the UNLOCK and LOCK are executed by the same CPU or if the - * UNLOCK and LOCK operate on the same lock variable. - */ -#ifdef CONFIG_ARCH_WEAK_RELEASE_ACQUIRE -#define smp_mb__after_unlock_lock() smp_mb() /* Full ordering for lock. */ -#else /* #ifdef CONFIG_ARCH_WEAK_RELEASE_ACQUIRE */ -#define smp_mb__after_unlock_lock() do { } while (0) -#endif /* #else #ifdef CONFIG_ARCH_WEAK_RELEASE_ACQUIRE */ - - -/* Has the specified rcu_head structure been handed to call_rcu()? */ - -/** - * rcu_head_init - Initialize rcu_head for rcu_head_after_call_rcu() - * @rhp: The rcu_head structure to initialize. - * - * If you intend to invoke rcu_head_after_call_rcu() to test whether a - * given rcu_head structure has already been passed to call_rcu(), then - * you must also invoke this rcu_head_init() function on it just after - * allocating that structure. Calls to this function must not race with - * calls to call_rcu(), rcu_head_after_call_rcu(), or callback invocation. - */ -static inline void rcu_head_init(struct rcu_head *rhp) +#ifdef CONFIG_TINY_RCU +static inline int rcu_needs_cpu(u64 basemono, u64 *nextevt) { - rhp->func = (rcu_callback_t)~0L; + *nextevt = KTIME_MAX; + return 0; } +#endif /* #ifdef CONFIG_TINY_RCU */ -/** - * rcu_head_after_call_rcu() - Has this rcu_head been passed to call_rcu()? - * @rhp: The rcu_head structure to test. - * @f: The function passed to call_rcu() along with @rhp. - * - * Returns @true if the @rhp has been passed to call_rcu() with @func, - * and @false otherwise. Emits a warning in any other case, including - * the case where @rhp has already been invoked after a grace period. - * Calls to this function must not race with callback invocation. One way - * to avoid such races is to enclose the call to rcu_head_after_call_rcu() - * in an RCU read-side critical section that includes a read-side fetch - * of the pointer to the structure containing @rhp. 
- */ -static inline bool -rcu_head_after_call_rcu(struct rcu_head *rhp, rcu_callback_t f) +#if defined(CONFIG_RCU_NOCB_CPU_ALL) +static inline bool rcu_is_nocb_cpu(int cpu) { return true; } +#elif defined(CONFIG_RCU_NOCB_CPU) +bool rcu_is_nocb_cpu(int cpu); +#else +static inline bool rcu_is_nocb_cpu(int cpu) { return false; } +#endif + + +/* Only for use by adaptive-ticks code. */ +#ifdef CONFIG_NO_HZ_FULL_SYSIDLE +bool rcu_sys_is_idle(void); +void rcu_sysidle_force_exit(void); +#else /* #ifdef CONFIG_NO_HZ_FULL_SYSIDLE */ + +static inline bool rcu_sys_is_idle(void) { - rcu_callback_t func = READ_ONCE(rhp->func); - - if (func == f) - return true; - WARN_ON_ONCE(func != (rcu_callback_t)~0L); return false; } -/* kernel/ksysfs.c definitions */ -extern int rcu_expedited; -extern int rcu_normal; +static inline void rcu_sysidle_force_exit(void) +{ +} + +#endif /* #else #ifdef CONFIG_NO_HZ_FULL_SYSIDLE */ + + +/* + * Dump the ftrace buffer, but only one time per callsite per boot. + */ +#define rcu_ftrace_dump(oops_dump_mode) \ +do { \ + static atomic_t ___rfd_beenhere = ATOMIC_INIT(0); \ + \ + if (!atomic_read(&___rfd_beenhere) && \ + !atomic_xchg(&___rfd_beenhere, 1)) \ + ftrace_dump(oops_dump_mode); \ +} while (0) + #endif /* __LINUX_RCUPDATE_H */ diff --git a/include/linux/rcutiny.h b/include/linux/rcutiny.h index 9be015305f..ac81e4063b 100644 --- a/include/linux/rcutiny.h +++ b/include/linux/rcutiny.h @@ -1,10 +1,23 @@ -/* SPDX-License-Identifier: GPL-2.0+ */ /* * Read-Copy Update mechanism for mutual exclusion, the Bloatwatch edition. * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. 
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, you can access it online at + * http://www.gnu.org/licenses/gpl-2.0.html. + * * Copyright IBM Corporation, 2008 * - * Author: Paul E. McKenney + * Author: Paul E. McKenney * * For detailed explanation of Read-Copy Update mechanism see - * Documentation/RCU @@ -12,100 +25,223 @@ #ifndef __LINUX_TINY_H #define __LINUX_TINY_H -#include /* for HZ */ +#include -unsigned long get_state_synchronize_rcu(void); -unsigned long start_poll_synchronize_rcu(void); -bool poll_state_synchronize_rcu(unsigned long oldstate); +static inline unsigned long get_state_synchronize_rcu(void) +{ + return 0; +} static inline void cond_synchronize_rcu(unsigned long oldstate) { might_sleep(); } -extern void rcu_barrier(void); +static inline unsigned long get_state_synchronize_sched(void) +{ + return 0; +} + +static inline void cond_synchronize_sched(unsigned long oldstate) +{ + might_sleep(); +} + +static inline void rcu_barrier_bh(void) +{ + wait_rcu_gp(call_rcu_bh); +} + +static inline void rcu_barrier_sched(void) +{ + wait_rcu_gp(call_rcu_sched); +} static inline void synchronize_rcu_expedited(void) { - synchronize_rcu(); + synchronize_sched(); /* Only one CPU, so pretty fast anyway!!! */ } -/* - * Add one more declaration of kvfree() here. It is - * not so straight forward to just include - * where it is defined due to getting many compile - * errors caused by that include. - */ -extern void kvfree(const void *addr); - -static inline void kvfree_call_rcu(struct rcu_head *head, rcu_callback_t func) +static inline void rcu_barrier(void) { - if (head) { - call_rcu(head, func); - return; - } - - // kvfree_rcu(one_arg) call. 
- might_sleep(); - synchronize_rcu(); - kvfree((void *) func); + rcu_barrier_sched(); /* Only one CPU, so only one list of callbacks! */ } -void rcu_qs(void); - -static inline void rcu_softirq_qs(void) +static inline void synchronize_rcu_bh(void) { - rcu_qs(); + synchronize_sched(); } -#define rcu_note_context_switch(preempt) \ - do { \ - rcu_qs(); \ - rcu_tasks_qs(current, (preempt)); \ - } while (0) - -static inline int rcu_needs_cpu(u64 basemono, u64 *nextevt) +static inline void synchronize_rcu_bh_expedited(void) { - *nextevt = KTIME_MAX; - return 0; + synchronize_sched(); +} + +static inline void synchronize_sched_expedited(void) +{ + synchronize_sched(); +} + +static inline void kfree_call_rcu(struct rcu_head *head, + rcu_callback_t func) +{ + call_rcu(head, func); +} + +static inline void rcu_note_context_switch(void) +{ + rcu_sched_qs(); } /* * Take advantage of the fact that there is only one CPU, which * allows us to ignore virtualization-based context switches. */ -static inline void rcu_virt_note_context_switch(int cpu) { } -static inline void rcu_cpu_stall_reset(void) { } -static inline int rcu_jiffies_till_stall_check(void) { return 21 * HZ; } -static inline void rcu_idle_enter(void) { } -static inline void rcu_idle_exit(void) { } -static inline void rcu_irq_enter(void) { } -static inline void rcu_irq_exit_irqson(void) { } -static inline void rcu_irq_enter_irqson(void) { } -static inline void rcu_irq_exit(void) { } -static inline void rcu_irq_exit_check_preempt(void) { } -#define rcu_is_idle_cpu(cpu) \ - (is_idle_task(current) && !in_nmi() && !in_irq() && !in_serving_softirq()) -static inline void exit_rcu(void) { } -static inline bool rcu_preempt_need_deferred_qs(struct task_struct *t) +static inline void rcu_virt_note_context_switch(int cpu) { - return false; } -static inline void rcu_preempt_deferred_qs(struct task_struct *t) { } -#ifdef CONFIG_SRCU -void rcu_scheduler_starting(void); -#else /* #ifndef CONFIG_SRCU */ -static inline void 
rcu_scheduler_starting(void) { } -#endif /* #else #ifndef CONFIG_SRCU */ -static inline void rcu_end_inkernel_boot(void) { } -static inline bool rcu_inkernel_boot_has_ended(void) { return true; } -static inline bool rcu_is_watching(void) { return true; } -static inline void rcu_momentary_dyntick_idle(void) { } -static inline void kfree_rcu_scheduler_running(void) { } -static inline bool rcu_gp_might_be_stalled(void) { return false; } -/* Avoid RCU read-side critical sections leaking across. */ -static inline void rcu_all_qs(void) { barrier(); } +/* + * Return the number of grace periods started. + */ +static inline unsigned long rcu_batches_started(void) +{ + return 0; +} + +/* + * Return the number of bottom-half grace periods started. + */ +static inline unsigned long rcu_batches_started_bh(void) +{ + return 0; +} + +/* + * Return the number of sched grace periods started. + */ +static inline unsigned long rcu_batches_started_sched(void) +{ + return 0; +} + +/* + * Return the number of grace periods completed. + */ +static inline unsigned long rcu_batches_completed(void) +{ + return 0; +} + +/* + * Return the number of bottom-half grace periods completed. + */ +static inline unsigned long rcu_batches_completed_bh(void) +{ + return 0; +} + +/* + * Return the number of sched grace periods completed. + */ +static inline unsigned long rcu_batches_completed_sched(void) +{ + return 0; +} + +/* + * Return the number of expedited grace periods completed. + */ +static inline unsigned long rcu_exp_batches_completed(void) +{ + return 0; +} + +/* + * Return the number of expedited sched grace periods completed. 
+ */ +static inline unsigned long rcu_exp_batches_completed_sched(void) +{ + return 0; +} + +static inline void rcu_force_quiescent_state(void) +{ +} + +static inline void rcu_bh_force_quiescent_state(void) +{ +} + +static inline void rcu_sched_force_quiescent_state(void) +{ +} + +static inline void show_rcu_gp_kthreads(void) +{ +} + +static inline void rcu_cpu_stall_reset(void) +{ +} + +static inline void rcu_idle_enter(void) +{ +} + +static inline void rcu_idle_exit(void) +{ +} + +static inline void rcu_irq_enter(void) +{ +} + +static inline void rcu_irq_exit_irqson(void) +{ +} + +static inline void rcu_irq_enter_irqson(void) +{ +} + +static inline void rcu_irq_exit(void) +{ +} + +static inline void exit_rcu(void) +{ +} + +#ifdef CONFIG_DEBUG_LOCK_ALLOC +extern int rcu_scheduler_active __read_mostly; +void rcu_scheduler_starting(void); +#else /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */ +static inline void rcu_scheduler_starting(void) +{ +} +#endif /* #else #ifdef CONFIG_DEBUG_LOCK_ALLOC */ + +#if defined(CONFIG_DEBUG_LOCK_ALLOC) || defined(CONFIG_RCU_TRACE) + +static inline bool rcu_is_watching(void) +{ + return __rcu_is_watching(); +} + +#else /* defined(CONFIG_DEBUG_LOCK_ALLOC) || defined(CONFIG_RCU_TRACE) */ + +static inline bool rcu_is_watching(void) +{ + return true; +} + +#endif /* #else defined(CONFIG_DEBUG_LOCK_ALLOC) || defined(CONFIG_RCU_TRACE) */ + +static inline void rcu_all_qs(void) +{ + barrier(); /* Avoid RCU read-side critical sections leaking across. 
*/ +} /* RCUtree hotplug events */ #define rcutree_prepare_cpu NULL @@ -113,6 +249,5 @@ static inline void rcu_all_qs(void) { barrier(); } #define rcutree_offline_cpu NULL #define rcutree_dead_cpu NULL #define rcutree_dying_cpu NULL -static inline void rcu_cpu_starting(unsigned int cpu) { } #endif /* __LINUX_RCUTINY_H */ diff --git a/include/linux/rcutree.h b/include/linux/rcutree.h index 53209d6694..63a4e4cf40 100644 --- a/include/linux/rcutree.h +++ b/include/linux/rcutree.h @@ -1,13 +1,26 @@ -/* SPDX-License-Identifier: GPL-2.0+ */ /* * Read-Copy Update mechanism for mutual exclusion (tree-based version) * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, you can access it online at + * http://www.gnu.org/licenses/gpl-2.0.html. + * * Copyright IBM Corporation, 2008 * * Author: Dipankar Sarma - * Paul E. McKenney Hierarchical algorithm + * Paul E. McKenney Hierarchical algorithm * - * Based on the original work by Paul McKenney + * Based on the original work by Paul McKenney * and inputs from Rusty Russell, Andrea Arcangeli and Andi Kleen. 
* * For detailed explanation of Read-Copy Update mechanism see - @@ -17,8 +30,7 @@ #ifndef __LINUX_RCUTREE_H #define __LINUX_RCUTREE_H -void rcu_softirq_qs(void); -void rcu_note_context_switch(bool preempt); +void rcu_note_context_switch(void); int rcu_needs_cpu(u64 basem, u64 *nextevt); void rcu_cpu_stall_reset(void); @@ -29,21 +41,59 @@ void rcu_cpu_stall_reset(void); */ static inline void rcu_virt_note_context_switch(int cpu) { - rcu_note_context_switch(false); + rcu_note_context_switch(); } +void synchronize_rcu_bh(void); +void synchronize_sched_expedited(void); void synchronize_rcu_expedited(void); -void kvfree_call_rcu(struct rcu_head *head, rcu_callback_t func); + +void kfree_call_rcu(struct rcu_head *head, rcu_callback_t func); + +/** + * synchronize_rcu_bh_expedited - Brute-force RCU-bh grace period + * + * Wait for an RCU-bh grace period to elapse, but use a "big hammer" + * approach to force the grace period to end quickly. This consumes + * significant time on all CPUs and is unfriendly to real-time workloads, + * so is thus not recommended for any sort of common-case code. In fact, + * if you are using synchronize_rcu_bh_expedited() in a loop, please + * restructure your code to batch your updates, and then use a single + * synchronize_rcu_bh() instead. + * + * Note that it is illegal to call this function while holding any lock + * that is acquired by a CPU-hotplug notifier. And yes, it is also illegal + * to call this function from a CPU-hotplug notifier. Failing to observe + * these restriction will result in deadlock. 
+ */ +static inline void synchronize_rcu_bh_expedited(void) +{ + synchronize_sched_expedited(); +} void rcu_barrier(void); -bool rcu_eqs_special_set(int cpu); -void rcu_momentary_dyntick_idle(void); -void kfree_rcu_scheduler_running(void); -bool rcu_gp_might_be_stalled(void); +void rcu_barrier_bh(void); +void rcu_barrier_sched(void); unsigned long get_state_synchronize_rcu(void); -unsigned long start_poll_synchronize_rcu(void); -bool poll_state_synchronize_rcu(unsigned long oldstate); void cond_synchronize_rcu(unsigned long oldstate); +unsigned long get_state_synchronize_sched(void); +void cond_synchronize_sched(unsigned long oldstate); + +extern unsigned long rcutorture_testseq; +extern unsigned long rcutorture_vernum; +unsigned long rcu_batches_started(void); +unsigned long rcu_batches_started_bh(void); +unsigned long rcu_batches_started_sched(void); +unsigned long rcu_batches_completed(void); +unsigned long rcu_batches_completed_bh(void); +unsigned long rcu_batches_completed_sched(void); +unsigned long rcu_exp_batches_completed(void); +unsigned long rcu_exp_batches_completed_sched(void); +void show_rcu_gp_kthreads(void); + +void rcu_force_quiescent_state(void); +void rcu_bh_force_quiescent_state(void); +void rcu_sched_force_quiescent_state(void); void rcu_idle_enter(void); void rcu_idle_exit(void); @@ -51,24 +101,15 @@ void rcu_irq_enter(void); void rcu_irq_exit(void); void rcu_irq_enter_irqson(void); void rcu_irq_exit_irqson(void); -bool rcu_is_idle_cpu(int cpu); - -#ifdef CONFIG_PROVE_RCU -void rcu_irq_exit_check_preempt(void); -#else -static inline void rcu_irq_exit_check_preempt(void) { } -#endif void exit_rcu(void); void rcu_scheduler_starting(void); extern int rcu_scheduler_active __read_mostly; -void rcu_end_inkernel_boot(void); -bool rcu_inkernel_boot_has_ended(void); + bool rcu_is_watching(void); -#ifndef CONFIG_PREEMPTION + void rcu_all_qs(void); -#endif /* RCUtree hotplug events */ int rcutree_prepare_cpu(unsigned int cpu); @@ -76,6 +117,5 @@ int 
rcutree_online_cpu(unsigned int cpu); int rcutree_offline_cpu(unsigned int cpu); int rcutree_dead_cpu(unsigned int cpu); int rcutree_dying_cpu(unsigned int cpu); -void rcu_cpu_starting(unsigned int cpu); #endif /* __LINUX_RCUTREE_H */ diff --git a/include/linux/reboot.h b/include/linux/reboot.h index af907a3d68..03e2fa8e7c 100644 --- a/include/linux/reboot.h +++ b/include/linux/reboot.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_REBOOT_H #define _LINUX_REBOOT_H @@ -6,15 +5,12 @@ #include #include -struct device; - #define SYS_DOWN 0x0001 /* Notify of system down */ #define SYS_RESTART SYS_DOWN #define SYS_HALT 0x0002 /* Notify of system halt */ #define SYS_POWER_OFF 0x0003 /* Notify of system power off */ enum reboot_mode { - REBOOT_UNDEFINED = -1, REBOOT_COLD = 0, REBOOT_WARM, REBOOT_HARD, @@ -22,7 +18,6 @@ enum reboot_mode { REBOOT_GPIO, }; extern enum reboot_mode reboot_mode; -extern enum reboot_mode panic_reboot_mode; enum reboot_type { BOOT_TRIPLE = 't', @@ -43,8 +38,6 @@ extern int reboot_force; extern int register_reboot_notifier(struct notifier_block *); extern int unregister_reboot_notifier(struct notifier_block *); -extern int devm_register_reboot_notifier(struct device *, struct notifier_block *); - extern int register_restart_handler(struct notifier_block *); extern int unregister_restart_handler(struct notifier_block *); extern void do_kernel_restart(char *cmd); @@ -54,9 +47,9 @@ extern void do_kernel_restart(char *cmd); */ extern void migrate_to_reboot_cpu(void); -extern void machine_restart(char *cmd); -extern void machine_halt(void); -extern void machine_power_off(void); +extern void machine_restart(char *cmd) __noreturn; +extern void machine_halt(void) __noreturn; +extern void machine_power_off(void) __noreturn; extern void machine_shutdown(void); struct pt_regs; @@ -67,9 +60,9 @@ extern void machine_crash_shutdown(struct pt_regs *); */ extern void kernel_restart_prepare(char *cmd); -extern void kernel_restart(char 
*cmd); -extern void kernel_halt(void); -extern void kernel_power_off(void); +extern void kernel_restart(char *cmd) __noreturn; +extern void kernel_halt(void) __noreturn; +extern void kernel_power_off(void) __noreturn; extern int C_A_D; /* for sysctl */ void ctrl_alt_del(void); @@ -79,13 +72,12 @@ extern char poweroff_cmd[POWEROFF_CMD_PATH_LEN]; extern void orderly_poweroff(bool force); extern void orderly_reboot(void); -void hw_protection_shutdown(const char *reason, int ms_until_forced); /* * Emergency restart, callable from an interrupt handler. */ -extern void emergency_restart(void); +extern void emergency_restart(void) __noreturn; #include #endif /* _LINUX_REBOOT_H */ diff --git a/include/linux/reciprocal_div.h b/include/linux/reciprocal_div.h index 585ce89c0f..8c5a3fb6c6 100644 --- a/include/linux/reciprocal_div.h +++ b/include/linux/reciprocal_div.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_RECIPROCAL_DIV_H #define _LINUX_RECIPROCAL_DIV_H @@ -25,9 +24,6 @@ struct reciprocal_value { u8 sh1, sh2; }; -/* "reciprocal_value" and "reciprocal_divide" together implement the basic - * version of the algorithm described in Figure 4.1 of the paper. - */ struct reciprocal_value reciprocal_value(u32 d); static inline u32 reciprocal_divide(u32 a, struct reciprocal_value R) @@ -36,69 +32,4 @@ static inline u32 reciprocal_divide(u32 a, struct reciprocal_value R) return (t + ((a - t) >> R.sh1)) >> R.sh2; } -struct reciprocal_value_adv { - u32 m; - u8 sh, exp; - bool is_wide_m; -}; - -/* "reciprocal_value_adv" implements the advanced version of the algorithm - * described in Figure 4.2 of the paper except when "divisor > (1U << 31)" whose - * ceil(log2(d)) result will be 32 which then requires u128 divide on host. The - * exception case could be easily handled before calling "reciprocal_value_adv". 
- * - * The advanced version requires more complex calculation to get the reciprocal - * multiplier and other control variables, but then could reduce the required - * emulation operations. - * - * It makes no sense to use this advanced version for host divide emulation, - * those extra complexities for calculating multiplier etc could completely - * waive our saving on emulation operations. - * - * However, it makes sense to use it for JIT divide code generation for which - * we are willing to trade performance of JITed code with that of host. As shown - * by the following pseudo code, the required emulation operations could go down - * from 6 (the basic version) to 3 or 4. - * - * To use the result of "reciprocal_value_adv", suppose we want to calculate - * n/d, the pseudo C code will be: - * - * struct reciprocal_value_adv rvalue; - * u8 pre_shift, exp; - * - * // handle exception case. - * if (d >= (1U << 31)) { - * result = n >= d; - * return; - * } - * - * rvalue = reciprocal_value_adv(d, 32) - * exp = rvalue.exp; - * if (rvalue.is_wide_m && !(d & 1)) { - * // floor(log2(d & (2^32 -d))) - * pre_shift = fls(d & -d) - 1; - * rvalue = reciprocal_value_adv(d >> pre_shift, 32 - pre_shift); - * } else { - * pre_shift = 0; - * } - * - * // code generation starts. - * if (imm == 1U << exp) { - * result = n >> exp; - * } else if (rvalue.is_wide_m) { - * // pre_shift must be zero when reached here. 
- * t = (n * rvalue.m) >> 32; - * result = n - t; - * result >>= 1; - * result += t; - * result >>= rvalue.sh - 1; - * } else { - * if (pre_shift) - * result = n >> pre_shift; - * result = ((u64)result * rvalue.m) >> 32; - * result >>= rvalue.sh; - * } - */ -struct reciprocal_value_adv reciprocal_value_adv(u32 d, u8 prec); - #endif /* _LINUX_RECIPROCAL_DIV_H */ diff --git a/include/linux/regmap.h b/include/linux/regmap.h index e3c9a25a85..f667313243 100644 --- a/include/linux/regmap.h +++ b/include/linux/regmap.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ #ifndef __LINUX_REGMAP_H #define __LINUX_REGMAP_H @@ -8,34 +7,29 @@ * Copyright 2011 Wolfson Microelectronics plc * * Author: Mark Brown + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. */ #include #include -#include #include #include #include #include -#include -#include struct module; -struct clk; struct device; -struct device_node; struct i2c_client; -struct i3c_device; struct irq_domain; -struct mdio_device; -struct slim_device; struct spi_device; struct spmi_device; struct regmap; struct regmap_range_cfg; struct regmap_field; struct snd_ac97; -struct sdw_slave; /* An enum of all the supported cache types */ enum regcache_type { @@ -46,13 +40,12 @@ enum regcache_type { }; /** - * struct reg_default - Default value for a register. + * Default value for a register. We use an array of structs rather + * than a simple array as many modern devices have very sparse + * register maps. * * @reg: Register address. * @def: Register default value. - * - * We use an array of structs rather than a simple array as many modern devices - * have very sparse register maps. */ struct reg_default { unsigned int reg; @@ -60,14 +53,12 @@ struct reg_default { }; /** - * struct reg_sequence - An individual write from a sequence of writes. 
+ * Register/value pairs for sequences of writes with an optional delay in + * microseconds to be applied after each write. * * @reg: Register address. * @def: Register value. * @delay_us: Delay to be applied after the register write in microseconds - * - * Register/value pairs for sequences of writes with an optional delay in - * microseconds to be applied after each write. */ struct reg_sequence { unsigned int reg; @@ -75,23 +66,45 @@ struct reg_sequence { unsigned int delay_us; }; -#define REG_SEQ(_reg, _def, _delay_us) { \ - .reg = _reg, \ - .def = _def, \ - .delay_us = _delay_us, \ - } -#define REG_SEQ0(_reg, _def) REG_SEQ(_reg, _def, 0) +#define regmap_update_bits(map, reg, mask, val) \ + regmap_update_bits_base(map, reg, mask, val, NULL, false, false) +#define regmap_update_bits_async(map, reg, mask, val)\ + regmap_update_bits_base(map, reg, mask, val, NULL, true, false) +#define regmap_update_bits_check(map, reg, mask, val, change)\ + regmap_update_bits_base(map, reg, mask, val, change, false, false) +#define regmap_update_bits_check_async(map, reg, mask, val, change)\ + regmap_update_bits_base(map, reg, mask, val, change, true, false) + +#define regmap_write_bits(map, reg, mask, val) \ + regmap_update_bits_base(map, reg, mask, val, NULL, false, true) + +#define regmap_field_write(field, val) \ + regmap_field_update_bits_base(field, ~0, val, NULL, false, false) +#define regmap_field_force_write(field, val) \ + regmap_field_update_bits_base(field, ~0, val, NULL, false, true) +#define regmap_field_update_bits(field, mask, val)\ + regmap_field_update_bits_base(field, mask, val, NULL, false, false) +#define regmap_field_force_update_bits(field, mask, val) \ + regmap_field_update_bits_base(field, mask, val, NULL, false, true) + +#define regmap_fields_write(field, id, val) \ + regmap_fields_update_bits_base(field, id, ~0, val, NULL, false, false) +#define regmap_fields_force_write(field, id, val) \ + regmap_fields_update_bits_base(field, id, ~0, val, NULL, false, 
true) +#define regmap_fields_update_bits(field, id, mask, val)\ + regmap_fields_update_bits_base(field, id, mask, val, NULL, false, false) +#define regmap_fields_force_update_bits(field, id, mask, val) \ + regmap_fields_update_bits_base(field, id, mask, val, NULL, false, true) /** * regmap_read_poll_timeout - Poll until a condition is met or a timeout occurs - * * @map: Regmap to read from * @addr: Address to poll * @val: Unsigned integer variable to read the value into * @cond: Break condition (usually involving @val) * @sleep_us: Maximum time to sleep between reads in us (0 * tight-loops). Should be less than ~20ms since usleep_range - * is used (see Documentation/timers/timers-howto.rst). + * is used (see Documentation/timers/timers-howto.txt). * @timeout_us: Timeout in us, 0 means never timeout * * Returns 0 on success and -ETIMEDOUT upon a timeout or the regmap_read @@ -103,81 +116,23 @@ struct reg_sequence { */ #define regmap_read_poll_timeout(map, addr, val, cond, sleep_us, timeout_us) \ ({ \ - int __ret, __tmp; \ - __tmp = read_poll_timeout(regmap_read, __ret, __ret || (cond), \ - sleep_us, timeout_us, false, (map), (addr), &(val)); \ - __ret ?: __tmp; \ -}) - -/** - * regmap_read_poll_timeout_atomic - Poll until a condition is met or a timeout occurs - * - * @map: Regmap to read from - * @addr: Address to poll - * @val: Unsigned integer variable to read the value into - * @cond: Break condition (usually involving @val) - * @delay_us: Time to udelay between reads in us (0 tight-loops). - * Should be less than ~10us since udelay is used - * (see Documentation/timers/timers-howto.rst). - * @timeout_us: Timeout in us, 0 means never timeout - * - * Returns 0 on success and -ETIMEDOUT upon a timeout or the regmap_read - * error return value in case of a error read. In the two former cases, - * the last read value at @addr is stored in @val. - * - * This is modelled after the readx_poll_timeout_atomic macros in linux/iopoll.h. 
- * - * Note: In general regmap cannot be used in atomic context. If you want to use - * this macro then first setup your regmap for atomic use (flat or no cache - * and MMIO regmap). - */ -#define regmap_read_poll_timeout_atomic(map, addr, val, cond, delay_us, timeout_us) \ -({ \ - u64 __timeout_us = (timeout_us); \ - unsigned long __delay_us = (delay_us); \ - ktime_t __timeout = ktime_add_us(ktime_get(), __timeout_us); \ - int __ret; \ + ktime_t timeout = ktime_add_us(ktime_get(), timeout_us); \ + int pollret; \ + might_sleep_if(sleep_us); \ for (;;) { \ - __ret = regmap_read((map), (addr), &(val)); \ - if (__ret) \ + pollret = regmap_read((map), (addr), &(val)); \ + if (pollret) \ break; \ if (cond) \ break; \ - if ((__timeout_us) && \ - ktime_compare(ktime_get(), __timeout) > 0) { \ - __ret = regmap_read((map), (addr), &(val)); \ + if (timeout_us && ktime_compare(ktime_get(), timeout) > 0) { \ + pollret = regmap_read((map), (addr), &(val)); \ break; \ } \ - if (__delay_us) \ - udelay(__delay_us); \ + if (sleep_us) \ + usleep_range((sleep_us >> 2) + 1, sleep_us); \ } \ - __ret ?: ((cond) ? 0 : -ETIMEDOUT); \ -}) - -/** - * regmap_field_read_poll_timeout - Poll until a condition is met or timeout - * - * @field: Regmap field to read from - * @val: Unsigned integer variable to read the value into - * @cond: Break condition (usually involving @val) - * @sleep_us: Maximum time to sleep between reads in us (0 - * tight-loops). Should be less than ~20ms since usleep_range - * is used (see Documentation/timers/timers-howto.rst). - * @timeout_us: Timeout in us, 0 means never timeout - * - * Returns 0 on success and -ETIMEDOUT upon a timeout or the regmap_field_read - * error return value in case of a error read. In the two former cases, - * the last read value at @addr is stored in @val. Must not be called - * from atomic context if sleep_us or timeout_us are used. - * - * This is modelled after the readx_poll_timeout macros in linux/iopoll.h. 
- */ -#define regmap_field_read_poll_timeout(field, val, cond, sleep_us, timeout_us) \ -({ \ - int __ret, __tmp; \ - __tmp = read_poll_timeout(regmap_field_read, __ret, __ret || (cond), \ - sleep_us, timeout_us, false, (field), &(val)); \ - __ret ?: __tmp; \ + pollret ?: ((cond) ? 0 : -ETIMEDOUT); \ }) #ifdef CONFIG_REGMAP @@ -191,8 +146,8 @@ enum regmap_endian { }; /** - * struct regmap_range - A register range, used for access related checks - * (readable/writeable/volatile/precious checks) + * A register range, used for access related checks + * (readable/writeable/volatile/precious checks) * * @range_min: address of first register * @range_max: address of last register @@ -204,18 +159,16 @@ struct regmap_range { #define regmap_reg_range(low, high) { .range_min = low, .range_max = high, } -/** - * struct regmap_access_table - A table of register ranges for access checks +/* + * A table of ranges including some yes ranges and some no ranges. + * If a register belongs to a no_range, the corresponding check function + * will return false. If a register belongs to a yes range, the corresponding + * check function will return true. "no_ranges" are searched first. * * @yes_ranges : pointer to an array of regmap ranges used as "yes ranges" * @n_yes_ranges: size of the above array * @no_ranges: pointer to an array of regmap ranges used as "no ranges" * @n_no_ranges: size of the above array - * - * A table of ranges including some yes ranges and some no ranges. - * If a register belongs to a no_range, the corresponding check function - * will return false. If a register belongs to a yes range, the corresponding - * check function will return true. "no_ranges" are searched first. */ struct regmap_access_table { const struct regmap_range *yes_ranges; @@ -228,7 +181,7 @@ typedef void (*regmap_lock)(void *); typedef void (*regmap_unlock)(void *); /** - * struct regmap_config - Configuration for the register map of a device. + * Configuration for the register map of a device. 
* * @name: Optional name of the regmap. Useful when a device has multiple * register regions. @@ -261,23 +214,6 @@ typedef void (*regmap_unlock)(void *); * field is NULL but precious_table (see below) is not, the * check is performed on such table (a register is precious if * it belongs to one of the ranges specified by precious_table). - * @writeable_noinc_reg: Optional callback returning true if the register - * supports multiple write operations without incrementing - * the register number. If this field is NULL but - * wr_noinc_table (see below) is not, the check is - * performed on such table (a register is no increment - * writeable if it belongs to one of the ranges specified - * by wr_noinc_table). - * @readable_noinc_reg: Optional callback returning true if the register - * supports multiple read operations without incrementing - * the register number. If this field is NULL but - * rd_noinc_table (see below) is not, the check is - * performed on such table (a register is no increment - * readable if it belongs to one of the ranges specified - * by rd_noinc_table). - * @disable_locking: This regmap is either protected by external means or - * is guaranteed not to be accessed from multiple threads. - * Don't use any locking mechanisms. * @lock: Optional lock callback (overrides regmap's default lock * function, based on spinlock or mutex). * @unlock: As above for unlocking. @@ -302,8 +238,6 @@ typedef void (*regmap_unlock)(void *); * @rd_table: As above, for read access. * @volatile_table: As above, for volatile registers. * @precious_table: As above, for precious registers. - * @wr_noinc_table: As above, for no increment writeable registers. - * @rd_noinc_table: As above, for no increment readable registers. * @reg_defaults: Power on reset values for registers (for use with * register cache support). * @num_reg_defaults: Number of elements in reg_defaults. @@ -312,20 +246,10 @@ typedef void (*regmap_unlock)(void *); * a read. 
* @write_flag_mask: Mask to be set in the top bytes of the register when doing * a write. If both read_flag_mask and write_flag_mask are - * empty and zero_flag_mask is not set the regmap_bus default - * masks are used. - * @zero_flag_mask: If set, read_flag_mask and write_flag_mask are used even - * if they are both empty. - * @use_relaxed_mmio: If set, MMIO R/W operations will not use memory barriers. - * This can avoid load on devices which don't require strict - * orderings, but drivers should carefully add any explicit - * memory barriers when they may require them. - * @use_single_read: If set, converts the bulk read operation into a series of - * single read operations. This is useful for a device that - * does not support bulk read. - * @use_single_write: If set, converts the bulk write operation into a series of - * single write operations. This is useful for a device that - * does not support bulk write. + * empty the regmap_bus default masks are used. + * @use_single_rw: If set, converts the bulk read and write operations into + * a series of single read and write operations. This is useful + * for device that does not support bulk read and write. * @can_multi_write: If set, the device supports the multi write mode of bulk * write operations, if clear multi write requests will be * split into individual write operations @@ -343,12 +267,6 @@ typedef void (*regmap_unlock)(void *); * * @ranges: Array of configuration entries for virtual address ranges. * @num_ranges: Number of range configuration entries. - * @use_hwlock: Indicate if a hardware spinlock should be used. - * @use_raw_spinlock: Indicate if a raw spinlock should be used. - * @hwlock_id: Specify the hardware spinlock id. - * @hwlock_mode: The hardware spinlock mode, should be HWLOCK_IRQSTATE, - * HWLOCK_IRQ or 0. - * @can_sleep: Optional, specifies whether regmap operations can sleep. 
*/ struct regmap_config { const char *name; @@ -362,10 +280,6 @@ struct regmap_config { bool (*readable_reg)(struct device *dev, unsigned int reg); bool (*volatile_reg)(struct device *dev, unsigned int reg); bool (*precious_reg)(struct device *dev, unsigned int reg); - bool (*writeable_noinc_reg)(struct device *dev, unsigned int reg); - bool (*readable_noinc_reg)(struct device *dev, unsigned int reg); - - bool disable_locking; regmap_lock lock; regmap_unlock unlock; void *lock_arg; @@ -380,8 +294,6 @@ struct regmap_config { const struct regmap_access_table *rd_table; const struct regmap_access_table *volatile_table; const struct regmap_access_table *precious_table; - const struct regmap_access_table *wr_noinc_table; - const struct regmap_access_table *rd_noinc_table; const struct reg_default *reg_defaults; unsigned int num_reg_defaults; enum regcache_type cache_type; @@ -390,11 +302,8 @@ struct regmap_config { unsigned long read_flag_mask; unsigned long write_flag_mask; - bool zero_flag_mask; - bool use_single_read; - bool use_single_write; - bool use_relaxed_mmio; + bool use_single_rw; bool can_multi_write; enum regmap_endian reg_format_endian; @@ -402,34 +311,25 @@ struct regmap_config { const struct regmap_range_cfg *ranges; unsigned int num_ranges; - - bool use_hwlock; - bool use_raw_spinlock; - unsigned int hwlock_id; - unsigned int hwlock_mode; - - bool can_sleep; }; /** - * struct regmap_range_cfg - Configuration for indirectly accessed or paged - * registers. + * Configuration for indirectly accessed or paged registers. + * Registers, mapped to this virtual range, are accessed in two steps: + * 1. page selector register update; + * 2. access through data window registers. * * @name: Descriptive name for diagnostics * * @range_min: Address of the lowest register address in virtual range. * @range_max: Address of the highest register in virtual range. * - * @selector_reg: Register with selector field. - * @selector_mask: Bit mask for selector value. 
- * @selector_shift: Bit shift for selector value. + * @page_sel_reg: Register with selector field. + * @page_sel_mask: Bit shift for selector value. + * @page_sel_shift: Bit mask for selector value. * * @window_start: Address of first (lowest) register in data window. * @window_len: Number of registers in data window. - * - * Registers, mapped to this virtual range, are accessed in two steps: - * 1. page selector register update; - * 2. access through data window registers. */ struct regmap_range_cfg { const char *name; @@ -472,8 +372,7 @@ typedef struct regmap_async *(*regmap_hw_async_alloc)(void); typedef void (*regmap_hw_free_context)(void *context); /** - * struct regmap_bus - Description of a hardware bus for the register map - * infrastructure. + * Description of a hardware bus for the register map infrastructure. * * @fast_io: Register IO is fast. Use a spinlock instead of a mutex * to perform locking. This field is ignored if custom lock/unlock @@ -486,10 +385,6 @@ typedef void (*regmap_hw_free_context)(void *context); * must serialise with respect to non-async I/O. * @reg_write: Write a single register value to the given register address. This * write operation has to complete when returning from the function. - * @reg_update_bits: Update bits operation to be used against volatile - * registers, intended for devices supporting some mechanism - * for setting clearing bits without having to - * read/modify/write. * @read: Read operation. Data is returned in the buffer used to transmit * data. * @reg_read: Read a single register value from a given register address. @@ -505,7 +400,6 @@ typedef void (*regmap_hw_free_context)(void *context); * DEFAULT, BIG is assumed. * @max_raw_read: Max raw read size that can be used on the bus. * @max_raw_write: Max raw write size that can be used on the bus. 
- * @free_on_exit: kfree this on exit of regmap */ struct regmap_bus { bool fast_io; @@ -523,7 +417,6 @@ struct regmap_bus { enum regmap_endian val_format_endian_default; size_t max_raw_read; size_t max_raw_write; - bool free_on_exit; }; /* @@ -543,18 +436,6 @@ struct regmap *__regmap_init_i2c(struct i2c_client *i2c, const struct regmap_config *config, struct lock_class_key *lock_key, const char *lock_name); -struct regmap *__regmap_init_mdio(struct mdio_device *mdio_dev, - const struct regmap_config *config, - struct lock_class_key *lock_key, - const char *lock_name); -struct regmap *__regmap_init_sccb(struct i2c_client *i2c, - const struct regmap_config *config, - struct lock_class_key *lock_key, - const char *lock_name); -struct regmap *__regmap_init_slimbus(struct slim_device *slimbus, - const struct regmap_config *config, - struct lock_class_key *lock_key, - const char *lock_name); struct regmap *__regmap_init_spi(struct spi_device *dev, const struct regmap_config *config, struct lock_class_key *lock_key, @@ -567,10 +448,6 @@ struct regmap *__regmap_init_spmi_ext(struct spmi_device *dev, const struct regmap_config *config, struct lock_class_key *lock_key, const char *lock_name); -struct regmap *__regmap_init_w1(struct device *w1_dev, - const struct regmap_config *config, - struct lock_class_key *lock_key, - const char *lock_name); struct regmap *__regmap_init_mmio_clk(struct device *dev, const char *clk_id, void __iomem *regs, const struct regmap_config *config, @@ -580,18 +457,6 @@ struct regmap *__regmap_init_ac97(struct snd_ac97 *ac97, const struct regmap_config *config, struct lock_class_key *lock_key, const char *lock_name); -struct regmap *__regmap_init_sdw(struct sdw_slave *sdw, - const struct regmap_config *config, - struct lock_class_key *lock_key, - const char *lock_name); -struct regmap *__regmap_init_sdw_mbq(struct sdw_slave *sdw, - const struct regmap_config *config, - struct lock_class_key *lock_key, - const char *lock_name); -struct regmap 
*__regmap_init_spi_avmm(struct spi_device *spi, - const struct regmap_config *config, - struct lock_class_key *lock_key, - const char *lock_name); struct regmap *__devm_regmap_init(struct device *dev, const struct regmap_bus *bus, @@ -603,14 +468,6 @@ struct regmap *__devm_regmap_init_i2c(struct i2c_client *i2c, const struct regmap_config *config, struct lock_class_key *lock_key, const char *lock_name); -struct regmap *__devm_regmap_init_mdio(struct mdio_device *mdio_dev, - const struct regmap_config *config, - struct lock_class_key *lock_key, - const char *lock_name); -struct regmap *__devm_regmap_init_sccb(struct i2c_client *i2c, - const struct regmap_config *config, - struct lock_class_key *lock_key, - const char *lock_name); struct regmap *__devm_regmap_init_spi(struct spi_device *dev, const struct regmap_config *config, struct lock_class_key *lock_key, @@ -623,10 +480,6 @@ struct regmap *__devm_regmap_init_spmi_ext(struct spmi_device *dev, const struct regmap_config *config, struct lock_class_key *lock_key, const char *lock_name); -struct regmap *__devm_regmap_init_w1(struct device *w1_dev, - const struct regmap_config *config, - struct lock_class_key *lock_key, - const char *lock_name); struct regmap *__devm_regmap_init_mmio_clk(struct device *dev, const char *clk_id, void __iomem *regs, @@ -637,26 +490,7 @@ struct regmap *__devm_regmap_init_ac97(struct snd_ac97 *ac97, const struct regmap_config *config, struct lock_class_key *lock_key, const char *lock_name); -struct regmap *__devm_regmap_init_sdw(struct sdw_slave *sdw, - const struct regmap_config *config, - struct lock_class_key *lock_key, - const char *lock_name); -struct regmap *__devm_regmap_init_sdw_mbq(struct sdw_slave *sdw, - const struct regmap_config *config, - struct lock_class_key *lock_key, - const char *lock_name); -struct regmap *__devm_regmap_init_slimbus(struct slim_device *slimbus, - const struct regmap_config *config, - struct lock_class_key *lock_key, - const char *lock_name); -struct 
regmap *__devm_regmap_init_i3c(struct i3c_device *i3c, - const struct regmap_config *config, - struct lock_class_key *lock_key, - const char *lock_name); -struct regmap *__devm_regmap_init_spi_avmm(struct spi_device *spi, - const struct regmap_config *config, - struct lock_class_key *lock_key, - const char *lock_name); + /* * Wrapper for regmap_init macros to include a unique lockdep key and name * for each call. No-op if CONFIG_LOCKDEP is not set. @@ -680,7 +514,7 @@ struct regmap *__devm_regmap_init_spi_avmm(struct spi_device *spi, #endif /** - * regmap_init() - Initialise register map + * regmap_init(): Initialise register map * * @dev: Device that will be interacted with * @bus: Bus-specific callbacks to use with device @@ -698,7 +532,7 @@ int regmap_attach_dev(struct device *dev, struct regmap *map, const struct regmap_config *config); /** - * regmap_init_i2c() - Initialise register map + * regmap_init_i2c(): Initialise register map * * @i2c: Device that will be interacted with * @config: Configuration for register map @@ -711,48 +545,9 @@ int regmap_attach_dev(struct device *dev, struct regmap *map, i2c, config) /** - * regmap_init_mdio() - Initialise register map + * regmap_init_spi(): Initialise register map * - * @mdio_dev: Device that will be interacted with - * @config: Configuration for register map - * - * The return value will be an ERR_PTR() on error or a valid pointer to - * a struct regmap. - */ -#define regmap_init_mdio(mdio_dev, config) \ - __regmap_lockdep_wrapper(__regmap_init_mdio, #config, \ - mdio_dev, config) - -/** - * regmap_init_sccb() - Initialise register map - * - * @i2c: Device that will be interacted with - * @config: Configuration for register map - * - * The return value will be an ERR_PTR() on error or a valid pointer to - * a struct regmap. 
- */ -#define regmap_init_sccb(i2c, config) \ - __regmap_lockdep_wrapper(__regmap_init_sccb, #config, \ - i2c, config) - -/** - * regmap_init_slimbus() - Initialise register map - * - * @slimbus: Device that will be interacted with - * @config: Configuration for register map - * - * The return value will be an ERR_PTR() on error or a valid pointer to - * a struct regmap. - */ -#define regmap_init_slimbus(slimbus, config) \ - __regmap_lockdep_wrapper(__regmap_init_slimbus, #config, \ - slimbus, config) - -/** - * regmap_init_spi() - Initialise register map - * - * @dev: Device that will be interacted with + * @spi: Device that will be interacted with * @config: Configuration for register map * * The return value will be an ERR_PTR() on error or a valid pointer to @@ -763,9 +558,8 @@ int regmap_attach_dev(struct device *dev, struct regmap *map, dev, config) /** - * regmap_init_spmi_base() - Create regmap for the Base register space - * - * @dev: SPMI device that will be interacted with + * regmap_init_spmi_base(): Create regmap for the Base register space + * @sdev: SPMI device that will be interacted with * @config: Configuration for register map * * The return value will be an ERR_PTR() on error or a valid pointer to @@ -776,9 +570,8 @@ int regmap_attach_dev(struct device *dev, struct regmap *map, dev, config) /** - * regmap_init_spmi_ext() - Create regmap for Ext register space - * - * @dev: Device that will be interacted with + * regmap_init_spmi_ext(): Create regmap for Ext register space + * @sdev: Device that will be interacted with * @config: Configuration for register map * * The return value will be an ERR_PTR() on error or a valid pointer to @@ -789,20 +582,7 @@ int regmap_attach_dev(struct device *dev, struct regmap *map, dev, config) /** - * regmap_init_w1() - Initialise register map - * - * @w1_dev: Device that will be interacted with - * @config: Configuration for register map - * - * The return value will be an ERR_PTR() on error or a valid pointer to 
- * a struct regmap. - */ -#define regmap_init_w1(w1_dev, config) \ - __regmap_lockdep_wrapper(__regmap_init_w1, #config, \ - w1_dev, config) - -/** - * regmap_init_mmio_clk() - Initialise register map with register clock + * regmap_init_mmio_clk(): Initialise register map with register clock * * @dev: Device that will be interacted with * @clk_id: register clock consumer ID @@ -817,7 +597,7 @@ int regmap_attach_dev(struct device *dev, struct regmap *map, dev, clk_id, regs, config) /** - * regmap_init_mmio() - Initialise register map + * regmap_init_mmio(): Initialise register map * * @dev: Device that will be interacted with * @regs: Pointer to memory-mapped IO region @@ -830,7 +610,7 @@ int regmap_attach_dev(struct device *dev, struct regmap *map, regmap_init_mmio_clk(dev, NULL, regs, config) /** - * regmap_init_ac97() - Initialise AC'97 register map + * regmap_init_ac97(): Initialise AC'97 register map * * @ac97: Device that will be interacted with * @config: Configuration for register map @@ -844,47 +624,7 @@ int regmap_attach_dev(struct device *dev, struct regmap *map, bool regmap_ac97_default_volatile(struct device *dev, unsigned int reg); /** - * regmap_init_sdw() - Initialise register map - * - * @sdw: Device that will be interacted with - * @config: Configuration for register map - * - * The return value will be an ERR_PTR() on error or a valid pointer to - * a struct regmap. - */ -#define regmap_init_sdw(sdw, config) \ - __regmap_lockdep_wrapper(__regmap_init_sdw, #config, \ - sdw, config) - -/** - * regmap_init_sdw_mbq() - Initialise register map - * - * @sdw: Device that will be interacted with - * @config: Configuration for register map - * - * The return value will be an ERR_PTR() on error or a valid pointer to - * a struct regmap. 
- */ -#define regmap_init_sdw_mbq(sdw, config) \ - __regmap_lockdep_wrapper(__regmap_init_sdw_mbq, #config, \ - sdw, config) - -/** - * regmap_init_spi_avmm() - Initialize register map for Intel SPI Slave - * to AVMM Bus Bridge - * - * @spi: Device that will be interacted with - * @config: Configuration for register map - * - * The return value will be an ERR_PTR() on error or a valid pointer - * to a struct regmap. - */ -#define regmap_init_spi_avmm(spi, config) \ - __regmap_lockdep_wrapper(__regmap_init_spi_avmm, #config, \ - spi, config) - -/** - * devm_regmap_init() - Initialise managed register map + * devm_regmap_init(): Initialise managed register map * * @dev: Device that will be interacted with * @bus: Bus-specific callbacks to use with device @@ -901,7 +641,7 @@ bool regmap_ac97_default_volatile(struct device *dev, unsigned int reg); dev, bus, bus_context, config) /** - * devm_regmap_init_i2c() - Initialise managed register map + * devm_regmap_init_i2c(): Initialise managed register map * * @i2c: Device that will be interacted with * @config: Configuration for register map @@ -915,37 +655,9 @@ bool regmap_ac97_default_volatile(struct device *dev, unsigned int reg); i2c, config) /** - * devm_regmap_init_mdio() - Initialise managed register map + * devm_regmap_init_spi(): Initialise register map * - * @mdio_dev: Device that will be interacted with - * @config: Configuration for register map - * - * The return value will be an ERR_PTR() on error or a valid pointer - * to a struct regmap. The regmap will be automatically freed by the - * device management code. - */ -#define devm_regmap_init_mdio(mdio_dev, config) \ - __regmap_lockdep_wrapper(__devm_regmap_init_mdio, #config, \ - mdio_dev, config) - -/** - * devm_regmap_init_sccb() - Initialise managed register map - * - * @i2c: Device that will be interacted with - * @config: Configuration for register map - * - * The return value will be an ERR_PTR() on error or a valid pointer - * to a struct regmap. 
The regmap will be automatically freed by the - * device management code. - */ -#define devm_regmap_init_sccb(i2c, config) \ - __regmap_lockdep_wrapper(__devm_regmap_init_sccb, #config, \ - i2c, config) - -/** - * devm_regmap_init_spi() - Initialise register map - * - * @dev: Device that will be interacted with + * @spi: Device that will be interacted with * @config: Configuration for register map * * The return value will be an ERR_PTR() on error or a valid pointer @@ -957,9 +669,8 @@ bool regmap_ac97_default_volatile(struct device *dev, unsigned int reg); dev, config) /** - * devm_regmap_init_spmi_base() - Create managed regmap for Base register space - * - * @dev: SPMI device that will be interacted with + * devm_regmap_init_spmi_base(): Create managed regmap for Base register space + * @sdev: SPMI device that will be interacted with * @config: Configuration for register map * * The return value will be an ERR_PTR() on error or a valid pointer @@ -971,9 +682,8 @@ bool regmap_ac97_default_volatile(struct device *dev, unsigned int reg); dev, config) /** - * devm_regmap_init_spmi_ext() - Create managed regmap for Ext register space - * - * @dev: SPMI device that will be interacted with + * devm_regmap_init_spmi_ext(): Create managed regmap for Ext register space + * @sdev: SPMI device that will be interacted with * @config: Configuration for register map * * The return value will be an ERR_PTR() on error or a valid pointer @@ -985,20 +695,7 @@ bool regmap_ac97_default_volatile(struct device *dev, unsigned int reg); dev, config) /** - * devm_regmap_init_w1() - Initialise managed register map - * - * @w1_dev: Device that will be interacted with - * @config: Configuration for register map - * - * The return value will be an ERR_PTR() on error or a valid pointer - * to a struct regmap. The regmap will be automatically freed by the - * device management code. 
- */ -#define devm_regmap_init_w1(w1_dev, config) \ - __regmap_lockdep_wrapper(__devm_regmap_init_w1, #config, \ - w1_dev, config) -/** - * devm_regmap_init_mmio_clk() - Initialise managed register map with clock + * devm_regmap_init_mmio_clk(): Initialise managed register map with clock * * @dev: Device that will be interacted with * @clk_id: register clock consumer ID @@ -1014,7 +711,7 @@ bool regmap_ac97_default_volatile(struct device *dev, unsigned int reg); dev, clk_id, regs, config) /** - * devm_regmap_init_mmio() - Initialise managed register map + * devm_regmap_init_mmio(): Initialise managed register map * * @dev: Device that will be interacted with * @regs: Pointer to memory-mapped IO region @@ -1028,7 +725,7 @@ bool regmap_ac97_default_volatile(struct device *dev, unsigned int reg); devm_regmap_init_mmio_clk(dev, NULL, regs, config) /** - * devm_regmap_init_ac97() - Initialise AC'97 register map + * devm_regmap_init_ac97(): Initialise AC'97 register map * * @ac97: Device that will be interacted with * @config: Configuration for register map @@ -1041,79 +738,6 @@ bool regmap_ac97_default_volatile(struct device *dev, unsigned int reg); __regmap_lockdep_wrapper(__devm_regmap_init_ac97, #config, \ ac97, config) -/** - * devm_regmap_init_sdw() - Initialise managed register map - * - * @sdw: Device that will be interacted with - * @config: Configuration for register map - * - * The return value will be an ERR_PTR() on error or a valid pointer - * to a struct regmap. The regmap will be automatically freed by the - * device management code. - */ -#define devm_regmap_init_sdw(sdw, config) \ - __regmap_lockdep_wrapper(__devm_regmap_init_sdw, #config, \ - sdw, config) - -/** - * devm_regmap_init_sdw_mbq() - Initialise managed register map - * - * @sdw: Device that will be interacted with - * @config: Configuration for register map - * - * The return value will be an ERR_PTR() on error or a valid pointer - * to a struct regmap. 
The regmap will be automatically freed by the - * device management code. - */ -#define devm_regmap_init_sdw_mbq(sdw, config) \ - __regmap_lockdep_wrapper(__devm_regmap_init_sdw_mbq, #config, \ - sdw, config) - -/** - * devm_regmap_init_slimbus() - Initialise managed register map - * - * @slimbus: Device that will be interacted with - * @config: Configuration for register map - * - * The return value will be an ERR_PTR() on error or a valid pointer - * to a struct regmap. The regmap will be automatically freed by the - * device management code. - */ -#define devm_regmap_init_slimbus(slimbus, config) \ - __regmap_lockdep_wrapper(__devm_regmap_init_slimbus, #config, \ - slimbus, config) - -/** - * devm_regmap_init_i3c() - Initialise managed register map - * - * @i3c: Device that will be interacted with - * @config: Configuration for register map - * - * The return value will be an ERR_PTR() on error or a valid pointer - * to a struct regmap. The regmap will be automatically freed by the - * device management code. - */ -#define devm_regmap_init_i3c(i3c, config) \ - __regmap_lockdep_wrapper(__devm_regmap_init_i3c, #config, \ - i3c, config) - -/** - * devm_regmap_init_spi_avmm() - Initialize register map for Intel SPI Slave - * to AVMM Bus Bridge - * - * @spi: Device that will be interacted with - * @config: Configuration for register map - * - * The return value will be an ERR_PTR() on error or a valid pointer - * to a struct regmap. The map will be automatically freed by the - * device management code. 
- */ -#define devm_regmap_init_spi_avmm(spi, config) \ - __regmap_lockdep_wrapper(__devm_regmap_init_spi_avmm, #config, \ - spi, config) - -int regmap_mmio_attach_clk(struct regmap *map, struct clk *clk); -void regmap_mmio_detach_clk(struct regmap *map); void regmap_exit(struct regmap *map); int regmap_reinit_cache(struct regmap *map, const struct regmap_config *config); @@ -1123,8 +747,6 @@ int regmap_write(struct regmap *map, unsigned int reg, unsigned int val); int regmap_write_async(struct regmap *map, unsigned int reg, unsigned int val); int regmap_raw_write(struct regmap *map, unsigned int reg, const void *val, size_t val_len); -int regmap_noinc_write(struct regmap *map, unsigned int reg, - const void *val, size_t val_len); int regmap_bulk_write(struct regmap *map, unsigned int reg, const void *val, size_t val_count); int regmap_multi_reg_write(struct regmap *map, const struct reg_sequence *regs, @@ -1137,49 +759,11 @@ int regmap_raw_write_async(struct regmap *map, unsigned int reg, int regmap_read(struct regmap *map, unsigned int reg, unsigned int *val); int regmap_raw_read(struct regmap *map, unsigned int reg, void *val, size_t val_len); -int regmap_noinc_read(struct regmap *map, unsigned int reg, - void *val, size_t val_len); int regmap_bulk_read(struct regmap *map, unsigned int reg, void *val, size_t val_count); int regmap_update_bits_base(struct regmap *map, unsigned int reg, unsigned int mask, unsigned int val, bool *change, bool async, bool force); - -static inline int regmap_update_bits(struct regmap *map, unsigned int reg, - unsigned int mask, unsigned int val) -{ - return regmap_update_bits_base(map, reg, mask, val, NULL, false, false); -} - -static inline int regmap_update_bits_async(struct regmap *map, unsigned int reg, - unsigned int mask, unsigned int val) -{ - return regmap_update_bits_base(map, reg, mask, val, NULL, true, false); -} - -static inline int regmap_update_bits_check(struct regmap *map, unsigned int reg, - unsigned int mask, 
unsigned int val, - bool *change) -{ - return regmap_update_bits_base(map, reg, mask, val, - change, false, false); -} - -static inline int -regmap_update_bits_check_async(struct regmap *map, unsigned int reg, - unsigned int mask, unsigned int val, - bool *change) -{ - return regmap_update_bits_base(map, reg, mask, val, - change, true, false); -} - -static inline int regmap_write_bits(struct regmap *map, unsigned int reg, - unsigned int mask, unsigned int val) -{ - return regmap_update_bits_base(map, reg, mask, val, NULL, false, true); -} - int regmap_get_val_bytes(struct regmap *map); int regmap_get_max_register(struct regmap *map); int regmap_get_reg_stride(struct regmap *map); @@ -1215,23 +799,8 @@ bool regmap_reg_in_ranges(unsigned int reg, const struct regmap_range *ranges, unsigned int nranges); -static inline int regmap_set_bits(struct regmap *map, - unsigned int reg, unsigned int bits) -{ - return regmap_update_bits_base(map, reg, bits, bits, - NULL, false, false); -} - -static inline int regmap_clear_bits(struct regmap *map, - unsigned int reg, unsigned int bits) -{ - return regmap_update_bits_base(map, reg, bits, 0, NULL, false, false); -} - -int regmap_test_bits(struct regmap *map, unsigned int reg, unsigned int bits); - /** - * struct reg_field - Description of an register field + * Description of an register field * * @reg: Offset of the register within the regmap bank * @lsb: lsb of the register field. 
@@ -1253,14 +822,6 @@ struct reg_field { .msb = _msb, \ } -#define REG_FIELD_ID(_reg, _lsb, _msb, _size, _offset) { \ - .reg = _reg, \ - .lsb = _lsb, \ - .msb = _msb, \ - .id_size = _size, \ - .id_offset = _offset, \ - } - struct regmap_field *regmap_field_alloc(struct regmap *regmap, struct reg_field reg_field); void regmap_field_free(struct regmap_field *field); @@ -1269,18 +830,6 @@ struct regmap_field *devm_regmap_field_alloc(struct device *dev, struct regmap *regmap, struct reg_field reg_field); void devm_regmap_field_free(struct device *dev, struct regmap_field *field); -int regmap_field_bulk_alloc(struct regmap *regmap, - struct regmap_field **rm_field, - const struct reg_field *reg_field, - int num_fields); -void regmap_field_bulk_free(struct regmap_field *field); -int devm_regmap_field_bulk_alloc(struct device *dev, struct regmap *regmap, - struct regmap_field **field, - const struct reg_field *reg_field, - int num_fields); -void devm_regmap_field_bulk_free(struct device *dev, - struct regmap_field *field); - int regmap_field_read(struct regmap_field *field, unsigned int *val); int regmap_field_update_bits_base(struct regmap_field *field, unsigned int mask, unsigned int val, @@ -1291,168 +840,48 @@ int regmap_fields_update_bits_base(struct regmap_field *field, unsigned int id, unsigned int mask, unsigned int val, bool *change, bool async, bool force); -static inline int regmap_field_write(struct regmap_field *field, - unsigned int val) -{ - return regmap_field_update_bits_base(field, ~0, val, - NULL, false, false); -} - -static inline int regmap_field_force_write(struct regmap_field *field, - unsigned int val) -{ - return regmap_field_update_bits_base(field, ~0, val, NULL, false, true); -} - -static inline int regmap_field_update_bits(struct regmap_field *field, - unsigned int mask, unsigned int val) -{ - return regmap_field_update_bits_base(field, mask, val, - NULL, false, false); -} - -static inline int -regmap_field_force_update_bits(struct regmap_field 
*field, - unsigned int mask, unsigned int val) -{ - return regmap_field_update_bits_base(field, mask, val, - NULL, false, true); -} - -static inline int regmap_fields_write(struct regmap_field *field, - unsigned int id, unsigned int val) -{ - return regmap_fields_update_bits_base(field, id, ~0, val, - NULL, false, false); -} - -static inline int regmap_fields_force_write(struct regmap_field *field, - unsigned int id, unsigned int val) -{ - return regmap_fields_update_bits_base(field, id, ~0, val, - NULL, false, true); -} - -static inline int -regmap_fields_update_bits(struct regmap_field *field, unsigned int id, - unsigned int mask, unsigned int val) -{ - return regmap_fields_update_bits_base(field, id, mask, val, - NULL, false, false); -} - -static inline int -regmap_fields_force_update_bits(struct regmap_field *field, unsigned int id, - unsigned int mask, unsigned int val) -{ - return regmap_fields_update_bits_base(field, id, mask, val, - NULL, false, true); -} - /** - * struct regmap_irq_type - IRQ type definitions. - * - * @type_reg_offset: Offset register for the irq type setting. - * @type_rising_val: Register value to configure RISING type irq. - * @type_falling_val: Register value to configure FALLING type irq. - * @type_level_low_val: Register value to configure LEVEL_LOW type irq. - * @type_level_high_val: Register value to configure LEVEL_HIGH type irq. - * @types_supported: logical OR of IRQ_TYPE_* flags indicating supported types. - */ -struct regmap_irq_type { - unsigned int type_reg_offset; - unsigned int type_reg_mask; - unsigned int type_rising_val; - unsigned int type_falling_val; - unsigned int type_level_low_val; - unsigned int type_level_high_val; - unsigned int types_supported; -}; - -/** - * struct regmap_irq - Description of an IRQ for the generic regmap irq_chip. + * Description of an IRQ for the generic regmap irq_chip. * * @reg_offset: Offset of the status/mask register within the bank * @mask: Mask used to flag/control the register. 
- * @type: IRQ trigger type setting details if supported. + * @type_reg_offset: Offset register for the irq type setting. + * @type_rising_mask: Mask bit to configure RISING type irq. + * @type_falling_mask: Mask bit to configure FALLING type irq. */ struct regmap_irq { unsigned int reg_offset; unsigned int mask; - struct regmap_irq_type type; + unsigned int type_reg_offset; + unsigned int type_rising_mask; + unsigned int type_falling_mask; }; #define REGMAP_IRQ_REG(_irq, _off, _mask) \ [_irq] = { .reg_offset = (_off), .mask = (_mask) } -#define REGMAP_IRQ_REG_LINE(_id, _reg_bits) \ - [_id] = { \ - .mask = BIT((_id) % (_reg_bits)), \ - .reg_offset = (_id) / (_reg_bits), \ - } - -#define REGMAP_IRQ_MAIN_REG_OFFSET(arr) \ - { .num_regs = ARRAY_SIZE((arr)), .offset = &(arr)[0] } - -struct regmap_irq_sub_irq_map { - unsigned int num_regs; - unsigned int *offset; -}; - /** - * struct regmap_irq_chip - Description of a generic regmap irq_chip. + * Description of a generic regmap irq_chip. This is not intended to + * handle every possible interrupt controller, but it should handle a + * substantial proportion of those that are found in the wild. * * @name: Descriptive name for IRQ controller. * - * @main_status: Base main status register address. For chips which have - * interrupts arranged in separate sub-irq blocks with own IRQ - * registers and which have a main IRQ registers indicating - * sub-irq blocks with unhandled interrupts. For such chips fill - * sub-irq register information in status_base, mask_base and - * ack_base. - * @num_main_status_bits: Should be given to chips where number of meaningfull - * main status bits differs from num_regs. - * @sub_reg_offsets: arrays of mappings from main register bits to sub irq - * registers. First item in array describes the registers - * for first main status bit. Second array for second bit etc. - * Offset is given as sub register status offset to - * status_base. Should contain num_regs arrays. 
- * Can be provided for chips with more complex mapping than - * 1.st bit to 1.st sub-reg, 2.nd bit to 2.nd sub-reg, ... - * When used with not_fixed_stride, each one-element array - * member contains offset calculated as address from each - * peripheral to first peripheral. - * @num_main_regs: Number of 'main status' irq registers for chips which have - * main_status set. - * * @status_base: Base status register address. * @mask_base: Base mask register address. - * @mask_writeonly: Base mask register is write only. * @unmask_base: Base unmask register address. for chips who have * separate mask and unmask registers * @ack_base: Base ack address. If zero then the chip is clear on read. * Using zero value is possible with @use_ack bit. * @wake_base: Base address for wake enables. If zero unsupported. * @type_base: Base address for irq type. If zero unsupported. - * @virt_reg_base: Base addresses for extra config regs. * @irq_reg_stride: Stride to use for chips where registers are not contiguous. * @init_ack_masked: Ack all masked interrupts once during initalization. * @mask_invert: Inverted mask register: cleared bits are masked out. * @use_ack: Use @ack register even if it is zero. * @ack_invert: Inverted ack register: cleared bits for ack. - * @clear_ack: Use this to set 1 and 0 or vice-versa to clear interrupts. * @wake_invert: Inverted wake register: cleared bits are wake enabled. * @type_invert: Invert the type flags. - * @type_in_mask: Use the mask registers for controlling irq type. For - * interrupts defining type_rising/falling_mask use mask_base - * for edge configuration and never update bits in type_base. - * @clear_on_unmask: For chips with interrupts cleared on read: read the status - * registers before unmasking interrupts to clear any bits - * set when they were masked. - * @not_fixed_stride: Used when chip peripherals are not laid out with fixed - * stride. Must be used with sub_reg_offsets containing the - * offsets to each peripheral. 
- * @status_invert: Inverted status register: cleared bits are active interrupts. * @runtime_pm: Hold a runtime PM lock on the device when accessing it. * * @num_regs: Number of registers in each control bank. @@ -1460,52 +889,32 @@ struct regmap_irq_sub_irq_map { * assigned based on the index in the array of the interrupt. * @num_irqs: Number of descriptors. * @num_type_reg: Number of type registers. - * @num_virt_regs: Number of non-standard irq configuration registers. - * If zero unsupported. * @type_reg_stride: Stride to use for chips where type registers are not * contiguous. * @handle_pre_irq: Driver specific callback to handle interrupt from device * before regmap_irq_handler process the interrupts. * @handle_post_irq: Driver specific callback to handle interrupt from device * after handling the interrupts in regmap_irq_handler(). - * @set_type_virt: Driver specific callback to extend regmap_irq_set_type() - * and configure virt regs. * @irq_drv_data: Driver specific IRQ data which is passed as parameter when * driver specific pre/post interrupt handler is called. - * - * This is not intended to handle every possible interrupt controller, but - * it should handle a substantial proportion of those that are found in the - * wild. 
*/ struct regmap_irq_chip { const char *name; - unsigned int main_status; - unsigned int num_main_status_bits; - struct regmap_irq_sub_irq_map *sub_reg_offsets; - int num_main_regs; - unsigned int status_base; unsigned int mask_base; unsigned int unmask_base; unsigned int ack_base; unsigned int wake_base; unsigned int type_base; - unsigned int *virt_reg_base; unsigned int irq_reg_stride; - bool mask_writeonly:1; bool init_ack_masked:1; bool mask_invert:1; bool use_ack:1; bool ack_invert:1; - bool clear_ack:1; bool wake_invert:1; bool runtime_pm:1; bool type_invert:1; - bool type_in_mask:1; - bool clear_on_unmask:1; - bool not_fixed_stride:1; - bool status_invert:1; int num_regs; @@ -1513,13 +922,10 @@ struct regmap_irq_chip { int num_irqs; int num_type_reg; - int num_virt_regs; unsigned int type_reg_stride; int (*handle_pre_irq)(void *irq_drv_data); int (*handle_post_irq)(void *irq_drv_data); - int (*set_type_virt)(unsigned int **buf, unsigned int type, - unsigned long hwirq, int reg); void *irq_drv_data; }; @@ -1528,23 +934,12 @@ struct regmap_irq_chip_data; int regmap_add_irq_chip(struct regmap *map, int irq, int irq_flags, int irq_base, const struct regmap_irq_chip *chip, struct regmap_irq_chip_data **data); -int regmap_add_irq_chip_fwnode(struct fwnode_handle *fwnode, - struct regmap *map, int irq, - int irq_flags, int irq_base, - const struct regmap_irq_chip *chip, - struct regmap_irq_chip_data **data); void regmap_del_irq_chip(int irq, struct regmap_irq_chip_data *data); int devm_regmap_add_irq_chip(struct device *dev, struct regmap *map, int irq, int irq_flags, int irq_base, const struct regmap_irq_chip *chip, struct regmap_irq_chip_data **data); -int devm_regmap_add_irq_chip_fwnode(struct device *dev, - struct fwnode_handle *fwnode, - struct regmap *map, int irq, - int irq_flags, int irq_base, - const struct regmap_irq_chip *chip, - struct regmap_irq_chip_data **data); void devm_regmap_del_irq_chip(struct device *dev, int irq, struct regmap_irq_chip_data 
*data); @@ -1589,13 +984,6 @@ static inline int regmap_raw_write_async(struct regmap *map, unsigned int reg, return -EINVAL; } -static inline int regmap_noinc_write(struct regmap *map, unsigned int reg, - const void *val, size_t val_len) -{ - WARN_ONCE(1, "regmap API is disabled"); - return -EINVAL; -} - static inline int regmap_bulk_write(struct regmap *map, unsigned int reg, const void *val, size_t val_count) { @@ -1617,13 +1005,6 @@ static inline int regmap_raw_read(struct regmap *map, unsigned int reg, return -EINVAL; } -static inline int regmap_noinc_read(struct regmap *map, unsigned int reg, - void *val, size_t val_len) -{ - WARN_ONCE(1, "regmap API is disabled"); - return -EINVAL; -} - static inline int regmap_bulk_read(struct regmap *map, unsigned int reg, void *val, size_t val_count) { @@ -1639,27 +1020,6 @@ static inline int regmap_update_bits_base(struct regmap *map, unsigned int reg, return -EINVAL; } -static inline int regmap_set_bits(struct regmap *map, - unsigned int reg, unsigned int bits) -{ - WARN_ONCE(1, "regmap API is disabled"); - return -EINVAL; -} - -static inline int regmap_clear_bits(struct regmap *map, - unsigned int reg, unsigned int bits) -{ - WARN_ONCE(1, "regmap API is disabled"); - return -EINVAL; -} - -static inline int regmap_test_bits(struct regmap *map, - unsigned int reg, unsigned int bits) -{ - WARN_ONCE(1, "regmap API is disabled"); - return -EINVAL; -} - static inline int regmap_field_update_bits_base(struct regmap_field *field, unsigned int mask, unsigned int val, bool *change, bool async, bool force) @@ -1677,103 +1037,6 @@ static inline int regmap_fields_update_bits_base(struct regmap_field *field, return -EINVAL; } -static inline int regmap_update_bits(struct regmap *map, unsigned int reg, - unsigned int mask, unsigned int val) -{ - WARN_ONCE(1, "regmap API is disabled"); - return -EINVAL; -} - -static inline int regmap_update_bits_async(struct regmap *map, unsigned int reg, - unsigned int mask, unsigned int val) -{ - 
WARN_ONCE(1, "regmap API is disabled"); - return -EINVAL; -} - -static inline int regmap_update_bits_check(struct regmap *map, unsigned int reg, - unsigned int mask, unsigned int val, - bool *change) -{ - WARN_ONCE(1, "regmap API is disabled"); - return -EINVAL; -} - -static inline int -regmap_update_bits_check_async(struct regmap *map, unsigned int reg, - unsigned int mask, unsigned int val, - bool *change) -{ - WARN_ONCE(1, "regmap API is disabled"); - return -EINVAL; -} - -static inline int regmap_write_bits(struct regmap *map, unsigned int reg, - unsigned int mask, unsigned int val) -{ - WARN_ONCE(1, "regmap API is disabled"); - return -EINVAL; -} - -static inline int regmap_field_write(struct regmap_field *field, - unsigned int val) -{ - WARN_ONCE(1, "regmap API is disabled"); - return -EINVAL; -} - -static inline int regmap_field_force_write(struct regmap_field *field, - unsigned int val) -{ - WARN_ONCE(1, "regmap API is disabled"); - return -EINVAL; -} - -static inline int regmap_field_update_bits(struct regmap_field *field, - unsigned int mask, unsigned int val) -{ - WARN_ONCE(1, "regmap API is disabled"); - return -EINVAL; -} - -static inline int -regmap_field_force_update_bits(struct regmap_field *field, - unsigned int mask, unsigned int val) -{ - WARN_ONCE(1, "regmap API is disabled"); - return -EINVAL; -} - -static inline int regmap_fields_write(struct regmap_field *field, - unsigned int id, unsigned int val) -{ - WARN_ONCE(1, "regmap API is disabled"); - return -EINVAL; -} - -static inline int regmap_fields_force_write(struct regmap_field *field, - unsigned int id, unsigned int val) -{ - WARN_ONCE(1, "regmap API is disabled"); - return -EINVAL; -} - -static inline int -regmap_fields_update_bits(struct regmap_field *field, unsigned int id, - unsigned int mask, unsigned int val) -{ - WARN_ONCE(1, "regmap API is disabled"); - return -EINVAL; -} - -static inline int -regmap_fields_force_update_bits(struct regmap_field *field, unsigned int id, - unsigned 
int mask, unsigned int val) -{ - WARN_ONCE(1, "regmap API is disabled"); - return -EINVAL; -} - static inline int regmap_get_val_bytes(struct regmap *map) { WARN_ONCE(1, "regmap API is disabled"); diff --git a/include/linux/regset.h b/include/linux/regset.h index a00765f0e8..ac4d22149a 100644 --- a/include/linux/regset.h +++ b/include/linux/regset.h @@ -1,9 +1,12 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * User-mode machine state access * * Copyright (C) 2007 Red Hat, Inc. All rights reserved. * + * This copyrighted material is made available to anyone wishing to use, + * modify, copy, or redistribute it subject to the terms and conditions + * of the GNU General Public License v.2. + * * Red Hat Author: Roland McGrath. */ @@ -17,64 +20,6 @@ struct task_struct; struct user_regset; -struct membuf { - void *p; - size_t left; -}; - -static inline int membuf_zero(struct membuf *s, size_t size) -{ - if (s->left) { - if (size > s->left) - size = s->left; - memset(s->p, 0, size); - s->p += size; - s->left -= size; - } - return s->left; -} - -static inline int membuf_write(struct membuf *s, const void *v, size_t size) -{ - if (s->left) { - if (size > s->left) - size = s->left; - memcpy(s->p, v, size); - s->p += size; - s->left -= size; - } - return s->left; -} - -static inline struct membuf membuf_at(const struct membuf *s, size_t offs) -{ - struct membuf n = *s; - - if (offs > n.left) - offs = n.left; - n.p += offs; - n.left -= offs; - - return n; -} - -/* current s->p must be aligned for v; v must be a scalar */ -#define membuf_store(s, v) \ -({ \ - struct membuf *__s = (s); \ - if (__s->left) { \ - typeof(v) __v = (v); \ - size_t __size = sizeof(__v); \ - if (unlikely(__size > __s->left)) { \ - __size = __s->left; \ - memcpy(__s->p, &__v, __size); \ - } else { \ - *(typeof(__v + 0) *)__s->p = __v; \ - } \ - __s->p += __size; \ - __s->left -= __size; \ - } \ - __s->left;}) /** * user_regset_active_fn - type of @active function in &struct user_regset @@ -94,9 
+39,26 @@ static inline struct membuf membuf_at(const struct membuf *s, size_t offs) typedef int user_regset_active_fn(struct task_struct *target, const struct user_regset *regset); -typedef int user_regset_get2_fn(struct task_struct *target, +/** + * user_regset_get_fn - type of @get function in &struct user_regset + * @target: thread being examined + * @regset: regset being examined + * @pos: offset into the regset data to access, in bytes + * @count: amount of data to copy, in bytes + * @kbuf: if not %NULL, a kernel-space pointer to copy into + * @ubuf: if @kbuf is %NULL, a user-space pointer to copy into + * + * Fetch register values. Return %0 on success; -%EIO or -%ENODEV + * are usual failure returns. The @pos and @count values are in + * bytes, but must be properly aligned. If @kbuf is non-null, that + * buffer is used and @ubuf is ignored. If @kbuf is %NULL, then + * ubuf gives a userland pointer to access directly, and an -%EFAULT + * return value is possible. + */ +typedef int user_regset_get_fn(struct task_struct *target, const struct user_regset *regset, - struct membuf to); + unsigned int pos, unsigned int count, + void *kbuf, void __user *ubuf); /** * user_regset_set_fn - type of @set function in &struct user_regset @@ -160,22 +122,14 @@ typedef int user_regset_writeback_fn(struct task_struct *target, * This is part of the state of an individual thread, not necessarily * actual CPU registers per se. A register set consists of a number of * similar slots, given by @n. Each slot is @size bytes, and aligned to - * @align bytes (which is at least @size). For dynamically-sized - * regsets, @n must contain the maximum possible number of slots for the - * regset. + * @align bytes (which is at least @size). * - * For backward compatibility, the @get and @set methods must pad to, or - * accept, @n * @size bytes, even if the current regset size is smaller. - * The precise semantics of these operations depend on the regset being - * accessed. 
- * - * The functions to which &struct user_regset members point must be - * called only on the current thread or on a thread that is in - * %TASK_STOPPED or %TASK_TRACED state, that we are guaranteed will not - * be woken up and return to user mode, and that we have called - * wait_task_inactive() on. (The target thread always might wake up for - * SIGKILL while these functions are working, in which case that - * thread's user_regset state might be scrambled.) + * These functions must be called only on the current thread or on a + * thread that is in %TASK_STOPPED or %TASK_TRACED state, that we are + * guaranteed will not be woken up and return to user mode, and that we + * have called wait_task_inactive() on. (The target thread always might + * wake up for SIGKILL while these functions are working, in which case + * that thread's user_regset state might be scrambled.) * * The @pos argument must be aligned according to @align; the @count * argument must be a multiple of @size. These functions are not @@ -198,7 +152,7 @@ typedef int user_regset_writeback_fn(struct task_struct *target, * omitted when there is an @active function and it returns zero. */ struct user_regset { - user_regset_get2_fn *regset_get; + user_regset_get_fn *get; user_regset_set_fn *set; user_regset_active_fn *active; user_regset_writeback_fn *writeback; @@ -207,7 +161,8 @@ struct user_regset { unsigned int align; unsigned int bias; unsigned int core_note_type; -}; +} __do_const; +typedef struct user_regset __no_const user_regset_no_const; /** * struct user_regset_view - available regsets @@ -250,6 +205,44 @@ struct user_regset_view { */ const struct user_regset_view *task_user_regset_view(struct task_struct *tsk); + +/* + * These are helpers for writing regset get/set functions in arch code. + * Because @start_pos and @end_pos are always compile-time constants, + * these are inlined into very little code though they look large. 
+ * + * Use one or more calls sequentially for each chunk of regset data stored + * contiguously in memory. Call with constants for @start_pos and @end_pos, + * giving the range of byte positions in the regset that data corresponds + * to; @end_pos can be -1 if this chunk is at the end of the regset layout. + * Each call updates the arguments to point past its chunk. + */ + +static inline int user_regset_copyout(unsigned int *pos, unsigned int *count, + void **kbuf, + void __user **ubuf, const void *data, + const int start_pos, const int end_pos) +{ + if (*count == 0) + return 0; + BUG_ON(*pos < start_pos); + if (end_pos < 0 || *pos < end_pos) { + unsigned int copy = (end_pos < 0 ? *count + : min(*count, end_pos - *pos)); + data += *pos - start_pos; + if (*kbuf) { + memcpy(*kbuf, data, copy); + *kbuf += copy; + } else if (__copy_to_user(*ubuf, data, copy)) + return -EFAULT; + else + *ubuf += copy; + *pos += copy; + *count -= copy; + } + return 0; +} + static inline int user_regset_copyin(unsigned int *pos, unsigned int *count, const void **kbuf, const void __user **ubuf, void *data, @@ -275,6 +268,35 @@ static inline int user_regset_copyin(unsigned int *pos, unsigned int *count, return 0; } +/* + * These two parallel the two above, but for portions of a regset layout + * that always read as all-zero or for which writes are ignored. + */ +static inline int user_regset_copyout_zero(unsigned int *pos, + unsigned int *count, + void **kbuf, void __user **ubuf, + const int start_pos, + const int end_pos) +{ + if (*count == 0) + return 0; + BUG_ON(*pos < start_pos); + if (end_pos < 0 || *pos < end_pos) { + unsigned int copy = (end_pos < 0 ? 
*count + : min(*count, end_pos - *pos)); + if (*kbuf) { + memset(*kbuf, 0, copy); + *kbuf += copy; + } else if (__clear_user(*ubuf, copy)) + return -EFAULT; + else + *ubuf += copy; + *pos += copy; + *count -= copy; + } + return 0; +} + static inline int user_regset_copyin_ignore(unsigned int *pos, unsigned int *count, const void **kbuf, @@ -298,19 +320,31 @@ static inline int user_regset_copyin_ignore(unsigned int *pos, return 0; } -extern int regset_get(struct task_struct *target, - const struct user_regset *regset, - unsigned int size, void *data); +/** + * copy_regset_to_user - fetch a thread's user_regset data into user memory + * @target: thread to be examined + * @view: &struct user_regset_view describing user thread machine state + * @setno: index in @view->regsets + * @offset: offset into the regset data, in bytes + * @size: amount of data to copy, in bytes + * @data: user-mode pointer to copy into + */ +static inline int copy_regset_to_user(struct task_struct *target, + const struct user_regset_view *view, + unsigned int setno, + unsigned int offset, unsigned int size, + void __user *data) +{ + const struct user_regset *regset = &view->regsets[setno]; -extern int regset_get_alloc(struct task_struct *target, - const struct user_regset *regset, - unsigned int size, - void **data); + if (!regset->get) + return -EOPNOTSUPP; -extern int copy_regset_to_user(struct task_struct *target, - const struct user_regset_view *view, - unsigned int setno, unsigned int offset, - unsigned int size, void __user *data); + if (!access_ok(VERIFY_WRITE, data, size)) + return -EFAULT; + + return regset->get(target, regset, offset, size, NULL, data); +} /** * copy_regset_from_user - store into thread's user_regset data from user memory @@ -332,10 +366,11 @@ static inline int copy_regset_from_user(struct task_struct *target, if (!regset->set) return -EOPNOTSUPP; - if (!access_ok(data, size)) + if (!access_ok(VERIFY_READ, data, size)) return -EFAULT; return regset->set(target, 
regset, offset, size, NULL, data); } + #endif /* */ diff --git a/include/linux/regulator/ab8500.h b/include/linux/regulator/ab8500.h index 3ab1ddf151..d8ecefaf63 100644 --- a/include/linux/regulator/ab8500.h +++ b/include/linux/regulator/ab8500.h @@ -1,7 +1,8 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright (C) ST-Ericsson SA 2010 * + * License Terms: GNU General Public License v2 + * * Authors: Sundar Iyer for ST-Ericsson * Bengt Jonsson for ST-Ericsson * Daniel Willerud for ST-Ericsson @@ -37,15 +38,58 @@ enum ab8505_regulator_id { AB8505_LDO_AUX6, AB8505_LDO_INTCORE, AB8505_LDO_ADC, + AB8505_LDO_USB, AB8505_LDO_AUDIO, AB8505_LDO_ANAMIC1, AB8505_LDO_ANAMIC2, AB8505_LDO_AUX8, AB8505_LDO_ANA, + AB8505_SYSCLKREQ_2, + AB8505_SYSCLKREQ_4, AB8505_NUM_REGULATORS, }; -/* AB8500 and AB8505 register initialization */ +/* AB9540 regulators */ +enum ab9540_regulator_id { + AB9540_LDO_AUX1, + AB9540_LDO_AUX2, + AB9540_LDO_AUX3, + AB9540_LDO_AUX4, + AB9540_LDO_INTCORE, + AB9540_LDO_TVOUT, + AB9540_LDO_USB, + AB9540_LDO_AUDIO, + AB9540_LDO_ANAMIC1, + AB9540_LDO_ANAMIC2, + AB9540_LDO_DMIC, + AB9540_LDO_ANA, + AB9540_SYSCLKREQ_2, + AB9540_SYSCLKREQ_4, + AB9540_NUM_REGULATORS, +}; + +/* AB8540 regulators */ +enum ab8540_regulator_id { + AB8540_LDO_AUX1, + AB8540_LDO_AUX2, + AB8540_LDO_AUX3, + AB8540_LDO_AUX4, + AB8540_LDO_AUX5, + AB8540_LDO_AUX6, + AB8540_LDO_INTCORE, + AB8540_LDO_TVOUT, + AB8540_LDO_AUDIO, + AB8540_LDO_ANAMIC1, + AB8540_LDO_ANAMIC2, + AB8540_LDO_DMIC, + AB8540_LDO_ANA, + AB8540_LDO_SDIO, + AB8540_SYSCLKREQ_2, + AB8540_SYSCLKREQ_4, + AB8540_NUM_REGULATORS, +}; + +/* AB8500, AB8505, and AB9540 register initialization */ struct ab8500_regulator_reg_init { int id; u8 mask; @@ -141,6 +185,121 @@ enum ab8505_regulator_reg { AB8505_NUM_REGULATOR_REGISTERS, }; +/* AB9540 registers */ +enum ab9540_regulator_reg { + AB9540_REGUREQUESTCTRL1, + AB9540_REGUREQUESTCTRL2, + AB9540_REGUREQUESTCTRL3, + AB9540_REGUREQUESTCTRL4, + AB9540_REGUSYSCLKREQ1HPVALID1, + 
AB9540_REGUSYSCLKREQ1HPVALID2, + AB9540_REGUHWHPREQ1VALID1, + AB9540_REGUHWHPREQ1VALID2, + AB9540_REGUHWHPREQ2VALID1, + AB9540_REGUHWHPREQ2VALID2, + AB9540_REGUSWHPREQVALID1, + AB9540_REGUSWHPREQVALID2, + AB9540_REGUSYSCLKREQVALID1, + AB9540_REGUSYSCLKREQVALID2, + AB9540_REGUVAUX4REQVALID, + AB9540_REGUMISC1, + AB9540_VAUDIOSUPPLY, + AB9540_REGUCTRL1VAMIC, + AB9540_VSMPS1REGU, + AB9540_VSMPS2REGU, + AB9540_VSMPS3REGU, /* NOTE! PRCMU register */ + AB9540_VPLLVANAREGU, + AB9540_EXTSUPPLYREGU, + AB9540_VAUX12REGU, + AB9540_VRF1VAUX3REGU, + AB9540_VSMPS1SEL1, + AB9540_VSMPS1SEL2, + AB9540_VSMPS1SEL3, + AB9540_VSMPS2SEL1, + AB9540_VSMPS2SEL2, + AB9540_VSMPS2SEL3, + AB9540_VSMPS3SEL1, /* NOTE! PRCMU register */ + AB9540_VSMPS3SEL2, /* NOTE! PRCMU register */ + AB9540_VAUX1SEL, + AB9540_VAUX2SEL, + AB9540_VRF1VAUX3SEL, + AB9540_REGUCTRL2SPARE, + AB9540_VAUX4REQCTRL, + AB9540_VAUX4REGU, + AB9540_VAUX4SEL, + AB9540_REGUCTRLDISCH, + AB9540_REGUCTRLDISCH2, + AB9540_REGUCTRLDISCH3, + AB9540_NUM_REGULATOR_REGISTERS, +}; + +/* AB8540 registers */ +enum ab8540_regulator_reg { + AB8540_REGUREQUESTCTRL1, + AB8540_REGUREQUESTCTRL2, + AB8540_REGUREQUESTCTRL3, + AB8540_REGUREQUESTCTRL4, + AB8540_REGUSYSCLKREQ1HPVALID1, + AB8540_REGUSYSCLKREQ1HPVALID2, + AB8540_REGUHWHPREQ1VALID1, + AB8540_REGUHWHPREQ1VALID2, + AB8540_REGUHWHPREQ2VALID1, + AB8540_REGUHWHPREQ2VALID2, + AB8540_REGUSWHPREQVALID1, + AB8540_REGUSWHPREQVALID2, + AB8540_REGUSYSCLKREQVALID1, + AB8540_REGUSYSCLKREQVALID2, + AB8540_REGUVAUX4REQVALID, + AB8540_REGUVAUX5REQVALID, + AB8540_REGUVAUX6REQVALID, + AB8540_REGUVCLKBREQVALID, + AB8540_REGUVRF1REQVALID, + AB8540_REGUMISC1, + AB8540_VAUDIOSUPPLY, + AB8540_REGUCTRL1VAMIC, + AB8540_VHSIC, + AB8540_VSDIO, + AB8540_VSMPS1REGU, + AB8540_VSMPS2REGU, + AB8540_VSMPS3REGU, + AB8540_VPLLVANAREGU, + AB8540_EXTSUPPLYREGU, + AB8540_VAUX12REGU, + AB8540_VRF1VAUX3REGU, + AB8540_VSMPS1SEL1, + AB8540_VSMPS1SEL2, + AB8540_VSMPS1SEL3, + AB8540_VSMPS2SEL1, + AB8540_VSMPS2SEL2, + 
AB8540_VSMPS2SEL3, + AB8540_VSMPS3SEL1, + AB8540_VSMPS3SEL2, + AB8540_VAUX1SEL, + AB8540_VAUX2SEL, + AB8540_VRF1VAUX3SEL, + AB8540_REGUCTRL2SPARE, + AB8540_VAUX4REQCTRL, + AB8540_VAUX4REGU, + AB8540_VAUX4SEL, + AB8540_VAUX5REQCTRL, + AB8540_VAUX5REGU, + AB8540_VAUX5SEL, + AB8540_VAUX6REQCTRL, + AB8540_VAUX6REGU, + AB8540_VAUX6SEL, + AB8540_VCLKBREQCTRL, + AB8540_VCLKBREGU, + AB8540_VCLKBSEL, + AB8540_VRF1REQCTRL, + AB8540_REGUCTRLDISCH, + AB8540_REGUCTRLDISCH2, + AB8540_REGUCTRLDISCH3, + AB8540_REGUCTRLDISCH4, + AB8540_VSIMSYSCLKCTRL, + AB8540_VANAVPLLSEL, + AB8540_NUM_REGULATOR_REGISTERS, +}; + /* AB8500 external regulators */ struct ab8500_ext_regulator_cfg { bool hwreq; /* requires hw mode or high power mode */ diff --git a/include/linux/regulator/act8865.h b/include/linux/regulator/act8865.h index d25e24f596..113d861a1e 100644 --- a/include/linux/regulator/act8865.h +++ b/include/linux/regulator/act8865.h @@ -1,8 +1,16 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * act8865.h -- Voltage regulation for active-semi act88xx PMUs * * Copyright (C) 2013 Atmel Corporation. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; version 2 of the License. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. */ #ifndef __LINUX_REGULATOR_ACT8865_H diff --git a/include/linux/regulator/consumer.h b/include/linux/regulator/consumer.h index bbf6590a6d..6921082222 100644 --- a/include/linux/regulator/consumer.h +++ b/include/linux/regulator/consumer.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * consumer.h -- SoC Regulator consumer support. 
* @@ -6,6 +5,10 @@ * * Author: Liam Girdwood * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * * Regulator Consumer Interface. * * A Power Management Regulator framework for SoC based devices. @@ -26,18 +29,17 @@ * but this drops rapidly to 60% when below 100mA. Regulator r has > 90% * efficiency in IDLE mode at loads < 10mA. Thus regulator r will operate * in normal mode for loads > 10mA and in IDLE mode for load <= 10mA. + * */ #ifndef __LINUX_REGULATOR_CONSUMER_H_ #define __LINUX_REGULATOR_CONSUMER_H_ #include -#include struct device; struct notifier_block; struct regmap; -struct regulator_dev; /* * Regulator operating modes. @@ -78,7 +80,6 @@ struct regulator_dev; * These modes can be OR'ed together to make up a mask of valid register modes. */ -#define REGULATOR_MODE_INVALID 0x0 #define REGULATOR_MODE_FAST 0x1 #define REGULATOR_MODE_NORMAL 0x2 #define REGULATOR_MODE_IDLE 0x4 @@ -118,40 +119,6 @@ struct regulator_dev; #define REGULATOR_EVENT_ABORT_VOLTAGE_CHANGE 0x200 #define REGULATOR_EVENT_PRE_DISABLE 0x400 #define REGULATOR_EVENT_ABORT_DISABLE 0x800 -#define REGULATOR_EVENT_ENABLE 0x1000 -/* - * Following notifications should be emitted only if detected condition - * is such that the HW is likely to still be working but consumers should - * take a recovery action to prevent problems esacalating into errors. - */ -#define REGULATOR_EVENT_UNDER_VOLTAGE_WARN 0x2000 -#define REGULATOR_EVENT_OVER_CURRENT_WARN 0x4000 -#define REGULATOR_EVENT_OVER_VOLTAGE_WARN 0x8000 -#define REGULATOR_EVENT_OVER_TEMP_WARN 0x10000 -#define REGULATOR_EVENT_WARN_MASK 0x1E000 - -/* - * Regulator errors that can be queried using regulator_get_error_flags - * - * UNDER_VOLTAGE Regulator output is under voltage. - * OVER_CURRENT Regulator output current is too high. - * REGULATION_OUT Regulator output is out of regulation. 
- * FAIL Regulator output has failed. - * OVER_TEMP Regulator over temp. - * - * NOTE: These errors can be OR'ed together. - */ - -#define REGULATOR_ERROR_UNDER_VOLTAGE BIT(1) -#define REGULATOR_ERROR_OVER_CURRENT BIT(2) -#define REGULATOR_ERROR_REGULATION_OUT BIT(3) -#define REGULATOR_ERROR_FAIL BIT(4) -#define REGULATOR_ERROR_OVER_TEMP BIT(5) - -#define REGULATOR_ERROR_UNDER_VOLTAGE_WARN BIT(6) -#define REGULATOR_ERROR_OVER_CURRENT_WARN BIT(7) -#define REGULATOR_ERROR_OVER_VOLTAGE_WARN BIT(8) -#define REGULATOR_ERROR_OVER_TEMP_WARN BIT(9) /** * struct pre_voltage_change_data - Data sent with PRE_VOLTAGE_CHANGE event @@ -222,12 +189,17 @@ void regulator_bulk_unregister_supply_alias(struct device *dev, int devm_regulator_register_supply_alias(struct device *dev, const char *id, struct device *alias_dev, const char *alias_id); +void devm_regulator_unregister_supply_alias(struct device *dev, + const char *id); int devm_regulator_bulk_register_supply_alias(struct device *dev, const char *const *id, struct device *alias_dev, const char *const *alias_id, int num_id); +void devm_regulator_bulk_unregister_supply_alias(struct device *dev, + const char *const *id, + int num_id); /* regulator output control and status */ int __must_check regulator_enable(struct regulator *regulator); @@ -265,8 +237,6 @@ int regulator_get_current_limit(struct regulator *regulator); int regulator_set_mode(struct regulator *regulator, unsigned int mode); unsigned int regulator_get_mode(struct regulator *regulator); -int regulator_get_error_flags(struct regulator *regulator, - unsigned int *flags); int regulator_set_load(struct regulator *regulator, int load_uA); int regulator_allow_bypass(struct regulator *regulator, bool allow); @@ -288,26 +258,10 @@ int regulator_unregister_notifier(struct regulator *regulator, void devm_regulator_unregister_notifier(struct regulator *regulator, struct notifier_block *nb); -/* regulator suspend */ -int regulator_suspend_enable(struct regulator_dev *rdev, - 
suspend_state_t state); -int regulator_suspend_disable(struct regulator_dev *rdev, - suspend_state_t state); -int regulator_set_suspend_voltage(struct regulator *regulator, int min_uV, - int max_uV, suspend_state_t state); - /* driver data - core doesn't touch */ void *regulator_get_drvdata(struct regulator *regulator); void regulator_set_drvdata(struct regulator *regulator, void *data); -/* misc helpers */ - -void regulator_bulk_set_supply_names(struct regulator_bulk_data *consumers, - const char *const *supply_names, - unsigned int num_supplies); - -bool regulator_is_equal(struct regulator *reg1, struct regulator *reg2); - #else /* @@ -340,12 +294,6 @@ regulator_get_exclusive(struct device *dev, const char *id) return ERR_PTR(-ENODEV); } -static inline struct regulator *__must_check -devm_regulator_get_exclusive(struct device *dev, const char *id) -{ - return ERR_PTR(-ENODEV); -} - static inline struct regulator *__must_check regulator_get_optional(struct device *dev, const char *id) { @@ -403,6 +351,11 @@ static inline int devm_regulator_register_supply_alias(struct device *dev, return 0; } +static inline void devm_regulator_unregister_supply_alias(struct device *dev, + const char *id) +{ +} + static inline int devm_regulator_bulk_register_supply_alias(struct device *dev, const char *const *id, struct device *alias_dev, @@ -412,6 +365,11 @@ static inline int devm_regulator_bulk_register_supply_alias(struct device *dev, return 0; } +static inline void devm_regulator_bulk_unregister_supply_alias( + struct device *dev, const char *const *id, int num_id) +{ +} + static inline int regulator_enable(struct regulator *regulator) { return 0; @@ -491,22 +449,12 @@ static inline int regulator_get_voltage(struct regulator *regulator) return -EINVAL; } -static inline int regulator_sync_voltage(struct regulator *regulator) -{ - return -EINVAL; -} - static inline int regulator_is_supported_voltage(struct regulator *regulator, int min_uV, int max_uV) { return 0; } -static 
inline unsigned int regulator_get_linear_step(struct regulator *regulator) -{ - return 0; -} - static inline int regulator_set_current_limit(struct regulator *regulator, int min_uA, int max_uA) { @@ -529,15 +477,9 @@ static inline unsigned int regulator_get_mode(struct regulator *regulator) return REGULATOR_MODE_NORMAL; } -static inline int regulator_get_error_flags(struct regulator *regulator, - unsigned int *flags) -{ - return -EINVAL; -} - static inline int regulator_set_load(struct regulator *regulator, int load_uA) { - return 0; + return REGULATOR_MODE_NORMAL; } static inline int regulator_allow_bypass(struct regulator *regulator, @@ -588,25 +530,6 @@ static inline int devm_regulator_unregister_notifier(struct regulator *regulator return 0; } -static inline int regulator_suspend_enable(struct regulator_dev *rdev, - suspend_state_t state) -{ - return -EINVAL; -} - -static inline int regulator_suspend_disable(struct regulator_dev *rdev, - suspend_state_t state) -{ - return -EINVAL; -} - -static inline int regulator_set_suspend_voltage(struct regulator *regulator, - int min_uV, int max_uV, - suspend_state_t state) -{ - return -EINVAL; -} - static inline void *regulator_get_drvdata(struct regulator *regulator) { return NULL; @@ -627,18 +550,6 @@ static inline int regulator_list_voltage(struct regulator *regulator, unsigned s return -EINVAL; } -static inline void -regulator_bulk_set_supply_names(struct regulator_bulk_data *consumers, - const char *const *supply_names, - unsigned int num_supplies) -{ -} - -static inline bool -regulator_is_equal(struct regulator *reg1, struct regulator *reg2) -{ - return false; -} #endif static inline int regulator_set_voltage_triplet(struct regulator *regulator, diff --git a/include/linux/regulator/da9211.h b/include/linux/regulator/da9211.h index 0d3c0f0ebc..80cb40b7c8 100644 --- a/include/linux/regulator/da9211.h +++ b/include/linux/regulator/da9211.h @@ -1,8 +1,17 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * da9211.h 
- Regulator device driver for DA9211/DA9212 - * /DA9213/DA9223/DA9214/DA9224/DA9215/DA9225 + * /DA9213/DA9214/DA9215 * Copyright (C) 2015 Dialog Semiconductor Ltd. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. */ #ifndef __LINUX_REGULATOR_DA9211_H @@ -12,17 +21,12 @@ #define DA9211_MAX_REGULATORS 2 -struct gpio_desc; - enum da9211_chip_id { DA9211, DA9212, DA9213, - DA9223, DA9214, - DA9224, DA9215, - DA9225, }; struct da9211_pdata { @@ -32,7 +36,7 @@ struct da9211_pdata { * 2 : 2 phase 2 buck */ int num_buck; - struct gpio_desc *gpiod_ren[DA9211_MAX_REGULATORS]; + int gpio_ren[DA9211_MAX_REGULATORS]; struct device_node *reg_node[DA9211_MAX_REGULATORS]; struct regulator_init_data *init_data[DA9211_MAX_REGULATORS]; }; diff --git a/include/linux/regulator/db8500-prcmu.h b/include/linux/regulator/db8500-prcmu.h index f90df9ee70..612062313b 100644 --- a/include/linux/regulator/db8500-prcmu.h +++ b/include/linux/regulator/db8500-prcmu.h @@ -1,7 +1,8 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright (C) ST-Ericsson SA 2010 * + * License Terms: GNU General Public License v2 + * * Author: Bengt Jonsson for ST-Ericsson * * Interface to power domain regulators on DB8500 diff --git a/include/linux/regulator/driver.h b/include/linux/regulator/driver.h index bd7a73db2e..37b5324105 100644 --- a/include/linux/regulator/driver.h +++ b/include/linux/regulator/driver.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * driver.h -- SoC Regulator driver support. 
* @@ -6,6 +5,10 @@ * * Author: Liam Girdwood * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * * Regulator Driver Interface. */ @@ -13,12 +16,9 @@ #define __LINUX_REGULATOR_DRIVER_H_ #include -#include #include #include -#include -struct gpio_desc; struct regmap; struct regulator_dev; struct regulator_config; @@ -40,22 +40,31 @@ enum regulator_status { REGULATOR_STATUS_UNDEFINED, }; -enum regulator_detection_severity { - /* Hardware shut down voltage outputs if condition is detected */ - REGULATOR_SEVERITY_PROT, - /* Hardware is probably damaged/inoperable */ - REGULATOR_SEVERITY_ERR, - /* Hardware is still recoverable but recovery action must be taken */ - REGULATOR_SEVERITY_WARN, +/** + * struct regulator_linear_range - specify linear voltage ranges + * + * Specify a range of voltages for regulator_map_linar_range() and + * regulator_list_linear_range(). + * + * @min_uV: Lowest voltage in range + * @min_sel: Lowest selector for range + * @max_sel: Highest selector for range + * @uV_step: Step size + */ +struct regulator_linear_range { + unsigned int min_uV; + unsigned int min_sel; + unsigned int max_sel; + unsigned int uV_step; }; -/* Initialize struct linear_range for regulators */ +/* Initialize struct regulator_linear_range */ #define REGULATOR_LINEAR_RANGE(_min_uV, _min_sel, _max_sel, _step_uV) \ { \ - .min = _min_uV, \ + .min_uV = _min_uV, \ .min_sel = _min_sel, \ .max_sel = _max_sel, \ - .step = _step_uV, \ + .uV_step = _step_uV, \ } /** @@ -71,12 +80,9 @@ enum regulator_detection_severity { * @set_voltage_sel: Set the voltage for the regulator using the specified * selector. * @map_voltage: Convert a voltage into a selector - * @get_voltage: Return the currently configured voltage for the regulator; - * return -ENOTRECOVERABLE if regulator can't be read at - * bootup and hasn't been set yet. 
+ * @get_voltage: Return the currently configured voltage for the regulator. * @get_voltage_sel: Return the currently configured voltage selector for the - * regulator; return -ENOTRECOVERABLE if regulator can't - * be read at bootup and hasn't been set yet. + * regulator. * @list_voltage: Return one of the supported voltages, in microvolts; zero * if the selector indicates a voltage that is unusable on this system; * or negative errno. Selectors range from zero to one less than @@ -87,31 +93,13 @@ enum regulator_detection_severity { * @get_current_limit: Get the configured limit for a current-limited regulator. * @set_input_current_limit: Configure an input limit. * - * @set_over_current_protection: Support enabling of and setting limits for over - * current situation detection. Detection can be configured for three - * levels of severity. - * REGULATOR_SEVERITY_PROT should automatically shut down the regulator(s). - * REGULATOR_SEVERITY_ERR should indicate that over-current situation is - * caused by an unrecoverable error but HW does not perform - * automatic shut down. - * REGULATOR_SEVERITY_WARN should indicate situation where hardware is - * still believed to not be damaged but that a board sepcific - * recovery action is needed. If lim_uA is 0 the limit should not - * be changed but the detection should just be enabled/disabled as - * is requested. - * @set_over_voltage_protection: Support enabling of and setting limits for over - * voltage situation detection. Detection can be configured for same - * severities as over current protection. - * @set_under_voltage_protection: Support enabling of and setting limits for - * under situation detection. - * @set_thermal_protection: Support enabling of and setting limits for over - * temperature situation detection. + * @set_over_current_protection: Support capability of automatically shutting + * down when detecting an over current event. * * @set_active_discharge: Set active discharge enable/disable of regulators. 
* * @set_mode: Set the configured operating mode for the regulator. * @get_mode: Get the configured operating mode for the regulator. - * @get_error_flags: Get the current error(s) for the regulator. * @get_status: Return actual (not as-configured) status of regulator, as a * REGULATOR_STATUS value (or negative errno) * @get_optimum_mode: Get the most efficient operating mode for the regulator @@ -143,7 +131,7 @@ enum regulator_detection_severity { * suspended. * @set_suspend_mode: Set the operating mode for the regulator when the * system is suspended. - * @resume: Resume operation of suspended regulator. + * * @set_pull_down: Configure the regulator to pull down when the regulator * is disabled. * @@ -169,15 +157,8 @@ struct regulator_ops { int (*get_current_limit) (struct regulator_dev *); int (*set_input_current_limit) (struct regulator_dev *, int lim_uA); - int (*set_over_current_protection)(struct regulator_dev *, int lim_uA, - int severity, bool enable); - int (*set_over_voltage_protection)(struct regulator_dev *, int lim_uV, - int severity, bool enable); - int (*set_under_voltage_protection)(struct regulator_dev *, int lim_uV, - int severity, bool enable); - int (*set_thermal_protection)(struct regulator_dev *, int lim, - int severity, bool enable); - int (*set_active_discharge)(struct regulator_dev *, bool enable); + int (*set_over_current_protection) (struct regulator_dev *); + int (*set_active_discharge) (struct regulator_dev *, bool enable); /* enable/disable regulator */ int (*enable) (struct regulator_dev *); @@ -188,9 +169,6 @@ struct regulator_ops { int (*set_mode) (struct regulator_dev *, unsigned int mode); unsigned int (*get_mode) (struct regulator_dev *); - /* retrieve current error flags on the regulator */ - int (*get_error_flags)(struct regulator_dev *, unsigned int *flags); - /* Time taken to enable or set voltage on the regulator */ int (*enable_time) (struct regulator_dev *); int (*set_ramp_delay) (struct regulator_dev *, int ramp_delay); 
@@ -232,8 +210,6 @@ struct regulator_ops { /* set regulator suspend operating mode (defined in consumer.h) */ int (*set_suspend_mode) (struct regulator_dev *, unsigned int mode); - int (*resume)(struct regulator_dev *rdev); - int (*set_pull_down) (struct regulator_dev *); }; @@ -256,8 +232,6 @@ enum regulator_type { * @name: Identifying name for the regulator. * @supply_name: Identifying the regulator supply * @of_match: Name used to identify regulator in DT. - * @of_match_full_name: A flag to indicate that the of_match string, if - * present, should be matched against the node full_name. * @regulators_node: Name of node containing regulator definitions in DT. * @of_parse_cb: Optional callback called only if of_match is present. * Will be called for each regulator parsed from DT, during @@ -277,7 +251,6 @@ enum regulator_type { * @continuous_voltage_range: Indicates if the regulator can set any * voltage within constrains range. * @n_voltages: Number of selectors available for ops.list_voltage(). - * @n_current_limits: Number of selectors available for current limits * * @min_uV: Voltage given by the lowest selector (if linear mapping) * @uV_step: Voltage increase with each selector (if linear mapping) @@ -286,26 +259,13 @@ enum regulator_type { * @ramp_delay: Time to settle down after voltage change (unit: uV/us) * @min_dropout_uV: The minimum dropout voltage this regulator can handle * @linear_ranges: A constant table of possible voltage ranges. - * @linear_range_selectors: A constant table of voltage range selectors. - * If pickable ranges are used each range must - * have corresponding selector here. - * @n_linear_ranges: Number of entries in the @linear_ranges (and in - * linear_range_selectors if used) table(s). + * @n_linear_ranges: Number of entries in the @linear_ranges table. 
* @volt_table: Voltage mapping table (if table based mapping) - * @curr_table: Current limit mapping table (if table based mapping) * - * @vsel_range_reg: Register for range selector when using pickable ranges - * and ``regulator_map_*_voltage_*_pickable`` functions. - * @vsel_range_mask: Mask for register bitfield used for range selector - * @vsel_reg: Register for selector when using ``regulator_map_*_voltage_*`` + * @vsel_reg: Register for selector when using regulator_regmap_X_voltage_ * @vsel_mask: Mask for register bitfield used for selector - * @vsel_step: Specify the resolution of selector stepping when setting - * voltage. If 0, then no stepping is done (requested selector is - * set directly), if >0 then the regulator API will ramp the - * voltage up/down gradually each time increasing/decreasing the - * selector by the specified step value. - * @csel_reg: Register for current limit selector using regmap set_current_limit - * @csel_mask: Mask for register bitfield used for current limit selector + * @csel_reg: Register for TPS65218 LS3 current regulator + * @csel_mask: Mask for TPS65218 LS3 current regulator * @apply_reg: Register for initiate voltage change on the output when * using regulator_set_voltage_sel_regmap * @apply_bit: Register bitfield used for initiate voltage change on the @@ -328,34 +288,16 @@ enum regulator_type { * set_active_discharge * @active_discharge_reg: Register for control when using regmap * set_active_discharge - * @soft_start_reg: Register for control when using regmap set_soft_start - * @soft_start_mask: Mask for control when using regmap set_soft_start - * @soft_start_val_on: Enabling value for control when using regmap - * set_soft_start - * @pull_down_reg: Register for control when using regmap set_pull_down - * @pull_down_mask: Mask for control when using regmap set_pull_down - * @pull_down_val_on: Enabling value for control when using regmap - * set_pull_down - * - * @ramp_reg: Register for controlling the regulator 
ramp-rate. - * @ramp_mask: Bitmask for the ramp-rate control register. - * @ramp_delay_table: Table for mapping the regulator ramp-rate values. Values - * should be given in units of V/S (uV/uS). See the - * regulator_set_ramp_delay_regmap(). * * @enable_time: Time taken for initial enable of regulator (in uS). * @off_on_delay: guard time (in uS), before re-enabling a regulator * - * @poll_enabled_time: The polling interval (in uS) to use while checking that - * the regulator was actually enabled. Max upto enable_time. - * * @of_map_mode: Maps a hardware mode defined in a DeviceTree to a standard mode */ struct regulator_desc { const char *name; const char *supply_name; const char *of_match; - bool of_match_full_name; const char *regulators_node; int (*of_parse_cb)(struct device_node *, const struct regulator_desc *, @@ -363,7 +305,6 @@ struct regulator_desc { int id; unsigned int continuous_voltage_range:1; unsigned n_voltages; - unsigned int n_current_limits; const struct regulator_ops *ops; int irq; enum regulator_type type; @@ -376,19 +317,13 @@ struct regulator_desc { unsigned int ramp_delay; int min_dropout_uV; - const struct linear_range *linear_ranges; - const unsigned int *linear_range_selectors; - + const struct regulator_linear_range *linear_ranges; int n_linear_ranges; const unsigned int *volt_table; - const unsigned int *curr_table; - unsigned int vsel_range_reg; - unsigned int vsel_range_mask; unsigned int vsel_reg; unsigned int vsel_mask; - unsigned int vsel_step; unsigned int csel_reg; unsigned int csel_mask; unsigned int apply_reg; @@ -406,23 +341,11 @@ struct regulator_desc { unsigned int active_discharge_off; unsigned int active_discharge_mask; unsigned int active_discharge_reg; - unsigned int soft_start_reg; - unsigned int soft_start_mask; - unsigned int soft_start_val_on; - unsigned int pull_down_reg; - unsigned int pull_down_mask; - unsigned int pull_down_val_on; - unsigned int ramp_reg; - unsigned int ramp_mask; - const unsigned int 
*ramp_delay_table; - unsigned int n_ramp_values; unsigned int enable_time; unsigned int off_on_delay; - unsigned int poll_enabled_time; - unsigned int (*of_map_mode)(unsigned int mode); }; @@ -440,7 +363,12 @@ struct regulator_desc { * NULL). * @regmap: regmap to use for core regmap helpers if dev_get_regmap() is * insufficient. - * @ena_gpiod: GPIO controlling regulator enable. + * @ena_gpio_initialized: GPIO controlling regulator enable was properly + * initialized, meaning that >= 0 is a valid gpio + * identifier and < 0 is a non existent gpio. + * @ena_gpio: GPIO controlling regulator enable. + * @ena_gpio_invert: Sense for GPIO enable control. + * @ena_gpio_flags: Flags to use when calling gpio_request_one() */ struct regulator_config { struct device *dev; @@ -449,145 +377,10 @@ struct regulator_config { struct device_node *of_node; struct regmap *regmap; - struct gpio_desc *ena_gpiod; -}; - -/** - * struct regulator_err_state - regulator error/notification status - * - * @rdev: Regulator which status the struct indicates. - * @notifs: Events which have occurred on the regulator. - * @errors: Errors which are active on the regulator. - * @possible_errs: Errors which can be signaled (by given IRQ). - */ -struct regulator_err_state { - struct regulator_dev *rdev; - unsigned long notifs; - unsigned long errors; - int possible_errs; -}; - -/** - * struct regulator_irq_data - regulator error/notification status data - * - * @states: Status structs for each of the associated regulators. - * @num_states: Amount of associated regulators. - * @data: Driver data pointer given at regulator_irq_desc. - * @opaque: Value storage for IC driver. Core does not update this. ICs - * may want to store status register value here at map_event and - * compare contents at 'renable' callback to see if new problems - * have been added to status. 
If that is the case it may be - * desirable to return REGULATOR_ERROR_CLEARED and not - * REGULATOR_ERROR_ON to allow IRQ fire again and to generate - * notifications also for the new issues. - * - * This structure is passed to 'map_event' and 'renable' callbacks for - * reporting regulator status to core. - */ -struct regulator_irq_data { - struct regulator_err_state *states; - int num_states; - void *data; - long opaque; -}; - -/** - * struct regulator_irq_desc - notification sender for IRQ based events. - * - * @name: The visible name for the IRQ - * @fatal_cnt: If this IRQ is used to signal HW damaging condition it may be - * best to shut-down regulator(s) or reboot the SOC if error - * handling is repeatedly failing. If fatal_cnt is given the IRQ - * handling is aborted if it fails for fatal_cnt times and die() - * callback (if populated) or BUG() is called to try to prevent - * further damage. - * @reread_ms: The time which is waited before attempting to re-read status - * at the worker if IC reading fails. Immediate re-read is done - * if time is not specified. - * @irq_off_ms: The time which IRQ is kept disabled before re-evaluating the - * status for devices which keep IRQ disabled for duration of the - * error. If this is not given the IRQ is left enabled and renable - * is not called. - * @skip_off: If set to true the IRQ handler will attempt to check if any of - * the associated regulators are enabled prior to taking other - * actions. If no regulators are enabled and this is set to true - * a spurious IRQ is assumed and IRQ_NONE is returned. - * @high_prio: Boolean to indicate that high priority WQ should be used. - * @data: Driver private data pointer which will be passed as such to - * the renable, map_event and die callbacks in regulator_irq_data. - * @die: Protection callback. If IC status reading or recovery actions - * fail fatal_cnt times this callback or BUG() is called. 
This - * callback should implement a final protection attempt like - * disabling the regulator. If protection succeeded this may - * return 0. If anything else is returned the core assumes final - * protection failed and calls BUG() as a last resort. - * @map_event: Driver callback to map IRQ status into regulator devices with - * events / errors. NOTE: callback MUST initialize both the - * errors and notifs for all rdevs which it signals having - * active events as core does not clean the map data. - * REGULATOR_FAILED_RETRY can be returned to indicate that the - * status reading from IC failed. If this is repeated for - * fatal_cnt times the core will call die() callback or power-off - * the system as a last resort to protect the HW. - * @renable: Optional callback to check status (if HW supports that) before - * re-enabling IRQ. If implemented this should clear the error - * flags so that errors fetched by regulator_get_error_flags() - * are updated. If callback is not implemented then errors are - * assumed to be cleared and IRQ is re-enabled. - * REGULATOR_FAILED_RETRY can be returned to - * indicate that the status reading from IC failed. If this is - * repeated for 'fatal_cnt' times the core will call die() - * callback or if die() is not populated then attempt to power-off - * the system as a last resort to protect the HW. - * Returning zero indicates that the problem in HW has been solved - * and IRQ will be re-enabled. Returning REGULATOR_ERROR_ON - * indicates the error condition is still active and keeps IRQ - * disabled. Please note that returning REGULATOR_ERROR_ON does - * not retrigger evaluating what events are active or resending - * notifications. If this is needed you probably want to return - * zero and allow IRQ to retrigger causing events to be - * re-evaluated and re-sent. - * - * This structure is used for registering regulator IRQ notification helper. 
- */ -struct regulator_irq_desc { - const char *name; - int irq_flags; - int fatal_cnt; - int reread_ms; - int irq_off_ms; - bool skip_off; - bool high_prio; - void *data; - - int (*die)(struct regulator_irq_data *rid); - int (*map_event)(int irq, struct regulator_irq_data *rid, - unsigned long *dev_mask); - int (*renable)(struct regulator_irq_data *rid); -}; - -/* - * Return values for regulator IRQ helpers. - */ -enum { - REGULATOR_ERROR_CLEARED, - REGULATOR_FAILED_RETRY, - REGULATOR_ERROR_ON, -}; - -/* - * struct coupling_desc - * - * Describes coupling of regulators. Each regulator should have - * at least a pointer to itself in coupled_rdevs array. - * When a new coupled regulator is resolved, n_resolved is - * incremented. - */ -struct coupling_desc { - struct regulator_dev **coupled_rdevs; - struct regulator_coupler *coupler; - int n_resolved; - int n_coupled; + bool ena_gpio_initialized; + int ena_gpio; + unsigned int ena_gpio_invert:1; + unsigned int ena_gpio_flags; }; /* @@ -613,12 +406,8 @@ struct regulator_dev { /* lists we own */ struct list_head consumer_list; /* consumers we supply */ - struct coupling_desc coupling_desc; - struct blocking_notifier_head notifier; - struct ww_mutex mutex; /* consumer lock */ - struct task_struct *mutex_owner; - int ref_cnt; + struct mutex mutex; /* consumer lock */ struct module *owner; struct device dev; struct regulation_constraints *constraints; @@ -627,6 +416,7 @@ struct regulator_dev { struct regmap *regmap; struct delayed_work disable_work; + int deferred_disables; void *reg_data; /* regulator_dev data */ @@ -635,13 +425,8 @@ struct regulator_dev { struct regulator_enable_gpio *ena_pin; unsigned int ena_gpio_state:1; - unsigned int is_switch:1; - /* time when this regulator was disabled last time */ - ktime_t last_off; - int cached_err; - bool use_cached_err; - spinlock_t err_lock; + unsigned long last_off_jiffy; }; struct regulator_dev * @@ -652,48 +437,31 @@ devm_regulator_register(struct device *dev, const 
struct regulator_desc *regulator_desc, const struct regulator_config *config); void regulator_unregister(struct regulator_dev *rdev); +void devm_regulator_unregister(struct device *dev, struct regulator_dev *rdev); int regulator_notifier_call_chain(struct regulator_dev *rdev, unsigned long event, void *data); -void *devm_regulator_irq_helper(struct device *dev, - const struct regulator_irq_desc *d, int irq, - int irq_flags, int common_errs, - int *per_rdev_errs, struct regulator_dev **rdev, - int rdev_amount); -void *regulator_irq_helper(struct device *dev, - const struct regulator_irq_desc *d, int irq, - int irq_flags, int common_errs, int *per_rdev_errs, - struct regulator_dev **rdev, int rdev_amount); -void regulator_irq_helper_cancel(void **handle); void *rdev_get_drvdata(struct regulator_dev *rdev); struct device *rdev_get_dev(struct regulator_dev *rdev); -struct regmap *rdev_get_regmap(struct regulator_dev *rdev); int rdev_get_id(struct regulator_dev *rdev); int regulator_mode_to_status(unsigned int); int regulator_list_voltage_linear(struct regulator_dev *rdev, unsigned int selector); -int regulator_list_voltage_pickable_linear_range(struct regulator_dev *rdev, - unsigned int selector); int regulator_list_voltage_linear_range(struct regulator_dev *rdev, unsigned int selector); int regulator_list_voltage_table(struct regulator_dev *rdev, unsigned int selector); int regulator_map_voltage_linear(struct regulator_dev *rdev, int min_uV, int max_uV); -int regulator_map_voltage_pickable_linear_range(struct regulator_dev *rdev, - int min_uV, int max_uV); int regulator_map_voltage_linear_range(struct regulator_dev *rdev, int min_uV, int max_uV); int regulator_map_voltage_iterate(struct regulator_dev *rdev, int min_uV, int max_uV); int regulator_map_voltage_ascend(struct regulator_dev *rdev, int min_uV, int max_uV); -int regulator_get_voltage_sel_pickable_regmap(struct regulator_dev *rdev); -int regulator_set_voltage_sel_pickable_regmap(struct regulator_dev *rdev, - 
unsigned int sel); int regulator_get_voltage_sel_regmap(struct regulator_dev *rdev); int regulator_set_voltage_sel_regmap(struct regulator_dev *rdev, unsigned sel); int regulator_is_enabled_regmap(struct regulator_dev *rdev); @@ -704,35 +472,9 @@ int regulator_set_voltage_time_sel(struct regulator_dev *rdev, unsigned int new_selector); int regulator_set_bypass_regmap(struct regulator_dev *rdev, bool enable); int regulator_get_bypass_regmap(struct regulator_dev *rdev, bool *enable); -int regulator_set_soft_start_regmap(struct regulator_dev *rdev); -int regulator_set_pull_down_regmap(struct regulator_dev *rdev); int regulator_set_active_discharge_regmap(struct regulator_dev *rdev, bool enable); -int regulator_set_current_limit_regmap(struct regulator_dev *rdev, - int min_uA, int max_uA); -int regulator_get_current_limit_regmap(struct regulator_dev *rdev); void *regulator_get_init_drvdata(struct regulator_init_data *reg_init_data); -int regulator_set_ramp_delay_regmap(struct regulator_dev *rdev, int ramp_delay); -int regulator_sync_voltage_rdev(struct regulator_dev *rdev); - -/* - * Helper functions intended to be used by regulator drivers prior registering - * their regulators. - */ -int regulator_desc_list_voltage_linear_range(const struct regulator_desc *desc, - unsigned int selector); - -int regulator_desc_list_voltage_linear(const struct regulator_desc *desc, - unsigned int selector); - -#ifdef CONFIG_REGULATOR -const char *rdev_get_name(struct regulator_dev *rdev); -#else -static inline const char *rdev_get_name(struct regulator_dev *rdev) -{ - return NULL; -} -#endif #endif diff --git a/include/linux/regulator/fan53555.h b/include/linux/regulator/fan53555.h index ce8df21863..f13880e84d 100644 --- a/include/linux/regulator/fan53555.h +++ b/include/linux/regulator/fan53555.h @@ -1,9 +1,13 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * fan53555.h - Fairchild Regulator FAN53555 Driver * * Copyright (C) 2012 Marvell Technology Ltd. 
* Yunfan Zhang + * + * This package is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * */ #ifndef __FAN53555_H__ diff --git a/include/linux/regulator/fixed.h b/include/linux/regulator/fixed.h index 55319943fc..48918be649 100644 --- a/include/linux/regulator/fixed.h +++ b/include/linux/regulator/fixed.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * fixed.h * @@ -8,6 +7,11 @@ * * Copyright (c) 2009 Nokia Corporation * Roger Quadros + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or (at your option) any later version. */ #ifndef __REGULATOR_FIXED_H @@ -20,7 +24,17 @@ struct regulator_init_data; * @supply_name: Name of the regulator supply * @input_supply: Name of the input regulator supply * @microvolts: Output voltage of regulator + * @gpio: GPIO to use for enable control + * set to -EINVAL if not used * @startup_delay: Start-up time in microseconds + * @gpio_is_open_drain: Gpio pin is open drain or normal type. + * If it is open drain type then HIGH will be set + * through PULL-UP with setting gpio as input + * and low will be set as gpio-output with driven + * to low. For non-open-drain case, the gpio will + * will be in output and drive to low/high accordingly. + * @enable_high: Polarity of enable GPIO + * 1 = Active high, 0 = Active low * @enabled_at_boot: Whether regulator has been enabled at * boot or not. 
1 = Yes, 0 = No * This is used to keep the regulator at @@ -35,8 +49,10 @@ struct fixed_voltage_config { const char *supply_name; const char *input_supply; int microvolts; + int gpio; unsigned startup_delay; - unsigned int off_on_delay; + unsigned gpio_is_open_drain:1; + unsigned enable_high:1; unsigned enabled_at_boot:1; struct regulator_init_data *init_data; }; diff --git a/include/linux/regulator/gpio-regulator.h b/include/linux/regulator/gpio-regulator.h index fdeb312cda..19fbd26740 100644 --- a/include/linux/regulator/gpio-regulator.h +++ b/include/linux/regulator/gpio-regulator.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * gpio-regulator.h * @@ -12,13 +11,16 @@ * * Copyright (c) 2009 Nokia Corporation * Roger Quadros + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or (at your option) any later version. */ #ifndef __REGULATOR_GPIO_H #define __REGULATOR_GPIO_H -#include - struct regulator_init_data; enum regulator_type; @@ -42,14 +44,18 @@ struct gpio_regulator_state { /** * struct gpio_regulator_config - config structure * @supply_name: Name of the regulator supply + * @enable_gpio: GPIO to use for enable control + * set to -EINVAL if not used + * @enable_high: Polarity of enable GPIO + * 1 = Active high, 0 = Active low * @enabled_at_boot: Whether regulator has been enabled at * boot or not. 
1 = Yes, 0 = No * This is used to keep the regulator at * the default state * @startup_delay: Start-up time in microseconds - * @gflags: Array of GPIO configuration flags for initial - * states - * @ngpios: Number of GPIOs and configurations available + * @gpios: Array containing the gpios needed to control + * the setting of the regulator + * @nr_gpios: Number of gpios * @states: Array of gpio_regulator_state entries describing * the gpio state for specific voltages * @nr_states: Number of states available @@ -63,11 +69,13 @@ struct gpio_regulator_state { struct gpio_regulator_config { const char *supply_name; + int enable_gpio; + unsigned enable_high:1; unsigned enabled_at_boot:1; unsigned startup_delay; - enum gpiod_flags *gflags; - int ngpios; + struct gpio *gpios; + int nr_gpios; struct gpio_regulator_state *states; int nr_states; diff --git a/include/linux/regulator/lp3971.h b/include/linux/regulator/lp3971.h index 0522e82d47..61401649fe 100644 --- a/include/linux/regulator/lp3971.h +++ b/include/linux/regulator/lp3971.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * National Semiconductors LP3971 PMIC chip client interface * @@ -6,6 +5,20 @@ * Author: Marek Szyprowski * * Based on wm8400.h + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 
*/ #ifndef __LINUX_REGULATOR_LP3971_H diff --git a/include/linux/regulator/lp3972.h b/include/linux/regulator/lp3972.h index 160a3def31..9bb7389b7a 100644 --- a/include/linux/regulator/lp3972.h +++ b/include/linux/regulator/lp3972.h @@ -1,8 +1,21 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * National Semiconductors LP3972 PMIC chip client interface * * Based on lp3971.h + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ #ifndef __LINUX_REGULATOR_LP3972_H diff --git a/include/linux/regulator/lp872x.h b/include/linux/regulator/lp872x.h index d780dbb8b4..6029279f4e 100644 --- a/include/linux/regulator/lp872x.h +++ b/include/linux/regulator/lp872x.h @@ -1,8 +1,12 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright 2012 Texas Instruments * * Author: Milo(Woogyom) Kim + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ * */ #ifndef __LP872X_REGULATOR_H__ diff --git a/include/linux/regulator/machine.h b/include/linux/regulator/machine.h index 621b7f4a36..ad3e5158e5 100644 --- a/include/linux/regulator/machine.h +++ b/include/linux/regulator/machine.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * machine.h -- SoC Regulator support, machine/board driver API. * @@ -6,6 +5,10 @@ * * Author: Liam Girdwood * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * * Regulator Machine/Board Interface. */ @@ -39,16 +42,6 @@ struct regulator; #define REGULATOR_CHANGE_DRMS 0x10 #define REGULATOR_CHANGE_BYPASS 0x20 -/* - * operations in suspend mode - * DO_NOTHING_IN_SUSPEND - the default value - * DISABLE_IN_SUSPEND - turn off regulator in suspend states - * ENABLE_IN_SUSPEND - keep regulator on in suspend states - */ -#define DO_NOTHING_IN_SUSPEND 0 -#define DISABLE_IN_SUSPEND 1 -#define ENABLE_IN_SUSPEND 2 - /* Regulator active discharge flags */ enum regulator_active_discharge { REGULATOR_ACTIVE_DISCHARGE_DEFAULT, @@ -63,32 +56,16 @@ enum regulator_active_discharge { * state. One of enabled or disabled must be set for the * configuration to be applied. * - * @uV: Default operating voltage during suspend, it can be adjusted - * among . - * @min_uV: Minimum suspend voltage may be set. - * @max_uV: Maximum suspend voltage may be set. + * @uV: Operating voltage during suspend. * @mode: Operating mode during suspend. - * @enabled: operations during suspend. - * - DO_NOTHING_IN_SUSPEND - * - DISABLE_IN_SUSPEND - * - ENABLE_IN_SUSPEND - * @changeable: Is this state can be switched between enabled/disabled, + * @enabled: Enabled during suspend. + * @disabled: Disabled during suspend. 
*/ struct regulator_state { - int uV; - int min_uV; - int max_uV; - unsigned int mode; - int enabled; - bool changeable; -}; - -#define REGULATOR_NOTIF_LIMIT_DISABLE -1 -#define REGULATOR_NOTIF_LIMIT_ENABLE -2 -struct notification_limit { - int prot; - int err; - int warn; + int uV; /* suspend voltage */ + unsigned int mode; /* suspend regulator operating mode */ + int enabled; /* is regulator enabled in this suspend state */ + int disabled; /* is the regulator disbled in this suspend state */ }; /** @@ -108,13 +85,6 @@ struct notification_limit { * @ilim_uA: Maximum input current. * @system_load: Load that isn't captured by any consumer requests. * - * @over_curr_limits: Limits for acting on over current. - * @over_voltage_limits: Limits for acting on over voltage. - * @under_voltage_limits: Limits for acting on under voltage. - * @temp_limits: Limits for acting on over temperature. - * - * @max_spread: Max possible spread between coupled regulators - * @max_uV_step: Max possible step change in voltage * @valid_modes_mask: Mask of modes which may be configured by consumers. * @valid_ops_mask: Operations which may be performed by consumers. * @@ -129,11 +99,6 @@ struct notification_limit { * @pull_down: Enable pull down when regulator is disabled. * @over_current_protection: Auto disable on over current event. * - * @over_current_detection: Configure over current limits. - * @over_voltage_detection: Configure over voltage limits. - * @under_voltage_detection: Configure under voltage limits. - * @over_temp_detection: Configure over temperature limits. - * * @input_uV: Input voltage for regulator when supplied by another regulator. * * @state_disk: State for regulator when system is suspended in disk mode. @@ -143,12 +108,6 @@ struct notification_limit { * @initial_state: Suspend state to set by default. * @initial_mode: Mode to set at startup. 
* @ramp_delay: Time to settle down after voltage change (unit: uV/us) - * @settling_time: Time to settle down after voltage change when voltage - * change is non-linear (unit: microseconds). - * @settling_time_up: Time to settle down after voltage increase when voltage - * change is non-linear (unit: microseconds). - * @settling_time_down : Time to settle down after voltage decrease when - * voltage change is non-linear (unit: microseconds). * @active_discharge: Enable/disable active discharge. The enum * regulator_active_discharge values are used for * initialisation. @@ -171,12 +130,6 @@ struct regulation_constraints { int system_load; - /* used for coupled regulators */ - u32 *max_spread; - - /* used for changing voltage in steps */ - int max_uV_step; - /* valid regulator operating modes for this machine */ unsigned int valid_modes_mask; @@ -190,19 +143,12 @@ struct regulation_constraints { struct regulator_state state_disk; struct regulator_state state_mem; struct regulator_state state_standby; - struct notification_limit over_curr_limits; - struct notification_limit over_voltage_limits; - struct notification_limit under_voltage_limits; - struct notification_limit temp_limits; suspend_state_t initial_state; /* suspend state to set at init */ /* mode to set on startup */ unsigned int initial_mode; unsigned int ramp_delay; - unsigned int settling_time; - unsigned int settling_time_up; - unsigned int settling_time_down; unsigned int enable_time; unsigned int active_discharge; @@ -215,10 +161,6 @@ struct regulation_constraints { unsigned soft_start:1; /* ramp voltage slowly */ unsigned pull_down:1; /* pull down resistor when regulator off */ unsigned over_current_protection:1; /* auto disable on over current */ - unsigned over_current_detection:1; /* notify on over current */ - unsigned over_voltage_detection:1; /* notify on over voltage */ - unsigned under_voltage_detection:1; /* notify on under voltage */ - unsigned over_temp_detection:1; /* notify on over 
temperature */ }; /** @@ -274,12 +216,12 @@ struct regulator_init_data { #ifdef CONFIG_REGULATOR void regulator_has_full_constraints(void); +int regulator_suspend_prepare(suspend_state_t state); +int regulator_suspend_finish(void); #else static inline void regulator_has_full_constraints(void) { } -#endif - static inline int regulator_suspend_prepare(suspend_state_t state) { return 0; @@ -288,5 +230,6 @@ static inline int regulator_suspend_finish(void) { return 0; } +#endif #endif diff --git a/include/linux/regulator/max1586.h b/include/linux/regulator/max1586.h index 969f4c9484..cedd0febe8 100644 --- a/include/linux/regulator/max1586.h +++ b/include/linux/regulator/max1586.h @@ -1,8 +1,21 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * max1586.h -- Voltage regulation for the Maxim 1586 * * Copyright (C) 2008 Robert Jarzmik + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #ifndef REGULATOR_MAX1586 diff --git a/include/linux/regulator/max8649.h b/include/linux/regulator/max8649.h index bc9b9c98c1..417d14ecd5 100644 --- a/include/linux/regulator/max8649.h +++ b/include/linux/regulator/max8649.h @@ -1,9 +1,12 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * Interface of Maxim max8649 * * Copyright (C) 2009-2010 Marvell International Ltd. 
* Haojian Zhuang + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. */ #ifndef __LINUX_REGULATOR_MAX8649_H diff --git a/include/linux/regulator/max8660.h b/include/linux/regulator/max8660.h index e1b9f9020e..f8a6a48448 100644 --- a/include/linux/regulator/max8660.h +++ b/include/linux/regulator/max8660.h @@ -1,8 +1,20 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * max8660.h -- Voltage regulation for the Maxim 8660/8661 * * Copyright (C) 2009 Wolfram Sang, Pengutronix e.K. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; version 2 of the License. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #ifndef __LINUX_REGULATOR_MAX8660_H diff --git a/include/linux/regulator/max8952.h b/include/linux/regulator/max8952.h index 8712c091ab..4dbb63a1d4 100644 --- a/include/linux/regulator/max8952.h +++ b/include/linux/regulator/max8952.h @@ -1,9 +1,22 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * max8952.h - Voltage regulation for the Maxim 8952 * * Copyright (C) 2010 Samsung Electrnoics * MyungJoo Ham + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. 
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #ifndef REGULATOR_MAX8952 @@ -105,6 +118,10 @@ enum { #define MAX8952_NUM_DVS_MODE 4 struct max8952_platform_data { + int gpio_vid0; + int gpio_vid1; + int gpio_en; + u32 default_mode; u32 dvs_mode[MAX8952_NUM_DVS_MODE]; /* MAX8952_DVS_MODEx_XXXXmV */ diff --git a/include/linux/regulator/max8973-regulator.h b/include/linux/regulator/max8973-regulator.h index 8313e7ed6a..2fcb998026 100644 --- a/include/linux/regulator/max8973-regulator.h +++ b/include/linux/regulator/max8973-regulator.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * max8973-regulator.h -- MAXIM 8973 regulator * @@ -8,6 +7,21 @@ * Copyright (C) 2012 NVIDIA Corporation * Author: Laxman Dewangan + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. 
+ * */ #ifndef __LINUX_REGULATOR_MAX8973_H diff --git a/include/linux/regulator/mt6311.h b/include/linux/regulator/mt6311.h index eb20c9d1ad..8473259395 100644 --- a/include/linux/regulator/mt6311.h +++ b/include/linux/regulator/mt6311.h @@ -1,7 +1,15 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright (c) 2015 MediaTek Inc. * Author: Henry Chen + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. */ #ifndef __LINUX_REGULATOR_MT6311_H diff --git a/include/linux/regulator/mt6323-regulator.h b/include/linux/regulator/mt6323-regulator.h index c8103b8175..67011cd1ce 100644 --- a/include/linux/regulator/mt6323-regulator.h +++ b/include/linux/regulator/mt6323-regulator.h @@ -1,7 +1,15 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright (c) 2016 MediaTek Inc. * Author: Chen Zhong + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
*/ #ifndef __LINUX_REGULATOR_MT6323_H diff --git a/include/linux/regulator/mt6397-regulator.h b/include/linux/regulator/mt6397-regulator.h index 99b266711b..30cc5963e2 100644 --- a/include/linux/regulator/mt6397-regulator.h +++ b/include/linux/regulator/mt6397-regulator.h @@ -1,7 +1,15 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright (c) 2014 MediaTek Inc. * Author: Flora Fu + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. */ #ifndef __LINUX_REGULATOR_MT6397_H diff --git a/include/linux/regulator/of_regulator.h b/include/linux/regulator/of_regulator.h index df7f154a2e..763953f7e3 100644 --- a/include/linux/regulator/of_regulator.h +++ b/include/linux/regulator/of_regulator.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ /* * OpenFirmware regulator support routines * diff --git a/include/linux/regulator/pfuze100.h b/include/linux/regulator/pfuze100.h index c964fe8ab6..70c6c66c5b 100644 --- a/include/linux/regulator/pfuze100.h +++ b/include/linux/regulator/pfuze100.h @@ -1,6 +1,19 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * Copyright (C) 2011-2013 Freescale Semiconductor, Inc. All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. 
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. */ #ifndef __LINUX_REG_PFUZE100_H #define __LINUX_REG_PFUZE100_H @@ -20,8 +33,7 @@ #define PFUZE100_VGEN4 12 #define PFUZE100_VGEN5 13 #define PFUZE100_VGEN6 14 -#define PFUZE100_COIN 15 -#define PFUZE100_MAX_REGULATOR 16 +#define PFUZE100_MAX_REGULATOR 15 #define PFUZE200_SW1AB 0 #define PFUZE200_SW2 1 @@ -36,7 +48,6 @@ #define PFUZE200_VGEN4 10 #define PFUZE200_VGEN5 11 #define PFUZE200_VGEN6 12 -#define PFUZE200_COIN 13 #define PFUZE3000_SW1A 0 #define PFUZE3000_SW1B 1 @@ -52,15 +63,10 @@ #define PFUZE3000_VLDO3 11 #define PFUZE3000_VLDO4 12 -#define PFUZE3001_SW1 0 -#define PFUZE3001_SW2 1 -#define PFUZE3001_SW3 2 -#define PFUZE3001_VSNVS 3 -#define PFUZE3001_VLDO1 4 -#define PFUZE3001_VLDO2 5 -#define PFUZE3001_VCCSD 6 -#define PFUZE3001_V33 7 -#define PFUZE3001_VLDO3 8 -#define PFUZE3001_VLDO4 9 +struct regulator_init_data; + +struct pfuze_regulator_platform_data { + struct regulator_init_data *init_data[PFUZE100_MAX_REGULATOR]; +}; #endif /* __LINUX_REG_PFUZE100_H */ diff --git a/include/linux/regulator/tps51632-regulator.h b/include/linux/regulator/tps51632-regulator.h index 1413d77c2f..d00841e1a7 100644 --- a/include/linux/regulator/tps51632-regulator.h +++ b/include/linux/regulator/tps51632-regulator.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * tps51632-regulator.h -- TPS51632 regulator * @@ -8,6 +7,21 @@ * Copyright (C) 2012 NVIDIA Corporation * Author: Laxman Dewangan + * + * This program is free software; you can redistribute it and/or modify + * it under the 
terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. + * */ #ifndef __LINUX_REGULATOR_TPS51632_H diff --git a/include/linux/regulator/tps62360.h b/include/linux/regulator/tps62360.h index 94a90c06f1..a4c49394c4 100644 --- a/include/linux/regulator/tps62360.h +++ b/include/linux/regulator/tps62360.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * tps62360.h -- TI tps62360 * @@ -7,6 +6,21 @@ * Copyright (C) 2012 NVIDIA Corporation * Author: Laxman Dewangan + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. 
+ * */ #ifndef __LINUX_REGULATOR_TPS62360_H diff --git a/include/linux/regulator/tps6507x.h b/include/linux/regulator/tps6507x.h index 58117575de..4892f591ba 100644 --- a/include/linux/regulator/tps6507x.h +++ b/include/linux/regulator/tps6507x.h @@ -1,8 +1,20 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * tps6507x.h -- Voltage regulation for the Texas Instruments TPS6507X * * Copyright (C) 2010 Texas Instruments, Inc. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #ifndef REGULATOR_TPS6507X diff --git a/include/linux/regulator/userspace-consumer.h b/include/linux/regulator/userspace-consumer.h index b5dba06289..b4554ce9d4 100644 --- a/include/linux/regulator/userspace-consumer.h +++ b/include/linux/regulator/userspace-consumer.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef __REGULATOR_PLATFORM_CONSUMER_H_ #define __REGULATOR_PLATFORM_CONSUMER_H_ diff --git a/include/linux/relay.h b/include/linux/relay.h index 72b876dd5c..3ef7baffe3 100644 --- a/include/linux/relay.h +++ b/include/linux/relay.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ /* * linux/include/linux/relay.h * @@ -62,11 +61,11 @@ struct rchan size_t subbuf_size; /* sub-buffer size */ size_t n_subbufs; /* number of sub-buffers per buffer */ size_t alloc_size; /* total buffer size allocated */ - const struct rchan_callbacks *cb; /* client callbacks */ + struct 
rchan_callbacks *cb; /* client callbacks */ struct kref kref; /* channel refcount */ void *private_data; /* for user-defined data */ size_t last_toobig; /* tried to log event > subbuf size */ - struct rchan_buf * __percpu *buf; /* per-cpu channel buffers */ + struct rchan_buf ** __percpu buf; /* per-cpu channel buffers */ int is_global; /* One global buffer ? */ struct list_head list; /* for channel list */ struct dentry *parent; /* parent dentry passed to open */ @@ -89,8 +88,6 @@ struct rchan_callbacks * The client should return 1 to continue logging, 0 to stop * logging. * - * This callback is optional. - * * NOTE: subbuf_start will also be invoked when the buffer is * created, so that the first sub-buffer can be initialized * if necessary. In this case, prev_subbuf will be NULL. @@ -103,6 +100,25 @@ struct rchan_callbacks void *prev_subbuf, size_t prev_padding); + /* + * buf_mapped - relay buffer mmap notification + * @buf: the channel buffer + * @filp: relay file pointer + * + * Called when a relay file is successfully mmapped + */ + void (*buf_mapped)(struct rchan_buf *buf, + struct file *filp); + + /* + * buf_unmapped - relay buffer unmap notification + * @buf: the channel buffer + * @filp: relay file pointer + * + * Called when a relay file is successfully unmapped + */ + void (*buf_unmapped)(struct rchan_buf *buf, + struct file *filp); /* * create_buf_file - create file to represent a relay channel buffer * @filename: the name of the file to create @@ -124,9 +140,7 @@ struct rchan_callbacks * cause relay_open() to create a single global buffer rather * than the default set of per-cpu buffers. * - * This callback is mandatory. - * - * See Documentation/filesystems/relay.rst for more info. + * See Documentation/filesystems/relay.txt for more info. */ struct dentry *(*create_buf_file)(const char *filename, struct dentry *parent, @@ -143,11 +157,9 @@ struct rchan_callbacks * channel buffer. * * The callback should return 0 if successful, negative if not. 
- * - * This callback is mandatory. */ int (*remove_buf_file)(struct dentry *dentry); -}; +} __no_const; /* * CONFIG_RELAY kernel API, kernel/relay.c @@ -157,7 +169,7 @@ struct rchan *relay_open(const char *base_filename, struct dentry *parent, size_t subbuf_size, size_t n_subbufs, - const struct rchan_callbacks *cb, + struct rchan_callbacks *cb, void *private_data); extern int relay_late_setup_files(struct rchan *chan, const char *base_filename, diff --git a/include/linux/remoteproc.h b/include/linux/remoteproc.h index 83c09ac36b..930023b7c8 100644 --- a/include/linux/remoteproc.h +++ b/include/linux/remoteproc.h @@ -38,7 +38,6 @@ #include #include #include -#include #include #include #include @@ -74,7 +73,7 @@ struct resource_table { u32 ver; u32 num; u32 reserved[2]; - u32 offset[]; + u32 offset[0]; } __packed; /** @@ -88,7 +87,7 @@ struct resource_table { */ struct fw_rsc_hdr { u32 type; - u8 data[]; + u8 data[0]; } __packed; /** @@ -101,9 +100,7 @@ struct fw_rsc_hdr { * the remote processor will be writing logs. * @RSC_VDEV: declare support for a virtio device, and serve as its * virtio header. - * @RSC_LAST: just keep this one at the end of standard resources - * @RSC_VENDOR_START: start of the vendor specific resource types range - * @RSC_VENDOR_END: end of the vendor specific resource types range + * @RSC_LAST: just keep this one at the end * * For more details regarding a specific resource type, please see its * dedicated structure below. @@ -114,13 +111,11 @@ struct fw_rsc_hdr { * please update it as needed. 
*/ enum fw_resource_type { - RSC_CARVEOUT = 0, - RSC_DEVMEM = 1, - RSC_TRACE = 2, - RSC_VDEV = 3, - RSC_LAST = 4, - RSC_VENDOR_START = 128, - RSC_VENDOR_END = 512, + RSC_CARVEOUT = 0, + RSC_DEVMEM = 1, + RSC_TRACE = 2, + RSC_VDEV = 3, + RSC_LAST = 4, }; #define FW_RSC_ADDR_ANY (-1) @@ -243,7 +238,7 @@ struct fw_rsc_trace { * @da: device address * @align: the alignment between the consumer and producer parts of the vring * @num: num of buffers supported by this vring (must be power of two) - * @notifyid: a unique rproc-wide notify index for this vring. This notify + * @notifyid is a unique rproc-wide notify index for this vring. This notify * index is used when kicking a remote processor, to let it know that this * vring is triggered. * @pa: physical address @@ -266,18 +261,18 @@ struct fw_rsc_vdev_vring { /** * struct fw_rsc_vdev - virtio device header * @id: virtio device id (as in virtio_ids.h) - * @notifyid: a unique rproc-wide notify index for this vdev. This notify + * @notifyid is a unique rproc-wide notify index for this vdev. This notify * index is used when kicking a remote processor, to let it know that the * status/features of this vdev have changes. - * @dfeatures: specifies the virtio device features supported by the firmware - * @gfeatures: a place holder used by the host to write back the + * @dfeatures specifies the virtio device features supported by the firmware + * @gfeatures is a place holder used by the host to write back the * negotiated features that are supported by both sides. - * @config_len: the size of the virtio config space of this vdev. The config + * @config_len is the size of the virtio config space of this vdev. The config * space lies in the resource table immediate after this vdev header. - * @status: a place holder where the host will indicate its virtio progress. - * @num_of_vrings: indicates how many vrings are described in this vdev header + * @status is a place holder where the host will indicate its virtio progress. 
+ * @num_of_vrings indicates how many vrings are described in this vdev header * @reserved: reserved (must be zero) - * @vring: an array of @num_of_vrings entries of 'struct fw_rsc_vdev_vring'. + * @vring is an array of @num_of_vrings entries of 'struct fw_rsc_vdev_vring'. * * This resource is a virtio device header: it provides information about * the vdev, and is then used by the host and its peer remote processors @@ -287,17 +282,16 @@ struct fw_rsc_vdev_vring { * to statically allocate a vdev upon registration of the rproc (dynamic vdev * allocation is not yet supported). * - * Note: - * 1. unlike virtualization systems, the term 'host' here means - * the Linux side which is running remoteproc to control the remote - * processors. We use the name 'gfeatures' to comply with virtio's terms, - * though there isn't really any virtualized guest OS here: it's the host - * which is responsible for negotiating the final features. - * Yeah, it's a bit confusing. + * Note: unlike virtualization systems, the term 'host' here means + * the Linux side which is running remoteproc to control the remote + * processors. We use the name 'gfeatures' to comply with virtio's terms, + * though there isn't really any virtualized guest OS here: it's the host + * which is responsible for negotiating the final features. + * Yeah, it's a bit confusing. * - * 2. immediately following this structure is the virtio config space for - * this vdev (which is specific to the vdev; for more info, read the virtio - * spec). The size of the config space is specified by @config_len. + * Note: immediately following this structure is the virtio config space for + * this vdev (which is specific to the vdev; for more info, read the virtio + * spec). the size of the config space is specified by @config_len. 
*/ struct fw_rsc_vdev { u32 id; @@ -308,101 +302,41 @@ struct fw_rsc_vdev { u8 status; u8 num_of_vrings; u8 reserved[2]; - struct fw_rsc_vdev_vring vring[]; + struct fw_rsc_vdev_vring vring[0]; } __packed; -struct rproc; - /** * struct rproc_mem_entry - memory entry descriptor * @va: virtual address - * @is_iomem: io memory * @dma: dma address * @len: length, in bytes * @da: device address - * @release: release associated memory * @priv: associated data - * @name: associated memory region name (optional) * @node: list node - * @rsc_offset: offset in resource table - * @flags: iommu protection flags - * @of_resm_idx: reserved memory phandle index - * @alloc: specific memory allocator function */ struct rproc_mem_entry { void *va; - bool is_iomem; dma_addr_t dma; - size_t len; + int len; u32 da; void *priv; - char name[32]; struct list_head node; - u32 rsc_offset; - u32 flags; - u32 of_resm_idx; - int (*alloc)(struct rproc *rproc, struct rproc_mem_entry *mem); - int (*release)(struct rproc *rproc, struct rproc_mem_entry *mem); }; -struct firmware; - -/** - * enum rsc_handling_status - return status of rproc_ops handle_rsc hook - * @RSC_HANDLED: resource was handled - * @RSC_IGNORED: resource was ignored - */ -enum rsc_handling_status { - RSC_HANDLED = 0, - RSC_IGNORED = 1, -}; +struct rproc; /** * struct rproc_ops - platform-specific device handlers - * @prepare: prepare device for code loading - * @unprepare: unprepare device after stop * @start: power on the device and boot it * @stop: power off the device - * @attach: attach to a device that his already powered up - * @detach: detach from a device, leaving it powered up * @kick: kick a virtqueue (virtqueue id given as a parameter) * @da_to_va: optional platform hook to perform address translations - * @parse_fw: parse firmware to extract information (e.g. resource table) - * @handle_rsc: optional platform hook to handle vendor resources. 
Should return - * RSC_HANDLED if resource was handled, RSC_IGNORED if not handled - * and a negative value on error - * @find_loaded_rsc_table: find the loaded resource table from firmware image - * @get_loaded_rsc_table: get resource table installed in memory - * by external entity - * @load: load firmware to memory, where the remote processor - * expects to find it - * @sanity_check: sanity check the fw image - * @get_boot_addr: get boot address to entry point specified in firmware - * @panic: optional callback to react to system panic, core will delay - * panic at least the returned number of milliseconds - * @coredump: collect firmware dump after the subsystem is shutdown */ struct rproc_ops { - int (*prepare)(struct rproc *rproc); - int (*unprepare)(struct rproc *rproc); int (*start)(struct rproc *rproc); int (*stop)(struct rproc *rproc); - int (*attach)(struct rproc *rproc); - int (*detach)(struct rproc *rproc); void (*kick)(struct rproc *rproc, int vqid); - void * (*da_to_va)(struct rproc *rproc, u64 da, size_t len, bool *is_iomem); - int (*parse_fw)(struct rproc *rproc, const struct firmware *fw); - int (*handle_rsc)(struct rproc *rproc, u32 rsc_type, void *rsc, - int offset, int avail); - struct resource_table *(*find_loaded_rsc_table)( - struct rproc *rproc, const struct firmware *fw); - struct resource_table *(*get_loaded_rsc_table)( - struct rproc *rproc, size_t *size); - int (*load)(struct rproc *rproc, const struct firmware *fw); - int (*sanity_check)(struct rproc *rproc, const struct firmware *fw); - u64 (*get_boot_addr)(struct rproc *rproc, const struct firmware *fw); - unsigned long (*panic)(struct rproc *rproc); - void (*coredump)(struct rproc *rproc); + void * (*da_to_va)(struct rproc *rproc, u64 da, int len); }; /** @@ -412,11 +346,6 @@ struct rproc_ops { * a message. 
* @RPROC_RUNNING: device is up and running * @RPROC_CRASHED: device has crashed; need to start recovery - * @RPROC_DELETED: device is deleted - * @RPROC_ATTACHED: device has been booted by another entity and the core - * has attached to it - * @RPROC_DETACHED: device has been booted by another entity and waiting - * for the core to attach to it * @RPROC_LAST: just keep this one at the end * * Please note that the values of these states are used as indices @@ -430,17 +359,14 @@ enum rproc_state { RPROC_SUSPENDED = 1, RPROC_RUNNING = 2, RPROC_CRASHED = 3, - RPROC_DELETED = 4, - RPROC_ATTACHED = 5, - RPROC_DETACHED = 6, - RPROC_LAST = 7, + RPROC_LAST = 4, }; /** * enum rproc_crash_type - remote processor crash types * @RPROC_MMUFAULT: iommu fault * @RPROC_WATCHDOG: watchdog bite - * @RPROC_FATAL_ERROR: fatal error + * @RPROC_FATAL_ERROR fatal error * * Each element of the enum is used as an array index. So that, the value of * the elements should be always something sane. @@ -453,42 +379,6 @@ enum rproc_crash_type { RPROC_FATAL_ERROR, }; -/** - * enum rproc_dump_mechanism - Coredump options for core - * @RPROC_COREDUMP_DISABLED: Don't perform any dump - * @RPROC_COREDUMP_ENABLED: Copy dump to separate buffer and carry on with - * recovery - * @RPROC_COREDUMP_INLINE: Read segments directly from device memory. 
Stall - * recovery until all segments are read - */ -enum rproc_dump_mechanism { - RPROC_COREDUMP_DISABLED, - RPROC_COREDUMP_ENABLED, - RPROC_COREDUMP_INLINE, -}; - -/** - * struct rproc_dump_segment - segment info from ELF header - * @node: list node related to the rproc segment list - * @da: device address of the segment - * @size: size of the segment - * @priv: private data associated with the dump_segment - * @dump: custom dump function to fill device memory segment associated - * with coredump - * @offset: offset of the segment - */ -struct rproc_dump_segment { - struct list_head node; - - dma_addr_t da; - size_t size; - - void *priv; - void (*dump)(struct rproc *rproc, struct rproc_dump_segment *segment, - void *dest, size_t offset, size_t size); - loff_t offset; -}; - /** * struct rproc - represents a physical remote processor device * @node: list node of this rproc object @@ -498,37 +388,28 @@ struct rproc_dump_segment { * @priv: private data which belongs to the platform-specific rproc module * @ops: platform-specific start/stop rproc handlers * @dev: virtual device for refcounting and common remoteproc behavior + * @fw_ops: firmware-specific handlers * @power: refcount of users who need this rproc powered up * @state: state of the device - * @dump_conf: Currently selected coredump configuration * @lock: lock which protects concurrent manipulations of the rproc * @dbg_dir: debugfs directory of this rproc device * @traces: list of trace buffers * @num_traces: number of trace buffers * @carveouts: list of physically contiguous memory allocations * @mappings: list of iommu mappings we initiated, needed on shutdown + * @firmware_loading_complete: marks e/o asynchronous firmware loading * @bootaddr: address of first instruction to boot rproc with (optional) * @rvdevs: list of remote virtio devices - * @subdevs: list of subdevices, to following the running state * @notifyids: idr for dynamically assigning rproc-wide unique notify ids * @index: index of this 
rproc device * @crash_handler: workqueue for handling a crash * @crash_cnt: crash counter + * @crash_comp: completion used to sync crash handler and the rproc reload * @recovery_disabled: flag that state if recovery was disabled * @max_notifyid: largest allocated notify id. * @table_ptr: pointer to the resource table in effect - * @clean_table: copy of the resource table without modifications. Used - * when a remote processor is attached or detached from the core * @cached_table: copy of the resource table - * @table_sz: size of @cached_table * @has_iommu: flag to indicate if remote processor is behind an MMU - * @auto_boot: flag to indicate if remote processor should be auto-started - * @dump_segments: list of segments in the firmware - * @nb_vdev: number of vdev currently handled by rproc - * @elf_class: firmware ELF class - * @elf_machine: firmware ELF machine - * @cdev: character device of the rproc - * @cdev_put_on_release: flag to indicate if remoteproc should be shutdown on @char_dev release */ struct rproc { struct list_head node; @@ -536,56 +417,31 @@ struct rproc { const char *name; const char *firmware; void *priv; - struct rproc_ops *ops; + const struct rproc_ops *ops; struct device dev; + const struct rproc_fw_ops *fw_ops; atomic_t power; unsigned int state; - enum rproc_dump_mechanism dump_conf; struct mutex lock; struct dentry *dbg_dir; struct list_head traces; int num_traces; struct list_head carveouts; struct list_head mappings; - u64 bootaddr; + struct completion firmware_loading_complete; + u32 bootaddr; struct list_head rvdevs; - struct list_head subdevs; struct idr notifyids; int index; struct work_struct crash_handler; unsigned int crash_cnt; + struct completion crash_comp; bool recovery_disabled; int max_notifyid; struct resource_table *table_ptr; - struct resource_table *clean_table; struct resource_table *cached_table; - size_t table_sz; bool has_iommu; bool auto_boot; - struct list_head dump_segments; - int nb_vdev; - u8 elf_class; - u16 
elf_machine; - struct cdev cdev; - bool cdev_put_on_release; -}; - -/** - * struct rproc_subdev - subdevice tied to a remoteproc - * @node: list node related to the rproc subdevs list - * @prepare: prepare function, called before the rproc is started - * @start: start function, called after the rproc has been started - * @stop: stop function, called before the rproc is stopped; the @crashed - * parameter indicates if this originates from a recovery - * @unprepare: unprepare function, called after the rproc has been stopped - */ -struct rproc_subdev { - struct list_head node; - - int (*prepare)(struct rproc_subdev *subdev); - int (*start)(struct rproc_subdev *subdev); - void (*stop)(struct rproc_subdev *subdev, bool crashed); - void (*unprepare)(struct rproc_subdev *subdev); }; /* we currently support only two vrings per rvdev */ @@ -595,6 +451,7 @@ struct rproc_subdev { /** * struct rproc_vring - remoteproc vring state * @va: virtual address + * @dma: dma address * @len: length, in bytes * @da: device address * @align: vring alignment @@ -604,6 +461,7 @@ struct rproc_subdev { */ struct rproc_vring { void *va; + dma_addr_t dma; int len; u32 da; u32 align; @@ -614,33 +472,21 @@ struct rproc_vring { /** * struct rproc_vdev - remoteproc state for a supported virtio device - * @refcount: reference counter for the vdev and vring allocations - * @subdev: handle for registering the vdev as a rproc subdevice - * @dev: device struct used for reference count semantics - * @id: virtio device id (as in virtio_ids.h) * @node: list node * @rproc: the rproc handle + * @vdev: the virio device * @vring: the vrings for this vdev * @rsc_offset: offset of the vdev's resource entry - * @index: vdev position versus other vdev declared in resource table */ struct rproc_vdev { - struct kref refcount; - - struct rproc_subdev subdev; - struct device dev; - - unsigned int id; struct list_head node; struct rproc *rproc; + struct virtio_device vdev; struct rproc_vring vring[RVDEV_NUM_VRINGS]; 
u32 rsc_offset; - u32 index; }; struct rproc *rproc_get_by_phandle(phandle phandle); -struct rproc *rproc_get_by_child(struct device *dev); - struct rproc *rproc_alloc(struct device *dev, const char *name, const struct rproc_ops *ops, const char *firmware, int len); @@ -648,45 +494,14 @@ void rproc_put(struct rproc *rproc); int rproc_add(struct rproc *rproc); int rproc_del(struct rproc *rproc); void rproc_free(struct rproc *rproc); -void rproc_resource_cleanup(struct rproc *rproc); - -struct rproc *devm_rproc_alloc(struct device *dev, const char *name, - const struct rproc_ops *ops, - const char *firmware, int len); -int devm_rproc_add(struct device *dev, struct rproc *rproc); - -void rproc_add_carveout(struct rproc *rproc, struct rproc_mem_entry *mem); - -struct rproc_mem_entry * -rproc_mem_entry_init(struct device *dev, - void *va, dma_addr_t dma, size_t len, u32 da, - int (*alloc)(struct rproc *, struct rproc_mem_entry *), - int (*release)(struct rproc *, struct rproc_mem_entry *), - const char *name, ...); - -struct rproc_mem_entry * -rproc_of_resm_mem_entry_init(struct device *dev, u32 of_resm_idx, size_t len, - u32 da, const char *name, ...); int rproc_boot(struct rproc *rproc); void rproc_shutdown(struct rproc *rproc); -int rproc_detach(struct rproc *rproc); -int rproc_set_firmware(struct rproc *rproc, const char *fw_name); void rproc_report_crash(struct rproc *rproc, enum rproc_crash_type type); -void rproc_coredump_using_sections(struct rproc *rproc); -int rproc_coredump_add_segment(struct rproc *rproc, dma_addr_t da, size_t size); -int rproc_coredump_add_custom_segment(struct rproc *rproc, - dma_addr_t da, size_t size, - void (*dumpfn)(struct rproc *rproc, - struct rproc_dump_segment *segment, - void *dest, size_t offset, - size_t size), - void *priv); -int rproc_coredump_set_elf_info(struct rproc *rproc, u8 class, u16 machine); static inline struct rproc_vdev *vdev_to_rvdev(struct virtio_device *vdev) { - return container_of(vdev->dev.parent, struct 
rproc_vdev, dev); + return container_of(vdev, struct rproc_vdev, vdev); } static inline struct rproc *vdev_to_rproc(struct virtio_device *vdev) @@ -696,8 +511,4 @@ static inline struct rproc *vdev_to_rproc(struct virtio_device *vdev) return rvdev->rproc; } -void rproc_add_subdev(struct rproc *rproc, struct rproc_subdev *subdev); - -void rproc_remove_subdev(struct rproc *rproc, struct rproc_subdev *subdev); - #endif /* REMOTEPROC_H */ diff --git a/include/linux/reservation.h b/include/linux/reservation.h new file mode 100644 index 0000000000..b0f305e77b --- /dev/null +++ b/include/linux/reservation.h @@ -0,0 +1,213 @@ +/* + * Header file for reservations for dma-buf and ttm + * + * Copyright(C) 2011 Linaro Limited. All rights reserved. + * Copyright (C) 2012-2013 Canonical Ltd + * Copyright (C) 2012 Texas Instruments + * + * Authors: + * Rob Clark + * Maarten Lankhorst + * Thomas Hellstrom + * + * Based on bo.c which bears the following copyright notice, + * but is dual licensed: + * + * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA + * All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sub license, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial portions + * of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. 
IN NO EVENT SHALL + * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, + * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR + * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE + * USE OR OTHER DEALINGS IN THE SOFTWARE. + */ +#ifndef _LINUX_RESERVATION_H +#define _LINUX_RESERVATION_H + +#include +#include +#include +#include +#include + +extern struct ww_class reservation_ww_class; +extern struct lock_class_key reservation_seqcount_class; +extern const char reservation_seqcount_string[]; + +/** + * struct reservation_object_list - a list of shared fences + * @rcu: for internal use + * @shared_count: table of shared fences + * @shared_max: for growing shared fence table + * @shared: shared fence table + */ +struct reservation_object_list { + struct rcu_head rcu; + u32 shared_count, shared_max; + struct fence __rcu *shared[]; +}; + +/** + * struct reservation_object - a reservation object manages fences for a buffer + * @lock: update side lock + * @seq: sequence count for managing RCU read-side synchronization + * @fence_excl: the exclusive fence, if there is one currently + * @fence: list of current shared fences + * @staged: staged copy of shared fences for RCU updates + */ +struct reservation_object { + struct ww_mutex lock; + seqcount_t seq; + + struct fence __rcu *fence_excl; + struct reservation_object_list __rcu *fence; + struct reservation_object_list *staged; +}; + +#define reservation_object_held(obj) lockdep_is_held(&(obj)->lock.base) +#define reservation_object_assert_held(obj) \ + lockdep_assert_held(&(obj)->lock.base) + +/** + * reservation_object_init - initialize a reservation object + * @obj: the reservation object + */ +static inline void +reservation_object_init(struct reservation_object *obj) +{ + ww_mutex_init(&obj->lock, &reservation_ww_class); + + __seqcount_init(&obj->seq, reservation_seqcount_string, &reservation_seqcount_class); + RCU_INIT_POINTER(obj->fence, NULL); + 
RCU_INIT_POINTER(obj->fence_excl, NULL); + obj->staged = NULL; +} + +/** + * reservation_object_fini - destroys a reservation object + * @obj: the reservation object + */ +static inline void +reservation_object_fini(struct reservation_object *obj) +{ + int i; + struct reservation_object_list *fobj; + struct fence *excl; + + /* + * This object should be dead and all references must have + * been released to it, so no need to be protected with rcu. + */ + excl = rcu_dereference_protected(obj->fence_excl, 1); + if (excl) + fence_put(excl); + + fobj = rcu_dereference_protected(obj->fence, 1); + if (fobj) { + for (i = 0; i < fobj->shared_count; ++i) + fence_put(rcu_dereference_protected(fobj->shared[i], 1)); + + kfree(fobj); + } + kfree(obj->staged); + + ww_mutex_destroy(&obj->lock); +} + +/** + * reservation_object_get_list - get the reservation object's + * shared fence list, with update-side lock held + * @obj: the reservation object + * + * Returns the shared fence list. Does NOT take references to + * the fence. The obj->lock must be held. + */ +static inline struct reservation_object_list * +reservation_object_get_list(struct reservation_object *obj) +{ + return rcu_dereference_protected(obj->fence, + reservation_object_held(obj)); +} + +/** + * reservation_object_get_excl - get the reservation object's + * exclusive fence, with update-side lock held + * @obj: the reservation object + * + * Returns the exclusive fence (if any). Does NOT take a + * reference. The obj->lock must be held. + * + * RETURNS + * The exclusive fence or NULL + */ +static inline struct fence * +reservation_object_get_excl(struct reservation_object *obj) +{ + return rcu_dereference_protected(obj->fence_excl, + reservation_object_held(obj)); +} + +/** + * reservation_object_get_excl_rcu - get the reservation object's + * exclusive fence, without lock held. 
+ * @obj: the reservation object + * + * If there is an exclusive fence, this atomically increments it's + * reference count and returns it. + * + * RETURNS + * The exclusive fence or NULL if none + */ +static inline struct fence * +reservation_object_get_excl_rcu(struct reservation_object *obj) +{ + struct fence *fence; + unsigned seq; +retry: + seq = read_seqcount_begin(&obj->seq); + rcu_read_lock(); + fence = rcu_dereference(obj->fence_excl); + if (read_seqcount_retry(&obj->seq, seq)) { + rcu_read_unlock(); + goto retry; + } + fence = fence_get(fence); + rcu_read_unlock(); + return fence; +} + +int reservation_object_reserve_shared(struct reservation_object *obj); +void reservation_object_add_shared_fence(struct reservation_object *obj, + struct fence *fence); + +void reservation_object_add_excl_fence(struct reservation_object *obj, + struct fence *fence); + +int reservation_object_get_fences_rcu(struct reservation_object *obj, + struct fence **pfence_excl, + unsigned *pshared_count, + struct fence ***pshared); + +long reservation_object_wait_timeout_rcu(struct reservation_object *obj, + bool wait_all, bool intr, + unsigned long timeout); + +bool reservation_object_test_signaled_rcu(struct reservation_object *obj, + bool test_all); + +#endif /* _LINUX_RESERVATION_H */ diff --git a/include/linux/reset-controller.h b/include/linux/reset-controller.h index 0fa4f60e11..db1fe6772a 100644 --- a/include/linux/reset-controller.h +++ b/include/linux/reset-controller.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_RESET_CONTROLLER_H_ #define _LINUX_RESET_CONTROLLER_H_ @@ -7,7 +6,7 @@ struct reset_controller_dev; /** - * struct reset_control_ops - reset controller driver callbacks + * struct reset_control_ops * * @reset: for self-deasserting resets, does all necessary * things to reset the device @@ -26,31 +25,6 @@ struct module; struct device_node; struct of_phandle_args; -/** - * struct reset_control_lookup - represents a single lookup entry - * 
- * @list: internal list of all reset lookup entries - * @provider: name of the reset controller device controlling this reset line - * @index: ID of the reset controller in the reset controller device - * @dev_id: name of the device associated with this reset line - * @con_id: name of the reset line (can be NULL) - */ -struct reset_control_lookup { - struct list_head list; - const char *provider; - unsigned int index; - const char *dev_id; - const char *con_id; -}; - -#define RESET_LOOKUP(_provider, _index, _dev_id, _con_id) \ - { \ - .provider = _provider, \ - .index = _index, \ - .dev_id = _dev_id, \ - .con_id = _con_id, \ - } - /** * struct reset_controller_dev - reset controller entity that might * provide multiple reset controls @@ -58,12 +32,10 @@ struct reset_control_lookup { * @owner: kernel module of the reset controller driver * @list: internal list of reset controller devices * @reset_control_head: head of internal list of requested reset controls - * @dev: corresponding driver model device struct * @of_node: corresponding device tree node as phandle target * @of_reset_n_cells: number of cells in reset line specifiers * @of_xlate: translation function to translate from specifier as found in the - * device tree to id as given to the reset control ops, defaults - * to :c:func:`of_reset_simple_xlate`. 
+ * device tree to id as given to the reset control ops * @nr_resets: number of reset controls in this reset controller device */ struct reset_controller_dev { @@ -71,7 +43,6 @@ struct reset_controller_dev { struct module *owner; struct list_head list; struct list_head reset_control_head; - struct device *dev; struct device_node *of_node; int of_reset_n_cells; int (*of_xlate)(struct reset_controller_dev *rcdev, @@ -79,7 +50,6 @@ struct reset_controller_dev { unsigned int nr_resets; }; -#if IS_ENABLED(CONFIG_RESET_CONTROLLER) int reset_controller_register(struct reset_controller_dev *rcdev); void reset_controller_unregister(struct reset_controller_dev *rcdev); @@ -87,28 +57,4 @@ struct device; int devm_reset_controller_register(struct device *dev, struct reset_controller_dev *rcdev); -void reset_controller_add_lookup(struct reset_control_lookup *lookup, - unsigned int num_entries); -#else -static inline int reset_controller_register(struct reset_controller_dev *rcdev) -{ - return 0; -} - -static inline void reset_controller_unregister(struct reset_controller_dev *rcdev) -{ -} - -static inline int devm_reset_controller_register(struct device *dev, - struct reset_controller_dev *rcdev) -{ - return 0; -} - -static inline void reset_controller_add_lookup(struct reset_control_lookup *lookup, - unsigned int num_entries) -{ -} -#endif - #endif diff --git a/include/linux/reset.h b/include/linux/reset.h index db0e6115a2..5daff15722 100644 --- a/include/linux/reset.h +++ b/include/linux/reset.h @@ -1,223 +1,88 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_RESET_H_ #define _LINUX_RESET_H_ -#include -#include -#include +#include -struct device; -struct device_node; struct reset_control; -/** - * struct reset_control_bulk_data - Data used for bulk reset control operations. 
- * - * @id: reset control consumer ID - * @rstc: struct reset_control * to store the associated reset control - * - * The reset APIs provide a series of reset_control_bulk_*() API calls as - * a convenience to consumers which require multiple reset controls. - * This structure is used to manage data for these calls. - */ -struct reset_control_bulk_data { - const char *id; - struct reset_control *rstc; -}; - #ifdef CONFIG_RESET_CONTROLLER int reset_control_reset(struct reset_control *rstc); -int reset_control_rearm(struct reset_control *rstc); int reset_control_assert(struct reset_control *rstc); int reset_control_deassert(struct reset_control *rstc); int reset_control_status(struct reset_control *rstc); -int reset_control_acquire(struct reset_control *rstc); -void reset_control_release(struct reset_control *rstc); - -int reset_control_bulk_reset(int num_rstcs, struct reset_control_bulk_data *rstcs); -int reset_control_bulk_assert(int num_rstcs, struct reset_control_bulk_data *rstcs); -int reset_control_bulk_deassert(int num_rstcs, struct reset_control_bulk_data *rstcs); -int reset_control_bulk_acquire(int num_rstcs, struct reset_control_bulk_data *rstcs); -void reset_control_bulk_release(int num_rstcs, struct reset_control_bulk_data *rstcs); struct reset_control *__of_reset_control_get(struct device_node *node, - const char *id, int index, bool shared, - bool optional, bool acquired); -struct reset_control *__reset_control_get(struct device *dev, const char *id, - int index, bool shared, - bool optional, bool acquired); + const char *id, int index, int shared); void reset_control_put(struct reset_control *rstc); -int __reset_control_bulk_get(struct device *dev, int num_rstcs, - struct reset_control_bulk_data *rstcs, - bool shared, bool optional, bool acquired); -void reset_control_bulk_put(int num_rstcs, struct reset_control_bulk_data *rstcs); - -int __device_reset(struct device *dev, bool optional); struct reset_control *__devm_reset_control_get(struct device 
*dev, - const char *id, int index, bool shared, - bool optional, bool acquired); -int __devm_reset_control_bulk_get(struct device *dev, int num_rstcs, - struct reset_control_bulk_data *rstcs, - bool shared, bool optional, bool acquired); + const char *id, int index, int shared); -struct reset_control *devm_reset_control_array_get(struct device *dev, - bool shared, bool optional); -struct reset_control *of_reset_control_array_get(struct device_node *np, - bool shared, bool optional, - bool acquired); +int __must_check device_reset(struct device *dev); -int reset_control_get_count(struct device *dev); +static inline int device_reset_optional(struct device *dev) +{ + return device_reset(dev); +} #else static inline int reset_control_reset(struct reset_control *rstc) { - return 0; -} - -static inline int reset_control_rearm(struct reset_control *rstc) -{ + WARN_ON(1); return 0; } static inline int reset_control_assert(struct reset_control *rstc) { + WARN_ON(1); return 0; } static inline int reset_control_deassert(struct reset_control *rstc) { + WARN_ON(1); return 0; } static inline int reset_control_status(struct reset_control *rstc) { + WARN_ON(1); return 0; } -static inline int reset_control_acquire(struct reset_control *rstc) -{ - return 0; -} - -static inline void reset_control_release(struct reset_control *rstc) -{ -} - static inline void reset_control_put(struct reset_control *rstc) { + WARN_ON(1); } -static inline int __device_reset(struct device *dev, bool optional) -{ - return optional ? 0 : -ENOTSUPP; -} - -static inline struct reset_control *__of_reset_control_get( - struct device_node *node, - const char *id, int index, bool shared, - bool optional, bool acquired) -{ - return optional ? NULL : ERR_PTR(-ENOTSUPP); -} - -static inline struct reset_control *__reset_control_get( - struct device *dev, const char *id, - int index, bool shared, bool optional, - bool acquired) -{ - return optional ? 
NULL : ERR_PTR(-ENOTSUPP); -} - -static inline int -reset_control_bulk_reset(int num_rstcs, struct reset_control_bulk_data *rstcs) -{ - return 0; -} - -static inline int -reset_control_bulk_assert(int num_rstcs, struct reset_control_bulk_data *rstcs) -{ - return 0; -} - -static inline int -reset_control_bulk_deassert(int num_rstcs, struct reset_control_bulk_data *rstcs) -{ - return 0; -} - -static inline int -reset_control_bulk_acquire(int num_rstcs, struct reset_control_bulk_data *rstcs) -{ - return 0; -} - -static inline void -reset_control_bulk_release(int num_rstcs, struct reset_control_bulk_data *rstcs) -{ -} - -static inline int -__reset_control_bulk_get(struct device *dev, int num_rstcs, - struct reset_control_bulk_data *rstcs, - bool shared, bool optional, bool acquired) -{ - return optional ? 0 : -EOPNOTSUPP; -} - -static inline void -reset_control_bulk_put(int num_rstcs, struct reset_control_bulk_data *rstcs) -{ -} - -static inline struct reset_control *__devm_reset_control_get( - struct device *dev, const char *id, - int index, bool shared, bool optional, - bool acquired) -{ - return optional ? NULL : ERR_PTR(-ENOTSUPP); -} - -static inline int -__devm_reset_control_bulk_get(struct device *dev, int num_rstcs, - struct reset_control_bulk_data *rstcs, - bool shared, bool optional, bool acquired) -{ - return optional ? 0 : -EOPNOTSUPP; -} - -static inline struct reset_control * -devm_reset_control_array_get(struct device *dev, bool shared, bool optional) -{ - return optional ? NULL : ERR_PTR(-ENOTSUPP); -} - -static inline struct reset_control * -of_reset_control_array_get(struct device_node *np, bool shared, bool optional, - bool acquired) -{ - return optional ? 
NULL : ERR_PTR(-ENOTSUPP); -} - -static inline int reset_control_get_count(struct device *dev) -{ - return -ENOENT; -} - -#endif /* CONFIG_RESET_CONTROLLER */ - static inline int __must_check device_reset(struct device *dev) { - return __device_reset(dev, false); + WARN_ON(1); + return -ENOTSUPP; } static inline int device_reset_optional(struct device *dev) { - return __device_reset(dev, true); + return -ENOTSUPP; } +static inline struct reset_control *__of_reset_control_get( + struct device_node *node, + const char *id, int index, int shared) +{ + return ERR_PTR(-ENOTSUPP); +} + +static inline struct reset_control *__devm_reset_control_get( + struct device *dev, + const char *id, int index, int shared) +{ + return ERR_PTR(-ENOTSUPP); +} + +#endif /* CONFIG_RESET_CONTROLLER */ + /** * reset_control_get_exclusive - Lookup and obtain an exclusive reference * to a reset controller. @@ -225,10 +90,10 @@ static inline int device_reset_optional(struct device *dev) * @id: reset line name * * Returns a struct reset_control or IS_ERR() condition containing errno. - * If this function is called more than once for the same reset_control it will + * If this function is called more then once for the same reset_control it will * return -EBUSY. * - * See reset_control_get_shared() for details on shared references to + * See reset_control_get_shared for details on shared references to * reset-controls. * * Use of id names is optional. @@ -236,87 +101,10 @@ static inline int device_reset_optional(struct device *dev) static inline struct reset_control * __must_check reset_control_get_exclusive(struct device *dev, const char *id) { - return __reset_control_get(dev, id, 0, false, false, true); -} - -/** - * reset_control_bulk_get_exclusive - Lookup and obtain exclusive references to - * multiple reset controllers. 
- * @dev: device to be reset by the controller - * @num_rstcs: number of entries in rstcs array - * @rstcs: array of struct reset_control_bulk_data with reset line names set - * - * Fills the rstcs array with pointers to exclusive reset controls and - * returns 0, or an IS_ERR() condition containing errno. - */ -static inline int __must_check -reset_control_bulk_get_exclusive(struct device *dev, int num_rstcs, - struct reset_control_bulk_data *rstcs) -{ - return __reset_control_bulk_get(dev, num_rstcs, rstcs, false, false, true); -} - -/** - * reset_control_get_exclusive_released - Lookup and obtain a temoprarily - * exclusive reference to a reset - * controller. - * @dev: device to be reset by the controller - * @id: reset line name - * - * Returns a struct reset_control or IS_ERR() condition containing errno. - * reset-controls returned by this function must be acquired via - * reset_control_acquire() before they can be used and should be released - * via reset_control_release() afterwards. - * - * Use of id names is optional. - */ -static inline struct reset_control * -__must_check reset_control_get_exclusive_released(struct device *dev, - const char *id) -{ - return __reset_control_get(dev, id, 0, false, false, false); -} - -/** - * reset_control_bulk_get_exclusive_released - Lookup and obtain temporarily - * exclusive references to multiple reset - * controllers. - * @dev: device to be reset by the controller - * @num_rstcs: number of entries in rstcs array - * @rstcs: array of struct reset_control_bulk_data with reset line names set - * - * Fills the rstcs array with pointers to exclusive reset controls and - * returns 0, or an IS_ERR() condition containing errno. - * reset-controls returned by this function must be acquired via - * reset_control_bulk_acquire() before they can be used and should be released - * via reset_control_bulk_release() afterwards. 
- */ -static inline int __must_check -reset_control_bulk_get_exclusive_released(struct device *dev, int num_rstcs, - struct reset_control_bulk_data *rstcs) -{ - return __reset_control_bulk_get(dev, num_rstcs, rstcs, false, false, false); -} - -/** - * reset_control_bulk_get_optional_exclusive_released - Lookup and obtain optional - * temporarily exclusive references to multiple - * reset controllers. - * @dev: device to be reset by the controller - * @num_rstcs: number of entries in rstcs array - * @rstcs: array of struct reset_control_bulk_data with reset line names set - * - * Optional variant of reset_control_bulk_get_exclusive_released(). If the - * requested reset is not specified in the device tree, this function returns 0 - * instead of an error and missing rtsc is set to NULL. - * - * See reset_control_bulk_get_exclusive_released() for more information. - */ -static inline int __must_check -reset_control_bulk_get_optional_exclusive_released(struct device *dev, int num_rstcs, - struct reset_control_bulk_data *rstcs) -{ - return __reset_control_bulk_get(dev, num_rstcs, rstcs, false, true, false); +#ifndef CONFIG_RESET_CONTROLLER + WARN_ON(1); +#endif + return __of_reset_control_get(dev ? dev->of_node : NULL, id, 0, 0); } /** @@ -327,7 +115,7 @@ reset_control_bulk_get_optional_exclusive_released(struct device *dev, int num_r * * Returns a struct reset_control or IS_ERR() condition containing errno. * This function is intended for use with reset-controls which are shared - * between hardware blocks. + * between hardware-blocks. 
* * When a reset-control is shared, the behavior of reset_control_assert / * deassert is changed, the reset-core will keep track of a deassert_count @@ -344,98 +132,19 @@ reset_control_bulk_get_optional_exclusive_released(struct device *dev, int num_r static inline struct reset_control *reset_control_get_shared( struct device *dev, const char *id) { - return __reset_control_get(dev, id, 0, true, false, false); + return __of_reset_control_get(dev ? dev->of_node : NULL, id, 0, 1); } -/** - * reset_control_bulk_get_shared - Lookup and obtain shared references to - * multiple reset controllers. - * @dev: device to be reset by the controller - * @num_rstcs: number of entries in rstcs array - * @rstcs: array of struct reset_control_bulk_data with reset line names set - * - * Fills the rstcs array with pointers to shared reset controls and - * returns 0, or an IS_ERR() condition containing errno. - */ -static inline int __must_check -reset_control_bulk_get_shared(struct device *dev, int num_rstcs, - struct reset_control_bulk_data *rstcs) -{ - return __reset_control_bulk_get(dev, num_rstcs, rstcs, true, false, false); -} - -/** - * reset_control_get_optional_exclusive - optional reset_control_get_exclusive() - * @dev: device to be reset by the controller - * @id: reset line name - * - * Optional variant of reset_control_get_exclusive(). If the requested reset - * is not specified in the device tree, this function returns NULL instead of - * an error. - * - * See reset_control_get_exclusive() for more information. - */ static inline struct reset_control *reset_control_get_optional_exclusive( struct device *dev, const char *id) { - return __reset_control_get(dev, id, 0, false, true, true); + return __of_reset_control_get(dev ? 
dev->of_node : NULL, id, 0, 0); } -/** - * reset_control_bulk_get_optional_exclusive - optional - * reset_control_bulk_get_exclusive() - * @dev: device to be reset by the controller - * @num_rstcs: number of entries in rstcs array - * @rstcs: array of struct reset_control_bulk_data with reset line names set - * - * Optional variant of reset_control_bulk_get_exclusive(). If any of the - * requested resets are not specified in the device tree, this function sets - * them to NULL instead of returning an error. - * - * See reset_control_bulk_get_exclusive() for more information. - */ -static inline int __must_check -reset_control_bulk_get_optional_exclusive(struct device *dev, int num_rstcs, - struct reset_control_bulk_data *rstcs) -{ - return __reset_control_bulk_get(dev, num_rstcs, rstcs, false, true, true); -} - -/** - * reset_control_get_optional_shared - optional reset_control_get_shared() - * @dev: device to be reset by the controller - * @id: reset line name - * - * Optional variant of reset_control_get_shared(). If the requested reset - * is not specified in the device tree, this function returns NULL instead of - * an error. - * - * See reset_control_get_shared() for more information. - */ static inline struct reset_control *reset_control_get_optional_shared( struct device *dev, const char *id) { - return __reset_control_get(dev, id, 0, true, true, false); -} - -/** - * reset_control_bulk_get_optional_shared - optional - * reset_control_bulk_get_shared() - * @dev: device to be reset by the controller - * @num_rstcs: number of entries in rstcs array - * @rstcs: array of struct reset_control_bulk_data with reset line names set - * - * Optional variant of reset_control_bulk_get_shared(). If the requested resets - * are not specified in the device tree, this function sets them to NULL - * instead of returning an error. - * - * See reset_control_bulk_get_shared() for more information. 
- */ -static inline int __must_check -reset_control_bulk_get_optional_shared(struct device *dev, int num_rstcs, - struct reset_control_bulk_data *rstcs) -{ - return __reset_control_bulk_get(dev, num_rstcs, rstcs, true, true, false); + return __of_reset_control_get(dev ? dev->of_node : NULL, id, 0, 1); } /** @@ -451,11 +160,11 @@ reset_control_bulk_get_optional_shared(struct device *dev, int num_rstcs, static inline struct reset_control *of_reset_control_get_exclusive( struct device_node *node, const char *id) { - return __of_reset_control_get(node, id, 0, false, false, true); + return __of_reset_control_get(node, id, 0, 0); } /** - * of_reset_control_get_shared - Lookup and obtain a shared reference + * of_reset_control_get_shared - Lookup and obtain an shared reference * to a reset controller. * @node: device to be reset by the controller * @id: reset line name @@ -476,7 +185,7 @@ static inline struct reset_control *of_reset_control_get_exclusive( static inline struct reset_control *of_reset_control_get_shared( struct device_node *node, const char *id) { - return __of_reset_control_get(node, id, 0, true, false, false); + return __of_reset_control_get(node, id, 0, 1); } /** @@ -493,11 +202,11 @@ static inline struct reset_control *of_reset_control_get_shared( static inline struct reset_control *of_reset_control_get_exclusive_by_index( struct device_node *node, int index) { - return __of_reset_control_get(node, NULL, index, false, false, true); + return __of_reset_control_get(node, NULL, index, 0); } /** - * of_reset_control_get_shared_by_index - Lookup and obtain a shared + * of_reset_control_get_shared_by_index - Lookup and obtain an shared * reference to a reset controller * by index. 
* @node: device to be reset by the controller @@ -521,7 +230,7 @@ static inline struct reset_control *of_reset_control_get_exclusive_by_index( static inline struct reset_control *of_reset_control_get_shared_by_index( struct device_node *node, int index) { - return __of_reset_control_get(node, NULL, index, true, false, false); + return __of_reset_control_get(node, NULL, index, 1); } /** @@ -540,105 +249,10 @@ static inline struct reset_control * __must_check devm_reset_control_get_exclusive(struct device *dev, const char *id) { - return __devm_reset_control_get(dev, id, 0, false, false, true); -} - -/** - * devm_reset_control_bulk_get_exclusive - resource managed - * reset_control_bulk_get_exclusive() - * @dev: device to be reset by the controller - * @num_rstcs: number of entries in rstcs array - * @rstcs: array of struct reset_control_bulk_data with reset line names set - * - * Managed reset_control_bulk_get_exclusive(). For reset controllers returned - * from this function, reset_control_put() is called automatically on driver - * detach. - * - * See reset_control_bulk_get_exclusive() for more information. - */ -static inline int __must_check -devm_reset_control_bulk_get_exclusive(struct device *dev, int num_rstcs, - struct reset_control_bulk_data *rstcs) -{ - return __devm_reset_control_bulk_get(dev, num_rstcs, rstcs, false, false, true); -} - -/** - * devm_reset_control_get_exclusive_released - resource managed - * reset_control_get_exclusive_released() - * @dev: device to be reset by the controller - * @id: reset line name - * - * Managed reset_control_get_exclusive_released(). For reset controllers - * returned from this function, reset_control_put() is called automatically on - * driver detach. - * - * See reset_control_get_exclusive_released() for more information. 
- */ -static inline struct reset_control * -__must_check devm_reset_control_get_exclusive_released(struct device *dev, - const char *id) -{ - return __devm_reset_control_get(dev, id, 0, false, false, false); -} - -/** - * devm_reset_control_bulk_get_exclusive_released - resource managed - * reset_control_bulk_get_exclusive_released() - * @dev: device to be reset by the controller - * @num_rstcs: number of entries in rstcs array - * @rstcs: array of struct reset_control_bulk_data with reset line names set - * - * Managed reset_control_bulk_get_exclusive_released(). For reset controllers - * returned from this function, reset_control_put() is called automatically on - * driver detach. - * - * See reset_control_bulk_get_exclusive_released() for more information. - */ -static inline int __must_check -devm_reset_control_bulk_get_exclusive_released(struct device *dev, int num_rstcs, - struct reset_control_bulk_data *rstcs) -{ - return __devm_reset_control_bulk_get(dev, num_rstcs, rstcs, false, false, false); -} - -/** - * devm_reset_control_get_optional_exclusive_released - resource managed - * reset_control_get_optional_exclusive_released() - * @dev: device to be reset by the controller - * @id: reset line name - * - * Managed-and-optional variant of reset_control_get_exclusive_released(). For - * reset controllers returned from this function, reset_control_put() is called - * automatically on driver detach. - * - * See reset_control_get_exclusive_released() for more information. 
- */ -static inline struct reset_control * -__must_check devm_reset_control_get_optional_exclusive_released(struct device *dev, - const char *id) -{ - return __devm_reset_control_get(dev, id, 0, false, true, false); -} - -/** - * devm_reset_control_bulk_get_optional_exclusive_released - resource managed - * reset_control_bulk_optional_get_exclusive_released() - * @dev: device to be reset by the controller - * @num_rstcs: number of entries in rstcs array - * @rstcs: array of struct reset_control_bulk_data with reset line names set - * - * Managed reset_control_bulk_optional_get_exclusive_released(). For reset - * controllers returned from this function, reset_control_put() is called - * automatically on driver detach. - * - * See reset_control_bulk_optional_get_exclusive_released() for more information. - */ -static inline int __must_check -devm_reset_control_bulk_get_optional_exclusive_released(struct device *dev, int num_rstcs, - struct reset_control_bulk_data *rstcs) -{ - return __devm_reset_control_bulk_get(dev, num_rstcs, rstcs, false, true, false); +#ifndef CONFIG_RESET_CONTROLLER + WARN_ON(1); +#endif + return __devm_reset_control_get(dev, id, 0, 0); } /** @@ -653,103 +267,19 @@ devm_reset_control_bulk_get_optional_exclusive_released(struct device *dev, int static inline struct reset_control *devm_reset_control_get_shared( struct device *dev, const char *id) { - return __devm_reset_control_get(dev, id, 0, true, false, false); + return __devm_reset_control_get(dev, id, 0, 1); } -/** - * devm_reset_control_bulk_get_shared - resource managed - * reset_control_bulk_get_shared() - * @dev: device to be reset by the controller - * @num_rstcs: number of entries in rstcs array - * @rstcs: array of struct reset_control_bulk_data with reset line names set - * - * Managed reset_control_bulk_get_shared(). For reset controllers returned - * from this function, reset_control_put() is called automatically on driver - * detach. 
- * - * See reset_control_bulk_get_shared() for more information. - */ -static inline int __must_check -devm_reset_control_bulk_get_shared(struct device *dev, int num_rstcs, - struct reset_control_bulk_data *rstcs) -{ - return __devm_reset_control_bulk_get(dev, num_rstcs, rstcs, true, false, false); -} - -/** - * devm_reset_control_get_optional_exclusive - resource managed - * reset_control_get_optional_exclusive() - * @dev: device to be reset by the controller - * @id: reset line name - * - * Managed reset_control_get_optional_exclusive(). For reset controllers - * returned from this function, reset_control_put() is called automatically on - * driver detach. - * - * See reset_control_get_optional_exclusive() for more information. - */ static inline struct reset_control *devm_reset_control_get_optional_exclusive( struct device *dev, const char *id) { - return __devm_reset_control_get(dev, id, 0, false, true, true); + return __devm_reset_control_get(dev, id, 0, 0); } -/** - * devm_reset_control_bulk_get_optional_exclusive - resource managed - * reset_control_bulk_get_optional_exclusive() - * @dev: device to be reset by the controller - * @num_rstcs: number of entries in rstcs array - * @rstcs: array of struct reset_control_bulk_data with reset line names set - * - * Managed reset_control_bulk_get_optional_exclusive(). For reset controllers - * returned from this function, reset_control_put() is called automatically on - * driver detach. - * - * See reset_control_bulk_get_optional_exclusive() for more information. 
- */ -static inline int __must_check -devm_reset_control_bulk_get_optional_exclusive(struct device *dev, int num_rstcs, - struct reset_control_bulk_data *rstcs) -{ - return __devm_reset_control_bulk_get(dev, num_rstcs, rstcs, true, false, true); -} - -/** - * devm_reset_control_get_optional_shared - resource managed - * reset_control_get_optional_shared() - * @dev: device to be reset by the controller - * @id: reset line name - * - * Managed reset_control_get_optional_shared(). For reset controllers returned - * from this function, reset_control_put() is called automatically on driver - * detach. - * - * See reset_control_get_optional_shared() for more information. - */ static inline struct reset_control *devm_reset_control_get_optional_shared( struct device *dev, const char *id) { - return __devm_reset_control_get(dev, id, 0, true, true, false); -} - -/** - * devm_reset_control_bulk_get_optional_shared - resource managed - * reset_control_bulk_get_optional_shared() - * @dev: device to be reset by the controller - * @num_rstcs: number of entries in rstcs array - * @rstcs: array of struct reset_control_bulk_data with reset line names set - * - * Managed reset_control_bulk_get_optional_shared(). For reset controllers - * returned from this function, reset_control_put() is called automatically on - * driver detach. - * - * See reset_control_bulk_get_optional_shared() for more information. 
- */ -static inline int __must_check -devm_reset_control_bulk_get_optional_shared(struct device *dev, int num_rstcs, - struct reset_control_bulk_data *rstcs) -{ - return __devm_reset_control_bulk_get(dev, num_rstcs, rstcs, true, true, false); + return __devm_reset_control_get(dev, id, 0, 1); } /** @@ -767,12 +297,12 @@ devm_reset_control_bulk_get_optional_shared(struct device *dev, int num_rstcs, static inline struct reset_control * devm_reset_control_get_exclusive_by_index(struct device *dev, int index) { - return __devm_reset_control_get(dev, NULL, index, false, false, true); + return __devm_reset_control_get(dev, NULL, index, 0); } /** * devm_reset_control_get_shared_by_index - resource managed - * reset_control_get_shared + * reset_control_get_shared * @dev: device to be reset by the controller * @index: index of the reset controller * @@ -783,7 +313,7 @@ devm_reset_control_get_exclusive_by_index(struct device *dev, int index) static inline struct reset_control * devm_reset_control_get_shared_by_index(struct device *dev, int index) { - return __devm_reset_control_get(dev, NULL, index, true, false, false); + return __devm_reset_control_get(dev, NULL, index, 1); } /* @@ -794,6 +324,18 @@ devm_reset_control_get_shared_by_index(struct device *dev, int index) * These inline function calls will be removed once all consumers * have been moved over to the new explicit API. 
*/ +static inline struct reset_control *reset_control_get( + struct device *dev, const char *id) +{ + return reset_control_get_exclusive(dev, id); +} + +static inline struct reset_control *reset_control_get_optional( + struct device *dev, const char *id) +{ + return reset_control_get_optional_exclusive(dev, id); +} + static inline struct reset_control *of_reset_control_get( struct device_node *node, const char *id) { @@ -824,61 +366,4 @@ static inline struct reset_control *devm_reset_control_get_by_index( { return devm_reset_control_get_exclusive_by_index(dev, index); } - -/* - * APIs to manage a list of reset controllers - */ -static inline struct reset_control * -devm_reset_control_array_get_exclusive(struct device *dev) -{ - return devm_reset_control_array_get(dev, false, false); -} - -static inline struct reset_control * -devm_reset_control_array_get_shared(struct device *dev) -{ - return devm_reset_control_array_get(dev, true, false); -} - -static inline struct reset_control * -devm_reset_control_array_get_optional_exclusive(struct device *dev) -{ - return devm_reset_control_array_get(dev, false, true); -} - -static inline struct reset_control * -devm_reset_control_array_get_optional_shared(struct device *dev) -{ - return devm_reset_control_array_get(dev, true, true); -} - -static inline struct reset_control * -of_reset_control_array_get_exclusive(struct device_node *node) -{ - return of_reset_control_array_get(node, false, false, true); -} - -static inline struct reset_control * -of_reset_control_array_get_exclusive_released(struct device_node *node) -{ - return of_reset_control_array_get(node, false, false, false); -} - -static inline struct reset_control * -of_reset_control_array_get_shared(struct device_node *node) -{ - return of_reset_control_array_get(node, true, false, true); -} - -static inline struct reset_control * -of_reset_control_array_get_optional_exclusive(struct device_node *node) -{ - return of_reset_control_array_get(node, false, true, true); 
-} - -static inline struct reset_control * -of_reset_control_array_get_optional_shared(struct device_node *node) -{ - return of_reset_control_array_get(node, true, true, true); -} #endif diff --git a/include/linux/resource.h b/include/linux/resource.h index bdf491cbca..5bc3116e64 100644 --- a/include/linux/resource.h +++ b/include/linux/resource.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_RESOURCE_H #define _LINUX_RESOURCE_H @@ -7,7 +6,7 @@ struct task_struct; -void getrusage(struct task_struct *p, int who, struct rusage *ru); +int getrusage(struct task_struct *p, int who, struct rusage __user *ru); int do_prlimit(struct task_struct *tsk, unsigned int resource, struct rlimit *new_rlim, struct rlimit *old_rlim); diff --git a/include/linux/resource_ext.h b/include/linux/resource_ext.h index ff0339df56..e2bf63d881 100644 --- a/include/linux/resource_ext.h +++ b/include/linux/resource_ext.h @@ -1,7 +1,15 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright (C) 2015, Intel Corporation * Author: Jiang Liu + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
*/ #ifndef _LINUX_RESOURCE_EXT_H #define _LINUX_RESOURCE_EXT_H @@ -66,16 +74,4 @@ resource_list_destroy_entry(struct resource_entry *entry) #define resource_list_for_each_entry_safe(entry, tmp, list) \ list_for_each_entry_safe((entry), (tmp), (list), node) -static inline struct resource_entry * -resource_list_first_type(struct list_head *list, unsigned long type) -{ - struct resource_entry *entry; - - resource_list_for_each_entry(entry, list) { - if (resource_type(entry->res) == type) - return entry; - } - return NULL; -} - #endif /* _LINUX_RESOURCE_EXT_H */ diff --git a/include/linux/rfkill-regulator.h b/include/linux/rfkill-regulator.h new file mode 100644 index 0000000000..aca36bc833 --- /dev/null +++ b/include/linux/rfkill-regulator.h @@ -0,0 +1,48 @@ +/* + * rfkill-regulator.c - Regulator consumer driver for rfkill + * + * Copyright (C) 2009 Guiming Zhuo + * Copyright (C) 2011 Antonio Ospite + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ * + */ + +#ifndef __LINUX_RFKILL_REGULATOR_H +#define __LINUX_RFKILL_REGULATOR_H + +/* + * Use "vrfkill" as supply id when declaring the regulator consumer: + * + * static struct regulator_consumer_supply pcap_regulator_V6_consumers [] = { + * { .dev_name = "rfkill-regulator.0", .supply = "vrfkill" }, + * }; + * + * If you have several regulator driven rfkill, you can append a numerical id to + * .dev_name as done above, and use the same id when declaring the platform + * device: + * + * static struct rfkill_regulator_platform_data ezx_rfkill_bt_data = { + * .name = "ezx-bluetooth", + * .type = RFKILL_TYPE_BLUETOOTH, + * }; + * + * static struct platform_device a910_rfkill = { + * .name = "rfkill-regulator", + * .id = 0, + * .dev = { + * .platform_data = &ezx_rfkill_bt_data, + * }, + * }; + */ + +#include + +struct rfkill_regulator_platform_data { + char *name; /* the name for the rfkill switch */ + enum rfkill_type type; /* the type as specified in rfkill.h */ +}; + +#endif /* __LINUX_RFKILL_REGULATOR_H */ diff --git a/include/linux/rfkill.h b/include/linux/rfkill.h index 231e06b74b..e6a0031d1b 100644 --- a/include/linux/rfkill.h +++ b/include/linux/rfkill.h @@ -66,7 +66,7 @@ struct rfkill_ops { #if defined(CONFIG_RFKILL) || defined(CONFIG_RFKILL_MODULE) /** - * rfkill_alloc - Allocate rfkill structure + * rfkill_alloc - allocate rfkill structure * @name: name of the struct -- the string is not copied internally * @parent: device that has rf switch on it * @type: type of the switch (RFKILL_TYPE_*) @@ -112,7 +112,7 @@ void rfkill_pause_polling(struct rfkill *rfkill); /** * rfkill_resume_polling(struct rfkill *rfkill) * - * Resume polling + * Pause polling -- say transmitter is off for other reasons. 
* NOTE: not necessary for suspend/resume -- in that case the * core stops polling anyway */ @@ -130,28 +130,17 @@ void rfkill_resume_polling(struct rfkill *rfkill); void rfkill_unregister(struct rfkill *rfkill); /** - * rfkill_destroy - Free rfkill structure + * rfkill_destroy - free rfkill structure * @rfkill: rfkill structure to be destroyed * * Destroys the rfkill structure. */ void rfkill_destroy(struct rfkill *rfkill); -/** - * rfkill_set_hw_state_reason - Set the internal rfkill hardware block state - * with a reason - * @rfkill: pointer to the rfkill class to modify. - * @blocked: the current hardware block state to set - * @reason: one of &enum rfkill_hard_block_reasons - * - * Prefer to use rfkill_set_hw_state if you don't need any special reason. - */ -bool rfkill_set_hw_state_reason(struct rfkill *rfkill, - bool blocked, unsigned long reason); /** * rfkill_set_hw_state - Set the internal rfkill hardware block state * @rfkill: pointer to the rfkill class to modify. - * @blocked: the current hardware block state to set + * @state: the current hardware block state to set * * rfkill drivers that get events when the hard-blocked state changes * use this function to notify the rfkill core (and through that also @@ -167,16 +156,12 @@ bool rfkill_set_hw_state_reason(struct rfkill *rfkill, * should be blocked) so that drivers need not keep track of the soft * block state -- which they might not be able to. */ -static inline bool rfkill_set_hw_state(struct rfkill *rfkill, bool blocked) -{ - return rfkill_set_hw_state_reason(rfkill, blocked, - RFKILL_HARD_BLOCK_SIGNAL); -} +bool rfkill_set_hw_state(struct rfkill *rfkill, bool blocked); /** * rfkill_set_sw_state - Set the internal rfkill software block state * @rfkill: pointer to the rfkill class to modify. 
- * @blocked: the current software block state to set + * @state: the current software block state to set * * rfkill drivers that get events when the soft-blocked state changes * (yes, some platforms directly act on input but allow changing again) @@ -198,7 +183,7 @@ bool rfkill_set_sw_state(struct rfkill *rfkill, bool blocked); /** * rfkill_init_sw_state - Initialize persistent software block state * @rfkill: pointer to the rfkill class to modify. - * @blocked: the current software block state to set + * @state: the current software block state to set * * rfkill drivers that preserve their software block state over power off * use this function to notify the rfkill core (and through that also @@ -223,17 +208,17 @@ void rfkill_init_sw_state(struct rfkill *rfkill, bool blocked); void rfkill_set_states(struct rfkill *rfkill, bool sw, bool hw); /** - * rfkill_blocked - Query rfkill block state + * rfkill_blocked - query rfkill block * * @rfkill: rfkill struct to query */ bool rfkill_blocked(struct rfkill *rfkill); /** - * rfkill_find_type - Helper for finding rfkill type by name + * rfkill_find_type - Helpper for finding rfkill type by name * @name: the name of the type * - * Returns enum rfkill_type that corresponds to the name. + * Returns enum rfkill_type that conrresponds the name. 
*/ enum rfkill_type rfkill_find_type(const char *name); @@ -271,13 +256,6 @@ static inline void rfkill_destroy(struct rfkill *rfkill) { } -static inline bool rfkill_set_hw_state_reason(struct rfkill *rfkill, - bool blocked, - unsigned long reason) -{ - return blocked; -} - static inline bool rfkill_set_hw_state(struct rfkill *rfkill, bool blocked) { return blocked; @@ -318,7 +296,7 @@ static inline enum rfkill_type rfkill_find_type(const char *name) const char *rfkill_get_led_trigger_name(struct rfkill *rfkill); /** - * rfkill_set_led_trigger_name - Set the LED trigger name + * rfkill_set_led_trigger_name -- set the LED trigger name * @rfkill: rfkill struct * @name: LED trigger name * diff --git a/include/linux/rhashtable.h b/include/linux/rhashtable.h index 68dab3e08a..5c132d3188 100644 --- a/include/linux/rhashtable.h +++ b/include/linux/rhashtable.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ /* * Resizable, Scalable, Concurrent Hash Table * @@ -18,98 +17,202 @@ #ifndef _LINUX_RHASHTABLE_H #define _LINUX_RHASHTABLE_H +#include +#include #include #include #include #include #include -#include -#include +#include +#include -#include /* - * Objects in an rhashtable have an embedded struct rhash_head - * which is linked into as hash chain from the hash table - or one - * of two or more hash tables when the rhashtable is being resized. * The end of the chain is marked with a special nulls marks which has - * the least significant bit set but otherwise stores the address of - * the hash bucket. This allows us to be sure we've found the end - * of the right list. - * The value stored in the hash bucket has BIT(0) used as a lock bit. - * This bit must be atomically set before any changes are made to - * the chain. To avoid dereferencing this pointer without clearing - * the bit first, we use an opaque 'struct rhash_lock_head *' for the - * pointer stored in the bucket. 
This struct needs to be defined so - * that rcu_dereference() works on it, but it has no content so a - * cast is needed for it to be useful. This ensures it isn't - * used by mistake with clearing the lock bit first. + * the following format: + * + * +-------+-----------------------------------------------------+-+ + * | Base | Hash |1| + * +-------+-----------------------------------------------------+-+ + * + * Base (4 bits) : Reserved to distinguish between multiple tables. + * Specified via &struct rhashtable_params.nulls_base. + * Hash (27 bits): Full hash (unmasked) of first element added to bucket + * 1 (1 bit) : Nulls marker (always set) + * + * The remaining bits of the next pointer remain unused for now. */ -struct rhash_lock_head {}; +#define RHT_BASE_BITS 4 +#define RHT_HASH_BITS 27 +#define RHT_BASE_SHIFT RHT_HASH_BITS -/* Maximum chain length before rehash - * - * The maximum (not average) chain length grows with the size of the hash - * table, at a rate of (log N)/(log log N). - * - * The value of 16 is selected so that even if the hash table grew to - * 2^32 you would not expect the maximum chain length to exceed it - * unless we are under attack (or extremely unlucky). - * - * As this limit is only to detect attacks, we don't need to set it to a - * lower value as you'd need the chain length to vastly exceed 16 to have - * any real effect on the system. - */ -#define RHT_ELASTICITY 16u +/* Base bits plus 1 bit for nulls marker */ +#define RHT_HASH_RESERVED_SPACE (RHT_BASE_BITS + 1) + +struct rhash_head { + struct rhash_head __rcu *next; +}; + +struct rhlist_head { + struct rhash_head rhead; + struct rhlist_head __rcu *next; +}; /** * struct bucket_table - Table of hash buckets * @size: Number of hash buckets - * @nest: Number of bits of first-level nested table. 
* @rehash: Current bucket being rehashed * @hash_rnd: Random seed to fold into hash + * @locks_mask: Mask to apply before accessing locks[] + * @locks: Array of spinlocks protecting individual buckets * @walkers: List of active walkers * @rcu: RCU structure for freeing the table * @future_tbl: Table under construction during rehashing - * @ntbl: Nested table used when out of memory. * @buckets: size * hash buckets */ struct bucket_table { unsigned int size; - unsigned int nest; + unsigned int rehash; u32 hash_rnd; + unsigned int locks_mask; + spinlock_t *locks; struct list_head walkers; struct rcu_head rcu; struct bucket_table __rcu *future_tbl; - struct lockdep_map dep_map; - - struct rhash_lock_head __rcu *buckets[] ____cacheline_aligned_in_smp; + struct rhash_head __rcu *buckets[] ____cacheline_aligned_in_smp; }; -/* - * NULLS_MARKER() expects a hash value with the low - * bits mostly likely to be significant, and it discards - * the msb. - * We give it an address, in which the bottom bit is - * always 0, and the msb might be significant. - * So we shift the address down one bit to align with - * expectations and avoid losing a significant bit. - * - * We never store the NULLS_MARKER in the hash table - * itself as we need the lsb for locking. 
- * Instead we store a NULL +/** + * struct rhashtable_compare_arg - Key for the function rhashtable_compare + * @ht: Hash table + * @key: Key to compare against */ -#define RHT_NULLS_MARKER(ptr) \ - ((void *)NULLS_MARKER(((unsigned long) (ptr)) >> 1)) -#define INIT_RHT_NULLS_HEAD(ptr) \ - ((ptr) = NULL) +struct rhashtable_compare_arg { + struct rhashtable *ht; + const void *key; +}; + +typedef u32 (*rht_hashfn_t)(const void *data, u32 len, u32 seed); +typedef u32 (*rht_obj_hashfn_t)(const void *data, u32 len, u32 seed); +typedef int (*rht_obj_cmpfn_t)(struct rhashtable_compare_arg *arg, + const void *obj); + +struct rhashtable; + +/** + * struct rhashtable_params - Hash table construction parameters + * @nelem_hint: Hint on number of elements, should be 75% of desired size + * @key_len: Length of key + * @key_offset: Offset of key in struct to be hashed + * @head_offset: Offset of rhash_head in struct to be hashed + * @insecure_max_entries: Maximum number of entries (may be exceeded) + * @max_size: Maximum size while expanding + * @min_size: Minimum size while shrinking + * @nulls_base: Base value to generate nulls marker + * @insecure_elasticity: Set to true to disable chain length checks + * @automatic_shrinking: Enable automatic shrinking of tables + * @locks_mul: Number of bucket locks to allocate per cpu (default: 128) + * @hashfn: Hash function (default: jhash2 if !(key_len % 4), or jhash) + * @obj_hashfn: Function to hash object + * @obj_cmpfn: Function to compare key with object + */ +struct rhashtable_params { + size_t nelem_hint; + size_t key_len; + size_t key_offset; + size_t head_offset; + unsigned int insecure_max_entries; + unsigned int max_size; + unsigned int min_size; + u32 nulls_base; + bool insecure_elasticity; + bool automatic_shrinking; + size_t locks_mul; + rht_hashfn_t hashfn; + rht_obj_hashfn_t obj_hashfn; + rht_obj_cmpfn_t obj_cmpfn; +}; + +/** + * struct rhashtable - Hash table handle + * @tbl: Bucket table + * @nelems: Number of elements 
in table + * @key_len: Key length for hashfn + * @elasticity: Maximum chain length before rehash + * @p: Configuration parameters + * @rhlist: True if this is an rhltable + * @run_work: Deferred worker to expand/shrink asynchronously + * @mutex: Mutex to protect current/future table swapping + * @lock: Spin lock to protect walker list + */ +struct rhashtable { + struct bucket_table __rcu *tbl; + atomic_t nelems; + unsigned int key_len; + unsigned int elasticity; + struct rhashtable_params p; + bool rhlist; + struct work_struct run_work; + struct mutex mutex; + spinlock_t lock; +}; + +/** + * struct rhltable - Hash table with duplicate objects in a list + * @ht: Underlying rhtable + */ +struct rhltable { + struct rhashtable ht; +}; + +/** + * struct rhashtable_walker - Hash table walker + * @list: List entry on list of walkers + * @tbl: The table that we were walking over + */ +struct rhashtable_walker { + struct list_head list; + struct bucket_table *tbl; +}; + +/** + * struct rhashtable_iter - Hash table iterator + * @ht: Table to iterate through + * @p: Current pointer + * @list: Current hash list pointer + * @walker: Associated rhashtable walker + * @slot: Current slot + * @skip: Number of entries to skip in slot + */ +struct rhashtable_iter { + struct rhashtable *ht; + struct rhash_head *p; + struct rhlist_head *list; + struct rhashtable_walker walker; + unsigned int slot; + unsigned int skip; +}; + +static inline unsigned long rht_marker(const struct rhashtable *ht, u32 hash) +{ + return NULLS_MARKER(ht->p.nulls_base + hash); +} + +#define INIT_RHT_NULLS_HEAD(ptr, ht, hash) \ + ((ptr) = (typeof(ptr)) rht_marker(ht, hash)) static inline bool rht_is_a_nulls(const struct rhash_head *ptr) { return ((unsigned long) ptr & 1); } +static inline unsigned long rht_get_nulls_value(const struct rhash_head *ptr) +{ + return ((unsigned long) ptr) >> 1; +} + static inline void *rht_obj(const struct rhashtable *ht, const struct rhash_head *he) { @@ -119,44 +222,36 @@ static 
inline void *rht_obj(const struct rhashtable *ht, static inline unsigned int rht_bucket_index(const struct bucket_table *tbl, unsigned int hash) { - return hash & (tbl->size - 1); -} - -static inline unsigned int rht_key_get_hash(struct rhashtable *ht, - const void *key, const struct rhashtable_params params, - unsigned int hash_rnd) -{ - unsigned int hash; - - /* params must be equal to ht->p if it isn't constant. */ - if (!__builtin_constant_p(params.key_len)) - hash = ht->p.hashfn(key, ht->key_len, hash_rnd); - else if (params.key_len) { - unsigned int key_len = params.key_len; - - if (params.hashfn) - hash = params.hashfn(key, key_len, hash_rnd); - else if (key_len & (sizeof(u32) - 1)) - hash = jhash(key, key_len, hash_rnd); - else - hash = jhash2(key, key_len / sizeof(u32), hash_rnd); - } else { - unsigned int key_len = ht->p.key_len; - - if (params.hashfn) - hash = params.hashfn(key, key_len, hash_rnd); - else - hash = jhash(key, key_len, hash_rnd); - } - - return hash; + return (hash >> RHT_HASH_RESERVED_SPACE) & (tbl->size - 1); } static inline unsigned int rht_key_hashfn( struct rhashtable *ht, const struct bucket_table *tbl, const void *key, const struct rhashtable_params params) { - unsigned int hash = rht_key_get_hash(ht, key, params, tbl->hash_rnd); + unsigned int hash; + + /* params must be equal to ht->p if it isn't constant. 
*/ + if (!__builtin_constant_p(params.key_len)) + hash = ht->p.hashfn(key, ht->key_len, tbl->hash_rnd); + else if (params.key_len) { + unsigned int key_len = params.key_len; + + if (params.hashfn) + hash = params.hashfn(key, key_len, tbl->hash_rnd); + else if (key_len & (sizeof(u32) - 1)) + hash = jhash(key, key_len, tbl->hash_rnd); + else + hash = jhash2(key, key_len / sizeof(u32), + tbl->hash_rnd); + } else { + unsigned int key_len = ht->p.key_len; + + if (params.hashfn) + hash = params.hashfn(key, key_len, tbl->hash_rnd); + else + hash = jhash(key, key_len, tbl->hash_rnd); + } return rht_bucket_index(tbl, hash); } @@ -220,7 +315,27 @@ static inline bool rht_grow_above_100(const struct rhashtable *ht, static inline bool rht_grow_above_max(const struct rhashtable *ht, const struct bucket_table *tbl) { - return atomic_read(&ht->nelems) >= ht->max_elems; + return ht->p.insecure_max_entries && + atomic_read(&ht->nelems) >= ht->p.insecure_max_entries; +} + +/* The bucket lock is selected based on the hash and protects mutations + * on a group of hash buckets. + * + * A maximum of tbl->size/2 bucket locks is allocated. This ensures that + * a single lock always covers both buckets which may both contains + * entries which link to the same bucket of the old table during resizing. + * This allows to simplify the locking as locking the bucket in both + * tables during resize always guarantee protection. + * + * IMPORTANT: When holding the bucket lock of both the old and new table + * during expansions and shrinking, the old bucket lock must always be + * acquired first. 
+ */ +static inline spinlock_t *rht_bucket_lock(const struct bucket_table *tbl, + unsigned int hash) +{ + return &tbl->locks[hash & tbl->locks_mask]; } #ifdef CONFIG_PROVE_LOCKING @@ -239,21 +354,19 @@ static inline int lockdep_rht_bucket_is_held(const struct bucket_table *tbl, } #endif /* CONFIG_PROVE_LOCKING */ +int rhashtable_init(struct rhashtable *ht, + const struct rhashtable_params *params); +int rhltable_init(struct rhltable *hlt, + const struct rhashtable_params *params); + void *rhashtable_insert_slow(struct rhashtable *ht, const void *key, struct rhash_head *obj); void rhashtable_walk_enter(struct rhashtable *ht, struct rhashtable_iter *iter); void rhashtable_walk_exit(struct rhashtable_iter *iter); -int rhashtable_walk_start_check(struct rhashtable_iter *iter) __acquires(RCU); - -static inline void rhashtable_walk_start(struct rhashtable_iter *iter) -{ - (void)rhashtable_walk_start_check(iter); -} - +int rhashtable_walk_start(struct rhashtable_iter *iter) __acquires(RCU); void *rhashtable_walk_next(struct rhashtable_iter *iter); -void *rhashtable_walk_peek(struct rhashtable_iter *iter); void rhashtable_walk_stop(struct rhashtable_iter *iter) __releases(RCU); void rhashtable_free_and_destroy(struct rhashtable *ht, @@ -261,13 +374,6 @@ void rhashtable_free_and_destroy(struct rhashtable *ht, void *arg); void rhashtable_destroy(struct rhashtable *ht); -struct rhash_lock_head __rcu **rht_bucket_nested( - const struct bucket_table *tbl, unsigned int hash); -struct rhash_lock_head __rcu **__rht_bucket_nested( - const struct bucket_table *tbl, unsigned int hash); -struct rhash_lock_head __rcu **rht_bucket_nested_insert( - struct rhashtable *ht, struct bucket_table *tbl, unsigned int hash); - #define rht_dereference(p, ht) \ rcu_dereference_protected(p, lockdep_rht_mutex_is_held(ht)) @@ -283,137 +389,16 @@ struct rhash_lock_head __rcu **rht_bucket_nested_insert( #define rht_entry(tpos, pos, member) \ ({ tpos = container_of(pos, typeof(*tpos), member); 1; }) 
-static inline struct rhash_lock_head __rcu *const *rht_bucket( - const struct bucket_table *tbl, unsigned int hash) -{ - return unlikely(tbl->nest) ? rht_bucket_nested(tbl, hash) : - &tbl->buckets[hash]; -} - -static inline struct rhash_lock_head __rcu **rht_bucket_var( - struct bucket_table *tbl, unsigned int hash) -{ - return unlikely(tbl->nest) ? __rht_bucket_nested(tbl, hash) : - &tbl->buckets[hash]; -} - -static inline struct rhash_lock_head __rcu **rht_bucket_insert( - struct rhashtable *ht, struct bucket_table *tbl, unsigned int hash) -{ - return unlikely(tbl->nest) ? rht_bucket_nested_insert(ht, tbl, hash) : - &tbl->buckets[hash]; -} - -/* - * We lock a bucket by setting BIT(0) in the pointer - this is always - * zero in real pointers. The NULLS mark is never stored in the bucket, - * rather we store NULL if the bucket is empty. - * bit_spin_locks do not handle contention well, but the whole point - * of the hashtable design is to achieve minimum per-bucket contention. - * A nested hash table might not have a bucket pointer. In that case - * we cannot get a lock. For remove and replace the bucket cannot be - * interesting and doesn't need locking. - * For insert we allocate the bucket if this is the last bucket_table, - * and then take the lock. - * Sometimes we unlock a bucket by writing a new pointer there. In that - * case we don't need to unlock, but we do need to reset state such as - * local_bh. For that we have rht_assign_unlock(). As rcu_assign_pointer() - * provides the same release semantics that bit_spin_unlock() provides, - * this is safe. - * When we write to a bucket without unlocking, we use rht_assign_locked(). 
- */ - -static inline void rht_lock(struct bucket_table *tbl, - struct rhash_lock_head __rcu **bkt) -{ - local_bh_disable(); - bit_spin_lock(0, (unsigned long *)bkt); - lock_map_acquire(&tbl->dep_map); -} - -static inline void rht_lock_nested(struct bucket_table *tbl, - struct rhash_lock_head __rcu **bucket, - unsigned int subclass) -{ - local_bh_disable(); - bit_spin_lock(0, (unsigned long *)bucket); - lock_acquire_exclusive(&tbl->dep_map, subclass, 0, NULL, _THIS_IP_); -} - -static inline void rht_unlock(struct bucket_table *tbl, - struct rhash_lock_head __rcu **bkt) -{ - lock_map_release(&tbl->dep_map); - bit_spin_unlock(0, (unsigned long *)bkt); - local_bh_enable(); -} - -static inline struct rhash_head *__rht_ptr( - struct rhash_lock_head *p, struct rhash_lock_head __rcu *const *bkt) -{ - return (struct rhash_head *) - ((unsigned long)p & ~BIT(0) ?: - (unsigned long)RHT_NULLS_MARKER(bkt)); -} - -/* - * Where 'bkt' is a bucket and might be locked: - * rht_ptr_rcu() dereferences that pointer and clears the lock bit. - * rht_ptr() dereferences in a context where the bucket is locked. - * rht_ptr_exclusive() dereferences in a context where exclusive - * access is guaranteed, such as when destroying the table. 
- */ -static inline struct rhash_head *rht_ptr_rcu( - struct rhash_lock_head __rcu *const *bkt) -{ - return __rht_ptr(rcu_dereference(*bkt), bkt); -} - -static inline struct rhash_head *rht_ptr( - struct rhash_lock_head __rcu *const *bkt, - struct bucket_table *tbl, - unsigned int hash) -{ - return __rht_ptr(rht_dereference_bucket(*bkt, tbl, hash), bkt); -} - -static inline struct rhash_head *rht_ptr_exclusive( - struct rhash_lock_head __rcu *const *bkt) -{ - return __rht_ptr(rcu_dereference_protected(*bkt, 1), bkt); -} - -static inline void rht_assign_locked(struct rhash_lock_head __rcu **bkt, - struct rhash_head *obj) -{ - if (rht_is_a_nulls(obj)) - obj = NULL; - rcu_assign_pointer(*bkt, (void *)((unsigned long)obj | BIT(0))); -} - -static inline void rht_assign_unlock(struct bucket_table *tbl, - struct rhash_lock_head __rcu **bkt, - struct rhash_head *obj) -{ - if (rht_is_a_nulls(obj)) - obj = NULL; - lock_map_release(&tbl->dep_map); - rcu_assign_pointer(*bkt, (void *)obj); - preempt_enable(); - __release(bitlock); - local_bh_enable(); -} - /** - * rht_for_each_from - iterate over hash chain from given head + * rht_for_each_continue - continue iterating over hash chain * @pos: the &struct rhash_head to use as a loop cursor. 
- * @head: the &struct rhash_head to start from + * @head: the previous &struct rhash_head to continue from * @tbl: the &struct bucket_table * @hash: the hash value / bucket index */ -#define rht_for_each_from(pos, head, tbl, hash) \ - for (pos = head; \ - !rht_is_a_nulls(pos); \ +#define rht_for_each_continue(pos, head, tbl, hash) \ + for (pos = rht_dereference_bucket(head, tbl, hash); \ + !rht_is_a_nulls(pos); \ pos = rht_dereference_bucket((pos)->next, tbl, hash)) /** @@ -423,20 +408,19 @@ static inline void rht_assign_unlock(struct bucket_table *tbl, * @hash: the hash value / bucket index */ #define rht_for_each(pos, tbl, hash) \ - rht_for_each_from(pos, rht_ptr(rht_bucket(tbl, hash), tbl, hash), \ - tbl, hash) + rht_for_each_continue(pos, (tbl)->buckets[hash], tbl, hash) /** - * rht_for_each_entry_from - iterate over hash chain from given head + * rht_for_each_entry_continue - continue iterating over hash chain * @tpos: the type * to use as a loop cursor. * @pos: the &struct rhash_head to use as a loop cursor. - * @head: the &struct rhash_head to start from + * @head: the previous &struct rhash_head to continue from * @tbl: the &struct bucket_table * @hash: the hash value / bucket index * @member: name of the &struct rhash_head within the hashable struct. */ -#define rht_for_each_entry_from(tpos, pos, head, tbl, hash, member) \ - for (pos = head; \ +#define rht_for_each_entry_continue(tpos, pos, head, tbl, hash, member) \ + for (pos = rht_dereference_bucket(head, tbl, hash); \ (!rht_is_a_nulls(pos)) && rht_entry(tpos, pos, member); \ pos = rht_dereference_bucket((pos)->next, tbl, hash)) @@ -449,9 +433,8 @@ static inline void rht_assign_unlock(struct bucket_table *tbl, * @member: name of the &struct rhash_head within the hashable struct. 
*/ #define rht_for_each_entry(tpos, pos, tbl, hash, member) \ - rht_for_each_entry_from(tpos, pos, \ - rht_ptr(rht_bucket(tbl, hash), tbl, hash), \ - tbl, hash, member) + rht_for_each_entry_continue(tpos, pos, (tbl)->buckets[hash], \ + tbl, hash, member) /** * rht_for_each_entry_safe - safely iterate over hash chain of given type @@ -465,19 +448,19 @@ static inline void rht_assign_unlock(struct bucket_table *tbl, * This hash chain list-traversal primitive allows for the looped code to * remove the loop cursor from the list. */ -#define rht_for_each_entry_safe(tpos, pos, next, tbl, hash, member) \ - for (pos = rht_ptr(rht_bucket(tbl, hash), tbl, hash), \ - next = !rht_is_a_nulls(pos) ? \ - rht_dereference_bucket(pos->next, tbl, hash) : NULL; \ - (!rht_is_a_nulls(pos)) && rht_entry(tpos, pos, member); \ - pos = next, \ - next = !rht_is_a_nulls(pos) ? \ +#define rht_for_each_entry_safe(tpos, pos, next, tbl, hash, member) \ + for (pos = rht_dereference_bucket((tbl)->buckets[hash], tbl, hash), \ + next = !rht_is_a_nulls(pos) ? \ + rht_dereference_bucket(pos->next, tbl, hash) : NULL; \ + (!rht_is_a_nulls(pos)) && rht_entry(tpos, pos, member); \ + pos = next, \ + next = !rht_is_a_nulls(pos) ? \ rht_dereference_bucket(pos->next, tbl, hash) : NULL) /** - * rht_for_each_rcu_from - iterate over rcu hash chain from given head + * rht_for_each_rcu_continue - continue iterating over rcu hash chain * @pos: the &struct rhash_head to use as a loop cursor. - * @head: the &struct rhash_head to start from + * @head: the previous &struct rhash_head to continue from * @tbl: the &struct bucket_table * @hash: the hash value / bucket index * @@ -485,9 +468,9 @@ static inline void rht_assign_unlock(struct bucket_table *tbl, * the _rcu mutation primitives such as rhashtable_insert() as long as the * traversal is guarded by rcu_read_lock(). 
*/ -#define rht_for_each_rcu_from(pos, head, tbl, hash) \ +#define rht_for_each_rcu_continue(pos, head, tbl, hash) \ for (({barrier(); }), \ - pos = head; \ + pos = rht_dereference_bucket_rcu(head, tbl, hash); \ !rht_is_a_nulls(pos); \ pos = rcu_dereference_raw(pos->next)) @@ -501,17 +484,14 @@ static inline void rht_assign_unlock(struct bucket_table *tbl, * the _rcu mutation primitives such as rhashtable_insert() as long as the * traversal is guarded by rcu_read_lock(). */ -#define rht_for_each_rcu(pos, tbl, hash) \ - for (({barrier(); }), \ - pos = rht_ptr_rcu(rht_bucket(tbl, hash)); \ - !rht_is_a_nulls(pos); \ - pos = rcu_dereference_raw(pos->next)) +#define rht_for_each_rcu(pos, tbl, hash) \ + rht_for_each_rcu_continue(pos, (tbl)->buckets[hash], tbl, hash) /** - * rht_for_each_entry_rcu_from - iterated over rcu hash chain from given head + * rht_for_each_entry_rcu_continue - continue iterating over rcu hash chain * @tpos: the type * to use as a loop cursor. * @pos: the &struct rhash_head to use as a loop cursor. - * @head: the &struct rhash_head to start from + * @head: the previous &struct rhash_head to continue from * @tbl: the &struct bucket_table * @hash: the hash value / bucket index * @member: name of the &struct rhash_head within the hashable struct. @@ -520,9 +500,9 @@ static inline void rht_assign_unlock(struct bucket_table *tbl, * the _rcu mutation primitives such as rhashtable_insert() as long as the * traversal is guarded by rcu_read_lock(). 
*/ -#define rht_for_each_entry_rcu_from(tpos, pos, head, tbl, hash, member) \ +#define rht_for_each_entry_rcu_continue(tpos, pos, head, tbl, hash, member) \ for (({barrier(); }), \ - pos = head; \ + pos = rht_dereference_bucket_rcu(head, tbl, hash); \ (!rht_is_a_nulls(pos)) && rht_entry(tpos, pos, member); \ pos = rht_dereference_bucket_rcu(pos->next, tbl, hash)) @@ -538,10 +518,9 @@ static inline void rht_assign_unlock(struct bucket_table *tbl, * the _rcu mutation primitives such as rhashtable_insert() as long as the * traversal is guarded by rcu_read_lock(). */ -#define rht_for_each_entry_rcu(tpos, pos, tbl, hash, member) \ - rht_for_each_entry_rcu_from(tpos, pos, \ - rht_ptr_rcu(rht_bucket(tbl, hash)), \ - tbl, hash, member) +#define rht_for_each_entry_rcu(tpos, pos, tbl, hash, member) \ + rht_for_each_entry_rcu_continue(tpos, pos, (tbl)->buckets[hash],\ + tbl, hash, member) /** * rhl_for_each_rcu - iterate over rcu hash table list @@ -586,27 +565,20 @@ static inline struct rhash_head *__rhashtable_lookup( .ht = ht, .key = key, }; - struct rhash_lock_head __rcu *const *bkt; - struct bucket_table *tbl; + const struct bucket_table *tbl; struct rhash_head *he; unsigned int hash; tbl = rht_dereference_rcu(ht->tbl, ht); restart: hash = rht_key_hashfn(ht, tbl, key, params); - bkt = rht_bucket(tbl, hash); - do { - rht_for_each_rcu_from(he, rht_ptr_rcu(bkt), tbl, hash) { - if (params.obj_cmpfn ? - params.obj_cmpfn(&arg, rht_obj(ht, he)) : - rhashtable_compare(&arg, rht_obj(ht, he))) - continue; - return he; - } - /* An object might have been moved to a different hash chain, - * while we walk along it - better check and retry. - */ - } while (he != RHT_NULLS_MARKER(bkt)); + rht_for_each_rcu(he, tbl, hash) { + if (params.obj_cmpfn ? + params.obj_cmpfn(&arg, rht_obj(ht, he)) : + rhashtable_compare(&arg, rht_obj(ht, he))) + continue; + return he; + } /* Ensure we see any new tables. 
*/ smp_rmb(); @@ -702,10 +674,10 @@ static inline void *__rhashtable_insert_fast( .ht = ht, .key = key, }; - struct rhash_lock_head __rcu **bkt; struct rhash_head __rcu **pprev; struct bucket_table *tbl; struct rhash_head *head; + spinlock_t *lock; unsigned int hash; int elasticity; void *data; @@ -714,22 +686,19 @@ static inline void *__rhashtable_insert_fast( tbl = rht_dereference_rcu(ht->tbl, ht); hash = rht_head_hashfn(ht, tbl, obj, params); - elasticity = RHT_ELASTICITY; - bkt = rht_bucket_insert(ht, tbl, hash); - data = ERR_PTR(-ENOMEM); - if (!bkt) - goto out; - pprev = NULL; - rht_lock(tbl, bkt); + lock = rht_bucket_lock(tbl, hash); + spin_lock_bh(lock); - if (unlikely(rcu_access_pointer(tbl->future_tbl))) { + if (unlikely(rht_dereference_bucket(tbl->future_tbl, tbl, hash))) { slow_path: - rht_unlock(tbl, bkt); + spin_unlock_bh(lock); rcu_read_unlock(); return rhashtable_insert_slow(ht, key, obj); } - rht_for_each_from(head, rht_ptr(bkt, tbl, hash), tbl, hash) { + elasticity = ht->elasticity; + pprev = &tbl->buckets[hash]; + rht_for_each(head, tbl, hash) { struct rhlist_head *plist; struct rhlist_head *list; @@ -737,15 +706,13 @@ static inline void *__rhashtable_insert_fast( if (!key || (params.obj_cmpfn ? 
params.obj_cmpfn(&arg, rht_obj(ht, head)) : - rhashtable_compare(&arg, rht_obj(ht, head)))) { - pprev = &head->next; + rhashtable_compare(&arg, rht_obj(ht, head)))) continue; - } data = rht_obj(ht, head); if (!rhlist) - goto out_unlock; + goto out; list = container_of(obj, struct rhlist_head, rhead); @@ -754,13 +721,9 @@ static inline void *__rhashtable_insert_fast( RCU_INIT_POINTER(list->next, plist); head = rht_dereference_bucket(head->next, tbl, hash); RCU_INIT_POINTER(list->rhead.next, head); - if (pprev) { - rcu_assign_pointer(*pprev, obj); - rht_unlock(tbl, bkt); - } else - rht_assign_unlock(tbl, bkt, obj); - data = NULL; - goto out; + rcu_assign_pointer(*pprev, obj); + + goto good; } if (elasticity <= 0) @@ -768,13 +731,12 @@ static inline void *__rhashtable_insert_fast( data = ERR_PTR(-E2BIG); if (unlikely(rht_grow_above_max(ht, tbl))) - goto out_unlock; + goto out; if (unlikely(rht_grow_above_100(ht, tbl))) goto slow_path; - /* Inserting at head of list makes unlocking free. */ - head = rht_ptr(bkt, tbl, hash); + head = rht_dereference_bucket(tbl->buckets[hash], tbl, hash); RCU_INIT_POINTER(obj->next, head); if (rhlist) { @@ -784,21 +746,20 @@ static inline void *__rhashtable_insert_fast( RCU_INIT_POINTER(list->next, NULL); } - atomic_inc(&ht->nelems); - rht_assign_unlock(tbl, bkt, obj); + rcu_assign_pointer(tbl->buckets[hash], obj); + atomic_inc(&ht->nelems); if (rht_grow_above_75(ht, tbl)) schedule_work(&ht->run_work); +good: data = NULL; + out: + spin_unlock_bh(lock); rcu_read_unlock(); return data; - -out_unlock: - rht_unlock(tbl, bkt); - goto out; } /** @@ -807,14 +768,15 @@ static inline void *__rhashtable_insert_fast( * @obj: pointer to hash head inside object * @params: hash table parameters * - * Will take the per bucket bitlock to protect against mutual mutations + * Will take a per bucket spinlock to protect against mutual mutations * on the same bucket. Multiple insertions may occur in parallel unless - * they map to the same bucket. 
+ * they map to the same bucket lock. * * It is safe to call this function from atomic context. * - * Will trigger an automatic deferred table resizing if residency in the - * table grows beyond 70%. + * Will trigger an automatic deferred table resizing if the size grows + * beyond the watermark indicated by grow_decision() which can be passed + * to rhashtable_init(). */ static inline int rhashtable_insert_fast( struct rhashtable *ht, struct rhash_head *obj, @@ -836,14 +798,15 @@ static inline int rhashtable_insert_fast( * @list: pointer to hash list head inside object * @params: hash table parameters * - * Will take the per bucket bitlock to protect against mutual mutations + * Will take a per bucket spinlock to protect against mutual mutations * on the same bucket. Multiple insertions may occur in parallel unless - * they map to the same bucket. + * they map to the same bucket lock. * * It is safe to call this function from atomic context. * - * Will trigger an automatic deferred table resizing if residency in the - * table grows beyond 70%. + * Will trigger an automatic deferred table resizing if the size grows + * beyond the watermark indicated by grow_decision() which can be passed + * to rhashtable_init(). */ static inline int rhltable_insert_key( struct rhltable *hlt, const void *key, struct rhlist_head *list, @@ -859,14 +822,15 @@ static inline int rhltable_insert_key( * @list: pointer to hash list head inside object * @params: hash table parameters * - * Will take the per bucket bitlock to protect against mutual mutations + * Will take a per bucket spinlock to protect against mutual mutations * on the same bucket. Multiple insertions may occur in parallel unless - * they map to the same bucket. + * they map to the same bucket lock. * * It is safe to call this function from atomic context. * - * Will trigger an automatic deferred table resizing if residency in the - * table grows beyond 70%. 
+ * Will trigger an automatic deferred table resizing if the size grows + * beyond the watermark indicated by grow_decision() which can be passed + * to rhashtable_init(). */ static inline int rhltable_insert( struct rhltable *hlt, struct rhlist_head *list, @@ -885,13 +849,20 @@ static inline int rhltable_insert( * @obj: pointer to hash head inside object * @params: hash table parameters * + * Locks down the bucket chain in both the old and new table if a resize + * is in progress to ensure that writers can't remove from the old table + * and can't insert to the new table during the atomic operation of search + * and insertion. Searches for duplicates in both the old and new table if + * a resize is in progress. + * * This lookup function may only be used for fixed key hash table (key_len * parameter set). It will BUG() if used inappropriately. * * It is safe to call this function from atomic context. * - * Will trigger an automatic deferred table resizing if residency in the - * table grows beyond 70%. + * Will trigger an automatic deferred table resizing if the size grows + * beyond the watermark indicated by grow_decision() which can be passed + * to rhashtable_init(). */ static inline int rhashtable_lookup_insert_fast( struct rhashtable *ht, struct rhash_head *obj, @@ -910,28 +881,6 @@ static inline int rhashtable_lookup_insert_fast( return ret == NULL ? 0 : -EEXIST; } -/** - * rhashtable_lookup_get_insert_fast - lookup and insert object into hash table - * @ht: hash table - * @obj: pointer to hash head inside object - * @params: hash table parameters - * - * Just like rhashtable_lookup_insert_fast(), but this function returns the - * object if it exists, NULL if it did not and the insertion was successful, - * and an ERR_PTR otherwise. 
- */ -static inline void *rhashtable_lookup_get_insert_fast( - struct rhashtable *ht, struct rhash_head *obj, - const struct rhashtable_params params) -{ - const char *key = rht_obj(ht, obj); - - BUG_ON(ht->p.obj_hashfn); - - return __rhashtable_insert_fast(ht, key + ht->p.key_offset, obj, params, - false); -} - /** * rhashtable_lookup_insert_key - search and insert object to hash table * with explicit key @@ -940,10 +889,17 @@ static inline void *rhashtable_lookup_get_insert_fast( * @obj: pointer to hash head inside object * @params: hash table parameters * + * Locks down the bucket chain in both the old and new table if a resize + * is in progress to ensure that writers can't remove from the old table + * and can't insert to the new table during the atomic operation of search + * and insertion. Searches for duplicates in both the old and new table if + * a resize is in progress. + * * Lookups may occur in parallel with hashtable mutations and resizing. * - * Will trigger an automatic deferred table resizing if residency in the - * table grows beyond 70%. + * Will trigger an automatic deferred table resizing if the size grows + * beyond the watermark indicated by grow_decision() which can be passed + * to rhashtable_init(). * * Returns zero on success. 
*/ @@ -965,9 +921,9 @@ static inline int rhashtable_lookup_insert_key( /** * rhashtable_lookup_get_insert_key - lookup and insert object into hash table * @ht: hash table - * @key: key * @obj: pointer to hash head inside object * @params: hash table parameters + * @data: pointer to element data already in hashes * * Just like rhashtable_lookup_insert_key(), but this function returns the * object if it exists, NULL if it does not and the insertion was successful, @@ -988,20 +944,19 @@ static inline int __rhashtable_remove_fast_one( struct rhash_head *obj, const struct rhashtable_params params, bool rhlist) { - struct rhash_lock_head __rcu **bkt; struct rhash_head __rcu **pprev; struct rhash_head *he; + spinlock_t * lock; unsigned int hash; int err = -ENOENT; hash = rht_head_hashfn(ht, tbl, obj, params); - bkt = rht_bucket_var(tbl, hash); - if (!bkt) - return -ENOENT; - pprev = NULL; - rht_lock(tbl, bkt); + lock = rht_bucket_lock(tbl, hash); - rht_for_each_from(he, rht_ptr(bkt, tbl, hash), tbl, hash) { + spin_lock_bh(lock); + + pprev = &tbl->buckets[hash]; + rht_for_each(he, tbl, hash) { struct rhlist_head *list; list = container_of(he, struct rhlist_head, rhead); @@ -1041,17 +996,12 @@ static inline int __rhashtable_remove_fast_one( } } - if (pprev) { - rcu_assign_pointer(*pprev, obj); - rht_unlock(tbl, bkt); - } else { - rht_assign_unlock(tbl, bkt, obj); - } - goto unlocked; + rcu_assign_pointer(*pprev, obj); + break; } - rht_unlock(tbl, bkt); -unlocked: + spin_unlock_bh(lock); + if (err > 0) { atomic_dec(&ht->nelems); if (unlikely(ht->p.automatic_shrinking && @@ -1100,8 +1050,8 @@ static inline int __rhashtable_remove_fast( * walk the bucket chain upon removal. The removal operation is thus * considerable slow if the hash table is not correctly sized. * - * Will automatically shrink the table if permitted when residency drops - * below 30%. 
+ * Will automatically shrink the table via rhashtable_expand() if the + * shrink_decision function specified at rhashtable_init() returns true. * * Returns zero on success, -ENOENT if the entry could not be found. */ @@ -1122,8 +1072,8 @@ static inline int rhashtable_remove_fast( * walk the bucket chain upon removal. The removal operation is thus * considerable slow if the hash table is not correctly sized. * - * Will automatically shrink the table if permitted when residency drops - * below 30% + * Will automatically shrink the table via rhashtable_expand() if the + * shrink_decision function specified at rhashtable_init() returns true. * * Returns zero on success, -ENOENT if the entry could not be found. */ @@ -1140,9 +1090,9 @@ static inline int __rhashtable_replace_fast( struct rhash_head *obj_old, struct rhash_head *obj_new, const struct rhashtable_params params) { - struct rhash_lock_head __rcu **bkt; struct rhash_head __rcu **pprev; struct rhash_head *he; + spinlock_t *lock; unsigned int hash; int err = -ENOENT; @@ -1153,33 +1103,25 @@ static inline int __rhashtable_replace_fast( if (hash != rht_head_hashfn(ht, tbl, obj_new, params)) return -EINVAL; - bkt = rht_bucket_var(tbl, hash); - if (!bkt) - return -ENOENT; + lock = rht_bucket_lock(tbl, hash); - pprev = NULL; - rht_lock(tbl, bkt); + spin_lock_bh(lock); - rht_for_each_from(he, rht_ptr(bkt, tbl, hash), tbl, hash) { + pprev = &tbl->buckets[hash]; + rht_for_each(he, tbl, hash) { if (he != obj_old) { pprev = &he->next; continue; } rcu_assign_pointer(obj_new->next, obj_old->next); - if (pprev) { - rcu_assign_pointer(*pprev, obj_new); - rht_unlock(tbl, bkt); - } else { - rht_assign_unlock(tbl, bkt, obj_new); - } + rcu_assign_pointer(*pprev, obj_new); err = 0; - goto unlocked; + break; } - rht_unlock(tbl, bkt); + spin_unlock_bh(lock); -unlocked: return err; } @@ -1224,6 +1166,14 @@ static inline int rhashtable_replace_fast( return err; } +/* Obsolete function, do not use in new code. 
*/ +static inline int rhashtable_walk_init(struct rhashtable *ht, + struct rhashtable_iter *iter, gfp_t gfp) +{ + rhashtable_walk_enter(ht, iter); + return 0; +} + /** * rhltable_walk_enter - Initialise an iterator * @hlt: Table to walk over @@ -1239,9 +1189,8 @@ static inline int rhashtable_replace_fast( * For a completely stable walk you should construct your own data * structure outside the hash table. * - * This function may be called from any process context, including - * non-preemptable context, but cannot be called from softirq or - * hardirq context. + * This function may sleep so you must not call it from interrupt + * context or with spin locks held. * * You must call rhashtable_walk_exit after this function returns. */ diff --git a/include/linux/ring_buffer.h b/include/linux/ring_buffer.h index dac53fd3af..4acc552e92 100644 --- a/include/linux/ring_buffer.h +++ b/include/linux/ring_buffer.h @@ -1,19 +1,21 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_RING_BUFFER_H #define _LINUX_RING_BUFFER_H +#include #include #include #include -struct trace_buffer; +struct ring_buffer; struct ring_buffer_iter; /* * Don't refer to this struct directly, use functions below. */ struct ring_buffer_event { + kmemcheck_bitfield_begin(bitfield); u32 type_len:5, time_delta:27; + kmemcheck_bitfield_end(bitfield); u32 array[]; }; @@ -34,12 +36,10 @@ struct ring_buffer_event { * array[0] = time delta (28 .. 59) * size = 8 bytes * - * @RINGBUF_TYPE_TIME_STAMP: Absolute timestamp - * Same format as TIME_EXTEND except that the - * value is an absolute timestamp, not a delta - * event.time_delta contains bottom 27 bits - * array[0] = top (28 .. 
59) bits - * size = 8 bytes + * @RINGBUF_TYPE_TIME_STAMP: Sync time stamp with external clock + * array[0] = tv_nsec + * array[1..2] = tv_sec + * size = 16 bytes * * <= @RINGBUF_TYPE_DATA_TYPE_LEN_MAX: * Data record @@ -56,17 +56,16 @@ enum ring_buffer_type { RINGBUF_TYPE_DATA_TYPE_LEN_MAX = 28, RINGBUF_TYPE_PADDING, RINGBUF_TYPE_TIME_EXTEND, + /* FIXME: RINGBUF_TYPE_TIME_STAMP not implemented */ RINGBUF_TYPE_TIME_STAMP, }; unsigned ring_buffer_event_length(struct ring_buffer_event *event); void *ring_buffer_event_data(struct ring_buffer_event *event); -u64 ring_buffer_event_time_stamp(struct trace_buffer *buffer, - struct ring_buffer_event *event); /* * ring_buffer_discard_commit will remove an event that has not - * been committed yet. If this is used, then ring_buffer_unlock_commit + * ben committed yet. If this is used, then ring_buffer_unlock_commit * must not be called on the discarded event. This function * will try to remove the event from the ring buffer completely * if another event has not been written after it. @@ -78,13 +77,13 @@ u64 ring_buffer_event_time_stamp(struct trace_buffer *buffer, * else * ring_buffer_unlock_commit(buffer, event); */ -void ring_buffer_discard_commit(struct trace_buffer *buffer, +void ring_buffer_discard_commit(struct ring_buffer *buffer, struct ring_buffer_event *event); /* * size is in bytes for each per CPU buffer. 
*/ -struct trace_buffer * +struct ring_buffer * __ring_buffer_alloc(unsigned long size, unsigned flags, struct lock_class_key *key); /* @@ -98,103 +97,96 @@ __ring_buffer_alloc(unsigned long size, unsigned flags, struct lock_class_key *k __ring_buffer_alloc((size), (flags), &__key); \ }) -int ring_buffer_wait(struct trace_buffer *buffer, int cpu, int full); -__poll_t ring_buffer_poll_wait(struct trace_buffer *buffer, int cpu, +int ring_buffer_wait(struct ring_buffer *buffer, int cpu, bool full); +int ring_buffer_poll_wait(struct ring_buffer *buffer, int cpu, struct file *filp, poll_table *poll_table); #define RING_BUFFER_ALL_CPUS -1 -void ring_buffer_free(struct trace_buffer *buffer); +void ring_buffer_free(struct ring_buffer *buffer); -int ring_buffer_resize(struct trace_buffer *buffer, unsigned long size, int cpu); +int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size, int cpu); -void ring_buffer_change_overwrite(struct trace_buffer *buffer, int val); +void ring_buffer_change_overwrite(struct ring_buffer *buffer, int val); -struct ring_buffer_event *ring_buffer_lock_reserve(struct trace_buffer *buffer, +struct ring_buffer_event *ring_buffer_lock_reserve(struct ring_buffer *buffer, unsigned long length); -int ring_buffer_unlock_commit(struct trace_buffer *buffer, +int ring_buffer_unlock_commit(struct ring_buffer *buffer, struct ring_buffer_event *event); -int ring_buffer_write(struct trace_buffer *buffer, +int ring_buffer_write(struct ring_buffer *buffer, unsigned long length, void *data); -void ring_buffer_nest_start(struct trace_buffer *buffer); -void ring_buffer_nest_end(struct trace_buffer *buffer); - struct ring_buffer_event * -ring_buffer_peek(struct trace_buffer *buffer, int cpu, u64 *ts, +ring_buffer_peek(struct ring_buffer *buffer, int cpu, u64 *ts, unsigned long *lost_events); struct ring_buffer_event * -ring_buffer_consume(struct trace_buffer *buffer, int cpu, u64 *ts, +ring_buffer_consume(struct ring_buffer *buffer, int cpu, u64 *ts, 
unsigned long *lost_events); struct ring_buffer_iter * -ring_buffer_read_prepare(struct trace_buffer *buffer, int cpu, gfp_t flags); +ring_buffer_read_prepare(struct ring_buffer *buffer, int cpu); void ring_buffer_read_prepare_sync(void); void ring_buffer_read_start(struct ring_buffer_iter *iter); void ring_buffer_read_finish(struct ring_buffer_iter *iter); struct ring_buffer_event * ring_buffer_iter_peek(struct ring_buffer_iter *iter, u64 *ts); -void ring_buffer_iter_advance(struct ring_buffer_iter *iter); +struct ring_buffer_event * +ring_buffer_read(struct ring_buffer_iter *iter, u64 *ts); void ring_buffer_iter_reset(struct ring_buffer_iter *iter); int ring_buffer_iter_empty(struct ring_buffer_iter *iter); -bool ring_buffer_iter_dropped(struct ring_buffer_iter *iter); -unsigned long ring_buffer_size(struct trace_buffer *buffer, int cpu); +unsigned long ring_buffer_size(struct ring_buffer *buffer, int cpu); -void ring_buffer_reset_cpu(struct trace_buffer *buffer, int cpu); -void ring_buffer_reset_online_cpus(struct trace_buffer *buffer); -void ring_buffer_reset(struct trace_buffer *buffer); +void ring_buffer_reset_cpu(struct ring_buffer *buffer, int cpu); +void ring_buffer_reset(struct ring_buffer *buffer); #ifdef CONFIG_RING_BUFFER_ALLOW_SWAP -int ring_buffer_swap_cpu(struct trace_buffer *buffer_a, - struct trace_buffer *buffer_b, int cpu); +int ring_buffer_swap_cpu(struct ring_buffer *buffer_a, + struct ring_buffer *buffer_b, int cpu); #else static inline int -ring_buffer_swap_cpu(struct trace_buffer *buffer_a, - struct trace_buffer *buffer_b, int cpu) +ring_buffer_swap_cpu(struct ring_buffer *buffer_a, + struct ring_buffer *buffer_b, int cpu) { return -ENODEV; } #endif -bool ring_buffer_empty(struct trace_buffer *buffer); -bool ring_buffer_empty_cpu(struct trace_buffer *buffer, int cpu); +bool ring_buffer_empty(struct ring_buffer *buffer); +bool ring_buffer_empty_cpu(struct ring_buffer *buffer, int cpu); -void ring_buffer_record_disable(struct trace_buffer 
*buffer); -void ring_buffer_record_enable(struct trace_buffer *buffer); -void ring_buffer_record_off(struct trace_buffer *buffer); -void ring_buffer_record_on(struct trace_buffer *buffer); -bool ring_buffer_record_is_on(struct trace_buffer *buffer); -bool ring_buffer_record_is_set_on(struct trace_buffer *buffer); -void ring_buffer_record_disable_cpu(struct trace_buffer *buffer, int cpu); -void ring_buffer_record_enable_cpu(struct trace_buffer *buffer, int cpu); +void ring_buffer_record_disable(struct ring_buffer *buffer); +void ring_buffer_record_enable(struct ring_buffer *buffer); +void ring_buffer_record_off(struct ring_buffer *buffer); +void ring_buffer_record_on(struct ring_buffer *buffer); +int ring_buffer_record_is_on(struct ring_buffer *buffer); +void ring_buffer_record_disable_cpu(struct ring_buffer *buffer, int cpu); +void ring_buffer_record_enable_cpu(struct ring_buffer *buffer, int cpu); -u64 ring_buffer_oldest_event_ts(struct trace_buffer *buffer, int cpu); -unsigned long ring_buffer_bytes_cpu(struct trace_buffer *buffer, int cpu); -unsigned long ring_buffer_entries(struct trace_buffer *buffer); -unsigned long ring_buffer_overruns(struct trace_buffer *buffer); -unsigned long ring_buffer_entries_cpu(struct trace_buffer *buffer, int cpu); -unsigned long ring_buffer_overrun_cpu(struct trace_buffer *buffer, int cpu); -unsigned long ring_buffer_commit_overrun_cpu(struct trace_buffer *buffer, int cpu); -unsigned long ring_buffer_dropped_events_cpu(struct trace_buffer *buffer, int cpu); -unsigned long ring_buffer_read_events_cpu(struct trace_buffer *buffer, int cpu); +u64 ring_buffer_oldest_event_ts(struct ring_buffer *buffer, int cpu); +unsigned long ring_buffer_bytes_cpu(struct ring_buffer *buffer, int cpu); +unsigned long ring_buffer_entries(struct ring_buffer *buffer); +unsigned long ring_buffer_overruns(struct ring_buffer *buffer); +unsigned long ring_buffer_entries_cpu(struct ring_buffer *buffer, int cpu); +unsigned long ring_buffer_overrun_cpu(struct 
ring_buffer *buffer, int cpu); +unsigned long ring_buffer_commit_overrun_cpu(struct ring_buffer *buffer, int cpu); +unsigned long ring_buffer_dropped_events_cpu(struct ring_buffer *buffer, int cpu); +unsigned long ring_buffer_read_events_cpu(struct ring_buffer *buffer, int cpu); -u64 ring_buffer_time_stamp(struct trace_buffer *buffer); -void ring_buffer_normalize_time_stamp(struct trace_buffer *buffer, +u64 ring_buffer_time_stamp(struct ring_buffer *buffer, int cpu); +void ring_buffer_normalize_time_stamp(struct ring_buffer *buffer, int cpu, u64 *ts); -void ring_buffer_set_clock(struct trace_buffer *buffer, +void ring_buffer_set_clock(struct ring_buffer *buffer, u64 (*clock)(void)); -void ring_buffer_set_time_stamp_abs(struct trace_buffer *buffer, bool abs); -bool ring_buffer_time_stamp_abs(struct trace_buffer *buffer); -size_t ring_buffer_nr_pages(struct trace_buffer *buffer, int cpu); -size_t ring_buffer_nr_dirty_pages(struct trace_buffer *buffer, int cpu); +size_t ring_buffer_page_len(void *page); -void *ring_buffer_alloc_read_page(struct trace_buffer *buffer, int cpu); -void ring_buffer_free_read_page(struct trace_buffer *buffer, int cpu, void *data); -int ring_buffer_read_page(struct trace_buffer *buffer, void **data_page, + +void *ring_buffer_alloc_read_page(struct ring_buffer *buffer, int cpu); +void ring_buffer_free_read_page(struct ring_buffer *buffer, void *data); +int ring_buffer_read_page(struct ring_buffer *buffer, void **data_page, size_t len, int cpu, int full); struct trace_seq; @@ -206,10 +198,4 @@ enum ring_buffer_flags { RB_FL_OVERWRITE = 1 << 0, }; -#ifdef CONFIG_RING_BUFFER -int trace_rb_cpu_prepare(unsigned int cpu, struct hlist_node *node); -#else -#define trace_rb_cpu_prepare NULL -#endif - #endif /* _LINUX_RING_BUFFER_H */ diff --git a/include/linux/rio.h b/include/linux/rio.h index 2cd637268b..2457ca9256 100644 --- a/include/linux/rio.h +++ b/include/linux/rio.h @@ -1,10 +1,14 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * 
RapidIO interconnect services * (RapidIO Interconnect Specification, http://www.rapidio.org) * * Copyright 2005 MontaVista Software, Inc. * Matt Porter + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. */ #ifndef LINUX_RIO_H @@ -100,7 +104,7 @@ struct rio_switch { u32 port_ok; struct rio_switch_ops *ops; spinlock_t lock; - struct rio_dev *nextdev[]; + struct rio_dev *nextdev[0]; }; /** @@ -201,7 +205,7 @@ struct rio_dev { u8 hopcount; struct rio_dev *prev; atomic_t state; - struct rio_switch rswitch[]; /* RIO switch info */ + struct rio_switch rswitch[0]; /* RIO switch info */ }; #define rio_dev_g(n) list_entry(n, struct rio_dev, global_list) @@ -425,7 +429,7 @@ struct rio_ops { int (*map_outb)(struct rio_mport *mport, u16 destid, u64 rstart, u32 size, u32 flags, dma_addr_t *laddr); void (*unmap_outb)(struct rio_mport *mport, u16 destid, u64 rstart); -}; +} __no_const; #define RIO_RESOURCE_MEM 0x00000100 #define RIO_RESOURCE_DOORBELL 0x00000200 diff --git a/include/linux/rio_drv.h b/include/linux/rio_drv.h index e49c32b0f3..0834264fb7 100644 --- a/include/linux/rio_drv.h +++ b/include/linux/rio_drv.h @@ -1,9 +1,13 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * RapidIO driver services * * Copyright 2005 MontaVista Software, Inc. * Matt Porter + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. 
*/ #ifndef LINUX_RIO_DRV_H @@ -444,6 +448,9 @@ static inline void rio_set_drvdata(struct rio_dev *rdev, void *data) /* Misc driver helpers */ extern u16 rio_local_get_device_id(struct rio_mport *port); extern void rio_local_set_device_id(struct rio_mport *port, u16 did); +extern struct rio_dev *rio_get_device(u16 vid, u16 did, struct rio_dev *from); +extern struct rio_dev *rio_get_asm(u16 vid, u16 did, u16 asm_vid, u16 asm_did, + struct rio_dev *from); extern int rio_init_mports(void); #endif /* LINUX_RIO_DRV_H */ diff --git a/include/linux/rio_ids.h b/include/linux/rio_ids.h index 4846f72759..334c576c15 100644 --- a/include/linux/rio_ids.h +++ b/include/linux/rio_ids.h @@ -1,9 +1,13 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * RapidIO devices * * Copyright 2005 MontaVista Software, Inc. * Matt Porter + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. */ #ifndef LINUX_RIO_IDS_H diff --git a/include/linux/rio_regs.h b/include/linux/rio_regs.h index e975943254..40c04efe74 100644 --- a/include/linux/rio_regs.h +++ b/include/linux/rio_regs.h @@ -1,9 +1,13 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * RapidIO register definitions * * Copyright 2005 MontaVista Software, Inc. * Matt Porter + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. 
*/ #ifndef LINUX_RIO_REGS_H diff --git a/include/linux/rmap.h b/include/linux/rmap.h index c976cc6de2..f5a474863b 100644 --- a/include/linux/rmap.h +++ b/include/linux/rmap.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_RMAP_H #define _LINUX_RMAP_H /* @@ -10,7 +9,6 @@ #include #include #include -#include /* * The anon_vma heads a list of private "related" vmas, to scan if @@ -56,9 +54,7 @@ struct anon_vma { * is serialized by a system wide lock only visible to * mm_take_all_locks() (mm_all_locks_mutex). */ - - /* Interval tree of private "related" vmas */ - struct rb_root_cached rb_root; + struct rb_root rb_root; /* Interval tree of private "related" vmas */ }; /* @@ -77,7 +73,7 @@ struct anon_vma { struct anon_vma_chain { struct vm_area_struct *vma; struct anon_vma *anon_vma; - struct list_head same_vma; /* locked by mmap_lock & page_table_lock */ + struct list_head same_vma; /* locked by mmap_sem & page_table_lock */ struct rb_node rb; /* locked by anon_vma->rwsem */ unsigned long rb_subtree_last; #ifdef CONFIG_DEBUG_VM_RB @@ -86,14 +82,19 @@ struct anon_vma_chain { }; enum ttu_flags { - TTU_SPLIT_HUGE_PMD = 0x4, /* split huge PMD if any */ - TTU_IGNORE_MLOCK = 0x8, /* ignore mlock */ - TTU_SYNC = 0x10, /* avoid racy checks with PVMW_SYNC */ - TTU_IGNORE_HWPOISON = 0x20, /* corrupted page is recoverable */ - TTU_BATCH_FLUSH = 0x40, /* Batch TLB flushes where possible + TTU_UNMAP = 1, /* unmap mode */ + TTU_MIGRATION = 2, /* migration mode */ + TTU_MUNLOCK = 4, /* munlock mode */ + TTU_LZFREE = 8, /* lazy free mode */ + TTU_SPLIT_HUGE_PMD = 16, /* split huge PMD if any */ + + TTU_IGNORE_MLOCK = (1 << 8), /* ignore mlock */ + TTU_IGNORE_ACCESS = (1 << 9), /* don't age */ + TTU_IGNORE_HWPOISON = (1 << 10),/* corrupted page is recoverable */ + TTU_BATCH_FLUSH = (1 << 11), /* Batch TLB flushes where possible * and caller guarantees they will * do a final flush if necessary */ - TTU_RMAP_LOCKED = 0x80, /* do not grab rmap lock: + 
TTU_RMAP_LOCKED = (1 << 12) /* do not grab rmap lock: * caller holds it */ }; @@ -136,18 +137,10 @@ static inline void anon_vma_unlock_read(struct anon_vma *anon_vma) * anon_vma helper functions. */ void anon_vma_init(void); /* create anon_vma_cachep */ -int __anon_vma_prepare(struct vm_area_struct *); +int anon_vma_prepare(struct vm_area_struct *); void unlink_anon_vmas(struct vm_area_struct *); -int anon_vma_clone(struct vm_area_struct *, struct vm_area_struct *); -int anon_vma_fork(struct vm_area_struct *, struct vm_area_struct *); - -static inline int anon_vma_prepare(struct vm_area_struct *vma) -{ - if (likely(vma->anon_vma)) - return 0; - - return __anon_vma_prepare(vma); -} +int anon_vma_clone(struct vm_area_struct *, const struct vm_area_struct *); +int anon_vma_fork(struct vm_area_struct *, const struct vm_area_struct *); static inline void anon_vma_merge(struct vm_area_struct *vma, struct vm_area_struct *next) @@ -191,38 +184,45 @@ static inline void page_dup_rmap(struct page *page, bool compound) int page_referenced(struct page *, int is_locked, struct mem_cgroup *memcg, unsigned long *vm_flags); -void try_to_migrate(struct page *page, enum ttu_flags flags); -void try_to_unmap(struct page *, enum ttu_flags flags); +#define TTU_ACTION(x) ((x) & TTU_ACTION_MASK) -int make_device_exclusive_range(struct mm_struct *mm, unsigned long start, - unsigned long end, struct page **pages, - void *arg); +int try_to_unmap(struct page *, enum ttu_flags flags); -/* Avoid racy checks */ -#define PVMW_SYNC (1 << 0) -/* Look for migarion entries rather than present PTEs */ -#define PVMW_MIGRATION (1 << 1) +/* + * Used by uprobes to replace a userspace page safely + */ +pte_t *__page_check_address(struct page *, struct mm_struct *, + unsigned long, spinlock_t **, int); -struct page_vma_mapped_walk { - struct page *page; - struct vm_area_struct *vma; - unsigned long address; - pmd_t *pmd; - pte_t *pte; - spinlock_t *ptl; - unsigned int flags; -}; - -static inline void 
page_vma_mapped_walk_done(struct page_vma_mapped_walk *pvmw) +static inline pte_t *page_check_address(struct page *page, struct mm_struct *mm, + unsigned long address, + spinlock_t **ptlp, int sync) { - /* HugeTLB pte is set to the relevant page table entry without pte_mapped. */ - if (pvmw->pte && !PageHuge(pvmw->page)) - pte_unmap(pvmw->pte); - if (pvmw->ptl) - spin_unlock(pvmw->ptl); + pte_t *ptep; + + __cond_lock(*ptlp, ptep = __page_check_address(page, mm, address, + ptlp, sync)); + return ptep; } -bool page_vma_mapped_walk(struct page_vma_mapped_walk *pvmw); +/* + * Used by idle page tracking to check if a page was referenced via page + * tables. + */ +#ifdef CONFIG_TRANSPARENT_HUGEPAGE +bool page_check_address_transhuge(struct page *page, struct mm_struct *mm, + unsigned long address, pmd_t **pmdp, + pte_t **ptep, spinlock_t **ptlp); +#else +static inline bool page_check_address_transhuge(struct page *page, + struct mm_struct *mm, unsigned long address, + pmd_t **pmdp, pte_t **ptep, spinlock_t **ptlp) +{ + *ptep = page_check_address(page, mm, address, ptlp, 0); + *pmdp = NULL; + return !!*ptep; +} +#endif /* * Used by swapoff to help locate where page is expected in vma. @@ -241,7 +241,7 @@ int page_mkclean(struct page *); * called in munlock()/munmap() path to check for other vmas holding * the page mlocked. */ -void page_mlock(struct page *page); +int try_to_munlock(struct page *); void remove_migration_ptes(struct page *old, struct page *new, bool locked); @@ -263,19 +263,15 @@ int page_mapped_in_vma(struct page *page, struct vm_area_struct *vma); */ struct rmap_walk_control { void *arg; - /* - * Return false if page table scanning in rmap_walk should be stopped. - * Otherwise, return true. 
- */ - bool (*rmap_one)(struct page *page, struct vm_area_struct *vma, + int (*rmap_one)(struct page *page, struct vm_area_struct *vma, unsigned long addr, void *arg); int (*done)(struct page *page); struct anon_vma *(*anon_lock)(struct page *page); bool (*invalid_vma)(struct vm_area_struct *vma, void *arg); }; -void rmap_walk(struct page *page, struct rmap_walk_control *rwc); -void rmap_walk_locked(struct page *page, struct rmap_walk_control *rwc); +int rmap_walk(struct page *page, struct rmap_walk_control *rwc); +int rmap_walk_locked(struct page *page, struct rmap_walk_control *rwc); #else /* !CONFIG_MMU */ @@ -291,9 +287,7 @@ static inline int page_referenced(struct page *page, int is_locked, return 0; } -static inline void try_to_unmap(struct page *page, enum ttu_flags flags) -{ -} +#define try_to_unmap(page, refs) SWAP_FAIL static inline int page_mkclean(struct page *page) { @@ -303,4 +297,13 @@ static inline int page_mkclean(struct page *page) #endif /* CONFIG_MMU */ +/* + * Return values of try_to_unmap + */ +#define SWAP_SUCCESS 0 +#define SWAP_AGAIN 1 +#define SWAP_FAIL 2 +#define SWAP_MLOCK 3 +#define SWAP_LZFREE 4 + #endif /* _LINUX_RMAP_H */ diff --git a/include/linux/rmi.h b/include/linux/rmi.h index ab7eea01ab..e0aca14760 100644 --- a/include/linux/rmi.h +++ b/include/linux/rmi.h @@ -1,7 +1,10 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright (c) 2011-2016 Synaptics Incorporated * Copyright (c) 2011 Unixphere + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published by + * the Free Software Foundation. */ #ifndef _RMI_H @@ -10,7 +13,6 @@ #include #include #include -#include #include #include #include @@ -97,37 +99,31 @@ struct rmi_2d_sensor_platform_data { bool topbuttonpad; bool kernel_tracking; int dmax; - int dribble; - int palm_detect; }; /** - * struct rmi_gpio_data - overrides defaults for a single F30/F3A GPIOs/LED - * chip. 
+ * struct rmi_f30_data - overrides defaults for a single F30 GPIOs/LED chip. * @buttonpad - the touchpad is a buttonpad, so enable only the first actual * button that is found. - * @trackstick_buttons - Set when the function 30 or 3a is handling the physical - * buttons of the trackstick (as a PS/2 passthrough device). - * @disable - the touchpad incorrectly reports F30/F3A and it should be ignored. + * @trackstick_buttons - Set when the function 30 is handling the physical + * buttons of the trackstick (as a PD/2 passthrough device. + * @disable - the touchpad incorrectly reports F30 and it should be ignored. * This is a special case which is due to misconfigured firmware. */ -struct rmi_gpio_data { +struct rmi_f30_data { bool buttonpad; bool trackstick_buttons; bool disable; }; - -/* - * Set the state of a register - * DEFAULT - use the default value set by the firmware config - * OFF - explicitly disable the register - * ON - explicitly enable the register +/** + * struct rmi_f01_power - override default power management settings. + * */ -enum rmi_reg_state { - RMI_REG_STATE_DEFAULT = 0, - RMI_REG_STATE_OFF = 1, - RMI_REG_STATE_ON = 2 +enum rmi_f01_nosleep { + RMI_F01_NOSLEEP_DEFAULT = 0, + RMI_F01_NOSLEEP_OFF = 1, + RMI_F01_NOSLEEP_ON = 2 }; /** @@ -147,7 +143,7 @@ enum rmi_reg_state { * when the touch sensor is in doze mode, in units of 10ms. */ struct rmi_f01_power_management { - enum rmi_reg_state nosleep; + enum rmi_f01_nosleep nosleep; u8 wakeup_threshold; u8 doze_holdoff; u8 doze_interval; @@ -207,19 +203,17 @@ struct rmi_device_platform_data_spi { * * @reset_delay_ms - after issuing a reset command to the touch sensor, the * driver waits a few milliseconds to give the firmware a chance to - * re-initialize. You can override the default wait period here. - * @irq: irq associated with the attn gpio line, or negative + * to re-initialize. You can override the default wait period here. 
*/ struct rmi_device_platform_data { int reset_delay_ms; - int irq; struct rmi_device_platform_data_spi spi_data; /* function handler pdata */ - struct rmi_2d_sensor_platform_data sensor_pdata; + struct rmi_2d_sensor_platform_data *sensor_pdata; struct rmi_f01_power_management power_management; - struct rmi_gpio_data gpio_data; + struct rmi_f30_data *f30_data; }; /** @@ -270,6 +264,9 @@ struct rmi_transport_dev { struct rmi_device_platform_data pdata; struct input_dev *input; + + void *attn_data; + int attn_size; }; /** @@ -327,24 +324,17 @@ struct rmi_device { }; -struct rmi4_attn_data { - unsigned long irq_status; - size_t size; - void *data; -}; - struct rmi_driver_data { struct list_head function_list; struct rmi_device *rmi_dev; struct rmi_function *f01_container; - struct rmi_function *f34_container; - bool bootloader_mode; + bool f01_bootloader_mode; + u32 attn_count; int num_of_irq_regs; int irq_count; - void *irq_memory; unsigned long *irq_status; unsigned long *fn_irq_bits; unsigned long *current_irq_mask; @@ -352,26 +342,18 @@ struct rmi_driver_data { struct mutex irq_mutex; struct input_dev *input; - struct irq_domain *irqdomain; - u8 pdt_props; - - u8 num_rx_electrodes; - u8 num_tx_electrodes; + u8 bsr; bool enabled; - struct mutex enabled_mutex; - struct rmi4_attn_data attn_data; - DECLARE_KFIFO(attn_fifo, struct rmi4_attn_data, 16); + void *data; }; int rmi_register_transport_device(struct rmi_transport_dev *xport); void rmi_unregister_transport_device(struct rmi_transport_dev *xport); +int rmi_process_interrupt_requests(struct rmi_device *rmi_dev); -void rmi_set_attn_data(struct rmi_device *rmi_dev, unsigned long irq_status, - void *data, size_t size); - -int rmi_driver_suspend(struct rmi_device *rmi_dev, bool enable_wake); -int rmi_driver_resume(struct rmi_device *rmi_dev, bool clear_wake); +int rmi_driver_suspend(struct rmi_device *rmi_dev); +int rmi_driver_resume(struct rmi_device *rmi_dev); #endif diff --git a/include/linux/rndis.h 
b/include/linux/rndis.h index 882587c2b1..93c0a64aef 100644 --- a/include/linux/rndis.h +++ b/include/linux/rndis.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ /* * Remote Network Driver Interface Specification (RNDIS) * definitions of the magic numbers used by this protocol diff --git a/include/linux/root_dev.h b/include/linux/root_dev.h index 4e78651371..ed241aad7c 100644 --- a/include/linux/root_dev.h +++ b/include/linux/root_dev.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _ROOT_DEV_H_ #define _ROOT_DEV_H_ @@ -8,7 +7,6 @@ enum { Root_NFS = MKDEV(UNNAMED_MAJOR, 255), - Root_CIFS = MKDEV(UNNAMED_MAJOR, 254), Root_RAM0 = MKDEV(RAMDISK_MAJOR, 0), Root_RAM1 = MKDEV(RAMDISK_MAJOR, 1), Root_FD0 = MKDEV(FLOPPY_MAJOR, 0), diff --git a/include/linux/rpmsg.h b/include/linux/rpmsg.h index d97dcd049f..452d393cc8 100644 --- a/include/linux/rpmsg.h +++ b/include/linux/rpmsg.h @@ -1,10 +1,35 @@ -/* SPDX-License-Identifier: BSD-3-Clause */ /* * Remote processor messaging * * Copyright (C) 2011 Texas Instruments, Inc. * Copyright (C) 2011 Google, Inc. * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name Texas Instruments nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #ifndef _LINUX_RPMSG_H @@ -12,13 +37,11 @@ #include #include -#include #include #include #include -#include -#include -#include + +#define RPMSG_ADDR_ANY 0xFFFFFFFF struct rpmsg_device; struct rpmsg_endpoint; @@ -41,22 +64,18 @@ struct rpmsg_channel_info { * rpmsg_device - device that belong to the rpmsg bus * @dev: the device struct * @id: device id (used to match between rpmsg drivers and devices) - * @driver_override: driver name to force a match * @src: local address * @dst: destination address * @ept: the rpmsg endpoint of this channel * @announce: if set, rpmsg will announce the creation/removal of this channel - * @little_endian: True if transport is using little endian byte representation */ struct rpmsg_device { struct device dev; struct rpmsg_device_id id; - char *driver_override; u32 src; u32 dst; struct rpmsg_endpoint *ept; bool announce; - bool little_endian; const struct rpmsg_device_ops *ops; }; @@ -113,59 +132,8 @@ struct rpmsg_driver { int (*callback)(struct rpmsg_device *, void *, int, void *, u32); }; -static inline u16 rpmsg16_to_cpu(struct rpmsg_device *rpdev, __rpmsg16 val) -{ - if (!rpdev) - return __rpmsg16_to_cpu(rpmsg_is_little_endian(), val); - 
else - return __rpmsg16_to_cpu(rpdev->little_endian, val); -} - -static inline __rpmsg16 cpu_to_rpmsg16(struct rpmsg_device *rpdev, u16 val) -{ - if (!rpdev) - return __cpu_to_rpmsg16(rpmsg_is_little_endian(), val); - else - return __cpu_to_rpmsg16(rpdev->little_endian, val); -} - -static inline u32 rpmsg32_to_cpu(struct rpmsg_device *rpdev, __rpmsg32 val) -{ - if (!rpdev) - return __rpmsg32_to_cpu(rpmsg_is_little_endian(), val); - else - return __rpmsg32_to_cpu(rpdev->little_endian, val); -} - -static inline __rpmsg32 cpu_to_rpmsg32(struct rpmsg_device *rpdev, u32 val) -{ - if (!rpdev) - return __cpu_to_rpmsg32(rpmsg_is_little_endian(), val); - else - return __cpu_to_rpmsg32(rpdev->little_endian, val); -} - -static inline u64 rpmsg64_to_cpu(struct rpmsg_device *rpdev, __rpmsg64 val) -{ - if (!rpdev) - return __rpmsg64_to_cpu(rpmsg_is_little_endian(), val); - else - return __rpmsg64_to_cpu(rpdev->little_endian, val); -} - -static inline __rpmsg64 cpu_to_rpmsg64(struct rpmsg_device *rpdev, u64 val) -{ - if (!rpdev) - return __cpu_to_rpmsg64(rpmsg_is_little_endian(), val); - else - return __cpu_to_rpmsg64(rpdev->little_endian, val); -} - -#if IS_ENABLED(CONFIG_RPMSG) - -int rpmsg_register_device(struct rpmsg_device *rpdev); -int rpmsg_unregister_device(struct device *parent, - struct rpmsg_channel_info *chinfo); +int register_rpmsg_device(struct rpmsg_device *dev); +void unregister_rpmsg_device(struct rpmsg_device *dev); int __register_rpmsg_driver(struct rpmsg_driver *drv, struct module *owner); void unregister_rpmsg_driver(struct rpmsg_driver *drv); void rpmsg_destroy_ept(struct rpmsg_endpoint *); @@ -173,131 +141,6 @@ struct rpmsg_endpoint *rpmsg_create_ept(struct rpmsg_device *, rpmsg_rx_cb_t cb, void *priv, struct rpmsg_channel_info chinfo); -int rpmsg_send(struct rpmsg_endpoint *ept, void *data, int len); -int rpmsg_sendto(struct rpmsg_endpoint *ept, void *data, int len, u32 dst); -int rpmsg_send_offchannel(struct rpmsg_endpoint *ept, u32 src, u32 dst, - void 
*data, int len); - -int rpmsg_trysend(struct rpmsg_endpoint *ept, void *data, int len); -int rpmsg_trysendto(struct rpmsg_endpoint *ept, void *data, int len, u32 dst); -int rpmsg_trysend_offchannel(struct rpmsg_endpoint *ept, u32 src, u32 dst, - void *data, int len); - -__poll_t rpmsg_poll(struct rpmsg_endpoint *ept, struct file *filp, - poll_table *wait); - -#else - -static inline int rpmsg_register_device(struct rpmsg_device *rpdev) -{ - return -ENXIO; -} - -static inline int rpmsg_unregister_device(struct device *parent, - struct rpmsg_channel_info *chinfo) -{ - /* This shouldn't be possible */ - WARN_ON(1); - - return -ENXIO; -} - -static inline int __register_rpmsg_driver(struct rpmsg_driver *drv, - struct module *owner) -{ - /* This shouldn't be possible */ - WARN_ON(1); - - return -ENXIO; -} - -static inline void unregister_rpmsg_driver(struct rpmsg_driver *drv) -{ - /* This shouldn't be possible */ - WARN_ON(1); -} - -static inline void rpmsg_destroy_ept(struct rpmsg_endpoint *ept) -{ - /* This shouldn't be possible */ - WARN_ON(1); -} - -static inline struct rpmsg_endpoint *rpmsg_create_ept(struct rpmsg_device *rpdev, - rpmsg_rx_cb_t cb, - void *priv, - struct rpmsg_channel_info chinfo) -{ - /* This shouldn't be possible */ - WARN_ON(1); - - return ERR_PTR(-ENXIO); -} - -static inline int rpmsg_send(struct rpmsg_endpoint *ept, void *data, int len) -{ - /* This shouldn't be possible */ - WARN_ON(1); - - return -ENXIO; -} - -static inline int rpmsg_sendto(struct rpmsg_endpoint *ept, void *data, int len, - u32 dst) -{ - /* This shouldn't be possible */ - WARN_ON(1); - - return -ENXIO; - -} - -static inline int rpmsg_send_offchannel(struct rpmsg_endpoint *ept, u32 src, - u32 dst, void *data, int len) -{ - /* This shouldn't be possible */ - WARN_ON(1); - - return -ENXIO; -} - -static inline int rpmsg_trysend(struct rpmsg_endpoint *ept, void *data, int len) -{ - /* This shouldn't be possible */ - WARN_ON(1); - - return -ENXIO; -} - -static inline int 
rpmsg_trysendto(struct rpmsg_endpoint *ept, void *data, - int len, u32 dst) -{ - /* This shouldn't be possible */ - WARN_ON(1); - - return -ENXIO; -} - -static inline int rpmsg_trysend_offchannel(struct rpmsg_endpoint *ept, u32 src, - u32 dst, void *data, int len) -{ - /* This shouldn't be possible */ - WARN_ON(1); - - return -ENXIO; -} - -static inline __poll_t rpmsg_poll(struct rpmsg_endpoint *ept, - struct file *filp, poll_table *wait) -{ - /* This shouldn't be possible */ - WARN_ON(1); - - return 0; -} - -#endif /* IS_ENABLED(CONFIG_RPMSG) */ - /* use a macro to avoid include chaining to get THIS_MODULE */ #define register_rpmsg_driver(drv) \ __register_rpmsg_driver(drv, THIS_MODULE) @@ -314,4 +157,14 @@ static inline __poll_t rpmsg_poll(struct rpmsg_endpoint *ept, module_driver(__rpmsg_driver, register_rpmsg_driver, \ unregister_rpmsg_driver) +int rpmsg_send(struct rpmsg_endpoint *ept, void *data, int len); +int rpmsg_sendto(struct rpmsg_endpoint *ept, void *data, int len, u32 dst); +int rpmsg_send_offchannel(struct rpmsg_endpoint *ept, u32 src, u32 dst, + void *data, int len); + +int rpmsg_trysend(struct rpmsg_endpoint *ept, void *data, int len); +int rpmsg_trysendto(struct rpmsg_endpoint *ept, void *data, int len, u32 dst); +int rpmsg_trysend_offchannel(struct rpmsg_endpoint *ept, u32 src, u32 dst, + void *data, int len); + #endif /* _LINUX_RPMSG_H */ diff --git a/include/linux/rslib.h b/include/linux/rslib.h index 238bb85243..746580c193 100644 --- a/include/linux/rslib.h +++ b/include/linux/rslib.h @@ -1,21 +1,28 @@ -// SPDX-License-Identifier: GPL-2.0 /* - * Generic Reed Solomon encoder / decoder library + * include/linux/rslib.h + * + * Overview: + * Generic Reed Solomon encoder / decoder library * * Copyright (C) 2004 Thomas Gleixner (tglx@linutronix.de) * * RS code lifted from reed solomon library written by Phil Karn * Copyright 2002 Phil Karn, KA9Q + * + * $Id: rslib.h,v 1.4 2005/11/07 11:14:52 gleixner Exp $ + * + * This program is free software; you 
can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. */ + #ifndef _RSLIB_H_ #define _RSLIB_H_ #include -#include /* for gfp_t */ -#include /* for GFP_KERNEL */ /** - * struct rs_codec - rs codec data + * struct rs_control - rs control structure * * @mm: Bits per symbol * @nn: Symbols per block (= (1<= 3 * rs->nn */ -static inline int rs_modnn(struct rs_codec *rs, int x) +static inline int rs_modnn(struct rs_control *rs, int x) { while (x >= rs->nn) { x -= rs->nn; diff --git a/include/linux/rtc.h b/include/linux/rtc.h index bd611e2629..b693adac85 100644 --- a/include/linux/rtc.h +++ b/include/linux/rtc.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ /* * Generic RTC interface. * This version contains the part of the user interface to the Real Time Clock @@ -15,7 +14,6 @@ #include #include -#include #include extern int rtc_month_days(unsigned int month, unsigned int year); @@ -34,6 +32,24 @@ static inline time64_t rtc_tm_sub(struct rtc_time *lhs, struct rtc_time *rhs) return rtc_tm_to_time64(lhs) - rtc_tm_to_time64(rhs); } +/** + * Deprecated. Use rtc_time64_to_tm(). + */ +static inline void rtc_time_to_tm(unsigned long time, struct rtc_time *tm) +{ + rtc_time64_to_tm(time, tm); +} + +/** + * Deprecated. Use rtc_tm_to_time64(). + */ +static inline int rtc_tm_to_time(struct rtc_time *tm, unsigned long *time) +{ + *time = rtc_tm_to_time64(tm); + + return 0; +} + #include #include #include @@ -55,29 +71,43 @@ extern struct class *rtc_class; * * The (current) exceptions are mostly filesystem hooks: * - the proc() hook for procfs + * - non-ioctl() chardev hooks: open(), release(), read_callback() + * + * REVISIT those periodic irq calls *do* have ops_lock when they're + * issued through ioctl() ... 
*/ struct rtc_class_ops { + int (*open)(struct device *); + void (*release)(struct device *); int (*ioctl)(struct device *, unsigned int, unsigned long); int (*read_time)(struct device *, struct rtc_time *); int (*set_time)(struct device *, struct rtc_time *); int (*read_alarm)(struct device *, struct rtc_wkalrm *); int (*set_alarm)(struct device *, struct rtc_wkalrm *); int (*proc)(struct device *, struct seq_file *); + int (*set_mmss64)(struct device *, time64_t secs); + int (*set_mmss)(struct device *, unsigned long secs); + int (*read_callback)(struct device *, int data); int (*alarm_irq_enable)(struct device *, unsigned int enabled); int (*read_offset)(struct device *, long *offset); int (*set_offset)(struct device *, long offset); }; -struct rtc_device; +#define RTC_DEVICE_NAME_SIZE 20 +typedef struct rtc_task { + void (*func)(void *private_data); + void *private_data; +} rtc_task_t; + struct rtc_timer { + struct rtc_task task; struct timerqueue_node node; ktime_t period; - void (*func)(struct rtc_device *rtc); - struct rtc_device *rtc; int enabled; }; + /* flags */ #define RTC_DEV_BUSY 0 @@ -86,6 +116,7 @@ struct rtc_device { struct module *owner; int id; + char name[RTC_DEVICE_NAME_SIZE]; const struct rtc_class_ops *ops; struct mutex ops_lock; @@ -98,6 +129,8 @@ struct rtc_device { wait_queue_head_t irq_queue; struct fasync_struct *async_queue; + struct rtc_task *irq_task; + spinlock_t irq_task_lock; int irq_freq; int max_user_freq; @@ -110,45 +143,6 @@ struct rtc_device { /* Some hardware can't support UIE mode */ int uie_unsupported; - /* - * This offset specifies the update timing of the RTC. - * - * tsched t1 write(t2.tv_sec - 1sec)) t2 RTC increments seconds - * - * The offset defines how tsched is computed so that the write to - * the RTC (t2.tv_sec - 1sec) is correct versus the time required - * for the transport of the write and the time which the RTC needs - * to increment seconds the first time after the write (t2). 
- * - * For direct accessible RTCs tsched ~= t1 because the write time - * is negligible. For RTCs behind slow busses the transport time is - * significant and has to be taken into account. - * - * The time between the write (t1) and the first increment after - * the write (t2) is RTC specific. For a MC146818 RTC it's 500ms, - * for many others it's exactly 1 second. Consult the datasheet. - * - * The value of this offset is also used to calculate the to be - * written value (t2.tv_sec - 1sec) at tsched. - * - * The default value for this is NSEC_PER_SEC + 10 msec default - * transport time. The offset can be adjusted by drivers so the - * calculation for the to be written value at tsched becomes - * correct: - * - * newval = tsched + set_offset_nsec - NSEC_PER_SEC - * and (tsched + set_offset_nsec) % NSEC_PER_SEC == 0 - */ - unsigned long set_offset_nsec; - - unsigned long features[BITS_TO_LONGS(RTC_FEATURE_CNT)]; - - time64_t range_min; - timeu64_t range_max; - time64_t start_secs; - time64_t offset_secs; - bool set_start_time; - #ifdef CONFIG_RTC_INTF_DEV_UIE_EMUL struct work_struct uie_task; struct timer_list uie_timer; @@ -162,28 +156,21 @@ struct rtc_device { }; #define to_rtc_device(d) container_of(d, struct rtc_device, dev) -#define rtc_lock(d) mutex_lock(&d->ops_lock) -#define rtc_unlock(d) mutex_unlock(&d->ops_lock) - -/* useful timestamps */ -#define RTC_TIMESTAMP_BEGIN_0000 -62167219200ULL /* 0000-01-01 00:00:00 */ -#define RTC_TIMESTAMP_BEGIN_1900 -2208988800LL /* 1900-01-01 00:00:00 */ -#define RTC_TIMESTAMP_BEGIN_2000 946684800LL /* 2000-01-01 00:00:00 */ -#define RTC_TIMESTAMP_END_2063 2966371199LL /* 2063-12-31 23:59:59 */ -#define RTC_TIMESTAMP_END_2079 3471292799LL /* 2079-12-31 23:59:59 */ -#define RTC_TIMESTAMP_END_2099 4102444799LL /* 2099-12-31 23:59:59 */ -#define RTC_TIMESTAMP_END_2199 7258118399LL /* 2199-12-31 23:59:59 */ -#define RTC_TIMESTAMP_END_9999 253402300799LL /* 9999-12-31 23:59:59 */ - +extern struct rtc_device 
*rtc_device_register(const char *name, + struct device *dev, + const struct rtc_class_ops *ops, + struct module *owner); extern struct rtc_device *devm_rtc_device_register(struct device *dev, const char *name, const struct rtc_class_ops *ops, struct module *owner); -struct rtc_device *devm_rtc_allocate_device(struct device *dev); -int __devm_rtc_register_device(struct module *owner, struct rtc_device *rtc); +extern void rtc_device_unregister(struct rtc_device *rtc); +extern void devm_rtc_device_unregister(struct device *dev, + struct rtc_device *rtc); extern int rtc_read_time(struct rtc_device *rtc, struct rtc_time *tm); extern int rtc_set_time(struct rtc_device *rtc, struct rtc_time *tm); +extern int rtc_set_ntp_time(struct timespec64 now); int __rtc_read_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm); extern int rtc_read_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alrm); @@ -197,20 +184,29 @@ extern void rtc_update_irq(struct rtc_device *rtc, extern struct rtc_device *rtc_class_open(const char *name); extern void rtc_class_close(struct rtc_device *rtc); -extern int rtc_irq_set_state(struct rtc_device *rtc, int enabled); -extern int rtc_irq_set_freq(struct rtc_device *rtc, int freq); +extern int rtc_irq_register(struct rtc_device *rtc, + struct rtc_task *task); +extern void rtc_irq_unregister(struct rtc_device *rtc, + struct rtc_task *task); +extern int rtc_irq_set_state(struct rtc_device *rtc, + struct rtc_task *task, int enabled); +extern int rtc_irq_set_freq(struct rtc_device *rtc, + struct rtc_task *task, int freq); extern int rtc_update_irq_enable(struct rtc_device *rtc, unsigned int enabled); extern int rtc_alarm_irq_enable(struct rtc_device *rtc, unsigned int enabled); extern int rtc_dev_update_irq_enable_emul(struct rtc_device *rtc, unsigned int enabled); void rtc_handle_legacy_irq(struct rtc_device *rtc, int num, int mode); -void rtc_aie_update_irq(struct rtc_device *rtc); -void rtc_uie_update_irq(struct rtc_device *rtc); +void 
rtc_aie_update_irq(void *private); +void rtc_uie_update_irq(void *private); enum hrtimer_restart rtc_pie_update_irq(struct hrtimer *timer); -void rtc_timer_init(struct rtc_timer *timer, void (*f)(struct rtc_device *r), - struct rtc_device *rtc); +int rtc_register(rtc_task_t *task); +int rtc_unregister(rtc_task_t *task); +int rtc_control(rtc_task_t *t, unsigned int cmd, unsigned long arg); + +void rtc_timer_init(struct rtc_timer *timer, void (*f)(void *p), void *data); int rtc_timer_start(struct rtc_device *rtc, struct rtc_timer *timer, ktime_t expires, ktime_t period); void rtc_timer_cancel(struct rtc_device *rtc, struct rtc_timer *timer); @@ -223,40 +219,10 @@ static inline bool is_leap_year(unsigned int year) return (!(year % 4) && (year % 100)) || !(year % 400); } -#define devm_rtc_register_device(device) \ - __devm_rtc_register_device(THIS_MODULE, device) - #ifdef CONFIG_RTC_HCTOSYS_DEVICE extern int rtc_hctosys_ret; #else #define rtc_hctosys_ret -ENODEV #endif -#ifdef CONFIG_RTC_NVMEM -int devm_rtc_nvmem_register(struct rtc_device *rtc, - struct nvmem_config *nvmem_config); -#else -static inline int devm_rtc_nvmem_register(struct rtc_device *rtc, - struct nvmem_config *nvmem_config) -{ - return 0; -} -#endif - -#ifdef CONFIG_RTC_INTF_SYSFS -int rtc_add_group(struct rtc_device *rtc, const struct attribute_group *grp); -int rtc_add_groups(struct rtc_device *rtc, const struct attribute_group **grps); -#else -static inline -int rtc_add_group(struct rtc_device *rtc, const struct attribute_group *grp) -{ - return 0; -} - -static inline -int rtc_add_groups(struct rtc_device *rtc, const struct attribute_group **grps) -{ - return 0; -} -#endif #endif /* _LINUX_RTC_H_ */ diff --git a/include/linux/rtc/ds1685.h b/include/linux/rtc/ds1685.h index 67ee9d20cc..e6337a56d7 100644 --- a/include/linux/rtc/ds1685.h +++ b/include/linux/rtc/ds1685.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * Definitions for the registers, addresses, and platform data of the 
* DS1685/DS1687-series RTC chips. @@ -16,6 +15,10 @@ * DS17x85/DS17x87 3V/5V Real-Time Clocks, 19-5222, Rev 4/10. * DS1689/DS1693 3V/5V Serialized Real-Time Clocks, Rev 112105. * Application Note 90, Using the Multiplex Bus RTC Extended Features. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. */ #ifndef _LINUX_RTC_DS1685_H_ @@ -42,11 +45,16 @@ struct ds1685_priv { struct rtc_device *dev; void __iomem *regs; - void __iomem *data; u32 regstep; + resource_size_t baseaddr; + size_t size; + spinlock_t lock; + struct work_struct work; int irq_num; bool bcd_mode; bool no_irq; + bool uie_unsupported; + bool alloc_io_resources; u8 (*read)(struct ds1685_priv *, int); void (*write)(struct ds1685_priv *, int, u8); void (*prepare_poweroff)(void); @@ -71,13 +79,12 @@ struct ds1685_rtc_platform_data { const bool bcd_mode; const bool no_irq; const bool uie_unsupported; + const bool alloc_io_resources; + u8 (*plat_read)(struct ds1685_priv *, int); + void (*plat_write)(struct ds1685_priv *, int, u8); void (*plat_prepare_poweroff)(void); void (*plat_wake_alarm)(void); void (*plat_post_ram_clear)(void); - enum { - ds1685_reg_direct, - ds1685_reg_indirect - } access_type; }; diff --git a/include/linux/rtc/m48t59.h b/include/linux/rtc/m48t59.h index 9465d5405f..6fc961459b 100644 --- a/include/linux/rtc/m48t59.h +++ b/include/linux/rtc/m48t59.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * include/linux/rtc/m48t59.h * @@ -7,6 +6,10 @@ * Copyright (c) 2007 Wind River Systems, Inc. * * Mark Zhan + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
*/ #ifndef _LINUX_RTC_M48T59_H_ diff --git a/include/linux/rtc/sirfsoc_rtciobrg.h b/include/linux/rtc/sirfsoc_rtciobrg.h index b31f285673..aefd997262 100644 --- a/include/linux/rtc/sirfsoc_rtciobrg.h +++ b/include/linux/rtc/sirfsoc_rtciobrg.h @@ -1,9 +1,10 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * RTC I/O Bridge interfaces for CSR SiRFprimaII * ARM access the registers of SYSRTC, GPSRTC and PWRC through this module * * Copyright (c) 2011 Cambridge Silicon Radio Limited, a CSR plc group company. + * + * Licensed under GPLv2 or later. */ #ifndef _SIRFSOC_RTC_IOBRG_H_ #define _SIRFSOC_RTC_IOBRG_H_ diff --git a/include/linux/rtmutex.h b/include/linux/rtmutex.h index 9deedfeec2..1abba5ce2a 100644 --- a/include/linux/rtmutex.h +++ b/include/linux/rtmutex.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ /* * RT Mutexes: blocking mutual exclusion locks with PI support * @@ -13,51 +12,30 @@ #ifndef __LINUX_RT_MUTEX_H #define __LINUX_RT_MUTEX_H -#include #include -#include -#include +#include +#include extern int max_lock_depth; /* for sysctl */ -struct rt_mutex_base { - raw_spinlock_t wait_lock; - struct rb_root_cached waiters; - struct task_struct *owner; -}; - -#define __RT_MUTEX_BASE_INITIALIZER(rtbasename) \ -{ \ - .wait_lock = __RAW_SPIN_LOCK_UNLOCKED(rtbasename.wait_lock), \ - .waiters = RB_ROOT_CACHED, \ - .owner = NULL \ -} - -/** - * rt_mutex_base_is_locked - is the rtmutex locked - * @lock: the mutex to be queried - * - * Returns true if the mutex is locked, false if unlocked. - */ -static inline bool rt_mutex_base_is_locked(struct rt_mutex_base *lock) -{ - return READ_ONCE(lock->owner) != NULL; -} - -extern void rt_mutex_base_init(struct rt_mutex_base *rtb); - /** * The rt_mutex structure * * @wait_lock: spinlock to protect the structure - * @waiters: rbtree root to enqueue waiters in priority order; - * caches top-waiter (leftmost node). 
+ * @waiters: rbtree root to enqueue waiters in priority order + * @waiters_leftmost: top waiter * @owner: the mutex owner */ struct rt_mutex { - struct rt_mutex_base rtmutex; -#ifdef CONFIG_DEBUG_LOCK_ALLOC - struct lockdep_map dep_map; + raw_spinlock_t wait_lock; + struct rb_root waiters; + struct rb_node *waiters_leftmost; + struct task_struct *owner; +#ifdef CONFIG_DEBUG_RT_MUTEXES + int save_state; + const char *name, *file; + int line; + void *magic; #endif }; @@ -65,47 +43,57 @@ struct rt_mutex_waiter; struct hrtimer_sleeper; #ifdef CONFIG_DEBUG_RT_MUTEXES -extern void rt_mutex_debug_task_free(struct task_struct *tsk); + extern int rt_mutex_debug_check_no_locks_freed(const void *from, + unsigned long len); + extern void rt_mutex_debug_check_no_locks_held(struct task_struct *task); #else -static inline void rt_mutex_debug_task_free(struct task_struct *tsk) { } + static inline int rt_mutex_debug_check_no_locks_freed(const void *from, + unsigned long len) + { + return 0; + } +# define rt_mutex_debug_check_no_locks_held(task) do { } while (0) #endif -#define rt_mutex_init(mutex) \ -do { \ - static struct lock_class_key __key; \ - __rt_mutex_init(mutex, __func__, &__key); \ -} while (0) - -#ifdef CONFIG_DEBUG_LOCK_ALLOC -#define __DEP_MAP_RT_MUTEX_INITIALIZER(mutexname) \ - .dep_map = { \ - .name = #mutexname, \ - .wait_type_inner = LD_WAIT_SLEEP, \ - } +#ifdef CONFIG_DEBUG_RT_MUTEXES +# define __DEBUG_RT_MUTEX_INITIALIZER(mutexname) \ + , .name = #mutexname, .file = __FILE__, .line = __LINE__ +# define rt_mutex_init(mutex) __rt_mutex_init(mutex, __func__) + extern void rt_mutex_debug_task_free(struct task_struct *tsk); #else -#define __DEP_MAP_RT_MUTEX_INITIALIZER(mutexname) +# define __DEBUG_RT_MUTEX_INITIALIZER(mutexname) +# define rt_mutex_init(mutex) __rt_mutex_init(mutex, NULL) +# define rt_mutex_debug_task_free(t) do { } while (0) #endif -#define __RT_MUTEX_INITIALIZER(mutexname) \ -{ \ - .rtmutex = __RT_MUTEX_BASE_INITIALIZER(mutexname.rtmutex), \ - 
__DEP_MAP_RT_MUTEX_INITIALIZER(mutexname) \ -} +#define __RT_MUTEX_INITIALIZER(mutexname) \ + { .wait_lock = __RAW_SPIN_LOCK_UNLOCKED(mutexname.wait_lock) \ + , .waiters = RB_ROOT \ + , .owner = NULL \ + __DEBUG_RT_MUTEX_INITIALIZER(mutexname)} #define DEFINE_RT_MUTEX(mutexname) \ struct rt_mutex mutexname = __RT_MUTEX_INITIALIZER(mutexname) -extern void __rt_mutex_init(struct rt_mutex *lock, const char *name, struct lock_class_key *key); +/** + * rt_mutex_is_locked - is the mutex locked + * @lock: the mutex to be queried + * + * Returns 1 if the mutex is locked, 0 if unlocked. + */ +static inline int rt_mutex_is_locked(struct rt_mutex *lock) +{ + return lock->owner != NULL; +} + +extern void __rt_mutex_init(struct rt_mutex *lock, const char *name); +extern void rt_mutex_destroy(struct rt_mutex *lock); -#ifdef CONFIG_DEBUG_LOCK_ALLOC -extern void rt_mutex_lock_nested(struct rt_mutex *lock, unsigned int subclass); -#define rt_mutex_lock(lock) rt_mutex_lock_nested(lock, 0) -#else extern void rt_mutex_lock(struct rt_mutex *lock); -#define rt_mutex_lock_nested(lock, subclass) rt_mutex_lock(lock) -#endif - extern int rt_mutex_lock_interruptible(struct rt_mutex *lock); +extern int rt_mutex_timed_lock(struct rt_mutex *lock, + struct hrtimer_sleeper *timeout); + extern int rt_mutex_trylock(struct rt_mutex *lock); extern void rt_mutex_unlock(struct rt_mutex *lock); diff --git a/include/linux/rtnetlink.h b/include/linux/rtnetlink.h index bb9cb84114..57e54847b0 100644 --- a/include/linux/rtnetlink.h +++ b/include/linux/rtnetlink.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef __LINUX_RTNETLINK_H #define __LINUX_RTNETLINK_H @@ -6,7 +5,6 @@ #include #include #include -#include #include extern int rtnetlink_send(struct sk_buff *skb, struct net *net, u32 pid, u32 group, int echo); @@ -19,12 +17,8 @@ extern int rtnl_put_cacheinfo(struct sk_buff *skb, struct dst_entry *dst, u32 id, long expires, u32 error); void rtmsg_ifinfo(int type, struct net_device *dev, 
unsigned change, gfp_t flags); -void rtmsg_ifinfo_newnet(int type, struct net_device *dev, unsigned int change, - gfp_t flags, int *new_nsid, int new_ifindex); struct sk_buff *rtmsg_ifinfo_build_skb(int type, struct net_device *dev, - unsigned change, u32 event, - gfp_t flags, int *new_nsid, - int new_ifindex); + unsigned change, gfp_t flags); void rtmsg_ifinfo_send(struct sk_buff *skb, struct net_device *dev, gfp_t flags); @@ -34,12 +28,9 @@ extern void rtnl_lock(void); extern void rtnl_unlock(void); extern int rtnl_trylock(void); extern int rtnl_is_locked(void); -extern int rtnl_lock_killable(void); -extern bool refcount_dec_and_rtnl_lock(refcount_t *r); extern wait_queue_head_t netdev_unregistering_wq; -extern struct rw_semaphore pernet_ops_rwsem; -extern struct rw_semaphore net_rwsem; +extern struct mutex net_mutex; #ifdef CONFIG_PROVE_LOCKING extern bool lockdep_rtnl_is_held(void); @@ -75,7 +66,8 @@ static inline bool lockdep_rtnl_is_held(void) * @p: The pointer to read, prior to dereferencing * * Return the value of the specified RCU-protected pointer, but omit - * the READ_ONCE(), because caller holds RTNL. + * both the smp_read_barrier_depends() and the ACCESS_ONCE(), because + * caller holds RTNL. 
*/ #define rtnl_dereference(p) \ rcu_dereference_protected(p, lockdep_rtnl_is_held()) @@ -85,11 +77,6 @@ static inline struct netdev_queue *dev_ingress_queue(struct net_device *dev) return rtnl_dereference(dev->ingress_queue); } -static inline struct netdev_queue *dev_ingress_queue_rcu(struct net_device *dev) -{ - return rcu_dereference(dev->ingress_queue); -} - struct netdev_queue *dev_ingress_queue_create(struct net_device *dev); #ifdef CONFIG_NET_INGRESS @@ -106,9 +93,13 @@ void rtnetlink_init(void); void __rtnl_unlock(void); void rtnl_kfree_skbs(struct sk_buff *head, struct sk_buff *tail); -#define ASSERT_RTNL() \ - WARN_ONCE(!rtnl_is_locked(), \ - "RTNL: assertion failed at %s (%d)\n", __FILE__, __LINE__) +#define ASSERT_RTNL() do { \ + if (unlikely(!rtnl_is_locked())) { \ + printk(KERN_ERR "RTNL: assertion failed at %s (%d)\n", \ + __FILE__, __LINE__); \ + dump_stack(); \ + } \ +} while(0) extern int ndo_dflt_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb, diff --git a/include/linux/rwlock.h b/include/linux/rwlock.h index 7ce9a51ae5..bc2994ed66 100644 --- a/include/linux/rwlock.h +++ b/include/linux/rwlock.h @@ -38,15 +38,6 @@ do { \ extern int do_raw_write_trylock(rwlock_t *lock); extern void do_raw_write_unlock(rwlock_t *lock) __releases(lock); #else - -#ifndef arch_read_lock_flags -# define arch_read_lock_flags(lock, flags) arch_read_lock(lock) -#endif - -#ifndef arch_write_lock_flags -# define arch_write_lock_flags(lock, flags) arch_write_lock(lock) -#endif - # define do_raw_read_lock(rwlock) do {__acquire(lock); arch_read_lock(&(rwlock)->raw_lock); } while (0) # define do_raw_read_lock_flags(lock, flags) \ do {__acquire(lock); arch_read_lock_flags(&(lock)->raw_lock, *(flags)); } while (0) @@ -59,6 +50,9 @@ do { \ # define do_raw_write_unlock(rwlock) do {arch_write_unlock(&(rwlock)->raw_lock); __release(lock); } while (0) #endif +#define read_can_lock(rwlock) arch_read_can_lock(&(rwlock)->raw_lock) +#define write_can_lock(rwlock) 
arch_write_can_lock(&(rwlock)->raw_lock) + /* * Define the various rw_lock methods. Note we define these * regardless of whether CONFIG_SMP or CONFIG_PREEMPT are set. The various @@ -128,11 +122,4 @@ do { \ 1 : ({ local_irq_restore(flags); 0; }); \ }) -#ifdef arch_rwlock_is_contended -#define rwlock_is_contended(lock) \ - arch_rwlock_is_contended(&(lock)->raw_lock) -#else -#define rwlock_is_contended(lock) ((void)(lock), 0) -#endif /* arch_rwlock_is_contended */ - #endif /* __LINUX_RWLOCK_H */ diff --git a/include/linux/rwlock_api_smp.h b/include/linux/rwlock_api_smp.h index abfb53ab11..5b9b84b204 100644 --- a/include/linux/rwlock_api_smp.h +++ b/include/linux/rwlock_api_smp.h @@ -211,18 +211,18 @@ static inline void __raw_write_lock(rwlock_t *lock) LOCK_CONTENDED(lock, do_raw_write_trylock, do_raw_write_lock); } -#endif /* !CONFIG_GENERIC_LOCKBREAK || CONFIG_DEBUG_LOCK_ALLOC */ +#endif /* CONFIG_PREEMPT */ static inline void __raw_write_unlock(rwlock_t *lock) { - rwlock_release(&lock->dep_map, _RET_IP_); + rwlock_release(&lock->dep_map, 1, _RET_IP_); do_raw_write_unlock(lock); preempt_enable(); } static inline void __raw_read_unlock(rwlock_t *lock) { - rwlock_release(&lock->dep_map, _RET_IP_); + rwlock_release(&lock->dep_map, 1, _RET_IP_); do_raw_read_unlock(lock); preempt_enable(); } @@ -230,7 +230,7 @@ static inline void __raw_read_unlock(rwlock_t *lock) static inline void __raw_read_unlock_irqrestore(rwlock_t *lock, unsigned long flags) { - rwlock_release(&lock->dep_map, _RET_IP_); + rwlock_release(&lock->dep_map, 1, _RET_IP_); do_raw_read_unlock(lock); local_irq_restore(flags); preempt_enable(); @@ -238,7 +238,7 @@ __raw_read_unlock_irqrestore(rwlock_t *lock, unsigned long flags) static inline void __raw_read_unlock_irq(rwlock_t *lock) { - rwlock_release(&lock->dep_map, _RET_IP_); + rwlock_release(&lock->dep_map, 1, _RET_IP_); do_raw_read_unlock(lock); local_irq_enable(); preempt_enable(); @@ -246,7 +246,7 @@ static inline void __raw_read_unlock_irq(rwlock_t 
*lock) static inline void __raw_read_unlock_bh(rwlock_t *lock) { - rwlock_release(&lock->dep_map, _RET_IP_); + rwlock_release(&lock->dep_map, 1, _RET_IP_); do_raw_read_unlock(lock); __local_bh_enable_ip(_RET_IP_, SOFTIRQ_LOCK_OFFSET); } @@ -254,7 +254,7 @@ static inline void __raw_read_unlock_bh(rwlock_t *lock) static inline void __raw_write_unlock_irqrestore(rwlock_t *lock, unsigned long flags) { - rwlock_release(&lock->dep_map, _RET_IP_); + rwlock_release(&lock->dep_map, 1, _RET_IP_); do_raw_write_unlock(lock); local_irq_restore(flags); preempt_enable(); @@ -262,7 +262,7 @@ static inline void __raw_write_unlock_irqrestore(rwlock_t *lock, static inline void __raw_write_unlock_irq(rwlock_t *lock) { - rwlock_release(&lock->dep_map, _RET_IP_); + rwlock_release(&lock->dep_map, 1, _RET_IP_); do_raw_write_unlock(lock); local_irq_enable(); preempt_enable(); @@ -270,7 +270,7 @@ static inline void __raw_write_unlock_irq(rwlock_t *lock) static inline void __raw_write_unlock_bh(rwlock_t *lock) { - rwlock_release(&lock->dep_map, _RET_IP_); + rwlock_release(&lock->dep_map, 1, _RET_IP_); do_raw_write_unlock(lock); __local_bh_enable_ip(_RET_IP_, SOFTIRQ_LOCK_OFFSET); } diff --git a/include/linux/rwlock_types.h b/include/linux/rwlock_types.h index 1948442e77..cc0072e93e 100644 --- a/include/linux/rwlock_types.h +++ b/include/linux/rwlock_types.h @@ -1,29 +1,18 @@ #ifndef __LINUX_RWLOCK_TYPES_H #define __LINUX_RWLOCK_TYPES_H -#if !defined(__LINUX_SPINLOCK_TYPES_H) -# error "Do not include directly, include spinlock_types.h" -#endif - -#ifdef CONFIG_DEBUG_LOCK_ALLOC -# define RW_DEP_MAP_INIT(lockname) \ - .dep_map = { \ - .name = #lockname, \ - .wait_type_inner = LD_WAIT_CONFIG, \ - } -#else -# define RW_DEP_MAP_INIT(lockname) -#endif - -#ifndef CONFIG_PREEMPT_RT /* - * generic rwlock type definitions and initializers + * include/linux/rwlock_types.h - generic rwlock type definitions + * and initializers * * portions Copyright 2005, Red Hat, Inc., Ingo Molnar * Released under the 
General Public License (GPL). */ typedef struct { arch_rwlock_t raw_lock; +#ifdef CONFIG_GENERIC_LOCKBREAK + unsigned int break_lock; +#endif #ifdef CONFIG_DEBUG_SPINLOCK unsigned int magic, owner_cpu; void *owner; @@ -35,6 +24,12 @@ typedef struct { #define RWLOCK_MAGIC 0xdeaf1eed +#ifdef CONFIG_DEBUG_LOCK_ALLOC +# define RW_DEP_MAP_INIT(lockname) .dep_map = { .name = #lockname } +#else +# define RW_DEP_MAP_INIT(lockname) +#endif + #ifdef CONFIG_DEBUG_SPINLOCK #define __RW_LOCK_UNLOCKED(lockname) \ (rwlock_t) { .raw_lock = __ARCH_RW_LOCK_UNLOCKED, \ @@ -50,29 +45,4 @@ typedef struct { #define DEFINE_RWLOCK(x) rwlock_t x = __RW_LOCK_UNLOCKED(x) -#else /* !CONFIG_PREEMPT_RT */ - -#include - -typedef struct { - struct rwbase_rt rwbase; - atomic_t readers; -#ifdef CONFIG_DEBUG_LOCK_ALLOC - struct lockdep_map dep_map; -#endif -} rwlock_t; - -#define __RWLOCK_RT_INITIALIZER(name) \ -{ \ - .rwbase = __RWBASE_INITIALIZER(name), \ - RW_DEP_MAP_INIT(name) \ -} - -#define __RW_LOCK_UNLOCKED(name) __RWLOCK_RT_INITIALIZER(name) - -#define DEFINE_RWLOCK(name) \ - rwlock_t name = __RW_LOCK_UNLOCKED(name) - -#endif /* CONFIG_PREEMPT_RT */ - #endif /* __LINUX_RWLOCK_TYPES_H */ diff --git a/include/linux/rwsem-spinlock.h b/include/linux/rwsem-spinlock.h new file mode 100644 index 0000000000..ae0528b834 --- /dev/null +++ b/include/linux/rwsem-spinlock.h @@ -0,0 +1,45 @@ +/* rwsem-spinlock.h: fallback C implementation + * + * Copyright (c) 2001 David Howells (dhowells@redhat.com). 
+ * - Derived partially from ideas by Andrea Arcangeli + * - Derived also from comments by Linus + */ + +#ifndef _LINUX_RWSEM_SPINLOCK_H +#define _LINUX_RWSEM_SPINLOCK_H + +#ifndef _LINUX_RWSEM_H +#error "please don't include linux/rwsem-spinlock.h directly, use linux/rwsem.h instead" +#endif + +#ifdef __KERNEL__ +/* + * the rw-semaphore definition + * - if count is 0 then there are no active readers or writers + * - if count is +ve then that is the number of active readers + * - if count is -1 then there is one active writer + * - if wait_list is not empty, then there are processes waiting for the semaphore + */ +struct rw_semaphore { + __s32 count; + raw_spinlock_t wait_lock; + struct list_head wait_list; +#ifdef CONFIG_DEBUG_LOCK_ALLOC + struct lockdep_map dep_map; +#endif +}; + +#define RWSEM_UNLOCKED_VALUE 0x00000000 + +extern void __down_read(struct rw_semaphore *sem); +extern int __down_read_trylock(struct rw_semaphore *sem); +extern void __down_write(struct rw_semaphore *sem); +extern int __must_check __down_write_killable(struct rw_semaphore *sem); +extern int __down_write_trylock(struct rw_semaphore *sem); +extern void __up_read(struct rw_semaphore *sem); +extern void __up_write(struct rw_semaphore *sem); +extern void __downgrade_write(struct rw_semaphore *sem); +extern int rwsem_is_locked(struct rw_semaphore *sem); + +#endif /* __KERNEL__ */ +#endif /* _LINUX_RWSEM_SPINLOCK_H */ diff --git a/include/linux/rwsem.h b/include/linux/rwsem.h index 352c6127cb..4d6d861ea0 100644 --- a/include/linux/rwsem.h +++ b/include/linux/rwsem.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ /* rwsem.h: R/W semaphores, public interface * * Written by David Howells (dhowells@redhat.com). 
@@ -16,86 +15,77 @@ #include #include #include - -#ifdef CONFIG_DEBUG_LOCK_ALLOC -# define __RWSEM_DEP_MAP_INIT(lockname) \ - .dep_map = { \ - .name = #lockname, \ - .wait_type_inner = LD_WAIT_SLEEP, \ - }, -#else -# define __RWSEM_DEP_MAP_INIT(lockname) -#endif - -#ifndef CONFIG_PREEMPT_RT - #ifdef CONFIG_RWSEM_SPIN_ON_OWNER #include #endif -/* - * For an uncontended rwsem, count and owner are the only fields a task - * needs to touch when acquiring the rwsem. So they are put next to each - * other to increase the chance that they will share the same cacheline. - * - * In a contended rwsem, the owner is likely the most frequently accessed - * field in the structure as the optimistic waiter that holds the osq lock - * will spin on owner. For an embedded rwsem, other hot fields in the - * containing structure should be moved further away from the rwsem to - * reduce the chance that they will share the same cacheline causing - * cacheline bouncing problem. - */ +struct rw_semaphore; + +#ifdef CONFIG_RWSEM_GENERIC_SPINLOCK +#include /* use a generic implementation */ +#define __RWSEM_INIT_COUNT(name) .count = RWSEM_UNLOCKED_VALUE +#else +/* All arch specific implementations share the same struct */ struct rw_semaphore { atomic_long_t count; - /* - * Write owner or one of the read owners as well flags regarding - * the current state of the rwsem. Can be used as a speculative - * check to see if the write owner is running on the cpu. - */ - atomic_long_t owner; + struct list_head wait_list; + raw_spinlock_t wait_lock; #ifdef CONFIG_RWSEM_SPIN_ON_OWNER struct optimistic_spin_queue osq; /* spinner MCS lock */ -#endif - raw_spinlock_t wait_lock; - struct list_head wait_list; -#ifdef CONFIG_DEBUG_RWSEMS - void *magic; + /* + * Write owner. Used as a speculative check to see + * if the owner is running on the cpu. 
+ */ + struct task_struct *owner; #endif #ifdef CONFIG_DEBUG_LOCK_ALLOC struct lockdep_map dep_map; #endif }; +extern struct rw_semaphore *rwsem_down_read_failed(struct rw_semaphore *sem); +extern struct rw_semaphore *rwsem_down_write_failed(struct rw_semaphore *sem); +extern struct rw_semaphore *rwsem_down_write_failed_killable(struct rw_semaphore *sem); +extern struct rw_semaphore *rwsem_wake(struct rw_semaphore *); +extern struct rw_semaphore *rwsem_downgrade_wake(struct rw_semaphore *sem); + +extern struct rw_semaphore *call_rwsem_down_read_failed(struct rw_semaphore *sem) __rap_hash; +extern struct rw_semaphore *call_rwsem_down_write_failed(struct rw_semaphore *sem) __rap_hash; +extern struct rw_semaphore *call_rwsem_down_write_failed_killable(struct rw_semaphore *sem) __rap_hash; +extern struct rw_semaphore *call_rwsem_wake(struct rw_semaphore *) __rap_hash; +extern struct rw_semaphore *call_rwsem_downgrade_wake(struct rw_semaphore *sem) __rap_hash; + +/* Include the arch specific part */ +#include + /* In all implementations count != 0 means locked */ static inline int rwsem_is_locked(struct rw_semaphore *sem) { return atomic_long_read(&sem->count) != 0; } -#define RWSEM_UNLOCKED_VALUE 0L -#define __RWSEM_COUNT_INIT(name) .count = ATOMIC_LONG_INIT(RWSEM_UNLOCKED_VALUE) +#define __RWSEM_INIT_COUNT(name) .count = ATOMIC_LONG_INIT(RWSEM_UNLOCKED_VALUE) +#endif /* Common initializer macros and functions */ -#ifdef CONFIG_DEBUG_RWSEMS -# define __RWSEM_DEBUG_INIT(lockname) .magic = &lockname, +#ifdef CONFIG_DEBUG_LOCK_ALLOC +# define __RWSEM_DEP_MAP_INIT(lockname) , .dep_map = { .name = #lockname } #else -# define __RWSEM_DEBUG_INIT(lockname) +# define __RWSEM_DEP_MAP_INIT(lockname) #endif #ifdef CONFIG_RWSEM_SPIN_ON_OWNER -#define __RWSEM_OPT_INIT(lockname) .osq = OSQ_LOCK_UNLOCKED, +#define __RWSEM_OPT_INIT(lockname) , .osq = OSQ_LOCK_UNLOCKED, .owner = NULL #else #define __RWSEM_OPT_INIT(lockname) #endif #define __RWSEM_INITIALIZER(name) \ - { 
__RWSEM_COUNT_INIT(name), \ - .owner = ATOMIC_LONG_INIT(0), \ - __RWSEM_OPT_INIT(name) \ - .wait_lock = __RAW_SPIN_LOCK_UNLOCKED(name.wait_lock),\ + { __RWSEM_INIT_COUNT(name), \ .wait_list = LIST_HEAD_INIT((name).wait_list), \ - __RWSEM_DEBUG_INIT(name) \ + .wait_lock = __RAW_SPIN_LOCK_UNLOCKED(name.wait_lock) \ + __RWSEM_OPT_INIT(name) \ __RWSEM_DEP_MAP_INIT(name) } #define DECLARE_RWSEM(name) \ @@ -113,7 +103,7 @@ do { \ /* * This is the same regardless of which rwsem implementation that is being used. - * It is just a heuristic meant to be called by somebody already holding the + * It is just a heuristic meant to be called by somebody alreadying holding the * rwsem to see if somebody from an incompatible type is wanting access to the * lock. */ @@ -122,59 +112,10 @@ static inline int rwsem_is_contended(struct rw_semaphore *sem) return !list_empty(&sem->wait_list); } -#else /* !CONFIG_PREEMPT_RT */ - -#include - -struct rw_semaphore { - struct rwbase_rt rwbase; -#ifdef CONFIG_DEBUG_LOCK_ALLOC - struct lockdep_map dep_map; -#endif -}; - -#define __RWSEM_INITIALIZER(name) \ - { \ - .rwbase = __RWBASE_INITIALIZER(name), \ - __RWSEM_DEP_MAP_INIT(name) \ - } - -#define DECLARE_RWSEM(lockname) \ - struct rw_semaphore lockname = __RWSEM_INITIALIZER(lockname) - -extern void __init_rwsem(struct rw_semaphore *rwsem, const char *name, - struct lock_class_key *key); - -#define init_rwsem(sem) \ -do { \ - static struct lock_class_key __key; \ - \ - __init_rwsem((sem), #sem, &__key); \ -} while (0) - -static __always_inline int rwsem_is_locked(struct rw_semaphore *sem) -{ - return rw_base_is_locked(&sem->rwbase); -} - -static __always_inline int rwsem_is_contended(struct rw_semaphore *sem) -{ - return rw_base_is_contended(&sem->rwbase); -} - -#endif /* CONFIG_PREEMPT_RT */ - -/* - * The functions below are the same for all rwsem implementations including - * the RT specific variant. 
- */ - /* * lock for reading */ extern void down_read(struct rw_semaphore *sem); -extern int __must_check down_read_interruptible(struct rw_semaphore *sem); -extern int __must_check down_read_killable(struct rw_semaphore *sem); /* * trylock for reading -- returns 1 if successful, 0 if contention @@ -219,10 +160,9 @@ extern void downgrade_write(struct rw_semaphore *sem); * static then another method for expressing nested locking is * the explicit definition of lock class keys and the use of * lockdep_set_class() at lock initialization time. - * See Documentation/locking/lockdep-design.rst for more details.) + * See Documentation/locking/lockdep-design.txt for more details.) */ extern void down_read_nested(struct rw_semaphore *sem, int subclass); -extern int __must_check down_read_killable_nested(struct rw_semaphore *sem, int subclass); extern void down_write_nested(struct rw_semaphore *sem, int subclass); extern int down_write_killable_nested(struct rw_semaphore *sem, int subclass); extern void _down_write_nest_lock(struct rw_semaphore *sem, struct lockdep_map *nest_lock); @@ -243,7 +183,6 @@ extern void down_read_non_owner(struct rw_semaphore *sem); extern void up_read_non_owner(struct rw_semaphore *sem); #else # define down_read_nested(sem, subclass) down_read(sem) -# define down_read_killable_nested(sem, subclass) down_read_killable(sem) # define down_write_nest_lock(sem, nest_lock) down_write(sem) # define down_write_nested(sem, subclass) down_write(sem) # define down_write_killable_nested(sem, subclass) down_write_killable(sem) diff --git a/include/linux/rxrpc.h b/include/linux/rxrpc.h new file mode 100644 index 0000000000..c68307bc30 --- /dev/null +++ b/include/linux/rxrpc.h @@ -0,0 +1,72 @@ +/* AF_RXRPC parameters + * + * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved. 
+ * Written by David Howells (dhowells@redhat.com) + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. + */ + +#ifndef _LINUX_RXRPC_H +#define _LINUX_RXRPC_H + +#include +#include + +/* + * RxRPC socket address + */ +struct sockaddr_rxrpc { + sa_family_t srx_family; /* address family */ + u16 srx_service; /* service desired */ + u16 transport_type; /* type of transport socket (SOCK_DGRAM) */ + u16 transport_len; /* length of transport address */ + union { + sa_family_t family; /* transport address family */ + struct sockaddr_in sin; /* IPv4 transport address */ + struct sockaddr_in6 sin6; /* IPv6 transport address */ + } transport; +}; + +/* + * RxRPC socket options + */ +#define RXRPC_SECURITY_KEY 1 /* [clnt] set client security key */ +#define RXRPC_SECURITY_KEYRING 2 /* [srvr] set ring of server security keys */ +#define RXRPC_EXCLUSIVE_CONNECTION 3 /* Deprecated; use RXRPC_EXCLUSIVE_CALL instead */ +#define RXRPC_MIN_SECURITY_LEVEL 4 /* minimum security level */ + +/* + * RxRPC control messages + * - If neither abort or accept are specified, the message is a data message. 
+ * - terminal messages mean that a user call ID tag can be recycled + * - s/r/- indicate whether these are applicable to sendmsg() and/or recvmsg() + */ +#define RXRPC_USER_CALL_ID 1 /* sr: user call ID specifier */ +#define RXRPC_ABORT 2 /* sr: abort request / notification [terminal] */ +#define RXRPC_ACK 3 /* -r: [Service] RPC op final ACK received [terminal] */ +#define RXRPC_NET_ERROR 5 /* -r: network error received [terminal] */ +#define RXRPC_BUSY 6 /* -r: server busy received [terminal] */ +#define RXRPC_LOCAL_ERROR 7 /* -r: local error generated [terminal] */ +#define RXRPC_NEW_CALL 8 /* -r: [Service] new incoming call notification */ +#define RXRPC_ACCEPT 9 /* s-: [Service] accept request */ +#define RXRPC_EXCLUSIVE_CALL 10 /* s-: Call should be on exclusive connection */ + +/* + * RxRPC security levels + */ +#define RXRPC_SECURITY_PLAIN 0 /* plain secure-checksummed packets only */ +#define RXRPC_SECURITY_AUTH 1 /* authenticated packets */ +#define RXRPC_SECURITY_ENCRYPT 2 /* encrypted packets */ + +/* + * RxRPC security indices + */ +#define RXRPC_SECURITY_NONE 0 /* no security protocol */ +#define RXRPC_SECURITY_RXKAD 2 /* kaserver or kerberos 4 */ +#define RXRPC_SECURITY_RXGK 4 /* gssapi-based */ +#define RXRPC_SECURITY_RXK5 5 /* kerberos 5 */ + +#endif /* _LINUX_RXRPC_H */ diff --git a/include/linux/s3c_adc_battery.h b/include/linux/s3c_adc_battery.h index 57f982c375..99dadbffdd 100644 --- a/include/linux/s3c_adc_battery.h +++ b/include/linux/s3c_adc_battery.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _S3C_ADC_BATTERY_H #define _S3C_ADC_BATTERY_H @@ -14,6 +13,9 @@ struct s3c_adc_bat_pdata { void (*enable_charger)(void); void (*disable_charger)(void); + int gpio_charge_finished; + int gpio_inverted; + const struct s3c_adc_bat_thresh *lut_noac; unsigned int lut_noac_cnt; const struct s3c_adc_bat_thresh *lut_acin; diff --git a/include/linux/sa11x0-dma.h b/include/linux/sa11x0-dma.h new file mode 100644 index 0000000000..65839a58b8 
--- /dev/null +++ b/include/linux/sa11x0-dma.h @@ -0,0 +1,24 @@ +/* + * SA11x0 DMA Engine support + * + * Copyright (C) 2012 Russell King + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ +#ifndef __LINUX_SA11X0_DMA_H +#define __LINUX_SA11X0_DMA_H + +struct dma_chan; + +#if defined(CONFIG_DMA_SA11X0) || defined(CONFIG_DMA_SA11X0_MODULE) +bool sa11x0_dma_filter_fn(struct dma_chan *, void *); +#else +static inline bool sa11x0_dma_filter_fn(struct dma_chan *c, void *d) +{ + return false; +} +#endif + +#endif diff --git a/include/linux/sbitmap.h b/include/linux/sbitmap.h index 2713e689ad..f017fd6e69 100644 --- a/include/linux/sbitmap.h +++ b/include/linux/sbitmap.h @@ -1,9 +1,20 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * Fast and scalable bitmaps. * * Copyright (C) 2016 Facebook * Copyright (C) 2013-2014 Jens Axboe + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public + * License v2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . */ #ifndef __LINUX_SCALE_BITMAP_H @@ -12,26 +23,19 @@ #include #include -struct seq_file; - /** * struct sbitmap_word - Word in a &struct sbitmap. */ struct sbitmap_word { /** - * @depth: Number of bits being used in @word/@cleared + * @word: The bitmap word itself. + */ + unsigned long word; + + /** + * @depth: Number of bits being used in @word. 
*/ unsigned long depth; - - /** - * @word: word holding free bits - */ - unsigned long word ____cacheline_aligned_in_smp; - - /** - * @cleared: word holding cleared bits - */ - unsigned long cleared ____cacheline_aligned_in_smp; } ____cacheline_aligned_in_smp; /** @@ -56,23 +60,10 @@ struct sbitmap { */ unsigned int map_nr; - /** - * @round_robin: Allocate bits in strict round-robin order. - */ - bool round_robin; - /** * @map: Allocated bitmap. */ struct sbitmap_word *map; - - /* - * @alloc_hint: Cache of last successfully allocated or freed bit. - * - * This is per-cpu, which allows multiple users to stick to different - * cachelines until the map is exhausted. - */ - unsigned int __percpu *alloc_hint; }; #define SBQ_WAIT_QUEUES 8 @@ -108,6 +99,14 @@ struct sbitmap_queue { */ struct sbitmap sb; + /* + * @alloc_hint: Cache of last successfully allocated or freed bit. + * + * This is per-cpu, which allows multiple users to stick to different + * cachelines until the map is exhausted. + */ + unsigned int __percpu *alloc_hint; + /** * @wake_batch: Number of bits which must be freed before we wake up any * waiters. @@ -124,16 +123,10 @@ struct sbitmap_queue { */ struct sbq_wait_state *ws; - /* - * @ws_active: count of currently active ws waitqueues - */ - atomic_t ws_active; - /** - * @min_shallow_depth: The minimum shallow depth which may be passed to - * sbitmap_queue_get_shallow() or __sbitmap_queue_get_shallow(). + * @round_robin: Allocate bits in strict round-robin order. */ - unsigned int min_shallow_depth; + bool round_robin; }; /** @@ -144,16 +137,11 @@ struct sbitmap_queue { * given, a good default is chosen. * @flags: Allocation flags. * @node: Memory node to allocate on. - * @round_robin: If true, be stricter about allocation order; always allocate - * starting from the last allocated bit. This is less efficient - * than the default behavior (false). - * @alloc_hint: If true, apply percpu hint for where to start searching for - * a free bit. 
* * Return: Zero on success or negative errno on failure. */ int sbitmap_init_node(struct sbitmap *sb, unsigned int depth, int shift, - gfp_t flags, int node, bool round_robin, bool alloc_hint); + gfp_t flags, int node); /** * sbitmap_free() - Free memory used by a &struct sbitmap. @@ -161,7 +149,6 @@ int sbitmap_init_node(struct sbitmap *sb, unsigned int depth, int shift, */ static inline void sbitmap_free(struct sbitmap *sb) { - free_percpu(sb->alloc_hint); kfree(sb->map); sb->map = NULL; } @@ -179,29 +166,14 @@ void sbitmap_resize(struct sbitmap *sb, unsigned int depth); /** * sbitmap_get() - Try to allocate a free bit from a &struct sbitmap. * @sb: Bitmap to allocate from. - * - * This operation provides acquire barrier semantics if it succeeds. + * @alloc_hint: Hint for where to start searching for a free bit. + * @round_robin: If true, be stricter about allocation order; always allocate + * starting from the last allocated bit. This is less efficient + * than the default behavior (false). * * Return: Non-negative allocated bit number if successful, -1 otherwise. */ -int sbitmap_get(struct sbitmap *sb); - -/** - * sbitmap_get_shallow() - Try to allocate a free bit from a &struct sbitmap, - * limiting the depth used from each word. - * @sb: Bitmap to allocate from. - * @shallow_depth: The maximum number of bits to allocate from a single word. - * - * This rather specific operation allows for having multiple users with - * different allocation limits. E.g., there can be a high-priority class that - * uses sbitmap_get() and a low-priority class that uses sbitmap_get_shallow() - * with a @shallow_depth of (1 << (@sb->shift - 1)). Then, the low-priority - * class can only allocate half of the total bits in the bitmap, preventing it - * from starving out the high-priority class. - * - * Return: Non-negative allocated bit number if successful, -1 otherwise. 
- */ -int sbitmap_get_shallow(struct sbitmap *sb, unsigned long shallow_depth); +int sbitmap_get(struct sbitmap *sb, unsigned int alloc_hint, bool round_robin); /** * sbitmap_any_bit_set() - Check for a set bit in a &struct sbitmap. @@ -211,14 +183,19 @@ int sbitmap_get_shallow(struct sbitmap *sb, unsigned long shallow_depth); */ bool sbitmap_any_bit_set(const struct sbitmap *sb); -#define SB_NR_TO_INDEX(sb, bitnr) ((bitnr) >> (sb)->shift) -#define SB_NR_TO_BIT(sb, bitnr) ((bitnr) & ((1U << (sb)->shift) - 1U)) +/** + * sbitmap_any_bit_clear() - Check for an unset bit in a &struct + * sbitmap. + * @sb: Bitmap to check. + * + * Return: true if any bit in the bitmap is clear, false otherwise. + */ +bool sbitmap_any_bit_clear(const struct sbitmap *sb); typedef bool (*sb_for_each_fn)(struct sbitmap *, unsigned int, void *); /** - * __sbitmap_for_each_set() - Iterate over each set bit in a &struct sbitmap. - * @start: Where to start the iteration. + * sbitmap_for_each_set() - Iterate over each set bit in a &struct sbitmap. * @sb: Bitmap to iterate over. * @fn: Callback. Should return true to continue or false to break early. * @data: Pointer to pass to callback. @@ -226,63 +203,35 @@ typedef bool (*sb_for_each_fn)(struct sbitmap *, unsigned int, void *); * This is inline even though it's non-trivial so that the function calls to the * callback will hopefully get optimized away. 
*/ -static inline void __sbitmap_for_each_set(struct sbitmap *sb, - unsigned int start, - sb_for_each_fn fn, void *data) +static inline void sbitmap_for_each_set(struct sbitmap *sb, sb_for_each_fn fn, + void *data) { - unsigned int index; - unsigned int nr; - unsigned int scanned = 0; + unsigned int i; - if (start >= sb->depth) - start = 0; - index = SB_NR_TO_INDEX(sb, start); - nr = SB_NR_TO_BIT(sb, start); + for (i = 0; i < sb->map_nr; i++) { + struct sbitmap_word *word = &sb->map[i]; + unsigned int off, nr; - while (scanned < sb->depth) { - unsigned long word; - unsigned int depth = min_t(unsigned int, - sb->map[index].depth - nr, - sb->depth - scanned); + if (!word->word) + continue; - scanned += depth; - word = sb->map[index].word & ~sb->map[index].cleared; - if (!word) - goto next; - - /* - * On the first iteration of the outer loop, we need to add the - * bit offset back to the size of the word for find_next_bit(). - * On all other iterations, nr is zero, so this is a noop. - */ - depth += nr; + nr = 0; + off = i << sb->shift; while (1) { - nr = find_next_bit(&word, depth, nr); - if (nr >= depth) + nr = find_next_bit(&word->word, word->depth, nr); + if (nr >= word->depth) break; - if (!fn(sb, (index << sb->shift) + nr, data)) + + if (!fn(sb, off + nr, data)) return; nr++; } -next: - nr = 0; - if (++index >= sb->map_nr) - index = 0; } } -/** - * sbitmap_for_each_set() - Iterate over each set bit in a &struct sbitmap. - * @sb: Bitmap to iterate over. - * @fn: Callback. Should return true to continue or false to break early. - * @data: Pointer to pass to callback. 
- */ -static inline void sbitmap_for_each_set(struct sbitmap *sb, sb_for_each_fn fn, - void *data) -{ - __sbitmap_for_each_set(sb, 0, fn, data); -} +#define SB_NR_TO_INDEX(sb, bitnr) ((bitnr) >> (sb)->shift) +#define SB_NR_TO_BIT(sb, bitnr) ((bitnr) & ((1U << (sb)->shift) - 1U)) static inline unsigned long *__sbitmap_word(struct sbitmap *sb, unsigned int bitnr) @@ -302,84 +251,13 @@ static inline void sbitmap_clear_bit(struct sbitmap *sb, unsigned int bitnr) clear_bit(SB_NR_TO_BIT(sb, bitnr), __sbitmap_word(sb, bitnr)); } -/* - * This one is special, since it doesn't actually clear the bit, rather it - * sets the corresponding bit in the ->cleared mask instead. Paired with - * the caller doing sbitmap_deferred_clear() if a given index is full, which - * will clear the previously freed entries in the corresponding ->word. - */ -static inline void sbitmap_deferred_clear_bit(struct sbitmap *sb, unsigned int bitnr) -{ - unsigned long *addr = &sb->map[SB_NR_TO_INDEX(sb, bitnr)].cleared; - - set_bit(SB_NR_TO_BIT(sb, bitnr), addr); -} - -/* - * Pair of sbitmap_get, and this one applies both cleared bit and - * allocation hint. - */ -static inline void sbitmap_put(struct sbitmap *sb, unsigned int bitnr) -{ - sbitmap_deferred_clear_bit(sb, bitnr); - - if (likely(sb->alloc_hint && !sb->round_robin && bitnr < sb->depth)) - *raw_cpu_ptr(sb->alloc_hint) = bitnr; -} - static inline int sbitmap_test_bit(struct sbitmap *sb, unsigned int bitnr) { return test_bit(SB_NR_TO_BIT(sb, bitnr), __sbitmap_word(sb, bitnr)); } -static inline int sbitmap_calculate_shift(unsigned int depth) -{ - int shift = ilog2(BITS_PER_LONG); - - /* - * If the bitmap is small, shrink the number of bits per word so - * we spread over a few cachelines, at least. If less than 4 - * bits, just forget about it, it's not going to work optimally - * anyway. 
- */ - if (depth >= 4) { - while ((4U << shift) > depth) - shift--; - } - - return shift; -} - -/** - * sbitmap_show() - Dump &struct sbitmap information to a &struct seq_file. - * @sb: Bitmap to show. - * @m: struct seq_file to write to. - * - * This is intended for debugging. The format may change at any time. - */ -void sbitmap_show(struct sbitmap *sb, struct seq_file *m); - - -/** - * sbitmap_weight() - Return how many set and not cleared bits in a &struct - * sbitmap. - * @sb: Bitmap to check. - * - * Return: How many set and not cleared bits set - */ unsigned int sbitmap_weight(const struct sbitmap *sb); -/** - * sbitmap_bitmap_show() - Write a hex dump of a &struct sbitmap to a &struct - * seq_file. - * @sb: Bitmap to show. - * @m: struct seq_file to write to. - * - * This is intended for debugging. The output isn't guaranteed to be internally - * consistent. - */ -void sbitmap_bitmap_show(struct sbitmap *sb, struct seq_file *m); - /** * sbitmap_queue_init_node() - Initialize a &struct sbitmap_queue on a specific * memory node. @@ -403,6 +281,7 @@ int sbitmap_queue_init_node(struct sbitmap_queue *sbq, unsigned int depth, static inline void sbitmap_queue_free(struct sbitmap_queue *sbq) { kfree(sbq->ws); + free_percpu(sbq->alloc_hint); sbitmap_free(&sbq->sb); } @@ -426,22 +305,6 @@ void sbitmap_queue_resize(struct sbitmap_queue *sbq, unsigned int depth); */ int __sbitmap_queue_get(struct sbitmap_queue *sbq); -/** - * __sbitmap_queue_get_shallow() - Try to allocate a free bit from a &struct - * sbitmap_queue, limiting the depth used from each word, with preemption - * already disabled. - * @sbq: Bitmap queue to allocate from. - * @shallow_depth: The maximum number of bits to allocate from a single word. - * See sbitmap_get_shallow(). - * - * If you call this, make sure to call sbitmap_queue_min_shallow_depth() after - * initializing @sbq. - * - * Return: Non-negative allocated bit number if successful, -1 otherwise. 
- */ -int __sbitmap_queue_get_shallow(struct sbitmap_queue *sbq, - unsigned int shallow_depth); - /** * sbitmap_queue_get() - Try to allocate a free bit from a &struct * sbitmap_queue. @@ -462,49 +325,6 @@ static inline int sbitmap_queue_get(struct sbitmap_queue *sbq, return nr; } -/** - * sbitmap_queue_get_shallow() - Try to allocate a free bit from a &struct - * sbitmap_queue, limiting the depth used from each word. - * @sbq: Bitmap queue to allocate from. - * @cpu: Output parameter; will contain the CPU we ran on (e.g., to be passed to - * sbitmap_queue_clear()). - * @shallow_depth: The maximum number of bits to allocate from a single word. - * See sbitmap_get_shallow(). - * - * If you call this, make sure to call sbitmap_queue_min_shallow_depth() after - * initializing @sbq. - * - * Return: Non-negative allocated bit number if successful, -1 otherwise. - */ -static inline int sbitmap_queue_get_shallow(struct sbitmap_queue *sbq, - unsigned int *cpu, - unsigned int shallow_depth) -{ - int nr; - - *cpu = get_cpu(); - nr = __sbitmap_queue_get_shallow(sbq, shallow_depth); - put_cpu(); - return nr; -} - -/** - * sbitmap_queue_min_shallow_depth() - Inform a &struct sbitmap_queue of the - * minimum shallow depth that will be used. - * @sbq: Bitmap queue in question. - * @min_shallow_depth: The minimum shallow depth that will be passed to - * sbitmap_queue_get_shallow() or __sbitmap_queue_get_shallow(). - * - * sbitmap_queue_clear() batches wakeups as an optimization. The batch size - * depends on the depth of the bitmap. Since the shallow allocation functions - * effectively operate with a different depth, the shallow depth must be taken - * into account when calculating the batch size. This function must be called - * with the minimum shallow depth that will be used. Failure to do so can result - * in missed wakeups. 
- */ -void sbitmap_queue_min_shallow_depth(struct sbitmap_queue *sbq, - unsigned int min_shallow_depth); - /** * sbitmap_queue_clear() - Free an allocated bit and wake up waiters on a * &struct sbitmap_queue. @@ -550,62 +370,4 @@ static inline struct sbq_wait_state *sbq_wait_ptr(struct sbitmap_queue *sbq, */ void sbitmap_queue_wake_all(struct sbitmap_queue *sbq); -/** - * sbitmap_queue_wake_up() - Wake up some of waiters in one waitqueue - * on a &struct sbitmap_queue. - * @sbq: Bitmap queue to wake up. - */ -void sbitmap_queue_wake_up(struct sbitmap_queue *sbq); - -/** - * sbitmap_queue_show() - Dump &struct sbitmap_queue information to a &struct - * seq_file. - * @sbq: Bitmap queue to show. - * @m: struct seq_file to write to. - * - * This is intended for debugging. The format may change at any time. - */ -void sbitmap_queue_show(struct sbitmap_queue *sbq, struct seq_file *m); - -struct sbq_wait { - struct sbitmap_queue *sbq; /* if set, sbq_wait is accounted */ - struct wait_queue_entry wait; -}; - -#define DEFINE_SBQ_WAIT(name) \ - struct sbq_wait name = { \ - .sbq = NULL, \ - .wait = { \ - .private = current, \ - .func = autoremove_wake_function, \ - .entry = LIST_HEAD_INIT((name).wait.entry), \ - } \ - } - -/* - * Wrapper around prepare_to_wait_exclusive(), which maintains some extra - * internal state. - */ -void sbitmap_prepare_to_wait(struct sbitmap_queue *sbq, - struct sbq_wait_state *ws, - struct sbq_wait *sbq_wait, int state); - -/* - * Must be paired with sbitmap_prepare_to_wait(). 
- */ -void sbitmap_finish_wait(struct sbitmap_queue *sbq, struct sbq_wait_state *ws, - struct sbq_wait *sbq_wait); - -/* - * Wrapper around add_wait_queue(), which maintains some extra internal state - */ -void sbitmap_add_wait_queue(struct sbitmap_queue *sbq, - struct sbq_wait_state *ws, - struct sbq_wait *sbq_wait); - -/* - * Must be paired with sbitmap_add_wait_queue() - */ -void sbitmap_del_wait_queue(struct sbq_wait *sbq_wait); - #endif /* __LINUX_SCALE_BITMAP_H */ diff --git a/include/linux/scatterlist.h b/include/linux/scatterlist.h index 266754a553..a001ef9073 100644 --- a/include/linux/scatterlist.h +++ b/include/linux/scatterlist.h @@ -1,7 +1,7 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_SCATTERLIST_H #define _LINUX_SCATTERLIST_H +#include #include #include #include @@ -9,6 +9,9 @@ #include struct scatterlist { +#ifdef CONFIG_DEBUG_SG + unsigned long sg_magic; +#endif unsigned long page_link; unsigned int offset; unsigned int length; @@ -39,12 +42,6 @@ struct sg_table { unsigned int orig_nents; /* original size of list */ }; -struct sg_append_table { - struct sg_table sgt; /* The scatter list table */ - struct scatterlist *prv; /* last populated sge in the table */ - unsigned int total_nents; /* Total entries in the table */ -}; - /* * Notes on SG table design. * @@ -61,18 +58,17 @@ struct sg_append_table { * */ -#define SG_CHAIN 0x01UL -#define SG_END 0x02UL +#define SG_MAGIC 0x87654321 /* * We overload the LSB of the page pointer to indicate whether it's * a valid sg entry, or whether it points to the start of a new scatterlist. * Those low bits are there for everyone! 
(thanks mason :-) */ -#define sg_is_chain(sg) ((sg)->page_link & SG_CHAIN) -#define sg_is_last(sg) ((sg)->page_link & SG_END) +#define sg_is_chain(sg) ((sg)->page_link & 0x01) +#define sg_is_last(sg) ((sg)->page_link & 0x02) #define sg_chain_ptr(sg) \ - ((struct scatterlist *) ((sg)->page_link & ~(SG_CHAIN | SG_END))) + ((struct scatterlist *) ((sg)->page_link & ~0x03)) /** * sg_assign_page - Assign a given page to an SG entry @@ -86,14 +82,15 @@ struct sg_append_table { **/ static inline void sg_assign_page(struct scatterlist *sg, struct page *page) { - unsigned long page_link = sg->page_link & (SG_CHAIN | SG_END); + unsigned long page_link = sg->page_link & 0x3; /* * In order for the low bit stealing approach to work, pages * must be aligned at a 32-bit boundary as a minimum. */ - BUG_ON((unsigned long) page & (SG_CHAIN | SG_END)); + BUG_ON((unsigned long) page & 0x03); #ifdef CONFIG_DEBUG_SG + BUG_ON(sg->sg_magic != SG_MAGIC); BUG_ON(sg_is_chain(sg)); #endif sg->page_link = page_link | (unsigned long) page; @@ -124,9 +121,10 @@ static inline void sg_set_page(struct scatterlist *sg, struct page *page, static inline struct page *sg_page(struct scatterlist *sg) { #ifdef CONFIG_DEBUG_SG + BUG_ON(sg->sg_magic != SG_MAGIC); BUG_ON(sg_is_chain(sg)); #endif - return (struct page *)((sg)->page_link & ~(SG_CHAIN | SG_END)); + return (struct page *)((sg)->page_link & ~0x3); } /** @@ -139,10 +137,12 @@ static inline struct page *sg_page(struct scatterlist *sg) static inline void sg_set_buf(struct scatterlist *sg, const void *buf, unsigned int buflen) { + const void *realbuf = gr_convert_stack_address_to_lowmem(buf); + #ifdef CONFIG_DEBUG_SG - BUG_ON(!virt_addr_valid(buf)); + BUG_ON(!virt_addr_valid(realbuf)); #endif - sg_set_page(sg, virt_to_page(buf), buflen, offset_in_page(buf)); + sg_set_page(sg, virt_to_page(realbuf), buflen, offset_in_page(realbuf)); } /* @@ -151,36 +151,6 @@ static inline void sg_set_buf(struct scatterlist *sg, const void *buf, #define 
for_each_sg(sglist, sg, nr, __i) \ for (__i = 0, sg = (sglist); __i < (nr); __i++, sg = sg_next(sg)) -/* - * Loop over each sg element in the given sg_table object. - */ -#define for_each_sgtable_sg(sgt, sg, i) \ - for_each_sg((sgt)->sgl, sg, (sgt)->orig_nents, i) - -/* - * Loop over each sg element in the given *DMA mapped* sg_table object. - * Please use sg_dma_address(sg) and sg_dma_len(sg) to extract DMA addresses - * of the each element. - */ -#define for_each_sgtable_dma_sg(sgt, sg, i) \ - for_each_sg((sgt)->sgl, sg, (sgt)->nents, i) - -static inline void __sg_chain(struct scatterlist *chain_sg, - struct scatterlist *sgl) -{ - /* - * offset and length are unused for chain entry. Clear them. - */ - chain_sg->offset = 0; - chain_sg->length = 0; - - /* - * Set lowest bit to indicate a link pointer, and make sure to clear - * the termination bit if it happens to be set. - */ - chain_sg->page_link = ((unsigned long) sgl | SG_CHAIN) & ~SG_END; -} - /** * sg_chain - Chain two sglists together * @prv: First scatterlist @@ -194,7 +164,17 @@ static inline void __sg_chain(struct scatterlist *chain_sg, static inline void sg_chain(struct scatterlist *prv, unsigned int prv_nents, struct scatterlist *sgl) { - __sg_chain(&prv[prv_nents - 1], sgl); + /* + * offset and length are unused for chain entry. Clear them. + */ + prv[prv_nents - 1].offset = 0; + prv[prv_nents - 1].length = 0; + + /* + * Set lowest bit to indicate a link pointer, and make sure to clear + * the termination bit if it happens to be set. 
+ */ + prv[prv_nents - 1].page_link = ((unsigned long) sgl | 0x01) & ~0x02; } /** @@ -208,11 +188,14 @@ static inline void sg_chain(struct scatterlist *prv, unsigned int prv_nents, **/ static inline void sg_mark_end(struct scatterlist *sg) { +#ifdef CONFIG_DEBUG_SG + BUG_ON(sg->sg_magic != SG_MAGIC); +#endif /* * Set termination bit, clear potential chain bit */ - sg->page_link |= SG_END; - sg->page_link &= ~SG_CHAIN; + sg->page_link |= 0x02; + sg->page_link &= ~0x01; } /** @@ -225,7 +208,10 @@ static inline void sg_mark_end(struct scatterlist *sg) **/ static inline void sg_unmark_end(struct scatterlist *sg) { - sg->page_link &= ~SG_END; +#ifdef CONFIG_DEBUG_SG + BUG_ON(sg->sg_magic != SG_MAGIC); +#endif + sg->page_link &= ~0x02; } /** @@ -258,18 +244,6 @@ static inline void *sg_virt(struct scatterlist *sg) return page_address(sg_page(sg)) + sg->offset; } -/** - * sg_init_marker - Initialize markers in sg table - * @sgl: The SG table - * @nents: Number of entries in table - * - **/ -static inline void sg_init_marker(struct scatterlist *sgl, - unsigned int nents) -{ - sg_mark_end(&sgl[nents - 1]); -} - int sg_nents(struct scatterlist *sg); int sg_nents_for_len(struct scatterlist *sg, u64 len); struct scatterlist *sg_next(struct scatterlist *); @@ -285,63 +259,15 @@ int sg_split(struct scatterlist *in, const int in_mapped_nents, typedef struct scatterlist *(sg_alloc_fn)(unsigned int, gfp_t); typedef void (sg_free_fn)(struct scatterlist *, unsigned int); -void __sg_free_table(struct sg_table *, unsigned int, unsigned int, - sg_free_fn *, unsigned int); +void __sg_free_table(struct sg_table *, unsigned int, bool, sg_free_fn *); void sg_free_table(struct sg_table *); -void sg_free_append_table(struct sg_append_table *sgt); int __sg_alloc_table(struct sg_table *, unsigned int, unsigned int, - struct scatterlist *, unsigned int, gfp_t, sg_alloc_fn *); + struct scatterlist *, gfp_t, sg_alloc_fn *); int sg_alloc_table(struct sg_table *, unsigned int, gfp_t); -int 
sg_alloc_append_table_from_pages(struct sg_append_table *sgt, - struct page **pages, unsigned int n_pages, - unsigned int offset, unsigned long size, - unsigned int max_segment, - unsigned int left_pages, gfp_t gfp_mask); -int sg_alloc_table_from_pages_segment(struct sg_table *sgt, struct page **pages, - unsigned int n_pages, unsigned int offset, - unsigned long size, - unsigned int max_segment, gfp_t gfp_mask); - -/** - * sg_alloc_table_from_pages - Allocate and initialize an sg table from - * an array of pages - * @sgt: The sg table header to use - * @pages: Pointer to an array of page pointers - * @n_pages: Number of pages in the pages array - * @offset: Offset from start of the first page to the start of a buffer - * @size: Number of valid bytes in the buffer (after offset) - * @gfp_mask: GFP allocation mask - * - * Description: - * Allocate and initialize an sg table from a list of pages. Contiguous - * ranges of the pages are squashed into a single scatterlist node. A user - * may provide an offset at a start and a size of valid data in a buffer - * specified by the page array. The returned sg table is released by - * sg_free_table. 
- * - * Returns: - * 0 on success, negative error on failure - */ -static inline int sg_alloc_table_from_pages(struct sg_table *sgt, - struct page **pages, - unsigned int n_pages, - unsigned int offset, - unsigned long size, gfp_t gfp_mask) -{ - return sg_alloc_table_from_pages_segment(sgt, pages, n_pages, offset, - size, UINT_MAX, gfp_mask); -} - -#ifdef CONFIG_SGL_ALLOC -struct scatterlist *sgl_alloc_order(unsigned long long length, - unsigned int order, bool chainable, - gfp_t gfp, unsigned int *nent_p); -struct scatterlist *sgl_alloc(unsigned long long length, gfp_t gfp, - unsigned int *nent_p); -void sgl_free_n_order(struct scatterlist *sgl, int nents, int order); -void sgl_free_order(struct scatterlist *sgl, int order); -void sgl_free(struct scatterlist *sgl); -#endif /* CONFIG_SGL_ALLOC */ +int sg_alloc_table_from_pages(struct sg_table *sgt, + struct page **pages, unsigned int n_pages, + unsigned long offset, unsigned long size, + gfp_t gfp_mask); size_t sg_copy_buffer(struct scatterlist *sgl, unsigned int nents, void *buf, size_t buflen, off_t skip, bool to_buffer); @@ -355,8 +281,6 @@ size_t sg_pcopy_from_buffer(struct scatterlist *sgl, unsigned int nents, const void *buf, size_t buflen, off_t skip); size_t sg_pcopy_to_buffer(struct scatterlist *sgl, unsigned int nents, void *buf, size_t buflen, off_t skip); -size_t sg_zero_buffer(struct scatterlist *sgl, unsigned int nents, - size_t buflen, off_t skip); /* * Maximum number of entries that will be allocated in one piece, if @@ -377,29 +301,27 @@ size_t sg_zero_buffer(struct scatterlist *sgl, unsigned int nents, * Like SG_CHUNK_SIZE, but for archs that have sg chaining. This limit * is totally arbitrary, a setting of 2048 will get you at least 8mb ios. 
*/ -#ifdef CONFIG_ARCH_NO_SG_CHAIN -#define SG_MAX_SEGMENTS SG_CHUNK_SIZE -#else +#ifdef CONFIG_ARCH_HAS_SG_CHAIN #define SG_MAX_SEGMENTS 2048 +#else +#define SG_MAX_SEGMENTS SG_CHUNK_SIZE #endif #ifdef CONFIG_SG_POOL -void sg_free_table_chained(struct sg_table *table, - unsigned nents_first_chunk); +void sg_free_table_chained(struct sg_table *table, bool first_chunk); int sg_alloc_table_chained(struct sg_table *table, int nents, - struct scatterlist *first_chunk, - unsigned nents_first_chunk); + struct scatterlist *first_chunk); #endif /* * sg page iterator * - * Iterates over sg entries page-by-page. On each successful iteration, you - * can call sg_page_iter_page(@piter) to get the current page. - * @piter->sg will point to the sg holding this page and @piter->sg_pgoffset to - * the page's page offset within the sg. The iteration will stop either when a - * maximum number of sg entries was reached or a terminating sg - * (sg_last(sg) == true) was reached. + * Iterates over sg entries page-by-page. On each successful iteration, + * you can call sg_page_iter_page(@piter) and sg_page_iter_dma_address(@piter) + * to get the current page and its dma address. @piter->sg will point to the + * sg holding this page and @piter->sg_pgoffset to the page's page offset + * within the sg. The iteration will stop either when a maximum number of sg + * entries was reached or a terminating sg (sg_last(sg) == true) was reached. */ struct sg_page_iter { struct scatterlist *sg; /* sg holding the page */ @@ -411,19 +333,7 @@ struct sg_page_iter { * next step */ }; -/* - * sg page iterator for DMA addresses - * - * This is the same as sg_page_iter however you can call - * sg_page_iter_dma_address(@dma_iter) to get the page's DMA - * address. sg_page_iter_page() cannot be called on this iterator. 
- */ -struct sg_dma_page_iter { - struct sg_page_iter base; -}; - bool __sg_page_iter_next(struct sg_page_iter *piter); -bool __sg_page_iter_dma_next(struct sg_dma_page_iter *dma_iter); void __sg_page_iter_start(struct sg_page_iter *piter, struct scatterlist *sglist, unsigned int nents, unsigned long pgoffset); @@ -439,13 +349,11 @@ static inline struct page *sg_page_iter_page(struct sg_page_iter *piter) /** * sg_page_iter_dma_address - get the dma address of the current page held by * the page iterator. - * @dma_iter: page iterator holding the page + * @piter: page iterator holding the page */ -static inline dma_addr_t -sg_page_iter_dma_address(struct sg_dma_page_iter *dma_iter) +static inline dma_addr_t sg_page_iter_dma_address(struct sg_page_iter *piter) { - return sg_dma_address(dma_iter->base.sg) + - (dma_iter->base.sg_pgoffset << PAGE_SHIFT); + return sg_dma_address(piter->sg) + (piter->sg_pgoffset << PAGE_SHIFT); } /** @@ -453,66 +361,19 @@ sg_page_iter_dma_address(struct sg_dma_page_iter *dma_iter) * @sglist: sglist to iterate over * @piter: page iterator to hold current page, sg, sg_pgoffset * @nents: maximum number of sg entries to iterate over - * @pgoffset: starting page offset (in pages) - * - * Callers may use sg_page_iter_page() to get each page pointer. - * In each loop it operates on PAGE_SIZE unit. + * @pgoffset: starting page offset */ #define for_each_sg_page(sglist, piter, nents, pgoffset) \ for (__sg_page_iter_start((piter), (sglist), (nents), (pgoffset)); \ __sg_page_iter_next(piter);) -/** - * for_each_sg_dma_page - iterate over the pages of the given sg list - * @sglist: sglist to iterate over - * @dma_iter: DMA page iterator to hold current page - * @dma_nents: maximum number of sg entries to iterate over, this is the value - * returned from dma_map_sg - * @pgoffset: starting page offset (in pages) - * - * Callers may use sg_page_iter_dma_address() to get each page's DMA address. - * In each loop it operates on PAGE_SIZE unit. 
- */ -#define for_each_sg_dma_page(sglist, dma_iter, dma_nents, pgoffset) \ - for (__sg_page_iter_start(&(dma_iter)->base, sglist, dma_nents, \ - pgoffset); \ - __sg_page_iter_dma_next(dma_iter);) - -/** - * for_each_sgtable_page - iterate over all pages in the sg_table object - * @sgt: sg_table object to iterate over - * @piter: page iterator to hold current page - * @pgoffset: starting page offset (in pages) - * - * Iterates over the all memory pages in the buffer described by - * a scatterlist stored in the given sg_table object. - * See also for_each_sg_page(). In each loop it operates on PAGE_SIZE unit. - */ -#define for_each_sgtable_page(sgt, piter, pgoffset) \ - for_each_sg_page((sgt)->sgl, piter, (sgt)->orig_nents, pgoffset) - -/** - * for_each_sgtable_dma_page - iterate over the DMA mapped sg_table object - * @sgt: sg_table object to iterate over - * @dma_iter: DMA page iterator to hold current page - * @pgoffset: starting page offset (in pages) - * - * Iterates over the all DMA mapped pages in the buffer described by - * a scatterlist stored in the given sg_table object. - * See also for_each_sg_dma_page(). In each loop it operates on PAGE_SIZE - * unit. - */ -#define for_each_sgtable_dma_page(sgt, dma_iter, pgoffset) \ - for_each_sg_dma_page((sgt)->sgl, dma_iter, (sgt)->nents, pgoffset) - - /* * Mapping sg iterator * * Iterates over sg entries mapping page-by-page. On each successful * iteration, @miter->page points to the mapped page and * @miter->length bytes of data can be accessed at @miter->addr. As - * long as an iteration is enclosed between start and stop, the user + * long as an interation is enclosed between start and stop, the user * is free to choose control structure and when to stop. * * @miter->consumed is set to @miter->length on each iteration. 
It diff --git a/include/linux/scc.h b/include/linux/scc.h index 745eabd17c..c5a0049626 100644 --- a/include/linux/scc.h +++ b/include/linux/scc.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ /* $Id: scc.h,v 1.29 1997/04/02 14:56:45 jreuter Exp jreuter $ */ #ifndef _SCC_H #define _SCC_H diff --git a/include/linux/sched.h b/include/linux/sched.h index c1a927ddec..2b11430f1d 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -1,72 +1,199 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_SCHED_H #define _LINUX_SCHED_H -/* - * Define 'struct task_struct' and provide the main scheduler - * APIs (schedule(), wakeup variants, etc.) - */ - #include -#include +#include -#include + +struct sched_param { + unsigned int sched_priority; +}; + +#include /* for HZ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +#include #include #include -#include -#include -#include -#include +#include +#include +#include +#include +#include +#include #include -#include #include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#define SCHED_ATTR_SIZE_VER0 48 /* sizeof first published struct */ + +/* + * Extended scheduling parameters data structure. + * + * This is needed because the original struct sched_param can not be + * altered without introducing ABI issues with legacy applications + * (e.g., in sched_getparam()). + * + * However, the possibility of specifying more than just a priority for + * the tasks may be useful for a wide variety of application fields, e.g., + * multimedia, streaming, automation and control, and many others. 
+ * + * This variant (sched_attr) is meant at describing a so-called + * sporadic time-constrained task. In such model a task is specified by: + * - the activation period or minimum instance inter-arrival time; + * - the maximum (or average, depending on the actual scheduling + * discipline) computation time of all instances, a.k.a. runtime; + * - the deadline (relative to the actual activation time) of each + * instance. + * Very briefly, a periodic (sporadic) task asks for the execution of + * some specific computation --which is typically called an instance-- + * (at most) every period. Moreover, each instance typically lasts no more + * than the runtime and must be completed by time instant t equal to + * the instance activation time + the deadline. + * + * This is reflected by the actual fields of the sched_attr structure: + * + * @size size of the structure, for fwd/bwd compat. + * + * @sched_policy task's scheduling policy + * @sched_flags for customizing the scheduler behaviour + * @sched_nice task's nice value (SCHED_NORMAL/BATCH) + * @sched_priority task's static priority (SCHED_FIFO/RR) + * @sched_deadline representative of the task's deadline + * @sched_runtime representative of the task's runtime + * @sched_period representative of the task's period + * + * Given this task model, there are a multiplicity of scheduling algorithms + * and policies, that can be used to ensure all the tasks will make their + * timing constraints. + * + * As of now, the SCHED_DEADLINE policy (sched_dl scheduling class) is the + * only user of this new interface. More information about the algorithm + * available in the scheduling class file or in Documentation/. 
+ */ +struct sched_attr { + u32 size; + + u32 sched_policy; + u64 sched_flags; + + /* SCHED_NORMAL, SCHED_BATCH */ + s32 sched_nice; + + /* SCHED_FIFO, SCHED_RR */ + u32 sched_priority; + + /* SCHED_DEADLINE */ + u64 sched_runtime; + u64 sched_deadline; + u64 sched_period; +}; -/* task_struct member predeclarations (sorted alphabetically): */ -struct audit_context; -struct backing_dev_info; -struct bio_list; -struct blk_plug; -struct bpf_local_storage; -struct bpf_run_ctx; -struct capture_control; -struct cfs_rq; -struct fs_struct; struct futex_pi_state; -struct io_context; -struct io_uring_task; -struct mempolicy; -struct nameidata; -struct nsproxy; -struct perf_event_context; -struct pid_namespace; -struct pipe_inode_info; -struct rcu_node; -struct reclaim_state; struct robust_list_head; -struct root_domain; -struct rq; -struct sched_attr; -struct sched_param; +struct bio_list; +struct fs_struct; +struct perf_event_context; +struct blk_plug; +struct filename; +struct nameidata; +struct linux_binprm; + +#define VMACACHE_BITS 2 +#define VMACACHE_SIZE (1U << VMACACHE_BITS) +#define VMACACHE_MASK (VMACACHE_SIZE - 1) + +/* + * These are the constant used to fake the fixed-point load-average + * counting. Some notes: + * - 11 bit fractions expand to 22 bits by the multiplies: this gives + * a load-average precision of 10 bits integer + 11 bits fractional + * - if you want to count load-averages more often, you need more + * precision, or rounding will get you. With 2-second counting freq, + * the EXP_n values would be 1981, 2034 and 2043 if still using only + * 11 bit fractions. 
+ */ +extern unsigned long avenrun[]; /* Load averages */ +extern void get_avenrun(unsigned long *loads, unsigned long offset, int shift); + +#define FSHIFT 11 /* nr of bits of precision */ +#define FIXED_1 (1<>= FSHIFT; + +extern unsigned long total_forks; +extern int nr_threads; +DECLARE_PER_CPU(unsigned long, process_counts); +extern int nr_processes(void); +extern unsigned long nr_running(void); +extern bool single_task_running(void); +extern unsigned long nr_iowait(void); +extern unsigned long nr_iowait_cpu(int cpu); +extern void get_iowait_load(unsigned long *nr_waiters, unsigned long *load); + +extern void calc_global_load(unsigned long ticks); + +#if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ_COMMON) +extern void cpu_load_update_nohz_start(void); +extern void cpu_load_update_nohz_stop(void); +#else +static inline void cpu_load_update_nohz_start(void) { } +static inline void cpu_load_update_nohz_stop(void) { } +#endif + +extern void dump_cpu_task(int cpu); + struct seq_file; -struct sighand_struct; -struct signal_struct; -struct task_delay_info; +struct cfs_rq; struct task_group; +#ifdef CONFIG_SCHED_DEBUG +extern void proc_sched_show_task(struct task_struct *p, struct seq_file *m); +extern void proc_sched_set_task(struct task_struct *p); +#endif /* * Task state bitmask. NOTE! These bits are also @@ -78,228 +205,368 @@ struct task_group; * modifying one set can't modify the other one by * mistake. 
*/ +#define TASK_RUNNING 0 +#define TASK_INTERRUPTIBLE 1 +#define TASK_UNINTERRUPTIBLE 2 +#define __TASK_STOPPED 4 +#define __TASK_TRACED 8 +/* in tsk->exit_state */ +#define EXIT_DEAD 16 +#define EXIT_ZOMBIE 32 +#define EXIT_TRACE (EXIT_ZOMBIE | EXIT_DEAD) +/* in tsk->state again */ +#define TASK_DEAD 64 +#define TASK_WAKEKILL 128 +#define TASK_WAKING 256 +#define TASK_PARKED 512 +#define TASK_NOLOAD 1024 +#define TASK_NEW 2048 +#define TASK_STATE_MAX 4096 -/* Used in tsk->state: */ -#define TASK_RUNNING 0x0000 -#define TASK_INTERRUPTIBLE 0x0001 -#define TASK_UNINTERRUPTIBLE 0x0002 -#define __TASK_STOPPED 0x0004 -#define __TASK_TRACED 0x0008 -/* Used in tsk->exit_state: */ -#define EXIT_DEAD 0x0010 -#define EXIT_ZOMBIE 0x0020 -#define EXIT_TRACE (EXIT_ZOMBIE | EXIT_DEAD) -/* Used in tsk->state again: */ -#define TASK_PARKED 0x0040 -#define TASK_DEAD 0x0080 -#define TASK_WAKEKILL 0x0100 -#define TASK_WAKING 0x0200 -#define TASK_NOLOAD 0x0400 -#define TASK_NEW 0x0800 -/* RT specific auxilliary flag to mark RT lock waiters */ -#define TASK_RTLOCK_WAIT 0x1000 -#define TASK_STATE_MAX 0x2000 +#define TASK_STATE_TO_CHAR_STR "RSDTtXZxKWPNn" -/* Convenience macros for the sake of set_current_state: */ -#define TASK_KILLABLE (TASK_WAKEKILL | TASK_UNINTERRUPTIBLE) -#define TASK_STOPPED (TASK_WAKEKILL | __TASK_STOPPED) -#define TASK_TRACED (TASK_WAKEKILL | __TASK_TRACED) +extern char ___assert_task_state[1 - 2*!!( + sizeof(TASK_STATE_TO_CHAR_STR)-1 != ilog2(TASK_STATE_MAX)+1)]; -#define TASK_IDLE (TASK_UNINTERRUPTIBLE | TASK_NOLOAD) +/* Convenience macros for the sake of set_task_state */ +#define TASK_KILLABLE (TASK_WAKEKILL | TASK_UNINTERRUPTIBLE) +#define TASK_STOPPED (TASK_WAKEKILL | __TASK_STOPPED) +#define TASK_TRACED (TASK_WAKEKILL | __TASK_TRACED) -/* Convenience macros for the sake of wake_up(): */ -#define TASK_NORMAL (TASK_INTERRUPTIBLE | TASK_UNINTERRUPTIBLE) +#define TASK_IDLE (TASK_UNINTERRUPTIBLE | TASK_NOLOAD) -/* get_task_state(): */ -#define TASK_REPORT 
(TASK_RUNNING | TASK_INTERRUPTIBLE | \ - TASK_UNINTERRUPTIBLE | __TASK_STOPPED | \ - __TASK_TRACED | EXIT_DEAD | EXIT_ZOMBIE | \ - TASK_PARKED) +/* Convenience macros for the sake of wake_up */ +#define TASK_NORMAL (TASK_INTERRUPTIBLE | TASK_UNINTERRUPTIBLE) +#define TASK_ALL (TASK_NORMAL | __TASK_STOPPED | __TASK_TRACED) -#define task_is_running(task) (READ_ONCE((task)->__state) == TASK_RUNNING) +/* get_task_state() */ +#define TASK_REPORT (TASK_RUNNING | TASK_INTERRUPTIBLE | \ + TASK_UNINTERRUPTIBLE | __TASK_STOPPED | \ + __TASK_TRACED | EXIT_ZOMBIE | EXIT_DEAD) -#define task_is_traced(task) ((READ_ONCE(task->__state) & __TASK_TRACED) != 0) - -#define task_is_stopped(task) ((READ_ONCE(task->__state) & __TASK_STOPPED) != 0) - -#define task_is_stopped_or_traced(task) ((READ_ONCE(task->__state) & (__TASK_STOPPED | __TASK_TRACED)) != 0) - -/* - * Special states are those that do not use the normal wait-loop pattern. See - * the comment with set_special_state(). - */ -#define is_special_task_state(state) \ - ((state) & (__TASK_STOPPED | __TASK_TRACED | TASK_PARKED | TASK_DEAD)) +#define task_is_traced(task) ((task->state & __TASK_TRACED) != 0) +#define task_is_stopped(task) ((task->state & __TASK_STOPPED) != 0) +#define task_is_stopped_or_traced(task) \ + ((task->state & (__TASK_STOPPED | __TASK_TRACED)) != 0) +#define task_contributes_to_load(task) \ + ((task->state & TASK_UNINTERRUPTIBLE) != 0 && \ + (task->flags & PF_FROZEN) == 0 && \ + (task->state & TASK_NOLOAD) == 0) #ifdef CONFIG_DEBUG_ATOMIC_SLEEP -# define debug_normal_state_change(state_value) \ - do { \ - WARN_ON_ONCE(is_special_task_state(state_value)); \ - current->task_state_change = _THIS_IP_; \ - } while (0) -# define debug_special_state_change(state_value) \ - do { \ - WARN_ON_ONCE(!is_special_task_state(state_value)); \ - current->task_state_change = _THIS_IP_; \ +#define __set_task_state(tsk, state_value) \ + do { \ + (tsk)->task_state_change = _THIS_IP_; \ + (tsk)->state = (state_value); \ } while 
(0) - -# define debug_rtlock_wait_set_state() \ - do { \ - current->saved_state_change = current->task_state_change;\ - current->task_state_change = _THIS_IP_; \ +#define set_task_state(tsk, state_value) \ + do { \ + (tsk)->task_state_change = _THIS_IP_; \ + smp_store_mb((tsk)->state, (state_value)); \ } while (0) -# define debug_rtlock_wait_restore_state() \ - do { \ - current->task_state_change = current->saved_state_change;\ - } while (0) - -#else -# define debug_normal_state_change(cond) do { } while (0) -# define debug_special_state_change(cond) do { } while (0) -# define debug_rtlock_wait_set_state() do { } while (0) -# define debug_rtlock_wait_restore_state() do { } while (0) -#endif - /* * set_current_state() includes a barrier so that the write of current->state * is correctly serialised wrt the caller's subsequent test of whether to * actually sleep: * - * for (;;) { * set_current_state(TASK_UNINTERRUPTIBLE); - * if (CONDITION) - * break; + * if (do_i_need_to_sleep()) + * schedule(); * - * schedule(); - * } - * __set_current_state(TASK_RUNNING); - * - * If the caller does not need such serialisation (because, for instance, the - * CONDITION test and condition change and wakeup are under the same lock) then - * use __set_current_state(). - * - * The above is typically ordered against the wakeup, which does: - * - * CONDITION = 1; - * wake_up_state(p, TASK_UNINTERRUPTIBLE); - * - * where wake_up_state()/try_to_wake_up() executes a full memory barrier before - * accessing p->state. - * - * Wakeup will do: if (@state & p->state) p->state = TASK_RUNNING, that is, - * once it observes the TASK_UNINTERRUPTIBLE store the waking CPU can issue a - * TASK_RUNNING store which can collide with __set_current_state(TASK_RUNNING). - * - * However, with slightly different timing the wakeup TASK_RUNNING store can - * also collide with the TASK_UNINTERRUPTIBLE store. 
Losing that store is not - * a problem either because that will result in one extra go around the loop - * and our @cond test will save the day. - * - * Also see the comments of try_to_wake_up(). + * If the caller does not need such serialisation then use __set_current_state() */ -#define __set_current_state(state_value) \ - do { \ - debug_normal_state_change((state_value)); \ - WRITE_ONCE(current->__state, (state_value)); \ +#define __set_current_state(state_value) \ + do { \ + current->task_state_change = _THIS_IP_; \ + current->state = (state_value); \ + } while (0) +#define set_current_state(state_value) \ + do { \ + current->task_state_change = _THIS_IP_; \ + smp_store_mb(current->state, (state_value)); \ } while (0) -#define set_current_state(state_value) \ - do { \ - debug_normal_state_change((state_value)); \ - smp_store_mb(current->__state, (state_value)); \ - } while (0) +#else + +#define __set_task_state(tsk, state_value) \ + do { (tsk)->state = (state_value); } while (0) +#define set_task_state(tsk, state_value) \ + smp_store_mb((tsk)->state, (state_value)) /* - * set_special_state() should be used for those states when the blocking task - * can not use the regular condition based wait-loop. In that case we must - * serialize against wakeups such that any possible in-flight TASK_RUNNING - * stores will not collide with our state change. 
+ * set_current_state() includes a barrier so that the write of current->state + * is correctly serialised wrt the caller's subsequent test of whether to + * actually sleep: + * + * set_current_state(TASK_UNINTERRUPTIBLE); + * if (do_i_need_to_sleep()) + * schedule(); + * + * If the caller does not need such serialisation then use __set_current_state() */ -#define set_special_state(state_value) \ - do { \ - unsigned long flags; /* may shadow */ \ - \ - raw_spin_lock_irqsave(¤t->pi_lock, flags); \ - debug_special_state_change((state_value)); \ - WRITE_ONCE(current->__state, (state_value)); \ - raw_spin_unlock_irqrestore(¤t->pi_lock, flags); \ - } while (0) +#define __set_current_state(state_value) \ + do { current->state = (state_value); } while (0) +#define set_current_state(state_value) \ + smp_store_mb(current->state, (state_value)) -/* - * PREEMPT_RT specific variants for "sleeping" spin/rwlocks - * - * RT's spin/rwlock substitutions are state preserving. The state of the - * task when blocking on the lock is saved in task_struct::saved_state and - * restored after the lock has been acquired. These operations are - * serialized by task_struct::pi_lock against try_to_wake_up(). Any non RT - * lock related wakeups while the task is blocked on the lock are - * redirected to operate on task_struct::saved_state to ensure that these - * are not dropped. On restore task_struct::saved_state is set to - * TASK_RUNNING so any wakeup attempt redirected to saved_state will fail. 
- * - * The lock operation looks like this: - * - * current_save_and_set_rtlock_wait_state(); - * for (;;) { - * if (try_lock()) - * break; - * raw_spin_unlock_irq(&lock->wait_lock); - * schedule_rtlock(); - * raw_spin_lock_irq(&lock->wait_lock); - * set_current_state(TASK_RTLOCK_WAIT); - * } - * current_restore_rtlock_saved_state(); - */ -#define current_save_and_set_rtlock_wait_state() \ - do { \ - lockdep_assert_irqs_disabled(); \ - raw_spin_lock(¤t->pi_lock); \ - current->saved_state = current->__state; \ - debug_rtlock_wait_set_state(); \ - WRITE_ONCE(current->__state, TASK_RTLOCK_WAIT); \ - raw_spin_unlock(¤t->pi_lock); \ - } while (0); - -#define current_restore_rtlock_saved_state() \ - do { \ - lockdep_assert_irqs_disabled(); \ - raw_spin_lock(¤t->pi_lock); \ - debug_rtlock_wait_restore_state(); \ - WRITE_ONCE(current->__state, current->saved_state); \ - current->saved_state = TASK_RUNNING; \ - raw_spin_unlock(¤t->pi_lock); \ - } while (0); - -#define get_current_state() READ_ONCE(current->__state) - -/* Task command name length: */ -#define TASK_COMM_LEN 16 - -extern void scheduler_tick(void); - -#define MAX_SCHEDULE_TIMEOUT LONG_MAX - -extern long schedule_timeout(long timeout); -extern long schedule_timeout_interruptible(long timeout); -extern long schedule_timeout_killable(long timeout); -extern long schedule_timeout_uninterruptible(long timeout); -extern long schedule_timeout_idle(long timeout); -asmlinkage void schedule(void); -extern void schedule_preempt_disabled(void); -asmlinkage void preempt_schedule_irq(void); -#ifdef CONFIG_PREEMPT_RT - extern void schedule_rtlock(void); #endif -extern int __must_check io_schedule_prepare(void); -extern void io_schedule_finish(int token); +/* Task command name length */ +#define TASK_COMM_LEN 16 + +#include + +/* + * This serializes "schedule()" and also protects + * the run-queue from deletions/modifications (but + * _adding_ to the beginning of the run-queue has + * a separate lock). 
+ */ +extern rwlock_t tasklist_lock; +extern spinlock_t mmlist_lock; + +struct task_struct; + +#ifdef CONFIG_PROVE_RCU +extern int lockdep_tasklist_lock_is_held(void); +#endif /* #ifdef CONFIG_PROVE_RCU */ + +extern void sched_init(void); +extern void sched_init_smp(void); +extern asmlinkage void schedule_tail(struct task_struct *prev); +extern void init_idle(struct task_struct *idle, int cpu); +extern void init_idle_bootup_task(struct task_struct *idle); + +extern cpumask_var_t cpu_isolated_map; + +extern int runqueue_is_locked(int cpu); + +#if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ_COMMON) +extern void nohz_balance_enter_idle(int cpu); +extern void set_cpu_sd_state_idle(void); +extern int get_nohz_timer_target(void); +#else +static inline void nohz_balance_enter_idle(int cpu) { } +static inline void set_cpu_sd_state_idle(void) { } +#endif + +/* + * Only dump TASK_* tasks. (0 for all tasks) + */ +extern void show_state_filter(unsigned long state_filter); + +static inline void show_state(void) +{ + show_state_filter(0); +} + +extern void show_regs(struct pt_regs *); + +/* + * TASK is a pointer to the task whose backtrace we want to see (or NULL for current + * task), SP is the stack pointer of the first frame that should be shown in the back + * trace (or NULL if the entire call-chain of the task should be shown). 
+ */ +extern void show_stack(struct task_struct *task, unsigned long *sp); + +extern void cpu_init (void); +extern void trap_init(void); +extern void update_process_times(int user); +extern void scheduler_tick(void); +extern int sched_cpu_starting(unsigned int cpu); +extern int sched_cpu_activate(unsigned int cpu); +extern int sched_cpu_deactivate(unsigned int cpu); + +#ifdef CONFIG_HOTPLUG_CPU +extern int sched_cpu_dying(unsigned int cpu); +#else +# define sched_cpu_dying NULL +#endif + +extern void sched_show_task(struct task_struct *p); + +#ifdef CONFIG_LOCKUP_DETECTOR +extern void touch_softlockup_watchdog_sched(void); +extern void touch_softlockup_watchdog(void); +extern void touch_softlockup_watchdog_sync(void); +extern void touch_all_softlockup_watchdogs(void); +extern int proc_dowatchdog_thresh(struct ctl_table *table, int write, + void __user *buffer, + size_t *lenp, loff_t *ppos); +extern unsigned int softlockup_panic; +extern unsigned int hardlockup_panic; +void lockup_detector_init(void); +#else +static inline void touch_softlockup_watchdog_sched(void) +{ +} +static inline void touch_softlockup_watchdog(void) +{ +} +static inline void touch_softlockup_watchdog_sync(void) +{ +} +static inline void touch_all_softlockup_watchdogs(void) +{ +} +static inline void lockup_detector_init(void) +{ +} +#endif + +#ifdef CONFIG_DETECT_HUNG_TASK +void reset_hung_task_detector(void); +#else +static inline void reset_hung_task_detector(void) +{ +} +#endif + +/* Attach to any functions which should be ignored in wchan output. */ +#define __sched __attribute__((__section__(".sched.text"))) + +/* Linker adds these: start and end of __sched functions */ +extern char __sched_text_start[], __sched_text_end[]; + +/* Is this address in the __sched functions? 
*/ +extern int in_sched_functions(unsigned long addr); + +#define MAX_SCHEDULE_TIMEOUT LONG_MAX +extern signed long schedule_timeout(signed long timeout); +extern signed long schedule_timeout_interruptible(signed long timeout); +extern signed long schedule_timeout_killable(signed long timeout); +extern signed long schedule_timeout_uninterruptible(signed long timeout); +extern signed long schedule_timeout_idle(signed long timeout); +asmlinkage void schedule(void); +extern void schedule_preempt_disabled(void); + extern long io_schedule_timeout(long timeout); -extern void io_schedule(void); + +static inline void io_schedule(void) +{ + io_schedule_timeout(MAX_SCHEDULE_TIMEOUT); +} + +void __noreturn do_task_dead(void); + +struct nsproxy; +struct user_namespace; + +#ifdef CONFIG_MMU + +#ifdef CONFIG_GRKERNSEC_RAND_THREADSTACK +extern unsigned long gr_rand_threadstack_offset(const struct mm_struct *mm, const struct file *filp, unsigned long flags); +#else +static inline unsigned long gr_rand_threadstack_offset(const struct mm_struct *mm, const struct file *filp, unsigned long flags) +{ + return 0; +} +#endif + +extern bool check_heap_stack_gap(const struct vm_area_struct *vma, unsigned long addr, unsigned long len, unsigned long offset); + +extern void arch_pick_mmap_layout(struct mm_struct *mm); +extern unsigned long +arch_get_unmapped_area(struct file *, unsigned long, unsigned long, + unsigned long, unsigned long); +extern unsigned long +arch_get_unmapped_area_topdown(struct file *filp, unsigned long addr, + unsigned long len, unsigned long pgoff, + unsigned long flags); +#else +static inline void arch_pick_mmap_layout(struct mm_struct *mm) {} +#endif + +#define SUID_DUMP_DISABLE 0 /* No setuid dumping */ +#define SUID_DUMP_USER 1 /* Dump as user of process */ +#define SUID_DUMP_ROOT 2 /* Dump as root */ + +/* mm flags */ + +/* for SUID_DUMP_* above */ +#define MMF_DUMPABLE_BITS 2 +#define MMF_DUMPABLE_MASK ((1 << MMF_DUMPABLE_BITS) - 1) + +extern void 
set_dumpable(struct mm_struct *mm, int value); +/* + * This returns the actual value of the suid_dumpable flag. For things + * that are using this for checking for privilege transitions, it must + * test against SUID_DUMP_USER rather than treating it as a boolean + * value. + */ +static inline int __get_dumpable(unsigned long mm_flags) +{ + return mm_flags & MMF_DUMPABLE_MASK; +} + +static inline int get_dumpable(struct mm_struct *mm) +{ + return __get_dumpable(mm->flags); +} + +/* coredump filter bits */ +#define MMF_DUMP_ANON_PRIVATE 2 +#define MMF_DUMP_ANON_SHARED 3 +#define MMF_DUMP_MAPPED_PRIVATE 4 +#define MMF_DUMP_MAPPED_SHARED 5 +#define MMF_DUMP_ELF_HEADERS 6 +#define MMF_DUMP_HUGETLB_PRIVATE 7 +#define MMF_DUMP_HUGETLB_SHARED 8 +#define MMF_DUMP_DAX_PRIVATE 9 +#define MMF_DUMP_DAX_SHARED 10 + +#define MMF_DUMP_FILTER_SHIFT MMF_DUMPABLE_BITS +#define MMF_DUMP_FILTER_BITS 9 +#define MMF_DUMP_FILTER_MASK \ + (((1 << MMF_DUMP_FILTER_BITS) - 1) << MMF_DUMP_FILTER_SHIFT) +#define MMF_DUMP_FILTER_DEFAULT \ + ((1 << MMF_DUMP_ANON_PRIVATE) | (1 << MMF_DUMP_ANON_SHARED) |\ + (1 << MMF_DUMP_HUGETLB_PRIVATE) | MMF_DUMP_MASK_DEFAULT_ELF) + +#ifdef CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS +# define MMF_DUMP_MASK_DEFAULT_ELF (1 << MMF_DUMP_ELF_HEADERS) +#else +# define MMF_DUMP_MASK_DEFAULT_ELF 0 +#endif + /* leave room for more dump flags */ +#define MMF_VM_MERGEABLE 16 /* KSM may merge identical pages */ +#define MMF_VM_HUGEPAGE 17 /* set when VM_HUGEPAGE is set on vma */ +#define MMF_EXE_FILE_CHANGED 18 /* see prctl_set_mm_exe_file() */ + +#define MMF_HAS_UPROBES 19 /* has uprobes */ +#define MMF_RECALC_UPROBES 20 /* MMF_HAS_UPROBES can be wrong */ +#define MMF_OOM_SKIP 21 /* mm is of no interest for the OOM killer */ +#define MMF_UNSTABLE 22 /* mm is unstable for copy_from_user */ +#define MMF_HUGE_ZERO_PAGE 23 /* mm has ever used the global huge zero page */ + +#define MMF_INIT_MASK (MMF_DUMPABLE_MASK | MMF_DUMP_FILTER_MASK) + +struct sighand_struct { + atomic_t count; 
+ struct k_sigaction action[_NSIG]; + spinlock_t siglock; + wait_queue_head_t signalfd_wqh; +}; + +struct pacct_struct { + int ac_flag; + long ac_exitcode; + unsigned long ac_mem; + cputime_t ac_utime, ac_stime; + unsigned long ac_minflt, ac_majflt; +}; + +struct cpu_itimer { + cputime_t expires; + cputime_t incr; + u32 error; + u32 incr_error; +}; /** - * struct prev_cputime - snapshot of system and user cputime + * struct prev_cputime - snaphsot of system and user cputime * @utime: time spent in user mode * @stime: time spent in system mode * @lock: protects the above two fields @@ -309,71 +576,416 @@ extern void io_schedule(void); */ struct prev_cputime { #ifndef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE - u64 utime; - u64 stime; - raw_spinlock_t lock; + cputime_t utime; + cputime_t stime; + raw_spinlock_t lock; #endif }; -enum vtime_state { - /* Task is sleeping or running in a CPU with VTIME inactive: */ - VTIME_INACTIVE = 0, - /* Task is idle */ - VTIME_IDLE, - /* Task runs in kernelspace in a CPU with VTIME active: */ - VTIME_SYS, - /* Task runs in userspace in a CPU with VTIME active: */ - VTIME_USER, - /* Task runs as guests in a CPU with VTIME active: */ - VTIME_GUEST, +static inline void prev_cputime_init(struct prev_cputime *prev) +{ +#ifndef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE + prev->utime = prev->stime = 0; + raw_spin_lock_init(&prev->lock); +#endif +} + +/** + * struct task_cputime - collected CPU time counts + * @utime: time spent in user mode, in &cputime_t units + * @stime: time spent in kernel mode, in &cputime_t units + * @sum_exec_runtime: total time spent on the CPU, in nanoseconds + * + * This structure groups together three kinds of CPU time that are tracked for + * threads and thread groups. Most things considering CPU time want to group + * these counts together and treat all three of them in parallel. 
+ */ +struct task_cputime { + cputime_t utime; + cputime_t stime; + unsigned long long sum_exec_runtime; }; -struct vtime { - seqcount_t seqcount; - unsigned long long starttime; - enum vtime_state state; - unsigned int cpu; - u64 utime; - u64 stime; - u64 gtime; -}; +/* Alternate field names when used to cache expirations. */ +#define virt_exp utime +#define prof_exp stime +#define sched_exp sum_exec_runtime + +#define INIT_CPUTIME \ + (struct task_cputime) { \ + .utime = 0, \ + .stime = 0, \ + .sum_exec_runtime = 0, \ + } /* - * Utilization clamp constraints. - * @UCLAMP_MIN: Minimum utilization - * @UCLAMP_MAX: Maximum utilization - * @UCLAMP_CNT: Utilization clamp constraints count + * This is the atomic variant of task_cputime, which can be used for + * storing and updating task_cputime statistics without locking. */ -enum uclamp_id { - UCLAMP_MIN = 0, - UCLAMP_MAX, - UCLAMP_CNT +struct task_cputime_atomic { + atomic64_t utime; + atomic64_t stime; + atomic64_t sum_exec_runtime; }; -#ifdef CONFIG_SMP -extern struct root_domain def_root_domain; -extern struct mutex sched_domains_mutex; +#define INIT_CPUTIME_ATOMIC \ + (struct task_cputime_atomic) { \ + .utime = ATOMIC64_INIT(0), \ + .stime = ATOMIC64_INIT(0), \ + .sum_exec_runtime = ATOMIC64_INIT(0), \ + } + +#define PREEMPT_DISABLED (PREEMPT_DISABLE_OFFSET + PREEMPT_ENABLED) + +/* + * Disable preemption until the scheduler is running -- use an unconditional + * value so that it also works on !PREEMPT_COUNT kernels. + * + * Reset by start_kernel()->sched_init()->init_idle()->init_idle_preempt_count(). + */ +#define INIT_PREEMPT_COUNT PREEMPT_OFFSET + +/* + * Initial preempt_count value; reflects the preempt_count schedule invariant + * which states that during context switches: + * + * preempt_count() == 2*PREEMPT_DISABLE_OFFSET + * + * Note: PREEMPT_DISABLE_OFFSET is 0 for !PREEMPT_COUNT kernels. + * Note: See finish_task_switch(). 
+ */ +#define FORK_PREEMPT_COUNT (2*PREEMPT_DISABLE_OFFSET + PREEMPT_ENABLED) + +/** + * struct thread_group_cputimer - thread group interval timer counts + * @cputime_atomic: atomic thread group interval timers. + * @running: true when there are timers running and + * @cputime_atomic receives updates. + * @checking_timer: true when a thread in the group is in the + * process of checking for thread group timers. + * + * This structure contains the version of task_cputime, above, that is + * used for thread group CPU timer calculations. + */ +struct thread_group_cputimer { + struct task_cputime_atomic cputime_atomic; + bool running; + bool checking_timer; +}; + +#include <linux/rwsem.h> +struct autogroup; + +/* + * NOTE! "signal_struct" does not have its own + * locking, because a shared signal_struct always + * implies a shared sighand_struct, so locking + * sighand_struct is always a proper superset of + * the locking of signal_struct. + */ +struct signal_struct { + atomic_t sigcnt; + atomic_t live; + int nr_threads; + struct list_head thread_head; + + wait_queue_head_t wait_chldexit; /* for wait4() */ + + /* current thread group signal load-balancing target: */ + struct task_struct *curr_target; + + /* shared signal handling: */ + struct sigpending shared_pending; + + /* thread group exit support */ + int group_exit_code; + /* overloaded: + * - notify group_exit_task when ->count is equal to notify_count + * - everyone except group_exit_task is stopped during signal delivery + * of fatal signals, group_exit_task processes the signal. + */ + int notify_count; + struct task_struct *group_exit_task; + + /* thread group stop support, overloads group_exit_code too */ + int group_stop_count; + unsigned int flags; /* see SIGNAL_* flags below */ + + /* + * PR_SET_CHILD_SUBREAPER marks a process, like a service + * manager, to re-parent orphan (double-forking) child processes + * to this process instead of 'init'. 
The service manager is + * able to receive SIGCHLD signals and is able to investigate + * the process until it calls wait(). All children of this + * process will inherit a flag if they should look for a + * child_subreaper process at exit. + */ + unsigned int is_child_subreaper:1; + unsigned int has_child_subreaper:1; + + /* POSIX.1b Interval Timers */ + int posix_timer_id; + struct list_head posix_timers; + + /* ITIMER_REAL timer for the process */ + struct hrtimer real_timer; + struct pid *leader_pid; + ktime_t it_real_incr; + + /* + * ITIMER_PROF and ITIMER_VIRTUAL timers for the process, we use + * CPUCLOCK_PROF and CPUCLOCK_VIRT for indexing array as these + * values are defined to 0 and 1 respectively + */ + struct cpu_itimer it[2]; + + /* + * Thread group totals for process CPU timers. + * See thread_group_cputimer(), et al, for details. + */ + struct thread_group_cputimer cputimer; + + /* Earliest-expiration cache. */ + struct task_cputime cputime_expires; + +#ifdef CONFIG_NO_HZ_FULL + atomic_t tick_dep_mask; #endif -struct sched_info { + struct list_head cpu_timers[3]; + + struct pid *tty_old_pgrp; + + /* boolean value for session group leader */ + int leader; + + struct tty_struct *tty; /* NULL if no tty */ + +#ifdef CONFIG_SCHED_AUTOGROUP + struct autogroup *autogroup; +#endif + /* + * Cumulative resource counters for dead threads in the group, + * and for reaped dead child processes forked by this group. + * Live threads maintain their own counters and add to these + * in __exit_signal, except for the group leader. 
+ */ + seqlock_t stats_lock; + cputime_t utime, stime, cutime, cstime; + cputime_t gtime; + cputime_t cgtime; + struct prev_cputime prev_cputime; + unsigned long nvcsw, nivcsw, cnvcsw, cnivcsw; + unsigned long min_flt, maj_flt, cmin_flt, cmaj_flt; + unsigned long inblock, oublock, cinblock, coublock; + unsigned long maxrss, cmaxrss; + struct task_io_accounting ioac; + + /* + * Cumulative ns of schedule CPU time fo dead threads in the + * group, not including a zombie group leader, (This only differs + * from jiffies_to_ns(utime + stime) if sched_clock uses something + * other than jiffies.) + */ + unsigned long long sum_sched_runtime; + + /* + * We don't bother to synchronize most readers of this at all, + * because there is no reader checking a limit that actually needs + * to get both rlim_cur and rlim_max atomically, and either one + * alone is a single word that can safely be read normally. + * getrlimit/setrlimit use task_lock(current->group_leader) to + * protect this instead of the siglock, because they really + * have no need to disable irqs. + */ + struct rlimit rlim[RLIM_NLIMITS]; + +#ifdef CONFIG_BSD_PROCESS_ACCT + struct pacct_struct pacct; /* per-process accounting information */ +#endif +#ifdef CONFIG_TASKSTATS + struct taskstats *stats; +#endif + +#ifdef CONFIG_GRKERNSEC + u32 curr_ip; + u32 saved_ip; + u32 gr_saddr; + u32 gr_daddr; + u16 gr_sport; + u16 gr_dport; + u8 used_accept:1; +#endif + +#ifdef CONFIG_AUDIT + unsigned audit_tty; + struct tty_audit_buf *tty_audit_buf; +#endif + + /* + * Thread is the potential origin of an oom condition; kill first on + * oom + */ + bool oom_flag_origin; + short oom_score_adj; /* OOM kill score adjustment */ + short oom_score_adj_min; /* OOM kill score adjustment min value. + * Only settable by CAP_SYS_RESOURCE. 
*/ + struct mm_struct *oom_mm; /* recorded mm when the thread group got + * killed by the oom killer */ + + struct mutex cred_guard_mutex; /* guard against foreign influences on + * credential calculations + * (notably. ptrace) */ +} __randomize_layout; + +/* + * Bits in flags field of signal_struct. + */ +#define SIGNAL_STOP_STOPPED 0x00000001 /* job control stop in effect */ +#define SIGNAL_STOP_CONTINUED 0x00000002 /* SIGCONT since WCONTINUED reap */ +#define SIGNAL_GROUP_EXIT 0x00000004 /* group exit in progress */ +#define SIGNAL_GROUP_COREDUMP 0x00000008 /* coredump in progress */ +/* + * Pending notifications to parent. + */ +#define SIGNAL_CLD_STOPPED 0x00000010 +#define SIGNAL_CLD_CONTINUED 0x00000020 +#define SIGNAL_CLD_MASK (SIGNAL_CLD_STOPPED|SIGNAL_CLD_CONTINUED) + +#define SIGNAL_UNKILLABLE 0x00000040 /* for init: ignore fatal signals */ + +/* If true, all threads except ->group_exit_task have pending SIGKILL */ +static inline int signal_group_exit(const struct signal_struct *sig) +{ + return (sig->flags & SIGNAL_GROUP_EXIT) || + (sig->group_exit_task != NULL); +} + +/* + * Some day this will be a full-fledged user tracking system.. + */ +struct user_struct { + atomic_t __count; /* reference count */ + atomic_t processes; /* How many processes does this user have? */ + atomic_t sigpending; /* How many pending signals does this user have? */ +#ifdef CONFIG_INOTIFY_USER + atomic_t inotify_watches; /* How many inotify watches does this user have? */ + atomic_t inotify_devs; /* How many inotify devs does this user have opened? */ +#endif +#ifdef CONFIG_FANOTIFY + atomic_t fanotify_listeners; +#endif +#ifdef CONFIG_EPOLL + atomic_long_t epoll_watches; /* The number of file descriptors currently watched */ +#endif +#ifdef CONFIG_POSIX_MQUEUE + /* protected by mq_lock */ + unsigned long mq_bytes; /* How many bytes can be allocated to mqueue? */ +#endif + unsigned long locked_shm; /* How many pages of mlocked shm ? 
*/ + unsigned long unix_inflight; /* How many files in flight in unix sockets */ + atomic_long_t pipe_bufs; /* how many pages are allocated in pipe buffers */ + +#ifdef CONFIG_KEYS + struct key *uid_keyring; /* UID specific keyring */ + struct key *session_keyring; /* UID's default session keyring */ +#endif + +#ifdef CONFIG_GRKERNSEC_KERN_LOCKOUT + unsigned char kernel_banned; +#endif +#ifdef CONFIG_GRKERNSEC_BRUTE + unsigned char sugid_banned; + unsigned long sugid_ban_expires; +#endif + + /* Hash table maintenance information */ + struct hlist_node uidhash_node; + kuid_t uid; + +#if defined(CONFIG_PERF_EVENTS) || defined(CONFIG_BPF_SYSCALL) + atomic_long_t locked_vm; +#endif +} __randomize_layout; + +extern int uids_sysfs_init(void); + +extern struct user_struct *find_user(kuid_t); + +extern struct user_struct root_user; +#define INIT_USER (&root_user) + + +struct backing_dev_info; +struct reclaim_state; + #ifdef CONFIG_SCHED_INFO - /* Cumulative counters: */ - - /* # of times we have run on this CPU: */ - unsigned long pcount; - - /* Time spent waiting on a runqueue: */ - unsigned long long run_delay; - - /* Timestamps: */ - - /* When did we last run on a CPU? */ - unsigned long long last_arrival; - - /* When were we last queued to run? 
*/ - unsigned long long last_queued; +struct sched_info { + /* cumulative counters */ + unsigned long pcount; /* # of times run on this cpu */ + unsigned long long run_delay; /* time spent waiting on a runqueue */ + /* timestamps */ + unsigned long long last_arrival,/* when we last ran on a cpu */ + last_queued; /* when we were last queued to run */ +}; #endif /* CONFIG_SCHED_INFO */ + +#ifdef CONFIG_TASK_DELAY_ACCT +struct task_delay_info { + spinlock_t lock; + unsigned int flags; /* Private per-task flags */ + + /* For each stat XXX, add following, aligned appropriately + * + * struct timespec XXX_start, XXX_end; + * u64 XXX_delay; + * u32 XXX_count; + * + * Atomicity of updates to XXX_delay, XXX_count protected by + * single lock above (split into XXX_lock if contention is an issue). + */ + + /* + * XXX_count is incremented on every XXX operation, the delay + * associated with the operation is added to XXX_delay. + * XXX_delay contains the accumulated delay time in nanoseconds. + */ + u64 blkio_start; /* Shared by blkio, swapin */ + u64 blkio_delay; /* wait for sync block io completion */ + u64 swapin_delay; /* wait for swapin block io completion */ + u32 blkio_count; /* total count of the number of sync block */ + /* io operations performed */ + u32 swapin_count; /* total count of the number of swapin block */ + /* io operations performed */ + + u64 freepages_start; + u64 freepages_delay; /* wait for memory reclaim */ + u32 freepages_count; /* total count of memory reclaim */ +}; +#endif /* CONFIG_TASK_DELAY_ACCT */ + +static inline int sched_info_on(void) +{ +#ifdef CONFIG_SCHEDSTATS + return 1; +#elif defined(CONFIG_TASK_DELAY_ACCT) + extern int delayacct_on; + return delayacct_on; +#else + return 0; +#endif +} + +#ifdef CONFIG_SCHEDSTATS +void force_schedstat_enabled(void); +#endif + +enum cpu_idle_type { + CPU_IDLE, + CPU_NOT_IDLE, + CPU_NEWLY_IDLE, + CPU_MAX_IDLE_TYPES }; /* @@ -383,84 +995,321 @@ struct sched_info { * We define a basic fixed point 
arithmetic range, and then formalize * all these metrics based on that basic range. */ -# define SCHED_FIXEDPOINT_SHIFT 10 -# define SCHED_FIXEDPOINT_SCALE (1L << SCHED_FIXEDPOINT_SHIFT) - -/* Increase resolution of cpu_capacity calculations */ -# define SCHED_CAPACITY_SHIFT SCHED_FIXEDPOINT_SHIFT -# define SCHED_CAPACITY_SCALE (1L << SCHED_CAPACITY_SHIFT) - -struct load_weight { - unsigned long weight; - u32 inv_weight; -}; - -/** - * struct util_est - Estimation utilization of FAIR tasks - * @enqueued: instantaneous estimated utilization of a task/cpu - * @ewma: the Exponential Weighted Moving Average (EWMA) - * utilization of a task - * - * Support data structure to track an Exponential Weighted Moving Average - * (EWMA) of a FAIR task's utilization. New samples are added to the moving - * average each time a task completes an activation. Sample's weight is chosen - * so that the EWMA will be relatively insensitive to transient changes to the - * task's workload. - * - * The enqueued attribute has a slightly different meaning for tasks and cpus: - * - task: the task's util_avg at last task dequeue time - * - cfs_rq: the sum of util_est.enqueued for each RUNNABLE task on that CPU - * Thus, the util_est.enqueued of a task represents the contribution on the - * estimated utilization of the CPU where that task is currently enqueued. - * - * Only for tasks we track a moving average of the past instantaneous - * estimated utilization. This allows to absorb sporadic drops in utilization - * of an otherwise almost periodic task. - * - * The UTIL_AVG_UNCHANGED flag is used to synchronize util_est with util_avg - * updates. When a task is dequeued, its util_est should not be updated if its - * util_avg has not been updated in the meantime. - * This information is mapped into the MSB bit of util_est.enqueued at dequeue - * time. Since max value of util_est.enqueued for a task is 1024 (PELT util_avg - * for a task) it is safe to use MSB. 
- */ -struct util_est { - unsigned int enqueued; - unsigned int ewma; -#define UTIL_EST_WEIGHT_SHIFT 2 -#define UTIL_AVG_UNCHANGED 0x80000000 -} __attribute__((__aligned__(sizeof(u64)))); +# define SCHED_FIXEDPOINT_SHIFT 10 +# define SCHED_FIXEDPOINT_SCALE (1L << SCHED_FIXEDPOINT_SHIFT) /* - * The load/runnable/util_avg accumulates an infinite geometric series - * (see __update_load_avg_cfs_rq() in kernel/sched/pelt.c). + * Increase resolution of cpu_capacity calculations + */ +#define SCHED_CAPACITY_SHIFT SCHED_FIXEDPOINT_SHIFT +#define SCHED_CAPACITY_SCALE (1L << SCHED_CAPACITY_SHIFT) + +/* + * Wake-queues are lists of tasks with a pending wakeup, whose + * callers have already marked the task as woken internally, + * and can thus carry on. A common use case is being able to + * do the wakeups once the corresponding user lock as been + * released. + * + * We hold reference to each task in the list across the wakeup, + * thus guaranteeing that the memory is still valid by the time + * the actual wakeups are performed in wake_up_q(). + * + * One per task suffices, because there's never a need for a task to be + * in two wake queues simultaneously; it is forbidden to abandon a task + * in a wake queue (a call to wake_up_q() _must_ follow), so if a task is + * already in a wake queue, the wakeup will happen soon and the second + * waker can just skip it. + * + * The WAKE_Q macro declares and initializes the list head. + * wake_up_q() does NOT reinitialize the list; it's expected to be + * called near the end of a function, where the fact that the queue is + * not used again will be easy to see by inspection. + * + * Note that this can cause spurious wakeups. schedule() callers + * must ensure the call is done inside a loop, confirming that the + * wakeup condition has in fact occurred. 
+ */ +struct wake_q_node { + struct wake_q_node *next; +}; + +struct wake_q_head { + struct wake_q_node *first; + struct wake_q_node **lastp; +}; + +#define WAKE_Q_TAIL ((struct wake_q_node *) 0x01) + +#define WAKE_Q(name) \ + struct wake_q_head name = { WAKE_Q_TAIL, &name.first } + +extern void wake_q_add(struct wake_q_head *head, + struct task_struct *task); +extern void wake_up_q(struct wake_q_head *head); + +/* + * sched-domains (multiprocessor balancing) declarations: + */ +#ifdef CONFIG_SMP +#define SD_LOAD_BALANCE 0x0001 /* Do load balancing on this domain. */ +#define SD_BALANCE_NEWIDLE 0x0002 /* Balance when about to become idle */ +#define SD_BALANCE_EXEC 0x0004 /* Balance on exec */ +#define SD_BALANCE_FORK 0x0008 /* Balance on fork, clone */ +#define SD_BALANCE_WAKE 0x0010 /* Balance on wakeup */ +#define SD_WAKE_AFFINE 0x0020 /* Wake task to waking CPU */ +#define SD_ASYM_CPUCAPACITY 0x0040 /* Groups have different max cpu capacities */ +#define SD_SHARE_CPUCAPACITY 0x0080 /* Domain members share cpu capacity */ +#define SD_SHARE_POWERDOMAIN 0x0100 /* Domain members share power domain */ +#define SD_SHARE_PKG_RESOURCES 0x0200 /* Domain members share cpu pkg resources */ +#define SD_SERIALIZE 0x0400 /* Only a single load balancing instance */ +#define SD_ASYM_PACKING 0x0800 /* Place busy groups earlier in the domain */ +#define SD_PREFER_SIBLING 0x1000 /* Prefer to place tasks in a sibling domain */ +#define SD_OVERLAP 0x2000 /* sched_domains of this level overlap */ +#define SD_NUMA 0x4000 /* cross-node balancing */ + +#ifdef CONFIG_SCHED_SMT +static inline int cpu_smt_flags(void) +{ + return SD_SHARE_CPUCAPACITY | SD_SHARE_PKG_RESOURCES; +} +#endif + +#ifdef CONFIG_SCHED_MC +static inline int cpu_core_flags(void) +{ + return SD_SHARE_PKG_RESOURCES; +} +#endif + +#ifdef CONFIG_NUMA +static inline int cpu_numa_flags(void) +{ + return SD_NUMA; +} +#endif + +struct sched_domain_attr { + int relax_domain_level; +}; + +#define SD_ATTR_INIT (struct 
sched_domain_attr) { \ + .relax_domain_level = -1, \ +} + +extern int sched_domain_level_max; + +struct sched_group; + +struct sched_domain_shared { + atomic_t ref; + atomic_t nr_busy_cpus; + int has_idle_cores; +}; + +struct sched_domain { + /* These fields must be setup */ + struct sched_domain *parent; /* top domain must be null terminated */ + struct sched_domain *child; /* bottom domain must be null terminated */ + struct sched_group *groups; /* the balancing groups of the domain */ + unsigned long min_interval; /* Minimum balance interval ms */ + unsigned long max_interval; /* Maximum balance interval ms */ + unsigned int busy_factor; /* less balancing by factor if busy */ + unsigned int imbalance_pct; /* No balance until over watermark */ + unsigned int cache_nice_tries; /* Leave cache hot tasks for # tries */ + unsigned int busy_idx; + unsigned int idle_idx; + unsigned int newidle_idx; + unsigned int wake_idx; + unsigned int forkexec_idx; + unsigned int smt_gain; + + int nohz_idle; /* NOHZ IDLE status */ + int flags; /* See SD_* */ + int level; + + /* Runtime fields. */ + unsigned long last_balance; /* init to jiffies. units in jiffies */ + unsigned int balance_interval; /* initialise to 1. units in ms. 
*/ + unsigned int nr_balance_failed; /* initialise to 0 */ + + /* idle_balance() stats */ + u64 max_newidle_lb_cost; + unsigned long next_decay_max_lb_cost; + + u64 avg_scan_cost; /* select_idle_sibling */ + +#ifdef CONFIG_SCHEDSTATS + /* load_balance() stats */ + unsigned int lb_count[CPU_MAX_IDLE_TYPES]; + unsigned int lb_failed[CPU_MAX_IDLE_TYPES]; + unsigned int lb_balanced[CPU_MAX_IDLE_TYPES]; + unsigned int lb_imbalance[CPU_MAX_IDLE_TYPES]; + unsigned int lb_gained[CPU_MAX_IDLE_TYPES]; + unsigned int lb_hot_gained[CPU_MAX_IDLE_TYPES]; + unsigned int lb_nobusyg[CPU_MAX_IDLE_TYPES]; + unsigned int lb_nobusyq[CPU_MAX_IDLE_TYPES]; + + /* Active load balancing */ + unsigned int alb_count; + unsigned int alb_failed; + unsigned int alb_pushed; + + /* SD_BALANCE_EXEC stats */ + unsigned int sbe_count; + unsigned int sbe_balanced; + unsigned int sbe_pushed; + + /* SD_BALANCE_FORK stats */ + unsigned int sbf_count; + unsigned int sbf_balanced; + unsigned int sbf_pushed; + + /* try_to_wake_up() stats */ + unsigned int ttwu_wake_remote; + unsigned int ttwu_move_affine; + unsigned int ttwu_move_balance; +#endif +#ifdef CONFIG_SCHED_DEBUG + char *name; +#endif + union { + void *private; /* used during construction */ + struct rcu_head rcu; /* used during destruction */ + }; + struct sched_domain_shared *shared; + + unsigned int span_weight; + /* + * Span of all CPUs in this domain. + * + * NOTE: this field is variable length. (Allocated dynamically + * by attaching extra space to the end of the structure, + * depending on how many CPUs the kernel has booted up with) + */ + unsigned long span[0]; +}; + +static inline struct cpumask *sched_domain_span(struct sched_domain *sd) +{ + return to_cpumask(sd->span); +} + +extern void partition_sched_domains(int ndoms_new, cpumask_var_t doms_new[], + struct sched_domain_attr *dattr_new); + +/* Allocate an array of sched domains, for partition_sched_domains(). 
*/ +cpumask_var_t *alloc_sched_domains(unsigned int ndoms); +void free_sched_domains(cpumask_var_t doms[], unsigned int ndoms); + +bool cpus_share_cache(int this_cpu, int that_cpu); + +typedef const struct cpumask *(*sched_domain_mask_f)(int cpu); +typedef int (*sched_domain_flags_f)(void); + +#define SDTL_OVERLAP 0x01 + +struct sd_data { + struct sched_domain **__percpu sd; + struct sched_domain_shared **__percpu sds; + struct sched_group **__percpu sg; + struct sched_group_capacity **__percpu sgc; +}; + +struct sched_domain_topology_level { + sched_domain_mask_f mask; + sched_domain_flags_f sd_flags; + int flags; + int numa_level; + struct sd_data data; +#ifdef CONFIG_SCHED_DEBUG + char *name; +#endif +}; + +extern void set_sched_topology(struct sched_domain_topology_level *tl); +extern void wake_up_if_idle(int cpu); + +#ifdef CONFIG_SCHED_DEBUG +# define SD_INIT_NAME(type) .name = #type +#else +# define SD_INIT_NAME(type) +#endif + +#else /* CONFIG_SMP */ + +struct sched_domain_attr; + +static inline void +partition_sched_domains(int ndoms_new, cpumask_var_t doms_new[], + struct sched_domain_attr *dattr_new) +{ +} + +static inline bool cpus_share_cache(int this_cpu, int that_cpu) +{ + return true; +} + +#endif /* !CONFIG_SMP */ + + +struct io_context; /* See blkdev.h */ + + +#ifdef ARCH_HAS_PREFETCH_SWITCH_STACK +extern void prefetch_stack(struct task_struct *t); +#else +static inline void prefetch_stack(struct task_struct *t) { } +#endif + +struct audit_context; /* See audit.c */ +struct mempolicy; +struct pipe_inode_info; +struct uts_namespace; + +struct load_weight { + unsigned long weight; + u32 inv_weight; +}; + +/* + * The load_avg/util_avg accumulates an infinite geometric series + * (see __update_load_avg() in kernel/sched/fair.c). * * [load_avg definition] * * load_avg = runnable% * scale_load_down(load) * - * [runnable_avg definition] + * where runnable% is the time ratio that a sched_entity is runnable. 
+ * For cfs_rq, it is the aggregated load_avg of all runnable and + * blocked sched_entities. * - * runnable_avg = runnable% * SCHED_CAPACITY_SCALE + * load_avg may also take frequency scaling into account: + * + * load_avg = runnable% * scale_load_down(load) * freq% + * + * where freq% is the CPU frequency normalized to the highest frequency. * * [util_avg definition] * * util_avg = running% * SCHED_CAPACITY_SCALE * - * where runnable% is the time ratio that a sched_entity is runnable and - * running% the time ratio that a sched_entity is running. + * where running% is the time ratio that a sched_entity is running on + * a CPU. For cfs_rq, it is the aggregated util_avg of all runnable + * and blocked sched_entities. * - * For cfs_rq, they are the aggregated values of all runnable and blocked - * sched_entities. + * util_avg may also factor frequency scaling and CPU capacity scaling: * - * The load/runnable/util_avg doesn't directly factor frequency scaling and CPU - * capacity scaling. The scaling is done through the rq_clock_pelt that is used - * for computing those signals (see update_rq_clock_pelt()) + * util_avg = running% * SCHED_CAPACITY_SCALE * freq% * capacity% * - * N.B., the above ratios (runnable% and running%) themselves are in the - * range of [0, 1]. To do fixed point arithmetics, we therefore scale them - * to as large a range as necessary. This is for example reflected by - * util_avg's SCHED_CAPACITY_SCALE. + * where freq% is the same as above, and capacity% is the CPU capacity + * normalized to the greatest capacity (due to uarch differences, etc). + * + * N.B., the above ratios (runnable%, running%, freq%, and capacity%) + * themselves are in the range of [0, 1]. To do fixed point arithmetics, + * we therefore scale them to as large a range as necessary. This is for + * example reflected by util_avg's SCHED_CAPACITY_SCALE. * * [Overflow issue] * @@ -477,78 +1326,71 @@ struct util_est { * issues. 
*/ struct sched_avg { - u64 last_update_time; - u64 load_sum; - u64 runnable_sum; - u32 util_sum; - u32 period_contrib; - unsigned long load_avg; - unsigned long runnable_avg; - unsigned long util_avg; - struct util_est util_est; -} ____cacheline_aligned; - -struct sched_statistics { -#ifdef CONFIG_SCHEDSTATS - u64 wait_start; - u64 wait_max; - u64 wait_count; - u64 wait_sum; - u64 iowait_count; - u64 iowait_sum; - - u64 sleep_start; - u64 sleep_max; - s64 sum_sleep_runtime; - - u64 block_start; - u64 block_max; - u64 exec_max; - u64 slice_max; - - u64 nr_migrations_cold; - u64 nr_failed_migrations_affine; - u64 nr_failed_migrations_running; - u64 nr_failed_migrations_hot; - u64 nr_forced_migrations; - - u64 nr_wakeups; - u64 nr_wakeups_sync; - u64 nr_wakeups_migrate; - u64 nr_wakeups_local; - u64 nr_wakeups_remote; - u64 nr_wakeups_affine; - u64 nr_wakeups_affine_attempts; - u64 nr_wakeups_passive; - u64 nr_wakeups_idle; -#endif + u64 last_update_time, load_sum; + u32 util_sum, period_contrib; + unsigned long load_avg, util_avg; }; +#ifdef CONFIG_SCHEDSTATS +struct sched_statistics { + u64 wait_start; + u64 wait_max; + u64 wait_count; + u64 wait_sum; + u64 iowait_count; + u64 iowait_sum; + + u64 sleep_start; + u64 sleep_max; + s64 sum_sleep_runtime; + + u64 block_start; + u64 block_max; + u64 exec_max; + u64 slice_max; + + u64 nr_migrations_cold; + u64 nr_failed_migrations_affine; + u64 nr_failed_migrations_running; + u64 nr_failed_migrations_hot; + u64 nr_forced_migrations; + + u64 nr_wakeups; + u64 nr_wakeups_sync; + u64 nr_wakeups_migrate; + u64 nr_wakeups_local; + u64 nr_wakeups_remote; + u64 nr_wakeups_affine; + u64 nr_wakeups_affine_attempts; + u64 nr_wakeups_passive; + u64 nr_wakeups_idle; +}; +#endif + struct sched_entity { - /* For load-balancing: */ - struct load_weight load; - struct rb_node run_node; - struct list_head group_node; - unsigned int on_rq; + struct load_weight load; /* for load-balancing */ + struct rb_node run_node; + struct list_head 
group_node; + unsigned int on_rq; - u64 exec_start; - u64 sum_exec_runtime; - u64 vruntime; - u64 prev_sum_exec_runtime; + u64 exec_start; + u64 sum_exec_runtime; + u64 vruntime; + u64 prev_sum_exec_runtime; - u64 nr_migrations; + u64 nr_migrations; - struct sched_statistics statistics; +#ifdef CONFIG_SCHEDSTATS + struct sched_statistics statistics; +#endif #ifdef CONFIG_FAIR_GROUP_SCHED - int depth; - struct sched_entity *parent; + int depth; + struct sched_entity *parent; /* rq on which this entity is (to be) queued: */ - struct cfs_rq *cfs_rq; + struct cfs_rq *cfs_rq; /* rq "owned" by this entity/group: */ - struct cfs_rq *my_q; - /* cached value of my_q->h_nr_running */ - unsigned long runnable_weight; + struct cfs_rq *my_q; #endif #ifdef CONFIG_SMP @@ -558,50 +1400,49 @@ struct sched_entity { * Put into separate cache line so it does not * collide with read-mostly values above. */ - struct sched_avg avg; + struct sched_avg avg ____cacheline_aligned_in_smp; #endif }; struct sched_rt_entity { - struct list_head run_list; - unsigned long timeout; - unsigned long watchdog_stamp; - unsigned int time_slice; - unsigned short on_rq; - unsigned short on_list; + struct list_head run_list; + unsigned long timeout; + unsigned long watchdog_stamp; + unsigned int time_slice; + unsigned short on_rq; + unsigned short on_list; - struct sched_rt_entity *back; + struct sched_rt_entity *back; #ifdef CONFIG_RT_GROUP_SCHED - struct sched_rt_entity *parent; + struct sched_rt_entity *parent; /* rq on which this entity is (to be) queued: */ - struct rt_rq *rt_rq; + struct rt_rq *rt_rq; /* rq "owned" by this entity/group: */ - struct rt_rq *my_q; + struct rt_rq *my_q; #endif -} __randomize_layout; +}; struct sched_dl_entity { - struct rb_node rb_node; + struct rb_node rb_node; /* * Original scheduling parameters. Copied here from sched_attr * during sched_setattr(), they will remain the same until * the next sched_setattr(). 
*/ - u64 dl_runtime; /* Maximum runtime for each instance */ - u64 dl_deadline; /* Relative deadline of each instance */ - u64 dl_period; /* Separation of two instances (period) */ - u64 dl_bw; /* dl_runtime / dl_period */ - u64 dl_density; /* dl_runtime / dl_deadline */ + u64 dl_runtime; /* maximum runtime for each instance */ + u64 dl_deadline; /* relative deadline of each instance */ + u64 dl_period; /* separation of two instances (period) */ + u64 dl_bw; /* dl_runtime / dl_deadline */ /* * Actual scheduling parameters. Initialized with the values above, - * they are continuously updated during task execution. Note that + * they are continously updated during task execution. Note that * the remaining runtime could be < 0 in case we are in overrun. */ - s64 runtime; /* Remaining runtime for this instance */ - u64 deadline; /* Absolute deadline for this instance */ - unsigned int flags; /* Specifying the scheduler behaviour */ + s64 runtime; /* remaining runtime for this instance */ + u64 deadline; /* absolute deadline for this instance */ + unsigned int flags; /* specifying the scheduler behaviour */ /* * Some bool flags: @@ -614,93 +1455,28 @@ struct sched_dl_entity { * outside bandwidth enforcement mechanism (but only until we * exit the critical section); * - * @dl_yielded tells if task gave up the CPU before consuming + * @dl_yielded tells if task gave up the cpu before consuming * all its available runtime during the last job. - * - * @dl_non_contending tells if the task is inactive while still - * contributing to the active utilization. In other words, it - * indicates if the inactive timer has been armed and its handler - * has not been executed yet. This flag is useful to avoid race - * conditions between the inactive timer handler and the wakeup - * code. - * - * @dl_overrun tells if the task asked to be informed about runtime - * overruns. 
*/ - unsigned int dl_throttled : 1; - unsigned int dl_yielded : 1; - unsigned int dl_non_contending : 1; - unsigned int dl_overrun : 1; + int dl_throttled, dl_boosted, dl_yielded; /* * Bandwidth enforcement timer. Each -deadline task has its * own bandwidth to be enforced, thus we need one timer per task. */ - struct hrtimer dl_timer; - - /* - * Inactive timer, responsible for decreasing the active utilization - * at the "0-lag time". When a -deadline task blocks, it contributes - * to GRUB's active utilization until the "0-lag time", hence a - * timer is needed to decrease the active utilization at the correct - * time. - */ - struct hrtimer inactive_timer; - -#ifdef CONFIG_RT_MUTEXES - /* - * Priority Inheritance. When a DEADLINE scheduling entity is boosted - * pi_se points to the donor, otherwise points to the dl_se it belongs - * to (the original one/itself). - */ - struct sched_dl_entity *pi_se; -#endif + struct hrtimer dl_timer; }; -#ifdef CONFIG_UCLAMP_TASK -/* Number of utilization clamp buckets (shorter alias) */ -#define UCLAMP_BUCKETS CONFIG_UCLAMP_BUCKETS_COUNT - -/* - * Utilization clamp for a scheduling entity - * @value: clamp value "assigned" to a se - * @bucket_id: bucket index corresponding to the "assigned" value - * @active: the se is currently refcounted in a rq's bucket - * @user_defined: the requested clamp value comes from user-space - * - * The bucket_id is the index of the clamp bucket matching the clamp value - * which is pre-computed and stored to avoid expensive integer divisions from - * the fast path. - * - * The active bit is set whenever a task has got an "effective" value assigned, - * which can be different from the clamp value "requested" from user-space. - * This allows to know a task is refcounted in the rq's bucket corresponding - * to the "effective" bucket_id. - * - * The user_defined bit is set whenever a task has got a task-specific clamp - * value requested from userspace, i.e. 
the system defaults apply to this task - * just as a restriction. This allows to relax default clamps when a less - * restrictive task-specific value has been requested, thus allowing to - * implement a "nice" semantic. For example, a task running with a 20% - * default boost can still drop its own boosting to 0%. - */ -struct uclamp_se { - unsigned int value : bits_per(SCHED_CAPACITY_SCALE); - unsigned int bucket_id : bits_per(UCLAMP_BUCKETS); - unsigned int active : 1; - unsigned int user_defined : 1; -}; -#endif /* CONFIG_UCLAMP_TASK */ - union rcu_special { struct { - u8 blocked; - u8 need_qs; - u8 exp_hint; /* Hint for performance. */ - u8 need_mb; /* Readers need smp_mb(). */ + u8 blocked; + u8 need_qs; + u8 exp_need_qs; + u8 pad; /* Otherwise the compiler can store garbage here. */ } b; /* Bits. */ u32 s; /* Set of bits. */ }; +struct rcu_node; enum perf_event_task_context { perf_invalid_context = -1, @@ -709,15 +1485,23 @@ enum perf_event_task_context { perf_nr_task_contexts, }; -struct wake_q_node { - struct wake_q_node *next; -}; +/* Track pages that require TLB flushes */ +struct tlbflush_unmap_batch { + /* + * Each bit set is a CPU that potentially has a TLB entry for one of + * the PFNs being flushed. See set_tlb_ubc_flush_pending(). + */ + struct cpumask cpumask; -struct kmap_ctrl { -#ifdef CONFIG_KMAP_LOCAL - int idx; - pte_t pteval[KM_MAX_IDX]; -#endif + /* True if any bit in cpumask is set */ + bool flush_required; + + /* + * If true then the PTE was dirty when unmapped. The entry must be + * flushed before IO is initiated or a stale TLB entry potentially + * allows an update without redirtying the page. + */ + bool writable; }; struct task_struct { @@ -726,535 +1510,370 @@ struct task_struct { * For reasons of header soup (see current_thread_info()), this * must be the first element of task_struct. 
*/ - struct thread_info thread_info; + struct thread_info thread_info; #endif - unsigned int __state; - -#ifdef CONFIG_PREEMPT_RT - /* saved state for "spinlock sleepers" */ - unsigned int saved_state; + volatile long state; /* -1 unrunnable, 0 runnable, >0 stopped */ + void *stack; +#ifdef CONFIG_GRKERNSEC_KSTACKOVERFLOW + void *lowmem_stack; #endif - - /* - * This begins the randomizable portion of task_struct. Only - * scheduling-critical items should be added above here. - */ - randomized_struct_fields_start - - void *stack; - refcount_t usage; - /* Per task flags (PF_*), defined further below: */ - unsigned int flags; - unsigned int ptrace; + atomic_t usage; + unsigned int flags; /* per process flags, defined below */ + unsigned int ptrace; #ifdef CONFIG_SMP - int on_cpu; - struct __call_single_node wake_entry; + struct llist_node wake_entry; + int on_cpu; #ifdef CONFIG_THREAD_INFO_IN_TASK - /* Current CPU: */ - unsigned int cpu; + unsigned int cpu; /* current CPU */ #endif - unsigned int wakee_flips; - unsigned long wakee_flip_decay_ts; - struct task_struct *last_wakee; + unsigned int wakee_flips; + unsigned long wakee_flip_decay_ts; + struct task_struct *last_wakee; - /* - * recent_used_cpu is initially set as the last CPU used by a task - * that wakes affine another task. Waker/wakee relationships can - * push tasks around a CPU where each wakeup moves to the next one. - * Tracking a recently used CPU allows a quick search for a recently - * used CPU that may be idle. 
- */ - int recent_used_cpu; - int wake_cpu; -#endif - int on_rq; - - int prio; - int static_prio; - int normal_prio; - unsigned int rt_priority; - - const struct sched_class *sched_class; - struct sched_entity se; - struct sched_rt_entity rt; - struct sched_dl_entity dl; - -#ifdef CONFIG_SCHED_CORE - struct rb_node core_node; - unsigned long core_cookie; - unsigned int core_occupation; + int wake_cpu; #endif + int on_rq; + int prio, static_prio, normal_prio; + unsigned int rt_priority; + const struct sched_class *sched_class; + struct sched_entity se; + struct sched_rt_entity rt; #ifdef CONFIG_CGROUP_SCHED - struct task_group *sched_task_group; -#endif - -#ifdef CONFIG_UCLAMP_TASK - /* - * Clamp values requested for a scheduling entity. - * Must be updated with task_rq_lock() held. - */ - struct uclamp_se uclamp_req[UCLAMP_CNT]; - /* - * Effective clamp values used for a scheduling entity. - * Must be updated with task_rq_lock() held. - */ - struct uclamp_se uclamp[UCLAMP_CNT]; + struct task_group *sched_task_group; #endif + struct sched_dl_entity dl; #ifdef CONFIG_PREEMPT_NOTIFIERS - /* List of struct preempt_notifier: */ - struct hlist_head preempt_notifiers; + /* list of struct preempt_notifier: */ + struct hlist_head preempt_notifiers; #endif #ifdef CONFIG_BLK_DEV_IO_TRACE - unsigned int btrace_seq; + unsigned int btrace_seq; #endif - unsigned int policy; - int nr_cpus_allowed; - const cpumask_t *cpus_ptr; - cpumask_t *user_cpus_ptr; - cpumask_t cpus_mask; - void *migration_pending; -#ifdef CONFIG_SMP - unsigned short migration_disabled; -#endif - unsigned short migration_flags; + unsigned int policy; + int nr_cpus_allowed; + cpumask_t cpus_allowed; #ifdef CONFIG_PREEMPT_RCU - int rcu_read_lock_nesting; - union rcu_special rcu_read_unlock_special; - struct list_head rcu_node_entry; - struct rcu_node *rcu_blocked_node; + int rcu_read_lock_nesting; + union rcu_special rcu_read_unlock_special; + struct list_head rcu_node_entry; + struct rcu_node *rcu_blocked_node; 
#endif /* #ifdef CONFIG_PREEMPT_RCU */ - #ifdef CONFIG_TASKS_RCU - unsigned long rcu_tasks_nvcsw; - u8 rcu_tasks_holdout; - u8 rcu_tasks_idx; - int rcu_tasks_idle_cpu; - struct list_head rcu_tasks_holdout_list; + unsigned long rcu_tasks_nvcsw; + bool rcu_tasks_holdout; + struct list_head rcu_tasks_holdout_list; + int rcu_tasks_idle_cpu; #endif /* #ifdef CONFIG_TASKS_RCU */ -#ifdef CONFIG_TASKS_TRACE_RCU - int trc_reader_nesting; - int trc_ipi_to_cpu; - union rcu_special trc_reader_special; - bool trc_reader_checked; - struct list_head trc_holdout_list; -#endif /* #ifdef CONFIG_TASKS_TRACE_RCU */ +#ifdef CONFIG_SCHED_INFO + struct sched_info sched_info; +#endif - struct sched_info sched_info; - - struct list_head tasks; + struct list_head tasks; #ifdef CONFIG_SMP - struct plist_node pushable_tasks; - struct rb_node pushable_dl_tasks; + struct plist_node pushable_tasks; + struct rb_node pushable_dl_tasks; #endif - struct mm_struct *mm; - struct mm_struct *active_mm; - - /* Per-thread vma caching: */ - struct vmacache vmacache; - -#ifdef SPLIT_RSS_COUNTING - struct task_rss_stat rss_stat; + struct mm_struct *mm, *active_mm; + /* per-thread vma caching */ + u32 vmacache_seqnum; + struct vm_area_struct *vmacache[VMACACHE_SIZE]; +#if defined(SPLIT_RSS_COUNTING) + struct task_rss_stat rss_stat; #endif - int exit_state; - int exit_code; - int exit_signal; - /* The signal sent when the parent dies: */ - int pdeath_signal; - /* JOBCTL_*, siglock protected: */ - unsigned long jobctl; +/* task state */ + int exit_state; + int exit_code, exit_signal; + int pdeath_signal; /* The signal sent when the parent dies */ + unsigned long jobctl; /* JOBCTL_*, siglock protected */ - /* Used for emulating ABI behavior of previous Linux versions: */ - unsigned int personality; + /* Used for emulating ABI behavior of previous Linux versions */ + unsigned int personality; - /* Scheduler bits, serialized by scheduler locks: */ - unsigned sched_reset_on_fork:1; - unsigned 
sched_contributes_to_load:1; - unsigned sched_migrated:1; -#ifdef CONFIG_PSI - unsigned sched_psi_wake_requeue:1; -#endif + /* scheduler bits, serialized by scheduler locks */ + unsigned sched_reset_on_fork:1; + unsigned sched_contributes_to_load:1; + unsigned sched_migrated:1; + unsigned sched_remote_wakeup:1; + unsigned :0; /* force alignment to the next boundary */ - /* Force alignment to the next boundary: */ - unsigned :0; - - /* Unserialized, strictly 'current' */ - - /* - * This field must not be in the scheduler word above due to wakelist - * queueing no longer being serialized by p->on_cpu. However: - * - * p->XXX = X; ttwu() - * schedule() if (p->on_rq && ..) // false - * smp_mb__after_spinlock(); if (smp_load_acquire(&p->on_cpu) && //true - * deactivate_task() ttwu_queue_wakelist()) - * p->on_rq = 0; p->sched_remote_wakeup = Y; - * - * guarantees all stores of 'current' are visible before - * ->sched_remote_wakeup gets used, so it can be in this word. - */ - unsigned sched_remote_wakeup:1; - - /* Bit to tell LSMs we're in execve(): */ - unsigned in_execve:1; - unsigned in_iowait:1; -#ifndef TIF_RESTORE_SIGMASK - unsigned restore_sigmask:1; + /* unserialized, strictly 'current' */ + unsigned in_execve:1; /* bit to tell LSMs we're in execve */ + unsigned in_iowait:1; +#if !defined(TIF_RESTORE_SIGMASK) + unsigned restore_sigmask:1; #endif #ifdef CONFIG_MEMCG - unsigned in_user_fault:1; + unsigned memcg_may_oom:1; +#ifndef CONFIG_SLOB + unsigned memcg_kmem_skip_account:1; +#endif #endif #ifdef CONFIG_COMPAT_BRK - unsigned brk_randomized:1; + unsigned brk_randomized:1; #endif #ifdef CONFIG_CGROUPS /* disallow userland-initiated cgroup migration */ - unsigned no_cgroup_migration:1; - /* task is frozen/stopped (used by the cgroup freezer) */ - unsigned frozen:1; -#endif -#ifdef CONFIG_BLK_CGROUP - unsigned use_memdelay:1; -#endif -#ifdef CONFIG_PSI - /* Stalled due to lack of memory */ - unsigned in_memstall:1; -#endif -#ifdef CONFIG_PAGE_OWNER - /* Used by 
page_owner=on to detect recursion in page tracking. */ - unsigned in_page_owner:1; -#endif -#ifdef CONFIG_EVENTFD - /* Recursion prevention for eventfd_signal() */ - unsigned in_eventfd_signal:1; + unsigned no_cgroup_migration:1; #endif - unsigned long atomic_flags; /* Flags requiring atomic access. */ + unsigned long atomic_flags; /* Flags needing atomic access. */ - struct restart_block restart_block; + struct restart_block restart_block; - pid_t pid; - pid_t tgid; + pid_t pid; + pid_t tgid; -#ifdef CONFIG_STACKPROTECTOR - /* Canary value for the -fstack-protector GCC feature: */ - unsigned long stack_canary; +#ifdef CONFIG_CC_STACKPROTECTOR + /* Canary value for the -fstack-protector gcc feature */ + unsigned long stack_canary; #endif /* - * Pointers to the (original) parent process, youngest child, younger sibling, + * pointers to (original) parent process, youngest child, younger sibling, * older sibling, respectively. (p->father can be replaced with * p->real_parent->pid) */ - - /* Real parent process: */ - struct task_struct __rcu *real_parent; - - /* Recipient of SIGCHLD, wait4() reports: */ - struct task_struct __rcu *parent; - + struct task_struct __rcu *real_parent; /* real parent process */ + struct task_struct __rcu *parent; /* recipient of SIGCHLD, wait4() reports */ /* - * Children/sibling form the list of natural children: + * children/sibling forms the list of my natural children */ - struct list_head children; - struct list_head sibling; - struct task_struct *group_leader; + struct list_head children; /* list of my children */ + struct list_head sibling; /* linkage in my parent's children list */ + struct task_struct *group_leader; /* threadgroup leader */ + + const struct cred __rcu *ptracer_cred; /* Tracer's credentials at attach */ /* - * 'ptraced' is the list of tasks this task is using ptrace() on. - * + * ptraced is the list of tasks this task is using ptrace on. * This includes both natural children and PTRACE_ATTACH targets. 
- * 'ptrace_entry' is this task's link on the p->parent->ptraced list. + * p->ptrace_entry is p's link on the p->parent->ptraced list. */ - struct list_head ptraced; - struct list_head ptrace_entry; + struct list_head ptraced; + struct list_head ptrace_entry; /* PID/PID hash table linkage. */ - struct pid *thread_pid; - struct hlist_node pid_links[PIDTYPE_MAX]; - struct list_head thread_group; - struct list_head thread_node; + struct pid_link pids[PIDTYPE_MAX]; + struct list_head thread_group; + struct list_head thread_node; - struct completion *vfork_done; + struct completion *vfork_done; /* for vfork() */ + pid_t __user *set_child_tid; /* CLONE_CHILD_SETTID */ + pid_t __user *clear_child_tid; /* CLONE_CHILD_CLEARTID */ - /* CLONE_CHILD_SETTID: */ - int __user *set_child_tid; - - /* CLONE_CHILD_CLEARTID: */ - int __user *clear_child_tid; - - /* PF_IO_WORKER */ - void *pf_io_worker; - - u64 utime; - u64 stime; -#ifdef CONFIG_ARCH_HAS_SCALED_CPUTIME - u64 utimescaled; - u64 stimescaled; -#endif - u64 gtime; - struct prev_cputime prev_cputime; + cputime_t utime, stime, utimescaled, stimescaled; + cputime_t gtime; + struct prev_cputime prev_cputime; #ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN - struct vtime vtime; + seqcount_t vtime_seqcount; + unsigned long long vtime_snap; + enum { + /* Task is sleeping or running in a CPU with VTIME inactive */ + VTIME_INACTIVE = 0, + /* Task runs in userspace in a CPU with VTIME active */ + VTIME_USER, + /* Task runs in kernelspace in a CPU with VTIME active */ + VTIME_SYS, + } vtime_snap_whence; #endif #ifdef CONFIG_NO_HZ_FULL - atomic_t tick_dep_mask; + atomic_t tick_dep_mask; #endif - /* Context switch counts: */ - unsigned long nvcsw; - unsigned long nivcsw; + unsigned long nvcsw, nivcsw; /* context switch counts */ + u64 start_time; /* monotonic time in nsec */ + u64 real_start_time; /* boot based time in nsec */ +/* mm fault and swap info: this can arguably be seen as either mm-specific or thread-specific */ + unsigned long 
min_flt, maj_flt; - /* Monotonic time in nsecs: */ - u64 start_time; - - /* Boot based time in nsecs: */ - u64 start_boottime; - - /* MM fault and swap info: this can arguably be seen as either mm-specific or thread-specific: */ - unsigned long min_flt; - unsigned long maj_flt; - - /* Empty if CONFIG_POSIX_CPUTIMERS=n */ - struct posix_cputimers posix_cputimers; - -#ifdef CONFIG_POSIX_CPU_TIMERS_TASK_WORK - struct posix_cputimers_work posix_cputimers_work; -#endif - - /* Process credentials: */ - - /* Tracer's credentials at attach: */ - const struct cred __rcu *ptracer_cred; - - /* Objective and real subjective task credentials (COW): */ - const struct cred __rcu *real_cred; - - /* Effective (overridable) subjective task credentials (COW): */ - const struct cred __rcu *cred; - -#ifdef CONFIG_KEYS - /* Cached requested key. */ - struct key *cached_requested_key; -#endif - - /* - * executable name, excluding path. - * - * - normally initialized setup_new_exec() - * - access it with [gs]et_task_comm() - * - lock it with task_lock() - */ - char comm[TASK_COMM_LEN]; - - struct nameidata *nameidata; + struct task_cputime cputime_expires; + struct list_head cpu_timers[3]; + char comm[TASK_COMM_LEN]; /* executable name excluding path + - access with [gs]et_task_comm (which lock + it with task_lock()) + - initialized normally by setup_new_exec */ +/* file system info */ + struct nameidata *nameidata; #ifdef CONFIG_SYSVIPC - struct sysv_sem sysvsem; - struct sysv_shm sysvshm; +/* ipc stuff */ + struct sysv_sem sysvsem; + struct sysv_shm sysvshm; #endif #ifdef CONFIG_DETECT_HUNG_TASK - unsigned long last_switch_count; - unsigned long last_switch_time; +/* hung task detection */ + unsigned long last_switch_count; #endif - /* Filesystem information: */ - struct fs_struct *fs; +/* CPU-specific state of this task */ + struct thread_struct thread; +/* filesystem information */ + struct fs_struct *fs; +/* open file information */ + struct files_struct *files; +/* namespaces */ + 
struct nsproxy *nsproxy; +/* signal handlers */ + struct signal_struct *signal; + struct sighand_struct *sighand; - /* Open file information: */ - struct files_struct *files; + sigset_t real_blocked; + struct { + sigset_t blocked; + sigset_t saved_sigmask; /* restored if set_restore_sigmask() was used */ + }; + struct sigpending pending; -#ifdef CONFIG_IO_URING - struct io_uring_task *io_uring; -#endif + unsigned long sas_ss_sp; + size_t sas_ss_size; + unsigned sas_ss_flags; - /* Namespaces: */ - struct nsproxy *nsproxy; + struct callback_head *task_works; - /* Signal handlers: */ - struct signal_struct *signal; - struct sighand_struct __rcu *sighand; - sigset_t blocked; - sigset_t real_blocked; - /* Restored if set_restore_sigmask() was used: */ - sigset_t saved_sigmask; - struct sigpending pending; - unsigned long sas_ss_sp; - size_t sas_ss_size; - unsigned int sas_ss_flags; - - struct callback_head *task_works; - -#ifdef CONFIG_AUDIT + struct audit_context *audit_context; #ifdef CONFIG_AUDITSYSCALL - struct audit_context *audit_context; + kuid_t loginuid; + unsigned int sessionid; #endif - kuid_t loginuid; - unsigned int sessionid; -#endif - struct seccomp seccomp; - struct syscall_user_dispatch syscall_dispatch; + struct seccomp seccomp; - /* Thread group tracking: */ - u64 parent_exec_id; - u64 self_exec_id; - - /* Protection against (de-)allocation: mm, files, fs, tty, keyrings, mems_allowed, mempolicy: */ - spinlock_t alloc_lock; +/* Thread group tracking */ + u32 parent_exec_id; + u32 self_exec_id; +/* Protection of (de-)allocation: mm, files, fs, tty, keyrings, mems_allowed, + * mempolicy */ + spinlock_t alloc_lock; /* Protection of the PI data structures: */ - raw_spinlock_t pi_lock; + raw_spinlock_t pi_lock; - struct wake_q_node wake_q; + struct wake_q_node wake_q; #ifdef CONFIG_RT_MUTEXES - /* PI waiters blocked on a rt_mutex held by this task: */ - struct rb_root_cached pi_waiters; - /* Updated under owner's pi_lock and rq lock */ - struct task_struct 
*pi_top_task; - /* Deadlock detection and priority inheritance handling: */ - struct rt_mutex_waiter *pi_blocked_on; + /* PI waiters blocked on a rt_mutex held by this task */ + struct rb_root pi_waiters; + struct rb_node *pi_waiters_leftmost; + /* Deadlock detection and priority inheritance handling */ + struct rt_mutex_waiter *pi_blocked_on; #endif #ifdef CONFIG_DEBUG_MUTEXES - /* Mutex deadlock detection: */ - struct mutex_waiter *blocked_on; + /* mutex deadlock detection */ + struct mutex_waiter *blocked_on; #endif - -#ifdef CONFIG_DEBUG_ATOMIC_SLEEP - int non_block_count; -#endif - #ifdef CONFIG_TRACE_IRQFLAGS - struct irqtrace_events irqtrace; - unsigned int hardirq_threaded; - u64 hardirq_chain_key; - int softirqs_enabled; - int softirq_context; - int irq_config; + unsigned int irq_events; + unsigned long hardirq_enable_ip; + unsigned long hardirq_disable_ip; + unsigned int hardirq_enable_event; + unsigned int hardirq_disable_event; + int hardirqs_enabled; + int hardirq_context; + unsigned long softirq_disable_ip; + unsigned long softirq_enable_ip; + unsigned int softirq_disable_event; + unsigned int softirq_enable_event; + int softirqs_enabled; + int softirq_context; #endif -#ifdef CONFIG_PREEMPT_RT - int softirq_disable_cnt; -#endif - #ifdef CONFIG_LOCKDEP -# define MAX_LOCK_DEPTH 48UL - u64 curr_chain_key; - int lockdep_depth; - unsigned int lockdep_recursion; - struct held_lock held_locks[MAX_LOCK_DEPTH]; +# define MAX_LOCK_DEPTH 48UL + u64 curr_chain_key; + int lockdep_depth; + unsigned int lockdep_recursion; + struct held_lock held_locks[MAX_LOCK_DEPTH]; + gfp_t lockdep_reclaim_gfp; +#endif +#ifdef CONFIG_UBSAN + unsigned int in_ubsan; #endif -#if defined(CONFIG_UBSAN) && !defined(CONFIG_UBSAN_TRAP) - unsigned int in_ubsan; -#endif +/* process credentials */ + const struct cred __rcu *real_cred; /* objective and real subjective task + * credentials (COW) */ - /* Journalling filesystem info: */ - void *journal_info; +/* journalling filesystem info */ + 
void *journal_info; - /* Stacked block device info: */ - struct bio_list *bio_list; +/* stacked block device info */ + struct bio_list *bio_list; #ifdef CONFIG_BLOCK - /* Stack plugging: */ - struct blk_plug *plug; +/* stack plugging */ + struct blk_plug *plug; #endif - /* VM state: */ - struct reclaim_state *reclaim_state; +/* VM state */ + struct reclaim_state *reclaim_state; - struct backing_dev_info *backing_dev_info; + struct backing_dev_info *backing_dev_info; - struct io_context *io_context; + struct io_context *io_context; -#ifdef CONFIG_COMPACTION - struct capture_control *capture_control; -#endif - /* Ptrace state: */ - unsigned long ptrace_message; - kernel_siginfo_t *last_siginfo; - - struct task_io_accounting ioac; -#ifdef CONFIG_PSI - /* Pressure stall state */ - unsigned int psi_flags; -#endif -#ifdef CONFIG_TASK_XACCT - /* Accumulated RSS usage: */ - u64 acct_rss_mem1; - /* Accumulated virtual memory usage: */ - u64 acct_vm_mem1; - /* stime + utime since last update: */ - u64 acct_timexpd; + unsigned long ptrace_message; + siginfo_t *last_siginfo; /* For ptrace use. 
*/ + struct task_io_accounting ioac; +#if defined(CONFIG_TASK_XACCT) + u64 acct_rss_mem1; /* accumulated rss usage */ + u64 acct_vm_mem1; /* accumulated virtual memory usage */ + cputime_t acct_timexpd; /* stime + utime since last update */ #endif #ifdef CONFIG_CPUSETS - /* Protected by ->alloc_lock: */ - nodemask_t mems_allowed; - /* Sequence number to catch updates: */ - seqcount_spinlock_t mems_allowed_seq; - int cpuset_mem_spread_rotor; - int cpuset_slab_spread_rotor; + nodemask_t mems_allowed; /* Protected by alloc_lock */ + seqcount_t mems_allowed_seq; /* Seqence no to catch updates */ + int cpuset_mem_spread_rotor; + int cpuset_slab_spread_rotor; #endif #ifdef CONFIG_CGROUPS - /* Control Group info protected by css_set_lock: */ - struct css_set __rcu *cgroups; - /* cg_list protected by css_set_lock and tsk->alloc_lock: */ - struct list_head cg_list; -#endif -#ifdef CONFIG_X86_CPU_RESCTRL - u32 closid; - u32 rmid; + /* Control Group info protected by css_set_lock */ + struct css_set __rcu *cgroups; + /* cg_list protected by css_set_lock and tsk->alloc_lock */ + struct list_head cg_list; #endif + + const struct cred __rcu *cred; /* effective (overridable) subjective task + * credentials (COW) */ + #ifdef CONFIG_FUTEX - struct robust_list_head __user *robust_list; + struct robust_list_head __user *robust_list; #ifdef CONFIG_COMPAT struct compat_robust_list_head __user *compat_robust_list; #endif - struct list_head pi_state_list; - struct futex_pi_state *pi_state_cache; - struct mutex futex_exit_mutex; - unsigned int futex_state; + struct list_head pi_state_list; + struct futex_pi_state *pi_state_cache; #endif #ifdef CONFIG_PERF_EVENTS - struct perf_event_context *perf_event_ctxp[perf_nr_task_contexts]; - struct mutex perf_event_mutex; - struct list_head perf_event_list; + struct perf_event_context *perf_event_ctxp[perf_nr_task_contexts]; + struct mutex perf_event_mutex; + struct list_head perf_event_list; #endif #ifdef CONFIG_DEBUG_PREEMPT - unsigned long 
preempt_disable_ip; + unsigned long preempt_disable_ip; #endif #ifdef CONFIG_NUMA - /* Protected by alloc_lock: */ - struct mempolicy *mempolicy; - short il_prev; - short pref_node_fork; + struct mempolicy *mempolicy; /* Protected by alloc_lock */ + short il_next; + short pref_node_fork; #endif #ifdef CONFIG_NUMA_BALANCING - int numa_scan_seq; - unsigned int numa_scan_period; - unsigned int numa_scan_period_max; - int numa_preferred_nid; - unsigned long numa_migrate_retry; - /* Migration stamp: */ - u64 node_stamp; - u64 last_task_numa_placement; - u64 last_sum_exec_runtime; - struct callback_head numa_work; + int numa_scan_seq; + unsigned int numa_scan_period; + unsigned int numa_scan_period_max; + int numa_preferred_nid; + unsigned long numa_migrate_retry; + u64 node_stamp; /* migration stamp */ + u64 last_task_numa_placement; + u64 last_sum_exec_runtime; + struct callback_head numa_work; - /* - * This pointer is only modified for current in syscall and - * pagefault context (and for tasks being destroyed), so it can be read - * from any of the following contexts: - * - RCU read-side critical section - * - current->numa_group from everywhere - * - task's runqueue locked, task not running - */ - struct numa_group __rcu *numa_group; + struct list_head numa_entry; + struct numa_group *numa_group; /* * numa_faults is an array split into four regions: @@ -1270,8 +1889,8 @@ struct task_struct { * during the current scan window. When the scan completes, the counts * in faults_memory and faults_cpu decay and these values are copied. 
*/ - unsigned long *numa_faults; - unsigned long total_numa_faults; + unsigned long *numa_faults; + unsigned long total_numa_faults; /* * numa_faults_locality tracks if faults recorded during the last @@ -1279,237 +1898,314 @@ struct task_struct { * period is adapted based on the locality of the faults with different * weights depending on whether they were shared or private faults */ - unsigned long numa_faults_locality[3]; + unsigned long numa_faults_locality[3]; - unsigned long numa_pages_migrated; + unsigned long numa_pages_migrated; #endif /* CONFIG_NUMA_BALANCING */ -#ifdef CONFIG_RSEQ - struct rseq __user *rseq; - u32 rseq_sig; +#ifdef CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH + struct tlbflush_unmap_batch tlb_ubc; +#endif + + struct rcu_head rcu; + /* - * RmW on rseq_event_mask must be performed atomically - * with respect to preemption. + * cache last used pipe for splice */ - unsigned long rseq_event_mask; + struct pipe_inode_info *splice_pipe; + + struct page_frag task_frag; + +#ifdef CONFIG_TASK_DELAY_ACCT + struct task_delay_info *delays; #endif - - struct tlbflush_unmap_batch tlb_ubc; - - union { - refcount_t rcu_users; - struct rcu_head rcu; - }; - - /* Cache last used pipe for splice(): */ - struct pipe_inode_info *splice_pipe; - - struct page_frag task_frag; - -#ifdef CONFIG_TASK_DELAY_ACCT - struct task_delay_info *delays; -#endif - #ifdef CONFIG_FAULT_INJECTION - int make_it_fail; - unsigned int fail_nth; + int make_it_fail; #endif /* - * When (nr_dirtied >= nr_dirtied_pause), it's time to call - * balance_dirty_pages() for a dirty throttling pause: + * when (nr_dirtied >= nr_dirtied_pause), it's time to call + * balance_dirty_pages() for some dirty throttling pause */ - int nr_dirtied; - int nr_dirtied_pause; - /* Start of a write-and-pause period: */ - unsigned long dirty_paused_when; + int nr_dirtied; + int nr_dirtied_pause; + unsigned long dirty_paused_when; /* start of a write-and-pause period */ #ifdef CONFIG_LATENCYTOP - int 
latency_record_count; - struct latency_record latency_record[LT_SAVECOUNT]; + int latency_record_count; + struct latency_record latency_record[LT_SAVECOUNT]; #endif /* - * Time slack values; these are used to round up poll() and + * time slack values; these are used to round up poll() and * select() etc timeout values. These are in nanoseconds. */ - u64 timer_slack_ns; - u64 default_timer_slack_ns; + u64 timer_slack_ns; + u64 default_timer_slack_ns; -#if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS) - unsigned int kasan_depth; +#ifdef CONFIG_KASAN + unsigned int kasan_depth; #endif - -#ifdef CONFIG_KCSAN - struct kcsan_ctx kcsan_ctx; -#ifdef CONFIG_TRACE_IRQFLAGS - struct irqtrace_events kcsan_save_irqtrace; -#endif -#endif - -#if IS_ENABLED(CONFIG_KUNIT) - struct kunit *kunit_test; -#endif - #ifdef CONFIG_FUNCTION_GRAPH_TRACER - /* Index of current stored address in ret_stack: */ - int curr_ret_stack; - int curr_ret_depth; - - /* Stack of return addresses for return function tracing: */ - struct ftrace_ret_stack *ret_stack; - - /* Timestamp for last schedule: */ - unsigned long long ftrace_timestamp; - + /* Index of current stored address in ret_stack */ + int curr_ret_stack; + /* Stack of return addresses for return function tracing */ + struct ftrace_ret_stack *ret_stack; + /* time stamp for last schedule */ + unsigned long long ftrace_timestamp; /* * Number of functions that haven't been traced - * because of depth overrun: + * because of depth overrun. 
*/ - atomic_t trace_overrun; - - /* Pause tracing: */ - atomic_t tracing_graph_pause; + atomic_unchecked_t trace_overrun; + /* Pause for the tracing */ + atomic_t tracing_graph_pause; #endif - #ifdef CONFIG_TRACING - /* State flags for use by tracers: */ - unsigned long trace; - - /* Bitmask and counter of trace recursion: */ - unsigned long trace_recursion; + /* state flags for use by tracers */ + unsigned long trace; + /* bitmask and counter of trace recursion */ + unsigned long trace_recursion; #endif /* CONFIG_TRACING */ - #ifdef CONFIG_KCOV - /* See kernel/kcov.c for more details. */ - - /* Coverage collection mode enabled for this task (0 if disabled): */ - unsigned int kcov_mode; - - /* Size of the kcov_area: */ - unsigned int kcov_size; - - /* Buffer for coverage collection: */ - void *kcov_area; - - /* KCOV descriptor wired with this task or NULL: */ - struct kcov *kcov; - - /* KCOV common handle for remote coverage collection: */ - u64 kcov_handle; - - /* KCOV sequence number: */ - int kcov_sequence; - - /* Collect coverage from softirq context: */ - unsigned int kcov_softirq; + /* Coverage collection mode enabled for this task (0 if disabled). */ + enum kcov_mode kcov_mode; + /* Size of the kcov_area. */ + unsigned kcov_size; + /* Buffer for coverage collection. */ + void *kcov_area; + /* kcov desciptor wired with this task or NULL. 
*/ + struct kcov *kcov; #endif - #ifdef CONFIG_MEMCG - struct mem_cgroup *memcg_in_oom; - gfp_t memcg_oom_gfp_mask; - int memcg_oom_order; + struct mem_cgroup *memcg_in_oom; + gfp_t memcg_oom_gfp_mask; + int memcg_oom_order; - /* Number of pages to reclaim on returning to userland: */ - unsigned int memcg_nr_pages_over_high; - - /* Used by memcontrol for targeted memcg charge: */ - struct mem_cgroup *active_memcg; + /* number of pages to reclaim on returning to userland */ + unsigned int memcg_nr_pages_over_high; #endif - -#ifdef CONFIG_BLK_CGROUP - struct request_queue *throttle_queue; -#endif - #ifdef CONFIG_UPROBES - struct uprobe_task *utask; + struct uprobe_task *utask; #endif #if defined(CONFIG_BCACHE) || defined(CONFIG_BCACHE_MODULE) - unsigned int sequential_io; - unsigned int sequential_io_avg; + unsigned int sequential_io; + unsigned int sequential_io_avg; #endif - struct kmap_ctrl kmap_ctrl; #ifdef CONFIG_DEBUG_ATOMIC_SLEEP - unsigned long task_state_change; -# ifdef CONFIG_PREEMPT_RT - unsigned long saved_state_change; -# endif + unsigned long task_state_change; #endif - int pagefault_disabled; + int pagefault_disabled; #ifdef CONFIG_MMU - struct task_struct *oom_reaper_list; + struct task_struct *oom_reaper_list; #endif #ifdef CONFIG_VMAP_STACK - struct vm_struct *stack_vm_area; + struct vm_struct *stack_vm_area; #endif #ifdef CONFIG_THREAD_INFO_IN_TASK - /* A live task holds one reference: */ - refcount_t stack_refcount; + /* A live task holds one reference. 
*/ + atomic_t stack_refcount; + +/* thread_info moved to task_struct */ +#ifdef CONFIG_X86 + struct thread_info tinfo; #endif -#ifdef CONFIG_LIVEPATCH - int patch_state; -#endif -#ifdef CONFIG_SECURITY - /* Used by LSM modules for access restriction: */ - void *security; -#endif -#ifdef CONFIG_BPF_SYSCALL - /* Used by BPF task local storage */ - struct bpf_local_storage __rcu *bpf_storage; - /* Used for BPF run context */ - struct bpf_run_ctx *bpf_ctx; #endif -#ifdef CONFIG_GCC_PLUGIN_STACKLEAK - unsigned long lowest_stack; - unsigned long prev_lowest_stack; +#ifdef CONFIG_GRKERNSEC + /* grsecurity */ +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP + u64 exec_id; +#endif +#ifdef CONFIG_GRKERNSEC_SETXID + const struct cred *delayed_cred; +#endif + struct dentry *gr_chroot_dentry; + struct acl_subject_label *acl; + struct acl_subject_label *tmpacl; + struct acl_role_label *role; + struct file *exec_file; + unsigned long brute_expires; + u16 acl_role_id; + u8 inherited; + /* is this the task that authenticated to the special role */ + u8 acl_sp_role; + u8 is_writable; + u8 brute; + u8 gr_is_chrooted; #endif -#ifdef CONFIG_X86_MCE - void __user *mce_vaddr; - __u64 mce_kflags; - u64 mce_addr; - __u64 mce_ripv : 1, - mce_whole_page : 1, - __mce_reserved : 62; - struct callback_head mce_kill_me; - int mce_count; +} __randomize_layout; + +#ifdef CONFIG_THREAD_INFO_IN_TASK +#ifndef current_thread_info +# define current_thread_info() (&current->thread_info) +#endif #endif -#ifdef CONFIG_KRETPROBES - struct llist_head kretprobe_instances; +#ifdef CONFIG_ARCH_WANTS_DYNAMIC_TASK_STRUCT +extern size_t arch_task_struct_size __read_mostly; +#else +# define arch_task_struct_size (sizeof(struct task_struct)) #endif -#ifdef CONFIG_ARCH_HAS_PARANOID_L1D_FLUSH - /* - * If L1D flush is supported on mm context switch - * then we use this callback head to queue kill work - * to kill tasks that are not running on SMT disabled - * cores - */ - struct callback_head l1d_flush_kill; +#define MF_PAX_PAGEEXEC 
0x01000000 /* Paging based non-executable pages */ +#define MF_PAX_EMUTRAMP 0x02000000 /* Emulate trampolines */ +#define MF_PAX_MPROTECT 0x04000000 /* Restrict mprotect() */ +#define MF_PAX_RANDMMAP 0x08000000 /* Randomize mmap() base */ +/*#define MF_PAX_RANDEXEC 0x10000000*/ /* Randomize ET_EXEC base */ +#define MF_PAX_SEGMEXEC 0x20000000 /* Segmentation based non-executable pages */ + +#ifdef CONFIG_PAX_SOFTMODE +extern int pax_softmode; #endif +extern int pax_check_flags(unsigned long *); +#define PAX_PARSE_FLAGS_FALLBACK (~0UL) + +/* if tsk != current then task_lock must be held on it */ +#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR) +static inline unsigned long pax_get_flags(struct task_struct *tsk) +{ + if (likely(tsk->mm)) + return tsk->mm->pax_flags; + else + return 0UL; +} + +/* if tsk != current then task_lock must be held on it */ +static inline long pax_set_flags(struct task_struct *tsk, unsigned long flags) +{ + if (likely(tsk->mm)) { + tsk->mm->pax_flags = flags; + return 0; + } + return -EINVAL; +} +#endif + +#ifdef CONFIG_PAX_HAVE_ACL_FLAGS +extern void pax_set_initial_flags(struct linux_binprm *bprm); +#elif defined(CONFIG_PAX_HOOK_ACL_FLAGS) +extern void (*pax_set_initial_flags_func)(struct linux_binprm *bprm); +#endif + +#ifdef CONFIG_PAX_SIZE_OVERFLOW +extern bool pax_size_overflow_report_only; +#endif + +struct path; +extern char *pax_get_path(const struct path *path, char *buf, int buflen); +extern void pax_report_fault(struct pt_regs *regs, void *pc, void *sp); +extern void pax_report_insns(struct pt_regs *regs, void *pc, void *sp); +extern void pax_report_refcount_error(struct pt_regs *regs, const char *kind); + +#ifdef CONFIG_VMAP_STACK +static inline struct vm_struct *task_stack_vm_area(const struct task_struct *t) +{ + return t->stack_vm_area; +} +#else +static inline struct vm_struct *task_stack_vm_area(const struct task_struct *t) +{ + return NULL; +} +#endif + +/* Future-safe accessor for struct task_struct's 
cpus_allowed. */ +#define tsk_cpus_allowed(tsk) (&(tsk)->cpus_allowed) + +static inline int tsk_nr_cpus_allowed(struct task_struct *p) +{ + return p->nr_cpus_allowed; +} + +#define TNF_MIGRATED 0x01 +#define TNF_NO_GROUP 0x02 +#define TNF_SHARED 0x04 +#define TNF_FAULT_LOCAL 0x08 +#define TNF_MIGRATE_FAIL 0x10 + +static inline bool in_vfork(struct task_struct *tsk) +{ + bool ret; + /* - * New fields for task_struct should be added above here, so that - * they are included in the randomized portion of task_struct. - */ - randomized_struct_fields_end - - /* CPU-specific state of this task: */ - struct thread_struct thread; - - /* - * WARNING: on x86, 'thread_struct' contains a variable-sized - * structure. It *MUST* be at the end of 'task_struct'. + * need RCU to access ->real_parent if CLONE_VM was used along with + * CLONE_PARENT. * - * Do not put anything below here! + * We check real_parent->mm == tsk->mm because CLONE_VFORK does not + * imply CLONE_VM + * + * CLONE_VFORK can be used with CLONE_PARENT/CLONE_THREAD and thus + * ->real_parent is not necessarily the task doing vfork(), so in + * theory we can't rely on task_lock() if we want to dereference it. + * + * And in this case we can't trust the real_parent->mm == tsk->mm + * check, it can be false negative. But we do not care, if init or + * another oom-unkillable task does this it should blame itself. 
*/ -}; + rcu_read_lock(); + ret = tsk->vfork_done && tsk->real_parent->mm == tsk->mm; + rcu_read_unlock(); + + return ret; +} + +#ifdef CONFIG_NUMA_BALANCING +extern void task_numa_fault(int last_node, int node, int pages, int flags); +extern pid_t task_numa_group_id(struct task_struct *p); +extern void set_numabalancing_state(bool enabled); +extern void task_numa_free(struct task_struct *p); +extern bool should_numa_migrate_memory(struct task_struct *p, struct page *page, + int src_nid, int dst_cpu); +#else +static inline void task_numa_fault(int last_node, int node, int pages, + int flags) +{ +} +static inline pid_t task_numa_group_id(struct task_struct *p) +{ + return 0; +} +static inline void set_numabalancing_state(bool enabled) +{ +} +static inline void task_numa_free(struct task_struct *p) +{ +} +static inline bool should_numa_migrate_memory(struct task_struct *p, + struct page *page, int src_nid, int dst_cpu) +{ + return true; +} +#endif static inline struct pid *task_pid(struct task_struct *task) { - return task->thread_pid; + return task->pids[PIDTYPE_PID].pid; } +static inline struct pid *task_tgid(struct task_struct *task) +{ + return task->group_leader->pids[PIDTYPE_PID].pid; +} + +/* + * Without tasklist or rcu lock it is not safe to dereference + * the result of task_pgrp/task_session even if task == current, + * we can race with another thread doing sys_setsid/sys_setpgid. + */ +static inline struct pid *task_pgrp(struct task_struct *task) +{ + return task->group_leader->pids[PIDTYPE_PGID].pid; +} + +static inline struct pid *task_session(struct task_struct *task) +{ + return task->group_leader->pids[PIDTYPE_SID].pid; +} + +struct pid_namespace; + /* * the helpers to get the task's different pids as they are seen * from various namespaces @@ -1519,16 +2215,20 @@ static inline struct pid *task_pid(struct task_struct *task) * current. 
* task_xid_nr_ns() : id seen from the ns specified; * + * set_task_vxid() : assigns a virtual id to a task; + * * see also pid_nr() etc in include/linux/pid.h */ -pid_t __task_pid_nr_ns(struct task_struct *task, enum pid_type type, struct pid_namespace *ns); +pid_t __task_pid_nr_ns(struct task_struct *task, enum pid_type type, + struct pid_namespace *ns); -static inline pid_t task_pid_nr(struct task_struct *tsk) +static inline pid_t task_pid_nr(const struct task_struct *tsk) { return tsk->pid; } -static inline pid_t task_pid_nr_ns(struct task_struct *tsk, struct pid_namespace *ns) +static inline pid_t task_pid_nr_ns(struct task_struct *tsk, + struct pid_namespace *ns) { return __task_pid_nr_ns(tsk, PIDTYPE_PID, ns); } @@ -1544,52 +2244,15 @@ static inline pid_t task_tgid_nr(struct task_struct *tsk) return tsk->tgid; } -/** - * pid_alive - check that a task structure is not stale - * @p: Task structure to be checked. - * - * Test if a process is not yet dead (at most zombie state) - * If pid_alive fails, then pointers within the task structure - * can be stale and must not be dereferenced. - * - * Return: 1 if the process is alive. 0 otherwise. 
- */ -static inline int pid_alive(const struct task_struct *p) -{ - return p->thread_pid != NULL; -} - -static inline pid_t task_pgrp_nr_ns(struct task_struct *tsk, struct pid_namespace *ns) -{ - return __task_pid_nr_ns(tsk, PIDTYPE_PGID, ns); -} - -static inline pid_t task_pgrp_vnr(struct task_struct *tsk) -{ - return __task_pid_nr_ns(tsk, PIDTYPE_PGID, NULL); -} - - -static inline pid_t task_session_nr_ns(struct task_struct *tsk, struct pid_namespace *ns) -{ - return __task_pid_nr_ns(tsk, PIDTYPE_SID, ns); -} - -static inline pid_t task_session_vnr(struct task_struct *tsk) -{ - return __task_pid_nr_ns(tsk, PIDTYPE_SID, NULL); -} - -static inline pid_t task_tgid_nr_ns(struct task_struct *tsk, struct pid_namespace *ns) -{ - return __task_pid_nr_ns(tsk, PIDTYPE_TGID, ns); -} +pid_t task_tgid_nr_ns(struct task_struct *tsk, struct pid_namespace *ns); static inline pid_t task_tgid_vnr(struct task_struct *tsk) { - return __task_pid_nr_ns(tsk, PIDTYPE_TGID, NULL); + return pid_vnr(task_tgid(tsk)); } + +static inline int pid_alive(const struct task_struct *p); static inline pid_t task_ppid_nr_ns(const struct task_struct *tsk, struct pid_namespace *ns) { pid_t pid = 0; @@ -1607,40 +2270,48 @@ static inline pid_t task_ppid_nr(const struct task_struct *tsk) return task_ppid_nr_ns(tsk, &init_pid_ns); } -/* Obsolete, do not use: */ +static inline pid_t task_pgrp_nr_ns(struct task_struct *tsk, + struct pid_namespace *ns) +{ + return __task_pid_nr_ns(tsk, PIDTYPE_PGID, ns); +} + +static inline pid_t task_pgrp_vnr(struct task_struct *tsk) +{ + return __task_pid_nr_ns(tsk, PIDTYPE_PGID, NULL); +} + + +static inline pid_t task_session_nr_ns(struct task_struct *tsk, + struct pid_namespace *ns) +{ + return __task_pid_nr_ns(tsk, PIDTYPE_SID, ns); +} + +static inline pid_t task_session_vnr(struct task_struct *tsk) +{ + return __task_pid_nr_ns(tsk, PIDTYPE_SID, NULL); +} + +/* obsolete, do not use */ static inline pid_t task_pgrp_nr(struct task_struct *tsk) { return task_pgrp_nr_ns(tsk, 
&init_pid_ns); } -#define TASK_REPORT_IDLE (TASK_REPORT + 1) -#define TASK_REPORT_MAX (TASK_REPORT_IDLE << 1) - -static inline unsigned int task_state_index(struct task_struct *tsk) +/** + * pid_alive - check that a task structure is not stale + * @p: Task structure to be checked. + * + * Test if a process is not yet dead (at most zombie state) + * If pid_alive fails, then pointers within the task structure + * can be stale and must not be dereferenced. + * + * Return: 1 if the process is alive. 0 otherwise. + */ +static inline int pid_alive(const struct task_struct *p) { - unsigned int tsk_state = READ_ONCE(tsk->__state); - unsigned int state = (tsk_state | tsk->exit_state) & TASK_REPORT; - - BUILD_BUG_ON_NOT_POWER_OF_2(TASK_REPORT_MAX); - - if (tsk_state == TASK_IDLE) - state = TASK_REPORT_IDLE; - - return fls(state); -} - -static inline char task_index_to_char(unsigned int state) -{ - static const char state_char[] = "RSDTtXZPI"; - - BUILD_BUG_ON(1 + ilog2(TASK_REPORT_MAX) != sizeof(state_char) - 1); - - return state_char[state]; -} - -static inline char task_state_to_char(struct task_struct *tsk) -{ - return task_index_to_char(task_state_index(tsk)); + return p->pids[PIDTYPE_PID].pid != NULL; } /** @@ -1659,38 +2330,84 @@ static inline int is_global_init(struct task_struct *tsk) extern struct pid *cad_pid; +extern void free_task(struct task_struct *tsk); +#define get_task_struct(tsk) do { atomic_inc(&(tsk)->usage); } while(0) + +extern void __put_task_struct(struct task_struct *t); + +static inline void put_task_struct(struct task_struct *t) +{ + if (atomic_dec_and_test(&t->usage)) + __put_task_struct(t); +} + +struct task_struct *task_rcu_dereference(struct task_struct **ptask); +struct task_struct *try_get_task_struct(struct task_struct **ptask); + +#ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN +extern void task_cputime(struct task_struct *t, + cputime_t *utime, cputime_t *stime); +extern void task_cputime_scaled(struct task_struct *t, + cputime_t *utimescaled, 
cputime_t *stimescaled); +extern cputime_t task_gtime(struct task_struct *t); +#else +static inline void task_cputime(struct task_struct *t, + cputime_t *utime, cputime_t *stime) +{ + if (utime) + *utime = t->utime; + if (stime) + *stime = t->stime; +} + +static inline void task_cputime_scaled(struct task_struct *t, + cputime_t *utimescaled, + cputime_t *stimescaled) +{ + if (utimescaled) + *utimescaled = t->utimescaled; + if (stimescaled) + *stimescaled = t->stimescaled; +} + +static inline cputime_t task_gtime(struct task_struct *t) +{ + return t->gtime; +} +#endif +extern void task_cputime_adjusted(struct task_struct *p, cputime_t *ut, cputime_t *st); +extern void thread_group_cputime_adjusted(struct task_struct *p, cputime_t *ut, cputime_t *st); + /* * Per process flags */ -#define PF_VCPU 0x00000001 /* I'm a virtual CPU */ -#define PF_IDLE 0x00000002 /* I am an IDLE thread */ -#define PF_EXITING 0x00000004 /* Getting shut down */ -#define PF_IO_WORKER 0x00000010 /* Task is an IO worker */ -#define PF_WQ_WORKER 0x00000020 /* I'm a workqueue worker */ -#define PF_FORKNOEXEC 0x00000040 /* Forked but didn't exec */ -#define PF_MCE_PROCESS 0x00000080 /* Process policy on mce errors */ -#define PF_SUPERPRIV 0x00000100 /* Used super-user privileges */ -#define PF_DUMPCORE 0x00000200 /* Dumped core */ -#define PF_SIGNALED 0x00000400 /* Killed by a signal */ -#define PF_MEMALLOC 0x00000800 /* Allocating memory */ -#define PF_NPROC_EXCEEDED 0x00001000 /* set_user() noticed that RLIMIT_NPROC was exceeded */ -#define PF_USED_MATH 0x00002000 /* If unset the fpu must be initialized before use */ -#define PF_USED_ASYNC 0x00004000 /* Used async_schedule*(), used by module init */ -#define PF_NOFREEZE 0x00008000 /* This thread should not be frozen */ -#define PF_FROZEN 0x00010000 /* Frozen for system suspend */ -#define PF_KSWAPD 0x00020000 /* I am kswapd */ -#define PF_MEMALLOC_NOFS 0x00040000 /* All allocation requests will inherit GFP_NOFS */ -#define PF_MEMALLOC_NOIO 
0x00080000 /* All allocation requests will inherit GFP_NOIO */ -#define PF_LOCAL_THROTTLE 0x00100000 /* Throttle writes only against the bdi I write to, - * I am cleaning dirty pages from some other bdi. */ -#define PF_KTHREAD 0x00200000 /* I am a kernel thread */ -#define PF_RANDOMIZE 0x00400000 /* Randomize virtual address space */ -#define PF_SWAPWRITE 0x00800000 /* Allowed to write to swap */ -#define PF_NO_SETAFFINITY 0x04000000 /* Userland is not allowed to meddle with cpus_mask */ -#define PF_MCE_EARLY 0x08000000 /* Early kill for mce process policy */ -#define PF_MEMALLOC_PIN 0x10000000 /* Allocation context constrained to zones which allow long term pinning. */ -#define PF_FREEZER_SKIP 0x40000000 /* Freezer should not count it as freezable */ -#define PF_SUSPEND_TASK 0x80000000 /* This thread called freeze_processes() and should not be frozen */ +#define PF_EXITING 0x00000004 /* getting shut down */ +#define PF_EXITPIDONE 0x00000008 /* pi exit done on shut down */ +#define PF_VCPU 0x00000010 /* I'm a virtual CPU */ +#define PF_WQ_WORKER 0x00000020 /* I'm a workqueue worker */ +#define PF_FORKNOEXEC 0x00000040 /* forked but didn't exec */ +#define PF_MCE_PROCESS 0x00000080 /* process policy on mce errors */ +#define PF_SUPERPRIV 0x00000100 /* used super-user privileges */ +#define PF_DUMPCORE 0x00000200 /* dumped core */ +#define PF_SIGNALED 0x00000400 /* killed by a signal */ +#define PF_MEMALLOC 0x00000800 /* Allocating memory */ +#define PF_NPROC_EXCEEDED 0x00001000 /* set_user noticed that RLIMIT_NPROC was exceeded */ +#define PF_USED_MATH 0x00002000 /* if unset the fpu must be initialized before use */ +#define PF_USED_ASYNC 0x00004000 /* used async_schedule*(), used by module init */ +#define PF_NOFREEZE 0x00008000 /* this thread should not be frozen */ +#define PF_FROZEN 0x00010000 /* frozen for system suspend */ +#define PF_FSTRANS 0x00020000 /* inside a filesystem transaction */ +#define PF_KSWAPD 0x00040000 /* I am kswapd */ +#define 
PF_MEMALLOC_NOIO 0x00080000 /* Allocating memory without IO involved */ +#define PF_LESS_THROTTLE 0x00100000 /* Throttle me less: I clean memory */ +#define PF_KTHREAD 0x00200000 /* I am a kernel thread */ +#define PF_RANDOMIZE 0x00400000 /* randomize virtual address space */ +#define PF_SWAPWRITE 0x00800000 /* Allowed to write to swap */ +#define PF_NO_SETAFFINITY 0x04000000 /* Userland is not allowed to meddle with cpus_allowed */ +#define PF_MCE_EARLY 0x08000000 /* Early kill for mce process policy */ +#define PF_MUTEX_TESTER 0x20000000 /* Thread belongs to the rt mutex tester */ +#define PF_FREEZER_SKIP 0x40000000 /* Freezer should not count it as freezable */ +#define PF_SUSPEND_TASK 0x80000000 /* this thread called freeze_processes and should not be frozen */ /* * Only the _current_ task can read/write to tsk->flags, but other @@ -1703,51 +2420,55 @@ extern struct pid *cad_pid; * child is not running and in turn not changing child->flags * at the same time the parent does it. */ -#define clear_stopped_child_used_math(child) do { (child)->flags &= ~PF_USED_MATH; } while (0) -#define set_stopped_child_used_math(child) do { (child)->flags |= PF_USED_MATH; } while (0) -#define clear_used_math() clear_stopped_child_used_math(current) -#define set_used_math() set_stopped_child_used_math(current) - +#define clear_stopped_child_used_math(child) do { (child)->flags &= ~PF_USED_MATH; } while (0) +#define set_stopped_child_used_math(child) do { (child)->flags |= PF_USED_MATH; } while (0) +#define clear_used_math() clear_stopped_child_used_math(current) +#define set_used_math() set_stopped_child_used_math(current) #define conditional_stopped_child_used_math(condition, child) \ do { (child)->flags &= ~PF_USED_MATH, (child)->flags |= (condition) ? 
PF_USED_MATH : 0; } while (0) - -#define conditional_used_math(condition) conditional_stopped_child_used_math(condition, current) - +#define conditional_used_math(condition) \ + conditional_stopped_child_used_math(condition, current) #define copy_to_stopped_child_used_math(child) \ do { (child)->flags &= ~PF_USED_MATH, (child)->flags |= current->flags & PF_USED_MATH; } while (0) - /* NOTE: this will return 0 or PF_USED_MATH, it will never return 1 */ -#define tsk_used_math(p) ((p)->flags & PF_USED_MATH) -#define used_math() tsk_used_math(current) +#define tsk_used_math(p) ((p)->flags & PF_USED_MATH) +#define used_math() tsk_used_math(current) -static __always_inline bool is_percpu_thread(void) +/* __GFP_IO isn't allowed if PF_MEMALLOC_NOIO is set in current->flags + * __GFP_FS is also cleared as it implies __GFP_IO. + */ +static inline gfp_t memalloc_noio_flags(gfp_t flags) { -#ifdef CONFIG_SMP - return (current->flags & PF_NO_SETAFFINITY) && - (current->nr_cpus_allowed == 1); -#else - return true; -#endif + if (unlikely(current->flags & PF_MEMALLOC_NOIO)) + flags &= ~(__GFP_IO | __GFP_FS); + return flags; +} + +static inline unsigned int memalloc_noio_save(void) +{ + unsigned int flags = current->flags & PF_MEMALLOC_NOIO; + current->flags |= PF_MEMALLOC_NOIO; + return flags; +} + +static inline void memalloc_noio_restore(unsigned int flags) +{ + current->flags = (current->flags & ~PF_MEMALLOC_NOIO) | flags; } /* Per-process atomic flags. */ -#define PFA_NO_NEW_PRIVS 0 /* May not gain new privileges. 
*/ -#define PFA_SPREAD_PAGE 1 /* Spread page cache over cpuset */ -#define PFA_SPREAD_SLAB 2 /* Spread some slab caches over cpuset */ -#define PFA_SPEC_SSB_DISABLE 3 /* Speculative Store Bypass disabled */ -#define PFA_SPEC_SSB_FORCE_DISABLE 4 /* Speculative Store Bypass force disabled*/ -#define PFA_SPEC_IB_DISABLE 5 /* Indirect branch speculation restricted */ -#define PFA_SPEC_IB_FORCE_DISABLE 6 /* Indirect branch speculation permanently restricted */ -#define PFA_SPEC_SSB_NOEXEC 7 /* Speculative Store Bypass clear on execve() */ +#define PFA_NO_NEW_PRIVS 0 /* May not gain new privileges. */ +#define PFA_SPREAD_PAGE 1 /* Spread page cache over cpuset */ +#define PFA_SPREAD_SLAB 2 /* Spread some slab caches over cpuset */ +#define PFA_LMK_WAITING 3 /* Lowmemorykiller is waiting */ + #define TASK_PFA_TEST(name, func) \ static inline bool task_##func(struct task_struct *p) \ { return test_bit(PFA_##name, &p->atomic_flags); } - #define TASK_PFA_SET(name, func) \ static inline void task_set_##func(struct task_struct *p) \ { set_bit(PFA_##name, &p->atomic_flags); } - #define TASK_PFA_CLEAR(name, func) \ static inline void task_clear_##func(struct task_struct *p) \ { clear_bit(PFA_##name, &p->atomic_flags); } @@ -1763,72 +2484,276 @@ TASK_PFA_TEST(SPREAD_SLAB, spread_slab) TASK_PFA_SET(SPREAD_SLAB, spread_slab) TASK_PFA_CLEAR(SPREAD_SLAB, spread_slab) -TASK_PFA_TEST(SPEC_SSB_DISABLE, spec_ssb_disable) -TASK_PFA_SET(SPEC_SSB_DISABLE, spec_ssb_disable) -TASK_PFA_CLEAR(SPEC_SSB_DISABLE, spec_ssb_disable) +TASK_PFA_TEST(LMK_WAITING, lmk_waiting) +TASK_PFA_SET(LMK_WAITING, lmk_waiting) -TASK_PFA_TEST(SPEC_SSB_NOEXEC, spec_ssb_noexec) -TASK_PFA_SET(SPEC_SSB_NOEXEC, spec_ssb_noexec) -TASK_PFA_CLEAR(SPEC_SSB_NOEXEC, spec_ssb_noexec) +/* + * task->jobctl flags + */ +#define JOBCTL_STOP_SIGMASK 0xffff /* signr of the last group stop */ -TASK_PFA_TEST(SPEC_SSB_FORCE_DISABLE, spec_ssb_force_disable) -TASK_PFA_SET(SPEC_SSB_FORCE_DISABLE, spec_ssb_force_disable) +#define 
JOBCTL_STOP_DEQUEUED_BIT 16 /* stop signal dequeued */ +#define JOBCTL_STOP_PENDING_BIT 17 /* task should stop for group stop */ +#define JOBCTL_STOP_CONSUME_BIT 18 /* consume group stop count */ +#define JOBCTL_TRAP_STOP_BIT 19 /* trap for STOP */ +#define JOBCTL_TRAP_NOTIFY_BIT 20 /* trap for NOTIFY */ +#define JOBCTL_TRAPPING_BIT 21 /* switching to TRACED */ +#define JOBCTL_LISTENING_BIT 22 /* ptracer is listening for events */ -TASK_PFA_TEST(SPEC_IB_DISABLE, spec_ib_disable) -TASK_PFA_SET(SPEC_IB_DISABLE, spec_ib_disable) -TASK_PFA_CLEAR(SPEC_IB_DISABLE, spec_ib_disable) +#define JOBCTL_STOP_DEQUEUED (1UL << JOBCTL_STOP_DEQUEUED_BIT) +#define JOBCTL_STOP_PENDING (1UL << JOBCTL_STOP_PENDING_BIT) +#define JOBCTL_STOP_CONSUME (1UL << JOBCTL_STOP_CONSUME_BIT) +#define JOBCTL_TRAP_STOP (1UL << JOBCTL_TRAP_STOP_BIT) +#define JOBCTL_TRAP_NOTIFY (1UL << JOBCTL_TRAP_NOTIFY_BIT) +#define JOBCTL_TRAPPING (1UL << JOBCTL_TRAPPING_BIT) +#define JOBCTL_LISTENING (1UL << JOBCTL_LISTENING_BIT) -TASK_PFA_TEST(SPEC_IB_FORCE_DISABLE, spec_ib_force_disable) -TASK_PFA_SET(SPEC_IB_FORCE_DISABLE, spec_ib_force_disable) +#define JOBCTL_TRAP_MASK (JOBCTL_TRAP_STOP | JOBCTL_TRAP_NOTIFY) +#define JOBCTL_PENDING_MASK (JOBCTL_STOP_PENDING | JOBCTL_TRAP_MASK) -static inline void -current_restore_flags(unsigned long orig_flags, unsigned long flags) +extern bool task_set_jobctl_pending(struct task_struct *task, + unsigned long mask); +extern void task_clear_jobctl_trapping(struct task_struct *task); +extern void task_clear_jobctl_pending(struct task_struct *task, + unsigned long mask); + +static inline void rcu_copy_process(struct task_struct *p) { - current->flags &= ~flags; - current->flags |= orig_flags & flags; +#ifdef CONFIG_PREEMPT_RCU + p->rcu_read_lock_nesting = 0; + p->rcu_read_unlock_special.s = 0; + p->rcu_blocked_node = NULL; + INIT_LIST_HEAD(&p->rcu_node_entry); +#endif /* #ifdef CONFIG_PREEMPT_RCU */ +#ifdef CONFIG_TASKS_RCU + p->rcu_tasks_holdout = false; + 
INIT_LIST_HEAD(&p->rcu_tasks_holdout_list); + p->rcu_tasks_idle_cpu = -1; +#endif /* #ifdef CONFIG_TASKS_RCU */ } -extern int cpuset_cpumask_can_shrink(const struct cpumask *cur, const struct cpumask *trial); -extern int task_can_attach(struct task_struct *p, const struct cpumask *cs_cpus_allowed); +static inline void tsk_restore_flags(struct task_struct *task, + unsigned long orig_flags, unsigned long flags) +{ + task->flags &= ~flags; + task->flags |= orig_flags & flags; +} + +extern int cpuset_cpumask_can_shrink(const struct cpumask *cur, + const struct cpumask *trial); +extern int task_can_attach(struct task_struct *p, + const struct cpumask *cs_cpus_allowed); #ifdef CONFIG_SMP -extern void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask); -extern int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask); -extern int dup_user_cpus_ptr(struct task_struct *dst, struct task_struct *src, int node); -extern void release_user_cpus_ptr(struct task_struct *p); -extern int dl_task_check_affinity(struct task_struct *p, const struct cpumask *mask); -extern void force_compatible_cpus_allowed_ptr(struct task_struct *p); -extern void relax_compatible_cpus_allowed_ptr(struct task_struct *p); +extern void do_set_cpus_allowed(struct task_struct *p, + const struct cpumask *new_mask); + +extern int set_cpus_allowed_ptr(struct task_struct *p, + const struct cpumask *new_mask); #else -static inline void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask) +static inline void do_set_cpus_allowed(struct task_struct *p, + const struct cpumask *new_mask) { } -static inline int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask) +static inline int set_cpus_allowed_ptr(struct task_struct *p, + const struct cpumask *new_mask) { if (!cpumask_test_cpu(0, new_mask)) return -EINVAL; return 0; } -static inline int dup_user_cpus_ptr(struct task_struct *dst, struct task_struct *src, int node) +#endif + 
+#ifdef CONFIG_NO_HZ_COMMON +void calc_load_enter_idle(void); +void calc_load_exit_idle(void); +#else +static inline void calc_load_enter_idle(void) { } +static inline void calc_load_exit_idle(void) { } +#endif /* CONFIG_NO_HZ_COMMON */ + +/* + * Do not use outside of architecture code which knows its limitations. + * + * sched_clock() has no promise of monotonicity or bounded drift between + * CPUs, use (which you should not) requires disabling IRQs. + * + * Please use one of the three interfaces below. + */ +extern unsigned long long notrace sched_clock(void); +/* + * See the comment in kernel/sched/clock.c + */ +extern u64 running_clock(void); +extern u64 sched_clock_cpu(int cpu); + + +extern void sched_clock_init(void); + +#ifdef CONFIG_GRKERNSEC_KSTACKOVERFLOW +static inline void populate_stack(void *stack, unsigned int size) { - if (src->user_cpus_ptr) - return -EINVAL; - return 0; + int c; + int *ptr = stack; + int *end = stack + size; + + while (ptr < end) { + c = *(volatile int *)ptr; + (void)c; + ptr += PAGE_SIZE/sizeof(int); + } } -static inline void release_user_cpus_ptr(struct task_struct *p) +#else +static inline void populate_stack(void *stack, unsigned int size) +{ +} +#endif + +const void *gr_convert_stack_address_to_lowmem(const void *buf); + +#ifdef CONFIG_GRKERNSEC +static inline bool current_is_ptracer(struct task_struct *task, u64 *exec_id) +{ + bool ret = false; + if (!task->ptrace) + return ret; + + rcu_read_lock(); + read_lock(&tasklist_lock); + if (task->parent && task->parent == current) { + ret = true; +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP + if (exec_id) + *exec_id = task->parent->exec_id; +#endif + } + read_unlock(&tasklist_lock); + rcu_read_unlock(); + + return ret; +} +#endif + +#ifndef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK +static inline void sched_clock_tick(void) { - WARN_ON(p->user_cpus_ptr); } -static inline int dl_task_check_affinity(struct task_struct *p, const struct cpumask *mask) +static inline void 
sched_clock_idle_sleep_event(void) { - return 0; } + +static inline void sched_clock_idle_wakeup_event(u64 delta_ns) +{ +} + +static inline u64 cpu_clock(int cpu) +{ + return sched_clock(); +} + +static inline u64 local_clock(void) +{ + return sched_clock(); +} +#else +/* + * Architectures can set this to 1 if they have specified + * CONFIG_HAVE_UNSTABLE_SCHED_CLOCK in their arch Kconfig, + * but then during bootup it turns out that sched_clock() + * is reliable after all: + */ +extern int sched_clock_stable(void); +extern void set_sched_clock_stable(void); +extern void clear_sched_clock_stable(void); + +extern void sched_clock_tick(void); +extern void sched_clock_idle_sleep_event(void); +extern void sched_clock_idle_wakeup_event(u64 delta_ns); + +/* + * As outlined in clock.c, provides a fast, high resolution, nanosecond + * time source that is monotonic per cpu argument and has bounded drift + * between cpus. + * + * ######################### BIG FAT WARNING ########################## + * # when comparing cpu_clock(i) to cpu_clock(j) for i != j, time can # + * # go backwards !! # + * #################################################################### + */ +static inline u64 cpu_clock(int cpu) +{ + return sched_clock_cpu(cpu); +} + +static inline u64 local_clock(void) +{ + return sched_clock_cpu(raw_smp_processor_id()); +} +#endif + +#ifdef CONFIG_IRQ_TIME_ACCOUNTING +/* + * An i/f to runtime opt-in for irq time accounting based off of sched_clock. + * The reason for this explicit opt-in is not to have perf penalty with + * slow sched_clocks. 
+ */ +extern void enable_sched_clock_irqtime(void); +extern void disable_sched_clock_irqtime(void); +#else +static inline void enable_sched_clock_irqtime(void) {} +static inline void disable_sched_clock_irqtime(void) {} +#endif + +extern unsigned long long +task_sched_runtime(struct task_struct *task); + +/* sched_exec is called by processes performing an exec */ +#ifdef CONFIG_SMP +extern void sched_exec(void); +#else +#define sched_exec() {} +#endif + +extern void sched_clock_idle_sleep_event(void); +extern void sched_clock_idle_wakeup_event(u64 delta_ns); + +#ifdef CONFIG_HOTPLUG_CPU +extern void idle_task_exit(void); +#else +static inline void idle_task_exit(void) {} +#endif + +#if defined(CONFIG_NO_HZ_COMMON) && defined(CONFIG_SMP) +extern void wake_up_nohz_cpu(int cpu); +#else +static inline void wake_up_nohz_cpu(int cpu) { } +#endif + +#ifdef CONFIG_NO_HZ_FULL +extern u64 scheduler_tick_max_deferment(void); +#endif + +#ifdef CONFIG_SCHED_AUTOGROUP +extern void sched_autogroup_create_attach(struct task_struct *p); +extern void sched_autogroup_detach(struct task_struct *p); +extern void sched_autogroup_fork(struct signal_struct *sig); +extern void sched_autogroup_exit(struct signal_struct *sig); +extern void sched_autogroup_exit_task(struct task_struct *p); +#ifdef CONFIG_PROC_FS +extern void proc_sched_autogroup_show_task(struct task_struct *p, struct seq_file *m); +extern int proc_sched_autogroup_set_nice(struct task_struct *p, int nice); +#endif +#else +static inline void sched_autogroup_create_attach(struct task_struct *p) { } +static inline void sched_autogroup_detach(struct task_struct *p) { } +static inline void sched_autogroup_fork(struct signal_struct *sig) { } +static inline void sched_autogroup_exit(struct signal_struct *sig) { } +static inline void sched_autogroup_exit_task(struct task_struct *p) { } #endif extern int yield_to(struct task_struct *p, bool preempt); extern void set_user_nice(struct task_struct *p, long nice); extern int 
task_prio(const struct task_struct *p); - /** * task_nice - return the nice value of a given task. * @p: the task in question. @@ -1839,61 +2764,55 @@ static inline int task_nice(const struct task_struct *p) { return PRIO_TO_NICE((p)->static_prio); } - extern int can_nice(const struct task_struct *p, const int nice); extern int task_curr(const struct task_struct *p); extern int idle_cpu(int cpu); -extern int available_idle_cpu(int cpu); -extern int sched_setscheduler(struct task_struct *, int, const struct sched_param *); -extern int sched_setscheduler_nocheck(struct task_struct *, int, const struct sched_param *); -extern void sched_set_fifo(struct task_struct *p); -extern void sched_set_fifo_low(struct task_struct *p); -extern void sched_set_normal(struct task_struct *p, int nice); -extern int sched_setattr(struct task_struct *, const struct sched_attr *); -extern int sched_setattr_nocheck(struct task_struct *, const struct sched_attr *); +extern int sched_setscheduler(struct task_struct *, int, + const struct sched_param *); +extern int sched_setscheduler_nocheck(struct task_struct *, int, + const struct sched_param *); +extern int sched_setattr(struct task_struct *, + const struct sched_attr *); extern struct task_struct *idle_task(int cpu); - /** * is_idle_task - is the specified task an idle task? * @p: the task in question. * * Return: 1 if @p is an idle task. 0 otherwise. 
*/ -static __always_inline bool is_idle_task(const struct task_struct *p) +static inline bool is_idle_task(const struct task_struct *p) { - return !!(p->flags & PF_IDLE); + return p->pid == 0; } - extern struct task_struct *curr_task(int cpu); extern void ia64_set_curr_task(int cpu, struct task_struct *p); void yield(void); union thread_union { -#ifndef CONFIG_ARCH_TASK_STRUCT_ON_STACK - struct task_struct task; -#endif #ifndef CONFIG_THREAD_INFO_IN_TASK struct thread_info thread_info; #endif unsigned long stack[THREAD_SIZE/sizeof(long)]; }; -#ifndef CONFIG_THREAD_INFO_IN_TASK -extern struct thread_info init_thread_info; -#endif - -extern unsigned long init_stack[THREAD_SIZE / sizeof(unsigned long)]; - -#ifdef CONFIG_THREAD_INFO_IN_TASK -static inline struct thread_info *task_thread_info(struct task_struct *task) +#ifndef __HAVE_ARCH_KSTACK_END +static inline int kstack_end(void *addr) { - return &task->thread_info; + /* Reliable end of stack detection: + * Some APM bios versions misalign the stack + */ + return !(((unsigned long)addr+sizeof(void*)-1) & (THREAD_SIZE-sizeof(void*))); } -#elif !defined(__HAVE_THREAD_FUNCTIONS) -# define task_thread_info(task) ((struct thread_info *)(task)->stack) #endif +extern union thread_union init_thread_union; +extern struct task_struct init_task; + +extern struct mm_struct init_mm; + +extern struct pid_namespace init_pid_ns; + /* * find a task by one of its numerical ids * @@ -1906,58 +2825,597 @@ static inline struct thread_info *task_thread_info(struct task_struct *task) */ extern struct task_struct *find_task_by_vpid(pid_t nr); -extern struct task_struct *find_task_by_pid_ns(pid_t nr, struct pid_namespace *ns); +extern struct task_struct *find_task_by_vpid_unrestricted(pid_t nr); +extern struct task_struct *find_task_by_pid_ns(pid_t nr, + struct pid_namespace *ns); -/* - * find a task by its virtual pid and get the task struct - */ -extern struct task_struct *find_get_task_by_vpid(pid_t nr); +/* per-UID process charging. 
*/ +extern struct user_struct * alloc_uid(kuid_t); +static inline struct user_struct *get_uid(struct user_struct *u) +{ + atomic_inc(&u->__count); + return u; +} +extern void free_uid(struct user_struct *); + +#include + +extern void xtime_update(unsigned long ticks); extern int wake_up_state(struct task_struct *tsk, unsigned int state); extern int wake_up_process(struct task_struct *tsk); extern void wake_up_new_task(struct task_struct *tsk); - #ifdef CONFIG_SMP -extern void kick_process(struct task_struct *tsk); + extern void kick_process(struct task_struct *tsk); #else -static inline void kick_process(struct task_struct *tsk) { } + static inline void kick_process(struct task_struct *tsk) { } +#endif +extern int sched_fork(unsigned long clone_flags, struct task_struct *p); +extern void sched_dead(struct task_struct *p); + +extern void proc_caches_init(void); +extern void flush_signals(struct task_struct *); +extern void ignore_signals(struct task_struct *); +extern void flush_signal_handlers(struct task_struct *, int force_default); +extern int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info) __must_hold(&tsk->sighand->siglock); + +static inline int kernel_dequeue_signal(siginfo_t *info) +{ + struct task_struct *tsk = current; + siginfo_t __info; + int ret; + + spin_lock_irq(&tsk->sighand->siglock); + ret = dequeue_signal(tsk, &tsk->blocked, info ?: &__info); + spin_unlock_irq(&tsk->sighand->siglock); + + return ret; +} + +static inline void kernel_signal_stop(void) +{ + spin_lock_irq(¤t->sighand->siglock); + if (current->jobctl & JOBCTL_STOP_DEQUEUED) + __set_current_state(TASK_STOPPED); + spin_unlock_irq(¤t->sighand->siglock); + + schedule(); +} + +extern void release_task(struct task_struct * p); +extern int send_sig_info(int, struct siginfo *, struct task_struct *); +extern int force_sigsegv(int, struct task_struct *); +extern int force_sig_info(int, struct siginfo *, struct task_struct *); +extern int __kill_pgrp_info(int sig, struct 
siginfo *info, struct pid *pgrp); +extern int kill_pid_info(int sig, struct siginfo *info, struct pid *pid); +extern int kill_pid_info_as_cred(int, struct siginfo *, struct pid *, + const struct cred *, u32); +extern int kill_pgrp(struct pid *pid, int sig, int priv); +extern int kill_pid(struct pid *pid, int sig, int priv); +extern int kill_proc_info(int, struct siginfo *, pid_t); +extern __must_check bool do_notify_parent(struct task_struct *, int); +extern void __wake_up_parent(struct task_struct *p, struct task_struct *parent); +extern void force_sig(int, struct task_struct *); +extern int send_sig(int, struct task_struct *, int); +extern int zap_other_threads(struct task_struct *p); +extern struct sigqueue *sigqueue_alloc(void); +extern void sigqueue_free(struct sigqueue *); +extern int send_sigqueue(struct sigqueue *, struct task_struct *, int group); +extern int do_sigaction(int, struct k_sigaction *, struct k_sigaction *); + +#ifdef TIF_RESTORE_SIGMASK +/* + * Legacy restore_sigmask accessors. These are inefficient on + * SMP architectures because they require atomic operations. + */ + +/** + * set_restore_sigmask() - make sure saved_sigmask processing gets done + * + * This sets TIF_RESTORE_SIGMASK and ensures that the arch signal code + * will run before returning to user mode, to process the flag. For + * all callers, TIF_SIGPENDING is already set or it's no harm to set + * it. TIF_RESTORE_SIGMASK need not be in the set of bits that the + * arch code will notice on return to user mode, in case those bits + * are scarce. We set TIF_SIGPENDING here to ensure that the arch + * signal code always gets run when TIF_RESTORE_SIGMASK is set. 
+ */ +static inline void set_restore_sigmask(void) +{ + set_thread_flag(TIF_RESTORE_SIGMASK); + WARN_ON(!test_thread_flag(TIF_SIGPENDING)); +} +static inline void clear_restore_sigmask(void) +{ + clear_thread_flag(TIF_RESTORE_SIGMASK); +} +static inline bool test_restore_sigmask(void) +{ + return test_thread_flag(TIF_RESTORE_SIGMASK); +} +static inline bool test_and_clear_restore_sigmask(void) +{ + return test_and_clear_thread_flag(TIF_RESTORE_SIGMASK); +} + +#else /* TIF_RESTORE_SIGMASK */ + +/* Higher-quality implementation, used if TIF_RESTORE_SIGMASK doesn't exist. */ +static inline void set_restore_sigmask(void) +{ + current->restore_sigmask = true; + WARN_ON(!test_thread_flag(TIF_SIGPENDING)); +} +static inline void clear_restore_sigmask(void) +{ + current->restore_sigmask = false; +} +static inline bool test_restore_sigmask(void) +{ + return current->restore_sigmask; +} +static inline bool test_and_clear_restore_sigmask(void) +{ + if (!current->restore_sigmask) + return false; + current->restore_sigmask = false; + return true; +} #endif -extern void __set_task_comm(struct task_struct *tsk, const char *from, bool exec); +static inline void restore_saved_sigmask(void) +{ + if (test_and_clear_restore_sigmask()) + __set_current_blocked(¤t->saved_sigmask); +} +static inline sigset_t *sigmask_to_save(void) +{ + sigset_t *res = ¤t->blocked; + if (unlikely(test_restore_sigmask())) + res = ¤t->saved_sigmask; + return res; +} + +static inline int kill_cad_pid(int sig, int priv) +{ + return kill_pid(cad_pid, sig, priv); +} + +/* These can be the second arg to send_sig_info/send_group_sig_info. */ +#define SEND_SIG_NOINFO ((struct siginfo *) 0) +#define SEND_SIG_PRIV ((struct siginfo *) 1) +#define SEND_SIG_FORCED ((struct siginfo *) 2) + +/* + * True if we are on the alternate signal stack. 
+ */ +static inline int on_sig_stack(unsigned long sp) +{ + /* + * If the signal stack is SS_AUTODISARM then, by construction, we + * can't be on the signal stack unless user code deliberately set + * SS_AUTODISARM when we were already on it. + * + * This improves reliability: if user state gets corrupted such that + * the stack pointer points very close to the end of the signal stack, + * then this check will enable the signal to be handled anyway. + */ + if (current->sas_ss_flags & SS_AUTODISARM) + return 0; + +#ifdef CONFIG_STACK_GROWSUP + return sp >= current->sas_ss_sp && + sp - current->sas_ss_sp < current->sas_ss_size; +#else + return sp > current->sas_ss_sp && + sp - current->sas_ss_sp <= current->sas_ss_size; +#endif +} + +static inline int sas_ss_flags(unsigned long sp) +{ + if (!current->sas_ss_size) + return SS_DISABLE; + + return on_sig_stack(sp) ? SS_ONSTACK : 0; +} + +static inline void sas_ss_reset(struct task_struct *p) +{ + p->sas_ss_sp = 0; + p->sas_ss_size = 0; + p->sas_ss_flags = SS_DISABLE; +} + +static inline unsigned long sigsp(unsigned long sp, struct ksignal *ksig) +{ + if (unlikely((ksig->ka.sa.sa_flags & SA_ONSTACK)) && ! 
sas_ss_flags(sp)) +#ifdef CONFIG_STACK_GROWSUP + return current->sas_ss_sp; +#else + return current->sas_ss_sp + current->sas_ss_size; +#endif + return sp; +} + +/* + * Routines for handling mm_structs + */ +extern struct mm_struct * mm_alloc(void); + +/* mmdrop drops the mm and the page tables */ +extern void __mmdrop(struct mm_struct *); +static inline void mmdrop(struct mm_struct *mm) +{ + if (unlikely(atomic_dec_and_test(&mm->mm_count))) + __mmdrop(mm); +} + +static inline void mmdrop_async_fn(struct work_struct *work) +{ + struct mm_struct *mm = container_of(work, struct mm_struct, async_put_work); + __mmdrop(mm); +} + +static inline void mmdrop_async(struct mm_struct *mm) +{ + if (unlikely(atomic_dec_and_test(&mm->mm_count))) { + INIT_WORK(&mm->async_put_work, mmdrop_async_fn); + schedule_work(&mm->async_put_work); + } +} + +static inline bool mmget_not_zero(struct mm_struct *mm) +{ + return atomic_inc_not_zero(&mm->mm_users); +} + +/* mmput gets rid of the mappings and all user-space */ +extern void mmput(struct mm_struct *); +#ifdef CONFIG_MMU +/* same as above but performs the slow path from the async context. Can + * be called from the atomic context as well + */ +extern void mmput_async(struct mm_struct *); +#endif + +/* Grab a reference to a task's mm, if it is not already going away */ +extern struct mm_struct *get_task_mm(struct task_struct *task); +/* + * Grab a reference to a task's mm, if it is not already going away + * and ptrace_may_access with the mode parameter passed to it + * succeeds. 
+ */ +extern struct mm_struct *mm_access(struct task_struct *task, unsigned int mode); +/* Remove the current tasks stale references to the old mm_struct */ +extern void mm_release(struct task_struct *, struct mm_struct *); + +#ifdef CONFIG_HAVE_COPY_THREAD_TLS +extern int copy_thread_tls(unsigned long, unsigned long, unsigned long, + struct task_struct *, unsigned long); +#else +extern int copy_thread(unsigned long, unsigned long, unsigned long, + struct task_struct *); + +/* Architectures that haven't opted into copy_thread_tls get the tls argument + * via pt_regs, so ignore the tls argument passed via C. */ +static inline int copy_thread_tls( + unsigned long clone_flags, unsigned long sp, unsigned long arg, + struct task_struct *p, unsigned long tls) +{ + return copy_thread(clone_flags, sp, arg, p); +} +#endif +extern void flush_thread(void); + +#ifdef CONFIG_HAVE_EXIT_THREAD +extern void exit_thread(struct task_struct *tsk); +#else +static inline void exit_thread(struct task_struct *tsk) +{ +} +#endif + +extern void exit_files(struct task_struct *); +extern void __cleanup_sighand(struct sighand_struct *); + +extern void exit_itimers(struct signal_struct *); +extern void flush_itimer_signals(void); + +extern __noreturn void do_group_exit(int); + +extern int do_execve(struct filename *, + const char __user * const __user *, + const char __user * const __user *); +extern int do_execveat(int, struct filename *, + const char __user * const __user *, + const char __user * const __user *, + int); +extern long _do_fork(unsigned long, unsigned long, unsigned long, int __user *, int __user *, unsigned long); +extern long do_fork(unsigned long, unsigned long, unsigned long, int __user *, int __user *); +struct task_struct *fork_idle(int); +extern pid_t kernel_thread(int (*fn)(void *), void *arg, unsigned long flags); + +extern void __set_task_comm(struct task_struct *tsk, const char *from, bool exec); static inline void set_task_comm(struct task_struct *tsk, const char 
*from) { __set_task_comm(tsk, from, false); } - -extern char *__get_task_comm(char *to, size_t len, struct task_struct *tsk); -#define get_task_comm(buf, tsk) ({ \ - BUILD_BUG_ON(sizeof(buf) != TASK_COMM_LEN); \ - __get_task_comm(buf, sizeof(buf), tsk); \ -}) +extern char *get_task_comm(char *to, struct task_struct *tsk); #ifdef CONFIG_SMP -static __always_inline void scheduler_ipi(void) -{ - /* - * Fold TIF_NEED_RESCHED into the preempt_count; anybody setting - * TIF_NEED_RESCHED remotely (for the first time) will also send - * this IPI. - */ - preempt_fold_need_resched(); -} -extern unsigned long wait_task_inactive(struct task_struct *, unsigned int match_state); +void scheduler_ipi(void); +extern unsigned long wait_task_inactive(struct task_struct *, long match_state); #else static inline void scheduler_ipi(void) { } -static inline unsigned long wait_task_inactive(struct task_struct *p, unsigned int match_state) +static inline unsigned long wait_task_inactive(struct task_struct *p, + long match_state) { return 1; } #endif +#define tasklist_empty() \ + list_empty(&init_task.tasks) + +#define next_task(p) \ + list_entry_rcu((p)->tasks.next, struct task_struct, tasks) + +#define for_each_process(p) \ + for (p = &init_task ; (p = next_task(p)) != &init_task ; ) + +extern bool current_is_single_threaded(void); + /* - * Set thread flags in other task's structures. - * See asm/thread_info.h for TIF_xxxx flags available: + * Careful: do_each_thread/while_each_thread is a double loop so + * 'break' will not work as expected - use goto instead. + */ +#define do_each_thread(g, t) \ + for (g = t = &init_task ; (g = t = next_task(g)) != &init_task ; ) do + +#define while_each_thread(g, t) \ + while ((t = next_thread(t)) != g) + +#define __for_each_thread(signal, t) \ + list_for_each_entry_rcu(t, &(signal)->thread_head, thread_node) + +#define for_each_thread(p, t) \ + __for_each_thread((p)->signal, t) + +/* Careful: this is a double loop, 'break' won't work as expected. 
*/ +#define for_each_process_thread(p, t) \ + for_each_process(p) for_each_thread(p, t) + +static inline int get_nr_threads(struct task_struct *tsk) +{ + return tsk->signal->nr_threads; +} + +static inline bool thread_group_leader(struct task_struct *p) +{ + return p->exit_signal >= 0; +} + +/* Do to the insanities of de_thread it is possible for a process + * to have the pid of the thread group leader without actually being + * the thread group leader. For iteration through the pids in proc + * all we care about is that we have a task with the appropriate + * pid, we don't actually care if we have the right task. + */ +static inline bool has_group_leader_pid(struct task_struct *p) +{ + return task_pid(p) == p->signal->leader_pid; +} + +static inline +bool same_thread_group(struct task_struct *p1, struct task_struct *p2) +{ + return p1->signal == p2->signal; +} + +static inline struct task_struct *next_thread(const struct task_struct *p) +{ + return list_entry_rcu(p->thread_group.next, + struct task_struct, thread_group); +} + +static inline int thread_group_empty(struct task_struct *p) +{ + return list_empty(&p->thread_group); +} + +#define delay_group_leader(p) \ + (thread_group_leader(p) && !thread_group_empty(p)) + +/* + * Protects ->fs, ->files, ->mm, ->group_info, ->comm, keyring + * subscriptions and synchronises with wait4(). Also used in procfs. Also + * pins the final release of task.io_context. Also protects ->cpuset and + * ->cgroup.subsys[]. And ->vfork_done. + * + * Nests both inside and outside of read_lock(&tasklist_lock). + * It must not be nested with write_lock_irq(&tasklist_lock), + * neither inside nor outside. 
+ */ +static inline void task_lock(struct task_struct *p) __acquires(&p->alloc_lock); +static inline void task_lock(struct task_struct *p) +{ + spin_lock(&p->alloc_lock); +} + +static inline void task_unlock(struct task_struct *p) __releases(&p->alloc_lock); +static inline void task_unlock(struct task_struct *p) +{ + spin_unlock(&p->alloc_lock); +} + +extern struct sighand_struct *__lock_task_sighand(struct task_struct *tsk, + unsigned long *flags); + +static inline struct sighand_struct *lock_task_sighand(struct task_struct *tsk, + unsigned long *flags) +{ + struct sighand_struct *ret; + + ret = __lock_task_sighand(tsk, flags); + (void)__cond_lock(&tsk->sighand->siglock, ret); + return ret; +} + +static inline void unlock_task_sighand(struct task_struct *tsk, + unsigned long *flags) +{ + spin_unlock_irqrestore(&tsk->sighand->siglock, *flags); +} + +/** + * threadgroup_change_begin - mark the beginning of changes to a threadgroup + * @tsk: task causing the changes + * + * All operations which modify a threadgroup - a new thread joining the + * group, death of a member thread (the assertion of PF_EXITING) and + * exec(2) dethreading the process and replacing the leader - are wrapped + * by threadgroup_change_{begin|end}(). This is to provide a place which + * subsystems needing threadgroup stability can hook into for + * synchronization. + */ +static inline void threadgroup_change_begin(struct task_struct *tsk) +{ + might_sleep(); + cgroup_threadgroup_change_begin(tsk); +} + +/** + * threadgroup_change_end - mark the end of changes to a threadgroup + * @tsk: task causing the changes + * + * See threadgroup_change_begin(). 
+ */ +static inline void threadgroup_change_end(struct task_struct *tsk) +{ + cgroup_threadgroup_change_end(tsk); +} + +#ifdef CONFIG_THREAD_INFO_IN_TASK + +static inline struct thread_info *task_thread_info(struct task_struct *task) +{ + return &task->thread_info; +} + +/* + * When accessing the stack of a non-current task that might exit, use + * try_get_task_stack() instead. task_stack_page will return a pointer + * that could get freed out from under you. + */ +static inline void *task_stack_page(const struct task_struct *task) +{ + return task->stack; +} + +#define setup_thread_stack(new,old) do { } while(0) + +static inline unsigned long *end_of_stack(const struct task_struct *task) +{ + return (unsigned long *)task->stack + 1; +} + +#elif !defined(__HAVE_THREAD_FUNCTIONS) + +#define task_thread_info(task) ((struct thread_info *)(task)->stack) +#define task_stack_page(task) ((void *)(task)->stack) + +static inline void setup_thread_stack(struct task_struct *p, struct task_struct *org) +{ + *task_thread_info(p) = *task_thread_info(org); + task_thread_info(p)->task = p; +} + +/* + * Return the address of the last usable long on the stack. + * + * When the stack grows down, this is just above the thread + * info struct. Going any lower will corrupt the threadinfo. + * + * When the stack grows up, this is the highest address. + * Beyond that position, we corrupt data on the next page. + */ +static inline unsigned long *end_of_stack(struct task_struct *p) +{ +#ifdef CONFIG_STACK_GROWSUP + return (unsigned long *)((unsigned long)task_thread_info(p) + THREAD_SIZE) - 1; +#else + return (unsigned long *)(task_thread_info(p) + 1); +#endif +} + +#endif + +#ifdef CONFIG_THREAD_INFO_IN_TASK +static inline void *try_get_task_stack(struct task_struct *tsk) +{ + return atomic_inc_not_zero(&tsk->stack_refcount) ? 
+ task_stack_page(tsk) : NULL; +} + +extern void put_task_stack(struct task_struct *tsk); +#else +static inline void *try_get_task_stack(struct task_struct *tsk) +{ + return task_stack_page(tsk); +} + +static inline void put_task_stack(struct task_struct *tsk) {} +#endif + +#define task_stack_end_corrupted(task) \ + (*(end_of_stack(task)) != STACK_END_MAGIC) + +static inline int object_starts_on_stack(const void *obj) +{ + const void *stack = task_stack_page(current); + + return (obj >= stack) && (obj < (stack + THREAD_SIZE)); +} + +#if defined(CONFIG_GRKERNSEC_KSTACKOVERFLOW) && defined(CONFIG_X86_64) +static inline int object_starts_on_irq_stack(const void *obj) +{ + const void *stack = this_cpu_read(irq_stack_ptr); + + return (obj >= stack) && (obj < (stack + IRQ_STACK_SIZE)); +} +#else +static inline int object_starts_on_irq_stack(const void *obj) { return 0; } +#endif + +extern void thread_stack_cache_init(void); + +#ifdef CONFIG_DEBUG_STACK_USAGE +static inline unsigned long stack_not_used(struct task_struct *p) +{ + unsigned long *n = end_of_stack(p); + + do { /* Skip over canary */ +# ifdef CONFIG_STACK_GROWSUP + n--; +# else + n++; +# endif + } while (!*n); + +# ifdef CONFIG_STACK_GROWSUP + return (unsigned long)end_of_stack(p) - (unsigned long)n; +# else + return (unsigned long)n - (unsigned long)end_of_stack(p); +# endif +} +#endif +extern void set_task_stack_end_magic(struct task_struct *tsk); + +/* set thread flags in other task's structures + * - see asm/thread_info.h for TIF_xxxx flags available */ static inline void set_tsk_thread_flag(struct task_struct *tsk, int flag) { @@ -1969,12 +3427,6 @@ static inline void clear_tsk_thread_flag(struct task_struct *tsk, int flag) clear_ti_thread_flag(task_thread_info(tsk), flag); } -static inline void update_tsk_thread_flag(struct task_struct *tsk, int flag, - bool value) -{ - update_ti_thread_flag(task_thread_info(tsk), flag, value); -} - static inline int test_and_set_tsk_thread_flag(struct task_struct *tsk, 
int flag) { return test_and_set_ti_thread_flag(task_thread_info(tsk), flag); @@ -2005,38 +3457,49 @@ static inline int test_tsk_need_resched(struct task_struct *tsk) return unlikely(test_tsk_thread_flag(tsk,TIF_NEED_RESCHED)); } +static inline int restart_syscall(void) +{ + set_tsk_thread_flag(current, TIF_SIGPENDING); + return -ERESTARTNOINTR; +} + +static inline int signal_pending(struct task_struct *p) +{ + return unlikely(test_tsk_thread_flag(p,TIF_SIGPENDING)); +} + +static inline int __fatal_signal_pending(struct task_struct *p) +{ + return unlikely(sigismember(&p->pending.signal, SIGKILL)); +} + +static inline int fatal_signal_pending(struct task_struct *p) +{ + return signal_pending(p) && __fatal_signal_pending(p); +} + +static inline int signal_pending_state(long state, struct task_struct *p) +{ + if (!(state & (TASK_INTERRUPTIBLE | TASK_WAKEKILL))) + return 0; + if (!signal_pending(p)) + return 0; + + return (state & TASK_INTERRUPTIBLE) || __fatal_signal_pending(p); +} + /* * cond_resched() and cond_resched_lock(): latency reduction via * explicit rescheduling in places that are safe. The return * value indicates whether a reschedule was done in fact. * cond_resched_lock() will drop the spinlock before scheduling, + * cond_resched_softirq() will enable bhs before scheduling. 
*/ -#if !defined(CONFIG_PREEMPTION) || defined(CONFIG_PREEMPT_DYNAMIC) -extern int __cond_resched(void); - -#ifdef CONFIG_PREEMPT_DYNAMIC - -DECLARE_STATIC_CALL(cond_resched, __cond_resched); - -static __always_inline int _cond_resched(void) -{ - return static_call_mod(cond_resched)(); -} - +#ifndef CONFIG_PREEMPT +extern int _cond_resched(void); #else - -static inline int _cond_resched(void) -{ - return __cond_resched(); -} - -#endif /* CONFIG_PREEMPT_DYNAMIC */ - -#else - static inline int _cond_resched(void) { return 0; } - -#endif /* !defined(CONFIG_PREEMPTION) || defined(CONFIG_PREEMPT_DYNAMIC) */ +#endif #define cond_resched() ({ \ ___might_sleep(__FILE__, __LINE__, 0); \ @@ -2044,22 +3507,17 @@ static inline int _cond_resched(void) { return 0; } }) extern int __cond_resched_lock(spinlock_t *lock); -extern int __cond_resched_rwlock_read(rwlock_t *lock); -extern int __cond_resched_rwlock_write(rwlock_t *lock); #define cond_resched_lock(lock) ({ \ ___might_sleep(__FILE__, __LINE__, PREEMPT_LOCK_OFFSET);\ __cond_resched_lock(lock); \ }) -#define cond_resched_rwlock_read(lock) ({ \ - __might_sleep(__FILE__, __LINE__, PREEMPT_LOCK_OFFSET); \ - __cond_resched_rwlock_read(lock); \ -}) +extern int __cond_resched_softirq(void); -#define cond_resched_rwlock_write(lock) ({ \ - __might_sleep(__FILE__, __LINE__, PREEMPT_LOCK_OFFSET); \ - __cond_resched_rwlock_write(lock); \ +#define cond_resched_softirq() ({ \ + ___might_sleep(__FILE__, __LINE__, SOFTIRQ_DISABLE_OFFSET); \ + __cond_resched_softirq(); \ }) static inline void cond_resched_rcu(void) @@ -2071,14 +3529,23 @@ static inline void cond_resched_rcu(void) #endif } +static inline unsigned long get_preempt_disable_ip(struct task_struct *p) +{ +#ifdef CONFIG_DEBUG_PREEMPT + return p->preempt_disable_ip; +#else + return 0; +#endif +} + /* * Does a critical section need to be broken due to another - * task waiting?: (technically does not depend on CONFIG_PREEMPTION, + * task waiting?: (technically does not depend on 
CONFIG_PREEMPT, * but a general need for low latency) */ static inline int spin_needbreak(spinlock_t *lock) { -#ifdef CONFIG_PREEMPTION +#ifdef CONFIG_PREEMPT return spin_is_contended(lock); #else return 0; @@ -2086,20 +3553,79 @@ static inline int spin_needbreak(spinlock_t *lock) } /* - * Check if a rwlock is contended. - * Returns non-zero if there is another task waiting on the rwlock. - * Returns zero if the lock is not contended or the system / underlying - * rwlock implementation does not support contention detection. - * Technically does not depend on CONFIG_PREEMPTION, but a general need - * for low latency. + * Idle thread specific functions to determine the need_resched + * polling state. */ -static inline int rwlock_needbreak(rwlock_t *lock) +#ifdef TIF_POLLING_NRFLAG +static inline int tsk_is_polling(struct task_struct *p) { -#ifdef CONFIG_PREEMPTION - return rwlock_is_contended(lock); + return test_tsk_thread_flag(p, TIF_POLLING_NRFLAG); +} + +static inline void __current_set_polling(void) +{ + set_thread_flag(TIF_POLLING_NRFLAG); +} + +static inline bool __must_check current_set_polling_and_test(void) +{ + __current_set_polling(); + + /* + * Polling state must be visible before we test NEED_RESCHED, + * paired by resched_curr() + */ + smp_mb__after_atomic(); + + return unlikely(tif_need_resched()); +} + +static inline void __current_clr_polling(void) +{ + clear_thread_flag(TIF_POLLING_NRFLAG); +} + +static inline bool __must_check current_clr_polling_and_test(void) +{ + __current_clr_polling(); + + /* + * Polling state must be visible before we test NEED_RESCHED, + * paired by resched_curr() + */ + smp_mb__after_atomic(); + + return unlikely(tif_need_resched()); +} + #else - return 0; +static inline int tsk_is_polling(struct task_struct *p) { return 0; } +static inline void __current_set_polling(void) { } +static inline void __current_clr_polling(void) { } + +static inline bool __must_check current_set_polling_and_test(void) +{ + return 
unlikely(tif_need_resched()); +} +static inline bool __must_check current_clr_polling_and_test(void) +{ + return unlikely(tif_need_resched()); +} #endif + +static inline void current_clr_polling(void) +{ + __current_clr_polling(); + + /* + * Ensure we check TIF_NEED_RESCHED after we clear the polling bit. + * Once the bit is cleared, we'll get IPIs with every new + * TIF_NEED_RESCHED and the IPI handler, scheduler_ipi(), will also + * fold. + */ + smp_mb(); /* paired with resched_curr() */ + + preempt_fold_need_resched(); } static __always_inline bool need_resched(void) @@ -2107,6 +3633,32 @@ static __always_inline bool need_resched(void) return unlikely(tif_need_resched()); } +/* + * Thread group CPU time accounting. + */ +void thread_group_cputime(struct task_struct *tsk, struct task_cputime *times); +void thread_group_cputimer(struct task_struct *tsk, struct task_cputime *times); + +/* + * Reevaluate whether the task has signals pending delivery. + * Wake the task if so. + * This is required every time the blocked sigset_t changes. + * callers must hold sighand->siglock. + */ +extern void recalc_sigpending_and_wake(struct task_struct *t); +extern void recalc_sigpending(void); + +extern void signal_wake_up_state(struct task_struct *t, unsigned int state); + +static inline void signal_wake_up(struct task_struct *t, bool resume) +{ + signal_wake_up_state(t, resume ? TASK_WAKEKILL : 0); +} +static inline void ptrace_signal_wake_up(struct task_struct *t, bool resume) +{ + signal_wake_up_state(t, resume ? __TASK_TRACED : 0); +} + /* * Wrappers for p->thread_info->cpu access. No-op on UP. 
*/ @@ -2115,12 +3667,17 @@ static __always_inline bool need_resched(void) static inline unsigned int task_cpu(const struct task_struct *p) { #ifdef CONFIG_THREAD_INFO_IN_TASK - return READ_ONCE(p->cpu); + return p->cpu; #else - return READ_ONCE(task_thread_info(p)->cpu); + return task_thread_info(p)->cpu; #endif } +static inline int task_node(const struct task_struct *p) +{ + return cpu_to_node(task_cpu(p)); +} + extern void set_task_cpu(struct task_struct *p, unsigned int cpu); #else @@ -2136,177 +3693,103 @@ static inline void set_task_cpu(struct task_struct *p, unsigned int cpu) #endif /* CONFIG_SMP */ -extern bool sched_task_on_rq(struct task_struct *p); - -/* - * In order to reduce various lock holder preemption latencies provide an - * interface to see if a vCPU is currently running or not. - * - * This allows us to terminate optimistic spin loops and block, analogous to - * the native optimistic spin heuristic of testing if the lock owner task is - * running or not. - */ -#ifndef vcpu_is_preempted -static inline bool vcpu_is_preempted(int cpu) -{ - return false; -} -#endif - extern long sched_setaffinity(pid_t pid, const struct cpumask *new_mask); extern long sched_getaffinity(pid_t pid, struct cpumask *mask); +#ifdef CONFIG_CGROUP_SCHED +extern struct task_group root_task_group; +#endif /* CONFIG_CGROUP_SCHED */ + +extern int task_can_switch_user(struct user_struct *up, + struct task_struct *tsk); + +#ifdef CONFIG_TASK_XACCT +static inline void add_rchar(struct task_struct *tsk, ssize_t amt) +{ + tsk->ioac.rchar += amt; +} + +static inline void add_wchar(struct task_struct *tsk, ssize_t amt) +{ + tsk->ioac.wchar += amt; +} + +static inline void inc_syscr(struct task_struct *tsk) +{ + tsk->ioac.syscr++; +} + +static inline void inc_syscw(struct task_struct *tsk) +{ + tsk->ioac.syscw++; +} +#else +static inline void add_rchar(struct task_struct *tsk, ssize_t amt) +{ +} + +static inline void add_wchar(struct task_struct *tsk, ssize_t amt) +{ +} + +static 
inline void inc_syscr(struct task_struct *tsk) +{ +} + +static inline void inc_syscw(struct task_struct *tsk) +{ +} +#endif + #ifndef TASK_SIZE_OF #define TASK_SIZE_OF(tsk) TASK_SIZE #endif -#ifdef CONFIG_SMP -/* Returns effective CPU energy utilization, as seen by the scheduler */ -unsigned long sched_cpu_util(int cpu, unsigned long max); -#endif /* CONFIG_SMP */ - -#ifdef CONFIG_RSEQ - -/* - * Map the event mask on the user-space ABI enum rseq_cs_flags - * for direct mask checks. - */ -enum rseq_event_mask_bits { - RSEQ_EVENT_PREEMPT_BIT = RSEQ_CS_FLAG_NO_RESTART_ON_PREEMPT_BIT, - RSEQ_EVENT_SIGNAL_BIT = RSEQ_CS_FLAG_NO_RESTART_ON_SIGNAL_BIT, - RSEQ_EVENT_MIGRATE_BIT = RSEQ_CS_FLAG_NO_RESTART_ON_MIGRATE_BIT, -}; - -enum rseq_event_mask { - RSEQ_EVENT_PREEMPT = (1U << RSEQ_EVENT_PREEMPT_BIT), - RSEQ_EVENT_SIGNAL = (1U << RSEQ_EVENT_SIGNAL_BIT), - RSEQ_EVENT_MIGRATE = (1U << RSEQ_EVENT_MIGRATE_BIT), -}; - -static inline void rseq_set_notify_resume(struct task_struct *t) -{ - if (t->rseq) - set_tsk_thread_flag(t, TIF_NOTIFY_RESUME); -} - -void __rseq_handle_notify_resume(struct ksignal *sig, struct pt_regs *regs); - -static inline void rseq_handle_notify_resume(struct ksignal *ksig, - struct pt_regs *regs) -{ - if (current->rseq) - __rseq_handle_notify_resume(ksig, regs); -} - -static inline void rseq_signal_deliver(struct ksignal *ksig, - struct pt_regs *regs) -{ - preempt_disable(); - __set_bit(RSEQ_EVENT_SIGNAL_BIT, ¤t->rseq_event_mask); - preempt_enable(); - rseq_handle_notify_resume(ksig, regs); -} - -/* rseq_preempt() requires preemption to be disabled. */ -static inline void rseq_preempt(struct task_struct *t) -{ - __set_bit(RSEQ_EVENT_PREEMPT_BIT, &t->rseq_event_mask); - rseq_set_notify_resume(t); -} - -/* rseq_migrate() requires preemption to be disabled. 
*/ -static inline void rseq_migrate(struct task_struct *t) -{ - __set_bit(RSEQ_EVENT_MIGRATE_BIT, &t->rseq_event_mask); - rseq_set_notify_resume(t); -} - -/* - * If parent process has a registered restartable sequences area, the - * child inherits. Unregister rseq for a clone with CLONE_VM set. - */ -static inline void rseq_fork(struct task_struct *t, unsigned long clone_flags) -{ - if (clone_flags & CLONE_VM) { - t->rseq = NULL; - t->rseq_sig = 0; - t->rseq_event_mask = 0; - } else { - t->rseq = current->rseq; - t->rseq_sig = current->rseq_sig; - t->rseq_event_mask = current->rseq_event_mask; - } -} - -static inline void rseq_execve(struct task_struct *t) -{ - t->rseq = NULL; - t->rseq_sig = 0; - t->rseq_event_mask = 0; -} - +#ifdef CONFIG_MEMCG +extern void mm_update_next_owner(struct mm_struct *mm); #else +static inline void mm_update_next_owner(struct mm_struct *mm) +{ +} +#endif /* CONFIG_MEMCG */ -static inline void rseq_set_notify_resume(struct task_struct *t) +static inline unsigned long task_rlimit(const struct task_struct *tsk, + unsigned int limit) { + return READ_ONCE(tsk->signal->rlim[limit].rlim_cur); } -static inline void rseq_handle_notify_resume(struct ksignal *ksig, - struct pt_regs *regs) + +static inline unsigned long task_rlimit_max(const struct task_struct *tsk, + unsigned int limit) { + return READ_ONCE(tsk->signal->rlim[limit].rlim_max); } -static inline void rseq_signal_deliver(struct ksignal *ksig, - struct pt_regs *regs) + +static inline unsigned long rlimit(unsigned int limit) { + return task_rlimit(current, limit); } -static inline void rseq_preempt(struct task_struct *t) -{ -} -static inline void rseq_migrate(struct task_struct *t) -{ -} -static inline void rseq_fork(struct task_struct *t, unsigned long clone_flags) -{ -} -static inline void rseq_execve(struct task_struct *t) + +static inline unsigned long rlimit_max(unsigned int limit) { + return task_rlimit_max(current, limit); } - -#endif - -#ifdef CONFIG_DEBUG_RSEQ - -void 
rseq_syscall(struct pt_regs *regs); - -#else - -static inline void rseq_syscall(struct pt_regs *regs) -{ -} - -#endif - -const struct sched_avg *sched_trace_cfs_rq_avg(struct cfs_rq *cfs_rq); -char *sched_trace_cfs_rq_path(struct cfs_rq *cfs_rq, char *str, int len); -int sched_trace_cfs_rq_cpu(struct cfs_rq *cfs_rq); - -const struct sched_avg *sched_trace_rq_avg_rt(struct rq *rq); -const struct sched_avg *sched_trace_rq_avg_dl(struct rq *rq); -const struct sched_avg *sched_trace_rq_avg_irq(struct rq *rq); - -int sched_trace_rq_cpu(struct rq *rq); -int sched_trace_rq_cpu_capacity(struct rq *rq); -int sched_trace_rq_nr_running(struct rq *rq); - -const struct cpumask *sched_trace_rd_span(struct root_domain *rd); - -#ifdef CONFIG_SCHED_CORE -extern void sched_core_free(struct task_struct *tsk); -extern void sched_core_fork(struct task_struct *p); -extern int sched_core_share_pid(unsigned int cmd, pid_t pid, enum pid_type type, - unsigned long uaddr); -#else -static inline void sched_core_free(struct task_struct *tsk) { } -static inline void sched_core_fork(struct task_struct *p) { } -#endif + +#define SCHED_CPUFREQ_RT (1U << 0) +#define SCHED_CPUFREQ_DL (1U << 1) +#define SCHED_CPUFREQ_IOWAIT (1U << 2) + +#define SCHED_CPUFREQ_RT_DL (SCHED_CPUFREQ_RT | SCHED_CPUFREQ_DL) + +#ifdef CONFIG_CPU_FREQ +struct update_util_data { + void (*func)(struct update_util_data *data, u64 time, unsigned int flags); +} __no_const; + +void cpufreq_add_update_util_hook(int cpu, struct update_util_data *data, + void (*func)(struct update_util_data *data, u64 time, + unsigned int flags)); +void cpufreq_remove_update_util_hook(int cpu); +#endif /* CONFIG_CPU_FREQ */ #endif diff --git a/include/linux/sched/deadline.h b/include/linux/sched/deadline.h index 1aff00b65f..9089a2ae91 100644 --- a/include/linux/sched/deadline.h +++ b/include/linux/sched/deadline.h @@ -1,4 +1,5 @@ -/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _SCHED_DEADLINE_H +#define _SCHED_DEADLINE_H /* * SCHED_DEADLINE tasks 
has negative priorities, reflecting @@ -25,10 +26,4 @@ static inline bool dl_time_before(u64 a, u64 b) return (s64)(a - b) < 0; } -#ifdef CONFIG_SMP - -struct root_domain; -extern void dl_add_task_root_domain(struct task_struct *p); -extern void dl_clear_root_domain(struct root_domain *rd); - -#endif /* CONFIG_SMP */ +#endif /* _SCHED_DEADLINE_H */ diff --git a/include/linux/sched/prio.h b/include/linux/sched/prio.h index ab83d85e11..d9cf5a5762 100644 --- a/include/linux/sched/prio.h +++ b/include/linux/sched/prio.h @@ -1,6 +1,5 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -#ifndef _LINUX_SCHED_PRIO_H -#define _LINUX_SCHED_PRIO_H +#ifndef _SCHED_PRIO_H +#define _SCHED_PRIO_H #define MAX_NICE 19 #define MIN_NICE -20 @@ -11,9 +10,16 @@ * priority is 0..MAX_RT_PRIO-1, and SCHED_NORMAL/SCHED_BATCH * tasks are in the range MAX_RT_PRIO..MAX_PRIO-1. Priority * values are inverted: lower p->prio value means higher priority. + * + * The MAX_USER_RT_PRIO value allows the actual maximum + * RT priority to be separate from the value exported to + * user-space. This allows kernel threads to set their + * priority to a value higher than any user task. Note: + * MAX_RT_PRIO must not be smaller than MAX_USER_RT_PRIO. */ -#define MAX_RT_PRIO 100 +#define MAX_USER_RT_PRIO 100 +#define MAX_RT_PRIO MAX_USER_RT_PRIO #define MAX_PRIO (MAX_RT_PRIO + NICE_WIDTH) #define DEFAULT_PRIO (MAX_RT_PRIO + NICE_WIDTH / 2) @@ -26,6 +32,15 @@ #define NICE_TO_PRIO(nice) ((nice) + DEFAULT_PRIO) #define PRIO_TO_NICE(prio) ((prio) - DEFAULT_PRIO) +/* + * 'User priority' is the nice value converted to something we + * can work with better when scaling various scheduler parameters, + * it's a [ 0 ... 39 ] range. + */ +#define USER_PRIO(p) ((p)-MAX_RT_PRIO) +#define TASK_USER_PRIO(p) USER_PRIO((p)->static_prio) +#define MAX_USER_PRIO (USER_PRIO(MAX_PRIO)) + /* * Convert nice value [19,-20] to rlimit style value [1,40]. 
*/ @@ -42,4 +57,4 @@ static inline long rlimit_to_nice(long prio) return (MAX_NICE - prio + 1); } -#endif /* _LINUX_SCHED_PRIO_H */ +#endif /* _SCHED_PRIO_H */ diff --git a/include/linux/sched/rt.h b/include/linux/sched/rt.h index e5af028c08..a30b172df6 100644 --- a/include/linux/sched/rt.h +++ b/include/linux/sched/rt.h @@ -1,10 +1,7 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -#ifndef _LINUX_SCHED_RT_H -#define _LINUX_SCHED_RT_H +#ifndef _SCHED_RT_H +#define _SCHED_RT_H -#include - -struct task_struct; +#include static inline int rt_prio(int prio) { @@ -18,32 +15,28 @@ static inline int rt_task(struct task_struct *p) return rt_prio(p->prio); } -static inline bool task_is_realtime(struct task_struct *tsk) -{ - int policy = tsk->policy; - - if (policy == SCHED_FIFO || policy == SCHED_RR) - return true; - if (policy == SCHED_DEADLINE) - return true; - return false; -} - #ifdef CONFIG_RT_MUTEXES -/* - * Must hold either p->pi_lock or task_rq(p)->lock. - */ -static inline struct task_struct *rt_mutex_get_top_task(struct task_struct *p) -{ - return p->pi_top_task; -} -extern void rt_mutex_setprio(struct task_struct *p, struct task_struct *pi_task); +extern int rt_mutex_getprio(struct task_struct *p); +extern void rt_mutex_setprio(struct task_struct *p, int prio); +extern int rt_mutex_get_effective_prio(struct task_struct *task, int newprio); +extern struct task_struct *rt_mutex_get_top_task(struct task_struct *task); extern void rt_mutex_adjust_pi(struct task_struct *p); static inline bool tsk_is_pi_blocked(struct task_struct *tsk) { return tsk->pi_blocked_on != NULL; } #else +static inline int rt_mutex_getprio(struct task_struct *p) +{ + return p->normal_prio; +} + +static inline int rt_mutex_get_effective_prio(struct task_struct *task, + int newprio) +{ + return newprio; +} + static inline struct task_struct *rt_mutex_get_top_task(struct task_struct *task) { return NULL; @@ -64,4 +57,4 @@ extern void normalize_rt_tasks(void); */ #define RR_TIMESLICE (100 * HZ / 1000) 
-#endif /* _LINUX_SCHED_RT_H */ +#endif /* _SCHED_RT_H */ diff --git a/include/linux/sched/sysctl.h b/include/linux/sched/sysctl.h index 304f431178..22db1e6370 100644 --- a/include/linux/sched/sysctl.h +++ b/include/linux/sched/sysctl.h @@ -1,31 +1,22 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -#ifndef _LINUX_SCHED_SYSCTL_H -#define _LINUX_SCHED_SYSCTL_H - -#include - -struct ctl_table; +#ifndef _SCHED_SYSCTL_H +#define _SCHED_SYSCTL_H #ifdef CONFIG_DETECT_HUNG_TASK - -#ifdef CONFIG_SMP -extern unsigned int sysctl_hung_task_all_cpu_backtrace; -#else -#define sysctl_hung_task_all_cpu_backtrace 0 -#endif /* CONFIG_SMP */ - extern int sysctl_hung_task_check_count; extern unsigned int sysctl_hung_task_panic; extern unsigned long sysctl_hung_task_timeout_secs; -extern unsigned long sysctl_hung_task_check_interval_secs; extern int sysctl_hung_task_warnings; -int proc_dohung_task_timeout_secs(struct ctl_table *table, int write, - void *buffer, size_t *lenp, loff_t *ppos); +extern int proc_dohung_task_timeout_secs(struct ctl_table *table, int write, + void __user *buffer, + size_t *lenp, loff_t *ppos); #else /* Avoid need for ifdefs elsewhere in the code */ enum { sysctl_hung_task_timeout_secs = 0 }; #endif +extern unsigned int sysctl_sched_latency; +extern unsigned int sysctl_sched_min_granularity; +extern unsigned int sysctl_sched_wakeup_granularity; extern unsigned int sysctl_sched_child_runs_first; enum sched_tunable_scaling { @@ -34,6 +25,23 @@ enum sched_tunable_scaling { SCHED_TUNABLESCALING_LINEAR, SCHED_TUNABLESCALING_END, }; +extern enum sched_tunable_scaling sysctl_sched_tunable_scaling; + +extern unsigned int sysctl_numa_balancing_scan_delay; +extern unsigned int sysctl_numa_balancing_scan_period_min; +extern unsigned int sysctl_numa_balancing_scan_period_max; +extern unsigned int sysctl_numa_balancing_scan_size; + +#ifdef CONFIG_SCHED_DEBUG +extern unsigned int sysctl_sched_migration_cost; +extern unsigned int sysctl_sched_nr_migrate; +extern unsigned int 
sysctl_sched_time_avg; +extern unsigned int sysctl_sched_shares_window; + +int sched_proc_update_handler(struct ctl_table *table, int write, + void __user *buffer, size_t *length, + loff_t *ppos); +#endif /* * control realtime throttling: @@ -44,15 +52,6 @@ enum sched_tunable_scaling { extern unsigned int sysctl_sched_rt_period; extern int sysctl_sched_rt_runtime; -extern unsigned int sysctl_sched_dl_period_max; -extern unsigned int sysctl_sched_dl_period_min; - -#ifdef CONFIG_UCLAMP_TASK -extern unsigned int sysctl_sched_uclamp_util_min; -extern unsigned int sysctl_sched_uclamp_util_max; -extern unsigned int sysctl_sched_uclamp_util_min_rt_default; -#endif - #ifdef CONFIG_CFS_BANDWIDTH extern unsigned int sysctl_sched_cfs_bandwidth_slice; #endif @@ -61,24 +60,22 @@ extern unsigned int sysctl_sched_cfs_bandwidth_slice; extern unsigned int sysctl_sched_autogroup_enabled; #endif -extern int sysctl_sched_rr_timeslice; extern int sched_rr_timeslice; -int sched_rr_handler(struct ctl_table *table, int write, void *buffer, - size_t *lenp, loff_t *ppos); -int sched_rt_handler(struct ctl_table *table, int write, void *buffer, - size_t *lenp, loff_t *ppos); -int sysctl_sched_uclamp_handler(struct ctl_table *table, int write, - void *buffer, size_t *lenp, loff_t *ppos); -int sysctl_numa_balancing(struct ctl_table *table, int write, void *buffer, - size_t *lenp, loff_t *ppos); -int sysctl_schedstats(struct ctl_table *table, int write, void *buffer, - size_t *lenp, loff_t *ppos); +extern int sched_rr_handler(struct ctl_table *table, int write, + void __user *buffer, size_t *lenp, + loff_t *ppos); -#if defined(CONFIG_ENERGY_MODEL) && defined(CONFIG_CPU_FREQ_GOV_SCHEDUTIL) -extern unsigned int sysctl_sched_energy_aware; -int sched_energy_aware_handler(struct ctl_table *table, int write, - void *buffer, size_t *lenp, loff_t *ppos); -#endif +extern int sched_rt_handler(struct ctl_table *table, int write, + void __user *buffer, size_t *lenp, + loff_t *ppos); -#endif /* 
_LINUX_SCHED_SYSCTL_H */ +extern int sysctl_numa_balancing(struct ctl_table *table, int write, + void __user *buffer, size_t *lenp, + loff_t *ppos); + +extern int sysctl_schedstats(struct ctl_table *table, int write, + void __user *buffer, size_t *lenp, + loff_t *ppos); + +#endif /* _SCHED_SYSCTL_H */ diff --git a/include/linux/sched_clock.h b/include/linux/sched_clock.h index 835ee87ed7..411b52e424 100644 --- a/include/linux/sched_clock.h +++ b/include/linux/sched_clock.h @@ -1,49 +1,25 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * sched_clock.h: support for extending counters to full 64-bit ns counter + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. */ #ifndef LINUX_SCHED_CLOCK #define LINUX_SCHED_CLOCK #ifdef CONFIG_GENERIC_SCHED_CLOCK -/** - * struct clock_read_data - data required to read from sched_clock() - * - * @epoch_ns: sched_clock() value at last update - * @epoch_cyc: Clock cycle value at last update. - * @sched_clock_mask: Bitmask for two's complement subtraction of non 64bit - * clocks. - * @read_sched_clock: Current clock source (or dummy source when suspended). - * @mult: Multiplier for scaled math conversion. - * @shift: Shift value for scaled math conversion. - * - * Care must be taken when updating this structure; it is read by - * some very hot code paths. It occupies <=40 bytes and, when combined - * with the seqcount used to synchronize access, comfortably fits into - * a 64 byte cache line. 
- */ -struct clock_read_data { - u64 epoch_ns; - u64 epoch_cyc; - u64 sched_clock_mask; - u64 (*read_sched_clock)(void); - u32 mult; - u32 shift; -}; - -extern struct clock_read_data *sched_clock_read_begin(unsigned int *seq); -extern int sched_clock_read_retry(unsigned int seq); - -extern void generic_sched_clock_init(void); +extern void sched_clock_postinit(void); extern void sched_clock_register(u64 (*read)(void), int bits, unsigned long rate); #else -static inline void generic_sched_clock_init(void) { } +static inline void sched_clock_postinit(void) { } static inline void sched_clock_register(u64 (*read)(void), int bits, unsigned long rate) { + ; } #endif diff --git a/include/linux/scif.h b/include/linux/scif.h new file mode 100644 index 0000000000..c6209dd8ba --- /dev/null +++ b/include/linux/scif.h @@ -0,0 +1,1339 @@ +/* + * Intel MIC Platform Software Stack (MPSS) + * + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2014 Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * BSD LICENSE + * + * Copyright(c) 2014 Intel Corporation. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. 
+ * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * Intel SCIF driver. 
+ * + */ +#ifndef __SCIF_H__ +#define __SCIF_H__ + +#include +#include +#include +#include + +#define SCIF_ACCEPT_SYNC 1 +#define SCIF_SEND_BLOCK 1 +#define SCIF_RECV_BLOCK 1 + +enum { + SCIF_PROT_READ = (1 << 0), + SCIF_PROT_WRITE = (1 << 1) +}; + +enum { + SCIF_MAP_FIXED = 0x10, + SCIF_MAP_KERNEL = 0x20, +}; + +enum { + SCIF_FENCE_INIT_SELF = (1 << 0), + SCIF_FENCE_INIT_PEER = (1 << 1), + SCIF_SIGNAL_LOCAL = (1 << 4), + SCIF_SIGNAL_REMOTE = (1 << 5) +}; + +enum { + SCIF_RMA_USECPU = (1 << 0), + SCIF_RMA_USECACHE = (1 << 1), + SCIF_RMA_SYNC = (1 << 2), + SCIF_RMA_ORDERED = (1 << 3) +}; + +/* End of SCIF Admin Reserved Ports */ +#define SCIF_ADMIN_PORT_END 1024 + +/* End of SCIF Reserved Ports */ +#define SCIF_PORT_RSVD 1088 + +typedef struct scif_endpt *scif_epd_t; +typedef struct scif_pinned_pages *scif_pinned_pages_t; + +/** + * struct scif_range - SCIF registered range used in kernel mode + * @cookie: cookie used internally by SCIF + * @nr_pages: number of pages of PAGE_SIZE + * @prot_flags: R/W protection + * @phys_addr: Array of bus addresses + * @va: Array of kernel virtual addresses backed by the pages in the phys_addr + * array. The va is populated only when called on the host for a remote + * SCIF connection on MIC. This is required to support the use case of DMA + * between MIC and another device which is not a SCIF node e.g., an IB or + * ethernet NIC. + */ +struct scif_range { + void *cookie; + int nr_pages; + int prot_flags; + dma_addr_t *phys_addr; + void __iomem **va; +}; + +/** + * struct scif_pollepd - SCIF endpoint to be monitored via scif_poll + * @epd: SCIF endpoint + * @events: requested events + * @revents: returned events + */ +struct scif_pollepd { + scif_epd_t epd; + short events; + short revents; +}; + +/** + * scif_peer_dev - representation of a peer SCIF device + * + * Peer devices show up as PCIe devices for the mgmt node but not the cards. 
+ * The mgmt node discovers all the cards on the PCIe bus and informs the other + * cards about their peers. Upon notification of a peer a node adds a peer + * device to the peer bus to maintain symmetry in the way devices are + * discovered across all nodes in the SCIF network. + * + * @dev: underlying device + * @dnode - The destination node which this device will communicate with. + */ +struct scif_peer_dev { + struct device dev; + u8 dnode; +}; + +/** + * scif_client - representation of a SCIF client + * @name: client name + * @probe - client method called when a peer device is registered + * @remove - client method called when a peer device is unregistered + * @si - subsys_interface used internally for implementing SCIF clients + */ +struct scif_client { + const char *name; + void (*probe)(struct scif_peer_dev *spdev); + void (*remove)(struct scif_peer_dev *spdev); + struct subsys_interface si; +} __do_const; + +#define SCIF_OPEN_FAILED ((scif_epd_t)-1) +#define SCIF_REGISTER_FAILED ((off_t)-1) +#define SCIF_MMAP_FAILED ((void *)-1) + +/** + * scif_open() - Create an endpoint + * + * Return: + * Upon successful completion, scif_open() returns an endpoint descriptor to + * be used in subsequent SCIF functions calls to refer to that endpoint; + * otherwise in user mode SCIF_OPEN_FAILED (that is ((scif_epd_t)-1)) is + * returned and errno is set to indicate the error; in kernel mode a NULL + * scif_epd_t is returned. + * + * Errors: + * ENOMEM - Insufficient kernel memory was available + */ +scif_epd_t scif_open(void); + +/** + * scif_bind() - Bind an endpoint to a port + * @epd: endpoint descriptor + * @pn: port number + * + * scif_bind() binds endpoint epd to port pn, where pn is a port number on the + * local node. If pn is zero, a port number greater than or equal to + * SCIF_PORT_RSVD is assigned and returned. Each endpoint may be bound to + * exactly one local port. 
Ports less than 1024 when requested can only be bound + * by system (or root) processes or by processes executed by privileged users. + * + * Return: + * Upon successful completion, scif_bind() returns the port number to which epd + * is bound; otherwise in user mode -1 is returned and errno is set to + * indicate the error; in kernel mode the negative of one of the following + * errors is returned. + * + * Errors: + * EBADF, ENOTTY - epd is not a valid endpoint descriptor + * EINVAL - the endpoint or the port is already bound + * EISCONN - The endpoint is already connected + * ENOSPC - No port number available for assignment + * EACCES - The port requested is protected and the user is not the superuser + */ +int scif_bind(scif_epd_t epd, u16 pn); + +/** + * scif_listen() - Listen for connections on an endpoint + * @epd: endpoint descriptor + * @backlog: maximum pending connection requests + * + * scif_listen() marks the endpoint epd as a listening endpoint - that is, as + * an endpoint that will be used to accept incoming connection requests. Once + * so marked, the endpoint is said to be in the listening state and may not be + * used as the endpoint of a connection. + * + * The endpoint, epd, must have been bound to a port. + * + * The backlog argument defines the maximum length to which the queue of + * pending connections for epd may grow. If a connection request arrives when + * the queue is full, the client may receive an error with an indication that + * the connection was refused. + * + * Return: + * Upon successful completion, scif_listen() returns 0; otherwise in user mode + * -1 is returned and errno is set to indicate the error; in kernel mode the + * negative of one of the following errors is returned. 
+ * + * Errors: + * EBADF, ENOTTY - epd is not a valid endpoint descriptor + * EINVAL - the endpoint is not bound to a port + * EISCONN - The endpoint is already connected or listening + */ +int scif_listen(scif_epd_t epd, int backlog); + +/** + * scif_connect() - Initiate a connection on a port + * @epd: endpoint descriptor + * @dst: global id of port to which to connect + * + * The scif_connect() function requests the connection of endpoint epd to remote + * port dst. If the connection is successful, a peer endpoint, bound to dst, is + * created on node dst.node. On successful return, the connection is complete. + * + * If the endpoint epd has not already been bound to a port, scif_connect() + * will bind it to an unused local port. + * + * A connection is terminated when an endpoint of the connection is closed, + * either explicitly by scif_close(), or when a process that owns one of the + * endpoints of the connection is terminated. + * + * In user space, scif_connect() supports an asynchronous connection mode + * if the application has set the O_NONBLOCK flag on the endpoint via the + * fcntl() system call. Setting this flag will result in the calling process + * not to wait during scif_connect(). + * + * Return: + * Upon successful completion, scif_connect() returns the port ID to which the + * endpoint, epd, is bound; otherwise in user mode -1 is returned and errno is + * set to indicate the error; in kernel mode the negative of one of the + * following errors is returned. 
+ * + * Errors: + * EBADF, ENOTTY - epd is not a valid endpoint descriptor + * ECONNREFUSED - The destination was not listening for connections or refused + * the connection request + * EINVAL - dst.port is not a valid port ID + * EISCONN - The endpoint is already connected + * ENOMEM - No buffer space is available + * ENODEV - The destination node does not exist, or the node is lost or existed, + * but is not currently in the network since it may have crashed + * ENOSPC - No port number available for assignment + * EOPNOTSUPP - The endpoint is listening and cannot be connected + */ +int scif_connect(scif_epd_t epd, struct scif_port_id *dst); + +/** + * scif_accept() - Accept a connection on an endpoint + * @epd: endpoint descriptor + * @peer: global id of port to which connected + * @newepd: new connected endpoint descriptor + * @flags: flags + * + * The scif_accept() call extracts the first connection request from the queue + * of pending connections for the port on which epd is listening. scif_accept() + * creates a new endpoint, bound to the same port as epd, and allocates a new + * SCIF endpoint descriptor, returned in newepd, for the endpoint. The new + * endpoint is connected to the endpoint through which the connection was + * requested. epd is unaffected by this call, and remains in the listening + * state. + * + * On successful return, peer holds the global port identifier (node id and + * local port number) of the port which requested the connection. + * + * A connection is terminated when an endpoint of the connection is closed, + * either explicitly by scif_close(), or when a process that owns one of the + * endpoints of the connection is terminated. + * + * The number of connections that can (subsequently) be accepted on epd is only + * limited by system resources (memory). + * + * The flags argument is formed by OR'ing together zero or more of the + * following values. + * SCIF_ACCEPT_SYNC - block until a connection request is presented. 
If + * SCIF_ACCEPT_SYNC is not in flags, and no pending + * connections are present on the queue, scif_accept() + * fails with an EAGAIN error + * + * In user mode, the select() and poll() functions can be used to determine + * when there is a connection request. In kernel mode, the scif_poll() + * function may be used for this purpose. A readable event will be delivered + * when a connection is requested. + * + * Return: + * Upon successful completion, scif_accept() returns 0; otherwise in user mode + * -1 is returned and errno is set to indicate the error; in kernel mode the + * negative of one of the following errors is returned. + * + * Errors: + * EAGAIN - SCIF_ACCEPT_SYNC is not set and no connections are present to be + * accepted or SCIF_ACCEPT_SYNC is not set and remote node failed to complete + * its connection request + * EBADF, ENOTTY - epd is not a valid endpoint descriptor + * EINTR - Interrupted function + * EINVAL - epd is not a listening endpoint, or flags is invalid, or peer is + * NULL, or newepd is NULL + * ENODEV - The requesting node is lost or existed, but is not currently in the + * network since it may have crashed + * ENOMEM - Not enough space + * ENOENT - Secondary part of epd registration failed + */ +int scif_accept(scif_epd_t epd, struct scif_port_id *peer, scif_epd_t + *newepd, int flags); + +/** + * scif_close() - Close an endpoint + * @epd: endpoint descriptor + * + * scif_close() closes an endpoint and performs necessary teardown of + * facilities associated with that endpoint. + * + * If epd is a listening endpoint then it will no longer accept connection + * requests on the port to which it is bound. Any pending connection requests + * are rejected. + * + * If epd is a connected endpoint, then its peer endpoint is also closed. RMAs + * which are in-process through epd or its peer endpoint will complete before + * scif_close() returns. 
Registered windows of the local and peer endpoints are + * released as if scif_unregister() was called against each window. + * + * Closing a SCIF endpoint does not affect local registered memory mapped by + * a SCIF endpoint on a remote node. The local memory remains mapped by the peer + * SCIF endpoint explicitly removed by calling munmap(..) by the peer. + * + * If the peer endpoint's receive queue is not empty at the time that epd is + * closed, then the peer endpoint can be passed as the endpoint parameter to + * scif_recv() until the receive queue is empty. + * + * epd is freed and may no longer be accessed. + * + * Return: + * Upon successful completion, scif_close() returns 0; otherwise in user mode + * -1 is returned and errno is set to indicate the error; in kernel mode the + * negative of one of the following errors is returned. + * + * Errors: + * EBADF, ENOTTY - epd is not a valid endpoint descriptor + */ +int scif_close(scif_epd_t epd); + +/** + * scif_send() - Send a message + * @epd: endpoint descriptor + * @msg: message buffer address + * @len: message length + * @flags: blocking mode flags + * + * scif_send() sends data to the peer of endpoint epd. Up to len bytes of data + * are copied from memory starting at address msg. On successful execution the + * return value of scif_send() is the number of bytes that were sent, and is + * zero if no bytes were sent because len was zero. scif_send() may be called + * only when the endpoint is in a connected state. + * + * If a scif_send() call is non-blocking, then it sends only those bytes which + * can be sent without waiting, up to a maximum of len bytes. + * + * If a scif_send() call is blocking, then it normally returns after sending + * all len bytes. If a blocking call is interrupted or the connection is + * reset, the call is considered successful if some bytes were sent or len is + * zero, otherwise the call is considered unsuccessful. 
+ * + * In user mode, the select() and poll() functions can be used to determine + * when the send queue is not full. In kernel mode, the scif_poll() function + * may be used for this purpose. + * + * It is recommended that scif_send()/scif_recv() only be used for short + * control-type message communication between SCIF endpoints. The SCIF RMA + * APIs are expected to provide better performance for transfer sizes of + * 1024 bytes or longer for the current MIC hardware and software + * implementation. + * + * scif_send() will block until the entire message is sent if SCIF_SEND_BLOCK + * is passed as the flags argument. + * + * Return: + * Upon successful completion, scif_send() returns the number of bytes sent; + * otherwise in user mode -1 is returned and errno is set to indicate the + * error; in kernel mode the negative of one of the following errors is + * returned. + * + * Errors: + * EBADF, ENOTTY - epd is not a valid endpoint descriptor + * ECONNRESET - Connection reset by peer + * EINVAL - flags is invalid, or len is negative + * ENODEV - The remote node is lost or existed, but is not currently in the + * network since it may have crashed + * ENOMEM - Not enough space + * ENOTCONN - The endpoint is not connected + */ +int scif_send(scif_epd_t epd, void *msg, int len, int flags); + +/** + * scif_recv() - Receive a message + * @epd: endpoint descriptor + * @msg: message buffer address + * @len: message buffer length + * @flags: blocking mode flags + * + * scif_recv() receives data from the peer of endpoint epd. Up to len bytes of + * data are copied to memory starting at address msg. On successful execution + * the return value of scif_recv() is the number of bytes that were received, + * and is zero if no bytes were received because len was zero. scif_recv() may + * be called only when the endpoint is in a connected state. 
+ * + * If a scif_recv() call is non-blocking, then it receives only those bytes + * which can be received without waiting, up to a maximum of len bytes. + * + * If a scif_recv() call is blocking, then it normally returns after receiving + * all len bytes. If the blocking call was interrupted due to a disconnection, + * subsequent calls to scif_recv() will copy all bytes received upto the point + * of disconnection. + * + * In user mode, the select() and poll() functions can be used to determine + * when data is available to be received. In kernel mode, the scif_poll() + * function may be used for this purpose. + * + * It is recommended that scif_send()/scif_recv() only be used for short + * control-type message communication between SCIF endpoints. The SCIF RMA + * APIs are expected to provide better performance for transfer sizes of + * 1024 bytes or longer for the current MIC hardware and software + * implementation. + * + * scif_recv() will block until the entire message is received if + * SCIF_RECV_BLOCK is passed as the flags argument. + * + * Return: + * Upon successful completion, scif_recv() returns the number of bytes + * received; otherwise in user mode -1 is returned and errno is set to + * indicate the error; in kernel mode the negative of one of the following + * errors is returned. + * + * Errors: + * EAGAIN - The destination node is returning from a low power state + * EBADF, ENOTTY - epd is not a valid endpoint descriptor + * ECONNRESET - Connection reset by peer + * EINVAL - flags is invalid, or len is negative + * ENODEV - The remote node is lost or existed, but is not currently in the + * network since it may have crashed + * ENOMEM - Not enough space + * ENOTCONN - The endpoint is not connected + */ +int scif_recv(scif_epd_t epd, void *msg, int len, int flags); + +/** + * scif_register() - Mark a memory region for remote access. 
+ * @epd: endpoint descriptor + * @addr: starting virtual address + * @len: length of range + * @offset: offset of window + * @prot_flags: read/write protection flags + * @map_flags: mapping flags + * + * The scif_register() function opens a window, a range of whole pages of the + * registered address space of the endpoint epd, starting at offset po and + * continuing for len bytes. The value of po, further described below, is a + * function of the parameters offset and len, and the value of map_flags. Each + * page of the window represents the physical memory page which backs the + * corresponding page of the range of virtual address pages starting at addr + * and continuing for len bytes. addr and len are constrained to be multiples + * of the page size. A successful scif_register() call returns po. + * + * When SCIF_MAP_FIXED is set in the map_flags argument, po will be offset + * exactly, and offset is constrained to be a multiple of the page size. The + * mapping established by scif_register() will not replace any existing + * registration; an error is returned if any page within the range [offset, + * offset + len - 1] intersects an existing window. + * + * When SCIF_MAP_FIXED is not set, the implementation uses offset in an + * implementation-defined manner to arrive at po. The po value so chosen will + * be an area of the registered address space that the implementation deems + * suitable for a mapping of len bytes. An offset value of 0 is interpreted as + * granting the implementation complete freedom in selecting po, subject to + * constraints described below. A non-zero value of offset is taken to be a + * suggestion of an offset near which the mapping should be placed. When the + * implementation selects a value for po, it does not replace any extant + * window. In all cases, po will be a multiple of the page size. 
+ * + * The physical pages which are so represented by a window are available for + * access in calls to mmap(), scif_readfrom(), scif_writeto(), + * scif_vreadfrom(), and scif_vwriteto(). While a window is registered, the + * physical pages represented by the window will not be reused by the memory + * subsystem for any other purpose. Note that the same physical page may be + * represented by multiple windows. + * + * Subsequent operations which change the memory pages to which virtual + * addresses are mapped (such as mmap(), munmap()) have no effect on + * existing window. + * + * If the process will fork(), it is recommended that the registered + * virtual address range be marked with MADV_DONTFORK. Doing so will prevent + * problems due to copy-on-write semantics. + * + * The prot_flags argument is formed by OR'ing together one or more of the + * following values. + * SCIF_PROT_READ - allow read operations from the window + * SCIF_PROT_WRITE - allow write operations to the window + * + * Return: + * Upon successful completion, scif_register() returns the offset at which the + * mapping was placed (po); otherwise in user mode SCIF_REGISTER_FAILED (that + * is (off_t *)-1) is returned and errno is set to indicate the error; in + * kernel mode the negative of one of the following errors is returned. 
+ *
+ * Errors:
+ * EADDRINUSE - SCIF_MAP_FIXED is set in map_flags, and pages in the range
+ * [offset, offset + len -1] are already registered
+ * EAGAIN - The mapping could not be performed due to lack of resources
+ * EBADF, ENOTTY - epd is not a valid endpoint descriptor
+ * ECONNRESET - Connection reset by peer
+ * EINVAL - map_flags is invalid, or prot_flags is invalid, or SCIF_MAP_FIXED is
+ * set in flags, and offset is not a multiple of the page size, or addr is not a
+ * multiple of the page size, or len is not a multiple of the page size, or is
+ * 0, or offset is negative
+ * ENODEV - The remote node is lost or existed, but is not currently in the
+ * network since it may have crashed
+ * ENOMEM - Not enough space
+ * ENOTCONN - The endpoint is not connected
+ */
+off_t scif_register(scif_epd_t epd, void *addr, size_t len, off_t offset,
+		    int prot_flags, int map_flags);
+
+/**
+ * scif_unregister() - Release a memory region previously registered for
+ * remote access.
+ * @epd:	endpoint descriptor
+ * @offset:	start of range to unregister
+ * @len:	length of range to unregister
+ *
+ * The scif_unregister() function closes those previously registered windows
+ * which are entirely within the range [offset, offset + len - 1]. It is an
+ * error to specify a range which intersects only a subrange of a window.
+ *
+ * On a successful return, pages within the window may no longer be specified
+ * in calls to mmap(), scif_readfrom(), scif_writeto(), scif_vreadfrom(),
+ * scif_vwriteto(), scif_get_pages, and scif_fence_signal(). The window,
+ * however, continues to exist until all previous references against it are
+ * removed. A window is referenced if there is a mapping to it created by
+ * mmap(), or if scif_get_pages() was called against the window
+ * (and the pages have not been returned via scif_put_pages()). A window is
+ * also referenced while an RMA, in which some range of the window is a source
+ * or destination, is in progress.
Finally a window is referenced while some + * offset in that window was specified to scif_fence_signal(), and the RMAs + * marked by that call to scif_fence_signal() have not completed. While a + * window is in this state, its registered address space pages are not + * available for use in a new registered window. + * + * When all such references to the window have been removed, its references to + * all the physical pages which it represents are removed. Similarly, the + * registered address space pages of the window become available for + * registration in a new window. + * + * Return: + * Upon successful completion, scif_unregister() returns 0; otherwise in user + * mode -1 is returned and errno is set to indicate the error; in kernel mode + * the negative of one of the following errors is returned. In the event of an + * error, no windows are unregistered. + * + * Errors: + * EBADF, ENOTTY - epd is not a valid endpoint descriptor + * ECONNRESET - Connection reset by peer + * EINVAL - the range [offset, offset + len - 1] intersects a subrange of a + * window, or offset is negative + * ENODEV - The remote node is lost or existed, but is not currently in the + * network since it may have crashed + * ENOTCONN - The endpoint is not connected + * ENXIO - Offsets in the range [offset, offset + len - 1] are invalid for the + * registered address space of epd + */ +int scif_unregister(scif_epd_t epd, off_t offset, size_t len); + +/** + * scif_readfrom() - Copy from a remote address space + * @epd: endpoint descriptor + * @loffset: offset in local registered address space to + * which to copy + * @len: length of range to copy + * @roffset: offset in remote registered address space + * from which to copy + * @rma_flags: transfer mode flags + * + * scif_readfrom() copies len bytes from the remote registered address space of + * the peer of endpoint epd, starting at the offset roffset to the local + * registered address space of epd, starting at the offset loffset. 
+ * + * Each of the specified ranges [loffset, loffset + len - 1] and [roffset, + * roffset + len - 1] must be within some registered window or windows of the + * local and remote nodes. A range may intersect multiple registered windows, + * but only if those windows are contiguous in the registered address space. + * + * If rma_flags includes SCIF_RMA_USECPU, then the data is copied using + * programmed read/writes. Otherwise the data is copied using DMA. If rma_- + * flags includes SCIF_RMA_SYNC, then scif_readfrom() will return after the + * transfer is complete. Otherwise, the transfer may be performed asynchron- + * ously. The order in which any two asynchronous RMA operations complete + * is non-deterministic. The synchronization functions, scif_fence_mark()/ + * scif_fence_wait() and scif_fence_signal(), can be used to synchronize to + * the completion of asynchronous RMA operations on the same endpoint. + * + * The DMA transfer of individual bytes is not guaranteed to complete in + * address order. If rma_flags includes SCIF_RMA_ORDERED, then the last + * cacheline or partial cacheline of the source range will become visible on + * the destination node after all other transferred data in the source + * range has become visible on the destination node. + * + * The optimal DMA performance will likely be realized if both + * loffset and roffset are cacheline aligned (are a multiple of 64). Lower + * performance will likely be realized if loffset and roffset are not + * cacheline aligned but are separated by some multiple of 64. The lowest level + * of performance is likely if loffset and roffset are not separated by a + * multiple of 64. + * + * The rma_flags argument is formed by ORing together zero or more of the + * following values. + * SCIF_RMA_USECPU - perform the transfer using the CPU, otherwise use the DMA + * engine. + * SCIF_RMA_SYNC - perform the transfer synchronously, returning after the + * transfer has completed. 
Passing this flag results in the + * current implementation busy waiting and consuming CPU cycles + * while the DMA transfer is in progress for best performance by + * avoiding the interrupt latency. + * SCIF_RMA_ORDERED - ensure that the last cacheline or partial cacheline of + * the source range becomes visible on the destination node + * after all other transferred data in the source range has + * become visible on the destination + * + * Return: + * Upon successful completion, scif_readfrom() returns 0; otherwise in user + * mode -1 is returned and errno is set to indicate the error; in kernel mode + * the negative of one of the following errors is returned. + * + * Errors: + * EACCESS - Attempt to write to a read-only range + * EBADF, ENOTTY - epd is not a valid endpoint descriptor + * ECONNRESET - Connection reset by peer + * EINVAL - rma_flags is invalid + * ENODEV - The remote node is lost or existed, but is not currently in the + * network since it may have crashed + * ENOTCONN - The endpoint is not connected + * ENXIO - The range [loffset, loffset + len - 1] is invalid for the registered + * address space of epd, or, The range [roffset, roffset + len - 1] is invalid + * for the registered address space of the peer of epd, or loffset or roffset + * is negative + */ +int scif_readfrom(scif_epd_t epd, off_t loffset, size_t len, off_t + roffset, int rma_flags); + +/** + * scif_writeto() - Copy to a remote address space + * @epd: endpoint descriptor + * @loffset: offset in local registered address space + * from which to copy + * @len: length of range to copy + * @roffset: offset in remote registered address space to + * which to copy + * @rma_flags: transfer mode flags + * + * scif_writeto() copies len bytes from the local registered address space of + * epd, starting at the offset loffset to the remote registered address space + * of the peer of endpoint epd, starting at the offset roffset. 
+ * + * Each of the specified ranges [loffset, loffset + len - 1] and [roffset, + * roffset + len - 1] must be within some registered window or windows of the + * local and remote nodes. A range may intersect multiple registered windows, + * but only if those windows are contiguous in the registered address space. + * + * If rma_flags includes SCIF_RMA_USECPU, then the data is copied using + * programmed read/writes. Otherwise the data is copied using DMA. If rma_- + * flags includes SCIF_RMA_SYNC, then scif_writeto() will return after the + * transfer is complete. Otherwise, the transfer may be performed asynchron- + * ously. The order in which any two asynchronous RMA operations complete + * is non-deterministic. The synchronization functions, scif_fence_mark()/ + * scif_fence_wait() and scif_fence_signal(), can be used to synchronize to + * the completion of asynchronous RMA operations on the same endpoint. + * + * The DMA transfer of individual bytes is not guaranteed to complete in + * address order. If rma_flags includes SCIF_RMA_ORDERED, then the last + * cacheline or partial cacheline of the source range will become visible on + * the destination node after all other transferred data in the source + * range has become visible on the destination node. + * + * The optimal DMA performance will likely be realized if both + * loffset and roffset are cacheline aligned (are a multiple of 64). Lower + * performance will likely be realized if loffset and roffset are not cacheline + * aligned but are separated by some multiple of 64. The lowest level of + * performance is likely if loffset and roffset are not separated by a multiple + * of 64. + * + * The rma_flags argument is formed by ORing together zero or more of the + * following values. + * SCIF_RMA_USECPU - perform the transfer using the CPU, otherwise use the DMA + * engine. + * SCIF_RMA_SYNC - perform the transfer synchronously, returning after the + * transfer has completed. 
Passing this flag results in the
+ *	current implementation busy waiting and consuming CPU cycles
+ *	while the DMA transfer is in progress for best performance by
+ *	avoiding the interrupt latency.
+ * SCIF_RMA_ORDERED - ensure that the last cacheline or partial cacheline of
+ *	the source range becomes visible on the destination node
+ *	after all other transferred data in the source range has
+ *	become visible on the destination
+ *
+ * Return:
+ * Upon successful completion, scif_writeto() returns 0; otherwise in user
+ * mode -1 is returned and errno is set to indicate the error; in kernel mode
+ * the negative of one of the following errors is returned.
+ *
+ * Errors:
+ * EACCESS - Attempt to write to a read-only range
+ * EBADF, ENOTTY - epd is not a valid endpoint descriptor
+ * ECONNRESET - Connection reset by peer
+ * EINVAL - rma_flags is invalid
+ * ENODEV - The remote node is lost or existed, but is not currently in the
+ * network since it may have crashed
+ * ENOTCONN - The endpoint is not connected
+ * ENXIO - The range [loffset, loffset + len - 1] is invalid for the registered
+ * address space of epd, or, The range [roffset , roffset + len -1] is invalid
+ * for the registered address space of the peer of epd, or loffset or roffset
+ * is negative
+ */
+int scif_writeto(scif_epd_t epd, off_t loffset, size_t len, off_t
+		roffset, int rma_flags);
+
+/**
+ * scif_vreadfrom() - Copy from a remote address space
+ * @epd:	endpoint descriptor
+ * @addr:	address to which to copy
+ * @len:	length of range to copy
+ * @roffset:	offset in remote registered address space
+ *		from which to copy
+ * @rma_flags:	transfer mode flags
+ *
+ * scif_vreadfrom() copies len bytes from the remote registered address
+ * space of the peer of endpoint epd, starting at the offset roffset, to local
+ * memory, starting at addr.
+ *
+ * The specified range [roffset, roffset + len - 1] must be within some
+ * registered window or windows of the remote nodes.
The range may + * intersect multiple registered windows, but only if those windows are + * contiguous in the registered address space. + * + * If rma_flags includes SCIF_RMA_USECPU, then the data is copied using + * programmed read/writes. Otherwise the data is copied using DMA. If rma_- + * flags includes SCIF_RMA_SYNC, then scif_vreadfrom() will return after the + * transfer is complete. Otherwise, the transfer may be performed asynchron- + * ously. The order in which any two asynchronous RMA operations complete + * is non-deterministic. The synchronization functions, scif_fence_mark()/ + * scif_fence_wait() and scif_fence_signal(), can be used to synchronize to + * the completion of asynchronous RMA operations on the same endpoint. + * + * The DMA transfer of individual bytes is not guaranteed to complete in + * address order. If rma_flags includes SCIF_RMA_ORDERED, then the last + * cacheline or partial cacheline of the source range will become visible on + * the destination node after all other transferred data in the source + * range has become visible on the destination node. + * + * If rma_flags includes SCIF_RMA_USECACHE, then the physical pages which back + * the specified local memory range may be remain in a pinned state even after + * the specified transfer completes. This may reduce overhead if some or all of + * the same virtual address range is referenced in a subsequent call of + * scif_vreadfrom() or scif_vwriteto(). + * + * The optimal DMA performance will likely be realized if both + * addr and roffset are cacheline aligned (are a multiple of 64). Lower + * performance will likely be realized if addr and roffset are not + * cacheline aligned but are separated by some multiple of 64. The lowest level + * of performance is likely if addr and roffset are not separated by a + * multiple of 64. + * + * The rma_flags argument is formed by ORing together zero or more of the + * following values. 
+ * SCIF_RMA_USECPU - perform the transfer using the CPU, otherwise use the DMA + * engine. + * SCIF_RMA_USECACHE - enable registration caching + * SCIF_RMA_SYNC - perform the transfer synchronously, returning after the + * transfer has completed. Passing this flag results in the + * current implementation busy waiting and consuming CPU cycles + * while the DMA transfer is in progress for best performance by + * avoiding the interrupt latency. + * SCIF_RMA_ORDERED - ensure that the last cacheline or partial cacheline of + * the source range becomes visible on the destination node + * after all other transferred data in the source range has + * become visible on the destination + * + * Return: + * Upon successful completion, scif_vreadfrom() returns 0; otherwise in user + * mode -1 is returned and errno is set to indicate the error; in kernel mode + * the negative of one of the following errors is returned. + * + * Errors: + * EACCESS - Attempt to write to a read-only range + * EBADF, ENOTTY - epd is not a valid endpoint descriptor + * ECONNRESET - Connection reset by peer + * EINVAL - rma_flags is invalid + * ENODEV - The remote node is lost or existed, but is not currently in the + * network since it may have crashed + * ENOTCONN - The endpoint is not connected + * ENXIO - Offsets in the range [roffset, roffset + len - 1] are invalid for the + * registered address space of epd + */ +int scif_vreadfrom(scif_epd_t epd, void *addr, size_t len, off_t roffset, + int rma_flags); + +/** + * scif_vwriteto() - Copy to a remote address space + * @epd: endpoint descriptor + * @addr: address from which to copy + * @len: length of range to copy + * @roffset: offset in remote registered address space to + * which to copy + * @rma_flags: transfer mode flags + * + * scif_vwriteto() copies len bytes from the local memory, starting at addr, to + * the remote registered address space of the peer of endpoint epd, starting at + * the offset roffset. 
+ * + * The specified range [roffset, roffset + len - 1] must be within some + * registered window or windows of the remote nodes. The range may intersect + * multiple registered windows, but only if those windows are contiguous in the + * registered address space. + * + * If rma_flags includes SCIF_RMA_USECPU, then the data is copied using + * programmed read/writes. Otherwise the data is copied using DMA. If rma_- + * flags includes SCIF_RMA_SYNC, then scif_vwriteto() will return after the + * transfer is complete. Otherwise, the transfer may be performed asynchron- + * ously. The order in which any two asynchronous RMA operations complete + * is non-deterministic. The synchronization functions, scif_fence_mark()/ + * scif_fence_wait() and scif_fence_signal(), can be used to synchronize to + * the completion of asynchronous RMA operations on the same endpoint. + * + * The DMA transfer of individual bytes is not guaranteed to complete in + * address order. If rma_flags includes SCIF_RMA_ORDERED, then the last + * cacheline or partial cacheline of the source range will become visible on + * the destination node after all other transferred data in the source + * range has become visible on the destination node. + * + * If rma_flags includes SCIF_RMA_USECACHE, then the physical pages which back + * the specified local memory range may be remain in a pinned state even after + * the specified transfer completes. This may reduce overhead if some or all of + * the same virtual address range is referenced in a subsequent call of + * scif_vreadfrom() or scif_vwriteto(). + * + * The optimal DMA performance will likely be realized if both + * addr and offset are cacheline aligned (are a multiple of 64). Lower + * performance will likely be realized if addr and offset are not cacheline + * aligned but are separated by some multiple of 64. The lowest level of + * performance is likely if addr and offset are not separated by a multiple of + * 64. 
+ * + * The rma_flags argument is formed by ORing together zero or more of the + * following values. + * SCIF_RMA_USECPU - perform the transfer using the CPU, otherwise use the DMA + * engine. + * SCIF_RMA_USECACHE - allow registration caching + * SCIF_RMA_SYNC - perform the transfer synchronously, returning after the + * transfer has completed. Passing this flag results in the + * current implementation busy waiting and consuming CPU cycles + * while the DMA transfer is in progress for best performance by + * avoiding the interrupt latency. + * SCIF_RMA_ORDERED - ensure that the last cacheline or partial cacheline of + * the source range becomes visible on the destination node + * after all other transferred data in the source range has + * become visible on the destination + * + * Return: + * Upon successful completion, scif_vwriteto() returns 0; otherwise in user + * mode -1 is returned and errno is set to indicate the error; in kernel mode + * the negative of one of the following errors is returned. + * + * Errors: + * EACCESS - Attempt to write to a read-only range + * EBADF, ENOTTY - epd is not a valid endpoint descriptor + * ECONNRESET - Connection reset by peer + * EINVAL - rma_flags is invalid + * ENODEV - The remote node is lost or existed, but is not currently in the + * network since it may have crashed + * ENOTCONN - The endpoint is not connected + * ENXIO - Offsets in the range [roffset, roffset + len - 1] are invalid for the + * registered address space of epd + */ +int scif_vwriteto(scif_epd_t epd, void *addr, size_t len, off_t roffset, + int rma_flags); + +/** + * scif_fence_mark() - Mark previously issued RMAs + * @epd: endpoint descriptor + * @flags: control flags + * @mark: marked value returned as output. + * + * scif_fence_mark() returns after marking the current set of all uncompleted + * RMAs initiated through the endpoint epd or the current set of all + * uncompleted RMAs initiated through the peer of endpoint epd. 
The RMAs are + * marked with a value returned at mark. The application may subsequently call + * scif_fence_wait(), passing the value returned at mark, to await completion + * of all RMAs so marked. + * + * The flags argument has exactly one of the following values. + * SCIF_FENCE_INIT_SELF - RMA operations initiated through endpoint + * epd are marked + * SCIF_FENCE_INIT_PEER - RMA operations initiated through the peer + * of endpoint epd are marked + * + * Return: + * Upon successful completion, scif_fence_mark() returns 0; otherwise in user + * mode -1 is returned and errno is set to indicate the error; in kernel mode + * the negative of one of the following errors is returned. + * + * Errors: + * EBADF, ENOTTY - epd is not a valid endpoint descriptor + * ECONNRESET - Connection reset by peer + * EINVAL - flags is invalid + * ENODEV - The remote node is lost or existed, but is not currently in the + * network since it may have crashed + * ENOTCONN - The endpoint is not connected + * ENOMEM - Insufficient kernel memory was available + */ +int scif_fence_mark(scif_epd_t epd, int flags, int *mark); + +/** + * scif_fence_wait() - Wait for completion of marked RMAs + * @epd: endpoint descriptor + * @mark: mark request + * + * scif_fence_wait() returns after all RMAs marked with mark have completed. + * The value passed in mark must have been obtained in a previous call to + * scif_fence_mark(). + * + * Return: + * Upon successful completion, scif_fence_wait() returns 0; otherwise in user + * mode -1 is returned and errno is set to indicate the error; in kernel mode + * the negative of one of the following errors is returned. 
+ * + * Errors: + * EBADF, ENOTTY - epd is not a valid endpoint descriptor + * ECONNRESET - Connection reset by peer + * ENODEV - The remote node is lost or existed, but is not currently in the + * network since it may have crashed + * ENOTCONN - The endpoint is not connected + * ENOMEM - Insufficient kernel memory was available + */ +int scif_fence_wait(scif_epd_t epd, int mark); + +/** + * scif_fence_signal() - Request a memory update on completion of RMAs + * @epd: endpoint descriptor + * @loff: local offset + * @lval: local value to write to loffset + * @roff: remote offset + * @rval: remote value to write to roffset + * @flags: flags + * + * scif_fence_signal() returns after marking the current set of all uncompleted + * RMAs initiated through the endpoint epd or marking the current set of all + * uncompleted RMAs initiated through the peer of endpoint epd. + * + * If flags includes SCIF_SIGNAL_LOCAL, then on completion of the RMAs in the + * marked set, lval is written to memory at the address corresponding to offset + * loff in the local registered address space of epd. loff must be within a + * registered window. If flags includes SCIF_SIGNAL_REMOTE, then on completion + * of the RMAs in the marked set, rval is written to memory at the address + * corresponding to offset roff in the remote registered address space of epd. + * roff must be within a remote registered window of the peer of epd. Note + * that any specified offset must be DWORD (4 byte / 32 bit) aligned. + * + * The flags argument is formed by OR'ing together the following. + * Exactly one of the following values. + * SCIF_FENCE_INIT_SELF - RMA operations initiated through endpoint + * epd are marked + * SCIF_FENCE_INIT_PEER - RMA operations initiated through the peer + * of endpoint epd are marked + * One or more of the following values. 
+ * SCIF_SIGNAL_LOCAL - On completion of the marked set of RMAs, write lval to + * memory at the address corresponding to offset loff in the local + * registered address space of epd. + * SCIF_SIGNAL_REMOTE - On completion of the marked set of RMAs, write rval to + * memory at the address corresponding to offset roff in the remote + * registered address space of epd. + * + * Return: + * Upon successful completion, scif_fence_signal() returns 0; otherwise in + * user mode -1 is returned and errno is set to indicate the error; in kernel + * mode the negative of one of the following errors is returned. + * + * Errors: + * EBADF, ENOTTY - epd is not a valid endpoint descriptor + * ECONNRESET - Connection reset by peer + * EINVAL - flags is invalid, or loff or roff are not DWORD aligned + * ENODEV - The remote node is lost or existed, but is not currently in the + * network since it may have crashed + * ENOTCONN - The endpoint is not connected + * ENXIO - loff is invalid for the registered address of epd, or roff is invalid + * for the registered address space, of the peer of epd + */ +int scif_fence_signal(scif_epd_t epd, off_t loff, u64 lval, off_t roff, + u64 rval, int flags); + +/** + * scif_get_node_ids() - Return information about online nodes + * @nodes: array in which to return online node IDs + * @len: number of entries in the nodes array + * @self: address to place the node ID of the local node + * + * scif_get_node_ids() fills in the nodes array with up to len node IDs of the + * nodes in the SCIF network. If there is not enough space in nodes, as + * indicated by the len parameter, only len node IDs are returned in nodes. The + * return value of scif_get_node_ids() is the total number of nodes currently in + * the SCIF network. By checking the return value against the len parameter, + * the user may determine if enough space for nodes was allocated. + * + * The node ID of the local node is returned at self. 
+ * + * Return: + * Upon successful completion, scif_get_node_ids() returns the actual number of + * online nodes in the SCIF network including 'self'; otherwise in user mode + * -1 is returned and errno is set to indicate the error; in kernel mode no + * errors are returned. + */ +int scif_get_node_ids(u16 *nodes, int len, u16 *self); + +/** + * scif_pin_pages() - Pin a set of pages + * @addr: Virtual address of range to pin + * @len: Length of range to pin + * @prot_flags: Page protection flags + * @map_flags: Page classification flags + * @pinned_pages: Handle to pinned pages + * + * scif_pin_pages() pins (locks in physical memory) the physical pages which + * back the range of virtual address pages starting at addr and continuing for + * len bytes. addr and len are constrained to be multiples of the page size. A + * successful scif_pin_pages() call returns a handle to pinned_pages which may + * be used in subsequent calls to scif_register_pinned_pages(). + * + * The pages will remain pinned as long as there is a reference against the + * scif_pinned_pages_t value returned by scif_pin_pages() and until + * scif_unpin_pages() is called, passing the scif_pinned_pages_t value. A + * reference is added to a scif_pinned_pages_t value each time a window is + * created by calling scif_register_pinned_pages() and passing the + * scif_pinned_pages_t value. A reference is removed from a + * scif_pinned_pages_t value each time such a window is deleted. + * + * Subsequent operations which change the memory pages to which virtual + * addresses are mapped (such as mmap(), munmap()) have no effect on the + * scif_pinned_pages_t value or windows created against it. + * + * If the process will fork(), it is recommended that the registered + * virtual address range be marked with MADV_DONTFORK. Doing so will prevent + * problems due to copy-on-write semantics. + * + * The prot_flags argument is formed by OR'ing together one or more of the + * following values. 
+ * SCIF_PROT_READ - allow read operations against the pages
+ * SCIF_PROT_WRITE - allow write operations against the pages
+ * The map_flags argument can be set as SCIF_MAP_KERNEL to interpret addr as a
+ * kernel space address. By default, addr is interpreted as a user space
+ * address.
+ *
+ * Return:
+ * Upon successful completion, scif_pin_pages() returns 0; otherwise the
+ * negative of one of the following errors is returned.
+ *
+ * Errors:
+ * EINVAL - prot_flags is invalid, map_flags is invalid, or offset is negative
+ * ENOMEM - Not enough space
+ */
+int scif_pin_pages(void *addr, size_t len, int prot_flags, int map_flags,
+		   scif_pinned_pages_t *pinned_pages);
+
+/**
+ * scif_unpin_pages() - Unpin a set of pages
+ * @pinned_pages:	Handle to pinned pages to be unpinned
+ *
+ * scif_unpin_pages() prevents scif_register_pinned_pages() from registering new
+ * windows against pinned_pages. The physical pages represented by pinned_pages
+ * will remain pinned until all windows previously registered against
+ * pinned_pages are deleted (the window is scif_unregister()'d and all
+ * references to the window are removed (see scif_unregister()).
+ *
+ * pinned_pages must have been obtained from a previous call to
+ * scif_pin_pages(). After calling scif_unpin_pages(), it is an error to pass
+ * pinned_pages to scif_register_pinned_pages().
+ *
+ * Return:
+ * Upon successful completion, scif_unpin_pages() returns 0; otherwise the
+ * negative of one of the following errors is returned.
+ *
+ * Errors:
+ * EINVAL - pinned_pages is not valid
+ */
+int scif_unpin_pages(scif_pinned_pages_t pinned_pages);
+
+/**
+ * scif_register_pinned_pages() - Mark a memory region for remote access.
+ * @epd: endpoint descriptor + * @pinned_pages: Handle to pinned pages + * @offset: Registered address space offset + * @map_flags: Flags which control where pages are mapped + * + * The scif_register_pinned_pages() function opens a window, a range of whole + * pages of the registered address space of the endpoint epd, starting at + * offset po. The value of po, further described below, is a function of the + * parameters offset and pinned_pages, and the value of map_flags. Each page of + * the window represents a corresponding physical memory page of the range + * represented by pinned_pages; the length of the window is the same as the + * length of range represented by pinned_pages. A successful + * scif_register_pinned_pages() call returns po as the return value. + * + * When SCIF_MAP_FIXED is set in the map_flags argument, po will be offset + * exactly, and offset is constrained to be a multiple of the page size. The + * mapping established by scif_register_pinned_pages() will not replace any + * existing registration; an error is returned if any page of the new window + * would intersect an existing window. + * + * When SCIF_MAP_FIXED is not set, the implementation uses offset in an + * implementation-defined manner to arrive at po. The po so chosen will be an + * area of the registered address space that the implementation deems suitable + * for a mapping of the required size. An offset value of 0 is interpreted as + * granting the implementation complete freedom in selecting po, subject to + * constraints described below. A non-zero value of offset is taken to be a + * suggestion of an offset near which the mapping should be placed. When the + * implementation selects a value for po, it does not replace any extant + * window. In all cases, po will be a multiple of the page size. 
+ * + * The physical pages which are so represented by a window are available for + * access in calls to scif_get_pages(), scif_readfrom(), scif_writeto(), + * scif_vreadfrom(), and scif_vwriteto(). While a window is registered, the + * physical pages represented by the window will not be reused by the memory + * subsystem for any other purpose. Note that the same physical page may be + * represented by multiple windows. + * + * Windows created by scif_register_pinned_pages() are unregistered by + * scif_unregister(). + * + * The map_flags argument can be set to SCIF_MAP_FIXED which interprets a + * fixed offset. + * + * Return: + * Upon successful completion, scif_register_pinned_pages() returns the offset + * at which the mapping was placed (po); otherwise the negative of one of the + * following errors is returned. + * + * Errors: + * EADDRINUSE - SCIF_MAP_FIXED is set in map_flags and pages in the new window + * would intersect an existing window + * EAGAIN - The mapping could not be performed due to lack of resources + * ECONNRESET - Connection reset by peer + * EINVAL - map_flags is invalid, or SCIF_MAP_FIXED is set in map_flags, and + * offset is not a multiple of the page size, or offset is negative + * ENODEV - The remote node is lost or existed, but is not currently in the + * network since it may have crashed + * ENOMEM - Not enough space + * ENOTCONN - The endpoint is not connected + */ +off_t scif_register_pinned_pages(scif_epd_t epd, + scif_pinned_pages_t pinned_pages, + off_t offset, int map_flags); + +/** + * scif_get_pages() - Add references to remote registered pages + * @epd: endpoint descriptor + * @offset: remote registered offset + * @len: length of range of pages + * @pages: returned scif_range structure + * + * scif_get_pages() returns the addresses of the physical pages represented by + * those pages of the registered address space of the peer of epd, starting at + * offset and continuing for len bytes. 
offset and len are constrained to be + * multiples of the page size. + * + * All of the pages in the specified range [offset, offset + len - 1] must be + * within a single window of the registered address space of the peer of epd. + * + * The addresses are returned as a virtually contiguous array pointed to by the + * phys_addr component of the scif_range structure whose address is returned in + * pages. The nr_pages component of scif_range is the length of the array. The + * prot_flags component of scif_range holds the protection flag value passed + * when the pages were registered. + * + * Each physical page whose address is returned by scif_get_pages() remains + * available and will not be released for reuse until the scif_range structure + * is returned in a call to scif_put_pages(). The scif_range structure returned + * by scif_get_pages() must be unmodified. + * + * It is an error to call scif_close() on an endpoint on which a scif_range + * structure of that endpoint has not been returned to scif_put_pages(). + * + * Return: + * Upon successful completion, scif_get_pages() returns 0; otherwise the + * negative of one of the following errors is returned. + * Errors: + * ECONNRESET - Connection reset by peer. + * EINVAL - offset is not a multiple of the page size, or offset is negative, or + * len is not a multiple of the page size + * ENODEV - The remote node is lost or existed, but is not currently in the + * network since it may have crashed + * ENOTCONN - The endpoint is not connected + * ENXIO - Offsets in the range [offset, offset + len - 1] are invalid + * for the registered address space of the peer epd + */ +int scif_get_pages(scif_epd_t epd, off_t offset, size_t len, + struct scif_range **pages); + +/** + * scif_put_pages() - Remove references from remote registered pages + * @pages: pages to be returned + * + * scif_put_pages() releases a scif_range structure previously obtained by + * calling scif_get_pages(). 
The physical pages represented by pages may + * be reused when the window which represented those pages is unregistered. + * Therefore, those pages must not be accessed after calling scif_put_pages(). + * + * Return: + * Upon successful completion, scif_put_pages() returns 0; otherwise the + * negative of one of the following errors is returned. + * Errors: + * EINVAL - pages does not point to a valid scif_range structure, or + * the scif_range structure pointed to by pages was already returned + * ENODEV - The remote node is lost or existed, but is not currently in the + * network since it may have crashed + * ENOTCONN - The endpoint is not connected + */ +int scif_put_pages(struct scif_range *pages); + +/** + * scif_poll() - Wait for some event on an endpoint + * @epds: Array of endpoint descriptors + * @nepds: Length of epds + * @timeout: Upper limit on time for which scif_poll() will block + * + * scif_poll() waits for one of a set of endpoints to become ready to perform + * an I/O operation. + * + * The epds argument specifies the endpoint descriptors to be examined and the + * events of interest for each endpoint descriptor. epds is a pointer to an + * array with one member for each open endpoint descriptor of interest. + * + * The number of items in the epds array is specified in nepds. The epd field + * of scif_pollepd is an endpoint descriptor of an open endpoint. The field + * events is a bitmask specifying the events which the application is + * interested in. The field revents is an output parameter, filled by the + * kernel with the events that actually occurred. The bits returned in revents + * can include any of those specified in events, or one of the values POLLERR, + * POLLHUP, or POLLNVAL. (These three bits are meaningless in the events + * field, and will be set in the revents field whenever the corresponding + * condition is true.) 
+ * + * If none of the events requested (and no error) has occurred for any of the + * endpoint descriptors, then scif_poll() blocks until one of the events occurs. + * + * The timeout argument specifies an upper limit on the time for which + * scif_poll() will block, in milliseconds. Specifying a negative value in + * timeout means an infinite timeout. + * + * The following bits may be set in events and returned in revents. + * POLLIN - Data may be received without blocking. For a connected + * endpoint, this means that scif_recv() may be called without blocking. For a + * listening endpoint, this means that scif_accept() may be called without + * blocking. + * POLLOUT - Data may be sent without blocking. For a connected endpoint, this + * means that scif_send() may be called without blocking. POLLOUT may also be + * used to block waiting for a non-blocking connect to complete. This bit value + * has no meaning for a listening endpoint and is ignored if specified. + * + * The following bits are only returned in revents, and are ignored if set in + * events. + * POLLERR - An error occurred on the endpoint + * POLLHUP - The connection to the peer endpoint was disconnected + * POLLNVAL - The specified endpoint descriptor is invalid. + * + * Return: + * Upon successful completion, scif_poll() returns a non-negative value. A + * positive value indicates the total number of endpoint descriptors that have + * been selected (that is, endpoint descriptors for which the revents member is + * non-zero). A value of 0 indicates that the call timed out and no endpoint + * descriptors have been selected. Otherwise in user mode -1 is returned and + * errno is set to indicate the error; in kernel mode the negative of one of + * the following errors is returned. 
+ * + * Errors: + * EINTR - A signal occurred before any requested event + * EINVAL - The nepds argument is greater than {OPEN_MAX} + * ENOMEM - There was no space to allocate file descriptor tables + */ +int scif_poll(struct scif_pollepd *epds, unsigned int nepds, long timeout); + +/** + * scif_client_register() - Register a SCIF client + * @client: client to be registered + * + * scif_client_register() registers a SCIF client. The probe() method + * of the client is called when SCIF peer devices come online and the + * remove() method is called when the peer devices disappear. + * + * Return: + * Upon successful completion, scif_client_register() returns a non-negative + * value. Otherwise the return value is the same as subsys_interface_register() + * in the kernel. + */ +int scif_client_register(struct scif_client *client); + +/** + * scif_client_unregister() - Unregister a SCIF client + * @client: client to be unregistered + * + * scif_client_unregister() unregisters a SCIF client. + * + * Return: + * None + */ +void scif_client_unregister(struct scif_client *client); + +#endif /* __SCIF_H__ */ diff --git a/include/linux/scpi_protocol.h b/include/linux/scpi_protocol.h index d2176a5682..dc5f989be2 100644 --- a/include/linux/scpi_protocol.h +++ b/include/linux/scpi_protocol.h @@ -1,13 +1,20 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * SCPI Message Protocol driver header * * Copyright (C) 2014 ARM Ltd. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program. 
If not, see . */ - -#ifndef _LINUX_SCPI_PROTOCOL_H -#define _LINUX_SCPI_PROTOCOL_H - #include struct scpi_opp { @@ -51,14 +58,6 @@ struct scpi_sensor_info { * OPP is an index to the list return by @dvfs_get_info * @dvfs_get_info: returns the DVFS capabilities of the given power * domain. It includes the OPP list and the latency information - * @device_domain_id: gets the scpi domain id for a given device - * @get_transition_latency: gets the DVFS transition latency for a given device - * @add_opps_to_device: adds all the OPPs for a given device - * @sensor_get_capability: get the list of capabilities for the sensors - * @sensor_get_info: get the information of the specified sensor - * @sensor_get_value: gets the current value of the sensor - * @device_get_power_state: gets the power state of a power domain - * @device_set_power_state: sets the power state of a power domain */ struct scpi_ops { u32 (*get_version)(void); @@ -68,9 +67,6 @@ struct scpi_ops { int (*dvfs_get_idx)(u8); int (*dvfs_set_idx)(u8, u8); struct scpi_dvfs_info *(*dvfs_get_info)(u8); - int (*device_domain_id)(struct device *); - int (*get_transition_latency)(struct device *); - int (*add_opps_to_device)(struct device *); int (*sensor_get_capability)(u16 *sensors); int (*sensor_get_info)(u16 sensor_id, struct scpi_sensor_info *); int (*sensor_get_value)(u16, u64 *); @@ -83,5 +79,3 @@ struct scpi_ops *get_scpi_ops(void); #else static inline struct scpi_ops *get_scpi_ops(void) { return NULL; } #endif - -#endif /* _LINUX_SCPI_PROTOCOL_H */ diff --git a/include/linux/screen_info.h b/include/linux/screen_info.h index eab7081392..f0f8bad54b 100644 --- a/include/linux/screen_info.h +++ b/include/linux/screen_info.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _SCREEN_INFO_H #define _SCREEN_INFO_H diff --git a/include/linux/sctp.h b/include/linux/sctp.h index a86e852507..fcb4c36461 100644 --- a/include/linux/sctp.h +++ b/include/linux/sctp.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: 
GPL-2.0-or-later */ /* SCTP kernel reference Implementation * (C) Copyright IBM Corp. 2001, 2004 * Copyright (c) 1999-2000 Cisco, Inc. @@ -11,6 +10,22 @@ * * Various protocol defined structures. * + * This SCTP implementation is free software; + * you can redistribute it and/or modify it under the terms of + * the GNU General Public License as published by + * the Free Software Foundation; either version 2, or (at your option) + * any later version. + * + * This SCTP implementation is distributed in the hope that it + * will be useful, but WITHOUT ANY WARRANTY; without even the implied + * ************************ + * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * See the GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with GNU CC; see the file COPYING. If not, see + * . + * * Please send any bug reports or fixes you make to the * email address(es): * lksctp developers @@ -42,12 +57,12 @@ #include /* Section 3.1. SCTP Common Header Format */ -struct sctphdr { +typedef struct sctphdr { __be16 source; __be16 dest; __be32 vtag; __le32 checksum; -}; +} __packed sctp_sctphdr_t; static inline struct sctphdr *sctp_hdr(const struct sk_buff *skb) { @@ -55,11 +70,11 @@ static inline struct sctphdr *sctp_hdr(const struct sk_buff *skb) } /* Section 3.2. Chunk Field Descriptions. */ -struct sctp_chunkhdr { +typedef struct sctp_chunkhdr { __u8 type; __u8 flags; __be16 length; -}; +} __packed sctp_chunkhdr_t; /* Section 3.2. Chunk Type Values. @@ -67,7 +82,7 @@ struct sctp_chunkhdr { * Value field. It takes a value from 0 to 254. The value of 255 is * reserved for future use as an extension field. */ -enum sctp_cid { +typedef enum { SCTP_CID_DATA = 0, SCTP_CID_INIT = 1, SCTP_CID_INIT_ACK = 2, @@ -87,19 +102,13 @@ enum sctp_cid { /* AUTH Extension Section 4.1 */ SCTP_CID_AUTH = 0x0F, - /* sctp ndata 5.1. 
I-DATA */ - SCTP_CID_I_DATA = 0x40, - /* PR-SCTP Sec 3.2 */ SCTP_CID_FWD_TSN = 0xC0, /* Use hex, as defined in ADDIP sec. 3.1 */ SCTP_CID_ASCONF = 0xC1, - SCTP_CID_I_FWD_TSN = 0xC2, SCTP_CID_ASCONF_ACK = 0x80, - SCTP_CID_RECONF = 0x82, - SCTP_CID_PAD = 0x84, -}; /* enum */ +} sctp_cid_t; /* enum */ /* Section 3.2 @@ -107,12 +116,12 @@ enum sctp_cid { * the action that must be taken if the processing endpoint does not * recognize the Chunk Type. */ -enum { +typedef enum { SCTP_CID_ACTION_DISCARD = 0x00, SCTP_CID_ACTION_DISCARD_ERR = 0x40, SCTP_CID_ACTION_SKIP = 0x80, SCTP_CID_ACTION_SKIP_ERR = 0xc0, -}; +} sctp_cid_action_t; enum { SCTP_CID_ACTION_MASK = 0xc0, }; @@ -152,12 +161,12 @@ enum { SCTP_CHUNK_FLAG_T = 0x01 }; * Section 3.2.1 Optional/Variable-length Parmaeter Format. */ -struct sctp_paramhdr { +typedef struct sctp_paramhdr { __be16 type; __be16 length; -}; +} __packed sctp_paramhdr_t; -enum sctp_param { +typedef enum { /* RFC 2960 Section 3.3.5 */ SCTP_PARAM_HEARTBEAT_INFO = cpu_to_be16(1), @@ -190,14 +199,7 @@ enum sctp_param { SCTP_PARAM_SUCCESS_REPORT = cpu_to_be16(0xc005), SCTP_PARAM_ADAPTATION_LAYER_IND = cpu_to_be16(0xc006), - /* RE-CONFIG. Section 4 */ - SCTP_PARAM_RESET_OUT_REQUEST = cpu_to_be16(0x000d), - SCTP_PARAM_RESET_IN_REQUEST = cpu_to_be16(0x000e), - SCTP_PARAM_RESET_TSN_REQUEST = cpu_to_be16(0x000f), - SCTP_PARAM_RESET_RESPONSE = cpu_to_be16(0x0010), - SCTP_PARAM_RESET_ADD_OUT_STREAMS = cpu_to_be16(0x0011), - SCTP_PARAM_RESET_ADD_IN_STREAMS = cpu_to_be16(0x0012), -}; /* enum */ +} sctp_param_t; /* enum */ /* RFC 2960 Section 3.2.1 @@ -206,46 +208,29 @@ enum sctp_param { * not recognize the Parameter Type. 
* */ -enum { +typedef enum { SCTP_PARAM_ACTION_DISCARD = cpu_to_be16(0x0000), SCTP_PARAM_ACTION_DISCARD_ERR = cpu_to_be16(0x4000), SCTP_PARAM_ACTION_SKIP = cpu_to_be16(0x8000), SCTP_PARAM_ACTION_SKIP_ERR = cpu_to_be16(0xc000), -}; +} sctp_param_action_t; enum { SCTP_PARAM_ACTION_MASK = cpu_to_be16(0xc000), }; /* RFC 2960 Section 3.3.1 Payload Data (DATA) (0) */ -struct sctp_datahdr { +typedef struct sctp_datahdr { __be32 tsn; __be16 stream; __be16 ssn; - __u32 ppid; - __u8 payload[]; -}; + __be32 ppid; + __u8 payload[0]; +} __packed sctp_datahdr_t; -struct sctp_data_chunk { - struct sctp_chunkhdr chunk_hdr; - struct sctp_datahdr data_hdr; -}; - -struct sctp_idatahdr { - __be32 tsn; - __be16 stream; - __be16 reserved; - __be32 mid; - union { - __u32 ppid; - __be32 fsn; - }; - __u8 payload[0]; -}; - -struct sctp_idata_chunk { - struct sctp_chunkhdr chunk_hdr; - struct sctp_idatahdr data_hdr; -}; +typedef struct sctp_data_chunk { + sctp_chunkhdr_t chunk_hdr; + sctp_datahdr_t data_hdr; +} __packed sctp_data_chunk_t; /* DATA Chuck Specific Flags */ enum { @@ -264,101 +249,103 @@ enum { SCTP_DATA_FRAG_MASK = 0x03, }; * This chunk is used to initiate a SCTP association between two * endpoints. */ -struct sctp_inithdr { +typedef struct sctp_inithdr { __be32 init_tag; __be32 a_rwnd; __be16 num_outbound_streams; __be16 num_inbound_streams; __be32 initial_tsn; - __u8 params[]; -}; + __u8 params[0]; +} __packed sctp_inithdr_t; -struct sctp_init_chunk { - struct sctp_chunkhdr chunk_hdr; - struct sctp_inithdr init_hdr; -}; +typedef struct sctp_init_chunk { + sctp_chunkhdr_t chunk_hdr; + sctp_inithdr_t init_hdr; +} __packed sctp_init_chunk_t; /* Section 3.3.2.1. IPv4 Address Parameter (5) */ -struct sctp_ipv4addr_param { - struct sctp_paramhdr param_hdr; - struct in_addr addr; -}; +typedef struct sctp_ipv4addr_param { + sctp_paramhdr_t param_hdr; + struct in_addr addr; +} __packed sctp_ipv4addr_param_t; /* Section 3.3.2.1. 
IPv6 Address Parameter (6) */ -struct sctp_ipv6addr_param { - struct sctp_paramhdr param_hdr; +typedef struct sctp_ipv6addr_param { + sctp_paramhdr_t param_hdr; struct in6_addr addr; -}; +} __packed sctp_ipv6addr_param_t; /* Section 3.3.2.1 Cookie Preservative (9) */ -struct sctp_cookie_preserve_param { - struct sctp_paramhdr param_hdr; - __be32 lifespan_increment; -}; +typedef struct sctp_cookie_preserve_param { + sctp_paramhdr_t param_hdr; + __be32 lifespan_increment; +} __packed sctp_cookie_preserve_param_t; /* Section 3.3.2.1 Host Name Address (11) */ -struct sctp_hostname_param { - struct sctp_paramhdr param_hdr; - uint8_t hostname[]; -}; +typedef struct sctp_hostname_param { + sctp_paramhdr_t param_hdr; + uint8_t hostname[0]; +} __packed sctp_hostname_param_t; /* Section 3.3.2.1 Supported Address Types (12) */ -struct sctp_supported_addrs_param { - struct sctp_paramhdr param_hdr; - __be16 types[]; -}; +typedef struct sctp_supported_addrs_param { + sctp_paramhdr_t param_hdr; + __be16 types[0]; +} __packed sctp_supported_addrs_param_t; + +/* Appendix A. 
ECN Capable (32768) */ +typedef struct sctp_ecn_capable_param { + sctp_paramhdr_t param_hdr; +} __packed sctp_ecn_capable_param_t; /* ADDIP Section 3.2.6 Adaptation Layer Indication */ -struct sctp_adaptation_ind_param { +typedef struct sctp_adaptation_ind_param { struct sctp_paramhdr param_hdr; __be32 adaptation_ind; -}; +} __packed sctp_adaptation_ind_param_t; /* ADDIP Section 4.2.7 Supported Extensions Parameter */ -struct sctp_supported_ext_param { +typedef struct sctp_supported_ext_param { struct sctp_paramhdr param_hdr; - __u8 chunks[]; -}; + __u8 chunks[0]; +} __packed sctp_supported_ext_param_t; /* AUTH Section 3.1 Random */ -struct sctp_random_param { - struct sctp_paramhdr param_hdr; - __u8 random_val[]; -}; +typedef struct sctp_random_param { + sctp_paramhdr_t param_hdr; + __u8 random_val[0]; +} __packed sctp_random_param_t; /* AUTH Section 3.2 Chunk List */ -struct sctp_chunks_param { - struct sctp_paramhdr param_hdr; - __u8 chunks[]; -}; +typedef struct sctp_chunks_param { + sctp_paramhdr_t param_hdr; + __u8 chunks[0]; +} __packed sctp_chunks_param_t; /* AUTH Section 3.3 HMAC Algorithm */ -struct sctp_hmac_algo_param { - struct sctp_paramhdr param_hdr; - __be16 hmac_ids[]; -}; +typedef struct sctp_hmac_algo_param { + sctp_paramhdr_t param_hdr; + __be16 hmac_ids[0]; +} __packed sctp_hmac_algo_param_t; /* RFC 2960. Section 3.3.3 Initiation Acknowledgement (INIT ACK) (2): * The INIT ACK chunk is used to acknowledge the initiation of an SCTP * association. 
*/ -struct sctp_initack_chunk { - struct sctp_chunkhdr chunk_hdr; - struct sctp_inithdr init_hdr; -}; +typedef sctp_init_chunk_t sctp_initack_chunk_t; /* Section 3.3.3.1 State Cookie (7) */ -struct sctp_cookie_param { - struct sctp_paramhdr p; - __u8 body[]; -}; +typedef struct sctp_cookie_param { + sctp_paramhdr_t p; + __u8 body[0]; +} __packed sctp_cookie_param_t; /* Section 3.3.3.1 Unrecognized Parameters (8) */ -struct sctp_unrecognized_param { - struct sctp_paramhdr param_hdr; - struct sctp_paramhdr unrecognized; -}; +typedef struct sctp_unrecognized_param { + sctp_paramhdr_t param_hdr; + sctp_paramhdr_t unrecognized; +} __packed sctp_unrecognized_param_t; @@ -370,28 +357,30 @@ struct sctp_unrecognized_param { * subsequences of DATA chunks as represented by their TSNs. */ -struct sctp_gap_ack_block { +typedef struct sctp_gap_ack_block { __be16 start; __be16 end; -}; +} __packed sctp_gap_ack_block_t; -union sctp_sack_variable { - struct sctp_gap_ack_block gab; - __be32 dup; -}; +typedef __be32 sctp_dup_tsn_t; -struct sctp_sackhdr { +typedef union { + sctp_gap_ack_block_t gab; + sctp_dup_tsn_t dup; +} sctp_sack_variable_t; + +typedef struct sctp_sackhdr { __be32 cum_tsn_ack; __be32 a_rwnd; __be16 num_gap_ack_blocks; __be16 num_dup_tsns; - union sctp_sack_variable variable[]; -}; + sctp_sack_variable_t variable[0]; +} __packed sctp_sackhdr_t; -struct sctp_sack_chunk { - struct sctp_chunkhdr chunk_hdr; - struct sctp_sackhdr sack_hdr; -}; +typedef struct sctp_sack_chunk { + sctp_chunkhdr_t chunk_hdr; + sctp_sackhdr_t sack_hdr; +} __packed sctp_sack_chunk_t; /* RFC 2960. Section 3.3.5 Heartbeat Request (HEARTBEAT) (4): @@ -401,55 +390,49 @@ struct sctp_sack_chunk { * the present association. 
*/ -struct sctp_heartbeathdr { - struct sctp_paramhdr info; -}; +typedef struct sctp_heartbeathdr { + sctp_paramhdr_t info; +} __packed sctp_heartbeathdr_t; -struct sctp_heartbeat_chunk { - struct sctp_chunkhdr chunk_hdr; - struct sctp_heartbeathdr hb_hdr; -}; - - -/* PAD chunk could be bundled with heartbeat chunk to probe pmtu */ -struct sctp_pad_chunk { - struct sctp_chunkhdr uh; -}; +typedef struct sctp_heartbeat_chunk { + sctp_chunkhdr_t chunk_hdr; + sctp_heartbeathdr_t hb_hdr; +} __packed sctp_heartbeat_chunk_t; /* For the abort and shutdown ACK we must carry the init tag in the * common header. Just the common header is all that is needed with a * chunk descriptor. */ -struct sctp_abort_chunk { - struct sctp_chunkhdr uh; -}; +typedef struct sctp_abort_chunk { + sctp_chunkhdr_t uh; +} __packed sctp_abort_chunk_t; /* For the graceful shutdown we must carry the tag (in common header) * and the highest consecutive acking value. */ -struct sctp_shutdownhdr { +typedef struct sctp_shutdownhdr { __be32 cum_tsn_ack; -}; +} __packed sctp_shutdownhdr_t; -struct sctp_shutdown_chunk { - struct sctp_chunkhdr chunk_hdr; - struct sctp_shutdownhdr shutdown_hdr; -}; +struct sctp_shutdown_chunk_t { + sctp_chunkhdr_t chunk_hdr; + sctp_shutdownhdr_t shutdown_hdr; +} __packed; /* RFC 2960. 
Section 3.3.10 Operation Error (ERROR) (9) */ -struct sctp_errhdr { +typedef struct sctp_errhdr { __be16 cause; __be16 length; - __u8 variable[]; -}; + __u8 variable[0]; +} __packed sctp_errhdr_t; -struct sctp_operr_chunk { - struct sctp_chunkhdr chunk_hdr; - struct sctp_errhdr err_hdr; -}; +typedef struct sctp_operr_chunk { + sctp_chunkhdr_t chunk_hdr; + sctp_errhdr_t err_hdr; +} __packed sctp_operr_chunk_t; /* RFC 2960 3.3.10 - Operation Error * @@ -470,7 +453,7 @@ struct sctp_operr_chunk { * 9 No User Data * 10 Cookie Received While Shutting Down */ -enum sctp_error { +typedef enum { SCTP_ERROR_NO_ERROR = cpu_to_be16(0x00), SCTP_ERROR_INV_STRM = cpu_to_be16(0x01), @@ -489,13 +472,11 @@ enum sctp_error { * 11 Restart of an association with new addresses * 12 User Initiated Abort * 13 Protocol Violation - * 14 Restart of an Association with New Encapsulation Port */ SCTP_ERROR_RESTART = cpu_to_be16(0x0b), SCTP_ERROR_USER_ABORT = cpu_to_be16(0x0c), SCTP_ERROR_PROTO_VIOLATION = cpu_to_be16(0x0d), - SCTP_ERROR_NEW_ENCAP_PORT = cpu_to_be16(0x0e), /* ADDIP Section 3.3 New Error Causes * @@ -527,28 +508,33 @@ enum sctp_error { * 0x0105 Unsupported HMAC Identifier */ SCTP_ERROR_UNSUP_HMAC = cpu_to_be16(0x0105) -}; +} sctp_error_t; /* RFC 2960. Appendix A. Explicit Congestion Notification. * Explicit Congestion Notification Echo (ECNE) (12) */ -struct sctp_ecnehdr { +typedef struct sctp_ecnehdr { __be32 lowest_tsn; -}; +} sctp_ecnehdr_t; -struct sctp_ecne_chunk { - struct sctp_chunkhdr chunk_hdr; - struct sctp_ecnehdr ence_hdr; -}; +typedef struct sctp_ecne_chunk { + sctp_chunkhdr_t chunk_hdr; + sctp_ecnehdr_t ence_hdr; +} __packed sctp_ecne_chunk_t; /* RFC 2960. Appendix A. Explicit Congestion Notification. 
* Congestion Window Reduced (CWR) (13) */ -struct sctp_cwrhdr { +typedef struct sctp_cwrhdr { __be32 lowest_tsn; -}; +} sctp_cwrhdr_t; + +typedef struct sctp_cwr_chunk { + sctp_chunkhdr_t chunk_hdr; + sctp_cwrhdr_t cwr_hdr; +} __packed sctp_cwr_chunk_t; /* PR-SCTP * 3.2 Forward Cumulative TSN Chunk Definition (FORWARD TSN) @@ -599,34 +585,18 @@ struct sctp_cwrhdr { struct sctp_fwdtsn_skip { __be16 stream; __be16 ssn; -}; +} __packed; struct sctp_fwdtsn_hdr { __be32 new_cum_tsn; - struct sctp_fwdtsn_skip skip[]; -}; + struct sctp_fwdtsn_skip skip[0]; +} __packed; struct sctp_fwdtsn_chunk { struct sctp_chunkhdr chunk_hdr; struct sctp_fwdtsn_hdr fwdtsn_hdr; -}; +} __packed; -struct sctp_ifwdtsn_skip { - __be16 stream; - __u8 reserved; - __u8 flags; - __be32 mid; -}; - -struct sctp_ifwdtsn_hdr { - __be32 new_cum_tsn; - struct sctp_ifwdtsn_skip skip[]; -}; - -struct sctp_ifwdtsn_chunk { - struct sctp_chunkhdr chunk_hdr; - struct sctp_ifwdtsn_hdr fwdtsn_hdr; -}; /* ADDIP * Section 3.1.1 Address Configuration Change Chunk (ASCONF) @@ -660,20 +630,20 @@ struct sctp_ifwdtsn_chunk { * The ASCONF Parameter Response is used in the ASCONF-ACK to * report status of ASCONF processing. */ -struct sctp_addip_param { - struct sctp_paramhdr param_hdr; - __be32 crr_id; -}; +typedef struct sctp_addip_param { + sctp_paramhdr_t param_hdr; + __be32 crr_id; +} __packed sctp_addip_param_t; -struct sctp_addiphdr { +typedef struct sctp_addiphdr { __be32 serial; - __u8 params[]; -}; + __u8 params[0]; +} __packed sctp_addiphdr_t; -struct sctp_addip_chunk { - struct sctp_chunkhdr chunk_hdr; - struct sctp_addiphdr addip_hdr; -}; +typedef struct sctp_addip_chunk { + sctp_chunkhdr_t chunk_hdr; + sctp_addiphdr_t addip_hdr; +} __packed sctp_addip_chunk_t; /* AUTH * Section 4.1 Authentication Chunk (AUTH) @@ -724,100 +694,20 @@ struct sctp_addip_chunk { * HMAC: n bytes (unsigned integer) This hold the result of the HMAC * calculation. 
*/ -struct sctp_authhdr { +typedef struct sctp_authhdr { __be16 shkey_id; __be16 hmac_id; - __u8 hmac[]; -}; + __u8 hmac[0]; +} __packed sctp_authhdr_t; -struct sctp_auth_chunk { - struct sctp_chunkhdr chunk_hdr; - struct sctp_authhdr auth_hdr; -}; +typedef struct sctp_auth_chunk { + sctp_chunkhdr_t chunk_hdr; + sctp_authhdr_t auth_hdr; +} __packed sctp_auth_chunk_t; struct sctp_infox { struct sctp_info *sctpinfo; struct sctp_association *asoc; }; -struct sctp_reconf_chunk { - struct sctp_chunkhdr chunk_hdr; - __u8 params[]; -}; - -struct sctp_strreset_outreq { - struct sctp_paramhdr param_hdr; - __be32 request_seq; - __be32 response_seq; - __be32 send_reset_at_tsn; - __be16 list_of_streams[]; -}; - -struct sctp_strreset_inreq { - struct sctp_paramhdr param_hdr; - __be32 request_seq; - __be16 list_of_streams[]; -}; - -struct sctp_strreset_tsnreq { - struct sctp_paramhdr param_hdr; - __be32 request_seq; -}; - -struct sctp_strreset_addstrm { - struct sctp_paramhdr param_hdr; - __be32 request_seq; - __be16 number_of_streams; - __be16 reserved; -}; - -enum { - SCTP_STRRESET_NOTHING_TO_DO = 0x00, - SCTP_STRRESET_PERFORMED = 0x01, - SCTP_STRRESET_DENIED = 0x02, - SCTP_STRRESET_ERR_WRONG_SSN = 0x03, - SCTP_STRRESET_ERR_IN_PROGRESS = 0x04, - SCTP_STRRESET_ERR_BAD_SEQNO = 0x05, - SCTP_STRRESET_IN_PROGRESS = 0x06, -}; - -struct sctp_strreset_resp { - struct sctp_paramhdr param_hdr; - __be32 response_seq; - __be32 result; -}; - -struct sctp_strreset_resptsn { - struct sctp_paramhdr param_hdr; - __be32 response_seq; - __be32 result; - __be32 senders_next_tsn; - __be32 receivers_next_tsn; -}; - -enum { - SCTP_DSCP_SET_MASK = 0x1, - SCTP_DSCP_VAL_MASK = 0xfc, - SCTP_FLOWLABEL_SET_MASK = 0x100000, - SCTP_FLOWLABEL_VAL_MASK = 0xfffff -}; - -/* UDP Encapsulation - * draft-tuexen-tsvwg-sctp-udp-encaps-cons-03.html#section-4-4 - * - * The error cause indicating an "Restart of an Association with - * New Encapsulation Port" - * - * 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 
7 8 9 0 1 - * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ - * | Cause Code = 14 | Cause Length = 8 | - * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ - * | Current Encapsulation Port | New Encapsulation Port | - * +-------------------------------+-------------------------------+ - */ -struct sctp_new_encap_port_hdr { - __be16 cur_port; - __be16 new_port; -}; - #endif /* __LINUX_SCTP_H__ */ diff --git a/include/linux/scx200.h b/include/linux/scx200.h index 652ec1a45f..de466e11e2 100644 --- a/include/linux/scx200.h +++ b/include/linux/scx200.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ /* linux/include/linux/scx200.h Copyright (c) 2001,2002 Christer Weinigel diff --git a/include/linux/scx200_gpio.h b/include/linux/scx200_gpio.h index 6386ddbb6b..ece4e553e9 100644 --- a/include/linux/scx200_gpio.h +++ b/include/linux/scx200_gpio.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ u32 scx200_gpio_configure(unsigned index, u32 set, u32 clear); extern unsigned scx200_gpio_base; diff --git a/include/linux/sdb.h b/include/linux/sdb.h index a2404a2bbd..fbb76a46c8 100644 --- a/include/linux/sdb.h +++ b/include/linux/sdb.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ /* * This is the official version 1.1 of sdb.h */ diff --git a/include/linux/sdla.h b/include/linux/sdla.h index 00e8b3b614..fe7a967d7d 100644 --- a/include/linux/sdla.h +++ b/include/linux/sdla.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * INET An implementation of the TCP/IP protocol suite for the LINUX * operating system. 
INET is implemented using the BSD Socket @@ -14,6 +13,11 @@ * 0.15 Mike McLagan Structure packing * * 0.20 Mike McLagan New flags for S508 buffer handling + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. */ #ifndef SDLA_H #define SDLA_H diff --git a/include/linux/seccomp.h b/include/linux/seccomp.h index 0c564e5d40..c61c5f9219 100644 --- a/include/linux/seccomp.h +++ b/include/linux/seccomp.h @@ -1,23 +1,13 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_SECCOMP_H #define _LINUX_SECCOMP_H #include -#define SECCOMP_FILTER_FLAG_MASK (SECCOMP_FILTER_FLAG_TSYNC | \ - SECCOMP_FILTER_FLAG_LOG | \ - SECCOMP_FILTER_FLAG_SPEC_ALLOW | \ - SECCOMP_FILTER_FLAG_NEW_LISTENER | \ - SECCOMP_FILTER_FLAG_TSYNC_ESRCH) - -/* sizeof() the first published struct seccomp_notif_addfd */ -#define SECCOMP_NOTIFY_ADDFD_SIZE_VER0 24 -#define SECCOMP_NOTIFY_ADDFD_SIZE_LATEST SECCOMP_NOTIFY_ADDFD_SIZE_VER0 +#define SECCOMP_FILTER_FLAG_MASK (SECCOMP_FILTER_FLAG_TSYNC) #ifdef CONFIG_SECCOMP -#include -#include +//#include #include struct seccomp_filter; @@ -34,24 +24,17 @@ struct seccomp_filter; */ struct seccomp { int mode; - atomic_t filter_count; struct seccomp_filter *filter; }; #ifdef CONFIG_HAVE_ARCH_SECCOMP_FILTER -extern int __secure_computing(const struct seccomp_data *sd); -static inline int secure_computing(void) -{ - if (unlikely(test_syscall_work(SECCOMP))) - return __secure_computing(NULL); - return 0; -} +extern int secure_computing(const struct seccomp_data *sd); #else extern void secure_computing_strict(int this_syscall); #endif extern long prctl_get_seccomp(void); -extern long prctl_set_seccomp(unsigned long, void __user *); +extern long prctl_set_seccomp(unsigned long, char __user *); static inline int seccomp_mode(struct seccomp *s) { @@ -64,11 +47,9 @@ static 
inline int seccomp_mode(struct seccomp *s) struct seccomp { }; struct seccomp_filter { }; -struct seccomp_data; #ifdef CONFIG_HAVE_ARCH_SECCOMP_FILTER -static inline int secure_computing(void) { return 0; } -static inline int __secure_computing(const struct seccomp_data *sd) { return 0; } +static inline int secure_computing(struct seccomp_data *sd) { return 0; } #else static inline void secure_computing_strict(int this_syscall) { return; } #endif @@ -90,10 +71,10 @@ static inline int seccomp_mode(struct seccomp *s) #endif /* CONFIG_SECCOMP */ #ifdef CONFIG_SECCOMP_FILTER -extern void seccomp_filter_release(struct task_struct *tsk); +extern void put_seccomp_filter(struct task_struct *tsk); extern void get_seccomp_filter(struct task_struct *tsk); #else /* CONFIG_SECCOMP_FILTER */ -static inline void seccomp_filter_release(struct task_struct *tsk) +static inline void put_seccomp_filter(struct task_struct *tsk) { return; } @@ -106,26 +87,11 @@ static inline void get_seccomp_filter(struct task_struct *tsk) #if defined(CONFIG_SECCOMP_FILTER) && defined(CONFIG_CHECKPOINT_RESTORE) extern long seccomp_get_filter(struct task_struct *task, unsigned long filter_off, void __user *data); -extern long seccomp_get_metadata(struct task_struct *task, - unsigned long filter_off, void __user *data); #else static inline long seccomp_get_filter(struct task_struct *task, unsigned long n, void __user *data) { return -EINVAL; } -static inline long seccomp_get_metadata(struct task_struct *task, - unsigned long filter_off, - void __user *data) -{ - return -EINVAL; -} #endif /* CONFIG_SECCOMP_FILTER && CONFIG_CHECKPOINT_RESTORE */ - -#ifdef CONFIG_SECCOMP_CACHE_DEBUG -struct seq_file; - -int proc_pid_seccomp_cache(struct seq_file *m, struct pid_namespace *ns, - struct pid *pid, struct task_struct *task); -#endif #endif /* _LINUX_SECCOMP_H */ diff --git a/include/linux/securebits.h b/include/linux/securebits.h index 6565286739..da1b33b33a 100644 --- a/include/linux/securebits.h +++ 
b/include/linux/securebits.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_SECUREBITS_H #define _LINUX_SECUREBITS_H 1 diff --git a/include/linux/security.h b/include/linux/security.h index 5b72885213..365bf43bc9 100644 --- a/include/linux/security.h +++ b/include/linux/security.h @@ -6,7 +6,6 @@ * Copyright (C) 2001 Networks Associates Technology, Inc * Copyright (C) 2001 James Morris * Copyright (C) 2001 Silicon Graphics, Inc. (Trust Technology Group) - * Copyright (C) 2016 Mellanox Techonologies * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by @@ -23,7 +22,6 @@ #ifndef __LINUX_SECURITY_H #define __LINUX_SECURITY_H -#include #include #include #include @@ -31,11 +29,14 @@ #include #include #include +#include +#include struct linux_binprm; struct cred; struct rlimit; -struct kernel_siginfo; +struct siginfo; +struct sem_array; struct sembuf; struct kern_ipc_perm; struct audit_context; @@ -49,25 +50,18 @@ struct qstr; struct iattr; struct fown_struct; struct file_operations; +struct shmid_kernel; struct msg_msg; +struct msg_queue; struct xattr; -struct kernfs_node; struct xfrm_sec_ctx; struct mm_struct; -struct fs_context; -struct fs_parameter; -enum fs_value_type; -struct watch; -struct watch_notification; -/* Default (no) options for the capable function */ -#define CAP_OPT_NONE 0x0 /* If capable should audit the security request */ -#define CAP_OPT_NOAUDIT BIT(1) -/* If capable is being called by a setid function */ -#define CAP_OPT_INSETID BIT(2) +#define SECURITY_CAP_NOAUDIT 0 +#define SECURITY_CAP_AUDIT 1 -/* LSM Agnostic defines for fs_context::lsm_flags */ +/* LSM Agnostic defines for sb_set_mnt_opts */ #define SECURITY_LSM_NATIVE_LABELS 1 struct ctl_table; @@ -75,68 +69,9 @@ struct audit_krule; struct user_namespace; struct timezone; -enum lsm_event { - LSM_POLICY_CHANGE, -}; - -/* - * These are reasons that can be passed to the 
security_locked_down() - * LSM hook. Lockdown reasons that protect kernel integrity (ie, the - * ability for userland to modify kernel code) are placed before - * LOCKDOWN_INTEGRITY_MAX. Lockdown reasons that protect kernel - * confidentiality (ie, the ability for userland to extract - * information from the running kernel that would otherwise be - * restricted) are placed before LOCKDOWN_CONFIDENTIALITY_MAX. - * - * LSM authors should note that the semantics of any given lockdown - * reason are not guaranteed to be stable - the same reason may block - * one set of features in one kernel release, and a slightly different - * set of features in a later kernel release. LSMs that seek to expose - * lockdown policy at any level of granularity other than "none", - * "integrity" or "confidentiality" are responsible for either - * ensuring that they expose a consistent level of functionality to - * userland, or ensuring that userland is aware that this is - * potentially a moving target. It is easy to misuse this information - * in a way that could break userspace. Please be careful not to do - * so. - * - * If you add to this, remember to extend lockdown_reasons in - * security/lockdown/lockdown.c. 
- */ -enum lockdown_reason { - LOCKDOWN_NONE, - LOCKDOWN_MODULE_SIGNATURE, - LOCKDOWN_DEV_MEM, - LOCKDOWN_EFI_TEST, - LOCKDOWN_KEXEC, - LOCKDOWN_HIBERNATION, - LOCKDOWN_PCI_ACCESS, - LOCKDOWN_IOPORT, - LOCKDOWN_MSR, - LOCKDOWN_ACPI_TABLES, - LOCKDOWN_PCMCIA_CIS, - LOCKDOWN_TIOCSSERIAL, - LOCKDOWN_MODULE_PARAMETERS, - LOCKDOWN_MMIOTRACE, - LOCKDOWN_DEBUGFS, - LOCKDOWN_XMON_WR, - LOCKDOWN_BPF_WRITE_USER, - LOCKDOWN_INTEGRITY_MAX, - LOCKDOWN_KCORE, - LOCKDOWN_KPROBES, - LOCKDOWN_BPF_READ_KERNEL, - LOCKDOWN_PERF, - LOCKDOWN_TRACEFS, - LOCKDOWN_XMON_RW, - LOCKDOWN_XFRM_SECRET, - LOCKDOWN_CONFIDENTIALITY_MAX, -}; - -extern const char *const lockdown_reasons[LOCKDOWN_CONFIDENTIALITY_MAX+1]; - /* These functions are in security/commoncap.c */ extern int cap_capable(const struct cred *cred, struct user_namespace *ns, - int cap, unsigned int opts); + int cap, int audit); extern int cap_settime(const struct timespec64 *ts, const struct timezone *tz); extern int cap_ptrace_access_check(struct task_struct *child, unsigned int mode); extern int cap_ptrace_traceme(struct task_struct *parent); @@ -145,17 +80,13 @@ extern int cap_capset(struct cred *new, const struct cred *old, const kernel_cap_t *effective, const kernel_cap_t *inheritable, const kernel_cap_t *permitted); -extern int cap_bprm_creds_from_file(struct linux_binprm *bprm, struct file *file); -int cap_inode_setxattr(struct dentry *dentry, const char *name, - const void *value, size_t size, int flags); -int cap_inode_removexattr(struct user_namespace *mnt_userns, - struct dentry *dentry, const char *name); -int cap_inode_need_killpriv(struct dentry *dentry); -int cap_inode_killpriv(struct user_namespace *mnt_userns, - struct dentry *dentry); -int cap_inode_getsecurity(struct user_namespace *mnt_userns, - struct inode *inode, const char *name, void **buffer, - bool alloc); +extern int cap_bprm_set_creds(struct linux_binprm *bprm); +extern int cap_bprm_secureexec(struct linux_binprm *bprm); +extern int 
cap_inode_setxattr(struct dentry *dentry, const char *name, + const void *value, size_t size, int flags); +extern int cap_inode_removexattr(struct dentry *dentry, const char *name); +extern int cap_inode_need_killpriv(struct dentry *dentry); +extern int cap_inode_killpriv(struct dentry *dentry); extern int cap_mmap_addr(unsigned long addr); extern int cap_mmap_file(struct file *file, unsigned long reqprot, unsigned long prot, unsigned long flags); @@ -172,14 +103,13 @@ struct sk_buff; struct sock; struct sockaddr; struct socket; -struct flowi_common; +struct flowi; struct dst_entry; struct xfrm_selector; struct xfrm_policy; struct xfrm_state; struct xfrm_user_sec_ctx; struct seq_file; -struct sctp_endpoint; #ifdef CONFIG_MMU extern unsigned long mmap_min_addr; @@ -204,10 +134,6 @@ extern unsigned long dac_mmap_min_addr; /* setfsuid or setfsgid, id0 == fsuid or fsgid */ #define LSM_SETID_FS 8 -/* Flags for security_task_prlimit(). */ -#define LSM_PRLIMIT_READ 1 -#define LSM_PRLIMIT_WRITE 2 - /* forward declares to avoid warnings */ struct sched_param; struct request_sock; @@ -215,47 +141,48 @@ struct request_sock; /* bprm->unsafe reasons */ #define LSM_UNSAFE_SHARE 1 #define LSM_UNSAFE_PTRACE 2 -#define LSM_UNSAFE_NO_NEW_PRIVS 4 +#define LSM_UNSAFE_PTRACE_CAP 4 +#define LSM_UNSAFE_NO_NEW_PRIVS 8 #ifdef CONFIG_MMU extern int mmap_min_addr_handler(struct ctl_table *table, int write, - void *buffer, size_t *lenp, loff_t *ppos); + void __user *buffer, size_t *lenp, loff_t *ppos); #endif /* security_inode_init_security callback function to write xattrs */ typedef int (*initxattrs) (struct inode *inode, const struct xattr *xattr_array, void *fs_data); - -/* Keep the kernel_load_data_id enum in sync with kernel_read_file_id */ -#define __data_id_enumify(ENUM, dummy) LOADING_ ## ENUM, -#define __data_id_stringify(dummy, str) #str, - -enum kernel_load_data_id { - __kernel_read_file_id(__data_id_enumify) -}; - -static const char * const kernel_load_data_str[] = { - 
__kernel_read_file_id(__data_id_stringify) -}; - -static inline const char *kernel_load_data_id_str(enum kernel_load_data_id id) -{ - if ((unsigned)id >= LOADING_MAX_ID) - return kernel_load_data_str[LOADING_UNKNOWN]; - - return kernel_load_data_str[id]; -} - #ifdef CONFIG_SECURITY -int call_blocking_lsm_notifier(enum lsm_event event, void *data); -int register_blocking_lsm_notifier(struct notifier_block *nb); -int unregister_blocking_lsm_notifier(struct notifier_block *nb); +struct security_mnt_opts { + char **mnt_opts; + int *mnt_opts_flags; + int num_mnt_opts; +}; + +static inline void security_init_mnt_opts(struct security_mnt_opts *opts) +{ + opts->mnt_opts = NULL; + opts->mnt_opts_flags = NULL; + opts->num_mnt_opts = 0; +} + +static inline void security_free_mnt_opts(struct security_mnt_opts *opts) +{ + int i; + if (opts->mnt_opts) + for (i = 0; i < opts->num_mnt_opts; i++) + kfree(opts->mnt_opts[i]); + kfree(opts->mnt_opts); + opts->mnt_opts = NULL; + kfree(opts->mnt_opts_flags); + opts->mnt_opts_flags = NULL; + opts->num_mnt_opts = 0; +} /* prototypes */ extern int security_init(void); -extern int early_security_init(void); /* Security operations */ int security_binder_set_context_mgr(struct task_struct *mgr); @@ -275,30 +202,31 @@ int security_capset(struct cred *new, const struct cred *old, const kernel_cap_t *effective, const kernel_cap_t *inheritable, const kernel_cap_t *permitted); -int security_capable(const struct cred *cred, - struct user_namespace *ns, - int cap, - unsigned int opts); +int security_capable(const struct cred *cred, struct user_namespace *ns, + int cap); +int security_capable_noaudit(const struct cred *cred, struct user_namespace *ns, + int cap); int security_quotactl(int cmds, int type, int id, struct super_block *sb); int security_quota_on(struct dentry *dentry); int security_syslog(int type); int security_settime64(const struct timespec64 *ts, const struct timezone *tz); +static inline int security_settime(const struct timespec 
*ts, const struct timezone *tz) +{ + struct timespec64 ts64 = timespec_to_timespec64(*ts); + + return security_settime64(&ts64, tz); +} int security_vm_enough_memory_mm(struct mm_struct *mm, long pages); -int security_bprm_creds_for_exec(struct linux_binprm *bprm); -int security_bprm_creds_from_file(struct linux_binprm *bprm, struct file *file); +int security_bprm_set_creds(struct linux_binprm *bprm); int security_bprm_check(struct linux_binprm *bprm); void security_bprm_committing_creds(struct linux_binprm *bprm); void security_bprm_committed_creds(struct linux_binprm *bprm); -int security_fs_context_dup(struct fs_context *fc, struct fs_context *src_fc); -int security_fs_context_parse_param(struct fs_context *fc, struct fs_parameter *param); +int security_bprm_secureexec(struct linux_binprm *bprm); int security_sb_alloc(struct super_block *sb); -void security_sb_delete(struct super_block *sb); void security_sb_free(struct super_block *sb); -void security_free_mnt_opts(void **mnt_opts); -int security_sb_eat_lsm_opts(char *options, void **mnt_opts); -int security_sb_mnt_opts_compat(struct super_block *sb, void *mnt_opts); -int security_sb_remount(struct super_block *sb, void *mnt_opts); -int security_sb_kern_mount(struct super_block *sb); +int security_sb_copy_data(char *orig, char *copy); +int security_sb_remount(struct super_block *sb, void *data); +int security_sb_kern_mount(struct super_block *sb, int flags, void *data); int security_sb_show_options(struct seq_file *m, struct super_block *sb); int security_sb_statfs(struct dentry *dentry); int security_sb_mount(const char *dev_name, const struct path *path, @@ -306,16 +234,12 @@ int security_sb_mount(const char *dev_name, const struct path *path, int security_sb_umount(struct vfsmount *mnt, int flags); int security_sb_pivotroot(const struct path *old_path, const struct path *new_path); int security_sb_set_mnt_opts(struct super_block *sb, - void *mnt_opts, + struct security_mnt_opts *opts, unsigned long 
kern_flags, unsigned long *set_kern_flags); int security_sb_clone_mnt_opts(const struct super_block *oldsb, - struct super_block *newsb, - unsigned long kern_flags, - unsigned long *set_kern_flags); -int security_add_mnt_opt(const char *option, const char *val, - int len, void **mnt_opts); -int security_move_mount(const struct path *from_path, const struct path *to_path); + struct super_block *newsb); +int security_sb_parse_opts_str(char *options, struct security_mnt_opts *opts); int security_dentry_init_security(struct dentry *dentry, int mode, const struct qstr *name, void **ctx, u32 *ctxlen); @@ -323,16 +247,12 @@ int security_dentry_create_files_as(struct dentry *dentry, int mode, struct qstr *name, const struct cred *old, struct cred *new); -int security_path_notify(const struct path *path, u64 mask, - unsigned int obj_type); + int security_inode_alloc(struct inode *inode); void security_inode_free(struct inode *inode); int security_inode_init_security(struct inode *inode, struct inode *dir, const struct qstr *qstr, initxattrs initxattrs, void *fs_data); -int security_inode_init_security_anon(struct inode *inode, - const struct qstr *name, - const struct inode *context_inode); int security_old_inode_init_security(struct inode *inode, struct inode *dir, const struct qstr *qstr, const char **name, void **value, size_t *len); @@ -354,28 +274,21 @@ int security_inode_follow_link(struct dentry *dentry, struct inode *inode, int security_inode_permission(struct inode *inode, int mask); int security_inode_setattr(struct dentry *dentry, struct iattr *attr); int security_inode_getattr(const struct path *path); -int security_inode_setxattr(struct user_namespace *mnt_userns, - struct dentry *dentry, const char *name, +int security_inode_setxattr(struct dentry *dentry, const char *name, const void *value, size_t size, int flags); void security_inode_post_setxattr(struct dentry *dentry, const char *name, const void *value, size_t size, int flags); int 
security_inode_getxattr(struct dentry *dentry, const char *name); int security_inode_listxattr(struct dentry *dentry); -int security_inode_removexattr(struct user_namespace *mnt_userns, - struct dentry *dentry, const char *name); +int security_inode_removexattr(struct dentry *dentry, const char *name); int security_inode_need_killpriv(struct dentry *dentry); -int security_inode_killpriv(struct user_namespace *mnt_userns, - struct dentry *dentry); -int security_inode_getsecurity(struct user_namespace *mnt_userns, - struct inode *inode, const char *name, - void **buffer, bool alloc); +int security_inode_killpriv(struct dentry *dentry); +int security_inode_getsecurity(struct inode *inode, const char *name, void **buffer, bool alloc); int security_inode_setsecurity(struct inode *inode, const char *name, const void *value, size_t size, int flags); int security_inode_listsecurity(struct inode *inode, char *buffer, size_t buffer_size); void security_inode_getsecid(struct inode *inode, u32 *secid); int security_inode_copy_up(struct dentry *src, struct cred **new); int security_inode_copy_up_xattr(const char *name); -int security_kernfs_init_security(struct kernfs_node *kn_dir, - struct kernfs_node *kn); int security_file_permission(struct file *file, int mask); int security_file_alloc(struct file *file); void security_file_free(struct file *file); @@ -391,46 +304,36 @@ void security_file_set_fowner(struct file *file); int security_file_send_sigiotask(struct task_struct *tsk, struct fown_struct *fown, int sig); int security_file_receive(struct file *file); -int security_file_open(struct file *file); -int security_task_alloc(struct task_struct *task, unsigned long clone_flags); +int security_file_open(struct file *file, const struct cred *cred); +int security_task_create(unsigned long clone_flags); void security_task_free(struct task_struct *task); int security_cred_alloc_blank(struct cred *cred, gfp_t gfp); void security_cred_free(struct cred *cred); int 
security_prepare_creds(struct cred *new, const struct cred *old, gfp_t gfp); void security_transfer_creds(struct cred *new, const struct cred *old); -void security_cred_getsecid(const struct cred *c, u32 *secid); int security_kernel_act_as(struct cred *new, u32 secid); int security_kernel_create_files_as(struct cred *new, struct inode *inode); int security_kernel_module_request(char *kmod_name); -int security_kernel_load_data(enum kernel_load_data_id id, bool contents); -int security_kernel_post_load_data(char *buf, loff_t size, - enum kernel_load_data_id id, - char *description); -int security_kernel_read_file(struct file *file, enum kernel_read_file_id id, - bool contents); +int security_kernel_read_file(struct file *file, enum kernel_read_file_id id); int security_kernel_post_read_file(struct file *file, char *buf, loff_t size, enum kernel_read_file_id id); int security_task_fix_setuid(struct cred *new, const struct cred *old, int flags); -int security_task_fix_setgid(struct cred *new, const struct cred *old, - int flags); int security_task_setpgid(struct task_struct *p, pid_t pgid); int security_task_getpgid(struct task_struct *p); int security_task_getsid(struct task_struct *p); -void security_task_getsecid_subj(struct task_struct *p, u32 *secid); -void security_task_getsecid_obj(struct task_struct *p, u32 *secid); +void security_task_getsecid(struct task_struct *p, u32 *secid); int security_task_setnice(struct task_struct *p, int nice); int security_task_setioprio(struct task_struct *p, int ioprio); int security_task_getioprio(struct task_struct *p); -int security_task_prlimit(const struct cred *cred, const struct cred *tcred, - unsigned int flags); int security_task_setrlimit(struct task_struct *p, unsigned int resource, struct rlimit *new_rlim); int security_task_setscheduler(struct task_struct *p); int security_task_getscheduler(struct task_struct *p); int security_task_movememory(struct task_struct *p); -int security_task_kill(struct task_struct *p, 
struct kernel_siginfo *info, - int sig, const struct cred *cred); +int security_task_kill(struct task_struct *p, struct siginfo *info, + int sig, u32 secid); +int security_task_wait(struct task_struct *p); int security_task_prctl(int option, unsigned long arg2, unsigned long arg3, unsigned long arg4, unsigned long arg5); void security_task_to_inode(struct task_struct *p, struct inode *inode); @@ -438,58 +341,47 @@ int security_ipc_permission(struct kern_ipc_perm *ipcp, short flag); void security_ipc_getsecid(struct kern_ipc_perm *ipcp, u32 *secid); int security_msg_msg_alloc(struct msg_msg *msg); void security_msg_msg_free(struct msg_msg *msg); -int security_msg_queue_alloc(struct kern_ipc_perm *msq); -void security_msg_queue_free(struct kern_ipc_perm *msq); -int security_msg_queue_associate(struct kern_ipc_perm *msq, int msqflg); -int security_msg_queue_msgctl(struct kern_ipc_perm *msq, int cmd); -int security_msg_queue_msgsnd(struct kern_ipc_perm *msq, +int security_msg_queue_alloc(struct msg_queue *msq); +void security_msg_queue_free(struct msg_queue *msq); +int security_msg_queue_associate(struct msg_queue *msq, int msqflg); +int security_msg_queue_msgctl(struct msg_queue *msq, int cmd); +int security_msg_queue_msgsnd(struct msg_queue *msq, struct msg_msg *msg, int msqflg); -int security_msg_queue_msgrcv(struct kern_ipc_perm *msq, struct msg_msg *msg, +int security_msg_queue_msgrcv(struct msg_queue *msq, struct msg_msg *msg, struct task_struct *target, long type, int mode); -int security_shm_alloc(struct kern_ipc_perm *shp); -void security_shm_free(struct kern_ipc_perm *shp); -int security_shm_associate(struct kern_ipc_perm *shp, int shmflg); -int security_shm_shmctl(struct kern_ipc_perm *shp, int cmd); -int security_shm_shmat(struct kern_ipc_perm *shp, char __user *shmaddr, int shmflg); -int security_sem_alloc(struct kern_ipc_perm *sma); -void security_sem_free(struct kern_ipc_perm *sma); -int security_sem_associate(struct kern_ipc_perm *sma, int semflg); -int 
security_sem_semctl(struct kern_ipc_perm *sma, int cmd); -int security_sem_semop(struct kern_ipc_perm *sma, struct sembuf *sops, +int security_shm_alloc(struct shmid_kernel *shp); +void security_shm_free(struct shmid_kernel *shp); +int security_shm_associate(struct shmid_kernel *shp, int shmflg); +int security_shm_shmctl(struct shmid_kernel *shp, int cmd); +int security_shm_shmat(struct shmid_kernel *shp, char __user *shmaddr, int shmflg); +int security_sem_alloc(struct sem_array *sma); +void security_sem_free(struct sem_array *sma); +int security_sem_associate(struct sem_array *sma, int semflg); +int security_sem_semctl(struct sem_array *sma, int cmd); +int security_sem_semop(struct sem_array *sma, struct sembuf *sops, unsigned nsops, int alter); void security_d_instantiate(struct dentry *dentry, struct inode *inode); -int security_getprocattr(struct task_struct *p, const char *lsm, char *name, - char **value); -int security_setprocattr(const char *lsm, const char *name, void *value, - size_t size); +int security_getprocattr(struct task_struct *p, char *name, char **value); +int security_setprocattr(struct task_struct *p, char *name, void *value, size_t size); int security_netlink_send(struct sock *sk, struct sk_buff *skb); int security_ismaclabel(const char *name); int security_secid_to_secctx(u32 secid, char **secdata, u32 *seclen); int security_secctx_to_secid(const char *secdata, u32 seclen, u32 *secid); void security_release_secctx(char *secdata, u32 seclen); + void security_inode_invalidate_secctx(struct inode *inode); int security_inode_notifysecctx(struct inode *inode, void *ctx, u32 ctxlen); int security_inode_setsecctx(struct dentry *dentry, void *ctx, u32 ctxlen); int security_inode_getsecctx(struct inode *inode, void **ctx, u32 *ctxlen); -int security_locked_down(enum lockdown_reason what); #else /* CONFIG_SECURITY */ +struct security_mnt_opts { +}; -static inline int call_blocking_lsm_notifier(enum lsm_event event, void *data) +static inline void 
security_init_mnt_opts(struct security_mnt_opts *opts) { - return 0; } -static inline int register_blocking_lsm_notifier(struct notifier_block *nb) -{ - return 0; -} - -static inline int unregister_blocking_lsm_notifier(struct notifier_block *nb) -{ - return 0; -} - -static inline void security_free_mnt_opts(void **mnt_opts) +static inline void security_free_mnt_opts(struct security_mnt_opts *opts) { } @@ -503,11 +395,6 @@ static inline int security_init(void) return 0; } -static inline int early_security_init(void) -{ - return 0; -} - static inline int security_binder_set_context_mgr(struct task_struct *mgr) { return 0; @@ -561,11 +448,14 @@ static inline int security_capset(struct cred *new, } static inline int security_capable(const struct cred *cred, - struct user_namespace *ns, - int cap, - unsigned int opts) + struct user_namespace *ns, int cap) { - return cap_capable(cred, ns, cap, opts); + return cap_capable(cred, ns, cap, SECURITY_CAP_AUDIT); +} + +static inline int security_capable_noaudit(const struct cred *cred, + struct user_namespace *ns, int cap) { + return cap_capable(cred, ns, cap, SECURITY_CAP_NOAUDIT); } static inline int security_quotactl(int cmds, int type, int id, @@ -590,20 +480,22 @@ static inline int security_settime64(const struct timespec64 *ts, return cap_settime(ts, tz); } +static inline int security_settime(const struct timespec *ts, + const struct timezone *tz) +{ + struct timespec64 ts64 = timespec_to_timespec64(*ts); + + return cap_settime(&ts64, tz); +} + static inline int security_vm_enough_memory_mm(struct mm_struct *mm, long pages) { return __vm_enough_memory(mm, pages, cap_vm_enough_memory(mm, pages)); } -static inline int security_bprm_creds_for_exec(struct linux_binprm *bprm) +static inline int security_bprm_set_creds(struct linux_binprm *bprm) { - return 0; -} - -static inline int security_bprm_creds_from_file(struct linux_binprm *bprm, - struct file *file) -{ - return cap_bprm_creds_from_file(bprm, file); + return 
cap_bprm_set_creds(bprm); } static inline int security_bprm_check(struct linux_binprm *bprm) @@ -619,15 +511,9 @@ static inline void security_bprm_committed_creds(struct linux_binprm *bprm) { } -static inline int security_fs_context_dup(struct fs_context *fc, - struct fs_context *src_fc) +static inline int security_bprm_secureexec(struct linux_binprm *bprm) { - return 0; -} -static inline int security_fs_context_parse_param(struct fs_context *fc, - struct fs_parameter *param) -{ - return -ENOPARAM; + return cap_bprm_secureexec(bprm); } static inline int security_sb_alloc(struct super_block *sb) @@ -635,32 +521,20 @@ static inline int security_sb_alloc(struct super_block *sb) return 0; } -static inline void security_sb_delete(struct super_block *sb) -{ } - static inline void security_sb_free(struct super_block *sb) { } -static inline int security_sb_eat_lsm_opts(char *options, - void **mnt_opts) +static inline int security_sb_copy_data(char *orig, char *copy) { return 0; } -static inline int security_sb_remount(struct super_block *sb, - void *mnt_opts) +static inline int security_sb_remount(struct super_block *sb, void *data) { return 0; } -static inline int security_sb_mnt_opts_compat(struct super_block *sb, - void *mnt_opts) -{ - return 0; -} - - -static inline int security_sb_kern_mount(struct super_block *sb) +static inline int security_sb_kern_mount(struct super_block *sb, int flags, void *data) { return 0; } @@ -695,7 +569,7 @@ static inline int security_sb_pivotroot(const struct path *old_path, } static inline int security_sb_set_mnt_opts(struct super_block *sb, - void *mnt_opts, + struct security_mnt_opts *opts, unsigned long kern_flags, unsigned long *set_kern_flags) { @@ -703,27 +577,12 @@ static inline int security_sb_set_mnt_opts(struct super_block *sb, } static inline int security_sb_clone_mnt_opts(const struct super_block *oldsb, - struct super_block *newsb, - unsigned long kern_flags, - unsigned long *set_kern_flags) + struct super_block *newsb) { 
return 0; } -static inline int security_add_mnt_opt(const char *option, const char *val, - int len, void **mnt_opts) -{ - return 0; -} - -static inline int security_move_mount(const struct path *from_path, - const struct path *to_path) -{ - return 0; -} - -static inline int security_path_notify(const struct path *path, u64 mask, - unsigned int obj_type) +static inline int security_sb_parse_opts_str(char *options, struct security_mnt_opts *opts) { return 0; } @@ -763,13 +622,6 @@ static inline int security_inode_init_security(struct inode *inode, return 0; } -static inline int security_inode_init_security_anon(struct inode *inode, - const struct qstr *name, - const struct inode *context_inode) -{ - return 0; -} - static inline int security_old_inode_init_security(struct inode *inode, struct inode *dir, const struct qstr *qstr, @@ -863,9 +715,8 @@ static inline int security_inode_getattr(const struct path *path) return 0; } -static inline int security_inode_setxattr(struct user_namespace *mnt_userns, - struct dentry *dentry, const char *name, const void *value, - size_t size, int flags) +static inline int security_inode_setxattr(struct dentry *dentry, + const char *name, const void *value, size_t size, int flags) { return cap_inode_setxattr(dentry, name, value, size, flags); } @@ -885,11 +736,10 @@ static inline int security_inode_listxattr(struct dentry *dentry) return 0; } -static inline int security_inode_removexattr(struct user_namespace *mnt_userns, - struct dentry *dentry, - const char *name) +static inline int security_inode_removexattr(struct dentry *dentry, + const char *name) { - return cap_inode_removexattr(mnt_userns, dentry, name); + return cap_inode_removexattr(dentry, name); } static inline int security_inode_need_killpriv(struct dentry *dentry) @@ -897,18 +747,14 @@ static inline int security_inode_need_killpriv(struct dentry *dentry) return cap_inode_need_killpriv(dentry); } -static inline int security_inode_killpriv(struct user_namespace 
*mnt_userns, - struct dentry *dentry) +static inline int security_inode_killpriv(struct dentry *dentry) { - return cap_inode_killpriv(mnt_userns, dentry); + return cap_inode_killpriv(dentry); } -static inline int security_inode_getsecurity(struct user_namespace *mnt_userns, - struct inode *inode, - const char *name, void **buffer, - bool alloc) +static inline int security_inode_getsecurity(struct inode *inode, const char *name, void **buffer, bool alloc) { - return cap_inode_getsecurity(mnt_userns, inode, name, buffer, alloc); + return -EOPNOTSUPP; } static inline int security_inode_setsecurity(struct inode *inode, const char *name, const void *value, size_t size, int flags) @@ -931,12 +777,6 @@ static inline int security_inode_copy_up(struct dentry *src, struct cred **new) return 0; } -static inline int security_kernfs_init_security(struct kernfs_node *kn_dir, - struct kernfs_node *kn) -{ - return 0; -} - static inline int security_inode_copy_up_xattr(const char *name) { return -EOPNOTSUPP; @@ -1007,13 +847,13 @@ static inline int security_file_receive(struct file *file) return 0; } -static inline int security_file_open(struct file *file) +static inline int security_file_open(struct file *file, + const struct cred *cred) { return 0; } -static inline int security_task_alloc(struct task_struct *task, - unsigned long clone_flags) +static inline int security_task_create(unsigned long clone_flags) { return 0; } @@ -1057,21 +897,8 @@ static inline int security_kernel_module_request(char *kmod_name) return 0; } -static inline int security_kernel_load_data(enum kernel_load_data_id id, bool contents) -{ - return 0; -} - -static inline int security_kernel_post_load_data(char *buf, loff_t size, - enum kernel_load_data_id id, - char *description) -{ - return 0; -} - static inline int security_kernel_read_file(struct file *file, - enum kernel_read_file_id id, - bool contents) + enum kernel_read_file_id id) { return 0; } @@ -1090,13 +917,6 @@ static inline int 
security_task_fix_setuid(struct cred *new, return cap_task_fix_setuid(new, old, flags); } -static inline int security_task_fix_setgid(struct cred *new, - const struct cred *old, - int flags) -{ - return 0; -} - static inline int security_task_setpgid(struct task_struct *p, pid_t pgid) { return 0; @@ -1112,12 +932,7 @@ static inline int security_task_getsid(struct task_struct *p) return 0; } -static inline void security_task_getsecid_subj(struct task_struct *p, u32 *secid) -{ - *secid = 0; -} - -static inline void security_task_getsecid_obj(struct task_struct *p, u32 *secid) +static inline void security_task_getsecid(struct task_struct *p, u32 *secid) { *secid = 0; } @@ -1137,13 +952,6 @@ static inline int security_task_getioprio(struct task_struct *p) return 0; } -static inline int security_task_prlimit(const struct cred *cred, - const struct cred *tcred, - unsigned int flags) -{ - return 0; -} - static inline int security_task_setrlimit(struct task_struct *p, unsigned int resource, struct rlimit *new_rlim) @@ -1167,8 +975,13 @@ static inline int security_task_movememory(struct task_struct *p) } static inline int security_task_kill(struct task_struct *p, - struct kernel_siginfo *info, int sig, - const struct cred *cred) + struct siginfo *info, int sig, + u32 secid) +{ + return 0; +} + +static inline int security_task_wait(struct task_struct *p) { return 0; } @@ -1203,32 +1016,32 @@ static inline int security_msg_msg_alloc(struct msg_msg *msg) static inline void security_msg_msg_free(struct msg_msg *msg) { } -static inline int security_msg_queue_alloc(struct kern_ipc_perm *msq) +static inline int security_msg_queue_alloc(struct msg_queue *msq) { return 0; } -static inline void security_msg_queue_free(struct kern_ipc_perm *msq) +static inline void security_msg_queue_free(struct msg_queue *msq) { } -static inline int security_msg_queue_associate(struct kern_ipc_perm *msq, +static inline int security_msg_queue_associate(struct msg_queue *msq, int msqflg) { return 0; } 
-static inline int security_msg_queue_msgctl(struct kern_ipc_perm *msq, int cmd) +static inline int security_msg_queue_msgctl(struct msg_queue *msq, int cmd) { return 0; } -static inline int security_msg_queue_msgsnd(struct kern_ipc_perm *msq, +static inline int security_msg_queue_msgsnd(struct msg_queue *msq, struct msg_msg *msg, int msqflg) { return 0; } -static inline int security_msg_queue_msgrcv(struct kern_ipc_perm *msq, +static inline int security_msg_queue_msgrcv(struct msg_queue *msq, struct msg_msg *msg, struct task_struct *target, long type, int mode) @@ -1236,68 +1049,65 @@ static inline int security_msg_queue_msgrcv(struct kern_ipc_perm *msq, return 0; } -static inline int security_shm_alloc(struct kern_ipc_perm *shp) +static inline int security_shm_alloc(struct shmid_kernel *shp) { return 0; } -static inline void security_shm_free(struct kern_ipc_perm *shp) +static inline void security_shm_free(struct shmid_kernel *shp) { } -static inline int security_shm_associate(struct kern_ipc_perm *shp, +static inline int security_shm_associate(struct shmid_kernel *shp, int shmflg) { return 0; } -static inline int security_shm_shmctl(struct kern_ipc_perm *shp, int cmd) +static inline int security_shm_shmctl(struct shmid_kernel *shp, int cmd) { return 0; } -static inline int security_shm_shmat(struct kern_ipc_perm *shp, +static inline int security_shm_shmat(struct shmid_kernel *shp, char __user *shmaddr, int shmflg) { return 0; } -static inline int security_sem_alloc(struct kern_ipc_perm *sma) +static inline int security_sem_alloc(struct sem_array *sma) { return 0; } -static inline void security_sem_free(struct kern_ipc_perm *sma) +static inline void security_sem_free(struct sem_array *sma) { } -static inline int security_sem_associate(struct kern_ipc_perm *sma, int semflg) +static inline int security_sem_associate(struct sem_array *sma, int semflg) { return 0; } -static inline int security_sem_semctl(struct kern_ipc_perm *sma, int cmd) +static inline int 
security_sem_semctl(struct sem_array *sma, int cmd) { return 0; } -static inline int security_sem_semop(struct kern_ipc_perm *sma, +static inline int security_sem_semop(struct sem_array *sma, struct sembuf *sops, unsigned nsops, int alter) { return 0; } -static inline void security_d_instantiate(struct dentry *dentry, - struct inode *inode) +static inline void security_d_instantiate(struct dentry *dentry, struct inode *inode) { } -static inline int security_getprocattr(struct task_struct *p, const char *lsm, - char *name, char **value) +static inline int security_getprocattr(struct task_struct *p, char *name, char **value) { return -EINVAL; } -static inline int security_setprocattr(const char *lsm, char *name, - void *value, size_t size) +static inline int security_setprocattr(struct task_struct *p, char *name, void *value, size_t size) { return -EINVAL; } @@ -1344,34 +1154,8 @@ static inline int security_inode_getsecctx(struct inode *inode, void **ctx, u32 { return -EOPNOTSUPP; } -static inline int security_locked_down(enum lockdown_reason what) -{ - return 0; -} #endif /* CONFIG_SECURITY */ -#if defined(CONFIG_SECURITY) && defined(CONFIG_WATCH_QUEUE) -int security_post_notification(const struct cred *w_cred, - const struct cred *cred, - struct watch_notification *n); -#else -static inline int security_post_notification(const struct cred *w_cred, - const struct cred *cred, - struct watch_notification *n) -{ - return 0; -} -#endif - -#if defined(CONFIG_SECURITY) && defined(CONFIG_KEY_NOTIFICATIONS) -int security_watch_key(struct key *key); -#else -static inline int security_watch_key(struct key *key) -{ - return 0; -} -#endif - #ifdef CONFIG_SECURITY_NETWORK int security_unix_stream_connect(struct sock *sock, struct sock *other, struct sock *newsk); @@ -1379,7 +1163,6 @@ int security_unix_may_send(struct socket *sock, struct socket *other); int security_socket_create(int family, int type, int protocol, int kern); int security_socket_post_create(struct socket *sock, 
int family, int type, int protocol, int kern); -int security_socket_socketpair(struct socket *socka, struct socket *sockb); int security_socket_bind(struct socket *sock, struct sockaddr *address, int addrlen); int security_socket_connect(struct socket *sock, struct sockaddr *address, int addrlen); int security_socket_listen(struct socket *sock, int backlog); @@ -1399,11 +1182,10 @@ int security_socket_getpeersec_dgram(struct socket *sock, struct sk_buff *skb, u int security_sk_alloc(struct sock *sk, int family, gfp_t priority); void security_sk_free(struct sock *sk); void security_sk_clone(const struct sock *sk, struct sock *newsk); -void security_sk_classify_flow(struct sock *sk, struct flowi_common *flic); -void security_req_classify_flow(const struct request_sock *req, - struct flowi_common *flic); +void security_sk_classify_flow(struct sock *sk, struct flowi *fl); +void security_req_classify_flow(const struct request_sock *req, struct flowi *fl); void security_sock_graft(struct sock*sk, struct socket *parent); -int security_inet_conn_request(const struct sock *sk, +int security_inet_conn_request(struct sock *sk, struct sk_buff *skb, struct request_sock *req); void security_inet_csk_clone(struct sock *newsk, const struct request_sock *req); @@ -1418,11 +1200,6 @@ int security_tun_dev_create(void); int security_tun_dev_attach_queue(void *security); int security_tun_dev_attach(struct sock *sk, void *security); int security_tun_dev_open(void *security); -int security_sctp_assoc_request(struct sctp_endpoint *ep, struct sk_buff *skb); -int security_sctp_bind_connect(struct sock *sk, int optname, - struct sockaddr *address, int addrlen); -void security_sctp_sk_clone(struct sctp_endpoint *ep, struct sock *sk, - struct sock *newsk); #else /* CONFIG_SECURITY_NETWORK */ static inline int security_unix_stream_connect(struct sock *sock, @@ -1452,12 +1229,6 @@ static inline int security_socket_post_create(struct socket *sock, return 0; } -static inline int 
security_socket_socketpair(struct socket *socka, - struct socket *sockb) -{ - return 0; -} - static inline int security_socket_bind(struct socket *sock, struct sockaddr *address, int addrlen) @@ -1552,13 +1323,11 @@ static inline void security_sk_clone(const struct sock *sk, struct sock *newsk) { } -static inline void security_sk_classify_flow(struct sock *sk, - struct flowi_common *flic) +static inline void security_sk_classify_flow(struct sock *sk, struct flowi *fl) { } -static inline void security_req_classify_flow(const struct request_sock *req, - struct flowi_common *flic) +static inline void security_req_classify_flow(const struct request_sock *req, struct flowi *fl) { } @@ -1566,7 +1335,7 @@ static inline void security_sock_graft(struct sock *sk, struct socket *parent) { } -static inline int security_inet_conn_request(const struct sock *sk, +static inline int security_inet_conn_request(struct sock *sk, struct sk_buff *skb, struct request_sock *req) { return 0; @@ -1623,53 +1392,8 @@ static inline int security_tun_dev_open(void *security) { return 0; } - -static inline int security_sctp_assoc_request(struct sctp_endpoint *ep, - struct sk_buff *skb) -{ - return 0; -} - -static inline int security_sctp_bind_connect(struct sock *sk, int optname, - struct sockaddr *address, - int addrlen) -{ - return 0; -} - -static inline void security_sctp_sk_clone(struct sctp_endpoint *ep, - struct sock *sk, - struct sock *newsk) -{ -} #endif /* CONFIG_SECURITY_NETWORK */ -#ifdef CONFIG_SECURITY_INFINIBAND -int security_ib_pkey_access(void *sec, u64 subnet_prefix, u16 pkey); -int security_ib_endport_manage_subnet(void *sec, const char *name, u8 port_num); -int security_ib_alloc_security(void **sec); -void security_ib_free_security(void *sec); -#else /* CONFIG_SECURITY_INFINIBAND */ -static inline int security_ib_pkey_access(void *sec, u64 subnet_prefix, u16 pkey) -{ - return 0; -} - -static inline int security_ib_endport_manage_subnet(void *sec, const char *dev_name, u8 
port_num) -{ - return 0; -} - -static inline int security_ib_alloc_security(void **sec) -{ - return 0; -} - -static inline void security_ib_free_security(void *sec) -{ -} -#endif /* CONFIG_SECURITY_INFINIBAND */ - #ifdef CONFIG_SECURITY_NETWORK_XFRM int security_xfrm_policy_alloc(struct xfrm_sec_ctx **ctxp, @@ -1682,12 +1406,12 @@ int security_xfrm_state_alloc_acquire(struct xfrm_state *x, struct xfrm_sec_ctx *polsec, u32 secid); int security_xfrm_state_delete(struct xfrm_state *x); void security_xfrm_state_free(struct xfrm_state *x); -int security_xfrm_policy_lookup(struct xfrm_sec_ctx *ctx, u32 fl_secid); +int security_xfrm_policy_lookup(struct xfrm_sec_ctx *ctx, u32 fl_secid, u8 dir); int security_xfrm_state_pol_flow_match(struct xfrm_state *x, struct xfrm_policy *xp, - const struct flowi_common *flic); + const struct flowi *fl); int security_xfrm_decode_session(struct sk_buff *skb, u32 *secid); -void security_skb_classify_flow(struct sk_buff *skb, struct flowi_common *flic); +void security_skb_classify_flow(struct sk_buff *skb, struct flowi *fl); #else /* CONFIG_SECURITY_NETWORK_XFRM */ @@ -1733,14 +1457,13 @@ static inline int security_xfrm_state_delete(struct xfrm_state *x) return 0; } -static inline int security_xfrm_policy_lookup(struct xfrm_sec_ctx *ctx, u32 fl_secid) +static inline int security_xfrm_policy_lookup(struct xfrm_sec_ctx *ctx, u32 fl_secid, u8 dir) { return 0; } static inline int security_xfrm_state_pol_flow_match(struct xfrm_state *x, - struct xfrm_policy *xp, - const struct flowi_common *flic) + struct xfrm_policy *xp, const struct flowi *fl) { return 1; } @@ -1750,8 +1473,7 @@ static inline int security_xfrm_decode_session(struct sk_buff *skb, u32 *secid) return 0; } -static inline void security_skb_classify_flow(struct sk_buff *skb, - struct flowi_common *flic) +static inline void security_skb_classify_flow(struct sk_buff *skb, struct flowi *fl) { } @@ -1845,8 +1567,8 @@ static inline int security_path_chroot(const struct path *path) int 
security_key_alloc(struct key *key, const struct cred *cred, unsigned long flags); void security_key_free(struct key *key); -int security_key_permission(key_ref_t key_ref, const struct cred *cred, - enum key_need_perm need_perm); +int security_key_permission(key_ref_t key_ref, + const struct cred *cred, unsigned perm); int security_key_getsecurity(struct key *key, char **_buffer); #else @@ -1864,7 +1586,7 @@ static inline void security_key_free(struct key *key) static inline int security_key_permission(key_ref_t key_ref, const struct cred *cred, - enum key_need_perm need_perm) + unsigned perm) { return 0; } @@ -1882,7 +1604,8 @@ static inline int security_key_getsecurity(struct key *key, char **_buffer) #ifdef CONFIG_SECURITY int security_audit_rule_init(u32 field, u32 op, char *rulestr, void **lsmrule); int security_audit_rule_known(struct audit_krule *krule); -int security_audit_rule_match(u32 secid, u32 field, u32 op, void *lsmrule); +int security_audit_rule_match(u32 secid, u32 field, u32 op, void *lsmrule, + struct audit_context *actx); void security_audit_rule_free(void *lsmrule); #else @@ -1899,7 +1622,7 @@ static inline int security_audit_rule_known(struct audit_krule *krule) } static inline int security_audit_rule_match(u32 secid, u32 field, u32 op, - void *lsmrule) + void *lsmrule, struct audit_context *actx) { return 0; } @@ -1916,10 +1639,6 @@ extern struct dentry *securityfs_create_file(const char *name, umode_t mode, struct dentry *parent, void *data, const struct file_operations *fops); extern struct dentry *securityfs_create_dir(const char *name, struct dentry *parent); -struct dentry *securityfs_create_symlink(const char *name, - struct dentry *parent, - const char *target, - const struct inode_operations *iops); extern void securityfs_remove(struct dentry *dentry); #else /* CONFIG_SECURITYFS */ @@ -1939,103 +1658,33 @@ static inline struct dentry *securityfs_create_file(const char *name, return ERR_PTR(-ENODEV); } -static inline struct dentry 
*securityfs_create_symlink(const char *name, - struct dentry *parent, - const char *target, - const struct inode_operations *iops) -{ - return ERR_PTR(-ENODEV); -} - static inline void securityfs_remove(struct dentry *dentry) {} #endif -#ifdef CONFIG_BPF_SYSCALL -union bpf_attr; -struct bpf_map; -struct bpf_prog; -struct bpf_prog_aux; #ifdef CONFIG_SECURITY -extern int security_bpf(int cmd, union bpf_attr *attr, unsigned int size); -extern int security_bpf_map(struct bpf_map *map, fmode_t fmode); -extern int security_bpf_prog(struct bpf_prog *prog); -extern int security_bpf_map_alloc(struct bpf_map *map); -extern void security_bpf_map_free(struct bpf_map *map); -extern int security_bpf_prog_alloc(struct bpf_prog_aux *aux); -extern void security_bpf_prog_free(struct bpf_prog_aux *aux); + +static inline char *alloc_secdata(void) +{ + return (char *)get_zeroed_page(GFP_KERNEL); +} + +static inline void free_secdata(void *secdata) +{ + free_page((unsigned long)secdata); +} + #else -static inline int security_bpf(int cmd, union bpf_attr *attr, - unsigned int size) + +static inline char *alloc_secdata(void) { - return 0; + return (char *)1; } -static inline int security_bpf_map(struct bpf_map *map, fmode_t fmode) -{ - return 0; -} - -static inline int security_bpf_prog(struct bpf_prog *prog) -{ - return 0; -} - -static inline int security_bpf_map_alloc(struct bpf_map *map) -{ - return 0; -} - -static inline void security_bpf_map_free(struct bpf_map *map) -{ } - -static inline int security_bpf_prog_alloc(struct bpf_prog_aux *aux) -{ - return 0; -} - -static inline void security_bpf_prog_free(struct bpf_prog_aux *aux) +static inline void free_secdata(void *secdata) { } #endif /* CONFIG_SECURITY */ -#endif /* CONFIG_BPF_SYSCALL */ - -#ifdef CONFIG_PERF_EVENTS -struct perf_event_attr; -struct perf_event; - -#ifdef CONFIG_SECURITY -extern int security_perf_event_open(struct perf_event_attr *attr, int type); -extern int security_perf_event_alloc(struct perf_event *event); 
-extern void security_perf_event_free(struct perf_event *event); -extern int security_perf_event_read(struct perf_event *event); -extern int security_perf_event_write(struct perf_event *event); -#else -static inline int security_perf_event_open(struct perf_event_attr *attr, - int type) -{ - return 0; -} - -static inline int security_perf_event_alloc(struct perf_event *event) -{ - return 0; -} - -static inline void security_perf_event_free(struct perf_event *event) -{ -} - -static inline int security_perf_event_read(struct perf_event *event) -{ - return 0; -} - -static inline int security_perf_event_write(struct perf_event *event) -{ - return 0; -} -#endif /* CONFIG_SECURITY */ -#endif /* CONFIG_PERF_EVENTS */ #endif /* ! __LINUX_SECURITY_H */ + diff --git a/include/linux/selection.h b/include/linux/selection.h index 170ef28ff2..8e4624efdb 100644 --- a/include/linux/selection.h +++ b/include/linux/selection.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ /* * selection.h * @@ -12,20 +11,17 @@ #include struct tty_struct; -struct vc_data; + +extern struct vc_data *sel_cons; +struct tty_struct; extern void clear_selection(void); -extern int set_selection_user(const struct tiocl_selection __user *sel, - struct tty_struct *tty); -extern int set_selection_kernel(struct tiocl_selection *v, - struct tty_struct *tty); +extern int set_selection(const struct tiocl_selection __user *sel, struct tty_struct *tty); extern int paste_selection(struct tty_struct *tty); extern int sel_loadlut(char __user *p); extern int mouse_reporting(void); extern void mouse_report(struct tty_struct * tty, int butt, int mrx, int mry); -bool vc_is_sel(struct vc_data *vc); - extern int console_blanked; extern const unsigned char color_table[]; @@ -33,24 +29,16 @@ extern unsigned char default_red[]; extern unsigned char default_grn[]; extern unsigned char default_blu[]; -extern unsigned short *screen_pos(const struct vc_data *vc, int w_offset, - bool viewed); -extern u16 screen_glyph(const 
struct vc_data *vc, int offset); -extern u32 screen_glyph_unicode(const struct vc_data *vc, int offset); +extern unsigned short *screen_pos(struct vc_data *vc, int w_offset, int viewed); +extern u16 screen_glyph(struct vc_data *vc, int offset); extern void complement_pos(struct vc_data *vc, int offset); -extern void invert_screen(struct vc_data *vc, int offset, int count, bool viewed); +extern void invert_screen(struct vc_data *vc, int offset, int count, int shift); -extern void getconsxy(const struct vc_data *vc, unsigned char xy[static 2]); -extern void putconsxy(struct vc_data *vc, unsigned char xy[static const 2]); +extern void getconsxy(struct vc_data *vc, unsigned char *p); +extern void putconsxy(struct vc_data *vc, unsigned char *p); -extern u16 vcs_scr_readw(const struct vc_data *vc, const u16 *org); +extern u16 vcs_scr_readw(struct vc_data *vc, const u16 *org); extern void vcs_scr_writew(struct vc_data *vc, u16 val, u16 *org); extern void vcs_scr_updated(struct vc_data *vc); -extern int vc_uniscr_check(struct vc_data *vc); -extern void vc_uniscr_copy_line(const struct vc_data *vc, void *dest, - bool viewed, - unsigned int row, unsigned int col, - unsigned int nr); - #endif diff --git a/include/linux/selinux.h b/include/linux/selinux.h new file mode 100644 index 0000000000..44f4596126 --- /dev/null +++ b/include/linux/selinux.h @@ -0,0 +1,35 @@ +/* + * SELinux services exported to the rest of the kernel. + * + * Author: James Morris + * + * Copyright (C) 2005 Red Hat, Inc., James Morris + * Copyright (C) 2006 Trusted Computer Solutions, Inc. + * Copyright (C) 2006 IBM Corporation, Timothy R. Chavez + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2, + * as published by the Free Software Foundation. 
+ */ +#ifndef _LINUX_SELINUX_H +#define _LINUX_SELINUX_H + +struct selinux_audit_rule; +struct audit_context; +struct kern_ipc_perm; + +#ifdef CONFIG_SECURITY_SELINUX + +/** + * selinux_is_enabled - is SELinux enabled? + */ +bool selinux_is_enabled(void); +#else + +static inline bool selinux_is_enabled(void) +{ + return false; +} +#endif /* CONFIG_SECURITY_SELINUX */ + +#endif /* _LINUX_SELINUX_H */ diff --git a/include/linux/sem.h b/include/linux/sem.h index 5608a500c4..c68948cac6 100644 --- a/include/linux/sem.h +++ b/include/linux/sem.h @@ -1,11 +1,28 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_SEM_H #define _LINUX_SEM_H +#include +#include +#include #include struct task_struct; -struct sem_undo_list; + +/* One sem_array data structure for each set of semaphores in the system. */ +struct sem_array { + struct kern_ipc_perm ____cacheline_aligned_in_smp + sem_perm; /* permissions .. see ipc.h */ + time_t sem_ctime; /* last change time */ + struct sem *sem_base; /* ptr to first semaphore in array */ + struct list_head pending_alter; /* pending operations */ + /* that alter the array */ + struct list_head pending_const; /* pending complex operations */ + /* that do not alter semvals */ + struct list_head list_id; /* undo requests on this array */ + int sem_nsems; /* no. 
of semaphores in array */ + int complex_count; /* pending complex operations */ + bool complex_mode; /* no parallel simple ops */ +} __randomize_layout; #ifdef CONFIG_SYSVIPC diff --git a/include/linux/semaphore.h b/include/linux/semaphore.h index 6694d0019a..e8952092de 100644 --- a/include/linux/semaphore.h +++ b/include/linux/semaphore.h @@ -1,9 +1,10 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright (c) 2008 Intel Corporation * Author: Matthew Wilcox * - * Please see kernel/locking/semaphore.c for documentation of these functions + * Distributed under the terms of the GNU GPL, version 2 + * + * Please see kernel/semaphore.c for documentation of these functions */ #ifndef __LINUX_SEMAPHORE_H #define __LINUX_SEMAPHORE_H @@ -36,7 +37,7 @@ static inline void sema_init(struct semaphore *sem, int val) } extern void down(struct semaphore *sem); -extern int __must_check down_interruptible(struct semaphore *sem); +extern int __must_check down_interruptible(struct semaphore *sem) __intentional_overflow(-1); extern int __must_check down_killable(struct semaphore *sem); extern int __must_check down_trylock(struct semaphore *sem); extern int __must_check down_timeout(struct semaphore *sem, long jiffies); diff --git a/include/linux/seq_buf.h b/include/linux/seq_buf.h index 5b31c51479..fcfd1023fc 100644 --- a/include/linux/seq_buf.h +++ b/include/linux/seq_buf.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_SEQ_BUF_H #define _LINUX_SEQ_BUF_H @@ -17,7 +16,7 @@ * @readpos: The next position to read in the buffer. 
*/ struct seq_buf { - char *buffer; + unsigned char *buffer; size_t size; size_t len; loff_t readpos; @@ -30,7 +29,7 @@ static inline void seq_buf_clear(struct seq_buf *s) } static inline void -seq_buf_init(struct seq_buf *s, char *buf, unsigned int size) +seq_buf_init(struct seq_buf *s, unsigned char *buf, unsigned int size) { s->buffer = buf; s->size = size; @@ -71,31 +70,6 @@ static inline unsigned int seq_buf_used(struct seq_buf *s) return min(s->len, s->size); } -/** - * seq_buf_terminate - Make sure buffer is nul terminated - * @s: the seq_buf descriptor to terminate. - * - * This makes sure that the buffer in @s is nul terminated and - * safe to read as a string. - * - * Note, if this is called when the buffer has overflowed, then - * the last byte of the buffer is zeroed, and the len will still - * point passed it. - * - * After this function is called, s->buffer is safe to use - * in string operations. - */ -static inline void seq_buf_terminate(struct seq_buf *s) -{ - if (WARN_ON(s->size == 0)) - return; - - if (seq_buf_buffer_left(s)) - s->buffer[s->len] = 0; - else - s->buffer[s->size - 1] = 0; -} - /** * seq_buf_get_buf - get buffer to write arbitrary data to * @s: the seq_buf handle @@ -104,7 +78,7 @@ static inline void seq_buf_terminate(struct seq_buf *s) * Return the number of bytes available in the buffer, or zero if * there's no space. 
*/ -static inline size_t seq_buf_get_buf(struct seq_buf *s, char **bufp) +static inline size_t seq_buf_get_buf(struct seq_buf *s, unsigned char **bufp) { WARN_ON(s->len > s->size + 1); @@ -150,9 +124,6 @@ extern int seq_buf_putmem(struct seq_buf *s, const void *mem, unsigned int len); extern int seq_buf_putmem_hex(struct seq_buf *s, const void *mem, unsigned int len); extern int seq_buf_path(struct seq_buf *s, const struct path *path, const char *esc); -extern int seq_buf_hex_dump(struct seq_buf *s, const char *prefix_str, - int prefix_type, int rowsize, int groupsize, - const void *buf, size_t len, bool ascii); #ifdef CONFIG_BINARY_PRINTF extern int diff --git a/include/linux/seq_file.h b/include/linux/seq_file.h index dd99569595..44b0ad9516 100644 --- a/include/linux/seq_file.h +++ b/include/linux/seq_file.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_SEQ_FILE_H #define _LINUX_SEQ_FILE_H @@ -21,10 +20,14 @@ struct seq_file { size_t pad_until; loff_t index; loff_t read_pos; + u64 version; struct mutex lock; const struct seq_operations *op; int poll_event; const struct file *file; +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP + u64 exec_id; +#endif void *private; }; @@ -34,6 +37,7 @@ struct seq_operations { void * (*next) (struct seq_file *m, void *v, loff_t *pos); int (*show) (struct seq_file *m, void *v); }; +typedef struct seq_operations __no_const seq_operations_no_const; #define SEQ_SKIP 1 @@ -106,8 +110,8 @@ void seq_pad(struct seq_file *m, char c); char *mangle_path(char *s, const char *p, const char *esc); int seq_open(struct file *, const struct seq_operations *); +int seq_open_restrict(struct file *, const struct seq_operations *); ssize_t seq_read(struct file *, char __user *, size_t, loff_t *); -ssize_t seq_read_iter(struct kiocb *iocb, struct iov_iter *iter); loff_t seq_lseek(struct file *, loff_t, int); int seq_release(struct inode *, struct file *); int seq_write(struct seq_file *seq, const void *data, size_t len); @@ -118,23 +122,9 
@@ __printf(2, 3) void seq_printf(struct seq_file *m, const char *fmt, ...); void seq_putc(struct seq_file *m, char c); void seq_puts(struct seq_file *m, const char *s); -void seq_put_decimal_ull_width(struct seq_file *m, const char *delimiter, - unsigned long long num, unsigned int width); void seq_put_decimal_ull(struct seq_file *m, const char *delimiter, unsigned long long num); void seq_put_decimal_ll(struct seq_file *m, const char *delimiter, long long num); -void seq_put_hex_ll(struct seq_file *m, const char *delimiter, - unsigned long long v, unsigned int width); - -void seq_escape_mem(struct seq_file *m, const char *src, size_t len, - unsigned int flags, const char *esc); - -static inline void seq_escape_str(struct seq_file *m, const char *src, - unsigned int flags, const char *esc) -{ - seq_escape_mem(m, src, strlen(src), flags, esc); -} - void seq_escape(struct seq_file *m, const char *s, const char *esc); void seq_hex_dump(struct seq_file *m, const char *prefix_str, int prefix_type, @@ -148,62 +138,13 @@ int seq_path_root(struct seq_file *m, const struct path *path, const struct path *root, const char *esc); int single_open(struct file *, int (*)(struct seq_file *, void *), void *); +int single_open_restrict(struct file *, int (*)(struct seq_file *, void *), void *); int single_open_size(struct file *, int (*)(struct seq_file *, void *), void *, size_t); int single_release(struct inode *, struct file *); void *__seq_open_private(struct file *, const struct seq_operations *, int); int seq_open_private(struct file *, const struct seq_operations *, int); int seq_release_private(struct inode *, struct file *); -#ifdef CONFIG_BINARY_PRINTF -void seq_bprintf(struct seq_file *m, const char *f, const u32 *binary); -#endif - -#define DEFINE_SEQ_ATTRIBUTE(__name) \ -static int __name ## _open(struct inode *inode, struct file *file) \ -{ \ - int ret = seq_open(file, &__name ## _sops); \ - if (!ret && inode->i_private) { \ - struct seq_file *seq_f = 
file->private_data; \ - seq_f->private = inode->i_private; \ - } \ - return ret; \ -} \ - \ -static const struct file_operations __name ## _fops = { \ - .owner = THIS_MODULE, \ - .open = __name ## _open, \ - .read = seq_read, \ - .llseek = seq_lseek, \ - .release = seq_release, \ -} - -#define DEFINE_SHOW_ATTRIBUTE(__name) \ -static int __name ## _open(struct inode *inode, struct file *file) \ -{ \ - return single_open(file, __name ## _show, inode->i_private); \ -} \ - \ -static const struct file_operations __name ## _fops = { \ - .owner = THIS_MODULE, \ - .open = __name ## _open, \ - .read = seq_read, \ - .llseek = seq_lseek, \ - .release = single_release, \ -} - -#define DEFINE_PROC_SHOW_ATTRIBUTE(__name) \ -static int __name ## _open(struct inode *inode, struct file *file) \ -{ \ - return single_open(file, __name ## _show, inode->i_private); \ -} \ - \ -static const struct proc_ops __name ## _proc_ops = { \ - .proc_open = __name ## _open, \ - .proc_read = seq_read, \ - .proc_lseek = seq_lseek, \ - .proc_release = single_release, \ -} - static inline struct user_namespace *seq_user_ns(struct seq_file *seq) { #ifdef CONFIG_USER_NS @@ -285,5 +226,4 @@ extern struct hlist_node *seq_hlist_start_percpu(struct hlist_head __percpu *hea extern struct hlist_node *seq_hlist_next_percpu(void *v, struct hlist_head __percpu *head, int *cpu, loff_t *pos); -void seq_file_init(void); #endif diff --git a/include/linux/seq_file_net.h b/include/linux/seq_file_net.h index 0fdbe1ddd8..32c89bbe24 100644 --- a/include/linux/seq_file_net.h +++ b/include/linux/seq_file_net.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef __SEQ_FILE_NET_H__ #define __SEQ_FILE_NET_H__ @@ -13,6 +12,12 @@ struct seq_net_private { #endif }; +int seq_open_net(struct inode *, struct file *, + const struct seq_operations *, int); +int single_open_net(struct inode *, struct file *file, + int (*show)(struct seq_file *, void *)); +int seq_release_net(struct inode *, struct file *); +int 
single_release_net(struct inode *, struct file *); static inline struct net *seq_file_net(struct seq_file *seq) { #ifdef CONFIG_NET_NS @@ -22,17 +27,4 @@ static inline struct net *seq_file_net(struct seq_file *seq) #endif } -/* - * This one is needed for proc_create_net_single since net is stored directly - * in private not as a struct i.e. seq_file_net can't be used. - */ -static inline struct net *seq_file_single_net(struct seq_file *seq) -{ -#ifdef CONFIG_NET_NS - return (struct net *)seq->private; -#else - return &init_net; -#endif -} - #endif diff --git a/include/linux/seqlock.h b/include/linux/seqlock.h index 37ded6b8fe..2379f94b24 100644 --- a/include/linux/seqlock.h +++ b/include/linux/seqlock.h @@ -1,66 +1,48 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef __LINUX_SEQLOCK_H #define __LINUX_SEQLOCK_H - /* - * seqcount_t / seqlock_t - a reader-writer consistency mechanism with - * lockless readers (read-only retry loops), and no writer starvation. + * Reader/writer consistent mechanism without starving writers. This type of + * lock for data where the reader wants a consistent set of information + * and is willing to retry if the information changes. There are two types + * of readers: + * 1. Sequence readers which never block a writer but they may have to retry + * if a writer is in progress by detecting change in sequence number. + * Writers do not wait for a sequence reader. + * 2. Locking readers which will wait if a writer or another locking reader + * is in progress. A locking reader in progress will also block a writer + * from going forward. Unlike the regular rwlock, the read lock here is + * exclusive so that only one locking reader can get it. * - * See Documentation/locking/seqlock.rst + * This is not as cache friendly as brlock. Also, this may not work well + * for data that contains pointers, because any writer could + * invalidate a pointer that a reader was following. 
* - * Copyrights: - * - Based on x86_64 vsyscall gettimeofday: Keith Owens, Andrea Arcangeli - * - Sequence counters with associated locks, (C) 2020 Linutronix GmbH + * Expected non-blocking reader usage: + * do { + * seq = read_seqbegin(&foo); + * ... + * } while (read_seqretry(&foo, seq)); + * + * + * On non-SMP the spin locks disappear but the writer still needs + * to increment the sequence variables because an interrupt routine could + * change the state of the data. + * + * Based on x86_64 vsyscall gettimeofday + * by Keith Owens and Andrea Arcangeli */ -#include -#include -#include -#include -#include -#include #include - +#include +#include +#include #include /* - * The seqlock seqcount_t interface does not prescribe a precise sequence of - * read begin/retry/end. For readers, typically there is a call to - * read_seqcount_begin() and read_seqcount_retry(), however, there are more - * esoteric cases which do not follow this pattern. - * - * As a consequence, we take the following best-effort approach for raw usage - * via seqcount_t under KCSAN: upon beginning a seq-reader critical section, - * pessimistically mark the next KCSAN_SEQLOCK_REGION_MAX memory accesses as - * atomics; if there is a matching read_seqcount_retry() call, no following - * memory operations are considered atomic. Usage of the seqlock_t interface - * is not affected. - */ -#define KCSAN_SEQLOCK_REGION_MAX 1000 - -/* - * Sequence counters (seqcount_t) - * - * This is the raw counting mechanism, without any writer protection. - * - * Write side critical sections must be serialized and non-preemptible. - * - * If readers can be invoked from hardirq or softirq contexts, - * interrupts or bottom halves must also be respectively disabled before - * entering the write section. - * - * This mechanism can't be used if the protected data contains pointers, - * as the writer can invalidate a pointer that a reader is following. 
- * - * If the write serialization mechanism is one of the common kernel - * locking primitives, use a sequence counter with associated lock - * (seqcount_LOCKNAME_t) instead. - * - * If it's desired to automatically handle the sequence counter writer - * serialization and non-preemptibility requirements, use a sequential - * lock (seqlock_t) instead. - * - * See Documentation/locking/seqlock.rst + * Version using sequence counter only. + * This can be used when code has its own mutex protecting the + * updating starting before the write_seqcountbeqin() and ending + * after the write_seqcount_end(). */ typedef struct seqcount { unsigned sequence; @@ -80,18 +62,13 @@ static inline void __seqcount_init(seqcount_t *s, const char *name, } #ifdef CONFIG_DEBUG_LOCK_ALLOC +# define SEQCOUNT_DEP_MAP_INIT(lockname) \ + .dep_map = { .name = #lockname } \ -# define SEQCOUNT_DEP_MAP_INIT(lockname) \ - .dep_map = { .name = #lockname } - -/** - * seqcount_init() - runtime initializer for seqcount_t - * @s: Pointer to the seqcount_t instance - */ -# define seqcount_init(s) \ - do { \ - static struct lock_class_key __key; \ - __seqcount_init((s), #s, &__key); \ +# define seqcount_init(s) \ + do { \ + static struct lock_class_key __key; \ + __seqcount_init((s), #s, &__key); \ } while (0) static inline void seqcount_lockdep_reader_access(const seqcount_t *s) @@ -101,7 +78,7 @@ static inline void seqcount_lockdep_reader_access(const seqcount_t *s) local_irq_save(flags); seqcount_acquire_read(&l->dep_map, 0, 0, _RET_IP_); - seqcount_release(&l->dep_map, _RET_IP_); + seqcount_release(&l->dep_map, 1, _RET_IP_); local_irq_restore(flags); } @@ -111,210 +88,13 @@ static inline void seqcount_lockdep_reader_access(const seqcount_t *s) # define seqcount_lockdep_reader_access(x) #endif -/** - * SEQCNT_ZERO() - static initializer for seqcount_t - * @name: Name of the seqcount_t instance - */ -#define SEQCNT_ZERO(name) { .sequence = 0, SEQCOUNT_DEP_MAP_INIT(name) } +#define SEQCNT_ZERO(lockname) 
{ .sequence = 0, SEQCOUNT_DEP_MAP_INIT(lockname)} -/* - * Sequence counters with associated locks (seqcount_LOCKNAME_t) - * - * A sequence counter which associates the lock used for writer - * serialization at initialization time. This enables lockdep to validate - * that the write side critical section is properly serialized. - * - * For associated locks which do not implicitly disable preemption, - * preemption protection is enforced in the write side function. - * - * Lockdep is never used in any for the raw write variants. - * - * See Documentation/locking/seqlock.rst - */ - -/* - * For PREEMPT_RT, seqcount_LOCKNAME_t write side critical sections cannot - * disable preemption. It can lead to higher latencies, and the write side - * sections will not be able to acquire locks which become sleeping locks - * (e.g. spinlock_t). - * - * To remain preemptible while avoiding a possible livelock caused by the - * reader preempting the writer, use a different technique: let the reader - * detect if a seqcount_LOCKNAME_t writer is in progress. If that is the - * case, acquire then release the associated LOCKNAME writer serialization - * lock. This will allow any possibly-preempted writer to make progress - * until the end of its writer serialization lock critical section. - * - * This lock-unlock technique must be implemented for all of PREEMPT_RT - * sleeping locks. See Documentation/locking/locktypes.rst - */ -#if defined(CONFIG_LOCKDEP) || defined(CONFIG_PREEMPT_RT) -#define __SEQ_LOCK(expr) expr -#else -#define __SEQ_LOCK(expr) -#endif - -/* - * typedef seqcount_LOCKNAME_t - sequence counter with LOCKNAME associated - * @seqcount: The real sequence counter - * @lock: Pointer to the associated lock - * - * A plain sequence counter with external writer synchronization by - * LOCKNAME @lock. The lock is associated to the sequence counter in the - * static initializer or init function. 
This enables lockdep to validate - * that the write side critical section is properly serialized. - * - * LOCKNAME: raw_spinlock, spinlock, rwlock, mutex, or ww_mutex. - */ - -/* - * seqcount_LOCKNAME_init() - runtime initializer for seqcount_LOCKNAME_t - * @s: Pointer to the seqcount_LOCKNAME_t instance - * @lock: Pointer to the associated lock - */ - -#define seqcount_LOCKNAME_init(s, _lock, lockname) \ - do { \ - seqcount_##lockname##_t *____s = (s); \ - seqcount_init(&____s->seqcount); \ - __SEQ_LOCK(____s->lock = (_lock)); \ - } while (0) - -#define seqcount_raw_spinlock_init(s, lock) seqcount_LOCKNAME_init(s, lock, raw_spinlock) -#define seqcount_spinlock_init(s, lock) seqcount_LOCKNAME_init(s, lock, spinlock) -#define seqcount_rwlock_init(s, lock) seqcount_LOCKNAME_init(s, lock, rwlock) -#define seqcount_mutex_init(s, lock) seqcount_LOCKNAME_init(s, lock, mutex) -#define seqcount_ww_mutex_init(s, lock) seqcount_LOCKNAME_init(s, lock, ww_mutex) - -/* - * SEQCOUNT_LOCKNAME() - Instantiate seqcount_LOCKNAME_t and helpers - * seqprop_LOCKNAME_*() - Property accessors for seqcount_LOCKNAME_t - * - * @lockname: "LOCKNAME" part of seqcount_LOCKNAME_t - * @locktype: LOCKNAME canonical C data type - * @preemptible: preemptibility of above locktype - * @lockmember: argument for lockdep_assert_held() - * @lockbase: associated lock release function (prefix only) - * @lock_acquire: associated lock acquisition function (full call) - */ -#define SEQCOUNT_LOCKNAME(lockname, locktype, preemptible, lockmember, lockbase, lock_acquire) \ -typedef struct seqcount_##lockname { \ - seqcount_t seqcount; \ - __SEQ_LOCK(locktype *lock); \ -} seqcount_##lockname##_t; \ - \ -static __always_inline seqcount_t * \ -__seqprop_##lockname##_ptr(seqcount_##lockname##_t *s) \ -{ \ - return &s->seqcount; \ -} \ - \ -static __always_inline unsigned \ -__seqprop_##lockname##_sequence(const seqcount_##lockname##_t *s) \ -{ \ - unsigned seq = READ_ONCE(s->seqcount.sequence); \ - \ - if 
(!IS_ENABLED(CONFIG_PREEMPT_RT)) \ - return seq; \ - \ - if (preemptible && unlikely(seq & 1)) { \ - __SEQ_LOCK(lock_acquire); \ - __SEQ_LOCK(lockbase##_unlock(s->lock)); \ - \ - /* \ - * Re-read the sequence counter since the (possibly \ - * preempted) writer made progress. \ - */ \ - seq = READ_ONCE(s->seqcount.sequence); \ - } \ - \ - return seq; \ -} \ - \ -static __always_inline bool \ -__seqprop_##lockname##_preemptible(const seqcount_##lockname##_t *s) \ -{ \ - if (!IS_ENABLED(CONFIG_PREEMPT_RT)) \ - return preemptible; \ - \ - /* PREEMPT_RT relies on the above LOCK+UNLOCK */ \ - return false; \ -} \ - \ -static __always_inline void \ -__seqprop_##lockname##_assert(const seqcount_##lockname##_t *s) \ -{ \ - __SEQ_LOCK(lockdep_assert_held(lockmember)); \ -} - -/* - * __seqprop() for seqcount_t - */ - -static inline seqcount_t *__seqprop_ptr(seqcount_t *s) -{ - return s; -} - -static inline unsigned __seqprop_sequence(const seqcount_t *s) -{ - return READ_ONCE(s->sequence); -} - -static inline bool __seqprop_preemptible(const seqcount_t *s) -{ - return false; -} - -static inline void __seqprop_assert(const seqcount_t *s) -{ - lockdep_assert_preemption_disabled(); -} - -#define __SEQ_RT IS_ENABLED(CONFIG_PREEMPT_RT) - -SEQCOUNT_LOCKNAME(raw_spinlock, raw_spinlock_t, false, s->lock, raw_spin, raw_spin_lock(s->lock)) -SEQCOUNT_LOCKNAME(spinlock, spinlock_t, __SEQ_RT, s->lock, spin, spin_lock(s->lock)) -SEQCOUNT_LOCKNAME(rwlock, rwlock_t, __SEQ_RT, s->lock, read, read_lock(s->lock)) -SEQCOUNT_LOCKNAME(mutex, struct mutex, true, s->lock, mutex, mutex_lock(s->lock)) -SEQCOUNT_LOCKNAME(ww_mutex, struct ww_mutex, true, &s->lock->base, ww_mutex, ww_mutex_lock(s->lock, NULL)) - -/* - * SEQCNT_LOCKNAME_ZERO - static initializer for seqcount_LOCKNAME_t - * @name: Name of the seqcount_LOCKNAME_t instance - * @lock: Pointer to the associated LOCKNAME - */ - -#define SEQCOUNT_LOCKNAME_ZERO(seq_name, assoc_lock) { \ - .seqcount = SEQCNT_ZERO(seq_name.seqcount), \ - 
__SEQ_LOCK(.lock = (assoc_lock)) \ -} - -#define SEQCNT_RAW_SPINLOCK_ZERO(name, lock) SEQCOUNT_LOCKNAME_ZERO(name, lock) -#define SEQCNT_SPINLOCK_ZERO(name, lock) SEQCOUNT_LOCKNAME_ZERO(name, lock) -#define SEQCNT_RWLOCK_ZERO(name, lock) SEQCOUNT_LOCKNAME_ZERO(name, lock) -#define SEQCNT_MUTEX_ZERO(name, lock) SEQCOUNT_LOCKNAME_ZERO(name, lock) -#define SEQCNT_WW_MUTEX_ZERO(name, lock) SEQCOUNT_LOCKNAME_ZERO(name, lock) - -#define __seqprop_case(s, lockname, prop) \ - seqcount_##lockname##_t: __seqprop_##lockname##_##prop((void *)(s)) - -#define __seqprop(s, prop) _Generic(*(s), \ - seqcount_t: __seqprop_##prop((void *)(s)), \ - __seqprop_case((s), raw_spinlock, prop), \ - __seqprop_case((s), spinlock, prop), \ - __seqprop_case((s), rwlock, prop), \ - __seqprop_case((s), mutex, prop), \ - __seqprop_case((s), ww_mutex, prop)) - -#define seqprop_ptr(s) __seqprop(s, ptr) -#define seqprop_sequence(s) __seqprop(s, sequence) -#define seqprop_preemptible(s) __seqprop(s, preemptible) -#define seqprop_assert(s) __seqprop(s, assert) /** - * __read_seqcount_begin() - begin a seqcount_t read section w/o barrier - * @s: Pointer to seqcount_t or any of the seqcount_LOCKNAME_t variants + * __read_seqcount_begin - begin a seq-read critical section (without barrier) + * @s: pointer to seqcount_t + * Returns: count to be passed to read_seqcount_retry * * __read_seqcount_begin is like read_seqcount_begin, but has no smp_rmb() * barrier. Callers should ensure that smp_rmb() or equivalent ordering is @@ -323,96 +103,93 @@ SEQCOUNT_LOCKNAME(ww_mutex, struct ww_mutex, true, &s->lock->base, ww_mu * * Use carefully, only in critical code, and comment how the barrier is * provided. 
- * - * Return: count to be passed to read_seqcount_retry() */ -#define __read_seqcount_begin(s) \ -({ \ - unsigned __seq; \ - \ - while ((__seq = seqprop_sequence(s)) & 1) \ - cpu_relax(); \ - \ - kcsan_atomic_next(KCSAN_SEQLOCK_REGION_MAX); \ - __seq; \ -}) +static inline unsigned __read_seqcount_begin(const seqcount_t *s) +{ + unsigned ret; + +repeat: + ret = READ_ONCE(s->sequence); + if (unlikely(ret & 1)) { + cpu_relax(); + goto repeat; + } + return ret; +} /** - * raw_read_seqcount_begin() - begin a seqcount_t read section w/o lockdep - * @s: Pointer to seqcount_t or any of the seqcount_LOCKNAME_t variants - * - * Return: count to be passed to read_seqcount_retry() - */ -#define raw_read_seqcount_begin(s) \ -({ \ - unsigned _seq = __read_seqcount_begin(s); \ - \ - smp_rmb(); \ - _seq; \ -}) - -/** - * read_seqcount_begin() - begin a seqcount_t read critical section - * @s: Pointer to seqcount_t or any of the seqcount_LOCKNAME_t variants - * - * Return: count to be passed to read_seqcount_retry() - */ -#define read_seqcount_begin(s) \ -({ \ - seqcount_lockdep_reader_access(seqprop_ptr(s)); \ - raw_read_seqcount_begin(s); \ -}) - -/** - * raw_read_seqcount() - read the raw seqcount_t counter value - * @s: Pointer to seqcount_t or any of the seqcount_LOCKNAME_t variants + * raw_read_seqcount - Read the raw seqcount + * @s: pointer to seqcount_t + * Returns: count to be passed to read_seqcount_retry * * raw_read_seqcount opens a read critical section of the given - * seqcount_t, without any lockdep checking, and without checking or - * masking the sequence counter LSB. Calling code is responsible for - * handling that. - * - * Return: count to be passed to read_seqcount_retry() + * seqcount without any lockdep checking and without checking or + * masking the LSB. Calling code is responsible for handling that. 
*/ -#define raw_read_seqcount(s) \ -({ \ - unsigned __seq = seqprop_sequence(s); \ - \ - smp_rmb(); \ - kcsan_atomic_next(KCSAN_SEQLOCK_REGION_MAX); \ - __seq; \ -}) +static inline unsigned raw_read_seqcount(const seqcount_t *s) +{ + unsigned ret = READ_ONCE(s->sequence); + smp_rmb(); + return ret; +} /** - * raw_seqcount_begin() - begin a seqcount_t read critical section w/o - * lockdep and w/o counter stabilization - * @s: Pointer to seqcount_t or any of the seqcount_LOCKNAME_t variants + * raw_read_seqcount_begin - start seq-read critical section w/o lockdep + * @s: pointer to seqcount_t + * Returns: count to be passed to read_seqcount_retry * - * raw_seqcount_begin opens a read critical section of the given - * seqcount_t. Unlike read_seqcount_begin(), this function will not wait - * for the count to stabilize. If a writer is active when it begins, it - * will fail the read_seqcount_retry() at the end of the read critical - * section instead of stabilizing at the beginning of it. - * - * Use this only in special kernel hot paths where the read section is - * small and has a high probability of success through other external - * means. It will save a single branching instruction. - * - * Return: count to be passed to read_seqcount_retry() + * raw_read_seqcount_begin opens a read critical section of the given + * seqcount, but without any lockdep checking. Validity of the critical + * section is tested by checking read_seqcount_retry function. */ -#define raw_seqcount_begin(s) \ -({ \ - /* \ - * If the counter is odd, let read_seqcount_retry() fail \ - * by decrementing the counter. 
\ - */ \ - raw_read_seqcount(s) & ~1; \ -}) +static inline unsigned raw_read_seqcount_begin(const seqcount_t *s) +{ + unsigned ret = __read_seqcount_begin(s); + smp_rmb(); + return ret; +} /** - * __read_seqcount_retry() - end a seqcount_t read section w/o barrier - * @s: Pointer to seqcount_t or any of the seqcount_LOCKNAME_t variants - * @start: count, from read_seqcount_begin() + * read_seqcount_begin - begin a seq-read critical section + * @s: pointer to seqcount_t + * Returns: count to be passed to read_seqcount_retry + * + * read_seqcount_begin opens a read critical section of the given seqcount. + * Validity of the critical section is tested by checking read_seqcount_retry + * function. + */ +static inline unsigned read_seqcount_begin(const seqcount_t *s) +{ + seqcount_lockdep_reader_access(s); + return raw_read_seqcount_begin(s); +} + +/** + * raw_seqcount_begin - begin a seq-read critical section + * @s: pointer to seqcount_t + * Returns: count to be passed to read_seqcount_retry + * + * raw_seqcount_begin opens a read critical section of the given seqcount. + * Validity of the critical section is tested by checking read_seqcount_retry + * function. + * + * Unlike read_seqcount_begin(), this function will not wait for the count + * to stabilize. If a writer is active when we begin, we will fail the + * read_seqcount_retry() instead of stabilizing at the beginning of the + * critical section. + */ +static inline unsigned raw_seqcount_begin(const seqcount_t *s) +{ + unsigned ret = READ_ONCE(s->sequence); + smp_rmb(); + return ret & ~1; +} + +/** + * __read_seqcount_retry - end a seq-read critical section (without barrier) + * @s: pointer to seqcount_t + * @start: count, from read_seqcount_begin + * Returns: 1 if retry is required, else 0 * * __read_seqcount_retry is like read_seqcount_retry, but has no smp_rmb() * barrier. 
Callers should ensure that smp_rmb() or equivalent ordering is @@ -421,287 +198,94 @@ SEQCOUNT_LOCKNAME(ww_mutex, struct ww_mutex, true, &s->lock->base, ww_mu * * Use carefully, only in critical code, and comment how the barrier is * provided. - * - * Return: true if a read section retry is required, else false */ -#define __read_seqcount_retry(s, start) \ - do___read_seqcount_retry(seqprop_ptr(s), start) - -static inline int do___read_seqcount_retry(const seqcount_t *s, unsigned start) +static inline int __read_seqcount_retry(const seqcount_t *s, unsigned start) { - kcsan_atomic_next(0); - return unlikely(READ_ONCE(s->sequence) != start); + return unlikely(s->sequence != start); } /** - * read_seqcount_retry() - end a seqcount_t read critical section - * @s: Pointer to seqcount_t or any of the seqcount_LOCKNAME_t variants - * @start: count, from read_seqcount_begin() + * read_seqcount_retry - end a seq-read critical section + * @s: pointer to seqcount_t + * @start: count, from read_seqcount_begin + * Returns: 1 if retry is required, else 0 * - * read_seqcount_retry closes the read critical section of given - * seqcount_t. If the critical section was invalid, it must be ignored - * (and typically retried). - * - * Return: true if a read section retry is required, else false + * read_seqcount_retry closes a read critical section of the given seqcount. + * If the critical section was invalid, it must be ignored (and typically + * retried). 
*/ -#define read_seqcount_retry(s, start) \ - do_read_seqcount_retry(seqprop_ptr(s), start) - -static inline int do_read_seqcount_retry(const seqcount_t *s, unsigned start) +static inline int read_seqcount_retry(const seqcount_t *s, unsigned start) { smp_rmb(); - return do___read_seqcount_retry(s, start); + return __read_seqcount_retry(s, start); } -/** - * raw_write_seqcount_begin() - start a seqcount_t write section w/o lockdep - * @s: Pointer to seqcount_t or any of the seqcount_LOCKNAME_t variants - * - * Context: check write_seqcount_begin() - */ -#define raw_write_seqcount_begin(s) \ -do { \ - if (seqprop_preemptible(s)) \ - preempt_disable(); \ - \ - do_raw_write_seqcount_begin(seqprop_ptr(s)); \ -} while (0) -static inline void do_raw_write_seqcount_begin(seqcount_t *s) + +static inline void raw_write_seqcount_begin(seqcount_t *s) { - kcsan_nestable_atomic_begin(); s->sequence++; smp_wmb(); } -/** - * raw_write_seqcount_end() - end a seqcount_t write section w/o lockdep - * @s: Pointer to seqcount_t or any of the seqcount_LOCKNAME_t variants - * - * Context: check write_seqcount_end() - */ -#define raw_write_seqcount_end(s) \ -do { \ - do_raw_write_seqcount_end(seqprop_ptr(s)); \ - \ - if (seqprop_preemptible(s)) \ - preempt_enable(); \ -} while (0) - -static inline void do_raw_write_seqcount_end(seqcount_t *s) +static inline void raw_write_seqcount_end(seqcount_t *s) { smp_wmb(); s->sequence++; - kcsan_nestable_atomic_end(); } /** - * write_seqcount_begin_nested() - start a seqcount_t write section with - * custom lockdep nesting level - * @s: Pointer to seqcount_t or any of the seqcount_LOCKNAME_t variants - * @subclass: lockdep nesting level + * raw_write_seqcount_barrier - do a seq write barrier + * @s: pointer to seqcount_t * - * See Documentation/locking/lockdep-design.rst - * Context: check write_seqcount_begin() - */ -#define write_seqcount_begin_nested(s, subclass) \ -do { \ - seqprop_assert(s); \ - \ - if (seqprop_preemptible(s)) \ - 
preempt_disable(); \ - \ - do_write_seqcount_begin_nested(seqprop_ptr(s), subclass); \ -} while (0) - -static inline void do_write_seqcount_begin_nested(seqcount_t *s, int subclass) -{ - do_raw_write_seqcount_begin(s); - seqcount_acquire(&s->dep_map, subclass, 0, _RET_IP_); -} - -/** - * write_seqcount_begin() - start a seqcount_t write side critical section - * @s: Pointer to seqcount_t or any of the seqcount_LOCKNAME_t variants + * This can be used to provide an ordering guarantee instead of the + * usual consistency guarantee. It is one wmb cheaper, because we can + * collapse the two back-to-back wmb()s. * - * Context: sequence counter write side sections must be serialized and - * non-preemptible. Preemption will be automatically disabled if and - * only if the seqcount write serialization lock is associated, and - * preemptible. If readers can be invoked from hardirq or softirq - * context, interrupts or bottom halves must be respectively disabled. - */ -#define write_seqcount_begin(s) \ -do { \ - seqprop_assert(s); \ - \ - if (seqprop_preemptible(s)) \ - preempt_disable(); \ - \ - do_write_seqcount_begin(seqprop_ptr(s)); \ -} while (0) - -static inline void do_write_seqcount_begin(seqcount_t *s) -{ - do_write_seqcount_begin_nested(s, 0); -} - -/** - * write_seqcount_end() - end a seqcount_t write side critical section - * @s: Pointer to seqcount_t or any of the seqcount_LOCKNAME_t variants + * seqcount_t seq; + * bool X = true, Y = false; * - * Context: Preemption will be automatically re-enabled if and only if - * the seqcount write serialization lock is associated, and preemptible. 
- */ -#define write_seqcount_end(s) \ -do { \ - do_write_seqcount_end(seqprop_ptr(s)); \ - \ - if (seqprop_preemptible(s)) \ - preempt_enable(); \ -} while (0) - -static inline void do_write_seqcount_end(seqcount_t *s) -{ - seqcount_release(&s->dep_map, _RET_IP_); - do_raw_write_seqcount_end(s); -} - -/** - * raw_write_seqcount_barrier() - do a seqcount_t write barrier - * @s: Pointer to seqcount_t or any of the seqcount_LOCKNAME_t variants + * void read(void) + * { + * bool x, y; * - * This can be used to provide an ordering guarantee instead of the usual - * consistency guarantee. It is one wmb cheaper, because it can collapse - * the two back-to-back wmb()s. + * do { + * int s = read_seqcount_begin(&seq); * - * Note that writes surrounding the barrier should be declared atomic (e.g. - * via WRITE_ONCE): a) to ensure the writes become visible to other threads - * atomically, avoiding compiler optimizations; b) to document which writes are - * meant to propagate to the reader critical section. 
This is necessary because - * neither writes before and after the barrier are enclosed in a seq-writer - * critical section that would ensure readers are aware of ongoing writes:: + * x = X; y = Y; * - * seqcount_t seq; - * bool X = true, Y = false; + * } while (read_seqcount_retry(&seq, s)); * - * void read(void) - * { - * bool x, y; - * - * do { - * int s = read_seqcount_begin(&seq); - * - * x = X; y = Y; - * - * } while (read_seqcount_retry(&seq, s)); - * - * BUG_ON(!x && !y); + * BUG_ON(!x && !y); * } * * void write(void) * { - * WRITE_ONCE(Y, true); + * Y = true; * - * raw_write_seqcount_barrier(seq); + * raw_write_seqcount_barrier(seq); * - * WRITE_ONCE(X, false); + * X = false; * } */ -#define raw_write_seqcount_barrier(s) \ - do_raw_write_seqcount_barrier(seqprop_ptr(s)) - -static inline void do_raw_write_seqcount_barrier(seqcount_t *s) +static inline void raw_write_seqcount_barrier(seqcount_t *s) { - kcsan_nestable_atomic_begin(); s->sequence++; smp_wmb(); s->sequence++; - kcsan_nestable_atomic_end(); } -/** - * write_seqcount_invalidate() - invalidate in-progress seqcount_t read - * side operations - * @s: Pointer to seqcount_t or any of the seqcount_LOCKNAME_t variants - * - * After write_seqcount_invalidate, no seqcount_t read side operations - * will complete successfully and see data older than this. - */ -#define write_seqcount_invalidate(s) \ - do_write_seqcount_invalidate(seqprop_ptr(s)) - -static inline void do_write_seqcount_invalidate(seqcount_t *s) +static inline int raw_read_seqcount_latch(seqcount_t *s) { - smp_wmb(); - kcsan_nestable_atomic_begin(); - s->sequence+=2; - kcsan_nestable_atomic_end(); -} - -/* - * Latch sequence counters (seqcount_latch_t) - * - * A sequence counter variant where the counter even/odd value is used to - * switch between two copies of protected data. This allows the read path, - * typically NMIs, to safely interrupt the write side critical section. 
- * - * As the write sections are fully preemptible, no special handling for - * PREEMPT_RT is needed. - */ -typedef struct { - seqcount_t seqcount; -} seqcount_latch_t; - -/** - * SEQCNT_LATCH_ZERO() - static initializer for seqcount_latch_t - * @seq_name: Name of the seqcount_latch_t instance - */ -#define SEQCNT_LATCH_ZERO(seq_name) { \ - .seqcount = SEQCNT_ZERO(seq_name.seqcount), \ + int seq = READ_ONCE(s->sequence); + /* Pairs with the first smp_wmb() in raw_write_seqcount_latch() */ + smp_read_barrier_depends(); + return seq; } /** - * seqcount_latch_init() - runtime initializer for seqcount_latch_t - * @s: Pointer to the seqcount_latch_t instance - */ -#define seqcount_latch_init(s) seqcount_init(&(s)->seqcount) - -/** - * raw_read_seqcount_latch() - pick even/odd latch data copy - * @s: Pointer to seqcount_latch_t - * - * See raw_write_seqcount_latch() for details and a full reader/writer - * usage example. - * - * Return: sequence counter raw value. Use the lowest bit as an index for - * picking which data copy to read. The full counter must then be checked - * with read_seqcount_latch_retry(). - */ -static inline unsigned raw_read_seqcount_latch(const seqcount_latch_t *s) -{ - /* - * Pairs with the first smp_wmb() in raw_write_seqcount_latch(). - * Due to the dependent load, a full smp_rmb() is not needed. 
- */ - return READ_ONCE(s->seqcount.sequence); -} - -/** - * read_seqcount_latch_retry() - end a seqcount_latch_t read section - * @s: Pointer to seqcount_latch_t - * @start: count, from raw_read_seqcount_latch() - * - * Return: true if a read section retry is required, else false - */ -static inline int -read_seqcount_latch_retry(const seqcount_latch_t *s, unsigned start) -{ - return read_seqcount_retry(&s->seqcount, start); -} - -/** - * raw_write_seqcount_latch() - redirect latch readers to even/odd copy - * @s: Pointer to seqcount_latch_t + * raw_write_seqcount_latch - redirect readers to even/odd copy + * @s: pointer to seqcount_t * * The latch technique is a multiversion concurrency control method that allows * queries during non-atomic modifications. If you can guarantee queries never @@ -717,357 +301,272 @@ read_seqcount_latch_retry(const seqcount_latch_t *s, unsigned start) * Very simply put: we first modify one copy and then the other. This ensures * there is always one copy in a stable state, ready to give us an answer. * - * The basic form is a data structure like:: + * The basic form is a data structure like: * - * struct latch_struct { - * seqcount_latch_t seq; - * struct data_struct data[2]; - * }; + * struct latch_struct { + * seqcount_t seq; + * struct data_struct data[2]; + * }; * * Where a modification, which is assumed to be externally serialized, does the - * following:: + * following: * - * void latch_modify(struct latch_struct *latch, ...) - * { - * smp_wmb(); // Ensure that the last data[1] update is visible - * latch->seq.sequence++; - * smp_wmb(); // Ensure that the seqcount update is visible + * void latch_modify(struct latch_struct *latch, ...) 
+ * { + * smp_wmb(); <- Ensure that the last data[1] update is visible + * latch->seq++; + * smp_wmb(); <- Ensure that the seqcount update is visible * - * modify(latch->data[0], ...); + * modify(latch->data[0], ...); * - * smp_wmb(); // Ensure that the data[0] update is visible - * latch->seq.sequence++; - * smp_wmb(); // Ensure that the seqcount update is visible + * smp_wmb(); <- Ensure that the data[0] update is visible + * latch->seq++; + * smp_wmb(); <- Ensure that the seqcount update is visible * - * modify(latch->data[1], ...); - * } + * modify(latch->data[1], ...); + * } * - * The query will have a form like:: + * The query will have a form like: * - * struct entry *latch_query(struct latch_struct *latch, ...) - * { - * struct entry *entry; - * unsigned seq, idx; + * struct entry *latch_query(struct latch_struct *latch, ...) + * { + * struct entry *entry; + * unsigned seq, idx; * - * do { - * seq = raw_read_seqcount_latch(&latch->seq); + * do { + * seq = raw_read_seqcount_latch(&latch->seq); * - * idx = seq & 0x01; - * entry = data_query(latch->data[idx], ...); + * idx = seq & 0x01; + * entry = data_query(latch->data[idx], ...); * - * // This includes needed smp_rmb() - * } while (read_seqcount_latch_retry(&latch->seq, seq)); + * smp_rmb(); + * } while (seq != latch->seq); * - * return entry; - * } + * return entry; + * } * * So during the modification, queries are first redirected to data[1]. Then we * modify data[0]. When that is complete, we redirect queries back to data[0] * and we can modify data[1]. * - * NOTE: + * NOTE: The non-requirement for atomic modifications does _NOT_ include + * the publishing of new entries in the case where data is a dynamic + * data structure. * - * The non-requirement for atomic modifications does _NOT_ include - * the publishing of new entries in the case where data is a dynamic - * data structure. 
+ * An iteration might start in data[0] and get suspended long enough + * to miss an entire modification sequence, once it resumes it might + * observe the new entry. * - * An iteration might start in data[0] and get suspended long enough - * to miss an entire modification sequence, once it resumes it might - * observe the new entry. - * - * NOTE2: - * - * When data is a dynamic data structure; one should use regular RCU - * patterns to manage the lifetimes of the objects within. + * NOTE: When data is a dynamic data structure; one should use regular RCU + * patterns to manage the lifetimes of the objects within. */ -static inline void raw_write_seqcount_latch(seqcount_latch_t *s) +static inline void raw_write_seqcount_latch(seqcount_t *s) { - smp_wmb(); /* prior stores before incrementing "sequence" */ - s->seqcount.sequence++; - smp_wmb(); /* increment "sequence" before following stores */ + smp_wmb(); /* prior stores before incrementing "sequence" */ + s->sequence++; + smp_wmb(); /* increment "sequence" before following stores */ } /* - * Sequential locks (seqlock_t) - * - * Sequence counters with an embedded spinlock for writer serialization - * and non-preemptibility. - * - * For more info, see: - * - Comments on top of seqcount_t - * - Documentation/locking/seqlock.rst + * Sequence counter only version assumes that callers are using their + * own mutexing. */ -typedef struct { - /* - * Make sure that readers don't starve writers on PREEMPT_RT: use - * seqcount_spinlock_t instead of seqcount_t. Check __SEQ_LOCK(). 
- */ - seqcount_spinlock_t seqcount; - spinlock_t lock; -} seqlock_t; - -#define __SEQLOCK_UNLOCKED(lockname) \ - { \ - .seqcount = SEQCNT_SPINLOCK_ZERO(lockname, &(lockname).lock), \ - .lock = __SPIN_LOCK_UNLOCKED(lockname) \ - } - -/** - * seqlock_init() - dynamic initializer for seqlock_t - * @sl: Pointer to the seqlock_t instance - */ -#define seqlock_init(sl) \ - do { \ - spin_lock_init(&(sl)->lock); \ - seqcount_spinlock_init(&(sl)->seqcount, &(sl)->lock); \ - } while (0) - -/** - * DEFINE_SEQLOCK(sl) - Define a statically allocated seqlock_t - * @sl: Name of the seqlock_t instance - */ -#define DEFINE_SEQLOCK(sl) \ - seqlock_t sl = __SEQLOCK_UNLOCKED(sl) - -/** - * read_seqbegin() - start a seqlock_t read side critical section - * @sl: Pointer to seqlock_t - * - * Return: count, to be passed to read_seqretry() - */ -static inline unsigned read_seqbegin(const seqlock_t *sl) +static inline void write_seqcount_begin_nested(seqcount_t *s, int subclass) { - unsigned ret = read_seqcount_begin(&sl->seqcount); + raw_write_seqcount_begin(s); + seqcount_acquire(&s->dep_map, subclass, 0, _RET_IP_); +} - kcsan_atomic_next(0); /* non-raw usage, assume closing read_seqretry() */ - kcsan_flat_atomic_begin(); - return ret; +static inline void write_seqcount_begin(seqcount_t *s) +{ + write_seqcount_begin_nested(s, 0); +} + +static inline void write_seqcount_end(seqcount_t *s) +{ + seqcount_release(&s->dep_map, 1, _RET_IP_); + raw_write_seqcount_end(s); } /** - * read_seqretry() - end a seqlock_t read side section - * @sl: Pointer to seqlock_t - * @start: count, from read_seqbegin() + * write_seqcount_invalidate - invalidate in-progress read-side seq operations + * @s: pointer to seqcount_t * - * read_seqretry closes the read side critical section of given seqlock_t. - * If the critical section was invalid, it must be ignored (and typically - * retried). 
- * - * Return: true if a read section retry is required, else false + * After write_seqcount_invalidate, no read-side seq operations will complete + * successfully and see data older than this. */ +static inline void write_seqcount_invalidate(seqcount_t *s) +{ + smp_wmb(); + s->sequence+=2; +} + +typedef struct { + struct seqcount seqcount; + spinlock_t lock; +} seqlock_t; + +/* + * These macros triggered gcc-3.x compile-time problems. We think these are + * OK now. Be cautious. + */ +#define __SEQLOCK_UNLOCKED(lockname) \ + { \ + .seqcount = SEQCNT_ZERO(lockname), \ + .lock = __SPIN_LOCK_UNLOCKED(lockname) \ + } + +#define seqlock_init(x) \ + do { \ + seqcount_init(&(x)->seqcount); \ + spin_lock_init(&(x)->lock); \ + } while (0) + +#define DEFINE_SEQLOCK(x) \ + seqlock_t x = __SEQLOCK_UNLOCKED(x) + +/* + * Read side functions for starting and finalizing a read side section. + */ +static inline unsigned read_seqbegin(const seqlock_t *sl) +{ + return read_seqcount_begin(&sl->seqcount); +} + static inline unsigned read_seqretry(const seqlock_t *sl, unsigned start) { - /* - * Assume not nested: read_seqretry() may be called multiple times when - * completing read critical section. - */ - kcsan_flat_atomic_end(); - return read_seqcount_retry(&sl->seqcount, start); } /* - * For all seqlock_t write side functions, use the the internal - * do_write_seqcount_begin() instead of generic write_seqcount_begin(). - * This way, no redundant lockdep_assert_held() checks are added. - */ - -/** - * write_seqlock() - start a seqlock_t write side critical section - * @sl: Pointer to seqlock_t - * - * write_seqlock opens a write side critical section for the given - * seqlock_t. It also implicitly acquires the spinlock_t embedded inside - * that sequential lock. All seqlock_t write side sections are thus - * automatically serialized and non-preemptible. 
- * - * Context: if the seqlock_t read section, or other write side critical - * sections, can be invoked from hardirq or softirq contexts, use the - * _irqsave or _bh variants of this function instead. + * Lock out other writers and update the count. + * Acts like a normal spin_lock/unlock. + * Don't need preempt_disable() because that is in the spin_lock already. */ +static inline void write_seqlock(seqlock_t *sl) __acquires(sl); static inline void write_seqlock(seqlock_t *sl) { spin_lock(&sl->lock); - do_write_seqcount_begin(&sl->seqcount.seqcount); + write_seqcount_begin(&sl->seqcount); } -/** - * write_sequnlock() - end a seqlock_t write side critical section - * @sl: Pointer to seqlock_t - * - * write_sequnlock closes the (serialized and non-preemptible) write side - * critical section of given seqlock_t. - */ +static inline void write_sequnlock(seqlock_t *sl) __releases(sl); static inline void write_sequnlock(seqlock_t *sl) { - do_write_seqcount_end(&sl->seqcount.seqcount); + write_seqcount_end(&sl->seqcount); spin_unlock(&sl->lock); } -/** - * write_seqlock_bh() - start a softirqs-disabled seqlock_t write section - * @sl: Pointer to seqlock_t - * - * _bh variant of write_seqlock(). Use only if the read side section, or - * other write side sections, can be invoked from softirq contexts. - */ +static inline void write_seqlock_bh(seqlock_t *sl) __acquires(sl); static inline void write_seqlock_bh(seqlock_t *sl) { spin_lock_bh(&sl->lock); - do_write_seqcount_begin(&sl->seqcount.seqcount); + write_seqcount_begin(&sl->seqcount); } -/** - * write_sequnlock_bh() - end a softirqs-disabled seqlock_t write section - * @sl: Pointer to seqlock_t - * - * write_sequnlock_bh closes the serialized, non-preemptible, and - * softirqs-disabled, seqlock_t write side critical section opened with - * write_seqlock_bh(). 
- */ +static inline void write_sequnlock_bh(seqlock_t *sl) __releases(sl); static inline void write_sequnlock_bh(seqlock_t *sl) { - do_write_seqcount_end(&sl->seqcount.seqcount); + write_seqcount_end(&sl->seqcount); spin_unlock_bh(&sl->lock); } -/** - * write_seqlock_irq() - start a non-interruptible seqlock_t write section - * @sl: Pointer to seqlock_t - * - * _irq variant of write_seqlock(). Use only if the read side section, or - * other write sections, can be invoked from hardirq contexts. - */ +static inline void write_seqlock_irq(seqlock_t *sl) __acquires(sl); static inline void write_seqlock_irq(seqlock_t *sl) { spin_lock_irq(&sl->lock); - do_write_seqcount_begin(&sl->seqcount.seqcount); + write_seqcount_begin(&sl->seqcount); } -/** - * write_sequnlock_irq() - end a non-interruptible seqlock_t write section - * @sl: Pointer to seqlock_t - * - * write_sequnlock_irq closes the serialized and non-interruptible - * seqlock_t write side section opened with write_seqlock_irq(). - */ +static inline void write_sequnlock_irq(seqlock_t *sl) __releases(sl); static inline void write_sequnlock_irq(seqlock_t *sl) { - do_write_seqcount_end(&sl->seqcount.seqcount); + write_seqcount_end(&sl->seqcount); spin_unlock_irq(&sl->lock); } +static inline unsigned long __write_seqlock_irqsave(seqlock_t *sl) __acquires(sl); static inline unsigned long __write_seqlock_irqsave(seqlock_t *sl) { unsigned long flags; spin_lock_irqsave(&sl->lock, flags); - do_write_seqcount_begin(&sl->seqcount.seqcount); + write_seqcount_begin(&sl->seqcount); return flags; } -/** - * write_seqlock_irqsave() - start a non-interruptible seqlock_t write - * section - * @lock: Pointer to seqlock_t - * @flags: Stack-allocated storage for saving caller's local interrupt - * state, to be passed to write_sequnlock_irqrestore(). - * - * _irqsave variant of write_seqlock(). Use it only if the read side - * section, or other write sections, can be invoked from hardirq context. 
- */ #define write_seqlock_irqsave(lock, flags) \ do { flags = __write_seqlock_irqsave(lock); } while (0) -/** - * write_sequnlock_irqrestore() - end non-interruptible seqlock_t write - * section - * @sl: Pointer to seqlock_t - * @flags: Caller's saved interrupt state, from write_seqlock_irqsave() - * - * write_sequnlock_irqrestore closes the serialized and non-interruptible - * seqlock_t write section previously opened with write_seqlock_irqsave(). - */ +static inline void write_sequnlock_irqrestore(seqlock_t *sl, unsigned long flags) __releases(sl); static inline void write_sequnlock_irqrestore(seqlock_t *sl, unsigned long flags) { - do_write_seqcount_end(&sl->seqcount.seqcount); + write_seqcount_end(&sl->seqcount); spin_unlock_irqrestore(&sl->lock, flags); } -/** - * read_seqlock_excl() - begin a seqlock_t locking reader section - * @sl: Pointer to seqlock_t - * - * read_seqlock_excl opens a seqlock_t locking reader critical section. A - * locking reader exclusively locks out *both* other writers *and* other - * locking readers, but it does not update the embedded sequence number. - * - * Locking readers act like a normal spin_lock()/spin_unlock(). - * - * Context: if the seqlock_t write section, *or other read sections*, can - * be invoked from hardirq or softirq contexts, use the _irqsave or _bh - * variant of this function instead. - * - * The opened read section must be closed with read_sequnlock_excl(). +/* + * A locking reader exclusively locks out other writers and locking readers, + * but doesn't update the sequence number. Acts like a normal spin_lock/unlock. + * Don't need preempt_disable() because that is in the spin_lock already. 
*/ +static inline void read_seqlock_excl(seqlock_t *sl) __acquires(sl); static inline void read_seqlock_excl(seqlock_t *sl) { spin_lock(&sl->lock); } -/** - * read_sequnlock_excl() - end a seqlock_t locking reader critical section - * @sl: Pointer to seqlock_t - */ +static inline void read_sequnlock_excl(seqlock_t *sl) __releases(sl); static inline void read_sequnlock_excl(seqlock_t *sl) { spin_unlock(&sl->lock); } /** - * read_seqlock_excl_bh() - start a seqlock_t locking reader section with - * softirqs disabled - * @sl: Pointer to seqlock_t + * read_seqbegin_or_lock - begin a sequence number check or locking block + * @lock: sequence lock + * @seq : sequence number to be checked * - * _bh variant of read_seqlock_excl(). Use this variant only if the - * seqlock_t write side section, *or other read sections*, can be invoked - * from softirq contexts. + * First try it once optimistically without taking the lock. If that fails, + * take the lock. The sequence number is also used as a marker for deciding + * whether to be a reader (even) or writer (odd). + * N.B. seq must be initialized to an even number to begin with. 
*/ +static inline void read_seqbegin_or_lock(seqlock_t *lock, int *seq) +{ + if (!(*seq & 1)) /* Even */ + *seq = read_seqbegin(lock); + else /* Odd */ + read_seqlock_excl(lock); +} + +static inline int need_seqretry(seqlock_t *lock, int seq) +{ + return !(seq & 1) && read_seqretry(lock, seq); +} + +static inline void done_seqretry(seqlock_t *lock, int seq) +{ + if (seq & 1) + read_sequnlock_excl(lock); +} + static inline void read_seqlock_excl_bh(seqlock_t *sl) { spin_lock_bh(&sl->lock); } -/** - * read_sequnlock_excl_bh() - stop a seqlock_t softirq-disabled locking - * reader section - * @sl: Pointer to seqlock_t - */ static inline void read_sequnlock_excl_bh(seqlock_t *sl) { spin_unlock_bh(&sl->lock); } -/** - * read_seqlock_excl_irq() - start a non-interruptible seqlock_t locking - * reader section - * @sl: Pointer to seqlock_t - * - * _irq variant of read_seqlock_excl(). Use this only if the seqlock_t - * write side section, *or other read sections*, can be invoked from a - * hardirq context. - */ static inline void read_seqlock_excl_irq(seqlock_t *sl) { spin_lock_irq(&sl->lock); } -/** - * read_sequnlock_excl_irq() - end an interrupts-disabled seqlock_t - * locking reader section - * @sl: Pointer to seqlock_t - */ static inline void read_sequnlock_excl_irq(seqlock_t *sl) { spin_unlock_irq(&sl->lock); @@ -1081,117 +580,15 @@ static inline unsigned long __read_seqlock_excl_irqsave(seqlock_t *sl) return flags; } -/** - * read_seqlock_excl_irqsave() - start a non-interruptible seqlock_t - * locking reader section - * @lock: Pointer to seqlock_t - * @flags: Stack-allocated storage for saving caller's local interrupt - * state, to be passed to read_sequnlock_excl_irqrestore(). - * - * _irqsave variant of read_seqlock_excl(). Use this only if the seqlock_t - * write side section, *or other read sections*, can be invoked from a - * hardirq context. 
- */ #define read_seqlock_excl_irqsave(lock, flags) \ do { flags = __read_seqlock_excl_irqsave(lock); } while (0) -/** - * read_sequnlock_excl_irqrestore() - end non-interruptible seqlock_t - * locking reader section - * @sl: Pointer to seqlock_t - * @flags: Caller saved interrupt state, from read_seqlock_excl_irqsave() - */ static inline void read_sequnlock_excl_irqrestore(seqlock_t *sl, unsigned long flags) { spin_unlock_irqrestore(&sl->lock, flags); } -/** - * read_seqbegin_or_lock() - begin a seqlock_t lockless or locking reader - * @lock: Pointer to seqlock_t - * @seq : Marker and return parameter. If the passed value is even, the - * reader will become a *lockless* seqlock_t reader as in read_seqbegin(). - * If the passed value is odd, the reader will become a *locking* reader - * as in read_seqlock_excl(). In the first call to this function, the - * caller *must* initialize and pass an even value to @seq; this way, a - * lockless read can be optimistically tried first. - * - * read_seqbegin_or_lock is an API designed to optimistically try a normal - * lockless seqlock_t read section first. If an odd counter is found, the - * lockless read trial has failed, and the next read iteration transforms - * itself into a full seqlock_t locking reader. - * - * This is typically used to avoid seqlock_t lockless readers starvation - * (too much retry loops) in the case of a sharp spike in write side - * activity. - * - * Context: if the seqlock_t write section, *or other read sections*, can - * be invoked from hardirq or softirq contexts, use the _irqsave or _bh - * variant of this function instead. - * - * Check Documentation/locking/seqlock.rst for template example code. - * - * Return: the encountered sequence counter value, through the @seq - * parameter, which is overloaded as a return parameter. This returned - * value must be checked with need_seqretry(). 
If the read section need to - * be retried, this returned value must also be passed as the @seq - * parameter of the next read_seqbegin_or_lock() iteration. - */ -static inline void read_seqbegin_or_lock(seqlock_t *lock, int *seq) -{ - if (!(*seq & 1)) /* Even */ - *seq = read_seqbegin(lock); - else /* Odd */ - read_seqlock_excl(lock); -} - -/** - * need_seqretry() - validate seqlock_t "locking or lockless" read section - * @lock: Pointer to seqlock_t - * @seq: sequence count, from read_seqbegin_or_lock() - * - * Return: true if a read section retry is required, false otherwise - */ -static inline int need_seqretry(seqlock_t *lock, int seq) -{ - return !(seq & 1) && read_seqretry(lock, seq); -} - -/** - * done_seqretry() - end seqlock_t "locking or lockless" reader section - * @lock: Pointer to seqlock_t - * @seq: count, from read_seqbegin_or_lock() - * - * done_seqretry finishes the seqlock_t read side critical section started - * with read_seqbegin_or_lock() and validated by need_seqretry(). - */ -static inline void done_seqretry(seqlock_t *lock, int seq) -{ - if (seq & 1) - read_sequnlock_excl(lock); -} - -/** - * read_seqbegin_or_lock_irqsave() - begin a seqlock_t lockless reader, or - * a non-interruptible locking reader - * @lock: Pointer to seqlock_t - * @seq: Marker and return parameter. Check read_seqbegin_or_lock(). - * - * This is the _irqsave variant of read_seqbegin_or_lock(). Use it only if - * the seqlock_t write section, *or other read sections*, can be invoked - * from hardirq context. - * - * Note: Interrupts will be disabled only for "locking reader" mode. - * - * Return: - * - * 1. The saved local interrupts state in case of a locking reader, to - * be passed to done_seqretry_irqrestore(). - * - * 2. The encountered sequence counter value, returned through @seq - * overloaded as a return parameter. Check read_seqbegin_or_lock(). 
- */ static inline unsigned long read_seqbegin_or_lock_irqsave(seqlock_t *lock, int *seq) { @@ -1205,18 +602,6 @@ read_seqbegin_or_lock_irqsave(seqlock_t *lock, int *seq) return flags; } -/** - * done_seqretry_irqrestore() - end a seqlock_t lockless reader, or a - * non-interruptible locking reader section - * @lock: Pointer to seqlock_t - * @seq: Count, from read_seqbegin_or_lock_irqsave() - * @flags: Caller's saved local interrupt state in case of a locking - * reader, also from read_seqbegin_or_lock_irqsave() - * - * This is the _irqrestore variant of done_seqretry(). The read section - * must've been opened with read_seqbegin_or_lock_irqsave(), and validated - * by need_seqretry(). - */ static inline void done_seqretry_irqrestore(seqlock_t *lock, int seq, unsigned long flags) { diff --git a/include/linux/seqno-fence.h b/include/linux/seqno-fence.h index 3cca2b8fac..a1ba6a5ccd 100644 --- a/include/linux/seqno-fence.h +++ b/include/linux/seqno-fence.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * seqno-fence, using a dma-buf to synchronize fencing * @@ -7,12 +6,21 @@ * Authors: * Rob Clark * Maarten Lankhorst + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published by + * the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
*/ #ifndef __LINUX_SEQNO_FENCE_H #define __LINUX_SEQNO_FENCE_H -#include +#include #include enum seqno_fence_condition { @@ -21,15 +29,15 @@ enum seqno_fence_condition { }; struct seqno_fence { - struct dma_fence base; + struct fence base; - const struct dma_fence_ops *ops; + const struct fence_ops *ops; struct dma_buf *sync_buf; uint32_t seqno_ofs; enum seqno_fence_condition condition; }; -extern const struct dma_fence_ops seqno_fence_ops; +extern const struct fence_ops seqno_fence_ops; /** * to_seqno_fence - cast a fence to a seqno_fence @@ -39,7 +47,7 @@ extern const struct dma_fence_ops seqno_fence_ops; * or the seqno_fence otherwise. */ static inline struct seqno_fence * -to_seqno_fence(struct dma_fence *fence) +to_seqno_fence(struct fence *fence) { if (fence->ops != &seqno_fence_ops) return NULL; @@ -75,9 +83,9 @@ to_seqno_fence(struct dma_fence *fence) * dma-buf for sync_buf, since mapping or unmapping the sync_buf to the * device's vm can be expensive. * - * It is recommended for creators of seqno_fence to call dma_fence_signal() + * It is recommended for creators of seqno_fence to call fence_signal * before destruction. This will prevent possible issues from wraparound at - * time of issue vs time of check, since users can check dma_fence_is_signaled() + * time of issue vs time of check, since users can check fence_is_signaled * before submitting instructions for the hardware to wait on the fence. 
* However, when ops.enable_signaling is not called, it doesn't have to be * done as soon as possible, just before there's any real danger of seqno @@ -88,18 +96,18 @@ seqno_fence_init(struct seqno_fence *fence, spinlock_t *lock, struct dma_buf *sync_buf, uint32_t context, uint32_t seqno_ofs, uint32_t seqno, enum seqno_fence_condition cond, - const struct dma_fence_ops *ops) + const struct fence_ops *ops) { BUG_ON(!fence || !sync_buf || !ops); BUG_ON(!ops->wait || !ops->enable_signaling || !ops->get_driver_name || !ops->get_timeline_name); /* - * ops is used in dma_fence_init for get_driver_name, so needs to be + * ops is used in fence_init for get_driver_name, so needs to be * initialized first */ fence->ops = ops; - dma_fence_init(&fence->base, &seqno_fence_ops, lock, context, seqno); + fence_init(&fence->base, &seqno_fence_ops, lock, context, seqno); get_dma_buf(sync_buf); fence->sync_buf = sync_buf; fence->seqno_ofs = seqno_ofs; diff --git a/include/linux/serial_8250.h b/include/linux/serial_8250.h index 5db211f43b..48ec765198 100644 --- a/include/linux/serial_8250.h +++ b/include/linux/serial_8250.h @@ -1,8 +1,12 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * linux/include/linux/serial_8250.h * * Copyright (C) 2004 Russell King + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. 
*/ #ifndef _LINUX_SERIAL_8250_H #define _LINUX_SERIAL_8250_H @@ -25,7 +29,6 @@ struct plat_serial8250_port { unsigned char regshift; /* register shift */ unsigned char iotype; /* UPIO_* */ unsigned char hub6; - unsigned char has_sysrq; /* supports magic SysRq */ upf_t flags; /* UPF_* flags */ unsigned int type; /* If UPF_FIXED_TYPE */ unsigned int (*serial_in)(struct uart_port *, int); @@ -33,8 +36,6 @@ struct plat_serial8250_port { void (*set_termios)(struct uart_port *, struct ktermios *new, struct ktermios *old); - void (*set_ldisc)(struct uart_port *, - struct ktermios *); unsigned int (*get_mctrl)(struct uart_port *); int (*handle_irq)(struct uart_port *); void (*pm)(struct uart_port *, unsigned int state, @@ -77,11 +78,9 @@ struct uart_8250_ops { }; struct uart_8250_em485 { - struct hrtimer start_tx_timer; /* "rs485 start tx" timer */ - struct hrtimer stop_tx_timer; /* "rs485 stop tx" timer */ - struct hrtimer *active_timer; /* pointer to active timer */ - struct uart_8250_port *port; /* for hrtimer callbacks */ - unsigned int tx_stopped:1; /* tx is currently stopped */ + struct timer_list start_tx_timer; /* "rs485 start tx" timer */ + struct timer_list stop_tx_timer; /* "rs485 stop tx" timer */ + struct timer_list *active_timer; /* pointer to active timer */ }; /* @@ -95,7 +94,7 @@ struct uart_8250_port { struct uart_port port; struct timer_list timer; /* "no irq" timer */ struct list_head list; /* ports on this IRQ */ - u32 capabilities; /* port capabilities */ + unsigned short capabilities; /* port capabilities */ unsigned short bugs; /* port bugs */ bool fifo_bug; /* min RX trigger if enabled */ unsigned int tx_loadsz; /* transmit fifo load size */ @@ -112,7 +111,6 @@ struct uart_8250_port { * if no_console_suspend */ unsigned char probe; - struct mctrl_gpios *gpios; #define UART_PROBE_RSA (1 << 0) /* @@ -133,12 +131,6 @@ struct uart_8250_port { void (*dl_write)(struct uart_8250_port *, int); struct uart_8250_em485 *em485; - void (*rs485_start_tx)(struct 
uart_8250_port *); - void (*rs485_stop_tx)(struct uart_8250_port *); - - /* Serial port overrun backoff */ - struct delayed_work overrun_backoff; - u32 overrun_backoff_time_ms; }; static inline struct uart_8250_port *up_to_u8250p(struct uart_port *up) @@ -146,7 +138,7 @@ static inline struct uart_8250_port *up_to_u8250p(struct uart_port *up) return container_of(up, struct uart_8250_port, port); } -int serial8250_register_8250_port(const struct uart_8250_port *); +int serial8250_register_8250_port(struct uart_8250_port *); void serial8250_unregister_port(int line); void serial8250_suspend_port(int line); void serial8250_resume_port(int line); @@ -155,25 +147,17 @@ extern int early_serial_setup(struct uart_port *port); extern int early_serial8250_setup(struct earlycon_device *device, const char *options); -extern void serial8250_update_uartclk(struct uart_port *port, - unsigned int uartclk); extern void serial8250_do_set_termios(struct uart_port *port, struct ktermios *termios, struct ktermios *old); -extern void serial8250_do_set_ldisc(struct uart_port *port, - struct ktermios *termios); extern unsigned int serial8250_do_get_mctrl(struct uart_port *port); extern int serial8250_do_startup(struct uart_port *port); extern void serial8250_do_shutdown(struct uart_port *port); extern void serial8250_do_pm(struct uart_port *port, unsigned int state, unsigned int oldstate); extern void serial8250_do_set_mctrl(struct uart_port *port, unsigned int mctrl); -extern void serial8250_do_set_divisor(struct uart_port *port, unsigned int baud, - unsigned int quot, - unsigned int quot_frac); extern int fsl8250_handle_irq(struct uart_port *port); int serial8250_handle_irq(struct uart_port *port, unsigned int iir); unsigned char serial8250_rx_chars(struct uart_8250_port *up, unsigned char lsr); -void serial8250_read_char(struct uart_8250_port *up, unsigned char lsr); void serial8250_tx_chars(struct uart_8250_port *up); unsigned int serial8250_modem_status(struct uart_8250_port *up); 
void serial8250_init_port(struct uart_8250_port *up); @@ -181,15 +165,9 @@ void serial8250_set_defaults(struct uart_8250_port *up); void serial8250_console_write(struct uart_8250_port *up, const char *s, unsigned int count); int serial8250_console_setup(struct uart_port *port, char *options, bool probe); -int serial8250_console_exit(struct uart_port *port); extern void serial8250_set_isa_configurator(void (*v) (int port, struct uart_port *up, - u32 *capabilities)); - -#ifdef CONFIG_SERIAL_8250_RT288X -unsigned int au_serial_in(struct uart_port *p, int offset); -void au_serial_out(struct uart_port *p, int offset, int value); -#endif + unsigned short *capabilities)); #endif diff --git a/include/linux/serial_bcm63xx.h b/include/linux/serial_bcm63xx.h index b5e48ef897..570e964dc8 100644 --- a/include/linux/serial_bcm63xx.h +++ b/include/linux/serial_bcm63xx.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_SERIAL_BCM63XX_H #define _LINUX_SERIAL_BCM63XX_H diff --git a/include/linux/serial_core.h b/include/linux/serial_core.h index c58cc142d2..3442014370 100644 --- a/include/linux/serial_core.h +++ b/include/linux/serial_core.h @@ -1,15 +1,27 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * linux/drivers/char/serial_core.h * * Copyright (C) 2000 Deep Blue Solutions Ltd. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #ifndef LINUX_SERIAL_CORE_H #define LINUX_SERIAL_CORE_H -#include + #include -#include #include #include #include @@ -29,11 +41,10 @@ struct uart_port; struct serial_struct; struct device; -struct gpio_desc; /* * This structure describes all the operations that can be done on the - * physical hardware. See Documentation/driver-api/serial/driver.rst for details. + * physical hardware. See Documentation/serial/driver for details. */ struct uart_ops { unsigned int (*tx_empty)(struct uart_port *); @@ -100,8 +111,8 @@ struct uart_icount { __u32 buf_overrun; }; -typedef unsigned int __bitwise upf_t; -typedef unsigned int __bitwise upstat_t; +typedef unsigned int __bitwise__ upf_t; +typedef unsigned int __bitwise__ upstat_t; struct uart_port { spinlock_t lock; /* port lock */ @@ -112,17 +123,8 @@ struct uart_port { void (*set_termios)(struct uart_port *, struct ktermios *new, struct ktermios *old); - void (*set_ldisc)(struct uart_port *, - struct ktermios *); unsigned int (*get_mctrl)(struct uart_port *); void (*set_mctrl)(struct uart_port *, unsigned int); - unsigned int (*get_divisor)(struct uart_port *, - unsigned int baud, - unsigned int *frac); - void (*set_divisor)(struct uart_port *, - unsigned int baud, - unsigned int quot, - unsigned int quot_frac); int (*startup)(struct uart_port *port); void (*shutdown)(struct uart_port *port); void (*throttle)(struct uart_port *port); @@ -133,8 +135,6 @@ struct uart_port { void (*handle_break)(struct uart_port *); int (*rs485_config)(struct uart_port *, struct serial_rs485 *rs485); - int (*iso7816_config)(struct uart_port *, - struct serial_iso7816 *iso7816); unsigned int irq; /* irq number */ unsigned long irqflags; /* irq flags */ unsigned int uartclk; /* base uart clock */ @@ -142,7 +142,7 @@ struct uart_port { 
unsigned char x_char; /* xon/xoff char */ unsigned char regshift; /* reg offset shift */ unsigned char iotype; /* io access style */ - unsigned char quirks; /* internal quirks */ + unsigned char unused1; #define UPIO_PORT (SERIAL_IO_PORT) /* 8b I/O port access */ #define UPIO_HUB6 (SERIAL_IO_HUB6) /* Hub6 ISA card */ @@ -153,15 +153,16 @@ struct uart_port { #define UPIO_MEM32BE (SERIAL_IO_MEM32BE) /* 32b big endian */ #define UPIO_MEM16 (SERIAL_IO_MEM16) /* 16b little endian */ - /* quirks must be updated while holding port mutex */ -#define UPQ_NO_TXEN_TEST BIT(0) - unsigned int read_status_mask; /* driver specific */ unsigned int ignore_status_mask; /* driver specific */ struct uart_state *state; /* pointer to parent state */ struct uart_icount icount; /* statistics */ struct console *cons; /* struct console, if any */ +#if defined(CONFIG_SERIAL_CORE_CONSOLE) || defined(SUPPORT_SYSRQ) + unsigned long sysrq; /* sysrq timeout */ +#endif + /* flags must be updated while holding port mutex */ upf_t flags; @@ -172,6 +173,7 @@ struct uart_port { * [for bit definitions in the UPF_CHANGE_MASK] * * Bits [0..UPF_LAST_USER] are userspace defined/visible/changeable + * except bit 15 (UPF_NO_TXEN_TEST) which is masked off. * The remaining bits are serial-core specific and not modifiable by * userspace. 
*/ @@ -188,9 +190,9 @@ struct uart_port { #define UPF_SPD_SHI ((__force upf_t) ASYNC_SPD_SHI /* 12 */ ) #define UPF_LOW_LATENCY ((__force upf_t) ASYNC_LOW_LATENCY /* 13 */ ) #define UPF_BUGGY_UART ((__force upf_t) ASYNC_BUGGY_UART /* 14 */ ) +#define UPF_NO_TXEN_TEST ((__force upf_t) (1 << 15)) #define UPF_MAGIC_MULTIPLIER ((__force upf_t) ASYNC_MAGIC_MULTIPLIER /* 16 */ ) -#define UPF_NO_THRE_TEST ((__force upf_t) (1 << 19)) /* Port has hardware-assisted h/w flow control */ #define UPF_AUTO_CTS ((__force upf_t) (1 << 20)) #define UPF_AUTO_RTS ((__force upf_t) (1 << 21)) @@ -227,7 +229,6 @@ struct uart_port { #define UPSTAT_AUTORTS ((__force upstat_t) (1 << 2)) #define UPSTAT_AUTOCTS ((__force upstat_t) (1 << 3)) #define UPSTAT_AUTOXOFF ((__force upstat_t) (1 << 4)) -#define UPSTAT_SYNC_FIFO ((__force upstat_t) (1 << 5)) int hw_stopped; /* sw-assisted CTS flow state */ unsigned int mctrl; /* current modem ctrl settings */ @@ -240,21 +241,13 @@ struct uart_port { resource_size_t mapbase; /* for ioremap */ resource_size_t mapsize; struct device *dev; /* parent device */ - - unsigned long sysrq; /* sysrq timeout */ - unsigned int sysrq_ch; /* char for sysrq */ - unsigned char has_sysrq; - unsigned char sysrq_seq; /* index in sysrq_toggle_seq */ - unsigned char hub6; /* this should be in the 8250 driver */ unsigned char suspended; - unsigned char console_reinit; - const char *name; /* port name */ + unsigned char irq_wake; + unsigned char unused[2]; struct attribute_group *attr_group; /* port specific attributes */ const struct attribute_group **tty_groups; /* all attributes (serial core use only) */ struct serial_rs485 rs485; - struct gpio_desc *rs485_term_gpio; /* enable RS485 bus termination */ - struct serial_iso7816 iso7816; void *private_data; /* generic platform data pointer */ }; @@ -351,11 +344,10 @@ struct earlycon_device { }; struct earlycon_id { - char name[15]; - char name_term; /* In case compiler didn't '\0' term name */ + char name[16]; char 
compatible[128]; int (*setup)(struct earlycon_device *, const char *options); -}; +} __aligned(32); extern const struct earlycon_id __earlycon_table[]; extern const struct earlycon_id __earlycon_table_end[]; @@ -367,12 +359,11 @@ extern const struct earlycon_id __earlycon_table_end[]; #endif #define OF_EARLYCON_DECLARE(_name, compat, fn) \ - static const struct earlycon_id __UNIQUE_ID(__earlycon_##_name) \ - EARLYCON_USED_OR_UNUSED __section("__earlycon_table") \ - __aligned(__alignof__(struct earlycon_id)) \ + static const struct earlycon_id __UNIQUE_ID(__earlycon_##_name) \ + EARLYCON_USED_OR_UNUSED __section(__earlycon_table) \ = { .name = __stringify(_name), \ .compatible = compat, \ - .setup = fn } + .setup = fn } #define EARLYCON_DECLARE(_name, fn) OF_EARLYCON_DECLARE(_name, "", fn) @@ -381,10 +372,10 @@ extern int of_setup_earlycon(const struct earlycon_id *match, const char *options); #ifdef CONFIG_SERIAL_EARLYCON -extern bool earlycon_acpi_spcr_enable __initdata; +extern bool earlycon_init_is_deferred __initdata; int setup_earlycon(char *buf); #else -static const bool earlycon_acpi_spcr_enable EARLYCON_USED_OR_UNUSED; +static const bool earlycon_init_is_deferred; static inline int setup_earlycon(char *buf) { return 0; } #endif @@ -392,7 +383,7 @@ struct uart_port *uart_get_console(struct uart_port *ports, int nr, struct console *c); int uart_parse_earlycon(char *p, unsigned char *iotype, resource_size_t *addr, char **options); -void uart_parse_options(const char *options, int *baud, int *parity, int *bits, +void uart_parse_options(char *options, int *baud, int *parity, int *bits, int *flow); int uart_set_options(struct uart_port *port, struct console *co, int baud, int parity, int bits, int flow); @@ -408,8 +399,7 @@ int uart_register_driver(struct uart_driver *uart); void uart_unregister_driver(struct uart_driver *uart); int uart_add_one_port(struct uart_driver *reg, struct uart_port *port); int uart_remove_one_port(struct uart_driver *reg, struct 
uart_port *port); -bool uart_match_port(const struct uart_port *port1, - const struct uart_port *port2); +int uart_match_port(struct uart_port *port1, struct uart_port *port2); /* * Power Management @@ -429,7 +419,7 @@ int uart_resume_port(struct uart_driver *reg, struct uart_port *port); static inline int uart_tx_stopped(struct uart_port *port) { struct tty_struct *tty = port->state->port.tty; - if ((tty && tty->flow.stopped) || port->hw_stopped) + if ((tty && tty->stopped) || port->hw_stopped) return 1; return 0; } @@ -458,104 +448,23 @@ extern void uart_handle_cts_change(struct uart_port *uport, extern void uart_insert_char(struct uart_port *port, unsigned int status, unsigned int overrun, unsigned int ch, unsigned int flag); -#ifdef CONFIG_MAGIC_SYSRQ_SERIAL -#define SYSRQ_TIMEOUT (HZ * 5) - -bool uart_try_toggle_sysrq(struct uart_port *port, unsigned int ch); - -static inline int uart_handle_sysrq_char(struct uart_port *port, unsigned int ch) +#ifdef SUPPORT_SYSRQ +static inline int +uart_handle_sysrq_char(struct uart_port *port, unsigned int ch) { - if (!port->sysrq) - return 0; - - if (ch && time_before(jiffies, port->sysrq)) { - if (sysrq_mask()) { + if (port->sysrq) { + if (ch && time_before(jiffies, port->sysrq)) { handle_sysrq(ch); port->sysrq = 0; return 1; } - if (uart_try_toggle_sysrq(port, ch)) - return 1; + port->sysrq = 0; } - port->sysrq = 0; - return 0; } - -static inline int uart_prepare_sysrq_char(struct uart_port *port, unsigned int ch) -{ - if (!port->sysrq) - return 0; - - if (ch && time_before(jiffies, port->sysrq)) { - if (sysrq_mask()) { - port->sysrq_ch = ch; - port->sysrq = 0; - return 1; - } - if (uart_try_toggle_sysrq(port, ch)) - return 1; - } - port->sysrq = 0; - - return 0; -} - -static inline void uart_unlock_and_check_sysrq(struct uart_port *port) -{ - int sysrq_ch; - - if (!port->has_sysrq) { - spin_unlock(&port->lock); - return; - } - - sysrq_ch = port->sysrq_ch; - port->sysrq_ch = 0; - - spin_unlock(&port->lock); - - if 
(sysrq_ch) - handle_sysrq(sysrq_ch); -} - -static inline void uart_unlock_and_check_sysrq_irqrestore(struct uart_port *port, - unsigned long flags) -{ - int sysrq_ch; - - if (!port->has_sysrq) { - spin_unlock_irqrestore(&port->lock, flags); - return; - } - - sysrq_ch = port->sysrq_ch; - port->sysrq_ch = 0; - - spin_unlock_irqrestore(&port->lock, flags); - - if (sysrq_ch) - handle_sysrq(sysrq_ch); -} -#else /* CONFIG_MAGIC_SYSRQ_SERIAL */ -static inline int uart_handle_sysrq_char(struct uart_port *port, unsigned int ch) -{ - return 0; -} -static inline int uart_prepare_sysrq_char(struct uart_port *port, unsigned int ch) -{ - return 0; -} -static inline void uart_unlock_and_check_sysrq(struct uart_port *port) -{ - spin_unlock(&port->lock); -} -static inline void uart_unlock_and_check_sysrq_irqrestore(struct uart_port *port, - unsigned long flags) -{ - spin_unlock_irqrestore(&port->lock, flags); -} -#endif /* CONFIG_MAGIC_SYSRQ_SERIAL */ +#else +#define uart_handle_sysrq_char(port,ch) ({ (void)port; 0; }) +#endif /* * We do the SysRQ and SAK checking like this... 
@@ -567,10 +476,10 @@ static inline int uart_handle_break(struct uart_port *port) if (port->handle_break) port->handle_break(port); -#ifdef CONFIG_MAGIC_SYSRQ_SERIAL - if (port->has_sysrq && uart_console(port)) { +#ifdef SUPPORT_SYSRQ + if (port->cons && port->cons->index == port->line) { if (!port->sysrq) { - port->sysrq = jiffies + SYSRQ_TIMEOUT; + port->sysrq = jiffies + HZ*5; return 1; } port->sysrq = 0; @@ -588,5 +497,4 @@ static inline int uart_handle_break(struct uart_port *port) (cflag) & CRTSCTS || \ !((cflag) & CLOCAL)) -int uart_get_rs485_mode(struct uart_port *port); #endif /* LINUX_SERIAL_CORE_H */ diff --git a/include/linux/serial_max3100.h b/include/linux/serial_max3100.h index befd55c08a..4976befb6a 100644 --- a/include/linux/serial_max3100.h +++ b/include/linux/serial_max3100.h @@ -1,7 +1,11 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * * Copyright (C) 2007 Christian Pellegrin + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. */ diff --git a/include/linux/serial_pnx8xxx.h b/include/linux/serial_pnx8xxx.h index 619d748dcd..79ad87b0be 100644 --- a/include/linux/serial_pnx8xxx.h +++ b/include/linux/serial_pnx8xxx.h @@ -1,6 +1,19 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * Embedded Alley Solutions, source@embeddedalley.com. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #ifndef _LINUX_SERIAL_PNX8XXX_H diff --git a/include/linux/serial_s3c.h b/include/linux/serial_s3c.h index cf0de4a866..a7f004a3c1 100644 --- a/include/linux/serial_s3c.h +++ b/include/linux/serial_s3c.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0+ */ /* * Internal header file for Samsung S3C2410 serial ports (UART0-2) * @@ -11,7 +10,21 @@ * Internal header file for MX1ADS serial ports (UART1 & 2) * * Copyright (C) 2002 Shane Nay (shane@minirl.com) - */ + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +*/ #ifndef __ASM_ARM_REGS_SERIAL_H #define __ASM_ARM_REGS_SERIAL_H @@ -27,15 +40,6 @@ #define S3C2410_UERSTAT (0x14) #define S3C2410_UFSTAT (0x18) #define S3C2410_UMSTAT (0x1C) -#define USI_CON (0xC4) -#define USI_OPTION (0xC8) - -#define USI_CON_RESET (1<<0) -#define USI_CON_RESET_MASK (1<<0) - -#define USI_OPTION_HWACG_CLKREQ_ON (1<<1) -#define USI_OPTION_HWACG_CLKSTOP_ON (1<<2) -#define USI_OPTION_HWACG_MASK (3<<1) #define S3C2410_LCON_CFGMASK ((0xF<<3)|(0x3)) @@ -255,22 +259,6 @@ S5PV210_UFCON_TXTRIG4 | \ S5PV210_UFCON_RXTRIG4) -#define APPLE_S5L_UCON_RXTO_ENA 9 -#define APPLE_S5L_UCON_RXTHRESH_ENA 12 -#define APPLE_S5L_UCON_TXTHRESH_ENA 13 -#define APPLE_S5L_UCON_RXTO_ENA_MSK (1 << APPLE_S5L_UCON_RXTO_ENA) -#define APPLE_S5L_UCON_RXTHRESH_ENA_MSK (1 << APPLE_S5L_UCON_RXTHRESH_ENA) -#define APPLE_S5L_UCON_TXTHRESH_ENA_MSK (1 << APPLE_S5L_UCON_TXTHRESH_ENA) - -#define APPLE_S5L_UCON_DEFAULT (S3C2410_UCON_TXIRQMODE | \ - S3C2410_UCON_RXIRQMODE | \ - S3C2410_UCON_RXFIFO_TOI) - -#define APPLE_S5L_UTRSTAT_RXTHRESH (1<<4) -#define APPLE_S5L_UTRSTAT_TXTHRESH (1<<5) -#define APPLE_S5L_UTRSTAT_RXTO (1<<9) -#define APPLE_S5L_UTRSTAT_ALL_FLAGS (0x3f0) - #ifndef __ASSEMBLY__ #include @@ -279,7 +267,7 @@ * serial port * * the pointer is setup by the machine specific initialisation from the - * arch/arm/mach-s3c/ directory. + * arch/arm/mach-s3c2410/ directory. 
*/ struct s3c2410_uartcfg { diff --git a/include/linux/serial_sci.h b/include/linux/serial_sci.h index 1c89611e0e..9f2bfd0557 100644 --- a/include/linux/serial_sci.h +++ b/include/linux/serial_sci.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef __LINUX_SERIAL_SCI_H #define __LINUX_SERIAL_SCI_H @@ -10,6 +9,8 @@ * Generic header for SuperH (H)SCI(F) (used by sh/sh64 and related parts) */ +#define SCIx_NOT_SUPPORTED (-1) + /* Serial Control Register (@ = not supported by all parts) */ #define SCSCR_TIE BIT(7) /* Transmit Interrupt Enable */ #define SCSCR_RIE BIT(6) /* Receive Interrupt Enable */ @@ -36,21 +37,28 @@ enum { SCIx_SH4_SCIF_FIFODATA_REGTYPE, SCIx_SH7705_SCIF_REGTYPE, SCIx_HSCIF_REGTYPE, - SCIx_RZ_SCIFA_REGTYPE, SCIx_NR_REGTYPES, }; +struct device; + struct plat_sci_port_ops { void (*init_pins)(struct uart_port *, unsigned int cflag); }; +/* + * Port-specific capabilities + */ +#define SCIx_HAVE_RTSCTS BIT(0) + /* * Platform device specific platform_data struct */ struct plat_sci_port { unsigned int type; /* SCI / SCIF / IRDA / HSCIF */ upf_t flags; /* UPF_* flags */ + unsigned long capabilities; /* Port features/capabilities */ unsigned int sampling_rate; unsigned int scscr; /* SCSCR initialization */ @@ -58,9 +66,14 @@ struct plat_sci_port { /* * Platform overrides if necessary, defaults otherwise. */ + int port_reg; + unsigned char regshift; unsigned char regtype; struct plat_sci_port_ops *ops; + + unsigned int dma_slave_tx; + unsigned int dma_slave_rx; }; #endif /* __LINUX_SERIAL_SCI_H */ diff --git a/include/linux/serio.h b/include/linux/serio.h index 6c27d413da..c733cff44e 100644 --- a/include/linux/serio.h +++ b/include/linux/serio.h @@ -1,6 +1,9 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright (C) 1999-2002 Vojtech Pavlik +* + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published by + * the Free Software Foundation. 
*/ #ifndef _SERIO_H #define _SERIO_H @@ -74,7 +77,6 @@ struct serio_driver { irqreturn_t (*interrupt)(struct serio *, unsigned char, unsigned int); int (*connect)(struct serio *, struct serio_driver *drv); int (*reconnect)(struct serio *); - int (*fast_reconnect)(struct serio *); void (*disconnect)(struct serio *); void (*cleanup)(struct serio *); diff --git a/include/linux/sh_clk.h b/include/linux/sh_clk.h index 7bed5be886..645896b812 100644 --- a/include/linux/sh_clk.h +++ b/include/linux/sh_clk.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef __SH_CLOCK_H #define __SH_CLOCK_H diff --git a/include/linux/sh_dma.h b/include/linux/sh_dma.h index 9f79806085..56b97eed28 100644 --- a/include/linux/sh_dma.h +++ b/include/linux/sh_dma.h @@ -1,8 +1,11 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * Header for the new SH dmaengine driver * * Copyright (C) 2010 Guennadi Liakhovetski + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
*/ #ifndef SH_DMA_H #define SH_DMA_H diff --git a/include/linux/sh_eth.h b/include/linux/sh_eth.h index 6dfda97a6c..f2e27e0783 100644 --- a/include/linux/sh_eth.h +++ b/include/linux/sh_eth.h @@ -1,19 +1,22 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef __ASM_SH_ETH_H__ #define __ASM_SH_ETH_H__ #include #include +enum {EDMAC_LITTLE_ENDIAN}; + struct sh_eth_plat_data { int phy; int phy_irq; + int edmac_endian; phy_interface_t phy_interface; void (*set_mdio_gate)(void *addr); unsigned char mac_addr[ETH_ALEN]; unsigned no_ether_link:1; unsigned ether_link_active_low:1; + unsigned needs_init:1; }; #endif diff --git a/include/linux/sh_intc.h b/include/linux/sh_intc.h index c255273b02..32383285da 100644 --- a/include/linux/sh_intc.h +++ b/include/linux/sh_intc.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef __SH_INTC_H #define __SH_INTC_H diff --git a/include/linux/sh_timer.h b/include/linux/sh_timer.h index 74fd5140bb..64638b0580 100644 --- a/include/linux/sh_timer.h +++ b/include/linux/sh_timer.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef __SH_TIMER_H__ #define __SH_TIMER_H__ diff --git a/include/linux/shdma-base.h b/include/linux/shdma-base.h index 6dfd05ef5c..d927647e63 100644 --- a/include/linux/shdma-base.h +++ b/include/linux/shdma-base.h @@ -1,5 +1,4 @@ -/* SPDX-License-Identifier: GPL-2.0 - * +/* * Dmaengine driver base library for DMA controllers, found on SH-based SoCs * * extracted from shdma.c and headers @@ -8,6 +7,10 @@ * Copyright (C) 2009 Nobuhiro Iwamatsu * Copyright (C) 2009 Renesas Solutions, Inc. All rights reserved. * Copyright (C) 2007 Freescale Semiconductor, Inc. All rights reserved. + * + * This is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. 
*/ #ifndef SHDMA_BASE_H diff --git a/include/linux/shm.h b/include/linux/shm.h index d8e69aed3d..af8580566e 100644 --- a/include/linux/shm.h +++ b/include/linux/shm.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_SHM_H_ #define _LINUX_SHM_H_ @@ -7,7 +6,48 @@ #include #include -struct file; +struct shmid_kernel /* private to the kernel */ +{ + struct kern_ipc_perm shm_perm; + struct file *shm_file; + unsigned long shm_nattch; + unsigned long shm_segsz; + time_t shm_atim; + time_t shm_dtim; + time_t shm_ctim; + pid_t shm_cprid; + pid_t shm_lprid; + struct user_struct *mlock_user; + + /* The task created the shm object. NULL if the task is dead. */ + struct task_struct *shm_creator; + struct list_head shm_clist; /* list by creator */ +#ifdef CONFIG_GRKERNSEC + u64 shm_createtime; + pid_t shm_lapid; +#endif +} __randomize_layout; + +/* shm_mode upper byte flags */ +#define SHM_DEST 01000 /* segment will be destroyed on last detach */ +#define SHM_LOCKED 02000 /* segment will not be swapped */ +#define SHM_HUGETLB 04000 /* segment will use huge TLB pages */ +#define SHM_NORESERVE 010000 /* don't check for reservations */ + +/* Bits [26:31] are reserved */ + +/* + * When SHM_HUGETLB is set bits [26:31] encode the log2 of the huge page size. + * This gives us 6 bits, which is enough until someone invents 128 bit address + * spaces. + * + * Assume these are all power of twos. + * When 0 use the default page size. 
+ */ +#define SHM_HUGE_SHIFT 26 +#define SHM_HUGE_MASK 0x3f +#define SHM_HUGE_2MB (21 << SHM_HUGE_SHIFT) +#define SHM_HUGE_1GB (30 << SHM_HUGE_SHIFT) #ifdef CONFIG_SYSVIPC struct sysv_shm { diff --git a/include/linux/shmem_fs.h b/include/linux/shmem_fs.h index 166158b6e9..ff078e7043 100644 --- a/include/linux/shmem_fs.h +++ b/include/linux/shmem_fs.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef __SHMEM_FS_H #define __SHMEM_FS_H @@ -8,7 +7,6 @@ #include #include #include -#include /* inode in-kernel data */ @@ -18,12 +16,10 @@ struct shmem_inode_info { unsigned long flags; unsigned long alloced; /* data pages alloced to file */ unsigned long swapped; /* subtotal assigned to swap */ - pgoff_t fallocend; /* highest fallocate endindex */ struct list_head shrinklist; /* shrinkable hpage inodes */ struct list_head swaplist; /* chain of maybes on swap */ struct shared_policy policy; /* NUMA memory alloc policy */ struct simple_xattrs xattrs; /* list of xattrs */ - atomic_t stop_eviction; /* hold when working on inode */ struct inode vfs_inode; }; @@ -32,14 +28,11 @@ struct shmem_sb_info { struct percpu_counter used_blocks; /* How many are allocated */ unsigned long max_inodes; /* How many inodes are allowed */ unsigned long free_inodes; /* How many are left for allocation */ - raw_spinlock_t stat_lock; /* Serialize shmem_sb_info changes */ + spinlock_t stat_lock; /* Serialize shmem_sb_info changes */ umode_t mode; /* Mount mode for root directory */ unsigned char huge; /* Whether to try for hugepages */ kuid_t uid; /* Mount uid for root directory */ kgid_t gid; /* Mount gid for root directory */ - bool full_inums; /* If i_ino should be uint or ino_t */ - ino_t next_ino; /* The next per-sb inode number to use */ - ino_t __percpu *ino_batch; /* The next per-cpu inode number to use */ struct mempolicy *mpol; /* default memory policy for mappings */ spinlock_t shrinklist_lock; /* Protects shrinklist */ struct list_head shrinklist; /* List of shinkable 
inodes */ @@ -54,44 +47,23 @@ static inline struct shmem_inode_info *SHMEM_I(struct inode *inode) /* * Functions in mm/shmem.c called directly from elsewhere: */ -extern const struct fs_parameter_spec shmem_fs_parameters[]; extern int shmem_init(void); -extern int shmem_init_fs_context(struct fs_context *fc); +extern int shmem_fill_super(struct super_block *sb, void *data, int silent); extern struct file *shmem_file_setup(const char *name, loff_t size, unsigned long flags); extern struct file *shmem_kernel_file_setup(const char *name, loff_t size, unsigned long flags); -extern struct file *shmem_file_setup_with_mnt(struct vfsmount *mnt, - const char *name, loff_t size, unsigned long flags); extern int shmem_zero_setup(struct vm_area_struct *); extern unsigned long shmem_get_unmapped_area(struct file *, unsigned long addr, unsigned long len, unsigned long pgoff, unsigned long flags); -extern int shmem_lock(struct file *file, int lock, struct ucounts *ucounts); -#ifdef CONFIG_SHMEM -extern const struct address_space_operations shmem_aops; -static inline bool shmem_mapping(struct address_space *mapping) -{ - return mapping->a_ops == &shmem_aops; -} -#else -static inline bool shmem_mapping(struct address_space *mapping) -{ - return false; -} -#endif /* CONFIG_SHMEM */ +extern int shmem_lock(struct file *file, int lock, struct user_struct *user); +extern bool shmem_mapping(struct address_space *mapping); extern void shmem_unlock_mapping(struct address_space *mapping); extern struct page *shmem_read_mapping_page_gfp(struct address_space *mapping, pgoff_t index, gfp_t gfp_mask); extern void shmem_truncate_range(struct inode *inode, loff_t start, loff_t end); -extern int shmem_unuse(unsigned int type, bool frontswap, - unsigned long *fs_pages_to_unuse); +extern int shmem_unuse(swp_entry_t entry, struct page *page); -extern bool shmem_is_huge(struct vm_area_struct *vma, - struct inode *inode, pgoff_t index); -static inline bool shmem_huge_enabled(struct vm_area_struct *vma) 
-{ - return shmem_is_huge(vma, file_inode(vma->vm_file), vma->vm_pgoff); -} extern unsigned long shmem_swap_usage(struct vm_area_struct *vma); extern unsigned long shmem_partial_swap_usage(struct address_space *mapping, pgoff_t start, pgoff_t end); @@ -99,8 +71,9 @@ extern unsigned long shmem_partial_swap_usage(struct address_space *mapping, /* Flag allocation requirements to shmem_getpage */ enum sgp_type { SGP_READ, /* don't exceed i_size, don't allocate page */ - SGP_NOALLOC, /* similar, but fail on hole or use fallocated page */ SGP_CACHE, /* don't exceed i_size, may allocate page */ + SGP_NOHUGE, /* like SGP_CACHE, but no huge pages */ + SGP_HUGE, /* like SGP_CACHE, huge pages preferred */ SGP_WRITE, /* may exceed i_size, may allocate !Uptodate page */ SGP_FALLOC, /* like SGP_WRITE, but make existing page Uptodate */ }; @@ -124,33 +97,31 @@ static inline bool shmem_file(struct file *file) return shmem_mapping(file->f_mapping); } -/* - * If fallocate(FALLOC_FL_KEEP_SIZE) has been used, there may be pages - * beyond i_size's notion of EOF, which fallocate has committed to reserving: - * which split_huge_page() must therefore not delete. This use of a single - * "fallocend" per inode errs on the side of not deleting a reservation when - * in doubt: there are plenty of cases when it preserves unreserved pages. 
- */ -static inline pgoff_t shmem_fallocend(struct inode *inode, pgoff_t eof) -{ - return max(eof, SHMEM_I(inode)->fallocend); -} - extern bool shmem_charge(struct inode *inode, long pages); extern void shmem_uncharge(struct inode *inode, long pages); -#ifdef CONFIG_USERFAULTFD -#ifdef CONFIG_SHMEM -extern int shmem_mfill_atomic_pte(struct mm_struct *dst_mm, pmd_t *dst_pmd, - struct vm_area_struct *dst_vma, - unsigned long dst_addr, - unsigned long src_addr, - bool zeropage, - struct page **pagep); -#else /* !CONFIG_SHMEM */ -#define shmem_mfill_atomic_pte(dst_mm, dst_pmd, dst_vma, dst_addr, \ - src_addr, zeropage, pagep) ({ BUG(); 0; }) -#endif /* CONFIG_SHMEM */ -#endif /* CONFIG_USERFAULTFD */ +#ifdef CONFIG_TMPFS + +extern int shmem_add_seals(struct file *file, unsigned int seals); +extern int shmem_get_seals(struct file *file); +extern long shmem_fcntl(struct file *file, unsigned int cmd, unsigned long arg); + +#else + +static inline long shmem_fcntl(struct file *f, unsigned int c, unsigned long a) +{ + return -EINVAL; +} + +#endif + +#ifdef CONFIG_TRANSPARENT_HUGE_PAGECACHE +extern bool shmem_huge_enabled(struct vm_area_struct *vma); +#else +static inline bool shmem_huge_enabled(struct vm_area_struct *vma) +{ + return false; +} +#endif #endif diff --git a/include/linux/shrinker.h b/include/linux/shrinker.h index 9814fff58a..4fcacd915d 100644 --- a/include/linux/shrinker.h +++ b/include/linux/shrinker.h @@ -1,10 +1,9 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_SHRINKER_H #define _LINUX_SHRINKER_H /* * This struct is used to pass information from page reclaim to the shrinkers. - * We consolidate the values for easier extension later. + * We consolidate the values for easier extention later. * * The 'gfpmask' refers to the allocation we are currently trying to * fulfil. 
@@ -12,9 +11,6 @@ struct shrink_control { gfp_t gfp_mask; - /* current node being shrunk (for NUMA aware shrinkers) */ - int nid; - /* * How many objects scan_objects should scan and try to reclaim. * This is reset before every call, so it is safe for callees @@ -22,27 +18,20 @@ struct shrink_control { */ unsigned long nr_to_scan; - /* - * How many objects did scan_objects process? - * This defaults to nr_to_scan before every call, but the callee - * should track its actual progress. - */ - unsigned long nr_scanned; + /* current node being shrunk (for NUMA aware shrinkers) */ + int nid; /* current memcg being shrunk (for memcg aware shrinkers) */ struct mem_cgroup *memcg; }; #define SHRINK_STOP (~0UL) -#define SHRINK_EMPTY (~0UL - 1) /* * A callback you can register to apply pressure to ageable caches. * * @count_objects should return the number of freeable items in the cache. If - * there are no objects to free, it should return SHRINK_EMPTY, while 0 is - * returned in cases of the number of freeable items cannot be determined - * or shrinker should skip this cache for this time (e.g., their number - * is below shrinkable limit). No deadlock checks should be done during the + * there are no objects to free or the number of freeable items cannot be + * determined, it should return 0. No deadlock checks should be done during the * count callback - the shrinker relies on aggregating scan counts that couldn't * be executed due to potential deadlocks to be run at a later call when the * deadlock condition is no longer pending. 
@@ -63,34 +52,21 @@ struct shrinker { unsigned long (*scan_objects)(struct shrinker *, struct shrink_control *sc); - long batch; /* reclaim batch size, 0 = default */ int seeks; /* seeks to recreate an obj */ - unsigned flags; + long batch; /* reclaim batch size, 0 = default */ + unsigned long flags; /* These are for internal use */ struct list_head list; -#ifdef CONFIG_MEMCG - /* ID in shrinker_idr */ - int id; -#endif /* objs pending delete, per node */ atomic_long_t *nr_deferred; }; #define DEFAULT_SEEKS 2 /* A good number if you don't know better. */ /* Flags */ -#define SHRINKER_REGISTERED (1 << 0) -#define SHRINKER_NUMA_AWARE (1 << 1) -#define SHRINKER_MEMCG_AWARE (1 << 2) -/* - * It just makes sense when the shrinker is also MEMCG_AWARE for now, - * non-MEMCG_AWARE shrinker should not have this flag set. - */ -#define SHRINKER_NONSLAB (1 << 3) +#define SHRINKER_NUMA_AWARE (1 << 0) +#define SHRINKER_MEMCG_AWARE (1 << 1) -extern int prealloc_shrinker(struct shrinker *shrinker); -extern void register_shrinker_prepared(struct shrinker *shrinker); -extern int register_shrinker(struct shrinker *shrinker); -extern void unregister_shrinker(struct shrinker *shrinker); -extern void free_prealloced_shrinker(struct shrinker *shrinker); +extern int register_shrinker(struct shrinker *); +extern void unregister_shrinker(struct shrinker *); #endif diff --git a/include/linux/signal.h b/include/linux/signal.h index 3f96a6374e..fe39718ba6 100644 --- a/include/linux/signal.h +++ b/include/linux/signal.h @@ -1,56 +1,47 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_SIGNAL_H #define _LINUX_SIGNAL_H +#include #include -#include -#include +#include struct task_struct; /* for sysctl */ extern int print_fatal_signals; +/* + * Real Time signals may be queued. 
+ */ -static inline void copy_siginfo(kernel_siginfo_t *to, - const kernel_siginfo_t *from) -{ - memcpy(to, from, sizeof(*to)); -} - -static inline void clear_siginfo(kernel_siginfo_t *info) -{ - memset(info, 0, sizeof(*info)); -} - -#define SI_EXPANSION_SIZE (sizeof(struct siginfo) - sizeof(struct kernel_siginfo)) - -static inline void copy_siginfo_to_external(siginfo_t *to, - const kernel_siginfo_t *from) -{ - memcpy(to, from, sizeof(*from)); - memset(((char *)to) + sizeof(struct kernel_siginfo), 0, - SI_EXPANSION_SIZE); -} - -int copy_siginfo_to_user(siginfo_t __user *to, const kernel_siginfo_t *from); -int copy_siginfo_from_user(kernel_siginfo_t *to, const siginfo_t __user *from); - -enum siginfo_layout { - SIL_KILL, - SIL_TIMER, - SIL_POLL, - SIL_FAULT, - SIL_FAULT_TRAPNO, - SIL_FAULT_MCEERR, - SIL_FAULT_BNDERR, - SIL_FAULT_PKUERR, - SIL_FAULT_PERF_EVENT, - SIL_CHLD, - SIL_RT, - SIL_SYS, +struct sigqueue { + struct list_head list; + int flags; + siginfo_t info; + struct user_struct *user; }; -enum siginfo_layout siginfo_layout(unsigned sig, int si_code); +/* flags values. */ +#define SIGQUEUE_PREALLOC 1 + +struct sigpending { + struct list_head list; + sigset_t signal; +}; + +#ifndef HAVE_ARCH_COPY_SIGINFO + +#include + +static inline void copy_siginfo(struct siginfo *to, struct siginfo *from) +{ + if (from->si_code < 0) + memcpy(to, from, sizeof(*to)); + else + /* _sigchld is currently the largest know union member */ + memcpy(to, from, __ARCH_SI_PREAMBLE_SIZE + sizeof(from->_sifields._sigchld)); +} + +#endif /* * Define some primitives to manipulate sigset_t. 
@@ -106,23 +97,6 @@ static inline int sigisemptyset(sigset_t *set) } } -static inline int sigequalsets(const sigset_t *set1, const sigset_t *set2) -{ - switch (_NSIG_WORDS) { - case 4: - return (set1->sig[3] == set2->sig[3]) && - (set1->sig[2] == set2->sig[2]) && - (set1->sig[1] == set2->sig[1]) && - (set1->sig[0] == set2->sig[0]); - case 2: - return (set1->sig[1] == set2->sig[1]) && - (set1->sig[0] == set2->sig[0]); - case 1: - return set1->sig[0] == set2->sig[0]; - } - return 0; -} - #define sigmask(sig) (1UL << ((sig) - 1)) #ifndef __HAVE_ARCH_SIG_SETOPS @@ -139,11 +113,9 @@ static inline void name(sigset_t *r, const sigset_t *a, const sigset_t *b) \ b3 = b->sig[3]; b2 = b->sig[2]; \ r->sig[3] = op(a3, b3); \ r->sig[2] = op(a2, b2); \ - fallthrough; \ case 2: \ a1 = a->sig[1]; b1 = b->sig[1]; \ r->sig[1] = op(a1, b1); \ - fallthrough; \ case 1: \ a0 = a->sig[0]; b0 = b->sig[0]; \ r->sig[0] = op(a0, b0); \ @@ -173,9 +145,7 @@ static inline void name(sigset_t *set) \ switch (_NSIG_WORDS) { \ case 4: set->sig[3] = op(set->sig[3]); \ set->sig[2] = op(set->sig[2]); \ - fallthrough; \ case 2: set->sig[1] = op(set->sig[1]); \ - fallthrough; \ case 1: set->sig[0] = op(set->sig[0]); \ break; \ default: \ @@ -196,7 +166,6 @@ static inline void sigemptyset(sigset_t *set) memset(set, 0, sizeof(sigset_t)); break; case 2: set->sig[1] = 0; - fallthrough; case 1: set->sig[0] = 0; break; } @@ -209,7 +178,6 @@ static inline void sigfillset(sigset_t *set) memset(set, -1, sizeof(sigset_t)); break; case 2: set->sig[1] = -1; - fallthrough; case 1: set->sig[0] = -1; break; } @@ -240,7 +208,6 @@ static inline void siginitset(sigset_t *set, unsigned long mask) memset(&set->sig[1], 0, sizeof(long)*(_NSIG_WORDS-1)); break; case 2: set->sig[1] = 0; - break; case 1: ; } } @@ -253,7 +220,6 @@ static inline void siginitsetinv(sigset_t *set, unsigned long mask) memset(&set->sig[1], -1, sizeof(long)*(_NSIG_WORDS-1)); break; case 2: set->sig[1] = -1; - break; case 1: ; } } @@ -276,27 +242,60 @@ 
static inline int valid_signal(unsigned long sig) struct timespec; struct pt_regs; -enum pid_type; extern int next_signal(struct sigpending *pending, sigset_t *mask); -extern int do_send_sig_info(int sig, struct kernel_siginfo *info, - struct task_struct *p, enum pid_type type); -extern int group_send_sig_info(int sig, struct kernel_siginfo *info, - struct task_struct *p, enum pid_type type); -extern int __group_send_sig_info(int, struct kernel_siginfo *, struct task_struct *); +extern int do_send_sig_info(int sig, struct siginfo *info, + struct task_struct *p, bool group); +extern int group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p); +extern int __group_send_sig_info(int, struct siginfo *, struct task_struct *); +extern int do_sigtimedwait(const sigset_t *, siginfo_t *, + const struct timespec *); extern int sigprocmask(int, sigset_t *, sigset_t *); extern void set_current_blocked(sigset_t *); extern void __set_current_blocked(const sigset_t *); extern int show_unhandled_signals; -extern bool get_signal(struct ksignal *ksig); +struct sigaction { +#ifndef __ARCH_HAS_IRIX_SIGACTION + __sighandler_t sa_handler; + unsigned long sa_flags; +#else + unsigned int sa_flags; + __sighandler_t sa_handler; +#endif +#ifdef __ARCH_HAS_SA_RESTORER + __sigrestore_t sa_restorer; +#endif + sigset_t sa_mask; /* mask last for extensibility */ +}; + +struct k_sigaction { + struct sigaction sa; +#ifdef __ARCH_HAS_KA_RESTORER + __sigrestore_t ka_restorer; +#endif +}; + +#ifdef CONFIG_OLD_SIGACTION +struct old_sigaction { + __sighandler_t sa_handler; + old_sigset_t sa_mask; + unsigned long sa_flags; + __sigrestore_t sa_restorer; +}; +#endif + +struct ksignal { + struct k_sigaction ka; + siginfo_t info; + int sig; +}; + +extern int get_signal(struct ksignal *ksig); extern void signal_setup_done(int failed, struct ksignal *ksig, int stepping); extern void exit_signals(struct task_struct *tsk); extern void kernel_sigaction(int, __sighandler_t); -#define SIG_KTHREAD 
((__force __sighandler_t)2) -#define SIG_KTHREAD_KERNEL ((__force __sighandler_t)3) - static inline void allow_signal(int sig) { /* @@ -304,17 +303,7 @@ static inline void allow_signal(int sig) * know it'll be handled, so that they don't get converted to * SIGKILL or just silently dropped. */ - kernel_sigaction(sig, SIG_KTHREAD); -} - -static inline void allow_kernel_signal(int sig) -{ - /* - * Kernel threads handle their own signals. Let the signal code - * know signals sent by the kernel will be handled, so that they - * don't get silently dropped. - */ - kernel_sigaction(sig, SIG_KTHREAD_KERNEL); + kernel_sigaction(sig, (__force_user __sighandler_t)2); } static inline void disallow_signal(int sig) @@ -324,7 +313,7 @@ static inline void disallow_signal(int sig) extern struct kmem_cache *sighand_cachep; -extern bool unhandled_signal(struct task_struct *tsk, int sig); +int unhandled_signal(struct task_struct *tsk, int sig); /* * In POSIX a signal is sent either to a specific thread (Linux task) @@ -413,7 +402,7 @@ extern bool unhandled_signal(struct task_struct *tsk, int sig); #endif #define siginmask(sig, mask) \ - ((sig) > 0 && (sig) < SIGRTMIN && (rt_sigmask(sig) & (mask))) + ((sig) < SIGRTMIN && (rt_sigmask(sig) & (mask))) #define SIG_KERNEL_ONLY_MASK (\ rt_sigmask(SIGKILL) | rt_sigmask(SIGSTOP)) @@ -434,18 +423,14 @@ extern bool unhandled_signal(struct task_struct *tsk, int sig); rt_sigmask(SIGCONT) | rt_sigmask(SIGCHLD) | \ rt_sigmask(SIGWINCH) | rt_sigmask(SIGURG) ) -#define SIG_SPECIFIC_SICODES_MASK (\ - rt_sigmask(SIGILL) | rt_sigmask(SIGFPE) | \ - rt_sigmask(SIGSEGV) | rt_sigmask(SIGBUS) | \ - rt_sigmask(SIGTRAP) | rt_sigmask(SIGCHLD) | \ - rt_sigmask(SIGPOLL) | rt_sigmask(SIGSYS) | \ - SIGEMT_MASK ) - #define sig_kernel_only(sig) siginmask(sig, SIG_KERNEL_ONLY_MASK) #define sig_kernel_coredump(sig) siginmask(sig, SIG_KERNEL_COREDUMP_MASK) #define sig_kernel_ignore(sig) siginmask(sig, SIG_KERNEL_IGNORE_MASK) #define sig_kernel_stop(sig) siginmask(sig, 
SIG_KERNEL_STOP_MASK) -#define sig_specific_sicodes(sig) siginmask(sig, SIG_SPECIFIC_SICODES_MASK) + +#define sig_user_defined(t, signr) \ + (((t)->sighand->action[(signr)-1].sa.sa_handler != SIG_DFL) && \ + ((t)->sighand->action[(signr)-1].sa.sa_handler != SIG_IGN)) #define sig_fatal(t, signr) \ (!siginmask(signr, SIG_KERNEL_IGNORE_MASK|SIG_KERNEL_STOP_MASK) && \ @@ -456,12 +441,14 @@ void signals_init(void); int restore_altstack(const stack_t __user *); int __save_altstack(stack_t __user *, unsigned long); -#define unsafe_save_altstack(uss, sp, label) do { \ +#define save_altstack_ex(uss, sp) do { \ stack_t __user *__uss = uss; \ struct task_struct *t = current; \ - unsafe_put_user((void __user *)t->sas_ss_sp, &__uss->ss_sp, label); \ - unsafe_put_user(t->sas_ss_flags, &__uss->ss_flags, label); \ - unsafe_put_user(t->sas_ss_size, &__uss->ss_size, label); \ + put_user_ex((void __user *)t->sas_ss_sp, &__uss->ss_sp); \ + put_user_ex(t->sas_ss_flags, &__uss->ss_flags); \ + put_user_ex(t->sas_ss_size, &__uss->ss_size); \ + if (t->sas_ss_flags & SS_AUTODISARM) \ + sas_ss_reset(t); \ } while (0); #ifdef CONFIG_PROC_FS @@ -469,18 +456,4 @@ struct seq_file; extern void render_sigset_t(struct seq_file *, const char *, sigset_t *); #endif -#ifndef arch_untagged_si_addr -/* - * Given a fault address and a signal and si_code which correspond to the - * _sigfault union member, returns the address that must appear in si_addr if - * the signal handler does not have SA_EXPOSE_TAGBITS enabled in sa_flags. 
- */ -static inline void __user *arch_untagged_si_addr(void __user *addr, - unsigned long sig, - unsigned long si_code) -{ - return addr; -} -#endif - #endif /* _LINUX_SIGNAL_H */ diff --git a/include/linux/signalfd.h b/include/linux/signalfd.h index 9a47c380bd..eadbe227c2 100644 --- a/include/linux/signalfd.h +++ b/include/linux/signalfd.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ /* * include/linux/signalfd.h * @@ -9,7 +8,7 @@ #define _LINUX_SIGNALFD_H #include -#include + #ifdef CONFIG_SIGNALFD diff --git a/include/linux/sirfsoc_dma.h b/include/linux/sirfsoc_dma.h index 50161b6afb..29d959333d 100644 --- a/include/linux/sirfsoc_dma.h +++ b/include/linux/sirfsoc_dma.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _SIRFSOC_DMA_H_ #define _SIRFSOC_DMA_H_ diff --git a/include/linux/sizes.h b/include/linux/sizes.h index 1ac79bcee2..ce3e8150c1 100644 --- a/include/linux/sizes.h +++ b/include/linux/sizes.h @@ -1,12 +1,13 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * include/linux/sizes.h + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. */ #ifndef __LINUX_SIZES_H__ #define __LINUX_SIZES_H__ -#include - #define SZ_1 0x00000001 #define SZ_2 0x00000002 #define SZ_4 0x00000004 @@ -43,10 +44,4 @@ #define SZ_1G 0x40000000 #define SZ_2G 0x80000000 -#define SZ_4G _AC(0x100000000, ULL) -#define SZ_8G _AC(0x200000000, ULL) -#define SZ_16G _AC(0x400000000, ULL) -#define SZ_32G _AC(0x800000000, ULL) -#define SZ_64T _AC(0x400000000000, ULL) - #endif /* __LINUX_SIZES_H__ */ diff --git a/include/linux/skb_array.h b/include/linux/skb_array.h index e2d45b7cb6..f4dfade428 100644 --- a/include/linux/skb_array.h +++ b/include/linux/skb_array.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * Definitions for the 'struct skb_array' datastructure. 
* @@ -7,6 +6,11 @@ * * Copyright (C) 2016 Red Hat, Inc. * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + * * Limited-size FIFO of skbs. Can be used more or less whenever * sk_buff_head can be used, except you need to know the queue size in * advance. @@ -65,12 +69,7 @@ static inline int skb_array_produce_any(struct skb_array *a, struct sk_buff *skb */ static inline bool __skb_array_empty(struct skb_array *a) { - return __ptr_ring_empty(&a->ring); -} - -static inline struct sk_buff *__skb_array_peek(struct skb_array *a) -{ - return __ptr_ring_peek(&a->ring); + return !__ptr_ring_peek(&a->ring); } static inline bool skb_array_empty(struct skb_array *a) @@ -93,56 +92,26 @@ static inline bool skb_array_empty_any(struct skb_array *a) return ptr_ring_empty_any(&a->ring); } -static inline struct sk_buff *__skb_array_consume(struct skb_array *a) -{ - return __ptr_ring_consume(&a->ring); -} - static inline struct sk_buff *skb_array_consume(struct skb_array *a) { return ptr_ring_consume(&a->ring); } -static inline int skb_array_consume_batched(struct skb_array *a, - struct sk_buff **array, int n) -{ - return ptr_ring_consume_batched(&a->ring, (void **)array, n); -} - static inline struct sk_buff *skb_array_consume_irq(struct skb_array *a) { return ptr_ring_consume_irq(&a->ring); } -static inline int skb_array_consume_batched_irq(struct skb_array *a, - struct sk_buff **array, int n) -{ - return ptr_ring_consume_batched_irq(&a->ring, (void **)array, n); -} - static inline struct sk_buff *skb_array_consume_any(struct skb_array *a) { return ptr_ring_consume_any(&a->ring); } -static inline int skb_array_consume_batched_any(struct skb_array *a, - struct sk_buff **array, int n) -{ - return ptr_ring_consume_batched_any(&a->ring, (void **)array, n); -} - - static inline struct 
sk_buff *skb_array_consume_bh(struct skb_array *a) { return ptr_ring_consume_bh(&a->ring); } -static inline int skb_array_consume_batched_bh(struct skb_array *a, - struct sk_buff **array, int n) -{ - return ptr_ring_consume_batched_bh(&a->ring, (void **)array, n); -} - static inline int __skb_array_len_with_tag(struct sk_buff *skb) { if (likely(skb)) { @@ -187,20 +156,13 @@ static void __skb_array_destroy_skb(void *ptr) kfree_skb(ptr); } -static inline void skb_array_unconsume(struct skb_array *a, - struct sk_buff **skbs, int n) -{ - ptr_ring_unconsume(&a->ring, (void **)skbs, n, __skb_array_destroy_skb); -} - static inline int skb_array_resize(struct skb_array *a, int size, gfp_t gfp) { return ptr_ring_resize(&a->ring, size, gfp, __skb_array_destroy_skb); } static inline int skb_array_resize_multiple(struct skb_array **rings, - int nrings, unsigned int size, - gfp_t gfp) + int nrings, int size, gfp_t gfp) { BUILD_BUG_ON(offsetof(struct skb_array, ring)); return ptr_ring_resize_multiple((struct ptr_ring **)rings, diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index 841e2f0f52..66eb624a99 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h @@ -1,24 +1,27 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * Definitions for the 'struct sk_buff' memory handlers. * * Authors: * Alan Cox, * Florian La Roche, + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. 
*/ #ifndef _LINUX_SKBUFF_H #define _LINUX_SKBUFF_H #include +#include #include #include #include -#include #include #include #include -#include #include #include @@ -31,16 +34,11 @@ #include #include #include -#include #include #include #include #include #include -#include -#if IS_ENABLED(CONFIG_NF_CONNTRACK) -#include -#endif /* The interface for checksum offload between the stack and networking drivers * is as follows... @@ -48,8 +46,8 @@ * A. IP checksum related features * * Drivers advertise checksum offload capabilities in the features of a device. - * From the stack's point of view these are capabilities offered by the driver. - * A driver typically only advertises features that it is capable of offloading + * From the stack's point of view these are capabilities offered by the driver, + * a driver typically only advertises features that it is capable of offloading * to its device. * * The checksum related features are: @@ -64,7 +62,7 @@ * TCP or UDP packets over IPv4. These are specifically * unencapsulated packets of the form IPv4|TCP or * IPv4|UDP where the Protocol field in the IPv4 header - * is TCP or UDP. The IPv4 header may contain IP options. + * is TCP or UDP. The IPv4 header may contain IP options * This feature cannot be set in features for a device * with NETIF_F_HW_CSUM also set. This feature is being * DEPRECATED (see below). @@ -72,7 +70,7 @@ * NETIF_F_IPV6_CSUM - Driver (device) is only able to checksum plain * TCP or UDP packets over IPv6. These are specifically * unencapsulated packets of the form IPv6|TCP or - * IPv6|UDP where the Next Header field in the IPv6 + * IPv4|UDP where the Next Header field in the IPv6 * header is either TCP or UDP. IPv6 extension headers * are not supported with this feature. This feature * cannot be set in features for a device with @@ -80,13 +78,13 @@ * DEPRECATED (see below). * * NETIF_F_RXCSUM - Driver (device) performs receive checksum offload. 
- * This flag is only used to disable the RX checksum + * This flag is used only used to disable the RX checksum * feature for a device. The stack will accept receive * checksum indication in packets received on a device * regardless of whether NETIF_F_RXCSUM is set. * * B. Checksumming of received packets by device. Indication of checksum - * verification is set in skb->ip_summed. Possible values are: + * verification is in set skb->ip_summed. Possible values are: * * CHECKSUM_NONE: * @@ -110,28 +108,25 @@ * may perform further validation in this case. * GRE: only if the checksum is present in the header. * SCTP: indicates the CRC in SCTP header has been validated. - * FCOE: indicates the CRC in FC frame has been validated. * * skb->csum_level indicates the number of consecutive checksums found in * the packet minus one that have been verified as CHECKSUM_UNNECESSARY. * For instance if a device receives an IPv6->UDP->GRE->IPv4->TCP packet * and a device is able to verify the checksums for UDP (possibly zero), - * GRE (checksum flag is set) and TCP, skb->csum_level would be set to + * GRE (checksum flag is set), and TCP-- skb->csum_level would be set to * two. If the device were only able to verify the UDP checksum and not - * GRE, either because it doesn't support GRE checksum or because GRE + * GRE, either because it doesn't support GRE checksum of because GRE * checksum is bad, skb->csum_level would be set to zero (TCP checksum is * not considered in this case). * * CHECKSUM_COMPLETE: * * This is the most generic way. The device supplied checksum of the _whole_ - * packet as seen by netif_rx() and fills in skb->csum. This means the + * packet as seen by netif_rx() and fills out in skb->csum. Meaning, the * hardware doesn't need to parse L3/L4 headers to implement this. * - * Notes: - * - Even if device supports only some protocols, but is able to produce - * skb->csum, it MUST use CHECKSUM_COMPLETE, not CHECKSUM_UNNECESSARY. 
- * - CHECKSUM_COMPLETE is not applicable to SCTP and FCoE protocols. + * Note: Even if device supports only some protocols, but is able to produce + * skb->csum, it MUST use CHECKSUM_COMPLETE, not CHECKSUM_UNNECESSARY. * * CHECKSUM_PARTIAL: * @@ -154,8 +149,8 @@ * from skb->csum_start up to the end, and to record/write the checksum at * offset skb->csum_start + skb->csum_offset. A driver may verify that the * csum_start and csum_offset values are valid values given the length and - * offset of the packet, but it should not attempt to validate that the - * checksum refers to a legitimate transport layer checksum -- it is the + * offset of the packet, however they should not attempt to validate that the + * checksum refers to a legitimate transport layer checksum-- it is the * purview of the stack to validate that csum_start and csum_offset are set * correctly. * @@ -166,11 +161,14 @@ * * NETIF_F_IP_CSUM and NETIF_F_IPV6_CSUM are being deprecated in favor of * NETIF_F_HW_CSUM. New devices should use NETIF_F_HW_CSUM to indicate - * checksum offload capability. - * skb_csum_hwoffload_help() can be called to resolve CHECKSUM_PARTIAL based - * on network device checksumming capabilities: if a packet does not match - * them, skb_checksum_help or skb_crc32c_help (depending on the value of - * csum_not_inet, see item D.) is called to resolve the checksum. + * checksum offload capability. If a device has limited checksum capabilities + * (for instance can only perform NETIF_F_IP_CSUM or NETIF_F_IPV6_CSUM as + * described above) a helper function can be called to resolve + * CHECKSUM_PARTIAL. The helper functions are skb_csum_off_chk*. The helper + * function takes a spec argument that describes the protocol layer that is + * supported for checksum offload and can be called for each packet. If a + * packet does not match the specification for offload, skb_checksum_help + * is called to resolve the checksum. 
* * CHECKSUM_NONE: * @@ -179,32 +177,30 @@ * * CHECKSUM_UNNECESSARY: * - * This has the same meaning as CHECKSUM_NONE for checksum offload on + * This has the same meaning on as CHECKSUM_NONE for checksum offload on * output. * * CHECKSUM_COMPLETE: * Not used in checksum output. If a driver observes a packet with this value - * set in skbuff, it should treat the packet as if CHECKSUM_NONE were set. + * set in skbuff, if should treat as CHECKSUM_NONE being set. * * D. Non-IP checksum (CRC) offloads * * NETIF_F_SCTP_CRC - This feature indicates that a device is capable of * offloading the SCTP CRC in a packet. To perform this offload the stack - * will set csum_start and csum_offset accordingly, set ip_summed to - * CHECKSUM_PARTIAL and set csum_not_inet to 1, to provide an indication in - * the skbuff that the CHECKSUM_PARTIAL refers to CRC32c. - * A driver that supports both IP checksum offload and SCTP CRC32c offload - * must verify which offload is configured for a packet by testing the - * value of skb->csum_not_inet; skb_crc32c_csum_help is provided to resolve - * CHECKSUM_PARTIAL on skbs where csum_not_inet is set to 1. + * will set ip_summed to CHECKSUM_PARTIAL and set csum_start and csum_offset + * accordingly. Note the there is no indication in the skbuff that the + * CHECKSUM_PARTIAL refers to an SCTP checksum, a driver that supports + * both IP checksum offload and SCTP CRC offload must verify which offload + * is configured for a packet presumably by inspecting packet headers. * * NETIF_F_FCOE_CRC - This feature indicates that a device is capable of * offloading the FCOE CRC in a packet. To perform this offload the stack * will set ip_summed to CHECKSUM_PARTIAL and set csum_start and csum_offset - * accordingly. Note that there is no indication in the skbuff that the - * CHECKSUM_PARTIAL refers to an FCOE checksum, so a driver that supports + * accordingly. 
Note the there is no indication in the skbuff that the + * CHECKSUM_PARTIAL refers to an FCOE checksum, a driver that supports * both IP checksum offload and FCOE CRC offload must verify which offload - * is configured for a packet, presumably by inspecting packet headers. + * is configured for a packet presumably by inspecting packet headers. * * E. Checksumming on output with GSO. * @@ -212,9 +208,9 @@ * is implied by the SKB_GSO_* flags in gso_type. Most obviously, if the * gso_type is SKB_GSO_TCPV4 or SKB_GSO_TCPV6, TCP checksum offload as * part of the GSO operation is implied. If a checksum is being offloaded - * with GSO then ip_summed is CHECKSUM_PARTIAL, and both csum_start and - * csum_offset are set to refer to the outermost checksum being offloaded - * (two offloaded checksums are possible with UDP encapsulation). + * with GSO then ip_summed is CHECKSUM_PARTIAL, csum_start and csum_offset + * are set to refer to the outermost checksum being offload (two offloaded + * checksums are possible with UDP encapsulation). */ /* Don't change this without changing skb_csum_unnecessary! */ @@ -239,18 +235,21 @@ SKB_DATA_ALIGN(sizeof(struct sk_buff)) + \ SKB_DATA_ALIGN(sizeof(struct skb_shared_info))) -struct ahash_request; struct net_device; struct scatterlist; struct pipe_inode_info; struct iov_iter; struct napi_struct; -struct bpf_prog; -union bpf_attr; -struct skb_ext; + +#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE) +struct nf_conntrack { + atomic_t use; +}; +#endif #if IS_ENABLED(CONFIG_BRIDGE_NETFILTER) struct nf_bridge_info { + atomic_t use; enum { BRNF_PROTO_UNCHANGED, BRNF_PROTO_8021Q, @@ -278,18 +277,6 @@ struct nf_bridge_info { }; #endif -#if IS_ENABLED(CONFIG_NET_TC_SKB_EXT) -/* Chain in tc_skb_ext will be used to share the tc chain with - * ovs recirc_id. It will be set to the current chain by tc - * and read by ovs to recirc_id. 
- */ -struct tc_skb_ext { - __u32 chain; - __u16 mru; - bool post_ct; -}; -#endif - struct sk_buff_head { /* These two members must be first. */ struct sk_buff *next; @@ -320,87 +307,41 @@ extern int sysctl_max_skb_frags; */ #define GSO_BY_FRAGS 0xFFFF -typedef struct bio_vec skb_frag_t; +typedef struct skb_frag_struct skb_frag_t; + +struct skb_frag_struct { + struct { + struct page *p; + } page; +#if (BITS_PER_LONG > 32) || (PAGE_SIZE >= 65536) + __u32 page_offset; + __u32 size; +#else + __u16 page_offset; + __u16 size; +#endif +}; -/** - * skb_frag_size() - Returns the size of a skb fragment - * @frag: skb fragment - */ static inline unsigned int skb_frag_size(const skb_frag_t *frag) { - return frag->bv_len; + return frag->size; } -/** - * skb_frag_size_set() - Sets the size of a skb fragment - * @frag: skb fragment - * @size: size of fragment - */ static inline void skb_frag_size_set(skb_frag_t *frag, unsigned int size) { - frag->bv_len = size; + frag->size = size; } -/** - * skb_frag_size_add() - Increments the size of a skb fragment by @delta - * @frag: skb fragment - * @delta: value to add - */ static inline void skb_frag_size_add(skb_frag_t *frag, int delta) { - frag->bv_len += delta; + frag->size += delta; } -/** - * skb_frag_size_sub() - Decrements the size of a skb fragment by @delta - * @frag: skb fragment - * @delta: value to subtract - */ static inline void skb_frag_size_sub(skb_frag_t *frag, int delta) { - frag->bv_len -= delta; + frag->size -= delta; } -/** - * skb_frag_must_loop - Test if %p is a high memory page - * @p: fragment's page - */ -static inline bool skb_frag_must_loop(struct page *p) -{ -#if defined(CONFIG_HIGHMEM) - if (IS_ENABLED(CONFIG_DEBUG_KMAP_LOCAL_FORCE_MAP) || PageHighMem(p)) - return true; -#endif - return false; -} - -/** - * skb_frag_foreach_page - loop over pages in a fragment - * - * @f: skb frag to operate on - * @f_off: offset from start of f->bv_page - * @f_len: length from f_off to loop over - * @p: (temp var) current 
page - * @p_off: (temp var) offset from start of current page, - * non-zero only on first page. - * @p_len: (temp var) length in current page, - * < PAGE_SIZE only on first and last page. - * @copied: (temp var) length so far, excluding current p_len. - * - * A fragment can hold a compound page, in which case per-page - * operations, notably kmap_atomic, must be called for each - * regular page. - */ -#define skb_frag_foreach_page(f, f_off, f_len, p, p_off, p_len, copied) \ - for (p = skb_frag_page(f) + ((f_off) >> PAGE_SHIFT), \ - p_off = (f_off) & (PAGE_SIZE - 1), \ - p_len = skb_frag_must_loop(p) ? \ - min_t(u32, f_len, PAGE_SIZE - p_off) : f_len, \ - copied = 0; \ - copied < f_len; \ - copied += p_len, p++, p_off = 0, \ - p_len = min_t(u32, f_len - copied, PAGE_SIZE)) \ - #define HAVE_HW_TIME_STAMP /** @@ -432,9 +373,19 @@ enum { /* device driver is going to provide hardware time stamp */ SKBTX_IN_PROGRESS = 1 << 2, + /* device driver supports TX zero-copy buffers */ + SKBTX_DEV_ZEROCOPY = 1 << 3, + /* generate wifi status information (where possible) */ SKBTX_WIFI_STATUS = 1 << 4, + /* This indicates at least one fragment might be overwritten + * (as in vmsplice(), sendfile() ...) + * If we need to compute a TX checksum, we'll need to copy + * all frags to avoid possible bad checksum + */ + SKBTX_SHARED_FRAG = 1 << 5, + /* generate software time stamp when entering packet scheduling */ SKBTX_SCHED_TSTAMP = 1 << 6, }; @@ -443,21 +394,6 @@ enum { SKBTX_SCHED_TSTAMP) #define SKBTX_ANY_TSTAMP (SKBTX_HW_TSTAMP | SKBTX_ANY_SW_TSTAMP) -/* Definitions for flags in struct skb_shared_info */ -enum { - /* use zcopy routines */ - SKBFL_ZEROCOPY_ENABLE = BIT(0), - - /* This indicates at least one fragment might be overwritten - * (as in vmsplice(), sendfile() ...) 
- * If we need to compute a TX checksum, we'll need to copy - * all frags to avoid possible bad checksum - */ - SKBFL_SHARED_FRAG = BIT(1), -}; - -#define SKBFL_ZEROCOPY_FRAG (SKBFL_ZEROCOPY_ENABLE | SKBFL_SHARED_FRAG) - /* * The callback notifies userspace to release buffers when skb DMA is done in * lower device, the skb last reference should be 0 when calling this. @@ -467,63 +403,25 @@ enum { * The desc field is used to track userspace buffer index. */ struct ubuf_info { - void (*callback)(struct sk_buff *, struct ubuf_info *, - bool zerocopy_success); - union { - struct { - unsigned long desc; - void *ctx; - }; - struct { - u32 id; - u16 len; - u16 zerocopy:1; - u32 bytelen; - }; - }; - refcount_t refcnt; - u8 flags; - - struct mmpin { - struct user_struct *user; - unsigned int num_pg; - } mmp; + void (*callback)(struct ubuf_info *, bool zerocopy_success); + void *ctx; + unsigned long desc; }; -#define skb_uarg(SKB) ((struct ubuf_info *)(skb_shinfo(SKB)->destructor_arg)) - -int mm_account_pinned_pages(struct mmpin *mmp, size_t size); -void mm_unaccount_pinned_pages(struct mmpin *mmp); - -struct ubuf_info *msg_zerocopy_alloc(struct sock *sk, size_t size); -struct ubuf_info *msg_zerocopy_realloc(struct sock *sk, size_t size, - struct ubuf_info *uarg); - -void msg_zerocopy_put_abort(struct ubuf_info *uarg, bool have_uref); - -void msg_zerocopy_callback(struct sk_buff *skb, struct ubuf_info *uarg, - bool success); - -int skb_zerocopy_iter_dgram(struct sk_buff *skb, struct msghdr *msg, int len); -int skb_zerocopy_iter_stream(struct sock *sk, struct sk_buff *skb, - struct msghdr *msg, int len, - struct ubuf_info *uarg); - /* This data is invariant across clones and lives at * the end of the header data, ie. at skb->end. */ struct skb_shared_info { - __u8 flags; - __u8 meta_len; - __u8 nr_frags; + unsigned char nr_frags; __u8 tx_flags; unsigned short gso_size; /* Warning: this field is not always filled in (UFO)! 
*/ unsigned short gso_segs; + unsigned short gso_type; struct sk_buff *frag_list; struct skb_shared_hwtstamps hwtstamps; - unsigned int gso_type; u32 tskey; + __be32 ip6_frag_id; /* * Warning : all fields before dataref are cleared in __alloc_skb() @@ -561,44 +459,37 @@ enum { enum { SKB_GSO_TCPV4 = 1 << 0, + SKB_GSO_UDP = 1 << 1, /* This indicates the skb is from an untrusted source. */ - SKB_GSO_DODGY = 1 << 1, + SKB_GSO_DODGY = 1 << 2, /* This indicates the tcp segment has CWR set. */ - SKB_GSO_TCP_ECN = 1 << 2, + SKB_GSO_TCP_ECN = 1 << 3, - SKB_GSO_TCP_FIXEDID = 1 << 3, + SKB_GSO_TCP_FIXEDID = 1 << 4, - SKB_GSO_TCPV6 = 1 << 4, + SKB_GSO_TCPV6 = 1 << 5, - SKB_GSO_FCOE = 1 << 5, + SKB_GSO_FCOE = 1 << 6, - SKB_GSO_GRE = 1 << 6, + SKB_GSO_GRE = 1 << 7, - SKB_GSO_GRE_CSUM = 1 << 7, + SKB_GSO_GRE_CSUM = 1 << 8, - SKB_GSO_IPXIP4 = 1 << 8, + SKB_GSO_IPXIP4 = 1 << 9, - SKB_GSO_IPXIP6 = 1 << 9, + SKB_GSO_IPXIP6 = 1 << 10, - SKB_GSO_UDP_TUNNEL = 1 << 10, + SKB_GSO_UDP_TUNNEL = 1 << 11, - SKB_GSO_UDP_TUNNEL_CSUM = 1 << 11, + SKB_GSO_UDP_TUNNEL_CSUM = 1 << 12, - SKB_GSO_PARTIAL = 1 << 12, + SKB_GSO_PARTIAL = 1 << 13, - SKB_GSO_TUNNEL_REMCSUM = 1 << 13, + SKB_GSO_TUNNEL_REMCSUM = 1 << 14, - SKB_GSO_SCTP = 1 << 14, - - SKB_GSO_ESP = 1 << 15, - - SKB_GSO_UDP = 1 << 16, - - SKB_GSO_UDP_L4 = 1 << 17, - - SKB_GSO_FRAGLIST = 1 << 18, + SKB_GSO_SCTP = 1 << 15, }; #if BITS_PER_LONG > 32 @@ -612,19 +503,73 @@ typedef unsigned char *sk_buff_data_t; #endif /** + * struct skb_mstamp - multi resolution time stamps + * @stamp_us: timestamp in us resolution + * @stamp_jiffies: timestamp in jiffies + */ +struct skb_mstamp { + union { + u64 v64; + struct { + u32 stamp_us; + u32 stamp_jiffies; + }; + }; +}; + +/** + * skb_mstamp_get - get current timestamp + * @cl: place to store timestamps + */ +static inline void skb_mstamp_get(struct skb_mstamp *cl) +{ + u64 val = local_clock(); + + do_div(val, NSEC_PER_USEC); + cl->stamp_us = (u32)val; + cl->stamp_jiffies = (u32)jiffies; +} + +/** + * 
skb_mstamp_delta - compute the difference in usec between two skb_mstamp + * @t1: pointer to newest sample + * @t0: pointer to oldest sample + */ +static inline u32 skb_mstamp_us_delta(const struct skb_mstamp *t1, + const struct skb_mstamp *t0) +{ + s32 delta_us = t1->stamp_us - t0->stamp_us; + u32 delta_jiffies = t1->stamp_jiffies - t0->stamp_jiffies; + + /* If delta_us is negative, this might be because interval is too big, + * or local_clock() drift is too big : fallback using jiffies. + */ + if (delta_us <= 0 || + delta_jiffies >= (INT_MAX / (USEC_PER_SEC / HZ))) + + delta_us = jiffies_to_usecs(delta_jiffies); + + return delta_us; +} + +static inline bool skb_mstamp_after(const struct skb_mstamp *t1, + const struct skb_mstamp *t0) +{ + s32 diff = t1->stamp_jiffies - t0->stamp_jiffies; + + if (!diff) + diff = t1->stamp_us - t0->stamp_us; + return diff > 0; +} + +/** * struct sk_buff - socket buffer * @next: Next buffer in list * @prev: Previous buffer in list * @tstamp: Time we arrived/left - * @skb_mstamp_ns: (aka @tstamp) earliest departure time; start point - * for retransmit timer * @rbnode: RB tree node, alternative to next/prev for netem/tcp - * @list: queue head * @sk: Socket we are owned by - * @ip_defrag_offset: (aka @sk) alternate use of @sk, used in - * fragmentation management * @dev: Device we arrived on/are leaving by - * @dev_scratch: (aka @dev) alternate use of @dev when @dev would be %NULL * @cb: Control buffer. Free for use by every layer. 
Put private vars here * @_skb_refdst: destination entry (with norefcount bit) * @sp: the security path, used for xfrm @@ -640,37 +585,23 @@ typedef unsigned char *sk_buff_data_t; * @cloned: Head may be cloned (check refcnt to be sure) * @ip_summed: Driver fed us an IP checksum * @nohdr: Payload reference only, must not modify header + * @nfctinfo: Relationship of this skb to the connection * @pkt_type: Packet class * @fclone: skbuff clone status * @ipvs_property: skbuff is owned by ipvs - * @inner_protocol_type: whether the inner protocol is - * ENCAP_TYPE_ETHER or ENCAP_TYPE_IPPROTO - * @remcsum_offload: remote checksum offload is enabled - * @offload_fwd_mark: Packet was L2-forwarded in hardware - * @offload_l3_fwd_mark: Packet was L3-forwarded in hardware - * @tc_skip_classify: do not classify packet. set by IFB device - * @tc_at_ingress: used within tc_classify to distinguish in/egress - * @redirected: packet was redirected by packet classifier - * @from_ingress: packet was redirected from the ingress path * @peeked: this packet has been seen already, so stats have been * done for it, don't do them again * @nf_trace: netfilter packet trace flag * @protocol: Packet protocol from driver * @destructor: Destruct function - * @tcp_tsorted_anchor: list structure for TCP (tp->tsorted_sent_queue) - * @_sk_redir: socket redirection information for skmsg - * @_nfct: Associated connection, if any (with nfctinfo bits) + * @nfct: Associated connection, if any * @nf_bridge: Saved data about a bridged frame - see br_netfilter.c * @skb_iif: ifindex of device we arrived on * @tc_index: Traffic control index + * @tc_verd: traffic control verdict * @hash: the packet hash * @queue_mapping: Queue mapping for multiqueue devices - * @head_frag: skb was allocated from page fragments, - * not allocated by kmalloc() or vmalloc(). 
- * @pfmemalloc: skbuff was allocated from PFMEMALLOC reserves - * @pp_recycle: mark the packet for recycling instead of freeing (implies - * page_pool support on driver) - * @active_extensions: active extensions (skb_ext_id types) + * @xmit_more: More SKBs are pending for this queue * @ndisc_nodetype: router type (from link layer) * @ooo_okay: allow the mapping of a socket to a queue to be changed * @l4_hash: indicate hash is a canonical 4-tuple hash over transport @@ -679,43 +610,24 @@ typedef unsigned char *sk_buff_data_t; * @wifi_acked_valid: wifi_acked was set * @wifi_acked: whether frame was acked on wifi or not * @no_fcs: Request NIC to treat last 4 bytes as Ethernet FCS - * @encapsulation: indicates the inner headers in the skbuff are valid - * @encap_hdr_csum: software checksum is needed - * @csum_valid: checksum is already valid - * @csum_not_inet: use CRC32c to resolve CHECKSUM_PARTIAL - * @csum_complete_sw: checksum was completed by software - * @csum_level: indicates the number of consecutive checksums found in - * the packet minus one that have been verified as - * CHECKSUM_UNNECESSARY (max 3) - * @dst_pending_confirm: need to confirm neighbour - * @decrypted: Decrypted SKB - * @slow_gro: state present at GRO time, slower prepare step required - * @napi_id: id of the NAPI struct this skb came from - * @sender_cpu: (aka @napi_id) source CPU in XPS + * @napi_id: id of the NAPI struct this skb came from * @secmark: security marking * @mark: Generic packet mark - * @reserved_tailroom: (aka @mark) number of bytes of free space available - * at the tail of an sk_buff - * @vlan_present: VLAN tag is present * @vlan_proto: vlan encapsulation protocol * @vlan_tci: vlan tag control information * @inner_protocol: Protocol (encapsulation) - * @inner_ipproto: (aka @inner_protocol) stores ipproto when - * skb->inner_protocol_type == ENCAP_TYPE_IPPROTO; * @inner_transport_header: Inner transport layer header (encapsulation) * @inner_network_header: Network layer 
header (encapsulation) * @inner_mac_header: Link layer header (encapsulation) * @transport_header: Transport layer header * @network_header: Network layer header * @mac_header: Link layer header - * @kcov_handle: KCOV remote handle for remote coverage collection * @tail: Tail pointer * @end: End pointer * @head: Head of buffer * @data: Data head pointer * @truesize: Buffer size * @users: User count - see {datagram,tcp}.c - * @extensions: allocated extensions, valid if active_extensions is nonzero */ struct sk_buff { @@ -726,27 +638,15 @@ struct sk_buff { struct sk_buff *prev; union { - struct net_device *dev; - /* Some protocols might use this space to store information, - * while device pointer would be NULL. - * UDP receive path is one user. - */ - unsigned long dev_scratch; + ktime_t tstamp; + struct skb_mstamp skb_mstamp; }; }; - struct rb_node rbnode; /* used in netem, ip4 defrag, and tcp stack */ - struct list_head list; + struct rb_node rbnode; /* used in netem & tcp stack */ }; + struct sock *sk; + struct net_device *dev; - union { - struct sock *sk; - int ip_defrag_offset; - }; - - union { - ktime_t tstamp; - u64 skb_mstamp_ns; /* earliest departure time */ - }; /* * This is the control buffer. It is free to use for every * layer. Please put your private variables there. 
If you @@ -755,19 +655,16 @@ struct sk_buff { */ char cb[48] __aligned(8); - union { - struct { - unsigned long _skb_refdst; - void (*destructor)(struct sk_buff *skb); - }; - struct list_head tcp_tsorted_anchor; -#ifdef CONFIG_NET_SOCK_MSG - unsigned long _sk_redir; + unsigned long _skb_refdst; + void (*destructor)(struct sk_buff *skb); +#ifdef CONFIG_XFRM + struct sec_path *sp; #endif - }; - #if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE) - unsigned long _nfct; + struct nf_conntrack *nfct; +#endif +#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER) + struct nf_bridge_info *nf_bridge; #endif unsigned int len, data_len; @@ -777,6 +674,7 @@ struct sk_buff { /* Following fields are _not_ copied in __copy_skb_header() * Note that queue_mapping is here mostly to fill a hole. */ + kmemcheck_bitfield_begin(flags1); __u16 queue_mapping; /* if you move cloned around you also must adapt those constants */ @@ -787,19 +685,15 @@ struct sk_buff { #endif #define CLONED_OFFSET() offsetof(struct sk_buff, __cloned_offset) - /* private: */ __u8 __cloned_offset[0]; - /* public: */ __u8 cloned:1, nohdr:1, fclone:2, peeked:1, head_frag:1, - pfmemalloc:1, - pp_recycle:1; /* page_pool recycle indicator */ -#ifdef CONFIG_SKB_EXTENSIONS - __u8 active_extensions; -#endif + xmit_more:1, + __unused:1; /* one bit hole */ + kmemcheck_bitfield_end(flags1); /* fields enclosed in headers_start/headers_end are copied * using a single memcpy() in __copy_skb_header() @@ -816,65 +710,45 @@ struct sk_buff { #endif #define PKT_TYPE_OFFSET() offsetof(struct sk_buff, __pkt_type_offset) - /* private: */ __u8 __pkt_type_offset[0]; - /* public: */ __u8 pkt_type:3; + __u8 pfmemalloc:1; __u8 ignore_df:1; + __u8 nfctinfo:3; + __u8 nf_trace:1; __u8 ip_summed:2; __u8 ooo_okay:1; - __u8 l4_hash:1; __u8 sw_hash:1; __u8 wifi_acked_valid:1; __u8 wifi_acked:1; + __u8 no_fcs:1; /* Indicates the inner headers are valid in the skbuff. 
*/ __u8 encapsulation:1; __u8 encap_hdr_csum:1; __u8 csum_valid:1; - -#ifdef __BIG_ENDIAN_BITFIELD -#define PKT_VLAN_PRESENT_BIT 7 -#else -#define PKT_VLAN_PRESENT_BIT 0 -#endif -#define PKT_VLAN_PRESENT_OFFSET() offsetof(struct sk_buff, __pkt_vlan_present_offset) - /* private: */ - __u8 __pkt_vlan_present_offset[0]; - /* public: */ - __u8 vlan_present:1; __u8 csum_complete_sw:1; __u8 csum_level:2; - __u8 csum_not_inet:1; - __u8 dst_pending_confirm:1; + __u8 csum_bad:1; + #ifdef CONFIG_IPV6_NDISC_NODETYPE __u8 ndisc_nodetype:2; #endif - __u8 ipvs_property:1; __u8 inner_protocol_type:1; __u8 remcsum_offload:1; #ifdef CONFIG_NET_SWITCHDEV __u8 offload_fwd_mark:1; - __u8 offload_l3_fwd_mark:1; #endif -#ifdef CONFIG_NET_CLS_ACT - __u8 tc_skip_classify:1; - __u8 tc_at_ingress:1; -#endif - __u8 redirected:1; -#ifdef CONFIG_NET_REDIRECT - __u8 from_ingress:1; -#endif -#ifdef CONFIG_TLS_DEVICE - __u8 decrypted:1; -#endif - __u8 slow_gro:1; + /* 2, 4 or 5 bit hole */ #ifdef CONFIG_NET_SCHED __u16 tc_index; /* traffic control index */ +#ifdef CONFIG_NET_CLS_ACT + __u16 tc_verd; /* traffic control verdict */ +#endif #endif union { @@ -918,10 +792,6 @@ struct sk_buff { __u16 network_header; __u16 mac_header; -#ifdef CONFIG_KCOV - u64 kcov_handle; -#endif - /* private: */ __u32 headers_end[0]; /* public: */ @@ -932,27 +802,21 @@ struct sk_buff { unsigned char *head, *data; unsigned int truesize; - refcount_t users; - -#ifdef CONFIG_SKB_EXTENSIONS - /* only useable after checking ->active_extensions != 0 */ - struct skb_ext *extensions; -#endif + atomic_t users; }; #ifdef __KERNEL__ /* * Handling routines are only of interest to the kernel */ +#include + #define SKB_ALLOC_FCLONE 0x01 #define SKB_ALLOC_RX 0x02 #define SKB_ALLOC_NAPI 0x04 -/** - * skb_pfmemalloc - Test if the skb was allocated from PFMEMALLOC reserves - * @skb: buffer - */ +/* Returns true if the skb was allocated from PFMEMALLOC reserves */ static inline bool skb_pfmemalloc(const struct sk_buff *skb) { return 
unlikely(skb->pfmemalloc); @@ -973,7 +837,7 @@ static inline bool skb_pfmemalloc(const struct sk_buff *skb) */ static inline struct dst_entry *skb_dst(const struct sk_buff *skb) { - /* If refdst was not refcounted, check we still are in a + /* If refdst was not refcounted, check we still are in a * rcu_read_lock section */ WARN_ON((skb->_skb_refdst & SKB_DST_NOREF) && @@ -992,7 +856,6 @@ static inline struct dst_entry *skb_dst(const struct sk_buff *skb) */ static inline void skb_dst_set(struct sk_buff *skb, struct dst_entry *dst) { - skb->slow_gro |= !!dst; skb->_skb_refdst = (unsigned long)dst; } @@ -1009,7 +872,6 @@ static inline void skb_dst_set(struct sk_buff *skb, struct dst_entry *dst) static inline void skb_dst_set_noref(struct sk_buff *skb, struct dst_entry *dst) { WARN_ON(!rcu_read_lock_held() && !rcu_read_lock_bh_held()); - skb->slow_gro |= !!dst; skb->_skb_refdst = (unsigned long)dst | SKB_DST_NOREF; } @@ -1022,10 +884,6 @@ static inline bool skb_dst_is_noref(const struct sk_buff *skb) return (skb->_skb_refdst & SKB_DST_NOREF) && skb_dst(skb); } -/** - * skb_rtable - Returns the skb &rtable - * @skb: buffer - */ static inline struct rtable *skb_rtable(const struct sk_buff *skb) { return (struct rtable *)skb_dst(skb); @@ -1040,53 +898,10 @@ static inline bool skb_pkt_type_ok(u32 ptype) return ptype <= PACKET_OTHERHOST; } -/** - * skb_napi_id - Returns the skb's NAPI id - * @skb: buffer - */ -static inline unsigned int skb_napi_id(const struct sk_buff *skb) -{ -#ifdef CONFIG_NET_RX_BUSY_POLL - return skb->napi_id; -#else - return 0; -#endif -} - -/** - * skb_unref - decrement the skb's reference count - * @skb: buffer - * - * Returns true if we can free the skb. 
- */ -static inline bool skb_unref(struct sk_buff *skb) -{ - if (unlikely(!skb)) - return false; - if (likely(refcount_read(&skb->users) == 1)) - smp_rmb(); - else if (likely(!refcount_dec_and_test(&skb->users))) - return false; - - return true; -} - -void skb_release_head_state(struct sk_buff *skb); void kfree_skb(struct sk_buff *skb); void kfree_skb_list(struct sk_buff *segs); -void skb_dump(const char *level, const struct sk_buff *skb, bool full_pkt); void skb_tx_error(struct sk_buff *skb); - -#ifdef CONFIG_TRACEPOINTS void consume_skb(struct sk_buff *skb); -#else -static inline void consume_skb(struct sk_buff *skb) -{ - return kfree_skb(skb); -} -#endif - -void __consume_stateless_skb(struct sk_buff *skb); void __kfree_skb(struct sk_buff *skb); extern struct kmem_cache *skbuff_head_cache; @@ -1098,19 +913,7 @@ struct sk_buff *__alloc_skb(unsigned int size, gfp_t priority, int flags, int node); struct sk_buff *__build_skb(void *data, unsigned int frag_size); struct sk_buff *build_skb(void *data, unsigned int frag_size); -struct sk_buff *build_skb_around(struct sk_buff *skb, - void *data, unsigned int frag_size); - -struct sk_buff *napi_build_skb(void *data, unsigned int frag_size); - -/** - * alloc_skb - allocate a network buffer - * @size: size to allocate - * @priority: allocation mask - * - * This function is a convenient wrapper around __alloc_skb(). 
- */ -static inline struct sk_buff *alloc_skb(unsigned int size, +static inline struct sk_buff * __intentional_overflow(0) alloc_skb(unsigned int size, gfp_t priority) { return __alloc_skb(size, priority, 0, NUMA_NO_NODE); @@ -1121,7 +924,6 @@ struct sk_buff *alloc_skb_with_frags(unsigned long header_len, int max_page_order, int *errcode, gfp_t gfp_mask); -struct sk_buff *alloc_skb_for_msg(struct sk_buff *first); /* Layout of fast clones : [skb1][skb2][fclone_ref] */ struct sk_buff_fclones { @@ -1129,7 +931,7 @@ struct sk_buff_fclones { struct sk_buff skb2; - refcount_t fclone_ref; + atomic_t fclone_ref; }; /** @@ -1149,28 +951,25 @@ static inline bool skb_fclone_busy(const struct sock *sk, fclones = container_of(skb, struct sk_buff_fclones, skb1); return skb->fclone == SKB_FCLONE_ORIG && - refcount_read(&fclones->fclone_ref) > 1 && - READ_ONCE(fclones->skb2.sk) == sk; + atomic_read(&fclones->fclone_ref) > 1 && + fclones->skb2.sk == sk; } -/** - * alloc_skb_fclone - allocate a network buffer from fclone cache - * @size: size to allocate - * @priority: allocation mask - * - * This function is a convenient wrapper around __alloc_skb(). 
- */ static inline struct sk_buff *alloc_skb_fclone(unsigned int size, gfp_t priority) { return __alloc_skb(size, priority, SKB_ALLOC_FCLONE, NUMA_NO_NODE); } +struct sk_buff *__alloc_skb_head(gfp_t priority, int node); +static inline struct sk_buff *alloc_skb_head(gfp_t priority) +{ + return __alloc_skb_head(priority, -1); +} + struct sk_buff *skb_morph(struct sk_buff *dst, struct sk_buff *src); -void skb_headers_offset_update(struct sk_buff *skb, int off); int skb_copy_ubufs(struct sk_buff *skb, gfp_t gfp_mask); struct sk_buff *skb_clone(struct sk_buff *skb, gfp_t priority); -void skb_copy_header(struct sk_buff *new, const struct sk_buff *old); struct sk_buff *skb_copy(const struct sk_buff *skb, gfp_t priority); struct sk_buff *__pskb_copy_fclone(struct sk_buff *skb, int headroom, gfp_t gfp_mask, bool fclone); @@ -1183,33 +982,21 @@ static inline struct sk_buff *__pskb_copy(struct sk_buff *skb, int headroom, int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail, gfp_t gfp_mask); struct sk_buff *skb_realloc_headroom(struct sk_buff *skb, unsigned int headroom); -struct sk_buff *skb_expand_head(struct sk_buff *skb, unsigned int headroom); struct sk_buff *skb_copy_expand(const struct sk_buff *skb, int newheadroom, int newtailroom, gfp_t priority); -int __must_check skb_to_sgvec_nomark(struct sk_buff *skb, struct scatterlist *sg, - int offset, int len); -int __must_check skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, - int offset, int len); +int skb_to_sgvec_nomark(struct sk_buff *skb, struct scatterlist *sg, + int offset, int len); +int skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset, + int len); int skb_cow_data(struct sk_buff *skb, int tailbits, struct sk_buff **trailer); -int __skb_pad(struct sk_buff *skb, int pad, bool free_on_error); - -/** - * skb_pad - zero pad the tail of an skb - * @skb: buffer to pad - * @pad: space to pad - * - * Ensure that a buffer is followed by a padding area that is zero - * filled. 
Used by network drivers which may DMA or transfer data - * beyond the buffer end onto the wire. - * - * May return error in out of memory cases. The skb is freed on error. - */ -static inline int skb_pad(struct sk_buff *skb, int pad) -{ - return __skb_pad(skb, pad, true); -} +int skb_pad(struct sk_buff *skb, int pad); #define dev_kfree_skb(a) consume_skb(a) +int skb_append_datato_frags(struct sock *sk, struct sk_buff *skb, + int getfrag(void *from, char *to, int offset, + int len, int odd, struct sk_buff *skb), + void *from, int length); + int skb_append_pagefrags(struct sk_buff *skb, struct page *page, int offset, size_t size); @@ -1221,7 +1008,6 @@ struct skb_seq_state { struct sk_buff *root_skb; struct sk_buff *cur_skb; __u8 *frag_data; - __u32 frag_off; }; void skb_prepare_seq_read(struct sk_buff *skb, unsigned int from, @@ -1301,12 +1087,12 @@ __skb_set_sw_hash(struct sk_buff *skb, __u32 hash, bool is_l4) } void __skb_get_hash(struct sk_buff *skb); -u32 __skb_get_hash_symmetric(const struct sk_buff *skb); +u32 __skb_get_hash_symmetric(struct sk_buff *skb); u32 skb_get_poff(const struct sk_buff *skb); -u32 __skb_get_poff(const struct sk_buff *skb, const void *data, - const struct flow_keys_basic *keys, int hlen); +u32 __skb_get_poff(const struct sk_buff *skb, void *data, + const struct flow_keys *keys, int hlen); __be32 __skb_flow_get_ports(const struct sk_buff *skb, int thoff, u8 ip_proto, - const void *data, int hlen_proto); + void *data, int hlen_proto); static inline __be32 skb_flow_get_ports(const struct sk_buff *skb, int thoff, u8 ip_proto) @@ -1318,22 +1104,18 @@ void skb_flow_dissector_init(struct flow_dissector *flow_dissector, const struct flow_dissector_key *key, unsigned int key_count); -struct bpf_flow_dissector; -bool bpf_flow_dissect(struct bpf_prog *prog, struct bpf_flow_dissector *ctx, - __be16 proto, int nhoff, int hlen, unsigned int flags); - -bool __skb_flow_dissect(const struct net *net, - const struct sk_buff *skb, +bool 
__skb_flow_dissect(const struct sk_buff *skb, struct flow_dissector *flow_dissector, - void *target_container, const void *data, - __be16 proto, int nhoff, int hlen, unsigned int flags); + void *target_container, + void *data, __be16 proto, int nhoff, int hlen, + unsigned int flags); static inline bool skb_flow_dissect(const struct sk_buff *skb, struct flow_dissector *flow_dissector, void *target_container, unsigned int flags) { - return __skb_flow_dissect(NULL, skb, flow_dissector, - target_container, NULL, 0, 0, 0, flags); + return __skb_flow_dissect(skb, flow_dissector, target_container, + NULL, 0, 0, 0, flags); } static inline bool skb_flow_dissect_flow_keys(const struct sk_buff *skb, @@ -1341,45 +1123,20 @@ static inline bool skb_flow_dissect_flow_keys(const struct sk_buff *skb, unsigned int flags) { memset(flow, 0, sizeof(*flow)); - return __skb_flow_dissect(NULL, skb, &flow_keys_dissector, - flow, NULL, 0, 0, 0, flags); + return __skb_flow_dissect(skb, &flow_keys_dissector, flow, + NULL, 0, 0, 0, flags); } -static inline bool -skb_flow_dissect_flow_keys_basic(const struct net *net, - const struct sk_buff *skb, - struct flow_keys_basic *flow, - const void *data, __be16 proto, - int nhoff, int hlen, unsigned int flags) +static inline bool skb_flow_dissect_flow_keys_buf(struct flow_keys *flow, + void *data, __be16 proto, + int nhoff, int hlen, + unsigned int flags) { memset(flow, 0, sizeof(*flow)); - return __skb_flow_dissect(net, skb, &flow_keys_basic_dissector, flow, + return __skb_flow_dissect(NULL, &flow_keys_buf_dissector, flow, data, proto, nhoff, hlen, flags); } -void skb_flow_dissect_meta(const struct sk_buff *skb, - struct flow_dissector *flow_dissector, - void *target_container); - -/* Gets a skb connection tracking info, ctinfo map should be a - * map of mapsize to translate enum ip_conntrack_info states - * to user states. 
- */ -void -skb_flow_dissect_ct(const struct sk_buff *skb, - struct flow_dissector *flow_dissector, - void *target_container, - u16 *ctinfo_map, size_t mapsize, - bool post_ct); -void -skb_flow_dissect_tunnel_info(const struct sk_buff *skb, - struct flow_dissector *flow_dissector, - void *target_container); - -void skb_flow_dissect_hash(const struct sk_buff *skb, - struct flow_dissector *flow_dissector, - void *target_container); - static inline __u32 skb_get_hash(struct sk_buff *skb) { if (!skb->l4_hash && !skb->sw_hash) @@ -1388,6 +1145,8 @@ static inline __u32 skb_get_hash(struct sk_buff *skb) return skb->hash; } +__u32 __skb_get_hash_flowi6(struct sk_buff *skb, const struct flowi6 *fl6); + static inline __u32 skb_get_hash_flowi6(struct sk_buff *skb, const struct flowi6 *fl6) { if (!skb->l4_hash && !skb->sw_hash) { @@ -1400,8 +1159,21 @@ static inline __u32 skb_get_hash_flowi6(struct sk_buff *skb, const struct flowi6 return skb->hash; } -__u32 skb_get_hash_perturb(const struct sk_buff *skb, - const siphash_key_t *perturb); +__u32 __skb_get_hash_flowi4(struct sk_buff *skb, const struct flowi4 *fl); + +static inline __u32 skb_get_hash_flowi4(struct sk_buff *skb, const struct flowi4 *fl4) +{ + if (!skb->l4_hash && !skb->sw_hash) { + struct flow_keys keys; + __u32 hash = __get_hash_from_flowi4(fl4, &keys); + + __skb_set_sw_hash(skb, hash, flow_keys_have_l4(&keys)); + } + + return skb->hash; +} + +__u32 skb_get_hash_perturb(const struct sk_buff *skb, u32 perturb); static inline __u32 skb_get_hash_raw(const struct sk_buff *skb) { @@ -1415,14 +1187,6 @@ static inline void skb_copy_hash(struct sk_buff *to, const struct sk_buff *from) to->l4_hash = from->l4_hash; }; -static inline void skb_copy_decrypted(struct sk_buff *to, - const struct sk_buff *from) -{ -#ifdef CONFIG_TLS_DEVICE - to->decrypted = from->decrypted; -#endif -} - #ifdef NET_SKBUFF_DATA_USES_OFFSET static inline unsigned char *skb_end_pointer(const struct sk_buff *skb) { @@ -1453,97 +1217,6 @@ static 
inline struct skb_shared_hwtstamps *skb_hwtstamps(struct sk_buff *skb) return &skb_shinfo(skb)->hwtstamps; } -static inline struct ubuf_info *skb_zcopy(struct sk_buff *skb) -{ - bool is_zcopy = skb && skb_shinfo(skb)->flags & SKBFL_ZEROCOPY_ENABLE; - - return is_zcopy ? skb_uarg(skb) : NULL; -} - -static inline void net_zcopy_get(struct ubuf_info *uarg) -{ - refcount_inc(&uarg->refcnt); -} - -static inline void skb_zcopy_init(struct sk_buff *skb, struct ubuf_info *uarg) -{ - skb_shinfo(skb)->destructor_arg = uarg; - skb_shinfo(skb)->flags |= uarg->flags; -} - -static inline void skb_zcopy_set(struct sk_buff *skb, struct ubuf_info *uarg, - bool *have_ref) -{ - if (skb && uarg && !skb_zcopy(skb)) { - if (unlikely(have_ref && *have_ref)) - *have_ref = false; - else - net_zcopy_get(uarg); - skb_zcopy_init(skb, uarg); - } -} - -static inline void skb_zcopy_set_nouarg(struct sk_buff *skb, void *val) -{ - skb_shinfo(skb)->destructor_arg = (void *)((uintptr_t) val | 0x1UL); - skb_shinfo(skb)->flags |= SKBFL_ZEROCOPY_FRAG; -} - -static inline bool skb_zcopy_is_nouarg(struct sk_buff *skb) -{ - return (uintptr_t) skb_shinfo(skb)->destructor_arg & 0x1UL; -} - -static inline void *skb_zcopy_get_nouarg(struct sk_buff *skb) -{ - return (void *)((uintptr_t) skb_shinfo(skb)->destructor_arg & ~0x1UL); -} - -static inline void net_zcopy_put(struct ubuf_info *uarg) -{ - if (uarg) - uarg->callback(NULL, uarg, true); -} - -static inline void net_zcopy_put_abort(struct ubuf_info *uarg, bool have_uref) -{ - if (uarg) { - if (uarg->callback == msg_zerocopy_callback) - msg_zerocopy_put_abort(uarg, have_uref); - else if (have_uref) - net_zcopy_put(uarg); - } -} - -/* Release a reference on a zerocopy structure */ -static inline void skb_zcopy_clear(struct sk_buff *skb, bool zerocopy_success) -{ - struct ubuf_info *uarg = skb_zcopy(skb); - - if (uarg) { - if (!skb_zcopy_is_nouarg(skb)) - uarg->callback(skb, uarg, zerocopy_success); - - skb_shinfo(skb)->flags &= ~SKBFL_ZEROCOPY_FRAG; - } -} - 
-static inline void skb_mark_not_on_list(struct sk_buff *skb) -{ - skb->next = NULL; -} - -/* Iterate through singly-linked GSO fragments of an skb. */ -#define skb_list_walk_safe(first, skb, next_skb) \ - for ((skb) = (first), (next_skb) = (skb) ? (skb)->next : NULL; (skb); \ - (skb) = (next_skb), (next_skb) = (skb) ? (skb)->next : NULL) - -static inline void skb_list_del_init(struct sk_buff *skb) -{ - __list_del_entry(&skb->list); - skb_mark_not_on_list(skb); -} - /** * skb_queue_empty - check if a queue is empty * @list: queue head @@ -1555,19 +1228,6 @@ static inline int skb_queue_empty(const struct sk_buff_head *list) return list->next == (const struct sk_buff *) list; } -/** - * skb_queue_empty_lockless - check if a queue is empty - * @list: queue head - * - * Returns true if the queue is empty, false otherwise. - * This variant can be used in lockless contexts. - */ -static inline bool skb_queue_empty_lockless(const struct sk_buff_head *list) -{ - return READ_ONCE(list->next) == (const struct sk_buff *) list; -} - - /** * skb_queue_is_last - check if skb is the last entry in the queue * @list: queue head @@ -1639,12 +1299,13 @@ static inline struct sk_buff *skb_queue_prev(const struct sk_buff_head *list, */ static inline struct sk_buff *skb_get(struct sk_buff *skb) { - refcount_inc(&skb->users); + atomic_inc(&skb->users); return skb; } /* - * If users == 1, we are the only owner and can avoid redundant atomic changes. + * If users == 1, we are the only owner and are can avoid redundant + * atomic change. */ /** @@ -1700,9 +1361,28 @@ static inline int skb_header_unclone(struct sk_buff *skb, gfp_t pri) return 0; } +/** + * skb_header_release - release reference to header + * @skb: buffer to operate on + * + * Drop a reference to the header part of the buffer. This is done + * by acquiring a payload reference. You must not read from the header + * part of skb->data after this. + * Note : Check if you can use __skb_header_release() instead. 
+ */ +static inline void skb_header_release(struct sk_buff *skb) +{ + BUG_ON(skb->nohdr); + skb->nohdr = 1; + atomic_add(1 << SKB_DATAREF_SHIFT, &skb_shinfo(skb)->dataref); +} + /** * __skb_header_release - release reference to header * @skb: buffer to operate on + * + * Variant of skb_header_release() assuming skb is private to caller. + * We can avoid one atomic operation. */ static inline void __skb_header_release(struct sk_buff *skb) { @@ -1720,7 +1400,7 @@ static inline void __skb_header_release(struct sk_buff *skb) */ static inline int skb_shared(const struct sk_buff *skb) { - return refcount_read(&skb->users) != 1; + return atomic_read(&skb->users) != 1; } /** @@ -1810,17 +1490,6 @@ static inline struct sk_buff *skb_peek(const struct sk_buff_head *list_) return skb; } -/** - * __skb_peek - peek at the head of a non-empty &sk_buff_head - * @list_: list to peek at - * - * Like skb_peek(), but the caller knows that the list is not empty. - */ -static inline struct sk_buff *__skb_peek(const struct sk_buff_head *list_) -{ - return list_->next; -} - /** * skb_peek_next - peek skb following the given one from a queue * @skb: skb to start from @@ -1855,7 +1524,7 @@ static inline struct sk_buff *skb_peek_next(struct sk_buff *skb, */ static inline struct sk_buff *skb_peek_tail(const struct sk_buff_head *list_) { - struct sk_buff *skb = READ_ONCE(list_->prev); + struct sk_buff *skb = list_->prev; if (skb == (struct sk_buff *)list_) skb = NULL; @@ -1874,18 +1543,6 @@ static inline __u32 skb_queue_len(const struct sk_buff_head *list_) return list_->qlen; } -/** - * skb_queue_len_lockless - get queue length - * @list_: list to measure - * - * Return the length of an &sk_buff queue. - * This variant can be used in lockless contexts. 
- */ -static inline __u32 skb_queue_len_lockless(const struct sk_buff_head *list_) -{ - return READ_ONCE(list_->qlen); -} - /** * __skb_queue_head_init - initialize non-spinlock portions of sk_buff_head * @list: queue to initialize @@ -1929,18 +1586,16 @@ static inline void skb_queue_head_init_class(struct sk_buff_head *list, * The "__skb_xxxx()" functions are the non-atomic ones that * can only be called with interrupts disabled. */ +void skb_insert(struct sk_buff *old, struct sk_buff *newsk, + struct sk_buff_head *list); static inline void __skb_insert(struct sk_buff *newsk, struct sk_buff *prev, struct sk_buff *next, struct sk_buff_head *list) { - /* See skb_queue_empty_lockless() and skb_peek_tail() - * for the opposite READ_ONCE() - */ - WRITE_ONCE(newsk->next, next); - WRITE_ONCE(newsk->prev, prev); - WRITE_ONCE(next->prev, newsk); - WRITE_ONCE(prev->next, newsk); - WRITE_ONCE(list->qlen, list->qlen + 1); + newsk->next = next; + newsk->prev = prev; + next->prev = prev->next = newsk; + list->qlen++; } static inline void __skb_queue_splice(const struct sk_buff_head *list, @@ -1950,11 +1605,11 @@ static inline void __skb_queue_splice(const struct sk_buff_head *list, struct sk_buff *first = list->next; struct sk_buff *last = list->prev; - WRITE_ONCE(first->prev, prev); - WRITE_ONCE(prev->next, first); + first->prev = prev; + prev->next = first; - WRITE_ONCE(last->next, next); - WRITE_ONCE(next->prev, last); + last->next = next; + next->prev = last; } /** @@ -2058,12 +1713,12 @@ static inline void __skb_queue_before(struct sk_buff_head *list, * * A buffer cannot be placed on two lists at the same time. 
*/ +void skb_queue_head(struct sk_buff_head *list, struct sk_buff *newsk); static inline void __skb_queue_head(struct sk_buff_head *list, struct sk_buff *newsk) { __skb_queue_after(list, (struct sk_buff *)list, newsk); } -void skb_queue_head(struct sk_buff_head *list, struct sk_buff *newsk); /** * __skb_queue_tail - queue a buffer at the list tail @@ -2075,12 +1730,12 @@ void skb_queue_head(struct sk_buff_head *list, struct sk_buff *newsk); * * A buffer cannot be placed on two lists at the same time. */ +void skb_queue_tail(struct sk_buff_head *list, struct sk_buff *newsk); static inline void __skb_queue_tail(struct sk_buff_head *list, struct sk_buff *newsk) { __skb_queue_before(list, (struct sk_buff *)list, newsk); } -void skb_queue_tail(struct sk_buff_head *list, struct sk_buff *newsk); /* * remove sk_buff from list. _Must_ be called atomically, and with @@ -2091,12 +1746,12 @@ static inline void __skb_unlink(struct sk_buff *skb, struct sk_buff_head *list) { struct sk_buff *next, *prev; - WRITE_ONCE(list->qlen, list->qlen - 1); + list->qlen--; next = skb->next; prev = skb->prev; skb->next = skb->prev = NULL; - WRITE_ONCE(next->prev, prev); - WRITE_ONCE(prev->next, next); + next->prev = prev; + prev->next = next; } /** @@ -2107,6 +1762,7 @@ static inline void __skb_unlink(struct sk_buff *skb, struct sk_buff_head *list) * so must be used with appropriate locks held only. The head item is * returned or %NULL if the list is empty. 
*/ +struct sk_buff *skb_dequeue(struct sk_buff_head *list); static inline struct sk_buff *__skb_dequeue(struct sk_buff_head *list) { struct sk_buff *skb = skb_peek(list); @@ -2114,7 +1770,6 @@ static inline struct sk_buff *__skb_dequeue(struct sk_buff_head *list) __skb_unlink(skb, list); return skb; } -struct sk_buff *skb_dequeue(struct sk_buff_head *list); /** * __skb_dequeue_tail - remove from the tail of the queue @@ -2124,6 +1779,7 @@ struct sk_buff *skb_dequeue(struct sk_buff_head *list); * so must be used with appropriate locks held only. The tail item is * returned or %NULL if the list is empty. */ +struct sk_buff *skb_dequeue_tail(struct sk_buff_head *list); static inline struct sk_buff *__skb_dequeue_tail(struct sk_buff_head *list) { struct sk_buff *skb = skb_peek_tail(list); @@ -2131,7 +1787,6 @@ static inline struct sk_buff *__skb_dequeue_tail(struct sk_buff_head *list) __skb_unlink(skb, list); return skb; } -struct sk_buff *skb_dequeue_tail(struct sk_buff_head *list); static inline bool skb_is_nonlinear(const struct sk_buff *skb) @@ -2144,18 +1799,13 @@ static inline unsigned int skb_headlen(const struct sk_buff *skb) return skb->len - skb->data_len; } -static inline unsigned int __skb_pagelen(const struct sk_buff *skb) +static inline int skb_pagelen(const struct sk_buff *skb) { - unsigned int i, len = 0; + int i, len = 0; - for (i = skb_shinfo(skb)->nr_frags - 1; (int)i >= 0; i--) + for (i = (int)skb_shinfo(skb)->nr_frags - 1; i >= 0; i--) len += skb_frag_size(&skb_shinfo(skb)->frags[i]); - return len; -} - -static inline unsigned int skb_pagelen(const struct sk_buff *skb) -{ - return skb_headlen(skb) + __skb_pagelen(skb); + return len + skb_headlen(skb); } /** @@ -2181,8 +1831,8 @@ static inline void __skb_fill_page_desc(struct sk_buff *skb, int i, * that not all callers have unique ownership of the page but rely * on page_is_pfmemalloc doing the right thing(tm). 
*/ - frag->bv_page = page; - frag->bv_offset = off; + frag->page.p = page; + frag->page_offset = off; skb_frag_size_set(frag, size); page = compound_head(page); @@ -2217,6 +1867,8 @@ void skb_add_rx_frag(struct sk_buff *skb, int i, struct page *page, int off, void skb_coalesce_rx_frag(struct sk_buff *skb, int i, int size, unsigned int truesize); +#define SKB_PAGE_ASSERT(skb) BUG_ON(skb_shinfo(skb)->nr_frags) +#define SKB_FRAG_ASSERT(skb) BUG_ON(skb_has_frag_list(skb)) #define SKB_LINEAR_ASSERT(skb) BUG_ON(skb_is_nonlinear(skb)) #ifdef NET_SKBUFF_DATA_USES_OFFSET @@ -2257,87 +1909,41 @@ static inline void skb_set_tail_pointer(struct sk_buff *skb, const int offset) /* * Add data to an sk_buff */ -void *pskb_put(struct sk_buff *skb, struct sk_buff *tail, int len); -void *skb_put(struct sk_buff *skb, unsigned int len); -static inline void *__skb_put(struct sk_buff *skb, unsigned int len) +unsigned char *pskb_put(struct sk_buff *skb, struct sk_buff *tail, int len); +unsigned char *skb_put(struct sk_buff *skb, unsigned int len); +static inline unsigned char *__skb_put(struct sk_buff *skb, unsigned int len) { - void *tmp = skb_tail_pointer(skb); + unsigned char *tmp = skb_tail_pointer(skb); SKB_LINEAR_ASSERT(skb); skb->tail += len; skb->len += len; return tmp; } -static inline void *__skb_put_zero(struct sk_buff *skb, unsigned int len) -{ - void *tmp = __skb_put(skb, len); - - memset(tmp, 0, len); - return tmp; -} - -static inline void *__skb_put_data(struct sk_buff *skb, const void *data, - unsigned int len) -{ - void *tmp = __skb_put(skb, len); - - memcpy(tmp, data, len); - return tmp; -} - -static inline void __skb_put_u8(struct sk_buff *skb, u8 val) -{ - *(u8 *)__skb_put(skb, 1) = val; -} - -static inline void *skb_put_zero(struct sk_buff *skb, unsigned int len) -{ - void *tmp = skb_put(skb, len); - - memset(tmp, 0, len); - - return tmp; -} - -static inline void *skb_put_data(struct sk_buff *skb, const void *data, - unsigned int len) -{ - void *tmp = skb_put(skb, 
len); - - memcpy(tmp, data, len); - - return tmp; -} - -static inline void skb_put_u8(struct sk_buff *skb, u8 val) -{ - *(u8 *)skb_put(skb, 1) = val; -} - -void *skb_push(struct sk_buff *skb, unsigned int len); -static inline void *__skb_push(struct sk_buff *skb, unsigned int len) +unsigned char *skb_push(struct sk_buff *skb, unsigned int len); +static inline unsigned char *__skb_push(struct sk_buff *skb, unsigned int len) { skb->data -= len; skb->len += len; return skb->data; } -void *skb_pull(struct sk_buff *skb, unsigned int len); -static inline void *__skb_pull(struct sk_buff *skb, unsigned int len) +unsigned char *skb_pull(struct sk_buff *skb, unsigned int len); +static inline unsigned char *__skb_pull(struct sk_buff *skb, unsigned int len) { skb->len -= len; BUG_ON(skb->len < skb->data_len); return skb->data += len; } -static inline void *skb_pull_inline(struct sk_buff *skb, unsigned int len) +static inline unsigned char *skb_pull_inline(struct sk_buff *skb, unsigned int len) { return unlikely(len > skb->len) ? NULL : __skb_pull(skb, len); } -void *__pskb_pull_tail(struct sk_buff *skb, int delta); +unsigned char *__pskb_pull_tail(struct sk_buff *skb, int delta); -static inline void *__pskb_pull(struct sk_buff *skb, unsigned int len) +static inline unsigned char *__pskb_pull(struct sk_buff *skb, unsigned int len) { if (len > skb_headlen(skb) && !__pskb_pull_tail(skb, len - skb_headlen(skb))) @@ -2346,22 +1952,20 @@ static inline void *__pskb_pull(struct sk_buff *skb, unsigned int len) return skb->data += len; } -static inline void *pskb_pull(struct sk_buff *skb, unsigned int len) +static inline unsigned char *pskb_pull(struct sk_buff *skb, unsigned int len) { return unlikely(len > skb->len) ? 
NULL : __pskb_pull(skb, len); } -static inline bool pskb_may_pull(struct sk_buff *skb, unsigned int len) +static inline int pskb_may_pull(struct sk_buff *skb, unsigned int len) { if (likely(len <= skb_headlen(skb))) - return true; + return 1; if (unlikely(len > skb->len)) - return false; + return 0; return __pskb_pull_tail(skb, len - skb_headlen(skb)) != NULL; } -void skb_condense(struct sk_buff *skb); - /** * skb_headroom - bytes at buffer head * @skb: buffer to check @@ -2565,26 +2169,11 @@ static inline unsigned char *skb_mac_header(const struct sk_buff *skb) return skb->head + skb->mac_header; } -static inline int skb_mac_offset(const struct sk_buff *skb) -{ - return skb_mac_header(skb) - skb->data; -} - -static inline u32 skb_mac_header_len(const struct sk_buff *skb) -{ - return skb->network_header - skb->mac_header; -} - static inline int skb_mac_header_was_set(const struct sk_buff *skb) { return skb->mac_header != (typeof(skb->mac_header))~0U; } -static inline void skb_unset_mac_header(struct sk_buff *skb) -{ - skb->mac_header = (typeof(skb->mac_header))~0U; -} - static inline void skb_reset_mac_header(struct sk_buff *skb) { skb->mac_header = skb->data - skb->head; @@ -2601,16 +2190,17 @@ static inline void skb_pop_mac_header(struct sk_buff *skb) skb->mac_header = skb->network_header; } -static inline void skb_probe_transport_header(struct sk_buff *skb) +static inline void skb_probe_transport_header(struct sk_buff *skb, + const int offset_hint) { - struct flow_keys_basic keys; + struct flow_keys keys; if (skb_transport_header_was_set(skb)) return; - - if (skb_flow_dissect_flow_keys_basic(NULL, skb, &keys, - NULL, 0, 0, 0, 0)) + else if (skb_flow_dissect_flow_keys(skb, &keys, 0)) skb_set_transport_header(skb, keys.control.thoff); + else + skb_set_transport_header(skb, offset_hint); } static inline void skb_mac_header_rebuild(struct sk_buff *skb) @@ -2633,7 +2223,7 @@ static inline unsigned char *skb_checksum_start(const struct sk_buff *skb) return skb->head + 
skb->csum_start; } -static inline int skb_transport_offset(const struct sk_buff *skb) +static inline int __intentional_overflow(0) skb_transport_offset(const struct sk_buff *skb) { return skb_transport_header(skb) - skb->data; } @@ -2648,7 +2238,7 @@ static inline u32 skb_inner_network_header_len(const struct sk_buff *skb) return skb->inner_transport_header - skb->inner_network_header; } -static inline int skb_network_offset(const struct sk_buff *skb) +static inline int __intentional_overflow(0) skb_network_offset(const struct sk_buff *skb) { return skb_network_header(skb) - skb->data; } @@ -2704,19 +2294,21 @@ static inline int pskb_network_may_pull(struct sk_buff *skb, unsigned int len) * * Using max(32, L1_CACHE_BYTES) makes sense (especially with RPS) * to reduce average number of cache lines per packet. - * get_rps_cpu() for example only access one 64 bytes aligned block : + * get_rps_cpus() for example only access one 64 bytes aligned block : * NET_IP_ALIGN(2) + ethernet_header(14) + IP_header(20/40) + ports(8) */ #ifndef NET_SKB_PAD -#define NET_SKB_PAD max(32, L1_CACHE_BYTES) +#define NET_SKB_PAD max(_AC(32,UL), L1_CACHE_BYTES) #endif int ___pskb_trim(struct sk_buff *skb, unsigned int len); static inline void __skb_set_length(struct sk_buff *skb, unsigned int len) { - if (WARN_ON(skb_is_nonlinear(skb))) + if (unlikely(skb_is_nonlinear(skb))) { + WARN_ON(1); return; + } skb->len = len; skb_set_tail_pointer(skb, len); } @@ -2800,18 +2392,7 @@ static inline void skb_orphan(struct sk_buff *skb) */ static inline int skb_orphan_frags(struct sk_buff *skb, gfp_t gfp_mask) { - if (likely(!skb_zcopy(skb))) - return 0; - if (!skb_zcopy_is_nouarg(skb) && - skb_uarg(skb)->callback == msg_zerocopy_callback) - return 0; - return skb_copy_ubufs(skb, gfp_mask); -} - -/* Frags must be orphaned, even if refcounted, if skb might loop to rx path */ -static inline int skb_orphan_frags_rx(struct sk_buff *skb, gfp_t gfp_mask) -{ - if (likely(!skb_zcopy(skb))) + if 
(likely(!(skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY))) return 0; return skb_copy_ubufs(skb, gfp_mask); } @@ -2824,36 +2405,17 @@ static inline int skb_orphan_frags_rx(struct sk_buff *skb, gfp_t gfp_mask) * the list and one reference dropped. This function does not take the * list lock and the caller must hold the relevant locks to use it. */ +void skb_queue_purge(struct sk_buff_head *list); static inline void __skb_queue_purge(struct sk_buff_head *list) { struct sk_buff *skb; while ((skb = __skb_dequeue(list)) != NULL) kfree_skb(skb); } -void skb_queue_purge(struct sk_buff_head *list); -unsigned int skb_rbtree_purge(struct rb_root *root); +void skb_rbtree_purge(struct rb_root *root); -void *__netdev_alloc_frag_align(unsigned int fragsz, unsigned int align_mask); - -/** - * netdev_alloc_frag - allocate a page fragment - * @fragsz: fragment size - * - * Allocates a frag from a page for receive buffer. - * Uses GFP_ATOMIC allocations. - */ -static inline void *netdev_alloc_frag(unsigned int fragsz) -{ - return __netdev_alloc_frag_align(fragsz, ~0u); -} - -static inline void *netdev_alloc_frag_align(unsigned int fragsz, - unsigned int align) -{ - WARN_ON_ONCE(!is_power_of_2(align)); - return __netdev_alloc_frag_align(fragsz, -align); -} +void *netdev_alloc_frag(unsigned int fragsz); struct sk_buff *__netdev_alloc_skb(struct net_device *dev, unsigned int length, gfp_t gfp_mask); @@ -2909,23 +2471,10 @@ static inline struct sk_buff *netdev_alloc_skb_ip_align(struct net_device *dev, static inline void skb_free_frag(void *addr) { - page_frag_free(addr); -} - -void *__napi_alloc_frag_align(unsigned int fragsz, unsigned int align_mask); - -static inline void *napi_alloc_frag(unsigned int fragsz) -{ - return __napi_alloc_frag_align(fragsz, ~0u); -} - -static inline void *napi_alloc_frag_align(unsigned int fragsz, - unsigned int align) -{ - WARN_ON_ONCE(!is_power_of_2(align)); - return __napi_alloc_frag_align(fragsz, -align); + __free_page_frag(addr); } +void 
*napi_alloc_frag(unsigned int fragsz); struct sk_buff *__napi_alloc_skb(struct napi_struct *napi, unsigned int length, gfp_t gfp_mask); static inline struct sk_buff *napi_alloc_skb(struct napi_struct *napi, @@ -2935,7 +2484,7 @@ static inline struct sk_buff *napi_alloc_skb(struct napi_struct *napi, } void napi_consume_skb(struct sk_buff *skb, int budget); -void napi_skb_free_stolen_head(struct sk_buff *skb); +void __kfree_skb_flush(void); void __kfree_skb_defer(struct sk_buff *skb); /** @@ -2958,7 +2507,7 @@ static inline struct page *__dev_alloc_pages(gfp_t gfp_mask, * 4. __GFP_MEMALLOC is ignored if __GFP_NOMEMALLOC is set due to * code in gfp_to_alloc_flags that should be enforcing this. */ - gfp_mask |= __GFP_COMP | __GFP_MEMALLOC; + gfp_mask |= __GFP_COLD | __GFP_COMP | __GFP_MEMALLOC; return alloc_pages_node(NUMA_NO_NODE, gfp_mask, order); } @@ -2986,74 +2535,18 @@ static inline struct page *dev_alloc_page(void) return dev_alloc_pages(0); } -/** - * dev_page_is_reusable - check whether a page can be reused for network Rx - * @page: the page to test - * - * A page shouldn't be considered for reusing/recycling if it was allocated - * under memory pressure or at a distant memory node. - * - * Returns false if this page should be returned to page allocator, true - * otherwise. 
- */ -static inline bool dev_page_is_reusable(const struct page *page) -{ - return likely(page_to_nid(page) == numa_mem_id() && - !page_is_pfmemalloc(page)); -} - /** * skb_propagate_pfmemalloc - Propagate pfmemalloc if skb is allocated after RX page * @page: The page that was allocated from skb_alloc_page * @skb: The skb that may need pfmemalloc set */ -static inline void skb_propagate_pfmemalloc(const struct page *page, - struct sk_buff *skb) +static inline void skb_propagate_pfmemalloc(struct page *page, + struct sk_buff *skb) { if (page_is_pfmemalloc(page)) skb->pfmemalloc = true; } -/** - * skb_frag_off() - Returns the offset of a skb fragment - * @frag: the paged fragment - */ -static inline unsigned int skb_frag_off(const skb_frag_t *frag) -{ - return frag->bv_offset; -} - -/** - * skb_frag_off_add() - Increments the offset of a skb fragment by @delta - * @frag: skb fragment - * @delta: value to add - */ -static inline void skb_frag_off_add(skb_frag_t *frag, int delta) -{ - frag->bv_offset += delta; -} - -/** - * skb_frag_off_set() - Sets the offset of a skb fragment - * @frag: skb fragment - * @offset: offset of fragment - */ -static inline void skb_frag_off_set(skb_frag_t *frag, unsigned int offset) -{ - frag->bv_offset = offset; -} - -/** - * skb_frag_off_copy() - Sets the offset of a skb fragment from another fragment - * @fragto: skb fragment where offset is set - * @fragfrom: skb fragment offset is copied from - */ -static inline void skb_frag_off_copy(skb_frag_t *fragto, - const skb_frag_t *fragfrom) -{ - fragto->bv_offset = fragfrom->bv_offset; -} - /** * skb_frag_page - retrieve the page referred to by a paged fragment * @frag: the paged fragment @@ -3062,7 +2555,7 @@ static inline void skb_frag_off_copy(skb_frag_t *fragto, */ static inline struct page *skb_frag_page(const skb_frag_t *frag) { - return frag->bv_page; + return frag->page.p; } /** @@ -3091,20 +2584,12 @@ static inline void skb_frag_ref(struct sk_buff *skb, int f) /** * __skb_frag_unref 
- release a reference on a paged fragment. * @frag: the paged fragment - * @recycle: recycle the page if allocated via page_pool * - * Releases a reference on the paged fragment @frag - * or recycles the page via the page_pool API. + * Releases a reference on the paged fragment @frag. */ -static inline void __skb_frag_unref(skb_frag_t *frag, bool recycle) +static inline void __skb_frag_unref(skb_frag_t *frag) { - struct page *page = skb_frag_page(frag); - -#ifdef CONFIG_PAGE_POOL - if (recycle && page_pool_return_skb_page(page)) - return; -#endif - put_page(page); + put_page(skb_frag_page(frag)); } /** @@ -3116,7 +2601,7 @@ static inline void __skb_frag_unref(skb_frag_t *frag, bool recycle) */ static inline void skb_frag_unref(struct sk_buff *skb, int f) { - __skb_frag_unref(&skb_shinfo(skb)->frags[f], skb->pp_recycle); + __skb_frag_unref(&skb_shinfo(skb)->frags[f]); } /** @@ -3128,7 +2613,7 @@ static inline void skb_frag_unref(struct sk_buff *skb, int f) */ static inline void *skb_frag_address(const skb_frag_t *frag) { - return page_address(skb_frag_page(frag)) + skb_frag_off(frag); + return page_address(skb_frag_page(frag)) + frag->page_offset; } /** @@ -3144,18 +2629,7 @@ static inline void *skb_frag_address_safe(const skb_frag_t *frag) if (unlikely(!ptr)) return NULL; - return ptr + skb_frag_off(frag); -} - -/** - * skb_frag_page_copy() - sets the page in a fragment from another fragment - * @fragto: skb fragment where page is set - * @fragfrom: skb fragment page is copied from - */ -static inline void skb_frag_page_copy(skb_frag_t *fragto, - const skb_frag_t *fragfrom) -{ - fragto->bv_page = fragfrom->bv_page; + return ptr + frag->page_offset; } /** @@ -3167,7 +2641,7 @@ static inline void skb_frag_page_copy(skb_frag_t *fragto, */ static inline void __skb_frag_set_page(skb_frag_t *frag, struct page *page) { - frag->bv_page = page; + frag->page.p = page; } /** @@ -3193,7 +2667,7 @@ bool skb_page_frag_refill(unsigned int sz, struct page_frag *pfrag, gfp_t prio); 
* @offset: the offset within the fragment (starting at the * fragment's own offset) * @size: the number of bytes to map - * @dir: the direction of the mapping (``PCI_DMA_*``) + * @dir: the direction of the mapping (%PCI_DMA_*) * * Maps the page associated with @frag to @device. */ @@ -3203,7 +2677,7 @@ static inline dma_addr_t skb_frag_dma_map(struct device *dev, enum dma_data_direction dir) { return dma_map_page(dev, skb_frag_page(frag), - skb_frag_off(frag) + offset, size, dir); + frag->page_offset + offset, size, dir); } static inline struct sk_buff *pskb_copy(struct sk_buff *skb, @@ -3305,32 +2779,6 @@ static inline int skb_padto(struct sk_buff *skb, unsigned int len) return skb_pad(skb, len - size); } -/** - * __skb_put_padto - increase size and pad an skbuff up to a minimal size - * @skb: buffer to pad - * @len: minimal length - * @free_on_error: free buffer on error - * - * Pads up a buffer to ensure the trailing bytes exist and are - * blanked. If the buffer already contains sufficient data it - * is untouched. Otherwise it is extended. Returns zero on - * success. The skb is freed on error if @free_on_error is true. - */ -static inline int __must_check __skb_put_padto(struct sk_buff *skb, - unsigned int len, - bool free_on_error) -{ - unsigned int size = skb->len; - - if (unlikely(size < len)) { - len -= size; - if (__skb_pad(skb, len, free_on_error)) - return -ENOMEM; - __skb_put(skb, len); - } - return 0; -} - /** * skb_put_padto - increase size and pad an skbuff up to a minimal size * @skb: buffer to pad @@ -3341,9 +2789,17 @@ static inline int __must_check __skb_put_padto(struct sk_buff *skb, * is untouched. Otherwise it is extended. Returns zero on * success. The skb is freed on error. 
*/ -static inline int __must_check skb_put_padto(struct sk_buff *skb, unsigned int len) +static inline int skb_put_padto(struct sk_buff *skb, unsigned int len) { - return __skb_put_padto(skb, len, true); + unsigned int size = skb->len; + + if (unlikely(size < len)) { + len -= size; + if (skb_pad(skb, len)) + return -ENOMEM; + __skb_put(skb, len); + } + return 0; } static inline int skb_add_data(struct sk_buff *skb, @@ -3353,12 +2809,12 @@ static inline int skb_add_data(struct sk_buff *skb, if (skb->ip_summed == CHECKSUM_NONE) { __wsum csum = 0; - if (csum_and_copy_from_iter_full(skb_put(skb, copy), copy, - &csum, from)) { + if (csum_and_copy_from_iter(skb_put(skb, copy), copy, + &csum, from) == copy) { skb->csum = csum_block_add(skb->csum, csum, off); return 0; } - } else if (copy_from_iter_full(skb_put(skb, copy), copy, from)) + } else if (copy_from_iter(skb_put(skb, copy), copy, from) == copy) return 0; __skb_trim(skb, off); @@ -3368,13 +2824,11 @@ static inline int skb_add_data(struct sk_buff *skb, static inline bool skb_can_coalesce(struct sk_buff *skb, int i, const struct page *page, int off) { - if (skb_zcopy(skb)) - return false; if (i) { - const skb_frag_t *frag = &skb_shinfo(skb)->frags[i - 1]; + const struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i - 1]; return page == skb_frag_page(frag) && - off == skb_frag_off(frag) + skb_frag_size(frag); + off == frag->page_offset + skb_frag_size(frag); } return false; } @@ -3406,7 +2860,7 @@ static inline int skb_linearize(struct sk_buff *skb) static inline bool skb_has_shared_frag(const struct sk_buff *skb) { return skb_is_nonlinear(skb) && - skb_shinfo(skb)->flags & SKBFL_SHARED_FRAG; + skb_shinfo(skb)->tx_flags & SKBTX_SHARED_FRAG; } /** @@ -3474,7 +2928,7 @@ static inline void skb_postpush_rcsum(struct sk_buff *skb, __skb_postpush_rcsum(skb, start, len, 0); } -void *skb_pull_rcsum(struct sk_buff *skb, unsigned int len); +unsigned char *skb_pull_rcsum(struct sk_buff *skb, unsigned int len); /** * 
skb_push_rcsum - push skb and update receive checksum @@ -3487,14 +2941,14 @@ void *skb_pull_rcsum(struct sk_buff *skb, unsigned int len); * that the checksum difference is zero (e.g., a valid IP header) * or you are setting ip_summed to CHECKSUM_NONE. */ -static inline void *skb_push_rcsum(struct sk_buff *skb, unsigned int len) +static inline unsigned char *skb_push_rcsum(struct sk_buff *skb, + unsigned int len) { skb_push(skb, len); skb_postpush_rcsum(skb, skb->data, len); return skb->data; } -int pskb_trim_rcsum_slow(struct sk_buff *skb, unsigned int len); /** * pskb_trim_rcsum - trim received skb and update checksum * @skb: buffer to trim @@ -3502,14 +2956,15 @@ int pskb_trim_rcsum_slow(struct sk_buff *skb, unsigned int len); * * This is exactly the same as pskb_trim except that it ensures the * checksum of received packets are still valid after the operation. - * It can change skb pointers. */ static inline int pskb_trim_rcsum(struct sk_buff *skb, unsigned int len) { if (likely(len >= skb->len)) return 0; - return pskb_trim_rcsum_slow(skb, len); + if (skb->ip_summed == CHECKSUM_COMPLETE) + skb->ip_summed = CHECKSUM_NONE; + return __pskb_trim(skb, len); } static inline int __skb_trim_rcsum(struct sk_buff *skb, unsigned int len) @@ -3527,12 +2982,6 @@ static inline int __skb_grow_rcsum(struct sk_buff *skb, unsigned int len) return __skb_grow(skb, len); } -#define rb_to_skb(rb) rb_entry_safe(rb, struct sk_buff, rbnode) -#define skb_rb_first(root) rb_to_skb(rb_first(root)) -#define skb_rb_last(root) rb_to_skb(rb_last(root)) -#define skb_rb_next(skb) rb_to_skb(rb_next(&(skb)->rbnode)) -#define skb_rb_prev(skb) rb_to_skb(rb_prev(&(skb)->rbnode)) - #define skb_queue_walk(queue, skb) \ for (skb = (queue)->next; \ skb != (struct sk_buff *)(queue); \ @@ -3547,18 +2996,6 @@ static inline int __skb_grow_rcsum(struct sk_buff *skb, unsigned int len) for (; skb != (struct sk_buff *)(queue); \ skb = skb->next) -#define skb_rbtree_walk(skb, root) \ - for (skb = 
skb_rb_first(root); skb != NULL; \ - skb = skb_rb_next(skb)) - -#define skb_rbtree_walk_from(skb) \ - for (; skb != NULL; \ - skb = skb_rb_next(skb)) - -#define skb_rbtree_walk_from_safe(skb, tmp) \ - for (; tmp = skb ? skb_rb_next(skb) : NULL, (skb != NULL); \ - skb = tmp) - #define skb_queue_walk_from_safe(queue, skb, tmp) \ for (tmp = skb->next; \ skb != (struct sk_buff *)(queue); \ @@ -3593,37 +3030,26 @@ static inline void skb_frag_list_init(struct sk_buff *skb) for (iter = skb_shinfo(skb)->frag_list; iter; iter = iter->next) -int __skb_wait_for_more_packets(struct sock *sk, struct sk_buff_head *queue, - int *err, long *timeo_p, +int __skb_wait_for_more_packets(struct sock *sk, int *err, long *timeo_p, const struct sk_buff *skb); -struct sk_buff *__skb_try_recv_from_queue(struct sock *sk, - struct sk_buff_head *queue, - unsigned int flags, - int *off, int *err, - struct sk_buff **last); -struct sk_buff *__skb_try_recv_datagram(struct sock *sk, - struct sk_buff_head *queue, - unsigned int flags, int *off, int *err, +struct sk_buff *__skb_try_recv_datagram(struct sock *sk, unsigned flags, + int *peeked, int *off, int *err, struct sk_buff **last); -struct sk_buff *__skb_recv_datagram(struct sock *sk, - struct sk_buff_head *sk_queue, - unsigned int flags, int *off, int *err); +struct sk_buff *__skb_recv_datagram(struct sock *sk, unsigned flags, + int *peeked, int *off, int *err); struct sk_buff *skb_recv_datagram(struct sock *sk, unsigned flags, int noblock, int *err); -__poll_t datagram_poll(struct file *file, struct socket *sock, +unsigned int datagram_poll(struct file *file, struct socket *sock, struct poll_table_struct *wait); -int skb_copy_datagram_iter(const struct sk_buff *from, int offset, +int __intentional_overflow(0) skb_copy_datagram_iter(const struct sk_buff *from, int offset, struct iov_iter *to, int size); -static inline int skb_copy_datagram_msg(const struct sk_buff *from, int offset, +static inline int __intentional_overflow(2,4) 
skb_copy_datagram_msg(const struct sk_buff *from, int offset, struct msghdr *msg, int size) { return skb_copy_datagram_iter(from, offset, &msg->msg_iter, size); } int skb_copy_and_csum_datagram_msg(struct sk_buff *skb, int hlen, struct msghdr *msg); -int skb_copy_and_hash_datagram_iter(const struct sk_buff *skb, int offset, - struct iov_iter *to, int len, - struct ahash_request *hash); int skb_copy_datagram_from_iter(struct sk_buff *skb, int offset, struct iov_iter *from, int len); int zerocopy_sg_from_iter(struct sk_buff *skb, struct iov_iter *frm); @@ -3638,13 +3064,10 @@ int skb_kill_datagram(struct sock *sk, struct sk_buff *skb, unsigned int flags); int skb_copy_bits(const struct sk_buff *skb, int offset, void *to, int len); int skb_store_bits(struct sk_buff *skb, int offset, const void *from, int len); __wsum skb_copy_and_csum_bits(const struct sk_buff *skb, int offset, u8 *to, - int len); + int len, __wsum csum); int skb_splice_bits(struct sk_buff *skb, struct sock *sk, unsigned int offset, struct pipe_inode_info *pipe, unsigned int len, unsigned int flags); -int skb_send_sock_locked(struct sock *sk, struct sk_buff *skb, int offset, - int len); -int skb_send_sock(struct sock *sk, struct sk_buff *skb, int offset, int len); void skb_copy_and_csum_dev(const struct sk_buff *skb, u8 *to); unsigned int skb_zerocopy_headlen(const struct sk_buff *from); int skb_zerocopy(struct sk_buff *to, struct sk_buff *from, @@ -3652,31 +3075,20 @@ int skb_zerocopy(struct sk_buff *to, struct sk_buff *from, void skb_split(struct sk_buff *skb, struct sk_buff *skb1, const u32 len); int skb_shift(struct sk_buff *tgt, struct sk_buff *skb, int shiftlen); void skb_scrub_packet(struct sk_buff *skb, bool xnet); -bool skb_gso_validate_network_len(const struct sk_buff *skb, unsigned int mtu); -bool skb_gso_validate_mac_len(const struct sk_buff *skb, unsigned int len); +unsigned int skb_gso_transport_seglen(const struct sk_buff *skb); +bool skb_gso_validate_mtu(const struct sk_buff *skb, 
unsigned int mtu); struct sk_buff *skb_segment(struct sk_buff *skb, netdev_features_t features); -struct sk_buff *skb_segment_list(struct sk_buff *skb, netdev_features_t features, - unsigned int offset); struct sk_buff *skb_vlan_untag(struct sk_buff *skb); int skb_ensure_writable(struct sk_buff *skb, int write_len); int __skb_vlan_pop(struct sk_buff *skb, u16 *vlan_tci); int skb_vlan_pop(struct sk_buff *skb); int skb_vlan_push(struct sk_buff *skb, __be16 vlan_proto, u16 vlan_tci); -int skb_eth_pop(struct sk_buff *skb); -int skb_eth_push(struct sk_buff *skb, const unsigned char *dst, - const unsigned char *src); -int skb_mpls_push(struct sk_buff *skb, __be32 mpls_lse, __be16 mpls_proto, - int mac_len, bool ethernet); -int skb_mpls_pop(struct sk_buff *skb, __be16 next_proto, int mac_len, - bool ethernet); -int skb_mpls_update_lse(struct sk_buff *skb, __be32 mpls_lse); -int skb_mpls_dec_ttl(struct sk_buff *skb); struct sk_buff *pskb_extract(struct sk_buff *skb, int off, int to_copy, gfp_t gfp); static inline int memcpy_from_msg(void *data, struct msghdr *msg, int len) { - return copy_from_iter_full(data, len, &msg->msg_iter) ? 0 : -EFAULT; + return copy_from_iter(data, len, &msg->msg_iter) == len ? 
0 : -EFAULT; } static inline int memcpy_to_msg(struct msghdr *msg, void *data, int len) @@ -3689,21 +3101,20 @@ struct skb_checksum_ops { __wsum (*combine)(__wsum csum, __wsum csum2, int offset, int len); }; -extern const struct skb_checksum_ops *crc32c_csum_stub __read_mostly; - __wsum __skb_checksum(const struct sk_buff *skb, int offset, int len, __wsum csum, const struct skb_checksum_ops *ops); __wsum skb_checksum(const struct sk_buff *skb, int offset, int len, __wsum csum); static inline void * __must_check -__skb_header_pointer(const struct sk_buff *skb, int offset, int len, - const void *data, int hlen, void *buffer) +__skb_header_pointer(const struct sk_buff *skb, int offset, + int len, void *data, int hlen, void *buffer) { - if (likely(hlen - offset >= len)) - return (void *)data + offset; + if (hlen - offset >= len) + return data + offset; - if (!skb || unlikely(skb_copy_bits(skb, offset, buffer, len) < 0)) + if (!skb || + skb_copy_bits(skb, offset, buffer, len) < 0) return NULL; return buffer; @@ -3773,43 +3184,22 @@ static inline ktime_t skb_get_ktime(const struct sk_buff *skb) /** * skb_get_timestamp - get timestamp from a skb * @skb: skb to get stamp from - * @stamp: pointer to struct __kernel_old_timeval to store stamp in + * @stamp: pointer to struct timeval to store stamp in * * Timestamps are stored in the skb as offsets to a base timestamp. * This function converts the offset back to a struct timeval and stores * it in stamp. 
*/ static inline void skb_get_timestamp(const struct sk_buff *skb, - struct __kernel_old_timeval *stamp) + struct timeval *stamp) { - *stamp = ns_to_kernel_old_timeval(skb->tstamp); -} - -static inline void skb_get_new_timestamp(const struct sk_buff *skb, - struct __kernel_sock_timeval *stamp) -{ - struct timespec64 ts = ktime_to_timespec64(skb->tstamp); - - stamp->tv_sec = ts.tv_sec; - stamp->tv_usec = ts.tv_nsec / 1000; + *stamp = ktime_to_timeval(skb->tstamp); } static inline void skb_get_timestampns(const struct sk_buff *skb, - struct __kernel_old_timespec *stamp) + struct timespec *stamp) { - struct timespec64 ts = ktime_to_timespec64(skb->tstamp); - - stamp->tv_sec = ts.tv_sec; - stamp->tv_nsec = ts.tv_nsec; -} - -static inline void skb_get_new_timestampns(const struct sk_buff *skb, - struct __kernel_timespec *stamp) -{ - struct timespec64 ts = ktime_to_timespec64(skb->tstamp); - - stamp->tv_sec = ts.tv_sec; - stamp->tv_nsec = ts.tv_nsec; + *stamp = ktime_to_timespec(skb->tstamp); } static inline void __net_timestamp(struct sk_buff *skb) @@ -3824,76 +3214,7 @@ static inline ktime_t net_timedelta(ktime_t t) static inline ktime_t net_invalid_timestamp(void) { - return 0; -} - -static inline u8 skb_metadata_len(const struct sk_buff *skb) -{ - return skb_shinfo(skb)->meta_len; -} - -static inline void *skb_metadata_end(const struct sk_buff *skb) -{ - return skb_mac_header(skb); -} - -static inline bool __skb_metadata_differs(const struct sk_buff *skb_a, - const struct sk_buff *skb_b, - u8 meta_len) -{ - const void *a = skb_metadata_end(skb_a); - const void *b = skb_metadata_end(skb_b); - /* Using more efficient varaiant than plain call to memcmp(). 
*/ -#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) && BITS_PER_LONG == 64 - u64 diffs = 0; - - switch (meta_len) { -#define __it(x, op) (x -= sizeof(u##op)) -#define __it_diff(a, b, op) (*(u##op *)__it(a, op)) ^ (*(u##op *)__it(b, op)) - case 32: diffs |= __it_diff(a, b, 64); - fallthrough; - case 24: diffs |= __it_diff(a, b, 64); - fallthrough; - case 16: diffs |= __it_diff(a, b, 64); - fallthrough; - case 8: diffs |= __it_diff(a, b, 64); - break; - case 28: diffs |= __it_diff(a, b, 64); - fallthrough; - case 20: diffs |= __it_diff(a, b, 64); - fallthrough; - case 12: diffs |= __it_diff(a, b, 64); - fallthrough; - case 4: diffs |= __it_diff(a, b, 32); - break; - } - return diffs; -#else - return memcmp(a - meta_len, b - meta_len, meta_len); -#endif -} - -static inline bool skb_metadata_differs(const struct sk_buff *skb_a, - const struct sk_buff *skb_b) -{ - u8 len_a = skb_metadata_len(skb_a); - u8 len_b = skb_metadata_len(skb_b); - - if (!(len_a | len_b)) - return false; - - return len_a != len_b ? - true : __skb_metadata_differs(skb_a, skb_b, len_a); -} - -static inline void skb_metadata_set(struct sk_buff *skb, u8 meta_len) -{ - skb_shinfo(skb)->meta_len = meta_len; -} - -static inline void skb_metadata_clear(struct sk_buff *skb) -{ - skb_metadata_set(skb, 0); + return ktime_set(0, 0); } struct sk_buff *skb_clone_sk(struct sk_buff *skb); @@ -3924,14 +3245,14 @@ static inline bool skb_defer_rx_timestamp(struct sk_buff *skb) * must call this function to return the skb back to the stack with a * timestamp. 
* - * @skb: clone of the original outgoing packet + * @skb: clone of the the original outgoing packet * @hwtstamps: hardware time stamps * */ void skb_complete_tx_timestamp(struct sk_buff *skb, struct skb_shared_hwtstamps *hwtstamps); -void __skb_tstamp_tx(struct sk_buff *orig_skb, const struct sk_buff *ack_skb, +void __skb_tstamp_tx(struct sk_buff *orig_skb, struct skb_shared_hwtstamps *hwtstamps, struct sock *sk, int tstype); @@ -3949,6 +3270,13 @@ void __skb_tstamp_tx(struct sk_buff *orig_skb, const struct sk_buff *ack_skb, void skb_tstamp_tx(struct sk_buff *orig_skb, struct skb_shared_hwtstamps *hwtstamps); +static inline void sw_tx_timestamp(struct sk_buff *skb) +{ + if (skb_shinfo(skb)->tx_flags & SKBTX_SW_TSTAMP && + !(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)) + skb_tstamp_tx(skb, NULL); +} + /** * skb_tx_timestamp() - Driver hook for transmit timestamping * @@ -3964,8 +3292,7 @@ void skb_tstamp_tx(struct sk_buff *orig_skb, static inline void skb_tx_timestamp(struct sk_buff *skb) { skb_clone_tx_timestamp(skb); - if (skb_shinfo(skb)->tx_flags & SKBTX_SW_TSTAMP) - skb_tstamp_tx(skb, NULL); + sw_tx_timestamp(skb); } /** @@ -4031,12 +3358,19 @@ static inline void __skb_incr_checksum_unnecessary(struct sk_buff *skb) } } -static inline void __skb_reset_checksum_unnecessary(struct sk_buff *skb) +static inline void __skb_mark_checksum_bad(struct sk_buff *skb) { - if (skb->ip_summed == CHECKSUM_UNNECESSARY) { - skb->ip_summed = CHECKSUM_NONE; - skb->csum_level = 0; - } + /* Mark current checksum as bad (typically called from GRO + * path). In the case that ip_summed is CHECKSUM_NONE + * this must be the first checksum encountered in the packet. + * When ip_summed is CHECKSUM_UNNECESSARY, this is the first + * checksum after the last one validated. For UDP, a zero + * checksum can not be marked as bad. 
+ */ + + if (skb->ip_summed == CHECKSUM_NONE || + skb->ip_summed == CHECKSUM_UNNECESSARY) + skb->csum_bad = 1; } /* Check if we need to perform checksum complete validation. @@ -4057,7 +3391,7 @@ static inline bool __skb_checksum_validate_needed(struct sk_buff *skb, return true; } -/* For small packets <= CHECKSUM_BREAK perform checksum complete directly +/* For small packets <= CHECKSUM_BREAK peform checksum complete directly * in checksum_init. */ #define CHECKSUM_BREAK 76 @@ -4092,6 +3426,9 @@ static inline __sum16 __skb_checksum_validate_complete(struct sk_buff *skb, skb->csum_valid = 1; return 0; } + } else if (skb->csum_bad) { + /* ip_summed == CHECKSUM_NONE in this case */ + return (__force __sum16)1; } skb->csum = psum; @@ -4151,19 +3488,22 @@ static inline __wsum null_compute_pseudo(struct sk_buff *skb, int proto) static inline bool __skb_checksum_convert_check(struct sk_buff *skb) { - return (skb->ip_summed == CHECKSUM_NONE && skb->csum_valid); + return (skb->ip_summed == CHECKSUM_NONE && + skb->csum_valid && !skb->csum_bad); } -static inline void __skb_checksum_convert(struct sk_buff *skb, __wsum pseudo) +static inline void __skb_checksum_convert(struct sk_buff *skb, + __sum16 check, __wsum pseudo) { skb->csum = ~pseudo; skb->ip_summed = CHECKSUM_COMPLETE; } -#define skb_checksum_try_convert(skb, proto, compute_pseudo) \ +#define skb_checksum_try_convert(skb, proto, check, compute_pseudo) \ do { \ if (__skb_checksum_convert_check(skb)) \ - __skb_checksum_convert(skb, compute_pseudo(skb, proto)); \ + __skb_checksum_convert(skb, check, \ + compute_pseudo(skb, proto)); \ } while (0) static inline void skb_remcsum_adjust_partial(struct sk_buff *skb, void *ptr, @@ -4200,151 +3540,43 @@ static inline void skb_remcsum_process(struct sk_buff *skb, void *ptr, skb->csum = csum_add(skb->csum, delta); } -static inline struct nf_conntrack *skb_nfct(const struct sk_buff *skb) +#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE) +void 
nf_conntrack_destroy(struct nf_conntrack *nfct); +static inline void nf_conntrack_put(struct nf_conntrack *nfct) { -#if IS_ENABLED(CONFIG_NF_CONNTRACK) - return (void *)(skb->_nfct & NFCT_PTRMASK); -#else - return NULL; -#endif + if (nfct && atomic_dec_and_test(&nfct->use)) + nf_conntrack_destroy(nfct); } - -static inline unsigned long skb_get_nfct(const struct sk_buff *skb) +static inline void nf_conntrack_get(struct nf_conntrack *nfct) { -#if IS_ENABLED(CONFIG_NF_CONNTRACK) - return skb->_nfct; -#else - return 0UL; -#endif + if (nfct) + atomic_inc(&nfct->use); } - -static inline void skb_set_nfct(struct sk_buff *skb, unsigned long nfct) -{ -#if IS_ENABLED(CONFIG_NF_CONNTRACK) - skb->slow_gro |= !!nfct; - skb->_nfct = nfct; #endif -} - -#ifdef CONFIG_SKB_EXTENSIONS -enum skb_ext_id { #if IS_ENABLED(CONFIG_BRIDGE_NETFILTER) - SKB_EXT_BRIDGE_NF, -#endif -#ifdef CONFIG_XFRM - SKB_EXT_SEC_PATH, -#endif -#if IS_ENABLED(CONFIG_NET_TC_SKB_EXT) - TC_SKB_EXT, -#endif -#if IS_ENABLED(CONFIG_MPTCP) - SKB_EXT_MPTCP, -#endif - SKB_EXT_NUM, /* must be last */ -}; - -/** - * struct skb_ext - sk_buff extensions - * @refcnt: 1 on allocation, deallocated on 0 - * @offset: offset to add to @data to obtain extension address - * @chunks: size currently allocated, stored in SKB_EXT_ALIGN_SHIFT units - * @data: start of extension data, variable sized - * - * Note: offsets/lengths are stored in chunks of 8 bytes, this allows - * to use 'u8' types while allowing up to 2kb worth of extension data. 
- */ -struct skb_ext { - refcount_t refcnt; - u8 offset[SKB_EXT_NUM]; /* in chunks of 8 bytes */ - u8 chunks; /* same */ - char data[] __aligned(8); -}; - -struct skb_ext *__skb_ext_alloc(gfp_t flags); -void *__skb_ext_set(struct sk_buff *skb, enum skb_ext_id id, - struct skb_ext *ext); -void *skb_ext_add(struct sk_buff *skb, enum skb_ext_id id); -void __skb_ext_del(struct sk_buff *skb, enum skb_ext_id id); -void __skb_ext_put(struct skb_ext *ext); - -static inline void skb_ext_put(struct sk_buff *skb) +static inline void nf_bridge_put(struct nf_bridge_info *nf_bridge) { - if (skb->active_extensions) - __skb_ext_put(skb->extensions); + if (nf_bridge && atomic_dec_and_test(&nf_bridge->use)) + kfree(nf_bridge); } - -static inline void __skb_ext_copy(struct sk_buff *dst, - const struct sk_buff *src) +static inline void nf_bridge_get(struct nf_bridge_info *nf_bridge) { - dst->active_extensions = src->active_extensions; - - if (src->active_extensions) { - struct skb_ext *ext = src->extensions; - - refcount_inc(&ext->refcnt); - dst->extensions = ext; - } + if (nf_bridge) + atomic_inc(&nf_bridge->use); } - -static inline void skb_ext_copy(struct sk_buff *dst, const struct sk_buff *src) -{ - skb_ext_put(dst); - __skb_ext_copy(dst, src); -} - -static inline bool __skb_ext_exist(const struct skb_ext *ext, enum skb_ext_id i) -{ - return !!ext->offset[i]; -} - -static inline bool skb_ext_exist(const struct sk_buff *skb, enum skb_ext_id id) -{ - return skb->active_extensions & (1 << id); -} - -static inline void skb_ext_del(struct sk_buff *skb, enum skb_ext_id id) -{ - if (skb_ext_exist(skb, id)) - __skb_ext_del(skb, id); -} - -static inline void *skb_ext_find(const struct sk_buff *skb, enum skb_ext_id id) -{ - if (skb_ext_exist(skb, id)) { - struct skb_ext *ext = skb->extensions; - - return (void *)ext + (ext->offset[id] << 3); - } - - return NULL; -} - -static inline void skb_ext_reset(struct sk_buff *skb) -{ - if (unlikely(skb->active_extensions)) { - 
__skb_ext_put(skb->extensions); - skb->active_extensions = 0; - } -} - -static inline bool skb_has_extensions(struct sk_buff *skb) -{ - return unlikely(skb->active_extensions); -} -#else -static inline void skb_ext_put(struct sk_buff *skb) {} -static inline void skb_ext_reset(struct sk_buff *skb) {} -static inline void skb_ext_del(struct sk_buff *skb, int unused) {} -static inline void __skb_ext_copy(struct sk_buff *d, const struct sk_buff *s) {} -static inline void skb_ext_copy(struct sk_buff *dst, const struct sk_buff *s) {} -static inline bool skb_has_extensions(struct sk_buff *skb) { return false; } -#endif /* CONFIG_SKB_EXTENSIONS */ - -static inline void nf_reset_ct(struct sk_buff *skb) +#endif /* CONFIG_BRIDGE_NETFILTER */ +static inline void nf_reset(struct sk_buff *skb) { #if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE) - nf_conntrack_put(skb_nfct(skb)); - skb->_nfct = 0; + nf_conntrack_put(skb->nfct); + skb->nfct = NULL; +#endif +#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER) + nf_bridge_put(skb->nf_bridge); + skb->nf_bridge = NULL; +#endif +#if IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TRACE) + skb->nf_trace = 0; #endif } @@ -4355,20 +3587,19 @@ static inline void nf_reset_trace(struct sk_buff *skb) #endif } -static inline void ipvs_reset(struct sk_buff *skb) -{ -#if IS_ENABLED(CONFIG_IP_VS) - skb->ipvs_property = 0; -#endif -} - -/* Note: This doesn't put any conntrack info in dst. */ +/* Note: This doesn't put any conntrack and bridge info in dst. 
*/ static inline void __nf_copy(struct sk_buff *dst, const struct sk_buff *src, bool copy) { #if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE) - dst->_nfct = src->_nfct; - nf_conntrack_get(skb_nfct(src)); + dst->nfct = src->nfct; + nf_conntrack_get(src->nfct); + if (copy) + dst->nfctinfo = src->nfctinfo; +#endif +#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER) + dst->nf_bridge = src->nf_bridge; + nf_bridge_get(src->nf_bridge); #endif #if IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TRACE) || defined(CONFIG_NF_TABLES) if (copy) @@ -4379,9 +3610,11 @@ static inline void __nf_copy(struct sk_buff *dst, const struct sk_buff *src, static inline void nf_copy(struct sk_buff *dst, const struct sk_buff *src) { #if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE) - nf_conntrack_put(skb_nfct(dst)); + nf_conntrack_put(dst->nfct); +#endif +#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER) + nf_bridge_put(dst->nf_bridge); #endif - dst->slow_gro = src->slow_gro; __nf_copy(dst, src, true); } @@ -4403,20 +3636,15 @@ static inline void skb_init_secmark(struct sk_buff *skb) { } #endif -static inline int secpath_exists(const struct sk_buff *skb) -{ -#ifdef CONFIG_XFRM - return skb_ext_exist(skb, SKB_EXT_SEC_PATH); -#else - return 0; -#endif -} - static inline bool skb_irq_freeable(const struct sk_buff *skb) { return !skb->destructor && - !secpath_exists(skb) && - !skb_nfct(skb) && +#if IS_ENABLED(CONFIG_XFRM) + !skb->sp && +#endif +#if IS_ENABLED(CONFIG_NF_CONNTRACK) + !skb->nfct && +#endif !skb->_skb_refdst && !skb_has_frag_list(skb); } @@ -4451,20 +3679,10 @@ static inline bool skb_rx_queue_recorded(const struct sk_buff *skb) return skb->queue_mapping != 0; } -static inline void skb_set_dst_pending_confirm(struct sk_buff *skb, u32 val) -{ - skb->dst_pending_confirm = val; -} - -static inline bool skb_get_dst_pending_confirm(const struct sk_buff *skb) -{ - return skb->dst_pending_confirm != 0; -} - -static inline struct sec_path *skb_sec_path(const struct sk_buff *skb) 
+static inline struct sec_path *skb_sec_path(struct sk_buff *skb) { #ifdef CONFIG_XFRM - return skb_ext_find(skb, SKB_EXT_SEC_PATH); + return skb->sp; #else return NULL; #endif @@ -4485,8 +3703,8 @@ struct skb_gso_cb { __wsum csum; __u16 csum_start; }; -#define SKB_GSO_CB_OFFSET 32 -#define SKB_GSO_CB(skb) ((struct skb_gso_cb *)((skb)->cb + SKB_GSO_CB_OFFSET)) +#define SKB_SGO_CB_OFFSET 32 +#define SKB_GSO_CB(skb) ((struct skb_gso_cb *)((skb)->cb + SKB_SGO_CB_OFFSET)) static inline int skb_tnl_header_len(const struct sk_buff *inner_skb) { @@ -4550,18 +3768,6 @@ static inline bool skb_is_gso_v6(const struct sk_buff *skb) return skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6; } -/* Note: Should be called only if skb_is_gso(skb) is true */ -static inline bool skb_is_gso_sctp(const struct sk_buff *skb) -{ - return skb_shinfo(skb)->gso_type & SKB_GSO_SCTP; -} - -/* Note: Should be called only if skb_is_gso(skb) is true */ -static inline bool skb_is_gso_tcp(const struct sk_buff *skb) -{ - return skb_shinfo(skb)->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6); -} - static inline void skb_gso_reset(struct sk_buff *skb) { skb_shinfo(skb)->gso_size = 0; @@ -4569,22 +3775,6 @@ static inline void skb_gso_reset(struct sk_buff *skb) skb_shinfo(skb)->gso_type = 0; } -static inline void skb_increase_gso_size(struct skb_shared_info *shinfo, - u16 increment) -{ - if (WARN_ON_ONCE(shinfo->gso_size == GSO_BY_FRAGS)) - return; - shinfo->gso_size += increment; -} - -static inline void skb_decrease_gso_size(struct skb_shared_info *shinfo, - u16 decrement) -{ - if (WARN_ON_ONCE(shinfo->gso_size == GSO_BY_FRAGS)) - return; - shinfo->gso_size -= decrement; -} - void __skb_warn_lro_forwarding(const struct sk_buff *skb); static inline bool skb_warn_if_lro(const struct sk_buff *skb) @@ -4644,10 +3834,27 @@ static inline bool skb_head_is_locked(const struct sk_buff *skb) return !skb->head_frag || skb_cloned(skb); } +/** + * skb_gso_network_seglen - Return length of individual segments of a gso packet + 
* + * @skb: GSO skb + * + * skb_gso_network_seglen is used to determine the real size of the + * individual segments, including Layer3 (IP, IPv6) and L4 headers (TCP/UDP). + * + * The MAC/L2 header is not accounted for. + */ +static inline unsigned int skb_gso_network_seglen(const struct sk_buff *skb) +{ + unsigned int hdr_len = skb_transport_header(skb) - + skb_network_header(skb); + return hdr_len + skb_gso_transport_seglen(skb); +} + /* Local Checksum Offload. * Compute outer checksum based on the assumption that the * inner checksum will be offloaded later. - * See Documentation/networking/checksum-offloads.rst for + * See Documentation/networking/checksum-offloads.txt for * explanation of how this works. * Fill in outer checksum adjustment (e.g. with sum of outer * pseudo-header) before calling. @@ -4669,61 +3876,5 @@ static inline __wsum lco_csum(struct sk_buff *skb) return csum_partial(l4_hdr, csum_start - l4_hdr, partial); } -static inline bool skb_is_redirected(const struct sk_buff *skb) -{ - return skb->redirected; -} - -static inline void skb_set_redirected(struct sk_buff *skb, bool from_ingress) -{ - skb->redirected = 1; -#ifdef CONFIG_NET_REDIRECT - skb->from_ingress = from_ingress; - if (skb->from_ingress) - skb->tstamp = 0; -#endif -} - -static inline void skb_reset_redirect(struct sk_buff *skb) -{ - skb->redirected = 0; -} - -static inline bool skb_csum_is_sctp(struct sk_buff *skb) -{ - return skb->csum_not_inet; -} - -static inline void skb_set_kcov_handle(struct sk_buff *skb, - const u64 kcov_handle) -{ -#ifdef CONFIG_KCOV - skb->kcov_handle = kcov_handle; -#endif -} - -static inline u64 skb_get_kcov_handle(struct sk_buff *skb) -{ -#ifdef CONFIG_KCOV - return skb->kcov_handle; -#else - return 0; -#endif -} - -#ifdef CONFIG_PAGE_POOL -static inline void skb_mark_for_recycle(struct sk_buff *skb) -{ - skb->pp_recycle = 1; -} -#endif - -static inline bool skb_pp_recycle(struct sk_buff *skb, void *data) -{ - if (!IS_ENABLED(CONFIG_PAGE_POOL) || 
!skb->pp_recycle) - return false; - return page_pool_return_skb_page(virt_to_page(data)); -} - #endif /* __KERNEL__ */ #endif /* _LINUX_SKBUFF_H */ diff --git a/include/linux/slab.h b/include/linux/slab.h index 083f3ce550..9da238a10c 100644 --- a/include/linux/slab.h +++ b/include/linux/slab.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ /* * Written by Mark Hemment, 1996 (markhe@nextd.demon.co.uk). * @@ -13,34 +12,38 @@ #define _LINUX_SLAB_H #include -#include #include #include -#include +#include /* * Flags to pass to kmem_cache_create(). * The ones marked DEBUG are only valid if CONFIG_DEBUG_SLAB is set. */ -/* DEBUG: Perform (expensive) checks on alloc/free */ -#define SLAB_CONSISTENCY_CHECKS ((slab_flags_t __force)0x00000100U) -/* DEBUG: Red zone objs in a cache */ -#define SLAB_RED_ZONE ((slab_flags_t __force)0x00000400U) -/* DEBUG: Poison objects */ -#define SLAB_POISON ((slab_flags_t __force)0x00000800U) -/* Align objs on cache lines */ -#define SLAB_HWCACHE_ALIGN ((slab_flags_t __force)0x00002000U) -/* Use GFP_DMA memory */ -#define SLAB_CACHE_DMA ((slab_flags_t __force)0x00004000U) -/* Use GFP_DMA32 memory */ -#define SLAB_CACHE_DMA32 ((slab_flags_t __force)0x00008000U) -/* DEBUG: Store the last owner for bug hunting */ -#define SLAB_STORE_USER ((slab_flags_t __force)0x00010000U) -/* Panic if kmem_cache_create() fails */ -#define SLAB_PANIC ((slab_flags_t __force)0x00040000U) +#define SLAB_CONSISTENCY_CHECKS 0x00000100UL /* DEBUG: Perform (expensive) checks on alloc/free */ + +#ifdef CONFIG_PAX_USERCOPY +#define SLAB_USERCOPY 0x00000200UL /* PaX: Allow copying objs to/from userland */ +#else +#define SLAB_USERCOPY 0x00000000UL +#endif + +#define SLAB_RED_ZONE 0x00000400UL /* DEBUG: Red zone objs in a cache */ +#define SLAB_POISON 0x00000800UL /* DEBUG: Poison objects */ + +#ifdef CONFIG_PAX_MEMORY_SANITIZE +#define SLAB_NO_SANITIZE 0x00001000UL /* PaX: Do not sanitize objs on free */ +#else +#define SLAB_NO_SANITIZE 0x00000000UL +#endif + 
+#define SLAB_HWCACHE_ALIGN 0x00002000UL /* Align objs on cache lines */ +#define SLAB_CACHE_DMA 0x00004000UL /* Use GFP_DMA memory */ +#define SLAB_STORE_USER 0x00010000UL /* DEBUG: Store the last owner for bug hunting */ +#define SLAB_PANIC 0x00040000UL /* Panic if kmem_cache_create() fails */ /* - * SLAB_TYPESAFE_BY_RCU - **WARNING** READ THIS! + * SLAB_DESTROY_BY_RCU - **WARNING** READ THIS! * * This delays freeing the SLAB page by a grace period, it does _NOT_ * delay object freeing. This means that if you do kmem_cache_free() @@ -73,53 +76,46 @@ * * rcu_read_lock before reading the address, then rcu_read_unlock after * taking the spinlock within the structure expected at that address. - * - * Note that SLAB_TYPESAFE_BY_RCU was originally named SLAB_DESTROY_BY_RCU. */ -/* Defer freeing slabs to RCU */ -#define SLAB_TYPESAFE_BY_RCU ((slab_flags_t __force)0x00080000U) -/* Spread some memory over cpuset */ -#define SLAB_MEM_SPREAD ((slab_flags_t __force)0x00100000U) -/* Trace allocations and frees */ -#define SLAB_TRACE ((slab_flags_t __force)0x00200000U) +#define SLAB_DESTROY_BY_RCU 0x00080000UL /* Defer freeing slabs to RCU */ +#define SLAB_MEM_SPREAD 0x00100000UL /* Spread some memory over cpuset */ +#define SLAB_TRACE 0x00200000UL /* Trace allocations and frees */ /* Flag to prevent checks on free */ #ifdef CONFIG_DEBUG_OBJECTS -# define SLAB_DEBUG_OBJECTS ((slab_flags_t __force)0x00400000U) +# define SLAB_DEBUG_OBJECTS 0x00400000UL #else -# define SLAB_DEBUG_OBJECTS 0 +# define SLAB_DEBUG_OBJECTS 0x00000000UL #endif -/* Avoid kmemleak tracing */ -#define SLAB_NOLEAKTRACE ((slab_flags_t __force)0x00800000U) +#define SLAB_NOLEAKTRACE 0x00800000UL /* Avoid kmemleak tracing */ -/* Fault injection mark */ +/* Don't track use of uninitialized memory */ +#ifdef CONFIG_KMEMCHECK +# define SLAB_NOTRACK 0x01000000UL +#else +# define SLAB_NOTRACK 0x00000000UL +#endif #ifdef CONFIG_FAILSLAB -# define SLAB_FAILSLAB ((slab_flags_t __force)0x02000000U) +# define 
SLAB_FAILSLAB 0x02000000UL /* Fault injection mark */ #else -# define SLAB_FAILSLAB 0 +# define SLAB_FAILSLAB 0x00000000UL #endif -/* Account to memcg */ -#ifdef CONFIG_MEMCG_KMEM -# define SLAB_ACCOUNT ((slab_flags_t __force)0x04000000U) +#if defined(CONFIG_MEMCG) && !defined(CONFIG_SLOB) +# define SLAB_ACCOUNT 0x04000000UL /* Account to memcg */ #else -# define SLAB_ACCOUNT 0 +# define SLAB_ACCOUNT 0x00000000UL #endif #ifdef CONFIG_KASAN -#define SLAB_KASAN ((slab_flags_t __force)0x08000000U) +#define SLAB_KASAN 0x08000000UL #else -#define SLAB_KASAN 0 +#define SLAB_KASAN 0x00000000UL #endif /* The following flags affect the page allocator grouping pages by mobility */ -/* Objects are reclaimable */ -#define SLAB_RECLAIM_ACCOUNT ((slab_flags_t __force)0x00020000U) +#define SLAB_RECLAIM_ACCOUNT 0x00020000UL /* Objects are reclaimable */ #define SLAB_TEMPORARY SLAB_RECLAIM_ACCOUNT /* Objects are short-lived */ - -/* Slab deactivation flag */ -#define SLAB_DEACTIVATED ((slab_flags_t __force)0x10000000U) - /* * ZERO_SIZE_PTR will be returned for zero sized kmalloc requests. * @@ -128,11 +124,15 @@ * ZERO_SIZE_PTR can be passed to kfree though in the same way that NULL can. * Both make kfree a no-op. 
*/ -#define ZERO_SIZE_PTR ((void *)16) +#define ZERO_SIZE_PTR \ +({ \ + BUILD_BUG_ON(!(MAX_ERRNO & ~PAGE_MASK));\ + (void *)(-MAX_ERRNO-1L); \ +}) -#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) <= \ - (unsigned long)ZERO_SIZE_PTR) +#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) - 1 >= (unsigned long)ZERO_SIZE_PTR - 1) +#include #include struct mem_cgroup; @@ -142,19 +142,19 @@ struct mem_cgroup; void __init kmem_cache_init(void); bool slab_is_available(void); -extern bool usercopy_fallback; - -struct kmem_cache *kmem_cache_create(const char *name, unsigned int size, - unsigned int align, slab_flags_t flags, - void (*ctor)(void *)); -struct kmem_cache *kmem_cache_create_usercopy(const char *name, - unsigned int size, unsigned int align, - slab_flags_t flags, - unsigned int useroffset, unsigned int usersize, - void (*ctor)(void *)); +struct kmem_cache *kmem_cache_create(const char *, size_t, size_t, + unsigned long, + void (*)(void *)); +struct kmem_cache *kmem_cache_create_usercopy(const char *, size_t, size_t, + unsigned long, size_t, size_t, + void (*)(void *)); void kmem_cache_destroy(struct kmem_cache *); int kmem_cache_shrink(struct kmem_cache *); +void memcg_create_kmem_cache(struct mem_cgroup *, struct kmem_cache *); +void memcg_deactivate_kmem_caches(struct mem_cgroup *); +void memcg_destroy_kmem_caches(struct mem_cgroup *); + /* * Please use this macro to create slab caches. Simply specify the * name of the structure and maybe some flags that are listed above. @@ -163,41 +163,27 @@ int kmem_cache_shrink(struct kmem_cache *); * f.e. add ____cacheline_aligned_in_smp to the struct declaration * then the objects will be properly aligned in SMP configurations. 
*/ -#define KMEM_CACHE(__struct, __flags) \ - kmem_cache_create(#__struct, sizeof(struct __struct), \ - __alignof__(struct __struct), (__flags), NULL) +#define KMEM_CACHE(__struct, __flags) kmem_cache_create(#__struct,\ + sizeof(struct __struct), __alignof__(struct __struct),\ + (__flags), NULL) -/* - * To whitelist a single field for copying to/from usercopy, use this - * macro instead for KMEM_CACHE() above. - */ -#define KMEM_CACHE_USERCOPY(__struct, __flags, __field) \ - kmem_cache_create_usercopy(#__struct, \ - sizeof(struct __struct), \ - __alignof__(struct __struct), (__flags), \ - offsetof(struct __struct, __field), \ - sizeof_field(struct __struct, __field), NULL) +#define KMEM_CACHE_USERCOPY(__struct, __flags, __field) kmem_cache_create_usercopy(#__struct,\ + sizeof(struct __struct), __alignof__(struct __struct),\ + (__flags), offsetof(struct __struct, __field),\ + sizeof(((struct __struct *)0)->__field), NULL) /* * Common kmalloc functions provided by all allocators */ +void * __must_check __krealloc(const void *, size_t, gfp_t); void * __must_check krealloc(const void *, size_t, gfp_t); void kfree(const void *); -void kfree_sensitive(const void *); -size_t __ksize(const void *); +void kzfree(const void *); size_t ksize(const void *); -#ifdef CONFIG_PRINTK -bool kmem_valid_obj(void *object); -void kmem_dump_obj(void *object); -#endif +bool is_usercopy_object(const void *ptr); -#ifdef CONFIG_HAVE_HARDENED_USERCOPY_ALLOCATOR -void __check_heap_object(const void *ptr, unsigned long n, struct page *page, - bool to_user); -#else -static inline void __check_heap_object(const void *ptr, unsigned long n, - struct page *page, bool to_user) { } -#endif +const char *__check_heap_object(const void *ptr, unsigned long n, + struct page *page); /* * Some archs want to perform DMA into kmalloc caches and need a guaranteed @@ -258,7 +244,7 @@ static inline void __check_heap_object(const void *ptr, unsigned long n, * (PAGE_SIZE*2). 
Larger requests are passed to the page allocator. */ #define KMALLOC_SHIFT_HIGH (PAGE_SHIFT + 1) -#define KMALLOC_SHIFT_MAX (MAX_ORDER + PAGE_SHIFT - 1) +#define KMALLOC_SHIFT_MAX (MAX_ORDER + PAGE_SHIFT) #ifndef KMALLOC_SHIFT_LOW #define KMALLOC_SHIFT_LOW 3 #endif @@ -271,7 +257,7 @@ static inline void __check_heap_object(const void *ptr, unsigned long n, * be allocated from the same page. */ #define KMALLOC_SHIFT_HIGH PAGE_SHIFT -#define KMALLOC_SHIFT_MAX (MAX_ORDER + PAGE_SHIFT - 1) +#define KMALLOC_SHIFT_MAX 30 #ifndef KMALLOC_SHIFT_LOW #define KMALLOC_SHIFT_LOW 3 #endif @@ -281,7 +267,7 @@ static inline void __check_heap_object(const void *ptr, unsigned long n, #define KMALLOC_MAX_SIZE (1UL << KMALLOC_SHIFT_MAX) /* Maximum size for which we actually use a slab cache */ #define KMALLOC_MAX_CACHE_SIZE (1UL << KMALLOC_SHIFT_HIGH) -/* Maximum order allocatable via the slab allocator */ +/* Maximum order allocatable via the slab allocagtor */ #define KMALLOC_MAX_ORDER (KMALLOC_SHIFT_MAX - PAGE_SHIFT) /* @@ -302,66 +288,15 @@ static inline void __check_heap_object(const void *ptr, unsigned long n, #define SLAB_OBJ_MIN_SIZE (KMALLOC_MIN_SIZE < 16 ? \ (KMALLOC_MIN_SIZE) : 16) -/* - * Whenever changing this, take care of that kmalloc_type() and - * create_kmalloc_caches() still work as intended. - * - * KMALLOC_NORMAL can contain only unaccounted objects whereas KMALLOC_CGROUP - * is for accounted but unreclaimable and non-dma objects. All the other - * kmem caches can have both accounted and unaccounted objects. 
- */ -enum kmalloc_cache_type { - KMALLOC_NORMAL = 0, -#ifndef CONFIG_ZONE_DMA - KMALLOC_DMA = KMALLOC_NORMAL, -#endif -#ifndef CONFIG_MEMCG_KMEM - KMALLOC_CGROUP = KMALLOC_NORMAL, -#else - KMALLOC_CGROUP, -#endif - KMALLOC_RECLAIM, -#ifdef CONFIG_ZONE_DMA - KMALLOC_DMA, -#endif - NR_KMALLOC_TYPES -}; - #ifndef CONFIG_SLOB -extern struct kmem_cache * -kmalloc_caches[NR_KMALLOC_TYPES][KMALLOC_SHIFT_HIGH + 1]; +extern struct kmem_cache *kmalloc_caches[KMALLOC_SHIFT_HIGH + 1]; +#ifdef CONFIG_ZONE_DMA +extern struct kmem_cache *kmalloc_dma_caches[KMALLOC_SHIFT_HIGH + 1]; +#endif -/* - * Define gfp bits that should not be set for KMALLOC_NORMAL. - */ -#define KMALLOC_NOT_NORMAL_BITS \ - (__GFP_RECLAIMABLE | \ - (IS_ENABLED(CONFIG_ZONE_DMA) ? __GFP_DMA : 0) | \ - (IS_ENABLED(CONFIG_MEMCG_KMEM) ? __GFP_ACCOUNT : 0)) - -static __always_inline enum kmalloc_cache_type kmalloc_type(gfp_t flags) -{ - /* - * The most common case is KMALLOC_NORMAL, so test for it - * with a single branch for all the relevant flags. - */ - if (likely((flags & KMALLOC_NOT_NORMAL_BITS) == 0)) - return KMALLOC_NORMAL; - - /* - * At least one of the flags has to be set. Their priorities in - * decreasing order are: - * 1) __GFP_DMA - * 2) __GFP_RECLAIMABLE - * 3) __GFP_ACCOUNT - */ - if (IS_ENABLED(CONFIG_ZONE_DMA) && (flags & __GFP_DMA)) - return KMALLOC_DMA; - if (!IS_ENABLED(CONFIG_MEMCG_KMEM) || (flags & __GFP_RECLAIMABLE)) - return KMALLOC_RECLAIM; - else - return KMALLOC_CGROUP; -} +#ifdef CONFIG_PAX_USERCOPY +extern struct kmem_cache *kmalloc_usercopy_caches[KMALLOC_SHIFT_HIGH + 1]; +#endif /* * Figure out which kmalloc slab an allocation of a certain size @@ -370,14 +305,8 @@ static __always_inline enum kmalloc_cache_type kmalloc_type(gfp_t flags) * 1 = 65 .. 96 bytes * 2 = 129 .. 192 bytes * n = 2^(n-1)+1 .. 2^n - * - * Note: __kmalloc_index() is compile-time optimized, and not runtime optimized; - * typical usage is via kmalloc_index() and therefore evaluated at compile-time. 
- * Callers where !size_is_constant should only be test modules, where runtime - * overheads of __kmalloc_index() can be tolerated. Also see kmalloc_slab(). */ -static __always_inline unsigned int __kmalloc_index(size_t size, - bool size_is_constant) +static __always_inline __size_overflow(1) int kmalloc_index(size_t size) { if (!size) return 0; @@ -412,20 +341,15 @@ static __always_inline unsigned int __kmalloc_index(size_t size, if (size <= 8 * 1024 * 1024) return 23; if (size <= 16 * 1024 * 1024) return 24; if (size <= 32 * 1024 * 1024) return 25; - - if ((IS_ENABLED(CONFIG_CC_IS_GCC) || CONFIG_CLANG_VERSION >= 110000) - && !IS_ENABLED(CONFIG_PROFILE_ALL_BRANCHES) && size_is_constant) - BUILD_BUG_ON_MSG(1, "unexpected size in kmalloc_index()"); - else - BUG(); + if (size <= 64 * 1024 * 1024) return 26; + BUG(); /* Will never be reached. Needed because the compiler may complain */ return -1; } -#define kmalloc_index(s) __kmalloc_index(s, true) #endif /* !CONFIG_SLOB */ -void *__kmalloc(size_t size, gfp_t flags) __assume_kmalloc_alignment __malloc; +void *__kmalloc(size_t size, gfp_t flags) __assume_kmalloc_alignment __malloc __alloc_size(1) __size_overflow(1); void *kmem_cache_alloc(struct kmem_cache *, gfp_t flags) __assume_slab_alignment __malloc; void kmem_cache_free(struct kmem_cache *, void *); @@ -449,10 +373,10 @@ static __always_inline void kfree_bulk(size_t size, void **p) } #ifdef CONFIG_NUMA -void *__kmalloc_node(size_t size, gfp_t flags, int node) __assume_kmalloc_alignment __malloc; +void *__kmalloc_node(size_t size, gfp_t flags, int node) __assume_kmalloc_alignment __malloc __alloc_size(1) __size_overflow(1); void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node) __assume_slab_alignment __malloc; #else -static __always_inline void *__kmalloc_node(size_t size, gfp_t flags, int node) +static __always_inline void * __alloc_size(1) __size_overflow(1) __kmalloc_node(size_t size, gfp_t flags, int node) { return __kmalloc(size, flags); } 
@@ -486,7 +410,7 @@ static __always_inline void *kmem_cache_alloc_trace(struct kmem_cache *s, { void *ret = kmem_cache_alloc(s, flags); - ret = kasan_kmalloc(s, ret, size, flags); + kasan_kmalloc(s, ret, size, flags); return ret; } @@ -497,7 +421,7 @@ kmem_cache_alloc_node_trace(struct kmem_cache *s, { void *ret = kmem_cache_alloc_node(s, gfpflags, node); - ret = kasan_kmalloc(s, ret, size, gfpflags); + kasan_kmalloc(s, ret, size, gfpflags); return ret; } #endif /* CONFIG_TRACING */ @@ -528,92 +452,143 @@ static __always_inline void *kmalloc_large(size_t size, gfp_t flags) * kmalloc is the normal method of allocating memory * for objects smaller than page size in the kernel. * - * The allocated object address is aligned to at least ARCH_KMALLOC_MINALIGN - * bytes. For @size of power of two bytes, the alignment is also guaranteed - * to be at least to the size. + * The @flags argument may be one of: * - * The @flags argument may be one of the GFP flags defined at - * include/linux/gfp.h and described at - * :ref:`Documentation/core-api/mm-api.rst ` + * %GFP_USER - Allocate memory on behalf of user. May sleep. * - * The recommended usage of the @flags is described at - * :ref:`Documentation/core-api/memory-allocation.rst ` + * %GFP_KERNEL - Allocate normal kernel ram. May sleep. * - * Below is a brief outline of the most useful GFP flags + * %GFP_ATOMIC - Allocation will not sleep. May use emergency pools. + * For example, use this inside interrupt handlers. * - * %GFP_KERNEL - * Allocate normal kernel ram. May sleep. + * %GFP_HIGHUSER - Allocate pages from high memory. * - * %GFP_NOWAIT - * Allocation will not sleep. + * %GFP_NOIO - Do not do any I/O at all while trying to get memory. * - * %GFP_ATOMIC - * Allocation will not sleep. May use emergency pools. + * %GFP_NOFS - Do not make any fs calls while trying to get memory. * - * %GFP_HIGHUSER - * Allocate memory from high memory on behalf of user. + * %GFP_NOWAIT - Allocation will not sleep. 
+ * + * %__GFP_THISNODE - Allocate node-local memory only. + * + * %GFP_DMA - Allocation suitable for DMA. + * Should only be used for kmalloc() caches. Otherwise, use a + * slab created with SLAB_DMA. * * Also it is possible to set different flags by OR'ing * in one or more of the following additional @flags: * - * %__GFP_HIGH - * This allocation has high priority and may use emergency pools. + * %__GFP_COLD - Request cache-cold pages instead of + * trying to return cache-warm pages. * - * %__GFP_NOFAIL - * Indicate that this allocation is in no way allowed to fail - * (think twice before using). + * %__GFP_HIGH - This allocation has high priority and may use emergency pools. * - * %__GFP_NORETRY - * If memory is not immediately available, - * then give up at once. + * %__GFP_NOFAIL - Indicate that this allocation is in no way allowed to fail + * (think twice before using). * - * %__GFP_NOWARN - * If allocation fails, don't issue any warnings. + * %__GFP_NORETRY - If memory is not immediately available, + * then give up at once. * - * %__GFP_RETRY_MAYFAIL - * Try really hard to succeed the allocation but fail - * eventually. + * %__GFP_NOWARN - If allocation fails, don't issue any warnings. + * + * %__GFP_REPEAT - If allocation fails initially, try once more before failing. + * + * There are other flags available as well, but these are not intended + * for general use, and so are not documented here. For a full list of + * potential flags, always refer to linux/gfp.h. 
*/ static __always_inline void *kmalloc(size_t size, gfp_t flags) { if (__builtin_constant_p(size)) { -#ifndef CONFIG_SLOB - unsigned int index; -#endif if (size > KMALLOC_MAX_CACHE_SIZE) return kmalloc_large(size, flags); #ifndef CONFIG_SLOB - index = kmalloc_index(size); + if (!(flags & GFP_DMA)) { + int index = kmalloc_index(size); - if (!index) - return ZERO_SIZE_PTR; + if (!index) + return ZERO_SIZE_PTR; - return kmem_cache_alloc_trace( - kmalloc_caches[kmalloc_type(flags)][index], - flags, size); + return kmem_cache_alloc_trace(kmalloc_caches[index], + flags, size); + } #endif } return __kmalloc(size, flags); } +/* + * Determine size used for the nth kmalloc cache. + * return size or 0 if a kmalloc cache for that + * size does not exist + */ +static __always_inline int kmalloc_size(int n) +{ +#ifndef CONFIG_SLOB + if (n > 2) + return 1 << n; + + if (n == 1 && KMALLOC_MIN_SIZE <= 32) + return 96; + + if (n == 2 && KMALLOC_MIN_SIZE <= 64) + return 192; +#endif + return 0; +} + static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node) { #ifndef CONFIG_SLOB if (__builtin_constant_p(size) && - size <= KMALLOC_MAX_CACHE_SIZE) { - unsigned int i = kmalloc_index(size); + size <= KMALLOC_MAX_CACHE_SIZE && !(flags & GFP_DMA)) { + int i = kmalloc_index(size); if (!i) return ZERO_SIZE_PTR; - return kmem_cache_alloc_node_trace( - kmalloc_caches[kmalloc_type(flags)][i], + return kmem_cache_alloc_node_trace(kmalloc_caches[i], flags, node, size); } #endif return __kmalloc_node(size, flags, node); } +struct memcg_cache_array { + struct rcu_head rcu; + struct kmem_cache *entries[0]; +}; + +/* + * This is the main placeholder for memcg-related information in kmem caches. + * Both the root cache and the child caches will have it. For the root cache, + * this will hold a dynamically allocated array large enough to hold + * information about the currently limited memcgs in the system. 
To allow the + * array to be accessed without taking any locks, on relocation we free the old + * version only after a grace period. + * + * Child caches will hold extra metadata needed for its operation. Fields are: + * + * @memcg: pointer to the memcg this cache belongs to + * @root_cache: pointer to the global, root cache, this cache was derived from + * + * Both root and child caches of the same kind are linked into a list chained + * through @list. + */ +struct memcg_cache_params { + bool is_root_cache; + struct list_head list; + union { + struct memcg_cache_array __rcu *memcg_caches; + struct { + struct mem_cgroup *memcg; + struct kmem_cache *root_cache; + }; + }; +}; + +int memcg_update_all_caches(int num_memcgs); + /** * kmalloc_array - allocate memory for an array. * @n: number of elements. @@ -622,31 +597,11 @@ static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node) */ static inline void *kmalloc_array(size_t n, size_t size, gfp_t flags) { - size_t bytes; - - if (unlikely(check_mul_overflow(n, size, &bytes))) + if (size != 0 && n > SIZE_MAX / size) return NULL; if (__builtin_constant_p(n) && __builtin_constant_p(size)) - return kmalloc(bytes, flags); - return __kmalloc(bytes, flags); -} - -/** - * krealloc_array - reallocate memory for an array. 
- * @p: pointer to the memory chunk to reallocate - * @new_n: new number of elements to alloc - * @new_size: new size of a single member of the array - * @flags: the type of memory to allocate (see kmalloc) - */ -static __must_check inline void * -krealloc_array(void *p, size_t new_n, size_t new_size, gfp_t flags) -{ - size_t bytes; - - if (unlikely(check_mul_overflow(new_n, new_size, &bytes))) - return NULL; - - return krealloc(p, bytes, flags); + return kmalloc(n * size, flags); + return __kmalloc(n * size, flags); } /** @@ -672,24 +627,6 @@ extern void *__kmalloc_track_caller(size_t, gfp_t, unsigned long); #define kmalloc_track_caller(size, flags) \ __kmalloc_track_caller(size, flags, _RET_IP_) -static inline void *kmalloc_array_node(size_t n, size_t size, gfp_t flags, - int node) -{ - size_t bytes; - - if (unlikely(check_mul_overflow(n, size, &bytes))) - return NULL; - if (__builtin_constant_p(n) && __builtin_constant_p(size)) - return kmalloc_node(bytes, flags, node); - return __kmalloc_node(bytes, flags, node); -} - -static inline void *kcalloc_node(size_t n, size_t size, gfp_t flags, int node) -{ - return kmalloc_array_node(n, size, flags | __GFP_ZERO, node); -} - - #ifdef CONFIG_NUMA extern void *__kmalloc_node_track_caller(size_t, gfp_t, int, unsigned long); #define kmalloc_node_track_caller(size, flags, node) \ diff --git a/include/linux/slab_def.h b/include/linux/slab_def.h index 3aa5e1e73a..ebff702ff2 100644 --- a/include/linux/slab_def.h +++ b/include/linux/slab_def.h @@ -1,8 +1,6 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_SLAB_DEF_H #define _LINUX_SLAB_DEF_H -#include #include /* @@ -21,7 +19,7 @@ struct kmem_cache { struct reciprocal_value reciprocal_buffer_size; /* 2) touched by every alloc & free from the backend */ - slab_flags_t flags; /* constant flags */ + unsigned int flags; /* constant flags */ unsigned int num; /* # of objs per slab */ /* 3) cache_grow/shrink */ @@ -42,7 +40,7 @@ struct kmem_cache { /* 4) cache 
creation/removal */ const char *name; struct list_head list; - int refcount; + atomic_t refcount; int object_size; int align; @@ -58,21 +56,30 @@ struct kmem_cache { unsigned long node_allocs; unsigned long node_frees; unsigned long node_overflow; - atomic_t allochit; - atomic_t allocmiss; - atomic_t freehit; - atomic_t freemiss; + atomic_unchecked_t allochit; + atomic_unchecked_t allocmiss; + atomic_unchecked_t freehit; + atomic_unchecked_t freemiss; +#ifdef CONFIG_PAX_MEMORY_SANITIZE + atomic_unchecked_t sanitized; + atomic_unchecked_t not_sanitized; +#endif +#ifdef CONFIG_DEBUG_SLAB_LEAK + atomic_t store_user_clean; +#endif /* * If debugging is enabled, then the allocator can add additional - * fields and/or padding to every object. 'size' contains the total - * object size including these internal fields, while 'obj_offset' - * and 'object_size' contain the offset to the user object and its - * size. + * fields and/or padding to every object. size contains the total + * object size including these internal fields, the following two + * variables contain the offset to the user object and its size. 
*/ int obj_offset; #endif /* CONFIG_DEBUG_SLAB */ +#ifdef CONFIG_MEMCG + struct memcg_cache_params memcg_params; +#endif #ifdef CONFIG_KASAN struct kasan_cache kasan_info; #endif @@ -81,8 +88,8 @@ struct kmem_cache { unsigned int *random_seq; #endif - unsigned int useroffset; /* Usercopy region offset */ - unsigned int usersize; /* Usercopy region size */ + size_t useroffset; /* USERCOPY region offset */ + size_t usersize; /* USERCOPY region size */ struct kmem_cache_node *node[MAX_NUMNODES]; }; @@ -99,25 +106,4 @@ static inline void *nearest_obj(struct kmem_cache *cache, struct page *page, return object; } -/* - * We want to avoid an expensive divide : (offset / cache->size) - * Using the fact that size is a constant for a particular cache, - * we can replace (offset / cache->size) by - * reciprocal_divide(offset, cache->reciprocal_buffer_size) - */ -static inline unsigned int obj_to_index(const struct kmem_cache *cache, - const struct page *page, void *obj) -{ - u32 offset = (obj - page->s_mem); - return reciprocal_divide(offset, cache->reciprocal_buffer_size); -} - -static inline int objs_per_slab_page(const struct kmem_cache *cache, - const struct page *page) -{ - if (is_kfence_address(page_address(page))) - return 1; - return cache->num; -} - #endif /* _LINUX_SLAB_DEF_H */ diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h index 85499f0586..97880d2dfa 100644 --- a/include/linux/slub_def.h +++ b/include/linux/slub_def.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_SLUB_DEF_H #define _LINUX_SLUB_DEF_H @@ -7,10 +6,7 @@ * * (C) 2007 SGI, Christoph Lameter */ -#include #include -#include -#include enum stat_item { ALLOC_FASTPATH, /* Allocation from cpu slab */ @@ -41,47 +37,23 @@ enum stat_item { CPU_PARTIAL_DRAIN, /* Drain cpu partial to node partial */ NR_SLUB_STAT_ITEMS }; -/* - * When changing the layout, make sure freelist and tid are still compatible - * with this_cpu_cmpxchg_double() alignment requirements. 
- */ struct kmem_cache_cpu { void **freelist; /* Pointer to next available object */ unsigned long tid; /* Globally unique transaction id */ struct page *page; /* The slab from which we are allocating */ -#ifdef CONFIG_SLUB_CPU_PARTIAL struct page *partial; /* Partially allocated frozen slabs */ -#endif - local_lock_t lock; /* Protects the fields above */ #ifdef CONFIG_SLUB_STATS unsigned stat[NR_SLUB_STAT_ITEMS]; #endif }; -#ifdef CONFIG_SLUB_CPU_PARTIAL -#define slub_percpu_partial(c) ((c)->partial) - -#define slub_set_percpu_partial(c, p) \ -({ \ - slub_percpu_partial(c) = (p)->next; \ -}) - -#define slub_percpu_partial_read_once(c) READ_ONCE(slub_percpu_partial(c)) -#else -#define slub_percpu_partial(c) NULL - -#define slub_set_percpu_partial(c, p) - -#define slub_percpu_partial_read_once(c) NULL -#endif // CONFIG_SLUB_CPU_PARTIAL - /* * Word size structure that can be atomically updated or read and that * contains both the order and the number of objects that a slab of the * given order would contain. */ struct kmem_cache_order_objects { - unsigned int x; + unsigned long x; }; /* @@ -89,42 +61,43 @@ struct kmem_cache_order_objects { */ struct kmem_cache { struct kmem_cache_cpu __percpu *cpu_slab; - /* Used for retrieving partial slabs, etc. */ - slab_flags_t flags; + /* Used for retriving partial slabs etc */ + unsigned long flags; unsigned long min_partial; - unsigned int size; /* The size of an object including metadata */ - unsigned int object_size;/* The size of an object without metadata */ - struct reciprocal_value reciprocal_size; - unsigned int offset; /* Free pointer offset */ -#ifdef CONFIG_SLUB_CPU_PARTIAL - /* Number of per cpu partial objects to keep around */ - unsigned int cpu_partial; -#endif + int size; /* The size of an object including meta data */ + int object_size; /* The size of an object without meta data */ + int offset; /* Free pointer offset. 
*/ + int cpu_partial; /* Number of per cpu partial objects to keep around */ struct kmem_cache_order_objects oo; /* Allocation and freeing of slabs */ struct kmem_cache_order_objects max; struct kmem_cache_order_objects min; gfp_t allocflags; /* gfp flags to use on each alloc */ - int refcount; /* Refcount for slab cache destroy */ + atomic_t refcount; /* Refcount for slab cache destroy */ void (*ctor)(void *); - unsigned int inuse; /* Offset to metadata */ - unsigned int align; /* Alignment */ - unsigned int red_left_pad; /* Left redzone padding size */ + int inuse; /* Offset to metadata */ + int align; /* Alignment */ + int reserved; /* Reserved bytes at the end of slabs */ const char *name; /* Name (only for display!) */ struct list_head list; /* List of slab caches */ + int red_left_pad; /* Left redzone padding size */ #ifdef CONFIG_SYSFS struct kobject kobj; /* For sysfs */ #endif -#ifdef CONFIG_SLAB_FREELIST_HARDENED - unsigned long random; +#ifdef CONFIG_MEMCG + struct memcg_cache_params memcg_params; + int max_attr_size; /* for propagation, maximum size of a stored attr */ +#ifdef CONFIG_SYSFS + struct kset *memcg_kset; +#endif #endif #ifdef CONFIG_NUMA /* * Defragmentation by allocating from a remote node. 
*/ - unsigned int remote_node_defrag_ratio; + int remote_node_defrag_ratio; #endif #ifdef CONFIG_SLAB_FREELIST_RANDOM @@ -135,32 +108,17 @@ struct kmem_cache { struct kasan_cache kasan_info; #endif - unsigned int useroffset; /* Usercopy region offset */ - unsigned int usersize; /* Usercopy region size */ + size_t useroffset; /* USERCOPY region offset */ + size_t usersize; /* USERCOPY region size */ struct kmem_cache_node *node[MAX_NUMNODES]; }; -#ifdef CONFIG_SLUB_CPU_PARTIAL -#define slub_cpu_partial(s) ((s)->cpu_partial) -#define slub_set_cpu_partial(s, n) \ -({ \ - slub_cpu_partial(s) = (n); \ -}) -#else -#define slub_cpu_partial(s) (0) -#define slub_set_cpu_partial(s, n) -#endif /* CONFIG_SLUB_CPU_PARTIAL */ - #ifdef CONFIG_SYSFS #define SLAB_SUPPORTS_SYSFS -void sysfs_slab_unlink(struct kmem_cache *); -void sysfs_slab_release(struct kmem_cache *); +void sysfs_slab_remove(struct kmem_cache *); #else -static inline void sysfs_slab_unlink(struct kmem_cache *s) -{ -} -static inline void sysfs_slab_release(struct kmem_cache *s) +static inline void sysfs_slab_remove(struct kmem_cache *s) { } #endif @@ -181,25 +139,4 @@ static inline void *nearest_obj(struct kmem_cache *cache, struct page *page, return result; } -/* Determine object index from a given position */ -static inline unsigned int __obj_to_index(const struct kmem_cache *cache, - void *addr, void *obj) -{ - return reciprocal_divide(kasan_reset_tag(obj) - addr, - cache->reciprocal_size); -} - -static inline unsigned int obj_to_index(const struct kmem_cache *cache, - const struct page *page, void *obj) -{ - if (is_kfence_address(obj)) - return 0; - return __obj_to_index(cache, page_address(page), obj); -} - -static inline int objs_per_slab_page(const struct kmem_cache *cache, - const struct page *page) -{ - return page->objects; -} #endif /* _LINUX_SLUB_DEF_H */ diff --git a/include/linux/sm501-regs.h b/include/linux/sm501-regs.h index 2c5cb6ccc5..67ed2c5428 100644 --- a/include/linux/sm501-regs.h +++ 
b/include/linux/sm501-regs.h @@ -1,8 +1,11 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* sm501-regs.h * * Copyright 2006 Simtec Electronics * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * * Silicon Motion SM501 register definitions */ diff --git a/include/linux/sm501.h b/include/linux/sm501.h index 2f3488b287..02fde50a79 100644 --- a/include/linux/sm501.h +++ b/include/linux/sm501.h @@ -1,9 +1,21 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* include/linux/sm501.h * * Copyright (c) 2006 Simtec Electronics * Ben Dooks * Vincent Sanders + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ extern int sm501_unit_power(struct device *dev, diff --git a/include/linux/smc911x.h b/include/linux/smc911x.h index 8cace8189e..521f37143f 100644 --- a/include/linux/smc911x.h +++ b/include/linux/smc911x.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef __SMC911X_H__ #define __SMC911X_H__ diff --git a/include/linux/smc91x.h b/include/linux/smc91x.h index f3b195fa78..e302c447e0 100644 --- a/include/linux/smc91x.h +++ b/include/linux/smc91x.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef __SMC91X_H__ #define __SMC91X_H__ @@ -40,7 +39,6 @@ struct smc91x_platdata { unsigned long flags; unsigned char leda; unsigned char ledb; - bool pxa_u16_align4; /* PXA buggy u16 writes on 4*n+2 addresses */ }; #endif /* __SMC91X_H__ */ diff --git a/include/linux/smp.h b/include/linux/smp.h index 510519e8a1..a702af8fef 100644 --- a/include/linux/smp.h +++ b/include/linux/smp.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef __LINUX_SMP_H #define __LINUX_SMP_H @@ -12,98 +11,44 @@ #include #include #include -#include +#include typedef void (*smp_call_func_t)(void *info); -typedef bool (*smp_cond_func_t)(int cpu, void *info); - -/* - * structure shares (partial) layout with struct irq_work - */ -struct __call_single_data { - struct __call_single_node node; +struct call_single_data { + struct llist_node llist; smp_call_func_t func; void *info; + unsigned int flags; }; -#define CSD_INIT(_func, _info) \ - (struct __call_single_data){ .func = (_func), .info = (_info), } - -/* Use __aligned() to avoid to use 2 cache lines for 1 csd */ -typedef struct __call_single_data call_single_data_t - __aligned(sizeof(struct __call_single_data)); - -#define INIT_CSD(_csd, _func, _info) \ -do { \ - *(_csd) = CSD_INIT((_func), (_info)); \ -} 
while (0) - -/* - * Enqueue a llist_node on the call_single_queue; be very careful, read - * flush_smp_call_function_queue() in detail. - */ -extern void __smp_call_single_queue(int cpu, struct llist_node *node); - /* total number of cpus in this system (may exceed NR_CPUS) */ extern unsigned int total_cpus; int smp_call_function_single(int cpuid, smp_call_func_t func, void *info, int wait); -void on_each_cpu_cond_mask(smp_cond_func_t cond_func, smp_call_func_t func, - void *info, bool wait, const struct cpumask *mask); - -int smp_call_function_single_async(int cpu, struct __call_single_data *csd); - -/* - * Cpus stopping functions in panic. All have default weak definitions. - * Architecture-dependent code may override them. - */ -void panic_smp_self_stop(void); -void nmi_panic_self_stop(struct pt_regs *regs); -void crash_smp_send_stop(void); - /* * Call a function on all processors */ -static inline void on_each_cpu(smp_call_func_t func, void *info, int wait) -{ - on_each_cpu_cond_mask(NULL, func, info, wait, cpu_online_mask); -} +int on_each_cpu(smp_call_func_t func, void *info, int wait); -/** - * on_each_cpu_mask(): Run a function on processors specified by - * cpumask, which may include the local processor. - * @mask: The set of cpus to run on (only runs on online subset). - * @func: The function to run. This must be fast and non-blocking. - * @info: An arbitrary pointer to pass to the function. - * @wait: If true, wait (atomically) until function has completed - * on other CPUs. - * - * If @wait is true, then returns once @func has returned. - * - * You must not call this function with disabled interrupts or from a - * hardware interrupt handler or from a bottom half handler. The - * exception is that it may be used during early boot while - * early_boot_irqs_disabled is set. +/* + * Call a function on processors specified by mask, which might include + * the local one. 
*/ -static inline void on_each_cpu_mask(const struct cpumask *mask, - smp_call_func_t func, void *info, bool wait) -{ - on_each_cpu_cond_mask(NULL, func, info, wait, mask); -} +void on_each_cpu_mask(const struct cpumask *mask, smp_call_func_t func, + void *info, bool wait); /* * Call a function on each processor for which the supplied function * cond_func returns a positive value. This may include the local - * processor. May be used during early boot while early_boot_irqs_disabled is - * set. Use local_irq_save/restore() instead of local_irq_disable/enable(). + * processor. */ -static inline void on_each_cpu_cond(smp_cond_func_t cond_func, - smp_call_func_t func, void *info, bool wait) -{ - on_each_cpu_cond_mask(cond_func, func, info, wait, cpu_online_mask); -} +void on_each_cpu_cond(bool (*cond_func)(int cpu, void *info), + smp_call_func_t func, void *info, bool wait, + gfp_t gfp_flags); + +int smp_call_function_single_async(int cpu, struct call_single_data *csd); #ifdef CONFIG_SMP @@ -147,7 +92,7 @@ extern void smp_cpus_done(unsigned int max_cpus); /* * Call a function on all other processors */ -void smp_call_function(smp_call_func_t func, void *info, int wait); +int smp_call_function(smp_call_func_t func, void *info, int wait); void smp_call_function_many(const struct cpumask *mask, smp_call_func_t func, void *info, bool wait); @@ -175,13 +120,6 @@ extern unsigned int setup_max_cpus; extern void __init setup_nr_cpu_ids(void); extern void __init smp_init(void); -extern int __boot_cpu_id; - -static inline int get_boot_cpu_id(void) -{ - return __boot_cpu_id; -} - #else /* !SMP */ static inline void smp_send_stop(void) { } @@ -190,8 +128,9 @@ static inline void smp_send_stop(void) { } * These macros fold the SMP functionality into a single CPU system */ #define raw_smp_processor_id() 0 -static inline void up_smp_call_function(smp_call_func_t func, void *info) +static inline int up_smp_call_function(smp_call_func_t func, void *info) { + return 0; } #define 
smp_call_function(func, info, wait) \ (up_smp_call_function(func, info)) @@ -219,54 +158,34 @@ static inline void smp_init(void) { up_late_init(); } static inline void smp_init(void) { } #endif -static inline int get_boot_cpu_id(void) -{ - return 0; -} - #endif /* !SMP */ -/** - * raw_processor_id() - get the current (unstable) CPU id - * - * For then you know what you are doing and need an unstable - * CPU id. - */ - -/** - * smp_processor_id() - get the current (stable) CPU id - * - * This is the normal accessor to the CPU id and should be used - * whenever possible. - * - * The CPU id is stable when: - * - * - IRQs are disabled; - * - preemption is disabled; - * - the task is CPU affine. - * - * When CONFIG_DEBUG_PREEMPT; we verify these assumption and WARN - * when smp_processor_id() is used when the CPU id is not stable. - */ - /* - * Allow the architecture to differentiate between a stable and unstable read. - * For example, x86 uses an IRQ-safe asm-volatile read for the unstable but a - * regular asm read for the stable. + * smp_processor_id(): get the current CPU ID. + * + * if DEBUG_PREEMPT is enabled then we check whether it is + * used in a preemption-safe way. (smp_processor_id() is safe + * if it's used in a preemption-off critical section, or in + * a thread that is bound to the current CPU.) + * + * NOTE: raw_smp_processor_id() is for internal use only + * (smp_processor_id() is the preferred variant), but in rare + * instances it might also be used to turn off false positives + * (i.e. smp_processor_id() use that the debugging code reports but + * which use for some reason is legal). Don't use this to hack around + * the warning message, as your code might not work under PREEMPT. 
*/ -#ifndef __smp_processor_id -#define __smp_processor_id(x) raw_smp_processor_id(x) -#endif - #ifdef CONFIG_DEBUG_PREEMPT extern unsigned int debug_smp_processor_id(void); # define smp_processor_id() debug_smp_processor_id() #else -# define smp_processor_id() __smp_processor_id() +# define smp_processor_id() raw_smp_processor_id() #endif -#define get_cpu() ({ preempt_disable(); __smp_processor_id(); }) +#define get_cpu() ({ preempt_disable(); smp_processor_id(); }) +#define raw_get_cpu() ({ raw_preempt_disable(); raw_smp_processor_id(); }) #define put_cpu() preempt_enable() +#define raw_put_cpu_no_resched() raw_preempt_enable_no_resched() /* * Callback to arch code if there's nosmp or maxcpus=0 on the @@ -274,8 +193,8 @@ static inline int get_boot_cpu_id(void) */ extern void arch_disable_smp_support(void); -extern void arch_thaw_secondary_cpus_begin(void); -extern void arch_thaw_secondary_cpus_end(void); +extern void arch_enable_nonboot_cpus_begin(void); +extern void arch_enable_nonboot_cpus_end(void); void smp_setup_processor_id(void); diff --git a/include/linux/smpboot.h b/include/linux/smpboot.h index 9d1bc65d22..12910cf198 100644 --- a/include/linux/smpboot.h +++ b/include/linux/smpboot.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_SMPBOOT_H #define _LINUX_SMPBOOT_H @@ -25,11 +24,13 @@ struct smpboot_thread_data; * parked (cpu offline) * @unpark: Optional unpark function, called when the thread is * unparked (cpu online) + * @cpumask: Internal state. To update which threads are unparked, + * call smpboot_update_cpumask_percpu_thread(). * @selfparking: Thread is not parked by the park function. 
* @thread_comm: The base name of the thread */ struct smp_hotplug_thread { - struct task_struct * __percpu *store; + struct task_struct __percpu **store; struct list_head list; int (*thread_should_run)(unsigned int cpu); void (*thread_fn)(unsigned int cpu); @@ -38,12 +39,23 @@ struct smp_hotplug_thread { void (*cleanup)(unsigned int cpu, bool online); void (*park)(unsigned int cpu); void (*unpark)(unsigned int cpu); + cpumask_var_t cpumask; bool selfparking; const char *thread_comm; }; -int smpboot_register_percpu_thread(struct smp_hotplug_thread *plug_thread); +int smpboot_register_percpu_thread_cpumask(struct smp_hotplug_thread *plug_thread, + const struct cpumask *cpumask); + +static inline int +smpboot_register_percpu_thread(struct smp_hotplug_thread *plug_thread) +{ + return smpboot_register_percpu_thread_cpumask(plug_thread, + cpu_possible_mask); +} void smpboot_unregister_percpu_thread(struct smp_hotplug_thread *plug_thread); +int smpboot_update_cpumask_percpu_thread(struct smp_hotplug_thread *plug_thread, + const struct cpumask *); #endif diff --git a/include/linux/smsc911x.h b/include/linux/smsc911x.h index 868348f7ea..eec3efd19b 100644 --- a/include/linux/smsc911x.h +++ b/include/linux/smsc911x.h @@ -1,9 +1,22 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ /*************************************************************************** * * Copyright (C) 2004-2008 SMSC * Copyright (C) 2005-2008 ARM * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. + * ***************************************************************************/ #ifndef __LINUX_SMSC911X_H__ #define __LINUX_SMSC911X_H__ diff --git a/include/linux/smscphy.h b/include/linux/smscphy.h index 1a136271ba..f4bf16e16e 100644 --- a/include/linux/smscphy.h +++ b/include/linux/smscphy.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef __LINUX_SMSCPHY_H__ #define __LINUX_SMSCPHY_H__ diff --git a/include/linux/soc/brcmstb/brcmstb.h b/include/linux/soc/brcmstb/brcmstb.h index f2b7688527..337ce414e8 100644 --- a/include/linux/soc/brcmstb/brcmstb.h +++ b/include/linux/soc/brcmstb/brcmstb.h @@ -1,38 +1,10 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef __BRCMSTB_SOC_H #define __BRCMSTB_SOC_H -#include - -static inline u32 BRCM_ID(u32 reg) -{ - return reg >> 28 ? reg >> 16 : reg >> 8; -} - -static inline u32 BRCM_REV(u32 reg) -{ - return reg & 0xff; -} - -#if IS_ENABLED(CONFIG_SOC_BRCMSTB) - /* - * Helper functions for getting family or product id from the - * SoC driver. + * Bus Interface Unit control register setup, must happen early during boot, + * before SMP is brought up, called by machine entry point. 
*/ -u32 brcmstb_get_family_id(void); -u32 brcmstb_get_product_id(void); - -#else -static inline u32 brcmstb_get_family_id(void) -{ - return 0; -} - -static inline u32 brcmstb_get_product_id(void) -{ - return 0; -} -#endif +void brcmstb_biuctrl_init(void); #endif /* __BRCMSTB_SOC_H */ diff --git a/include/linux/soc/dove/pmu.h b/include/linux/soc/dove/pmu.h index 1955c01dee..765386972b 100644 --- a/include/linux/soc/dove/pmu.h +++ b/include/linux/soc/dove/pmu.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef LINUX_SOC_DOVE_PMU_H #define LINUX_SOC_DOVE_PMU_H diff --git a/include/linux/soc/mediatek/infracfg.h b/include/linux/soc/mediatek/infracfg.h index 4615a228da..a5714e93fb 100644 --- a/include/linux/soc/mediatek/infracfg.h +++ b/include/linux/soc/mediatek/infracfg.h @@ -1,109 +1,6 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef __SOC_MEDIATEK_INFRACFG_H #define __SOC_MEDIATEK_INFRACFG_H -#define MT8192_TOP_AXI_PROT_EN_STA1 0x228 -#define MT8192_TOP_AXI_PROT_EN_1_STA1 0x258 -#define MT8192_TOP_AXI_PROT_EN_SET 0x2a0 -#define MT8192_TOP_AXI_PROT_EN_CLR 0x2a4 -#define MT8192_TOP_AXI_PROT_EN_1_SET 0x2a8 -#define MT8192_TOP_AXI_PROT_EN_1_CLR 0x2ac -#define MT8192_TOP_AXI_PROT_EN_MM_SET 0x2d4 -#define MT8192_TOP_AXI_PROT_EN_MM_CLR 0x2d8 -#define MT8192_TOP_AXI_PROT_EN_MM_STA1 0x2ec -#define MT8192_TOP_AXI_PROT_EN_2_SET 0x714 -#define MT8192_TOP_AXI_PROT_EN_2_CLR 0x718 -#define MT8192_TOP_AXI_PROT_EN_2_STA1 0x724 -#define MT8192_TOP_AXI_PROT_EN_VDNR_SET 0xb84 -#define MT8192_TOP_AXI_PROT_EN_VDNR_CLR 0xb88 -#define MT8192_TOP_AXI_PROT_EN_VDNR_STA1 0xb90 -#define MT8192_TOP_AXI_PROT_EN_MM_2_SET 0xdcc -#define MT8192_TOP_AXI_PROT_EN_MM_2_CLR 0xdd0 -#define MT8192_TOP_AXI_PROT_EN_MM_2_STA1 0xdd8 - -#define MT8192_TOP_AXI_PROT_EN_DISP (BIT(6) | BIT(23)) -#define MT8192_TOP_AXI_PROT_EN_CONN (BIT(13) | BIT(18)) -#define MT8192_TOP_AXI_PROT_EN_CONN_2ND BIT(14) -#define MT8192_TOP_AXI_PROT_EN_MFG1 GENMASK(22, 21) -#define MT8192_TOP_AXI_PROT_EN_1_CONN BIT(10) 
-#define MT8192_TOP_AXI_PROT_EN_1_MFG1 BIT(21) -#define MT8192_TOP_AXI_PROT_EN_1_CAM BIT(22) -#define MT8192_TOP_AXI_PROT_EN_2_CAM BIT(0) -#define MT8192_TOP_AXI_PROT_EN_2_ADSP BIT(3) -#define MT8192_TOP_AXI_PROT_EN_2_AUDIO BIT(4) -#define MT8192_TOP_AXI_PROT_EN_2_MFG1 GENMASK(6, 5) -#define MT8192_TOP_AXI_PROT_EN_2_MFG1_2ND BIT(7) -#define MT8192_TOP_AXI_PROT_EN_MM_CAM (BIT(0) | BIT(2)) -#define MT8192_TOP_AXI_PROT_EN_MM_DISP (BIT(0) | BIT(2) | \ - BIT(10) | BIT(12) | \ - BIT(14) | BIT(16) | \ - BIT(24) | BIT(26)) -#define MT8192_TOP_AXI_PROT_EN_MM_CAM_2ND (BIT(1) | BIT(3)) -#define MT8192_TOP_AXI_PROT_EN_MM_DISP_2ND (BIT(1) | BIT(3) | \ - BIT(15) | BIT(17) | \ - BIT(25) | BIT(27)) -#define MT8192_TOP_AXI_PROT_EN_MM_ISP2 BIT(14) -#define MT8192_TOP_AXI_PROT_EN_MM_ISP2_2ND BIT(15) -#define MT8192_TOP_AXI_PROT_EN_MM_IPE BIT(16) -#define MT8192_TOP_AXI_PROT_EN_MM_IPE_2ND BIT(17) -#define MT8192_TOP_AXI_PROT_EN_MM_VDEC BIT(24) -#define MT8192_TOP_AXI_PROT_EN_MM_VDEC_2ND BIT(25) -#define MT8192_TOP_AXI_PROT_EN_MM_VENC BIT(26) -#define MT8192_TOP_AXI_PROT_EN_MM_VENC_2ND BIT(27) -#define MT8192_TOP_AXI_PROT_EN_MM_2_ISP BIT(8) -#define MT8192_TOP_AXI_PROT_EN_MM_2_DISP (BIT(8) | BIT(12)) -#define MT8192_TOP_AXI_PROT_EN_MM_2_ISP_2ND BIT(9) -#define MT8192_TOP_AXI_PROT_EN_MM_2_DISP_2ND (BIT(9) | BIT(13)) -#define MT8192_TOP_AXI_PROT_EN_MM_2_MDP BIT(12) -#define MT8192_TOP_AXI_PROT_EN_MM_2_MDP_2ND BIT(13) -#define MT8192_TOP_AXI_PROT_EN_VDNR_CAM BIT(21) - -#define MT8183_TOP_AXI_PROT_EN_STA1 0x228 -#define MT8183_TOP_AXI_PROT_EN_STA1_1 0x258 -#define MT8183_TOP_AXI_PROT_EN_SET 0x2a0 -#define MT8183_TOP_AXI_PROT_EN_CLR 0x2a4 -#define MT8183_TOP_AXI_PROT_EN_1_SET 0x2a8 -#define MT8183_TOP_AXI_PROT_EN_1_CLR 0x2ac -#define MT8183_TOP_AXI_PROT_EN_MCU_SET 0x2c4 -#define MT8183_TOP_AXI_PROT_EN_MCU_CLR 0x2c8 -#define MT8183_TOP_AXI_PROT_EN_MCU_STA1 0x2e4 -#define MT8183_TOP_AXI_PROT_EN_MM_SET 0x2d4 -#define MT8183_TOP_AXI_PROT_EN_MM_CLR 0x2d8 -#define MT8183_TOP_AXI_PROT_EN_MM_STA1 
0x2ec - -#define MT8183_TOP_AXI_PROT_EN_DISP (BIT(10) | BIT(11)) -#define MT8183_TOP_AXI_PROT_EN_CONN (BIT(13) | BIT(14)) -#define MT8183_TOP_AXI_PROT_EN_MFG (BIT(21) | BIT(22)) -#define MT8183_TOP_AXI_PROT_EN_CAM BIT(28) -#define MT8183_TOP_AXI_PROT_EN_VPU_TOP BIT(27) -#define MT8183_TOP_AXI_PROT_EN_1_DISP (BIT(16) | BIT(17)) -#define MT8183_TOP_AXI_PROT_EN_1_MFG GENMASK(21, 19) -#define MT8183_TOP_AXI_PROT_EN_MM_ISP (BIT(3) | BIT(8)) -#define MT8183_TOP_AXI_PROT_EN_MM_ISP_2ND BIT(10) -#define MT8183_TOP_AXI_PROT_EN_MM_CAM (BIT(4) | BIT(5) | \ - BIT(9) | BIT(13)) -#define MT8183_TOP_AXI_PROT_EN_MM_VPU_TOP (GENMASK(9, 6) | \ - BIT(12)) -#define MT8183_TOP_AXI_PROT_EN_MM_VPU_TOP_2ND (BIT(10) | BIT(11)) -#define MT8183_TOP_AXI_PROT_EN_MM_CAM_2ND BIT(11) -#define MT8183_TOP_AXI_PROT_EN_MCU_VPU_CORE0_2ND (BIT(0) | BIT(2) | \ - BIT(4)) -#define MT8183_TOP_AXI_PROT_EN_MCU_VPU_CORE1_2ND (BIT(1) | BIT(3) | \ - BIT(5)) -#define MT8183_TOP_AXI_PROT_EN_MCU_VPU_CORE0 BIT(6) -#define MT8183_TOP_AXI_PROT_EN_MCU_VPU_CORE1 BIT(7) - -#define MT8183_SMI_COMMON_CLAMP_EN 0x3c0 -#define MT8183_SMI_COMMON_CLAMP_EN_SET 0x3c4 -#define MT8183_SMI_COMMON_CLAMP_EN_CLR 0x3c8 - -#define MT8183_SMI_COMMON_SMI_CLAMP_DISP GENMASK(7, 0) -#define MT8183_SMI_COMMON_SMI_CLAMP_VENC BIT(1) -#define MT8183_SMI_COMMON_SMI_CLAMP_ISP BIT(2) -#define MT8183_SMI_COMMON_SMI_CLAMP_CAM (BIT(3) | BIT(4)) -#define MT8183_SMI_COMMON_SMI_CLAMP_VPU_TOP (BIT(5) | BIT(6)) -#define MT8183_SMI_COMMON_SMI_CLAMP_VDEC BIT(7) - #define MT8173_TOP_AXI_PROT_EN_MCI_M2 BIT(0) #define MT8173_TOP_AXI_PROT_EN_MM_M0 BIT(1) #define MT8173_TOP_AXI_PROT_EN_MM_M1 BIT(2) @@ -123,35 +20,7 @@ #define MT8173_TOP_AXI_PROT_EN_MFG_M1 BIT(22) #define MT8173_TOP_AXI_PROT_EN_MFG_SNOOP_OUT BIT(23) -#define MT8167_TOP_AXI_PROT_EN_MM_EMI BIT(1) -#define MT8167_TOP_AXI_PROT_EN_MCU_MFG BIT(2) -#define MT8167_TOP_AXI_PROT_EN_CONN_EMI BIT(4) -#define MT8167_TOP_AXI_PROT_EN_MFG_EMI BIT(5) -#define MT8167_TOP_AXI_PROT_EN_CONN_MCU BIT(8) -#define 
MT8167_TOP_AXI_PROT_EN_MCU_CONN BIT(9) -#define MT8167_TOP_AXI_PROT_EN_MCU_MM BIT(11) +int mtk_infracfg_set_bus_protection(struct regmap *infracfg, u32 mask); +int mtk_infracfg_clear_bus_protection(struct regmap *infracfg, u32 mask); -#define MT2701_TOP_AXI_PROT_EN_MM_M0 BIT(1) -#define MT2701_TOP_AXI_PROT_EN_CONN_M BIT(2) -#define MT2701_TOP_AXI_PROT_EN_CONN_S BIT(8) - -#define MT7622_TOP_AXI_PROT_EN_ETHSYS (BIT(3) | BIT(17)) -#define MT7622_TOP_AXI_PROT_EN_HIF0 (BIT(24) | BIT(25)) -#define MT7622_TOP_AXI_PROT_EN_HIF1 (BIT(26) | BIT(27) | \ - BIT(28)) -#define MT7622_TOP_AXI_PROT_EN_WB (BIT(2) | BIT(6) | \ - BIT(7) | BIT(8)) - -#define INFRA_TOPAXI_PROTECTEN 0x0220 -#define INFRA_TOPAXI_PROTECTSTA1 0x0228 -#define INFRA_TOPAXI_PROTECTEN_SET 0x0260 -#define INFRA_TOPAXI_PROTECTEN_CLR 0x0264 - -#define REG_INFRA_MISC 0xf00 -#define F_DDR_4GB_SUPPORT_EN BIT(13) - -int mtk_infracfg_set_bus_protection(struct regmap *infracfg, u32 mask, - bool reg_update); -int mtk_infracfg_clear_bus_protection(struct regmap *infracfg, u32 mask, - bool reg_update); #endif /* __SOC_MEDIATEK_INFRACFG_H */ diff --git a/include/linux/soc/qcom/smd-rpm.h b/include/linux/soc/qcom/smd-rpm.h index 60e66fc9b6..2a53dcaeee 100644 --- a/include/linux/soc/qcom/smd-rpm.h +++ b/include/linux/soc/qcom/smd-rpm.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef __QCOM_SMD_RPM_H__ #define __QCOM_SMD_RPM_H__ @@ -10,8 +9,6 @@ struct qcom_smd_rpm; /* * Constants used for addressing resources in the RPM. 
*/ -#define QCOM_SMD_RPM_BBYB 0x62796262 -#define QCOM_SMD_RPM_BOBB 0x62626f62 #define QCOM_SMD_RPM_BOOST 0x61747362 #define QCOM_SMD_RPM_BUS_CLK 0x316b6c63 #define QCOM_SMD_RPM_BUS_MASTER 0x73616d62 @@ -19,25 +16,16 @@ struct qcom_smd_rpm; #define QCOM_SMD_RPM_CLK_BUF_A 0x616B6C63 #define QCOM_SMD_RPM_LDOA 0x616f646c #define QCOM_SMD_RPM_LDOB 0x626F646C -#define QCOM_SMD_RPM_RWCX 0x78637772 -#define QCOM_SMD_RPM_RWMX 0x786d7772 -#define QCOM_SMD_RPM_RWLC 0x636c7772 -#define QCOM_SMD_RPM_RWLM 0x6d6c7772 #define QCOM_SMD_RPM_MEM_CLK 0x326b6c63 #define QCOM_SMD_RPM_MISC_CLK 0x306b6c63 #define QCOM_SMD_RPM_NCPA 0x6170636E #define QCOM_SMD_RPM_NCPB 0x6270636E #define QCOM_SMD_RPM_OCMEM_PWR 0x706d636f #define QCOM_SMD_RPM_QPIC_CLK 0x63697071 -#define QCOM_SMD_RPM_QUP_CLK 0x707571 #define QCOM_SMD_RPM_SMPA 0x61706d73 #define QCOM_SMD_RPM_SMPB 0x62706d73 #define QCOM_SMD_RPM_SPDM 0x63707362 #define QCOM_SMD_RPM_VSA 0x00617376 -#define QCOM_SMD_RPM_MMAXI_CLK 0x69786d6d -#define QCOM_SMD_RPM_IPA_CLK 0x617069 -#define QCOM_SMD_RPM_CE_CLK 0x6563 -#define QCOM_SMD_RPM_AGGR_CLK 0x72676761 int qcom_rpm_smd_write(struct qcom_smd_rpm *rpm, int state, diff --git a/include/linux/soc/qcom/smd.h b/include/linux/soc/qcom/smd.h new file mode 100644 index 0000000000..f148e0ffbe --- /dev/null +++ b/include/linux/soc/qcom/smd.h @@ -0,0 +1,139 @@ +#ifndef __QCOM_SMD_H__ +#define __QCOM_SMD_H__ + +#include +#include + +struct qcom_smd; +struct qcom_smd_channel; +struct qcom_smd_lookup; + +/** + * struct qcom_smd_id - struct used for matching a smd device + * @name: name of the channel + */ +struct qcom_smd_id { + char name[20]; +}; + +/** + * struct qcom_smd_device - smd device struct + * @dev: the device struct + * @channel: handle to the smd channel for this device + */ +struct qcom_smd_device { + struct device dev; + struct qcom_smd_channel *channel; +}; + +typedef int (*qcom_smd_cb_t)(struct qcom_smd_channel *, const void *, size_t); + +/** + * struct qcom_smd_driver - smd driver struct 
+ * @driver: underlying device driver + * @smd_match_table: static channel match table + * @probe: invoked when the smd channel is found + * @remove: invoked when the smd channel is closed + * @callback: invoked when an inbound message is received on the channel, + * should return 0 on success or -EBUSY if the data cannot be + * consumed at this time + */ +struct qcom_smd_driver { + struct device_driver driver; + const struct qcom_smd_id *smd_match_table; + + int (*probe)(struct qcom_smd_device *dev); + void (*remove)(struct qcom_smd_device *dev); + qcom_smd_cb_t callback; +}; + +#if IS_ENABLED(CONFIG_QCOM_SMD) + +int qcom_smd_driver_register(struct qcom_smd_driver *drv); +void qcom_smd_driver_unregister(struct qcom_smd_driver *drv); + +struct qcom_smd_channel *qcom_smd_open_channel(struct qcom_smd_channel *channel, + const char *name, + qcom_smd_cb_t cb); +void qcom_smd_close_channel(struct qcom_smd_channel *channel); +void *qcom_smd_get_drvdata(struct qcom_smd_channel *channel); +void qcom_smd_set_drvdata(struct qcom_smd_channel *channel, void *data); +int qcom_smd_send(struct qcom_smd_channel *channel, const void *data, int len); + + +struct qcom_smd_edge *qcom_smd_register_edge(struct device *parent, + struct device_node *node); +int qcom_smd_unregister_edge(struct qcom_smd_edge *edge); + +#else + +static inline int qcom_smd_driver_register(struct qcom_smd_driver *drv) +{ + return -ENXIO; +} + +static inline void qcom_smd_driver_unregister(struct qcom_smd_driver *drv) +{ + /* This shouldn't be possible */ + WARN_ON(1); +} + +static inline struct qcom_smd_channel * +qcom_smd_open_channel(struct qcom_smd_channel *channel, + const char *name, + qcom_smd_cb_t cb) +{ + /* This shouldn't be possible */ + WARN_ON(1); + return NULL; +} + +static inline void qcom_smd_close_channel(struct qcom_smd_channel *channel) +{ + /* This shouldn't be possible */ + WARN_ON(1); +} + +static inline void *qcom_smd_get_drvdata(struct qcom_smd_channel *channel) +{ + /* This shouldn't be 
possible */ + WARN_ON(1); + return NULL; +} + +static inline void qcom_smd_set_drvdata(struct qcom_smd_channel *channel, void *data) +{ + /* This shouldn't be possible */ + WARN_ON(1); +} + +static inline int qcom_smd_send(struct qcom_smd_channel *channel, + const void *data, int len) +{ + /* This shouldn't be possible */ + WARN_ON(1); + return -ENXIO; +} + +static inline struct qcom_smd_edge * +qcom_smd_register_edge(struct device *parent, + struct device_node *node) +{ + return ERR_PTR(-ENXIO); +} + +static inline int qcom_smd_unregister_edge(struct qcom_smd_edge *edge) +{ + /* This shouldn't be possible */ + WARN_ON(1); + return -ENXIO; +} + +#endif + +#define module_qcom_smd_driver(__smd_driver) \ + module_driver(__smd_driver, qcom_smd_driver_register, \ + qcom_smd_driver_unregister) + + +#endif diff --git a/include/linux/soc/qcom/smem.h b/include/linux/soc/qcom/smem.h index 86e1b35868..785e196ee2 100644 --- a/include/linux/soc/qcom/smem.h +++ b/include/linux/soc/qcom/smem.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef __QCOM_SMEM_H__ #define __QCOM_SMEM_H__ @@ -9,6 +8,4 @@ void *qcom_smem_get(unsigned host, unsigned item, size_t *size); int qcom_smem_get_free_space(unsigned host); -phys_addr_t qcom_smem_virt_to_phys(void *p); - #endif diff --git a/include/linux/soc/qcom/smem_state.h b/include/linux/soc/qcom/smem_state.h index 652c0158ba..7b88697929 100644 --- a/include/linux/soc/qcom/smem_state.h +++ b/include/linux/soc/qcom/smem_state.h @@ -1,8 +1,7 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef __QCOM_SMEM_STATE__ #define __QCOM_SMEM_STATE__ -#include +#include struct device_node; struct qcom_smem_state; @@ -14,7 +13,6 @@ struct qcom_smem_state_ops { #ifdef CONFIG_QCOM_SMEM_STATE struct qcom_smem_state *qcom_smem_state_get(struct device *dev, const char *con_id, unsigned *bit); -struct qcom_smem_state *devm_qcom_smem_state_get(struct device *dev, const char *con_id, unsigned *bit); void qcom_smem_state_put(struct qcom_smem_state *); 
int qcom_smem_state_update_bits(struct qcom_smem_state *state, u32 mask, u32 value); @@ -30,13 +28,6 @@ static inline struct qcom_smem_state *qcom_smem_state_get(struct device *dev, return ERR_PTR(-EINVAL); } -static inline struct qcom_smem_state *devm_qcom_smem_state_get(struct device *dev, - const char *con_id, - unsigned *bit) -{ - return ERR_PTR(-EINVAL); -} - static inline void qcom_smem_state_put(struct qcom_smem_state *state) { } diff --git a/include/linux/soc/qcom/wcnss_ctrl.h b/include/linux/soc/qcom/wcnss_ctrl.h index bbeb6b9c02..a37bc5538f 100644 --- a/include/linux/soc/qcom/wcnss_ctrl.h +++ b/include/linux/soc/qcom/wcnss_ctrl.h @@ -1,25 +1,8 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef __WCNSS_CTRL_H__ #define __WCNSS_CTRL_H__ -#include +#include -#if IS_ENABLED(CONFIG_QCOM_WCNSS_CTRL) - -struct rpmsg_endpoint *qcom_wcnss_open_channel(void *wcnss, const char *name, - rpmsg_rx_cb_t cb, void *priv); - -#else - -static struct rpmsg_endpoint *qcom_wcnss_open_channel(void *wcnss, - const char *name, - rpmsg_rx_cb_t cb, - void *priv) -{ - WARN_ON(1); - return ERR_PTR(-ENXIO); -} - -#endif +struct qcom_smd_channel *qcom_wcnss_open_channel(void *wcnss, const char *name, qcom_smd_cb_t cb); #endif diff --git a/include/linux/soc/renesas/rcar-sysc.h b/include/linux/soc/renesas/rcar-sysc.h index 00fae6fd23..7b8b280c18 100644 --- a/include/linux/soc/renesas/rcar-sysc.h +++ b/include/linux/soc/renesas/rcar-sysc.h @@ -1,8 +1,16 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef __LINUX_SOC_RENESAS_RCAR_SYSC_H__ #define __LINUX_SOC_RENESAS_RCAR_SYSC_H__ -int rcar_sysc_power_down_cpu(unsigned int cpu); -int rcar_sysc_power_up_cpu(unsigned int cpu); +#include + +struct rcar_sysc_ch { + u16 chan_offs; + u8 chan_bit; + u8 isr_bit; +}; + +int rcar_sysc_power_down(const struct rcar_sysc_ch *sysc_ch); +int rcar_sysc_power_up(const struct rcar_sysc_ch *sysc_ch); +void rcar_sysc_init(phys_addr_t base, u32 syscier); #endif /* __LINUX_SOC_RENESAS_RCAR_SYSC_H__ */ diff --git 
a/include/linux/soc/samsung/exynos-pmu.h b/include/linux/soc/samsung/exynos-pmu.h index a4f5516cc9..e2e9de1acc 100644 --- a/include/linux/soc/samsung/exynos-pmu.h +++ b/include/linux/soc/samsung/exynos-pmu.h @@ -1,16 +1,17 @@ -/* SPDX-License-Identifier: GPL-2.0 */ /* * Copyright (c) 2014 Samsung Electronics Co., Ltd. * http://www.samsung.com * - * Header for Exynos PMU Driver support + * Header for EXYNOS PMU Driver support + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. */ #ifndef __LINUX_SOC_EXYNOS_PMU_H #define __LINUX_SOC_EXYNOS_PMU_H -struct regmap; - enum sys_powerdown { SYS_AFTR, SYS_LPA, @@ -19,13 +20,5 @@ enum sys_powerdown { }; extern void exynos_sys_powerdown_conf(enum sys_powerdown mode); -#ifdef CONFIG_EXYNOS_PMU -extern struct regmap *exynos_get_pmu_regmap(void); -#else -static inline struct regmap *exynos_get_pmu_regmap(void) -{ - return ERR_PTR(-ENODEV); -} -#endif #endif /* __LINUX_SOC_EXYNOS_PMU_H */ diff --git a/include/linux/soc/samsung/exynos-regs-pmu.h b/include/linux/soc/samsung/exynos-regs-pmu.h index aa840ed043..d30186e2b6 100644 --- a/include/linux/soc/samsung/exynos-regs-pmu.h +++ b/include/linux/soc/samsung/exynos-regs-pmu.h @@ -1,15 +1,13 @@ -/* SPDX-License-Identifier: GPL-2.0 */ /* - * Copyright (c) 2010-2015 Samsung Electronics Co., Ltd. + * Copyright (c) 2010-2012 Samsung Electronics Co., Ltd. * http://www.samsung.com * - * Exynos - Power management unit definition + * EXYNOS - Power management unit definition * - * Notice: - * This is not a list of all Exynos Power Management Unit SFRs. - * There are too many of them, not mentioning subtle differences - * between SoCs. For now, put here only the used registers. 
- */ + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. +*/ #ifndef __LINUX_SOC_EXYNOS_REGS_PMU_H #define __LINUX_SOC_EXYNOS_REGS_PMU_H __FILE__ @@ -40,22 +38,13 @@ #define EXYNOS_CORE_PO_RESET(n) ((1 << 4) << n) #define EXYNOS_WAKEUP_FROM_LOWPWR (1 << 28) #define EXYNOS_SWRESET 0x0400 +#define EXYNOS5440_SWRESET 0x00C4 #define S5P_WAKEUP_STAT 0x0600 -/* Value for EXYNOS_EINT_WAKEUP_MASK disabling all external wakeup interrupts */ -#define EXYNOS_EINT_WAKEUP_MASK_DISABLED 0xffffffff -#define EXYNOS_EINT_WAKEUP_MASK 0x0604 +#define S5P_EINT_WAKEUP_MASK 0x0604 #define S5P_WAKEUP_MASK 0x0608 #define S5P_WAKEUP_MASK2 0x0614 -/* MIPI_PHYn_CONTROL, valid for Exynos3250, Exynos4, Exynos5250 and Exynos5433 */ -#define EXYNOS4_MIPI_PHY_CONTROL(n) (0x0710 + (n) * 4) -/* Phy enable bit, common for all phy registers, not only MIPI */ -#define EXYNOS4_PHY_ENABLE (1 << 0) -#define EXYNOS4_MIPI_PHY_SRESETN (1 << 1) -#define EXYNOS4_MIPI_PHY_MRESETN (1 << 2) -#define EXYNOS4_MIPI_PHY_RESET_MASK (3 << 1) - #define S5P_INFORM0 0x0800 #define S5P_INFORM1 0x0804 #define S5P_INFORM5 0x0814 @@ -147,6 +136,12 @@ #define EXYNOS_COMMON_OPTION(_nr) \ (EXYNOS_COMMON_CONFIGURATION(_nr) + 0x8) +#define EXYNOS_CORE_LOCAL_PWR_EN 0x3 + +#define EXYNOS_ARM_COMMON_STATUS 0x2504 +#define EXYNOS_COMMON_OPTION(_nr) \ + (EXYNOS_COMMON_CONFIGURATION(_nr) + 0x8) + #define EXYNOS_ARM_L2_CONFIGURATION 0x2600 #define EXYNOS_L2_CONFIGURATION(_nr) \ (EXYNOS_ARM_L2_CONFIGURATION + ((_nr) * 0x80)) @@ -154,8 +149,17 @@ (EXYNOS_L2_CONFIGURATION(_nr) + 0x4) #define EXYNOS_L2_OPTION(_nr) \ (EXYNOS_L2_CONFIGURATION(_nr) + 0x8) +#define EXYNOS_L2_COMMON_PWR_EN 0x3 -#define EXYNOS_L2_USE_RETENTION BIT(4) +#define EXYNOS_ARM_CORE_X_STATUS_OFFSET 0x4 + +#define EXYNOS5_APLL_SYSCLK_CONFIGURATION 0x2A00 +#define EXYNOS5_APLL_SYSCLK_STATUS 0x2A04 + +#define EXYNOS5_ARM_L2_OPTION 0x2608 
+#define EXYNOS5_USE_RETENTION BIT(4) + +#define EXYNOS5_L2RSTDISABLE_VALUE BIT(3) #define S5P_PAD_RET_MAUDIO_OPTION 0x3028 #define S5P_PAD_RET_MMC2_OPTION 0x30c8 @@ -182,10 +186,7 @@ #define S5P_CORE_WAKEUP_FROM_LOCAL_CFG (0x3 << 8) #define S5P_CORE_AUTOWAKEUP_EN (1 << 31) -/* Only for S5Pv210 */ -#define S5PV210_EINT_WAKEUP_MASK 0xC004 - -/* Only for Exynos4210 */ +/* Only for EXYNOS4210 */ #define S5P_CMU_CLKSTOP_LCD1_LOWPWR 0x1154 #define S5P_CMU_RESET_LCD1_LOWPWR 0x1174 #define S5P_MODIMIF_MEM_LOWPWR 0x11C4 @@ -193,7 +194,7 @@ #define S5P_SATA_MEM_LOWPWR 0x11E4 #define S5P_LCD1_LOWPWR 0x1394 -/* Only for Exynos4x12 */ +/* Only for EXYNOS4x12 */ #define S5P_ISP_ARM_LOWPWR 0x1050 #define S5P_DIS_IRQ_ISP_ARM_LOCAL_LOWPWR 0x1054 #define S5P_DIS_IRQ_ISP_ARM_CENTRAL_LOWPWR 0x1058 @@ -234,7 +235,7 @@ #define S5P_SECSS_MEM_OPTION 0x2EC8 #define S5P_ROTATOR_MEM_OPTION 0x2F48 -/* Only for Exynos4412 */ +/* Only for EXYNOS4412 */ #define S5P_ARM_CORE2_LOWPWR 0x1020 #define S5P_DIS_IRQ_CORE2 0x1024 #define S5P_DIS_IRQ_CENTRAL2 0x1028 @@ -242,7 +243,7 @@ #define S5P_DIS_IRQ_CORE3 0x1034 #define S5P_DIS_IRQ_CENTRAL3 0x1038 -/* Only for Exynos3XXX */ +/* Only for EXYNOS3XXX */ #define EXYNOS3_ARM_CORE0_SYS_PWR_REG 0x1000 #define EXYNOS3_DIS_IRQ_ARM_CORE0_LOCAL_SYS_PWR_REG 0x1004 #define EXYNOS3_DIS_IRQ_ARM_CORE0_CENTRAL_SYS_PWR_REG 0x1008 @@ -347,12 +348,10 @@ #define EXYNOS3_OPTION_USE_SC_FEEDBACK (1 << 1) #define EXYNOS3_OPTION_SKIP_DEACTIVATE_ACEACP_IN_PWDN (1 << 7) -/* For Exynos5 */ +/* For EXYNOS5 */ #define EXYNOS5_AUTO_WDTRESET_DISABLE 0x0408 #define EXYNOS5_MASK_WDTRESET_REQUEST 0x040C -#define EXYNOS5_USBDRD_PHY_CONTROL 0x0704 -#define EXYNOS5_DPTX_PHY_CONTROL 0x0720 #define EXYNOS5_USE_RETENTION BIT(4) #define EXYNOS5_SYS_WDTRESET (1 << 20) @@ -412,6 +411,7 @@ #define EXYNOS5_SATA_MEM_SYS_PWR_REG 0x11FC #define EXYNOS5_PAD_RETENTION_DRAM_SYS_PWR_REG 0x1200 #define EXYNOS5_PAD_RETENTION_MAU_SYS_PWR_REG 0x1204 +#define EXYNOS5_PAD_RETENTION_EFNAND_SYS_PWR_REG 0x1208 
#define EXYNOS5_PAD_RETENTION_GPIO_SYS_PWR_REG 0x1220 #define EXYNOS5_PAD_RETENTION_UART_SYS_PWR_REG 0x1224 #define EXYNOS5_PAD_RETENTION_MMCA_SYS_PWR_REG 0x1228 @@ -484,7 +484,8 @@ #define EXYNOS5420_SWRESET_KFC_SEL 0x3 -/* Only for Exynos5420 */ +/* Only for EXYNOS5420 */ +#define EXYNOS5420_ISP_ARM_OPTION 0x2488 #define EXYNOS5420_L2RSTDISABLE_VALUE BIT(3) #define EXYNOS5420_LPI_MASK 0x0004 @@ -493,6 +494,9 @@ #define EXYNOS5420_ATB_KFC BIT(13) #define EXYNOS5420_ATB_ISP_ARM BIT(19) #define EXYNOS5420_EMULATION BIT(31) +#define ATB_ISP_ARM BIT(12) +#define ATB_KFC BIT(13) +#define ATB_NOC BIT(14) #define EXYNOS5420_ARM_INTR_SPREAD_ENABLE 0x0100 #define EXYNOS5420_ARM_INTR_SPREAD_USE_STANDBYWFI 0x0104 @@ -506,9 +510,11 @@ #define EXYNOS5420_KFC_CORE_RESET(_nr) \ ((EXYNOS5420_KFC_CORE_RESET0 | EXYNOS5420_KFC_ETM_RESET0) << (_nr)) -#define EXYNOS5420_USBDRD1_PHY_CONTROL 0x0708 -#define EXYNOS5420_MIPI_PHY_CONTROL(n) (0x0714 + (n) * 4) -#define EXYNOS5420_DPTX_PHY_CONTROL 0x0728 +#define EXYNOS5420_BB_CON1 0x0784 +#define EXYNOS5420_BB_SEL_EN BIT(31) +#define EXYNOS5420_BB_PMOS_EN BIT(7) +#define EXYNOS5420_BB_1300X 0XF + #define EXYNOS5420_ARM_CORE2_SYS_PWR_REG 0x1020 #define EXYNOS5420_DIS_IRQ_ARM_CORE2_LOCAL_SYS_PWR_REG 0x1024 #define EXYNOS5420_DIS_IRQ_ARM_CORE2_CENTRAL_SYS_PWR_REG 0x1028 @@ -540,6 +546,15 @@ #define EXYNOS5420_SPLL_SYSCLK_SYS_PWR_REG 0x1178 #define EXYNOS5420_INTRAM_MEM_SYS_PWR_REG 0x11B8 #define EXYNOS5420_INTROM_MEM_SYS_PWR_REG 0x11BC +#define EXYNOS5420_ONENANDXL_MEM_SYS_PWR 0x11C0 +#define EXYNOS5420_USBDEV_MEM_SYS_PWR 0x11CC +#define EXYNOS5420_USBDEV1_MEM_SYS_PWR 0x11D0 +#define EXYNOS5420_SDMMC_MEM_SYS_PWR 0x11D4 +#define EXYNOS5420_CSSYS_MEM_SYS_PWR 0x11D8 +#define EXYNOS5420_SECSS_MEM_SYS_PWR 0x11DC +#define EXYNOS5420_ROTATOR_MEM_SYS_PWR 0x11E0 +#define EXYNOS5420_INTRAM_MEM_SYS_PWR 0x11E4 +#define EXYNOS5420_INTROM_MEM_SYS_PWR 0x11E8 #define EXYNOS5420_PAD_RETENTION_JTAG_SYS_PWR_REG 0x1208 #define 
EXYNOS5420_PAD_RETENTION_DRAM_SYS_PWR_REG 0x1210 #define EXYNOS5420_PAD_RETENTION_UART_SYS_PWR_REG 0x1214 @@ -590,7 +605,13 @@ #define EXYNOS5420_CMU_RESET_MSC_SYS_PWR_REG 0x159C #define EXYNOS5420_CMU_RESET_FSYS_SYS_PWR_REG 0x15A0 #define EXYNOS5420_SFR_AXI_CGDIS1 0x15E4 +#define EXYNOS_ARM_CORE2_CONFIGURATION 0x2100 +#define EXYNOS5420_ARM_CORE2_OPTION 0x2108 +#define EXYNOS_ARM_CORE3_CONFIGURATION 0x2180 +#define EXYNOS5420_ARM_CORE3_OPTION 0x2188 +#define EXYNOS5420_ARM_COMMON_STATUS 0x2504 #define EXYNOS5420_ARM_COMMON_OPTION 0x2508 +#define EXYNOS5420_KFC_COMMON_STATUS 0x2584 #define EXYNOS5420_KFC_COMMON_OPTION 0x2588 #define EXYNOS5420_LOGIC_RESET_DURATION3 0x2D1C @@ -605,12 +626,42 @@ #define EXYNOS_PAD_RET_DRAM_OPTION 0x3008 #define EXYNOS_PAD_RET_MAUDIO_OPTION 0x3028 #define EXYNOS_PAD_RET_JTAG_OPTION 0x3048 +#define EXYNOS_PAD_RET_GPIO_OPTION 0x3108 +#define EXYNOS_PAD_RET_UART_OPTION 0x3128 +#define EXYNOS_PAD_RET_MMCA_OPTION 0x3148 +#define EXYNOS_PAD_RET_MMCB_OPTION 0x3168 #define EXYNOS_PAD_RET_EBIA_OPTION 0x3188 #define EXYNOS_PAD_RET_EBIB_OPTION 0x31A8 +#define EXYNOS_PS_HOLD_CONTROL 0x330C + +/* For SYS_PWR_REG */ +#define EXYNOS_SYS_PWR_CFG BIT(0) + +#define EXYNOS5420_MFC_CONFIGURATION 0x4060 +#define EXYNOS5420_MFC_STATUS 0x4064 +#define EXYNOS5420_MFC_OPTION 0x4068 +#define EXYNOS5420_G3D_CONFIGURATION 0x4080 +#define EXYNOS5420_G3D_STATUS 0x4084 +#define EXYNOS5420_G3D_OPTION 0x4088 +#define EXYNOS5420_DISP0_CONFIGURATION 0x40A0 +#define EXYNOS5420_DISP0_STATUS 0x40A4 +#define EXYNOS5420_DISP0_OPTION 0x40A8 +#define EXYNOS5420_DISP1_CONFIGURATION 0x40C0 +#define EXYNOS5420_DISP1_STATUS 0x40C4 +#define EXYNOS5420_DISP1_OPTION 0x40C8 +#define EXYNOS5420_MAU_CONFIGURATION 0x40E0 +#define EXYNOS5420_MAU_STATUS 0x40E4 +#define EXYNOS5420_MAU_OPTION 0x40E8 #define EXYNOS5420_FSYS2_OPTION 0x4168 #define EXYNOS5420_PSGEN_OPTION 0x4188 +/* For EXYNOS_CENTRAL_SEQ_OPTION */ +#define EXYNOS5_USE_STANDBYWFI_ARM_CORE0 BIT(16) +#define 
EXYNOS5_USE_STANDBYWFI_ARM_CORE1 BIT(17) +#define EXYNOS5_USE_STANDBYWFE_ARM_CORE0 BIT(24) +#define EXYNOS5_USE_STANDBYWFE_ARM_CORE1 BIT(25) + #define EXYNOS5420_ARM_USE_STANDBY_WFI0 BIT(4) #define EXYNOS5420_ARM_USE_STANDBY_WFI1 BIT(5) #define EXYNOS5420_ARM_USE_STANDBY_WFI2 BIT(6) @@ -639,22 +690,4 @@ | EXYNOS5420_KFC_USE_STANDBY_WFI2 \ | EXYNOS5420_KFC_USE_STANDBY_WFI3) -/* For Exynos5433 */ -#define EXYNOS5433_EINT_WAKEUP_MASK (0x060C) -#define EXYNOS5433_USBHOST30_PHY_CONTROL (0x0728) -#define EXYNOS5433_PAD_RETENTION_AUD_OPTION (0x3028) -#define EXYNOS5433_PAD_RETENTION_MMC2_OPTION (0x30C8) -#define EXYNOS5433_PAD_RETENTION_TOP_OPTION (0x3108) -#define EXYNOS5433_PAD_RETENTION_UART_OPTION (0x3128) -#define EXYNOS5433_PAD_RETENTION_MMC0_OPTION (0x3148) -#define EXYNOS5433_PAD_RETENTION_MMC1_OPTION (0x3168) -#define EXYNOS5433_PAD_RETENTION_EBIA_OPTION (0x3188) -#define EXYNOS5433_PAD_RETENTION_EBIB_OPTION (0x31A8) -#define EXYNOS5433_PAD_RETENTION_SPI_OPTION (0x31C8) -#define EXYNOS5433_PAD_RETENTION_MIF_OPTION (0x31E8) -#define EXYNOS5433_PAD_RETENTION_USBXTI_OPTION (0x3228) -#define EXYNOS5433_PAD_RETENTION_BOOTLDO_OPTION (0x3248) -#define EXYNOS5433_PAD_RETENTION_UFS_OPTION (0x3268) -#define EXYNOS5433_PAD_RETENTION_FSYSGENIO_OPTION (0x32A8) - #endif /* __LINUX_SOC_EXYNOS_REGS_PMU_H */ diff --git a/include/linux/soc/ti/knav_dma.h b/include/linux/soc/ti/knav_dma.h index 7127ec3015..35cb9264e0 100644 --- a/include/linux/soc/ti/knav_dma.h +++ b/include/linux/soc/ti/knav_dma.h @@ -17,8 +17,6 @@ #ifndef __SOC_TI_KEYSTONE_NAVIGATOR_DMA_H__ #define __SOC_TI_KEYSTONE_NAVIGATOR_DMA_H__ -#include - /* * PKTDMA descriptor manipulation macros for host packet descriptor */ @@ -43,8 +41,6 @@ #define KNAV_DMA_DESC_RETQ_SHIFT 0 #define KNAV_DMA_DESC_RETQ_MASK MASK(14) #define KNAV_DMA_DESC_BUF_LEN_MASK MASK(22) -#define KNAV_DMA_DESC_EFLAGS_MASK MASK(4) -#define KNAV_DMA_DESC_EFLAGS_SHIFT 20 #define KNAV_DMA_NUM_EPIB_WORDS 4 #define KNAV_DMA_NUM_PS_WORDS 16 @@ -167,8
+163,6 @@ struct knav_dma_desc { void *knav_dma_open_channel(struct device *dev, const char *name, struct knav_dma_cfg *config); void knav_dma_close_channel(void *channel); -int knav_dma_get_flow(void *channel); -bool knav_dma_device_ready(void); #else static inline void *knav_dma_open_channel(struct device *dev, const char *name, struct knav_dma_cfg *config) @@ -178,16 +172,6 @@ static inline void *knav_dma_open_channel(struct device *dev, const char *name, static inline void knav_dma_close_channel(void *channel) {} -static inline int knav_dma_get_flow(void *channel) -{ - return -EINVAL; -} - -static inline bool knav_dma_device_ready(void) -{ - return false; -} - #endif #endif /* __SOC_TI_KEYSTONE_NAVIGATOR_DMA_H__ */ diff --git a/include/linux/soc/ti/knav_qmss.h b/include/linux/soc/ti/knav_qmss.h index c75ef99c99..9f0ebb3bad 100644 --- a/include/linux/soc/ti/knav_qmss.h +++ b/include/linux/soc/ti/knav_qmss.h @@ -1,7 +1,7 @@ /* * Keystone Navigator Queue Management Sub-System header * - * Copyright (C) 2014 Texas Instruments Incorporated - https://www.ti.com + * Copyright (C) 2014 Texas Instruments Incorporated - http://www.ti.com * Author: Sandeep Nair * Cyril Chemparathy * Santosh Shilimkar @@ -86,6 +86,5 @@ int knav_pool_desc_map(void *ph, void *desc, unsigned size, void *knav_pool_desc_unmap(void *ph, dma_addr_t dma, unsigned dma_sz); dma_addr_t knav_pool_desc_virt_to_dma(void *ph, void *virt); void *knav_pool_desc_dma_to_virt(void *ph, dma_addr_t dma); -bool knav_qmss_device_ready(void); #endif /* __SOC_TI_KNAV_QMSS_H__ */ diff --git a/include/linux/soc/ti/ti-msgmgr.h b/include/linux/soc/ti/ti-msgmgr.h index 1f6e76d423..eac8e0c6fe 100644 --- a/include/linux/soc/ti/ti-msgmgr.h +++ b/include/linux/soc/ti/ti-msgmgr.h @@ -1,7 +1,7 @@ /* * Texas Instruments' Message Manager * - * Copyright (C) 2015-2016 Texas Instruments Incorporated - https://www.ti.com/ + * Copyright (C) 2015-2016 Texas Instruments Incorporated - http://www.ti.com/ * Nishanth Menon * * This 
program is free software; you can redistribute it and/or modify diff --git a/include/linux/sock_diag.h b/include/linux/sock_diag.h index 0b9ecd8cf9..6c9245f32f 100644 --- a/include/linux/sock_diag.h +++ b/include/linux/sock_diag.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef __SOCK_DIAG_H__ #define __SOCK_DIAG_H__ @@ -17,7 +16,7 @@ struct sock_diag_handler { int (*dump)(struct sk_buff *skb, struct nlmsghdr *nlh); int (*get_info)(struct sk_buff *skb, struct sock *sk); int (*destroy)(struct sk_buff *skb, struct nlmsghdr *nlh); -}; +} __do_const; int sock_diag_register(const struct sock_diag_handler *h); void sock_diag_unregister(const struct sock_diag_handler *h); @@ -25,19 +24,6 @@ void sock_diag_unregister(const struct sock_diag_handler *h); void sock_diag_register_inet_compat(int (*fn)(struct sk_buff *skb, struct nlmsghdr *nlh)); void sock_diag_unregister_inet_compat(int (*fn)(struct sk_buff *skb, struct nlmsghdr *nlh)); -u64 __sock_gen_cookie(struct sock *sk); - -static inline u64 sock_gen_cookie(struct sock *sk) -{ - u64 cookie; - - preempt_disable(); - cookie = __sock_gen_cookie(sk); - preempt_enable(); - - return cookie; -} - int sock_diag_check_cookie(struct sock *sk, const __u32 *cookie); void sock_diag_save_cookie(struct sock *sk, __u32 *cookie); diff --git a/include/linux/socket.h b/include/linux/socket.h index 041d6032a3..b5cc5a6d70 100644 --- a/include/linux/socket.h +++ b/include/linux/socket.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_SOCKET_H #define _LINUX_SOCKET_H @@ -10,10 +9,8 @@ #include /* __user */ #include -struct file; struct pid; struct cred; -struct socket; #define __sockaddr_check_size(size) \ BUILD_BUG_ON(((size) > sizeof(struct __kernel_sockaddr_storage))) @@ -28,7 +25,7 @@ typedef __kernel_sa_family_t sa_family_t; /* * 1003.1g requires sa_family_t and that sa_data is char. 
*/ - + struct sockaddr { sa_family_t sa_family; /* address family, AF_xxx */ char sa_data[14]; /* 14 bytes of protocol address */ @@ -46,27 +43,17 @@ struct linger { * system, not 4.3. Thus msg_accrights(len) are now missing. They * belong in an obscure libc emulation or the bin. */ - + struct msghdr { void *msg_name; /* ptr to socket address structure */ int msg_namelen; /* size of socket address structure */ struct iov_iter msg_iter; /* data */ - - /* - * Ancillary data. msg_control_user is the user buffer used for the - * recv* side when msg_control_is_user is set, msg_control is the kernel - * buffer used for all other cases. - */ - union { - void *msg_control; - void __user *msg_control_user; - }; - bool msg_control_is_user : 1; + void *msg_control; /* ancillary data */ __kernel_size_t msg_controllen; /* ancillary data buffer length */ unsigned int msg_flags; /* flags on received message */ struct kiocb *msg_iocb; /* ptr to iocb for async requests */ }; - + struct user_msghdr { void __user *msg_name; /* ptr to socket address structure */ int msg_namelen; /* size of socket address structure */ @@ -85,7 +72,7 @@ struct mmsghdr { /* * POSIX 1003.1g - ancillary data object information - * Ancillary data consists of a sequence of pairs of + * Ancillary data consits of a sequence of pairs of * (cmsghdr, cmsg_data[]) */ @@ -105,12 +92,9 @@ struct cmsghdr { #define CMSG_ALIGN(len) ( ((len)+sizeof(long)-1) & ~(sizeof(long)-1) ) -#define CMSG_DATA(cmsg) \ - ((void *)(cmsg) + sizeof(struct cmsghdr)) -#define CMSG_USER_DATA(cmsg) \ - ((void __user *)(cmsg) + sizeof(struct cmsghdr)) -#define CMSG_SPACE(len) (sizeof(struct cmsghdr) + CMSG_ALIGN(len)) -#define CMSG_LEN(len) (sizeof(struct cmsghdr) + (len)) +#define CMSG_DATA(cmsg) ((void *)((char *)(cmsg) + CMSG_ALIGN(sizeof(struct cmsghdr)))) +#define CMSG_SPACE(len) (CMSG_ALIGN(sizeof(struct cmsghdr)) + CMSG_ALIGN(len)) +#define CMSG_LEN(len) (CMSG_ALIGN(sizeof(struct cmsghdr)) + (len)) #define __CMSG_FIRSTHDR(ctl,len) 
((len) >= sizeof(struct cmsghdr) ? \ (struct cmsghdr *)(ctl) : \ @@ -137,7 +121,7 @@ struct cmsghdr { * inside range, given by msg->msg_controllen before using * ancillary object DATA. --ANK (980731) */ - + static inline struct cmsghdr * __cmsg_nxthdr(void *__ctl, __kernel_size_t __size, struct cmsghdr *__cmsg) { @@ -218,16 +202,8 @@ struct ucred { #define AF_VSOCK 40 /* vSockets */ #define AF_KCM 41 /* Kernel Connection Multiplexor*/ #define AF_QIPCRTR 42 /* Qualcomm IPC Router */ -#define AF_SMC 43 /* smc sockets: reserve number for - * PF_SMC protocol family that - * reuses AF_INET address family - */ -#define AF_XDP 44 /* XDP sockets */ -#define AF_MCTP 45 /* Management component - * transport protocol - */ -#define AF_MAX 46 /* For now.. */ +#define AF_MAX 43 /* For now.. */ /* Protocol families, same as address families. */ #define PF_UNSPEC AF_UNSPEC @@ -275,18 +251,15 @@ struct ucred { #define PF_VSOCK AF_VSOCK #define PF_KCM AF_KCM #define PF_QIPCRTR AF_QIPCRTR -#define PF_SMC AF_SMC -#define PF_XDP AF_XDP -#define PF_MCTP AF_MCTP #define PF_MAX AF_MAX /* Maximum queue length specifiable by listen. */ -#define SOMAXCONN 4096 +#define SOMAXCONN 128 -/* Flags we can use with send/ and recv. +/* Flags we can use with send/ and recv. 
Added those for 1003.1g not all are supported yet */ - + #define MSG_OOB 1 #define MSG_PEEK 2 #define MSG_DONTROUTE 4 @@ -305,16 +278,10 @@ struct ucred { #define MSG_NOSIGNAL 0x4000 /* Do not generate SIGPIPE */ #define MSG_MORE 0x8000 /* Sender will send more */ #define MSG_WAITFORONE 0x10000 /* recvmmsg(): block until 1+ packets avail */ -#define MSG_SENDPAGE_NOPOLICY 0x10000 /* sendpage() internal : do no apply policy */ #define MSG_SENDPAGE_NOTLAST 0x20000 /* sendpage() internal : not the last page */ #define MSG_BATCH 0x40000 /* sendmmsg(): more messages coming */ #define MSG_EOF MSG_FIN -#define MSG_NO_SHARED_FRAGS 0x80000 /* sendpage() internal : page frags are not shared */ -#define MSG_SENDPAGE_DECRYPTED 0x100000 /* sendpage() internal : page may carry - * plain text and require encryption - */ -#define MSG_ZEROCOPY 0x4000000 /* Use user data in kernel path */ #define MSG_FASTOPEN 0x20000000 /* Send data in TCP SYN */ #define MSG_CMSG_CLOEXEC 0x40000000 /* Set close_on_exec for file descriptor received through @@ -362,8 +329,6 @@ struct ucred { #define SOL_ALG 279 #define SOL_NFC 280 #define SOL_KCM 281 -#define SOL_TLS 282 -#define SOL_XDP 283 /* IPX options */ #define IPX_TYPE 1 @@ -371,78 +336,13 @@ struct ucred { extern int move_addr_to_kernel(void __user *uaddr, int ulen, struct sockaddr_storage *kaddr); extern int put_cmsg(struct msghdr*, int level, int type, int len, void *data); -struct timespec64; -struct __kernel_timespec; -struct old_timespec32; +struct timespec; -struct scm_timestamping_internal { - struct timespec64 ts[3]; -}; - -extern void put_cmsg_scm_timestamping64(struct msghdr *msg, struct scm_timestamping_internal *tss); -extern void put_cmsg_scm_timestamping(struct msghdr *msg, struct scm_timestamping_internal *tss); - -/* The __sys_...msg variants allow MSG_CMSG_COMPAT iff - * forbid_cmsg_compat==false - */ -extern long __sys_recvmsg(int fd, struct user_msghdr __user *msg, - unsigned int flags, bool forbid_cmsg_compat); -extern long 
__sys_sendmsg(int fd, struct user_msghdr __user *msg, - unsigned int flags, bool forbid_cmsg_compat); -extern int __sys_recvmmsg(int fd, struct mmsghdr __user *mmsg, - unsigned int vlen, unsigned int flags, - struct __kernel_timespec __user *timeout, - struct old_timespec32 __user *timeout32); +/* The __sys_...msg variants allow MSG_CMSG_COMPAT */ +extern long __sys_recvmsg(int fd, struct user_msghdr __user *msg, unsigned flags); +extern long __sys_sendmsg(int fd, struct user_msghdr __user *msg, unsigned flags); +extern int __sys_recvmmsg(int fd, struct mmsghdr __user *mmsg, unsigned int vlen, + unsigned int flags, struct timespec *timeout); extern int __sys_sendmmsg(int fd, struct mmsghdr __user *mmsg, - unsigned int vlen, unsigned int flags, - bool forbid_cmsg_compat); -extern long __sys_sendmsg_sock(struct socket *sock, struct msghdr *msg, - unsigned int flags); -extern long __sys_recvmsg_sock(struct socket *sock, struct msghdr *msg, - struct user_msghdr __user *umsg, - struct sockaddr __user *uaddr, - unsigned int flags); -extern int sendmsg_copy_msghdr(struct msghdr *msg, - struct user_msghdr __user *umsg, unsigned flags, - struct iovec **iov); -extern int recvmsg_copy_msghdr(struct msghdr *msg, - struct user_msghdr __user *umsg, unsigned flags, - struct sockaddr __user **uaddr, - struct iovec **iov); -extern int __copy_msghdr_from_user(struct msghdr *kmsg, - struct user_msghdr __user *umsg, - struct sockaddr __user **save_addr, - struct iovec __user **uiov, size_t *nsegs); - -/* helpers which do the actual work for syscalls */ -extern int __sys_recvfrom(int fd, void __user *ubuf, size_t size, - unsigned int flags, struct sockaddr __user *addr, - int __user *addr_len); -extern int __sys_sendto(int fd, void __user *buff, size_t len, - unsigned int flags, struct sockaddr __user *addr, - int addr_len); -extern int __sys_accept4_file(struct file *file, unsigned file_flags, - struct sockaddr __user *upeer_sockaddr, - int __user *upeer_addrlen, int flags, - unsigned 
long nofile); -extern struct file *do_accept(struct file *file, unsigned file_flags, - struct sockaddr __user *upeer_sockaddr, - int __user *upeer_addrlen, int flags); -extern int __sys_accept4(int fd, struct sockaddr __user *upeer_sockaddr, - int __user *upeer_addrlen, int flags); -extern int __sys_socket(int family, int type, int protocol); -extern int __sys_bind(int fd, struct sockaddr __user *umyaddr, int addrlen); -extern int __sys_connect_file(struct file *file, struct sockaddr_storage *addr, - int addrlen, int file_flags); -extern int __sys_connect(int fd, struct sockaddr __user *uservaddr, - int addrlen); -extern int __sys_listen(int fd, int backlog); -extern int __sys_getsockname(int fd, struct sockaddr __user *usockaddr, - int __user *usockaddr_len); -extern int __sys_getpeername(int fd, struct sockaddr __user *usockaddr, - int __user *usockaddr_len); -extern int __sys_socketpair(int family, int type, int protocol, - int __user *usockvec); -extern int __sys_shutdown_sock(struct socket *sock, int how); -extern int __sys_shutdown(int fd, int how); + unsigned int vlen, unsigned int flags); #endif /* _LINUX_SOCKET_H */ diff --git a/include/linux/sonet.h b/include/linux/sonet.h index 2b802b6d12..f13aeb0437 100644 --- a/include/linux/sonet.h +++ b/include/linux/sonet.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ /* sonet.h - SONET/SHD physical layer control */ #ifndef LINUX_SONET_H #define LINUX_SONET_H @@ -8,7 +7,7 @@ #include struct k_sonet_stats { -#define __HANDLE_ITEM(i) atomic_t i +#define __HANDLE_ITEM(i) atomic_unchecked_t i __SONET_ITEMS #undef __HANDLE_ITEM }; diff --git a/include/linux/sony-laptop.h b/include/linux/sony-laptop.h index 1e3c92feea..e2e036d94e 100644 --- a/include/linux/sony-laptop.h +++ b/include/linux/sony-laptop.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _SONYLAPTOP_H_ #define _SONYLAPTOP_H_ @@ -28,11 +27,7 @@ #define SONY_PIC_COMMAND_GETCAMERAROMVERSION 18 /* obsolete */ #define 
SONY_PIC_COMMAND_GETCAMERAREVISION 19 /* obsolete */ -#if IS_ENABLED(CONFIG_SONY_LAPTOP) int sony_pic_camera_command(int command, u8 value); -#else -static inline int sony_pic_camera_command(int command, u8 value) { return 0; } -#endif #endif /* __KERNEL__ */ diff --git a/include/linux/sonypi.h b/include/linux/sonypi.h index 50e48e94ad..0b7cc265cc 100644 --- a/include/linux/sonypi.h +++ b/include/linux/sonypi.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * Sony Programmable I/O Control Device driver for VAIO * @@ -17,6 +16,21 @@ * Copyright (C) 2000 Andrew Tridgell * * Earlier work by Werner Almesberger, Paul `Rusty' Russell and Paul Mackerras. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 
+ * */ #ifndef _SONYPI_H_ #define _SONYPI_H_ diff --git a/include/linux/sort.h b/include/linux/sort.h index b5898725fe..d534da2b55 100644 --- a/include/linux/sort.h +++ b/include/linux/sort.h @@ -1,16 +1,10 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_SORT_H #define _LINUX_SORT_H #include -void sort_r(void *base, size_t num, size_t size, - cmp_r_func_t cmp_func, - swap_func_t swap_func, - const void *priv); - void sort(void *base, size_t num, size_t size, - cmp_func_t cmp_func, - swap_func_t swap_func); + int (*cmp)(const void *, const void *), + void (*swap)(void *, void *, int)); #endif diff --git a/include/linux/sound.h b/include/linux/sound.h index ec85b7a1f8..73ded040f1 100644 --- a/include/linux/sound.h +++ b/include/linux/sound.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_SOUND_H #define _LINUX_SOUND_H @@ -12,9 +11,11 @@ struct device; extern int register_sound_special(const struct file_operations *fops, int unit); extern int register_sound_special_device(const struct file_operations *fops, int unit, struct device *dev); extern int register_sound_mixer(const struct file_operations *fops, int dev); +extern int register_sound_midi(const struct file_operations *fops, int dev); extern int register_sound_dsp(const struct file_operations *fops, int dev); extern void unregister_sound_special(int unit); extern void unregister_sound_mixer(int unit); +extern void unregister_sound_midi(int unit); extern void unregister_sound_dsp(int unit); #endif /* _LINUX_SOUND_H */ diff --git a/include/linux/spi/ad7877.h b/include/linux/spi/ad7877.h index b7be843c88..cdbed816f2 100644 --- a/include/linux/spi/ad7877.h +++ b/include/linux/spi/ad7877.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ /* linux/spi/ad7877.h */ /* Touchscreen characteristics vary between boards and models. 
The diff --git a/include/linux/spi/adi_spi3.h b/include/linux/spi/adi_spi3.h new file mode 100644 index 0000000000..c84123aa1d --- /dev/null +++ b/include/linux/spi/adi_spi3.h @@ -0,0 +1,254 @@ +/* + * Analog Devices SPI3 controller driver + * + * Copyright (c) 2014 Analog Devices Inc. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#ifndef _ADI_SPI3_H_ +#define _ADI_SPI3_H_ + +#include + +/* SPI_CONTROL */ +#define SPI_CTL_EN 0x00000001 /* Enable */ +#define SPI_CTL_MSTR 0x00000002 /* Master/Slave */ +#define SPI_CTL_PSSE 0x00000004 /* controls modf error in master mode */ +#define SPI_CTL_ODM 0x00000008 /* Open Drain Mode */ +#define SPI_CTL_CPHA 0x00000010 /* Clock Phase */ +#define SPI_CTL_CPOL 0x00000020 /* Clock Polarity */ +#define SPI_CTL_ASSEL 0x00000040 /* Slave Select Pin Control */ +#define SPI_CTL_SELST 0x00000080 /* Slave Select Polarity in-between transfers */ +#define SPI_CTL_EMISO 0x00000100 /* Enable MISO */ +#define SPI_CTL_SIZE 0x00000600 /* Word Transfer Size */ +#define SPI_CTL_SIZE08 0x00000000 /* SIZE: 8 bits */ +#define SPI_CTL_SIZE16 0x00000200 /* SIZE: 16 bits */ +#define SPI_CTL_SIZE32 0x00000400 /* SIZE: 32 bits */ +#define SPI_CTL_LSBF 0x00001000 /* LSB First */ +#define SPI_CTL_FCEN 0x00002000 /* Flow-Control Enable */ +#define SPI_CTL_FCCH 0x00004000 /* Flow-Control Channel Selection */ +#define SPI_CTL_FCPL 0x00008000 /* Flow-Control Polarity */ +#define SPI_CTL_FCWM 0x00030000 /* Flow-Control Water-Mark */ +#define SPI_CTL_FIFO0 0x00000000 /* FCWM: TFIFO empty or RFIFO Full */ +#define SPI_CTL_FIFO1 0x00010000 /* FCWM: TFIFO 75% or 
more empty or RFIFO 75% or more full */ +#define SPI_CTL_FIFO2 0x00020000 /* FCWM: TFIFO 50% or more empty or RFIFO 50% or more full */ +#define SPI_CTL_FMODE 0x00040000 /* Fast-mode Enable */ +#define SPI_CTL_MIOM 0x00300000 /* Multiple I/O Mode */ +#define SPI_CTL_MIO_DIS 0x00000000 /* MIOM: Disable */ +#define SPI_CTL_MIO_DUAL 0x00100000 /* MIOM: Enable DIOM (Dual I/O Mode) */ +#define SPI_CTL_MIO_QUAD 0x00200000 /* MIOM: Enable QUAD (Quad SPI Mode) */ +#define SPI_CTL_SOSI 0x00400000 /* Start on MOSI */ +/* SPI_RX_CONTROL */ +#define SPI_RXCTL_REN 0x00000001 /* Receive Channel Enable */ +#define SPI_RXCTL_RTI 0x00000004 /* Receive Transfer Initiate */ +#define SPI_RXCTL_RWCEN 0x00000008 /* Receive Word Counter Enable */ +#define SPI_RXCTL_RDR 0x00000070 /* Receive Data Request */ +#define SPI_RXCTL_RDR_DIS 0x00000000 /* RDR: Disabled */ +#define SPI_RXCTL_RDR_NE 0x00000010 /* RDR: RFIFO not empty */ +#define SPI_RXCTL_RDR_25 0x00000020 /* RDR: RFIFO 25% full */ +#define SPI_RXCTL_RDR_50 0x00000030 /* RDR: RFIFO 50% full */ +#define SPI_RXCTL_RDR_75 0x00000040 /* RDR: RFIFO 75% full */ +#define SPI_RXCTL_RDR_FULL 0x00000050 /* RDR: RFIFO full */ +#define SPI_RXCTL_RDO 0x00000100 /* Receive Data Over-Run */ +#define SPI_RXCTL_RRWM 0x00003000 /* FIFO Regular Water-Mark */ +#define SPI_RXCTL_RWM_0 0x00000000 /* RRWM: RFIFO Empty */ +#define SPI_RXCTL_RWM_25 0x00001000 /* RRWM: RFIFO 25% full */ +#define SPI_RXCTL_RWM_50 0x00002000 /* RRWM: RFIFO 50% full */ +#define SPI_RXCTL_RWM_75 0x00003000 /* RRWM: RFIFO 75% full */ +#define SPI_RXCTL_RUWM 0x00070000 /* FIFO Urgent Water-Mark */ +#define SPI_RXCTL_UWM_DIS 0x00000000 /* RUWM: Disabled */ +#define SPI_RXCTL_UWM_25 0x00010000 /* RUWM: RFIFO 25% full */ +#define SPI_RXCTL_UWM_50 0x00020000 /* RUWM: RFIFO 50% full */ +#define SPI_RXCTL_UWM_75 0x00030000 /* RUWM: RFIFO 75% full */ +#define SPI_RXCTL_UWM_FULL 0x00040000 /* RUWM: RFIFO full */ +/* SPI_TX_CONTROL */ +#define SPI_TXCTL_TEN 0x00000001 /* Transmit Channel 
Enable */ +#define SPI_TXCTL_TTI 0x00000004 /* Transmit Transfer Initiate */ +#define SPI_TXCTL_TWCEN 0x00000008 /* Transmit Word Counter Enable */ +#define SPI_TXCTL_TDR 0x00000070 /* Transmit Data Request */ +#define SPI_TXCTL_TDR_DIS 0x00000000 /* TDR: Disabled */ +#define SPI_TXCTL_TDR_NF 0x00000010 /* TDR: TFIFO not full */ +#define SPI_TXCTL_TDR_25 0x00000020 /* TDR: TFIFO 25% empty */ +#define SPI_TXCTL_TDR_50 0x00000030 /* TDR: TFIFO 50% empty */ +#define SPI_TXCTL_TDR_75 0x00000040 /* TDR: TFIFO 75% empty */ +#define SPI_TXCTL_TDR_EMPTY 0x00000050 /* TDR: TFIFO empty */ +#define SPI_TXCTL_TDU 0x00000100 /* Transmit Data Under-Run */ +#define SPI_TXCTL_TRWM 0x00003000 /* FIFO Regular Water-Mark */ +#define SPI_TXCTL_RWM_FULL 0x00000000 /* TRWM: TFIFO full */ +#define SPI_TXCTL_RWM_25 0x00001000 /* TRWM: TFIFO 25% empty */ +#define SPI_TXCTL_RWM_50 0x00002000 /* TRWM: TFIFO 50% empty */ +#define SPI_TXCTL_RWM_75 0x00003000 /* TRWM: TFIFO 75% empty */ +#define SPI_TXCTL_TUWM 0x00070000 /* FIFO Urgent Water-Mark */ +#define SPI_TXCTL_UWM_DIS 0x00000000 /* TUWM: Disabled */ +#define SPI_TXCTL_UWM_25 0x00010000 /* TUWM: TFIFO 25% empty */ +#define SPI_TXCTL_UWM_50 0x00020000 /* TUWM: TFIFO 50% empty */ +#define SPI_TXCTL_UWM_75 0x00030000 /* TUWM: TFIFO 75% empty */ +#define SPI_TXCTL_UWM_EMPTY 0x00040000 /* TUWM: TFIFO empty */ +/* SPI_CLOCK */ +#define SPI_CLK_BAUD 0x0000FFFF /* Baud Rate */ +/* SPI_DELAY */ +#define SPI_DLY_STOP 0x000000FF /* Transfer delay time in multiples of SCK period */ +#define SPI_DLY_LEADX 0x00000100 /* Extended (1 SCK) LEAD Control */ +#define SPI_DLY_LAGX 0x00000200 /* Extended (1 SCK) LAG control */ +/* SPI_SSEL */ +#define SPI_SLVSEL_SSE1 0x00000002 /* SPISSEL1 Enable */ +#define SPI_SLVSEL_SSE2 0x00000004 /* SPISSEL2 Enable */ +#define SPI_SLVSEL_SSE3 0x00000008 /* SPISSEL3 Enable */ +#define SPI_SLVSEL_SSE4 0x00000010 /* SPISSEL4 Enable */ +#define SPI_SLVSEL_SSE5 0x00000020 /* SPISSEL5 Enable */ +#define SPI_SLVSEL_SSE6 
0x00000040 /* SPISSEL6 Enable */ +#define SPI_SLVSEL_SSE7 0x00000080 /* SPISSEL7 Enable */ +#define SPI_SLVSEL_SSEL1 0x00000200 /* SPISSEL1 Value */ +#define SPI_SLVSEL_SSEL2 0x00000400 /* SPISSEL2 Value */ +#define SPI_SLVSEL_SSEL3 0x00000800 /* SPISSEL3 Value */ +#define SPI_SLVSEL_SSEL4 0x00001000 /* SPISSEL4 Value */ +#define SPI_SLVSEL_SSEL5 0x00002000 /* SPISSEL5 Value */ +#define SPI_SLVSEL_SSEL6 0x00004000 /* SPISSEL6 Value */ +#define SPI_SLVSEL_SSEL7 0x00008000 /* SPISSEL7 Value */ +/* SPI_RWC */ +#define SPI_RWC_VALUE 0x0000FFFF /* Received Word-Count */ +/* SPI_RWCR */ +#define SPI_RWCR_VALUE 0x0000FFFF /* Received Word-Count Reload */ +/* SPI_TWC */ +#define SPI_TWC_VALUE 0x0000FFFF /* Transmitted Word-Count */ +/* SPI_TWCR */ +#define SPI_TWCR_VALUE 0x0000FFFF /* Transmitted Word-Count Reload */ +/* SPI_IMASK */ +#define SPI_IMSK_RUWM 0x00000002 /* Receive Urgent Water-Mark Interrupt Mask */ +#define SPI_IMSK_TUWM 0x00000004 /* Transmit Urgent Water-Mark Interrupt Mask */ +#define SPI_IMSK_ROM 0x00000010 /* Receive Over-Run Error Interrupt Mask */ +#define SPI_IMSK_TUM 0x00000020 /* Transmit Under-Run Error Interrupt Mask */ +#define SPI_IMSK_TCM 0x00000040 /* Transmit Collision Error Interrupt Mask */ +#define SPI_IMSK_MFM 0x00000080 /* Mode Fault Error Interrupt Mask */ +#define SPI_IMSK_RSM 0x00000100 /* Receive Start Interrupt Mask */ +#define SPI_IMSK_TSM 0x00000200 /* Transmit Start Interrupt Mask */ +#define SPI_IMSK_RFM 0x00000400 /* Receive Finish Interrupt Mask */ +#define SPI_IMSK_TFM 0x00000800 /* Transmit Finish Interrupt Mask */ +/* SPI_IMASKCL */ +#define SPI_IMSK_CLR_RUW 0x00000002 /* Receive Urgent Water-Mark Interrupt Mask */ +#define SPI_IMSK_CLR_TUWM 0x00000004 /* Transmit Urgent Water-Mark Interrupt Mask */ +#define SPI_IMSK_CLR_ROM 0x00000010 /* Receive Over-Run Error Interrupt Mask */ +#define SPI_IMSK_CLR_TUM 0x00000020 /* Transmit Under-Run Error Interrupt Mask */ +#define SPI_IMSK_CLR_TCM 0x00000040 /* Transmit Collision 
Error Interrupt Mask */ +#define SPI_IMSK_CLR_MFM 0x00000080 /* Mode Fault Error Interrupt Mask */ +#define SPI_IMSK_CLR_RSM 0x00000100 /* Receive Start Interrupt Mask */ +#define SPI_IMSK_CLR_TSM 0x00000200 /* Transmit Start Interrupt Mask */ +#define SPI_IMSK_CLR_RFM 0x00000400 /* Receive Finish Interrupt Mask */ +#define SPI_IMSK_CLR_TFM 0x00000800 /* Transmit Finish Interrupt Mask */ +/* SPI_IMASKST */ +#define SPI_IMSK_SET_RUWM 0x00000002 /* Receive Urgent Water-Mark Interrupt Mask */ +#define SPI_IMSK_SET_TUWM 0x00000004 /* Transmit Urgent Water-Mark Interrupt Mask */ +#define SPI_IMSK_SET_ROM 0x00000010 /* Receive Over-Run Error Interrupt Mask */ +#define SPI_IMSK_SET_TUM 0x00000020 /* Transmit Under-Run Error Interrupt Mask */ +#define SPI_IMSK_SET_TCM 0x00000040 /* Transmit Collision Error Interrupt Mask */ +#define SPI_IMSK_SET_MFM 0x00000080 /* Mode Fault Error Interrupt Mask */ +#define SPI_IMSK_SET_RSM 0x00000100 /* Receive Start Interrupt Mask */ +#define SPI_IMSK_SET_TSM 0x00000200 /* Transmit Start Interrupt Mask */ +#define SPI_IMSK_SET_RFM 0x00000400 /* Receive Finish Interrupt Mask */ +#define SPI_IMSK_SET_TFM 0x00000800 /* Transmit Finish Interrupt Mask */ +/* SPI_STATUS */ +#define SPI_STAT_SPIF 0x00000001 /* SPI Finished */ +#define SPI_STAT_RUWM 0x00000002 /* Receive Urgent Water-Mark Breached */ +#define SPI_STAT_TUWM 0x00000004 /* Transmit Urgent Water-Mark Breached */ +#define SPI_STAT_ROE 0x00000010 /* Receive Over-Run Error Indication */ +#define SPI_STAT_TUE 0x00000020 /* Transmit Under-Run Error Indication */ +#define SPI_STAT_TCE 0x00000040 /* Transmit Collision Error Indication */ +#define SPI_STAT_MODF 0x00000080 /* Mode Fault Error Indication */ +#define SPI_STAT_RS 0x00000100 /* Receive Start Indication */ +#define SPI_STAT_TS 0x00000200 /* Transmit Start Indication */ +#define SPI_STAT_RF 0x00000400 /* Receive Finish Indication */ +#define SPI_STAT_TF 0x00000800 /* Transmit Finish Indication */ +#define SPI_STAT_RFS 0x00007000 /* 
SPI_RFIFO status */ +#define SPI_STAT_RFIFO_EMPTY 0x00000000 /* RFS: RFIFO Empty */ +#define SPI_STAT_RFIFO_25 0x00001000 /* RFS: RFIFO 25% Full */ +#define SPI_STAT_RFIFO_50 0x00002000 /* RFS: RFIFO 50% Full */ +#define SPI_STAT_RFIFO_75 0x00003000 /* RFS: RFIFO 75% Full */ +#define SPI_STAT_RFIFO_FULL 0x00004000 /* RFS: RFIFO Full */ +#define SPI_STAT_TFS 0x00070000 /* SPI_TFIFO status */ +#define SPI_STAT_TFIFO_FULL 0x00000000 /* TFS: TFIFO full */ +#define SPI_STAT_TFIFO_25 0x00010000 /* TFS: TFIFO 25% empty */ +#define SPI_STAT_TFIFO_50 0x00020000 /* TFS: TFIFO 50% empty */ +#define SPI_STAT_TFIFO_75 0x00030000 /* TFS: TFIFO 75% empty */ +#define SPI_STAT_TFIFO_EMPTY 0x00040000 /* TFS: TFIFO empty */ +#define SPI_STAT_FCS 0x00100000 /* Flow-Control Stall Indication */ +#define SPI_STAT_RFE 0x00400000 /* SPI_RFIFO Empty */ +#define SPI_STAT_TFF 0x00800000 /* SPI_TFIFO Full */ +/* SPI_ILAT */ +#define SPI_ILAT_RUWMI 0x00000002 /* Receive Urgent Water Mark Interrupt */ +#define SPI_ILAT_TUWMI 0x00000004 /* Transmit Urgent Water Mark Interrupt */ +#define SPI_ILAT_ROI 0x00000010 /* Receive Over-Run Error Indication */ +#define SPI_ILAT_TUI 0x00000020 /* Transmit Under-Run Error Indication */ +#define SPI_ILAT_TCI 0x00000040 /* Transmit Collision Error Indication */ +#define SPI_ILAT_MFI 0x00000080 /* Mode Fault Error Indication */ +#define SPI_ILAT_RSI 0x00000100 /* Receive Start Indication */ +#define SPI_ILAT_TSI 0x00000200 /* Transmit Start Indication */ +#define SPI_ILAT_RFI 0x00000400 /* Receive Finish Indication */ +#define SPI_ILAT_TFI 0x00000800 /* Transmit Finish Indication */ +/* SPI_ILATCL */ +#define SPI_ILAT_CLR_RUWMI 0x00000002 /* Receive Urgent Water Mark Interrupt */ +#define SPI_ILAT_CLR_TUWMI 0x00000004 /* Transmit Urgent Water Mark Interrupt */ +#define SPI_ILAT_CLR_ROI 0x00000010 /* Receive Over-Run Error Indication */ +#define SPI_ILAT_CLR_TUI 0x00000020 /* Transmit Under-Run Error Indication */ +#define SPI_ILAT_CLR_TCI 0x00000040 /* Transmit 
Collision Error Indication */ +#define SPI_ILAT_CLR_MFI 0x00000080 /* Mode Fault Error Indication */ +#define SPI_ILAT_CLR_RSI 0x00000100 /* Receive Start Indication */ +#define SPI_ILAT_CLR_TSI 0x00000200 /* Transmit Start Indication */ +#define SPI_ILAT_CLR_RFI 0x00000400 /* Receive Finish Indication */ +#define SPI_ILAT_CLR_TFI 0x00000800 /* Transmit Finish Indication */ + +/* + * adi spi3 registers layout + */ +struct adi_spi_regs { + u32 revid; + u32 control; + u32 rx_control; + u32 tx_control; + u32 clock; + u32 delay; + u32 ssel; + u32 rwc; + u32 rwcr; + u32 twc; + u32 twcr; + u32 reserved0; + u32 emask; + u32 emaskcl; + u32 emaskst; + u32 reserved1; + u32 status; + u32 elat; + u32 elatcl; + u32 reserved2; + u32 rfifo; + u32 reserved3; + u32 tfifo; +}; + +#define MAX_CTRL_CS 8 /* cs in spi controller */ + +/* device.platform_data for SSP controller devices */ +struct adi_spi3_master { + u16 num_chipselect; + u16 pin_req[7]; +}; + +/* spi_board_info.controller_data for SPI slave devices, + * copied to spi_device.platform_data ... mostly for dma tuning + */ +struct adi_spi3_chip { + u32 control; + u16 cs_chg_udelay; /* Some devices require 16-bit delays */ + u32 tx_dummy_val; /* tx value for rx only transfer */ + bool enable_dma; +}; + +#endif /* _ADI_SPI3_H_ */ diff --git a/include/linux/spi/ads7846.h b/include/linux/spi/ads7846.h index 1a5eaef3b7..2f694f3846 100644 --- a/include/linux/spi/ads7846.h +++ b/include/linux/spi/ads7846.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ /* linux/spi/ads7846.h */ /* Touchscreen characteristics vary between boards and models. The diff --git a/include/linux/spi/at73c213.h b/include/linux/spi/at73c213.h index cbca6654aa..0f20a70e5e 100644 --- a/include/linux/spi/at73c213.h +++ b/include/linux/spi/at73c213.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ /* * Board-specific data used to set up AT73c213 audio DAC driver. 
*/ diff --git a/include/linux/spi/at86rf230.h b/include/linux/spi/at86rf230.h index d278576ab6..b63fe6f5fd 100644 --- a/include/linux/spi/at86rf230.h +++ b/include/linux/spi/at86rf230.h @@ -1,9 +1,17 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * AT86RF230/RF231 driver * * Copyright (C) 2009-2012 Siemens AG * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * * Written by: * Dmitry Eremin-Solenikov */ diff --git a/include/linux/spi/cc2520.h b/include/linux/spi/cc2520.h index 449bacf107..85b8ee67e9 100644 --- a/include/linux/spi/cc2520.h +++ b/include/linux/spi/cc2520.h @@ -1,9 +1,14 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ /* Header file for cc2520 radio driver * * Copyright (C) 2014 Varka Bhadram * Md.Jamal Mohiuddin * P Sowjanya + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. 
+ * */ #ifndef __CC2520_H diff --git a/include/linux/spi/corgi_lcd.h b/include/linux/spi/corgi_lcd.h index 0b85761691..6692b3418c 100644 --- a/include/linux/spi/corgi_lcd.h +++ b/include/linux/spi/corgi_lcd.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef __LINUX_SPI_CORGI_LCD_H #define __LINUX_SPI_CORGI_LCD_H @@ -11,6 +10,9 @@ struct corgi_lcd_platform_data { int default_intensity; int limit_mask; + int gpio_backlight_on; /* -1 if n/a */ + int gpio_backlight_cont; /* -1 if n/a */ + void (*notify)(int intensity); void (*kick_battery)(void); }; diff --git a/include/linux/spi/ds1305.h b/include/linux/spi/ds1305.h index 82db6cd152..287ec830ea 100644 --- a/include/linux/spi/ds1305.h +++ b/include/linux/spi/ds1305.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef __LINUX_SPI_DS1305_H #define __LINUX_SPI_DS1305_H diff --git a/include/linux/spi/eeprom.h b/include/linux/spi/eeprom.h index 1cca3dd5a7..e34e169f9d 100644 --- a/include/linux/spi/eeprom.h +++ b/include/linux/spi/eeprom.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef __LINUX_SPI_EEPROM_H #define __LINUX_SPI_EEPROM_H @@ -14,7 +13,7 @@ struct spi_eeprom { u32 byte_len; char name[10]; - u32 page_size; /* for writes */ + u16 page_size; /* for writes */ u16 flags; #define EE_ADDR1 0x0001 /* 8 bit addrs */ #define EE_ADDR2 0x0002 /* 16 bit addrs */ diff --git a/include/linux/spi/flash.h b/include/linux/spi/flash.h index 2401a08872..3f22932e67 100644 --- a/include/linux/spi/flash.h +++ b/include/linux/spi/flash.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef LINUX_SPI_FLASH_H #define LINUX_SPI_FLASH_H @@ -8,7 +7,7 @@ struct mtd_partition; * struct flash_platform_data: board-specific flash data * @name: optional flash device name (eg, as used with mtdparts=) * @parts: optional array of mtd_partitions for static partitioning - * @nr_parts: number of mtd_partitions for static partitioning + * @nr_parts: number of mtd_partitions for static partitoning * 
@type: optional flash device type (e.g. m25p80 vs m25p64), for use * with chips that can't be queried for JEDEC or other IDs * diff --git a/include/linux/spi/ifx_modem.h b/include/linux/spi/ifx_modem.h index 6d19b09139..394fec9e77 100644 --- a/include/linux/spi/ifx_modem.h +++ b/include/linux/spi/ifx_modem.h @@ -1,9 +1,13 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef LINUX_IFX_MODEM_H #define LINUX_IFX_MODEM_H struct ifx_modem_platform_data { + unsigned short rst_out; /* modem reset out */ + unsigned short pwr_on; /* power on */ + unsigned short rst_pmu; /* reset modem */ unsigned short tx_pwr; /* modem power threshold */ + unsigned short srdy; /* SRDY */ + unsigned short mrdy; /* MRDY */ unsigned char modem_type; /* Modem type */ unsigned long max_hz; /* max SPI frequency */ unsigned short use_dma:1; /* spi protocol driver supplies diff --git a/include/linux/spi/l4f00242t03.h b/include/linux/spi/l4f00242t03.h new file mode 100644 index 0000000000..e69e9b51b2 --- /dev/null +++ b/include/linux/spi/l4f00242t03.h @@ -0,0 +1,25 @@ +/* + * l4f00242t03.h -- Platform glue for Epson L4F00242T03 LCD + * + * Copyright (c) 2009 Alberto Panizzo + * Based on Marek Vasut work in lms283gf05.h + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+*/ + +#ifndef _INCLUDE_LINUX_SPI_L4F00242T03_H_ +#define _INCLUDE_LINUX_SPI_L4F00242T03_H_ + +struct l4f00242t03_pdata { + unsigned int reset_gpio; + unsigned int data_enable_gpio; +}; + +#endif /* _INCLUDE_LINUX_SPI_L4F00242T03_H_ */ diff --git a/include/linux/spi/libertas_spi.h b/include/linux/spi/libertas_spi.h index 156326d9b5..1b5d5384fc 100644 --- a/include/linux/spi/libertas_spi.h +++ b/include/linux/spi/libertas_spi.h @@ -1,8 +1,12 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * board-specific data for the libertas_spi driver. * * Copyright 2008 Analog Devices Inc. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or (at + * your option) any later version. */ #ifndef _LIBERTAS_SPI_H_ #define _LIBERTAS_SPI_H_ diff --git a/include/linux/spi/lms283gf05.h b/include/linux/spi/lms283gf05.h index f237b2d062..fdd1d1d51d 100644 --- a/include/linux/spi/lms283gf05.h +++ b/include/linux/spi/lms283gf05.h @@ -1,8 +1,16 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * lms283gf05.h - Platform glue for Samsung LMS283GF05 LCD * * Copyright (C) 2009 Marek Vasut + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
*/ #ifndef _INCLUDE_LINUX_SPI_LMS283GF05_H_ diff --git a/include/linux/spi/max7301.h b/include/linux/spi/max7301.h index 21449067ae..bcaa2f762c 100644 --- a/include/linux/spi/max7301.h +++ b/include/linux/spi/max7301.h @@ -1,8 +1,7 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef LINUX_SPI_MAX7301_H #define LINUX_SPI_MAX7301_H -#include +#include /* * Some registers must be read back to modify. diff --git a/include/linux/spi/mc33880.h b/include/linux/spi/mc33880.h index 205a49cb99..82ffccd6fb 100644 --- a/include/linux/spi/mc33880.h +++ b/include/linux/spi/mc33880.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef LINUX_SPI_MC33880_H #define LINUX_SPI_MC33880_H diff --git a/include/linux/spi/mcp23s08.h b/include/linux/spi/mcp23s08.h new file mode 100644 index 0000000000..aa07d7b325 --- /dev/null +++ b/include/linux/spi/mcp23s08.h @@ -0,0 +1,43 @@ + +/* FIXME driver should be able to handle IRQs... */ + +struct mcp23s08_chip_info { + bool is_present; /* true if populated */ + unsigned pullups; /* BIT(x) means enable pullup x */ +}; + +struct mcp23s08_platform_data { + /* For mcp23s08, up to 4 slaves (numbered 0..3) can share one SPI + * chipselect, each providing 1 gpio_chip instance with 8 gpios. + * For mpc23s17, up to 8 slaves (numbered 0..7) can share one SPI + * chipselect, each providing 1 gpio_chip (port A + port B) with + * 16 gpios. + */ + struct mcp23s08_chip_info chip[8]; + + /* "base" is the number of the first GPIO. Dynamic assignment is + * not currently supported, and even if there are gaps in chip + * addressing the GPIO numbers are sequential .. so for example + * if only slaves 0 and 3 are present, their GPIOs range from + * base to base+15 (or base+31 for s17 variant). + */ + unsigned base; + /* Marks the device as a interrupt controller. + * NOTE: The interrupt functionality is only supported for i2c + * versions of the chips. The spi chips can also do the interrupts, + * but this is not supported by the linux driver yet. 
+ */ + bool irq_controller; + + /* Sets the mirror flag in the IOCON register. Devices + * with two interrupt outputs (these are the devices ending with 17 and + * those that have 16 IOs) have two IO banks: IO 0-7 form bank 1 and + * IO 8-15 are bank 2. These chips have two different interrupt outputs: + * One for bank 1 and another for bank 2. If irq-mirror is set, both + * interrupts are generated regardless of the bank that an input change + * occurred on. If it is not set, the interrupt are only generated for + * the bank they belong to. + * On devices with only one interrupt output this property is useless. + */ + bool mirror; +}; diff --git a/include/linux/spi/mmc_spi.h b/include/linux/spi/mmc_spi.h index 9ad9a06e48..274bc0fa00 100644 --- a/include/linux/spi/mmc_spi.h +++ b/include/linux/spi/mmc_spi.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef __LINUX_SPI_MMC_SPI_H #define __LINUX_SPI_MMC_SPI_H @@ -8,6 +7,11 @@ struct device; struct mmc_host; +#define MMC_SPI_USE_CD_GPIO (1 << 0) +#define MMC_SPI_USE_RO_GPIO (1 << 1) +#define MMC_SPI_CD_GPIO_ACTIVE_LOW (1 << 2) +#define MMC_SPI_RO_GPIO_ACTIVE_LOW (1 << 3) + /* Put this in platform_data of a device being used to manage an MMC/SD * card slot. (Modeled after PXA mmc glue; see that for usage examples.) * @@ -22,6 +26,16 @@ struct mmc_spi_platform_data { void *); void (*exit)(struct device *, void *); + /* + * Card Detect and Read Only GPIOs. To enable debouncing on the card + * detect GPIO, set the cd_debounce to the debounce time in + * microseconds. + */ + unsigned int flags; + unsigned int cd_gpio; + unsigned int cd_debounce; + unsigned int ro_gpio; + /* Capabilities to pass into mmc core (e.g. MMC_CAP_NEEDS_POLL). 
*/ unsigned long caps; unsigned long caps2; @@ -35,7 +49,16 @@ struct mmc_spi_platform_data { void (*setpower)(struct device *, unsigned int maskval); }; +#ifdef CONFIG_OF extern struct mmc_spi_platform_data *mmc_spi_get_pdata(struct spi_device *spi); extern void mmc_spi_put_pdata(struct spi_device *spi); +#else +static inline struct mmc_spi_platform_data * +mmc_spi_get_pdata(struct spi_device *spi) +{ + return spi->dev.platform_data; +} +static inline void mmc_spi_put_pdata(struct spi_device *spi) {} +#endif /* CONFIG_OF */ #endif /* __LINUX_SPI_MMC_SPI_H */ diff --git a/include/linux/spi/mxs-spi.h b/include/linux/spi/mxs-spi.h index 3c57d5e56e..381d368b91 100644 --- a/include/linux/spi/mxs-spi.h +++ b/include/linux/spi/mxs-spi.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * include/linux/spi/mxs-spi.h * @@ -6,6 +5,16 @@ * * Copyright 2008 Embedded Alley Solutions, Inc. * Copyright 2009-2011 Freescale Semiconductor, Inc. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
*/ #ifndef __LINUX_SPI_MXS_SPI_H__ diff --git a/include/linux/spi/pxa2xx_spi.h b/include/linux/spi/pxa2xx_spi.h index eaab121ee5..9ec4c147ab 100644 --- a/include/linux/spi/pxa2xx_spi.h +++ b/include/linux/spi/pxa2xx_spi.h @@ -1,11 +1,18 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * Copyright (C) 2005 Stephen Street / StreetFire Sound Labs + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. */ -#ifndef __LINUX_SPI_PXA2XX_SPI_H -#define __LINUX_SPI_PXA2XX_SPI_H - -#include +#ifndef __linux_pxa2xx_spi_h +#define __linux_pxa2xx_spi_h #include @@ -14,15 +21,10 @@ struct dma_chan; -/* - * The platform data for SSP controller devices - * (resides in device.platform_data). - */ -struct pxa2xx_spi_controller { +/* device.platform_data for SSP controller devices */ +struct pxa2xx_spi_master { u16 num_chipselect; u8 enable_dma; - u8 dma_burst_size; - bool is_slave; /* DMA engine specific config */ bool (*dma_filter)(struct dma_chan *chan, void *param); @@ -33,11 +35,8 @@ struct pxa2xx_spi_controller { struct ssp_device ssp; }; -/* - * The controller specific data for SPI slave devices - * (resides in spi_board_info.controller_data), - * copied to spi_device.platform_data ... mostly for - * DMA tuning. +/* spi_board_info.controller_data for SPI slave devices, + * copied to spi_device.platform_data ... 
mostly for dma tuning */ struct pxa2xx_spi_chip { u8 tx_threshold; @@ -54,8 +53,7 @@ struct pxa2xx_spi_chip { #include -extern void pxa2xx_set_spi_info(unsigned id, struct pxa2xx_spi_controller *info); +extern void pxa2xx_set_spi_info(unsigned id, struct pxa2xx_spi_master *info); #endif - -#endif /* __LINUX_SPI_PXA2XX_SPI_H */ +#endif diff --git a/include/linux/spi/rspi.h b/include/linux/spi/rspi.h index dbdfcc7a3d..a693188cc0 100644 --- a/include/linux/spi/rspi.h +++ b/include/linux/spi/rspi.h @@ -1,8 +1,16 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * Renesas SPI driver * * Copyright (C) 2012 Renesas Solutions Corp. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; version 2 of the License. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. */ #ifndef __LINUX_SPI_RENESAS_SPI_H__ diff --git a/include/linux/spi/s3c24xx.h b/include/linux/spi/s3c24xx.h index 440a715931..ca271c06c5 100644 --- a/include/linux/spi/s3c24xx.h +++ b/include/linux/spi/s3c24xx.h @@ -1,9 +1,12 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright (c) 2006 Simtec Electronics * Ben Dooks * * S3C2410 - SPI Controller platform_device info + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
*/ #ifndef __LINUX_SPI_S3C24XX_H @@ -20,6 +23,6 @@ struct s3c2410_spi_info { void (*set_cs)(struct s3c2410_spi_info *spi, int cs, int pol); }; -extern int s3c24xx_set_fiq(unsigned int irq, u32 *ack_ptr, bool on); +extern int s3c24xx_set_fiq(unsigned int irq, bool on); #endif /* __LINUX_SPI_S3C24XX_H */ diff --git a/include/linux/spi/sh_hspi.h b/include/linux/spi/sh_hspi.h index 02f36b2f3f..aa0d440ab4 100644 --- a/include/linux/spi/sh_hspi.h +++ b/include/linux/spi/sh_hspi.h @@ -1,6 +1,14 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright (C) 2011 Kuninori Morimoto + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; version 2 of the License. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
*/ #ifndef SH_HSPI_H #define SH_HSPI_H diff --git a/include/linux/spi/sh_msiof.h b/include/linux/spi/sh_msiof.h index dc2a0cbd21..b087a85f5f 100644 --- a/include/linux/spi/sh_msiof.h +++ b/include/linux/spi/sh_msiof.h @@ -1,17 +1,10 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef __SPI_SH_MSIOF_H__ #define __SPI_SH_MSIOF_H__ -enum { - MSIOF_SPI_MASTER, - MSIOF_SPI_SLAVE, -}; - struct sh_msiof_spi_info { int tx_fifo_override; int rx_fifo_override; u16 num_chipselect; - int mode; unsigned int dma_tx_id; unsigned int dma_rx_id; u32 dtdl; diff --git a/include/linux/spi/spi.h b/include/linux/spi/spi.h index 6b0b686f6f..4b743ac353 100644 --- a/include/linux/spi/spi.h +++ b/include/linux/spi/spi.h @@ -1,32 +1,35 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later - * +/* * Copyright (C) 2005 David Brownell + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. */ #ifndef __LINUX_SPI_H #define __LINUX_SPI_H -#include #include #include #include #include #include #include -#include -#include - -#include struct dma_chan; -struct software_node; -struct spi_controller; +struct spi_master; struct spi_transfer; -struct spi_controller_mem_ops; +struct spi_flash_read_message; /* - * INTERFACES between SPI master-side drivers and SPI slave protocol handlers, - * and SPI infrastructure. + * INTERFACES between SPI master-side drivers and SPI infrastructure. + * (There's no SPI slave support for Linux yet...) 
*/ extern struct bus_type spi_bus_type; @@ -80,7 +83,7 @@ struct spi_statistics { void spi_statistics_add_transfer_stats(struct spi_statistics *stats, struct spi_transfer *xfer, - struct spi_controller *ctlr); + struct spi_master *master); #define SPI_STATISTICS_ADD_TO_FIELD(stats, field, count) \ do { \ @@ -94,30 +97,13 @@ void spi_statistics_add_transfer_stats(struct spi_statistics *stats, SPI_STATISTICS_ADD_TO_FIELD(stats, field, 1) /** - * struct spi_delay - SPI delay information - * @value: Value for the delay - * @unit: Unit for the delay - */ -struct spi_delay { -#define SPI_DELAY_UNIT_USECS 0 -#define SPI_DELAY_UNIT_NSECS 1 -#define SPI_DELAY_UNIT_SCK 2 - u16 value; - u8 unit; -}; - -extern int spi_delay_to_ns(struct spi_delay *_delay, struct spi_transfer *xfer); -extern int spi_delay_exec(struct spi_delay *_delay, struct spi_transfer *xfer); - -/** - * struct spi_device - Controller side proxy for an SPI slave device + * struct spi_device - Master side proxy for an SPI slave device * @dev: Driver model representation of the device. - * @controller: SPI controller used with the device. - * @master: Copy of controller, for backwards compatibility. + * @master: SPI controller used with the device. * @max_speed_hz: Maximum clock rate to be used with this chip * (on this board); may be changed by the device's driver. * The spi_transfer.speed_hz can override this for each transfer. - * @chip_select: Chipselect, distinguishing chips handled by @controller. + * @chip_select: Chipselect, distinguishing chips handled by @master. * @mode: The spi mode defines how data is clocked out and in. * This may be changed by the device's driver. * The "active low" default for chipselect mode can be overridden @@ -129,7 +115,6 @@ extern int spi_delay_exec(struct spi_delay *_delay, struct spi_transfer *xfer); * This may be changed by the device's driver, or left at the * default (0) indicating protocol words are eight bit bytes. 
* The spi_transfer.bits_per_word can override this for each transfer. - * @rt: Make the pump thread real time priority. * @irq: Negative, or the number passed to request_irq() to receive * interrupts from this device. * @controller_state: Controller's runtime state @@ -138,20 +123,9 @@ extern int spi_delay_exec(struct spi_delay *_delay, struct spi_transfer *xfer); * @modalias: Name of the driver to use with this device, or an alias * for that name. This appears in the sysfs "modalias" attribute * for driver coldplugging, and in uevents used for hotplugging - * @driver_override: If the name of a driver is written to this attribute, then - * the device will bind to the named driver and only the named driver. - * @cs_gpio: LEGACY: gpio number of the chipselect line (optional, -ENOENT when - * not using a GPIO line) use cs_gpiod in new drivers by opting in on - * the spi_master. - * @cs_gpiod: gpio descriptor of the chipselect line (optional, NULL when - * not using a GPIO line) - * @word_delay: delay to be inserted between consecutive - * words of a transfer - * @cs_setup: delay to be introduced by the controller after CS is asserted - * @cs_hold: delay to be introduced by the controller before CS is deasserted - * @cs_inactive: delay to be introduced by the controller after CS is - * deasserted. If @cs_change_delay is used from @spi_transfer, then the - * two delays will be added up. 
+ * @cs_gpio: gpio number of the chipselect line (optional, -ENOENT when + * when not using a GPIO line) + * * @statistics: statistics for the spi_device * * A @spi_device is used to interchange data between an SPI slave @@ -165,37 +139,32 @@ extern int spi_delay_exec(struct spi_delay *_delay, struct spi_transfer *xfer); */ struct spi_device { struct device dev; - struct spi_controller *controller; - struct spi_controller *master; /* compatibility layer */ + struct spi_master *master; u32 max_speed_hz; u8 chip_select; u8 bits_per_word; - bool rt; -#define SPI_NO_TX BIT(31) /* no transmit wire */ -#define SPI_NO_RX BIT(30) /* no receive wire */ - /* - * All bits defined above should be covered by SPI_MODE_KERNEL_MASK. - * The SPI_MODE_KERNEL_MASK has the SPI_MODE_USER_MASK counterpart, - * which is defined in 'include/uapi/linux/spi/spi.h'. - * The bits defined here are from bit 31 downwards, while in - * SPI_MODE_USER_MASK are from 0 upwards. - * These bits must not overlap. A static assert check should make sure of that. - * If adding extra bits, make sure to decrease the bit index below as well. - */ -#define SPI_MODE_KERNEL_MASK (~(BIT(30) - 1)) - u32 mode; + u16 mode; +#define SPI_CPHA 0x01 /* clock phase */ +#define SPI_CPOL 0x02 /* clock polarity */ +#define SPI_MODE_0 (0|0) /* (original MicroWire) */ +#define SPI_MODE_1 (0|SPI_CPHA) +#define SPI_MODE_2 (SPI_CPOL|0) +#define SPI_MODE_3 (SPI_CPOL|SPI_CPHA) +#define SPI_CS_HIGH 0x04 /* chipselect active high? 
*/ +#define SPI_LSB_FIRST 0x08 /* per-word bits-on-wire */ +#define SPI_3WIRE 0x10 /* SI/SO signals shared */ +#define SPI_LOOP 0x20 /* loopback mode */ +#define SPI_NO_CS 0x40 /* 1 dev/bus, no chipselect */ +#define SPI_READY 0x80 /* slave pulls low to pause */ +#define SPI_TX_DUAL 0x100 /* transmit with 2 wires */ +#define SPI_TX_QUAD 0x200 /* transmit with 4 wires */ +#define SPI_RX_DUAL 0x400 /* receive with 2 wires */ +#define SPI_RX_QUAD 0x800 /* receive with 4 wires */ int irq; void *controller_state; void *controller_data; char modalias[SPI_NAME_SIZE]; - const char *driver_override; - int cs_gpio; /* LEGACY: chip select gpio */ - struct gpio_desc *cs_gpiod; /* chip select gpio desc */ - struct spi_delay word_delay; /* inter-word delay */ - /* CS delays */ - struct spi_delay cs_setup; - struct spi_delay cs_hold; - struct spi_delay cs_inactive; + int cs_gpio; /* chip select gpio */ /* the statistics */ struct spi_statistics statistics; @@ -205,15 +174,12 @@ struct spi_device { * the controller talks to each chip, like: * - memory packing (12 bit samples into low bits, others zeroed) * - priority + * - drop chipselect after each word * - chipselect delays * - ... */ }; -/* Make sure that SPI_MODE_KERNEL_MASK & SPI_MODE_USER_MASK don't overlap */ -static_assert((SPI_MODE_KERNEL_MASK & SPI_MODE_USER_MASK) == 0, - "SPI_MODE_USER_MASK & SPI_MODE_KERNEL_MASK must not overlap"); - static inline struct spi_device *to_spi_device(struct device *dev) { return dev ? 
container_of(dev, struct spi_device, dev) : NULL; @@ -231,7 +197,7 @@ static inline void spi_dev_put(struct spi_device *spi) put_device(&spi->dev); } -/* ctldata is for the bus_controller driver's runtime state */ +/* ctldata is for the bus_master driver's runtime state */ static inline void *spi_get_ctldata(struct spi_device *spi) { return spi->controller_state; @@ -255,6 +221,7 @@ static inline void *spi_get_drvdata(struct spi_device *spi) } struct spi_message; +struct spi_transfer; /** * struct spi_driver - Host side "protocol" driver @@ -307,8 +274,6 @@ static inline void spi_unregister_driver(struct spi_driver *sdrv) driver_unregister(&sdrv->driver); } -extern struct spi_device *spi_new_ancillary_device(struct spi_device *spi, u8 chip_select); - /* use a define to avoid include chaining to get THIS_MODULE */ #define spi_register_driver(driver) \ __spi_register_driver(THIS_MODULE, driver) @@ -326,9 +291,9 @@ extern struct spi_device *spi_new_ancillary_device(struct spi_device *spi, u8 ch spi_unregister_driver) /** - * struct spi_controller - interface to SPI master or slave controller + * struct spi_master - interface to SPI master controller * @dev: device interface to this driver - * @list: link with the global spi_controller list + * @list: link with the global spi_master list * @bus_num: board-specific (and often SOC-specific) identifier for a * given SPI controller. * @num_chipselect: chipselects are used to distinguish individual @@ -337,7 +302,6 @@ extern struct spi_device *spi_new_ancillary_device(struct spi_device *spi, u8 ch * every chipselect is connected to a slave. * @dma_alignment: SPI controller constraint on DMA buffers alignment. * @mode_bits: flags understood by this controller driver - * @buswidth_override_bits: flags to override for this controller driver * @bits_per_word_mask: A mask indicating which values of bits_per_word are * supported by the driver. Bit n indicates that a bits_per_word n+1 is * supported. 
If set, the SPI core will reject any transfer with an @@ -346,8 +310,6 @@ extern struct spi_device *spi_new_ancillary_device(struct spi_device *spi, u8 ch * @min_speed_hz: Lowest supported transfer speed * @max_speed_hz: Highest supported transfer speed * @flags: other constraints relevant to this driver - * @slave: indicates that this is an SPI slave controller - * @devm_allocated: whether the allocation of this struct is devres-managed * @max_transfer_size: function that returns the max transfer size for * a &spi_device; may be %NULL, so the default %SIZE_MAX will be used. * @max_message_size: function that returns the max message size for @@ -361,14 +323,12 @@ extern struct spi_device *spi_new_ancillary_device(struct spi_device *spi, u8 ch * must fail if an unrecognized or unsupported mode is requested. * It's always safe to call this unless transfers are pending on * the device whose settings are being modified. - * @set_cs_timing: optional hook for SPI devices to request SPI master - * controller for configuring specific CS setup time, hold time and inactive - * delay interms of clock counts * @transfer: adds a message to the controller's transfer queue. 
* @cleanup: frees controller-specific state - * @can_dma: determine whether this controller supports DMA - * @queued: whether this controller is providing an internal message queue - * @kworker: pointer to thread struct for message pump + * @can_dma: determine whether this master supports DMA + * @queued: whether this master is providing an internal message queue + * @kworker: thread struct for message pump + * @kworker_task: pointer to task for message pump kworker thread * @pump_messages: work struct for scheduling work to the message pump * @queue_lock: spinlock to syncronise access to message queue * @queue: message queue @@ -377,8 +337,6 @@ extern struct spi_device *spi_new_ancillary_device(struct spi_device *spi, u8 ch * @cur_msg_prepared: spi_prepare_message was called for the currently * in-flight message * @cur_msg_mapped: message has been mapped for DMA - * @last_cs_enable: was enable true on the last call to set_cs. - * @last_cs_mode_high: was (mode & SPI_CS_HIGH) true on the last call to set_cs. * @xfer_completion: used by core transfer_one_message() * @busy: message pump is busy * @running: message pump is running @@ -398,14 +356,12 @@ extern struct spi_device *spi_new_ancillary_device(struct spi_device *spi, u8 ch * @unprepare_transfer_hardware: there are currently no more messages on the * queue so the subsystem notifies the driver that it may relax the * hardware by issuing this call - * * @set_cs: set the logic level of the chip select line. May be called * from interrupt context. * @prepare_message: set up the controller to transfer a single message, * for example doing DMA mapping. Called from threaded * context. * @transfer_one: transfer a single spi_transfer. - * * - return 0 if the transfer is finished, * - return 1 if the transfer is still in progress. When * the driver is finished with this transfer it must @@ -416,29 +372,14 @@ extern struct spi_device *spi_new_ancillary_device(struct spi_device *spi, u8 ch * transfer_one callback. 
* @handle_err: the subsystem calls the driver to handle an error that occurs * in the generic implementation of transfer_one_message(). - * @mem_ops: optimized/dedicated operations for interactions with SPI memory. - * This field is optional and should only be implemented if the - * controller has native support for memory like operations. * @unprepare_message: undo any work done by prepare_message(). - * @slave_abort: abort the ongoing transfer request on an SPI slave controller - * @cs_gpios: LEGACY: array of GPIO descs to use as chip select lines; one per - * CS number. Any individual value may be -ENOENT for CS lines that - * are not GPIOs (driven by the SPI controller itself). Use the cs_gpiods - * in new drivers. - * @cs_gpiods: Array of GPIO descs to use as chip select lines; one per CS - * number. Any individual value may be NULL for CS lines that + * @spi_flash_read: to support spi-controller hardwares that provide + * accelerated interface to read from flash devices. + * @flash_read_supported: spi device supports flash read + * @cs_gpios: Array of GPIOs to use as chip select lines; one per CS + * number. Any individual value may be -ENOENT for CS lines that * are not GPIOs (driven by the SPI controller itself). - * @use_gpio_descriptors: Turns on the code in the SPI core to parse and grab - * GPIO descriptors rather than using global GPIO numbers grabbed by the - * driver. This will fill in @cs_gpiods and @cs_gpios should not be used, - * and SPI devices will have the cs_gpiod assigned rather than cs_gpio. - * @unused_native_cs: When cs_gpiods is used, spi_register_controller() will - * fill in this field with the first unused native CS, to be used by SPI - * controller drivers that need to drive a native CS when using GPIO CS. - * @max_native_cs: When cs_gpiods is used, and this field is filled in, - * spi_register_controller() will validate all native CS (including the - * unused native CS) against this value. 
- * @statistics: statistics for the spi_controller + * @statistics: statistics for the spi_master * @dma_tx: DMA transmit channel * @dma_rx: DMA receive channel * @dummy_rx: dummy receive buffer for full-duplex devices @@ -446,17 +387,8 @@ extern struct spi_device *spi_new_ancillary_device(struct spi_device *spi, u8 ch * @fw_translate_cs: If the boot firmware uses different numbering scheme * what Linux expects, this optional hook can be used to translate * between the two. - * @ptp_sts_supported: If the driver sets this to true, it must provide a - * time snapshot in @spi_transfer->ptp_sts as close as possible to the - * moment in time when @spi_transfer->ptp_sts_word_pre and - * @spi_transfer->ptp_sts_word_post were transmitted. - * If the driver does not set this, the SPI core takes the snapshot as - * close to the driver hand-over as possible. - * @irq_flags: Interrupt enable state during PTP system timestamping - * @fallback: fallback to pio if dma transfer return failure with - * SPI_TRANS_FAIL_NO_START. * - * Each SPI controller can communicate with one or more @spi_device + * Each SPI master controller can communicate with one or more @spi_device * children. These make a small bus, sharing MOSI, MISO and SCK signals * but not chip select signals. Each device may be configured to use a * different clock rate, since those shared signals are ignored unless @@ -467,7 +399,7 @@ extern struct spi_device *spi_new_ancillary_device(struct spi_device *spi, u8 ch * an SPI slave device. For each such message it queues, it calls the * message's completion function when the transaction completes. 
*/ -struct spi_controller { +struct spi_master { struct device dev; struct list_head list; @@ -491,15 +423,13 @@ struct spi_controller { u16 dma_alignment; /* spi_device.mode flags understood by this controller driver */ - u32 mode_bits; - - /* spi_device.mode flags override flags for this controller */ - u32 buswidth_override_bits; + u16 mode_bits; /* bitmask of supported bits_per_word for transfers */ u32 bits_per_word_mask; #define SPI_BPW_MASK(bits) BIT((bits) - 1) -#define SPI_BPW_RANGE_MASK(min, max) GENMASK((max) - 1, (min) - 1) +#define SPI_BIT_MASK(bits) (((bits) == 32) ? ~0U : (BIT(bits) - 1)) +#define SPI_BPW_RANGE_MASK(min, max) (SPI_BIT_MASK(max) - SPI_BIT_MASK(min - 1)) /* limits on transfer speed */ u32 min_speed_hz; @@ -507,19 +437,11 @@ struct spi_controller { /* other constraints relevant to this driver */ u16 flags; -#define SPI_CONTROLLER_HALF_DUPLEX BIT(0) /* can't do full duplex */ -#define SPI_CONTROLLER_NO_RX BIT(1) /* can't do buffer read */ -#define SPI_CONTROLLER_NO_TX BIT(2) /* can't do buffer write */ -#define SPI_CONTROLLER_MUST_RX BIT(3) /* requires rx */ -#define SPI_CONTROLLER_MUST_TX BIT(4) /* requires tx */ - -#define SPI_MASTER_GPIO_SS BIT(5) /* GPIO CS must select slave */ - - /* flag indicating if the allocation of this struct is devres-managed */ - bool devm_allocated; - - /* flag indicating this is an SPI slave controller */ - bool slave; +#define SPI_MASTER_HALF_DUPLEX BIT(0) /* can't do full duplex */ +#define SPI_MASTER_NO_RX BIT(1) /* can't do buffer read */ +#define SPI_MASTER_NO_TX BIT(2) /* can't do buffer write */ +#define SPI_MASTER_MUST_RX BIT(3) /* requires rx */ +#define SPI_MASTER_MUST_TX BIT(4) /* requires tx */ /* * on some hardware transfer / message size may be constrained @@ -531,9 +453,6 @@ struct spi_controller { /* I/O mutex */ struct mutex io_mutex; - /* Used to avoid adding the same CS twice */ - struct mutex add_lock; - /* lock and mutex for SPI bus locking */ spinlock_t bus_lock_spinlock; struct mutex 
bus_lock_mutex; @@ -549,16 +468,6 @@ struct spi_controller { */ int (*setup)(struct spi_device *spi); - /* - * set_cs_timing() method is for SPI controllers that supports - * configuring CS timing. - * - * This hook allows SPI client drivers to request SPI controllers - * to configure specific CS timing through spi_set_cs_timing() after - * spi_setup(). - */ - int (*set_cs_timing)(struct spi_device *spi); - /* bidirectional bulk transfers * * + The transfer() method may not sleep; its main role is @@ -567,8 +476,8 @@ struct spi_controller { * any other request management * + To a given spi_device, message queueing is pure fifo * - * + The controller's main job is to process its message queue, - * selecting a chip (for masters), then transferring data + * + The master's main job is to process its message queue, + * selecting a chip then transferring data * + If there are multiple spi_device children, the i/o queue * arbitration algorithm is unspecified (round robin, fifo, * priority, reservations, preemption, etc) @@ -581,7 +490,7 @@ struct spi_controller { int (*transfer)(struct spi_device *spi, struct spi_message *mesg); - /* called on release() to free memory provided by spi_controller */ + /* called on release() to free memory provided by spi_master */ void (*cleanup)(struct spi_device *spi); /* @@ -591,19 +500,19 @@ struct spi_controller { * not modify or store xfer and dma_tx and dma_rx must be set * while the device is prepared. */ - bool (*can_dma)(struct spi_controller *ctlr, + bool (*can_dma)(struct spi_master *master, struct spi_device *spi, struct spi_transfer *xfer); - struct device *dma_map_dev; /* * These hooks are for drivers that want to use the generic - * controller transfer queueing mechanism. If these are used, the + * master transfer queueing mechanism. If these are used, the * transfer() function above must NOT be specified by the driver. * Over time we expect SPI drivers to be phased over to this API. 
*/ bool queued; - struct kthread_worker *kworker; + struct kthread_worker kworker; + struct task_struct *kworker_task; struct kthread_work pump_messages; spinlock_t queue_lock; struct list_head queue; @@ -615,41 +524,33 @@ struct spi_controller { bool auto_runtime_pm; bool cur_msg_prepared; bool cur_msg_mapped; - bool last_cs_enable; - bool last_cs_mode_high; - bool fallback; struct completion xfer_completion; size_t max_dma_len; - int (*prepare_transfer_hardware)(struct spi_controller *ctlr); - int (*transfer_one_message)(struct spi_controller *ctlr, + int (*prepare_transfer_hardware)(struct spi_master *master); + int (*transfer_one_message)(struct spi_master *master, struct spi_message *mesg); - int (*unprepare_transfer_hardware)(struct spi_controller *ctlr); - int (*prepare_message)(struct spi_controller *ctlr, + int (*unprepare_transfer_hardware)(struct spi_master *master); + int (*prepare_message)(struct spi_master *master, struct spi_message *message); - int (*unprepare_message)(struct spi_controller *ctlr, + int (*unprepare_message)(struct spi_master *master, struct spi_message *message); - int (*slave_abort)(struct spi_controller *ctlr); + int (*spi_flash_read)(struct spi_device *spi, + struct spi_flash_read_message *msg); + bool (*flash_read_supported)(struct spi_device *spi); /* * These hooks are for drivers that use a generic implementation - * of transfer_one_message() provided by the core. + * of transfer_one_message() provied by the core. */ void (*set_cs)(struct spi_device *spi, bool enable); - int (*transfer_one)(struct spi_controller *ctlr, struct spi_device *spi, + int (*transfer_one)(struct spi_master *master, struct spi_device *spi, struct spi_transfer *transfer); - void (*handle_err)(struct spi_controller *ctlr, + void (*handle_err)(struct spi_master *master, struct spi_message *message); - /* Optimized handlers for SPI memory-like operations. 
*/ - const struct spi_controller_mem_ops *mem_ops; - /* gpio chip select */ int *cs_gpios; - struct gpio_desc **cs_gpiods; - bool use_gpio_descriptors; - s8 unused_native_cs; - s8 max_native_cs; /* statistics */ struct spi_statistics statistics; @@ -662,114 +563,57 @@ struct spi_controller { void *dummy_rx; void *dummy_tx; - int (*fw_translate_cs)(struct spi_controller *ctlr, unsigned cs); - - /* - * Driver sets this field to indicate it is able to snapshot SPI - * transfers (needed e.g. for reading the time of POSIX clocks) - */ - bool ptp_sts_supported; - - /* Interrupt enable state during PTP system timestamping */ - unsigned long irq_flags; + int (*fw_translate_cs)(struct spi_master *master, unsigned cs); }; -static inline void *spi_controller_get_devdata(struct spi_controller *ctlr) +static inline void *spi_master_get_devdata(struct spi_master *master) { - return dev_get_drvdata(&ctlr->dev); + return dev_get_drvdata(&master->dev); } -static inline void spi_controller_set_devdata(struct spi_controller *ctlr, - void *data) +static inline void spi_master_set_devdata(struct spi_master *master, void *data) { - dev_set_drvdata(&ctlr->dev, data); + dev_set_drvdata(&master->dev, data); } -static inline struct spi_controller *spi_controller_get(struct spi_controller *ctlr) +static inline struct spi_master *spi_master_get(struct spi_master *master) { - if (!ctlr || !get_device(&ctlr->dev)) + if (!master || !get_device(&master->dev)) return NULL; - return ctlr; + return master; } -static inline void spi_controller_put(struct spi_controller *ctlr) +static inline void spi_master_put(struct spi_master *master) { - if (ctlr) - put_device(&ctlr->dev); -} - -static inline bool spi_controller_is_slave(struct spi_controller *ctlr) -{ - return IS_ENABLED(CONFIG_SPI_SLAVE) && ctlr->slave; + if (master) + put_device(&master->dev); } /* PM calls that need to be issued by the driver */ -extern int spi_controller_suspend(struct spi_controller *ctlr); -extern int 
spi_controller_resume(struct spi_controller *ctlr); +extern int spi_master_suspend(struct spi_master *master); +extern int spi_master_resume(struct spi_master *master); /* Calls the driver make to interact with the message queue */ -extern struct spi_message *spi_get_next_queued_message(struct spi_controller *ctlr); -extern void spi_finalize_current_message(struct spi_controller *ctlr); -extern void spi_finalize_current_transfer(struct spi_controller *ctlr); +extern struct spi_message *spi_get_next_queued_message(struct spi_master *master); +extern void spi_finalize_current_message(struct spi_master *master); +extern void spi_finalize_current_transfer(struct spi_master *master); -/* Helper calls for driver to timestamp transfer */ -void spi_take_timestamp_pre(struct spi_controller *ctlr, - struct spi_transfer *xfer, - size_t progress, bool irqs_off); -void spi_take_timestamp_post(struct spi_controller *ctlr, - struct spi_transfer *xfer, - size_t progress, bool irqs_off); +/* the spi driver core manages memory for the spi_master classdev */ +extern struct spi_master * +spi_alloc_master(struct device *host, unsigned size); -/* the spi driver core manages memory for the spi_controller classdev */ -extern struct spi_controller *__spi_alloc_controller(struct device *host, - unsigned int size, bool slave); +extern int spi_register_master(struct spi_master *master); +extern int devm_spi_register_master(struct device *dev, + struct spi_master *master); +extern void spi_unregister_master(struct spi_master *master); -static inline struct spi_controller *spi_alloc_master(struct device *host, - unsigned int size) -{ - return __spi_alloc_controller(host, size, false); -} - -static inline struct spi_controller *spi_alloc_slave(struct device *host, - unsigned int size) -{ - if (!IS_ENABLED(CONFIG_SPI_SLAVE)) - return NULL; - - return __spi_alloc_controller(host, size, true); -} - -struct spi_controller *__devm_spi_alloc_controller(struct device *dev, - unsigned int size, - bool 
slave); - -static inline struct spi_controller *devm_spi_alloc_master(struct device *dev, - unsigned int size) -{ - return __devm_spi_alloc_controller(dev, size, false); -} - -static inline struct spi_controller *devm_spi_alloc_slave(struct device *dev, - unsigned int size) -{ - if (!IS_ENABLED(CONFIG_SPI_SLAVE)) - return NULL; - - return __devm_spi_alloc_controller(dev, size, true); -} - -extern int spi_register_controller(struct spi_controller *ctlr); -extern int devm_spi_register_controller(struct device *dev, - struct spi_controller *ctlr); -extern void spi_unregister_controller(struct spi_controller *ctlr); - -extern struct spi_controller *spi_busnum_to_master(u16 busnum); +extern struct spi_master *spi_busnum_to_master(u16 busnum); /* * SPI resource management while processing a SPI message */ -typedef void (*spi_res_release_t)(struct spi_controller *ctlr, +typedef void (*spi_res_release_t)(struct spi_master *master, struct spi_message *msg, void *res); @@ -794,7 +638,7 @@ extern void *spi_res_alloc(struct spi_device *spi, extern void spi_res_add(struct spi_message *message, void *res); extern void spi_res_free(void *res); -extern void spi_res_release(struct spi_controller *ctlr, +extern void spi_res_release(struct spi_master *master, struct spi_message *message); /*---------------------------------------------------------------------------*/ @@ -831,46 +675,13 @@ extern void spi_res_release(struct spi_controller *ctlr, * transfer. If 0 the default (from @spi_device) is used. * @bits_per_word: select a bits_per_word other than the device default * for this transfer. If 0 the default (from @spi_device) is used. - * @dummy_data: indicates transfer is dummy bytes transfer. 
* @cs_change: affects chipselect after this transfer completes - * @cs_change_delay: delay between cs deassert and assert when - * @cs_change is set and @spi_transfer is not the last in @spi_message - * @delay: delay to be introduced after this transfer before + * @delay_usecs: microseconds to delay after this transfer before * (optionally) changing the chipselect status, then starting * the next transfer or completing this @spi_message. - * @word_delay: inter word delay to be introduced after each word size - * (set by bits_per_word) transmission. - * @effective_speed_hz: the effective SCK-speed that was used to - * transfer this transfer. Set to 0 if the spi bus driver does - * not support it. * @transfer_list: transfers are sequenced through @spi_message.transfers * @tx_sg: Scatterlist for transmit, currently not for client use * @rx_sg: Scatterlist for receive, currently not for client use - * @ptp_sts_word_pre: The word (subject to bits_per_word semantics) offset - * within @tx_buf for which the SPI device is requesting that the time - * snapshot for this transfer begins. Upon completing the SPI transfer, - * this value may have changed compared to what was requested, depending - * on the available snapshotting resolution (DMA transfer, - * @ptp_sts_supported is false, etc). - * @ptp_sts_word_post: See @ptp_sts_word_post. The two can be equal (meaning - * that a single byte should be snapshotted). - * If the core takes care of the timestamp (if @ptp_sts_supported is false - * for this controller), it will set @ptp_sts_word_pre to 0, and - * @ptp_sts_word_post to the length of the transfer. This is done - * purposefully (instead of setting to spi_transfer->len - 1) to denote - * that a transfer-level snapshot taken from within the driver may still - * be of higher quality. - * @ptp_sts: Pointer to a memory location held by the SPI slave device where a - * PTP system timestamp structure may lie. 
If drivers use PIO or their - * hardware has some sort of assist for retrieving exact transfer timing, - * they can (and should) assert @ptp_sts_supported and populate this - * structure using the ptp_read_system_*ts helper functions. - * The timestamp must represent the time at which the SPI slave device has - * processed the word, i.e. the "pre" timestamp should be taken before - * transmitting the "pre" word, and the "post" timestamp after receiving - * transmit confirmation from the controller for the "post" word. - * @timestamped: true if the transfer has been timestamped - * @error: Error status logged by spi controller driver. * * SPI transfers always write the same number of bytes as they read. * Protocol drivers should always provide @rx_buf and/or @tx_buf. @@ -941,7 +752,6 @@ struct spi_transfer { struct sg_table tx_sg; struct sg_table rx_sg; - unsigned dummy_data:1; unsigned cs_change:1; unsigned tx_nbits:3; unsigned rx_nbits:3; @@ -949,24 +759,10 @@ struct spi_transfer { #define SPI_NBITS_DUAL 0x02 /* 2bits transfer */ #define SPI_NBITS_QUAD 0x04 /* 4bits transfer */ u8 bits_per_word; - struct spi_delay delay; - struct spi_delay cs_change_delay; - struct spi_delay word_delay; + u16 delay_usecs; u32 speed_hz; - u32 effective_speed_hz; - - unsigned int ptp_sts_word_pre; - unsigned int ptp_sts_word_post; - - struct ptp_system_timestamp *ptp_sts; - - bool timestamped; - struct list_head transfer_list; - -#define SPI_TRANS_FAIL_NO_START BIT(0) - u16 error; }; /** @@ -989,7 +785,7 @@ struct spi_transfer { * each represented by a struct spi_transfer. The sequence is "atomic" * in the sense that no other spi_message may use that SPI bus until that * sequence completes. On some systems, many such sequences can execute as - * a single programmed DMA transfer. On all systems, these messages are + * as single programmed DMA transfer. On all systems, these messages are * queued, and might complete after transactions to other devices. 
Messages * sent to a given spi_device are always executed in FIFO order. * @@ -1026,7 +822,7 @@ struct spi_message { /* for optional use by whatever driver currently owns the * spi_message ... between calls to spi_async and then later - * complete(), that's the spi_controller controller driver. + * complete(), that's the spi_master controller driver. */ struct list_head queue; void *state; @@ -1059,12 +855,6 @@ spi_transfer_del(struct spi_transfer *t) list_del(&t->transfer_list); } -static inline int -spi_transfer_delay_exec(struct spi_transfer *t) -{ - return spi_delay_exec(&t->delay, t); -} - /** * spi_message_init_with_transfers - Initialize spi_message and append transfers * @m: spi_message to be initialized @@ -1100,7 +890,7 @@ static inline struct spi_message *spi_message_alloc(unsigned ntrans, gfp_t flags unsigned i; struct spi_transfer *t = (struct spi_transfer *)(m + 1); - spi_message_init_no_memset(m); + INIT_LIST_HEAD(&m->transfers); for (i = 0; i < ntrans; i++, t++) spi_message_add_tail(t, m); } @@ -1116,58 +906,36 @@ extern int spi_setup(struct spi_device *spi); extern int spi_async(struct spi_device *spi, struct spi_message *message); extern int spi_async_locked(struct spi_device *spi, struct spi_message *message); -extern int spi_slave_abort(struct spi_device *spi); static inline size_t spi_max_message_size(struct spi_device *spi) { - struct spi_controller *ctlr = spi->controller; - - if (!ctlr->max_message_size) + struct spi_master *master = spi->master; + if (!master->max_message_size) return SIZE_MAX; - return ctlr->max_message_size(spi); + return master->max_message_size(spi); } static inline size_t spi_max_transfer_size(struct spi_device *spi) { - struct spi_controller *ctlr = spi->controller; + struct spi_master *master = spi->master; size_t tr_max = SIZE_MAX; size_t msg_max = spi_max_message_size(spi); - if (ctlr->max_transfer_size) - tr_max = ctlr->max_transfer_size(spi); + if (master->max_transfer_size) + tr_max = 
master->max_transfer_size(spi); /* transfer size limit must not be greater than messsage size limit */ return min(tr_max, msg_max); } -/** - * spi_is_bpw_supported - Check if bits per word is supported - * @spi: SPI device - * @bpw: Bits per word - * - * This function checks to see if the SPI controller supports @bpw. - * - * Returns: - * True if @bpw is supported, false otherwise. - */ -static inline bool spi_is_bpw_supported(struct spi_device *spi, u32 bpw) -{ - u32 bpw_mask = spi->master->bits_per_word_mask; - - if (bpw == 8 || (bpw <= 32 && bpw_mask & SPI_BPW_MASK(bpw))) - return true; - - return false; -} - /*---------------------------------------------------------------------------*/ /* SPI transfer replacement methods which make use of spi_res */ struct spi_replaced_transfers; -typedef void (*spi_replaced_release_t)(struct spi_controller *ctlr, +typedef void (*spi_replaced_release_t)(struct spi_master *master, struct spi_message *msg, struct spi_replaced_transfers *res); /** @@ -1211,7 +979,7 @@ extern struct spi_replaced_transfers *spi_replace_transfers( /* SPI transfer transformation methods */ -extern int spi_split_transfers_maxsize(struct spi_controller *ctlr, +extern int spi_split_transfers_maxsize(struct spi_master *master, struct spi_message *msg, size_t maxsize, gfp_t gfp); @@ -1225,8 +993,8 @@ extern int spi_split_transfers_maxsize(struct spi_controller *ctlr, extern int spi_sync(struct spi_device *spi, struct spi_message *message); extern int spi_sync_locked(struct spi_device *spi, struct spi_message *message); -extern int spi_bus_lock(struct spi_controller *ctlr); -extern int spi_bus_unlock(struct spi_controller *ctlr); +extern int spi_bus_lock(struct spi_master *master); +extern int spi_bus_unlock(struct spi_master *master); /** * spi_sync_transfer - synchronous SPI data transfer @@ -1239,7 +1007,7 @@ extern int spi_bus_unlock(struct spi_controller *ctlr); * * For more specific semantics see spi_sync(). 
* - * Return: zero on success, else a negative error code. + * Return: Return: zero on success, else a negative error code. */ static inline int spi_sync_transfer(struct spi_device *spi, struct spi_transfer *xfers, @@ -1377,6 +1145,48 @@ static inline ssize_t spi_w8r16be(struct spi_device *spi, u8 cmd) return be16_to_cpu(result); } +/** + * struct spi_flash_read_message - flash specific information for + * spi-masters that provide accelerated flash read interfaces + * @buf: buffer to read data + * @from: offset within the flash from where data is to be read + * @len: length of data to be read + * @retlen: actual length of data read + * @read_opcode: read_opcode to be used to communicate with flash + * @addr_width: number of address bytes + * @dummy_bytes: number of dummy bytes + * @opcode_nbits: number of lines to send opcode + * @addr_nbits: number of lines to send address + * @data_nbits: number of lines for data + * @rx_sg: Scatterlist for receive data read from flash + * @cur_msg_mapped: message has been mapped for DMA + */ +struct spi_flash_read_message { + void *buf; + loff_t from; + size_t len; + size_t retlen; + u8 read_opcode; + u8 addr_width; + u8 dummy_bytes; + u8 opcode_nbits; + u8 addr_nbits; + u8 data_nbits; + struct sg_table rx_sg; + bool cur_msg_mapped; +}; + +/* SPI core interface for flash read support */ +static inline bool spi_flash_read_supported(struct spi_device *spi) +{ + return spi->master->spi_flash_read && + (!spi->master->flash_read_supported || + spi->master->flash_read_supported(spi)); +} + +int spi_flash_read(struct spi_device *spi, + struct spi_flash_read_message *msg); + /*---------------------------------------------------------------------------*/ /* @@ -1398,13 +1208,12 @@ static inline ssize_t spi_w8r16be(struct spi_device *spi, u8 cmd) * @modalias: Initializes spi_device.modalias; identifies the driver. * @platform_data: Initializes spi_device.platform_data; the particular * data stored there is driver-specific. 
- * @swnode: Software node for the device. * @controller_data: Initializes spi_device.controller_data; some * controllers need hints about hardware setup, e.g. for DMA. * @irq: Initializes spi_device.irq; depends on how the board is wired. * @max_speed_hz: Initializes spi_device.max_speed_hz; based on limits * from the chip datasheet and board-specific signal quality issues. - * @bus_num: Identifies which spi_controller parents the spi_device; unused + * @bus_num: Identifies which spi_master parents the spi_device; unused * by spi_new_device(), and otherwise depends on board wiring. * @chip_select: Initializes spi_device.chip_select; depends on how * the board is wired. @@ -1435,7 +1244,6 @@ struct spi_board_info { */ char modalias[SPI_NAME_SIZE]; const void *platform_data; - const struct software_node *swnode; void *controller_data; int irq; @@ -1444,7 +1252,7 @@ struct spi_board_info { /* bus_num is board specific and matches the bus_num of some - * spi_controller that will probably be registered later. + * spi_master that will probably be registered later. * * chip_select reflects how this chip is wired to that master; * it's less than num_chipselect. @@ -1455,7 +1263,7 @@ struct spi_board_info { /* mode becomes spi_device.mode, and is essential for chips * where the default of SPI_CS_HIGH = 0 is wrong. */ - u32 mode; + u16 mode; /* ... may need additional spi_device chip config data here. * avoid stuff protocol drivers can set; but include stuff @@ -1474,10 +1282,11 @@ spi_register_board_info(struct spi_board_info const *info, unsigned n) { return 0; } #endif + /* If you're hotplugging an adapter with devices (parport, usb, etc) * use spi_new_device() to describe each device. You can also call * spi_unregister_device() to start making that device vanish, but - * normally that would be handled by spi_unregister_controller(). + * normally that would be handled by spi_unregister_master(). 
* * You can also use spi_alloc_device() and spi_add_device() to use a two * stage registration sequence for each spi_device. This gives the caller @@ -1486,13 +1295,13 @@ spi_register_board_info(struct spi_board_info const *info, unsigned n) * be defined using the board info. */ extern struct spi_device * -spi_alloc_device(struct spi_controller *ctlr); +spi_alloc_device(struct spi_master *master); extern int spi_add_device(struct spi_device *spi); extern struct spi_device * -spi_new_device(struct spi_controller *, struct spi_board_info *); +spi_new_device(struct spi_master *, struct spi_board_info *); extern void spi_unregister_device(struct spi_device *spi); @@ -1500,48 +1309,9 @@ extern const struct spi_device_id * spi_get_device_id(const struct spi_device *sdev); static inline bool -spi_transfer_is_last(struct spi_controller *ctlr, struct spi_transfer *xfer) +spi_transfer_is_last(struct spi_master *master, struct spi_transfer *xfer) { - return list_is_last(&xfer->transfer_list, &ctlr->cur_msg->transfers); + return list_is_last(&xfer->transfer_list, &master->cur_msg->transfers); } -/* OF support code */ -#if IS_ENABLED(CONFIG_OF) - -/* must call put_device() when done with returned spi_device device */ -extern struct spi_device * -of_find_spi_device_by_node(struct device_node *node); - -#else - -static inline struct spi_device * -of_find_spi_device_by_node(struct device_node *node) -{ - return NULL; -} - -#endif /* IS_ENABLED(CONFIG_OF) */ - -/* Compatibility layer */ -#define spi_master spi_controller - -#define SPI_MASTER_HALF_DUPLEX SPI_CONTROLLER_HALF_DUPLEX -#define SPI_MASTER_NO_RX SPI_CONTROLLER_NO_RX -#define SPI_MASTER_NO_TX SPI_CONTROLLER_NO_TX -#define SPI_MASTER_MUST_RX SPI_CONTROLLER_MUST_RX -#define SPI_MASTER_MUST_TX SPI_CONTROLLER_MUST_TX - -#define spi_master_get_devdata(_ctlr) spi_controller_get_devdata(_ctlr) -#define spi_master_set_devdata(_ctlr, _data) \ - spi_controller_set_devdata(_ctlr, _data) -#define spi_master_get(_ctlr) 
spi_controller_get(_ctlr) -#define spi_master_put(_ctlr) spi_controller_put(_ctlr) -#define spi_master_suspend(_ctlr) spi_controller_suspend(_ctlr) -#define spi_master_resume(_ctlr) spi_controller_resume(_ctlr) - -#define spi_register_master(_ctlr) spi_register_controller(_ctlr) -#define devm_spi_register_master(_dev, _ctlr) \ - devm_spi_register_controller(_dev, _ctlr) -#define spi_unregister_master(_ctlr) spi_unregister_controller(_ctlr) - #endif /* __LINUX_SPI_H */ diff --git a/include/linux/spi/spi_bitbang.h b/include/linux/spi/spi_bitbang.h index 4444c2a992..154788ed21 100644 --- a/include/linux/spi/spi_bitbang.h +++ b/include/linux/spi/spi_bitbang.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef __SPI_BITBANG_H #define __SPI_BITBANG_H @@ -8,7 +7,7 @@ struct spi_bitbang { struct mutex lock; u8 busy; u8 use_dma; - u16 flags; /* extra spi->mode support */ + u8 flags; /* extra spi->mode support */ struct spi_master *master; @@ -30,8 +29,7 @@ struct spi_bitbang { /* txrx_word[SPI_MODE_*]() just looks like a shift register */ u32 (*txrx_word[4])(struct spi_device *spi, unsigned nsecs, - u32 word, u8 bits, unsigned flags); - int (*set_line_direction)(struct spi_device *spi, bool output); + u32 word, u8 bits); }; /* you can call these default bitbang->master methods from your custom @@ -44,7 +42,6 @@ extern int spi_bitbang_setup_transfer(struct spi_device *spi, /* start or stop queue processing */ extern int spi_bitbang_start(struct spi_bitbang *spi); -extern int spi_bitbang_init(struct spi_bitbang *spi); extern void spi_bitbang_stop(struct spi_bitbang *spi); #endif /* __SPI_BITBANG_H */ diff --git a/include/linux/spi/spi_gpio.h b/include/linux/spi/spi_gpio.h index 9e7e83d864..1634ce31c0 100644 --- a/include/linux/spi/spi_gpio.h +++ b/include/linux/spi/spi_gpio.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef __LINUX_SPI_GPIO_H #define __LINUX_SPI_GPIO_H @@ -8,17 +7,64 @@ * - id the same as the SPI bus number it implements * - 
dev.platform data pointing to a struct spi_gpio_platform_data * - * Use spi_board_info with these busses in the usual way. + * Or, see the driver code for information about speedups that are + * possible on platforms that support inlined access for GPIOs (no + * spi_gpio_platform_data is used). + * + * Use spi_board_info with these busses in the usual way, being sure + * that the controller_data being the GPIO used for each device's + * chipselect: + * + * static struct spi_board_info ... [] = { + * ... + * // this slave uses GPIO 42 for its chipselect + * .controller_data = (void *) 42, + * ... + * // this one uses GPIO 86 for its chipselect + * .controller_data = (void *) 86, + * ... + * }; + * + * If chipselect is not used (there's only one device on the bus), assign + * SPI_GPIO_NO_CHIPSELECT to the controller_data: + * .controller_data = (void *) SPI_GPIO_NO_CHIPSELECT; + * + * If the MISO or MOSI pin is not available then it should be set to + * SPI_GPIO_NO_MISO or SPI_GPIO_NO_MOSI. * * If the bitbanged bus is later switched to a "native" controller, * that platform_device and controller_data should be removed. */ +#define SPI_GPIO_NO_CHIPSELECT ((unsigned long)-1l) +#define SPI_GPIO_NO_MISO ((unsigned long)-1l) +#define SPI_GPIO_NO_MOSI ((unsigned long)-1l) + /** * struct spi_gpio_platform_data - parameter for bitbanged SPI master + * @sck: number of the GPIO used for clock output + * @mosi: number of the GPIO used for Master Output, Slave In (MOSI) data + * @miso: number of the GPIO used for Master Input, Slave Output (MISO) data * @num_chipselect: how many slaves to allow + * + * All GPIO signals used with the SPI bus managed through this driver + * (chipselects, MOSI, MISO, SCK) must be configured as GPIOs, instead + * of some alternate function. 
+ * + * It can be convenient to use this driver with pins that have alternate + * functions associated with a "native" SPI controller if a driver for that + * controller is not available, or is missing important functionality. + * + * On platforms which can do so, configure MISO with a weak pullup unless + * there's an external pullup on that signal. That saves power by avoiding + * floating signals. (A weak pulldown would save power too, but many + * drivers expect to see all-ones data as the no slave "response".) */ struct spi_gpio_platform_data { + unsigned sck; + unsigned long mosi; + unsigned long miso; + u16 num_chipselect; }; diff --git a/include/linux/spi/spi_oc_tiny.h b/include/linux/spi/spi_oc_tiny.h index 284872ac13..1ac529cf4f 100644 --- a/include/linux/spi/spi_oc_tiny.h +++ b/include/linux/spi/spi_oc_tiny.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_SPI_SPI_OC_TINY_H #define _LINUX_SPI_SPI_OC_TINY_H @@ -6,12 +5,16 @@ * struct tiny_spi_platform_data - platform data of the OpenCores tiny SPI * @freq: input clock freq to the core. * @baudwidth: baud rate divider width of the core. + * @gpio_cs_count: number of gpio pins used for chipselect. + * @gpio_cs: array of gpio pins used for chipselect. * * freq and baudwidth are used only if the divider is programmable. 
*/ struct tiny_spi_platform_data { unsigned int freq; unsigned int baudwidth; + unsigned int gpio_cs_count; + int *gpio_cs; }; #endif /* _LINUX_SPI_SPI_OC_TINY_H */ diff --git a/include/linux/spi/tdo24m.h b/include/linux/spi/tdo24m.h index 48dd58ac53..7572d4e1fe 100644 --- a/include/linux/spi/tdo24m.h +++ b/include/linux/spi/tdo24m.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef __TDO24M_H__ #define __TDO24M_H__ diff --git a/include/linux/spi/tle62x0.h b/include/linux/spi/tle62x0.h index 5d74b9fffc..414c6fddfc 100644 --- a/include/linux/spi/tle62x0.h +++ b/include/linux/spi/tle62x0.h @@ -1,9 +1,17 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * tle62x0.h - platform glue to Infineon TLE62x0 driver chips * * Copyright 2007 Simtec Electronics * Ben Dooks + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. */ struct tle62x0_pdata { diff --git a/include/linux/spi/tsc2005.h b/include/linux/spi/tsc2005.h new file mode 100644 index 0000000000..563b3b1799 --- /dev/null +++ b/include/linux/spi/tsc2005.h @@ -0,0 +1,34 @@ +/* + * This file is part of TSC2005 touchscreen driver + * + * Copyright (C) 2009-2010 Nokia Corporation + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. 
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#ifndef _LINUX_SPI_TSC2005_H +#define _LINUX_SPI_TSC2005_H + +#include + +struct tsc2005_platform_data { + int ts_pressure_max; + int ts_pressure_fudge; + int ts_x_max; + int ts_x_fudge; + int ts_y_max; + int ts_y_fudge; + int ts_x_plate_ohm; + unsigned int esd_timeout_ms; + void (*set_reset)(bool enable); +}; + +#endif diff --git a/include/linux/spi/xilinx_spi.h b/include/linux/spi/xilinx_spi.h index c15d69d28e..333ecdfee0 100644 --- a/include/linux/spi/xilinx_spi.h +++ b/include/linux/spi/xilinx_spi.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef __LINUX_SPI_XILINX_SPI_H #define __LINUX_SPI_XILINX_SPI_H diff --git a/include/linux/spinlock.h b/include/linux/spinlock.h index 45310ea1b1..3275f16a55 100644 --- a/include/linux/spinlock.h +++ b/include/linux/spinlock.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef __LINUX_SPINLOCK_H #define __LINUX_SPINLOCK_H @@ -12,8 +11,6 @@ * asm/spinlock_types.h: contains the arch_spinlock_t/arch_rwlock_t and the * initializers * - * linux/spinlock_types_raw: - * The raw types and initializers * linux/spinlock_types.h: * defines the generic type and initializers * @@ -33,8 +30,6 @@ * contains the generic, simplified UP spinlock type. 
* (which is an empty structure on non-debug builds) * - * linux/spinlock_types_raw: - * The raw RT types and initializers * linux/spinlock_types.h: * defines the generic type and initializers * @@ -60,9 +55,7 @@ #include #include #include -#include #include -#include /* @@ -80,7 +73,7 @@ #define LOCK_SECTION_END \ ".previous\n\t" -#define __lockfunc __section(".spinlock.text") +#define __lockfunc __attribute__((section(".spinlock.text"))) /* * Pull the arch_spinlock_t and arch_rwlock_t definitions: @@ -98,13 +91,12 @@ #ifdef CONFIG_DEBUG_SPINLOCK extern void __raw_spin_lock_init(raw_spinlock_t *lock, const char *name, - struct lock_class_key *key, short inner); - -# define raw_spin_lock_init(lock) \ -do { \ - static struct lock_class_key __key; \ - \ - __raw_spin_lock_init((lock), #lock, &__key, LD_WAIT_SPIN); \ + struct lock_class_key *key); +# define raw_spin_lock_init(lock) \ +do { \ + static struct lock_class_key __key; \ + \ + __raw_spin_lock_init((lock), #lock, &__key); \ } while (0) #else @@ -114,105 +106,66 @@ do { \ #define raw_spin_is_locked(lock) arch_spin_is_locked(&(lock)->raw_lock) +#ifdef CONFIG_GENERIC_LOCKBREAK +#define raw_spin_is_contended(lock) ((lock)->break_lock) +#else + #ifdef arch_spin_is_contended #define raw_spin_is_contended(lock) arch_spin_is_contended(&(lock)->raw_lock) #else #define raw_spin_is_contended(lock) (((void)(lock), 0)) #endif /*arch_spin_is_contended*/ +#endif /* - * smp_mb__after_spinlock() provides the equivalent of a full memory barrier - * between program-order earlier lock acquisitions and program-order later - * memory accesses. 
- * - * This guarantees that the following two properties hold: - * - * 1) Given the snippet: - * - * { X = 0; Y = 0; } - * - * CPU0 CPU1 - * - * WRITE_ONCE(X, 1); WRITE_ONCE(Y, 1); - * spin_lock(S); smp_mb(); - * smp_mb__after_spinlock(); r1 = READ_ONCE(X); - * r0 = READ_ONCE(Y); - * spin_unlock(S); - * - * it is forbidden that CPU0 does not observe CPU1's store to Y (r0 = 0) - * and CPU1 does not observe CPU0's store to X (r1 = 0); see the comments - * preceding the call to smp_mb__after_spinlock() in __schedule() and in - * try_to_wake_up(). - * - * 2) Given the snippet: - * - * { X = 0; Y = 0; } - * - * CPU0 CPU1 CPU2 - * - * spin_lock(S); spin_lock(S); r1 = READ_ONCE(Y); - * WRITE_ONCE(X, 1); smp_mb__after_spinlock(); smp_rmb(); - * spin_unlock(S); r0 = READ_ONCE(X); r2 = READ_ONCE(X); - * WRITE_ONCE(Y, 1); - * spin_unlock(S); - * - * it is forbidden that CPU0's critical section executes before CPU1's - * critical section (r0 = 1), CPU2 observes CPU1's store to Y (r1 = 1) - * and CPU2 does not observe CPU0's store to X (r2 = 0); see the comments - * preceding the calls to smp_rmb() in try_to_wake_up() for similar - * snippets but "projected" onto two CPUs. - * - * Property (2) upgrades the lock to an RCsc lock. - * - * Since most load-store architectures implement ACQUIRE with an smp_mb() after - * the LL/SC loop, they need no further barriers. Similarly all our TSO - * architectures imply an smp_mb() for each atomic instruction and equally don't - * need more. - * - * Architectures that can implement ACQUIRE better need to take care. + * Despite its name it doesn't necessarily has to be a full barrier. + * It should only guarantee that a STORE before the critical section + * can not be reordered with LOADs and STOREs inside this section. + * spin_lock() is the one-way barrier, this LOAD can not escape out + * of the region. 
So the default implementation simply ensures that + * a STORE can not move into the critical section, smp_wmb() should + * serialize it with another STORE done by spin_lock(). */ -#ifndef smp_mb__after_spinlock -#define smp_mb__after_spinlock() do { } while (0) +#ifndef smp_mb__before_spinlock +#define smp_mb__before_spinlock() smp_wmb() #endif +/** + * raw_spin_unlock_wait - wait until the spinlock gets unlocked + * @lock: the spinlock in question. + */ +#define raw_spin_unlock_wait(lock) arch_spin_unlock_wait(&(lock)->raw_lock) + #ifdef CONFIG_DEBUG_SPINLOCK extern void do_raw_spin_lock(raw_spinlock_t *lock) __acquires(lock); #define do_raw_spin_lock_flags(lock, flags) do_raw_spin_lock(lock) extern int do_raw_spin_trylock(raw_spinlock_t *lock); extern void do_raw_spin_unlock(raw_spinlock_t *lock) __releases(lock); #else -static inline void do_raw_spin_lock(raw_spinlock_t *lock) __acquires(lock) +static inline void do_raw_spin_lock(raw_spinlock_t *lock) __acquires(lock); +static inline void do_raw_spin_lock(raw_spinlock_t *lock) { __acquire(lock); arch_spin_lock(&lock->raw_lock); - mmiowb_spin_lock(); } -#ifndef arch_spin_lock_flags -#define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock) -#endif - static inline void -do_raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long *flags) __acquires(lock) +do_raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long *flags) __acquires(lock); +static inline void +do_raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long *flags) { __acquire(lock); arch_spin_lock_flags(&lock->raw_lock, *flags); - mmiowb_spin_lock(); } static inline int do_raw_spin_trylock(raw_spinlock_t *lock) { - int ret = arch_spin_trylock(&(lock)->raw_lock); - - if (ret) - mmiowb_spin_lock(); - - return ret; + return arch_spin_trylock(&(lock)->raw_lock); } -static inline void do_raw_spin_unlock(raw_spinlock_t *lock) __releases(lock) +static inline void do_raw_spin_unlock(raw_spinlock_t *lock) __releases(lock); +static inline void 
do_raw_spin_unlock(raw_spinlock_t *lock) { - mmiowb_spin_unlock(); arch_spin_unlock(&lock->raw_lock); __release(lock); } @@ -220,7 +173,7 @@ static inline void do_raw_spin_unlock(raw_spinlock_t *lock) __releases(lock) /* * Define the various spin_lock methods. Note we define these - * regardless of whether CONFIG_SMP or CONFIG_PREEMPTION are set. The + * regardless of whether CONFIG_SMP or CONFIG_PREEMPT are set. The * various methods are defined as nops in the case they are not * required. */ @@ -231,6 +184,8 @@ static inline void do_raw_spin_unlock(raw_spinlock_t *lock) __releases(lock) #ifdef CONFIG_DEBUG_LOCK_ALLOC # define raw_spin_lock_nested(lock, subclass) \ _raw_spin_lock_nested(lock, subclass) +# define raw_spin_lock_bh_nested(lock, subclass) \ + _raw_spin_lock_bh_nested(lock, subclass) # define raw_spin_lock_nest_lock(lock, nest_lock) \ do { \ @@ -246,6 +201,7 @@ static inline void do_raw_spin_unlock(raw_spinlock_t *lock) __releases(lock) # define raw_spin_lock_nested(lock, subclass) \ _raw_spin_lock(((void)(subclass), (lock))) # define raw_spin_lock_nest_lock(lock, nest_lock) _raw_spin_lock(lock) +# define raw_spin_lock_bh_nested(lock, subclass) _raw_spin_lock_bh(lock) #endif #if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK) @@ -312,10 +268,14 @@ static inline void do_raw_spin_unlock(raw_spinlock_t *lock) __releases(lock) 1 : ({ local_irq_restore(flags); 0; }); \ }) -#ifndef CONFIG_PREEMPT_RT -/* Include rwlock functions for !RT */ +/** + * raw_spin_can_lock - would raw_spin_trylock() succeed? + * @lock: the spinlock in question. 
+ */ +#define raw_spin_can_lock(lock) (!raw_spin_is_locked(lock)) + +/* Include rwlock functions */ #include -#endif /* * Pull the _spin_*()/_read_*()/_write_*() functions/declarations: @@ -326,9 +286,6 @@ static inline void do_raw_spin_unlock(raw_spinlock_t *lock) __releases(lock) # include #endif -/* Non PREEMPT_RT kernel, map to raw spinlocks: */ -#ifndef CONFIG_PREEMPT_RT - /* * Map the spin_lock functions to the raw variants for PREEMPT_RT=n */ @@ -338,31 +295,19 @@ static __always_inline raw_spinlock_t *spinlock_check(spinlock_t *lock) return &lock->rlock; } -#ifdef CONFIG_DEBUG_SPINLOCK - -# define spin_lock_init(lock) \ -do { \ - static struct lock_class_key __key; \ - \ - __raw_spin_lock_init(spinlock_check(lock), \ - #lock, &__key, LD_WAIT_CONFIG); \ +#define spin_lock_init(_lock) \ +do { \ + spinlock_check(_lock); \ + raw_spin_lock_init(&(_lock)->rlock); \ } while (0) -#else - -# define spin_lock_init(_lock) \ -do { \ - spinlock_check(_lock); \ - *(_lock) = __SPIN_LOCK_UNLOCKED(_lock); \ -} while (0) - -#endif - +static __always_inline void spin_lock(spinlock_t *lock) __acquires(lock); static __always_inline void spin_lock(spinlock_t *lock) { raw_spin_lock(&lock->rlock); } +static __always_inline void spin_lock_bh(spinlock_t *lock) __acquires(lock); static __always_inline void spin_lock_bh(spinlock_t *lock) { raw_spin_lock_bh(&lock->rlock); @@ -378,11 +323,17 @@ do { \ raw_spin_lock_nested(spinlock_check(lock), subclass); \ } while (0) +#define spin_lock_bh_nested(lock, subclass) \ +do { \ + raw_spin_lock_bh_nested(spinlock_check(lock), subclass);\ +} while (0) + #define spin_lock_nest_lock(lock, nest_lock) \ do { \ raw_spin_lock_nest_lock(spinlock_check(lock), nest_lock); \ } while (0) +static __always_inline void spin_lock_irq(spinlock_t *lock) __acquires(lock); static __always_inline void spin_lock_irq(spinlock_t *lock) { raw_spin_lock_irq(&lock->rlock); @@ -398,21 +349,25 @@ do { \ raw_spin_lock_irqsave_nested(spinlock_check(lock), flags, subclass); 
\ } while (0) +static __always_inline void spin_unlock(spinlock_t *lock) __releases(lock); static __always_inline void spin_unlock(spinlock_t *lock) { raw_spin_unlock(&lock->rlock); } +static __always_inline void spin_unlock_bh(spinlock_t *lock) __releases(lock); static __always_inline void spin_unlock_bh(spinlock_t *lock) { raw_spin_unlock_bh(&lock->rlock); } +static __always_inline void spin_unlock_irq(spinlock_t *lock) __releases(lock); static __always_inline void spin_unlock_irq(spinlock_t *lock) { raw_spin_unlock_irq(&lock->rlock); } +static __always_inline void spin_unlock_irqrestore(spinlock_t *lock, unsigned long flags) __releases(lock); static __always_inline void spin_unlock_irqrestore(spinlock_t *lock, unsigned long flags) { raw_spin_unlock_irqrestore(&lock->rlock, flags); @@ -433,24 +388,11 @@ static __always_inline int spin_trylock_irq(spinlock_t *lock) raw_spin_trylock_irqsave(spinlock_check(lock), flags); \ }) -/** - * spin_is_locked() - Check whether a spinlock is locked. - * @lock: Pointer to the spinlock. - * - * This function is NOT required to provide any memory ordering - * guarantees; it could be used for debugging purposes or, when - * additional synchronization is needed, accompanied with other - * constructs (memory barriers) enforcing the synchronization. - * - * Returns: 1 if @lock is locked, 0 otherwise. - * - * Note that the function only tells you that the spinlock is - * seen to be locked, not that it is locked on your CPU. - * - * Further, on CONFIG_SMP=n builds with CONFIG_DEBUG_SPINLOCK=n, - * the return value is always 0 (see include/linux/spinlock_up.h). - * Therefore you should not rely heavily on the return value. 
- */ +static __always_inline void spin_unlock_wait(spinlock_t *lock) +{ + raw_spin_unlock_wait(&lock->rlock); +} + static __always_inline int spin_is_locked(spinlock_t *lock) { return raw_spin_is_locked(&lock->rlock); @@ -461,11 +403,12 @@ static __always_inline int spin_is_contended(spinlock_t *lock) return raw_spin_is_contended(&lock->rlock); } -#define assert_spin_locked(lock) assert_raw_spin_locked(&(lock)->rlock) +static __always_inline int spin_can_lock(spinlock_t *lock) +{ + return raw_spin_can_lock(&lock->rlock); +} -#else /* !CONFIG_PREEMPT_RT */ -# include -#endif /* CONFIG_PREEMPT_RT */ +#define assert_spin_locked(lock) assert_raw_spin_locked(&(lock)->rlock) /* * Pull the atomic_t declaration: @@ -484,26 +427,4 @@ extern int _atomic_dec_and_lock(atomic_t *atomic, spinlock_t *lock); #define atomic_dec_and_lock(atomic, lock) \ __cond_lock(lock, _atomic_dec_and_lock(atomic, lock)) -extern int _atomic_dec_and_lock_irqsave(atomic_t *atomic, spinlock_t *lock, - unsigned long *flags); -#define atomic_dec_and_lock_irqsave(atomic, lock, flags) \ - __cond_lock(lock, _atomic_dec_and_lock_irqsave(atomic, lock, &(flags))) - -int __alloc_bucket_spinlocks(spinlock_t **locks, unsigned int *lock_mask, - size_t max_size, unsigned int cpu_mult, - gfp_t gfp, const char *name, - struct lock_class_key *key); - -#define alloc_bucket_spinlocks(locks, lock_mask, max_size, cpu_mult, gfp) \ - ({ \ - static struct lock_class_key key; \ - int ret; \ - \ - ret = __alloc_bucket_spinlocks(locks, lock_mask, max_size, \ - cpu_mult, gfp, #locks, &key); \ - ret; \ - }) - -void free_bucket_spinlocks(spinlock_t *locks); - #endif /* __LINUX_SPINLOCK_H */ diff --git a/include/linux/spinlock_api_smp.h b/include/linux/spinlock_api_smp.h index 6b8e1a0b13..5344268e6e 100644 --- a/include/linux/spinlock_api_smp.h +++ b/include/linux/spinlock_api_smp.h @@ -22,6 +22,8 @@ int in_lock_functions(unsigned long addr); void __lockfunc _raw_spin_lock(raw_spinlock_t *lock) __acquires(lock); void __lockfunc 
_raw_spin_lock_nested(raw_spinlock_t *lock, int subclass) __acquires(lock); +void __lockfunc _raw_spin_lock_bh_nested(raw_spinlock_t *lock, int subclass) + __acquires(lock); void __lockfunc _raw_spin_lock_nest_lock(raw_spinlock_t *lock, struct lockdep_map *map) __acquires(lock); @@ -96,7 +98,7 @@ static inline int __raw_spin_trylock(raw_spinlock_t *lock) /* * If lockdep is enabled then we use the non-preemption spin-ops - * even on CONFIG_PREEMPTION, because lockdep assumes that interrupts are + * even on CONFIG_PREEMPT, because lockdep assumes that interrupts are * not re-enabled during lock-acquire (which the preempt-spin-ops do): */ #if !defined(CONFIG_GENERIC_LOCKBREAK) || defined(CONFIG_DEBUG_LOCK_ALLOC) @@ -147,7 +149,7 @@ static inline void __raw_spin_lock(raw_spinlock_t *lock) static inline void __raw_spin_unlock(raw_spinlock_t *lock) { - spin_release(&lock->dep_map, _RET_IP_); + spin_release(&lock->dep_map, 1, _RET_IP_); do_raw_spin_unlock(lock); preempt_enable(); } @@ -155,7 +157,7 @@ static inline void __raw_spin_unlock(raw_spinlock_t *lock) static inline void __raw_spin_unlock_irqrestore(raw_spinlock_t *lock, unsigned long flags) { - spin_release(&lock->dep_map, _RET_IP_); + spin_release(&lock->dep_map, 1, _RET_IP_); do_raw_spin_unlock(lock); local_irq_restore(flags); preempt_enable(); @@ -163,7 +165,7 @@ static inline void __raw_spin_unlock_irqrestore(raw_spinlock_t *lock, static inline void __raw_spin_unlock_irq(raw_spinlock_t *lock) { - spin_release(&lock->dep_map, _RET_IP_); + spin_release(&lock->dep_map, 1, _RET_IP_); do_raw_spin_unlock(lock); local_irq_enable(); preempt_enable(); @@ -171,7 +173,7 @@ static inline void __raw_spin_unlock_irq(raw_spinlock_t *lock) static inline void __raw_spin_unlock_bh(raw_spinlock_t *lock) { - spin_release(&lock->dep_map, _RET_IP_); + spin_release(&lock->dep_map, 1, _RET_IP_); do_raw_spin_unlock(lock); __local_bh_enable_ip(_RET_IP_, SOFTIRQ_LOCK_OFFSET); } @@ -187,9 +189,6 @@ static inline int 
__raw_spin_trylock_bh(raw_spinlock_t *lock) return 0; } -/* PREEMPT_RT has its own rwlock implementation */ -#ifndef CONFIG_PREEMPT_RT #include -#endif #endif /* __LINUX_SPINLOCK_API_SMP_H */ diff --git a/include/linux/spinlock_api_up.h b/include/linux/spinlock_api_up.h index d0d188861a..d3afef9d8d 100644 --- a/include/linux/spinlock_api_up.h +++ b/include/linux/spinlock_api_up.h @@ -57,6 +57,7 @@ #define _raw_spin_lock(lock) __LOCK(lock) #define _raw_spin_lock_nested(lock, subclass) __LOCK(lock) +#define _raw_spin_lock_bh_nested(lock, subclass) __LOCK(lock) #define _raw_read_lock(lock) __LOCK(lock) #define _raw_write_lock(lock) __LOCK(lock) #define _raw_spin_lock_bh(lock) __LOCK_BH(lock) diff --git a/include/linux/spinlock_types.h b/include/linux/spinlock_types.h index 2dfa35ffec..73548eb13a 100644 --- a/include/linux/spinlock_types.h +++ b/include/linux/spinlock_types.h @@ -9,11 +9,58 @@ * Released under the General Public License (GPL). */ -#include +#if defined(CONFIG_SMP) +# include +#else +# include +#endif -#ifndef CONFIG_PREEMPT_RT +#include + +typedef struct raw_spinlock { + arch_spinlock_t raw_lock; +#ifdef CONFIG_GENERIC_LOCKBREAK + unsigned int break_lock; +#endif +#ifdef CONFIG_DEBUG_SPINLOCK + unsigned int magic, owner_cpu; + void *owner; +#endif +#ifdef CONFIG_DEBUG_LOCK_ALLOC + struct lockdep_map dep_map; +#endif +} raw_spinlock_t; + +#define SPINLOCK_MAGIC 0xdead4ead + +#define SPINLOCK_OWNER_INIT ((void *)-1L) + +#ifdef CONFIG_DEBUG_LOCK_ALLOC +# define SPIN_DEP_MAP_INIT(lockname) .dep_map = { .name = #lockname } +#else +# define SPIN_DEP_MAP_INIT(lockname) +#endif + +#ifdef CONFIG_DEBUG_SPINLOCK +# define SPIN_DEBUG_INIT(lockname) \ + .magic = SPINLOCK_MAGIC, \ + .owner_cpu = -1, \ + .owner = SPINLOCK_OWNER_INIT, +#else +# define SPIN_DEBUG_INIT(lockname) +#endif + +#define __RAW_SPIN_LOCK_INITIALIZER(lockname) \ + { \ + .raw_lock = __ARCH_SPIN_LOCK_UNLOCKED, \ + SPIN_DEBUG_INIT(lockname) \ + SPIN_DEP_MAP_INIT(lockname) } + +#define 
__RAW_SPIN_LOCK_UNLOCKED(lockname) \ + (raw_spinlock_t) __RAW_SPIN_LOCK_INITIALIZER(lockname) + +#define DEFINE_RAW_SPINLOCK(x) raw_spinlock_t x = __RAW_SPIN_LOCK_UNLOCKED(x) -/* Non PREEMPT_RT kernels map spinlock to raw_spinlock */ typedef struct spinlock { union { struct raw_spinlock rlock; @@ -28,49 +75,14 @@ typedef struct spinlock { }; } spinlock_t; -#define ___SPIN_LOCK_INITIALIZER(lockname) \ - { \ - .raw_lock = __ARCH_SPIN_LOCK_UNLOCKED, \ - SPIN_DEBUG_INIT(lockname) \ - SPIN_DEP_MAP_INIT(lockname) } - #define __SPIN_LOCK_INITIALIZER(lockname) \ - { { .rlock = ___SPIN_LOCK_INITIALIZER(lockname) } } + { { .rlock = __RAW_SPIN_LOCK_INITIALIZER(lockname) } } #define __SPIN_LOCK_UNLOCKED(lockname) \ - (spinlock_t) __SPIN_LOCK_INITIALIZER(lockname) + (spinlock_t ) __SPIN_LOCK_INITIALIZER(lockname) #define DEFINE_SPINLOCK(x) spinlock_t x = __SPIN_LOCK_UNLOCKED(x) -#else /* !CONFIG_PREEMPT_RT */ - -/* PREEMPT_RT kernels map spinlock to rt_mutex */ -#include - -typedef struct spinlock { - struct rt_mutex_base lock; -#ifdef CONFIG_DEBUG_LOCK_ALLOC - struct lockdep_map dep_map; -#endif -} spinlock_t; - -#define __SPIN_LOCK_UNLOCKED(name) \ - { \ - .lock = __RT_MUTEX_BASE_INITIALIZER(name.lock), \ - SPIN_DEP_MAP_INIT(name) \ - } - -#define __LOCAL_SPIN_LOCK_UNLOCKED(name) \ - { \ - .lock = __RT_MUTEX_BASE_INITIALIZER(name.lock), \ - LOCAL_SPIN_DEP_MAP_INIT(name) \ - } - -#define DEFINE_SPINLOCK(name) \ - spinlock_t name = __SPIN_LOCK_UNLOCKED(name) - -#endif /* CONFIG_PREEMPT_RT */ - #include #endif /* __LINUX_SPINLOCK_TYPES_H */ diff --git a/include/linux/spinlock_up.h b/include/linux/spinlock_up.h index 0ac9112c1b..0d9848de67 100644 --- a/include/linux/spinlock_up.h +++ b/include/linux/spinlock_up.h @@ -26,12 +26,25 @@ #ifdef CONFIG_DEBUG_SPINLOCK #define arch_spin_is_locked(x) ((x)->slock == 0) +static inline void arch_spin_unlock_wait(arch_spinlock_t *lock) +{ + smp_cond_load_acquire(&lock->slock, VAL); +} + static inline void arch_spin_lock(arch_spinlock_t *lock) 
{ lock->slock = 0; barrier(); } +static inline void +arch_spin_lock_flags(arch_spinlock_t *lock, unsigned long flags) +{ + local_irq_save(flags); + lock->slock = 0; + barrier(); +} + static inline int arch_spin_trylock(arch_spinlock_t *lock) { char oldval = lock->slock; @@ -60,6 +73,7 @@ static inline void arch_spin_unlock(arch_spinlock_t *lock) #else /* DEBUG_SPINLOCK */ #define arch_spin_is_locked(lock) ((void)(lock), 0) +#define arch_spin_unlock_wait(lock) do { barrier(); (void)(lock); } while (0) /* for sched/core.c and kernel_lock.c: */ # define arch_spin_lock(lock) do { barrier(); (void)(lock); } while (0) # define arch_spin_lock_flags(lock, flags) do { barrier(); (void)(lock); } while (0) @@ -69,4 +83,7 @@ static inline void arch_spin_unlock(arch_spinlock_t *lock) #define arch_spin_is_contended(lock) (((void)(lock), 0)) +#define arch_read_can_lock(lock) (((void)(lock), 1)) +#define arch_write_can_lock(lock) (((void)(lock), 1)) + #endif /* __LINUX_SPINLOCK_UP_H */ diff --git a/include/linux/splice.h b/include/linux/splice.h index a55179fd60..00a21166e2 100644 --- a/include/linux/splice.h +++ b/include/linux/splice.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ /* * Function declerations and data structures related to the splice * implementation. 
@@ -21,8 +20,6 @@ #define SPLICE_F_MORE (0x04) /* expect more data */ #define SPLICE_F_GIFT (0x08) /* pages passed in are a gift */ -#define SPLICE_F_ALL (SPLICE_F_MOVE|SPLICE_F_NONBLOCK|SPLICE_F_MORE|SPLICE_F_GIFT) - /* * Passed to the actors */ @@ -58,6 +55,7 @@ struct splice_pipe_desc { struct partial_page *partial; /* pages[] may not be contig */ int nr_pages; /* number of populated pages in map */ unsigned int nr_pages_max; /* pages[] & partial[] arrays size */ + unsigned int flags; /* splice flags */ const struct pipe_buf_operations *ops;/* ops associated with output pipe */ void (*spd_release)(struct splice_pipe_desc *, unsigned int); }; @@ -78,18 +76,13 @@ extern ssize_t add_to_pipe(struct pipe_inode_info *, struct pipe_buffer *); extern ssize_t splice_direct_to_actor(struct file *, struct splice_desc *, splice_direct_actor *); -extern long do_splice(struct file *in, loff_t *off_in, - struct file *out, loff_t *off_out, - size_t len, unsigned int flags); - -extern long do_tee(struct file *in, struct file *out, size_t len, - unsigned int flags); /* * for dynamic pipe sizing */ extern int splice_grow_spd(const struct pipe_inode_info *, struct splice_pipe_desc *); extern void splice_shrink_spd(struct splice_pipe_desc *); +extern void spd_release_page(struct splice_pipe_desc *, unsigned int); extern const struct pipe_buf_operations page_cache_pipe_buf_ops; extern const struct pipe_buf_operations default_pipe_buf_ops; diff --git a/include/linux/spmi.h b/include/linux/spmi.h index 729bcbf9f5..1396a255d2 100644 --- a/include/linux/spmi.h +++ b/include/linux/spmi.h @@ -1,5 +1,13 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* Copyright (c) 2012-2013, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. 
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. */ #ifndef _LINUX_SPMI_H #define _LINUX_SPMI_H @@ -138,7 +146,6 @@ struct spmi_driver { struct device_driver driver; int (*probe)(struct spmi_device *sdev); void (*remove)(struct spmi_device *sdev); - void (*shutdown)(struct spmi_device *sdev); }; static inline struct spmi_driver *to_spmi_driver(struct device_driver *d) diff --git a/include/linux/srcu.h b/include/linux/srcu.h index e6011a9975..b4b9482cfa 100644 --- a/include/linux/srcu.h +++ b/include/linux/srcu.h @@ -1,15 +1,28 @@ -/* SPDX-License-Identifier: GPL-2.0+ */ /* * Sleepable Read-Copy Update mechanism for mutual exclusion * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, you can access it online at + * http://www.gnu.org/licenses/gpl-2.0.html. 
+ * * Copyright (C) IBM Corporation, 2006 * Copyright (C) Fujitsu, 2012 * - * Author: Paul McKenney + * Author: Paul McKenney * Lai Jiangshan * * For detailed explanation of Read-Copy Update mechanism see - - * Documentation/RCU/ *.txt + * Documentation/RCU/ *.txt * */ @@ -19,62 +32,129 @@ #include #include #include -#include -struct srcu_struct; +struct srcu_struct_array { + unsigned long c[2]; + unsigned long seq[2]; +}; + +struct rcu_batch { + struct rcu_head *head, **tail; +}; + +#define RCU_BATCH_INIT(name) { NULL, &(name.head) } + +struct srcu_struct { + unsigned long completed; + struct srcu_struct_array __percpu *per_cpu_ref; + spinlock_t queue_lock; /* protect ->batch_queue, ->running */ + bool running; + /* callbacks just queued */ + struct rcu_batch batch_queue; + /* callbacks try to do the first check_zero */ + struct rcu_batch batch_check0; + /* callbacks done with the first check_zero and the flip */ + struct rcu_batch batch_check1; + struct rcu_batch batch_done; + struct delayed_work work; +#ifdef CONFIG_DEBUG_LOCK_ALLOC + struct lockdep_map dep_map; +#endif /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */ +}; #ifdef CONFIG_DEBUG_LOCK_ALLOC -int __init_srcu_struct(struct srcu_struct *ssp, const char *name, +int __init_srcu_struct(struct srcu_struct *sp, const char *name, struct lock_class_key *key); -#define init_srcu_struct(ssp) \ +#define init_srcu_struct(sp) \ ({ \ static struct lock_class_key __srcu_key; \ \ - __init_srcu_struct((ssp), #ssp, &__srcu_key); \ + __init_srcu_struct((sp), #sp, &__srcu_key); \ }) #define __SRCU_DEP_MAP_INIT(srcu_name) .dep_map = { .name = #srcu_name }, #else /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */ -int init_srcu_struct(struct srcu_struct *ssp); +int init_srcu_struct(struct srcu_struct *sp); #define __SRCU_DEP_MAP_INIT(srcu_name) #endif /* #else #ifdef CONFIG_DEBUG_LOCK_ALLOC */ -#ifdef CONFIG_TINY_SRCU -#include -#elif defined(CONFIG_TREE_SRCU) -#include -#elif defined(CONFIG_SRCU) -#error "Unknown SRCU implementation specified to 
kernel configuration" -#else -/* Dummy definition for things like notifiers. Actual use gets link error. */ -struct srcu_struct { }; -#endif +void process_srcu(struct work_struct *work); -void call_srcu(struct srcu_struct *ssp, struct rcu_head *head, +#define __SRCU_STRUCT_INIT(name) \ + { \ + .completed = -300, \ + .per_cpu_ref = &name##_srcu_array, \ + .queue_lock = __SPIN_LOCK_UNLOCKED(name.queue_lock), \ + .running = false, \ + .batch_queue = RCU_BATCH_INIT(name.batch_queue), \ + .batch_check0 = RCU_BATCH_INIT(name.batch_check0), \ + .batch_check1 = RCU_BATCH_INIT(name.batch_check1), \ + .batch_done = RCU_BATCH_INIT(name.batch_done), \ + .work = __DELAYED_WORK_INITIALIZER(name.work, process_srcu, 0),\ + __SRCU_DEP_MAP_INIT(name) \ + } + +/* + * Define and initialize a srcu struct at build time. + * Do -not- call init_srcu_struct() nor cleanup_srcu_struct() on it. + * + * Note that although DEFINE_STATIC_SRCU() hides the name from other + * files, the per-CPU variable rules nevertheless require that the + * chosen name be globally unique. These rules also prohibit use of + * DEFINE_STATIC_SRCU() within a function. If these rules are too + * restrictive, declare the srcu_struct manually. For example, in + * each file: + * + * static struct srcu_struct my_srcu; + * + * Then, before the first use of each my_srcu, manually initialize it: + * + * init_srcu_struct(&my_srcu); + * + * See include/linux/percpu-defs.h for the rules on per-CPU variables. + */ +#define __DEFINE_SRCU(name, is_static) \ + static DEFINE_PER_CPU(struct srcu_struct_array, name##_srcu_array);\ + is_static struct srcu_struct name = __SRCU_STRUCT_INIT(name) +#define DEFINE_SRCU(name) __DEFINE_SRCU(name, /* not static */) +#define DEFINE_STATIC_SRCU(name) __DEFINE_SRCU(name, static) + +/** + * call_srcu() - Queue a callback for invocation after an SRCU grace period + * @sp: srcu_struct in queue the callback + * @head: structure to be used for queueing the SRCU callback. 
+ * @func: function to be invoked after the SRCU grace period + * + * The callback function will be invoked some time after a full SRCU + * grace period elapses, in other words after all pre-existing SRCU + * read-side critical sections have completed. However, the callback + * function might well execute concurrently with other SRCU read-side + * critical sections that started after call_srcu() was invoked. SRCU + * read-side critical sections are delimited by srcu_read_lock() and + * srcu_read_unlock(), and may be nested. + * + * The callback will be invoked from process context, but must nevertheless + * be fast and must not block. + */ +void call_srcu(struct srcu_struct *sp, struct rcu_head *head, void (*func)(struct rcu_head *head)); -void cleanup_srcu_struct(struct srcu_struct *ssp); -int __srcu_read_lock(struct srcu_struct *ssp) __acquires(ssp); -void __srcu_read_unlock(struct srcu_struct *ssp, int idx) __releases(ssp); -void synchronize_srcu(struct srcu_struct *ssp); -unsigned long get_state_synchronize_srcu(struct srcu_struct *ssp); -unsigned long start_poll_synchronize_srcu(struct srcu_struct *ssp); -bool poll_state_synchronize_srcu(struct srcu_struct *ssp, unsigned long cookie); -#ifdef CONFIG_SRCU -void srcu_init(void); -#else /* #ifdef CONFIG_SRCU */ -static inline void srcu_init(void) { } -#endif /* #else #ifdef CONFIG_SRCU */ +void cleanup_srcu_struct(struct srcu_struct *sp); +int __srcu_read_lock(struct srcu_struct *sp) __acquires(sp); +void __srcu_read_unlock(struct srcu_struct *sp, int idx) __releases(sp); +void synchronize_srcu(struct srcu_struct *sp); +void synchronize_srcu_expedited(struct srcu_struct *sp); +unsigned long srcu_batches_completed(struct srcu_struct *sp); +void srcu_barrier(struct srcu_struct *sp); #ifdef CONFIG_DEBUG_LOCK_ALLOC /** * srcu_read_lock_held - might we be in SRCU read-side critical section? 
- * @ssp: The srcu_struct structure to check * * If CONFIG_DEBUG_LOCK_ALLOC is selected, returns nonzero iff in an SRCU * read-side critical section. In absence of CONFIG_DEBUG_LOCK_ALLOC, @@ -88,16 +168,16 @@ static inline void srcu_init(void) { } * relies on normal RCU, it can be called from the CPU which * is in the idle loop from an RCU point of view or offline. */ -static inline int srcu_read_lock_held(const struct srcu_struct *ssp) +static inline int srcu_read_lock_held(struct srcu_struct *sp) { if (!debug_lockdep_rcu_enabled()) return 1; - return lock_is_held(&ssp->dep_map); + return lock_is_held(&sp->dep_map); } #else /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */ -static inline int srcu_read_lock_held(const struct srcu_struct *ssp) +static inline int srcu_read_lock_held(struct srcu_struct *sp) { return 1; } @@ -107,7 +187,7 @@ static inline int srcu_read_lock_held(const struct srcu_struct *ssp) /** * srcu_dereference_check - fetch SRCU-protected pointer for later dereferencing * @p: the pointer to fetch and protect for later dereferencing - * @ssp: pointer to the srcu_struct, which is used to check that we + * @sp: pointer to the srcu_struct, which is used to check that we * really are in an SRCU read-side critical section. * @c: condition to check for update-side use * @@ -116,32 +196,24 @@ static inline int srcu_read_lock_held(const struct srcu_struct *ssp) * to 1. The @c argument will normally be a logical expression containing * lockdep_is_held() calls. 
*/ -#define srcu_dereference_check(p, ssp, c) \ - __rcu_dereference_check((p), (c) || srcu_read_lock_held(ssp), __rcu) +#define srcu_dereference_check(p, sp, c) \ + __rcu_dereference_check((p), (c) || srcu_read_lock_held(sp), __rcu) /** * srcu_dereference - fetch SRCU-protected pointer for later dereferencing * @p: the pointer to fetch and protect for later dereferencing - * @ssp: pointer to the srcu_struct, which is used to check that we + * @sp: pointer to the srcu_struct, which is used to check that we * really are in an SRCU read-side critical section. * * Makes rcu_dereference_check() do the dirty work. If PROVE_RCU * is enabled, invoking this outside of an RCU read-side critical * section will result in an RCU-lockdep splat. */ -#define srcu_dereference(p, ssp) srcu_dereference_check((p), (ssp), 0) - -/** - * srcu_dereference_notrace - no tracing and no lockdep calls from here - * @p: the pointer to fetch and protect for later dereferencing - * @ssp: pointer to the srcu_struct, which is used to check that we - * really are in an SRCU read-side critical section. - */ -#define srcu_dereference_notrace(p, ssp) srcu_dereference_check((p), (ssp), 1) +#define srcu_dereference(p, sp) srcu_dereference_check((p), (sp), 0) /** * srcu_read_lock - register a new reader for an SRCU-protected structure. - * @ssp: srcu_struct in which to register the new reader. + * @sp: srcu_struct in which to register the new reader. * * Enter an SRCU read-side critical section. Note that SRCU read-side * critical sections may be nested. However, it is illegal to @@ -156,45 +228,30 @@ static inline int srcu_read_lock_held(const struct srcu_struct *ssp) * srcu_read_unlock() in an irq handler if the matching srcu_read_lock() * was invoked in process context. 
*/ -static inline int srcu_read_lock(struct srcu_struct *ssp) __acquires(ssp) +static inline int srcu_read_lock(struct srcu_struct *sp) __acquires(sp); +static inline int srcu_read_lock(struct srcu_struct *sp) { int retval; - retval = __srcu_read_lock(ssp); - rcu_lock_acquire(&(ssp)->dep_map); - return retval; -} - -/* Used by tracing, cannot be traced and cannot invoke lockdep. */ -static inline notrace int -srcu_read_lock_notrace(struct srcu_struct *ssp) __acquires(ssp) -{ - int retval; - - retval = __srcu_read_lock(ssp); + preempt_disable(); + retval = __srcu_read_lock(sp); + preempt_enable(); + rcu_lock_acquire(&(sp)->dep_map); return retval; } /** * srcu_read_unlock - unregister a old reader from an SRCU-protected structure. - * @ssp: srcu_struct in which to unregister the old reader. + * @sp: srcu_struct in which to unregister the old reader. * @idx: return value from corresponding srcu_read_lock(). * * Exit an SRCU read-side critical section. */ -static inline void srcu_read_unlock(struct srcu_struct *ssp, int idx) - __releases(ssp) +static inline void srcu_read_unlock(struct srcu_struct *sp, int idx) __releases(sp); +static inline void srcu_read_unlock(struct srcu_struct *sp, int idx) { - WARN_ON_ONCE(idx & ~0x1); - rcu_lock_release(&(ssp)->dep_map); - __srcu_read_unlock(ssp, idx); -} - -/* Used by tracing, cannot be traced and cannot call lockdep. 
*/ -static inline notrace void -srcu_read_unlock_notrace(struct srcu_struct *ssp, int idx) __releases(ssp) -{ - __srcu_read_unlock(ssp, idx); + rcu_lock_release(&(sp)->dep_map); + __srcu_read_unlock(sp, idx); } /** diff --git a/include/linux/ssb/ssb.h b/include/linux/ssb/ssb.h index f9b53acb4e..26a0b3c3ce 100644 --- a/include/linux/ssb/ssb.h +++ b/include/linux/ssb/ssb.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef LINUX_SSB_H_ #define LINUX_SSB_H_ @@ -7,7 +6,7 @@ #include #include #include -#include +#include #include #include #include @@ -499,9 +498,11 @@ struct ssb_bus { /* Internal-only stuff follows. Do not touch. */ struct list_head list; +#ifdef CONFIG_SSB_DEBUG /* Is the bus already powered up? */ bool powered_up; int power_warn_count; +#endif /* DEBUG */ }; enum ssb_quirks { diff --git a/include/linux/ssb/ssb_driver_chipcommon.h b/include/linux/ssb/ssb_driver_chipcommon.h index c44335b011..6fcfe99bd9 100644 --- a/include/linux/ssb/ssb_driver_chipcommon.h +++ b/include/linux/ssb/ssb_driver_chipcommon.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ #ifndef LINUX_SSB_CHIPCO_H_ #define LINUX_SSB_CHIPCO_H_ @@ -10,6 +9,8 @@ * * Copyright 2005, Broadcom Corporation * Copyright 2006, Michael Buesch + * + * Licensed under the GPL version 2. See COPYING for details. */ /** ChipCommon core registers. **/ diff --git a/include/linux/ssb/ssb_driver_extif.h b/include/linux/ssb/ssb_driver_extif.h index 19253bfacd..a410e841eb 100644 --- a/include/linux/ssb/ssb_driver_extif.h +++ b/include/linux/ssb/ssb_driver_extif.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * Hardware-specific External Interface I/O core definitions * for the BCM47xx family of SiliconBackplane-based chips. @@ -15,6 +14,8 @@ * * Copyright 2005, Broadcom Corporation * Copyright 2006, Michael Buesch + * + * Licensed under the GPL version 2. See COPYING for details. 
*/ #ifndef LINUX_SSB_EXTIFCORE_H_ #define LINUX_SSB_EXTIFCORE_H_ @@ -197,7 +198,7 @@ struct ssb_extif { static inline bool ssb_extif_available(struct ssb_extif *extif) { - return false; + return 0; } static inline diff --git a/include/linux/ssb/ssb_driver_gige.h b/include/linux/ssb/ssb_driver_gige.h index 15ba0df1ee..0688472500 100644 --- a/include/linux/ssb/ssb_driver_gige.h +++ b/include/linux/ssb/ssb_driver_gige.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef LINUX_SSB_DRIVER_GIGE_H_ #define LINUX_SSB_DRIVER_GIGE_H_ @@ -76,7 +75,7 @@ static inline bool ssb_gige_have_roboswitch(struct pci_dev *pdev) if (dev) return !!(dev->dev->bus->sprom.boardflags_lo & SSB_GIGE_BFL_ROBOSWITCH); - return false; + return 0; } /* Returns whether we can only do one DMA at once. */ @@ -86,7 +85,7 @@ static inline bool ssb_gige_one_dma_at_once(struct pci_dev *pdev) if (dev) return ((dev->dev->bus->chip_id == 0x4785) && (dev->dev->bus->chip_rev < 2)); - return false; + return 0; } /* Returns whether we must flush posted writes. 
*/ @@ -159,7 +158,7 @@ static inline void ssb_gige_exit(void) static inline bool pdev_is_ssb_gige_core(struct pci_dev *pdev) { - return false; + return 0; } static inline struct ssb_gige * pdev_to_ssb_gige(struct pci_dev *pdev) { @@ -167,19 +166,19 @@ static inline struct ssb_gige * pdev_to_ssb_gige(struct pci_dev *pdev) } static inline bool ssb_gige_is_rgmii(struct pci_dev *pdev) { - return false; + return 0; } static inline bool ssb_gige_have_roboswitch(struct pci_dev *pdev) { - return false; + return 0; } static inline bool ssb_gige_one_dma_at_once(struct pci_dev *pdev) { - return false; + return 0; } static inline bool ssb_gige_must_flush_posted_writes(struct pci_dev *pdev) { - return false; + return 0; } static inline int ssb_gige_get_macaddr(struct pci_dev *pdev, u8 *macaddr) { diff --git a/include/linux/ssb/ssb_driver_mips.h b/include/linux/ssb/ssb_driver_mips.h index bef6bba32d..6535e4718f 100644 --- a/include/linux/ssb/ssb_driver_mips.h +++ b/include/linux/ssb/ssb_driver_mips.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef LINUX_SSB_MIPSCORE_H_ #define LINUX_SSB_MIPSCORE_H_ diff --git a/include/linux/ssb/ssb_driver_pci.h b/include/linux/ssb/ssb_driver_pci.h index 42824bdfe1..41e330e51c 100644 --- a/include/linux/ssb/ssb_driver_pci.h +++ b/include/linux/ssb/ssb_driver_pci.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef LINUX_SSB_PCICORE_H_ #define LINUX_SSB_PCICORE_H_ diff --git a/include/linux/ssb/ssb_embedded.h b/include/linux/ssb/ssb_embedded.h index 49604ac3db..8d8dedff05 100644 --- a/include/linux/ssb/ssb_embedded.h +++ b/include/linux/ssb/ssb_embedded.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef LINUX_SSB_EMBEDDED_H_ #define LINUX_SSB_EMBEDDED_H_ diff --git a/include/linux/ssb/ssb_regs.h b/include/linux/ssb/ssb_regs.h index 210f464940..c0f707ac19 100644 --- a/include/linux/ssb/ssb_regs.h +++ b/include/linux/ssb/ssb_regs.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef 
LINUX_SSB_REGS_H_ #define LINUX_SSB_REGS_H_ diff --git a/include/linux/ssbi.h b/include/linux/ssbi.h index 61007afba0..087b08a4d3 100644 --- a/include/linux/ssbi.h +++ b/include/linux/ssbi.h @@ -1,7 +1,15 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* Copyright (C) 2010 Google, Inc. * Copyright (c) 2011, Code Aurora Forum. All rights reserved. * Author: Dima Zavin + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. */ #ifndef _LINUX_SSBI_H diff --git a/include/linux/stackdepot.h b/include/linux/stackdepot.h index 6bb4bc1a5f..7978b3e2c1 100644 --- a/include/linux/stackdepot.h +++ b/include/linux/stackdepot.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * A generic stack depot implementation * @@ -6,6 +5,17 @@ * Copyright (C) 2016 Google, Inc. * * Based on code by Dmitry Chernenkov. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * */ #ifndef _LINUX_STACKDEPOT_H @@ -13,21 +23,10 @@ typedef u32 depot_stack_handle_t; -depot_stack_handle_t stack_depot_save(unsigned long *entries, - unsigned int nr_entries, gfp_t gfp_flags); +struct stack_trace; -unsigned int stack_depot_fetch(depot_stack_handle_t handle, - unsigned long **entries); +depot_stack_handle_t depot_save_stack(struct stack_trace *trace, gfp_t flags); -unsigned int filter_irq_stacks(unsigned long *entries, unsigned int nr_entries); - -#ifdef CONFIG_STACKDEPOT -int stack_depot_init(void); -#else -static inline int stack_depot_init(void) -{ - return 0; -} -#endif /* CONFIG_STACKDEPOT */ +void depot_fetch_stack(depot_stack_handle_t handle, struct stack_trace *trace); #endif diff --git a/include/linux/stackprotector.h b/include/linux/stackprotector.h index 4c678c4fec..6f3e54c704 100644 --- a/include/linux/stackprotector.h +++ b/include/linux/stackprotector.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_STACKPROTECTOR_H #define _LINUX_STACKPROTECTOR_H 1 @@ -6,7 +5,7 @@ #include #include -#if defined(CONFIG_STACKPROTECTOR) || defined(CONFIG_ARM64_PTR_AUTH) +#ifdef CONFIG_CC_STACKPROTECTOR # include #else static inline void boot_init_stack_canary(void) diff --git a/include/linux/stacktrace.h b/include/linux/stacktrace.h index 9edecb494e..0a34489a46 100644 --- a/include/linux/stacktrace.h +++ b/include/linux/stacktrace.h @@ -1,86 +1,16 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef __LINUX_STACKTRACE_H #define __LINUX_STACKTRACE_H #include -#include struct task_struct; struct pt_regs; #ifdef CONFIG_STACKTRACE -void stack_trace_print(const unsigned long *trace, unsigned int nr_entries, - int spaces); -int stack_trace_snprint(char *buf, size_t size, const unsigned long *entries, - unsigned int nr_entries, int spaces); -unsigned int stack_trace_save(unsigned long *store, unsigned int size, - unsigned int skipnr); -unsigned int stack_trace_save_tsk(struct task_struct *task, - unsigned long *store, unsigned int 
size, - unsigned int skipnr); -unsigned int stack_trace_save_regs(struct pt_regs *regs, unsigned long *store, - unsigned int size, unsigned int skipnr); -unsigned int stack_trace_save_user(unsigned long *store, unsigned int size); - -/* Internal interfaces. Do not use in generic code */ -#ifdef CONFIG_ARCH_STACKWALK - -/** - * stack_trace_consume_fn - Callback for arch_stack_walk() - * @cookie: Caller supplied pointer handed back by arch_stack_walk() - * @addr: The stack entry address to consume - * - * Return: True, if the entry was consumed or skipped - * False, if there is no space left to store - */ -typedef bool (*stack_trace_consume_fn)(void *cookie, unsigned long addr); -/** - * arch_stack_walk - Architecture specific function to walk the stack - * @consume_entry: Callback which is invoked by the architecture code for - * each entry. - * @cookie: Caller supplied pointer which is handed back to - * @consume_entry - * @task: Pointer to a task struct, can be NULL - * @regs: Pointer to registers, can be NULL - * - * ============ ======= ============================================ - * task regs - * ============ ======= ============================================ - * task NULL Stack trace from task (can be current) - * current regs Stack trace starting on regs->stackpointer - * ============ ======= ============================================ - */ -void arch_stack_walk(stack_trace_consume_fn consume_entry, void *cookie, - struct task_struct *task, struct pt_regs *regs); - -/** - * arch_stack_walk_reliable - Architecture specific function to walk the - * stack reliably - * - * @consume_entry: Callback which is invoked by the architecture code for - * each entry. - * @cookie: Caller supplied pointer which is handed back to - * @consume_entry - * @task: Pointer to a task struct, can be NULL - * - * This function returns an error if it detects any unreliable - * features of the stack. Otherwise it guarantees that the stack - * trace is reliable. 
- * - * If the task is not 'current', the caller *must* ensure the task is - * inactive and its stack is pinned. - */ -int arch_stack_walk_reliable(stack_trace_consume_fn consume_entry, void *cookie, - struct task_struct *task); - -void arch_stack_walk_user(stack_trace_consume_fn consume_entry, void *cookie, - const struct pt_regs *regs); - -#else /* CONFIG_ARCH_STACKWALK */ struct stack_trace { unsigned int nr_entries, max_entries; unsigned long *entries; - unsigned int skip; /* input argument: How many entries to skip */ + int skip; /* input argument: How many entries to skip */ }; extern void save_stack_trace(struct stack_trace *trace); @@ -88,22 +18,23 @@ extern void save_stack_trace_regs(struct pt_regs *regs, struct stack_trace *trace); extern void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace); -extern int save_stack_trace_tsk_reliable(struct task_struct *tsk, - struct stack_trace *trace); -extern void save_stack_trace_user(struct stack_trace *trace); -#endif /* !CONFIG_ARCH_STACKWALK */ -#endif /* CONFIG_STACKTRACE */ -#if defined(CONFIG_STACKTRACE) && defined(CONFIG_HAVE_RELIABLE_STACKTRACE) -int stack_trace_save_tsk_reliable(struct task_struct *tsk, unsigned long *store, - unsigned int size); +extern void print_stack_trace(struct stack_trace *trace, int spaces); +extern int snprint_stack_trace(char *buf, size_t size, + struct stack_trace *trace, int spaces); + +#ifdef CONFIG_USER_STACKTRACE_SUPPORT +extern void save_stack_trace_user(struct stack_trace *trace); #else -static inline int stack_trace_save_tsk_reliable(struct task_struct *tsk, - unsigned long *store, - unsigned int size) -{ - return -ENOSYS; -} +# define save_stack_trace_user(trace) do { } while (0) #endif -#endif /* __LINUX_STACKTRACE_H */ +#else +# define save_stack_trace(trace) do { } while (0) +# define save_stack_trace_tsk(tsk, trace) do { } while (0) +# define save_stack_trace_user(trace) do { } while (0) +# define print_stack_trace(trace, spaces) do { } while 
(0) +# define snprint_stack_trace(buf, size, trace, spaces) do { } while (0) +#endif + +#endif diff --git a/include/linux/start_kernel.h b/include/linux/start_kernel.h index 8b369a41c0..d3e5f27565 100644 --- a/include/linux/start_kernel.h +++ b/include/linux/start_kernel.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_START_KERNEL_H #define _LINUX_START_KERNEL_H @@ -9,7 +8,5 @@ up something else. */ extern asmlinkage void __init start_kernel(void); -extern void __init arch_call_rest_init(void); -extern void __ref rest_init(void); #endif /* _LINUX_START_KERNEL_H */ diff --git a/include/linux/stat.h b/include/linux/stat.h index 7df06931f2..075cb0c7eb 100644 --- a/include/linux/stat.h +++ b/include/linux/stat.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_STAT_H #define _LINUX_STAT_H @@ -20,36 +19,19 @@ #include struct kstat { - u32 result_mask; /* What fields the user got */ - umode_t mode; - unsigned int nlink; - uint32_t blksize; /* Preferred I/O size */ - u64 attributes; - u64 attributes_mask; -#define KSTAT_ATTR_FS_IOC_FLAGS \ - (STATX_ATTR_COMPRESSED | \ - STATX_ATTR_IMMUTABLE | \ - STATX_ATTR_APPEND | \ - STATX_ATTR_NODUMP | \ - STATX_ATTR_ENCRYPTED | \ - STATX_ATTR_VERITY \ - )/* Attrs corresponding to FS_*_FL flags */ -#define KSTAT_ATTR_VFS_FLAGS \ - (STATX_ATTR_IMMUTABLE | \ - STATX_ATTR_APPEND \ - ) /* Attrs corresponding to S_* flags that are enforced by the VFS */ u64 ino; dev_t dev; - dev_t rdev; + umode_t mode; + unsigned int nlink; kuid_t uid; kgid_t gid; + dev_t rdev; loff_t size; - struct timespec64 atime; - struct timespec64 mtime; - struct timespec64 ctime; - struct timespec64 btime; /* File creation time */ - u64 blocks; - u64 mnt_id; + struct timespec atime; + struct timespec mtime; + struct timespec ctime; + unsigned long blksize; + unsigned long long blocks; }; #endif diff --git a/include/linux/statfs.h b/include/linux/statfs.h index 02c862686e..0166d320a7 100644 --- a/include/linux/statfs.h 
+++ b/include/linux/statfs.h @@ -1,10 +1,8 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_STATFS_H #define _LINUX_STATFS_H #include #include -#include struct kstatfs { long f_type; @@ -41,21 +39,5 @@ struct kstatfs { #define ST_NOATIME 0x0400 /* do not update access times */ #define ST_NODIRATIME 0x0800 /* do not update directory access times */ #define ST_RELATIME 0x1000 /* update atime relative to mtime/ctime */ -#define ST_NOSYMFOLLOW 0x2000 /* do not follow symlinks */ - -struct dentry; -extern int vfs_get_fsid(struct dentry *dentry, __kernel_fsid_t *fsid); - -static inline __kernel_fsid_t u64_to_fsid(u64 v) -{ - return (__kernel_fsid_t){.val = {(u32)v, (u32)(v>>32)}}; -} - -/* Fold 16 bytes uuid to 64 bit fsid */ -static inline __kernel_fsid_t uuid_to_fsid(__u8 *uuid) -{ - return u64_to_fsid(le64_to_cpup((void *)uuid) ^ - le64_to_cpup((void *)(uuid + sizeof(u64)))); -} #endif diff --git a/include/linux/stddef.h b/include/linux/stddef.h index 998a4ba28e..9c61c7cda9 100644 --- a/include/linux/stddef.h +++ b/include/linux/stddef.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_STDDEF_H #define _LINUX_STDDEF_H @@ -19,14 +18,6 @@ enum { #define offsetof(TYPE, MEMBER) ((size_t)&((TYPE *)0)->MEMBER) #endif -/** - * sizeof_field(TYPE, MEMBER) - * - * @TYPE: The structure containing the field of interest - * @MEMBER: The field to return the size of - */ -#define sizeof_field(TYPE, MEMBER) sizeof((((TYPE *)0)->MEMBER)) - /** * offsetofend(TYPE, MEMBER) * @@ -34,6 +25,6 @@ enum { * @MEMBER: The member within the structure to get the end offset of */ #define offsetofend(TYPE, MEMBER) \ - (offsetof(TYPE, MEMBER) + sizeof_field(TYPE, MEMBER)) + (offsetof(TYPE, MEMBER) + sizeof(((TYPE *)0)->MEMBER)) #endif diff --git a/include/linux/ste_modem_shm.h b/include/linux/ste_modem_shm.h new file mode 100644 index 0000000000..8444a4eff1 --- /dev/null +++ b/include/linux/ste_modem_shm.h @@ -0,0 +1,56 @@ +/* + * Copyright (C) ST-Ericsson AB 2012 + * 
Author: Sjur Brendeland / sjur.brandeland@stericsson.com + * + * License terms: GNU General Public License (GPL) version 2 + */ + +#ifndef __INC_MODEM_DEV_H +#define __INC_MODEM_DEV_H +#include +#include + +struct ste_modem_device; + +/** + * struct ste_modem_dev_cb - Callbacks for modem initiated events. + * @kick: Called when the modem kicks the host. + * + * This structure contains callbacks for actions triggered by the modem. + */ +struct ste_modem_dev_cb { + void (*kick)(struct ste_modem_device *mdev, int notify_id); +}; + +/** + * struct ste_modem_dev_ops - Functions to control modem and modem interface. + * + * @power: Main power switch, used for cold-start or complete power off. + * @kick: Kick the modem. + * @kick_subscribe: Subscribe for notifications from the modem. + * @setup: Provide callback functions to modem device. + * + * This structure contains functions used by the ste remoteproc driver + * to manage the modem. + */ +struct ste_modem_dev_ops { + int (*power)(struct ste_modem_device *mdev, bool on); + int (*kick)(struct ste_modem_device *mdev, int notify_id); + int (*kick_subscribe)(struct ste_modem_device *mdev, int notify_id); + int (*setup)(struct ste_modem_device *mdev, + struct ste_modem_dev_cb *cfg); +}; + +/** + * struct ste_modem_device - represent the STE modem device + * @pdev: Reference to platform device + * @ops: Operations used to manage the modem. + * @drv_data: Driver private data. + */ +struct ste_modem_device { + struct platform_device pdev; + struct ste_modem_dev_ops ops; + void *drv_data; +}; + +#endif /*INC_MODEM_DEV_H*/ diff --git a/include/linux/stm.h b/include/linux/stm.h index 3b22689512..8369d8a8ca 100644 --- a/include/linux/stm.h +++ b/include/linux/stm.h @@ -1,7 +1,15 @@ -// SPDX-License-Identifier: GPL-2.0 /* * System Trace Module (STM) infrastructure apis * Copyright (C) 2014 Intel Corporation. 
+ * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. */ #ifndef _STM_H_ @@ -57,7 +65,7 @@ struct stm_device; * * Normally, an STM device will have a range of masters available to software * and the rest being statically assigned to various hardware trace sources. - * The former is defined by the range [@sw_start..@sw_end] of the device + * The former is defined by the the range [@sw_start..@sw_end] of the device * description. That is, the lowest master that can be allocated to software * writers is @sw_start and data from this writer will appear is @sw_start * master in the STP stream. @@ -125,7 +133,7 @@ int stm_source_register_device(struct device *parent, struct stm_source_data *data); void stm_source_unregister_device(struct stm_source_data *data); -int notrace stm_source_write(struct stm_source_data *data, unsigned int chan, - const char *buf, size_t count); +int stm_source_write(struct stm_source_data *data, unsigned int chan, + const char *buf, size_t count); #endif /* _STM_H_ */ diff --git a/include/linux/stmmac.h b/include/linux/stmmac.h index a6f03b36fc..705840e043 100644 --- a/include/linux/stmmac.h +++ b/include/linux/stmmac.h @@ -1,10 +1,24 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /******************************************************************************* Header file for stmmac platform data Copyright (C) 2009 STMicroelectronics Ltd + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, + version 2, as published by the Free Software Foundation. 
+ + This program is distributed in the hope it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + + You should have received a copy of the GNU General Public License along with + this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + + The full GNU General Public License is included in this distribution in + the file called "COPYING". Author: Giuseppe Cavallaro *******************************************************************************/ @@ -13,11 +27,6 @@ #define __STMMAC_PLATFORM_DATA #include -#include - -#define MTL_MAX_RX_QUEUES 8 -#define MTL_MAX_TX_QUEUES 8 -#define STMMAC_CH_MAX 8 #define STMMAC_RX_COE_NONE 0 #define STMMAC_RX_COE_TYPE1 1 @@ -35,18 +44,6 @@ #define STMMAC_CSR_150_250M 0x4 /* MDC = clk_scr_i/102 */ #define STMMAC_CSR_250_300M 0x5 /* MDC = clk_scr_i/122 */ -/* MTL algorithms identifiers */ -#define MTL_TX_ALGORITHM_WRR 0x0 -#define MTL_TX_ALGORITHM_WFQ 0x1 -#define MTL_TX_ALGORITHM_DWRR 0x2 -#define MTL_TX_ALGORITHM_SP 0x3 -#define MTL_RX_ALGORITHM_SP 0x4 -#define MTL_RX_ALGORITHM_WSP 0x5 - -/* RX/TX Queue Mode */ -#define MTL_QUEUE_AVB 0x0 -#define MTL_QUEUE_DCB 0x1 - /* The MDC clock could be set higher than the IEEE 802.3 * specified frequency limit 0f 2.5 MHz, by programming a clock divider * of value different than the above defined values. 
The resultant MDIO @@ -79,25 +76,21 @@ /* Platfrom data for platform device structure's platform_data field */ struct stmmac_mdio_bus_data { + int (*phy_reset)(void *priv); unsigned int phy_mask; - unsigned int has_xpcs; - unsigned int xpcs_an_inband; int *irqs; int probed_phy_irq; - bool needs_reset; +#ifdef CONFIG_OF + int reset_gpio, active_low; + u32 delays[3]; +#endif }; struct stmmac_dma_cfg { int pbl; - int txpbl; - int rxpbl; - bool pblx8; int fixed_burst; int mixed_burst; bool aal; - bool eame; - bool multi_msi_en; - bool dche; }; #define AXI_BLEN 7 @@ -107,98 +100,21 @@ struct stmmac_axi { u32 axi_wr_osr_lmt; u32 axi_rd_osr_lmt; bool axi_kbbe; + bool axi_axi_all; u32 axi_blen[AXI_BLEN]; bool axi_fb; bool axi_mb; bool axi_rb; }; -#define EST_GCL 1024 -struct stmmac_est { - struct mutex lock; - int enable; - u32 btr_reserve[2]; - u32 btr_offset[2]; - u32 btr[2]; - u32 ctr[2]; - u32 ter; - u32 gcl_unaligned[EST_GCL]; - u32 gcl[EST_GCL]; - u32 gcl_size; -}; - -struct stmmac_rxq_cfg { - u8 mode_to_use; - u32 chan; - u8 pkt_route; - bool use_prio; - u32 prio; -}; - -struct stmmac_txq_cfg { - u32 weight; - u8 mode_to_use; - /* Credit Base Shaper parameters */ - u32 send_slope; - u32 idle_slope; - u32 high_credit; - u32 low_credit; - bool use_prio; - u32 prio; - int tbs_en; -}; - -/* FPE link state */ -enum stmmac_fpe_state { - FPE_STATE_OFF = 0, - FPE_STATE_CAPABLE = 1, - FPE_STATE_ENTERING_ON = 2, - FPE_STATE_ON = 3, -}; - -/* FPE link-partner hand-shaking mPacket type */ -enum stmmac_mpacket_type { - MPACKET_VERIFY = 0, - MPACKET_RESPONSE = 1, -}; - -enum stmmac_fpe_task_state_t { - __FPE_REMOVING, - __FPE_TASK_SCHED, -}; - -struct stmmac_fpe_cfg { - bool enable; /* FPE enable */ - bool hs_enable; /* FPE handshake enable */ - enum stmmac_fpe_state lp_fpe_state; /* Link Partner FPE state */ - enum stmmac_fpe_state lo_fpe_state; /* Local station FPE state */ -}; - -struct stmmac_safety_feature_cfg { - u32 tsoee; - u32 mrxpee; - u32 mestee; - u32 mrxee; - u32 
mtxee; - u32 epsi; - u32 edpp; - u32 prtyen; - u32 tmouten; -}; - struct plat_stmmacenet_data { int bus_id; int phy_addr; int interface; - phy_interface_t phy_interface; struct stmmac_mdio_bus_data *mdio_bus_data; struct device_node *phy_node; - struct device_node *phylink_node; struct device_node *mdio_node; struct stmmac_dma_cfg *dma_cfg; - struct stmmac_est *est; - struct stmmac_fpe_cfg *fpe_cfg; - struct stmmac_safety_feature_cfg *safety_feat_cfg; int clk_csr; int has_gmac; int enh_desc; @@ -215,58 +131,16 @@ struct plat_stmmacenet_data { int unicast_filter_entries; int tx_fifo_size; int rx_fifo_size; - u32 addr64; - u32 rx_queues_to_use; - u32 tx_queues_to_use; - u8 rx_sched_algorithm; - u8 tx_sched_algorithm; - struct stmmac_rxq_cfg rx_queues_cfg[MTL_MAX_RX_QUEUES]; - struct stmmac_txq_cfg tx_queues_cfg[MTL_MAX_TX_QUEUES]; void (*fix_mac_speed)(void *priv, unsigned int speed); - int (*serdes_powerup)(struct net_device *ndev, void *priv); - void (*serdes_powerdown)(struct net_device *ndev, void *priv); - void (*speed_mode_2500)(struct net_device *ndev, void *priv); - void (*ptp_clk_freq_config)(void *priv); + void (*bus_setup)(void __iomem *ioaddr); int (*init)(struct platform_device *pdev, void *priv); void (*exit)(struct platform_device *pdev, void *priv); - struct mac_device_info *(*setup)(void *priv); - int (*clks_config)(void *priv, bool enabled); - int (*crosststamp)(ktime_t *device, struct system_counterval_t *system, - void *ctx); + void (*suspend)(struct platform_device *pdev, void *priv); + void (*resume)(struct platform_device *pdev, void *priv); void *bsp_priv; - struct clk *stmmac_clk; - struct clk *pclk; - struct clk *clk_ptp_ref; - unsigned int clk_ptp_rate; - unsigned int clk_ref_rate; - unsigned int mult_fact_100ns; - s32 ptp_max_adj; - struct reset_control *stmmac_rst; - struct reset_control *stmmac_ahb_rst; struct stmmac_axi *axi; int has_gmac4; - bool has_sun8i; bool tso_en; - int rss_en; int mac_port_sel_speed; - bool 
en_tx_lpi_clockgating; - int has_xgmac; - bool vlan_fail_q_en; - u8 vlan_fail_q; - unsigned int eee_usecs_rate; - struct pci_dev *pdev; - bool has_crossts; - int int_snapshot_num; - int ext_snapshot_num; - bool ext_snapshot_en; - bool multi_msi_en; - int msi_mac_vec; - int msi_wol_vec; - int msi_lpi_vec; - int msi_sfty_ce_vec; - int msi_sfty_ue_vec; - int msi_rx_base_vec; - int msi_tx_base_vec; - bool use_phy_wol; }; #endif diff --git a/include/linux/stmp3xxx_rtc_wdt.h b/include/linux/stmp3xxx_rtc_wdt.h index be71a59b77..1dd12c9623 100644 --- a/include/linux/stmp3xxx_rtc_wdt.h +++ b/include/linux/stmp3xxx_rtc_wdt.h @@ -1,8 +1,9 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * stmp3xxx_rtc_wdt.h * * Copyright (C) 2011 Wolfram Sang, Pengutronix e.K. + * + * This file is released under the GPLv2. */ #ifndef __LINUX_STMP3XXX_RTC_WDT_H #define __LINUX_STMP3XXX_RTC_WDT_H diff --git a/include/linux/stmp_device.h b/include/linux/stmp_device.h index 23046916a0..6cf7ec9547 100644 --- a/include/linux/stmp_device.h +++ b/include/linux/stmp_device.h @@ -1,8 +1,12 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * basic functions for devices following the "stmp" style register layout * * Copyright (C) 2011 Wolfram Sang, Pengutronix e.K. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. 
*/ #ifndef __STMP_DEVICE_H__ diff --git a/include/linux/stop_machine.h b/include/linux/stop_machine.h index 46fb3ebdd1..3cc9632dcc 100644 --- a/include/linux/stop_machine.h +++ b/include/linux/stop_machine.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_STOP_MACHINE #define _LINUX_STOP_MACHINE @@ -24,7 +23,6 @@ typedef int (*cpu_stop_fn_t)(void *arg); struct cpu_stop_work { struct list_head list; /* cpu_stopper->works */ cpu_stop_fn_t fn; - unsigned long caller; void *arg; struct cpu_stop_done *done; }; @@ -33,11 +31,10 @@ int stop_one_cpu(unsigned int cpu, cpu_stop_fn_t fn, void *arg); int stop_two_cpus(unsigned int cpu1, unsigned int cpu2, cpu_stop_fn_t fn, void *arg); bool stop_one_cpu_nowait(unsigned int cpu, cpu_stop_fn_t fn, void *arg, struct cpu_stop_work *work_buf); +int stop_cpus(const struct cpumask *cpumask, cpu_stop_fn_t fn, void *arg); +int try_stop_cpus(const struct cpumask *cpumask, cpu_stop_fn_t fn, void *arg); void stop_machine_park(int cpu); void stop_machine_unpark(int cpu); -void stop_machine_yield(const struct cpumask *cpumask); - -extern void print_stop_info(const char *log_lvl, struct task_struct *task); #else /* CONFIG_SMP */ @@ -83,7 +80,19 @@ static inline bool stop_one_cpu_nowait(unsigned int cpu, return false; } -static inline void print_stop_info(const char *log_lvl, struct task_struct *task) { } +static inline int stop_cpus(const struct cpumask *cpumask, + cpu_stop_fn_t fn, void *arg) +{ + if (cpumask_test_cpu(raw_smp_processor_id(), cpumask)) + return stop_one_cpu(raw_smp_processor_id(), fn, arg); + return -ENOENT; +} + +static inline int try_stop_cpus(const struct cpumask *cpumask, + cpu_stop_fn_t fn, void *arg) +{ + return stop_cpus(cpumask, fn, arg); +} #endif /* CONFIG_SMP */ @@ -107,29 +116,15 @@ static inline void print_stop_info(const char *log_lvl, struct task_struct *task * @fn() runs. * * This can be thought of as a very heavy write lock, equivalent to - * grabbing every spinlock in the kernel. 
- * - * Protects against CPU hotplug. - */ + * grabbing every spinlock in the kernel. */ int stop_machine(cpu_stop_fn_t fn, void *data, const struct cpumask *cpus); -/** - * stop_machine_cpuslocked: freeze the machine on all CPUs and run this function - * @fn: the function to run - * @data: the data ptr for the @fn() - * @cpus: the cpus to run the @fn() on (NULL = any online cpu) - * - * Same as above. Must be called from with in a cpus_read_lock() protected - * region. Avoids nested calls to cpus_read_lock(). - */ -int stop_machine_cpuslocked(cpu_stop_fn_t fn, void *data, const struct cpumask *cpus); - int stop_machine_from_inactive_cpu(cpu_stop_fn_t fn, void *data, const struct cpumask *cpus); #else /* CONFIG_SMP || CONFIG_HOTPLUG_CPU */ -static __always_inline int stop_machine_cpuslocked(cpu_stop_fn_t fn, void *data, - const struct cpumask *cpus) +static inline int stop_machine(cpu_stop_fn_t fn, void *data, + const struct cpumask *cpus) { unsigned long flags; int ret; @@ -139,15 +134,8 @@ static __always_inline int stop_machine_cpuslocked(cpu_stop_fn_t fn, void *data, return ret; } -static __always_inline int -stop_machine(cpu_stop_fn_t fn, void *data, const struct cpumask *cpus) -{ - return stop_machine_cpuslocked(fn, data, cpus); -} - -static __always_inline int -stop_machine_from_inactive_cpu(cpu_stop_fn_t fn, void *data, - const struct cpumask *cpus) +static inline int stop_machine_from_inactive_cpu(cpu_stop_fn_t fn, void *data, + const struct cpumask *cpus) { return stop_machine(fn, data, cpus); } diff --git a/include/linux/string.h b/include/linux/string.h index 5e96d656be..434ee17cc7 100644 --- a/include/linux/string.h +++ b/include/linux/string.h @@ -1,17 +1,15 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_STRING_H_ #define _LINUX_STRING_H_ + #include /* for inline */ #include /* for size_t */ #include /* for NULL */ -#include /* for E2BIG */ -#include +#include #include extern char *strndup_user(const char __user *, long); extern void 
*memdup_user(const void __user *, size_t); -extern void *vmemdup_user(const void __user *, size_t); extern void *memdup_user_nul(const void __user *, size_t); /* @@ -20,56 +18,51 @@ extern void *memdup_user_nul(const void __user *, size_t); #include #ifndef __HAVE_ARCH_STRCPY -extern char * strcpy(char *,const char *); +extern char * strcpy(char *,const char *) __nocapture(2); #endif #ifndef __HAVE_ARCH_STRNCPY -extern char * strncpy(char *,const char *, __kernel_size_t); +extern char * strncpy(char *,const char *, __kernel_size_t) __nocapture(2); #endif #ifndef __HAVE_ARCH_STRLCPY -size_t strlcpy(char *, const char *, size_t); +size_t strlcpy(char *, const char *, size_t) __nocapture(2); #endif #ifndef __HAVE_ARCH_STRSCPY -ssize_t strscpy(char *, const char *, size_t); +ssize_t __must_check strscpy(char *, const char *, size_t) __nocapture(2); #endif - -/* Wraps calls to strscpy()/memset(), no arch specific code required */ -ssize_t strscpy_pad(char *dest, const char *src, size_t count); - #ifndef __HAVE_ARCH_STRCAT -extern char * strcat(char *, const char *); +extern char * strcat(char *, const char *) __nocapture(2); #endif #ifndef __HAVE_ARCH_STRNCAT -extern char * strncat(char *, const char *, __kernel_size_t); +extern char * strncat(char *, const char *, __kernel_size_t) __nocapture(2); #endif #ifndef __HAVE_ARCH_STRLCAT -extern size_t strlcat(char *, const char *, __kernel_size_t); +extern size_t strlcat(char *, const char *, __kernel_size_t) __nocapture(2); #endif #ifndef __HAVE_ARCH_STRCMP -extern int strcmp(const char *,const char *); +extern int strcmp(const char *,const char *) __nocapture(); #endif #ifndef __HAVE_ARCH_STRNCMP -extern int strncmp(const char *,const char *,__kernel_size_t); +extern int strncmp(const char *,const char *,__kernel_size_t) __nocapture(1, 2); #endif #ifndef __HAVE_ARCH_STRCASECMP -extern int strcasecmp(const char *s1, const char *s2); +extern int strcasecmp(const char *s1, const char *s2) __nocapture(); #endif #ifndef 
__HAVE_ARCH_STRNCASECMP -extern int strncasecmp(const char *s1, const char *s2, size_t n); +extern int strncasecmp(const char *s1, const char *s2, size_t n) __nocapture(1, 2); #endif #ifndef __HAVE_ARCH_STRCHR -extern char * strchr(const char *,int); +extern char * strchr(const char *,int) __nocapture(-1); #endif #ifndef __HAVE_ARCH_STRCHRNUL -extern char * strchrnul(const char *,int); +extern char * strchrnul(const char *,int) __nocapture(-1); #endif -extern char * strnchrnul(const char *, size_t, int); #ifndef __HAVE_ARCH_STRNCHR -extern char * strnchr(const char *, size_t, int); +extern char * strnchr(const char *, size_t, int) __nocapture(-1); #endif #ifndef __HAVE_ARCH_STRRCHR -extern char * strrchr(const char *,int); +extern char * strrchr(const char *,int) __nocapture(-1); #endif -extern char * __must_check skip_spaces(const char *); +extern char * __must_check skip_spaces(const char *) __nocapture(-1); extern char *strim(char *); @@ -79,133 +72,79 @@ static inline __must_check char *strstrip(char *str) } #ifndef __HAVE_ARCH_STRSTR -extern char * strstr(const char *, const char *); +extern char * strstr(const char *, const char *) __nocapture(-1, 2); #endif #ifndef __HAVE_ARCH_STRNSTR extern char * strnstr(const char *, const char *, size_t); #endif #ifndef __HAVE_ARCH_STRLEN -extern __kernel_size_t strlen(const char *); +extern __kernel_size_t strlen(const char *) __nocapture(1); #endif #ifndef __HAVE_ARCH_STRNLEN -extern __kernel_size_t strnlen(const char *,__kernel_size_t); +extern __kernel_size_t strnlen(const char *,__kernel_size_t) __nocapture(1); #endif #ifndef __HAVE_ARCH_STRPBRK -extern char * strpbrk(const char *,const char *); +extern char * strpbrk(const char *,const char *) __nocapture(-1, 2); #endif #ifndef __HAVE_ARCH_STRSEP -extern char * strsep(char **,const char *); +extern char * strsep(char **,const char *) __nocapture(2); #endif #ifndef __HAVE_ARCH_STRSPN -extern __kernel_size_t strspn(const char *,const char *); +extern __kernel_size_t 
strspn(const char *,const char *) __nocapture(); #endif #ifndef __HAVE_ARCH_STRCSPN -extern __kernel_size_t strcspn(const char *,const char *); +extern __kernel_size_t strcspn(const char *,const char *) __nocapture(); #endif #ifndef __HAVE_ARCH_MEMSET extern void * memset(void *,int,__kernel_size_t); #endif - -#ifndef __HAVE_ARCH_MEMSET16 -extern void *memset16(uint16_t *, uint16_t, __kernel_size_t); -#endif - -#ifndef __HAVE_ARCH_MEMSET32 -extern void *memset32(uint32_t *, uint32_t, __kernel_size_t); -#endif - -#ifndef __HAVE_ARCH_MEMSET64 -extern void *memset64(uint64_t *, uint64_t, __kernel_size_t); -#endif - -static inline void *memset_l(unsigned long *p, unsigned long v, - __kernel_size_t n) -{ - if (BITS_PER_LONG == 32) - return memset32((uint32_t *)p, v, n); - else - return memset64((uint64_t *)p, v, n); -} - -static inline void *memset_p(void **p, void *v, __kernel_size_t n) -{ - if (BITS_PER_LONG == 32) - return memset32((uint32_t *)p, (uintptr_t)v, n); - else - return memset64((uint64_t *)p, (uintptr_t)v, n); -} - -extern void **__memcat_p(void **a, void **b); -#define memcat_p(a, b) ({ \ - BUILD_BUG_ON_MSG(!__same_type(*(a), *(b)), \ - "type mismatch in memcat_p()"); \ - (typeof(*a) *)__memcat_p((void **)(a), (void **)(b)); \ -}) - #ifndef __HAVE_ARCH_MEMCPY -extern void * memcpy(void *,const void *,__kernel_size_t); +extern void * memcpy(void *,const void *,__kernel_size_t) __nocapture(2); #endif #ifndef __HAVE_ARCH_MEMMOVE -extern void * memmove(void *,const void *,__kernel_size_t); +extern void * memmove(void *,const void *,__kernel_size_t) __nocapture(2); #endif #ifndef __HAVE_ARCH_MEMSCAN extern void * memscan(void *,int,__kernel_size_t); #endif #ifndef __HAVE_ARCH_MEMCMP -extern int memcmp(const void *,const void *,__kernel_size_t); -#endif -#ifndef __HAVE_ARCH_BCMP -extern int bcmp(const void *,const void *,__kernel_size_t); +extern int memcmp(const void *,const void *,__kernel_size_t) __nocapture(1, 2); #endif #ifndef __HAVE_ARCH_MEMCHR -extern 
void * memchr(const void *,int,__kernel_size_t); +extern void * memchr(const void *,int,__kernel_size_t) __nocapture(-1); #endif -#ifndef __HAVE_ARCH_MEMCPY_FLUSHCACHE -static inline void memcpy_flushcache(void *dst, const void *src, size_t cnt) -{ - memcpy(dst, src, cnt); -} -#endif - -void *memchr_inv(const void *s, int c, size_t n); +void *memchr_inv(const void *s, int c, size_t n) __nocapture(-1); char *strreplace(char *s, char old, char new); extern void kfree_const(const void *x); -extern char *kstrdup(const char *s, gfp_t gfp) __malloc; -extern const char *kstrdup_const(const char *s, gfp_t gfp); -extern char *kstrndup(const char *s, size_t len, gfp_t gfp); -extern void *kmemdup(const void *src, size_t len, gfp_t gfp); -extern char *kmemdup_nul(const char *s, size_t len, gfp_t gfp); +extern char *kstrdup(const char *s, gfp_t gfp) __malloc __nocapture(1); +extern const char *kstrdup_const(const char *s, gfp_t gfp) __nocapture(1); +extern char *kstrndup(const char *s, size_t len, gfp_t gfp) __nocapture(1); +extern void *kmemdup(const void *src, size_t len, gfp_t gfp) __nocapture(1); extern char **argv_split(gfp_t gfp, const char *str, int *argcp); extern void argv_free(char **argv); -extern bool sysfs_streq(const char *s1, const char *s2); -int match_string(const char * const *array, size_t n, const char *string); -int __sysfs_match_string(const char * const *array, size_t n, const char *s); +extern bool sysfs_streq(const char *s1, const char *s2) __nocapture(); +extern int kstrtobool(const char *s, bool *res) __nocapture(1); +static inline int strtobool(const char *s, bool *res) +{ + return kstrtobool(s, res); +} -/** - * sysfs_match_string - matches given string in an array - * @_a: array of strings - * @_s: string to match with - * - * Helper for __sysfs_match_string(). Calculates the size of @a automatically. 
- */ -#define sysfs_match_string(_a, _s) __sysfs_match_string(_a, ARRAY_SIZE(_a), _s) +int match_string(const char * const *array, size_t n, const char *string); #ifdef CONFIG_BINARY_PRINTF -int vbin_printf(u32 *bin_buf, size_t size, const char *fmt, va_list args); -int bstr_printf(char *buf, size_t size, const char *fmt, const u32 *bin_buf); +int vbin_printf(u32 *bin_buf, size_t size, const char *fmt, va_list args) __nocapture(3); +int bstr_printf(char *buf, size_t size, const char *fmt, const u32 *bin_buf) __nocapture(3); int bprintf(u32 *bin_buf, size_t size, const char *fmt, ...) __printf(3, 4); #endif extern ssize_t memory_read_from_buffer(void *to, size_t count, loff_t *ppos, const void *from, size_t available); -int ptr_to_hashval(const void *ptr, unsigned long *hashval_out); - /** * strstarts - does @str start with @prefix? * @str: string to examine @@ -217,26 +156,7 @@ static inline bool strstarts(const char *str, const char *prefix) } size_t memweight(const void *ptr, size_t bytes); - -/** - * memzero_explicit - Fill a region of memory (e.g. sensitive - * keying data) with 0s. - * @s: Pointer to the start of the area. - * @count: The size of the area. - * - * Note: usually using memset() is just fine (!), but in cases - * where clearing out _local_ data at the end of a scope is - * necessary, memzero_explicit() should be used instead in - * order to prevent the compiler from optimising away zeroing. - * - * memzero_explicit() doesn't need an arch-specific version as - * it just invokes the one of memset() implicitly. - */ -static inline void memzero_explicit(void *s, size_t count) -{ - memset(s, 0, count); - barrier_data(s); -} +void memzero_explicit(void *s, size_t count); /** * kbasename - return the last part of a pathname. @@ -249,56 +169,4 @@ static inline const char *kbasename(const char *path) return tail ? 
tail + 1 : path; } -#define __FORTIFY_INLINE extern __always_inline __attribute__((gnu_inline)) -#define __RENAME(x) __asm__(#x) - -void fortify_panic(const char *name) __noreturn __cold; -void __read_overflow(void) __compiletime_error("detected read beyond size of object passed as 1st parameter"); -void __read_overflow2(void) __compiletime_error("detected read beyond size of object passed as 2nd parameter"); -void __read_overflow3(void) __compiletime_error("detected read beyond size of object passed as 3rd parameter"); -void __write_overflow(void) __compiletime_error("detected write beyond size of object passed as 1st parameter"); - -#if !defined(__NO_FORTIFY) && defined(__OPTIMIZE__) && defined(CONFIG_FORTIFY_SOURCE) -#include -#endif - -/** - * memcpy_and_pad - Copy one buffer to another with padding - * @dest: Where to copy to - * @dest_len: The destination buffer size - * @src: Where to copy from - * @count: The number of bytes to copy - * @pad: Character to use for padding if space is left in destination. - */ -static inline void memcpy_and_pad(void *dest, size_t dest_len, - const void *src, size_t count, int pad) -{ - if (dest_len > count) { - memcpy(dest, src, count); - memset(dest + count, pad, dest_len - count); - } else - memcpy(dest, src, dest_len); -} - -/** - * str_has_prefix - Test if a string has a given prefix - * @str: The string to test - * @prefix: The string to see if @str starts with - * - * A common way to test a prefix of a string is to do: - * strncmp(str, prefix, sizeof(prefix) - 1) - * - * But this can lead to bugs due to typos, or if prefix is a pointer - * and not a constant. Instead use str_has_prefix(). - * - * Returns: - * * strlen(@prefix) if @str starts with @prefix - * * 0 if @str does not start with @prefix - */ -static __always_inline size_t str_has_prefix(const char *str, const char *prefix) -{ - size_t len = strlen(prefix); - return strncmp(str, prefix, len) == 0 ? 
len : 0; -} - #endif /* _LINUX_STRING_H_ */ diff --git a/include/linux/string_helpers.h b/include/linux/string_helpers.h index 68189c4a2e..5ce9538f29 100644 --- a/include/linux/string_helpers.h +++ b/include/linux/string_helpers.h @@ -1,13 +1,9 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_STRING_HELPERS_H_ #define _LINUX_STRING_HELPERS_H_ -#include -#include #include struct file; -struct task_struct; /* Descriptions of the types of units to * print in */ @@ -19,15 +15,13 @@ enum string_size_units { void string_get_size(u64 size, u64 blk_size, enum string_size_units units, char *buf, int len); -#define UNESCAPE_SPACE BIT(0) -#define UNESCAPE_OCTAL BIT(1) -#define UNESCAPE_HEX BIT(2) -#define UNESCAPE_SPECIAL BIT(3) +#define UNESCAPE_SPACE 0x01 +#define UNESCAPE_OCTAL 0x02 +#define UNESCAPE_HEX 0x04 +#define UNESCAPE_SPECIAL 0x08 #define UNESCAPE_ANY \ (UNESCAPE_SPACE | UNESCAPE_OCTAL | UNESCAPE_HEX | UNESCAPE_SPECIAL) -#define UNESCAPE_ALL_MASK GENMASK(3, 0) - int string_unescape(char *src, char *dst, size_t size, unsigned int flags); static inline int string_unescape_inplace(char *buf, unsigned int flags) @@ -45,20 +39,15 @@ static inline int string_unescape_any_inplace(char *buf) return string_unescape_any(buf, buf, 0); } -#define ESCAPE_SPACE BIT(0) -#define ESCAPE_SPECIAL BIT(1) -#define ESCAPE_NULL BIT(2) -#define ESCAPE_OCTAL BIT(3) +#define ESCAPE_SPACE 0x01 +#define ESCAPE_SPECIAL 0x02 +#define ESCAPE_NULL 0x04 +#define ESCAPE_OCTAL 0x08 #define ESCAPE_ANY \ (ESCAPE_SPACE | ESCAPE_OCTAL | ESCAPE_SPECIAL | ESCAPE_NULL) -#define ESCAPE_NP BIT(4) +#define ESCAPE_NP 0x10 #define ESCAPE_ANY_NP (ESCAPE_ANY | ESCAPE_NP) -#define ESCAPE_HEX BIT(5) -#define ESCAPE_NA BIT(6) -#define ESCAPE_NAP BIT(7) -#define ESCAPE_APPEND BIT(8) - -#define ESCAPE_ALL_MASK GENMASK(8, 0) +#define ESCAPE_HEX 0x20 int string_escape_mem(const char *src, size_t isz, char *dst, size_t osz, unsigned int flags, const char *only); @@ -81,24 +70,8 @@ static inline int 
string_escape_str_any_np(const char *src, char *dst, return string_escape_str(src, dst, sz, ESCAPE_ANY_NP, only); } -static inline void string_upper(char *dst, const char *src) -{ - do { - *dst++ = toupper(*src); - } while (*src++); -} - -static inline void string_lower(char *dst, const char *src) -{ - do { - *dst++ = tolower(*src); - } while (*src++); -} - char *kstrdup_quotable(const char *src, gfp_t gfp); char *kstrdup_quotable_cmdline(struct task_struct *task, gfp_t gfp); char *kstrdup_quotable_file(struct file *file, gfp_t gfp); -void kfree_strarray(char **array, size_t n); - #endif diff --git a/include/linux/stringhash.h b/include/linux/stringhash.h index c0c5c5b73d..7c2d95170d 100644 --- a/include/linux/stringhash.h +++ b/include/linux/stringhash.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef __LINUX_STRINGHASH_H #define __LINUX_STRINGHASH_H @@ -50,9 +49,9 @@ partial_name_hash(unsigned long c, unsigned long prevhash) * losing bits). This also has the property (wanted by the dcache) * that the msbits make a good hash table index. */ -static inline unsigned int end_name_hash(unsigned long hash) +static inline unsigned long end_name_hash(unsigned long hash) { - return hash_long(hash, 32); + return __hash_32((unsigned int)hash); } /* diff --git a/include/linux/sudmac.h b/include/linux/sudmac.h new file mode 100644 index 0000000000..377b8a5788 --- /dev/null +++ b/include/linux/sudmac.h @@ -0,0 +1,52 @@ +/* + * Header for the SUDMAC driver + * + * Copyright (C) 2013 Renesas Solutions Corp. + * + * This is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. 
+ */ +#ifndef SUDMAC_H +#define SUDMAC_H + +#include +#include +#include + +/* Used by slave DMA clients to request DMA to/from a specific peripheral */ +struct sudmac_slave { + struct shdma_slave shdma_slave; /* Set by the platform */ +}; + +/* + * Supplied by platforms to specify, how a DMA channel has to be configured for + * a certain peripheral + */ +struct sudmac_slave_config { + int slave_id; +}; + +struct sudmac_channel { + unsigned long offset; + unsigned long config; + unsigned long wait; /* The configuable range is 0 to 3 */ + unsigned long dint_end_bit; +}; + +struct sudmac_pdata { + const struct sudmac_slave_config *slave; + int slave_num; + const struct sudmac_channel *channel; + int channel_num; +}; + +/* Definitions for the sudmac_channel.config */ +#define SUDMAC_TX_BUFFER_MODE BIT(0) +#define SUDMAC_RX_END_MODE BIT(1) + +/* Definitions for the sudmac_channel.dint_end_bit */ +#define SUDMAC_DMA_BIT_CH0 BIT(0) +#define SUDMAC_DMA_BIT_CH1 BIT(1) + +#endif diff --git a/include/linux/sungem_phy.h b/include/linux/sungem_phy.h index 3a11fa41a1..bd9be9f59d 100644 --- a/include/linux/sungem_phy.h +++ b/include/linux/sungem_phy.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef __SUNGEM_PHY_H__ #define __SUNGEM_PHY_H__ diff --git a/include/linux/sunrpc/addr.h b/include/linux/sunrpc/addr.h index 07d454873b..f16c5c90bb 100644 --- a/include/linux/sunrpc/addr.h +++ b/include/linux/sunrpc/addr.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ /* * linux/include/linux/sunrpc/addr.h * @@ -24,9 +23,9 @@ static inline unsigned short rpc_get_port(const struct sockaddr *sap) { switch (sap->sa_family) { case AF_INET: - return ntohs(((struct sockaddr_in *)sap)->sin_port); + return ntohs(((const struct sockaddr_in *)sap)->sin_port); case AF_INET6: - return ntohs(((struct sockaddr_in6 *)sap)->sin6_port); + return ntohs(((const struct sockaddr_in6 *)sap)->sin6_port); } return 0; } @@ -59,7 +58,7 @@ static inline bool rpc_cmp_addr4(const struct 
sockaddr *sap1, static inline bool __rpc_copy_addr4(struct sockaddr *dst, const struct sockaddr *src) { - const struct sockaddr_in *ssin = (struct sockaddr_in *) src; + const struct sockaddr_in *ssin = (const struct sockaddr_in *) src; struct sockaddr_in *dsin = (struct sockaddr_in *) dst; dsin->sin_family = ssin->sin_family; @@ -178,7 +177,7 @@ static inline u32 rpc_get_scope_id(const struct sockaddr *sa) if (sa->sa_family != AF_INET6) return 0; - return ((struct sockaddr_in6 *) sa)->sin6_scope_id; + return ((const struct sockaddr_in6 *) sa)->sin6_scope_id; } #endif /* _LINUX_SUNRPC_ADDR_H */ diff --git a/include/linux/sunrpc/auth.h b/include/linux/sunrpc/auth.h index 98da816b5f..b1bc62ba20 100644 --- a/include/linux/sunrpc/auth.h +++ b/include/linux/sunrpc/auth.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ /* * linux/include/linux/sunrpc/auth.h * @@ -10,6 +9,8 @@ #ifndef _LINUX_SUNRPC_AUTH_H #define _LINUX_SUNRPC_AUTH_H +#ifdef __KERNEL__ + #include #include #include @@ -31,13 +32,24 @@ */ #define UNX_MAXNODENAME __NEW_UTS_LEN #define UNX_CALLSLACK (21 + XDR_QUADLEN(UNX_MAXNODENAME)) -#define UNX_NGROUPS 16 struct rpcsec_gss_info; +/* auth_cred ac_flags bits */ +enum { + RPC_CRED_KEY_EXPIRE_SOON = 1, /* underlying cred key will expire soon */ + RPC_CRED_NOTIFY_TIMEOUT = 2, /* nofity generic cred when underlying + key will expire soon */ +}; + +/* Work around the lack of a VFS credential */ struct auth_cred { - const struct cred *cred; - const char *principal; /* If present, this is a machine credential */ + kuid_t uid; + kgid_t gid; + struct group_info *group_info; + const char *principal; + unsigned long ac_flags; + unsigned char machine_cred : 1; }; /* @@ -51,10 +63,14 @@ struct rpc_cred { struct rcu_head cr_rcu; struct rpc_auth * cr_auth; const struct rpc_credops *cr_ops; +#if IS_ENABLED(CONFIG_SUNRPC_DEBUG) + unsigned long cr_magic; /* 0x0f4aa4f0 */ +#endif unsigned long cr_expire; /* when to gc */ unsigned long cr_flags; /* various flags */ - 
refcount_t cr_count; /* ref count */ - const struct cred *cr_cred; + atomic_t cr_count; /* ref count */ + + kuid_t cr_uid; /* per-flavor data */ }; @@ -63,7 +79,10 @@ struct rpc_cred { #define RPCAUTH_CRED_HASHED 2 #define RPCAUTH_CRED_NEGATIVE 3 -const struct cred *rpc_machine_cred(void); +#define RPCAUTH_CRED_MAGIC 0x0f4aa4f0 + +/* rpc_auth au_flags */ +#define RPCAUTH_AUTH_NO_CRKEY_TIMEOUT 0x0001 /* underlying cred has no key timeout */ /* * Client authentication handle @@ -72,25 +91,26 @@ struct rpc_cred_cache; struct rpc_authops; struct rpc_auth { unsigned int au_cslack; /* call cred size estimate */ - unsigned int au_rslack; /* reply cred size estimate */ - unsigned int au_verfsize; /* size of reply verifier */ - unsigned int au_ralign; /* words before UL header */ + /* guess at number of u32's auth adds before + * reply data; normally the verifier size: */ + unsigned int au_rslack; + /* for gss, used to calculate au_rslack: */ + unsigned int au_verfsize; - unsigned long au_flags; - const struct rpc_authops *au_ops; + unsigned int au_flags; /* various flags */ + const struct rpc_authops *au_ops; /* operations */ rpc_authflavor_t au_flavor; /* pseudoflavor (note may * differ from the flavor in * au_ops->au_flavor in gss * case) */ - refcount_t au_count; /* Reference counter */ + atomic_t au_count; /* Reference counter */ struct rpc_cred_cache * au_credcache; /* per-flavor data */ }; /* rpc_auth au_flags */ -#define RPCAUTH_AUTH_DATATOUCH (1) -#define RPCAUTH_AUTH_UPDATE_SLACK (2) +#define RPCAUTH_AUTH_DATATOUCH 0x00000002 struct rpc_auth_create_args { rpc_authflavor_t pseudoflavor; @@ -99,6 +119,7 @@ struct rpc_auth_create_args { /* Flags for rpcauth_lookupcred() */ #define RPCAUTH_LOOKUP_NEW 0x01 /* Accept an uninitialised cred */ +#define RPCAUTH_LOOKUP_RCU 0x02 /* lock-less lookup */ /* * Client authentication ops @@ -107,13 +128,13 @@ struct rpc_authops { struct module *owner; rpc_authflavor_t au_flavor; /* flavor (RPC_AUTH_*) */ char * au_name; - struct 
rpc_auth * (*create)(const struct rpc_auth_create_args *, - struct rpc_clnt *); + struct rpc_auth * (*create)(struct rpc_auth_create_args *, struct rpc_clnt *); void (*destroy)(struct rpc_auth *); int (*hash_cred)(struct auth_cred *, unsigned int); struct rpc_cred * (*lookup_cred)(struct rpc_auth *, struct auth_cred *, int); struct rpc_cred * (*crcreate)(struct rpc_auth*, struct auth_cred *, int, gfp_t); + int (*list_pseudoflavors)(rpc_authflavor_t *, int); rpc_authflavor_t (*info2flavor)(struct rpcsec_gss_info *); int (*flavor2info)(rpc_authflavor_t, struct rpcsec_gss_info *); @@ -127,68 +148,88 @@ struct rpc_credops { void (*crdestroy)(struct rpc_cred *); int (*crmatch)(struct auth_cred *, struct rpc_cred *, int); - int (*crmarshal)(struct rpc_task *task, - struct xdr_stream *xdr); + struct rpc_cred * (*crbind)(struct rpc_task *, struct rpc_cred *, int); + __be32 * (*crmarshal)(struct rpc_task *, __be32 *); int (*crrefresh)(struct rpc_task *); - int (*crvalidate)(struct rpc_task *task, - struct xdr_stream *xdr); - int (*crwrap_req)(struct rpc_task *task, - struct xdr_stream *xdr); - int (*crunwrap_resp)(struct rpc_task *task, - struct xdr_stream *xdr); + __be32 * (*crvalidate)(struct rpc_task *, __be32 *); + int (*crwrap_req)(struct rpc_task *, kxdreproc_t, + void *, __be32 *, void *); + int (*crunwrap_resp)(struct rpc_task *, kxdrdproc_t, + void *, __be32 *, void *); int (*crkey_timeout)(struct rpc_cred *); + bool (*crkey_to_expire)(struct rpc_cred *); char * (*crstringify_acceptor)(struct rpc_cred *); - bool (*crneed_reencode)(struct rpc_task *); }; extern const struct rpc_authops authunix_ops; extern const struct rpc_authops authnull_ops; int __init rpc_init_authunix(void); +int __init rpc_init_generic_auth(void); int __init rpcauth_init_module(void); void rpcauth_remove_module(void); +void rpc_destroy_generic_auth(void); void rpc_destroy_authunix(void); +struct rpc_cred * rpc_lookup_cred(void); +struct rpc_cred * rpc_lookup_cred_nonblock(void); +struct 
rpc_cred * rpc_lookup_generic_cred(struct auth_cred *, int, gfp_t); +struct rpc_cred * rpc_lookup_machine_cred(const char *service_name); int rpcauth_register(const struct rpc_authops *); int rpcauth_unregister(const struct rpc_authops *); -struct rpc_auth * rpcauth_create(const struct rpc_auth_create_args *, +struct rpc_auth * rpcauth_create(struct rpc_auth_create_args *, struct rpc_clnt *); void rpcauth_release(struct rpc_auth *); rpc_authflavor_t rpcauth_get_pseudoflavor(rpc_authflavor_t, struct rpcsec_gss_info *); int rpcauth_get_gssinfo(rpc_authflavor_t, struct rpcsec_gss_info *); +int rpcauth_list_flavors(rpc_authflavor_t *, int); struct rpc_cred * rpcauth_lookup_credcache(struct rpc_auth *, struct auth_cred *, int, gfp_t); void rpcauth_init_cred(struct rpc_cred *, const struct auth_cred *, struct rpc_auth *, const struct rpc_credops *); struct rpc_cred * rpcauth_lookupcred(struct rpc_auth *, int); +struct rpc_cred * rpcauth_generic_bind_cred(struct rpc_task *, struct rpc_cred *, int); void put_rpccred(struct rpc_cred *); -int rpcauth_marshcred(struct rpc_task *task, - struct xdr_stream *xdr); -int rpcauth_checkverf(struct rpc_task *task, - struct xdr_stream *xdr); -int rpcauth_wrap_req_encode(struct rpc_task *task, - struct xdr_stream *xdr); -int rpcauth_wrap_req(struct rpc_task *task, - struct xdr_stream *xdr); -int rpcauth_unwrap_resp_decode(struct rpc_task *task, - struct xdr_stream *xdr); -int rpcauth_unwrap_resp(struct rpc_task *task, - struct xdr_stream *xdr); -bool rpcauth_xmit_need_reencode(struct rpc_task *task); +__be32 * rpcauth_marshcred(struct rpc_task *, __be32 *); +__be32 * rpcauth_checkverf(struct rpc_task *, __be32 *); +int rpcauth_wrap_req(struct rpc_task *task, kxdreproc_t encode, void *rqstp, __be32 *data, void *obj); +int rpcauth_unwrap_resp(struct rpc_task *task, kxdrdproc_t decode, void *rqstp, __be32 *data, void *obj); int rpcauth_refreshcred(struct rpc_task *); void rpcauth_invalcred(struct rpc_task *); int 
rpcauth_uptodatecred(struct rpc_task *); int rpcauth_init_credcache(struct rpc_auth *); void rpcauth_destroy_credcache(struct rpc_auth *); void rpcauth_clear_credcache(struct rpc_cred_cache *); +int rpcauth_key_timeout_notify(struct rpc_auth *, + struct rpc_cred *); +bool rpcauth_cred_key_to_expire(struct rpc_auth *, struct rpc_cred *); char * rpcauth_stringify_acceptor(struct rpc_cred *); static inline -struct rpc_cred *get_rpccred(struct rpc_cred *cred) +struct rpc_cred * get_rpccred(struct rpc_cred *cred) { - if (cred != NULL && refcount_inc_not_zero(&cred->cr_count)) + if (cred != NULL) + atomic_inc(&cred->cr_count); + return cred; +} + +/** + * get_rpccred_rcu - get a reference to a cred using rcu-protected pointer + * @cred: cred of which to take a reference + * + * In some cases, we may have a pointer to a credential to which we + * want to take a reference, but don't already have one. Because these + * objects are freed using RCU, we can access the cr_count while its + * on its way to destruction and only take a reference if it's not already + * zero. + */ +static inline struct rpc_cred * +get_rpccred_rcu(struct rpc_cred *cred) +{ + if (atomic_inc_not_zero(&cred->cr_count)) return cred; return NULL; } +#endif /* __KERNEL__ */ #endif /* _LINUX_SUNRPC_AUTH_H */ diff --git a/include/linux/sunrpc/auth_gss.h b/include/linux/sunrpc/auth_gss.h index 43e481aa34..36eebc451b 100644 --- a/include/linux/sunrpc/auth_gss.h +++ b/include/linux/sunrpc/auth_gss.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ /* * linux/include/linux/sunrpc/auth_gss.h * @@ -13,7 +12,7 @@ #ifndef _LINUX_SUNRPC_AUTH_GSS_H #define _LINUX_SUNRPC_AUTH_GSS_H -#include +#ifdef __KERNEL__ #include #include #include @@ -66,10 +65,9 @@ struct rpc_gss_init_res { * the wire when communicating with a server. 
*/ struct gss_cl_ctx { - refcount_t count; + atomic_t count; enum rpc_gss_proc gc_proc; u32 gc_seq; - u32 gc_seq_xmit; spinlock_t gc_seq_lock; struct gss_ctx *gc_gss_ctx; struct xdr_netobj gc_wire_ctx; @@ -89,5 +87,6 @@ struct gss_cred { unsigned long gc_upcall_timestamp; }; +#endif /* __KERNEL__ */ #endif /* _LINUX_SUNRPC_AUTH_GSS_H */ diff --git a/include/linux/sunrpc/bc_xprt.h b/include/linux/sunrpc/bc_xprt.h index f07c334c59..4397a4824c 100644 --- a/include/linux/sunrpc/bc_xprt.h +++ b/include/linux/sunrpc/bc_xprt.h @@ -4,7 +4,7 @@ NetApp provides this source code under the GPL v2 License. The GPL v2 license is available at -https://opensource.org/licenses/gpl-license.php. +http://opensource.org/licenses/gpl-license.php. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT @@ -34,7 +34,6 @@ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. #ifdef CONFIG_SUNRPC_BACKCHANNEL struct rpc_rqst *xprt_lookup_bc_request(struct rpc_xprt *xprt, __be32 xid); void xprt_complete_bc_request(struct rpc_rqst *req, uint32_t copied); -void xprt_init_bc_request(struct rpc_rqst *req, struct rpc_task *task); void xprt_free_bc_request(struct rpc_rqst *req); int xprt_setup_backchannel(struct rpc_xprt *, unsigned int min_reqs); void xprt_destroy_backchannel(struct rpc_xprt *, unsigned int max_reqs); @@ -43,19 +42,15 @@ void xprt_destroy_backchannel(struct rpc_xprt *, unsigned int max_reqs); int xprt_setup_bc(struct rpc_xprt *xprt, unsigned int min_reqs); void xprt_destroy_bc(struct rpc_xprt *xprt, unsigned int max_reqs); void xprt_free_bc_rqst(struct rpc_rqst *req); -unsigned int xprt_bc_max_slots(struct rpc_xprt *xprt); /* * Determine if a shared backchannel is in use */ -static inline bool svc_is_backchannel(const struct svc_rqst *rqstp) +static inline int svc_is_backchannel(const struct svc_rqst *rqstp) { - return rqstp->rq_server->sv_bc_enabled; -} - -static inline void set_bc_enabled(struct 
svc_serv *serv) -{ - serv->sv_bc_enabled = true; + if (rqstp->rq_server->sv_bc_xprt) + return 1; + return 0; } #else /* CONFIG_SUNRPC_BACKCHANNEL */ static inline int xprt_setup_backchannel(struct rpc_xprt *xprt, @@ -64,18 +59,9 @@ static inline int xprt_setup_backchannel(struct rpc_xprt *xprt, return 0; } -static inline void xprt_destroy_backchannel(struct rpc_xprt *xprt, - unsigned int max_reqs) -{ -} - -static inline bool svc_is_backchannel(const struct svc_rqst *rqstp) -{ - return false; -} - -static inline void set_bc_enabled(struct svc_serv *serv) +static inline int svc_is_backchannel(const struct svc_rqst *rqstp) { + return 0; } static inline void xprt_free_bc_request(struct rpc_rqst *req) diff --git a/include/linux/sunrpc/cache.h b/include/linux/sunrpc/cache.h index b134b2b337..62a60eeacb 100644 --- a/include/linux/sunrpc/cache.h +++ b/include/linux/sunrpc/cache.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * include/linux/sunrpc/cache.h * @@ -6,6 +5,9 @@ * used by sunrpc clients and servers. * * Copyright (C) 2002 Neil Brown + * + * Released under terms in GPL version 2. See COPYING. + * */ #ifndef _LINUX_SUNRPC_CACHE_H_ @@ -14,7 +16,6 @@ #include #include #include -#include #include /* @@ -46,9 +47,8 @@ */ struct cache_head { struct hlist_node cache_list; - time64_t expiry_time; /* After time expiry_time, don't use - * the data */ - time64_t last_refresh; /* If CACHE_PENDING, this is when upcall was + time_t expiry_time; /* After time time, don't use the data */ + time_t last_refresh; /* If CACHE_PENDING, this is when upcall was * sent, else this is when update was * received, though it is alway set to * be *after* ->flush_time. 
@@ -63,11 +63,20 @@ struct cache_head { #define CACHE_NEW_EXPIRY 120 /* keep new things pending confirmation for 120 seconds */ +struct cache_detail_procfs { + struct proc_dir_entry *proc_ent; + struct proc_dir_entry *flush_ent, *channel_ent, *content_ent; +}; + +struct cache_detail_pipefs { + struct dentry *dir; +}; + struct cache_detail { struct module * owner; int hash_size; struct hlist_head * hash_table; - spinlock_t hash_lock; + rwlock_t hash_lock; char *name; void (*cache_put)(struct kref *); @@ -89,7 +98,6 @@ struct cache_detail { int has_died); struct cache_head * (*alloc)(void); - void (*flush)(void); int (*match)(struct cache_head *orig, struct cache_head *new); void (*init)(struct cache_head *orig, struct cache_head *new); void (*update)(struct cache_head *orig, struct cache_head *new); @@ -97,27 +105,27 @@ struct cache_detail { /* fields below this comment are for internal use * and should not be touched by cache owners */ - time64_t flush_time; /* flush all cache items with + time_t flush_time; /* flush all cache items with * last_refresh at or earlier * than this. last_refresh * is never set at or earlier * than this. */ struct list_head others; - time64_t nextcheck; + time_t nextcheck; int entries; /* fields for communication over channel */ struct list_head queue; - atomic_t writers; /* how many time is /channel open */ - time64_t last_close; /* if no writers, when did last close */ - time64_t last_warn; /* when we last warned about no writers */ + atomic_t readers; /* how many time is /chennel open */ + time_t last_close; /* if no readers, when did last close */ + time_t last_warn; /* when we last warned about no readers */ union { - struct proc_dir_entry *procfs; - struct dentry *pipefs; - }; + struct cache_detail_procfs procfs; + struct cache_detail_pipefs pipefs; + } u; struct net *net; }; @@ -149,22 +157,18 @@ struct cache_deferred_req { * timestamps kept in the cache are expressed in seconds * since boot. 
This is the best for measuring differences in * real time. - * This reimplemnts ktime_get_boottime_seconds() in a slightly - * faster but less accurate way. When we end up converting - * back to wallclock (CLOCK_REALTIME), that error often - * cancels out during the reverse operation. */ -static inline time64_t seconds_since_boot(void) +static inline time_t seconds_since_boot(void) { - struct timespec64 boot; - getboottime64(&boot); - return ktime_get_real_seconds() - boot.tv_sec; + struct timespec boot; + getboottime(&boot); + return get_seconds() - boot.tv_sec; } -static inline time64_t convert_to_wallclock(time64_t sinceboot) +static inline time_t convert_to_wallclock(time_t sinceboot) { - struct timespec64 boot; - getboottime64(&boot); + struct timespec boot; + getboottime(&boot); return boot.tv_sec + sinceboot; } @@ -173,17 +177,14 @@ extern const struct file_operations content_file_operations_pipefs; extern const struct file_operations cache_flush_operations_pipefs; extern struct cache_head * -sunrpc_cache_lookup_rcu(struct cache_detail *detail, - struct cache_head *key, int hash); +sunrpc_cache_lookup(struct cache_detail *detail, + struct cache_head *key, int hash); extern struct cache_head * sunrpc_cache_update(struct cache_detail *detail, struct cache_head *new, struct cache_head *old, int hash); extern int sunrpc_cache_pipe_upcall(struct cache_detail *detail, struct cache_head *h); -extern int -sunrpc_cache_pipe_upcall_timeout(struct cache_detail *detail, - struct cache_head *h); extern void cache_clean_deferred(void *owner); @@ -194,28 +195,19 @@ static inline struct cache_head *cache_get(struct cache_head *h) return h; } -static inline struct cache_head *cache_get_rcu(struct cache_head *h) -{ - if (kref_get_unless_zero(&h->ref)) - return h; - return NULL; -} static inline void cache_put(struct cache_head *h, struct cache_detail *cd) { - if (kref_read(&h->ref) <= 2 && + if (atomic_read(&h->ref.refcount) <= 2 && h->expiry_time < cd->nextcheck) 
cd->nextcheck = h->expiry_time; kref_put(&h->ref, cd->cache_put); } -static inline bool cache_is_expired(struct cache_detail *detail, struct cache_head *h) +static inline int cache_is_expired(struct cache_detail *detail, struct cache_head *h) { - if (h->expiry_time < seconds_since_boot()) - return true; - if (!test_bit(CACHE_VALID, &h->flags)) - return false; - return detail->flush_time >= h->last_refresh; + return (h->expiry_time < seconds_since_boot()) || + (detail->flush_time >= h->last_refresh); } extern int cache_check(struct cache_detail *detail, @@ -227,7 +219,7 @@ extern void __init cache_initialize(void); extern int cache_register_net(struct cache_detail *cd, struct net *net); extern void cache_unregister_net(struct cache_detail *cd, struct net *net); -extern struct cache_detail *cache_create_net(const struct cache_detail *tmpl, struct net *net); +extern struct cache_detail *cache_create_net(struct cache_detail *tmpl, struct net *net); extern void cache_destroy_net(struct cache_detail *cd, struct net *net); extern void sunrpc_init_cache_detail(struct cache_detail *cd); @@ -235,12 +227,11 @@ extern void sunrpc_destroy_cache_detail(struct cache_detail *cd); extern int sunrpc_cache_register_pipefs(struct dentry *parent, const char *, umode_t, struct cache_detail *); extern void sunrpc_cache_unregister_pipefs(struct cache_detail *); -extern void sunrpc_cache_unhash(struct cache_detail *, struct cache_head *); /* Must store cache_detail in seq_file->private if using next three functions */ -extern void *cache_seq_start_rcu(struct seq_file *file, loff_t *pos); -extern void *cache_seq_next_rcu(struct seq_file *file, void *p, loff_t *pos); -extern void cache_seq_stop_rcu(struct seq_file *file, void *p); +extern void *cache_seq_start(struct seq_file *file, loff_t *pos); +extern void *cache_seq_next(struct seq_file *file, void *p, loff_t *pos); +extern void cache_seq_stop(struct seq_file *file, void *p); extern void qword_add(char **bpp, int *lp, char *str); extern 
void qword_addhex(char **bpp, int *lp, char *buf, int blen); @@ -282,7 +273,7 @@ static inline int get_uint(char **bpp, unsigned int *anint) return 0; } -static inline int get_time(char **bpp, time64_t *time) +static inline int get_time(char **bpp, time_t *time) { char buf[50]; long long ll; @@ -296,20 +287,20 @@ static inline int get_time(char **bpp, time64_t *time) if (kstrtoll(buf, 0, &ll)) return -EINVAL; - *time = ll; + *time = (time_t)ll; return 0; } -static inline time64_t get_expiry(char **bpp) +static inline time_t get_expiry(char **bpp) { - time64_t rv; - struct timespec64 boot; + time_t rv; + struct timespec boot; if (get_time(bpp, &rv)) return 0; if (rv < 0) return 0; - getboottime64(&boot); + getboottime(&boot); return rv - boot.tv_sec; } diff --git a/include/linux/sunrpc/clnt.h b/include/linux/sunrpc/clnt.h index a4661646ad..1c14c60ca9 100644 --- a/include/linux/sunrpc/clnt.h +++ b/include/linux/sunrpc/clnt.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ /* * linux/include/linux/sunrpc/clnt.h * @@ -14,7 +13,6 @@ #include #include #include -#include #include #include @@ -30,19 +28,18 @@ #include struct rpc_inode; -struct rpc_sysfs_client; /* * The high-level client handle */ struct rpc_clnt { - refcount_t cl_count; /* Number of references */ + atomic_t cl_count; /* Number of references */ unsigned int cl_clid; /* client id */ struct list_head cl_clients; /* Global list of clients */ struct list_head cl_tasks; /* List of tasks */ spinlock_t cl_lock; /* spinlock */ struct rpc_xprt __rcu * cl_xprt; /* transport */ - const struct rpc_procinfo *cl_procinfo; /* procedure info */ + struct rpc_procinfo * cl_procinfo; /* procedure info */ u32 cl_prog, /* RPC program number */ cl_vers, /* RPC version number */ cl_maxproc; /* max procedure number */ @@ -52,7 +49,6 @@ struct rpc_clnt { struct rpc_iostats * cl_metrics; /* per-client statistics */ unsigned int cl_softrtry : 1,/* soft timeouts */ - cl_softerr : 1,/* Timeouts return errors */ cl_discrtry : 
1,/* disconnect before retry */ cl_noretranstimeo: 1,/* No retransmit timeouts */ cl_autobind : 1,/* use getport() */ @@ -69,20 +65,10 @@ struct rpc_clnt { struct rpc_rtt cl_rtt_default; struct rpc_timeout cl_timeout_default; const struct rpc_program *cl_program; - const char * cl_principal; /* use for machine cred */ #if IS_ENABLED(CONFIG_SUNRPC_DEBUG) struct dentry *cl_debugfs; /* debugfs directory */ #endif - struct rpc_sysfs_client *cl_sysfs; /* sysfs directory */ - /* cl_work is only needed after cl_xpi is no longer used, - * and that are of similar size - */ - union { - struct rpc_xprt_iter cl_xpi; - struct work_struct cl_work; - }; - const struct cred *cl_cred; - unsigned int cl_max_connect; /* max number of transports not to the same IP */ + struct rpc_xprt_iter cl_xpi; }; /* @@ -101,8 +87,7 @@ struct rpc_program { struct rpc_version { u32 number; /* version number */ unsigned int nrprocs; /* number of procs */ - const struct rpc_procinfo *procs; /* procedure array */ - unsigned int *counts; /* call counts */ + struct rpc_procinfo * procs; /* procedure array */ }; /* @@ -114,10 +99,13 @@ struct rpc_procinfo { kxdrdproc_t p_decode; /* XDR decode function */ unsigned int p_arglen; /* argument hdr length (u32) */ unsigned int p_replen; /* reply hdr length (u32) */ + unsigned int p_count; /* call count */ unsigned int p_timer; /* Which RTT timer to use */ u32 p_statidx; /* Which procedure to account */ const char * p_name; /* name of procedure */ -}; +} __do_const; + +#ifdef __KERNEL__ struct rpc_create_args { struct net *net; @@ -132,17 +120,14 @@ struct rpc_create_args { u32 prognumber; /* overrides program->number */ u32 version; rpc_authflavor_t authflavor; - u32 nconnect; unsigned long flags; char *client_name; struct svc_xprt *bc_xprt; /* NFSv4.1 backchannel */ - const struct cred *cred; - unsigned int max_connect; }; struct rpc_add_xprt_test { - void (*add_xprt_test)(struct rpc_clnt *clnt, - struct rpc_xprt *xprt, + int (*add_xprt_test)(struct rpc_clnt 
*, + struct rpc_xprt *, void *calldata); void *data; }; @@ -157,8 +142,6 @@ struct rpc_add_xprt_test { #define RPC_CLNT_CREATE_INFINITE_SLOTS (1UL << 7) #define RPC_CLNT_CREATE_NO_IDLE_TIMEOUT (1UL << 8) #define RPC_CLNT_CREATE_NO_RETRANS_TIMEOUT (1UL << 9) -#define RPC_CLNT_CREATE_SOFTERR (1UL << 10) -#define RPC_CLNT_CREATE_REUSEPORT (1UL << 11) struct rpc_clnt *rpc_create(struct rpc_create_args *args); struct rpc_clnt *rpc_bind_new_program(struct rpc_clnt *, @@ -172,10 +155,7 @@ int rpc_switch_client_transport(struct rpc_clnt *, void rpc_shutdown_client(struct rpc_clnt *); void rpc_release_client(struct rpc_clnt *); -void rpc_task_release_transport(struct rpc_task *); void rpc_task_release_client(struct rpc_task *); -struct rpc_xprt *rpc_task_get_xprt(struct rpc_clnt *clnt, - struct rpc_xprt *xprt); int rpcb_create_local(struct net *); void rpcb_put_local(struct net *); @@ -186,9 +166,6 @@ int rpcb_v4_register(struct net *net, const u32 program, const char *netid); void rpcb_getport_async(struct rpc_task *); -void rpc_prepare_reply_pages(struct rpc_rqst *req, struct page **pages, - unsigned int base, unsigned int len, - unsigned int hdrsize); void rpc_call_start(struct rpc_task *); int rpc_call_async(struct rpc_clnt *clnt, const struct rpc_message *msg, int flags, @@ -201,10 +178,11 @@ struct rpc_task *rpc_call_null(struct rpc_clnt *clnt, struct rpc_cred *cred, int rpc_restart_call_prepare(struct rpc_task *); int rpc_restart_call(struct rpc_task *); void rpc_setbufsize(struct rpc_clnt *, unsigned int, unsigned int); +int rpc_protocol(struct rpc_clnt *); struct net * rpc_net_ns(struct rpc_clnt *); size_t rpc_max_payload(struct rpc_clnt *); size_t rpc_max_bc_payload(struct rpc_clnt *); -unsigned int rpc_num_bc_slots(struct rpc_clnt *); +unsigned long rpc_get_timeout(struct rpc_clnt *clnt); void rpc_force_rebind(struct rpc_clnt *); size_t rpc_peeraddr(struct rpc_clnt *, struct sockaddr *, size_t); const char *rpc_peeraddr2str(struct rpc_clnt *, enum 
rpc_display_format_t); @@ -224,9 +202,8 @@ int rpc_clnt_add_xprt(struct rpc_clnt *, struct xprt_create *, struct rpc_xprt *, void *), void *data); -void rpc_set_connect_timeout(struct rpc_clnt *clnt, - unsigned long connect_timeout, - unsigned long reconnect_timeout); +void rpc_cap_max_reconnect_timeout(struct rpc_clnt *clnt, + unsigned long timeo); int rpc_clnt_setup_test_and_add_xprt(struct rpc_clnt *, struct rpc_xprt_switch *, @@ -240,16 +217,5 @@ void rpc_clnt_xprt_switch_add_xprt(struct rpc_clnt *, struct rpc_xprt *); bool rpc_clnt_xprt_switch_has_addr(struct rpc_clnt *clnt, const struct sockaddr *sap); void rpc_cleanup_clids(void); - -static inline int rpc_reply_expected(struct rpc_task *task) -{ - return (task->tk_msg.rpc_proc != NULL) && - (task->tk_msg.rpc_proc->p_decode != NULL); -} - -static inline void rpc_task_close_connection(struct rpc_task *task) -{ - if (task->tk_xprt) - xprt_force_disconnect(task->tk_xprt); -} +#endif /* __KERNEL__ */ #endif /* _LINUX_SUNRPC_CLNT_H */ diff --git a/include/linux/sunrpc/debug.h b/include/linux/sunrpc/debug.h index f6aeed07fe..59a7889e15 100644 --- a/include/linux/sunrpc/debug.h +++ b/include/linux/sunrpc/debug.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ /* * linux/include/linux/sunrpc/debug.h * @@ -21,55 +20,33 @@ extern unsigned int nfsd_debug; extern unsigned int nlm_debug; #endif -#define dprintk(fmt, ...) \ - dfprintk(FACILITY, fmt, ##__VA_ARGS__) -#define dprintk_cont(fmt, ...) \ - dfprintk_cont(FACILITY, fmt, ##__VA_ARGS__) -#define dprintk_rcu(fmt, ...) \ - dfprintk_rcu(FACILITY, fmt, ##__VA_ARGS__) -#define dprintk_rcu_cont(fmt, ...) \ - dfprintk_rcu_cont(FACILITY, fmt, ##__VA_ARGS__) +#define dprintk(args...) dfprintk(FACILITY, ## args) +#define dprintk_rcu(args...) dfprintk_rcu(FACILITY, ## args) #undef ifdebug #if IS_ENABLED(CONFIG_SUNRPC_DEBUG) # define ifdebug(fac) if (unlikely(rpc_debug & RPCDBG_##fac)) -# define dfprintk(fac, fmt, ...) 
\ -do { \ - ifdebug(fac) \ - printk(KERN_DEFAULT fmt, ##__VA_ARGS__); \ -} while (0) +# define dfprintk(fac, args...) \ + do { \ + ifdebug(fac) \ + printk(KERN_DEFAULT args); \ + } while (0) -# define dfprintk_cont(fac, fmt, ...) \ -do { \ - ifdebug(fac) \ - printk(KERN_CONT fmt, ##__VA_ARGS__); \ -} while (0) - -# define dfprintk_rcu(fac, fmt, ...) \ -do { \ - ifdebug(fac) { \ - rcu_read_lock(); \ - printk(KERN_DEFAULT fmt, ##__VA_ARGS__); \ - rcu_read_unlock(); \ - } \ -} while (0) - -# define dfprintk_rcu_cont(fac, fmt, ...) \ -do { \ - ifdebug(fac) { \ - rcu_read_lock(); \ - printk(KERN_CONT fmt, ##__VA_ARGS__); \ - rcu_read_unlock(); \ - } \ -} while (0) +# define dfprintk_rcu(fac, args...) \ + do { \ + ifdebug(fac) { \ + rcu_read_lock(); \ + printk(KERN_DEFAULT args); \ + rcu_read_unlock(); \ + } \ + } while (0) # define RPC_IFDEBUG(x) x #else # define ifdebug(fac) if (0) -# define dfprintk(fac, fmt, ...) do {} while (0) -# define dfprintk_cont(fac, fmt, ...) do {} while (0) -# define dfprintk_rcu(fac, fmt, ...) do {} while (0) +# define dfprintk(fac, args...) do {} while (0) +# define dfprintk_rcu(fac, args...) 
do {} while (0) # define RPC_IFDEBUG(x) #endif diff --git a/include/linux/sunrpc/gss_api.h b/include/linux/sunrpc/gss_api.h index bf4ac8a026..68ec78c1aa 100644 --- a/include/linux/sunrpc/gss_api.h +++ b/include/linux/sunrpc/gss_api.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ /* * linux/include/linux/sunrpc/gss_api.h * @@ -13,6 +12,7 @@ #ifndef _LINUX_SUNRPC_GSS_API_H #define _LINUX_SUNRPC_GSS_API_H +#ifdef __KERNEL__ #include #include #include @@ -21,7 +21,6 @@ struct gss_ctx { struct gss_api_mech *mech_type; void *internal_ctx_id; - unsigned int slack, align; }; #define GSS_C_NO_BUFFER ((struct xdr_netobj) 0) @@ -49,7 +48,7 @@ int gss_import_sec_context( size_t bufsize, struct gss_api_mech *mech, struct gss_ctx **ctx_id, - time64_t *endtime, + time_t *endtime, gfp_t gfp_mask); u32 gss_get_mic( struct gss_ctx *ctx_id, @@ -67,7 +66,6 @@ u32 gss_wrap( u32 gss_unwrap( struct gss_ctx *ctx_id, int offset, - int len, struct xdr_buf *inbuf); u32 gss_delete_sec_context( struct gss_ctx **ctx_id); @@ -84,7 +82,6 @@ struct pf_desc { u32 service; char *name; char *auth_domain_name; - struct auth_domain *domain; bool datatouch; }; @@ -111,7 +108,7 @@ struct gss_api_ops { const void *input_token, size_t bufsize, struct gss_ctx *ctx_id, - time64_t *endtime, + time_t *endtime, gfp_t gfp_mask); u32 (*gss_get_mic)( struct gss_ctx *ctx_id, @@ -129,7 +126,6 @@ struct gss_api_ops { u32 (*gss_unwrap)( struct gss_ctx *ctx_id, int offset, - int len, struct xdr_buf *buf); void (*gss_delete_sec_context)( void *internal_ctx_id); @@ -154,11 +150,15 @@ struct gss_api_mech *gss_mech_get_by_name(const char *); /* Similar, but get by pseudoflavor. 
*/ struct gss_api_mech *gss_mech_get_by_pseudoflavor(u32); +/* Fill in an array with a list of supported pseudoflavors */ +int gss_mech_list_pseudoflavors(rpc_authflavor_t *, int); + struct gss_api_mech * gss_mech_get(struct gss_api_mech *); /* For every successful gss_mech_get or gss_mech_get_by_* call there must be a * corresponding call to gss_mech_put. */ void gss_mech_put(struct gss_api_mech *); +#endif /* __KERNEL__ */ #endif /* _LINUX_SUNRPC_GSS_API_H */ diff --git a/include/linux/sunrpc/gss_err.h b/include/linux/sunrpc/gss_err.h index b73c329c83..a6807867bd 100644 --- a/include/linux/sunrpc/gss_err.h +++ b/include/linux/sunrpc/gss_err.h @@ -34,6 +34,8 @@ #ifndef _LINUX_SUNRPC_GSS_ERR_H #define _LINUX_SUNRPC_GSS_ERR_H +#ifdef __KERNEL__ + typedef unsigned int OM_uint32; /* @@ -161,4 +163,5 @@ typedef unsigned int OM_uint32; /* XXXX This is a necessary evil until the spec is fixed */ #define GSS_S_CRED_UNAVAIL GSS_S_FAILURE +#endif /* __KERNEL__ */ #endif /* __LINUX_SUNRPC_GSS_ERR_H */ diff --git a/include/linux/sunrpc/gss_krb5.h b/include/linux/sunrpc/gss_krb5.h index 91f43d8687..7df625d41e 100644 --- a/include/linux/sunrpc/gss_krb5.h +++ b/include/linux/sunrpc/gss_krb5.h @@ -71,10 +71,10 @@ struct gss_krb5_enctype { const u32 keyed_cksum; /* is it a keyed cksum? 
*/ const u32 keybytes; /* raw key len, in bytes */ const u32 keylength; /* final key len, in bytes */ - u32 (*encrypt) (struct crypto_sync_skcipher *tfm, + u32 (*encrypt) (struct crypto_skcipher *tfm, void *iv, void *in, void *out, int length); /* encryption function */ - u32 (*decrypt) (struct crypto_sync_skcipher *tfm, + u32 (*decrypt) (struct crypto_skcipher *tfm, void *iv, void *in, void *out, int length); /* decryption function */ u32 (*mk_key) (const struct gss_krb5_enctype *gk5e, @@ -83,7 +83,7 @@ struct gss_krb5_enctype { u32 (*encrypt_v2) (struct krb5_ctx *kctx, u32 offset, struct xdr_buf *buf, struct page **pages); /* v2 encryption function */ - u32 (*decrypt_v2) (struct krb5_ctx *kctx, u32 offset, u32 len, + u32 (*decrypt_v2) (struct krb5_ctx *kctx, u32 offset, struct xdr_buf *buf, u32 *headskip, u32 *tailskip); /* v2 decryption function */ }; @@ -98,17 +98,17 @@ struct krb5_ctx { u32 enctype; u32 flags; const struct gss_krb5_enctype *gk5e; /* enctype-specific info */ - struct crypto_sync_skcipher *enc; - struct crypto_sync_skcipher *seq; - struct crypto_sync_skcipher *acceptor_enc; - struct crypto_sync_skcipher *initiator_enc; - struct crypto_sync_skcipher *acceptor_enc_aux; - struct crypto_sync_skcipher *initiator_enc_aux; + struct crypto_skcipher *enc; + struct crypto_skcipher *seq; + struct crypto_skcipher *acceptor_enc; + struct crypto_skcipher *initiator_enc; + struct crypto_skcipher *acceptor_enc_aux; + struct crypto_skcipher *initiator_enc_aux; u8 Ksess[GSS_KRB5_MAX_KEYLEN]; /* session key */ u8 cksum[GSS_KRB5_MAX_KEYLEN]; - atomic_t seq_send; - atomic64_t seq_send64; - time64_t endtime; + s32 endtime; + u32 seq_send; + u64 seq_send64; struct xdr_netobj mech_used; u8 initiator_sign[GSS_KRB5_MAX_KEYLEN]; u8 acceptor_sign[GSS_KRB5_MAX_KEYLEN]; @@ -118,6 +118,8 @@ struct krb5_ctx { u8 acceptor_integ[GSS_KRB5_MAX_KEYLEN]; }; +extern spinlock_t krb5_seq_lock; + /* The length of the Kerberos GSS token header */ #define GSS_KRB5_TOK_HDR_LEN (16) @@ 
-141,12 +143,14 @@ enum sgn_alg { SGN_ALG_MD2_5 = 0x0001, SGN_ALG_DES_MAC = 0x0002, SGN_ALG_3 = 0x0003, /* not published */ + SGN_ALG_HMAC_MD5 = 0x0011, /* microsoft w2k; no support */ SGN_ALG_HMAC_SHA1_DES3_KD = 0x0004 }; enum seal_alg { SEAL_ALG_NONE = 0xffff, SEAL_ALG_DES = 0x0000, SEAL_ALG_1 = 0x0001, /* not published */ + SEAL_ALG_MICROSOFT_RC4 = 0x0010,/* microsoft w2k; no support */ SEAL_ALG_DES3KD = 0x0002 }; @@ -253,29 +257,29 @@ gss_wrap_kerberos(struct gss_ctx *ctx_id, int offset, struct xdr_buf *outbuf, struct page **pages); u32 -gss_unwrap_kerberos(struct gss_ctx *ctx_id, int offset, int len, +gss_unwrap_kerberos(struct gss_ctx *ctx_id, int offset, struct xdr_buf *buf); u32 -krb5_encrypt(struct crypto_sync_skcipher *key, +krb5_encrypt(struct crypto_skcipher *key, void *iv, void *in, void *out, int length); u32 -krb5_decrypt(struct crypto_sync_skcipher *key, +krb5_decrypt(struct crypto_skcipher *key, void *iv, void *in, void *out, int length); int -gss_encrypt_xdr_buf(struct crypto_sync_skcipher *tfm, struct xdr_buf *outbuf, +gss_encrypt_xdr_buf(struct crypto_skcipher *tfm, struct xdr_buf *outbuf, int offset, struct page **pages); int -gss_decrypt_xdr_buf(struct crypto_sync_skcipher *tfm, struct xdr_buf *inbuf, +gss_decrypt_xdr_buf(struct crypto_skcipher *tfm, struct xdr_buf *inbuf, int offset); s32 krb5_make_seq_num(struct krb5_ctx *kctx, - struct crypto_sync_skcipher *key, + struct crypto_skcipher *key, int direction, u32 seqnum, unsigned char *cksum, unsigned char *buf); @@ -310,9 +314,18 @@ gss_krb5_aes_encrypt(struct krb5_ctx *kctx, u32 offset, struct page **pages); u32 -gss_krb5_aes_decrypt(struct krb5_ctx *kctx, u32 offset, u32 len, +gss_krb5_aes_decrypt(struct krb5_ctx *kctx, u32 offset, struct xdr_buf *buf, u32 *plainoffset, u32 *plainlen); +int +krb5_rc4_setup_seq_key(struct krb5_ctx *kctx, + struct crypto_skcipher *cipher, + unsigned char *cksum); + +int +krb5_rc4_setup_enc_key(struct krb5_ctx *kctx, + struct crypto_skcipher *cipher, + s32 
seqnum); void gss_krb5_make_confounder(char *p, u32 conflen); diff --git a/include/linux/sunrpc/gss_krb5_enctypes.h b/include/linux/sunrpc/gss_krb5_enctypes.h index 87eea679d7..ec6234eee8 100644 --- a/include/linux/sunrpc/gss_krb5_enctypes.h +++ b/include/linux/sunrpc/gss_krb5_enctypes.h @@ -1,41 +1,4 @@ -/* SPDX-License-Identifier: GPL-2.0 */ /* - * Define the string that exports the set of kernel-supported - * Kerberos enctypes. This list is sent via upcall to gssd, and - * is also exposed via the nfsd /proc API. The consumers generally - * treat this as an ordered list, where the first item in the list - * is the most preferred. + * Dumb way to share this static piece of information with nfsd */ - -#ifndef _LINUX_SUNRPC_GSS_KRB5_ENCTYPES_H -#define _LINUX_SUNRPC_GSS_KRB5_ENCTYPES_H - -#ifdef CONFIG_SUNRPC_DISABLE_INSECURE_ENCTYPES - -/* - * NB: This list includes DES3_CBC_SHA1, which was deprecated by RFC 8429. - * - * ENCTYPE_AES256_CTS_HMAC_SHA1_96 - * ENCTYPE_AES128_CTS_HMAC_SHA1_96 - * ENCTYPE_DES3_CBC_SHA1 - */ -#define KRB5_SUPPORTED_ENCTYPES "18,17,16" - -#else /* CONFIG_SUNRPC_DISABLE_INSECURE_ENCTYPES */ - -/* - * NB: This list includes encryption types that were deprecated - * by RFC 8429 and RFC 6649. 
- * - * ENCTYPE_AES256_CTS_HMAC_SHA1_96 - * ENCTYPE_AES128_CTS_HMAC_SHA1_96 - * ENCTYPE_DES3_CBC_SHA1 - * ENCTYPE_DES_CBC_MD5 - * ENCTYPE_DES_CBC_CRC - * ENCTYPE_DES_CBC_MD4 - */ -#define KRB5_SUPPORTED_ENCTYPES "18,17,16,3,1,2" - -#endif /* CONFIG_SUNRPC_DISABLE_INSECURE_ENCTYPES */ - -#endif /* _LINUX_SUNRPC_GSS_KRB5_ENCTYPES_H */ +#define KRB5_SUPPORTED_ENCTYPES "18,17,16,23,3,1,2" diff --git a/include/linux/sunrpc/metrics.h b/include/linux/sunrpc/metrics.h index 0ee3f70528..694eecb2f1 100644 --- a/include/linux/sunrpc/metrics.h +++ b/include/linux/sunrpc/metrics.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ /* * linux/include/linux/sunrpc/metrics.h * @@ -30,7 +29,7 @@ #include #include -#define RPC_IOSTATS_VERS "1.1" +#define RPC_IOSTATS_VERS "1.0" struct rpc_iostats { spinlock_t om_lock; @@ -66,11 +65,6 @@ struct rpc_iostats { ktime_t om_queue, /* queued for xmit */ om_rtt, /* RPC RTT */ om_execute; /* RPC execution */ - /* - * The count of operations that complete with tk_status < 0. - * These statuses usually indicate error conditions. 
- */ - unsigned long om_error_status; } ____cacheline_aligned; struct rpc_task; @@ -87,7 +81,7 @@ void rpc_count_iostats(const struct rpc_task *, struct rpc_iostats *); void rpc_count_iostats_metrics(const struct rpc_task *, struct rpc_iostats *); -void rpc_clnt_show_stats(struct seq_file *, struct rpc_clnt *); +void rpc_print_iostats(struct seq_file *, struct rpc_clnt *); void rpc_free_iostats(struct rpc_iostats *); #else /* CONFIG_PROC_FS */ @@ -100,7 +94,7 @@ static inline void rpc_count_iostats_metrics(const struct rpc_task *task, { } -static inline void rpc_clnt_show_stats(struct seq_file *seq, struct rpc_clnt *clnt) {} +static inline void rpc_print_iostats(struct seq_file *seq, struct rpc_clnt *clnt) {} static inline void rpc_free_iostats(struct rpc_iostats *stats) {} #endif /* CONFIG_PROC_FS */ diff --git a/include/linux/sunrpc/msg_prot.h b/include/linux/sunrpc/msg_prot.h index 02117ed0fa..59cbf16eae 100644 --- a/include/linux/sunrpc/msg_prot.h +++ b/include/linux/sunrpc/msg_prot.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ /* * linux/include/linux/sunrpc/msg_prot.h * @@ -8,8 +7,13 @@ #ifndef _LINUX_SUNRPC_MSGPROT_H_ #define _LINUX_SUNRPC_MSGPROT_H_ +#ifdef __KERNEL__ /* user programs should get these from the rpc header files */ + #define RPC_VERSION 2 +/* size of an XDR encoding unit in bytes, i.e. 32bit */ +#define XDR_UNIT (4) + /* spec defines authentication flavor as an unsigned 32 bit integer */ typedef u32 rpc_authflavor_t; @@ -20,7 +24,6 @@ enum rpc_auth_flavors { RPC_AUTH_DES = 3, RPC_AUTH_KRB = 4, RPC_AUTH_GSS = 6, - RPC_AUTH_TLS = 7, RPC_AUTH_MAXFLAVOR = 8, /* pseudoflavors: */ RPC_AUTH_GSS_KRB5 = 390003, @@ -141,7 +144,7 @@ typedef __be32 rpc_fraghdr; /* * Well-known netids. 
See: * - * https://www.iana.org/assignments/rpc-netids/rpc-netids.xhtml + * http://www.iana.org/assignments/rpc-netids/rpc-netids.xhtml */ #define RPCBIND_NETID_UDP "udp" #define RPCBIND_NETID_TCP "tcp" @@ -213,4 +216,5 @@ typedef __be32 rpc_fraghdr; /* Assume INET6_ADDRSTRLEN will always be larger than INET_ADDRSTRLEN... */ #define RPCBIND_MAXUADDRLEN RPCBIND_MAXUADDR6LEN +#endif /* __KERNEL__ */ #endif /* _LINUX_SUNRPC_MSGPROT_H_ */ diff --git a/include/linux/sunrpc/rpc_pipe_fs.h b/include/linux/sunrpc/rpc_pipe_fs.h index cd188a527d..7f490bef9e 100644 --- a/include/linux/sunrpc/rpc_pipe_fs.h +++ b/include/linux/sunrpc/rpc_pipe_fs.h @@ -1,7 +1,8 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_SUNRPC_RPC_PIPE_FS_H #define _LINUX_SUNRPC_RPC_PIPE_FS_H +#ifdef __KERNEL__ + #include struct rpc_pipe_dir_head { @@ -120,6 +121,8 @@ extern struct dentry *rpc_create_cache_dir(struct dentry *, struct cache_detail *); extern void rpc_remove_cache_dir(struct dentry *); +extern int rpc_rmdir(struct dentry *dentry); + struct rpc_pipe *rpc_mkpipe_data(const struct rpc_pipe_ops *ops, int flags); void rpc_destroy_pipe_data(struct rpc_pipe *pipe); extern struct dentry *rpc_mkpipe_dentry(struct dentry *, const char *, void *, @@ -131,3 +134,4 @@ extern void unregister_rpc_pipefs(void); extern bool gssd_running(struct net *net); #endif +#endif diff --git a/include/linux/sunrpc/rpc_rdma.h b/include/linux/sunrpc/rpc_rdma.h index 4af31bbc88..cfda6adcf3 100644 --- a/include/linux/sunrpc/rpc_rdma.h +++ b/include/linux/sunrpc/rpc_rdma.h @@ -1,6 +1,4 @@ -/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ /* - * Copyright (c) 2015-2017 Oracle. All rights reserved. * Copyright (c) 2003-2007 Network Appliance, Inc. All rights reserved. 
* * This software is available to you under a choice of one of two @@ -52,14 +50,63 @@ enum { RPCRDMA_V1_DEF_INLINE_SIZE = 1024, }; +struct rpcrdma_segment { + __be32 rs_handle; /* Registered memory handle */ + __be32 rs_length; /* Length of the chunk in bytes */ + __be64 rs_offset; /* Chunk virtual address or offset */ +}; + /* - * XDR sizes, in quads + * read chunk(s), encoded as a linked list. */ -enum { - rpcrdma_fixed_maxsz = 4, - rpcrdma_segment_maxsz = 4, - rpcrdma_readseg_maxsz = 1 + rpcrdma_segment_maxsz, - rpcrdma_readchunk_maxsz = 1 + rpcrdma_readseg_maxsz, +struct rpcrdma_read_chunk { + __be32 rc_discrim; /* 1 indicates presence */ + __be32 rc_position; /* Position in XDR stream */ + struct rpcrdma_segment rc_target; +}; + +/* + * write chunk, and reply chunk. + */ +struct rpcrdma_write_chunk { + struct rpcrdma_segment wc_target; +}; + +/* + * write chunk(s), encoded as a counted array. + */ +struct rpcrdma_write_array { + __be32 wc_discrim; /* 1 indicates presence */ + __be32 wc_nchunks; /* Array count */ + struct rpcrdma_write_chunk wc_array[0]; +}; + +struct rpcrdma_msg { + __be32 rm_xid; /* Mirrors the RPC header xid */ + __be32 rm_vers; /* Version of this protocol */ + __be32 rm_credit; /* Buffers requested/granted */ + __be32 rm_type; /* Type of message (enum rpcrdma_proc) */ + union { + + struct { /* no chunks */ + __be32 rm_empty[3]; /* 3 empty chunk lists */ + } rm_nochunks; + + struct { /* no chunks and padded */ + __be32 rm_align; /* Padding alignment */ + __be32 rm_thresh; /* Padding threshold */ + __be32 rm_pempty[3]; /* 3 empty chunk lists */ + } rm_padded; + + struct { + __be32 rm_err; + __be32 rm_vers_low; + __be32 rm_vers_high; + } rm_error; + + __be32 rm_chunks[0]; /* read, write and reply chunks */ + + } rm_body; }; /* @@ -87,9 +134,6 @@ enum rpcrdma_proc { #define rdma_done cpu_to_be32(RDMA_DONE) #define rdma_error cpu_to_be32(RDMA_ERROR) -#define err_vers cpu_to_be32(ERR_VERS) -#define err_chunk cpu_to_be32(ERR_CHUNK) - /* * Private 
extension to RPC-over-RDMA Version One. * Message passed during RDMA-CM connection set-up. @@ -124,78 +168,4 @@ rpcrdma_decode_buffer_size(u8 val) return ((unsigned int)val + 1) << 10; } -/** - * xdr_encode_rdma_segment - Encode contents of an RDMA segment - * @p: Pointer into a send buffer - * @handle: The RDMA handle to encode - * @length: The RDMA length to encode - * @offset: The RDMA offset to encode - * - * Return value: - * Pointer to the XDR position that follows the encoded RDMA segment - */ -static inline __be32 *xdr_encode_rdma_segment(__be32 *p, u32 handle, - u32 length, u64 offset) -{ - *p++ = cpu_to_be32(handle); - *p++ = cpu_to_be32(length); - return xdr_encode_hyper(p, offset); -} - -/** - * xdr_encode_read_segment - Encode contents of a Read segment - * @p: Pointer into a send buffer - * @position: The position to encode - * @handle: The RDMA handle to encode - * @length: The RDMA length to encode - * @offset: The RDMA offset to encode - * - * Return value: - * Pointer to the XDR position that follows the encoded Read segment - */ -static inline __be32 *xdr_encode_read_segment(__be32 *p, u32 position, - u32 handle, u32 length, - u64 offset) -{ - *p++ = cpu_to_be32(position); - return xdr_encode_rdma_segment(p, handle, length, offset); -} - -/** - * xdr_decode_rdma_segment - Decode contents of an RDMA segment - * @p: Pointer to the undecoded RDMA segment - * @handle: Upon return, the RDMA handle - * @length: Upon return, the RDMA length - * @offset: Upon return, the RDMA offset - * - * Return value: - * Pointer to the XDR item that follows the RDMA segment - */ -static inline __be32 *xdr_decode_rdma_segment(__be32 *p, u32 *handle, - u32 *length, u64 *offset) -{ - *handle = be32_to_cpup(p++); - *length = be32_to_cpup(p++); - return xdr_decode_hyper(p, offset); -} - -/** - * xdr_decode_read_segment - Decode contents of a Read segment - * @p: Pointer to the undecoded Read segment - * @position: Upon return, the segment's position - * @handle: Upon 
return, the RDMA handle - * @length: Upon return, the RDMA length - * @offset: Upon return, the RDMA offset - * - * Return value: - * Pointer to the XDR item that follows the Read segment - */ -static inline __be32 *xdr_decode_read_segment(__be32 *p, u32 *position, - u32 *handle, u32 *length, - u64 *offset) -{ - *position = be32_to_cpup(p++); - return xdr_decode_rdma_segment(p, handle, length, offset); -} - #endif /* _LINUX_SUNRPC_RPC_RDMA_H */ diff --git a/include/linux/sunrpc/sched.h b/include/linux/sunrpc/sched.h index a237b8dbf6..7ba040c797 100644 --- a/include/linux/sunrpc/sched.h +++ b/include/linux/sunrpc/sched.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ /* * linux/include/linux/sunrpc/sched.h * @@ -14,7 +13,7 @@ #include #include #include -#include +#include #include #include @@ -23,10 +22,10 @@ */ struct rpc_procinfo; struct rpc_message { - const struct rpc_procinfo *rpc_proc; /* Procedure information */ + struct rpc_procinfo * rpc_proc; /* Procedure information */ void * rpc_argp; /* Arguments */ void * rpc_resp; /* Result */ - const struct cred * rpc_cred; /* Credentials */ + struct rpc_cred * rpc_cred; /* Credentials */ }; struct rpc_call_ops; @@ -35,6 +34,7 @@ struct rpc_wait { struct list_head list; /* wait queue links */ struct list_head links; /* Links to related tasks */ struct list_head timer_list; /* Timer list */ + unsigned long expires; }; /* @@ -61,8 +61,6 @@ struct rpc_task { struct rpc_wait tk_wait; /* RPC wait */ } u; - int tk_rpc_status; /* Result of last RPC operation */ - /* * RPC call state */ @@ -72,7 +70,6 @@ struct rpc_task { struct rpc_clnt * tk_client; /* RPC client */ struct rpc_xprt * tk_xprt; /* Transport */ - struct rpc_cred * tk_op_cred; /* cred being operated on */ struct rpc_rqst * tk_rqstp; /* RPC request */ @@ -107,7 +104,6 @@ struct rpc_task_setup { struct rpc_task *task; struct rpc_clnt *rpc_client; struct rpc_xprt *rpc_xprt; - struct rpc_cred *rpc_op_cred; /* credential being operated on */ const struct 
rpc_message *rpc_message; const struct rpc_call_ops *callback_ops; void *callback_data; @@ -121,34 +117,28 @@ struct rpc_task_setup { */ #define RPC_TASK_ASYNC 0x0001 /* is an async task */ #define RPC_TASK_SWAPPER 0x0002 /* is swapping in/out */ -#define RPC_TASK_MOVEABLE 0x0004 /* nfs4.1+ rpc tasks */ -#define RPC_TASK_NULLCREDS 0x0010 /* Use AUTH_NULL credential */ #define RPC_CALL_MAJORSEEN 0x0020 /* major timeout seen */ #define RPC_TASK_ROOTCREDS 0x0040 /* force root creds */ #define RPC_TASK_DYNAMIC 0x0080 /* task was kmalloc'ed */ -#define RPC_TASK_NO_ROUND_ROBIN 0x0100 /* send requests on "main" xprt */ +#define RPC_TASK_KILLED 0x0100 /* task was killed */ #define RPC_TASK_SOFT 0x0200 /* Use soft timeouts */ #define RPC_TASK_SOFTCONN 0x0400 /* Fail if can't connect */ #define RPC_TASK_SENT 0x0800 /* message was sent */ #define RPC_TASK_TIMEOUT 0x1000 /* fail with ETIMEDOUT on timeout */ #define RPC_TASK_NOCONNECT 0x2000 /* return ENOTCONN if not connected */ #define RPC_TASK_NO_RETRANS_TIMEOUT 0x4000 /* wait forever for a reply */ -#define RPC_TASK_CRED_NOREF 0x8000 /* No refcount on the credential */ #define RPC_IS_ASYNC(t) ((t)->tk_flags & RPC_TASK_ASYNC) #define RPC_IS_SWAPPER(t) ((t)->tk_flags & RPC_TASK_SWAPPER) +#define RPC_DO_ROOTOVERRIDE(t) ((t)->tk_flags & RPC_TASK_ROOTCREDS) +#define RPC_ASSASSINATED(t) ((t)->tk_flags & RPC_TASK_KILLED) #define RPC_IS_SOFT(t) ((t)->tk_flags & (RPC_TASK_SOFT|RPC_TASK_TIMEOUT)) #define RPC_IS_SOFTCONN(t) ((t)->tk_flags & RPC_TASK_SOFTCONN) #define RPC_WAS_SENT(t) ((t)->tk_flags & RPC_TASK_SENT) -#define RPC_IS_MOVEABLE(t) ((t)->tk_flags & RPC_TASK_MOVEABLE) #define RPC_TASK_RUNNING 0 #define RPC_TASK_QUEUED 1 #define RPC_TASK_ACTIVE 2 -#define RPC_TASK_NEED_XMIT 3 -#define RPC_TASK_NEED_RECV 4 -#define RPC_TASK_MSG_PIN_WAIT 5 -#define RPC_TASK_SIGNALLED 6 #define RPC_IS_RUNNING(t) test_bit(RPC_TASK_RUNNING, &(t)->tk_runstate) #define rpc_set_running(t) set_bit(RPC_TASK_RUNNING, &(t)->tk_runstate) @@ -172,8 +162,6 
@@ struct rpc_task_setup { #define RPC_IS_ACTIVATED(t) test_bit(RPC_TASK_ACTIVE, &(t)->tk_runstate) -#define RPC_SIGNALLED(t) test_bit(RPC_TASK_SIGNALLED, &(t)->tk_runstate) - /* * Task priorities. * Note: if you change these, you must also change @@ -186,9 +174,9 @@ struct rpc_task_setup { #define RPC_NR_PRIORITY (1 + RPC_PRIORITY_PRIVILEGED - RPC_PRIORITY_LOW) struct rpc_timer { + struct timer_list timer; struct list_head list; unsigned long expires; - struct delayed_work dwork; }; /* @@ -197,6 +185,7 @@ struct rpc_timer { struct rpc_wait_queue { spinlock_t lock; struct list_head tasks[RPC_NR_PRIORITY]; /* task queue for each priority level */ + pid_t owner; /* process id of last task serviced */ unsigned char maxpriority; /* maximum priority (0 if queue is not a priority queue) */ unsigned char priority; /* current priority */ unsigned char nr; /* # tasks remaining for cookie */ @@ -212,6 +201,7 @@ struct rpc_wait_queue { * from a single cookie. The aim is to improve * performance of NFS operations such as read/write. 
*/ +#define RPC_BATCH_COUNT 16 #define RPC_IS_PRIORITY(q) ((q)->maxpriority > 0) /* @@ -222,7 +212,6 @@ struct rpc_task *rpc_run_task(const struct rpc_task_setup *); struct rpc_task *rpc_run_bc_task(struct rpc_rqst *req); void rpc_put_task(struct rpc_task *); void rpc_put_task_async(struct rpc_task *); -void rpc_signal_task(struct rpc_task *); void rpc_exit_task(struct rpc_task *); void rpc_exit(struct rpc_task *, int); void rpc_release_calldata(const struct rpc_call_ops *, void *); @@ -231,25 +220,14 @@ void rpc_execute(struct rpc_task *); void rpc_init_priority_wait_queue(struct rpc_wait_queue *, const char *); void rpc_init_wait_queue(struct rpc_wait_queue *, const char *); void rpc_destroy_wait_queue(struct rpc_wait_queue *); -unsigned long rpc_task_timeout(const struct rpc_task *task); -void rpc_sleep_on_timeout(struct rpc_wait_queue *queue, - struct rpc_task *task, - rpc_action action, - unsigned long timeout); void rpc_sleep_on(struct rpc_wait_queue *, struct rpc_task *, rpc_action action); -void rpc_sleep_on_priority_timeout(struct rpc_wait_queue *queue, - struct rpc_task *task, - unsigned long timeout, - int priority); void rpc_sleep_on_priority(struct rpc_wait_queue *, struct rpc_task *, + rpc_action action, int priority); void rpc_wake_up_queued_task(struct rpc_wait_queue *, struct rpc_task *); -void rpc_wake_up_queued_task_set_status(struct rpc_wait_queue *, - struct rpc_task *, - int); void rpc_wake_up(struct rpc_wait_queue *); struct rpc_task *rpc_wake_up_next(struct rpc_wait_queue *); struct rpc_task *rpc_wake_up_first_on_wq(struct workqueue_struct *wq, diff --git a/include/linux/sunrpc/stats.h b/include/linux/sunrpc/stats.h index d94d4f4105..edc64219f9 100644 --- a/include/linux/sunrpc/stats.h +++ b/include/linux/sunrpc/stats.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ /* * linux/include/linux/sunrpc/stats.h * @@ -63,7 +62,7 @@ struct proc_dir_entry * rpc_proc_register(struct net *,struct rpc_stat *); void rpc_proc_unregister(struct 
net *,const char *); void rpc_proc_zero(const struct rpc_program *); struct proc_dir_entry * svc_proc_register(struct net *, struct svc_stat *, - const struct proc_ops *); + const struct file_operations *); void svc_proc_unregister(struct net *, const char *); void svc_seq_show(struct seq_file *, @@ -75,7 +74,7 @@ static inline void rpc_proc_unregister(struct net *net, const char *p) {} static inline void rpc_proc_zero(const struct rpc_program *p) {} static inline struct proc_dir_entry *svc_proc_register(struct net *net, struct svc_stat *s, - const struct proc_ops *proc_ops) { return NULL; } + const struct file_operations *f) { return NULL; } static inline void svc_proc_unregister(struct net *net, const char *p) {} static inline void svc_seq_show(struct seq_file *seq, diff --git a/include/linux/sunrpc/svc.h b/include/linux/sunrpc/svc.h index 064c96157d..f37a11eb7b 100644 --- a/include/linux/sunrpc/svc.h +++ b/include/linux/sunrpc/svc.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ /* * linux/include/linux/sunrpc/svc.h * @@ -19,7 +18,6 @@ #include #include #include -#include /* statistics for svc_pool structures */ struct svc_pool_stats { @@ -48,7 +46,6 @@ struct svc_pool { struct svc_pool_stats sp_stats; /* statistics on pool operation */ #define SP_TASK_PENDING (0) /* still work to do even if no * xprt is queued. 
*/ -#define SP_CONGESTED (1) unsigned long sp_flags; } ____cacheline_aligned_in_smp; @@ -102,7 +99,7 @@ struct svc_serv { unsigned int sv_nrpools; /* number of thread pools */ struct svc_pool * sv_pools; /* array of thread pools */ - const struct svc_serv_ops *sv_ops; /* server operations */ + struct svc_serv_ops *sv_ops; /* server operations */ #if defined(CONFIG_SUNRPC_BACKCHANNEL) struct list_head sv_cb_list; /* queue for callback requests * that arrive over the same @@ -110,7 +107,7 @@ struct svc_serv { spinlock_t sv_cb_lock; /* protects the svc_cb_list */ wait_queue_head_t sv_cb_waitq; /* sleep here if there are no * entries in the svc_cb_list */ - bool sv_bc_enabled; /* service uses backchannel */ + struct svc_xprt *sv_bc_xprt; /* callback on fore channel */ #endif /* CONFIG_SUNRPC_BACKCHANNEL */ }; @@ -240,7 +237,7 @@ struct svc_rqst { struct svc_serv * rq_server; /* RPC service definition */ struct svc_pool * rq_pool; /* thread pool */ - const struct svc_procedure *rq_procinfo;/* procedure info */ + struct svc_procedure * rq_procinfo; /* procedure info */ struct auth_ops * rq_authop; /* authentication flavour */ struct svc_cred rq_cred; /* auth info */ void * rq_xprt_ctxt; /* transport specific context ptr */ @@ -248,18 +245,13 @@ struct svc_rqst { size_t rq_xprt_hlen; /* xprt header len */ struct xdr_buf rq_arg; - struct xdr_stream rq_arg_stream; - struct xdr_stream rq_res_stream; - struct page *rq_scratch_page; struct xdr_buf rq_res; - struct page *rq_pages[RPCSVC_MAXPAGES + 1]; + struct page * rq_pages[RPCSVC_MAXPAGES]; struct page * *rq_respages; /* points into rq_pages */ struct page * *rq_next_page; /* next reply page to use */ struct page * *rq_page_end; /* one past the last page */ - struct pagevec rq_pvec; struct kvec rq_vec[RPCSVC_MAXPAGES]; /* generally useful.. 
*/ - struct bio_vec rq_bvec[RPCSVC_MAXPAGES]; __be32 rq_xid; /* transmission id */ u32 rq_prog; /* program number */ @@ -278,12 +270,10 @@ struct svc_rqst { #define RQ_BUSY (6) /* request is busy */ #define RQ_DATA (7) /* request has data */ unsigned long rq_flags; /* flags field */ - ktime_t rq_qtime; /* enqueue time */ void * rq_argp; /* decoded arguments */ void * rq_resp; /* xdr'd results */ void * rq_auth_data; /* flavor-specific data */ - __be32 rq_auth_stat; /* authentication status */ int rq_auth_slack; /* extra space xdr code * should leave in head * for krb5i, krb5p. @@ -291,7 +281,6 @@ struct svc_rqst { int rq_reserved; /* space on socket outq * reserved for this request */ - ktime_t rq_stime; /* start time */ struct cache_req rq_chandle; /* handle passed to caches for * request delaying @@ -302,13 +291,9 @@ struct svc_rqst { struct svc_cacherep * rq_cacherep; /* cache info */ struct task_struct *rq_task; /* service thread */ spinlock_t rq_lock; /* per-request lock */ - struct net *rq_bc_net; /* pointer to backchannel's - * net namespace - */ - void ** rq_lease_breaker; /* The v4 client breaking a lease */ }; -#define SVC_NET(rqst) (rqst->rq_xprt ? 
rqst->rq_xprt->xpt_net : rqst->rq_bc_net) +#define SVC_NET(svc_rqst) (svc_rqst->rq_xprt->xpt_net) /* * Rigorous type checking on sockaddr type conversions @@ -387,17 +372,7 @@ struct svc_deferred_req { struct cache_deferred_req handle; size_t xprt_hlen; int argslen; - __be32 args[]; -}; - -struct svc_process_info { - union { - int (*dispatch)(struct svc_rqst *, __be32 *); - struct { - unsigned int lovers; - unsigned int hivers; - } mismatch; - }; + __be32 args[0]; }; /* @@ -409,19 +384,11 @@ struct svc_program { unsigned int pg_lovers; /* lowest version */ unsigned int pg_hivers; /* highest version */ unsigned int pg_nvers; /* number of versions */ - const struct svc_version **pg_vers; /* version array */ + struct svc_version ** pg_vers; /* version array */ char * pg_name; /* service name */ char * pg_class; /* class name: services sharing authentication */ struct svc_stat * pg_stats; /* rpc statistics */ int (*pg_authenticate)(struct svc_rqst *); - __be32 (*pg_init_request)(struct svc_rqst *, - const struct svc_program *, - struct svc_process_info *); - int (*pg_rpcbind_set)(struct net *net, - const struct svc_program *, - u32 version, int family, - unsigned short proto, - unsigned short port); }; /* @@ -430,18 +397,13 @@ struct svc_program { struct svc_version { u32 vs_vers; /* version number */ u32 vs_nproc; /* number of procedures */ - const struct svc_procedure *vs_proc; /* per-procedure info */ - unsigned int *vs_count; /* call counts */ + struct svc_procedure * vs_proc; /* per-procedure info */ u32 vs_xdrsize; /* xdrsize needed for this version */ - /* Don't register with rpcbind */ - bool vs_hidden; - - /* Don't care if the rpcbind registration fails */ - bool vs_rpcb_optnl; - - /* Need xprt with congestion control */ - bool vs_need_cong_ctrl; + unsigned int vs_hidden : 1, /* Don't register with portmapper. + * Only used for nfsacl so far. */ + vs_rpcb_optnl:1;/* Don't care the result of register. + * Only used for nfsv4. 
*/ /* Override dispatch function (e.g. when caching replies). * A return value of 0 means drop the request. @@ -453,21 +415,18 @@ struct svc_version { /* * RPC procedure info */ +typedef __be32 (*svc_procfunc)(struct svc_rqst *, void *argp, void *resp); struct svc_procedure { - /* process the request: */ - __be32 (*pc_func)(struct svc_rqst *); - /* XDR decode args: */ - int (*pc_decode)(struct svc_rqst *, __be32 *data); - /* XDR encode result: */ - int (*pc_encode)(struct svc_rqst *, __be32 *data); - /* XDR free result: */ - void (*pc_release)(struct svc_rqst *); + svc_procfunc pc_func; /* process the request */ + kxdrproc_t pc_decode; /* XDR decode args */ + kxdrproc_t pc_encode; /* XDR encode result */ + kxdrproc_t pc_release; /* XDR free result */ unsigned int pc_argsize; /* argument struct size */ unsigned int pc_ressize; /* result struct size */ + unsigned int pc_count; /* call count */ unsigned int pc_cachetype; /* cache info (NFS) */ unsigned int pc_xdrressize; /* maximum size of XDR reply */ - const char * pc_name; /* for display */ -}; +} __do_const; /* * Mode for mapping cpus to pools. 
@@ -499,21 +458,18 @@ int svc_rpcb_setup(struct svc_serv *serv, struct net *net); void svc_rpcb_cleanup(struct svc_serv *serv, struct net *net); int svc_bind(struct svc_serv *serv, struct net *net); struct svc_serv *svc_create(struct svc_program *, unsigned int, - const struct svc_serv_ops *); + struct svc_serv_ops *); struct svc_rqst *svc_rqst_alloc(struct svc_serv *serv, struct svc_pool *pool, int node); struct svc_rqst *svc_prepare_thread(struct svc_serv *serv, struct svc_pool *pool, int node); -void svc_rqst_replace_page(struct svc_rqst *rqstp, - struct page *page); void svc_rqst_free(struct svc_rqst *); void svc_exit_thread(struct svc_rqst *); unsigned int svc_pool_map_get(void); void svc_pool_map_put(void); struct svc_serv * svc_create_pooled(struct svc_program *, unsigned int, - const struct svc_serv_ops *); + struct svc_serv_ops *); int svc_set_num_threads(struct svc_serv *, struct svc_pool *, int); -int svc_set_num_threads_sync(struct svc_serv *, struct svc_pool *, int); int svc_pool_stats_open(struct svc_serv *serv, struct file *file); void svc_destroy(struct svc_serv *); void svc_shutdown_net(struct svc_serv *, struct net *); @@ -527,29 +483,6 @@ void svc_wake_up(struct svc_serv *); void svc_reserve(struct svc_rqst *rqstp, int space); struct svc_pool * svc_pool_for_cpu(struct svc_serv *serv, int cpu); char * svc_print_addr(struct svc_rqst *, char *, size_t); -const char * svc_proc_name(const struct svc_rqst *rqstp); -int svc_encode_result_payload(struct svc_rqst *rqstp, - unsigned int offset, - unsigned int length); -unsigned int svc_fill_write_vector(struct svc_rqst *rqstp, - struct page **pages, - struct kvec *first, size_t total); -char *svc_fill_symlink_pathname(struct svc_rqst *rqstp, - struct kvec *first, void *p, - size_t total); -__be32 svc_generic_init_request(struct svc_rqst *rqstp, - const struct svc_program *progp, - struct svc_process_info *procinfo); -int svc_generic_rpcbind_set(struct net *net, - const struct svc_program *progp, - u32 
version, int family, - unsigned short proto, - unsigned short port); -int svc_rpcbind_set_version(struct net *net, - const struct svc_program *progp, - u32 version, int family, - unsigned short proto, - unsigned short port); #define RPC_MAX_ADDRBUFLEN (63U) @@ -565,42 +498,4 @@ static inline void svc_reserve_auth(struct svc_rqst *rqstp, int space) svc_reserve(rqstp, space + rqstp->rq_auth_slack); } -/** - * svcxdr_init_decode - Prepare an xdr_stream for svc Call decoding - * @rqstp: controlling server RPC transaction context - * - */ -static inline void svcxdr_init_decode(struct svc_rqst *rqstp) -{ - struct xdr_stream *xdr = &rqstp->rq_arg_stream; - struct kvec *argv = rqstp->rq_arg.head; - - xdr_init_decode(xdr, &rqstp->rq_arg, argv->iov_base, NULL); - xdr_set_scratch_page(xdr, rqstp->rq_scratch_page); -} - -/** - * svcxdr_init_encode - Prepare an xdr_stream for svc Reply encoding - * @rqstp: controlling server RPC transaction context - * - */ -static inline void svcxdr_init_encode(struct svc_rqst *rqstp) -{ - struct xdr_stream *xdr = &rqstp->rq_res_stream; - struct xdr_buf *buf = &rqstp->rq_res; - struct kvec *resv = buf->head; - - xdr_reset_scratch_buffer(xdr); - - xdr->buf = buf; - xdr->iov = resv; - xdr->p = resv->iov_base + resv->iov_len; - xdr->end = resv->iov_base + PAGE_SIZE - rqstp->rq_auth_slack; - buf->len = resv->iov_len; - xdr->page_ptr = buf->pages - 1; - buf->buflen = PAGE_SIZE * (1 + rqstp->rq_page_end - buf->pages); - buf->buflen -= rqstp->rq_auth_slack; - xdr->rqst = NULL; -} - #endif /* SUNRPC_SVC_H */ diff --git a/include/linux/sunrpc/svc_rdma.h b/include/linux/sunrpc/svc_rdma.h index 24aa159d29..abf3af5241 100644 --- a/include/linux/sunrpc/svc_rdma.h +++ b/include/linux/sunrpc/svc_rdma.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ /* * Copyright (c) 2005-2006 Network Appliance, Inc. All rights reserved. 
* @@ -42,23 +41,12 @@ #ifndef SVC_RDMA_H #define SVC_RDMA_H -#include #include #include #include -#include -#include - -#include #include #include - -/* Default and maximum inline threshold sizes */ -enum { - RPCRDMA_PULLUP_THRESH = RPCRDMA_V1_DEF_INLINE_SIZE >> 1, - RPCRDMA_DEF_INLINE_THRESH = 4096, - RPCRDMA_MAX_INLINE_THRESH = 65536 -}; +#define SVCRDMA_DEBUG /* RPC/RDMA parameters and stats */ extern unsigned int svcrdma_ord; @@ -66,159 +54,223 @@ extern unsigned int svcrdma_max_requests; extern unsigned int svcrdma_max_bc_requests; extern unsigned int svcrdma_max_req_size; -extern struct percpu_counter svcrdma_stat_read; -extern struct percpu_counter svcrdma_stat_recv; -extern struct percpu_counter svcrdma_stat_sq_starve; -extern struct percpu_counter svcrdma_stat_write; +extern atomic_unchecked_t rdma_stat_recv; +extern atomic_unchecked_t rdma_stat_read; +extern atomic_unchecked_t rdma_stat_write; +extern atomic_unchecked_t rdma_stat_sq_starve; +extern atomic_unchecked_t rdma_stat_rq_starve; +extern atomic_unchecked_t rdma_stat_rq_poll; +extern atomic_unchecked_t rdma_stat_rq_prod; +extern atomic_unchecked_t rdma_stat_sq_poll; +extern atomic_unchecked_t rdma_stat_sq_prod; + +/* + * Contexts are built when an RDMA request is created and are a + * record of the resources that can be recovered when the request + * completes. + */ +struct svc_rdma_op_ctxt { + struct list_head free; + struct svc_rdma_op_ctxt *read_hdr; + struct svc_rdma_fastreg_mr *frmr; + int hdr_count; + struct xdr_buf arg; + struct ib_cqe cqe; + struct ib_cqe reg_cqe; + struct ib_cqe inv_cqe; + struct list_head dto_q; + enum ib_wc_status wc_status; + u32 byte_len; + u32 position; + struct svcxprt_rdma *xprt; + unsigned long flags; + enum dma_data_direction direction; + int count; + unsigned int mapped_sges; + struct ib_sge sge[RPCSVC_MAXPAGES]; + struct page *pages[RPCSVC_MAXPAGES]; +}; + +/* + * NFS_ requests are mapped on the client side by the chunk lists in + * the RPCRDMA header. 
During the fetching of the RPC from the client + * and the writing of the reply to the client, the memory in the + * client and the memory in the server must be mapped as contiguous + * vaddr/len for access by the hardware. These data strucures keep + * these mappings. + * + * For an RDMA_WRITE, the 'sge' maps the RPC REPLY. For RDMA_READ, the + * 'sge' in the svc_rdma_req_map maps the server side RPC reply and the + * 'ch' field maps the read-list of the RPCRDMA header to the 'sge' + * mapping of the reply. + */ +struct svc_rdma_chunk_sge { + int start; /* sge no for this chunk */ + int count; /* sge count for this chunk */ +}; +struct svc_rdma_fastreg_mr { + struct ib_mr *mr; + struct scatterlist *sg; + int sg_nents; + unsigned long access_flags; + enum dma_data_direction direction; + struct list_head frmr_list; +}; +struct svc_rdma_req_map { + struct list_head free; + unsigned long count; + union { + struct kvec sge[RPCSVC_MAXPAGES]; + struct svc_rdma_chunk_sge ch[RPCSVC_MAXPAGES]; + unsigned long lkey[RPCSVC_MAXPAGES]; + }; +}; +#define RDMACTXT_F_LAST_CTXT 2 + +#define SVCRDMA_DEVCAP_FAST_REG 1 /* fast mr registration */ +#define SVCRDMA_DEVCAP_READ_W_INV 2 /* read w/ invalidate */ struct svcxprt_rdma { struct svc_xprt sc_xprt; /* SVC transport structure */ struct rdma_cm_id *sc_cm_id; /* RDMA connection id */ struct list_head sc_accept_q; /* Conn. 
waiting accept */ int sc_ord; /* RDMA read limit */ - int sc_max_send_sges; + int sc_max_sge; + int sc_max_sge_rd; /* max sge for read target */ bool sc_snd_w_inv; /* OK to use Send With Invalidate */ - atomic_t sc_sq_avail; /* SQEs ready to be consumed */ + atomic_t sc_sq_count; /* Number of SQ WR on queue */ unsigned int sc_sq_depth; /* Depth of SQ */ - __be32 sc_fc_credits; /* Forward credits */ - u32 sc_max_requests; /* Max requests */ + unsigned int sc_rq_depth; /* Depth of RQ */ + u32 sc_max_requests; /* Forward credits */ u32 sc_max_bc_requests;/* Backward credits */ int sc_max_req_size; /* Size of each RQ WR buf */ - u8 sc_port_num; struct ib_pd *sc_pd; - spinlock_t sc_send_lock; - struct llist_head sc_send_ctxts; - spinlock_t sc_rw_ctxt_lock; - struct llist_head sc_rw_ctxts; + atomic_t sc_dma_used; + spinlock_t sc_ctxt_lock; + struct list_head sc_ctxts; + int sc_ctxt_used; + spinlock_t sc_map_lock; + struct list_head sc_maps; - u32 sc_pending_recvs; - u32 sc_recv_batch; struct list_head sc_rq_dto_q; spinlock_t sc_rq_dto_lock; struct ib_qp *sc_qp; struct ib_cq *sc_rq_cq; struct ib_cq *sc_sq_cq; + int (*sc_reader)(struct svcxprt_rdma *, + struct svc_rqst *, + struct svc_rdma_op_ctxt *, + int *, u32 *, u32, u32, u64, bool); + u32 sc_dev_caps; /* distilled device caps */ + unsigned int sc_frmr_pg_list_len; + struct list_head sc_frmr_q; + spinlock_t sc_frmr_q_lock; spinlock_t sc_lock; /* transport lock */ wait_queue_head_t sc_send_wait; /* SQ exhaustion waitlist */ unsigned long sc_flags; + struct list_head sc_dto_q; /* DTO tasklet I/O pending Q */ + struct list_head sc_read_complete_q; struct work_struct sc_work; - - struct llist_head sc_recv_ctxts; - - atomic_t sc_completion_ids; }; /* sc_flags */ #define RDMAXPRT_CONN_PENDING 3 -/* - * Default connection parameters +#define RPCRDMA_LISTEN_BACKLOG 10 +/* The default ORD value is based on two outstanding full-size writes with a + * page size of 4k, or 32k * 2 ops / 4k = 16 outstanding RDMA_READ. 
*/ +#define RPCRDMA_ORD (64/4) +#define RPCRDMA_SQ_DEPTH_MULT 8 +#define RPCRDMA_MAX_REQUESTS 32 +#define RPCRDMA_MAX_REQ_SIZE 4096 + +/* Typical ULP usage of BC requests is NFSv4.1 backchannel. Our + * current NFSv4.1 implementation supports one backchannel slot. */ -enum { - RPCRDMA_LISTEN_BACKLOG = 10, - RPCRDMA_MAX_REQUESTS = 64, - RPCRDMA_MAX_BC_REQUESTS = 2, -}; +#define RPCRDMA_MAX_BC_REQUESTS 2 #define RPCSVC_MAXPAYLOAD_RDMA RPCSVC_MAXPAYLOAD -struct svc_rdma_recv_ctxt { - struct llist_node rc_node; - struct list_head rc_list; - struct ib_recv_wr rc_recv_wr; - struct ib_cqe rc_cqe; - struct rpc_rdma_cid rc_cid; - struct ib_sge rc_recv_sge; - void *rc_recv_buf; - struct xdr_stream rc_stream; - bool rc_temp; - u32 rc_byte_len; - unsigned int rc_page_count; - u32 rc_inv_rkey; - __be32 rc_msgtype; - - struct svc_rdma_pcl rc_call_pcl; - - struct svc_rdma_pcl rc_read_pcl; - struct svc_rdma_chunk *rc_cur_result_payload; - struct svc_rdma_pcl rc_write_pcl; - struct svc_rdma_pcl rc_reply_pcl; -}; - -struct svc_rdma_send_ctxt { - struct llist_node sc_node; - struct rpc_rdma_cid sc_cid; - - struct ib_send_wr sc_send_wr; - struct ib_cqe sc_cqe; - struct completion sc_done; - struct xdr_buf sc_hdrbuf; - struct xdr_stream sc_stream; - void *sc_xprt_buf; - int sc_cur_sge_no; - - struct ib_sge sc_sges[]; -}; +/* Track DMA maps for this transport and context */ +static inline void svc_rdma_count_mappings(struct svcxprt_rdma *rdma, + struct svc_rdma_op_ctxt *ctxt) +{ + ctxt->mapped_sges++; + atomic_inc(&rdma->sc_dma_used); +} /* svc_rdma_backchannel.c */ -extern void svc_rdma_handle_bc_reply(struct svc_rqst *rqstp, - struct svc_rdma_recv_ctxt *rctxt); +extern int svc_rdma_handle_bc_reply(struct rpc_xprt *xprt, + struct rpcrdma_msg *rmsgp, + struct xdr_buf *rcvbuf); + +/* svc_rdma_marshal.c */ +extern int svc_rdma_xdr_decode_req(struct xdr_buf *); +extern int svc_rdma_xdr_encode_error(struct svcxprt_rdma *, + struct rpcrdma_msg *, + enum rpcrdma_errcode, __be32 *); +extern 
void svc_rdma_xdr_encode_write_list(struct rpcrdma_msg *, int); +extern void svc_rdma_xdr_encode_reply_array(struct rpcrdma_write_array *, int); +extern void svc_rdma_xdr_encode_array_chunk(struct rpcrdma_write_array *, int, + __be32, __be64, u32); +extern void svc_rdma_xdr_encode_reply_header(struct svcxprt_rdma *, + struct rpcrdma_msg *, + struct rpcrdma_msg *, + enum rpcrdma_proc); +extern int svc_rdma_xdr_get_reply_hdr_len(struct rpcrdma_msg *); /* svc_rdma_recvfrom.c */ -extern void svc_rdma_recv_ctxts_destroy(struct svcxprt_rdma *rdma); -extern bool svc_rdma_post_recvs(struct svcxprt_rdma *rdma); -extern struct svc_rdma_recv_ctxt * - svc_rdma_recv_ctxt_get(struct svcxprt_rdma *rdma); -extern void svc_rdma_recv_ctxt_put(struct svcxprt_rdma *rdma, - struct svc_rdma_recv_ctxt *ctxt); -extern void svc_rdma_flush_recv_queues(struct svcxprt_rdma *rdma); -extern void svc_rdma_release_rqst(struct svc_rqst *rqstp); extern int svc_rdma_recvfrom(struct svc_rqst *); - -/* svc_rdma_rw.c */ -extern void svc_rdma_destroy_rw_ctxts(struct svcxprt_rdma *rdma); -extern int svc_rdma_send_write_chunk(struct svcxprt_rdma *rdma, - const struct svc_rdma_chunk *chunk, - const struct xdr_buf *xdr); -extern int svc_rdma_send_reply_chunk(struct svcxprt_rdma *rdma, - const struct svc_rdma_recv_ctxt *rctxt, - const struct xdr_buf *xdr); -extern int svc_rdma_process_read_list(struct svcxprt_rdma *rdma, - struct svc_rqst *rqstp, - struct svc_rdma_recv_ctxt *head); +extern int rdma_read_chunk_lcl(struct svcxprt_rdma *, struct svc_rqst *, + struct svc_rdma_op_ctxt *, int *, u32 *, + u32, u32, u64, bool); +extern int rdma_read_chunk_frmr(struct svcxprt_rdma *, struct svc_rqst *, + struct svc_rdma_op_ctxt *, int *, u32 *, + u32, u32, u64, bool); /* svc_rdma_sendto.c */ -extern void svc_rdma_send_ctxts_destroy(struct svcxprt_rdma *rdma); -extern struct svc_rdma_send_ctxt * - svc_rdma_send_ctxt_get(struct svcxprt_rdma *rdma); -extern void svc_rdma_send_ctxt_put(struct svcxprt_rdma *rdma, - struct 
svc_rdma_send_ctxt *ctxt); -extern int svc_rdma_send(struct svcxprt_rdma *rdma, - struct svc_rdma_send_ctxt *ctxt); -extern int svc_rdma_map_reply_msg(struct svcxprt_rdma *rdma, - struct svc_rdma_send_ctxt *sctxt, - const struct svc_rdma_recv_ctxt *rctxt, - const struct xdr_buf *xdr); -extern void svc_rdma_send_error_msg(struct svcxprt_rdma *rdma, - struct svc_rdma_send_ctxt *sctxt, - struct svc_rdma_recv_ctxt *rctxt, - int status); -extern void svc_rdma_wake_send_waiters(struct svcxprt_rdma *rdma, int avail); +extern int svc_rdma_map_xdr(struct svcxprt_rdma *, struct xdr_buf *, + struct svc_rdma_req_map *, bool); extern int svc_rdma_sendto(struct svc_rqst *); -extern int svc_rdma_result_payload(struct svc_rqst *rqstp, unsigned int offset, - unsigned int length); +extern struct rpcrdma_read_chunk * + svc_rdma_get_read_chunk(struct rpcrdma_msg *); +extern void svc_rdma_send_error(struct svcxprt_rdma *, struct rpcrdma_msg *, + int); /* svc_rdma_transport.c */ +extern void svc_rdma_wc_send(struct ib_cq *, struct ib_wc *); +extern void svc_rdma_wc_write(struct ib_cq *, struct ib_wc *); +extern void svc_rdma_wc_reg(struct ib_cq *, struct ib_wc *); +extern void svc_rdma_wc_read(struct ib_cq *, struct ib_wc *); +extern void svc_rdma_wc_inv(struct ib_cq *, struct ib_wc *); +extern int svc_rdma_send(struct svcxprt_rdma *, struct ib_send_wr *); +extern int svc_rdma_post_recv(struct svcxprt_rdma *, gfp_t); +extern int svc_rdma_repost_recv(struct svcxprt_rdma *, gfp_t); +extern int svc_rdma_create_listen(struct svc_serv *, int, struct sockaddr *); +extern struct svc_rdma_op_ctxt *svc_rdma_get_context(struct svcxprt_rdma *); +extern void svc_rdma_put_context(struct svc_rdma_op_ctxt *, int); +extern void svc_rdma_unmap_dma(struct svc_rdma_op_ctxt *ctxt); +extern struct svc_rdma_req_map *svc_rdma_get_req_map(struct svcxprt_rdma *); +extern void svc_rdma_put_req_map(struct svcxprt_rdma *, + struct svc_rdma_req_map *); +extern struct svc_rdma_fastreg_mr *svc_rdma_get_frmr(struct 
svcxprt_rdma *); +extern void svc_rdma_put_frmr(struct svcxprt_rdma *, + struct svc_rdma_fastreg_mr *); +extern void svc_sq_reap(struct svcxprt_rdma *); +extern void svc_rq_reap(struct svcxprt_rdma *); +extern void svc_rdma_prep_reply_hdr(struct svc_rqst *); + extern struct svc_xprt_class svc_rdma_class; #ifdef CONFIG_SUNRPC_BACKCHANNEL extern struct svc_xprt_class svc_rdma_bc_class; #endif /* svc_rdma.c */ +extern struct workqueue_struct *svc_rdma_wq; extern int svc_rdma_init(void); extern void svc_rdma_cleanup(void); diff --git a/include/linux/sunrpc/svc_xprt.h b/include/linux/sunrpc/svc_xprt.h index 571f605bc9..7440290f64 100644 --- a/include/linux/sunrpc/svc_xprt.h +++ b/include/linux/sunrpc/svc_xprt.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ /* * linux/include/linux/sunrpc/svc_xprt.h * @@ -20,20 +19,19 @@ struct svc_xprt_ops { struct svc_xprt *(*xpo_accept)(struct svc_xprt *); int (*xpo_has_wspace)(struct svc_xprt *); int (*xpo_recvfrom)(struct svc_rqst *); + void (*xpo_prep_reply_hdr)(struct svc_rqst *); int (*xpo_sendto)(struct svc_rqst *); - int (*xpo_result_payload)(struct svc_rqst *, unsigned int, - unsigned int); void (*xpo_release_rqst)(struct svc_rqst *); void (*xpo_detach)(struct svc_xprt *); void (*xpo_free)(struct svc_xprt *); - void (*xpo_secure_port)(struct svc_rqst *rqstp); + int (*xpo_secure_port)(struct svc_rqst *); void (*xpo_kill_temp_xprt)(struct svc_xprt *); }; struct svc_xprt_class { const char *xcl_name; struct module *xcl_owner; - const struct svc_xprt_ops *xcl_ops; + struct svc_xprt_ops *xcl_ops; struct list_head xcl_list; u32 xcl_max_payload; int xcl_ident; @@ -51,7 +49,7 @@ struct svc_xpt_user { struct svc_xprt { struct svc_xprt_class *xpt_class; - const struct svc_xprt_ops *xpt_ops; + struct svc_xprt_ops *xpt_ops; struct kref xpt_ref; struct list_head xpt_list; struct list_head xpt_ready; @@ -69,7 +67,6 @@ struct svc_xprt { #define XPT_CACHE_AUTH 11 /* cache auth info */ #define XPT_LOCAL 12 /* connection from loopback 
interface */ #define XPT_KILL_TEMP 13 /* call xpo_kill_temp_xprt before closing */ -#define XPT_CONG_CTRL 14 /* has congestion control */ struct svc_serv *xpt_server; /* service for transport */ atomic_t xpt_reserved; /* space on outq that is rsvd */ @@ -84,11 +81,10 @@ struct svc_xprt { size_t xpt_locallen; /* length of address */ struct sockaddr_storage xpt_remote; /* remote peer's address */ size_t xpt_remotelen; /* length of address */ - char xpt_remotebuf[INET6_ADDRSTRLEN + 10]; + struct rpc_wait_queue xpt_bc_pending; /* backchannel wait queue */ struct list_head xpt_users; /* callbacks on free */ struct net *xpt_net; - const struct cred *xpt_cred; struct rpc_xprt *xpt_bc_xprt; /* NFSv4.1 backchannel */ struct rpc_xprt_switch *xpt_bc_xps; /* NFSv4.1 backchannel */ }; @@ -117,20 +113,12 @@ static inline int register_xpt_user(struct svc_xprt *xpt, struct svc_xpt_user *u return 0; } -static inline bool svc_xprt_is_dead(const struct svc_xprt *xprt) -{ - return (test_bit(XPT_DEAD, &xprt->xpt_flags) != 0) || - (test_bit(XPT_CLOSE, &xprt->xpt_flags) != 0); -} - int svc_reg_xprt_class(struct svc_xprt_class *); void svc_unreg_xprt_class(struct svc_xprt_class *); void svc_xprt_init(struct net *, struct svc_xprt_class *, struct svc_xprt *, struct svc_serv *); int svc_create_xprt(struct svc_serv *, const char *, struct net *, - const int, const unsigned short, int, - const struct cred *); -void svc_xprt_received(struct svc_xprt *xprt); + const int, const unsigned short, int); void svc_xprt_do_enqueue(struct svc_xprt *xprt); void svc_xprt_enqueue(struct svc_xprt *xprt); void svc_xprt_put(struct svc_xprt *xprt); @@ -144,7 +132,6 @@ struct svc_xprt *svc_find_xprt(struct svc_serv *serv, const char *xcl_name, int svc_xprt_names(struct svc_serv *serv, char *buf, const int buflen); void svc_add_new_perm_xprt(struct svc_serv *serv, struct svc_xprt *xprt); void svc_age_temp_xprts_now(struct svc_serv *, struct sockaddr *); -void svc_xprt_deferred_close(struct svc_xprt *xprt); 
static inline void svc_xprt_get(struct svc_xprt *xprt) { @@ -163,10 +150,7 @@ static inline void svc_xprt_set_remote(struct svc_xprt *xprt, { memcpy(&xprt->xpt_remote, sa, salen); xprt->xpt_remotelen = salen; - snprintf(xprt->xpt_remotebuf, sizeof(xprt->xpt_remotebuf) - 1, - "%pISpc", sa); } - static inline unsigned short svc_addr_port(const struct sockaddr *sa) { const struct sockaddr_in *sin = (const struct sockaddr_in *)sa; diff --git a/include/linux/sunrpc/svcauth.h b/include/linux/sunrpc/svcauth.h index 6d9cc9080a..035edad0aa 100644 --- a/include/linux/sunrpc/svcauth.h +++ b/include/linux/sunrpc/svcauth.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ /* * linux/include/linux/sunrpc/svcauth.h * @@ -10,6 +9,8 @@ #ifndef _LINUX_SUNRPC_SVCAUTH_H_ #define _LINUX_SUNRPC_SVCAUTH_H_ +#ifdef __KERNEL__ + #include #include #include @@ -29,7 +30,6 @@ struct svc_cred { /* name of form servicetype@hostname, passed down by * rpc.svcgssd, or computed from the above: */ char *cr_principal; - char *cr_targ_princ; struct gss_api_mech *cr_gss_mech; }; @@ -38,7 +38,6 @@ static inline void init_svc_cred(struct svc_cred *cred) cred->cr_group_info = NULL; cred->cr_raw_principal = NULL; cred->cr_principal = NULL; - cred->cr_targ_princ = NULL; cred->cr_gss_mech = NULL; } @@ -48,7 +47,6 @@ static inline void free_svc_cred(struct svc_cred *cred) put_group_info(cred->cr_group_info); kfree(cred->cr_raw_principal); kfree(cred->cr_principal); - kfree(cred->cr_targ_princ); gss_mech_put(cred->cr_gss_mech); init_svc_cred(cred); } @@ -80,7 +78,6 @@ struct auth_domain { struct hlist_node hash; char *name; struct auth_ops *flavour; - struct rcu_head rcu_head; }; /* @@ -127,11 +124,11 @@ struct auth_ops { char * name; struct module *owner; int flavour; - int (*accept)(struct svc_rqst *rq); + int (*accept)(struct svc_rqst *rq, __be32 *authp); int (*release)(struct svc_rqst *rq); void (*domain_release)(struct auth_domain *); int (*set_client)(struct svc_rqst *rq); -}; +} __do_const; 
#define SVC_GARBAGE 1 #define SVC_SYSERR 2 @@ -149,7 +146,7 @@ struct auth_ops { struct svc_xprt; -extern int svc_authenticate(struct svc_rqst *rqstp); +extern int svc_authenticate(struct svc_rqst *rqstp, __be32 *authp); extern int svc_authorise(struct svc_rqst *rqstp); extern int svc_set_client(struct svc_rqst *rqstp); extern int svc_auth_register(rpc_authflavor_t flavor, struct auth_ops *aops); @@ -183,4 +180,6 @@ static inline unsigned long hash_mem(char const *buf, int length, int bits) return full_name_hash(NULL, buf, length) >> (32 - bits); } +#endif /* __KERNEL__ */ + #endif /* _LINUX_SUNRPC_SVCAUTH_H_ */ diff --git a/include/linux/sunrpc/svcauth_gss.h b/include/linux/sunrpc/svcauth_gss.h index f09c82b0a7..726aff1a52 100644 --- a/include/linux/sunrpc/svcauth_gss.h +++ b/include/linux/sunrpc/svcauth_gss.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ /* * linux/include/linux/sunrpc/svcauth_gss.h * @@ -9,6 +8,7 @@ #ifndef _LINUX_SUNRPC_SVCAUTH_GSS_H #define _LINUX_SUNRPC_SVCAUTH_GSS_H +#ifdef __KERNEL__ #include #include #include @@ -20,8 +20,8 @@ int gss_svc_init(void); void gss_svc_shutdown(void); int gss_svc_init_net(struct net *net); void gss_svc_shutdown_net(struct net *net); -struct auth_domain *svcauth_gss_register_pseudoflavor(u32 pseudoflavor, - char *name); +int svcauth_gss_register_pseudoflavor(u32 pseudoflavor, char * name); u32 svcauth_gss_flavor(struct auth_domain *dom); +#endif /* __KERNEL__ */ #endif /* _LINUX_SUNRPC_SVCAUTH_GSS_H */ diff --git a/include/linux/sunrpc/svcsock.h b/include/linux/sunrpc/svcsock.h index bcc555c7ae..2e780134f4 100644 --- a/include/linux/sunrpc/svcsock.h +++ b/include/linux/sunrpc/svcsock.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ /* * linux/include/linux/sunrpc/svcsock.h * @@ -28,27 +27,25 @@ struct svc_sock { /* private TCP part */ /* On-the-wire fragment header: */ - __be32 sk_marker; + __be32 sk_reclen; /* As we receive a record, this includes the length received so * far (including the 
fragment header): */ u32 sk_tcplen; /* Total length of the data (not including fragment headers) * received so far in the fragments making up this rpc: */ u32 sk_datalen; - /* Number of queued send requests */ - atomic_t sk_sendqlen; struct page * sk_pages[RPCSVC_MAXPAGES]; /* received data */ }; static inline u32 svc_sock_reclen(struct svc_sock *svsk) { - return be32_to_cpu(svsk->sk_marker) & RPC_FRAGMENT_SIZE_MASK; + return ntohl(svsk->sk_reclen) & RPC_FRAGMENT_SIZE_MASK; } static inline u32 svc_sock_final_rec(struct svc_sock *svsk) { - return be32_to_cpu(svsk->sk_marker) & RPC_LAST_STREAM_FRAGMENT; + return ntohl(svsk->sk_reclen) & RPC_LAST_STREAM_FRAGMENT; } /* @@ -61,8 +58,7 @@ void svc_drop(struct svc_rqst *); void svc_sock_update_bufs(struct svc_serv *serv); bool svc_alien_sock(struct net *net, int fd); int svc_addsock(struct svc_serv *serv, const int fd, - char *name_return, const size_t len, - const struct cred *cred); + char *name_return, const size_t len); void svc_init_xprt_sock(void); void svc_cleanup_xprt_sock(void); struct svc_xprt *svc_sock_create(struct svc_serv *serv, int prot); diff --git a/include/linux/sunrpc/timer.h b/include/linux/sunrpc/timer.h index 242dbe00b5..697d6e69d6 100644 --- a/include/linux/sunrpc/timer.h +++ b/include/linux/sunrpc/timer.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ /* * linux/include/linux/sunrpc/timer.h * diff --git a/include/linux/sunrpc/types.h b/include/linux/sunrpc/types.h index bd3c8e0563..d222f47550 100644 --- a/include/linux/sunrpc/types.h +++ b/include/linux/sunrpc/types.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ /* * linux/include/linux/sunrpc/types.h * @@ -11,7 +10,6 @@ #define _LINUX_SUNRPC_TYPES_H_ #include -#include #include #include #include diff --git a/include/linux/sunrpc/xdr.h b/include/linux/sunrpc/xdr.h index b519609af1..56c48c884a 100644 --- a/include/linux/sunrpc/xdr.h +++ b/include/linux/sunrpc/xdr.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ /* * XDR 
standard data types and function declarations * @@ -11,28 +10,21 @@ #ifndef _SUNRPC_XDR_H_ #define _SUNRPC_XDR_H_ +#ifdef __KERNEL__ + #include #include #include #include -struct bio_vec; -struct rpc_rqst; - -/* - * Size of an XDR encoding unit in bytes, i.e. 32 bits, - * as defined in Section 3 of RFC 4506. All encoded - * XDR data items are aligned on a boundary of 32 bits. - */ -#define XDR_UNIT sizeof(__be32) - /* * Buffer adjustment */ #define XDR_QUADLEN(l) (((l) + 3) >> 2) /* - * Generic opaque `network object.' + * Generic opaque `network object.' At the kernel level, this type + * is used only by lockd. */ #define XDR_MAX_NETOBJ 1024 struct xdr_netobj { @@ -40,6 +32,13 @@ struct xdr_netobj { u8 * data; }; +/* + * This is the legacy generic XDR function. rqstp is either a rpc_rqst + * (client side) or svc_rqst pointer (server side). + * Encode functions always assume there's enough room in the buffer. + */ +typedef int (*kxdrproc_t)(void *rqstp, __be32 *data, void *obj); + /* * Basic structure for transmission/reception of a client XDR message. 
* Features a header (for a linear buffer containing RPC headers @@ -57,14 +56,12 @@ struct xdr_buf { struct kvec head[1], /* RPC header + non-page data */ tail[1]; /* Appended after page data */ - struct bio_vec *bvec; struct page ** pages; /* Array of pages */ unsigned int page_base, /* Start of page data */ page_len, /* Length of page data */ flags; /* Flags for data disposition */ #define XDRBUF_READ 0x01 /* target of file read */ #define XDRBUF_WRITE 0x02 /* source of file write */ -#define XDRBUF_SPARSE_PAGES 0x04 /* Page array is sparse */ unsigned int buflen, /* Total length of storage buffer */ len; /* Length of XDR encoded message */ @@ -76,7 +73,6 @@ xdr_buf_init(struct xdr_buf *buf, void *start, size_t len) buf->head[0].iov_base = start; buf->head[0].iov_len = len; buf->tail[0].iov_len = 0; - buf->pages = NULL; buf->page_len = 0; buf->flags = 0; buf->len = 0; @@ -91,17 +87,6 @@ xdr_buf_init(struct xdr_buf *buf, void *start, size_t len) #define xdr_one cpu_to_be32(1) #define xdr_two cpu_to_be32(2) -#define rpc_auth_null cpu_to_be32(RPC_AUTH_NULL) -#define rpc_auth_unix cpu_to_be32(RPC_AUTH_UNIX) -#define rpc_auth_short cpu_to_be32(RPC_AUTH_SHORT) -#define rpc_auth_gss cpu_to_be32(RPC_AUTH_GSS) -#define rpc_auth_tls cpu_to_be32(RPC_AUTH_TLS) - -#define rpc_call cpu_to_be32(RPC_CALL) -#define rpc_reply cpu_to_be32(RPC_REPLY) - -#define rpc_msg_accepted cpu_to_be32(RPC_MSG_ACCEPTED) - #define rpc_success cpu_to_be32(RPC_SUCCESS) #define rpc_prog_unavail cpu_to_be32(RPC_PROG_UNAVAIL) #define rpc_prog_mismatch cpu_to_be32(RPC_PROG_MISMATCH) @@ -110,9 +95,6 @@ xdr_buf_init(struct xdr_buf *buf, void *start, size_t len) #define rpc_system_err cpu_to_be32(RPC_SYSTEM_ERR) #define rpc_drop_reply cpu_to_be32(RPC_DROP_REPLY) -#define rpc_mismatch cpu_to_be32(RPC_MISMATCH) -#define rpc_auth_error cpu_to_be32(RPC_AUTH_ERROR) - #define rpc_auth_ok cpu_to_be32(RPC_AUTH_OK) #define rpc_autherr_badcred cpu_to_be32(RPC_AUTH_BADCRED) #define rpc_autherr_rejectedcred 
cpu_to_be32(RPC_AUTH_REJECTEDCRED) @@ -121,6 +103,7 @@ xdr_buf_init(struct xdr_buf *buf, void *start, size_t len) #define rpc_autherr_tooweak cpu_to_be32(RPC_AUTH_TOOWEAK) #define rpcsec_gsserr_credproblem cpu_to_be32(RPCSEC_GSS_CREDPROBLEM) #define rpcsec_gsserr_ctxproblem cpu_to_be32(RPCSEC_GSS_CTXPROBLEM) +#define rpc_autherr_oldseqnum cpu_to_be32(101) /* * Miscellaneous XDR helper functions @@ -135,10 +118,7 @@ __be32 *xdr_decode_netobj(__be32 *p, struct xdr_netobj *); void xdr_inline_pages(struct xdr_buf *, unsigned int, struct page **, unsigned int, unsigned int); -void xdr_terminate_string(const struct xdr_buf *, const u32); -size_t xdr_buf_pagecount(const struct xdr_buf *buf); -int xdr_alloc_bvec(struct xdr_buf *buf, gfp_t gfp); -void xdr_free_bvec(struct xdr_buf *buf); +void xdr_terminate_string(struct xdr_buf *, const u32); static inline __be32 *xdr_encode_array(__be32 *p, const void *s, unsigned int len) { @@ -169,13 +149,6 @@ xdr_decode_opaque_fixed(__be32 *p, void *ptr, unsigned int len) return p + XDR_QUADLEN(len); } -static inline void xdr_netobj_dup(struct xdr_netobj *dst, - struct xdr_netobj *src, gfp_t gfp_mask) -{ - dst->data = kmemdup(src->data, src->len, gfp_mask); - dst->len = src->len; -} - /* * Adjust kvec to reflect end of xdr'ed data (RPC client XDR) */ @@ -189,14 +162,32 @@ xdr_adjust_iovec(struct kvec *iov, __be32 *p) * XDR buffer helper functions */ extern void xdr_shift_buf(struct xdr_buf *, size_t); -extern void xdr_buf_from_iov(const struct kvec *, struct xdr_buf *); -extern int xdr_buf_subsegment(const struct xdr_buf *, struct xdr_buf *, unsigned int, unsigned int); +extern void xdr_buf_from_iov(struct kvec *, struct xdr_buf *); +extern int xdr_buf_subsegment(struct xdr_buf *, struct xdr_buf *, unsigned int, unsigned int); extern void xdr_buf_trim(struct xdr_buf *, unsigned int); -extern int read_bytes_from_xdr_buf(const struct xdr_buf *, unsigned int, void *, unsigned int); -extern int write_bytes_to_xdr_buf(const struct xdr_buf *, 
unsigned int, void *, unsigned int); +extern int xdr_buf_read_netobj(struct xdr_buf *, struct xdr_netobj *, unsigned int); +extern int read_bytes_from_xdr_buf(struct xdr_buf *, unsigned int, void *, unsigned int); +extern int write_bytes_to_xdr_buf(struct xdr_buf *, unsigned int, void *, unsigned int); -extern int xdr_encode_word(const struct xdr_buf *, unsigned int, u32); -extern int xdr_decode_word(const struct xdr_buf *, unsigned int, u32 *); +/* + * Helper structure for copying from an sk_buff. + */ +struct xdr_skb_reader { + struct sk_buff *skb; + unsigned int offset; + size_t count; + __wsum csum; +}; + +typedef size_t (*xdr_skb_read_actor)(struct xdr_skb_reader *desc, void *to, size_t len); + +size_t xdr_skb_read_bits(struct xdr_skb_reader *desc, void *to, size_t len); +extern int csum_partial_copy_to_xdr(struct xdr_buf *, struct sk_buff *); +extern ssize_t xdr_partial_copy_from_skb(struct xdr_buf *, unsigned int, + struct xdr_skb_reader *, xdr_skb_read_actor); + +extern int xdr_encode_word(struct xdr_buf *, unsigned int, u32); +extern int xdr_decode_word(struct xdr_buf *, unsigned int, u32 *); struct xdr_array2_desc; typedef int (*xdr_xcode_elem_t)(struct xdr_array2_desc *desc, void *elem); @@ -207,9 +198,9 @@ struct xdr_array2_desc { xdr_xcode_elem_t xcode; }; -extern int xdr_decode_array2(const struct xdr_buf *buf, unsigned int base, +extern int xdr_decode_array2(struct xdr_buf *buf, unsigned int base, struct xdr_array2_desc *desc); -extern int xdr_encode_array2(const struct xdr_buf *buf, unsigned int base, +extern int xdr_encode_array2(struct xdr_buf *buf, unsigned int base, struct xdr_array2_desc *desc); extern void _copy_from_pages(char *p, struct page **pages, size_t pgbase, size_t len); @@ -226,526 +217,31 @@ struct xdr_stream { struct kvec scratch; /* Scratch buffer */ struct page **page_ptr; /* pointer to the current page */ unsigned int nwords; /* Remaining decode buffer length */ - - struct rpc_rqst *rqst; /* For debugging */ }; /* * These are 
the xdr_stream style generic XDR encode and decode functions. */ -typedef void (*kxdreproc_t)(struct rpc_rqst *rqstp, struct xdr_stream *xdr, - const void *obj); -typedef int (*kxdrdproc_t)(struct rpc_rqst *rqstp, struct xdr_stream *xdr, - void *obj); +typedef void (*kxdreproc_t)(void *rqstp, struct xdr_stream *xdr, void *obj); +typedef int (*kxdrdproc_t)(void *rqstp, struct xdr_stream *xdr, void *obj); -extern void xdr_init_encode(struct xdr_stream *xdr, struct xdr_buf *buf, - __be32 *p, struct rpc_rqst *rqst); +extern void xdr_init_encode(struct xdr_stream *xdr, struct xdr_buf *buf, __be32 *p); extern __be32 *xdr_reserve_space(struct xdr_stream *xdr, size_t nbytes); -extern int xdr_reserve_space_vec(struct xdr_stream *xdr, struct kvec *vec, - size_t nbytes); extern void xdr_commit_encode(struct xdr_stream *xdr); extern void xdr_truncate_encode(struct xdr_stream *xdr, size_t len); extern int xdr_restrict_buflen(struct xdr_stream *xdr, int newbuflen); extern void xdr_write_pages(struct xdr_stream *xdr, struct page **pages, unsigned int base, unsigned int len); extern unsigned int xdr_stream_pos(const struct xdr_stream *xdr); -extern unsigned int xdr_page_pos(const struct xdr_stream *xdr); -extern void xdr_init_decode(struct xdr_stream *xdr, struct xdr_buf *buf, - __be32 *p, struct rpc_rqst *rqst); +extern void xdr_init_decode(struct xdr_stream *xdr, struct xdr_buf *buf, __be32 *p); extern void xdr_init_decode_pages(struct xdr_stream *xdr, struct xdr_buf *buf, struct page **pages, unsigned int len); +extern void xdr_set_scratch_buffer(struct xdr_stream *xdr, void *buf, size_t buflen); extern __be32 *xdr_inline_decode(struct xdr_stream *xdr, size_t nbytes); extern unsigned int xdr_read_pages(struct xdr_stream *xdr, unsigned int len); extern void xdr_enter_page(struct xdr_stream *xdr, unsigned int len); -extern int xdr_process_buf(const struct xdr_buf *buf, unsigned int offset, unsigned int len, int (*actor)(struct scatterlist *, void *), void *data); -extern unsigned 
int xdr_align_data(struct xdr_stream *, unsigned int offset, unsigned int length); -extern unsigned int xdr_expand_hole(struct xdr_stream *, unsigned int offset, unsigned int length); -extern bool xdr_stream_subsegment(struct xdr_stream *xdr, struct xdr_buf *subbuf, - unsigned int len); +extern int xdr_process_buf(struct xdr_buf *buf, unsigned int offset, unsigned int len, int (*actor)(struct scatterlist *, void *), void *data); -/** - * xdr_set_scratch_buffer - Attach a scratch buffer for decoding data. - * @xdr: pointer to xdr_stream struct - * @buf: pointer to an empty buffer - * @buflen: size of 'buf' - * - * The scratch buffer is used when decoding from an array of pages. - * If an xdr_inline_decode() call spans across page boundaries, then - * we copy the data into the scratch buffer in order to allow linear - * access. - */ -static inline void -xdr_set_scratch_buffer(struct xdr_stream *xdr, void *buf, size_t buflen) -{ - xdr->scratch.iov_base = buf; - xdr->scratch.iov_len = buflen; -} - -/** - * xdr_set_scratch_page - Attach a scratch buffer for decoding data - * @xdr: pointer to xdr_stream struct - * @page: an anonymous page - * - * See xdr_set_scratch_buffer(). - */ -static inline void -xdr_set_scratch_page(struct xdr_stream *xdr, struct page *page) -{ - xdr_set_scratch_buffer(xdr, page_address(page), PAGE_SIZE); -} - -/** - * xdr_reset_scratch_buffer - Clear scratch buffer information - * @xdr: pointer to xdr_stream struct - * - * See xdr_set_scratch_buffer(). 
- */ -static inline void -xdr_reset_scratch_buffer(struct xdr_stream *xdr) -{ - xdr_set_scratch_buffer(xdr, NULL, 0); -} - -/** - * xdr_stream_remaining - Return the number of bytes remaining in the stream - * @xdr: pointer to struct xdr_stream - * - * Return value: - * Number of bytes remaining in @xdr before xdr->end - */ -static inline size_t -xdr_stream_remaining(const struct xdr_stream *xdr) -{ - return xdr->nwords << 2; -} - -ssize_t xdr_stream_decode_opaque(struct xdr_stream *xdr, void *ptr, - size_t size); -ssize_t xdr_stream_decode_opaque_dup(struct xdr_stream *xdr, void **ptr, - size_t maxlen, gfp_t gfp_flags); -ssize_t xdr_stream_decode_string(struct xdr_stream *xdr, char *str, - size_t size); -ssize_t xdr_stream_decode_string_dup(struct xdr_stream *xdr, char **str, - size_t maxlen, gfp_t gfp_flags); -/** - * xdr_align_size - Calculate padded size of an object - * @n: Size of an object being XDR encoded (in bytes) - * - * Return value: - * Size (in bytes) of the object including xdr padding - */ -static inline size_t -xdr_align_size(size_t n) -{ - const size_t mask = XDR_UNIT - 1; - - return (n + mask) & ~mask; -} - -/** - * xdr_pad_size - Calculate size of an object's pad - * @n: Size of an object being XDR encoded (in bytes) - * - * This implementation avoids the need for conditional - * branches or modulo division. 
- * - * Return value: - * Size (in bytes) of the needed XDR pad - */ -static inline size_t xdr_pad_size(size_t n) -{ - return xdr_align_size(n) - n; -} - -/** - * xdr_stream_encode_item_present - Encode a "present" list item - * @xdr: pointer to xdr_stream - * - * Return values: - * On success, returns length in bytes of XDR buffer consumed - * %-EMSGSIZE on XDR buffer overflow - */ -static inline ssize_t xdr_stream_encode_item_present(struct xdr_stream *xdr) -{ - const size_t len = XDR_UNIT; - __be32 *p = xdr_reserve_space(xdr, len); - - if (unlikely(!p)) - return -EMSGSIZE; - *p = xdr_one; - return len; -} - -/** - * xdr_stream_encode_item_absent - Encode a "not present" list item - * @xdr: pointer to xdr_stream - * - * Return values: - * On success, returns length in bytes of XDR buffer consumed - * %-EMSGSIZE on XDR buffer overflow - */ -static inline int xdr_stream_encode_item_absent(struct xdr_stream *xdr) -{ - const size_t len = XDR_UNIT; - __be32 *p = xdr_reserve_space(xdr, len); - - if (unlikely(!p)) - return -EMSGSIZE; - *p = xdr_zero; - return len; -} - -/** - * xdr_encode_bool - Encode a boolean item - * @p: address in a buffer into which to encode - * @n: boolean value to encode - * - * Return value: - * Address of item following the encoded boolean - */ -static inline __be32 *xdr_encode_bool(__be32 *p, u32 n) -{ - *p = n ? 
xdr_one : xdr_zero; - return p++; -} - -/** - * xdr_stream_encode_bool - Encode a boolean item - * @xdr: pointer to xdr_stream - * @n: boolean value to encode - * - * Return values: - * On success, returns length in bytes of XDR buffer consumed - * %-EMSGSIZE on XDR buffer overflow - */ -static inline int xdr_stream_encode_bool(struct xdr_stream *xdr, __u32 n) -{ - const size_t len = XDR_UNIT; - __be32 *p = xdr_reserve_space(xdr, len); - - if (unlikely(!p)) - return -EMSGSIZE; - xdr_encode_bool(p, n); - return len; -} - -/** - * xdr_stream_encode_u32 - Encode a 32-bit integer - * @xdr: pointer to xdr_stream - * @n: integer to encode - * - * Return values: - * On success, returns length in bytes of XDR buffer consumed - * %-EMSGSIZE on XDR buffer overflow - */ -static inline ssize_t -xdr_stream_encode_u32(struct xdr_stream *xdr, __u32 n) -{ - const size_t len = sizeof(n); - __be32 *p = xdr_reserve_space(xdr, len); - - if (unlikely(!p)) - return -EMSGSIZE; - *p = cpu_to_be32(n); - return len; -} - -/** - * xdr_stream_encode_u64 - Encode a 64-bit integer - * @xdr: pointer to xdr_stream - * @n: 64-bit integer to encode - * - * Return values: - * On success, returns length in bytes of XDR buffer consumed - * %-EMSGSIZE on XDR buffer overflow - */ -static inline ssize_t -xdr_stream_encode_u64(struct xdr_stream *xdr, __u64 n) -{ - const size_t len = sizeof(n); - __be32 *p = xdr_reserve_space(xdr, len); - - if (unlikely(!p)) - return -EMSGSIZE; - xdr_encode_hyper(p, n); - return len; -} - -/** - * xdr_stream_encode_opaque_inline - Encode opaque xdr data - * @xdr: pointer to xdr_stream - * @ptr: pointer to void pointer - * @len: size of object - * - * Return values: - * On success, returns length in bytes of XDR buffer consumed - * %-EMSGSIZE on XDR buffer overflow - */ -static inline ssize_t -xdr_stream_encode_opaque_inline(struct xdr_stream *xdr, void **ptr, size_t len) -{ - size_t count = sizeof(__u32) + xdr_align_size(len); - __be32 *p = xdr_reserve_space(xdr, count); - 
- if (unlikely(!p)) { - *ptr = NULL; - return -EMSGSIZE; - } - xdr_encode_opaque(p, NULL, len); - *ptr = ++p; - return count; -} - -/** - * xdr_stream_encode_opaque_fixed - Encode fixed length opaque xdr data - * @xdr: pointer to xdr_stream - * @ptr: pointer to opaque data object - * @len: size of object pointed to by @ptr - * - * Return values: - * On success, returns length in bytes of XDR buffer consumed - * %-EMSGSIZE on XDR buffer overflow - */ -static inline ssize_t -xdr_stream_encode_opaque_fixed(struct xdr_stream *xdr, const void *ptr, size_t len) -{ - __be32 *p = xdr_reserve_space(xdr, len); - - if (unlikely(!p)) - return -EMSGSIZE; - xdr_encode_opaque_fixed(p, ptr, len); - return xdr_align_size(len); -} - -/** - * xdr_stream_encode_opaque - Encode variable length opaque xdr data - * @xdr: pointer to xdr_stream - * @ptr: pointer to opaque data object - * @len: size of object pointed to by @ptr - * - * Return values: - * On success, returns length in bytes of XDR buffer consumed - * %-EMSGSIZE on XDR buffer overflow - */ -static inline ssize_t -xdr_stream_encode_opaque(struct xdr_stream *xdr, const void *ptr, size_t len) -{ - size_t count = sizeof(__u32) + xdr_align_size(len); - __be32 *p = xdr_reserve_space(xdr, count); - - if (unlikely(!p)) - return -EMSGSIZE; - xdr_encode_opaque(p, ptr, len); - return count; -} - -/** - * xdr_stream_encode_uint32_array - Encode variable length array of integers - * @xdr: pointer to xdr_stream - * @array: array of integers - * @array_size: number of elements in @array - * - * Return values: - * On success, returns length in bytes of XDR buffer consumed - * %-EMSGSIZE on XDR buffer overflow - */ -static inline ssize_t -xdr_stream_encode_uint32_array(struct xdr_stream *xdr, - const __u32 *array, size_t array_size) -{ - ssize_t ret = (array_size+1) * sizeof(__u32); - __be32 *p = xdr_reserve_space(xdr, ret); - - if (unlikely(!p)) - return -EMSGSIZE; - *p++ = cpu_to_be32(array_size); - for (; array_size > 0; p++, array++, 
array_size--) - *p = cpu_to_be32p(array); - return ret; -} - -/** - * xdr_item_is_absent - symbolically handle XDR discriminators - * @p: pointer to undecoded discriminator - * - * Return values: - * %true if the following XDR item is absent - * %false if the following XDR item is present - */ -static inline bool xdr_item_is_absent(const __be32 *p) -{ - return *p == xdr_zero; -} - -/** - * xdr_item_is_present - symbolically handle XDR discriminators - * @p: pointer to undecoded discriminator - * - * Return values: - * %true if the following XDR item is present - * %false if the following XDR item is absent - */ -static inline bool xdr_item_is_present(const __be32 *p) -{ - return *p != xdr_zero; -} - -/** - * xdr_stream_decode_bool - Decode a boolean - * @xdr: pointer to xdr_stream - * @ptr: pointer to a u32 in which to store the result - * - * Return values: - * %0 on success - * %-EBADMSG on XDR buffer overflow - */ -static inline ssize_t -xdr_stream_decode_bool(struct xdr_stream *xdr, __u32 *ptr) -{ - const size_t count = sizeof(*ptr); - __be32 *p = xdr_inline_decode(xdr, count); - - if (unlikely(!p)) - return -EBADMSG; - *ptr = (*p != xdr_zero); - return 0; -} - -/** - * xdr_stream_decode_u32 - Decode a 32-bit integer - * @xdr: pointer to xdr_stream - * @ptr: location to store integer - * - * Return values: - * %0 on success - * %-EBADMSG on XDR buffer overflow - */ -static inline ssize_t -xdr_stream_decode_u32(struct xdr_stream *xdr, __u32 *ptr) -{ - const size_t count = sizeof(*ptr); - __be32 *p = xdr_inline_decode(xdr, count); - - if (unlikely(!p)) - return -EBADMSG; - *ptr = be32_to_cpup(p); - return 0; -} - -/** - * xdr_stream_decode_u64 - Decode a 64-bit integer - * @xdr: pointer to xdr_stream - * @ptr: location to store 64-bit integer - * - * Return values: - * %0 on success - * %-EBADMSG on XDR buffer overflow - */ -static inline ssize_t -xdr_stream_decode_u64(struct xdr_stream *xdr, __u64 *ptr) -{ - const size_t count = sizeof(*ptr); - __be32 *p = 
xdr_inline_decode(xdr, count); - - if (unlikely(!p)) - return -EBADMSG; - xdr_decode_hyper(p, ptr); - return 0; -} - -/** - * xdr_stream_decode_opaque_fixed - Decode fixed length opaque xdr data - * @xdr: pointer to xdr_stream - * @ptr: location to store data - * @len: size of buffer pointed to by @ptr - * - * Return values: - * On success, returns size of object stored in @ptr - * %-EBADMSG on XDR buffer overflow - */ -static inline ssize_t -xdr_stream_decode_opaque_fixed(struct xdr_stream *xdr, void *ptr, size_t len) -{ - __be32 *p = xdr_inline_decode(xdr, len); - - if (unlikely(!p)) - return -EBADMSG; - xdr_decode_opaque_fixed(p, ptr, len); - return len; -} - -/** - * xdr_stream_decode_opaque_inline - Decode variable length opaque xdr data - * @xdr: pointer to xdr_stream - * @ptr: location to store pointer to opaque data - * @maxlen: maximum acceptable object size - * - * Note: the pointer stored in @ptr cannot be assumed valid after the XDR - * buffer has been destroyed, or even after calling xdr_inline_decode() - * on @xdr. It is therefore expected that the object it points to should - * be processed immediately. 
- * - * Return values: - * On success, returns size of object stored in *@ptr - * %-EBADMSG on XDR buffer overflow - * %-EMSGSIZE if the size of the object would exceed @maxlen - */ -static inline ssize_t -xdr_stream_decode_opaque_inline(struct xdr_stream *xdr, void **ptr, size_t maxlen) -{ - __be32 *p; - __u32 len; - - *ptr = NULL; - if (unlikely(xdr_stream_decode_u32(xdr, &len) < 0)) - return -EBADMSG; - if (len != 0) { - p = xdr_inline_decode(xdr, len); - if (unlikely(!p)) - return -EBADMSG; - if (unlikely(len > maxlen)) - return -EMSGSIZE; - *ptr = p; - } - return len; -} - -/** - * xdr_stream_decode_uint32_array - Decode variable length array of integers - * @xdr: pointer to xdr_stream - * @array: location to store the integer array or NULL - * @array_size: number of elements to store - * - * Return values: - * On success, returns number of elements stored in @array - * %-EBADMSG on XDR buffer overflow - * %-EMSGSIZE if the size of the array exceeds @array_size - */ -static inline ssize_t -xdr_stream_decode_uint32_array(struct xdr_stream *xdr, - __u32 *array, size_t array_size) -{ - __be32 *p; - __u32 len; - ssize_t retval; - - if (unlikely(xdr_stream_decode_u32(xdr, &len) < 0)) - return -EBADMSG; - p = xdr_inline_decode(xdr, len * sizeof(*p)); - if (unlikely(!p)) - return -EBADMSG; - if (array == NULL) - return len; - if (len <= array_size) { - if (len < array_size) - memset(array+len, 0, (array_size-len)*sizeof(*array)); - array_size = len; - retval = len; - } else - retval = -EMSGSIZE; - for (; array_size > 0; p++, array++, array_size--) - *array = be32_to_cpup(p); - return retval; -} +#endif /* __KERNEL__ */ #endif /* _SUNRPC_XDR_H_ */ diff --git a/include/linux/sunrpc/xprt.h b/include/linux/sunrpc/xprt.h index 955ea4d7af..a5da60b24d 100644 --- a/include/linux/sunrpc/xprt.h +++ b/include/linux/sunrpc/xprt.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ /* * linux/include/linux/sunrpc/xprt.h * @@ -19,6 +18,8 @@ #include #include +#ifdef __KERNEL__ 
+ #define RPC_MIN_SLOT_TABLE (2U) #define RPC_DEF_SLOT_TABLE (16U) #define RPC_MAX_SLOT_TABLE_LIMIT (65536U) @@ -53,7 +54,6 @@ enum rpc_display_format_t { struct rpc_task; struct rpc_xprt; -struct xprt_class; struct seq_file; struct svc_serv; struct net; @@ -81,15 +81,9 @@ struct rpc_rqst { struct page **rq_enc_pages; /* scratch pages for use by gss privacy code */ void (*rq_release_snd_buf)(struct rpc_rqst *); /* release rq_enc_pages */ + struct list_head rq_list; - union { - struct list_head rq_list; /* Slot allocation list */ - struct rb_node rq_recv; /* Receive queue */ - }; - - struct list_head rq_xmit; /* Send queue */ - struct list_head rq_xmit2; /* Send queue */ - + void *rq_xprtdata; /* Per-xprt private data */ void *rq_buffer; /* Call XDR encode buffer */ size_t rq_callsize; void *rq_rbuffer; /* Reply XDR decode buffer */ @@ -102,7 +96,6 @@ struct rpc_rqst { * used in the softirq. */ unsigned long rq_majortimeo; /* major timeout alarm */ - unsigned long rq_minortimeo; /* minor timeout alarm */ unsigned long rq_timeout; /* Current timeout value */ ktime_t rq_rtt; /* round-trip time */ unsigned int rq_retries; /* # of retries */ @@ -110,7 +103,6 @@ struct rpc_rqst { /* A cookie used to track the state of the transport connection */ - atomic_t rq_pin; /* * Partial send handling @@ -134,31 +126,25 @@ struct rpc_xprt_ops { int (*reserve_xprt)(struct rpc_xprt *xprt, struct rpc_task *task); void (*release_xprt)(struct rpc_xprt *xprt, struct rpc_task *task); void (*alloc_slot)(struct rpc_xprt *xprt, struct rpc_task *task); - void (*free_slot)(struct rpc_xprt *xprt, - struct rpc_rqst *req); void (*rpcbind)(struct rpc_task *task); void (*set_port)(struct rpc_xprt *xprt, unsigned short port); void (*connect)(struct rpc_xprt *xprt, struct rpc_task *task); int (*buf_alloc)(struct rpc_task *task); void (*buf_free)(struct rpc_task *task); - void (*prepare_request)(struct rpc_rqst *req); - int (*send_request)(struct rpc_rqst *req); - void (*wait_for_reply_request)(struct 
rpc_task *task); + int (*send_request)(struct rpc_task *task); + void (*set_retrans_timeout)(struct rpc_task *task); void (*timer)(struct rpc_xprt *xprt, struct rpc_task *task); void (*release_request)(struct rpc_task *task); void (*close)(struct rpc_xprt *xprt); void (*destroy)(struct rpc_xprt *xprt); - void (*set_connect_timeout)(struct rpc_xprt *xprt, - unsigned long connect_timeout, - unsigned long reconnect_timeout); void (*print_stats)(struct rpc_xprt *xprt, struct seq_file *seq); int (*enable_swap)(struct rpc_xprt *xprt); void (*disable_swap)(struct rpc_xprt *xprt); void (*inject_disconnect)(struct rpc_xprt *xprt); int (*bc_setup)(struct rpc_xprt *xprt, unsigned int min_reqs); + int (*bc_up)(struct svc_serv *serv, struct net *net); size_t (*bc_maxpayload)(struct rpc_xprt *xprt); - unsigned int (*bc_num_slots)(struct rpc_xprt *xprt); void (*bc_free_rqst)(struct rpc_rqst *rqst); void (*bc_destroy)(struct rpc_xprt *xprt, unsigned int max_reqs); @@ -183,11 +169,9 @@ enum xprt_transports { XPRT_TRANSPORT_LOCAL = 257, }; -struct rpc_sysfs_xprt; struct rpc_xprt { struct kref kref; /* Reference count */ - const struct rpc_xprt_ops *ops; /* transport methods */ - unsigned int id; /* transport id */ + struct rpc_xprt_ops * ops; /* transport methods */ const struct rpc_timeout *timeout; /* timeout parms */ struct sockaddr_storage addr; /* server address */ @@ -199,6 +183,8 @@ struct rpc_xprt { size_t max_payload; /* largest RPC payload size, in bytes */ + unsigned int tsh_size; /* size of transport specific + header */ struct rpc_wait_queue binding; /* requests waiting on rpcbind */ struct rpc_wait_queue sending; /* requests waiting to send */ @@ -207,10 +193,9 @@ struct rpc_xprt { struct list_head free; /* free slots */ unsigned int max_reqs; /* max number of slots */ unsigned int min_reqs; /* min number of slots */ - unsigned int num_reqs; /* total slots */ + atomic_t num_reqs; /* total slots */ unsigned long state; /* transport state */ - unsigned char resvport : 1, 
/* use a reserved port */ - reuseport : 1; /* reuse port on reconnect */ + unsigned char resvport : 1; /* use a reserved port */ atomic_t swapper; /* we're swapping over this transport */ unsigned int bind_index; /* bind function index */ @@ -236,36 +221,27 @@ struct rpc_xprt { struct timer_list timer; unsigned long last_used, idle_timeout, - connect_timeout, max_reconnect_timeout; /* * Send stuff */ - atomic_long_t queuelen; spinlock_t transport_lock; /* lock transport info */ spinlock_t reserve_lock; /* lock slot table */ - spinlock_t queue_lock; /* send/receive queue lock */ u32 xid; /* Next XID value to use */ struct rpc_task * snd_task; /* Task blocked in send */ - - struct list_head xmit_queue; /* Send queue */ - atomic_long_t xmit_queuelen; - struct svc_xprt *bc_xprt; /* NFSv4.1 backchannel */ #if defined(CONFIG_SUNRPC_BACKCHANNEL) struct svc_serv *bc_serv; /* The RPC service which will */ /* process the callback */ - unsigned int bc_alloc_max; - unsigned int bc_alloc_count; /* Total number of preallocs */ - atomic_t bc_slot_count; /* Number of allocated slots */ + int bc_alloc_count; /* Total number of preallocs */ + atomic_t bc_free_slots; spinlock_t bc_pa_lock; /* Protects the preallocated * items */ struct list_head bc_pa_list; /* List of preallocated * backchannel rpc_rqst's */ #endif /* CONFIG_SUNRPC_BACKCHANNEL */ - - struct rb_root recv_queue; /* Receive queue */ + struct list_head recv; struct { unsigned long bind_count, /* total number of binds */ @@ -288,11 +264,9 @@ struct rpc_xprt { const char *address_strings[RPC_DISPLAY_MAX]; #if IS_ENABLED(CONFIG_SUNRPC_DEBUG) struct dentry *debugfs; /* debugfs directory */ + atomic_t inject_disconnect; #endif struct rcu_head rcu; - const struct xprt_class *xprt_class; - struct rpc_sysfs_xprt *xprt_sysfs; - bool main; /*mark if this is the 1st transport */ }; #if defined(CONFIG_SUNRPC_BACKCHANNEL) @@ -336,7 +310,6 @@ struct xprt_class { struct rpc_xprt * (*setup)(struct xprt_create *); struct module *owner; 
char name[32]; - const char * netid[]; }; /* @@ -344,23 +317,13 @@ struct xprt_class { */ struct rpc_xprt *xprt_create_transport(struct xprt_create *args); void xprt_connect(struct rpc_task *task); -unsigned long xprt_reconnect_delay(const struct rpc_xprt *xprt); -void xprt_reconnect_backoff(struct rpc_xprt *xprt, - unsigned long init_to); void xprt_reserve(struct rpc_task *task); void xprt_retry_reserve(struct rpc_task *task); int xprt_reserve_xprt(struct rpc_xprt *xprt, struct rpc_task *task); int xprt_reserve_xprt_cong(struct rpc_xprt *xprt, struct rpc_task *task); void xprt_alloc_slot(struct rpc_xprt *xprt, struct rpc_task *task); -void xprt_free_slot(struct rpc_xprt *xprt, - struct rpc_rqst *req); -void xprt_request_prepare(struct rpc_rqst *req); +void xprt_lock_and_alloc_slot(struct rpc_xprt *xprt, struct rpc_task *task); bool xprt_prepare_transmit(struct rpc_task *task); -void xprt_request_enqueue_transmit(struct rpc_task *task); -void xprt_request_enqueue_receive(struct rpc_task *task); -void xprt_request_wait_receive(struct rpc_task *task); -void xprt_request_dequeue_xprt(struct rpc_task *task); -bool xprt_request_need_retransmit(struct rpc_task *task); void xprt_transmit(struct rpc_task *task); void xprt_end_transmit(struct rpc_task *task); int xprt_adjust_timeout(struct rpc_rqst *req); @@ -373,9 +336,11 @@ struct rpc_xprt * xprt_alloc(struct net *net, size_t size, unsigned int num_prealloc, unsigned int max_req); void xprt_free(struct rpc_xprt *); -void xprt_add_backlog(struct rpc_xprt *xprt, struct rpc_task *task); -bool xprt_wake_up_backlog(struct rpc_xprt *xprt, struct rpc_rqst *req); -void xprt_cleanup_ids(void); + +static inline __be32 *xprt_skip_transport_header(struct rpc_xprt *xprt, __be32 *p) +{ + return p + xprt->tsh_size; +} static inline int xprt_enable_swap(struct rpc_xprt *xprt) @@ -394,27 +359,22 @@ xprt_disable_swap(struct rpc_xprt *xprt) */ int xprt_register_transport(struct xprt_class *type); int xprt_unregister_transport(struct 
xprt_class *type); -int xprt_find_transport_ident(const char *); -void xprt_wait_for_reply_request_def(struct rpc_task *task); -void xprt_wait_for_reply_request_rtt(struct rpc_task *task); +int xprt_load_transport(const char *); +void xprt_set_retrans_timeout_def(struct rpc_task *task); +void xprt_set_retrans_timeout_rtt(struct rpc_task *task); void xprt_wake_pending_tasks(struct rpc_xprt *xprt, int status); -void xprt_wait_for_buffer_space(struct rpc_xprt *xprt); -bool xprt_write_space(struct rpc_xprt *xprt); +void xprt_wait_for_buffer_space(struct rpc_task *task, rpc_action action); +void xprt_write_space(struct rpc_xprt *xprt); void xprt_adjust_cwnd(struct rpc_xprt *xprt, struct rpc_task *task, int result); struct rpc_rqst * xprt_lookup_rqst(struct rpc_xprt *xprt, __be32 xid); -void xprt_update_rtt(struct rpc_task *task); void xprt_complete_rqst(struct rpc_task *task, int copied); -void xprt_pin_rqst(struct rpc_rqst *req); -void xprt_unpin_rqst(struct rpc_rqst *req); void xprt_release_rqst_cong(struct rpc_task *task); -bool xprt_request_get_cong(struct rpc_xprt *xprt, struct rpc_rqst *req); void xprt_disconnect_done(struct rpc_xprt *xprt); void xprt_force_disconnect(struct rpc_xprt *xprt); void xprt_conditional_disconnect(struct rpc_xprt *xprt, unsigned int cookie); bool xprt_lock_connect(struct rpc_xprt *, struct rpc_task *, void *); void xprt_unlock_connect(struct rpc_xprt *, void *); -void xprt_release_write(struct rpc_xprt *, struct rpc_task *); /* * Reserved bit positions in xprt->state @@ -426,12 +386,7 @@ void xprt_release_write(struct rpc_xprt *, struct rpc_task *); #define XPRT_BOUND (4) #define XPRT_BINDING (5) #define XPRT_CLOSING (6) -#define XPRT_OFFLINE (7) -#define XPRT_REMOVE (8) #define XPRT_CONGESTED (9) -#define XPRT_CWND_WAIT (10) -#define XPRT_WRITE_SPACE (11) -#define XPRT_SND_IS_COOKIE (12) static inline void xprt_set_connected(struct rpc_xprt *xprt) { @@ -502,4 +457,23 @@ static inline int xprt_test_and_set_binding(struct rpc_xprt *xprt) 
return test_and_set_bit(XPRT_BINDING, &xprt->state); } +#if IS_ENABLED(CONFIG_SUNRPC_DEBUG) +extern unsigned int rpc_inject_disconnect; +static inline void xprt_inject_disconnect(struct rpc_xprt *xprt) +{ + if (!rpc_inject_disconnect) + return; + if (atomic_dec_return(&xprt->inject_disconnect)) + return; + atomic_set(&xprt->inject_disconnect, rpc_inject_disconnect); + xprt->ops->inject_disconnect(xprt); +} +#else +static inline void xprt_inject_disconnect(struct rpc_xprt *xprt) +{ +} +#endif + +#endif /* __KERNEL__*/ + #endif /* _LINUX_SUNRPC_XPRT_H */ diff --git a/include/linux/sunrpc/xprtmultipath.h b/include/linux/sunrpc/xprtmultipath.h index bbb8a5fa08..507418c1c6 100644 --- a/include/linux/sunrpc/xprtmultipath.h +++ b/include/linux/sunrpc/xprtmultipath.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ /* * RPC client multipathing definitions * @@ -10,23 +9,17 @@ #define _NET_SUNRPC_XPRTMULTIPATH_H struct rpc_xprt_iter_ops; -struct rpc_sysfs_xprt_switch; struct rpc_xprt_switch { spinlock_t xps_lock; struct kref xps_kref; - unsigned int xps_id; unsigned int xps_nxprts; - unsigned int xps_nactive; - unsigned int xps_nunique_destaddr_xprts; - atomic_long_t xps_queuelen; struct list_head xps_xprt_list; struct net * xps_net; const struct rpc_xprt_iter_ops *xps_iter_ops; - struct rpc_sysfs_xprt_switch *xps_sysfs; struct rcu_head xps_rcu; }; @@ -75,7 +68,4 @@ extern struct rpc_xprt *xprt_iter_get_next(struct rpc_xprt_iter *xpi); extern bool rpc_xprt_switch_has_addr(struct rpc_xprt_switch *xps, const struct sockaddr *sap); - -extern void xprt_multipath_cleanup_ids(void); - #endif diff --git a/include/linux/sunrpc/xprtrdma.h b/include/linux/sunrpc/xprtrdma.h index 16c239e0d6..221b7a2e54 100644 --- a/include/linux/sunrpc/xprtrdma.h +++ b/include/linux/sunrpc/xprtrdma.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ /* * Copyright (c) 2003-2007 Network Appliance, Inc. All rights reserved. 
* @@ -49,9 +48,9 @@ * fully-chunked NFS message (read chunks are the largest). Note only * a single chunk type per message is supported currently. */ -#define RPCRDMA_MIN_SLOT_TABLE (4U) +#define RPCRDMA_MIN_SLOT_TABLE (2U) #define RPCRDMA_DEF_SLOT_TABLE (128U) -#define RPCRDMA_MAX_SLOT_TABLE (16384U) +#define RPCRDMA_MAX_SLOT_TABLE (256U) #define RPCRDMA_MIN_INLINE (1024) /* min inline thresh */ #define RPCRDMA_DEF_INLINE (4096) /* default inline thresh */ @@ -65,7 +64,7 @@ enum rpcrdma_memreg { RPCRDMA_MEMWINDOWS, RPCRDMA_MEMWINDOWS_ASYNC, RPCRDMA_MTHCAFMR, - RPCRDMA_FRWR, + RPCRDMA_FRMR, RPCRDMA_ALLPHYSICAL, RPCRDMA_LAST }; diff --git a/include/linux/sunrpc/xprtsock.h b/include/linux/sunrpc/xprtsock.h index 8c2a712cb2..bef3fb0abb 100644 --- a/include/linux/sunrpc/xprtsock.h +++ b/include/linux/sunrpc/xprtsock.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ /* * linux/include/linux/sunrpc/xprtsock.h * @@ -8,9 +7,10 @@ #ifndef _LINUX_SUNRPC_XPRTSOCK_H #define _LINUX_SUNRPC_XPRTSOCK_H +#ifdef __KERNEL__ + int init_socket_xprt(void); void cleanup_socket_xprt(void); -unsigned short get_srcport(struct rpc_xprt *); #define RPC_MIN_RESVPORT (1U) #define RPC_MAX_RESVPORT (65535U) @@ -25,42 +25,29 @@ struct sock_xprt { */ struct socket * sock; struct sock * inet; - struct file * file; /* * State of TCP reply receive */ - struct { - struct { - __be32 fraghdr, - xid, - calldir; - } __attribute__((packed)); + __be32 tcp_fraghdr, + tcp_xid, + tcp_calldir; - u32 offset, - len; + u32 tcp_offset, + tcp_reclen; - unsigned long copied; - } recv; - - /* - * State of TCP transmit queue - */ - struct { - u32 offset; - } xmit; + unsigned long tcp_copied, + tcp_flags; /* * Connection of transports */ unsigned long sock_state; struct delayed_work connect_worker; - struct work_struct error_worker; struct work_struct recv_worker; struct mutex recv_mutex; struct sockaddr_storage srcaddr; unsigned short srcport; - int xprt_err; /* * UDP socket buffer size parameters @@ -68,8 +55,6 
@@ struct sock_xprt { size_t rcvsize, sndsize; - struct rpc_timeout tcp_timeout; - /* * Saved socket callback addresses */ @@ -79,15 +64,24 @@ struct sock_xprt { void (*old_error_report)(struct sock *); }; +/* + * TCP receive state flags + */ +#define TCP_RCV_LAST_FRAG (1UL << 0) +#define TCP_RCV_COPY_FRAGHDR (1UL << 1) +#define TCP_RCV_COPY_XID (1UL << 2) +#define TCP_RCV_COPY_DATA (1UL << 3) +#define TCP_RCV_READ_CALLDIR (1UL << 4) +#define TCP_RCV_COPY_CALLDIR (1UL << 5) + /* * TCP RPC flags */ +#define TCP_RPC_REPLY (1UL << 6) + #define XPRT_SOCK_CONNECTING 1U #define XPRT_SOCK_DATA_READY (2) -#define XPRT_SOCK_UPD_TIMEOUT (3) -#define XPRT_SOCK_WAKE_ERROR (4) -#define XPRT_SOCK_WAKE_WRITE (5) -#define XPRT_SOCK_WAKE_PENDING (6) -#define XPRT_SOCK_WAKE_DISCONNECT (7) + +#endif /* __KERNEL__ */ #endif /* _LINUX_SUNRPC_XPRTSOCK_H */ diff --git a/include/linux/sunserialcore.h b/include/linux/sunserialcore.h index c12d1c7fa4..dbe4d7fca1 100644 --- a/include/linux/sunserialcore.h +++ b/include/linux/sunserialcore.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ /* sunserialcore.h * * Generic SUN serial/kbd/ms layer. 
Based entirely diff --git a/include/linux/sunxi-rsb.h b/include/linux/sunxi-rsb.h index bf0d365f47..7e75bb0346 100644 --- a/include/linux/sunxi-rsb.h +++ b/include/linux/sunxi-rsb.h @@ -59,7 +59,7 @@ static inline void sunxi_rsb_device_set_drvdata(struct sunxi_rsb_device *rdev, struct sunxi_rsb_driver { struct device_driver driver; int (*probe)(struct sunxi_rsb_device *rdev); - void (*remove)(struct sunxi_rsb_device *rdev); + int (*remove)(struct sunxi_rsb_device *rdev); }; static inline struct sunxi_rsb_driver *to_sunxi_rsb_driver(struct device_driver *d) diff --git a/include/linux/superhyway.h b/include/linux/superhyway.h index 8d33767758..17ea468fa3 100644 --- a/include/linux/superhyway.h +++ b/include/linux/superhyway.h @@ -101,7 +101,7 @@ int superhyway_add_device(unsigned long base, struct superhyway_device *, struct int superhyway_add_devices(struct superhyway_bus *bus, struct superhyway_device **devices, int nr_devices); /* drivers/sh/superhyway/superhyway-sysfs.c */ -extern const struct attribute_group *superhyway_dev_groups[]; +extern struct device_attribute superhyway_dev_attrs[]; #endif /* __LINUX_SUPERHYWAY_H */ diff --git a/include/linux/suspend.h b/include/linux/suspend.h index 8af13ba60c..d9718378a8 100644 --- a/include/linux/suspend.h +++ b/include/linux/suspend.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_SUSPEND_H #define _LINUX_SUSPEND_H @@ -34,10 +33,10 @@ static inline void pm_restore_console(void) typedef int __bitwise suspend_state_t; #define PM_SUSPEND_ON ((__force suspend_state_t) 0) -#define PM_SUSPEND_TO_IDLE ((__force suspend_state_t) 1) +#define PM_SUSPEND_FREEZE ((__force suspend_state_t) 1) #define PM_SUSPEND_STANDBY ((__force suspend_state_t) 2) #define PM_SUSPEND_MEM ((__force suspend_state_t) 3) -#define PM_SUSPEND_MIN PM_SUSPEND_TO_IDLE +#define PM_SUSPEND_MIN PM_SUSPEND_FREEZE #define PM_SUSPEND_MAX ((__force suspend_state_t) 4) enum suspend_stat_step { @@ -187,20 +186,14 @@ struct 
platform_suspend_ops { void (*recover)(void); }; -struct platform_s2idle_ops { +struct platform_freeze_ops { int (*begin)(void); int (*prepare)(void); - int (*prepare_late)(void); - bool (*wake)(void); - void (*restore_early)(void); void (*restore)(void); void (*end)(void); }; #ifdef CONFIG_SUSPEND -extern suspend_state_t mem_sleep_current; -extern suspend_state_t mem_sleep_default; - /** * suspend_set_ops - set platform dependent suspend operations * @ops: The new suspend operations to set. @@ -210,9 +203,8 @@ extern int suspend_valid_only_mem(suspend_state_t state); extern unsigned int pm_suspend_global_flags; -#define PM_SUSPEND_FLAG_FW_SUSPEND BIT(0) -#define PM_SUSPEND_FLAG_FW_RESUME BIT(1) -#define PM_SUSPEND_FLAG_NO_PLATFORM BIT(2) +#define PM_SUSPEND_FLAG_FW_SUSPEND (1 << 0) +#define PM_SUSPEND_FLAG_FW_RESUME (1 << 1) static inline void pm_suspend_clear_flags(void) { @@ -229,86 +221,33 @@ static inline void pm_set_resume_via_firmware(void) pm_suspend_global_flags |= PM_SUSPEND_FLAG_FW_RESUME; } -static inline void pm_set_suspend_no_platform(void) -{ - pm_suspend_global_flags |= PM_SUSPEND_FLAG_NO_PLATFORM; -} - -/** - * pm_suspend_via_firmware - Check if platform firmware will suspend the system. - * - * To be called during system-wide power management transitions to sleep states - * or during the subsequent system-wide transitions back to the working state. - * - * Return 'true' if the platform firmware is going to be invoked at the end of - * the system-wide power management transition (to a sleep state) in progress in - * order to complete it, or if the platform firmware has been invoked in order - * to complete the last (or preceding) transition of the system to a sleep - * state. 
- * - * This matters if the caller needs or wants to carry out some special actions - * depending on whether or not control will be passed to the platform firmware - * subsequently (for example, the device may need to be reset before letting the - * platform firmware manipulate it, which is not necessary when the platform - * firmware is not going to be invoked) or when such special actions may have - * been carried out during the preceding transition of the system to a sleep - * state (as they may need to be taken into account). - */ static inline bool pm_suspend_via_firmware(void) { return !!(pm_suspend_global_flags & PM_SUSPEND_FLAG_FW_SUSPEND); } -/** - * pm_resume_via_firmware - Check if platform firmware has woken up the system. - * - * To be called during system-wide power management transitions from sleep - * states. - * - * Return 'true' if the platform firmware has passed control to the kernel at - * the beginning of the system-wide power management transition in progress, so - * the event that woke up the system from sleep has been handled by the platform - * firmware. - */ static inline bool pm_resume_via_firmware(void) { return !!(pm_suspend_global_flags & PM_SUSPEND_FLAG_FW_RESUME); } -/** - * pm_suspend_no_platform - Check if platform may change device power states. - * - * To be called during system-wide power management transitions to sleep states - * or during the subsequent system-wide transitions back to the working state. - * - * Return 'true' if the power states of devices remain under full control of the - * kernel throughout the system-wide suspend and resume cycle in progress (that - * is, if a device is put into a certain power state during suspend, it can be - * expected to remain in that state during resume). - */ -static inline bool pm_suspend_no_platform(void) -{ - return !!(pm_suspend_global_flags & PM_SUSPEND_FLAG_NO_PLATFORM); -} - /* Suspend-to-idle state machnine. 
*/ -enum s2idle_states { - S2IDLE_STATE_NONE, /* Not suspended/suspending. */ - S2IDLE_STATE_ENTER, /* Enter suspend-to-idle. */ - S2IDLE_STATE_WAKE, /* Wake up from suspend-to-idle. */ +enum freeze_state { + FREEZE_STATE_NONE, /* Not suspended/suspending. */ + FREEZE_STATE_ENTER, /* Enter suspend-to-idle. */ + FREEZE_STATE_WAKE, /* Wake up from suspend-to-idle. */ }; -extern enum s2idle_states __read_mostly s2idle_state; +extern enum freeze_state __read_mostly suspend_freeze_state; -static inline bool idle_should_enter_s2idle(void) +static inline bool idle_should_freeze(void) { - return unlikely(s2idle_state == S2IDLE_STATE_ENTER); + return unlikely(suspend_freeze_state == FREEZE_STATE_ENTER); } -extern bool pm_suspend_default_s2idle(void); extern void __init pm_states_init(void); -extern void s2idle_set_ops(const struct platform_s2idle_ops *ops); -extern void s2idle_wake(void); +extern void freeze_set_ops(const struct platform_freeze_ops *ops); +extern void freeze_wake(void); /** * arch_suspend_disable_irqs - disable IRQs for suspend @@ -329,7 +268,6 @@ extern void arch_suspend_disable_irqs(void); extern void arch_suspend_enable_irqs(void); extern int pm_suspend(suspend_state_t state); -extern bool sync_on_suspend_enabled; #else /* !CONFIG_SUSPEND */ #define suspend_valid_only_mem NULL @@ -338,16 +276,13 @@ static inline void pm_set_suspend_via_firmware(void) {} static inline void pm_set_resume_via_firmware(void) {} static inline bool pm_suspend_via_firmware(void) { return false; } static inline bool pm_resume_via_firmware(void) { return false; } -static inline bool pm_suspend_no_platform(void) { return false; } -static inline bool pm_suspend_default_s2idle(void) { return false; } static inline void suspend_set_ops(const struct platform_suspend_ops *ops) {} static inline int pm_suspend(suspend_state_t state) { return -ENOSYS; } -static inline bool sync_on_suspend_enabled(void) { return true; } -static inline bool idle_should_enter_s2idle(void) { return false; } 
+static inline bool idle_should_freeze(void) { return false; } static inline void __init pm_states_init(void) {} -static inline void s2idle_set_ops(const struct platform_s2idle_ops *ops) {} -static inline void s2idle_wake(void) {} +static inline void freeze_set_ops(const struct platform_freeze_ops *ops) {} +static inline void freeze_wake(void) {} #endif /* !CONFIG_SUSPEND */ /* struct pbe is used for creating lists of pages that should be restored @@ -416,7 +351,7 @@ extern void mark_free_pages(struct zone *zone); * platforms which require special recovery actions in that situation. */ struct platform_hibernation_ops { - int (*begin)(pm_message_t stage); + int (*begin)(void); void (*end)(void); int (*pre_snapshot)(void); void (*finish)(void); @@ -443,8 +378,6 @@ extern int swsusp_page_is_forbidden(struct page *); extern void swsusp_set_page_free(struct page *); extern void swsusp_unset_page_free(struct page *); extern unsigned long get_safe_page(gfp_t gfp_mask); -extern asmlinkage int swsusp_arch_suspend(void); -extern asmlinkage int swsusp_arch_resume(void); extern void hibernation_set_ops(const struct platform_hibernation_ops *ops); extern int hibernate(void); @@ -452,9 +385,6 @@ extern bool system_entering_hibernation(void); extern bool hibernation_available(void); asmlinkage int swsusp_save(void); extern struct pbe *restore_pblist; -int pfn_is_nosave(unsigned long pfn); - -int hibernate_quiet_exec(int (*func)(void *data), void *data); #else /* CONFIG_HIBERNATION */ static inline void register_nosave_region(unsigned long b, unsigned long e) {} static inline void register_nosave_region_late(unsigned long b, unsigned long e) {} @@ -466,18 +396,8 @@ static inline void hibernation_set_ops(const struct platform_hibernation_ops *op static inline int hibernate(void) { return -ENOSYS; } static inline bool system_entering_hibernation(void) { return false; } static inline bool hibernation_available(void) { return false; } - -static inline int hibernate_quiet_exec(int 
(*func)(void *data), void *data) { - return -ENOTSUPP; -} #endif /* CONFIG_HIBERNATION */ -#ifdef CONFIG_HIBERNATION_SNAPSHOT_DEV -int is_hibernate_resume_dev(dev_t dev); -#else -static inline int is_hibernate_resume_dev(dev_t dev) { return 0; } -#endif - /* Hibernation and suspend events */ #define PM_HIBERNATION_PREPARE 0x0001 /* Going to hibernate */ #define PM_POST_HIBERNATION 0x0002 /* Hibernation finished */ @@ -486,7 +406,7 @@ static inline int is_hibernate_resume_dev(dev_t dev) { return 0; } #define PM_RESTORE_PREPARE 0x0005 /* Going to restore a saved image */ #define PM_POST_RESTORE 0x0006 /* Restore failed */ -extern struct mutex system_transition_mutex; +extern struct mutex pm_mutex; #ifdef CONFIG_PM_SLEEP void save_processor_state(void); @@ -495,7 +415,6 @@ void restore_processor_state(void); /* kernel/power/main.c */ extern int register_pm_notifier(struct notifier_block *nb); extern int unregister_pm_notifier(struct notifier_block *nb); -extern void ksys_sync_helper(void); #define pm_notifier(fn, pri) { \ static struct notifier_block fn##_nb = \ @@ -506,20 +425,42 @@ extern void ksys_sync_helper(void); /* drivers/base/power/wakeup.c */ extern bool events_check_enabled; extern unsigned int pm_wakeup_irq; -extern suspend_state_t pm_suspend_target_state; extern bool pm_wakeup_pending(void); extern void pm_system_wakeup(void); -extern void pm_system_cancel_wakeup(void); -extern void pm_wakeup_clear(bool reset); +extern void pm_wakeup_clear(void); extern void pm_system_irq_wakeup(unsigned int irq_number); extern bool pm_get_wakeup_count(unsigned int *count, bool block); extern bool pm_save_wakeup_count(unsigned int count); extern void pm_wakep_autosleep_enabled(bool set); extern void pm_print_active_wakeup_sources(void); -extern void lock_system_sleep(void); -extern void unlock_system_sleep(void); +static inline void lock_system_sleep(void) +{ + current->flags |= PF_FREEZER_SKIP; + mutex_lock(&pm_mutex); +} + +static inline void unlock_system_sleep(void) 
+{ + /* + * Don't use freezer_count() because we don't want the call to + * try_to_freeze() here. + * + * Reason: + * Fundamentally, we just don't need it, because freezing condition + * doesn't come into effect until we release the pm_mutex lock, + * since the freezer always works with pm_mutex held. + * + * More importantly, in the case of hibernation, + * unlock_system_sleep() gets called in snapshot_read() and + * snapshot_write() when the freezing condition is still in effect. + * Which means, if we use try_to_freeze() here, it would make them + * enter the refrigerator, thus causing hibernation to lockup. + */ + current->flags &= ~PF_FREEZER_SKIP; + mutex_unlock(&pm_mutex); +} #else /* !CONFIG_PM_SLEEP */ @@ -533,13 +474,11 @@ static inline int unregister_pm_notifier(struct notifier_block *nb) return 0; } -static inline void ksys_sync_helper(void) {} - #define pm_notifier(fn, pri) do { (void)(fn); } while (0) static inline bool pm_wakeup_pending(void) { return false; } static inline void pm_system_wakeup(void) {} -static inline void pm_wakeup_clear(bool reset) {} +static inline void pm_wakeup_clear(void) {} static inline void pm_system_irq_wakeup(unsigned int irq_number) {} static inline void lock_system_sleep(void) {} @@ -549,24 +488,10 @@ static inline void unlock_system_sleep(void) {} #ifdef CONFIG_PM_SLEEP_DEBUG extern bool pm_print_times_enabled; -extern bool pm_debug_messages_on; -extern __printf(2, 3) void __pm_pr_dbg(bool defer, const char *fmt, ...); #else #define pm_print_times_enabled (false) -#define pm_debug_messages_on (false) - -#include - -#define __pm_pr_dbg(defer, fmt, ...) \ - no_printk(KERN_DEBUG fmt, ##__VA_ARGS__) #endif -#define pm_pr_dbg(fmt, ...) \ - __pm_pr_dbg(false, fmt, ##__VA_ARGS__) - -#define pm_deferred_pr_dbg(fmt, ...) 
\ - __pm_pr_dbg(true, fmt, ##__VA_ARGS__) - #ifdef CONFIG_PM_AUTOSLEEP /* kernel/power/autosleep.c */ @@ -578,4 +503,38 @@ static inline void queue_up_suspend_work(void) {} #endif /* !CONFIG_PM_AUTOSLEEP */ +#ifdef CONFIG_ARCH_SAVE_PAGE_KEYS +/* + * The ARCH_SAVE_PAGE_KEYS functions can be used by an architecture + * to save/restore additional information to/from the array of page + * frame numbers in the hibernation image. For s390 this is used to + * save and restore the storage key for each page that is included + * in the hibernation image. + */ +unsigned long page_key_additional_pages(unsigned long pages); +int page_key_alloc(unsigned long pages); +void page_key_free(void); +void page_key_read(unsigned long *pfn); +void page_key_memorize(unsigned long *pfn); +void page_key_write(void *address); + +#else /* !CONFIG_ARCH_SAVE_PAGE_KEYS */ + +static inline unsigned long page_key_additional_pages(unsigned long pages) +{ + return 0; +} + +static inline int page_key_alloc(unsigned long pages) +{ + return 0; +} + +static inline void page_key_free(void) {} +static inline void page_key_read(unsigned long *pfn) {} +static inline void page_key_memorize(unsigned long *pfn) {} +static inline void page_key_write(void *address) {} + +#endif /* !CONFIG_ARCH_SAVE_PAGE_KEYS */ + #endif /* _LINUX_SUSPEND_H */ diff --git a/include/linux/svga.h b/include/linux/svga.h index 3bfe462695..bfa68e837d 100644 --- a/include/linux/svga.h +++ b/include/linux/svga.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_SVGA_H #define _LINUX_SVGA_H diff --git a/include/linux/sw842.h b/include/linux/sw842.h index 3e29f5dcc6..109ba041c2 100644 --- a/include/linux/sw842.h +++ b/include/linux/sw842.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef __SW842_H__ #define __SW842_H__ diff --git a/include/linux/swab.h b/include/linux/swab.h index bcff514986..9ad3c60f6c 100644 --- a/include/linux/swab.h +++ b/include/linux/swab.h @@ -1,4 +1,3 @@ -/* 
SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_SWAB_H #define _LINUX_SWAB_H @@ -7,7 +6,6 @@ # define swab16 __swab16 # define swab32 __swab32 # define swab64 __swab64 -# define swab __swab # define swahw32 __swahw32 # define swahb32 __swahb32 # define swab16p __swab16p diff --git a/include/linux/swait.h b/include/linux/swait.h index 6a8c22b8c2..c1f9c62a8a 100644 --- a/include/linux/swait.h +++ b/include/linux/swait.h @@ -1,41 +1,35 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_SWAIT_H #define _LINUX_SWAIT_H #include #include #include -#include #include /* - * Simple waitqueues are semantically very different to regular wait queues - * (wait.h). The most important difference is that the simple waitqueue allows - * for deterministic behaviour -- IOW it has strictly bounded IRQ and lock hold + * Simple wait queues + * + * While these are very similar to the other/complex wait queues (wait.h) the + * most important difference is that the simple waitqueue allows for + * deterministic behaviour -- IOW it has strictly bounded IRQ and lock hold * times. * - * Mainly, this is accomplished by two things. Firstly not allowing swake_up_all - * from IRQ disabled, and dropping the lock upon every wakeup, giving a higher - * priority task a chance to run. - * - * Secondly, we had to drop a fair number of features of the other waitqueue - * code; notably: + * In order to make this so, we had to drop a fair number of features of the + * other waitqueue code; notably: * * - mixing INTERRUPTIBLE and UNINTERRUPTIBLE sleeps on the same waitqueue; * all wakeups are TASK_NORMAL in order to avoid O(n) lookups for the right * sleeper state. * - * - the !exclusive mode; because that leads to O(n) wakeups, everything is - * exclusive. As such swake_up_one will only ever awake _one_ waiter. + * - the exclusive mode; because this requires preserving the list order + * and this is hard. 
* - * - custom wake callback functions; because you cannot give any guarantees - * about random code. This also allows swait to be used in RT, such that - * raw spinlock can be used for the swait queue head. + * - custom wake functions; because you cannot give any guarantees about + * random code. * - * As a side effect of these; the data structures are slimmer albeit more ad-hoc. - * For all the above, note that simple wait queues should _only_ be used under - * very specific realtime constraints -- it is best to stick with the regular - * wait queues in most cases. + * As a side effect of this; the data structures are slimmer. + * + * One would recommend using this wait queue where possible. */ struct task_struct; @@ -85,79 +79,25 @@ extern void __init_swait_queue_head(struct swait_queue_head *q, const char *name DECLARE_SWAIT_QUEUE_HEAD(name) #endif -/** - * swait_active -- locklessly test for waiters on the queue - * @wq: the waitqueue to test for waiters - * - * returns true if the wait list is not empty - * - * NOTE: this function is lockless and requires care, incorrect usage _will_ - * lead to sporadic and non-obvious failure. - * - * NOTE2: this function has the same above implications as regular waitqueues. - * - * Use either while holding swait_queue_head::lock or when used for wakeups - * with an extra smp_mb() like: - * - * CPU0 - waker CPU1 - waiter - * - * for (;;) { - * @cond = true; prepare_to_swait_exclusive(&wq_head, &wait, state); - * smp_mb(); // smp_mb() from set_current_state() - * if (swait_active(wq_head)) if (@cond) - * wake_up(wq_head); break; - * schedule(); - * } - * finish_swait(&wq_head, &wait); - * - * Because without the explicit smp_mb() it's possible for the - * swait_active() load to get hoisted over the @cond store such that we'll - * observe an empty wait list while the waiter might not observe @cond. - * This, in turn, can trigger missing wakeups. 
- * - * Also note that this 'optimization' trades a spin_lock() for an smp_mb(), - * which (when the lock is uncontended) are of roughly equal cost. - */ -static inline int swait_active(struct swait_queue_head *wq) +static inline int swait_active(struct swait_queue_head *q) { - return !list_empty(&wq->task_list); + return !list_empty(&q->task_list); } -/** - * swq_has_sleeper - check if there are any waiting processes - * @wq: the waitqueue to test for waiters - * - * Returns true if @wq has waiting processes - * - * Please refer to the comment for swait_active. - */ -static inline bool swq_has_sleeper(struct swait_queue_head *wq) -{ - /* - * We need to be sure we are in sync with the list_add() - * modifications to the wait queue (task_list). - * - * This memory barrier should be paired with one on the - * waiting side. - */ - smp_mb(); - return swait_active(wq); -} - -extern void swake_up_one(struct swait_queue_head *q); +extern void swake_up(struct swait_queue_head *q); extern void swake_up_all(struct swait_queue_head *q); extern void swake_up_locked(struct swait_queue_head *q); -extern void prepare_to_swait_exclusive(struct swait_queue_head *q, struct swait_queue *wait, int state); +extern void __prepare_to_swait(struct swait_queue_head *q, struct swait_queue *wait); +extern void prepare_to_swait(struct swait_queue_head *q, struct swait_queue *wait, int state); extern long prepare_to_swait_event(struct swait_queue_head *q, struct swait_queue *wait, int state); extern void __finish_swait(struct swait_queue_head *q, struct swait_queue *wait); extern void finish_swait(struct swait_queue_head *q, struct swait_queue *wait); -/* as per ___wait_event() but for swait, therefore "exclusive == 1" */ +/* as per ___wait_event() but for swait, therefore "exclusive == 0" */ #define ___swait_event(wq, condition, state, ret, cmd) \ ({ \ - __label__ __out; \ struct swait_queue __wait; \ long __ret = ret; \ \ @@ -170,20 +110,20 @@ extern void finish_swait(struct swait_queue_head 
*q, struct swait_queue *wait); \ if (___wait_is_interruptible(state) && __int) { \ __ret = __int; \ - goto __out; \ + break; \ } \ \ cmd; \ } \ finish_swait(&wq, &__wait); \ -__out: __ret; \ + __ret; \ }) #define __swait_event(wq, condition) \ (void)___swait_event(wq, condition, TASK_UNINTERRUPTIBLE, 0, \ schedule()) -#define swait_event_exclusive(wq, condition) \ +#define swait_event(wq, condition) \ do { \ if (condition) \ break; \ @@ -195,7 +135,7 @@ do { \ TASK_UNINTERRUPTIBLE, timeout, \ __ret = schedule_timeout(__ret)) -#define swait_event_timeout_exclusive(wq, condition, timeout) \ +#define swait_event_timeout(wq, condition, timeout) \ ({ \ long __ret = timeout; \ if (!___wait_cond_timeout(condition)) \ @@ -207,7 +147,7 @@ do { \ ___swait_event(wq, condition, TASK_INTERRUPTIBLE, 0, \ schedule()) -#define swait_event_interruptible_exclusive(wq, condition) \ +#define swait_event_interruptible(wq, condition) \ ({ \ int __ret = 0; \ if (!(condition)) \ @@ -220,7 +160,7 @@ do { \ TASK_INTERRUPTIBLE, timeout, \ __ret = schedule_timeout(__ret)) -#define swait_event_interruptible_timeout_exclusive(wq, condition, timeout)\ +#define swait_event_interruptible_timeout(wq, condition, timeout) \ ({ \ long __ret = timeout; \ if (!___wait_cond_timeout(condition)) \ @@ -229,59 +169,4 @@ do { \ __ret; \ }) -#define __swait_event_idle(wq, condition) \ - (void)___swait_event(wq, condition, TASK_IDLE, 0, schedule()) - -/** - * swait_event_idle_exclusive - wait without system load contribution - * @wq: the waitqueue to wait on - * @condition: a C expression for the event to wait for - * - * The process is put to sleep (TASK_IDLE) until the @condition evaluates to - * true. The @condition is checked each time the waitqueue @wq is woken up. - * - * This function is mostly used when a kthread or workqueue waits for some - * condition and doesn't want to contribute to system load. Signals are - * ignored. 
- */ -#define swait_event_idle_exclusive(wq, condition) \ -do { \ - if (condition) \ - break; \ - __swait_event_idle(wq, condition); \ -} while (0) - -#define __swait_event_idle_timeout(wq, condition, timeout) \ - ___swait_event(wq, ___wait_cond_timeout(condition), \ - TASK_IDLE, timeout, \ - __ret = schedule_timeout(__ret)) - -/** - * swait_event_idle_timeout_exclusive - wait up to timeout without load contribution - * @wq: the waitqueue to wait on - * @condition: a C expression for the event to wait for - * @timeout: timeout at which we'll give up in jiffies - * - * The process is put to sleep (TASK_IDLE) until the @condition evaluates to - * true. The @condition is checked each time the waitqueue @wq is woken up. - * - * This function is mostly used when a kthread or workqueue waits for some - * condition and doesn't want to contribute to system load. Signals are - * ignored. - * - * Returns: - * 0 if the @condition evaluated to %false after the @timeout elapsed, - * 1 if the @condition evaluated to %true after the @timeout elapsed, - * or the remaining jiffies (at least 1) if the @condition evaluated - * to %true before the @timeout elapsed. 
- */ -#define swait_event_idle_timeout_exclusive(wq, condition, timeout) \ -({ \ - long __ret = timeout; \ - if (!___wait_cond_timeout(condition)) \ - __ret = __swait_event_idle_timeout(wq, \ - condition, timeout); \ - __ret; \ -}) - #endif /* _LINUX_SWAIT_H */ diff --git a/include/linux/swap.h b/include/linux/swap.h index ba52f3a347..55ff5593c1 100644 --- a/include/linux/swap.h +++ b/include/linux/swap.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_SWAP_H #define _LINUX_SWAP_H @@ -10,18 +9,14 @@ #include #include #include -#include #include #include -#include #include struct notifier_block; struct bio; -struct pagevec; - #define SWAP_FLAG_PREFER 0x8000 /* set if swap priority specified */ #define SWAP_FLAG_PRIO_MASK 0x7fff #define SWAP_FLAG_PRIO_SHIFT 0 @@ -32,7 +27,6 @@ struct pagevec; #define SWAP_FLAGS_VALID (SWAP_FLAG_PRIO_MASK | SWAP_FLAG_PREFER | \ SWAP_FLAG_DISCARD | SWAP_FLAG_DISCARD_ONCE | \ SWAP_FLAG_DISCARD_PAGES) -#define SWAP_BATCH 64 static inline int current_is_kswapd(void) { @@ -55,28 +49,6 @@ static inline int current_is_kswapd(void) * actions on faults. */ -/* - * Unaddressable device memory support. See include/linux/hmm.h and - * Documentation/vm/hmm.rst. Short description is we need struct pages for - * device memory that is unaddressable (inaccessible) by CPU, so that we can - * migrate part of a process memory to device memory. - * - * When a page is migrated from CPU to device, we set the CPU page table entry - * to a special SWP_DEVICE_{READ|WRITE} entry. - * - * When a page is mapped by the device for exclusive access we set the CPU page - * table entries to special SWP_DEVICE_EXCLUSIVE_* entries. 
- */ -#ifdef CONFIG_DEVICE_PRIVATE -#define SWP_DEVICE_NUM 4 -#define SWP_DEVICE_WRITE (MAX_SWAPFILES+SWP_HWPOISON_NUM+SWP_MIGRATION_NUM) -#define SWP_DEVICE_READ (MAX_SWAPFILES+SWP_HWPOISON_NUM+SWP_MIGRATION_NUM+1) -#define SWP_DEVICE_EXCLUSIVE_WRITE (MAX_SWAPFILES+SWP_HWPOISON_NUM+SWP_MIGRATION_NUM+2) -#define SWP_DEVICE_EXCLUSIVE_READ (MAX_SWAPFILES+SWP_HWPOISON_NUM+SWP_MIGRATION_NUM+3) -#else -#define SWP_DEVICE_NUM 0 -#endif - /* * NUMA node memory migration support */ @@ -99,8 +71,7 @@ static inline int current_is_kswapd(void) #endif #define MAX_SWAPFILES \ - ((1 << MAX_SWAPFILES_SHIFT) - SWP_DEVICE_NUM - \ - SWP_MIGRATION_NUM - SWP_HWPOISON_NUM) + ((1 << MAX_SWAPFILES_SHIFT) - SWP_MIGRATION_NUM - SWP_HWPOISON_NUM) /* * Magic header for a swap area. The first part of the union is @@ -155,7 +126,7 @@ struct zone; * We always assume that blocks are of size PAGE_SIZE. */ struct swap_extent { - struct rb_node rb_node; + struct list_head list; pgoff_t start_page; pgoff_t nr_pages; sector_t start_block; @@ -164,9 +135,9 @@ struct swap_extent { /* * Max bad pages in the new format.. */ +#define __swapoffset(x) ((unsigned long)&((union swap_header *)0)->x) #define MAX_SWAP_BADPAGES \ - ((offsetof(union swap_header, magic.magic) - \ - offsetof(union swap_header, info.badpages)) / sizeof(int)) + ((__swapoffset(magic.magic) - __swapoffset(info.badpages)) / sizeof(int)) enum { SWP_USED = (1 << 0), /* is slot in swap_info[] used? 
*/ @@ -176,30 +147,23 @@ enum { SWP_SOLIDSTATE = (1 << 4), /* blkdev seeks are cheap */ SWP_CONTINUED = (1 << 5), /* swap_map has count continuation */ SWP_BLKDEV = (1 << 6), /* its a block device */ - SWP_ACTIVATED = (1 << 7), /* set after swap_activate success */ - SWP_FS_OPS = (1 << 8), /* swapfile operations go through fs */ - SWP_AREA_DISCARD = (1 << 9), /* single-time swap area discards */ - SWP_PAGE_DISCARD = (1 << 10), /* freed swap page-cluster discards */ - SWP_STABLE_WRITES = (1 << 11), /* no overwrite PG_writeback pages */ - SWP_SYNCHRONOUS_IO = (1 << 12), /* synchronous IO is efficient */ + SWP_FILE = (1 << 7), /* set after swap_activate success */ + SWP_AREA_DISCARD = (1 << 8), /* single-time swap area discards */ + SWP_PAGE_DISCARD = (1 << 9), /* freed swap page-cluster discards */ + SWP_STABLE_WRITES = (1 << 10), /* no overwrite PG_writeback pages */ /* add others here before... */ - SWP_SCANNING = (1 << 14), /* refcount in scan_swap_map */ + SWP_SCANNING = (1 << 11), /* refcount in scan_swap_map */ }; #define SWAP_CLUSTER_MAX 32UL #define COMPACT_CLUSTER_MAX SWAP_CLUSTER_MAX -/* Bit flag in swap_map */ +#define SWAP_MAP_MAX 0x3e /* Max duplication count, in first swap_map */ +#define SWAP_MAP_BAD 0x3f /* Note pageblock is bad, in first swap_map */ #define SWAP_HAS_CACHE 0x40 /* Flag page is cached, in first swap_map */ -#define COUNT_CONTINUED 0x80 /* Flag swap_map continuation for full count */ - -/* Special value in first swap_map */ -#define SWAP_MAP_MAX 0x3e /* Max count */ -#define SWAP_MAP_BAD 0x3f /* Note page is bad */ -#define SWAP_MAP_SHMEM 0xbf /* Owned by shmem/tmpfs */ - -/* Special value in each swap_map continuation */ -#define SWAP_CONT_MAX 0x7f /* Max count */ +#define SWAP_CONT_MAX 0x7f /* Max count, in each swap_map continuation */ +#define COUNT_CONTINUED 0x80 /* See swap_map continuation for full count */ +#define SWAP_MAP_SHMEM 0xbf /* Owned by shmem/tmpfs, in first swap_map */ /* * We use this to track usage of a cluster. 
A cluster is a block of swap disk @@ -212,18 +176,11 @@ enum { * protected by swap_info_struct.lock. */ struct swap_cluster_info { - spinlock_t lock; /* - * Protect swap_cluster_info fields - * and swap_info_struct->swap_map - * elements correspond to the swap - * cluster - */ unsigned int data:24; unsigned int flags:8; }; #define CLUSTER_FLAG_FREE 1 /* This cluster is free */ #define CLUSTER_FLAG_NEXT_NULL 2 /* This cluster has no next cluster */ -#define CLUSTER_FLAG_HUGE 4 /* This cluster is backing a transparent huge page */ /* * We assign a cluster to each CPU, so each CPU can allocate swap entry from @@ -244,10 +201,10 @@ struct swap_cluster_list { * The in-memory structure used to track swap areas. */ struct swap_info_struct { - struct percpu_ref users; /* indicate and keep swap device valid. */ unsigned long flags; /* SWP_USED etc: see above */ signed short prio; /* swap priority of this type */ struct plist_node list; /* entry in swap_active_head */ + struct plist_node avail_list; /* entry in swap_avail_head */ signed char type; /* strange name for an index */ unsigned int max; /* extent of the swap_map */ unsigned char *swap_map; /* vmalloc'ed array of usage counts */ @@ -259,13 +216,12 @@ struct swap_info_struct { unsigned int inuse_pages; /* number of those currently in use */ unsigned int cluster_next; /* likely index for next allocation */ unsigned int cluster_nr; /* countdown to next cluster search */ - unsigned int __percpu *cluster_next_cpu; /*percpu index for next allocation */ struct percpu_cluster __percpu *percpu_cluster; /* per cpu's swap location */ - struct rb_root swap_extent_root;/* root of the swap extent rbtree */ + struct swap_extent *curr_swap_extent; + struct swap_extent first_swap_extent; struct block_device *bdev; /* swap device or bdev of swap file */ struct file *swap_file; /* seldom referenced */ unsigned int old_block_size; /* seldom referenced */ - struct completion comp; /* seldom referenced */ #ifdef CONFIG_FRONTSWAP unsigned 
long *frontswap_map; /* frontswap in-use, one bit per page */ atomic_t frontswap_pages; /* frontswap pages in-use counter */ @@ -283,102 +239,85 @@ struct swap_info_struct { * both locks need hold, hold swap_lock * first. */ - spinlock_t cont_lock; /* - * protect swap count continuation page - * list. - */ struct work_struct discard_work; /* discard worker */ struct swap_cluster_list discard_clusters; /* discard clusters list */ - struct plist_node avail_lists[]; /* - * entries in swap_avail_heads, one - * entry per node. - * Must be last as the number of the - * array is nr_node_ids, which is not - * a fixed value so have to allocate - * dynamically. - * And it has to be an array so that - * plist_for_each_* can work. - */ -}; - -#ifdef CONFIG_64BIT -#define SWAP_RA_ORDER_CEILING 5 -#else -/* Avoid stack overflow, because we need to save part of page table */ -#define SWAP_RA_ORDER_CEILING 3 -#define SWAP_RA_PTE_CACHE_SIZE (1 << SWAP_RA_ORDER_CEILING) -#endif - -struct vma_swap_readahead { - unsigned short win; - unsigned short offset; - unsigned short nr_pte; -#ifdef CONFIG_64BIT - pte_t *ptes; -#else - pte_t ptes[SWAP_RA_PTE_CACHE_SIZE]; -#endif }; /* linux/mm/workingset.c */ -void workingset_age_nonresident(struct lruvec *lruvec, unsigned long nr_pages); -void *workingset_eviction(struct page *page, struct mem_cgroup *target_memcg); -void workingset_refault(struct page *page, void *shadow); +void *workingset_eviction(struct address_space *mapping, struct page *page); +bool workingset_refault(void *shadow); void workingset_activation(struct page *page); +extern struct list_lru workingset_shadow_nodes; -/* Only track the nodes of mappings with shadow entries */ -void workingset_update_node(struct xa_node *node); -#define mapping_set_update(xas, mapping) do { \ - if (!dax_mapping(mapping) && !shmem_mapping(mapping)) \ - xas_set_update(xas, workingset_update_node); \ -} while (0) +static inline unsigned int workingset_node_pages(struct radix_tree_node *node) +{ + 
return node->count & RADIX_TREE_COUNT_MASK; +} + +static inline void workingset_node_pages_inc(struct radix_tree_node *node) +{ + node->count++; +} + +static inline void workingset_node_pages_dec(struct radix_tree_node *node) +{ + VM_WARN_ON_ONCE(!workingset_node_pages(node)); + node->count--; +} + +static inline unsigned int workingset_node_shadows(struct radix_tree_node *node) +{ + return node->count >> RADIX_TREE_COUNT_SHIFT; +} + +static inline void workingset_node_shadows_inc(struct radix_tree_node *node) +{ + node->count += 1U << RADIX_TREE_COUNT_SHIFT; +} + +static inline void workingset_node_shadows_dec(struct radix_tree_node *node) +{ + VM_WARN_ON_ONCE(!workingset_node_shadows(node)); + node->count -= 1U << RADIX_TREE_COUNT_SHIFT; +} /* linux/mm/page_alloc.c */ +extern unsigned long totalram_pages; extern unsigned long totalreserve_pages; extern unsigned long nr_free_buffer_pages(void); +extern unsigned long nr_free_pagecache_pages(void); -/* Definition of global_zone_page_state not available yet */ -#define nr_free_pages() global_zone_page_state(NR_FREE_PAGES) +/* Definition of global_page_state not available yet */ +#define nr_free_pages() global_page_state(NR_FREE_PAGES) /* linux/mm/swap.c */ -extern void lru_note_cost(struct lruvec *lruvec, bool file, - unsigned int nr_pages); -extern void lru_note_cost_page(struct page *); extern void lru_cache_add(struct page *); +extern void lru_cache_add_anon(struct page *page); +extern void lru_cache_add_file(struct page *page); +extern void lru_add_page_tail(struct page *page, struct page *page_tail, + struct lruvec *lruvec, struct list_head *head); +extern void activate_page(struct page *); extern void mark_page_accessed(struct page *); - -extern atomic_t lru_disable_count; - -static inline bool lru_cache_disabled(void) -{ - return atomic_read(&lru_disable_count); -} - -static inline void lru_cache_enable(void) -{ - atomic_dec(&lru_disable_count); -} - -extern void lru_cache_disable(void); extern void 
lru_add_drain(void); extern void lru_add_drain_cpu(int cpu); -extern void lru_add_drain_cpu_zone(struct zone *zone); extern void lru_add_drain_all(void); extern void rotate_reclaimable_page(struct page *page); extern void deactivate_file_page(struct page *page); extern void deactivate_page(struct page *page); -extern void mark_page_lazyfree(struct page *page); extern void swap_setup(void); -extern void lru_cache_add_inactive_or_unevictable(struct page *page, +extern void add_page_to_unevictable_list(struct page *page); + +extern void lru_cache_add_active_or_unevictable(struct page *page, struct vm_area_struct *vma); /* linux/mm/vmscan.c */ extern unsigned long zone_reclaimable_pages(struct zone *zone); +extern unsigned long pgdat_reclaimable_pages(struct pglist_data *pgdat); extern unsigned long try_to_free_pages(struct zonelist *zonelist, int order, gfp_t gfp_mask, nodemask_t *mask); -extern bool __isolate_lru_page_prepare(struct page *page, isolate_mode_t mode); +extern int __isolate_lru_page(struct page *page, isolate_mode_t mode); extern unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *memcg, unsigned long nr_pages, gfp_t gfp_mask, @@ -390,33 +329,31 @@ extern unsigned long mem_cgroup_shrink_node(struct mem_cgroup *mem, extern unsigned long shrink_all_memory(unsigned long nr_pages); extern int vm_swappiness; extern int remove_mapping(struct address_space *mapping, struct page *page); +extern unsigned long vm_total_pages; -extern unsigned long reclaim_pages(struct list_head *page_list); #ifdef CONFIG_NUMA extern int node_reclaim_mode; extern int sysctl_min_unmapped_ratio; extern int sysctl_min_slab_ratio; +extern int node_reclaim(struct pglist_data *, gfp_t, unsigned int); #else #define node_reclaim_mode 0 +static inline int node_reclaim(struct pglist_data *pgdat, gfp_t mask, + unsigned int order) +{ + return 0; +} #endif -static inline bool node_reclaim_enabled(void) -{ - /* Is any node_reclaim_mode bit set? 
*/ - return node_reclaim_mode & (RECLAIM_ZONE|RECLAIM_WRITE|RECLAIM_UNMAP); -} +extern int page_evictable(struct page *page); +extern void check_move_unevictable_pages(struct page **, int nr_pages); -extern void check_move_unevictable_pages(struct pagevec *pvec); - -extern void kswapd_run(int nid); +extern int kswapd_run(int nid); extern void kswapd_stop(int nid); #ifdef CONFIG_SWAP - -#include /* for bio_end_io_t */ - /* linux/mm/page_io.c */ -extern int swap_readpage(struct page *page, bool do_poll); +extern int swap_readpage(struct page *); extern int swap_writepage(struct page *page, struct writeback_control *wbc); extern void end_swap_bio_write(struct bio *bio); extern int __swap_writepage(struct page *page, struct writeback_control *wbc, @@ -429,51 +366,29 @@ int generic_swapfile_activate(struct swap_info_struct *, struct file *, sector_t *); /* linux/mm/swap_state.c */ -/* One swap address space for each 64M swap space */ -#define SWAP_ADDRESS_SPACE_SHIFT 14 -#define SWAP_ADDRESS_SPACE_PAGES (1 << SWAP_ADDRESS_SPACE_SHIFT) -extern struct address_space *swapper_spaces[]; -#define swap_address_space(entry) \ - (&swapper_spaces[swp_type(entry)][swp_offset(entry) \ - >> SWAP_ADDRESS_SPACE_SHIFT]) -static inline unsigned long total_swapcache_pages(void) -{ - return global_node_page_state(NR_SWAPCACHE); -} - +extern struct address_space swapper_spaces[]; +#define swap_address_space(entry) (&swapper_spaces[swp_type(entry)]) +extern unsigned long total_swapcache_pages(void); extern void show_swap_cache_info(void); -extern int add_to_swap(struct page *page); -extern void *get_shadow_from_swap_cache(swp_entry_t entry); -extern int add_to_swap_cache(struct page *page, swp_entry_t entry, - gfp_t gfp, void **shadowp); -extern void __delete_from_swap_cache(struct page *page, - swp_entry_t entry, void *shadow); +extern int add_to_swap(struct page *, struct list_head *list); +extern int add_to_swap_cache(struct page *, swp_entry_t, gfp_t); +extern int 
__add_to_swap_cache(struct page *page, swp_entry_t entry); +extern void __delete_from_swap_cache(struct page *); extern void delete_from_swap_cache(struct page *); -extern void clear_shadow_from_swap_cache(int type, unsigned long begin, - unsigned long end); -extern void free_swap_cache(struct page *); extern void free_page_and_swap_cache(struct page *); extern void free_pages_and_swap_cache(struct page **, int); -extern struct page *lookup_swap_cache(swp_entry_t entry, - struct vm_area_struct *vma, - unsigned long addr); -struct page *find_get_incore_page(struct address_space *mapping, pgoff_t index); +extern struct page *lookup_swap_cache(swp_entry_t); extern struct page *read_swap_cache_async(swp_entry_t, gfp_t, - struct vm_area_struct *vma, unsigned long addr, - bool do_poll); + struct vm_area_struct *vma, unsigned long addr); extern struct page *__read_swap_cache_async(swp_entry_t, gfp_t, struct vm_area_struct *vma, unsigned long addr, bool *new_page_allocated); -extern struct page *swap_cluster_readahead(swp_entry_t entry, gfp_t flag, - struct vm_fault *vmf); -extern struct page *swapin_readahead(swp_entry_t entry, gfp_t flag, - struct vm_fault *vmf); +extern struct page *swapin_readahead(swp_entry_t, gfp_t, + struct vm_area_struct *vma, unsigned long addr); /* linux/mm/swapfile.c */ extern atomic_long_t nr_swap_pages; extern long total_swap_pages; -extern atomic_t nr_rotate_swap; -extern bool has_usable_swap(void); /* Swap 50% full? Release swapcache more aggressively.. 
*/ static inline bool vm_swap_full(void) @@ -487,66 +402,29 @@ static inline long get_nr_swap_pages(void) } extern void si_swapinfo(struct sysinfo *); -extern swp_entry_t get_swap_page(struct page *page); -extern void put_swap_page(struct page *page, swp_entry_t entry); +extern swp_entry_t get_swap_page(void); extern swp_entry_t get_swap_page_of_type(int); -extern int get_swap_pages(int n, swp_entry_t swp_entries[], int entry_size); extern int add_swap_count_continuation(swp_entry_t, gfp_t); extern void swap_shmem_alloc(swp_entry_t); extern int swap_duplicate(swp_entry_t); extern int swapcache_prepare(swp_entry_t); extern void swap_free(swp_entry_t); -extern void swapcache_free_entries(swp_entry_t *entries, int n); +extern void swapcache_free(swp_entry_t); extern int free_swap_and_cache(swp_entry_t); -int swap_type_of(dev_t device, sector_t offset); -int find_first_swap(dev_t *device); +extern int swap_type_of(dev_t, sector_t, struct block_device **); extern unsigned int count_swap_pages(int, int); +extern sector_t map_swap_page(struct page *, struct block_device **); extern sector_t swapdev_block(int, pgoff_t); extern int page_swapcount(struct page *); -extern int __swap_count(swp_entry_t entry); -extern int __swp_swapcount(swp_entry_t entry); extern int swp_swapcount(swp_entry_t entry); extern struct swap_info_struct *page_swap_info(struct page *); -extern struct swap_info_struct *swp_swap_info(swp_entry_t entry); extern bool reuse_swap_page(struct page *, int *); extern int try_to_free_swap(struct page *); struct backing_dev_info; -extern int init_swap_address_space(unsigned int type, unsigned long nr_pages); -extern void exit_swap_address_space(unsigned int type); -extern struct swap_info_struct *get_swap_device(swp_entry_t entry); -sector_t swap_page_sector(struct page *page); - -static inline void put_swap_device(struct swap_info_struct *si) -{ - percpu_ref_put(&si->users); -} #else /* CONFIG_SWAP */ -static inline int swap_readpage(struct page *page, bool 
do_poll) -{ - return 0; -} - -static inline struct swap_info_struct *swp_swap_info(swp_entry_t entry) -{ - return NULL; -} - -static inline struct swap_info_struct *get_swap_device(swp_entry_t entry) -{ - return NULL; -} - -static inline void put_swap_device(struct swap_info_struct *si) -{ -} - -static inline struct address_space *swap_address_space(swp_entry_t entry) -{ - return NULL; -} - +#define swap_address_space(entry) (NULL) #define get_nr_swap_pages() 0L #define total_swap_pages 0L #define total_swapcache_pages() 0UL @@ -559,18 +437,14 @@ static inline struct address_space *swap_address_space(swp_entry_t entry) #define free_page_and_swap_cache(page) \ put_page(page) #define free_pages_and_swap_cache(pages, nr) \ - release_pages((pages), (nr)); - -static inline void free_swap_cache(struct page *page) -{ -} + release_pages((pages), (nr), false); static inline void show_swap_cache_info(void) { } -/* used to sanity check ptes in zap_pte_range when CONFIG_SWAP=0 */ -#define free_swap_and_cache(e) is_pfn_swap_entry(e) +#define free_swap_and_cache(swp) is_migration_entry(swp) +#define swapcache_prepare(swp) is_migration_entry(swp) static inline int add_swap_count_continuation(swp_entry_t swp, gfp_t gfp_mask) { @@ -590,18 +464,12 @@ static inline void swap_free(swp_entry_t swp) { } -static inline void put_swap_page(struct page *page, swp_entry_t swp) +static inline void swapcache_free(swp_entry_t swp) { } -static inline struct page *swap_cluster_readahead(swp_entry_t entry, - gfp_t gfp_mask, struct vm_fault *vmf) -{ - return NULL; -} - static inline struct page *swapin_readahead(swp_entry_t swp, gfp_t gfp_mask, - struct vm_fault *vmf) + struct vm_area_struct *vma, unsigned long addr) { return NULL; } @@ -611,37 +479,23 @@ static inline int swap_writepage(struct page *p, struct writeback_control *wbc) return 0; } -static inline struct page *lookup_swap_cache(swp_entry_t swp, - struct vm_area_struct *vma, - unsigned long addr) +static inline struct page 
*lookup_swap_cache(swp_entry_t swp) { return NULL; } -static inline -struct page *find_get_incore_page(struct address_space *mapping, pgoff_t index) -{ - return find_get_page(mapping, index); -} - -static inline int add_to_swap(struct page *page) +static inline int add_to_swap(struct page *page, struct list_head *list) { return 0; } -static inline void *get_shadow_from_swap_cache(swp_entry_t entry) -{ - return NULL; -} - static inline int add_to_swap_cache(struct page *page, swp_entry_t entry, - gfp_t gfp_mask, void **shadowp) + gfp_t gfp_mask) { return -1; } -static inline void __delete_from_swap_cache(struct page *page, - swp_entry_t entry, void *shadow) +static inline void __delete_from_swap_cache(struct page *page) { } @@ -649,40 +503,25 @@ static inline void delete_from_swap_cache(struct page *page) { } -static inline void clear_shadow_from_swap_cache(int type, unsigned long begin, - unsigned long end) -{ -} - static inline int page_swapcount(struct page *page) { return 0; } -static inline int __swap_count(swp_entry_t entry) -{ - return 0; -} - -static inline int __swp_swapcount(swp_entry_t entry) -{ - return 0; -} - static inline int swp_swapcount(swp_entry_t entry) { return 0; } -#define reuse_swap_page(page, total_map_swapcount) \ - (page_trans_huge_mapcount(page, total_map_swapcount) == 1) +#define reuse_swap_page(page, total_mapcount) \ + (page_trans_huge_mapcount(page, total_mapcount) == 1) static inline int try_to_free_swap(struct page *page) { return 0; } -static inline swp_entry_t get_swap_page(struct page *page) +static inline swp_entry_t get_swap_page(void) { swp_entry_t entry; entry.val = 0; @@ -691,15 +530,6 @@ static inline swp_entry_t get_swap_page(struct page *page) #endif /* CONFIG_SWAP */ -#ifdef CONFIG_THP_SWAP -extern int split_swap_cluster(swp_entry_t entry); -#else -static inline int split_swap_cluster(swp_entry_t entry) -{ - return 0; -} -#endif - #ifdef CONFIG_MEMCG static inline int mem_cgroup_swappiness(struct mem_cgroup *memcg) { @@ 
-708,11 +538,12 @@ static inline int mem_cgroup_swappiness(struct mem_cgroup *memcg) return vm_swappiness; /* root ? */ - if (mem_cgroup_disabled() || mem_cgroup_is_root(memcg)) + if (mem_cgroup_disabled() || !memcg->css.parent) return vm_swappiness; return memcg->swappiness; } + #else static inline int mem_cgroup_swappiness(struct mem_cgroup *mem) { @@ -720,38 +551,10 @@ static inline int mem_cgroup_swappiness(struct mem_cgroup *mem) } #endif -#if defined(CONFIG_SWAP) && defined(CONFIG_MEMCG) && defined(CONFIG_BLK_CGROUP) -extern void __cgroup_throttle_swaprate(struct page *page, gfp_t gfp_mask); -static inline void cgroup_throttle_swaprate(struct page *page, gfp_t gfp_mask) -{ - if (mem_cgroup_disabled()) - return; - __cgroup_throttle_swaprate(page, gfp_mask); -} -#else -static inline void cgroup_throttle_swaprate(struct page *page, gfp_t gfp_mask) -{ -} -#endif - #ifdef CONFIG_MEMCG_SWAP extern void mem_cgroup_swapout(struct page *page, swp_entry_t entry); -extern int __mem_cgroup_try_charge_swap(struct page *page, swp_entry_t entry); -static inline int mem_cgroup_try_charge_swap(struct page *page, swp_entry_t entry) -{ - if (mem_cgroup_disabled()) - return 0; - return __mem_cgroup_try_charge_swap(page, entry); -} - -extern void __mem_cgroup_uncharge_swap(swp_entry_t entry, unsigned int nr_pages); -static inline void mem_cgroup_uncharge_swap(swp_entry_t entry, unsigned int nr_pages) -{ - if (mem_cgroup_disabled()) - return; - __mem_cgroup_uncharge_swap(entry, nr_pages); -} - +extern int mem_cgroup_try_charge_swap(struct page *page, swp_entry_t entry); +extern void mem_cgroup_uncharge_swap(swp_entry_t entry); extern long mem_cgroup_get_nr_swap_pages(struct mem_cgroup *memcg); extern bool mem_cgroup_swap_full(struct page *page); #else @@ -765,8 +568,7 @@ static inline int mem_cgroup_try_charge_swap(struct page *page, return 0; } -static inline void mem_cgroup_uncharge_swap(swp_entry_t entry, - unsigned int nr_pages) +static inline void 
mem_cgroup_uncharge_swap(swp_entry_t entry) { } diff --git a/include/linux/swap_cgroup.h b/include/linux/swap_cgroup.h index a12dd1c396..145306bdc9 100644 --- a/include/linux/swap_cgroup.h +++ b/include/linux/swap_cgroup.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef __LINUX_SWAP_CGROUP_H #define __LINUX_SWAP_CGROUP_H @@ -8,8 +7,7 @@ extern unsigned short swap_cgroup_cmpxchg(swp_entry_t ent, unsigned short old, unsigned short new); -extern unsigned short swap_cgroup_record(swp_entry_t ent, unsigned short id, - unsigned int nr_ents); +extern unsigned short swap_cgroup_record(swp_entry_t ent, unsigned short id); extern unsigned short lookup_swap_cgroup_id(swp_entry_t ent); extern int swap_cgroup_swapon(int type, unsigned long max_pages); extern void swap_cgroup_swapoff(int type); @@ -17,8 +15,7 @@ extern void swap_cgroup_swapoff(int type); #else static inline -unsigned short swap_cgroup_record(swp_entry_t ent, unsigned short id, - unsigned int nr_ents) +unsigned short swap_cgroup_record(swp_entry_t ent, unsigned short id) { return 0; } diff --git a/include/linux/swapfile.h b/include/linux/swapfile.h index e06febf629..388293a91e 100644 --- a/include/linux/swapfile.h +++ b/include/linux/swapfile.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_SWAPFILE_H #define _LINUX_SWAPFILE_H @@ -10,7 +9,5 @@ extern spinlock_t swap_lock; extern struct plist_head swap_active_head; extern struct swap_info_struct *swap_info[]; extern int try_to_unuse(unsigned int, bool, unsigned long); -extern unsigned long generic_max_swapfile_size(void); -extern unsigned long max_swapfile_size(void); #endif /* _LINUX_SWAPFILE_H */ diff --git a/include/linux/swapops.h b/include/linux/swapops.h index d356ab4047..84a8bef098 100644 --- a/include/linux/swapops.h +++ b/include/linux/swapops.h @@ -1,12 +1,8 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_SWAPOPS_H #define _LINUX_SWAPOPS_H #include #include -#include - -#ifdef CONFIG_MMU /* * swapcache 
pages are stored in the swapper_space radix tree. We want to @@ -20,18 +16,9 @@ * * swp_entry_t's are *never* stored anywhere in their arch-dependent format. */ -#define SWP_TYPE_SHIFT (BITS_PER_XA_VALUE - MAX_SWAPFILES_SHIFT) -#define SWP_OFFSET_MASK ((1UL << SWP_TYPE_SHIFT) - 1) - -/* Clear all flags but only keep swp_entry_t related information */ -static inline pte_t pte_swp_clear_flags(pte_t pte) -{ - if (pte_swp_soft_dirty(pte)) - pte = pte_swp_clear_soft_dirty(pte); - if (pte_swp_uffd_wp(pte)) - pte = pte_swp_clear_uffd_wp(pte); - return pte; -} +#define SWP_TYPE_SHIFT(e) ((sizeof(e.val) * 8) - \ + (MAX_SWAPFILES_SHIFT + RADIX_TREE_EXCEPTIONAL_SHIFT)) +#define SWP_OFFSET_MASK(e) ((1UL << SWP_TYPE_SHIFT(e)) - 1) /* * Store a type+offset into a swp_entry_t in an arch-independent format @@ -40,7 +27,8 @@ static inline swp_entry_t swp_entry(unsigned long type, pgoff_t offset) { swp_entry_t ret; - ret.val = (type << SWP_TYPE_SHIFT) | (offset & SWP_OFFSET_MASK); + ret.val = (type << SWP_TYPE_SHIFT(ret)) | + (offset & SWP_OFFSET_MASK(ret)); return ret; } @@ -50,7 +38,7 @@ static inline swp_entry_t swp_entry(unsigned long type, pgoff_t offset) */ static inline unsigned swp_type(swp_entry_t entry) { - return (entry.val >> SWP_TYPE_SHIFT); + return (entry.val >> SWP_TYPE_SHIFT(entry)); } /* @@ -59,14 +47,16 @@ static inline unsigned swp_type(swp_entry_t entry) */ static inline pgoff_t swp_offset(swp_entry_t entry) { - return entry.val & SWP_OFFSET_MASK; + return entry.val & SWP_OFFSET_MASK(entry); } +#ifdef CONFIG_MMU /* check whether a pte points to a swap entry */ static inline int is_swap_pte(pte_t pte) { return !pte_none(pte) && !pte_present(pte); } +#endif /* * Convert the arch-dependent pte representation of a swp_entry_t into an @@ -76,7 +66,8 @@ static inline swp_entry_t pte_to_swp_entry(pte_t pte) { swp_entry_t arch_entry; - pte = pte_swp_clear_flags(pte); + if (pte_swp_soft_dirty(pte)) + pte = pte_swp_clear_soft_dirty(pte); arch_entry = 
__pte_to_swp_entry(pte); return swp_entry(__swp_type(arch_entry), __swp_offset(arch_entry)); } @@ -97,119 +88,51 @@ static inline swp_entry_t radix_to_swp_entry(void *arg) { swp_entry_t entry; - entry.val = xa_to_value(arg); + entry.val = (unsigned long)arg >> RADIX_TREE_EXCEPTIONAL_SHIFT; return entry; } static inline void *swp_to_radix_entry(swp_entry_t entry) { - return xa_mk_value(entry.val); -} + unsigned long value; -#if IS_ENABLED(CONFIG_DEVICE_PRIVATE) -static inline swp_entry_t make_readable_device_private_entry(pgoff_t offset) -{ - return swp_entry(SWP_DEVICE_READ, offset); + value = entry.val << RADIX_TREE_EXCEPTIONAL_SHIFT; + return (void *)(value | RADIX_TREE_EXCEPTIONAL_ENTRY); } -static inline swp_entry_t make_writable_device_private_entry(pgoff_t offset) -{ - return swp_entry(SWP_DEVICE_WRITE, offset); -} - -static inline bool is_device_private_entry(swp_entry_t entry) -{ - int type = swp_type(entry); - return type == SWP_DEVICE_READ || type == SWP_DEVICE_WRITE; -} - -static inline bool is_writable_device_private_entry(swp_entry_t entry) -{ - return unlikely(swp_type(entry) == SWP_DEVICE_WRITE); -} - -static inline swp_entry_t make_readable_device_exclusive_entry(pgoff_t offset) -{ - return swp_entry(SWP_DEVICE_EXCLUSIVE_READ, offset); -} - -static inline swp_entry_t make_writable_device_exclusive_entry(pgoff_t offset) -{ - return swp_entry(SWP_DEVICE_EXCLUSIVE_WRITE, offset); -} - -static inline bool is_device_exclusive_entry(swp_entry_t entry) -{ - return swp_type(entry) == SWP_DEVICE_EXCLUSIVE_READ || - swp_type(entry) == SWP_DEVICE_EXCLUSIVE_WRITE; -} - -static inline bool is_writable_device_exclusive_entry(swp_entry_t entry) -{ - return unlikely(swp_type(entry) == SWP_DEVICE_EXCLUSIVE_WRITE); -} -#else /* CONFIG_DEVICE_PRIVATE */ -static inline swp_entry_t make_readable_device_private_entry(pgoff_t offset) -{ - return swp_entry(0, 0); -} - -static inline swp_entry_t make_writable_device_private_entry(pgoff_t offset) -{ - return swp_entry(0, 0); 
-} - -static inline bool is_device_private_entry(swp_entry_t entry) -{ - return false; -} - -static inline bool is_writable_device_private_entry(swp_entry_t entry) -{ - return false; -} - -static inline swp_entry_t make_readable_device_exclusive_entry(pgoff_t offset) -{ - return swp_entry(0, 0); -} - -static inline swp_entry_t make_writable_device_exclusive_entry(pgoff_t offset) -{ - return swp_entry(0, 0); -} - -static inline bool is_device_exclusive_entry(swp_entry_t entry) -{ - return false; -} - -static inline bool is_writable_device_exclusive_entry(swp_entry_t entry) -{ - return false; -} -#endif /* CONFIG_DEVICE_PRIVATE */ - #ifdef CONFIG_MIGRATION +static inline swp_entry_t make_migration_entry(struct page *page, int write) +{ + BUG_ON(!PageLocked(page)); + return swp_entry(write ? SWP_MIGRATION_WRITE : SWP_MIGRATION_READ, + page_to_pfn(page)); +} + static inline int is_migration_entry(swp_entry_t entry) { return unlikely(swp_type(entry) == SWP_MIGRATION_READ || swp_type(entry) == SWP_MIGRATION_WRITE); } -static inline int is_writable_migration_entry(swp_entry_t entry) +static inline int is_write_migration_entry(swp_entry_t entry) { return unlikely(swp_type(entry) == SWP_MIGRATION_WRITE); } -static inline swp_entry_t make_readable_migration_entry(pgoff_t offset) +static inline struct page *migration_entry_to_page(swp_entry_t entry) { - return swp_entry(SWP_MIGRATION_READ, offset); + struct page *p = pfn_to_page(swp_offset(entry)); + /* + * Any use of migration entries may only occur while the + * corresponding page is locked + */ + BUG_ON(!PageLocked(p)); + return p; } -static inline swp_entry_t make_writable_migration_entry(pgoff_t offset) +static inline void make_migration_entry_read(swp_entry_t *entry) { - return swp_entry(SWP_MIGRATION_WRITE, offset); + *entry = swp_entry(SWP_MIGRATION_READ, swp_offset(*entry)); } extern void __migration_entry_wait(struct mm_struct *mm, pte_t *ptep, @@ -219,127 +142,30 @@ extern void migration_entry_wait(struct mm_struct 
*mm, pmd_t *pmd, extern void migration_entry_wait_huge(struct vm_area_struct *vma, struct mm_struct *mm, pte_t *pte); #else -static inline swp_entry_t make_readable_migration_entry(pgoff_t offset) -{ - return swp_entry(0, 0); -} - -static inline swp_entry_t make_writable_migration_entry(pgoff_t offset) -{ - return swp_entry(0, 0); -} +#define make_migration_entry(page, write) swp_entry(0, 0) static inline int is_migration_entry(swp_entry_t swp) { return 0; } - +#define migration_entry_to_page(swp) NULL +static inline void make_migration_entry_read(swp_entry_t *entryp) { } static inline void __migration_entry_wait(struct mm_struct *mm, pte_t *ptep, spinlock_t *ptl) { } static inline void migration_entry_wait(struct mm_struct *mm, pmd_t *pmd, unsigned long address) { } static inline void migration_entry_wait_huge(struct vm_area_struct *vma, struct mm_struct *mm, pte_t *pte) { } -static inline int is_writable_migration_entry(swp_entry_t entry) +static inline int is_write_migration_entry(swp_entry_t entry) { return 0; } #endif -static inline struct page *pfn_swap_entry_to_page(swp_entry_t entry) -{ - struct page *p = pfn_to_page(swp_offset(entry)); - - /* - * Any use of migration entries may only occur while the - * corresponding page is locked - */ - BUG_ON(is_migration_entry(entry) && !PageLocked(p)); - - return p; -} - -/* - * A pfn swap entry is a special type of swap entry that always has a pfn stored - * in the swap offset. They are used to represent unaddressable device memory - * and to restrict access to a page undergoing migration. 
- */ -static inline bool is_pfn_swap_entry(swp_entry_t entry) -{ - return is_migration_entry(entry) || is_device_private_entry(entry) || - is_device_exclusive_entry(entry); -} - -struct page_vma_mapped_walk; - -#ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION -extern void set_pmd_migration_entry(struct page_vma_mapped_walk *pvmw, - struct page *page); - -extern void remove_migration_pmd(struct page_vma_mapped_walk *pvmw, - struct page *new); - -extern void pmd_migration_entry_wait(struct mm_struct *mm, pmd_t *pmd); - -static inline swp_entry_t pmd_to_swp_entry(pmd_t pmd) -{ - swp_entry_t arch_entry; - - if (pmd_swp_soft_dirty(pmd)) - pmd = pmd_swp_clear_soft_dirty(pmd); - if (pmd_swp_uffd_wp(pmd)) - pmd = pmd_swp_clear_uffd_wp(pmd); - arch_entry = __pmd_to_swp_entry(pmd); - return swp_entry(__swp_type(arch_entry), __swp_offset(arch_entry)); -} - -static inline pmd_t swp_entry_to_pmd(swp_entry_t entry) -{ - swp_entry_t arch_entry; - - arch_entry = __swp_entry(swp_type(entry), swp_offset(entry)); - return __swp_entry_to_pmd(arch_entry); -} - -static inline int is_pmd_migration_entry(pmd_t pmd) -{ - return !pmd_present(pmd) && is_migration_entry(pmd_to_swp_entry(pmd)); -} -#else -static inline void set_pmd_migration_entry(struct page_vma_mapped_walk *pvmw, - struct page *page) -{ - BUILD_BUG(); -} - -static inline void remove_migration_pmd(struct page_vma_mapped_walk *pvmw, - struct page *new) -{ - BUILD_BUG(); -} - -static inline void pmd_migration_entry_wait(struct mm_struct *m, pmd_t *p) { } - -static inline swp_entry_t pmd_to_swp_entry(pmd_t pmd) -{ - return swp_entry(0, 0); -} - -static inline pmd_t swp_entry_to_pmd(swp_entry_t entry) -{ - return __pmd(0); -} - -static inline int is_pmd_migration_entry(pmd_t pmd) -{ - return 0; -} -#endif - #ifdef CONFIG_MEMORY_FAILURE -extern atomic_long_t num_poisoned_pages __read_mostly; +extern atomic_long_unchecked_t num_poisoned_pages __read_mostly; /* * Support for hardware poisoned pages @@ -355,21 +181,30 @@ static inline int 
is_hwpoison_entry(swp_entry_t entry) return swp_type(entry) == SWP_HWPOISON; } -static inline unsigned long hwpoison_entry_to_pfn(swp_entry_t entry) +static inline bool test_set_page_hwpoison(struct page *page) { - return swp_offset(entry); + return TestSetPageHWPoison(page); } static inline void num_poisoned_pages_inc(void) { - atomic_long_inc(&num_poisoned_pages); + atomic_long_inc_unchecked(&num_poisoned_pages); } static inline void num_poisoned_pages_dec(void) { - atomic_long_dec(&num_poisoned_pages); + atomic_long_dec_unchecked(&num_poisoned_pages); } +static inline void num_poisoned_pages_add(long num) +{ + atomic_long_add_unchecked(num, &num_poisoned_pages); +} + +static inline void num_poisoned_pages_sub(long num) +{ + atomic_long_sub_unchecked(num, &num_poisoned_pages); +} #else static inline swp_entry_t make_hwpoison_entry(struct page *page) @@ -382,13 +217,17 @@ static inline int is_hwpoison_entry(swp_entry_t swp) return 0; } +static inline bool test_set_page_hwpoison(struct page *page) +{ + return false; +} + static inline void num_poisoned_pages_inc(void) { } #endif -#if defined(CONFIG_MEMORY_FAILURE) || defined(CONFIG_MIGRATION) || \ - defined(CONFIG_DEVICE_PRIVATE) +#if defined(CONFIG_MEMORY_FAILURE) || defined(CONFIG_MIGRATION) static inline int non_swap_entry(swp_entry_t entry) { return swp_type(entry) >= MAX_SWAPFILES; @@ -400,5 +239,4 @@ static inline int non_swap_entry(swp_entry_t entry) } #endif -#endif /* CONFIG_MMU */ #endif /* _LINUX_SWAPOPS_H */ diff --git a/include/linux/swiotlb.h b/include/linux/swiotlb.h index b0cb2a9973..d2613536fd 100644 --- a/include/linux/swiotlb.h +++ b/include/linux/swiotlb.h @@ -1,13 +1,9 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef __LINUX_SWIOTLB_H #define __LINUX_SWIOTLB_H -#include #include #include #include -#include -#include struct device; struct page; @@ -19,6 +15,8 @@ enum swiotlb_force { SWIOTLB_NO_FORCE, /* swiotlb=noforce */ }; +extern enum swiotlb_force swiotlb_force; + /* * Maximum allowable 
number of contiguous slabs to map, * must be a power of 2. What is the appropriate value ? @@ -31,158 +29,102 @@ enum swiotlb_force { * controllable. */ #define IO_TLB_SHIFT 11 -#define IO_TLB_SIZE (1 << IO_TLB_SHIFT) - -/* default to 64MB */ -#define IO_TLB_DEFAULT_SIZE (64UL<<20) extern void swiotlb_init(int verbose); int swiotlb_init_with_tbl(char *tlb, unsigned long nslabs, int verbose); +extern unsigned long swiotlb_nr_tbl(void); unsigned long swiotlb_size_or_default(void); extern int swiotlb_late_init_with_tbl(char *tlb, unsigned long nslabs); -extern int swiotlb_late_init_with_default_size(size_t default_size); -extern void __init swiotlb_update_mem_attributes(void); -phys_addr_t swiotlb_tbl_map_single(struct device *hwdev, phys_addr_t phys, - size_t mapping_size, size_t alloc_size, - enum dma_data_direction dir, unsigned long attrs); +/* + * Enumeration for sync targets + */ +enum dma_sync_target { + SYNC_FOR_CPU = 0, + SYNC_FOR_DEVICE = 1, +}; + +/* define the last possible byte of physical address space as a mapping error */ +#define SWIOTLB_MAP_ERROR (~(phys_addr_t)0x0) + +extern phys_addr_t swiotlb_tbl_map_single(struct device *hwdev, + dma_addr_t tbl_dma_addr, + phys_addr_t phys, size_t size, + enum dma_data_direction dir); extern void swiotlb_tbl_unmap_single(struct device *hwdev, phys_addr_t tlb_addr, - size_t mapping_size, - enum dma_data_direction dir, - unsigned long attrs); + size_t size, enum dma_data_direction dir); -void swiotlb_sync_single_for_device(struct device *dev, phys_addr_t tlb_addr, - size_t size, enum dma_data_direction dir); -void swiotlb_sync_single_for_cpu(struct device *dev, phys_addr_t tlb_addr, - size_t size, enum dma_data_direction dir); -dma_addr_t swiotlb_map(struct device *dev, phys_addr_t phys, - size_t size, enum dma_data_direction dir, unsigned long attrs); +extern void swiotlb_tbl_sync_single(struct device *hwdev, + phys_addr_t tlb_addr, + size_t size, enum dma_data_direction dir, + enum dma_sync_target target); + +/* 
Accessory functions. */ +extern void +*swiotlb_alloc_coherent(struct device *hwdev, size_t size, + dma_addr_t *dma_handle, gfp_t flags); + +extern void +swiotlb_free_coherent(struct device *hwdev, size_t size, + void *vaddr, dma_addr_t dma_handle); + +extern dma_addr_t swiotlb_map_page(struct device *dev, struct page *page, + unsigned long offset, size_t size, + enum dma_data_direction dir, + unsigned long attrs); +extern void swiotlb_unmap_page(struct device *hwdev, dma_addr_t dev_addr, + size_t size, enum dma_data_direction dir, + unsigned long attrs); + +extern int +swiotlb_map_sg(struct device *hwdev, struct scatterlist *sg, int nents, + enum dma_data_direction dir); + +extern void +swiotlb_unmap_sg(struct device *hwdev, struct scatterlist *sg, int nents, + enum dma_data_direction dir); + +extern int +swiotlb_map_sg_attrs(struct device *hwdev, struct scatterlist *sgl, int nelems, + enum dma_data_direction dir, + unsigned long attrs); + +extern void +swiotlb_unmap_sg_attrs(struct device *hwdev, struct scatterlist *sgl, + int nelems, enum dma_data_direction dir, + unsigned long attrs); + +extern void +swiotlb_sync_single_for_cpu(struct device *hwdev, dma_addr_t dev_addr, + size_t size, enum dma_data_direction dir); + +extern void +swiotlb_sync_sg_for_cpu(struct device *hwdev, struct scatterlist *sg, + int nelems, enum dma_data_direction dir); + +extern void +swiotlb_sync_single_for_device(struct device *hwdev, dma_addr_t dev_addr, + size_t size, enum dma_data_direction dir); + +extern void +swiotlb_sync_sg_for_device(struct device *hwdev, struct scatterlist *sg, + int nelems, enum dma_data_direction dir); + +extern int +swiotlb_dma_mapping_error(struct device *hwdev, dma_addr_t dma_addr); + +extern int +swiotlb_dma_supported(struct device *hwdev, u64 mask); #ifdef CONFIG_SWIOTLB -extern enum swiotlb_force swiotlb_force; - -/** - * struct io_tlb_mem - IO TLB Memory Pool Descriptor - * - * @start: The start address of the swiotlb memory pool. 
Used to do a quick - * range check to see if the memory was in fact allocated by this - * API. - * @end: The end address of the swiotlb memory pool. Used to do a quick - * range check to see if the memory was in fact allocated by this - * API. - * @nslabs: The number of IO TLB blocks (in groups of 64) between @start and - * @end. For default swiotlb, this is command line adjustable via - * setup_io_tlb_npages. - * @used: The number of used IO TLB block. - * @list: The free list describing the number of free entries available - * from each index. - * @index: The index to start searching in the next round. - * @orig_addr: The original address corresponding to a mapped entry. - * @alloc_size: Size of the allocated buffer. - * @lock: The lock to protect the above data structures in the map and - * unmap calls. - * @debugfs: The dentry to debugfs. - * @late_alloc: %true if allocated using the page allocator - * @force_bounce: %true if swiotlb bouncing is forced - * @for_alloc: %true if the pool is used for memory allocation - */ -struct io_tlb_mem { - phys_addr_t start; - phys_addr_t end; - unsigned long nslabs; - unsigned long used; - unsigned int index; - spinlock_t lock; - struct dentry *debugfs; - bool late_alloc; - bool force_bounce; - bool for_alloc; - struct io_tlb_slot { - phys_addr_t orig_addr; - size_t alloc_size; - unsigned int list; - } *slots; -}; -extern struct io_tlb_mem io_tlb_default_mem; - -static inline bool is_swiotlb_buffer(struct device *dev, phys_addr_t paddr) -{ - struct io_tlb_mem *mem = dev->dma_io_tlb_mem; - - return mem && paddr >= mem->start && paddr < mem->end; -} - -static inline bool is_swiotlb_force_bounce(struct device *dev) -{ - struct io_tlb_mem *mem = dev->dma_io_tlb_mem; - - return mem && mem->force_bounce; -} - -void __init swiotlb_exit(void); -unsigned int swiotlb_max_segment(void); -size_t swiotlb_max_mapping_size(struct device *dev); -bool is_swiotlb_active(struct device *dev); -void __init swiotlb_adjust_size(unsigned long 
size); +extern void __init swiotlb_free(void); #else -#define swiotlb_force SWIOTLB_NO_FORCE -static inline bool is_swiotlb_buffer(struct device *dev, phys_addr_t paddr) -{ - return false; -} -static inline bool is_swiotlb_force_bounce(struct device *dev) -{ - return false; -} -static inline void swiotlb_exit(void) -{ -} -static inline unsigned int swiotlb_max_segment(void) -{ - return 0; -} -static inline size_t swiotlb_max_mapping_size(struct device *dev) -{ - return SIZE_MAX; -} - -static inline bool is_swiotlb_active(struct device *dev) -{ - return false; -} - -static inline void swiotlb_adjust_size(unsigned long size) -{ -} -#endif /* CONFIG_SWIOTLB */ +static inline void swiotlb_free(void) { } +#endif extern void swiotlb_print_info(void); -extern void swiotlb_set_max_segment(unsigned int); - -#ifdef CONFIG_DMA_RESTRICTED_POOL -struct page *swiotlb_alloc(struct device *dev, size_t size); -bool swiotlb_free(struct device *dev, struct page *page, size_t size); - -static inline bool is_swiotlb_for_alloc(struct device *dev) -{ - return dev->dma_io_tlb_mem->for_alloc; -} -#else -static inline struct page *swiotlb_alloc(struct device *dev, size_t size) -{ - return NULL; -} -static inline bool swiotlb_free(struct device *dev, struct page *page, - size_t size) -{ - return false; -} -static inline bool is_swiotlb_for_alloc(struct device *dev) -{ - return false; -} -#endif /* CONFIG_DMA_RESTRICTED_POOL */ +extern int is_swiotlb_buffer(phys_addr_t paddr); #endif /* __LINUX_SWIOTLB_H */ diff --git a/include/linux/sxgbe_platform.h b/include/linux/sxgbe_platform.h index 966146f726..a62442cf00 100644 --- a/include/linux/sxgbe_platform.h +++ b/include/linux/sxgbe_platform.h @@ -1,17 +1,18 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* - * 10G controller driver for Samsung Exynos SoCs + * 10G controller driver for Samsung EXYNOS SoCs * * Copyright (C) 2013 Samsung Electronics Co., Ltd. 
* http://www.samsung.com * * Author: Siva Reddy Kallam + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. */ #ifndef __SXGBE_PLATFORM_H__ #define __SXGBE_PLATFORM_H__ -#include - /* MDC Clock Selection define*/ #define SXGBE_CSR_100_150M 0x0 /* MDC = clk_scr_i/62 */ #define SXGBE_CSR_150_250M 0x1 /* MDC = clk_scr_i/102 */ @@ -40,7 +41,7 @@ struct sxgbe_plat_data { char *phy_bus_name; int bus_id; int phy_addr; - phy_interface_t interface; + int interface; struct sxgbe_mdio_bus_data *mdio_bus_data; struct sxgbe_dma_cfg *dma_cfg; int clk_csr; diff --git a/include/linux/sync_file.h b/include/linux/sync_file.h index 790ca02120..aa17ccfc2f 100644 --- a/include/linux/sync_file.h +++ b/include/linux/sync_file.h @@ -14,49 +14,40 @@ #define _LINUX_SYNC_FILE_H #include +#include #include #include #include -#include -#include +#include +#include /** * struct sync_file - sync file to export to the userspace * @file: file representing this fence + * @kref: reference count on fence. + * @name: name of sync_file. Useful for debugging * @sync_file_list: membership in global file list * @wq: wait queue for fence signaling - * @flags: flags for the sync_file * @fence: fence with the fences in the sync_file * @cb: fence callback information - * - * flags: - * POLL_ENABLED: whether userspace is currently poll()'ing or not */ struct sync_file { struct file *file; - /** - * @user_name: - * - * Name of the sync file provided by userspace, for merged fences. - * Otherwise generated through driver callbacks (in which case the - * entire array is 0). 
- */ - char user_name[32]; + struct kref kref; + char name[32]; #ifdef CONFIG_DEBUG_FS struct list_head sync_file_list; #endif wait_queue_head_t wq; - unsigned long flags; - struct dma_fence *fence; - struct dma_fence_cb cb; + struct fence *fence; + struct fence_cb cb; }; -#define POLL_ENABLED 0 +#define POLL_ENABLED FENCE_FLAG_USER_BITS -struct sync_file *sync_file_create(struct dma_fence *fence); -struct dma_fence *sync_file_get_fence(int fd); -char *sync_file_get_name(struct sync_file *sync_file, char *buf, int len); +struct sync_file *sync_file_create(struct fence *fence); +struct fence *sync_file_get_fence(int fd); #endif /* _LINUX_SYNC_H */ diff --git a/include/linux/sys.h b/include/linux/sys.h index 3ebd9812f2..daa6008bfd 100644 --- a/include/linux/sys.h +++ b/include/linux/sys.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_SYS_H #define _LINUX_SYS_H diff --git a/include/linux/sys_soc.h b/include/linux/sys_soc.h index d9b3cf0f41..2739ccb695 100644 --- a/include/linux/sys_soc.h +++ b/include/linux/sys_soc.h @@ -1,7 +1,7 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright (C) ST-Ericsson SA 2011 * Author: Lee Jones for ST-Ericsson. 
+ * License terms: GNU General Public License (GPL), version 2 */ #ifndef __SOC_BUS_H #define __SOC_BUS_H @@ -12,10 +12,7 @@ struct soc_device_attribute { const char *machine; const char *family; const char *revision; - const char *serial_number; const char *soc_id; - const void *data; - const struct attribute_group *custom_attr_group; }; /** @@ -37,12 +34,4 @@ void soc_device_unregister(struct soc_device *soc_dev); */ struct device *soc_device_to_device(struct soc_device *soc); -#ifdef CONFIG_SOC_BUS -const struct soc_device_attribute *soc_device_match( - const struct soc_device_attribute *matches); -#else -static inline const struct soc_device_attribute *soc_device_match( - const struct soc_device_attribute *matches) { return NULL; } -#endif - #endif /* __SOC_BUS_H */ diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h index 252243c778..a3666d45fe 100644 --- a/include/linux/syscalls.h +++ b/include/linux/syscalls.h @@ -1,22 +1,24 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * syscalls.h - Linux syscall interfaces (non-arch-specific) * * Copyright (c) 2004 Randy Dunlap * Copyright (c) 2004 Open Source Development Labs + * + * This file is released under the GPLv2. + * See the file COPYING for more details. 
*/ #ifndef _LINUX_SYSCALLS_H #define _LINUX_SYSCALLS_H -struct __aio_sigset; struct epoll_event; struct iattr; struct inode; struct iocb; struct io_event; struct iovec; -struct __kernel_old_itimerval; +struct itimerspec; +struct itimerval; struct kexec_segment; struct linux_dirent; struct linux_dirent64; @@ -46,31 +48,24 @@ struct stat; struct stat64; struct statfs; struct statfs64; -struct statx; +struct __sysctl_args; struct sysinfo; struct timespec; -struct __kernel_old_timeval; -struct __kernel_timex; +struct timeval; +struct timex; struct timezone; struct tms; struct utimbuf; struct mq_attr; struct compat_stat; -struct old_timeval32; +struct compat_timeval; struct robust_list_head; struct getcpu_cache; struct old_linux_dirent; struct perf_event_attr; struct file_handle; struct sigaltstack; -struct rseq; union bpf_attr; -struct io_uring_params; -struct clone_args; -struct open_how; -struct mount_attr; -struct landlock_ruleset_attr; -enum landlock_rule_type; #include #include @@ -83,20 +78,8 @@ enum landlock_rule_type; #include #include #include -#include #include -#ifdef CONFIG_ARCH_HAS_SYSCALL_WRAPPER -/* - * It may be useful for an architecture to override the definitions of the - * SYSCALL_DEFINE0() and __SYSCALL_DEFINEx() macros, in particular to use a - * different calling convention for syscalls. To allow for that, the prototypes - * for the sys_*() functions below will *not* be included if - * CONFIG_ARCH_HAS_SYSCALL_WRAPPER is enabled. - */ -#include -#endif /* CONFIG_ARCH_HAS_SYSCALL_WRAPPER */ - /* * __MAP - apply a macro to syscall arguments * __MAP(n, m, t1, a1, t2, a2, ..., tn, an) will expand to @@ -107,7 +90,7 @@ enum landlock_rule_type; * for SYSCALL_DEFINE/COMPAT_SYSCALL_DEFINE */ #define __MAP0(m,...) -#define __MAP1(m,t,a,...) m(t,a) +#define __MAP1(m,t,a) m(t,a) #define __MAP2(m,t,a,...) m(t,a), __MAP1(m,__VA_ARGS__) #define __MAP3(m,t,a,...) m(t,a), __MAP2(m,__VA_ARGS__) #define __MAP4(m,t,a,...) 
m(t,a), __MAP3(m,__VA_ARGS__) @@ -115,13 +98,29 @@ enum landlock_rule_type; #define __MAP6(m,t,a,...) m(t,a), __MAP5(m,__VA_ARGS__) #define __MAP(n,...) __MAP##n(__VA_ARGS__) +#define __RAP_MAP0(m,...) __RAP_MAP1(m,__VA_ARGS__,void,a) +#define __RAP_MAP1(m,...) __RAP_MAP2(m,__VA_ARGS__,void,b) +#define __RAP_MAP2(m,...) __RAP_MAP3(m,__VA_ARGS__,void,c) +#define __RAP_MAP3(m,...) __RAP_MAP4(m,__VA_ARGS__,void,d) +#define __RAP_MAP4(m,...) __RAP_MAP5(m,__VA_ARGS__,void,e) +#define __RAP_MAP5(m,...) __RAP_MAP6(m,__VA_ARGS__,void,f) +#define __RAP_MAP6(m,...) __MAP6(m,__VA_ARGS__) +#define __RAP_MAP(n,...) __RAP_MAP##n(__VA_ARGS__) + #define __SC_DECL(t, a) t a -#define __TYPE_AS(t, v) __same_type((__force t)0, v) -#define __TYPE_IS_L(t) (__TYPE_AS(t, 0L)) -#define __TYPE_IS_UL(t) (__TYPE_AS(t, 0UL)) -#define __TYPE_IS_LL(t) (__TYPE_AS(t, 0LL) || __TYPE_AS(t, 0ULL)) -#define __SC_LONG(t, a) __typeof(__builtin_choose_expr(__TYPE_IS_LL(t), 0LL, 0L)) a -#define __SC_CAST(t, a) (__force t) a +#define __TYPE_IS_L(t) (__same_type((t)0, 0L)) +#define __TYPE_IS_UL(t) (__same_type((t)0, 0UL)) +#define __TYPE_IS_LL(t) (__same_type((t)0, 0LL) || __same_type((t)0, 0ULL)) +#define __SC_TYPE(t) __typeof__( \ + __builtin_choose_expr( \ + sizeof(t) > sizeof(int), \ + (t) 0, \ + __builtin_choose_expr(__type_is_unsigned(t), 0UL, 0L) \ + )) +#define __SC_LONG(t, a) __SC_TYPE(t) a +#define __RAP_SC_LONG(t, a) unsigned long a +#define __SC_WRAP(t, a) (__SC_TYPE(t)) a +#define __SC_CAST(t, a) (t) a #define __SC_ARGS(t, a) a #define __SC_TEST(t, a) (void)BUILD_BUG_ON_ZERO(!__TYPE_IS_LL(t) && sizeof(t) > sizeof(long)) @@ -147,7 +146,7 @@ extern struct trace_event_functions exit_syscall_print_funcs; .flags = TRACE_EVENT_FL_CAP_ANY, \ }; \ static struct trace_event_call __used \ - __section("_ftrace_events") \ + __attribute__((section("_ftrace_events"))) \ *__event_enter_##sname = &event_enter_##sname; #define SYSCALL_TRACE_EXIT_EVENT(sname) \ @@ -163,7 +162,7 @@ extern struct 
trace_event_functions exit_syscall_print_funcs; .flags = TRACE_EVENT_FL_CAP_ANY, \ }; \ static struct trace_event_call __used \ - __section("_ftrace_events") \ + __attribute__((section("_ftrace_events"))) \ *__event_exit_##sname = &event_exit_##sname; #define SYSCALL_METADATA(sname, nb, ...) \ @@ -187,31 +186,26 @@ extern struct trace_event_functions exit_syscall_print_funcs; .enter_fields = LIST_HEAD_INIT(__syscall_meta_##sname.enter_fields), \ }; \ static struct syscall_metadata __used \ - __section("__syscalls_metadata") \ + __attribute__((section("__syscalls_metadata"))) \ *__p_syscall_meta_##sname = &__syscall_meta_##sname; - -static inline int is_syscall_trace_event(struct trace_event_call *tp_event) -{ - return tp_event->class == &event_class_syscall_enter || - tp_event->class == &event_class_syscall_exit; -} - #else #define SYSCALL_METADATA(sname, nb, ...) - -static inline int is_syscall_trace_event(struct trace_event_call *tp_event) -{ - return 0; -} #endif -#ifndef SYSCALL_DEFINE0 +#ifdef CONFIG_PAX_RAP +#define RAP_SYSCALL_DEFINE0(sname) \ + asmlinkage long rap_sys_##sname(unsigned long a, unsigned long b, unsigned long c, unsigned long d, unsigned long e, unsigned long f)\ + { \ + return sys_##sname(); \ + } +#else +#define RAP_SYSCALL_DEFINE0(sname) +#endif + #define SYSCALL_DEFINE0(sname) \ SYSCALL_METADATA(_##sname, 0); \ - asmlinkage long sys_##sname(void); \ - ALLOW_ERROR_INJECTION(sys_##sname, ERRNO); \ + RAP_SYSCALL_DEFINE0(sname) \ asmlinkage long sys_##sname(void) -#endif /* SYSCALL_DEFINE0 */ #define SYSCALL_DEFINE1(name, ...) SYSCALL_DEFINEx(1, _##name, __VA_ARGS__) #define SYSCALL_DEFINE2(name, ...) SYSCALL_DEFINEx(2, _##name, __VA_ARGS__) @@ -220,135 +214,258 @@ static inline int is_syscall_trace_event(struct trace_event_call *tp_event) #define SYSCALL_DEFINE5(name, ...) SYSCALL_DEFINEx(5, _##name, __VA_ARGS__) #define SYSCALL_DEFINE6(name, ...) 
SYSCALL_DEFINEx(6, _##name, __VA_ARGS__) -#define SYSCALL_DEFINE_MAXARGS 6 - #define SYSCALL_DEFINEx(x, sname, ...) \ SYSCALL_METADATA(sname, x, __VA_ARGS__) \ __SYSCALL_DEFINEx(x, sname, __VA_ARGS__) #define __PROTECT(...) asmlinkage_protect(__VA_ARGS__) -/* - * The asmlinkage stub is aliased to a function named __se_sys_*() which - * sign-extends 32-bit ints to longs whenever needed. The actual work is - * done within __do_sys_*(). - */ -#ifndef __SYSCALL_DEFINEx -#define __SYSCALL_DEFINEx(x, name, ...) \ - __diag_push(); \ - __diag_ignore(GCC, 8, "-Wattribute-alias", \ - "Type aliasing is used to sanitize syscall arguments");\ - asmlinkage long sys##name(__MAP(x,__SC_DECL,__VA_ARGS__)) \ - __attribute__((alias(__stringify(__se_sys##name)))); \ - ALLOW_ERROR_INJECTION(sys##name, ERRNO); \ - static inline long __do_sys##name(__MAP(x,__SC_DECL,__VA_ARGS__));\ - asmlinkage long __se_sys##name(__MAP(x,__SC_LONG,__VA_ARGS__)); \ - asmlinkage long __se_sys##name(__MAP(x,__SC_LONG,__VA_ARGS__)) \ +#ifdef CONFIG_PAX_RAP +#define __RAP_SYSCALL_DEFINEx(x, name, ...) \ + asmlinkage __intentional_overflow(-1) \ + long rap_sys##name(__RAP_MAP(x,__RAP_SC_LONG,__VA_ARGS__)) \ { \ - long ret = __do_sys##name(__MAP(x,__SC_CAST,__VA_ARGS__));\ + return sys##name(__MAP(x,__SC_CAST,__VA_ARGS__)); \ + } +#else +#define __RAP_SYSCALL_DEFINEx(x, name, ...) +#endif + +#define __SYSCALL_DEFINEx(x, name, ...) 
\ + static inline long SYSC##name(__MAP(x,__SC_DECL,__VA_ARGS__)); \ + static inline asmlinkage long SyS##name(__MAP(x,__SC_LONG,__VA_ARGS__)) \ + { \ + long ret = SYSC##name(__MAP(x,__SC_CAST,__VA_ARGS__)); \ __MAP(x,__SC_TEST,__VA_ARGS__); \ __PROTECT(x, ret,__MAP(x,__SC_ARGS,__VA_ARGS__)); \ return ret; \ } \ - __diag_pop(); \ - static inline long __do_sys##name(__MAP(x,__SC_DECL,__VA_ARGS__)) -#endif /* __SYSCALL_DEFINEx */ + asmlinkage long sys##name(__MAP(x,__SC_DECL,__VA_ARGS__)) \ + { \ + return SyS##name(__MAP(x,__SC_WRAP,__VA_ARGS__)); \ + } \ + __RAP_SYSCALL_DEFINEx(x,name,__VA_ARGS__) \ + static inline long SYSC##name(__MAP(x,__SC_DECL,__VA_ARGS__)) -/* For split 64-bit arguments on 32-bit architectures */ -#ifdef __LITTLE_ENDIAN -#define SC_ARG64(name) u32, name##_lo, u32, name##_hi -#else -#define SC_ARG64(name) u32, name##_hi, u32, name##_lo -#endif -#define SC_VAL64(type, name) ((type) name##_hi << 32 | name##_lo) +asmlinkage long sys32_quotactl(unsigned int cmd, const char __user *special, + qid_t id, void __user *addr); +asmlinkage long sys_time(time_t __user *tloc); +asmlinkage long sys_stime(time_t __user *tptr); +asmlinkage long sys_gettimeofday(struct timeval __user *tv, + struct timezone __user *tz); +asmlinkage long sys_settimeofday(struct timeval __user *tv, + struct timezone __user *tz); +asmlinkage long sys_adjtimex(struct timex __user *txc_p); -#ifdef CONFIG_COMPAT -#define SYSCALL32_DEFINE1 COMPAT_SYSCALL_DEFINE1 -#define SYSCALL32_DEFINE2 COMPAT_SYSCALL_DEFINE2 -#define SYSCALL32_DEFINE3 COMPAT_SYSCALL_DEFINE3 -#define SYSCALL32_DEFINE4 COMPAT_SYSCALL_DEFINE4 -#define SYSCALL32_DEFINE5 COMPAT_SYSCALL_DEFINE5 -#define SYSCALL32_DEFINE6 COMPAT_SYSCALL_DEFINE6 -#else -#define SYSCALL32_DEFINE1 SYSCALL_DEFINE1 -#define SYSCALL32_DEFINE2 SYSCALL_DEFINE2 -#define SYSCALL32_DEFINE3 SYSCALL_DEFINE3 -#define SYSCALL32_DEFINE4 SYSCALL_DEFINE4 -#define SYSCALL32_DEFINE5 SYSCALL_DEFINE5 -#define SYSCALL32_DEFINE6 SYSCALL_DEFINE6 +asmlinkage long 
sys_times(struct tms __user *tbuf); + +asmlinkage long sys_gettid(void); +asmlinkage long sys_nanosleep(struct timespec __user *rqtp, struct timespec __user *rmtp); +asmlinkage long sys_alarm(unsigned int seconds); +asmlinkage long sys_getpid(void); +asmlinkage long sys_getppid(void); +asmlinkage long sys_getuid(void); +asmlinkage long sys_geteuid(void); +asmlinkage long sys_getgid(void); +asmlinkage long sys_getegid(void); +asmlinkage long sys_getresuid(uid_t __user *ruid, uid_t __user *euid, uid_t __user *suid); +asmlinkage long sys_getresgid(gid_t __user *rgid, gid_t __user *egid, gid_t __user *sgid); +asmlinkage long sys_getpgid(pid_t pid); +asmlinkage long sys_getpgrp(void); +asmlinkage long sys_getsid(pid_t pid); +asmlinkage long sys_getgroups(int gidsetsize, gid_t __user *grouplist); + +asmlinkage long sys_setregid(gid_t rgid, gid_t egid); +asmlinkage long sys_setgid(gid_t gid); +asmlinkage long sys_setreuid(uid_t ruid, uid_t euid); +asmlinkage long sys_setuid(uid_t uid); +asmlinkage long sys_setresuid(uid_t ruid, uid_t euid, uid_t suid); +asmlinkage long sys_setresgid(gid_t rgid, gid_t egid, gid_t sgid); +asmlinkage long sys_setfsuid(uid_t uid); +asmlinkage long sys_setfsgid(gid_t gid); +asmlinkage long sys_setpgid(pid_t pid, pid_t pgid); +asmlinkage long sys_setsid(void); +asmlinkage long sys_setgroups(int gidsetsize, gid_t __user *grouplist); + +asmlinkage long sys_acct(const char __user *name); +asmlinkage long sys_capget(cap_user_header_t header, + cap_user_data_t dataptr); +asmlinkage long sys_capset(cap_user_header_t header, + const cap_user_data_t data); +asmlinkage long sys_personality(unsigned int personality); + +asmlinkage long sys_sigpending(old_sigset_t __user *set); +asmlinkage long sys_sigprocmask(int how, old_sigset_t __user *set, + old_sigset_t __user *oset); +asmlinkage long sys_sigaltstack(const struct sigaltstack __user *uss, + struct sigaltstack __user *uoss); + +asmlinkage long sys_getitimer(int which, struct itimerval __user *value); 
+asmlinkage long sys_setitimer(int which, + struct itimerval __user *value, + struct itimerval __user *ovalue); +asmlinkage long sys_timer_create(clockid_t which_clock, + struct sigevent __user *timer_event_spec, + timer_t __user * created_timer_id); +asmlinkage long sys_timer_gettime(timer_t timer_id, + struct itimerspec __user *setting); +asmlinkage long sys_timer_getoverrun(timer_t timer_id); +asmlinkage long sys_timer_settime(timer_t timer_id, int flags, + const struct itimerspec __user *new_setting, + struct itimerspec __user *old_setting); +asmlinkage long sys_timer_delete(timer_t timer_id); +asmlinkage long sys_clock_settime(clockid_t which_clock, + const struct timespec __user *tp); +asmlinkage long sys_clock_gettime(clockid_t which_clock, + struct timespec __user *tp); +asmlinkage long sys_clock_adjtime(clockid_t which_clock, + struct timex __user *tx); +asmlinkage long sys_clock_getres(clockid_t which_clock, + struct timespec __user *tp); +asmlinkage long sys_clock_nanosleep(clockid_t which_clock, int flags, + const struct timespec __user *rqtp, + struct timespec __user *rmtp); + +asmlinkage long sys_nice(int increment); +asmlinkage long sys_sched_setscheduler(pid_t pid, int policy, + struct sched_param __user *param); +asmlinkage long sys_sched_setparam(pid_t pid, + struct sched_param __user *param); +asmlinkage long sys_sched_setattr(pid_t pid, + struct sched_attr __user *attr, + unsigned int flags); +asmlinkage long sys_sched_getscheduler(pid_t pid); +asmlinkage long sys_sched_getparam(pid_t pid, + struct sched_param __user *param); +asmlinkage long sys_sched_getattr(pid_t pid, + struct sched_attr __user *attr, + unsigned int size, + unsigned int flags); +asmlinkage long sys_sched_setaffinity(pid_t pid, unsigned int len, + unsigned long __user *user_mask_ptr); +asmlinkage long sys_sched_getaffinity(pid_t pid, unsigned int len, + unsigned long __user *user_mask_ptr); +asmlinkage long sys_sched_yield(void); +asmlinkage long sys_sched_get_priority_max(int 
policy); +asmlinkage long sys_sched_get_priority_min(int policy); +asmlinkage long sys_sched_rr_get_interval(pid_t pid, + struct timespec __user *interval); +asmlinkage long sys_setpriority(int which, int who, int niceval); +asmlinkage long sys_getpriority(int which, int who); + +asmlinkage long sys_shutdown(int, int); +asmlinkage long sys_reboot(int magic1, int magic2, unsigned int cmd, + void __user *arg); +asmlinkage long sys_restart_syscall(void); +asmlinkage long sys_kexec_load(unsigned long entry, unsigned long nr_segments, + struct kexec_segment __user *segments, + unsigned long flags); +asmlinkage long sys_kexec_file_load(int kernel_fd, int initrd_fd, + unsigned long cmdline_len, + const char __user *cmdline_ptr, + unsigned long flags); + +asmlinkage long sys_exit(int error_code); +asmlinkage long sys_exit_group(int error_code); +asmlinkage long sys_wait4(pid_t pid, int __user *stat_addr, + int options, struct rusage __user *ru); +asmlinkage long sys_waitid(int which, pid_t pid, + struct siginfo __user *infop, + int options, struct rusage __user *ru); +asmlinkage long sys_waitpid(pid_t pid, int __user *stat_addr, int options); +asmlinkage long sys_set_tid_address(int __user *tidptr); +asmlinkage long sys_futex(u32 __user *uaddr, int op, u32 val, + struct timespec __user *utime, u32 __user *uaddr2, + u32 val3); + +asmlinkage long sys_init_module(void __user *umod, unsigned long len, + const char __user *uargs); +asmlinkage long sys_delete_module(const char __user *name_user, + unsigned int flags); + +#ifdef CONFIG_OLD_SIGSUSPEND +asmlinkage long sys_sigsuspend(old_sigset_t mask); #endif -/* - * Called before coming back to user-mode. Returning to user-mode with an - * address limit different than USER_DS can allow to overwrite kernel memory. 
- */ -static inline void addr_limit_user_check(void) -{ -#ifdef TIF_FSCHECK - if (!test_thread_flag(TIF_FSCHECK)) - return; +#ifdef CONFIG_OLD_SIGSUSPEND3 +asmlinkage long sys_sigsuspend(int unused1, int unused2, old_sigset_t mask); #endif - if (CHECK_DATA_CORRUPTION(uaccess_kernel(), - "Invalid address limit on user-mode return")) - force_sig(SIGKILL); +asmlinkage long sys_rt_sigsuspend(sigset_t __user *unewset, size_t sigsetsize); -#ifdef TIF_FSCHECK - clear_thread_flag(TIF_FSCHECK); +#ifdef CONFIG_OLD_SIGACTION +asmlinkage long sys_sigaction(int, const struct old_sigaction __user *, + struct old_sigaction __user *); #endif -} -/* - * These syscall function prototypes are kept in the same order as - * include/uapi/asm-generic/unistd.h. Architecture specific entries go below, - * followed by deprecated or obsolete system calls. - * - * Please note that these prototypes here are only provided for information - * purposes, for static analysis, and for linking from the syscall table. - * These functions should not be called elsewhere from kernel code. - * - * As the syscall calling convention may be different from the default - * for architectures overriding the syscall calling convention, do not - * include the prototypes if CONFIG_ARCH_HAS_SYSCALL_WRAPPER is enabled. 
- */ -#ifndef CONFIG_ARCH_HAS_SYSCALL_WRAPPER -asmlinkage long sys_io_setup(unsigned nr_reqs, aio_context_t __user *ctx); -asmlinkage long sys_io_destroy(aio_context_t ctx); -asmlinkage long sys_io_submit(aio_context_t, long, - struct iocb __user * __user *); -asmlinkage long sys_io_cancel(aio_context_t ctx_id, struct iocb __user *iocb, - struct io_event __user *result); -asmlinkage long sys_io_getevents(aio_context_t ctx_id, - long min_nr, - long nr, - struct io_event __user *events, - struct __kernel_timespec __user *timeout); -asmlinkage long sys_io_getevents_time32(__u32 ctx_id, - __s32 min_nr, - __s32 nr, - struct io_event __user *events, - struct old_timespec32 __user *timeout); -asmlinkage long sys_io_pgetevents(aio_context_t ctx_id, - long min_nr, - long nr, - struct io_event __user *events, - struct __kernel_timespec __user *timeout, - const struct __aio_sigset *sig); -asmlinkage long sys_io_pgetevents_time32(aio_context_t ctx_id, - long min_nr, - long nr, - struct io_event __user *events, - struct old_timespec32 __user *timeout, - const struct __aio_sigset *sig); -asmlinkage long sys_io_uring_setup(u32 entries, - struct io_uring_params __user *p); -asmlinkage long sys_io_uring_enter(unsigned int fd, u32 to_submit, - u32 min_complete, u32 flags, - const void __user *argp, size_t argsz); -asmlinkage long sys_io_uring_register(unsigned int fd, unsigned int op, - void __user *arg, unsigned int nr_args); +#ifndef CONFIG_ODD_RT_SIGACTION +asmlinkage long sys_rt_sigaction(int, + const struct sigaction __user *, + struct sigaction __user *, + size_t); +#endif +asmlinkage long sys_rt_sigprocmask(int how, sigset_t __user *set, + sigset_t __user *oset, size_t sigsetsize); +asmlinkage long sys_rt_sigpending(sigset_t __user *set, size_t sigsetsize); +asmlinkage long sys_rt_sigtimedwait(const sigset_t __user *uthese, + siginfo_t __user *uinfo, + const struct timespec __user *uts, + size_t sigsetsize); +asmlinkage long sys_rt_tgsigqueueinfo(pid_t tgid, pid_t pid, int 
sig, + siginfo_t __user *uinfo); +asmlinkage long sys_kill(pid_t pid, int sig); +asmlinkage long sys_tgkill(pid_t tgid, pid_t pid, int sig); +asmlinkage long sys_tkill(pid_t pid, int sig); +asmlinkage long sys_rt_sigqueueinfo(pid_t pid, int sig, siginfo_t __user *uinfo); +asmlinkage long sys_sgetmask(void); +asmlinkage long sys_ssetmask(int newmask); +asmlinkage long sys_signal(int sig, __sighandler_t handler); +asmlinkage long sys_pause(void); + +asmlinkage long sys_sync(void); +asmlinkage long sys_fsync(unsigned int fd); +asmlinkage long sys_fdatasync(unsigned int fd); +asmlinkage long sys_bdflush(int func, long data); +asmlinkage long sys_mount(const char __user *dev_name, const char __user *dir_name, + const char __user *type, unsigned long flags, + void __user *data); +asmlinkage long sys_umount(const char __user *name, int flags); +asmlinkage long sys_oldumount(const char __user *name); +asmlinkage long sys_truncate(const char __user *path, long length); +asmlinkage long sys_ftruncate(unsigned int fd, unsigned long length); +asmlinkage long sys_stat(const char __user *filename, + struct __old_kernel_stat __user *statbuf); +asmlinkage long sys_statfs(const char __user * path, + struct statfs __user *buf); +asmlinkage long sys_statfs64(const char __user *path, size_t sz, + struct statfs64 __user *buf); +asmlinkage long sys_fstatfs(unsigned int fd, struct statfs __user *buf); +asmlinkage long sys_fstatfs64(unsigned int fd, size_t sz, + struct statfs64 __user *buf); +asmlinkage long sys_lstat(const char __user *filename, + struct __old_kernel_stat __user *statbuf); +asmlinkage long sys_fstat(unsigned int fd, + struct __old_kernel_stat __user *statbuf); +asmlinkage long sys_newstat(const char __user *filename, + struct stat __user *statbuf); +asmlinkage long sys_newlstat(const char __user *filename, + struct stat __user *statbuf); +asmlinkage long sys_newfstat(unsigned int fd, struct stat __user *statbuf); +asmlinkage long sys_ustat(unsigned dev, struct ustat 
__user *ubuf); +#if defined(__ARCH_WANT_STAT64) || defined(__ARCH_WANT_COMPAT_STAT64) +asmlinkage long sys_stat64(const char __user *filename, + struct stat64 __user *statbuf); +asmlinkage long sys_fstat64(unsigned long fd, struct stat64 __user *statbuf); +asmlinkage long sys_lstat64(const char __user *filename, + struct stat64 __user *statbuf); +asmlinkage long sys_fstatat64(int dfd, const char __user *filename, + struct stat64 __user *statbuf, int flag); +#endif +#if BITS_PER_LONG == 32 +asmlinkage long sys_truncate64(const char __user *path, loff_t length); +asmlinkage long sys_ftruncate64(unsigned int fd, loff_t length); +#endif -/* fs/xattr.c */ asmlinkage long sys_setxattr(const char __user *path, const char __user *name, const void __user *value, size_t size, int flags); asmlinkage long sys_lsetxattr(const char __user *path, const char __user *name, @@ -372,836 +489,83 @@ asmlinkage long sys_lremovexattr(const char __user *path, const char __user *name); asmlinkage long sys_fremovexattr(int fd, const char __user *name); -/* fs/dcache.c */ -asmlinkage long sys_getcwd(char __user *buf, unsigned long size); +asmlinkage long sys_brk(unsigned long brk); +asmlinkage long sys_mprotect(unsigned long start, size_t len, + unsigned long prot); +asmlinkage long sys_mremap(unsigned long addr, + unsigned long old_len, unsigned long new_len, + unsigned long flags, unsigned long new_addr); +asmlinkage long sys_remap_file_pages(unsigned long start, unsigned long size, + unsigned long prot, unsigned long pgoff, + unsigned long flags); +asmlinkage long sys_msync(unsigned long start, size_t len, int flags); +asmlinkage long sys_fadvise64(int fd, loff_t offset, loff_t len, int advice); +asmlinkage long sys_fadvise64_64(int fd, loff_t offset, loff_t len, int advice); +asmlinkage long sys_munmap(unsigned long addr, size_t len); +asmlinkage long sys_mlock(unsigned long start, size_t len); +asmlinkage long sys_munlock(unsigned long start, size_t len); +asmlinkage long 
sys_mlockall(int flags); +asmlinkage long sys_munlockall(void); +asmlinkage long sys_madvise(unsigned long start, size_t len, int behavior); +asmlinkage long sys_mincore(unsigned long start, size_t len, + unsigned char __user * vec); -/* fs/cookies.c */ -asmlinkage long sys_lookup_dcookie(u64 cookie64, char __user *buf, size_t len); +asmlinkage long sys_pivot_root(const char __user *new_root, + const char __user *put_old); +asmlinkage long sys_chroot(const char __user *filename); +asmlinkage long sys_mknod(const char __user *filename, umode_t mode, + unsigned dev); +asmlinkage long sys_link(const char __user *oldname, + const char __user *newname); +asmlinkage long sys_symlink(const char __user *old, const char __user *new); +asmlinkage long sys_unlink(const char __user *pathname); +asmlinkage long sys_rename(const char __user *oldname, + const char __user *newname); +asmlinkage long sys_chmod(const char __user *filename, umode_t mode); +asmlinkage long sys_fchmod(unsigned int fd, umode_t mode); -/* fs/eventfd.c */ -asmlinkage long sys_eventfd2(unsigned int count, int flags); - -/* fs/eventpoll.c */ -asmlinkage long sys_epoll_create1(int flags); -asmlinkage long sys_epoll_ctl(int epfd, int op, int fd, - struct epoll_event __user *event); -asmlinkage long sys_epoll_pwait(int epfd, struct epoll_event __user *events, - int maxevents, int timeout, - const sigset_t __user *sigmask, - size_t sigsetsize); -asmlinkage long sys_epoll_pwait2(int epfd, struct epoll_event __user *events, - int maxevents, - const struct __kernel_timespec __user *timeout, - const sigset_t __user *sigmask, - size_t sigsetsize); - -/* fs/fcntl.c */ -asmlinkage long sys_dup(unsigned int fildes); -asmlinkage long sys_dup3(unsigned int oldfd, unsigned int newfd, int flags); asmlinkage long sys_fcntl(unsigned int fd, unsigned int cmd, unsigned long arg); #if BITS_PER_LONG == 32 asmlinkage long sys_fcntl64(unsigned int fd, unsigned int cmd, unsigned long arg); #endif - -/* fs/inotify_user.c */ 
-asmlinkage long sys_inotify_init1(int flags); -asmlinkage long sys_inotify_add_watch(int fd, const char __user *path, - u32 mask); -asmlinkage long sys_inotify_rm_watch(int fd, __s32 wd); - -/* fs/ioctl.c */ +asmlinkage long sys_pipe(int __user *fildes); +asmlinkage long sys_pipe2(int __user *fildes, int flags); +asmlinkage long sys_dup(unsigned int fildes); +asmlinkage long sys_dup2(unsigned int oldfd, unsigned int newfd); +asmlinkage long sys_dup3(unsigned int oldfd, unsigned int newfd, int flags); +asmlinkage long sys_ioperm(unsigned long from, unsigned long num, int on); asmlinkage long sys_ioctl(unsigned int fd, unsigned int cmd, unsigned long arg); - -/* fs/ioprio.c */ -asmlinkage long sys_ioprio_set(int which, int who, int ioprio); -asmlinkage long sys_ioprio_get(int which, int who); - -/* fs/locks.c */ asmlinkage long sys_flock(unsigned int fd, unsigned int cmd); - -/* fs/namei.c */ -asmlinkage long sys_mknodat(int dfd, const char __user * filename, umode_t mode, - unsigned dev); -asmlinkage long sys_mkdirat(int dfd, const char __user * pathname, umode_t mode); -asmlinkage long sys_unlinkat(int dfd, const char __user * pathname, int flag); -asmlinkage long sys_symlinkat(const char __user * oldname, - int newdfd, const char __user * newname); -asmlinkage long sys_linkat(int olddfd, const char __user *oldname, - int newdfd, const char __user *newname, int flags); -asmlinkage long sys_renameat(int olddfd, const char __user * oldname, - int newdfd, const char __user * newname); - -/* fs/namespace.c */ -asmlinkage long sys_umount(char __user *name, int flags); -asmlinkage long sys_mount(char __user *dev_name, char __user *dir_name, - char __user *type, unsigned long flags, - void __user *data); -asmlinkage long sys_pivot_root(const char __user *new_root, - const char __user *put_old); - -/* fs/nfsctl.c */ - -/* fs/open.c */ -asmlinkage long sys_statfs(const char __user * path, - struct statfs __user *buf); -asmlinkage long sys_statfs64(const char __user *path, 
size_t sz, - struct statfs64 __user *buf); -asmlinkage long sys_fstatfs(unsigned int fd, struct statfs __user *buf); -asmlinkage long sys_fstatfs64(unsigned int fd, size_t sz, - struct statfs64 __user *buf); -asmlinkage long sys_truncate(const char __user *path, long length); -asmlinkage long sys_ftruncate(unsigned int fd, unsigned long length); -#if BITS_PER_LONG == 32 -asmlinkage long sys_truncate64(const char __user *path, loff_t length); -asmlinkage long sys_ftruncate64(unsigned int fd, loff_t length); -#endif -asmlinkage long sys_fallocate(int fd, int mode, loff_t offset, loff_t len); -asmlinkage long sys_faccessat(int dfd, const char __user *filename, int mode); -asmlinkage long sys_faccessat2(int dfd, const char __user *filename, int mode, - int flags); -asmlinkage long sys_chdir(const char __user *filename); -asmlinkage long sys_fchdir(unsigned int fd); -asmlinkage long sys_chroot(const char __user *filename); -asmlinkage long sys_fchmod(unsigned int fd, umode_t mode); -asmlinkage long sys_fchmodat(int dfd, const char __user * filename, - umode_t mode); -asmlinkage long sys_fchownat(int dfd, const char __user *filename, uid_t user, - gid_t group, int flag); -asmlinkage long sys_fchown(unsigned int fd, uid_t user, gid_t group); -asmlinkage long sys_openat(int dfd, const char __user *filename, int flags, - umode_t mode); -asmlinkage long sys_openat2(int dfd, const char __user *filename, - struct open_how *how, size_t size); -asmlinkage long sys_close(unsigned int fd); -asmlinkage long sys_close_range(unsigned int fd, unsigned int max_fd, - unsigned int flags); -asmlinkage long sys_vhangup(void); - -/* fs/pipe.c */ -asmlinkage long sys_pipe2(int __user *fildes, int flags); - -/* fs/quota.c */ -asmlinkage long sys_quotactl(unsigned int cmd, const char __user *special, - qid_t id, void __user *addr); -asmlinkage long sys_quotactl_fd(unsigned int fd, unsigned int cmd, qid_t id, - void __user *addr); - -/* fs/readdir.c */ -asmlinkage long sys_getdents64(unsigned 
int fd, - struct linux_dirent64 __user *dirent, - unsigned int count); - -/* fs/read_write.c */ -asmlinkage long sys_llseek(unsigned int fd, unsigned long offset_high, - unsigned long offset_low, loff_t __user *result, - unsigned int whence); -asmlinkage long sys_lseek(unsigned int fd, off_t offset, - unsigned int whence); -asmlinkage long sys_read(unsigned int fd, char __user *buf, size_t count); -asmlinkage long sys_write(unsigned int fd, const char __user *buf, - size_t count); -asmlinkage long sys_readv(unsigned long fd, - const struct iovec __user *vec, - unsigned long vlen); -asmlinkage long sys_writev(unsigned long fd, - const struct iovec __user *vec, - unsigned long vlen); -asmlinkage long sys_pread64(unsigned int fd, char __user *buf, - size_t count, loff_t pos); -asmlinkage long sys_pwrite64(unsigned int fd, const char __user *buf, - size_t count, loff_t pos); -asmlinkage long sys_preadv(unsigned long fd, const struct iovec __user *vec, - unsigned long vlen, unsigned long pos_l, unsigned long pos_h); -asmlinkage long sys_pwritev(unsigned long fd, const struct iovec __user *vec, - unsigned long vlen, unsigned long pos_l, unsigned long pos_h); - -/* fs/sendfile.c */ -asmlinkage long sys_sendfile64(int out_fd, int in_fd, - loff_t __user *offset, size_t count); - -/* fs/select.c */ -asmlinkage long sys_pselect6(int, fd_set __user *, fd_set __user *, - fd_set __user *, struct __kernel_timespec __user *, - void __user *); -asmlinkage long sys_pselect6_time32(int, fd_set __user *, fd_set __user *, - fd_set __user *, struct old_timespec32 __user *, - void __user *); -asmlinkage long sys_ppoll(struct pollfd __user *, unsigned int, - struct __kernel_timespec __user *, const sigset_t __user *, - size_t); -asmlinkage long sys_ppoll_time32(struct pollfd __user *, unsigned int, - struct old_timespec32 __user *, const sigset_t __user *, - size_t); - -/* fs/signalfd.c */ -asmlinkage long sys_signalfd4(int ufd, sigset_t __user *user_mask, size_t sizemask, int flags); - 
-/* fs/splice.c */ -asmlinkage long sys_vmsplice(int fd, const struct iovec __user *iov, - unsigned long nr_segs, unsigned int flags); -asmlinkage long sys_splice(int fd_in, loff_t __user *off_in, - int fd_out, loff_t __user *off_out, - size_t len, unsigned int flags); -asmlinkage long sys_tee(int fdin, int fdout, size_t len, unsigned int flags); - -/* fs/stat.c */ -asmlinkage long sys_readlinkat(int dfd, const char __user *path, char __user *buf, - int bufsiz); -asmlinkage long sys_newfstatat(int dfd, const char __user *filename, - struct stat __user *statbuf, int flag); -asmlinkage long sys_newfstat(unsigned int fd, struct stat __user *statbuf); -#if defined(__ARCH_WANT_STAT64) || defined(__ARCH_WANT_COMPAT_STAT64) -asmlinkage long sys_fstat64(unsigned long fd, struct stat64 __user *statbuf); -asmlinkage long sys_fstatat64(int dfd, const char __user *filename, - struct stat64 __user *statbuf, int flag); -#endif - -/* fs/sync.c */ -asmlinkage long sys_sync(void); -asmlinkage long sys_fsync(unsigned int fd); -asmlinkage long sys_fdatasync(unsigned int fd); -asmlinkage long sys_sync_file_range2(int fd, unsigned int flags, - loff_t offset, loff_t nbytes); -asmlinkage long sys_sync_file_range(int fd, loff_t offset, loff_t nbytes, - unsigned int flags); - -/* fs/timerfd.c */ -asmlinkage long sys_timerfd_create(int clockid, int flags); -asmlinkage long sys_timerfd_settime(int ufd, int flags, - const struct __kernel_itimerspec __user *utmr, - struct __kernel_itimerspec __user *otmr); -asmlinkage long sys_timerfd_gettime(int ufd, struct __kernel_itimerspec __user *otmr); -asmlinkage long sys_timerfd_gettime32(int ufd, - struct old_itimerspec32 __user *otmr); -asmlinkage long sys_timerfd_settime32(int ufd, int flags, - const struct old_itimerspec32 __user *utmr, - struct old_itimerspec32 __user *otmr); - -/* fs/utimes.c */ -asmlinkage long sys_utimensat(int dfd, const char __user *filename, - struct __kernel_timespec __user *utimes, - int flags); -asmlinkage long 
sys_utimensat_time32(unsigned int dfd, - const char __user *filename, - struct old_timespec32 __user *t, int flags); - -/* kernel/acct.c */ -asmlinkage long sys_acct(const char __user *name); - -/* kernel/capability.c */ -asmlinkage long sys_capget(cap_user_header_t header, - cap_user_data_t dataptr); -asmlinkage long sys_capset(cap_user_header_t header, - const cap_user_data_t data); - -/* kernel/exec_domain.c */ -asmlinkage long sys_personality(unsigned int personality); - -/* kernel/exit.c */ -asmlinkage long sys_exit(int error_code); -asmlinkage long sys_exit_group(int error_code); -asmlinkage long sys_waitid(int which, pid_t pid, - struct siginfo __user *infop, - int options, struct rusage __user *ru); - -/* kernel/fork.c */ -asmlinkage long sys_set_tid_address(int __user *tidptr); -asmlinkage long sys_unshare(unsigned long unshare_flags); - -/* kernel/futex.c */ -asmlinkage long sys_futex(u32 __user *uaddr, int op, u32 val, - const struct __kernel_timespec __user *utime, - u32 __user *uaddr2, u32 val3); -asmlinkage long sys_futex_time32(u32 __user *uaddr, int op, u32 val, - const struct old_timespec32 __user *utime, - u32 __user *uaddr2, u32 val3); -asmlinkage long sys_get_robust_list(int pid, - struct robust_list_head __user * __user *head_ptr, - size_t __user *len_ptr); -asmlinkage long sys_set_robust_list(struct robust_list_head __user *head, - size_t len); - -/* kernel/hrtimer.c */ -asmlinkage long sys_nanosleep(struct __kernel_timespec __user *rqtp, - struct __kernel_timespec __user *rmtp); -asmlinkage long sys_nanosleep_time32(struct old_timespec32 __user *rqtp, - struct old_timespec32 __user *rmtp); - -/* kernel/itimer.c */ -asmlinkage long sys_getitimer(int which, struct __kernel_old_itimerval __user *value); -asmlinkage long sys_setitimer(int which, - struct __kernel_old_itimerval __user *value, - struct __kernel_old_itimerval __user *ovalue); - -/* kernel/kexec.c */ -asmlinkage long sys_kexec_load(unsigned long entry, unsigned long nr_segments, - 
struct kexec_segment __user *segments, - unsigned long flags); - -/* kernel/module.c */ -asmlinkage long sys_init_module(void __user *umod, unsigned long len, - const char __user *uargs); -asmlinkage long sys_delete_module(const char __user *name_user, - unsigned int flags); - -/* kernel/posix-timers.c */ -asmlinkage long sys_timer_create(clockid_t which_clock, - struct sigevent __user *timer_event_spec, - timer_t __user * created_timer_id); -asmlinkage long sys_timer_gettime(timer_t timer_id, - struct __kernel_itimerspec __user *setting); -asmlinkage long sys_timer_getoverrun(timer_t timer_id); -asmlinkage long sys_timer_settime(timer_t timer_id, int flags, - const struct __kernel_itimerspec __user *new_setting, - struct __kernel_itimerspec __user *old_setting); -asmlinkage long sys_timer_delete(timer_t timer_id); -asmlinkage long sys_clock_settime(clockid_t which_clock, - const struct __kernel_timespec __user *tp); -asmlinkage long sys_clock_gettime(clockid_t which_clock, - struct __kernel_timespec __user *tp); -asmlinkage long sys_clock_getres(clockid_t which_clock, - struct __kernel_timespec __user *tp); -asmlinkage long sys_clock_nanosleep(clockid_t which_clock, int flags, - const struct __kernel_timespec __user *rqtp, - struct __kernel_timespec __user *rmtp); -asmlinkage long sys_timer_gettime32(timer_t timer_id, - struct old_itimerspec32 __user *setting); -asmlinkage long sys_timer_settime32(timer_t timer_id, int flags, - struct old_itimerspec32 __user *new, - struct old_itimerspec32 __user *old); -asmlinkage long sys_clock_settime32(clockid_t which_clock, - struct old_timespec32 __user *tp); -asmlinkage long sys_clock_gettime32(clockid_t which_clock, - struct old_timespec32 __user *tp); -asmlinkage long sys_clock_getres_time32(clockid_t which_clock, - struct old_timespec32 __user *tp); -asmlinkage long sys_clock_nanosleep_time32(clockid_t which_clock, int flags, - struct old_timespec32 __user *rqtp, - struct old_timespec32 __user *rmtp); - -/* 
kernel/printk.c */ -asmlinkage long sys_syslog(int type, char __user *buf, int len); - -/* kernel/ptrace.c */ -asmlinkage long sys_ptrace(long request, long pid, unsigned long addr, - unsigned long data); -/* kernel/sched/core.c */ - -asmlinkage long sys_sched_setparam(pid_t pid, - struct sched_param __user *param); -asmlinkage long sys_sched_setscheduler(pid_t pid, int policy, - struct sched_param __user *param); -asmlinkage long sys_sched_getscheduler(pid_t pid); -asmlinkage long sys_sched_getparam(pid_t pid, - struct sched_param __user *param); -asmlinkage long sys_sched_setaffinity(pid_t pid, unsigned int len, - unsigned long __user *user_mask_ptr); -asmlinkage long sys_sched_getaffinity(pid_t pid, unsigned int len, - unsigned long __user *user_mask_ptr); -asmlinkage long sys_sched_yield(void); -asmlinkage long sys_sched_get_priority_max(int policy); -asmlinkage long sys_sched_get_priority_min(int policy); -asmlinkage long sys_sched_rr_get_interval(pid_t pid, - struct __kernel_timespec __user *interval); -asmlinkage long sys_sched_rr_get_interval_time32(pid_t pid, - struct old_timespec32 __user *interval); - -/* kernel/signal.c */ -asmlinkage long sys_restart_syscall(void); -asmlinkage long sys_kill(pid_t pid, int sig); -asmlinkage long sys_tkill(pid_t pid, int sig); -asmlinkage long sys_tgkill(pid_t tgid, pid_t pid, int sig); -asmlinkage long sys_sigaltstack(const struct sigaltstack __user *uss, - struct sigaltstack __user *uoss); -asmlinkage long sys_rt_sigsuspend(sigset_t __user *unewset, size_t sigsetsize); -#ifndef CONFIG_ODD_RT_SIGACTION -asmlinkage long sys_rt_sigaction(int, - const struct sigaction __user *, - struct sigaction __user *, - size_t); -#endif -asmlinkage long sys_rt_sigprocmask(int how, sigset_t __user *set, - sigset_t __user *oset, size_t sigsetsize); -asmlinkage long sys_rt_sigpending(sigset_t __user *set, size_t sigsetsize); -asmlinkage long sys_rt_sigtimedwait(const sigset_t __user *uthese, - siginfo_t __user *uinfo, - const struct 
__kernel_timespec __user *uts, - size_t sigsetsize); -asmlinkage long sys_rt_sigtimedwait_time32(const sigset_t __user *uthese, - siginfo_t __user *uinfo, - const struct old_timespec32 __user *uts, - size_t sigsetsize); -asmlinkage long sys_rt_sigqueueinfo(pid_t pid, int sig, siginfo_t __user *uinfo); - -/* kernel/sys.c */ -asmlinkage long sys_setpriority(int which, int who, int niceval); -asmlinkage long sys_getpriority(int which, int who); -asmlinkage long sys_reboot(int magic1, int magic2, unsigned int cmd, - void __user *arg); -asmlinkage long sys_setregid(gid_t rgid, gid_t egid); -asmlinkage long sys_setgid(gid_t gid); -asmlinkage long sys_setreuid(uid_t ruid, uid_t euid); -asmlinkage long sys_setuid(uid_t uid); -asmlinkage long sys_setresuid(uid_t ruid, uid_t euid, uid_t suid); -asmlinkage long sys_getresuid(uid_t __user *ruid, uid_t __user *euid, uid_t __user *suid); -asmlinkage long sys_setresgid(gid_t rgid, gid_t egid, gid_t sgid); -asmlinkage long sys_getresgid(gid_t __user *rgid, gid_t __user *egid, gid_t __user *sgid); -asmlinkage long sys_setfsuid(uid_t uid); -asmlinkage long sys_setfsgid(gid_t gid); -asmlinkage long sys_times(struct tms __user *tbuf); -asmlinkage long sys_setpgid(pid_t pid, pid_t pgid); -asmlinkage long sys_getpgid(pid_t pid); -asmlinkage long sys_getsid(pid_t pid); -asmlinkage long sys_setsid(void); -asmlinkage long sys_getgroups(int gidsetsize, gid_t __user *grouplist); -asmlinkage long sys_setgroups(int gidsetsize, gid_t __user *grouplist); -asmlinkage long sys_newuname(struct new_utsname __user *name); -asmlinkage long sys_sethostname(char __user *name, int len); -asmlinkage long sys_setdomainname(char __user *name, int len); -asmlinkage long sys_getrlimit(unsigned int resource, - struct rlimit __user *rlim); -asmlinkage long sys_setrlimit(unsigned int resource, - struct rlimit __user *rlim); -asmlinkage long sys_getrusage(int who, struct rusage __user *ru); -asmlinkage long sys_umask(int mask); -asmlinkage long sys_prctl(int 
option, unsigned long arg2, unsigned long arg3, - unsigned long arg4, unsigned long arg5); -asmlinkage long sys_getcpu(unsigned __user *cpu, unsigned __user *node, struct getcpu_cache __user *cache); - -/* kernel/time.c */ -asmlinkage long sys_gettimeofday(struct __kernel_old_timeval __user *tv, - struct timezone __user *tz); -asmlinkage long sys_settimeofday(struct __kernel_old_timeval __user *tv, - struct timezone __user *tz); -asmlinkage long sys_adjtimex(struct __kernel_timex __user *txc_p); -asmlinkage long sys_adjtimex_time32(struct old_timex32 __user *txc_p); - -/* kernel/sys.c */ -asmlinkage long sys_getpid(void); -asmlinkage long sys_getppid(void); -asmlinkage long sys_getuid(void); -asmlinkage long sys_geteuid(void); -asmlinkage long sys_getgid(void); -asmlinkage long sys_getegid(void); -asmlinkage long sys_gettid(void); -asmlinkage long sys_sysinfo(struct sysinfo __user *info); - -/* ipc/mqueue.c */ -asmlinkage long sys_mq_open(const char __user *name, int oflag, umode_t mode, struct mq_attr __user *attr); -asmlinkage long sys_mq_unlink(const char __user *name); -asmlinkage long sys_mq_timedsend(mqd_t mqdes, const char __user *msg_ptr, size_t msg_len, unsigned int msg_prio, const struct __kernel_timespec __user *abs_timeout); -asmlinkage long sys_mq_timedreceive(mqd_t mqdes, char __user *msg_ptr, size_t msg_len, unsigned int __user *msg_prio, const struct __kernel_timespec __user *abs_timeout); -asmlinkage long sys_mq_notify(mqd_t mqdes, const struct sigevent __user *notification); -asmlinkage long sys_mq_getsetattr(mqd_t mqdes, const struct mq_attr __user *mqstat, struct mq_attr __user *omqstat); -asmlinkage long sys_mq_timedreceive_time32(mqd_t mqdes, - char __user *u_msg_ptr, - unsigned int msg_len, unsigned int __user *u_msg_prio, - const struct old_timespec32 __user *u_abs_timeout); -asmlinkage long sys_mq_timedsend_time32(mqd_t mqdes, - const char __user *u_msg_ptr, - unsigned int msg_len, unsigned int msg_prio, - const struct old_timespec32 __user 
*u_abs_timeout); - -/* ipc/msg.c */ -asmlinkage long sys_msgget(key_t key, int msgflg); -asmlinkage long sys_old_msgctl(int msqid, int cmd, struct msqid_ds __user *buf); -asmlinkage long sys_msgctl(int msqid, int cmd, struct msqid_ds __user *buf); -asmlinkage long sys_msgrcv(int msqid, struct msgbuf __user *msgp, - size_t msgsz, long msgtyp, int msgflg); -asmlinkage long sys_msgsnd(int msqid, struct msgbuf __user *msgp, - size_t msgsz, int msgflg); - -/* ipc/sem.c */ -asmlinkage long sys_semget(key_t key, int nsems, int semflg); -asmlinkage long sys_semctl(int semid, int semnum, int cmd, unsigned long arg); -asmlinkage long sys_old_semctl(int semid, int semnum, int cmd, unsigned long arg); -asmlinkage long sys_semtimedop(int semid, struct sembuf __user *sops, - unsigned nsops, - const struct __kernel_timespec __user *timeout); -asmlinkage long sys_semtimedop_time32(int semid, struct sembuf __user *sops, - unsigned nsops, - const struct old_timespec32 __user *timeout); -asmlinkage long sys_semop(int semid, struct sembuf __user *sops, - unsigned nsops); - -/* ipc/shm.c */ -asmlinkage long sys_shmget(key_t key, size_t size, int flag); -asmlinkage long sys_old_shmctl(int shmid, int cmd, struct shmid_ds __user *buf); -asmlinkage long sys_shmctl(int shmid, int cmd, struct shmid_ds __user *buf); -asmlinkage long sys_shmat(int shmid, char __user *shmaddr, int shmflg); -asmlinkage long sys_shmdt(char __user *shmaddr); - -/* net/socket.c */ -asmlinkage long sys_socket(int, int, int); -asmlinkage long sys_socketpair(int, int, int, int __user *); -asmlinkage long sys_bind(int, struct sockaddr __user *, int); -asmlinkage long sys_listen(int, int); -asmlinkage long sys_accept(int, struct sockaddr __user *, int __user *); -asmlinkage long sys_connect(int, struct sockaddr __user *, int); -asmlinkage long sys_getsockname(int, struct sockaddr __user *, int __user *); -asmlinkage long sys_getpeername(int, struct sockaddr __user *, int __user *); -asmlinkage long sys_sendto(int, void 
__user *, size_t, unsigned, - struct sockaddr __user *, int); -asmlinkage long sys_recvfrom(int, void __user *, size_t, unsigned, - struct sockaddr __user *, int __user *); -asmlinkage long sys_setsockopt(int fd, int level, int optname, - char __user *optval, int optlen); -asmlinkage long sys_getsockopt(int fd, int level, int optname, - char __user *optval, int __user *optlen); -asmlinkage long sys_shutdown(int, int); -asmlinkage long sys_sendmsg(int fd, struct user_msghdr __user *msg, unsigned flags); -asmlinkage long sys_recvmsg(int fd, struct user_msghdr __user *msg, unsigned flags); - -/* mm/filemap.c */ -asmlinkage long sys_readahead(int fd, loff_t offset, size_t count); - -/* mm/nommu.c, also with MMU */ -asmlinkage long sys_brk(unsigned long brk); -asmlinkage long sys_munmap(unsigned long addr, size_t len); -asmlinkage long sys_mremap(unsigned long addr, - unsigned long old_len, unsigned long new_len, - unsigned long flags, unsigned long new_addr); - -/* security/keys/keyctl.c */ -asmlinkage long sys_add_key(const char __user *_type, - const char __user *_description, - const void __user *_payload, - size_t plen, - key_serial_t destringid); -asmlinkage long sys_request_key(const char __user *_type, - const char __user *_description, - const char __user *_callout_info, - key_serial_t destringid); -asmlinkage long sys_keyctl(int cmd, unsigned long arg2, unsigned long arg3, - unsigned long arg4, unsigned long arg5); - -/* arch/example/kernel/sys_example.c */ -#ifdef CONFIG_CLONE_BACKWARDS -asmlinkage long sys_clone(unsigned long, unsigned long, int __user *, unsigned long, - int __user *); -#else -#ifdef CONFIG_CLONE_BACKWARDS3 -asmlinkage long sys_clone(unsigned long, unsigned long, int, int __user *, - int __user *, unsigned long); -#else -asmlinkage long sys_clone(unsigned long, unsigned long, int __user *, - int __user *, unsigned long); -#endif -#endif - -asmlinkage long sys_clone3(struct clone_args __user *uargs, size_t size); - -asmlinkage long 
sys_execve(const char __user *filename, - const char __user *const __user *argv, - const char __user *const __user *envp); - -/* mm/fadvise.c */ -asmlinkage long sys_fadvise64_64(int fd, loff_t offset, loff_t len, int advice); - -/* mm/, CONFIG_MMU only */ -asmlinkage long sys_swapon(const char __user *specialfile, int swap_flags); -asmlinkage long sys_swapoff(const char __user *specialfile); -asmlinkage long sys_mprotect(unsigned long start, size_t len, - unsigned long prot); -asmlinkage long sys_msync(unsigned long start, size_t len, int flags); -asmlinkage long sys_mlock(unsigned long start, size_t len); -asmlinkage long sys_munlock(unsigned long start, size_t len); -asmlinkage long sys_mlockall(int flags); -asmlinkage long sys_munlockall(void); -asmlinkage long sys_mincore(unsigned long start, size_t len, - unsigned char __user * vec); -asmlinkage long sys_madvise(unsigned long start, size_t len, int behavior); -asmlinkage long sys_process_madvise(int pidfd, const struct iovec __user *vec, - size_t vlen, int behavior, unsigned int flags); -asmlinkage long sys_process_mrelease(int pidfd, unsigned int flags); -asmlinkage long sys_remap_file_pages(unsigned long start, unsigned long size, - unsigned long prot, unsigned long pgoff, - unsigned long flags); -asmlinkage long sys_mbind(unsigned long start, unsigned long len, - unsigned long mode, - const unsigned long __user *nmask, - unsigned long maxnode, - unsigned flags); -asmlinkage long sys_get_mempolicy(int __user *policy, - unsigned long __user *nmask, - unsigned long maxnode, - unsigned long addr, unsigned long flags); -asmlinkage long sys_set_mempolicy(int mode, const unsigned long __user *nmask, - unsigned long maxnode); -asmlinkage long sys_migrate_pages(pid_t pid, unsigned long maxnode, - const unsigned long __user *from, - const unsigned long __user *to); -asmlinkage long sys_move_pages(pid_t pid, unsigned long nr_pages, - const void __user * __user *pages, - const int __user *nodes, - int __user *status, 
- int flags); - -asmlinkage long sys_rt_tgsigqueueinfo(pid_t tgid, pid_t pid, int sig, - siginfo_t __user *uinfo); -asmlinkage long sys_perf_event_open( - struct perf_event_attr __user *attr_uptr, - pid_t pid, int cpu, int group_fd, unsigned long flags); -asmlinkage long sys_accept4(int, struct sockaddr __user *, int __user *, int); -asmlinkage long sys_recvmmsg(int fd, struct mmsghdr __user *msg, - unsigned int vlen, unsigned flags, - struct __kernel_timespec __user *timeout); -asmlinkage long sys_recvmmsg_time32(int fd, struct mmsghdr __user *msg, - unsigned int vlen, unsigned flags, - struct old_timespec32 __user *timeout); - -asmlinkage long sys_wait4(pid_t pid, int __user *stat_addr, - int options, struct rusage __user *ru); -asmlinkage long sys_prlimit64(pid_t pid, unsigned int resource, - const struct rlimit64 __user *new_rlim, - struct rlimit64 __user *old_rlim); -asmlinkage long sys_fanotify_init(unsigned int flags, unsigned int event_f_flags); -asmlinkage long sys_fanotify_mark(int fanotify_fd, unsigned int flags, - u64 mask, int fd, - const char __user *pathname); -asmlinkage long sys_name_to_handle_at(int dfd, const char __user *name, - struct file_handle __user *handle, - int __user *mnt_id, int flag); -asmlinkage long sys_open_by_handle_at(int mountdirfd, - struct file_handle __user *handle, - int flags); -asmlinkage long sys_clock_adjtime(clockid_t which_clock, - struct __kernel_timex __user *tx); -asmlinkage long sys_clock_adjtime32(clockid_t which_clock, - struct old_timex32 __user *tx); -asmlinkage long sys_syncfs(int fd); -asmlinkage long sys_setns(int fd, int nstype); -asmlinkage long sys_pidfd_open(pid_t pid, unsigned int flags); -asmlinkage long sys_sendmmsg(int fd, struct mmsghdr __user *msg, - unsigned int vlen, unsigned flags); -asmlinkage long sys_process_vm_readv(pid_t pid, - const struct iovec __user *lvec, - unsigned long liovcnt, - const struct iovec __user *rvec, - unsigned long riovcnt, - unsigned long flags); -asmlinkage long 
sys_process_vm_writev(pid_t pid, - const struct iovec __user *lvec, - unsigned long liovcnt, - const struct iovec __user *rvec, - unsigned long riovcnt, - unsigned long flags); -asmlinkage long sys_kcmp(pid_t pid1, pid_t pid2, int type, - unsigned long idx1, unsigned long idx2); -asmlinkage long sys_finit_module(int fd, const char __user *uargs, int flags); -asmlinkage long sys_sched_setattr(pid_t pid, - struct sched_attr __user *attr, - unsigned int flags); -asmlinkage long sys_sched_getattr(pid_t pid, - struct sched_attr __user *attr, - unsigned int size, - unsigned int flags); -asmlinkage long sys_renameat2(int olddfd, const char __user *oldname, - int newdfd, const char __user *newname, - unsigned int flags); -asmlinkage long sys_seccomp(unsigned int op, unsigned int flags, - void __user *uargs); -asmlinkage long sys_getrandom(char __user *buf, size_t count, - unsigned int flags); -asmlinkage long sys_memfd_create(const char __user *uname_ptr, unsigned int flags); -asmlinkage long sys_bpf(int cmd, union bpf_attr *attr, unsigned int size); -asmlinkage long sys_execveat(int dfd, const char __user *filename, - const char __user *const __user *argv, - const char __user *const __user *envp, int flags); -asmlinkage long sys_userfaultfd(int flags); -asmlinkage long sys_membarrier(int cmd, unsigned int flags, int cpu_id); -asmlinkage long sys_mlock2(unsigned long start, size_t len, int flags); -asmlinkage long sys_copy_file_range(int fd_in, loff_t __user *off_in, - int fd_out, loff_t __user *off_out, - size_t len, unsigned int flags); -asmlinkage long sys_preadv2(unsigned long fd, const struct iovec __user *vec, - unsigned long vlen, unsigned long pos_l, unsigned long pos_h, - rwf_t flags); -asmlinkage long sys_pwritev2(unsigned long fd, const struct iovec __user *vec, - unsigned long vlen, unsigned long pos_l, unsigned long pos_h, - rwf_t flags); -asmlinkage long sys_pkey_mprotect(unsigned long start, size_t len, - unsigned long prot, int pkey); -asmlinkage long 
sys_pkey_alloc(unsigned long flags, unsigned long init_val); -asmlinkage long sys_pkey_free(int pkey); -asmlinkage long sys_statx(int dfd, const char __user *path, unsigned flags, - unsigned mask, struct statx __user *buffer); -asmlinkage long sys_rseq(struct rseq __user *rseq, uint32_t rseq_len, - int flags, uint32_t sig); -asmlinkage long sys_open_tree(int dfd, const char __user *path, unsigned flags); -asmlinkage long sys_move_mount(int from_dfd, const char __user *from_path, - int to_dfd, const char __user *to_path, - unsigned int ms_flags); -asmlinkage long sys_mount_setattr(int dfd, const char __user *path, - unsigned int flags, - struct mount_attr __user *uattr, size_t usize); -asmlinkage long sys_fsopen(const char __user *fs_name, unsigned int flags); -asmlinkage long sys_fsconfig(int fs_fd, unsigned int cmd, const char __user *key, - const void __user *value, int aux); -asmlinkage long sys_fsmount(int fs_fd, unsigned int flags, unsigned int ms_flags); -asmlinkage long sys_fspick(int dfd, const char __user *path, unsigned int flags); -asmlinkage long sys_pidfd_send_signal(int pidfd, int sig, - siginfo_t __user *info, - unsigned int flags); -asmlinkage long sys_pidfd_getfd(int pidfd, int fd, unsigned int flags); -asmlinkage long sys_landlock_create_ruleset(const struct landlock_ruleset_attr __user *attr, - size_t size, __u32 flags); -asmlinkage long sys_landlock_add_rule(int ruleset_fd, enum landlock_rule_type rule_type, - const void __user *rule_attr, __u32 flags); -asmlinkage long sys_landlock_restrict_self(int ruleset_fd, __u32 flags); -asmlinkage long sys_memfd_secret(unsigned int flags); - -/* - * Architecture-specific system calls - */ - -/* arch/x86/kernel/ioport.c */ -asmlinkage long sys_ioperm(unsigned long from, unsigned long num, int on); - -/* pciconfig: alpha, arm, arm64, ia64, sparc */ -asmlinkage long sys_pciconfig_read(unsigned long bus, unsigned long dfn, - unsigned long off, unsigned long len, - void __user *buf); -asmlinkage long 
sys_pciconfig_write(unsigned long bus, unsigned long dfn, - unsigned long off, unsigned long len, - void __user *buf); -asmlinkage long sys_pciconfig_iobase(long which, unsigned long bus, unsigned long devfn); - -/* powerpc */ -asmlinkage long sys_spu_run(int fd, __u32 __user *unpc, - __u32 __user *ustatus); -asmlinkage long sys_spu_create(const char __user *name, - unsigned int flags, umode_t mode, int fd); - - -/* - * Deprecated system calls which are still defined in - * include/uapi/asm-generic/unistd.h and wanted by >= 1 arch - */ - -/* __ARCH_WANT_SYSCALL_NO_AT */ -asmlinkage long sys_open(const char __user *filename, - int flags, umode_t mode); -asmlinkage long sys_link(const char __user *oldname, - const char __user *newname); -asmlinkage long sys_unlink(const char __user *pathname); -asmlinkage long sys_mknod(const char __user *filename, umode_t mode, - unsigned dev); -asmlinkage long sys_chmod(const char __user *filename, umode_t mode); -asmlinkage long sys_chown(const char __user *filename, - uid_t user, gid_t group); -asmlinkage long sys_mkdir(const char __user *pathname, umode_t mode); -asmlinkage long sys_rmdir(const char __user *pathname); -asmlinkage long sys_lchown(const char __user *filename, - uid_t user, gid_t group); -asmlinkage long sys_access(const char __user *filename, int mode); -asmlinkage long sys_rename(const char __user *oldname, - const char __user *newname); -asmlinkage long sys_symlink(const char __user *old, const char __user *new); -#if defined(__ARCH_WANT_STAT64) || defined(__ARCH_WANT_COMPAT_STAT64) -asmlinkage long sys_stat64(const char __user *filename, - struct stat64 __user *statbuf); -asmlinkage long sys_lstat64(const char __user *filename, - struct stat64 __user *statbuf); -#endif - -/* __ARCH_WANT_SYSCALL_NO_FLAGS */ -asmlinkage long sys_pipe(int __user *fildes); -asmlinkage long sys_dup2(unsigned int oldfd, unsigned int newfd); -asmlinkage long sys_epoll_create(int size); -asmlinkage long sys_inotify_init(void); 
-asmlinkage long sys_eventfd(unsigned int count); -asmlinkage long sys_signalfd(int ufd, sigset_t __user *user_mask, size_t sizemask); - -/* __ARCH_WANT_SYSCALL_OFF_T */ +asmlinkage long sys_io_setup(unsigned nr_reqs, aio_context_t __user *ctx); +asmlinkage long sys_io_destroy(aio_context_t ctx); +asmlinkage long sys_io_getevents(aio_context_t ctx_id, + long min_nr, + long nr, + struct io_event __user *events, + struct timespec __user *timeout); +asmlinkage long sys_io_submit(aio_context_t, long, + struct iocb __user * __user *); +asmlinkage long sys_io_cancel(aio_context_t ctx_id, struct iocb __user *iocb, + struct io_event __user *result); asmlinkage long sys_sendfile(int out_fd, int in_fd, off_t __user *offset, size_t count); -asmlinkage long sys_newstat(const char __user *filename, - struct stat __user *statbuf); -asmlinkage long sys_newlstat(const char __user *filename, - struct stat __user *statbuf); -asmlinkage long sys_fadvise64(int fd, loff_t offset, size_t len, int advice); - -/* __ARCH_WANT_SYSCALL_DEPRECATED */ -asmlinkage long sys_alarm(unsigned int seconds); -asmlinkage long sys_getpgrp(void); -asmlinkage long sys_pause(void); -asmlinkage long sys_time(__kernel_old_time_t __user *tloc); -asmlinkage long sys_time32(old_time32_t __user *tloc); -#ifdef __ARCH_WANT_SYS_UTIME -asmlinkage long sys_utime(char __user *filename, - struct utimbuf __user *times); -asmlinkage long sys_utimes(char __user *filename, - struct __kernel_old_timeval __user *utimes); -asmlinkage long sys_futimesat(int dfd, const char __user *filename, - struct __kernel_old_timeval __user *utimes); -#endif -asmlinkage long sys_futimesat_time32(unsigned int dfd, - const char __user *filename, - struct old_timeval32 __user *t); -asmlinkage long sys_utime32(const char __user *filename, - struct old_utimbuf32 __user *t); -asmlinkage long sys_utimes_time32(const char __user *filename, - struct old_timeval32 __user *t); +asmlinkage long sys_sendfile64(int out_fd, int in_fd, + loff_t __user 
*offset, size_t count); +asmlinkage long sys_readlink(const char __user *path, + char __user *buf, int bufsiz); asmlinkage long sys_creat(const char __user *pathname, umode_t mode); -asmlinkage long sys_getdents(unsigned int fd, - struct linux_dirent __user *dirent, - unsigned int count); -asmlinkage long sys_select(int n, fd_set __user *inp, fd_set __user *outp, - fd_set __user *exp, struct __kernel_old_timeval __user *tvp); -asmlinkage long sys_poll(struct pollfd __user *ufds, unsigned int nfds, - int timeout); -asmlinkage long sys_epoll_wait(int epfd, struct epoll_event __user *events, - int maxevents, int timeout); -asmlinkage long sys_ustat(unsigned dev, struct ustat __user *ubuf); -asmlinkage long sys_vfork(void); -asmlinkage long sys_recv(int, void __user *, size_t, unsigned); -asmlinkage long sys_send(int, void __user *, size_t, unsigned); -asmlinkage long sys_oldumount(char __user *name); -asmlinkage long sys_uselib(const char __user *library); -asmlinkage long sys_sysfs(int option, - unsigned long arg1, unsigned long arg2); -asmlinkage long sys_fork(void); - -/* obsolete: kernel/time/time.c */ -asmlinkage long sys_stime(__kernel_old_time_t __user *tptr); -asmlinkage long sys_stime32(old_time32_t __user *tptr); - -/* obsolete: kernel/signal.c */ -asmlinkage long sys_sigpending(old_sigset_t __user *uset); -asmlinkage long sys_sigprocmask(int how, old_sigset_t __user *set, - old_sigset_t __user *oset); -#ifdef CONFIG_OLD_SIGSUSPEND -asmlinkage long sys_sigsuspend(old_sigset_t mask); -#endif - -#ifdef CONFIG_OLD_SIGSUSPEND3 -asmlinkage long sys_sigsuspend(int unused1, int unused2, old_sigset_t mask); -#endif - -#ifdef CONFIG_OLD_SIGACTION -asmlinkage long sys_sigaction(int, const struct old_sigaction __user *, - struct old_sigaction __user *); -#endif -asmlinkage long sys_sgetmask(void); -asmlinkage long sys_ssetmask(int newmask); -asmlinkage long sys_signal(int sig, __sighandler_t handler); - -/* obsolete: kernel/sched/core.c */ -asmlinkage long sys_nice(int 
increment); - -/* obsolete: kernel/kexec_file.c */ -asmlinkage long sys_kexec_file_load(int kernel_fd, int initrd_fd, - unsigned long cmdline_len, - const char __user *cmdline_ptr, - unsigned long flags); - -/* obsolete: kernel/exit.c */ -asmlinkage long sys_waitpid(pid_t pid, int __user *stat_addr, int options); - -/* obsolete: kernel/uid16.c */ +asmlinkage long sys_open(const char __user *filename, + int flags, umode_t mode); +asmlinkage long sys_close(unsigned int fd); +asmlinkage long sys_access(const char __user *filename, int mode); +asmlinkage long sys_vhangup(void); +asmlinkage long sys_chown(const char __user *filename, + uid_t user, gid_t group); +asmlinkage long sys_lchown(const char __user *filename, + uid_t user, gid_t group); +asmlinkage long sys_fchown(unsigned int fd, uid_t user, gid_t group); #ifdef CONFIG_HAVE_UID16 asmlinkage long sys_chown16(const char __user *filename, old_uid_t user, old_gid_t group); @@ -1228,157 +592,357 @@ asmlinkage long sys_getgid16(void); asmlinkage long sys_getegid16(void); #endif -/* obsolete: net/socket.c */ +asmlinkage long sys_utime(char __user *filename, + struct utimbuf __user *times); +asmlinkage long sys_utimes(char __user *filename, + struct timeval __user *utimes); +asmlinkage long sys_lseek(unsigned int fd, off_t offset, + unsigned int whence); +asmlinkage long sys_llseek(unsigned int fd, unsigned long offset_high, + unsigned long offset_low, loff_t __user *result, + unsigned int whence); +asmlinkage long sys_read(unsigned int fd, char __user *buf, size_t count); +asmlinkage long sys_readahead(int fd, loff_t offset, size_t count); +asmlinkage long sys_readv(unsigned long fd, + const struct iovec __user *vec, + unsigned long vlen); +asmlinkage long sys_write(unsigned int fd, const char __user *buf, + size_t count); +asmlinkage long sys_writev(unsigned long fd, + const struct iovec __user *vec, + unsigned long vlen); +asmlinkage long sys_pread64(unsigned int fd, char __user *buf, + size_t count, loff_t pos); 
+asmlinkage long sys_pwrite64(unsigned int fd, const char __user *buf, + size_t count, loff_t pos); +asmlinkage long sys_preadv(unsigned long fd, const struct iovec __user *vec, + unsigned long vlen, unsigned long pos_l, unsigned long pos_h); +asmlinkage long sys_preadv2(unsigned long fd, const struct iovec __user *vec, + unsigned long vlen, unsigned long pos_l, unsigned long pos_h, + int flags); +asmlinkage long sys_pwritev(unsigned long fd, const struct iovec __user *vec, + unsigned long vlen, unsigned long pos_l, unsigned long pos_h); +asmlinkage long sys_pwritev2(unsigned long fd, const struct iovec __user *vec, + unsigned long vlen, unsigned long pos_l, unsigned long pos_h, + int flags); +asmlinkage long sys_getcwd(char __user *buf, unsigned long size); +asmlinkage long sys_mkdir(const char __user *pathname, umode_t mode); +asmlinkage long sys_chdir(const char __user *filename); +asmlinkage long sys_fchdir(unsigned int fd); +asmlinkage long sys_rmdir(const char __user *pathname); +asmlinkage long sys_lookup_dcookie(u64 cookie64, char __user *buf, size_t len); +asmlinkage long sys_quotactl(unsigned int cmd, const char __user *special, + qid_t id, void __user *addr); +asmlinkage long sys_getdents(unsigned int fd, + struct linux_dirent __user *dirent, + unsigned int count); +asmlinkage long sys_getdents64(unsigned int fd, + struct linux_dirent64 __user *dirent, + unsigned int count); + +asmlinkage long sys_setsockopt(int fd, int level, int optname, + char __user *optval, int optlen); +asmlinkage long sys_getsockopt(int fd, int level, int optname, + char __user *optval, int __user *optlen); +asmlinkage long sys_bind(int, struct sockaddr __user *, int); +asmlinkage long sys_connect(int, struct sockaddr __user *, int); +asmlinkage long sys_accept(int, struct sockaddr __user *, int __user *); +asmlinkage long sys_accept4(int, struct sockaddr __user *, int __user *, int); +asmlinkage long sys_getsockname(int, struct sockaddr __user *, int __user *); +asmlinkage long 
sys_getpeername(int, struct sockaddr __user *, int __user *); +asmlinkage long sys_send(int, void __user *, size_t, unsigned); +asmlinkage long sys_sendto(int, void __user *, size_t, unsigned, + struct sockaddr __user *, int) __intentional_overflow(0); +asmlinkage long sys_sendmsg(int fd, struct user_msghdr __user *msg, unsigned flags); +asmlinkage long sys_sendmmsg(int fd, struct mmsghdr __user *msg, + unsigned int vlen, unsigned flags); +asmlinkage long sys_recv(int, void __user *, size_t, unsigned); +asmlinkage long sys_recvfrom(int, void __user *, size_t, unsigned, + struct sockaddr __user *, int __user *); +asmlinkage long sys_recvmsg(int fd, struct user_msghdr __user *msg, unsigned flags); +asmlinkage long sys_recvmmsg(int fd, struct mmsghdr __user *msg, + unsigned int vlen, unsigned flags, + struct timespec __user *timeout); +asmlinkage long sys_socket(int, int, int); +asmlinkage long sys_socketpair(int, int, int, int __user *); asmlinkage long sys_socketcall(int call, unsigned long __user *args); - -/* obsolete: fs/stat.c */ -asmlinkage long sys_stat(const char __user *filename, - struct __old_kernel_stat __user *statbuf); -asmlinkage long sys_lstat(const char __user *filename, - struct __old_kernel_stat __user *statbuf); -asmlinkage long sys_fstat(unsigned int fd, - struct __old_kernel_stat __user *statbuf); -asmlinkage long sys_readlink(const char __user *path, - char __user *buf, int bufsiz); - -/* obsolete: fs/select.c */ +asmlinkage long sys_listen(int, int); +asmlinkage long sys_poll(struct pollfd __user *ufds, unsigned int nfds, + int timeout); +asmlinkage long sys_select(int n, fd_set __user *inp, fd_set __user *outp, + fd_set __user *exp, struct timeval __user *tvp); asmlinkage long sys_old_select(struct sel_arg_struct __user *arg); - -/* obsolete: fs/readdir.c */ -asmlinkage long sys_old_readdir(unsigned int, struct old_linux_dirent __user *, unsigned int); - -/* obsolete: kernel/sys.c */ +asmlinkage long sys_epoll_create(int size); +asmlinkage 
long sys_epoll_create1(int flags); +asmlinkage long sys_epoll_ctl(int epfd, int op, int fd, + struct epoll_event __user *event); +asmlinkage long sys_epoll_wait(int epfd, struct epoll_event __user *events, + int maxevents, int timeout); +asmlinkage long sys_epoll_pwait(int epfd, struct epoll_event __user *events, + int maxevents, int timeout, + const sigset_t __user *sigmask, + size_t sigsetsize); asmlinkage long sys_gethostname(char __user *name, int len); +asmlinkage long sys_sethostname(char __user *name, int len); +asmlinkage long sys_setdomainname(char __user *name, int len); +asmlinkage long sys_newuname(struct new_utsname __user *name); asmlinkage long sys_uname(struct old_utsname __user *); asmlinkage long sys_olduname(struct oldold_utsname __user *); -#ifdef __ARCH_WANT_SYS_OLD_GETRLIMIT + +asmlinkage long sys_getrlimit(unsigned int resource, + struct rlimit __user *rlim); +#if defined(COMPAT_RLIM_OLD_INFINITY) || !(defined(CONFIG_IA64)) asmlinkage long sys_old_getrlimit(unsigned int resource, struct rlimit __user *rlim); #endif +asmlinkage long sys_setrlimit(unsigned int resource, + struct rlimit __user *rlim); +asmlinkage long sys_prlimit64(pid_t pid, unsigned int resource, + const struct rlimit64 __user *new_rlim, + struct rlimit64 __user *old_rlim); +asmlinkage long sys_getrusage(int who, struct rusage __user *ru); +asmlinkage long sys_umask(int mask); -/* obsolete: ipc */ +asmlinkage long sys_msgget(key_t key, int msgflg); +asmlinkage long sys_msgsnd(int msqid, struct msgbuf __user *msgp, + size_t msgsz, int msgflg); +asmlinkage long sys_msgrcv(int msqid, struct msgbuf __user *msgp, + size_t msgsz, long msgtyp, int msgflg); +asmlinkage long sys_msgctl(int msqid, int cmd, struct msqid_ds __user *buf); + +asmlinkage long sys_semget(key_t key, int nsems, int semflg); +asmlinkage long sys_semop(int semid, struct sembuf __user *sops, + long nsops); +asmlinkage long sys_semctl(int semid, int semnum, int cmd, unsigned long arg); +asmlinkage long 
sys_semtimedop(int semid, struct sembuf __user *sops, + long nsops, + const struct timespec __user *timeout); +asmlinkage long sys_shmat(int shmid, char __user *shmaddr, int shmflg); +asmlinkage long sys_shmget(key_t key, size_t size, int flag); +asmlinkage long sys_shmdt(char __user *shmaddr); +asmlinkage long sys_shmctl(int shmid, int cmd, struct shmid_ds __user *buf); asmlinkage long sys_ipc(unsigned int call, int first, unsigned long second, unsigned long third, void __user *ptr, long fifth); -/* obsolete: mm/ */ +asmlinkage long sys_mq_open(const char __user *name, int oflag, umode_t mode, struct mq_attr __user *attr); +asmlinkage long sys_mq_unlink(const char __user *name); +asmlinkage long sys_mq_timedsend(mqd_t mqdes, const char __user *msg_ptr, size_t msg_len, unsigned int msg_prio, const struct timespec __user *abs_timeout); +asmlinkage long sys_mq_timedreceive(mqd_t mqdes, char __user *msg_ptr, size_t msg_len, unsigned int __user *msg_prio, const struct timespec __user *abs_timeout); +asmlinkage long sys_mq_notify(mqd_t mqdes, const struct sigevent __user *notification); +asmlinkage long sys_mq_getsetattr(mqd_t mqdes, const struct mq_attr __user *mqstat, struct mq_attr __user *omqstat); + +asmlinkage long sys_pciconfig_iobase(long which, unsigned long bus, unsigned long devfn); +asmlinkage long sys_pciconfig_read(unsigned long bus, unsigned long dfn, + unsigned long off, unsigned long len, + void __user *buf); +asmlinkage long sys_pciconfig_write(unsigned long bus, unsigned long dfn, + unsigned long off, unsigned long len, + void __user *buf); + +asmlinkage long sys_prctl(int option, unsigned long arg2, unsigned long arg3, + unsigned long arg4, unsigned long arg5); +asmlinkage long sys_swapon(const char __user *specialfile, int swap_flags); +asmlinkage long sys_swapoff(const char __user *specialfile); +asmlinkage long sys_sysctl(struct __sysctl_args __user *args); +asmlinkage long sys_sysinfo(struct sysinfo __user *info); +asmlinkage long sys_sysfs(int 
option, + unsigned long arg1, unsigned long arg2); +asmlinkage long sys_syslog(int type, char __user *buf, int len); +asmlinkage long sys_uselib(const char __user *library); +asmlinkage long sys_ni_syscall(unsigned long, unsigned long, unsigned long, unsigned long, unsigned long, unsigned long); +asmlinkage long sys_ptrace(long request, long pid, unsigned long addr, + unsigned long data); + +asmlinkage long sys_add_key(const char __user *_type, + const char __user *_description, + const void __user *_payload, + size_t plen, + key_serial_t destringid); + +asmlinkage long sys_request_key(const char __user *_type, + const char __user *_description, + const char __user *_callout_info, + key_serial_t destringid); + +asmlinkage long sys_keyctl(int cmd, unsigned long arg2, unsigned long arg3, + unsigned long arg4, unsigned long arg5); + +asmlinkage long sys_ioprio_set(int which, int who, int ioprio); +asmlinkage long sys_ioprio_get(int which, int who); +asmlinkage long sys_set_mempolicy(int mode, const unsigned long __user *nmask, + unsigned long maxnode); +asmlinkage long sys_migrate_pages(pid_t pid, unsigned long maxnode, + const unsigned long __user *from, + const unsigned long __user *to); +asmlinkage long sys_move_pages(pid_t pid, unsigned long nr_pages, + const void __user * __user *pages, + const int __user *nodes, + int __user *status, + int flags); +asmlinkage long sys_mbind(unsigned long start, unsigned long len, + unsigned long mode, + const unsigned long __user *nmask, + unsigned long maxnode, + unsigned flags); +asmlinkage long sys_get_mempolicy(int __user *policy, + unsigned long __user *nmask, + unsigned long maxnode, + unsigned long addr, unsigned long flags); + +asmlinkage long sys_inotify_init(void); +asmlinkage long sys_inotify_init1(int flags); +asmlinkage long sys_inotify_add_watch(int fd, const char __user *path, + u32 mask); +asmlinkage long sys_inotify_rm_watch(int fd, __s32 wd); + +asmlinkage long sys_spu_run(int fd, __u32 __user *unpc, + __u32 
__user *ustatus); +asmlinkage long sys_spu_create(const char __user *name, + unsigned int flags, umode_t mode, int fd); + +asmlinkage long sys_mknodat(int dfd, const char __user * filename, umode_t mode, + unsigned dev); +asmlinkage long sys_mkdirat(int dfd, const char __user * pathname, umode_t mode); +asmlinkage long sys_unlinkat(int dfd, const char __user * pathname, int flag); +asmlinkage long sys_symlinkat(const char __user * oldname, + int newdfd, const char __user * newname); +asmlinkage long sys_linkat(int olddfd, const char __user *oldname, + int newdfd, const char __user *newname, int flags); +asmlinkage long sys_renameat(int olddfd, const char __user * oldname, + int newdfd, const char __user * newname); +asmlinkage long sys_renameat2(int olddfd, const char __user *oldname, + int newdfd, const char __user *newname, + unsigned int flags); +asmlinkage long sys_futimesat(int dfd, const char __user *filename, + struct timeval __user *utimes); +asmlinkage long sys_faccessat(int dfd, const char __user *filename, int mode); +asmlinkage long sys_fchmodat(int dfd, const char __user * filename, + umode_t mode); +asmlinkage long sys_fchownat(int dfd, const char __user *filename, uid_t user, + gid_t group, int flag); +asmlinkage long sys_openat(int dfd, const char __user *filename, int flags, + umode_t mode); +asmlinkage long sys_newfstatat(int dfd, const char __user *filename, + struct stat __user *statbuf, int flag); +asmlinkage long sys_readlinkat(int dfd, const char __user *path, char __user *buf, + int bufsiz); +asmlinkage long sys_utimensat(int dfd, const char __user *filename, + struct timespec __user *utimes, int flags); +asmlinkage long sys_unshare(unsigned long unshare_flags); + +asmlinkage long sys_splice(int fd_in, loff_t __user *off_in, + int fd_out, loff_t __user *off_out, + size_t len, unsigned int flags); + +asmlinkage long sys_vmsplice(int fd, const struct iovec __user *iov, + unsigned long nr_segs, unsigned int flags); + +asmlinkage long 
sys_tee(int fdin, int fdout, size_t len, unsigned int flags); + +asmlinkage long sys_sync_file_range(int fd, loff_t offset, loff_t nbytes, + unsigned int flags); +asmlinkage long sys_sync_file_range2(int fd, unsigned int flags, + loff_t offset, loff_t nbytes); +asmlinkage long sys_get_robust_list(int pid, + struct robust_list_head __user * __user *head_ptr, + size_t __user *len_ptr); +asmlinkage long sys_set_robust_list(struct robust_list_head __user *head, + size_t len); +asmlinkage long sys_getcpu(unsigned __user *cpu, unsigned __user *node, struct getcpu_cache __user *cache); +asmlinkage long sys_signalfd(int ufd, sigset_t __user *user_mask, size_t sizemask); +asmlinkage long sys_signalfd4(int ufd, sigset_t __user *user_mask, size_t sizemask, int flags); +asmlinkage long sys_timerfd_create(int clockid, int flags); +asmlinkage long sys_timerfd_settime(int ufd, int flags, + const struct itimerspec __user *utmr, + struct itimerspec __user *otmr); +asmlinkage long sys_timerfd_gettime(int ufd, struct itimerspec __user *otmr); +asmlinkage long sys_eventfd(unsigned int count); +asmlinkage long sys_eventfd2(unsigned int count, int flags); +asmlinkage long sys_memfd_create(const char __user *uname_ptr, unsigned int flags); +asmlinkage long sys_userfaultfd(int flags); +asmlinkage long sys_fallocate(int fd, int mode, loff_t offset, loff_t len); +asmlinkage long sys_old_readdir(unsigned int, struct old_linux_dirent __user *, unsigned int); +asmlinkage long sys_pselect6(int, fd_set __user *, fd_set __user *, + fd_set __user *, struct timespec __user *, + void __user *); +asmlinkage long sys_ppoll(struct pollfd __user *, unsigned int, + struct timespec __user *, const sigset_t __user *, + size_t); +asmlinkage long sys_fanotify_init(unsigned int flags, unsigned int event_f_flags); +asmlinkage long sys_fanotify_mark(int fanotify_fd, unsigned int flags, + u64 mask, int fd, + const char __user *pathname); +asmlinkage long sys_syncfs(int fd); + +asmlinkage long sys_fork(void); 
+asmlinkage long sys_vfork(void); +#ifdef CONFIG_CLONE_BACKWARDS +asmlinkage long sys_clone(unsigned long, unsigned long, int __user *, unsigned long, + int __user *); +#else +#ifdef CONFIG_CLONE_BACKWARDS3 +asmlinkage long sys_clone(unsigned long, unsigned long, int, int __user *, + int __user *, unsigned long); +#else +asmlinkage long sys_clone(unsigned long, unsigned long, int __user *, + int __user *, unsigned long); +#endif +#endif + +asmlinkage long sys_execve(const char __user *filename, + const char __user *const __user *argv, + const char __user *const __user *envp); + +asmlinkage long sys_perf_event_open( + struct perf_event_attr __user *attr_uptr, + pid_t pid, int cpu, int group_fd, unsigned long flags); + asmlinkage long sys_mmap_pgoff(unsigned long addr, unsigned long len, unsigned long prot, unsigned long flags, unsigned long fd, unsigned long pgoff); asmlinkage long sys_old_mmap(struct mmap_arg_struct __user *arg); +asmlinkage long sys_name_to_handle_at(int dfd, const char __user *name, + struct file_handle __user *handle, + int __user *mnt_id, int flag); +asmlinkage long sys_open_by_handle_at(int mountdirfd, + struct file_handle __user *handle, + int flags); +asmlinkage long sys_setns(int fd, int nstype); +asmlinkage long sys_process_vm_readv(pid_t pid, + const struct iovec __user *lvec, + unsigned long liovcnt, + const struct iovec __user *rvec, + unsigned long riovcnt, + unsigned long flags); +asmlinkage long sys_process_vm_writev(pid_t pid, + const struct iovec __user *lvec, + unsigned long liovcnt, + const struct iovec __user *rvec, + unsigned long riovcnt, + unsigned long flags); +asmlinkage long sys_kcmp(pid_t pid1, pid_t pid2, int type, + unsigned long idx1, unsigned long idx2); +asmlinkage long sys_finit_module(int fd, const char __user *uargs, int flags); +asmlinkage long sys_seccomp(unsigned int op, unsigned int flags, + const char __user *uargs); +asmlinkage long sys_getrandom(char __user *buf, size_t count, + unsigned int flags); 
+asmlinkage long sys_bpf(int cmd, union bpf_attr __user *attr, unsigned int size); -/* - * Not a real system call, but a placeholder for syscalls which are - * not implemented -- see kernel/sys_ni.c - */ -asmlinkage long sys_ni_syscall(void); +asmlinkage long sys_execveat(int dfd, const char __user *filename, + const char __user *const __user *argv, + const char __user *const __user *envp, int flags); -#endif /* CONFIG_ARCH_HAS_SYSCALL_WRAPPER */ +asmlinkage long sys_membarrier(int cmd, int flags); +asmlinkage long sys_copy_file_range(int fd_in, loff_t __user *off_in, + int fd_out, loff_t __user *off_out, + size_t len, unsigned int flags); +asmlinkage long sys_mlock2(unsigned long start, size_t len, int flags); + +asmlinkage long sys_pkey_mprotect(unsigned long start, size_t len, + unsigned long prot, int pkey); +asmlinkage long sys_pkey_alloc(unsigned long flags, unsigned long init_val); +asmlinkage long sys_pkey_free(int pkey); -/* - * Kernel code should not call syscalls (i.e., sys_xyzyyz()) directly. - * Instead, use one of the functions which work equivalently, such as - * the ksys_xyzyyz() functions prototyped below. 
- */ -ssize_t ksys_write(unsigned int fd, const char __user *buf, size_t count); -int ksys_fchown(unsigned int fd, uid_t user, gid_t group); -ssize_t ksys_read(unsigned int fd, char __user *buf, size_t count); -void ksys_sync(void); -int ksys_unshare(unsigned long unshare_flags); -int ksys_setsid(void); -int ksys_sync_file_range(int fd, loff_t offset, loff_t nbytes, - unsigned int flags); -ssize_t ksys_pread64(unsigned int fd, char __user *buf, size_t count, - loff_t pos); -ssize_t ksys_pwrite64(unsigned int fd, const char __user *buf, - size_t count, loff_t pos); -int ksys_fallocate(int fd, int mode, loff_t offset, loff_t len); -#ifdef CONFIG_ADVISE_SYSCALLS -int ksys_fadvise64_64(int fd, loff_t offset, loff_t len, int advice); -#else -static inline int ksys_fadvise64_64(int fd, loff_t offset, loff_t len, - int advice) -{ - return -EINVAL; -} -#endif -unsigned long ksys_mmap_pgoff(unsigned long addr, unsigned long len, - unsigned long prot, unsigned long flags, - unsigned long fd, unsigned long pgoff); -ssize_t ksys_readahead(int fd, loff_t offset, size_t count); -int ksys_ipc(unsigned int call, int first, unsigned long second, - unsigned long third, void __user * ptr, long fifth); -int compat_ksys_ipc(u32 call, int first, int second, - u32 third, u32 ptr, u32 fifth); - -/* - * The following kernel syscall equivalents are just wrappers to fs-internal - * functions. Therefore, provide stubs to be inlined at the callsites. 
- */ -extern int do_fchownat(int dfd, const char __user *filename, uid_t user, - gid_t group, int flag); - -static inline long ksys_chown(const char __user *filename, uid_t user, - gid_t group) -{ - return do_fchownat(AT_FDCWD, filename, user, group, 0); -} - -static inline long ksys_lchown(const char __user *filename, uid_t user, - gid_t group) -{ - return do_fchownat(AT_FDCWD, filename, user, group, - AT_SYMLINK_NOFOLLOW); -} - -extern long do_sys_ftruncate(unsigned int fd, loff_t length, int small); - -static inline long ksys_ftruncate(unsigned int fd, loff_t length) -{ - return do_sys_ftruncate(fd, length, 1); -} - -extern long do_sys_truncate(const char __user *pathname, loff_t length); - -static inline long ksys_truncate(const char __user *pathname, loff_t length) -{ - return do_sys_truncate(pathname, length); -} - -static inline unsigned int ksys_personality(unsigned int personality) -{ - unsigned int old = current->personality; - - if (personality != 0xffffffff) - set_personality(personality); - - return old; -} - -/* for __ARCH_WANT_SYS_IPC */ -long ksys_semtimedop(int semid, struct sembuf __user *tsops, - unsigned int nsops, - const struct __kernel_timespec __user *timeout); -long ksys_semget(key_t key, int nsems, int semflg); -long ksys_old_semctl(int semid, int semnum, int cmd, unsigned long arg); -long ksys_msgget(key_t key, int msgflg); -long ksys_old_msgctl(int msqid, int cmd, struct msqid_ds __user *buf); -long ksys_msgrcv(int msqid, struct msgbuf __user *msgp, size_t msgsz, - long msgtyp, int msgflg); -long ksys_msgsnd(int msqid, struct msgbuf __user *msgp, size_t msgsz, - int msgflg); -long ksys_shmget(key_t key, size_t size, int shmflg); -long ksys_shmdt(char __user *shmaddr); -long ksys_old_shmctl(int shmid, int cmd, struct shmid_ds __user *buf); -long compat_ksys_semtimedop(int semid, struct sembuf __user *tsems, - unsigned int nsops, - const struct old_timespec32 __user *timeout); -long __do_semtimedop(int semid, struct sembuf *tsems, unsigned 
int nsops, - const struct timespec64 *timeout, - struct ipc_namespace *ns); - -int __sys_getsockopt(int fd, int level, int optname, char __user *optval, - int __user *optlen); -int __sys_setsockopt(int fd, int level, int optname, char __user *optval, - int optlen); #endif diff --git a/include/linux/syscore_ops.h b/include/linux/syscore_ops.h index ae4d48e4c9..e093dd9797 100644 --- a/include/linux/syscore_ops.h +++ b/include/linux/syscore_ops.h @@ -1,8 +1,9 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * syscore_ops.h - System core operations. * * Copyright (C) 2011 Rafael J. Wysocki , Novell Inc. + * + * This file is released under the GPLv2. */ #ifndef _LINUX_SYSCORE_OPS_H @@ -15,7 +16,7 @@ struct syscore_ops { int (*suspend)(void); void (*resume)(void); void (*shutdown)(void); -}; +} __do_const; extern void register_syscore_ops(struct syscore_ops *ops); extern void unregister_syscore_ops(struct syscore_ops *ops); diff --git a/include/linux/sysctl.h b/include/linux/sysctl.h index 1fa2b69c6f..807838b419 100644 --- a/include/linux/sysctl.h +++ b/include/linux/sysctl.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ /* * sysctl.h: General linux system control interface * @@ -37,37 +36,35 @@ struct ctl_table_root; struct ctl_table_header; struct ctl_dir; -/* Keep the same order as in fs/proc/proc_sysctl.c */ -#define SYSCTL_ZERO ((void *)&sysctl_vals[0]) -#define SYSCTL_ONE ((void *)&sysctl_vals[1]) -#define SYSCTL_INT_MAX ((void *)&sysctl_vals[2]) +typedef int proc_handler (struct ctl_table *ctl, int write, + void __user *buffer, size_t *lenp, loff_t *ppos); -extern const int sysctl_vals[]; - -typedef int proc_handler(struct ctl_table *ctl, int write, void *buffer, - size_t *lenp, loff_t *ppos); - -int proc_dostring(struct ctl_table *, int, void *, size_t *, loff_t *); -int proc_dobool(struct ctl_table *table, int write, void *buffer, - size_t *lenp, loff_t *ppos); -int proc_dointvec(struct ctl_table *, int, void *, size_t *, loff_t *); -int 
proc_douintvec(struct ctl_table *, int, void *, size_t *, loff_t *); -int proc_dointvec_minmax(struct ctl_table *, int, void *, size_t *, loff_t *); -int proc_douintvec_minmax(struct ctl_table *table, int write, void *buffer, - size_t *lenp, loff_t *ppos); -int proc_dou8vec_minmax(struct ctl_table *table, int write, void *buffer, - size_t *lenp, loff_t *ppos); -int proc_dointvec_jiffies(struct ctl_table *, int, void *, size_t *, loff_t *); -int proc_dointvec_userhz_jiffies(struct ctl_table *, int, void *, size_t *, - loff_t *); -int proc_dointvec_ms_jiffies(struct ctl_table *, int, void *, size_t *, - loff_t *); -int proc_doulongvec_minmax(struct ctl_table *, int, void *, size_t *, loff_t *); -int proc_doulongvec_ms_jiffies_minmax(struct ctl_table *table, int, void *, - size_t *, loff_t *); -int proc_do_large_bitmap(struct ctl_table *, int, void *, size_t *, loff_t *); -int proc_do_static_key(struct ctl_table *table, int write, void *buffer, - size_t *lenp, loff_t *ppos); +extern int proc_dostring(struct ctl_table *, int, + void __user *, size_t *, loff_t *); +extern int proc_dostring_modpriv(struct ctl_table *, int, + void __user *, size_t *, loff_t *); +extern int proc_dointvec(struct ctl_table *, int, + void __user *, size_t *, loff_t *); +extern int proc_dointvec_secure(struct ctl_table *, int, + void __user *, size_t *, loff_t *); +extern int proc_douintvec(struct ctl_table *, int, + void __user *, size_t *, loff_t *); +extern int proc_dointvec_minmax(struct ctl_table *, int, + void __user *, size_t *, loff_t *); +extern int proc_dointvec_minmax_secure(struct ctl_table *, int, + void __user *, size_t *, loff_t *); +extern int proc_dointvec_jiffies(struct ctl_table *, int, + void __user *, size_t *, loff_t *); +extern int proc_dointvec_userhz_jiffies(struct ctl_table *, int, + void __user *, size_t *, loff_t *); +extern int proc_dointvec_ms_jiffies(struct ctl_table *, int, + void __user *, size_t *, loff_t *); +extern int proc_doulongvec_minmax(struct ctl_table 
*, int, + void __user *, size_t *, loff_t *); +extern int proc_doulongvec_ms_jiffies_minmax(struct ctl_table *table, int, + void __user *, size_t *, loff_t *); +extern int proc_do_large_bitmap(struct ctl_table *, int, + void __user *, size_t *, loff_t *); /* * Register a set of sysctl names by calling register_sysctl_table @@ -78,13 +75,15 @@ int proc_do_static_key(struct ctl_table *table, int write, void *buffer, * sysctl names can be mirrored automatically under /proc/sys. The * procname supplied controls /proc naming. * - * The table's mode will be honoured for proc-fs access. + * The table's mode will be honoured both for sys_sysctl(2) and + * proc-fs access. * * Leaf nodes in the sysctl tree will be represented by a single file * under /proc; non-leaf nodes will be represented by directories. A * null procname disables /proc mirroring at this node. * - * The data and maxlen fields of the ctl_table + * sysctl(2) can automatically manage read and write requests through + * the sysctl table. The data and maxlen fields of the ctl_table * struct enable minimal validation of the values being written to be * performed, and the mode field allows minimal authentication. * @@ -113,7 +112,8 @@ static inline void *proc_sys_poll_event(struct ctl_table_poll *poll) struct ctl_table_poll name = __CTL_TABLE_POLL_INITIALIZER(name) /* A sysctl table is an array of struct ctl_table: */ -struct ctl_table { +struct ctl_table +{ const char *procname; /* Text ID for /proc/sys, or zero */ void *data; int maxlen; @@ -123,7 +123,8 @@ struct ctl_table { struct ctl_table_poll *poll; void *extra1; void *extra2; -} __randomize_layout; +} __do_const __randomize_layout; +typedef struct ctl_table __no_const ctl_table_no_const; struct ctl_node { struct rb_node node; @@ -132,13 +133,14 @@ struct ctl_node { /* struct ctl_table_header is used to maintain dynamic lists of struct ctl_table trees. 
*/ -struct ctl_table_header { +struct ctl_table_header +{ union { struct { struct ctl_table *ctl_table; - int used; - int count; - int nreg; + atomic_t used; + atomic_t count; + atomic_t nreg; }; struct rcu_head rcu; }; @@ -148,7 +150,6 @@ struct ctl_table_header { struct ctl_table_set *set; struct ctl_dir *parent; struct ctl_node *node; - struct hlist_head inodes; /* head for proc_inode->sysctl_inodes */ }; struct ctl_dir { @@ -185,6 +186,7 @@ extern void setup_sysctl_set(struct ctl_table_set *p, int (*is_seen)(struct ctl_table_set *)); extern void retire_sysctl_set(struct ctl_table_set *set); +void register_sysctl_root(struct ctl_table_root *root); struct ctl_table_header *__register_sysctl_table( struct ctl_table_set *set, const char *path, struct ctl_table *table); @@ -199,17 +201,8 @@ struct ctl_table_header *register_sysctl_paths(const struct ctl_path *path, void unregister_sysctl_table(struct ctl_table_header * table); extern int sysctl_init(void); -void do_sysctl_args(void); - -extern int pwrsw_enabled; -extern int unaligned_enabled; -extern int unaligned_dump_stack; -extern int no_unaligned_warning; extern struct ctl_table sysctl_mount_point[]; -extern struct ctl_table random_table[]; -extern struct ctl_table firmware_config_table[]; -extern struct ctl_table epoll_table[]; #else /* CONFIG_SYSCTL */ static inline struct ctl_table_header *register_sysctl_table(struct ctl_table * table) @@ -223,11 +216,6 @@ static inline struct ctl_table_header *register_sysctl_paths( return NULL; } -static inline struct ctl_table_header *register_sysctl(const char *path, struct ctl_table *table) -{ - return NULL; -} - static inline void unregister_sysctl_table(struct ctl_table_header * table) { } @@ -238,12 +226,9 @@ static inline void setup_sysctl_set(struct ctl_table_set *p, { } -static inline void do_sysctl_args(void) -{ -} #endif /* CONFIG_SYSCTL */ -int sysctl_max_threads(struct ctl_table *table, int write, void *buffer, - size_t *lenp, loff_t *ppos); +int 
sysctl_max_threads(struct ctl_table *table, int write, + void __user *buffer, size_t *lenp, loff_t *ppos); #endif /* _LINUX_SYSCTL_H */ diff --git a/include/linux/sysfs.h b/include/linux/sysfs.h index e3f1e8ac1f..e6635671be 100644 --- a/include/linux/sysfs.h +++ b/include/linux/sysfs.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ /* * sysfs.h - definitions for the device driver filesystem * @@ -7,7 +6,7 @@ * Copyright (c) 2007 SUSE Linux Products GmbH * Copyright (c) 2007 Tejun Heo * - * Please see Documentation/filesystems/sysfs.rst for more information. + * Please see Documentation/filesystems/sysfs.txt for more information. */ #ifndef _SYSFS_H_ @@ -35,7 +34,8 @@ struct attribute { struct lock_class_key *key; struct lock_class_key skey; #endif -}; +} __do_const; +typedef struct attribute __no_const attribute_no_const; /** * sysfs_attr_init - initialize a dynamically allocated sysfs attribute @@ -89,11 +89,12 @@ struct attribute_group { struct bin_attribute *, int); struct attribute **attrs; struct bin_attribute **bin_attrs; -}; +} __do_const; +typedef struct attribute_group __no_const attribute_group_no_const; -/* - * Use these macros to make defining attributes easier. - * See include/linux/device.h for examples.. +/** + * Use these macros to make defining attributes easier. See include/linux/device.h + * for examples.. 
*/ #define SYSFS_PREALLOC 010000 @@ -113,29 +114,17 @@ struct attribute_group { } #define __ATTR_RO(_name) { \ - .attr = { .name = __stringify(_name), .mode = 0444 }, \ + .attr = { .name = __stringify(_name), .mode = S_IRUGO }, \ .show = _name##_show, \ } -#define __ATTR_RO_MODE(_name, _mode) { \ - .attr = { .name = __stringify(_name), \ - .mode = VERIFY_OCTAL_PERMISSIONS(_mode) }, \ - .show = _name##_show, \ -} - -#define __ATTR_RW_MODE(_name, _mode) { \ - .attr = { .name = __stringify(_name), \ - .mode = VERIFY_OCTAL_PERMISSIONS(_mode) }, \ - .show = _name##_show, \ - .store = _name##_store, \ -} - #define __ATTR_WO(_name) { \ - .attr = { .name = __stringify(_name), .mode = 0200 }, \ + .attr = { .name = __stringify(_name), .mode = S_IWUSR }, \ .store = _name##_store, \ } -#define __ATTR_RW(_name) __ATTR(_name, 0644, _name##_show, _name##_store) +#define __ATTR_RW(_name) __ATTR(_name, (S_IWUSR | S_IRUGO), \ + _name##_show, _name##_store) #define __ATTR_NULL { .attr = { .name = NULL } } @@ -162,28 +151,21 @@ static const struct attribute_group _name##_group = { \ }; \ __ATTRIBUTE_GROUPS(_name) -#define BIN_ATTRIBUTE_GROUPS(_name) \ -static const struct attribute_group _name##_group = { \ - .bin_attrs = _name##_attrs, \ -}; \ -__ATTRIBUTE_GROUPS(_name) - struct file; struct vm_area_struct; -struct address_space; struct bin_attribute { struct attribute attr; size_t size; void *private; - struct address_space *(*f_mapping)(void); ssize_t (*read)(struct file *, struct kobject *, struct bin_attribute *, char *, loff_t, size_t); ssize_t (*write)(struct file *, struct kobject *, struct bin_attribute *, char *, loff_t, size_t); int (*mmap)(struct file *, struct kobject *, struct bin_attribute *attr, struct vm_area_struct *vma); -}; +} __do_const; +typedef struct bin_attribute __no_const bin_attribute_no_const; /** * sysfs_bin_attr_init - initialize a dynamically allocated bin_attribute @@ -206,19 +188,14 @@ struct bin_attribute { } #define __BIN_ATTR_RO(_name, _size) { \ - 
.attr = { .name = __stringify(_name), .mode = 0444 }, \ + .attr = { .name = __stringify(_name), .mode = S_IRUGO }, \ .read = _name##_read, \ .size = _size, \ } -#define __BIN_ATTR_WO(_name, _size) { \ - .attr = { .name = __stringify(_name), .mode = 0200 }, \ - .write = _name##_write, \ - .size = _size, \ -} - -#define __BIN_ATTR_RW(_name, _size) \ - __BIN_ATTR(_name, 0644, _name##_read, _name##_write, _size) +#define __BIN_ATTR_RW(_name, _size) __BIN_ATTR(_name, \ + (S_IWUSR | S_IRUGO), _name##_read, \ + _name##_write, _size) #define __BIN_ATTR_NULL __ATTR_NULL @@ -229,9 +206,6 @@ struct bin_attribute bin_attr_##_name = __BIN_ATTR(_name, _mode, _read, \ #define BIN_ATTR_RO(_name, _size) \ struct bin_attribute bin_attr_##_name = __BIN_ATTR_RO(_name, _size) -#define BIN_ATTR_WO(_name, _size) \ -struct bin_attribute bin_attr_##_name = __BIN_ATTR_WO(_name, _size) - #define BIN_ATTR_RW(_name, _size) \ struct bin_attribute bin_attr_##_name = __BIN_ATTR_RW(_name, _size) @@ -258,16 +232,13 @@ int __must_check sysfs_create_file_ns(struct kobject *kobj, const struct attribute *attr, const void *ns); int __must_check sysfs_create_files(struct kobject *kobj, - const struct attribute * const *attr); + const struct attribute **attr); int __must_check sysfs_chmod_file(struct kobject *kobj, const struct attribute *attr, umode_t mode); -struct kernfs_node *sysfs_break_active_protection(struct kobject *kobj, - const struct attribute *attr); -void sysfs_unbreak_active_protection(struct kernfs_node *kn); void sysfs_remove_file_ns(struct kobject *kobj, const struct attribute *attr, const void *ns); bool sysfs_remove_file_self(struct kobject *kobj, const struct attribute *attr); -void sysfs_remove_files(struct kobject *kobj, const struct attribute * const *attr); +void sysfs_remove_files(struct kobject *kobj, const struct attribute **attr); int __must_check sysfs_create_bin_file(struct kobject *kobj, const struct bin_attribute *attr); @@ -292,8 +263,6 @@ int __must_check 
sysfs_create_group(struct kobject *kobj, const struct attribute_group *grp); int __must_check sysfs_create_groups(struct kobject *kobj, const struct attribute_group **groups); -int __must_check sysfs_update_groups(struct kobject *kobj, - const struct attribute_group **groups); int sysfs_update_group(struct kobject *kobj, const struct attribute_group *grp); void sysfs_remove_group(struct kobject *kobj, @@ -312,10 +281,9 @@ int sysfs_add_link_to_group(struct kobject *kobj, const char *group_name, struct kobject *target, const char *link_name); void sysfs_remove_link_from_group(struct kobject *kobj, const char *group_name, const char *link_name); -int compat_only_sysfs_link_entry_to_kobj(struct kobject *kobj, - struct kobject *target_kobj, - const char *target_name, - const char *symlink_name); +int __compat_only_sysfs_link_entry_to_kobj(struct kobject *kobj, + struct kobject *target_kobj, + const char *target_name); void sysfs_notify(struct kobject *kobj, const char *dir, const char *attr); @@ -326,22 +294,6 @@ static inline void sysfs_enable_ns(struct kernfs_node *kn) return kernfs_enable_ns(kn); } -int sysfs_file_change_owner(struct kobject *kobj, const char *name, kuid_t kuid, - kgid_t kgid); -int sysfs_change_owner(struct kobject *kobj, kuid_t kuid, kgid_t kgid); -int sysfs_link_change_owner(struct kobject *kobj, struct kobject *targ, - const char *name, kuid_t kuid, kgid_t kgid); -int sysfs_groups_change_owner(struct kobject *kobj, - const struct attribute_group **groups, - kuid_t kuid, kgid_t kgid); -int sysfs_group_change_owner(struct kobject *kobj, - const struct attribute_group *groups, kuid_t kuid, - kgid_t kgid); -__printf(2, 3) -int sysfs_emit(char *buf, const char *fmt, ...); -__printf(3, 4) -int sysfs_emit_at(char *buf, int at, const char *fmt, ...); - #else /* CONFIG_SYSFS */ static inline int sysfs_create_dir_ns(struct kobject *kobj, const void *ns) @@ -385,7 +337,7 @@ static inline int sysfs_create_file_ns(struct kobject *kobj, } static inline int 
sysfs_create_files(struct kobject *kobj, - const struct attribute * const *attr) + const struct attribute **attr) { return 0; } @@ -396,17 +348,6 @@ static inline int sysfs_chmod_file(struct kobject *kobj, return 0; } -static inline struct kernfs_node * -sysfs_break_active_protection(struct kobject *kobj, - const struct attribute *attr) -{ - return NULL; -} - -static inline void sysfs_unbreak_active_protection(struct kernfs_node *kn) -{ -} - static inline void sysfs_remove_file_ns(struct kobject *kobj, const struct attribute *attr, const void *ns) @@ -420,7 +361,7 @@ static inline bool sysfs_remove_file_self(struct kobject *kobj, } static inline void sysfs_remove_files(struct kobject *kobj, - const struct attribute * const *attr) + const struct attribute **attr) { } @@ -476,12 +417,6 @@ static inline int sysfs_create_groups(struct kobject *kobj, return 0; } -static inline int sysfs_update_groups(struct kobject *kobj, - const struct attribute_group **groups) -{ - return 0; -} - static inline int sysfs_update_group(struct kobject *kobj, const struct attribute_group *grp) { @@ -532,10 +467,10 @@ static inline void sysfs_remove_link_from_group(struct kobject *kobj, { } -static inline int compat_only_sysfs_link_entry_to_kobj(struct kobject *kobj, - struct kobject *target_kobj, - const char *target_name, - const char *symlink_name) +static inline int __compat_only_sysfs_link_entry_to_kobj( + struct kobject *kobj, + struct kobject *target_kobj, + const char *target_name) { return 0; } @@ -554,51 +489,6 @@ static inline void sysfs_enable_ns(struct kernfs_node *kn) { } -static inline int sysfs_file_change_owner(struct kobject *kobj, - const char *name, kuid_t kuid, - kgid_t kgid) -{ - return 0; -} - -static inline int sysfs_link_change_owner(struct kobject *kobj, - struct kobject *targ, - const char *name, kuid_t kuid, - kgid_t kgid) -{ - return 0; -} - -static inline int sysfs_change_owner(struct kobject *kobj, kuid_t kuid, kgid_t kgid) -{ - return 0; -} - -static inline 
int sysfs_groups_change_owner(struct kobject *kobj, - const struct attribute_group **groups, - kuid_t kuid, kgid_t kgid) -{ - return 0; -} - -static inline int sysfs_group_change_owner(struct kobject *kobj, - const struct attribute_group *groups, - kuid_t kuid, kgid_t kgid) -{ - return 0; -} - -__printf(2, 3) -static inline int sysfs_emit(char *buf, const char *fmt, ...) -{ - return 0; -} - -__printf(3, 4) -static inline int sysfs_emit_at(char *buf, int at, const char *fmt, ...) -{ - return 0; -} #endif /* CONFIG_SYSFS */ static inline int __must_check sysfs_create_file(struct kobject *kobj, diff --git a/include/linux/syslog.h b/include/linux/syslog.h index 86af908e26..c3a7f0cc3a 100644 --- a/include/linux/syslog.h +++ b/include/linux/syslog.h @@ -1,8 +1,21 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ /* Syslog internals * * Copyright 2010 Canonical, Ltd. * Author: Kees Cook + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2, or (at your option) + * any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; see the file COPYING. If not, write to + * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. 
*/ #ifndef _LINUX_SYSLOG_H @@ -36,4 +49,13 @@ int do_syslog(int type, char __user *buf, int count, int source); +#ifdef CONFIG_PRINTK +int check_syslog_permissions(int type, int source); +#else +static inline int check_syslog_permissions(int type, int source) +{ + return 0; +} +#endif + #endif /* _LINUX_SYSLOG_H */ diff --git a/include/linux/sysrq.h b/include/linux/sysrq.h index 3a582ec7a2..3fcde6b847 100644 --- a/include/linux/sysrq.h +++ b/include/linux/sysrq.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ /* -*- linux-c -*- * * $Id: sysrq.h,v 1.3 1997/07/17 11:54:33 mj Exp $ @@ -17,6 +16,7 @@ #include #include +#include /* Possible values of bitmask for enabling sysrq functions */ /* 0x0001 is reserved for enable everything */ @@ -30,11 +30,11 @@ #define SYSRQ_ENABLE_RTNICE 0x0100 struct sysrq_key_op { - void (* const handler)(int); - const char * const help_msg; - const char * const action_msg; - const int enable_mask; -}; + void (*handler)(int); + char *help_msg; + char *action_msg; + int enable_mask; +} __do_const; #ifdef CONFIG_MAGIC_SYSRQ @@ -45,12 +45,11 @@ struct sysrq_key_op { void handle_sysrq(int key); void __handle_sysrq(int key, bool check_mask); -int register_sysrq_key(int key, const struct sysrq_key_op *op); -int unregister_sysrq_key(int key, const struct sysrq_key_op *op); -extern const struct sysrq_key_op *__sysrq_reboot_op; +int register_sysrq_key(int key, struct sysrq_key_op *op); +int unregister_sysrq_key(int key, struct sysrq_key_op *op); +struct sysrq_key_op *__sysrq_get_key_op(int key); int sysrq_toggle_support(int enable_mask); -int sysrq_mask(void); #else @@ -62,22 +61,16 @@ static inline void __handle_sysrq(int key, bool check_mask) { } -static inline int register_sysrq_key(int key, const struct sysrq_key_op *op) +static inline int register_sysrq_key(int key, struct sysrq_key_op *op) { return -EINVAL; } -static inline int unregister_sysrq_key(int key, const struct sysrq_key_op *op) +static inline int unregister_sysrq_key(int 
key, struct sysrq_key_op *op) { return -EINVAL; } -static inline int sysrq_mask(void) -{ - /* Magic SysRq disabled mask */ - return 0; -} - #endif #endif /* _LINUX_SYSRQ_H */ diff --git a/include/linux/sysv_fs.h b/include/linux/sysv_fs.h index 5cf77dbb8d..e47d6d9002 100644 --- a/include/linux/sysv_fs.h +++ b/include/linux/sysv_fs.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_SYSV_FS_H #define _LINUX_SYSV_FS_H diff --git a/include/linux/t10-pi.h b/include/linux/t10-pi.h index 96305a64a5..9fba9dd335 100644 --- a/include/linux/t10-pi.h +++ b/include/linux/t10-pi.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_T10_PI_H #define _LINUX_T10_PI_H @@ -34,23 +33,10 @@ struct t10_pi_tuple { __be32 ref_tag; /* Target LBA or indirect LBA */ }; -#define T10_PI_APP_ESCAPE cpu_to_be16(0xffff) -#define T10_PI_REF_ESCAPE cpu_to_be32(0xffffffff) -static inline u32 t10_pi_ref_tag(struct request *rq) -{ - unsigned int shift = ilog2(queue_logical_block_size(rq->q)); - -#ifdef CONFIG_BLK_DEV_INTEGRITY - if (rq->q->integrity.interval_exp) - shift = rq->q->integrity.interval_exp; -#endif - return blk_rq_pos(rq) >> (shift - SECTOR_SHIFT) & 0xffffffff; -} - -extern const struct blk_integrity_profile t10_pi_type1_crc; -extern const struct blk_integrity_profile t10_pi_type1_ip; -extern const struct blk_integrity_profile t10_pi_type3_crc; -extern const struct blk_integrity_profile t10_pi_type3_ip; +extern struct blk_integrity_profile t10_pi_type1_crc; +extern struct blk_integrity_profile t10_pi_type1_ip; +extern struct blk_integrity_profile t10_pi_type3_crc; +extern struct blk_integrity_profile t10_pi_type3_ip; #endif diff --git a/include/linux/task_io_accounting.h b/include/linux/task_io_accounting.h index 6f6acce064..bdf855c285 100644 --- a/include/linux/task_io_accounting.h +++ b/include/linux/task_io_accounting.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ /* * task_io_accounting: a structure which is used for recording a 
single task's * IO statistics. diff --git a/include/linux/task_io_accounting_ops.h b/include/linux/task_io_accounting_ops.h index bb5498bcdd..4d090f9ee6 100644 --- a/include/linux/task_io_accounting_ops.h +++ b/include/linux/task_io_accounting_ops.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ /* * Task I/O accounting operations */ diff --git a/include/linux/task_work.h b/include/linux/task_work.h index 5b8a93f288..ca5a1cf27d 100644 --- a/include/linux/task_work.h +++ b/include/linux/task_work.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_TASK_WORK_H #define _LINUX_TASK_WORK_H @@ -13,17 +12,7 @@ init_task_work(struct callback_head *twork, task_work_func_t func) twork->func = func; } -enum task_work_notify_mode { - TWA_NONE, - TWA_RESUME, - TWA_SIGNAL, -}; - -int task_work_add(struct task_struct *task, struct callback_head *twork, - enum task_work_notify_mode mode); - -struct callback_head *task_work_cancel_match(struct task_struct *task, - bool (*match)(struct callback_head *, void *data), void *data); +int task_work_add(struct task_struct *task, struct callback_head *twork, bool); struct callback_head *task_work_cancel(struct task_struct *, task_work_func_t); void task_work_run(void); diff --git a/include/linux/taskstats_kern.h b/include/linux/taskstats_kern.h index dbb4d124c7..58de6edf75 100644 --- a/include/linux/taskstats_kern.h +++ b/include/linux/taskstats_kern.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ /* taskstats_kern.h - kernel header for per-task statistics interface * * Copyright (C) Shailabh Nagar, IBM Corp. 
2006 @@ -9,7 +8,7 @@ #define _LINUX_TASKSTATS_KERN_H #include -#include +#include #include #ifdef CONFIG_TASKSTATS diff --git a/include/linux/tboot.h b/include/linux/tboot.h index 5146d2574e..9a54b331f9 100644 --- a/include/linux/tboot.h +++ b/include/linux/tboot.h @@ -1,10 +1,23 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * tboot.h: shared data structure with tboot and kernel and functions * used by kernel for runtime support of Intel(R) Trusted * Execution Technology * * Copyright (c) 2006-2009, Intel Corporation + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. 
+ * */ #ifndef _LINUX_TBOOT_H @@ -44,7 +57,7 @@ struct tboot_acpi_generic_address { /* * combines Sx info from FADT and FACS tables per ACPI 2.0+ spec - * (https://uefi.org/specifications) + * (http://www.acpi.info/) */ struct tboot_acpi_sleep_info { struct tboot_acpi_generic_address pm1a_cnt_blk; @@ -121,7 +134,13 @@ struct tboot { #define TBOOT_UUID {0xff, 0x8d, 0x3c, 0x66, 0xb3, 0xe8, 0x82, 0x4b, 0xbf,\ 0xaa, 0x19, 0xea, 0x4d, 0x5, 0x7a, 0x8} -bool tboot_enabled(void); +extern struct tboot *tboot; + +static inline int tboot_enabled(void) +{ + return tboot != NULL; +} + extern void tboot_probe(void); extern void tboot_shutdown(u32 shutdown_type); extern struct acpi_table_header *tboot_get_dmar_table( diff --git a/include/linux/tc.h b/include/linux/tc.h index a60639f379..f92511e57c 100644 --- a/include/linux/tc.h +++ b/include/linux/tc.h @@ -84,7 +84,6 @@ struct tc_dev { device. */ struct device dev; /* Generic device interface. */ struct resource resource; /* Address space of this device. */ - u64 dma_mask; /* DMA addressable range. */ char vendor[9]; char name[9]; char firmware[9]; diff --git a/include/linux/tca6416_keypad.h b/include/linux/tca6416_keypad.h index b0d36a9934..7bd266f352 100644 --- a/include/linux/tca6416_keypad.h +++ b/include/linux/tca6416_keypad.h @@ -1,10 +1,13 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * tca6416 keypad platform support * * Copyright (C) 2010 Texas Instruments * * Author: Sriramakrishnan + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. */ #ifndef _TCA6416_KEYS_H diff --git a/include/linux/tcp.h b/include/linux/tcp.h index 48d8a36331..87a727df34 100644 --- a/include/linux/tcp.h +++ b/include/linux/tcp.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * INET An implementation of the TCP/IP protocol suite for the LINUX * operating system. 
INET is implemented using the BSD Socket @@ -9,6 +8,11 @@ * Version: @(#)tcp.h 1.0.2 04/28/93 * * Author: Fred N. van Kempen, + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. */ #ifndef _LINUX_TCP_H #define _LINUX_TCP_H @@ -58,29 +62,35 @@ static inline unsigned int tcp_optlen(const struct sk_buff *skb) /* TCP Fast Open Cookie as stored in memory */ struct tcp_fastopen_cookie { - __le64 val[DIV_ROUND_UP(TCP_FASTOPEN_COOKIE_MAX, sizeof(u64))]; + union { + u8 val[TCP_FASTOPEN_COOKIE_MAX]; +#if IS_ENABLED(CONFIG_IPV6) + struct in6_addr addr; +#endif + }; s8 len; bool exp; /* In RFC6994 experimental option format */ }; /* This defines a selective acknowledgement block. */ struct tcp_sack_block_wire { - __be32 start_seq; - __be32 end_seq; + __be32 start_seq __intentional_overflow(-1); + __be32 end_seq __intentional_overflow(-1); }; struct tcp_sack_block { - u32 start_seq; - u32 end_seq; + u32 start_seq __intentional_overflow(-1); + u32 end_seq __intentional_overflow(-1); }; /*These are used to set the sack_ok field in struct tcp_options_received */ #define TCP_SACK_SEEN (1 << 0) /*1 = peer is SACK capable, */ +#define TCP_FACK_ENABLED (1 << 1) /*1 = FACK is enabled locally*/ #define TCP_DSACK_SEEN (1 << 2) /*1 = DSACK was received from peer*/ struct tcp_options_received { /* PAWS/RTTM data */ - int ts_recent_stamp;/* Time we stored ts_recent (for aging) */ + long ts_recent_stamp;/* Time we stored ts_recent (for aging) */ u32 ts_recent; /* Time stamp to echo next */ u32 rcv_tsval; /* Time stamp value */ u32 rcv_tsecr; /* Time stamp echo reply */ @@ -88,12 +98,9 @@ struct tcp_options_received { tstamp_ok : 1, /* TIMESTAMP seen on SYN packet */ dsack : 1, /* D-SACK is scheduled */ wscale_ok : 1, /* Wscale seen on SYN packet */ - sack_ok : 3, /* SACK seen on SYN 
packet */ - smc_ok : 1, /* SMC seen on SYN packet */ + sack_ok : 4, /* SACK seen on SYN packet */ snd_wscale : 4, /* Window scaling received from sender */ rcv_wscale : 4; /* Window scaling to send to receiver */ - u8 saw_unknown:1, /* Received unknown option */ - unused:7; u8 num_sacks; /* Number of SACK blocks */ u16 user_mss; /* mss requested by user in ioctl */ u16 mss_clamp; /* Maximal mss, negotiated at connection setup */ @@ -103,9 +110,6 @@ static inline void tcp_clear_options(struct tcp_options_received *rx_opt) { rx_opt->tstamp_ok = rx_opt->sack_ok = 0; rx_opt->wscale_ok = rx_opt->snd_wscale = 0; -#if IS_ENABLED(CONFIG_SMC) - rx_opt->smc_ok = 0; -#endif } /* This is the max number of SACKS that we'll generate and process. It's safe @@ -119,22 +123,16 @@ struct tcp_request_sock_ops; struct tcp_request_sock { struct inet_request_sock req; const struct tcp_request_sock_ops *af_specific; - u64 snt_synack; /* first SYNACK sent time */ + struct skb_mstamp snt_synack; /* first SYNACK sent time */ bool tfo_listener; - bool is_mptcp; -#if IS_ENABLED(CONFIG_MPTCP) - bool drop_req; -#endif u32 txhash; u32 rcv_isn; u32 snt_isn; - u32 ts_off; u32 last_oow_ack_time; /* last SYNACK */ u32 rcv_nxt; /* the ack # by SYNACK. For * FastOpen it's the seq# * after data-in-SYN. */ - u8 syn_tos; }; static inline struct tcp_request_sock *tcp_rsk(const struct request_sock *req) @@ -170,7 +168,7 @@ struct tcp_sock { * total number of data segments in. */ u32 rcv_nxt; /* What we want to receive next */ - u32 copied_seq; /* Head of yet unread data */ + u32 copied_seq __intentional_overflow(-1); /* Head of yet unread data */ u32 rcv_wup; /* rcv_nxt on last window update sent */ u32 snd_nxt; /* Next sequence we send */ u32 segs_out; /* RFC4898 tcpEStatsPerfSegsOut @@ -179,27 +177,31 @@ struct tcp_sock { u32 data_segs_out; /* RFC4898 tcpEStatsPerfDataSegsOut * total number of data segments sent. 
*/ - u64 bytes_sent; /* RFC4898 tcpEStatsPerfHCDataOctetsOut - * total number of data bytes sent. - */ u64 bytes_acked; /* RFC4898 tcpEStatsAppHCThruOctetsAcked * sum(delta(snd_una)), or how many bytes * were acked. */ - u32 dsack_dups; /* RFC4898 tcpEStatsStackDSACKDups - * total number of DSACK blocks received - */ + struct u64_stats_sync syncp; /* protects 64bit vars (cf tcp_get_info()) */ + u32 snd_una; /* First byte we want an ack for */ u32 snd_sml; /* Last byte of the most recently transmitted small packet */ u32 rcv_tstamp; /* timestamp of last received ACK (for keepalives) */ u32 lsndtime; /* timestamp of last sent data packet (for restart window) */ u32 last_oow_ack_time; /* timestamp of last out-of-window ACK */ - u32 compressed_ack_rcv_nxt; u32 tsoffset; /* timestamp offset */ struct list_head tsq_node; /* anchor in tsq_tasklet.head list */ - struct list_head tsorted_sent_queue; /* time-sorted sent but un-SACKed skbs */ + unsigned long tsq_flags; + + /* Data for direct copy to user */ + struct { + struct sk_buff_head prequeue; + struct task_struct *task; + struct msghdr *msg; + int memory; + int len; + } ucopy; u32 snd_wl1; /* Sequence for window update */ u32 snd_wnd; /* The window we expect to receive */ @@ -211,50 +213,29 @@ struct tcp_sock { /* Information of the most recently (s)acked skb */ struct tcp_rack { - u64 mstamp; /* (Re)sent time of the skb */ - u32 rtt_us; /* Associated RTT */ - u32 end_seq; /* Ending TCP sequence of the skb */ - u32 last_delivered; /* tp->delivered at last reo_wnd adj */ - u8 reo_wnd_steps; /* Allowed reordering window */ -#define TCP_RACK_RECOVERY_THRESH 16 - u8 reo_wnd_persist:5, /* No. 
of recovery since last adj */ - dsack_seen:1, /* Whether DSACK seen after last adj */ - advanced:1; /* mstamp advanced since last lost marking */ + struct skb_mstamp mstamp; /* (Re)sent time of the skb */ + u8 advanced; /* mstamp advanced since last lost marking */ + u8 reord; /* reordering detected */ } rack; u16 advmss; /* Advertised MSS */ - u8 compressed_ack; - u8 dup_ack_counter:2, - tlp_retrans:1, /* TLP is a retransmission */ - unused:5; - u32 chrono_start; /* Start time in jiffies of a TCP chrono */ - u32 chrono_stat[3]; /* Time in jiffies for chrono_stat stats */ - u8 chrono_type:2, /* current chronograph type */ - rate_app_limited:1, /* rate_{delivered,interval_us} limited? */ - fastopen_connect:1, /* FASTOPEN_CONNECT sockopt */ - fastopen_no_cookie:1, /* Allow send/recv SYN+data without a cookie */ - is_sack_reneg:1, /* in recovery from loss with SACK reneg? */ - fastopen_client_fail:2; /* reason why fastopen failed */ + u8 rate_app_limited:1, /* rate_{delivered,interval_us} limited? */ + unused:7; u8 nonagle : 4,/* Disable Nagle algorithm? */ thin_lto : 1,/* Use linear timeouts for thin streams */ - recvmsg_inq : 1,/* Indicate # of bytes in queue upon recvmsg */ + thin_dupack : 1,/* Fast retransmit on first dupack */ repair : 1, frto : 1;/* F-RTO (RFC5682) activated in CA_Loss */ u8 repair_queue; - u8 save_syn:2, /* Save headers of SYN packet */ + u8 do_early_retrans:1,/* Enable RFC5827 early-retransmit */ syn_data:1, /* SYN includes data */ syn_fastopen:1, /* SYN includes Fast Open option */ syn_fastopen_exp:1,/* SYN includes Fast Open exp. option */ - syn_fastopen_ch:1, /* Active TFO re-enabling probe */ syn_data_acked:1,/* data in SYN is acked by SYN-ACK */ + save_syn:1, /* Save headers of SYN packet */ is_cwnd_limited:1;/* forward progress limited by snd_cwnd? 
*/ - u32 tlp_high_seq; /* snd_nxt at the time of TLP */ - - u32 tcp_tx_delay; /* delay (in usec) added to TX packets */ - u64 tcp_wstamp_ns; /* departure time for next sent data packet */ - u64 tcp_clock_cache; /* cache last tcp_clock_ns() (see tcp_mstamp_refresh()) */ + u32 tlp_high_seq; /* snd_nxt at the time of TLP retransmit. */ /* RTT measurement */ - u64 tcp_mstamp; /* most recent packet received/sent */ u32 srtt_us; /* smoothed round trip time << 3 in usecs */ u32 mdev_us; /* medium deviation */ u32 mdev_max_us; /* maximal mdev for the last rtt period */ @@ -271,7 +252,6 @@ struct tcp_sock { u8 ecn_flags; /* ECN status bits. */ u8 keepalive_probes; /* num of allowed keep alive probes */ u32 reordering; /* Packet reordering metric. */ - u32 reord_seen; /* number of data packet reordering events */ u32 snd_up; /* Urgent pointer */ /* @@ -288,28 +268,25 @@ struct tcp_sock { u32 snd_cwnd_clamp; /* Do not allow snd_cwnd to grow above this */ u32 snd_cwnd_used; u32 snd_cwnd_stamp; - u32 prior_cwnd; /* cwnd right before starting loss recovery */ + u32 prior_cwnd; /* Congestion window at start of Recovery. */ u32 prr_delivered; /* Number of newly delivered packets to * receiver in Recovery. */ u32 prr_out; /* Total number of pkts sent during Recovery. */ u32 delivered; /* Total data packets delivered incl. rexmits */ - u32 delivered_ce; /* Like the above but only ECE marked packets */ u32 lost; /* Total data packets lost incl. 
rexmits */ u32 app_limited; /* limited until "delivered" reaches this val */ - u64 first_tx_mstamp; /* start of window send phase */ - u64 delivered_mstamp; /* time we reached "delivered" */ + struct skb_mstamp first_tx_mstamp; /* start of window send phase */ + struct skb_mstamp delivered_mstamp; /* time we reached "delivered" */ u32 rate_delivered; /* saved rate sample: packets delivered */ u32 rate_interval_us; /* saved rate sample: time elapsed */ u32 rcv_wnd; /* Current receiver window */ - u32 write_seq; /* Tail(+1) of data held in tcp send buffer */ + u32 write_seq __intentional_overflow(-1); /* Tail(+1) of data held in tcp send buffer */ u32 notsent_lowat; /* TCP_NOTSENT_LOWAT */ u32 pushed_seq; /* Last pushed seq, required to talk to windows */ u32 lost_out; /* Lost packets */ u32 sacked_out; /* SACK'd packets */ - - struct hrtimer pacing_timer; - struct hrtimer compressed_ack_timer; + u32 fackets_out; /* FACK'd packets */ /* from STCP, retrans queue hinting */ struct sk_buff* lost_skb_hint; @@ -332,6 +309,7 @@ struct tcp_sock { */ int lost_cnt_hint; + u32 retransmit_high; /* L-bits may be on up to this seqno */ u32 prior_ssthresh; /* ssthresh saved at recovery start */ u32 high_seq; /* snd_nxt at onset of congestion */ @@ -341,45 +319,26 @@ struct tcp_sock { * the first SYN. */ u32 undo_marker; /* snd_una upon a new recovery episode. */ int undo_retrans; /* number of undoable retransmissions. 
*/ - u64 bytes_retrans; /* RFC4898 tcpEStatsPerfOctetsRetrans - * Total data bytes retransmitted - */ u32 total_retrans; /* Total retransmits for entire connection */ - u32 urg_seq; /* Seq of received urgent pointer */ + u32 urg_seq __intentional_overflow(-1); /* Seq of received urgent pointer */ unsigned int keepalive_time; /* time before keep alive takes place */ unsigned int keepalive_intvl; /* time interval between keep alive probes */ int linger2; - -/* Sock_ops bpf program related variables */ -#ifdef CONFIG_BPF - u8 bpf_sock_ops_cb_flags; /* Control calling BPF programs - * values defined in uapi/linux/tcp.h - */ -#define BPF_SOCK_OPS_TEST_FLAG(TP, ARG) (TP->bpf_sock_ops_cb_flags & ARG) -#else -#define BPF_SOCK_OPS_TEST_FLAG(TP, ARG) 0 -#endif - - u16 timeout_rehash; /* Timeout-triggered rehash attempts */ - - u32 rcv_ooopack; /* Received out-of-order packets, for tcpinfo */ - /* Receiver side RTT estimation */ - u32 rcv_rtt_last_tsecr; struct { - u32 rtt_us; + u32 rtt; u32 seq; - u64 time; + u32 time; } rcv_rtt_est; /* Receiver queue space */ struct { - u32 space; + int space; u32 seq; - u64 time; + u32 time; } rcvq_space; /* TCP-specific MTU probe information. */ @@ -390,12 +349,6 @@ struct tcp_sock { u32 mtu_info; /* We received an ICMP_FRAG_NEEDED / ICMPV6_PKT_TOOBIG * while socket was owned by user. */ -#if IS_ENABLED(CONFIG_MPTCP) - bool is_mptcp; -#endif -#if IS_ENABLED(CONFIG_SMC) - bool syn_smc; /* SYN includes SMC */ -#endif #ifdef CONFIG_TCP_MD5SIG /* TCP AF-Specific parts; only used by MD5 Signature support so far */ @@ -410,11 +363,11 @@ struct tcp_sock { /* fastopen_rsk points to request_sock that resulted in this big * socket. Used to retransmit SYNACKs etc. 
*/ - struct request_sock __rcu *fastopen_rsk; - struct saved_syn *saved_syn; + struct request_sock *fastopen_rsk; + u32 *saved_syn; }; -enum tsq_enum { +enum tsq_flags { TSQ_THROTTLED, TSQ_QUEUED, TCP_TSQ_DEFERRED, /* tcp_tasklet_func() found socket was owned */ @@ -425,15 +378,6 @@ enum tsq_enum { */ }; -enum tsq_flags { - TSQF_THROTTLED = (1UL << TSQ_THROTTLED), - TSQF_QUEUED = (1UL << TSQ_QUEUED), - TCPF_TSQ_DEFERRED = (1UL << TCP_TSQ_DEFERRED), - TCPF_WRITE_TIMER_DEFERRED = (1UL << TCP_WRITE_TIMER_DEFERRED), - TCPF_DELACK_TIMER_DEFERRED = (1UL << TCP_DELACK_TIMER_DEFERRED), - TCPF_MTU_REDUCED_DEFERRED = (1UL << TCP_MTU_REDUCED_DEFERRED), -}; - static inline struct tcp_sock *tcp_sk(const struct sock *sk) { return (struct tcp_sock *)sk; @@ -450,8 +394,7 @@ struct tcp_timewait_sock { /* The time we sent the last out-of-window ACK: */ u32 tw_last_oow_ack_time; - int tw_ts_recent_stamp; - u32 tw_tx_delay; + long tw_ts_recent_stamp; #ifdef CONFIG_TCP_MD5SIG struct tcp_md5sig_key *tw_md5_key; #endif @@ -464,8 +407,8 @@ static inline struct tcp_timewait_sock *tcp_twsk(const struct sock *sk) static inline bool tcp_passive_fastopen(const struct sock *sk) { - return sk->sk_state == TCP_SYN_RECV && - rcu_access_pointer(tcp_sk(sk)->fastopen_rsk) != NULL; + return (sk->sk_state == TCP_SYN_RECV && + tcp_sk(sk)->fastopen_rsk != NULL); } static inline void fastopen_queue_tune(struct sock *sk, int backlog) @@ -489,37 +432,4 @@ static inline void tcp_saved_syn_free(struct tcp_sock *tp) tp->saved_syn = NULL; } -static inline u32 tcp_saved_syn_len(const struct saved_syn *saved_syn) -{ - return saved_syn->mac_hdrlen + saved_syn->network_hdrlen + - saved_syn->tcp_hdrlen; -} - -struct sk_buff *tcp_get_timestamping_opt_stats(const struct sock *sk, - const struct sk_buff *orig_skb, - const struct sk_buff *ack_skb); - -static inline u16 tcp_mss_clamp(const struct tcp_sock *tp, u16 mss) -{ - /* We use READ_ONCE() here because socket might not be locked. - * This happens for listeners. 
- */ - u16 user_mss = READ_ONCE(tp->rx_opt.user_mss); - - return (user_mss && user_mss < mss) ? user_mss : mss; -} - -int tcp_skb_shift(struct sk_buff *to, struct sk_buff *from, int pcount, - int shiftlen); - -void tcp_sock_set_cork(struct sock *sk, bool on); -int tcp_sock_set_keepcnt(struct sock *sk, int val); -int tcp_sock_set_keepidle_locked(struct sock *sk, int val); -int tcp_sock_set_keepidle(struct sock *sk, int val); -int tcp_sock_set_keepintvl(struct sock *sk, int val); -void tcp_sock_set_nodelay(struct sock *sk); -void tcp_sock_set_quickack(struct sock *sk, int val); -int tcp_sock_set_syncnt(struct sock *sk, int val); -void tcp_sock_set_user_timeout(struct sock *sk, u32 val); - #endif /* _LINUX_TCP_H */ diff --git a/include/linux/textsearch.h b/include/linux/textsearch.h index 6673e4d4ac..cfaee86914 100644 --- a/include/linux/textsearch.h +++ b/include/linux/textsearch.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef __LINUX_TEXTSEARCH_H #define __LINUX_TEXTSEARCH_H @@ -23,7 +22,7 @@ struct ts_config; struct ts_state { unsigned int offset; - char cb[48]; + char cb[40]; }; /** @@ -62,7 +61,7 @@ struct ts_config int flags; /** - * @get_next_block: fetch next block of data + * get_next_block - fetch next block of data * @consumed: number of bytes consumed by the caller * @dst: destination buffer * @conf: search configuration @@ -79,7 +78,7 @@ struct ts_config struct ts_state *state); /** - * @finish: finalize/clean a series of get_next_block() calls + * finish - finalize/clean a series of get_next_block() calls * @conf: search configuration * @state: search state * diff --git a/include/linux/textsearch_fsm.h b/include/linux/textsearch_fsm.h index b57231ff65..fdfa078c66 100644 --- a/include/linux/textsearch_fsm.h +++ b/include/linux/textsearch_fsm.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef __LINUX_TEXTSEARCH_FSM_H #define __LINUX_TEXTSEARCH_FSM_H diff --git a/include/linux/tfrc.h b/include/linux/tfrc.h index 
a5acc76808..8a8462b4a4 100644 --- a/include/linux/tfrc.h +++ b/include/linux/tfrc.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ #ifndef _LINUX_TFRC_H_ #define _LINUX_TFRC_H_ /* @@ -9,6 +8,11 @@ * Copyright (c) 2005 Ian McDonald * Copyright (c) 2005 Arnaldo Carvalho de Melo * Copyright (c) 2003 Nils-Erik Mattsson, Joacim Haggmark, Magnus Erixzon + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. */ #include diff --git a/include/linux/thermal.h b/include/linux/thermal.h index c314893970..511182a88e 100644 --- a/include/linux/thermal.h +++ b/include/linux/thermal.h @@ -1,10 +1,25 @@ -/* SPDX-License-Identifier: GPL-2.0 */ /* * thermal.h ($Revision: 0 $) * * Copyright (C) 2008 Intel Corp * Copyright (C) 2008 Zhang Rui * Copyright (C) 2008 Sujith Thomas + * + * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; version 2 of the License. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write to the Free Software Foundation, Inc., + * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. 
+ * + * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ */ #ifndef __THERMAL_H__ @@ -13,10 +28,10 @@ #include #include #include -#include #include #include +#define THERMAL_TRIPS_NONE -1 #define THERMAL_MAX_TRIPS 12 /* invalid cooling state */ @@ -31,10 +46,43 @@ /* use value, which < 0K, to indicate an invalid/uninitialized temperature */ #define THERMAL_TEMP_INVALID -274000 +/* Unit conversion macros */ +#define DECI_KELVIN_TO_CELSIUS(t) ({ \ + long _t = (t); \ + ((_t-2732 >= 0) ? (_t-2732+5)/10 : (_t-2732-5)/10); \ +}) +#define CELSIUS_TO_DECI_KELVIN(t) ((t)*10+2732) +#define DECI_KELVIN_TO_MILLICELSIUS_WITH_OFFSET(t, off) (((t) - (off)) * 100) +#define DECI_KELVIN_TO_MILLICELSIUS(t) DECI_KELVIN_TO_MILLICELSIUS_WITH_OFFSET(t, 2732) +#define MILLICELSIUS_TO_DECI_KELVIN_WITH_OFFSET(t, off) (((t) / 100) + (off)) +#define MILLICELSIUS_TO_DECI_KELVIN(t) MILLICELSIUS_TO_DECI_KELVIN_WITH_OFFSET(t, 2732) + +/* Default Thermal Governor */ +#if defined(CONFIG_THERMAL_DEFAULT_GOV_STEP_WISE) +#define DEFAULT_THERMAL_GOVERNOR "step_wise" +#elif defined(CONFIG_THERMAL_DEFAULT_GOV_FAIR_SHARE) +#define DEFAULT_THERMAL_GOVERNOR "fair_share" +#elif defined(CONFIG_THERMAL_DEFAULT_GOV_USER_SPACE) +#define DEFAULT_THERMAL_GOVERNOR "user_space" +#elif defined(CONFIG_THERMAL_DEFAULT_GOV_POWER_ALLOCATOR) +#define DEFAULT_THERMAL_GOVERNOR "power_allocator" +#endif + struct thermal_zone_device; struct thermal_cooling_device; struct thermal_instance; -struct thermal_attr; + +enum thermal_device_mode { + THERMAL_DEVICE_DISABLED = 0, + THERMAL_DEVICE_ENABLED, +}; + +enum thermal_trip_type { + THERMAL_TRIP_ACTIVE = 0, + THERMAL_TRIP_PASSIVE, + THERMAL_TRIP_HOT, + THERMAL_TRIP_CRITICAL, +}; enum thermal_trend { THERMAL_TREND_STABLE, /* temperature is stable */ @@ -53,8 +101,6 @@ enum thermal_notify_event { THERMAL_DEVICE_DOWN, /* Thermal device is down */ THERMAL_DEVICE_UP, /* Thermal device is up after a down event */ THERMAL_DEVICE_POWER_CAPABILITY_CHANGED, /* 
power capability changed */ - THERMAL_TABLE_CHANGED, /* Thermal table(s) changed */ - THERMAL_EVENT_KEEP_ALIVE, /* Request for user space handler to respond */ }; struct thermal_zone_device_ops { @@ -64,7 +110,9 @@ struct thermal_zone_device_ops { struct thermal_cooling_device *); int (*get_temp) (struct thermal_zone_device *, int *); int (*set_trips) (struct thermal_zone_device *, int, int); - int (*change_mode) (struct thermal_zone_device *, + int (*get_mode) (struct thermal_zone_device *, + enum thermal_device_mode *); + int (*set_mode) (struct thermal_zone_device *, enum thermal_device_mode); int (*get_trip_type) (struct thermal_zone_device *, int, enum thermal_trip_type *); @@ -76,26 +124,28 @@ struct thermal_zone_device_ops { int (*set_emul_temp) (struct thermal_zone_device *, int); int (*get_trend) (struct thermal_zone_device *, int, enum thermal_trend *); - void (*hot)(struct thermal_zone_device *); - void (*critical)(struct thermal_zone_device *); + int (*notify) (struct thermal_zone_device *, int, + enum thermal_trip_type); }; struct thermal_cooling_device_ops { int (*get_max_state) (struct thermal_cooling_device *, unsigned long *); int (*get_cur_state) (struct thermal_cooling_device *, unsigned long *); int (*set_cur_state) (struct thermal_cooling_device *, unsigned long); - int (*get_requested_power)(struct thermal_cooling_device *, u32 *); - int (*state2power)(struct thermal_cooling_device *, unsigned long, u32 *); - int (*power2state)(struct thermal_cooling_device *, u32, unsigned long *); + int (*get_requested_power)(struct thermal_cooling_device *, + struct thermal_zone_device *, u32 *); + int (*state2power)(struct thermal_cooling_device *, + struct thermal_zone_device *, unsigned long, u32 *); + int (*power2state)(struct thermal_cooling_device *, + struct thermal_zone_device *, u32, unsigned long *); }; struct thermal_cooling_device { int id; - char *type; + char type[THERMAL_NAME_LENGTH]; struct device device; struct device_node *np; void 
*devdata; - void *stats; const struct thermal_cooling_device_ops *ops; bool updated; /* true if the cooling device does not need update */ struct mutex lock; /* protect thermal_instances list */ @@ -103,6 +153,11 @@ struct thermal_cooling_device { struct list_head node; }; +struct thermal_attr { + struct device_attribute attr; + char name[THERMAL_NAME_LENGTH]; +}; + /** * struct thermal_zone_device - structure for a thermal zone * @id: unique id number for each thermal zone @@ -111,13 +166,12 @@ struct thermal_cooling_device { * @trip_temp_attrs: attributes for trip points for sysfs: trip temperature * @trip_type_attrs: attributes for trip points for sysfs: trip type * @trip_hyst_attrs: attributes for trip points for sysfs: trip hysteresis - * @mode: current mode of this thermal zone * @devdata: private pointer for device private data * @trips: number of trip points the thermal zone supports * @trips_disabled; bitmap for disabled trips - * @passive_delay_jiffies: number of jiffies to wait between polls when + * @passive_delay: number of milliseconds to wait between polls when * performing passive cooling. - * @polling_delay_jiffies: number of jiffies to wait between polls when + * @polling_delay: number of milliseconds to wait between polls when * checking whether trip points have been crossed (0 for * interrupt driven systems) * @temperature: current temperature. This is only for core code, @@ -130,13 +184,16 @@ struct thermal_cooling_device { trip point. * @prev_high_trip: the above current temperature if you've crossed a passive trip point. + * @forced_passive: If > 0, temperature at which to switch on all ACPI + * processor cooling devices. Currently only used by the + * step-wise governor. * @need_update: if equals 1, thermal_zone_device_update needs to be invoked. 
* @ops: operations this &thermal_zone_device supports * @tzp: thermal zone parameters * @governor: pointer to the governor for this thermal zone * @governor_data: private pointer for governor data * @thermal_instances: list of &struct thermal_instance of this thermal zone - * @ida: &struct ida to generate unique id for this zone's cooling + * @idr: &struct idr to generate unique id for this zone's cooling * devices * @lock: lock to protect thermal_instances list * @node: node in thermal_tz_list (in thermal_core.c) @@ -147,29 +204,28 @@ struct thermal_zone_device { int id; char type[THERMAL_NAME_LENGTH]; struct device device; - struct attribute_group trips_attribute_group; struct thermal_attr *trip_temp_attrs; struct thermal_attr *trip_type_attrs; struct thermal_attr *trip_hyst_attrs; - enum thermal_device_mode mode; void *devdata; int trips; unsigned long trips_disabled; /* bitmap for disabled trips */ - unsigned long passive_delay_jiffies; - unsigned long polling_delay_jiffies; + int passive_delay; + int polling_delay; int temperature; int last_temperature; int emul_temperature; int passive; int prev_low_trip; int prev_high_trip; + unsigned int forced_passive; atomic_t need_update; struct thermal_zone_device_ops *ops; struct thermal_zone_params *tzp; struct thermal_governor *governor; void *governor_data; struct list_head thermal_instances; - struct ida ida; + struct idr idr; struct mutex lock; struct list_head node; struct delayed_work poll_queue; @@ -206,7 +262,7 @@ struct thermal_bind_params { * platform characterization. This value is relative to the * rest of the weights so a cooling device whose weight is * double that of another cooling device is twice as - * effective. See Documentation/driver-api/thermal/sysfs-api.rst for more + * effective. See Documentation/thermal/sysfs-api.txt for more * information. 
*/ int weight; @@ -214,7 +270,7 @@ struct thermal_bind_params { /* * This is a bit mask that gives the binding relation between this * thermal zone and cdev, for a particular trip point. - * See Documentation/driver-api/thermal/sysfs-api.rst for more information. + * See Documentation/thermal/sysfs-api.txt for more information. */ int trip_mask; @@ -284,8 +340,13 @@ struct thermal_zone_params { int offset; }; +struct thermal_genl_event { + u32 orig; + enum events event; +}; + /** - * struct thermal_zone_of_device_ops - callbacks for handling DT based zones + * struct thermal_zone_of_device_ops - scallbacks for handling DT based zones * * Mandatory: * @get_temp: a pointer to a function that reads the sensor temperature. @@ -308,11 +369,23 @@ struct thermal_zone_of_device_ops { int (*set_trip_temp)(void *, int, int); }; +/** + * struct thermal_trip - representation of a point in temperature domain + * @np: pointer to struct device_node that this trip point was created from + * @temperature: temperature value in miliCelsius + * @hysteresis: relative hysteresis in miliCelsius + * @type: trip point type + */ + +struct thermal_trip { + struct device_node *np; + int temperature; + int hysteresis; + enum thermal_trip_type type; +}; + /* Function declarations */ #ifdef CONFIG_THERMAL_OF -int thermal_zone_of_get_sensor_id(struct device_node *tz_np, - struct device_node *sensor_np, - u32 *id); struct thermal_zone_device * thermal_zone_of_sensor_register(struct device *dev, int id, void *data, const struct thermal_zone_of_device_ops *ops); @@ -324,13 +397,6 @@ struct thermal_zone_device *devm_thermal_zone_of_sensor_register( void devm_thermal_zone_of_sensor_unregister(struct device *dev, struct thermal_zone_device *tz); #else - -static inline int thermal_zone_of_get_sensor_id(struct device_node *tz_np, - struct device_node *sensor_np, - u32 *id) -{ - return -ENOENT; -} static inline struct thermal_zone_device * thermal_zone_of_sensor_register(struct device *dev, int id, void 
*data, const struct thermal_zone_of_device_ops *ops) @@ -359,7 +425,19 @@ void devm_thermal_zone_of_sensor_unregister(struct device *dev, #endif -#ifdef CONFIG_THERMAL +#if IS_ENABLED(CONFIG_THERMAL) +static inline bool cdev_is_power_actor(struct thermal_cooling_device *cdev) +{ + return cdev->ops->get_requested_power && cdev->ops->state2power && + cdev->ops->power2state; +} + +int power_actor_get_max_power(struct thermal_cooling_device *, + struct thermal_zone_device *tz, u32 *max_power); +int power_actor_get_min_power(struct thermal_cooling_device *, + struct thermal_zone_device *tz, u32 *min_power); +int power_actor_set_power(struct thermal_cooling_device *, + struct thermal_instance *, u32); struct thermal_zone_device *thermal_zone_device_register(const char *, int, int, void *, struct thermal_zone_device_ops *, struct thermal_zone_params *, int, int); @@ -373,53 +451,69 @@ int thermal_zone_unbind_cooling_device(struct thermal_zone_device *, int, struct thermal_cooling_device *); void thermal_zone_device_update(struct thermal_zone_device *, enum thermal_notify_event); +void thermal_zone_set_trips(struct thermal_zone_device *); -struct thermal_cooling_device *thermal_cooling_device_register(const char *, - void *, const struct thermal_cooling_device_ops *); +struct thermal_cooling_device *thermal_cooling_device_register(char *, void *, + const struct thermal_cooling_device_ops *); struct thermal_cooling_device * -thermal_of_cooling_device_register(struct device_node *np, const char *, void *, +thermal_of_cooling_device_register(struct device_node *np, char *, void *, const struct thermal_cooling_device_ops *); -struct thermal_cooling_device * -devm_thermal_of_cooling_device_register(struct device *dev, - struct device_node *np, - char *type, void *devdata, - const struct thermal_cooling_device_ops *ops); void thermal_cooling_device_unregister(struct thermal_cooling_device *); struct thermal_zone_device *thermal_zone_get_zone_by_name(const char *name); int 
thermal_zone_get_temp(struct thermal_zone_device *tz, int *temp); int thermal_zone_get_slope(struct thermal_zone_device *tz); int thermal_zone_get_offset(struct thermal_zone_device *tz); -int thermal_zone_device_enable(struct thermal_zone_device *tz); -int thermal_zone_device_disable(struct thermal_zone_device *tz); -void thermal_zone_device_critical(struct thermal_zone_device *tz); +int get_tz_trend(struct thermal_zone_device *, int); +struct thermal_instance *get_thermal_instance(struct thermal_zone_device *, + struct thermal_cooling_device *, int); +void thermal_cdev_update(struct thermal_cooling_device *); +void thermal_notify_framework(struct thermal_zone_device *, int); #else +static inline bool cdev_is_power_actor(struct thermal_cooling_device *cdev) +{ return false; } +static inline int power_actor_get_max_power(struct thermal_cooling_device *cdev, + struct thermal_zone_device *tz, u32 *max_power) +{ return 0; } +static inline int power_actor_get_min_power(struct thermal_cooling_device *cdev, + struct thermal_zone_device *tz, + u32 *min_power) +{ return -ENODEV; } +static inline int power_actor_set_power(struct thermal_cooling_device *cdev, + struct thermal_instance *tz, u32 power) +{ return 0; } static inline struct thermal_zone_device *thermal_zone_device_register( const char *type, int trips, int mask, void *devdata, struct thermal_zone_device_ops *ops, - struct thermal_zone_params *tzp, + const struct thermal_zone_params *tzp, int passive_delay, int polling_delay) { return ERR_PTR(-ENODEV); } static inline void thermal_zone_device_unregister( struct thermal_zone_device *tz) { } +static inline int thermal_zone_bind_cooling_device( + struct thermal_zone_device *tz, int trip, + struct thermal_cooling_device *cdev, + unsigned long upper, unsigned long lower, + unsigned int weight) +{ return -ENODEV; } +static inline int thermal_zone_unbind_cooling_device( + struct thermal_zone_device *tz, int trip, + struct thermal_cooling_device *cdev) +{ return -ENODEV; } 
+static inline void thermal_zone_device_update(struct thermal_zone_device *tz, + enum thermal_notify_event event) +{ } +static inline void thermal_zone_set_trips(struct thermal_zone_device *tz) +{ } static inline struct thermal_cooling_device * -thermal_cooling_device_register(const char *type, void *devdata, +thermal_cooling_device_register(char *type, void *devdata, const struct thermal_cooling_device_ops *ops) { return ERR_PTR(-ENODEV); } static inline struct thermal_cooling_device * thermal_of_cooling_device_register(struct device_node *np, - const char *type, void *devdata, - const struct thermal_cooling_device_ops *ops) + char *type, void *devdata, const struct thermal_cooling_device_ops *ops) { return ERR_PTR(-ENODEV); } -static inline struct thermal_cooling_device * -devm_thermal_of_cooling_device_register(struct device *dev, - struct device_node *np, - char *type, void *devdata, - const struct thermal_cooling_device_ops *ops) -{ - return ERR_PTR(-ENODEV); -} static inline void thermal_cooling_device_unregister( struct thermal_cooling_device *cdev) { } @@ -435,12 +529,28 @@ static inline int thermal_zone_get_slope( static inline int thermal_zone_get_offset( struct thermal_zone_device *tz) { return -ENODEV; } - -static inline int thermal_zone_device_enable(struct thermal_zone_device *tz) -{ return -ENODEV; } - -static inline int thermal_zone_device_disable(struct thermal_zone_device *tz) +static inline int get_tz_trend(struct thermal_zone_device *tz, int trip) { return -ENODEV; } +static inline struct thermal_instance * +get_thermal_instance(struct thermal_zone_device *tz, + struct thermal_cooling_device *cdev, int trip) +{ return ERR_PTR(-ENODEV); } +static inline void thermal_cdev_update(struct thermal_cooling_device *cdev) +{ } +static inline void thermal_notify_framework(struct thermal_zone_device *tz, + int trip) +{ } #endif /* CONFIG_THERMAL */ +#if defined(CONFIG_NET) && IS_ENABLED(CONFIG_THERMAL) +extern int thermal_generate_netlink_event(struct 
thermal_zone_device *tz, + enum events event); +#else +static inline int thermal_generate_netlink_event(struct thermal_zone_device *tz, + enum events event) +{ + return 0; +} +#endif + #endif /* __THERMAL_H__ */ diff --git a/include/linux/thinkpad_acpi.h b/include/linux/thinkpad_acpi.h new file mode 100644 index 0000000000..361de59a22 --- /dev/null +++ b/include/linux/thinkpad_acpi.h @@ -0,0 +1,15 @@ +#ifndef __THINKPAD_ACPI_H__ +#define __THINKPAD_ACPI_H__ + +/* These two functions return 0 if success, or negative error code + (e g -ENODEV if no led present) */ + +enum { + TPACPI_LED_MUTE, + TPACPI_LED_MICMUTE, + TPACPI_LED_MAX, +}; + +int tpacpi_led_set(int whichled, bool on); + +#endif diff --git a/include/linux/thread_info.h b/include/linux/thread_info.h index 0999f63179..19e866c1f5 100644 --- a/include/linux/thread_info.h +++ b/include/linux/thread_info.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ /* thread_info.h: common low-level thread information accessors * * Copyright (C) 2002 David Howells (dhowells@redhat.com) @@ -9,81 +8,94 @@ #define _LINUX_THREAD_INFO_H #include -#include #include -#include -#include + +struct timespec; +struct compat_timespec; #ifdef CONFIG_THREAD_INFO_IN_TASK -/* - * For CONFIG_THREAD_INFO_IN_TASK kernels we need for the - * definition of current, but for !CONFIG_THREAD_INFO_IN_TASK kernels, - * including can cause a circular dependency on some platforms. - */ -#include -#define current_thread_info() ((struct thread_info *)current) +#ifndef current_thread_info +struct thread_info *current_thread_info(void); +#endif #endif -#include - /* - * For per-arch arch_within_stack_frames() implementations, defined in - * asm/thread_info.h. + * System call restart block. 
*/ +struct restart_block { + long (*fn)(struct restart_block *); + union { + /* For futex_wait and futex_wait_requeue_pi */ + struct { + u32 __user *uaddr; + u32 val; + u32 flags; + u32 bitset; + u64 time; + u32 __user *uaddr2; + } futex; + /* For nanosleep */ + struct { + clockid_t clockid; + struct timespec __user *rmtp; +#ifdef CONFIG_COMPAT + struct compat_timespec __user *compat_rmtp; +#endif + u64 expires; + } nanosleep; + /* For poll */ + struct { + struct pollfd __user *ufds; + int nfds; + int has_timeout; + unsigned long tv_sec; + unsigned long tv_nsec; + } poll; + }; +}; + +extern long do_no_restart_syscall(struct restart_block *parm); + enum { BAD_STACK = -1, NOT_STACK = 0, - GOOD_FRAME, GOOD_STACK, + GOOD_FRAME, }; -#ifdef CONFIG_GENERIC_ENTRY -enum syscall_work_bit { - SYSCALL_WORK_BIT_SECCOMP, - SYSCALL_WORK_BIT_SYSCALL_TRACEPOINT, - SYSCALL_WORK_BIT_SYSCALL_TRACE, - SYSCALL_WORK_BIT_SYSCALL_EMU, - SYSCALL_WORK_BIT_SYSCALL_AUDIT, - SYSCALL_WORK_BIT_SYSCALL_USER_DISPATCH, - SYSCALL_WORK_BIT_SYSCALL_EXIT_TRAP, -}; - -#define SYSCALL_WORK_SECCOMP BIT(SYSCALL_WORK_BIT_SECCOMP) -#define SYSCALL_WORK_SYSCALL_TRACEPOINT BIT(SYSCALL_WORK_BIT_SYSCALL_TRACEPOINT) -#define SYSCALL_WORK_SYSCALL_TRACE BIT(SYSCALL_WORK_BIT_SYSCALL_TRACE) -#define SYSCALL_WORK_SYSCALL_EMU BIT(SYSCALL_WORK_BIT_SYSCALL_EMU) -#define SYSCALL_WORK_SYSCALL_AUDIT BIT(SYSCALL_WORK_BIT_SYSCALL_AUDIT) -#define SYSCALL_WORK_SYSCALL_USER_DISPATCH BIT(SYSCALL_WORK_BIT_SYSCALL_USER_DISPATCH) -#define SYSCALL_WORK_SYSCALL_EXIT_TRAP BIT(SYSCALL_WORK_BIT_SYSCALL_EXIT_TRAP) -#endif - +#include #include #ifdef __KERNEL__ -#ifndef arch_set_restart_data -#define arch_set_restart_data(restart) do { } while (0) +#ifdef CONFIG_DEBUG_STACK_USAGE +# define THREADINFO_GFP (GFP_KERNEL_ACCOUNT | __GFP_NOTRACK | \ + __GFP_ZERO) +#else +# define THREADINFO_GFP (GFP_KERNEL_ACCOUNT | __GFP_NOTRACK) #endif -static inline long set_restart_fn(struct restart_block *restart, - long (*fn)(struct restart_block *)) -{ - 
restart->fn = fn; - arch_set_restart_data(restart); - return -ERESTART_RESTARTBLOCK; -} - -#ifndef THREAD_ALIGN -#define THREAD_ALIGN THREAD_SIZE -#endif - -#define THREADINFO_GFP (GFP_KERNEL_ACCOUNT | __GFP_ZERO) - /* * flag set/clear/test wrappers * - pass TIF_xxxx constants to these functions */ +#ifdef CONFIG_THREAD_INFO_IN_TASK +#define set_ti_thread_flag(ti, flag) \ + set_bit(flag, (unsigned long *)&ti->flags) + +#define clear_ti_thread_flag(ti, flag) \ + clear_bit(flag, (unsigned long *)&ti->flags) + +#define test_and_set_ti_thread_flag(ti, flag) \ + test_and_set_bit(flag, (unsigned long *)&ti->flags) + +#define test_and_clear_ti_thread_flag(ti, flag) \ + test_and_clear_bit(flag, (unsigned long *)&ti->flags) + +#define test_ti_thread_flag(ti, flag) \ + test_bit(flag, (unsigned long *)&ti->flags) +#else static inline void set_ti_thread_flag(struct thread_info *ti, int flag) { set_bit(flag, (unsigned long *)&ti->flags); @@ -94,15 +106,6 @@ static inline void clear_ti_thread_flag(struct thread_info *ti, int flag) clear_bit(flag, (unsigned long *)&ti->flags); } -static inline void update_ti_thread_flag(struct thread_info *ti, int flag, - bool value) -{ - if (value) - set_ti_thread_flag(ti, flag); - else - clear_ti_thread_flag(ti, flag); -} - static inline int test_and_set_ti_thread_flag(struct thread_info *ti, int flag) { return test_and_set_bit(flag, (unsigned long *)&ti->flags); @@ -117,13 +120,12 @@ static inline int test_ti_thread_flag(struct thread_info *ti, int flag) { return test_bit(flag, (unsigned long *)&ti->flags); } +#endif #define set_thread_flag(flag) \ set_ti_thread_flag(current_thread_info(), flag) #define clear_thread_flag(flag) \ clear_ti_thread_flag(current_thread_info(), flag) -#define update_thread_flag(flag, value) \ - update_ti_thread_flag(current_thread_info(), flag, value) #define test_and_set_thread_flag(flag) \ test_and_set_ti_thread_flag(current_thread_info(), flag) #define test_and_clear_thread_flag(flag) \ @@ -131,46 +133,14 @@ 
static inline int test_ti_thread_flag(struct thread_info *ti, int flag) #define test_thread_flag(flag) \ test_ti_thread_flag(current_thread_info(), flag) -#ifdef CONFIG_GENERIC_ENTRY -#define set_syscall_work(fl) \ - set_bit(SYSCALL_WORK_BIT_##fl, ¤t_thread_info()->syscall_work) -#define test_syscall_work(fl) \ - test_bit(SYSCALL_WORK_BIT_##fl, ¤t_thread_info()->syscall_work) -#define clear_syscall_work(fl) \ - clear_bit(SYSCALL_WORK_BIT_##fl, ¤t_thread_info()->syscall_work) - -#define set_task_syscall_work(t, fl) \ - set_bit(SYSCALL_WORK_BIT_##fl, &task_thread_info(t)->syscall_work) -#define test_task_syscall_work(t, fl) \ - test_bit(SYSCALL_WORK_BIT_##fl, &task_thread_info(t)->syscall_work) -#define clear_task_syscall_work(t, fl) \ - clear_bit(SYSCALL_WORK_BIT_##fl, &task_thread_info(t)->syscall_work) - -#else /* CONFIG_GENERIC_ENTRY */ - -#define set_syscall_work(fl) \ - set_ti_thread_flag(current_thread_info(), TIF_##fl) -#define test_syscall_work(fl) \ - test_ti_thread_flag(current_thread_info(), TIF_##fl) -#define clear_syscall_work(fl) \ - clear_ti_thread_flag(current_thread_info(), TIF_##fl) - -#define set_task_syscall_work(t, fl) \ - set_ti_thread_flag(task_thread_info(t), TIF_##fl) -#define test_task_syscall_work(t, fl) \ - test_ti_thread_flag(task_thread_info(t), TIF_##fl) -#define clear_task_syscall_work(t, fl) \ - clear_ti_thread_flag(task_thread_info(t), TIF_##fl) -#endif /* !CONFIG_GENERIC_ENTRY */ - #define tif_need_resched() test_thread_flag(TIF_NEED_RESCHED) #ifndef CONFIG_HAVE_ARCH_WITHIN_STACK_FRAMES -static inline int arch_within_stack_frames(const void * const stack, - const void * const stackend, - const void *obj, unsigned long len) +static inline int arch_within_stack_frames(unsigned long stack, + unsigned long stackend, + unsigned long obj, unsigned long len) { - return 0; + return GOOD_STACK; } #endif @@ -190,38 +160,7 @@ static inline void check_object_size(const void *ptr, unsigned long n, { } #endif /* CONFIG_HARDENED_USERCOPY */ 
-extern void __compiletime_error("copy source size is too small") -__bad_copy_from(void); -extern void __compiletime_error("copy destination size is too small") -__bad_copy_to(void); - -static inline void copy_overflow(int size, unsigned long count) -{ - WARN(1, "Buffer overflow detected (%d < %lu)!\n", size, count); -} - -static __always_inline __must_check bool -check_copy_size(const void *addr, size_t bytes, bool is_source) -{ - int sz = __compiletime_object_size(addr); - if (unlikely(sz >= 0 && sz < bytes)) { - if (!__builtin_constant_p(bytes)) - copy_overflow(sz, bytes); - else if (is_source) - __bad_copy_from(); - else - __bad_copy_to(); - return false; - } - if (WARN_ON_ONCE(bytes > INT_MAX)) - return false; - check_object_size(addr, bytes, is_source); - return true; -} - -#ifndef arch_setup_new_exec -static inline void arch_setup_new_exec(void) { } -#endif +bool __access_ok(int type, unsigned long addr, size_t size); #endif /* __KERNEL__ */ diff --git a/include/linux/threads.h b/include/linux/threads.h index c34173e6c5..383ab9592b 100644 --- a/include/linux/threads.h +++ b/include/linux/threads.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_THREADS_H #define _LINUX_THREADS_H @@ -29,7 +28,7 @@ /* * A maximum of 4 million PIDs should be enough for a while. - * [NOTE: PID/TIDs are limited to 2^30 ~= 1 billion, see FUTEX_TID_MASK.] + * [NOTE: PID/TIDs are limited to 2^29 ~= 500+ million, see futex.h.] */ #define PID_MAX_LIMIT (CONFIG_BASE_SMALL ? PAGE_SIZE * 8 : \ (sizeof(long) > 4 ? 4 * 1024 * 1024 : PID_MAX_DEFAULT)) @@ -38,7 +37,7 @@ * Define a minimum number of pids per cpu. Heuristically based * on original pid max of 32k for 32 cpus. Also, increase the * minimum settable value for pid_max on the running system based - * on similar defaults. See kernel/pid.c:pid_idr_init() for details. + * on similar defaults. See kernel/pid.c:pidmap_init() for details. 
*/ #define PIDS_PER_CPU_DEFAULT 1024 #define PIDS_PER_CPU_MIN 8 diff --git a/include/linux/ti_wilink_st.h b/include/linux/ti_wilink_st.h index 44a7f9169a..f2293028ab 100644 --- a/include/linux/ti_wilink_st.h +++ b/include/linux/ti_wilink_st.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * Shared Transport Header file * To be included by the protocol stack drivers for @@ -7,6 +6,20 @@ * * Copyright (C) 2009-2010 Texas Instruments * Author: Pavan Savoy + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + * */ #ifndef TI_WILINK_ST_H @@ -295,7 +308,7 @@ struct bts_header { u32 magic; u32 version; u8 future[24]; - u8 actions[]; + u8 actions[0]; } __attribute__ ((packed)); /** @@ -305,7 +318,7 @@ struct bts_header { struct bts_action { u16 type; u16 size; - u8 data[]; + u8 data[0]; } __attribute__ ((packed)); struct bts_action_send { @@ -315,7 +328,7 @@ struct bts_action_send { struct bts_action_wait { u32 msec; u32 size; - u8 data[]; + u8 data[0]; } __attribute__ ((packed)); struct bts_action_delay { diff --git a/include/linux/tick.h b/include/linux/tick.h index bfd571f18c..62be0786d6 100644 --- a/include/linux/tick.h +++ b/include/linux/tick.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ /* * Tick related global functions */ @@ -11,7 +10,6 @@ #include #include #include -#include #ifdef CONFIG_GENERIC_CLOCKEVENTS extern void __init 
tick_init(void); @@ -69,12 +67,6 @@ extern void tick_broadcast_control(enum tick_broadcast_mode mode); static inline void tick_broadcast_control(enum tick_broadcast_mode mode) { } #endif /* BROADCAST */ -#if defined(CONFIG_GENERIC_CLOCKEVENTS_BROADCAST) && defined(CONFIG_HOTPLUG_CPU) -extern void tick_offline_cpu(unsigned int cpu); -#else -static inline void tick_offline_cpu(unsigned int cpu) { } -#endif - #ifdef CONFIG_GENERIC_CLOCKEVENTS extern int tick_broadcast_oneshot_control(enum tick_broadcast_state state); #else @@ -109,94 +101,60 @@ enum tick_dep_bits { TICK_DEP_BIT_POSIX_TIMER = 0, TICK_DEP_BIT_PERF_EVENTS = 1, TICK_DEP_BIT_SCHED = 2, - TICK_DEP_BIT_CLOCK_UNSTABLE = 3, - TICK_DEP_BIT_RCU = 4, - TICK_DEP_BIT_RCU_EXP = 5 + TICK_DEP_BIT_CLOCK_UNSTABLE = 3 }; -#define TICK_DEP_BIT_MAX TICK_DEP_BIT_RCU_EXP #define TICK_DEP_MASK_NONE 0 #define TICK_DEP_MASK_POSIX_TIMER (1 << TICK_DEP_BIT_POSIX_TIMER) #define TICK_DEP_MASK_PERF_EVENTS (1 << TICK_DEP_BIT_PERF_EVENTS) #define TICK_DEP_MASK_SCHED (1 << TICK_DEP_BIT_SCHED) #define TICK_DEP_MASK_CLOCK_UNSTABLE (1 << TICK_DEP_BIT_CLOCK_UNSTABLE) -#define TICK_DEP_MASK_RCU (1 << TICK_DEP_BIT_RCU) -#define TICK_DEP_MASK_RCU_EXP (1 << TICK_DEP_BIT_RCU_EXP) #ifdef CONFIG_NO_HZ_COMMON extern bool tick_nohz_enabled; -extern bool tick_nohz_tick_stopped(void); -extern bool tick_nohz_tick_stopped_cpu(int cpu); -extern void tick_nohz_idle_stop_tick(void); -extern void tick_nohz_idle_retain_tick(void); -extern void tick_nohz_idle_restart_tick(void); +extern int tick_nohz_tick_stopped(void); extern void tick_nohz_idle_enter(void); extern void tick_nohz_idle_exit(void); extern void tick_nohz_irq_exit(void); -extern bool tick_nohz_idle_got_tick(void); -extern ktime_t tick_nohz_get_next_hrtimer(void); -extern ktime_t tick_nohz_get_sleep_length(ktime_t *delta_next); -extern unsigned long tick_nohz_get_idle_calls(void); -extern unsigned long tick_nohz_get_idle_calls_cpu(int cpu); +extern ktime_t tick_nohz_get_sleep_length(void); 
extern u64 get_cpu_idle_time_us(int cpu, u64 *last_update_time); extern u64 get_cpu_iowait_time_us(int cpu, u64 *last_update_time); - -static inline void tick_nohz_idle_stop_tick_protected(void) -{ - local_irq_disable(); - tick_nohz_idle_stop_tick(); - local_irq_enable(); -} - #else /* !CONFIG_NO_HZ_COMMON */ #define tick_nohz_enabled (0) static inline int tick_nohz_tick_stopped(void) { return 0; } -static inline int tick_nohz_tick_stopped_cpu(int cpu) { return 0; } -static inline void tick_nohz_idle_stop_tick(void) { } -static inline void tick_nohz_idle_retain_tick(void) { } -static inline void tick_nohz_idle_restart_tick(void) { } static inline void tick_nohz_idle_enter(void) { } static inline void tick_nohz_idle_exit(void) { } -static inline bool tick_nohz_idle_got_tick(void) { return false; } -static inline ktime_t tick_nohz_get_next_hrtimer(void) + +static inline ktime_t tick_nohz_get_sleep_length(void) { - /* Next wake up is the tick period, assume it starts now */ - return ktime_add(ktime_get(), TICK_NSEC); -} -static inline ktime_t tick_nohz_get_sleep_length(ktime_t *delta_next) -{ - *delta_next = TICK_NSEC; - return *delta_next; + ktime_t len = { .tv64 = NSEC_PER_SEC/HZ }; + + return len; } static inline u64 get_cpu_idle_time_us(int cpu, u64 *unused) { return -1; } static inline u64 get_cpu_iowait_time_us(int cpu, u64 *unused) { return -1; } - -static inline void tick_nohz_idle_stop_tick_protected(void) { } #endif /* !CONFIG_NO_HZ_COMMON */ #ifdef CONFIG_NO_HZ_FULL extern bool tick_nohz_full_running; extern cpumask_var_t tick_nohz_full_mask; +extern cpumask_var_t housekeeping_mask; static inline bool tick_nohz_full_enabled(void) { - if (!context_tracking_enabled()) + if (!context_tracking_is_enabled()) return false; return tick_nohz_full_running; } -/* - * Check if a CPU is part of the nohz_full subset. Arrange for evaluating - * the cpu expression (typically smp_processor_id()) _after_ the static - * key. 
- */ -#define tick_nohz_full_cpu(_cpu) ({ \ - bool __ret = false; \ - if (tick_nohz_full_enabled()) \ - __ret = cpumask_test_cpu((_cpu), tick_nohz_full_mask); \ - __ret; \ -}) +static inline bool tick_nohz_full_cpu(int cpu) +{ + if (!tick_nohz_full_enabled()) + return false; + + return cpumask_test_cpu(cpu, tick_nohz_full_mask); +} static inline void tick_nohz_full_add_cpus_to(struct cpumask *mask) { @@ -204,6 +162,11 @@ static inline void tick_nohz_full_add_cpus_to(struct cpumask *mask) cpumask_or(mask, mask, tick_nohz_full_mask); } +static inline int housekeeping_any_cpu(void) +{ + return cpumask_any_and(housekeeping_mask, cpu_online_mask); +} + extern void tick_nohz_dep_set(enum tick_dep_bits bit); extern void tick_nohz_dep_clear(enum tick_dep_bits bit); extern void tick_nohz_dep_set_cpu(int cpu, enum tick_dep_bits bit); @@ -212,7 +175,7 @@ extern void tick_nohz_dep_set_task(struct task_struct *tsk, enum tick_dep_bits bit); extern void tick_nohz_dep_clear_task(struct task_struct *tsk, enum tick_dep_bits bit); -extern void tick_nohz_dep_set_signal(struct task_struct *tsk, +extern void tick_nohz_dep_set_signal(struct signal_struct *signal, enum tick_dep_bits bit); extern void tick_nohz_dep_clear_signal(struct signal_struct *signal, enum tick_dep_bits bit); @@ -257,11 +220,11 @@ static inline void tick_dep_clear_task(struct task_struct *tsk, if (tick_nohz_full_enabled()) tick_nohz_dep_clear_task(tsk, bit); } -static inline void tick_dep_set_signal(struct task_struct *tsk, +static inline void tick_dep_set_signal(struct signal_struct *signal, enum tick_dep_bits bit) { if (tick_nohz_full_enabled()) - tick_nohz_dep_set_signal(tsk, bit); + tick_nohz_dep_set_signal(signal, bit); } static inline void tick_dep_clear_signal(struct signal_struct *signal, enum tick_dep_bits bit) @@ -272,15 +235,15 @@ static inline void tick_dep_clear_signal(struct signal_struct *signal, extern void tick_nohz_full_kick_cpu(int cpu); extern void __tick_nohz_task_switch(void); -extern void 
__init tick_nohz_full_setup(cpumask_var_t cpumask); #else +static inline int housekeeping_any_cpu(void) +{ + return smp_processor_id(); +} static inline bool tick_nohz_full_enabled(void) { return false; } static inline bool tick_nohz_full_cpu(int cpu) { return false; } static inline void tick_nohz_full_add_cpus_to(struct cpumask *mask) { } -static inline void tick_nohz_dep_set_cpu(int cpu, enum tick_dep_bits bit) { } -static inline void tick_nohz_dep_clear_cpu(int cpu, enum tick_dep_bits bit) { } - static inline void tick_dep_set(enum tick_dep_bits bit) { } static inline void tick_dep_clear(enum tick_dep_bits bit) { } static inline void tick_dep_set_cpu(int cpu, enum tick_dep_bits bit) { } @@ -289,26 +252,46 @@ static inline void tick_dep_set_task(struct task_struct *tsk, enum tick_dep_bits bit) { } static inline void tick_dep_clear_task(struct task_struct *tsk, enum tick_dep_bits bit) { } -static inline void tick_dep_set_signal(struct task_struct *tsk, +static inline void tick_dep_set_signal(struct signal_struct *signal, enum tick_dep_bits bit) { } static inline void tick_dep_clear_signal(struct signal_struct *signal, enum tick_dep_bits bit) { } static inline void tick_nohz_full_kick_cpu(int cpu) { } static inline void __tick_nohz_task_switch(void) { } -static inline void tick_nohz_full_setup(cpumask_var_t cpumask) { } #endif +static inline const struct cpumask *housekeeping_cpumask(void) +{ +#ifdef CONFIG_NO_HZ_FULL + if (tick_nohz_full_enabled()) + return housekeeping_mask; +#endif + return cpu_possible_mask; +} + +static inline bool is_housekeeping_cpu(int cpu) +{ +#ifdef CONFIG_NO_HZ_FULL + if (tick_nohz_full_enabled()) + return cpumask_test_cpu(cpu, housekeeping_mask); +#endif + return true; +} + +static inline void housekeeping_affine(struct task_struct *t) +{ +#ifdef CONFIG_NO_HZ_FULL + if (tick_nohz_full_enabled()) + set_cpus_allowed_ptr(t, housekeeping_mask); + +#endif +} + static inline void tick_nohz_task_switch(void) { if (tick_nohz_full_enabled()) 
__tick_nohz_task_switch(); } -static inline void tick_nohz_user_enter_prepare(void) -{ - if (tick_nohz_full_cpu(smp_processor_id())) - rcu_nocb_flush_deferred_wakeup(); -} - #endif diff --git a/include/linux/tifm.h b/include/linux/tifm.h index 44073d0671..848c0f3925 100644 --- a/include/linux/tifm.h +++ b/include/linux/tifm.h @@ -1,8 +1,12 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * tifm.h - TI FlashMedia driver * * Copyright (C) 2006 Alex Dubov + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * */ #ifndef _TIFM_H @@ -124,7 +128,7 @@ struct tifm_adapter { int (*has_ms_pif)(struct tifm_adapter *fm, struct tifm_dev *sock); - struct tifm_dev *sockets[]; + struct tifm_dev *sockets[0]; }; struct tifm_adapter *tifm_alloc_adapter(unsigned int num_sockets, diff --git a/include/linux/timb_dma.h b/include/linux/timb_dma.h index 33ad3b0ab8..bb043e970b 100644 --- a/include/linux/timb_dma.h +++ b/include/linux/timb_dma.h @@ -1,7 +1,19 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * timb_dma.h timberdale FPGA DMA driver defines * Copyright (c) 2010 Intel Corporation + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 
*/ /* Supports: diff --git a/include/linux/timb_gpio.h b/include/linux/timb_gpio.h index 3faf5a6bb1..ce456eaae8 100644 --- a/include/linux/timb_gpio.h +++ b/include/linux/timb_gpio.h @@ -1,7 +1,19 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * timb_gpio.h timberdale FPGA GPIO driver, platform data definition * Copyright (c) 2009 Intel Corporation + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 
*/ #ifndef _LINUX_TIMB_GPIO_H diff --git a/include/linux/time.h b/include/linux/time.h index 16cf4522d6..4cea09d942 100644 --- a/include/linux/time.h +++ b/include/linux/time.h @@ -1,33 +1,183 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_TIME_H #define _LINUX_TIME_H # include +# include # include # include extern struct timezone sys_tz; -int get_timespec64(struct timespec64 *ts, - const struct __kernel_timespec __user *uts); -int put_timespec64(const struct timespec64 *ts, - struct __kernel_timespec __user *uts); -int get_itimerspec64(struct itimerspec64 *it, - const struct __kernel_itimerspec __user *uit); -int put_itimerspec64(const struct itimerspec64 *it, - struct __kernel_itimerspec __user *uit); +#define TIME_T_MAX (time_t)((1UL << ((sizeof(time_t) << 3) - 1)) - 1) + +static inline int timespec_equal(const struct timespec *a, + const struct timespec *b) +{ + return (a->tv_sec == b->tv_sec) && (a->tv_nsec == b->tv_nsec); +} + +/* + * lhs < rhs: return <0 + * lhs == rhs: return 0 + * lhs > rhs: return >0 + */ +static inline int timespec_compare(const struct timespec *lhs, const struct timespec *rhs) +{ + if (lhs->tv_sec < rhs->tv_sec) + return -1; + if (lhs->tv_sec > rhs->tv_sec) + return 1; + return lhs->tv_nsec - rhs->tv_nsec; +} + +static inline int timeval_compare(const struct timeval *lhs, const struct timeval *rhs) +{ + if (lhs->tv_sec < rhs->tv_sec) + return -1; + if (lhs->tv_sec > rhs->tv_sec) + return 1; + return lhs->tv_usec - rhs->tv_usec; +} extern time64_t mktime64(const unsigned int year, const unsigned int mon, const unsigned int day, const unsigned int hour, const unsigned int min, const unsigned int sec); -#ifdef CONFIG_POSIX_TIMERS -extern void clear_itimer(void); -#else -static inline void clear_itimer(void) {} +/** + * Deprecated. Use mktime64(). 
+ */ +static inline unsigned long mktime(const unsigned int year, + const unsigned int mon, const unsigned int day, + const unsigned int hour, const unsigned int min, + const unsigned int sec) +{ + return mktime64(year, mon, day, hour, min, sec); +} + +extern void set_normalized_timespec(struct timespec *ts, time_t sec, s64 nsec); + +/* + * timespec_add_safe assumes both values are positive and checks + * for overflow. It will return TIME_T_MAX if the reutrn would be + * smaller then either of the arguments. + */ +extern struct timespec timespec_add_safe(const struct timespec lhs, + const struct timespec rhs); + + +static inline struct timespec timespec_add(struct timespec lhs, + struct timespec rhs) +{ + struct timespec ts_delta; + set_normalized_timespec(&ts_delta, lhs.tv_sec + rhs.tv_sec, + lhs.tv_nsec + rhs.tv_nsec); + return ts_delta; +} + +/* + * sub = lhs - rhs, in normalized form + */ +static inline struct timespec timespec_sub(struct timespec lhs, + struct timespec rhs) +{ + struct timespec ts_delta; + set_normalized_timespec(&ts_delta, lhs.tv_sec - rhs.tv_sec, + lhs.tv_nsec - rhs.tv_nsec); + return ts_delta; +} + +/* + * Returns true if the timespec is norm, false if denorm: + */ +static inline bool timespec_valid(const struct timespec *ts) +{ + /* Dates before 1970 are bogus */ + if (ts->tv_sec < 0) + return false; + /* Can't have more nanoseconds then a second */ + if ((unsigned long)ts->tv_nsec >= NSEC_PER_SEC) + return false; + return true; +} + +static inline bool timespec_valid_strict(const struct timespec *ts) +{ + if (!timespec_valid(ts)) + return false; + /* Disallow values that could overflow ktime_t */ + if ((unsigned long long)ts->tv_sec >= KTIME_SEC_MAX) + return false; + return true; +} + +static inline bool timeval_valid(const struct timeval *tv) +{ + /* Dates before 1970 are bogus */ + if (tv->tv_sec < 0) + return false; + + /* Can't have more microseconds then a second */ + if (tv->tv_usec < 0 || tv->tv_usec >= USEC_PER_SEC) + return 
false; + + return true; +} + +extern struct timespec timespec_trunc(struct timespec t, unsigned gran); + +/* + * Validates if a timespec/timeval used to inject a time offset is valid. + * Offsets can be postive or negative. The value of the timeval/timespec + * is the sum of its fields, but *NOTE*: the field tv_usec/tv_nsec must + * always be non-negative. + */ +static inline bool timeval_inject_offset_valid(const struct timeval *tv) +{ + /* We don't check the tv_sec as it can be positive or negative */ + + /* Can't have more microseconds then a second */ + if (tv->tv_usec < 0 || tv->tv_usec >= USEC_PER_SEC) + return false; + return true; +} + +static inline bool timespec_inject_offset_valid(const struct timespec *ts) +{ + /* We don't check the tv_sec as it can be positive or negative */ + + /* Can't have more nanoseconds then a second */ + if (ts->tv_nsec < 0 || ts->tv_nsec >= NSEC_PER_SEC) + return false; + return true; +} + +#define CURRENT_TIME (current_kernel_time()) +#define CURRENT_TIME_SEC ((struct timespec) { get_seconds(), 0 }) + +/* Some architectures do not supply their own clocksource. + * This is mainly the case in architectures that get their + * inter-tick times by reading the counter on their interval + * timer. Since these timers wrap every tick, they're not really + * useful as clocksources. Wrapping them to act like one is possible + * but not very efficient. So we provide a callout these arches + * can implement for use with the jiffies clocksource to provide + * finer then tick granular time. 
+ */ +#ifdef CONFIG_ARCH_USES_GETTIMEOFFSET +extern u32 (*arch_gettimeoffset)(void); #endif -extern long do_utimes(int dfd, const char __user *filename, struct timespec64 *times, int flags); +struct itimerval; +extern int do_setitimer(int which, struct itimerval *value, + struct itimerval *ovalue); +extern int do_getitimer(int which, struct itimerval *value); + +extern unsigned int alarm_setitimer(unsigned int seconds); + +extern long do_utimes(int dfd, const char __user *filename, struct timespec *times, int flags); + +struct tms; +extern void do_sys_times(struct tms *); /* * Similar to the struct tm in userspace , but it needs to be here so @@ -57,46 +207,72 @@ struct tm { void time64_to_tm(time64_t totalsecs, int offset, struct tm *result); -# include - -static inline bool itimerspec64_valid(const struct itimerspec64 *its) +/** + * time_to_tm - converts the calendar time to local broken-down time + * + * @totalsecs the number of seconds elapsed since 00:00:00 on January 1, 1970, + * Coordinated Universal Time (UTC). + * @offset offset seconds adding to totalsecs. + * @result pointer to struct tm variable to receive broken-down time + */ +static inline void time_to_tm(time_t totalsecs, int offset, struct tm *result) { - if (!timespec64_valid(&(its->it_interval)) || - !timespec64_valid(&(its->it_value))) - return false; - - return true; + time64_to_tm(totalsecs, offset, result); } /** - * time_after32 - compare two 32-bit relative times - * @a: the time which may be after @b - * @b: the time which may be before @a + * timespec_to_ns - Convert timespec to nanoseconds + * @ts: pointer to the timespec variable to be converted * - * time_after32(a, b) returns true if the time @a is after time @b. - * time_before32(b, a) returns true if the time @b is before time @a. - * - * Similar to time_after(), compare two 32-bit timestamps for relative - * times. This is useful for comparing 32-bit seconds values that can't - * be converted to 64-bit values (e.g. 
due to disk format or wire protocol - * issues) when it is known that the times are less than 68 years apart. + * Returns the scalar nanosecond representation of the timespec + * parameter. */ -#define time_after32(a, b) ((s32)((u32)(b) - (u32)(a)) < 0) -#define time_before32(b, a) time_after32(a, b) +static inline s64 timespec_to_ns(const struct timespec *ts) +{ + return ((s64) ts->tv_sec * NSEC_PER_SEC) + ts->tv_nsec; +} /** - * time_between32 - check if a 32-bit timestamp is within a given time range - * @t: the time which may be within [l,h] - * @l: the lower bound of the range - * @h: the higher bound of the range + * timeval_to_ns - Convert timeval to nanoseconds + * @ts: pointer to the timeval variable to be converted * - * time_before32(t, l, h) returns true if @l <= @t <= @h. All operands are - * treated as 32-bit integers. - * - * Equivalent to !(time_before32(@t, @l) || time_after32(@t, @h)). + * Returns the scalar nanosecond representation of the timeval + * parameter. */ -#define time_between32(t, l, h) ((u32)(h) - (u32)(l) >= (u32)(t) - (u32)(l)) +static inline s64 timeval_to_ns(const struct timeval *tv) +{ + return ((s64) tv->tv_sec * NSEC_PER_SEC) + + tv->tv_usec * NSEC_PER_USEC; +} -# include +/** + * ns_to_timespec - Convert nanoseconds to timespec + * @nsec: the nanoseconds value to be converted + * + * Returns the timespec representation of the nsec parameter. + */ +extern struct timespec ns_to_timespec(const s64 nsec); + +/** + * ns_to_timeval - Convert nanoseconds to timeval + * @nsec: the nanoseconds value to be converted + * + * Returns the timeval representation of the nsec parameter. + */ +extern struct timeval ns_to_timeval(const s64 nsec); + +/** + * timespec_add_ns - Adds nanoseconds to a timespec + * @a: pointer to timespec to be incremented + * @ns: unsigned nanoseconds value to be added + * + * This must always be inlined because its used from the x86-64 vdso, + * which cannot call other kernel functions. 
+ */ +static __always_inline void timespec_add_ns(struct timespec *a, u64 ns) +{ + a->tv_sec += __iter_div_u64_rem(a->tv_nsec + ns, NSEC_PER_SEC, &ns); + a->tv_nsec = ns; +} #endif diff --git a/include/linux/time64.h b/include/linux/time64.h index 81b9686a20..980c71b300 100644 --- a/include/linux/time64.h +++ b/include/linux/time64.h @@ -1,15 +1,20 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_TIME64_H #define _LINUX_TIME64_H +#include #include -#include typedef __s64 time64_t; typedef __u64 timeu64_t; -#include - +/* + * This wants to go into uapi/linux/time.h once we agreed about the + * userspace interfaces. + */ +#if __BITS_PER_LONG == 64 +# define timespec64 timespec +#define itimerspec64 itimerspec +#else struct timespec64 { time64_t tv_sec; /* seconds */ long tv_nsec; /* nanoseconds */ @@ -20,25 +25,92 @@ struct itimerspec64 { struct timespec64 it_value; }; +#endif + +/* Parameters used to convert the timespec values: */ +#define MSEC_PER_SEC 1000L +#define USEC_PER_MSEC 1000L +#define NSEC_PER_USEC 1000L +#define NSEC_PER_MSEC 1000000L +#define USEC_PER_SEC 1000000L +#define NSEC_PER_SEC 1000000000L +#define FSEC_PER_SEC 1000000000000000LL + /* Located here for timespec[64]_valid_strict */ #define TIME64_MAX ((s64)~((u64)1 << 63)) -#define TIME64_MIN (-TIME64_MAX - 1) - #define KTIME_MAX ((s64)~((u64)1 << 63)) -#define KTIME_MIN (-KTIME_MAX - 1) #define KTIME_SEC_MAX (KTIME_MAX / NSEC_PER_SEC) -#define KTIME_SEC_MIN (KTIME_MIN / NSEC_PER_SEC) -/* - * Limits for settimeofday(): - * - * To prevent setting the time close to the wraparound point time setting - * is limited so a reasonable uptime can be accomodated. Uptime of 30 years - * should be really sufficient, which means the cutoff is 2232. At that - * point the cutoff is just a small part of the larger problem. 
- */ -#define TIME_UPTIME_SEC_MAX (30LL * 365 * 24 *3600) -#define TIME_SETTOD_SEC_MAX (KTIME_SEC_MAX - TIME_UPTIME_SEC_MAX) +#if __BITS_PER_LONG == 64 + +static inline struct timespec timespec64_to_timespec(const struct timespec64 ts64) +{ + return ts64; +} + +static inline struct timespec64 timespec_to_timespec64(const struct timespec ts) +{ + return ts; +} + +static inline struct itimerspec itimerspec64_to_itimerspec(struct itimerspec64 *its64) +{ + return *its64; +} + +static inline struct itimerspec64 itimerspec_to_itimerspec64(struct itimerspec *its) +{ + return *its; +} + +# define timespec64_equal timespec_equal +# define timespec64_compare timespec_compare +# define set_normalized_timespec64 set_normalized_timespec +# define timespec64_add timespec_add +# define timespec64_sub timespec_sub +# define timespec64_valid timespec_valid +# define timespec64_valid_strict timespec_valid_strict +# define timespec64_to_ns timespec_to_ns +# define ns_to_timespec64 ns_to_timespec +# define timespec64_add_ns timespec_add_ns + +#else + +static inline struct timespec timespec64_to_timespec(const struct timespec64 ts64) +{ + struct timespec ret; + + ret.tv_sec = (time_t)ts64.tv_sec; + ret.tv_nsec = ts64.tv_nsec; + return ret; +} + +static inline struct timespec64 timespec_to_timespec64(const struct timespec ts) +{ + struct timespec64 ret; + + ret.tv_sec = ts.tv_sec; + ret.tv_nsec = ts.tv_nsec; + return ret; +} + +static inline struct itimerspec itimerspec64_to_itimerspec(struct itimerspec64 *its64) +{ + struct itimerspec ret; + + ret.it_interval = timespec64_to_timespec(its64->it_interval); + ret.it_value = timespec64_to_timespec(its64->it_value); + return ret; +} + +static inline struct itimerspec64 itimerspec_to_itimerspec64(struct itimerspec *its) +{ + struct itimerspec64 ret; + + ret.it_interval = timespec_to_timespec64(its->it_interval); + ret.it_value = timespec_to_timespec64(its->it_value); + return ret; +} static inline int timespec64_equal(const struct timespec64 
*a, const struct timespec64 *b) @@ -107,16 +179,6 @@ static inline bool timespec64_valid_strict(const struct timespec64 *ts) return true; } -static inline bool timespec64_valid_settod(const struct timespec64 *ts) -{ - if (!timespec64_valid(ts)) - return false; - /* Disallow values which cause overflow issues vs. CLOCK_REALTIME */ - if ((unsigned long long)ts->tv_sec >= TIME_SETTOD_SEC_MAX) - return false; - return true; -} - /** * timespec64_to_ns - Convert timespec64 to nanoseconds * @ts: pointer to the timespec64 variable to be converted @@ -126,13 +188,6 @@ static inline bool timespec64_valid_settod(const struct timespec64 *ts) */ static inline s64 timespec64_to_ns(const struct timespec64 *ts) { - /* Prevent multiplication overflow / underflow */ - if (ts->tv_sec >= KTIME_SEC_MAX) - return KTIME_MAX; - - if (ts->tv_sec <= KTIME_SEC_MIN) - return KTIME_MIN; - return ((s64) ts->tv_sec * NSEC_PER_SEC) + ts->tv_nsec; } @@ -158,6 +213,8 @@ static __always_inline void timespec64_add_ns(struct timespec64 *a, u64 ns) a->tv_nsec = ns; } +#endif + /* * timespec64_add_safe assumes both values are positive and checks for * overflow. It will return TIME64_MAX in case of overflow. diff --git a/include/linux/timecounter.h b/include/linux/timecounter.h index c6540ceea1..4382035a75 100644 --- a/include/linux/timecounter.h +++ b/include/linux/timecounter.h @@ -1,9 +1,18 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * linux/include/linux/timecounter.h * * based on code that migrated away from * linux/include/linux/clocksource.h + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. 
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. */ #ifndef _LINUX_TIMECOUNTER_H #define _LINUX_TIMECOUNTER_H @@ -11,7 +20,7 @@ #include /* simplify initialization of mask field */ -#define CYCLECOUNTER_MASK(bits) (u64)((bits) < 64 ? ((1ULL<<(bits))-1) : -1) +#define CYCLECOUNTER_MASK(bits) (cycle_t)((bits) < 64 ? ((1ULL<<(bits))-1) : -1) /** * struct cyclecounter - hardware abstraction for a free running counter @@ -28,8 +37,8 @@ * @shift: cycle to nanosecond divisor (power of two) */ struct cyclecounter { - u64 (*read)(const struct cyclecounter *cc); - u64 mask; + cycle_t (*read)(const struct cyclecounter *cc); + cycle_t mask; u32 mult; u32 shift; }; @@ -54,7 +63,7 @@ struct cyclecounter { */ struct timecounter { const struct cyclecounter *cc; - u64 cycle_last; + cycle_t cycle_last; u64 nsec; u64 mask; u64 frac; @@ -68,7 +77,7 @@ struct timecounter { * @frac: pointer to storage for the fractional nanoseconds. */ static inline u64 cyclecounter_cyc2ns(const struct cyclecounter *cc, - u64 cycles, u64 mask, u64 *frac) + cycle_t cycles, u64 mask, u64 *frac) { u64 ns = (u64) cycles; @@ -124,7 +133,7 @@ extern u64 timecounter_read(struct timecounter *tc); * This allows conversion of cycle counter values which were generated * in the past. 
*/ -extern u64 timecounter_cyc2time(const struct timecounter *tc, - u64 cycle_tstamp); +extern u64 timecounter_cyc2time(struct timecounter *tc, + cycle_t cycle_tstamp); #endif diff --git a/include/linux/timekeeper_internal.h b/include/linux/timekeeper_internal.h index 84ff2844df..e880054590 100644 --- a/include/linux/timekeeper_internal.h +++ b/include/linux/timekeeper_internal.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ /* * You SHOULD NOT be including this unless you're vsyscall * handling code or timekeeping internal code! @@ -14,32 +13,29 @@ /** * struct tk_read_base - base structure for timekeeping readout * @clock: Current clocksource used for timekeeping. + * @read: Read function of @clock * @mask: Bitmask for two's complement subtraction of non 64bit clocks * @cycle_last: @clock cycle value at last update * @mult: (NTP adjusted) multiplier for scaled math conversion * @shift: Shift value for scaled math conversion * @xtime_nsec: Shifted (fractional) nano seconds offset for readout * @base: ktime_t (nanoseconds) base time for readout - * @base_real: Nanoseconds base value for clock REALTIME readout * * This struct has size 56 byte on 64 bit. Together with a seqcount it * occupies a single 64byte cache line. * * The struct is separate from struct timekeeper as it is also used * for a fast NMI safe accessors. - * - * @base_real is for the fast NMI safe accessor to allow reading clock - * realtime from any context. 
*/ struct tk_read_base { struct clocksource *clock; - u64 mask; - u64 cycle_last; + cycle_t (*read)(struct clocksource *cs); + cycle_t mask; + cycle_t cycle_last; u32 mult; u32 shift; u64 xtime_nsec; ktime_t base; - u64 base_real; }; /** @@ -56,14 +52,13 @@ struct tk_read_base { * @clock_was_set_seq: The sequence number of clock was set events * @cs_was_changed_seq: The sequence number of clocksource change events * @next_leap_ktime: CLOCK_MONOTONIC time value of a pending leap-second - * @raw_sec: CLOCK_MONOTONIC_RAW time in seconds - * @monotonic_to_boot: CLOCK_MONOTONIC to CLOCK_BOOTTIME offset + * @raw_time: Monotonic raw base time in timespec64 format * @cycle_interval: Number of clock cycles in one NTP interval * @xtime_interval: Number of clock shifted nano seconds in one NTP * interval. * @xtime_remainder: Shifted nano seconds left over when rounding * @cycle_interval - * @raw_interval: Shifted raw nano seconds accumulated per NTP interval. + * @raw_interval: Raw nano seconds accumulated per NTP interval. * @ntp_error: Difference between accumulated time and NTP time in ntp * shifted nano seconds. * @ntp_error_shift: Shift conversion between clock shifted nano seconds and @@ -85,9 +80,6 @@ struct tk_read_base { * * wall_to_monotonic is no longer the boot time, getboottime must be * used instead. - * - * @monotonic_to_boottime is a timespec64 representation of @offs_boot to - * accelerate the VDSO update for CLOCK_BOOTTIME. */ struct timekeeper { struct tk_read_base tkr_mono; @@ -102,14 +94,13 @@ struct timekeeper { unsigned int clock_was_set_seq; u8 cs_was_changed_seq; ktime_t next_leap_ktime; - u64 raw_sec; - struct timespec64 monotonic_to_boot; + struct timespec64 raw_time; /* The following members are for timekeeping internal use */ - u64 cycle_interval; + cycle_t cycle_interval; u64 xtime_interval; s64 xtime_remainder; - u64 raw_interval; + u32 raw_interval; /* The ntp_tick_length() value currently being used. 
* This cached copy ensures we consistently apply the tick * length for an entire tick, as ntp_tick_length may change @@ -122,8 +113,6 @@ struct timekeeper { s64 ntp_error; u32 ntp_error_shift; u32 ntp_err_mult; - /* Flag used to avoid updating NTP twice with same second */ - u32 skip_second_overflow; #ifdef CONFIG_DEBUG_TIMEKEEPING long last_warning; /* @@ -143,6 +132,13 @@ struct timekeeper { extern void update_vsyscall(struct timekeeper *tk); extern void update_vsyscall_tz(void); +#elif defined(CONFIG_GENERIC_TIME_VSYSCALL_OLD) + +extern void update_vsyscall_old(struct timespec *ts, struct timespec *wtm, + struct clocksource *c, u32 mult, + cycle_t cycle_last); +extern void update_vsyscall_tz(void); + #else static inline void update_vsyscall(struct timekeeper *tk) diff --git a/include/linux/timekeeping.h b/include/linux/timekeeping.h index 78a98bdff7..09168c52ab 100644 --- a/include/linux/timekeeping.h +++ b/include/linux/timekeeping.h @@ -1,58 +1,175 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_TIMEKEEPING_H #define _LINUX_TIMEKEEPING_H #include -#include /* Included from linux/ktime.h */ void timekeeping_init(void); extern int timekeeping_suspended; -/* Architecture timer tick functions: */ -extern void legacy_timer_tick(unsigned long ticks); - /* * Get and set timeofday */ +extern void do_gettimeofday(struct timeval *tv); extern int do_settimeofday64(const struct timespec64 *ts); extern int do_sys_settimeofday64(const struct timespec64 *tv, const struct timezone *tz); +static inline int do_sys_settimeofday(const struct timespec *tv, + const struct timezone *tz) +{ + struct timespec64 ts64; + + if (!tv) + return do_sys_settimeofday64(NULL, tz); + + if (!timespec_valid(tv)) + return -EINVAL; + + ts64 = timespec_to_timespec64(*tv); + return do_sys_settimeofday64(&ts64, tz); +} /* - * ktime_get() family: read the current time in a multitude of ways, - * - * The default time reference is CLOCK_MONOTONIC, starting at - * boot time but not counting the 
time spent in suspend. - * For other references, use the functions with "real", "clocktai", - * "boottime" and "raw" suffixes. - * - * To get the time in a different format, use the ones wit - * "ns", "ts64" and "seconds" suffix. - * - * See Documentation/core-api/timekeeping.rst for more details. + * Kernel time accessors */ +unsigned long get_seconds(void); +struct timespec64 current_kernel_time64(void); +/* does not take xtime_lock */ +struct timespec __current_kernel_time(void); +static inline struct timespec current_kernel_time(void) +{ + struct timespec64 now = current_kernel_time64(); + + return timespec64_to_timespec(now); +} /* - * timespec64 based interfaces + * timespec based interfaces */ -extern void ktime_get_raw_ts64(struct timespec64 *ts); +struct timespec64 get_monotonic_coarse64(void); +extern void getrawmonotonic64(struct timespec64 *ts); extern void ktime_get_ts64(struct timespec64 *ts); -extern void ktime_get_real_ts64(struct timespec64 *tv); -extern void ktime_get_coarse_ts64(struct timespec64 *ts); -extern void ktime_get_coarse_real_ts64(struct timespec64 *ts); - -void getboottime64(struct timespec64 *ts); - -/* - * time64_t base interfaces - */ extern time64_t ktime_get_seconds(void); -extern time64_t __ktime_get_real_seconds(void); extern time64_t ktime_get_real_seconds(void); +extern int __getnstimeofday64(struct timespec64 *tv); +extern void getnstimeofday64(struct timespec64 *tv); +extern void getboottime64(struct timespec64 *ts); + +#if BITS_PER_LONG == 64 +/** + * Deprecated. Use do_settimeofday64(). 
+ */ +static inline int do_settimeofday(const struct timespec *ts) +{ + return do_settimeofday64(ts); +} + +static inline int __getnstimeofday(struct timespec *ts) +{ + return __getnstimeofday64(ts); +} + +static inline void getnstimeofday(struct timespec *ts) +{ + getnstimeofday64(ts); +} + +static inline void ktime_get_ts(struct timespec *ts) +{ + ktime_get_ts64(ts); +} + +static inline void ktime_get_real_ts(struct timespec *ts) +{ + getnstimeofday64(ts); +} + +static inline void getrawmonotonic(struct timespec *ts) +{ + getrawmonotonic64(ts); +} + +static inline struct timespec get_monotonic_coarse(void) +{ + return get_monotonic_coarse64(); +} + +static inline void getboottime(struct timespec *ts) +{ + return getboottime64(ts); +} +#else +/** + * Deprecated. Use do_settimeofday64(). + */ +static inline int do_settimeofday(const struct timespec *ts) +{ + struct timespec64 ts64; + + ts64 = timespec_to_timespec64(*ts); + return do_settimeofday64(&ts64); +} + +static inline int __getnstimeofday(struct timespec *ts) +{ + struct timespec64 ts64; + int ret = __getnstimeofday64(&ts64); + + *ts = timespec64_to_timespec(ts64); + return ret; +} + +static inline void getnstimeofday(struct timespec *ts) +{ + struct timespec64 ts64; + + getnstimeofday64(&ts64); + *ts = timespec64_to_timespec(ts64); +} + +static inline void ktime_get_ts(struct timespec *ts) +{ + struct timespec64 ts64; + + ktime_get_ts64(&ts64); + *ts = timespec64_to_timespec(ts64); +} + +static inline void ktime_get_real_ts(struct timespec *ts) +{ + struct timespec64 ts64; + + getnstimeofday64(&ts64); + *ts = timespec64_to_timespec(ts64); +} + +static inline void getrawmonotonic(struct timespec *ts) +{ + struct timespec64 ts64; + + getrawmonotonic64(&ts64); + *ts = timespec64_to_timespec(ts64); +} + +static inline struct timespec get_monotonic_coarse(void) +{ + return timespec64_to_timespec(get_monotonic_coarse64()); +} + +static inline void getboottime(struct timespec *ts) +{ + struct timespec64 ts64; + + 
getboottime64(&ts64); + *ts = timespec64_to_timespec(ts64); +} +#endif + +#define ktime_get_real_ts64(ts) getnstimeofday64(ts) + /* * ktime_t based interfaces */ @@ -66,7 +183,6 @@ enum tk_offsets { extern ktime_t ktime_get(void); extern ktime_t ktime_get_with_offset(enum tk_offsets offs); -extern ktime_t ktime_get_coarse_with_offset(enum tk_offsets offs); extern ktime_t ktime_mono_to_any(ktime_t tmono, enum tk_offsets offs); extern ktime_t ktime_get_raw(void); extern u32 ktime_get_resolution_ns(void); @@ -79,11 +195,6 @@ static inline ktime_t ktime_get_real(void) return ktime_get_with_offset(TK_OFFS_REAL); } -static inline ktime_t ktime_get_coarse_real(void) -{ - return ktime_get_coarse_with_offset(TK_OFFS_REAL); -} - /** * ktime_get_boottime - Returns monotonic time since boot in ktime_t format * @@ -95,11 +206,6 @@ static inline ktime_t ktime_get_boottime(void) return ktime_get_with_offset(TK_OFFS_BOOT); } -static inline ktime_t ktime_get_coarse_boottime(void) -{ - return ktime_get_coarse_with_offset(TK_OFFS_BOOT); -} - /** * ktime_get_clocktai - Returns the TAI time of day in ktime_t format */ @@ -108,39 +214,6 @@ static inline ktime_t ktime_get_clocktai(void) return ktime_get_with_offset(TK_OFFS_TAI); } -static inline ktime_t ktime_get_coarse_clocktai(void) -{ - return ktime_get_coarse_with_offset(TK_OFFS_TAI); -} - -static inline ktime_t ktime_get_coarse(void) -{ - struct timespec64 ts; - - ktime_get_coarse_ts64(&ts); - return timespec64_to_ktime(ts); -} - -static inline u64 ktime_get_coarse_ns(void) -{ - return ktime_to_ns(ktime_get_coarse()); -} - -static inline u64 ktime_get_coarse_real_ns(void) -{ - return ktime_to_ns(ktime_get_coarse_real()); -} - -static inline u64 ktime_get_coarse_boottime_ns(void) -{ - return ktime_to_ns(ktime_get_coarse_boottime()); -} - -static inline u64 ktime_get_coarse_clocktai_ns(void) -{ - return ktime_to_ns(ktime_get_coarse_clocktai()); -} - /** * ktime_mono_to_real - Convert monotonic time to clock realtime */ @@ -159,12 
+232,12 @@ static inline u64 ktime_get_real_ns(void) return ktime_to_ns(ktime_get_real()); } -static inline u64 ktime_get_boottime_ns(void) +static inline u64 ktime_get_boot_ns(void) { return ktime_to_ns(ktime_get_boottime()); } -static inline u64 ktime_get_clocktai_ns(void) +static inline u64 ktime_get_tai_ns(void) { return ktime_to_ns(ktime_get_clocktai()); } @@ -176,42 +249,23 @@ static inline u64 ktime_get_raw_ns(void) extern u64 ktime_get_mono_fast_ns(void); extern u64 ktime_get_raw_fast_ns(void); -extern u64 ktime_get_boot_fast_ns(void); -extern u64 ktime_get_real_fast_ns(void); /* - * timespec64/time64_t interfaces utilizing the ktime based ones - * for API completeness, these could be implemented more efficiently - * if needed. + * Timespec interfaces utilizing the ktime based ones */ -static inline void ktime_get_boottime_ts64(struct timespec64 *ts) +static inline void get_monotonic_boottime(struct timespec *ts) +{ + *ts = ktime_to_timespec(ktime_get_boottime()); +} + +static inline void get_monotonic_boottime64(struct timespec64 *ts) { *ts = ktime_to_timespec64(ktime_get_boottime()); } -static inline void ktime_get_coarse_boottime_ts64(struct timespec64 *ts) +static inline void timekeeping_clocktai(struct timespec *ts) { - *ts = ktime_to_timespec64(ktime_get_coarse_boottime()); -} - -static inline time64_t ktime_get_boottime_seconds(void) -{ - return ktime_divns(ktime_get_coarse_boottime(), NSEC_PER_SEC); -} - -static inline void ktime_get_clocktai_ts64(struct timespec64 *ts) -{ - *ts = ktime_to_timespec64(ktime_get_clocktai()); -} - -static inline void ktime_get_coarse_clocktai_ts64(struct timespec64 *ts) -{ - *ts = ktime_to_timespec64(ktime_get_coarse_clocktai()); -} - -static inline time64_t ktime_get_clocktai_seconds(void) -{ - return ktime_divns(ktime_get_coarse_clocktai(), NSEC_PER_SEC); + *ts = ktime_to_timespec(ktime_get_clocktai()); } /* @@ -220,23 +274,17 @@ static inline time64_t ktime_get_clocktai_seconds(void) extern bool 
timekeeping_rtc_skipsuspend(void); extern bool timekeeping_rtc_skipresume(void); -extern void timekeeping_inject_sleeptime64(const struct timespec64 *delta); +extern void timekeeping_inject_sleeptime64(struct timespec64 *delta); /* - * struct ktime_timestanps - Simultaneous mono/boot/real timestamps - * @mono: Monotonic timestamp - * @boot: Boottime timestamp - * @real: Realtime timestamp + * PPS accessor */ -struct ktime_timestamps { - u64 mono; - u64 boot; - u64 real; -}; +extern void ktime_get_raw_and_real_ts64(struct timespec64 *ts_raw, + struct timespec64 *ts_real); -/** +/* * struct system_time_snapshot - simultaneous raw/real time capture with - * counter value + * counter value * @cycles: Clocksource counter value to produce the system times * @real: Realtime system time * @raw: Monotonic raw system time @@ -244,17 +292,16 @@ struct ktime_timestamps { * @cs_was_changed_seq: The sequence number of clocksource change events */ struct system_time_snapshot { - u64 cycles; - ktime_t real; - ktime_t raw; - enum clocksource_ids cs_id; - unsigned int clock_was_set_seq; - u8 cs_was_changed_seq; + cycle_t cycles; + ktime_t real; + ktime_t raw; + unsigned int clock_was_set_seq; + u8 cs_was_changed_seq; }; -/** +/* * struct system_device_crosststamp - system/device cross-timestamp - * (synchronized capture) + * (syncronized capture) * @device: Device time * @sys_realtime: Realtime simultaneous with device time * @sys_monoraw: Monotonic raw simultaneous with device time @@ -265,15 +312,15 @@ struct system_device_crosststamp { ktime_t sys_monoraw; }; -/** +/* * struct system_counterval_t - system counter value with the pointer to the - * corresponding clocksource + * corresponding clocksource * @cycles: System counter value * @cs: Clocksource corresponding to system counter value. 
Used by - * timekeeping code to verify comparibility of two cycle values + * timekeeping code to verify comparibility of two cycle values */ struct system_counterval_t { - u64 cycles; + cycle_t cycles; struct clocksource *cs; }; @@ -293,19 +340,16 @@ extern int get_device_system_crosststamp( */ extern void ktime_get_snapshot(struct system_time_snapshot *systime_snapshot); -/* NMI safe mono/boot/realtime timestamps */ -extern void ktime_get_fast_timestamps(struct ktime_timestamps *snap); - /* * Persistent clock related interfaces */ extern int persistent_clock_is_local; +extern void read_persistent_clock(struct timespec *ts); extern void read_persistent_clock64(struct timespec64 *ts); -void read_persistent_wall_and_boot_offset(struct timespec64 *wall_clock, - struct timespec64 *boot_offset); -#ifdef CONFIG_GENERIC_CMOS_UPDATE +extern void read_boot_clock64(struct timespec64 *ts); +extern int update_persistent_clock(struct timespec now); extern int update_persistent_clock64(struct timespec64 now); -#endif + #endif diff --git a/include/linux/timer.h b/include/linux/timer.h index fda13c9d12..6769812d0a 100644 --- a/include/linux/timer.h +++ b/include/linux/timer.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_TIMER_H #define _LINUX_TIMER_H @@ -8,6 +7,8 @@ #include #include +struct tvec_base; + struct timer_list { /* * All fields that change during normal runtime grouped to the @@ -15,13 +16,19 @@ struct timer_list { */ struct hlist_node entry; unsigned long expires; - void (*function)(struct timer_list *); + void (*function)(unsigned long); + unsigned long data; u32 flags; +#ifdef CONFIG_TIMER_STATS + int start_pid; + void *start_site; + char start_comm[16]; +#endif #ifdef CONFIG_LOCKDEP struct lockdep_map lockdep_map; #endif -}; +} __randomize_layout; #ifdef CONFIG_LOCKDEP /* @@ -36,30 +43,19 @@ struct timer_list { #define __TIMER_LOCKDEP_MAP_INITIALIZER(_kn) #endif -/** - * @TIMER_DEFERRABLE: A deferrable timer will work normally when the - * 
system is busy, but will not cause a CPU to come out of idle just - * to service it; instead, the timer will be serviced when the CPU - * eventually wakes up with a subsequent non-deferrable timer. +/* + * A deferrable timer will work normally when the system is busy, but + * will not cause a CPU to come out of idle just to service it; instead, + * the timer will be serviced when the CPU eventually wakes up with a + * subsequent non-deferrable timer. * - * @TIMER_IRQSAFE: An irqsafe timer is executed with IRQ disabled and - * it's safe to wait for the completion of the running instance from - * IRQ handlers, for example, by calling del_timer_sync(). + * An irqsafe timer is executed with IRQ disabled and it's safe to wait for + * the completion of the running instance from IRQ handlers, for example, + * by calling del_timer_sync(). * * Note: The irq disabled callback execution is a special case for * workqueue locking issues. It's not meant for executing random crap * with interrupts disabled. Abuse is monitored! - * - * @TIMER_PINNED: A pinned timer will not be affected by any timer - * placement heuristics (like, NOHZ) and will always expire on the CPU - * on which the timer was enqueued. - * - * Note: Because enqueuing of timers can migrate the timer from one - * CPU to another, pinned timers are not guaranteed to stay on the - * initialy selected CPU. They move to the CPU on which the enqueue - * function is invoked via mod_timer() or add_timer(). If the timer - * should be placed on a particular CPU, then add_timer_on() has to be - * used. 
*/ #define TIMER_CPUMASK 0x0003FFFF #define TIMER_MIGRATING 0x00040000 @@ -67,91 +63,113 @@ struct timer_list { #define TIMER_DEFERRABLE 0x00080000 #define TIMER_PINNED 0x00100000 #define TIMER_IRQSAFE 0x00200000 -#define TIMER_INIT_FLAGS (TIMER_DEFERRABLE | TIMER_PINNED | TIMER_IRQSAFE) #define TIMER_ARRAYSHIFT 22 #define TIMER_ARRAYMASK 0xFFC00000 -#define TIMER_TRACE_FLAGMASK (TIMER_MIGRATING | TIMER_DEFERRABLE | TIMER_PINNED | TIMER_IRQSAFE) - -#define __TIMER_INITIALIZER(_function, _flags) { \ +#define __TIMER_INITIALIZER(_function, _expires, _data, _flags) { \ .entry = { .next = TIMER_ENTRY_STATIC }, \ .function = (_function), \ + .expires = (_expires), \ + .data = (_data), \ .flags = (_flags), \ __TIMER_LOCKDEP_MAP_INITIALIZER( \ __FILE__ ":" __stringify(__LINE__)) \ } -#define DEFINE_TIMER(_name, _function) \ - struct timer_list _name = \ - __TIMER_INITIALIZER(_function, 0) +#define TIMER_INITIALIZER(_function, _expires, _data) \ + __TIMER_INITIALIZER((_function), (_expires), (_data), 0) -/* - * LOCKDEP and DEBUG timer interfaces. 
- */ -void init_timer_key(struct timer_list *timer, - void (*func)(struct timer_list *), unsigned int flags, +#define TIMER_PINNED_INITIALIZER(_function, _expires, _data) \ + __TIMER_INITIALIZER((_function), (_expires), (_data), TIMER_PINNED) + +#define TIMER_DEFERRED_INITIALIZER(_function, _expires, _data) \ + __TIMER_INITIALIZER((_function), (_expires), (_data), TIMER_DEFERRABLE) + +#define TIMER_PINNED_DEFERRED_INITIALIZER(_function, _expires, _data) \ + __TIMER_INITIALIZER((_function), (_expires), (_data), TIMER_DEFERRABLE | TIMER_PINNED) + +#define DEFINE_TIMER(_name, _function, _expires, _data) \ + struct timer_list _name = \ + TIMER_INITIALIZER(_function, _expires, _data) + +void init_timer_key(struct timer_list *timer, unsigned int flags, const char *name, struct lock_class_key *key); #ifdef CONFIG_DEBUG_OBJECTS_TIMERS extern void init_timer_on_stack_key(struct timer_list *timer, - void (*func)(struct timer_list *), unsigned int flags, const char *name, struct lock_class_key *key); +extern void destroy_timer_on_stack(struct timer_list *timer); #else +static inline void destroy_timer_on_stack(struct timer_list *timer) { } static inline void init_timer_on_stack_key(struct timer_list *timer, - void (*func)(struct timer_list *), - unsigned int flags, - const char *name, + unsigned int flags, const char *name, struct lock_class_key *key) { - init_timer_key(timer, func, flags, name, key); + init_timer_key(timer, flags, name, key); } #endif #ifdef CONFIG_LOCKDEP -#define __init_timer(_timer, _fn, _flags) \ +#define __init_timer(_timer, _flags) \ do { \ static struct lock_class_key __key; \ - init_timer_key((_timer), (_fn), (_flags), #_timer, &__key);\ + init_timer_key((_timer), (_flags), #_timer, &__key); \ } while (0) -#define __init_timer_on_stack(_timer, _fn, _flags) \ +#define __init_timer_on_stack(_timer, _flags) \ do { \ static struct lock_class_key __key; \ - init_timer_on_stack_key((_timer), (_fn), (_flags), \ - #_timer, &__key); \ + 
init_timer_on_stack_key((_timer), (_flags), #_timer, &__key); \ } while (0) #else -#define __init_timer(_timer, _fn, _flags) \ - init_timer_key((_timer), (_fn), (_flags), NULL, NULL) -#define __init_timer_on_stack(_timer, _fn, _flags) \ - init_timer_on_stack_key((_timer), (_fn), (_flags), NULL, NULL) +#define __init_timer(_timer, _flags) \ + init_timer_key((_timer), (_flags), NULL, NULL) +#define __init_timer_on_stack(_timer, _flags) \ + init_timer_on_stack_key((_timer), (_flags), NULL, NULL) #endif -/** - * timer_setup - prepare a timer for first use - * @timer: the timer in question - * @callback: the function to call when timer expires - * @flags: any TIMER_* flags - * - * Regular timer initialization should use either DEFINE_TIMER() above, - * or timer_setup(). For timers on the stack, timer_setup_on_stack() must - * be used and must be balanced with a call to destroy_timer_on_stack(). - */ -#define timer_setup(timer, callback, flags) \ - __init_timer((timer), (callback), (flags)) +#define init_timer(timer) \ + __init_timer((timer), 0) +#define init_timer_pinned(timer) \ + __init_timer((timer), TIMER_PINNED) +#define init_timer_deferrable(timer) \ + __init_timer((timer), TIMER_DEFERRABLE) +#define init_timer_pinned_deferrable(timer) \ + __init_timer((timer), TIMER_DEFERRABLE | TIMER_PINNED) +#define init_timer_on_stack(timer) \ + __init_timer_on_stack((timer), 0) -#define timer_setup_on_stack(timer, callback, flags) \ - __init_timer_on_stack((timer), (callback), (flags)) +#define __setup_timer(_timer, _fn, _data, _flags) \ + do { \ + __init_timer((_timer), (_flags)); \ + (_timer)->function = (_fn); \ + (_timer)->data = (_data); \ + } while (0) -#ifdef CONFIG_DEBUG_OBJECTS_TIMERS -extern void destroy_timer_on_stack(struct timer_list *timer); -#else -static inline void destroy_timer_on_stack(struct timer_list *timer) { } -#endif +#define __setup_timer_on_stack(_timer, _fn, _data, _flags) \ + do { \ + __init_timer_on_stack((_timer), (_flags)); \ + 
(_timer)->function = (_fn); \ + (_timer)->data = (_data); \ + } while (0) -#define from_timer(var, callback_timer, timer_fieldname) \ - container_of(callback_timer, typeof(*var), timer_fieldname) +#define setup_timer(timer, fn, data) \ + __setup_timer((timer), (fn), (data), 0) +#define setup_pinned_timer(timer, fn, data) \ + __setup_timer((timer), (fn), (data), TIMER_PINNED) +#define setup_deferrable_timer(timer, fn, data) \ + __setup_timer((timer), (fn), (data), TIMER_DEFERRABLE) +#define setup_pinned_deferrable_timer(timer, fn, data) \ + __setup_timer((timer), (fn), (data), TIMER_DEFERRABLE | TIMER_PINNED) +#define setup_timer_on_stack(timer, fn, data) \ + __setup_timer_on_stack((timer), (fn), (data), 0) +#define setup_pinned_timer_on_stack(timer, fn, data) \ + __setup_timer_on_stack((timer), (fn), (data), TIMER_PINNED) +#define setup_deferrable_timer_on_stack(timer, fn, data) \ + __setup_timer_on_stack((timer), (fn), (data), TIMER_DEFERRABLE) +#define setup_pinned_deferrable_timer_on_stack(timer, fn, data) \ + __setup_timer_on_stack((timer), (fn), (data), TIMER_DEFERRABLE | TIMER_PINNED) /** * timer_pending - is a timer pending? 
@@ -165,14 +183,13 @@ static inline void destroy_timer_on_stack(struct timer_list *timer) { } */ static inline int timer_pending(const struct timer_list * timer) { - return !hlist_unhashed_lockless(&timer->entry); + return timer->entry.pprev != NULL; } extern void add_timer_on(struct timer_list *timer, int cpu); extern int del_timer(struct timer_list * timer); extern int mod_timer(struct timer_list *timer, unsigned long expires); extern int mod_timer_pending(struct timer_list *timer, unsigned long expires); -extern int timer_reduce(struct timer_list *timer, unsigned long expires); /* * The jiffies value which is added to now, when there is no timer @@ -180,11 +197,51 @@ extern int timer_reduce(struct timer_list *timer, unsigned long expires); */ #define NEXT_TIMER_MAX_DELTA ((1UL << 30) - 1) +/* + * Timer-statistics info: + */ +#ifdef CONFIG_TIMER_STATS + +extern int timer_stats_active; + +extern void init_timer_stats(void); + +extern void timer_stats_update_stats(void *timer, pid_t pid, void *startf, + void *timerf, char *comm, u32 flags); + +extern void __timer_stats_timer_set_start_info(struct timer_list *timer, + void *addr); + +static inline void timer_stats_timer_set_start_info(struct timer_list *timer) +{ + if (likely(!timer_stats_active)) + return; + __timer_stats_timer_set_start_info(timer, __builtin_return_address(0)); +} + +static inline void timer_stats_timer_clear_start_info(struct timer_list *timer) +{ + timer->start_site = NULL; +} +#else +static inline void init_timer_stats(void) +{ +} + +static inline void timer_stats_timer_set_start_info(struct timer_list *timer) +{ +} + +static inline void timer_stats_timer_clear_start_info(struct timer_list *timer) +{ +} +#endif + extern void add_timer(struct timer_list *timer); extern int try_to_del_timer_sync(struct timer_list *timer); -#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT) +#ifdef CONFIG_SMP extern int del_timer_sync(struct timer_list *timer); #else # define del_timer_sync(t) del_timer(t) @@ 
-193,15 +250,17 @@ extern int try_to_del_timer_sync(struct timer_list *timer); #define del_singleshot_timer_sync(t) del_timer_sync(t) extern void init_timers(void); +extern void run_local_timers(void); struct hrtimer; extern enum hrtimer_restart it_real_fn(struct hrtimer *); #if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ_COMMON) -struct ctl_table; +#include extern unsigned int sysctl_timer_migration; int timer_migration_handler(struct ctl_table *table, int write, - void *buffer, size_t *lenp, loff_t *ppos); + void __user *buffer, size_t *lenp, + loff_t *ppos); #endif unsigned long __round_jiffies(unsigned long j, int cpu); @@ -215,11 +274,9 @@ unsigned long round_jiffies_up(unsigned long j); unsigned long round_jiffies_up_relative(unsigned long j); #ifdef CONFIG_HOTPLUG_CPU -int timers_prepare_cpu(unsigned int cpu); int timers_dead_cpu(unsigned int cpu); #else -#define timers_prepare_cpu NULL -#define timers_dead_cpu NULL +#define timers_dead_cpu NULL #endif #endif diff --git a/include/linux/timerfd.h b/include/linux/timerfd.h index 0c33260e5d..bd36ce431e 100644 --- a/include/linux/timerfd.h +++ b/include/linux/timerfd.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ /* * include/linux/timerfd.h * @@ -9,7 +8,23 @@ #ifndef _LINUX_TIMERFD_H #define _LINUX_TIMERFD_H -#include +/* For O_CLOEXEC and O_NONBLOCK */ +#include + +/* For _IO helpers */ +#include + +/* + * CAREFUL: Check include/asm-generic/fcntl.h when defining + * new flags, since they might collide with O_* ones. We want + * to re-use O_* flags that couldn't possibly have a meaning + * from eventfd, in order to leave a free define-space for + * shared O_* flags. + */ +#define TFD_TIMER_ABSTIME (1 << 0) +#define TFD_TIMER_CANCEL_ON_SET (1 << 1) +#define TFD_CLOEXEC O_CLOEXEC +#define TFD_NONBLOCK O_NONBLOCK #define TFD_SHARED_FCNTL_FLAGS (TFD_CLOEXEC | TFD_NONBLOCK) /* Flags for timerfd_create. */ @@ -17,4 +32,6 @@ /* Flags for timerfd_settime. 
*/ #define TFD_SETTIME_FLAGS (TFD_TIMER_ABSTIME | TFD_TIMER_CANCEL_ON_SET) +#define TFD_IOC_SET_TICKS _IOW('T', 0, u64) + #endif /* _LINUX_TIMERFD_H */ diff --git a/include/linux/timeriomem-rng.h b/include/linux/timeriomem-rng.h index 672df7fbf6..46eb27ddbf 100644 --- a/include/linux/timeriomem-rng.h +++ b/include/linux/timeriomem-rng.h @@ -1,21 +1,16 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * linux/include/linux/timeriomem-rng.h * * Copyright (c) 2009 Alexander Clouter + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. */ -#ifndef _LINUX_TIMERIOMEM_RNG_H -#define _LINUX_TIMERIOMEM_RNG_H - struct timeriomem_rng_data { void __iomem *address; /* measures in usecs */ unsigned int period; - - /* bits of entropy per 1024 bits read */ - unsigned int quality; }; - -#endif /* _LINUX_TIMERIOMEM_RNG_H */ diff --git a/include/linux/timerqueue.h b/include/linux/timerqueue.h index 93884086f3..7eec17ad7f 100644 --- a/include/linux/timerqueue.h +++ b/include/linux/timerqueue.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_TIMERQUEUE_H #define _LINUX_TIMERQUEUE_H @@ -12,7 +11,8 @@ struct timerqueue_node { }; struct timerqueue_head { - struct rb_root_cached rb_root; + struct rb_root head; + struct timerqueue_node *next; }; @@ -28,14 +28,13 @@ extern struct timerqueue_node *timerqueue_iterate_next( * * @head: head of timerqueue * - * Returns a pointer to the timer node that has the earliest expiration time. + * Returns a pointer to the timer node that has the + * earliest expiration time. 
*/ static inline struct timerqueue_node *timerqueue_getnext(struct timerqueue_head *head) { - struct rb_node *leftmost = rb_first_cached(&head->rb_root); - - return rb_entry(leftmost, struct timerqueue_node, node); + return head->next; } static inline void timerqueue_init(struct timerqueue_node *node) @@ -43,18 +42,9 @@ static inline void timerqueue_init(struct timerqueue_node *node) RB_CLEAR_NODE(&node->node); } -static inline bool timerqueue_node_queued(struct timerqueue_node *node) -{ - return !RB_EMPTY_NODE(&node->node); -} - -static inline bool timerqueue_node_expires(struct timerqueue_node *node) -{ - return node->expires; -} - static inline void timerqueue_init_head(struct timerqueue_head *head) { - head->rb_root = RB_ROOT_CACHED; + head->head = RB_ROOT; + head->next = NULL; } #endif /* _LINUX_TIMERQUEUE_H */ diff --git a/include/linux/timex.h b/include/linux/timex.h index 059b18eb1f..39c25dbebf 100644 --- a/include/linux/timex.h +++ b/include/linux/timex.h @@ -133,7 +133,7 @@ /* * kernel variables - * Note: maximum error = NTP sync distance = dispersion + delay / 2; + * Note: maximum error = NTP synch distance = dispersion + delay / 2; * estimated error = NTP dispersion. 
*/ extern unsigned long tick_usec; /* USER_HZ period (usec) */ @@ -151,12 +151,11 @@ extern unsigned long tick_nsec; /* SHIFTED_HZ period (nsec) */ #define NTP_INTERVAL_FREQ (HZ) #define NTP_INTERVAL_LENGTH (NSEC_PER_SEC/NTP_INTERVAL_FREQ) -extern int do_adjtimex(struct __kernel_timex *); -extern int do_clock_adjtime(const clockid_t which_clock, struct __kernel_timex * ktx); - +extern int do_adjtimex(struct timex *); extern void hardpps(const struct timespec64 *, const struct timespec64 *); int read_current_timer(unsigned long *timer_val); +void ntp_notify_cmos_timer(void); /* The clock frequency of the i8253/i8254 PIT */ #define PIT_TICK_RATE 1193182ul diff --git a/include/linux/topology.h b/include/linux/topology.h index 7634cd7370..cb0775e1ee 100644 --- a/include/linux/topology.h +++ b/include/linux/topology.h @@ -27,7 +27,6 @@ #ifndef _LINUX_TOPOLOGY_H #define _LINUX_TOPOLOGY_H -#include #include #include #include @@ -48,7 +47,6 @@ int arch_update_cpu_topology(void); /* Conform to ACPI 2.0 SLIT distance definitions */ #define LOCAL_DISTANCE 10 #define REMOTE_DISTANCE 20 -#define DISTANCE_BITS 8 #ifndef node_distance #define node_distance(from,to) ((from) == (to) ? LOCAL_DISTANCE : REMOTE_DISTANCE) #endif @@ -61,20 +59,6 @@ int arch_update_cpu_topology(void); */ #define RECLAIM_DISTANCE 30 #endif - -/* - * The following tunable allows platforms to override the default node - * reclaim distance (RECLAIM_DISTANCE) if remote memory accesses are - * sufficiently fast that the default value actually hurts - * performance. - * - * AMD EPYC machines use this because even though the 2-hop distance - * is 32 (3.2x slower than a local memory access) performance actually - * *improves* if allowed to reclaim memory and load balance tasks - * between NUMA nodes 2-hops apart. 
- */ -extern int __read_mostly node_reclaim_distance; - #ifndef PENALTY_FOR_NODE_WITH_CPUS #define PENALTY_FOR_NODE_WITH_CPUS (1) #endif @@ -131,11 +115,20 @@ static inline int numa_node_id(void) * Use the accessor functions set_numa_mem(), numa_mem_id() and cpu_to_mem(). */ DECLARE_PER_CPU(int, _numa_mem_); +extern int _node_numa_mem_[MAX_NUMNODES]; #ifndef set_numa_mem static inline void set_numa_mem(int node) { this_cpu_write(_numa_mem_, node); + _node_numa_mem_[numa_node_id()] = node; +} +#endif + +#ifndef node_to_mem_node +static inline int node_to_mem_node(int node) +{ + return _node_numa_mem_[node]; } #endif @@ -158,6 +151,7 @@ static inline int cpu_to_mem(int cpu) static inline void set_cpu_numa_mem(int cpu, int node) { per_cpu(_numa_mem_, cpu) = node; + _node_numa_mem_[cpu_to_node(cpu)] = node; } #endif @@ -171,6 +165,13 @@ static inline int numa_mem_id(void) } #endif +#ifndef node_to_mem_node +static inline int node_to_mem_node(int node) +{ + return node; +} +#endif + #ifndef cpu_to_mem static inline int cpu_to_mem(int cpu) { @@ -183,9 +184,6 @@ static inline int cpu_to_mem(int cpu) #ifndef topology_physical_package_id #define topology_physical_package_id(cpu) ((void)(cpu), -1) #endif -#ifndef topology_die_id -#define topology_die_id(cpu) ((void)(cpu), -1) -#endif #ifndef topology_core_id #define topology_core_id(cpu) ((void)(cpu), 0) #endif @@ -195,11 +193,8 @@ static inline int cpu_to_mem(int cpu) #ifndef topology_core_cpumask #define topology_core_cpumask(cpu) cpumask_of(cpu) #endif -#ifndef topology_die_cpumask -#define topology_die_cpumask(cpu) cpumask_of(cpu) -#endif -#if defined(CONFIG_SCHED_SMT) && !defined(cpu_smt_mask) +#ifdef CONFIG_SCHED_SMT static inline const struct cpumask *cpu_smt_mask(int cpu) { return topology_sibling_cpumask(cpu); diff --git a/include/linux/torture.h b/include/linux/torture.h index 0910c5803f..a45702eb3e 100644 --- a/include/linux/torture.h +++ b/include/linux/torture.h @@ -1,10 +1,23 @@ -/* SPDX-License-Identifier: 
GPL-2.0+ */ /* * Common functions for in-kernel torture tests. * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, you can access it online at + * http://www.gnu.org/licenses/gpl-2.0.html. + * * Copyright IBM Corporation, 2014 * - * Author: Paul E. McKenney + * Author: Paul E. McKenney */ #ifndef __LINUX_TORTURE_H @@ -32,33 +45,16 @@ #define TOROUT_STRING(s) \ pr_alert("%s" TORTURE_FLAG " %s\n", torture_type, s) #define VERBOSE_TOROUT_STRING(s) \ -do { \ - if (verbose) { \ - verbose_torout_sleep(); \ - pr_alert("%s" TORTURE_FLAG " %s\n", torture_type, s); \ - } \ -} while (0) + do { if (verbose) pr_alert("%s" TORTURE_FLAG " %s\n", torture_type, s); } while (0) #define VERBOSE_TOROUT_ERRSTRING(s) \ -do { \ - if (verbose) { \ - verbose_torout_sleep(); \ - pr_alert("%s" TORTURE_FLAG "!!! %s\n", torture_type, s); \ - } \ -} while (0) -void verbose_torout_sleep(void); + do { if (verbose) pr_alert("%s" TORTURE_FLAG "!!! %s\n", torture_type, s); } while (0) /* Definitions for online/offline exerciser. 
*/ -#ifdef CONFIG_HOTPLUG_CPU -int torture_num_online_cpus(void); -#else /* #ifdef CONFIG_HOTPLUG_CPU */ -static inline int torture_num_online_cpus(void) { return 1; } -#endif /* #else #ifdef CONFIG_HOTPLUG_CPU */ -typedef void torture_ofl_func(void); bool torture_offline(int cpu, long *n_onl_attempts, long *n_onl_successes, unsigned long *sum_offl, int *min_onl, int *max_onl); bool torture_online(int cpu, long *n_onl_attempts, long *n_onl_successes, unsigned long *sum_onl, int *min_onl, int *max_onl); -int torture_onoff_init(long ooholdoff, long oointerval, torture_ofl_func *f); +int torture_onoff_init(long ooholdoff, long oointerval); void torture_onoff_stats(void); bool torture_onoff_failures(void); @@ -68,21 +64,7 @@ struct torture_random_state { long trs_count; }; #define DEFINE_TORTURE_RANDOM(name) struct torture_random_state name = { 0, 0 } -#define DEFINE_TORTURE_RANDOM_PERCPU(name) \ - DEFINE_PER_CPU(struct torture_random_state, name) unsigned long torture_random(struct torture_random_state *trsp); -static inline void torture_random_init(struct torture_random_state *trsp) -{ - trsp->trs_state = 0; - trsp->trs_count = 0; -} - -/* Definitions for high-resolution-timer sleeps. */ -int torture_hrtimeout_ns(ktime_t baset_ns, u32 fuzzt_ns, struct torture_random_state *trsp); -int torture_hrtimeout_us(u32 baset_us, u32 fuzzt_ns, struct torture_random_state *trsp); -int torture_hrtimeout_ms(u32 baset_ms, u32 fuzzt_us, struct torture_random_state *trsp); -int torture_hrtimeout_jiffies(u32 baset_j, struct torture_random_state *trsp); -int torture_hrtimeout_s(u32 baset_s, u32 fuzzt_ms, struct torture_random_state *trsp); /* Task shuffler, which causes CPUs to occasionally go idle. */ void torture_shuffle_task_register(struct task_struct *tp); @@ -93,11 +75,11 @@ void torture_shutdown_absorb(const char *title); int torture_shutdown_init(int ssecs, void (*cleanup)(void)); /* Task stuttering, which forces load/no-load transitions. 
*/ -bool stutter_wait(const char *title); -int torture_stutter_init(int s, int sgap); +void stutter_wait(const char *title); +int torture_stutter_init(int s); /* Initialization and cleanup. */ -bool torture_init_begin(char *ttype, int v); +bool torture_init_begin(char *ttype, bool v, int *runnable); void torture_init_end(void); bool torture_cleanup_begin(void); void torture_cleanup_end(void); @@ -114,10 +96,4 @@ void _torture_stop_kthread(char *m, struct task_struct **tp); #define torture_stop_kthread(n, tp) \ _torture_stop_kthread("Stopping " #n " task", &(tp)) -#ifdef CONFIG_PREEMPTION -#define torture_preempt_schedule() preempt_schedule() -#else -#define torture_preempt_schedule() do { } while (0) -#endif - #endif /* __LINUX_TORTURE_H */ diff --git a/include/linux/toshiba.h b/include/linux/toshiba.h index 2e0b7dd1b5..915c3bb164 100644 --- a/include/linux/toshiba.h +++ b/include/linux/toshiba.h @@ -1,10 +1,20 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ /* toshiba.h -- Linux driver for accessing the SMM on Toshiba laptops * * Copyright (c) 1996-2000 Jonathan A. Buzzard (jonathan@buzzard.org.uk) * * Thanks to Juergen Heinzl for the pointers * on making sure the structure is aligned and packed. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2, or (at your option) any + * later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. 
+ * */ #ifndef _LINUX_TOSHIBA_H #define _LINUX_TOSHIBA_H diff --git a/include/linux/tpm.h b/include/linux/tpm.h index aa11fe323c..da158f06e0 100644 --- a/include/linux/tpm.h +++ b/include/linux/tpm.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright (C) 2004,2007,2008 IBM Corporation * @@ -13,53 +12,27 @@ * * Device driver for TCG/TCPA TPM (trusted platform module). * Specifications at www.trustedcomputinggroup.org + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation, version 2 of the + * License. + * */ #ifndef __LINUX_TPM_H__ #define __LINUX_TPM_H__ -#include -#include -#include -#include -#include -#include - #define TPM_DIGEST_SIZE 20 /* Max TPM v1.2 PCR size */ -#define TPM_MAX_DIGEST_SIZE SHA512_DIGEST_SIZE + +/* + * Chip num is this value or a valid tpm idx + */ +#define TPM_ANY_NUM 0xFFFF struct tpm_chip; struct trusted_key_payload; struct trusted_key_options; -/* if you add a new hash to this, increment TPM_MAX_HASHES below */ -enum tpm_algorithms { - TPM_ALG_ERROR = 0x0000, - TPM_ALG_SHA1 = 0x0004, - TPM_ALG_KEYEDHASH = 0x0008, - TPM_ALG_SHA256 = 0x000B, - TPM_ALG_SHA384 = 0x000C, - TPM_ALG_SHA512 = 0x000D, - TPM_ALG_NULL = 0x0010, - TPM_ALG_SM3_256 = 0x0012, -}; - -/* - * maximum number of hashing algorithms a TPM can have. 
This is - * basically a count of every hash in tpm_algorithms above - */ -#define TPM_MAX_HASHES 5 - -struct tpm_digest { - u16 alg_id; - u8 digest[TPM_MAX_DIGEST_SIZE]; -} __packed; - -struct tpm_bank_info { - u16 alg_id; - u16 digest_size; - u16 crypto_id; -}; - enum TPM_OPS_FLAGS { TPM_OPS_AUTO_STARTUP = BIT(0), }; @@ -73,380 +46,53 @@ struct tpm_class_ops { int (*send) (struct tpm_chip *chip, u8 *buf, size_t len); void (*cancel) (struct tpm_chip *chip); u8 (*status) (struct tpm_chip *chip); - void (*update_timeouts)(struct tpm_chip *chip, + bool (*update_timeouts)(struct tpm_chip *chip, unsigned long *timeout_cap); - void (*update_durations)(struct tpm_chip *chip, - unsigned long *duration_cap); - int (*go_idle)(struct tpm_chip *chip); - int (*cmd_ready)(struct tpm_chip *chip); - int (*request_locality)(struct tpm_chip *chip, int loc); - int (*relinquish_locality)(struct tpm_chip *chip, int loc); - void (*clk_enable)(struct tpm_chip *chip, bool value); + }; -#define TPM_NUM_EVENT_LOG_FILES 3 - -/* Indexes the duration array */ -enum tpm_duration { - TPM_SHORT = 0, - TPM_MEDIUM = 1, - TPM_LONG = 2, - TPM_LONG_LONG = 3, - TPM_UNDEFINED, - TPM_NUM_DURATIONS = TPM_UNDEFINED, -}; - -#define TPM_PPI_VERSION_LEN 3 - -struct tpm_space { - u32 context_tbl[3]; - u8 *context_buf; - u32 session_tbl[3]; - u8 *session_buf; - u32 buf_size; -}; - -struct tpm_bios_log { - void *bios_event_log; - void *bios_event_log_end; -}; - -struct tpm_chip_seqops { - struct tpm_chip *chip; - const struct seq_operations *seqops; -}; - -struct tpm_chip { - struct device dev; - struct device devs; - struct cdev cdev; - struct cdev cdevs; - - /* A driver callback under ops cannot be run unless ops_sem is held - * (sometimes implicitly, eg for the sysfs code). ops becomes null - * when the driver is unregistered, see tpm_try_get_ops. 
- */ - struct rw_semaphore ops_sem; - const struct tpm_class_ops *ops; - - struct tpm_bios_log log; - struct tpm_chip_seqops bin_log_seqops; - struct tpm_chip_seqops ascii_log_seqops; - - unsigned int flags; - - int dev_num; /* /dev/tpm# */ - unsigned long is_open; /* only one allowed */ - - char hwrng_name[64]; - struct hwrng hwrng; - - struct mutex tpm_mutex; /* tpm is processing */ - - unsigned long timeout_a; /* jiffies */ - unsigned long timeout_b; /* jiffies */ - unsigned long timeout_c; /* jiffies */ - unsigned long timeout_d; /* jiffies */ - bool timeout_adjusted; - unsigned long duration[TPM_NUM_DURATIONS]; /* jiffies */ - bool duration_adjusted; - - struct dentry *bios_dir[TPM_NUM_EVENT_LOG_FILES]; - - const struct attribute_group *groups[3 + TPM_MAX_HASHES]; - unsigned int groups_cnt; - - u32 nr_allocated_banks; - struct tpm_bank_info *allocated_banks; -#ifdef CONFIG_ACPI - acpi_handle acpi_dev_handle; - char ppi_version[TPM_PPI_VERSION_LEN + 1]; -#endif /* CONFIG_ACPI */ - - struct tpm_space work_space; - u32 last_cc; - u32 nr_commands; - u32 *cc_attrs_tbl; - - /* active locality */ - int locality; -}; - -#define TPM_HEADER_SIZE 10 - -enum tpm2_const { - TPM2_PLATFORM_PCR = 24, - TPM2_PCR_SELECT_MIN = ((TPM2_PLATFORM_PCR + 7) / 8), -}; - -enum tpm2_timeouts { - TPM2_TIMEOUT_A = 750, - TPM2_TIMEOUT_B = 2000, - TPM2_TIMEOUT_C = 200, - TPM2_TIMEOUT_D = 30, - TPM2_DURATION_SHORT = 20, - TPM2_DURATION_MEDIUM = 750, - TPM2_DURATION_LONG = 2000, - TPM2_DURATION_LONG_LONG = 300000, - TPM2_DURATION_DEFAULT = 120000, -}; - -enum tpm2_structures { - TPM2_ST_NO_SESSIONS = 0x8001, - TPM2_ST_SESSIONS = 0x8002, -}; - -/* Indicates from what layer of the software stack the error comes from */ -#define TSS2_RC_LAYER_SHIFT 16 -#define TSS2_RESMGR_TPM_RC_LAYER (11 << TSS2_RC_LAYER_SHIFT) - -enum tpm2_return_codes { - TPM2_RC_SUCCESS = 0x0000, - TPM2_RC_HASH = 0x0083, /* RC_FMT1 */ - TPM2_RC_HANDLE = 0x008B, - TPM2_RC_INITIALIZE = 0x0100, /* RC_VER1 */ - TPM2_RC_FAILURE = 
0x0101, - TPM2_RC_DISABLED = 0x0120, - TPM2_RC_COMMAND_CODE = 0x0143, - TPM2_RC_TESTING = 0x090A, /* RC_WARN */ - TPM2_RC_REFERENCE_H0 = 0x0910, - TPM2_RC_RETRY = 0x0922, -}; - -enum tpm2_command_codes { - TPM2_CC_FIRST = 0x011F, - TPM2_CC_HIERARCHY_CONTROL = 0x0121, - TPM2_CC_HIERARCHY_CHANGE_AUTH = 0x0129, - TPM2_CC_CREATE_PRIMARY = 0x0131, - TPM2_CC_SEQUENCE_COMPLETE = 0x013E, - TPM2_CC_SELF_TEST = 0x0143, - TPM2_CC_STARTUP = 0x0144, - TPM2_CC_SHUTDOWN = 0x0145, - TPM2_CC_NV_READ = 0x014E, - TPM2_CC_CREATE = 0x0153, - TPM2_CC_LOAD = 0x0157, - TPM2_CC_SEQUENCE_UPDATE = 0x015C, - TPM2_CC_UNSEAL = 0x015E, - TPM2_CC_CONTEXT_LOAD = 0x0161, - TPM2_CC_CONTEXT_SAVE = 0x0162, - TPM2_CC_FLUSH_CONTEXT = 0x0165, - TPM2_CC_VERIFY_SIGNATURE = 0x0177, - TPM2_CC_GET_CAPABILITY = 0x017A, - TPM2_CC_GET_RANDOM = 0x017B, - TPM2_CC_PCR_READ = 0x017E, - TPM2_CC_PCR_EXTEND = 0x0182, - TPM2_CC_EVENT_SEQUENCE_COMPLETE = 0x0185, - TPM2_CC_HASH_SEQUENCE_START = 0x0186, - TPM2_CC_CREATE_LOADED = 0x0191, - TPM2_CC_LAST = 0x0193, /* Spec 1.36 */ -}; - -enum tpm2_permanent_handles { - TPM2_RS_PW = 0x40000009, -}; - -enum tpm2_capabilities { - TPM2_CAP_HANDLES = 1, - TPM2_CAP_COMMANDS = 2, - TPM2_CAP_PCRS = 5, - TPM2_CAP_TPM_PROPERTIES = 6, -}; - -enum tpm2_properties { - TPM_PT_TOTAL_COMMANDS = 0x0129, -}; - -enum tpm2_startup_types { - TPM2_SU_CLEAR = 0x0000, - TPM2_SU_STATE = 0x0001, -}; - -enum tpm2_cc_attrs { - TPM2_CC_ATTR_CHANDLES = 25, - TPM2_CC_ATTR_RHANDLE = 28, -}; - -#define TPM_VID_INTEL 0x8086 -#define TPM_VID_WINBOND 0x1050 -#define TPM_VID_STM 0x104A - -enum tpm_chip_flags { - TPM_CHIP_FLAG_TPM2 = BIT(1), - TPM_CHIP_FLAG_IRQ = BIT(2), - TPM_CHIP_FLAG_VIRTUAL = BIT(3), - TPM_CHIP_FLAG_HAVE_TIMEOUTS = BIT(4), - TPM_CHIP_FLAG_ALWAYS_POWERED = BIT(5), - TPM_CHIP_FLAG_FIRMWARE_POWER_MANAGED = BIT(6), -}; - -#define to_tpm_chip(d) container_of(d, struct tpm_chip, dev) - -struct tpm_header { - __be16 tag; - __be32 length; - union { - __be32 ordinal; - __be32 return_code; - }; -} 
__packed; - -/* A string buffer type for constructing TPM commands. This is based on the - * ideas of string buffer code in security/keys/trusted.h but is heap based - * in order to keep the stack usage minimal. - */ - -enum tpm_buf_flags { - TPM_BUF_OVERFLOW = BIT(0), -}; - -struct tpm_buf { - unsigned int flags; - u8 *data; -}; - -enum tpm2_object_attributes { - TPM2_OA_FIXED_TPM = BIT(1), - TPM2_OA_FIXED_PARENT = BIT(4), - TPM2_OA_USER_WITH_AUTH = BIT(6), -}; - -enum tpm2_session_attributes { - TPM2_SA_CONTINUE_SESSION = BIT(0), -}; - -struct tpm2_hash { - unsigned int crypto_id; - unsigned int tpm_id; -}; - -static inline void tpm_buf_reset(struct tpm_buf *buf, u16 tag, u32 ordinal) -{ - struct tpm_header *head = (struct tpm_header *)buf->data; - - head->tag = cpu_to_be16(tag); - head->length = cpu_to_be32(sizeof(*head)); - head->ordinal = cpu_to_be32(ordinal); -} - -static inline int tpm_buf_init(struct tpm_buf *buf, u16 tag, u32 ordinal) -{ - buf->data = (u8 *)__get_free_page(GFP_KERNEL); - if (!buf->data) - return -ENOMEM; - - buf->flags = 0; - tpm_buf_reset(buf, tag, ordinal); - return 0; -} - -static inline void tpm_buf_destroy(struct tpm_buf *buf) -{ - free_page((unsigned long)buf->data); -} - -static inline u32 tpm_buf_length(struct tpm_buf *buf) -{ - struct tpm_header *head = (struct tpm_header *)buf->data; - - return be32_to_cpu(head->length); -} - -static inline u16 tpm_buf_tag(struct tpm_buf *buf) -{ - struct tpm_header *head = (struct tpm_header *)buf->data; - - return be16_to_cpu(head->tag); -} - -static inline void tpm_buf_append(struct tpm_buf *buf, - const unsigned char *new_data, - unsigned int new_len) -{ - struct tpm_header *head = (struct tpm_header *)buf->data; - u32 len = tpm_buf_length(buf); - - /* Return silently if overflow has already happened. 
*/ - if (buf->flags & TPM_BUF_OVERFLOW) - return; - - if ((len + new_len) > PAGE_SIZE) { - WARN(1, "tpm_buf: overflow\n"); - buf->flags |= TPM_BUF_OVERFLOW; - return; - } - - memcpy(&buf->data[len], new_data, new_len); - head->length = cpu_to_be32(len + new_len); -} - -static inline void tpm_buf_append_u8(struct tpm_buf *buf, const u8 value) -{ - tpm_buf_append(buf, &value, 1); -} - -static inline void tpm_buf_append_u16(struct tpm_buf *buf, const u16 value) -{ - __be16 value2 = cpu_to_be16(value); - - tpm_buf_append(buf, (u8 *) &value2, 2); -} - -static inline void tpm_buf_append_u32(struct tpm_buf *buf, const u32 value) -{ - __be32 value2 = cpu_to_be32(value); - - tpm_buf_append(buf, (u8 *) &value2, 4); -} - -static inline u32 tpm2_rc_value(u32 rc) -{ - return (rc & BIT(7)) ? rc & 0xff : rc; -} - #if defined(CONFIG_TCG_TPM) || defined(CONFIG_TCG_TPM_MODULE) -extern int tpm_is_tpm2(struct tpm_chip *chip); -extern __must_check int tpm_try_get_ops(struct tpm_chip *chip); -extern void tpm_put_ops(struct tpm_chip *chip); -extern ssize_t tpm_transmit_cmd(struct tpm_chip *chip, struct tpm_buf *buf, - size_t min_rsp_body_length, const char *desc); -extern int tpm_pcr_read(struct tpm_chip *chip, u32 pcr_idx, - struct tpm_digest *digest); -extern int tpm_pcr_extend(struct tpm_chip *chip, u32 pcr_idx, - struct tpm_digest *digests); -extern int tpm_send(struct tpm_chip *chip, void *cmd, size_t buflen); -extern int tpm_get_random(struct tpm_chip *chip, u8 *data, size_t max); -extern struct tpm_chip *tpm_default_chip(void); -void tpm2_flush_context(struct tpm_chip *chip, u32 handle); +extern int tpm_is_tpm2(u32 chip_num); +extern int tpm_pcr_read(u32 chip_num, int pcr_idx, u8 *res_buf); +extern int tpm_pcr_extend(u32 chip_num, int pcr_idx, const u8 *hash); +extern int tpm_send(u32 chip_num, void *cmd, size_t buflen); +extern int tpm_get_random(u32 chip_num, u8 *data, size_t max); +extern int tpm_seal_trusted(u32 chip_num, + struct trusted_key_payload *payload, + struct 
trusted_key_options *options); +extern int tpm_unseal_trusted(u32 chip_num, + struct trusted_key_payload *payload, + struct trusted_key_options *options); #else -static inline int tpm_is_tpm2(struct tpm_chip *chip) +static inline int tpm_is_tpm2(u32 chip_num) { return -ENODEV; } -static inline int tpm_pcr_read(struct tpm_chip *chip, int pcr_idx, - struct tpm_digest *digest) -{ +static inline int tpm_pcr_read(u32 chip_num, int pcr_idx, u8 *res_buf) { + return -ENODEV; +} +static inline int tpm_pcr_extend(u32 chip_num, int pcr_idx, const u8 *hash) { + return -ENODEV; +} +static inline int tpm_send(u32 chip_num, void *cmd, size_t buflen) { + return -ENODEV; +} +static inline int tpm_get_random(u32 chip_num, u8 *data, size_t max) { return -ENODEV; } -static inline int tpm_pcr_extend(struct tpm_chip *chip, u32 pcr_idx, - struct tpm_digest *digests) +static inline int tpm_seal_trusted(u32 chip_num, + struct trusted_key_payload *payload, + struct trusted_key_options *options) { return -ENODEV; } - -static inline int tpm_send(struct tpm_chip *chip, void *cmd, size_t buflen) +static inline int tpm_unseal_trusted(u32 chip_num, + struct trusted_key_payload *payload, + struct trusted_key_options *options) { return -ENODEV; } -static inline int tpm_get_random(struct tpm_chip *chip, u8 *data, size_t max) -{ - return -ENODEV; -} - -static inline struct tpm_chip *tpm_default_chip(void) -{ - return NULL; -} #endif #endif diff --git a/include/linux/tpm_command.h b/include/linux/tpm_command.h index f5c03e9c39..727512e249 100644 --- a/include/linux/tpm_command.h +++ b/include/linux/tpm_command.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef __LINUX_TPM_COMMAND_H__ #define __LINUX_TPM_COMMAND_H__ diff --git a/include/linux/trace_clock.h b/include/linux/trace_clock.h index 00e8f98c94..1d7ca27392 100644 --- a/include/linux/trace_clock.h +++ b/include/linux/trace_clock.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_TRACE_CLOCK_H #define 
_LINUX_TRACE_CLOCK_H diff --git a/include/linux/trace_events.h b/include/linux/trace_events.h index 3e475eeb5a..be007610ce 100644 --- a/include/linux/trace_events.h +++ b/include/linux/trace_events.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_TRACE_EVENT_H #define _LINUX_TRACE_EVENT_H @@ -11,7 +10,7 @@ #include struct trace_array; -struct array_buffer; +struct trace_buffer; struct tracer; struct dentry; struct bpf_prog; @@ -24,10 +23,6 @@ const char *trace_print_symbols_seq(struct trace_seq *p, unsigned long val, const struct trace_print_flags *symbol_array); #if BITS_PER_LONG == 32 -const char *trace_print_flags_seq_u64(struct trace_seq *p, const char *delim, - unsigned long long flags, - const struct trace_print_flags_u64 *flag_array); - const char *trace_print_symbols_seq_u64(struct trace_seq *p, unsigned long long val, const struct trace_print_flags_u64 @@ -38,25 +33,17 @@ const char *trace_print_bitmask_seq(struct trace_seq *p, void *bitmask_ptr, unsigned int bitmask_size); const char *trace_print_hex_seq(struct trace_seq *p, - const unsigned char *buf, int len, - bool concatenate); + const unsigned char *buf, int len); const char *trace_print_array_seq(struct trace_seq *p, const void *buf, int count, size_t el_size); -const char * -trace_print_hex_dump_seq(struct trace_seq *p, const char *prefix_str, - int prefix_type, int rowsize, int groupsize, - const void *buf, size_t len, bool ascii); - struct trace_iterator; struct trace_event; int trace_raw_output_prep(struct trace_iterator *iter, struct trace_event *event); -extern __printf(2, 3) -void trace_event_printf(struct trace_iterator *iter, const char *fmt, ...); /* * The trace entry - the most basic unit of tracing. 
This is what @@ -81,16 +68,12 @@ struct trace_entry { struct trace_iterator { struct trace_array *tr; struct tracer *trace; - struct array_buffer *array_buffer; + struct trace_buffer *trace_buffer; void *private; int cpu_file; struct mutex mutex; struct ring_buffer_iter **buffer_iter; unsigned long iter_flags; - void *temp; /* temp holder */ - unsigned int temp_size; - char *fmt; /* modified format holder */ - unsigned int fmt_size; /* trace_seq for __print_flags() and __print_symbolic() etc. */ struct trace_seq tmp_seq; @@ -150,87 +133,29 @@ enum print_line_t { TRACE_TYPE_NO_CONSUME = 3 /* Handled but ask to not consume */ }; -enum print_line_t trace_handle_return(struct trace_seq *s); - -static inline void tracing_generic_entry_update(struct trace_entry *entry, - unsigned short type, - unsigned int trace_ctx) +/* + * Several functions return TRACE_TYPE_PARTIAL_LINE if the trace_seq + * overflowed, and TRACE_TYPE_HANDLED otherwise. This helper function + * simplifies those functions and keeps them in sync. + */ +static inline enum print_line_t trace_handle_return(struct trace_seq *s) { - entry->preempt_count = trace_ctx & 0xff; - entry->pid = current->pid; - entry->type = type; - entry->flags = trace_ctx >> 16; -} - -unsigned int tracing_gen_ctx_irq_test(unsigned int irqs_status); - -enum trace_flag_type { - TRACE_FLAG_IRQS_OFF = 0x01, - TRACE_FLAG_IRQS_NOSUPPORT = 0x02, - TRACE_FLAG_NEED_RESCHED = 0x04, - TRACE_FLAG_HARDIRQ = 0x08, - TRACE_FLAG_SOFTIRQ = 0x10, - TRACE_FLAG_PREEMPT_RESCHED = 0x20, - TRACE_FLAG_NMI = 0x40, -}; - -#ifdef CONFIG_TRACE_IRQFLAGS_SUPPORT -static inline unsigned int tracing_gen_ctx_flags(unsigned long irqflags) -{ - unsigned int irq_status = irqs_disabled_flags(irqflags) ? 
- TRACE_FLAG_IRQS_OFF : 0; - return tracing_gen_ctx_irq_test(irq_status); -} -static inline unsigned int tracing_gen_ctx(void) -{ - unsigned long irqflags; - - local_save_flags(irqflags); - return tracing_gen_ctx_flags(irqflags); -} -#else - -static inline unsigned int tracing_gen_ctx_flags(unsigned long irqflags) -{ - return tracing_gen_ctx_irq_test(TRACE_FLAG_IRQS_NOSUPPORT); -} -static inline unsigned int tracing_gen_ctx(void) -{ - return tracing_gen_ctx_irq_test(TRACE_FLAG_IRQS_NOSUPPORT); -} -#endif - -static inline unsigned int tracing_gen_ctx_dec(void) -{ - unsigned int trace_ctx; - - trace_ctx = tracing_gen_ctx(); - /* - * Subtract one from the preemption counter if preemption is enabled, - * see trace_event_buffer_reserve()for details. - */ - if (IS_ENABLED(CONFIG_PREEMPTION)) - trace_ctx--; - return trace_ctx; + return trace_seq_has_overflowed(s) ? + TRACE_TYPE_PARTIAL_LINE : TRACE_TYPE_HANDLED; } +void tracing_generic_entry_update(struct trace_entry *entry, + unsigned long flags, + int pc); struct trace_event_file; struct ring_buffer_event * -trace_event_buffer_lock_reserve(struct trace_buffer **current_buffer, +trace_event_buffer_lock_reserve(struct ring_buffer **current_buffer, struct trace_event_file *trace_file, int type, unsigned long len, - unsigned int trace_ctx); + unsigned long flags, int pc); -#define TRACE_RECORD_CMDLINE BIT(0) -#define TRACE_RECORD_TGID BIT(1) - -void tracing_record_taskinfo(struct task_struct *task, int flags); -void tracing_record_taskinfo_sched_switch(struct task_struct *prev, - struct task_struct *next, int flags); - -void tracing_record_cmdline(struct task_struct *task); -void tracing_record_tgid(struct task_struct *task); +void tracing_record_cmdline(struct task_struct *tsk); int trace_output_call(struct trace_iterator *iter, char *name, char *fmt, ...); @@ -244,11 +169,6 @@ enum trace_reg { TRACE_REG_PERF_UNREGISTER, TRACE_REG_PERF_OPEN, TRACE_REG_PERF_CLOSE, - /* - * These (ADD/DEL) use a 'boolean' return value, where 
1 (true) means a - * custom action was taken and the default action is not to be - * performed. - */ TRACE_REG_PERF_ADD, TRACE_REG_PERF_DEL, #endif @@ -256,22 +176,6 @@ enum trace_reg { struct trace_event_call; -#define TRACE_FUNCTION_TYPE ((const char *)~0UL) - -struct trace_event_fields { - const char *type; - union { - struct { - const char *name; - const int size; - const int align; - const int is_signed; - const int filter_type; - }; - int (*define_fields)(struct trace_event_call *); - }; -}; - struct trace_event_class { const char *system; void *probe; @@ -280,7 +184,7 @@ struct trace_event_class { #endif int (*reg)(struct trace_event_call *event, enum trace_reg type, void *data); - struct trace_event_fields *fields_array; + int (*define_fields)(struct trace_event_call *); struct list_head *(*get_fields)(struct trace_event_call *); struct list_head fields; int (*raw_init)(struct trace_event_call *); @@ -290,12 +194,12 @@ extern int trace_event_reg(struct trace_event_call *event, enum trace_reg type, void *data); struct trace_event_buffer { - struct trace_buffer *buffer; + struct ring_buffer *buffer; struct ring_buffer_event *event; struct trace_event_file *trace_file; void *entry; - unsigned int trace_ctx; - struct pt_regs *regs; + unsigned long flags; + int pc; }; void *trace_event_buffer_reserve(struct trace_event_buffer *fbuffer, @@ -309,11 +213,10 @@ enum { TRACE_EVENT_FL_CAP_ANY_BIT, TRACE_EVENT_FL_NO_SET_FILTER_BIT, TRACE_EVENT_FL_IGNORE_ENABLE_BIT, + TRACE_EVENT_FL_WAS_ENABLED_BIT, TRACE_EVENT_FL_TRACEPOINT_BIT, - TRACE_EVENT_FL_DYNAMIC_BIT, TRACE_EVENT_FL_KPROBE_BIT, TRACE_EVENT_FL_UPROBE_BIT, - TRACE_EVENT_FL_EPROBE_BIT, }; /* @@ -322,22 +225,22 @@ enum { * CAP_ANY - Any user can enable for perf * NO_SET_FILTER - Set when filter has error and is to be ignored * IGNORE_ENABLE - For trace internal events, do not enable with debugfs file + * WAS_ENABLED - Set and stays set when an event was ever enabled + * (used for module unloading, if a module event 
is enabled, + * it is best to clear the buffers that used it). * TRACEPOINT - Event is a tracepoint - * DYNAMIC - Event is a dynamic event (created at run time) * KPROBE - Event is a kprobe * UPROBE - Event is a uprobe - * EPROBE - Event is an event probe */ enum { TRACE_EVENT_FL_FILTERED = (1 << TRACE_EVENT_FL_FILTERED_BIT), TRACE_EVENT_FL_CAP_ANY = (1 << TRACE_EVENT_FL_CAP_ANY_BIT), TRACE_EVENT_FL_NO_SET_FILTER = (1 << TRACE_EVENT_FL_NO_SET_FILTER_BIT), TRACE_EVENT_FL_IGNORE_ENABLE = (1 << TRACE_EVENT_FL_IGNORE_ENABLE_BIT), + TRACE_EVENT_FL_WAS_ENABLED = (1 << TRACE_EVENT_FL_WAS_ENABLED_BIT), TRACE_EVENT_FL_TRACEPOINT = (1 << TRACE_EVENT_FL_TRACEPOINT_BIT), - TRACE_EVENT_FL_DYNAMIC = (1 << TRACE_EVENT_FL_DYNAMIC_BIT), TRACE_EVENT_FL_KPROBE = (1 << TRACE_EVENT_FL_KPROBE_BIT), TRACE_EVENT_FL_UPROBE = (1 << TRACE_EVENT_FL_UPROBE_BIT), - TRACE_EVENT_FL_EPROBE = (1 << TRACE_EVENT_FL_EPROBE_BIT), }; #define TRACE_EVENT_FL_UKPROBE (TRACE_EVENT_FL_KPROBE | TRACE_EVENT_FL_UPROBE) @@ -353,89 +256,29 @@ struct trace_event_call { struct trace_event event; char *print_fmt; struct event_filter *filter; - /* - * Static events can disappear with modules, - * where as dynamic ones need their own ref count. 
- */ - union { - void *module; - atomic_t refcnt; - }; + void *mod; void *data; - - /* See the TRACE_EVENT_FL_* flags above */ + /* + * bit 0: filter_active + * bit 1: allow trace by non root (cap any) + * bit 2: failed to apply filter + * bit 3: trace internal event (do not enable) + * bit 4: Event was enabled by module + * bit 5: use call filter rather than file filter + * bit 6: Event is a tracepoint + */ int flags; /* static flags of different events */ #ifdef CONFIG_PERF_EVENTS int perf_refcount; struct hlist_head __percpu *perf_events; - struct bpf_prog_array __rcu *prog_array; + struct bpf_prog *prog; int (*perf_perm)(struct trace_event_call *, struct perf_event *); #endif }; -#ifdef CONFIG_DYNAMIC_EVENTS -bool trace_event_dyn_try_get_ref(struct trace_event_call *call); -void trace_event_dyn_put_ref(struct trace_event_call *call); -bool trace_event_dyn_busy(struct trace_event_call *call); -#else -static inline bool trace_event_dyn_try_get_ref(struct trace_event_call *call) -{ - /* Without DYNAMIC_EVENTS configured, nothing should be calling this */ - return false; -} -static inline void trace_event_dyn_put_ref(struct trace_event_call *call) -{ -} -static inline bool trace_event_dyn_busy(struct trace_event_call *call) -{ - /* Nothing should call this without DYNAIMIC_EVENTS configured. */ - return true; -} -#endif - -static inline bool trace_event_try_get_ref(struct trace_event_call *call) -{ - if (call->flags & TRACE_EVENT_FL_DYNAMIC) - return trace_event_dyn_try_get_ref(call); - else - return try_module_get(call->module); -} - -static inline void trace_event_put_ref(struct trace_event_call *call) -{ - if (call->flags & TRACE_EVENT_FL_DYNAMIC) - trace_event_dyn_put_ref(call); - else - module_put(call->module); -} - -#ifdef CONFIG_PERF_EVENTS -static inline bool bpf_prog_array_valid(struct trace_event_call *call) -{ - /* - * This inline function checks whether call->prog_array - * is valid or not. 
The function is called in various places, - * outside rcu_read_lock/unlock, as a heuristic to speed up execution. - * - * If this function returns true, and later call->prog_array - * becomes false inside rcu_read_lock/unlock region, - * we bail out then. If this function return false, - * there is a risk that we might miss a few events if the checking - * were delayed until inside rcu_read_lock/unlock region and - * call->prog_array happened to become non-NULL then. - * - * Here, READ_ONCE() is used instead of rcu_access_pointer(). - * rcu_access_pointer() requires the actual definition of - * "struct bpf_prog_array" while READ_ONCE() only needs - * a declaration of the same type. - */ - return !!READ_ONCE(call->prog_array); -} -#endif - static inline const char * trace_event_name(struct trace_event_call *call) { @@ -445,20 +288,12 @@ trace_event_name(struct trace_event_call *call) return call->name; } -static inline struct list_head * -trace_get_fields(struct trace_event_call *event_call) -{ - if (!event_call->class->get_fields) - return &event_call->class->fields; - return event_call->class->get_fields(event_call); -} - +struct trace_array; struct trace_subsystem_dir; enum { EVENT_FILE_FL_ENABLED_BIT, EVENT_FILE_FL_RECORDED_CMD_BIT, - EVENT_FILE_FL_RECORDED_TGID_BIT, EVENT_FILE_FL_FILTERED_BIT, EVENT_FILE_FL_NO_SET_FILTER_BIT, EVENT_FILE_FL_SOFT_MODE_BIT, @@ -466,136 +301,12 @@ enum { EVENT_FILE_FL_TRIGGER_MODE_BIT, EVENT_FILE_FL_TRIGGER_COND_BIT, EVENT_FILE_FL_PID_FILTER_BIT, - EVENT_FILE_FL_WAS_ENABLED_BIT, }; -extern struct trace_event_file *trace_get_event_file(const char *instance, - const char *system, - const char *event); -extern void trace_put_event_file(struct trace_event_file *file); - -#define MAX_DYNEVENT_CMD_LEN (2048) - -enum dynevent_type { - DYNEVENT_TYPE_SYNTH = 1, - DYNEVENT_TYPE_KPROBE, - DYNEVENT_TYPE_NONE, -}; - -struct dynevent_cmd; - -typedef int (*dynevent_create_fn_t)(struct dynevent_cmd *cmd); - -struct dynevent_cmd { - struct seq_buf 
seq; - const char *event_name; - unsigned int n_fields; - enum dynevent_type type; - dynevent_create_fn_t run_command; - void *private_data; -}; - -extern int dynevent_create(struct dynevent_cmd *cmd); - -extern int synth_event_delete(const char *name); - -extern void synth_event_cmd_init(struct dynevent_cmd *cmd, - char *buf, int maxlen); - -extern int __synth_event_gen_cmd_start(struct dynevent_cmd *cmd, - const char *name, - struct module *mod, ...); - -#define synth_event_gen_cmd_start(cmd, name, mod, ...) \ - __synth_event_gen_cmd_start(cmd, name, mod, ## __VA_ARGS__, NULL) - -struct synth_field_desc { - const char *type; - const char *name; -}; - -extern int synth_event_gen_cmd_array_start(struct dynevent_cmd *cmd, - const char *name, - struct module *mod, - struct synth_field_desc *fields, - unsigned int n_fields); -extern int synth_event_create(const char *name, - struct synth_field_desc *fields, - unsigned int n_fields, struct module *mod); - -extern int synth_event_add_field(struct dynevent_cmd *cmd, - const char *type, - const char *name); -extern int synth_event_add_field_str(struct dynevent_cmd *cmd, - const char *type_name); -extern int synth_event_add_fields(struct dynevent_cmd *cmd, - struct synth_field_desc *fields, - unsigned int n_fields); - -#define synth_event_gen_cmd_end(cmd) \ - dynevent_create(cmd) - -struct synth_event; - -struct synth_event_trace_state { - struct trace_event_buffer fbuffer; - struct synth_trace_event *entry; - struct trace_buffer *buffer; - struct synth_event *event; - unsigned int cur_field; - unsigned int n_u64; - bool disabled; - bool add_next; - bool add_name; -}; - -extern int synth_event_trace(struct trace_event_file *file, - unsigned int n_vals, ...); -extern int synth_event_trace_array(struct trace_event_file *file, u64 *vals, - unsigned int n_vals); -extern int synth_event_trace_start(struct trace_event_file *file, - struct synth_event_trace_state *trace_state); -extern int synth_event_add_next_val(u64 val, - 
struct synth_event_trace_state *trace_state); -extern int synth_event_add_val(const char *field_name, u64 val, - struct synth_event_trace_state *trace_state); -extern int synth_event_trace_end(struct synth_event_trace_state *trace_state); - -extern int kprobe_event_delete(const char *name); - -extern void kprobe_event_cmd_init(struct dynevent_cmd *cmd, - char *buf, int maxlen); - -#define kprobe_event_gen_cmd_start(cmd, name, loc, ...) \ - __kprobe_event_gen_cmd_start(cmd, false, name, loc, ## __VA_ARGS__, NULL) - -#define kretprobe_event_gen_cmd_start(cmd, name, loc, ...) \ - __kprobe_event_gen_cmd_start(cmd, true, name, loc, ## __VA_ARGS__, NULL) - -extern int __kprobe_event_gen_cmd_start(struct dynevent_cmd *cmd, - bool kretprobe, - const char *name, - const char *loc, ...); - -#define kprobe_event_add_fields(cmd, ...) \ - __kprobe_event_add_fields(cmd, ## __VA_ARGS__, NULL) - -#define kprobe_event_add_field(cmd, field) \ - __kprobe_event_add_fields(cmd, field, NULL) - -extern int __kprobe_event_add_fields(struct dynevent_cmd *cmd, ...); - -#define kprobe_event_gen_cmd_end(cmd) \ - dynevent_create(cmd) - -#define kretprobe_event_gen_cmd_end(cmd) \ - dynevent_create(cmd) - /* * Event file flags: * ENABLED - The event is enabled * RECORDED_CMD - The comms should be recorded at sched_switch - * RECORDED_TGID - The tgids should be recorded at sched_switch * FILTERED - The event has a filter attached * NO_SET_FILTER - Set when filter has error and is to be ignored * SOFT_MODE - The event is enabled/disabled by SOFT_DISABLED @@ -604,12 +315,10 @@ extern int __kprobe_event_add_fields(struct dynevent_cmd *cmd, ...); * TRIGGER_MODE - When set, invoke the triggers associated with the event * TRIGGER_COND - When set, one or more triggers has an associated filter * PID_FILTER - When set, the event is filtered based on pid - * WAS_ENABLED - Set when enabled to know to clear trace on module removal */ enum { EVENT_FILE_FL_ENABLED = (1 << EVENT_FILE_FL_ENABLED_BIT), 
EVENT_FILE_FL_RECORDED_CMD = (1 << EVENT_FILE_FL_RECORDED_CMD_BIT), - EVENT_FILE_FL_RECORDED_TGID = (1 << EVENT_FILE_FL_RECORDED_TGID_BIT), EVENT_FILE_FL_FILTERED = (1 << EVENT_FILE_FL_FILTERED_BIT), EVENT_FILE_FL_NO_SET_FILTER = (1 << EVENT_FILE_FL_NO_SET_FILTER_BIT), EVENT_FILE_FL_SOFT_MODE = (1 << EVENT_FILE_FL_SOFT_MODE_BIT), @@ -617,13 +326,12 @@ enum { EVENT_FILE_FL_TRIGGER_MODE = (1 << EVENT_FILE_FL_TRIGGER_MODE_BIT), EVENT_FILE_FL_TRIGGER_COND = (1 << EVENT_FILE_FL_TRIGGER_COND_BIT), EVENT_FILE_FL_PID_FILTER = (1 << EVENT_FILE_FL_PID_FILTER_BIT), - EVENT_FILE_FL_WAS_ENABLED = (1 << EVENT_FILE_FL_WAS_ENABLED_BIT), }; struct trace_event_file { struct list_head list; struct trace_event_call *event_call; - struct event_filter __rcu *filter; + struct event_filter *filter; struct dentry *dir; struct trace_array *tr; struct trace_subsystem_dir *system; @@ -683,18 +391,15 @@ enum event_trigger_type { ETT_EVENT_ENABLE = (1 << 3), ETT_EVENT_HIST = (1 << 4), ETT_HIST_ENABLE = (1 << 5), - ETT_EVENT_EPROBE = (1 << 6), }; extern int filter_match_preds(struct event_filter *filter, void *rec); -extern enum event_trigger_type -event_triggers_call(struct trace_event_file *file, - struct trace_buffer *buffer, void *rec, - struct ring_buffer_event *event); -extern void -event_triggers_post_call(struct trace_event_file *file, - enum event_trigger_type tt); +extern enum event_trigger_type event_triggers_call(struct trace_event_file *file, + void *rec); +extern void event_triggers_post_call(struct trace_event_file *file, + enum event_trigger_type tt, + void *rec); bool trace_event_ignore_this_pid(struct trace_event_file *trace_file); @@ -714,7 +419,7 @@ trace_trigger_soft_disabled(struct trace_event_file *file) if (!(eflags & EVENT_FILE_FL_TRIGGER_COND)) { if (eflags & EVENT_FILE_FL_TRIGGER_MODE) - event_triggers_call(file, NULL, NULL, NULL); + event_triggers_call(file, NULL); if (eflags & EVENT_FILE_FL_SOFT_DISABLED) return true; if (eflags & EVENT_FILE_FL_PID_FILTER) @@ -724,58 
+429,12 @@ trace_trigger_soft_disabled(struct trace_event_file *file) } #ifdef CONFIG_BPF_EVENTS -unsigned int trace_call_bpf(struct trace_event_call *call, void *ctx); -int perf_event_attach_bpf_prog(struct perf_event *event, struct bpf_prog *prog, u64 bpf_cookie); -void perf_event_detach_bpf_prog(struct perf_event *event); -int perf_event_query_prog_array(struct perf_event *event, void __user *info); -int bpf_probe_register(struct bpf_raw_event_map *btp, struct bpf_prog *prog); -int bpf_probe_unregister(struct bpf_raw_event_map *btp, struct bpf_prog *prog); -struct bpf_raw_event_map *bpf_get_raw_tracepoint(const char *name); -void bpf_put_raw_tracepoint(struct bpf_raw_event_map *btp); -int bpf_get_perf_event_info(const struct perf_event *event, u32 *prog_id, - u32 *fd_type, const char **buf, - u64 *probe_offset, u64 *probe_addr); +unsigned int trace_call_bpf(struct bpf_prog *prog, void *ctx); #else -static inline unsigned int trace_call_bpf(struct trace_event_call *call, void *ctx) +static inline unsigned int trace_call_bpf(struct bpf_prog *prog, void *ctx) { return 1; } - -static inline int -perf_event_attach_bpf_prog(struct perf_event *event, struct bpf_prog *prog, u64 bpf_cookie) -{ - return -EOPNOTSUPP; -} - -static inline void perf_event_detach_bpf_prog(struct perf_event *event) { } - -static inline int -perf_event_query_prog_array(struct perf_event *event, void __user *info) -{ - return -EOPNOTSUPP; -} -static inline int bpf_probe_register(struct bpf_raw_event_map *btp, struct bpf_prog *p) -{ - return -EOPNOTSUPP; -} -static inline int bpf_probe_unregister(struct bpf_raw_event_map *btp, struct bpf_prog *p) -{ - return -EOPNOTSUPP; -} -static inline struct bpf_raw_event_map *bpf_get_raw_tracepoint(const char *name) -{ - return NULL; -} -static inline void bpf_put_raw_tracepoint(struct bpf_raw_event_map *btp) -{ -} -static inline int bpf_get_perf_event_info(const struct perf_event *event, - u32 *prog_id, u32 *fd_type, - const char **buf, u64 *probe_offset, - 
u64 *probe_addr) -{ - return -EOPNOTSUPP; -} #endif enum { @@ -798,10 +457,8 @@ extern int trace_event_get_offsets(struct trace_event_call *call); #define is_signed_type(type) (((type)(-1)) < (type)1) -int ftrace_set_clr_event(struct trace_array *tr, char *buf, int set); int trace_set_clr_event(const char *system, const char *event, int set); -int trace_array_set_clr_event(struct trace_array *tr, const char *system, - const char *event, bool enable); + /* * The double __builtin_constant_p is because gcc will give us an error * if we try to allocate the static variable to fmt if it is not a @@ -813,7 +470,7 @@ do { \ tracing_record_cmdline(current); \ if (__builtin_constant_p(fmt)) { \ static const char *trace_printk_fmt \ - __section("__trace_printk_fmt") = \ + __attribute__((section("__trace_printk_fmt"))) = \ __builtin_constant_p(fmt) ? fmt : NULL; \ \ __trace_bprintk(ip, trace_printk_fmt, ##args); \ @@ -825,64 +482,17 @@ do { \ struct perf_event; DECLARE_PER_CPU(struct pt_regs, perf_trace_regs); -DECLARE_PER_CPU(int, bpf_kprobe_override); extern int perf_trace_init(struct perf_event *event); extern void perf_trace_destroy(struct perf_event *event); extern int perf_trace_add(struct perf_event *event, int flags); extern void perf_trace_del(struct perf_event *event, int flags); -#ifdef CONFIG_KPROBE_EVENTS -extern int perf_kprobe_init(struct perf_event *event, bool is_retprobe); -extern void perf_kprobe_destroy(struct perf_event *event); -extern int bpf_get_kprobe_info(const struct perf_event *event, - u32 *fd_type, const char **symbol, - u64 *probe_offset, u64 *probe_addr, - bool perf_type_tracepoint); -#endif -#ifdef CONFIG_UPROBE_EVENTS -extern int perf_uprobe_init(struct perf_event *event, - unsigned long ref_ctr_offset, bool is_retprobe); -extern void perf_uprobe_destroy(struct perf_event *event); -extern int bpf_get_uprobe_info(const struct perf_event *event, - u32 *fd_type, const char **filename, - u64 *probe_offset, bool perf_type_tracepoint); -#endif 
extern int ftrace_profile_set_filter(struct perf_event *event, int event_id, char *filter_str); extern void ftrace_profile_free_filter(struct perf_event *event); void perf_trace_buf_update(void *record, u16 type); void *perf_trace_buf_alloc(int size, struct pt_regs **regs, int *rctxp); -int perf_event_set_bpf_prog(struct perf_event *event, struct bpf_prog *prog, u64 bpf_cookie); -void perf_event_free_bpf_prog(struct perf_event *event); - -void bpf_trace_run1(struct bpf_prog *prog, u64 arg1); -void bpf_trace_run2(struct bpf_prog *prog, u64 arg1, u64 arg2); -void bpf_trace_run3(struct bpf_prog *prog, u64 arg1, u64 arg2, - u64 arg3); -void bpf_trace_run4(struct bpf_prog *prog, u64 arg1, u64 arg2, - u64 arg3, u64 arg4); -void bpf_trace_run5(struct bpf_prog *prog, u64 arg1, u64 arg2, - u64 arg3, u64 arg4, u64 arg5); -void bpf_trace_run6(struct bpf_prog *prog, u64 arg1, u64 arg2, - u64 arg3, u64 arg4, u64 arg5, u64 arg6); -void bpf_trace_run7(struct bpf_prog *prog, u64 arg1, u64 arg2, - u64 arg3, u64 arg4, u64 arg5, u64 arg6, u64 arg7); -void bpf_trace_run8(struct bpf_prog *prog, u64 arg1, u64 arg2, - u64 arg3, u64 arg4, u64 arg5, u64 arg6, u64 arg7, - u64 arg8); -void bpf_trace_run9(struct bpf_prog *prog, u64 arg1, u64 arg2, - u64 arg3, u64 arg4, u64 arg5, u64 arg6, u64 arg7, - u64 arg8, u64 arg9); -void bpf_trace_run10(struct bpf_prog *prog, u64 arg1, u64 arg2, - u64 arg3, u64 arg4, u64 arg5, u64 arg6, u64 arg7, - u64 arg8, u64 arg9, u64 arg10); -void bpf_trace_run11(struct bpf_prog *prog, u64 arg1, u64 arg2, - u64 arg3, u64 arg4, u64 arg5, u64 arg6, u64 arg7, - u64 arg8, u64 arg9, u64 arg10, u64 arg11); -void bpf_trace_run12(struct bpf_prog *prog, u64 arg1, u64 arg2, - u64 arg3, u64 arg4, u64 arg5, u64 arg6, u64 arg7, - u64 arg8, u64 arg9, u64 arg10, u64 arg11, u64 arg12); void perf_trace_run_bpf_submit(void *raw_data, int size, int rctx, struct trace_event_call *call, u64 count, struct pt_regs *regs, struct hlist_head *head, @@ -895,7 +505,6 @@ 
perf_trace_buf_submit(void *raw_data, int size, int rctx, u16 type, { perf_tp_event(type, count, raw_data, size, regs, head, rctx, task); } - #endif #endif /* _LINUX_TRACE_EVENT_H */ diff --git a/include/linux/trace_seq.h b/include/linux/trace_seq.h index 5a2c650d9e..cfaf5a1d4b 100644 --- a/include/linux/trace_seq.h +++ b/include/linux/trace_seq.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_TRACE_SEQ_H #define _LINUX_TRACE_SEQ_H @@ -12,7 +11,7 @@ */ struct trace_seq { - char buffer[PAGE_SIZE]; + unsigned char buffer[PAGE_SIZE]; struct seq_buf seq; int full; }; @@ -51,7 +50,7 @@ static inline int trace_seq_used(struct trace_seq *s) * that is about to be written to and then return the result * of that write. */ -static inline char * +static inline unsigned char * trace_seq_buffer_ptr(struct trace_seq *s) { return s->buffer + seq_buf_used(&s->seq); @@ -92,10 +91,6 @@ extern int trace_seq_path(struct trace_seq *s, const struct path *path); extern void trace_seq_bitmask(struct trace_seq *s, const unsigned long *maskp, int nmaskbits); -extern int trace_seq_hex_dump(struct trace_seq *s, const char *prefix_str, - int prefix_type, int rowsize, int groupsize, - const void *buf, size_t len, bool ascii); - #else /* CONFIG_TRACING */ static inline void trace_seq_printf(struct trace_seq *s, const char *fmt, ...) { diff --git a/include/linux/tracefs.h b/include/linux/tracefs.h index 9991244597..5b727a17be 100644 --- a/include/linux/tracefs.h +++ b/include/linux/tracefs.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * tracefs.h - a pseudo file system for activating tracing * @@ -6,7 +5,12 @@ * * Copyright (C) 2014 Red Hat Inc, author: Steven Rostedt * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License version + * 2 as published by the Free Software Foundation. + * * tracefs is the file system that is used by the tracing infrastructure. 
+ * */ #ifndef _TRACEFS_H_ @@ -28,6 +32,7 @@ struct dentry *tracefs_create_file(const char *name, umode_t mode, struct dentry *tracefs_create_dir(const char *name, struct dentry *parent); void tracefs_remove(struct dentry *dentry); +void tracefs_remove_recursive(struct dentry *dentry); struct dentry *tracefs_create_instance_dir(const char *name, struct dentry *parent, int (*mkdir)(const char *name), diff --git a/include/linux/tracehook.h b/include/linux/tracehook.h index 2564b7434b..26c152122a 100644 --- a/include/linux/tracehook.h +++ b/include/linux/tracehook.h @@ -1,9 +1,12 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * Tracing hooks * * Copyright (C) 2008-2009 Red Hat, Inc. All rights reserved. * + * This copyrighted material is made available to anyone wishing to use, + * modify, copy, or redistribute it subject to the terms and conditions + * of the GNU General Public License v.2. + * * This file defines hook entry points called by core code where * user tracing/debugging support might need to do something. These * entry points are called tracehook_*(). Each hook declared below @@ -48,21 +51,18 @@ #include #include #include -#include struct linux_binprm; /* * ptrace report for syscall entry and exit looks identical. */ -static inline int ptrace_report_syscall(struct pt_regs *regs, - unsigned long message) +static inline int ptrace_report_syscall(struct pt_regs *regs) { int ptrace = current->ptrace; if (!(ptrace & PT_PTRACED)) return 0; - current->ptrace_message = message; ptrace_notify(SIGTRAP | ((ptrace & PT_TRACESYSGOOD) ? 
0x80 : 0)); /* @@ -75,7 +75,6 @@ static inline int ptrace_report_syscall(struct pt_regs *regs, current->exit_code = 0; } - current->ptrace_message = 0; return fatal_signal_pending(current); } @@ -83,12 +82,11 @@ static inline int ptrace_report_syscall(struct pt_regs *regs, * tracehook_report_syscall_entry - task is about to attempt a system call * @regs: user register state of current task * - * This will be called if %SYSCALL_WORK_SYSCALL_TRACE or - * %SYSCALL_WORK_SYSCALL_EMU have been set, when the current task has just - * entered the kernel for a system call. Full user register state is - * available here. Changing the values in @regs can affect the system - * call number and arguments to be tried. It is safe to block here, - * preventing the system call from beginning. + * This will be called if %TIF_SYSCALL_TRACE has been set, when the + * current task has just entered the kernel for a system call. + * Full user register state is available here. Changing the values + * in @regs can affect the system call number and arguments to be tried. + * It is safe to block here, preventing the system call from beginning. * * Returns zero normally, or nonzero if the calling arch code should abort * the system call. That must prevent normal entry so no system call is @@ -102,7 +100,7 @@ static inline int ptrace_report_syscall(struct pt_regs *regs, static inline __must_check int tracehook_report_syscall_entry( struct pt_regs *regs) { - return ptrace_report_syscall(regs, PTRACE_EVENTMSG_SYSCALL_ENTRY); + return ptrace_report_syscall(regs); } /** @@ -110,24 +108,28 @@ static inline __must_check int tracehook_report_syscall_entry( * @regs: user register state of current task * @step: nonzero if simulating single-step or block-step * - * This will be called if %SYSCALL_WORK_SYSCALL_TRACE has been set, when - * the current task has just finished an attempted system call. 
Full + * This will be called if %TIF_SYSCALL_TRACE has been set, when the + * current task has just finished an attempted system call. Full * user register state is available here. It is safe to block here, * preventing signals from being processed. * * If @step is nonzero, this report is also in lieu of the normal * trap that would follow the system call instruction because * user_enable_block_step() or user_enable_single_step() was used. - * In this case, %SYSCALL_WORK_SYSCALL_TRACE might not be set. + * In this case, %TIF_SYSCALL_TRACE might not be set. * * Called without locks, just before checking for pending signals. */ static inline void tracehook_report_syscall_exit(struct pt_regs *regs, int step) { - if (step) - user_single_step_report(regs); - else - ptrace_report_syscall(regs, PTRACE_EVENTMSG_SYSCALL_EXIT); + if (step) { + siginfo_t info; + user_single_step_siginfo(current, regs, &info); + force_sig_info(SIGTRAP, &info, current); + return; + } + + ptrace_report_syscall(regs); } /** @@ -179,49 +181,16 @@ static inline void set_notify_resume(struct task_struct *task) */ static inline void tracehook_notify_resume(struct pt_regs *regs) { - clear_thread_flag(TIF_NOTIFY_RESUME); /* - * This barrier pairs with task_work_add()->set_notify_resume() after + * The caller just cleared TIF_NOTIFY_RESUME. This barrier + * pairs with task_work_add()->set_notify_resume() after * hlist_add_head(task->task_works); */ smp_mb__after_atomic(); if (unlikely(current->task_works)) task_work_run(); -#ifdef CONFIG_KEYS_REQUEST_CACHE - if (unlikely(current->cached_requested_key)) { - key_put(current->cached_requested_key); - current->cached_requested_key = NULL; - } -#endif - mem_cgroup_handle_over_high(); - blkcg_maybe_throttle_current(); - - rseq_handle_notify_resume(NULL, regs); -} - -/* - * called by exit_to_user_mode_loop() if ti_work & _TIF_NOTIFY_SIGNAL. 
This - * is currently used by TWA_SIGNAL based task_work, which requires breaking - * wait loops to ensure that task_work is noticed and run. - */ -static inline void tracehook_notify_signal(void) -{ - clear_thread_flag(TIF_NOTIFY_SIGNAL); - smp_mb__after_atomic(); - if (current->task_works) - task_work_run(); -} - -/* - * Called when we have work to process from exit_to_user_mode_loop() - */ -static inline void set_notify_signal(struct task_struct *task) -{ - if (!test_and_set_tsk_thread_flag(task, TIF_NOTIFY_SIGNAL) && - !wake_up_state(task, TASK_INTERRUPTIBLE)) - kick_process(task); } #endif /* */ diff --git a/include/linux/tracepoint-defs.h b/include/linux/tracepoint-defs.h index e7c2276be3..4ac89acb61 100644 --- a/include/linux/tracepoint-defs.h +++ b/include/linux/tracepoint-defs.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef TRACEPOINT_DEFS_H #define TRACEPOINT_DEFS_H 1 @@ -11,8 +10,6 @@ #include #include -struct static_call_key; - struct trace_print_flags { unsigned long mask; const char *name; @@ -32,59 +29,9 @@ struct tracepoint_func { struct tracepoint { const char *name; /* Tracepoint name */ struct static_key key; - struct static_call_key *static_call_key; - void *static_call_tramp; - void *iterator; - int (*regfunc)(void); + void (*regfunc)(void); void (*unregfunc)(void); struct tracepoint_func __rcu *funcs; }; -#ifdef CONFIG_HAVE_ARCH_PREL32_RELOCATIONS -typedef const int tracepoint_ptr_t; -#else -typedef struct tracepoint * const tracepoint_ptr_t; -#endif - -struct bpf_raw_event_map { - struct tracepoint *tp; - void *bpf_func; - u32 num_args; - u32 writable_size; -} __aligned(32); - -/* - * If a tracepoint needs to be called from a header file, it is not - * recommended to call it directly, as tracepoints in header files - * may cause side-effects and bloat the kernel. 
Instead, use - * tracepoint_enabled() to test if the tracepoint is enabled, then if - * it is, call a wrapper function defined in a C file that will then - * call the tracepoint. - * - * For "trace_foo_bar()", you would need to create a wrapper function - * in a C file to call trace_foo_bar(): - * void do_trace_foo_bar(args) { trace_foo_bar(args); } - * Then in the header file, declare the tracepoint: - * DECLARE_TRACEPOINT(foo_bar); - * And call your wrapper: - * static inline void some_inlined_function() { - * [..] - * if (tracepoint_enabled(foo_bar)) - * do_trace_foo_bar(args); - * [..] - * } - * - * Note: tracepoint_enabled(foo_bar) is equivalent to trace_foo_bar_enabled() - * but is safe to have in headers, where trace_foo_bar_enabled() is not. - */ -#define DECLARE_TRACEPOINT(tp) \ - extern struct tracepoint __tracepoint_##tp - -#ifdef CONFIG_TRACEPOINTS -# define tracepoint_enabled(tp) \ - static_key_false(&(__tracepoint_##tp).key) -#else -# define tracepoint_enabled(tracepoint) false -#endif - #endif diff --git a/include/linux/tracepoint.h b/include/linux/tracepoint.h index 28031b15f8..be586c632a 100644 --- a/include/linux/tracepoint.h +++ b/include/linux/tracepoint.h @@ -1,57 +1,45 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ #ifndef _LINUX_TRACEPOINT_H #define _LINUX_TRACEPOINT_H /* * Kernel Tracepoint API. * - * See Documentation/trace/tracepoints.rst. + * See Documentation/trace/tracepoints.txt. * * Copyright (C) 2008-2014 Mathieu Desnoyers * * Heavily inspired from the Linux Kernel Markers. + * + * This file is released under the GPLv2. + * See the file COPYING for more details. 
*/ #include -#include #include #include #include #include #include -#include struct module; struct tracepoint; struct notifier_block; -struct trace_eval_map { +struct trace_enum_map { const char *system; - const char *eval_string; - unsigned long eval_value; + const char *enum_string; + unsigned long enum_value; }; #define TRACEPOINT_DEFAULT_PRIO 10 -extern struct srcu_struct tracepoint_srcu; - extern int tracepoint_probe_register(struct tracepoint *tp, void *probe, void *data); extern int tracepoint_probe_register_prio(struct tracepoint *tp, void *probe, void *data, int prio); extern int -tracepoint_probe_register_prio_may_exist(struct tracepoint *tp, void *probe, void *data, - int prio); -extern int tracepoint_probe_unregister(struct tracepoint *tp, void *probe, void *data); -static inline int -tracepoint_probe_register_may_exist(struct tracepoint *tp, void *probe, - void *data) -{ - return tracepoint_probe_register_prio_may_exist(tp, probe, data, - TRACEPOINT_DEFAULT_PRIO); -} extern void for_each_kernel_tracepoint(void (*fct)(struct tracepoint *tp, void *priv), void *priv); @@ -87,50 +75,19 @@ int unregister_tracepoint_module_notifier(struct notifier_block *nb) * probe unregistration and the end of module exit to make sure there is no * caller executing a probe when it is freed. */ -#ifdef CONFIG_TRACEPOINTS static inline void tracepoint_synchronize_unregister(void) { - synchronize_srcu(&tracepoint_srcu); - synchronize_rcu(); + synchronize_sched(); } -#else -static inline void tracepoint_synchronize_unregister(void) -{ } -#endif #ifdef CONFIG_HAVE_SYSCALL_TRACEPOINTS -extern int syscall_regfunc(void); +extern void syscall_regfunc(void); extern void syscall_unregfunc(void); #endif /* CONFIG_HAVE_SYSCALL_TRACEPOINTS */ -#ifndef PARAMS #define PARAMS(args...) 
args -#endif #define TRACE_DEFINE_ENUM(x) -#define TRACE_DEFINE_SIZEOF(x) - -#ifdef CONFIG_HAVE_ARCH_PREL32_RELOCATIONS -static inline struct tracepoint *tracepoint_ptr_deref(tracepoint_ptr_t *p) -{ - return offset_to_ptr(p); -} - -#define __TRACEPOINT_ENTRY(name) \ - asm(" .section \"__tracepoints_ptrs\", \"a\" \n" \ - " .balign 4 \n" \ - " .long __tracepoint_" #name " - . \n" \ - " .previous \n") -#else -static inline struct tracepoint *tracepoint_ptr_deref(tracepoint_ptr_t *p) -{ - return *p; -} - -#define __TRACEPOINT_ENTRY(name) \ - static tracepoint_ptr_t __tracepoint_ptr_##name __used \ - __section("__tracepoints_ptrs") = &__tracepoint_##name -#endif #endif /* _LINUX_TRACEPOINT_H */ @@ -161,69 +118,52 @@ static inline struct tracepoint *tracepoint_ptr_deref(tracepoint_ptr_t *p) #ifdef TRACEPOINTS_ENABLED -#ifdef CONFIG_HAVE_STATIC_CALL -#define __DO_TRACE_CALL(name, args) \ - do { \ - struct tracepoint_func *it_func_ptr; \ - void *__data; \ - it_func_ptr = \ - rcu_dereference_raw((&__tracepoint_##name)->funcs); \ - if (it_func_ptr) { \ - __data = (it_func_ptr)->data; \ - static_call(tp_func_##name)(__data, args); \ - } \ - } while (0) -#else -#define __DO_TRACE_CALL(name, args) __traceiter_##name(NULL, args) -#endif /* CONFIG_HAVE_STATIC_CALL */ - /* * it_func[0] is never NULL because there is at least one element in the array * when the array itself is non NULL. + * + * Note, the proto and args passed in includes "__data" as the first parameter. + * The reason for this is to handle the "void" prototype. If a tracepoint + * has a "void" prototype, then it is invalid to declare a function + * as "(void *, void)". The DECLARE_TRACE_NOARGS() will pass in just + * "void *data", where as the DECLARE_TRACE() will pass in "void *data, proto". 
*/ -#define __DO_TRACE(name, args, cond, rcuidle) \ +#define __DO_TRACE(tp, proto, args, cond, prercu, postrcu) \ do { \ - int __maybe_unused __idx = 0; \ + struct tracepoint_func *it_func_ptr; \ + void *it_func; \ + void *__data; \ \ if (!(cond)) \ return; \ - \ - /* srcu can't be used from NMI */ \ - WARN_ON_ONCE(rcuidle && in_nmi()); \ - \ - /* keep srcu and sched-rcu usage consistent */ \ - preempt_disable_notrace(); \ - \ - /* \ - * For rcuidle callers, use srcu since sched-rcu \ - * doesn't work from the idle path. \ - */ \ - if (rcuidle) { \ - __idx = srcu_read_lock_notrace(&tracepoint_srcu);\ - rcu_irq_enter_irqson(); \ + prercu; \ + rcu_read_lock_sched_notrace(); \ + it_func_ptr = rcu_dereference_sched((tp)->funcs); \ + if (it_func_ptr) { \ + do { \ + it_func = (it_func_ptr)->func; \ + __data = (it_func_ptr)->data; \ + ((void(*)(proto))(it_func))(args); \ + } while ((++it_func_ptr)->func); \ } \ - \ - __DO_TRACE_CALL(name, TP_ARGS(args)); \ - \ - if (rcuidle) { \ - rcu_irq_exit_irqson(); \ - srcu_read_unlock_notrace(&tracepoint_srcu, __idx);\ - } \ - \ - preempt_enable_notrace(); \ + rcu_read_unlock_sched_notrace(); \ + postrcu; \ } while (0) #ifndef MODULE -#define __DECLARE_TRACE_RCU(name, proto, args, cond) \ +#define __DECLARE_TRACE_RCU(name, proto, args, cond, data_proto, data_args) \ static inline void trace_##name##_rcuidle(proto) \ { \ if (static_key_false(&__tracepoint_##name.key)) \ - __DO_TRACE(name, \ - TP_ARGS(args), \ - TP_CONDITION(cond), 1); \ + __DO_TRACE(&__tracepoint_##name, \ + TP_PROTO(data_proto), \ + TP_ARGS(data_args), \ + TP_CONDITION(cond), \ + rcu_irq_enter_irqson(), \ + rcu_irq_exit_irqson()); \ } #else -#define __DECLARE_TRACE_RCU(name, proto, args, cond) +#define __DECLARE_TRACE_RCU(name, proto, args, cond, data_proto, data_args) #endif /* @@ -238,16 +178,15 @@ static inline struct tracepoint *tracepoint_ptr_deref(tracepoint_ptr_t *p) * even when this tracepoint is off. This code has no purpose other than * poking RCU a bit. 
*/ -#define __DECLARE_TRACE(name, proto, args, cond, data_proto) \ - extern int __traceiter_##name(data_proto); \ - DECLARE_STATIC_CALL(tp_func_##name, __traceiter_##name); \ +#define __DECLARE_TRACE(name, proto, args, cond, data_proto, data_args) \ extern struct tracepoint __tracepoint_##name; \ static inline void trace_##name(proto) \ { \ if (static_key_false(&__tracepoint_##name.key)) \ - __DO_TRACE(name, \ - TP_ARGS(args), \ - TP_CONDITION(cond), 0); \ + __DO_TRACE(&__tracepoint_##name, \ + TP_PROTO(data_proto), \ + TP_ARGS(data_args), \ + TP_CONDITION(cond),,); \ if (IS_ENABLED(CONFIG_LOCKDEP) && (cond)) { \ rcu_read_lock_sched_notrace(); \ rcu_dereference_sched(__tracepoint_##name.funcs);\ @@ -255,7 +194,7 @@ static inline struct tracepoint *tracepoint_ptr_deref(tracepoint_ptr_t *p) } \ } \ __DECLARE_TRACE_RCU(name, PARAMS(proto), PARAMS(args), \ - PARAMS(cond)) \ + PARAMS(cond), PARAMS(data_proto), PARAMS(data_args)) \ static inline int \ register_trace_##name(void (*probe)(data_proto), void *data) \ { \ @@ -290,55 +229,26 @@ static inline struct tracepoint *tracepoint_ptr_deref(tracepoint_ptr_t *p) * structures, so we create an array of pointers that will be used for iteration * on the tracepoints. 
*/ -#define DEFINE_TRACE_FN(_name, _reg, _unreg, proto, args) \ - static const char __tpstrtab_##_name[] \ - __section("__tracepoints_strings") = #_name; \ - extern struct static_call_key STATIC_CALL_KEY(tp_func_##_name); \ - int __traceiter_##_name(void *__data, proto); \ - struct tracepoint __tracepoint_##_name __used \ - __section("__tracepoints") = { \ - .name = __tpstrtab_##_name, \ - .key = STATIC_KEY_INIT_FALSE, \ - .static_call_key = &STATIC_CALL_KEY(tp_func_##_name), \ - .static_call_tramp = STATIC_CALL_TRAMP_ADDR(tp_func_##_name), \ - .iterator = &__traceiter_##_name, \ - .regfunc = _reg, \ - .unregfunc = _unreg, \ - .funcs = NULL }; \ - __TRACEPOINT_ENTRY(_name); \ - int __traceiter_##_name(void *__data, proto) \ - { \ - struct tracepoint_func *it_func_ptr; \ - void *it_func; \ - \ - it_func_ptr = \ - rcu_dereference_raw((&__tracepoint_##_name)->funcs); \ - if (it_func_ptr) { \ - do { \ - it_func = READ_ONCE((it_func_ptr)->func); \ - __data = (it_func_ptr)->data; \ - ((void(*)(void *, proto))(it_func))(__data, args); \ - } while ((++it_func_ptr)->func); \ - } \ - return 0; \ - } \ - DEFINE_STATIC_CALL(tp_func_##_name, __traceiter_##_name); +#define DEFINE_TRACE_FN(name, reg, unreg) \ + static const char __tpstrtab_##name[] \ + __attribute__((section("__tracepoints_strings"))) = #name; \ + struct tracepoint __tracepoint_##name \ + __attribute__((section("__tracepoints"))) = \ + { __tpstrtab_##name, STATIC_KEY_INIT_FALSE, reg, unreg, NULL };\ + static struct tracepoint * const __tracepoint_ptr_##name __used \ + __attribute__((section("__tracepoints_ptrs"))) = \ + &__tracepoint_##name; -#define DEFINE_TRACE(name, proto, args) \ - DEFINE_TRACE_FN(name, NULL, NULL, PARAMS(proto), PARAMS(args)); +#define DEFINE_TRACE(name) \ + DEFINE_TRACE_FN(name, NULL, NULL); #define EXPORT_TRACEPOINT_SYMBOL_GPL(name) \ - EXPORT_SYMBOL_GPL(__tracepoint_##name); \ - EXPORT_SYMBOL_GPL(__traceiter_##name); \ - EXPORT_STATIC_CALL_GPL(tp_func_##name) + 
EXPORT_SYMBOL_GPL(__tracepoint_##name) #define EXPORT_TRACEPOINT_SYMBOL(name) \ - EXPORT_SYMBOL(__tracepoint_##name); \ - EXPORT_SYMBOL(__traceiter_##name); \ - EXPORT_STATIC_CALL(tp_func_##name) - + EXPORT_SYMBOL(__tracepoint_##name) #else /* !TRACEPOINTS_ENABLED */ -#define __DECLARE_TRACE(name, proto, args, cond, data_proto) \ +#define __DECLARE_TRACE(name, proto, args, cond, data_proto, data_args) \ static inline void trace_##name(proto) \ { } \ static inline void trace_##name##_rcuidle(proto) \ @@ -364,8 +274,8 @@ static inline struct tracepoint *tracepoint_ptr_deref(tracepoint_ptr_t *p) return false; \ } -#define DEFINE_TRACE_FN(name, reg, unreg, proto, args) -#define DEFINE_TRACE(name, proto, args) +#define DEFINE_TRACE_FN(name, reg, unreg) +#define DEFINE_TRACE(name) #define EXPORT_TRACEPOINT_SYMBOL_GPL(name) #define EXPORT_TRACEPOINT_SYMBOL(name) @@ -404,7 +314,7 @@ static inline struct tracepoint *tracepoint_ptr_deref(tracepoint_ptr_t *p) static const char *___tp_str __tracepoint_string = str; \ ___tp_str; \ }) -#define __tracepoint_string __used __section("__tracepoint_str") +#define __tracepoint_string __attribute__((section("__tracepoint_str"))) #else /* * tracepoint_string() is used to save the string address for userspace @@ -415,15 +325,36 @@ static inline struct tracepoint *tracepoint_ptr_deref(tracepoint_ptr_t *p) # define __tracepoint_string #endif +/* + * The need for the DECLARE_TRACE_NOARGS() is to handle the prototype + * (void). "void" is a special value in a function prototype and can + * not be combined with other arguments. Since the DECLARE_TRACE() + * macro adds a data element at the beginning of the prototype, + * we need a way to differentiate "(void *data, proto)" from + * "(void *data, void)". The second prototype is invalid. + * + * DECLARE_TRACE_NOARGS() passes "void" as the tracepoint prototype + * and "void *__data" as the callback prototype. 
+ * + * DECLARE_TRACE() passes "proto" as the tracepoint protoype and + * "void *__data, proto" as the callback prototype. + */ +#define DECLARE_TRACE_NOARGS(name) \ + __DECLARE_TRACE(name, void, , \ + cpu_online(raw_smp_processor_id()), \ + void *__data, __data) + #define DECLARE_TRACE(name, proto, args) \ __DECLARE_TRACE(name, PARAMS(proto), PARAMS(args), \ cpu_online(raw_smp_processor_id()), \ - PARAMS(void *__data, proto)) + PARAMS(void *__data, proto), \ + PARAMS(__data, args)) #define DECLARE_TRACE_CONDITION(name, proto, args, cond) \ __DECLARE_TRACE(name, PARAMS(proto), PARAMS(args), \ cpu_online(raw_smp_processor_id()) && (PARAMS(cond)), \ - PARAMS(void *__data, proto)) + PARAMS(void *__data, proto), \ + PARAMS(__data, args)) #define TRACE_EVENT_FLAGS(event, flag) @@ -475,7 +406,7 @@ static inline struct tracepoint *tracepoint_ptr_deref(tracepoint_ptr_t *p) * * * * The declared 'local variable' is called '__entry' * * - * * __field(pid_t, prev_pid) is equivalent to a standard declaration: + * * __field(pid_t, prev_prid) is equivalent to a standard declariton: * * * * pid_t prev_pid; * * @@ -567,19 +498,4 @@ static inline struct tracepoint *tracepoint_ptr_deref(tracepoint_ptr_t *p) #define TRACE_EVENT_PERF_PERM(event, expr...) 
-#define DECLARE_EVENT_NOP(name, proto, args) \ - static inline void trace_##name(proto) \ - { } \ - static inline bool trace_##name##_enabled(void) \ - { \ - return false; \ - } - -#define TRACE_EVENT_NOP(name, proto, args, struct, assign, print) \ - DECLARE_EVENT_NOP(name, PARAMS(proto), PARAMS(args)) - -#define DECLARE_EVENT_CLASS_NOP(name, proto, args, tstruct, assign, print) -#define DEFINE_EVENT_NOP(template, name, proto, args) \ - DECLARE_EVENT_NOP(name, PARAMS(proto), PARAMS(args)) - #endif /* ifdef TRACE_EVENT (see note above) */ diff --git a/include/linux/transport_class.h b/include/linux/transport_class.h index 63076fb835..11087cdd4a 100644 --- a/include/linux/transport_class.h +++ b/include/linux/transport_class.h @@ -1,8 +1,9 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * transport_class.h - a generic container for all transport classes * * Copyright (c) 2005 - James Bottomley + * + * This file is licensed under GPLv2 */ #ifndef _TRANSPORT_CLASS_H_ @@ -62,16 +63,16 @@ struct transport_container { container_of(x, struct transport_container, ac) void transport_remove_device(struct device *); -int transport_add_device(struct device *); +void transport_add_device(struct device *); void transport_setup_device(struct device *); void transport_configure_device(struct device *); void transport_destroy_device(struct device *); -static inline int +static inline void transport_register_device(struct device *dev) { transport_setup_device(dev); - return transport_add_device(dev); + transport_add_device(dev); } static inline void diff --git a/include/linux/tsacct_kern.h b/include/linux/tsacct_kern.h index d8ddce26e1..3251965bf4 100644 --- a/include/linux/tsacct_kern.h +++ b/include/linux/tsacct_kern.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ /* * tsacct_kern.h - kernel header for system accounting over taskstats interface * diff --git a/include/linux/tty.h b/include/linux/tty.h index 168e57e40b..610732a17f 100644 --- a/include/linux/tty.h +++ 
b/include/linux/tty.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_TTY_H #define _LINUX_TTY_H @@ -6,10 +5,8 @@ #include #include #include -#include #include #include -#include #include #include #include @@ -17,6 +14,30 @@ #include +/* + * Lock subclasses for tty locks + * + * TTY_LOCK_NORMAL is for normal ttys and master ptys. + * TTY_LOCK_SLAVE is for slave ptys only. + * + * Lock subclasses are necessary for handling nested locking with pty pairs. + * tty locks which use nested locking: + * + * legacy_mutex - Nested tty locks are necessary for releasing pty pairs. + * The stable lock order is master pty first, then slave pty. + * termios_rwsem - The stable lock order is tty_buffer lock->termios_rwsem. + * Subclassing this lock enables the slave pty to hold its + * termios_rwsem when claiming the master tty_buffer lock. + * tty_buffer lock - slave ptys can claim nested buffer lock when handling + * signal chars. The stable lock order is slave pty, then + * master. + */ + +enum { + TTY_LOCK_NORMAL = 0, + TTY_LOCK_SLAVE, +}; + /* * (Note: the *_driver.minor_start values 1, 64, 128, 192 are * hardcoded at present.) 
@@ -32,6 +53,54 @@ */ #define __DISABLED_CHAR '\0' +struct tty_buffer { + union { + struct tty_buffer *next; + struct llist_node free; + }; + int used; + int size; + int commit; + int read; + int flags; + /* Data points here */ + unsigned long data[0]; +}; + +/* Values for .flags field of tty_buffer */ +#define TTYB_NORMAL 1 /* buffer has no flags buffer */ + +static inline unsigned char *char_buf_ptr(struct tty_buffer *b, int ofs) +{ + return ((unsigned char *)b->data) + ofs; +} + +static inline char *flag_buf_ptr(struct tty_buffer *b, int ofs) +{ + return (char *)char_buf_ptr(b, ofs) + b->size; +} + +struct tty_bufhead { + struct tty_buffer *head; /* Queue head */ + struct work_struct work; + struct mutex lock; + atomic_t priority; + struct tty_buffer sentinel; + struct llist_head free; /* Free queue head */ + atomic_t mem_used; /* In-use buffers excluding free list */ + int mem_limit; + struct tty_buffer *tail; /* Active buffer */ +}; +/* + * When a break, frame error, or parity error happens, these codes are + * stuffed into the flags buffer. + */ +#define TTY_NORMAL 0 +#define TTY_BREAK 1 +#define TTY_FRAME 2 +#define TTY_PARITY 3 +#define TTY_OVERRUN 4 + #define INTR_CHAR(tty) ((tty)->termios.c_cc[VINTR]) #define QUIT_CHAR(tty) ((tty)->termios.c_cc[VQUIT]) #define ERASE_CHAR(tty) ((tty)->termios.c_cc[VERASE]) @@ -117,33 +186,93 @@ struct device; struct signal_struct; + +/* + * Port level information. Each device keeps its own port level information + * so provide a common structure for those ports wanting to use common support + * routines. + * + * The tty port has a different lifetime to the tty so must be kept apart. + * In addition be careful as tty -> port mappings are valid for the life + * of the tty object but in many cases port -> tty mappings are valid only + * until a hangup so don't use the wrong path. 
+ */ + +struct tty_port; + +struct tty_port_operations { + /* Return 1 if the carrier is raised */ + int (*carrier_raised)(struct tty_port *port); + /* Control the DTR line */ + void (*dtr_rts)(struct tty_port *port, int raise); + /* Called when the last close completes or a hangup finishes + IFF the port was initialized. Do not use to free resources. Called + under the port mutex to serialize against activate/shutdowns */ + void (*shutdown)(struct tty_port *port); + /* Called under the port mutex from tty_port_open, serialized using + the port mutex */ + /* FIXME: long term getting the tty argument *out* of this would be + good for consoles */ + int (*activate)(struct tty_port *port, struct tty_struct *tty); + /* Called on the final put of a port */ + void (*destruct)(struct tty_port *port); +}; + +struct tty_port { + struct tty_bufhead buf; /* Locked internally */ + struct tty_struct *tty; /* Back pointer */ + struct tty_struct *itty; /* internal back ptr */ + const struct tty_port_operations *ops; /* Port operations */ + spinlock_t lock; /* Lock protecting tty field */ + int blocked_open; /* Waiting to open */ + atomic_t count; /* Usage count */ + wait_queue_head_t open_wait; /* Open waiters */ + wait_queue_head_t delta_msr_wait; /* Modem status change */ + unsigned long flags; /* User TTY flags ASYNC_ */ + unsigned long iflags; /* Internal flags TTY_PORT_ */ + unsigned char console:1, /* port is a console */ + low_latency:1; /* optional: tune for latency */ + struct mutex mutex; /* Locking */ + struct mutex buf_mutex; /* Buffer alloc lock */ + unsigned char *xmit_buf; /* Optional buffer */ + unsigned int close_delay; /* Close port delay */ + unsigned int closing_wait; /* Delay for output */ + int drain_delay; /* Set to zero if no pure time + based drain is needed else + set to size of fifo */ + struct kref kref; /* Ref counter */ +}; + +/* tty_port::iflags bits -- use atomic bit ops */ +#define TTY_PORT_INITIALIZED 0 /* device is initialized */ +#define 
TTY_PORT_SUSPENDED 1 /* device is suspended */ +#define TTY_PORT_ACTIVE 2 /* device is open */ + +/* + * uart drivers: use the uart_port::status field and the UPSTAT_* defines + * for s/w-based flow control steering and carrier detection status + */ +#define TTY_PORT_CTS_FLOW 3 /* h/w flow control enabled */ +#define TTY_PORT_CHECK_CD 4 /* carrier detect enabled */ + +/* + * Where all of the state associated with a tty is kept while the tty + * is open. Since the termios state should be kept even if the tty + * has been closed --- for things like the baud rate, etc --- it is + * not stored here, but rather a pointer to the real state is stored + * here. Possible the winsize structure should have the same + * treatment, but (1) the default 80x24 is usually right and (2) it's + * most often used by a windowing system, which will set the correct + * size each time the window is created or resized anyway. + * - TYT, 9/14/92 + */ + struct tty_operations; -/** - * struct tty_struct - state associated with a tty while open - * - * @flow.lock: lock for flow members - * @flow.stopped: tty stopped/started by tty_stop/tty_start - * @flow.tco_stopped: tty stopped/started by TCOOFF/TCOON ioctls (it has - * precedense over @flow.stopped) - * @flow.unused: alignment for Alpha, so that no members other than @flow.* are - * modified by the same 64b word store. The @flow's __aligned is - * there for the very same reason. - * @ctrl.lock: lock for ctrl members - * @ctrl.pgrp: process group of this tty (setpgrp(2)) - * @ctrl.session: session of this tty (setsid(2)). Writes are protected by both - * @ctrl.lock and legacy mutex, readers must use at least one of - * them. - * @ctrl.pktstatus: packet mode status (bitwise OR of TIOCPKT_* constants) - * @ctrl.packet: packet mode enabled - * - * All of the state associated with a tty while the tty is open. Persistent - * storage for tty devices is referenced here as @port in struct tty_port. 
- */ struct tty_struct { int magic; struct kref kref; - struct device *dev; /* class device or NULL (e.g. ptys, serdev) */ + struct device *dev; struct tty_driver *driver; const struct tty_operations *ops; int index; @@ -157,35 +286,30 @@ struct tty_struct { struct mutex throttle_mutex; struct rw_semaphore termios_rwsem; struct mutex winsize_mutex; + spinlock_t ctrl_lock; + spinlock_t flow_lock; /* Termios values are protected by the termios rwsem */ struct ktermios termios, termios_locked; + struct termiox *termiox; /* May be NULL for unsupported */ char name[64]; + struct pid *pgrp; /* Protected by ctrl lock */ + struct pid *session; unsigned long flags; int count; struct winsize winsize; /* winsize_mutex */ - - struct { - spinlock_t lock; - bool stopped; - bool tco_stopped; - unsigned long unused[0]; - } __aligned(sizeof(unsigned long)) flow; - - struct { - spinlock_t lock; - struct pid *pgrp; - struct pid *session; - unsigned char pktstatus; - bool packet; - unsigned long unused[0]; - } __aligned(sizeof(unsigned long)) ctrl; - + unsigned long stopped:1, /* flow_lock */ + flow_stopped:1, + unused:BITS_PER_LONG - 2; int hw_stopped; + unsigned long ctrl_status:8, /* ctrl_lock */ + packet:1, + unused_ctrl:BITS_PER_LONG - 9; unsigned int receive_room; /* Bytes free for queue */ int flow_change; struct tty_struct *link; struct fasync_struct *fasync; + int alt_speed; /* For magic substitution of 38400 bps */ wait_queue_head_t write_wait; wait_queue_head_t read_wait; struct work_struct hangup_work; @@ -231,14 +355,21 @@ struct tty_file_private { #define TTY_PTY_LOCK 16 /* pty private */ #define TTY_NO_WRITE_SPLIT 17 /* Preserve write boundaries to driver */ #define TTY_HUPPED 18 /* Post driver->hangup() */ -#define TTY_HUPPING 19 /* Hangup in progress */ -#define TTY_LDISC_CHANGING 20 /* Change pending - non-block IO */ #define TTY_LDISC_HALTED 22 /* Line discipline is halted */ -static inline bool tty_io_nonblock(struct tty_struct *tty, struct file *file) +/* Values 
for tty->flow_change */ +#define TTY_THROTTLE_SAFE 1 +#define TTY_UNTHROTTLE_SAFE 2 + +static inline void __tty_set_flow_change(struct tty_struct *tty, int val) { - return file->f_flags & O_NONBLOCK || - test_bit(TTY_LDISC_CHANGING, &tty->flags); + tty->flow_change = val; +} + +static inline void tty_set_flow_change(struct tty_struct *tty, int val) +{ + tty->flow_change = val; + smp_mb(); } static inline bool tty_io_error(struct tty_struct *tty) @@ -252,6 +383,7 @@ static inline bool tty_throttled(struct tty_struct *tty) } #ifdef CONFIG_TTY +extern void console_init(void); extern void tty_kref_put(struct tty_struct *tty); extern struct pid *tty_get_pgrp(struct tty_struct *tty); extern void tty_vhangup_self(void); @@ -262,11 +394,9 @@ extern struct tty_struct *get_current_tty(void); /* tty_io.c */ extern int __init tty_init(void); extern const char *tty_name(const struct tty_struct *tty); -extern struct tty_struct *tty_kopen_exclusive(dev_t device); -extern struct tty_struct *tty_kopen_shared(dev_t device); -extern void tty_kclose(struct tty_struct *tty); -extern int tty_dev_name_to_number(const char *name, dev_t *number); #else +static inline void console_init(void) +{ } static inline void tty_kref_put(struct tty_struct *tty) { } static inline struct pid *tty_get_pgrp(struct tty_struct *tty) @@ -286,12 +416,6 @@ static inline int __init tty_init(void) { return 0; } static inline const char *tty_name(const struct tty_struct *tty) { return "(none)"; } -static inline struct tty_struct *tty_kopen_exclusive(dev_t device) -{ return ERR_PTR(-ENODEV); } -static inline void tty_kclose(struct tty_struct *tty) -{ } -static inline int tty_dev_name_to_number(const char *name, dev_t *number) -{ return -ENOTSUPP; } #endif extern struct ktermios tty_std_termios; @@ -318,20 +442,32 @@ static inline struct tty_struct *tty_kref_get(struct tty_struct *tty) extern const char *tty_driver_name(const struct tty_struct *tty); extern void tty_wait_until_sent(struct tty_struct *tty, long 
timeout); +extern int __tty_check_change(struct tty_struct *tty, int sig); +extern int tty_check_change(struct tty_struct *tty); +extern void __stop_tty(struct tty_struct *tty); extern void stop_tty(struct tty_struct *tty); +extern void __start_tty(struct tty_struct *tty); extern void start_tty(struct tty_struct *tty); +extern int tty_register_driver(struct tty_driver *driver); +extern int tty_unregister_driver(struct tty_driver *driver); +extern struct device *tty_register_device(struct tty_driver *driver, + unsigned index, struct device *dev); +extern struct device *tty_register_device_attr(struct tty_driver *driver, + unsigned index, struct device *device, + void *drvdata, + const struct attribute_group **attr_grp); +extern void tty_unregister_device(struct tty_driver *driver, unsigned index); extern void tty_write_message(struct tty_struct *tty, char *msg); extern int tty_send_xchar(struct tty_struct *tty, char ch); extern int tty_put_char(struct tty_struct *tty, unsigned char c); -extern unsigned int tty_chars_in_buffer(struct tty_struct *tty); -extern unsigned int tty_write_room(struct tty_struct *tty); +extern int tty_chars_in_buffer(struct tty_struct *tty); +extern int tty_write_room(struct tty_struct *tty); extern void tty_driver_flush_buffer(struct tty_struct *tty); +extern void tty_throttle(struct tty_struct *tty); extern void tty_unthrottle(struct tty_struct *tty); extern int tty_throttle_safe(struct tty_struct *tty); extern int tty_unthrottle_safe(struct tty_struct *tty); extern int tty_do_resize(struct tty_struct *tty, struct winsize *ws); -extern int tty_get_icount(struct tty_struct *tty, - struct serial_icounter_struct *icount); extern int is_current_pgrp_orphaned(void); extern void tty_hangup(struct tty_struct *tty); extern void tty_vhangup(struct tty_struct *tty); @@ -339,7 +475,15 @@ extern int tty_hung_up_p(struct file *filp); extern void do_SAK(struct tty_struct *tty); extern void __do_SAK(struct tty_struct *tty); extern void no_tty(void); 
+extern void tty_buffer_free_all(struct tty_port *port); +extern void tty_buffer_flush(struct tty_struct *tty, struct tty_ldisc *ld); +extern void tty_buffer_init(struct tty_port *port); +extern void tty_buffer_set_lock_subclass(struct tty_port *port); +extern bool tty_buffer_restart_work(struct tty_port *port); +extern bool tty_buffer_cancel_work(struct tty_port *port); +extern void tty_buffer_flush_work(struct tty_port *port); extern speed_t tty_termios_baud_rate(struct ktermios *termios); +extern speed_t tty_termios_input_baud_rate(struct ktermios *termios); extern void tty_termios_encode_baud_rate(struct ktermios *termios, speed_t ibaud, speed_t obaud); extern void tty_encode_baud_rate(struct tty_struct *tty, @@ -360,41 +504,181 @@ static inline speed_t tty_get_baud_rate(struct tty_struct *tty) return tty_termios_baud_rate(&tty->termios); } -unsigned char tty_get_char_size(unsigned int cflag); -unsigned char tty_get_frame_size(unsigned int cflag); - extern void tty_termios_copy_hw(struct ktermios *new, struct ktermios *old); -extern int tty_termios_hw_change(const struct ktermios *a, const struct ktermios *b); +extern int tty_termios_hw_change(struct ktermios *a, struct ktermios *b); extern int tty_set_termios(struct tty_struct *tty, struct ktermios *kt); -extern void tty_wakeup(struct tty_struct *tty); +extern struct tty_ldisc *tty_ldisc_ref(struct tty_struct *); +extern void tty_ldisc_deref(struct tty_ldisc *); +extern struct tty_ldisc *tty_ldisc_ref_wait(struct tty_struct *); +extern void tty_ldisc_hangup(struct tty_struct *tty, bool reset); +extern int tty_ldisc_reinit(struct tty_struct *tty, int disc); +extern const struct file_operations tty_ldiscs_proc_fops; +extern void tty_wakeup(struct tty_struct *tty); +extern void tty_ldisc_flush(struct tty_struct *tty); + +extern long tty_ioctl(struct file *file, unsigned int cmd, unsigned long arg); extern int tty_mode_ioctl(struct tty_struct *tty, struct file *file, unsigned int cmd, unsigned long arg); extern 
int tty_perform_flush(struct tty_struct *tty, unsigned long arg); +extern void tty_default_fops(struct file_operations *fops); +extern struct tty_struct *alloc_tty_struct(struct tty_driver *driver, int idx); +extern int tty_alloc_file(struct file *file); +extern void tty_add_file(struct tty_struct *tty, struct file *file); +extern void tty_free_file(struct file *file); extern struct tty_struct *tty_init_dev(struct tty_driver *driver, int idx); -extern void tty_release_struct(struct tty_struct *tty, int idx); +extern int tty_release(struct inode *inode, struct file *filp); extern void tty_init_termios(struct tty_struct *tty); -extern void tty_save_termios(struct tty_struct *tty); extern int tty_standard_install(struct tty_driver *driver, struct tty_struct *tty); extern struct mutex tty_mutex; +#define tty_is_writelocked(tty) (mutex_is_locked(&tty->atomic_write_lock)) + +extern void tty_port_init(struct tty_port *port); +extern void tty_port_link_device(struct tty_port *port, + struct tty_driver *driver, unsigned index); +extern struct device *tty_port_register_device(struct tty_port *port, + struct tty_driver *driver, unsigned index, + struct device *device); +extern struct device *tty_port_register_device_attr(struct tty_port *port, + struct tty_driver *driver, unsigned index, + struct device *device, void *drvdata, + const struct attribute_group **attr_grp); +extern int tty_port_alloc_xmit_buf(struct tty_port *port); +extern void tty_port_free_xmit_buf(struct tty_port *port); +extern void tty_port_destroy(struct tty_port *port); +extern void tty_port_put(struct tty_port *port); + +static inline struct tty_port *tty_port_get(struct tty_port *port) +{ + if (port && kref_get_unless_zero(&port->kref)) + return port; + return NULL; +} + +/* If the cts flow control is enabled, return true. 
*/ +static inline bool tty_port_cts_enabled(struct tty_port *port) +{ + return test_bit(TTY_PORT_CTS_FLOW, &port->iflags); +} + +static inline void tty_port_set_cts_flow(struct tty_port *port, bool val) +{ + if (val) + set_bit(TTY_PORT_CTS_FLOW, &port->iflags); + else + clear_bit(TTY_PORT_CTS_FLOW, &port->iflags); +} + +static inline bool tty_port_active(struct tty_port *port) +{ + return test_bit(TTY_PORT_ACTIVE, &port->iflags); +} + +static inline void tty_port_set_active(struct tty_port *port, bool val) +{ + if (val) + set_bit(TTY_PORT_ACTIVE, &port->iflags); + else + clear_bit(TTY_PORT_ACTIVE, &port->iflags); +} + +static inline bool tty_port_check_carrier(struct tty_port *port) +{ + return test_bit(TTY_PORT_CHECK_CD, &port->iflags); +} + +static inline void tty_port_set_check_carrier(struct tty_port *port, bool val) +{ + if (val) + set_bit(TTY_PORT_CHECK_CD, &port->iflags); + else + clear_bit(TTY_PORT_CHECK_CD, &port->iflags); +} + +static inline bool tty_port_suspended(struct tty_port *port) +{ + return test_bit(TTY_PORT_SUSPENDED, &port->iflags); +} + +static inline void tty_port_set_suspended(struct tty_port *port, bool val) +{ + if (val) + set_bit(TTY_PORT_SUSPENDED, &port->iflags); + else + clear_bit(TTY_PORT_SUSPENDED, &port->iflags); +} + +static inline bool tty_port_initialized(struct tty_port *port) +{ + return test_bit(TTY_PORT_INITIALIZED, &port->iflags); +} + +static inline void tty_port_set_initialized(struct tty_port *port, bool val) +{ + if (val) + set_bit(TTY_PORT_INITIALIZED, &port->iflags); + else + clear_bit(TTY_PORT_INITIALIZED, &port->iflags); +} + +extern struct tty_struct *tty_port_tty_get(struct tty_port *port); +extern void tty_port_tty_set(struct tty_port *port, struct tty_struct *tty); +extern int tty_port_carrier_raised(struct tty_port *port); +extern void tty_port_raise_dtr_rts(struct tty_port *port); +extern void tty_port_lower_dtr_rts(struct tty_port *port); +extern void tty_port_hangup(struct tty_port *port); +extern void 
tty_port_tty_hangup(struct tty_port *port, bool check_clocal); +extern void tty_port_tty_wakeup(struct tty_port *port); +extern int tty_port_block_til_ready(struct tty_port *port, + struct tty_struct *tty, struct file *filp); +extern int tty_port_close_start(struct tty_port *port, + struct tty_struct *tty, struct file *filp); +extern void tty_port_close_end(struct tty_port *port, struct tty_struct *tty); +extern void tty_port_close(struct tty_port *port, + struct tty_struct *tty, struct file *filp); +extern int tty_port_install(struct tty_port *port, struct tty_driver *driver, + struct tty_struct *tty); +extern int tty_port_open(struct tty_port *port, + struct tty_struct *tty, struct file *filp); +static inline int tty_port_users(struct tty_port *port) +{ + return atomic_read(&port->count) + port->blocked_open; +} + +extern int tty_register_ldisc(int disc, struct tty_ldisc_ops *new_ldisc); +extern int tty_unregister_ldisc(int disc); +extern int tty_set_ldisc(struct tty_struct *tty, int disc); +extern int tty_ldisc_setup(struct tty_struct *tty, struct tty_struct *o_tty); +extern void tty_ldisc_release(struct tty_struct *tty); +extern void tty_ldisc_init(struct tty_struct *tty); +extern void tty_ldisc_deinit(struct tty_struct *tty); +extern int tty_ldisc_receive_buf(struct tty_ldisc *ld, unsigned char *p, + char *f, int count); + /* n_tty.c */ extern void n_tty_inherit_ops(struct tty_ldisc_ops *ops); -#ifdef CONFIG_TTY extern void __init n_tty_init(void); -#else -static inline void n_tty_init(void) { } -#endif /* tty_audit.c */ #ifdef CONFIG_AUDIT +extern void tty_audit_add_data(struct tty_struct *tty, const void *data, + size_t size); extern void tty_audit_exit(void); extern void tty_audit_fork(struct signal_struct *sig); +extern void tty_audit_tiocsti(struct tty_struct *tty, char ch); extern int tty_audit_push(void); #else +static inline void tty_audit_add_data(struct tty_struct *tty, const void *data, + size_t size) +{ +} +static inline void 
tty_audit_tiocsti(struct tty_struct *tty, char ch) +{ +} static inline void tty_audit_exit(void) { } @@ -410,6 +694,8 @@ static inline int tty_audit_push(void) /* tty_ioctl.c */ extern int n_tty_ioctl_helper(struct tty_struct *tty, struct file *file, unsigned int cmd, unsigned long arg); +extern long n_tty_compat_ioctl_helper(struct tty_struct *tty, struct file *file, + unsigned int cmd, unsigned long arg); /* vt.c */ @@ -428,4 +714,24 @@ extern void tty_lock_slave(struct tty_struct *tty); extern void tty_unlock_slave(struct tty_struct *tty); extern void tty_set_lock_subclass(struct tty_struct *tty); +#ifdef CONFIG_PROC_FS +extern void proc_tty_register_driver(struct tty_driver *); +extern void proc_tty_unregister_driver(struct tty_driver *); +#else +static inline void proc_tty_register_driver(struct tty_driver *d) {} +static inline void proc_tty_unregister_driver(struct tty_driver *d) {} +#endif + +#define tty_msg(fn, tty, f, ...) \ + fn("%s %s: " f, tty_driver_name(tty), tty_name(tty), ##__VA_ARGS__) + +#define tty_debug(tty, f, ...) tty_msg(pr_debug, tty, f, ##__VA_ARGS__) +#define tty_info(tty, f, ...) tty_msg(pr_info, tty, f, ##__VA_ARGS__) +#define tty_notice(tty, f, ...) tty_msg(pr_notice, tty, f, ##__VA_ARGS__) +#define tty_warn(tty, f, ...) tty_msg(pr_warn, tty, f, ##__VA_ARGS__) +#define tty_err(tty, f, ...) tty_msg(pr_err, tty, f, ##__VA_ARGS__) + +#define tty_info_ratelimited(tty, f, ...) 
\ + tty_msg(pr_info_ratelimited, tty, f, ##__VA_ARGS__) + #endif diff --git a/include/linux/tty_driver.h b/include/linux/tty_driver.h index c20431d8de..76dc1fa826 100644 --- a/include/linux/tty_driver.h +++ b/include/linux/tty_driver.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_TTY_DRIVER_H #define _LINUX_TTY_DRIVER_H @@ -89,7 +88,7 @@ * * Note: Do not call this function directly, call tty_driver_flush_chars * - * unsigned int (*write_room)(struct tty_struct *tty); + * int (*write_room)(struct tty_struct *tty); * * This routine returns the numbers of characters the tty driver * will accept for queuing to be written. This number is subject @@ -136,7 +135,7 @@ * the line discipline are close to full, and it should somehow * signal that no more characters should be sent to the tty. * - * Optional: Always invoke via tty_throttle_safe(), called under the + * Optional: Always invoke via tty_throttle(), called under the * termios lock. * * void (*unthrottle)(struct tty_struct * tty); @@ -153,7 +152,7 @@ * This routine notifies the tty driver that it should stop * outputting characters to the tty device. * - * Called with ->flow.lock held. Serialized with start() method. + * Called with ->flow_lock held. Serialized with start() method. * * Optional: * @@ -164,7 +163,7 @@ * This routine notifies the tty driver that it resume sending * characters to the tty device. * - * Called with ->flow.lock held. Serialized with stop() method. + * Called with ->flow_lock held. Serialized with stop() method. * * Optional: * @@ -224,25 +223,30 @@ * line). See tty_do_resize() if you need to wrap the standard method * in your own logic - the usual case. * + * void (*set_termiox)(struct tty_struct *tty, struct termiox *new); + * + * Called when the device receives a termiox based ioctl. Passes down + * the requested data from user space. This method will not be invoked + * unless the tty also has a valid tty->termiox pointer. 
+ * + * Optional: Called under the termios lock + * * int (*get_icount)(struct tty_struct *tty, struct serial_icounter *icount); * * Called when the device receives a TIOCGICOUNT ioctl. Passed a kernel * structure to complete. This method is optional and will only be called - * if provided (otherwise ENOTTY will be returned). + * if provided (otherwise EINVAL will be returned). */ #include #include -#include #include #include #include -#include struct tty_struct; struct tty_driver; struct serial_icounter_struct; -struct serial_struct; struct tty_operations { struct tty_struct * (*lookup)(struct tty_driver *driver, @@ -257,8 +261,8 @@ struct tty_operations { const unsigned char *buf, int count); int (*put_char)(struct tty_struct *tty, unsigned char ch); void (*flush_chars)(struct tty_struct *tty); - unsigned int (*write_room)(struct tty_struct *tty); - unsigned int (*chars_in_buffer)(struct tty_struct *tty); + int (*write_room)(struct tty_struct *tty); + int (*chars_in_buffer)(struct tty_struct *tty); int (*ioctl)(struct tty_struct *tty, unsigned int cmd, unsigned long arg); long (*compat_ioctl)(struct tty_struct *tty, @@ -278,18 +282,16 @@ struct tty_operations { int (*tiocmset)(struct tty_struct *tty, unsigned int set, unsigned int clear); int (*resize)(struct tty_struct *tty, struct winsize *ws); + int (*set_termiox)(struct tty_struct *tty, struct termiox *tnew); int (*get_icount)(struct tty_struct *tty, struct serial_icounter_struct *icount); - int (*get_serial)(struct tty_struct *tty, struct serial_struct *p); - int (*set_serial)(struct tty_struct *tty, struct serial_struct *p); - void (*show_fdinfo)(struct tty_struct *tty, struct seq_file *m); #ifdef CONFIG_CONSOLE_POLL int (*poll_init)(struct tty_driver *driver, int line, char *options); int (*poll_get_char)(struct tty_driver *driver, int line); void (*poll_put_char)(struct tty_driver *driver, int line, char ch); #endif - int (*proc_show)(struct seq_file *, void *); -} __randomize_layout; + const struct 
file_operations *proc_fops; +} __do_const __randomize_layout; struct tty_driver { int magic; /* magic number for this structure */ @@ -329,6 +331,9 @@ extern struct list_head tty_drivers; extern struct tty_driver *__tty_alloc_driver(unsigned int lines, struct module *owner, unsigned long flags); +extern void put_tty_driver(struct tty_driver *driver); +extern void tty_set_operations(struct tty_driver *driver, + const struct tty_operations *op); extern struct tty_driver *tty_find_polling_driver(char *name, int *line); extern void tty_driver_kref_put(struct tty_driver *driver); @@ -337,18 +342,24 @@ extern void tty_driver_kref_put(struct tty_driver *driver); #define tty_alloc_driver(lines, flags) \ __tty_alloc_driver(lines, THIS_MODULE, flags) +/* + * DEPRECATED Do not use this in new code, use tty_alloc_driver instead. + * (And change the return value checks.) + */ +static inline struct tty_driver *alloc_tty_driver(unsigned int lines) +{ + struct tty_driver *ret = tty_alloc_driver(lines, 0); + if (IS_ERR(ret)) + return NULL; + return ret; +} + static inline struct tty_driver *tty_driver_kref_get(struct tty_driver *d) { kref_get(&d->kref); return d; } -static inline void tty_set_operations(struct tty_driver *driver, - const struct tty_operations *op) -{ - driver->ops = op; -} - /* tty driver magic number */ #define TTY_DRIVER_MAGIC 0x5402 @@ -426,21 +437,4 @@ static inline void tty_set_operations(struct tty_driver *driver, /* serial subtype definitions */ #define SERIAL_TYPE_NORMAL 1 -int tty_register_driver(struct tty_driver *driver); -void tty_unregister_driver(struct tty_driver *driver); -struct device *tty_register_device(struct tty_driver *driver, unsigned index, - struct device *dev); -struct device *tty_register_device_attr(struct tty_driver *driver, - unsigned index, struct device *device, void *drvdata, - const struct attribute_group **attr_grp); -void tty_unregister_device(struct tty_driver *driver, unsigned index); - -#ifdef CONFIG_PROC_FS -void 
proc_tty_register_driver(struct tty_driver *); -void proc_tty_unregister_driver(struct tty_driver *); -#else -static inline void proc_tty_register_driver(struct tty_driver *d) {} -static inline void proc_tty_unregister_driver(struct tty_driver *d) {} -#endif - #endif /* #ifdef _LINUX_TTY_DRIVER_H */ diff --git a/include/linux/tty_flip.h b/include/linux/tty_flip.h index 32284992b3..c28dd523f9 100644 --- a/include/linux/tty_flip.h +++ b/include/linux/tty_flip.h @@ -1,14 +1,8 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_TTY_FLIP_H #define _LINUX_TTY_FLIP_H -#include -#include - -struct tty_ldisc; - extern int tty_buffer_set_limit(struct tty_port *port, int limit); -extern unsigned int tty_buffer_space_avail(struct tty_port *port); +extern int tty_buffer_space_avail(struct tty_port *port); extern int tty_buffer_request_room(struct tty_port *port, size_t size); extern int tty_insert_flip_string_flags(struct tty_port *port, const unsigned char *chars, const char *flags, size_t size); @@ -18,7 +12,6 @@ extern int tty_prepare_flip_string(struct tty_port *port, unsigned char **chars, size_t size); extern void tty_flip_buffer_push(struct tty_port *port); void tty_schedule_flip(struct tty_port *port); -int __tty_insert_flip_char(struct tty_port *port, unsigned char ch, char flag); static inline int tty_insert_flip_char(struct tty_port *port, unsigned char ch, char flag) @@ -33,7 +26,7 @@ static inline int tty_insert_flip_char(struct tty_port *port, *char_buf_ptr(tb, tb->used++) = ch; return 1; } - return __tty_insert_flip_char(port, ch, flag); + return tty_insert_flip_string_flags(port, &ch, &flag, 1); } static inline int tty_insert_flip_string(struct tty_port *port, @@ -42,9 +35,6 @@ static inline int tty_insert_flip_string(struct tty_port *port, return tty_insert_flip_string_fixed_flag(port, chars, TTY_NORMAL, size); } -int tty_ldisc_receive_buf(struct tty_ldisc *ld, const unsigned char *p, - const char *f, int count); - extern void 
tty_buffer_lock_exclusive(struct tty_port *port); extern void tty_buffer_unlock_exclusive(struct tty_port *port); diff --git a/include/linux/tty_ldisc.h b/include/linux/tty_ldisc.h index b1d812e902..7704c4834b 100644 --- a/include/linux/tty_ldisc.h +++ b/include/linux/tty_ldisc.h @@ -1,9 +1,6 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_TTY_LDISC_H #define _LINUX_TTY_LDISC_H -struct tty_struct; - /* * This structure defines the interface between the tty line discipline * implementation and the tty routines. The following routines can be @@ -56,17 +53,11 @@ struct tty_struct; * low-level driver can "grab" an ioctl request before the line * discpline has a chance to see it. * - * int (*compat_ioctl)(struct tty_struct * tty, struct file * file, + * long (*compat_ioctl)(struct tty_struct * tty, struct file * file, * unsigned int cmd, unsigned long arg); * * Process ioctl calls from 32-bit process on 64-bit system * - * NOTE: only ioctls that are neither "pointer to compatible - * structure" nor tty-generic. Something private that takes - * an integer or a pointer to wordsize-sensitive structure - * belongs here, but most of ldiscs will happily leave - * it NULL. 
- * * void (*set_termios)(struct tty_struct *tty, struct ktermios * old); * * This function notifies the line discpline that a change has @@ -127,16 +118,13 @@ struct tty_struct; #include #include -#include -#include -#include -#include + /* * the semaphore definition */ struct ld_semaphore { - atomic_long_t count; + long count; raw_spinlock_t wait_lock; unsigned int wait_readers; struct list_head read_wait; @@ -178,6 +166,7 @@ extern int ldsem_down_write_nested(struct ld_semaphore *sem, int subclass, struct tty_ldisc_ops { + int magic; char *name; int num; int flags; @@ -189,16 +178,15 @@ struct tty_ldisc_ops { void (*close)(struct tty_struct *); void (*flush_buffer)(struct tty_struct *tty); ssize_t (*read)(struct tty_struct *tty, struct file *file, - unsigned char *buf, size_t nr, - void **cookie, unsigned long offset); + unsigned char __user *buf, size_t nr); ssize_t (*write)(struct tty_struct *tty, struct file *file, const unsigned char *buf, size_t nr); int (*ioctl)(struct tty_struct *tty, struct file *file, unsigned int cmd, unsigned long arg); - int (*compat_ioctl)(struct tty_struct *tty, struct file *file, + long (*compat_ioctl)(struct tty_struct *tty, struct file *file, unsigned int cmd, unsigned long arg); void (*set_termios)(struct tty_struct *tty, struct ktermios *old); - __poll_t (*poll)(struct tty_struct *, struct file *, + unsigned int (*poll)(struct tty_struct *, struct file *, struct poll_table_struct *); int (*hangup)(struct tty_struct *tty); @@ -206,13 +194,15 @@ struct tty_ldisc_ops { * The following routines are called from below. 
*/ void (*receive_buf)(struct tty_struct *, const unsigned char *cp, - const char *fp, int count); + char *fp, int count); void (*write_wakeup)(struct tty_struct *); void (*dcd_change)(struct tty_struct *, unsigned int); int (*receive_buf2)(struct tty_struct *, const unsigned char *cp, - const char *fp, int count); + char *fp, int count); struct module *owner; + + atomic_t refcount; }; struct tty_ldisc { @@ -220,21 +210,11 @@ struct tty_ldisc { struct tty_struct *tty; }; +#define TTY_LDISC_MAGIC 0x5403 + #define LDISC_FLAG_DEFINED 0x00000001 #define MODULE_ALIAS_LDISC(ldisc) \ MODULE_ALIAS("tty-ldisc-" __stringify(ldisc)) -extern const struct seq_operations tty_ldiscs_seq_ops; - -struct tty_ldisc *tty_ldisc_ref(struct tty_struct *); -void tty_ldisc_deref(struct tty_ldisc *); -struct tty_ldisc *tty_ldisc_ref_wait(struct tty_struct *); - -void tty_ldisc_flush(struct tty_struct *tty); - -int tty_register_ldisc(struct tty_ldisc_ops *new_ldisc); -void tty_unregister_ldisc(struct tty_ldisc_ops *ldisc); -int tty_set_ldisc(struct tty_struct *tty, int disc); - #endif /* _LINUX_TTY_LDISC_H */ diff --git a/include/linux/typecheck.h b/include/linux/typecheck.h index 46b15e2aae..eb5b74a575 100644 --- a/include/linux/typecheck.h +++ b/include/linux/typecheck.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef TYPECHECK_H_INCLUDED #define TYPECHECK_H_INCLUDED @@ -22,13 +21,4 @@ (void)__tmp; \ }) -/* - * Check at compile time that something is a pointer type. 
- */ -#define typecheck_pointer(x) \ -({ typeof(x) __dummy; \ - (void)sizeof(*__dummy); \ - 1; \ -}) - #endif /* TYPECHECK_H_INCLUDED */ diff --git a/include/linux/types.h b/include/linux/types.h index ac825ad90e..161f20fa02 100644 --- a/include/linux/types.h +++ b/include/linux/types.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_TYPES_H #define _LINUX_TYPES_H @@ -10,14 +9,14 @@ #define DECLARE_BITMAP(name,bits) \ unsigned long name[BITS_TO_LONGS(bits)] -typedef u32 __kernel_dev_t; +typedef __u32 __kernel_dev_t; typedef __kernel_fd_set fd_set; typedef __kernel_dev_t dev_t; -typedef __kernel_ulong_t ino_t; +typedef __kernel_ino_t ino_t; typedef __kernel_mode_t mode_t; typedef unsigned short umode_t; -typedef u32 nlink_t; +typedef __u32 nlink_t; typedef __kernel_off_t off_t; typedef __kernel_pid_t pid_t; typedef __kernel_daddr_t daddr_t; @@ -65,6 +64,11 @@ typedef __kernel_ssize_t ssize_t; typedef __kernel_ptrdiff_t ptrdiff_t; #endif +#ifndef _TIME_T +#define _TIME_T +typedef __kernel_time_t time_t; +#endif + #ifndef _CLOCK_T #define _CLOCK_T typedef __kernel_clock_t clock_t; @@ -90,29 +94,29 @@ typedef unsigned long ulong; #ifndef __BIT_TYPES_DEFINED__ #define __BIT_TYPES_DEFINED__ -typedef u8 u_int8_t; -typedef s8 int8_t; -typedef u16 u_int16_t; -typedef s16 int16_t; -typedef u32 u_int32_t; -typedef s32 int32_t; +typedef __u8 u_int8_t; +typedef __s8 int8_t; +typedef __u16 u_int16_t; +typedef __s16 int16_t; +typedef __u32 u_int32_t; +typedef __s32 int32_t; #endif /* !(__BIT_TYPES_DEFINED__) */ -typedef u8 uint8_t; -typedef u16 uint16_t; -typedef u32 uint32_t; +typedef __u8 uint8_t; +typedef __u16 uint16_t; +typedef __u32 uint32_t; #if defined(__GNUC__) -typedef u64 uint64_t; -typedef u64 u_int64_t; -typedef s64 int64_t; +typedef __u64 uint64_t; +typedef __u64 u_int64_t; +typedef __s64 int64_t; #endif /* this is a special 64bit data type that is 8-byte aligned */ -#define aligned_u64 __aligned_u64 -#define aligned_be64 __aligned_be64 
-#define aligned_le64 __aligned_le64 +#define aligned_u64 __u64 __attribute__((aligned(8))) +#define aligned_be64 __be64 __attribute__((aligned(8))) +#define aligned_le64 __le64 __attribute__((aligned(8))) /** * The type used for indexing onto a disc or disc partition. @@ -122,8 +126,13 @@ typedef s64 int64_t; * * blkcnt_t is the type of the inode's block count. */ +#ifdef CONFIG_LBDAF typedef u64 sector_t; typedef u64 blkcnt_t; +#else +typedef unsigned long sector_t; +typedef unsigned long blkcnt_t; +#endif /* * The type of an index into the pagecache. @@ -145,14 +154,15 @@ typedef u64 dma_addr_t; typedef u32 dma_addr_t; #endif -typedef unsigned int __bitwise gfp_t; -typedef unsigned int __bitwise slab_flags_t; -typedef unsigned int __bitwise fmode_t; +typedef unsigned __bitwise__ gfp_t; +typedef unsigned __bitwise__ fmode_t; #ifdef CONFIG_PHYS_ADDR_T_64BIT typedef u64 phys_addr_t; +#define RESOURCE_SIZE_MAX ULLONG_MAX #else typedef u32 phys_addr_t; +#define RESOURCE_SIZE_MAX ULONG_MAX #endif typedef phys_addr_t resource_size_t; @@ -167,12 +177,26 @@ typedef struct { int counter; } atomic_t; -#define ATOMIC_INIT(i) { (i) } +#ifdef CONFIG_PAX_REFCOUNT +typedef struct { + int counter; +} atomic_unchecked_t; +#else +typedef atomic_t atomic_unchecked_t; +#endif #ifdef CONFIG_64BIT typedef struct { - s64 counter; + long counter; } atomic64_t; + +#ifdef CONFIG_PAX_REFCOUNT +typedef struct { + long counter; +} atomic64_unchecked_t; +#else +typedef atomic64_t atomic64_unchecked_t; +#endif #endif struct list_head { @@ -189,11 +213,7 @@ struct hlist_node { struct ustat { __kernel_daddr_t f_tfree; -#ifdef CONFIG_ARCH_32BIT_USTAT_F_TINODE - unsigned int f_tinode; -#else - unsigned long f_tinode; -#endif + __kernel_ino_t f_tinode; char f_fname[6]; char f_fpack[6]; }; @@ -207,13 +227,13 @@ struct ustat { * naturally due ABI requirements, but some architectures (like CRIS) have * weird ABI and we need to ask it explicitly. 
* - * The alignment is required to guarantee that bit 0 of @next will be - * clear under normal conditions -- as long as we use call_rcu() or - * call_srcu() to queue the callback. + * The alignment is required to guarantee that bits 0 and 1 of @next will be + * clear under normal conditions -- as long as we use call_rcu(), + * call_rcu_bh(), call_rcu_sched(), or call_srcu() to queue callback. * * This guarantee is important for few reasons: * - future call_rcu_lazy() will make use of lower bits in the pointer; - * - the structure shares storage space in struct page with @compound_head, + * - the structure shares storage spacer in struct page with @compound_head, * which encode PageTail() in bit 0. The guarantee is needed to avoid * false-positive PageTail(). */ @@ -226,10 +246,8 @@ struct callback_head { typedef void (*rcu_callback_t)(struct rcu_head *head); typedef void (*call_rcu_func_t)(struct rcu_head *head, rcu_callback_t func); -typedef void (*swap_func_t)(void *a, void *b, int size); - -typedef int (*cmp_r_func_t)(const void *a, const void *b, const void *priv); -typedef int (*cmp_func_t)(const void *a, const void *b); +/* clocksource cycle base type */ +typedef u64 cycle_t; #endif /* __ASSEMBLY__ */ #endif /* _LINUX_TYPES_H */ diff --git a/include/linux/u64_stats_sync.h b/include/linux/u64_stats_sync.h index e81856c0ba..650f3dd6b8 100644 --- a/include/linux/u64_stats_sync.h +++ b/include/linux/u64_stats_sync.h @@ -1,38 +1,34 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_U64_STATS_SYNC_H #define _LINUX_U64_STATS_SYNC_H /* - * Protect against 64-bit values tearing on 32-bit architectures. This is - * typically used for statistics read/update in different subsystems. + * To properly implement 64bits network statistics on 32bit and 64bit hosts, + * we provide a synchronization point, that is a noop on 64bit or UP kernels. * * Key points : - * - * - Use a seqcount on 32-bit SMP, only disable preemption for 32-bit UP. 
- * - The whole thing is a no-op on 64-bit architectures. - * - * Usage constraints: - * - * 1) Write side must ensure mutual exclusion, or one seqcount update could + * 1) Use a seqcount on SMP 32bits, with low overhead. + * 2) Whole thing is a noop on 64bit arches or UP kernels. + * 3) Write side must ensure mutual exclusion or one seqcount update could * be lost, thus blocking readers forever. - * - * 2) Write side must disable preemption, or a seqcount reader can preempt the - * writer and also spin forever. - * - * 3) Write side must use the _irqsave() variant if other writers, or a reader, - * can be invoked from an IRQ context. + * If this synchronization point is not a mutex, but a spinlock or + * spinlock_bh() or disable_bh() : + * 3.1) Write side should not sleep. + * 3.2) Write side should not allow preemption. + * 3.3) If applicable, interrupts should be disabled. * * 4) If reader fetches several counters, there is no guarantee the whole values - * are consistent w.r.t. each other (remember point #2: seqcounts are not - * used for 64bit architectures). + * are consistent (remember point 1) : this is a noop on 64bit arches anyway) * - * 5) Readers are allowed to sleep or be preempted/interrupted: they perform - * pure reads. + * 5) readers are allowed to sleep or be preempted/interrupted : They perform + * pure reads. But if they have to fetch many values, it's better to not allow + * preemptions/interruptions to avoid many retries. * - * 6) Readers must use both u64_stats_fetch_{begin,retry}_irq() if the stats - * might be updated from a hardirq or softirq context (remember point #1: - * seqcounts are not used for UP kernels). 32-bit UP stat readers could read - * corrupted 64-bit values otherwise. + * 6) If counter might be written by an interrupt, readers should block interrupts. 
+ * (On UP, there is no seqcount_t protection, a reader allowing interrupts could + * read partial values) + * + * 7) For irq and softirq uses, readers can use u64_stats_fetch_begin_irq() and + * u64_stats_fetch_retry_irq() helpers * * Usage : * @@ -43,8 +39,8 @@ * spin_lock_bh(...) or other synchronization to get exclusive access * ... * u64_stats_update_begin(&stats->syncp); - * u64_stats_add(&stats->bytes64, len); // non atomic operation - * u64_stats_inc(&stats->packets64); // non atomic operation + * stats->bytes64 += len; // non atomic operation + * stats->packets64++; // non atomic operation * u64_stats_update_end(&stats->syncp); * * While a consumer (reader) should use following template to get consistent @@ -55,8 +51,8 @@ * * do { * start = u64_stats_fetch_begin(&stats->syncp); - * tbytes = u64_stats_read(&stats->bytes64); // non atomic operation - * tpackets = u64_stats_read(&stats->packets64); // non atomic operation + * tbytes = stats->bytes64; // non atomic operation + * tpackets = stats->packets64; // non atomic operation * } while (u64_stats_fetch_retry(&stats->syncp, start)); * * @@ -71,57 +67,13 @@ struct u64_stats_sync { #endif }; -#if BITS_PER_LONG == 64 -#include -typedef struct { - local64_t v; -} u64_stats_t ; - -static inline u64 u64_stats_read(const u64_stats_t *p) -{ - return local64_read(&p->v); -} - -static inline void u64_stats_add(u64_stats_t *p, unsigned long val) -{ - local64_add(val, &p->v); -} - -static inline void u64_stats_inc(u64_stats_t *p) -{ - local64_inc(&p->v); -} - -#else - -typedef struct { - u64 v; -} u64_stats_t; - -static inline u64 u64_stats_read(const u64_stats_t *p) -{ - return p->v; -} - -static inline void u64_stats_add(u64_stats_t *p, unsigned long val) -{ - p->v += val; -} - -static inline void u64_stats_inc(u64_stats_t *p) -{ - p->v++; -} -#endif - -#if BITS_PER_LONG == 32 && defined(CONFIG_SMP) -#define u64_stats_init(syncp) seqcount_init(&(syncp)->seq) -#else static inline void u64_stats_init(struct 
u64_stats_sync *syncp) { -} +#if BITS_PER_LONG == 32 && defined(CONFIG_SMP) + seqcount_init(&syncp->seq); #endif +} static inline void u64_stats_update_begin(struct u64_stats_sync *syncp) { @@ -137,25 +89,17 @@ static inline void u64_stats_update_end(struct u64_stats_sync *syncp) #endif } -static inline unsigned long -u64_stats_update_begin_irqsave(struct u64_stats_sync *syncp) +static inline void u64_stats_update_begin_raw(struct u64_stats_sync *syncp) { - unsigned long flags = 0; - #if BITS_PER_LONG==32 && defined(CONFIG_SMP) - local_irq_save(flags); - write_seqcount_begin(&syncp->seq); + raw_write_seqcount_begin(&syncp->seq); #endif - return flags; } -static inline void -u64_stats_update_end_irqrestore(struct u64_stats_sync *syncp, - unsigned long flags) +static inline void u64_stats_update_end_raw(struct u64_stats_sync *syncp) { #if BITS_PER_LONG==32 && defined(CONFIG_SMP) - write_seqcount_end(&syncp->seq); - local_irq_restore(flags); + raw_write_seqcount_end(&syncp->seq); #endif } diff --git a/include/linux/uaccess.h b/include/linux/uaccess.h index ac0394087f..d2b4ce20f6 100644 --- a/include/linux/uaccess.h +++ b/include/linux/uaccess.h @@ -1,219 +1,9 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef __LINUX_UACCESS_H__ #define __LINUX_UACCESS_H__ -#include -#include -#include #include -#include - #include -#ifdef CONFIG_SET_FS -/* - * Force the uaccess routines to be wired up for actual userspace access, - * overriding any possible set_fs(KERNEL_DS) still lingering around. Undone - * using force_uaccess_end below. 
- */ -static inline mm_segment_t force_uaccess_begin(void) -{ - mm_segment_t fs = get_fs(); - - set_fs(USER_DS); - return fs; -} - -static inline void force_uaccess_end(mm_segment_t oldfs) -{ - set_fs(oldfs); -} -#else /* CONFIG_SET_FS */ -typedef struct { - /* empty dummy */ -} mm_segment_t; - -#ifndef TASK_SIZE_MAX -#define TASK_SIZE_MAX TASK_SIZE -#endif - -#define uaccess_kernel() (false) -#define user_addr_max() (TASK_SIZE_MAX) - -static inline mm_segment_t force_uaccess_begin(void) -{ - return (mm_segment_t) { }; -} - -static inline void force_uaccess_end(mm_segment_t oldfs) -{ -} -#endif /* CONFIG_SET_FS */ - -/* - * Architectures should provide two primitives (raw_copy_{to,from}_user()) - * and get rid of their private instances of copy_{to,from}_user() and - * __copy_{to,from}_user{,_inatomic}(). - * - * raw_copy_{to,from}_user(to, from, size) should copy up to size bytes and - * return the amount left to copy. They should assume that access_ok() has - * already been checked (and succeeded); they should *not* zero-pad anything. - * No KASAN or object size checks either - those belong here. - * - * Both of these functions should attempt to copy size bytes starting at from - * into the area starting at to. They must not fetch or store anything - * outside of those areas. Return value must be between 0 (everything - * copied successfully) and size (nothing copied). - * - * If raw_copy_{to,from}_user(to, from, size) returns N, size - N bytes starting - * at to must become equal to the bytes fetched from the corresponding area - * starting at from. All data past to + size - N must be left unmodified. - * - * If copying succeeds, the return value must be 0. If some data cannot be - * fetched, it is permitted to copy less than had been fetched; the only - * hard requirement is that not storing anything at all (i.e. returning size) - * should happen only when nothing could be copied. 
In other words, you don't - * have to squeeze as much as possible - it is allowed, but not necessary. - * - * For raw_copy_from_user() to always points to kernel memory and no faults - * on store should happen. Interpretation of from is affected by set_fs(). - * For raw_copy_to_user() it's the other way round. - * - * Both can be inlined - it's up to architectures whether it wants to bother - * with that. They should not be used directly; they are used to implement - * the 6 functions (copy_{to,from}_user(), __copy_{to,from}_user_inatomic()) - * that are used instead. Out of those, __... ones are inlined. Plain - * copy_{to,from}_user() might or might not be inlined. If you want them - * inlined, have asm/uaccess.h define INLINE_COPY_{TO,FROM}_USER. - * - * NOTE: only copy_from_user() zero-pads the destination in case of short copy. - * Neither __copy_from_user() nor __copy_from_user_inatomic() zero anything - * at all; their callers absolutely must check the return value. - * - * Biarch ones should also provide raw_copy_in_user() - similar to the above, - * but both source and destination are __user pointers (affected by set_fs() - * as usual) and both source and destination can trigger faults. - */ - -static __always_inline __must_check unsigned long -__copy_from_user_inatomic(void *to, const void __user *from, unsigned long n) -{ - instrument_copy_from_user(to, from, n); - check_object_size(to, n, false); - return raw_copy_from_user(to, from, n); -} - -static __always_inline __must_check unsigned long -__copy_from_user(void *to, const void __user *from, unsigned long n) -{ - might_fault(); - if (should_fail_usercopy()) - return n; - instrument_copy_from_user(to, from, n); - check_object_size(to, n, false); - return raw_copy_from_user(to, from, n); -} - -/** - * __copy_to_user_inatomic: - Copy a block of data into user space, with less checking. - * @to: Destination address, in user space. - * @from: Source address, in kernel space. 
- * @n: Number of bytes to copy. - * - * Context: User context only. - * - * Copy data from kernel space to user space. Caller must check - * the specified block with access_ok() before calling this function. - * The caller should also make sure he pins the user space address - * so that we don't result in page fault and sleep. - */ -static __always_inline __must_check unsigned long -__copy_to_user_inatomic(void __user *to, const void *from, unsigned long n) -{ - if (should_fail_usercopy()) - return n; - instrument_copy_to_user(to, from, n); - check_object_size(from, n, true); - return raw_copy_to_user(to, from, n); -} - -static __always_inline __must_check unsigned long -__copy_to_user(void __user *to, const void *from, unsigned long n) -{ - might_fault(); - if (should_fail_usercopy()) - return n; - instrument_copy_to_user(to, from, n); - check_object_size(from, n, true); - return raw_copy_to_user(to, from, n); -} - -#ifdef INLINE_COPY_FROM_USER -static inline __must_check unsigned long -_copy_from_user(void *to, const void __user *from, unsigned long n) -{ - unsigned long res = n; - might_fault(); - if (!should_fail_usercopy() && likely(access_ok(from, n))) { - instrument_copy_from_user(to, from, n); - res = raw_copy_from_user(to, from, n); - } - if (unlikely(res)) - memset(to + (n - res), 0, res); - return res; -} -#else -extern __must_check unsigned long -_copy_from_user(void *, const void __user *, unsigned long); -#endif - -#ifdef INLINE_COPY_TO_USER -static inline __must_check unsigned long -_copy_to_user(void __user *to, const void *from, unsigned long n) -{ - might_fault(); - if (should_fail_usercopy()) - return n; - if (access_ok(to, n)) { - instrument_copy_to_user(to, from, n); - n = raw_copy_to_user(to, from, n); - } - return n; -} -#else -extern __must_check unsigned long -_copy_to_user(void __user *, const void *, unsigned long); -#endif - -static __always_inline unsigned long __must_check -copy_from_user(void *to, const void __user *from, unsigned 
long n) -{ - if (likely(check_copy_size(to, n, false))) - n = _copy_from_user(to, from, n); - return n; -} - -static __always_inline unsigned long __must_check -copy_to_user(void __user *to, const void *from, unsigned long n) -{ - if (likely(check_copy_size(from, n, true))) - n = _copy_to_user(to, from, n); - return n; -} - -#ifndef copy_mc_to_kernel -/* - * Without arch opt-in this generic copy_mc_to_kernel() will not handle - * #MC (or arch equivalent) during source read. - */ -static inline unsigned long __must_check -copy_mc_to_kernel(void *dst, const void *src, size_t cnt) -{ - memcpy(dst, src, cnt); - return 0; -} -#endif - static __always_inline void pagefault_disabled_inc(void) { current->pagefault_disabled++; @@ -222,6 +12,7 @@ static __always_inline void pagefault_disabled_inc(void) static __always_inline void pagefault_disabled_dec(void) { current->pagefault_disabled--; + WARN_ON(current->pagefault_disabled < 0); } /* @@ -254,10 +45,7 @@ static inline void pagefault_enable(void) /* * Is the pagefault handler disabled? If so, user access methods will not sleep. */ -static inline bool pagefault_disabled(void) -{ - return current->pagefault_disabled != 0; -} +#define pagefault_disabled() (current->pagefault_disabled != 0) /* * The pagefault handler is in general disabled by pagefault_disable() or @@ -273,139 +61,61 @@ static inline bool pagefault_disabled(void) #ifndef ARCH_HAS_NOCACHE_UACCESS -static inline __must_check unsigned long -__copy_from_user_inatomic_nocache(void *to, const void __user *from, - unsigned long n) +static inline unsigned long __copy_from_user_inatomic_nocache(void *to, + const void __user *from, unsigned long n) { return __copy_from_user_inatomic(to, from, n); } -#endif /* ARCH_HAS_NOCACHE_UACCESS */ - -extern __must_check int check_zeroed_user(const void __user *from, size_t size); - -/** - * copy_struct_from_user: copy a struct from userspace - * @dst: Destination address, in kernel space. 
This buffer must be @ksize - * bytes long. - * @ksize: Size of @dst struct. - * @src: Source address, in userspace. - * @usize: (Alleged) size of @src struct. - * - * Copies a struct from userspace to kernel space, in a way that guarantees - * backwards-compatibility for struct syscall arguments (as long as future - * struct extensions are made such that all new fields are *appended* to the - * old struct, and zeroed-out new fields have the same meaning as the old - * struct). - * - * @ksize is just sizeof(*dst), and @usize should've been passed by userspace. - * The recommended usage is something like the following: - * - * SYSCALL_DEFINE2(foobar, const struct foo __user *, uarg, size_t, usize) - * { - * int err; - * struct foo karg = {}; - * - * if (usize > PAGE_SIZE) - * return -E2BIG; - * if (usize < FOO_SIZE_VER0) - * return -EINVAL; - * - * err = copy_struct_from_user(&karg, sizeof(karg), uarg, usize); - * if (err) - * return err; - * - * // ... - * } - * - * There are three cases to consider: - * * If @usize == @ksize, then it's copied verbatim. - * * If @usize < @ksize, then the userspace has passed an old struct to a - * newer kernel. The rest of the trailing bytes in @dst (@ksize - @usize) - * are to be zero-filled. - * * If @usize > @ksize, then the userspace has passed a new struct to an - * older kernel. The trailing bytes unknown to the kernel (@usize - @ksize) - * are checked to ensure they are zeroed, otherwise -E2BIG is returned. - * - * Returns (in all cases, some data may have been copied): - * * -E2BIG: (@usize > @ksize) and there are non-zero trailing bytes in @src. - * * -EFAULT: access to userspace failed. 
- */ -static __always_inline __must_check int -copy_struct_from_user(void *dst, size_t ksize, const void __user *src, - size_t usize) +static inline unsigned long __copy_from_user_nocache(void *to, + const void __user *from, unsigned long n) { - size_t size = min(ksize, usize); - size_t rest = max(ksize, usize) - size; - - /* Deal with trailing bytes. */ - if (usize < ksize) { - memset(dst + size, 0, rest); - } else if (usize > ksize) { - int ret = check_zeroed_user(src + size, rest); - if (ret <= 0) - return ret ?: -E2BIG; - } - /* Copy the interoperable parts of the struct. */ - if (copy_from_user(dst, src, size)) - return -EFAULT; - return 0; + return __copy_from_user(to, from, n); } -bool copy_from_kernel_nofault_allowed(const void *unsafe_src, size_t size); +#endif /* ARCH_HAS_NOCACHE_UACCESS */ -long copy_from_kernel_nofault(void *dst, const void *src, size_t size); -long notrace copy_to_kernel_nofault(void *dst, const void *src, size_t size); +/* + * probe_kernel_read(): safely attempt to read from a location + * @dst: pointer to the buffer that shall take the data + * @src: address to read from + * @size: size of the data chunk + * + * Safely read from address @src to the buffer at @dst. If a kernel fault + * happens, handle that and return -EFAULT. + */ +extern long probe_kernel_read(void *dst, const void *src, size_t size); +extern long __probe_kernel_read(void *dst, const void *src, size_t size); -long copy_from_user_nofault(void *dst, const void __user *src, size_t size); -long notrace copy_to_user_nofault(void __user *dst, const void *src, - size_t size); +/* + * probe_kernel_write(): safely attempt to write to a location + * @dst: address to write to + * @src: pointer to the data that shall be written + * @size: size of the data chunk + * + * Safely write to address @dst from the buffer at @src. If a kernel fault + * happens, handle that and return -EFAULT. 
+ */ +extern long notrace probe_kernel_write(void *dst, const void *src, size_t size); +extern long notrace __probe_kernel_write(void *dst, const void *src, size_t size); -long strncpy_from_kernel_nofault(char *dst, const void *unsafe_addr, - long count); - -long strncpy_from_user_nofault(char *dst, const void __user *unsafe_addr, - long count); -long strnlen_user_nofault(const void __user *unsafe_addr, long count); +extern long strncpy_from_unsafe(char *dst, const void *unsafe_addr, long count); /** - * get_kernel_nofault(): safely attempt to read from a location - * @val: read into this variable - * @ptr: address to read from + * probe_kernel_address(): safely attempt to read from a location + * @addr: address to read from + * @retval: read into this variable * * Returns 0 on success, or -EFAULT. */ -#define get_kernel_nofault(val, ptr) ({ \ - const typeof(val) *__gk_ptr = (ptr); \ - copy_from_kernel_nofault(&(val), __gk_ptr, sizeof(val));\ -}) +#define probe_kernel_address(addr, retval) \ + probe_kernel_read(&(retval), addr, sizeof(retval)) #ifndef user_access_begin -#define user_access_begin(ptr,len) access_ok(ptr, len) +#define user_access_begin() do { } while (0) #define user_access_end() do { } while (0) -#define unsafe_op_wrap(op, err) do { if (unlikely(op)) goto err; } while (0) -#define unsafe_get_user(x,p,e) unsafe_op_wrap(__get_user(x,p),e) -#define unsafe_put_user(x,p,e) unsafe_op_wrap(__put_user(x,p),e) -#define unsafe_copy_to_user(d,s,l,e) unsafe_op_wrap(__copy_to_user(d,s,l),e) -#define unsafe_copy_from_user(d,s,l,e) unsafe_op_wrap(__copy_from_user(d,s,l),e) -static inline unsigned long user_access_save(void) { return 0UL; } -static inline void user_access_restore(unsigned long flags) { } -#endif -#ifndef user_write_access_begin -#define user_write_access_begin user_access_begin -#define user_write_access_end user_access_end -#endif -#ifndef user_read_access_begin -#define user_read_access_begin user_access_begin -#define user_read_access_end 
user_access_end -#endif - -#ifdef CONFIG_HARDENED_USERCOPY -void usercopy_warn(const char *name, const char *detail, bool to_user, - unsigned long offset, unsigned long len); -void __noreturn usercopy_abort(const char *name, const char *detail, - bool to_user, unsigned long offset, - unsigned long len); +#define unsafe_get_user(x, ptr, err) do { if (unlikely(__get_user(x, ptr))) goto err; } while (0) +#define unsafe_put_user(x, ptr, err) do { if (unlikely(__put_user(x, ptr))) goto err; } while (0) #endif #endif /* __LINUX_UACCESS_H__ */ diff --git a/include/linux/ucb1400.h b/include/linux/ucb1400.h index 0968ef4584..2e9ee4d1c6 100644 --- a/include/linux/ucb1400.h +++ b/include/linux/ucb1400.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * Register definitions and functions for: * Philips UCB1400 driver @@ -12,6 +11,10 @@ * If something doesn't work and it worked before spliting, e-mail me, * dont bother Nicolas please ;-) * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * * This code is heavily based on ucb1x00-*.c copyrighted by Russell King * covering the UCB1100, UCB1200 and UCB1300.. Support for the UCB1400 has * been made separate from ucb1x00-core/ucb1x00-ts on Russell's request. diff --git a/include/linux/ucs2_string.h b/include/linux/ucs2_string.h index cf3ada3e82..bb679b48f4 100644 --- a/include/linux/ucs2_string.h +++ b/include/linux/ucs2_string.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_UCS2_STRING_H_ #define _LINUX_UCS2_STRING_H_ diff --git a/include/linux/udp.h b/include/linux/udp.h index ae66dadd85..d1fd8cd394 100644 --- a/include/linux/udp.h +++ b/include/linux/udp.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * INET An implementation of the TCP/IP protocol suite for the LINUX * operating system. 
INET is implemented using the BSD Socket @@ -9,6 +8,11 @@ * Version: @(#)udp.h 1.0.2 04/28/93 * * Author: Fred N. van Kempen, + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. */ #ifndef _LINUX_UDP_H #define _LINUX_UDP_H @@ -45,21 +49,12 @@ struct udp_sock { unsigned int corkflag; /* Cork is required */ __u8 encap_type; /* Is this an Encapsulation socket? */ unsigned char no_check6_tx:1,/* Send zero UDP6 checksums on TX? */ - no_check6_rx:1,/* Allow zero UDP6 checksums on RX? */ - encap_enabled:1, /* This socket enabled encap - * processing; UDP tunnels and - * different encapsulation layer set - * this - */ - gro_enabled:1, /* Request GRO aggregation */ - accept_udp_l4:1, - accept_udp_fraglist:1; + no_check6_rx:1;/* Allow zero UDP6 checksums on RX? */ /* * Following member retains the information to create a UDP header * when the socket is uncorked. */ __u16 len; /* total length of pending frames */ - __u16 gso_size; /* * Fields specific to UDP-Lite. */ @@ -75,26 +70,17 @@ struct udp_sock { * For encapsulation sockets. 
*/ int (*encap_rcv)(struct sock *sk, struct sk_buff *skb); - int (*encap_err_lookup)(struct sock *sk, struct sk_buff *skb); void (*encap_destroy)(struct sock *sk); /* GRO functions for UDP socket */ - struct sk_buff * (*gro_receive)(struct sock *sk, - struct list_head *head, + struct sk_buff ** (*gro_receive)(struct sock *sk, + struct sk_buff **head, struct sk_buff *skb); int (*gro_complete)(struct sock *sk, struct sk_buff *skb, int nhoff); - - /* udp_recvmsg try to use this before splicing sk_receive_queue */ - struct sk_buff_head reader_queue ____cacheline_aligned_in_smp; - - /* This field is dirtied by udp_recvmsg() */ - int forward_deficit; }; -#define UDP_MAX_SEGMENTS (1 << 6UL) - static inline struct udp_sock *udp_sk(const struct sock *sk) { return (struct udp_sock *)sk; @@ -120,43 +106,12 @@ static inline bool udp_get_no_check6_rx(struct sock *sk) return udp_sk(sk)->no_check6_rx; } -static inline void udp_cmsg_recv(struct msghdr *msg, struct sock *sk, - struct sk_buff *skb) -{ - int gso_size; - - if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) { - gso_size = skb_shinfo(skb)->gso_size; - put_cmsg(msg, SOL_UDP, UDP_GRO, sizeof(gso_size), &gso_size); - } -} - -static inline bool udp_unexpected_gso(struct sock *sk, struct sk_buff *skb) -{ - if (!skb_is_gso(skb)) - return false; - - if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4 && !udp_sk(sk)->accept_udp_l4) - return true; - - if (skb_shinfo(skb)->gso_type & SKB_GSO_FRAGLIST && !udp_sk(sk)->accept_udp_fraglist) - return true; - - return false; -} - -static inline void udp_allow_gso(struct sock *sk) -{ - udp_sk(sk)->accept_udp_l4 = 1; - udp_sk(sk)->accept_udp_fraglist = 1; -} - #define udp_portaddr_for_each_entry(__sk, list) \ hlist_for_each_entry(__sk, list, __sk_common.skc_portaddr_node) #define udp_portaddr_for_each_entry_rcu(__sk, list) \ hlist_for_each_entry_rcu(__sk, list, __sk_common.skc_portaddr_node) -#define IS_UDPLITE(__sk) (__sk->sk_protocol == IPPROTO_UDPLITE) +#define IS_UDPLITE(__sk) 
(udp_sk(__sk)->pcflag) #endif /* _LINUX_UDP_H */ diff --git a/include/linux/uidgid.h b/include/linux/uidgid.h index b0542cd11a..1b34fffb9c 100644 --- a/include/linux/uidgid.h +++ b/include/linux/uidgid.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_UIDGID_H #define _LINUX_UIDGID_H @@ -188,4 +187,10 @@ static inline bool kgid_has_mapping(struct user_namespace *ns, kgid_t gid) #endif /* CONFIG_USER_NS */ +#define GR_GLOBAL_UID(x) from_kuid_munged(&init_user_ns, (x)) +#define GR_GLOBAL_GID(x) from_kgid_munged(&init_user_ns, (x)) +#define gr_is_global_root(x) uid_eq((x), GLOBAL_ROOT_UID) +#define gr_is_global_nonroot(x) (!uid_eq((x), GLOBAL_ROOT_UID)) +#define gr_is_global_nonroot_gid(x) (!gid_eq((x), GLOBAL_ROOT_GID)) + #endif /* _LINUX_UIDGID_H */ diff --git a/include/linux/uinput.h b/include/linux/uinput.h new file mode 100644 index 0000000000..75de43da23 --- /dev/null +++ b/include/linux/uinput.h @@ -0,0 +1,81 @@ +/* + * User level driver support for input subsystem + * + * Heavily based on evdev.c by Vojtech Pavlik + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + * + * Author: Aristeu Sergio Rozanski Filho + * + * Changes/Revisions: + * 0.5 08/13/2015 (David Herrmann & + * Benjamin Tissoires ) + * - add UI_DEV_SETUP ioctl + * - add UI_ABS_SETUP ioctl + * - add UI_GET_VERSION ioctl + * 0.4 01/09/2014 (Benjamin Tissoires ) + * - add UI_GET_SYSNAME ioctl + * 0.3 24/05/2006 (Anssi Hannula ) + * - update ff support for the changes in kernel interface + * - add UINPUT_VERSION + * 0.2 16/10/2004 (Micah Dowty ) + * - added force feedback support + * - added UI_SET_PHYS + * 0.1 20/06/2002 + * - first public version + */ +#ifndef __UINPUT_H_ +#define __UINPUT_H_ + +#include + +#define UINPUT_NAME "uinput" +#define UINPUT_BUFFER_SIZE 16 +#define UINPUT_NUM_REQUESTS 16 + +enum uinput_state { UIST_NEW_DEVICE, UIST_SETUP_COMPLETE, UIST_CREATED }; + +struct uinput_request { + unsigned int id; + unsigned int code; /* UI_FF_UPLOAD, UI_FF_ERASE */ + + int retval; + struct completion done; + + union { + unsigned int effect_id; + struct { + struct ff_effect *effect; + struct ff_effect *old; + } upload; + } u; +}; + +struct uinput_device { + struct input_dev *dev; + struct mutex mutex; + enum uinput_state state; + wait_queue_head_t waitq; + unsigned char ready; + unsigned char head; + unsigned char tail; + struct input_event buff[UINPUT_BUFFER_SIZE]; + unsigned int ff_effects_max; + + struct uinput_request *requests[UINPUT_NUM_REQUESTS]; + wait_queue_head_t requests_waitq; + spinlock_t requests_lock; +}; +#endif /* __UINPUT_H_ */ diff --git a/include/linux/uio.h b/include/linux/uio.h index 207101a9c5..c146ebc69c 100644 --- a/include/linux/uio.h +++ b/include/linux/uio.h @@ -1,12 +1,15 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * Berkeley style UIO structures - Alan Cox 1994. 
+ * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. */ #ifndef __LINUX_UIO_H #define __LINUX_UIO_H #include -#include #include struct page; @@ -17,92 +20,32 @@ struct kvec { size_t iov_len; }; -enum iter_type { - /* iter types */ - ITER_IOVEC, - ITER_KVEC, - ITER_BVEC, - ITER_PIPE, - ITER_XARRAY, - ITER_DISCARD, -}; - -struct iov_iter_state { - size_t iov_offset; - size_t count; - unsigned long nr_segs; +enum { + ITER_IOVEC = 0, + ITER_KVEC = 2, + ITER_BVEC = 4, + ITER_PIPE = 8, }; struct iov_iter { - u8 iter_type; - bool data_source; + int type; size_t iov_offset; size_t count; union { const struct iovec *iov; const struct kvec *kvec; const struct bio_vec *bvec; - struct xarray *xarray; struct pipe_inode_info *pipe; }; union { unsigned long nr_segs; struct { - unsigned int head; - unsigned int start_head; + int idx; + int start_idx; }; - loff_t xarray_start; }; }; -static inline enum iter_type iov_iter_type(const struct iov_iter *i) -{ - return i->iter_type; -} - -static inline void iov_iter_save_state(struct iov_iter *iter, - struct iov_iter_state *state) -{ - state->iov_offset = iter->iov_offset; - state->count = iter->count; - state->nr_segs = iter->nr_segs; -} - -static inline bool iter_is_iovec(const struct iov_iter *i) -{ - return iov_iter_type(i) == ITER_IOVEC; -} - -static inline bool iov_iter_is_kvec(const struct iov_iter *i) -{ - return iov_iter_type(i) == ITER_KVEC; -} - -static inline bool iov_iter_is_bvec(const struct iov_iter *i) -{ - return iov_iter_type(i) == ITER_BVEC; -} - -static inline bool iov_iter_is_pipe(const struct iov_iter *i) -{ - return iov_iter_type(i) == ITER_PIPE; -} - -static inline bool iov_iter_is_discard(const struct iov_iter *i) -{ - return iov_iter_type(i) == ITER_DISCARD; -} - -static inline bool iov_iter_is_xarray(const 
struct iov_iter *i) -{ - return iov_iter_type(i) == ITER_XARRAY; -} - -static inline unsigned char iov_iter_rw(const struct iov_iter *i) -{ - return i->data_source ? WRITE : READ; -} - /* * Total number of bytes covered by an iovec. * @@ -129,124 +72,44 @@ static inline struct iovec iov_iter_iovec(const struct iov_iter *iter) }; } -size_t copy_page_from_iter_atomic(struct page *page, unsigned offset, - size_t bytes, struct iov_iter *i); +#define iov_for_each(iov, iter, start) \ + if (!((start).type & (ITER_BVEC | ITER_PIPE))) \ + for (iter = (start); \ + (iter).count && \ + ((iov = iov_iter_iovec(&(iter))), 1); \ + iov_iter_advance(&(iter), (iov).iov_len)) + +unsigned long iov_shorten(struct iovec *iov, unsigned long nr_segs, size_t to); + +size_t iov_iter_copy_from_user_atomic(struct page *page, + struct iov_iter *i, unsigned long offset, size_t bytes); void iov_iter_advance(struct iov_iter *i, size_t bytes); void iov_iter_revert(struct iov_iter *i, size_t bytes); -int iov_iter_fault_in_readable(const struct iov_iter *i, size_t bytes); +int iov_iter_fault_in_readable(struct iov_iter *i, size_t bytes); size_t iov_iter_single_seg_count(const struct iov_iter *i); size_t copy_page_to_iter(struct page *page, size_t offset, size_t bytes, struct iov_iter *i); size_t copy_page_from_iter(struct page *page, size_t offset, size_t bytes, struct iov_iter *i); - -size_t _copy_to_iter(const void *addr, size_t bytes, struct iov_iter *i); -size_t _copy_from_iter(void *addr, size_t bytes, struct iov_iter *i); -size_t _copy_from_iter_nocache(void *addr, size_t bytes, struct iov_iter *i); - -static __always_inline __must_check -size_t copy_to_iter(const void *addr, size_t bytes, struct iov_iter *i) -{ - if (unlikely(!check_copy_size(addr, bytes, true))) - return 0; - else - return _copy_to_iter(addr, bytes, i); -} - -static __always_inline __must_check -size_t copy_from_iter(void *addr, size_t bytes, struct iov_iter *i) -{ - if (unlikely(!check_copy_size(addr, bytes, false))) - 
return 0; - else - return _copy_from_iter(addr, bytes, i); -} - -static __always_inline __must_check -bool copy_from_iter_full(void *addr, size_t bytes, struct iov_iter *i) -{ - size_t copied = copy_from_iter(addr, bytes, i); - if (likely(copied == bytes)) - return true; - iov_iter_revert(i, copied); - return false; -} - -static __always_inline __must_check -size_t copy_from_iter_nocache(void *addr, size_t bytes, struct iov_iter *i) -{ - if (unlikely(!check_copy_size(addr, bytes, false))) - return 0; - else - return _copy_from_iter_nocache(addr, bytes, i); -} - -static __always_inline __must_check -bool copy_from_iter_full_nocache(void *addr, size_t bytes, struct iov_iter *i) -{ - size_t copied = copy_from_iter_nocache(addr, bytes, i); - if (likely(copied == bytes)) - return true; - iov_iter_revert(i, copied); - return false; -} - -#ifdef CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE -/* - * Note, users like pmem that depend on the stricter semantics of - * copy_from_iter_flushcache() than copy_from_iter_nocache() must check for - * IS_ENABLED(CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE) before assuming that the - * destination is flushed from the cache on return. 
- */ -size_t _copy_from_iter_flushcache(void *addr, size_t bytes, struct iov_iter *i); -#else -#define _copy_from_iter_flushcache _copy_from_iter_nocache -#endif - -#ifdef CONFIG_ARCH_HAS_COPY_MC -size_t _copy_mc_to_iter(const void *addr, size_t bytes, struct iov_iter *i); -#else -#define _copy_mc_to_iter _copy_to_iter -#endif - -static __always_inline __must_check -size_t copy_from_iter_flushcache(void *addr, size_t bytes, struct iov_iter *i) -{ - if (unlikely(!check_copy_size(addr, bytes, false))) - return 0; - else - return _copy_from_iter_flushcache(addr, bytes, i); -} - -static __always_inline __must_check -size_t copy_mc_to_iter(void *addr, size_t bytes, struct iov_iter *i) -{ - if (unlikely(!check_copy_size(addr, bytes, true))) - return 0; - else - return _copy_mc_to_iter(addr, bytes, i); -} - +size_t copy_to_iter(const void *addr, size_t bytes, struct iov_iter *i); +size_t copy_from_iter(void *addr, size_t bytes, struct iov_iter *i); +size_t copy_from_iter_nocache(void *addr, size_t bytes, struct iov_iter *i); size_t iov_iter_zero(size_t bytes, struct iov_iter *); unsigned long iov_iter_alignment(const struct iov_iter *i); unsigned long iov_iter_gap_alignment(const struct iov_iter *i); -void iov_iter_init(struct iov_iter *i, unsigned int direction, const struct iovec *iov, +void iov_iter_init(struct iov_iter *i, int direction, const struct iovec *iov, unsigned long nr_segs, size_t count); -void iov_iter_kvec(struct iov_iter *i, unsigned int direction, const struct kvec *kvec, +void iov_iter_kvec(struct iov_iter *i, int direction, const struct kvec *kvec, unsigned long nr_segs, size_t count); -void iov_iter_bvec(struct iov_iter *i, unsigned int direction, const struct bio_vec *bvec, +void iov_iter_bvec(struct iov_iter *i, int direction, const struct bio_vec *bvec, unsigned long nr_segs, size_t count); -void iov_iter_pipe(struct iov_iter *i, unsigned int direction, struct pipe_inode_info *pipe, +void iov_iter_pipe(struct iov_iter *i, int direction, struct 
pipe_inode_info *pipe, size_t count); -void iov_iter_discard(struct iov_iter *i, unsigned int direction, size_t count); -void iov_iter_xarray(struct iov_iter *i, unsigned int direction, struct xarray *xarray, - loff_t start, size_t count); ssize_t iov_iter_get_pages(struct iov_iter *i, struct page **pages, size_t maxsize, unsigned maxpages, size_t *start); ssize_t iov_iter_get_pages_alloc(struct iov_iter *i, struct page ***pages, size_t maxsize, size_t *start); int iov_iter_npages(const struct iov_iter *i, int maxpages); -void iov_iter_restore(struct iov_iter *i, struct iov_iter_state *state); const void *dup_iter(struct iov_iter *new, struct iov_iter *old, gfp_t flags); @@ -255,6 +118,19 @@ static inline size_t iov_iter_count(const struct iov_iter *i) return i->count; } +static inline bool iter_is_iovec(const struct iov_iter *i) +{ + return !(i->type & (ITER_BVEC | ITER_KVEC | ITER_PIPE)); +} + +/* + * Get one of READ or WRITE out of iter->type without any other flags OR'd in + * with it. + * + * The ?: is just for type safety. + */ +#define iov_iter_rw(i) ((0 ? (struct iov_iter *)0 : (i))->type & RW_MASK) + /* * Cap the iov_iter by given limit; note that the second argument is * *not* the new size - it's upper limit for such. 
Passing it a value @@ -281,37 +157,20 @@ static inline void iov_iter_reexpand(struct iov_iter *i, size_t count) { i->count = count; } - -struct csum_state { - __wsum csum; - size_t off; -}; - -size_t csum_and_copy_to_iter(const void *addr, size_t bytes, void *csstate, struct iov_iter *i); +size_t csum_and_copy_to_iter(const void *addr, size_t bytes, __wsum *csum, struct iov_iter *i); size_t csum_and_copy_from_iter(void *addr, size_t bytes, __wsum *csum, struct iov_iter *i); -static __always_inline __must_check -bool csum_and_copy_from_iter_full(void *addr, size_t bytes, - __wsum *csum, struct iov_iter *i) -{ - size_t copied = csum_and_copy_from_iter(addr, bytes, csum, i); - if (likely(copied == bytes)) - return true; - iov_iter_revert(i, copied); - return false; -} -size_t hash_and_copy_to_iter(const void *addr, size_t bytes, void *hashp, - struct iov_iter *i); +int import_iovec(int type, const struct iovec __user * uvector, + unsigned nr_segs, unsigned fast_segs, + struct iovec **iov, struct iov_iter *i); + +#ifdef CONFIG_COMPAT +struct compat_iovec; +int compat_import_iovec(int type, const struct compat_iovec __user * uvector, + unsigned nr_segs, unsigned fast_segs, + struct iovec **iov, struct iov_iter *i); +#endif -struct iovec *iovec_from_user(const struct iovec __user *uvector, - unsigned long nr_segs, unsigned long fast_segs, - struct iovec *fast_iov, bool compat); -ssize_t import_iovec(int type, const struct iovec __user *uvec, - unsigned nr_segs, unsigned fast_segs, struct iovec **iovp, - struct iov_iter *i); -ssize_t __import_iovec(int type, const struct iovec __user *uvec, - unsigned nr_segs, unsigned fast_segs, struct iovec **iovp, - struct iov_iter *i, bool compat); int import_single_range(int type, void __user *buf, size_t len, struct iovec *iov, struct iov_iter *i); diff --git a/include/linux/uio_driver.h b/include/linux/uio_driver.h index 47c5962b87..671eb3518a 100644 --- a/include/linux/uio_driver.h +++ b/include/linux/uio_driver.h @@ -1,4 +1,3 @@ 
-/* SPDX-License-Identifier: GPL-2.0-only */ /* * include/linux/uio_driver.h * @@ -8,12 +7,13 @@ * Copyright(C) 2006, Greg Kroah-Hartman * * Userspace IO driver. + * + * Licensed under the GPLv2 only. */ #ifndef _UIO_DRIVER_H_ #define _UIO_DRIVER_H_ -#include #include #include @@ -23,13 +23,11 @@ struct uio_map; /** * struct uio_mem - description of a UIO memory region * @name: name of the memory region for identification - * @addr: address of the device's memory rounded to page - * size (phys_addr is used since addr can be - * logical, virtual, or physical & phys_addr_t - * should always be large enough to handle any of - * the address types) - * @offs: offset of device memory within the page - * @size: size of IO (multiple of page size) + * @addr: address of the device's memory (phys_addr is used since + * addr can be logical, virtual, or physical & phys_addr_t + * should always be large enough to handle any of the + * address types) + * @size: size of IO * @memtype: type of memory addr points to * @internal_addr: ioremap-ped version of addr, for driver internal use * @map: for use by the UIO core only. 
@@ -37,7 +35,6 @@ struct uio_map; struct uio_mem { const char *name; phys_addr_t addr; - unsigned long offs; resource_size_t size; int memtype; void __iomem *internal_addr; @@ -67,16 +64,15 @@ struct uio_port { #define MAX_UIO_PORT_REGIONS 5 struct uio_device { - struct module *owner; - struct device dev; - int minor; - atomic_t event; - struct fasync_struct *async_queue; - wait_queue_head_t wait; - struct uio_info *info; - struct mutex info_lock; - struct kobject *map_dir; - struct kobject *portio_dir; + struct module *owner; + struct device *dev; + int minor; + atomic_unchecked_t event; + struct fasync_struct *async_queue; + wait_queue_head_t wait; + struct uio_info *info; + struct kobject *map_dir; + struct kobject *portio_dir; }; /** @@ -117,37 +113,12 @@ extern int __must_check struct uio_info *info); /* use a define to avoid include chaining to get THIS_MODULE */ - -/** - * uio_register_device - register a new userspace IO device - * @parent: parent device - * @info: UIO device capabilities - * - * returns zero on success or a negative error code. - */ #define uio_register_device(parent, info) \ __uio_register_device(THIS_MODULE, parent, info) extern void uio_unregister_device(struct uio_info *info); extern void uio_event_notify(struct uio_info *info); -extern int __must_check - __devm_uio_register_device(struct module *owner, - struct device *parent, - struct uio_info *info); - -/* use a define to avoid include chaining to get THIS_MODULE */ - -/** - * devm_uio_register_device - Resource managed uio_register_device() - * @parent: parent device - * @info: UIO device capabilities - * - * returns zero on success or a negative error code. 
- */ -#define devm_uio_register_device(parent, info) \ - __devm_uio_register_device(THIS_MODULE, parent, info) - /* defines for uio_info->irq */ #define UIO_IRQ_CUSTOM -1 #define UIO_IRQ_NONE 0 @@ -157,7 +128,6 @@ extern int __must_check #define UIO_MEM_PHYS 1 #define UIO_MEM_LOGICAL 2 #define UIO_MEM_VIRTUAL 3 -#define UIO_MEM_IOVA 4 /* defines for uio_port->porttype */ #define UIO_PORT_NONE 0 diff --git a/include/linux/ulpi/driver.h b/include/linux/ulpi/driver.h index c7a1810373..a7af21a552 100644 --- a/include/linux/ulpi/driver.h +++ b/include/linux/ulpi/driver.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef __LINUX_ULPI_DRIVER_H #define __LINUX_ULPI_DRIVER_H diff --git a/include/linux/ulpi/interface.h b/include/linux/ulpi/interface.h index e93cfa36c3..a2011a919e 100644 --- a/include/linux/ulpi/interface.h +++ b/include/linux/ulpi/interface.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef __LINUX_ULPI_INTERFACE_H #define __LINUX_ULPI_INTERFACE_H diff --git a/include/linux/ulpi/regs.h b/include/linux/ulpi/regs.h index 9f607872b2..b5b8b88045 100644 --- a/include/linux/ulpi/regs.h +++ b/include/linux/ulpi/regs.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef __LINUX_ULPI_REGS_H #define __LINUX_ULPI_REGS_H diff --git a/include/linux/unaligned/access_ok.h b/include/linux/unaligned/access_ok.h index 167aa849c0..44211d60f6 100644 --- a/include/linux/unaligned/access_ok.h +++ b/include/linux/unaligned/access_ok.h @@ -1,38 +1,37 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_UNALIGNED_ACCESS_OK_H #define _LINUX_UNALIGNED_ACCESS_OK_H #include #include -static __always_inline u16 get_unaligned_le16(const void *p) +static __always_inline u16 __intentional_overflow(-1) get_unaligned_le16(const void *p) { - return le16_to_cpup((__le16 *)p); + return le16_to_cpup((const __le16 *)p); } -static __always_inline u32 get_unaligned_le32(const void *p) +static __always_inline u32 __intentional_overflow(-1) 
get_unaligned_le32(const void *p) { - return le32_to_cpup((__le32 *)p); + return le32_to_cpup((const __le32 *)p); } -static __always_inline u64 get_unaligned_le64(const void *p) +static __always_inline u64 __intentional_overflow(-1) get_unaligned_le64(const void *p) { - return le64_to_cpup((__le64 *)p); + return le64_to_cpup((const __le64 *)p); } -static __always_inline u16 get_unaligned_be16(const void *p) +static __always_inline u16 __intentional_overflow(-1) get_unaligned_be16(const void *p) { - return be16_to_cpup((__be16 *)p); + return be16_to_cpup((const __be16 *)p); } -static __always_inline u32 get_unaligned_be32(const void *p) +static __always_inline u32 __intentional_overflow(-1) get_unaligned_be32(const void *p) { - return be32_to_cpup((__be32 *)p); + return be32_to_cpup((const __be32 *)p); } -static __always_inline u64 get_unaligned_be64(const void *p) +static __always_inline u64 __intentional_overflow(-1) get_unaligned_be64(const void *p) { - return be64_to_cpup((__be64 *)p); + return be64_to_cpup((const __be64 *)p); } static __always_inline void put_unaligned_le16(u16 val, void *p) diff --git a/include/linux/unaligned/be_byteshift.h b/include/linux/unaligned/be_byteshift.h index c43ff5918c..9356b24223 100644 --- a/include/linux/unaligned/be_byteshift.h +++ b/include/linux/unaligned/be_byteshift.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_UNALIGNED_BE_BYTESHIFT_H #define _LINUX_UNALIGNED_BE_BYTESHIFT_H @@ -40,17 +39,17 @@ static inline void __put_unaligned_be64(u64 val, u8 *p) static inline u16 get_unaligned_be16(const void *p) { - return __get_unaligned_be16(p); + return __get_unaligned_be16((const u8 *)p); } static inline u32 get_unaligned_be32(const void *p) { - return __get_unaligned_be32(p); + return __get_unaligned_be32((const u8 *)p); } static inline u64 get_unaligned_be64(const void *p) { - return __get_unaligned_be64(p); + return __get_unaligned_be64((const u8 *)p); } static inline void put_unaligned_be16(u16 val, 
void *p) diff --git a/include/linux/unaligned/be_memmove.h b/include/linux/unaligned/be_memmove.h index 7164214a4b..c2a76c5c9e 100644 --- a/include/linux/unaligned/be_memmove.h +++ b/include/linux/unaligned/be_memmove.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_UNALIGNED_BE_MEMMOVE_H #define _LINUX_UNALIGNED_BE_MEMMOVE_H diff --git a/include/linux/unaligned/be_struct.h b/include/linux/unaligned/be_struct.h index 15ea503a13..132415836c 100644 --- a/include/linux/unaligned/be_struct.h +++ b/include/linux/unaligned/be_struct.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_UNALIGNED_BE_STRUCT_H #define _LINUX_UNALIGNED_BE_STRUCT_H diff --git a/include/linux/unaligned/generic.h b/include/linux/unaligned/generic.h index 3032894928..02d97ff3df 100644 --- a/include/linux/unaligned/generic.h +++ b/include/linux/unaligned/generic.h @@ -1,9 +1,6 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_UNALIGNED_GENERIC_H #define _LINUX_UNALIGNED_GENERIC_H -#include - /* * Cause a link-time error if we try an unaligned access other than * 1,2,4 or 8 bytes long @@ -68,48 +65,4 @@ extern void __bad_unaligned_access_size(void); } \ (void)0; }) -static inline u32 __get_unaligned_be24(const u8 *p) -{ - return p[0] << 16 | p[1] << 8 | p[2]; -} - -static inline u32 get_unaligned_be24(const void *p) -{ - return __get_unaligned_be24(p); -} - -static inline u32 __get_unaligned_le24(const u8 *p) -{ - return p[0] | p[1] << 8 | p[2] << 16; -} - -static inline u32 get_unaligned_le24(const void *p) -{ - return __get_unaligned_le24(p); -} - -static inline void __put_unaligned_be24(const u32 val, u8 *p) -{ - *p++ = val >> 16; - *p++ = val >> 8; - *p++ = val; -} - -static inline void put_unaligned_be24(const u32 val, void *p) -{ - __put_unaligned_be24(val, p); -} - -static inline void __put_unaligned_le24(const u32 val, u8 *p) -{ - *p++ = val; - *p++ = val >> 8; - *p++ = val >> 16; -} - -static inline void put_unaligned_le24(const u32 val, 
void *p) -{ - __put_unaligned_le24(val, p); -} - #endif /* _LINUX_UNALIGNED_GENERIC_H */ diff --git a/include/linux/unaligned/le_byteshift.h b/include/linux/unaligned/le_byteshift.h index 2248dcb0df..be376fb79b 100644 --- a/include/linux/unaligned/le_byteshift.h +++ b/include/linux/unaligned/le_byteshift.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_UNALIGNED_LE_BYTESHIFT_H #define _LINUX_UNALIGNED_LE_BYTESHIFT_H @@ -40,17 +39,17 @@ static inline void __put_unaligned_le64(u64 val, u8 *p) static inline u16 get_unaligned_le16(const void *p) { - return __get_unaligned_le16(p); + return __get_unaligned_le16((const u8 *)p); } static inline u32 get_unaligned_le32(const void *p) { - return __get_unaligned_le32(p); + return __get_unaligned_le32((const u8 *)p); } static inline u64 get_unaligned_le64(const void *p) { - return __get_unaligned_le64(p); + return __get_unaligned_le64((const u8 *)p); } static inline void put_unaligned_le16(u16 val, void *p) diff --git a/include/linux/unaligned/le_memmove.h b/include/linux/unaligned/le_memmove.h index 9202e864d0..269849bee4 100644 --- a/include/linux/unaligned/le_memmove.h +++ b/include/linux/unaligned/le_memmove.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_UNALIGNED_LE_MEMMOVE_H #define _LINUX_UNALIGNED_LE_MEMMOVE_H diff --git a/include/linux/unaligned/le_struct.h b/include/linux/unaligned/le_struct.h index 9977987883..088c4572fa 100644 --- a/include/linux/unaligned/le_struct.h +++ b/include/linux/unaligned/le_struct.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_UNALIGNED_LE_STRUCT_H #define _LINUX_UNALIGNED_LE_STRUCT_H diff --git a/include/linux/unaligned/memmove.h b/include/linux/unaligned/memmove.h index ac71b53bc6..eeb5a779a4 100644 --- a/include/linux/unaligned/memmove.h +++ b/include/linux/unaligned/memmove.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_UNALIGNED_MEMMOVE_H #define _LINUX_UNALIGNED_MEMMOVE_H diff --git 
a/include/linux/uprobes.h b/include/linux/uprobes.h index f46e0ca016..4a29c75b14 100644 --- a/include/linux/uprobes.h +++ b/include/linux/uprobes.h @@ -1,9 +1,22 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ #ifndef _LINUX_UPROBES_H #define _LINUX_UPROBES_H /* * User-space Probes (UProbes) * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 
+ * * Copyright (C) IBM Corporation, 2008-2012 * Authors: * Srikar Dronamraju @@ -14,7 +27,6 @@ #include #include #include -#include struct vm_area_struct; struct mm_struct; @@ -102,16 +114,14 @@ struct uprobes_state { struct xol_area *xol_area; }; -extern void __init uprobes_init(void); extern int set_swbp(struct arch_uprobe *aup, struct mm_struct *mm, unsigned long vaddr); extern int set_orig_insn(struct arch_uprobe *aup, struct mm_struct *mm, unsigned long vaddr); extern bool is_swbp_insn(uprobe_opcode_t *insn); extern bool is_trap_insn(uprobe_opcode_t *insn); extern unsigned long uprobe_get_swbp_addr(struct pt_regs *regs); extern unsigned long uprobe_get_trap_addr(struct pt_regs *regs); -extern int uprobe_write_opcode(struct arch_uprobe *auprobe, struct mm_struct *mm, unsigned long vaddr, uprobe_opcode_t); +extern int uprobe_write_opcode(struct mm_struct *mm, unsigned long vaddr, uprobe_opcode_t); extern int uprobe_register(struct inode *inode, loff_t offset, struct uprobe_consumer *uc); -extern int uprobe_register_refctr(struct inode *inode, loff_t offset, loff_t ref_ctr_offset, struct uprobe_consumer *uc); extern int uprobe_apply(struct inode *inode, loff_t offset, struct uprobe_consumer *uc, bool); extern void uprobe_unregister(struct inode *inode, loff_t offset, struct uprobe_consumer *uc); extern int uprobe_mmap(struct vm_area_struct *vma); @@ -142,10 +152,6 @@ extern void arch_uprobe_copy_ixol(struct page *page, unsigned long vaddr, struct uprobes_state { }; -static inline void uprobes_init(void) -{ -} - #define uprobe_get_trap_addr(regs) instruction_pointer(regs) static inline int @@ -153,10 +159,6 @@ uprobe_register(struct inode *inode, loff_t offset, struct uprobe_consumer *uc) { return -ENOSYS; } -static inline int uprobe_register_refctr(struct inode *inode, loff_t offset, loff_t ref_ctr_offset, struct uprobe_consumer *uc) -{ - return -ENOSYS; -} static inline int uprobe_apply(struct inode *inode, loff_t offset, struct uprobe_consumer *uc, bool add) { 
diff --git a/include/linux/usb.h b/include/linux/usb.h index 77d9a69534..eac1b5280b 100644 --- a/include/linux/usb.h +++ b/include/linux/usb.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef __LINUX_USB_H #define __LINUX_USB_H @@ -100,76 +99,6 @@ enum usb_interface_condition { USB_INTERFACE_UNBINDING, }; -int __must_check -usb_find_common_endpoints(struct usb_host_interface *alt, - struct usb_endpoint_descriptor **bulk_in, - struct usb_endpoint_descriptor **bulk_out, - struct usb_endpoint_descriptor **int_in, - struct usb_endpoint_descriptor **int_out); - -int __must_check -usb_find_common_endpoints_reverse(struct usb_host_interface *alt, - struct usb_endpoint_descriptor **bulk_in, - struct usb_endpoint_descriptor **bulk_out, - struct usb_endpoint_descriptor **int_in, - struct usb_endpoint_descriptor **int_out); - -static inline int __must_check -usb_find_bulk_in_endpoint(struct usb_host_interface *alt, - struct usb_endpoint_descriptor **bulk_in) -{ - return usb_find_common_endpoints(alt, bulk_in, NULL, NULL, NULL); -} - -static inline int __must_check -usb_find_bulk_out_endpoint(struct usb_host_interface *alt, - struct usb_endpoint_descriptor **bulk_out) -{ - return usb_find_common_endpoints(alt, NULL, bulk_out, NULL, NULL); -} - -static inline int __must_check -usb_find_int_in_endpoint(struct usb_host_interface *alt, - struct usb_endpoint_descriptor **int_in) -{ - return usb_find_common_endpoints(alt, NULL, NULL, int_in, NULL); -} - -static inline int __must_check -usb_find_int_out_endpoint(struct usb_host_interface *alt, - struct usb_endpoint_descriptor **int_out) -{ - return usb_find_common_endpoints(alt, NULL, NULL, NULL, int_out); -} - -static inline int __must_check -usb_find_last_bulk_in_endpoint(struct usb_host_interface *alt, - struct usb_endpoint_descriptor **bulk_in) -{ - return usb_find_common_endpoints_reverse(alt, bulk_in, NULL, NULL, NULL); -} - -static inline int __must_check -usb_find_last_bulk_out_endpoint(struct 
usb_host_interface *alt, - struct usb_endpoint_descriptor **bulk_out) -{ - return usb_find_common_endpoints_reverse(alt, NULL, bulk_out, NULL, NULL); -} - -static inline int __must_check -usb_find_last_int_in_endpoint(struct usb_host_interface *alt, - struct usb_endpoint_descriptor **int_in) -{ - return usb_find_common_endpoints_reverse(alt, NULL, NULL, int_in, NULL); -} - -static inline int __must_check -usb_find_last_int_out_endpoint(struct usb_host_interface *alt, - struct usb_endpoint_descriptor **int_out) -{ - return usb_find_common_endpoints_reverse(alt, NULL, NULL, NULL, int_out); -} - /** * struct usb_interface - what usb device drivers talk to * @altsetting: array of interface structures, one for each alternate @@ -200,6 +129,7 @@ usb_find_last_int_out_endpoint(struct usb_host_interface *alt, * @dev: driver model's view of this device * @usb_dev: if an interface is bound to the USB major, this will point * to the sysfs representation for that device. + * @pm_usage_cnt: PM usage counter for this interface * @reset_ws: Used for scheduling resets from atomic context. * @resetting_device: USB core reset the device, so use alt setting 0 as * current; needs bandwidth alloc after reset. @@ -256,6 +186,7 @@ struct usb_interface { struct device dev; /* interface specific device info */ struct device *usb_dev; + atomic_t pm_usage_cnt; /* usage counter for autosuspend */ struct work_struct reset_ws; /* for resets in atomic context */ }; #define to_usb_interface(d) container_of(d, struct usb_interface, dev) @@ -317,7 +248,7 @@ void usb_put_intf(struct usb_interface *intf); * struct usb_interface (which persists only as long as its configuration * is installed). The altsetting arrays can be accessed through these * structures at any time, permitting comparison of configurations and - * providing support for the /sys/kernel/debug/usb/devices pseudo-file. + * providing support for the /proc/bus/usb/devices pseudo-file. 
*/ struct usb_interface_cache { unsigned num_altsetting; /* number of alternate settings */ @@ -325,7 +256,7 @@ struct usb_interface_cache { /* variable-length array of alternate settings for this interface, * stored in no particular order */ - struct usb_host_interface altsetting[]; + struct usb_host_interface altsetting[0]; }; #define ref_to_usb_interface_cache(r) \ container_of(r, struct usb_interface_cache, ref) @@ -341,7 +272,7 @@ struct usb_interface_cache { * @interface: array of pointers to usb_interface structures, one for each * interface in the configuration. The number of interfaces is stored * in desc.bNumInterfaces. These pointers are valid only while the - * configuration is active. + * the configuration is active. * @intf_cache: array of pointers to usb_interface_cache structures, one * for each interface in the configuration. These structures exist * for the entire life of the device. @@ -405,11 +336,11 @@ struct usb_host_bos { }; int __usb_get_extra_descriptor(char *buffer, unsigned size, - unsigned char type, void **ptr, size_t min); + unsigned char type, void **ptr); #define usb_get_extra_descriptor(ifpoint, type, ptr) \ __usb_get_extra_descriptor((ifpoint)->extra, \ (ifpoint)->extralen, \ - type, (void **)ptr, sizeof(**(ptr))) + type, (void **)ptr) /* ----------------------------------------------------------------------- */ @@ -422,10 +353,10 @@ struct usb_devmap { * Allocated per bus (tree of devices) we have: */ struct usb_bus { - struct device *controller; /* host side hardware */ - struct device *sysdev; /* as seen from firmware or bus */ + struct device *controller; /* host/master side hardware */ int busnum; /* Bus number (in order of reg) */ const char *bus_name; /* stable id (PCI slot_name etc) */ + u8 uses_dma; /* Does the host controller use DMA? */ u8 uses_pio_for_control; /* * Does the host controller use PIO * for control transfers? @@ -439,7 +370,7 @@ struct usb_bus { * with the URB_SHORT_NOT_OK flag set. 
*/ unsigned no_sg_constraint:1; /* no sg constraint */ - unsigned sg_tablesize; /* 0 or largest number of sg list entries */ + unsigned short sg_tablesize; /* 0 or largest number of sg list entries */ int devnum_next; /* Next open device number in * round-robin allocation */ @@ -473,6 +404,12 @@ struct usb_dev_state; struct usb_tt; +enum usb_device_removable { + USB_DEVICE_REMOVABLE_UNKNOWN = 0, + USB_DEVICE_REMOVABLE, + USB_DEVICE_FIXED, +}; + enum usb_port_connect_type { USB_PORT_CONNECT_TYPE_UNKNOWN = 0, USB_PORT_CONNECT_TYPE_HOT_PLUG, @@ -480,16 +417,6 @@ enum usb_port_connect_type { USB_PORT_NOT_USED, }; -/* - * USB port quirks. - */ - -/* For the given port, prefer the old (faster) enumeration scheme. */ -#define USB_PORT_QUIRK_OLD_SCHEME BIT(0) - -/* Decrease TRSTRCY to 10ms during device enumeration. */ -#define USB_PORT_QUIRK_FAST_ENUM BIT(1) - /* * USB 2.0 Link Power Management (LPM) parameters. */ @@ -552,9 +479,6 @@ struct usb3_lpm_parameters { * @route: tree topology hex string for use with xHCI * @state: device state: configured, not attached, etc. 
* @speed: device speed: high/full/low (or error) - * @rx_lanes: number of rx lanes in use, USB 3.2 adds dual-lane support - * @tx_lanes: number of tx lanes in use, USB 3.2 adds dual-lane support - * @ssp_rate: SuperSpeed Plus phy signaling rate and lane count * @tt: Transaction Translator info; used with low/full speed dev, highspeed hub * @ttport: device port on that tt hub * @toggle: one bit for each endpoint, with ([0] = IN, [1] = OUT) endpoints @@ -572,7 +496,6 @@ struct usb3_lpm_parameters { * @bus_mA: Current available from the bus * @portnum: parent port number (origin 1) * @level: number of USB hub ancestors - * @devaddr: device address, XHCI: assigned by HW, others: same as devnum * @can_submit: URBs may be submitted * @persist_enabled: USB_PERSIST enabled for this device * @have_langid: whether string_langid is valid @@ -614,10 +537,6 @@ struct usb3_lpm_parameters { * to keep track of the number of functions that require USB 3.0 Link Power * Management to be disabled for this usb_device. This count should only * be manipulated by those functions, with the bandwidth_mutex is held. - * @hub_delay: cached value consisting of: - * parent->hub_delay + wHubDelay + tTPTransmissionDelay (40ns) - * Will be used as wValue for SetIsochDelay requests. - * @use_generic_driver: ask driver core to reprobe using the generic driver. * * Notes: * Usbcore drivers should not set usbdev->state directly. 
Instead use @@ -629,9 +548,6 @@ struct usb_device { u32 route; enum usb_device_state state; enum usb_device_speed speed; - unsigned int rx_lanes; - unsigned int tx_lanes; - enum usb_ssp_rate ssp_rate; struct usb_tt *tt; int ttport; @@ -657,7 +573,6 @@ struct usb_device { unsigned short bus_mA; u8 portnum; u8 level; - u8 devaddr; unsigned can_submit:1; unsigned persist_enabled:1; @@ -684,7 +599,7 @@ struct usb_device { int maxchild; u32 quirks; - atomic_t urbnum; + atomic_unchecked_t urbnum; unsigned long active_duration; @@ -697,13 +612,11 @@ struct usb_device { #endif struct wusb_dev *wusb_dev; int slot_id; + enum usb_device_removable removable; struct usb2_lpm_parameters l1_params; struct usb3_lpm_parameters u1_params; struct usb3_lpm_parameters u2_params; unsigned lpm_disable_count; - - u16 hub_delay; - unsigned use_generic_driver:1; }; #define to_usb_device(d) container_of(d, struct usb_device, dev) @@ -741,8 +654,6 @@ extern int usb_lock_device_for_reset(struct usb_device *udev, extern int usb_reset_device(struct usb_device *dev); extern void usb_queue_reset_device(struct usb_interface *dev); -extern struct device *usb_intf_get_dma_device(struct usb_interface *intf); - #ifdef CONFIG_ACPI extern int usb_acpi_set_power_state(struct usb_device *hdev, int index, bool enable); @@ -836,7 +747,7 @@ extern int usb_free_streams(struct usb_interface *interface, /* used these for multi-interface device registration */ extern int usb_driver_claim_interface(struct usb_driver *driver, - struct usb_interface *iface, void *data); + struct usb_interface *iface, void *priv); /** * usb_interface_claimed - returns true iff an interface is claimed @@ -875,15 +786,6 @@ extern struct usb_host_interface *usb_find_alt_setting( unsigned int iface_num, unsigned int alt_num); -#if IS_REACHABLE(CONFIG_USB) -int usb_for_each_port(void *data, int (*fn)(struct device *, void *)); -#else -static inline int usb_for_each_port(void *data, int (*fn)(struct device *, void *)) -{ - return 0; -} 
-#endif - /* port claiming functions */ int usb_hub_claim_port(struct usb_device *hdev, unsigned port1, struct usb_dev_state *owner); @@ -1157,8 +1059,6 @@ struct usbdrv_wrap { * @id_table: USB drivers use ID table to support hotplugging. * Export this with MODULE_DEVICE_TABLE(usb,...). This must be set * or your driver's probe function will never get called. - * @dev_groups: Attributes attached to the device that will be created once it - * is bound to the driver. * @dynids: used internally to hold the list of dynamically added device * ids for this driver. * @drvwrap: Driver-model core structure wrapper. @@ -1206,7 +1106,6 @@ struct usb_driver { int (*post_reset)(struct usb_interface *intf); const struct usb_device_id *id_table; - const struct attribute_group **dev_groups; struct usb_dynids dynids; struct usbdrv_wrap drvwrap; @@ -1221,7 +1120,6 @@ struct usb_driver { * struct usb_device_driver - identifies USB device driver to usbcore * @name: The driver name should be unique among USB drivers, * and should normally be the same as the module name. - * @match: If set, used for better device/driver matching. * @probe: Called to see if the driver is willing to manage a particular * device. If it is, probe returns zero and uses dev_set_drvdata() * to associate driver-specific data with the device. If unwilling @@ -1231,38 +1129,28 @@ struct usb_driver { * module is being unloaded. * @suspend: Called when the device is going to be suspended by the system. * @resume: Called when the device is being resumed by the system. - * @dev_groups: Attributes attached to the device that will be created once it - * is bound to the driver. * @drvwrap: Driver-model core structure wrapper. - * @id_table: used with @match() to select better matching driver at - * probe() time. * @supports_autosuspend: if set to 0, the USB core will not allow autosuspend * for devices bound to this driver. 
- * @generic_subclass: if set to 1, the generic USB driver's probe, disconnect, - * resume and suspend functions will be called in addition to the driver's - * own, so this part of the setup does not need to be replicated. * - * USB drivers must provide all the fields listed above except drvwrap, - * match, and id_table. + * USB drivers must provide all the fields listed above except drvwrap. */ struct usb_device_driver { const char *name; - bool (*match) (struct usb_device *udev); int (*probe) (struct usb_device *udev); void (*disconnect) (struct usb_device *udev); int (*suspend) (struct usb_device *udev, pm_message_t message); int (*resume) (struct usb_device *udev, pm_message_t message); - const struct attribute_group **dev_groups; struct usbdrv_wrap drvwrap; - const struct usb_device_id *id_table; unsigned int supports_autosuspend:1; - unsigned int generic_subclass:1; }; #define to_usb_device_driver(d) container_of(d, struct usb_device_driver, \ drvwrap.driver) +extern struct bus_type usb_bus_type; + /** * struct usb_class_driver - identifies a USB driver that wants to use the USB major number * @name: the usb class device name for this driver. Will show up in sysfs. @@ -1272,7 +1160,7 @@ struct usb_device_driver { * @minor_base: the start of the minor range for this driver. * * This structure is used for the usb_register_dev() and - * usb_deregister_dev() functions, to consolidate a number of the + * usb_unregister_dev() functions, to consolidate a number of the * parameters used for them. 
*/ struct usb_class_driver { @@ -1333,6 +1221,7 @@ extern int usb_disabled(void); #define URB_ISO_ASAP 0x0002 /* iso-only; use the first unexpired * slot in the schedule */ #define URB_NO_TRANSFER_DMA_MAP 0x0004 /* urb->transfer_dma valid on submit */ +#define URB_NO_FSBR 0x0020 /* UHCI-specific */ #define URB_ZERO_PACKET 0x0040 /* Finish bulk OUT with short packet */ #define URB_NO_INTERRUPT 0x0080 /* HINT: no non-error interrupt * needed */ @@ -1477,8 +1366,8 @@ typedef void (*usb_complete_t)(struct urb *); * field rather than determining a dma address themselves. * * Note that transfer_buffer must still be set if the controller - * does not support DMA (as indicated by hcd_uses_dma()) and when talking - * to root hub. If you have to transfer between highmem zone and the device + * does not support DMA (as indicated by bus.uses_dma) and when talking + * to root hub. If you have to trasfer between highmem zone and the device * on such controller, create a bounce buffer or bail out with an error. * If transfer_buffer cannot be set (is in highmem) and the controller is DMA * capable, assign NULL to it, so that usbmon knows not to use the value. 
@@ -1565,10 +1454,10 @@ typedef void (*usb_complete_t)(struct urb *); struct urb { /* private: usb core and host controller only fields in the urb */ struct kref kref; /* reference count of the URB */ - int unlinked; /* unlink error code */ void *hcpriv; /* private data for host controller */ atomic_t use_count; /* concurrent submissions counter */ atomic_t reject; /* submissions will fail */ + int unlinked; /* unlink error code */ /* public: documented fields in the urb that can be used by drivers */ struct list_head urb_list; /* list head for use by the urb's @@ -1597,7 +1486,7 @@ struct urb { int error_count; /* (return) number of ISO errors */ void *context; /* (in) context for completion */ usb_complete_t complete; /* (in) completion routine */ - struct usb_iso_packet_descriptor iso_frame_desc[]; + struct usb_iso_packet_descriptor iso_frame_desc[0]; /* (in) ISO ONLY */ }; @@ -1768,9 +1657,6 @@ static inline int usb_urb_dir_out(struct urb *urb) return (urb->transfer_flags & URB_DIR_MASK) == URB_DIR_OUT; } -int usb_pipe_type_check(struct usb_device *dev, unsigned int pipe); -int usb_urb_ep_type_check(const struct urb *urb); - void *usb_alloc_coherent(struct usb_device *dev, size_t size, gfp_t mem_flags, dma_addr_t *dma); void usb_free_coherent(struct usb_device *dev, size_t size, @@ -1806,42 +1692,18 @@ extern int usb_bulk_msg(struct usb_device *usb_dev, unsigned int pipe, int timeout); /* wrappers around usb_control_msg() for the most common standard requests */ -int usb_control_msg_send(struct usb_device *dev, __u8 endpoint, __u8 request, - __u8 requesttype, __u16 value, __u16 index, - const void *data, __u16 size, int timeout, - gfp_t memflags); -int usb_control_msg_recv(struct usb_device *dev, __u8 endpoint, __u8 request, - __u8 requesttype, __u16 value, __u16 index, - void *data, __u16 size, int timeout, - gfp_t memflags); extern int usb_get_descriptor(struct usb_device *dev, unsigned char desctype, unsigned char descindex, void *buf, int size); extern int 
usb_get_status(struct usb_device *dev, - int recip, int type, int target, void *data); - -static inline int usb_get_std_status(struct usb_device *dev, - int recip, int target, void *data) -{ - return usb_get_status(dev, recip, USB_STATUS_TYPE_STANDARD, target, - data); -} - -static inline int usb_get_ptm_status(struct usb_device *dev, void *data) -{ - return usb_get_status(dev, USB_RECIP_DEVICE, USB_STATUS_TYPE_PTM, - 0, data); -} - + int type, int target, void *data); extern int usb_string(struct usb_device *dev, int index, char *buf, size_t size); /* wrappers that also update important state inside usbcore */ -extern int usb_clear_halt(struct usb_device *dev, int pipe); +extern int usb_clear_halt(struct usb_device *dev, unsigned int pipe); extern int usb_reset_configuration(struct usb_device *dev); extern int usb_set_interface(struct usb_device *dev, int ifnum, int alternate); extern void usb_reset_endpoint(struct usb_device *dev, unsigned int epaddr); -extern void usb_fixup_endpoint(struct usb_device *dev, int epaddr, - int interval); /* this request isn't really synchronous, but it belongs with the others */ extern int usb_driver_set_configuration(struct usb_device *udev, int config); @@ -1931,10 +1793,10 @@ void usb_sg_wait(struct usb_sg_request *io); /* NOTE: these are not the standard USB_ENDPOINT_XFER_* values!! */ /* (yet ... 
they're the values used by usbfs) */ -#define PIPE_ISOCHRONOUS 0 -#define PIPE_INTERRUPT 1 -#define PIPE_CONTROL 2 -#define PIPE_BULK 3 +#define PIPE_ISOCHRONOUS 0U +#define PIPE_INTERRUPT 1U +#define PIPE_CONTROL 2U +#define PIPE_BULK 3U #define usb_pipein(pipe) ((pipe) & USB_DIR_IN) #define usb_pipeout(pipe) (!usb_pipein(pipe)) @@ -1983,7 +1845,7 @@ usb_pipe_endpoint(struct usb_device *dev, unsigned int pipe) /*-------------------------------------------------------------------------*/ static inline __u16 -usb_maxpacket(struct usb_device *udev, int pipe, int is_out) +usb_maxpacket(struct usb_device *udev, unsigned int pipe, int is_out) { struct usb_host_endpoint *ep; unsigned epnum = usb_pipeendpoint(pipe); diff --git a/include/linux/usb/association.h b/include/linux/usb/association.h new file mode 100644 index 0000000000..0a4a18b3c1 --- /dev/null +++ b/include/linux/usb/association.h @@ -0,0 +1,150 @@ +/* + * Wireless USB - Cable Based Association + * + * Copyright (C) 2006 Intel Corporation + * Inaky Perez-Gonzalez + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License version + * 2 as published by the Free Software Foundation. + * + */ +#ifndef __LINUX_USB_ASSOCIATION_H +#define __LINUX_USB_ASSOCIATION_H + + +/* + * Association attributes + * + * Association Models Supplement to WUSB 1.0 T[3-1] + * + * Each field in the structures has it's ID, it's length and then the + * value. This is the actual definition of the field's ID and its + * length. 
+ */ +struct wusb_am_attr { + __u8 id; + __u8 len; +}; + +/* Different fields defined by the spec */ +#define WUSB_AR_AssociationTypeId { .id = cpu_to_le16(0x0000), .len = cpu_to_le16(2) } +#define WUSB_AR_AssociationSubTypeId { .id = cpu_to_le16(0x0001), .len = cpu_to_le16(2) } +#define WUSB_AR_Length { .id = cpu_to_le16(0x0002), .len = cpu_to_le16(4) } +#define WUSB_AR_AssociationStatus { .id = cpu_to_le16(0x0004), .len = cpu_to_le16(4) } +#define WUSB_AR_LangID { .id = cpu_to_le16(0x0008), .len = cpu_to_le16(2) } +#define WUSB_AR_DeviceFriendlyName { .id = cpu_to_le16(0x000b), .len = cpu_to_le16(64) } /* max */ +#define WUSB_AR_HostFriendlyName { .id = cpu_to_le16(0x000c), .len = cpu_to_le16(64) } /* max */ +#define WUSB_AR_CHID { .id = cpu_to_le16(0x1000), .len = cpu_to_le16(16) } +#define WUSB_AR_CDID { .id = cpu_to_le16(0x1001), .len = cpu_to_le16(16) } +#define WUSB_AR_ConnectionContext { .id = cpu_to_le16(0x1002), .len = cpu_to_le16(48) } +#define WUSB_AR_BandGroups { .id = cpu_to_le16(0x1004), .len = cpu_to_le16(2) } + +/* CBAF Control Requests (AMS1.0[T4-1] */ +enum { + CBAF_REQ_GET_ASSOCIATION_INFORMATION = 0x01, + CBAF_REQ_GET_ASSOCIATION_REQUEST, + CBAF_REQ_SET_ASSOCIATION_RESPONSE +}; + +/* + * CBAF USB-interface defitions + * + * No altsettings, one optional interrupt endpoint. 
+ */ +enum { + CBAF_IFACECLASS = 0xef, + CBAF_IFACESUBCLASS = 0x03, + CBAF_IFACEPROTOCOL = 0x01, +}; + +/* Association Information (AMS1.0[T4-3]) */ +struct wusb_cbaf_assoc_info { + __le16 Length; + __u8 NumAssociationRequests; + __le16 Flags; + __u8 AssociationRequestsArray[]; +} __attribute__((packed)); + +/* Association Request (AMS1.0[T4-4]) */ +struct wusb_cbaf_assoc_request { + __u8 AssociationDataIndex; + __u8 Reserved; + __le16 AssociationTypeId; + __le16 AssociationSubTypeId; + __le32 AssociationTypeInfoSize; +} __attribute__((packed)); + +enum { + AR_TYPE_WUSB = 0x0001, + AR_TYPE_WUSB_RETRIEVE_HOST_INFO = 0x0000, + AR_TYPE_WUSB_ASSOCIATE = 0x0001, +}; + +/* Association Attribute header (AMS1.0[3.8]) */ +struct wusb_cbaf_attr_hdr { + __le16 id; + __le16 len; +} __attribute__((packed)); + +/* Host Info (AMS1.0[T4-7]) (yeah, more headers and fields...) */ +struct wusb_cbaf_host_info { + struct wusb_cbaf_attr_hdr AssociationTypeId_hdr; + __le16 AssociationTypeId; + struct wusb_cbaf_attr_hdr AssociationSubTypeId_hdr; + __le16 AssociationSubTypeId; + struct wusb_cbaf_attr_hdr CHID_hdr; + struct wusb_ckhdid CHID; + struct wusb_cbaf_attr_hdr LangID_hdr; + __le16 LangID; + struct wusb_cbaf_attr_hdr HostFriendlyName_hdr; + __u8 HostFriendlyName[]; +} __attribute__((packed)); + +/* Device Info (AMS1.0[T4-8]) + * + * I still don't get this tag'n'header stuff for each goddamn + * field... 
+ */ +struct wusb_cbaf_device_info { + struct wusb_cbaf_attr_hdr Length_hdr; + __le32 Length; + struct wusb_cbaf_attr_hdr CDID_hdr; + struct wusb_ckhdid CDID; + struct wusb_cbaf_attr_hdr BandGroups_hdr; + __le16 BandGroups; + struct wusb_cbaf_attr_hdr LangID_hdr; + __le16 LangID; + struct wusb_cbaf_attr_hdr DeviceFriendlyName_hdr; + __u8 DeviceFriendlyName[]; +} __attribute__((packed)); + +/* Connection Context; CC_DATA - Success case (AMS1.0[T4-9]) */ +struct wusb_cbaf_cc_data { + struct wusb_cbaf_attr_hdr AssociationTypeId_hdr; + __le16 AssociationTypeId; + struct wusb_cbaf_attr_hdr AssociationSubTypeId_hdr; + __le16 AssociationSubTypeId; + struct wusb_cbaf_attr_hdr Length_hdr; + __le32 Length; + struct wusb_cbaf_attr_hdr ConnectionContext_hdr; + struct wusb_ckhdid CHID; + struct wusb_ckhdid CDID; + struct wusb_ckhdid CK; + struct wusb_cbaf_attr_hdr BandGroups_hdr; + __le16 BandGroups; +} __attribute__((packed)); + +/* CC_DATA - Failure case (AMS1.0[T4-10]) */ +struct wusb_cbaf_cc_data_fail { + struct wusb_cbaf_attr_hdr AssociationTypeId_hdr; + __le16 AssociationTypeId; + struct wusb_cbaf_attr_hdr AssociationSubTypeId_hdr; + __le16 AssociationSubTypeId; + struct wusb_cbaf_attr_hdr Length_hdr; + __le16 Length; + struct wusb_cbaf_attr_hdr AssociationStatus_hdr; + __u32 AssociationStatus; +} __attribute__((packed)); + +#endif /* __LINUX_USB_ASSOCIATION_H */ diff --git a/include/linux/usb/atmel_usba_udc.h b/include/linux/usb/atmel_usba_udc.h new file mode 100644 index 0000000000..ba99af275a --- /dev/null +++ b/include/linux/usb/atmel_usba_udc.h @@ -0,0 +1,23 @@ +/* + * Platform data definitions for Atmel USBA gadget driver. 
+ */ +#ifndef __LINUX_USB_USBA_H +#define __LINUX_USB_USBA_H + +struct usba_ep_data { + char *name; + int index; + int fifo_size; + int nr_banks; + int can_dma; + int can_isoc; +}; + +struct usba_platform_data { + int vbus_pin; + int vbus_pin_inverted; + int num_ep; + struct usba_ep_data ep[0]; +}; + +#endif /* __LINUX_USB_USBA_H */ diff --git a/include/linux/usb/audio-v2.h b/include/linux/usb/audio-v2.h index 8fc2abd7ae..c5f2158ab0 100644 --- a/include/linux/usb/audio-v2.h +++ b/include/linux/usb/audio-v2.h @@ -1,4 +1,3 @@ -// SPDX-License-Identifier: GPL-2.0 /* * Copyright (c) 2010 Daniel Mack * @@ -34,14 +33,14 @@ * */ -static inline bool uac_v2v3_control_is_readable(u32 bmControls, u8 control) +static inline bool uac2_control_is_readable(u32 bmControls, u8 control) { - return (bmControls >> ((control - 1) * 2)) & 0x1; + return (bmControls >> (control * 2)) & 0x1; } -static inline bool uac_v2v3_control_is_writeable(u32 bmControls, u8 control) +static inline bool uac2_control_is_writeable(u32 bmControls, u8 control) { - return (bmControls >> ((control - 1) * 2)) & 0x2; + return (bmControls >> (control * 2)) & 0x2; } /* 4.7.2 Class-Specific AC Interface Descriptor */ @@ -94,7 +93,7 @@ struct uac_clock_selector_descriptor { __u8 bClockID; __u8 bNrInPins; __u8 baCSourceID[]; - /* bmControls and iClockSource omitted */ + /* bmControls, bAssocTerminal and iClockSource omitted */ } __attribute__((packed)); /* 4.7.2.3 Clock Multiplier Descriptor */ @@ -116,13 +115,13 @@ struct uac2_input_terminal_descriptor { __u8 bDescriptorType; __u8 bDescriptorSubtype; __u8 bTerminalID; - __le16 wTerminalType; + __u16 wTerminalType; __u8 bAssocTerminal; __u8 bCSourceID; __u8 bNrChannels; - __le32 bmChannelConfig; + __u32 bmChannelConfig; __u8 iChannelNames; - __le16 bmControls; + __u16 bmControls; __u8 iTerminal; } __attribute__((packed)); @@ -133,11 +132,11 @@ struct uac2_output_terminal_descriptor { __u8 bDescriptorType; __u8 bDescriptorSubtype; __u8 bTerminalID; - __le16 
wTerminalType; + __u16 wTerminalType; __u8 bAssocTerminal; __u8 bSourceID; __u8 bCSourceID; - __le16 bmControls; + __u16 bmControls; __u8 iTerminal; } __attribute__((packed)); @@ -153,33 +152,7 @@ struct uac2_feature_unit_descriptor { __u8 bSourceID; /* bmaControls is actually u32, * but u8 is needed for the hybrid parser */ - __u8 bmaControls[]; /* variable length */ -} __attribute__((packed)); - -#define UAC2_DT_FEATURE_UNIT_SIZE(ch) (6 + ((ch) + 1) * 4) - -/* As above, but more useful for defining your own descriptors: */ -#define DECLARE_UAC2_FEATURE_UNIT_DESCRIPTOR(ch) \ -struct uac2_feature_unit_descriptor_##ch { \ - __u8 bLength; \ - __u8 bDescriptorType; \ - __u8 bDescriptorSubtype; \ - __u8 bUnitID; \ - __u8 bSourceID; \ - __le32 bmaControls[ch + 1]; \ - __u8 iFeature; \ -} __packed - -/* 4.7.2.10 Effect Unit Descriptor */ - -struct uac2_effect_unit_descriptor { - __u8 bLength; - __u8 bDescriptorType; - __u8 bDescriptorSubtype; - __u8 bUnitID; - __le16 wEffectType; - __u8 bSourceID; - __u8 bmaControls[]; /* variable length */ + __u8 bmaControls[0]; /* variable length */ } __attribute__((packed)); /* 4.9.2 Class-Specific AS Interface Descriptor */ @@ -191,9 +164,9 @@ struct uac2_as_header_descriptor { __u8 bTerminalLink; __u8 bmControls; __u8 bFormatType; - __le32 bmFormats; + __u32 bmFormats; __u8 bNrChannels; - __le32 bmChannelConfig; + __u32 bmChannelConfig; __u8 iChannelNames; } __attribute__((packed)); @@ -215,13 +188,6 @@ struct uac2_iso_endpoint_descriptor { #define UAC2_CONTROL_DATA_OVERRUN (3 << 2) #define UAC2_CONTROL_DATA_UNDERRUN (3 << 4) -/* 5.2.5.4.2 Connector Control Parameter Block */ -struct uac2_connectors_ctl_blk { - __u8 bNrChannels; - __le32 bmChannelConfig; - __u8 iChannelNames; -} __attribute__((packed)); - /* 6.1 Interrupt Data Message */ #define UAC2_INTERRUPT_DATA_MSG_VENDOR (1 << 0) diff --git a/include/linux/usb/audio.h b/include/linux/usb/audio.h index 170acd500e..3d84619110 100644 --- a/include/linux/usb/audio.h +++ 
b/include/linux/usb/audio.h @@ -1,4 +1,3 @@ -// SPDX-License-Identifier: GPL-2.0 /* * -- USB Audio definitions. * diff --git a/include/linux/usb/c67x00.h b/include/linux/usb/c67x00.h index 2fc39e3b72..83c6b45470 100644 --- a/include/linux/usb/c67x00.h +++ b/include/linux/usb/c67x00.h @@ -1,4 +1,3 @@ -// SPDX-License-Identifier: GPL-2.0+ /* * usb_c67x00.h: platform definitions for the Cypress C67X00 USB chip * diff --git a/include/linux/usb/cdc-wdm.h b/include/linux/usb/cdc-wdm.h index 9f5a51f79b..0b3f4295c0 100644 --- a/include/linux/usb/cdc-wdm.h +++ b/include/linux/usb/cdc-wdm.h @@ -1,4 +1,3 @@ -// SPDX-License-Identifier: GPL-2.0 /* * USB CDC Device Management subdriver * @@ -12,12 +11,11 @@ #ifndef __LINUX_USB_CDC_WDM_H #define __LINUX_USB_CDC_WDM_H -#include #include extern struct usb_driver *usb_cdc_wdm_register(struct usb_interface *intf, struct usb_endpoint_descriptor *ep, - int bufsize, enum wwan_port_type type, + int bufsize, int (*manage_power)(struct usb_interface *, int)); #endif /* __LINUX_USB_CDC_WDM_H */ diff --git a/include/linux/usb/cdc.h b/include/linux/usb/cdc.h index 35d784cf32..b5706f94ee 100644 --- a/include/linux/usb/cdc.h +++ b/include/linux/usb/cdc.h @@ -1,4 +1,3 @@ -// SPDX-License-Identifier: GPL-2.0 /* * USB CDC common helpers * diff --git a/include/linux/usb/cdc_ncm.h b/include/linux/usb/cdc_ncm.h index f7cb3ddce7..00d232406f 100644 --- a/include/linux/usb/cdc_ncm.h +++ b/include/linux/usb/cdc_ncm.h @@ -1,4 +1,3 @@ -// SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause) /* * Copyright (C) ST-Ericsson 2010-2012 * Contact: Alexey Orishko @@ -46,12 +45,9 @@ #define CDC_NCM_DATA_ALTSETTING_NCM 1 #define CDC_NCM_DATA_ALTSETTING_MBIM 2 -/* CDC NCM subclass 3.3.1 */ +/* CDC NCM subclass 3.2.1 */ #define USB_CDC_NCM_NDP16_LENGTH_MIN 0x10 -/* CDC NCM subclass 3.3.2 */ -#define USB_CDC_NCM_NDP32_LENGTH_MIN 0x20 - /* Maximum NTB length */ #define CDC_NCM_NTB_MAX_SIZE_TX 32768 /* bytes */ #define CDC_NCM_NTB_MAX_SIZE_RX 32768 /* bytes */ @@ -87,7 
+83,6 @@ /* Driver flags */ #define CDC_NCM_FLAG_NDP_TO_END 0x02 /* NDP is placed at end of frame */ #define CDC_MBIM_FLAG_AVOID_ALTSETTING_TOGGLE 0x04 /* Avoid altsetting toggle during init */ -#define CDC_NCM_FLAG_PREFER_NTB32 0x08 /* prefer NDP32 over NDP16 */ #define cdc_ncm_comm_intf_is_mbim(x) ((x)->desc.bInterfaceSubClass == USB_CDC_SUBCLASS_MBIM && \ (x)->desc.bInterfaceProtocol == USB_CDC_PROTO_NONE) @@ -98,8 +93,6 @@ struct cdc_ncm_ctx { struct hrtimer tx_timer; struct tasklet_struct bh; - struct usbnet *dev; - const struct usb_cdc_ncm_desc *func_desc; const struct usb_cdc_mbim_desc *mbim_desc; const struct usb_cdc_mbim_extended_desc *mbim_extended_desc; @@ -118,19 +111,12 @@ struct cdc_ncm_ctx { u32 timer_interval; u32 max_ndp_size; - u8 is_ndp16; - union { - struct usb_cdc_ncm_ndp16 *delayed_ndp16; - struct usb_cdc_ncm_ndp32 *delayed_ndp32; - }; + struct usb_cdc_ncm_ndp16 *delayed_ndp16; u32 tx_timer_pending; u32 tx_curr_frame_num; u32 rx_max; u32 tx_max; - u32 tx_curr_size; - u32 tx_low_mem_max_cnt; - u32 tx_low_mem_val; u32 max_datagram_size; u16 tx_max_datagrams; u16 tx_remainder; @@ -159,8 +145,6 @@ void cdc_ncm_unbind(struct usbnet *dev, struct usb_interface *intf); struct sk_buff *cdc_ncm_fill_tx_frame(struct usbnet *dev, struct sk_buff *skb, __le32 sign); int cdc_ncm_rx_verify_nth16(struct cdc_ncm_ctx *ctx, struct sk_buff *skb_in); int cdc_ncm_rx_verify_ndp16(struct sk_buff *skb_in, int ndpoffset); -int cdc_ncm_rx_verify_nth32(struct cdc_ncm_ctx *ctx, struct sk_buff *skb_in); -int cdc_ncm_rx_verify_ndp32(struct sk_buff *skb_in, int ndpoffset); struct sk_buff * cdc_ncm_tx_fixup(struct usbnet *dev, struct sk_buff *skb, gfp_t flags); int cdc_ncm_rx_fixup(struct usbnet *dev, struct sk_buff *skb_in); diff --git a/include/linux/usb/ch9.h b/include/linux/usb/ch9.h index 1cffa34740..6cc96bb12d 100644 --- a/include/linux/usb/ch9.h +++ b/include/linux/usb/ch9.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ /* * This file holds USB constants and 
structures that are needed for * USB device APIs. These are used by the USB device model, which is @@ -6,13 +5,13 @@ * Wireless USB 1.0 (spread around). Linux has several APIs in C that * need these: * - * - the host side Linux-USB kernel driver API; + * - the master/host side Linux-USB kernel driver API; * - the "usbfs" user space API; and - * - the Linux "gadget" device/peripheral side driver API. + * - the Linux "gadget" slave/device/peripheral side driver API. * * USB 2.0 adds an additional "On The Go" (OTG) mode, which lets systems - * act either as a USB host or as a USB device. That means the host and - * device side APIs benefit from working well together. + * act either as a USB master/host or as a USB slave/device. That means + * the master and slave side APIs benefit from working well together. * * There's also "Wireless USB", using low power short range radios for * peripheral interconnection but otherwise building on the USB framework. @@ -36,27 +35,30 @@ #include #include -/* USB 3.2 SuperSpeed Plus phy signaling rate generation and lane count */ - -enum usb_ssp_rate { - USB_SSP_GEN_UNKNOWN = 0, - USB_SSP_GEN_2x1, - USB_SSP_GEN_1x2, - USB_SSP_GEN_2x2, -}; - -extern const char *usb_ep_type_string(int ep_type); +/** + * usb_speed_string() - Returns human readable-name of the speed. + * @speed: The speed to return human-readable name for. If it's not + * any of the speeds defined in usb_device_speed enum, string for + * USB_SPEED_UNKNOWN will be returned. 
+ */ extern const char *usb_speed_string(enum usb_device_speed speed); -extern enum usb_device_speed usb_get_maximum_speed(struct device *dev); -extern enum usb_ssp_rate usb_get_maximum_ssp_rate(struct device *dev); -extern const char *usb_state_string(enum usb_device_state state); -unsigned int usb_decode_interval(const struct usb_endpoint_descriptor *epd, - enum usb_device_speed speed); -#ifdef CONFIG_TRACING -extern const char *usb_decode_ctrl(char *str, size_t size, __u8 bRequestType, - __u8 bRequest, __u16 wValue, __u16 wIndex, - __u16 wLength); -#endif +/** + * usb_get_maximum_speed - Get maximum requested speed for a given USB + * controller. + * @dev: Pointer to the given USB controller device + * + * The function gets the maximum speed string from property "maximum-speed", + * and returns the corresponding enum usb_device_speed. + */ +extern enum usb_device_speed usb_get_maximum_speed(struct device *dev); + +/** + * usb_state_string - Returns human readable name for the state. + * @state: The state to return a human-readable name for. If it's not + * any of the states devices in usb_device_state_string enum, + * the string UNKNOWN will be returned. 
+ */ +extern const char *usb_state_string(enum usb_device_state state); #endif /* __LINUX_USB_CH9_H */ diff --git a/include/linux/usb/chipidea.h b/include/linux/usb/chipidea.h index edf3342507..5dd75fa47d 100644 --- a/include/linux/usb/chipidea.h +++ b/include/linux/usb/chipidea.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ /* * Platform data for the chipidea USB dual role controller */ @@ -13,18 +12,16 @@ struct ci_hdrc; /** * struct ci_hdrc_cable - structure for external connector cable state tracking - * @connected: true if cable is connected, false otherwise + * @state: current state of the line * @changed: set to true when extcon event happen - * @enabled: set to true if we've enabled the vbus or id interrupt * @edev: device which generate events * @ci: driver state of the chipidea device * @nb: hold event notification callback * @conn: used for notification registration */ struct ci_hdrc_cable { - bool connected; + bool state; bool changed; - bool enabled; struct extcon_dev *edev; struct ci_hdrc *ci; struct notifier_block nb; @@ -58,17 +55,10 @@ struct ci_hdrc_platform_data { #define CI_HDRC_OVERRIDE_AHB_BURST BIT(9) #define CI_HDRC_OVERRIDE_TX_BURST BIT(10) #define CI_HDRC_OVERRIDE_RX_BURST BIT(11) -#define CI_HDRC_OVERRIDE_PHY_CONTROL BIT(12) /* Glue layer manages phy */ -#define CI_HDRC_REQUIRES_ALIGNED_DMA BIT(13) -#define CI_HDRC_IMX_IS_HSIC BIT(14) -#define CI_HDRC_PMQOS BIT(15) enum usb_dr_mode dr_mode; #define CI_HDRC_CONTROLLER_RESET_EVENT 0 #define CI_HDRC_CONTROLLER_STOPPED_EVENT 1 -#define CI_HDRC_IMX_HSIC_ACTIVE_EVENT 2 -#define CI_HDRC_IMX_HSIC_SUSPEND_EVENT 3 -#define CI_HDRC_CONTROLLER_VBUS_EVENT 4 - int (*notify_event) (struct ci_hdrc *ci, unsigned event); + void (*notify_event) (struct ci_hdrc *ci, unsigned event); struct regulator *reg_vbus; struct usb_otg_caps ci_otg_caps; bool tpl_support; @@ -82,18 +72,6 @@ struct ci_hdrc_platform_data { struct ci_hdrc_cable vbus_extcon; struct ci_hdrc_cable id_extcon; u32 
phy_clkgate_delay_us; - - /* pins */ - struct pinctrl *pctl; - struct pinctrl_state *pins_default; - struct pinctrl_state *pins_host; - struct pinctrl_state *pins_device; - - /* platform-specific hooks */ - int (*hub_control)(struct ci_hdrc *ci, u16 typeReq, u16 wValue, - u16 wIndex, char *buf, u16 wLength, - bool *done, unsigned long *flags); - void (*enter_lpm)(struct ci_hdrc *ci, bool enable); }; /* Default offset of capability registers */ @@ -105,7 +83,5 @@ struct platform_device *ci_hdrc_add_device(struct device *dev, struct ci_hdrc_platform_data *platdata); /* Remove ci hdrc device */ void ci_hdrc_remove_device(struct platform_device *pdev); -/* Get current available role */ -enum usb_dr_mode ci_hdrc_query_available_role(struct platform_device *pdev); #endif diff --git a/include/linux/usb/composite.h b/include/linux/usb/composite.h index 9d27622792..4616a49a1c 100644 --- a/include/linux/usb/composite.h +++ b/include/linux/usb/composite.h @@ -1,4 +1,3 @@ -// SPDX-License-Identifier: GPL-2.0+ /* * composite.h -- framework for usb gadgets which are composite devices * @@ -52,10 +51,7 @@ #define USB_GADGET_DELAYED_STATUS 0x7fff /* Impossibly large value */ /* big enough to hold our biggest descriptor */ -#define USB_COMP_EP0_BUFSIZ 4096 - -/* OS feature descriptor length <= 4kB */ -#define USB_COMP_EP0_OS_DESC_BUFSIZ 4096 +#define USB_COMP_EP0_BUFSIZ 1024 #define USB_MS_TO_HS_INTERVAL(x) (ilog2((x * 1000 / 125)) + 1) struct usb_configuration; @@ -249,9 +245,6 @@ int usb_function_activate(struct usb_function *); int usb_interface_id(struct usb_configuration *, struct usb_function *); -int config_ep_by_speed_and_alt(struct usb_gadget *g, struct usb_function *f, - struct usb_ep *_ep, u8 alt); - int config_ep_by_speed(struct usb_gadget *g, struct usb_function *f, struct usb_ep *_ep); @@ -271,7 +264,7 @@ int config_ep_by_speed(struct usb_gadget *g, struct usb_function *f, * @bConfigurationValue: Copied into configuration descriptor. 
* @iConfiguration: Copied into configuration descriptor. * @bmAttributes: Copied into configuration descriptor. - * @MaxPower: Power consumption in mA. Used to compute bMaxPower in the + * @MaxPower: Power consumtion in mA. Used to compute bMaxPower in the * configuration descriptor after considering the bus speed. * @cdev: assigned by @usb_add_config() before calling @bind(); this is * the device associated with this configuration. @@ -437,7 +430,7 @@ static inline struct usb_composite_driver *to_cdriver( #define OS_STRING_IDX 0xEE /** - * struct usb_composite_dev - represents one composite usb gadget + * struct usb_composite_device - represents one composite usb gadget * @gadget: read-only, abstracts the gadget's usb peripheral controller * @req: used for control responses; buffer is pre-allocated * @os_desc_req: used for OS descriptors responses; buffer is pre-allocated @@ -458,7 +451,6 @@ static inline struct usb_composite_driver *to_cdriver( * sure doing that won't hurt too much. * * One notion for how to handle Wireless USB devices involves: - * * (a) a second gadget here, discovery mechanism TBD, but likely * needing separate "register/unregister WUSB gadget" calls; * (b) updates to usb_gadget to include flags "is it wireless", @@ -511,9 +503,8 @@ struct usb_composite_dev { /* protects deactivations and delayed_status counts*/ spinlock_t lock; - /* public: */ - unsigned int setup_pending:1; - unsigned int os_desc_pending:1; + unsigned setup_pending:1; + unsigned os_desc_pending:1; }; extern int usb_string_id(struct usb_composite_dev *c); @@ -525,8 +516,6 @@ extern struct usb_string *usb_gstrings_attach(struct usb_composite_dev *cdev, extern int usb_string_ids_n(struct usb_composite_dev *c, unsigned n); extern void composite_disconnect(struct usb_gadget *gadget); -extern void composite_reset(struct usb_gadget *gadget); - extern int composite_setup(struct usb_gadget *gadget, const struct usb_ctrlrequest *ctrl); extern void composite_suspend(struct usb_gadget 
*gadget); @@ -575,8 +564,8 @@ static inline u16 get_default_bcdDevice(void) { u16 bcdDevice; - bcdDevice = bin2bcd(LINUX_VERSION_MAJOR) << 8; - bcdDevice |= bin2bcd(LINUX_VERSION_PATCHLEVEL); + bcdDevice = bin2bcd((LINUX_VERSION_CODE >> 16 & 0xff)) << 8; + bcdDevice |= bin2bcd((LINUX_VERSION_CODE >> 8 & 0xff)); return bcdDevice; } diff --git a/include/linux/usb/ehci-dbgp.h b/include/linux/usb/ehci-dbgp.h index 62ab380517..7344d9e591 100644 --- a/include/linux/usb/ehci-dbgp.h +++ b/include/linux/usb/ehci-dbgp.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ /* * Standalone EHCI usb debug driver * diff --git a/include/linux/usb/ehci_def.h b/include/linux/usb/ehci_def.h index c892c5bc66..e479033bd7 100644 --- a/include/linux/usb/ehci_def.h +++ b/include/linux/usb/ehci_def.h @@ -1,4 +1,3 @@ -// SPDX-License-Identifier: GPL-2.0+ /* * Copyright (c) 2001-2002 by David Brownell * @@ -45,7 +44,6 @@ struct ehci_caps { #define HCS_PORTROUTED(p) ((p)&(1 << 7)) /* true: port routing */ #define HCS_PPC(p) ((p)&(1 << 4)) /* true: port power control */ #define HCS_N_PORTS(p) (((p)>>0)&0xf) /* bits 3:0, ports on HC */ -#define HCS_N_PORTS_MAX 15 /* N_PORTS valid 0x1-0xF */ u32 hcc_params; /* HCCPARAMS - offset 0x8 */ /* EHCI 1.1 addendum */ @@ -127,9 +125,8 @@ struct ehci_regs { u32 configured_flag; #define FLAG_CF (1<<0) /* true: we'll support "high speed" */ - union { - /* PORTSC: offset 0x44 */ - u32 port_status[HCS_N_PORTS_MAX]; /* up to N_PORTS */ + /* PORTSC: offset 0x44 */ + u32 port_status[0]; /* up to N_PORTS */ /* EHCI 1.1 addendum */ #define PORTSC_SUSPEND_STS_ACK 0 #define PORTSC_SUSPEND_STS_NYET 1 @@ -153,7 +150,7 @@ struct ehci_regs { #define PORT_OWNER (1<<13) /* true: companion hc owns this port */ #define PORT_POWER (1<<12) /* true: has power (see PPC) */ #define PORT_USB11(x) (((x)&(3<<10)) == (1<<10)) /* USB 1.1 device */ -#define PORT_LS_MASK (3<<10) /* Link status (SE0, K or J */ +/* 11:10 for detecting lowspeed devices (reset vs release ownership) */ 
/* 9 reserved */ #define PORT_LPM (1<<9) /* LPM transaction */ #define PORT_RESET (1<<8) /* reset port */ @@ -166,35 +163,28 @@ struct ehci_regs { #define PORT_CSC (1<<1) /* connect status change */ #define PORT_CONNECT (1<<0) /* device connected */ #define PORT_RWC_BITS (PORT_CSC | PORT_PEC | PORT_OCC) - struct { - u32 reserved3[9]; - /* USBMODE: offset 0x68 */ - u32 usbmode; /* USB Device mode */ - }; + + u32 reserved3[9]; + + /* USBMODE: offset 0x68 */ + u32 usbmode; /* USB Device mode */ #define USBMODE_SDIS (1<<3) /* Stream disable */ #define USBMODE_BE (1<<2) /* BE/LE endianness select */ #define USBMODE_CM_HC (3<<0) /* host controller mode */ #define USBMODE_CM_IDLE (0<<0) /* idle state */ - }; + + u32 reserved4[6]; /* Moorestown has some non-standard registers, partially due to the fact that * its EHCI controller has both TT and LPM support. HOSTPCx are extensions to * PORTSCx */ - union { - struct { - u32 reserved4; - /* HOSTPC: offset 0x84 */ - u32 hostpc[HCS_N_PORTS_MAX]; + /* HOSTPC: offset 0x84 */ + u32 hostpc[0]; /* HOSTPC extension */ #define HOSTPC_PHCD (1<<22) /* Phy clock disable */ #define HOSTPC_PSPD (3<<25) /* Port speed detection */ - }; - /* Broadcom-proprietary USB_EHCI_INSNREG00 @ 0x80 */ - u32 brcm_insnreg[4]; - }; - - u32 reserved5[2]; + u32 reserved5[17]; /* USBMODE_EX: offset 0xc8 */ u32 usbmode_ex; /* USB Device mode extension */ diff --git a/include/linux/usb/ehci_pdriver.h b/include/linux/usb/ehci_pdriver.h index 89fc901e77..db0431b39a 100644 --- a/include/linux/usb/ehci_pdriver.h +++ b/include/linux/usb/ehci_pdriver.h @@ -1,4 +1,3 @@ -// SPDX-License-Identifier: GPL-2.0+ /* * Copyright (C) 2012 Hauke Mehrtens * @@ -50,7 +49,6 @@ struct usb_ehci_pdata { unsigned no_io_watchdog:1; unsigned reset_on_resume:1; unsigned dma_mask_64:1; - unsigned spurious_oc:1; /* Turn on all power and clocks */ int (*power_on)(struct platform_device *pdev); diff --git a/include/linux/usb/ezusb.h b/include/linux/usb/ezusb.h index 487047162c..639ee45779 
100644 --- a/include/linux/usb/ezusb.h +++ b/include/linux/usb/ezusb.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef __EZUSB_H #define __EZUSB_H diff --git a/include/linux/usb/functionfs.h b/include/linux/usb/functionfs.h index 570578cc98..71190663f1 100644 --- a/include/linux/usb/functionfs.h +++ b/include/linux/usb/functionfs.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef __LINUX_FUNCTIONFS_H__ #define __LINUX_FUNCTIONFS_H__ 1 diff --git a/include/linux/usb/g_hid.h b/include/linux/usb/g_hid.h index 7581e488c2..50f5745df2 100644 --- a/include/linux/usb/g_hid.h +++ b/include/linux/usb/g_hid.h @@ -1,4 +1,3 @@ -// SPDX-License-Identifier: GPL-2.0+ /* * g_hid.h -- Header file for USB HID gadget driver * diff --git a/include/linux/usb/gadget.h b/include/linux/usb/gadget.h index 10fe57cf40..e4516e9ded 100644 --- a/include/linux/usb/gadget.h +++ b/include/linux/usb/gadget.h @@ -1,11 +1,9 @@ -// SPDX-License-Identifier: GPL-2.0 /* * * * We call the USB code inside a Linux-based peripheral device a "gadget" * driver, except for the hardware-specific bus glue. One USB host can - * talk to many USB gadgets, but the gadgets are only able to communicate - * to one host. + * master many USB gadgets, but the gadgets are only slaved to one host. * * * (C) Copyright 2002-2004 by David Brownell @@ -43,8 +41,6 @@ struct usb_ep; * @num_mapped_sgs: number of SG entries mapped to DMA (internal) * @length: Length of that data * @stream_id: The stream id, when USB3.0 bulk streams are being used - * @is_last: Indicates if this is the last request of a stream_id before - * switching to a different stream (required for DWC3 controllers). * @no_interrupt: If true, hints that no completion irq is needed. * Helpful sometimes with deep request queues that are handled * directly by DMA controllers. 
@@ -52,7 +48,6 @@ struct usb_ep; * by adding a zero length packet as needed; * @short_not_ok: When reading data, makes short packets be * treated as errors (queue stops advancing till cleanup). - * @dma_mapped: Indicates if request has been mapped to DMA (internal) * @complete: Function called when request completes, so this request and * its buffer may be re-used. The function will always be called with * interrupts disabled, and it must not sleep. @@ -64,8 +59,6 @@ struct usb_ep; * invalidated by the error may first be dequeued. * @context: For use by the completion callback * @list: For use by the gadget driver. - * @frame_number: Reports the interval number in (micro)frame in which the - * isochronous transfer was transmitted or received. * @status: Reports completion code, zero or a negative errno. * Normally, faults block the transfer queue from advancing until * the completion callback returns. @@ -107,19 +100,15 @@ struct usb_request { unsigned num_mapped_sgs; unsigned stream_id:16; - unsigned is_last:1; unsigned no_interrupt:1; unsigned zero:1; unsigned short_not_ok:1; - unsigned dma_mapped:1; void (*complete)(struct usb_ep *ep, struct usb_request *req); void *context; struct list_head list; - unsigned frame_number; /* ISO ONLY */ - int status; unsigned actual; }; @@ -137,7 +126,6 @@ struct usb_ep_ops { int (*enable) (struct usb_ep *ep, const struct usb_endpoint_descriptor *desc); int (*disable) (struct usb_ep *ep); - void (*dispose) (struct usb_ep *ep); struct usb_request *(*alloc_request) (struct usb_ep *ep, gfp_t gfp_flags); @@ -197,12 +185,10 @@ struct usb_ep_caps { * @name:identifier for the endpoint, such as "ep-a" or "ep9in-bulk" * @ops: Function pointers used to access hardware-specific operations. * @ep_list:the gadget's ep_list holds all of its endpoints - * @caps:The structure describing types and directions supported by endpoint. - * @enabled: The current endpoint enabled/disabled state. 
- * @claimed: True if this endpoint is claimed by a function. + * @caps:The structure describing types and directions supported by endoint. * @maxpacket:The maximum packet size used on this endpoint. The initial * value can sometimes be reduced (hardware allowing), according to - * the endpoint descriptor used to configure the endpoint. + * the endpoint descriptor used to configure the endpoint. * @maxpacket_limit:The maximum packet size value which can be handled by this * endpoint. It's set once by UDC driver when endpoint is initialized, and * should not be changed. Should not be confused with maxpacket. @@ -295,9 +281,6 @@ struct usb_dcd_config_params { #define USB_DEFAULT_U1_DEV_EXIT_LAT 0x01 /* Less then 1 microsec */ __le16 bU2DevExitLat; /* U2 Device exit Latency */ #define USB_DEFAULT_U2_DEV_EXIT_LAT 0x1F4 /* Less then 500 microsec */ - __u8 besl_baseline; /* Recommended baseline BESL (0-15) */ - __u8 besl_deep; /* Recommended deep BESL (0-15) */ -#define USB_DEFAULT_BESL_UNSPECIFIED 0xFF /* No recommended value */ }; @@ -317,23 +300,17 @@ struct usb_gadget_ops { int (*pullup) (struct usb_gadget *, int is_on); int (*ioctl)(struct usb_gadget *, unsigned code, unsigned long param); - void (*get_config_params)(struct usb_gadget *, - struct usb_dcd_config_params *); + void (*get_config_params)(struct usb_dcd_config_params *); int (*udc_start)(struct usb_gadget *, struct usb_gadget_driver *); int (*udc_stop)(struct usb_gadget *); - void (*udc_set_speed)(struct usb_gadget *, enum usb_device_speed); - void (*udc_set_ssp_rate)(struct usb_gadget *gadget, - enum usb_ssp_rate rate); - void (*udc_async_callbacks)(struct usb_gadget *gadget, bool enable); struct usb_ep *(*match_ep)(struct usb_gadget *, struct usb_endpoint_descriptor *, struct usb_ss_ep_comp_descriptor *); - int (*check_config)(struct usb_gadget *gadget); }; /** - * struct usb_gadget - represents a usb device + * struct usb_gadget - represents a usb slave device * @work: (internal use) Workqueue to be 
used for sysfs_notify() * @udc: struct usb_udc pointer for this gadget * @ops: Function pointers used to access hardware-specific operations. @@ -343,15 +320,10 @@ struct usb_gadget_ops { * @speed: Speed of current connection to USB host. * @max_speed: Maximal speed the UDC can handle. UDC must support this * and all slower speeds. - * @ssp_rate: Current connected SuperSpeed Plus signaling rate and lane count. - * @max_ssp_rate: Maximum SuperSpeed Plus signaling rate and lane count the UDC - * can handle. The UDC must support this and all slower speeds and lower - * number of lanes. * @state: the state we are now (attached, suspended, configured, etc) * @name: Identifies the controller hardware type. Used in diagnostics * and sometimes configuration. * @dev: Driver model state for this abstract device. - * @isoch_delay: value from Set Isoch Delay request. Only valid on SS/SSP * @out_epnum: last used out ep number * @in_epnum: last used in ep number * @mA: last set mA value @@ -374,18 +346,12 @@ struct usb_gadget_ops { * or B-Peripheral wants to take host role. * @quirk_ep_out_aligned_size: epout requires buffer size to be aligned to * MaxPacketSize. - * @quirk_altset_not_supp: UDC controller doesn't support alt settings. - * @quirk_stall_not_supp: UDC controller doesn't support stalling. - * @quirk_zlp_not_supp: UDC controller doesn't support ZLP. * @quirk_avoids_skb_reserve: udc/platform wants to avoid skb_reserve() in * u_ether.c to improve performance. * @is_selfpowered: if the gadget is self-powered. * @deactivated: True if gadget is deactivated - in deactivated state it cannot * be connected. * @connected: True if gadget is connected. - * @lpm_capable: If the gadget max_speed is FULL or HIGH, this flag - * indicates that it supports LPM as per the LPM ECN & errata. - * @irq: the interrupt number for device controller. * * Gadgets have a mostly-portable "gadget driver" implementing device * functions, handling all usb configurations and interfaces. 
Gadget @@ -414,15 +380,9 @@ struct usb_gadget { struct list_head ep_list; /* of usb_ep */ enum usb_device_speed speed; enum usb_device_speed max_speed; - - /* USB SuperSpeed Plus only */ - enum usb_ssp_rate ssp_rate; - enum usb_ssp_rate max_ssp_rate; - enum usb_device_state state; const char *name; struct device dev; - unsigned isoch_delay; unsigned out_epnum; unsigned in_epnum; unsigned mA; @@ -444,12 +404,9 @@ struct usb_gadget { unsigned is_selfpowered:1; unsigned deactivated:1; unsigned connected:1; - unsigned lpm_capable:1; - int irq; }; #define work_to_gadget(w) (container_of((w), struct usb_gadget, work)) -/* Interface to the device model */ static inline void set_gadget_data(struct usb_gadget *gadget, void *data) { dev_set_drvdata(&gadget->dev, data); } static inline void *get_gadget_data(struct usb_gadget *gadget) @@ -458,26 +415,6 @@ static inline struct usb_gadget *dev_to_usb_gadget(struct device *dev) { return container_of(dev, struct usb_gadget, dev); } -static inline struct usb_gadget *usb_get_gadget(struct usb_gadget *gadget) -{ - get_device(&gadget->dev); - return gadget; -} -static inline void usb_put_gadget(struct usb_gadget *gadget) -{ - put_device(&gadget->dev); -} -extern void usb_initialize_gadget(struct device *parent, - struct usb_gadget *gadget, void (*release)(struct device *dev)); -extern int usb_add_gadget(struct usb_gadget *gadget); -extern void usb_del_gadget(struct usb_gadget *gadget); - -/* Legacy device-model interface */ -extern int usb_add_gadget_udc_release(struct device *parent, - struct usb_gadget *gadget, void (*release)(struct device *dev)); -extern int usb_add_gadget_udc(struct device *parent, struct usb_gadget *gadget); -extern void usb_del_gadget_udc(struct usb_gadget *gadget); -extern char *usb_get_gadget_udc_name(void); /* iterates the non-control endpoints; 'tmp' is a struct usb_ep pointer */ #define gadget_for_each_ep(tmp, gadget) \ @@ -492,7 +429,7 @@ extern char *usb_get_gadget_udc_name(void); */ static inline size_t 
usb_ep_align(struct usb_ep *ep, size_t len) { - int max_packet_size = (size_t)usb_endpoint_maxp(ep->desc); + int max_packet_size = (size_t)usb_endpoint_maxp(ep->desc) & 0x7ff; return round_up(len, max_packet_size); } @@ -609,7 +546,6 @@ int usb_gadget_connect(struct usb_gadget *gadget); int usb_gadget_disconnect(struct usb_gadget *gadget); int usb_gadget_deactivate(struct usb_gadget *gadget); int usb_gadget_activate(struct usb_gadget *gadget); -int usb_gadget_check_config(struct usb_gadget *gadget); #else static inline int usb_gadget_frame_number(struct usb_gadget *gadget) { return 0; } @@ -633,14 +569,12 @@ static inline int usb_gadget_deactivate(struct usb_gadget *gadget) { return 0; } static inline int usb_gadget_activate(struct usb_gadget *gadget) { return 0; } -static inline int usb_gadget_check_config(struct usb_gadget *gadget) -{ return 0; } #endif /* CONFIG_USB_GADGET */ /*-------------------------------------------------------------------------*/ /** - * struct usb_gadget_driver - driver for usb gadget devices + * struct usb_gadget_driver - driver for usb 'slave' devices * @function: String describing the gadget's function * @max_speed: Highest speed the driver handles. * @setup: Invoked for ep0 control requests that aren't handled by @@ -768,10 +702,16 @@ int usb_gadget_probe_driver(struct usb_gadget_driver *driver); * it will first disconnect(). The driver is also requested * to unbind() and clean up any device state, before this procedure * finally returns. It's expected that the unbind() functions - * will be in exit sections, so may not be linked in some kernels. + * will in in exit sections, so may not be linked in some kernels. 
*/ int usb_gadget_unregister_driver(struct usb_gadget_driver *driver); +extern int usb_add_gadget_udc_release(struct device *parent, + struct usb_gadget *gadget, void (*release)(struct device *dev)); +extern int usb_add_gadget_udc(struct device *parent, struct usb_gadget *gadget); +extern void usb_del_gadget_udc(struct usb_gadget *gadget); +extern char *usb_get_gadget_udc_name(void); + /*-------------------------------------------------------------------------*/ /* utility to simplify dealing with string descriptors */ @@ -804,14 +744,11 @@ struct usb_gadget_strings { struct usb_gadget_string_container { struct list_head list; - u8 *stash[]; + u8 *stash[0]; }; /* put descriptor for string with that id into buf (buflen >= 256) */ -int usb_gadget_get_string(const struct usb_gadget_strings *table, int id, u8 *buf); - -/* check if the given language identifier is valid */ -bool usb_validate_langid(u16 langid); +int usb_gadget_get_string(struct usb_gadget_strings *table, int id, u8 *buf); /*-------------------------------------------------------------------------*/ @@ -854,7 +791,6 @@ int usb_otg_descriptor_init(struct usb_gadget *gadget, /* utility to simplify map/unmap of usb_requests to/from DMA */ -#ifdef CONFIG_HAS_DMA extern int usb_gadget_map_request_by_dev(struct device *dev, struct usb_request *req, int is_in); extern int usb_gadget_map_request(struct usb_gadget *gadget, @@ -864,17 +800,6 @@ extern void usb_gadget_unmap_request_by_dev(struct device *dev, struct usb_request *req, int is_in); extern void usb_gadget_unmap_request(struct usb_gadget *gadget, struct usb_request *req, int is_in); -#else /* !CONFIG_HAS_DMA */ -static inline int usb_gadget_map_request_by_dev(struct device *dev, - struct usb_request *req, int is_in) { return -ENOSYS; } -static inline int usb_gadget_map_request(struct usb_gadget *gadget, - struct usb_request *req, int is_in) { return -ENOSYS; } - -static inline void usb_gadget_unmap_request_by_dev(struct device *dev, - struct usb_request 
*req, int is_in) { } -static inline void usb_gadget_unmap_request(struct usb_gadget *gadget, - struct usb_request *req, int is_in) { } -#endif /* !CONFIG_HAS_DMA */ /*-------------------------------------------------------------------------*/ diff --git a/include/linux/usb/gadget_configfs.h b/include/linux/usb/gadget_configfs.h index d61aebd681..c36e95730d 100644 --- a/include/linux/usb/gadget_configfs.h +++ b/include/linux/usb/gadget_configfs.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef __GADGET_CONFIGFS__ #define __GADGET_CONFIGFS__ diff --git a/include/linux/usb/gpio_vbus.h b/include/linux/usb/gpio_vbus.h new file mode 100644 index 0000000000..837bba604a --- /dev/null +++ b/include/linux/usb/gpio_vbus.h @@ -0,0 +1,32 @@ +/* + * A simple GPIO VBUS sensing driver for B peripheral only devices + * with internal transceivers. + * Optionally D+ pullup can be controlled by a second GPIO. + * + * Copyright (c) 2008 Philipp Zabel + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + */ + +/** + * struct gpio_vbus_mach_info - configuration for gpio_vbus + * @gpio_vbus: VBUS sensing GPIO + * @gpio_pullup: optional D+ or D- pullup GPIO (else negative/invalid) + * @gpio_vbus_inverted: true if gpio_vbus is active low + * @gpio_pullup_inverted: true if gpio_pullup is active low + * @wakeup: configure gpio_vbus as a wake-up source + * + * The VBUS sensing GPIO should have a pulldown, which will normally be + * part of a resistor ladder turning a 4.0V-5.25V level on VBUS into a + * value the GPIO detects as active. Some systems will use comparators. 
+ */ +struct gpio_vbus_mach_info { + int gpio_vbus; + int gpio_pullup; + bool gpio_vbus_inverted; + bool gpio_pullup_inverted; + bool wakeup; +}; diff --git a/include/linux/usb/hcd.h b/include/linux/usb/hcd.h index d397194881..9602956bb6 100644 --- a/include/linux/usb/hcd.h +++ b/include/linux/usb/hcd.h @@ -1,4 +1,3 @@ -// SPDX-License-Identifier: GPL-2.0+ /* * Copyright (c) 2001-2002 by David Brownell * @@ -25,6 +24,7 @@ #include #include #include +#include #define MAX_TOPO_LEVEL 6 @@ -59,7 +59,7 @@ * USB Host Controller Driver (usb_hcd) framework * * Since "struct usb_bus" is so thin, you can't share much code in it. - * This framework is a layer over that, and should be more shareable. + * This framework is a layer over that, and should be more sharable. */ /*-------------------------------------------------------------------------*/ @@ -72,12 +72,6 @@ struct giveback_urb_bh { struct usb_host_endpoint *completing_ep; }; -enum usb_dev_authorize_policy { - USB_DEVICE_AUTHORIZE_NONE = 0, - USB_DEVICE_AUTHORIZE_ALL = 1, - USB_DEVICE_AUTHORIZE_INTERNAL = 2, -}; - struct usb_hcd { /* @@ -98,7 +92,6 @@ struct usb_hcd { #ifdef CONFIG_PM struct work_struct wakeup_work; /* for remote wakeup */ #endif - struct work_struct died_work; /* for when the device dies */ /* * hardware info/state @@ -110,7 +103,7 @@ struct usb_hcd { * other external phys should be software-transparent */ struct usb_phy *usb_phy; - struct usb_phy_roothub *phy_roothub; + struct phy *phy; /* Flags that need to be manipulated atomically because they can * change while the host controller is running. Always use @@ -124,6 +117,7 @@ struct usb_hcd { #define HCD_FLAG_RH_RUNNING 5 /* root hub is running? */ #define HCD_FLAG_DEAD 6 /* controller has died? */ #define HCD_FLAG_INTF_AUTHORIZED 7 /* authorize interfaces? */ +#define HCD_FLAG_DEV_AUTHORIZED 8 /* authorize devices? */ /* The flags can be tested using these macros; they are likely to * be slightly faster than test_bit(). 
@@ -148,19 +142,14 @@ struct usb_hcd { * or they require explicit user space authorization; this bit is * settable through /sys/class/usb_host/X/authorized_default */ - enum usb_dev_authorize_policy dev_policy; +#define HCD_DEV_AUTHORIZED(hcd) \ + ((hcd)->flags & (1U << HCD_FLAG_DEV_AUTHORIZED)) /* Flags that get set only during HCD registration or removal. */ unsigned rh_registered:1;/* is root hub registered? */ unsigned rh_pollable:1; /* may we poll the root hub? */ unsigned msix_enabled:1; /* driver has MSI-X enabled? */ - unsigned msi_enabled:1; /* driver has MSI enabled? */ - /* - * do not manage the PHY state in the HCD core, instead let the driver - * handle this (for example if the PHY can only be turned on after a - * specific event) - */ - unsigned skip_phy_initialization:1; + unsigned remove_phy:1; /* auto-remove USB phy */ /* The next flag is a stopgap, to be removed when all the HCDs * support the new root-hub polling mechanism. */ @@ -216,9 +205,6 @@ struct usb_hcd { #define HC_IS_RUNNING(state) ((state) & __ACTIVE) #define HC_IS_SUSPENDED(state) ((state) & __SUSPEND) - /* memory pool for HCs having local memory, or %NULL */ - struct gen_pool *localmem_pool; - /* more shared queuing code would be good; it should support * smarter scheduling, handle transaction translators, etc; * input size of periodic table to an interrupt scheduler. @@ -228,7 +214,7 @@ struct usb_hcd { /* The HC driver's private data is stored at the end of * this structure. 
*/ - unsigned long hcd_priv[] + unsigned long hcd_priv[0] __attribute__ ((aligned(sizeof(s64)))); }; @@ -243,6 +229,11 @@ static inline struct usb_hcd *bus_to_hcd(struct usb_bus *bus) return container_of(bus, struct usb_hcd, self); } +struct hcd_timeout { /* timeouts we allocate */ + struct list_head timeout_list; + struct timer_list timer; +}; + /*-------------------------------------------------------------------------*/ @@ -256,14 +247,13 @@ struct hc_driver { int flags; #define HCD_MEMORY 0x0001 /* HC regs use memory (else I/O) */ -#define HCD_DMA 0x0002 /* HC uses DMA */ +#define HCD_LOCAL_MEM 0x0002 /* HC needs local memory */ #define HCD_SHARED 0x0004 /* Two (or more) usb_hcds share HW */ #define HCD_USB11 0x0010 /* USB 1.1 */ #define HCD_USB2 0x0020 /* USB 2.0 */ #define HCD_USB25 0x0030 /* Wireless USB 1.0 (USB 2.5)*/ #define HCD_USB3 0x0040 /* USB 3.0 */ #define HCD_USB31 0x0050 /* USB 3.1 */ -#define HCD_USB32 0x0060 /* USB 3.2 */ #define HCD_MASK 0x0070 #define HCD_BH 0x0100 /* URB complete in BH context */ @@ -299,7 +289,7 @@ struct hc_driver { * (optional) these hooks allow an HCD to override the default DMA * mapping and unmapping routines. In general, they shouldn't be * necessary unless the host controller has special DMA requirements, - * such as alignment constraints. If these are not specified, the + * such as alignment contraints. If these are not specified, the * general usb_hcd_(un)?map_urb_for_dma functions will be used instead * (and it may be a good idea to call these functions in your HCD * implementation) @@ -325,7 +315,6 @@ struct hc_driver { int (*bus_suspend)(struct usb_hcd *); int (*bus_resume)(struct usb_hcd *); int (*start_port_reset)(struct usb_hcd *, unsigned port_num); - unsigned long (*get_resuming_ports)(struct usb_hcd *); /* force handover of high-speed port to full-speed companion */ void (*relinquish_port)(struct usb_hcd *, int); @@ -382,11 +371,6 @@ struct hc_driver { * or bandwidth constraints. 
*/ void (*reset_bandwidth)(struct usb_hcd *, struct usb_device *); - /* Override the endpoint-derived interval - * (if there is any cached hardware state). - */ - void (*fixup_endpoint)(struct usb_hcd *hcd, struct usb_device *udev, - struct usb_host_endpoint *ep, int interval); /* Returns the hardware-chosen device address */ int (*address_device)(struct usb_hcd *, struct usb_device *udev); /* prepares the hardware to send commands to the device */ @@ -414,10 +398,7 @@ struct hc_driver { int (*find_raw_port_number)(struct usb_hcd *, int); /* Call for power on/off the port if necessary */ int (*port_power)(struct usb_hcd *hcd, int portnum, bool enable); - /* Call for SINGLE_STEP_SET_FEATURE Test for USB2 EH certification */ -#define EHSET_TEST_SINGLE_STEP_SET_FEATURE 0x06 - int (*submit_single_step_set_feature)(struct usb_hcd *, - struct urb *, int); + }; static inline int hcd_giveback_urb_in_bh(struct usb_hcd *hcd) @@ -431,11 +412,6 @@ static inline bool hcd_periodic_completion_in_progress(struct usb_hcd *hcd, return hcd->high_prio_bh.completing_ep == ep; } -static inline bool hcd_uses_dma(struct usb_hcd *hcd) -{ - return IS_ENABLED(CONFIG_HAS_DMA) && (hcd->driver->flags & HCD_DMA); -} - extern int usb_hcd_link_urb_to_ep(struct usb_hcd *hcd, struct urb *urb); extern int usb_hcd_check_unlink_urb(struct usb_hcd *hcd, struct urb *urb, int status); @@ -451,8 +427,6 @@ extern void usb_hcd_unmap_urb_setup_for_dma(struct usb_hcd *, struct urb *); extern void usb_hcd_unmap_urb_for_dma(struct usb_hcd *, struct urb *); extern void usb_hcd_flush_endpoint(struct usb_device *udev, struct usb_host_endpoint *ep); -extern void usb_hcd_fixup_endpoint(struct usb_device *udev, - struct usb_host_endpoint *ep, int interval); extern void usb_hcd_disable_endpoint(struct usb_device *udev, struct usb_host_endpoint *ep); extern void usb_hcd_reset_endpoint(struct usb_device *udev, @@ -464,9 +438,6 @@ extern int usb_hcd_alloc_bandwidth(struct usb_device *udev, struct usb_host_interface 
*new_alt); extern int usb_hcd_get_frame_number(struct usb_device *udev); -struct usb_hcd *__usb_create_hcd(const struct hc_driver *driver, - struct device *sysdev, struct device *dev, const char *bus_name, - struct usb_hcd *primary_hcd); extern struct usb_hcd *usb_create_hcd(const struct hc_driver *driver, struct device *dev, const char *bus_name); extern struct usb_hcd *usb_create_shared_hcd(const struct hc_driver *driver, @@ -479,26 +450,15 @@ extern int usb_add_hcd(struct usb_hcd *hcd, unsigned int irqnum, unsigned long irqflags); extern void usb_remove_hcd(struct usb_hcd *hcd); extern int usb_hcd_find_raw_port_number(struct usb_hcd *hcd, int port1); -int usb_hcd_setup_local_mem(struct usb_hcd *hcd, phys_addr_t phys_addr, - dma_addr_t dma, size_t size); struct platform_device; extern void usb_hcd_platform_shutdown(struct platform_device *dev); -#ifdef CONFIG_USB_HCD_TEST_MODE -extern int ehset_single_step_set_feature(struct usb_hcd *hcd, int port); -#else -static inline int ehset_single_step_set_feature(struct usb_hcd *hcd, int port) -{ - return 0; -} -#endif /* CONFIG_USB_HCD_TEST_MODE */ -#ifdef CONFIG_USB_PCI +#ifdef CONFIG_PCI struct pci_dev; struct pci_device_id; extern int usb_hcd_pci_probe(struct pci_dev *dev, - const struct pci_device_id *id, - const struct hc_driver *driver); + const struct pci_device_id *id); extern void usb_hcd_pci_remove(struct pci_dev *dev); extern void usb_hcd_pci_shutdown(struct pci_dev *dev); @@ -507,7 +467,7 @@ extern int usb_hcd_amd_remote_wakeup_quirk(struct pci_dev *dev); #ifdef CONFIG_PM extern const struct dev_pm_ops usb_hcd_pci_pm_ops; #endif -#endif /* CONFIG_USB_PCI */ +#endif /* CONFIG_PCI */ /* pci-ish (pdev null is ok) buffer alloc/mapping support */ void usb_init_pool_max(void); @@ -602,31 +562,26 @@ extern void usb_ep0_reinit(struct usb_device *); ((USB_DIR_IN|USB_TYPE_STANDARD|USB_RECIP_INTERFACE)<<8) #define EndpointRequest \ - ((USB_DIR_IN|USB_TYPE_STANDARD|USB_RECIP_ENDPOINT)<<8) + 
((USB_DIR_IN|USB_TYPE_STANDARD|USB_RECIP_INTERFACE)<<8) #define EndpointOutRequest \ - ((USB_DIR_OUT|USB_TYPE_STANDARD|USB_RECIP_ENDPOINT)<<8) + ((USB_DIR_OUT|USB_TYPE_STANDARD|USB_RECIP_INTERFACE)<<8) /* class requests from the USB 2.0 hub spec, table 11-15 */ -#define HUB_CLASS_REQ(dir, type, request) ((((dir) | (type)) << 8) | (request)) /* GetBusState and SetHubDescriptor are optional, omitted */ -#define ClearHubFeature HUB_CLASS_REQ(USB_DIR_OUT, USB_RT_HUB, USB_REQ_CLEAR_FEATURE) -#define ClearPortFeature HUB_CLASS_REQ(USB_DIR_OUT, USB_RT_PORT, USB_REQ_CLEAR_FEATURE) -#define GetHubDescriptor HUB_CLASS_REQ(USB_DIR_IN, USB_RT_HUB, USB_REQ_GET_DESCRIPTOR) -#define GetHubStatus HUB_CLASS_REQ(USB_DIR_IN, USB_RT_HUB, USB_REQ_GET_STATUS) -#define GetPortStatus HUB_CLASS_REQ(USB_DIR_IN, USB_RT_PORT, USB_REQ_GET_STATUS) -#define SetHubFeature HUB_CLASS_REQ(USB_DIR_OUT, USB_RT_HUB, USB_REQ_SET_FEATURE) -#define SetPortFeature HUB_CLASS_REQ(USB_DIR_OUT, USB_RT_PORT, USB_REQ_SET_FEATURE) -#define ClearTTBuffer HUB_CLASS_REQ(USB_DIR_OUT, USB_RT_PORT, HUB_CLEAR_TT_BUFFER) -#define ResetTT HUB_CLASS_REQ(USB_DIR_OUT, USB_RT_PORT, HUB_RESET_TT) -#define GetTTState HUB_CLASS_REQ(USB_DIR_IN, USB_RT_PORT, HUB_GET_TT_STATE) -#define StopTT HUB_CLASS_REQ(USB_DIR_OUT, USB_RT_PORT, HUB_STOP_TT) +#define ClearHubFeature (0x2000 | USB_REQ_CLEAR_FEATURE) +#define ClearPortFeature (0x2300 | USB_REQ_CLEAR_FEATURE) +#define GetHubDescriptor (0xa000 | USB_REQ_GET_DESCRIPTOR) +#define GetHubStatus (0xa000 | USB_REQ_GET_STATUS) +#define GetPortStatus (0xa300 | USB_REQ_GET_STATUS) +#define SetHubFeature (0x2000 | USB_REQ_SET_FEATURE) +#define SetPortFeature (0x2300 | USB_REQ_SET_FEATURE) /*-------------------------------------------------------------------------*/ /* class requests from USB 3.1 hub spec, table 10-7 */ -#define SetHubDepth HUB_CLASS_REQ(USB_DIR_OUT, USB_RT_HUB, HUB_SET_DEPTH) -#define GetPortErrorCount HUB_CLASS_REQ(USB_DIR_IN, USB_RT_PORT, HUB_GET_PORT_ERR_COUNT) +#define 
SetHubDepth (0x2000 | HUB_SET_DEPTH) +#define GetPortErrorCount (0xa300 | HUB_GET_PORT_ERR_COUNT) /* * Generic bandwidth allocation constants/support @@ -686,16 +641,11 @@ extern wait_queue_head_t usb_kill_urb_queue; #define usb_endpoint_out(ep_dir) (!((ep_dir) & USB_DIR_IN)) #ifdef CONFIG_PM -extern unsigned usb_wakeup_enabled_descendants(struct usb_device *udev); extern void usb_root_hub_lost_power(struct usb_device *rhdev); extern int hcd_bus_suspend(struct usb_device *rhdev, pm_message_t msg); extern int hcd_bus_resume(struct usb_device *rhdev, pm_message_t msg); extern void usb_hcd_resume_root_hub(struct usb_hcd *hcd); #else -static inline unsigned usb_wakeup_enabled_descendants(struct usb_device *udev) -{ - return 0; -} static inline void usb_hcd_resume_root_hub(struct usb_hcd *hcd) { return; @@ -752,6 +702,10 @@ static inline void usbmon_urb_complete(struct usb_bus *bus, struct urb *urb, /* random stuff */ +#define RUN_CONTEXT (in_irq() ? "in_irq" \ + : (in_interrupt() ? "in_interrupt" : "can sleep")) + + /* This rwsem is for use only by the hub driver and ehci-hcd. * Nobody else should touch it. 
*/ diff --git a/include/linux/usb/input.h b/include/linux/usb/input.h index 974befa72a..0e010b220e 100644 --- a/include/linux/usb/input.h +++ b/include/linux/usb/input.h @@ -1,4 +1,3 @@ -// SPDX-License-Identifier: GPL-2.0 /* * Copyright (C) 2005 Dmitry Torokhov * diff --git a/include/linux/usb/iowarrior.h b/include/linux/usb/iowarrior.h index 56559bc532..4fd6513d56 100644 --- a/include/linux/usb/iowarrior.h +++ b/include/linux/usb/iowarrior.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef __LINUX_USB_IOWARRIOR_H #define __LINUX_USB_IOWARRIOR_H diff --git a/include/linux/usb/irda.h b/include/linux/usb/irda.h index 556a801efc..e345ceaf72 100644 --- a/include/linux/usb/irda.h +++ b/include/linux/usb/irda.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ /* * USB IrDA Bridge Device Definition */ @@ -119,22 +118,11 @@ struct usb_irda_cs_descriptor { * 6 - 115200 bps * 7 - 576000 bps * 8 - 1.152 Mbps - * 9 - 4 Mbps + * 9 - 5 mbps * 10..15 - Reserved */ #define USB_IRDA_STATUS_LINK_SPEED 0x0f -#define USB_IRDA_LS_NO_CHANGE 0 -#define USB_IRDA_LS_2400 1 -#define USB_IRDA_LS_9600 2 -#define USB_IRDA_LS_19200 3 -#define USB_IRDA_LS_38400 4 -#define USB_IRDA_LS_57600 5 -#define USB_IRDA_LS_115200 6 -#define USB_IRDA_LS_576000 7 -#define USB_IRDA_LS_1152000 8 -#define USB_IRDA_LS_4000000 9 - /* The following is a 4-bit value used only for * outbound header: * diff --git a/include/linux/usb/isp116x.h b/include/linux/usb/isp116x.h index 1f331c28bf..96ca114e88 100644 --- a/include/linux/usb/isp116x.h +++ b/include/linux/usb/isp116x.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ /* * Board initialization code should put one of these into dev->platform_data * and place the isp116x onto platform_bus. 
diff --git a/include/linux/usb/isp1301.h b/include/linux/usb/isp1301.h index dedb3b2473..d3a851c28b 100644 --- a/include/linux/usb/isp1301.h +++ b/include/linux/usb/isp1301.h @@ -1,4 +1,3 @@ -// SPDX-License-Identifier: GPL-2.0 /* * NXP ISP1301 USB transceiver driver * diff --git a/include/linux/usb/isp1362.h b/include/linux/usb/isp1362.h index 5356c4ae38..642684bb92 100644 --- a/include/linux/usb/isp1362.h +++ b/include/linux/usb/isp1362.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ /* * board initialization code should put one of these into dev->platform_data * and place the isp1362 onto platform_bus. diff --git a/include/linux/usb/isp1760.h b/include/linux/usb/isp1760.h index b75ded28db..de7de53c55 100644 --- a/include/linux/usb/isp1760.h +++ b/include/linux/usb/isp1760.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ /* * board initialization should put one of these into dev->platform_data * and place the isp1760 onto platform_bus named "isp1760-hcd". diff --git a/include/linux/usb/m66592.h b/include/linux/usb/m66592.h index 2dfe681834..a4ba31ab2f 100644 --- a/include/linux/usb/m66592.h +++ b/include/linux/usb/m66592.h @@ -1,4 +1,3 @@ -// SPDX-License-Identifier: GPL-2.0 /* * M66592 driver platform data * diff --git a/include/linux/usb/msm_hsusb_hw.h b/include/linux/usb/msm_hsusb_hw.h new file mode 100644 index 0000000000..974c3796a2 --- /dev/null +++ b/include/linux/usb/msm_hsusb_hw.h @@ -0,0 +1,77 @@ +/* + * Copyright (C) 2007 Google, Inc. + * Author: Brian Swetland + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + */ + +#ifndef __LINUX_USB_GADGET_MSM72K_UDC_H__ +#define __LINUX_USB_GADGET_MSM72K_UDC_H__ + +/* USB phy selector - in TCSR address range */ +#define USB2_PHY_SEL 0xfd4ab000 + +#define USB_AHBBURST (MSM_USB_BASE + 0x0090) +#define USB_AHBMODE (MSM_USB_BASE + 0x0098) +#define USB_GENCONFIG_2 (MSM_USB_BASE + 0x00a0) +#define ULPI_TX_PKT_EN_CLR_FIX BIT(19) + +#define USB_CAPLENGTH (MSM_USB_BASE + 0x0100) /* 8 bit */ + +#define USB_USBCMD (MSM_USB_BASE + 0x0140) +#define USB_PORTSC (MSM_USB_BASE + 0x0184) +#define USB_OTGSC (MSM_USB_BASE + 0x01A4) +#define USB_USBMODE (MSM_USB_BASE + 0x01A8) +#define USB_PHY_CTRL (MSM_USB_BASE + 0x0240) +#define USB_PHY_CTRL2 (MSM_USB_BASE + 0x0278) + +#define GENCONFIG_2_SESS_VLD_CTRL_EN BIT(7) +#define USBCMD_SESS_VLD_CTRL BIT(25) + +#define USBCMD_RESET 2 +#define USB_USBINTR (MSM_USB_BASE + 0x0148) + +#define PORTSC_PHCD (1 << 23) /* phy suspend mode */ +#define PORTSC_PTS_MASK (3 << 30) +#define PORTSC_PTS_ULPI (2 << 30) +#define PORTSC_PTS_SERIAL (3 << 30) + +#define USB_ULPI_VIEWPORT (MSM_USB_BASE + 0x0170) +#define ULPI_RUN (1 << 30) +#define ULPI_WRITE (1 << 29) +#define ULPI_READ (0 << 29) +#define ULPI_ADDR(n) (((n) & 255) << 16) +#define ULPI_DATA(n) ((n) & 255) +#define ULPI_DATA_READ(n) (((n) >> 8) & 255) + +/* synopsys 28nm phy registers */ +#define ULPI_PWR_CLK_MNG_REG 0x88 +#define OTG_COMP_DISABLE BIT(0) + +#define ULPI_MISC_A 0x96 +#define ULPI_MISC_A_VBUSVLDEXTSEL BIT(1) +#define ULPI_MISC_A_VBUSVLDEXT BIT(0) + +#define ASYNC_INTR_CTRL (1 << 29) /* Enable async interrupt */ +#define ULPI_STP_CTRL (1 << 30) /* Block communication with PHY */ +#define PHY_RETEN (1 << 1) /* PHY retention enable/disable */ +#define PHY_POR_ASSERT (1 << 0) /* USB2 28nm PHY POR ASSERT */ + +/* OTG definitions */ +#define OTGSC_INTSTS_MASK (0x7f << 16) +#define OTGSC_ID (1 << 8) +#define OTGSC_BSV (1 << 11) +#define OTGSC_IDIS (1 << 16) +#define OTGSC_BSVIS (1 << 19) +#define OTGSC_IDIE (1 << 24) +#define OTGSC_BSVIE (1 << 27) + 
+#endif /* __LINUX_USB_GADGET_MSM72K_UDC_H__ */ diff --git a/include/linux/usb/musb-ux500.h b/include/linux/usb/musb-ux500.h index c4b7ad9850..1e2c7130f6 100644 --- a/include/linux/usb/musb-ux500.h +++ b/include/linux/usb/musb-ux500.h @@ -1,4 +1,3 @@ -// SPDX-License-Identifier: GPL-2.0+ /* * Copyright (C) 2013 ST-Ericsson AB * diff --git a/include/linux/usb/musb.h b/include/linux/usb/musb.h index fc6c779184..d315c89078 100644 --- a/include/linux/usb/musb.h +++ b/include/linux/usb/musb.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ /* * This is used to for host and peripheral modes of the driver for * Inventra (Multidrop) Highspeed Dual-Role Controllers: (M)HDRC. @@ -67,13 +66,35 @@ struct musb_hdrc_config { /* MUSB configuration-specific details */ unsigned multipoint:1; /* multipoint device */ unsigned dyn_fifo:1 __deprecated; /* supports dynamic fifo sizing */ + unsigned soft_con:1 __deprecated; /* soft connect required */ + unsigned utm_16:1 __deprecated; /* utm data witdh is 16 bits */ + unsigned big_endian:1; /* true if CPU uses big-endian */ + unsigned mult_bulk_tx:1; /* Tx ep required for multbulk pkts */ + unsigned mult_bulk_rx:1; /* Rx ep required for multbulk pkts */ + unsigned high_iso_tx:1; /* Tx ep required for HB iso */ + unsigned high_iso_rx:1; /* Rx ep required for HD iso */ + unsigned dma:1 __deprecated; /* supports DMA */ + unsigned vendor_req:1 __deprecated; /* vendor registers required */ /* need to explicitly de-assert the port reset after resume? 
*/ unsigned host_port_deassert_reset_at_resume:1; u8 num_eps; /* number of endpoints _with_ ep0 */ + u8 dma_channels __deprecated; /* number of dma channels */ + u8 dyn_fifo_size; /* dynamic size in bytes */ + u8 vendor_ctrl __deprecated; /* vendor control reg width */ + u8 vendor_stat __deprecated; /* vendor status reg witdh */ + u8 dma_req_chan __deprecated; /* bitmask for required dma channels */ u8 ram_bits; /* ram address size */ + struct musb_hdrc_eps_bits *eps_bits __deprecated; +#ifdef CONFIG_BLACKFIN + /* A GPIO controlling VRSEL in Blackfin */ + unsigned int gpio_vrsel; + unsigned int gpio_vrsel_active; + /* musb CLKIN in Blackfin in MHZ */ + unsigned char clkin; +#endif u32 maximum_speed; }; diff --git a/include/linux/usb/net2280.h b/include/linux/usb/net2280.h index 08b85caecf..7251202244 100644 --- a/include/linux/usb/net2280.h +++ b/include/linux/usb/net2280.h @@ -1,4 +1,3 @@ -// SPDX-License-Identifier: GPL-2.0+ /* * NetChip 2280 high/full speed USB device controller. * Unlike many such controllers, this one talks PCI. diff --git a/include/linux/usb/of.h b/include/linux/usb/of.h index dba55ccb9b..5ff9032ee1 100644 --- a/include/linux/usb/of.h +++ b/include/linux/usb/of.h @@ -1,4 +1,3 @@ -// SPDX-License-Identifier: GPL-2.0 /* * OF helpers for usb devices. 
* @@ -12,18 +11,13 @@ #include #include -struct usb_device; - #if IS_ENABLED(CONFIG_OF) enum usb_dr_mode of_usb_get_dr_mode_by_phy(struct device_node *np, int arg0); bool of_usb_host_tpl_support(struct device_node *np); int of_usb_update_otg_caps(struct device_node *np, struct usb_otg_caps *otg_caps); -struct device_node *usb_of_get_device_node(struct usb_device *hub, int port1); -bool usb_of_has_combined_node(struct usb_device *udev); -struct device_node *usb_of_get_interface_node(struct usb_device *udev, - u8 config, u8 ifnum); -struct device *usb_of_get_companion_dev(struct device *dev); +struct device_node *usb_of_get_child_node(struct device_node *parent, + int portnum); #else static inline enum usb_dr_mode of_usb_get_dr_mode_by_phy(struct device_node *np, int arg0) @@ -39,21 +33,8 @@ static inline int of_usb_update_otg_caps(struct device_node *np, { return 0; } -static inline struct device_node * -usb_of_get_device_node(struct usb_device *hub, int port1) -{ - return NULL; -} -static inline bool usb_of_has_combined_node(struct usb_device *udev) -{ - return false; -} -static inline struct device_node * -usb_of_get_interface_node(struct usb_device *udev, u8 config, u8 ifnum) -{ - return NULL; -} -static inline struct device *usb_of_get_companion_dev(struct device *dev) +static inline struct device_node *usb_of_get_child_node + (struct device_node *parent, int portnum) { return NULL; } diff --git a/include/linux/usb/ohci_pdriver.h b/include/linux/usb/ohci_pdriver.h index 7eb16cf587..012f2b7eb2 100644 --- a/include/linux/usb/ohci_pdriver.h +++ b/include/linux/usb/ohci_pdriver.h @@ -1,4 +1,3 @@ -// SPDX-License-Identifier: GPL-2.0+ /* * Copyright (C) 2012 Hauke Mehrtens * diff --git a/include/linux/usb/otg-fsm.h b/include/linux/usb/otg-fsm.h index 784659d4dc..7a0350535c 100644 --- a/include/linux/usb/otg-fsm.h +++ b/include/linux/usb/otg-fsm.h @@ -1,4 +1,3 @@ -// SPDX-License-Identifier: GPL-2.0+ /* Copyright (C) 2007,2008 Freescale Semiconductor, Inc. 
* * This program is free software; you can redistribute it and/or modify it @@ -22,6 +21,21 @@ #include #include +#undef VERBOSE + +#ifdef VERBOSE +#define VDBG(fmt, args...) pr_debug("[%s] " fmt , \ + __func__, ## args) +#else +#define VDBG(stuff...) do {} while (0) +#endif + +#ifdef VERBOSE +#define MPC_LOC printk("Current Location [%s]:[%d]\n", __FILE__, __LINE__) +#else +#define MPC_LOC do {} while (0) +#endif + #define PROTO_UNDEF (0) #define PROTO_HOST (1) #define PROTO_GADGET (2) @@ -98,7 +112,7 @@ enum otg_fsm_timer { * @b_bus_req: TRUE during the time that the Application running on the * B-device wants to use the bus * - * Auxiliary inputs (OTG v1.3 only. Obsolete now.) + * Auxilary inputs (OTG v1.3 only. Obsolete now.) * @a_sess_vld: TRUE if the A-device detects that VBUS is above VA_SESS_VLD * @b_bus_suspend: TRUE when the A-device detects that the B-device has put * the bus into suspend @@ -153,7 +167,7 @@ struct otg_fsm { int a_bus_req; int b_bus_req; - /* Auxiliary inputs */ + /* Auxilary inputs */ int a_sess_vld; int b_bus_resume; int b_bus_suspend; @@ -177,7 +191,7 @@ struct otg_fsm { int a_bus_req_inf; int a_clr_err_inf; int b_bus_req_inf; - /* Auxiliary informative variables */ + /* Auxilary informative variables */ int a_suspend_req_inf; /* Timeout indicator for timers */ @@ -196,7 +210,6 @@ struct otg_fsm { struct mutex lock; u8 *host_req_flag; struct delayed_work hnp_polling_work; - bool hnp_work_inited; bool state_changed; }; diff --git a/include/linux/usb/otg.h b/include/linux/usb/otg.h index 6475f880be..67929df86d 100644 --- a/include/linux/usb/otg.h +++ b/include/linux/usb/otg.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ /* USB OTG (On The Go) defines */ /* * @@ -125,9 +124,8 @@ enum usb_dr_mode { * @dev: Pointer to the given device * * The function gets phy interface string from property 'dr_mode', - * and returns the corresponding enum usb_dr_mode + * and returns the correspondig enum usb_dr_mode */ extern enum usb_dr_mode 
usb_get_dr_mode(struct device *dev); -extern enum usb_dr_mode usb_get_role_switch_default_mode(struct device *dev); #endif /* __LINUX_USB_OTG_H */ diff --git a/include/linux/usb/phy.h b/include/linux/usb/phy.h index e4de6bc1f6..31a8068c42 100644 --- a/include/linux/usb/phy.h +++ b/include/linux/usb/phy.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ /* * USB PHY defines * @@ -10,10 +9,8 @@ #ifndef __LINUX_USB_PHY_H #define __LINUX_USB_PHY_H -#include #include #include -#include enum usb_phy_interface { USBPHY_INTERFACE_MODE_UNKNOWN, @@ -74,17 +71,6 @@ struct usb_phy_io_ops { int (*write)(struct usb_phy *x, u32 val, u32 reg); }; -struct usb_charger_current { - unsigned int sdp_min; - unsigned int sdp_max; - unsigned int dcp_min; - unsigned int dcp_max; - unsigned int cdp_min; - unsigned int cdp_max; - unsigned int aca_min; - unsigned int aca_max; -}; - struct usb_phy { struct device *dev; const char *label; @@ -99,19 +85,6 @@ struct usb_phy { struct usb_phy_io_ops *io_ops; void __iomem *io_priv; - /* to support extcon device */ - struct extcon_dev *edev; - struct extcon_dev *id_edev; - struct notifier_block vbus_nb; - struct notifier_block id_nb; - struct notifier_block type_nb; - - /* Support USB charger */ - enum usb_charger_type chg_type; - enum usb_charger_state chg_state; - struct usb_charger_current chg_cur; - struct work_struct chg_work; - /* for notification of usb_phy_events */ struct atomic_notifier_head notifier; @@ -149,12 +122,22 @@ struct usb_phy { enum usb_device_speed speed); int (*notify_disconnect)(struct usb_phy *x, enum usb_device_speed speed); +}; - /* - * Charger detection method can be implemented if you need to - * manually detect the charger type. 
- */ - enum usb_charger_type (*charger_detect)(struct usb_phy *x); +/** + * struct usb_phy_bind - represent the binding for the phy + * @dev_name: the device name of the device that will bind to the phy + * @phy_dev_name: the device name of the phy + * @index: used if a single controller uses multiple phys + * @phy: reference to the phy + * @list: to maintain a linked list of the binding information + */ +struct usb_phy_bind { + const char *dev_name; + const char *phy_dev_name; + u8 index; + struct usb_phy *phy; + struct list_head list; }; /* for board-specific init logic */ @@ -218,19 +201,17 @@ usb_phy_vbus_off(struct usb_phy *x) extern struct usb_phy *usb_get_phy(enum usb_phy_type type); extern struct usb_phy *devm_usb_get_phy(struct device *dev, enum usb_phy_type type); +extern struct usb_phy *usb_get_phy_dev(struct device *dev, u8 index); +extern struct usb_phy *devm_usb_get_phy_dev(struct device *dev, u8 index); extern struct usb_phy *devm_usb_get_phy_by_phandle(struct device *dev, const char *phandle, u8 index); extern struct usb_phy *devm_usb_get_phy_by_node(struct device *dev, struct device_node *node, struct notifier_block *nb); extern void usb_put_phy(struct usb_phy *); extern void devm_usb_put_phy(struct device *dev, struct usb_phy *x); +extern int usb_bind_phy(const char *dev_name, u8 index, + const char *phy_dev_name); extern void usb_phy_set_event(struct usb_phy *x, unsigned long event); -extern void usb_phy_set_charger_current(struct usb_phy *usb_phy, - unsigned int mA); -extern void usb_phy_get_charger_current(struct usb_phy *usb_phy, - unsigned int *min, unsigned int *max); -extern void usb_phy_set_charger_state(struct usb_phy *usb_phy, - enum usb_charger_state state); #else static inline struct usb_phy *usb_get_phy(enum usb_phy_type type) { @@ -243,6 +224,16 @@ static inline struct usb_phy *devm_usb_get_phy(struct device *dev, return ERR_PTR(-ENXIO); } +static inline struct usb_phy *usb_get_phy_dev(struct device *dev, u8 index) +{ + return 
ERR_PTR(-ENXIO); +} + +static inline struct usb_phy *devm_usb_get_phy_dev(struct device *dev, u8 index) +{ + return ERR_PTR(-ENXIO); +} + static inline struct usb_phy *devm_usb_get_phy_by_phandle(struct device *dev, const char *phandle, u8 index) { @@ -263,36 +254,21 @@ static inline void devm_usb_put_phy(struct device *dev, struct usb_phy *x) { } +static inline int usb_bind_phy(const char *dev_name, u8 index, + const char *phy_dev_name) +{ + return -EOPNOTSUPP; +} + static inline void usb_phy_set_event(struct usb_phy *x, unsigned long event) { } - -static inline void usb_phy_set_charger_current(struct usb_phy *usb_phy, - unsigned int mA) -{ -} - -static inline void usb_phy_get_charger_current(struct usb_phy *usb_phy, - unsigned int *min, - unsigned int *max) -{ -} - -static inline void usb_phy_set_charger_state(struct usb_phy *usb_phy, - enum usb_charger_state state) -{ -} #endif static inline int usb_phy_set_power(struct usb_phy *x, unsigned mA) { - if (!x) - return 0; - - usb_phy_set_charger_current(x, mA); - - if (x->set_power) + if (x && x->set_power) return x->set_power(x, mA); return 0; } diff --git a/include/linux/usb/phy_companion.h b/include/linux/usb/phy_companion.h index 263196f050..edd2ec23d2 100644 --- a/include/linux/usb/phy_companion.h +++ b/include/linux/usb/phy_companion.h @@ -1,8 +1,7 @@ -// SPDX-License-Identifier: GPL-2.0+ /* * phy-companion.h -- phy companion to indicate the comparator part of PHY * - * Copyright (C) 2012 Texas Instruments Incorporated - https://www.ti.com + * Copyright (C) 2012 Texas Instruments Incorporated - http://www.ti.com * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or diff --git a/include/linux/usb/quirks.h b/include/linux/usb/quirks.h index eeb7c2157c..de2a722fe3 100644 --- a/include/linux/usb/quirks.h +++ b/include/linux/usb/quirks.h @@ -1,4 +1,3 @@ -/* 
SPDX-License-Identifier: GPL-2.0 */ /* * This file holds the definitions of quirks found in USB devices. * Only quirks that affect the whole device, not an interface, @@ -32,7 +31,7 @@ #define USB_QUIRK_DELAY_INIT BIT(6) /* - * For high speed and super speed interrupt endpoints, the USB 2.0 and + * For high speed and super speed interupt endpoints, the USB 2.0 and * USB 3.0 spec require the interval in microframes * (1 microframe = 125 microseconds) to be calculated as * interval = 2 ^ (bInterval-1). @@ -57,19 +56,4 @@ */ #define USB_QUIRK_LINEAR_FRAME_INTR_BINTERVAL BIT(11) -/* - * Device needs to be disconnected before suspend to prevent spurious - * wakeup. - */ -#define USB_QUIRK_DISCONNECT_SUSPEND BIT(12) - -/* Device needs a pause after every control message. */ -#define USB_QUIRK_DELAY_CTRL_MSG BIT(13) - -/* Hub needs extra delay after resetting its port. */ -#define USB_QUIRK_HUB_SLOW_RESET BIT(14) - -/* device has endpoints that should be ignored */ -#define USB_QUIRK_ENDPOINT_IGNORE BIT(15) - #endif /* __LINUX_USB_QUIRKS_H */ diff --git a/include/linux/usb/r8a66597.h b/include/linux/usb/r8a66597.h index c0753d026b..55805f9dcf 100644 --- a/include/linux/usb/r8a66597.h +++ b/include/linux/usb/r8a66597.h @@ -1,4 +1,3 @@ -// SPDX-License-Identifier: GPL-2.0 /* * R8A66597 driver platform data * diff --git a/include/linux/usb/renesas_usbhs.h b/include/linux/usb/renesas_usbhs.h index d418c55523..ed482765fd 100644 --- a/include/linux/usb/renesas_usbhs.h +++ b/include/linux/usb/renesas_usbhs.h @@ -1,9 +1,7 @@ -// SPDX-License-Identifier: GPL-1.0+ /* * Renesas USB * * Copyright (C) 2011 Renesas Solutions Corp. 
- * Copyright (C) 2019 Renesas Electronics Corporation * Kuninori Morimoto * * This program is distributed in the hope that it will be useful, @@ -18,7 +16,6 @@ */ #ifndef RENESAS_USB_H #define RENESAS_USB_H -#include #include #include @@ -33,6 +30,17 @@ enum { USBHS_MAX, }; +/* + * callback functions table for driver + * + * These functions are called from platform for driver. + * Callback function's pointer will be set before + * renesas_usbhs_platform_callback :: hardware_init was called + */ +struct renesas_usbhs_driver_callback { + int (*notify_hotplug)(struct platform_device *pdev); +} __no_const; + /* * callback functions for platform * @@ -89,13 +97,6 @@ struct renesas_usbhs_platform_callback { * VBUS control is needed for Host */ int (*set_vbus)(struct platform_device *pdev, int enable); - - /* - * option: - * extcon notifier to set host/peripheral mode. - */ - int (*notifier)(struct notifier_block *nb, unsigned long event, - void *data); }; /* @@ -170,18 +171,21 @@ struct renesas_usbhs_driver_param { */ int pio_dma_border; /* default is 64byte */ + uintptr_t type; + u32 enable_gpio; + /* * option: */ + u32 has_otg:1; /* for controlling PWEN/EXTLP */ + u32 has_sudmac:1; /* for SUDMAC */ u32 has_usb_dmac:1; /* for USB-DMAC */ - u32 runtime_pwctrl:1; - u32 has_cnen:1; - u32 cfifo_byte_addr:1; /* CFIFO is byte addressable */ #define USBHS_USB_DMAC_XFER_SIZE 32 /* hardcode the xfer size */ - u32 multi_clks:1; - u32 has_new_pipe_configs:1; }; +#define USBHS_TYPE_RCAR_GEN2 1 +#define USBHS_TYPE_RCAR_GEN3 2 + /* * option: * @@ -196,6 +200,12 @@ struct renesas_usbhs_platform_info { */ struct renesas_usbhs_platform_callback platform_callback; + /* + * driver set these callback functions pointer. 
+ * platform can use it on callback functions + */ + struct renesas_usbhs_driver_callback driver_callback; + /* * option: * @@ -209,4 +219,12 @@ struct renesas_usbhs_platform_info { */ #define renesas_usbhs_get_info(pdev)\ ((struct renesas_usbhs_platform_info *)(pdev)->dev.platform_data) + +#define renesas_usbhs_call_notify_hotplug(pdev) \ + ({ \ + struct renesas_usbhs_driver_callback *dc; \ + dc = &(renesas_usbhs_get_info(pdev)->driver_callback); \ + if (dc && dc->notify_hotplug) \ + dc->notify_hotplug(pdev); \ + }) #endif /* RENESAS_USB_H */ diff --git a/include/linux/usb/rndis_host.h b/include/linux/usb/rndis_host.h index 809bccd084..d44ef85db1 100644 --- a/include/linux/usb/rndis_host.h +++ b/include/linux/usb/rndis_host.h @@ -1,4 +1,3 @@ -// SPDX-License-Identifier: GPL-2.0+ /* * Host Side support for RNDIS Networking Links * Copyright (C) 2005 by David Brownell diff --git a/include/linux/usb/samsung_usb_phy.h b/include/linux/usb/samsung_usb_phy.h new file mode 100644 index 0000000000..916782699f --- /dev/null +++ b/include/linux/usb/samsung_usb_phy.h @@ -0,0 +1,16 @@ +/* + * Copyright (C) 2012 Samsung Electronics Co.Ltd + * http://www.samsung.com/ + * + * Defines phy types for samsung usb phy controllers - HOST or DEIVCE. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. 
+ */ + +enum samsung_usb_phy_type { + USB_PHY_TYPE_DEVICE, + USB_PHY_TYPE_HOST, +}; diff --git a/include/linux/usb/serial.h b/include/linux/usb/serial.h index 16ea5a4cc5..704a1ab824 100644 --- a/include/linux/usb/serial.h +++ b/include/linux/usb/serial.h @@ -1,4 +1,3 @@ -// SPDX-License-Identifier: GPL-2.0 /* * USB Serial Converter stuff * @@ -17,14 +16,17 @@ #include #include #include +#include #include /* The maximum number of ports one device can grab at once */ -#define MAX_NUM_PORTS 16 +#define MAX_NUM_PORTS 8 + +/* parity check flag */ +#define RELEVANT_IFLAG(iflag) (iflag & (IGNBRK|BRKINT|IGNPAR|PARMRK|INPCK)) /* USB serial flags */ #define USB_SERIAL_WRITE_BUSY 0 -#define USB_SERIAL_THROTTLED 1 /** * usb_serial_port: structure for the specific ports of a device. @@ -62,7 +64,10 @@ * @bulk_out_endpointAddress: endpoint address for the bulk out pipe for this * port. * @flags: usb serial port flags + * @write_wait: a wait_queue_head_t used by the port. * @work: work queue entry for the line discipline waking up. 
+ * @throttled: nonzero if the read urb is inactive to throttle the device + * @throttle_req: nonzero if the tty wants to throttle us * @dev: pointer to the serial device * * This structure is used by the usb-serial core and drivers for the specific @@ -107,7 +112,10 @@ struct usb_serial_port { int tx_bytes; unsigned long flags; + wait_queue_head_t write_wait; struct work_struct work; + char throttled; + char throttle_req; unsigned long sysrq; /* sysrq timeout */ struct device dev; }; @@ -130,8 +138,6 @@ static inline void usb_set_serial_port_data(struct usb_serial_port *port, * @dev: pointer to the struct usb_device for this device * @type: pointer to the struct usb_serial_driver for this device * @interface: pointer to the struct usb_interface for this device - * @sibling: pointer to the struct usb_interface of any sibling interface - * @suspend_count: number of suspended (sibling) interfaces * @num_ports: the number of ports this device has * @num_interrupt_in: number of interrupt in endpoints we have * @num_interrupt_out: number of interrupt out endpoints we have @@ -147,17 +153,16 @@ struct usb_serial { struct usb_device *dev; struct usb_serial_driver *type; struct usb_interface *interface; - struct usb_interface *sibling; - unsigned int suspend_count; unsigned char disconnected:1; + unsigned char suspending:1; unsigned char attached:1; unsigned char minors_reserved:1; unsigned char num_ports; unsigned char num_port_pointers; - unsigned char num_interrupt_in; - unsigned char num_interrupt_out; - unsigned char num_bulk_in; - unsigned char num_bulk_out; + char num_interrupt_in; + char num_interrupt_out; + char num_bulk_in; + char num_bulk_out; struct usb_serial_port *port[MAX_NUM_PORTS]; struct kref kref; struct mutex disc_mutex; @@ -176,17 +181,6 @@ static inline void usb_set_serial_data(struct usb_serial *serial, void *data) serial->private = data; } -struct usb_serial_endpoints { - unsigned char num_bulk_in; - unsigned char num_bulk_out; - unsigned char 
num_interrupt_in; - unsigned char num_interrupt_out; - struct usb_endpoint_descriptor *bulk_in[MAX_NUM_PORTS]; - struct usb_endpoint_descriptor *bulk_out[MAX_NUM_PORTS]; - struct usb_endpoint_descriptor *interrupt_in[MAX_NUM_PORTS]; - struct usb_endpoint_descriptor *interrupt_out[MAX_NUM_PORTS]; -}; - /** * usb_serial_driver - describes a usb serial driver * @description: pointer to a string that describes this driver. This string @@ -194,17 +188,12 @@ struct usb_serial_endpoints { * @id_table: pointer to a list of usb_device_id structures that define all * of the devices this structure can support. * @num_ports: the number of different ports this device will have. - * @num_bulk_in: minimum number of bulk-in endpoints - * @num_bulk_out: minimum number of bulk-out endpoints - * @num_interrupt_in: minimum number of interrupt-in endpoints - * @num_interrupt_out: minimum number of interrupt-out endpoints * @bulk_in_size: minimum number of bytes to allocate for bulk-in buffer * (0 = end-point size) * @bulk_out_size: bytes to allocate for bulk-out buffer (0 = end-point size) * @calc_num_ports: pointer to a function to determine how many ports this - * device has dynamically. It can also be used to verify the number of - * endpoints or to modify the port-endpoint mapping. It will be called - * after the probe() callback is called, but before attach(). + * device has dynamically. It will be called after the probe() + * callback is called, but before attach() * @probe: pointer to the driver's probe function. * This will be called when the device is inserted into the system, * but before the device has been fully initialized by the usb_serial @@ -213,7 +202,7 @@ struct usb_serial_endpoints { * Return 0 to continue on with the initialization sequence. Anything * else will abort it. * @attach: pointer to the driver's attach function. 
- * This will be called when the struct usb_serial structure is fully + * This will be called when the struct usb_serial structure is fully set * set up. Do any local initialization of the device, or any private * memory structure allocation at this point in time. * @disconnect: pointer to the driver's disconnect function. This will be @@ -238,32 +227,25 @@ struct usb_serial_endpoints { struct usb_serial_driver { const char *description; const struct usb_device_id *id_table; + char num_ports; struct list_head driver_list; struct device_driver driver; struct usb_driver *usb_driver; struct usb_dynids dynids; - unsigned char num_ports; - - unsigned char num_bulk_in; - unsigned char num_bulk_out; - unsigned char num_interrupt_in; - unsigned char num_interrupt_out; - size_t bulk_in_size; size_t bulk_out_size; int (*probe)(struct usb_serial *serial, const struct usb_device_id *id); int (*attach)(struct usb_serial *serial); - int (*calc_num_ports)(struct usb_serial *serial, - struct usb_serial_endpoints *epds); + int (*calc_num_ports) (struct usb_serial *serial); void (*disconnect)(struct usb_serial *serial); void (*release)(struct usb_serial *serial); int (*port_probe)(struct usb_serial_port *port); - void (*port_remove)(struct usb_serial_port *port); + int (*port_remove)(struct usb_serial_port *port); int (*suspend)(struct usb_serial *serial, pm_message_t message); int (*resume)(struct usb_serial *serial); @@ -276,15 +258,13 @@ struct usb_serial_driver { int (*write)(struct tty_struct *tty, struct usb_serial_port *port, const unsigned char *buf, int count); /* Called only by the tty layer */ - unsigned int (*write_room)(struct tty_struct *tty); + int (*write_room)(struct tty_struct *tty); int (*ioctl)(struct tty_struct *tty, unsigned int cmd, unsigned long arg); - void (*get_serial)(struct tty_struct *tty, struct serial_struct *ss); - int (*set_serial)(struct tty_struct *tty, struct serial_struct *ss); void (*set_termios)(struct tty_struct *tty, struct usb_serial_port 
*port, struct ktermios *old); void (*break_ctl)(struct tty_struct *tty, int break_state); - unsigned int (*chars_in_buffer)(struct tty_struct *tty); + int (*chars_in_buffer)(struct tty_struct *tty); void (*wait_until_sent)(struct tty_struct *tty, long timeout); bool (*tx_empty)(struct usb_serial_port *port); void (*throttle)(struct tty_struct *tty); @@ -316,19 +296,19 @@ struct usb_serial_driver { #define to_usb_serial_driver(d) \ container_of(d, struct usb_serial_driver, driver) -int usb_serial_register_drivers(struct usb_serial_driver *const serial_drivers[], +extern int usb_serial_register_drivers(struct usb_serial_driver *const serial_drivers[], const char *name, const struct usb_device_id *id_table); -void usb_serial_deregister_drivers(struct usb_serial_driver *const serial_drivers[]); -void usb_serial_port_softint(struct usb_serial_port *port); +extern void usb_serial_deregister_drivers(struct usb_serial_driver *const serial_drivers[]); +extern void usb_serial_port_softint(struct usb_serial_port *port); -int usb_serial_suspend(struct usb_interface *intf, pm_message_t message); -int usb_serial_resume(struct usb_interface *intf); +extern int usb_serial_suspend(struct usb_interface *intf, pm_message_t message); +extern int usb_serial_resume(struct usb_interface *intf); /* USB Serial console functions */ #ifdef CONFIG_USB_SERIAL_CONSOLE -void usb_serial_console_init(int minor); -void usb_serial_console_exit(void); -void usb_serial_console_disconnect(struct usb_serial *serial); +extern void usb_serial_console_init(int minor); +extern void usb_serial_console_exit(void); +extern void usb_serial_console_disconnect(struct usb_serial *serial); #else static inline void usb_serial_console_init(int minor) { } static inline void usb_serial_console_exit(void) { } @@ -336,53 +316,47 @@ static inline void usb_serial_console_disconnect(struct usb_serial *serial) {} #endif /* Functions needed by other parts of the usbserial core */ -struct usb_serial_port 
*usb_serial_port_get_by_minor(unsigned int minor); -void usb_serial_put(struct usb_serial *serial); - -int usb_serial_claim_interface(struct usb_serial *serial, struct usb_interface *intf); - -int usb_serial_generic_open(struct tty_struct *tty, struct usb_serial_port *port); -int usb_serial_generic_write_start(struct usb_serial_port *port, gfp_t mem_flags); -int usb_serial_generic_write(struct tty_struct *tty, struct usb_serial_port *port, - const unsigned char *buf, int count); -void usb_serial_generic_close(struct usb_serial_port *port); -int usb_serial_generic_resume(struct usb_serial *serial); -unsigned int usb_serial_generic_write_room(struct tty_struct *tty); -unsigned int usb_serial_generic_chars_in_buffer(struct tty_struct *tty); -void usb_serial_generic_wait_until_sent(struct tty_struct *tty, long timeout); -void usb_serial_generic_read_bulk_callback(struct urb *urb); -void usb_serial_generic_write_bulk_callback(struct urb *urb); -void usb_serial_generic_throttle(struct tty_struct *tty); -void usb_serial_generic_unthrottle(struct tty_struct *tty); -int usb_serial_generic_tiocmiwait(struct tty_struct *tty, unsigned long arg); -int usb_serial_generic_get_icount(struct tty_struct *tty, struct serial_icounter_struct *icount); -int usb_serial_generic_register(void); -void usb_serial_generic_deregister(void); -int usb_serial_generic_submit_read_urbs(struct usb_serial_port *port, gfp_t mem_flags); -void usb_serial_generic_process_read_urb(struct urb *urb); -int usb_serial_generic_prepare_write_buffer(struct usb_serial_port *port, void *dest, size_t size); - -#if defined(CONFIG_USB_SERIAL_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ) -int usb_serial_handle_sysrq_char(struct usb_serial_port *port, unsigned int ch); -int usb_serial_handle_break(struct usb_serial_port *port); -#else -static inline int usb_serial_handle_sysrq_char(struct usb_serial_port *port, unsigned int ch) -{ - return 0; -} -static inline int usb_serial_handle_break(struct usb_serial_port *port) -{ - 
return 0; -} -#endif - -void usb_serial_handle_dcd_change(struct usb_serial_port *usb_port, - struct tty_struct *tty, unsigned int status); +extern struct usb_serial_port *usb_serial_port_get_by_minor(unsigned int minor); +extern void usb_serial_put(struct usb_serial *serial); +extern int usb_serial_generic_open(struct tty_struct *tty, + struct usb_serial_port *port); +extern int usb_serial_generic_write_start(struct usb_serial_port *port, + gfp_t mem_flags); +extern int usb_serial_generic_write(struct tty_struct *tty, + struct usb_serial_port *port, const unsigned char *buf, int count); +extern void usb_serial_generic_close(struct usb_serial_port *port); +extern int usb_serial_generic_resume(struct usb_serial *serial); +extern int usb_serial_generic_write_room(struct tty_struct *tty); +extern int usb_serial_generic_chars_in_buffer(struct tty_struct *tty); +extern void usb_serial_generic_wait_until_sent(struct tty_struct *tty, + long timeout); +extern void usb_serial_generic_read_bulk_callback(struct urb *urb); +extern void usb_serial_generic_write_bulk_callback(struct urb *urb); +extern void usb_serial_generic_throttle(struct tty_struct *tty); +extern void usb_serial_generic_unthrottle(struct tty_struct *tty); +extern int usb_serial_generic_tiocmiwait(struct tty_struct *tty, + unsigned long arg); +extern int usb_serial_generic_get_icount(struct tty_struct *tty, + struct serial_icounter_struct *icount); +extern int usb_serial_generic_register(void); +extern void usb_serial_generic_deregister(void); +extern int usb_serial_generic_submit_read_urbs(struct usb_serial_port *port, + gfp_t mem_flags); +extern void usb_serial_generic_process_read_urb(struct urb *urb); +extern int usb_serial_generic_prepare_write_buffer(struct usb_serial_port *port, + void *dest, size_t size); +extern int usb_serial_handle_sysrq_char(struct usb_serial_port *port, + unsigned int ch); +extern int usb_serial_handle_break(struct usb_serial_port *port); +extern void 
usb_serial_handle_dcd_change(struct usb_serial_port *usb_port, + struct tty_struct *tty, + unsigned int status); -int usb_serial_bus_register(struct usb_serial_driver *device); -void usb_serial_bus_deregister(struct usb_serial_driver *device); +extern int usb_serial_bus_register(struct usb_serial_driver *device); +extern void usb_serial_bus_deregister(struct usb_serial_driver *device); +extern struct usb_serial_driver usb_serial_generic_device; extern struct bus_type usb_serial_bus_type; extern struct tty_driver *usb_serial_tty_driver; @@ -395,7 +369,7 @@ static inline void usb_serial_debug_data(struct device *dev, } /* - * Macro for reporting errors in write path to avoid infinite loop + * Macro for reporting errors in write path to avoid inifinite loop * when port is used as a console. */ #define dev_err_console(usport, fmt, ...) \ diff --git a/include/linux/usb/sl811.h b/include/linux/usb/sl811.h index 6c97f8e664..3afe4d16fc 100644 --- a/include/linux/usb/sl811.h +++ b/include/linux/usb/sl811.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ /* * board initialization should put one of these into dev->platform_data * and place the sl811hs onto platform_bus named "sl811-hcd". diff --git a/include/linux/usb/storage.h b/include/linux/usb/storage.h index e0240f8645..305ee8db7f 100644 --- a/include/linux/usb/storage.h +++ b/include/linux/usb/storage.h @@ -1,4 +1,3 @@ -// SPDX-License-Identifier: GPL-2.0 #ifndef __LINUX_USB_STORAGE_H #define __LINUX_USB_STORAGE_H diff --git a/include/linux/usb/tegra_usb_phy.h b/include/linux/usb/tegra_usb_phy.h index fd1c9f6a4e..1de16c324e 100644 --- a/include/linux/usb/tegra_usb_phy.h +++ b/include/linux/usb/tegra_usb_phy.h @@ -1,4 +1,3 @@ -// SPDX-License-Identifier: GPL-2.0 /* * Copyright (C) 2010 Google, Inc. 
* @@ -17,8 +16,6 @@ #define __TEGRA_USB_PHY_H #include -#include -#include #include /* @@ -77,11 +74,7 @@ struct tegra_usb_phy { struct usb_phy u_phy; bool is_legacy_phy; bool is_ulpi_phy; - struct gpio_desc *reset_gpio; - struct reset_control *pad_rst; - bool wakeup_enabled; - bool pad_wakeup; - bool powered_on; + int reset_gpio; }; void tegra_usb_phy_preresume(struct usb_phy *phy); diff --git a/include/linux/usb/tilegx.h b/include/linux/usb/tilegx.h new file mode 100644 index 0000000000..2d65e34356 --- /dev/null +++ b/include/linux/usb/tilegx.h @@ -0,0 +1,34 @@ +/* + * Copyright 2012 Tilera Corporation. All Rights Reserved. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation, version 2. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or + * NON INFRINGEMENT. See the GNU General Public License for + * more details. + * + * Structure to contain platform-specific data related to Tile-Gx USB + * controllers. + */ + +#ifndef _LINUX_USB_TILEGX_H +#define _LINUX_USB_TILEGX_H + +#include + +struct tilegx_usb_platform_data { + /* GXIO device index. */ + int dev_index; + + /* GXIO device context. */ + gxio_usb_host_context_t usb_ctx; + + /* Device IRQ. 
*/ + unsigned int irq; +}; + +#endif /* _LINUX_USB_TILEGX_H */ diff --git a/include/linux/usb/uas.h b/include/linux/usb/uas.h index aa3ad39d39..3fc8e8b9f0 100644 --- a/include/linux/usb/uas.h +++ b/include/linux/usb/uas.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef __USB_UAS_H__ #define __USB_UAS_H__ diff --git a/include/linux/usb/ulpi.h b/include/linux/usb/ulpi.h index 36c2982780..5f07407a36 100644 --- a/include/linux/usb/ulpi.h +++ b/include/linux/usb/ulpi.h @@ -1,4 +1,3 @@ -// SPDX-License-Identifier: GPL-2.0 /* * ulpi.h -- ULPI defines and function prorotypes * @@ -55,23 +54,12 @@ #if IS_ENABLED(CONFIG_USB_ULPI) struct usb_phy *otg_ulpi_create(struct usb_phy_io_ops *ops, unsigned int flags); - -struct usb_phy *devm_otg_ulpi_create(struct device *dev, - struct usb_phy_io_ops *ops, - unsigned int flags); #else static inline struct usb_phy *otg_ulpi_create(struct usb_phy_io_ops *ops, unsigned int flags) { return NULL; } - -static inline struct usb_phy *devm_otg_ulpi_create(struct device *dev, - struct usb_phy_io_ops *ops, - unsigned int flags) -{ - return NULL; -} #endif #ifdef CONFIG_USB_ULPI_VIEWPORT diff --git a/include/linux/usb/usb338x.h b/include/linux/usb/usb338x.h index 20020c1336..11525d8d89 100644 --- a/include/linux/usb/usb338x.h +++ b/include/linux/usb/usb338x.h @@ -1,4 +1,3 @@ -// SPDX-License-Identifier: GPL-2.0+ /* * USB 338x super/high/full speed USB device controller. * Unlike many such controllers, this one talks PCI. 
@@ -113,10 +112,7 @@ struct usb338x_ll_regs { u32 ll_ltssm_ctrl1; u32 ll_ltssm_ctrl2; u32 ll_ltssm_ctrl3; - u32 unused1; - - /* 0x710 */ - u32 unused2; + u32 unused[2]; u32 ll_general_ctrl0; u32 ll_general_ctrl1; #define PM_U3_AUTO_EXIT 29 @@ -139,41 +135,29 @@ struct usb338x_ll_regs { u32 ll_general_ctrl2; #define SELECT_INVERT_LANE_POLARITY 7 #define FORCE_INVERT_LANE_POLARITY 6 - - /* 0x720 */ u32 ll_general_ctrl3; u32 ll_general_ctrl4; u32 ll_error_gen; - u32 unused3; +} __packed; - /* 0x730 */ - u32 unused4[4]; - - /* 0x740 */ - u32 unused5[2]; +struct usb338x_ll_lfps_regs { + /* offset 0x748 */ u32 ll_lfps_5; #define TIMER_LFPS_6US 16 u32 ll_lfps_6; #define TIMER_LFPS_80US 0 +} __packed; - /* 0x750 */ - u32 unused6[8]; - - /* 0x770 */ - u32 unused7[3]; +struct usb338x_ll_tsn_regs { + /* offset 0x77C */ u32 ll_tsn_counters_2; #define HOT_TX_NORESET_TS2 24 - - /* 0x780 */ u32 ll_tsn_counters_3; #define HOT_RX_RESET_TS2 0 - u32 unused8[3]; +} __packed; - /* 0x790 */ - u32 unused9; - u32 ll_lfps_timers_2; -#define LFPS_TIMERS_2_WORKAROUND_VALUE 0x084d - u32 unused10; +struct usb338x_ll_chi_regs { + /* offset 0x79C */ u32 ll_tsn_chicken_bit; #define RECOVERY_IDLE_TO_RECOVER_FMW 3 } __packed; diff --git a/include/linux/usb/usb_phy_generic.h b/include/linux/usb/usb_phy_generic.h index cd9e70a552..c13632d529 100644 --- a/include/linux/usb/usb_phy_generic.h +++ b/include/linux/usb/usb_phy_generic.h @@ -1,8 +1,19 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef __LINUX_USB_NOP_XCEIV_H #define __LINUX_USB_NOP_XCEIV_H #include +#include + +struct usb_phy_generic_platform_data { + enum usb_phy_type type; + unsigned long clk_rate; + + /* if set fails with -EPROBE_DEFER if can't get regulator */ + unsigned int needs_vcc:1; + unsigned int needs_reset:1; /* deprecated */ + int gpio_reset; + struct gpio_desc *gpiod_vbus; +}; #if IS_ENABLED(CONFIG_NOP_USB_XCEIV) /* sometimes transceivers are accessed only through e.g. 
ULPI */ diff --git a/include/linux/usb/usbnet.h b/include/linux/usb/usbnet.h index 8336e86ce6..6e0ce8c7b8 100644 --- a/include/linux/usb/usbnet.h +++ b/include/linux/usb/usbnet.h @@ -1,4 +1,3 @@ -// SPDX-License-Identifier: GPL-2.0+ /* * USB Networking Link Interface * @@ -28,7 +27,7 @@ struct usbnet { /* housekeeping */ struct usb_device *udev; struct usb_interface *intf; - const struct driver_info *driver_info; + struct driver_info *driver_info; const char *driver_name; void *driver_priv; wait_queue_head_t wait; @@ -53,9 +52,6 @@ struct usbnet { u32 hard_mtu; /* count any extra framing */ size_t rx_urb_size; /* size for rx urbs */ struct mii_if_info mii; - long rx_speed; /* If MII not used */ - long tx_speed; /* If MII not used */ -# define SPEED_UNSET -1 /* various kinds of pending driver work */ struct sk_buff_head rxq; @@ -83,7 +79,6 @@ struct usbnet { # define EVENT_RX_KILL 10 # define EVENT_LINK_CHANGE 11 # define EVENT_SET_RX_MODE 12 -# define EVENT_NO_IP_ALIGN 13 }; static inline struct usb_driver *driver_of(struct usb_interface *intf) @@ -208,9 +203,7 @@ struct cdc_state { struct usb_interface *data; }; -extern void usbnet_cdc_update_filter(struct usbnet *dev); extern int usbnet_generic_cdc_bind(struct usbnet *, struct usb_interface *); -extern int usbnet_ether_cdc_bind(struct usbnet *dev, struct usb_interface *intf); extern int usbnet_cdc_bind(struct usbnet *, struct usb_interface *); extern void usbnet_cdc_unbind(struct usbnet *, struct usb_interface *); extern void usbnet_cdc_status(struct usbnet *, struct urb *); @@ -255,7 +248,7 @@ extern int usbnet_open(struct net_device *net); extern int usbnet_stop(struct net_device *net); extern netdev_tx_t usbnet_start_xmit(struct sk_buff *skb, struct net_device *net); -extern void usbnet_tx_timeout(struct net_device *net, unsigned int txqueue); +extern void usbnet_tx_timeout(struct net_device *net); extern int usbnet_change_mtu(struct net_device *net, int new_mtu); extern int usbnet_get_endpoints(struct usbnet 
*, struct usb_interface *); @@ -268,16 +261,13 @@ extern void usbnet_pause_rx(struct usbnet *); extern void usbnet_resume_rx(struct usbnet *); extern void usbnet_purge_paused_rxq(struct usbnet *); -extern int usbnet_get_link_ksettings_mii(struct net_device *net, - struct ethtool_link_ksettings *cmd); -extern int usbnet_set_link_ksettings_mii(struct net_device *net, - const struct ethtool_link_ksettings *cmd); -extern int usbnet_get_link_ksettings_internal(struct net_device *net, - struct ethtool_link_ksettings *cmd); +extern int usbnet_get_settings(struct net_device *net, + struct ethtool_cmd *cmd); +extern int usbnet_set_settings(struct net_device *net, + struct ethtool_cmd *cmd); extern u32 usbnet_get_link(struct net_device *net); extern u32 usbnet_get_msglevel(struct net_device *); extern void usbnet_set_msglevel(struct net_device *, u32); -extern void usbnet_set_rx_mode(struct net_device *net); extern void usbnet_get_drvinfo(struct net_device *, struct ethtool_drvinfo *); extern int usbnet_nway_reset(struct net_device *net); diff --git a/include/linux/usb/wusb-wa.h b/include/linux/usb/wusb-wa.h new file mode 100644 index 0000000000..c125713076 --- /dev/null +++ b/include/linux/usb/wusb-wa.h @@ -0,0 +1,303 @@ +/* + * Wireless USB Wire Adapter constants and structures. + * + * Copyright (C) 2005-2006 Intel Corporation. + * Inaky Perez-Gonzalez + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License version + * 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. + * + * + * FIXME: docs + * FIXME: organize properly, group logically + * + * All the event structures are defined in uwb/spec.h, as they are + * common to the WHCI and WUSB radio control interfaces. + * + * References: + * [WUSB] Wireless Universal Serial Bus Specification, revision 1.0, ch8 + */ +#ifndef __LINUX_USB_WUSB_WA_H +#define __LINUX_USB_WUSB_WA_H + +/** + * Radio Command Request for the Radio Control Interface + * + * Radio Control Interface command and event codes are the same as + * WHCI, and listed in include/linux/uwb.h:UWB_RC_{CMD,EVT}_* + */ +enum { + WA_EXEC_RC_CMD = 40, /* Radio Control command Request */ +}; + +/* Wireless Adapter Requests ([WUSB] table 8-51) */ +enum { + WUSB_REQ_ADD_MMC_IE = 20, + WUSB_REQ_REMOVE_MMC_IE = 21, + WUSB_REQ_SET_NUM_DNTS = 22, + WUSB_REQ_SET_CLUSTER_ID = 23, + WUSB_REQ_SET_DEV_INFO = 24, + WUSB_REQ_GET_TIME = 25, + WUSB_REQ_SET_STREAM_IDX = 26, + WUSB_REQ_SET_WUSB_MAS = 27, + WUSB_REQ_CHAN_STOP = 28, +}; + + +/* Wireless Adapter WUSB Channel Time types ([WUSB] table 8-52) */ +enum { + WUSB_TIME_ADJ = 0, + WUSB_TIME_BPST = 1, + WUSB_TIME_WUSB = 2, +}; + +enum { + WA_ENABLE = 0x01, + WA_RESET = 0x02, + RPIPE_PAUSE = 0x1, + RPIPE_STALL = 0x2, +}; + +/* Responses from Get Status request ([WUSB] section 8.3.1.6) */ +enum { + WA_STATUS_ENABLED = 0x01, + WA_STATUS_RESETTING = 0x02 +}; + +enum rpipe_crs { + RPIPE_CRS_CTL = 0x01, + RPIPE_CRS_ISO = 0x02, + RPIPE_CRS_BULK = 0x04, + RPIPE_CRS_INTR = 0x08 +}; + +/** + * RPipe descriptor ([WUSB] section 8.5.2.11) + * + * FIXME: explain rpipes + */ +struct usb_rpipe_descriptor { + u8 bLength; + u8 bDescriptorType; + __le16 wRPipeIndex; + __le16 wRequests; + __le16 wBlocks; /* rw if 0 */ + __le16 wMaxPacketSize; /* rw */ + union { + u8 dwa_bHSHubAddress; /* 
rw: DWA. */ + u8 hwa_bMaxBurst; /* rw: HWA. */ + }; + union { + u8 dwa_bHSHubPort; /* rw: DWA. */ + u8 hwa_bDeviceInfoIndex; /* rw: HWA. */ + }; + u8 bSpeed; /* rw: xfer rate 'enum uwb_phy_rate' */ + union { + u8 dwa_bDeviceAddress; /* rw: DWA Target device address. */ + u8 hwa_reserved; /* rw: HWA. */ + }; + u8 bEndpointAddress; /* rw: Target EP address */ + u8 bDataSequence; /* ro: Current Data sequence */ + __le32 dwCurrentWindow; /* ro */ + u8 bMaxDataSequence; /* ro?: max supported seq */ + u8 bInterval; /* rw: */ + u8 bOverTheAirInterval; /* rw: */ + u8 bmAttribute; /* ro? */ + u8 bmCharacteristics; /* ro? enum rpipe_attr, supported xsactions */ + u8 bmRetryOptions; /* rw? */ + __le16 wNumTransactionErrors; /* rw */ +} __attribute__ ((packed)); + +/** + * Wire Adapter Notification types ([WUSB] sections 8.4.5 & 8.5.4) + * + * These are the notifications coming on the notification endpoint of + * an HWA and a DWA. + */ +enum wa_notif_type { + DWA_NOTIF_RWAKE = 0x91, + DWA_NOTIF_PORTSTATUS = 0x92, + WA_NOTIF_TRANSFER = 0x93, + HWA_NOTIF_BPST_ADJ = 0x94, + HWA_NOTIF_DN = 0x95, +}; + +/** + * Wire Adapter notification header + * + * Notifications coming from a wire adapter use a common header + * defined in [WUSB] sections 8.4.5 & 8.5.4. + */ +struct wa_notif_hdr { + u8 bLength; + u8 bNotifyType; /* enum wa_notif_type */ +} __packed; + +/** + * HWA DN Received notification [(WUSB] section 8.5.4.2) + * + * The DNData is specified in WUSB1.0[7.6]. For each device + * notification we received, we just need to dispatch it. + * + * @dndata: this is really an array of notifications, but all start + * with the same header. 
+ */ +struct hwa_notif_dn { + struct wa_notif_hdr hdr; + u8 bSourceDeviceAddr; /* from errata 2005/07 */ + u8 bmAttributes; + struct wusb_dn_hdr dndata[]; +} __packed; + +/* [WUSB] section 8.3.3 */ +enum wa_xfer_type { + WA_XFER_TYPE_CTL = 0x80, + WA_XFER_TYPE_BI = 0x81, /* bulk/interrupt */ + WA_XFER_TYPE_ISO = 0x82, + WA_XFER_RESULT = 0x83, + WA_XFER_ABORT = 0x84, + WA_XFER_ISO_PACKET_INFO = 0xA0, + WA_XFER_ISO_PACKET_STATUS = 0xA1, +}; + +/* [WUSB] section 8.3.3 */ +struct wa_xfer_hdr { + u8 bLength; /* 0x18 */ + u8 bRequestType; /* 0x80 WA_REQUEST_TYPE_CTL */ + __le16 wRPipe; /* RPipe index */ + __le32 dwTransferID; /* Host-assigned ID */ + __le32 dwTransferLength; /* Length of data to xfer */ + u8 bTransferSegment; +} __packed; + +struct wa_xfer_ctl { + struct wa_xfer_hdr hdr; + u8 bmAttribute; + __le16 wReserved; + struct usb_ctrlrequest baSetupData; +} __packed; + +struct wa_xfer_bi { + struct wa_xfer_hdr hdr; + u8 bReserved; + __le16 wReserved; +} __packed; + +/* [WUSB] section 8.5.5 */ +struct wa_xfer_hwaiso { + struct wa_xfer_hdr hdr; + u8 bReserved; + __le16 wPresentationTime; + __le32 dwNumOfPackets; +} __packed; + +struct wa_xfer_packet_info_hwaiso { + __le16 wLength; + u8 bPacketType; + u8 bReserved; + __le16 PacketLength[0]; +} __packed; + +struct wa_xfer_packet_status_len_hwaiso { + __le16 PacketLength; + __le16 PacketStatus; +} __packed; + +struct wa_xfer_packet_status_hwaiso { + __le16 wLength; + u8 bPacketType; + u8 bReserved; + struct wa_xfer_packet_status_len_hwaiso PacketStatus[0]; +} __packed; + +/* [WUSB] section 8.3.3.5 */ +struct wa_xfer_abort { + u8 bLength; + u8 bRequestType; + __le16 wRPipe; /* RPipe index */ + __le32 dwTransferID; /* Host-assigned ID */ +} __packed; + +/** + * WA Transfer Complete notification ([WUSB] section 8.3.3.3) + * + */ +struct wa_notif_xfer { + struct wa_notif_hdr hdr; + u8 bEndpoint; + u8 Reserved; +} __packed; + +/** Transfer result basic codes [WUSB] table 8-15 */ +enum { + WA_XFER_STATUS_SUCCESS, + 
WA_XFER_STATUS_HALTED, + WA_XFER_STATUS_DATA_BUFFER_ERROR, + WA_XFER_STATUS_BABBLE, + WA_XFER_RESERVED, + WA_XFER_STATUS_NOT_FOUND, + WA_XFER_STATUS_INSUFFICIENT_RESOURCE, + WA_XFER_STATUS_TRANSACTION_ERROR, + WA_XFER_STATUS_ABORTED, + WA_XFER_STATUS_RPIPE_NOT_READY, + WA_XFER_INVALID_FORMAT, + WA_XFER_UNEXPECTED_SEGMENT_NUMBER, + WA_XFER_STATUS_RPIPE_TYPE_MISMATCH, +}; + +/** [WUSB] section 8.3.3.4 */ +struct wa_xfer_result { + struct wa_notif_hdr hdr; + __le32 dwTransferID; + __le32 dwTransferLength; + u8 bTransferSegment; + u8 bTransferStatus; + __le32 dwNumOfPackets; +} __packed; + +/** + * Wire Adapter Class Descriptor ([WUSB] section 8.5.2.7). + * + * NOTE: u16 fields are read Little Endian from the hardware. + * + * @bNumPorts is the original max number of devices that the host can + * connect; we might chop this so the stack can handle + * it. In case you need to access it, use wusbhc->ports_max + * if it is a Wireless USB WA. + */ +struct usb_wa_descriptor { + u8 bLength; + u8 bDescriptorType; + __le16 bcdWAVersion; + u8 bNumPorts; /* don't use!! 
*/ + u8 bmAttributes; /* Reserved == 0 */ + __le16 wNumRPipes; + __le16 wRPipeMaxBlock; + u8 bRPipeBlockSize; + u8 bPwrOn2PwrGood; + u8 bNumMMCIEs; + u8 DeviceRemovable; /* FIXME: in DWA this is up to 16 bytes */ +} __packed; + +/** + * HWA Device Information Buffer (WUSB1.0[T8.54]) + */ +struct hwa_dev_info { + u8 bmDeviceAvailability[32]; /* FIXME: ignored for now */ + u8 bDeviceAddress; + __le16 wPHYRates; + u8 bmDeviceAttribute; +} __packed; + +#endif /* #ifndef __LINUX_USB_WUSB_WA_H */ diff --git a/include/linux/usb/wusb.h b/include/linux/usb/wusb.h new file mode 100644 index 0000000000..eeb28329fa --- /dev/null +++ b/include/linux/usb/wusb.h @@ -0,0 +1,377 @@ +/* + * Wireless USB Standard Definitions + * Event Size Tables + * + * Copyright (C) 2005-2006 Intel Corporation + * Inaky Perez-Gonzalez + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License version + * 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. + * + * + * FIXME: docs + * FIXME: organize properly, group logically + * + * All the event structures are defined in uwb/spec.h, as they are + * common to the WHCI and WUSB radio control interfaces. + */ + +#ifndef __WUSB_H__ +#define __WUSB_H__ + +#include +#include +#include +#include +#include + +/** + * WUSB Information Element header + * + * I don't know why, they decided to make it different to the MBOA MAC + * IE Header; beats me. 
+ */ +struct wuie_hdr { + u8 bLength; + u8 bIEIdentifier; +} __attribute__((packed)); + +enum { + WUIE_ID_WCTA = 0x80, + WUIE_ID_CONNECTACK, + WUIE_ID_HOST_INFO, + WUIE_ID_CHANGE_ANNOUNCE, + WUIE_ID_DEVICE_DISCONNECT, + WUIE_ID_HOST_DISCONNECT, + WUIE_ID_KEEP_ALIVE = 0x89, + WUIE_ID_ISOCH_DISCARD, + WUIE_ID_RESET_DEVICE, +}; + +/** + * Maximum number of array elements in a WUSB IE. + * + * WUSB1.0[7.5 before table 7-38] says that in WUSB IEs that + * are "arrays" have to limited to 4 elements. So we define it + * like that to ease up and submit only the neeed size. + */ +#define WUIE_ELT_MAX 4 + +/** + * Wrapper for the data that defines a CHID, a CDID or a CK + * + * WUSB defines that CHIDs, CDIDs and CKs are a 16 byte string of + * data. In order to avoid confusion and enforce types, we wrap it. + * + * Make it packed, as we use it in some hw definitions. + */ +struct wusb_ckhdid { + u8 data[16]; +} __attribute__((packed)); + +static const struct wusb_ckhdid wusb_ckhdid_zero = { .data = { 0 } }; + +#define WUSB_CKHDID_STRSIZE (3 * sizeof(struct wusb_ckhdid) + 1) + +/** + * WUSB IE: Host Information (WUSB1.0[7.5.2]) + * + * Used to provide information about the host to the Wireless USB + * devices in range (CHID can be used as an ASCII string). + */ +struct wuie_host_info { + struct wuie_hdr hdr; + __le16 attributes; + struct wusb_ckhdid CHID; +} __attribute__((packed)); + +/** + * WUSB IE: Connect Ack (WUSB1.0[7.5.1]) + * + * Used to acknowledge device connect requests. See note for + * WUIE_ELT_MAX. 
+ */ +struct wuie_connect_ack { + struct wuie_hdr hdr; + struct { + struct wusb_ckhdid CDID; + u8 bDeviceAddress; /* 0 means unused */ + u8 bReserved; + } blk[WUIE_ELT_MAX]; +} __attribute__((packed)); + +/** + * WUSB IE Host Information Element, Connect Availability + * + * WUSB1.0[7.5.2], bmAttributes description + */ +enum { + WUIE_HI_CAP_RECONNECT = 0, + WUIE_HI_CAP_LIMITED, + WUIE_HI_CAP_RESERVED, + WUIE_HI_CAP_ALL, +}; + +/** + * WUSB IE: Channel Stop (WUSB1.0[7.5.8]) + * + * Tells devices the host is going to stop sending MMCs and will disappear. + */ +struct wuie_channel_stop { + struct wuie_hdr hdr; + u8 attributes; + u8 timestamp[3]; +} __attribute__((packed)); + +/** + * WUSB IE: Keepalive (WUSB1.0[7.5.9]) + * + * Ask device(s) to send keepalives. + */ +struct wuie_keep_alive { + struct wuie_hdr hdr; + u8 bDeviceAddress[WUIE_ELT_MAX]; +} __attribute__((packed)); + +/** + * WUSB IE: Reset device (WUSB1.0[7.5.11]) + * + * Tell device to reset; in all truth, we can fit 4 CDIDs, but we only + * use it for one at the time... + * + * In any case, this request is a wee bit silly: why don't they target + * by address?? + */ +struct wuie_reset { + struct wuie_hdr hdr; + struct wusb_ckhdid CDID; +} __attribute__((packed)); + +/** + * WUSB IE: Disconnect device (WUSB1.0[7.5.11]) + * + * Tell device to disconnect; we can fit 4 addresses, but we only use + * it for one at the time... + */ +struct wuie_disconnect { + struct wuie_hdr hdr; + u8 bDeviceAddress; + u8 padding; +} __attribute__((packed)); + +/** + * WUSB IE: Host disconnect ([WUSB] section 7.5.5) + * + * Tells all connected devices to disconnect. 
+ */ +struct wuie_host_disconnect { + struct wuie_hdr hdr; +} __attribute__((packed)); + +/** + * WUSB Device Notification header (WUSB1.0[7.6]) + */ +struct wusb_dn_hdr { + u8 bType; + u8 notifdata[]; +} __attribute__((packed)); + +/** Device Notification codes (WUSB1.0[Table 7-54]) */ +enum WUSB_DN { + WUSB_DN_CONNECT = 0x01, + WUSB_DN_DISCONNECT = 0x02, + WUSB_DN_EPRDY = 0x03, + WUSB_DN_MASAVAILCHANGED = 0x04, + WUSB_DN_RWAKE = 0x05, + WUSB_DN_SLEEP = 0x06, + WUSB_DN_ALIVE = 0x07, +}; + +/** WUSB Device Notification Connect */ +struct wusb_dn_connect { + struct wusb_dn_hdr hdr; + __le16 attributes; + struct wusb_ckhdid CDID; +} __attribute__((packed)); + +static inline int wusb_dn_connect_prev_dev_addr(const struct wusb_dn_connect *dn) +{ + return le16_to_cpu(dn->attributes) & 0xff; +} + +static inline int wusb_dn_connect_new_connection(const struct wusb_dn_connect *dn) +{ + return (le16_to_cpu(dn->attributes) >> 8) & 0x1; +} + +static inline int wusb_dn_connect_beacon_behavior(const struct wusb_dn_connect *dn) +{ + return (le16_to_cpu(dn->attributes) >> 9) & 0x03; +} + +/** Device is alive (aka: pong) (WUSB1.0[7.6.7]) */ +struct wusb_dn_alive { + struct wusb_dn_hdr hdr; +} __attribute__((packed)); + +/** Device is disconnecting (WUSB1.0[7.6.2]) */ +struct wusb_dn_disconnect { + struct wusb_dn_hdr hdr; +} __attribute__((packed)); + +/* General constants */ +enum { + WUSB_TRUST_TIMEOUT_MS = 4000, /* [WUSB] section 4.15.1 */ +}; + +static inline size_t ckhdid_printf(char *pr_ckhdid, size_t size, + const struct wusb_ckhdid *ckhdid) +{ + return scnprintf(pr_ckhdid, size, + "%02hx %02hx %02hx %02hx %02hx %02hx %02hx %02hx " + "%02hx %02hx %02hx %02hx %02hx %02hx %02hx %02hx", + ckhdid->data[0], ckhdid->data[1], + ckhdid->data[2], ckhdid->data[3], + ckhdid->data[4], ckhdid->data[5], + ckhdid->data[6], ckhdid->data[7], + ckhdid->data[8], ckhdid->data[9], + ckhdid->data[10], ckhdid->data[11], + ckhdid->data[12], ckhdid->data[13], + ckhdid->data[14], ckhdid->data[15]); 
+} + +/* + * WUSB Crypto stuff (WUSB1.0[6]) + */ + +extern const char *wusb_et_name(u8); + +/** + * WUSB key index WUSB1.0[7.3.2.4], for usage when setting keys for + * the host or the device. + */ +static inline u8 wusb_key_index(int index, int type, int originator) +{ + return (originator << 6) | (type << 4) | index; +} + +#define WUSB_KEY_INDEX_TYPE_PTK 0 /* for HWA only */ +#define WUSB_KEY_INDEX_TYPE_ASSOC 1 +#define WUSB_KEY_INDEX_TYPE_GTK 2 +#define WUSB_KEY_INDEX_ORIGINATOR_HOST 0 +#define WUSB_KEY_INDEX_ORIGINATOR_DEVICE 1 +/* bits 0-3 used for the key index. */ +#define WUSB_KEY_INDEX_MAX 15 + +/* A CCM Nonce, defined in WUSB1.0[6.4.1] */ +struct aes_ccm_nonce { + u8 sfn[6]; /* Little Endian */ + u8 tkid[3]; /* LE */ + struct uwb_dev_addr dest_addr; + struct uwb_dev_addr src_addr; +} __attribute__((packed)); + +/* A CCM operation label, defined on WUSB1.0[6.5.x] */ +struct aes_ccm_label { + u8 data[14]; +} __attribute__((packed)); + +/* + * Input to the key derivation sequence defined in + * WUSB1.0[6.5.1]. Rest of the data is in the CCM Nonce passed to the + * PRF function. + */ +struct wusb_keydvt_in { + u8 hnonce[16]; + u8 dnonce[16]; +} __attribute__((packed)); + +/* + * Output from the key derivation sequence defined in + * WUSB1.0[6.5.1]. 
+ */ +struct wusb_keydvt_out { + u8 kck[16]; + u8 ptk[16]; +} __attribute__((packed)); + +/* Pseudo Random Function WUSB1.0[6.5] */ +extern int wusb_crypto_init(void); +extern void wusb_crypto_exit(void); +extern ssize_t wusb_prf(void *out, size_t out_size, + const u8 key[16], const struct aes_ccm_nonce *_n, + const struct aes_ccm_label *a, + const void *b, size_t blen, size_t len); + +static inline int wusb_prf_64(void *out, size_t out_size, const u8 key[16], + const struct aes_ccm_nonce *n, + const struct aes_ccm_label *a, + const void *b, size_t blen) +{ + return wusb_prf(out, out_size, key, n, a, b, blen, 64); +} + +static inline int wusb_prf_128(void *out, size_t out_size, const u8 key[16], + const struct aes_ccm_nonce *n, + const struct aes_ccm_label *a, + const void *b, size_t blen) +{ + return wusb_prf(out, out_size, key, n, a, b, blen, 128); +} + +static inline int wusb_prf_256(void *out, size_t out_size, const u8 key[16], + const struct aes_ccm_nonce *n, + const struct aes_ccm_label *a, + const void *b, size_t blen) +{ + return wusb_prf(out, out_size, key, n, a, b, blen, 256); +} + +/* Key derivation WUSB1.0[6.5.1] */ +static inline int wusb_key_derive(struct wusb_keydvt_out *keydvt_out, + const u8 key[16], + const struct aes_ccm_nonce *n, + const struct wusb_keydvt_in *keydvt_in) +{ + const struct aes_ccm_label a = { .data = "Pair-wise keys" }; + return wusb_prf_256(keydvt_out, sizeof(*keydvt_out), key, n, &a, + keydvt_in, sizeof(*keydvt_in)); +} + +/* + * Out-of-band MIC Generation WUSB1.0[6.5.2] + * + * Compute the MIC over @key, @n and @hs and place it in @mic_out. + * + * @mic_out: Where to place the 8 byte MIC tag + * @key: KCK from the derivation process + * @n: CCM nonce, n->sfn == 0, TKID as established in the + * process. + * @hs: Handshake struct for phase 2 of the 4-way. + * hs->bStatus and hs->bReserved are zero. 
+ * hs->bMessageNumber is 2 (WUSB1.0[7.3.2.5.2] + * hs->dest_addr is the device's USB address padded with 0 + * hs->src_addr is the hosts's UWB device address + * hs->mic is ignored (as we compute that value). + */ +static inline int wusb_oob_mic(u8 mic_out[8], const u8 key[16], + const struct aes_ccm_nonce *n, + const struct usb_handshake *hs) +{ + const struct aes_ccm_label a = { .data = "out-of-bandMIC" }; + return wusb_prf_64(mic_out, 8, key, n, &a, + hs, sizeof(*hs) - sizeof(hs->MIC)); +} + +#endif /* #ifndef __WUSB_H__ */ diff --git a/include/linux/usb_usual.h b/include/linux/usb_usual.h index 712363c7a2..0aae1b2ee9 100644 --- a/include/linux/usb_usual.h +++ b/include/linux/usb_usual.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ /* * Interface to the libusual. * @@ -84,10 +83,6 @@ /* Cannot handle REPORT_LUNS */ \ US_FLAG(ALWAYS_SYNC, 0x20000000) \ /* lies about caching, so always sync */ \ - US_FLAG(NO_SAME, 0x40000000) \ - /* Cannot handle WRITE_SAME */ \ - US_FLAG(SENSE_AFTER_SYNC, 0x80000000) \ - /* Do REQUEST_SENSE after SYNCHRONIZE_CACHE */ \ #define US_FLAG(name, value) US_FL_##name = value , enum { US_DO_ALL_FLAGS }; @@ -96,6 +91,6 @@ enum { US_DO_ALL_FLAGS }; #include extern int usb_usual_ignore_device(struct usb_interface *intf); -extern const struct usb_device_id usb_storage_usb_ids[]; +extern struct usb_device_id usb_storage_usb_ids[]; #endif /* __LINUX_USB_USUAL_H */ diff --git a/include/linux/usbdevice_fs.h b/include/linux/usbdevice_fs.h index 14ea197ce3..04a2628541 100644 --- a/include/linux/usbdevice_fs.h +++ b/include/linux/usbdevice_fs.h @@ -1,4 +1,3 @@ -// SPDX-License-Identifier: GPL-2.0+ /*****************************************************************************/ /* @@ -69,7 +68,7 @@ struct usbdevfs_urb32 { compat_int_t error_count; compat_uint_t signr; compat_caddr_t usercontext; /* unused */ - struct usbdevfs_iso_packet_desc iso_frame_desc[]; + struct usbdevfs_iso_packet_desc iso_frame_desc[0]; }; struct usbdevfs_ioctl32 
{ diff --git a/include/linux/user-return-notifier.h b/include/linux/user-return-notifier.h index c07b386a9e..9c4a445bb4 100644 --- a/include/linux/user-return-notifier.h +++ b/include/linux/user-return-notifier.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_USER_RETURN_NOTIFIER_H #define _LINUX_USER_RETURN_NOTIFIER_H diff --git a/include/linux/user_namespace.h b/include/linux/user_namespace.h index 33a4240e6a..5d396eaaae 100644 --- a/include/linux/user_namespace.h +++ b/include/linux/user_namespace.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_USER_NAMESPACE_H #define _LINUX_USER_NAMESPACE_H @@ -6,29 +5,17 @@ #include #include #include -#include -#include -#include #include -#define UID_GID_MAP_MAX_BASE_EXTENTS 5 -#define UID_GID_MAP_MAX_EXTENTS 340 +#define UID_GID_MAP_MAX_EXTENTS 5 -struct uid_gid_extent { - u32 first; - u32 lower_first; - u32 count; -}; - -struct uid_gid_map { /* 64 bytes -- 1 cache line */ +struct uid_gid_map { /* 64 bytes -- 1 cache line */ u32 nr_extents; - union { - struct uid_gid_extent extent[UID_GID_MAP_MAX_BASE_EXTENTS]; - struct { - struct uid_gid_extent *forward; - struct uid_gid_extent *reverse; - }; - }; + struct uid_gid_extent { + u32 first; + u32 lower_first; + u32 count; + } extent[UID_GID_MAP_MAX_EXTENTS]; }; #define USERNS_SETGROUPS_ALLOWED 1UL @@ -45,52 +32,25 @@ enum ucount_type { UCOUNT_NET_NAMESPACES, UCOUNT_MNT_NAMESPACES, UCOUNT_CGROUP_NAMESPACES, - UCOUNT_TIME_NAMESPACES, -#ifdef CONFIG_INOTIFY_USER - UCOUNT_INOTIFY_INSTANCES, - UCOUNT_INOTIFY_WATCHES, -#endif -#ifdef CONFIG_FANOTIFY - UCOUNT_FANOTIFY_GROUPS, - UCOUNT_FANOTIFY_MARKS, -#endif - UCOUNT_RLIMIT_NPROC, - UCOUNT_RLIMIT_MSGQUEUE, - UCOUNT_RLIMIT_SIGPENDING, - UCOUNT_RLIMIT_MEMLOCK, UCOUNT_COUNTS, }; -#define MAX_PER_NAMESPACE_UCOUNTS UCOUNT_RLIMIT_NPROC - struct user_namespace { struct uid_gid_map uid_map; struct uid_gid_map gid_map; struct uid_gid_map projid_map; + atomic_t count; struct user_namespace 
*parent; int level; kuid_t owner; kgid_t group; struct ns_common ns; unsigned long flags; - /* parent_could_setfcap: true if the creator if this ns had CAP_SETFCAP - * in its effective capability set at the child ns creation time. */ - bool parent_could_setfcap; - -#ifdef CONFIG_KEYS - /* List of joinable keyrings in this namespace. Modification access of - * these pointers is controlled by keyring_sem. Once - * user_keyring_register is set, it won't be changed, so it can be - * accessed directly with READ_ONCE(). - */ - struct list_head keyring_name_list; - struct key *user_keyring_register; - struct rw_semaphore keyring_sem; -#endif /* Register of per-UID persistent keyrings for this namespace */ #ifdef CONFIG_PERSISTENT_KEYRINGS struct key *persistent_keyring_register; + struct rw_semaphore persistent_keyring_register_sem; #endif struct work_struct work; #ifdef CONFIG_SYSCTL @@ -98,51 +58,30 @@ struct user_namespace { struct ctl_table_header *sysctls; #endif struct ucounts *ucounts; - long ucount_max[UCOUNT_COUNTS]; + int ucount_max[UCOUNT_COUNTS]; } __randomize_layout; struct ucounts { struct hlist_node node; struct user_namespace *ns; kuid_t uid; - atomic_t count; - atomic_long_t ucount[UCOUNT_COUNTS]; + int count; + atomic_t ucount[UCOUNT_COUNTS]; }; extern struct user_namespace init_user_ns; -extern struct ucounts init_ucounts; bool setup_userns_sysctls(struct user_namespace *ns); void retire_userns_sysctls(struct user_namespace *ns); struct ucounts *inc_ucount(struct user_namespace *ns, kuid_t uid, enum ucount_type type); void dec_ucount(struct ucounts *ucounts, enum ucount_type type); -struct ucounts *alloc_ucounts(struct user_namespace *ns, kuid_t uid); -struct ucounts * __must_check get_ucounts(struct ucounts *ucounts); -void put_ucounts(struct ucounts *ucounts); - -static inline long get_ucounts_value(struct ucounts *ucounts, enum ucount_type type) -{ - return atomic_long_read(&ucounts->ucount[type]); -} - -long inc_rlimit_ucounts(struct ucounts 
*ucounts, enum ucount_type type, long v); -bool dec_rlimit_ucounts(struct ucounts *ucounts, enum ucount_type type, long v); -long inc_rlimit_get_ucounts(struct ucounts *ucounts, enum ucount_type type); -void dec_rlimit_put_ucounts(struct ucounts *ucounts, enum ucount_type type); -bool is_ucounts_overlimit(struct ucounts *ucounts, enum ucount_type type, unsigned long max); - -static inline void set_rlimit_ucount_max(struct user_namespace *ns, - enum ucount_type type, unsigned long max) -{ - ns->ucount_max[type] = max <= LONG_MAX ? max : LONG_MAX; -} #ifdef CONFIG_USER_NS static inline struct user_namespace *get_user_ns(struct user_namespace *ns) { if (ns) - refcount_inc(&ns->ns.count); + atomic_inc(&ns->count); return ns; } @@ -152,7 +91,7 @@ extern void __put_user_ns(struct user_namespace *ns); static inline void put_user_ns(struct user_namespace *ns) { - if (ns && refcount_dec_and_test(&ns->ns.count)) + if (ns && atomic_dec_and_test(&ns->count)) __put_user_ns(ns); } @@ -166,9 +105,8 @@ extern ssize_t proc_projid_map_write(struct file *, const char __user *, size_t, extern ssize_t proc_setgroups_write(struct file *, const char __user *, size_t, loff_t *); extern int proc_setgroups_show(struct seq_file *m, void *v); extern bool userns_may_setgroups(const struct user_namespace *ns); -extern bool in_userns(const struct user_namespace *ancestor, - const struct user_namespace *child); extern bool current_in_userns(const struct user_namespace *target_ns); + struct ns_common *ns_get_owner(struct ns_common *ns); #else @@ -199,12 +137,6 @@ static inline bool userns_may_setgroups(const struct user_namespace *ns) return true; } -static inline bool in_userns(const struct user_namespace *ancestor, - const struct user_namespace *child) -{ - return true; -} - static inline bool current_in_userns(const struct user_namespace *target_ns) { return true; diff --git a/include/linux/userfaultfd_k.h b/include/linux/userfaultfd_k.h index 33cea484d1..dd66a952e8 100644 --- 
a/include/linux/userfaultfd_k.h +++ b/include/linux/userfaultfd_k.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ /* * include/linux/userfaultfd_k.h * @@ -14,11 +13,6 @@ #include /* linux/include/uapi/linux/userfaultfd.h */ #include -#include -#include - -/* The set of all possible UFFD-related VM flags. */ -#define __VM_UFFD_FLAGS (VM_UFFD_MISSING | VM_UFFD_WP | VM_UFFD_MINOR) /* * CAREFUL: Check include/uapi/asm-generic/fcntl.h when defining @@ -33,43 +27,13 @@ #define UFFD_SHARED_FCNTL_FLAGS (O_CLOEXEC | O_NONBLOCK) #define UFFD_FLAGS_SET (EFD_SHARED_FCNTL_FLAGS) -extern int sysctl_unprivileged_userfaultfd; - -extern vm_fault_t handle_userfault(struct vm_fault *vmf, unsigned long reason); - -/* - * The mode of operation for __mcopy_atomic and its helpers. - * - * This is almost an implementation detail (mcopy_atomic below doesn't take this - * as a parameter), but it's exposed here because memory-kind-specific - * implementations (e.g. hugetlbfs) need to know the mode of operation. - */ -enum mcopy_atomic_mode { - /* A normal copy_from_user into the destination range. */ - MCOPY_ATOMIC_NORMAL, - /* Don't copy; map the destination range to the zero page. */ - MCOPY_ATOMIC_ZEROPAGE, - /* Just install pte(s) with the existing page(s) in the page cache. 
*/ - MCOPY_ATOMIC_CONTINUE, -}; - -extern int mfill_atomic_install_pte(struct mm_struct *dst_mm, pmd_t *dst_pmd, - struct vm_area_struct *dst_vma, - unsigned long dst_addr, struct page *page, - bool newly_allocated, bool wp_copy); +extern int handle_userfault(struct fault_env *fe, unsigned long reason); extern ssize_t mcopy_atomic(struct mm_struct *dst_mm, unsigned long dst_start, - unsigned long src_start, unsigned long len, - atomic_t *mmap_changing, __u64 mode); + unsigned long src_start, unsigned long len); extern ssize_t mfill_zeropage(struct mm_struct *dst_mm, unsigned long dst_start, - unsigned long len, - atomic_t *mmap_changing); -extern ssize_t mcopy_continue(struct mm_struct *dst_mm, unsigned long dst_start, - unsigned long len, atomic_t *mmap_changing); -extern int mwriteprotect_range(struct mm_struct *dst_mm, - unsigned long start, unsigned long len, - bool enable_wp, atomic_t *mmap_changing); + unsigned long len); /* mm helpers */ static inline bool is_mergeable_vm_userfaultfd_ctx(struct vm_area_struct *vma, @@ -78,78 +42,20 @@ static inline bool is_mergeable_vm_userfaultfd_ctx(struct vm_area_struct *vma, return vma->vm_userfaultfd_ctx.ctx == vm_ctx.ctx; } -/* - * Never enable huge pmd sharing on some uffd registered vmas: - * - * - VM_UFFD_WP VMAs, because write protect information is per pgtable entry. - * - * - VM_UFFD_MINOR VMAs, because otherwise we would never get minor faults for - * VMAs which share huge pmds. (If you have two mappings to the same - * underlying pages, and fault in the non-UFFD-registered one with a write, - * with huge pmd sharing this would *also* setup the second UFFD-registered - * mapping, and we'd not get minor faults.) 
- */ -static inline bool uffd_disable_huge_pmd_share(struct vm_area_struct *vma) -{ - return vma->vm_flags & (VM_UFFD_WP | VM_UFFD_MINOR); -} - static inline bool userfaultfd_missing(struct vm_area_struct *vma) { return vma->vm_flags & VM_UFFD_MISSING; } -static inline bool userfaultfd_wp(struct vm_area_struct *vma) -{ - return vma->vm_flags & VM_UFFD_WP; -} - -static inline bool userfaultfd_minor(struct vm_area_struct *vma) -{ - return vma->vm_flags & VM_UFFD_MINOR; -} - -static inline bool userfaultfd_pte_wp(struct vm_area_struct *vma, - pte_t pte) -{ - return userfaultfd_wp(vma) && pte_uffd_wp(pte); -} - -static inline bool userfaultfd_huge_pmd_wp(struct vm_area_struct *vma, - pmd_t pmd) -{ - return userfaultfd_wp(vma) && pmd_uffd_wp(pmd); -} - static inline bool userfaultfd_armed(struct vm_area_struct *vma) { - return vma->vm_flags & __VM_UFFD_FLAGS; + return vma->vm_flags & (VM_UFFD_MISSING | VM_UFFD_WP); } -extern int dup_userfaultfd(struct vm_area_struct *, struct list_head *); -extern void dup_userfaultfd_complete(struct list_head *); - -extern void mremap_userfaultfd_prep(struct vm_area_struct *, - struct vm_userfaultfd_ctx *); -extern void mremap_userfaultfd_complete(struct vm_userfaultfd_ctx *, - unsigned long from, unsigned long to, - unsigned long len); - -extern bool userfaultfd_remove(struct vm_area_struct *vma, - unsigned long start, - unsigned long end); - -extern int userfaultfd_unmap_prep(struct vm_area_struct *vma, - unsigned long start, unsigned long end, - struct list_head *uf); -extern void userfaultfd_unmap_complete(struct mm_struct *mm, - struct list_head *uf); - #else /* CONFIG_USERFAULTFD */ /* mm helpers */ -static inline vm_fault_t handle_userfault(struct vm_fault *vmf, - unsigned long reason) +static inline int handle_userfault(struct fault_env *fe, unsigned long reason) { return VM_FAULT_SIGBUS; } @@ -165,75 +71,11 @@ static inline bool userfaultfd_missing(struct vm_area_struct *vma) return false; } -static inline bool 
userfaultfd_wp(struct vm_area_struct *vma) -{ - return false; -} - -static inline bool userfaultfd_minor(struct vm_area_struct *vma) -{ - return false; -} - -static inline bool userfaultfd_pte_wp(struct vm_area_struct *vma, - pte_t pte) -{ - return false; -} - -static inline bool userfaultfd_huge_pmd_wp(struct vm_area_struct *vma, - pmd_t pmd) -{ - return false; -} - - static inline bool userfaultfd_armed(struct vm_area_struct *vma) { return false; } -static inline int dup_userfaultfd(struct vm_area_struct *vma, - struct list_head *l) -{ - return 0; -} - -static inline void dup_userfaultfd_complete(struct list_head *l) -{ -} - -static inline void mremap_userfaultfd_prep(struct vm_area_struct *vma, - struct vm_userfaultfd_ctx *ctx) -{ -} - -static inline void mremap_userfaultfd_complete(struct vm_userfaultfd_ctx *ctx, - unsigned long from, - unsigned long to, - unsigned long len) -{ -} - -static inline bool userfaultfd_remove(struct vm_area_struct *vma, - unsigned long start, - unsigned long end) -{ - return true; -} - -static inline int userfaultfd_unmap_prep(struct vm_area_struct *vma, - unsigned long start, unsigned long end, - struct list_head *uf) -{ - return 0; -} - -static inline void userfaultfd_unmap_complete(struct mm_struct *mm, - struct list_head *uf) -{ -} - #endif /* CONFIG_USERFAULTFD */ #endif /* _LINUX_USERFAULTFD_K_H */ diff --git a/include/linux/util_macros.h b/include/linux/util_macros.h index 72299f261b..f9b2ce5803 100644 --- a/include/linux/util_macros.h +++ b/include/linux/util_macros.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_HELPER_MACROS_H_ #define _LINUX_HELPER_MACROS_H_ diff --git a/include/linux/uts.h b/include/linux/uts.h index d62829530c..6ddbd86377 100644 --- a/include/linux/uts.h +++ b/include/linux/uts.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_UTS_H #define _LINUX_UTS_H diff --git a/include/linux/utsname.h b/include/linux/utsname.h index 2b1737c9b2..da826ed059 100644 --- 
a/include/linux/utsname.h +++ b/include/linux/utsname.h @@ -1,9 +1,9 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_UTSNAME_H #define _LINUX_UTSNAME_H #include +#include #include #include #include @@ -21,6 +21,7 @@ struct user_namespace; extern struct user_namespace init_user_ns; struct uts_namespace { + struct kref kref; struct new_utsname name; struct user_namespace *user_ns; struct ucounts *ucounts; @@ -31,20 +32,17 @@ extern struct uts_namespace init_uts_ns; #ifdef CONFIG_UTS_NS static inline void get_uts_ns(struct uts_namespace *ns) { - refcount_inc(&ns->ns.count); + kref_get(&ns->kref); } extern struct uts_namespace *copy_utsname(unsigned long flags, struct user_namespace *user_ns, struct uts_namespace *old_ns); -extern void free_uts_ns(struct uts_namespace *ns); +extern void free_uts_ns(struct kref *kref); static inline void put_uts_ns(struct uts_namespace *ns) { - if (refcount_dec_and_test(&ns->ns.count)) - free_uts_ns(ns); + kref_put(&ns->kref, free_uts_ns); } - -void uts_ns_init(void); #else static inline void get_uts_ns(struct uts_namespace *ns) { @@ -62,10 +60,6 @@ static inline struct uts_namespace *copy_utsname(unsigned long flags, return old_ns; } - -static inline void uts_ns_init(void) -{ -} #endif #ifdef CONFIG_PROC_SYSCTL diff --git a/include/linux/uuid.h b/include/linux/uuid.h index 8cdc0d3567..2d095fc602 100644 --- a/include/linux/uuid.h +++ b/include/linux/uuid.h @@ -1,28 +1,22 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * UUID/GUID definition * * Copyright (C) 2010, 2016 Intel Corp. * Huang Ying + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License version + * 2 as published by the Free Software Foundation; + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the + * GNU General Public License for more details. */ #ifndef _LINUX_UUID_H_ #define _LINUX_UUID_H_ #include -#include - -#define UUID_SIZE 16 - -typedef struct { - __u8 b[UUID_SIZE]; -} uuid_t; - -#define UUID_INIT(a, b, c, d0, d1, d2, d3, d4, d5, d6, d7) \ -((uuid_t) \ -{{ ((a) >> 24) & 0xff, ((a) >> 16) & 0xff, ((a) >> 8) & 0xff, (a) & 0xff, \ - ((b) >> 8) & 0xff, (b) & 0xff, \ - ((c) >> 8) & 0xff, (c) & 0xff, \ - (d0), (d1), (d2), (d3), (d4), (d5), (d6), (d7) }}) /* * The length of a UUID string ("aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee") @@ -30,77 +24,27 @@ typedef struct { */ #define UUID_STRING_LEN 36 -extern const guid_t guid_null; -extern const uuid_t uuid_null; - -static inline bool guid_equal(const guid_t *u1, const guid_t *u2) +static inline int uuid_le_cmp(const uuid_le u1, const uuid_le u2) { - return memcmp(u1, u2, sizeof(guid_t)) == 0; + return memcmp(&u1, &u2, sizeof(uuid_le)); } -static inline void guid_copy(guid_t *dst, const guid_t *src) +static inline int uuid_be_cmp(const uuid_be u1, const uuid_be u2) { - memcpy(dst, src, sizeof(guid_t)); -} - -static inline void import_guid(guid_t *dst, const __u8 *src) -{ - memcpy(dst, src, sizeof(guid_t)); -} - -static inline void export_guid(__u8 *dst, const guid_t *src) -{ - memcpy(dst, src, sizeof(guid_t)); -} - -static inline bool guid_is_null(const guid_t *guid) -{ - return guid_equal(guid, &guid_null); -} - -static inline bool uuid_equal(const uuid_t *u1, const uuid_t *u2) -{ - return memcmp(u1, u2, sizeof(uuid_t)) == 0; -} - -static inline void uuid_copy(uuid_t *dst, const uuid_t *src) -{ - memcpy(dst, src, sizeof(uuid_t)); -} - -static inline void import_uuid(uuid_t *dst, const __u8 *src) -{ - memcpy(dst, src, sizeof(uuid_t)); -} - -static inline void export_uuid(__u8 *dst, const uuid_t *src) -{ - memcpy(dst, src, sizeof(uuid_t)); -} - -static inline bool uuid_is_null(const uuid_t *uuid) -{ - return uuid_equal(uuid, &uuid_null); + return memcmp(&u1, &u2, sizeof(uuid_be)); } void 
generate_random_uuid(unsigned char uuid[16]); -void generate_random_guid(unsigned char guid[16]); -extern void guid_gen(guid_t *u); -extern void uuid_gen(uuid_t *u); +extern void uuid_le_gen(uuid_le *u); +extern void uuid_be_gen(uuid_be *u); bool __must_check uuid_is_valid(const char *uuid); -extern const u8 guid_index[16]; -extern const u8 uuid_index[16]; +extern const u8 uuid_le_index[16]; +extern const u8 uuid_be_index[16]; -int guid_parse(const char *uuid, guid_t *u); -int uuid_parse(const char *uuid, uuid_t *u); - -/* backwards compatibility, don't use in new code */ -static inline int uuid_le_cmp(const guid_t u1, const guid_t u2) -{ - return memcmp(&u1, &u2, sizeof(guid_t)); -} +int uuid_le_to_bin(const char *uuid, uuid_le *u); +int uuid_be_to_bin(const char *uuid, uuid_be *u); #endif diff --git a/include/linux/uwb.h b/include/linux/uwb.h new file mode 100644 index 0000000000..7dbbee9741 --- /dev/null +++ b/include/linux/uwb.h @@ -0,0 +1,831 @@ +/* + * Ultra Wide Band + * UWB API + * + * Copyright (C) 2005-2006 Intel Corporation + * Inaky Perez-Gonzalez + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License version + * 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. 
+ * + * + * FIXME: doc: overview of the API, different parts and pointers + */ + +#ifndef __LINUX__UWB_H__ +#define __LINUX__UWB_H__ + +#include +#include +#include +#include +#include +#include +#include +#include + +struct uwb_dev; +struct uwb_beca_e; +struct uwb_rc; +struct uwb_rsv; +struct uwb_dbg; + +/** + * struct uwb_dev - a UWB Device + * @rc: UWB Radio Controller that discovered the device (kind of its + * parent). + * @bce: a beacon cache entry for this device; or NULL if the device + * is a local radio controller. + * @mac_addr: the EUI-48 address of this device. + * @dev_addr: the current DevAddr used by this device. + * @beacon_slot: the slot number the beacon is using. + * @streams: bitmap of streams allocated to reservations targeted at + * this device. For an RC, this is the streams allocated for + * reservations targeted at DevAddrs. + * + * A UWB device may either be a neighbor or part of a local radio + * controller. + */ +struct uwb_dev { + struct mutex mutex; + struct list_head list_node; + struct device dev; + struct uwb_rc *rc; /* radio controller */ + struct uwb_beca_e *bce; /* Beacon Cache Entry */ + + struct uwb_mac_addr mac_addr; + struct uwb_dev_addr dev_addr; + int beacon_slot; + DECLARE_BITMAP(streams, UWB_NUM_STREAMS); + DECLARE_BITMAP(last_availability_bm, UWB_NUM_MAS); +}; +#define to_uwb_dev(d) container_of(d, struct uwb_dev, dev) + +/** + * UWB HWA/WHCI Radio Control {Command|Event} Block context IDs + * + * RC[CE]Bs have a 'context ID' field that matches the command with + * the event received to confirm it. + * + * Maximum number of context IDs + */ +enum { UWB_RC_CTX_MAX = 256 }; + + +/** Notification chain head for UWB generated events to listeners */ +struct uwb_notifs_chain { + struct list_head list; + struct mutex mutex; +}; + +/* Beacon cache list */ +struct uwb_beca { + struct list_head list; + size_t entries; + struct mutex mutex; +}; + +/* Event handling thread. 
*/ +struct uwbd { + int pid; + struct task_struct *task; + wait_queue_head_t wq; + struct list_head event_list; + spinlock_t event_list_lock; +}; + +/** + * struct uwb_mas_bm - a bitmap of all MAS in a superframe + * @bm: a bitmap of length #UWB_NUM_MAS + */ +struct uwb_mas_bm { + DECLARE_BITMAP(bm, UWB_NUM_MAS); + DECLARE_BITMAP(unsafe_bm, UWB_NUM_MAS); + int safe; + int unsafe; +}; + +/** + * uwb_rsv_state - UWB Reservation state. + * + * NONE - reservation is not active (no DRP IE being transmitted). + * + * Owner reservation states: + * + * INITIATED - owner has sent an initial DRP request. + * PENDING - target responded with pending Reason Code. + * MODIFIED - reservation manager is modifying an established + * reservation with a different MAS allocation. + * ESTABLISHED - the reservation has been successfully negotiated. + * + * Target reservation states: + * + * DENIED - request is denied. + * ACCEPTED - request is accepted. + * PENDING - PAL has yet to make a decision to whether to accept or + * deny. + * + * FIXME: further target states TBD. + */ +enum uwb_rsv_state { + UWB_RSV_STATE_NONE = 0, + UWB_RSV_STATE_O_INITIATED, + UWB_RSV_STATE_O_PENDING, + UWB_RSV_STATE_O_MODIFIED, + UWB_RSV_STATE_O_ESTABLISHED, + UWB_RSV_STATE_O_TO_BE_MOVED, + UWB_RSV_STATE_O_MOVE_EXPANDING, + UWB_RSV_STATE_O_MOVE_COMBINING, + UWB_RSV_STATE_O_MOVE_REDUCING, + UWB_RSV_STATE_T_ACCEPTED, + UWB_RSV_STATE_T_DENIED, + UWB_RSV_STATE_T_CONFLICT, + UWB_RSV_STATE_T_PENDING, + UWB_RSV_STATE_T_EXPANDING_ACCEPTED, + UWB_RSV_STATE_T_EXPANDING_CONFLICT, + UWB_RSV_STATE_T_EXPANDING_PENDING, + UWB_RSV_STATE_T_EXPANDING_DENIED, + UWB_RSV_STATE_T_RESIZED, + + UWB_RSV_STATE_LAST, +}; + +enum uwb_rsv_target_type { + UWB_RSV_TARGET_DEV, + UWB_RSV_TARGET_DEVADDR, +}; + +/** + * struct uwb_rsv_target - the target of a reservation. 
+ * + * Reservations unicast and targeted at a single device + * (UWB_RSV_TARGET_DEV); or (e.g., in the case of WUSB) targeted at a + * specific (private) DevAddr (UWB_RSV_TARGET_DEVADDR). + */ +struct uwb_rsv_target { + enum uwb_rsv_target_type type; + union { + struct uwb_dev *dev; + struct uwb_dev_addr devaddr; + }; +}; + +struct uwb_rsv_move { + struct uwb_mas_bm final_mas; + struct uwb_ie_drp *companion_drp_ie; + struct uwb_mas_bm companion_mas; +}; + +/* + * Number of streams reserved for reservations targeted at DevAddrs. + */ +#define UWB_NUM_GLOBAL_STREAMS 1 + +typedef void (*uwb_rsv_cb_f)(struct uwb_rsv *rsv); + +/** + * struct uwb_rsv - a DRP reservation + * + * Data structure management: + * + * @rc: the radio controller this reservation is for + * (as target or owner) + * @rc_node: a list node for the RC + * @pal_node: a list node for the PAL + * + * Owner and target parameters: + * + * @owner: the UWB device owning this reservation + * @target: the target UWB device + * @type: reservation type + * + * Owner parameters: + * + * @max_mas: maximum number of MAS + * @min_mas: minimum number of MAS + * @sparsity: owner selected sparsity + * @is_multicast: true iff multicast + * + * @callback: callback function when the reservation completes + * @pal_priv: private data for the PAL making the reservation + * + * Reservation status: + * + * @status: negotiation status + * @stream: stream index allocated for this reservation + * @tiebreaker: conflict tiebreaker for this reservation + * @mas: reserved MAS + * @drp_ie: the DRP IE + * @ie_valid: true iff the DRP IE matches the reservation parameters + * + * DRP reservations are uniquely identified by the owner, target and + * stream index. However, when using a DevAddr as a target (e.g., for + * a WUSB cluster reservation) the responses may be received from + * devices with different DevAddrs. In this case, reservations are + * uniquely identified by just the stream index. 
A number of stream + * indexes (UWB_NUM_GLOBAL_STREAMS) are reserved for this. + */ +struct uwb_rsv { + struct uwb_rc *rc; + struct list_head rc_node; + struct list_head pal_node; + struct kref kref; + + struct uwb_dev *owner; + struct uwb_rsv_target target; + enum uwb_drp_type type; + int max_mas; + int min_mas; + int max_interval; + bool is_multicast; + + uwb_rsv_cb_f callback; + void *pal_priv; + + enum uwb_rsv_state state; + bool needs_release_companion_mas; + u8 stream; + u8 tiebreaker; + struct uwb_mas_bm mas; + struct uwb_ie_drp *drp_ie; + struct uwb_rsv_move mv; + bool ie_valid; + struct timer_list timer; + struct work_struct handle_timeout_work; +}; + +static const +struct uwb_mas_bm uwb_mas_bm_zero = { .bm = { 0 } }; + +static inline void uwb_mas_bm_copy_le(void *dst, const struct uwb_mas_bm *mas) +{ + bitmap_copy_le(dst, mas->bm, UWB_NUM_MAS); +} + +/** + * struct uwb_drp_avail - a radio controller's view of MAS usage + * @global: MAS unused by neighbors (excluding reservations targeted + * or owned by the local radio controller) or the beacon period + * @local: MAS unused by local established reservations + * @pending: MAS unused by local pending reservations + * @ie: DRP Availability IE to be included in the beacon + * @ie_valid: true iff @ie is valid and does not need to be regenerated from + * @global and @local + * + * Each radio controller maintains a view of MAS usage or + * availability. MAS available for a new reservation are determined + * from the intersection of @global, @local, and @pending. + * + * The radio controller must transmit a DRP Availability IE that's the + * intersection of @global and @local. + * + * A set bit indicates the MAS is unused and available. + * + * rc->rsvs_mutex should be held before accessing this data structure. + * + * [ECMA-368] section 17.4.3. 
+ */ +struct uwb_drp_avail { + DECLARE_BITMAP(global, UWB_NUM_MAS); + DECLARE_BITMAP(local, UWB_NUM_MAS); + DECLARE_BITMAP(pending, UWB_NUM_MAS); + struct uwb_ie_drp_avail ie; + bool ie_valid; +}; + +struct uwb_drp_backoff_win { + u8 window; + u8 n; + int total_expired; + struct timer_list timer; + bool can_reserve_extra_mases; +}; + +const char *uwb_rsv_state_str(enum uwb_rsv_state state); +const char *uwb_rsv_type_str(enum uwb_drp_type type); + +struct uwb_rsv *uwb_rsv_create(struct uwb_rc *rc, uwb_rsv_cb_f cb, + void *pal_priv); +void uwb_rsv_destroy(struct uwb_rsv *rsv); + +int uwb_rsv_establish(struct uwb_rsv *rsv); +int uwb_rsv_modify(struct uwb_rsv *rsv, + int max_mas, int min_mas, int sparsity); +void uwb_rsv_terminate(struct uwb_rsv *rsv); + +void uwb_rsv_accept(struct uwb_rsv *rsv, uwb_rsv_cb_f cb, void *pal_priv); + +void uwb_rsv_get_usable_mas(struct uwb_rsv *orig_rsv, struct uwb_mas_bm *mas); + +/** + * Radio Control Interface instance + * + * + * Life cycle rules: those of the UWB Device. + * + * @index: an index number for this radio controller, as used in the + * device name. + * @version: version of protocol supported by this device + * @priv: Backend implementation; rw with uwb_dev.dev.sem taken. + * @cmd: Backend implementation to execute commands; rw and call + * only with uwb_dev.dev.sem taken. + * @reset: Hardware reset of radio controller and any PAL controllers. + * @filter: Backend implementation to manipulate data to and from device + * to be compliant to specification assumed by driver (WHCI + * 0.95). + * + * uwb_dev.dev.mutex is used to execute commands and update + * the corresponding structures; can't use a spinlock + * because rc->cmd() can sleep. + * @ies: This is a dynamically allocated array caching the + * IEs (settable by the host) that the beacon of this + * radio controller is currently sending. 
+ * + * In reality, we store here the full command we set to + * the radio controller (which is basically a command + * prefix followed by all the IEs the beacon currently + * contains). This way we don't have to realloc and + * memcpy when setting it. + * + * We set this up in uwb_rc_ie_setup(), where we alloc + * this struct, call get_ie() [so we know which IEs are + * currently being sent, if any]. + * + * @ies_capacity:Amount of space (in bytes) allocated in @ies. The + * amount used is given by sizeof(*ies) plus ies->wIELength + * (which is a little endian quantity all the time). + * @ies_mutex: protect the IE cache + * @dbg: information for the debug interface + */ +struct uwb_rc { + struct uwb_dev uwb_dev; + int index; + u16 version; + + struct module *owner; + void *priv; + int (*start)(struct uwb_rc *rc); + void (*stop)(struct uwb_rc *rc); + int (*cmd)(struct uwb_rc *, const struct uwb_rccb *, size_t); + int (*reset)(struct uwb_rc *rc); + int (*filter_cmd)(struct uwb_rc *, struct uwb_rccb **, size_t *); + int (*filter_event)(struct uwb_rc *, struct uwb_rceb **, const size_t, + size_t *, size_t *); + + spinlock_t neh_lock; /* protects neh_* and ctx_* */ + struct list_head neh_list; /* Open NE handles */ + unsigned long ctx_bm[UWB_RC_CTX_MAX / 8 / sizeof(unsigned long)]; + u8 ctx_roll; + + int beaconing; /* Beaconing state [channel number] */ + int beaconing_forced; + int scanning; + enum uwb_scan_type scan_type:3; + unsigned ready:1; + struct uwb_notifs_chain notifs_chain; + struct uwb_beca uwb_beca; + + struct uwbd uwbd; + + struct uwb_drp_backoff_win bow; + struct uwb_drp_avail drp_avail; + struct list_head reservations; + struct list_head cnflt_alien_list; + struct uwb_mas_bm cnflt_alien_bitmap; + struct mutex rsvs_mutex; + spinlock_t rsvs_lock; + struct workqueue_struct *rsv_workq; + + struct delayed_work rsv_update_work; + struct delayed_work rsv_alien_bp_work; + int set_drp_ie_pending; + struct mutex ies_mutex; + struct uwb_rc_cmd_set_ie *ies; + 
size_t ies_capacity; + + struct list_head pals; + int active_pals; + + struct uwb_dbg *dbg; +}; + + +/** + * struct uwb_pal - a UWB PAL + * @name: descriptive name for this PAL (wusbhc, wlp, etc.). + * @device: a device for the PAL. Used to link the PAL and the radio + * controller in sysfs. + * @rc: the radio controller the PAL uses. + * @channel_changed: called when the channel used by the radio changes. + * A channel of -1 means the channel has been stopped. + * @new_rsv: called when a peer requests a reservation (may be NULL if + * the PAL cannot accept reservation requests). + * @channel: channel being used by the PAL; 0 if the PAL isn't using + * the radio; -1 if the PAL wishes to use the radio but + * cannot. + * @debugfs_dir: a debugfs directory which the PAL can use for its own + * debugfs files. + * + * A Protocol Adaptation Layer (PAL) is a user of the WiMedia UWB + * radio platform (e.g., WUSB, WLP or Bluetooth UWB AMP). + * + * The PALs using a radio controller must register themselves to + * permit the UWB stack to coordinate usage of the radio between the + * various PALs or to allow PALs to response to certain requests from + * peers. + * + * A struct uwb_pal should be embedded in a containing structure + * belonging to the PAL and initialized with uwb_pal_init()). Fields + * should be set appropriately by the PAL before registering the PAL + * with uwb_pal_register(). 
+ */ +struct uwb_pal { + struct list_head node; + const char *name; + struct device *device; + struct uwb_rc *rc; + + void (*channel_changed)(struct uwb_pal *pal, int channel); + void (*new_rsv)(struct uwb_pal *pal, struct uwb_rsv *rsv); + + int channel; + struct dentry *debugfs_dir; +}; + +void uwb_pal_init(struct uwb_pal *pal); +int uwb_pal_register(struct uwb_pal *pal); +void uwb_pal_unregister(struct uwb_pal *pal); + +int uwb_radio_start(struct uwb_pal *pal); +void uwb_radio_stop(struct uwb_pal *pal); + +/* + * General public API + * + * This API can be used by UWB device drivers or by those implementing + * UWB Radio Controllers + */ +struct uwb_dev *uwb_dev_get_by_devaddr(struct uwb_rc *rc, + const struct uwb_dev_addr *devaddr); +struct uwb_dev *uwb_dev_get_by_rc(struct uwb_dev *, struct uwb_rc *); +static inline void uwb_dev_get(struct uwb_dev *uwb_dev) +{ + get_device(&uwb_dev->dev); +} +static inline void uwb_dev_put(struct uwb_dev *uwb_dev) +{ + put_device(&uwb_dev->dev); +} +struct uwb_dev *uwb_dev_try_get(struct uwb_rc *rc, struct uwb_dev *uwb_dev); + +/** + * Callback function for 'uwb_{dev,rc}_foreach()'. + * + * @dev: Linux device instance + * 'uwb_dev = container_of(dev, struct uwb_dev, dev)' + * @priv: Data passed by the caller to 'uwb_{dev,rc}_foreach()'. + * + * @returns: 0 to continue the iterations, any other val to stop + * iterating and return the value to the caller of + * _foreach(). 
+ */ +typedef int (*uwb_dev_for_each_f)(struct device *dev, void *priv); +int uwb_dev_for_each(struct uwb_rc *rc, uwb_dev_for_each_f func, void *priv); + +struct uwb_rc *uwb_rc_alloc(void); +struct uwb_rc *uwb_rc_get_by_dev(const struct uwb_dev_addr *); +struct uwb_rc *uwb_rc_get_by_grandpa(const struct device *); +void uwb_rc_put(struct uwb_rc *rc); + +typedef void (*uwb_rc_cmd_cb_f)(struct uwb_rc *rc, void *arg, + struct uwb_rceb *reply, ssize_t reply_size); + +int uwb_rc_cmd_async(struct uwb_rc *rc, const char *cmd_name, + struct uwb_rccb *cmd, size_t cmd_size, + u8 expected_type, u16 expected_event, + uwb_rc_cmd_cb_f cb, void *arg); +ssize_t uwb_rc_cmd(struct uwb_rc *rc, const char *cmd_name, + struct uwb_rccb *cmd, size_t cmd_size, + struct uwb_rceb *reply, size_t reply_size); +ssize_t uwb_rc_vcmd(struct uwb_rc *rc, const char *cmd_name, + struct uwb_rccb *cmd, size_t cmd_size, + u8 expected_type, u16 expected_event, + struct uwb_rceb **preply); + +size_t __uwb_addr_print(char *, size_t, const unsigned char *, int); + +int uwb_rc_dev_addr_set(struct uwb_rc *, const struct uwb_dev_addr *); +int uwb_rc_dev_addr_get(struct uwb_rc *, struct uwb_dev_addr *); +int uwb_rc_mac_addr_set(struct uwb_rc *, const struct uwb_mac_addr *); +int uwb_rc_mac_addr_get(struct uwb_rc *, struct uwb_mac_addr *); +int __uwb_mac_addr_assigned_check(struct device *, void *); +int __uwb_dev_addr_assigned_check(struct device *, void *); + +/* Print in @buf a pretty repr of @addr */ +static inline size_t uwb_dev_addr_print(char *buf, size_t buf_size, + const struct uwb_dev_addr *addr) +{ + return __uwb_addr_print(buf, buf_size, addr->data, 0); +} + +/* Print in @buf a pretty repr of @addr */ +static inline size_t uwb_mac_addr_print(char *buf, size_t buf_size, + const struct uwb_mac_addr *addr) +{ + return __uwb_addr_print(buf, buf_size, addr->data, 1); +} + +/* @returns 0 if device addresses @addr2 and @addr1 are equal */ +static inline int uwb_dev_addr_cmp(const struct uwb_dev_addr 
*addr1, + const struct uwb_dev_addr *addr2) +{ + return memcmp(addr1, addr2, sizeof(*addr1)); +} + +/* @returns 0 if MAC addresses @addr2 and @addr1 are equal */ +static inline int uwb_mac_addr_cmp(const struct uwb_mac_addr *addr1, + const struct uwb_mac_addr *addr2) +{ + return memcmp(addr1, addr2, sizeof(*addr1)); +} + +/* @returns !0 if a MAC @addr is a broadcast address */ +static inline int uwb_mac_addr_bcast(const struct uwb_mac_addr *addr) +{ + struct uwb_mac_addr bcast = { + .data = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff } + }; + return !uwb_mac_addr_cmp(addr, &bcast); +} + +/* @returns !0 if a MAC @addr is all zeroes*/ +static inline int uwb_mac_addr_unset(const struct uwb_mac_addr *addr) +{ + struct uwb_mac_addr unset = { + .data = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 } + }; + return !uwb_mac_addr_cmp(addr, &unset); +} + +/* @returns !0 if the address is in use. */ +static inline unsigned __uwb_dev_addr_assigned(struct uwb_rc *rc, + struct uwb_dev_addr *addr) +{ + return uwb_dev_for_each(rc, __uwb_dev_addr_assigned_check, addr); +} + +/* + * UWB Radio Controller API + * + * This API is used (in addition to the general API) to implement UWB + * Radio Controllers. + */ +void uwb_rc_init(struct uwb_rc *); +int uwb_rc_add(struct uwb_rc *, struct device *dev, void *rc_priv); +void uwb_rc_rm(struct uwb_rc *); +void uwb_rc_neh_grok(struct uwb_rc *, void *, size_t); +void uwb_rc_neh_error(struct uwb_rc *, int); +void uwb_rc_reset_all(struct uwb_rc *rc); +void uwb_rc_pre_reset(struct uwb_rc *rc); +int uwb_rc_post_reset(struct uwb_rc *rc); + +/** + * uwb_rsv_is_owner - is the owner of this reservation the RC? + * @rsv: the reservation + */ +static inline bool uwb_rsv_is_owner(struct uwb_rsv *rsv) +{ + return rsv->owner == &rsv->rc->uwb_dev; +} + +/** + * enum uwb_notifs - UWB events that can be passed to any listeners + * @UWB_NOTIF_ONAIR: a new neighbour has joined the beacon group. + * @UWB_NOTIF_OFFAIR: a neighbour has left the beacon group. 
+ * + * Higher layers can register callback functions with the radio + * controller using uwb_notifs_register(). The radio controller + * maintains a list of all registered handlers and will notify all + * nodes when an event occurs. + */ +enum uwb_notifs { + UWB_NOTIF_ONAIR, + UWB_NOTIF_OFFAIR, +}; + +/* Callback function registered with UWB */ +struct uwb_notifs_handler { + struct list_head list_node; + void (*cb)(void *, struct uwb_dev *, enum uwb_notifs); + void *data; +}; + +int uwb_notifs_register(struct uwb_rc *, struct uwb_notifs_handler *); +int uwb_notifs_deregister(struct uwb_rc *, struct uwb_notifs_handler *); + + +/** + * UWB radio controller Event Size Entry (for creating entry tables) + * + * WUSB and WHCI define events and notifications, and they might have + * fixed or variable size. + * + * Each event/notification has a size which is not necessarily known + * in advance based on the event code. As well, vendor specific + * events/notifications will have a size impossible to determine + * unless we know about the device's specific details. + * + * It was way too smart of the spec writers not to think that it would + * be impossible for a generic driver to skip over vendor specific + * events/notifications if there are no LENGTH fields in the HEADER of + * each message...the transaction size cannot be counted on as the + * spec does not forbid to pack more than one event in a single + * transaction. + * + * Thus, we guess sizes with tables (or for events, when you know the + * size ahead of time you can use uwb_rc_neh_extra_size*()). We + * register tables with the known events and their sizes, and then we + * traverse those tables. For those with variable length, we provide a + * way to lookup the size inside the event/notification's + * payload. This allows device-specific event size tables to be + * registered. 
+ * + * @size: Size of the payload + * + * @offset: if != 0, at offset @offset-1 starts a field with a length + * that has to be added to @size. The format of the field is + * given by @type. + * + * @type: Type and length of the offset field. Most common is LE 16 + * bits (that's why that is zero); others are there mostly to + * cover for bugs and weirdos. + */ +struct uwb_est_entry { + size_t size; + unsigned offset; + enum { UWB_EST_16 = 0, UWB_EST_8 = 1 } type; +}; + +int uwb_est_register(u8 type, u8 code_high, u16 vendor, u16 product, + const struct uwb_est_entry *, size_t entries); +int uwb_est_unregister(u8 type, u8 code_high, u16 vendor, u16 product, + const struct uwb_est_entry *, size_t entries); +ssize_t uwb_est_find_size(struct uwb_rc *rc, const struct uwb_rceb *rceb, + size_t len); + +/* -- Misc */ + +enum { + EDC_MAX_ERRORS = 10, + EDC_ERROR_TIMEFRAME = HZ, +}; + +/* error density counter */ +struct edc { + unsigned long timestart; + u16 errorcount; +}; + +static inline +void edc_init(struct edc *edc) +{ + edc->timestart = jiffies; +} + +/* Called when an error occurred. + * This is way to determine if the number of acceptable errors per time + * period has been exceeded. It is not accurate as there are cases in which + * this scheme will not work, for example if there are periodic occurrences + * of errors that straddle updates to the start time. This scheme is + * sufficient for our usage. + * + * @returns 1 if maximum acceptable errors per timeframe has been exceeded. 
+ */ +static inline int edc_inc(struct edc *err_hist, u16 max_err, u16 timeframe) +{ + unsigned long now; + + now = jiffies; + if (now - err_hist->timestart > timeframe) { + err_hist->errorcount = 1; + err_hist->timestart = now; + } else if (++err_hist->errorcount > max_err) { + err_hist->errorcount = 0; + err_hist->timestart = now; + return 1; + } + return 0; +} + + +/* Information Element handling */ + +struct uwb_ie_hdr *uwb_ie_next(void **ptr, size_t *len); +int uwb_rc_ie_add(struct uwb_rc *uwb_rc, const struct uwb_ie_hdr *ies, size_t size); +int uwb_rc_ie_rm(struct uwb_rc *uwb_rc, enum uwb_ie element_id); + +/* + * Transmission statistics + * + * UWB uses LQI and RSSI (one byte values) for reporting radio signal + * strength and line quality indication. We do quick and dirty + * averages of those. They are signed values, btw. + * + * For 8 bit quantities, we keep the min, the max, an accumulator + * (@sigma) and a # of samples. When @samples gets to 255, we compute + * the average (@sigma / @samples), place it in @sigma and reset + * @samples to 1 (so we use it as the first sample). + * + * Now, statistically speaking, probably I am kicking the kidneys of + * some books I have in my shelves collecting dust, but I just want to + * get an approx, not the Nobel. + * + * LOCKING: there is no locking per se, but we try to keep a lockless + * schema. Only _add_samples() modifies the values--as long as you + * have other locking on top that makes sure that no two calls of + * _add_sample() happen at the same time, then we are fine. Now, for + * resetting the values we just set @samples to 0 and that makes the + * next _add_sample() to start with defaults. Reading the values in + * _show() currently can race, so you need to make sure the calls are + * under the same lock that protects calls to _add_sample(). FIXME: + * currently unlocked (It is not ultraprecise but does the trick. Bite + * me). 
+ */ +struct stats { + s8 min, max; + s16 sigma; + atomic_t samples; +}; + +static inline +void stats_init(struct stats *stats) +{ + atomic_set(&stats->samples, 0); + wmb(); +} + +static inline +void stats_add_sample(struct stats *stats, s8 sample) +{ + s8 min, max; + s16 sigma; + unsigned samples = atomic_read(&stats->samples); + if (samples == 0) { /* it was zero before, so we initialize */ + min = 127; + max = -128; + sigma = 0; + } else { + min = stats->min; + max = stats->max; + sigma = stats->sigma; + } + + if (sample < min) /* compute new values */ + min = sample; + else if (sample > max) + max = sample; + sigma += sample; + + stats->min = min; /* commit */ + stats->max = max; + stats->sigma = sigma; + if (atomic_add_return(1, &stats->samples) > 255) { + /* wrapped around! reset */ + stats->sigma = sigma / 256; + atomic_set(&stats->samples, 1); + } +} + +static inline ssize_t stats_show(struct stats *stats, char *buf) +{ + int min, max, avg; + int samples = atomic_read(&stats->samples); + if (samples == 0) + min = max = avg = 0; + else { + min = stats->min; + max = stats->max; + avg = stats->sigma / samples; + } + return scnprintf(buf, PAGE_SIZE, "%d %d %d\n", min, max, avg); +} + +static inline ssize_t stats_store(struct stats *stats, const char *buf, + size_t size) +{ + stats_init(stats); + return size; +} + +#endif /* #ifndef __LINUX__UWB_H__ */ diff --git a/include/linux/uwb/debug-cmd.h b/include/linux/uwb/debug-cmd.h new file mode 100644 index 0000000000..8da004e256 --- /dev/null +++ b/include/linux/uwb/debug-cmd.h @@ -0,0 +1,68 @@ +/* + * Ultra Wide Band + * Debug interface commands + * + * Copyright (C) 2008 Cambridge Silicon Radio Ltd. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License version + * 2 as published by the Free Software Foundation. 
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+#ifndef __LINUX__UWB__DEBUG_CMD_H__
+#define __LINUX__UWB__DEBUG_CMD_H__
+
+#include <linux/types.h>
+
+/*
+ * Debug interface commands
+ *
+ * UWB_DBG_CMD_RSV_ESTABLISH: Establish a new unicast reservation.
+ *
+ * UWB_DBG_CMD_RSV_TERMINATE: Terminate the Nth reservation.
+ */
+
+enum uwb_dbg_cmd_type {
+ UWB_DBG_CMD_RSV_ESTABLISH = 1,
+ UWB_DBG_CMD_RSV_TERMINATE = 2,
+ UWB_DBG_CMD_IE_ADD = 3,
+ UWB_DBG_CMD_IE_RM = 4,
+ UWB_DBG_CMD_RADIO_START = 5,
+ UWB_DBG_CMD_RADIO_STOP = 6,
+};
+
+struct uwb_dbg_cmd_rsv_establish {
+ __u8 target[6];
+ __u8 type;
+ __u16 max_mas;
+ __u16 min_mas;
+ __u8 max_interval;
+};
+
+struct uwb_dbg_cmd_rsv_terminate {
+ int index;
+};
+
+struct uwb_dbg_cmd_ie {
+ __u8 data[128];
+ int len;
+};
+
+struct uwb_dbg_cmd {
+ __u32 type;
+ union {
+ struct uwb_dbg_cmd_rsv_establish rsv_establish;
+ struct uwb_dbg_cmd_rsv_terminate rsv_terminate;
+ struct uwb_dbg_cmd_ie ie_add;
+ struct uwb_dbg_cmd_ie ie_rm;
+ };
+};
+
+#endif /* #ifndef __LINUX__UWB__DEBUG_CMD_H__ */ diff --git a/include/linux/uwb/spec.h b/include/linux/uwb/spec.h new file mode 100644 index 0000000000..0df24bfcdb --- /dev/null +++ b/include/linux/uwb/spec.h @@ -0,0 +1,781 @@ +/*
+ * Ultra Wide Band
+ * UWB Standard definitions
+ *
+ * Copyright (C) 2005-2006 Intel Corporation
+ * Inaky Perez-Gonzalez
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ *
+ *
+ * All these definitions are based on the ECMA-368 standard.
+ *
+ * Note all definitions are Little Endian in the wire, and we will
+ * convert them to host order before operating on the bitfields (that
+ * yes, we use extensively).
+ */
+
+#ifndef __LINUX__UWB_SPEC_H__
+#define __LINUX__UWB_SPEC_H__
+
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/etherdevice.h>
+
+#define i1480_FW 0x00000303
+/* #define i1480_FW 0x00000302 */
+
+/**
+ * Number of Medium Access Slots in a superframe.
+ *
+ * UWB divides time in SuperFrames, each one divided in 256 pieces, or
+ * Medium Access Slots. See MBOA MAC[5.4.5] for details. The MAS is the
+ * basic bandwidth allocation unit in UWB.
+ */
+enum { UWB_NUM_MAS = 256 };
+
+/**
+ * Number of Zones in superframe.
+ *
+ * UWB divides the superframe into zones with numbering starting from BPST.
+ * See MBOA MAC[16.8.6]
+ */
+enum { UWB_NUM_ZONES = 16 };
+
+/*
+ * Number of MAS in a zone.
+ */
+#define UWB_MAS_PER_ZONE (UWB_NUM_MAS / UWB_NUM_ZONES)
+
+/*
+ * Number of MAS required before a row can be considered available.
+ */
+#define UWB_USABLE_MAS_PER_ROW (UWB_NUM_ZONES - 1)
+
+/*
+ * Number of streams per DRP reservation between a pair of devices.
+ *
+ * [ECMA-368] section 16.8.6.
+ */
+enum { UWB_NUM_STREAMS = 8 };
+
+/*
+ * mMasLength
+ *
+ * The length of a MAS in microseconds.
+ *
+ * [ECMA-368] section 17.16.
+ */
+enum { UWB_MAS_LENGTH_US = 256 };
+
+/*
+ * mBeaconSlotLength
+ *
+ * The length of the beacon slot in microseconds.
+ *
+ * [ECMA-368] section 17.16
+ */
+enum { UWB_BEACON_SLOT_LENGTH_US = 85 };
+
+/*
+ * mMaxLostBeacons
+ *
+ * The number of beacons missing in consecutive superframes before a
+ * device can be considered as unreachable.
+ *
+ * [ECMA-368] section 17.16
+ */
+enum { UWB_MAX_LOST_BEACONS = 3 };
+
+/*
+ * mDRPBackOffWinMin
+ *
+ * The minimum number of superframes to wait before trying to reserve
+ * extra MAS.
+ *
+ * [ECMA-368] section 17.16
+ */
+enum { UWB_DRP_BACKOFF_WIN_MIN = 2 };
+
+/*
+ * mDRPBackOffWinMax
+ *
+ * The maximum number of superframes to wait before trying to reserve
+ * extra MAS.
+ *
+ * [ECMA-368] section 17.16
+ */
+enum { UWB_DRP_BACKOFF_WIN_MAX = 16 };
+
+/*
+ * Length of a superframe in microseconds.
+ */
+#define UWB_SUPERFRAME_LENGTH_US (UWB_MAS_LENGTH_US * UWB_NUM_MAS)
+
+/**
+ * UWB MAC address
+ *
+ * It is *imperative* that this struct is exactly 6 packed bytes (as
+ * it is also used to define headers sent down and up the wire/radio).
+ */
+struct uwb_mac_addr {
+ u8 data[ETH_ALEN];
+} __attribute__((packed));
+
+
+/**
+ * UWB device address
+ *
+ * It is *imperative* that this struct is exactly 2 packed bytes (as
+ * it is also used to define headers sent down and up the wire/radio).
+ */
+struct uwb_dev_addr {
+ u8 data[2];
+} __attribute__((packed));
+
+
+/**
+ * Types of UWB addresses
+ *
+ * Order matters (by size).
+ */
+enum uwb_addr_type {
+ UWB_ADDR_DEV = 0,
+ UWB_ADDR_MAC = 1,
+};
+
+
+/** Size of a char buffer for printing a MAC/device address */
+enum { UWB_ADDR_STRSIZE = 32 };
+
+
+/** UWB WiMedia protocol IDs.
*/ +enum uwb_prid { + UWB_PRID_WLP_RESERVED = 0x0000, + UWB_PRID_WLP = 0x0001, + UWB_PRID_WUSB_BOT = 0x0010, + UWB_PRID_WUSB = 0x0010, + UWB_PRID_WUSB_TOP = 0x001F, +}; + + +/** PHY Rate (MBOA MAC[7.8.12, Table 61]) */ +enum uwb_phy_rate { + UWB_PHY_RATE_53 = 0, + UWB_PHY_RATE_80, + UWB_PHY_RATE_106, + UWB_PHY_RATE_160, + UWB_PHY_RATE_200, + UWB_PHY_RATE_320, + UWB_PHY_RATE_400, + UWB_PHY_RATE_480, + UWB_PHY_RATE_INVALID +}; + + +/** + * Different ways to scan (MBOA MAC[6.2.2, Table 8], WUSB[Table 8-78]) + */ +enum uwb_scan_type { + UWB_SCAN_ONLY = 0, + UWB_SCAN_OUTSIDE_BP, + UWB_SCAN_WHILE_INACTIVE, + UWB_SCAN_DISABLED, + UWB_SCAN_ONLY_STARTTIME, + UWB_SCAN_TOP +}; + + +/** ACK Policy types (MBOA MAC[7.2.1.3]) */ +enum uwb_ack_pol { + UWB_ACK_NO = 0, + UWB_ACK_INM = 1, + UWB_ACK_B = 2, + UWB_ACK_B_REQ = 3, +}; + + +/** DRP reservation types ([ECMA-368 table 106) */ +enum uwb_drp_type { + UWB_DRP_TYPE_ALIEN_BP = 0, + UWB_DRP_TYPE_HARD, + UWB_DRP_TYPE_SOFT, + UWB_DRP_TYPE_PRIVATE, + UWB_DRP_TYPE_PCA, +}; + + +/** DRP Reason Codes ([ECMA-368] table 107) */ +enum uwb_drp_reason { + UWB_DRP_REASON_ACCEPTED = 0, + UWB_DRP_REASON_CONFLICT, + UWB_DRP_REASON_PENDING, + UWB_DRP_REASON_DENIED, + UWB_DRP_REASON_MODIFIED, +}; + +/** Relinquish Request Reason Codes ([ECMA-368] table 113) */ +enum uwb_relinquish_req_reason { + UWB_RELINQUISH_REQ_REASON_NON_SPECIFIC = 0, + UWB_RELINQUISH_REQ_REASON_OVER_ALLOCATION, +}; + +/** + * DRP Notification Reason Codes (WHCI 0.95 [3.1.4.9]) + */ +enum uwb_drp_notif_reason { + UWB_DRP_NOTIF_DRP_IE_RCVD = 0, + UWB_DRP_NOTIF_CONFLICT, + UWB_DRP_NOTIF_TERMINATE, +}; + + +/** Allocation of MAS slots in a DRP request MBOA MAC[7.8.7] */ +struct uwb_drp_alloc { + __le16 zone_bm; + __le16 mas_bm; +} __attribute__((packed)); + + +/** General MAC Header format (ECMA-368[16.2]) */ +struct uwb_mac_frame_hdr { + __le16 Frame_Control; + struct uwb_dev_addr DestAddr; + struct uwb_dev_addr SrcAddr; + __le16 Sequence_Control; + __le16 Access_Information; +} 
__attribute__((packed)); + + +/** + * uwb_beacon_frame - a beacon frame including MAC headers + * + * [ECMA] section 16.3. + */ +struct uwb_beacon_frame { + struct uwb_mac_frame_hdr hdr; + struct uwb_mac_addr Device_Identifier; /* may be a NULL EUI-48 */ + u8 Beacon_Slot_Number; + u8 Device_Control; + u8 IEData[]; +} __attribute__((packed)); + + +/** Information Element codes (MBOA MAC[T54]) */ +enum uwb_ie { + UWB_PCA_AVAILABILITY = 2, + UWB_IE_DRP_AVAILABILITY = 8, + UWB_IE_DRP = 9, + UWB_BP_SWITCH_IE = 11, + UWB_MAC_CAPABILITIES_IE = 12, + UWB_PHY_CAPABILITIES_IE = 13, + UWB_APP_SPEC_PROBE_IE = 15, + UWB_IDENTIFICATION_IE = 19, + UWB_MASTER_KEY_ID_IE = 20, + UWB_RELINQUISH_REQUEST_IE = 21, + UWB_IE_WLP = 250, /* WiMedia Logical Link Control Protocol WLP 0.99 */ + UWB_APP_SPEC_IE = 255, +}; + + +/** + * Header common to all Information Elements (IEs) + */ +struct uwb_ie_hdr { + u8 element_id; /* enum uwb_ie */ + u8 length; +} __attribute__((packed)); + + +/** Dynamic Reservation Protocol IE (MBOA MAC[7.8.6]) */ +struct uwb_ie_drp { + struct uwb_ie_hdr hdr; + __le16 drp_control; + struct uwb_dev_addr dev_addr; + struct uwb_drp_alloc allocs[]; +} __attribute__((packed)); + +static inline int uwb_ie_drp_type(struct uwb_ie_drp *ie) +{ + return (le16_to_cpu(ie->drp_control) >> 0) & 0x7; +} + +static inline int uwb_ie_drp_stream_index(struct uwb_ie_drp *ie) +{ + return (le16_to_cpu(ie->drp_control) >> 3) & 0x7; +} + +static inline int uwb_ie_drp_reason_code(struct uwb_ie_drp *ie) +{ + return (le16_to_cpu(ie->drp_control) >> 6) & 0x7; +} + +static inline int uwb_ie_drp_status(struct uwb_ie_drp *ie) +{ + return (le16_to_cpu(ie->drp_control) >> 9) & 0x1; +} + +static inline int uwb_ie_drp_owner(struct uwb_ie_drp *ie) +{ + return (le16_to_cpu(ie->drp_control) >> 10) & 0x1; +} + +static inline int uwb_ie_drp_tiebreaker(struct uwb_ie_drp *ie) +{ + return (le16_to_cpu(ie->drp_control) >> 11) & 0x1; +} + +static inline int uwb_ie_drp_unsafe(struct uwb_ie_drp *ie) +{ + return 
(le16_to_cpu(ie->drp_control) >> 12) & 0x1; +} + +static inline void uwb_ie_drp_set_type(struct uwb_ie_drp *ie, enum uwb_drp_type type) +{ + u16 drp_control = le16_to_cpu(ie->drp_control); + drp_control = (drp_control & ~(0x7 << 0)) | (type << 0); + ie->drp_control = cpu_to_le16(drp_control); +} + +static inline void uwb_ie_drp_set_stream_index(struct uwb_ie_drp *ie, int stream_index) +{ + u16 drp_control = le16_to_cpu(ie->drp_control); + drp_control = (drp_control & ~(0x7 << 3)) | (stream_index << 3); + ie->drp_control = cpu_to_le16(drp_control); +} + +static inline void uwb_ie_drp_set_reason_code(struct uwb_ie_drp *ie, + enum uwb_drp_reason reason_code) +{ + u16 drp_control = le16_to_cpu(ie->drp_control); + drp_control = (ie->drp_control & ~(0x7 << 6)) | (reason_code << 6); + ie->drp_control = cpu_to_le16(drp_control); +} + +static inline void uwb_ie_drp_set_status(struct uwb_ie_drp *ie, int status) +{ + u16 drp_control = le16_to_cpu(ie->drp_control); + drp_control = (drp_control & ~(0x1 << 9)) | (status << 9); + ie->drp_control = cpu_to_le16(drp_control); +} + +static inline void uwb_ie_drp_set_owner(struct uwb_ie_drp *ie, int owner) +{ + u16 drp_control = le16_to_cpu(ie->drp_control); + drp_control = (drp_control & ~(0x1 << 10)) | (owner << 10); + ie->drp_control = cpu_to_le16(drp_control); +} + +static inline void uwb_ie_drp_set_tiebreaker(struct uwb_ie_drp *ie, int tiebreaker) +{ + u16 drp_control = le16_to_cpu(ie->drp_control); + drp_control = (drp_control & ~(0x1 << 11)) | (tiebreaker << 11); + ie->drp_control = cpu_to_le16(drp_control); +} + +static inline void uwb_ie_drp_set_unsafe(struct uwb_ie_drp *ie, int unsafe) +{ + u16 drp_control = le16_to_cpu(ie->drp_control); + drp_control = (drp_control & ~(0x1 << 12)) | (unsafe << 12); + ie->drp_control = cpu_to_le16(drp_control); +} + +/** Dynamic Reservation Protocol IE (MBOA MAC[7.8.7]) */ +struct uwb_ie_drp_avail { + struct uwb_ie_hdr hdr; + DECLARE_BITMAP(bmp, UWB_NUM_MAS); +} __attribute__((packed)); + 
+/* Relinquish Request IE ([ECMA-368] section 16.8.19). */
+struct uwb_relinquish_request_ie {
+ struct uwb_ie_hdr hdr;
+ __le16 relinquish_req_control;
+ struct uwb_dev_addr dev_addr;
+ struct uwb_drp_alloc allocs[];
+} __attribute__((packed));
+
+static inline int uwb_ie_relinquish_req_reason_code(struct uwb_relinquish_request_ie *ie)
+{
+ return (le16_to_cpu(ie->relinquish_req_control) >> 0) & 0xf;
+}
+
+static inline void uwb_ie_relinquish_req_set_reason_code(struct uwb_relinquish_request_ie *ie,
+ int reason_code)
+{
+ u16 ctrl = le16_to_cpu(ie->relinquish_req_control);
+ ctrl = (ctrl & ~(0xf << 0)) | (reason_code << 0);
+ ie->relinquish_req_control = cpu_to_le16(ctrl);
+}
+
+/**
+ * The Vendor ID is set to an OUI that indicates the vendor of the device.
+ * ECMA-368 [16.8.10]
+ */
+struct uwb_vendor_id {
+ u8 data[3];
+} __attribute__((packed));
+
+/**
+ * The device type ID
+ * FIXME: clarify what this means
+ * ECMA-368 [16.8.10]
+ */
+struct uwb_device_type_id {
+ u8 data[3];
+} __attribute__((packed));
+
+
+/**
+ * UWB device information types
+ * ECMA-368 [16.8.10]
+ */
+enum uwb_dev_info_type {
+ UWB_DEV_INFO_VENDOR_ID = 0,
+ UWB_DEV_INFO_VENDOR_TYPE,
+ UWB_DEV_INFO_NAME,
+};
+
+/**
+ * UWB device information found in Identification IE
+ * ECMA-368 [16.8.10]
+ */
+struct uwb_dev_info {
+ u8 type; /* enum uwb_dev_info_type */
+ u8 length;
+ u8 data[];
+} __attribute__((packed));
+
+/**
+ * UWB Identification IE
+ * ECMA-368 [16.8.10]
+ */
+struct uwb_identification_ie {
+ struct uwb_ie_hdr hdr;
+ struct uwb_dev_info info[];
+} __attribute__((packed));
+
+/*
+ * UWB Radio Controller
+ *
+ * These definitions are common to the Radio Control layers as
+ * exported by the WUSB1.0 HWA and WHCI interfaces.
+ */ + +/** Radio Control Command Block (WUSB1.0[Table 8-65] and WHCI 0.95) */ +struct uwb_rccb { + u8 bCommandType; /* enum hwa_cet */ + __le16 wCommand; /* Command code */ + u8 bCommandContext; /* Context ID */ +} __attribute__((packed)); + + +/** Radio Control Event Block (WUSB[table 8-66], WHCI 0.95) */ +struct uwb_rceb { + u8 bEventType; /* enum hwa_cet */ + __le16 wEvent; /* Event code */ + u8 bEventContext; /* Context ID */ +} __attribute__((packed)); + + +enum { + UWB_RC_CET_GENERAL = 0, /* General Command/Event type */ + UWB_RC_CET_EX_TYPE_1 = 1, /* Extended Type 1 Command/Event type */ +}; + +/* Commands to the radio controller */ +enum uwb_rc_cmd { + UWB_RC_CMD_CHANNEL_CHANGE = 16, + UWB_RC_CMD_DEV_ADDR_MGMT = 17, /* Device Address Management */ + UWB_RC_CMD_GET_IE = 18, /* GET Information Elements */ + UWB_RC_CMD_RESET = 19, + UWB_RC_CMD_SCAN = 20, /* Scan management */ + UWB_RC_CMD_SET_BEACON_FILTER = 21, + UWB_RC_CMD_SET_DRP_IE = 22, /* Dynamic Reservation Protocol IEs */ + UWB_RC_CMD_SET_IE = 23, /* Information Element management */ + UWB_RC_CMD_SET_NOTIFICATION_FILTER = 24, + UWB_RC_CMD_SET_TX_POWER = 25, + UWB_RC_CMD_SLEEP = 26, + UWB_RC_CMD_START_BEACON = 27, + UWB_RC_CMD_STOP_BEACON = 28, + UWB_RC_CMD_BP_MERGE = 29, + UWB_RC_CMD_SEND_COMMAND_FRAME = 30, + UWB_RC_CMD_SET_ASIE_NOTIF = 31, +}; + +/* Notifications from the radio controller */ +enum uwb_rc_evt { + UWB_RC_EVT_IE_RCV = 0, + UWB_RC_EVT_BEACON = 1, + UWB_RC_EVT_BEACON_SIZE = 2, + UWB_RC_EVT_BPOIE_CHANGE = 3, + UWB_RC_EVT_BP_SLOT_CHANGE = 4, + UWB_RC_EVT_BP_SWITCH_IE_RCV = 5, + UWB_RC_EVT_DEV_ADDR_CONFLICT = 6, + UWB_RC_EVT_DRP_AVAIL = 7, + UWB_RC_EVT_DRP = 8, + UWB_RC_EVT_BP_SWITCH_STATUS = 9, + UWB_RC_EVT_CMD_FRAME_RCV = 10, + UWB_RC_EVT_CHANNEL_CHANGE_IE_RCV = 11, + /* Events (command responses) use the same code as the command */ + UWB_RC_EVT_UNKNOWN_CMD_RCV = 65535, +}; + +enum uwb_rc_extended_type_1_cmd { + UWB_RC_SET_DAA_ENERGY_MASK = 32, + UWB_RC_SET_NOTIFICATION_FILTER_EX = 33, 
+}; + +enum uwb_rc_extended_type_1_evt { + UWB_RC_DAA_ENERGY_DETECTED = 0, +}; + +/* Radio Control Result Code. [WHCI] table 3-3. */ +enum { + UWB_RC_RES_SUCCESS = 0, + UWB_RC_RES_FAIL, + UWB_RC_RES_FAIL_HARDWARE, + UWB_RC_RES_FAIL_NO_SLOTS, + UWB_RC_RES_FAIL_BEACON_TOO_LARGE, + UWB_RC_RES_FAIL_INVALID_PARAMETER, + UWB_RC_RES_FAIL_UNSUPPORTED_PWR_LEVEL, + UWB_RC_RES_FAIL_INVALID_IE_DATA, + UWB_RC_RES_FAIL_BEACON_SIZE_EXCEEDED, + UWB_RC_RES_FAIL_CANCELLED, + UWB_RC_RES_FAIL_INVALID_STATE, + UWB_RC_RES_FAIL_INVALID_SIZE, + UWB_RC_RES_FAIL_ACK_NOT_RECEIVED, + UWB_RC_RES_FAIL_NO_MORE_ASIE_NOTIF, + UWB_RC_RES_FAIL_TIME_OUT = 255, +}; + +/* Confirm event. [WHCI] section 3.1.3.1 etc. */ +struct uwb_rc_evt_confirm { + struct uwb_rceb rceb; + u8 bResultCode; +} __attribute__((packed)); + +/* Device Address Management event. [WHCI] section 3.1.3.2. */ +struct uwb_rc_evt_dev_addr_mgmt { + struct uwb_rceb rceb; + u8 baAddr[ETH_ALEN]; + u8 bResultCode; +} __attribute__((packed)); + + +/* Get IE Event. [WHCI] section 3.1.3.3. */ +struct uwb_rc_evt_get_ie { + struct uwb_rceb rceb; + __le16 wIELength; + u8 IEData[]; +} __attribute__((packed)); + +/* Set DRP IE Event. [WHCI] section 3.1.3.7. */ +struct uwb_rc_evt_set_drp_ie { + struct uwb_rceb rceb; + __le16 wRemainingSpace; + u8 bResultCode; +} __attribute__((packed)); + +/* Set IE Event. [WHCI] section 3.1.3.8. */ +struct uwb_rc_evt_set_ie { + struct uwb_rceb rceb; + __le16 RemainingSpace; + u8 bResultCode; +} __attribute__((packed)); + +/* Scan command. [WHCI] 3.1.3.5. */ +struct uwb_rc_cmd_scan { + struct uwb_rccb rccb; + u8 bChannelNumber; + u8 bScanState; + __le16 wStartTime; +} __attribute__((packed)); + +/* Set DRP IE command. [WHCI] section 3.1.3.7. */ +struct uwb_rc_cmd_set_drp_ie { + struct uwb_rccb rccb; + __le16 wIELength; + struct uwb_ie_drp IEData[]; +} __attribute__((packed)); + +/* Set IE command. [WHCI] section 3.1.3.8. 
*/ +struct uwb_rc_cmd_set_ie { + struct uwb_rccb rccb; + __le16 wIELength; + u8 IEData[]; +} __attribute__((packed)); + +/* Set DAA Energy Mask event. [WHCI 0.96] section 3.1.3.17. */ +struct uwb_rc_evt_set_daa_energy_mask { + struct uwb_rceb rceb; + __le16 wLength; + u8 result; +} __attribute__((packed)); + +/* Set Notification Filter Extended event. [WHCI 0.96] section 3.1.3.18. */ +struct uwb_rc_evt_set_notification_filter_ex { + struct uwb_rceb rceb; + __le16 wLength; + u8 result; +} __attribute__((packed)); + +/* IE Received notification. [WHCI] section 3.1.4.1. */ +struct uwb_rc_evt_ie_rcv { + struct uwb_rceb rceb; + struct uwb_dev_addr SrcAddr; + __le16 wIELength; + u8 IEData[]; +} __attribute__((packed)); + +/* Type of the received beacon. [WHCI] section 3.1.4.2. */ +enum uwb_rc_beacon_type { + UWB_RC_BEACON_TYPE_SCAN = 0, + UWB_RC_BEACON_TYPE_NEIGHBOR, + UWB_RC_BEACON_TYPE_OL_ALIEN, + UWB_RC_BEACON_TYPE_NOL_ALIEN, +}; + +/* Beacon received notification. [WHCI] 3.1.4.2. */ +struct uwb_rc_evt_beacon { + struct uwb_rceb rceb; + u8 bChannelNumber; + u8 bBeaconType; + __le16 wBPSTOffset; + u8 bLQI; + u8 bRSSI; + __le16 wBeaconInfoLength; + u8 BeaconInfo[]; +} __attribute__((packed)); + + +/* Beacon Size Change notification. [WHCI] section 3.1.4.3 */ +struct uwb_rc_evt_beacon_size { + struct uwb_rceb rceb; + __le16 wNewBeaconSize; +} __attribute__((packed)); + + +/* BPOIE Change notification. [WHCI] section 3.1.4.4. */ +struct uwb_rc_evt_bpoie_change { + struct uwb_rceb rceb; + __le16 wBPOIELength; + u8 BPOIE[]; +} __attribute__((packed)); + + +/* Beacon Slot Change notification. [WHCI] section 3.1.4.5. 
*/ +struct uwb_rc_evt_bp_slot_change { + struct uwb_rceb rceb; + u8 slot_info; +} __attribute__((packed)); + +static inline int uwb_rc_evt_bp_slot_change_slot_num( + const struct uwb_rc_evt_bp_slot_change *evt) +{ + return evt->slot_info & 0x7f; +} + +static inline int uwb_rc_evt_bp_slot_change_no_slot( + const struct uwb_rc_evt_bp_slot_change *evt) +{ + return (evt->slot_info & 0x80) >> 7; +} + +/* BP Switch IE Received notification. [WHCI] section 3.1.4.6. */ +struct uwb_rc_evt_bp_switch_ie_rcv { + struct uwb_rceb rceb; + struct uwb_dev_addr wSrcAddr; + __le16 wIELength; + u8 IEData[]; +} __attribute__((packed)); + +/* DevAddr Conflict notification. [WHCI] section 3.1.4.7. */ +struct uwb_rc_evt_dev_addr_conflict { + struct uwb_rceb rceb; +} __attribute__((packed)); + +/* DRP notification. [WHCI] section 3.1.4.9. */ +struct uwb_rc_evt_drp { + struct uwb_rceb rceb; + struct uwb_dev_addr src_addr; + u8 reason; + u8 beacon_slot_number; + __le16 ie_length; + u8 ie_data[]; +} __attribute__((packed)); + +static inline enum uwb_drp_notif_reason uwb_rc_evt_drp_reason(struct uwb_rc_evt_drp *evt) +{ + return evt->reason & 0x0f; +} + + +/* DRP Availability Change notification. [WHCI] section 3.1.4.8. */ +struct uwb_rc_evt_drp_avail { + struct uwb_rceb rceb; + DECLARE_BITMAP(bmp, UWB_NUM_MAS); +} __attribute__((packed)); + +/* BP switch status notification. [WHCI] section 3.1.4.10. */ +struct uwb_rc_evt_bp_switch_status { + struct uwb_rceb rceb; + u8 status; + u8 slot_offset; + __le16 bpst_offset; + u8 move_countdown; +} __attribute__((packed)); + +/* Command Frame Received notification. [WHCI] section 3.1.4.11. */ +struct uwb_rc_evt_cmd_frame_rcv { + struct uwb_rceb rceb; + __le16 receive_time; + struct uwb_dev_addr wSrcAddr; + struct uwb_dev_addr wDstAddr; + __le16 control; + __le16 reserved; + __le16 dataLength; + u8 data[]; +} __attribute__((packed)); + +/* Channel Change IE Received notification. [WHCI] section 3.1.4.12. 
 */
+struct uwb_rc_evt_channel_change_ie_rcv {
+ struct uwb_rceb rceb;
+ struct uwb_dev_addr wSrcAddr;
+ __le16 wIELength;
+ u8 IEData[];
+} __attribute__((packed));
+
+/* DAA Energy Detected notification. [WHCI 0.96] section 3.1.4.14. */
+struct uwb_rc_evt_daa_energy_detected {
+ struct uwb_rceb rceb;
+ __le16 wLength;
+ u8 bandID;
+ u8 reserved;
+ u8 toneBmp[16];
+} __attribute__((packed));
+
+
+/**
+ * Radio Control Interface Class Descriptor
+ *
+ * WUSB 1.0 [8.6.1.2]
+ */
+struct uwb_rc_control_intf_class_desc {
+ u8 bLength;
+ u8 bDescriptorType;
+ __le16 bcdRCIVersion;
+} __attribute__((packed));
+
+#endif /* #ifndef __LINUX__UWB_SPEC_H__ */ diff --git a/include/linux/uwb/umc.h b/include/linux/uwb/umc.h new file mode 100644 index 0000000000..02112299a1 --- /dev/null +++ b/include/linux/uwb/umc.h @@ -0,0 +1,193 @@ +/*
+ * UWB Multi-interface Controller support.
+ *
+ * Copyright (C) 2007 Cambridge Silicon Radio Ltd.
+ *
+ * This file is released under the GPLv2
+ *
+ * UMC (UWB Multi-interface Controller) capabilities (e.g., radio
+ * controller, host controller) are presented as devices on the "umc"
+ * bus.
+ *
+ * The radio controller is not strictly a UMC capability but it's
+ * useful to present it as such.
+ *
+ * References:
+ *
+ * [WHCI] Wireless Host Controller Interface Specification for
+ * Certified Wireless Universal Serial Bus, revision 0.95.
+ *
+ * How this works is kind of convoluted but simple. The whci.ko driver
+ * loads when WHCI devices are detected. These WHCI devices expose
+ * many devices in the same PCI function (they couldn't have reused
+ * functions, no), so for each PCI function that exposes these many
+ * devices, whci creates a umc_dev [whci_probe() -> whci_add_cap()]
+ * with umc_device_create() and adds it to the bus with
+ * umc_device_register().
+ *
+ * umc_device_register() calls device_register() which will push the
+ * bus management code to load your UMC driver's something_probe()
+ * that you have registered for that capability code.
+ *
+ * Now when the WHCI device is removed, whci_remove() will go over
+ * each umc_dev assigned to each of the PCI function's capabilities
+ * and through whci_del_cap() call umc_device_unregister() each
+ * created umc_dev. Of course, if you are bound to the device, your
+ * driver's something_remove() will be called.
+ */
+
+#ifndef _LINUX_UWB_UMC_H_
+#define _LINUX_UWB_UMC_H_
+
+#include <linux/device.h>
+#include <linux/pci.h>
+
+/*
+ * UMC capability IDs.
+ *
+ * 0x00 is reserved so use it for the radio controller device.
+ *
+ * [WHCI] table 2-8
+ */
+#define UMC_CAP_ID_WHCI_RC 0x00 /* radio controller */
+#define UMC_CAP_ID_WHCI_WUSB_HC 0x01 /* WUSB host controller */
+
+/**
+ * struct umc_dev - UMC capability device
+ *
+ * @version: version of the specification this capability conforms to.
+ * @cap_id: capability ID.
+ * @bar: PCI Bar (64 bit) where the resource lies
+ * @resource: register space resource.
+ * @irq: interrupt line.
+ */
+struct umc_dev {
+ u16 version;
+ u8 cap_id;
+ u8 bar;
+ struct resource resource;
+ unsigned irq;
+ struct device dev;
+};
+
+#define to_umc_dev(d) container_of(d, struct umc_dev, dev)
+
+/**
+ * struct umc_driver - UMC capability driver
+ * @cap_id: supported capability ID.
+ * @match: driver specific capability matching function.
+ * @match_data: driver specific data for match() (e.g., a
+ * table of pci_device_id's if umc_match_pci_id() is used).
+ */ +struct umc_driver { + char *name; + u8 cap_id; + int (*match)(struct umc_driver *, struct umc_dev *); + const void *match_data; + + int (*probe)(struct umc_dev *); + void (*remove)(struct umc_dev *); + int (*pre_reset)(struct umc_dev *); + int (*post_reset)(struct umc_dev *); + + struct device_driver driver; +}; + +#define to_umc_driver(d) container_of(d, struct umc_driver, driver) + +extern struct bus_type umc_bus_type; + +struct umc_dev *umc_device_create(struct device *parent, int n); +int __must_check umc_device_register(struct umc_dev *umc); +void umc_device_unregister(struct umc_dev *umc); + +int __must_check __umc_driver_register(struct umc_driver *umc_drv, + struct module *mod, + const char *mod_name); + +/** + * umc_driver_register - register a UMC capabiltity driver. + * @umc_drv: pointer to the driver. + */ +#define umc_driver_register(umc_drv) \ + __umc_driver_register(umc_drv, THIS_MODULE, KBUILD_MODNAME) + +void umc_driver_unregister(struct umc_driver *umc_drv); + +/* + * Utility function you can use to match (umc_driver->match) against a + * null-terminated array of 'struct pci_device_id' in + * umc_driver->match_data. + */ +int umc_match_pci_id(struct umc_driver *umc_drv, struct umc_dev *umc); + +/** + * umc_parent_pci_dev - return the UMC's parent PCI device or NULL if none + * @umc_dev: UMC device whose parent PCI device we are looking for + * + * DIRTY!!! DON'T RELY ON THIS + * + * FIXME: This is as dirty as it gets, but we need some way to check + * the correct type of umc_dev->parent (so that for example, we can + * cast to pci_dev). Casting to pci_dev is necessary because at some + * point we need to request resources from the device. Mapping is + * easily over come (ioremap and stuff are bus agnostic), but hooking + * up to some error handlers (such as pci error handlers) might need + * this. + * + * THIS might (probably will) be removed in the future, so don't count + * on it. 
+ */ +static inline struct pci_dev *umc_parent_pci_dev(struct umc_dev *umc_dev) +{ + struct pci_dev *pci_dev = NULL; + if (dev_is_pci(umc_dev->dev.parent)) + pci_dev = to_pci_dev(umc_dev->dev.parent); + return pci_dev; +} + +/** + * umc_dev_get() - reference a UMC device. + * @umc_dev: Pointer to UMC device. + * + * NOTE: we are assuming in this whole scheme that the parent device + * is referenced at _probe() time and unreferenced at _remove() + * time by the parent's subsystem. + */ +static inline struct umc_dev *umc_dev_get(struct umc_dev *umc_dev) +{ + get_device(&umc_dev->dev); + return umc_dev; +} + +/** + * umc_dev_put() - unreference a UMC device. + * @umc_dev: Pointer to UMC device. + */ +static inline void umc_dev_put(struct umc_dev *umc_dev) +{ + put_device(&umc_dev->dev); +} + +/** + * umc_set_drvdata - set UMC device's driver data. + * @umc_dev: Pointer to UMC device. + * @data: Data to set. + */ +static inline void umc_set_drvdata(struct umc_dev *umc_dev, void *data) +{ + dev_set_drvdata(&umc_dev->dev, data); +} + +/** + * umc_get_drvdata - recover UMC device's driver data. + * @umc_dev: Pointer to UMC device. + */ +static inline void *umc_get_drvdata(struct umc_dev *umc_dev) +{ + return dev_get_drvdata(&umc_dev->dev); +} + +int umc_controller_reset(struct umc_dev *umc); + +#endif /* #ifndef _LINUX_UWB_UMC_H_ */ diff --git a/include/linux/uwb/whci.h b/include/linux/uwb/whci.h new file mode 100644 index 0000000000..915ec23042 --- /dev/null +++ b/include/linux/uwb/whci.h @@ -0,0 +1,117 @@ +/* + * Wireless Host Controller Interface for Ultra-Wide-Band and Wireless USB + * + * Copyright (C) 2005-2006 Intel Corporation + * Inaky Perez-Gonzalez + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License version + * 2 as published by the Free Software Foundation. 
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. + * + * + * + * References: + * [WHCI] Wireless Host Controller Interface Specification for + * Certified Wireless Universal Serial Bus, revision 0.95. + */ +#ifndef _LINUX_UWB_WHCI_H_ +#define _LINUX_UWB_WHCI_H_ + +#include + +/* + * UWB interface capability registers (offsets from UWBBASE) + * + * [WHCI] section 2.2 + */ +#define UWBCAPINFO 0x00 /* == UWBCAPDATA(0) */ +# define UWBCAPINFO_TO_N_CAPS(c) (((c) >> 0) & 0xFull) +#define UWBCAPDATA(n) (8*(n)) +# define UWBCAPDATA_TO_VERSION(c) (((c) >> 32) & 0xFFFFull) +# define UWBCAPDATA_TO_OFFSET(c) (((c) >> 18) & 0x3FFFull) +# define UWBCAPDATA_TO_BAR(c) (((c) >> 16) & 0x3ull) +# define UWBCAPDATA_TO_SIZE(c) ((((c) >> 8) & 0xFFull) * sizeof(u32)) +# define UWBCAPDATA_TO_CAP_ID(c) (((c) >> 0) & 0xFFull) + +/* Size of the WHCI capability data (including the RC capability) for + a device with n capabilities. 
*/ +#define UWBCAPDATA_SIZE(n) (8 + 8*(n)) + + +/* + * URC registers (offsets from URCBASE) + * + * [WHCI] section 2.3 + */ +#define URCCMD 0x00 +# define URCCMD_RESET (1 << 31) /* UMC Hardware reset */ +# define URCCMD_RS (1 << 30) /* Run/Stop */ +# define URCCMD_EARV (1 << 29) /* Event Address Register Valid */ +# define URCCMD_ACTIVE (1 << 15) /* Command is active */ +# define URCCMD_IWR (1 << 14) /* Interrupt When Ready */ +# define URCCMD_SIZE_MASK 0x00000fff /* Command size mask */ +#define URCSTS 0x04 +# define URCSTS_EPS (1 << 17) /* Event Processing Status */ +# define URCSTS_HALTED (1 << 16) /* RC halted */ +# define URCSTS_HSE (1 << 10) /* Host System Error...fried */ +# define URCSTS_ER (1 << 9) /* Event Ready */ +# define URCSTS_RCI (1 << 8) /* Ready for Command Interrupt */ +# define URCSTS_INT_MASK 0x00000700 /* URC interrupt sources */ +# define URCSTS_ISI 0x000000ff /* Interrupt Source Identification */ +#define URCINTR 0x08 +# define URCINTR_EN_ALL 0x000007ff /* Enable all interrupt sources */ +#define URCCMDADDR 0x10 +#define URCEVTADDR 0x18 +# define URCEVTADDR_OFFSET_MASK 0xfff /* Event pointer offset mask */ + + +/** Write 32 bit @value to little endian register at @addr */ +static inline +void le_writel(u32 value, void __iomem *addr) +{ + iowrite32(value, addr); +} + + +/** Read from 32 bit little endian register at @addr */ +static inline +u32 le_readl(void __iomem *addr) +{ + return ioread32(addr); +} + + +/** Write 64 bit @value to little endian register at @addr */ +static inline +void le_writeq(u64 value, void __iomem *addr) +{ + iowrite32(value, addr); + iowrite32(value >> 32, addr + 4); +} + + +/** Read from 64 bit little endian register at @addr */ +static inline +u64 le_readq(void __iomem *addr) +{ + u64 value; + value = ioread32(addr); + value |= (u64)ioread32(addr + 4) << 32; + return value; +} + +extern int whci_wait_for(struct device *dev, u32 __iomem *reg, + u32 mask, u32 result, + unsigned long max_ms, const char *tag); + 
+#endif /* #ifndef _LINUX_UWB_WHCI_H_ */ diff --git a/include/linux/verification.h b/include/linux/verification.h index a655923335..a10549a6c7 100644 --- a/include/linux/verification.h +++ b/include/linux/verification.h @@ -1,22 +1,17 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ /* Signature verification * * Copyright (C) 2014 Red Hat, Inc. All Rights Reserved. * Written by David Howells (dhowells@redhat.com) + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public Licence + * as published by the Free Software Foundation; either version + * 2 of the Licence, or (at your option) any later version. */ #ifndef _LINUX_VERIFICATION_H #define _LINUX_VERIFICATION_H -#include - -/* - * Indicate that both builtin trusted keys and secondary trusted keys - * should be used. - */ -#define VERIFY_USE_SECONDARY_KEYRING ((struct key *)1UL) -#define VERIFY_USE_PLATFORM_KEYRING ((struct key *)2UL) - /* * The use to which an asymmetric key is being put. 
*/ @@ -34,7 +29,6 @@ extern const char *const key_being_used_for[NR__KEY_BEING_USED_FOR]; #ifdef CONFIG_SYSTEM_DATA_VERIFICATION struct key; -struct pkcs7_message; extern int verify_pkcs7_signature(const void *data, size_t len, const void *raw_pkcs7, size_t pkcs7_len, @@ -44,15 +38,6 @@ extern int verify_pkcs7_signature(const void *data, size_t len, const void *data, size_t len, size_t asn1hdrlen), void *ctx); -extern int verify_pkcs7_message_sig(const void *data, size_t len, - struct pkcs7_message *pkcs7, - struct key *trusted_keys, - enum key_being_used_for usage, - int (*view_content)(void *ctx, - const void *data, - size_t len, - size_t asn1hdrlen), - void *ctx); #ifdef CONFIG_SIGNED_PE_FILE_VERIFICATION extern int verify_pefile_signature(const void *pebuf, unsigned pelen, diff --git a/include/linux/vermagic.h b/include/linux/vermagic.h index 1eaaa93c37..4efc177ca5 100644 --- a/include/linux/vermagic.h +++ b/include/linux/vermagic.h @@ -1,13 +1,4 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -#ifndef _LINUX_VERMAGIC_H -#define _LINUX_VERMAGIC_H - -#ifndef INCLUDE_VERMAGIC -#error "This header can be included from kernel/module.c or *.mod.c only" -#endif - #include -#include /* Simply sanity version stamp for modules. 
*/ #ifdef CONFIG_SMP @@ -17,8 +8,6 @@ #endif #ifdef CONFIG_PREEMPT #define MODULE_VERMAGIC_PREEMPT "preempt " -#elif defined(CONFIG_PREEMPT_RT) -#define MODULE_VERMAGIC_PREEMPT "preempt_rt " #else #define MODULE_VERMAGIC_PREEMPT "" #endif @@ -32,6 +21,28 @@ #else #define MODULE_VERMAGIC_MODVERSIONS "" #endif +#ifndef MODULE_ARCH_VERMAGIC +#define MODULE_ARCH_VERMAGIC "" +#endif + +#ifdef CONFIG_PAX_REFCOUNT +#define MODULE_PAX_REFCOUNT "REFCOUNT " +#else +#define MODULE_PAX_REFCOUNT "" +#endif + +#ifdef CONSTIFY_PLUGIN +#define MODULE_CONSTIFY_PLUGIN "CONSTIFY_PLUGIN " +#else +#define MODULE_CONSTIFY_PLUGIN "" +#endif + +#ifdef STACKLEAK_PLUGIN +#define MODULE_STACKLEAK_PLUGIN "STACKLEAK_PLUGIN " +#else +#define MODULE_STACKLEAK_PLUGIN "" +#endif + #ifdef RANDSTRUCT_PLUGIN #include #define MODULE_RANDSTRUCT_PLUGIN "RANDSTRUCT_PLUGIN_" RANDSTRUCT_HASHED_SEED @@ -39,11 +50,17 @@ #define MODULE_RANDSTRUCT_PLUGIN #endif +#ifdef CONFIG_GRKERNSEC +#define MODULE_GRSEC "GRSEC " +#else +#define MODULE_GRSEC "" +#endif + #define VERMAGIC_STRING \ UTS_RELEASE " " \ MODULE_VERMAGIC_SMP MODULE_VERMAGIC_PREEMPT \ MODULE_VERMAGIC_MODULE_UNLOAD MODULE_VERMAGIC_MODVERSIONS \ MODULE_ARCH_VERMAGIC \ - MODULE_RANDSTRUCT_PLUGIN + MODULE_PAX_REFCOUNT MODULE_CONSTIFY_PLUGIN MODULE_STACKLEAK_PLUGIN \ + MODULE_GRSEC MODULE_RANDSTRUCT_PLUGIN -#endif /* _LINUX_VERMAGIC_H */ diff --git a/include/linux/vexpress.h b/include/linux/vexpress.h index 2f9dd072f1..f8e76e08eb 100644 --- a/include/linux/vexpress.h +++ b/include/linux/vexpress.h @@ -1,5 +1,12 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the + * GNU General Public License for more details. * * Copyright (C) 2012 ARM Limited */ @@ -10,8 +17,38 @@ #include #include +#define VEXPRESS_SITE_MB 0 +#define VEXPRESS_SITE_DB1 1 +#define VEXPRESS_SITE_DB2 2 +#define VEXPRESS_SITE_MASTER 0xf + +/* Config infrastructure */ + +void vexpress_config_set_master(u32 site); +u32 vexpress_config_get_master(void); + +void vexpress_config_lock(void *arg); +void vexpress_config_unlock(void *arg); + +int vexpress_config_get_topo(struct device_node *node, u32 *site, + u32 *position, u32 *dcc); + +/* Config bridge API */ + +struct vexpress_config_bridge_ops { + struct regmap * (*regmap_init)(struct device *dev, void *context); + void (*regmap_exit)(struct regmap *regmap, void *context); +}; + +struct device *vexpress_config_bridge_register(struct device *parent, + struct vexpress_config_bridge_ops *ops, void *context); + /* Config regmap API */ struct regmap *devm_regmap_init_vexpress_config(struct device *dev); +/* Platform control */ + +void vexpress_flags_set(u32 data); + #endif diff --git a/include/linux/vfio.h b/include/linux/vfio.h index b53a955788..0ecae0b1cd 100644 --- a/include/linux/vfio.h +++ b/include/linux/vfio.h @@ -1,9 +1,12 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * VFIO API definition * * Copyright (C) 2012 Red Hat, Inc. All rights reserved. * Author: Alex Williamson + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. */ #ifndef VFIO_H #define VFIO_H @@ -15,79 +18,43 @@ #include #include -/* - * VFIO devices can be placed in a set, this allows all devices to share this - * structure and the VFIO core will provide a lock that is held around - * open_device()/close_device() for all devices in the set. 
- */ -struct vfio_device_set { - void *set_id; - struct mutex lock; - struct list_head device_list; - unsigned int device_count; -}; - -struct vfio_device { - struct device *dev; - const struct vfio_device_ops *ops; - struct vfio_group *group; - struct vfio_device_set *dev_set; - struct list_head dev_set_list; - - /* Members below here are private, not for driver use */ - refcount_t refcount; - unsigned int open_count; - struct completion comp; - struct list_head group_next; -}; - /** * struct vfio_device_ops - VFIO bus driver device callbacks * - * @open_device: Called when the first file descriptor is opened for this device - * @close_device: Opposite of open_device + * @open: Called when userspace creates new file descriptor for device + * @release: Called when userspace releases file descriptor for device * @read: Perform read(2) on device file descriptor * @write: Perform write(2) on device file descriptor * @ioctl: Perform ioctl(2) on device file descriptor, supporting VFIO_DEVICE_* * operations documented below * @mmap: Perform mmap(2) on a region of the device file descriptor * @request: Request for the bus driver to release the device - * @match: Optional device name match callback (return: 0 for no-match, >0 for - * match, -errno for abort (ex. 
match with insufficient or incorrect - * additional args) */ struct vfio_device_ops { char *name; - int (*open_device)(struct vfio_device *vdev); - void (*close_device)(struct vfio_device *vdev); - ssize_t (*read)(struct vfio_device *vdev, char __user *buf, + int (*open)(void *device_data); + void (*release)(void *device_data); + ssize_t (*read)(void *device_data, char __user *buf, size_t count, loff_t *ppos); - ssize_t (*write)(struct vfio_device *vdev, const char __user *buf, + ssize_t (*write)(void *device_data, const char __user *buf, size_t count, loff_t *size); - long (*ioctl)(struct vfio_device *vdev, unsigned int cmd, + long (*ioctl)(void *device_data, unsigned int cmd, unsigned long arg); - int (*mmap)(struct vfio_device *vdev, struct vm_area_struct *vma); - void (*request)(struct vfio_device *vdev, unsigned int count); - int (*match)(struct vfio_device *vdev, char *buf); + int (*mmap)(void *device_data, struct vm_area_struct *vma); + void (*request)(void *device_data, unsigned int count); }; extern struct iommu_group *vfio_iommu_group_get(struct device *dev); extern void vfio_iommu_group_put(struct iommu_group *group, struct device *dev); -void vfio_init_group_dev(struct vfio_device *device, struct device *dev, - const struct vfio_device_ops *ops); -void vfio_uninit_group_dev(struct vfio_device *device); -int vfio_register_group_dev(struct vfio_device *device); -void vfio_unregister_group_dev(struct vfio_device *device); +extern int vfio_add_group_dev(struct device *dev, + const struct vfio_device_ops *ops, + void *device_data); + +extern void *vfio_del_group_dev(struct device *dev); extern struct vfio_device *vfio_device_get_from_dev(struct device *dev); extern void vfio_device_put(struct vfio_device *device); - -int vfio_assign_device_set(struct vfio_device *device, void *set_id); - -/* events for the backend driver notify callback */ -enum vfio_iommu_notify_type { - VFIO_IOMMU_CONTAINER_CLOSE = 0, -}; +extern void *vfio_device_data(struct vfio_device 
*device); /** * struct vfio_iommu_driver_ops - VFIO IOMMU driver callbacks @@ -108,24 +75,7 @@ struct vfio_iommu_driver_ops { struct iommu_group *group); void (*detach_group)(void *iommu_data, struct iommu_group *group); - int (*pin_pages)(void *iommu_data, - struct iommu_group *group, - unsigned long *user_pfn, - int npage, int prot, - unsigned long *phys_pfn); - int (*unpin_pages)(void *iommu_data, - unsigned long *user_pfn, int npage); - int (*register_notifier)(void *iommu_data, - unsigned long *events, - struct notifier_block *nb); - int (*unregister_notifier)(void *iommu_data, - struct notifier_block *nb); - int (*dma_rw)(void *iommu_data, dma_addr_t user_iova, - void *data, size_t count, bool write); - struct iommu_domain *(*group_iommu_domain)(void *iommu_data, - struct iommu_group *group); - void (*notify)(void *iommu_data, - enum vfio_iommu_notify_type event); + }; extern int vfio_register_iommu_driver(const struct vfio_iommu_driver_ops *ops); @@ -138,55 +88,10 @@ extern void vfio_unregister_iommu_driver( */ extern struct vfio_group *vfio_group_get_external_user(struct file *filep); extern void vfio_group_put_external_user(struct vfio_group *group); -extern struct vfio_group *vfio_group_get_external_user_from_dev(struct device - *dev); -extern bool vfio_external_group_match_file(struct vfio_group *group, - struct file *filep); extern int vfio_external_user_iommu_id(struct vfio_group *group); extern long vfio_external_check_extension(struct vfio_group *group, unsigned long arg); -#define VFIO_PIN_PAGES_MAX_ENTRIES (PAGE_SIZE/sizeof(unsigned long)) - -extern int vfio_pin_pages(struct device *dev, unsigned long *user_pfn, - int npage, int prot, unsigned long *phys_pfn); -extern int vfio_unpin_pages(struct device *dev, unsigned long *user_pfn, - int npage); - -extern int vfio_group_pin_pages(struct vfio_group *group, - unsigned long *user_iova_pfn, int npage, - int prot, unsigned long *phys_pfn); -extern int vfio_group_unpin_pages(struct vfio_group *group, - 
unsigned long *user_iova_pfn, int npage); - -extern int vfio_dma_rw(struct vfio_group *group, dma_addr_t user_iova, - void *data, size_t len, bool write); - -extern struct iommu_domain *vfio_group_iommu_domain(struct vfio_group *group); - -/* each type has independent events */ -enum vfio_notify_type { - VFIO_IOMMU_NOTIFY = 0, - VFIO_GROUP_NOTIFY = 1, -}; - -/* events for VFIO_IOMMU_NOTIFY */ -#define VFIO_IOMMU_NOTIFY_DMA_UNMAP BIT(0) - -/* events for VFIO_GROUP_NOTIFY */ -#define VFIO_GROUP_NOTIFY_SET_KVM BIT(0) - -extern int vfio_register_notifier(struct device *dev, - enum vfio_notify_type type, - unsigned long *required_events, - struct notifier_block *nb); -extern int vfio_unregister_notifier(struct device *dev, - enum vfio_notify_type type, - struct notifier_block *nb); - -struct kvm; -extern void vfio_group_set_kvm(struct vfio_group *group, struct kvm *kvm); - /* * Sub-module helpers */ @@ -198,16 +103,8 @@ extern struct vfio_info_cap_header *vfio_info_cap_add( struct vfio_info_cap *caps, size_t size, u16 id, u16 version); extern void vfio_info_cap_shift(struct vfio_info_cap *caps, size_t offset); -extern int vfio_info_add_capability(struct vfio_info_cap *caps, - struct vfio_info_cap_header *cap, - size_t size); - -extern int vfio_set_irqs_validate_and_prepare(struct vfio_irq_set *hdr, - int num_irqs, int max_irq_type, - size_t *data_size); - struct pci_dev; -#if IS_ENABLED(CONFIG_VFIO_SPAPR_EEH) +#ifdef CONFIG_EEH extern void vfio_spapr_pci_eeh_open(struct pci_dev *pdev); extern void vfio_spapr_pci_eeh_release(struct pci_dev *pdev); extern long vfio_spapr_iommu_eeh_ioctl(struct iommu_group *group, @@ -228,7 +125,7 @@ static inline long vfio_spapr_iommu_eeh_ioctl(struct iommu_group *group, { return -ENOTTY; } -#endif /* CONFIG_VFIO_SPAPR_EEH */ +#endif /* CONFIG_EEH */ /* * IRQfd - generic @@ -240,7 +137,7 @@ struct virqfd { void (*thread)(void *, void *); void *data; struct work_struct inject; - wait_queue_entry_t wait; + wait_queue_t wait; poll_table pt; 
struct work_struct shutdown; struct virqfd **pvirqfd; diff --git a/include/linux/vfs.h b/include/linux/vfs.h index 33952ac00a..e701d05414 100644 --- a/include/linux/vfs.h +++ b/include/linux/vfs.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_VFS_H #define _LINUX_VFS_H diff --git a/include/linux/vga_switcheroo.h b/include/linux/vga_switcheroo.h index 7e6ac0114d..161604386b 100644 --- a/include/linux/vga_switcheroo.h +++ b/include/linux/vga_switcheroo.h @@ -84,8 +84,8 @@ enum vga_switcheroo_state { * Client identifier. Audio clients use the same identifier & 0x100. */ enum vga_switcheroo_client_id { - VGA_SWITCHEROO_UNKNOWN_ID = 0x1000, - VGA_SWITCHEROO_IGD = 0, + VGA_SWITCHEROO_UNKNOWN_ID = -1, + VGA_SWITCHEROO_IGD, VGA_SWITCHEROO_DIS, VGA_SWITCHEROO_MAX_CLIENTS, }; @@ -133,18 +133,15 @@ struct vga_switcheroo_handler { * @can_switch: check if the device is in a position to switch now. * Mandatory. The client should return false if a user space process * has one of its device files open - * @gpu_bound: notify the client id to audio client when the GPU is bound. * * Client callbacks. A client can be either a GPU or an audio device on a GPU. * The @set_gpu_state and @can_switch methods are mandatory, @reprobe may be * set to NULL. For audio clients, the @reprobe member is bogus. - * OTOH, @gpu_bound is only for audio clients, and not used for GPU clients. 
*/ struct vga_switcheroo_client_ops { void (*set_gpu_state)(struct pci_dev *dev, enum vga_switcheroo_state); void (*reprobe)(struct pci_dev *dev); bool (*can_switch)(struct pci_dev *dev); - void (*gpu_bound)(struct pci_dev *dev, enum vga_switcheroo_client_id); }; #if defined(CONFIG_VGA_SWITCHEROO) @@ -154,7 +151,7 @@ int vga_switcheroo_register_client(struct pci_dev *dev, bool driver_power_control); int vga_switcheroo_register_audio_client(struct pci_dev *pdev, const struct vga_switcheroo_client_ops *ops, - struct pci_dev *vga_dev); + enum vga_switcheroo_client_id id); void vga_switcheroo_client_fb_set(struct pci_dev *dev, struct fb_info *info); @@ -171,8 +168,11 @@ int vga_switcheroo_process_delayed_switch(void); bool vga_switcheroo_client_probe_defer(struct pci_dev *pdev); enum vga_switcheroo_state vga_switcheroo_get_client_state(struct pci_dev *dev); -int vga_switcheroo_init_domain_pm_ops(struct device *dev, struct dev_pm_domain *domain); +void vga_switcheroo_set_dynamic_switch(struct pci_dev *pdev, enum vga_switcheroo_state dynamic); + +int vga_switcheroo_init_domain_pm_ops(struct device *dev, dev_pm_domain_no_const *domain); void vga_switcheroo_fini_domain_pm_ops(struct device *dev); +int vga_switcheroo_init_domain_pm_optimus_hdmi_audio(struct device *dev, dev_pm_domain_no_const *domain); #else static inline void vga_switcheroo_unregister_client(struct pci_dev *dev) {} @@ -183,7 +183,7 @@ static inline int vga_switcheroo_register_handler(const struct vga_switcheroo_ha enum vga_switcheroo_handler_flags_t handler_flags) { return 0; } static inline int vga_switcheroo_register_audio_client(struct pci_dev *pdev, const struct vga_switcheroo_client_ops *ops, - struct pci_dev *vga_dev) { return 0; } + enum vga_switcheroo_client_id id) { return 0; } static inline void vga_switcheroo_unregister_handler(void) {} static inline enum vga_switcheroo_handler_flags_t vga_switcheroo_handler_flags(void) { return 0; } static inline int vga_switcheroo_lock_ddc(struct pci_dev 
*pdev) { return -ENODEV; } @@ -192,8 +192,11 @@ static inline int vga_switcheroo_process_delayed_switch(void) { return 0; } static inline bool vga_switcheroo_client_probe_defer(struct pci_dev *pdev) { return false; } static inline enum vga_switcheroo_state vga_switcheroo_get_client_state(struct pci_dev *dev) { return VGA_SWITCHEROO_ON; } -static inline int vga_switcheroo_init_domain_pm_ops(struct device *dev, struct dev_pm_domain *domain) { return -EINVAL; } +static inline void vga_switcheroo_set_dynamic_switch(struct pci_dev *pdev, enum vga_switcheroo_state dynamic) {} + +static inline int vga_switcheroo_init_domain_pm_ops(struct device *dev, dev_pm_domain_no_const *domain) { return -EINVAL; } static inline void vga_switcheroo_fini_domain_pm_ops(struct device *dev) {} +static inline int vga_switcheroo_init_domain_pm_optimus_hdmi_audio(struct device *dev, dev_pm_domain_no_const *domain) { return -EINVAL; } #endif #endif /* _LINUX_VGA_SWITCHEROO_H_ */ diff --git a/include/linux/vgaarb.h b/include/linux/vgaarb.h index b4b9137f97..ee162e3e87 100644 --- a/include/linux/vgaarb.h +++ b/include/linux/vgaarb.h @@ -33,8 +33,6 @@ #include